+ ./ya make . -T --test-size=small --test-size=medium --stat --test-threads 52 --link-threads 12 -DUSE_EAT_MY_DATA --build release --sanitize=address -DDEBUGINFO_LINES_ONLY --bazel-remote-store --bazel-remote-base-uri http://cachesrv.internal:8081 --bazel-remote-username cache_user --bazel-remote-password-file /tmp/tmp.XD7D8UzS7I --bazel-remote-put --dist-cache-max-file-size=209715200 -A --retest --stat -DCONSISTENT_DEBUG --no-dir-outputs --test-failure-code 0 --build-all --cache-size 2TB --force-build-depends --log-file /home/runner/actions_runner/_work/ydb/ydb/tmp/results/ya_log.txt --evlog-file /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/ya_evlog.jsonl --junit /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/junit.xml --build-results-report /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/report.json --output /home/runner/actions_runner/_work/ydb/ydb/tmp/out
Output root is subdirectory of Arcadia root, this may cause non-idempotent build
Configuring dependencies for platform default-linux-x86_64-release-asan
[2 ymakes processing] [7157/7171 modules configured]
[2 ymakes processing] [8172/8173 modules configured]
[2 ymakes processing] [8246/8246 modules configured]
Configuring dependencies for platform tools
Warn[-WPluginErr]: in $B/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium: Requirement ram is redefined 16 -> 28
[3 ymakes processing] [8850/8850 modules configured]
[3 ymakes processing] [8850/8850 modules configured] [144/144 modules rendered]
[2 ymakes processing] [8850/8850 modules configured] [4926/5166 modules rendered]
[2 ymakes processing] [8850/8850 modules configured] [5166/5166 modules rendered]
Configuring dependencies for platform test_tool_tc1-global
[0 ymakes processing] [8856/8856 modules configured] [5166/5166 modules rendered]
Configuring tests execution
Configuring local and dist store caches
Configuration done.
Preparing for execution |33.3%| CLEANING SYMRES | 2.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/resource_pool_classifiers/libcore-sys_view-resource_pool_classifiers.a | 3.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/libydb-core-tablet_flat.a | 3.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/actors/libfq-libs-actors.a | 3.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/priorities/service/libtx-priorities-service.a | 3.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/tablets/libcore-sys_view-tablets.a | 3.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/resource_pools/libcore-sys_view-resource_pools.a | 3.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/libtx-replication-controller.a | 3.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/sessions/libcore-sys_view-sessions.a | 3.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/lib/libblobstorage-ut_blobstorage-lib.a | 3.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/test_helper/libtx-columnshard-test_helper.a | 4.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/mediator/libcore-tx-mediator.a | 4.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/show_create/libcore-sys_view-show_create.a | 4.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel | 6.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/common/libkqp-ut-common.a | 6.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/neon64/liblibs-base64-neon64.a | 5.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/murmur/libcpp-digest-murmur.a | 6.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/deprecated/yajl/libcontrib-deprecated-yajl.a | 6.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dwarf_backtrace/liblibrary-cpp-dwarf_backtrace.a | 6.1%| [AR] {BAZEL_DOWNLOAD} $(B)/certs/libcerts.global.a | 6.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/enumbitset/liblibrary-cpp-enumbitset.a | 6.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/src/libtools-kqprun-src.a | 6.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/events/liblibs-checkpoint_storage-events.a | 6.7%| PREPARE $(VCS) | 7.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/version/libversion.a | 8.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/dictionary/libarrow-accessor-dictionary.a | 8.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/nghttp2/libcontrib-libs-nghttp2.a | 8.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libssh2/libcontrib-libs-libssh2.a | 8.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/engine/minikql/libcore-engine-minikql.a | 8.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/minikql_compile/libcore-client-minikql_compile.a | 8.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/libblobstorage-vdisk-anubis_osiris.a | 8.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/libvdisk-hulldb-base.a | 9.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/console/validators/libcms-console-validators.a | 9.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/libvdisk-hulldb-bulksst_add.a | 8.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/formats/arrow/program/libformats-arrow-program.a | 8.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/libvdisk-hulldb-barriers.a | 8.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/libydb-core-cms.a | 8.3%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/blobstorage/vdisk/hullop/libblobstorage-vdisk-hullop.a | 8.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/libvdisk-hulldb-cache_block.a | 8.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/libvdisk-hullop-hullcompdelete.a | 8.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/libtx-replication-service.a | 8.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_ut_common.cpp | 9.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/query/libblobstorage-vdisk-query.a | 9.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/pdisk/libcore-blobstorage-pdisk.a | 9.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/ydb/ydb-tests-stability-ydb | 9.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/repl/libblobstorage-vdisk-repl.a | 9.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/init/libcore-config-init.a | 9.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/localrecovery/libblobstorage-vdisk-localrecovery.a | 9.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/base_utils/libbase_utils.a | 9.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/pdisk/mock/libblobstorage-pdisk-mock.a |10.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/recovery/libvdisk-hulldb-recovery.a |10.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/balance/libblobstorage-vdisk-balance.a |10.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/libydb-core-formats.a |10.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/debug/libydb-core-debug.a |10.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/control/lib/libcore-control-lib.a | 9.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxx/liblibs-cxxsupp-libcxx.a | 9.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/docapi/libydb-core-docapi.a | 9.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/libcore-config-protos.a | 9.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yt/yt/core/libyt-yt-core.a | 9.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/ingress/libblobstorage-vdisk-ingress.a | 9.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/filestore/core/libcore-filestore-core.a | 9.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/splitter/libformats-arrow-splitter.a | 9.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yt/yt/client/libyt-yt-client.a | 9.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/avro/liblibs-apache-avro.a | 9.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/misc/isa_crc64/libisa-l_crc_yt_patch.a |10.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/common/liblibrary-formats-arrow-accessor-common.a |10.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/composite/liblibrary-formats-arrow-accessor-composite.a |10.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/libcore-blobstorage-vdisk.a |10.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/program/libformats-arrow-program.global.a |10.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/probes.cpp |10.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/protos/libblobstorage-vdisk-protos.a |11.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/json/libcore-viewer-json.a |11.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/dictionary/libarrow-accessor-dictionary.global.a |11.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/abstract/libarrow-accessor-abstract.a |11.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/erasure/libydb-core-erasure.a |11.2%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/math_udf.cpp |11.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/builtin_proto/protos_from_protoc/libpy3protobuf-builtin_proto-protos_from_protoc.global.a |11.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/test/libvdisk-hulldb-test.a |11.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/builtin_proto/protos_from_protobuf/libpy3protobuf-builtin_proto-protos_from_protobuf.global.a |11.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/digest_udf.cpp |11.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/build/libyt-yt-build.a |11.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/json2_udf.cpp |11.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/engine/libydb-core-engine.a |11.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/reader/libformats-arrow-reader.a |11.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/huge/libblobstorage-vdisk-huge.a |12.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/validation/libcore-config-validation.a |12.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/requests-oauthlib/libpy3contrib-python-requests-oauthlib.global.a |12.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/requests/py3/libpy3python-requests-py3.global.a |12.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/composite_serial/libarrow-accessor-composite_serial.a |12.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/libpy3ydb-library-services.global.a |12.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/arrow/fbs/libclient-arrow-fbs.a |12.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/pytest/libpy3library-python-pytest.global.a |13.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/libpy3library-mkql_proto-protos.global.a |13.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/query_tracker_client/libyt-client-query_tracker_client.a |13.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyrsistent/py3/libpy3python-pyrsistent-py3.global.a |13.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/libpy3library-login-protos.global.a |13.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/libpy3library-formats-arrow-protos.global.a |13.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/arrow/libyt-client-arrow.a |13.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/incrhuge/libcore-blobstorage-incrhuge.a |13.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ydb/py3/libpy3python-ydb-py3.global.a |13.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/libydb-core-cms.global.a |13.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/libpy3library-folder_service-proto.global.a |13.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/libpy3providers-s3-proto.global.a |13.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/yatest_common/libpy3python-testing-yatest_common.global.a |13.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/https/libyt-core-https.a |13.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/control/libydb-core-control.a |13.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/re2_udf.cpp |13.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/procfs/libyt-library-procfs.a |14.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/erasure/libyt-library-erasure.a |14.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/decimal/libyt-library-decimal.a |14.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/audit/events/liblibs-audit-events.a |14.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/highwayhash/libcontrib-libs-highwayhash.a |14.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/formats/arrow/switch/libformats-arrow-switch.a |15.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/numeric/libyt-library-numeric.a |15.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/libvdisk-hulldb-compstrat.a |15.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/plain/libarrow-accessor-plain.a |15.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/libvdisk-hulldb-generic.a |15.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/libyt-yt-core.global.a |15.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/generated/codegen/ydb-core-base-generated-codegen |15.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/datetime2_udf.cpp |15.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/column_converters/libyt-library-column_converters.a |15.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/highwayhash/arch/avx2/libhighwayhash-arch-avx2.a |15.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/libvdisk-hulldb-fresh.a |15.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/highwayhash/arch/sse41/libhighwayhash-arch-sse41.a |15.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/filesystem/librestricted-boost-filesystem.a |15.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/profiling/libyt-library-profiling.a |15.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/profiling/resource_tracker/liblibrary-profiling-resource_tracker.global.a |15.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/profiling/resource_tracker/liblibrary-profiling-resource_tracker.a |15.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/re2/libyt-library-re2.a |15.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/unicode_udf.cpp |16.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/plain/libarrow-accessor-plain.global.a |16.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/rows/libformats-arrow-rows.a |16.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/antlr4-c3/libcontrib-libs-antlr4-c3.a |16.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/quantile_digest/libyt-library-quantile_digest.a |16.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Werkzeug/py3/libpy3python-Werkzeug-py3.global.a |16.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/scrub/libblobstorage-vdisk-scrub.a |16.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/cli_base/libcli_base.a |16.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/discovery/libydb-core-discovery.a |16.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/tz_types/libyt-library-tz_types.a |17.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/cli_config_base/libcore-driver_lib-cli_config_base.a |17.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/auth/libyt-library-auth.a |17.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/tvm/libyt-library-tvm.a |17.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/transformer/libformats-arrow-transformer.a |17.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/string_udf.cpp |17.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/fs/libpy3library-python-fs.global.a |17.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sparsed/libarrow-accessor-sparsed.global.a |17.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/undumpable/libyt-library-undumpable.a |17.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/libpy3api-grpc-draft.global.a |17.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/ytprof/api/liblibrary-ytprof-api.a |17.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/cli_utils/libcli_utils.a |18.0%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/blobstorage/vdisk/skeleton/libblobstorage-vdisk-skeleton.a |18.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/psutil/py3/libpy3python-psutil-py3.global.a |18.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/find_root/libpy3library-python-find_root.global.a |18.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/http/libyt-core-http.a |17.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/common/libblobstorage-vdisk-common.a |18.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/skiff_ext/libyt-library-skiff_ext.a |18.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/formats/libyt_proto-yt-formats.a |18.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/signals/libyt-library-signals.a |18.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/syncer/libblobstorage-vdisk-syncer.a |18.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/func/libpy3library-python-func.global.a |18.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/import_tracing/constructor/libpy3python-import_tracing-constructor.global.a |18.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/synclog/libblobstorage-vdisk-synclog.a |17.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/import_tracing/lib/libpy3python-import_tracing-lib.global.a |17.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/save_load/libformats-arrow-save_load.a |17.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/libpy3ydb-core-protos.global.a |18.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.a |18.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/nodewarden/libcore-blobstorage-nodewarden.a |18.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/pytest/plugins/libpy3python-pytest-plugins.global.a |18.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sub_columns/libarrow-accessor-sub_columns.global.a |18.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/proto/liblibs-checkpoint_storage-proto.a |18.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/tracing/libyt-library-tracing.a |18.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/libyt_proto-yt-core.a |18.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/reservoir_sampling/libpy3library-python-reservoir_sampling.global.a |18.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpointing_common/libfq-libs-checkpointing_common.a |19.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/libpy3core-config-protos.global.a |18.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/common/libformats-arrow-common.a |19.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sparsed/libarrow-accessor-sparsed.a |19.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/libpy3libs-config-protos.global.a |19.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/other/libcore-blobstorage-other.a |19.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/module/libpy3python-symbols-module.global.a |19.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/module/libpy3python-symbols-module.a |19.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/version/ut/ydb-core-driver_lib-version-ut |19.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/python/libpy3cpython-symbols-python.global.a |19.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/audit/libfq-libs-audit.a |19.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/lwtrace_probes/libcore-blobstorage-lwtrace_probes.a |19.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/run/librun.a |19.6%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/blockstore/core/libcore-blockstore-core.a |19.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/s3transfer/py3/libpy3python-s3transfer-py3.global.a |19.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/responses/py3/libpy3python-responses-py3.global.a |19.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/registry/libpython-symbols-registry.a |19.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/filter/libpy3python-testing-filter.global.a |19.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/xmltodict/py3/libpy3python-xmltodict-py3.global.a |19.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/runtime_py3/main/libpython-runtime_py3-main.a |19.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/yatest_lib/libpy3python-testing-yatest_lib.global.a |19.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/windows/libpy3library-python-windows.global.a |19.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/runtime_py3/libpy3library-python-runtime_py3.global.a |19.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/common/ut_helpers/libproviders-common-ut_helpers.a |20.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pytz/py3/libpy3python-pytz-py3.global.a |20.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sub_columns/libarrow-accessor-sub_columns.a |20.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/generated/codegen/main.cpp |20.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/libc/libpython-symbols-libc.global.a |20.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jmespath/py3/libpy3python-jmespath-py3.global.a |20.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/libydb-core-external_sources.a |20.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/strings/libpy3library-python-strings.a |20.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/defrag/libblobstorage-vdisk-defrag.a |20.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/hash/libformats-arrow-hash.a |20.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/strings/libpy3library-python-strings.global.a |20.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/scheme_cache_lib/libcore-client-scheme_cache_lib.a |20.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/svn_version/libpy3library-python-svn_version.a |20.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/client/metadata/libcore-client-metadata.a |20.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/svn_version/libpy3library-python-svn_version.global.a |20.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/resource/libpy3library-python-resource.global.a |21.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/psutil/py3/libpy3python-psutil-py3.a |21.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.global.a |20.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/formats/arrow/libcore-formats-arrow.a |20.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/change_exchange/libydb-core-change_exchange.a |20.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpointing/libfq-libs-checkpointing.a |20.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/runtime_py3/libpy3library-python-runtime_py3.a |20.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/console/libcore-cms-console.a |20.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/Modules/_sqlite/libpy3python3-Modules-_sqlite.a |20.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/server/libcore-client-server.a |20.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/Modules/_sqlite/libpy3python3-Modules-_sqlite.global.a |21.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/python/testing/recipe/libpy3python-testing-recipe.global.a |21.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/object_storage/libcore-external_sources-object_storage.a |21.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/object_storage/inference/libexternal_sources-object_storage-inference.a |21.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/deprecated/liblibrary-yaml_config-deprecated.a |21.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/console/util/libcms-console-util.a |21.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/wheel/libpy3contrib-python-wheel.global.a |21.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/dictionary/libformats-arrow-dictionary.a |21.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/yarl/libpy3contrib-python-yarl.a |21.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/recipes/common/libpy3library-recipes-common.global.a |22.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/python-dateutil/py3/libpy3python-python-dateutil-py3.global.a |20.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pytest/py3/libpy3python-pytest-py3.global.a |20.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/d8a0ba19a8c38a9b8166f51a9b_raw.auxcpp |20.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/serializer/utils.cpp |20.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/d52903a0693870f83d0bbe0ab8_raw.auxcpp |20.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/e72d7c890a4e6d107b4b82a390_raw.auxcpp |20.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board.{pb.h ... grpc.pb.h} |21.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/long_tx_service.{pb.h ... grpc.pb.h} |21.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/libydb-core-protos.a |21.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/b04a8a4d880a6ab0d69f34e52c_raw.auxcpp |21.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/pq_async_io/libtests-fq-pq_async_io.a |21.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/versioned_io_options.cpp |21.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ce697fc3b324cb6152c4d7223d_raw.auxcpp |21.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/versioned_row.cpp |21.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/libfq-libs-checkpoint_storage.a |21.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/unistat/libmonlib-encode-unistat.a |21.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/b96345c0a74633add1330c0726_raw.auxcpp |21.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_scheme.{pb.h ... grpc.pb.h} |21.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/lib2/py/libpy3python3-lib2-py.global.a |21.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/value_consumer.cpp |21.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/botocore/py3/libpy3python-botocore-py3.global.a |21.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/afa8c81460609cfd6247d9dbf7_raw.auxcpp |21.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/arrow_batch_builder.cpp |21.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_columnshard.{pb.h ... 
grpc.pb.h} |21.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/moto/py3/libpy3python-moto-py3.global.a |21.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/priorities/service/service.cpp |21.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_pdiskfit/lib/libblobstorage-ut_pdiskfit-lib.a |22.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/special_keys.cpp |22.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/common/kqp_ut_common.cpp |22.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/formats/libyt-client-formats.a |22.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base.grpc.pb.cc |22.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/common/libut-federated_query-common.a |22.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage.grpc.pb.cc |22.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/monlib/libpy3library-python-monlib.global.a |22.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_table_impl.pb.cc |22.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/libconnector-libcpp-ut_helpers.a |22.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_flags.pb.cc |22.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/olap/helpers/libut-olap-helpers.a |22.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base.pb.cc |22.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/monlib/libpy3library-python-monlib.a |22.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot_config.grpc.pb.cc |22.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot_config.pb.cc |23.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/yql_translation_settings.pb.cc |23.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/yql_translation_settings.grpc.pb.cc |23.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot.pb.cc |23.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_table_impl.grpc.pb.cc |23.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe |23.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/workload_manager_config.pb.cc |23.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_scheme.pb.cc |23.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_result_set_old.grpc.pb.cc |23.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/workload_manager_config.grpc.pb.cc |23.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_result_set_old.pb.cc |23.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage.pb.cc |23.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/libformat_handler-ut-common.a |23.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/other/mon_vdisk_stream.cpp |23.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_flags.grpc.pb.cc |23.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_disk_states.pb.cc |24.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_disk_states.grpc.pb.cc |24.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_sequenceshard.pb.cc |24.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/test_shard.grpc.pb.cc |24.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/client/libyt_proto-yt-client.a |24.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_slot_broker.pb.cc |24.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_slot_broker.grpc.pb.cc |24.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_pool.grpc.pb.cc |24.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_pool.pb.cc |24.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tx.pb.cc |24.5%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/protos/tablet_tx.grpc.pb.cc |24.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view_types.pb.cc |24.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/stream.pb.cc |24.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tracing_signals.grpc.pb.cc |24.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_pipe.pb.cc |24.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/defaults.cpp |24.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/database_resolver_mock.cpp |24.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_pipe.grpc.pb.cc |24.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters.grpc.pb.cc |24.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters.pb.cc |24.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/replication.grpc.pb.cc |25.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.pb.h_serialized.cpp |25.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs.pb.cc |25.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/query_stats.pb.cc |25.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.pb.cc |24.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/stream.grpc.pb.cc |24.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.grpc.pb.cc |24.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board.grpc.pb.cc |24.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/serverless_proxy_config.grpc.pb.cc |24.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv.pb.cc |24.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/s3_settings.pb.cc |25.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/s3_settings.grpc.pb.cc |25.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/yql/essentials/tools/sql2yql/sql2yql |25.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pqconfig.grpc.pb.cc |25.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/netclassifier.pb.cc |25.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/netclassifier.grpc.pb.cc |25.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/service_initializer.cpp |25.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq.grpc.pb.cc |25.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board.pb.cc |25.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/show_create/show_create.cpp |25.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_health.pb.cc |25.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/formats/libyt-library-formats.a |25.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv.grpc.pb.cc |25.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_health.grpc.pb.cc |25.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/metrics.pb.cc |25.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/metrics.grpc.pb.cc |25.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_controller_config.pb.cc |25.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_stats.grpc.pb.cc |26.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud |26.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_controller_config.grpc.pb.cc |26.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/test/testhull_index.cpp |26.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/service/stub/libudf-service-stub.global.a |26.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/long_tx_service.grpc.pb.cc |26.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters.grpc.pb.cc |26.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_stats.pb.cc |26.6%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/cms/sentinel_ut.cpp |26.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats.pb.cc |26.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical.grpc.pb.cc |26.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats.grpc.pb.cc |26.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp.grpc.pb.cc |26.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key.pb.cc |26.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/import.pb.cc |27.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key.grpc.pb.cc |27.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kesus.grpc.pb.cc |27.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kafka.grpc.pb.cc |27.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kafka.pb.cc |27.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/index_builder.grpc.pb.cc |27.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/http_config.pb.cc |27.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/import.grpc.pb.cc |27.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/index_builder.pb.cc |27.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kesus.pb.cc |27.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/load_test.pb.cc |27.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/libcontrib-tools-python3.a |27.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical.pb.cc |27.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/load_test.grpc.pb.cc |27.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local.pb.cc |27.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/pg/libessentials-sql-pg.a |27.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters.pb.cc |27.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp.pb.cc |27.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/maintenance.grpc.pb.cc |27.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/long_tx_service.pb.cc |27.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/replication.pb.cc |27.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/maintenance.pb.cc |27.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/sql/ydb-tests-sql |27.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine.grpc.pb.cc |27.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine.pb.cc |28.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/src/ydb_setup.cpp |28.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/version/ut/version_ut.cpp |28.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local.grpc.pb.cc |28.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pdiskfit.pb.cc |28.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/profiler.pb.cc |28.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_limits.pb.cc |28.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/profiler.grpc.pb.cc |28.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_whiteboard.pb.cc |28.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/croaring/libcontrib-libs-croaring.a |28.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_broker.grpc.pb.cc |28.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_broker.pb.cc |28.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_limits.grpc.pb.cc |28.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_log.grpc.pb.cc |28.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_whiteboard.grpc.pb.cc |28.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/health.grpc.pb.cc |28.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pdiskfit.grpc.pb.cc |28.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/resource_broker.pb.cc |29.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/resource_broker.grpc.pb.cc |29.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon.grpc.pb.cc |29.1%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/protos/pqconfig.pb.cc |29.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/query_stats.grpc.pb.cc |29.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon.pb.cc |29.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/feature_flags.grpc.pb.cc |29.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_metadata.pb.cc |29.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_log.pb.cc |29.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config.grpc.pb.cc |29.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/breakpad/src/liblibs-breakpad-src.a |29.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/http_config.grpc.pb.cc |29.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board_mon.grpc.pb.cc |29.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board_mon.pb.cc |29.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq.pb.cc |29.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_config.pb.cc |29.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/health.pb.cc |29.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/breakpad/src/client/linux/libsrc-client-linux.a |29.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/hive.pb.cc |29.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sysview_processor.grpc.pb.cc |30.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_pq_old.pb.cc |30.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_status_proxy.pb.cc |30.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_status_proxy.grpc.pb.cc |30.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_pq_old.grpc.pb.cc |30.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group.grpc.pb.cc |30.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export.pb.cc |30.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_config.grpc.pb.cc |30.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.pb.cc |30.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.grpc.pb.cc |30.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |30.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_allocator.grpc.pb.cc |30.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/hive.grpc.pb.cc |30.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_statistics_aggregator.pb.cc |30.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sysview_processor.pb.cc |30.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_statistics_aggregator.grpc.pb.cc |30.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_coordinator.pb.cc |30.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sequenceshard.grpc.pb.cc |30.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sequenceshard.pb.cc |30.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_columnshard.grpc.pb.cc |30.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_hive.pb.cc |31.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_hive.grpc.pb.cc |31.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/tools/dump_ds_init/yaml-to-proto-dump-ds-init |31.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_coordinator.grpc.pb.cc |31.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_testshard.grpc.pb.cc |31.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_columnshard.pb.cc |31.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_cms.grpc.pb.cc |31.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_bs_controller.pb.cc |31.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_tenant.grpc.pb.cc |31.3%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/protos/counters_schemeshard.grpc.pb.cc |31.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base.grpc.pb.cc |31.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base.pb.cc |31.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config.pb.cc |31.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/formats/arrow/serializer/abstract.cpp |31.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot.grpc.pb.cc |31.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base3.pb.cc |31.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config.grpc.pb.cc |31.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cms.grpc.pb.cc |31.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap.pb.cc |31.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/auth.pb.cc |31.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor.cpp |31.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_backup.grpc.pb.cc |31.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config.grpc.pb.cc |31.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.pb.h_serialized.cpp |31.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base3.grpc.pb.cc |31.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bind_channel_storage_pool.pb.cc |32.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap.grpc.pb.cc |32.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bind_channel_storage_pool.grpc.pb.cc |32.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/backup.grpc.pb.cc |32.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/base.grpc.pb.cc |32.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/backup.pb.cc |32.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/base.pb.cc |32.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/auth.grpc.pb.cc |32.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/alloc.pb.cc |32.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_config.grpc.pb.cc |32.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/alloc.grpc.pb.cc |32.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/service/ut/ydb-core-sys_view-service-ut |32.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.grpc.pb.cc |32.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk.pb.cc |32.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/channel_purpose.pb.cc |32.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config.grpc.pb.cc |32.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/channel_purpose.grpc.pb.cc |32.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk.grpc.pb.cc |32.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/common/columnshard.cpp |32.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color.pb.cc |32.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_state_storage.cpp |32.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color.grpc.pb.cc |32.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_config.pb.cc |32.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config.pb.cc |32.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config.pb.cc |33.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bridge.pb.cc |33.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/change_exchange.grpc.pb.cc |33.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_blob_depot.pb.cc |33.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery.cpp |32.9%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/protos/bootstrapper.grpc.pb.cc |33.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrapper.pb.cc |33.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bridge.grpc.pb.cc |33.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compaction.pb.cc |33.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compaction.grpc.pb.cc |33.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.pb.cc |33.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units.pb.cc |33.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units.grpc.pb.cc |33.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_blob_depot.grpc.pb.cc |33.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_backup.pb.cc |33.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cms.pb.cc |33.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters.pb.cc |33.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters.grpc.pb.cc |33.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_bs_controller.grpc.pb.cc |33.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_tenant.pb.cc |34.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_schemeshard.pb.cc |34.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_cms.pb.cc |34.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/change_exchange.pb.cc |34.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_pq.grpc.pb.cc |34.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_datashard.grpc.pb.cc |34.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_datashard.pb.cc |34.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_keyvalue.pb.cc |34.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_node_broker.pb.cc |34.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_kesus.pb.cc |34.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_kesus.grpc.pb.cc |34.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_keyvalue.grpc.pb.cc |34.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_node_broker.grpc.pb.cc |34.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_mediator.pb.cc |34.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_replication.grpc.pb.cc |34.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_mediator.grpc.pb.cc |34.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_replication.pb.cc |34.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_backup.grpc.pb.cc |34.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/database_basic_sausage_metainfo.grpc.pb.cc |34.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/database_basic_sausage_metainfo.pb.cc |34.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_integrity_trails.grpc.pb.cc |34.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_events.grpc.pb.cc |34.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_proxy.pb.cc |34.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_allocator.pb.cc |35.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_proxy.grpc.pb.cc |35.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_testshard.pb.cc |35.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_events.pb.cc |35.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_integrity_trails.pb.cc |35.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export.grpc.pb.cc |35.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.pb.h_serialized.cpp |35.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_pq.pb.cc |35.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/db_metadata_cache.pb.cc |35.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/db_metadata_cache.grpc.pb.cc 
|35.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/transfer/tests/ydb-tests-stress-transfer-tests |35.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel.pb.cc |35.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/jaeger_tracing_configurator.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel.grpc.pb.cc |35.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/formats/arrow/serializer/native.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme.grpc.pb.cc |35.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/feature_flags.pb.cc |35.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/external_sources.grpc.pb.cc |35.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/external_sources.pb.cc |35.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_metadata.grpc.pb.cc |35.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme.pb.cc |35.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op.grpc.pb.cc |35.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statestorage.pb.cc |35.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config.pb.cc |35.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_backup.pb.cc |35.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_operation.pb.cc |36.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group.pb.cc |36.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_operation.grpc.pb.cc |36.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_quantum.cpp |36.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/common/ut_helpers/dq_fake_ca.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statistics.grpc.pb.cc |35.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs.grpc.pb.cc |35.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters_aggregator.pb.cc |35.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_stat_aggr.cpp |36.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statistics.pb.cc |36.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_service_config.pb.cc |36.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view_types.grpc.pb.cc |36.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet.pb.cc |36.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/subdomains.pb.cc |36.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op.pb.cc |36.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view.grpc.pb.cc |36.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/subdomains.grpc.pb.cc |36.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view.pb.cc |36.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_service_config.grpc.pb.cc |36.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet.grpc.pb.cc |36.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_stats.pb.cc |36.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_node_registration.cpp |36.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_generate.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_database.grpc.pb.cc |36.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_ic_debug.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statestorage.grpc.pb.cc |36.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters_aggregator.grpc.pb.cc |36.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_database.pb.cc |36.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests |36.7%| [CC] 
{BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_proxy.pb.cc |36.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_columnshard.pb.cc |36.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_columnshard.grpc.pb.cc |36.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_stats.grpc.pb.cc |37.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx.grpc.pb.cc |37.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx.pb.cc |37.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tracing.grpc.pb.cc |37.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/test_shard.pb.cc |37.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/serverless_proxy_config.pb.cc |37.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tracing.pb.cc |37.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_pdiskfit/lib/objectwithstate.cpp |37.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_mediator_timecast.pb.cc |37.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/protos/libpy3core-issue-protos.global.a |37.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_datashard.grpc.pb.cc |37.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_proxy.grpc.pb.cc |37.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/ut_common.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tracing_signals.pb.cc |37.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_scheme.grpc.pb.cc |37.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/libpy3public-issue-protos.global.a |37.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_sequenceshard.grpc.pb.cc |38.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/tests/objcopy_22b5b8dd6ea05f4194f60e6181.o |37.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/deprecated/yaml_config_parser.cpp |37.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_proxy.cpp |38.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/common/common.cpp |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/wardens/libpy3tests-library-wardens.global.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/stress/libpy3tests-library-stress.global.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/libpy3providers-common-proto.global.a |38.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_datashard.pb.cc |38.3%| PREPARE $(LLD_ROOT-3808007503) |38.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_mediator_timecast.grpc.pb.cc |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/libpy3yql-essentials-protos.global.a |38.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/connector_client_mock.cpp |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/libpy3core-file_storage-proto.global.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/walle/libpy3tools-cfg-walle.global.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/k8s_api/libpy3tools-cfg-k8s_api.global.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/oss/ydb_sdk_import/libpy3tests-oss-ydb_sdk_import.global.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/oss/canonical/libpy3tests-oss-canonical.global.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/ydbd_slice/libpy3ydbd_slice.global.a |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/libpy3api-protos-annotations.global.a |38.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/types/libpy3essentials-public-types.global.a |38.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/libpy3api-protos.global.a |38.7%| [AR] 
{BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_helpers/libtx-schemeshard-ut_helpers.a |38.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/libpy3api-grpc.global.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/clients/libpy3tests-library-clients.global.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/python/enable_v3_new_behavior/libpy3sdk-python-enable_v3_new_behavior.global.a |38.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/tests/objcopy_8e19d47784789c55156c57f816.o |38.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/libpy3ydb-tests-library.global.a |38.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/tests/objcopy_e68ca1a2fa9943132c020ae028.o |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unicode/set/libcpp-unicode-set.a |38.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.a |38.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/tests/objcopy_9be8b6745d0fa150928bab4206.o |38.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/hooks/testing/libcolumnshard-hooks-testing.a |38.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/libpy3ydb-tools-cfg.global.a |38.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/base32/libcpp-string_utils-base32.a |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/double_indexed_ut.cpp |38.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/msgbus.grpc.pb.cc |39.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/config_helpers.cpp |39.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unicode/normalization/libcpp-unicode-normalization.a |39.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/workload/libpy3stress-transfer-workload.global.a |39.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/tests/objcopy_76cd981cf66123b7633d25b898.o |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/common/libpy3tests-stress-common.global.a |39.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/tests/objcopy_60a4829fdc305e3a74a7ddcb41.o |39.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/tests/objcopy_e66920085df69f6f7e41547063.o |39.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/objcopy_2f0e0ac8198858b9ec9901778e.o |39.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/tests/objcopy_2492aafb6862566a2398c9f27e.o |39.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/tests/objcopy_3df021aac8504049c53286aea0.o |39.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_pdiskfit/lib/basic_test.cpp |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/workload/libpy3stress-simple_queue-workload.global.a |39.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/test_meta/libpy3tests-library-test_meta.global.a |39.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/logger.cpp |39.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/objcopy_f738234258cd034cd5383f92ad.o |39.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/objcopy_83efacabe56767ae4f106a6d27.o |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/lib/libpy3tests-sql-lib.global.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/static/libname-service-static.global.a |39.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/reflect/libsql-v1-reflect.global.a |39.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/reflect/libsql-v1-reflect.a |39.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/check/libv1-lexer-check.a |39.5%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/protos/console_config.grpc.pb.cc |39.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yql/essentials/tools/sql2yql/sql2yql.cpp |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr4_pure_ansi/libv1-lexer-antlr4_pure_ansi.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/text/libv1-complete-text.a |39.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr4_pure/libv1-lexer-antlr4_pure.a |39.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/tools/dqrun/lib/libtools-dqrun-lib.a |39.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/schema/libname-service-schema.a |39.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/cloud/objcopy_fd8d9957a06c9923c501e36fd9.o |39.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/regex/libv1-lexer-regex.a |39.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/cloud/objcopy_8491a772a9425d10f304e6f0e9.o |40.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/sqs/libpy3tests-library-sqs.global.a |40.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/service/query_history_ut.cpp |40.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/binding/libname-service-binding.a |40.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/topic_reader.cpp |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/libcomplete-name-service.a |40.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/msgbus.pb.cc |40.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/cluster/libname-service-cluster.a |40.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/analysis/yql/libcomplete-analysis-yql.a |40.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console.pb.cc |40.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/service/etcd_shared.cpp |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/libsql-v1-complete.a |40.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_vdisk_internal.grpc.pb.cc |40.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/service/etcd_watch.cpp |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/object/libcomplete-name-object.a |40.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/service/etcd_base_init.cpp |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/cluster/static/libname-cluster-static.a |40.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/table_writer.cpp |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/object/simple/static/libobject-simple-static.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/antlr4/libv1-complete-antlr4.a |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/ranking/libname-service-ranking.global.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/ranking/libname-service-ranking.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/object/simple/libname-object-simple.a |40.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/cloud/objcopy_6e0da74b1512d0ffe19c5dc500.o |40.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/core/libv1-complete-core.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/analysis/local/libcomplete-analysis-local.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/check/libv1-complete-check.a |40.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/service/worker.h_serialized.cpp |40.8%| [AR] 
{BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/analysis/global/libcomplete-analysis-global.a |40.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/json_change_record.cpp |40.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/libpy3library-ydb_issue-proto.global.a |40.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/libpy3dq-actors-protos.global.a |41.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/top_keeper/libcpp-containers-top_keeper.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/syntax/libv1-complete-syntax.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/libpy3scheme-defaults-protos.global.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/libpy3core-protos-schemeshard.global.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/static/libname-service-static.a |41.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/config.pb.cc |41.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/union/libname-service-union.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/libpy3yql-dq-proto.global.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/filelock/libpy3library-python-filelock.global.a |41.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console.grpc.pb.cc |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/protos/libpy3api-service-protos.global.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/tenacity/py3/libpy3python-tenacity-py3.global.a |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/libpy3library-actors-protos.global.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/certifi/libpy3library-python-certifi.global.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/libpy3core-scheme-protos.global.a |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyasn1/py3/libpy3python-pyasn1-py3.global.a |41.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config.pb.cc |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/py/py3/libpy3python-py-py3.global.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ptyprocess/py3/libpy3python-ptyprocess-py3.global.a |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/cores/libpy3library-python-cores.global.a |41.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/service/etcd_grpc.cpp |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jsonschema/py3/libpy3python-jsonschema-py3.global.a |41.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pexpect/py3/libpy3python-pexpect-py3.global.a |41.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/more-itertools/py3/libpy3python-more-itertools-py3.global.a |41.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/wcwidth/py3/libpy3python-wcwidth-py3.global.a |42.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.context/libpy3contrib-python-jaraco.context.global.a |41.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.text/libpy3contrib-python-jaraco.text.global.a |41.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ipdb/py3/libpy3python-ipdb-py3.global.a |41.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/platformdirs/libpy3contrib-python-platformdirs.global.a |42.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/rsa/py3/libpy3python-rsa-py3.global.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/oauthlib/libpy3contrib-python-oauthlib.global.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/protobuf/py3/libpy3python-protobuf-py3.a |42.0%| [AR] 
{BAZEL_DOWNLOAD} $(B)/contrib/python/prompt-toolkit/py3/libpy3python-prompt-toolkit-py3.global.a |42.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/multidict/libpy3contrib-python-multidict.a |42.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pure-eval/libpy3contrib-python-pure-eval.global.a |42.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/matplotlib-inline/libpy3contrib-python-matplotlib-inline.global.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.global.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/ytflow/integration/interface/libytflow-integration-interface.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/urllib3/py3/libpy3python-urllib3-py3.global.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/future/py3/libpy3python-future-py3.global.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/frozenlist/libpy3contrib-python-frozenlist.global.a |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/durationpy/libpy3contrib-python-durationpy.global.a |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/decorator/py3/libpy3python-decorator-py3.global.a |42.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jedi/py3/libpy3python-jedi-py3.global.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/ytflow/expr_nodes/libproviders-ytflow-expr_nodes.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/ytflow/integration/proto/libytflow-integration-proto.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/proto/libproviders-yt-proto.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/executing/libpy3contrib-python-executing.global.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cryptography/py3/libpy3python-cryptography-py3.global.a |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/threading/libessentials-utils-threading.a |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/MarkupSafe/py3/libpy3python-MarkupSafe-py3.global.a |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/Lib/libpy3tools-python3-Lib.global.a |42.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config.grpc.pb.cc |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/failure_injector/libessentials-utils-failure_injector.a |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/comp_nodes/llvm16/libyt-comp_nodes-llvm16.a |42.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/types/binary_json/libessentials-types-binary_json.a |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/skiff/libyt-lib-skiff.a |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/opt/libproviders-yt-opt.a |42.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.a |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/io/libcpp-mapreduce-io.a |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/common/libcpp-mapreduce-common.a |42.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/types/dynumber/libessentials-types-dynumber.a |42.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_vdisk_internal.pb.cc |42.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/libsql-v1-proto_parser.a |42.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/antlr3_ansi/libv1-proto_parser-antlr3_ansi.a |43.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/codec/codegen/llvm16/libcodec-codegen-llvm16.global.a |42.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/parser/libcommon-schema-parser.a |43.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/common/libproviders-yt-common.a |43.1%| [AR] 
{BAZEL_DOWNLOAD} $(B)/yql/essentials/public/decimal/libessentials-public-decimal.a |43.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/libpublic-issue-protos.a |43.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_finalize.cpp |43.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console_config.pb.cc |43.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/format/libsql-v1-format.a |43.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/http_client/libcpp-mapreduce-http_client.a |43.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/http/libcpp-mapreduce-http.a |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/interface/libcpp-mapreduce-interface.a |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/kubernetes/libpy3contrib-python-kubernetes.global.a |43.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/libessentials-public-issue.a |43.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/config.grpc.pb.cc |43.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/service/worker.cpp |43.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_intent_determination.cpp |43.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_trackable.cpp |43.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/codec/libproviders-yt-codec.a |43.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_weak_fields.cpp |43.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/grpc.pb.cc |43.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_partition.cpp |43.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_push.cpp |43.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_block_io_filter.cpp |43.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/provider/yql_yt_op_settings.h_serialized.cpp |43.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_merge.cpp |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_dq_optimize.cpp |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_join_reorder.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_constraints.cpp |44.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/result/provider/libproviders-result-provider.a |44.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/apps/etcd_proxy/service/etcd_impl.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_optimize.cpp |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_io_discovery_walk_folders.cpp |43.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/sqlite3/libcontrib-libs-sqlite3.a |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_gateway.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_ytflow_optimize.cpp |44.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_provider_impl.cpp |44.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_provider_context.cpp |44.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_ytflow_integration.cpp |44.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_physical_optimize.cpp |44.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_sort.cpp |44.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_wide_flow.cpp |44.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_key.cpp |44.1%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yql/providers/yt/provider/yql_yt_datasink_exec.cpp |44.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_io_discovery.cpp |44.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_dq_integration.cpp |44.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_op_hash.cpp |44.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_misc.cpp |44.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_key_range.cpp |44.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_op_settings.cpp |44.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_field_subset.cpp |44.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/service/service.cpp |44.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_content.cpp |44.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt.cpp |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_join_impl.cpp |44.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_double_indexed/ydb-core-tx-scheme_board-ut_double_indexed |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_block_io_utils.cpp |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_ytql.cpp |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_helper.cpp |45.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_mkql_compiler.cpp |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_fuse.cpp |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_lambda.cpp |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_peephole.cpp |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasource_type_ann.cpp |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_load_table_meta.cpp |45.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_forwarding_gateway.cpp |45.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_table_desc.cpp |45.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_cbo_helpers.cpp |45.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_epoch.cpp |45.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/service/base_table_writer.cpp |45.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_provider.cpp |45.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_table.cpp |45.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/retry/libimpl-ydb_internal-retry.a |45.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/services/mounts/libcore-services-mounts.global.a |45.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasource_exec.cpp |45.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_block_output.cpp |45.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_block_input.cpp |45.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_load_columnar_stats.cpp |45.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_map.cpp |45.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasource_constraints.cpp |45.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink.cpp |45.3%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_write.cpp |45.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/operation/libclient-types-operation.a |45.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_join.cpp |45.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/include/libclient-persqueue_public-include.a |45.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/protos/libapi-protos-persqueue-deprecated.a |45.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_type_ann.cpp |45.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/proto/libsrc-client-proto.a |45.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/discovery/libydb-services-discovery.a |45.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_dq_hybrid.cpp |45.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_helpers/ls_checks.cpp |45.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/common_client/libsrc-client-common_client.a |45.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/params/libsrc-client-params.a |45.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/libsrc-client-federated_topic.a |45.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/extensions/solomon_stats/libclient-extensions-solomon_stats.a |45.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_physical_finalizing.cpp |45.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/extension_common/libsrc-client-extension_common.a |45.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasource.cpp |45.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/db_driver_state/libimpl-ydb_internal-db_driver_state.a |45.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/query_cache/objcopy_e31620202d3ba8df14ff2a18e1.o |45.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/common_client/impl/libclient-common_client-impl.a |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/import/libsrc-client-import.a |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/common/libclient-yc_public-common.a |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/extract_predicate/libessentials-core-extract_predicate.a |46.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/rate_limiter/libsrc-client-rate_limiter.a |46.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/adapters/issue/libcpp-adapters-issue.a |45.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_horizontal_join.cpp |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/yson_value/libpublic-lib-yson_value.a |46.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache |46.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_mirror3of4/ydb-core-blobstorage-ut_mirror3of4 |46.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_helpers.cpp |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v0/libessentials-sql-v0.a |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/dynamic_config/libydb-services-dynamic_config.a |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/ydb_discovery/libydb_cli_command_ydb_discovery.a |46.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/dump/util/libydb_cli-dump-util.a |46.2%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/dump/files/libydb_cli-dump-files.a |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/out/libapi-protos-out.a |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/command_base/libydb_cli_command_base.a |46.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/query_cache/objcopy_388aef0b6ac03d4f661ae7a30e.o |46.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/hooks/testing/controller.cpp |46.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_logical_optimize.cpp |46.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/tools/dqrun/lib/dqrun_lib.cpp |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/topic/libydb-cpp-sdk-client-topic.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/events/libproviders-s3-events.a |46.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/credentials/libproviders-s3-credentials.a |46.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/regex/pire/libcpp-regex-pire.a |46.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/iam/libclient-yc_public-iam.a |46.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/gateway/libproviders-solomon-gateway.a |46.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/accessservice/libclient-yc_private-accessservice.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/expr_nodes/libproviders-solomon-expr_nodes.a |46.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/solomon_accessor/client/libsolomon-solomon_accessor-client.a |46.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/events/libproviders-solomon-events.a |46.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.cpp |47.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/worker_manager/libproviders-dq-worker_manager.a |47.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/libconnector-api-service.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/actors_factory/libproviders-s3-actors_factory.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/task_meta/libproviders-pq-task_meta.a |47.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_mirror3of4/main.cpp |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/protos/libapi-service-protos.a |47.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/task_runner_actor/libproviders-dq-task_runner_actor.a |47.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/proto/libproviders-solomon-proto.a |47.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/pushdown/libproviders-generic-pushdown.a |47.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/arrow/interface/libcommon-arrow-interface.a |47.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/protos/libyaml-config-protos.a |47.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/mkql/libproviders-dq-mkql.a |47.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/libydb-library-yaml_config.a |47.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/proto/libproviders-pq-proto.a |47.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/gateway/native/libpq-gateway-native.a |47.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/helpers/typed_local.cpp |47.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/yql/providers/dq/actors/events/libdq-actors-events.a |47.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/planner/libproviders-dq-planner.a |47.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/state/libyql-dq-state.a |47.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/impl/libclient-persqueue_public-impl.a |47.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/grpc.grpc.pb.cc |47.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_json/libydb-library-yaml_json.a |47.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/token_accessor/client/libcommon-token_accessor-client.a |47.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/grpc/libdq-api-grpc.a |47.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/tasks/libyql-dq-tasks.a |47.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/common/libproviders-dq-common.a |47.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/stock/liblibrary-workload-stock.global.a |47.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/protos/liblibrary-schlab-protos.a |47.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/provider/exec/libdq-provider-exec.a |47.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/public/liblibrary-yaml_config-public.a |47.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/common/libcommon.a |47.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/libydb-library-schlab.a |47.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/password_checker/liblibrary-login-password_checker.a |47.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/mon/liblibrary-schlab-mon.global.a |47.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/kv/liblibrary-workload-kv.a |47.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/liblibrary-login-protos.a |47.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/DataStreams/liblibrary-arrow_clickhouse-DataStreams.a |47.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/Common/liblibrary-arrow_clickhouse-Common.a |47.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/libydb-library-mkql_proto.a |47.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/stock/liblibrary-workload-stock.a |47.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_helpers/export_reboots_common.cpp |47.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/http/libcore-ymq-http.a |48.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/util/liblibrary-actors-util.a |48.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/Columns/liblibrary-arrow_clickhouse-Columns.a |47.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/wrappers/events/libcore-wrappers-events.a |47.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/prof/liblibrary-actors-prof.a |47.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator/libcore-tx-tx_allocator.a |47.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/wilson/liblibrary-actors-wilson.a |47.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/libapi-grpc-draft.a |47.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/base/libcore-ymq-base.a |47.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/yaml/libcore-viewer-yaml.a |47.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/libdq-actors-protos.a |48.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/provider/libproviders-clickhouse-provider.a |48.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/proto/libcore-ymq-proto.a |48.0%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/actors/core/harmonizer/libactors-core-harmonizer.a |47.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/ttl/libschemeshard-olap-ttl.a |47.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/util/evlog/libcore-util-evlog.a |47.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/table/libschemeshard-olap-table.a |47.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/program/libcore-tx-program.a |47.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/schema/libschemeshard-olap-schema.a |47.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/libalter-in_store-transfer.a |47.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/protos/libdq-api-protos.a |48.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/libalter-in_store-config_shards.a |48.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/actor_type/liblibrary-actors-actor_type.a |48.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tracing/usage/libtx-tracing-usage.a |48.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/libalter-in_store-common.a |48.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceshard/public/libtx-sequenceshard-public.a |48.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/wrappers/ut_helpers/libcore-wrappers-ut_helpers.a |48.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tracing/service/libtx-tracing-service.a |48.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/libalter-in_store-resharding.a |48.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/libalter-in_store-schema.a |48.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/layout/libschemeshard-olap-layout.a |48.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/actors/libproviders-dq-actors.a |48.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/provider/libproviders-solomon-provider.a |47.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_helpers/test_env.cpp |47.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/common/liboperations-alter-common.a |47.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/store/libschemeshard-olap-store.a |47.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/restart_pdisk.cpp |48.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/libolap-bg_tasks-tx_chain.a |48.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/adapter/libolap-bg_tasks-adapter.a |48.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/column_families/libschemeshard-olap-column_families.a |48.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.a |48.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceproxy/libcore-tx-sequenceproxy.a |48.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/actors/libproviders-s3-actors.a |48.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/helpers/writer.cpp |48.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/protos/libcore-viewer-protos.a |48.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/libolap-bg_tasks-transactions.a |48.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/base/cloud_enums.h_serialized.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/ymq/base/queue_id.cpp |48.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/columns/libschemeshard-olap-columns.a |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/http/xml_builder.cpp |48.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/common/libschemeshard-olap-common.a |48.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/protos/libolap-bg_tasks-protos.a |48.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/liboperations-alter-abstract.a |48.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/provider/libproviders-pq-provider.a |48.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/libcore-tx-tx_proxy.a |48.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/http/parser.rl6.cpp |48.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/wrappers/libydb-core-wrappers.a |48.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.global.a |48.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_wrapper/libessentials-parser-pg_wrapper.a |48.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/operations/batch_builder/libcolumnshard-operations-batch_builder.a |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/probes.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/base/query_id.h_serialized.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/helpers.cpp |48.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/liboperations-alter-standalone.a |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/acl.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydbd/export.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/program/registry.cpp |48.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/core/liblibrary-actors-core.a |48.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/hooks/testing/ro_controller.cpp |48.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/task.cpp |48.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/olap/ttl/update.cpp |48.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/olap/ttl/schema.cpp |48.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/tools/dump_ds_init/main.cpp |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sharding/hash.cpp |48.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sharding/unboxed_reader.cpp |48.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/status_channel.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/common.cpp |48.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tx_proxy/upload_rows_counters.h_serialized.cpp |48.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/upload_rows_counters.cpp |49.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/global.cpp |49.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/mon.cpp |48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/provider/libproviders-s3-provider.a |49.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests |49.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tx_proxy/read_table_impl.h_serialized.cpp |49.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/operations/libtx-columnshard-operations.a |50.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator/txallocator_impl.cpp |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceproxy/public/libtx-sequenceproxy-public.a |51.7%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/sequenceshard/libcore-tx-sequenceshard.a |51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/columns/schema.cpp |51.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tiering/tier/libtx-tiering-tier.a |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator/txallocator.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_helpers/failing_mtpq.cpp |51.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tiering/libcore-tx-tiering.a |52.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.global.a |51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tiering/abstract/libtx-tiering-abstract.a |52.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tiering/tier/s3_uri.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/resolvereq.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tiering/tier/identifier.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sharding/hash_slider.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/schema/schema.cpp |52.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/common/common.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/converter.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/describe.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tiering/common.cpp |53.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/util/libydb-core-util.a |53.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/util/actorsys_test/libcore-util-actorsys_test.a |52.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/liboperations-alter-in_store.a |52.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/libydb-core-viewer.a |52.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sharding/sharding.cpp |53.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator_client/libcore-tx-tx_allocator_client.a |53.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_vdisk/ydb-core-blobstorage-ut_vdisk |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/schema/update.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator/txallocator__reserve.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/column_families/schema.cpp |53.0%| PREPARE $(YMAKE_PYTHON3-4256832079) |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/cache.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/address_classifier.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/actor.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/backoff.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/aws.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/ui64id.cpp |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/text.cpp |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/datareq.cpp |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/libydb-core-viewer.global.a |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/http/xml.cpp |53.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/cloud_events/libymq-actor-cloud_events.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/opt/libyql-dq-opt.a |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/columns/update.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/intrusive_heap.cpp |53.7%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/update.cpp |53.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ydb_convert/libydb-core-ydb_convert.a |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/schemereq.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/gen_step.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/hyperlog_counter.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/ulid.cpp |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/queues/common/libymq-queues-common.a |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/concurrent_rw_hash.cpp |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/update.cpp |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/http/types.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/console.cpp |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/snapshotreq.cpp |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/libapi-protos.a |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/cpuinfo.cpp |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/dnsresolver/liblibrary-actors-dnsresolver.a |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_allocator_client/client.cpp |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator/txallocator__scheme.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/wb_merge.cpp |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/testlib/common/libactors-testlib-common.a |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/wb_filter.cpp |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/grpc/server/liblibrary-grpc-server.a |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/wb_aggregate.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/format.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/hazard.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/fast_tls.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/fragmented_buffer.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/source_location.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/stlog.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/random.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/page_map.cpp |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tiering/tier/object.cpp |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/accessor/libydb-library-accessor.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/antlr3_cpp_runtime/libcontrib-libs-antlr3_cpp_runtime.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/queues/std/libymq-queues-std.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/memory_log/liblibrary-actors-memory_log.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/queues/fifo/libymq-queues-fifo.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/aclib/libydb-library-aclib.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/aclib/protos/liblibrary-aclib-protos.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/log_backend/liblibrary-actors-log_backend.a |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/table/table.cpp |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/chunks_limiter/libydb-library-chunks_limiter.a |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/read_table_impl.cpp |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/conclusion/libydb-library-conclusion.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/mock/liblibrary-folder_service-mock.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/simple_builder/liblibrary-formats-arrow-simple_builder.a 
|53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/third_party/upb/libgrpc-third_party-upb.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_parquet/libydb-library-arrow_parquet.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/helpers/liblibrary-actors-helpers.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/dnscachelib/liblibrary-actors-dnscachelib.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/db_pool/protos/liblibrary-db_pool-protos.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/testlib/liblibrary-actors-testlib.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/liblibrary-actors-protos.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/libydb-library-folder_service.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libevent/event_extra/liblibs-libevent-event_extra.a |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/session.cpp |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/db_pool/libydb-library-db_pool.a |54.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/table_creator/libydb-library-table_creator.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/modifier/liblibrary-formats-arrow-modifier.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_kernels/libydb-library-arrow_kernels.a |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/update.cpp |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/csv/converter/libarrow-csv-converter.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/liblibrary-folder_service-proto.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/hash/liblibrary-formats-arrow-hash.a |53.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/slide_limiter/service/liblibrary-slide_limiter-service.a |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sharding/random.cpp |54.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/signals/libydb-library-signals.a |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ydb_convert/compression.cpp |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/queue_attributes.cpp |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/http/liblibrary-actors-http.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clang18-rt/lib/asan_cxx/libclang_rt.asan_cxx-x86_64.a |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/counters.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ydb_convert/ydb_convert.cpp |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/scalar/liblibrary-formats-arrow-scalar.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/grpc/server/actors/libgrpc-server-actors.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/fyamlcpp/libydb-library-fyamlcpp.a |53.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_common/libtx-datashard-ut_common.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/validation/liblibrary-formats-arrow-validation.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/transformer/liblibrary-formats-arrow-transformer.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/switch/liblibrary-formats-arrow-switch.a |54.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/persqueue/topic_parser/liblibrary-persqueue-topic_parser.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/splitter/liblibrary-formats-arrow-splitter.a |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/upload_rows_common_impl.cpp |54.1%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/libs/libunwind/libcontrib-libs-libunwind.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/libydb-library-arrow_clickhouse.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/global_plugins/libydb-library-global_plugins.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/account_lockout/liblibrary-login-account_lockout.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/interconnect/mock/libactors-interconnect-mock.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/http_proxy/error/liblibrary-http_proxy-error.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/cache/liblibrary-login-cache.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxxabi-parts/liblibs-cxxsupp-libcxxabi-parts.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unicode/punycode/libcpp-unicode-punycode.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/naming_conventions/libydb-library-naming_conventions.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/pdisk_io/protos/liblibrary-pdisk_io-protos.a |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/context.cpp |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/http_proxy/authorization/liblibrary-http_proxy-authorization.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ncloud/impl/liblibrary-ncloud-impl.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/counter_time_keeper/liblibrary-persqueue-counter_time_keeper.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/deprecated/read_batch_converter/libpersqueue-deprecated-read_batch_converter.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/protobuf_printer/libydb-library-protobuf_printer.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/pretty_types_print/protobuf/liblibrary-pretty_types_print-protobuf.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/liblibrary-mkql_proto-protos.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/probes/liblibrary-schlab-probes.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/mon/liblibrary-schlab-mon.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/security/libydb-library-security.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/schemu/liblibrary-schlab-schemu.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/schoot/liblibrary-schlab-schoot.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/token_accessor/grpc/libcommon-token_accessor-grpc.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/signal_backtrace/libydb-library-signal_backtrace.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/idna/py3/libpy3python-idna-py3.global.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/kv/liblibrary-workload-kv.global.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/schine/liblibrary-schlab-schine.a |54.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/query_actor/libydb-library-query_actor.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/libydb-library-login.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/libydb-library-services.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/abstract/liblibrary-workload-abstract.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/c-ares/libcontrib-libs-c-ares.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/pdisk_io/libydb-library-pdisk_io.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/interconnect/liblibrary-actors-interconnect.a |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/http/http.cpp |54.2%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/logger/libydb-library-logger.a |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/action.cpp |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/dlq_helpers.cpp |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/curl/libcontrib-libs-curl.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/six/py3/libpy3python-six-py3.global.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/websocket-client/libpy3contrib-python-websocket-client.global.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/protobuf/py3/libpy3python-protobuf-py3.global.a |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/signals/agent.cpp |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator_client/actor_client.cpp |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/column_families/update.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/signals/histogram.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/signals/private.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/signals/client.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/signals/states.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/signals/object_counter.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sharding/hash_modulo.cpp |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/libydb-library-ydb_issue.global.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/libydb-library-ydb_issue.a |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/common/update.cpp |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/tld/liblibrary-cpp-tld.a |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/store/store.cpp |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/linuxvdso/original/liblibs-linuxvdso-original.a |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/events_writer.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/update.cpp |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/liblibrary-ydb_issue-proto.a |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/persqueue/topic_parser/counters.cpp |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.functools/py3/libpy3python-jaraco.functools-py3.global.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/AsmPrinter/liblib-CodeGen-AsmPrinter.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/libyql-dq-actors.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/task_runner/libdq-actors-task_runner.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/common/libyql-dq-common.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cctz/tzdata/liblibs-cctz-tzdata.global.a |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/util/memory_tracker.cpp |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/expr_nodes/libproviders-clickhouse-expr_nodes.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/input_transforms/libdq-actors-input_transforms.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/proto/libproviders-clickhouse-proto.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/transform/libyql-dq-transform.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/antlr4_cpp_runtime/libcontrib-libs-antlr4_cpp_runtime.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/grpcio/py3/libpy3python-grpcio-py3.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/proto/libproviders-generic-proto.a |54.4%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/spilling/libdq-actors-spilling.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/interface/libproviders-dq-interface.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/arrow/libproviders-common-arrow.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/pushdown/libproviders-common-pushdown.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Bitcode/Reader/liblib-Bitcode-Reader.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/counters/libproviders-dq-counters.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/MSF/liblib-DebugInfo-MSF.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Utils/liblib-Transforms-Utils.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/MCJIT/liblib-ExecutionEngine-MCJIT.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/type_ann/libyql-dq-type_ann.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/http_gateway/libproviders-common-http_gateway.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/config/libproviders-dq-config.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/expr_nodes/libproviders-s3-expr_nodes.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/helper/libproviders-dq-helper.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/IRPrinter/libllvm16-lib-IRPrinter.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ycloud/impl/liblibrary-ycloud-impl.a |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/upload_rows.cpp |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/libyql-dq-proto.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/libllvm16-lib-ExecutionEngine.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/Symbolize/liblib-DebugInfo-Symbolize.a |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/tasks_list.cpp |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Bitcode/Writer/liblib-Bitcode-Writer.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/worker_manager/interface/libdq-worker_manager-interface.a |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/update.cpp |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/run_query.cpp |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/compute/libdq-actors-compute.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Linker/libllvm16-lib-Linker.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/expr_nodes/libproviders-generic-expr_nodes.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/actors/libproviders-generic-actors.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/runtime/libproviders-dq-runtime.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/task_runner/libproviders-dq-task_runner.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Frontend/OpenMP/liblib-Frontend-OpenMP.a |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/column_families.cpp |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/common/libproviders-pq-common.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/CodeView/liblib-DebugInfo-CodeView.a |54.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/yql/providers/generic/connector/libcpp/libgeneric-connector-libcpp.a |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/signals/owner.cpp |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/cm_client/libproviders-pq-cm_client.a |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/topic_description.cpp |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/common/libproviders-solomon-common.a |54.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/base/libpublic-lib-base.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/expr_nodes/libproviders-pq-expr_nodes.a |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/update.cpp |54.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/deprecated/kicli/liblib-deprecated-kicli.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/Disassembler/libTarget-X86-Disassembler.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/PDB/liblib-DebugInfo-PDB.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/common/libproviders-s3-common.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/compressors/libproviders-s3-compressors.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/GlobalISel/liblib-CodeGen-GlobalISel.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/object_listers/libproviders-s3-object_listers.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/path_generator/libproviders-s3-path_generator.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/libproviders-s3-proto.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/TextAPI/libllvm16-lib-TextAPI.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/statistics/libproviders-s3-statistics.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/serializations/libproviders-s3-serializations.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/AggressiveInstCombine/liblib-Transforms-AggressiveInstCombine.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/range_helpers/libproviders-s3-range_helpers.a |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/commitreq.cpp |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/async_io/libproviders-pq-async_io.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/runtime/libyql-dq-runtime.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/MC/libllvm16-lib-MC.a |54.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/deprecated/client/liblib-deprecated-client.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/xz/libcpp-streams-xz.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Coroutines/liblib-Transforms-Coroutines.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/actors/libyql-utils-actors.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/expr_nodes/libproviders-ydb-expr_nodes.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/plan/libyql-utils-plan.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/actors/libproviders-solomon-actors.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/snappy/libcontrib-libs-snappy.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/accessservice/libclient-nc_private-accessservice.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/opt/libproviders-dq-opt.a |54.3%| 
[AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ProfileData/libllvm16-lib-ProfileData.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/provider/libproviders-generic-provider.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/proto/libproviders-ydb-proto.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/public/ydb_issue/libyql-public-ydb_issue.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/actor_log/libyql-utils-actor_log.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/operation/libclient-yc_private-operation.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lzmasdk/libcontrib-libs-lzmasdk.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/solomon_accessor/grpc/libsolomon-solomon_accessor-grpc.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/provider/libproviders-dq-provider.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/scheme_types/libpublic-lib-scheme_types.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/libapi-protos-annotations.a |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/object.cpp |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/servicecontrol/libclient-yc_private-servicecontrol.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/ngtcp2/libcontrib-libs-ngtcp2.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/value/libpublic-lib-value.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Object/libllvm16-lib-Object.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Crypto/liblibs-poco-Crypto.a |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/rpc_long_tx.cpp |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/NetSSL_OpenSSL/liblibs-poco-NetSSL_OpenSSL.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/events/libclient-yc_public-events.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/driver/libsrc-client-driver.a |54.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/query_cache/objcopy_f8b2cbafb1fed0e25bf9683c2d.o |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/update.cpp |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/yaml/libcontrib-libs-yaml.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/json_value/libpublic-lib-json_value.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/common/libimpl-ydb_internal-common.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/resourcemanager/libclient-yc_private-resourcemanager.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/discovery/libsrc-client-discovery.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_endpoints/libclient-impl-ydb_endpoints.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/iam_private/libsrc-client-iam_private.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/iam/libsrc-client-iam.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/config/libsrc-client-config.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/kqp_session_common/libimpl-ydb_internal-kqp_session_common.a |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/secure_protobuf_printer.cpp |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/session_pool/libimpl-ydb_internal-session_pool.a |54.3%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/common/yql_parser/libydb_cli-common-yql_parser.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-common/librestricted-aws-aws-c-common.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/re2/libcontrib-libs-re2.a |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/getPageSize.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/shift10.cpp |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/AggregateFunctions/IAggregateFunction.cpp |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/malloc_extension/liblibs-tcmalloc-malloc_extension.a |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/DateLUTImpl.cpp |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnCompressed.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnAggregateFunction.cpp |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/coordination/libsrc-client-coordination.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/libclient-yc_private-iam.a |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/tx_proxy_status.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnMap.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/persqueue/topic_parser/topic_parser.cpp |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnsCommon.cpp |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnArray.cpp |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnTuple.cpp |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/provider/libproviders-ydb-provider.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/plain_status/libimpl-ydb_internal-plain_status.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/grpc_connections/libimpl-ydb_internal-grpc_connections.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/make_request/libimpl-ydb_internal-make_request.a |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressedReadBuffer.cpp |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/table/libsrc-client-table.a |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnString.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/parseAddress.cpp |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Util/liblibs-poco-Util.a |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/DNSResolver.cpp |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tiering/manager.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/CurrentThread.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/query_actor/query_actor.cpp |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/fq/libpublic-lib-fq.a |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/IPv6ToBinary.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/IColumn.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressionCodecMultiple.cpp |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/experimental/libpublic-lib-experimental.a |54.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/table_creator/table_creator.cpp |54.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/slide_limiter/service/service.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/LZ4_decompress_faster.cpp |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_stats/libclient-impl-ydb_stats.a |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeArray.cpp |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/thread_pool/libimpl-ydb_internal-thread_pool.a |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnVector.cpp |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/libcontrib-restricted-abseil-cpp-tstring.a |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeDate.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeCustomIPv4AndIPv6.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeDate32.cpp |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/ttl/validator.cpp |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/value_helpers/libimpl-ydb_internal-value_helpers.a |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/ColumnGathererStream.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/getLeastSupertype.cpp |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sharding/hash_intervals.cpp |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/datastreams/libsrc-client-datastreams.a |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/ProtobufWriter.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationWrapper.cpp |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/InstCombine/liblib-Transforms-InstCombine.a |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/OpenedFile.cpp |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/util/actorsys_test/single_thread_ic_mock.cpp |54.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_impl.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/MMapReadBufferFromFile.cpp |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/draft/libsrc-client-draft.a |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/libapi-grpc.a |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/TimeoutSetter.cpp |54.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_browse.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/FunctionHelpers.cpp |54.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/util/failure_injection.cpp |54.4%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationNullable.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationMap.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeMap.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_query.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/util/actorsys_test/testactorsys.cpp |54.3%| PREPARE $(PYTHON) |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeString.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/createReadBufferFromFileBase.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/copyData.cpp |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/libsrc-client-topic.a |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/readFloatText.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/ClientInfo.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/InternalTextLogsQueue.cpp |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/credentials/libclient-types-credentials.a |54.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationNothing.cpp |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/ss_tasks/libsrc-client-ss_tasks.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/resources/libsrc-client-resources.global.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/query/impl/libclient-query-impl.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Analysis/libllvm16-lib-Analysis.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/common/libclient-topic-common.a |54.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_vdisk.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeDecimalBase.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteBufferValidUTF8.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteBufferFromPocoSocket.cpp |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/export/libsrc-client-export.a |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/parseDateTimeBestEffort.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/ICompressionCodec.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/BlockInfo.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteBufferFromFile.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/Block.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteBufferFromFileBase.cpp |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/schemeshard/olap/layout/layout.cpp |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/IPO/liblib-Transforms-IPO.a |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressionFactory.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressionCodecLZ4.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_pipe_req.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTConstraintDeclaration.cpp |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteHelpers.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTDictionaryAttributeDeclaration.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTDatabaseOrNone.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTDictionary.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTBackupQuery.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTColumnsMatcher.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTDropQuery.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTCreateQuery.cpp |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/codecs/libclient-topic-codecs.global.a |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTColumnDeclaration.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/BaseSettings.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTColumnsTransformers.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/TablesStatus.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTAsterisk.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/UseSSL.cpp |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/query/libsrc-client-query.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/libcontrib-libs-protobuf.a |54.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/deprecated/persqueue_v0/libservices-deprecated-persqueue_v0.a |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeFixedString.cpp |54.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/cloud_events/cloud_events.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeLowCardinalityHelpers.cpp |54.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/datastreams/libydb-services-datastreams.a |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeInterval.cpp |54.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/udfs/common/clickhouse/client/libclickhouse_client_udf.global.a |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressionCodecNone.cpp |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/result/libsrc-client-result.a |54.7%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeFactory.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/QueryThreadLog.cpp |54.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/libydb-services-metadata.a |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeNested.cpp |54.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_pdisk.cpp |54.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteBufferFromFileDescriptor.cpp |54.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeNullable.cpp |54.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationFixedString.cpp |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/status/libclient-types-status.a |54.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/table_description.cpp |54.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTAlterQuery.cpp |54.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/table_settings.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationLowCardinality.cpp |54.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.global.a |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeUUID.cpp |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/exceptions/libclient-types-exceptions.a |54.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.a |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeLowCardinality.cpp |54.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/lib/auth/libservices-lib-auth.a |54.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_pq.cpp |54.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeNothing.cpp |54.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/lib/actors/libservices-lib-actors.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/fatal_error_handlers/libclient-types-fatal_error_handlers.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/impl/libclient-federated_topic-impl.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/operation_id/protos/liblibrary-operation_id-protos.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/credentials/login/libtypes-credentials-login.a |54.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/kesus/libydb-services-kesus.a |54.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/common/libservices-metadata-common.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/persqueue/obfuscate/libsdk-library-persqueue-obfuscate-v3.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/credentials/oauth2_token_exchange/libtypes-credentials-oauth2_token_exchange.a |54.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/local_discovery/libydb-services-local_discovery.a |54.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/initializer/libservices-metadata-initializer.a |54.6%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/services/metadata/abstract/libservices-metadata-abstract.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Scalar/liblib-Transforms-Scalar.a |54.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_scheme.cpp |54.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/fq/libydb-services-fq.a |54.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/service/libservices-ext_index-service.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/issue/libsrc-library-issue.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/libsrc-client-types.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/decimal/libsrc-library-decimal.a |54.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypesNumber.cpp |54.8%| [CF] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/sandbox.cpp |54.7%| [CF] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/build_info.cpp |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/value/libsrc-client-value.a |54.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/viewer_request.cpp |54.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.global.a |54.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/request/libservices-metadata-request.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/operation_id/libsrc-library-operation_id.a |54.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTQueryWithOnCluster.cpp |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/resources/libsrc-client-resources.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/jwt/libsrc-library-jwt.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/persqueue/topic_parser_public/libsdk-library-persqueue-topic_parser_public-v3.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/http_download/libcore-file_storage-http_download.a |54.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/manager/libservices-metadata-manager.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/string_utils/helpers/liblibrary-string_utils-helpers.a |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tiering/fetcher.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/table/query_stats/libclient-table-query_stats.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSetQuery.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserCheckQuery.cpp |54.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserDataType.cpp |54.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserBackupQuery.cpp |54.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserDictionary.cpp |54.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserCreateQuery.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/lib/sharding/libservices-lib-sharding.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/operation/libsrc-client-operation.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/grpc/libapi-grpc-persqueue-deprecated.a |54.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/sdk/cpp/src/client/table/impl/libclient-table-impl.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/grpc/client/libsdk-library-grpc-client-v3.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/scheme/libsrc-client-scheme.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ExpressionElementParsers.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/metadata/extractor/libext_index-metadata-extractor.global.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserDictionaryAttributeDeclaration.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/bg_tasks/abstract/libservices-bg_tasks-abstract.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/ast/serialize/libessentials-ast-serialize.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/auth/libydb-services-auth.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserExplainQuery.cpp |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/deprecated/client/msgbus_client.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/IAST.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserInsertQuery.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserQuery.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserTablesInSelectQuery.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserShowPrivilegesQuery.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserShowGrantsQuery.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/table_profiles.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserShowTablesQuery.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSystemQuery.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/expr_nodes/libessentials-core-expr_nodes.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserUseQuery.cpp |54.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserTablePropertiesQuery.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/datastreams/shard_iterator.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserWithElement.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/datastreams/next_token.cpp |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/impl/libclient-topic-impl.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/service/exception_policy/libudf-service-exception_policy.global.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/IAccumulatingTransform.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserUnionQueryElement.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/castColumn.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/parseDatabaseAndTableName.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Port.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/QueryWithOutputSettingsPushDownVisitor.cpp |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v0/lexer/libsql-v0-lexer.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/ISource.cpp |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/replication/libydb-services-replication.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/monitoring/libydb-services-monitoring.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/ResizeProcessor.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/formatSettingName.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/abstract/kqp_common.h_serialized.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/ut_common/datashard_ut_common.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/initializer/events.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/abstract/decoder.cpp |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/maintenance/libydb-services-maintenance.a |55.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/actors/libservices-persqueue_v1-actors.a |54.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/object.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/abstract/request_features.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/abstract/events.cpp |55.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/libydb-services-persqueue_v1.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/abstract/parsing.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/manager/fetch_database.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/manager/ydb_value_operator.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/manager/restore_controller.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/keyvalue/libydb-services-keyvalue.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/request/common.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/manager/table_record.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/manager/preparation_controller.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/manager/modification_controller.cpp |55.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/common/libservices-ext_index-common.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/persqueue_cluster_discovery/cluster_ordering/libservices-persqueue_cluster_discovery-cluster_ordering.a |55.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ymq/libydb-services-ymq.a |55.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_cluster_discovery/libydb-services-persqueue_cluster_discovery.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/log/libessentials-utils-log.a |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/parseIntervalKind.cpp |55.1%| [AR] 
{BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/backtrace/libessentials-utils-backtrace.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/libyql-essentials-utils.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/types/uuid/libessentials-types-uuid.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/dq_integration/libessentials-core-dq_integration.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/metrics/libproviders-common-metrics.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/arrow_kernels/request/libcore-arrow_kernels-request.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/fq/private_grpc.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/deprecated/kicli/kikimr.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/viewer.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/IProcessor.cpp |55.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/ds_table/libservices-metadata-ds_table.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/settings/libessentials-sql-settings.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/clickhouse_client_udf.cpp |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/view/libydb-services-view.a |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Chunk.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/codecs.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/LimitTransform.cpp |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/cbo/libessentials-core-cbo.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/config/libydb-services-config.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/tablet/libydb-services-tablet.a |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/helpers.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ext_index/common/events.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ymq/utils.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_cluster_discovery/counters.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_worker.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/parseQuery.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/parseUserName.cpp |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_storage.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/TokenIterator.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/arrow/libpublic-udf-arrow.a |55.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/secret/accessor/libmetadata-secret-accessor.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/result_format/libessentials-public-result_format.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_wb_req.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/queryToString.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/sql_types/libessentials-core-sql_types.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/libessentials-core-issue.global.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/libessentials-minikql-jsonpath.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_viewer.cpp |55.2%| [AR] 
{BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/http_download/proto/libfile_storage-http_download-proto.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/backup/libydb-services-backup.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/defs/libcore-file_storage-defs.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/ast/libyql-essentials-ast.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/log/proto/libutils-log-proto.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/download/libcore-file_storage-download.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyHamcrest/py3/libpy3python-PyHamcrest-py3.global.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/libessentials-core-issue.a |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/parseIdentifierOrStringLiteral.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/histogram/libessentials-core-histogram.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/libcore-file_storage-proto.a |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/ConcatProcessor.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/udf_resolver/libcore-qplayer-udf_resolver.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/facade/libessentials-core-facade.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/minsketch/libessentials-core-minsketch.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/langver/libessentials-core-langver.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/protos/libcore-issue-protos.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/storage/interface/libqplayer-storage-interface.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/viewer_topic_data.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/pg_settings/libessentials-core-pg_settings.a |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/IRowOutputFormat.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/client/libcpp-mapreduce-client.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/rate_limiter/libydb-services-rate_limiter.a |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/deprecated/kicli/schema.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/RawBLOBRowInputFormat.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_preprocessing/interface/libcore-url_preprocessing-interface.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/antlr3/libparser-proto_ast-antlr3.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/arrow_kernels/registry/libcore-arrow_kernels-registry.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/dom/libessentials-minikql-dom.a |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/TSKVRowOutputFormat.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/services/mounts/libcore-services-mounts.a |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/AvroRowInputFormat.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/CSVRowInputFormat.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_lister/interface/libcore-url_lister-interface.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/credentials/libessentials-core-credentials.a |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/formatAST.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/ArrowBufferedStreams.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSettingsProfileElement.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSampleRatio.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserRolesOrUsersSet.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/datetime/libessentials-minikql-datetime.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1/libproto_ast-gen-v1.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/dq_integration/transform/libcore-dq_integration-transform.a |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_operation.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/user_data/libessentials-core-user_data.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/expr_nodes_gen/libessentials-core-expr_nodes_gen.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/codegen/llvm16/libminikql-codegen-llvm16.a |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSelectWithUnionQuery.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/Lexer.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_catalog/libessentials-parser-pg_catalog.global.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/re2/libjsonpath-rewrapper-re2.global.a |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/deprecated/kicli/result.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/core/peephole_opt/libessentials-core-peephole_opt.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/proto/libjsonpath-rewrapper-proto.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/computation/llvm16/libminikql-computation-llvm16.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/libminikql-jsonpath-rewrapper.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/common/antlr4/libparser-common-antlr4.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/parser/libminikql-jsonpath-parser.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/lexer_common/libessentials-parser-lexer_common.a |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTFunction.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTIndexDeclaration.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/common/libessentials-parser-common.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/deprecated/kicli/dynamic_node.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/show_create/formatters_common.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/base/msgbus.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_wrapper/interface/libparser-pg_wrapper-interface.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/libessentials-core-file_storage.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/common/timeout.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/show_create/create_view_formatter.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/common/object.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTInsertQuery.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_catalog/proto/libparser-pg_catalog-proto.a |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTKillQueryQuery.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTLiteral.cpp |55.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/antlr4/libparser-proto_ast-antlr4.a |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTNameTypePair.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTOptimizeQuery.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTProjectionDeclaration.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTOrderByElement.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTQualifiedAsterisk.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTProjectionSelectQuery.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTQueryParameter.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTQueryWithOutput.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTQueryWithTableAndOutput.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_factory.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_addmember.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_append.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_exists.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/libyql-essentials-minikql.a |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_func.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_container.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/object.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_aggrcount.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/jsonpath/libproto_ast-gen-jsonpath.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/deprecated/kicli/query.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_decimal.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_apply.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_count.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_getelem.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/type_ann/libessentials-core-type_ann.a |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_just.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_coalesce.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_callable.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_chain1_map.cpp |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/table_exists.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_some.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_compress.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/common_opt/libessentials-core-common_opt.a |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_exists.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_sum.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_enumerate.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_ansi_antlr4/libproto_ast-gen-v1_ansi_antlr4.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/lib/actors/pq_schema_actor.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_ifpresent.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_collect.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v0_proto_split/libproto_ast-gen-v0_proto_split.a |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_check_args.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_coalesce.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_top.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/lib/auth/auth_helpers.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_heap.cpp 
|55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_next_value.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_minmax.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_iterator.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_if.cpp |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_ansi/libproto_ast-gen-v1_ansi.a |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_now.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_match_recognize_measure_arg.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_blocks.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_match_recognize_list.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_match_recognize_rows_formatter.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_multimap.cpp |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/libyql-essentials-core.a |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_lookup.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_map.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_queue.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_listfromrange.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_cluster_discovery/grpc_service.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_removemember.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_random.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_match_recognize.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_rh_hash.cpp |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yql/essentials/minikql/comp_nodes/llvm16/libminikql-comp_nodes-llvm16.a |55.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_safe_circular_buffer.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_multihopping.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_null.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_replicate.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_reverse.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_reduce.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_round.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_range.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_size.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_seq.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_lazy_list.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/service.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_join_dict.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_length.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_map_join.cpp |55.5%| [CC] 
{BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_mapnext.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_prepend.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_logical.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_pickle.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_scalar_apply.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_nop.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_toindexdict.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_timezone.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_map.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_tobytes.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_filter.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_squeeze_state.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_weakmember.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_varitem.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_visitall.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_unwrap.cpp |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/computation/libessentials-minikql-computation.a |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_switch.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/metadata/object.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_chopper.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_top_sort.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_chain_map.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_withcontext.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_zip.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_combine.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_squeeze_to_list.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_while.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_condense.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_way.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_udf.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_time_order_recover.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_sort.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_skip.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_group.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_source.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/service/add_index.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_take.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_condense1.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_tooptional.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_tostring.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_grace_join.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/common/ss_dialog.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_guess.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_hasitems.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_contains.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_ensure.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_todict.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_grace_join_imp.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_hopping.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_iterable.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_invoke.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_if.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_fromyson.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_discard.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_join.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_dictitems.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_element.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_decimal_mod.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_frombytes.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_decimal_div.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_decimal_mul.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_logical.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_combine.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_factory.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_extend.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_fold1.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_fold.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_flatmap.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_filter.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_flow.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_chain_map.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_fromstring.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_dynamic_variant.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_chopper.cpp |55.8%| [CC] 
{BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_skiptake.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_condense.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSetRoleQuery.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSelectWithUnionQuery.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSetQuery.cpp |55.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/libpersqueue_public-ut-ut_utils.a |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSelectQuery.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/deprecated/kicli/configurator.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSettingsProfileElement.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/behaviour_registrator_actor.cpp |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/config_clusters/libyt-lib-config_clusters.a |55.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/libydb-services-ydb.a |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSubquery.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_map_join.cpp |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/libyql-essentials-protos.a |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/request/request_actor_cb.cpp |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/libyql-essentials-sql.a |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/RowInputFormatWithDiagnosticInfo.cpp |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/metadata/extractor/libext_index-metadata-extractor.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/hyperscan/libjsonpath-rewrapper-hyperscan.global.a |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/CommonParsers.cpp |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/result/expr_nodes/libproviders-result-expr_nodes.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyYAML/py3/libpy3python-PyYAML-py3.global.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/lambda_builder/libyt-lib-lambda_builder.a |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/metadata/behaviour.cpp |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/res_pull/libyt-lib-res_pull.a |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/QueryLog.cpp |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/bg_tasks/protos/libservices-bg_tasks-protos.a |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/accessor/secret_id.cpp |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/metrics/protos/libcommon-metrics-protos.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/skiff/libcommon-schema-skiff.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/fastlz/libcontrib-libs-fastlz.a |55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg.cpp |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/double-conversion/libcontrib-libs-double-conversion.a |55.9%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/data_plane_helpers.cpp |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/libcontrib-libs-farmhash.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/bridge/libydb-services-bridge.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/mkql_dq/libproviders-yt-mkql_dq.a |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/modification.cpp |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/third_party/address_sorting/libgrpc-third_party-address_sorting.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/pg/provider/libproviders-pg-provider.a |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/restore.cpp |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_catalog/libessentials-parser-pg_catalog.a |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/local_discovery/grpc_service.cpp |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hdr_histogram/libcontrib-libs-hdr_histogram.a |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_object_storage.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSystemQuery.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTTTLElement.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTTablesInSelectQuery.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTUserNameWithHost.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_export.cpp |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/plain64/liblibs-base64-plain64.a |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTWindowDefinition.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_scripting.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_clickhouse_internal.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTWithAlias.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTWithElement.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_scheme.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTShowTablesQuery.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_operation.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_import.cpp |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/gateway/libproviders-common-gateway.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/gateways_utils/libproviders-common-gateways_utils.a |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTPartition.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTIdentifier.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_debug.cpp |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/dq/libproviders-common-dq.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/langver/libessentials-public-langver.a |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationArray.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_query.cpp |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/config/libproviders-common-config.a |56.0%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationDate32.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_logstore.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationIP.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeNumberBase.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/verbosePrintString.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeTuple.cpp |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/libproviders-common-schema.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/codec/libproviders-common-codec.a |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/fq/ydb_over_fq.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadBufferFromPocoSocket.cpp |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/expr/libcommon-schema-expr.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/format/libsql-v1-format.global.a |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/FunctionFactory.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/behaviour.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadHelpers.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/kesus/grpc_service.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/checker_secret.cpp |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/comp_nodes/libproviders-common-comp_nodes.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/runtime_corei7/liblibs-hyperscan-runtime_corei7.a |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/manager/abstract.h_serialized.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/alter.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ThreadPoolReader.cpp |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/mkql/libcommon-schema-mkql.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/pg/expr_nodes/libproviders-pg-expr_nodes.a |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/SynchronousReader.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/initializer.cpp |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/runtime_core2/liblibs-hyperscan-runtime_core2.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/runtime_avx2/liblibs-hyperscan-runtime_avx2.a |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/MMapReadBufferFromFileDescriptor.cpp |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/config/libessentials-providers-config.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/structured_token/libproviders-common-structured_token.a |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/request/request_actor.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/AsynchronousReadBufferFromFile.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/toFixedString.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/datastreams/datastreams_proxy.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/CompressionMethod.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/service/add_data.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/secret.cpp |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/udf_resolve/libproviders-common-udf_resolve.a |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/MMappedFileDescriptor.cpp |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/mkql/libproviders-common-mkql.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/transform/libproviders-common-transform.a |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/MMappedFile.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/Progress.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadBufferFromFile.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/PeekableReadBuffer.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadBufferFromFileDescriptor.cpp |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/provider/libproviders-common-provider.a |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadBufferFromFileBase.cpp |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/libproviders-common-proto.a |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadBufferFromMemory.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationUUID.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/config.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationTupleElement.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/initializer.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/registerDataTypeDateTime.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationTuple.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_table.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/JSONEachRowUtils.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/ProtobufReader.cpp |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_antlr4/libproto_ast-gen-v1_antlr4.a |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/deprecated/client/grpc_client.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationNumber.cpp |56.2%| PREPARE $(OS_SDK_ROOT-sbr:243881345) |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/registerFormats.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/NativeFormat.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationString.cpp |56.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/libcontrib-libs-googleapis-common-protos.a |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationDateTime.cpp |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/secret_behaviour.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationDateTime64.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/FormatFactory.cpp |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/persqueue.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/ExecutionSpeedLimits.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationDecimal.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/NestedUtils.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationEnum.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationDecimalBase.cpp |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/common.cpp |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/accessor_refresh.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeFunction.cpp |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/invoke_builtins/llvm16/libminikql-invoke_builtins-llvm16.a |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/BlockStreamProfileInfo.cpp |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/request/config.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/IParserBase.cpp |56.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/arch/sse42_aesni/libfarmhash-arch-sse42_aesni.a |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/ISimpleTransform.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSampleRatio.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeDateTime.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/accessor_subscribe.cpp |56.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/libessentials-public-udf.a |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/object.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/NativeBlockOutputStream.cpp |56.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr4/libv1-lexer-antlr4.a |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/fetcher.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/service/deleting.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/SizeLimits.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(S)/ydb/services/ext_index/common/config.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeAggregateFunction.cpp |56.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/libsql-v1-lexer.a |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/snapshot.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/NamesAndTypes.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/accessor_init.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/access_behaviour.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/Field.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/registration.cpp |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/types/libessentials-public-types.a |56.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr4_ansi/libv1-lexer-antlr4_ansi.a |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/SettingsFields.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeCustomGeo.cpp |56.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/row_spec/libyt-lib-row_spec.a |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/IBlockInputStream.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ClickHouseRevision.cpp |56.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/antlr4/libv1-proto_parser-antlr4.a |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/materializeBlock.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/snapshot.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/NativeBlockInputStream.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/AlignedBuffer.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/ColumnWithTypeAndName.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/common/service.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/FieldVisitorToString.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/Epoll.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/FieldVisitorDump.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/CurrentMetrics.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/CurrentMemoryTracker.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/object.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/generic_manager.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/access.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/abstract/initialization.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/datastreams/grpc_service.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/datastreams/put_records_actor.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/common.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/library/yql/dq/comp_nodes/ut/ydb-library-yql-dq-comp_nodes-ut |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/service/activation.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ErrorCodes.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/SettingsEnums.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/Allocator.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/manager.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/service/executor.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/FieldVisitorWriteBinary.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/IntervalKind.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/metadata/fetcher.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/MemoryTracker.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserAlterQuery.cpp |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/uuid/libsrc-library-uuid.a |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/activation/libproviders-common-activation.a |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/OutputStreamToOutputFormat.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/partition_writer.cpp |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/codec/arrow/libcommon-codec-arrow.a |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/fetcher.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/getMultipleKeysFromConfig.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/OpenSSLHelpers.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/arrow/libessentials-minikql-arrow.a |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/export_s3_buffer_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/PODArray.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_clusters_updater_actor.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/FunctionsConversion.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/abstract/common.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/grpc_pq_read.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/hasLinuxCapability.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/histogram/adaptive/libcpp-histogram-adaptive.a |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/hex.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/getNumberOfPhysicalCPUCores.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/accessor_snapshot_base.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/Settings.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/persqueue.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/deprecated/kicli/error.cpp |56.5%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/services_initializer.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/fetch/libessentials-utils-fetch.a |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/arrow_resolve/libproviders-common-arrow_resolve.a |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/isLocalAddress.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/setThreadName.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/fq/grpc_service.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/accessor_snapshot_simple.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/distributed_commit_helper.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/show_create/create_table_formatter.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/schema_actors.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/commit_offset_actor.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/metadata/snapshot.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/checker_access.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/histogram/adaptive/protos/libhistogram-adaptive-protos.a |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/quoteString.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/thread_local_rng.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/expr_traits/libyt-lib-expr_traits.a |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/scheme_describe.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressedReadBufferFromFile.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/createHardLink.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ThreadProfileEvents.cpp |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/pg_dummy/libessentials-sql-pg_dummy.a |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/checkStackSize.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressedWriteBuffer.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ZooKeeper/IKeeper.cpp |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/hyperloglog/liblibrary-cpp-hyperloglog.a |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/TaskStatsInfoGetter.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/TimerDescriptor.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressedReadBufferBase.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserWatchQuery.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/IRowInputFormat.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ProfileEvents.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ProcfsMetricsProvider.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ThreadPool.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/escapeForFileName.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/ISink.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read_actor.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/update_offsets_in_transaction_actor.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/direct_read_actor.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/formatIPv6.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/formatReadable.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/grpc_pq_schema.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/PipeFDs.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Executors/PollingQueue.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/Config/AbstractConfigurationComparison.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/MaskOperations.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ThreadStatus.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/Exception.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/persqueue_utils.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/FilterDescription.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/InsertQuerySettingsPushDownVisitor.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/comp_nodes/ut/dq_block_hash_join_ut.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/arch/sse41/libfarmhash-arch-sse41.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/alter_impl.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr3/libv1-lexer-antlr3.a |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnNullable.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/simdjson/libcontrib-libs-simdjson.a |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserProjectionSelectQuery.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/metadata/initializer.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/abstract/fetcher.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserDropQuery.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnFixedString.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserExternalDDLQuery.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/yaml-cpp/libcontrib-libs-yaml-cpp.a |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/IOutputFormat.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yt/cpp/mapreduce/library/user_job_statistics/libmapreduce-library-user_job_statistics.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lua/libcontrib-libs-lua.a |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserOptimizeQuery.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/codec/codegen/llvm16/libcodec-codegen-llvm16.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/antlr3/libv1-proto_parser-antlr3.a |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnConst.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ymq/grpc_service.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/demangle.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/write_session_actor.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/errnoToString.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserCase.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/read_session_actor.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/getResource.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/getThreadId.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/interface/logging/libmapreduce-interface-logging.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/read_init_auth_actor.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/metadata/manager.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTExpressionList.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_dummy.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/preciseExp10.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/cms/libydb-services-cms.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr3_ansi/libv1-lexer-antlr3_ansi.a |56.7%| [CP] {default-linux-x86_64, release, asan} $(B)/common_test.context |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserRenameQuery.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/partition_actor.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/mremap.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSetRoleQuery.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/partition_writer_cache_actor.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserDatabaseOrNone.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/AggregateFunctions/AggregateFunctionFactory.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/JSON.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/getFQDNOrHostName.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/topic.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yt/yql/providers/yt/expr_nodes/libproviders-yt-expr_nodes.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write_actor.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/grpc_pq_write.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/DateLUT.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserKillQueryQuery.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/job/libproviders-yt-job.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-checksums/librestricted-aws-aws-checksums.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/yt_download/libyt-lib-yt_download.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ExpressionListParsers.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/providers/stat/expr_nodes/libproviders-stat-expr_nodes.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/deprecated/http-parser/libcontrib-deprecated-http-parser.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/qplayer/libyt-gateway-qplayer.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTFunctionWithKeyValueArguments.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/key_filter/libyt-lib-key_filter.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/ProfileEventsExt.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/libcontrib-libs-hyperscan.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSelectQuery.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserDescribeTableQuery.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/support/libpublic-udf-support.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/container/librestricted-boost-container.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/log/libyt-lib-log.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeEnum.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ymq/ymq_proxy.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/init_yt_api/libyt-lib-init_yt_api.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/googletest/googlemock/librestricted-googletest-googlemock.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/service.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/infer_schema/libyt-lib-infer_schema.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/abstract.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTRolesOrUsersSet.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTShowGrantsQuery.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/url_mapper/libyt-lib-url_mapper.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserUserNameWithHost.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserPartition.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/comp_nodes/ut/dq_factories.cpp 
|56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/lib/libyt-gateway-lib.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/IInputFormat.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/mkql_helpers/libyt-lib-mkql_helpers.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/antlr4_ansi/libv1-proto_parser-antlr4_ansi.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/schema/libyt-lib-schema.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libevent/event_thread/liblibs-libevent-event_thread.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/libcontrib-libs-protobuf.global.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libc_compat/libcontrib-libs-libc_compat.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libbz2/libcontrib-libs-libbz2.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypesDecimal.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/IDataType.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/bzip/libblockcodecs-codecs-bzip.global.a |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/read_info_actor.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/lzma/libblockcodecs-codecs-lzma.global.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnLowCardinality.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/EnumValues.cpp |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/regex/librestricted-boost-regex.a |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationCustomSimpleText.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationAggregateFunction.cpp |56.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/slide_limiter/usage/liblibrary-slide_limiter-usage.a |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/case_insensitive_string/liblibrary-cpp-case_insensitive_string.a |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/core/libcpp-blockcodecs-core.a |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/JSON/liblibs-poco-JSON.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/liburing/libcontrib-libs-liburing.a |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/services/libessentials-core-services.a |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libevent/event_openssl/liblibs-libevent-event_openssl.a |56.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/libcore-ymq-actor.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/brotli/libblockcodecs-codecs-brotli.global.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libaio/static/liblibs-libaio-static.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/avx2/liblibs-base64-avx2.a |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/RemoteHostFilter.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/test_server.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/google-auth/py3/libpy3python-google-auth-py3.global.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/crcutil/libcontrib-libs-crcutil.a |56.7%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/libs/libevent/event_core/liblibs-libevent-event_core.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/ISerialization.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_proto_split/libproto_ast-gen-v1_proto_split.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/linuxvdso/libcontrib-libs-linuxvdso.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationDate.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/fmt/libcontrib-libs-fmt.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadSettings.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/libessentials-sql-v1.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/abstract/kqp_common.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/expat/libcontrib-libs-expat.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/arch/sse42/libfarmhash-arch-sse42.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxxrt/liblibs-cxxsupp-libcxxrt.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/2d_array/libcpp-containers-2d_array.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/builtins/liblibs-cxxsupp-builtins.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/atomizer/libcpp-containers-atomizer.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/extractTimeZoneFromFunctionArguments.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/ssse3/liblibs-base64-ssse3.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/stack_vector/libcpp-containers-stack_vector.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Bitstream/Reader/liblib-Bitstream-Reader.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clang18-rt/lib/asan/libclang_rt.asan-x86_64.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/neon32/liblibs-base64-neon32.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/flatbuffers/libcontrib-libs-flatbuffers.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/brotli/c/dec/libbrotli-c-dec.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cctz/libcontrib-libs-cctz.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/comp_nodes/dq/llvm16/libcomp_nodes-dq-llvm16.a |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/manager.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/brotli/c/enc/libbrotli-c-enc.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/ring_buffer/libcpp-containers-ring_buffer.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cpuid_check/liblibrary-cpp-cpuid_check.global.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/IFunction.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/ut/ut_sample_k.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libidn/static/liblibs-libidn-static.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/DoubleConverter.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/liblibrary-cpp-blockcodecs.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/backtrace/libcontrib-libs-backtrace.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/brotli/c/common/libbrotli-c-common.a |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/ut_utils.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/parser/proto_ast/gen/v0/libproto_ast-gen-v0.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/grpcio/py3/libpy3python-grpcio-py3.global.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/plain32/liblibs-base64-plain32.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/importlib-resources/libpy3contrib-python-importlib-resources.global.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/frozenlist/libpy3contrib-python-frozenlist.a |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/ut/ut_reshuffle_kmeans.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/diff/liblibrary-cpp-diff.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/native/libyt-gateway-native.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/MMapReadBufferFromFileWithCache.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/MarkupSafe/py3/libpy3python-MarkupSafe-py3.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/md5/libcpp-digest-md5.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/comp_nodes/libyql-dq-comp_nodes.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libxml/libcontrib-libs-libxml.a |56.8%| PREPARE $(CLANG_FORMAT-3855767795) |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/ut/ut_prefix_kmeans.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/orc-format/liblibs-apache-orc-format.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyJWT/py3/libpy3python-PyJWT-py3.global.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/libcpp-digest-argonish.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/common/libdq-actors-common.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clang18-rt/lib/asan_static/libclang_rt.asan_static-x86_64.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/html/escape/libcpp-html-escape.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/liblibrary-formats-arrow-protos.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeDateTime64.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/ipmath/liblibrary-cpp-ipmath.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.collections/libpy3contrib-python-jaraco.collections.global.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/misc/libcpp-http-misc.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/libpy3columnshard-common-protos.global.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/BinaryFormat/libllvm16-lib-BinaryFormat.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/yson_helpers/libyt-lib-yson_helpers.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/packaging/py3/libpy3python-packaging-py3.global.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/parso/py3/libpy3python-parso-py3.global.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pluggy/py3/libpy3python-pluggy-py3.global.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyasn1-modules/py3/libpy3python-pyasn1-modules-py3.global.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/libpy3columnshard-engines-protos.global.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/liblibrary-formats-arrow.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ruamel.yaml.clib/py3/libpy3python-ruamel.yaml.clib-py3.global.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libfyaml/libcontrib-libs-libfyaml.a |56.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/cpp/logger/global/libcpp-logger-global.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ruamel.yaml/py3/libpy3python-ruamel.yaml-py3.global.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/PerfJITEvents/liblib-ExecutionEngine-PerfJITEvents.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/writer/libcpp-json-writer.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/Orc/Shared/libExecutionEngine-Orc-Shared.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/liblibs-aws-sdk-cpp-aws-cpp-sdk-core.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/charset-normalizer/libpy3contrib-python-charset-normalizer.global.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/stack-data/libpy3contrib-python-stack-data.global.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/traitlets/py3/libpy3python-traitlets-py3.global.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/AsmParser/libllvm16-lib-AsmParser.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ruamel.yaml.clib/py3/libpy3python-ruamel.yaml.clib-py3.a |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write.cpp |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/Orc/TargetProcess/libExecutionEngine-Orc-TargetProcess.a |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/log.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/sha256.cpp |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Demangle/libllvm16-lib-Demangle.a |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/user_settings_names.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/attributes_md5.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/auth_mocks.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/orc/liblibs-apache-orc.a |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/infly.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/message_delay_stats.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/local_rate_limiter_allocator.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/ut/ut_local_kmeans.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/slide_limiter/usage/abstract.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/slide_limiter/usage/events.cpp |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/typing-extensions/py3/libpy3python-typing-extensions-py3.global.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/typeguard/libpy3contrib-python-typeguard.global.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/actor/libmessagebus_actor.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/malloc/system/libsystem_allocator.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/asttokens/libpy3contrib-python-asttokens.global.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/nayuki_md5/libcontrib-libs-nayuki_md5.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/dynamic_counters/libcpp-monlib-dynamic_counters.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/malloc/api/libcpp-malloc-api.a |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_table_part.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/protobuf/libmessagebus_protobuf.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/config/libcpp-messagebus-config.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/protos/libcpp-lwtrace-protos.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/yarl/libpy3contrib-python-yarl.global.a 
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Remarks/libllvm16-lib-Remarks.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/libllvm16-lib-Target.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/multidict/libpy3contrib-python-multidict.global.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/libffi/libcontrib-restricted-libffi.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/CFGuard/liblib-Transforms-CFGuard.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/AsmParser/libTarget-X86-AsmParser.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/llhttp/libcontrib-restricted-llhttp.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libiconv/static/liblibs-libiconv-static.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/MC/MCParser/liblib-MC-MCParser.a |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/randomSeed.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/legacy_protobuf/protos/libencode-legacy_protobuf-protos.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pycparser/py3/libpy3python-pycparser-py3.global.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/prometheus/libmonlib-encode-prometheus.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/TargetParser/libllvm16-lib-TargetParser.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/ref/libinternal-proxies-ref.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/init/libcpp-openssl-init.global.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/ObjCARC/liblib-Transforms-ObjCARC.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/mon/libcpp-lwtrace-mon.global.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/TargetInfo/libTarget-X86-TargetInfo.a |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/Throttler.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/on_disk/chunks/libcpp-on_disk-chunks.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/nghttp3/libcontrib-libs-nghttp3.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lz4/libcontrib-libs-lz4.a |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/CastOverloadResolver.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lzma/libcontrib-libs-lzma.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-event-stream/librestricted-aws-aws-c-event-stream.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/packedtypes/liblibrary-cpp-packedtypes.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openldap/libraries/liblber/libopenldap-libraries-liblber.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/pcre/pcre16/liblibs-pcre-pcre16.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openldap/libcontrib-libs-openldap.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/MCTargetDesc/libTarget-X86-MCTargetDesc.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/DWARF/liblib-DebugInfo-DWARF.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/sdk_core_access/libydb_sdk_core_access.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/regex/pcre/libcpp-regex-pcre.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/logger/libimpl-ydb_internal-logger.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Passes/libllvm16-lib-Passes.a |57.1%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnFunction.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/breakpad/libydb-library-breakpad.global.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/timezone_conversion/liblibrary-cpp-timezone_conversion.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/db_id_async_resolver/libproviders-common-db_id_async_resolver.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/bit_io/liblibrary-cpp-bit_io.a |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnDecimal.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/skiff/liblibrary-cpp-skiff.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/pcre/libcontrib-libs-pcre.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/t1ha/libcontrib-libs-t1ha.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-sdkutils/librestricted-aws-aws-c-sdkutils.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/csv/libcpp-string_utils-csv.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/xxhash/libcontrib-libs-xxhash.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/RuntimeDyld/liblib-ExecutionEngine-RuntimeDyld.a |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/StringRef.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/utf8proc/libcontrib-libs-utf8proc.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/parse_size/libcpp-string_utils-parse_size.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/sasl/libcontrib-libs-sasl.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/aiohttp/libpy3contrib-python-aiohttp.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-cal/librestricted-aws-aws-c-cal.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/url/libcpp-string_utils-url.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zlib/libcontrib-libs-zlib.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/libcontrib-libs-opentelemetry-proto.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Instrumentation/liblib-Transforms-Instrumentation.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-s3/librestricted-aws-aws-c-s3.a |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/sleep.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-compression/librestricted-aws-aws-c-compression.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gtest_extensions/libcpp-testing-gtest_extensions.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/liblibs-aws-sdk-cpp-aws-cpp-sdk-s3.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/atomic/libcpp-threading-atomic.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zstd06/libcontrib-libs-zstd06.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/binsaver/liblibrary-cpp-binsaver.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-auth/librestricted-aws-aws-c-auth.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/iostreams/librestricted-boost-iostreams.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/chrono/librestricted-boost-chrono.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-http/librestricted-aws-aws-c-http.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-io/librestricted-aws-aws-c-io.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/pcre/pcre32/liblibs-pcre-pcre32.a |57.2%| [AR] 
{BAZEL_DOWNLOAD} $(B)/library/cpp/threading/future/libcpp-threading-future.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-mqtt/librestricted-aws-aws-c-mqtt.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/atomic/librestricted-boost-atomic.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/SelectionDAG/liblib-CodeGen-SelectionDAG.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Support/libllvm16-lib-Support.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zstd/libcontrib-libs-zstd.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/ut/ut_recompute_kmeans.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Vectorize/liblib-Transforms-Vectorize.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Net/liblibs-poco-Net.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-crt-cpp/librestricted-aws-aws-crt-cpp.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/context/fcontext_impl/libboost-context-fcontext_impl.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/context/ucontext_impl/libboost-context-ucontext_impl.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/context/impl_common/libboost-context-impl_common.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/exception/librestricted-boost-exception.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/coroutine/librestricted-boost-coroutine.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/thread_local/libcpp-threading-thread_local.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/XML/liblibs-poco-XML.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Foundation/liblibs-poco-Foundation.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/archive/liblibrary-cpp-archive.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/dragonbox/libdragonbox.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/s2n/librestricted-aws-s2n.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/graph/librestricted-boost-graph.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/libcontrib-restricted-abseil-cpp.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/cityhash-1.0.2/libcontrib-restricted-cityhash-1.0.2.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/random/librestricted-boost-random.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/math/lib/libcommon-math-lib.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/intrusive_rb_tree/libcpp-containers-intrusive_rb_tree.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/thread/librestricted-boost-thread.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openssl/libcontrib-libs-openssl.a |57.2%| PREPARE $(FLAKE8_PY3-715603131) |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/uriparser/libcontrib-restricted-uriparser.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/serialization/librestricted-boost-serialization.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/zstd/libblockcodecs-codecs-zstd.global.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/lz4/libblockcodecs-codecs-lz4.global.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/program_options/librestricted-boost-program_options.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/fastlz/libblockcodecs-codecs-fastlz.global.a |57.2%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/cpp/unified_agent_client/liblibrary-cpp-unified_agent_client.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/zlib/libblockcodecs-codecs-zlib.global.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/snappy/libblockcodecs-codecs-snappy.global.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/assert/libcpp-yt-assert.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/legacy_zstd06/libblockcodecs-codecs-legacy_zstd06.global.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/charset/liblibrary-cpp-charset.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/codecs/greedy_dict/libcpp-codecs-greedy_dict.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/codecs/liblibrary-cpp-codecs.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cache/liblibrary-cpp-cache.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cgiparam/liblibrary-cpp-cgiparam.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/locale/librestricted-boost-locale.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/malloc/libcpp-yt-malloc.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/exception/libcpp-yt-exception.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/intrusive_avl_tree/libcpp-containers-intrusive_avl_tree.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/googletest/googletest/librestricted-googletest-googletest.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/config/liblibrary-cpp-config.a |57.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/mock/libblobstorage-dsproxy-mock.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/compproto/liblibrary-cpp-compproto.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/comptable/liblibrary-cpp-comptable.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/absl_flat_hash/libcpp-containers-absl_flat_hash.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/colorizer/liblibrary-cpp-colorizer.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/ytalloc/api/libcpp-ytalloc-api.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/charset/lite/libcpp-charset-lite.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/error/libcpp-yt-error.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/disjoint_interval_tree/libcpp-containers-disjoint_interval_tree.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/compact_vector/libcpp-containers-compact_vector.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/bitseq/libcpp-containers-bitseq.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/coroutine/listener/libcpp-coroutine-listener.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/stack_array/libcpp-containers-stack_array.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/thrift/libcontrib-restricted-thrift.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/comptrie/libcpp-containers-comptrie.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/sorted_vector/libcpp-containers-sorted_vector.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/string/libstring_udf.global.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/execprofile/liblibrary-cpp-execprofile.a |57.3%| PREPARE $(TEST_TOOL_HOST-sbr:9029509511) |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/paged_vector/libcpp-containers-paged_vector.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/common/libcore-backup-common.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/str_map/libcpp-containers-str_map.a |57.4%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/libs/llvm16/lib/Target/X86/liblib-Target-X86.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/getopt/liblibrary-cpp-getopt.global.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/coroutine/engine/libcpp-coroutine-engine.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/split/libcpp-deprecated-split.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/avx2/libinternal-proxies-avx2.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/kmp/libcpp-deprecated-kmp.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dbg_output/liblibrary-cpp-dbg_output.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/crypto/chacha_512/libblobstorage-crypto-chacha_512.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/enum_codegen/libcpp-deprecated-enum_codegen.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/getopt/small/libcpp-getopt-small.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/accessors/libcpp-deprecated-accessors.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/old_crc/libcpp-digest-old_crc.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/crc32c/libcpp-digest-crc32c.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/lower_case/libcpp-digest-lower_case.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/disjoint_sets/liblibrary-cpp-disjoint_sets.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/histogram/hdr/libcpp-histogram-hdr.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/libfq-libs-config.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dns/liblibrary-cpp-dns.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/html/pcdata/libcpp-html-pcdata.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/util/libyutil.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dot_product/liblibrary-cpp-dot_product.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/attrs/py3/libpy3python-attrs-py3.global.a |57.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/time_cast/libcore-tx-time_cast.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/ssse3/libinternal-proxies-ssse3.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/sse41/libinternal-proxies-sse41.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/iterator/liblibrary-cpp-iterator.a |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_comp_gen.cpp |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/sse2/libinternal-proxies-sse2.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/io/libcpp-http-io.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/int128/liblibrary-cpp-int128.a |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_exec_seat.cpp |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/ipv6_address/liblibrary-cpp-ipv6_address.a |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_part_loader.cpp |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/server/libcpp-http-server.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/simple/libcpp-http-simple.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/mon/analytics/liblwtrace-mon-analytics.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/keys/libydb-library-keys.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/iniconfig/libpy3contrib-python-iniconfig.global.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/mon/libcpp-lwtrace-mon.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cachetools/py3/libpy3python-cachetools-py3.global.a |57.5%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/cpp/json/easy_parse/libcpp-json-easy_parse.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/common/libcpp-json-common.a |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_borrowlogic.cpp |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/liblibrary-cpp-lwtrace.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/yson/libcpp-json-yson.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/l2_distance/liblibrary-cpp-l2_distance.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/fetch/libcpp-http-fetch.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/fast_sax/libcpp-json-fast_sax.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lcs/liblibrary-cpp-lcs.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lfalloc/alloc_profiler/libcpp-lfalloc-alloc_profiler.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/logger/liblibrary-cpp-logger.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lfalloc/dbg_info/libcpp-lfalloc-dbg_info.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lua/liblibrary-cpp-lua.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/MC/MCDisassembler/liblib-MC-MCDisassembler.a |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_mem_warm.cpp |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/logger/liblibrary-cpp-logger.global.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/IRReader/libllvm16-lib-IRReader.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/aiosignal/libpy3contrib-python-aiosignal.global.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/monitoring/libcpp-messagebus-monitoring.a |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_sausage_meta.cpp |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/setuptools/py3/libpy3python-setuptools-py3.global.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/deprecated/json/libmonlib-deprecated-json.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ipython/py3/libpy3python-ipython-py3.global.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/oldmodule/libcpp-messagebus-oldmodule.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/scheduler/libcpp-messagebus-scheduler.a |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/auth_factory.cpp |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/www/libcpp-messagebus-www.a |57.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_proxy/libfq-libs-control_plane_proxy.a |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_stat_table.cpp |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/www/libcpp-messagebus-www.global.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/mime/types/libcpp-mime-types.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/task_scheduler/libcpp-threading-task_scheduler.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/service/pages/tablesorter/libservice-pages-tablesorter.global.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/service/pages/resources/libservice-pages-resources.global.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/icu/libcontrib-libs-icu.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/buffered/libmonlib-encode-buffered.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/libcpp-monlib-encode.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/service/pages/libmonlib-service-pages.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/json/libmonlib-encode-json.a |57.6%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/cpp/monlib/service/libcpp-monlib-service.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/libcontrib-libs-grpc.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/liblibrary-cpp-messagebus.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/exception/libcpp-monlib-exception.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/liblibrary-cpp-json.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/messagebus/libcpp-monlib-messagebus.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/skip_list/libcpp-threading-skip_list.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/text/libmonlib-encode-text.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/zstd/libcpp-streams-zstd.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/regex/hyperscan/libcpp-regex-hyperscan.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/indent_text/libcpp-string_utils-indent_text.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/big_integer/libcpp-openssl-big_integer.a |57.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/mock/libfq-libs-mock.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/holders/libcpp-openssl-holders.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/crypto/libcpp-openssl-crypto.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/packers/liblibrary-cpp-packers.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/random_provider/liblibrary-cpp-random_provider.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/json/proto/libprotobuf-json-proto.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/metrics/libcpp-monlib-metrics.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/interop/libcpp-protobuf-interop.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/io/libcpp-openssl-io.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/util/proto/libprotobuf-util-proto.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lzma/libcpp-streams-lzma.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/sighandler/liblibrary-cpp-sighandler.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/retry/protos/libcpp-retry-protos.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/json/libcpp-protobuf-json.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/resource/liblibrary-cpp-resource.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_config/events/liblibs-control_plane_config-events.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/util/libcpp-protobuf-util.a |57.6%| {BAZEL_DOWNLOAD} $(B)/library/cpp/sanitizer/plugin/sanitizer.py.pyplugin |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/retry/liblibrary-cpp-retry.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/brotli/libcpp-streams-brotli.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/sliding_window/liblibrary-cpp-sliding_window.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/zc_memory_input/libcpp-streams-zc_memory_input.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/sse/liblibrary-cpp-sse.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/bzip2/libcpp-streams-bzip2.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/scheme/liblibrary-cpp-scheme.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gmock_in_unittest/libcpp-testing-gmock_in_unittest.global.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/queue/libcpp-threading-queue.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/gateway/libfq-libs-gateway.a |57.7%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/cpp/string_utils/base64/libcpp-string_utils-base64.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/levenshtein_diff/libcpp-string_utils-levenshtein_diff.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/hmac/libfq-libs-hmac.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unified_agent_client/proto/libcpp-unified_agent_client-proto.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/method/libcpp-openssl-method.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/quote/libcpp-string_utils-quote.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/scan/libcpp-string_utils-scan.a |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/index_events_processor.cpp |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/list_permissions.cpp |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gmock_in_unittest/libcpp-testing-gmock_in_unittest.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/terminate_handler/liblibrary-cpp-terminate_handler.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/ztstrbuf/libcpp-string_utils-ztstrbuf.a |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/ut/ut_secondary_index.cpp |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/tdigest/liblibrary-cpp-tdigest.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/terminate_handler/liblibrary-cpp-terminate_handler.global.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/poor_man_openmp/libcpp-threading-poor_man_openmp.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/unittest_main/libcpp-testing-unittest_main.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/common/libcpp-testing-common.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/cron/libcpp-threading-cron.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/hook/libcpp-testing-hook.a |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/probes.cpp |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/result_formatter/libfq-libs-result_formatter.a |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/config.cpp |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/blocking_queue/libcpp-threading-blocking_queue.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/cancellation/libcpp-threading-cancellation.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/light_rw_lock/libcpp-threading-light_rw_lock.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/hot_swap/libcpp-threading-hot_swap.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/equeue/libcpp-threading-equeue.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/unicode_base/lib/libcommon-unicode_base-lib.a |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/liblibs-row_dispatcher-format_handler.a |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/unittest/libcpp-testing-unittest.a |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/count_queues.cpp |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/math/libmath_udf.global.a |57.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup |57.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/audit/libydb-core-audit.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/yson_string/libcpp-yt-yson_string.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/time_provider/liblibrary-cpp-time_provider.a |57.5%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/cpp/xml/document/libcpp-xml-document.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/type_info/tz/libcpp-type_info-tz.a |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/event_util.cpp |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson_pull/libyson_pull.a |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/relaxed_escaper/libcpp-string_utils-relaxed_escaper.a |57.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blob_depot/libydb-core-blob_depot.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/ydb/synchronization_service/libcompute-ydb-synchronization_service.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unified_agent_client/liblibrary-cpp-unified_agent_client.global.a |57.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blob_depot/agent/libcore-blob_depot-agent.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson/json/libcpp-yson-json.a |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/private_events.cpp |57.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/generated/libcore-base-generated.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/type_info/liblibrary-cpp-type_info.a |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/slide_limiter/usage/config.cpp |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/xml/init/libcpp-xml-init.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/uri/liblibrary-cpp-uri.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/yson/libcpp-yt-yson.a |57.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/impl/libcore-backup-impl.a |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/purge_queue.cpp |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/cpu_clock/libcpp-yt-cpu_clock.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/logging/libcpp-yt-logging.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/backtrace/cursors/libunwind/libbacktrace-cursors-libunwind.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson/liblibrary-cpp-yson.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/backtrace/libcpp-yt-backtrace.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson/node/libcpp-yson-node.a |57.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/version/libversion_definition.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/global/libcpp-yt-global.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/logging/plain_text_formatter/libyt-logging-plain_text_formatter.a |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/create_queue.cpp |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/sys_params.cpp |57.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/bridge/libblobstorage-dsproxy-bridge.a |57.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/libcore-blobstorage-dsproxy.a |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/threading/libcpp-yt-threading.a |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/string/libcpp-yt-string.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/memory/libcpp-yt-memory.a |57.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/backpressure/libcore-blobstorage-backpressure.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/misc/libcpp-yt-misc.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/system/libcpp-yt-system.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/unicode_base/libunicode_udf.global.a |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/target_transfer.cpp |57.4%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/blobstorage/base/libcore-blobstorage-base.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/util/draft/libutil-draft.a |57.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/libydb-core-base.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/tools/enum_parser/enum_serialization_runtime/libtools-enum_parser-enum_serialization_runtime.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/knn/libknn_udf.global.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/libllvm16-lib-CodeGen.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/datetime/libdatetime_udf.global.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/common/proto/libbackup-common-proto.a |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/auth_multi_factory.cpp |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/controller/libcore-backup-controller.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/common/libcore-blobstorage-common.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/crypto/libcore-blobstorage-crypto.a |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/util/charset/libutil-charset.a |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/IR/libllvm16-lib-IR.a |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/backup/impl/table_writer.cpp |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/groupinfo/libcore-blobstorage-groupinfo.a |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/generated/runtime_feature_flags.cpp |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/common/libfq-libs-common.a |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/cleanup_queue_data.cpp |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/retention.cpp |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/common/liblibs-compute-common.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/cloud_audit/libfq-libs-cloud_audit.a |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/list_users.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/traceid.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/storage_pools.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/base/blobstorage_vdiskid.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/table_index.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/metering.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/subdomain.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/base/blobstorage_syncstate.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/domain.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/tablet_status_checker.cpp |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/ydb/control_plane/libcompute-ydb-control_plane.a |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/auth.cpp |57.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/actorlib_impl/libydb-core-actorlib_impl.a |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/ydb/liblibs-compute-ydb.a |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/tablet.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/tx_processing.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/backtrace.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/logoblob.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/blobstorage_grouptype.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/event_filter.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/row_version.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/path.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/counters.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/base/blobstorage.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/base/memory_controller_iface.h_serialized.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/pool_stats_collector.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/services_assert.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/actor_tracker.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/read_http_reply_protocol.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/name_service_client_protocol.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/long_timer.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/actor_activity_names.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/group_stat.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/executor.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/feature_flags_service.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/base/html.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/monitoring.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/localdb.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/local_user_token.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/metering.h_serialized.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/kmeans_clusters.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_boot_lease.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/queue_schema.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/change_visibility.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/set_queue_attributes.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/program/resolver.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/receive_message.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/queues_list_reader.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/tag_queue.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/purge.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/send_message.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/queue_schema.h_serialized.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/delete_message.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/modify_permissions.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/create_user.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/list_dead_letter_source_queues.cpp |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/cfg.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/proxy_actor.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/garbage_collector.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/service.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/time_cast/time_cast.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/get_queue_url.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/user_settings_reader.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/proxy_service.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_data_cleanup_logic.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/get_queue_attributes.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/ymq/actor/actor.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/fifo_cleanup.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/schema.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/delete_user.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/fifo_cleanup.h_serialized.cpp |57.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/list_queue_tags.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/list_queues.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/events.h_serialized.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/delete_queue.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/node_tracker.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/send_data_protocol.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/error.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/slide_limiter/usage/service.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_table_observer.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_exec_commit_mgr.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/read_data_protocol.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_comp_create.cpp |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/liblibs-config-protos.a |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_comp.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/mock/dsproxy_mock.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_nodemon.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_exec_commit.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_dbase_apply.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_check_integrity_get.cpp |57.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut |57.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_comp_gen.h_serialized.cpp |57.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/test_helper/kernels_wrapper.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_database.cpp |57.8%| PREPARE $(CLANG-874354456) |57.8%| PREPARE $(CLANG18-1866954364) |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/wilson_tracing_control.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/test_helper/program_constructor.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_proxy/control_plane_proxy.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/bridge/bridge.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/group_sessions.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/board_lookup.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/blob_depot.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/queue_leader.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_exec_broker.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/board_publish.cpp |57.8%| [CC] {default-linux-x86_64, release, asan} $(S)/library/cpp/svnversion/svn_interface.c 
|57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_dbase_scheme.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/unisched.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/coro_tx.cpp |57.9%| [CC] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/build_info.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_counters.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/connect_socket_protocol.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/query.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/test_connection/events/liblibs-test_connection-events.a |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/appdata.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_executor_compaction_logic.h_serialized.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/statestorage.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_fwd_misc.cpp |57.9%| [BI] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/buildinfo_data.h |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/tablet_killer.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_executor.pb.cc |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_snapshot.cpp |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/service/libcore-graph-service.a |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_nodemonactor.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_gclogic.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_tx_env.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_row_versions.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_table_misc.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_txloglogic.cpp |57.9%| [CC] {default-linux-x86_64, release, asan} $(S)/library/cpp/build_info/build_info_static.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/untag_queue.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_index_iter_create.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/board_replica.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_page_iface.h_serialized.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_charge_create.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_charge_range.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_page_label.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_row_eggs.h_serialized.cpp |57.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/libydb-core-grpc_services.a |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_dump.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_drop_dst_result.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_part_loader.h_serialized.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_overlay.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/base/statestorage_event_filter.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_outset.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_range_cache.cpp |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/compilation/libkqp-common-compilation.a |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_table_committed.cpp |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_store_hotdog.cpp |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_slice.cpp |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/controller.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_stat_part_group_iter_create.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_sausagecache.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_collect_garbage.cpp |57.4%| [CC] {default-linux-x86_64, release, asan} $(S)/library/cpp/svnversion/svnversion.cpp |57.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/jaeger_tracing/libydb-core-jaeger_tracing.a |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/base/blobstorage_events.cpp |57.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/libydb-core-http_proxy.a |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/garbage.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/audit_logins.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/blocks.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/op_commit_blob_seq.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_state.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/audit_dml_operations.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/exceptions_mapping.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/grpc_publisher_service_actor.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_mon.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/grpc_mon.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/grpc_helper.cpp |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/arrow/liblibs-apache-arrow.a |57.9%| [CC] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/sandbox.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3dc.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_load.cpp |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/jaeger_tracing/request_discriminator.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/metrics.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/audit_log.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blob_depot/schema.h_serialized.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_discover.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/http_proxy/metrics_actor.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_stat.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/statestorage_guardian.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/event.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/blocks.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_create_replication.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/group_metrics_exchange.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/blob_mapping_cache.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/queue.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/jaeger_tracing/sampling_throttling_control_internals.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/proxy.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/jaeger_tracing/sampling_throttling_control.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/mock/yql_mock.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/read.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/jaeger_tracing/throttler.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/rpc_calls.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3of4.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_mon.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_bio_actor.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/given_id_range.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_gc.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_bootlogic.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_load_blob_queue.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_monactor.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get_impl.cpp |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/garbage_collection.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_get_block.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_boot_misc.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get_block.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_resolve.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_encrypt.cpp |58.0%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/request.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/mediator__schema_upgrade.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/space_monitor.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_get.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/mediator__configure.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/backup/impl/local_partition_reader.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/statestorage_replica.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_request_reporting.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/audit/audit_log_impl.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/s3_scan.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/channel_kind.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_db_mon.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/mon_main.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_uncertain.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_compaction_logic.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_init.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/assimilator.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_status.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_put.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_client.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/status.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/s3_upload.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/resolved_value.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_stat_table_btree_index.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_stat_table_btree_index_histogram.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/mediator.cpp |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/federated_query/libcore-kqp-federated_query.a |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/executer_actor/shards_resolver/libkqp-executer_actor-shards_resolver.a |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/expr_nodes/libcore-kqp-expr_nodes.a |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/host/libcore-kqp-host.a |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/load_based_timeout.cpp |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/finalize_script_service/libcore-kqp-finalize_script_service.a |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_storage/libfq-libs-control_plane_storage.a |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/s3_write.cpp |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/executer_actor/libcore-kqp-executer_actor.a |58.1%| PREPARE $(GDB) |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_remove_directory.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(S)/ydb/core/tablet_flat/flat_table.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/statestorage_monitoring.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_patch.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/resolve_local_db_table.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_range.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/op_apply_config.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_replication.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_alter_coordination_node.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/local_rate_limiter.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_trash.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/operation_helpers.cpp |58.0%| PREPARE $(WITH_JDK17-sbr:7832760150) |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/query/rpc_kqp_tx.cpp |58.0%| PREPARE $(WITH_JDK-sbr:7832760150) |58.0%| PREPARE $(JDK17-472926544) |58.0%| PREPARE $(JDK_DEFAULT-472926544) |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_rename_tables.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/comm.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/query/rpc_attach_session.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/s3.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_describe_coordination_node.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_create_coordination_node.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_locks_helper.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/statestorage_proxy.cpp |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/compute_actor/libcore-kqp-compute_actor.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_copy_table.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_common/rpc_common_kqp_session.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/tenant_resolver.cpp |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/events/liblibs-control_plane_proxy-events.a |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.a |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_drop_table.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/config.cpp |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/compile_service/libcore-kqp-compile_service.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_heartbeat.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_modify_permissions.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_stat.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_config/libfq-libs-control_plane_config.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_drop_coordination_node.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_block.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/ydb_over_fq/describe_table.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_kh_describe.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/grpc_services/rpc_backup.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_describe_external_data_source.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/s3.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/resource_pool_classifiers/resource_pool_classifiers.cpp |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_proxy/actors/liblibs-control_plane_proxy-actors.a |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/http_proxy/discovery_actor.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/agent.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/op_init_schema.cpp |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.global.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_decommit.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/libydb-core-kqp.global.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/simple/libkqp-common-simple.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/buffer/libkqp-common-buffer.a |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/common/libcore-kqp-common.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/db_schema/libfq-libs-db_schema.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_scheme_base.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_make_directory.cpp |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/common/shutdown/libkqp-common-shutdown.a |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/libydb-core-sys_view.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_rate_limiter_api.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/actors/ydb_schema_query_actor.h_serialized.cpp |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/actors/libkqp-gateway-actors.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/ydb_over_fq/create_session.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_describe_path.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_fq_internal.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/op_load.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_view.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/control.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/actors/ydb_schema_query_actor.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/kqp_script_executions.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/actors/control_plane_storage_requester_actor.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/kqp_yql.h_serialized.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/shutdown/state.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/shutdown/events.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/grpc_endpoint_publish_actor.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/http_proxy/http_service.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/kqp_user_request_context.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/kqp_lwtrace_probes.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/ydb_over_fq/keep_alive.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/kqp_tx_info.h_serialized.cpp 
|58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/kqp_yql.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/query/rpc_execute_script.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_export.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/grpc_request_proxy_simple.cpp |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.a |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.global.a |58.2%| PREPARE $(CLANG-1922233694) |58.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/kqp_types.cpp |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/table/libgateway-behaviour-table.global.a |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_kh_snapshots.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_import_data.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/http_proxy/grpc_service.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/testing.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_get_shard_locations.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/grpc_request_proxy.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_whoami.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/grpc/libfq-libs-grpc.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/graph_params/proto/liblibs-graph_params-proto.a |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.global.a |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/init/libfq-libs-init.a |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/logs/libfq-libs-logs.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_copy_tables.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_execute_scheme_query.cpp |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/metrics/libfq-libs-metrics.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/http_proxy/auth_factory.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_execute_data_query.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/tablet_queue.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/ydb_over_fq/list_directory.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_object_storage.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/table_settings.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/query/rpc_execute_query.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_commit_transaction.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_list_objects_in_s3_export.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_alter_table.cpp |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/node_service/libcore-kqp-node_service.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/jaeger_tracing/sampling_throttling_configurator.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_keep_alive.cpp |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/protos/libfq-libs-protos.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/control_plane_service/liblibs-rate_limiter-control_plane_service.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_cancel_operation.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_fq.cpp |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/libfq-libs-quota_manager.a |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_dynamic_config.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/private_client/libfq-libs-private_client.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/keyvalue/protos/libcore-keyvalue-protos.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/utils/liblibs-rate_limiter-utils.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/proto/liblibs-quota_manager-proto.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/test_helper/columnshard_ut_common.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/quoter_service/liblibs-rate_limiter-quoter_service.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/s3_delete.cpp |58.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/read_rule/libfq-libs-read_rule.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/filters/librow_dispatcher-format_handler-filters.a |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/common/librow_dispatcher-format_handler-common.a |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/opt/physical/effects/libopt-physical-effects.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/parsers/librow_dispatcher-format_handler-parsers.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/query/rpc_fetch_script_results.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_cms.cpp |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/proxy_service/libcore-kqp-proxy_service.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_config.cpp |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/protos/liblibs-row_dispatcher-protos.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_load_rows.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_describe_table_options.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_node_registration.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_list_operations.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/controller/replication.h_serialized.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_get_scale_recommendation.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_planner_strategy.cpp |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/libfq-libs-row_dispatcher.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/purecalc_no_pg_wrapper/liblibs-row_dispatcher-purecalc_no_pg_wrapper.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_create_table.cpp |58.1%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_import.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_describe_external_table.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_monitoring.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_discovery.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_keyvalue.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_begin_transaction.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_get_operation.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_prepare_data_query.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/target_table.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/replication.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_rollback_transaction.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/http_proxy/http_req.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/test_helper/helper.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_login.cpp |58.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/stream_remover.cpp |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/opt/rbo/libkqp-opt-rbo.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_bridge.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_forget_operation.cpp |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/common/libcore-sys_view-common.a |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/provider/libcore-kqp-provider.a |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/stream_consumer_remover.cpp |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/proxy_service/proto/libkqp-proxy_service-proto.a |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/ydb_over_fq/execute_data_query.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_actors.cpp |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/roaring/libroaring.global.a |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/target_with_stream.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_describe_table.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_explain_yql_script.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_explain_data_query.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_read_columns.cpp |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/opt/physical/libkqp-opt-physical.a |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/session_info.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/priorities/service/manager.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_kqp_base.cpp |58.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/tablet_flat_executor.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_quotas.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/test_helper/shard_writer.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(S)/ydb/core/grpc_services/rpc_execute_yql_script.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_compute_events.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/executer_actor/kqp_executer.h_serialized.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/ydb_over_fq/explain_data_query.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_read_rows.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_scan_executer.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_scan_events.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_helpers.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_connections.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_literal_executer.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compile_service/kqp_compile_computation_pattern_service.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/host/kqp_translate.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_graph.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/opt/physical/predicate_collector.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor_helpers.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/finalize_script_service/kqp_finalize_script_actor.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_stream_execute_scan_query.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/config/bsconfig_ut.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_maintenance.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_scan_common.cpp |57.8%| [CP] {default-linux-x86_64, release, asan} $(B)/yql/essentials/minikql/comp_nodes/llvm16/yql/essentials/minikql/computation/mkql_computation_node_codegen.h |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/lag_provider.cpp |57.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/address_classification/libcore-mind-address_classification.a |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_read_table.cpp |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/client/ydb_topic/include/libclient-ydb_topic-include.a |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/metering/libydb-core-metering.a |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/load_test/libydb-core-load_test.a |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/host/kqp_transform.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/target_discoverer.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_executer_impl.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_ping.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/address_classification/counters.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_stream_execute_yql_script.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/mind/address_classification/net_classifier.h_serialized.cpp |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/network/libessentials-utils-network.a |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/load_test/ycsb/info_collector.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_compute_database.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/load_test/config_examples.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp_event_impl.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compile_service/kqp_compile_actor.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/mediator__schema.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/host/kqp_explain_prepared.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/load_test/archive.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/actors/analyze_actor.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_planner.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_partitioned_executer.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_log_store.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/load_test/aggregated_result.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/finalize_script_service/kqp_finalize_script_service.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_data_executer.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/stream_creator.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_partition_helper.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/load_test/percentile.h_serialized.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/validators.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compile_service/kqp_compile_service.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/stat/libstat_udf.global.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/digest/libdigest_udf.global.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/dst_remover.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/compress_base/libcompress_udf.global.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/drop_index.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/dst_alterer.cpp |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/opt/libcore-kqp-opt.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/checker.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_table_resolver.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/scan.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_compute_state.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/host/kqp_gateway_proxy.cpp |58.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/nodes/libcore-sys_view-nodes.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_pure_compute_actor.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/re2/libre2_udf.global.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/stat/static/libcommon-stat-static.a |58.1%| PREPARE $(CLANG16-1380963495) |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_manager.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/host/kqp_statement_rewrite.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/common/schema.cpp |58.2%| [AR] 
{BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/url_base/lib/libcommon-url_base-lib.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/shutdown/controller.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/manager.cpp |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/purecalc_compilation/liblibs-row_dispatcher-purecalc_compilation.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/compute_actor/kqp_compute_state.h_serialized.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/upsert_index.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool/manager.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/table/behaviour.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_validate.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/initializer.cpp |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/yson2/libyson2_udf.global.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_actor.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/external_data_source/behaviour.cpp |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/query_compiler/libcore-kqp-query_compiler.a |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/memory_controller/libydb-core-memory_controller.a |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/run_script_actor/libcore-kqp-run_script_actor.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/mediator_impl.cpp |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/workload_service/actors/libkqp-workload_service-actors.a |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool/behaviour.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/probes.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/logs/log.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/fetcher.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_proxy/actors/query_utils.cpp |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/batch/libkqp-common-batch.a |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/host/kqp_type_ann.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/init/init.cpp |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/topfreq/libtopfreq_udf.global.a |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/object.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp_tx.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/memory_controller/memtable_collection.cpp |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/session_actor/libcore-kqp-session_actor.a |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/json/libjson_udf.global.a |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/set/libset_udf.global.a |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperscan/libhyperscan_udf.global.a |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/workload_service/actors/pool_handlers_actors.cpp |57.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/health/libfq-libs-health.a |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/drop_column.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp_ru_calc.cpp |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/json2/libjson2_udf.global.a |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/libcore-mind-bscontroller.a |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/alter_column.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/topfreq/static/libcommon-topfreq-static.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp_tx_manager.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_scheme_executer.cpp |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mon/libydb-core-mon.a |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/host/kqp_runner.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_scan_fetcher_actor.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_executer_stats.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/actors/scheme.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp_timeouts.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_peer_stats_calculator.cpp |58.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/config/ut/ydb-services-config-ut |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/host/kqp_host.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor_factory.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/manager.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/nodes_manager.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/pdisk_write.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_upsert_index.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/add_column.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_effects.cpp |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/datetime2/libdatetime2_udf.global.a |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/bscontroller/bsc_audit.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp_resolve.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/alter_sharding.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mon/crossref.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_assign_stream_name.cpp |58.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/ydbd/ydbd |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/snapshot.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_create_dst_result.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/rbo/kqp_rbo_transformer.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/group_write.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_indexes.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_update_index.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ycsb/common.cpp 
|58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_delete_index.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/dst_creator.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_type_ann.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/memory.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_insert.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_upsert_defaults.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/rbo/kqp_operator.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_opt_build.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/proxy_service/kqp_session_info.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_returning.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/upsert_opt.cpp |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/top/libtop_udf.global.a |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/read_attributes_utils.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/logging.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_databases_cache.cpp |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/events/libcore-persqueue-events.a |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_uniq_helper.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/codecs/libcore-persqueue-codecs.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/pdisk_log.cpp |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/config/libcore-persqueue-config.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/events/liblibs-row_dispatcher-events.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/purecalc/libcore-persqueue-purecalc.a |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/opt/logical/libkqp-opt-logical.a |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/events/liblibs-rate_limiter-events.a |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/workload_service/common/libkqp-workload_service-common.a |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/rbo/kqp_convert_to_physical.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_insert_index.cpp |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/public_http/protos/libcore-public_http-protos.a |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/secret_resolver.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ycsb/bulk_mkql_upsert.cpp |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/counters/libcore-grpc_services-counters.a |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_settings.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/rbo/kqp_rbo_rules.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/tablet_flat_executed.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_type_ann_pg.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/yc_search_ut/index_events_processor_ut.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/kqp/opt/rbo/kqp_rbo.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_update.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_datasource.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/actors/scheme_actors.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_init_schema.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_build_stage.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_results.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/counters/proxy_counters.cpp |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/cancelation/protos/libgrpc_services-cancelation-protos.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/libcore-protos-schemeshard.a |57.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/opt/peephole/libkqp-opt-peephole.a |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_assign_tx_id.cpp |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ycsb/test_load_read_iterator.cpp |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_discovery_targets_result.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/rewrite_io_utils.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_provider.cpp |57.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/health_check/libydb-core-health_check.a |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/actors/cpu_load_actors.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperloglog/libhyperloglog_udf.global.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/public_http/libydb-core-public_http.global.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/io_formats/arrow/scheme/libio_formats-arrow-scheme.a |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/control_plane_storage_counters.cpp |57.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/tablet/libcore-grpc_services-tablet.a |57.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/public_http/libydb-core-public_http.a |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/pdisk_read.cpp |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_streaming/libydb-core-grpc_streaming.a |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/address_classification/net_classifier.cpp |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/cancelation/libcore-grpc_services-cancelation.a |57.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/quoter/libydb-core-quoter.a |57.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/tablet/libcore-kesus-tablet.a |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/sessions/sessions.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/alloc.grpc.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_datasink.cpp |57.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/libydb-core-protos.a |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_source.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/auth.grpc.pb.cc |57.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage.grpc.pb.cc |57.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_limit.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/protos/base.grpc.pb.cc |57.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.pb.h_serialized.cpp |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_precompute.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters.pb.cc |57.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters.grpc.pb.cc |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_alter_dst_result.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_olap_agg.cpp |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tablet.cpp |57.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_alter_replication.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/schema.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tablet_db.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_dummy.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_helpers.cpp |57.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pdiskfit.pb.cc |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_service.cpp |57.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_result_set_old.pb.cc |58.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_database.pb.cc |58.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_database.grpc.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_pipe.grpc.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_exec.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view.grpc.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_columnshard.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_expr_nodes.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_drop_replication.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_stage_float_up.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_columnshard.grpc.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_cms.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_sort.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/vdisk_write.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_bs_controller.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_cms.grpc.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_bs_controller.grpc.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/yql_single_query.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bridge.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/kqp.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrapper.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config.grpc.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrapper.grpc.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot.grpc.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ycsb/kqp_select.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bind_channel_storage_pool.grpc.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base3.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/protos/bootstrap.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot_config.grpc.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bind_channel_storage_pool.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base.grpc.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pqconfig.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot_config.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/alloc.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_opt.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/backup.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/provider/yql_kikimr_provider.h_serialized.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/base.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/backup.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/auth.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_config.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base3.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_datashard.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bridge.grpc.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/change_exchange.grpc.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_config.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compaction.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_constant_folding_transformer.cpp |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/resource_pools/resource_pools.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/change_exchange.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/channel_purpose.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cms.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_olap_filter.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_blob_depot.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/channel_purpose.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compaction.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_tenant.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/protos/counters_backup.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters.grpc.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_describe_replication.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cms.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_pq.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_blob_depot.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_backup.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_opt.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_replication.grpc.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_testshard.grpc.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_tenant.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_kesus.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_datashard.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_coordinator.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/service_actor.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_coordinator.grpc.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_hive.grpc.pb.cc |58.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_datashard.grpc.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_kesus.grpc.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_keyvalue.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_mediator.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_column_statistics_requester.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/test_helper/shard_reader.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_node_broker.grpc.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_worker_error.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_hive.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_keyvalue.grpc.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_pq.grpc.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_mediator.grpc.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sysview_processor.grpc.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sysview_processor.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_node_broker.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_pipe.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sequenceshard.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_schemeshard.grpc.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_replication.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_schemeshard.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_statistics_aggregator.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_health.grpc.pb.cc |58.3%| [CC] 
{BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_statistics_aggregator.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sequenceshard.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_testshard.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_allocator.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_health.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_opt_build_phy_query.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv.grpc.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/subdomains.grpc.pb.cc |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/opt/kqp_query_plan.h_serialized.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/stream.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_opt_phy_finalize.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/query_stats.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_query_blocks_transformer.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/netclassifier.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/resource_broker.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/profiler.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/profiler.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statistics.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/replication.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_create_stream_result.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/resource_broker.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/s3_settings.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statestorage.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/query_stats.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/stream.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_opt_phase.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_service_config.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view_types.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view_types.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_statistics_transformer.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_stats.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/replication.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statistics.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/subdomains.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_stats.grpc.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/nodes/nodes.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters_aggregator.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/virtual_group.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters_aggregator.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_datashard.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_service_config.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_slot_broker.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters.pb.cc |58.5%| [CC] 
{BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_columnshard.grpc.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_slot_broker.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tracing_signals.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/counters/counters.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_pool.grpc.pb.cc |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mon/mon.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_pipe.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tx.grpc.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/node_service/kqp_node_service.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tracing.grpc.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_pool.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tx.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tracing_signals.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tracing.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/health/health.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/test_shard.grpc.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/test_shard.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx.grpc.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_result_set_old.grpc.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_sequenceshard.grpc.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_scheme.grpc.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_mediator_timecast.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_mediator_timecast.grpc.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_proxy.grpc.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ycsb/kqp_upsert.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_scheme.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_sequenceshard.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/workload_manager_config.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_opt_effects.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_disk_states.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_resolve_secret_result.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_columnshard.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_flags.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_disk_states.grpc.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_flags.grpc.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/workload_manager_config.grpc.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_proxy.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ycsb/test_load_actor.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/keyvalue_write.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board.grpc.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/yql_translation_settings.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_table_impl.grpc.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/yql_translation_settings.grpc.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/protos/scheme_board_mon.grpc.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_metadata.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_table_impl.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/s3_settings.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_opt_build_txs.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_log.grpc.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_metadata.grpc.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/memory_controller/memory_controller.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_operation.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statestorage.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_log.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_operation.grpc.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pdiskfit.grpc.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board_mon.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.grpc.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.pb.h_serialized.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/query_compiler/kqp_olap_compiler.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs.grpc.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_broker.grpc.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_limits.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/netclassifier.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_limits.grpc.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_whiteboard.grpc.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/session_actor/kqp_query_stats.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_broker.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_backup.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_backup.grpc.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_opt_kql.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_add.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_delete.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_describe.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_update.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_whiteboard.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_init_schema.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_create.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_delete.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_describe.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_integrity_trails.grpc.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_integrity_trails.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_update.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_release.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_acquire.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_proxy.grpc.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/protos/feature_flags.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_session_attach.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pqconfig.grpc.pb.cc |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/query_compiler/kqp_mkql_compiler.cpp |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_opt_hash_func_propagate_transformer.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_session_detach.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_allocator.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_session_destroy.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_sessions_describe.cpp |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/get_group.cpp |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/feature_flags.grpc.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_events.grpc.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_events.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/database_basic_sausage_metainfo.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_proxy.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/debug_info.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/probes.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/database_basic_sausage_metainfo.grpc.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config.grpc.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel.grpc.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_config.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/session_actor/kqp_worker_common.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/db_metadata_cache.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_config.grpc.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/self_heal.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/db_metadata_cache.grpc.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.grpc.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.pb.h_serialized.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op.grpc.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/external_sources.grpc.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/session_actor/kqp_session_actor.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/external_sources.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme.grpc.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/mediator__init.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/quoter_runtime.cpp |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_init.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group.grpc.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/config.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/events.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/public_http/http_router.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/kesus/tablet/quoter_resource_tree.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/probes.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/http_config.grpc.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_pq_old.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/rate_accounting.cpp |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/session_actor/kqp_worker_actor.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/health.grpc.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/public_http/grpc_request_context_wrapper.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tx_config_get.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_status_proxy.grpc.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/hive.grpc.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/session_actor/kqp_query_state.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tablet_html.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_pq_old.grpc.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/group_mapper.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/health.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tablet_impl.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_opt_phy_check.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats.grpc.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_status_proxy.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kesus.grpc.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/index_builder.grpc.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/http_config.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kafka.grpc.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kafka.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/import.grpc.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp.grpc.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/import.pb.cc |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/service/libcore-statistics-service.a |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/database/libcore-statistics-database.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/index_builder.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/sys_view.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/drop_donor.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/hive.pb.cc |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key.pb.cc |58.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.global.a |58.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/certificate_check/libcore-security-certificate_check.a |58.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/aggregator/libcore-statistics-aggregator.a |58.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/counters/libcore-kqp-counters.a |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key.grpc.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine.grpc.pb.cc |58.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/proxy/libcore-kesus-proxy.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical.grpc.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/cmds_box.cpp |58.8%| [CC] 
{BAZEL_DOWNLOAD} $(B)/ydb/core/protos/metrics.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/long_tx_service.grpc.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/load_test.grpc.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/node_report.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local.grpc.pb.cc |58.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_storage/internal/liblibs-control_plane_storage-internal.a |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/metrics.grpc.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/storage_stats_calculator.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_controller_config.pb.cc |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local.pb.cc |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/ldap_auth_provider/libcore-security-ldap_auth_provider.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/long_tx_service.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/maintenance.grpc.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_controller_config.grpc.pb.cc |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/maintenance.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/load_test.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_stats.grpc.pb.cc |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.global.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export.grpc.pb.cc |58.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_stats.pb.cc |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/session_actor/kqp_temp_tables_manager.cpp |58.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/libydb-core-security.a |58.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kesus.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon.pb.cc |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/transfer/libydb-core-transfer.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon.grpc.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/update_seen_operational.cpp |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/libcore-kqp-gateway.a |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/pgproxy/protos/libcore-pgproxy-protos.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/shared_resources/interface/liblibs-shared_resources-interface.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/events/libfq-libs-events.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp.pb.cc |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/proto/liblibs-control_plane_storage-proto.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op.pb.cc |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_caching/libydb-core-grpc_caching.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/events/liblibs-quota_manager-events.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/mind/bscontroller/cmds_drive_status.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/io_formats/ydb_dump/libcore-io_formats-ydb_dump.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_scan_iface.h_serialized.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/signer/libfq-libs-signer.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/libfq-libs-db_id_async_resolver_impl.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/request_validators.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/protos/libcore-tablet_flat-protos.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/events/libkqp-common-events.a |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/group_metrics_exchange.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/aggregator/schema.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/service/http_request.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/request_controller_info.cpp |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tracing/libydb-core-tracing.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/service/libcore-sys_view-service.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/proxy/events.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/certificate_check/cert_auth_processor.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/ldap_auth_provider/ldap_utils.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/monitoring.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/local/libcolumnshard-blobs_action-local.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/session/libcolumnshard-bg_tasks-session.a |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/construction/libchanges-actualization-construction.a |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/init_scheme.cpp |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/local_rpc/libkqp-gateway-local_rpc.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/balance_coverage/libcore-tx-balance_coverage.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/protos/libcolumnshard-bg_tasks-protos.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/workload_service/libcore-kqp-workload_service.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/proxy/proxy_actor.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/events/libcolumnshard-bg_tasks-events.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/protos/libcolumnshard-blobs_action-protos.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/events/liblibs-control_plane_storage-events.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/common/libcolumnshard-blobs_action-common.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/topics/libcore-kqp-topics.a |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/libcore-scheme-protos.a |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tracing/http.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tracing/trace.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/migrate.cpp |58.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/runtime/libcore-kqp-runtime.a |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_session_timeout.cpp |58.5%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/tx/columnshard/blobs_action/bs/libcolumnshard-blobs_action-bs.a |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/disk_metrics.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/update_group_latencies.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_self_check.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/register_node.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/kqp_query_plan.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/config_cmd.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/config_fit_pdisks.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/test_helper/controllers.cpp |58.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/hive/libcore-mind-hive.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/test_connection/libfq-libs-test_connection.a |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tracing/trace_collection.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/service/query_interval.cpp |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/console_interaction.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/scheduler/new/tree/dynamic.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_compute.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/service/db_counters.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/scheduler/new/kqp_compute_scheduler_service.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_effects.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_arrow_memory_pool.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/scheduler/new/tree/snapshot.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sqlin_compact.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/scheduler/new/kqp_schedulable_actor.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_indexes.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/shred.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_join.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/load_everything.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_vdisk_internal.grpc.pb.cc |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/config_fit_groups.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_cbo.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/session_actor/kqp_response.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/query_compiler/kqp_query_compiler.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_read_table.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_ranges_predext.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/grouper.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_scan_data_meta.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_sequencer_factory.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/sequencer.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console_config.pb.cc |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/scrub.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/cmds_host_config.cpp |58.9%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/propose_group_key.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_extract.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/group_layout_checker.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/config.pb.cc |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/stat_processor.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/commit_config.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/layout_helpers.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/in_memory_control_plane_storage.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_ranges.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console_config.grpc.pb.cc |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_scan_data.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config.grpc.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/quoter_service.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/select_groups.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_program_builder.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole_write_constraint.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_helpers.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_stream_lookup_factory.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console.pb.cc |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console.grpc.pb.cc |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_vdisk_internal.pb.cc |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/cmds_storage_pool.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config.pb.cc |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/kesus_quoter_proxy.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sort.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/config.grpc.pb.cc |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/bsc.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole_wide_read.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/update_last_seen_ready.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tracing/tablet_info.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_write_actor_settings.cpp |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_config_set.cpp |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_semaphore_timeout.cpp |58.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/testlib/basics/libcore-testlib-basics.a |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/abstract/libcolumnshard-data_accessor-abstract.a |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/storages_manager/libcolumnshard-blobs_action-storages_manager.a |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/storage/libcore-sys_view-storage.a |58.8%| 
[AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/local_pgwire/libydb-core-local_pgwire.a |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/serverless_proxy_config.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/run_script_actor/kqp_run_script_actor.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/service/sysview_service.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/quoter/public/libcore-quoter-public.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/events/events.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/grpc.grpc.pb.cc |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/database/database.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/service/ext_counters.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/proxy/proxy.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_effects.cpp |58.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/testlib/actors/libcore-testlib-actors.a |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/local_pgwire/sql_parser.cpp |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/serverless_proxy_config.grpc.pb.cc |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_queries.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sqlin.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/local_pgwire/local_pgwire_util.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/counters/kqp_counters.cpp |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/abstract/libcolumnshard-blobs_action-abstract.a |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/msgbus.pb.cc |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/grpc.pb.cc |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/manager/libcolumnshard-bg_tasks-manager.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_read_iterator_common.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/log_backend/libydb-core-log_backend.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/login_shared_func.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_write_table.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/public_http/http_service.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/scheduler/old/kqp_compute_scheduler.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/service/service.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/certificate_check/cert_check.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_analyze_deadline.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/log_backend/json_envelope.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/public_http/http_req.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/constructor.cpp |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/common/libpersqueue-ut-common.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/certificate_check/cert_auth_utils.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/data_accessor/abstract/manager.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/leader_tablet_info.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/internal/nodes_health_check.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/msgbus.grpc.pb.cc |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/libtx-columnshard-blobs_action.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/boot_queue.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_aggr_stat_response.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/blob_set.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/ticket_parser.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_resolve.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/actualization/construction/context.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/read.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_ack_timeout.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/action.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/common.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/behaviour.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/bs/read.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/events/libcolumnshard-blobs_action-events.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/service/service_impl.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_configure.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/abstract.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.global.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_response_tablet_distribution.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/pire/libpire_udf.global.a |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/response_tasks.cpp |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/histogram/libhistogram_udf.global.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/target_base.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_handle.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/external_data_source/manager.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider_linux.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/libcolumnshard-common-protos.a |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__lock_tablet.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/spack/libmonlib-encode-spack.a |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/partition_stats/libcore-sys_view-partition_stats.a |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_locks/manager/libcolumnshard-data_locks-manager.a |58.8%| [AR] 
{BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/destination/session/libdata_sharing-destination-session.a |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__update_tablets_object.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_request.cpp |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/out/libcore-protos-out.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/writer/libcore-persqueue-writer.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/manager/libcolumnshard-data_sharing-manager.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/initiator/status/libdata_sharing-initiator-status.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/compress_base/lib/libcommon-compress_base-lib.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/partition_key_range/libcore-persqueue-partition_key_range.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/initiator/controller/libdata_sharing-initiator-controller.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/rm_service/libcore-kqp-rm_service.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_bsc.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_init.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/local_rpc/helper.cpp |58.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/auth/libcore-sys_view-auth.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/yc_search_ut/test_events_writer.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/storage_group_info.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/protos/out/out_long_tx_service.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_response.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/follower_tablet_info.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tablet_move_info.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/aggregator.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/protos/out/out_sequenceshard.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/behaviour.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_domains.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/protos/out/out_cms.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/collector.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__delete_tablet_result.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__update_tablet_status.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__kill_node.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__process_boot_queue.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/controller/libchanges-actualization-controller.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_impl.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/health_check/health_check.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/bs/remove.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_init_schema.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/login_page.cpp |58.5%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/core/raw_socket/libydb-core-raw_socket.a |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tablet_info.cpp |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/libydb-core-scheme.a |58.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/query_data/libcore-kqp-query_data.a |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/util.cpp |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_transport.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/ip_base/lib/libcommon-ip_base-lib.a |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/monitoring.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_schedule_traversal.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/io_formats/cell_maker/libcore-io_formats-cell_maker.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_navigate.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/shared_resources/libfq-libs-shared_resources.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/drain.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/logs/dsv/libdsv_udf.global.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_finish_trasersal.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/fill.cpp |58.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/destination/events/libdata_sharing-destination-events.a |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__update_tablet_groups.cpp |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme_types/libydb-core-scheme_types.a |58.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/libydb-core-mind.a |58.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/libydb-core-persqueue.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/kqp_metadata_loader.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/offload_actor.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/blob.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/domain_info.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_output_stream.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/aggregator_impl.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_analyze.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__disconnect_node.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/common_app.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/microseconds_sliding_window.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/query_data/kqp_query_data.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/kqp_gateway.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/metering_sink.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/kqp_workload_service.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__delete_tablet.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/type_codecs_defs.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/header.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__init_scheme.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__configure_scale_recommender.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__load_everything.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/node_info.cpp |58.9%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_datashard_scan_response.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/write_meta.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/write_id.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__seize_tablets_reply.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__adopt_tablet.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/tablets/tablets.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/storage_balancer.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__switch_drain.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_delivery_problem.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/slot_indexes_pool.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__release_tablets_reply.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__request_tablet_owners.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__update_domain.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__create_tablet.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/balancer.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__stop_tablet.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__process_pending_operations.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/heartbeat.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/key.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/storage_pool_info.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_log.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__request_tablet_seq.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__block_storage_result.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__sync_tablets.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/execute_queue.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_schemeshard_stats.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__update_dc_followers.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_read_actor.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__reassign_groups_on_decommit.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/actors/test_runtime.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/utils.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__register_node.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__configure_subdomain.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_sequencer_actor.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_tasks_runner.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/sourceid_info.h_serialized.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/storage/storage_stats.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/constructor.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/kqp_ic_gateway.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/storages_manager/manager.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/mind/hive/tx__resume_tablet.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__reassign_groups.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__release_tablets.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/quota_tracker.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/pq_rl_helpers.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/actors/wait_events.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.global.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/libchanges-compaction-sparsed.global.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/percentile_counter.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/local_pgwire/local_pgwire_auth_actor.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__cut_tablet_history.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/pq_database.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_stream_lookup_worker.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.a |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/libstorage-indexes-portions.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__response_tablet_seq.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__seize_tablets.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__unlock_tablet.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_statics.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/modification/tasks/libdata_sharing-modification-tasks.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_write_actor.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/log_backend/log_backend.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/collector.cpp |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__delete_node.cpp |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/local_pgwire/local_pgwire.cpp |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/storage/pdisks.cpp |58.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.a |58.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/modification/events/libdata_sharing-modification-events.a |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/query_data/kqp_predictor.cpp |58.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/libengines-reader-abstract.a |58.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/counters/libtx-columnshard-counters.a |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__update_tablet_metrics.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/counters/libengines-changes-counters.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/basics/services.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/libreader-sys_view-abstract.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common/libengines-reader-common.a |58.9%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/columnshard/blobs_action/bs/address.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/actor/libengines-reader-actor.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.global.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/blob_manager_db.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/libreader-common_reader-constructor.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/manager/shared_blobs.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/actors/block_events.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/basics/helpers.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/local_pgwire/pgwire_kqp_proxy.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__status.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/duplicate_filtering.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/error_collector.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/splitter.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/writes_monitor.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/counters/scan.h_serialized.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__generate_data_ut.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/log_backend/log_backend_build.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__tablet_owners_reply.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/common_data.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/gc_actor.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/column_tables.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/lease_holder.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/background_controller.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/counters/columnshard.h_serialized.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/storage/groups.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/transfer/transfer_writer.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/columnshard.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/events/delete_blobs.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/runtime/kqp_stream_lookup_actor.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__start_tablet.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__restart_tablet.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/manager.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common/queue.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/basics/runtime.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/storage/storage_pools.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_result_write.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/shared_sausagecache.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common/stats.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/apps/version/version_definition.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/storage.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/libreader-simple_reader-duplicates.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/req_tracer.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/graph/shard/libcore-graph-shard.a |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/portion_index.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.a |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/pg_tables/libcore-sys_view-pg_tables.a |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.global.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/local_pgwire/local_pgwire_connection.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/portions.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/writer/source_id_encoding.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/basics/appdata.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/storages_manager.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/common.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.global.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/builder.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_bindings.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/status.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/storage/vslots.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/counters/blobs_manager.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/graph/shard/backends.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/writer/metadata_initializers.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/logic.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/remap.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/transaction/libcolumnshard-blobs_action-transaction.a |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/libsimple_reader-iterator-collections.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/auth/permissions.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/source/transactions/libdata_sharing-source-transactions.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_node_enumeration.cpp |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/counters/libcolumnshard-blobs_action-counters.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/tasks_packer/libfq-libs-tasks_packer.a |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.global.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/rm_service/kqp_resource_info_exchanger.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/bs/storage.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/libcolumnshard-engines-protos.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/libreader-sys_view-granules.global.a |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/protos/out/out.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/gc.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/rm_service/kqp_resource_estimation.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/partition_stats/partition_stats.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/logic.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/counters/indexation.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/actualization/controller/controller.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/iterator.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/bs/write.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/rm_service/kqp_rm_service.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/auth/users.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker__load_state.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/auth/groups.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mon_alloc/libydb-core-mon_alloc.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/common/libscheme-defaults-common.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/common/pq_ut_common.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/portions/meta.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/libtx-columnshard-common.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/ip_base/libip_udf.global.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/ydb/libfq-libs-ydb.a |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mon_alloc/tcmalloc.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker__update_node_location.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mon_alloc/memory_info.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mon_alloc/stats.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/scheme/libcolumnshard-engines-scheme.a |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mon_alloc/profiler.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker__extend_lease.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/predicate/libcolumnshard-engines-predicate.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker__init_scheme.cpp |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/common/context/libdata_sharing-common-context.a |59.1%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/tiering/libengines-scheme-tiering.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_locks/locks/libcolumnshard-data_locks-locks.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/scheme/versions/libengines-scheme-versions.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/shard/protos/libgraph-shard-protos.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker__load_state.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.global.a |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/libengines-changes-compaction.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common/description.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/abstract_scheme.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_reader/libtx-columnshard-data_reader.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/abstract.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h_serialized.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/subscriber.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/predicate/predicate.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/cluster_tracker.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/local.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/counters/scan.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/schema_diff.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/resolver.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/partition_stats/top_partitions.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/counters/engine_logs.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker__update_epoch.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker__init_scheme.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/tier_info.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/data_locks/locks/abstract.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/event_helpers.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/manager/sessions.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/auth/group_members.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/auth/owners.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common/conveyor_task.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/data_locks/locks/composite.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker__register_node.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/libengines-changes-abstract.a |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/query_stats/libcore-sys_view-query_stats.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/user_info.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/session/destination.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/engines/storage/indexes/portions/constructor.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/destination/transactions/libdata_sharing-destination-transactions.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/common/autoscaling_ut_common.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker__assign_free_slots.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker__update_config.cpp |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/pgproxy/libydb-core-pgproxy.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/write_quoter.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/dread_cache_service/caching_service.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/settings.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/mirrorer.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_scale_request.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/compaction_info.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/counters/counters_manager.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker__graceful_shutdown.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/writer/writer.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/control.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/libstorage-indexes-bloom.global.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/libstorage-indexes-bits_storage.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/test_tablet/libydb-core-test_tablet.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/read_quoter.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/read_balancer__balancing_app.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker__update_pool_status.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/remove.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/libstorage-indexes-count_min_sketch.global.a |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.global.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/libindexes-portions-extractor.a |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.global.a |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/libtx-columnshard-data_accessor.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker__migrate_state.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common/description.h_serialized.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/testlib/audit_helpers/libcore-testlib-audit_helpers.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/mind/tenant_slot_broker__alter_tenant.cpp |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/protos/libcore-graph-protos.a |58.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/libstorage-indexes-skip_index.a |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/rm_service/kqp_snapshot_manager.cpp |58.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kafka_proxy/libydb-core-kafka_proxy.a |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker__update_config_subscription.cpp |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_locks/manager/manager.cpp |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/abstract/libcolumnshard-bg_tasks-abstract.a |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/dynamic_nameserver.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker__update_config.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/portions/libcolumnshard-engines-portions.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/request.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker__check_slot_status.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker.cpp |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/abstract/libengines-scheme-abstract.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/configured_tablet_bootstrapper.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/fetch_request_actor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/labels_maintainer.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/pq_impl.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_compaction.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/dynamic_nameserver_mon.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/account_read_quoter.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/pq_impl_app.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_blob_encoder.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/base_with_blobs.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mon_alloc/monitor.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_pool.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/list_all_topics_actor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/writer/partition_chooser_impl.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_scale_manager.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_monitoring.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_sourcemanager.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/common.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/read_context.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/index_chunk.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/pq_l2_cache.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/read_balancer__balancing.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/persqueue/ownerinfo.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/sourceid.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_transaction_actor_sql.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/constructors.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/kafka_messages.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_slot_broker__update_slot_status.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/read_balancer__balancing.h_serialized.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/transfer.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/transaction.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/policy.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common/result.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/portions/portion_info.h_serialized.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/manager.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/predicate/range.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/view/manager.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/meta.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/filtered_scheme.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/filtered_scheme.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/objects_cache.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/modification/events/change_owning.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/query_data/kqp_prepared_query.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/kafka_records.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_ping.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/kafka_proxy/kafka.h_serialized.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/modification/tasks/modification.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_init.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/actor/actor.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/column_features.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/versioned_index.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/default_fetching.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/predicate/container.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/filler.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/kafka_messages_int.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/granule_view.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/libsimple_reader-iterator-sync_points.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/kafka_consumer_protocol.cpp |58.9%| [AR] 
{BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/libcolumnshard-engines-changes.a |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/loading/libcolumnshard-engines-loading.a |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/max/libstorage-indexes-max.global.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/read_balancer.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/snapshot_scheme.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/snapshot_scheme.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_write.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/read_balancer_app.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/keyvalue/libydb-core-keyvalue.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/events.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/libchanges-compaction-abstract.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/bs/blob_manager.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/constructor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_read.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/constructor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/collector.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/constructor.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/pq.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/metadata.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_collect_operation.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_data.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_store_metrics.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/sub_columns_fetching.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_monitoring.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/constructor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_stored_state_data.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/constructor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_change_backend.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_simple_db_flat.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/read_metadata.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/shard_impl.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_helpers.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/liboptimizer-lcbuckets-constructor.global.a |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/processor/libcore-sys_view-processor.a |59.1%| 
[CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_draft.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/dictionary/libchanges-compaction-dictionary.global.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/limit_sorted.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/iterator.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/transaction/libengines-reader-transaction.a |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.global.a |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/write.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/meta.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/priorities/service/counters.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_startup.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_init_schema.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/common/libchanges-compaction-common.a |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/constructor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/internal/rate_limiter_resources.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/data_accessor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_meta.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_accessor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_get_metrics.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/checkpointing/ut/checkpoint_coordinator_ut.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/granules.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/events.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/processor/schema.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/tier/libcolumnshard-blobs_action-tier.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/abstract.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/common/result.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/libreader-simple_reader-iterator.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/write_with_blobs.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/not_sorted.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/engines/scheme/schema_version.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_finish_ack_to_source.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_aggregate_data.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/testlib/libydb-core-testlib.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_reader/contexts.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/meta.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/remove.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_data_ack_to_source.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc_info.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/read.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/common.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_blobs_written.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/merge.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/abstract_scheme.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/libreader-sys_view-optimizer.global.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/query_stats/query_stats.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/predicate/filter.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_gc_indexed.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/constructor.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/workload_service/tables/libkqp-workload_service-tables.a |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/libreader-sys_view-chunks.global.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/full_scan_sorted.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/libstorage-actualizer-tiering.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/manager.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/libdata_sharing-common-session.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/pg_tables/pg_tables.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/max/meta.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/index_info.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_locks/locks/snapshot.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/column_record.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/portion_info.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/testlib/storage_helpers.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/libscheme-indexes-abstract.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/chunks/libengines-storage-chunks.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_start_source_cursor.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/source/session/libdata_sharing-source-session.a |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/testlib/actor_helpers.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_reader/fetching_executor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/view/behaviour.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/read_metadata.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/utils/libkqp-gateway-utils.a |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/libreader-sys_view-constructor.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/libstorage-actualizer-abstract.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_write_index.cpp |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/column/libengines-scheme-column.a |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/max/constructor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/abstract.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/collection.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_reader/fetcher.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/changes.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/header.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/common.h_serialized.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/written.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/audit_helpers/audit_helper.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/checker.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/compacted.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/common.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.global.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/common/libengines-scheme-common.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/initiator/controller/libdata_sharing-initiator-controller.global.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_remove_blobs.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/actor.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/index/libstorage-actualizer-index.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/tx/columnshard/engines/writer/buffer/libengines-writer-buffer.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_reader/contexts.h_serialized.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/libcolumnshard-data_sharing-protos.a |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet/libydb-core-tablet.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/common/libstorage-actualizer-common.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/protos/libcolumnshard-export-protos.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/query_stats/query_metrics.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/processor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_start_to_source.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/granule/libengines-storage-granule.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_reader/actor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/labeled_db_counters.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/private/aggregated_counters.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_reader/fetching_steps.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_locks/locks/list.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_write_source_cursor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_find_coordinator_actor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_portion.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/pipe_tracker.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/meta.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/labeled_counters_merger.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kafka_transactional_producers_initializers.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_top_partitions.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_pipe_server.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kqp_helper.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_configure.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/merger.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/dictionary/logic.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_pipe_client_cache.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_alter_configs_actor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_interval_metrics.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/test_shard_context.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_init_schema.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.h_serialized.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_finish_from_source.cpp 
|59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_list_offsets_actor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/processor_impl.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_finish_ack_from_initiator.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_pipecache.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/load_actor_impl.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_interval_summary.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/common/context.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_offset_commit_actor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_collect.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_aggregate.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/move_portions.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_sasl_auth_actor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_start_from_initiator.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/merger.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/manager.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/read_with_blobs.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/counters.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/private/labeled_db_counters.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/writer/libcolumnshard-engines-writer.a |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_counters_app.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_counters.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_tracing_signals.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/actor/libcolumnshard-export-actor.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/resource_pools/libydb-core-resource_pools.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_describe_configs_actor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/db_counters.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_api_versions_actor.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/events/libcolumnshard-export-events.a |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/blob_constructor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_metrics_actor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/write_controller.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/put_status.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_reader/libtx-columnshard-blobs_reader.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kafka_transactions_coordinator.cpp |59.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/tests/objcopy_af18efc2f04dd1af5ca802c329.o |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_topic_offsets_actor.cpp |59.2%| [AR] 
{BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_data_from_source.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kafka_metrics.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/url_base/liburl_udf.global.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/selector/liblcbuckets-constructor-selector.global.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/txn_actor_response_builder.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_sasl_handshake_actor.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_offset_fetch_actor.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/source/events/libdata_sharing-source-events.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/libreader-plain_reader-iterator.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/selector/backup/libsession-selector-backup.global.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_create_topics_actor.cpp |58.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/tests/objcopy_87b299e07b15c86f4f50f458ef.o |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_balance_actor_sql.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/libtx-columnshard-engines.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/libcore-tx-columnshard.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.global.a |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kafka_consumer_members_metadata_initializers.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/resource_broker.cpp |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/libstorage-indexes-bits_storage.global.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/counters/libstorage-actualizer-counters.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_responsiveness_pinger.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_transaction_actor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_create_partitions_actor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_init_producer_id_actor.cpp |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/storage/abstract/libsession-storage-abstract.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/test_shard_mon.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_read_session_actor.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/storage/s3/libsession-storage-s3.global.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_init.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/engines/changes/abstract/remove_portions.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/tx_load_everything.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/constructor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_metrics.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/defs.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/hooks/abstract/libcolumnshard-hooks-abstract.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_balancer_actor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/node_whiteboard.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/loading/stages.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_intermediate.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/defs.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/load_actor_state.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/test_tablet.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_produce_actor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_get.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/tx_initialize.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/general_compaction.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc_actor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blob.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/filter.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_state.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/tx_init_scheme.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/load_actor_read_validate.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/limit.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetched_data.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/constructor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_metadata_actor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/const.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_pipe_client.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_counters_aggregator.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/merge_subset.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kafka_connection.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_storage_request.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/zero_level.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kafka_consumer_groups_metadata_initializers.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_state_collect.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/columnshard/counters.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/abstract.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/tables/table_queries.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/tier/storage.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/constructor.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.h_serialized.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/one_layer.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/libscheme-defaults-protos.a |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/libstorage-actualizer-scheme.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/libindexes-portions-extractor.global.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/operations/common/libcolumnshard-operations-common.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/transactions/libcolumnshard-bg_tasks-transactions.a |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/program/program.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/tx_drop_stream_result.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/program/builder.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/ttl.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/selector/abstract/libsession-selector-abstract.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/common/libcolumnshard-export-common.a |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/libreader-sys_view-portions.global.a |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_index_record.cpp |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/schema_version/libcolumnshard-normalizer-schema_version.global.a |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/liboptimizer-lbuckets-constructor.global.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/events/tx_completed/libsubscriber-events-tx_completed.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_collector.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/transaction/tx_internal_scan.cpp |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/common/libtx-schemeshard-common.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/cleanup_portions.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/tx/columnshard/normalizer/abstract/libcolumnshard-normalizer-abstract.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/tenant_runtime.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/resource_subscriber/libtx-columnshard-resource_subscriber.a |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/export/session/cursor.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/operations/slice_builder/libcolumnshard-operations-slice_builder.a |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.global.a |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/fake_coordinator.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/result.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.global.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/with_appended.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/granule/libcolumnshard-normalizer-granule.global.a |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/conveyor_composite/usage/libtx-conveyor_composite-usage.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetching.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/tablet_flat_dummy.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/graph_reorder/libyt-lib-graph_reorder.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/abstract/subscriber/libsubscriber-abstract-subscriber.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Pygments/py3/libpy3python-Pygments-py3.global.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/storage/tier/libsession-storage-tier.global.a |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.global.a |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/utils.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/load_actor_write.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/cleanup_tables.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/events.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/resource_subscriber/container.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_fetch_actor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/abstract/abstract.h_serialized.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/common_helper.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/iterator.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/utils/scheme_helpers.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/columnshard/resource_subscriber/counters.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/column_portion_chunk.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/context.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/bootstrapper.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_reader/events.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_resolver.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/merged_column.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/tier/write.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/session/selector/backup/selector.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/conveyor_composite/usage/common.h_serialized.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/bs/gc.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/tier/adapter.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/common/session/common.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/session/cursor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/meta.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/plain_read_data.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/optimizer.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/column_cursor.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/libschemeshard-olap-operations.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/transaction/tx_scan.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/hash/libyt-lib-hash.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/actor/write.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/sfh/libcpp-digest-sfh.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/tiering.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/common.h_serialized.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/events/transfer.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/utils/metadata_helpers.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetch_steps.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/chunks.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/source.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/columnshard.h_serialized.cpp |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/ydb_proxy/local_proxy/libreplication-ydb_proxy-local_proxy.a |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cryptography/py3/libpy3python-cryptography-py3.a |58.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/loading/libtx-columnshard-loading.a |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/engines/writer/buffer/actor.cpp |58.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.global.a |58.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/indexes/libschemeshard-olap-indexes.a |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/tx_reader/libtx-columnshard-tx_reader.a |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_req_reset.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/abstract/events/libsubscriber-abstract-events.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/granule/granule.h_serialized.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cffi/py3/libpy3python-cffi-py3.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/session/source.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/manager/libschemeshard-olap-manager.a |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_list_renderer.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/counters.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_req_delete.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_reader/task.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/tables/libcolumnshard-normalizer-tables.global.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/session/selector/abstract/selector.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_sys.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/optimizer.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_billing_helpers.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_req_findlatest.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blob_cache.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_reader/read_coordinator.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/fetcher.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/schema_version/version.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/events/events.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_monitoring_proxy.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/granule/portions_index.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_req_writelog.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_req_rebuildhistory.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/schemeshard_types.h_serialized.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_types.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/constructor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/granule/storage.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetching.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/schemeshard/schemeshard_path_element.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/actor/export_actor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard_private_events.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/logic.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_reader/actor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_req_blockbs.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/actor2.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/tx_helpers.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard_schema.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/user_attributes.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_xxport__helpers.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/context.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/node_tablet_monitor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/bs/gc_actor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import_scheme_query_executor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/index/index.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/session.h_serialized.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/source.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/granule/granule.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/merge.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/constructor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetched_data.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/interval.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/granule/stages.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/session/storage/s3/storage.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/iterator.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/fetched_data.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/resource_subscriber/actor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/events/control.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/tablet_helpers.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/context.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/session/control.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/db_wrapper.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/writer/compacted_blob_constructor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/cs_helper.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/background_controller.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/writer/indexed_blob_constructor.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/indexes/schema.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/columnshard_impl.h_serialized.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/tables_manager.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/column_engine_logs.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/granule/normalizer.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/read_metadata.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/column_engine.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_cansel_build_index.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_unused_tables_template.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/session/task.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/source.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/load_actor_delete.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/resource_subscriber/task.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/indexes/update.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__notify_tx_completion.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/granule/clean_granule.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__write.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/special_cleaner.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard_subdomain_path_id.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/resource_subscriber/events.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/inflight_request_tracker.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__progress_tx.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__propose_cancel.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__scan.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/usage/events.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks_v0_meta.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks_actualization.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_self_pinger.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/iterator.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/scanner.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__write_index.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/fetching.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/schemeshard/olap/manager/manager.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/apps/ydbd/main.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/plain_read_data.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/restore_v2_chunks.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/constructor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/session/session.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard_impl.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/leaked_blobs.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/column_engine_logs.h_serialized.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__data_erasure_manager.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/usage/common.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/usage/config.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_index_columns.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/tables/normalizer.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard_view.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/hooks/abstract/abstract.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/state_server_interface.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_deprecated_snapshot.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__statistics.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__plan_step.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__init.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/portions.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/usage/service.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/scheme.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_utils.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/portion.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_empty.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/propose_tx.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/secondary.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/abstract/abstract.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_ttl_preset_setting_version_info.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/load_actor_mon.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/write_actor.cpp |59.2%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_ttl_preset_setting_info.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/primary.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/broken_blobs.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/schemeshard_info_types.h_serialized.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__init.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/slice_builder/builder.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_validate_ttl.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter_store.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/drop_store.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/create_table.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_lock.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_audit_log_fragment.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_info_types.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__monitoring.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/slice_builder/pack_builder.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/test_client.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/loading/stages.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/simple.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__backup_collection_common.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.global.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_path.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_svp_migration.cpp |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/transactions/libcolumnshard-transactions-transactions.a |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/libcore-tx-scheme_board.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/sharing.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__notify.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__init_root.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__list_users.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/create_store.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__fix_bad_paths.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_bg_tasks__list.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__background_compaction.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_fs.cpp 
|59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/two_part_description.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/scheme_board/subscriber.h_serialized.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__borrowed_compaction.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/restore_v1_chunks.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/opaque_path_description.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/events.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_path_describer.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/helpers.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_fs.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_login.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_table.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/schema.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_cancel_tx.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/drop_table.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_shard_deleter.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_bsv.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_cdc_stream.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/backup.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_pq.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_effective_acl.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__make_access_database_no_inheritable.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__conditional_erase.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_incremental_backup_collection.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/options/libschemeshard-olap-options.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/selector/liblcbuckets-planner-selector.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/protos/libcolumnshard-transactions-protos.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cffi/py3/libpy3python-cffi-py3.global.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/aiohttp/libpy3contrib-python-aiohttp.global.a |59.1%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/columnshard/resources/libtx-columnshard-resources.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/events/libolap-bg_tasks-events.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup_collection.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/libstorage-optimizer-abstract.a |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/modification/transactions/libdata_sharing-modification-transactions.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__init_schema.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__login.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__describe_scheme.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/events/tables_erased/libsubscriber-events-tables_erased.a |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_identificators.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/splitter/abstract/libcolumnshard-splitter-abstract.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_data_source.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_cdc_stream.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_backup_collection.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_change_path_state.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_subdomain.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_indexed_table.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_build_index.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_bsv.cpp |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/selector/liblcbuckets-constructor-selector.a |59.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/tests/objcopy_f3c323ef80ada193284f036d44.o |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/tiling/libstorage-optimizer-tiling.global.a |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/tablet/libtx-columnshard-tablet.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_sequence.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_resource_pool.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_continuous_backup.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_view.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_continuous_backup.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/schemeshard/schemeshard__find_subdomain_path_id.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_assign_bsv.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_continuous_backup.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/local_proxy/local_proxy.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/conveyor_composite/service/libtx-conveyor_composite-service.a |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_sequence.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore_incremental_backup.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_index.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_blob_depot.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_data_source.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/conveyor_composite/service/counters.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_cdc_stream.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/data_events/libcore-tx-data_events.a |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_resource_pool.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/conveyor_composite/service/common.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/conveyor/usage/libtx-conveyor-usage.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_apply_build_index.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/conveyor/service/libtx-conveyor-service.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_table.cpp |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/data_events/common/libtx-data_events-common.a |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/libtx-columnshard-transactions.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyYAML/py3/libpy3python-PyYAML-py3.a |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/transactions/locks/interaction.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/transactions/locks/abstract.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/tx/columnshard/splitter/libtx-columnshard-splitter.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_data_source.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/data_events/write_data.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_solomon.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_index.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_fs.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_bsv.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_backup_collection.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_initiate_build_index.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/splitter/blob_info.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/splitter/settings.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/cache.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export_uploaders.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/splitter/chunks.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/splitter/chunk_meta.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/splitter/column_info.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/libydb-core-tx.a |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/long_tx_service/libcore-tx-long_tx_service.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_pq.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/limiter/grouped_memory/service/liblimiter-grouped_memory-service.a |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_cache/libcore-tx-scheme_cache.a |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/tablet/libcolumnshard-normalizer-tablet.global.a |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/conveyor/service/worker.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Jinja2/py3/libpy3python-Jinja2-py3.global.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/coordinator/public/libtx-coordinator-public.a |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/message_seqno.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy_schemereq.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_sequence.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/long_tx_service/lwtrace_probes.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/long_tx_service/long_tx_service.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.global.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_consistent_copy_tables.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/schemeshard/olap/options/update.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/limiter/grouped_memory/service/allocation.h_serialized.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/service/ids.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/service/process.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/service/allocation.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_unsafe.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/service/group.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/service/counters.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/ydb_proxy/local_proxy/local_partition_actor.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_view.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_solomon.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_sysview.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/scheme_cache/scheme_cache.h_serialized.cpp |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/limiter/grouped_memory/usage/liblimiter-grouped_memory-usage.a |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/coordinator/libcore-tx-coordinator.a |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/locks/libcore-tx-locks.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_table.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_lock.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/options/schema.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/transactions/locks/write.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/subscriber.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table_index.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/transactions/locks/read_finished.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/common/libtx-replication-common.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_rtmr.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_subdomain.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/usage/events.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/locks/range_treap.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/usage/abstract.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/locks/time_counters.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/locks/locks_db.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_just_reject.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/usage/stage_features.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_sysview.cpp |59.2%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_indexed_table.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_cdc_stream.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_db_changes.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/priorities/usage/libtx-priorities-usage.a |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/coordinator/coordinator_hooks.cpp |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/ydb_proxy/libtx-replication-ydb_proxy.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__serverless_storage_billing.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats_histogram.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_solomon.cpp |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/liblcbuckets-planner-level.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_restore_backup_collection.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp |58.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_table.cpp |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/coordinator/protos/libtx-coordinator-protos.a |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/long_tx_service/public/libtx-long_tx_service-public.a |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/priorities/usage/abstract.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/priorities/usage/events.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_tables.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/topic_message.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_index.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__publish_to_scheme_board.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/counters.cpp |58.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/service/workers_pool.cpp |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_memory_changes.cpp |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_resource_pool.cpp |58.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.a |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/replica.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/backup_restore_traits.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__state_changed_reply.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_part.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/schemeshard/schemeshard__sync_update_tenants.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_side_effects.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/load_test.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_replication.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_kesus.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/operation_queue_timer.h_serialized.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/erase_rows_condition.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/populator.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_backup.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/long_tx_service/long_tx_service_impl.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import_getters.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/monitoring.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_schema.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_split_merge.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__root_data_erasure_manager.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_user_table.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_table.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_access_database.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/transactions/tx_add_sharding_info.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/transactions/tx_finish_async.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/backup_restore_traits.h_serialized.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/locks/dependencies.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__tenant_data_erasure_manager.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/splitter/batch_slice.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/change_record_cdc_serializer.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__unmark_restore_tables.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/change_record_body_serializer.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/locks/locks.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/optimizer.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/change_exchange.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index_tx_base.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/tablet/gc_counters.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/data_events/shard_writer.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__check.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/change_exchange.h_serialized.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/change_record.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_cache/scheme_cache.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/abstract.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__init.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_domain_links.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/service/worker.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/ydb_proxy/local_proxy/local_partition_reader.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/data_events/shards_splitter.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/long_tx_service/acquire_snapshot_impl.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__last_step_subscriptions.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export__create.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/data_events/columnshard_splitter.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_sequence.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/service/manager.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/service/service.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__restore_params.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__pq_stats.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__create.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_common.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__get.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__acquire_read_step.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/service/process.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import__get.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__cancel.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export__forget.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator_state.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__restore_transaction.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__clean_pathes.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__monitoring.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator_impl.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/tablet/broken_txs.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/conveyor_composite/service/category.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import__create.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__configure.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_failpoints.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/tablet/write_queue.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__stop_guard.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export__get.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__schema.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import__cancel.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_audit_log.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/service/events.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_bsv.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/locks/read_start.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/ydb_proxy/local_proxy/local_partition_committer.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__plan_step.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__mediators_confirmations.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/sync.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_impl.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/mediator_queue.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/normalizer.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/limiter/grouped_memory/service/actor.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export__cancel.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor/usage/abstract.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/optimizer.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor/usage/config.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/tiling/tiling.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__forget.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_scan.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_index.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/long_tx_service/commit_impl.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__read_step_subscriptions.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__schema_upgrade.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/tx_controller.h_serialized.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/export_s3_buffer.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_kqp_effects.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__list.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/schemeshard/schemeshard_export_flow_proposals.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import_flow_proposals.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_kqp_delete_rows.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/limiter/grouped_memory/usage/service.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/limiter/grouped_memory/service/manager.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/export_iface.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor/usage/events.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import__forget.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/drop_volatile_snapshot_unit.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor/usage/service.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/execution_unit_kind.h_serialized.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/modification/transactions/tx_change_blobs_owning.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__init_populator.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_collector_cdc_stream.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/extstorage_usage_config.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/kmeans_helper.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor/service/service.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/limiter/grouped_memory/usage/config.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/tablet/ext_tx_base.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/key_conflicts.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_data_tx_out_rs_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/drop_table_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__monitoring.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/incr_restore_helpers.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/reshuffle_kmeans.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/alter_cdc_stream_unit.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/drop_cdc_stream_unit.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__background_cleaning.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/range_ops.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/backup_unit.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export__list.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_write_operation.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/priorities/usage/config.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_scheme_tx_out_rs_unit.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/alter_table_unit.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/service/scope.cpp |59.2%| 
[CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_change_sending.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_and_wait_dependencies_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_collector_async_index.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/locks_db.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/cdc_stream_heartbeat.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/probes.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/drop_index_notice_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import__list.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__init.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/priorities/usage/service.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_distributed_erase_tx_unit.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/cdc_stream_scan.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/direct_tx_unit.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/common_level.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/tx_controller.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/drop_persistent_snapshot_unit.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/scan_common.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_distributed_erase_tx_out_rs_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_collector.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__column_stats.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_sender_async_index.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__data_cleanup.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/completed_operations_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__cleanup_in_rs.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/recompute_kmeans.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/zero_level.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/create_incremental_restore_src_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/sample_k.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/type_serialization.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__migrate_schemeshard.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__read_columns.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/local_kmeans.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/complete_data_tx_unit.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/stream_scan_common.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_sender_table_base.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/secondary_index.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__cleanup_tx.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/datashard/datashard__cleanup_borrowed.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/datashard.h_serialized.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_user_db.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/export_scan.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/datashard_s3_upload.h_serialized.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/load_write_details_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_commit_writes_tx_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_sender_incr_restore.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_snapshot_tx_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/complete_write_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_write_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__cleanup_uncommitted.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_direct_upload.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_sender.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__write.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/load_and_wait_in_rs_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__read_iterator.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/query_stats/query_stats_ut.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_data_tx_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/conflicts_cache.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__cancel_tx_proposal.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__propose_tx_base.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_active_transaction.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/create_cdc_stream_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__store_scan_state.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/helpers/get_value.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__get_state_tx.cpp |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/ut_helpers/libtx-replication-ut_helpers.a |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/libtopic-ut-ut_utils.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__compaction.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/abstract.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__schema_changed.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__mon_reset_schema_version.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_distributed_erase.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/ut_helpers/test_table.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/managed_executor.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(S)/ydb/core/tx/datashard/create_volatile_snapshot_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_write_out_rs_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/prefix_kmeans.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/trace.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_direct_erase.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__s3_upload_txs.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/create_persistent_snapshot_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__store_table_path.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/ut_helpers/test_topic.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__engine_host.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/incr_restore_scan.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__op_rows.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_direct_transaction.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__s3_download_txs.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cblas/libcontrib-libs-cblas.a |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execution_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_read_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/create_table_unit.cpp |59.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/runlib/libtools-kqprun-runlib.a |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__progress_resend_rs.cpp |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/load_tx_details_unit.cpp |59.3%| PREPARE $(FLAKE8_PY2-2255386470) |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_collector_base.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__object_storage_listing.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_scheme_tx_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__conditional_erase_rows.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__progress_tx.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_change_sender_activation.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/plan_queue_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__compact_borrowed.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execute_commit_writes_tx_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/key_validator.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_change_receiving.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libf2c/libcontrib-libs-libf2c.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execute_kqp_scan_tx_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_kqp_data_tx_out_rs_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/datashard_active_transaction.h_serialized.cpp |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_vdisk/lib/libblobstorage-ut_vdisk-lib.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/datashard/datashard__stats.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__snapshot_txs.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/follower_edge.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execute_write_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__readset.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__plan_step.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/execution_unit.h_serialized.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execute_data_tx_unit.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clapack/part2/liblibs-clapack-part2.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/http_client.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/finalize_build_index_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_dep_tracker.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_common_upload.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/initiate_build_index_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/upload_stats.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clapack/part1/liblibs-clapack-part1.a |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/move_index_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/finish_propose_write_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/prepare_distributed_erase_tx_in_rs_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/memory_state_migration.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_trans_queue.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_commit_writes_tx_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/read_op_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/operation.h_serialized.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/wait_for_stream_clearance_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/volatile_tx.h_serialized.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/finalize_plan_tx_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/read_table_scan_unit.cpp |59.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/workload_service/ut/common/libworkload_service-ut-common.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/prepare_data_tx_in_rs_unit.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/import_s3.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_exchange_split.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/prepare_kqp_data_tx_in_rs_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_sender_cdc_stream.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/receive_snapshot_cleanup_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/remove_schema_snapshots.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/make_scan_snapshot_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/read_table_scan.cpp |59.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/fqrun/src/libtools-fqrun-src.a |59.4%| 
[CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/remove_lock_change_records.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_subdomain_path_id.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/receive_snapshot_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/finish_propose_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/remove_locks.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/prepare_write_tx_in_rs_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_write_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_scheme_tx_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execute_distributed_erase_tx_unit.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/volatile_tx_mon.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/move_table_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/wait_for_plan_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_snapshot_tx_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/export_s3_uploader.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_and_send_write_out_rs_unit.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/operation.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ydb_convert/compression_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/fq/ut_integration/ut_utils.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_outreadset.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/proxy_private.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ydb_convert/ydb_convert_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/prepare_scheme_tx_in_rs_unit.cpp |59.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/yql_testlib/libydb-core-yql_testlib.a |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_overload.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/error.cpp |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/recipe/objcopy_dcbdf62672440a626e79a64e14.o |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/nodes_health_check.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/task_result_write.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/boto3/py3/libpy3python-boto3-py3.global.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execute_kqp_data_tx_unit.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_repl_offsets.cpp |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/ydb/objcopy_48e09f84949dd34b82c51f21a3.o |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_split_dst.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/recipe/libpy3kqprun_recipe.global.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_repl_offsets_server.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/docker/libpy3contrib-python-docker.global.a |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/ydb/objcopy_3b212908932716bae8a8e38b2c.o |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_s3_uploads.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/protect_scheme_echoes_unit.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/datashard/store_distributed_erase_tx_unit.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/task_get.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/rate_limiter.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/rate_limiter_resources.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/result_writer.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_kqp_compute.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__kqp_scan.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_loans.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/nodes_manager.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/table_bindings_from_bindings.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_repl_offsets_client.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/antlr_ast/gen/v1_antlr4/libantlr_ast-gen-v1_antlr4.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_data_tx_unit.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/export_common.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/clusters_from_connections.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/task_ping.cpp |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/ydb/objcopy_ce63bab0f89a8715a42271a26a.o |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/restore_unit.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_schema_snapshots.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/antlr_ast/gen/v1_ansi_antlr4/libantlr_ast-gen-v1_ansi_antlr4.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_kqp.cpp |59.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/signer/ut/ydb-core-fq-libs-signer-ut |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_repl_apply.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/make_snapshot_unit.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_locks_db.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_and_send_out_rs_unit.cpp |59.5%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/tools/astdiff/astdiff |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/volatile_tx.cpp |59.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/ut_common/libcore-statistics-ut_common.a |59.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/validator/ut/validator_builder/yaml_config-validator-ut-validator_builder |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/pending_fetcher.cpp |59.5%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/test/test_import/libtest_import_udf.so |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_split_src.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/ut_helpers/mock_service.cpp |59.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/metrics/ut/ydb-core-fq-libs-metrics-ut |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/numpy/random/libpy3py3-numpy-random.global.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_snapshots.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/ut/ydb-core-config-ut |59.5%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/python/numpy/py3/numpy/random/libpy3py3-numpy-random.a |59.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/hmac/ut/ydb-core-fq-libs-hmac-ut |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/utils/libcore-config-utils.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_synclog.cpp >> Signer::Basic [GOOD] |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_pipeline.cpp |59.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/services/persqueue_cluster_discovery/cluster_ordering/ut/cluster_ordering-ut |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/tests/tpch/lib/libtests-tpch-lib.global.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/tests/tpch/lib/libtests-tpch-lib.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/retry/libpy3library-python-retry.global.a |59.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/mvp/oidc_proxy/libydb-mvp-oidc_proxy.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/libpy3python-numpy-py3.global.a |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/serializer/stream.cpp |59.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/signer/ut/unittest >> Signer::Basic [GOOD] |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/context.cpp >> ValidatorBuilder::CreateMultitypeNode [GOOD] >> ValidatorBuilder::CanCreateAllTypesOfNodes [GOOD] >> ValidatorBuilder::CanHaveDuplicateType [GOOD] >> ValidatorBuilder::BuildSimpleValidator [GOOD] >> ValidatorBuilder::CanHaveMultipleType [GOOD] |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/vdisk_test.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/oidc_cleanup_page.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/ftxui/libcontrib-libs-ftxui.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_simplebs.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/oidc_settings.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/oidc_protected_page.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/helpers.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/openid_connect.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/oidc_session_create_yandex.cpp |59.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/yaml_config/validator/ut/validator_builder/unittest >> ValidatorBuilder::CanHaveMultipleType [GOOD] |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/dataset.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/oidc_session_create_nebius.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/oidc_protected_page_handler.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/oidc_impersonate_start_page_nebius.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/oidc_client.cpp >> SanitizeLable::SkipSingleBadSymbol [GOOD] >> SanitizeLable::SkipBadSymbols [GOOD] >> Metrics::SeveralSubItems [GOOD] >> SanitizeLable::Truncate200 [GOOD] >> Metrics::SeveralTopItems [GOOD] >> Metrics::EmptyIssuesList [GOOD] >> SanitizeLable::Empty [GOOD] >> Metrics::CombineSubItems [GOOD] >> Metrics::MoreThanFiveItems [GOOD] >> Metrics::OnlyOneItem [GOOD] |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/oidc_session_create_handler.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/oidc_impersonate_stop_page_nebius.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/oidc_protected_page_nebius.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/oidc_session_create.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(B)/ydb/tests/tools/kqprun/src/common.h_serialized.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/oidc_protected_page_yandex.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/row_base.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_defrag.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/4ddd0d26de267b7b48b2409480_raw.auxcpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/proto/kv.{pb.h ... grpc.pb.h} |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/data_integrity/kqp_data_integrity_trails_ut.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/http_api_client/libpy3fq-libs-http_api_client.global.a |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/olap_workload/olap_workload |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/activation.pb.{h, cc} |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/mon_reregister_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/target_cluster_injecting_channel.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/table_description_ut.cpp |59.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/metrics/ut/unittest >> Metrics::OnlyOneItem [GOOD] |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/runlib/application.cpp >> HmacSha::HmacSha1 [GOOD] |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_huge.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/libpy3olap_workload.global.a |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/objcopy_9de271b22d7bcc64ef77cc3cde.o |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/workload/libpy3stress-olap_workload-workload.global.a |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_s3_downloads.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_s3_upload_rows.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_gc.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tbb/libcontrib-libs-tbb.a |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/row_stream.cpp >> TWeighedOrderingTest::WeighedOrderingTest [GOOD] >> TWeighedOrderingTest::WeighedSelectionTest [GOOD] >> TWeighedOrderingTest::SimpleSelectionTest [GOOD] |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/11a9f7a6b79b2ef13b0ccdb626_raw.auxcpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_outofspace.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/topic_sdk_test_setup.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_repl.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/google/type/timeofday.{pb.h ... grpc.pb.h} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/google/type/dayofweek.{pb.h ... grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/a14bb0205af400981ce9e41033_raw.auxcpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs.{pb.h ... 
grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_load.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/proto/dq_task_params.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_faketablet.cpp |59.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/mvp/core/libydb-mvp-core.a |59.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/6ff6cb7fc73023cd7bac8e9866_raw.auxcpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_hugeblobctx.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/proto/dq_solomon_shard.pb.{h, cc} |59.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/hmac/ut/unittest >> HmacSha::HmacSha1 [GOOD] |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/audit.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmsgimpl.cpp |59.6%| [TS] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/cluster_ordering/ut/unittest >> TWeighedOrderingTest::SimpleSelectionTest [GOOD] |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_localrecovery.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/gen_restarts.cpp |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_1a1e300767b552f4c13c3295d0.o |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/core/cache_policy.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_many.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_dbstat.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_provider_ut.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_4f92526e13553482736b942b2c.o |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/core/mvp_mem_profiler.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/src/kqp_runner.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/ypath/rich.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_3209cda00462f2963f3cbbc912.o |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/libpy3python-numpy-py3.a |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/core/reducer.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/runlib/utils.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_7eade8c49389813f8c36b72b5b.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_7bfd03a31f5e230607792f10cc.o |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/huge_migration_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/core/parser.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/core/mvp_test_runtime.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/core/mapper.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/fq_runner/libpy3tests-tools-fq_runner.global.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/datastreams_helpers/libpy3tests-tools-datastreams_helpers.global.a |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/vdisk_mock.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/core/filter.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/core/merger.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/core/mvp_tokens.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/tests/ydb-tests-tools-kqprun-tests |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_histogram_latency.cpp |59.6%| [AR] 
{BAZEL_DOWNLOAD} $(B)/contrib/libs/jinja2cpp/libcontrib-libs-jinja2cpp.a |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/transfer/transfer |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_broker.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/resource_manager.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/transaction.cpp |59.6%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.{gen.h ... defs.inl.h} |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/tests/objcopy_6b37760fb6a28054d0feafd61d.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/tests/objcopy_5923b362516b6632b9769a5db2.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/tests/objcopy_278b1a63a14648a80c4b930adb.o |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_common.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmsgwriter.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_clickhouse_internal_v1.{pb.h ... grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/serialize.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/90ae9161b2f57d308762e42ba4_raw.auxcpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogneighbors.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/fqrun/src/actors.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/libpy3transfer.global.a |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/objcopy_b632f28ee823f938d14c0e85f9.o |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/marker.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/request_complexity_limits.cpp |59.6%| [PR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/result/expr_nodes/yql_res_expr_nodes.{gen.h ... defs.inl.h} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/assign_internal.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/prepare.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view_types.{pb.h ... 
grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/delegating_transaction.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/visitor.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/chaos_lease_base.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/tests/kikimr_tpch/ydb-core-kqp-tests-kikimr_tpch |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/src/actors.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/graph_optimization.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/replication_card.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/projection.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/data.pb.{h, cc} |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/ut_transform/objcopy_342e8590e41686b18307d054a9.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/ut_transform/objcopy_b5b36403e069f48d06f8367722.o |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_brokendevice.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/ut_transform/objcopy_c693478edc1220e7a9143567d1.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serializable/objcopy_3ea8aa67e7c24c4f0e3b0406b9.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serializable/objcopy_51b071d7746089933668451b33.o |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/fqrun/src/common.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/lib/libpy3tools-ydb_serializable-lib.global.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/read_attributes_utils_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_bad_blobid.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serializable/objcopy_445797246443360525d31550d1.o |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/serializability/libpy3tests-library-serializability.global.a |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/events.pb.{h, cc} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/helpers.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/849c58233edc33539cbeb93a31_raw.auxcpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/patched/replxx/librestricted-patched-replxx.a |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_bf578b7161cc94bf18488d04ca.o |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/fqrun/src/fq_setup.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_b8d63b589074145793d63c27a3.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_f928a40774b17a9d6cd7cabd2c.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_e7477203b27fa0321cf18fd7ee.o |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/runlib/kikimr_setup.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/protos/blobs.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_config.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/pq_async_io/ut_helpers.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/tests/objcopy_59eb97971e5f83d3296e6c33b5.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/tests/objcopy_60e08504076128d310212c6460.o |59.6%| [PY] 
{BAZEL_DOWNLOAD} $(B)/ydb/tests/example/objcopy_e0aef87c4bf15cfdc957f4bdd1.o |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/bucket_quoter/liblibrary-cpp-bucket_quoter.a |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/example/ydb-tests-example |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/example/objcopy_c623700776b43ee95ec93c56f9.o |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/backup/common/metadata_ut.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/show_create/view/tests/ydb-tests-stress-show_create-view-tests |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/backup/common/encryption_ut.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/example/objcopy_2b682e146a665bfa19210b0fd9.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/tests/objcopy_5acd2383ed2cd599cfd64f7c8a.o |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/ut_transform/ydb-library-yaml_config-ut_transform |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/workload/libpy3show_create-view-workload.global.a |59.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/server_restart/public-sdk-cpp-tests-integration-server_restart |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/program/abstract.h_serialized.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/helpers/local.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/config.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/common/libpy3functional-postgresql-common.global.a |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/objcopy_b9aaa278b10ed44e5645b3ef2f.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/objcopy_816e2dba53f55d924139cdb3c5.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/objcopy_e4166f3d104a6751b45e7e712f.o |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/stream.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_detail.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_builder_stream.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/links.pb.{h, cc} |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/library/libpy3tests-postgres_integrations-library.global.a |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/library/ut/objcopy_cf5836766ac30ca7ea957ce368.o |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/helpers.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/library/ut/objcopy_899316667b8914fe8ec3af85d9.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/library/ut/objcopy_daba02a22b66dd174e40603586.o |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ut_bsvolume_reboots.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/helpers/query_executor.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/postgres_integrations/library/ut/ydb-tests-postgres_integrations-library-ut |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/token.cpp |59.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/kqp_query_session/ydb-tests-functional-kqp-kqp_query_session |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/persistent_queue.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/pull_parser_deserialize.cpp |59.7%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/actors/run_actor.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/watermark_runtime_data.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/pull_parser.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/helpers/aggregation.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/permission.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/common/libpy3tests-olap-common.global.a |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_2d296dfaf373f7f15e6312517a.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_a65a4fae8912a32233240d3c51.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_938861be99a6cedecb22904193.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_1dba5118ef0a485f3bf803be50.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_6e536fb2c379a4ebe79c499de8.o |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_view/ut_view.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_mediator_timecast.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/metadata/ut/functions_metadata_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/fq/ut_integration/fq_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/ut/main.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/pq_async_io/mock_pq_gateway.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attribute_filter.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/issue_message.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/fqrun/src/fq_runner.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/statistics_producer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/check_integrity.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_value.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/access_control.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/fc437004cc347d978fb8cbf231_raw.auxcpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/driver_lib/cli_utils/melancholic_gopher.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/dummy.{pb.h ... grpc.pb.h} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group.{pb.h ... grpc.pb.h} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/legacy_protobuf/protos/metric_meta.pb.{h, cc} |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/lib/cmds/ut/objcopy_c9ab749ab3188a8582c5cefa5e.o |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/util_pool_ut.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/lib/cmds/ut/objcopy_e2cd022168ff179d1441f5d3df.o |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq.{pb.h ... 
grpc.pb.h} |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/lib/cmds/ut/objcopy_0ade7a5662c6292edc3a8de02f.o |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/util_string_ut.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/lib/cmds/libpy3tools-lib-cmds.global.a |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/tests/objcopy_3bb523a1011c0a7019f2684a90.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/tests/objcopy_cd57da3671b96739ee73293fb1.o |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/workload/libpy3stress-s3_backups-workload.global.a |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/tests/objcopy_e8c94c485e81b4b2899f52f594.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/tests/objcopy_08f7acdb6eb761b28bf6990862.o |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_resource.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/tests/objcopy_5294a064c14cf5a49516321590.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/tests/objcopy_c7c229be41e9b028572ad1aab3.o |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/factory/open_by_signature/libstreams-factory-open_by_signature.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lz/libcpp-streams-lz.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lz/snappy/libstreams-lz-snappy.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/factory/open_common/libstreams-factory-open_common.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lz/lz4/libstreams-lz-lz4.a |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/jaeger_tracing/sampler_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/jaeger_tracing/throttler_ut.cpp |59.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/solomon/ydb-library-yql-tests-sql-solomon |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/e242d0eae6b82a1b268997c584_raw.auxcpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/throttling_channel.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/utilex/random.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/ydb_recipe/libpy3ydb_recipe.global.a |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/ydb_recipe/objcopy_c55121179eeb3b5753498290c4.o |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/jaeger_tracing/ut/ydb-core-jaeger_tracing-ut |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sequenceshard/public/ut/ydb-core-tx-sequenceshard-public-ut |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/s3_backups/tests/ydb-tests-stress-s3_backups-tests |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/helpers.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct_detail.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/tools/lib/cmds/ut/ydb-public-tools-lib-cmds-ut |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/ut/common/kqp_workload_service_ut_common.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/ydb/v1/database.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_committer.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/local_executor/libcpp-threading-local_executor.a |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/ydb/v1/console_service.{pb.h ... 
grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/ydb/v1/quota_service.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/ydb/v1/storage_type_service.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/ydb/v1/backup.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/token.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/protos/service.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/ydb/v1/database_service.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/ydb/v1/resource_preset_service.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/ydb/v1/storage_type.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/mvp/core/protos/mvp.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclog_private_events.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/mvp/oidc_proxy/mvp.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/iam/token_service.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/sensitive.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/iam/token.{pb.h ... grpc.pb.h} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/metadata/ut/ydb-core-client-metadata-ut |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/access/access.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/helpers.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_status_codes.pb.{h, cc} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/message_format.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/iam/token_exchange_service.{pb.h ... grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/server_detail.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp |59.4%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_vdisk_internal.{pb.h ... 
grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/yql_testlib/yql_testlib.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/generated/codegen/codegen |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/backoff_strategy.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/certs/libcerts.global.a |59.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/transfer/ut/functional/ydb-core-transfer-ut-functional |59.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/basic_example/public-sdk-cpp-tests-integration-basic_example |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_event_filter.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/double-conversion/libcontrib-libs-double-conversion.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/libcore-protos-schemeshard.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/libcore-scheme-protos.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/libscheme-defaults-protos.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/builtins/liblibs-cxxsupp-builtins.a |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/config.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/fmt/libcontrib-libs-fmt.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/liblibs-config-protos.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxxrt/liblibs-cxxsupp-libcxxrt.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/c-ares/libcontrib-libs-c-ares.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxxabi-parts/liblibs-cxxsupp-libcxxabi-parts.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_connectivity.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/main.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/apps/ydb/objcopy_774cbd1f10ee287899289ecb3f.o |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxx/liblibs-cxxsupp-libcxx.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/third_party/upb/libgrpc-third_party-upb.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/ydb/commands/libcommands.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/third_party/address_sorting/libgrpc-third_party-address_sorting.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/libcore-config-protos.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/libcontrib-libs-protobuf.global.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/resource/liblibrary-cpp-resource.a |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/validation/auth_config_validator_ut/core-config-validation-auth_config_validator_ut |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libc_compat/libcontrib-libs-libc_compat.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/malloc_extension/liblibs-tcmalloc-malloc_extension.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/no_percpu_cache/liblibs-tcmalloc-no_percpu_cache.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/re2/libcontrib-libs-re2.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/util/charset/libutil-charset.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_vdisk.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/tools/enum_parser/enum_serialization_runtime/libtools-enum_parser-enum_serialization_runtime.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/util/libyutil.a |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/objcopy_242486256e1af973cd1d5376d1.o |59.7%| [PY] {BAZEL_DOWNLOAD} 
$(B)/ydb/tests/functional/sqs/merge_split_common_table/std/objcopy_5d73baff4bb68923ddbe5f4fcd.o |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/libpy3functional-sqs-merge_split_common_table.global.a |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/objcopy_2efdf95387a81f55cf9c81071a.o |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libunwind/libcontrib-libs-libunwind.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/filesystem/librestricted-boost-filesystem.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/malloc/tcmalloc/libcpp-malloc-tcmalloc.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_recoverlostdata.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/exception/librestricted-boost-exception.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/libcontrib-libs-protobuf.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/libcore-file_storage-proto.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openssl/libcontrib-libs-openssl.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_inference/libydb-library-arrow_inference.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/queries/tpch/libbenchmarks-queries-tpch.global.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/gen/tpcds-dbgen/libbenchmarks-gen-tpcds-dbgen.global.a |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/mdb_endpoint_generator_ut.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/gen/tpcds-dbgen/libbenchmarks-gen-tpcds-dbgen.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/queries/tpcds/libbenchmarks-queries-tpcds.global.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/csv/table/libarrow-csv-table.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/gen/tpch-dbgen/libbenchmarks-gen-tpch-dbgen.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/jinja2cpp/libcontrib-libs-jinja2cpp.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/clickbench/liblibrary-workload-clickbench.global.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/clickbench/liblibrary-workload-clickbench.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpc_base/liblibrary-workload-tpc_base.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/benchmark_base/liblibrary-workload-benchmark_base.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/log/liblibrary-workload-log.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpc_base/liblibrary-workload-tpc_base.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/mixed/liblibrary-workload-mixed.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/log/liblibrary-workload-log.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpch/liblibrary-workload-tpch.global.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_unreadable.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/mixed/liblibrary-workload-mixed.global.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/query/liblibrary-workload-query.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/backup/libkikimr_backup.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpch/liblibrary-workload-tpch.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_status.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/workload/query/liblibrary-workload-query.global.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/validation/auth_config_validator_ut/auth_config_validator_ut.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/stat_visualization/libpublic-lib-stat_visualization.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/liblibrary-ydb_issue-proto.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpcds/liblibrary-workload-tpcds.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/libcolumnshard-common-protos.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/generated/codegen/main.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/liblibrary-login-protos.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/liblibrary-formats-arrow-protos.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/liblibrary-mkql_proto-protos.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/liblibrary-actors-protos.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/liblibrary-folder_service-proto.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/highlight/libcommands-interactive-highlight.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/highlight/color/libinteractive-highlight-color.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/complete/libcommands-interactive-complete.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/libydb_cli-commands-interactive.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/libydb-library-services.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/protos/libapi-service-protos.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/libcontrib-libs-grpc.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/libapi-protos-annotations.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/libdq-actors-protos.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/libyql-dq-proto.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/xxhash/libcontrib-libs-xxhash.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/libproviders-s3-proto.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/regex/librestricted-boost-regex.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/mvp/core/protos/libmvp-core-protos.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gtest_main/libcpp-testing-gtest_main.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/access/libclient-yc_private-access.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/openid_connect.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gtest/libcpp-testing-gtest.a |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/provider/yql_s3_listing_strategy_ut.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpcds/liblibrary-workload-tpcds.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/quota/libclient-yc_private-quota.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_console.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/mvp/security/simple/libmvp-security-simple.a |59.8%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/api/client/yc_private/oauth/libclient-yc_private-oauth.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/libpublic-issue-protos.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/mvp/core/core_ydbc.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/libyql-essentials-protos.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/protos/libcore-issue-protos.a |59.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/solomon_emulator_grpc/solomon_recipe_grpc |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/iam/libclient-nc_private-iam.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/types/libessentials-public-types.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/libcolumnshard-engines-protos.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/container/librestricted-boost-container.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/libproviders-common-proto.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/atomic/librestricted-boost-atomic.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zlib/libcontrib-libs-zlib.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cpuid_check/liblibrary-cpp-cpuid_check.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/malloc/api/libcpp-malloc-api.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/absl_flat_hash/libcpp-containers-absl_flat_hash.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/zstd/libblockcodecs-codecs-zstd.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/topic_workload/libtopic_workload.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/no_percpu_cache/liblibs-tcmalloc-no_percpu_cache.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zstd/libcontrib-libs-zstd.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/core/libcpp-blockcodecs-core.a |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/login.pb.{h, cc} |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/libcontrib-restricted-abseil-cpp-tstring.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/libcontrib-restricted-abseil-cpp.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpcc/liblibrary-workload-tpcc.a |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/initiator.pb.{h, cc} |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/validation/ut/ydb-core-config-validation-ut |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_tasks.pb.{h, cc} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/minikql.{pb.h ... 
grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/pattern_formatter.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/hedging_manager.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/auto_config_initializer.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_service.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_histograms.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_db.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/tools/dump/yaml-to-proto-dump |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/static_channel_factory.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_recoverlostdata_proxy.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/id_generator.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/config.pb.{h, cc} |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/transfer_workload/libtransfer_workload.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/topic/libtopic.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/table_mount_cache.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/stream.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/service_detail.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/import.{pb.h ... grpc.pb.h} |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/import/liblib-ydb_cli-import.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/cms/libsrc-client-cms.a |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/keyvalue/protos/events.pb.{h, cc} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/actors/ut/yql_arrow_push_down_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/pinger.pb.{h, cc} |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/ydb/v1/libyc_private-ydb-v1.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_pipe.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/common/ut/ydb-core-backup-common-ut |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/dump/liblib-ydb_cli-dump.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/libapi-protos.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/config.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/tstool/libpy3tstool.global.a |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tools/tstool/objcopy_6077c98b9810fee0e2250a36a4.o |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/debug/libsrc-client-debug.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/logger_owner.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/gateways.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_propagator.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/chunk_queue/libcpp-threading-chunk_queue.a |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/1d4add45df109108c6e0289305_raw.auxcpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_proxyobtain.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_backup.{pb.h ... 
grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/pending_fetcher.pb.{h, cc} |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/ut_util/ydb-core-tablet_flat-ut_util |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/tstool/tstool |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_admin.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/distributed_table_client.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_stats.pb.{h, cc} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_thread_pool.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/protos/config.pb.cc |59.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/0df74f5c3f7eead9a8ec0077f0_raw.auxcpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/quotas_manager.pb.{h, cc} |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/libclicommands.a |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/common/util_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/resource_manager.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/etc_client.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/icu/libcontrib-libs-icu.a |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/e39200e8838ea19804ddbf1a59_raw.auxcpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/libpy3contrib-libs-googleapis-common-protos.global.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/helpers.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_scheme_v1.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/helpers.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/fq.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/aba998449c2518e3272d8e87fb_raw.auxcpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_b34c6a8a5501db208eebc5d8e4.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_e32003454342267c2263935765.o |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/alloc.{pb.h ... grpc.pb.h} |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_9a3dabea847c21e0b4fa4cda26.o |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_cca8dcd66462c9ca3c57fcb78e.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/versioned_reader.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bind_channel_storage_pool.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/operation_id/protos/operation_id.pb.{h, cc} |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/mvp/core/core_ydb.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/journal_client.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/tests/kikimr_tpch/kqp_tpch_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/core/mvp_test_runtime.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_operation.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_config.{pb.h ... 
grpc.pb.h} |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/wardens/objcopy_1555e67a3dd43a3e7f09bf8eee.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/wardens/objcopy_488333b1ebd4c1d6d8ec5bcb8f.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/ut_common/ut_common.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/core/mvp_tokens.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/monitoring/libsrc-client-monitoring.a |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |59.8%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/operation.h_serialized.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/wardens/objcopy_3db6af291678d4ac330517956a.o |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/allure-pytest/libpy3contrib-python-allure-pytest.global.a |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_7f9e816a97aaeee837ac316091.o |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/meta/meta_cache_ut.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_a38b1580810a6e4b419da99dcf.o |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/allure-python-commons/libpy3contrib-python-allure-python-commons.global.a |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_b9fd5c62781ec3b78d111a0ba7.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_9ea5b1fb7a4f8e1b0b8d7cf345.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_a5874452d3dbd6f6e49cd08be6.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_helpers_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/log_backend/json_envelope_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/metering/stream_ru_calculator_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/metering/time_grid_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_osiris.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/file_storage.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/e9ba3ee2f0ee1966e63998b143_raw.auxcpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/channel_purpose.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/helpers.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/validation/validators_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdata.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/mvp/oidc_proxy/ut/ydb-mvp-oidc_proxy-ut |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/s3/common/ut/ydb-library-yql-providers-s3-common-ut |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/grpc_proxy_status.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/fq_v1.{pb.h ... 
grpc.pb.h} |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_tests/objcopy_4826ee2207124da1bc398e3bd8.o |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group.{pb.h ... grpc.pb.h} |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_tests/objcopy_5b5c3367c789898aa5a6cae866.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_tests/objcopy_8e57113197bb359e3999b04aab.o |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compaction.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_cache.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config.{pb.h ... grpc.pb.h} |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/oom/objcopy_a0543c2dc30365e9b2ad3d0ca6.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/oom/objcopy_df0cb3f315162a3110ee243ecd.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/oom/objcopy_e0331f455507fe5ac3b71d0537.o |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/cdc/cdc |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/base_utils/node_by_host.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/mock/pdisk_mock.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/f5842ce240ac3a1b94dd2c1f2e_raw.auxcpp |59.9%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/e9ba3ee2f0ee1966e63998b143_raw.auxcpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber_scheduler_thread.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_recoverylogwriter.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/libpy3cdc.global.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/workload/libpy3stress-cdc-workload.global.a |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/objcopy_7d7339f4588397fc771e31030c.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tool/objcopy_7406de026bf25e30e96a88517d.o |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/table_mount_cache.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/http_config.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/index_builder.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/chaos_lease.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config__intpy3___pb2.py.p5ju.yapyc3 |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_status_proxy.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config__intpy3___pb2.py{ ... i} |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config__intpy3___pb2.py.p5ju.yapyc3 |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/cache/libcomplete-name-cache.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/cache/local/libname-cache-local.a |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/netclassifier.{pb.h ... 
grpc.pb.h} |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/tool |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/object/simple/cached/libobject-simple-cached.a |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local.{pb.h ... grpc.pb.h} |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.9%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_vdisk_internal__intpy3___pb2.py{ ... i} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_logreplay.cpp |59.9%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_vdisk_internal__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config__intpy3___pb2.py{ ... i} |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/common/libpy3client-yc_public-common.global.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__get_log_tail.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/blobstorage_grouptype_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/ut/path_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/tools/dump/main.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/ut/memory_stats_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_mon.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/ut/table_index_ut.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/highlight/libsql-v1-highlight.a |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/logoblob_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_controller_config.{pb.h ... grpc.pb.h} |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/highlight/libsql-v1-highlight.global.a |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/localdb_ut.cpp |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/common/objcopy_0a1f127d9343562caddfbacf79.o |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/iam/libpy3client-yc_public-iam.global.a |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/statestorage_guardian_impl_ut.cpp |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/common/objcopy_b866963286293af0b6f2139fed.o |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/common/objcopy_178e64ce5db822fc6aa8b3e608.o |60.0%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/rescompiler/rescompiler |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/common/objcopy_f9b0feecd0e36f08cbf5c53562.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_syncfull.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/connection.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/private_api.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/type_registry.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/ydb/ydb |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/docs/generator/generator |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/io_formats/arrow/scheme/csv_arrow_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_broker.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/client_base.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_whiteboard.{pb.h ... 
grpc.pb.h} |59.9%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/tools/protoc/plugins/grpc_python/grpc_python |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/log_backend/ut/ydb-core-log_backend-ut |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_data.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hulllog.cpp |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/mvp/core/ut/ydb-mvp-core-ut |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/provider/ut/pushdown/pushdown_ut.cpp |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/config/ydb-tests-functional-config |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/common/ydb-tests-functional-sqs-common |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/http_gateway/mock/libcommon-http_gateway-mock.a |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/helpers_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/metering/ut/ydb-core-metering-ut |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hullstorageratio_ut.cpp |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/config/objcopy_39cda017c3d5f0e18270b53881.o |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_blob_ut.cpp |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/config/objcopy_ae5b9f6e7a00f305f01a3dde87.o |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/config/objcopy_51fb1403d79c2fadb9d2ea6ce4.o |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_mon_dbmainpage.cpp |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/helpers/libpy3olap-scenario-helpers.global.a |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/docs/generator/libpy3olap-docs-generator.global.a |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/lib/libpy3tests-olap-lib.global.a |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/docs/generator/objcopy_ac8dbe7f54a2cb7efb6636f75f.o |59.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/volatile_tx.h_serialized.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/mvp/oidc_proxy/oidc_proxy_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/viable_peer_registry.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullbase_barrier.cpp |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/mvp/meta/ut/ydb-mvp-meta-ut |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/token_accessor.pb.{h, cc} |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/persqueue/topic_parser/ut/topic_names_converter_ut.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_limits.{pb.h ... grpc.pb.h} |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_base/cli_kicli.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/histogram.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/query_stats.{pb.h ... 
grpc.pb.h} |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis_algo_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/file_reader.cpp |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/connection_impl.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/ssa.pb.{h, cc} |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/ut_client/backpressure_ut.cpp |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/replication.{pb.h ... grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_request.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/5732afb9e28f3811e12c561eb5_raw.auxcpp |59.8%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/python/mypy-protobuf/bin/protoc-gen-mypy/protoc-gen-mypy |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/base_utils/format_info.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/mvp/core/mvp_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/ut/ydb-core-base-ut |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_integrity_trails.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/table_reader.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/serverless_proxy_config.{pb.h ... grpc.pb.h} |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/persqueue/topic_parser/ut/ydb-library-persqueue-topic_parser-ut |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_kafka_functions.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/init/init_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/hullop_entryserialize.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/external_sources/object_storage/inference/ut/arrow_inference_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/hullop_compactfreshappendix.cpp |59.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/crypto/ut/ydb-core-blobstorage-crypto-ut |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/ut_helpers/liblibs-quota_manager-ut_helpers.a |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_serialization.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/select/ydb-tests-datashard-select |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/select/objcopy_dfbd751fc64901b06ded4354c8.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_group/main.cpp |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/lib/libpy3tests-datashard-lib.global.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/object_storage_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/external_source_builder_ut.cpp |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/select/objcopy_d709b1895f91108d9f51b703ea.o |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/auth_ut.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/select/objcopy_ec9bc627b6d56d1a941c2b7e4f.o |59.9%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/external_sources/external_data_source_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/iceberg_ddl_ut.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/parametrized_queries/objcopy_6d8369510b03c08a300f2e2657.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/parametrized_queries/objcopy_e1e64d508ce59834ec0a40f731.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/messaging/objcopy_791e2f78c18891d943ecce5e41.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/messaging/objcopy_7211c23d9494c46f0f60063e9e.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/parametrized_queries/objcopy_7d0deb4120fbddf720c11b5358.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/statestorage_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/ut/dlq_helpers_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/test_connection/ut/test_connection_ut.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/io_formats/arrow/scheme/ut/ydb-core-io_formats-arrow-scheme-ut |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/init/init.h_serialized.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/init/ut/ydb-core-config-init-ut |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/messaging/objcopy_48a08121f0a68da2f2666b0341.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_log_cleanup.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/nodewarden/bind_queue_ut.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/test/libs/rows/libtest-libs-rows.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullds_heap_it_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/parametrized_queries/ydb-tests-datashard-parametrized_queries |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/ut_auth/ydb-core-base-ut_auth |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/test/tool/perf/colons.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/ut/queue_attributes_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hullsatisfactionrank_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/subdomains.{pb.h ... 
grpc.pb.h} |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/multinode/objcopy_afb48e06933bdee6c5245db82e.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/multinode/objcopy_10b0cfa01297f7d7392eb4d9e4.o |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/test/libs/table/libtest-libs-table.a |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/multinode/objcopy_b306c2955ce13e6db6cae73363.o |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/lib/libpy3functional-tpc-lib.global.a |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/ydb-core-blobstorage-vdisk-anubis_osiris-ut |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/test/tool/perf/main.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/medium/objcopy_562a790b75f22ca86b37e5623e.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/medium/objcopy_e5d897582dc0fbda7c578cb53f.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/medium/objcopy_16997685291b6913e28a98236c.o |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/ydb-core-blobstorage-vdisk-hulldb-base-ut |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/load/lib/libpy3olap-load-lib.global.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullds_generic_it_ut.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/medium/objcopy_a51d334445f3e4e9170e666e7b.o |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/object_storage/inference/ut/external_sources-object_storage-inference-ut |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_slot_broker.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_query_v1.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/service_discovery/service_discovery.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/journal_writer.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_operation_v1.{pb.h ... grpc.pb.h} |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/backpressure/ut_client/ydb-core-blobstorage-backpressure-ut_client |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view_types.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullbase_barrier_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_stats.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk.{pb.h ... grpc.pb.h} |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/multinode/ydb-tests-functional-sqs-multinode |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tracing.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_service_config.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_spacetracker_ut.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/pathid.{pb.h ... 
grpc.pb.h} |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/tree_visitor.cpp |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/ut/secure_protobuf_printer_ut.cpp >> TBlobStorageCrypto::TestMixedStreamCypher [GOOD] >> TBlobStorageCrypto::TestOffsetStreamCypher |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/ut/counters_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/protos/persqueue.pb.{h, cc} |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/objcopy_2aa1916d45dca98014edb3d732.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/objcopy_1574e8a5a6c530c7bfd6378c4d.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/objcopy_504b845d57f1a23561e970de61.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/ut/action_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/change_exchange.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_persqueue_v1.{pb.h ... grpc.pb.h} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_topic_v1.{pb.h ... grpc.pb.h} |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/balance/handoff_map.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/type_info.{pb.h ... grpc.pb.h} |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/functional-sqs-merge_split_common_table-fifo >> TBlobStorageCrypto::TestOffsetStreamCypher [GOOD] >> TBlobStorageCrypto::TestInplaceStreamCypher [GOOD] >> TBlobStorageCrypto::PerfTestStreamCypher |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/yql_translation_settings.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_scheme_cache_append.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_group/ydb-core-blobstorage-ut_group |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/ut/ydb-core-external_sources-ut |59.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/manager/abstract.h_serialized.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown |59.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/init/init.h_serialized.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_proxy/ut/control_plane_proxy_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue_stress.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/proto/records.pb.{h, cc} |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Flask-Cors/py3/libpy3python-Flask-Cors-py3.global.a |59.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/static_validator/ut/ydb-library-yaml_config-static_validator-ut |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_static_group.cpp >> TBlobStorageCrypto::PerfTestStreamCypher [GOOD] >> TBlobStorageCrypto::UnalignedTestStreamCypher [GOOD] >> TBlobStorageCryptoRope::TestEqualInplaceStreamCypher |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Flask/py3/libpy3python-Flask-py3.global.a |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut_pg/flat_database_pg_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/pgproxy/pg_proxy_ut.cpp |59.9%| [LD] 
{BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/base/ut/ydb-core-ymq-base-ut |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/test/tool/perf/table-perf |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/8dc9a4d43a36492d7f07bb1764_raw.auxcpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/memory_controller/memtable_collection_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/header.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/actors/libproviders-clickhouse-actors.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/local_gateway/libproviders-dq-local_gateway.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/colorama/py3/libpy3python-colorama-py3.global.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/click/py3/libpy3python-click-py3.global.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/itsdangerous/py3/libpy3python-itsdangerous-py3.global.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/minikql_compile/yql_expr_minikql_compile_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/pgproxy/ut/ydb-core-pgproxy-ut |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/query/ut/ydb-core-blobstorage-vdisk-query-ut |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_response.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/stats_collector/libproviders-dq-stats_collector.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/kafka_test_client.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/metarequest_ut.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/gateway/dummy/libpq-gateway-dummy.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/balance/sender.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/blobstorage_node_warden_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/ut_protocol.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_ut_common.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet_ut.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/dstool/libpy3ydb-dstool.global.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_actor.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/apps/dstool/objcopy_fca89909cedb628068681e1038.o |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/dstool/lib/libpy3dstool_lib.global.a |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_ru_calculator/ut_ru_calculator.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/bindings/libyql-utils-bindings.a |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/tools/dqrun/dqrun.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/service/libproviders-dq-service.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/comp_nodes/libproviders-ydb-comp_nodes.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/actor_system/libyql-utils-actor_system.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/balance/utils.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/tests/objcopy_0446f521b26a2e8128f94ac50f.o |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/yt/dq_task_preprocessor/libproviders-yt-dq_task_preprocessor.a |59.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/tests/stress/oltp_workload/workload/type/libpy3oltp_workload-workload-type.global.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/yt/actors/libproviders-yt-actors.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/actors/libproviders-ydb-actors.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_base/cli_cmds_db.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/tests/objcopy_367e2bc5d83faa0907a06d2976.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/workload/libpy3stress-oltp_workload-workload.global.a |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/tests/objcopy_49a1ca9559288648fba9cf7b65.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/9270b323afac6e83cea5bf4aa4_raw.auxcpp |59.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/mvp/meta/libydb-mvp-meta.a |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_config_base/config_base.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_queue.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/sentinel_ut_unstable.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_coordinator.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/proto/rpc.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/meta/meta_cache.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/custom_registry.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/meta/mvp.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/actors_ut.cpp >> TBlobStorageCryptoRope::TestEqualInplaceStreamCypher [GOOD] >> TBlobStorageCryptoRope::TestEqualMixedStreamCypher >> StaticValidator::HostConfigs [GOOD] >> StaticValidator::DomainsConfig [GOOD] >> StaticValidator::Hosts [GOOD] |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/ut_produce_actor.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/main.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/collection.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/dstool/ydb-dstool |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/filter.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/task_controller.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/attributes_stripper.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/index.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/stream_logic.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/reserve.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/quotas_manager.pb.{h, cc} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/control_plane_proxy.pb.{h, cc} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/columnar_statistics.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/google/api/http.{pb.h ... 
grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/timestamp_provider.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/table_client.cpp |59.8%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/9270b323afac6e83cea5bf4aa4_raw.auxcpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/ut_pg/ydb-core-tablet_flat-ut_pg |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_state.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_appendix.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_chain.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/postgres_integrations/go-libpq/ydb-tests-postgres_integrations-go-libpq |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/ut/graph_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhuge.cpp |59.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/yaml_config/static_validator/ut/unittest >> StaticValidator::Hosts [GOOD] |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/mvp/meta/meta_versions.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/ydb_checkpoint_storage_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/memory_controller/memory_controller_ut.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_sequence_ut.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_preprocessing/libessentials-core-url_preprocessing.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_lister/libessentials-core-url_lister.a |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/storage/memory/libqplayer-storage-memory.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/cbo/simple/libcore-cbo-simple.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/pg_ext/libessentials-core-pg_ext.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/storage/file/libqplayer-storage-file.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/mkql_simple_file/libproviders-common-mkql_simple_file.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_process_notification.cpp |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/objcopy_bcf2142e31bf537964dc063d11.o |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/factories.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/go-libpq/d78d0f74a3f72be1016c0cf8cf_raw.auxcpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/prctl/libpy3library-python-prctl.global.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/prctl/libpy3library-python-prctl.a |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/view/view_ut.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/go-libpq/objcopy_4352b8b3e3cf61532c865b371b.o |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_request_reporting_ut.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/libpy3oltp_workload.global.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_get_ut.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/simplejson/py3/libpy3python-simplejson-py3.global.a |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/ydb_serializable/replay/replay |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/restarts/objcopy_277b7e8f79021687bec95be8db.o |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/long_tx_service/long_tx_service_ut.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/restarts/objcopy_0359848ae21601186c5b0d9873.o |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_quorum_tracker_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/statestorage.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/ut/row_dispatcher_ut.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/restarts/objcopy_afdf6d60c4f76ae91a235d460b.o |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/testing/group_overseer/libblobstorage-testing-group_overseer.a |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/locks/range_treap_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/mvp/meta/meta.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/tools/yql_facade_run/libessentials-tools-yql_facade_run.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_remove_permissions.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/ydb-core-blobstorage-vdisk-hulldb-fresh-ut |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/pq/provider/ut/yql_pq_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_counters_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_crypto_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/ut/coordinator_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/blobsan/blobsan |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/simplejson/py3/libpy3python-simplejson-py3.a |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/generic/streaming/ydb-tests-fq-generic-streaming |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/impl/libfmr-coordinator-impl.global.a |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/delete/ydb-tests-olap-delete |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/client/libfmr-coordinator-client.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/yt_coordinator_service/impl/libcoordinator-yt_coordinator_service-impl.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job/interface/libfmr-job-interface.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job_factory/interface/libfmr-job_factory-interface.a |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/console/validators/ut/ydb-core-cms-console-validators-ut |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job/impl/libfmr-job-impl.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/gc_service/interface/libfmr-gc_service-interface.a |59.6%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yt/yql/providers/yt/fmr/coordinator/yt_coordinator_service/interface/libcoordinator-yt_coordinator_service-interface.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/interface/libfmr-coordinator-interface.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/gc_service/impl/libfmr-gc_service-impl.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/impl/libfmr-coordinator-impl.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/fmr_tool_lib/libyt-fmr-fmr_tool_lib.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/interface/proto_helpers/libcoordinator-interface-proto_helpers.a |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/ydb_state_storage_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/copy_table/ydb-tests-datashard-copy_table |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/yt_coordinator_service/file/libcoordinator-yt_coordinator_service-file.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/yt_url_lister/libyt-lib-yt_url_lister.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/worker/impl/libfmr-worker-impl.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/secret_masker/dummy/liblib-secret_masker-dummy.a |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/yt_job_service/file/libfmr-yt_job_service-file.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/utils/libyt-fmr-utils.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job_factory/impl/libfmr-job_factory-impl.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/yt_job_service/interface/libfmr-yt_job_service-interface.a |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/storage_service_ydb_ut.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/yt_job_service/impl/libfmr-yt_job_service-impl.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/fmr/libyt-gateway-fmr.a |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/board_subscriber_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/ut/leader_election_ut.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/table_data_service/local/interface/libtable_data_service-local-interface.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/table_data_service/local/impl/libtable_data_service-local-impl.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/proto/libyt-fmr-proto.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/table_data_service/interface/libfmr-table_data_service-interface.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/request_options/proto_helpers/libfmr-request_options-proto_helpers.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/request_options/libyt-fmr-request_options.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/ut/topic_session_ut.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/tools/ytrun/lib/libtools-ytrun-lib.a |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/large/ydb-tests-functional-sqs-large |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/file/libyt-gateway-file.a |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/large/objcopy_422ca1effff14e5a08952658d0.o |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_restore_ut.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} 
$(B)/ydb/tests/functional/sqs/large/objcopy_5f161468ff5322b803d4d0dc79.o |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/tools/dqrun/dqrun |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/blobsan/main.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/join/kqp_join_ut.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/pdisk/ut/ydb-core-blobstorage-pdisk-ut |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_testshard/main.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_keys.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/replication_huge.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/replication.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/backpressure/ut/ydb-core-blobstorage-backpressure-ut |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_yard.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/gc_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/join/kqp_flip_join_ut.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/yql/libcpp-protobuf-yql.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/dynamic_prototype/libcpp-protobuf-dynamic_prototype.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_range_ops.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/tools/dq/service_node/main.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/join/kqp_index_lookup_join_ut.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/global_worker_manager/libproviders-dq-global_worker_manager.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/scheduler/libproviders-dq-scheduler.a |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_background_compaction.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/metrics/libproviders-dq-metrics.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_sink_mvcc_ut.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/ydb-core-blobstorage-vdisk-hulldb-cache_block-ut |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/backup/impl/table_writer_ut.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/python/yt/libpy3yt-python-yt.global.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/counting_events.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/time_cast/time_cast_ut.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/actors/yt/libdq-actors-yt.a |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/discover.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_followers.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/ds_proxy_lwtrace.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/kqp/ut/tx/kqp_snapshot_isolation_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/index_restore_get.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/get_block.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/extra_block_checks.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/incorrect_queries.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/decommit_3dc.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/defrag.cpp |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/streaming/objcopy_49bad8251d240ad7c49d384b91.o |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/encryption.cpp >> TBlobStorageCryptoRope::TestEqualMixedStreamCypher [GOOD] >> TBlobStorageCryptoRope::TestMixedStreamCypher |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3dc.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/main.cpp |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/copy_table/objcopy_589315062f5401a368910248f0.o |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/group_size_in_units.cpp |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/delete/objcopy_e6184a39b8332c221c5cda3c2f.o |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_tx_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/ut_sequence/datashard_ut_sequence.cpp |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/streaming/objcopy_49e9948af399bc60603a7d2db5.o |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/gc.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/python/yt/type_info/libpy3python-yt-type_info.global.a |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_column_stats.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3of4.cpp |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/copy_table/objcopy_61613f0bd98876f149d8574891.o |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/monitoring.cpp >> TBlobStorageCryptoRope::TestMixedStreamCypher [GOOD] >> TBlobStorageCryptoRope::TestOffsetStreamCypher |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/address_classification/net_classifier_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/group_reconfiguration.cpp |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/streaming/objcopy_181bdcd1743e9a1a78fafe4b60.o |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/delete/objcopy_ffc5f76f7501b8251738448541.o |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/recovery.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/patch.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_mvcc_ut.cpp >> TBlobStorageCryptoRope::TestOffsetStreamCypher [GOOD] >> TBlobStorageCryptoRope::TestInplaceStreamCypher |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/blobstorage/ut_blobstorage/self_heal.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/storagepoolmon/ut/storagepoolmon_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/sanitize_groups.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_transfer/ut_transfer.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_malfunction.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/sync.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub_fast.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/shred.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/huge.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_replication.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/service/table_writer_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/phantom_blobs.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/streaming/4399546af28cb40e5d74ea4a4b_raw.auxcpp |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/delete/objcopy_609c2613d8f9c513602350c6a8.o |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_change_collector.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/space_check.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/utils/libpy3fq-generic-utils.global.a |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_sink_locks_ut.cpp >> TBlobStorageCryptoRope::TestInplaceStreamCypher [GOOD] >> TBlobStorageCryptoRope::PerfTestStreamCypher |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/tools/dq/service_node/service_node |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/copy_table/objcopy_c114cbf6b820d92320c1e2c912.o |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/ldap_auth_provider/ldap_utils_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_locks_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/rbo/kqp_rbo_ut.cpp |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/large/objcopy_8ac5034640eee44b1cd5fa5253.o |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/snapshots.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/gc_quorum_3dc.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/deadlines.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_data_cleanup.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/query_actor/query_actor_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/multiget.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/python/yt/yson/libpy3python-yt-yson.global.a |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_sink_tx_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/validation.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/assimilation.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/cloud_events_ut.cpp |59.4%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/generic/actors/ut/yql_generic_lookup_actor_ut.cpp >> TBlobStorageCryptoRope::PerfTestStreamCypher [GOOD] >> TBlobStorageCryptoRope::UnalignedTestStreamCypher [GOOD] >> TChaCha::KeystreamTest1 [GOOD] >> TChaCha::KeystreamTest2 [GOOD] >> TChaCha::KeystreamTest3 [GOOD] >> TChaCha::KeystreamTest4 [GOOD] >> TChaCha::KeystreamTest5 [GOOD] >> TChaCha::KeystreamTest6 [GOOD] >> TChaCha::KeystreamTest7 [GOOD] >> TChaCha::KeystreamTest8 [GOOD] >> TChaCha::MultiEncipherOneDecipher [GOOD] >> TChaCha::SecondBlock [GOOD] >> TChaCha512::KeystreamTest1 [GOOD] >> TChaCha512::KeystreamTest2 [GOOD] >> TChaCha512::KeystreamTest3 [GOOD] >> TChaCha512::KeystreamTest4 [GOOD] >> TChaCha512::KeystreamTest5 [GOOD] >> TChaCha512::KeystreamTest6 [GOOD] >> TChaCha512::KeystreamTest7 [GOOD] >> TChaCha512::KeystreamTest8 [GOOD] >> TChaCha512::MultiEncipherOneDecipher [GOOD] >> TChaCha512::SecondBlock [GOOD] >> TChaCha512::CompatibilityTest |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_login_large/ut_login_large.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_auditsettings/ut_auditsettings.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_restore/ut_restore.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_vector_index_build.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ut/group_test_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_context.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_rd_read_actor_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_spilling_ut.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ut_subdomain_reboots.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_write_actor_ut.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/utils/libpy3connector-tests-utils.global.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_actions.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/utils/types/libpy3tests-utils-types.global.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_errors.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/runtime/kqp_re2_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/common_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_external_table/ut_external_table.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/testlib/service_mocks/ldap_mock/libtestlib-service_mocks-ldap_mock.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/arrow/kqp_arrow_in_channels_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/backpressure.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/keyvalue/grpc_service_ut.cpp |59.4%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider_ut.cpp |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/go-libpq/objcopy_3ddbad334a37a829b3772ddb05.o |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/replay/libpy3tools-ydb_serializable-replay.global.a |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/replay/objcopy_efd352795aee39d7ac6e163a2d.o |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/go-libpq/objcopy_95b3eecc97c453f0c55c456659.o |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_col_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_read_actor_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_view_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_stats.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_logging_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/snap_vec_ut.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/src/proto/libkqprun-src-proto.a >> TChaCha512::CompatibilityTest [GOOD] >> TChaChaVec::KeystreamTest1 [GOOD] >> TChaChaVec::KeystreamTest2 [GOOD] >> TChaChaVec::KeystreamTest3 [GOOD] >> TChaChaVec::KeystreamTest4 [GOOD] >> TChaChaVec::KeystreamTest5 [GOOD] >> TChaChaVec::KeystreamTest6 [GOOD] >> TChaChaVec::KeystreamTest7 [GOOD] >> TChaChaVec::KeystreamTest8 [GOOD] >> TChaChaVec::MultiEncipherOneDecipher [GOOD] >> TChaChaVec::SecondBlock [GOOD] >> TChaChaVec::CompatibilityTest |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_snapshot.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_log_cache_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ut_extsubdomain_reboots.cpp |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_5992d4831c5055a481712a2a80.o |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/mock/pdisk_mock.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/709f125727d9ea4165df516509_raw.auxcpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/balance_coverage/balance_coverage_builder_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/arrow/kqp_types_arrow_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/get.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kafka_proxy/ut/ydb-core-kafka_proxy-ut |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_36807918bd7a86c1ea37310c9c.o |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/column_family/compression/objcopy_1ab2a5a6dd84a6c9ff5d5c50b0.o |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.global.a |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_656baae3c1e24959f5bcc457d7.o |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/tx_proxy/schemereq_ut.cpp |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_0ab925f82bbba07bf3b749dc3c.o |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_sysview_reboots/ut_sysview_reboots.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_failure_injection/ut_failure_injection.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_index_build.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl_utility.cpp |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/column_family/compression/objcopy_6887bde1dc99f5c5c2f0922842.o |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/column_family/compression/objcopy_3bdea7737a87c43bfaa0aaf4c3.o |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/cost/kqp_cost_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_color_limits.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/core-blobstorage-ut_blobstorage-ut_statestorage |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/protos/data.pb.{h, cc} |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/proto/libetcd-grpc.a |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_races.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/services.h_serialized.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/bundle_controller_client/bundle_controller_client.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_server_ut.cpp |59.3%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/serverless_proxy_config__intpy3___pb2.py.p5ju.yapyc3 |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_run.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/monitoring_ut.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_operation__intpy3___pb2.py{ ... i} |59.3%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/serverless_proxy_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ut_helpers/libpublic-lib-ut_helpers.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/group_mapper_ut.cpp |59.3%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/serverless_proxy_config__intpy3___pb2.py{ ... 
i} >> TChaChaVec::CompatibilityTest [GOOD] >> TPoly1305::TestVector1 [GOOD] >> TPoly1305::TestVector2 [GOOD] >> TPoly1305::TestVector3 [GOOD] >> TPoly1305::TestVector4 [GOOD] >> TPoly1305Vec::TestVector1 [GOOD] >> TPoly1305Vec::TestVector2 [GOOD] >> TPoly1305Vec::TestVector3 [GOOD] >> TPoly1305Vec::TestVector4 [GOOD] >> TTest_t1ha::TestZeroInputHashIsNotZero [GOOD] >> TTest_t1ha::PerfTest [GOOD] >> TTest_t1ha::T1haHashResultsStablilityTest [GOOD] |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ymq.pb.{h, cc} |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_metadata__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/notify_manager.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_metadata__intpy3___pb2.py{ ... i} |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/block_race.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/datastreams/datastreams_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_stream_lookup.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_erase_rows.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_move_reboots/ut_move_reboots.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_client_ut.cpp |59.3%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config.{pb.h ... grpc.pb.h} |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/config.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/marker.pb.{h, cc} |59.3%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/grpc.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_schemeshard.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_database.{pb.h ... grpc.pb.h} |59.3%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/expr_nodes/dq_expr_nodes.{gen.h ... defs.inl.h} |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/grouper_ut.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/activation.pb.{h, cc} |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/key.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/snapshot.pb.{h, cc} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/db_pool.pb.{h, cc} |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/blobstorage_hulloptlsn.cpp |59.3%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/generated/runtime_feature_flags.h |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/gateways.pb.{h, cc} |59.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/blobstorage/crypto/ut/unittest >> TTest_t1ha::T1haHashResultsStablilityTest [GOOD] |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_hive.{pb.h ... 
grpc.pb.h} |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/scheduler_thread.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/mv_object_map_ut.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/issue_severity.pb.{h, cc} |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/dynamic_table_transaction_mixin.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/health_config.pb.{h, cc} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/private_api.pb.{h, cc} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/pending_fetcher.pb.{h, cc} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/resource_broker.{pb.h ... grpc.pb.h} |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/check_type_compatibility.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_pdisk_config.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/read_limit.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compaction.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_config.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage.{pb.h ... grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/test_connection.pb.{h, cc} |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/ready_event_reader_base.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/control_plane_storage.pb.{h, cc} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot_config.{pb.h ... grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_helpers.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/public.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/channel_purpose.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/config.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage.{pb.h ... grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_cms.{pb.h ... grpc.pb.h} |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_external_data_source/ut_external_data_source.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/storage.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/12c5e25ae32e1ab2f8f95f91f5_raw.auxcpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_integrity_trails.{pb.h ... grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_bsvolume/ut_bsvolume.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_tenant.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key.{pb.h ... 
grpc.pb.h} |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_chunk_tracker.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/random_access_gzip.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/control_plane_proxy.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_ut.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats.{pb.h ... grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_limits.{pb.h ... grpc.pb.h} |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/metrics.{pb.h ... grpc.pb.h} |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/log_writer_detail.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel.{pb.h ... grpc.pb.h} |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/chunk_stripe_statistics.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_database.{pb.h ... grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/public.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_appendix_ut.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/issue_id.{pb.h ... grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/job_tracker_client/helpers.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/file_client/config.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp.{pb.h ... grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |59.4%| [EN] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/kv/kv.h_serialized.{cpp, h} |59.4%| [EN] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/stock/stock.h_serialized.{cpp, h} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/validators/validator_nameservice_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_state_load_plan.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/hydra/version.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/common.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_ut_trace.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_callbacks.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv.{pb.h ... grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_log.{pb.h ... grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/kafka/protocol.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_sectormap.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/data_statistics.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/resource_broker.{pb.h ... grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/two_level_fair_share_thread_pool.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc.{pb.h ... grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/maintenance.{pb.h ... 
grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/protos/fq_private.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/journal_client/config.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/yql_translation_settings.{pb.h ... grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/kafka/packet.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/cache_block/cache_block_ut.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters.{pb.h ... grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_flags.{pb.h ... grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_events.pb.{h, cc} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/key_range.{pb.h ... grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/aclib/protos/aclib.pb.{h, cc} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_slot_broker.{pb.h ... grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_pool.{pb.h ... grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.{pb.h ... grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/data.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/periodic_executor.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compression_ut.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/replication.{pb.h ... grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_pool.{pb.h ... grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/interconnect.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_kqp.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_invoker_pool.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/issue_id.{pb.h ... grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/minikql/minikql_engine_host.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/gateways_config.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_ut.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/services.{pb.h ... 
grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/storage.pb.{h, cc} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/graph_params/proto/graph_params.pb.{h, cc} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_tasks.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/election/public.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/sensitive.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/validators/validator_bootstrap_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/kqprun.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/method_helpers.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_dynamic_config.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/io_tags.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_formats.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_stream_pipe.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_discovery.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/merge_table_schemas.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/file_storage.pb.{h, cc} |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/fq.pb.{h, cc} |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_scheme.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_put_ut.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_value.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/perf/kqp_workload_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/ic_nodes_cache_service.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/clickhouse.pb.{h, cc} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_topic.pb.{h, cc} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/protos/flat_table_part.pb.{h, cc} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/task_controller.pb.{h, cc} |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_monitoring.pb.{h, cc} |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/request_complexity_limiter.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/partition_reader.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config.{pb.h ... 
grpc.pb.h} |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/kafka/requests.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/test_connection.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_data_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/infinite_entity.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/address_helpers.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_atomicblockcounter.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query_stats.pb.{h, cc} |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_drivemodel_db.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_discover_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/serializable_logger.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/replication_card_serialization.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/kqprun |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/control/immediate_control_board_actor.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_store_permissions.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_signal_event.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/drivedata_serializer.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ut_split_merge_reboots.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_patch_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/validators/registry_ut.cpp |59.4%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/afa8c81460609cfd6247d9dbf7_raw.auxcpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/ydb-core-ymq-actor-cloud_events-cloud_events_ut |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/erasure_checkers.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/fair_share_hierarchical_queue.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/query_client/query_builder.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_sectorrestorator.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/generated/codegen/main.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_federation_discovery.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/concurrency/thread_pool.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_reject_notification.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/config.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugerecovery.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/walle_api_handler.cpp |59.5%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console__intpy3___pb2.py.p5ju.yapyc3 |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/logger.cpp |59.4%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console_config__intpy3___pb2.py.p5ju.yapyc3 |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base__intpy3___pb2.py.p5ju.yapyc3 |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/accessservice/resource.{pb.h ... grpc.pb.h} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/ut_external_data_source_reboots.cpp |59.4%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console__intpy3___pb2.py{ ... i} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/accessservice/access_service.{pb.h ... grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console_config__intpy3___pb2.py{ ... i} |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/http.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_tenant__intpy3___pb2.py{ ... i} |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/graph/ut/ydb-core-graph-ut |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config.{pb.h ... grpc.pb.h} |59.5%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_params.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/generated/codegen/ydb-core-base-generated-codegen |59.5%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/apps/etcd_proxy/service/ut/etcd_service_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/tls.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/perf/kqp_query_perf_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/uuid_text.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/workload.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/journal_client/public.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/config.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/federated_topic/ut/basic_usage_ut.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_whiteboard.{pb.h ... 
grpc.pb.h} |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/public.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_object_storage_listing.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/system_invokers.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/config.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_writer.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs.{pb.h ... grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_rw_lock.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/connection.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_view.pb.{h, cc} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/write_data.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/consumer_client.cpp |59.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/plan2svg/ydb-tests-functional-kqp-plan2svg |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/transaction.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_volatile.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_delete.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/validation/column_shard_config_validator.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisproxy.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/manager.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_barrier.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_defs.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_common_pq.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_common.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_syslogreader.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_recovery.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_stattablet.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/engine/minikql/flat_local_tx_factory.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/blobstorage/vdisk/query/assimilation.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sstvec.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/http_ping.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/downtime.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_idx.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_recovery_scan.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_store_walle_task.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_blobstorage_config.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/packet.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/proto/dq_io.pb.{h, cc} |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sysview_reboots/ydb-core-tx-schemeshard-ut_sysview_reboots |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/queue_rowset.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__drop_yaml_config.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/api_adapters.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/table_writer.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/minikql_compile/compile_context.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_read.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/action_queue.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/codicil_guarded_invoker.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_semaphore.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/delayed_executor.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/execution_stack.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_stathuge.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_looper.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/fqrun/fqrun.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/config.cpp |59.4%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/protos/config.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/lease_manager.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/scheduler/operation_cache.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/config.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_bridge.pb.{h, cc} |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/lz.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/tx/schemeshard/ut_failure_injection/ydb-core-tx-schemeshard-ut_failure_injection |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_executor.pb.{h, cc} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_topic.pb.{h, cc} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/fqrun/fqrun |59.4%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/d52903a0693870f83d0bbe0ab8_raw.auxcpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/throughput_throttler.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_server.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_clickhouse_internal.pb.{h, cc} |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/periodic_yielder.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_recovery_read_log.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/quantized_executor.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/node_tracker_client/node_directory.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_http.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query_stats.pb.{h, cc} |59.4%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/d8a0ba19a8c38a9b8166f51a9b_raw.auxcpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_persqueue_v1.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/acl.cpp |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_actor.cpp |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color__intpy3___pb2.py.p5ju.yapyc3 |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_log.cpp |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.5%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.5%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2.py.p5ju.yapyc3 |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_write.cpp |59.5%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2.py{ ... i} |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_import_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_mon.cpp |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color__intpy3___pb2.py{ ... i} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk__intpy3___pb2.py{ ... 
i} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/proto/yq_internal.pb.{h, cc} |59.4%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/feature_flags__intpy3___pb2.py.p5ju.yapyc3 |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export__intpy3___pb2.py{ ... i} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/external_sources__intpy3___pb2.py{ ... i} |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/external_sources__intpy3___pb2.py.p5ju.yapyc3 |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/events.cpp |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/external_sources__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export__intpy3___pb2.py.p5ju.yapyc3 |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replbroker.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/driver/libpy3nemesis.global.a |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/driver/objcopy_81ae81681ce2388a653cfa5ba3.o |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/library/libpy3tools-nemesis-library.global.a |59.4%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/feature_flags__intpy3___pb2.py{ ... i} |59.4%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/generated/runtime_feature_flags.cpp |59.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/kqp_indexes/ydb-tests-functional-kqp-kqp_indexes |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_table_impl.{pb.h ... grpc.pb.h} |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool_poller.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/nemesis/driver/nemesis |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_bulk_upsert_olap_ut.cpp |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/objcopy_4f055c289b3de8f2a1e827ae5c.o |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_coordination_ut.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_events.{pb.h ... grpc.pb.h} |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/accessservice/access.{pb.h ... grpc.pb.h} |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/config.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__cleanup_subscriptions.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_query_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/service/topic_reader_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/analytics/edaf602b2011baa1519a223d63_raw.auxcpp |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/analytics/objcopy_1326afc143d720f2af434cd836.o |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/oauth_request.{pb.h ... 
grpc.pb.h} |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/analytics/objcopy_1007df29dec27b0b7a1587d49f.o |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/analytics/objcopy_b91160bcee04ad1f57e80af064.o |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/in_memory_control_plane_storage_ut.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/user_account_service.{pb.h ... grpc.pb.h} |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/minikql_compile/db_key_resolver.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/nodewarden/group_stat_aggregator.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_ext_tenant_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/formatter.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_scripting_ut.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/proto/quota_internal.pb.{h, cc} |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/coordinator/protos/events.pb.{h, cc} |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/operation/operation.{pb.h ... grpc.pb.h} |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/generic/analytics/ydb-tests-fq-generic-analytics |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/service_account_service.{pb.h ... grpc.pb.h} |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/yandex_passport_cookie.{pb.h ... grpc.pb.h} |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_logstore.pb.{h, cc} |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_datastreams_v1.{pb.h ... grpc.pb.h} |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_logstore_v1.{pb.h ... grpc.pb.h} |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/backup.{pb.h ... grpc.pb.h} |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/adapters.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_discovery_v1.{pb.h ... grpc.pb.h} |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_bulk_upsert_ut.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/protos/dqs.pb.{h, cc} |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bitmap.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_register_node_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/helpers.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/google/rpc/status.{pb.h ... grpc.pb.h} |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/signature.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bloom_filter.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/hive.{pb.h ... grpc.pb.h} |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/google/api/field_behavior.{pb.h ... grpc.pb.h} |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/persqueue_error_codes_v1.pb.{h, cc} |59.5%| [PR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/pg/expr_nodes/yql_pg_expr_nodes.{gen.h ... 
defs.inl.h} |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/checksum.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/journal_reader.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/cache_config.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/relaxed_mpsc_queue.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/config.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/config.cpp |59.6%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/expr_nodes/yql_s3_expr_nodes.{gen.h ... defs.inl.h} |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/security_client.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/system_log_event_provider.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/digest.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kesus.{pb.h ... grpc.pb.h} |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/random.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_pipe.{pb.h ... grpc.pb.h} |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/hazard_ptr.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_index_table_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_logstore_ut.cpp |59.6%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/events.h_serialized.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/arrow_filter.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/public.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/c6cd853d298b80d82dda8309af_raw.auxcpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_requestimpl.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.{pb.h ... grpc.pb.h} |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/serialize.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/describes_ut/ic_cache_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker_profiler.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_permissions_ut.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/pgproxy/protos/pgproxy.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/statistics.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_keyvalue.{pb.h ... 
grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/channel_detail.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_olapstore_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_ldap_login_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_quotas_ut.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_internal_ut.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_monitoring_ut.cpp |58.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yt/yt/core/libyt-yt-core.a |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_validate.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/dialer.cpp |58.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_monitoring_v1.{pb.h ... grpc.pb.h} |58.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/protos/dq_effects.pb.{h, cc} |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_table_ut.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/listener.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/public.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/a1cd5217b8fdb95aa9fc955f31_raw.auxcpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_tenant.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/socket.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/codicil_guarded_invoker.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/cancelable_context.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_pool.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/local_bypass.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct_update.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_looper.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/tracing/public.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct_detail.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/threading/spin_wait_slow_path_logger.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/random.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/init/dummy.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_login_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_service.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/process_exit_profiler.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/protobuf_helpers.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/system_log_event_provider.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/stream_output.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/node_detail.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/stream_log_writer.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/periodic_yielder.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/pollable_detail.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/serializable_logger.cpp |59.4%| [CC] 
{BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/periodic_executor.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber_manager.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fls.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/coroutine.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_olap_reboots/ut_olap_reboots.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/zlib.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_detail.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_util.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_thread_pool.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/dispatcher.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/public.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/client.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/packet.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/current_invoker.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/dispatcher_impl.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/cancelation_token.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/config.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/future.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/snappy.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/stream.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_permissions_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/connection.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/bzip2.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/brotli.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/dictionary_codec.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/generated/runtime_feature_flags_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/public.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/lz.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/codec.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/lzma.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_rw_lock.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_semaphore.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/server.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/zstd.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_base/ut_base.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/config.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/ydb-core-blobstorage-vdisk-hulldb-compstrat-ut |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/action_queue.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_barrier.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_queue_scheduler_thread.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_invoker_queue.cpp |59.5%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/operation_queue_timer.h_serialized.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/execution_stack.cpp 
|59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_base/ut_commit_redo_limit.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_stream_pipe.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hulldefs.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_action_queue.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__log_cleanup.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/delayed_executor.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_invoker_pool.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_stream.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/lease_manager.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/nonblocking_batcher.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber_scheduler_thread.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/invoker_alarm.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/notify_manager.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/invoker_queue.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/crypto.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_base/ut_info_types.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/system_invokers.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/retrying_periodic_executor.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/propagating_storage.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/profiling_helpers.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_throttler.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/tls.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/single_queue_scheduler_thread.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/scheduled_executor.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/suspendable_action_queue.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/quantized_executor.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_affinity.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool_detail.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool_poller.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/throughput_throttler.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_writer.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/config.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/two_level_fair_share_thread_pool.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/dns_resolver.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/helpers.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/config.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/random_access_gzip.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/config.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/fluent_log.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/generated/ut/ydb-core-base-generated-ut |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_parser.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/dns/ares_dns_resolver.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/log_writer_detail.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/compression.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_callbacks.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/config.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/file_log_writer.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/formatter.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/logger_owner.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_configuration_info_collector.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/blob_output.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/fair_share_hierarchical_queue.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/adjusted_exponential_moving_average.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_table_split_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/backoff_strategy.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/zstd_compression.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bitmap.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/proc.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bit_packed_unsigned_vector.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bit_packing.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/coro_pipe.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/codicil.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/arithmetic_formula.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bloom_filter.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/configurable_singleton_def.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/parser_helpers.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/checksum.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/id_generator.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/hazard_ptr.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/pattern_formatter.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/config.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/cache_config.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/digest.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/pool_allocator.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/hedging_manager.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/histogram.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/error.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/memory_usage_tracker.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/linear_probe.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/request_queue_provider.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/public.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/protocol_version.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__configure.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/helpers.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/fs.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_object_storage_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_permissions_ut.cpp |59.6%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_decimal_types.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/serialize.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/caching_channel_factory.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker_statistics_producer.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/slab_allocator.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker_profiler.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/relaxed_mpsc_queue.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/serialize_dump.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/statistics.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/zerocopy_output_writer.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/shutdown.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/utf8_decoder.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/statistic_path.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/connection.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/public.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/socket.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/descriptors.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/local_address.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/context.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/dialer.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/config.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/address.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/authentication_identity.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/listener.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/type_def.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/load.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/type_registry.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/schemas.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/profiling/timing.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/authenticator.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/per_key_request_queue_provider.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/local_channel.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/balancing_channel.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/channel_detail.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/bus/channel.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/helpers.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/hedging_channel.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/client.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/bus/server.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/dispatcher.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/peer_discovery.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/message.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/local_server.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/message_format.cpp |59.7%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/executer_actor/kqp_executer.h_serialized.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/null_channel.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/overload_controlling_service_base.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/rpc/dynamic_channel_pool.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_stats_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/tracing/allocation_tags.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/static_channel_factory.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/serialized_channel.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_read_rows_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/service.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/roaming_channel.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/service_discovery/service_discovery.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/retrying_channel.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/overload_controller.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/threading/thread.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/response_keeper.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/throttling_channel.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/tokenizer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/config.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/server_detail.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/token.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/async_writer.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/token.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/list_verb_lazy_yson_consumer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/helpers.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/utilex/random.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/stack.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/viable_peer_registry.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/async_consumer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/stream.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/attribute_consumer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/tokenizer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/attributes_stripper.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/forwarding_consumer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/lexer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/service_detail.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/depth_limiting_yson_consumer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/consumer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/config.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/token_writer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/producer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_helpers.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/describes_ut/describe_topic_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/null_consumer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop_unknown_fields.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/parser.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/pull_parser_deserialize.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop_options.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tablet_flat/ut_large/ut_btree_index_large.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/stream.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_builder_stream.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_merger.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/convert.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_filter.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/syntax_checker.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/ypath_filtering_consumer.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attribute_consumer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/pull_parser.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/writer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/ypath_designated_consumer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/yson_builder.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__update_tenant_state.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/statistics_producer.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_persistent_storage.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/node.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attribute_filter.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/bindings.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/exception_helpers.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/config.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attributes.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/compute/common/ut/config_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ephemeral_attribute_owner.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_public.cpp |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/common/ut/objcopy_caf222d14387d4810b5cb3e853.o |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/interned_attributes.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ephemeral_node_factory.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_resolver.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/size.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/permission.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/tree_builder.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/request_complexity_limits.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/system_attribute_provider.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/request_complexity_limiter.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/helpers.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/compute/common/ut/utils_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/serialize.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/public.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/scheduler_thread.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/tree_visitor.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/static_service_dispatcher.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console.cpp 
|59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker_statistics_producer.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_error_codes.pb.{h, cc} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/compression.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/schemas.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/null_channel.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/virtual.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_detail.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_client.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_statistics_aggregator.{pb.h ... grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/balancing_channel.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/local_server.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_node_broker.{pb.h ... grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/service_combiner.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_pg_types.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_snapshot.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/ut/ydb-library-yaml_config-ut |59.8%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blob_depot/schema.h_serialized.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/overload_controller.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/protocol_version.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_testshard.{pb.h ... grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/bus/channel.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/console_audit.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/public.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/authenticator.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/async_consumer.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/retrying_channel.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/shard/protos/counters_shard.pb.{h, cc} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_scripting_v1.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/profiler.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/test_shard.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/protos/tx_event.pb.{h, cc} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/request_queue_provider.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/depth_limiting_yson_consumer.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/other/mon_get_blob_page.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/tx_processor.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/overload_controlling_service_base.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_allocator.{pb.h ... 
grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/roaming_channel.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/message.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/list_verb_lazy_yson_consumer.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop_unknown_fields.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/record_helpers.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/row_buffer.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/protos/events.pb.{h, cc} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/other/mon_blob_range_page.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__remove_tenant_failed.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_configdummy.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_tracker.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/column_sort_schema.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/index.pb.{h, cc} |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugedefs.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/ypath_filtering_consumer.cpp |59.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/metering.h_serialized.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/syntax_checker.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_tablet_v1.{pb.h ... grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/tokenizer.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/pipe.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_export_v1.{pb.h ... grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/node.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_view_v1.{pb.h ... grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/size_calcer.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attribute_consumer.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/permutations.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kafka.{pb.h ... 
grpc.pb.h} |59.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/fifo_cleanup.h_serialized.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/table_consumer.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ephemeral_node_factory.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/external_data_source.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schema_serialization_helpers.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/b128a176091d6f4205660cfcb9_raw.auxcpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/direct_read_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/external_source_factory.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisrunner.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/exception_helpers.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/protos/selector.pb.{h, cc} |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/dbc97ea0b10e1a7b45035ce1e1_raw.auxcpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_readactor.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__remove_config_subscriptions.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/configs_config.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__alter_tenant.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/static_service_dispatcher.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/validation_functions.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__remove_config_subscription.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_datashard.{pb.h ... grpc.pb.h} |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/etcd_proxy/service/ut/ydb-apps-etcd_proxy-service-ut |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/table_output.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/feature_flags_configurator.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/name_table.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/a2dcc326c51839151e5bfd92cd_raw.auxcpp |59.8%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/dbc97ea0b10e1a7b45035ce1e1_raw.auxcpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_shred.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__replace_config_subscriptions.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters.{pb.h ... 
grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/unversioned_row.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/8750dadd4a699d4221d697bf03_raw.auxcpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/sticky_transaction_pool.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schema.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__remove_tenant_done.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/stack.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/protos/blobstorage_config.pb.cc |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_cms.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/494cbbb0dc8b7c9860714b57e7_raw.auxcpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/dcebaf9a0d338442b39b25171a_raw.auxcpp |59.9%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/grpc__intpy3___pb2.py{ ... i} |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group__intpy3___pb2.py.p5ju.yapyc3 |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_configs_manager.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.9%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/grpc__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.8%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/grpc__intpy3___pb2.py.p5ju.yapyc3 |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_pq_old__intpy3___pb2.py.p5ju.yapyc3 |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_pq_old__intpy3___pb2.py{ ... i} |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/yaml_config_proto2yaml_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_tenants_manager.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group__intpy3___pb2.py{ ... i} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme__intpy3___pb2.py{ ... 
i} |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/http.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_group.cpp |59.8%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/opt/kqp_query_plan.h_serialized.cpp |59.8%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/494cbbb0dc8b7c9860714b57e7_raw.auxcpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/serialize.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/validate_logical_type.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/protobuf_udf/libessentials-minikql-protobuf_udf.a |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/field_transformation.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmem.cpp |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/bin/objcopy_940b9a794cb8fbc6ebdf926276.o |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/bin/libpy3ydb_configure.global.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/0ec1f6840e6475509b66e2aab5_raw.auxcpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/batch_builder/merger.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__update_subdomain_key.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_tenant.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base.{pb.h ... grpc.pb.h} |59.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.h_serialized.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache__intpy3___pb2.py{ ... i} |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/cfg/bin/ydb_configure |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/scheme_cache_lib/yql_db_scheme_resolver.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/209d59416ce59145c1e1d96c2f_raw.auxcpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statestorage__intpy3___pb2.py{ ... i} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs__intpy3___pb2.py{ ... i} |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statestorage__intpy3___pb2.py.p5ju.yapyc3 |59.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/datashard.h_serialized.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/ut_large/ydb-core-tablet_flat-ut_large |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/yaml_config_parser_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/common.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_proxywrite.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/stream_output.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_ut_large.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_resolve_node.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/config.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_disk_states.{pb.h ... 
grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/row_batch_reader.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/config.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/8518bc2ed0b218aec7a0a95428_raw.auxcpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/yaml_config_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schemaless_row_reorderer.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/protos/graph.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/proto/events.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/util/config_index.cpp |59.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/tx_controller.h_serialized.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/public.cpp |59.8%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common/description.h_serialized.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_backup.pb.{h, cc} |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/wire_protocol.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/library/cpp/retry/protos/retry_options.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap.cpp |59.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/columnshard.h_serialized.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/validation/validators.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogreader.cpp |59.9%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/feature_flags.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/service_combiner.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_bridge.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/config.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/console_dumper_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__update_pool_state.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/batching_timestamp_provider.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/protos/events.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/datastreams.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_sequenceshard.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_scheme_request.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/configs_cache.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base.{pb.h ... 
grpc.pb.h} |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/protobuf/libprotobuf_udf.global.a |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/file/libfile_udf.global.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/formats/arrow/arrow_helpers.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/net_classifier_updater.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_types.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_group_resolver.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/streaming/libstreaming_udf.global.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeletonfront.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_tx_request.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/yaml_config_parser.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/log_settings_configurator.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/bin/main.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/timestamp_provider_base.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/auth.{pb.h ... grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_configs_subscriber.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/testlib/actors/ut/ydb-core-testlib-actors-ut |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_loggedrec.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_drain_node.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_proxy.{pb.h ... grpc.pb.h} |59.8%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/11a9f7a6b79b2ef13b0ccdb626_raw.auxcpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay_yt/main.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_dynamic.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay_yt/query_compiler.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/types/yql_types.pb.{h, cc} |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_binding.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_events.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_block_and_get.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/db_metadata_cache.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config__intpy3___pb2.py{ ... 
i} |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme__intpy3___pb2.py.p5ju.yapyc3 |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op__intpy3___pb2.py.p5ju.yapyc3 |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bind_channel_storage_pool.{pb.h ... grpc.pb.h} |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config__intpy3___pb2.py.p5ju.yapyc3 |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op__intpy3___pb2.py{ ... i} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/workload_manager_config.{pb.h ... grpc.pb.h} |59.9%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/feature_flags__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__update_confirmed_subdomain.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_actor.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/tree_builder.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/alloc.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/functions.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/ut/ut_ext_index.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/forwarding_consumer.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_actorsystem_perftest.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/client.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/blob_range.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_bs.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/null_consumer.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay_yt/query_replay.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_scrub.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/portion_info.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/actors/test_runtime_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/virtual.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_client.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_disk.cpp |59.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.h_serialized.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/160c5542e9615810f3f86fe955_raw.auxcpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/token_writer.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/skynet.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_disk_states.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/abstract.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_proxy.{pb.h ... 
grpc.pb.h} |59.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/column_engine_logs.h_serialized.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/serialize_deserialize.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_outofspace.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/graph_execute.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_tree.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdsk.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/validation.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/quota/quota.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_broker.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/local_channel.cpp |59.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/protos/libpy3yaml-config-protos.global.a |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/ydb/v1/resource_preset.{pb.h ... grpc.pb.h} |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/arrow/cpp/src/arrow/python/libpy3src-arrow-python.a |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/google/benchmark/librestricted-google-benchmark.a |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/query_replay_yt/query_replay_yt |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/protos/4306a854d105ac9e8a68bf91ca_raw.auxcpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/f9b9904f5f29323034ee90b36a_raw.auxcpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/libpy3tests-tools-ydb_serializable.global.a |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/objcopy_3fdb568d483b57acc8e627f8c2.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay/query_proccessor.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/ydb_serializable/ydb_serializable |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_performance_params.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/serialize_dump.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_datashard.{pb.h ... 
grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay/main.cpp |59.9%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/protos/4306a854d105ac9e8a68bf91ca_raw.auxcpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_tablet.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/accessor.pb.{h, cc} |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/s3_backups/s3_backups |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/libpy3s3_backups.global.a |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/objcopy_4508aef343f36758ea760320db.o |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/mvp/oidc_proxy/bin/mvp_oidc_proxy |59.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/tools/protobuf_plugin/config_proto_plugin |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/memory_usage_tracker.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay/query_compiler.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_tablet_state.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_bs_controller.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/configs_dispatcher_proxy.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cluster_info.cpp |59.9%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/protos/config__intpy3___pb2.py.siec.yapyc3 |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_cache.cpp |59.9%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/protos/config__intpy3___pb2.py{, i} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay/query_replay.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_vdisk_guids.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/protos/blobstorage_config__intpy3___pb2.py.siec.yapyc3 |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/protos/blobstorage_config__intpy3___pb2.py{, i} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_events.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/table_partition_reader.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_pq.{pb.h ... 
grpc.pb.h} |59.9%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/enum_parser/enum_parser/enum_parser |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/distributed_table_session.cpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dwarf_backtrace/registry/libcpp-dwarf_backtrace-registry.global.a |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/invoker_alarm.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_transport.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/configs_dispatcher.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/nodes_manager.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/grpc_server.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_monactors.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__add_config_subscription.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_common.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/linear_probe.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/auth.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_filter_ut.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/partitioning/objcopy_265d7fd505d52534f38ea6fb7f.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/partitioning/objcopy_a52eb3c900a84eaad86a211549.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/partitioning/objcopy_40226ff8497733c6e798ee3940.o |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/partitioning/ydb-tests-datashard-partitioning |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyarrow/libpy3contrib-python-pyarrow.global.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_fakeinitshard.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_table_v1.{pb.h ... 
grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_log_and_send.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/query_tracker_client.cpp |59.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/re2/libre2_udf.so |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/format_handler_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/query_replay/ydb_query_replay |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/version/version.cpp |59.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/execution_unit.h_serialized.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogrecovery.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/ut/metadata_conversion.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/validators/validator.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncquorum_ut.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/large/objcopy_27c0687ceeb7ce4ff5e4cea90a.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/large/objcopy_7eab954373d77ffb1fab95ca0d.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/large/objcopy_d68e1e5b762e412afe6a534487.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/limits/objcopy_14c03c6aecffbe39cb01ddf2ed.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dml/objcopy_8db6616d40f8020d0632222fe3.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/limits/objcopy_d52256d4fa9895f38df6030445.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dml/objcopy_9314464e3560b2511ac931acd9.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dml/objcopy_8fca143a218b930f297b779e3a.o |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/hive.{pb.h ... grpc.pb.h} |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/limits/objcopy_40779f0570229cef213050a4fa.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis_osiris.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/fq_config.pb.{h, cc} |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/sql/large/ydb-tests-sql-large |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/per_key_request_queue_provider.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber_manager.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_throttler.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/original.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_stats.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/oauth/session_service.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_parser_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/oauth/cloud_user.{pb.h ... 
grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config.{pb.h ... grpc.pb.h} |59.9%| [PD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/protos/yaml-config-protos.{self.protodesc, protosrc} |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/validators/registry.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/dml/ydb-tests-datashard-dml |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/feature_flags.{pb.h ... grpc.pb.h} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/common.pb.{h, cc} |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyarrow/libpy3contrib-python-pyarrow.a |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/syncer_job_actor.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/address.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/grpc/fq_private_v1.{pb.h ... grpc.pb.h} |59.9%| {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/protos/config.proto.{desc, 236947a227eabf309dc2ce63434b3df8.rawproto} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_costmodel.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_console.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/recovery/hulldb_recovery.cpp |59.9%| {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/protos/blobstorage_config.proto.{desc, 236947a227eabf309dc2ce63434b3df8.rawproto} |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_context.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_issue_message.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/zstd_compression.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/oauth/claims.{pb.h ... 
grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_defs.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/tsserver/tsserver |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/24b7f665b4aa031005ffced39b_raw.auxcpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/serializer/parsing.cpp |60.0%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/py3cc/py3cc |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bit_packing.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/objcopy_643fa2679e88d9b2d33558b050.o |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/file_writer.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/daf02fd86bb7e2296f1437ae1f_raw.auxcpp |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/fixtures/libpy3tests-library-fixtures.global.a |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/objcopy_53073eb93c76466fca8f474c5f.o |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/tsserver/main.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/objcopy_fe15eb83a42d9d70d347bbba65.o |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_replication_v1.{pb.h ... grpc.pb.h} |60.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/blobstorage_hullcompdelete.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/syncer_job_task.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_debug.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__get_yaml_config.cpp |59.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/datetime/libdatetime_udf.so |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/85072bb936b0763f4b03040c4c_raw.auxcpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/kikimr_services_initializers.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_init_scheme.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/8760973cadf4aa816a9d37589f_raw.auxcpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical.{pb.h ... 
grpc.pb.h} |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/cdc/tests/ydb-tests-stress-cdc-tests |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/tests/objcopy_7c0098f27edc25092453a8033c.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/tests/objcopy_b9fcf9641e3e569e88014f85ff.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_genconfig.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/tests/objcopy_7f02665786b7523f76c02ad1dd.o |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gbenchmark/libcpp-testing-gbenchmark.a |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/medium/objcopy_71b7c7df3e7853e6e7cd11e484.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/medium/objcopy_1583476a2a074be936cf5a393e.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_broker_ut.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/medium/objcopy_cc203073bb2a03b31e52a78f24.o |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/vector_index/medium/ydb-tests-datashard-vector_index-medium |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/protos/viewer.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_public.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/ut_bscontroller/main.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct_update.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_scheme.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_huge.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/protos/data.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/client_impl.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/size.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/9ce1d76b66c1825c22ae6402fe_raw.auxcpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_ymq_v1.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/index_builder.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut_strategy/strategy_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/s3_settings.{pb.h ... 
grpc.pb.h} |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/ydb-core-blobstorage-dsproxy-ut_strategy |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/objcopy_c77713875cf17988efd8fc0fb3.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/objcopy_c52ec5ba5ab0b788efaa5ed704.o |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/ydb_cli/ydb-tests-functional-ydb_cli |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/mixedpy/ydb-tests-stress-mixedpy |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/race.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/5c5fdf614c3039a8dba94a4f38_raw.auxcpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/objcopy_903d4758faea71f1363e296b3f.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/hive/objcopy_48884f6b745ced4d3e78997cb1.o |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/json/ut/ydb-core-viewer-json-ut |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/hive/objcopy_5333c1912ecbac0f64ff97551f.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/hive/objcopy_aebf7c73fcaf6a54715cc177c8.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/objcopy_d2e759e2d0ff1243166a3bc7d9.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/objcopy_51562f83ff52d1ceaac0c36a08.o |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/top_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/balance/deleter.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/objcopy_fe9c8c25e6c570097a9d0c06f9.o |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/init/init.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/large_results/kqp_scriptexec_results_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_base/cli_cmds_discovery.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_ut_local.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_node_enumeration_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/ut/utils_ut.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/objcopy_c96ef635306ccee8a5cf6359f1.o |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/meta/bin/main.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_vector_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_base/cli_cmds_root.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tools/combiner_perf/bin/main.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_ut_pool.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/benchmarks_init/ydb-tests-functional-benchmarks_init |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_prefixed_vector_ut.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/dump_restore/ydb-tests-datashard-dump_restore |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker_ut.cpp |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dump_restore/objcopy_ce0222bab1634be9f9a52f715d.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dump_restore/objcopy_ec94bbf9004678001f4c8195e3.o 
|59.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/c664ef6ca80e747b410e1da324_raw.auxcpp |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_dc1e8788b8287c02880cfe2814.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_64bde13108f9284b2e9f0bbb7a.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_52d3e6a0651990fc997ab40ba2.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_03f75cad4510fd9d018635026c.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dump_restore/objcopy_da2669c2228a88c83cd32d45da.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/benchmarks_init/objcopy_287a0728f8b1ad204ac0396eb2.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/benchmarks_init/objcopy_c96c333b4f7fc5cb2b98b27907.o |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/scheme/ut_pg/scheme_tablecell_pg_ut.cpp |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_0c451aebc6dafbdf0d9da2ab02.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/benchmarks_init/objcopy_de67ee476035f2cc7c8d34c996.o |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/run_ydb.cpp |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_ce073e3cc612363936bdd04210.o |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/tools/combiner_perf/libkqp-tools-combiner_perf.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/workload-transfer-topic-to-table.cpp |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_6cfba3dbee97ec121b2f346459.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_c43ce24509a50b033fa4050a33.o |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/s3/ydb-tests-fq-s3 |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/supported_codecs.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/supported_codecs_fixture.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/workload-topic.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/groupinfo/blobstorage_groupinfo_partlayout_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/groupinfo/blobstorage_groupinfo_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/groupinfo/blobstorage_groupinfo_blobmap_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/groupinfo/blobstorage_groupinfo_iter_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/ut_sequence/dsproxy_config_retrieval.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/ydb-dump.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/hullop_delayedresp_ut.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/s3/objcopy_ff581f3cff717ab223922f0cd8.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/s3/objcopy_d191482d8b66f1c03ea8df56d3.o |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/certificate_check/cert_check_ut.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/s3/objcopy_8685c3ae88e5169a5acffc7bc4.o |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/objcopy_5a4a401f33f46c70417a65f584.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/objcopy_e317764e105a7e9e48b67a7b7e.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/objcopy_86ad37399122e504f3e6d8378d.o |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/flavours/libpy3tests-library-flavours.global.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap_ctx_ut.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3 |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/objcopy_951c70889c9404d1662da27090.o |59.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/aae788a890ddcb1702c659c8aa_raw.auxcpp |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/objcopy_359d47616c1036f0865eb1e662.o |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhuge_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/json/json_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/validators/validator_bootstrap.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/tenants/ydb-tests-functional-tenants |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/certificate_check/cert_utils_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_vdisk.cpp |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_metadata__intpy3___pb2.py.p5ju.yapyc3 |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/mvp/meta/bin/mvp_meta |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/scheme/ut_pg/ydb-core-scheme-ut_pg |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/config_parser.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/incrhuge/ut/incrhuge_id_dict_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/kqp_generic_provider_ut.cpp |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/ttl_tiering/objcopy_0664e2ab2eb37ae9f02538e483.o |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/groupinfo/ut/ydb-core-blobstorage-groupinfo-ut |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/ydb-tests-olap |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/ttl_tiering/ydb-tests-olap-ttl_tiering |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/ttl_tiering/objcopy_4b767dce2ddf7a5424aef828d6.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/ttl_tiering/objcopy_19422d2b60428207055b4ed843.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/ttl_tiering/objcopy_bd8a6d25e26a719f80141d0711.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/objcopy_2cc418e8604751e5b8f9029a81.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/objcopy_e872ffee323253a62fe108f2f4.o |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/objcopy_ac3c83156eb65915b12091966a.o |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/pinger.pb.{h, cc} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_backup_collection/ut_backup_collection.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blob_depot/ut/ydb-core-blob_depot-ut |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/services.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/bundle_controller_client/bundle_controller_settings.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter_ut.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_export.pb.{h, cc} |59.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/string/libstring_udf.so |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_index/ut_async_index.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/data.pb.{h, cc} |59.8%| [PB] 
{BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/fq_config.pb.{h, cc} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/opentelemetry/proto/common/v1/common.{pb.h ... grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/ut/queue_id_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/splitter/ut/batch_slice.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/program/execution.h_serialized.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/ut/params_ut.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/token_accessor.pb.{h, cc} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/private_proxy.pb.{h, cc} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base3.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/closed_interval_set_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_schemeshard.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/db_pool/protos/config.pb.{h, cc} |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/apps/ydb/ut/parse_command_line.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk.{pb.h ... grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_index/ut_unique_index.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hullcompactdeferredqueue_ut.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hullop/ut/ydb-core-blobstorage-vdisk-hullop-ut |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/common/validation.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bridge.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/iam/iam_token_service.{pb.h ... grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/inside_ydb_ut/inside_ydb_ut.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/huge/ut/ydb-core-blobstorage-vdisk-huge-ut |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/given_id_range_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/tools/dq/worker_node/main.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cms.{pb.h ... 
grpc.pb.h} |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/objcopy_484246668d943fbae3b476ec7d.o |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_readbatch_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_index/ut_vector_index.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/oauth2_token_exchange/helpers/libclient-oauth2_token_exchange-helpers.a |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain/ut_extsubdomain.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sharding/ut/ut_sharding.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_readbulksst.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_remove_expired_notifications.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/testlib/s3_recipe_helper/liblibrary-testlib-s3_recipe_helper.a |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/sequencer_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/iceberg_ut_data.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/splitter/ut/ut_splitter.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/external_sources/s3/ut/s3_aws_credentials_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ut_data_erasure_reboots.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/ut/incrhuge_log_merger_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/storage_tenant_ut.cpp |59.8%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console_config.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_coordinator.{pb.h ... grpc.pb.h} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_config.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/http_config.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_rtmr/ut_rtmr.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/cache_ut.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_idxsnap.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_column_build/ut_column_build.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters_aggregator.{pb.h ... grpc.pb.h} |59.8%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/expr_nodes/kqp_expr_nodes.{gen.h ... defs.inl.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/compute.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_action_queue.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/ut/incrhuge_basic_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/yson_format_conversion.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tracing.{pb.h ... 
grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/validator.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/subdomains.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_remove_request.cpp |59.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/backup/ydb-tests-functional-backup |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_selector.cpp |59.7%| [CC] {tool} $(B)/ydb/core/protos/feature_flags.grpc.pb.cc |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/ut_user_attributes_reboots.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/scale_recommender_policy_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_compaction/ut_compaction.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/benchmark/b_part.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_stats.pb.{h, cc} |59.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/pire/libpire_udf.so |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel.{pb.h ... grpc.pb.h} |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tools/ydbd_slice/bin/objcopy_9509442a50bd9d1393fa0d54e4.o |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/object_distribution_ut.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/ydbd_slice/bin/ydbd_slice |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_update_config.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk2/huge.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/assign_const.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_controller_config.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/hive/timestamp_map.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/stream.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/pathid.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/aggr_keys.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tx.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/snapshot.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/login.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/check_yson_token.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/producer_client.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statestorage.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/service/ut/ut_aggregation/ut_aggregate_statistics.cpp |59.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/datetime2/libdatetime2_udf.so |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/accessor.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_issue_message.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/validation.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_impl_ut.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_events.{pb.h ... 
grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_config.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_common.pb.{h, cc} |59.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/olap/combinatory/libut-olap-combinatory.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/storage_pool_info_ut.cpp |59.7%| [CC] {tool} $(B)/ydb/core/protos/feature_flags.pb.cc |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_operation.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/node_tracker_client/helpers.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_table.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/minikql.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/http_proxy/ut/kinesis_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_backup/ut_backup.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_ut.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/incrhuge/ut/ydb-core-blobstorage-incrhuge-ut |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/control/immediate_control_board_actor_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_defrag.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/quoter_performance_test/main.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_fat.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/stop_pdisk.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/common/util_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_vdisk2/ydb-core-blobstorage-ut_vdisk2 |59.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/s3_recipe/s3_recipe |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_serverless/ut_serverless.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/http_proxy/ut/ymq_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/olap/high_load/read_update_write.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/database/ut/ut_database.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceshard/ut_sequenceshard.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions_ut.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceshard/ut_helpers.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_replication.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/quoter_resource_tree_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/datashard/datashard_ut_reassign.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/cancelable_context.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/proxy/ut_helpers.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_defs.h_serialized.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/proxy/proxy_actor_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/brotli.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_cms.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/udf_resolver.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_flightcontrol.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/effects/kqp_effects_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/combinatory/actualization.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/combinatory/select.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_log_cache.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_state.h_serialized.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/combinatory/executor.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/combinatory/variator.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/combinatory/compaction.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/combinatory/abstract.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/combinatory/bulk_upsert.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/pg/pg_catalog_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/common/iceberg_processor_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/pg/kqp_pg_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/json_change_record_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_internal_interface.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/effects/kqp_immediate_effects_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/effects/kqp_write_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_ut.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/secondary_index/objcopy_6b62c1db41e3ebd0278a84dced.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/secondary_index/objcopy_716263ce181e67161f84180281.o |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_scheme.{pb.h ... grpc.pb.h} |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/secondary_index/objcopy_b83d9052e0bc89877bbe223294.o |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/helpers.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/servicecontrol/resource.{pb.h ... 
grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/common/cache_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/effects/kqp_inplace_update_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/chunk_replica.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/secondary_index/ydb-tests-datashard-secondary_index |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bridge.{pb.h ... grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/tracing/allocation_tags.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_pool.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_import.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/common/rows_proto_splitter_ut.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_persqueue_cluster_discovery.pb.{h, cc} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_internal.{pb.h ... grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/local_bypass.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_transport.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_detail.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/common.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/nonblocking_batcher.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/wrappers/s3_wrapper_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_federation_discovery_v1.{pb.h ... grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/common/entity_id_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_collector_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/combinatory/execute.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/dispatcher_impl.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/crypto.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_mediator.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_maintenance.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/invoker_queue.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/server.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bit_packed_unsigned_vector.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/dispatcher.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_scripting.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/scheduled_executor.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_kesus.{pb.h ... grpc.pb.h} |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut |59.6%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_state.h_serialized.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/user_account.{pb.h ... 
grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/shuffle_client.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tablet_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/single_queue_scheduler_thread.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/pollable_detail.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_readbatch.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/threading/thread.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/idx_test/ydb_index_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/dns_resolver.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/tornado/tornado-4/libpy3python-tornado-tornado-4.a |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/arithmetic_formula.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/connector.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/ares_dns_resolver.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/helpers.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/accessservice/access_service.{pb.h ... grpc.pb.h} |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/tornado/tornado-4/libpy3python-tornado-tornado-4.global.a |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/retry_config.pb.{h, cc} |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/tests/liblibrary-persqueue-tests.a |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/codec.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_invoker_queue.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/proxy_service/proto/result_set_meta.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/data/kqp_read_null_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/fluent_log.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_debug.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/7c1f1e5f5235f3f91f737c08e3_raw.auxcpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/adjusted_exponential_moving_average.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/protos/task_command_executor.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/descriptors.cpp |59.7%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/expr_nodes/yql_generic_expr_nodes.{gen.h ... defs.inl.h} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/private.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/coro_pipe.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/check_schema_compatibility.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_public.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_rate_limiter.pb.{h, cc} |59.7%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/python/moto/bin/moto_server |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon.{pb.h ... 
grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_split_merge/ut_find_split_key.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/configurable_singleton_def.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/proc.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/column_rename_descriptor.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_common.pb.{h, cc} |59.7%| [PR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/include/llvm/IR/Attributes.inc{, .d} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/process_exit_profiler.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/sink.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_export.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/stream_creator_ut.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/ut/ydb-core-mind-ut |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/query_client/query_statistics.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_order.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/config.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/public_http/protos/fq.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_move/ut_move.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/shutdown.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/utf8_decoder.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/grpc_library_helper.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/connection.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/lzma.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_completion_impl.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/bus/server.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/quoter_service_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/parser.cpp |59.6%| [CC] {tool} $(S)/ydb/core/base/generated/codegen/main.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_sequence_reboots/ut_sequence_reboots.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/hedging_channel.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ut_rtmr_reboots.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_export/ut_export.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_tools.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/mediator_ut.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/dynamic_channel_pool.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/ut_helpers.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_streaming/ut/grpc/libgrpc_streaming-ut-grpc.a |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/proto/graph_description.pb.{h, cc} |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_alloc.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/backup/tools/decrypt/main.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_repl.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/monitoring/mon_proto.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/kesus_quoter_ut.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_253d734e8c901d319d84fcc6e9.o |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sstslice.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_363b5875cc5c5e5745458b16b8.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_303f7409bfab4277e367bbd11a.o |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_pq_reboots/ut_pq_reboots.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_e2a089b95d9316f6e26025d3e3.o |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/statistic_path.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_driveestimator.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/convert.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/peer_discovery.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmd_config.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/tokenizer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/key_bound_compressor.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/validators/validator_nameservice.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/config.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/external_source_builder.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/serialized_channel.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/producer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop_options.cpp |59.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/libpy3tests-stability-tool.global.a |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_range.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_filter.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut |59.6%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/objcopy_f8eedece62b0d046ee29007b2b.o |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tool/objcopy_04f56802b68450abc8421282d0.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tool/objcopy_6403bfa5c5e35b29a21c73fb0e.o |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/ypath_designated_consumer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/rate_limiter/rate_limiter_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/bindings.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrapper.{pb.h ... 
grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_cms_v1.{pb.h ... grpc.pb.h} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/minikql_compile/yql_expr_minikql.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr |59.6%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/oltp_workload |59.6%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/simple_queue |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_minikql.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/ut/ut_secret.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/node_detail.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_init.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_locks.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/tools/yqlrun/lib/libtools-yqlrun-lib.a |59.6%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/ydb_cli |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/tools/yqlrun/http/libtools-yqlrun-http.a |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/record_codegen_cpp.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yql/tools/yqlrun/yqlrun.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/ut_backup_collection_reboots.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schemaless_dynamic_table_writer.cpp |59.7%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/cfg |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_streaming/ut/grpc/streaming_service.{pb.h ... grpc.pb.h} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup |59.7%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/rescompressor/rescompressor |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_process.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/fields.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogformat.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/protos/storage.pb.{h, cc} |59.6%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/columnshard_impl.h_serialized.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_configs_provider.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_console.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__init_scheme.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tracing_signals.{pb.h ... 
grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__load_state.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |59.7%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h_serialized.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/c29e775d4210e0bfea21a038e3_raw.auxcpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board_mon.{pb.h ... grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/interned_attributes.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/yql/tools/yqlrun/yqlrun |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_scatter_gather.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/restore_corrupted_blob_actor.cpp |59.6%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/session.h_serialized.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/config.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_mongroups.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_login/ut_login.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/tools/decrypt/decrypt |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_hive_create_tablet.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/system_attribute_provider.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool_detail.cpp |59.6%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/generated/dispatch_op.h |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_test_shard_request.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_config.{pb.h ... 
grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_replication_reboots/ut_replication_reboots.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_write.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/immediate_controls_configurator.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__set_config.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/ttl/ydb-tests-datashard-ttl |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/ttl/objcopy_b1ab101896e634020e0c6ffeaf.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/ttl/objcopy_589d529f9477963cf67237781c.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/ttl/objcopy_82d6d29ac7be3798b7e748facc.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ttl/objcopy_0aefef587c181350d3a25f70e0.o |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/ingress/blobstorage_ingress_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/ingress/blobstorage_ingress_matrix_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_split_merge/ut_split_merge.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ttl/objcopy_965640ca94893d27c182c611e2.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ttl/objcopy_c068ee86eb127df13256bfbe45.o |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/external_sources/object_storage.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/f58beed9514e65def82ad7e2a5_raw.auxcpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/36583b0197800233ec9e3729e1_raw.auxcpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_compaction.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs__intpy3___pb2.py.p5ju.yapyc3 |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/async_writer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/b3824ec07381bf48b3130c7ba9_raw.auxcpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/unversioned_value.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue_cluster_discovery.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_dynamic_config_v1.{pb.h ... 
grpc.pb.h} |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_handshake.cpp |59.5%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_reader/contexts.h_serialized.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/columnar.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/modifications_validator.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/stream.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_table.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/batch_builder/builder.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_status_codes.pb.{h, cc} |59.6%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/read_balancer__balancing.h_serialized.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |59.6%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/common.h_serialized.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_continuous_backup/ut_continuous_backup.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_upload_rows.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_compactionstate.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_persqueue.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/address_classifier_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/circular_queue_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_scan.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/ingress/ut/ydb-core-blobstorage-vdisk-ingress-ut |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_streaming/grpc_streaming_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/btree_cow_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/queue_inplace_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/bits_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/concurrent_rw_hash_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_cost_tracker.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/cache_cache_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/event_priority_queue_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/page_map_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/fragmented_buffer_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/btree_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/fast_tls_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/hyperlog_counter_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/lz4_data_generator_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/cache_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/hazard_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/intrusive_heap_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/lf_stack_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/intrusive_stack_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/util/log_priority_mute_checker_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/interval_set_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/intrusive_fixed_hash_set_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/wildcard_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/ulid_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/token_bucket_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/operation_queue_priority_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/queue_oneone_inplace_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/simple_cache_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_handle_class.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/stlog_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/ui64id_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_drop.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_prepare_scheme.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/operation_queue_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/main.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/commands.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_run_query.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_prepare.cpp |59.6%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/datashard_active_transaction.h_serialized.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/public.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator/txallocator_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/blob_reader.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/remote_timestamp_provider.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hullsatisfactionrank.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/with_quotas/objcopy_245adf3e28f56e6467e034d9f2.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/with_quotas/objcopy_31d605682329607481eb568ed0.o |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator/txallocator_ut_helpers.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/with_quotas/objcopy_7648c2519d02b8456f762efc4b.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/log/tests/objcopy_2f7ac0f750374152d13c6bfbcf.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/log/tests/objcopy_854d6cc7a0cc5cdd793cfc1e6d.o |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/log/tests/ydb-tests-stress-log-tests |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/log/tests/objcopy_a926d3332cb769ac3e6c9e6e37.o |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas |59.7%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/granule/granule.h_serialized.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/read_actors_factory.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/yaml_config.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_formats.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/wire_row_stream.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_read_table.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/program/aggr_common.h_serialized.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/cms/audit_log.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/sessions.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/scheduler/operation_id_or_alias.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/program/aggr_common.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_node.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/node_broker/node_broker |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/control/ut/ydb-core-control-ut |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters.{pb.h ... grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/service.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/hulldb_bulksst_add.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/tracing/public.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ut_topic_splitmerge.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/libpy3node_broker.global.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/workload/libpy3stress-node_broker-workload.global.a |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/objcopy_2a9fba044b5f98d2ff5f5c7f44.o |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/threading/spin_wait_slow_path_logger.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_datasnap.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/resourcemanager/cloud.{pb.h ... grpc.pb.h} |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/simple_queue/simple_queue |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ut_cdc_stream_reboots.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/auto_config_initializer_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/accurate_accumulate/liblibrary-cpp-accurate_accumulate.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/libpy3simple_queue.global.a |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/objcopy_6c8bedcdc8efb835a928b278ce.o |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/ut_rw/ut_backup.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_state_load_plan.pb.{h, cc} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/unittests.pb.{h, cc} |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/util/ut/ydb-core-util-ut |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/ut_rw/ut_columnshard_read_write.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/idx_test/libpublic-lib-idx_test.a |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scan/kqp_flowcontrol_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_firstrun.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/process_columns.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/actors/protos/interconnect.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/assign_tx_id_ut.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/actors.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_reboots/ut_reboots.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_affinity.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cms.{pb.h ... grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/ut_rw/ut_normalizer.cpp |59.6%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/tools/protoc/plugins/cpp_styleguide/cpp_styleguide |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/ut/ut_reader.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/ydb/v1/backup_service.{pb.h ... grpc.pb.h} |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_storage_config.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/ut/ut_arrow.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/ut/ut_column_filter.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/ut/ut_hash.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/ut/ut_program_step.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/e6ce42a762195cf7e946ca411e_raw.auxcpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis_algo.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/stream_log_writer.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/load/ydb-tests-olap-load |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/statistics_workload/libpy3statistics_workload.global.a |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_serverless_reboots/ut_serverless_reboots.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/statistics_workload/objcopy_b4ebb94deb4cea673457b77fcc.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/blobstorage/objcopy_790c6ea4aad5e761d21421b25d.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/blobstorage/objcopy_16842d72ae0dac1856818f841e.o |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/statistics_workload/statistics_workload |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/load/objcopy_bfb03c74768170a0b82d2bf355.o |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/topic/topic_write_ut.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/load/objcopy_323a17e94d8d570989807d19d3.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/load/objcopy_347676f1cbc0086a238f181b11.o |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/topic/topic_write.cpp |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/load/objcopy_d78a45708fbb346ab43f2c1bb7.o |59.5%| [PY] {BAZEL_DOWNLOAD} 
$(B)/ydb/tests/tools/nemesis/ut/objcopy_927a1f7611cf94fb1cd21ef8cf.o |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/ut/objcopy_b06d27009e49b9ba3df883a226.o |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/blobstorage/objcopy_1c0f807c059fe226699115f242.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/ut/objcopy_c98e5b95c64b8486a12f10d408.o |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisfinder.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/tests/tpch/cmd_run_bench.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/linear_regression/liblibrary-cpp-linear_regression.a |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_config.cpp |59.6%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/src/common.h_serialized.cpp |59.6%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_vdisk_internal__intpy3___pb2.py.p5ju.yapyc3 |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/queue_transaction_mixin.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scan/kqp_point_consolidation_ut.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/src/proto/storage_meta.pb.{h, cc} |59.6%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/e6ce42a762195cf7e946ca411e_raw.auxcpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/autoconfig/objcopy_7c328c2741f9dd7697a2e0e8b1.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/autoconfig/objcopy_44fac4fe441507735704a000ad.o |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/autoconfig/objcopy_994fcbd53c4e2174c302bdb5ab.o |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scan/kqp_split_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/dispatcher.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/populator_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/autoconfig/ydb-tests-functional-autoconfig |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hullactor.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/table_upload_options.cpp |59.6%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/5732afb9e28f3811e12c561eb5_raw.auxcpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pqconfig.{pb.h ... grpc.pb.h} |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config__intpy3___pb2.py.p5ju.yapyc3 |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scan/kqp_scan_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_recovery.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base.{pb.h ... grpc.pb.h} |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrapper__intpy3___pb2.py{ ... 
i} |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrapper__intpy3___pb2.py.p5ju.yapyc3 |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap__intpy3___pb2.py.p5ju.yapyc3 |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_syncloghttp.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config__intpy3___pb2.py{ ... i} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_debug_v1.{pb.h ... grpc.pb.h} |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_health__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/local_ydb/objcopy_8d2ea3c78a255bb4c87c2fc54a.o |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/local_ydb/libpy3local_ydb.global.a |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_health__intpy3___pb2.py{ ... i} |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv__intpy3___pb2.py.p5ju.yapyc3 |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq__intpy3___pb2.py.p5ju.yapyc3 |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_tree_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeleton.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_health__intpy3___pb2.py.p5ju.yapyc3 |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fls.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/zstd.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/benchmark/main/libtesting-benchmark-main.global.a |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/tools/local_ydb/local_ydb |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/ydb-core-blobstorage-vdisk-hulldb-barriers-ut |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/benchmark/libcpp-testing-benchmark.a |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/google/api/annotations.{pb.h ... grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/5f8a81cdd9d2daa4eaa6ef0775_raw.auxcpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/82d705ad49a94ea30cd4594c50_raw.auxcpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/netclassifier.{pb.h ... grpc.pb.h} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_flags.{pb.h ... grpc.pb.h} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_coordination_v1.{pb.h ... grpc.pb.h} |59.6%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/msgbus__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_columnshard.{pb.h ... grpc.pb.h} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/log/proto/logger_config.pb.{h, cc} |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statistics.{pb.h ... grpc.pb.h} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/proto/auth.{pb.h ... 
grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_incremental_restore/ut_incremental_restore.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/resourcemanager/cloud_service.{pb.h ... grpc.pb.h} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/protos/task.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_types_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tiering/ut/ut_object.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_keyvalue.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rowset.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_shard/objcopy_8120ef49e7e653ed0601604313.o |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/tests/tpch/tpch |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_shard/objcopy_d3af02c7d57ea2cbbe5d381baa.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_shard/objcopy_f93c60b04a0499f2ec6880591a.o |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_analyze_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/main.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/grpc/persqueue.{pb.h ... grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/run.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/public.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/proxy.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/generator.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/comparator.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_barrier.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/protos/lwtrace.pb.{h, cc} |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config.{pb.h ... grpc.pb.h} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap.{pb.h ... grpc.pb.h} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/read_actors_factory.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/helpers.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_operation__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/control_plane_storage.pb.{h, cc} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/services_common.pb.{h, cc} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_dynamic_config.pb.{h, cc} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/query_stats.{pb.h ... grpc.pb.h} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.{pb.h ... 
grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/job_tracker_client/public.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/nodes_manager.pb.{h, cc} |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_explain_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/propagating_storage.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/actors.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/formats/arrow/ut/ut_dictionary.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_stats.{pb.h ... grpc.pb.h} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_cms.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/node_tracker_client/public.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/formats/arrow/ut/ydb-core-formats-arrow-ut |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/btree_benchmark/main.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query.pb.{h, cc} |59.6%| [PR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/expr_nodes/yql_expr_nodes.{gen.h ... defs.inl.h} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/gateways_config.pb.{h, cc} |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_stats_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_params_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_query_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/future.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_pdiskfit/ut/main.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/row_dispatcher.pb.{h, cc} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/checkpoint_coordinator.pb.{h, cc} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/issue_id.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_delayed_cost_loop.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_proxy.{pb.h ... 
grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_pdisk.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/current_invoker.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_overload_handler.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/noop_timestamp_provider.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_limits_ut.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/balance/balancing_actor.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_error_codes.pb.{h, cc} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/protos/connector.pb.{h, cc} |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |59.6%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/node_checkers.h_serialized.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/ydb_cli/topic/topic_read_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/node_checkers.h_serialized.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_writer.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/generated/codegen/codegen |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/external_sources.{pb.h ... grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/generated/codegen/main.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/s3_settings.{pb.h ... 
grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tiering/ut/ut_tiers.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/objcopy_6a5c78aa9f679a0920be5264fe.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/objcopy_b031a661ba244dffa03ab0c7ec.o |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/main.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/pg_ydb_connection.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/async_replication/objcopy_08a4b5d38a76e21591db0c3424.o |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/pg_ydb_proxy.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/pgwire.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/async_replication/objcopy_e2637cea0f2e4db109b364a246.o |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/async_replication/objcopy_f4b44a5d280d0f27f5ffd278e8.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/objcopy_d0255dda539959b69d421868a2.o |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/5a2f230528097042fdaf726fed_raw.auxcpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/objcopy_96b8686cd075e874d95d4aa5c5.o |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/13360e4ecdf34efe6c3a817a44_raw.auxcpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/objcopy_cf3971576aced18377e99f5367.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/objcopy_7c81cbfa6b5ce112674cb0a849.o |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/plans/ydb-tests-fq-plans |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/objcopy_e2acb41e7099c0db4fe54a1587.o |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_object_storage.pb.{h, cc} |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/large/objcopy_04f2935f3ada8eb9d01ebaba6b.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/large/objcopy_28f172e1aa977d907bdfa0a81b.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/large/objcopy_6af7a7ce8a1ee5e67d75a2978a.o |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/suspendable_action_queue.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_sst.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/validation/auth_config_validator.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/retrying_periodic_executor.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_2194854d9f8cbb3e0ba798b861.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_703c8e1d9a9a2b271b8b995a29.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_bac05c8b5a79735451f58d9322.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_52e86d5ee8fadefdbb415ca379.o |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects |59.7%| [PB] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/api/client/yc_private/iam/service_account.{pb.h ... grpc.pb.h} |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_912038ceef7de48e0e15c25307.o |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/iam_token_service.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_replication.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/iam_token_service_subject.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/blob_output.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/objcopy_1f78e7638ae0f2e308bd7331f9.o |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/protobuf_helpers.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/objcopy_f4efacd00293c5fe09c3f84a62.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/objcopy_988cc467d4da79de606ebf50ee.o |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/reconfig_state_storage_workload/workload/libpy3stress-reconfig_state_storage_workload-workload.global.a |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/vector_index/large/ydb-tests-datashard-vector_index-large |59.7%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/provider/yql_kikimr_expr_nodes.{gen.h ... defs.inl.h} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/helpers.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/slab_allocator.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/error.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/authentication_identity.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/stress-reconfig_state_storage_workload-tests |59.7%| [PR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/expr_nodes/yql_yt_expr_nodes.{gen.h ... defs.inl.h} |59.7%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/expr_nodes/dqs_expr_nodes.{gen.h ... defs.inl.h} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/local_address.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_update_downtimes.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_auth_v1.{pb.h ... grpc.pb.h} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/util/btree_benchmark/btree_benchmark |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/tpc/large/ydb-tests-functional-tpc-large |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/key_bound.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_merger.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/walle_remove_task_adapter.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/discovery/discovery.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/grpc/api.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq.{pb.h ... 
grpc.pb.h} |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/script_execution/objcopy_1aeeb50f676472f975830c135d.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/script_execution/objcopy_bcbbd2d8f2367d5f3ed5199234.o |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/caching_channel_factory.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/script_execution/objcopy_f05ead59375a9db120b95dd730.o |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/servicecontrol/access_service.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/info_collector.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_blob_depot.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ephemeral_attribute_owner.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_helpers.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/formats/arrow/program/execution.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/row_batch.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/minikql_compile/compile_result.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_rate_limiter_v1.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmsgreader.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_sequence/ut_sequence.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_change_exchange.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/cypress_client/public.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/sentinel.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/writer.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/accessservice/sensitive.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/row_dispatcher.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream/ut_cdc_stream.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console.{pb.h ... 
grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/1fe825be1cf65e9693ebce9341_raw.auxcpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache__intpy3___pb2.py.p5ju.yapyc3 |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/18547f9635bde59b5840161212_raw.auxcpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/checkpoint_coordinator.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/console.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot_config.{pb.h ... grpc.pb.h} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_logreader.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/scheduler/spec_patch.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceproxy/sequenceproxy_ut.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_proto_split/SQLv1Parser.pb.{code0.cc ... main.h} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_statdb.cpp |59.7%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/msgbus.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_metadata.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/configs_cache_ut.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/stream.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_hullrepljob.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/protos/events.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/transaction_impl.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/ypath/parser_detail.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/delegating_client.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/yson_builder.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_pq_old.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/workload_manager_config.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/feature_flags_configurator_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/cache_block/cache_block.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/ydb/ut/ydb_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hulldb_bulksstmngr.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_resolver.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/services.{pb.h ... 
grpc.pb.h} |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/ut/objcopy_6508d12aaafde6f0a60fe8fff3.o |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/configs_dispatcher_ut.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/ut/objcopy_cd9abca883cad9b25e20bf2f08.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/ut/objcopy_bd84885c5c24478d181ba9e493.o |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/jaeger_tracing_configurator_ut.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_result_set_old.{pb.h ... grpc.pb.h} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/library/ut/ydb-tests-library-ut |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/net_classifier_updater_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/common_ut.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__update_last_provided_config.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/config_helpers.cpp |59.7%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/tools/protoc/plugins/grpc_cpp/grpc_cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_bridge_v1.{pb.h ... grpc.pb.h} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_pdiskfit/ut/ydb-core-blobstorage-ut_pdiskfit-ut |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/log_settings_configurator_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_ut_tenants.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__get_yaml_metadata.cpp |59.7%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/compute_actor/kqp_compute_state.h_serialized.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/client.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/codecs_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmsgwriter_ut.cpp |59.6%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/provider/yql_kikimr_provider.h_serialized.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_fsm.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_ut_configs.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmem_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_extr.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__remove_computational_units.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmsgimpl_ut.cpp |59.7%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/8760973cadf4aa816a9d37589f_raw.auxcpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_scheduler.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/modifications_validator_ut.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/private_proxy.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_rewriter.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/immediate_controls_configurator_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vmovedpatch_actor.cpp |59.7%| 
[CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_mon.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config.{pb.h ... grpc.pb.h} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key.{pb.h ... grpc.pb.h} |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclog.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/console_dumper.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/node_checkers.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine__intpy3___pb2.py{ ... i} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon__intpy3___pb2.py{ ... i} |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon__intpy3___pb2.py.p5ju.yapyc3 |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats.{pb.h ... grpc.pb.h} |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/base.{pb.h ... grpc.pb.h} |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine__intpy3___pb2.py.p5ju.yapyc3 |59.7%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/msgbus__intpy3___pb2.py.p5ju.yapyc3 |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/metrics__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/a173ecb5b713824a01f21a39dc_raw.auxcpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/internal_client.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/timestamped_schema_helpers.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/protos/blobstorage_config.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_syncfullhandler.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/services/bg_tasks/protos/container.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/attribute_consumer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_pq_read_session_info.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/test/objcopy_0035b673555f394234ae284e25.o |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/blobstorage_hullwritesst_ut.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/test/objcopy_45b6981aed17dda33d43217f52.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/test/objcopy_9818d2b70aad7db98a0f9c044c.o |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_stats.{pb.h ... grpc.pb.h} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export.{pb.h ... 
grpc.pb.h} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/ydb-core-blobstorage-vdisk-hulldb-generic-ut |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_scheme_initroot.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_cms.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/merge_complex_types.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_root.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/config.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_status_codes.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/config.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_status_codes.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base__intpy3___pb2.py{ ... i} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/yql_mount.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/protos/issue_id.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_service_config.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/coroutine.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_http_server.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/issue_severity.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_pdisk.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/bridge/objcopy_de8e7bde61396640f718e89d07.o |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pqconfig.{pb.h ... grpc.pb.h} |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/bridge/objcopy_0adb3ed6d98cbd98d13d8a3085.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/bridge/objcopy_4b2ec656f7e85bc05586d7e6fc.o |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/audit.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_pq_metacache.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/load.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__toggle_config_validator.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/zlib.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/yaml_config_helpers.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/bridge/ydb-tests-functional-bridge |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/config.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_util.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_object_storage_v1.{pb.h ... 
grpc.pb.h} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/replication_card_cache.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/base_utils/format_util.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/profiling/timing.cpp |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_queue_scheduler_thread.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/codicil.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/zerocopy_output_writer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/helpers.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_tablet.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/load_test.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/reference.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/context.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/converter.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sysview_processor.{pb.h ... grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_pdisk.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/lexer.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/validators/core_validators.cpp |59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sst_it_all_ut.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_import_v1.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/protos/cursor.pb.{h, cc} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_tablet_counters.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/type_def.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_committer.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdsk_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_mon.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_ut.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore/ydb-core-tx-schemeshard-ut_incremental_restore |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/unordered_schemaful_reader.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_load_state.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/02d68a63e2a652986bebc94975_raw.auxcpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdata_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replproxy.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/cancel_tx_ut.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base3.{pb.h ... 
grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_tables_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/rm_service/kqp_rm_ut.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/http_proxy/ut/objcopy_5fddfa8f171a3216cad65e02ab.o |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_actors_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/json_proto_conversion_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/locks_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hull.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/walle_check_task_adapter.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/ut_large.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/donor.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/flat_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sst.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/object_storage_listing_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/ut_common.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/operations.{pb.h ... grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attributes.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/util.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/ydb-core-blobstorage-vdisk-synclog-ut |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/dc048c91e67372877fc6ad2dfc_raw.auxcpp |59.7%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/schemeshard_info_types.h_serialized.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_5865a174a6c25ca1a2d6386702.o |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/opentelemetry/proto/trace/v1/trace.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme.{pb.h ... 
grpc.pb.h} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_c02c3d9f840d02af9fad858a55.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_5db899a01c2ec6f53648af6840.o |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_bfa810e70cd1de18c5d4a18a62.o |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_00c87b13e2f685811a9825079d.o |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__create_tenant.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hulllogcutternotify.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_scheme_initroot.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap__intpy3___pb2.py{ ... i} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_coordination.pb.{h, cc} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_maintenance_v1.{pb.h ... grpc.pb.h} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/proto/source.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/health_config.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/compute.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_backup_v1.{pb.h ... grpc.pb.h} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_essence.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq__intpy3___pb2.py{ ... i} |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/osiris.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/fs.cpp |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/rate_limiter.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/public.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/client_common.cpp |59.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_operation__intpy3___pb2.py.p5ju.yapyc3 |59.7%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/queue_schema.h_serialized.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/slow/pq_ut.cpp |59.7%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/tools/protoc/protoc |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_remove_task.cpp |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/data_quotas/ydb-tests-olap-data_quotas |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/type_info.{pb.h ... 
grpc.pb.h} |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/s3_import/large/ydb-tests-olap-s3_import-large |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/multi_plane/ydb-tests-fq-multi_plane |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/data_quotas/objcopy_4b2e093abff756c97b675c0a31.o |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/data_quotas/objcopy_a6e393b6d53f4c73feac80b55c.o |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical.{pb.h ... grpc.pb.h} |59.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/data_quotas/objcopy_89b3e69f7cdba68b4eefcae48c.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/encryption/objcopy_93dc3386250916dfae1ecb9b13.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/multi_plane/objcopy_b8aa61f402be805d2e3e9e75a2.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/encryption/objcopy_3d6916930a438b51675ef6dda7.o |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_validate_config.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/encryption/objcopy_64cecb639c5f85fbf868097a08.o |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream |59.6%| {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/protos/config.proto.{desc, 236947a227eabf309dc2ce63434b3df8.rawproto} |59.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/resource_pools/ut/ydb-core-resource_pools-ut |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_kv_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/minikql_compile/mkql_compile_service.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/canonical/ydb-tests-functional-canonical |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/balancing.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_extract_predicate_unpack_ut.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/large/objcopy_4943008ec342eed836b4112777.o |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/ut_with_sdk/mirrorer_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_dblogcutter.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replrecoverymachine_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/ut_with_sdk/balancing_ut.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/canonical/objcopy_065e9244d685c2b8f0ab66e414.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/canonical/objcopy_17cef60c2dd0eb7ea46181ba87.o |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/slow/autopartitioning_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_not_null_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_sqlin_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/ut_with_sdk/autoscaling_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_ranges_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_ne_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/kqp/ut/opt/kqp_sort_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/ut_with_sdk/commitoffset_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_named_expressions_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/node_broker/tests/ydb-tests-stress-node_broker-tests |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_merge_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_agg_ut.cpp |59.6%| [PD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/protos/yaml-config-protos.{self.protodesc, protosrc} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/kqp_mock.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_hullreplwritesst_ut.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/tests/objcopy_953328e5c3275a286b65dc3b1d.o |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/actors/ut/database_resolver_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/ut_with_sdk/topic_ut.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/tests/objcopy_d2d4e3343da9b011ee6a983244.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/split_merge/objcopy_b783a1a2aacb855daa1e55fad6.o |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan |59.6%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/feature_flags.{pb.h ... grpc.pb.h} |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/split_merge/objcopy_5accfe00d45fb7ebcc30e116b2.o |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/functions_executor_wrapper.cpp |59.6%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/feature_flags__intpy3___pb2.py{ ... i} |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/canonical/objcopy_461999da7ba13deab5689c18ec.o |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/split_merge/ydb-tests-datashard-split_merge |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/test/tool/surg/surg |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_returning_ut.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_65ac58c27d43a55d0ea4eda626.o |59.6%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/blobstorage_vdisk_internal.{pb.h ... grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_user_attributes/ut_user_attributes.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_81b7879c9cfa37bdcf437f5ff4.o |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/compatibility/libpy3tests-library-compatibility.global.a |59.6%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/protos/config__intpy3___pb2.py{, i} |59.6%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/serverless_proxy_config__intpy3___pb2.py{ ... 
i} |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_b7f5600f224f7d7aa608ada59e.o |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/etcd_proxy/etcd_proxy |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_9beede1c5ddb1a5202bb8125bf.o |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/backup/impl/local_partition_reader_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/compatibility/ydb-tests-compatibility |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/kqp_olap_stats_ut.cpp |59.6%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/console__intpy3___pb2.py{ ... i} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scheme/kqp_acl_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scheme/kqp_constraints_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/server.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/ut/helper.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/defrag/ut/ydb-core-blobstorage-vdisk-defrag-ut |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/dynamic_config/dynamic_config_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/long_tx_service/public/ut/ydb-core-tx-long_tx_service-public-ut |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/main.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scheme/kqp_scheme_ut.cpp |59.6%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/console.{pb.h ... grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/test/tool/surg/main.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/actor_activity_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scheme/kqp_secrets_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/locks_ut.cpp |59.6%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/protos/config.pb.{h, cc} |59.5%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2.py{ ... i} ------- [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/grpc__intpy3___pb2.py{ ... i} ydb/core/protos/grpc.proto:6:1: warning: Import ydb/core/protos/msgbus_kv.proto is unused. |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut |59.6%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/blobstorage_vdisk_internal__intpy3___pb2.py{ ... i} |59.6%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/console_config__intpy3___pb2.py{ ... i} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/indexes_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/repl/ut/ydb-core-blobstorage-vdisk-repl-ut |59.6%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/msgbus.{pb.h ... grpc.pb.h} |59.6%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/console_config.{pb.h ... 
grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/aggregations_ut.cpp |59.6%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/grpc.{pb.h ... grpc.pb.h} |59.6%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/feature_flags__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/compaction_ut.cpp |59.6%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/11a9f7a6b79b2ef13b0ccdb626_raw.auxcpp |59.6%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/protos/config__intpy3___pb2.py.siec.yapyc3 |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/hive_metastore/ut/hive_metastore_client_ut.cpp |59.5%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/serverless_proxy_config__intpy3___pb2.py.p5ju.yapyc3 |59.5%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/feature_flags__intpy3___pb2.py.p5ju.yapyc3 |59.5%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/blobstorage_distributed_config.{pb.h ... grpc.pb.h} |59.5%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/console__intpy3___pb2.py.p5ju.yapyc3 |59.5%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/protos/4306a854d105ac9e8a68bf91ca_raw.auxcpp |59.6%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/console__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.6%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/serverless_proxy_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/persqueue_common_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_fixture.cpp |59.6%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/d8a0ba19a8c38a9b8166f51a9b_raw.auxcpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/hive_metastore/ut/common.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/demo_tx.cpp >> ResourcePoolClassifierTest::StringSettingsParsing [GOOD] >> ResourcePoolTest::SecondsSettingsParsing [GOOD] >> ResourcePoolClassifierTest::IntSettingsParsing [GOOD] >> ResourcePoolClassifierTest::SettingsExtracting [GOOD] >> ResourcePoolTest::IntSettingsParsing [GOOD] >> ResourcePoolTest::SettingsExtracting [GOOD] >> ResourcePoolTest::SettingsValidation [GOOD] >> ResourcePoolTest::PercentSettingsParsing [GOOD] >> ResourcePoolClassifierTest::SettingsValidation [GOOD] |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/kqp_olap_ut.cpp |59.6%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/9270b323afac6e83cea5bf4aa4_raw.auxcpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/hive_metastore/libcore-external_sources-hive_metastore.a |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/mkql_engine_flat_host_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/mkql_proto_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/kikimr_program_builder_ut.cpp |59.6%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/494cbbb0dc8b7c9860714b57e7_raw.auxcpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |59.6%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.6%| [PY] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2.py.p5ju.yapyc3 |59.6%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/grpc__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/multi_plane/objcopy_d23500649301df2a8de48ba70d.o |59.5%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/d52903a0693870f83d0bbe0ab8_raw.auxcpp |59.6%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/blobstorage_vdisk_internal__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.6%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/blobstorage_vdisk_internal__intpy3___pb2.py.p5ju.yapyc3 |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/pgwire/pgwire |59.6%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/grpc__intpy3___pb2.py.p5ju.yapyc3 |59.6%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/e6ce42a762195cf7e946ca411e_raw.auxcpp |59.6%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/e9ba3ee2f0ee1966e63998b143_raw.auxcpp |59.6%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/console_config__intpy3___pb2.py.p5ju.yapyc3 |59.6%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/dbc97ea0b10e1a7b45035ce1e1_raw.auxcpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_ut.cpp |59.5%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/console_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/partition_stats/partition_stats_ut.cpp |59.5%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/afa8c81460609cfd6247d9dbf7_raw.auxcpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/actor_tracker_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/persqueue_compat_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/pqtablet_mock.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/topic_service_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/ut_selfheal/main.cpp |59.6%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/config.{pb.h ... 
grpc.pb.h} |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_stats/ut_stats.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/test_interconnect_ut.cpp |59.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/resource_pools/ut/unittest >> ResourcePoolClassifierTest::SettingsValidation [GOOD] |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/base/ptr_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/persqueue_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator_volatile_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/dst_creator_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/base/batched_vec_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/base/bufferwithgaps_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator_ut.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/hive_metastore/hive_metastore_native/libexternal_sources-hive_metastore-hive_metastore_native.a |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/multi_plane/objcopy_c65a9d5efe13dc05c1466090ba.o |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/large/objcopy_363cd92f1d4b79ca063627ba22.o |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/large/objcopy_d305a8a4fbc1702039f0202072.o |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/actor_bootstrapped_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/long_tx_service/public/types_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/test_protocols_ut.cpp |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/split_merge/objcopy_93665db601a12d4842de4565e2.o |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/topic_yql_ut.cpp |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/tests/objcopy_d0e1cde98d2ab34e72d18aae9c.o |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/compression_ut.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/rpc_calls_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/operation_helpers_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_actor_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/ut/ut_program.cpp |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/ut/objcopy_1d0482d354dc270d18e7123281.o |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/make_config.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/datatime64_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/microseconds_sliding_window_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/decimal_ut.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/base/ut/ydb-core-blobstorage-base-ut |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/engine/mkql_engine_flat_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/internals_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/cache_eviction_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/type_codecs_ut.cpp |59.5%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/protos/4306a854d105ac9e8a68bf91ca_raw.auxcpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/json_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(S)/ydb/core/kqp/ut/olap/sparsed_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/service/worker_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_schema.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/ut_kqp.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/dictionary_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/ut_counters.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/external_sources/hive_metastore/ut/hive_metastore_fetcher_ut.cpp |59.4%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/11a9f7a6b79b2ef13b0ccdb626_raw.auxcpp |59.4%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/d8a0ba19a8c38a9b8166f51a9b_raw.auxcpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/statistics_ut.cpp |59.5%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/9270b323afac6e83cea5bf4aa4_raw.auxcpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/ut_labeled.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/write_ut.cpp |59.5%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/494cbbb0dc8b7c9860714b57e7_raw.auxcpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/optimizer_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/ut/ut_script.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/tiering_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/subscriber_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/clickbench_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/partitiongraph_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_ext_blobs_multiple_channels.cpp |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_restore_scan.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/sys_view_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/partition_end_watcher_ut.cpp |59.5%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/d52903a0693870f83d0bbe0ab8_raw.auxcpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/conveyor_composite/ut/ut_simple.cpp |59.4%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/e6ce42a762195cf7e946ca411e_raw.auxcpp |59.4%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/e9ba3ee2f0ee1966e63998b143_raw.auxcpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/ut_common.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/quota_tracker_ut.cpp |59.4%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/dbc97ea0b10e1a7b45035ce1e1_raw.auxcpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/ut_selfheal/self_heal_actor_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/client/server/msgbus_server_pq_metarequest_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/metering_sink_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/utils_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/encrypted_storage_ut.cpp |59.4%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/a14bb0205af400981ce9e41033_raw.auxcpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/blobs_sharing_ut.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/common.pb.{h, cc} |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/credentials.pb.{h, cc} |59.5%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/afa8c81460609cfd6247d9dbf7_raw.auxcpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/snappy.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_config.{pb.h ... grpc.pb.h} |59.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/solomon/ydb-tests-fq-solomon |59.4%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/config__intpy3___pb2.py.p5ju.yapyc3 |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_background_cleaning/ut_background_cleaning.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/operations.{pb.h ... grpc.pb.h} |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/first_class_src_ids_ut.cpp |59.5%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/public_http/http_router_ut.cpp |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.cpp |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units__intpy3___pb2.py.p5ju.yapyc3 |59.5%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/config__intpy3___pb2.py{ ... i} |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units__intpy3___pb2.py{ ... i} |59.4%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/olap_workload |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_coordination.pb.{h, cc} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config__intpy3___pb2.py{ ... i} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/public.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_restart.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/cancelation/protos/event.pb.{h, cc} |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_auth.pb.{h, cc} |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/service/ut/ut_basic_statistics.cpp |59.5%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/nemesis |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/clickhouse.pb.{h, cc} |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/client_cache.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/service/ut/ut_column_statistics.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_s3_plan_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/service/ut/ut_http_request.cpp |59.4%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/serverless_proxy_config.{pb.h ... 
grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/delete_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_scheme_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/table_mount_cache_detail.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_stream.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv__intpy3___pb2.py{ ... i} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/counters_ut.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/iam_token.{pb.h ... grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/s3/s3_recipe_ut_helpers.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/partition_chooser_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_counters_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tenants_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_query_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/ut_helpers.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/solomon/actors/ut/ut_helpers.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/backup_ut/backup_path_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cluster_info_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_filestore_reboots/ut_filestore_reboots.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/pipe_tracker_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_metrics_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_rs.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_ut_common.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/logical_type.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/stress_tool/proto/libtools-stress_tool-proto.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_scan_data_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/backup_ut/list_objects_in_s3_export_ut.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/solomon/actors/ut/dq_solomon_write_actor_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/scheduler/old/kqp_compute_scheduler_ut.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/stress_tool/lib/libydb_device_test.a |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/options.cpp |59.4%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/msgbus__intpy3___pb2.py{ ... i} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/list_all_topics_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/downtime_ut.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_keyvalue_v1.{pb.h ... 
grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/file_log_writer.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/profiling_helpers.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot.{pb.h ... grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_log.{pb.h ... grpc.pb.h} |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_common.pb.{h, cc} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/fetch_request_ut.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pdiskfit.{pb.h ... grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/time_text.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/object_client/helpers.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/dictionary_codec.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/helpers.cpp |59.4%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/config__intpy3___pb2.py{ ... i} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/show_create/view/show_create_view |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_parser.cpp |59.4%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/statistics_workload |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/rate_limiter.pb.{h, cc} |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_sysview/ut_sysview.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/cancelation_token.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/pqtablet_ut.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/libpy3show_create_view.global.a |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/objcopy_9ccdc4f01b578a43bc35d4d519.o |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/backup_ut/ydb_backup_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/sourceid_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/object_client/public.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_maintenance_api_ut.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/base.{pb.h ... grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/consumer.cpp |59.5%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/serverless_proxy_config.{pb.h ... grpc.pb.h} |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/operation_client.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__replace_yaml_config.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/parser_helpers.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_config_v1.{pb.h ... 
grpc.pb.h} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/row_batch_writer.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_replication/ut_replication.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/config.cpp |59.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/response_keeper.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/pool_allocator.cpp |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/bzip2.cpp |59.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/db_pool.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/client.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/composite_compare.cpp |59.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_trace.cpp |59.9%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/msgbus__intpy3___pb2.py{ ... i} |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk |60.0%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/feature_flags.grpc.pb.cc |60.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_external_table_reboots/ut_external_table_reboots.cpp |60.1%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/config__intpy3___pb2.py.p5ju.yapyc3 |60.2%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/config__intpy3___pb2_grpc.py.p5ju.yapyc3 |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_log.cpp |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_minstep.cpp |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/user_info_ut.cpp |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/yql/kqp_pragma_ut.cpp |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/yql/kqp_yql_ut.cpp |60.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/backup_ut/encrypted_backup_ut.cpp |60.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/result_formatter/result_formatter_ut.cpp |61.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replmonhandler.cpp |61.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/pq_ut.cpp |61.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/write.cpp |61.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/health_check/health_check_ut.cpp |61.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/pqrb_describes_ut.cpp |61.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow |61.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/pqtablet_mock.cpp |62.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/partition_ut.cpp |62.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/table_creator/table_creator_ut.cpp |62.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_fill_node.cpp |62.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/yql/kqp_scripting_ut.cpp |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_counters_aggregator_ut.cpp |63.1%| [PY] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/sdk/cpp/src/client/topic/ut/objcopy_1406195445f45d950dda89fcd8.o |63.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_get_log_tail.cpp |63.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut |63.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__update_tenant_pool_config.cpp |64.2%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/feature_flags.grpc.pb.cc |64.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/batch_builder/restore.cpp |64.2%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/d8a0ba19a8c38a9b8166f51a9b_raw.auxcpp |64.2%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/494cbbb0dc8b7c9860714b57e7_raw.auxcpp |64.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/ut/ydb-core-client-ut |64.2%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/afa8c81460609cfd6247d9dbf7_raw.auxcpp |64.2%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/dbc97ea0b10e1a7b45035ce1e1_raw.auxcpp |64.2%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/9270b323afac6e83cea5bf4aa4_raw.auxcpp |64.2%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/11a9f7a6b79b2ef13b0ccdb626_raw.auxcpp |64.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/e9ba3ee2f0ee1966e63998b143_raw.auxcpp |64.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/e6ce42a762195cf7e946ca411e_raw.auxcpp |64.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/feature_flags.grpc.pb.cc |64.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/d52903a0693870f83d0bbe0ab8_raw.auxcpp |64.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/protos/4306a854d105ac9e8a68bf91ca_raw.auxcpp |64.3%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/d8a0ba19a8c38a9b8166f51a9b_raw.auxcpp |64.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/feature_flags.pb.cc |64.3%| {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/protos/config.proto.{desc, 236947a227eabf309dc2ce63434b3df8.rawproto} |64.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc |64.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/generated/codegen/main.cpp |64.3%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/serverless_proxy_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |64.3%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/serverless_proxy_config__intpy3___pb2.py.p5ju.yapyc3 |64.3%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/console__intpy3___pb2.py.p5ju.yapyc3 |64.3%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/config__intpy3___pb2.py.p5ju.yapyc3 |64.3%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/console_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |64.3%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/feature_flags__intpy3___pb2.py.p5ju.yapyc3 |64.3%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/afa8c81460609cfd6247d9dbf7_raw.auxcpp |64.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/mem_alloc/objcopy_12d01741952bd4afa836364d84.o |64.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/mem_alloc/objcopy_15e284a8ecb30c90903e842e70.o |64.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/bootstrapper_ut.cpp |64.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/mem_alloc/objcopy_cee1e02beaf827051149b5ca30.o |64.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber |64.4%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/console.{pb.h ... 
grpc.pb.h} |64.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/stress_tool/device_test_tool.cpp |64.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_pipecache_ut.cpp |64.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/replica_ut.cpp |64.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker |64.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |64.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/walle_list_tasks_adapter.cpp |64.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_pipe_ut.cpp |64.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |64.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/init/init_noop.cpp |64.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/tools/stress_tool/proto/device_perf_test.{pb.h ... grpc.pb.h} |65.0%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/dbc97ea0b10e1a7b45035ce1e1_raw.auxcpp |65.0%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/console_config__intpy3___pb2.py.p5ju.yapyc3 |65.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart |65.1%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/e6ce42a762195cf7e946ca411e_raw.auxcpp |65.2%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/e9ba3ee2f0ee1966e63998b143_raw.auxcpp |65.2%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/grpc__intpy3___pb2.py.p5ju.yapyc3 |65.3%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_internal__intpy3___pb2_grpc.py.p5ju.yapyc3 |65.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/resource_broker_ut.cpp |65.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator_ext_blobs.cpp |65.4%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/d52903a0693870f83d0bbe0ab8_raw.auxcpp |65.5%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/grpc__intpy3___pb2_grpc.py.p5ju.yapyc3 |65.6%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2.py.p5ju.yapyc3 |65.6%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/config__intpy3___pb2.py{ ... i} |65.6%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |65.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_resolver_ut.cpp |65.7%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/494cbbb0dc8b7c9860714b57e7_raw.auxcpp |65.8%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/9270b323afac6e83cea5bf4aa4_raw.auxcpp |65.8%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_internal__intpy3___pb2.py{ ... i} |65.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_async.cpp |66.1%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/serverless_proxy_config.{pb.h ... grpc.pb.h} |66.1%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/feature_flags__intpy3___pb2.py{ ... i} |66.3%| [PB] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/protos/config.pb.{h, cc} |66.3%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/grpc__intpy3___pb2.py{ ... i} |66.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/walle_create_task_adapter.cpp |66.4%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/11a9f7a6b79b2ef13b0ccdb626_raw.auxcpp |66.6%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/feature_flags__intpy3___pb2_grpc.py.p5ju.yapyc3 |66.6%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_internal__intpy3___pb2.py.p5ju.yapyc3 |66.6%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/feature_flags.{pb.h ... 
grpc.pb.h} |66.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/stress_tool/ydb_stress_tool |66.7%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config.{pb.h ... grpc.pb.h} |66.8%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/grpc.{pb.h ... grpc.pb.h} |66.9%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/console__intpy3___pb2_grpc.py.p5ju.yapyc3 |66.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |67.0%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/console_config__intpy3___pb2.py{ ... i} |67.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme |67.0%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2.py{ ... i} |67.2%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/config__intpy3___pb2_grpc.py.p5ju.yapyc3 |67.2%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/feature_flags.pb.cc |67.2%| [PR] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/protos/4306a854d105ac9e8a68bf91ca_raw.auxcpp |67.5%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/serverless_proxy_config__intpy3___pb2.py{ ... i} |67.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/generated/codegen/main.cpp |67.6%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/msgbus.{pb.h ... grpc.pb.h} |67.6%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/feature_flags.pb.cc |67.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_req_blockbs_ut.cpp |67.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vmultiput_actor.cpp |67.8%| [PB] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/protos/config__intpy3___pb2.py{, i} |67.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/target_discoverer_ut.cpp |67.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/ticket_parser_ut.cpp |67.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator.cpp |67.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/generated/codegen/main.cpp |67.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/cms/cms_ut.cpp |68.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |68.1%| [PD] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/protos/yaml-config-protos.{self.protodesc, protosrc} |68.1%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/msgbus__intpy3___pb2.py{ ... i} |68.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__remove_tenant.cpp |68.2%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_internal.{pb.h ... grpc.pb.h} |68.3%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/console__intpy3___pb2.py{ ... i} |68.3%| [PY] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/protos/config__intpy3___pb2.py.siec.yapyc3 |68.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/base/action.cpp |68.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__revert_pool_state.cpp |68.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_base_reboots/ut_base_reboots.cpp |68.6%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/config.{pb.h ... grpc.pb.h} |68.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/ut/ut_utils.cpp |68.9%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/console_config.{pb.h ... 
grpc.pb.h} |69.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_data_erasure/ut_data_erasure.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/base/action.cpp |69.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_4fdbe64ce62f955927d10364b5.o |69.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_c5a20cdd9533abc10e82efdd1a.o |69.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_e3bb1c534d69f237b55dd8dfe7.o |69.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/38dcacd12926621ca72e30ce1b_raw.auxcpp |69.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_b08299d456f3448b368e814cb8.o |69.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_fdd48fc620c42f480ae38b77f5.o |69.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_dae5a42f53b4f98bf1b9fd8118.o |69.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_1339ee5ef04af3a5a49d43a6c9.o |69.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_7a185a4b35de7733fde931d298.o |69.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_6b8c453743f8fd2c5380af70c6.o |69.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/base/queue_attributes.cpp |69.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/base/queue_attributes.cpp |70.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/yds/ydb-tests-fq-yds |70.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut |70.1%| [AR] {default-linux-x86_64, release, asan} $(B)/library/cpp/svnversion/liblibrary-cpp-svnversion.a |70.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/simple_queue/simple_queue |70.4%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/serverless_proxy_config.grpc.pb.cc |70.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/circlebuf_ut.cpp |70.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/circlebufstream_ut.cpp |70.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_outofspace_ut.cpp |70.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_lsnmngr_ut.cpp |70.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/circlebufresize_ut.cpp |70.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_syncneighbors_ut.cpp |70.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/memusage_ut.cpp |70.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/cms/ut/ydb-services-cms-ut |70.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/persqueue_common_new_schemecache_ut.cpp |71.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |71.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/simple_queue/simple_queue |71.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut |71.1%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/serverless_proxy_config.grpc.pb.cc |71.2%| [AR] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/liblibrary-cpp-build_info.a |71.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/ut/helpers/libmkql_proto-ut-helpers.a |71.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan |71.4%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt |71.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |71.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/ut/shard_ut.cpp |72.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/ut_vector_index_build_reboots.cpp |72.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/service/kqp_qs_queries_ut.cpp |72.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots |72.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap |72.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/service/kqp_service_ut.cpp |72.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut |72.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/persqueue_new_schemecache_ut.cpp |72.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_pdisk_error_ut.cpp |72.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage |72.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview |72.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_columnshard.cpp |72.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema |72.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_config_ut.cpp |72.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut |72.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut |72.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning |73.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |73.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/service/kqp_qs_scripts_ut.cpp |73.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/other/mon_vdisk_stream.cpp |73.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots |73.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/conveyor_composite/ut/ydb-core-tx-conveyor_composite-ut |73.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/service/kqp_document_api_ut.cpp |73.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/other/libcore-blobstorage-other.a |73.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |73.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_columnshard.cpp |73.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/common/ut/ydb-core-blobstorage-vdisk-common-ut |73.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_datashard.cpp |73.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/other/mon_vdisk_stream.cpp |73.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace |73.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/viewer_ut.cpp |73.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_datashard.cpp |73.5%| 
[LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |73.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/topic_data_ut.cpp |73.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |73.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |73.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/stress_tool/device_test_tool_ut.cpp |73.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication |73.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/other/libcore-blobstorage-other.a |73.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |73.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep |73.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_ut.cpp |73.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader |73.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/public_http/http_req.cpp |73.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ut_export_reboots_s3.cpp |73.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |73.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_read.cpp |73.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/public_http/http_req.cpp |73.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_read.cpp |73.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/show_create/view/show_create_view |73.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/show_create/view/show_create_view |73.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut |73.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut |73.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator |73.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |74.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_olap/ut_olap.cpp |74.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut |74.1%| [AR] {default-linux-x86_64, release, asan} $(B)/yt/yt/client/libyt-yt-client.a |74.2%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yt/yt/client/libyt-yt-client.a |74.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_recovery_read_log.cpp |74.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_recovery_read_log.cpp |74.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_recovery_scan.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/config/validation/column_shard_config_validator.cpp |74.5%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/validators/core_validators.cpp |74.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/cdc/cdc |74.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/cdc/cdc |74.6%| [AR] {default-linux-x86_64, release, asan} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.a |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/config/validation/column_shard_config_validator.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_recovery_scan.cpp |74.6%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.a |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/validators/core_validators.cpp |74.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/olap_workload/olap_workload |74.8%| [AR] {default-linux-x86_64, release, asan} $(B)/yql/essentials/minikql/comp_nodes/llvm16/libminikql-comp_nodes-llvm16.a |74.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/olap_workload/olap_workload |74.8%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yql/essentials/minikql/comp_nodes/llvm16/libminikql-comp_nodes-llvm16.a |74.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator_client/actor_client_ut.cpp |75.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut |75.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator_client/ut_helpers.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_log.cpp |75.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_log.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/serverless_proxy_config.pb.cc |75.2%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/serverless_proxy_config.pb.cc |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/formats/arrow/serializer/native.cpp |75.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/transfer/transfer |75.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/kqprun/tests/ydb-tests-tools-kqprun-tests |75.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/node_broker/node_broker |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_gclogic_ut.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_self.cpp |75.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_slice.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_sausage.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_cxx_database_ut.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_row_versions_ut.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_db_scheme.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_redo.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_part_multi.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_memtable.cpp |75.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.global.a |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_screen.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tablet_flat/shared_handle_ut.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_bloom.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_pages.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_range_cache_ut.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_forward.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_versions.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_decimal.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_stat.cpp |75.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut |75.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/ut/ydb-core-security-ut |75.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut |75.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure |75.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_cache_clock_pro_ut.cpp |75.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/transfer/transfer |75.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/node_broker/node_broker |75.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/kqprun/tests/ydb-tests-tools-kqprun-tests |75.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/ut/objcopy_9f29b589555ed64086e5eadccf.o |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_table_part_ut.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_btree_index_nodes.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_cache_s3fifo_ut.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_comp_gen.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_proto.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_iterator.cpp |75.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_db_iface.cpp |75.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_cache_switchable_ut.cpp |75.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/ut_other.cpp |75.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut |75.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/http/ut/xml_builder_ut.cpp |75.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_charge.cpp |75.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut |75.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_compaction_multi.cpp |75.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.global.a |75.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_slice_loader.cpp |75.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_btree_index_iter_charge.cpp |75.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_compaction.cpp |75.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/http_proxy/discovery_actor.cpp |75.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_part.cpp |75.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache_actor.cpp |75.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut |75.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/ut_fat/blobstorage_node_warden_ut_fat.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/http_proxy/discovery_actor.cpp |76.0%| [LD] {default-linux-x86_64, release, 
asan} $(B)/ydb/apps/ydb/ydb |76.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/ydb/ydb |76.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 |76.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/ut_rename_table_column.cpp |76.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/flat_test_db.cpp |76.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/dread_cache_service/ut/caching_proxy_ut.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/formats/arrow/serializer/native.cpp |76.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/engine/ut/ydb-core-engine-ut |76.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/protos/libpy3yaml-config-protos.global.a |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/util.cpp |76.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache.cpp |76.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_delete_ut.cpp |76.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/protos/libpy3yaml-config-protos.global.a |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_common.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/util.cpp |76.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |76.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_common.cpp |76.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql |76.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_leases_ut.cpp |76.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_database_ut.cpp |76.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_update_ut.cpp |76.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/ut_datetime.cpp |76.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/ut_data_cleanup.cpp |76.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut |76.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_ut.cpp |76.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/erasure/ut_perf/ydb-core-erasure-ut_perf |76.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/load/ydb-tests-olap-load |76.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/load/ydb-tests-olap-load |76.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut |76.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/tools/simple_json_diff/simple_json_diff |76.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/docs/generator/generator |76.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/docs/generator/generator |76.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ut_ycsb.cpp |76.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap |76.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut |76.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/ut/ydb-core-cms-ut |76.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_index_build_reboots/ut_index_build_reboots.cpp |76.4%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service |76.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_recovery.cpp |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_delete.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_recovery.cpp |76.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/ycloud/impl/folder_service_ut.cpp |76.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/resourcemanager/folder.{pb.h ... grpc.pb.h} |76.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/ycloud/impl/access_service_ut.cpp |76.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/resourcemanager/transitional/folder_service.{pb.h ... grpc.pb.h} |76.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/resourcemanager/folder_service.{pb.h ... grpc.pb.h} |76.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/ycloud/impl/service_account_service_ut.cpp |76.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/ycloud/impl/user_account_service_ut.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_delete.cpp |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/lib/base/msgbus.cpp |76.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/s3_import/large/ydb-tests-olap-s3_import-large |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/jaeger_tracing/sampling_throttling_configurator.cpp |76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper.cpp |76.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/s3_import/large/ydb-tests-olap-s3_import-large |76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/config/validation/auth_config_validator.cpp |76.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/public/lib/base/libpublic-lib-base.a |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/config/validation/auth_config_validator.cpp |76.3%| [AR] {default-linux-x86_64, release, asan} $(B)/yt/yt/core/libyt-yt-core.a |76.3%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yt/yt/core/libyt-yt-core.a |76.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/lib/base/libpublic-lib-base.a |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper.cpp |76.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/jaeger_tracing/libydb-core-jaeger_tracing.a |76.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/jaeger_tracing/libydb-core-jaeger_tracing.a |76.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/ncloud/impl/access_service_ut.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/jaeger_tracing/sampling_throttling_configurator.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/base/msgbus.cpp |76.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut |76.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut |76.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots |76.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut |76.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |76.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat |75.8%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut |75.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/ut/ut_init.cpp |75.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut |75.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb |75.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/validator/ut/validator_checks/yaml_config-validator-ut-validator_checks |75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/config/init/init_noop.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/config/init/init_noop.cpp |75.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/replication/ydb-tests-functional-replication |75.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/scheme/ut_pg/ydb-core-scheme-ut_pg |75.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/scheme/ut_pg/ydb-core-scheme-ut_pg |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/node_checkers.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/yaml_config_helpers.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/node_checkers.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/yaml_config_helpers.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/lib/deprecated/kicli/configurator.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/http/types.cpp |74.2%| [LD] {default-linux-x86_64, release, asan} $(B)/yql/essentials/tools/sql2yql/sql2yql |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/deprecated/kicli/configurator.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_scan_events.cpp |74.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/yql/essentials/tools/sql2yql/sql2yql |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/http/types.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_scan_events.cpp |74.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/validator/ut/validator/ydb-library-yaml_config-validator-ut-validator |73.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/modifications_validator.cpp |73.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_writer.cpp |73.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/http_proxy/http_service.cpp |73.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_writer.cpp |73.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/modifications_validator.cpp |73.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/http_proxy/http_service.cpp |72.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/validators/validator_nameservice.cpp |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/filtered_scheme.cpp |71.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/debug_tools/ut/ydb-core-debug_tools-ut |71.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_defrag.cpp |71.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/statestorage_monitoring.cpp |71.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_alloc.cpp |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/validators/validator_nameservice.cpp |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/lib/deprecated/kicli/query.cpp |71.8%| RESOURCE $(sbr:4966407557) |71.9%| [CC] {default-linux-x86_64, 
release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_sectorrestorator.cpp |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_internal_interface.cpp |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_compute_events.cpp |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_whoami.cpp |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/filtered_scheme.cpp |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_planner_strategy.cpp |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/filtered_scheme.cpp |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/http_proxy/grpc_service.cpp |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_cost_tracker.cpp |71.9%| [AR] {RESULT} $(B)/ydb/core/jaeger_tracing/libydb-core-jaeger_tracing.a |71.9%| [SB] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/postgresql/psql/psql |71.9%| [UN] {default-linux-x86_64, release, asan} $(B)/yql/essentials/tests/common/test_framework/udfs_deps/common-test_framework-udfs_deps.pkg.fake |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker__update_config.cpp |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_alloc.cpp |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_defrag.cpp |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/predicate/range.cpp |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/deprecated/kicli/query.cpp |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/statestorage_monitoring.cpp |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_compute_events.cpp |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__drop_yaml_config.cpp |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_internal_interface.cpp |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_sectorrestorator.cpp |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/base_utils/format_util.cpp |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/http/xml.cpp |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/filtered_scheme.cpp |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_planner_strategy.cpp |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/console_dumper.cpp |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_whoami.cpp ------- [LD] {default-linux-x86_64, release, asan} $(B)/yql/tools/yqlrun/yqlrun ld.lld: warning: version script assignment of 'global' to symbol '__after_morecore_hook' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'daylight' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'environ' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '_environ' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__free_hook' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__malloc_hook' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__malloc_initialize_hook' failed: 
symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol '__memalign_hook' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'program_invocation_name' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'program_invocation_short_name' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol '__realloc_hook' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'timezone' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'tzname' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol '__libc_start_main' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'AnnotateHappensAfter' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'AnnotateHappensBefore' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'AnnotateIgnoreWritesBegin' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'AnnotateIgnoreWritesEnd' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'AnnotateIgnoreReadsBegin' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'AnnotateIgnoreReadsEnd' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'abort' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'bind' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'close' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol '__close' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'closedir' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'connect' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'creat' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'creat64' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'dl_iterate_phdr' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'dup' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'dup2' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'dup3' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'epoll_create' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'epoll_create1' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'epoll_ctl' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'epoll_pwait' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'epoll_wait' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'eventfd' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'fork' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol '__fxstat' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol '__fxstat64' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'gettimeofday' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'inotify_init' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'inotify_init1' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'kill' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'listen' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'nanosleep' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'on_exit' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'open' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'open64' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pipe' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pipe2' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_barrier_destroy' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_barrier_init' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_barrier_wait' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_cond_broadcast' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_cond_destroy' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_cond_init' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_cond_signal' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_cond_timedwait' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_cond_wait' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_kill' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_mutex_destroy' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_mutex_init' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_mutex_lock' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_mutex_timedlock' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_mutex_trylock' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_mutex_unlock' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_once' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_rwlock_destroy' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_rwlock_init' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_rwlock_rdlock' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_rwlock_timedrdlock' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_rwlock_timedwrlock' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_rwlock_tryrdlock' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_rwlock_trywrlock' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_rwlock_unlock' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_rwlock_wrlock' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_spin_destroy' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_spin_init' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_spin_lock' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_spin_trylock' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_spin_unlock' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'raise' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol '__res_iclose' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'rmdir' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'setjmp' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol '_setjmp' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'signalfd' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'sigsetjmp' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol '__sigsetjmp' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'sigsuspend' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'sleep' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'socket' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'socketpair' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'tmpfile' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'tmpfile64' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'unlink' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'usleep' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'bcopy' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'dladdr' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'dlerror' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'dl_iterate_phdr' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'epoll_pwait' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'epoll_wait' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'fcvt' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'fgets_unlocked' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'fork' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'forkpty' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'fread_unlocked' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol '__fxstat' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol '__fxstat64' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol '__fxstatat' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol '__fxstatat64' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'gcvt' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'getenv' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'gethostname' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'getrlimit' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'getrlimit64' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'getrusage' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'gettimeofday' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'mbrtowc' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'mbtowc' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'memccpy' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'mempcpy' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'openpty' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pipe' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pipe2' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'prlimit' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'prlimit64' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_key_create' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_mutex_lock' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'pthread_mutex_unlock' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'putenv' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'setenv' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'shmat' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'socketpair' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'stpcpy' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol 'strftime' failed: symbol not defined
ld.lld: warning: version script assignment of 'global'
to symbol '__strftime_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strftime_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtod' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtod_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtod_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtod_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtof' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtof_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtof_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtof_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtold' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtold_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtold_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtold_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtol_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtol_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtol_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtoll_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtoll_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtoll_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtoul' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtoul_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtoull' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtoul_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtoul_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtoull_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtoull_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtoull_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'swprintf' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'tzset' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'vswprintf' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcschr' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcscmp' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcscpy' failed: symbol not defined ld.lld: warning: 
version script assignment of 'global' to symbol 'wcsftime' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcsftime_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcsftime_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstod' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstod_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstod_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstod_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstof' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstof_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstof_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstof_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstol' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstold' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstold_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstold_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstold_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstol_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstoll' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstol_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstol_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstoll_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstoll_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstoll_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstoul' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstoul_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstoull' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstoul_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstoul_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstoull_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstoull_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstoull_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wmemcpy' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wmemmove' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wmempcpy' failed: 
symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wmemset' failed: symbol not defined |71.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/dicts/libdicts_udf.so |71.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/structs/libstructs_udf.so |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker__update_config_subscription.cpp |71.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/dummylog/libdummylog.so |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/audit/audit_log_impl.cpp |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_cost_tracker.cpp |71.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/yql/tools/yqlrun/yqlrun |71.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/logs/dsv/libdsv_udf.so |71.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/streaming/libstreaming_udf.so |71.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/set/libset_udf.so |71.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/test/simple/libsimple_udf.so |71.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/type_inspection/libtype_inspection_udf.so |71.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperloglog/libhyperloglog_udf.so |71.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/callables/libcallables_udf.so |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/predicate/range.cpp |71.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/math/libmath_udf.so |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/formats/arrow/program/execution.cpp |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__update_config.cpp |71.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/vector/libvector_udf.so |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/base_utils/format_util.cpp |71.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/lists/liblists_udf.so |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__drop_yaml_config.cpp |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker__migrate_state.cpp |71.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/audit/libydb-core-audit.a |71.9%| [AR] {RESULT} $(B)/ydb/core/blobstorage/other/libcore-blobstorage-other.a |71.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/topfreq/libtopfreq_udf.so |71.9%| [AR] {RESULT} $(B)/ydb/core/audit/libydb-core-audit.a |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_syslogreader.cpp |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/http/xml.cpp |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__update_config_subscription.cpp |71.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/pq_read |71.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/http_proxy/grpc_service.cpp |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/local_discovery/grpc_service.cpp |71.8%| PREPARE $(FLAKE8_LINTER-sbr:6561765464) |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/validators/validator.cpp |71.9%| [AR] {RESULT} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.global.a |71.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/validators/validator_bootstrap.cpp |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/formats/arrow/program/execution.cpp |71.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/audit/libydb-core-audit.a |71.8%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/services/local_discovery/libydb-services-local_discovery.a |71.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/validators/validator.cpp |71.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__migrate_state.cpp |71.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/statestorage_proxy.cpp |71.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/console_dumper.cpp |71.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_syslogreader.cpp |71.6%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/digest/libdigest_udf.so |71.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/config.cpp |71.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/erasure/ut/ydb-core-erasure-ut |71.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/validators/validator_bootstrap.cpp |71.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/static_validator/ut/example_configs/static_validator-ut-example_configs |71.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/resolver.cpp |71.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/validators/registry.cpp |71.4%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/stat/libstat_udf.so |71.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/config.cpp |71.4%| PREPARE $(BLACK_LINTER-sbr:8415400280) |71.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker__graceful_shutdown.cpp |71.3%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/yson2/libyson2_udf.so |71.3%| [AR] {RESULT} $(B)/ydb/public/lib/base/libpublic-lib-base.a |71.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/resolver.cpp |71.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/validators/registry.cpp |71.3%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/cpp_style_checker/cpp_style_checker |71.3%| [AR] {RESULT} $(B)/ydb/services/local_discovery/libydb-services-local_discovery.a |71.0%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/top/libtop_udf.so |71.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker__update_epoch.cpp |71.0%| [AR] {RESULT} $(B)/yql/essentials/minikql/comp_nodes/llvm16/libminikql-comp_nodes-llvm16.a |71.0%| [AR] {RESULT} $(B)/yt/yt/core/libyt-yt-core.a |71.0%| [AR] {RESULT} $(B)/yt/yt/client/libyt-yt-client.a |71.0%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/histogram/libhistogram_udf.so |71.0%| [AR] {RESULT} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.a |71.0%| [LD] {RESULT} $(B)/ydb/tests/stress/simple_queue/simple_queue |71.0%| [LD] {RESULT} $(B)/ydb/tests/stress/transfer/transfer |70.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__graceful_shutdown.cpp |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/lib/deprecated/kicli/dynamic_node.cpp |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__get_yaml_metadata.cpp |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/statestorage_proxy.cpp |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__update_epoch.cpp |70.6%| [LD] {RESULT} $(B)/ydb/tests/stress/node_broker/node_broker |70.6%| [LD] {RESULT} $(B)/ydb/tests/stress/olap_workload/olap_workload |70.5%| [LD] {RESULT} $(B)/ydb/apps/ydb/ydb |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_config_base/config_base.cpp |70.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/tools/protobuf_plugin/ut/ydb-core-config-tools-protobuf_plugin-ut 
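The ld.lld warnings condensed above are produced by a linker version script that places a long list of libc, pthread, and sanitizer-interceptor-style symbols under the 'global' version node; whenever one of those symbols is not defined in the binary being linked, lld emits this diagnostic once per absent symbol and the link continues. The lines below are a minimal stand-alone sketch that reproduces the warning; it is illustrative only and assumes clang with lld is installed, and the names lib.c, symbols.map, present, and missing are made up for this example.

cat > lib.c <<'EOF'
void present(void) {}   /* defined, so its 'global' assignment succeeds */
EOF
cat > symbols.map <<'EOF'
{ global: present; missing; local: *; };
EOF
clang -shared -fPIC -fuse-ld=lld -Wl,--version-script=symbols.map lib.c -o lib.so
# expected diagnostic (a warning; lib.so is still produced):
# ld.lld: warning: version script assignment of 'global' to symbol 'missing' failed: symbol not defined
# note: newer lld releases default to --no-undefined-version, which turns this into an error;
# adding -Wl,--undefined-version restores the warning-only behaviour seen in this log.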
|70.5%| [LD] {RESULT} $(B)/ydb/core/scheme/ut_pg/ydb-core-scheme-ut_pg |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/deprecated/kicli/dynamic_node.cpp |70.5%| [AR] {RESULT} $(B)/ydb/library/yaml_config/protos/libpy3yaml-config-protos.global.a |70.5%| [PD] {RESULT} $(B)/ydb/library/yaml_config/protos/yaml-config-protos.{self.protodesc, protosrc} |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__get_yaml_metadata.cpp |70.4%| [LD] {RESULT} $(B)/ydb/tests/tools/kqprun/tests/ydb-tests-tools-kqprun-tests |70.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/rm_service/kqp_resource_estimation.cpp |70.4%| [LD] {RESULT} $(B)/yql/essentials/tools/sql2yql/sql2yql |70.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/driver_lib/cli_config_base/libcore-driver_lib-cli_config_base.a |70.4%| [LD] {RESULT} $(B)/ydb/tests/stress/cdc/cdc |70.4%| [AR] {RESULT} $(B)/ydb/core/driver_lib/cli_config_base/libcore-driver_lib-cli_config_base.a |70.4%| [LD] {RESULT} $(B)/ydb/tests/stress/show_create/view/show_create_view |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/audit/audit_log_impl.cpp |70.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker__load_state.cpp |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/rm_service/kqp_resource_estimation.cpp |70.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/local_discovery/libydb-services-local_discovery.a |70.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/codecs/ut/ydb-core-persqueue-codecs-ut |70.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_costmodel.cpp |70.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__update_last_provided_config.cpp |70.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/driver_lib/cli_config_base/libcore-driver_lib-cli_config_base.a |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__load_state.cpp |70.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common/description.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_costmodel.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__update_last_provided_config.cpp |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker__init_scheme.cpp |69.8%| [TS] {RESULT} ydb/core/fq/libs/signer/ut/unittest |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmd_config.cpp |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common/description.cpp |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/base/blobstorage_events.cpp |69.8%| [TS] {RESULT} ydb/services/persqueue_cluster_discovery/cluster_ordering/ut/unittest |69.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/file/libfile_udf.so |69.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/logic.cpp |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__init_scheme.cpp |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmd_config.cpp |69.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.global.a |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/protos/out/out.cpp |69.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.global.a |69.4%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/blobstorage/base/libcore-blobstorage-base.a |69.5%| [AR] {RESULT} $(B)/ydb/core/blobstorage/base/libcore-blobstorage-base.a |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/iterator.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_completion_impl.cpp |69.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.global.a |69.3%| [TS] {RESULT} ydb/core/resource_pools/ut/unittest |69.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/out/libcore-protos-out.a |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/iterator.cpp |69.2%| [AR] {RESULT} $(B)/ydb/core/protos/out/libcore-protos-out.a |69.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/config_parser.cpp |69.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/base/libcore-blobstorage-base.a |69.2%| [TS] {RESULT} ydb/core/fq/libs/hmac/ut/unittest |69.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/out/libcore-protos-out.a |69.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/portions/meta.cpp |69.2%| COMPACTING CACHE 15.6GiB |69.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/abstract.cpp |69.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/solomon_emulator/bin/solomon_emulator |69.2%| [TS] {RESULT} ydb/library/yaml_config/static_validator/ut/unittest |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/abstract.cpp |69.2%| [TS] {RESULT} ydb/library/yaml_config/validator/ut/validator_builder/unittest |69.2%| [TM] {RESULT} ydb/core/fq/libs/metrics/ut/unittest |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_completion_impl.cpp |69.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/solomon_emulator/recipe/solomon_recipe |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/portions/meta.cpp |69.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/formats/arrow/program/libformats-arrow-program.a |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_config_base/config_base.cpp |69.2%| [AR] {RESULT} $(B)/ydb/core/formats/arrow/program/libformats-arrow-program.a |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/local_discovery/grpc_service.cpp |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/config_parser.cpp |69.2%| [LD] {RESULT} $(B)/ydb/tests/olap/load/ydb-tests-olap-load |69.2%| [LD] {RESULT} $(B)/ydb/tests/olap/s3_import/large/ydb-tests-olap-s3_import-large |69.2%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperscan/libhyperscan_udf.so |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/logic.cpp |69.2%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/python/python3_small/libpython3_udf.so |69.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/ut/ydb-core-scheme-ut |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/base/blobstorage_events.cpp |69.2%| [TS] {RESULT} ydb/core/blobstorage/crypto/ut/unittest |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/protos/out/out.cpp |69.2%| [AR] {default-linux-x86_64, release, asan, pic} $(B)/yt/yt/core/libyt-yt-core.a |69.3%| [AR] {RESULT} $(B)/yt/yt/core/libyt-yt-core.a |69.3%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yt/yt/core/libyt-yt-core.a |69.3%| [CC] {default-linux-x86_64, 
release, asan} $(S)/ydb/core/cms/console/configs_config.cpp |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/configs_config.cpp |69.3%| [LD] {RESULT} $(B)/ydb/tests/olap/docs/generator/generator |69.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp |69.3%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/unicode_base/libunicode_udf.so |69.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hullop/hullop_entryserialize.cpp |69.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/bulk_upsert/ydb-public-sdk-cpp-tests-integration-bulk_upsert |69.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_import/ydb-tests-fq-yt-kqp_yt_import |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/hullop_entryserialize.cpp |69.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/supp/ydb_supp |69.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_metadata.cpp |69.3%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/url_base/liburl_udf.so |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_metadata.cpp |69.3%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/json2/libjson2_udf.so |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp |69.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sdk/cpp/sdk_credprovider/ydb-tests-functional-sdk-cpp-sdk_credprovider |69.3%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/protobuf/libprotobuf_udf.so |69.3%| [LD] {RESULT} $(B)/yql/tools/yqlrun/yqlrun |69.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_logreader.cpp |69.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/formats/arrow/program/libformats-arrow-program.a |69.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/streaming_optimize/ydb-tests-fq-streaming_optimize |69.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/backup/s3_path_style/ydb-tests-functional-backup-s3_path_style |69.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/console/validators/libcms-console-validators.a |69.3%| [AR] {RESULT} $(B)/ydb/core/cms/console/validators/libcms-console-validators.a |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_logreader.cpp |69.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/cms/console/validators/libcms-console-validators.a |69.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/kqp_query_svc/ydb-tests-functional-kqp-kqp_query_svc |69.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/sessions_pool/public-sdk-cpp-tests-integration-sessions_pool |69.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/sessions/ydb-public-sdk-cpp-tests-integration-sessions |69.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/serialize_deserialize.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/serialize_deserialize.cpp |69.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/udfs/common/clickhouse/client/libclickhouse_client_udf.global.a |69.4%| [AR] {RESULT} $(B)/ydb/library/yql/udfs/common/clickhouse/client/libclickhouse_client_udf.global.a |69.4%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/udfs/common/clickhouse/client/libclickhouse_client_udf.global.a |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/lib/deprecated/kicli/result.cpp |69.4%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/public/lib/deprecated/kicli/result.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/statestorage_event_filter.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/statestorage_event_filter.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/config/validation/validators.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/config/validation/validators.cpp |69.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/validation/libcore-config-validation.a |69.4%| [AR] {RESULT} $(B)/ydb/core/config/validation/libcore-config-validation.a |69.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/config/validation/libcore-config-validation.a |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/log_backend/log_backend_build.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/log_backend/log_backend_build.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__get_yaml_config.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__get_yaml_config.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdsk.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdsk.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/remap.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/remap.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_base/cli_cmds_discovery.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_base/cli_cmds_discovery.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugedefs.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugedefs.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/auto_config_initializer.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/auto_config_initializer.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/actors/block_events.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/actors/block_events.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/config/init/dummy.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/configs_dispatcher_proxy.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/config/init/dummy.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_base/cli_cmds_root.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/http.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/security/certificate_check/cert_auth_utils.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/logic.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/configs_dispatcher_proxy.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/http.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/security/certificate_check/cert_auth_utils.cpp |69.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_base/cli_cmds_root.cpp |69.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/libchanges-compaction-sparsed.global.a |69.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/libchanges-compaction-sparsed.global.a |69.5%| [AR] 
{BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/libchanges-compaction-sparsed.global.a |69.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/logic.cpp |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_requestimpl.cpp |69.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_requestimpl.cpp |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/wilson_tracing_control.cpp |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/log_settings_configurator.cpp |69.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/wilson_tracing_control.cpp |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap.cpp |69.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/log_settings_configurator.cpp |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/util/config_index.cpp |69.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap.cpp |69.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/util/config_index.cpp |69.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/console/util/libcms-console-util.a |69.5%| [AR] {RESULT} $(B)/ydb/core/cms/console/util/libcms-console-util.a |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider_linux.cpp |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/builder.cpp |69.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/cms/console/util/libcms-console-util.a |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/lib/deprecated/client/msgbus_client.cpp |69.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider_linux.cpp |69.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/deprecated/client/msgbus_client.cpp |69.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/builder.cpp |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/actors/wait_events.cpp |69.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/actors/wait_events.cpp |69.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.a |69.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.a |69.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.a |69.5%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/grpc.pb.cc |69.5%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/grpc.pb.cc |69.5%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/protos/config.pb.cc |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/slide_limiter/usage/service.cpp |69.5%| [CC] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/protos/config.pb.cc |69.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/slide_limiter/usage/service.cpp |69.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/protos/libyaml-config-protos.a |69.5%| [AR] {RESULT} $(B)/ydb/library/yaml_config/protos/libyaml-config-protos.a |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclog_private_events.cpp |69.6%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/library/yaml_config/protos/libyaml-config-protos.a |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclog_private_events.cpp |69.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/limiter/grouped_memory/usage/config.cpp |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/usage/config.cpp |69.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_base/cli_cmds_db.cpp |69.6%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/msgbus.grpc.pb.cc |69.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/security/certificate_check/cert_check.cpp |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_base/cli_cmds_db.cpp |69.6%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/msgbus.grpc.pb.cc |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/security/certificate_check/cert_check.cpp |69.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/statestorage.cpp |69.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/security/certificate_check/libcore-security-certificate_check.a |69.6%| [AR] {RESULT} $(B)/ydb/core/security/certificate_check/libcore-security-certificate_check.a |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/statestorage.cpp |69.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/security/certificate_check/libcore-security-certificate_check.a |69.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_event_filter.cpp |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_event_filter.cpp |69.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console_configuration_info_collector.cpp |69.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/formats/arrow/arrow_helpers.cpp |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_configuration_info_collector.cpp |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/formats/arrow/arrow_helpers.cpp |69.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/formats/arrow/libcore-formats-arrow.a |69.6%| [AR] {RESULT} $(B)/ydb/core/formats/arrow/libcore-formats-arrow.a |69.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/formats/arrow/serializer/abstract.cpp |69.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/formats/arrow/libcore-formats-arrow.a |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/formats/arrow/serializer/abstract.cpp |69.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.a |69.6%| [AR] {RESULT} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.a |69.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.a |69.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/tablet_killer.cpp |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/tablet_killer.cpp |69.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/lib/deprecated/kicli/schema.cpp |69.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/error.cpp |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/deprecated/kicli/schema.cpp |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/error.cpp |69.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__add_config_subscription.cpp |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__add_config_subscription.cpp |69.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/conveyor_composite/service/scope.cpp |69.7%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/conveyor_composite/service/scope.cpp |69.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider.cpp |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider.cpp |69.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/security/ldap_auth_provider/libcore-security-ldap_auth_provider.a |69.7%| [AR] {RESULT} $(B)/ydb/core/security/ldap_auth_provider/libcore-security-ldap_auth_provider.a |69.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/security/ldap_auth_provider/libcore-security-ldap_auth_provider.a |69.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/board_replica.cpp |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/board_replica.cpp |69.7%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/console.grpc.pb.cc |69.7%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/console.grpc.pb.cc |69.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/base_utils/format_info.cpp |69.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/slide_limiter/usage/config.cpp |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/base_utils/format_info.cpp |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/slide_limiter/usage/config.cpp |69.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/slide_limiter/usage/liblibrary-slide_limiter-usage.a |69.7%| [AR] {RESULT} $(B)/ydb/library/slide_limiter/usage/liblibrary-slide_limiter-usage.a |69.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/slide_limiter/usage/liblibrary-slide_limiter-usage.a |69.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/conveyor/usage/config.cpp |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/conveyor/usage/config.cpp |69.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/jaeger_tracing_configurator.cpp |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/jaeger_tracing_configurator.cpp |69.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/constructor.cpp |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/constructor.cpp |69.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.global.a |69.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.global.a |69.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.global.a |69.7%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/console.pb.cc |69.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/operations/batch_builder/merger.cpp |69.7%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/grpc.grpc.pb.cc |69.7%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/console.pb.cc |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/batch_builder/merger.cpp |69.7%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/grpc.grpc.pb.cc |69.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/lib/deprecated/kicli/kikimr.cpp |69.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/conveyor/usage/service.cpp |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/deprecated/kicli/kikimr.cpp |69.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/mock/dsproxy_mock.cpp |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/conveyor/usage/service.cpp |69.8%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/columnshard/data_accessor/abstract/manager.cpp |69.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/limiter/grouped_memory/usage/service.cpp |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/manager.cpp |69.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/dsproxy/mock/libblobstorage-dsproxy-mock.a |69.8%| [AR] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/mock/libblobstorage-dsproxy-mock.a |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/mock/dsproxy_mock.cpp |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/usage/service.cpp |69.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/dsproxy/mock/libblobstorage-dsproxy-mock.a |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/conveyor/usage/events.cpp |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/conveyor/usage/events.cpp |69.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/limiter/grouped_memory/usage/liblimiter-grouped_memory-usage.a |69.8%| [AR] {RESULT} $(B)/ydb/core/tx/limiter/grouped_memory/usage/liblimiter-grouped_memory-usage.a |69.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/limiter/grouped_memory/usage/liblimiter-grouped_memory-usage.a |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/slide_limiter/service/service.cpp |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/slide_limiter/service/service.cpp |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/log_backend/log_backend.cpp |69.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/slide_limiter/service/liblibrary-slide_limiter-service.a |69.8%| [AR] {RESULT} $(B)/ydb/library/slide_limiter/service/liblibrary-slide_limiter-service.a |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/log_backend/log_backend.cpp |69.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/slide_limiter/service/liblibrary-slide_limiter-service.a |69.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/log_backend/libydb-core-log_backend.a |69.8%| [AR] {RESULT} $(B)/ydb/core/log_backend/libydb-core-log_backend.a |69.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/log_backend/libydb-core-log_backend.a |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/base_utils/node_by_host.cpp |69.8%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/blobstorage_distributed_config.grpc.pb.cc |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/base_utils/node_by_host.cpp |69.8%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config.grpc.pb.cc |69.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/driver_lib/base_utils/libbase_utils.a |69.8%| [AR] {RESULT} $(B)/ydb/core/driver_lib/base_utils/libbase_utils.a |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__replace_config_subscriptions.cpp |69.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/driver_lib/base_utils/libbase_utils.a |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__replace_config_subscriptions.cpp |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/conveyor/usage/abstract.cpp |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/conveyor/usage/abstract.cpp |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/limiter/grouped_memory/service/manager.cpp |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__remove_config_subscriptions.cpp |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/service/manager.cpp |69.8%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/tx/limiter/grouped_memory/service/actor.cpp |69.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/conveyor/usage/libtx-conveyor-usage.a |69.9%| [AR] {RESULT} $(B)/ydb/core/tx/conveyor/usage/libtx-conveyor-usage.a |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__remove_config_subscriptions.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/constructor.cpp |69.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/conveyor/usage/libtx-conveyor-usage.a |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/constructor.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/mock/pdisk_mock.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__remove_config_subscription.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__remove_config_subscription.cpp |69.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/pdisk/mock/libblobstorage-pdisk-mock.a |69.9%| [AR] {RESULT} $(B)/ydb/core/blobstorage/pdisk/mock/libblobstorage-pdisk-mock.a |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/service/actor.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/security/login_shared_func.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/base/secure_protobuf_printer.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/security/login_shared_func.cpp |69.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/limiter/grouped_memory/service/liblimiter-grouped_memory-service.a |69.9%| [AR] {RESULT} $(B)/ydb/core/tx/limiter/grouped_memory/service/liblimiter-grouped_memory-service.a |69.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/pdisk/mock/libblobstorage-pdisk-mock.a |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/base/secure_protobuf_printer.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/mock/pdisk_mock.cpp |69.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/limiter/grouped_memory/service/liblimiter-grouped_memory-service.a |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_base/cli_kicli.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_base/cli_kicli.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__get_log_tail.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__get_log_tail.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__log_cleanup.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/console_config.grpc.pb.cc |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__log_cleanup.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/console_config.grpc.pb.cc |69.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/driver_lib/cli_base/libcli_base.a |69.9%| [AR] {RESULT} $(B)/ydb/core/driver_lib/cli_base/libcli_base.a |69.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/driver_lib/cli_base/libcli_base.a |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/conveyor/service/service.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/conveyor/service/service.cpp |69.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/conveyor/service/libtx-conveyor-service.a |69.9%| [AR] {RESULT} $(B)/ydb/core/tx/conveyor/service/libtx-conveyor-service.a |69.9%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/core/tx/conveyor/service/libtx-conveyor-service.a |69.9%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/blobstorage_vdisk_internal.pb.cc |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/conveyor_composite/service/events.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_internal.pb.cc |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/conveyor_composite/service/events.cpp |70.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/conveyor_composite/service/process.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/conveyor_composite/service/process.cpp |70.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__configure.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__configure.cpp |70.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/conveyor_composite/service/manager.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/conveyor_composite/service/manager.cpp |70.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/config/init/init.cpp |70.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/cloud_events/cloud_events.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/config/init/init.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/cloud_events/cloud_events.cpp |70.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/cloud_events/libymq-actor-cloud_events.a |70.0%| [AR] {RESULT} $(B)/ydb/core/ymq/actor/cloud_events/libymq-actor-cloud_events.a |70.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/actor/cloud_events/libymq-actor-cloud_events.a |70.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/conveyor_composite/service/worker.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/conveyor_composite/service/worker.cpp |70.0%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/blobstorage_distributed_config.pb.cc |70.0%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config.pb.cc |70.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/conveyor_composite/service/category.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/conveyor_composite/service/category.cpp |70.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/conveyor_composite/usage/service.cpp |70.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/config_helpers.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/conveyor_composite/usage/service.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/config_helpers.cpp |70.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/conveyor_composite/usage/events.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/conveyor_composite/usage/events.cpp |70.0%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/blobstorage_vdisk_internal.grpc.pb.cc |70.0%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_internal.grpc.pb.cc |70.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/conveyor_composite/usage/common.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/conveyor_composite/usage/common.cpp |70.0%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/config.grpc.pb.cc |70.0%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/config.grpc.pb.cc |70.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/persqueue_utils.cpp |70.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/persqueue_utils.cpp |70.0%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/conveyor_composite/usage/config.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/conveyor_composite/usage/config.cpp |70.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/conveyor_composite/usage/libtx-conveyor_composite-usage.a |70.0%| [AR] {RESULT} $(B)/ydb/core/tx/conveyor_composite/usage/libtx-conveyor_composite-usage.a |70.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/conveyor_composite/usage/libtx-conveyor_composite-usage.a |70.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/conveyor_composite/service/service.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/conveyor_composite/service/service.cpp |70.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/common/service.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/common/service.cpp |70.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/request/config.cpp |70.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/conveyor_composite/service/workers_pool.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/request/config.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/conveyor_composite/service/workers_pool.cpp |70.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/conveyor_composite/service/libtx-conveyor_composite-service.a |70.1%| [AR] {RESULT} $(B)/ydb/core/tx/conveyor_composite/service/libtx-conveyor_composite-service.a |70.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/conveyor_composite/service/libtx-conveyor_composite-service.a |70.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/base/events_writer.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/base/events_writer.cpp |70.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/base/dlq_helpers.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/base/dlq_helpers.cpp |70.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_req_blockbs.cpp |70.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/monitoring.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_req_blockbs.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/monitoring.cpp |70.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/common/kqp_timeouts.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/common/kqp_timeouts.cpp |70.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_index_record.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_index_record.cpp |70.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/audit_helpers/audit_helper.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/audit_helpers/audit_helper.cpp |70.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/testlib/audit_helpers/libcore-testlib-audit_helpers.a |70.1%| [AR] {RESULT} $(B)/ydb/core/testlib/audit_helpers/libcore-testlib-audit_helpers.a |70.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/testlib/audit_helpers/libcore-testlib-audit_helpers.a |70.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/scheduler/old/kqp_compute_scheduler.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/scheduler/old/kqp_compute_scheduler.cpp |70.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/lib/deprecated/client/grpc_client.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/deprecated/client/grpc_client.cpp |70.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/public/lib/deprecated/client/liblib-deprecated-client.a |70.1%| [AR] {RESULT} 
$(B)/ydb/public/lib/deprecated/client/liblib-deprecated-client.a |70.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/lib/deprecated/client/liblib-deprecated-client.a |70.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_req_findlatest.cpp |70.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/version/version.cpp |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_req_findlatest.cpp |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/version/version.cpp |70.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/driver_lib/version/libversion.a |70.2%| [AR] {RESULT} $(B)/ydb/core/driver_lib/version/libversion.a |70.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/driver_lib/version/libversion.a |70.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/partition_writer_cache_actor.cpp |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/partition_writer_cache_actor.cpp |70.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_req_writelog.cpp |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_req_writelog.cpp |70.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/common/shutdown/controller.cpp |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/common/shutdown/controller.cpp |70.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/common/shutdown/libkqp-common-shutdown.a |70.2%| [AR] {RESULT} $(B)/ydb/core/kqp/common/shutdown/libkqp-common-shutdown.a |70.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/replica.cpp |70.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/common/shutdown/libkqp-common-shutdown.a |70.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_req_rebuildhistory.cpp |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/replica.cpp |70.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/lib/deprecated/kicli/error.cpp |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_req_rebuildhistory.cpp |70.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/config.cpp |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/deprecated/kicli/error.cpp |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/config.cpp |70.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/locks/read_start.cpp |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/locks/read_start.cpp |70.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.global.a |70.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.global.a |70.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.global.a |70.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/public/lib/deprecated/kicli/liblib-deprecated-kicli.a |70.2%| [AR] {RESULT} $(B)/ydb/public/lib/deprecated/kicli/liblib-deprecated-kicli.a |70.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/lib/deprecated/kicli/liblib-deprecated-kicli.a |70.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_req_delete.cpp |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_req_delete.cpp |70.2%| [PK] {default-linux-x86_64, release, asan} $(B)/yql/essentials/tests/common/test_framework/udfs_deps/{common-test_framework-udfs_deps.final.pkg.fake ... 
yql/essentials/udfs/common/hyperscan/libhyperscan_udf.so} |70.2%| [CC] {tool} $(B)/ydb/core/protos/grpc.pb.cc |70.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers.cpp |70.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/cleanup_queue_data.cpp |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers.cpp |70.3%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/msgbus.pb.cc |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/cleanup_queue_data.cpp |70.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/pdisk_log.cpp |70.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/msgbus.pb.cc |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/pdisk_log.cpp |70.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_settings.cpp |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_settings.cpp |70.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/memory_controller/memory_controller.cpp |70.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/keyvalue_write.cpp |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/keyvalue_write.cpp |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/memory_controller/memory_controller.cpp |70.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/memory_controller/libydb-core-memory_controller.a |70.3%| [AR] {RESULT} $(B)/ydb/core/memory_controller/libydb-core-memory_controller.a |70.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/memory_controller/libydb-core-memory_controller.a |70.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/txn_actor_response_builder.cpp |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/txn_actor_response_builder.cpp |70.3%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/console_config.pb.cc |70.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/console_config.pb.cc |70.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/splitter/batch_slice.cpp |70.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/yql_single_query.cpp |70.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/splitter/libtx-columnshard-splitter.a |70.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/splitter/libtx-columnshard-splitter.a |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/splitter/batch_slice.cpp |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/yql_single_query.cpp |70.3%| [CC] {tool} $(B)/ydb/core/protos/grpc.grpc.pb.cc |70.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/splitter/libtx-columnshard-splitter.a |70.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/partition_writer.cpp |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/partition_writer.cpp |70.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/merged_column.cpp |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/merged_column.cpp |70.3%| [CC] {tool} $(B)/ydb/core/protos/serverless_proxy_config.pb.cc |70.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/pdisk_write.cpp |70.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request.cpp |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/pdisk_write.cpp |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request.cpp |70.3%| [CC] {tool} 
$(B)/ydb/core/protos/console_config.grpc.pb.cc |70.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_write.cpp |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/incrhuge/incrhuge_keeper_write.cpp |70.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/column_cursor.cpp |70.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/incrhuge/libcore-blobstorage-incrhuge.a |70.4%| [AR] {RESULT} $(B)/ydb/core/blobstorage/incrhuge/libcore-blobstorage-incrhuge.a |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/column_cursor.cpp |70.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/incrhuge/libcore-blobstorage-incrhuge.a |70.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/column_portion_chunk.cpp |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/column_portion_chunk.cpp |70.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.a |70.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.a |70.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/base/run_query.cpp |70.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.a |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/base/run_query.cpp |70.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/base/ut/action_ut.cpp |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/base/ut/action_ut.cpp |70.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.cpp |70.4%| [CC] {tool} $(B)/ydb/core/protos/blobstorage_vdisk_internal.pb.cc |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.cpp |70.4%| [CC] {tool} $(B)/ydb/core/protos/console.pb.cc |70.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/base/ut/dlq_helpers_ut.cpp |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/base/ut/dlq_helpers_ut.cpp |70.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/common/kqp.cpp |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/common/kqp.cpp |70.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/merger.cpp |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/merger.cpp |70.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/libchanges-compaction-abstract.a |70.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/libchanges-compaction-abstract.a |70.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/libchanges-compaction-abstract.a |70.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/logic.cpp |70.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/common/context.cpp |70.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.global.a |70.4%| [AR] {RESULT} 
$(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.global.a |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/logic.cpp |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/common/context.cpp |70.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.global.a |70.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/common/libchanges-compaction-common.a |70.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/common/libchanges-compaction-common.a |70.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/common/libchanges-compaction-common.a |70.5%| [CC] {tool} $(B)/ydb/core/protos/blobstorage_distributed_config.grpc.pb.cc |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_api_versions_actor.cpp |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/apps/version/version_definition.cpp |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_api_versions_actor.cpp |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/apps/version/version_definition.cpp |70.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/apps/version/libversion_definition.a |70.5%| [AR] {RESULT} $(B)/ydb/apps/version/libversion_definition.a |70.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/apps/version/libversion_definition.a |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_metrics_actor.cpp |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_metrics_actor.cpp |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/kafka_metrics.cpp |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/kafka_metrics.cpp |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/tools/dump/main.cpp |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/collector.cpp |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/tools/dump/main.cpp |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/collector.cpp |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/yaml_config_parser.cpp |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/priorities/usage/config.cpp |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/priorities/usage/config.cpp |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/yaml_config_parser.cpp |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/base/ut/queue_attributes_ut.cpp |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/constructor.cpp |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/base/ut/queue_attributes_ut.cpp |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/constructor.cpp |70.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.global.a |70.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.global.a |70.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.global.a |70.5%| [CC] {tool} $(B)/ydb/core/protos/console_config.pb.cc |70.5%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/kqp/common/kqp_event_impl.cpp |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/common/kqp_event_impl.cpp |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/write_with_blobs.cpp |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/write_with_blobs.cpp |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/max/meta.cpp |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/max/meta.cpp |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/config/validation/auth_config_validator_ut/auth_config_validator_ut.cpp |70.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/config/validation/auth_config_validator_ut/auth_config_validator_ut.cpp |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/snapshot_scheme.cpp |70.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/base/ut/secure_protobuf_printer_ut.cpp |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/deprecated/yaml_config_parser.cpp |70.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_find_coordinator_actor.cpp |70.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/deprecated/liblibrary-yaml_config-deprecated.a |70.6%| [AR] {RESULT} $(B)/ydb/library/yaml_config/deprecated/liblibrary-yaml_config-deprecated.a |70.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/snapshot_scheme.cpp |70.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/base/ut/secure_protobuf_printer_ut.cpp |70.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_find_coordinator_actor.cpp |70.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/deprecated/liblibrary-yaml_config-deprecated.a |70.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/deprecated/yaml_config_parser.cpp |70.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/kqp.cpp |70.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/snapshot_scheme.cpp |70.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/kqp.cpp |70.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/priorities/usage/service.cpp |70.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/snapshot_scheme.cpp |70.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/priorities/usage/service.cpp |70.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/priorities/usage/libtx-priorities-usage.a |70.6%| [AR] {RESULT} $(B)/ydb/core/tx/priorities/usage/libtx-priorities-usage.a |70.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/priorities/usage/libtx-priorities-usage.a |70.6%| [CC] {tool} $(B)/ydb/core/protos/console.grpc.pb.cc |70.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_pdiskfit/lib/basic_test.cpp |70.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_pdiskfit/lib/basic_test.cpp |70.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_pdiskfit/lib/libblobstorage-ut_pdiskfit-lib.a |70.6%| [AR] {RESULT} $(B)/ydb/core/blobstorage/ut_pdiskfit/lib/libblobstorage-ut_pdiskfit-lib.a |70.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut.cpp |70.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_pdiskfit/lib/libblobstorage-ut_pdiskfit-lib.a |70.6%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut.cpp |70.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/column_features.cpp |70.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/column_features.cpp |70.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/config/ut/main.cpp |70.6%| [CC] {tool} $(B)/ydb/core/protos/config.grpc.pb.cc |70.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/config/ut/main.cpp |70.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/config/validation/validators_ut.cpp |70.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/priorities/service/manager.cpp |70.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/config/validation/validators_ut.cpp |70.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/priorities/service/manager.cpp |70.6%| [UN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/postgresql/psql/psql |70.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/predicate/container.cpp |70.7%| [CC] {tool} $(B)/ydb/core/protos/serverless_proxy_config.grpc.pb.cc |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/predicate/container.cpp |70.7%| [CC] {tool} $(B)/ydb/core/protos/msgbus.pb.cc |70.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_meta.cpp |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_meta.cpp |70.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/nodes_manager.cpp |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/nodes_manager.cpp |70.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/priorities/service/service.cpp |70.7%| [CC] {tool} $(B)/ydb/core/protos/blobstorage_vdisk_internal.grpc.pb.cc |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/priorities/service/service.cpp |70.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/ut_helpers/mock_service.cpp |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/ut_helpers/mock_service.cpp |70.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/ut_helpers/libtx-replication-ut_helpers.a |70.7%| [AR] {RESULT} $(B)/ydb/core/tx/replication/ut_helpers/libtx-replication-ut_helpers.a |70.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/priorities/service/libtx-priorities-service.a |70.7%| [AR] {RESULT} $(B)/ydb/core/tx/priorities/service/libtx-priorities-service.a |70.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/replication/ut_helpers/libtx-replication-ut_helpers.a |70.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/priorities/service/libtx-priorities-service.a |70.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_scan_common.cpp |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_scan_common.cpp |70.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/objects_cache.cpp |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/objects_cache.cpp |70.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_produce_actor.cpp |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_produce_actor.cpp |70.7%| [CC] {tool} $(B)/ydb/core/protos/msgbus.grpc.pb.cc |70.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/predicate/filter.cpp |70.7%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/columnshard/engines/predicate/filter.cpp |70.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/base/ut/counters_ut.cpp |70.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp |70.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/predicate/libcolumnshard-engines-predicate.a |70.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/predicate/libcolumnshard-engines-predicate.a |70.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/security/certificate_check/cert_utils_ut.cpp |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/security/certificate_check/cert_utils_ut.cpp |70.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/predicate/libcolumnshard-engines-predicate.a |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/base/ut/counters_ut.cpp |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp |70.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/dataset.cpp |70.8%| [CC] {tool} $(B)/ydb/core/protos/blobstorage_distributed_config.pb.cc |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/dataset.cpp |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/validators/validator_nameservice_ut.cpp |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/validators/validator_nameservice_ut.cpp |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_write_actor_ut.cpp |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_write_actor_ut.cpp |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/logs/log.cpp |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/logs/log.cpp |70.8%| [ld] {default-linux-x86_64, release, asan} $(B)/tools/black_linter/black_linter |70.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/logs/libfq-libs-logs.a |70.8%| [AR] {RESULT} $(B)/ydb/core/fq/libs/logs/libfq-libs-logs.a |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/tools/dump_ds_init/main.cpp |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/shared_sausagecache.cpp |70.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/logs/libfq-libs-logs.a |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/tools/dump_ds_init/main.cpp |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/shared_sausagecache.cpp |70.8%| [ld] {default-linux-x86_64, release, asan} $(B)/tools/flake8_linter/flake8_linter |70.8%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/node_checkers.h_serialized.cpp |70.8%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/cms/node_checkers.h_serialized.cpp |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/validators/validator_bootstrap_ut.cpp |70.8%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/common/description.h_serialized.cpp |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/validators/validator_bootstrap_ut.cpp |70.8%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common/description.h_serialized.cpp |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_read_actor_ut.cpp |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_read_actor_ut.cpp |70.8%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/simple_queue |70.8%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/simple_queue 
|70.8%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_state.h_serialized.cpp |70.8%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_state.h_serialized.cpp |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/dictionary/logic.cpp |70.8%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/kqprun/src/common.h_serialized.cpp |70.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/dictionary/libchanges-compaction-dictionary.global.a |70.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/dictionary/libchanges-compaction-dictionary.global.a |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/security/certificate_check/cert_check_ut.cpp |70.9%| [EN] {BAZEL_UPLOAD} $(B)/ydb/tests/tools/kqprun/src/common.h_serialized.cpp |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/dictionary/logic.cpp |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/incrhuge/ut/incrhuge_basic_ut.cpp |70.8%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/compute_actor/kqp_compute_state.h_serialized.cpp |70.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/dictionary/libchanges-compaction-dictionary.global.a |70.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/security/certificate_check/cert_check_ut.cpp |70.9%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/compute_actor/kqp_compute_state.h_serialized.cpp |70.9%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/a14bb0205af400981ce9e41033_raw.auxcpp |70.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/incrhuge/ut/incrhuge_basic_ut.cpp |70.9%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/a14bb0205af400981ce9e41033_raw.auxcpp |70.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/incrhuge/ut/incrhuge_log_merger_ut.cpp |70.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/tests/tpch/cmd_run_bench.cpp |70.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/incrhuge/ut/incrhuge_log_merger_ut.cpp |70.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_run_bench.cpp |70.9%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/ydb_cli |70.9%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/ydb_cli |70.9%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/olap_workload |70.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/conveyor_composite/ut/ut_simple.cpp |70.9%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/olap_workload |70.9%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.h_serialized.cpp |70.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/conveyor_composite/ut/ut_simple.cpp |70.9%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.h_serialized.cpp |70.9%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/msgbus__intpy3___pb2_grpc.py.p5ju.yapyc3 |70.9%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/msgbus__intpy3___pb2_grpc.py.p5ju.yapyc3 |70.9%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/metering.h_serialized.cpp |70.9%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/actor/metering.h_serialized.cpp |70.9%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/msgbus__intpy3___pb2.py.p5ju.yapyc3 
|70.9%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/a14bb0205af400981ce9e41033_raw.auxcpp |70.9%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/msgbus__intpy3___pb2.py.p5ju.yapyc3 |70.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdata_ut.cpp |70.9%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/5732afb9e28f3811e12c561eb5_raw.auxcpp |70.9%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/a14bb0205af400981ce9e41033_raw.auxcpp |70.9%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/5732afb9e28f3811e12c561eb5_raw.auxcpp |70.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdata_ut.cpp |70.9%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/8760973cadf4aa816a9d37589f_raw.auxcpp |70.9%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/init/init.h_serialized.cpp |70.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/formats/arrow/ut/ut_dictionary.cpp |70.9%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/8760973cadf4aa816a9d37589f_raw.auxcpp |70.9%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/config/init/init.h_serialized.cpp |70.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/formats/arrow/ut/ut_dictionary.cpp |70.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap_ut.cpp |71.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap_ut.cpp |71.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdsk_ut.cpp |71.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdsk_ut.cpp >> PgTest::DumpStringCells >> PgTest::DumpStringCells [GOOD] |71.0%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/5732afb9e28f3811e12c561eb5_raw.auxcpp |71.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest |71.0%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/5732afb9e28f3811e12c561eb5_raw.auxcpp |71.0%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/8760973cadf4aa816a9d37589f_raw.auxcpp |71.0%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/8760973cadf4aa816a9d37589f_raw.auxcpp |71.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/libpy3ydb-core-protos.global.a |71.0%| [AR] {RESULT} $(B)/ydb/core/protos/libpy3ydb-core-protos.global.a |71.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest >> PgTest::DumpStringCells [GOOD] |71.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/libpy3ydb-core-protos.global.a |71.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_rd_read_actor_ut.cpp |71.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_rd_read_actor_ut.cpp |71.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/providers/solomon/actors/ut/dq_solomon_write_actor_ut.cpp |71.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/solomon/actors/ut/dq_solomon_write_actor_ut.cpp |71.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest |71.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/console_dumper_ut.cpp >> TErasureTypeTest::TestBlockByteOrder [GOOD] >> TErasureTypeTest::TestSplitDiffBlock4Plus2SpecialCase1 [GOOD] >> PersQueueCodecs::FromV1Codec [GOOD] |71.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/console_dumper_ut.cpp |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> 
TErasureTypeTest::TestBlockByteOrder [GOOD] |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestSplitDiffBlock4Plus2SpecialCase1 [GOOD] >> TErasureTypeTest::TestDifferentCasesInDiffSplitingMirror3Of4 [GOOD] |71.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest >> PersQueueCodecs::FromV1Codec [GOOD] |71.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data_ut.cpp |71.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data_ut.cpp |71.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestDifferentCasesInDiffSplitingMirror3Of4 [GOOD] >> TErasureTypeTest::TestStripe23LossOfAllPossible3 |71.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest |71.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest |71.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/version/ut/version_ut.cpp >> TErasureTypeTest::TestStripe33LossOfAllPossible3 |71.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/version/ut/version_ut.cpp |71.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest |71.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest |71.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/fqrun/src/common.cpp |71.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/validators/registry_ut.cpp >> TErasureTypeTest::TestDifferentCasesInDiffSplitingBlock4Plus2 [GOOD] |71.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/fqrun/src/common.cpp |71.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest |71.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest >> TErasureTypeTest::TestStripe32LossOfAllPossible2 |71.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/validators/registry_ut.cpp |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestDifferentCasesInDiffSplitingBlock4Plus2 [GOOD] >> ValidationTests::CanDispatchByTag [GOOD] |71.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/yaml_config_parser_ut.cpp |71.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/yaml_config_parser_ut.cpp >> ValidationTests::MapType [GOOD] >> TErasureTypeTest::TestBlock42LossOfAllPossible2 >> ValidationTests::HasReservedPaths [GOOD] |71.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/mvp/oidc_proxy/mvp.cpp |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest >> ValidationTests::CanDispatchByTag [GOOD] |71.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/mvp/oidc_proxy/mvp.cpp |71.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/mvp/oidc_proxy/libydb-mvp-oidc_proxy.a |71.1%| [AR] {RESULT} $(B)/ydb/mvp/oidc_proxy/libydb-mvp-oidc_proxy.a >> TErasureTypeTest::TestBlock42PartialRestore0 >> ValidationTests::AdvancedCopyTo [GOOD] >> TErasureTypeTest::TestStripe23LossOfAllPossible3 [GOOD] |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest 
>> ValidationTests::HasReservedPaths [GOOD] |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest >> ValidationTests::MapType [GOOD] |71.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/mvp/oidc_proxy/libydb-mvp-oidc_proxy.a |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest |71.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/auto_config_initializer_ut.cpp |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest >> ValidationTests::AdvancedCopyTo [GOOD] |71.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/auto_config_initializer_ut.cpp >> TErasureTypeTest::TestStripe32LossOfAllPossible2 [GOOD] |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest >> PgTest::DumpIntCells |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestStripe23LossOfAllPossible3 [GOOD] |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest >> PgTest::DumpIntCells [GOOD] |71.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestStripe32LossOfAllPossible2 [GOOD] >> TErasureTypeTest::TestAllSpeciesCrcWhole1of2 >> ValidationTests::CanCopyTo [GOOD] |71.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest >> PgTest::DumpIntCells [GOOD] >> TErasureTypeTest::TestStripe31LossOfAllPossible1 |71.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/mock/yql_mock.cpp >> TErasureTypeTest::TestBlock42PartialRestore2 |71.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest |71.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/mock/yql_mock.cpp >> TErasureTypeTest::TestEo [GOOD] |71.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/mock/libfq-libs-mock.a |71.1%| [AR] {RESULT} $(B)/ydb/core/fq/libs/mock/libfq-libs-mock.a |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest >> ValidationTests::CanCopyTo [GOOD] |71.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/mock/libfq-libs-mock.a >> TErasureTypeTest::TestBlock33LossOfAllPossible3 >> ErasureBrandNew::Block42_restore |71.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest >> TErasureTypeTest::TestStripe31LossOfAllPossible1 [GOOD] >> TErasureTypeTest::TestBlock42LossOfAllPossible2 [GOOD] >> TErasureTypeTest::TestBlock31LossOfAllPossible1 >> TErasureTypeTest::TestAllSpeciesCrcWhole2of2 |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestEo [GOOD] |71.1%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/node_checkers.h_serialized.cpp |71.1%| [TA] $(B)/ydb/core/config/tools/protobuf_plugin/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TErasureTypeTest::TestBlock42PartialRestore3 |71.1%| [TA] {RESULT} $(B)/ydb/core/config/tools/protobuf_plugin/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestStripe31LossOfAllPossible1 [GOOD] |71.1%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/cms/node_checkers.h_serialized.cpp |71.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/format_handler_ut.cpp |71.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/format_handler_ut.cpp |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock42LossOfAllPossible2 [GOOD] >> TErasureTypeTest::TestBlock31LossOfAllPossible1 [GOOD] >> TErasureTypeTest::TestStripe33LossOfAllPossible3 [GOOD] >> TErasureTypeTest::TestBlock22LossOfAllPossible2 |71.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest |71.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock31LossOfAllPossible1 [GOOD] |71.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/mvp/meta/meta_versions.cpp >> TErasureTypeTest::TestBlock43LossOfAllPossible3 |71.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/mvp/meta/meta_versions.cpp |71.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hullcompactdeferredqueue_ut.cpp >> TErasureTypeTest::TestStripe43LossOfAllPossible3 >> TErasureTypeTest::TestStripe22LossOfAllPossible2 >> Scheme::NullCell [GOOD] >> Scheme::NotEmptyCell [GOOD] |71.2%| [TA] $(B)/ydb/core/scheme/ut_pg/test-results/unittest/{meta.json ... results_accumulator.log} |71.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hullcompactdeferredqueue_ut.cpp |71.2%| [TA] {RESULT} $(B)/ydb/core/scheme/ut_pg/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TErasureTypeTest::TestAllSpecies1of2 >> Scheme::EmptyOwnedCellVec [GOOD] >> Scheme::NonEmptyOwnedCellVec [GOOD] |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestStripe33LossOfAllPossible3 [GOOD] |71.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_pdisk_config.cpp |71.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_pdisk_config.cpp >> TErasureTypeTest::TestBlock22LossOfAllPossible2 [GOOD] >> SchemeRanges::RangesBorders [GOOD] >> TypesProto::Decimal22 [GOOD] |71.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::NotEmptyCell [GOOD] >> TErasureTypeTest::TestBlock32LossOfAllPossible2 |71.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::NonEmptyOwnedCellVec [GOOD] >> TErasureTypeTest::TestStripe22LossOfAllPossible2 [GOOD] >> Scheme::CompareOrder [GOOD] >> Scheme::CellVecTryParse [GOOD] |71.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/statestorage_ut.cpp |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock22LossOfAllPossible2 [GOOD] >> Scheme::OwnedCellVecFromSerialized [GOOD] >> Scheme::TSerializedCellMatrix [GOOD] >> ErasureBrandNew::Block42_encode |71.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/statestorage_ut.cpp |71.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> TypesProto::Decimal22 [GOOD] |71.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/fq/ydb_over_fq.cpp |71.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::CellVecTryParse [GOOD] |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestStripe22LossOfAllPossible2 [GOOD] >> TErasureTypeTest::TestMirror3LossOfAllPossible3 |71.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::TSerializedCellMatrix [GOOD] >> TErasureTypeTest::TestStripe42LossOfAllPossible2 >> Scheme::UnsafeAppend [GOOD] >> Scheme::TSerializedCellVec [GOOD] |71.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/mock/pdisk_mock.cpp >> TErasureTypeTest::TestMirror3LossOfAllPossible3 [GOOD] >> SchemeRanges::CmpBorders [GOOD] >> SchemeBorders::Partial [GOOD] |71.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/ut/kafka_test_client.cpp >> TErasureTypeTest::TestBlock42PartialRestore1 >> Scheme::EmptyCell [GOOD] >> Scheme::CompareUuidCells [GOOD] >> SchemeBorders::Full [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::TSerializedCellVec [GOOD] Test command err: Serialize: 0.001169s Cells constructor: 0.003669s Parse: 0.000421s Copy: 0.000145s Move: 0.000141s >> Scheme::YqlTypesMustBeDefined [GOOD] >> TErasureTypeTest::TestBlock32LossOfAllPossible2 [GOOD] |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestMirror3LossOfAllPossible3 [GOOD] |71.2%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_state.h_serialized.cpp >> TErasureTypeTest::TestBlock23LossOfAllPossible3 |71.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/kafka_test_client.cpp |71.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> SchemeBorders::Partial [GOOD] |71.2%| [CC] {BAZEL_UPLOAD} 
$(B)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_state.h_serialized.cpp >> TErasureTypeTest::isSplittedDataEqualsToOldVerion [GOOD] >> TypesProto::Decimal35 [GOOD] >> TypesProto::DecimalNoTypeInfo [GOOD] |71.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/fq/ydb_over_fq.cpp |71.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::CompareUuidCells [GOOD] |71.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::YqlTypesMustBeDefined [GOOD] >> PersQueueCodecs::ToV1Codec [GOOD] |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock32LossOfAllPossible2 [GOOD] |71.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/mock/pdisk_mock.cpp |71.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_tables_ut.cpp |71.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/apps/ydb/ut/parse_command_line.cpp |71.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_restore_ut.cpp |71.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> TypesProto::DecimalNoTypeInfo [GOOD] |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::isSplittedDataEqualsToOldVerion [GOOD] |71.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_restore_ut.cpp |71.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_tables_ut.cpp |71.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest >> PersQueueCodecs::ToV1Codec [GOOD] |71.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest |71.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_chunk_tracker.cpp |71.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/apps/ydb/ut/parse_command_line.cpp |71.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_chunk_tracker.cpp >> TErasureTypeTest::TestBlock33LossOfAllPossible3 [GOOD] |71.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest |71.3%| [TA] $(B)/ydb/core/scheme/ut/test-results/unittest/{meta.json ... results_accumulator.log} |71.3%| [TA] {RESULT} $(B)/ydb/core/scheme/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TErasureTypeTest::TestStripe42LossOfAllPossible2 [GOOD] |71.3%| [TA] $(B)/ydb/core/persqueue/codecs/ut/test-results/unittest/{meta.json ... results_accumulator.log} |71.3%| [TA] {RESULT} $(B)/ydb/core/persqueue/codecs/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |71.3%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/metering.h_serialized.cpp |71.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_filter_ut.cpp |71.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/actor/metering.h_serialized.cpp |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock33LossOfAllPossible3 [GOOD] |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestStripe42LossOfAllPossible2 [GOOD] |71.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_filter_ut.cpp >> test.py::py2_flake8 [GOOD] |71.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/fqrun/src/actors.cpp |71.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/fqrun/src/actors.cpp |71.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part15/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.3%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part15/py2_flake8 |71.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_color_limits.cpp |71.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_color_limits.cpp >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |71.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/tests/kikimr_tpch/kqp_tpch_ut.cpp >> TErasureTypeTest::TestBlock23LossOfAllPossible3 [GOOD] |71.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part16/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/tests/kikimr_tpch/kqp_tpch_ut.cpp |71.3%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part16/py2_flake8 |71.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part19/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> ErasureBrandNew::Block42_encode [GOOD] >> ErasureBrandNew::Block42_chunked >> test_disk.py::flake8 [GOOD] >> test_tablet.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] |71.3%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part19/py2_flake8 |71.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part5/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.3%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part5/py2_flake8 |71.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part9/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.3%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part9/py2_flake8 >> column_table_helper.py::flake8 [GOOD] >> range_allocator.py::flake8 [GOOD] >> s3_client.py::flake8 [GOOD] >> thread_helper.py::flake8 [GOOD] >> time_histogram.py::flake8 [GOOD] >> utils.py::flake8 [GOOD] >> ydb_client.py::flake8 [GOOD] |71.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/mvp/core/core_ydb.cpp >> kikimr_config.py::flake8 [GOOD] |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/flake8 >> test_tablet.py::flake8 [GOOD] |71.4%| [TS] {RESULT} ydb/tests/tools/nemesis/ut/flake8 |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/node_broker/tests/flake8 >> test_workload.py::flake8 [GOOD] |71.4%| [TS] {RESULT} ydb/tests/stress/node_broker/tests/flake8 |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock23LossOfAllPossible3 [GOOD] >> 
test.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_log_scenario.py::flake8 [GOOD] >> upgrade_to_internal_path_id.py::flake8 [GOOD] >> zip_bomb.py::flake8 [GOOD] |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/common/flake8 >> ydb_client.py::flake8 [GOOD] |71.4%| [TS] {RESULT} ydb/tests/olap/common/flake8 >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> tpc_tests.py::flake8 [GOOD] |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/library/ut/flake8 >> kikimr_config.py::flake8 [GOOD] |71.4%| [TS] {RESULT} ydb/tests/library/ut/flake8 |71.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/mvp/core/core_ydb.cpp |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part10/flake8 >> test.py::flake8 [GOOD] |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/s3_backups/tests/flake8 >> test_workload.py::flake8 [GOOD] |71.4%| [TS] {RESULT} ydb/tests/stress/s3_backups/tests/flake8 |71.4%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part10/flake8 |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/flake8 >> zip_bomb.py::flake8 [GOOD] |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part18/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part19/flake8 >> test.py::flake8 [GOOD] |71.4%| [TS] {RESULT} ydb/tests/olap/flake8 |71.4%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part18/py2_flake8 |71.4%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part19/flake8 |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part3/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.4%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part3/py2_flake8 |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part8/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.4%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part8/py2_flake8 |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/benchmarks/runner/flake8 >> tpc_tests.py::flake8 [GOOD] |71.4%| [TS] {RESULT} ydb/library/benchmarks/runner/flake8 |71.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/ydb_state_storage_ut.cpp |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part5/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |71.4%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part5/py2_flake8 >> http_client.py::flake8 [GOOD] >> query_results.py::flake8 [GOOD] >> test_liveness_wardens.py::flake8 [GOOD] |71.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/ydb_state_storage_ut.cpp >> TErasureTypeTest::TestBlock43LossOfAllPossible3 [GOOD] |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part11/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.4%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part11/py2_flake8 |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/http_api_client/flake8 >> query_results.py::flake8 [GOOD] >> TErasureTypeTest::TestStripe43LossOfAllPossible3 [GOOD] |71.4%| [TS] {RESULT} ydb/core/fq/libs/http_api_client/flake8 |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/wardens/flake8 >> test_liveness_wardens.py::flake8 [GOOD] |71.4%| [TS] {RESULT} ydb/tests/functional/wardens/flake8 >> test_cte.py::flake8 [GOOD] >> 
test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |71.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/fq/grpc_service.cpp |71.4%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/init/init.h_serialized.cpp >> test_s3.py::flake8 [GOOD] |71.4%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/config/init/init.h_serialized.cpp |71.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/yaml_config_proto2yaml_ut.cpp |71.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/fq/libydb-services-fq.a |71.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/kqp/plan2svg/flake8 >> test_cte.py::flake8 [GOOD] |71.5%| [AR] {RESULT} $(B)/ydb/services/fq/libydb-services-fq.a |71.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part0/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.5%| [TS] {RESULT} ydb/tests/functional/kqp/plan2svg/flake8 |71.5%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part0/py2_flake8 |71.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part4/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/init/libcore-config-init.a |71.5%| [AR] {RESULT} $(B)/ydb/core/config/init/libcore-config-init.a |71.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/config/init/init_ut.cpp |71.5%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part4/py2_flake8 >> test_workload.py::flake8 [GOOD] |71.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/yaml_config_proto2yaml_ut.cpp |71.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/flake8 >> test_s3.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |71.5%| [TS] {RESULT} ydb/tests/datashard/s3/flake8 |71.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/config/init/init_ut.cpp |71.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock43LossOfAllPossible3 [GOOD] |71.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/simple_queue/tests/flake8 >> test_workload.py::flake8 [GOOD] |71.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/config/init/libcore-config-init.a |71.5%| [TS] {RESULT} ydb/tests/stress/simple_queue/tests/flake8 |71.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestStripe43LossOfAllPossible3 [GOOD] |71.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/fq/grpc_service.cpp >> test_workload.py::flake8 [GOOD] |71.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part4/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.5%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part4/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test_clickbench.py::flake8 [GOOD] >> test_external.py::flake8 [GOOD] >> test_import_csv.py::flake8 [GOOD] >> test_tpcds.py::flake8 [GOOD] >> test_tpch.py::flake8 [GOOD] >> test_upload.py::flake8 [GOOD] >> test_workload_oltp.py::flake8 [GOOD] >> test_workload_simple_queue.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |71.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/solomon/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.5%| [TS] {RESULT} ydb/library/yql/tests/sql/solomon/py2_flake8 |71.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/transfer/tests/flake8 >> test_workload.py::flake8 [GOOD] >> test_dml.py::flake8 [GOOD] |71.5%| [TS] {RESULT} ydb/tests/stress/transfer/tests/flake8 |71.5%| [TS] {asan, default-linux-x86_64, release} 
ydb/tests/olap/load/flake8 >> test_workload_simple_queue.py::flake8 [GOOD] |71.5%| [TS] {RESULT} ydb/tests/olap/load/flake8 |71.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part9/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test_restarts.py::flake8 [GOOD] |71.5%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part9/py2_flake8 |71.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/fq/libydb-services-fq.a |71.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/dml/flake8 >> test_dml.py::flake8 [GOOD] |71.5%| [TS] {RESULT} ydb/tests/datashard/dml/flake8 >> test_partitioning.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_tpcds.py::flake8 [GOOD] >> test_tpch_spilling.py::flake8 [GOOD] >> test_bulkupserts_tpch.py::flake8 [GOOD] >> test_insert_delete_duplicate_records.py::flake8 [GOOD] >> test_insertinto_selectfrom.py::flake8 [GOOD] >> test_tiering.py::flake8 [GOOD] >> test_workload_manager.py::flake8 [GOOD] >> ErasureBrandNew::Block42_chunked [GOOD] >> test.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_stats_mode.py::flake8 [GOOD] |71.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/flake8 >> test_restarts.py::flake8 [GOOD] |71.5%| [TS] {RESULT} ydb/tests/functional/restarts/flake8 >> conftest.py::flake8 [GOOD] >> test_auditlog.py::flake8 [GOOD] >> test_transform.py::flake8 [GOOD] |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/large/flake8 >> test_tpch_spilling.py::flake8 [GOOD] |71.6%| [TS] {RESULT} ydb/tests/functional/tpc/large/flake8 |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/flake8 >> test_partitioning.py::flake8 [GOOD] |71.6%| [TS] {RESULT} ydb/tests/datashard/partitioning/flake8 >> __main__.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part14/flake8 >> test.py::flake8 [GOOD] |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/sql/large/flake8 >> test_workload_manager.py::flake8 [GOOD] |71.6%| [TS] {RESULT} ydb/tests/sql/large/flake8 |71.6%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part14/flake8 |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part0/flake8 >> test.py::flake8 [GOOD] |71.6%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part0/flake8 |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/flake8 >> test_auditlog.py::flake8 [GOOD] |71.6%| [TS] {RESULT} ydb/tests/functional/audit/flake8 |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/plans/flake8 >> test_stats_mode.py::flake8 [GOOD] |71.6%| [TS] {RESULT} ydb/tests/fq/plans/flake8 |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yaml_config/ut_transform/flake8 >> test_transform.py::flake8 [GOOD] |71.6%| [TS] {RESULT} ydb/library/yaml_config/ut_transform/flake8 >> __main__.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/ydb_serializable/flake8 >> __main__.py::flake8 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> ErasureBrandNew::Block42_chunked [GOOD] Test command err: totalSize# 501260482 period1# 2.240150s period2# 0.648514s MB/s1# 223.7620168 MB/s2# 772.9370253 factor# 3.454281635 |71.6%| [TS] {RESULT} ydb/tests/tools/ydb_serializable/flake8 |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part10/py2_flake8 >> test.py::py2_flake8 [GOOD] >> 
integrations_test.py::flake8 [GOOD] |71.6%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part10/py2_flake8 >> __main__.py::flake8 [GOOD] >> parser.py::flake8 [GOOD] |71.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/mvp/core/core_ydbc.cpp |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/oltp_workload/flake8 >> __main__.py::flake8 [GOOD] |71.6%| [TS] {RESULT} ydb/tests/stress/oltp_workload/flake8 |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/postgres_integrations/library/ut/flake8 >> integrations_test.py::flake8 [GOOD] |71.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/mvp/core/core_ydbc.cpp |71.6%| [TS] {RESULT} ydb/tests/postgres_integrations/library/ut/flake8 |71.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/fqrun/src/fq_runner.cpp >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> select_positive_with_schema.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part17/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.6%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part17/py2_flake8 |71.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/mvp/core/libydb-mvp-core.a |71.6%| [AR] {RESULT} $(B)/ydb/mvp/core/libydb-mvp-core.a |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/docs/generator/flake8 >> parser.py::flake8 [GOOD] >> test_base.py::flake8 [GOOD] >> test_http_api.py::flake8 [GOOD] |71.6%| [TS] {RESULT} ydb/tests/olap/docs/generator/flake8 |71.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/fqrun/src/fq_runner.cpp >> test.py::flake8 [GOOD] >> tablet_scheme_tests.py::flake8 [GOOD] >> test_ttl.py::flake8 [GOOD] |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/datasource/postgresql/flake8 >> test.py::flake8 [GOOD] |71.6%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/postgresql/flake8 |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/http_api/flake8 >> test_http_api.py::flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/tests/fq/http_api/flake8 |71.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/mvp/core/libydb-mvp-core.a |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/public/tools/lib/cmds/ut/flake8 >> test.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test_sql_streaming.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/public/tools/lib/cmds/ut/flake8 |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_tests/flake8 >> tablet_scheme_tests.py::flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/tests/functional/scheme_tests/flake8 |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/flake8 >> test_ttl.py::flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/tests/datashard/ttl/flake8 |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part0/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part0/py2_flake8 >> test_vector_index.py::flake8 [GOOD] >> test_vector_index_large_levels_and_clusters.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/streaming_optimize/flake8 >> test_sql_streaming.py::flake8 [GOOD] |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part13/flake8 >> test.py::flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part13/flake8 >> 
test_stability.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_clickhouse.py::flake8 [GOOD] >> test_greenplum.py::flake8 [GOOD] >> test_join.py::flake8 [GOOD] >> test_mysql.py::flake8 [GOOD] >> test_postgresql.py::flake8 [GOOD] >> test_ydb.py::flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/tests/fq/streaming_optimize/flake8 |71.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_parser_ut.cpp >> __main__.py::flake8 [GOOD] |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/vector_index/large/flake8 >> test_vector_index_large_levels_and_clusters.py::flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/tests/datashard/vector_index/large/flake8 |71.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_parser_ut.cpp |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yaml_config/tools/simple_json_diff/flake8 >> __main__.py::flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/library/yaml_config/tools/simple_json_diff/flake8 |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stability/ydb/flake8 >> test_stability.py::flake8 [GOOD] |71.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_sectormap.cpp |71.7%| [TS] {RESULT} ydb/tests/stability/ydb/flake8 |71.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_sectormap.cpp >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/public/tools/ydb_recipe/flake8 >> __main__.py::flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/public/tools/ydb_recipe/flake8 |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/generic/analytics/flake8 >> test_ydb.py::flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/tests/fq/generic/analytics/flake8 >> overlapping_portions.py::flake8 [GOOD] |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part2/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part2/py2_flake8 >> conftest.py::flake8 [GOOD] >> docker_wrapper_test.py::flake8 [GOOD] >> run_tests.py::flake8 [GOOD] |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part1/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part1/py2_flake8 |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part12/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part12/py2_flake8 |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/oom/flake8 >> overlapping_portions.py::flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/tests/olap/oom/flake8 >> test.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] |71.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/splitter/ut/batch_slice.cpp |71.7%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/kqprun/src/common.h_serialized.cpp |71.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.cpp |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/benchmarks/runner/run_tests/flake8 >> run_tests.py::flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/library/benchmarks/runner/run_tests/flake8 |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/postgres_integrations/go-libpq/flake8 >> docker_wrapper_test.py::flake8 [GOOD] |71.8%| [TS] {RESULT} 
ydb/tests/postgres_integrations/go-libpq/flake8 >> test_schemeshard_limits.py::flake8 [GOOD] |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part8/flake8 >> test.py::flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part8/flake8 |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/show_create/view/tests/flake8 >> test_workload.py::flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/tests/stress/show_create/view/tests/flake8 >> test.py::flake8 [GOOD] >> hive_matchers.py::flake8 [GOOD] >> test_create_tablets.py::flake8 [GOOD] >> test_drain.py::flake8 [GOOD] >> test_kill_tablets.py::flake8 [GOOD] >> test_actorsystem.py::flake8 [GOOD] |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/flake8 >> test_schemeshard_limits.py::flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/tests/functional/limits/flake8 >> test_kqprun_recipe.py::flake8 [GOOD] >> compare.py::flake8 [GOOD] |71.8%| [CC] {BAZEL_UPLOAD} $(B)/ydb/tests/tools/kqprun/src/common.h_serialized.cpp |71.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/splitter/ut/batch_slice.cpp >> test.py::py2_flake8 [GOOD] >> __main__.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part15/flake8 >> test.py::flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part15/flake8 |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/flake8 >> test_kill_tablets.py::flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/tests/functional/hive/flake8 |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/autoconfig/flake8 >> test_actorsystem.py::flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/tests/functional/autoconfig/flake8 |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/benchmarks/runner/result_compare/flake8 >> compare.py::flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/library/benchmarks/runner/result_compare/flake8 |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/kqprun/tests/flake8 >> test_kqprun_recipe.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/tests/tools/kqprun/tests/flake8 |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part7/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part7/py2_flake8 |71.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.cpp |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/library/compatibility/binaries/downloader/flake8 >> __main__.py::flake8 [GOOD] >> test_commit.py::flake8 [GOOD] >> test_timeout.py::flake8 [GOOD] |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/merge_split_common_table/std/flake8 >> test.py::flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/tests/functional/sqs/merge_split_common_table/std/flake8 |71.8%| [TS] {RESULT} ydb/tests/library/compatibility/binaries/downloader/flake8 >> test.py::py2_flake8 [GOOD] |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/pq_read/test/flake8 >> test_timeout.py::flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/tests/tools/pq_read/test/flake8 |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/tools/visualize_portions/flake8 >> __main__.py::flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/core/tx/columnshard/tools/visualize_portions/flake8 |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part7/py2_flake8 >> 
test.py::py2_flake8 [GOOD] >> test_async_replication.py::flake8 [GOOD] >> test_leader_start_inflight.py::flake8 [GOOD] >> test_secondary_index.py::flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part7/py2_flake8 >> base.py::flake8 [GOOD] >> data_correctness.py::flake8 [GOOD] >> data_migration_when_alter_ttl.py::flake8 [GOOD] >> tier_delete.py::flake8 [GOOD] >> ttl_delete_s3.py::flake8 [GOOD] >> ttl_unavailable_s3.py::flake8 [GOOD] >> unstable_connection.py::flake8 [GOOD] >> base.py::flake8 [GOOD] >> test_tpch_import.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part13/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.9%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part13/py2_flake8 |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/flake8 >> test_async_replication.py::flake8 [GOOD] >> alter_compression.py::flake8 [GOOD] >> base.py::flake8 [GOOD] |71.9%| [TS] {RESULT} ydb/tests/datashard/async_replication/flake8 |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/flake8 >> unstable_connection.py::flake8 [GOOD] |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/large/flake8 >> test_leader_start_inflight.py::flake8 [GOOD] |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/flake8 >> test_secondary_index.py::flake8 [GOOD] |71.9%| [TS] {RESULT} ydb/tests/olap/ttl_tiering/flake8 |71.9%| [TS] {RESULT} ydb/tests/functional/sqs/large/flake8 |71.9%| [TS] {RESULT} ydb/tests/datashard/secondary_index/flake8 |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/s3_import/flake8 >> test_tpch_import.py::flake8 [GOOD] |71.9%| [TS] {RESULT} ydb/tests/olap/s3_import/flake8 |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part2/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.9%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part2/py2_flake8 >> test_dump_restore.py::flake8 [GOOD] >> common.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_rename.py::flake8 [GOOD] |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/flake8 >> base.py::flake8 [GOOD] |71.9%| [TS] {RESULT} ydb/tests/olap/column_family/compression/flake8 >> test_pdisk_format_info.py::flake8 [GOOD] >> test_replication.py::flake8 [GOOD] >> test_self_heal.py::flake8 [GOOD] >> test_tablet_channel_migration.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/flake8 >> test_dump_restore.py::flake8 [GOOD] |71.9%| [TS] {RESULT} ydb/tests/datashard/dump_restore/flake8 >> test_bridge.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/flake8 >> test_rename.py::flake8 [GOOD] |71.9%| [TS] {RESULT} ydb/tests/functional/rename/flake8 |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/flake8 >> test_tablet_channel_migration.py::flake8 [GOOD] |71.9%| [TS] {RESULT} ydb/tests/functional/blobstorage/flake8 >> test.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_common.py::flake8 [GOOD] >> test_yandex_audit.py::flake8 [GOOD] >> test_yandex_cloud_mode.py::flake8 [GOOD] >> test_yandex_cloud_queue_counters.py::flake8 [GOOD] >> test_multinode_cluster.py::flake8 [GOOD] >> test_recompiles_requests.py::flake8 [GOOD] |71.9%| [TS] {asan, default-linux-x86_64, release} 
ydb/tests/fq/yt/kqp_yt_file/part9/flake8 >> test.py::flake8 [GOOD] |71.9%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part9/flake8 |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/flake8 >> test_bridge.py::flake8 [GOOD] |71.9%| [TS] {RESULT} ydb/tests/functional/bridge/flake8 |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part4/flake8 >> test.py::flake8 [GOOD] |71.9%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part4/flake8 |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part6/flake8 >> test.py::flake8 [GOOD] >> tstool.py::flake8 [GOOD] |71.9%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part6/flake8 |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/public/tools/local_ydb/flake8 >> __main__.py::flake8 [GOOD] |71.9%| [TS] {RESULT} ydb/public/tools/local_ydb/flake8 |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/flake8 >> test_yandex_cloud_queue_counters.py::flake8 [GOOD] >> conftest.py::black [GOOD] >> test_join.py::black [GOOD] >> test_workload.py::flake8 [GOOD] |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/multinode/flake8 >> test_recompiles_requests.py::flake8 [GOOD] |71.9%| [TS] {RESULT} ydb/tests/functional/sqs/multinode/flake8 |71.9%| [TS] {RESULT} ydb/tests/functional/sqs/cloud/flake8 >> test_ttl.py::flake8 [GOOD] |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tools/tstool/flake8 >> tstool.py::flake8 [GOOD] |71.9%| [TS] {RESULT} ydb/tools/tstool/flake8 >> test_large_import.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_join.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/flake8 >> test_ttl.py::flake8 [GOOD] |72.0%| [TS] {RESULT} ydb/tests/functional/ttl/flake8 |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/generic/streaming/black >> test_join.py::black [GOOD] |72.0%| [TS] {RESULT} ydb/tests/fq/generic/streaming/black |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/kv/tests/flake8 >> test_workload.py::flake8 [GOOD] |72.0%| [TS] {RESULT} ydb/tests/stress/kv/tests/flake8 >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime_with_service_name.py::flake8 [GOOD] >> select_positive_with_service_name.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/s3_import/large/flake8 >> test_large_import.py::flake8 [GOOD] |72.0%| [TS] {RESULT} ydb/tests/olap/s3_import/large/flake8 >> test_encryption.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_unknown_data_source.py::flake8 [GOOD] |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/generic/streaming/flake8 >> test_join.py::flake8 [GOOD] |72.0%| [TS] {RESULT} ydb/tests/fq/generic/streaming/flake8 |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/kqprun/recipe/flake8 >> __main__.py::flake8 [GOOD] |72.0%| [TS] {RESULT} ydb/tests/tools/kqprun/recipe/flake8 |72.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/yaml_config_ut.cpp |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/datasource/oracle/flake8 >> test.py::flake8 [GOOD] |72.0%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/oracle/flake8 |72.0%| [TS] {asan, default-linux-x86_64, release} 
ydb/tests/functional/sqs/merge_split_common_table/fifo/flake8 >> test.py::flake8 [GOOD] >> test_crud.py::flake8 [GOOD] >> test_inserts.py::flake8 [GOOD] >> test_kv.py::flake8 [GOOD] >> test_account_actions.py::flake8 [GOOD] |72.0%| [TS] {RESULT} ydb/tests/functional/sqs/merge_split_common_table/fifo/flake8 |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/encryption/flake8 >> test_encryption.py::flake8 [GOOD] |72.0%| [TS] {RESULT} ydb/tests/functional/encryption/flake8 |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part18/flake8 >> test.py::flake8 [GOOD] |72.0%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part18/flake8 |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/common/flake8 >> test_unknown_data_source.py::flake8 [GOOD] >> test_acl.py::flake8 [GOOD] >> test_counters.py::flake8 [GOOD] >> test_format_without_version.py::flake8 [GOOD] >> test_garbage_collection.py::flake8 [GOOD] >> test_multiplexing_tables_format.py::flake8 [GOOD] >> test_ping.py::flake8 [GOOD] >> test_queue_attributes_validation.py::flake8 [GOOD] >> test_queue_counters.py::flake8 [GOOD] >> test_queue_tags.py::flake8 [GOOD] >> test_queues_managing.py::flake8 [GOOD] >> test_throttling.py::flake8 [GOOD] |72.0%| [TS] {RESULT} ydb/tests/fq/common/flake8 >> reconfig_state_storage_workload_test.py::flake8 [GOOD] >> test_board_workload.py::flake8 [GOOD] >> test_scheme_board_workload.py::flake8 [GOOD] >> test_state_storage_workload.py::flake8 [GOOD] >> test_postgres.py::flake8 [GOOD] |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/sql/flake8 >> test_kv.py::flake8 [GOOD] |72.0%| [TS] {RESULT} ydb/tests/sql/flake8 >> OperationLog::Size1000 >> OperationLog::Size8 [GOOD] >> OperationLog::Size1 [GOOD] >> OperationLog::Size29 [GOOD] >> test_quota_exhaustion.py::flake8 [GOOD] |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/common/flake8 >> test_throttling.py::flake8 [GOOD] |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/postgresql/flake8 >> test_postgres.py::flake8 [GOOD] |72.0%| [TS] {RESULT} ydb/tests/functional/postgresql/flake8 |72.0%| [TS] {RESULT} ydb/tests/functional/sqs/common/flake8 >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> scenario.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_case.py::flake8 [GOOD] >> Checks::BasicIntChecks [GOOD] >> Checks::BasicStringChecks [GOOD] >> Checks::IntArrayValidation [GOOD] >> Checks::MapValidation [GOOD] >> Checks::ErrorInCheck [GOOD] >> Checks::OpaqueMaps [GOOD] |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/reconfig_state_storage_workload/tests/flake8 >> test_state_storage_workload.py::flake8 [GOOD] |72.0%| [TS] {RESULT} ydb/tests/stress/reconfig_state_storage_workload/tests/flake8 |72.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/yaml_config_ut.cpp >> gen-report.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_serverless.py::flake8 [GOOD] >> Validator::IntValidation [GOOD] >> Validator::Enums [GOOD] >> Validator::BoolValidation [GOOD] >> Validator::IntArrayValidation [GOOD] >> Validator::MapValidation [GOOD] >> Validator::OpaqueMaps [GOOD] >> Validator::StringValidation [GOOD] >> Validator::MultitypeNodeValidation [GOOD] >> test_cms_erasure.py::flake8 [GOOD] >> test_cms_restart.py::flake8 [GOOD] >> test_cms_state_storage.py::flake8 [GOOD] >> utils.py::flake8 [GOOD] >> test_query_cache.py::flake8 [GOOD] >> base.py::flake8 [GOOD] >> test_delete_all_after_inserts.py::flake8 [GOOD] >> 
test_delete_by_explicit_row_id.py::flake8 [GOOD] |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/join/flake8 >> test_case.py::flake8 [GOOD] |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/data_quotas/flake8 >> test_quota_exhaustion.py::flake8 [GOOD] |72.0%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/join/flake8 |72.0%| [TS] {RESULT} ydb/tests/olap/data_quotas/flake8 |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yaml_config/validator/ut/validator_checks/unittest >> Checks::OpaqueMaps [GOOD] |72.0%| [TS] {RESULT} ydb/library/yaml_config/validator/ut/validator_checks/unittest |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/flake8 >> test_serverless.py::flake8 [GOOD] >> TErasurePerfTest::Split |72.1%| [TS] {RESULT} ydb/tests/functional/serverless/flake8 |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/query_cache/flake8 >> test_query_cache.py::flake8 [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/flake8 >> utils.py::flake8 [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/benchmarks/runner/result_convert/flake8 >> gen-report.py::flake8 [GOOD] |72.1%| [TS] {RESULT} ydb/tests/functional/cms/flake8 |72.1%| [TS] {RESULT} ydb/library/benchmarks/runner/result_convert/flake8 |72.1%| [TS] {RESULT} ydb/tests/functional/query_cache/flake8 |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/flake8 >> test_delete_by_explicit_row_id.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test_quoting.py::flake8 [GOOD] >> TErasurePerfTest::Split [GOOD] >> TErasurePerfTest::Restore |72.1%| [TS] {RESULT} ydb/tests/olap/delete/flake8 |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/yaml_config/validator/ut/validator/unittest >> Validator::MultitypeNodeValidation [GOOD] |72.1%| [TS] {RESULT} ydb/library/yaml_config/validator/ut/validator/unittest >> __main__.py::flake8 [GOOD] >> test_vector_index.py::flake8 [GOOD] >> test_vector_index_negative.py::flake8 [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part1/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/flake8 >> test_quoting.py::flake8 [GOOD] |72.1%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part1/py2_flake8 |72.1%| [TS] {RESULT} ydb/tests/functional/sqs/with_quotas/flake8 >> test_sql.py::flake8 [GOOD] >> test_update_script_tables.py::flake8 [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stability/tool/flake8 >> __main__.py::flake8 [GOOD] |72.1%| [TS] {RESULT} ydb/tests/stability/tool/flake8 >> common.cpp::clang_format [GOOD] >> common.h::clang_format [GOOD] >> OperationLog::Size1000 [GOOD] >> OperationLog::ConcurrentWrites >> __main__.py::flake8 [GOOD] |72.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_races.cpp |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/canonical/flake8 >> test_sql.py::flake8 [GOOD] |72.1%| [TS] {RESULT} ydb/tests/functional/canonical/flake8 |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/vector_index/medium/flake8 >> test_vector_index_negative.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] |72.1%| [TS] {RESULT} ydb/tests/datashard/vector_index/medium/flake8 |72.1%| [TS] {asan, default-linux-x86_64, 
release} ydb/tests/functional/script_execution/flake8 >> test_update_script_tables.py::flake8 [GOOD] |72.1%| [TS] {RESULT} ydb/tests/functional/script_execution/flake8 |72.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_races.cpp >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |72.1%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/config.pb.cc |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/common/clang_format >> common.h::clang_format [GOOD] |72.1%| [TS] {RESULT} ydb/core/kqp/ut/federated_query/common/clang_format >> __main__.py::flake8 [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/statistics_workload/flake8 >> __main__.py::flake8 [GOOD] |72.1%| [TS] {RESULT} ydb/tests/stress/statistics_workload/flake8 |72.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_actors_ut.cpp >> conftest.py::flake8 [GOOD] >> test_insert_restarts.py::flake8 [GOOD] >> runner.py::flake8 [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/olap_workload/tests/flake8 >> test_workload.py::flake8 [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/oltp_workload/tests/flake8 >> test_workload.py::flake8 [GOOD] |72.1%| [TS] {RESULT} ydb/tests/stress/olap_workload/tests/flake8 |72.1%| [TS] {RESULT} ydb/tests/stress/oltp_workload/tests/flake8 |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/datasource/ydb/flake8 >> test.py::flake8 [GOOD] |72.1%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/ydb/flake8 |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part10/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test_clean.py::flake8 [GOOD] >> test_clickbench.py::flake8 [GOOD] >> test_diff_processing.py::flake8 [GOOD] >> test_external.py::flake8 [GOOD] >> test_import_csv.py::flake8 [GOOD] >> test_tpch.py::flake8 [GOOD] >> test_upload.py::flake8 [GOOD] >> test_workload_oltp.py::flake8 [GOOD] >> test_workload_simple_queue.py::flake8 [GOOD] |72.2%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part10/py2_flake8 |72.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_actors_ut.cpp |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/driver/flake8 >> __main__.py::flake8 [GOOD] >> conftest.py::black [GOOD] >> test_clickhouse.py::black [GOOD] >> test_greenplum.py::black [GOOD] >> test_join.py::black [GOOD] >> test_mysql.py::black [GOOD] >> test_postgresql.py::black [GOOD] >> test_ydb.py::black [GOOD] >> test.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_auth_system_views.py::flake8 [GOOD] >> test_create_users.py::flake8 [GOOD] >> test_create_users_strict_acl_checks.py::flake8 [GOOD] >> test_db_counters.py::flake8 [GOOD] >> test_dynamic_tenants.py::flake8 [GOOD] >> test_publish_into_schemeboard_with_common_ssring.py::flake8 [GOOD] >> test_storage_config.py::flake8 [GOOD] >> test_system_views.py::flake8 [GOOD] >> test_tenants.py::flake8 [GOOD] >> test_user_administration.py::flake8 [GOOD] >> test_users_groups_with_acl.py::flake8 [GOOD] |72.2%| [TS] {RESULT} ydb/tests/tools/nemesis/driver/flake8 |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/benchmarks/runner/runner/flake8 >> runner.py::flake8 [GOOD] |72.2%| [TS] {RESULT} 
ydb/library/benchmarks/runner/runner/flake8 |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/restarts/flake8 >> test_insert_restarts.py::flake8 [GOOD] |72.2%| [TS] {RESULT} ydb/tests/fq/restarts/flake8 >> test.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/flake8 >> test_workload_simple_queue.py::flake8 [GOOD] |72.2%| [TS] {RESULT} ydb/tests/functional/tpc/medium/flake8 |72.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/pq_async_io/mock_pq_gateway.cpp |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/generic/analytics/black >> test_ydb.py::black [GOOD] >> helpers.py::flake8 [GOOD] >> test_base.py::flake8 [GOOD] >> test_query.py::flake8 [GOOD] >> test_s3.py::flake8 [GOOD] |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/viewer/tests/flake8 >> test.py::flake8 [GOOD] |72.2%| [TS] {RESULT} ydb/tests/fq/generic/analytics/black |72.2%| [TS] {RESULT} ydb/core/viewer/tests/flake8 |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/tenants/flake8 >> test_users_groups_with_acl.py::flake8 [GOOD] |72.2%| [TS] {RESULT} ydb/tests/functional/tenants/flake8 |72.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/pq_async_io/mock_pq_gateway.cpp |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part11/flake8 >> test.py::flake8 [GOOD] >> test_split_merge.py::flake8 [GOOD] >> OperationLog::ConcurrentWrites [GOOD] >> test.py::py2_flake8 [GOOD] |72.2%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part11/flake8 |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part17/flake8 >> test.py::flake8 [GOOD] |72.2%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part17/flake8 >> test_alter_ops.py::flake8 [GOOD] >> test_copy_ops.py::flake8 [GOOD] >> test_scheme_shard_operations.py::flake8 [GOOD] |72.2%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/config.pb.cc >> test_batch_operations.py::flake8 [GOOD] >> test_compatibility.py::flake8 [GOOD] >> test_data_type.py::flake8 [GOOD] >> test_example.py::flake8 [GOOD] >> test_export_s3.py::flake8 [GOOD] >> test_followers.py::flake8 [GOOD] >> test_rolling.py::flake8 [GOOD] >> test_statistics.py::flake8 [GOOD] >> test_stress.py::flake8 [GOOD] >> test_vector_index.py::flake8 [GOOD] >> udf/test_datetime2.py::flake8 [GOOD] >> udf/test_digest.py::flake8 [GOOD] >> udf/test_digest_regression.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> StaticConfigExamples::SingleNodeWithFile [GOOD] >> StaticConfigExamples::BLOCK42 [GOOD] >> StaticConfigExamples::MIRROR_3_DC_NODES [GOOD] >> StaticConfigExamples::MIRROR_3_DC_NODES_IN_MEMORY [GOOD] >> StaticConfigExamples::MIRROR_3_DC_9_NODES [GOOD] >> StaticConfigExamples::SINGLE_NODE_IN_MEMORY [GOOD] |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/flake8 >> test_split_merge.py::flake8 [GOOD] |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/sql/lib/flake8 >> test_s3.py::flake8 [GOOD] |72.2%| [TS] {RESULT} ydb/tests/datashard/split_merge/flake8 |72.2%| [TS] {RESULT} ydb/tests/sql/lib/flake8 >> test.py::flake8 [GOOD] |72.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/libydb-core-protos.a >> __main__.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] |72.2%| [AR] {RESULT} $(B)/ydb/core/protos/libydb-core-protos.a >> s3_helpers.py::flake8 
[GOOD] >> test_bindings_0.py::flake8 [GOOD] >> test_bindings_1.py::flake8 [GOOD] >> test_compressions.py::flake8 [GOOD] >> test_early_finish.py::flake8 [GOOD] >> test_explicit_partitioning_0.py::flake8 [GOOD] >> test_explicit_partitioning_1.py::flake8 [GOOD] >> test_format_setting.py::flake8 [GOOD] >> test_formats.py::flake8 [GOOD] >> test_inflight.py::flake8 [GOOD] >> test_insert.py::flake8 [GOOD] >> test_public_metrics.py::flake8 [GOOD] >> test_push_down.py::flake8 [GOOD] >> test_s3_0.py::flake8 [GOOD] >> test_s3_1.py::flake8 [GOOD] >> test_size_limit.py::flake8 [GOOD] |72.2%| [CC] {tool} $(B)/ydb/core/protos/config.pb.cc |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part6/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/flake8 >> test_scheme_shard_operations.py::flake8 [GOOD] |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part1/flake8 >> test.py::flake8 [GOOD] |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/compatibility/flake8 >> udf/test_digest_regression.py::flake8 [GOOD] |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/datasource/clickhouse/flake8 >> test.py::flake8 [GOOD] >> test_statistics.py::flake8 [GOOD] >> test_streaming_join.py::flake8 [GOOD] >> test_test_connection.py::flake8 [GOOD] >> test_mixed.py::flake8 [GOOD] >> test_validation.py::flake8 [GOOD] >> test_ydb_over_fq.py::flake8 [GOOD] >> test_yq_v2.py::flake8 [GOOD] >> test_cp_ic.py::flake8 [GOOD] >> test_dispatch.py::flake8 [GOOD] >> test_retry.py::flake8 [GOOD] >> test_retry_high_rate.py::flake8 [GOOD] |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/debug_tools/ut/unittest >> OperationLog::ConcurrentWrites [GOOD] |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yaml_config/static_validator/ut/example_configs/unittest >> StaticConfigExamples::SINGLE_NODE_IN_MEMORY [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part12/flake8 >> test.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/s3_backups/flake8 >> __main__.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/s3/flake8 >> test_yq_v2.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/mixedpy/flake8 >> test_mixed.py::flake8 [GOOD] |72.3%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.h_serialized.cpp >> test.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/multi_plane/flake8 >> test_retry_high_rate.py::flake8 [GOOD] |72.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.h_serialized.cpp >> test_generator.py::flake8 [GOOD] >> test_init.py::flake8 [GOOD] |72.3%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/compute_actor/kqp_compute_state.h_serialized.cpp |72.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/compute_actor/kqp_compute_state.h_serialized.cpp >> __main__.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part3/flake8 >> test.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/flake8 >> test_init.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> main.py::flake8 [GOOD] >> 
test.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tools/cfg/bin/flake8 >> __main__.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part8/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part16/flake8 >> test.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/serializable/flake8 >> test.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test_config_migration.py::flake8 [GOOD] >> test_config_with_metadata.py::flake8 [GOOD] >> test_configuration_version.py::flake8 [GOOD] >> test_distconf.py::flake8 [GOOD] >> test_generate_dynamic_config.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test_workload.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part2/flake8 >> test.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/apps/dstool/flake8 >> main.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> helpers.py::flake8 [GOOD] >> test_ctas.py::flake8 [GOOD] >> test_yt_reading.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part3/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/jaeger_tracing/ut/ydb-core-jaeger_tracing-ut >> test.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_alter_compression.py::flake8 [GOOD] >> test_alter_tiering.py::flake8 [GOOD] >> test_insert.py::flake8 [GOOD] >> test_read_update_write_load.py::flake8 [GOOD] >> test_scheme_load.py::flake8 [GOOD] >> test_simple.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/cdc/tests/flake8 >> test_workload.py::flake8 [GOOD] |72.3%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part6/py2_flake8 |72.3%| [TS] {RESULT} ydb/tests/functional/scheme_shard/flake8 |72.3%| [TS] {RESULT} ydb/library/yaml_config/static_validator/ut/example_configs/unittest |72.3%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part3/flake8 >> test_example.py::flake8 [GOOD] |72.3%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part16/flake8 |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/config/flake8 >> test_generate_dynamic_config.py::flake8 [GOOD] |72.3%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part2/flake8 |72.3%| [TS] {RESULT} ydb/apps/dstool/flake8 |72.3%| [TS] {RESULT} ydb/tests/functional/serializable/flake8 |72.3%| [TS] {RESULT} ydb/tests/functional/benchmarks_init/flake8 |72.3%| [TS] {RESULT} ydb/tools/cfg/bin/flake8 |72.3%| [TS] {RESULT} ydb/tests/functional/config/flake8 |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part14/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/datasource/mysql/flake8 >> test.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_import/flake8 >> test_yt_reading.py::flake8 [GOOD] |72.3%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part8/py2_flake8 |72.3%| [TS] {RESULT} 
ydb/tests/fq/yt/kqp_yt_file/part12/flake8 |72.3%| [TS] {RESULT} ydb/tests/stress/mixedpy/flake8 |72.3%| [TS] {RESULT} ydb/tests/fq/multi_plane/flake8 |72.3%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part14/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test_copy_table.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_ydb_backup.py::flake8 [GOOD] >> test_ydb_flame_graph.py::flake8 [GOOD] >> test_ydb_impex.py::flake8 [GOOD] >> test_ydb_recursive_remove.py::flake8 [GOOD] >> test_ydb_scheme.py::flake8 [GOOD] >> test_ydb_scripting.py::flake8 [GOOD] >> test_ydb_sql.py::flake8 [GOOD] >> test_ydb_table.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] |72.3%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/mysql/flake8 |72.4%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_import/flake8 |72.4%| [TS] {RESULT} ydb/tests/stress/s3_backups/flake8 |72.4%| [TS] {RESULT} ydb/tests/fq/s3/flake8 |72.4%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part1/flake8 |72.4%| [TS] {RESULT} ydb/tests/compatibility/flake8 |72.4%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/clickhouse/flake8 |72.4%| [TS] {RESULT} ydb/core/debug_tools/ut/unittest |72.4%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part3/py2_flake8 |72.4%| [LD] {RESULT} $(B)/ydb/core/jaeger_tracing/ut/ydb-core-jaeger_tracing-ut |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part7/flake8 >> test.py::flake8 [GOOD] |72.4%| [TS] {RESULT} ydb/tests/stress/cdc/tests/flake8 |72.4%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part7/flake8 |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/flake8 >> test_simple.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_select.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/log/tests/flake8 >> test_workload.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/datasource/ms_sql_server/flake8 >> test.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/example/flake8 >> test_example.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part6/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/flake8 >> test_copy_table.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/ydb_cli/flake8 >> test_ydb_table.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/ydb_serializable/replay/flake8 >> __main__.py::flake8 [GOOD] >> test_parametrized_queries.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/solomon/flake8 >> test.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/flake8 >> test_select.py::flake8 [GOOD] >> allure_utils.py::flake8 [GOOD] >> remote_execution.py::flake8 [GOOD] >> results_processor.py::flake8 [GOOD] >> utils.py::flake8 [GOOD] >> ydb_cli.py::flake8 [GOOD] >> ydb_cluster.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part5/flake8 >> test.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/flake8 >> test_parametrized_queries.py::flake8 [GOOD] >> ThrottlerControlTests::MultiThreaded5Threads150Ticks500Init15Step >> SamplingControlTests::EdgeCaseUpper [GOOD] >> 
ThrottlerControlTests::Overflow_1 [GOOD] >> ThrottlerControlTests::Simple [GOOD] >> ThrottlerControlTests::Overflow_2 [GOOD] >> SamplingControlTests::Simple [GOOD] >> ThrottlerControlTests::MultiThreaded2Threads200Ticks30Init7Step >> SamplingControlTests::EdgeCaseLower [GOOD] >> ThrottlerControlTests::MultiThreaded10Threads100Ticks1000Init22Step |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/lib/flake8 >> ydb_cluster.py::flake8 [GOOD] |72.4%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/common/description.h_serialized.cpp >> test_alloc_default.py::flake8 [GOOD] >> test_dc_local.py::flake8 [GOOD] >> test_result_limits.py::flake8 [GOOD] >> test_scheduling.py::flake8 [GOOD] >> ThrottlerControlTests::LongIdle [GOOD] |72.4%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common/description.h_serialized.cpp >> ThrottlerControlTests::MultiThreaded5Threads150Ticks500Init15Step [GOOD] >> ThrottlerControlTests::MultiThreaded10Threads100Ticks1000Init22Step [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/mem_alloc/flake8 >> test_scheduling.py::flake8 [GOOD] |72.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::LongIdle [GOOD] |72.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> SamplingControlTests::EdgeCaseUpper [GOOD] |72.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::Overflow_1 [GOOD] |72.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/splitter/ut/ut_splitter.cpp |72.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::Overflow_2 [GOOD] |72.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::Simple [GOOD] |72.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> SamplingControlTests::EdgeCaseLower [GOOD] |72.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> SamplingControlTests::Simple [GOOD] |72.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/splitter/ut/ut_splitter.cpp >> ThrottlerControlTests::MultiThreaded2Threads200Ticks30Init7Step [GOOD] |72.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::MultiThreaded10Threads100Ticks1000Init22Step [GOOD] |72.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::MultiThreaded5Threads150Ticks500Init15Step [GOOD] >> test_fifo_messaging.py::flake8 [GOOD] >> test_generic_messaging.py::flake8 [GOOD] >> test_polling.py::flake8 [GOOD] |72.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::MultiThreaded2Threads200Ticks30Init7Step [GOOD] |72.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/flake8 >> test_polling.py::flake8 [GOOD] |72.5%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/jaeger_tracing/ut/ydb-core-jaeger_tracing-ut |72.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/libydb-core-protos.a |72.5%| [TA] $(B)/ydb/core/jaeger_tracing/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |72.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/copy_table/ydb-tests-datashard-copy_table |72.5%| [TS] {RESULT} ydb/tests/olap/scenario/flake8 |72.5%| [TS] {RESULT} ydb/tests/stress/log/tests/flake8 |72.5%| [TS] {RESULT} ydb/tests/functional/ydb_cli/flake8 |72.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/copy_table/ydb-tests-datashard-copy_table |72.5%| [TS] {RESULT} ydb/tests/fq/mem_alloc/flake8 |72.5%| [LD] {RESULT} $(B)/ydb/tests/datashard/copy_table/ydb-tests-datashard-copy_table |72.5%| [TS] {RESULT} ydb/tests/functional/sqs/messaging/flake8 |72.5%| [TA] {RESULT} $(B)/ydb/core/jaeger_tracing/ut/test-results/unittest/{meta.json ... results_accumulator.log} |72.5%| [TS] {RESULT} ydb/tests/tools/ydb_serializable/replay/flake8 |72.5%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part5/flake8 |72.5%| [TS] {RESULT} ydb/tests/olap/lib/flake8 |72.5%| [TS] {RESULT} ydb/tests/datashard/parametrized_queries/flake8 |72.5%| [TS] {RESULT} ydb/tests/fq/solomon/flake8 |72.5%| [TS] {RESULT} ydb/tests/datashard/select/flake8 |72.5%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/ms_sql_server/flake8 |72.5%| [TS] {RESULT} ydb/tests/example/flake8 |72.5%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part6/py2_flake8 |72.5%| [TS] {RESULT} ydb/tests/datashard/copy_table/flake8 |72.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |72.5%| [LD] {RESULT} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |72.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |72.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |72.5%| [LD] {RESULT} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |72.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |72.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |72.5%| [LD] {RESULT} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |72.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |72.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/sql/ydb-tests-sql |72.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud |72.5%| [LD] {RESULT} $(B)/ydb/tests/sql/ydb-tests-sql |72.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/sql/ydb-tests-sql |72.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud |72.5%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud |72.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |72.6%| [LD] {RESULT} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |72.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |72.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/partitioning/ydb-tests-datashard-partitioning |72.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/partitioning/ydb-tests-datashard-partitioning |72.6%| [LD] {RESULT} $(B)/ydb/tests/datashard/partitioning/ydb-tests-datashard-partitioning |72.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms |72.6%| [LD] {RESULT} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms |72.6%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/tests/functional/cms/ydb-tests-functional-cms |72.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/delete/ydb-tests-olap-delete |72.6%| [LD] {RESULT} $(B)/ydb/tests/olap/delete/ydb-tests-olap-delete |72.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/delete/ydb-tests-olap-delete |72.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom |72.6%| [LD] {RESULT} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom |72.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom >> test_crud.py::flake8 [GOOD] >> test_discovery.py::flake8 [GOOD] >> test_execute_scheme.py::flake8 [GOOD] >> test_indexes.py::flake8 [GOOD] >> test_insert.py::flake8 [GOOD] >> test_isolation.py::flake8 [GOOD] >> test_public_api.py::flake8 [GOOD] >> test_read_table.py::flake8 [GOOD] >> test_session_grace_shutdown.py::flake8 [GOOD] >> test_session_pool.py::flake8 [GOOD] |72.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/dump_restore/ydb-tests-datashard-dump_restore |72.6%| [LD] {RESULT} $(B)/ydb/tests/datashard/dump_restore/ydb-tests-datashard-dump_restore |72.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/dump_restore/ydb-tests-datashard-dump_restore |72.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/api/flake8 >> test_session_pool.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_2_selects_limit.py::flake8 [GOOD] >> test_3_selects.py::flake8 [GOOD] >> test_bad_syntax.py::flake8 [GOOD] >> test_base.py::flake8 [GOOD] >> test_big_state.py::flake8 [GOOD] >> test_continue_mode.py::flake8 [GOOD] >> test_cpu_quota.py::flake8 [GOOD] >> test_delete_read_rules_after_abort_by_system.py::flake8 [GOOD] >> test_disposition.py::flake8 [GOOD] >> test_eval.py::flake8 [GOOD] >> test_invalid_consumer.py::flake8 [GOOD] >> test_kill_pq_bill.py::flake8 [GOOD] >> test_mem_alloc.py::flake8 [GOOD] >> test_metrics_cleanup.py::flake8 [GOOD] >> test_pq_read_write.py::flake8 [GOOD] >> test_public_metrics.py::flake8 [GOOD] >> test_read_rules_deletion.py::flake8 [GOOD] >> test_recovery.py::flake8 [GOOD] >> test_recovery_match_recognize.py::flake8 [GOOD] >> test_recovery_mz.py::flake8 [GOOD] >> test_restart_query.py::flake8 [GOOD] >> test_row_dispatcher.py::flake8 [GOOD] >> test_select_1.py::flake8 [GOOD] >> test_select_limit.py::flake8 [GOOD] >> test_select_limit_db_id.py::flake8 [GOOD] >> test_select_timings.py::flake8 [GOOD] >> test_stop.py::flake8 [GOOD] >> test_yds_bindings.py::flake8 [GOOD] >> test_yq_streaming.py::flake8 [GOOD] |72.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yds/flake8 >> test_yq_streaming.py::flake8 [GOOD] |72.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |72.6%| [TS] {RESULT} ydb/tests/functional/api/flake8 |72.6%| [LD] {RESULT} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |72.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |72.6%| [TS] {RESULT} ydb/tests/fq/yds/flake8 |72.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |72.6%| [LD] {RESULT} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |72.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |72.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/multi_plane/ydb-tests-fq-multi_plane |72.6%| [LD] {RESULT} $(B)/ydb/tests/fq/multi_plane/ydb-tests-fq-multi_plane |72.6%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/tests/fq/multi_plane/ydb-tests-fq-multi_plane |72.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/split_merge/ydb-tests-datashard-split_merge |72.6%| [LD] {RESULT} $(B)/ydb/tests/datashard/split_merge/ydb-tests-datashard-split_merge |72.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/split_merge/ydb-tests-datashard-split_merge |72.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless |72.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless |72.6%| [LD] {RESULT} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless |72.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution |72.7%| [LD] {RESULT} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution |72.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution |72.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |72.6%| [LD] {RESULT} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |72.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |72.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/bridge/ydb-tests-functional-bridge |72.7%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/oltp_workload |72.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/bridge/ydb-tests-functional-bridge |72.7%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/oltp_workload |72.7%| [LD] {RESULT} $(B)/ydb/tests/functional/bridge/ydb-tests-functional-bridge |72.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression |72.7%| [LD] {RESULT} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression |72.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression |72.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |72.7%| [LD] {RESULT} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |72.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |72.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut |72.7%| [LD] {RESULT} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut |72.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut |72.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication |72.7%| [LD] {RESULT} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication |72.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication >> TErasurePerfTest::Restore [GOOD] >> TErasureSmallBlobSizePerfTest::StringErasureMode [GOOD] >> TErasureSmallBlobSizePerfTest::ConvertToRopeMode [GOOD] |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/erasure/ut_perf/unittest >> TErasureSmallBlobSizePerfTest::ConvertToRopeMode [GOOD] |72.7%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging |72.7%| [TS] {RESULT} ydb/core/erasure/ut_perf/unittest |72.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging |72.7%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging |72.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage |72.7%| [LD] {RESULT} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage |72.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage |72.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |72.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium |72.7%| [LD] {RESULT} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |72.7%| [LD] {RESULT} $(B)/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium |72.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |72.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium |72.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |72.7%| [LD] {RESULT} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |72.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/parametrized_queries/ydb-tests-datashard-parametrized_queries |72.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |72.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/parametrized_queries/ydb-tests-datashard-parametrized_queries |72.7%| [LD] {RESULT} $(B)/ydb/tests/datashard/parametrized_queries/ydb-tests-datashard-parametrized_queries |72.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas |72.8%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas |72.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/secondary_index/ydb-tests-datashard-secondary_index |72.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas |72.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/secondary_index/ydb-tests-datashard-secondary_index |72.8%| [LD] {RESULT} $(B)/ydb/tests/datashard/secondary_index/ydb-tests-datashard-secondary_index |72.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/ttl/ydb-tests-datashard-ttl |72.8%| [LD] {RESULT} $(B)/ydb/tests/datashard/ttl/ydb-tests-datashard-ttl |72.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/ttl/ydb-tests-datashard-ttl |72.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_actions.cpp |72.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3 |72.8%| [LD] {RESULT} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3 |72.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3 |72.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/ttl_tiering/ydb-tests-olap-ttl_tiering |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_actions.cpp |72.8%| [LD] {RESULT} $(B)/ydb/tests/olap/ttl_tiering/ydb-tests-olap-ttl_tiering |72.8%| [LD] {BAZEL_UPLOAD, 
SKIPPED} $(B)/ydb/tests/olap/ttl_tiering/ydb-tests-olap-ttl_tiering |72.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_helpers/ls_checks.cpp |72.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |72.8%| [LD] {RESULT} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |72.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |72.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_helpers/ls_checks.cpp |72.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/config/ydb-tests-functional-config |72.8%| [LD] {RESULT} $(B)/ydb/tests/functional/config/ydb-tests-functional-config |72.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/mvp/meta/meta.cpp |72.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/config/ydb-tests-functional-config |72.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/log/tests/ydb-tests-stress-log-tests |72.8%| [LD] {RESULT} $(B)/ydb/tests/stress/log/tests/ydb-tests-stress-log-tests |72.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/log/tests/ydb-tests-stress-log-tests |72.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/select/ydb-tests-datashard-select |72.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/cdc/tests/ydb-tests-stress-cdc-tests |72.8%| [LD] {RESULT} $(B)/ydb/tests/datashard/select/ydb-tests-datashard-select |72.8%| [LD] {RESULT} $(B)/ydb/tests/stress/cdc/tests/ydb-tests-stress-cdc-tests |72.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/select/ydb-tests-datashard-select |72.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/cdc/tests/ydb-tests-stress-cdc-tests |72.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/benchmarks_init/ydb-tests-functional-benchmarks_init |72.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/show_create/view/tests/ydb-tests-stress-show_create-view-tests |72.8%| [LD] {RESULT} $(B)/ydb/tests/functional/benchmarks_init/ydb-tests-functional-benchmarks_init |72.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/benchmarks_init/ydb-tests-functional-benchmarks_init |72.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/s3_backups/s3_backups |72.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/mvp/meta/libydb-mvp-meta.a |72.8%| [LD] {RESULT} $(B)/ydb/tests/stress/show_create/view/tests/ydb-tests-stress-show_create-view-tests |72.8%| [LD] {RESULT} $(B)/ydb/tests/stress/s3_backups/s3_backups |72.9%| [AR] {RESULT} $(B)/ydb/mvp/meta/libydb-mvp-meta.a |72.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/show_create/view/tests/ydb-tests-stress-show_create-view-tests |72.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/s3_backups/s3_backups |72.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc |72.9%| [LD] {RESULT} $(B)/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc |72.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc |72.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/mvp/meta/meta.cpp |72.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/mvp/meta/libydb-mvp-meta.a |72.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |72.9%| [LD] {RESULT} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |72.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |72.9%| [LD] 
{default-linux-x86_64, release, asan} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests |72.9%| [LD] {RESULT} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests |72.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests |72.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/example/ydb-tests-example |72.9%| [LD] {RESULT} $(B)/ydb/tests/example/ydb-tests-example |72.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/example/ydb-tests-example |72.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/postgres_integrations/library/ut/ydb-tests-postgres_integrations-library-ut |72.9%| [LD] {RESULT} $(B)/ydb/tests/postgres_integrations/library/ut/ydb-tests-postgres_integrations-library-ut |72.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/postgres_integrations/library/ut/ydb-tests-postgres_integrations-library-ut |72.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/ut/ydb-core-config-ut |72.9%| [LD] {RESULT} $(B)/ydb/core/config/ut/ydb-core-config-ut |72.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/node_broker/tests/ydb-tests-stress-node_broker-tests |72.9%| [LD] {RESULT} $(B)/ydb/tests/stress/node_broker/tests/ydb-tests-stress-node_broker-tests |72.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/node_broker/tests/ydb-tests-stress-node_broker-tests |72.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_yard.cpp |72.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_yard.cpp |72.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |72.9%| [LD] {RESULT} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |72.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests >> test_init.py::TestTpchInit::test_s1_column >> test_init.py::TestClickbenchInit::test_s1_s3 >> test_generator.py::TestTpchGenerator::test_s1_parts >> test_generator.py::TestTpcdsGenerator::test_s1 >> test_init.py::TestTpcdsInit::test_s1_s3 >> test_init.py::TestTpchInit::test_s1_column_decimal >> test_init.py::TestTpcdsInit::test_s1_column_decimal_ydb >> test_generator.py::TestTpcdsGenerator::test_s1_state >> test_init.py::TestTpchInit::test_s1_s3 >> test_init.py::TestClickbenchInit::test_s1_column >> test_generator.py::TestTpcdsGenerator::test_s1_parts >> test_init.py::TestTpcdsInit::test_s1_column >> test_init.py::TestTpchInit::test_s1_column [GOOD] |72.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/config/ut/ydb-core-config-ut |72.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_ut.cpp |72.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache |72.9%| [LD] {RESULT} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache |72.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/tests/kikimr_tpch/ydb-core-kqp-tests-kikimr_tpch >> test_init.py::TestClickbenchInit::test_s1_s3 [GOOD] >> test_init.py::TestTpcdsInit::test_s100_column >> test_init.py::TestTpcdsInit::test_s1_s3 [GOOD] >> test_init.py::TestTpchInit::test_s100_column >> ConfigProto::ForbidNewRequired >> test_init.py::TestTpcdsInit::test_s1_column_decimal_ydb [GOOD] >> test_init.py::TestTpcdsInit::test_s1_row >> test_init.py::TestTpchInit::test_s1_column_decimal [GOOD] >> test_init.py::TestTpchInit::test_s1_s3 [GOOD] |72.9%| [LD] 
{BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache |72.9%| [LD] {RESULT} $(B)/ydb/core/kqp/tests/kikimr_tpch/ydb-core-kqp-tests-kikimr_tpch |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe |73.0%| [LD] {RESULT} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe |73.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe >> test_init.py::TestTpchInit::test_s1_column_decimal_ydb >> test_init.py::TestClickbenchInit::test_s1_column [GOOD] >> test_init.py::TestClickbenchInit::test_s1_row |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/library/ut/ydb-tests-library-ut |73.0%| [LD] {RESULT} $(B)/ydb/tests/library/ut/ydb-tests-library-ut |73.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/library/ut/ydb-tests-library-ut |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/metadata/ut/ydb-core-client-metadata-ut |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |73.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/metadata/ut/ydb-core-client-metadata-ut |73.0%| [LD] {RESULT} $(B)/ydb/core/client/metadata/ut/ydb-core-client-metadata-ut |73.0%| [LD] {RESULT} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |73.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |73.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_ut.cpp >> ConfigProto::ForbidNewRequired [GOOD] >> test_init.py::TestTpcdsInit::test_s1_column [GOOD] >> test_init.py::TestTpcdsInit::test_s1_column_decimal >> test_init.py::TestTpchInit::test_s100_column [GOOD] >> test_init.py::TestClickbenchInit::test_s1_row [GOOD] >> test_init.py::TestTpcdsInit::test_s1_row [GOOD] >> test_init.py::TestTpcdsInit::test_s100_column [GOOD] >> test_init.py::TestTpchInit::test_s1_column_decimal_ydb [GOOD] |73.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/config/ut/unittest >> ConfigProto::ForbidNewRequired [GOOD] |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/stress-reconfig_state_storage_workload-tests |73.0%| [TS] {RESULT} ydb/core/config/ut/unittest |73.0%| [LD] {RESULT} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/stress-reconfig_state_storage_workload-tests |73.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/stress-reconfig_state_storage_workload-tests |73.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpchInit::test_s1_column [GOOD] |73.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpchInit::test_s1_column_decimal [GOOD] |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests |73.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption |73.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests |73.0%| [LD] {RESULT} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption |73.0%| [LD] {RESULT} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests >> test_init.py::TestTpcdsInit::test_s1_column_decimal 
[GOOD] |73.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpchInit::test_s1_s3 [GOOD] >> TFunctionsMetadataTest::Serialization |73.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpchInit::test_s100_column [GOOD] >> TFunctionsMetadataTest::Serialization [GOOD] |73.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestClickbenchInit::test_s1_row [GOOD] |73.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpchInit::test_s1_column_decimal_ydb [GOOD] |73.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpcdsInit::test_s1_row [GOOD] |73.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpcdsInit::test_s1_column_decimal [GOOD] |73.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpcdsInit::test_s100_column [GOOD] |73.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/client/metadata/ut/unittest >> TFunctionsMetadataTest::Serialization [GOOD] |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/viewer/json/ut/ydb-core-viewer-json-ut |73.0%| [TS] {RESULT} ydb/core/client/metadata/ut/unittest |73.1%| [LD] {RESULT} $(B)/ydb/core/viewer/json/ut/ydb-core-viewer-json-ut |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests |73.0%| [LD] {RESULT} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests |73.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/s3_backups/tests/ydb-tests-stress-s3_backups-tests |73.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests |73.1%| [LD] {RESULT} $(B)/ydb/tests/stress/s3_backups/tests/ydb-tests-stress-s3_backups-tests |73.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/s3_backups/tests/ydb-tests-stress-s3_backups-tests |73.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/driver_lib/version/ut/ydb-core-driver_lib-version-ut |73.1%| [LD] {RESULT} $(B)/ydb/core/driver_lib/version/ut/ydb-core-driver_lib-version-ut |73.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql |73.1%| [LD] {RESULT} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql |73.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql |73.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut.cpp >> integrations_test.py::test_read_jtest_results[f/failed1] [GOOD] >> integrations_test.py::test_read_jtest_results[f/failed2] [GOOD] >> integrations_test.py::test_read_jtest_results[o/OK] [GOOD] >> integrations_test.py::test_read_jtest_results[f/error1] [GOOD] >> integrations_test.py::test_read_jtest_results[s/skipped1] [GOOD] >> integrations_test.py::test_read_jtest_results[s/skipped2] [GOOD] >> integrations_test.py::test_read_jtest_with_one_result [GOOD] >> Json::BasicRendering >> Json::BasicRendering [GOOD] |73.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut.cpp |73.1%| [AR] {tool} $(B)/ydb/core/protos/libydb-core-protos.a |73.1%| [AR] {RESULT} 
$(B)/ydb/core/protos/libydb-core-protos.a ------- [TS] {asan, default-linux-x86_64, release} ydb/tests/postgres_integrations/library/ut/py3test >> integrations_test.py::test_read_jtest_with_one_result [GOOD] Test command err: /home/runner/.ya/build/build_root/2btm/0017cf/ydb/tests/postgres_integrations/library/ut/test-results/py3test/ydb/tests/postgres_integrations/library/pytest_integration.py:26: PytestCollectionWarning: cannot collect test class 'TestCase' because it has a __init__ constructor (from: integrations_test.py) /home/runner/.ya/build/build_root/2btm/0017cf/ydb/tests/postgres_integrations/library/ut/test-results/py3test/ydb/tests/postgres_integrations/library/pytest_integration.py:20: PytestCollectionWarning: cannot collect test class 'TestState' because it has a __init__ constructor (from: integrations_test.py) |73.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/viewer/json/ut/unittest >> Json::BasicRendering [GOOD] |73.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/tests/kikimr_tpch/ydb-core-kqp-tests-kikimr_tpch |73.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests >> OldFormat::SameVersion [GOOD] >> OldFormat::DefaultRules [GOOD] >> OldFormat::PrevYear [GOOD] >> OldFormat::Trunk [GOOD] >> OldFormat::UnexpectedTrunk |73.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests |73.1%| [TS] {RESULT} ydb/tests/postgres_integrations/library/ut/py3test |73.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/ut_transform/ydb-library-yaml_config-ut_transform |73.1%| [LD] {RESULT} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests |73.1%| [LD] {RESULT} $(B)/ydb/library/yaml_config/ut_transform/ydb-library-yaml_config-ut_transform |73.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yaml_config/ut_transform/ydb-library-yaml_config-ut_transform |73.1%| [TS] {RESULT} ydb/core/viewer/json/ut/unittest >> OldFormat::UnexpectedTrunk [GOOD] >> OldFormat::TooOld [GOOD] >> OldFormat::OldNbs [GOOD] >> VersionParser::Basic [GOOD] >> YdbVersion::DefaultSameVersion [GOOD] >> YdbVersion::DefaultPrevMajor [GOOD] >> YdbVersion::DefaultNextMajor [GOOD] >> YdbVersion::DefaultHotfix [GOOD] >> YdbVersion::DefaultCompatible [GOOD] >> YdbVersion::DefaultNextYear [GOOD] >> YdbVersion::DefaultPrevYear [GOOD] >> YdbVersion::DefaultNewMajor [GOOD] >> YdbVersion::DefaultOldMajor [GOOD] >> YdbVersion::DefaultDifferentBuild [GOOD] >> YdbVersion::DefaultDifferentBuildIncompatible [GOOD] >> YdbVersion::LimitOld [GOOD] >> YdbVersion::LimitNew [GOOD] >> YdbVersion::CurrentCanLoadFrom [GOOD] >> YdbVersion::CurrentCanLoadFromAllOlder [GOOD] >> YdbVersion::CurrentCanLoadFromIncompatible [GOOD] >> YdbVersion::CurrentStoresReadableBy [GOOD] >> YdbVersion::StoredReadableBy [GOOD] >> YdbVersion::StoredReadableByIncompatible [GOOD] >> YdbVersion::StoredWithRules [GOOD] >> YdbVersion::StoredWithRulesIncompatible [GOOD] >> YdbVersion::OldNbsStored [GOOD] >> YdbVersion::OldNbsIncompatibleStored [GOOD] >> YdbVersion::NewNbsCurrent [GOOD] >> YdbVersion::NewNbsIncompatibleCurrent [GOOD] >> YdbVersion::OneAcceptedVersion [GOOD] >> YdbVersion::ForbiddenMinor [GOOD] >> YdbVersion::DefaultRulesWithExtraForbidden [GOOD] >> YdbVersion::ExtraAndForbidden [GOOD] >> YdbVersion::SomeRulesAndOtherForbidden [GOOD] >> YdbVersion::Component [GOOD] >> YdbVersion::OtherComponent [GOOD] >> YdbVersion::YDBAndNbs [GOOD] >> YdbVersion::DifferentYdbVersionsWithNBSRules [GOOD] >> YdbVersion::WithPatchAndWithoutPatch [GOOD] >> 
YdbVersion::AcceptSpecificHotfixWithoutPatch [GOOD] >> YdbVersion::TrunkYDBAndNbs [GOOD] >> YdbVersion::TrunkAndStable [GOOD] >> YdbVersion::CompatibleWithSelf [GOOD] >> YdbVersion::PrintCurrentVersionProto [GOOD] |73.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std |73.1%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std |73.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std ------- [TM] {asan, default-linux-x86_64, release} ydb/core/driver_lib/version/ut/unittest >> YdbVersion::PrintCurrentVersionProto [GOOD] Test command err: Application: "ydb" |73.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api |73.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api |73.1%| [TM] {RESULT} ydb/core/driver_lib/version/ut/unittest |73.1%| [LD] {RESULT} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api >> test_generator.py::TestTpcdsGenerator::test_s1_state_and_parts |73.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/transfer/tests/ydb-tests-stress-transfer-tests |73.1%| [LD] {RESULT} $(B)/ydb/tests/stress/transfer/tests/ydb-tests-stress-transfer-tests |73.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/transfer/tests/ydb-tests-stress-transfer-tests |73.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/tools/lib/cmds/ut/ydb-public-tools-lib-cmds-ut |73.2%| [LD] {RESULT} $(B)/ydb/public/tools/lib/cmds/ut/ydb-public-tools-lib-cmds-ut |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/tools/lib/cmds/ut/ydb-public-tools-lib-cmds-ut |73.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/viewer/json/ut/ydb-core-viewer-json-ut |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import |73.2%| [LD] {RESULT} $(B)/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import >> test_init.py::TestTpchInit::test_s1_row |73.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/driver_lib/version/ut/ydb-core-driver_lib-version-ut >> test_generator.py::TestTpchGenerator::test_s1_state_and_parts >> test_generator.py::TestTpchGenerator::test_s1 >> test_init.py::TestTpchInit::test_s1_row [GOOD] >> test_generator.py::TestTpchGenerator::test_s1_state |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |73.2%| [LD] {RESULT} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |73.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable |73.2%| [LD] {RESULT} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable |73.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpchInit::test_s1_row [GOOD] |73.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test |73.2%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |73.2%| [LD] {RESULT} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test |73.2%| [LD] {RESULT} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/generic/streaming/ydb-tests-fq-generic-streaming |73.2%| [LD] {RESULT} $(B)/ydb/tests/fq/generic/streaming/ydb-tests-fq-generic-streaming |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/generic/streaming/ydb-tests-fq-generic-streaming |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/tstool/tstool |73.2%| [LD] {RESULT} $(B)/ydb/tools/tstool/tstool |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/tstool/tstool |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/functional-sqs-merge_split_common_table-fifo |73.2%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/functional-sqs-merge_split_common_table-fifo |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/functional-sqs-merge_split_common_table-fifo |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/generic/analytics/ydb-tests-fq-generic-analytics |73.2%| [LD] {RESULT} $(B)/ydb/tests/fq/generic/analytics/ydb-tests-fq-generic-analytics |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/generic/analytics/ydb-tests-fq-generic-analytics |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/tests/tpch/tpch |73.2%| [LD] {RESULT} $(B)/ydb/core/kqp/tests/tpch/tpch |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/tests/tpch/tpch |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/vector_index/medium/ydb-tests-datashard-vector_index-medium |73.2%| [LD] {RESULT} $(B)/ydb/tests/datashard/vector_index/medium/ydb-tests-datashard-vector_index-medium |73.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/vector_index/medium/ydb-tests-datashard-vector_index-medium |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/tenants/ydb-tests-functional-tenants |73.3%| [LD] {RESULT} $(B)/ydb/tests/functional/tenants/ydb-tests-functional-tenants |73.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/tenants/ydb-tests-functional-tenants |73.3%| [LD] {tool} $(B)/ydb/core/tx/schemeshard/generated/codegen/codegen |73.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/generated/codegen/codegen |73.3%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/generated/dispatch_op.h |73.3%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/generated/dispatch_op.h |73.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/generated/codegen/codegen |73.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/generated/codegen/codegen |73.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/sql/large/ydb-tests-sql-large |73.3%| [LD] {RESULT} $(B)/ydb/tests/sql/large/ydb-tests-sql-large |73.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/sql/large/ydb-tests-sql-large |73.3%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/base/generated/codegen/ydb-core-base-generated-codegen |73.3%| [LD] {RESULT} $(B)/ydb/core/base/generated/codegen/ydb-core-base-generated-codegen |73.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/dstool/ydb-dstool |73.3%| [LD] {RESULT} $(B)/ydb/apps/dstool/ydb-dstool |73.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/dstool/ydb-dstool |73.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/canonical/ydb-tests-functional-canonical |73.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/canonical/ydb-tests-functional-canonical |73.3%| [LD] {RESULT} $(B)/ydb/tests/functional/canonical/ydb-tests-functional-canonical |73.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/ydbd_slice/bin/ydbd_slice |73.3%| [LD] {RESULT} $(B)/ydb/tools/ydbd_slice/bin/ydbd_slice |73.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/ydbd_slice/bin/ydbd_slice >> test.py::test_kikimr_config_generator_generic_connector_config [GOOD] |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/generated/codegen/codegen |73.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/s3/ydb-tests-fq-s3 |73.3%| [LD] {RESULT} $(B)/ydb/tests/fq/s3/ydb-tests-fq-s3 |73.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/s3/ydb-tests-fq-s3 |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/base/generated/codegen/ydb-core-base-generated-codegen |73.3%| [TS] {asan, default-linux-x86_64, release} ydb/public/tools/lib/cmds/ut/py3test >> test.py::test_kikimr_config_generator_generic_connector_config [GOOD] |73.3%| [LD] {tool} $(B)/ydb/core/base/generated/codegen/ydb-core-base-generated-codegen |73.3%| [TS] {RESULT} ydb/public/tools/lib/cmds/ut/py3test |73.3%| [LD] {RESULT} $(B)/ydb/core/base/generated/codegen/ydb-core-base-generated-codegen |73.3%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/generated/runtime_feature_flags.h |73.3%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/base/generated/runtime_feature_flags.h |73.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/ydb_serializable/ydb_serializable |73.3%| [LD] {RESULT} $(B)/ydb/tests/tools/ydb_serializable/ydb_serializable |73.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/ydb_serializable/ydb_serializable |73.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/mvp/oidc_proxy/bin/mvp_oidc_proxy |73.3%| [LD] {RESULT} $(B)/ydb/mvp/oidc_proxy/bin/mvp_oidc_proxy |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/mvp/oidc_proxy/bin/mvp_oidc_proxy |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/postgres_integrations/go-libpq/ydb-tests-postgres_integrations-go-libpq |73.4%| [LD] {RESULT} $(B)/ydb/tests/postgres_integrations/go-libpq/ydb-tests-postgres_integrations-go-libpq |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/postgres_integrations/go-libpq/ydb-tests-postgres_integrations-go-libpq |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/vector_index/large/ydb-tests-datashard-vector_index-large |73.4%| [LD] {RESULT} $(B)/ydb/tests/datashard/vector_index/large/ydb-tests-datashard-vector_index-large |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/vector_index/large/ydb-tests-datashard-vector_index-large |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/ydb_cli/ydb-tests-functional-ydb_cli |73.4%| [LD] {RESULT} $(B)/ydb/tests/functional/ydb_cli/ydb-tests-functional-ydb_cli |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/ydb_cli/ydb-tests-functional-ydb_cli |73.4%| [LD] {default-linux-x86_64, 
release, asan} $(B)/ydb/tests/fq/yds/ydb-tests-fq-yds |73.4%| [LD] {RESULT} $(B)/ydb/tests/fq/yds/ydb-tests-fq-yds |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/yds/ydb-tests-fq-yds |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/large/ydb-tests-functional-sqs-large |73.4%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/large/ydb-tests-functional-sqs-large |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/large/ydb-tests-functional-sqs-large |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/dml/ydb-tests-datashard-dml |73.4%| [LD] {RESULT} $(B)/ydb/tests/datashard/dml/ydb-tests-datashard-dml |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/dml/ydb-tests-datashard-dml |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/ydb-tests-olap |73.4%| [LD] {RESULT} $(B)/ydb/tests/olap/ydb-tests-olap |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/ydb-tests-olap |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/ydb_serializable/replay/replay |73.4%| [LD] {RESULT} $(B)/ydb/tests/tools/ydb_serializable/replay/replay |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/ydb_serializable/replay/replay |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/tsserver/tsserver |73.4%| [LD] {RESULT} $(B)/ydb/tools/tsserver/tsserver |73.4%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tools/tsserver/tsserver |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/tpc/large/ydb-tests-functional-tpc-large |73.4%| [LD] {RESULT} $(B)/ydb/tests/functional/tpc/large/ydb-tests-functional-tpc-large |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/tpc/large/ydb-tests-functional-tpc-large |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/mixedpy/ydb-tests-stress-mixedpy |73.4%| [LD] {RESULT} $(B)/ydb/tests/stress/mixedpy/ydb-tests-stress-mixedpy |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/mixedpy/ydb-tests-stress-mixedpy |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/tools/local_ydb/local_ydb |73.4%| [LD] {RESULT} $(B)/ydb/public/tools/local_ydb/local_ydb |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/tools/local_ydb/local_ydb |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/cfg/bin/ydb_configure |73.4%| [LD] {RESULT} $(B)/ydb/tools/cfg/bin/ydb_configure |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/cfg/bin/ydb_configure |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/autoconfig/ydb-tests-functional-autoconfig |73.5%| [LD] {RESULT} $(B)/ydb/tests/functional/autoconfig/ydb-tests-functional-autoconfig |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/autoconfig/ydb-tests-functional-autoconfig |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/mvp/meta/bin/mvp_meta |73.5%| [LD] {RESULT} $(B)/ydb/mvp/meta/bin/mvp_meta |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/mvp/meta/bin/mvp_meta |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/nemesis/driver/nemesis |73.5%| [LD] {RESULT} $(B)/ydb/tests/tools/nemesis/driver/nemesis |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/nemesis/driver/nemesis |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/plans/ydb-tests-fq-plans |73.5%| [LD] {RESULT} $(B)/ydb/tests/fq/plans/ydb-tests-fq-plans |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/plans/ydb-tests-fq-plans |73.5%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/tests/olap/data_quotas/ydb-tests-olap-data_quotas |73.5%| [LD] {RESULT} $(B)/ydb/tests/olap/data_quotas/ydb-tests-olap-data_quotas |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/data_quotas/ydb-tests-olap-data_quotas |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/compatibility/ydb-tests-compatibility |73.5%| [LD] {RESULT} $(B)/ydb/tests/compatibility/ydb-tests-compatibility |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/compatibility/ydb-tests-compatibility |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/common/ydb-tests-functional-sqs-common |73.5%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/common/ydb-tests-functional-sqs-common |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/common/ydb-tests-functional-sqs-common |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/statistics_workload/statistics_workload |73.5%| [LD] {RESULT} $(B)/ydb/tests/stress/statistics_workload/statistics_workload |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/statistics_workload/statistics_workload |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/multinode/ydb-tests-functional-sqs-multinode |73.5%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/multinode/ydb-tests-functional-sqs-multinode |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/multinode/ydb-tests-functional-sqs-multinode |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/ydb/ydb-tests-stability-ydb |73.5%| [LD] {RESULT} $(B)/ydb/tests/stability/ydb/ydb-tests-stability-ydb |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/ydb/ydb-tests-stability-ydb |73.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/service_initializer.cpp |73.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/service_initializer.cpp |73.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/converter.cpp |73.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/converter.cpp |73.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/base/counters.cpp |73.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/base/counters.cpp |73.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/base/libcore-ymq-base.a |73.5%| [AR] {RESULT} $(B)/ydb/core/ymq/base/libcore-ymq-base.a |73.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/base/libcore-ymq-base.a |73.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/session.cpp |73.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/session.cpp |73.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_stat_aggr.cpp |73.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_stat_aggr.cpp >> TErasureTypeTest::TestBlock42PartialRestore0 [GOOD] |73.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock42PartialRestore0 [GOOD] |73.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/util/actorsys_test/testactorsys.cpp |73.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/util/actorsys_test/testactorsys.cpp |73.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor.cpp |73.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor.cpp >> TErasureTypeTest::TestBlock42PartialRestore3 
[GOOD] >> TErasureTypeTest::TestBlock42PartialRestore1 [GOOD] >> TErasureTypeTest::TestBlock42PartialRestore2 [GOOD] |73.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock42PartialRestore3 [GOOD] |73.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/schema/schema.cpp |73.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock42PartialRestore2 [GOOD] |73.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/schema/schema.cpp |73.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sharding/random.cpp |73.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock42PartialRestore1 [GOOD] |73.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sharding/random.cpp |73.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/upload_rows_common_impl.cpp |73.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/upload_rows_common_impl.cpp |73.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/http/http.cpp |73.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/http/http.cpp |73.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/http/libcore-ymq-http.a |73.6%| [AR] {RESULT} $(B)/ydb/core/ymq/http/libcore-ymq-http.a |73.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/http/libcore-ymq-http.a |73.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sharding/sharding.cpp |73.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sharding/sharding.cpp |73.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/read_table_impl.cpp |73.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/read_table_impl.cpp |73.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/context.cpp |73.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/context.cpp |73.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/util/failure_injection.cpp |73.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/util/failure_injection.cpp |73.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_node_registration.cpp |73.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_node_registration.cpp |73.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/backpressure/unisched.cpp |73.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/unisched.cpp |73.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_generate.cpp |73.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_generate.cpp |73.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/update.cpp |73.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/update.cpp |73.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/secret.cpp |73.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/secret.cpp |73.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_pq.cpp |73.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_pq.cpp |73.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_pdisk.cpp |73.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_pdisk.cpp |73.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/initializer.cpp |73.7%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/initializer.cpp |73.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/tools/dqrun/lib/dqrun_lib.cpp |73.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/deprecated/persqueue_v0/persqueue.cpp |73.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/tools/dqrun/lib/dqrun_lib.cpp |73.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/tools/dqrun/lib/libtools-dqrun-lib.a |73.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/deprecated/persqueue_v0/persqueue.cpp |73.7%| [AR] {RESULT} $(B)/ydb/library/yql/tools/dqrun/lib/libtools-dqrun-lib.a |73.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yql/tools/dqrun/lib/libtools-dqrun-lib.a |73.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_vdisk.cpp |73.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_vdisk.cpp |73.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_pipe_req.cpp |73.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_pipe_req.cpp |73.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/secret_behaviour.cpp |73.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/secret_behaviour.cpp |73.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/layout/layout.cpp |73.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/layout/layout.cpp |73.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/layout/libschemeshard-olap-layout.a |73.7%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/layout/libschemeshard-olap-layout.a |73.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/drop_column.cpp |73.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/layout/libschemeshard-olap-layout.a |73.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/drop_column.cpp |73.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_public.cpp |73.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_public.cpp |73.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp |73.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp |73.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/tasks_list.cpp |73.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/tasks_list.cpp |73.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/libolap-bg_tasks-transactions.a |73.7%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/libolap-bg_tasks-transactions.a |73.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/libolap-bg_tasks-transactions.a |73.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_query.cpp |73.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_query.cpp |73.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/processor.cpp |73.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__update_tenant_state.cpp |73.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/processor.cpp |73.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__update_tenant_state.cpp |73.7%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/base/board_publish.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/board_publish.cpp |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_reader/actor.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_reader/actor.cpp |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console.cpp |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_persistent_storage.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console.cpp |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hulldefs.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_persistent_storage.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hulldefs.cpp |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_reader/fetching_steps.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_reader/fetching_steps.cpp |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/object.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/object.cpp |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_locks/locks/list.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_locks/locks/list.cpp |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/fetcher.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/fetcher.cpp |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/actor.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/actor.cpp |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/compacted.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/compacted.cpp |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/board_lookup.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/board_lookup.cpp |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/max/constructor.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/max/constructor.cpp |73.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/max/libstorage-indexes-max.global.a |73.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/max/libstorage-indexes-max.global.a |73.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/max/libstorage-indexes-max.global.a |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_write_source_cursor.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_write_source_cursor.cpp |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/query_stats/query_metrics.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/query_stats/query_metrics.cpp |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/common/kqp_tx.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/common/kqp_tx.cpp |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/common/kqp_ru_calc.cpp |73.8%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/kqp/common/kqp_ru_calc.cpp |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/written.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/written.cpp >> TErasureTypeTest::TestAllSpeciesCrcWhole2of2 [GOOD] |73.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestAllSpeciesCrcWhole2of2 [GOOD] |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/queue_leader.cpp |73.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/queue_leader.cpp |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/initializer/initializer.cpp |73.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/view/behaviour.cpp |73.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/initializer.cpp |73.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.global.a |73.9%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.global.a |73.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/view/behaviour.cpp |73.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.global.a |73.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/blob_depot.cpp |73.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/blob_depot.cpp |73.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_start_to_source.cpp |73.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_start_to_source.cpp |73.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_reader/fetcher.cpp |73.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_reader/fetcher.cpp |73.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool/behaviour.cpp |73.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool/behaviour.cpp |73.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.global.a |73.9%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.global.a |73.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.global.a |73.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_validate.cpp |73.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_validate.cpp |73.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/external_data_source/behaviour.cpp |73.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/external_data_source/behaviour.cpp |73.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.global.a |73.9%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.global.a |73.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.global.a |73.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/changes.cpp |73.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/changes.cpp >> 
TErasureTypeTest::TestAllSpecies1of2 [GOOD] >> TErasureTypeTest::TestAllSpecies2of2 |73.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_reader/fetching_executor.cpp |73.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_reader/fetching_executor.cpp |73.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/group_sessions.cpp |73.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/group_sessions.cpp |73.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/portion_info.cpp |73.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/portion_info.cpp >> TErasureTypeTest::TestAllSpeciesCrcWhole1of2 [GOOD] |73.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_remove_blobs.cpp |73.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_remove_blobs.cpp |73.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestAllSpeciesCrcWhole1of2 [GOOD] |73.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/column_record.cpp |73.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/column_record.cpp |73.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/bridge/bridge.cpp |73.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/dsproxy/bridge/libblobstorage-dsproxy-bridge.a |73.9%| [AR] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/bridge/libblobstorage-dsproxy-bridge.a |73.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/bridge/bridge.cpp |74.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/dsproxy/bridge/libblobstorage-dsproxy-bridge.a |73.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_tenant.cpp |74.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_tenant.cpp |74.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/initializer.cpp |74.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/initializer.cpp |74.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replbroker.cpp |74.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replbroker.cpp |74.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/host/kqp_type_ann.cpp |74.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_type_ann.cpp |74.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_write_index.cpp |74.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_write_index.cpp |74.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/constructor.cpp |74.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/constructor.cpp |74.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/libstorage-indexes-skip_index.a |74.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/libstorage-indexes-skip_index.a |74.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/libstorage-indexes-skip_index.a |74.0%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.cpp |74.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.cpp |74.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.global.a |74.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.global.a |74.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.global.a |74.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/index_info.cpp |74.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/index_info.cpp |74.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__cleanup_subscriptions.cpp |74.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__cleanup_subscriptions.cpp >> test_generator.py::TestTpchGenerator::test_s1_parts [GOOD] |74.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/read_metadata.cpp |74.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/read_metadata.cpp >> ErasureBrandNew::Block42_restore [GOOD] >> ErasureBrandNew::Block42_restore_benchmark |74.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.a |74.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.a |74.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.a |74.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpchGenerator::test_s1_parts [GOOD] |74.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_locks/locks/snapshot.cpp |74.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_http.cpp |74.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_locks/locks/snapshot.cpp |74.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_http.cpp |74.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_locks/locks/libcolumnshard-data_locks-locks.a |74.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_locks/locks/libcolumnshard-data_locks-locks.a |74.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_locks/locks/libcolumnshard-data_locks-locks.a |74.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool/manager.cpp |74.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/query_stathuge.cpp |74.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/pg_tables/pg_tables.cpp |74.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_stathuge.cpp |74.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.a |74.0%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.a |74.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/pg_tables/libcore-sys_view-pg_tables.a |74.1%| [AR] 
{RESULT} $(B)/ydb/core/sys_view/pg_tables/libcore-sys_view-pg_tables.a |74.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.a |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool/manager.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/pg_tables/pg_tables.cpp |74.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/pg_tables/libcore-sys_view-pg_tables.a |74.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_start_source_cursor.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_start_source_cursor.cpp >> ErasureBrandNew::Block42_restore_benchmark [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> ErasureBrandNew::Block42_restore_benchmark [GOOD] Test command err: totalSize# 502193677 period1# 1.526523s period2# 0.625640s MB/s1# 328.9787818 MB/s2# 802.6879308 factor# 2.439938303 |74.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/table/behaviour.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/table/behaviour.cpp |74.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/table/libgateway-behaviour-table.global.a |74.1%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/table/libgateway-behaviour-table.global.a |74.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/table/libgateway-behaviour-table.global.a |74.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/walle_list_tasks_adapter.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/walle_list_tasks_adapter.cpp |74.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_mon.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_mon.cpp |74.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/manager.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/manager.cpp |74.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/walle_create_task_adapter.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/walle_create_task_adapter.cpp >> test_generator.py::TestTpchGenerator::test_s1 [GOOD] |74.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpchGenerator::test_s1 [GOOD] |74.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_actor.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_actor.cpp |74.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/full_scan_sorted.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/full_scan_sorted.cpp |74.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/merge.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/merge.cpp |74.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sstvec.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sstvec.cpp |74.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/request/request_actor.cpp 
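(Editor's note, not part of the build log: the Block42_restore_benchmark line above reports totalSize, two measured periods, two throughput figures and a speed-up factor. A minimal sketch follows that re-derives those numbers, assuming the test divides totalSize in bytes by each period and reports decimal megabytes per second, with factor = MB/s2 / MB/s1; the field interpretation is an assumption, not taken from the test source.)

    # Hypothetical check of the benchmark arithmetic printed above.
    total_size = 502193677                    # bytes, from "totalSize#"
    period1, period2 = 1.526523, 0.625640     # seconds, from "period1#" / "period2#"

    mbps1 = total_size / period1 / 1e6        # ~328.98, matches "MB/s1#"
    mbps2 = total_size / period2 / 1e6        # ~802.69, matches "MB/s2#"
    factor = mbps2 / mbps1                    # ~2.44,   matches "factor#"

    print(f"MB/s1={mbps1:.7f} MB/s2={mbps2:.7f} factor={factor:.9f}")

Under this assumed formula the printed values reproduce to the shown precision; note that factor also equals period1 / period2, since totalSize cancels.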
|74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/request/request_actor.cpp |74.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/api_adapters.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/api_adapters.cpp |74.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_actor.cpp |74.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__update_tenant_pool_config.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_actor.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__update_tenant_pool_config.cpp |74.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_blobstorage_config.cpp |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_blobstorage_config.cpp |74.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/operations/events.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/events.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/assimilation.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/assimilation.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_gc_indexed.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_gc_indexed.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_idx.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_idx.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/abstract_scheme.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/abstract_scheme.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/meta.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/meta.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_server.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_server.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_defs.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_defs.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/operations/batch_builder/restore.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/batch_builder/restore.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_check_integrity_get.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_check_integrity_get.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/tx_aggregate_data.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/tx_aggregate_data.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/query_stats/query_stats.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/query_stats/query_stats.cpp |74.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/query_stats/libcore-sys_view-query_stats.a |74.2%| [AR] {RESULT} $(B)/ydb/core/sys_view/query_stats/libcore-sys_view-query_stats.a |74.2%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/core/sys_view/query_stats/libcore-sys_view-query_stats.a |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/downtime.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/downtime.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/query_stattablet.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_stattablet.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_store_walle_task.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_store_walle_task.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/actorlib_impl/read_data_protocol.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/read_data_protocol.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/query_barrier.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_barrier.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisproxy.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisproxy.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_reader/contexts.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_reader/contexts.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/service/add_data.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/service/add_data.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_validate.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_validate.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/upsert_index.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/upsert_index.cpp >> test_generator.py::TestTpchGenerator::test_s1_state [GOOD] |74.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpchGenerator::test_s1_state [GOOD] |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/manager.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/manager.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/engine/minikql/flat_local_tx_factory.cpp |74.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/engine/minikql/libcore-engine-minikql.a |74.3%| [AR] {RESULT} $(B)/ydb/core/engine/minikql/libcore-engine-minikql.a |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/engine/minikql/flat_local_tx_factory.cpp |74.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/engine/minikql/libcore-engine-minikql.a |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_nodemon.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_nodemon.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugerecovery.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugerecovery.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/common/schema.cpp |74.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/common/libcore-sys_view-common.a |74.3%| [AR] {RESULT} $(B)/ydb/core/sys_view/common/libcore-sys_view-common.a |74.3%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/sys_view/common/schema.cpp |74.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/common/libcore-sys_view-common.a |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/operations/write_data.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/write_data.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/actorlib_impl/send_data_protocol.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/send_data_protocol.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/control/immediate_control_board_actor.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/control/immediate_control_board_actor.cpp |74.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/control/libydb-core-control.a |74.3%| [AR] {RESULT} $(B)/ydb/core/control/libydb-core-control.a |74.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/control/libydb-core-control.a |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/tx_get_metrics.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/tx_get_metrics.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/walle_api_handler.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/walle_api_handler.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/operations/manager.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/manager.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_reject_notification.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/logger.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_reject_notification.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/logger.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/node_tracker.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/node_tracker.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/erasure_checkers.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/erasure_checkers.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/tx_init_schema.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/tx_init_schema.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_store_permissions.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_store_permissions.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/events.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/events.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_accessor.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_accessor.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_data_ack_to_source.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_data_ack_to_source.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_manager.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_manager.cpp |74.4%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/constructor.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/constructor.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/not_sorted.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/not_sorted.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_finish_ack_to_source.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/meta.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_finish_ack_to_source.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/meta.cpp |74.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/libstorage-indexes-bloom.global.a |74.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/libstorage-indexes-bloom.global.a |74.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/source/transactions/libdata_sharing-source-transactions.a |74.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/source/transactions/libdata_sharing-source-transactions.a |74.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/libstorage-indexes-bloom.global.a |74.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/source/transactions/libdata_sharing-source-transactions.a |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/shard_impl.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/checker.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/shard_impl.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/checker.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/abstract.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/abstract.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/tx_change_backend.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/tx_change_backend.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_compute_state.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_compute_state.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/tx_startup.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/tx_startup.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/data_accessor.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/data_accessor.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/drop_index.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/sys_view/scan.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/drop_index.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/scan.cpp |74.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/libydb-core-sys_view.a |74.5%| [AR] {RESULT} $(B)/ydb/core/sys_view/libydb-core-sys_view.a |74.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/libydb-core-sys_view.a |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/schema_version.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/schema_version.cpp |74.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/scheme/libcolumnshard-engines-scheme.a |74.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/scheme/libcolumnshard-engines-scheme.a |74.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/libcolumnshard-engines-scheme.a |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_blobs_written.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_blobs_written.cpp >> test_generator.py::TestTpchGenerator::test_s1_state_and_parts [GOOD] |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/host/kqp_statement_rewrite.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_statement_rewrite.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/http.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/tx_monitoring.cpp |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpchGenerator::test_s1_state_and_parts [GOOD] |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/tx_monitoring.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/datastreams/datastreams_proxy.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/http.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/datastreams/datastreams_proxy.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/granules.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/granules.cpp |74.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/libreader-sys_view-granules.global.a |74.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/libreader-sys_view-granules.global.a |74.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/libreader-sys_view-granules.global.a |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/constructor.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/constructor.cpp |74.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.global.a |74.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.global.a |74.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.global.a |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/list_queues.cpp |74.5%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/ymq/actor/list_queues.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/tx_store_metrics.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/tx_store_metrics.cpp |74.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/graph/shard/libcore-graph-shard.a |74.5%| [AR] {RESULT} $(B)/ydb/core/graph/shard/libcore-graph-shard.a |74.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/graph/shard/libcore-graph-shard.a |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/iterator.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/iterator.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/constructor.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/constructor.cpp |74.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/libstorage-indexes-count_min_sketch.global.a |74.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/libstorage-indexes-count_min_sketch.global.a |74.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/libstorage-indexes-count_min_sketch.global.a |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/manager/alter.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/alter.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/limit_sorted.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/limit_sorted.cpp |74.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/libsimple_reader-iterator-collections.a |74.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/libsimple_reader-iterator-collections.a |74.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/libsimple_reader-iterator-collections.a |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/checker_secret.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/checker_secret.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_pure_compute_actor.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_pure_compute_actor.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/sub_columns_fetching.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/sub_columns_fetching.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/list_queue_tags.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/list_queue_tags.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/delete_queue.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/delete_queue.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/host/kqp_gateway_proxy.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_gateway_proxy.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/metadata.cpp 
|74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/metadata.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/partition_read.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_read.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_table_resolver.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/versioned_index.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/versioned_index.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_table_resolver.cpp |74.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/scheme/versions/libengines-scheme-versions.a |74.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/scheme/versions/libengines-scheme-versions.a |74.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/versions/libengines-scheme-versions.a |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/fifo_cleanup.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/fifo_cleanup.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/initializer/behaviour.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/behaviour.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/schema.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/schema.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/constructor.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/constructor.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/delete_user.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/delete_user.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_browse.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_browse.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/actor.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/actor.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_log_store.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_log_store.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_scheme.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_scheme.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_draft.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_draft.cpp |74.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/blobs_action/transaction/libcolumnshard-blobs_action-transaction.a |74.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/transaction/libcolumnshard-blobs_action-transaction.a |74.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/transaction/libcolumnshard-blobs_action-transaction.a |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/proxy_impl.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_impl.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/pq.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/pq.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/kqp/finalize_script_service/kqp_finalize_script_service.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/kesus/grpc_service.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/finalize_script_service/kqp_finalize_script_service.cpp |74.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/kesus/libydb-services-kesus.a |74.7%| [AR] {RESULT} $(B)/ydb/services/kesus/libydb-services-kesus.a |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/kesus/grpc_service.cpp |74.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/kesus/libydb-services-kesus.a |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/read_metadata.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/read_metadata.cpp |74.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/libreader-common_reader-constructor.a |74.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/libreader-common_reader-constructor.a |74.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/libreader-common_reader-constructor.a |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/default_fetching.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/default_fetching.cpp |74.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.global.a |74.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.global.a |74.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.global.a >> TErasureTypeTest::TestAllSpecies2of2 [GOOD] |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compile_service/kqp_compile_service.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compile_service/kqp_compile_service.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/actors/analyze_actor.cpp |74.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestAllSpecies2of2 [GOOD] |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/actors/analyze_actor.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/read_balancer_app.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/partition_write.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/read_balancer_app.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_write.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/events.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/events.cpp |74.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/libreader-simple_reader-duplicates.a |74.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/libreader-simple_reader-duplicates.a |74.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/libreader-simple_reader-duplicates.a |74.8%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/persqueue/partition_init.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_init.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/manager/restore.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/restore.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/manager/modification.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/modification.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/granule_view.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/granule_view.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/actor/actor.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/actor/actor.cpp |74.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/actor/libengines-reader-actor.a |74.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/actor/libengines-reader-actor.a |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/manager.cpp |74.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/actor/libengines-reader-actor.a |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/manager.cpp |74.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.a |74.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.a |74.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.a |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_partition_helper.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_partition_helper.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/host/kqp_explain_prepared.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_explain_prepared.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/constructor.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/constructor.cpp |74.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.global.a |74.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.global.a |74.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.global.a >> test_generator.py::TestTpcdsGenerator::test_s1_parts [GOOD] |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/get_queue_attributes.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/get_queue_attributes.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/modification/events/change_owning.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/metadata/behaviour.cpp |74.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/modification/events/libdata_sharing-modification-events.a |74.8%| [AR] {RESULT} 
$(B)/ydb/core/tx/columnshard/data_sharing/modification/events/libdata_sharing-modification-events.a |74.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.global.a |74.8%| [AR] {RESULT} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.global.a |74.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/modification/events/libdata_sharing-modification-events.a |74.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.global.a |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/modification/events/change_owning.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/metadata/behaviour.cpp |74.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpcdsGenerator::test_s1_parts [GOOD] |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker__update_slot_status.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__update_slot_status.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_ping.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_ping.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/transaction.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/transaction.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/read_balancer.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/read_balancer.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/user_settings_reader.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/user_settings_reader.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/filler.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/filler.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ownerinfo.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ownerinfo.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mon_alloc/monitor.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mon_alloc/monitor.cpp |74.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/mon_alloc/libydb-core-mon_alloc.a |74.9%| [AR] {RESULT} $(B)/ydb/core/mon_alloc/libydb-core-mon_alloc.a |74.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/mon_alloc/libydb-core-mon_alloc.a |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_dblogcutter.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common/result.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_dblogcutter.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/query_data/kqp_prepared_query.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common/result.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/query_data/kqp_prepared_query.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/modification/tasks/modification.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/modification/tasks/modification.cpp |74.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/modification/tasks/libdata_sharing-modification-tasks.a |74.9%| [AR] {RESULT} 
$(B)/ydb/core/tx/columnshard/data_sharing/modification/tasks/libdata_sharing-modification-tasks.a |74.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/modification/tasks/libdata_sharing-modification-tasks.a |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/view/manager.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_maintenance.cpp |74.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.a |74.9%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.a |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_maintenance.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/time_cast/time_cast.cpp |74.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.a |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/view/manager.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/time_cast/time_cast.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/sourceid.cpp |75.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/time_cast/libcore-tx-time_cast.a |75.0%| [AR] {RESULT} $(B)/ydb/core/tx/time_cast/libcore-tx-time_cast.a |75.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/time_cast/libcore-tx-time_cast.a |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/sourceid.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_planner.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_planner.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/partition_sourcemanager.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_sourcemanager.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/proxy_service.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/proxy_service.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_partitioned_executer.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_partitioned_executer.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/list_all_topics_actor.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/list_all_topics_actor.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_read_table.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_read_table.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compile_service/kqp_compile_actor.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compile_service/kqp_compile_actor.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/read_context.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/read_context.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_essence.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_essence.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/host/kqp_transform.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_transform.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/accessor/secret_id.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/accessor/secret_id.cpp |75.0%| [AR] {default-linux-x86_64, 
release, asan} $(B)/ydb/services/metadata/secret/accessor/libmetadata-secret-accessor.a |75.0%| [AR] {RESULT} $(B)/ydb/services/metadata/secret/accessor/libmetadata-secret-accessor.a |75.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/secret/accessor/libmetadata-secret-accessor.a |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hulllogcutternotify.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/pq_l2_cache.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hulllogcutternotify.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/pq_l2_cache.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_pool.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_pool.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/get_queue_url.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/get_queue_url.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sst.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sst.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/policy.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/policy.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter.cpp |75.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/libreader-sys_view-abstract.a |75.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/libreader-sys_view-abstract.a |75.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/libreader-sys_view-abstract.a |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/partition_monitoring.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_monitoring.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/dynamic_nameserver_mon.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/dynamic_nameserver_mon.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/read_balancer__balancing.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/read_balancer__balancing.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_stream_execute_yql_script.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_stream_execute_yql_script.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/transfer.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/transfer.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/labels_maintainer.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/labels_maintainer.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hull.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hull.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_remove_task.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replproxy.cpp |75.1%| 
[CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_remove_task.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replproxy.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/partition_compaction.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_compaction.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/partition_scale_manager.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_scale_manager.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/proxy_actor.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/proxy_actor.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/account_read_quoter.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/walle_check_task_adapter.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker__check_slot_status.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/walle_check_task_adapter.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/account_read_quoter.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__check_slot_status.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/partition_blob_encoder.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_blob_encoder.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_committer.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_committer.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/pq_impl_app.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/pq_impl_app.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor_helpers.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor_helpers.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_pdisk.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_pdisk.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/writer/partition_chooser_impl.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/writer/partition_chooser_impl.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_http_server.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_http_server.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/finalize_script_service/kqp_finalize_script_actor.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/finalize_script_service/kqp_finalize_script_actor.cpp |75.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/finalize_script_service/libcore-kqp-finalize_script_service.a |75.2%| [AR] {RESULT} $(B)/ydb/core/kqp/finalize_script_service/libcore-kqp-finalize_script_service.a |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.cpp |75.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/finalize_script_service/libcore-kqp-finalize_script_service.a |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.cpp |75.2%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_helpers.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_helpers.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_remove_permissions.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_remove_permissions.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/service.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/service.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_data_executer.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_data_executer.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_executer_impl.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_executer_impl.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/cfg.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/cfg.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/configured_tablet_bootstrapper.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/configured_tablet_bootstrapper.cpp >> test_generator.py::TestTpcdsGenerator::test_s1_state [GOOD] |75.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpcdsGenerator::test_s1_state [GOOD] |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_locks/manager/manager.cpp |75.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_locks/manager/libcolumnshard-data_locks-manager.a |75.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_locks/manager/libcolumnshard-data_locks-manager.a |75.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_locks/manager/libcolumnshard-data_locks-manager.a |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_locks/manager/manager.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclog.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclog.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/dynamic_nameserver.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/dynamic_nameserver.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_stream_execute_scan_query.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_stream_execute_scan_query.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/garbage_collector.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/garbage_collector.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_syncfullhandler.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_syncfullhandler.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_mon.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_mon.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_pdisk.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_pdisk.cpp |75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/partition.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition.cpp |75.3%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/fetch_request_actor.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/fetch_request_actor.cpp >> test_generator.py::TestTpcdsGenerator::test_s1 [GOOD] |75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker.cpp |75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_scheduler.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_scheduler.cpp |75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__toggle_config_validator.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__toggle_config_validator.cpp |75.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpcdsGenerator::test_s1 [GOOD] |75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_chain.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_chain.cpp |75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_rewriter.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_rewriter.cpp |75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_scheme_initroot.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_scheme_initroot.cpp |75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker__alter_tenant.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__alter_tenant.cpp |75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_state.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_state.cpp |75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/config_helpers.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/config_helpers.cpp |75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/host/kqp_translate.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_translate.cpp |75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker__update_pool_status.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__update_pool_status.cpp |75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhuge.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhuge.cpp |75.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/huge/libblobstorage-vdisk-huge.a |75.3%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/huge/libblobstorage-vdisk-huge.a |75.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/huge/libblobstorage-vdisk-huge.a |75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tiering/tier/object.cpp |75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tiering/tier/object.cpp |75.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tiering/tier/libtx-tiering-tier.a |75.3%| [AR] {RESULT} $(B)/ydb/core/tx/tiering/tier/libtx-tiering-tier.a |75.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/tiering/tier/libtx-tiering-tier.a |75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/cache_block/cache_block.cpp 
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_root.cpp |75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_process_notification.cpp |75.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/libvdisk-hulldb-cache_block.a |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_process_notification.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_root.cpp |75.4%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/libvdisk-hulldb-cache_block.a |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/cache_block/cache_block.cpp |75.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/libvdisk-hulldb-cache_block.a |75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_appendix.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_appendix.cpp |75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compile_service/kqp_compile_computation_pattern_service.cpp |75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/read_quoter.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compile_service/kqp_compile_computation_pattern_service.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/read_quoter.cpp |75.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/compile_service/libcore-kqp-compile_service.a |75.4%| [AR] {RESULT} $(B)/ydb/core/kqp/compile_service/libcore-kqp-compile_service.a |75.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/compile_service/libcore-kqp-compile_service.a |75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/query_statdb.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_statdb.cpp |75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_cms.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_cms.cpp |75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__remove_computational_units.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__remove_computational_units.cpp |75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_overload_handler.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_overload_handler.cpp |75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/factories.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/factories.cpp |75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_queue.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_queue.cpp |75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/discovery/discovery.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/discovery/discovery.cpp |75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/request/request_actor_cb.cpp |75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/list_dead_letter_source_queues.cpp |75.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/discovery/libydb-core-discovery.a |75.4%| [AR] {RESULT} $(B)/ydb/core/discovery/libydb-core-discovery.a |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/request/request_actor_cb.cpp |75.4%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/core/discovery/libydb-core-discovery.a |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/list_dead_letter_source_queues.cpp |75.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/request/libservices-metadata-request.a |75.4%| [AR] {RESULT} $(B)/ydb/services/metadata/request/libservices-metadata-request.a |75.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/request/libservices-metadata-request.a |75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_sst.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_sst.cpp |75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_pq_metacache.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_pq_metacache.cpp |75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/walle_remove_task_adapter.cpp |75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/walle_remove_task_adapter.cpp |75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_graph.cpp |75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/snapshotreq.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_graph.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/snapshotreq.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/balance/utils.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_pq_read_session_info.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/balance/utils.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_pq_read_session_info.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_response.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/rm_service/kqp_snapshot_manager.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_response.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/rm_service/kqp_snapshot_manager.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_syncloghttp.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_syncloghttp.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/ydb_over_fq/explain_data_query.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/ydb_over_fq/explain_data_query.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/counters/counters_manager.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/counters/counters_manager.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_update_downtimes.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_update_downtimes.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_hullrepljob.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_hullrepljob.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.cpp |75.5%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.global.a |75.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.global.a |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.cpp |75.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.global.a |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/info_collector.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/balance/sender.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/info_collector.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/balance/sender.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_literal_executer.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_literal_executer.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/writer/writer.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/writer/writer.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker__update_config.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__update_config.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker__assign_free_slots.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__assign_free_slots.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisfinder.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisfinder.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/balance/handoff_map.cpp |75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/balance/handoff_map.cpp |75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor.cpp |75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_firstrun.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_firstrun.cpp |75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/write_quoter.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/write_quoter.cpp |75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/partition_scale_request.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_scale_request.cpp |75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/read_balancer__balancing_app.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/read_balancer__balancing_app.cpp |75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/portions/constructor.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/portions/constructor.cpp |75.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/libstorage-indexes-portions.a |75.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/libstorage-indexes-portions.a |75.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/libstorage-indexes-portions.a |75.6%| [CC] {default-linux-x86_64, release, 
asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_datasnap.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_datasnap.cpp |75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/main.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/main.cpp |75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_execute_yql_script.cpp |75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/table/table.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_execute_yql_script.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/table/table.cpp |75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/create_user.cpp |75.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/table/libschemeshard-olap-table.a |75.6%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/table/libschemeshard-olap-table.a |75.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/table/libschemeshard-olap-table.a |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/create_user.cpp |75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/columns/update.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/columns/update.cpp |75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/hulldb_bulksst_add.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/hulldb_bulksst_add.cpp |75.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/libvdisk-hulldb-bulksst_add.a |75.6%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/libvdisk-hulldb-bulksst_add.a |75.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/libvdisk-hulldb-bulksst_add.a |75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/pq_impl.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/pq_impl.cpp |75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/mirrorer.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/mirrorer.cpp |75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hullop/hullop_compactfreshappendix.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/hullop_compactfreshappendix.cpp |75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/modify_permissions.cpp |75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/modify_permissions.cpp |75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker.cpp |75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullbase_barrier.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullbase_barrier.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/yaml_config.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/yaml_config.cpp |75.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/libydb-library-yaml_config.a |75.7%| [AR] {RESULT} $(B)/ydb/library/yaml_config/libydb-library-yaml_config.a |75.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/libydb-library-yaml_config.a |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/dread_cache_service/caching_service.cpp |75.7%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/persqueue/dread_cache_service/caching_service.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/control.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/control.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_compactionstate.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_compactionstate.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_request.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_request.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/common/ss_dialog.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/common/ss_dialog.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_mongroups.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_mongroups.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker__register_node.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__register_node.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_static_group.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_static_group.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_kqp_base.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_kqp_base.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/behaviour_registrator_actor.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/behaviour_registrator_actor.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/user_info.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/user_info.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_mon_dbmainpage.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_mon_dbmainpage.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hulllog.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hulllog.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue_stress.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue_stress.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_log_cleanup.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_log_cleanup.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/delete_message.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/delete_message.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_storage_config.cpp |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_storage_config.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common/conveyor_task.cpp |75.7%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common/conveyor_task.cpp |75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/immediate_controls_configurator.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/immediate_controls_configurator.cpp |75.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/common/libengines-reader-common.a |75.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/common/libengines-reader-common.a |75.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common/libengines-reader-common.a |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_data.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_data.cpp |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/send_message.cpp |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_scheme_cache_append.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/send_message.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_scheme_cache_append.cpp |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_config.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_config.cpp |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_mon.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_mon.cpp |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_explain_data_query.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_explain_data_query.cpp |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_read_rows.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_read_rows.cpp |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_read_columns.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_read_columns.cpp |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/counters/scan.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/counters/scan.cpp |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_recoverylogwriter.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_recoverylogwriter.cpp |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_syncfull.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_syncfull.cpp |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeleton.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeleton.cpp |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker__init_scheme.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__init_scheme.cpp |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/event_helpers.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/event_helpers.cpp |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/external_sources/object_storage.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/external_sources/object_storage.cpp |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/counters/engine_logs.cpp 
|75.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/external_sources/libydb-core-external_sources.a |75.8%| [AR] {RESULT} $(B)/ydb/core/external_sources/libydb-core-external_sources.a |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/purge.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/counters/engine_logs.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/purge.cpp |75.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/external_sources/libydb-core-external_sources.a |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_node.cpp |75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_node.cpp |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console_handshake.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_handshake.cpp |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_explain_yql_script.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_explain_yql_script.cpp |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_describe_table.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_describe_table.cpp |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic.cpp |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/auth/group_members.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/auth/group_members.cpp |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/service/add_index.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/service/add_index.cpp |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_actors.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_actors.cpp |75.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/federated_query/libcore-kqp-federated_query.a |75.9%| [AR] {RESULT} $(B)/ydb/core/kqp/federated_query/libcore-kqp-federated_query.a |75.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/federated_query/libcore-kqp-federated_query.a |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_logreplay.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_logreplay.cpp |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/auth/owners.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/auth/owners.cpp |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdata.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdata.cpp |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__set_config.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__set_config.cpp |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_hive_create_tablet.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_hive_create_tablet.cpp |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_test_shard_request.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_test_shard_request.cpp |75.9%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/client/server/msgbus_server_persqueue.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_persqueue.cpp |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/query_range.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_range.cpp |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue_cluster_discovery.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue_cluster_discovery.cpp |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/destination/session/destination.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/session/destination.cpp |75.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/destination/session/libdata_sharing-destination-session.a |75.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/destination/session/libdata_sharing-destination-session.a |75.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/destination/session/libdata_sharing-destination-session.a |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__init_scheme.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__init_scheme.cpp |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console_configs_provider.cpp |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_configs_provider.cpp |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_osiris.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_osiris.cpp |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/operations/batch_builder/builder.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/batch_builder/builder.cpp |76.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/operations/batch_builder/libcolumnshard-operations-batch_builder.a |76.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/operations/batch_builder/libcolumnshard-operations-batch_builder.a |76.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/operations/batch_builder/libcolumnshard-operations-batch_builder.a |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment.cpp |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_scatter_gather.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_scatter_gather.cpp |76.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/libvdisk-hulldb-fresh.a |76.0%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/libvdisk-hulldb-fresh.a |76.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/libvdisk-hulldb-fresh.a |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_propagator.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_propagator.cpp |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/cluster_tracker.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/cluster_tracker.cpp |76.0%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/blobstorage/vdisk/syncer/guid_proxyobtain.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_proxyobtain.cpp |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/query_readbatch.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_readbatch.cpp |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/grpc_proxy_status.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/grpc_proxy_status.cpp |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_repl.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_repl.cpp |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__load_state.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__load_state.cpp |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_status.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_status.cpp |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/local.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/local.cpp |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_db.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_db.cpp |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_cache.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_cache.cpp |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/subscriber.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/subscriber.cpp |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data.cpp |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/partition_stats/top_partitions.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/partition_stats/top_partitions.cpp |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_scan_executer.cpp |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker__extend_lease.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_scan_executer.cpp |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/ydb_over_fq/execute_data_query.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__extend_lease.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/ydb_over_fq/execute_data_query.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_console.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_console.cpp |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker__update_node_location.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_recoverlostdata_proxy.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__update_node_location.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_tools.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_recoverlostdata_proxy.cpp |76.1%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_tools.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/counters/indexation.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/counters/indexation.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_forget_operation.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_forget_operation.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hullactor.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hullactor.cpp |76.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hullop/libblobstorage-vdisk-hullop.a |76.1%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hullop/libblobstorage-vdisk-hullop.a |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/query_public.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/queues_list_reader.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/queues_list_reader.cpp |76.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hullop/libblobstorage-vdisk-hullop.a |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_public.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_allocator/txallocator__scheme.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator__scheme.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_pipe.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_pipe.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_recoverlostdata.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_recoverlostdata.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_unreadable.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_unreadable.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/manager/sessions.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/manager/sessions.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_config.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_config.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_login.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_login.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker__load_state.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_bridge.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__load_state.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_bridge.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/common/autoscaling_ut_common.cpp |76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/common/autoscaling_ut_common.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/mind/tenant_node_enumeration.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_node_enumeration.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/counters/blobs_manager.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/counters/blobs_manager.cpp |76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_committer.cpp |76.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/counters/libtx-columnshard-counters.a |76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/tag_queue.cpp |76.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/counters/libtx-columnshard-counters.a |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_committer.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/tag_queue.cpp |76.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/counters/libtx-columnshard-counters.a |76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_vdisk.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_vdisk.cpp |76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/metadata/object.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/metadata/object.cpp |76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/receive_message.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/receive_message.cpp |76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp |76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/minikql_compile/yql_expr_minikql.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/minikql_compile/yql_expr_minikql.cpp |76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_admin.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_admin.cpp |76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_connectivity.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_connectivity.cpp |76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/status.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/status.cpp |76.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/destination/events/libdata_sharing-destination-events.a |76.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/destination/events/libdata_sharing-destination-events.a |76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_console.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_console.cpp |76.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/destination/events/libdata_sharing-destination-events.a |76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/auth/groups.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/auth/groups.cpp |76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/auth/users.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/auth/users.cpp |76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_cluster_discovery/grpc_service.cpp 
|76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_cluster_discovery/grpc_service.cpp |76.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_cluster_discovery/libydb-services-persqueue_cluster_discovery.a |76.2%| [AR] {RESULT} $(B)/ydb/services/persqueue_cluster_discovery/libydb-services-persqueue_cluster_discovery.a |76.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/persqueue_cluster_discovery/libydb-services-persqueue_cluster_discovery.a |76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_import.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_import.cpp |76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/rm_service/kqp_rm_service.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/rm_service/kqp_rm_service.cpp |76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_prepare_data_query.cpp |76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_prepare_data_query.cpp |76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/service.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/service.cpp |76.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/libydb-services-metadata.a |76.3%| [AR] {RESULT} $(B)/ydb/services/metadata/libydb-services-metadata.a |76.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/libydb-services-metadata.a |76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_rollback_transaction.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_rollback_transaction.cpp |76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/writer/metadata_initializers.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/writer/metadata_initializers.cpp |76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_idxsnap.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_idxsnap.cpp |76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/events/delete_blobs.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/events/delete_blobs.cpp |76.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/blobs_action/events/libcolumnshard-blobs_action-events.a |76.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/events/libcolumnshard-blobs_action-events.a |76.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/events/libcolumnshard-blobs_action-events.a |76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_node_registration.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_node_registration.cpp |76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_readbulksst.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_readbulksst.cpp |76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/rm_service/kqp_resource_info_exchanger.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/rm_service/kqp_resource_info_exchanger.cpp |76.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/rm_service/libcore-kqp-rm_service.a |76.3%| [AR] {RESULT} $(B)/ydb/core/kqp/rm_service/libcore-kqp-rm_service.a |76.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/rm_service/libcore-kqp-rm_service.a |76.3%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/persqueue/writer/source_id_encoding.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/writer/source_id_encoding.cpp |76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/executor.cpp |76.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/writer/libcore-persqueue-writer.a |76.3%| [AR] {RESULT} $(B)/ydb/core/persqueue/writer/libcore-persqueue-writer.a |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/executor.cpp |76.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/writer/libcore-persqueue-writer.a |76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_describe_external_table.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_describe_external_table.cpp |76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/metering.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/metering.cpp |76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/set_queue_attributes.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/set_queue_attributes.cpp |76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_begin_transaction.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_begin_transaction.cpp |76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli.cpp |76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/lease_holder.cpp |76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/lease_holder.cpp |76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_selector.cpp |76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_remove_request.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_remove_request.cpp |76.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/libvdisk-hulldb-compstrat.a |76.3%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/libvdisk-hulldb-compstrat.a |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_selector.cpp |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_get_operation.cpp |76.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/libvdisk-hulldb-compstrat.a |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_get_operation.cpp |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_resource.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_resource.cpp |76.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/libydb-core-mind.a |76.4%| [AR] {RESULT} $(B)/ydb/core/mind/libydb-core-mind.a |76.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/mind/libydb-core-mind.a |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_keyvalue.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_keyvalue.cpp |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_discovery.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_discovery.cpp |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/change_visibility.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/change_visibility.cpp |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/manager.cpp 
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/manager.cpp |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_get_scale_recommendation.cpp |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_create_table.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_get_scale_recommendation.cpp |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/common/pq_ut_common.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_create_table.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/common/pq_ut_common.cpp |76.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/ut/common/libpersqueue-ut-common.a |76.4%| [AR] {RESULT} $(B)/ydb/core/persqueue/ut/common/libpersqueue-ut-common.a |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_s3_upload_rows.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_s3_upload_rows.cpp |76.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/ut/common/libpersqueue-ut-common.a |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/auth/permissions.cpp |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/partition_stats/partition_stats.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/auth/permissions.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/partition_stats/partition_stats.cpp |76.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/partition_stats/libcore-sys_view-partition_stats.a |76.4%| [AR] {RESULT} $(B)/ydb/core/sys_view/partition_stats/libcore-sys_view-partition_stats.a |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_monitoring.cpp |76.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/auth/libcore-sys_view-auth.a |76.4%| [AR] {RESULT} $(B)/ydb/core/sys_view/auth/libcore-sys_view-auth.a |76.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/partition_stats/libcore-sys_view-partition_stats.a |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_monitoring.cpp |76.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/auth/libcore-sys_view-auth.a |76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/actualization/controller/controller.cpp |76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/actualization/controller/controller.cpp |76.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/controller/libchanges-actualization-controller.a |76.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/controller/libchanges-actualization-controller.a |76.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/controller/libchanges-actualization-controller.a |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__restart_tablet.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__restart_tablet.cpp |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_list_operations.cpp |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_s3_downloads.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_list_operations.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_s3_downloads.cpp |76.5%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/cms/cms_tx_remove_expired_notifications.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_remove_expired_notifications.cpp |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__start_tablet.cpp |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/lib/auth/auth_helpers.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__start_tablet.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/lib/auth/auth_helpers.cpp |76.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/lib/auth/libservices-lib-auth.a |76.5%| [AR] {RESULT} $(B)/ydb/services/lib/auth/libservices-lib-auth.a |76.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/lib/auth/libservices-lib-auth.a |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/queue_schema.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/queue_schema.cpp |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/list_users.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/list_users.cpp |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_describe_table_options.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_describe_table_options.cpp |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/export_common.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/export_common.cpp |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_defs.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_defs.cpp |76.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/localrecovery/libblobstorage-vdisk-localrecovery.a |76.5%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/localrecovery/libblobstorage-vdisk-localrecovery.a |76.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/localrecovery/libblobstorage-vdisk-localrecovery.a |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/store_and_send_out_rs_unit.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/store_and_send_out_rs_unit.cpp |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_huge.cpp |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_public.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_huge.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_public.cpp |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/blobstorage_hullcompdelete.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/blobstorage_hullcompdelete.cpp |76.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/libvdisk-hullop-hullcompdelete.a |76.5%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/libvdisk-hullop-hullcompdelete.a |76.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/libvdisk-hullop-hullcompdelete.a |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_snapshots.cpp |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_snapshots.cpp |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/make_snapshot_unit.cpp |76.6%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/datashard/make_snapshot_unit.cpp |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/local_pgwire/local_pgwire_connection.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/local_pgwire/local_pgwire_connection.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_context.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_context.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__tablet_owners_reply.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__tablet_owners_reply.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_locks_db.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_locks_db.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/schemereq.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/schemereq.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_split_src.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_split_src.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_repl_apply.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_repl_apply.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_load_rows.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_load_rows.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/retention.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/retention.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/syncer_job_actor.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/syncer_job_actor.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/restore_unit.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/restore_unit.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_config.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_config.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/recovery/hulldb_recovery.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/recovery/hulldb_recovery.cpp |76.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/recovery/libvdisk-hulldb-recovery.a |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/volatile_tx.cpp |76.6%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/recovery/libvdisk-hulldb-recovery.a |76.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/recovery/libvdisk-hulldb-recovery.a |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/volatile_tx.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/store_data_tx_unit.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/store_data_tx_unit.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_cms.cpp 
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_cms.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_init_scheme.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_init_scheme.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/query/rpc_fetch_script_results.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/query/rpc_fetch_script_results.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_pipeline.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_pipeline.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_schema_snapshots.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_schema_snapshots.cpp |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/store_distributed_erase_tx_unit.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/store_distributed_erase_tx_unit.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__generate_data_ut.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__generate_data_ut.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__status.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__status.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/update.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/update.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/update.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/update.cpp |76.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/libalter-in_store-schema.a |76.7%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/libalter-in_store-schema.a |76.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/libalter-in_store-schema.a |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_repl_offsets_client.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_kqp.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_repl_offsets_client.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_kqp.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_log_and_send.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_log_and_send.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_loans.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_loans.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/protect_scheme_echoes_unit.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/protect_scheme_echoes_unit.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_cache.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/auth_multi_factory.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_cache.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/auth_multi_factory.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/s3_delete.cpp |76.7%| 
[CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_s3_uploads.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_events.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/s3_delete.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_s3_uploads.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_repl_offsets_server.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_events.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_repl_offsets_server.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_block_and_get.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_block_and_get.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_common.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_common.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_stream_lookup_actor.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_stream_lookup_actor.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_scrub.cpp |76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_scrub.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_debug.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_debug.cpp |76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_console.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_console.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cluster_info.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cluster_info.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_fakeinitshard.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_fakeinitshard.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_repl_offsets.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_repl_offsets.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_actor.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_actor.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__kqp_scan.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__kqp_scan.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/manager/shared_blobs.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/manager/shared_blobs.cpp |76.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/manager/libcolumnshard-data_sharing-manager.a |76.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/manager/libcolumnshard-data_sharing-manager.a |76.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/manager/libcolumnshard-data_sharing-manager.a |76.8%| [CC] {default-linux-x86_64, 
release, asan} $(S)/ydb/core/kqp/query_data/kqp_predictor.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/query_data/kqp_predictor.cpp |76.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/query_data/libcore-kqp-query_data.a |76.8%| [AR] {RESULT} $(B)/ydb/core/kqp/query_data/libcore-kqp-query_data.a |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_split_dst.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_split_dst.cpp |76.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/query_data/libcore-kqp-query_data.a |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_loggedrec.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_loggedrec.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__update_tablet_metrics.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_tablet_metrics.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console_configs_subscriber.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_configs_subscriber.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/net_classifier_updater.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/net_classifier_updater.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_overload.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_overload.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/configs_dispatcher.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/grpc_server.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/configs_dispatcher.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/grpc_server.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/prepare_scheme_tx_in_rs_unit.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/prepare_scheme_tx_in_rs_unit.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/local_pgwire/pgwire_kqp_proxy.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/local_pgwire/pgwire_kqp_proxy.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__update_confirmed_subdomain.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__update_confirmed_subdomain.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/lib/actors/pq_schema_actor.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/lib/actors/pq_schema_actor.cpp |76.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/lib/actors/libservices-lib-actors.a |76.9%| [AR] {RESULT} $(B)/ydb/services/lib/actors/libservices-lib-actors.a |76.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/lib/actors/libservices-lib-actors.a |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_group_resolver.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_group_resolver.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_kqp_compute.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/blob_manager_db.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_kqp_compute.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/blob_manager_db.cpp |76.9%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/columnshard/blobs_action/libtx-columnshard-blobs_action.a |76.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/libtx-columnshard-blobs_action.a |76.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/libtx-columnshard-blobs_action.a |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_genconfig.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_genconfig.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/operation.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/operation.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogreader.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogreader.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_proxywrite.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_proxywrite.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_tx_request.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_tx_request.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/configs_cache.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/configs_cache.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_tablet.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_tablet.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_outreadset.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_outreadset.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/store_and_send_write_out_rs_unit.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/store_and_send_write_out_rs_unit.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_drain_node.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_drain_node.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_types.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_dynamic.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_types.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_dynamic.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/export_s3_uploader.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_dynamic_config.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/export_s3_uploader.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_dynamic_config.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/store_snapshot_tx_unit.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/store_snapshot_tx_unit.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_bs.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_bs.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_actorsystem_perftest.cpp |76.9%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/driver_lib/cli_utils/cli_persqueue.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_disk.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_actorsystem_perftest.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_disk.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/feature_flags_configurator.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/feature_flags_configurator.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_binding.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/query_readactor.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/local_pgwire/local_pgwire.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_binding.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_readactor.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/local_pgwire/local_pgwire.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/scheme_cache_lib/yql_db_scheme_resolver.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/scheme_cache_lib/yql_db_scheme_resolver.cpp |77.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/scheme_cache_lib/libcore-client-scheme_cache_lib.a |77.0%| [AR] {RESULT} $(B)/ydb/core/client/scheme_cache_lib/libcore-client-scheme_cache_lib.a |77.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/client/scheme_cache_lib/libcore-client-scheme_cache_lib.a |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/execute_kqp_data_tx_unit.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execute_kqp_data_tx_unit.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisrunner.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisrunner.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_resolve_node.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_resolve_node.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_tracker.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_tracker.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/wait_for_plan_unit.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/wait_for_plan_unit.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_shred.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_shred.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeletonfront.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/create_queue.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeletonfront.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/create_queue.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_bridge.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_bridge.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf.cpp |77.0%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/blobstorage/nodewarden/distconf.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/store_write_unit.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/store_write_unit.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/move_table_unit.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/move_table_unit.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/volatile_tx_mon.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/volatile_tx_mon.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__remove_tenant_done.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__remove_tenant_done.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_group.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_group.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_scheme_request.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_scheme_request.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/execute_distributed_erase_tx_unit.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execute_distributed_erase_tx_unit.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/store_scheme_tx_unit.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/store_scheme_tx_unit.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__alter_tenant.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__alter_tenant.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__delete_node.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__delete_node.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/table_exists.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/table_exists.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__remove_tenant_failed.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__remove_tenant_failed.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/collector.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/collector.cpp |77.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.a |77.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.a |77.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.a |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/http_proxy/http_req.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/http_proxy/http_req.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_cancel_operation.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_cancel_operation.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/prepare_write_tx_in_rs_unit.cpp |77.1%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/datashard/prepare_write_tx_in_rs_unit.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_list_objects_in_s3_export.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_list_objects_in_s3_export.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_sender_cdc_stream.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/remove_locks.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_sender_cdc_stream.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/remove_locks.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/hive_statics.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_statics.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/receive_snapshot_unit.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__unlock_tablet.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__unlock_tablet.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/receive_snapshot_unit.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console_configs_manager.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/finish_propose_unit.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/finish_propose_unit.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_configs_manager.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/remove_lock_change_records.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/remove_lock_change_records.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_keep_alive.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_keep_alive.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/upload_stats.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/upload_stats.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/make_scan_snapshot_unit.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/make_scan_snapshot_unit.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/receive_snapshot_cleanup_unit.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/receive_snapshot_cleanup_unit.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_subdomain_path_id.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/purge_queue.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_subdomain_path_id.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/table_settings.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/purge_queue.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/table_settings.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/remove_schema_snapshots.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__response_tablet_seq.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/remove_schema_snapshots.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__response_tablet_seq.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_alter_table.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_alter_table.cpp |77.2%| [CC] {default-linux-x86_64, release, 
asan} $(S)/ydb/core/tx/datashard/read_table_scan.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/read_table_scan.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/prepare_kqp_data_tx_in_rs_unit.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/prepare_kqp_data_tx_in_rs_unit.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/local_pgwire/local_pgwire_auth_actor.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/local_pgwire/local_pgwire_auth_actor.cpp |77.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/local_pgwire/libydb-core-local_pgwire.a |77.2%| [AR] {RESULT} $(B)/ydb/core/local_pgwire/libydb-core-local_pgwire.a |77.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/local_pgwire/libydb-core-local_pgwire.a |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__cut_tablet_history.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__cut_tablet_history.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/count_queues.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/count_queues.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/prepare_data_tx_in_rs_unit.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/prepare_data_tx_in_rs_unit.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/store_commit_writes_tx_unit.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/store_commit_writes_tx_unit.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__seize_tablets.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__seize_tablets.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/read_table_scan_unit.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/read_table_scan_unit.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_commit_transaction.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_commit_transaction.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__release_tablets.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__release_tablets.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_exchange_split.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_exchange_split.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/wait_for_stream_clearance_unit.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/wait_for_stream_clearance_unit.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/common/timeout.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/common/timeout.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/read_op_unit.cpp |77.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/common/libservices-metadata-common.a |77.3%| [AR] {RESULT} $(B)/ydb/services/metadata/common/libservices-metadata-common.a |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/finalize_plan_tx_unit.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/read_op_unit.cpp |77.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/common/libservices-metadata-common.a |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/finalize_plan_tx_unit.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__reassign_groups.cpp |77.3%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__reassign_groups.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__resume_tablet.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__resume_tablet.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/kikimr_services_initializers.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/kikimr_services_initializers.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console_tenants_manager.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/import_s3.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_tenants_manager.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/import_s3.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/object.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/object.cpp |77.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/liboperations-alter-in_store.a |77.3%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/liboperations-alter-in_store.a |77.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/liboperations-alter-in_store.a |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/auth_factory.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/auth_factory.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/finish_propose_write_unit.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/finish_propose_write_unit.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/memory_state_migration.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/memory_state_migration.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/prepare_distributed_erase_tx_in_rs_unit.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/prepare_distributed_erase_tx_in_rs_unit.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/list_permissions.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/list_permissions.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_trans_queue.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_trans_queue.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/index_events_processor.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/index_events_processor.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_fq.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_fq.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/move_index_unit.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/move_index_unit.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/actors/test_runtime.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_object_storage.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_object_storage.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/actors/test_runtime.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/http_proxy/auth_factory.cpp |77.3%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/testlib/actors/libcore-testlib-actors.a |77.4%| [AR] {RESULT} $(B)/ydb/core/testlib/actors/libcore-testlib-actors.a |77.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/testlib/actors/libcore-testlib-actors.a |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/http_proxy/auth_factory.cpp |77.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/http_proxy/libydb-core-http_proxy.a |77.4%| [AR] {RESULT} $(B)/ydb/core/http_proxy/libydb-core-http_proxy.a |77.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/http_proxy/libydb-core-http_proxy.a |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/initiate_build_index_unit.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/initiate_build_index_unit.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_common_upload.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_common_upload.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ydb_convert/table_description.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ydb_convert/table_description.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/finalize_build_index_unit.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/finalize_build_index_unit.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/query/rpc_execute_query.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/query/rpc_execute_query.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__configure_subdomain.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__configure_subdomain.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_dep_tracker.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_dep_tracker.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__readset.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__readset.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__plan_step.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__plan_step.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/ydb_over_fq/list_directory.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/ydb_over_fq/list_directory.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/follower_edge.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/follower_edge.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__register_node.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__register_node.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_stream_lookup_worker.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_write_actor.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_stream_lookup_worker.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_write_actor.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/execute_data_tx_unit.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execute_data_tx_unit.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__reassign_groups_on_decommit.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__reassign_groups_on_decommit.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/datashard/datashard__snapshot_txs.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__snapshot_txs.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/execute_kqp_scan_tx_unit.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execute_kqp_scan_tx_unit.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/plan_queue_unit.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/plan_queue_unit.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/execute_write_unit.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execute_write_unit.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/key_validator.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/key_validator.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_copy_tables.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_copy_tables.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__compact_borrowed.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__compact_borrowed.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_schemeshard_stats.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/execute_commit_writes_tx_unit.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_schemeshard_stats.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execute_commit_writes_tx_unit.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/load_tx_details_unit.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/load_tx_details_unit.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__stats.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__stats.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_change_receiving.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_change_receiving.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_kqp_data_tx_out_rs_unit.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_kqp_data_tx_out_rs_unit.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__progress_tx.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__progress_tx.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_change_sender_activation.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_get_shard_locations.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_change_sender_activation.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_get_shard_locations.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__sync_tablets.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__sync_tablets.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__request_tablet_seq.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__request_tablet_seq.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__update_dc_followers.cpp ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> 
test_generator.py::TestTpcdsGenerator::test_s1_state_and_parts
2025-06-24 19:56:44,358 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out
2025-06-24 19:56:44,634 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination:
pid    rss   ref   pdirt
18599  4.1G  4.1G  4.1G  ydb-tests-functional-benchmarks_init --basetemp /home/runner/.ya/build/build_root/2btm/002325/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doc
Test command err:
  File "library/python/pytest/main.py", line 101, in main
    rc = pytest.main(
  File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main
    ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main(
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main
    return wrap_session(config, _main)
  File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session
    session.exitstatus = doit(config, session) or 0
  File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main
    config.hook.pytest_runtestloop(session=session)
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop
    item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol
    runtestprotocol(item, nextitem=nextitem)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 134, in runtestprotocol
    reports.append(call_and_report(item, "call", log))
  File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report
    call = call_runtest_hook(item, when, **kwds)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook
    return CallInfo.from_call(
  File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call
    result: Optional[TResult] = func()
  File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in <lambda>
    lambda: ihook(item=item, **kwds), when=when, reraise=reraise
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 170, in pytest_runtest_call
    item.runtest()
  File "contrib/python/pytest/py3/_pytest/python.py", line 1844, in runtest
    self.ihook.pytest_pyfunc_call(pyfuncitem=self)
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall
    res = hook_impl.function(*args)
  File "library/python/pytest/plugins/ya.py", line 563, in pytest_pyfunc_call
    pyfuncitem.retval = testfunction(**testargs)
  File "ydb/tests/functional/benchmarks_init/test_generator.py", line 191, in test_s1_state_and_parts
    return self.get_cannonical(paths=paths, execs=execs)
  File "ydb/tests/functional/benchmarks_init/test_generator.py", line 107, in get_cannonical
    return self.canonical_result(self.scale_hash(paths), self.tmp_path('s1.hash'))
  File "ydb/tests/functional/benchmarks_init/test_generator.py", line 90, in scale_hash
    t.join()
  File "contrib/tools/python3/Lib/threading.py", line 1149, in join
    self._wait_for_tstate_lock()
  File "contrib/tools/python3/Lib/threading.py", line 1169, in _wait_for_tstate_lock
    if lock.acquire(block, timeout):
  File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown
    traceback.print_stack(file=sys.stderr)
Thread 0x00007fbfc85e0640 (most recent call first):
  File "ydb/tests/functional/benchmarks_init/test_generator.py", line 69 in calc_hashes
  File "ydb/tests/functional/benchmarks_init/test_generator.py", line 82 in _calc_hash
  File "contrib/tools/python3/Lib/threading.py", line 1012 in run
  File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner
  File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap
Thread 0x00007fbfd313f640 (most recent call first):
  File "ydb/tests/functional/benchmarks_init/test_generator.py", line 82 in _calc_hash
  File "contrib/tools/python3/Lib/threading.py", line 1012 in run
  File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner
  File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap
Current thread 0x00007fbfd8ee8980 (most recent call first):
  File "contrib/tools/python3/Lib/posixpath.py", line 471 in _joinrealpath
  File "contrib/tools/python3/Lib/posixpath.py", line 427 in realpath
  File "contrib/tools/python3/Lib/inspect.py", line 1016 in getmodule
  File "contrib/tools/python3/Lib/inspect.py", line 1090 in findsource
  File "contrib/python/pytest/py3/_pytest/_code/source.py", line 121 in findsource
  File "contrib/python/pytest/py3/_pytest/_code/code.py", line 106 in fullsource
  File "contrib/python/pytest/py3/_pytest/_code/code.py", line 250 in getsource
  File "contrib/python/pytest/py3/_pytest/_code/code.py", line 833 in _getentrysource
  File "contrib/python/pytest/py3/_pytest/_code/code.py", line 931 in repr_traceback_entry
  File "contrib/python/pytest/py3/_pytest/_code/code.py", line 993 in repr_traceback
  File "contrib/python/pytest/py3/_pytest/_code/code.py", line 1063 in repr_excinfo
  File "contrib/python/pytest/py3/_pytest/_code/code.py", line 698 in getrepr
  File "contrib/python/pytest/py3/_pytest/terminal.py", line 893 in pytest_keyboard_interrupt
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__
  File "contrib/python/pytest/py3/_pytest/main.py", line 287 in wrap_session
  File "contrib/python/pytest/py3/_pytest/main.py", line 320 in pytest_cmdline_main
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__
  File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175 in main
  File "library/python/pytest/main.py", line 101 in main
Traceback (most recent call last):
  File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait
    wait_for(
  File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for
    raise TimeoutError(truncate(message, MAX_MESSAGE_LEN))
yatest.common.process.TimeoutError: ...s', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/002325/ydb/tests/functional/benchmarks_init/test-results/py3test/testing_out_stuff/test_generator/chunk3/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/002325', '--source-root', '/home/runner/.ya/build/build_root/2btm/002325/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/002325/ydb/tests/functional/benchmarks_init/test-results/py3test/testing_out_stuff/test_generator/chunk3/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/functional/benchmarks_init', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '3', '--partition-mode', 'SEQUENTIAL', '--split-by-tests', '--dep-root', 'ydb/tests/functional/benchmarks_init', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address', '--test-file-filter', 'test_generator.py']' stopped by 600 seconds timeout

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main
    res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback)
  File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait
    raise ExecutionTimeoutError(self, str(e))
yatest.common.process.ExecutionTimeoutError: (("...s', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/002325/ydb/tests/functional/benchmarks_init/test-results/py3test/testing_out_stuff/test_generator/chunk3/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/002325', '--source-root', '/home/runner/.ya/build/build_root/2btm/002325/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/002325/ydb/tests/functional/benchmarks_init/test-results/py3test/testing_out_stuff/test_generator/chunk3/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/functional/benchmarks_init', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '3', '--partition-mode', 'SEQUENTIAL', '--split-by-tests', '--dep-root', 'ydb/tests/functional/benchmarks_init', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address', '--test-file-filter', 'test_generator.py']' stopped by 600 seconds timeout",), {})
2025-06-24 19:57:15,715 WARNING library.python.cores: Core dump dir doesn't exist: /coredumps
2025-06-24 19:57:15,715 WARNING library.python.cores: Core dump dir doesn't exist: /var/tmp/cores
|77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_dc_followers.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__conditional_erase_rows.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__conditional_erase_rows.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/storages_manager/manager.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/storages_manager/manager.cpp |77.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/blobs_action/storages_manager/libcolumnshard-blobs_action-storages_manager.a |77.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/storages_manager/libcolumnshard-blobs_action-storages_manager.a |77.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/storages_manager/libcolumnshard-blobs_action-storages_manager.a |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_collector_base.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_collector_base.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/check_scheme_tx_unit.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_scheme_tx_unit.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__progress_resend_rs.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__progress_resend_rs.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_execute_scheme_query.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_execute_scheme_query.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__block_storage_result.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__block_storage_result.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_tasks_runner.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_tasks_runner.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_import_data.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_import_data.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/create_table_unit.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/create_table_unit.cpp |77.6%| [CC] {default-linux-x86_64, release, asan}
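For context on the TestTpcdsGenerator timeout reported above: the hung stacks show the main test thread blocked in scale_hash() on Thread.join() while worker threads are still inside _calc_hash()/calc_hashes(), and the run_test wrapper terminates the whole py3test chunk once its 600-second budget is exhausted. The sketch below is a minimal reconstruction of that pattern, not the actual ydb/tests/functional/benchmarks_init/test_generator.py code; the function names are taken from the traceback, while the SHA-256 hashing and chunked file reading are assumptions made purely for illustration.

import hashlib
import threading


def calc_hashes(path):
    # Stand-in for test_generator.py:69 -- hash one generated TPC-DS data file.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()


def scale_hash(paths):
    # Stand-in for test_generator.py:90 -- one worker thread per path, then join.
    results = {}

    def _calc_hash(path):  # runs on a worker thread (test_generator.py:82)
        results[path] = calc_hashes(path)

    threads = [threading.Thread(target=_calc_hash, args=(p,)) for p in paths]
    for t in threads:
        t.start()
    for t in threads:
        # join() has no timeout here, so if generating or hashing the scale-1
        # data is slow, the chunk stays alive until the external 600-second
        # wrapper timeout fires and the process tree is terminated.
        t.join()
    return results

Because the join is unbounded, a slow generator run surfaces as yatest.common.process.ExecutionTimeoutError from the wrapper rather than as a test-level failure, which matches the second traceback above.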
$(S)/ydb/core/tx/datashard/datashard__s3_download_txs.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__s3_download_txs.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/common/object.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/common/object.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/hive_log.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_log.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/check_read_unit.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_read_unit.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__object_storage_listing.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__object_storage_listing.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/testing.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/testing.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/execution_unit.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execution_unit.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/storage_pool_info.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/storage_pool_info.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_execute_data_query.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_direct_transaction.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_execute_data_query.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_direct_transaction.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ydb_convert/table_settings.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ydb_convert/table_settings.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__stop_tablet.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__stop_tablet.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__op_rows.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__op_rows.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/incr_restore_scan.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__process_pending_operations.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/incr_restore_scan.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__process_pending_operations.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/kqp_ic_gateway.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/kqp_ic_gateway.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__store_table_path.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_sequencer_actor.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__store_table_path.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/create_persistent_snapshot_unit.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_sequencer_actor.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/create_persistent_snapshot_unit.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_direct_erase.cpp |77.6%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/datashard/datashard_direct_erase.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__s3_upload_txs.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__s3_upload_txs.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_read_actor.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_read_actor.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/balancer.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/balancer.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__request_tablet_owners.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__request_tablet_owners.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__update_domain.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_domain.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__create_tablet.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/grpc_endpoint_publish_actor.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__create_tablet.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/grpc_endpoint_publish_actor.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__engine_host.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/create_volatile_snapshot_unit.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/create_volatile_snapshot_unit.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__engine_host.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_kh_snapshots.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_kh_snapshots.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__mon_reset_schema_version.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__mon_reset_schema_version.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_delivery_problem.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_delivery_problem.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__schema_changed.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__schema_changed.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_distributed_erase.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_distributed_erase.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__release_tablets_reply.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__release_tablets_reply.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_export.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_export.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_write_out_rs_unit.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_write_out_rs_unit.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__compaction.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__compaction.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/grpc_request_proxy_simple.cpp |77.7%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/grpc_services/grpc_request_proxy_simple.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__switch_drain.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__switch_drain.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/prefix_kmeans.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__get_state_tx.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__get_state_tx.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/prefix_kmeans.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/export_scan.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/export_scan.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/hive.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/storage_balancer.cpp |77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/storage_balancer.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__store_scan_state.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__store_scan_state.cpp |77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/create_cdc_stream_unit.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/create_cdc_stream_unit.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__propose_tx_base.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__propose_tx_base.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__adopt_tablet.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__adopt_tablet.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__cancel_tx_proposal.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__cancel_tx_proposal.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/load_and_wait_in_rs_unit.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/load_and_wait_in_rs_unit.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__seize_tablets_reply.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__seize_tablets_reply.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/query/rpc_execute_script.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/query/rpc_execute_script.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/check_data_tx_unit.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_data_tx_unit.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/ydb_over_fq/keep_alive.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/ydb_over_fq/keep_alive.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_active_transaction.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/conflicts_cache.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/conflicts_cache.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_active_transaction.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_direct_upload.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_direct_upload.cpp |77.8%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__write.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__write.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/grpc_request_proxy.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/grpc_request_proxy.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_datashard_scan_response.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_sender.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_datashard_scan_response.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_sender.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/check_write_unit.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_write_unit.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/abstract.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/abstract.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_view.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_view.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/complete_write_unit.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/complete_write_unit.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__cleanup_uncommitted.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__cleanup_uncommitted.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__configure_scale_recommender.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__configure_scale_recommender.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/load_write_details_unit.cpp |77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/load_write_details_unit.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/check_snapshot_tx_unit.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_snapshot_tx_unit.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/check_commit_writes_tx_unit.cpp |77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_operation.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_commit_writes_tx_unit.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_operation.cpp |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/node_info.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/node_info.cpp |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write.cpp |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/describe.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/describe.cpp |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_sender_incr_restore.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_sender_incr_restore.cpp 
|77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__cleanup_borrowed.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__cleanup_borrowed.cpp |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/op_load.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/op_load.cpp |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__read_iterator.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__read_iterator.cpp |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__cleanup_tx.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__cleanup_tx.cpp |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_quantum.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_quantum.cpp |77.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/defrag/libblobstorage-vdisk-defrag.a |77.9%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/defrag/libblobstorage-vdisk-defrag.a |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__delete_tablet.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__delete_tablet.cpp |77.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/defrag/libblobstorage-vdisk-defrag.a |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_user_db.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_user_db.cpp |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_sender_table_base.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_sender_table_base.cpp |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/complete_data_tx_unit.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/complete_data_tx_unit.cpp |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__init_scheme.cpp |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__migrate_schemeshard.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__migrate_schemeshard.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__init_scheme.cpp |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/domain_info.cpp |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/workload_service/kqp_workload_service.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/domain_info.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/workload_service/kqp_workload_service.cpp |77.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/workload_service/libcore-kqp-workload_service.a |77.9%| [AR] {RESULT} $(B)/ydb/core/kqp/workload_service/libcore-kqp-workload_service.a |77.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/workload_service/libcore-kqp-workload_service.a |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_describe_path.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_describe_path.cpp |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_state_storage.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_state_storage.cpp |77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__read_columns.cpp |78.0%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/datashard/datashard__read_columns.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__load_everything.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__load_everything.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/viewer_topic_data.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__disconnect_node.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/viewer_topic_data.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__disconnect_node.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/completed_operations_unit.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/completed_operations_unit.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/kqp_gateway.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/kqp_gateway.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_analyze.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_analyze.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/local_kmeans.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/local_kmeans.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/secondary_index.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/ydb_over_fq/create_session.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/secondary_index.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/ydb_over_fq/create_session.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/datareq.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/datareq.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/create_incremental_restore_src_unit.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/create_incremental_restore_src_unit.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_fq_internal.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_fq_internal.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/sample_k.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/sample_k.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/initializer/manager.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/manager.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_make_directory.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_make_directory.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/abstract/kqp_common.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/abstract/kqp_common.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_scheme_base.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_scheme_base.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_collector.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_collector.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/datashard/drop_persistent_snapshot_unit.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/drop_persistent_snapshot_unit.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_distributed_erase_tx_out_rs_unit.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_distributed_erase_tx_out_rs_unit.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__column_stats.cpp |78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__column_stats.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__data_cleanup.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__cleanup_in_rs.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__data_cleanup.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__cleanup_in_rs.cpp |78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_sender_async_index.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_sender_async_index.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__update_tablet_groups.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_tablet_groups.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/aggregator_impl.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/aggregator_impl.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/recompute_kmeans.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/recompute_kmeans.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_rate_limiter_api.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_rate_limiter_api.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_finish_trasersal.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_finish_trasersal.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/fill.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/fill.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/direct_tx_unit.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/proxy.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/direct_tx_unit.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/check_distributed_erase_tx_unit.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_distributed_erase_tx_unit.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_navigate.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_schedule_traversal.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_navigate.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_schedule_traversal.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/drain.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/op_init_schema.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/op_init_schema.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/drain.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/zero_level.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/zero_level.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/drop_index_notice_unit.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/security/login_page.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/drop_index_notice_unit.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/security/login_page.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_collector_async_index.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_collector_async_index.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/cdc_stream_heartbeat.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/cdc_stream_heartbeat.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_and_wait_dependencies_unit.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_and_wait_dependencies_unit.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/agent.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/agent.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/cdc_stream_scan.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/cdc_stream_scan.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tablet_info.cpp |78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tablet_info.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_output_stream.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_output_stream.cpp |78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/common_level.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/common_level.cpp |78.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/liblcbuckets-planner-level.a |78.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/liblcbuckets-planner-level.a |78.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/liblcbuckets-planner-level.a |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_scheme_tx_out_rs_unit.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_scheme_tx_out_rs_unit.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/ut_utils.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/ut_utils.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/alter_table_unit.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/alter_table_unit.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/kqp_metadata_loader.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/kqp_metadata_loader.cpp |78.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/libcore-kqp-gateway.a |78.2%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/libcore-kqp-gateway.a |78.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/libcore-kqp-gateway.a |78.2%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_change_sending.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_change_sending.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__read_step_subscriptions.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__read_step_subscriptions.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_init_schema.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_init_schema.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/backup_unit.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/backup_unit.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_write_operation.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_write_operation.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/s3.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__init.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/drop_cdc_stream_unit.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/s3.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/drop_cdc_stream_unit.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__init.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/data_decommit.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/manager/abstract.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/abstract.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/data_decommit.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__schema_upgrade.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__schema_upgrade.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_transport.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_transport.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_describe_external_data_source.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_describe_external_data_source.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/alter_cdc_stream_unit.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/tx_controller.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/alter_cdc_stream_unit.cpp |78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/tx_controller.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_backup.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_backup.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/drop_table_unit.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/drop_table_unit.cpp |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__monitoring.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__monitoring.cpp |78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_data_tx_out_rs_unit.cpp |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_kh_describe.cpp |78.3%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/datashard/build_data_tx_out_rs_unit.cpp |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__process_boot_queue.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_kh_describe.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__process_boot_queue.cpp |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_collector_cdc_stream.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_collector_cdc_stream.cpp |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__update_tablet_status.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_tablet_status.cpp |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/drop_volatile_snapshot_unit.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/drop_volatile_snapshot_unit.cpp |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/long_tx_service/commit_impl.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/long_tx_service/commit_impl.cpp |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/locks_db.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/locks_db.cpp |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__delete_tablet_result.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__delete_tablet_result.cpp |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__kill_node.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__kill_node.cpp |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/mediator_queue.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/mediator_queue.cpp |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/kmeans_helper.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/kmeans_helper.cpp |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/read_info_actor.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/read_info_actor.cpp |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/reshuffle_kmeans.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/reshuffle_kmeans.cpp |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/collector.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/collector.cpp |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/behaviour.cpp |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/common/common.cpp |78.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_accessor/abstract/libcolumnshard-data_accessor-abstract.a |78.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/abstract/libcolumnshard-data_accessor-abstract.a |78.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/abstract/libcolumnshard-data_accessor-abstract.a |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/common/common.cpp |78.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.global.a |78.3%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.global.a |78.3%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/schemeshard/olap/common/libschemeshard-olap-common.a |78.3%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/common/libschemeshard-olap-common.a |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/behaviour.cpp |78.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/common/libschemeshard-olap-common.a |78.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.global.a |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__mediators_confirmations.cpp |78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__mediators_confirmations.cpp |78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/service.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/service.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/follower_tablet_info.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/follower_tablet_info.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/hive_domains.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_domains.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/aggregator.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/aggregator.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__plan_step.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__plan_step.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/monitoring.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/monitoring.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/storage_block.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_block.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_wb_req.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_wb_req.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/actor.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/actor.cpp |78.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/libolap-bg_tasks-tx_chain.a |78.4%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/libolap-bg_tasks-tx_chain.a |78.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/libolap-bg_tasks-tx_chain.a |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__stop_guard.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__stop_guard.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__schema.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__schema.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tablet_move_info.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/storage_group_info.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tablet_move_info.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/storage_group_info.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/test_server.cpp |78.4%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/test_server.cpp |78.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/libpersqueue_public-ut-ut_utils.a |78.4%| [AR] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/libpersqueue_public-ut-ut_utils.a |78.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/libpersqueue_public-ut-ut_utils.a |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_response.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_response.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/ydb_over_fq/describe_table.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/ydb_over_fq/describe_table.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_drop_coordination_node.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_drop_coordination_node.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_storage.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_storage.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__configure.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__configure.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_modify_permissions.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_modify_permissions.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator_state.cpp |78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator_state.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__acquire_read_step.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__acquire_read_step.cpp |78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/viewer.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/viewer.cpp |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator_impl.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator_impl.cpp |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__monitoring.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__monitoring.cpp |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/tablet/ext_tx_base.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/tablet/ext_tx_base.cpp |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/modification/transactions/tx_change_blobs_owning.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/modification/transactions/tx_change_blobs_owning.cpp |78.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/modification/transactions/libdata_sharing-modification-transactions.a |78.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/modification/transactions/libdata_sharing-modification-transactions.a |78.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/modification/transactions/libdata_sharing-modification-transactions.a |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__restore_transaction.cpp |78.5%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__restore_transaction.cpp |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator.cpp |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__restore_params.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__restore_params.cpp |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_request.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_request.cpp |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__last_step_subscriptions.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__last_step_subscriptions.cpp |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_drop_table.cpp |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/column_families/schema.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/column_families/schema.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_drop_table.cpp |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_create_coordination_node.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_create_coordination_node.cpp |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/long_tx_service/acquire_snapshot_impl.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/long_tx_service/acquire_snapshot_impl.cpp |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/health_check/health_check.cpp |78.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/health_check/libydb-core-health_check.a |78.5%| [AR] {RESULT} $(B)/ydb/core/health_check/libydb-core-health_check.a |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/hive_impl.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/health_check/health_check.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_impl.cpp |78.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/health_check/libydb-core-health_check.a |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ydb_convert/table_profiles.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ydb_convert/table_profiles.cpp |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__update_tablets_object.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_tablets_object.cpp |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/optimizer.cpp |78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/optimizer.cpp |78.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/libstorage-optimizer-abstract.a |78.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/libstorage-optimizer-abstract.a |78.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/libstorage-optimizer-abstract.a |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_init.cpp |78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_init.cpp |78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_common/rpc_common_kqp_session.cpp |78.6%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/grpc_services/rpc_common/rpc_common_kqp_session.cpp |78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_copy_table.cpp |78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_copy_table.cpp |78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_cache/scheme_cache.cpp |78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_cache/scheme_cache.cpp |78.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_cache/libcore-tx-scheme_cache.a |78.6%| [AR] {RESULT} $(B)/ydb/core/tx/scheme_cache/libcore-tx-scheme_cache.a |78.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/scheme_cache/libcore-tx-scheme_cache.a |78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ymq/ymq_proxy.cpp |78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ymq/ymq_proxy.cpp |78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/local_rpc/helper.cpp |78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/local_rpc/helper.cpp |78.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/local_rpc/libkqp-gateway-local_rpc.a |78.6%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/local_rpc/libkqp-gateway-local_rpc.a |78.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/local_rpc/libkqp-gateway-local_rpc.a |78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__lock_tablet.cpp |78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__lock_tablet.cpp |78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_response_tablet_distribution.cpp |78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_response_tablet_distribution.cpp |78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/behaviour.cpp |78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/behaviour.cpp |78.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.global.a |78.6%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.global.a |78.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.global.a |78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_configure.cpp |78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_configure.cpp |78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/tiling/tiling.cpp |78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_describe_coordination_node.cpp |78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_describe_coordination_node.cpp |78.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/tiling/libstorage-optimizer-tiling.global.a |78.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/tiling/libstorage-optimizer-tiling.global.a |78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/tiling/tiling.cpp |78.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/tiling/libstorage-optimizer-tiling.global.a |78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__init.cpp |78.6%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/coordinator/coordinator__init.cpp |78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_ack_timeout.cpp |78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_ack_timeout.cpp |78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/tablet/broken_txs.cpp |78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/tablet/broken_txs.cpp |78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/s3.cpp |78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/s3.cpp |78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/boot_queue.cpp |78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/boot_queue.cpp |78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/data_events/columnshard_splitter.cpp |78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/data_events/columnshard_splitter.cpp |78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/query/rpc_attach_session.cpp |78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/query/rpc_attach_session.cpp |78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_resolve.cpp |78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_resolve.cpp |78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/sync.cpp |78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/sync.cpp |78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/external_data_source/manager.cpp |78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__check.cpp |78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__check.cpp |78.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.a |78.7%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.a |78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/external_data_source/manager.cpp |78.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.a |78.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/coordinator/libcore-tx-coordinator.a |78.7%| [AR] {RESULT} $(B)/ydb/core/tx/coordinator/libcore-tx-coordinator.a |78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/abstract.cpp |78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/abstract.cpp |78.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.a |78.7%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.a |78.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.a |78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/grpc_pq_write.cpp |78.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/coordinator/libcore-tx-coordinator.a |78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/grpc_pq_write.cpp |78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/normalizer.cpp |78.7%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/columnshard/normalizer/portion/normalizer.cpp |78.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.a |78.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.a |78.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.a |78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/service/service_impl.cpp |78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/service/service_impl.cpp |78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx.cpp |78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx.cpp |78.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/libydb-core-tx.a |78.7%| [AR] {RESULT} $(B)/ydb/core/tx/libydb-core-tx.a |78.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/libydb-core-tx.a |78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/security/ticket_parser.cpp |78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/security/ticket_parser.cpp |78.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/security/libydb-core-security.a |78.7%| [AR] {RESULT} $(B)/ydb/core/security/libydb-core-security.a |78.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/security/libydb-core-security.a |78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/data_events/shards_splitter.cpp |78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/data_events/shards_splitter.cpp |78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_read_iterator_common.cpp |78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_read_iterator_common.cpp |78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/service/service.cpp |78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/service/service.cpp |78.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/service/libcore-statistics-service.a |78.7%| [AR] {RESULT} $(B)/ydb/core/statistics/service/libcore-statistics-service.a |78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/locks/locks.cpp |78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/data_events/shard_writer.cpp |78.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/statistics/service/libcore-statistics-service.a |78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/data_events/shard_writer.cpp |78.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/locks/libcore-tx-locks.a |78.8%| [AR] {RESULT} $(B)/ydb/core/tx/locks/libcore-tx-locks.a |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/object.cpp |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/locks/locks.cpp |78.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/locks/libcore-tx-locks.a |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/object.cpp |78.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/liboperations-alter-standalone.a |78.8%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/liboperations-alter-standalone.a |78.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/data_events/libcore-tx-data_events.a |78.8%| [AR] {RESULT} $(B)/ydb/core/tx/data_events/libcore-tx-data_events.a |78.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/liboperations-alter-standalone.a |78.8%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/core/tx/data_events/libcore-tx-data_events.a |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/service/ext_counters.cpp |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/service/ext_counters.cpp |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_aggr_stat_response.cpp |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_aggr_stat_response.cpp |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/comm.cpp |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/comm.cpp |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_dummy.cpp |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_dummy.cpp |78.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/ydb/libydb-services-ydb.a |78.8%| [AR] {RESULT} $(B)/ydb/services/ydb/libydb-services-ydb.a |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/tablet/write_queue.cpp |78.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/ydb/libydb-services-ydb.a |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/tablet/write_queue.cpp |78.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/tablet/libtx-columnshard-tablet.a |78.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/tablet/libtx-columnshard-tablet.a |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_rename_tables.cpp |78.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/tablet/libtx-columnshard-tablet.a |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write_actor.cpp |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_rename_tables.cpp |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write_actor.cpp |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/local_rate_limiter.cpp |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/local_rate_limiter.cpp |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_analyze_deadline.cpp |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_analyze_deadline.cpp |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/query/rpc_kqp_tx.cpp |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/query/rpc_kqp_tx.cpp |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/counters/kqp_counters.cpp |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/monitoring.cpp |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/long_tx_service/long_tx_service_impl.cpp |78.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/counters/libcore-kqp-counters.a |78.8%| [AR] {RESULT} $(B)/ydb/core/kqp/counters/libcore-kqp-counters.a |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/long_tx_service/long_tx_service_impl.cpp |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/monitoring.cpp |78.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/counters/libcore-kqp-counters.a |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/counters/kqp_counters.cpp |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/leader_tablet_info.cpp |78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/leader_tablet_info.cpp |78.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/long_tx_service/libcore-tx-long_tx_service.a |78.9%| [AR] {RESULT} $(B)/ydb/core/tx/long_tx_service/libcore-tx-long_tx_service.a |78.9%| 
[AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/long_tx_service/libcore-tx-long_tx_service.a |78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/partition_actor.cpp |78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/partition_actor.cpp |78.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/aggregator/libcore-statistics-aggregator.a |78.9%| [AR] {RESULT} $(B)/ydb/core/statistics/aggregator/libcore-statistics-aggregator.a |78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/public_http/http_service.cpp |78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/public_http/http_service.cpp |78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/proxy/proxy.cpp |78.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/statistics/aggregator/libcore-statistics-aggregator.a |78.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/public_http/libydb-core-public_http.a |78.9%| [AR] {RESULT} $(B)/ydb/core/public_http/libydb-core-public_http.a |78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/proxy/proxy.cpp |78.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/public_http/libydb-core-public_http.a |78.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kesus/proxy/libcore-kesus-proxy.a |78.9%| [AR] {RESULT} $(B)/ydb/core/kesus/proxy/libcore-kesus-proxy.a |78.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kesus/proxy/libcore-kesus-proxy.a |78.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/hive/libcore-mind-hive.a |78.9%| [AR] {RESULT} $(B)/ydb/core/mind/hive/libcore-mind-hive.a |78.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/mind/hive/libcore-mind-hive.a |78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/operation_helpers.cpp |78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/tablet/gc_counters.cpp |78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/operation_helpers.cpp |78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/tablet/gc_counters.cpp |78.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/normalizer/tablet/libcolumnshard-normalizer-tablet.global.a |78.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/tablet/libcolumnshard-normalizer-tablet.global.a |78.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/tablet/libcolumnshard-normalizer-tablet.global.a |78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_allocator/txallocator__reserve.cpp |78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator__reserve.cpp |78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tiering/fetcher.cpp |78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tiering/fetcher.cpp |78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/actualization/construction/context.cpp |78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/actualization/construction/context.cpp |78.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/construction/libchanges-actualization-construction.a |78.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/construction/libchanges-actualization-construction.a |78.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/construction/libchanges-actualization-construction.a |78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/resolve_local_db_table.cpp |78.9%| [CC] {default-linux-x86_64, 
release, asan} $(S)/ydb/services/ext_index/metadata/manager.cpp |78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/resolve_local_db_table.cpp |78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/metadata/manager.cpp |78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/populator.cpp |78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/populator.cpp |78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/data_trash.cpp |78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/data_trash.cpp |78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_semaphore_timeout.cpp |78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_timeout.cpp |78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_config_set.cpp |79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_config_set.cpp |79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/load_test.cpp |79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/load_test.cpp |79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/locks/dependencies.cpp |79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/locks/dependencies.cpp |79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sharding/hash_slider.cpp |78.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.a |79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/read_init_auth_actor.cpp |79.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.a |79.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.a |79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/read_init_auth_actor.cpp |79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sharding/hash_slider.cpp |79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/database/database.cpp |79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/database/database.cpp |79.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/database/libcore-statistics-database.a |79.0%| [AR] {RESULT} $(B)/ydb/core/statistics/database/libcore-statistics-database.a |79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql.cpp |79.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/statistics/database/libcore-statistics-database.a |79.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.a |79.0%| [AR] {RESULT} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.a |79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql.cpp |79.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.a |79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_alter_coordination_node.cpp |79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_alter_coordination_node.cpp |79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/events/events.cpp |79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/events/events.cpp |79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tracing/tablet_info.cpp |79.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/events/libcore-persqueue-events.a |79.0%| [AR] {RESULT} $(B)/ydb/core/persqueue/events/libcore-persqueue-events.a |79.0%| [AR] 
{BAZEL_UPLOAD} $(B)/ydb/core/persqueue/events/libcore-persqueue-events.a |79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tracing/tablet_info.cpp |79.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tracing/libydb-core-tracing.a |79.0%| [AR] {RESULT} $(B)/ydb/core/tracing/libydb-core-tracing.a |79.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tracing/libydb-core-tracing.a |79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/util/actorsys_test/single_thread_ic_mock.cpp |79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/util/actorsys_test/single_thread_ic_mock.cpp |79.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/util/actorsys_test/libcore-util-actorsys_test.a |79.0%| [AR] {RESULT} $(B)/ydb/core/util/actorsys_test/libcore-util-actorsys_test.a |79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_replication.cpp |79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/service/sysview_service.cpp |79.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/util/actorsys_test/libcore-util-actorsys_test.a |79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_replication.cpp |79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/service/sysview_service.cpp |79.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/service/libcore-sys_view-service.a |79.0%| [AR] {RESULT} $(B)/ydb/core/sys_view/service/libcore-sys_view-service.a |79.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/service/libcore-sys_view-service.a |79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_write_table.cpp |79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/quoter/kesus_quoter_proxy.cpp |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_write_table.cpp |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/kesus_quoter_proxy.cpp |79.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/runtime/libcore-kqp-runtime.a |79.1%| [AR] {RESULT} $(B)/ydb/core/kqp/runtime/libcore-kqp-runtime.a |79.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/runtime/libcore-kqp-runtime.a |79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/storage_patch.cpp |79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/storage_range.cpp |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_patch.cpp |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_range.cpp |79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/backpressure/load_based_timeout.cpp |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/load_based_timeout.cpp |79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/op_apply_config.cpp |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/op_apply_config.cpp |79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/abstract.cpp |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/abstract.cpp |79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/abstract/fetcher.cpp |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/abstract/fetcher.cpp |79.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.a |79.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.a |79.1%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.a |79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/logger.cpp |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/logger.cpp |79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/optimizer.cpp |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/optimizer.cpp |79.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.global.a |79.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.global.a |79.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.global.a |79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_viewer.cpp |79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/metadata/initializer.cpp |79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema.cpp |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/metadata/initializer.cpp |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema.cpp |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_viewer.cpp |79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_remove_directory.cpp |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_remove_directory.cpp |79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sqlin.cpp |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sqlin.cpp |79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet.cpp |79.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_services/libydb-core-grpc_services.a |79.1%| [AR] {RESULT} $(B)/ydb/core/grpc_services/libydb-core-grpc_services.a |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet.cpp |79.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_services/tablet/libcore-grpc_services-tablet.a |79.1%| [AR] {RESULT} $(B)/ydb/core/grpc_services/tablet/libcore-grpc_services-tablet.a |79.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/grpc_services/tablet/libcore-grpc_services-tablet.a |79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/schema/update.cpp |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/schema/update.cpp |79.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/schema/libschemeshard-olap-schema.a |79.1%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/schema/libschemeshard-olap-schema.a |79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/subscriber.cpp |79.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/schema/libschemeshard-olap-schema.a |79.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/grpc_services/libydb-core-grpc_services.a |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/subscriber.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/options/schema.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/options/schema.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/s3_write.cpp 
|79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/s3_write.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/update_last_seen_ready.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/update_last_seen_ready.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/manager/alter_impl.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/quoter/quoter_service.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/alter_impl.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/quoter_service.cpp |79.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/quoter/libydb-core-quoter.a |79.2%| [AR] {RESULT} $(B)/ydb/core/quoter/libydb-core-quoter.a |79.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/quoter/libydb-core-quoter.a |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_effects.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_effects.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/topic.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/topic.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/options/update.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/options/update.cpp |79.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/options/libschemeshard-olap-options.a |79.2%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/options/libschemeshard-olap-options.a |79.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/options/libschemeshard-olap-options.a |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sharding/hash_intervals.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sharding/hash_intervals.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/resolved_value.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/resolved_value.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/transactions/tx_finish_async.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/transactions/tx_finish_async.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/transactions/tx_add_sharding_info.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/transactions/tx_add_sharding_info.cpp |79.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/transactions/libcolumnshard-transactions-transactions.a |79.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/transactions/libcolumnshard-transactions-transactions.a |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/status.cpp |79.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/transactions/transactions/libcolumnshard-transactions-transactions.a |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/status.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/resolvereq.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/resolvereq.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/grpc_pq_schema.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/grpc_pq_schema.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/s3_upload.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/s3_upload.cpp 
|79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/viewer_request.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/select_groups.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/select_groups.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/viewer_request.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole_wide_read.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole_wide_read.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.cpp |79.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/viewer/libydb-core-viewer.a |79.2%| [AR] {RESULT} $(B)/ydb/core/viewer/libydb-core-viewer.a |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sort.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sort.cpp |79.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/viewer/libydb-core-viewer.a |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/storage_status.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_status.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/storage_put.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/cmds_storage_pool.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_put.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/cmds_storage_pool.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/run_script_actor/kqp_run_script_actor.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/run_script_actor/kqp_run_script_actor.cpp |79.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/run_script_actor/libcore-kqp-run_script_actor.a |79.3%| [AR] {RESULT} $(B)/ydb/core/kqp/run_script_actor/libcore-kqp-run_script_actor.a |79.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/run_script_actor/libcore-kqp-run_script_actor.a |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_client.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_client.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/stat_processor.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/stat_processor.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/bsc.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/read_session_actor.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/bsc.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/read_session_actor.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/query_actor/query_actor.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/query_actor/query_actor.cpp |79.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/query_actor/libydb-library-query_actor.a |79.3%| [AR] {RESULT} $(B)/ydb/library/query_actor/libydb-library-query_actor.a |79.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/query_actor/libydb-library-query_actor.a |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/persqueue/topic_parser/topic_parser.cpp |79.3%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/library/persqueue/topic_parser/topic_parser.cpp |79.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/persqueue/topic_parser/liblibrary-persqueue-topic_parser.a |79.3%| [AR] {RESULT} $(B)/ydb/library/persqueue/topic_parser/liblibrary-persqueue-topic_parser.a |79.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/persqueue/topic_parser/liblibrary-persqueue-topic_parser.a |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/update_offsets_in_transaction_actor.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/update_offsets_in_transaction_actor.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/write_session_actor.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/write_session_actor.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/layout_helpers.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole_write_constraint.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/layout_helpers.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole_write_constraint.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/group_layout_checker.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/group_layout_checker.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/propose_group_key.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/propose_group_key.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/commit_config.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/commit_config.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/cmds_host_config.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/cmds_host_config.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/grouper.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/grouper.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/metadata/snapshot.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/metadata/snapshot.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_ranges.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_ranges.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_self_check.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_self_check.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/checker_access.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/checker_access.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/direct_read_actor.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/direct_read_actor.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/ut_common/datashard_ut_common.cpp |79.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_common/libtx-datashard-ut_common.a |79.4%| [AR] {RESULT} $(B)/ydb/core/tx/datashard/ut_common/libtx-datashard-ut_common.a |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/ut_common/datashard_ut_common.cpp |79.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/ut_common/libtx-datashard-ut_common.a |79.4%| 
[CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/scrub.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/scrub.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/cache.cpp |79.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/opt/peephole/libkqp-opt-peephole.a |79.4%| [AR] {RESULT} $(B)/ydb/core/kqp/opt/peephole/libkqp-opt-peephole.a |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/cache.cpp |79.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/libcore-tx-scheme_board.a |79.4%| [AR] {RESULT} $(B)/ydb/core/tx/scheme_board/libcore-tx-scheme_board.a |79.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/opt/peephole/libkqp-opt-peephole.a |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/scheme_describe.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/scheme_describe.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/data_uncertain.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/data_uncertain.cpp |79.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/scheme_board/libcore-tx-scheme_board.a |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/mon_main.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/mon_main.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_extract.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/table_creator/table_creator.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_extract.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/table_creator/table_creator.cpp |79.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/table_creator/libydb-library-table_creator.a |79.4%| [AR] {RESULT} $(B)/ydb/library/table_creator/libydb-library-table_creator.a |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/channel_kind.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/channel_kind.cpp |79.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/table_creator/libydb-library-table_creator.a |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_session_timeout.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_session_timeout.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ymq/grpc_service.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_request_reporting.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_request_reporting.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ymq/grpc_service.cpp |79.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/ymq/libydb-services-ymq.a |79.5%| [AR] {RESULT} $(B)/ydb/services/ymq/libydb-services-ymq.a |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vmultiput_actor.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vmultiput_actor.cpp |79.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/ymq/libydb-services-ymq.a 
|79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_helpers.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_helpers.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/ttl/validator.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/ttl/validator.cpp |79.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/ttl/libschemeshard-olap-ttl.a |79.5%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/ttl/libschemeshard-olap-ttl.a |79.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/ttl/libschemeshard-olap-ttl.a |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/shred.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/shred.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_cbo.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_cbo.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__revert_pool_state.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__revert_pool_state.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/config_fit_pdisks.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/config_fit_pdisks.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/config_cmd.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/config_cmd.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__remove_tenant.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__remove_tenant.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_log.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_log.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/session_actor/kqp_response.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/session_actor/kqp_response.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replmonhandler.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replmonhandler.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/statestorage_replica.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/statestorage_replica.cpp |79.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/repl/libblobstorage-vdisk-repl.a |79.5%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/repl/libblobstorage-vdisk-repl.a |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/console_interaction.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/config_fit_groups.cpp |79.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/repl/libblobstorage-vdisk-repl.a |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/console_interaction.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/config_fit_groups.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/backup/impl/local_partition_reader.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/backup/impl/local_partition_reader.cpp |79.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/backup/impl/libcore-backup-impl.a |79.5%| [AR] {RESULT} $(B)/ydb/core/backup/impl/libcore-backup-impl.a |79.5%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/core/backup/impl/libcore-backup-impl.a |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/update_group_latencies.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/update_group_latencies.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_ranges_predext.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_ranges_predext.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/register_node.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/register_node.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_async.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_async.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_allocator/txallocator.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/assimilator.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ydb_convert/tx_proxy_status.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/assimilator.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ydb_convert/tx_proxy_status.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/load_everything.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/load_everything.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/s3_scan.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/s3_scan.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/disk_metrics.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/disk_metrics.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/columns/schema.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_get_log_tail.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_get_log_tail.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/columns/schema.cpp |79.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/columns/libschemeshard-olap-columns.a |79.6%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/columns/libschemeshard-olap-columns.a |79.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/columns/libschemeshard-olap-columns.a |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sqlin_compact.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sqlin_compact.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/request_controller_info.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/request_controller_info.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/commit_offset_actor.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/commit_offset_actor.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_fill_node.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/storage_get.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_fill_node.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_get.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/mind/bscontroller/migrate.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/migrate.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__replace_yaml_config.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__replace_yaml_config.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/group_metrics_exchange.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/group_metrics_exchange.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_ic_debug.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_ic_debug.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/space_monitor.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read_actor.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/space_monitor.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read_actor.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/update_seen_operational.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/update_seen_operational.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tiering/manager.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tiering/manager.cpp |79.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tiering/libcore-tx-tiering.a |79.6%| [AR] {RESULT} $(B)/ydb/core/tx/tiering/libcore-tx-tiering.a |79.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/tiering/libcore-tx-tiering.a |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/cmds_drive_status.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/cmds_drive_status.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/init_scheme.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/init_scheme.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_indexes.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_indexes.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_encrypt.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_encrypt.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/monitoring.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/monitoring.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/schema_actors.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/schema_actors.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/storage_stats_calculator.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/storage_stats_calculator.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/restore_v1_chunks.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/restore_v1_chunks.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/cmds_box.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/mind/bscontroller/node_report.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/cmds_box.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/node_report.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/distributed_commit_helper.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/distributed_commit_helper.cpp |79.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_v1/actors/libservices-persqueue_v1-actors.a |79.7%| [AR] {RESULT} $(B)/ydb/services/persqueue_v1/actors/libservices-persqueue_v1-actors.a |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/request.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_init.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/request.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_init.cpp |79.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/persqueue_v1/actors/libservices-persqueue_v1-actors.a |79.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kesus/tablet/libcore-kesus-tablet.a |79.7%| [AR] {RESULT} $(B)/ydb/core/kesus/tablet/libcore-kesus-tablet.a |79.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kesus/tablet/libcore-kesus-tablet.a |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_proxy.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_proxy.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/drop_donor.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/drop_donor.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/backup.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/backup.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get_block.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get_block.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/query_compiler/kqp_query_compiler.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/query_compiler/kqp_query_compiler.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/accessor_snapshot_simple.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/accessor_snapshot_simple.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/drop_table.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/drop_table.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_opt_phy_check.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_phy_check.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/sys_view.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/sys_view.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/group_mapper.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/group_mapper.cpp |79.8%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_join.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_join.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/schema.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/schema.cpp |79.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/opt/logical/libkqp-opt-logical.a |79.8%| [AR] {RESULT} $(B)/ydb/core/kqp/opt/logical/libkqp-opt-logical.a |79.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/opt/logical/libkqp-opt-logical.a |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/session_actor/kqp_temp_tables_manager.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/session_actor/kqp_temp_tables_manager.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/operations/write.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/write.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp |79.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/operations/libtx-columnshard-operations.a |79.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/operations/libtx-columnshard-operations.a |79.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/operations/libtx-columnshard-operations.a |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/create_store.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/create_store.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/broken_blobs.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/broken_blobs.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/sharing.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/sharing.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/data_resolve.cpp |79.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.global.a |79.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.global.a |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/data_resolve.cpp |79.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.global.a |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_query_plan.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_query_plan.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/rpc_long_tx.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/rpc_long_tx.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_ttl_preset_setting_info.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_ttl_preset_setting_info.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/get_group.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/get_group.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/services_initializer.cpp |79.8%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/services_initializer.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/storage_get_block.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/simple.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_get_block.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/simple.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/self_heal.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/self_heal.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_ttl_preset_setting_version_info.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_ttl_preset_setting_version_info.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/drop_store.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/drop_store.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/abstract/abstract.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/abstract/abstract.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp |79.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/normalizer/abstract/libcolumnshard-normalizer-abstract.a |79.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/abstract/libcolumnshard-normalizer-abstract.a |79.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/abstract/libcolumnshard-normalizer-abstract.a |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/create_table.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/create_table.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/update.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/update.cpp |79.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/libalter-in_store-resharding.a |79.9%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/libalter-in_store-resharding.a |79.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/libalter-in_store-resharding.a |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/accessor_snapshot_base.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/accessor_snapshot_base.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_empty.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_empty.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/session_actor/kqp_worker_common.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/session_actor/kqp_worker_common.cpp |79.9%| [CC] {default-linux-x86_64, 
release, asan} $(S)/ydb/core/tx/columnshard/operations/slice_builder/pack_builder.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter_store.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/slice_builder/pack_builder.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter_store.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/portion.cpp |79.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/libschemeshard-olap-operations.a |79.9%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/libschemeshard-olap-operations.a |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/portion.cpp |79.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/libschemeshard-olap-operations.a |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/operations/slice_builder/builder.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/slice_builder/builder.cpp |79.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/operations/slice_builder/libcolumnshard-operations-slice_builder.a |79.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/operations/slice_builder/libcolumnshard-operations-slice_builder.a |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/loading/stages.cpp |79.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/operations/slice_builder/libcolumnshard-operations-slice_builder.a |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/loading/stages.cpp |79.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/loading/libtx-columnshard-loading.a |79.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/loading/libtx-columnshard-loading.a |79.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/loading/libtx-columnshard-loading.a |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/garbage_collection.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/garbage_collection.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/session_actor/kqp_query_state.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/session_actor/kqp_query_state.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_opt_hash_func_propagate_transformer.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_hash_func_propagate_transformer.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_clusters_updater_actor.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_clusters_updater_actor.cpp |79.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/deprecated/persqueue_v0/libservices-deprecated-persqueue_v0.a |79.9%| [AR] {RESULT} $(B)/ydb/services/deprecated/persqueue_v0/libservices-deprecated-persqueue_v0.a |80.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/deprecated/persqueue_v0/libservices-deprecated-persqueue_v0.a |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/counters/counters.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/abstract/common.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/primary.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/abstract/common.cpp |80.0%| [AR] 
{default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_services/counters/libcore-grpc_services-counters.a |80.0%| [AR] {RESULT} $(B)/ydb/core/grpc_services/counters/libcore-grpc_services-counters.a |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/primary.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/config.cpp |80.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/grpc_services/counters/libcore-grpc_services-counters.a |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/counters/counters.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/ycsb/test_load_actor.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ycsb/test_load_actor.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/config.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/write_actor.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/write_actor.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/session_actor/kqp_worker_actor.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/session_actor/kqp_worker_actor.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_deprecated_snapshot.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_deprecated_snapshot.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/secondary.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/secondary.cpp |80.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.global.a |80.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.global.a |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/ycsb/kqp_upsert.cpp |80.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.global.a |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ycsb/kqp_upsert.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/tables/normalizer.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/tables/normalizer.cpp |80.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/normalizer/tables/libcolumnshard-normalizer-tables.global.a |80.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/tables/libcolumnshard-normalizer-tables.global.a |80.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/tables/libcolumnshard-normalizer-tables.global.a |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/propose_tx.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/propose_tx.cpp |80.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.a |80.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.a |80.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.a |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_monactor.cpp |80.0%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/blobstorage/dsproxy/dsproxy_monactor.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/object.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/object.cpp |80.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/liboperations-alter-abstract.a |80.0%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/liboperations-alter-abstract.a |80.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/liboperations-alter-abstract.a |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get_impl.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get_impl.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/query_compiler/kqp_olap_compiler.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/query_compiler/kqp_olap_compiler.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/query_compiler/kqp_mkql_compiler.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/query_compiler/kqp_mkql_compiler.cpp |80.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/query_compiler/libcore-kqp-query_compiler.a |80.1%| [AR] {RESULT} $(B)/ydb/core/kqp/query_compiler/libcore-kqp-query_compiler.a |80.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/query_compiler/libcore-kqp-query_compiler.a |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/scheme.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/scheme.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mon/mon.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mon/mon.cpp |80.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/mon/libydb-core-mon.a |80.1%| [AR] {RESULT} $(B)/ydb/core/mon/libydb-core-mon.a |80.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/mon/libydb-core-mon.a |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_index_columns.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_index_columns.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/minikql_compile/mkql_compile_service.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/minikql_compile/mkql_compile_service.cpp |80.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/minikql_compile/libcore-client-minikql_compile.a |80.1%| [AR] {RESULT} $(B)/ydb/core/client/minikql_compile/libcore-client-minikql_compile.a |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/grpc_pq_read.cpp |80.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/client/minikql_compile/libcore-client-minikql_compile.a |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/grpc_pq_read.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/persqueue.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/persqueue.cpp |80.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_v1/libydb-services-persqueue_v1.a |80.1%| [AR] {RESULT} $(B)/ydb/services/persqueue_v1/libydb-services-persqueue_v1.a |80.1%| 
[AR] {BAZEL_UPLOAD} $(B)/ydb/services/persqueue_v1/libydb-services-persqueue_v1.a |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__plan_step.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__plan_step.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/portions.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/portions.cpp |80.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/libreader-sys_view-portions.global.a |80.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/libreader-sys_view-portions.global.a |80.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/libreader-sys_view-portions.global.a |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_allocator/txallocator_impl.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator_impl.cpp |80.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_allocator/libcore-tx-tx_allocator.a |80.1%| [AR] {RESULT} $(B)/ydb/core/tx/tx_allocator/libcore-tx-tx_allocator.a |80.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/tx_allocator/libcore-tx-tx_allocator.a |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/session_actor/kqp_query_stats.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/session_actor/kqp_query_stats.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/session_actor/kqp_session_actor.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/session_actor/kqp_session_actor.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/manager/manager.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/manager/manager.cpp |80.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/session_actor/libcore-kqp-session_actor.a |80.1%| [AR] {RESULT} $(B)/ydb/core/kqp/session_actor/libcore-kqp-session_actor.a |80.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/manager/libschemeshard-olap-manager.a |80.1%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/manager/libschemeshard-olap-manager.a |80.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/manager/libschemeshard-olap-manager.a |80.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/session_actor/libcore-kqp-session_actor.a |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/hooks/abstract/abstract.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/hooks/abstract/abstract.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__init.cpp |80.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/hooks/abstract/libcolumnshard-hooks-abstract.a |80.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/hooks/abstract/libcolumnshard-hooks-abstract.a |80.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/hooks/abstract/libcolumnshard-hooks-abstract.a |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__init.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__statistics.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/commitreq.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__statistics.cpp |80.2%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/tx_proxy/commitreq.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard_view.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard_view.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/data_gc.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/data_gc.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/fetcher.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/fetcher.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/given_id_range.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/given_id_range.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/restore_v2_chunks.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/restore_v2_chunks.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__create_tenant.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__create_tenant.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_opt_effects.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_effects.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_opt_build_txs.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/iterator.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_build_txs.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/iterator.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/indexes/update.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/indexes/update.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/leaked_blobs.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/leaked_blobs.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_opt_phase.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/session/task.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_phase.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/session/task.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/metadata/fetcher.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/metadata/fetcher.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_opt_kql.cpp |80.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.a |80.2%| [AR] {RESULT} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.a |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_kql.cpp |80.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.a |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/resource_subscriber/events.cpp |80.2%| 
[CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/resource_subscriber/events.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_validate_config.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_validate_config.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/nodes/nodes.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/nodes/nodes.cpp |80.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/nodes/libcore-sys_view-nodes.a |80.3%| [AR] {RESULT} $(B)/ydb/core/sys_view/nodes/libcore-sys_view-nodes.a |80.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/nodes/libcore-sys_view-nodes.a |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/virtual_group.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_mon.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/virtual_group.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_mon.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/plain_read_data.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/plain_read_data.cpp |80.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/bscontroller/libcore-mind-bscontroller.a |80.3%| [AR] {RESULT} $(B)/ydb/core/mind/bscontroller/libcore-mind-bscontroller.a |80.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/mind/bscontroller/libcore-mind-bscontroller.a |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/fetching.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/fetching.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_scheme_initroot.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_scheme_initroot.cpp |80.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/driver_lib/cli_utils/libcli_utils.a |80.3%| [AR] {RESULT} $(B)/ydb/core/driver_lib/cli_utils/libcli_utils.a |80.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/driver_lib/cli_utils/libcli_utils.a |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/constructor.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/constructor.cpp |80.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/liboptimizer-lbuckets-constructor.global.a |80.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/liboptimizer-lbuckets-constructor.global.a |80.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/liboptimizer-lbuckets-constructor.global.a |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_tablet_counters.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_tablet_counters.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks_actualization.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks_actualization.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/columnshard/resource_subscriber/task.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/resource_subscriber/task.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/scanner.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/scanner.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/special_cleaner.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/special_cleaner.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/granule/clean_granule.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/granule/clean_granule.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__write_index.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__write_index.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vmovedpatch_actor.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vmovedpatch_actor.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/session/session.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/session/session.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_query_blocks_transformer.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_query_blocks_transformer.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_unused_tables_template.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_unused_tables_template.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_load_state.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_load_state.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3of4.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3of4.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/data_mon.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/data_mon.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_opt_build_phy_query.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_build_phy_query.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/service_actor.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/service_actor.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/column_engine.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/column_engine.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_statistics_transformer.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_statistics_transformer.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/signals/owner.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/signals/owner.cpp |80.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/signals/libydb-library-signals.a |80.4%| [AR] {RESULT} $(B)/ydb/library/signals/libydb-library-signals.a |80.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/signals/libydb-library-signals.a |80.4%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/columnshard/columnshard__propose_cancel.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__propose_cancel.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/indexes/schema.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/indexes/schema.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__scan.cpp |80.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/indexes/libschemeshard-olap-indexes.a |80.4%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/indexes/libschemeshard-olap-indexes.a |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/session/control.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__scan.cpp |80.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/indexes/libschemeshard-olap-indexes.a |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/session/control.cpp |80.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.global.a |80.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.global.a |80.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.global.a |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/node_service/kqp_node_service.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/node_service/kqp_node_service.cpp |80.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/node_service/libcore-kqp-node_service.a |80.4%| [AR] {RESULT} $(B)/ydb/core/kqp/node_service/libcore-kqp-node_service.a |80.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/node_service/libcore-kqp-node_service.a |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks_v0_meta.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks_v0_meta.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/query_extr.cpp |80.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.global.a |80.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.global.a |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__progress_tx.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_extr.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__progress_tx.cpp |80.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.global.a |80.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/query/libblobstorage-vdisk-query.a |80.4%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/query/libblobstorage-vdisk-query.a |80.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/query/libblobstorage-vdisk-query.a |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hulldb_bulksstmngr.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hulldb_bulksstmngr.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ydb_convert/topic_description.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ydb_convert/topic_description.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/columnshard/columnshard_subdomain_path_id.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard_subdomain_path_id.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_opt_phy_finalize.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/update.cpp |80.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/libalter-in_store-common.a |80.5%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/libalter-in_store-common.a |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/update.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_phy_finalize.cpp |80.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/libalter-in_store-common.a |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/granule/normalizer.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/granule/normalizer.cpp |80.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/normalizer/granule/libcolumnshard-normalizer-granule.global.a |80.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/granule/libcolumnshard-normalizer-granule.global.a |80.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/granule/libcolumnshard-normalizer-granule.global.a |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/vdisk_write.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/vdisk_write.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/service/executor.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/service/executor.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/manager.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/manager.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_column_statistics_requester.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/read.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_column_statistics_requester.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/read.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard_impl.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard_impl.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/inflight_request_tracker.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/inflight_request_tracker.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__notify_tx_completion.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__notify_tx_completion.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/fetched_data.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/fetched_data.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/source.cpp |80.5%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/proxy.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/source.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/proxy.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/backpressure/queue.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/queue.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_opt.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_opt.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/balance/balancing_actor.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/balance/balancing_actor.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__write.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__write.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/resource_subscriber/actor.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/resource_subscriber/actor.cpp |80.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/resource_subscriber/libtx-columnshard-resource_subscriber.a |80.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/resource_subscriber/libtx-columnshard-resource_subscriber.a |80.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/resource_subscriber/libtx-columnshard-resource_subscriber.a |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_recovery.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_recovery.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/iterator.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/iterator.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/ycsb/kqp_select.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ycsb/kqp_select.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_constant_folding_transformer.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_constant_folding_transformer.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/sentinel.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/sentinel.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/manager/common.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/common.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/db_wrapper.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/db_wrapper.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/node_tablet_monitor.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/node_tablet_monitor.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/writer/compacted_blob_constructor.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/compacted_blob_constructor.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/session/storage/s3/storage.cpp |80.6%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/columnshard/export/session/storage/s3/storage.cpp |80.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/export/session/storage/s3/libsession-storage-s3.global.a |80.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/session/storage/s3/libsession-storage-s3.global.a |80.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/session/storage/s3/libsession-storage-s3.global.a |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/background_controller.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/background_controller.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_fsm.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_fsm.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/service/activation.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/service/activation.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/read_metadata.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/read_metadata.cpp |80.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/nodewarden/libcore-blobstorage-nodewarden.a |80.6%| [AR] {RESULT} $(B)/ydb/core/blobstorage/nodewarden/libcore-blobstorage-nodewarden.a |80.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.a |80.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.a |80.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.a |80.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/nodewarden/libcore-blobstorage-nodewarden.a |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetched_data.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetched_data.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_opt.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/context.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/blob_mapping_cache.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/context.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/blob_mapping_cache.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/column_engine_logs.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/constructor.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/column_engine_logs.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/constructor.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp |80.6%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/group_metrics_exchange.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/group_metrics_exchange.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_expr_nodes.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_expr_nodes.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard_schema.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard_schema.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_monitoring_proxy.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_monitoring_proxy.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/writer/indexed_blob_constructor.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/indexed_blob_constructor.cpp |80.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/writer/libcolumnshard-engines-writer.a |80.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/writer/libcolumnshard-engines-writer.a |80.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/writer/libcolumnshard-engines-writer.a |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_reader/actor.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_reader/actor.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/source/events/control.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/events/control.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/source.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/source.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hullsatisfactionrank.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hullsatisfactionrank.cpp |80.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/libvdisk-hulldb-base.a |80.7%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/libvdisk-hulldb-base.a |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard_private_events.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard_private_events.cpp |80.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/libvdisk-hulldb-base.a |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_stage_float_up.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_stage_float_up.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/datastreams/put_records_actor.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/datastreams/put_records_actor.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ydb_convert/column_families.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ydb_convert/column_families.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/session/selector/abstract/selector.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/session/selector/abstract/selector.cpp |80.7%| [AR] {default-linux-x86_64, release, 
asan} $(B)/ydb/core/ydb_convert/libydb-core-ydb_convert.a |80.7%| [AR] {RESULT} $(B)/ydb/core/ydb_convert/libydb-core-ydb_convert.a |80.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/export/session/selector/abstract/libsession-selector-abstract.a |80.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/session/selector/abstract/libsession-selector-abstract.a |80.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/session/selector/abstract/libsession-selector-abstract.a |80.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/ydb_convert/libydb-core-ydb_convert.a |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/tables_manager.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/tables_manager.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_sort.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_sort.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/counters.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/counters.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/context.cpp |80.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/libstorage-actualizer-scheme.a |80.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/libstorage-actualizer-scheme.a |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/context.cpp |80.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/libstorage-actualizer-scheme.a |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blob_cache.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blob_cache.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/address_classification/net_classifier.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/address_classification/net_classifier.cpp |80.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/address_classification/libcore-mind-address_classification.a |80.7%| [AR] {RESULT} $(B)/ydb/core/mind/address_classification/libcore-mind-address_classification.a |80.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/mind/address_classification/libcore-mind-address_classification.a |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/interval.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/interval.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/granule/stages.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/granule/stages.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/index/index.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/index/index.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/merge.cpp |80.8%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/columnshard/engines/storage/actualizer/index/libstorage-actualizer-index.a |80.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/index/libstorage-actualizer-index.a |80.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/index/libstorage-actualizer-index.a |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/merge.cpp |80.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/libreader-plain_reader-iterator.a |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/events/events.cpp |80.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/libreader-plain_reader-iterator.a |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/events/events.cpp |80.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/libreader-plain_reader-iterator.a |80.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/export/events/libcolumnshard-export-events.a |80.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/events/libcolumnshard-export-events.a |80.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/events/libcolumnshard-export-events.a |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/abstract/initialization.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_olap_filter.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/abstract/initialization.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_olap_filter.cpp |80.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/abstract/libservices-metadata-abstract.a |80.8%| [AR] {RESULT} $(B)/ydb/services/metadata/abstract/libservices-metadata-abstract.a |80.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/abstract/libservices-metadata-abstract.a |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetching.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetching.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_list_renderer.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_list_renderer.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/granule/granule.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/granule/granule.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/pdisk_read.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/pdisk_read.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/workload_service/actors/cpu_load_actors.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/workload_service/actors/cpu_load_actors.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_req_reset.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_req_reset.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_sys.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_sys.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_helpers.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_helpers.cpp |80.8%| [CC] {default-linux-x86_64, 
release, asan} $(S)/ydb/core/tx/columnshard/blobs_reader/read_coordinator.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_reader/read_coordinator.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/schema_version/version.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/schema_version/version.cpp |80.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/normalizer/schema_version/libcolumnshard-normalizer-schema_version.global.a |80.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/schema_version/libcolumnshard-normalizer-schema_version.global.a |80.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/schema_version/libcolumnshard-normalizer-schema_version.global.a |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_reader/task.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_reader/task.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/source/events/transfer.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_precompute.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/events/transfer.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/ut_common.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_precompute.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/ut_common.cpp |80.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/libformat_handler-ut-common.a |80.9%| [AR] {RESULT} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/libformat_handler-ut-common.a |80.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/libformat_handler-ut-common.a |80.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/source/events/libdata_sharing-source-events.a |80.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/source/events/libdata_sharing-source-events.a |80.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/source/events/libdata_sharing-source-events.a |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/backpressure/event.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/event.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/statestorage_guardian.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/granule/storage.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/statestorage_guardian.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/granule/storage.cpp |80.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/backpressure/libcore-blobstorage-backpressure.a |80.9%| [AR] {RESULT} $(B)/ydb/core/blobstorage/backpressure/libcore-blobstorage-backpressure.a |80.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/backpressure/libcore-blobstorage-backpressure.a |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/blocks.cpp |80.9%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/blob_depot/agent/blocks.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/granule/portions_index.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/granule/portions_index.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_limit.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_limit.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/restore_corrupted_blob_actor.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/restore_corrupted_blob_actor.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/fetcher.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/access.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/fetcher.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_stat.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/access.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_stat.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_process.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_process.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/upload_rows.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/ycsb/test_load_read_iterator.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/upload_rows.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ycsb/test_load_read_iterator.cpp |80.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/libcore-tx-tx_proxy.a |80.9%| [AR] {RESULT} $(B)/ydb/core/tx/tx_proxy/libcore-tx-tx_proxy.a |80.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/tx_proxy/libcore-tx-tx_proxy.a |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/actor2.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/actor2.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/constructor.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/constructor.cpp |81.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/libreader-sys_view-constructor.a |80.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/libreader-sys_view-constructor.a |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/actor/export_actor.cpp |81.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/libreader-sys_view-constructor.a |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/actor/export_actor.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_source.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_source.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/session/selector/backup/selector.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/session/selector/backup/selector.cpp |81.0%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/columnshard/export/session/selector/backup/libsession-selector-backup.global.a |81.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/session/selector/backup/libsession-selector-backup.global.a |81.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/session/selector/backup/libsession-selector-backup.global.a |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_olap_agg.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_olap_agg.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_driveestimator.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_driveestimator.cpp |81.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/pdisk/libcore-blobstorage-pdisk.a |81.0%| [AR] {RESULT} $(B)/ydb/core/blobstorage/pdisk/libcore-blobstorage-pdisk.a |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/actor/write.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/actor/write.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_resolver.cpp |81.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/export/actor/libcolumnshard-export-actor.a |81.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/actor/libcolumnshard-export-actor.a |81.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/actor/libcolumnshard-export-actor.a |81.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/pdisk/libcore-blobstorage-pdisk.a |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_resolver.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/manager/generic_manager.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sstslice.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/generic_manager.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sstslice.cpp |81.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/libvdisk-hulldb-generic.a |81.0%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/libvdisk-hulldb-generic.a |81.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/libvdisk-hulldb-generic.a |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/util/memory_tracker.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/util/memory_tracker.cpp |81.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/util/libydb-core-util.a |81.0%| [AR] {RESULT} $(B)/ydb/core/util/libydb-core-util.a |81.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/util/libydb-core-util.a |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/optimizer.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/optimizer.cpp |81.0%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.global.a |81.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.global.a |81.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.global.a |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetch_steps.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetch_steps.cpp |81.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.a |81.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.a |81.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.a |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/bootstrapper.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/bootstrapper.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/workload_service/actors/scheme_actors.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/workload_service/actors/scheme_actors.cpp |81.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/workload_service/actors/libkqp-workload_service-actors.a |81.1%| [AR] {RESULT} $(B)/ydb/core/kqp/workload_service/actors/libkqp-workload_service-actors.a |81.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/workload_service/actors/libkqp-workload_service-actors.a |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/update.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/initializer/object.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/object.cpp |81.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/libalter-in_store-transfer.a |81.1%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/libalter-in_store-transfer.a |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/update.cpp |81.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/libalter-in_store-transfer.a |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_provider.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_provider.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/storage_discover.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_discover.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/meta.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/meta.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/run.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/run.cpp |81.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/driver_lib/run/librun.a |81.1%| [AR] {RESULT} $(B)/ydb/core/driver_lib/run/librun.a |81.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/driver_lib/run/librun.a |81.1%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/columnshard/blobs_reader/events.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_reader/events.cpp |81.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/blobs_reader/libtx-columnshard-blobs_reader.a |81.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_reader/libtx-columnshard-blobs_reader.a |81.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_reader/libtx-columnshard-blobs_reader.a |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_results.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_results.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/ycsb/bulk_mkql_upsert.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ycsb/bulk_mkql_upsert.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/metrics.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/metrics.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/utils/metadata_helpers.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/utils/metadata_helpers.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/registration.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_service.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/registration.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/source/session/source.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_service.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/session/source.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/snapshot.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/snapshot.cpp |81.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.a |81.1%| [AR] {RESULT} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.a |81.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.a |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/actor.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/actor.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3dc.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3dc.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/rewrite_io_utils.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/rewrite_io_utils.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/plain_read_data.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/plain_read_data.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/tiering.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/tiering.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/events.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/events.cpp |81.2%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/columnshard/engines/writer/buffer/libengines-writer-buffer.a |81.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/writer/buffer/libengines-writer-buffer.a |81.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/writer/buffer/libengines-writer-buffer.a |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/iterator.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/iterator.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/chunks.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/chunks.cpp |81.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/libreader-sys_view-chunks.global.a |81.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/libreader-sys_view-chunks.global.a |81.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/libreader-sys_view-chunks.global.a |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/utils/scheme_helpers.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/utils/scheme_helpers.cpp |81.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/utils/libkqp-gateway-utils.a |81.2%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/utils/libkqp-gateway-utils.a |81.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/utils/libkqp-gateway-utils.a |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_collector.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_collector.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/source/session/cursor.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/session/cursor.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_exec.cpp |81.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/source/session/libdata_sharing-source-session.a |81.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/source/session/libdata_sharing-source-session.a |81.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/source/session/libdata_sharing-source-session.a |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_exec.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_fetch_actor.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_fetch_actor.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/rbo/kqp_rbo.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/rbo/kqp_rbo.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/transaction/tx_scan.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/transaction/tx_scan.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/update.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/update.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/source.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/data_load.cpp |81.2%| [AR] 
{default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/libalter-in_store-config_shards.a |81.2%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/libalter-in_store-config_shards.a |81.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/libalter-in_store-config_shards.a |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/source.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/data_load.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/optimizer.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/optimizer.cpp |81.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/libreader-sys_view-optimizer.global.a |81.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/libreader-sys_view-optimizer.global.a |81.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/libreader-sys_view-optimizer.global.a |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_datasource.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_datasource.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_type_ann_pg.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_type_ann_pg.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/common/session/common.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/common/session/common.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/tier/adapter.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/adapter.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_state_collect.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_state_collect.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/context.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/context.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/rbo/kqp_rbo_rules.cpp |81.3%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/kqp/opt/rbo/kqp_rbo_rules.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_mon.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_mon.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/tier/write.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/write.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/access_behaviour.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/access_behaviour.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/workload_service/tables/table_queries.cpp |81.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.global.a |81.3%| [AR] {RESULT} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.global.a |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/workload_service/tables/table_queries.cpp |81.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.global.a |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_cms.cpp |81.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/workload_service/tables/libkqp-workload_service-tables.a |81.3%| [AR] {RESULT} $(B)/ydb/core/kqp/workload_service/tables/libkqp-workload_service-tables.a |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_cms.cpp |81.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/workload_service/tables/libkqp-workload_service-tables.a |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/initializer/accessor_init.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/accessor_init.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_allocator_client/actor_client.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator_client/actor_client.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway.cpp |81.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_allocator_client/libcore-tx-tx_allocator_client.a |81.3%| [AR] {RESULT} $(B)/ydb/core/tx/tx_allocator_client/libcore-tx-tx_allocator_client.a |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway.cpp |81.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/tx_allocator_client/libcore-tx-tx_allocator_client.a |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_update.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_pipe_client.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_pipe_client.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_update.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc.cpp |81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms.cpp |81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms.cpp |81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/datastreams/grpc_service.cpp |81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/result.cpp |81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/result.cpp |81.4%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_update_config.cpp |81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/cleanup_tables.cpp |81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_update_config.cpp |81.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/datastreams/libydb-services-datastreams.a |81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/cleanup_tables.cpp |81.4%| [AR] {RESULT} $(B)/ydb/services/datastreams/libydb-services-datastreams.a |81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetching.cpp |81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetching.cpp |81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/datastreams/grpc_service.cpp |81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/data.cpp |81.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/datastreams/libydb-services-datastreams.a |81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/data.cpp |81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_storage_request.cpp |81.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/libydb-core-cms.a |81.4%| [AR] {RESULT} $(B)/ydb/core/cms/libydb-core-cms.a |81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_storage_request.cpp |81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_build_stage.cpp |81.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/cms/libydb-core-cms.a |81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_build_stage.cpp |81.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/opt/physical/libkqp-opt-physical.a |81.4%| [AR] {RESULT} $(B)/ydb/core/kqp/opt/physical/libkqp-opt-physical.a |81.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/opt/physical/libkqp-opt-physical.a |81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/const.cpp |81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/const.cpp |81.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.a |81.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.a |81.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.a |81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/with_appended.cpp |81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/with_appended.cpp |81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_databases_cache.cpp |81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_databases_cache.cpp |81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/memory.cpp |81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/memory.cpp |81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/common/config.cpp |81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/common/config.cpp |81.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/ext_index/common/libservices-ext_index-common.a |81.4%| [AR] {RESULT} $(B)/ydb/services/ext_index/common/libservices-ext_index-common.a |81.4%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/services/ext_index/common/libservices-ext_index-common.a |81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/cleanup_portions.cpp |81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/cleanup_portions.cpp |81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_insert_index.cpp |81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetched_data.cpp |81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_insert_index.cpp |81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetched_data.cpp |81.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/libreader-simple_reader-iterator.a |81.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/libreader-simple_reader-iterator.a |81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/one_layer.cpp |81.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/libreader-simple_reader-iterator.a |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/one_layer.cpp |81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_counters_aggregator.cpp |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_counters_aggregator.cpp |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_metrics.cpp |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_metrics.cpp |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_intermediate.cpp |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_intermediate.cpp |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/initializer/snapshot.cpp |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/snapshot.cpp |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_datasink.cpp |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_datasink.cpp |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/upsert_opt.cpp |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/upsert_opt.cpp |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/constructor.cpp |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/constructor.cpp |81.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.a |81.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.a |81.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.a |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/ycsb/common.cpp |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ycsb/common.cpp |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/op_commit_blob_seq.cpp |81.5%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/blob_depot/op_commit_blob_seq.cpp |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/read_attributes_utils.cpp |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/read_attributes_utils.cpp |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/balance/deleter.cpp |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/balance/deleter.cpp |81.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/balance/libblobstorage-vdisk-balance.a |81.5%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/balance/libblobstorage-vdisk-balance.a |81.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/balance/libblobstorage-vdisk-balance.a |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/abstract.cpp |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/abstract.cpp |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/transaction/tx_internal_scan.cpp |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/transaction/tx_internal_scan.cpp |81.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/transaction/libengines-reader-transaction.a |81.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/transaction/libengines-reader-transaction.a |81.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/transaction/libengines-reader-transaction.a |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/proxy_service/kqp_session_info.cpp |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction.cpp |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_session_info.cpp |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction.cpp |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/ttl.cpp |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/garbage.cpp |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/ttl.cpp |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/garbage.cpp |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis_osiris.cpp |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis_osiris.cpp |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/zero_level.cpp |81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/zero_level.cpp |81.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/libblobstorage-vdisk-anubis_osiris.a |81.5%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/libblobstorage-vdisk-anubis_osiris.a |81.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/libblobstorage-vdisk-anubis_osiris.a |81.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.global.a |81.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.global.a |81.6%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.global.a |81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/blocks.cpp |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/blocks.cpp |81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/node_whiteboard.cpp |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/node_whiteboard.cpp |81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_responsiveness_pinger.cpp |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_responsiveness_pinger.cpp |81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/kafka_consumer_groups_metadata_initializers.cpp |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/kafka_consumer_groups_metadata_initializers.cpp |81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_state.cpp |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_state.cpp |81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_vdisk_guids.cpp |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_vdisk_guids.cpp |81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogrecovery.cpp |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogrecovery.cpp |81.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/common/libblobstorage-vdisk-common.a |81.6%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/common/libblobstorage-vdisk-common.a |81.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/common/libblobstorage-vdisk-common.a |81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_metadata_actor.cpp |81.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/synclog/libblobstorage-vdisk-synclog.a |81.6%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/synclog/libblobstorage-vdisk-synclog.a |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_metadata_actor.cpp |81.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/synclog/libblobstorage-vdisk-synclog.a |81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/constructor.cpp |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/constructor.cpp |81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/store/store.cpp |81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/initializer/fetcher.cpp |81.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/libscheme-indexes-abstract.a |81.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/libscheme-indexes-abstract.a |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/fetcher.cpp |81.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/store/libschemeshard-olap-store.a |81.6%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/store/libschemeshard-olap-store.a |81.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/libscheme-indexes-abstract.a |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/store/store.cpp |81.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/store/libschemeshard-olap-store.a |81.6%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_uniq_helper.cpp |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_uniq_helper.cpp |81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/syncer_job_task.cpp |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/syncer_job_task.cpp |81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/merge_subset.cpp |81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc_actor.cpp |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/merge_subset.cpp |81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/storage_collect_garbage.cpp |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc_actor.cpp |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_collect_garbage.cpp |81.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/syncer/libblobstorage-vdisk-syncer.a |81.6%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/syncer/libblobstorage-vdisk-syncer.a |81.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/syncer/libblobstorage-vdisk-syncer.a |81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp |81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/rbo/kqp_convert_to_physical.cpp |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/rbo/kqp_convert_to_physical.cpp |81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/resource_broker.cpp |81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/resource_broker.cpp |81.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet/libydb-core-tablet.a |81.6%| [AR] {RESULT} $(B)/ydb/core/tablet/libydb-core-tablet.a |81.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tablet/libydb-core-tablet.a |81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/group_write.cpp |81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/group_write.cpp |81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/tier/storage.cpp |81.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/load_test/libydb-core-load_test.a |81.7%| [AR] {RESULT} $(B)/ydb/core/load_test/libydb-core-load_test.a |81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/storage.cpp |81.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/blobs_action/tier/libcolumnshard-blobs_action-tier.a |81.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/tier/libcolumnshard-blobs_action-tier.a |81.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/load_test/libydb-core-load_test.a |81.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/tier/libcolumnshard-blobs_action-tier.a |81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_tree.cpp |81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_tree.cpp |81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_monactors.cpp |81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_monactors.cpp |81.7%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/libvdisk-hulldb-barriers.a |81.7%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/libvdisk-hulldb-barriers.a |81.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/libvdisk-hulldb-barriers.a |81.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/skeleton/libblobstorage-vdisk-skeleton.a |81.7%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/skeleton/libblobstorage-vdisk-skeleton.a |81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/kafka_connection.cpp |81.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/skeleton/libblobstorage-vdisk-skeleton.a |81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/kafka_connection.cpp |81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/tx_init.cpp |81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/tx_init.cpp |81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_returning.cpp |81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_returning.cpp |81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/service/deleting.cpp |81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/service/deleting.cpp |81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/limit.cpp |81.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/ext_index/service/libservices-ext_index-service.a |81.7%| [AR] {RESULT} $(B)/ydb/services/ext_index/service/libservices-ext_index-service.a |81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/limit.cpp |81.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/ext_index/service/libservices-ext_index-service.a |81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_upsert_defaults.cpp |81.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/libsimple_reader-iterator-sync_points.a |81.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/libsimple_reader-iterator-sync_points.a |81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_upsert_defaults.cpp |81.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/libsimple_reader-iterator-sync_points.a |81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/counters.cpp |81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/counters.cpp |81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/constructor.cpp |81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/constructor.cpp |81.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/libstorage-actualizer-tiering.a |81.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/libstorage-actualizer-tiering.a |81.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/libstorage-actualizer-tiering.a |81.7%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/liboptimizer-lcbuckets-constructor.global.a |81.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/liboptimizer-lcbuckets-constructor.global.a |81.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/liboptimizer-lcbuckets-constructor.global.a |81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_insert.cpp |81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_insert.cpp |81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/column_families/update.cpp |81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_nodemonactor.cpp |81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/column_families/update.cpp |81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_nodemonactor.cpp |81.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/column_families/libschemeshard-olap-column_families.a |81.8%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/column_families/libschemeshard-olap-column_families.a |81.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/column_families/libschemeshard-olap-column_families.a |81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/snapshot.cpp |81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/snapshot.cpp |81.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.a |81.8%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.a |81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/common/update.cpp |81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/common/update.cpp |81.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.a |81.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/common/liboperations-alter-common.a |81.8%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/common/liboperations-alter-common.a |81.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/common/liboperations-alter-common.a |81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/manager/object.cpp |81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/object.cpp |81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_create_partitions_actor.cpp |81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_create_partitions_actor.cpp |81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_update_index.cpp |81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_update_index.cpp |81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue.cpp |81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue.cpp |81.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/keyvalue/libydb-core-keyvalue.a |81.8%| [AR] {RESULT} $(B)/ydb/core/keyvalue/libydb-core-keyvalue.a |81.8%| [AR] 
{BAZEL_UPLOAD} $(B)/ydb/core/keyvalue/libydb-core-keyvalue.a |81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/alter_sharding.cpp |81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/alter_sharding.cpp |81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_delete_index.cpp |81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_delete_index.cpp |81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/rbo/kqp_operator.cpp |81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/rbo/kqp_operator.cpp |81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/appdata.cpp |81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/appdata.cpp |81.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/libydb-core-base.a |81.8%| [AR] {RESULT} $(B)/ydb/core/base/libydb-core-base.a |81.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/base/libydb-core-base.a |81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/loading/stages.cpp |81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/loading/stages.cpp |81.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/loading/libcolumnshard-engines-loading.a |81.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/loading/libcolumnshard-engines-loading.a |81.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/loading/libcolumnshard-engines-loading.a |81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sharding/hash_modulo.cpp |81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sharding/hash_modulo.cpp |81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/add_column.cpp |81.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.global.a |81.8%| [AR] {RESULT} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.global.a |81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/add_column.cpp |81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_balancer_actor.cpp |81.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.global.a |81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_balancer_actor.cpp |81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/db_counters.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/db_counters.cpp |81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_sasl_handshake_actor.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_sasl_handshake_actor.cpp |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_tablet_state.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_tablet_state.cpp |81.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/server/libcore-client-server.a |81.9%| [AR] {RESULT} $(B)/ydb/core/client/server/libcore-client-server.a |81.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/client/server/libcore-client-server.a |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_read_session_actor.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_read_session_actor.cpp |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/actorlib_impl/connect_socket_protocol.cpp 
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/connect_socket_protocol.cpp |81.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/actorlib_impl/libydb-core-actorlib_impl.a |81.9%| [AR] {RESULT} $(B)/ydb/core/actorlib_impl/libydb-core-actorlib_impl.a |81.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/actorlib_impl/libydb-core-actorlib_impl.a |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions.cpp |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/tx_collect.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/tx_collect.cpp |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/tx_aggregate.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/tx_aggregate.cpp |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.cpp |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_create_topics_actor.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_create_topics_actor.cpp |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_transaction_actor.cpp |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/general_compaction.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/general_compaction.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_transaction_actor.cpp |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/initializer/common.cpp |81.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/libcolumnshard-engines-changes.a |81.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/libcolumnshard-engines-changes.a |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/common.cpp |81.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/libcolumnshard-engines-changes.a |81.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/initializer/libservices-metadata-initializer.a |81.9%| [AR] {RESULT} $(B)/ydb/services/metadata/initializer/libservices-metadata-initializer.a |81.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/initializer/libservices-metadata-initializer.a |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_opt_build.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_opt_build.cpp |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_offset_fetch_actor.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_offset_fetch_actor.cpp |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/tx_interval_summary.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/tx_interval_summary.cpp |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_init_producer_id_actor.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_init_producer_id_actor.cpp 
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_topic_offsets_actor.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_topic_offsets_actor.cpp |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/remove_portions.cpp |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_balance_actor_sql.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/remove_portions.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_balance_actor_sql.cpp |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/accessor_subscribe.cpp |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/accessor_subscribe.cpp |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_describe_configs_actor.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_describe_configs_actor.cpp |81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_indexes.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_indexes.cpp |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_sasl_auth_actor.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_sasl_auth_actor.cpp |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/query.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/query.cpp |82.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blob_depot/agent/libcore-blob_depot-agent.a |82.0%| [AR] {RESULT} $(B)/ydb/core/blob_depot/agent/libcore-blob_depot-agent.a |82.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blob_depot/agent/libcore-blob_depot-agent.a |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/kafka_transactions_coordinator.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/kafka_transactions_coordinator.cpp |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_peer_stats_calculator.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_peer_stats_calculator.cpp |82.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/proxy_service/libcore-kqp-proxy_service.a |82.0%| [AR] {RESULT} $(B)/ydb/core/kqp/proxy_service/libcore-kqp-proxy_service.a |82.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/proxy_service/libcore-kqp-proxy_service.a |82.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/dsproxy/libcore-blobstorage-dsproxy.a |82.0%| [AR] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/libcore-blobstorage-dsproxy.a |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/kafka_consumer_members_metadata_initializers.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/kafka_consumer_members_metadata_initializers.cpp |82.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/dsproxy/libcore-blobstorage-dsproxy.a |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__update_pool_state.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__update_pool_state.cpp |82.0%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/sys_view/processor/tx_interval_metrics.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/tx_interval_metrics.cpp |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/processor_impl.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/processor_impl.cpp |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/read_with_blobs.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/read_with_blobs.cpp |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/tx_init_schema.cpp |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/untag_queue.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/tx_init_schema.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/untag_queue.cpp |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_type_ann.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_type_ann.cpp |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/accessor_refresh.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/accessor_refresh.cpp |82.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/ds_table/libservices-metadata-ds_table.a |82.0%| [AR] {RESULT} $(B)/ydb/services/metadata/ds_table/libservices-metadata-ds_table.a |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/coro_tx.cpp |82.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/ds_table/libservices-metadata-ds_table.a |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/coro_tx.cpp |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_snapshot.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_snapshot.cpp |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/tx_configure.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/tx_configure.cpp |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/rbo/kqp_rbo_transformer.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/rbo/kqp_rbo_transformer.cpp |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/tx_top_partitions.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/tx_top_partitions.cpp |82.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/scrub/libblobstorage-vdisk-scrub.a |82.0%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/scrub/libblobstorage-vdisk-scrub.a |82.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/opt/rbo/libkqp-opt-rbo.a |82.0%| [AR] {RESULT} $(B)/ydb/core/kqp/opt/rbo/libkqp-opt-rbo.a |82.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/processor/libcore-sys_view-processor.a |82.1%| [AR] {RESULT} $(B)/ydb/core/sys_view/processor/libcore-sys_view-processor.a |82.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/scrub/libblobstorage-vdisk-scrub.a |82.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/processor/libcore-sys_view-processor.a |82.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/opt/rbo/libkqp-opt-rbo.a |82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__update_subdomain_key.cpp |82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__update_subdomain_key.cpp |82.1%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/kqp/common/kqp_resolve.cpp |82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/common/kqp_resolve.cpp |82.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/console/libcore-cms-console.a |82.1%| [AR] {RESULT} $(B)/ydb/core/cms/console/libcore-cms-console.a |82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/manager.cpp |82.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/cms/console/libcore-cms-console.a |82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_offset_commit_actor.cpp |82.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.a |82.1%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.a |82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/manager.cpp |82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_offset_commit_actor.cpp |82.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.a |82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_data_from_source.cpp |82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_data_from_source.cpp |82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_effects.cpp |82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_effects.cpp |82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/mediator/mediator_impl.cpp |82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/mediator_impl.cpp |82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/manager.cpp |82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/manager.cpp |82.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_accessor/libtx-columnshard-data_accessor.a |82.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/libtx-columnshard-data_accessor.a |82.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/libtx-columnshard-data_accessor.a |82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/alter_column.cpp |82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/alter_column.cpp |82.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.global.a |82.1%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.global.a |82.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.global.a |82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_list_offsets_actor.cpp |82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_list_offsets_actor.cpp |82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/move_portions.cpp |82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/meta.cpp |82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/move_portions.cpp |82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/meta.cpp |82.1%| [CC] {default-linux-x86_64, release, 
asan} $(S)/ydb/core/kqp/gateway/actors/scheme.cpp |82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/actors/scheme.cpp |82.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/actors/libkqp-gateway-actors.a |82.1%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/actors/libkqp-gateway-actors.a |82.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/actors/libkqp-gateway-actors.a |82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/kafka_transactional_producers_initializers.cpp |82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/kafka_transactional_producers_initializers.cpp |82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_data_cleanup_logic.cpp |82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_data_cleanup_logic.cpp |82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/kqp_helper.cpp |82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/kqp_helper.cpp |82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/dst_alterer.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/dst_alterer.cpp |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_start_from_initiator.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_start_from_initiator.cpp |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/stream_creator.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/stream_creator.cpp |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/mediator/mediator__schema.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/mediator__schema.cpp |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/dst_remover.cpp |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/service/worker.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/dst_remover.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/service/worker.cpp |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_alter_configs_actor.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_alter_configs_actor.cpp |82.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kafka_proxy/libydb-core-kafka_proxy.a |82.2%| [AR] {RESULT} $(B)/ydb/core/kafka_proxy/libydb-core-kafka_proxy.a |82.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kafka_proxy/libydb-core-kafka_proxy.a |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_load_blob_queue.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_load_blob_queue.cpp |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_self_pinger.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_self_pinger.cpp |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_finish_ack_from_initiator.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_finish_ack_from_initiator.cpp |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/init/init.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/init/init.cpp |82.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/init/libfq-libs-init.a |82.2%| 
[AR] {RESULT} $(B)/ydb/core/fq/libs/init/libfq-libs-init.a |82.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/init/libfq-libs-init.a |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor_factory.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor_factory.cpp |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/internal/rate_limiter_resources.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/rate_limiter_resources.cpp |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/common/kqp_tx_manager.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/common/kqp_tx_manager.cpp |82.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/common/libcore-kqp-common.a |82.2%| [AR] {RESULT} $(B)/ydb/core/kqp/common/libcore-kqp-common.a |82.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/common/libcore-kqp-common.a |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_finish_from_source.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_finish_from_source.cpp |82.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/destination/transactions/libdata_sharing-destination-transactions.a |82.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/destination/transactions/libdata_sharing-destination-transactions.a |82.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/destination/transactions/libdata_sharing-destination-transactions.a |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_portion.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_portion.cpp |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_bio_actor.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_bio_actor.cpp |82.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/portions/libcolumnshard-engines-portions.a |82.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/portions/libcolumnshard-engines-portions.a |82.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/portions/libcolumnshard-engines-portions.a |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_proxy/actors/query_utils.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/actors/query_utils.cpp |82.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/control_plane_proxy/actors/liblibs-control_plane_proxy-actors.a |82.2%| [AR] {RESULT} $(B)/ydb/core/fq/libs/control_plane_proxy/actors/liblibs-control_plane_proxy-actors.a |82.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/actors/liblibs-control_plane_proxy-actors.a |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_drop_replication.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_drop_replication.cpp |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/validators.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/validators.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/show_create/show_create.cpp |82.3%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/sys_view/show_create/show_create.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/host/kqp_runner.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_runner.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_create_stream_result.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_create_stream_result.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/checkpointing/ut/checkpoint_coordinator_ut.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/checkpointing/ut/checkpoint_coordinator_ut.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_compute_database.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/merger.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_compute_database.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/merger.cpp |82.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/libengines-changes-compaction.a |82.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/libengines-changes-compaction.a |82.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/libengines-changes-compaction.a |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/bs/blob_manager.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/bs/blob_manager.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/load_actor_delete.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/load_actor_delete.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_executer_stats.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_executer_stats.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/host/kqp_host.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_upsert_index.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_host.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_upsert_index.cpp |82.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/host/libcore-kqp-host.a |82.3%| [AR] {RESULT} $(B)/ydb/core/kqp/host/libcore-kqp-host.a |82.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/opt/physical/effects/libopt-physical-effects.a |82.3%| [AR] {RESULT} $(B)/ydb/core/kqp/opt/physical/effects/libopt-physical-effects.a |82.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/host/libcore-kqp-host.a |82.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/opt/physical/effects/libopt-physical-effects.a |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_table.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_table.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__make_access_database_no_inheritable.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__make_access_database_no_inheritable.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_describe_replication.cpp |82.3%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_describe_replication.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_bg_tasks__list.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_bg_tasks__list.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__list_users.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__list_users.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_cdc_stream.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_cdc_stream.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_fs.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_fs.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_ping.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_ping.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__borrowed_compaction.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard.cpp |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__borrowed_compaction.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_bsv.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_bsv.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_scan_fetcher_actor.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/apps/etcd_proxy/service/etcd_impl.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_scan_fetcher_actor.cpp |82.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.a |82.4%| [AR] {RESULT} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.a |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/apps/etcd_proxy/service/etcd_impl.cpp |82.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.a |82.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/compute_actor/libcore-kqp-compute_actor.a |82.4%| [AR] {RESULT} $(B)/ydb/core/kqp/compute_actor/libcore-kqp-compute_actor.a |82.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/compute_actor/libcore-kqp-compute_actor.a |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/kqprun/kqprun.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/row_dispatcher/ut/leader_election_ut.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/kqprun.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/leader_election_ut.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/row_dispatcher/ut/coordinator_ut.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/coordinator_ut.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/long_tx_service/long_tx_service_ut.cpp |82.4%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/tx/long_tx_service/long_tx_service_ut.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_scheme_executer.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_scheme_executer.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/tablet_flat_executor.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/tablet_flat_executor.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/connector_client_mock.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/resource_pools/resource_pools.cpp |82.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/libconnector-libcpp-ut_helpers.a |82.4%| [AR] {RESULT} $(B)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/libconnector-libcpp-ut_helpers.a |82.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/resource_pools/libcore-sys_view-resource_pools.a |82.4%| [AR] {RESULT} $(B)/ydb/core/sys_view/resource_pools/libcore-sys_view-resource_pools.a |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/connector_client_mock.cpp |82.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/libconnector-libcpp-ut_helpers.a |82.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/resource_pools/libcore-sys_view-resource_pools.a |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/resource_pools/resource_pools.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_connections.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_connections.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/config/bsconfig_ut.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/config/bsconfig_ut.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_group/main.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_group/main.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/query_actor/query_actor_ut.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/query_actor/query_actor_ut.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/sentinel_ut_unstable.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/sentinel_ut_unstable.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/test_helper/shard_reader.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/test_helper/shard_reader.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/row_dispatcher/ut/topic_session_ut.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/topic_session_ut.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_create_replication.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_create_replication.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/tx_helpers.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/tx_helpers.cpp |82.4%| [CC] {default-linux-x86_64, release, 
asan} $(S)/ydb/core/external_sources/object_storage/inference/ut/arrow_inference_ut.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/external_sources/object_storage/inference/ut/arrow_inference_ut.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/mvp/core/mvp_ut.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/mvp/core/mvp_ut.cpp |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_discovery_targets_result.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_discovery_targets_result.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_init_schema.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_init_schema.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_quotas.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_quotas.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/keyvalue/grpc_service_ut.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/keyvalue/grpc_service_ut.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_assign_tx_id.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_assign_tx_id.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/federated_query/common/common.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/common/common.cpp |82.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/federated_query/common/libut-federated_query-common.a |82.5%| [AR] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/common/libut-federated_query-common.a |82.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/ut/federated_query/common/libut-federated_query-common.a |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/providers/generic/actors/ut/yql_generic_lookup_actor_ut.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/generic/actors/ut/yql_generic_lookup_actor_ut.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/tablet_helpers.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/tablet_helpers.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_helpers_ut.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_helpers_ut.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_bad_blobid.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_bad_blobid.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/time_cast/time_cast_ut.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/time_cast/time_cast_ut.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_column_stats.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_column_stats.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_brokendevice.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_brokendevice.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/kafka_proxy/ut/ut_transaction_coordinator.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/bs/gc_actor.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_coordinator.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/bs/gc_actor.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/hooks/testing/ro_controller.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/hooks/testing/ro_controller.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/program/resolver.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/program/resolver.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/backpressure/ut_client/backpressure_ut.cpp |82.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/program/libcore-tx-program.a |82.5%| [AR] {RESULT} $(B)/ydb/core/tx/program/libcore-tx-program.a |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/ut_client/backpressure_ut.cpp |82.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/program/libcore-tx-program.a |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/ut/ut_produce_actor.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_produce_actor.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_faketablet.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_faketablet.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_background_compaction.cpp |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_background_compaction.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_keys.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_load.cpp |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/ut_sequence/datashard_ut_sequence.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_load.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_keys.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/ut_sequence/datashard_ut_sequence.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/test_helper/helper.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_proxy/control_plane_proxy.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/test_helper/helper.cpp |82.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/control_plane_proxy/libfq-libs-control_plane_proxy.a |82.6%| [AR] {RESULT} $(B)/ydb/core/fq/libs/control_plane_proxy/libfq-libs-control_plane_proxy.a |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/control_plane_proxy.cpp |82.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/libfq-libs-control_plane_proxy.a |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_ut_common.cpp |82.6%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_outofspace.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_outofspace.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_ut_common.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/prepare.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/secret_resolver.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/prepare.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/secret_resolver.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql_ut.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql_ut.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_range_ops.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_range_ops.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_huge.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_huge.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/mvp/oidc_proxy/oidc_proxy_ut.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/mvp/oidc_proxy/oidc_proxy_ut.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema_ut.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema_ut.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_boot_lease.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_boot_lease.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_repl.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_repl.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/cs_helper.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/cs_helper.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/basics/runtime.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/basics/runtime.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_defrag.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_defrag.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/ut/graph_ut.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/basics/appdata.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/basics/appdata.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/ut/graph_ut.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_followers.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_followers.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet_ut.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_actor.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet_ut.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_actor.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/logging.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/logging.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/columnshard/test_helper/shard_writer.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/test_helper/shard_writer.cpp |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/providers/common/ut_helpers/dq_fake_ca.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/common/ut_helpers/dq_fake_ca.cpp |82.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/common/ut_helpers/libproviders-common-ut_helpers.a |82.6%| [AR] {RESULT} $(B)/ydb/library/yql/providers/common/ut_helpers/libproviders-common-ut_helpers.a |82.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/common/ut_helpers/libproviders-common-ut_helpers.a |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/gc_actor.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/gc_actor.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_synclog.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_synclog.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/bs/gc.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/bs/gc.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/storage/vslots.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/helpers.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/storage/vslots.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/helpers.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/storages_manager.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/storages_manager.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_part_loader.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_part_loader.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/tenant_runtime.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/tenant_runtime.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/storage.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/storage.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/view/view_ut.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/view/view_ut.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/test_shard_mon.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/test_shard_mon.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/memory_controller/memory_controller_ut.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_create_dst_result.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_create_dst_result.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/memory_controller/memory_controller_ut.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/tx_load_everything.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/tx_load_everything.cpp |82.7%| [CC] {default-linux-x86_64, 
release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/bs/write.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/bs/write.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/test_shard_context.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/test_shard_context.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/mediator/execute_queue.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/execute_queue.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_bindings.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_bindings.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/basics/helpers.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/basics/helpers.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_drop_dst_result.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_drop_dst_result.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_assign_stream_name.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_assign_stream_name.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/yql_testlib/yql_testlib.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/yql_testlib/yql_testlib.cpp |82.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/yql_testlib/libydb-core-yql_testlib.a |82.7%| [AR] {RESULT} $(B)/ydb/core/yql_testlib/libydb-core-yql_testlib.a |82.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/yql_testlib/libydb-core-yql_testlib.a |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/storage/storage_pools.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/storage/storage_pools.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/gc.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/gc.cpp |82.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/blobs_action/abstract/libcolumnshard-blobs_action-abstract.a |82.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/abstract/libcolumnshard-blobs_action-abstract.a |82.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/abstract/libcolumnshard-blobs_action-abstract.a |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/mediator/tablet_queue.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/tablet_queue.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/hooks/testing/controller.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/hooks/testing/controller.cpp |82.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/hooks/testing/libcolumnshard-hooks-testing.a |82.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/hooks/testing/libcolumnshard-hooks-testing.a |82.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/hooks/testing/libcolumnshard-hooks-testing.a |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/transfer/transfer_writer.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/service/base_table_writer.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/transfer/transfer_writer.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/dst_creator.cpp |82.8%| 
[AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/transfer/libydb-core-transfer.a |82.8%| [AR] {RESULT} $(B)/ydb/core/transfer/libydb-core-transfer.a |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/service/service.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/service/base_table_writer.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/dst_creator.cpp |82.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/transfer/libydb-core-transfer.a |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/service/service.cpp |82.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/service/libtx-replication-service.a |82.8%| [AR] {RESULT} $(B)/ydb/core/tx/replication/service/libtx-replication-service.a |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/ut/metarequest_ut.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/metarequest_ut.cpp |82.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/replication/service/libtx-replication-service.a |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/bs/storage.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/bs/storage.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/basics/services.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/basics/services.cpp |82.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/testlib/basics/libcore-testlib-basics.a |82.8%| [AR] {RESULT} $(B)/ydb/core/testlib/basics/libcore-testlib-basics.a |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/kqprun/runlib/utils.cpp |82.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/testlib/basics/libcore-testlib-basics.a |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/runlib/utils.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/storage/groups.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/storage/groups.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/bs/remove.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/bs/remove.cpp |82.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/blobs_action/bs/libcolumnshard-blobs_action-bs.a |82.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/bs/libcolumnshard-blobs_action-bs.a |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/controller.cpp |82.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/bs/libcolumnshard-blobs_action-bs.a |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/controller.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/load_actor_impl.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/load_actor_impl.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/kqprun/runlib/kikimr_setup.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/runlib/kikimr_setup.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/fqrun/src/fq_setup.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/fqrun/src/fq_setup.cpp |82.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/fqrun/src/libtools-fqrun-src.a |82.8%| [AR] {RESULT} $(B)/ydb/tests/tools/fqrun/src/libtools-fqrun-src.a |82.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/tools/fqrun/src/libtools-fqrun-src.a |82.8%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/statistics/ut_common/ut_common.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/kqprun/runlib/application.cpp |82.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/ut_common/libcore-statistics-ut_common.a |82.8%| [AR] {RESULT} $(B)/ydb/core/statistics/ut_common/libcore-statistics-ut_common.a |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/runlib/application.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/ut_common/ut_common.cpp |82.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/statistics/ut_common/libcore-statistics-ut_common.a |82.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/kqprun/runlib/libtools-kqprun-runlib.a |82.9%| [AR] {RESULT} $(B)/ydb/tests/tools/kqprun/runlib/libtools-kqprun-runlib.a |82.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/tools/kqprun/runlib/libtools-kqprun-runlib.a |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/storage/pdisks.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/storage/pdisks.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/storage/storage_stats.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/storage/storage_stats.cpp |82.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/storage/libcore-sys_view-storage.a |82.9%| [AR] {RESULT} $(B)/ydb/core/sys_view/storage/libcore-sys_view-storage.a |82.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/storage/libcore-sys_view-storage.a |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_dbstat.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_localrecovery.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_dbstat.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_localrecovery.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/tablets/tablets.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/tablets/tablets.cpp |82.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/tablets/libcore-sys_view-tablets.a |82.9%| [AR] {RESULT} $(B)/ydb/core/sys_view/tablets/libcore-sys_view-tablets.a |82.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/tablets/libcore-sys_view-tablets.a |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/ydb_proxy/local_proxy/local_partition_committer.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/local_proxy/local_partition_committer.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_gc.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_gc.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_helpers/export_reboots_common.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_helpers/export_reboots_common.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/workload_service/ut/common/kqp_workload_service_ut_common.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/workload_service/ut/common/kqp_workload_service_ut_common.cpp |82.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/workload_service/ut/common/libworkload_service-ut-common.a |82.9%| [AR] {RESULT} $(B)/ydb/core/kqp/workload_service/ut/common/libworkload_service-ut-common.a |82.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/workload_service/ut/common/libworkload_service-ut-common.a |82.9%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/vdisk_mock.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/vdisk_mock.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/yc_search_ut/test_events_writer.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/yc_search_ut/test_events_writer.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_heartbeat.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_heartbeat.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/row_dispatcher/ut/row_dispatcher_ut.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/row_dispatcher_ut.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/ydb_proxy/local_proxy/local_partition_reader.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/local_proxy/local_partition_reader.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy.cpp |82.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/ydb_proxy/libtx-replication-ydb_proxy.a |82.9%| [AR] {RESULT} $(B)/ydb/core/tx/replication/ydb_proxy/libtx-replication-ydb_proxy.a |82.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/replication/ydb_proxy/libtx-replication-ydb_proxy.a |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/topic_sdk_test_setup.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/topic_sdk_test_setup.cpp |82.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/libtopic-ut-ut_utils.a |82.9%| [AR] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/libtopic-ut-ut_utils.a |82.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/libtopic-ut-ut_utils.a |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_import__list.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__list.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/resource_pool_classifiers/resource_pool_classifiers.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/resource_pool_classifiers/resource_pool_classifiers.cpp |82.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/resource_pool_classifiers/libcore-sys_view-resource_pool_classifiers.a |82.9%| [AR] {RESULT} $(B)/ydb/core/sys_view/resource_pool_classifiers/libcore-sys_view-resource_pool_classifiers.a |83.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/resource_pool_classifiers/libcore-sys_view-resource_pool_classifiers.a |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_export__list.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__list.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_import.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_bsc.cpp |83.0%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_bsc.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_export.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/internal/nodes_health_check.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/nodes_health_check.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__init_populator.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__init_populator.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_result_write.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_result_write.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__background_cleaning.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__background_cleaning.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_import__forget.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__forget.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_index.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_index.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_export__cancel.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__cancel.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__forget.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__forget.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_scan.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_scan.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_export__get.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__get.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_import__cancel.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__cancel.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__list.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__list.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/ut/ut_secondary_index.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_secondary_index.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_helpers/test_env.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_helpers/test_env.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_bsv.cpp |83.0%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_bsv.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_import_flow_proposals.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import_flow_proposals.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__clean_pathes.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__clean_pathes.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_audit_log.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_audit_log.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/ut/ut_recompute_kmeans.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__cancel.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__cancel.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_recompute_kmeans.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_import__get.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__get.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_export__forget.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__forget.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__get.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__get.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_common.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_common.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_proxy/ut/control_plane_proxy_ut.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/ut/control_plane_proxy_ut.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_domain_links.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_domain_links.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_import__create.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__create.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/ut/ut_sample_k.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_sample_k.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/ut/ut_reshuffle_kmeans.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_reshuffle_kmeans.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_sequence.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_sequence.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__create.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__create.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_export__create.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__create.cpp |83.1%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/ut/ut_prefix_kmeans.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_prefix_kmeans.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/ut/ut_local_kmeans.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_local_kmeans.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__unmark_restore_tables.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__unmark_restore_tables.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/ut/actors_ut.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/actors_ut.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index_tx_base.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/ydb_proxy/local_proxy/local_partition_actor.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index_tx_base.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/local_proxy/local_partition_actor.cpp |83.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/ydb_proxy/local_proxy/libreplication-ydb_proxy-local_proxy.a |83.1%| [AR] {RESULT} $(B)/ydb/core/tx/replication/ydb_proxy/local_proxy/libreplication-ydb_proxy-local_proxy.a |83.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/replication/ydb_proxy/local_proxy/libreplication-ydb_proxy-local_proxy.a |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_import_getters.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import_getters.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/test_helper/columnshard_ut_common.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/test_helper/columnshard_ut_common.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/mediator/mediator.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/mediator.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_access_database.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_access_database.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_table.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_table.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_table.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_table.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/ut/ut_protocol.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_backup.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_backup.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_protocol.cpp |83.1%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__tenant_data_erasure_manager.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__tenant_data_erasure_manager.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_queries.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_queries.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_schema.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_split_merge.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_schema.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_split_merge.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_export_uploaders.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export_uploaders.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_kesus.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_kesus.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__root_data_erasure_manager.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__root_data_erasure_manager.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_replication.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_replication.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__sync_update_tenants.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__sync_update_tenants.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__state_changed_reply.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__state_changed_reply.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_resource_pool.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_resource_pool.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_side_effects.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_side_effects.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_table.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_table.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_tables.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_tables.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_db_changes.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_db_changes.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__serverless_storage_billing.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__serverless_storage_billing.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__publish_to_scheme_board.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_impl.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__publish_to_scheme_board.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_impl.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_solomon.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_solomon.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_compaction_logic.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_compaction_logic.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_memory_changes.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_memory_changes.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_part.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_part.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_sysview.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_sysview.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_subdomain.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_subdomain.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_cdc_stream.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_table.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_indexed_table.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_cdc_stream.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_table.cpp |83.3%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_indexed_table.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_unsafe.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats_histogram.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_rtmr.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_lock.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_unsafe.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_rtmr.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats_histogram.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_lock.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_sysview.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_sysview.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table_index.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table_index.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_solomon.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_solomon.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_pq.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_pq.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/in_memory_control_plane_storage.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/in_memory_control_plane_storage.cpp |83.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/control_plane_storage/libfq-libs-control_plane_storage.a |83.3%| [AR] {RESULT} $(B)/ydb/core/fq/libs/control_plane_storage/libfq-libs-control_plane_storage.a |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_init.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_init.cpp |83.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/libfq-libs-control_plane_storage.a |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_bsv.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_bsv.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_backup_collection.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_backup_collection.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_fs.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_fs.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_initiate_build_index.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_initiate_build_index.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_data_source.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_data_source.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_solomon.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_solomon.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_db_mon.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_db_mon.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_resource_pool.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_resource_pool.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_apply_build_index.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_apply_build_index.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_cdc_stream.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_data_source.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_cdc_stream.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_data_source.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/mediator/mediator__configure.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/mediator__configure.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp |83.4%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_index.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_index.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/mediator/mediator__schema_upgrade.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/mediator__schema_upgrade.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_continuous_backup.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_continuous_backup.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore_incremental_backup.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_assign_bsv.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore_incremental_backup.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_assign_bsv.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_sequence.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_sequence.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_view.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_view.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_continuous_backup.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/mediator/mediator__init.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_continuous_backup.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/mediator__init.cpp |83.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/mediator/libcore-tx-mediator.a |83.4%| [AR] {RESULT} $(B)/ydb/core/tx/mediator/libcore-tx-mediator.a |83.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/mediator/libcore-tx-mediator.a |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_boot_misc.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_boot_misc.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_many.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_many.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_sequence.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_sequence.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_bsv.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_bsv.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/health/health.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/health/health.cpp |83.4%| 
[AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/health/libfq-libs-health.a |83.4%| [AR] {RESULT} $(B)/ydb/core/fq/libs/health/libfq-libs-health.a |83.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/health/libfq-libs-health.a |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/load_actor_mon.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/load_actor_mon.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_simplebs.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_simplebs.cpp |83.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_vdisk/lib/libblobstorage-ut_vdisk-lib.a |83.4%| [AR] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk/lib/libblobstorage-ut_vdisk-lib.a |83.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_vdisk/lib/libblobstorage-ut_vdisk-lib.a |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_helpers/failing_mtpq.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_helpers/failing_mtpq.cpp |83.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_helpers/libtx-schemeshard-ut_helpers.a |83.4%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_helpers/libtx-schemeshard-ut_helpers.a |83.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/ut_helpers/libtx-schemeshard-ut_helpers.a |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_shard_deleter.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_shard_deleter.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_change_path_state.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_change_path_state.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/state_server_interface.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/state_server_interface.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/show_create/create_table_formatter.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/show_create/create_table_formatter.cpp |83.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/show_create/libcore-sys_view-show_create.a |83.5%| [AR] {RESULT} $(B)/ydb/core/sys_view/show_create/libcore-sys_view-show_create.a |83.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/show_create/libcore-sys_view-show_create.a |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_subdomain.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_subdomain.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_resolve_secret_result.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_resolve_secret_result.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_data_source.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_validate_ttl.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_data_source.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_validate_ttl.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup_collection.cpp |83.5%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup_collection.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__describe_scheme.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__describe_scheme.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_incremental_backup_collection.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_incremental_backup_collection.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_utils.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_utils.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_worker_error.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_worker_error.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_login.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_login.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__data_erasure_manager.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__data_erasure_manager.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_pq.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_pq.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_cancel_tx.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_cancel_tx.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_info_types.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_info_types.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__conditional_erase.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__conditional_erase.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__backup_collection_common.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__backup_collection_common.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__background_compaction.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__background_compaction.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__notify.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__notify.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/schemeshard/schemeshard_svp_migration.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_svp_migration.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_bootlogic.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_bootlogic.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_fs.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_fs.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_path_describer.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_path_describer.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__init_root.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__init_root.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/apps/ydbd/main.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/apps/ydbd/main.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_pipe.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_pipe.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_lock.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_path.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_lock.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_path.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/tablet_flat_executed.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/tablet_flat_executed.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_alter_replication.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_alter_replication.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_export_flow_proposals.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export_flow_proposals.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__init_schema.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__init_schema.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_alter_dst_result.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_alter_dst_result.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__monitoring.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__monitoring.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/test/testhull_index.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/test/testhull_index.cpp |83.6%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/blobstorage/vdisk/hulldb/test/libvdisk-hulldb-test.a |83.6%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/test/libvdisk-hulldb-test.a |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/test_helper/controllers.cpp |83.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/test/libvdisk-hulldb-test.a |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/test_helper/controllers.cpp |83.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/test_helper/libtx-columnshard-test_helper.a |83.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/test_helper/libtx-columnshard-test_helper.a |83.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/test_helper/libtx-columnshard-test_helper.a |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/load_actor_write.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/load_actor_write.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/tx_init_scheme.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/tx_init_scheme.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_drop_stream_result.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_drop_stream_result.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/tx_initialize.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/tx_initialize.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__pq_stats.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__pq_stats.cpp |83.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/controller/libtx-replication-controller.a |83.6%| [AR] {RESULT} $(B)/ydb/core/tx/replication/controller/libtx-replication-controller.a |83.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/replication/controller/libtx-replication-controller.a |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/tablet_flat_dummy.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/tablet_flat_dummy.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_just_reject.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_just_reject.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/load_actor_state.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/load_actor_state.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/fake_coordinator.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/fake_coordinator.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_consistent_copy_tables.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_consistent_copy_tables.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_restore_backup_collection.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_restore_backup_collection.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_index.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_index.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/ut_helpers.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/ut_helpers.cpp |83.7%| 
[CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_view.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_view.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_sequence.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_sequence.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/load_actor_read_validate.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/load_actor_read_validate.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replrecoverymachine_ut.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replrecoverymachine_ut.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/sessions/sessions.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/sessions/sessions.cpp |83.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/sessions/libcore-sys_view-sessions.a |83.7%| [AR] {RESULT} $(B)/ydb/core/sys_view/sessions/libcore-sys_view-sessions.a |83.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/sessions/libcore-sys_view-sessions.a |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/test_tablet.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/test_tablet.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_build_index.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_build_index.cpp |83.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/test_tablet/libydb-core-test_tablet.a |83.7%| [AR] {RESULT} $(B)/ydb/core/test_tablet/libydb-core-test_tablet.a |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp |83.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/test_tablet/libydb-core-test_tablet.a |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_blob_depot.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_blob_depot.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__find_subdomain_path_id.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_continuous_backup.cpp |83.7%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/schemeshard/schemeshard__find_subdomain_path_id.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_state.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_continuous_backup.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_get.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_indexed_table.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_state.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_indexed_table.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_get.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_table.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_table.cpp |83.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/lib/libblobstorage-ut_blobstorage-lib.a |83.7%| [AR] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/lib/libblobstorage-ut_blobstorage-lib.a |83.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_blobstorage/lib/libblobstorage-ut_blobstorage-lib.a |83.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/control_plane_storage/internal/liblibs-control_plane_storage-internal.a |83.7%| [AR] {RESULT} $(B)/ydb/core/fq/libs/control_plane_storage/internal/liblibs-control_plane_storage-internal.a |83.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/internal/liblibs-control_plane_storage-internal.a |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor.cpp |83.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/libydb-core-tablet_flat.a |83.7%| [AR] {RESULT} $(B)/ydb/core/tablet_flat/libydb-core-tablet_flat.a |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_backup_collection.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_backup_collection.cpp |83.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tablet_flat/libydb-core-tablet_flat.a |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__login.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__login.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__fix_bad_paths.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__fix_bad_paths.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_ut.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_ut.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_cdc_stream.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_cdc_stream.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullds_heap_it_ut.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullds_heap_it_ut.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/control/immediate_control_board_actor_ut.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/control/immediate_control_board_actor_ut.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/gc_ut.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/gc_ut.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_resource_pool.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_resource_pool.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_pg_types.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_pg_types.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_index.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_index.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_get_ut.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_get_ut.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/common_helper.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/common_helper.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/statestorage.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/statestorage.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/backpressure.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/backpressure.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_mirror3of4/main.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_mirror3of4/main.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/yc_search_ut/index_events_processor_ut.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/yc_search_ut/index_events_processor_ut.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/monitoring.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/monitoring.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_allocator_client/ut_helpers.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator_client/ut_helpers.cpp |83.8%| [CC] {default-linux-x86_64, release, 
asan} $(S)/ydb/core/blobstorage/ut_blobstorage/recovery.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/recovery.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/ycloud/impl/user_account_service_ut.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/ycloud/impl/user_account_service_ut.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_ut_pool.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_ut_pool.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment_ut.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/ycloud/impl/service_account_service_ut.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment_ut.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/ycloud/impl/service_account_service_ut.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ut_extsubdomain_reboots.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ut_extsubdomain_reboots.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/quoter_performance_test/main.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/quoter_performance_test/main.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/test_client.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/test_client.cpp |83.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/testlib/libydb-core-testlib.a |83.9%| [AR] {RESULT} $(B)/ydb/core/testlib/libydb-core-testlib.a |83.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/testlib/libydb-core-testlib.a |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/initializer/ut/ut_init.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/ut/ut_init.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/cancel_tx_ut.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/cancel_tx_ut.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/actors/test_runtime_ut.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/actors/test_runtime_ut.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/ut_ycsb.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ut_ycsb.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/dread_cache_service/ut/caching_proxy_ut.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/dread_cache_service/ut/caching_proxy_ut.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/defrag.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/defrag.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_decimal_types.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_decimal_types.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/scan/kqp_point_consolidation_ut.cpp |83.9%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/kqp/ut/scan/kqp_point_consolidation_ut.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_sqlin_ut.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_sqlin_ut.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/rate_limiter/rate_limiter_ut.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/rate_limiter/rate_limiter_ut.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/blobsan/main.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/blobsan/main.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_allocator_client/actor_client_ut.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator_client/actor_client_ut.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/actors/run_actor.cpp |83.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/actors/libfq-libs-actors.a |83.9%| [AR] {RESULT} $(B)/ydb/core/fq/libs/actors/libfq-libs-actors.a |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_stats_ut.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_fixture.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_stats_ut.cpp |83.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/actors/libfq-libs-actors.a |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_fixture.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/actors/run_actor.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/quoter/kesus_quoter_ut.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/kesus_quoter_ut.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/combinatory/actualization.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/combinatory/actualization.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/ut_user_attributes_reboots.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/ut_user_attributes_reboots.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/check_integrity.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/check_integrity.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/quoter/quoter_service_ut.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/combinatory/abstract.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/quoter_service_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/combinatory/abstract.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_pipecache_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_pipecache_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_vdisk.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__init.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_vdisk.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__init.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_olapstore_ut.cpp 
|84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_olapstore_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/topic_data_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/topic_data_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_olap_reboots/ut_olap_reboots.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/ut/ut_data_cleanup.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_olap_reboots/ut_olap_reboots.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/ut/ut_data_cleanup.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_node_enumeration_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_node_enumeration_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/ut_with_sdk/topic_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/topic_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_pipe_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/actorlib_impl/test_interconnect_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/test_interconnect_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_pipe_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_config_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_config_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ut_export_reboots_s3.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ut_export_reboots_s3.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/fq/ut_integration/fq_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/fq/ut_integration/fq_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_scheme_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_scheme_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/cache_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/cache_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_vector_index_build.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_vector_index_build.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/common/kqp_ut_common.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/common/kqp_ut_common.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/query_replay/main.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/query_replay/main.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/board_subscriber_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/board_subscriber_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ydb_convert/table_description_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/ydb_convert/table_description_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/ut_kqp.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/ut_kqp.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/in_memory_control_plane_storage_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/in_memory_control_plane_storage_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/effects/kqp_inplace_update_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_inplace_update_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_returning_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_returning_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_ut.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/counters_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/counters_ut.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_helpers.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_helpers.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_vector_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_vector_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/restart_pdisk.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/ut_counters.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/restart_pdisk.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/ut_counters.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/cache_block/cache_block_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/cache_block/cache_block_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/service/worker_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/service/worker_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/group_reconfiguration.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/group_reconfiguration.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/hive_impl_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_impl_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_datashard.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_datashard.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/sentinel_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/sentinel_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_login_ut.cpp 
|84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_login_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_delete_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_delete_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/actorlib_impl/actor_tracker_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/actor_tracker_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_permissions_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_permissions_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_sequence_reboots/ut_sequence_reboots.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_sequence_reboots/ut_sequence_reboots.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ut_topic_splitmerge.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ut_topic_splitmerge.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/topic_yql_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/topic_yql_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_named_expressions_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_named_expressions_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/write_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/write_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/pqtablet_mock.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/pqtablet_mock.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/query_replay/query_compiler.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/query_replay/query_compiler.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_ldap_login_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_ldap_login_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/sourceid_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/sourceid_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/ut_with_sdk/mirrorer_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/mirrorer_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/rbo/kqp_rbo_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/rbo/kqp_rbo_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/ut_vector_index_build_reboots.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/ut_vector_index_build_reboots.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_columnshard.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_ut.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_columnshard.cpp |84.1%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/query/kqp_analyze_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_analyze_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_ne_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_ne_ut.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_hullreplwritesst_ut.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/ut/describes_ut/ic_cache_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/describes_ut/ic_cache_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_hullreplwritesst_ut.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_database_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_database_ut.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_malfunction.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_malfunction.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_query_ut.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_counters_aggregator_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_query_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_counters_aggregator_ut.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_ut.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_scripting_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_scripting_ut.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sharding/ut/ut_sharding.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sharding/ut/ut_sharding.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/service/ut/ut_aggregation/ut_aggregate_statistics.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/service/ut/ut_aggregation/ut_aggregate_statistics.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_ut_common.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_ut_common.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_user_attributes/ut_user_attributes.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_user_attributes/ut_user_attributes.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_minikql.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_minikql.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/user_info_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/user_info_ut.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/kqp/ut/olap/datatime64_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/datatime64_ut.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/viewer_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/viewer_ut.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/fqrun/fqrun.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/fqrun/fqrun.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_appendix_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_appendix_ut.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/perf/kqp_workload_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/perf/kqp_workload_ut.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_ut.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/providers/solomon/actors/ut/ut_helpers.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/solomon/actors/ut/ut_helpers.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_backup/ut_backup.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_backup/ut_backup.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/target_discoverer_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/target_discoverer_ut.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/scheme/kqp_secrets_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_secrets_ut.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/backup_ut/backup_path_ut.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_client_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/backup_ut/backup_path_ut.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_client_ut.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_import_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_import_ut.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/downtime_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/downtime_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_actor_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_actor_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_run.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_run.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub_fast.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub_fast.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/statistics/database/ut/ut_database.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/database/ut/ut_database.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/table_creator/table_creator_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/table_creator/table_creator_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/ut_selfheal/self_heal_actor_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/ut_selfheal/self_heal_actor_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_server_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_server_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_put_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_put_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/mv_object_map_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/mv_object_map_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_sequence/ut_sequence.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_sequence/ut_sequence.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_kv_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_kv_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_index/ut_vector_index.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_vector_index.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/generated/runtime_feature_flags_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/generated/runtime_feature_flags_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/ut_with_sdk/commitoffset_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/commitoffset_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/pqtablet_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/pqtablet_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/pqrb_describes_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/pqrb_describes_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/schemereq_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/http_proxy/ut/kinesis_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/schemereq_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/http_proxy/ut/kinesis_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_prefixed_vector_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_prefixed_vector_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, 
asan} $(S)/ydb/core/tx/scheme_board/subscriber_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/subscriber_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_change_collector.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_change_collector.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/pq_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/pq_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_pdisk_error_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_pdisk_error_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_kqp.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/ut/flat_test_db.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/ut/flat_test_db.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/yql/kqp_yql_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/yql/kqp_yql_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/external_sources/hive_metastore/ut/hive_metastore_fetcher_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/external_sources/hive_metastore/ut/hive_metastore_fetcher_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tenants_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tenants_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/perf/kqp_query_perf_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/perf/kqp_query_perf_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_base/ut_base.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_base.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_sysview_reboots/ut_sysview_reboots.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_sysview_reboots/ut_sysview_reboots.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/get.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/get.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/list_all_topics_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/list_all_topics_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_s3_plan_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullbase_barrier_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_s3_plan_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullbase_barrier_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/hive_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/schemeshard/ut_auditsettings/ut_auditsettings.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_auditsettings/ut_auditsettings.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/sync.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/sync.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/storage_tenant_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/storage_tenant_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/space_check.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/space_check.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/ut_common.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/ut_common.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_schema.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_schema.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/cost/kqp_cost_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/cost/kqp_cost_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/runtime/kqp_re2_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/runtime/kqp_re2_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/partition_chooser_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/partition_chooser_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_index_build.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_index_build.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/dynamic_config/dynamic_config_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/dynamic_config/dynamic_config_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_view_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_view_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/extra_block_checks.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/extra_block_checks.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/resource_broker_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/resource_broker_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/counting_events.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/counting_events.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator_volatile_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator_volatile_ut.cpp 
|84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/sparsed_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/sparsed_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap_ctx_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap_ctx_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/replication.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/replication.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sequenceshard/ut_helpers.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/ut_helpers.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/dictionary_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/dictionary_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/ut/demo_tx.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/demo_tx.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/tx/kqp_tx_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_tx_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/persqueue_common_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/persqueue_common_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/health_check/health_check_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/health_check/health_check_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console_ut_tenants.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_ut_tenants.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/kqp_olap_stats_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/kqp_olap_stats_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/result_formatter/result_formatter_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/result_formatter/result_formatter_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/federated_query/s3/s3_recipe_ut_helpers.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/s3_recipe_ut_helpers.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ut_split_merge_reboots.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ut_split_merge_reboots.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/ut_selfheal/main.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/ut_selfheal/main.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_register_node_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_register_node_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/common/cache_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/common/cache_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_coordination_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_coordination_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/blobstorage/ut_blobstorage/shred.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/shred.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/ut/ut_datetime.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/ut/ut_datetime.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/combinatory/execute.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ut_bsvolume_reboots.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/combinatory/execute.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ut_bsvolume_reboots.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/ut/metadata_conversion.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/ut/metadata_conversion.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_data_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_not_null_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_data_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_not_null_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_leases_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_leases_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/main.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/main.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/bootstrapper_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/stress_tool/device_test_tool.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/bootstrapper_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/stress_tool/device_test_tool.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ut_subdomain_reboots.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ut_subdomain_reboots.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_resolver_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_resolver_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/kqprun/src/ydb_setup.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/src/ydb_setup.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/quoter/ut_helpers.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/ut_helpers.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_volatile.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_volatile.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/ycloud/impl/access_service_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/ycloud/impl/access_service_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/kqp/ut/pg/pg_catalog_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/pg/pg_catalog_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_locks.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_locks.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_read_table.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_read_table.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/address_classification/net_classifier_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/address_classification/net_classifier_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console_ut_configs.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_ut_configs.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/indexes_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/indexes_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_scan.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_scan.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/ut/topic_service_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/topic_service_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/data_integrity/kqp_data_integrity_trails_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/data_integrity/kqp_data_integrity_trails_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/blobstorage_hullwritesst_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/query/kqp_explain_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/blobstorage_hullwritesst_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_explain_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/tiering_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/tiering_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/effects/kqp_immediate_effects_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_immediate_effects_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/ut/ut_script.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/ut/ut_script.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_erase_rows.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_erase_rows.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_request_reporting_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_request_reporting_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/services/ydb/ydb_monitoring_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_monitoring_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache_actor.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache_actor.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain/ut_extsubdomain.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain/ut_extsubdomain.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/log_settings_configurator_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/log_settings_configurator_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/immediate_controls_configurator_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/immediate_controls_configurator_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/ut/describes_ut/describe_topic_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/describes_ut/describe_topic_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_ut_common.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_readbatch_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_ut_common.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_readbatch_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_external_data_source/ut_external_data_source.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_data_source/ut_external_data_source.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_logstore_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_logstore_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/mon_reregister_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/mon_reregister_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/service/kqp_qs_scripts_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/service/kqp_qs_scripts_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_object_storage_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_object_storage_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_bulk_upsert_olap_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_bulk_upsert_olap_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/gc_quorum_3dc.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/gc_quorum_3dc.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator_ext_blobs.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator_ext_blobs.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/blobstorage/ut_blobstorage/huge.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/huge.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/ut_labeled.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/incorrect_queries.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/ut_labeled.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/incorrect_queries.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/yql/kqp_scripting_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/yql/kqp_scripting_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_bsvolume/ut_bsvolume.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_bsvolume/ut_bsvolume.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/pq_async_io/ut_helpers.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/pq_async_io/ut_helpers.cpp |84.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/pq_async_io/libtests-fq-pq_async_io.a |84.7%| [AR] {RESULT} $(B)/ydb/tests/fq/pq_async_io/libtests-fq-pq_async_io.a |84.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/fq/pq_async_io/libtests-fq-pq_async_io.a |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/optimizer_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/optimizer_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/group_mapper_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/group_mapper_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/join/kqp_flip_join_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/join/kqp_flip_join_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_snapshot.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_snapshot.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/encrypted_storage_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/encrypted_storage_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/fetch_request_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/fetch_request_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_errors.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_errors.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/ut/ut_rename_table_column.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/kqprun/src/kqp_runner.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/ut/ut_rename_table_column.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/src/kqp_runner.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ut_data_erasure_reboots.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ut_data_erasure_reboots.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_serverless/ut_serverless.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_serverless/ut_serverless.cpp |84.8%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/idx_test/ydb_index_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/idx_test/ydb_index_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/aggregations_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/aggregations_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/combinatory/bulk_upsert.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/combinatory/bulk_upsert.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/ut/ut_program.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/ut/ut_program.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cluster_info_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_ut_local.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cluster_info_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_ut_local.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_continuous_backup/ut_continuous_backup.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_continuous_backup/ut_continuous_backup.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_maintenance_api_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_maintenance_api_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl_utility.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl_utility.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/scheme/kqp_constraints_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_constraints_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/server.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_quorum_tracker_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/server.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_logging_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_quorum_tracker_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_logging_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/effects/kqp_effects_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_effects_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/ut_large.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/ut_large.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/query_replay_yt/query_compiler.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/query_replay_yt/query_compiler.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/datastreams/datastreams_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/datastreams/datastreams_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compression_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compression_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, 
asan} $(S)/ydb/core/client/flat_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/flat_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/feature_flags_configurator_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/feature_flags_configurator_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_move/ut_move.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_move/ut_move.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/mediator/mediator_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/mediator_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/persqueue_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/persqueue_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/cloud_events_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/cloud_events_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/block_race.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/block_race.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/get_block.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/get_block.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/kqp_generic_provider_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/kqp_generic_provider_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_restart.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_restart.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/closed_interval_set_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/closed_interval_set_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/http_proxy/ut/ymq_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/http_proxy/ut/ymq_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/scale_recommender_policy_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/scale_recommender_policy_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/proxy/ut_helpers.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/proxy/ut_helpers.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/ut_backup_collection_reboots.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/ut_backup_collection_reboots.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/first_class_src_ids_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/first_class_src_ids_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_index_table_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_index_table_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/ut_fat/blobstorage_node_warden_ut_fat.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/ut_fat/blobstorage_node_warden_ut_fat.cpp |84.9%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_columnshard.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_columnshard.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_reboots/ut_reboots.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_reboots/ut_reboots.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_write.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_write.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tiering/ut/ut_tiers.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tiering/ut/ut_tiers.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/tx/kqp_sink_locks_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_sink_locks_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/group_size_in_units.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/group_size_in_units.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/tx/kqp_sink_mvcc_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_sink_mvcc_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/gc.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/gc.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_incremental_restore/ut_incremental_restore.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_incremental_restore/ut_incremental_restore.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/monitoring_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/query_replay_yt/query_replay.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/monitoring_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/query_replay_yt/query_replay.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/ut_common.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/ut_common.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_replication_reboots/ut_replication_reboots.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_replication_reboots/ut_replication_reboots.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/partition_stats/partition_stats_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/persqueue_new_schemecache_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/partition_stats/partition_stats_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/services/persqueue_v1/persqueue_new_schemecache_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_permissions_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_permissions_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_rtmr/ut_rtmr.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_rtmr/ut_rtmr.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/sys_view_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/sys_view_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_transfer/ut_transfer.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_transfer/ut_transfer.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/compaction_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/compaction_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_base/ut_commit_redo_limit.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_commit_redo_limit.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/populator_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/populator_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_collector_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_collector_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_data_cleanup.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_data_cleanup.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/helpers/query_executor.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/helpers/query_executor.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/scan/kqp_flowcontrol_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_flowcontrol_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/ut_sequence/dsproxy_config_retrieval.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/ut_sequence/dsproxy_config_retrieval.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/join/kqp_index_lookup_join_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/join/kqp_index_lookup_join_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/replica_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/replica_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ut_rtmr_reboots.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ut_rtmr_reboots.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/schemeshard/ut_export/ut_export.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_export/ut_export.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_pdisk.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_pdisk.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk2/huge.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk2/huge.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/helpers/writer.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/ut/pqtablet_mock.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/helpers/writer.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/pqtablet_mock.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/slow/pq_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/slow/pq_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_streaming/grpc_streaming_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_streaming/grpc_streaming_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/storage_pool_info_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/storage_pool_info_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/yql/kqp_pragma_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/yql/kqp_pragma_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream/ut_cdc_stream.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream/ut_cdc_stream.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/blobstorage_node_warden_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/blobstorage_node_warden_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_base_reboots/ut_base_reboots.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_base_reboots/ut_base_reboots.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/actorlib_impl/test_protocols_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/test_protocols_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_common_pq.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_common_pq.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/cms/cms_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/cms/cms_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_pq_reboots/ut_pq_reboots.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/tx/kqp_snapshot_isolation_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_snapshot_isolation_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_pq_reboots/ut_pq_reboots.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/proxy_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/locks_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/kqp/ut/olap/locks_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_bulk_upsert_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_bulk_upsert_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/ut_external_data_source_reboots.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/ut_external_data_source_reboots.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/main.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/main.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_counters_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_counters_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_external_table/ut_external_table.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_table/ut_external_table.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/ut/group_test_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ut/group_test_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/sanitize_groups.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/sanitize_groups.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_testshard/main.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_testshard/main.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/helpers/aggregation.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/helpers/aggregation.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/assimilation.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/assimilation.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/self_heal.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/self_heal.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/snapshots.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/common/iceberg_processor_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/snapshots.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/common/iceberg_processor_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/service/ut/ut_http_request.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/service/ut/ut_http_request.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/encryption.cpp |85.1%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/blobstorage/ut_blobstorage/encryption.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3of4.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3of4.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/query/kqp_limits_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_limits_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/locks_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/ds_proxy_lwtrace.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/locks_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/ds_proxy_lwtrace.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/configs_cache_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/configs_cache_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/huge_migration_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/huge_migration_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/net_classifier_updater_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/net_classifier_updater_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/partition_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/partition_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_permissions_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_permissions_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_allocator/txallocator_ut_helpers.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator_ut_helpers.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/balancing.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/balancing.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_spilling_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_spilling_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/backup/impl/local_partition_reader_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/backup/impl/local_partition_reader_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_data_erasure/ut_data_erasure.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_data_erasure/ut_data_erasure.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/actorlib_impl/actor_bootstrapped_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/actor_bootstrapped_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_move_reboots/ut_move_reboots.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_move_reboots/ut_move_reboots.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/compression_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/compression_ut.cpp |85.2%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_compaction/ut_compaction.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_update_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_update_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_compaction/ut_compaction.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/tx/kqp_sink_tx_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_sink_tx_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_sort_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_sort_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/service/kqp_document_api_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/service/kqp_document_api_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/service/kqp_qs_queries_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/ut/ut_other.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/modifications_validator_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/ut/ut_other.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut_strategy/strategy_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/clickbench_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut_strategy/strategy_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/clickbench_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/modifications_validator_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/service/kqp_qs_queries_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hullsatisfactionrank_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_ut_large.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hullsatisfactionrank_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_ut_large.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/scan/kqp_split_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_split_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/blobs_sharing_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/blobs_sharing_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/phantom_blobs.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_replication.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/phantom_blobs.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_replication.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/query/kqp_types_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_types_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_req_blockbs_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_req_blockbs_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/statistics/service/ut/ut_basic_statistics.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/service/ut/ut_basic_statistics.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_patch_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_patch_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_view/ut_view.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_view/ut_view.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_allocator/txallocator_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tablet_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/query_replay/query_replay.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tablet_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/query_replay/query_replay.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/rm_service/kqp_rm_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/given_id_range_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/rm_service/kqp_rm_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/given_id_range_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/ut/ut_secret.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/ut/ut_secret.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_pdiskfit/ut/main.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_rs.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_pdiskfit/ut/main.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/arrow/kqp_arrow_in_channels_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_rs.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/proxy/proxy_actor_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/arrow/kqp_arrow_in_channels_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/proxy/proxy_actor_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/query/kqp_params_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_params_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/patch.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/patch.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/configs_dispatcher_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/configs_dispatcher_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/iceberg_ut_data.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/iceberg_ut_data.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/multiget.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/query/kqp_stats_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/multiget.cpp |85.3%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/kqp/ut/query/kqp_stats_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_background_cleaning/ut_background_cleaning.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_background_cleaning/ut_background_cleaning.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_restore_scan.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_restore_scan.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/engine/mkql_engine_flat_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/engine/mkql_engine_flat_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/service/topic_reader_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/service/topic_reader_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/donor.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/donor.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_restore/ut_restore.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_restore/ut_restore.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/benchmark/b_part.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/benchmark/b_part.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/query_replay/query_proccessor.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/query_replay/query_proccessor.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/ut_rw/ut_normalizer.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/ut_rw/ut_normalizer.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/discover.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_index/ut_unique_index.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/discover.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_unique_index.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/dst_creator_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/dst_creator_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullds_generic_it_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullds_generic_it_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhuge_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhuge_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/query/kqp_query_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_query_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/schemeshard/ut_index_build_reboots/ut_index_build_reboots.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build_reboots/ut_index_build_reboots.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/helpers/local.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/helpers/local.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_replication/ut_replication.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_replication/ut_replication.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/actors/ut/database_resolver_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/actors/ut/database_resolver_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_tree_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_tree_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_failure_injection/ut_failure_injection.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_failure_injection/ut_failure_injection.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_object_storage_listing.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_object_storage_listing.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/grouper_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/grouper_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_ext_blobs_multiple_channels.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3dc.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_ext_blobs_multiple_channels.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3dc.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/service/table_writer_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/service/table_writer_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_login_large/ut_login_large.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_login_large/ut_login_large.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_init.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_init.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/actorlib_impl/actor_activity_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/actor_activity_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_fat.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_fat.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/stress_tool/device_test_tool_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/stress_tool/device_test_tool_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/decimal_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/osiris.cpp |85.4%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/kqp/ut/olap/decimal_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/osiris.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/statistics_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/statistics_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/jaeger_tracing_configurator_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/jaeger_tracing_configurator_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/service/ut/ut_column_statistics.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/service/ut/ut_column_statistics.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/combinatory/executor.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/combinatory/executor.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/tx/kqp_mvcc_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_mvcc_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/direct_read_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/direct_read_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/ncloud/impl/access_service_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/ncloud/impl/access_service_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_index/ut_async_index.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_async_index.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_filestore_reboots/ut_filestore_reboots.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_filestore_reboots/ut_filestore_reboots.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_stats.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/index_restore_get.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_stats.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/index_restore_get.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_broker_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_broker_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/kqprun/src/actors.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/src/actors.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |85.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/kqprun/src/libtools-kqprun-src.a |85.5%| [AR] {RESULT} $(B)/ydb/tests/tools/kqprun/src/libtools-kqprun-src.a |85.5%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/tests/tools/kqprun/src/libtools-kqprun-src.a |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/stop_pdisk.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/stop_pdisk.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_query_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_query_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/ut_rw/ut_columnshard_read_write.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/ut_rw/ut_columnshard_read_write.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/lib/ydb_cli/topic/topic_read_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_table_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/ydb_cli/topic/topic_read_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_table_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/effects/kqp_write_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_write_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/vdisk_test.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/vdisk_test.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tiering/ut/ut_object.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tiering/ut/ut_object.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/backup_ut/ydb_backup_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/backup_ut/ydb_backup_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_internal_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_internal_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/scan/kqp_scan_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/object_distribution_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/object_distribution_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sequenceshard/ut_sequenceshard.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_scan_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/ut_sequenceshard.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_stats/ut_stats.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_stats/ut_stats.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_agg_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_agg_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/common/columnshard.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/common/columnshard.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/helpers/typed_local.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/helpers/typed_local.cpp |85.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/common/libkqp-ut-common.a |85.5%| [AR] {RESULT} $(B)/ydb/core/kqp/ut/common/libkqp-ut-common.a |85.5%| [AR] 
{default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/olap/helpers/libut-olap-helpers.a |85.5%| [AR] {RESULT} $(B)/ydb/core/kqp/ut/olap/helpers/libut-olap-helpers.a |85.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/ut/common/libkqp-ut-common.a |85.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/ut/olap/helpers/libut-olap-helpers.a |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/backup_ut/list_objects_in_s3_export_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/backup_ut/list_objects_in_s3_export_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_split_merge/ut_split_merge.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/ut_with_sdk/balancing_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_split_merge/ut_split_merge.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/balancing_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/storage_service_ydb_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/service/kqp_service_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/storage_service_ydb_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/service/kqp_service_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/assign_tx_id_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/combinatory/variator.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/assign_tx_id_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/combinatory/variator.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/persqueue_common_new_schemecache_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/persqueue_common_new_schemecache_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/gen_restarts.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/gen_restarts.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/ut/shard_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/ut/shard_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/query_replay_yt/main.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/query_replay_yt/main.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_external_table_reboots/ut_external_table_reboots.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_table_reboots/ut_external_table_reboots.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_base/ut_info_types.cpp |85.6%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/schemeshard/ut_base/ut_info_types.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/validation.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/validation.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_reassign.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_reassign.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/federated_query/large_results/kqp_scriptexec_results_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/large_results/kqp_scriptexec_results_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sequenceproxy/sequenceproxy_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceproxy/sequenceproxy_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/deadlines.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/deadlines.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/arrow/kqp_types_arrow_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/arrow/kqp_types_arrow_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_table_split_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_table_split_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/read_attributes_utils_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/read_attributes_utils_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_login/ut_login.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_login/ut_login.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/stream_creator_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/stream_creator_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sst_it_all_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sst_it_all_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ut_cdc_stream_reboots.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ut_cdc_stream_reboots.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_serverless_reboots/ut_serverless_reboots.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_serverless_reboots/ut_serverless_reboots.cpp |85.6%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/schemeshard_info_types.h_serialized.cpp |85.6%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/schemeshard_info_types.h_serialized.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp |85.6%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/common.h_serialized.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_sysview/ut_sysview.cpp |85.6%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/common.h_serialized.cpp |85.6%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/datashard.h_serialized.cpp |85.6%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/schemeshard/ut_sysview/ut_sysview.cpp |85.6%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/datashard.h_serialized.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/decommit_3dc.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/decommit_3dc.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/security/ticket_parser_ut.cpp |85.6%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/statistics_workload |85.6%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/statistics_workload |85.6%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/datashard_active_transaction.h_serialized.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/security/ticket_parser_ut.cpp |85.6%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/cfg |85.6%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/cfg |85.6%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/datashard_active_transaction.h_serialized.cpp |85.6%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/opt/kqp_query_plan.h_serialized.cpp |85.7%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/opt/kqp_query_plan.h_serialized.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/race.cpp |85.7%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/provider/yql_kikimr_provider.h_serialized.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/race.cpp |85.7%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/provider/yql_kikimr_provider.h_serialized.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_discover_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_discover_ut.cpp |85.7%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/granule/granule.h_serialized.cpp |85.7%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/granule/granule.h_serialized.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_datashard.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_datashard.cpp |85.7%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/fifo_cleanup.h_serialized.cpp |85.7%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/actor/fifo_cleanup.h_serialized.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_olap/ut_olap.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions_ut.cpp |85.7%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/columnshard.h_serialized.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_olap/ut_olap.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions_ut.cpp |85.7%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/generated/runtime_feature_flags.cpp |85.7%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/columnshard.h_serialized.cpp |85.7%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/base/generated/runtime_feature_flags.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy_ut.cpp |85.7%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/volatile_tx.h_serialized.cpp |85.7%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/volatile_tx.h_serialized.cpp |85.7%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_sequence_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_sequence_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |85.7%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/queue_schema.h_serialized.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_ut_trace.cpp |85.7%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/actor/queue_schema.h_serialized.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_ut_trace.cpp |85.7%| [TA] $(B)/ydb/core/erasure/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.7%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/operation_queue_timer.h_serialized.cpp |85.7%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/operation_queue_timer.h_serialized.cpp |85.7%| [TA] {RESULT} $(B)/ydb/core/erasure/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.7%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/execution_unit.h_serialized.cpp |85.7%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/execution_unit.h_serialized.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest_ut.cpp |85.7%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/read_balancer__balancing.h_serialized.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest_ut.cpp |85.7%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/read_balancer__balancing.h_serialized.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/join/kqp_join_ut.cpp |85.7%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/operation.h_serialized.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/join/kqp_join_ut.cpp |85.7%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/operation.h_serialized.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_backup_collection/ut_backup_collection.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_backup_collection/ut_backup_collection.cpp |85.7%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.h_serialized.cpp |85.7%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.h_serialized.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_compaction.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_compaction.cpp |85.7%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/manager/abstract.h_serialized.cpp |85.7%| [EN] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/manager/abstract.h_serialized.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/generated/runtime_feature_flags.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_read_rows_ut.cpp |85.7%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/tx_controller.h_serialized.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/base/generated/runtime_feature_flags.cpp |85.7%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/transactions/tx_controller.h_serialized.cpp |85.7%| 
[AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/generated/libcore-base-generated.a |85.7%| [AR] {RESULT} $(B)/ydb/core/base/generated/libcore-base-generated.a |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_read_rows_ut.cpp |85.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/base/generated/libcore-base-generated.a |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/wrappers/s3_wrapper_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/apps/etcd_proxy/service/ut/etcd_service_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/wrappers/s3_wrapper_ut.cpp |85.7%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/blob_depot/schema.h_serialized.cpp |85.8%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/blob_depot/schema.h_serialized.cpp |85.8%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/events.h_serialized.cpp |85.8%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/actor/events.h_serialized.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/apps/etcd_proxy/service/ut/etcd_service_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/kqp_olap_ut.cpp |85.8%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/nemesis |85.8%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/nemesis |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/kqp_olap_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/backup_ut/encrypted_backup_ut.cpp |85.8%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h_serialized.cpp |85.8%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h_serialized.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/backup_ut/encrypted_backup_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_ut.cpp |85.8%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_reader/contexts.h_serialized.cpp |85.8%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_reader/contexts.h_serialized.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_merge_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_merge_ut.cpp |85.8%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/export/session/session.h_serialized.cpp |85.8%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/session/session.h_serialized.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/datashard.h_serialized.cpp |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/service/ut/ydb-core-sys_view-service-ut |85.8%| [LD] {RESULT} $(B)/ydb/core/sys_view/service/ut/ydb-core-sys_view-service-ut |85.8%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/datashard.h_serialized.cpp |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/ut_auth/ydb-core-base-ut_auth |85.8%| [LD] {RESULT} $(B)/ydb/core/base/ut_auth/ydb-core-base-ut_auth |85.8%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/column_engine_logs.h_serialized.cpp |85.8%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/column_engine_logs.h_serialized.cpp |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/persqueue/topic_parser/ut/ydb-library-persqueue-topic_parser-ut 
|85.8%| [LD] {RESULT} $(B)/ydb/library/persqueue/topic_parser/ut/ydb-library-persqueue-topic_parser-ut |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sequenceshard/public/ut/ydb-core-tx-sequenceshard-public-ut |85.8%| [LD] {RESULT} $(B)/ydb/core/tx/sequenceshard/public/ut/ydb-core-tx-sequenceshard-public-ut |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_ranges_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/tx/kqp_locks_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_locks_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_ranges_ut.cpp |85.8%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/executer_actor/kqp_executer.h_serialized.cpp |85.8%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/columnshard_impl.h_serialized.cpp |85.8%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/executer_actor/kqp_executer.h_serialized.cpp |85.8%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/columnshard_impl.h_serialized.cpp |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/validation/ut/ydb-core-config-validation-ut |85.8%| [LD] {RESULT} $(B)/ydb/core/config/validation/ut/ydb-core-config-validation-ut |85.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/config/validation/ut/ydb-core-config-validation-ut |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/ut/ydb-core-base-ut |85.8%| [LD] {RESULT} $(B)/ydb/core/base/ut/ydb-core-base-ut |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/tools/dump/yaml-to-proto-dump |85.8%| [LD] {RESULT} $(B)/ydb/library/yaml_config/tools/dump/yaml-to-proto-dump |85.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yaml_config/tools/dump/yaml-to-proto-dump |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut |85.8%| [LD] {RESULT} $(B)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut |85.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/ydb-core-blobstorage-vdisk-hulldb-base-ut |85.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/ydb-core-blobstorage-vdisk-hulldb-base-ut |85.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/ydb-core-blobstorage-vdisk-hulldb-base-ut |85.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/service/ut/ydb-core-sys_view-service-ut |85.8%| [TA] $(B)/ydb/tests/functional/benchmarks_init/test-results/py3test/{meta.json ... results_accumulator.log} |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/mvp/core/ut/ydb-mvp-core-ut |85.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/mvp/core/ut/ydb-mvp-core-ut |85.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/base/ut_auth/ydb-core-base-ut_auth |85.8%| [TA] {RESULT} $(B)/ydb/tests/functional/benchmarks_init/test-results/py3test/{meta.json ... 
results_accumulator.log} |85.8%| [LD] {RESULT} $(B)/ydb/mvp/core/ut/ydb-mvp-core-ut |85.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tx/sequenceshard/public/ut/ydb-core-tx-sequenceshard-public-ut |85.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/persqueue/topic_parser/ut/ydb-library-persqueue-topic_parser-ut |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/mvp/oidc_proxy/ut/ydb-mvp-oidc_proxy-ut |85.8%| [LD] {RESULT} $(B)/ydb/mvp/oidc_proxy/ut/ydb-mvp-oidc_proxy-ut |85.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/mvp/oidc_proxy/ut/ydb-mvp-oidc_proxy-ut |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/common/ut/ydb-core-blobstorage-vdisk-common-ut |85.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/common/ut/ydb-core-blobstorage-vdisk-common-ut |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/ydb-core-blobstorage-vdisk-hulldb-compstrat-ut |85.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/ydb-core-blobstorage-vdisk-hulldb-compstrat-ut |85.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/ydb-core-blobstorage-vdisk-hulldb-compstrat-ut |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/ut_rw/ut_backup.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/proxy_ext_tenant_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/ut_rw/ut_backup.cpp >> SysViewQueryHistory::TopDurationAdd [GOOD] |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ext_tenant_ut.cpp |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/backpressure/ut/ydb-core-blobstorage-backpressure-ut |85.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/backpressure/ut/ydb-core-blobstorage-backpressure-ut |85.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::TopDurationAdd [GOOD] |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut |85.8%| [LD] {RESULT} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut |85.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut |85.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/common/ut/ydb-core-blobstorage-vdisk-common-ut >> SysViewQueryHistory::ServiceQueryHistoryAdd [GOOD] >> SysViewQueryHistory::ScanQueryHistoryMerge [GOOD] |85.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::ServiceQueryHistoryAdd [GOOD] |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/ydb-core-blobstorage-vdisk-hulldb-barriers-ut |85.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/ydb-core-blobstorage-vdisk-hulldb-barriers-ut |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/console/validators/ut/ydb-core-cms-console-validators-ut |85.8%| [LD] {RESULT} $(B)/ydb/core/cms/console/validators/ut/ydb-core-cms-console-validators-ut >> AuthDatabaseAdmin::FailOnEmptyOwnerAndNoToken [GOOD] >> AuthDatabaseAdmin::FailOnEmptyOwnerAndEmptyToken [GOOD] >> AuthDatabaseAdmin::FailOnEmptyOwnerAndTokenWithEmptyUserSid [GOOD] |85.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/console/validators/ut/ydb-core-cms-console-validators-ut |85.8%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::ScanQueryHistoryMerge [GOOD] |85.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthDatabaseAdmin::FailOnEmptyOwnerAndTokenWithEmptyUserSid [GOOD] |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/groupinfo/ut/ydb-core-blobstorage-groupinfo-ut |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/util/ut/ydb-core-util-ut |85.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/groupinfo/ut/ydb-core-blobstorage-groupinfo-ut |85.8%| [LD] {RESULT} $(B)/ydb/core/util/ut/ydb-core-util-ut |85.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/ydb-core-blobstorage-vdisk-hulldb-fresh-ut |85.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/ydb-core-blobstorage-vdisk-hulldb-fresh-ut |85.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/ydb-core-blobstorage-vdisk-hulldb-fresh-ut >> TopicNameConverterTest::Paths [GOOD] >> TopicNameConverterTest::PathFromDiscoveryConverter [GOOD] |85.9%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/volatile_tx.h_serialized.cpp |85.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/base/ut/ydb-core-base-ut |85.9%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> TopicNameConverterTest::PathFromDiscoveryConverter [GOOD] >> TResizableCircleBufTest::Test1 [GOOD] >> TResizableCircleBufTest::Test2 [GOOD] >> TTrackable::TBuffer [GOOD] |85.9%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/volatile_tx.h_serialized.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/slow/autopartitioning_ut.cpp |85.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/backpressure/ut/ydb-core-blobstorage-backpressure-ut |85.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/testlib/actors/ut/ydb-core-testlib-actors-ut |85.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/ydb-core-blobstorage-vdisk-hulldb-generic-ut |85.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/ydb-core-blobstorage-vdisk-hulldb-generic-ut |85.9%| [LD] {RESULT} $(B)/ydb/core/testlib/actors/ut/ydb-core-testlib-actors-ut |85.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/testlib/actors/ut/ydb-core-testlib-actors-ut |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/slow/autopartitioning_ut.cpp |85.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/pdisk/ut/ydb-core-blobstorage-pdisk-ut |85.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/pdisk/ut/ydb-core-blobstorage-pdisk-ut >> TVDiskConfigTest::RtmrProblem1 [GOOD] >> TVDiskConfigTest::RtmrProblem2 [GOOD] >> TVDiskConfigTest::ThreeLevels [GOOD] |85.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/pdisk/ut/ydb-core-blobstorage-pdisk-ut |85.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/ydb-core-blobstorage-vdisk-hulldb-barriers-ut |85.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut |85.9%| [LD] {RESULT} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut |85.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TTrackable::TBuffer [GOOD] |85.9%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/blobstorage/vdisk/ingress/ut/ydb-core-blobstorage-vdisk-ingress-ut |85.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/ingress/ut/ydb-core-blobstorage-vdisk-ingress-ut |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TVDiskConfigTest::ThreeLevels [GOOD] |85.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/util/ut/ydb-core-util-ut |85.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/groupinfo/ut/ydb-core-blobstorage-groupinfo-ut |85.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hullop/ut/ydb-core-blobstorage-vdisk-hullop-ut |85.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hullop/ut/ydb-core-blobstorage-vdisk-hullop-ut |85.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/hullop/ut/ydb-core-blobstorage-vdisk-hullop-ut |85.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/query/ut/ydb-core-blobstorage-vdisk-query-ut |85.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/query/ut/ydb-core-blobstorage-vdisk-query-ut |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |85.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/ut_pg/ydb-core-tablet_flat-ut_pg |85.9%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/ut_pg/ydb-core-tablet_flat-ut_pg |85.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet_flat/ut_pg/ydb-core-tablet_flat-ut_pg |85.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/ydb-core-blobstorage-vdisk-anubis_osiris-ut |85.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/ydb-core-blobstorage-vdisk-anubis_osiris-ut >> TBlobStorageCompStrat::Test1 |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest |85.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/generated/ut/ydb-core-base-generated-ut |85.9%| [LD] {RESULT} $(B)/ydb/core/base/generated/ut/ydb-core-base-generated-ut >> TBlobStorageCompStrat::Test1 [GOOD] >> BootstrapTabletsValidatorTests::TestUnknownNodeForTablet [GOOD] >> NameserviceConfigValidatorTests::TestAddNewNode [GOOD] >> NameserviceConfigValidatorTests::TestDuplicatingHostPort >> ResourceBrokerConfigValidatorTests::TestMinConfig [GOOD] >> ResourceBrokerConfigValidatorTests::TestRepeatedQueueName >> NameserviceConfigValidatorTests::TestDuplicatingHostPort [GOOD] >> NameserviceConfigValidatorTests::TestDuplicatingAddrPort [GOOD] |86.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/ingress/ut/ydb-core-blobstorage-vdisk-ingress-ut >> ResourceBrokerConfigValidatorTests::TestRepeatedQueueName [GOOD] >> ResourceBrokerConfigValidatorTests::TestNoDefaultQueue [GOOD] >> ResourceBrokerConfigValidatorTests::TestNoUnknownTask [GOOD] |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/base/ut/ydb-core-blobstorage-base-ut |85.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/base/ut/ydb-core-blobstorage-base-ut |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest >> TBlobStorageCompStrat::Test1 [GOOD] |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> NameserviceConfigValidatorTests::TestDuplicatingAddrPort [GOOD] |85.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/io_formats/arrow/scheme/ut/ydb-core-io_formats-arrow-scheme-ut |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/metering/ut/ydb-core-metering-ut 
|86.0%| [LD] {RESULT} $(B)/ydb/core/io_formats/arrow/scheme/ut/ydb-core-io_formats-arrow-scheme-ut |86.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/io_formats/arrow/scheme/ut/ydb-core-io_formats-arrow-scheme-ut |86.0%| [LD] {RESULT} $(B)/ydb/core/metering/ut/ydb-core-metering-ut |86.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> ResourceBrokerConfigValidatorTests::TestNoUnknownTask [GOOD] >> TFreshAppendixTest::IterateForwardIncluding [GOOD] >> TFreshAppendixTest::IterateForwardExcluding [GOOD] |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/ydb-core-blobstorage-dsproxy-ut_strategy |86.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/ydb-core-blobstorage-dsproxy-ut_strategy |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/ut_util/ydb-core-tablet_flat-ut_util |86.0%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/ut_util/ydb-core-tablet_flat-ut_util |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/ut/ydb-library-yaml_config-ut |86.0%| [LD] {RESULT} $(B)/ydb/library/yaml_config/ut/ydb-library-yaml_config-ut |86.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TFreshAppendixTest::IterateForwardExcluding [GOOD] |86.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yaml_config/ut/ydb-library-yaml_config-ut >> TBlobStorageHullFresh::SolomonStandCrash [GOOD] >> TBlobStorageHullFreshSegment::IteratorTest |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp >> TFreshAppendixTest::IterateForwardAll [GOOD] >> TFreshAppendixTest::IterateBackwardIncluding [GOOD] >> TBlobStorageHullFreshSegment::IteratorTest [GOOD] |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_stream_lookup.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp >> TActorTest::TestCreateChildActor [GOOD] >> TActorTest::TestBlockEvents |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut |86.0%| [LD] {RESULT} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_stream_lookup.cpp |86.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/ydb-core-blobstorage-vdisk-anubis_osiris-ut |86.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/ydb-core-blobstorage-vdisk-hulldb-generic-ut >> TActorTest::TestBlockEvents [GOOD] |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/backup/common/ut/ydb-core-backup-common-ut |86.0%| [LD] {RESULT} $(B)/ydb/core/backup/common/ut/ydb-core-backup-common-ut >> Config::IncludeScope [GOOD] |86.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TFreshAppendixTest::IterateBackwardIncluding [GOOD] |86.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/base/generated/ut/ydb-core-base-generated-ut >> TBlobStorageIngressMatrix::VectorTestEmpty [GOOD] >> TBlobStorageIngressMatrix::VectorTestBitwiseComplement2 [GOOD] |86.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFreshSegment::IteratorTest [GOOD] ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/testlib/actors/ut/unittest >> TActorTest::TestBlockEvents [GOOD] Test command err: ... waiting for blocked 3 events ... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0 ... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0 ... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0 ... waiting for blocked 3 events (done) ... unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor ... unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor ... waiting for blocked 1 more event ... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0 ... waiting for blocked 1 more event (done) ... waiting for processed 2 more events ... waiting for processed 2 more events (done) ... unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor ... unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor ... waiting for processed 3 more events ... 
waiting for processed 3 more events (done) >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSizeMinusOne |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/defrag/ut/ydb-core-blobstorage-vdisk-defrag-ut |86.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/defrag/ut/ydb-core-blobstorage-vdisk-defrag-ut |86.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/defrag/ut/ydb-core-blobstorage-vdisk-defrag-ut |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/ycloud/impl/folder_service_ut.cpp |86.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> Config::IncludeScope [GOOD] |86.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::VectorTestBitwiseComplement2 [GOOD] |86.0%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/columnshard.h_serialized.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/ycloud/impl/folder_service_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/columnshard.h_serialized.cpp |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/tools/dump_ds_init/yaml-to-proto-dump-ds-init |86.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yaml_config/tools/dump_ds_init/yaml-to-proto-dump-ds-init |86.0%| [LD] {RESULT} $(B)/ydb/library/yaml_config/tools/dump_ds_init/yaml-to-proto-dump-ds-init |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/common/entity_id_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/data/kqp_read_null_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/delete_ut.cpp |86.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/common/entity_id_ut.cpp |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/pgproxy/ut/ydb-core-pgproxy-ut |86.0%| [LD] {RESULT} $(B)/ydb/core/pgproxy/ut/ydb-core-pgproxy-ut |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |86.0%| [LD] {RESULT} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |86.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/delete_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/data/kqp_read_null_ut.cpp |86.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/metering/ut/ydb-core-metering-ut |86.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/base/ut/ydb-core-blobstorage-base-ut |86.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/init/ut/ydb-core-config-init-ut |86.1%| [LD] {RESULT} $(B)/ydb/core/config/init/ut/ydb-core-config-init-ut |86.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/config/init/ut/ydb-core-config-init-ut >> TBlobStorageHullFresh::SimpleBackWardEnd2Times [GOOD] >> TBlobStorageHullFresh::Perf >> TVDiskConfigTest::JustConfig [GOOD] >> TVDiskConfigTest::Basic [GOOD] >> TVDiskConfigTest::NoMoneyNoHoney [GOOD] >> TBlobStorageHullFreshSegment::PerfAppendix >> TBlobStorageHullFresh::AppendixPerf |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |86.1%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/fifo_cleanup.h_serialized.cpp |86.1%| [LD] 
{default-linux-x86_64, release, asan} $(B)/ydb/mvp/meta/ut/ydb-mvp-meta-ut >> TSTreeTest::Basic [GOOD] >> TSVecTest::Basic [GOOD] |86.1%| [LD] {RESULT} $(B)/ydb/mvp/meta/ut/ydb-mvp-meta-ut |86.1%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/actor/fifo_cleanup.h_serialized.cpp >> TBlobStorageDiskBlob::Merge [GOOD] >> TBlobStorageHullDecimal::TestMkDecimal [GOOD] >> TPDiskErrorStateTests::Basic [GOOD] >> TPDiskErrorStateTests::Basic2 [GOOD] >> TPDiskErrorStateTests::BasicErrorReason [GOOD] |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TSVecTest::Basic [GOOD] |86.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/validation/auth_config_validator_ut/core-config-validation-auth_config_validator_ut |86.1%| [LD] {RESULT} $(B)/ydb/core/config/validation/auth_config_validator_ut/core-config-validation-auth_config_validator_ut |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TVDiskConfigTest::NoMoneyNoHoney [GOOD] >> TBlobStorageHullFresh::SimpleBackwardEnd [GOOD] >> TBlobStorageHullFresh::SimpleBackWardMiddle2Times [GOOD] |86.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/query/ut/ydb-core-blobstorage-vdisk-query-ut |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TPDiskErrorStateTests::BasicErrorReason [GOOD] |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> TBlobStorageHullDecimal::TestMkDecimal [GOOD] >> TFreshAppendixTest::IterateBackwardAll [GOOD] >> TFreshAppendixTest::IterateBackwardExcluding [GOOD] |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFresh::SimpleBackWardMiddle2Times [GOOD] |86.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tablet_flat/ut_util/ydb-core-tablet_flat-ut_util |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/scheme/kqp_acl_ut.cpp |86.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/long_tx_service/public/ut/ydb-core-tx-long_tx_service-public-ut |86.1%| [LD] {RESULT} $(B)/ydb/core/tx/long_tx_service/public/ut/ydb-core-tx-long_tx_service-public-ut |86.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut |86.1%| [LD] {RESULT} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut |86.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TFreshAppendixTest::IterateBackwardExcluding [GOOD] |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_acl_ut.cpp >> TBlobStorageHullFresh::Perf [GOOD] |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest >> AuthDatabaseAdmin::FailOnOwnerAndTokenWithEmptyUserSid [GOOD] >> AuthDatabaseAdmin::FailOnOwnerAndTokenWithEmptyUserSidAndGroups [GOOD] >> AuthDatabaseAdmin::PassOnOwnerMatchGroupSid [GOOD] >> TCircularQueueTest::ShouldPush [GOOD] >> TCircularQueueTest::ShouldNotPushTwice [GOOD] >> TCircularQueueTest::ShouldRemove 
[GOOD] >> TCircularQueueTest::ShouldNotRemoveMissing [GOOD] >> TCircularQueueTest::ShouldRemoveCurrent [GOOD] >> TCircularQueueTest::ShouldRemoveCurrentLast [GOOD] >> TConcurrentRWHashTest::TEmptyGetTest [GOOD] >> TConcurrentRWHashTest::TInsertTest [GOOD] >> TConcurrentRWHashTest::TInsertIfAbsentTest [GOOD] >> TConcurrentRWHashTest::TInsertIfAbsentTestFunc [GOOD] >> TConcurrentRWHashTest::TRemoveTest [GOOD] >> TConcurrentRWHashTest::TEraseTest [GOOD] >> TCowBTreeTest::Empty [GOOD] >> TCowBTreeTest::Basics [GOOD] >> TCowBTreeTest::ClearAndReuse >> TLsnMngrTests::AllocLsnForLocalUse2Threads |86.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/backup/common/ut/ydb-core-backup-common-ut |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFresh::Perf [GOOD] >> TCowBTreeTest::ClearAndReuse [GOOD] >> TCowBTreeTest::MultipleSnapshots >> DSProxyStrategyTest::Restore_block42 |86.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthDatabaseAdmin::PassOnOwnerMatchGroupSid [GOOD] >> DiscoveryConverterTest::AccountDatabase [GOOD] >> DiscoveryConverterTest::CmWay [GOOD] |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |86.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/external_sources/ut/ydb-core-external_sources-ut |86.1%| [LD] {RESULT} $(B)/ydb/core/external_sources/ut/ydb-core-external_sources-ut |86.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/external_sources/ut/ydb-core-external_sources-ut >> TIntrusiveStackTest::TestEmptyPop [GOOD] >> TLockFreeIntrusiveStackTest::ConcurrentRefCountNeverEmpty >> TIntrusiveStackTest::TestPushPop [GOOD] >> TCacheCacheTest::Random |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |86.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::CmWay [GOOD] |86.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/log_backend/ut/ydb-core-log_backend-ut >> TCacheCacheTest::Random [GOOD] >> TCacheTest::TestUnboundedMapCache [GOOD] >> TCacheTest::EnsureNoLeakAfterUnboundedCacheOnMapDtor [GOOD] >> TCacheTest::TestSizeBasedOverflowCallback [GOOD] >> TCacheTest::TestLruCache [GOOD] >> TCacheTest::EnsureNoLeakAfterLruCacheDtor [GOOD] >> TCacheTest::Test2QCache [GOOD] >> TCacheTest::EnsureNoLeakAfterQ2CacheDtor [GOOD] >> TCacheTest::TestUpdateItemSize [GOOD] >> TCircularOperationQueueTest::CheckOnDoneInflight1 [GOOD] >> TCircularOperationQueueTest::CheckOnDoneInflight2 [GOOD] >> TCircularOperationQueueTest::CheckOnDoneNotExisting [GOOD] >> TCircularOperationQueueTest::CheckRemoveNotRunning [GOOD] >> TCircularOperationQueueTest::CheckRemoveRunning [GOOD] >> TCircularOperationQueueTest::CheckRemoveWaiting [GOOD] >> TCircularOperationQueueTest::CheckRemoveNotExisting [GOOD] >> TCircularOperationQueueTest::CheckTimeout [GOOD] >> TCircularOperationQueueTest::CheckTimeoutWhenFirstItemRemoved [GOOD] >> TCircularOperationQueueTest::RemoveExistingWhenShuffle [GOOD] >> TCircularOperationQueueTest::BasicRPSCheck [GOOD] >> TCircularOperationQueueTest::BasicRPSCheckWithRound [GOOD] >> TCircularOperationQueueTest::CheckWakeupAfterStop [GOOD] >> TCircularOperationQueueTest::CheckWakeupWhenRPSExhausted 
[GOOD] >> TCircularOperationQueueTest::CheckWakeupWhenRPSExhausted2 [GOOD] >> TCircularOperationQueueTest::CheckStartAfterStop [GOOD] |86.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown |86.1%| [LD] {RESULT} $(B)/ydb/core/log_backend/ut/ydb-core-log_backend-ut |86.1%| [LD] {RESULT} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown |86.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown |86.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/pgproxy/ut/ydb-core-pgproxy-ut |86.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp >> TBlobStorageSyncNeighborsTest::CheckRevLookup [GOOD] >> TBlobStorageSyncNeighborsTest::CheckIsMyDomain [GOOD] >> TBlobStorageSyncNeighborsTest::CheckFailDomainsIterators [GOOD] >> TBlobStorageSyncNeighborsTest::CheckVDiskDistance [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TCircularOperationQueueTest::CheckStartAfterStop [GOOD] Test command err: 0.27412 |86.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TBlobStorageSyncNeighborsTest::CheckVDiskDistance [GOOD] >> TLsnMngrTests::AllocLsnForLocalUse2Threads [GOOD] >> TLsnMngrTests::AllocLsnForLocalUse10Threads >> DSProxyStrategyTest::Restore_mirror3dc >> TULID::HeadByteOrder [GOOD] >> TTokenBucketTest::DelayCalculation [GOOD] >> TQueueInplaceTests::TestSimpleInplace [GOOD] >> TPriorityOperationQueueTest::UpdateNonExistingShouldReturnFalse [GOOD] >> TPriorityOperationQueueTest::ShouldUpdatePriorityReadyQueue [GOOD] >> TPriorityOperationQueueTest::ShouldReturnExecTimeWhenUpdateRunningPriority [GOOD] >> TStrongTypeTest::DefaultConstructorValue [GOOD] >> TQueueInplaceTests::CleanInDestructor [GOOD] >> TULID::TailByteOrder >> TSimpleCacheTest::TestNotSoSimpleCache [GOOD] >> TULID::ParseAndFormat [GOOD] >> TPriorityOperationQueueTest::ShouldStartEmpty [GOOD] >> TPriorityQueueTest::TestOrder [GOOD] >> TStrongTypeTest::DefaultConstructorDeleted [GOOD] >> TPriorityOperationQueueTest::ShouldUpdatePriorityWaitingQueue [GOOD] >> TPriorityOperationQueueTest::ShouldStartByPriorityWithRemove [GOOD] >> TTokenBucketTest::Unlimited [GOOD] >> TTokenBucketTest::Limited [GOOD] >> TSimpleCacheTest::TestSimpleCache [GOOD] >> TPriorityOperationQueueTest::ShouldStartByPriority [GOOD] >> TULID::TailByteOrder [GOOD] |86.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/ut_large/ydb-core-tablet_flat-ut_large |86.2%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/ut_large/ydb-core-tablet_flat-ut_large >> TULID::EveryBitOrder [GOOD] >> TULID::Generate [GOOD] >> TWildcardTest::TestWildcard [GOOD] >> TWildcardTest::TestWildcards [GOOD] |86.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_provider_ut.cpp >> DiscoveryConverterTest::FullLegacyNames [GOOD] >> DiscoveryConverterTest::FirstClass [GOOD] |86.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/s3/common/ut/ydb-library-yql-providers-s3-common-ut |86.2%| [LD] {RESULT} $(B)/ydb/library/yql/providers/s3/common/ut/ydb-library-yql-providers-s3-common-ut >> TCircleBufTest::SimpleTest [GOOD] >> TCircleBufTest::PtrTest [GOOD] >> TLsnAllocTrackerTests::Test1 [GOOD] >> TLsnMngrTests::AllocLsnForLocalUse |86.2%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/library/yql/providers/s3/common/ut/ydb-library-yql-providers-s3-common-ut |86.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_provider_ut.cpp |86.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TWildcardTest::TestWildcards [GOOD] |86.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/test/tool/perf/table-perf |86.2%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/test/tool/perf/table-perf |86.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |86.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tx/long_tx_service/public/ut/ydb-core-tx-long_tx_service-public-ut |86.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/config/validation/auth_config_validator_ut/core-config-validation-auth_config_validator_ut |86.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::FirstClass [GOOD] >> TFragmentedBufferTest::TestWriteRead [GOOD] >> TFragmentedBufferTest::TestOverwriteRead [GOOD] >> TFragmentedBufferTest::TestIsNotMonolith [GOOD] >> TFragmentedBufferTest::TestSetMonolith [GOOD] >> TFragmentedBufferTest::TestReplaceWithSetMonolith [GOOD] >> THazardTest::CachedPointers [GOOD] >> THazardTest::AutoProtectedPointers [GOOD] >> THyperLogCounterTest::TestGetSet [GOOD] >> THyperLogCounterTest::TestIncrement [GOOD] >> THyperLogCounterTest::TestAddRandom |86.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/test/tool/surg/surg |86.2%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/test/tool/surg/surg |86.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tablet_flat/ut_large/ydb-core-tablet_flat-ut_large >> THyperLogCounterTest::TestAddRandom [GOOD] >> THyperLogCounterTest::TestAddFixed >> THyperLogCounterTest::TestAddFixed [GOOD] >> THyperLogCounterTest::TestHybridIncrement [GOOD] >> THyperLogCounterTest::TestHybridAdd [GOOD] >> TIntervalSetTest::IntervalMapTestEmpty [GOOD] >> TIntervalSetTest::IntervalMapTestSpecificAdd [GOOD] >> TIntervalSetTest::IntervalMapTestAdd |86.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/mvp/meta/ut/ydb-mvp-meta-ut |86.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/util/btree_benchmark/btree_benchmark |86.2%| [LD] {RESULT} $(B)/ydb/core/util/btree_benchmark/btree_benchmark |86.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest >> TIntervalSetTest::IntervalMapTestAdd [GOOD] >> TIntervalSetTest::IntervalMapTestAddSubtract [GOOD] >> TIntervalSetTest::IntervalMapTestAddAgainstReference |86.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tablet_flat/test/tool/perf/table-perf |86.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest >> TLsnMngrTests::AllocLsnForLocalUse [GOOD] |86.2%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/common.h_serialized.cpp >> TIntervalSetTest::IntervalMapTestAddAgainstReference [GOOD] >> TIntervalSetTest::IntervalMapTestIsSubsetOfAgainstReference >> TBlobStorageAnubisAlgo::Mirror3 [GOOD] |86.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/ydb-core-blobstorage-dsproxy-ut_strategy |86.2%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/common.h_serialized.cpp >> TIntervalSetTest::IntervalMapTestIsSubsetOfAgainstReference [GOOD] >> TIntervalSetTest::IntervalMapIntersection |86.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/libdata_sharing-common-session.a 
|86.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/libdata_sharing-common-session.a |86.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |86.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TLsnMngrTests::AllocLsnForLocalUse [GOOD] >> TIntervalSetTest::IntervalSetTestEmpty [GOOD] >> TIntervalSetTest::IntervalSetTestSpecificAdd [GOOD] >> TIntervalSetTest::IntervalSetTestAdd |86.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/libdata_sharing-common-session.a |86.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/log_backend/ut/ydb-core-log_backend-ut |86.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest >> TBlobStorageAnubisAlgo::Mirror3 [GOOD] |86.2%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/datashard_active_transaction.h_serialized.cpp |86.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/backup/tools/decrypt/decrypt |86.2%| [LD] {RESULT} $(B)/ydb/core/backup/tools/decrypt/decrypt |86.2%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/datashard_active_transaction.h_serialized.cpp >> TIntervalSetTest::IntervalSetTestAdd [GOOD] >> TIntervalSetTest::IntervalSetTestAddSubtract [GOOD] >> TIntervalSetTest::IntervalMapTestSubtract [GOOD] >> TIntervalSetTest::IntervalSetTestSubtract [GOOD] >> TIntervalSetTest::IntervalMapTestSubtractAgainstReference |86.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_quotas_ut.cpp |86.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/util/btree_benchmark/btree_benchmark >> TIntervalSetTest::IntervalMapTestSubtractAgainstReference [GOOD] >> TIntervalSetTest::IntervalSetTestSubtractAgainstReference |86.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_quotas_ut.cpp >> TIntervalSetTest::IntervalSetTestSubtractAgainstReference [GOOD] >> TIntervalSetTest::IntervalSetTestAddAgainstReference >> TIntervalSetTest::IntervalMapIntersection [GOOD] >> TIntervalSetTest::IntervalMapIntersectionInplace >> TIntervalSetTest::IntervalSetTestAddAgainstReference [GOOD] >> TIntervalSetTest::IntervalSetTestIsSubsetOfAgainstReference >> TIntervalSetTest::IntervalSetTestIsSubsetOfAgainstReference [GOOD] >> TIntervalSetTest::IntervalMapTestToStringAgainstReference [GOOD] >> TIntervalSetTest::IntervalSetTestToStringAgainstReference [GOOD] >> TIntervalSetTest::IntervalMapUnion |86.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tablet_flat/test/tool/surg/surg |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |86.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/backup/tools/decrypt/decrypt |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest >> TCircleBufStringStreamTest::TestNotAligned [GOOD] >> TCircleBufStringStreamTest::TestOverflow [GOOD] >> TCircleBufTest::EmptyTest [GOOD] >> TCircleBufTest::OverflowTest [GOOD] >> TLockFreeIntrusiveStackTest::ConcurrentRefCountNeverEmpty [GOOD] >> TLockFreeIntrusiveStackTest::ConcurrentRefCountHeavyContention >> TIntervalSetTest::IntervalMapIntersectionInplace [GOOD] >> TIntervalSetTest::IntervalMapIntersectionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalMapDifference >> TIntervalSetTest::IntervalVecTestEmpty [GOOD] >> TIntervalSetTest::IntervalVecTestSpecificAdd [GOOD] >> TIntervalSetTest::IntervalVecTestAdd >> 
TCircularOperationQueueTest::ShouldStartEmpty [GOOD] >> TCircularOperationQueueTest::ShouldNotStartUntilStart [GOOD] >> TCircularOperationQueueTest::ShouldStartInflight1 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflight2 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflight3 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflight10 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflight100 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue1 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue2 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue3 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue10 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue100 [GOOD] >> TCircularOperationQueueTest::ShouldScheduleWakeupWhenNothingStarted [GOOD] >> TCircularOperationQueueTest::ShouldScheduleWakeupWhenHasWaitingAndStart [GOOD] >> TCircularOperationQueueTest::UseMinOperationRepeatDelayWhenTimeout [GOOD] >> TCircularOperationQueueTest::ShouldReturnExecTime [GOOD] >> TCircularOperationQueueTest::ShouldTryToStartAnotherOneWhenStartFails [GOOD] >> TCircularOperationQueueTest::ShouldShuffle [GOOD] >> TCircularOperationQueueTest::RemoveNonExistingWhenShuffle [GOOD] >> TCircularOperationQueueTest::ShouldTolerateInaccurateTimer [GOOD] >> TCircularQueueTest::Empty [GOOD] >> TCircularQueueTest::ShouldNextSingleItem [GOOD] >> TCircularQueueTest::ShouldNextMulti [GOOD] >> TCircularQueueTest::ShouldGetQueue [GOOD] >> TIntervalSetTest::IntervalVecTestAdd [GOOD] >> TIntervalSetTest::IntervalVecTestAddSubtract [GOOD] >> TIntervalSetTest::IntervalVecTestSubtract [GOOD] >> TIntervalSetTest::IntervalVecTestSubtractAgainstReference [GOOD] >> TIntervalSetTest::IntervalVecTestAddAgainstReference >> TIntervalSetTest::IntervalVecTestAddAgainstReference [GOOD] >> TIntervalSetTest::IntervalVecTestIsSubsetOfAgainstReference [GOOD] >> TIntervalSetTest::IntervalVecTestToStringAgainstReference [GOOD] >> TIntervalSetTest::IntervalVecUnion >> TLsnMngrTests::AllocLsnForLocalUse10Threads [GOOD] >> TOutOfSpaceStateTests::TestLocal [GOOD] >> TOutOfSpaceStateTests::TestGlobal [GOOD] >> TBlobStorageHullFreshSegment::PerfAppendix [GOOD] >> TBlobStorageHullFreshSegment::PerfSkipList |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TCircleBufTest::OverflowTest [GOOD] >> TIntervalSetTest::IntervalVecUnion [GOOD] >> TIntervalSetTest::IntervalVecUnionInplace |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/formats/arrow/ut/ydb-core-formats-arrow-ut >> TIntervalSetTest::IntervalMapUnion [GOOD] >> TIntervalSetTest::IntervalSetUnion |86.3%| [LD] {RESULT} $(B)/ydb/core/formats/arrow/ut/ydb-core-formats-arrow-ut |86.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/formats/arrow/ut/ydb-core-formats-arrow-ut |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TCircularQueueTest::ShouldGetQueue [GOOD] |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TOutOfSpaceStateTests::TestGlobal [GOOD] >> TIntervalSetTest::IntervalVecUnionInplace [GOOD] >> TIntervalSetTest::IntervalVecUnionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalVecIntersection |86.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest |86.3%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |86.3%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/execution_unit.h_serialized.cpp >> TCowBTreeTest::MultipleSnapshots [GOOD] >> TCowBTreeTest::MultipleSnapshotsWithGc |86.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/execution_unit.h_serialized.cpp >> TIntervalSetTest::IntervalVecIntersection [GOOD] >> TIntervalSetTest::IntervalVecIntersectionInplace >> TIntervalSetTest::IntervalMapDifference [GOOD] >> TIntervalSetTest::IntervalMapDifferenceInplaceSelf [GOOD] >> TIntervalSetTest::IntervalSetUnion [GOOD] >> TIntervalSetTest::IntervalMapUnionInplace |86.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut |86.3%| [LD] {RESULT} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut |86.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut >> TIntervalSetTest::IntervalVecIntersectionInplace [GOOD] >> TIntervalSetTest::IntervalVecIntersectionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalVecDifference |86.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest >> TIntervalSetTest::IntervalVecDifference [GOOD] >> TIntervalSetTest::IntervalVecDifferenceInplaceSelf [GOOD] >> TIntrusiveFixedHashSetTest::TestEmptyFind [GOOD] >> TIntrusiveFixedHashSetTest::TestPushFindClear [GOOD] >> TIntrusiveHeapTest::TestEmpty [GOOD] >> TIntrusiveHeapTest::TestAddRemove [GOOD] >> TIntrusiveHeapTest::TestUpdateNoChange [GOOD] >> TIntrusiveHeapTest::TestUpdateIncrease [GOOD] >> TIntrusiveHeapTest::TestUpdateDecrease [GOOD] |86.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest >> TIntervalSetTest::IntervalMapUnionInplace [GOOD] >> TIntervalSetTest::IntervalSetUnionInplace |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TIntervalSetTest::IntervalMapDifferenceInplaceSelf [GOOD] |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TIntrusiveHeapTest::TestUpdateDecrease [GOOD] |86.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest >> TFlatDatabasePgTest::BasicTypes >> TIntervalSetTest::IntervalSetUnionInplace [GOOD] >> TIntervalSetTest::IntervalMapUnionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalSetUnionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalSetIntersection >> TQueryResultSizeTrackerTest::CheckAll [GOOD] >> TFlatDatabasePgTest::BasicTypes [GOOD] >> TBlobStorageHullFreshSegment::PerfSkipList [GOOD] |86.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest |86.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest |86.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/combinatory/select.cpp |86.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest |86.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/combinatory/select.cpp |86.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest |86.3%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/operation.h_serialized.cpp |86.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/operation.h_serialized.cpp >> TIntervalSetTest::IntervalSetIntersection [GOOD] >> 
TIntervalSetTest::IntervalSetIntersectionInplace |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TQueryResultSizeTrackerTest::CheckAll [GOOD] |86.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest >> TFlatDatabasePgTest::BasicTypes [GOOD] |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFreshSegment::PerfSkipList [GOOD] >> TQueryResultSizeTrackerTest::CheckOnlyQueryResult [GOOD] >> TQueryResultSizeTrackerTest::CheckWithoutQueryResult [GOOD] |86.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/persqueue_compat_ut.cpp |86.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/persqueue_compat_ut.cpp >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSize |86.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_col_ut.cpp |86.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_col_ut.cpp >> TIntervalSetTest::IntervalSetIntersectionInplace [GOOD] >> TIntervalSetTest::IntervalSetIntersectionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalSetDifference |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest |86.3%| [TA] $(B)/ydb/core/tablet_flat/ut_pg/test-results/unittest/{meta.json ... results_accumulator.log} |86.3%| [TA] {RESULT} $(B)/ydb/core/tablet_flat/ut_pg/test-results/unittest/{meta.json ... results_accumulator.log} |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TQueryResultSizeTrackerTest::CheckWithoutQueryResult [GOOD] >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSizePlusOne |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TQueryResultSizeTrackerTest::CheckOnlyQueryResult [GOOD] >> TIntervalSetTest::IntervalSetDifference [GOOD] >> TIntervalSetTest::IntervalSetDifferenceInplaceSelf [GOOD] >> TIntervalSetTest::IntervalSetTestIterator [GOOD] >> ReadBatcher::Range >> TLockFreeIntrusiveStackTest::ConcurrentRefCountHeavyContention [GOOD] >> TLockFreeIntrusiveStackTest::ConcurrentAutoNeverEmpty |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> ReadBatcher::ReadBatcher |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest |86.3%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/schemeshard_info_types.h_serialized.cpp |86.4%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/schemeshard_info_types.h_serialized.cpp |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest |86.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_order.cpp >> TBlobStorageHullCompactDeferredQueueTest::Basic |86.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/ut/ut_ext_index.cpp |86.4%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/blob_depot/schema.h_serialized.cpp |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TIntervalSetTest::IntervalSetTestIterator [GOOD] |86.4%| [CC] 
{default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/events.h_serialized.cpp >> TDelayedResponsesTests::Test [GOOD] |86.4%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/blob_depot/schema.h_serialized.cpp >> TBlobStorageIngressMatrix::VectorTestMinus [GOOD] >> TBlobStorageIngressMatrix::VectorTestIterator3 [GOOD] >> TBlobStorageIngressMatrix::VectorTestIterator1 [GOOD] >> TBlobStorageIngressMatrix::VectorTestIterator2 [GOOD] >> TBlobStorageIngress::Ingress [GOOD] >> TBlobStorageIngress::IngressCacheMirror3 [GOOD] >> TBlobStorageIngress::IngressCache4Plus2 [GOOD] >> TBlobStorageIngressMatrix::VectorTest [GOOD] >> TBlobStorageIngressMatrix::VectorTestBitsBefore1 [GOOD] >> TBlobStorageIngressMatrix::ShiftedMainBitVec [GOOD] |86.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_order.cpp |86.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/ut/ut_ext_index.cpp |86.4%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/actor/events.h_serialized.cpp >> TBlobStorageIngress::BarrierIngressQuorumBasicMirror3_4_2 [GOOD] >> TBlobStorageIngress::BarrierIngressQuorumBasic4Plus2_8_1 [GOOD] >> TBlobStorageIngress::BarrierIngressQuorumMirror3 [GOOD] |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest |86.4%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/manager/abstract.h_serialized.cpp |86.4%| [CC] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/manager/abstract.h_serialized.cpp |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> TDelayedResponsesTests::Test [GOOD] |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngress::IngressCache4Plus2 [GOOD] >> TBlobStorageIngressMatrix::MatrixTest [GOOD] >> TBlobStorageIngressMatrix::ShiftedBitVecBase [GOOD] >> TBlobStorageIngressMatrix::ShiftedHandoffBitVec [GOOD] |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::VectorTestIterator2 [GOOD] |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::VectorTestIterator3 [GOOD] |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::ShiftedMainBitVec [GOOD] |86.4%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/export/session/session.h_serialized.cpp >> TBlobStorageIngress::IngressCreateFromRepl [GOOD] >> TBlobStorageIngress::IngressGetMainReplica [GOOD] >> TBlobStorageIngress::IngressHandoffPartsDelete [GOOD] |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngress::BarrierIngressQuorumMirror3 [GOOD] >> TBlobStorageIngressMatrix::VectorTestBitwiseAnd [GOOD] >> TBlobStorageIngressMatrix::VectorTestBitwiseComplement1 [GOOD] >> TBlobStorageIngressMatrix::VectorTestBitsBefore2 [GOOD] |86.4%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/session/session.h_serialized.cpp |86.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.a |86.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.a |86.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.a >> FormatTimes::ParseDuration [GOOD] |86.4%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut |86.4%| [LD] {RESULT} $(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut |86.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::ShiftedHandoffBitVec [GOOD] >> StatsFormat::AggregateStat [GOOD] |86.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut |86.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut >> FormatTimes::DurationMs [GOOD] |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::VectorTestBitsBefore2 [GOOD] |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngress::IngressHandoffPartsDelete [GOOD] |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> Config::ExcludeScope [GOOD] >> FormatTimes::DurationUs [GOOD] |86.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/manager/libservices-metadata-manager.a |86.4%| [AR] {RESULT} $(B)/ydb/services/metadata/manager/libservices-metadata-manager.a |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> FormatTimes::ParseDuration [GOOD] >> ReadBatcher::ReadBatcher [GOOD] |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest |86.4%| [LD] {RESULT} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut |86.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/manager/libservices-metadata-manager.a |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> StatsFormat::AggregateStat [GOOD] |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> FormatTimes::DurationMs [GOOD] |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> Config::ExcludeScope [GOOD] |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> FormatTimes::DurationUs [GOOD] >> TYardTest::TestEmptyLogRead >> TPDiskUtil::SectorRestorator [GOOD] >> TPDiskUtil::SectorRestoratorOldNewHash [GOOD] >> TPDiskUtil::SectorPrint [GOOD] >> TPDiskUtil::TChunkIdFormatter [GOOD] >> TPDiskUtil::TOwnerPrintTest [GOOD] >> TPDiskUtil::TChunkStateEnumPrintTest [GOOD] >> TPDiskUtil::TIoResultEnumPrintTest [GOOD] >> TPDiskUtil::TIoTypeEnumPrintTest [GOOD] >> TPDiskUtil::TestNVMeSerial [GOOD] >> TPDiskUtil::TestDeviceList [GOOD] >> TPDiskUtil::TestBufferPool >> WilsonTrace::LogWriteChunkWriteChunkRead >> TYardTest::TestBadDeviceInit |86.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blob_depot/libydb-core-blob_depot.a |86.4%| [AR] {RESULT} $(B)/ydb/core/blob_depot/libydb-core-blob_depot.a >> TYardTest::TestEmptyLogRead [GOOD] >> TYardTest::TestChunkWriteRead >> WilsonTrace::LogWriteChunkWriteChunkRead [GOOD] >> TYardTest::TestWholeLogRead |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> ReadBatcher::ReadBatcher [GOOD] >> TCowBTreeTest::MultipleSnapshotsWithGc 
[GOOD] >> TCowBTreeTest::MultipleSnapshotsWithClear >> TPDiskTest::TestAbstractPDiskInterface [GOOD] >> TPDiskTest::TestPDiskActorErrorState >> StatsFormat::FullStat [GOOD] >> TBlobStoragePDiskCrypto::TestMixedStreamCypher [GOOD] >> TBlobStoragePDiskCrypto::TestInplaceStreamCypher |86.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.a |86.4%| [AR] {RESULT} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.a >> TPDiskConfig::GetOwnerWeight [GOOD] >> TPDiskRaces::KillOwnerWhileDeletingChunk >> PDiskCompatibilityInfo::OldCompatible >> TYardTest::TestBadDeviceInit [GOOD] >> TYardTest::TestChunkReadRandomOffset >> TBlobStoragePDiskCrypto::TestInplaceStreamCypher [GOOD] >> TBlockDeviceTest::TestDeviceWithSubmitGetThread >> TPDiskTest::TestPDiskActorErrorState [GOOD] >> TPDiskTest::TestChunkWriteRelease >> TPDiskTest::TestThatEveryValueOfEStateEnumKeepsItIntegerValue [GOOD] >> TPDiskTest::TestPDiskActorPDiskStopStart >> TYardTest::TestWholeLogRead [GOOD] >> TYardTest::TestSysLogReordering >> TBlobStorageHullSstIt::TestSeekToLast [GOOD] >> TBlobStorageHullSstIt::TestSstIndexSaveLoad [GOOD] >> TBlobStorageHullOrderedSstsIt::TestSeekToFirst >> TBlobStorageHullWriteSst::BlockOneSstOneIndex [GOOD] >> TBlobStorageHullSstIt::TestSeekToFirst [GOOD] >> TBlobStorageHullWriteSst::BlockOneSstMultiIndex [GOOD] >> TBlobStorageHullSstIt::TestSeekExactAndPrev [GOOD] >> TBlobStorageHullSstIt::TestSeekNotExactBefore [GOOD] >> TBlockDeviceTest::TestDeviceWithSubmitGetThread [GOOD] >> TBlockDeviceTest::TestWriteSectorMapAllTypes >> TYardTest::TestInit >> PDiskCompatibilityInfo::OldCompatible [GOOD] >> PDiskCompatibilityInfo::Incompatible |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> StatsFormat::FullStat [GOOD] |86.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blob_depot/libydb-core-blob_depot.a >> TBlobStorageHullOrderedSstsIt::TestSeekToFirst [GOOD] >> TBlobStorageHullOrderedSstsIt::TestSeekToLast [GOOD] >> TBlobStorageHullOrderedSstsIt::TestSeekAfterAndPrev [GOOD] >> TBlobStorageHullSstIt::TestSstIndexSeekAndIterate [GOOD] >> TBlobStorageHullWriteSst::BlockMultiSstOneIndex |86.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node |86.4%| [LD] {RESULT} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node >> TBlobStorageHullWriteSst::LogoBlobOneSstMultiIndex [GOOD] >> TBlobStorageHullWriteSst::LogoBlobMultiSstOneIndexPartOutbound [GOOD] >> TBlobStorageHullWriteSst::BlockMultiSstOneIndex [GOOD] >> TPDiskTest::TestPDiskActorPDiskStopStart [GOOD] >> TPDiskTest::TestPDiskActorPDiskStopBroken |86.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node |86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/tools/dq/service_node/service_node >> TBlobStorageHullWriteSst::LogoBlobMultiSstOneIndex [GOOD] >> TBlobStorageHullWriteSst::LogoBlobMultiSstMultiIndex [GOOD] >> TBlobStorageHullSstIt::TestSeekExactAndNext [GOOD] >> TBlobStorageHullSstIt::TestSeekBefore [GOOD] >> TBlobStorageHullSstIt::TestSeekAfterAndPrev [GOOD] >> TBlobStorageHullWriteSst::LogoBlobOneSstOneIndex [GOOD] >> TBlobStorageHullWriteSst::LogoBlobOneSstMultiIndexPartOutbound [GOOD] |86.5%| [LD] {RESULT} $(B)/ydb/library/yql/tools/dq/service_node/service_node |86.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/tools/dq/service_node/service_node |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> 
TBlobStorageHullSstIt::TestSeekNotExactBefore [GOOD] |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullSstIt::TestSstIndexSaveLoad [GOOD] >> TYardTest::TestChunkWriteRead [GOOD] >> TYardTest::TestChunkWriteReadWithHddSectorMap >> PDiskCompatibilityInfo::Incompatible [GOOD] >> PDiskCompatibilityInfo::NewIncompatibleWithDefault >> TPDiskTest::TestPDiskActorPDiskStopBroken [GOOD] >> TPDiskTest::TestPDiskActorPDiskStopUninitialized |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullWriteSst::BlockOneSstMultiIndex [GOOD] |86.5%| [TA] $(B)/ydb/core/fq/libs/compute/common/ut/test-results/unittest/{meta.json ... results_accumulator.log} |86.5%| [TA] {RESULT} $(B)/ydb/core/fq/libs/compute/common/ut/test-results/unittest/{meta.json ... results_accumulator.log} |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullWriteSst::BlockMultiSstOneIndex [GOOD] >> PDiskCompatibilityInfo::NewIncompatibleWithDefault [GOOD] >> TPDiskUtil::TestBufferPool [GOOD] >> TPDiskUtil::SectorMap [GOOD] >> TPDiskUtil::SectorMapStoreLoadFromFile [GOOD] >> TSectorMapPerformance::TestHDD1960GBRead100MBOnFirstSector >> PDiskCompatibilityInfo::Trunk |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullWriteSst::LogoBlobMultiSstOneIndexPartOutbound [GOOD] |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullOrderedSstsIt::TestSeekAfterAndPrev [GOOD] >> TYardTest::TestInit [GOOD] >> TYardTest::TestInitOnIncompleteFormat >> TPDiskTest::TestPDiskActorPDiskStopUninitialized [GOOD] >> TPDiskTest::TestPDiskOwnerRecreation >> TLockFreeIntrusiveStackTest::ConcurrentAutoNeverEmpty [GOOD] >> PDiskCompatibilityInfo::Trunk [GOOD] >> PDiskCompatibilityInfo::SuppressCompatibilityCheck |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullWriteSst::LogoBlobOneSstMultiIndexPartOutbound [GOOD] |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullWriteSst::LogoBlobMultiSstMultiIndex [GOOD] |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullSstIt::TestSeekAfterAndPrev [GOOD] >> TBlobStorageHullWriteSst::LogoBlobOneSstOneIndexWithSmallWriteBlocks [GOOD] >> TLockFreeIntrusiveStackTest::ConcurrentAutoHeavyContention >> TBlobStorageHullWriteSst::LogoBlobOneSstOneIndexPartOutbound >> TBlobStorageHullWriteSst::LogoBlobOneSstOneIndexPartOutbound [GOOD] |86.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.a |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullWriteSst::LogoBlobOneSstOneIndexPartOutbound [GOOD] >> PDiskCompatibilityInfo::SuppressCompatibilityCheck [GOOD] >> PDiskCompatibilityInfo::Migration >> TActorTest::TestWaitFuture [GOOD] |86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blob_depot/ut/ydb-core-blob_depot-ut |86.5%| [LD] {RESULT} $(B)/ydb/core/blob_depot/ut/ydb-core-blob_depot-ut |86.5%| [TA] $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TActorTest::TestHandleEvent [GOOD] >> TActorTest::TestGetCtxTime [GOOD] >> TActorTest::TestScheduleEvent [GOOD] >> TActorTest::TestScheduleReaction >> SysViewQueryHistory::AddDedup2 [GOOD] >> SysViewQueryHistory::AddDedup [GOOD] >> TYardTest::TestInitOnIncompleteFormat [GOOD] >> TYardTest::TestInitOwner |86.5%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TActorTest::TestWaitForFirstEvent >> TYardTest::TestChunkWriteReadWithHddSectorMap [GOOD] >> TYardTest::TestChunkWriteReadMultiple >> TActorTest::TestScheduleReaction [GOOD] >> TActorTest::TestDie >> TCowBTreeTest::MultipleSnapshotsWithClear [GOOD] >> TCowBTreeTest::MultipleSnapshotsWithClearWithGc >> TActorTest::TestWaitForFirstEvent [GOOD] >> TActorTest::TestSendFromAnotherThread >> TActorTest::TestWaitFor [GOOD] >> TActorTest::TestDie [GOOD] >> TActorTest::TestFilteredGrab >> TActorTest::TestFilteredGrab [GOOD] >> TYardTest::TestInitOwner [GOOD] >> TYardTest::TestIncorrectRequests >> TActorTest::TestStateSwitch |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestWaitFuture [GOOD] >> PDiskCompatibilityInfo::Migration [GOOD] |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestGetCtxTime [GOOD] >> ReadOnlyPDisk::SimpleRestartReadOnly >> TActorTest::TestStateSwitch [GOOD] |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::AddDedup [GOOD] |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestScheduleReaction [GOOD] >> TActorTest::TestSendEvent [GOOD] >> TActorTest::TestSendAfterDelay >> TSectorMapPerformance::TestHDD1960GBRead100MBOnFirstSector [GOOD] >> TSectorMapPerformance::TestHDD1960GBRead100MBOnLastSector |86.5%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blob_depot/ut/ydb-core-blob_depot-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestWaitForFirstEvent [GOOD] Test command err: ... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger ... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger (done) ... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger ... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestWaitFor [GOOD] Test command err: ... waiting for value = 42 ... 
waiting for value = 42 (done) >> TActorTest::TestSendAfterDelay [GOOD] |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestFilteredGrab [GOOD] >> ReadOnlyPDisk::SimpleRestartReadOnly [GOOD] >> ReadOnlyPDisk::StartReadOnlyUnformattedShouldFail >> TBlockDeviceTest::TestWriteSectorMapAllTypes [GOOD] >> TBlockDeviceTest::WriteReadRestart |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestStateSwitch [GOOD] >> ReadOnlyPDisk::StartReadOnlyUnformattedShouldFail [GOOD] >> ReadOnlyPDisk::StartReadOnlyZeroedShouldFail >> TYardTest::TestIncorrectRequests [GOOD] >> TYardTest::TestLogWriteRead >> AuthDatabaseAdmin::PassOnOwnerMatchUserSid [GOOD] >> AuthDatabaseAdmin::PassOnOwnerMatchUserSidWithGroup [GOOD] >> AuthTokenAllowed::FailOnListAndEmptyToken [GOOD] |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestSendAfterDelay [GOOD] >> ReadOnlyPDisk::StartReadOnlyZeroedShouldFail [GOOD] >> ReadOnlyPDisk::VDiskStartsOnReadOnlyPDisk >> TYardTest::TestLogWriteRead [GOOD] >> TYardTest::TestLogWriteReadMedium >> AddressClassifierTest::TestAddressExtraction [GOOD] >> AddressClassifierTest::TestAddressParsing [GOOD] >> AddressClassifierTest::TestClassfierWithAllIpTypes [GOOD] >> AddressClassifierTest::TestLabeledClassifier [GOOD] >> AddressClassifierTest::TestLabeledClassifierFromNetData [GOOD] >> TBitsTest::TestNaiveClz [GOOD] >> TBTreeTest::Basics [GOOD] >> TBTreeTest::ClearAndReuse |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::FailOnListAndEmptyToken [GOOD] >> TActorTest::TestSendFromAnotherThread [GOOD] >> DiscoveryConverterTest::FullLegacyPath [GOOD] >> DiscoveryConverterTest::FullLegacyNamesWithRootDatabase [GOOD] >> TBTreeTest::ClearAndReuse [GOOD] >> TBTreeTest::SeekForwardPermutationsInplace [GOOD] >> TBTreeTest::SeekForwardPermutationsThreadSafe [GOOD] >> TBTreeTest::SeekBackwardPermutationsInplace >> TBlobStorageSyncNeighborsTest::IterateOverAllDisks [GOOD] >> TBlobStorageSyncNeighborsTest::SerDes [GOOD] >> TBlobStorageSyncNeighborsTest::CheckVDiskIterators [GOOD] >> TCircleBufStringStreamTest::TestAligned [GOOD] >> TCowBTreeTest::SeekForwardPermutationsInplace [GOOD] >> TCowBTreeTest::SeekForwardPermutationsThreadSafe [GOOD] >> TCowBTreeTest::SeekBackwardPermutationsInplace [GOOD] >> TCowBTreeTest::SeekBackwardPermutationsThreadSafe [GOOD] >> TCowBTreeTest::RandomInsertInplace >> TBTreeTest::SeekBackwardPermutationsInplace [GOOD] >> TBTreeTest::SeekBackwardPermutationsThreadSafe [GOOD] >> TBTreeTest::RandomInsertInplace >> ReadOnlyPDisk::VDiskStartsOnReadOnlyPDisk [GOOD] >> ReadOnlyPDisk::ReadOnlyPDiskEvents >> TSubgroupPartLayoutTest::CountEffectiveReplicas1of4 >> TBlobStorageGroupInfoBlobMapTest::BelongsToSubgroupBenchmark >> TPDiskTest::TestPDiskOwnerRecreation [GOOD] >> TPDiskTest::TestPDiskOwnerRecreationWithStableOwner >> TYardTest::TestLogWriteReadMedium [GOOD] >> TYardTest::TestLogWriteReadMediumWithHddSectorMap >> TopicNameConverterForCPTest::CorrectLegacyTopics [GOOD] >> TopicNameConverterForCPTest::CorrectModernTopics [GOOD] |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TCircleBufStringStreamTest::TestAligned [GOOD] |86.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::FullLegacyNamesWithRootDatabase [GOOD] |86.5%| [TM] {asan, default-linux-x86_64, 
release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestSendFromAnotherThread [GOOD] |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest >> TBlobStorageGroupInfoIterTest::PerRealmIterator [GOOD] >> TBlobStorageGroupInfoIterTest::WalkFailRealms [GOOD] >> ReadOnlyPDisk::ReadOnlyPDiskEvents [GOOD] >> ShredPDisk::EmptyShred |86.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> TopicNameConverterForCPTest::CorrectModernTopics [GOOD] |86.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/ut_with_sdk/autoscaling_ut.cpp |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoIterTest::WalkFailRealms [GOOD] |86.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/autoscaling_ut.cpp |86.5%| [TA] $(B)/ydb/core/testlib/actors/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TBlobStorageGroupInfoIterTest::IteratorForwardAndBackward [GOOD] >> TBlobStorageGroupInfoIterTest::PerFailDomainRange [GOOD] |86.5%| [TA] {RESULT} $(B)/ydb/core/testlib/actors/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TYardTest::TestLogWriteReadMediumWithHddSectorMap [GOOD] >> TYardTest::TestLogWriteReadLarge |86.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_minstep.cpp |86.5%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/provider/yql_kikimr_provider.h_serialized.cpp |86.5%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/provider/yql_kikimr_provider.h_serialized.cpp |86.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_minstep.cpp >> TSectorMapPerformance::TestHDD1960GBRead100MBOnLastSector [GOOD] >> TSectorMapPerformance::TestHDD1960GBWrite100MBOnFirstSector >> TBlobStorageGroupInfoIterTest::Domains [GOOD] >> TBlobStorageGroupInfoIterTest::Indexes [GOOD] >> TLockFreeIntrusiveStackTest::ConcurrentAutoHeavyContention [GOOD] >> TLogPriorityMuteTests::MuteUntilTest [GOOD] >> TLogPriorityMuteTests::AtomicMuteUntilTest [GOOD] >> TLogPriorityMuteTests::UnmuteTest [GOOD] >> TLogPriorityMuteTests::AtomicUnmuteTest [GOOD] >> TLogPriorityMuteTests::CheckPriorityWithSetMuteTest [GOOD] >> TLogPriorityMuteTests::AtomicCheckPriorityWithSetMuteTest [GOOD] >> TLogPriorityMuteTests::CheckPriorityWithSetMuteDurationTest [GOOD] >> TLogPriorityMuteTests::AtomicCheckPriorityWithSetMuteDurationTest [GOOD] >> TOneOneQueueTests::TestSimpleEnqueueDequeue [GOOD] >> TOneOneQueueTests::CleanInDestructor [GOOD] >> TOneOneQueueTests::ReadIterator [GOOD] >> TPageMapTest::TestResize [GOOD] >> TPageMapTest::TestRandom |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoIterTest::PerFailDomainRange [GOOD] >> TYardTest::TestLogWriteReadLarge [GOOD] >> TYardTest::TestLogWriteCutEqual >> TSectorMapPerformance::TestHDD1960GBWrite100MBOnFirstSector [GOOD] |86.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/provider/libcore-kqp-provider.a >> TSectorMapPerformance::TestHDD1960GBWrite100MBOnLastSector >> ShredPDisk::EmptyShred [GOOD] >> ShredPDisk::SimpleShred >> TBlobStorageHullDecimal::TestMkRatio [GOOD] >> TBlobStorageHullDecimal::TestMult [GOOD] |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoIterTest::Indexes [GOOD] 
|86.6%| [AR] {RESULT} $(B)/ydb/core/kqp/provider/libcore-kqp-provider.a >> AuthTokenAllowed::PassOnListMatchUserSid [GOOD] >> AuthTokenAllowed::PassOnListMatchUserSidWithGroup [GOOD] >> TCowBTreeTest::MultipleSnapshotsWithClearWithGc [GOOD] >> TCowBTreeTest::DuplicateKeysInplace >> TBlobStorageGroupInfoBlobMapTest::CheckCorrectBehaviourWithHashOverlow [GOOD] >> TBlobStorageGroupInfoBlobMapTest::Mirror3dcMapper >> DiscoveryConverterTest::DiscoveryConverter [GOOD] >> DiscoveryConverterTest::EmptyModern [GOOD] |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::PassOnListMatchUserSidWithGroup [GOOD] |86.6%| [TA] $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/test-results/unittest/{meta.json ... results_accumulator.log} |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> TBlobStorageHullDecimal::TestMult [GOOD] >> TPDiskTest::TestChunkWriteRelease [GOOD] >> TPDiskTest::TestLogWriteReadWithRestarts |86.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::EmptyModern [GOOD] >> TBlobStorageGroupInfoTest::GroupQuorumCheckerOrdinary >> THullDsHeapItTest::HeapAppendixTreeForwardIteratorBenchmark >> TSectorMapPerformance::TestHDD1960GBWrite100MBOnLastSector [GOOD] >> TSectorMapPerformance::TestSSD1960GBRead100MBOnFirstSector >> TBlobStorageGroupInfoTest::TestBelongsToSubgroup >> TCowBTreeTest::DuplicateKeysInplace [GOOD] >> TCowBTreeTest::DuplicateKeysThreadSafe >> TPDiskTest::TestPDiskOwnerRecreationWithStableOwner [GOOD] >> TPDiskTest::TestPDiskManyOwnersInitiation |86.6%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/opt/kqp_query_plan.h_serialized.cpp |86.6%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/opt/kqp_query_plan.h_serialized.cpp |86.6%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/tx_controller.h_serialized.cpp >> ShredPDisk::SimpleShred [GOOD] >> ShredPDisk::SimpleShredRepeat >> TSectorMapPerformance::TestSSD1960GBRead100MBOnFirstSector [GOOD] >> TBlobStorageGroupInfoTest::TestBelongsToSubgroup [GOOD] >> TSectorMapPerformance::TestSSD1960GBWrite100MBOnFirstSector >> TBlobStorageGroupInfoTest::SubgroupPartLayout >> TBlobStorageGroupInfoTest::GroupQuorumCheckerOrdinary [GOOD] >> TBlobStorageGroupInfoTest::GroupQuorumCheckerMirror3dc [GOOD] >> THullDsHeapItTest::HeapAppendixTreeForwardIteratorBenchmark [GOOD] >> THullDsHeapItTest::HeapAppendixTreeBackwardIteratorBenchmark |86.6%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/transactions/tx_controller.h_serialized.cpp >> TBTreeTest::RandomInsertInplace [GOOD] >> TBTreeTest::RandomInsertThreadSafe >> TBlobStorageGroupInfoIterTest::IteratorForward [GOOD] >> TBlobStorageGroupInfoIterTest::IteratorBackward [GOOD] >> TSectorMapPerformance::TestSSD1960GBWrite100MBOnFirstSector [GOOD] >> TSectorMapPerformance::TestSSD1960GBRead1000MBOnFirstSector |86.6%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h_serialized.cpp |86.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/ut_bscontroller/main.cpp >> THullDsHeapItTest::HeapAppendixTreeBackwardIteratorBenchmark [GOOD] |86.6%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h_serialized.cpp |86.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/ut_bscontroller/main.cpp |86.6%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoTest::GroupQuorumCheckerMirror3dc [GOOD] >> TBlobStorageLinearTrackBar::TestLinearTrackBarDouble [GOOD] >> TBlobStorageLinearTrackBar::TestLinearTrackBarWithDecimal [GOOD] >> ReadBatcher::Range [GOOD] >> TopicNameConverterTest::LegacyStyleDoubleName [GOOD] >> TopicNameConverterTest::NoTopicName [GOOD] |86.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/provider/libcore-kqp-provider.a >> NameserviceConfigValidatorTests::TestEmptyConfig [GOOD] >> NameserviceConfigValidatorTests::TestDuplicatingId [GOOD] >> NameserviceConfigValidatorTests::TestDuplicatingResolveHostPort [GOOD] >> NameserviceConfigValidatorTests::TestEmptyAddresses [GOOD] |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoIterTest::IteratorBackward [GOOD] |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> TBlobStorageLinearTrackBar::TestLinearTrackBarWithDecimal [GOOD] >> TBlockDeviceTest::WriteReadRestart [GOOD] >> TChunkTrackerTest::AddRemove [GOOD] >> TChunkTrackerTest::TwoOwnersInterference [GOOD] >> TChunkTrackerTest::AddOwnerWithWeight [GOOD] >> TChunkTrackerTest::ZeroWeight [GOOD] >> TColorLimitsTest::Colors [GOOD] >> TColorLimitsTest::OwnerFreeSpaceShare [GOOD] >> TLogCache::Simple [GOOD] >> TLogCache::EraseRangeOnEmpty [GOOD] >> TLogCache::EraseRangeOutsideOfData [GOOD] >> TLogCache::EraseRangeSingleMinElement [GOOD] >> TLogCache::EraseRangeSingleMidElement [GOOD] >> TLogCache::EraseRangeSingleMaxElement [GOOD] >> TLogCache::EraseRangeSample [GOOD] >> TLogCache::EraseRangeAllExact [GOOD] >> TLogCache::EraseRangeAllAmple [GOOD] >> ShredPDisk::SimpleShredRepeatAfterPDiskRestart >> TCowBTreeTest::RandomInsertInplace [GOOD] >> TCowBTreeTest::RandomInsertThreadSafe >> THullDsGenericNWayIt::ForwardIteration [GOOD] >> THullDsGenericNWayIt::BackwardIteration [GOOD] >> NameserviceConfigValidatorTests::TestModifyIdForHostPort [GOOD] >> NameserviceConfigValidatorTests::TestModifyIdForResolveHostPort [GOOD] >> NameserviceConfigValidatorTests::TestModifyResolveHost [GOOD] >> NameserviceConfigValidatorTests::TestModifyPort [GOOD] |86.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> TopicNameConverterTest::NoTopicName [GOOD] |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> THullDsHeapItTest::HeapAppendixTreeBackwardIteratorBenchmark [GOOD] >> TBlobStorageHullStorageRatio::Test >> TBlobStorageHullStorageRatio::Test [GOOD] >> TBlobStorageKeyBarrierTest::ParseTest [GOOD] >> ShredPDisk::SimpleShredRepeat [GOOD] >> ShredPDisk::SimpleShredDirtyChunks |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> NameserviceConfigValidatorTests::TestEmptyAddresses [GOOD] >> TSubgroupPartLayoutTest::CountEffectiveReplicas3of4 >> TPDiskTest::TestPDiskManyOwnersInitiation [GOOD] >> TPDiskTest::TestVDiskMock >> TCowBTreeTest::DuplicateKeysThreadSafe [GOOD] >> TCowBTreeTest::IteratorDestructor [GOOD] >> TCowBTreeTest::Concurrent |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> THullDsGenericNWayIt::BackwardIteration [GOOD] |86.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/replication_huge.cpp |86.6%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/operation_queue_timer.h_serialized.cpp |86.6%| [TM] {asan, 
default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> NameserviceConfigValidatorTests::TestModifyPort [GOOD] |86.6%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/operation_queue_timer.h_serialized.cpp >> AuthTokenAllowed::PassOnEmptyListAndEmptyToken [GOOD] >> AuthTokenAllowed::FailOnListMatchGroupSid [GOOD] >> SysViewQueryHistory::TopReadBytesAdd [GOOD] |86.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/replication_huge.cpp |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> ReadBatcher::Range [GOOD] |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> TBlobStorageKeyBarrierTest::ParseTest [GOOD] >> SysViewQueryHistory::AddDedupRandom [GOOD] >> NameserviceConfigValidatorTests::TestRemoveTooMany [GOOD] >> ResourceBrokerConfigValidatorTests::TestEmptyConfig [GOOD] >> ResourceBrokerConfigValidatorTests::TestEmptyQueueName >> ResourceBrokerConfigValidatorTests::TestEmptyQueueName [GOOD] >> ResourceBrokerConfigValidatorTests::TestEmptyTaskName [GOOD] >> TRegistryTests::TestLock [GOOD] >> TRegistryTests::TestClasses [GOOD] >> TRegistryTests::TestDisableEnable [GOOD] >> TCowBTreeTest::Concurrent [GOOD] >> TBlobStorageGroupInfoBlobMapTest::Mirror3dcMapper [GOOD] |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::FailOnListMatchGroupSid [GOOD] |86.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_upload_rows.cpp |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::TopReadBytesAdd [GOOD] >> TCowBTreeTest::Alignment [GOOD] |86.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_upload_rows.cpp |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::AddDedupRandom [GOOD] >> TPDiskTest::TestVDiskMock [GOOD] >> TPDiskTest::TestRealFile >> TBlobStorageHullCompactDeferredQueueTest::Basic [GOOD] >> DiscoveryConverterTest::MinimalName [GOOD] >> DiscoveryConverterTest::WithLogbrokerPath [GOOD] >> THullDsHeapItTest::HeapLevelSliceForwardIteratorBenchmark |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> ResourceBrokerConfigValidatorTests::TestEmptyTaskName [GOOD] |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> TRegistryTests::TestDisableEnable [GOOD] >> THullDsHeapItTest::HeapLevelSliceForwardIteratorBenchmark [GOOD] >> THullDsHeapItTest::HeapLevelSliceBackwardIteratorBenchmark ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoBlobMapTest::Mirror3dcMapper [GOOD] Test command err: [0:1:0:3:1]# 173 184 157 167 152 185 195 192 144 [0:1:1:1:1]# 189 195 192 171 157 161 167 155 196 [0:1:3:3:1]# 184 157 182 152 185 157 192 144 189 [0:1:3:4:0]# 148 154 155 158 194 160 156 163 140 [0:1:2:3:2]# 152 177 174 176 154 146 161 170 168 [0:1:1:2:1]# 157 167 152 189 195 192 171 157 161 [0:1:1:0:2]# 158 150 131 167 177 161 177 174 173 [0:1:3:0:1]# 161 155 171 196 154 167 184 157 182 [0:1:0:3:2]# 174 173 152 146 184 176 168 157 161 [0:1:2:2:0]# 163 140 161 148 162 159 168 178 190 [0:1:0:2:0]# 161 156 163 159 196 148 190 162 168 [0:1:3:2:1]# 152 185 157 192 144 189 161 155 171 [0:1:2:3:1]# 157 182 173 185 157 167 144 189 195 [0:1:3:1:2]# 157 161 170 131 190 158 161 178 167 [0:1:2:0:1]# 155 171 157 154 167 155 
157 182 173 [0:1:3:0:2]# 131 190 158 161 178 167 173 152 177 [0:1:2:0:2]# 190 158 150 178 167 177 152 177 174 [0:1:2:4:1]# 154 167 155 157 182 173 185 157 167 [0:1:2:1:2]# 161 170 168 190 158 150 178 167 177 [0:1:2:4:2]# 178 167 177 152 177 174 176 154 146 [0:1:0:2:1]# 167 152 185 195 192 144 157 161 155 [0:1:0:0:0]# 190 162 168 174 148 154 177 158 194 [0:1:3:2:0]# 156 163 140 196 148 162 162 168 178 [0:1:1:0:1]# 171 157 161 167 155 196 182 173 184 [0:1:0:2:2]# 146 184 176 168 157 161 150 131 190 [0:1:1:0:0]# 178 190 162 155 174 148 160 177 158 [0:1:2:3:0]# 194 160 177 163 140 161 148 162 159 [0:1:2:4:0]# 154 155 174 194 160 177 163 140 161 [0:1:1:3:2]# 177 174 173 154 146 184 170 168 157 [0:1:2:1:1]# 144 189 195 155 171 157 154 167 155 [0:1:1:1:0]# 162 159 196 178 190 162 155 174 148 [0:1:1:3:1]# 182 173 184 157 167 152 189 195 192 [0:1:3:4:1]# 196 154 167 184 157 182 152 185 157 [0:1:1:4:2]# 167 177 161 177 174 173 154 146 184 [0:1:0:1:0]# 159 196 148 190 162 168 174 148 154 [0:1:3:4:2]# 161 178 167 173 152 177 184 176 154 [0:1:0:0:1]# 157 161 155 155 196 154 173 184 157 [0:1:1:4:0]# 155 174 148 160 177 158 140 161 156 [0:1:2:1:0]# 148 162 159 168 178 190 154 155 174 [0:1:2:0:0]# 168 178 190 154 155 174 194 160 177 [0:1:3:3:2]# 173 152 177 184 176 154 157 161 170 [0:1:0:4:0]# 174 148 154 177 158 194 161 156 163 [0:1:1:2:0]# 140 161 156 162 159 196 178 190 162 [0:1:0:1:1]# 195 192 144 157 161 155 155 196 154 [0:1:3:0:0]# 162 168 178 148 154 155 158 194 160 [0:1:3:1:1]# 192 144 189 161 155 171 196 154 167 [0:1:0:4:1]# 155 196 154 173 184 157 167 152 185 [0:1:2:2:1]# 185 157 167 144 189 195 155 171 157 [0:1:3:1:0]# 196 148 162 162 168 178 148 154 155 [0:1:2:2:2]# 176 154 146 161 170 168 190 158 150 [0:1:0:3:0]# 177 158 194 161 156 163 159 196 148 [0:1:3:3:0]# 158 194 160 156 163 140 196 148 162 [0:1:0:1:2]# 168 157 161 150 131 190 177 161 178 [0:1:3:2:2]# 184 176 154 157 161 170 131 190 158 [0:1:1:3:0]# 160 177 158 140 161 156 162 159 196 [0:1:1:2:2]# 154 146 184 170 168 157 158 150 131 [0:1:1:4:1]# 167 155 196 182 173 184 157 167 152 [0:1:1:1:2]# 170 168 157 158 150 131 167 177 161 [0:1:0:0:2]# 150 131 190 177 161 178 174 173 152 [0:1:0:4:2]# 177 161 178 174 173 152 146 184 176 mean# 166.6666667 dev# 15.11254078 |86.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/common/rows_proto_splitter_ut.cpp |86.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::WithLogbrokerPath [GOOD] |86.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/common/rows_proto_splitter_ut.cpp >> ShredPDisk::SimpleShredDirtyChunks [GOOD] >> ShredPDisk::KillVDiskWhilePreShredding >> THullDsHeapItTest::HeapLevelSliceBackwardIteratorBenchmark [GOOD] >> ShredPDisk::SimpleShredRepeatAfterPDiskRestart [GOOD] >> BootstrapTabletsValidatorTests::TestNoNodeForTablet [GOOD] >> BootstrapTabletsValidatorTests::TestRequiredTablet [GOOD] >> BootstrapTabletsValidatorTests::TestImportantTablet [GOOD] >> BootstrapTabletsValidatorTests::TestCompactionBroker [GOOD] >> TPDiskTest::TestLogWriteReadWithRestarts [GOOD] >> TPDiskTest::TestLogSpliceNonceJump |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest >> TBlobStorageBarriersTreeTest::Tree [GOOD] |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> BootstrapTabletsValidatorTests::TestCompactionBroker [GOOD] |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest 
>> THullDsHeapItTest::HeapLevelSliceBackwardIteratorBenchmark [GOOD]
|86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest >> TBlobStorageBarriersTreeTest::Tree [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> ShredPDisk::SimpleShredRepeatAfterPDiskRestart [GOOD]
Test command err:
GREEN 0.5025125628 0
CYAN 0.8623115578 0.862
LIGHT_YELLOW 0.8934673367 0.893
YELLOW 0.9145728643 0.914
LIGHT_ORANGE 0.9306532663 0.93
PRE_ORANGE 0.9467336683 0.946
ORANGE 0.9668341709 0.966
RED 0.9879396985 0.987
BLACK 0.9979899497 0.997
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:390
>> ResourceBrokerConfigValidatorTests::TestRepeatedTaskName [GOOD]
>> ResourceBrokerConfigValidatorTests::TestUnknownQueue [GOOD]
>> ResourceBrokerConfigValidatorTests::TestUnlimitedResource [GOOD]
>> ResourceBrokerConfigValidatorTests::TestUnusedQueue [GOOD]
>> NameserviceConfigValidatorTests::TestLongWalleDC [GOOD]
>> NameserviceConfigValidatorTests::TestModifyClusterUUID [GOOD]
>> NameserviceConfigValidatorTests::TestModifyIdForAddrPort [GOOD]
>> NameserviceConfigValidatorTests::TestModifyHost [GOOD]
>> TBlobStorageBarriersTreeTest::MemViewSnapshots [GOOD]
>> TBlobStorageIngress::IngressPartsWeMustHaveLocally [GOOD]
>> TBlobStorageIngress::IngressLocalParts [GOOD]
>> TBlobStorageIngress::IngressPrintDistribution [GOOD]
|86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_double_indexed/ydb-core-tx-scheme_board-ut_double_indexed
|86.6%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|86.6%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_double_indexed/ydb-core-tx-scheme_board-ut_double_indexed
>> TRegistryTests::TestAddGet [GOOD]
>> TRegistryTests::TestCheckConfig [GOOD]
>> ResourceBrokerConfigValidatorTests::TestZeroQueueWeight [GOOD]
>> ResourceBrokerConfigValidatorTests::TestZeroDefaultDuration [GOOD]
|86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest
|86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest
|86.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/libengines-reader-abstract.a
|86.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/libengines-reader-abstract.a
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> TBlobStorageHullCompactDeferredQueueTest::Basic [GOOD]
Test command err:
STEP 1
STEP 2
StringToId# 63 numItems# 110271
>> TBTreeTest::RandomInsertThreadSafe [GOOD]
>> TBTreeTest::DuplicateKeysInplace
|86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngress::IngressPrintDistribution [GOOD]
|86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest
|86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> NameserviceConfigValidatorTests::TestModifyHost [GOOD]
|86.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/libengines-reader-abstract.a
>> TQueueBackpressureTest::PerfInFlight
|86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest >>
TPDiskTest::TestRealFile [GOOD]
>> TPDiskTest::TestSIGSEGVInTUndelivered
>> TQueueBackpressureTest::CreateDelete [GOOD]
>> TPDiskTest::TestLogSpliceNonceJump [GOOD]
>> TPDiskTest::TestMultipleLogSpliceNonceJump
|86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> ResourceBrokerConfigValidatorTests::TestUnusedQueue [GOOD]
|86.6%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tx/scheme_board/ut_double_indexed/ydb-core-tx-scheme_board-ut_double_indexed
|86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest
|86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/backpressure/ut_client/ydb-core-blobstorage-backpressure-ut_client
>> TQueueBackpressureTest::IncorrectMessageId [GOOD]
|86.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/backpressure/ut_client/ydb-core-blobstorage-backpressure-ut_client
|86.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/backpressure/ut_client/ydb-core-blobstorage-backpressure-ut_client
|86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> ResourceBrokerConfigValidatorTests::TestZeroDefaultDuration [GOOD]
>> ShredPDisk::KillVDiskWhilePreShredding [GOOD]
>> ShredPDisk::KillVDiskWhileShredding
|86.7%| [TA] $(B)/ydb/core/blobstorage/vdisk/ingress/ut/test-results/unittest/{meta.json ... results_accumulator.log}
------- [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TCowBTreeTest::Alignment [GOOD]
Test command err:
Producer 0 worked for 0.2373503846 seconds
Producer 1 worked for 0.3334201802 seconds
Consumer 0 worked for 0.3707975757 seconds on a snapshot of size 20000
Consumer 1 worked for 0.5733982912 seconds on a snapshot of size 40000
Consumer 2 worked for 0.5556634659 seconds on a snapshot of size 60000
Consumer 3 worked for 0.928072037 seconds on a snapshot of size 80000
Consumers had 1199943 successful seeks
|86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest
|86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest
>> TPDiskTest::TestSIGSEGVInTUndelivered [GOOD]
>> TPDiskTest::TestPDiskOnDifferentKeys
>> TSectorMapPerformance::TestSSD1960GBRead1000MBOnFirstSector [GOOD]
>> TSectorMapPerformance::TestSSD1960GBWrite1000MBOnFirstSector
|86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest >> TQueueBackpressureTest::CreateDelete [GOOD]
|86.7%| [TA] $(B)/ydb/core/blobstorage/vdisk/hullop/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest >> TQueueBackpressureTest::IncorrectMessageId [GOOD]
>> TBTreeTest::DuplicateKeysInplace [GOOD]
>> TBTreeTest::DuplicateKeysThreadSafe
|86.7%| [TA] $(B)/ydb/core/cms/console/validators/ut/test-results/unittest/{meta.json ... results_accumulator.log}
>> TPDiskTest::TestPDiskOnDifferentKeys [GOOD]
>> TPDiskTest::WrongPDiskKey
|86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest
|86.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_group/ydb-core-blobstorage-ut_group
|86.7%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/ingress/ut/test-results/unittest/{meta.json ... results_accumulator.log}
>> TBlobStorageQueueTest::TMessageLost [GOOD]
|86.7%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hullop/ut/test-results/unittest/{meta.json ...
results_accumulator.log} |86.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_group/ydb-core-blobstorage-ut_group |86.7%| [TA] {RESULT} $(B)/ydb/core/cms/console/validators/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TQueueBackpressureTest::PerfTrivial >> TPDiskTest::WrongPDiskKey [GOOD] >> TPDiskTest::TestStartEncryptedOrPlainAndRestart |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest |86.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_vdisk2/ydb-core-blobstorage-ut_vdisk2 |86.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk2/ydb-core-blobstorage-ut_vdisk2 >> TBTreeTest::DuplicateKeysThreadSafe [GOOD] >> TBTreeTest::ShouldCallDtorsInplace |86.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_vdisk2/ydb-core-blobstorage-ut_vdisk2 |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest >> TBlobStorageQueueTest::TMessageLost [GOOD] |86.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/incrhuge/ut/ydb-core-blobstorage-incrhuge-ut >> TBTreeTest::ShouldCallDtorsInplace [GOOD] >> TBTreeTest::ShouldCallDtorsThreadSafe [GOOD] >> TBTreeTest::Concurrent |86.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/incrhuge/ut/ydb-core-blobstorage-incrhuge-ut |86.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/incrhuge/ut/ydb-core-blobstorage-incrhuge-ut |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest >> TBlobStorageHullDecimal::TestRoundToInt [GOOD] >> TBlobStorageHullDecimal::TestToUi64 [GOOD] |86.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_mirror3of4/ydb-core-blobstorage-ut_mirror3of4 |86.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_mirror3of4/ydb-core-blobstorage-ut_mirror3of4 |86.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_mirror3of4/ydb-core-blobstorage-ut_mirror3of4 >> TPDiskTest::TestMultipleLogSpliceNonceJump [GOOD] >> TPDiskTest::TestFakeErrorPDiskManyLogWrite |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest |86.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/opt/libcore-kqp-opt.a |86.7%| [AR] {RESULT} $(B)/ydb/core/kqp/opt/libcore-kqp-opt.a >> TPDiskRaces::KillOwnerWhileDeletingChunk [GOOD] >> TPDiskRaces::KillOwnerWhileDeletingChunkWithInflight >> THullDsHeapItTest::HeapForwardIteratorAllEntities >> TopicNameConverterTest::LegacyStyle [GOOD] >> TopicNameConverterTest::FirstClass [GOOD] >> TBTreeTest::Concurrent [GOOD] >> TBTreeTest::IteratorDestructor [GOOD] >> TCacheCacheTest::MoveToWarm [GOOD] >> TCacheCacheTest::EvictNext [GOOD] >> CompressionTest::lz4_generator_basic [GOOD] >> CompressionTest::lz4_generator_deflates >> THullDsHeapItTest::HeapForwardIteratorAllEntities [GOOD] >> THullDsHeapItTest::HeapBackwardIteratorAllEntities |86.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/libtx-columnshard-transactions.a |86.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/libtx-columnshard-transactions.a >> CompressionTest::lz4_generator_deflates [GOOD] >> StLog::Basic [GOOD] >> THullDsHeapItTest::HeapBackwardIteratorAllEntities [GOOD] |86.7%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> TBlobStorageHullDecimal::TestToUi64 [GOOD] |86.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/transactions/libtx-columnshard-transactions.a >> TSectorMapPerformance::TestSSD1960GBWrite1000MBOnFirstSector [GOOD] |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest >> TopicNameConverterForCPTest::BadLegacyTopics [GOOD] >> TopicNameConverterForCPTest::BadModernTopics [GOOD] |86.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_vdisk/ydb-core-blobstorage-ut_vdisk |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |86.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk/ydb-core-blobstorage-ut_vdisk |86.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_vdisk/ydb-core-blobstorage-ut_vdisk >> ShredPDisk::KillVDiskWhileShredding [GOOD] >> ShredPDisk::InitVDiskAfterShredding |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> THullDsHeapItTest::HeapBackwardIteratorAllEntities [GOOD] >> TYardTest::TestChunkWriteReadMultiple [GOOD] >> TYardTest::TestChunkWriteReadMultipleWithHddSectorMap >> TBlobStorageHullFresh::SimpleForward [GOOD] >> TBlobStorageHullFresh::SimpleBackwardMiddle [GOOD] |86.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/dq/comp_nodes/ut/ydb-library-yql-dq-comp_nodes-ut |86.8%| [LD] {RESULT} $(B)/ydb/library/yql/dq/comp_nodes/ut/ydb-library-yql-dq-comp_nodes-ut |86.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/dq/comp_nodes/ut/ydb-library-yql-dq-comp_nodes-ut |86.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> TopicNameConverterTest::FirstClass [GOOD] |86.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_group/ydb-core-blobstorage-ut_group |86.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/opt/libcore-kqp-opt.a |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest >> TQueueBackpressureTest::PerfTrivial [GOOD] >> SysViewQueryHistory::AggrMerge [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> TSectorMapPerformance::TestSSD1960GBWrite1000MBOnFirstSector [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFresh::SimpleBackwardMiddle [GOOD] |86.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> TopicNameConverterForCPTest::BadModernTopics [GOOD] |86.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/external_sources/object_storage/inference/ut/external_sources-object_storage-inference-ut |86.8%| [LD] {RESULT} $(B)/ydb/core/external_sources/object_storage/inference/ut/external_sources-object_storage-inference-ut |86.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/external_sources/object_storage/inference/ut/external_sources-object_storage-inference-ut |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest |86.8%| [TA] 
$(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/test-results/unittest/{meta.json ... results_accumulator.log} |86.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/base/ut/ydb-core-ymq-base-ut >> TBlobStorageDiskBlob::CreateFromDistinctParts |86.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/base/ut/ydb-core-ymq-base-ut |86.8%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/test-results/unittest/{meta.json ... results_accumulator.log} |86.8%| [LD] {RESULT} $(B)/ydb/core/ymq/base/ut/ydb-core-ymq-base-ut |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest >> TBlobStorageDiskBlob::CreateFromDistinctParts [GOOD] >> TBlobStorageDiskBlob::CreateIterate [GOOD] >> TPDiskTest::TestFakeErrorPDiskManyLogWrite [GOOD] >> TPDiskTest::TestFakeErrorPDiskLogRead |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest >> TQueueBackpressureTest::PerfTrivial [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::AggrMerge [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> StLog::Basic [GOOD] Test command err: Producer 0 worked for 0.193677885 seconds Producer 1 worked for 0.3803629492 seconds Consumer 0 worked for 1.074749839 seconds Consumer 1 worked for 0.7455534986 seconds Consumer 2 worked for 0.8963103913 seconds Consumer 3 worked for 1.165834555 seconds |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> TBlobStorageDiskBlob::CreateIterate [GOOD] |86.8%| [TA] $(B)/ydb/library/persqueue/topic_parser/ut/test-results/unittest/{meta.json ... results_accumulator.log} |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest >> TCowBTreeTest::RandomInsertThreadSafe [GOOD] >> TCowBTreeTest::SnapshotCascade [GOOD] >> TCowBTreeTest::SnapshotRollback |86.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut |86.8%| [LD] {RESULT} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut |86.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut |86.8%| [TA] {RESULT} $(B)/ydb/library/persqueue/topic_parser/ut/test-results/unittest/{meta.json ... results_accumulator.log} |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest >> ShredPDisk::InitVDiskAfterShredding [GOOD] >> ShredPDisk::ReinitVDiskWhilePreShredding |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest >> TPDiskTest::TestFakeErrorPDiskLogRead [GOOD] >> TPDiskTest::TestFakeErrorPDiskSysLogRead |86.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a |86.8%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a |86.8%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest |86.8%| [TA] $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |86.8%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TPDiskTest::TestFakeErrorPDiskSysLogRead [GOOD] >> TPDiskTest::TestFakeErrorPDiskManyChunkRead >> TIncrHugeBlobIdDict::Basic [GOOD] >> TBsVDiskOutOfSpace::WriteUntilOrangeZone [GOOD] >> TBsVDiskOutOfSpace::WriteUntilYellowZone >> TIncrHugeBasicTest::WriteReadDeleteEnum [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest |86.8%| [TA] $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/test-results/unittest/{meta.json ... results_accumulator.log} |86.8%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSizeMinusOne [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBlobIdDict::Basic [GOOD] >> TIncrHugeBasicTest::Recovery [GOOD] >> AuthTokenAllowed::FailOnListAndTokenWithEmptyUserSid [GOOD] >> AuthTokenAllowed::FailOnListAndTokenWithEmptyUserSidAndGroups [GOOD] >> AuthTokenAllowed::FailOnListAndNoToken [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBasicTest::WriteReadDeleteEnum [GOOD] >> TBsVDiskBadBlobId::PutBlobWithBadId >> TBsVDiskRepl1::ReplProxyKeepBits >> VDiskTest::HugeBlobWrite >> TLogoBlobTest::LogoBlobSort [GOOD] >> TMemoryStatsAggregator::Aggregate_Empty [GOOD] >> TMemoryStatsAggregator::Aggregate_Single [GOOD] >> TMemoryStatsAggregator::Aggregate_ExternalConsumption_CollidingHosts [GOOD] >> TSubgroupPartLayoutTest::CountEffectiveReplicas1of4 [GOOD] >> TSubgroupPartLayoutTest::CountEffectiveReplicas2of4 |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::FailOnListAndNoToken [GOOD] >> TStateStorageConfigCompareWithOld::TestReplicaActorIdAndSelectionIsSame1 >> ShredPDisk::ReinitVDiskWhilePreShredding [GOOD] >> ShredPDisk::ReinitVDiskWhileShredding |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBasicTest::Recovery [GOOD] |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSizeMinusOne [GOOD] >> TBsDbStat::ChaoticParallelWrite_DbStat ------- [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TMemoryStatsAggregator::Aggregate_ExternalConsumption_CollidingHosts [GOOD] Test command err: AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 ExternalConsumption: 101 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161 AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 ExternalConsumption: 101 SharedCacheConsumption: 111 SharedCacheLimit: 121 
MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161 AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 ExternalConsumption: 101 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161 AnonRss: 12 CGroupLimit: 22 MemTotal: 32 MemAvailable: 42 AllocatedMemory: 52 AllocatorCachesMemory: 62 HardLimit: 72 SoftLimit: 82 TargetUtilization: 92 ExternalConsumption: 102 SharedCacheConsumption: 112 SharedCacheLimit: 122 MemTableConsumption: 132 MemTableLimit: 142 QueryExecutionConsumption: 152 QueryExecutionLimit: 162 AnonRss: 13 CGroupLimit: 23 MemTotal: 33 MemAvailable: 43 AllocatedMemory: 53 AllocatorCachesMemory: 63 HardLimit: 73 SoftLimit: 83 TargetUtilization: 93 ExternalConsumption: 103 SharedCacheConsumption: 113 SharedCacheLimit: 123 MemTableConsumption: 133 MemTableLimit: 143 QueryExecutionConsumption: 153 QueryExecutionLimit: 163 AnonRss: 36 CGroupLimit: 66 MemTotal: 65 MemAvailable: 85 AllocatedMemory: 156 AllocatorCachesMemory: 186 HardLimit: 145 SoftLimit: 165 TargetUtilization: 185 ExternalConsumption: 194 SharedCacheConsumption: 336 SharedCacheLimit: 366 MemTableConsumption: 396 MemTableLimit: 426 QueryExecutionConsumption: 456 QueryExecutionLimit: 486 >> TBsVDiskManyPutGet::ManyPutGetWaitCompaction >> TYardTest::TestChunkReadRandomOffset [GOOD] >> TYardTest::TestChunkContinuity2 >> TQueueBackpressureTest::PerfInFlight [GOOD] |86.9%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/queue_schema.h_serialized.cpp >> TPageMapTest::TestRandom [GOOD] >> TPageMapTest::TestIntrusive [GOOD] >> TPageMapTest::TestSimplePointer [GOOD] >> TPageMapTest::TestSharedPointer [GOOD] >> TPageMapTest::TestSimplePointerFull >> TBsLocalRecovery::StartStopNotEmptyDB |86.9%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/actor/queue_schema.h_serialized.cpp >> TPageMapTest::TestSimplePointerFull [GOOD] >> TPriorityOperationQueueTest::ShouldNotStartUntilStart [GOOD] >> TBsLocalRecovery::WriteRestartReadHuge >> TYardTest::TestChunkContinuity2 [GOOD] >> TYardTest::TestChunkContinuity3000 >> TBsVDiskRangeHuge::Simple3PutRangeGetAllForwardFresh >> TCowBTreeTest::SnapshotRollback [GOOD] >> TCowBTreeTest::SnapshotRollbackEarlyErase >> TSubgroupPartLayoutTest::CountEffectiveReplicas2of4 [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingForwardFresh |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest >> TQueueBackpressureTest::PerfInFlight [GOOD] |86.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/combinatory/compaction.cpp |86.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/combinatory/compaction.cpp >> TBsVDiskExtreme::Simple3Put3GetFresh >> TBsVDiskBadBlobId::PutBlobWithBadId [GOOD] >> TBsVDiskBrokenPDisk::WriteUntilDeviceDeath >> TBsVDiskExtreme::SimpleGetFromEmptyDB |86.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/blobsan/blobsan >> TStateStorageConfigCompareWithOld::TestReplicaActorIdAndSelectionIsSame1 [GOOD] >> TStateStorageConfigCompareWithOld::TestReplicaActorIdAndSelectionIsSame2 |86.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/json_ut.cpp >> TBsVDiskGC::GCPutKeepIntoEmptyDB |86.9%| [LD] {RESULT} $(B)/ydb/tools/blobsan/blobsan >> TYardTest::TestChunkContinuity3000 [GOOD] >> TYardTest::TestChunkContinuity9000 >> 
TBsVDiskExtremeHandoffHuge::SimpleHndPut1SeqGetFresh |86.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_column_build/ut_column_build.cpp >> TBsVDiskRange::Simple3PutRangeGetNothingForwardFresh |86.9%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/granule/granule.h_serialized.cpp ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TSubgroupPartLayoutTest::CountEffectiveReplicas2of4 [GOOD] Test command err: testing erasure none main# 0 main# 1 Checked 2 cases, took 19628 us testing erasure block-4-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 main# 16 main# 17 main# 18 main# 19 main# 20 main# 21 main# 22 main# 23 main# 24 main# 25 main# 26 main# 27 main# 28 main# 29 main# 30 main# 31 main# 32 main# 33 main# 34 main# 35 main# 36 main# 37 main# 38 main# 39 main# 40 main# 41 main# 42 main# 43 main# 44 main# 45 main# 46 main# 47 main# 48 main# 49 main# 50 main# 51 main# 52 main# 53 main# 54 main# 55 main# 56 main# 57 main# 58 main# 59 main# 60 main# 61 main# 62 main# 63 Checked 262144 cases, took 2190279 us testing erasure mirror-3-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 Checked 512 cases, took 128 us testing erasure block-2-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 Checked 4096 cases, took 242063 us testing erasure mirror-3 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 Checked 64 cases, took 22 us testing erasure block-3-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 main# 16 main# 17 main# 18 main# 19 main# 20 main# 21 main# 22 main# 23 main# 24 main# 25 main# 26 main# 27 main# 28 main# 29 main# 30 main# 31 Checked 32768 cases, took 1837143 us testing erasure stripe-2-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 Checked 4096 cases, took 561158 us |86.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/json_ut.cpp |86.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/ydb-core-blobstorage-vdisk-synclog-ut |86.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/repl/ut/ydb-core-blobstorage-vdisk-repl-ut |86.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/ydb-core-blobstorage-vdisk-synclog-ut |86.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/repl/ut/ydb-core-blobstorage-vdisk-repl-ut >> TYardTest::TestChunkContinuity9000 [GOOD] >> TYardTest::TestChunkLock >> TBsVDiskRange::Simple3PutRangeGetAllForwardFresh |86.9%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/granule/granule.h_serialized.cpp |86.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/ydb-core-blobstorage-vdisk-synclog-ut |86.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/repl/ut/ydb-core-blobstorage-vdisk-repl-ut |86.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_column_build/ut_column_build.cpp >> TBsVDiskRepl3::SyncLogTest |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TPriorityOperationQueueTest::ShouldNotStartUntilStart [GOOD] |86.9%| [TA] $(B)/ydb/core/blobstorage/backpressure/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TBsVDiskGC::TGCManyVPutsDelTabletTest >> ShredPDisk::ReinitVDiskWhileShredding [GOOD] >> ShredPDisk::RetryPreShredCompactError >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingBackwardFresh >> TYardTest::TestChunkLock [GOOD] >> TYardTest::TestChunkUnlock |86.9%| [TA] {RESULT} $(B)/ydb/core/blobstorage/backpressure/ut/test-results/unittest/{meta.json ... results_accumulator.log} |86.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/ydb-core-blobstorage-vdisk-hulldb-cache_block-ut |86.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/ydb-core-blobstorage-vdisk-hulldb-cache_block-ut |86.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/ydb-core-blobstorage-vdisk-hulldb-cache_block-ut |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest |86.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/huge/ut/ydb-core-blobstorage-vdisk-huge-ut |86.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/huge/ut/ydb-core-blobstorage-vdisk-huge-ut |86.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/huge/ut/ydb-core-blobstorage-vdisk-huge-ut >> TYardTest::TestChunkUnlock [GOOD] >> TYardTest::TestChunkUnlockHarakiri >> TBsVDiskRangeHuge::Simple3PutRangeGetAllForwardFresh [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetAllForwardCompaction >> TBsVDiskRepl1::ReplProxyKeepBits [GOOD] >> TBsVDiskRepl2::ReplEraseDiskRestoreWOOneDisk >> TBsVDiskManyPutGet::ManyPutGetWaitCompaction [GOOD] >> TBsVDiskManyPutGet::ManyPutRangeGetFreshIndexOnly >> TIncrHugeBasicTest::Defrag >> TCowBTreeTest::SnapshotRollbackEarlyErase [GOOD] >> TCowBTreeTest::ShouldCallDtorsInplace >> TCowBTreeTest::ShouldCallDtorsInplace [GOOD] >> TCowBTreeTest::ShouldCallDtorsThreadSafe |86.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp >> TYardTest::TestChunkUnlockHarakiri [GOOD] >> TYardTest::TestChunkReserve >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingForwardFresh [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingForwardCompaction |86.9%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_reader/contexts.h_serialized.cpp >> TBsVDiskExtreme::Simple3Put3GetFresh [GOOD] >> TBsVDiskExtreme::Simple3Put3GetCompaction >> TCowBTreeTest::ShouldCallDtorsThreadSafe [GOOD] >> TEventPriorityQueueTest::TestPriority [GOOD] >> TFastTlsTest::IterationAfterThreadDeath >> TBsVDiskExtreme::SimpleGetFromEmptyDB [GOOD] >> TBsVDiskExtremeHandoff::SimpleHnd6Put1SeqGetFresh >> TFastTlsTest::IterationAfterThreadDeath [GOOD] >> TFastTlsTest::ManyThreadLocals [GOOD] >> TFastTlsTest::ManyConcurrentKeys >> SysViewQueryHistory::StableMerge2 [GOOD] >> TYardTest::TestChunkReserve [GOOD] >> TYardTest::TestCheckSpace >> TPDiskTest::TestFakeErrorPDiskManyChunkRead [GOOD] >> TPDiskTest::TestFakeErrorPDiskManyChunkWrite >> TBsVDiskBrokenPDisk::WriteUntilDeviceDeath [GOOD] >> TBsVDiskDefrag::DefragEmptyDB >> TBsVDiskExtremeHandoffHuge::SimpleHndPut1SeqGetFresh [GOOD] >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetFresh |86.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |86.9%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_reader/contexts.h_serialized.cpp |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TBsVDiskRange::Simple3PutRangeGetNothingForwardFresh [GOOD] >> 
TBsVDiskRange::Simple3PutRangeGetNothingForwardCompaction >> ShredPDisk::RetryPreShredCompactError [GOOD] >> ShredPDisk::RetryShredError >> Path::Name_EnglishAlphabet [GOOD] >> Path::Name_RussianAlphabet [GOOD] >> Path::Name_RussianAlphabet_SetLocale_C [GOOD] >> Path::Name_RussianAlphabet_SetLocale_C_UTF8 [GOOD] >> Path::Name_ExtraSymbols [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqGetAllFresh >> TBsVDiskGC::GCPutKeepIntoEmptyDB [GOOD] >> TBsVDiskGC::GCPutBarrierVDisk0NoSync >> SysViewQueryHistory::AggrMergeDedup [GOOD] >> TFastTlsTest::ManyConcurrentKeys [GOOD] >> TFifoQueueTest::ShouldPushPop [GOOD] >> TFragmentedBufferTest::TestIntersectedWriteRead [GOOD] >> TFragmentedBufferTest::TestIntersectedWriteRead2 [GOOD] >> TFragmentedBufferTest::TestIntersectedWriteRead3 [GOOD] >> TFragmentedBufferTest::Test3WriteRead [GOOD] >> TFragmentedBufferTest::Test5WriteRead [GOOD] >> TFragmentedBufferTest::TestGetMonolith [GOOD] >> TFragmentedBufferTest::CopyFrom [GOOD] >> TFragmentedBufferTest::ReadWriteRandom |86.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit |86.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit |86.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit >> TBsVDiskRange::Simple3PutRangeGetAllForwardFresh [GOOD] >> TBsVDiskRange::Simple3PutRangeGetAllForwardCompaction |86.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/granule/libengines-storage-granule.a |86.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/granule/libengines-storage-granule.a >> TYardTest::TestCheckSpace [GOOD] >> TYardTest::TestBootingState |86.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_pdiskfit/ut/ydb-core-blobstorage-ut_pdiskfit-ut |86.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_pdiskfit/ut/ydb-core-blobstorage-ut_pdiskfit-ut |86.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_pdiskfit/ut/ydb-core-blobstorage-ut_pdiskfit-ut >> TBsVDiskRepl3::SyncLogTest [GOOD] >> THugeMigration::ExtendMap_HugeBlobs >> SemiSortedDeltaAndVarLengthCodec::Random32 |86.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/granule/libengines-storage-granule.a >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingBackwardFresh [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingBackwardCompaction >> SemiSortedDeltaAndVarLengthCodec::Random32 [GOOD] >> SemiSortedDeltaAndVarLengthCodec::Random64 |86.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tools/blobsan/blobsan |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::StableMerge2 [GOOD] |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> Path::Name_ExtraSymbols [GOOD] >> SemiSortedDeltaAndVarLengthCodec::Random64 [GOOD] >> SemiSortedDeltaCodec::BasicTest32 [GOOD] >> SemiSortedDeltaCodec::BasicTest64 [GOOD] >> TIncrHugeBasicTest::WriteReadDeleteEnumRecover [GOOD] |86.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf |86.9%| [LD] {RESULT} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf |86.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::AggrMergeDedup [GOOD] >> TPDiskTest::TestFakeErrorPDiskManyChunkWrite [GOOD] >> TPDiskTest::PDiskRestart >> TBsVDiskGC::TGCManyVPutsDelTabletTest 
[GOOD] >> TBsVDiskManyPutGet::ManyPutGet >> RunLengthCodec::Random32 |86.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/olap/combinatory/libut-olap-combinatory.a |86.9%| [AR] {RESULT} $(B)/ydb/core/kqp/ut/olap/combinatory/libut-olap-combinatory.a >> VarLengthIntCodec::BasicTest64 [GOOD] >> VarLengthIntCodec::Random32 >> RunLengthCodec::Random32 [GOOD] >> RunLengthCodec::Random64 >> TStateStorageConfigCompareWithOld::TestReplicaActorIdAndSelectionIsSame2 [GOOD] >> TStateStorageConfigCompareWithOld::TestReplicaActorIdAndSelectionIsSame3 |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBasicTest::WriteReadDeleteEnumRecover [GOOD] >> TPDiskTest::PDiskRestart [GOOD] >> RunLengthCodec::Random64 [GOOD] >> SemiSortedDeltaAndVarLengthCodec::BasicTest32 [GOOD] >> SemiSortedDeltaAndVarLengthCodec::BasicTest64 [GOOD] >> TPDiskTest::PDiskRestartManyLogWrites >> VarLengthIntCodec::Random32 [GOOD] >> VarLengthIntCodec::Random64 |87.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/ut/olap/combinatory/libut-olap-combinatory.a >> VarLengthIntCodec::Random64 [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> SemiSortedDeltaCodec::BasicTest64 [GOOD] >> ShredPDisk::RetryShredError [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingForwardCompaction [GOOD] >> TBsVDiskRepl1::ReplProxyData >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetFresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGetAllFresh >> TBsVDiskExtremeHandoff::SimpleHnd6Put1SeqGetFresh [GOOD] >> TBsVDiskExtremeHandoff::SimpleHnd6Put1SeqGetCompaction >> TBlobStorageSyncLogKeeper::CutLog_EntryPointNewFormat [GOOD] >> TBlobStorageSyncLogMem::EmptyMemRecLog [GOOD] >> TBlobStorageSyncLogMem::FilledIn1 [GOOD] >> TBlobStorageSyncLogMem::EmptyMemRecLogPutAfterSnapshot [GOOD] >> TBsVDiskManyPutGet::ManyPutRangeGetFreshIndexOnly [GOOD] >> TBsVDiskManyPutGet::ManyPutRangeGetCompactionIndexOnly >> TBsVDiskRangeHuge::Simple3PutRangeGetAllForwardCompaction [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetAllBackwardFresh >> TBsVDiskExtreme::Simple3Put3GetCompaction [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsOkFresh >> TPDiskTest::PDiskRestartManyLogWrites [GOOD] >> TPDiskTest::TestLogSpliceChunkReserve >> TBlobStorageSyncLogDsk::SeveralChunks [GOOD] >> TBlobStorageSyncLogDsk::OverlappingPages_OnePageIndexed [GOOD] >> TBlobStorageSyncLogDsk::OverlappingPages_SeveralPagesIndexed [GOOD] >> TBlobStorageSyncLogDsk::TrimLog [GOOD] >> TBsVDiskDefrag::DefragEmptyDB [GOOD] >> TBsVDiskDefrag::Defrag50PercentGarbage |87.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_reader/libtx-columnshard-data_reader.a |87.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_reader/libtx-columnshard-data_reader.a >> TBsVDiskExtreme::Simple3Put1SeqGetAllFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqGetAllCompaction >> TBsVDiskExtremeHuge::Simple3Put3GetFresh |87.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_reader/libtx-columnshard-data_reader.a >> TBsVDiskRange::Simple3PutRangeGetAllForwardCompaction [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardCompaction |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> SemiSortedDeltaAndVarLengthCodec::BasicTest64 [GOOD] >> TBsVDiskRange::Simple3PutRangeGetNothingForwardCompaction [GOOD] >> TBsVDiskRange::Simple3PutRangeGetNothingBackwardFresh >> TBsVDiskGC::GCPutBarrierVDisk0NoSync [GOOD] >> 
TBsVDiskGC::GCPutBarrierSync |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> VarLengthIntCodec::Random64 [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingBackwardCompaction [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleForwardFresh |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> ShredPDisk::RetryShredError [GOOD] Test command err: /home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:390 /home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:390 /home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:390 /home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:390 /home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:390 /home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:390 /home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:390 /home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:390 /home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:390 |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> TBlobStorageSyncLogDsk::TrimLog [GOOD] >> TBlobStorageReplRecoveryMachine::BasicFunctionality >> TBlobStorageSyncLogData::SerializeParseEmpty1_Proto [GOOD] >> TBlobStorageSyncLogData::SerializeParseEmpty2_Proto [GOOD] >> SemiSortedDeltaCodec::Random32 |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> TBlobStorageSyncLogMem::EmptyMemRecLogPutAfterSnapshot [GOOD] |87.0%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/read_balancer__balancing.h_serialized.cpp >> SemiSortedDeltaCodec::Random32 [GOOD] >> SemiSortedDeltaCodec::Random64 >> TYardTest::TestBootingState [GOOD] >> TYardTest::Test3AsyncLog >> SemiSortedDeltaCodec::Random64 [GOOD] |87.0%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/read_balancer__balancing.h_serialized.cpp >> TFragmentedBufferTest::ReadWriteRandom [GOOD] >> TBsVDiskRepl1::ReplProxyData [GOOD] >> TBsVDiskRepl1::ReplEraseDiskRestore >> CodecsTest::Basic [GOOD] >> CodecsTest::NaturalNumbersAndZero >> TBsVDiskExtreme::Simple3Put1SeqSubsOkFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsOkCompaction >> CodecsTest::NaturalNumbersAndZero [GOOD] >> CodecsTest::LargeAndRepeated [GOOD] >> NaiveFragmentWriterTest::Basic [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGetAllFresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGetAllCompaction >> TBlobStorageSyncLogDsk::AddByOne [GOOD] >> TBlobStorageSyncLogDsk::AddFive [GOOD] >> TBlobStorageSyncLogDsk::ComplicatedSerializeWithOverlapping [GOOD] >> TBlobStorageSyncLogDsk::DeleteChunks [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetAllBackwardFresh [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetAllBackwardCompaction |87.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp >> TBlobStorageReplRecoveryMachine::BasicFunctionality [GOOD] >> THugeMigration::ExtendMap_HugeBlobs [GOOD] >> THugeMigration::ExtendMap_SmallBlobsBecameHuge >> TBsVDiskExtremeHuge::Simple3Put3GetFresh [GOOD] >> 
TBsVDiskExtremeHuge::Simple3Put3GetCompaction |87.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp >> TBsVDiskExtremeHandoff::SimpleHnd6Put1SeqGetCompaction [GOOD] >> TBsVDiskExtremeHandoff::SimpleHnd2Put1GetFresh >> TBsVDiskRange::Simple3PutRangeGetNothingBackwardFresh [GOOD] >> TBsVDiskRange::Simple3PutRangeGetNothingBackwardCompaction >> TYardTest::Test3AsyncLog [GOOD] >> TYardTest::TestChunkRecommit |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> NaiveFragmentWriterTest::Basic [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> SemiSortedDeltaCodec::Random64 [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqGetAllCompaction [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqGet2Fresh >> TStateStorageConfigCompareWithOld::TestReplicaActorIdAndSelectionIsSame3 [GOOD] >> TStateStorageConfigCompareWithOld::TestReplicaActorIdAndSelectionIsSame4 [GOOD] >> TBlobStorageHullHugeChain::HeapAllocSmall [GOOD] >> TBlobStorageHullHugeHeap::AllocateAllFromOneChunk [GOOD] >> AuthTokenAllowed::PassOnEmptyListAndNoToken [GOOD] >> AuthTokenAllowed::PassOnEmptyListAndInvalidTokenSerialized [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardCompaction [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardFresh |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> TBlobStorageSyncLogDsk::DeleteChunks [GOOD] >> NaiveFragmentWriterTest::Long >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleForwardFresh [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleForwardCompaction >> TYardTest::TestChunkRecommit [GOOD] >> TYardTest::TestChunkRestartRecommit >> TBsVDiskManyPutGet::ManyPutGet [GOOD] >> TBsVDiskManyPutGet::ManyMultiSinglePutGet >> THugeHeapCtxTests::Basic [GOOD] >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSizePlusOne [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest >> TBlobStorageReplRecoveryMachine::BasicFunctionality [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeHeap::AllocateAllFromOneChunk [GOOD] >> TPDiskTest::TestLogSpliceChunkReserve [GOOD] >> TPDiskTest::SpaceColor [GOOD] >> TPDiskTest::RecreateWithInvalidPDiskKey >> TBlobStorageHullHugeHeap::AllocateAllReleaseAll [GOOD] >> TBlobStorageHullHugeHeap::AllocateAllSerializeDeserializeReleaseAll [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TFragmentedBufferTest::ReadWriteRandom [GOOD] >> TBlobStorageSyncLogMem::FilledIn1PutAfterSnapshot >> TBlobStorageSyncLogMem::FilledIn1PutAfterSnapshot [GOOD] >> TBlobStorageSyncLogMem::ManyLogoBlobsPerf |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::PassOnEmptyListAndInvalidTokenSerialized [GOOD] >> NaiveFragmentWriterTest::Long [GOOD] >> ReorderCodecTest::Basic |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> THugeHeapCtxTests::Basic [GOOD] >> ReorderCodecTest::Basic [GOOD] >> RunLengthCodec::BasicTest32 [GOOD] >> RunLengthCodec::BasicTest64 [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TStateStorageConfigCompareWithOld::TestReplicaActorIdAndSelectionIsSame4 [GOOD] >> TPDiskTest::RecreateWithInvalidPDiskKey [GOOD] >> TPDiskTest::SmallDisk10Gb >> TBlobStorageBlocksCacheTest::PutDeepIntoPast >> 
TBsVDiskManyPutGet::ManyPutRangeGetCompactionIndexOnly [GOOD] >> TBsVDiskManyPutGet::ManyPutRangeGet2ChannelsIndexOnly >> TYardTest::TestChunkRestartRecommit [GOOD] >> TYardTest::TestChunkDelete >> TopTest::Test2 [GOOD] >> TBlobStorageBlocksCacheTest::PutDeepIntoPast [GOOD] >> TBsVDiskExtremeHandoff::SimpleHnd2Put1GetFresh [GOOD] >> TBsVDiskExtremeHandoff::SimpleHnd2Put1GetCompaction >> TBsVDiskExtremeHuge::Simple3Put1SeqGetAllCompaction [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Fresh >> TBsVDiskGC::GCPutBarrierSync [GOOD] >> TBsVDiskGC::GCPutKeepBarrierSync |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSizePlusOne [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsOkCompaction [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorFresh >> TBsVDiskRangeHuge::Simple3PutRangeGetAllBackwardCompaction [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardCompaction |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeHeap::AllocateAllSerializeDeserializeReleaseAll [GOOD] >> TPDiskRaces::KillOwnerWhileDeletingChunkWithInflight [GOOD] >> TPDiskRaces::KillOwnerWhileDeletingChunkWithInflightMock >> TBsVDiskExtremeHuge::Simple3Put3GetCompaction [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsOkFresh |87.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/libcore-ymq-actor.a |87.0%| [AR] {RESULT} $(B)/ydb/core/ymq/actor/libcore-ymq-actor.a >> TBsVDiskExtreme::Simple3Put1SeqGet2Fresh [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqGet2Compaction >> TBlobStorageHullHugeHeap::RecoveryMode [GOOD] >> TBlobStorageHullHugeHeap::BorderValues [GOOD] >> TPDiskTest::SmallDisk10Gb [GOOD] >> TPDiskTest::SuprisinglySmallDisk >> TYardTest::TestChunkWriteReadMultipleWithHddSectorMap [GOOD] >> TYardTest::TestChunkWriteReadWhole >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardFresh [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardCompaction |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TopTest::Test2 [GOOD] >> TBsVDiskRange::Simple3PutRangeGetNothingBackwardCompaction [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardFresh >> TYardTest::TestChunkDelete [GOOD] >> TYardTest::TestChunkForget |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> RunLengthCodec::BasicTest64 [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::PutDeepIntoPast [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleForwardCompaction [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardFresh >> TBlobStorageHullHugeHeap::WriteRestore >> TBlobStorageHullHugeHeap::WriteRestore [GOOD] >> TBlobStorageHullHugeKeeperPersState::SerializeParse |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |87.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/tools/dqrun/dqrun |87.0%| [LD] {RESULT} $(B)/ydb/library/yql/tools/dqrun/dqrun |87.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/tools/dqrun/dqrun >> TBlobStorageHullHugeKeeperPersState::SerializeParse [GOOD] >> TYardTest::TestChunkForget [GOOD] >> TYardTest::Test3HugeAsyncLog >> TBlobStorageHullHugeChain::AllocFreeAllocTest [GOOD] >> TBlobStorageHullHugeChain::AllocFreeRestartAllocTest >> 
TBlobStorageHullHugeChain::AllocFreeRestartAllocTest [GOOD] >> TBlobStorageBlocksCacheTest::PutIntoPast [GOOD] |87.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/actor/libcore-ymq-actor.a |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeHeap::BorderValues [GOOD] >> THugeMigration::ExtendMap_SmallBlobsBecameHuge [GOOD] >> THugeMigration::RollbackMap_HugeBlobs >> TPDiskTest::SuprisinglySmallDisk [GOOD] >> TPDiskTest::PDiskSlotSizeInUnits >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorCompaction |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeKeeperPersState::SerializeParse [GOOD] >> TYardTest::TestChunkWriteReadWhole [GOOD] >> TYardTest::TestChunkWriteReadWholeWithHddSectorMap >> TBsVDiskExtremeHandoff::SimpleHnd2Put1GetCompaction [GOOD] >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetCompaction >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Fresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Compaction |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeChain::AllocFreeRestartAllocTest [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::PutIntoPast [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsOkFresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsOkCompaction >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardCompaction [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardFresh [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |87.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_extract_predicate_unpack_ut.cpp >> TBsVDiskManyPutGet::ManyMultiSinglePutGet [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardCompaction [GOOD] >> TBsVDiskManyPutGet::ManyMultiPutGet >> TBlobStorageBlocksCacheTest::DeepInFlight [GOOD] >> TBlobStorageBlocksCacheTest::MultipleTables [GOOD] |87.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_extract_predicate_unpack_ut.cpp |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBsVDiskExtreme::Simple3Put1SeqGet2Compaction [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingPartFresh >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardFresh [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest |87.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/libydb-core-persqueue.a >> HullReplWriteSst::Basic |87.0%| [AR] {RESULT} $(B)/ydb/core/persqueue/libydb-core-persqueue.a >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSize [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest >> TYardTest::Test3HugeAsyncLog [GOOD] >> TYardTest::TestChunkFlushReboot |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::DeepInFlight [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardCompaction [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardCompaction [GOOD] |87.1%| [TM] 
{asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorCompaction [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::MultipleTables [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest >> TYardTest::TestChunkWriteReadWholeWithHddSectorMap [GOOD] >> TYardTest::TestChunkWrite20Read02 >> TBsLocalRecovery::WriteRestartReadHuge [GOOD] >> TBsLocalRecovery::WriteRestartReadHugeIncreased >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetCompaction [GOOD] >> TYardTest::TestChunkWrite20Read02 [GOOD] >> TYardTest::TestChunkUnlockRestart >> TYardTest::TestChunkFlushReboot [GOOD] >> TYardTest::TestAllocateAllChunks |87.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/libydb-core-persqueue.a |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardFresh [GOOD] >> TLogoBlobIdHashTest::SimpleTest [GOOD] >> TLogoBlobIdHashTest::SimpleTestPartIdDoesNotMatter [GOOD] >> TLocalDbTest::BackupTaskNameChangedAtLoadTime [GOOD] >> TLogoBlobIdHashTest::SimpleTestBlobSizeDoesNotMatter [GOOD] >> TGuardianImpl::FollowerTrackerDuplicates [GOOD] >> TChainLayoutBuilder::TestProdConf [GOOD] >> TChainLayoutBuilder::TestMilestoneId [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Compaction [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSize [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest >> TYardTest::TestChunkUnlockRestart [GOOD] >> TYardTest::TestHttpInfo |87.1%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/executer_actor/kqp_executer.h_serialized.cpp >> TBsVDiskManyPutGet::ManyPutRangeGet2ChannelsIndexOnly [GOOD] >> TBsVDiskManyPutGetCheckSize::ManyPutGetCheckSize >> TYardTest::TestAllocateAllChunks [GOOD] >> TYardTest::TestChunkDeletionWhileWriting >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsOkCompaction [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorFresh >> TBlobStorageHullHugeChain::HeapAllocLargeStandard [GOOD] >> TBlobStorageHullHugeChain::HeapAllocLargeNonStandard [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetCompaction [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorCompaction [GOOD] >> TopTest::Test1 [GOOD] >> TYardTest::TestHttpInfo [GOOD] >> TYardTest::TestHttpInfoFileDoesntExist >> TBsVDiskExtreme::Simple3Put1GetMissingPartFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingPartCompaction |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest |87.1%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/executer_actor/kqp_executer.h_serialized.cpp |87.1%| [TA] $(B)/ydb/core/util/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TBlobStorageSyncLogMem::ManyLogoBlobsPerf [GOOD] >> TBlobStorageSyncLogMem::ManyLogoBlobsBuildSwapSnapshot |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TGuardianImpl::FollowerTrackerDuplicates [GOOD] >> AuthTokenAllowed::PassOnEmptyListAndTokenWithEmptyUserSidAndGroups [GOOD] >> AuthTokenAllowed::PassOnListMatchGroupSid [GOOD] >> TYardTest::TestHttpInfoFileDoesntExist [GOOD] >> TYardTest::TestFirstRecordToKeep |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Compaction [GOOD] >> TBlobStorageSyncLogMem::ManyLogoBlobsBuildSwapSnapshot [GOOD] >> VarLengthIntCodec::BasicTest32 [GOOD] >> THugeMigration::RollbackMap_HugeBlobs [GOOD] >> TMonitoring::ReregisterTest >> TBsVDiskGC::GCPutKeepBarrierSync [GOOD] >> TBsVDiskGC::GCPutManyBarriersNoSync >> TLogoBlobIdHashTest::SimpleTestWithDifferentTabletId [GOOD] >> TLogoBlobIdHashTest::SimpleTestWithDifferentSteps [GOOD] >> TLogoBlobIdHashTest::SimpleTestWithDifferentChannel [GOOD] >> TLogoBlobTest::LogoBlobParse [GOOD] >> TLogoBlobTest::LogoBlobCompare [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TChainLayoutBuilder::TestMilestoneId [GOOD] |87.1%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.h_serialized.cpp |87.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_trace.cpp >> TYardTest::TestChunkDeletionWhileWriting [GOOD] >> TYardTest::TestChunkPriorityBlock |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeChain::HeapAllocLargeNonStandard [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TopTest::Test1 [GOOD] |87.1%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.h_serialized.cpp |87.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_trace.cpp >> TMonitoring::ReregisterTest [GOOD] >> AuthDatabaseAdmin::FailOnEmptyOwnerAndTokenWithEmptyUserSidAndGroups [GOOD] >> AuthDatabaseAdmin::FailOnOwnerAndEmptyToken [GOOD] >> AuthDatabaseAdmin::FailOnOwnerAndNoToken [GOOD] >> TBlobStorageBlocksCacheTest::Repeat >> TMemoryStatsAggregator::Aggregate_Summarize_ExternalConsumption_DifferentHosts [GOOD] >> TMemoryStatsAggregator::Aggregate_Summarize_NoExternalConsumption_DifferentHosts [GOOD] >> TMemoryStatsAggregator::Aggregate_Summarize_ExternalConsumption_OneHost [GOOD] >> TMemoryStatsAggregator::Aggregate_Summarize_NoExternalConsumption_OneHost [GOOD] >> TBlobStorageBlocksCacheTest::Repeat [GOOD] >> Path::CanonizeOld [GOOD] >> Path::CanonizeFast [GOOD] >> Path::CanonizedStringIsSame1 [GOOD] >> Path::CanonizedStringIsSame2 [GOOD] >> Path::Name_AllSymbols [GOOD] >> TBlobStorageGroupTypeTest::TestCorrectLayout [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::PassOnListMatchGroupSid [GOOD] >> TBlobStorageGroupTypeTest::OutputInfoAboutErasureSpecies [GOOD] >> TGuardianImpl::FollowerTracker [GOOD] >> Path::Name_WeirdLocale_RegularName [GOOD] >> Path::Name_WeirdLocale_WeirdName [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TLogoBlobTest::LogoBlobCompare [GOOD] >> TYardTest::TestChunkPriorityBlock 
[GOOD] >> TYardTest::TestFirstRecordToKeep [GOOD] >> TYardTest::TestDamagedFirstRecordToKeep >> TBlobStorageGroupInfoTest::SubgroupPartLayout [GOOD] >> TStateStorageConfig::TestReplicaSelection |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::Repeat [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TMemoryStatsAggregator::Aggregate_Summarize_NoExternalConsumption_OneHost [GOOD] Test command err: AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 ExternalConsumption: 101 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161 AnonRss: 12 CGroupLimit: 22 MemTotal: 32 MemAvailable: 42 AllocatedMemory: 52 AllocatorCachesMemory: 62 HardLimit: 72 SoftLimit: 82 TargetUtilization: 92 ExternalConsumption: 102 SharedCacheConsumption: 112 SharedCacheLimit: 122 MemTableConsumption: 132 MemTableLimit: 142 QueryExecutionConsumption: 152 QueryExecutionLimit: 162 AnonRss: 13 CGroupLimit: 23 MemTotal: 33 MemAvailable: 43 AllocatedMemory: 53 AllocatorCachesMemory: 63 HardLimit: 73 SoftLimit: 83 TargetUtilization: 93 ExternalConsumption: 103 SharedCacheConsumption: 113 SharedCacheLimit: 123 MemTableConsumption: 133 MemTableLimit: 143 QueryExecutionConsumption: 153 QueryExecutionLimit: 163 AnonRss: 36 CGroupLimit: 66 MemTotal: 96 MemAvailable: 126 AllocatedMemory: 156 AllocatorCachesMemory: 186 HardLimit: 216 SoftLimit: 246 TargetUtilization: 276 ExternalConsumption: 306 SharedCacheConsumption: 336 SharedCacheLimit: 366 MemTableConsumption: 396 MemTableLimit: 426 QueryExecutionConsumption: 456 QueryExecutionLimit: 486 AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161 AnonRss: 12 CGroupLimit: 22 MemTotal: 32 MemAvailable: 42 AllocatedMemory: 52 AllocatorCachesMemory: 62 HardLimit: 72 SoftLimit: 82 TargetUtilization: 92 SharedCacheConsumption: 112 SharedCacheLimit: 122 MemTableConsumption: 132 MemTableLimit: 142 QueryExecutionConsumption: 152 QueryExecutionLimit: 162 AnonRss: 13 CGroupLimit: 23 MemTotal: 33 MemAvailable: 43 AllocatedMemory: 53 AllocatorCachesMemory: 63 HardLimit: 73 SoftLimit: 83 TargetUtilization: 93 SharedCacheConsumption: 113 SharedCacheLimit: 123 MemTableConsumption: 133 MemTableLimit: 143 QueryExecutionConsumption: 153 QueryExecutionLimit: 163 AnonRss: 36 CGroupLimit: 66 MemTotal: 96 MemAvailable: 126 AllocatedMemory: 156 AllocatorCachesMemory: 186 HardLimit: 216 SoftLimit: 246 TargetUtilization: 276 SharedCacheConsumption: 336 SharedCacheLimit: 366 MemTableConsumption: 396 MemTableLimit: 426 QueryExecutionConsumption: 456 QueryExecutionLimit: 486 AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 ExternalConsumption: 101 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161 AnonRss: 12 CGroupLimit: 22 MemTotal: 32 MemAvailable: 42 AllocatedMemory: 52 AllocatorCachesMemory: 62 HardLimit: 72 SoftLimit: 82 TargetUtilization: 92 
ExternalConsumption: 102 SharedCacheConsumption: 112 SharedCacheLimit: 122 MemTableConsumption: 132 MemTableLimit: 142 QueryExecutionConsumption: 152 QueryExecutionLimit: 162 AnonRss: 13 CGroupLimit: 23 MemTotal: 33 MemAvailable: 43 AllocatedMemory: 53 AllocatorCachesMemory: 63 HardLimit: 73 SoftLimit: 83 TargetUtilization: 93 ExternalConsumption: 103 SharedCacheConsumption: 113 SharedCacheLimit: 123 MemTableConsumption: 133 MemTableLimit: 143 QueryExecutionConsumption: 153 QueryExecutionLimit: 163 AnonRss: 36 CGroupLimit: 66 MemTotal: 33 MemAvailable: 43 AllocatedMemory: 156 AllocatorCachesMemory: 186 HardLimit: 73 SoftLimit: 83 TargetUtilization: 93 ExternalConsumption: 80 SharedCacheConsumption: 336 SharedCacheLimit: 366 MemTableConsumption: 396 MemTableLimit: 426 QueryExecutionConsumption: 456 QueryExecutionLimit: 486 AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 ExternalConsumption: 101 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161 AnonRss: 12 CGroupLimit: 22 MemTotal: 32 MemAvailable: 42 AllocatedMemory: 52 AllocatorCachesMemory: 62 HardLimit: 72 SoftLimit: 82 TargetUtilization: 92 ExternalConsumption: 102 SharedCacheConsumption: 112 SharedCacheLimit: 122 MemTableConsumption: 132 MemTableLimit: 142 QueryExecutionConsumption: 152 QueryExecutionLimit: 162 AnonRss: 13 CGroupLimit: 23 MemTotal: 33 MemAvailable: 43 AllocatedMemory: 53 AllocatorCachesMemory: 63 HardLimit: 73 SoftLimit: 83 TargetUtilization: 93 ExternalConsumption: 103 SharedCacheConsumption: 113 SharedCacheLimit: 123 MemTableConsumption: 133 MemTableLimit: 143 QueryExecutionConsumption: 153 QueryExecutionLimit: 163 AnonRss: 36 CGroupLimit: 66 MemTotal: 96 MemAvailable: 126 AllocatedMemory: 156 AllocatorCachesMemory: 186 HardLimit: 216 SoftLimit: 246 TargetUtilization: 276 SharedCacheConsumption: 336 SharedCacheLimit: 366 MemTableConsumption: 396 MemTableLimit: 426 QueryExecutionConsumption: 456 QueryExecutionLimit: 486 >> TBsVDiskRepl2::ReplEraseDiskRestoreWOOneDisk [GOOD] >> TBsVDiskRepl3::ReplEraseDiskRestoreMultipart >> TBsVDiskManyPutGet::ManyMultiPutGet [GOOD] >> TBsVDiskManyPutGet::ManyMultiPutGetWithLargeBatch |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> VarLengthIntCodec::BasicTest32 [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> Path::Name_WeirdLocale_WeirdName [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TMonitoring::ReregisterTest [GOOD] Test command err: RUN TEST SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> Path::Name_AllSymbols [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthDatabaseAdmin::FailOnOwnerAndNoToken [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorFresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorCompaction >> TBsVDiskGC::GCPutManyBarriersNoSync [GOOD] >> TBsVDiskGC::TGCManyVPutsCompactGCAllTest |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoTest::SubgroupPartLayout [GOOD] |87.1%| [TA] 
$(B)/ydb/core/blobstorage/vdisk/query/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TBsVDiskExtreme::Simple3Put1GetMissingPartCompaction [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> TYardTest::TestChunkPriorityBlock [GOOD] |87.1%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/column_engine_logs.h_serialized.cpp |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtreme::Simple3Put1GetMissingPartCompaction [GOOD] |87.1%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/column_engine_logs.h_serialized.cpp |87.1%| [TA] $(B)/ydb/core/blobstorage/vdisk/huge/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> AuthTokenAllowed::PassOnEmptyListAndTokenWithEmptyUserSid [GOOD] >> AuthTokenAllowed::PassOnEmptyListAndToken [GOOD] >> TYardTest::TestDamagedFirstRecordToKeep [GOOD] >> TYardTest::TestDamageAtTheBoundary >> TBsVDiskManyPutGet::ManyMultiPutGetWithLargeBatch [GOOD] >> TStateStorageConfig::TestReplicaSelection [GOOD] >> TStateStorageConfig::TestMultiReplicaFailDomains >> SysViewQueryHistory::StableMerge [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::PassOnEmptyListAndToken [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskManyPutGet::ManyMultiPutGetWithLargeBatch [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorCompaction [GOOD] >> TBlobStorageBlocksCacheTest::LegacyAndModern [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::StableMerge [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorCompaction [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::LegacyAndModern [GOOD] >> TBsVDiskRepl1::ReplEraseDiskRestore [GOOD] >> TBsVDiskRepl1::ReadOnly >> FormatCSV::Instants [GOOD] >> FormatCSV::EmptyData >> TBatchedVecTest::TestToStringInt [GOOD] >> TableIndex::CompatibleSecondaryIndex [GOOD] >> TBatchedVecTest::TestOutputTOutputType [GOOD] >> TableIndex::NotCompatibleSecondaryIndex >> BufferWithGaps::Basic [GOOD] >> BufferWithGaps::IsReadable >> FormatCSV::EmptyData [GOOD] >> FormatCSV::Common |87.1%| [TA] $(B)/ydb/core/blobstorage/vdisk/synclog/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> BufferWithGaps::IsReadable [GOOD] >> TableIndex::NotCompatibleSecondaryIndex [GOOD] >> PtrTest::Test1 [GOOD] >> TableIndex::CompatibleVectorIndex [GOOD] >> TableIndex::NotCompatibleVectorIndex >> TableIndex::NotCompatibleVectorIndex [GOOD] >> FormatCSV::Common [GOOD] >> FormatCSV::Strings [GOOD] >> FormatCSV::Nulls [GOOD] >> TVDiskDefrag::HugeHeapDefragmentationRequired [GOOD] >> TTrackable::TVector >> ConsoleDumper::Basic >> TTrackable::TVector [GOOD] >> TTrackable::TList [GOOD] >> TTrackable::TString [GOOD] >> ConsoleDumper::Basic [GOOD] >> ConsoleDumper::CoupleMerge >> ConsoleDumper::CoupleMerge [GOOD] >> ConsoleDumper::CoupleOverwrite [GOOD] >> ConsoleDumper::CoupleMergeOverwriteRepeated [GOOD] >> ConsoleDumper::ReverseMerge [GOOD] >> ConsoleDumper::ReverseOverwrite [GOOD] >> ConsoleDumper::ReverseMergeOverwriteRepeated [GOOD] >> ConsoleDumper::Different [GOOD] >> ConsoleDumper::SimpleNode >> ConsoleDumper::SimpleNode [GOOD] >> ConsoleDumper::JoinSimilar [GOOD] >> ConsoleDumper::DontJoinDifferent [GOOD] >> ConsoleDumper::SimpleTenant |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TableIndex::NotCompatibleVectorIndex [GOOD] >> TPDiskTest::PDiskSlotSizeInUnits [FAIL] >> TPDiskTest::TestChunkWriteCrossOwner >> MdbEndpoingGenerator::Legacy [GOOD] >> MdbEndpoingGenerator::Generic_NoTransformHost [GOOD] >> MdbEndpoingGenerator::Generic_WithTransformHost [GOOD] >> ConsoleDumper::SimpleTenant [GOOD] >> ConsoleDumper::SimpleNodeTenant [GOOD] >> ConsoleDumper::SimpleHostId [GOOD] >> ConsoleDumper::SimpleNodeId >> ConsoleDumper::SimpleNodeId [GOOD] >> ConsoleDumper::DontJoinNodeTenant [GOOD] >> ConsoleDumper::JoinMultipleSimple [GOOD] >> ConsoleDumper::MergeNode [GOOD] >> ConsoleDumper::MergeOverwriteRepeatedNode >> TBsVDiskGC::TGCManyVPutsCompactGCAllTest [GOOD] >> ConsoleDumper::MergeOverwriteRepeatedNode [GOOD] >> ConsoleDumper::Ordering [GOOD] >> ConsoleDumper::IgnoreUnmanagedItems >> ConsoleDumper::IgnoreUnmanagedItems [GOOD] >> YamlConfig::CollectLabels |87.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/blobstorage/base/ut/gtest >> PtrTest::Test1 [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TTrackable::TString [GOOD] |87.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/defrag/ut/unittest >> TVDiskDefrag::HugeHeapDefragmentationRequired [GOOD] >> TPDiskTest::TestChunkWriteCrossOwner [GOOD] >> TPDiskTest::PlainChunksWriteReadALot >> YamlConfig::CollectLabels [GOOD] >> YamlConfig::MaterializeSpecificConfig [GOOD] >> YamlConfig::MaterializeAllConfigSimple [GOOD] >> YamlConfig::MaterializeAllConfigs ------- [TS] {asan, default-linux-x86_64, release} ydb/core/io_formats/arrow/scheme/ut/unittest >> FormatCSV::Nulls [GOOD] Test command err: 12000000 Cannot read CSV: no columns specified Cannot read CSV: Invalid: Empty CSV file d'Artagnan '"' Jeanne d'Arc "'" 'd'Artagnan' ''"'' 'Jeanne d'Arc' '"'"' d'Artagnan '"' Jeanne d'Arc "'" src: ,"","" ,"","" ,, parsed: ᴺᵁᴸᴸ,, ᴺᵁᴸᴸ,, ᴺᵁᴸᴸ,ᴺᵁᴸᴸ,ᴺᵁᴸᴸ src: ,"","" ,"","" ,, parsed: ᴺᵁᴸᴸ,, ᴺᵁᴸᴸ,, ᴺᵁᴸᴸ,ᴺᵁᴸᴸ,ᴺᵁᴸᴸ src: \N,"","" \N,"\N","\N" \N,\N,\N parsed: ᴺᵁᴸᴸ,, ᴺᵁᴸᴸ,\N,\N ᴺᵁᴸᴸ,ᴺᵁᴸᴸ,ᴺᵁᴸᴸ src: NULL,"","" NULL,"NULL","NULL" NULL,NULL,NULL parsed: ᴺᵁᴸᴸ,, ᴺᵁᴸᴸ,NULL,NULL ᴺᵁᴸᴸ,ᴺᵁᴸᴸ,ᴺᵁᴸᴸ >> Mvp::TokenatorGetMetadataTokenGood [GOOD] >> Mvp::TokenatorRefreshMetadataTokenGood >> YamlConfig::MaterializeAllConfigs [GOOD] >> YamlConfig::AppendVolatileConfig [GOOD] >> YamlConfig::AppendAndResolve [GOOD] >> 
YamlConfig::GetMetadata [GOOD] >> YamlConfig::ReplaceMetadata [GOOD] >> YamlConfigParser::Iterate [GOOD] >> YamlConfigParser::ProtoBytesFieldDoesNotDecodeBase64 [GOOD] >> YamlConfigParser::PdiskCategoryFromString [GOOD] >> YamlConfigParser::AllowDefaultHostConfigId >> YamlConfigParser::AllowDefaultHostConfigId [GOOD] >> ConfigValidation::StaticGroupSizesGrow >> YamlConfigParser::IncorrectHostConfigIdFails [GOOD] >> YamlConfigParser::NoMixedHostConfigIds >> ConfigValidation::SameStaticGroup [GOOD] |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/db_id_async_resolver_impl/ut/unittest >> MdbEndpoingGenerator::Generic_WithTransformHost [GOOD] >> YamlConfigParser::NoMixedHostConfigIds [GOOD] >> YamlConfigProto2Yaml::StorageConfig [GOOD] >> ConfigValidation::StaticGroupSizesGrow [GOOD] >> GroupStress::Test [GOOD] >> ConfigValidation::StaticGroupSizesShrink [GOOD] >> ConfigValidation::VDiskChanged [GOOD] >> ConfigValidation::TooManyVDiskChanged [GOOD] >> DatabaseConfigValidation::AllowedFields [GOOD] >> DatabaseConfigValidation::NotAllowedFields |87.2%| [TA] $(B)/ydb/core/base/ut_auth/test-results/unittest/{meta.json ... results_accumulator.log} >> TStateStorageConfig::TestMultiReplicaFailDomains [GOOD] >> TStateStorageConfig::TestReplicaSelectionUniqueCombinations >> DatabaseConfigValidation::NotAllowedFields [GOOD] >> TBsVDiskRepl1::ReadOnly [GOOD] |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskGC::TGCManyVPutsCompactGCAllTest [GOOD] |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_group/unittest >> GroupStress::Test [GOOD] >> AuthConfigValidation::AcceptValidPasswordComplexity [GOOD] >> AuthConfigValidation::CannotAcceptInvalidPasswordComplexity [GOOD] >> AuthConfigValidation::AcceptValidAccountLockoutConfig [GOOD] >> AuthConfigValidation::CannotAcceptInvalidAccountLockoutConfig [GOOD] |87.2%| [TA] $(B)/ydb/core/sys_view/service/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/config/validation/ut/unittest >> DatabaseConfigValidation::NotAllowedFields [GOOD] >> RuntimeFeatureFlags::DefaultValues [GOOD] >> RuntimeFeatureFlags::ConversionToProto >> RuntimeFeatureFlags::ConversionToProto [GOOD] >> RuntimeFeatureFlags::ConversionFromProto [GOOD] >> RuntimeFeatureFlags::UpdatingRuntimeFlags [GOOD] |87.2%| [TA] $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TS] {asan, default-linux-x86_64, release} ydb/library/yaml_config/ut/unittest >> YamlConfigProto2Yaml::StorageConfig [GOOD] Test command err: host_config: "[{\"drive\":[{\"type\":\"NVME\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_01\"},{\"type\":\"NVME\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_02\"}],\"host_config_id\":1},{\"drive\":[{\"type\":\"SSD\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_01\"}],\"host_config_id\":2}]" "\/dev\/disk\/by-partlabel\/kikimr_nvme_02" host_config: "[{\"drive\":[{\"type\":\"NVME\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_01\"},{\"type\":\"NVME\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_02\"}],\"host_config_id\":1},{\"drive\":[{\"type\":\"SSD\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_01\"}],\"host_config_id\":2}]" host_configs: - host_config_id: 1 drive: - path: /dev/disk/by-partlabel/kikimr_nvme_01 type: NVME expected_slot_count: 9 - path: /dev/disk/by-partlabel/kikimr_nvme_02 type: NVME expected_slot_count: 9 - host_config_id: 2 drive: - path: /dev/disk/by-partlabel/kikimr_nvme_01 type: SSD expected_slot_count: 9 hosts: - host: sas8-6954.search.yandex.net port: 19000 host_config_id: 1 - host: sas8-6955.search.yandex.net port: 19000 host_config_id: 2 item_config_generation: 0 |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_large/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRepl1::ReadOnly [GOOD] Test command err: 2025-06-24T20:27:29.679125Z :BS_SYNCER ERROR: guid_recovery.cpp:714: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:27:29.919354Z :BS_SYNCER ERROR: guid_recovery.cpp:767: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 17114412108944892389] 2025-06-24T20:27:30.943769Z :BS_SYNCER ERROR: blobstorage_osiris.cpp:203: PDiskId# 4 VDISK[0:_:0:1:1]: (0) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 >> ColumnShardConfigValidation::AcceptDefaultCompression [GOOD] >> ColumnShardConfigValidation::NotAcceptDefaultCompression [GOOD] >> ColumnShardConfigValidation::CorrectPlainCompression [GOOD] >> ColumnShardConfigValidation::NotCorrectPlainCompression [GOOD] >> ColumnShardConfigValidation::CorrectLZ4Compression [GOOD] >> ColumnShardConfigValidation::NotCorrectLZ4Compression [GOOD] >> ColumnShardConfigValidation::CorrectZSTDCompression [GOOD] >> ColumnShardConfigValidation::NotCorrectZSTDCompression [GOOD] |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/config/validation/auth_config_validator_ut/unittest >> AuthConfigValidation::CannotAcceptInvalidAccountLockoutConfig [GOOD] |87.2%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/objcopy_f8eedece62b0d046ee29007b2b.o |87.2%| [PY] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/objcopy_f8eedece62b0d046ee29007b2b.o |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/base/generated/ut/unittest >> RuntimeFeatureFlags::UpdatingRuntimeFlags [GOOD] >> ExternalDataSourceTest::ValidateName [GOOD] >> ExternalDataSourceTest::ValidatePack [GOOD] >> ExternalDataSourceTest::ValidateAuth [GOOD] >> ExternalDataSourceTest::ValidateParameters [GOOD] >> ExternalDataSourceTest::ValidateHasExternalTable [GOOD] >> ExternalDataSourceTest::ValidateProperties [GOOD] >> ExternalDataSourceTest::ValidateLocation [GOOD] >> 
ExternalSourceBuilderTest::ValidateName [GOOD] >> ExternalSourceBuilderTest::ValidateAuthWithoutCondition [GOOD] >> ExternalSourceBuilderTest::ValidateAuthWithCondition [GOOD] >> ExternalSourceBuilderTest::ValidateUnsupportedField [GOOD] >> ExternalSourceBuilderTest::ValidateNonRequiredField [GOOD] >> ExternalSourceBuilderTest::ValidateRequiredField [GOOD] >> ExternalSourceBuilderTest::ValidateNonRequiredFieldValues [GOOD] >> ExternalSourceBuilderTest::ValidateRequiredFieldValues [GOOD] >> ExternalSourceBuilderTest::ValidateRequiredFieldOnCondition [GOOD] >> IcebergDdlTest::HiveCatalogWithS3Test >> TStreamRequestUnitsCalculatorTest::Basic [GOOD] >> TTimeGridTest::TimeGrid >> IcebergDdlTest::HiveCatalogWithS3Test [GOOD] >> IcebergDdlTest::HadoopCatalogWithS3Test [GOOD] >> ObjectStorageTest::SuccessValidation [GOOD] >> ObjectStorageTest::FailedCreate [GOOD] >> ObjectStorageTest::FailedValidation [GOOD] >> ObjectStorageTest::FailedJsonListValidation [GOOD] >> ObjectStorageTest::FailedOptionalTypeValidation [GOOD] >> ObjectStorageTest::WildcardsValidation [GOOD] >> TTimeGridTest::TimeGrid [GOOD] >> TBlobStorageHullFresh::AppendixPerf [GOOD] >> TBlobStorageHullFresh::AppendixPerf_Tune >> ParseStats::ParseWithSources [GOOD] >> ParseStats::ParseJustOutput [GOOD] >> ParseStats::ParseMultipleGraphsV1 [GOOD] >> ParseStats::ParseMultipleGraphsV2 |87.2%| [TA] $(B)/ydb/core/blobstorage/vdisk/common/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> ParseStats::ParseMultipleGraphsV2 [GOOD] |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/config/validation/column_shard_config_validator_ut/unittest >> ColumnShardConfigValidation::NotCorrectZSTDCompression [GOOD] |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/external_sources/ut/unittest >> ObjectStorageTest::WildcardsValidation [GOOD] |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/metering/ut/unittest >> TTimeGridTest::TimeGrid [GOOD] >> TBsVDiskRepl3::ReplEraseDiskRestoreMultipart [GOOD] >> TBsVDiskRepl3::AnubisTest [GOOD] >> TBsVDiskRepl3::ReplPerf >> DoubleIndexedTests::TestUpsertBySingleKey [GOOD] >> DoubleIndexedTests::TestUpsertByBothKeys [GOOD] >> DoubleIndexedTests::TestMerge [GOOD] >> DoubleIndexedTests::TestFind [GOOD] >> DoubleIndexedTests::TestErase [GOOD] |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/control_plane_storage/internal/ut/unittest >> ParseStats::ParseMultipleGraphsV2 [GOOD] >> TBsVDiskDefrag::Defrag50PercentGarbage [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingKeyFresh >> TPDiskRaces::KillOwnerWhileDeletingChunkWithInflightMock [GOOD] >> TPDiskRaces::Decommit >> ClosedIntervalSet::Union >> Mvp::TokenatorRefreshMetadataTokenGood [GOOD] |87.2%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/columnshard_impl.h_serialized.cpp |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_double_indexed/unittest >> DoubleIndexedTests::TestErase [GOOD] |87.2%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/columnshard_impl.h_serialized.cpp >> PushdownTest::NoFilter |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/sequenceshard/public/ut/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/mvp/core/ut/unittest >> Mvp::TokenatorRefreshMetadataTokenGood [GOOD] Test command err: 2025-06-24T20:27:35.388660Z :MVP DEBUG: mvp_tokens.cpp:77: Refreshing token metadataTokenName 2025-06-24T20:27:35.389318Z :MVP DEBUG: mvp_tokens.cpp:217: Updating metadata token 2025-06-24T20:27:35.484705Z :MVP DEBUG: 
mvp_tokens.cpp:77: Refreshing token metadataTokenName 2025-06-24T20:27:35.485020Z :MVP DEBUG: mvp_tokens.cpp:217: Updating metadata token 2025-06-24T20:27:40.486845Z :MVP DEBUG: mvp_tokens.cpp:77: Refreshing token metadataTokenName 2025-06-24T20:27:40.487363Z :MVP DEBUG: mvp_tokens.cpp:217: Updating metadata token |87.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/object_storage_listing_ut.cpp >> ArrowInferenceTest::csv_simple [GOOD] >> ArrowInferenceTest::tsv_simple [GOOD] >> ArrowInferenceTest::tsv_empty >> PushdownTest::NoFilter [GOOD] >> TYardTest::TestDamageAtTheBoundary [GOOD] >> TYardTest::TestDestroySystem >> ArrowInferenceTest::tsv_empty [GOOD] >> ArrowInferenceTest::broken_json [GOOD] >> ArrowInferenceTest::empty_json_each_row [GOOD] >> ArrowInferenceTest::empty_json_list >> PushdownTest::Equal >> ArrowInferenceTest::empty_json_list [GOOD] >> ArrowInferenceTest::broken_json_list [GOOD] >> PushdownTest::Equal [GOOD] >> PushdownTest::NotEqualInt32Int64 [GOOD] >> TBlobStorageHullFresh::AppendixPerf_Tune [GOOD] >> Backpressure::MonteCarlo >> TArrowPushDown::SimplePushDown [GOOD] >> TArrowPushDown::FilterEverything [GOOD] >> TArrowPushDown::MatchSeveralRowGroups [GOOD] >> PushdownTest::TrueCoalesce [GOOD] |87.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/object_storage_listing_ut.cpp |87.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/external_sources/s3/ut/s3_aws_credentials_ut.cpp >> PushdownTest::CmpInt16AndInt32 [GOOD] >> PushdownTest::PartialAnd |87.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/external_sources/s3/ut/s3_aws_credentials_ut.cpp >> PushdownTest::PartialAnd [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/external_sources/object_storage/inference/ut/gtest >> ArrowInferenceTest::broken_json_list [GOOD] Test command err: {
: Error: couldn't open csv/tsv file, check format and compression parameters: empty file, code: 1001 } {
: Error: couldn't open json file, check format and compression parameters: empty file, code: 1001 } {
: Error: couldn't open json file, check format and compression parameters: empty file, code: 1001 } 2025-06-24T20:27:41.633323Z 1 00h00m00.000000s :OBJECT_STORAGE_INFERENCINATOR DEBUG: TArrowInferencinator: [1:6:6]. HandleFileError: {
: Error: couldn't run arrow json chunker for /path/is/neither/real: Invalid: straddling object straddles two block boundaries (try to increase block size?), code: 1001 } {
: Error: couldn't run arrow json chunker for /path/is/neither/real: Invalid: straddling object straddles two block boundaries (try to increase block size?), code: 1001 } {
: Error: couldn't open json file, check format and compression parameters: Invalid: JSON parse error: Invalid value. in row 0, code: 1001 } |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFresh::AppendixPerf_Tune [GOOD] >> TBsLocalRecovery::WriteRestartReadHugeIncreased [GOOD] >> TBsLocalRecovery::WriteRestartReadHugeDecreased >> PushdownTest::PartialAndOneBranchPushdownable |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/s3/actors/ut/unittest >> TArrowPushDown::MatchSeveralRowGroups [GOOD] >> PushdownTest::PartialAndOneBranchPushdownable [GOOD] >> TPGTest::TestLogin >> TBsVDiskExtreme::Simple3Put1GetMissingKeyFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingKeyCompaction >> PushdownTest::NotNull [GOOD] >> TPGTest::TestLogin [GOOD] >> ArrowTest::BatchBuilder >> TYardTest::TestDestroySystem [GOOD] >> TYardTest::TestCutMultipleLogChunks >> TCollectingS3ListingStrategyTests::IfNoIssuesOccursShouldReturnCollectedPaths [GOOD] >> TCollectingS3ListingStrategyTests::IfThereAreMoreRecordsThanSpecifiedByLimitShouldReturnError [GOOD] >> TCollectingS3ListingStrategyTests::IfAnyIterationReturnIssueThanWholeStrategyShouldReturnIt [GOOD] >> TCollectingS3ListingStrategyTests::IfExceptionIsReturnedFromIteratorThanItShouldCovertItToIssue [GOOD] >> PushdownTest::NotNullForDatetime [GOOD] >> ArrowTest::BatchBuilder [GOOD] >> ArrowTest::ArrowToYdbConverter >> ArrowTest::ArrowToYdbConverter [GOOD] >> ArrowTest::SortWithCompositeKey [GOOD] >> ArrowTest::MergingSortedInputStream [GOOD] >> ArrowTest::MergingSortedInputStreamReversed >> ArrowTest::MergingSortedInputStreamReversed [GOOD] >> ArrowTest::MergingSortedInputStreamReplace [GOOD] >> ArrowTest::MaxVersionFilter [GOOD] >> ArrowTest::EqualKeysVersionFilter [GOOD] >> ColumnFilter::MergeFilters >> PushdownTest::IsNull >> ColumnFilter::MergeFilters [GOOD] >> ColumnFilter::CombineFilters [GOOD] >> ColumnFilter::ApplyFilterToFilter [GOOD] >> ColumnFilter::FilterSlice [GOOD] >> ColumnFilter::FilterCheckSlice [GOOD] >> ColumnFilter::FilterSlice1 [GOOD] >> ColumnFilter::CutFilter1 [GOOD] >> ColumnFilter::CutFilter2 [GOOD] >> Dictionary::Simple >> PushdownTest::IsNull [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/pgproxy/ut/unittest >> TPGTest::TestLogin [GOOD] Test command err: 2025-06-24T20:27:43.271314Z :PGWIRE INFO: sock_listener.cpp:66: Listening on [::]:8260 2025-06-24T20:27:43.287545Z :PGWIRE DEBUG: pg_connection.cpp:61: (#13,[::1]:47480) incoming connection opened 2025-06-24T20:27:43.287805Z :PGWIRE DEBUG: pg_connection.cpp:241: (#13,[::1]:47480) -> [1] 'i' "Initial" Size(15) protocol(0x00000300) user=user 2025-06-24T20:27:43.288173Z :PGWIRE DEBUG: pg_connection.cpp:241: (#13,[::1]:47480) <- [1] 'R' "Auth" Size(4) OK >> TestS3UrlEscape::EscapeEscapedForce [GOOD] >> TestS3UrlEscape::EscapeUnescapeForceRet [GOOD] >> TestS3UrlEscape::EscapeAdditionalSymbols [GOOD] >> TestUrlBuilder::UriOnly >> TestUrlBuilder::UriOnly [GOOD] >> TestUrlBuilder::Basic [GOOD] >> TestUrlBuilder::BasicWithEncoding [GOOD] >> TestUrlBuilder::BasicWithAdditionalEncoding [GOOD] >> PushdownTest::StringFieldsNotSupported >> PushdownTest::StringFieldsNotSupported [GOOD] >> MetaCache::BasicForwarding >> MetaCache::BasicForwarding [GOOD] >> MetaCache::TimeoutFallback >> TMemoryPoolTest::Transactions [GOOD] >> TMemoryPoolTest::AppendString [GOOD] >> TMemoryPoolTest::AllocOneByte [GOOD] >> TMemoryPoolTest::LongRollback [GOOD] >> UtilString::ShrinkToFit [GOOD] >> 
TMemoryPoolTest::TransactionsWithAlignment [GOOD] |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/s3/provider/ut/unittest >> TCollectingS3ListingStrategyTests::IfExceptionIsReturnedFromIteratorThanItShouldCovertItToIssue [GOOD] >> PushdownTest::StringFieldsNotSupported2 >> MetaCache::TimeoutFallback [GOOD] >> PushdownTest::StringFieldsNotSupported2 [GOOD] >> TYardTest::TestCutMultipleLogChunks [GOOD] >> TYardTest::TestDestructionWhileWritingChunk >> JsonEnvelopeTest::Simple [GOOD] >> JsonEnvelopeTest::NoReplace [GOOD] >> JsonEnvelopeTest::ArrayItem [GOOD] >> JsonEnvelopeTest::Escape [GOOD] >> JsonEnvelopeTest::BinaryData [GOOD] |87.2%| [TA] $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> PushdownTest::RegexpPushdown |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/s3/common/ut/unittest >> TestUrlBuilder::BasicWithAdditionalEncoding [GOOD] >> PushdownTest::RegexpPushdown [GOOD] >> TStateStorageConfig::TestReplicaSelectionUniqueCombinations [GOOD] >> TStateStorageConfig::UniformityTest ------- [TS] {asan, default-linux-x86_64, release} ydb/mvp/meta/ut/unittest >> MetaCache::TimeoutFallback [GOOD] Test command err: 2025-06-24T20:27:44.486813Z :HTTP INFO: http_proxy_acceptor.cpp:89: Listening on http://[::]:24485 2025-06-24T20:27:44.491492Z :HTTP INFO: http_proxy_acceptor.cpp:89: Listening on http://[::]:21531 2025-06-24T20:27:44.492068Z :HTTP DEBUG: http_proxy.cpp:22: Connection created [1:14:2061] 2025-06-24T20:27:44.492156Z :HTTP DEBUG: http_proxy_outgoing.cpp:188: resolving 127.0.0.1:24485 2025-06-24T20:27:44.492304Z :HTTP DEBUG: http_proxy_outgoing.cpp:155: connecting to 127.0.0.1:24485 2025-06-24T20:27:44.492621Z :HTTP DEBUG: http_proxy_outgoing.cpp:329: (#11,127.0.0.1:24485) outgoing connection opened 2025-06-24T20:27:44.492702Z :HTTP DEBUG: http_proxy_outgoing.cpp:331: (#11,127.0.0.1:24485) <- (GET /server) 2025-06-24T20:27:44.503736Z :HTTP DEBUG: http_proxy_incoming.cpp:83: (#12,[::ffff:127.0.0.1]:59022) incoming connection opened 2025-06-24T20:27:44.503976Z :HTTP DEBUG: http_proxy_incoming.cpp:156: (#12,[::ffff:127.0.0.1]:59022) -> (GET /server) 2025-06-24T20:27:44.504178Z :HTTP DEBUG: meta_cache.cpp:231: Updating ownership http://127.0.0.1:21531 with deadline 2025-06-24T20:28:44.504119Z 2025-06-24T20:27:44.504252Z :HTTP DEBUG: meta_cache.cpp:237: SetRefreshTime "/server" to 2025-06-24T20:28:44.504119Z (+1750796924.504119s) 2025-06-24T20:27:44.504337Z :HTTP DEBUG: meta_cache.cpp:198: IncomingForward /server to http://127.0.0.1:21531 timeout 30.000000s 2025-06-24T20:27:44.504521Z :HTTP DEBUG: http_proxy.cpp:22: Connection created [1:16:2063] 2025-06-24T20:27:44.504569Z :HTTP DEBUG: http_proxy_outgoing.cpp:188: resolving 127.0.0.1:21531 2025-06-24T20:27:44.504659Z :HTTP DEBUG: http_proxy_outgoing.cpp:155: connecting to 127.0.0.1:21531 2025-06-24T20:27:44.504900Z :HTTP DEBUG: http_proxy_outgoing.cpp:329: (#13,127.0.0.1:21531) outgoing connection opened 2025-06-24T20:27:44.504947Z :HTTP DEBUG: http_proxy_outgoing.cpp:331: (#13,127.0.0.1:21531) <- (GET /server) 2025-06-24T20:27:44.505187Z :HTTP DEBUG: http_proxy_incoming.cpp:83: (#14,[::ffff:127.0.0.1]:32962) incoming connection opened 2025-06-24T20:27:44.505305Z :HTTP DEBUG: http_proxy_incoming.cpp:156: (#14,[::ffff:127.0.0.1]:32962) -> (GET /server) 2025-06-24T20:27:44.505643Z :HTTP DEBUG: http_proxy_incoming.cpp:280: (#14,[::ffff:127.0.0.1]:32962) <- (200 Found, 6 bytes) 2025-06-24T20:27:44.505729Z :HTTP DEBUG: 
http_proxy_incoming.cpp:335: (#14,[::ffff:127.0.0.1]:32962) connection closed 2025-06-24T20:27:44.505932Z :HTTP DEBUG: http_proxy_outgoing.cpp:101: (#13,127.0.0.1:21531) -> (200 Found, 6 bytes) 2025-06-24T20:27:44.506013Z :HTTP DEBUG: http_proxy_outgoing.cpp:109: (#13,127.0.0.1:21531) connection closed 2025-06-24T20:27:44.506386Z :HTTP DEBUG: meta_cache.cpp:146: Cache received successfull (200) response for /server 2025-06-24T20:27:44.506510Z :HTTP DEBUG: http_proxy_incoming.cpp:280: (#12,[::ffff:127.0.0.1]:59022) <- (200 Found, 6 bytes) 2025-06-24T20:27:44.506582Z :HTTP DEBUG: http_proxy_incoming.cpp:335: (#12,[::ffff:127.0.0.1]:59022) connection closed 2025-06-24T20:27:44.506677Z :HTTP DEBUG: http_proxy.cpp:146: Connection closed [1:16:2063] 2025-06-24T20:27:44.506827Z :HTTP DEBUG: http_proxy_outgoing.cpp:101: (#11,127.0.0.1:24485) -> (200 Found, 6 bytes) 2025-06-24T20:27:44.506872Z :HTTP DEBUG: http_proxy_outgoing.cpp:109: (#11,127.0.0.1:24485) connection closed 2025-06-24T20:27:44.507127Z :HTTP DEBUG: http_proxy.cpp:146: Connection closed [1:14:2061] 2025-06-24T20:27:44.695862Z :HTTP INFO: http_proxy_acceptor.cpp:89: Listening on http://[::]:3324 2025-06-24T20:27:44.696282Z :HTTP INFO: http_proxy_acceptor.cpp:89: Listening on http://[::]:25598 2025-06-24T20:27:44.696693Z :HTTP DEBUG: http_proxy.cpp:22: Connection created [2:14:2061] 2025-06-24T20:27:44.696735Z :HTTP DEBUG: http_proxy_outgoing.cpp:188: resolving 127.0.0.1:3324 2025-06-24T20:27:44.696830Z :HTTP DEBUG: http_proxy_outgoing.cpp:155: connecting to 127.0.0.1:3324 2025-06-24T20:27:44.697074Z :HTTP DEBUG: http_proxy_outgoing.cpp:329: (#11,127.0.0.1:3324) outgoing connection opened 2025-06-24T20:27:44.697120Z :HTTP DEBUG: http_proxy_outgoing.cpp:331: (#11,127.0.0.1:3324) <- (GET /server) 2025-06-24T20:27:44.703427Z :HTTP DEBUG: http_proxy_incoming.cpp:83: (#12,[::ffff:127.0.0.1]:32882) incoming connection opened 2025-06-24T20:27:44.703559Z :HTTP DEBUG: http_proxy_incoming.cpp:156: (#12,[::ffff:127.0.0.1]:32882) -> (GET /server) 2025-06-24T20:27:44.703735Z :HTTP DEBUG: meta_cache.cpp:231: Updating ownership http://127.0.0.1:25598 with deadline 2025-06-24T20:37:44.703704Z 2025-06-24T20:27:44.703802Z :HTTP DEBUG: meta_cache.cpp:237: SetRefreshTime "/server" to 2025-06-24T20:37:44.703704Z (+1750797464.703704s) 2025-06-24T20:27:44.703867Z :HTTP DEBUG: meta_cache.cpp:198: IncomingForward /server to http://127.0.0.1:25598 timeout 30.000000s 2025-06-24T20:27:44.704038Z :HTTP DEBUG: http_proxy.cpp:22: Connection created [2:16:2063] 2025-06-24T20:27:44.704076Z :HTTP DEBUG: http_proxy_outgoing.cpp:188: resolving 127.0.0.1:25598 2025-06-24T20:27:44.704145Z :HTTP DEBUG: http_proxy_outgoing.cpp:155: connecting to 127.0.0.1:25598 2025-06-24T20:27:44.704365Z :HTTP DEBUG: http_proxy_outgoing.cpp:329: (#13,127.0.0.1:25598) outgoing connection opened 2025-06-24T20:27:44.704411Z :HTTP DEBUG: http_proxy_outgoing.cpp:331: (#13,127.0.0.1:25598) <- (GET /server) 2025-06-24T20:27:44.704545Z :HTTP ERROR: http_proxy_outgoing.cpp:122: (#13,127.0.0.1:25598) connection closed with error: Connection timed out 2025-06-24T20:27:44.704823Z :HTTP WARN: meta_cache.cpp:151: Cache received failed response with error "Connection timed out" for /server - retrying locally 2025-06-24T20:27:44.705014Z :HTTP DEBUG: http_proxy.cpp:146: Connection closed [2:16:2063] 2025-06-24T20:27:44.705089Z :HTTP DEBUG: http_proxy_incoming.cpp:83: (#14,[::ffff:127.0.0.1]:60608) incoming connection opened 2025-06-24T20:27:44.705198Z :HTTP DEBUG: http_proxy_incoming.cpp:156: 
(#14,[::ffff:127.0.0.1]:60608) -> (GET /server) 2025-06-24T20:27:44.705257Z :HTTP DEBUG: http_proxy_incoming.cpp:190: (#14,[::ffff:127.0.0.1]:60608) connection closed 2025-06-24T20:27:44.717357Z :HTTP DEBUG: http_proxy_incoming.cpp:280: (#12,[::ffff:127.0.0.1]:32882) <- (200 Found, 6 bytes) 2025-06-24T20:27:44.717553Z :HTTP DEBUG: http_proxy_incoming.cpp:335: (#12,[::ffff:127.0.0.1]:32882) connection closed 2025-06-24T20:27:44.717773Z :HTTP DEBUG: http_proxy_outgoing.cpp:101: (#11,127.0.0.1:3324) -> (200 Found, 6 bytes) 2025-06-24T20:27:44.717844Z :HTTP DEBUG: http_proxy_outgoing.cpp:109: (#11,127.0.0.1:3324) connection closed 2025-06-24T20:27:44.718244Z :HTTP DEBUG: http_proxy.cpp:146: Connection closed [2:14:2061] |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/log_backend/ut/unittest >> JsonEnvelopeTest::BinaryData [GOOD] |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_util/unittest >> TMemoryPoolTest::TransactionsWithAlignment [GOOD] >> TYardTest::TestDestructionWhileWritingChunk [GOOD] >> TYardTest::TestDestructionWhileReadingChunk >> EncryptedFileSerializerTest::SerializeWholeFileAtATime [GOOD] >> EncryptedFileSerializerTest::WrongParametersForSerializer [GOOD] >> EncryptedFileSerializerTest::WrongParametersForDeserializer [GOOD] >> EncryptedFileSerializerTest::SplitOnBlocks >> ActionParsingTest::ToAndFromStringAreConsistent [GOOD] >> ActionParsingTest::ActionsForQueueTest [GOOD] >> ActionParsingTest::BatchActionTest [GOOD] >> ActionParsingTest::ActionsForMessageTest [GOOD] >> ActionParsingTest::FastActionsTest [GOOD] >> HttpCountersTest::CountersAggregationTest [GOOD] >> LazyCounterTest::LazyCounterTest [GOOD] >> LazyCounterTest::AggregationLazyTest [GOOD] >> LazyCounterTest::AggregationNonLazyTest [GOOD] >> LazyCounterTest::HistogramAggregationTest [GOOD] >> MessageAttributeValidationTest::MessageAttributeValidationTest [GOOD] >> MessageBodyValidationTest::MessageBodyValidationTest [GOOD] >> MeteringCountersTest::CountersAggregationTest [GOOD] >> NameValidationTest::NameValidationTest [GOOD] >> QueueAttributes::BasicStdTest [GOOD] >> QueueAttributes::BasicFifoTest [GOOD] >> QueueAttributes::BasicClampTest [GOOD] >> QueueCountersTest::InsertCountersTest >> TBsVDiskExtreme::Simple3Put1GetMissingKeyCompaction [GOOD] >> TDqBlockHashJoinBasicTest::TestBasicPassthrough ------- [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/provider/ut/pushdown/unittest >> PushdownTest::RegexpPushdown [GOOD] Test command err: Initial program: ( (let $data_source (DataSource '"generic" '"test_cluster")) (let $empty_lambda (lambda '($arg) (Bool '"true"))) (let $table (MrTableConcat (Key '('table (String '"test_table")))) ) (let $read (Read! world $data_source $table)) (let $map_lambda (lambda '($row) (OptionalIf (Bool '"true") $row ) )) (let $filtered_data (FlatMap (Right! $read) $map_lambda)) (let $resulte_data_sink (DataSink '"result")) (let $result (ResWrite! (Left! $read) $resulte_data_sink (Key) $filtered_data '('('type)))) (return (Commit! $result $resulte_data_sink)) ) 2025-06-24 20:27:41.413 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (Read! world (DataSource '"generic" '"test_cluster") (MrTableConcat (Key '('table (String '"test_table")))))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($4) (OptionalIf (Bool '"true") $4))) '('('type)))) (return (Commit! 
$3 $2)) ) 2025-06-24 20:27:41.417 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (Read! world (DataSource '"generic" '"test_cluster") (MrTableConcat (Key '('table (String '"test_table")))))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($4) (OptionalIf (Bool '"true") $4))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-24 20:27:41.418 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [generic] yql_generic_io_discovery.cpp:55: discovered cluster name: test_cluster 2025-06-24 20:27:41.418 INFO yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [generic] yql_generic_load_meta.cpp:91: Loading table meta for: `test_cluster`.`test_table` 2025-06-24 20:27:41.420 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (Bool '"true") $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-24 20:27:41.433 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (Bool '"true") $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-24 20:27:41.434 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (Bool '"true") $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-24 20:27:41.439 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (Bool '"true")) (let $2 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($5) $1))) (let $3 (DataSink '"result")) (let $4 (ResWrite! (Left! $2) $3 (Key) (FlatMap (Right! $2) (lambda '($6) (OptionalIf $1 $6))) '('('type)))) (return (Commit! $4 $3)) ) 2025-06-24 20:27:41.440 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_co_simple1.cpp:986: OptionalIf over Bool 'true 2025-06-24 20:27:41.441 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (Just $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-24 20:27:41.442 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! 
world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (Just $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-24 20:27:41.442 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (Just $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-24 20:27:41.442 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_co_simple1.cpp:2040: FlatMap with Just 2025-06-24 20:27:41.447 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (Right! $1) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-24 20:27:41.448 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (Right! $1) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-24 20:27:41.449 INFO yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [generic] yql_optimize.cpp:135: PhysicalOptimizer-TrimReadWorld 2025-06-24 20:27:41.449 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (DataSink '"result")) (let $2 (ResWrite! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)))) (return (Commit! $2 $1)) ) 2025-06-24 20:27:41.450 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (DataSink '"result")) (let $2 (ResWrite! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)))) (return (Commit! $2 $1)) ) 2025-06-24 20:27:41.455 INFO yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [RESULT] yql_result_provider.cpp:773: ResPull 2025-06-24 20:27:41.455 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (DataSink '"result")) (let $2 (ResPull! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)) '"generic")) (return (Commit! $2 $1)) ) 2025-06-24 20:27:41.456 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (DataSink '"result")) (let $2 (ResPull! world $1 (Key) (Right! (GenReadTable! 
world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)) '"generic")) (return (Commit! $2 $1)) ) 2025-06-24 20:27:41.457 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Optimized expr: ( (let $1 (DataSink '"result")) (let $2 (ResPull! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)) '"generic")) (return (Commit! $2 $1)) ) 2025-06-24 20:27:41.458 INFO yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [generic] yql_generic_dq_integration.cpp:193: Filling source settings: cluster: test_cluster, table: test_table, endpoint: host: "host" port: 42 2025-06-24 20:27:41.479 INFO yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [generic] yql_optimize.cpp:135: BuildGenericDqSourceSettings 2025-06-24 20:27:41.481 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Built settings: ( (let $1 (DataSink '"result")) (let $2 '('"col_bool" '"col_date" '"col_datetime" '"col_double" '"col_dynumber" '"col_float" '"col_int16" '"col_int32" '"col_int64" '"col_int8" '"col_interval" '"col_json" '"col_json_document" '"col_optional_bool" '"col_optional_date" '"col_optional_datetime" '"col_optional_double" '"col_optional_dynumber" '"col_optional_float" '"col_optional_int16" '"col_optional_int32" '"col_optional_int64" '"col_optional_int8" '"col_optional_interval" '"col_optional_json" '"col_optional_json_document" '"col_optional_string" '"col_optional_timestamp" '"col_optional_tz_date" '"col_optional_tz_datetime" '"col_optional_tz_timestamp" '"col_optional_uint16" '"col_optional_uint32" '"col_optional_uint64" '"col_optional_uint8" '"col_optional_utf8" '"col_optional_uuid" '"col_optional_yson" '"col_string" '"col_timestamp" '"col_tz_date" '"col_tz_datetime" '"col_tz_timestamp" '"col_uint16" '"col_uint32" '"col_uint64" '"col_uint8" '"col_utf8" '"col_uuid" '"col_yson")) (let $3 (GenSourceSettings world '"test_cluster" '"test_table" (SecureParam '"cluster:default_test_cluster") $2 (lambda '($32) (Bool '"true")))) (let $4 (DataType 'Bool)) (let $5 (DataType 'Date)) (let $6 (DataType 'Datetime)) (let $7 (DataType 'Double)) (let $8 (DataType 'DyNumber)) (let $9 (DataType 'Float)) (let $10 (DataType 'Int16)) (let $11 (DataType 'Int32)) (let $12 (DataType 'Int64)) (let $13 (DataType 'Int8)) (let $14 (DataType 'Interval)) (let $15 (DataType 'Json)) (let $16 (DataType 'JsonDocument)) (let $17 (DataType 'String)) (let $18 (DataType 'Timestamp)) (let $19 (DataType 'TzDate)) (let $20 (DataType 'TzDatetime)) (let $21 (DataType 'TzTimestamp)) (let $22 (DataType 'Uint16)) (let $23 (DataType 'Uint32)) (let $24 (DataType 'Uint64)) (let $25 (DataType 'Uint8)) (let $26 (DataType 'Utf8)) (let $27 (DataType 'Uuid)) (let $28 (DataType 'Yson)) (let $29 (StructType '('"col_bool" $4) '('"col_date" $5) '('"col_datetime" $6) '('"col_double" $7) '('"col_dynumber" $8) '('"col_float" $9) '('"col_int16" $10) '('"col_int32" $11) '('"col_int64" $12) '('"col_int8" $13) '('"col_interval" $14) '('"col_json" $15) '('"col_json_document" $16) '('"col_optional_bool" (OptionalType $4)) '('"col_optional_date" (OptionalType $5)) '('"col_optional_datetime" (OptionalType $6)) '('"col_optional_double" (OptionalType $7)) '('"col_optional_dynumber" (OptionalType $8)) '('"col_optional_float" (OptionalType 
$9)) '('"col_optional_int16" (OptionalType $10)) '('"col_optional_int32" (OptionalType $11)) '('"col_optional_int64" (OptionalType $12)) '('"col_optional_int8" (OptionalType $13)) '('"col_optional_interval" (OptionalType $14)) '('"col_optional_json" (OptionalType $15)) '('"col_optional_json_document" (OptionalType $16)) '('"col_optional_string" (OptionalType $17)) '('"col_optional_timestamp" (OptionalType $18)) '('"col_optional_tz_date" (OptionalT ... sitive" $6) '('"DotNl" $6) '('"Literal" $6) '('"LogErrors" $6) '('"LongestMatch" $6) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $6) '('"NeverNl" $6) '('"OneLine" $6) '('"PerlClasses" $6) '('"PosixSyntax" $6) '('"Utf8" $6) '('"WordBoundary" $6)))) '"" '())) (return (OptionalIf (Apply $9 (Just (Member $5 '"col_string"))) $5)) )))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-24 20:27:45.083 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (block '( (let $6 (DataType 'Bool)) (let $7 (OptionalType (StructType '('"CaseSensitive" $6) '('"DotNl" $6) '('"Literal" $6) '('"LogErrors" $6) '('"LongestMatch" $6) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $6) '('"NeverNl" $6) '('"OneLine" $6) '('"PerlClasses" $6) '('"PosixSyntax" $6) '('"Utf8" $6) '('"WordBoundary" $6)))) (let $8 (DataType 'String)) (let $9 (CallableType '() '($6) '((OptionalType $8)))) (let $10 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $7)) (VoidType) '"" $9 (TupleType $8 $7) '"" '())) (return (OptionalIf (Apply $10 (Just (Member $5 '"col_string"))) $5)) )))) '('('type)))) (return (Commit! 
$3 $2)) ) 2025-06-24 20:27:45.101 INFO yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [generic] yql_optimize.cpp:135: PhysicalOptimizer-TrimReadWorld 2025-06-24 20:27:45.111 INFO yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [default] physical_opt.cpp:76: Push filter lambda: ( (return (lambda '($1) (block '( (let $2 (DataType 'Bool)) (let $3 (OptionalType (StructType '('"CaseSensitive" $2) '('"DotNl" $2) '('"Literal" $2) '('"LogErrors" $2) '('"LongestMatch" $2) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $2) '('"NeverNl" $2) '('"OneLine" $2) '('"PerlClasses" $2) '('"PosixSyntax" $2) '('"Utf8" $2) '('"WordBoundary" $2)))) (let $4 (DataType 'String)) (let $5 (CallableType '() '($2) '((OptionalType $4)))) (let $6 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $3)) (VoidType) '"" $5 (TupleType $4 $3) '"" '())) (return (Apply $6 (Just (Member $1 '"col_string")))) )))) ) 2025-06-24 20:27:45.112 INFO yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [generic] yql_optimize.cpp:135: PhysicalOptimizer-PushFilterToReadTable 2025-06-24 20:27:45.113 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (DataSink '"result")) (let $2 (DataType 'Bool)) (let $3 (OptionalType (StructType '('"CaseSensitive" $2) '('"DotNl" $2) '('"Literal" $2) '('"LogErrors" $2) '('"LongestMatch" $2) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $2) '('"NeverNl" $2) '('"OneLine" $2) '('"PerlClasses" $2) '('"PosixSyntax" $2) '('"Utf8" $2) '('"WordBoundary" $2)))) (let $4 (DataType 'String)) (let $5 (CallableType '() '($2) '((OptionalType $4)))) (let $6 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $3)) (VoidType) '"" $5 (TupleType $4 $3) '"" '())) (let $7 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($8) (Apply $6 (Just (Member $8 '"col_string")))))) (lambda '($9) (OptionalIf (Apply $6 (Just (Member $9 '"col_string"))) $9))) '('('type)))) (return (Commit! $7 $1)) ) 2025-06-24 20:27:45.114 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (DataSink '"result")) (let $2 (DataType 'Bool)) (let $3 (OptionalType (StructType '('"CaseSensitive" $2) '('"DotNl" $2) '('"Literal" $2) '('"LogErrors" $2) '('"LongestMatch" $2) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $2) '('"NeverNl" $2) '('"OneLine" $2) '('"PerlClasses" $2) '('"PosixSyntax" $2) '('"Utf8" $2) '('"WordBoundary" $2)))) (let $4 (DataType 'String)) (let $5 (CallableType '() '($2) '((OptionalType $4)))) (let $6 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $3)) (VoidType) '"" $5 (TupleType $4 $3) '"" '())) (let $7 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($8) (Apply $6 (Just (Member $8 '"col_string")))))) (lambda '($9) (OptionalIf (Apply $6 (Just (Member $9 '"col_string"))) $9))) '('('type)))) (return (Commit! 
$7 $1)) ) 2025-06-24 20:27:45.116 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (DataSink '"result")) (let $2 (DataType 'Bool)) (let $3 (OptionalType (StructType '('"CaseSensitive" $2) '('"DotNl" $2) '('"Literal" $2) '('"LogErrors" $2) '('"LongestMatch" $2) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $2) '('"NeverNl" $2) '('"OneLine" $2) '('"PerlClasses" $2) '('"PosixSyntax" $2) '('"Utf8" $2) '('"WordBoundary" $2)))) (let $4 (DataType 'String)) (let $5 (CallableType '() '($2) '((OptionalType $4)))) (let $6 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $3)) (VoidType) '"" $5 (TupleType $4 $3) '"" '())) (let $7 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($8) (Apply $6 (Just (Member $8 '"col_string")))))) (lambda '($9) (OptionalIf (Apply $6 (Just (Member $9 '"col_string"))) $9))) '('('type)))) (return (Commit! $7 $1)) ) 2025-06-24 20:27:45.118 TRACE yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [generic] yql_generic_physical_opt.cpp:142: Push filter. Lambda is already not empty 2025-06-24 20:27:45.129 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Optimized expr: ( (let $1 (DataSink '"result")) (let $2 (DataType 'Bool)) (let $3 (OptionalType (StructType '('"CaseSensitive" $2) '('"DotNl" $2) '('"Literal" $2) '('"LogErrors" $2) '('"LongestMatch" $2) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $2) '('"NeverNl" $2) '('"OneLine" $2) '('"PerlClasses" $2) '('"PosixSyntax" $2) '('"Utf8" $2) '('"WordBoundary" $2)))) (let $4 (DataType 'String)) (let $5 (CallableType '() '($2) '((OptionalType $4)))) (let $6 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $3)) (VoidType) '"" $5 (TupleType $4 $3) '"" '())) (let $7 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($8) (Apply $6 (Just (Member $8 '"col_string")))))) (lambda '($9) (OptionalIf (Apply $6 (Just (Member $9 '"col_string"))) $9))) '('('type)))) (return (Commit! 
$7 $1)) ) 2025-06-24 20:27:45.130 INFO yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [generic] yql_generic_dq_integration.cpp:193: Filling source settings: cluster: test_cluster, table: test_table, endpoint: host: "host" port: 42 2025-06-24 20:27:45.153 INFO yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [generic] yql_optimize.cpp:135: BuildGenericDqSourceSettings 2025-06-24 20:27:45.162 DEBUG yql-providers-generic-provider-ut-pushdown(pid=84426, tid=0x00007F69902AFF40) [core] yql_out_transformers.cpp:62: Built settings: ( (let $1 (DataSink '"result")) (let $2 '('"col_bool" '"col_date" '"col_datetime" '"col_double" '"col_dynumber" '"col_float" '"col_int16" '"col_int32" '"col_int64" '"col_int8" '"col_interval" '"col_json" '"col_json_document" '"col_optional_bool" '"col_optional_date" '"col_optional_datetime" '"col_optional_double" '"col_optional_dynumber" '"col_optional_float" '"col_optional_int16" '"col_optional_int32" '"col_optional_int64" '"col_optional_int8" '"col_optional_interval" '"col_optional_json" '"col_optional_json_document" '"col_optional_string" '"col_optional_timestamp" '"col_optional_tz_date" '"col_optional_tz_datetime" '"col_optional_tz_timestamp" '"col_optional_uint16" '"col_optional_uint32" '"col_optional_uint64" '"col_optional_uint8" '"col_optional_utf8" '"col_optional_uuid" '"col_optional_yson" '"col_string" '"col_timestamp" '"col_tz_date" '"col_tz_datetime" '"col_tz_timestamp" '"col_uint16" '"col_uint32" '"col_uint64" '"col_uint8" '"col_utf8" '"col_uuid" '"col_yson")) (let $3 (DataType 'Bool)) (let $4 (OptionalType (StructType '('"CaseSensitive" $3) '('"DotNl" $3) '('"Literal" $3) '('"LogErrors" $3) '('"LongestMatch" $3) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $3) '('"NeverNl" $3) '('"OneLine" $3) '('"PerlClasses" $3) '('"PosixSyntax" $3) '('"Utf8" $3) '('"WordBoundary" $3)))) (let $5 (DataType 'String)) (let $6 (OptionalType $5)) (let $7 (CallableType '() '($3) '($6))) (let $8 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $4)) (VoidType) '"" $7 (TupleType $5 $4) '"" '())) (let $9 (GenSourceSettings world '"test_cluster" '"test_table" (SecureParam '"cluster:default_test_cluster") $2 (lambda '($37) (Apply $8 (Just (Member $37 '"col_string")))))) (let $10 (DataType 'Bool)) (let $11 (DataType 'Date)) (let $12 (DataType 'Datetime)) (let $13 (DataType 'Double)) (let $14 (DataType 'DyNumber)) (let $15 (DataType 'Float)) (let $16 (DataType 'Int16)) (let $17 (DataType 'Int32)) (let $18 (DataType 'Int64)) (let $19 (DataType 'Int8)) (let $20 (DataType 'Interval)) (let $21 (DataType 'Json)) (let $22 (DataType 'JsonDocument)) (let $23 (DataType 'Timestamp)) (let $24 (DataType 'TzDate)) (let $25 (DataType 'TzDatetime)) (let $26 (DataType 'TzTimestamp)) (let $27 (DataType 'Uint16)) (let $28 (DataType 'Uint32)) (let $29 (DataType 'Uint64)) (let $30 (DataType 'Uint8)) (let $31 (DataType 'Utf8)) (let $32 (DataType 'Uuid)) (let $33 (DataType 'Yson)) (let $34 (StructType '('"col_bool" $10) '('"col_date" $11) '('"col_datetime" $12) '('"col_double" $13) '('"col_dynumber" $14) '('"col_float" $15) '('"col_int16" $16) '('"col_int32" $17) '('"col_int64" $18) '('"col_int8" $19) '('"col_interval" $20) '('"col_json" $21) '('"col_json_document" $22) '('"col_optional_bool" (OptionalType $10)) '('"col_optional_date" (OptionalType $11)) '('"col_optional_datetime" (OptionalType $12)) '('"col_optional_double" (OptionalType $13)) '('"col_optional_dynumber" (OptionalType $14)) '('"col_optional_float" (OptionalType $15)) 
'('"col_optional_int16" (OptionalType $16)) '('"col_optional_int32" (OptionalType $17)) '('"col_optional_int64" (OptionalType $18)) '('"col_optional_int8" (OptionalType $19)) '('"col_optional_interval" (OptionalType $20)) '('"col_optional_json" (OptionalType $21)) '('"col_optional_json_document" (OptionalType $22)) '('"col_optional_string" $6) '('"col_optional_timestamp" (OptionalType $23)) '('"col_optional_tz_date" (OptionalType $24)) '('"col_optional_tz_datetime" (OptionalType $25)) '('"col_optional_tz_timestamp" (OptionalType $26)) '('"col_optional_uint16" (OptionalType $27)) '('"col_optional_uint32" (OptionalType $28)) '('"col_optional_uint64" (OptionalType $29)) '('"col_optional_uint8" (OptionalType $30)) '('"col_optional_utf8" (OptionalType $31)) '('"col_optional_uuid" (OptionalType $32)) '('"col_optional_yson" (OptionalType $33)) '('"col_string" $5) '('"col_timestamp" $23) '('"col_tz_date" $24) '('"col_tz_datetime" $25) '('"col_tz_timestamp" $26) '('"col_uint16" $27) '('"col_uint32" $28) '('"col_uint64" $29) '('"col_uint8" $30) '('"col_utf8" $31) '('"col_uuid" $32) '('"col_yson" $33))) (let $35 (DqSourceWrap $9 (DataSource '"generic" '"test_cluster") $34)) (let $36 (ResWrite! world $1 (Key) (FlatMap $35 (lambda '($38) (OptionalIf (Apply $8 (Just (Member $38 '"col_string"))) $38))) '('('type)))) (return (Commit! $36 $1)) ) Dq source filter settings: filter_typed { regexp { value { column: "col_string" } pattern { typed_value { type { type_id: STRING } value { bytes_value: "\\\\d+" } } } } } >> QueueCountersTest::InsertCountersTest [GOOD] >> QueueCountersTest::RemoveQueueCountersNonLeaderWithoutFolderTest [GOOD] >> QueueCountersTest::RemoveQueueCountersLeaderWithoutFolderTest [GOOD] >> QueueCountersTest::RemoveQueueCountersNonLeaderWithFolderTest [GOOD] >> QueueCountersTest::RemoveQueueCountersLeaderWithFolderTest [GOOD] >> QueueCountersTest::CountersAggregationTest [GOOD] >> QueueCountersTest::CountersAggregationCloudTest >> Mvp::OpenIdConnectRequestWithIamTokenYandex >> TDqBlockHashJoinBasicTest::TestBasicPassthrough [GOOD] >> TDqBlockHashJoinBasicTest::TestEmptyStreams >> QueueCountersTest::CountersAggregationCloudTest [GOOD] >> RedrivePolicy::RedrivePolicyValidationTest [GOOD] >> RedrivePolicy::RedrivePolicyToJsonTest [GOOD] >> RedrivePolicy::RedrivePolicyArnValidationTest [GOOD] >> SecureProtobufPrinterTest::MessageBody [GOOD] >> SecureProtobufPrinterTest::Tokens [GOOD] >> StringValidationTest::IsAlphaNumAndPunctuationTest [GOOD] >> UserCountersTest::DisableCountersTest >> EncryptedFileSerializerTest::SplitOnBlocks [GOOD] >> Mvp::OpenIdConnectRequestWithIamTokenYandex [GOOD] >> Mvp::OpenIdConnectRequestWithIamTokenNebius [GOOD] >> EncryptedFileSerializerTest::EmptyFile [GOOD] >> Mvp::OpenIdConnectNonAuthorizeRequestWithOptionMethodYandex [GOOD] >> EncryptedFileSerializerTest::ReadPartial [GOOD] >> Mvp::OpenIdConnectNonAuthorizeRequestWithOptionMethodNebius >> EncryptedFileSerializerTest::DeleteLastByte [GOOD] >> EncryptedFileSerializerTest::AddByte [GOOD] >> EncryptedFileSerializerTest::RemoveLastBlock >> Mvp::OpenIdConnectNonAuthorizeRequestWithOptionMethodNebius [GOOD] >> Mvp::OpenIdConnectSessionServiceCheckValidCookieYandex >> TDqBlockHashJoinBasicTest::TestEmptyStreams [GOOD] >> UserCountersTest::DisableCountersTest [GOOD] >> UserCountersTest::RemoveUserCountersTest [GOOD] >> UserCountersTest::CountersAggregationTest [GOOD] >> LongTxServicePublicTypes::LongTxId [GOOD] >> EncryptedFileSerializerTest::RemoveLastBlock [GOOD] >> EncryptedFileSerializerTest::ChangeAnyByte >> 
LongTxServicePublicTypes::SnapshotMaxTxId [GOOD] >> LongTxServicePublicTypes::Snapshot [GOOD] >> LongTxServicePublicTypes::SnapshotReadOnly [GOOD] >> TYardTest::TestDestructionWhileReadingChunk [GOOD] >> TYardTest::TestDestructionWhileReadingLog >> Mvp::OpenIdConnectSessionServiceCheckValidCookieYandex [GOOD] >> Mvp::OpenIdConnectSessionServiceCheckValidCookieNebius [GOOD] >> Mvp::OpenIdConnectProxyOnHttpsHost >> EncryptedFileSerializerTest::ChangeAnyByte [GOOD] >> EncryptedFileSerializerTest::BigHeaderSize [GOOD] >> EncryptedFileSerializerTest::BigBlockSize [GOOD] >> EncryptedFileSerializerTest::RestoreFromState [GOOD] >> EncryptedFileSerializerTest::IVSerialization [GOOD] >> PathsNormalizationTest::NormalizeItemPath [GOOD] >> PathsNormalizationTest::NormalizeItemPrefix [GOOD] >> PathsNormalizationTest::NormalizeExportPrefix [GOOD] >> Mirror3of4::ReplicationSmall >> Mvp::OpenIdConnectProxyOnHttpsHost [GOOD] >> Mvp::OpenIdConnectFixLocationHeader >> TestFederatedQueryHelpers::TestCheckNestingDepth [GOOD] >> TestFederatedQueryHelpers::TestTruncateIssues [GOOD] >> TestFederatedQueryHelpers::TestValidateResultSetColumns [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtreme::Simple3Put1GetMissingKeyCompaction [GOOD] Test command err: 2025-06-24T20:27:07.269106Z :BS_VDISK_PUT ERROR: blobstorage_skeleton.cpp:560: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVPut: TabletID cannot be empty; id# [0:1:10:0:0:10:1] Marker# BSVS43 2025-06-24T20:27:10.929382Z :BS_VDISK_OTHER ERROR: vdisk_context.h:143: PDiskId# 1 VDISK[0:_:0:0:0]: (0) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'PDiskId# 1 TEvLog error because PDisk State# Error, there is a terminal internal error in PDisk. Did you check EvYardInit result? Marker# BSY07 StateErrorReason# PDisk is in StateError, reason# Received TEvYardControl::Brake' 2025-06-24T20:27:10.929511Z :BS_SKELETON ERROR: blobstorage_skeletonfront.cpp:1751: PDiskId# 1 VDISK[0:_:0:0:0]: (0) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# PDiskId# 1 TEvLog error because PDisk State# Error, there is a terminal internal error in PDisk. Did you check EvYardInit result? 
Marker# BSY07 StateErrorReason# PDisk is in StateError, reason# Received TEvYardControl::Brake Marker# BSVSF03 >> Mvp::OpenIdConnectFixLocationHeader [GOOD] >> Mvp::OpenIdConnectExchangeNebius |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/comp_nodes/ut/unittest >> TDqBlockHashJoinBasicTest::TestEmptyStreams [GOOD] |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/long_tx_service/public/ut/unittest >> LongTxServicePublicTypes::SnapshotReadOnly [GOOD] |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/test_connection/ut/unittest >> Mvp::OpenIdConnectExchangeNebius [GOOD] >> Mvp::OpenIdConnectSessionServiceCheckAuthorizationFail >> TYardTest::TestDestructionWhileReadingLog [GOOD] >> TYardTest::TestFormatInfo >> Init::TWithDefaultParser [GOOD] |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/backup/common/ut/unittest >> PathsNormalizationTest::NormalizeExportPrefix [GOOD] >> Mvp::OpenIdConnectSessionServiceCheckAuthorizationFail [GOOD] >> Mvp::OpenIdConnectFullAuthorizationFlow >> StaticNodeSelectorsInit::TestStaticNodeSelectorForActorSystem >> StaticNodeSelectorsInit::TestStaticNodeSelectorForActorSystem [GOOD] >> StaticNodeSelectorsInit::TestStaticNodeSelectorWithAnotherLabel >> StaticNodeSelectorsInit::TestStaticNodeSelectorWithAnotherLabel [GOOD] >> StaticNodeSelectorsInit::TestStaticNodeSelectorInheritance [GOOD] >> StaticNodeSelectorsInit::TestStaticNodeSelectorByNodeId >> TYardTest::TestFormatInfo [GOOD] >> TYardTest::TestEnormousDisk |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/federated_query/ut/unittest >> TestFederatedQueryHelpers::TestValidateResultSetColumns [GOOD] >> Mvp::OpenIdConnectFullAuthorizationFlow [GOOD] >> Mvp::OpenIdConnectFullAuthorizationFlowAjax >> StaticNodeSelectorsInit::TestStaticNodeSelectorByNodeId [GOOD] >> StaticNodeSelectorsInit::TestStaticNodeSelectorByNodeHost [GOOD] >> StaticNodeSelectorsInit::TestStaticNodeSelectorByNodeKind |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/ymq/base/ut/unittest >> UserCountersTest::CountersAggregationTest [GOOD] >> StaticNodeSelectorsInit::TestStaticNodeSelectorByNodeKind [GOOD] >> Mvp::OpenIdConnectFullAuthorizationFlowAjax [GOOD] >> Mvp::OpenIdConnectWrongStateAuthorizationFlow >> ClosedIntervalSet::Union [GOOD] >> ClosedIntervalSet::Difference >> Mvp::OpenIdConnectWrongStateAuthorizationFlow [GOOD] >> Mvp::OpenIdConnectWrongStateAuthorizationFlowAjax >> Mvp::OpenIdConnectWrongStateAuthorizationFlowAjax [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateAuthorizationFail >> Mvp::OpenIdConnectSessionServiceCreateAuthorizationFail [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateAccessTokenInvalid >> test_transform.py::TestYamlConfigTransformations::test_basic[args0-dump] >> Mvp::OpenIdConnectSessionServiceCreateAccessTokenInvalid [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateAccessTokenInvalidAjax |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/config/init/ut/unittest >> StaticNodeSelectorsInit::TestStaticNodeSelectorByNodeKind [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateAccessTokenInvalidAjax [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateOpenIdScopeMissed >> Mvp::OpenIdConnectSessionServiceCreateOpenIdScopeMissed [GOOD] >> Mvp::OpenIdConnectAllowedHostsList >> Mvp::OpenIdConnectAllowedHostsList [GOOD] >> Mvp::OpenIdConnectHandleNullResponseFromProtectedResource >> Mvp::OpenIdConnectHandleNullResponseFromProtectedResource [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateNotFoundCookie >> 
Mvp::OpenIdConnectSessionServiceCreateNotFoundCookie [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateGetWrongStateAndWrongCookie >> Mvp::OpenIdConnectSessionServiceCreateGetWrongStateAndWrongCookie [GOOD] >> Mvp::OidcImpersonationStartFlow >> Mvp::OidcImpersonationStartFlow [GOOD] >> Mvp::OidcImpersonationStartNeedServiceAccountId >> TBsLocalRecovery::WriteRestartReadHugeDecreased [GOOD] >> TBsOther1::PoisonPill >> Mvp::OidcImpersonationStartNeedServiceAccountId [GOOD] >> Mvp::OidcImpersonationStopFlow >> test.py::test[solomon-BadDownsamplingAggregation-] >> TPDiskTest::PlainChunksWriteReadALot [GOOD] >> Mvp::OidcImpersonationStopFlow [GOOD] >> Mvp::OidcImpersonatedAccessToProtectedResource >> TSubgroupPartLayoutTest::CountEffectiveReplicas3of4 [GOOD] >> TSubgroupPartLayoutTest::CountEffectiveReplicas4of4 >> Mvp::OidcImpersonatedAccessToProtectedResource [GOOD] >> Mvp::OidcImpersonatedAccessNotAuthorized [GOOD] >> Mvp::OpenIdConnectStreamingRequestResponseYandex [GOOD] >> Mvp::OpenIdConnectStreamingRequestResponseNebius >> Mvp::OpenIdConnectStreamingRequestResponseNebius [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/mvp/oidc_proxy/ut/unittest >> Mvp::OpenIdConnectStreamingRequestResponseNebius [GOOD] Test command err: 2025-06-24T20:27:46.648827Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:46.649239Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 200 2025-06-24T20:27:46.672926Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:46.673283Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 200 2025-06-24T20:27:46.690141Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:46.690400Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 204 2025-06-24T20:27:46.728434Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:46.728759Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 204 2025-06-24T20:27:46.804207Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:46.804411Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 204 2025-06-24T20:27:46.903919Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:46.904272Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 204 2025-06-24T20:27:47.194125Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-06-24T20:27:47.194220Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:47.194469Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 400 2025-06-24T20:27:47.194508Z :MVP DEBUG: oidc_protected_page.cpp:262: Try to send request to HTTPS port 2025-06-24T20:27:47.194565Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:47.194727Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 200 2025-06-24T20:27:47.219240Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-06-24T20:27:47.219327Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:47.219593Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 400 2025-06-24T20:27:47.443218Z :MVP DEBUG: 
oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-06-24T20:27:47.443314Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:47.443580Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 307 2025-06-24T20:27:47.452626Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-06-24T20:27:47.452700Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:47.452932Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 302 2025-06-24T20:27:47.475294Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-06-24T20:27:47.475374Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:47.475609Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 302 2025-06-24T20:27:47.488080Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-06-24T20:27:47.488166Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:47.488401Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 302 2025-06-24T20:27:47.504121Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-06-24T20:27:47.504211Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:47.504462Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 302 2025-06-24T20:27:47.639884Z :MVP DEBUG: oidc_protected_page_nebius.cpp:21: Start OIDC process 2025-06-24T20:27:47.640462Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_session_cookie_79632E6F617574682E7964622D766965776572: c2Vz****aWU= (CE0CB168)) 2025-06-24T20:27:47.640535Z :MVP DEBUG: oidc_protected_page_nebius.cpp:93: Exchange session token 2025-06-24T20:27:47.640958Z :MVP DEBUG: oidc_protected_page_nebius.cpp:50: Getting access token: 200 OK 2025-06-24T20:27:47.641037Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:47.641408Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 200 2025-06-24T20:27:47.967241Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 401 2025-06-24T20:27:48.180009Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2025-06-24T20:27:48.180769Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-24T20:27:48.181299Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2025-06-24T20:27:48.191266Z :MVP DEBUG: oidc_session_create_yandex.cpp:69: SessionService.Create(): OK 2025-06-24T20:27:48.207430Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-06-24T20:27:48.207518Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:48.207805Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 200 2025-06-24T20:27:48.421937Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2025-06-24T20:27:48.422784Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-24T20:27:48.423241Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2025-06-24T20:27:48.434097Z :MVP DEBUG: oidc_session_create_yandex.cpp:69: SessionService.Create(): OK 2025-06-24T20:27:48.456198Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: 
SessionService.Check(): OK 2025-06-24T20:27:48.456279Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:48.456515Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 200 2025-06-24T20:27:48.530403Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-24T20:27:48.530674Z :MVP DEBUG: oidc_session_create.cpp:51: Check state failed: Calculated digest is not equal expected digest 2025-06-24T20:27:48.742131Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-24T20:27:48.742354Z :MVP DEBUG: oidc_session_create.cpp:51: Check state failed: Calculated digest is not equal expected digest 2025-06-24T20:27:48.901795Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-24T20:27:48.924201Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2025-06-24T20:27:49.067256Z :MVP DEBUG: oidc_session_create_yandex.cpp:79: SessionService.Create(): 401 2025-06-24T20:27:49.287782Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-24T20:27:49.288639Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2025-06-24T20:27:49.307721Z :MVP DEBUG: oidc_session_create_yandex.cpp:79: SessionService.Create(): 400 2025-06-24T20:27:49.418246Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-24T20:27:49.418709Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2025-06-24T20:27:49.578041Z :MVP DEBUG: oidc_session_create_yandex.cpp:79: SessionService.Create(): 400 2025-06-24T20:27:49.687525Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-24T20:27:49.688189Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2025-06-24T20:27:49.852681Z :MVP DEBUG: oidc_session_create_yandex.cpp:79: SessionService.Create(): 412 2025-06-24T20:27:50.158056Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2025-06-24T20:27:50.169206Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2025-06-24T20:27:50.195911Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2025-06-24T20:27:50.253558Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:50.253931Z :MVP DEBUG: oidc_protected_page.cpp:45: Can not process request to protected resource: GET /counters HTTP/1.1 Host: ydb.viewer.page Accept: */* Accept-Encoding: deflate Authorization: 2025-06-24T20:27:50.319966Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-24T20:27:50.320172Z :MVP DEBUG: oidc_session_create.cpp:43: Restore oidc context failed: Cannot find cookie ydb_oidc_cookie 2025-06-24T20:27:50.462860Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-24T20:27:50.463044Z :MVP DEBUG: oidc_session_create.cpp:51: Check state failed: Calculated digest is not equal expected digest 2025-06-24T20:27:50.652439Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:23: Start impersonation process 2025-06-24T20:27:50.652549Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_session_cookie_636C69656E745F6964: c2Vz****aWU= (CE0CB168)) 2025-06-24T20:27:50.652597Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:49: Request impersonated token 2025-06-24T20:27:50.652959Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:100: Incoming response from authorization server: 200 2025-06-24T20:27:50.653078Z :MVP DEBUG: 
oidc_impersonate_start_page_nebius.cpp:89: Set impersonated cookie: (__Host_impersonated_cookie_636C69656E745F6964: aW1w****bg== (B126DD61)) 2025-06-24T20:27:50.921507Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:23: Start impersonation process 2025-06-24T20:27:50.921620Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_session_cookie_636C69656E745F6964: c2Vz****aWU= (CE0CB168)) 2025-06-24T20:27:51.157080Z :MVP DEBUG: oidc_cleanup_page.cpp:20: Clear cookie: (__Host_impersonated_cookie_636C69656E745F6964) 2025-06-24T20:27:51.351901Z :MVP DEBUG: oidc_protected_page_nebius.cpp:21: Start OIDC process 2025-06-24T20:27:51.352013Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_session_cookie_636C69656E745F6964: c2Vz****aWU= (CE0CB168)) 2025-06-24T20:27:51.352074Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_impersonated_cookie_636C69656E745F6964: aW1w****ZQ== (1A20D8C0)) 2025-06-24T20:27:51.352117Z :MVP DEBUG: oidc_protected_page_nebius.cpp:104: Exchange impersonated token 2025-06-24T20:27:51.352432Z :MVP DEBUG: oidc_protected_page_nebius.cpp:50: Getting access token: 200 OK 2025-06-24T20:27:51.358425Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:51.363662Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 200 2025-06-24T20:27:51.414935Z :MVP DEBUG: oidc_protected_page_nebius.cpp:21: Start OIDC process 2025-06-24T20:27:51.415044Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_session_cookie_636C69656E745F6964: c2Vz****aWU= (CE0CB168)) 2025-06-24T20:27:51.415128Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_impersonated_cookie_636C69656E745F6964: aW1w****ZQ== (1A20D8C0)) 2025-06-24T20:27:51.415252Z :MVP DEBUG: oidc_protected_page_nebius.cpp:104: Exchange impersonated token 2025-06-24T20:27:51.415825Z :MVP DEBUG: oidc_protected_page_nebius.cpp:50: Getting access token: 401 OK 2025-06-24T20:27:51.415877Z :MVP DEBUG: oidc_protected_page_nebius.cpp:62: Getting access token: {"error": "bad_token"} 2025-06-24T20:27:51.415926Z :MVP DEBUG: oidc_protected_page_nebius.cpp:118: Clear impersonated cookie (__Host_impersonated_cookie_636C69656E745F6964) and retry 2025-06-24T20:27:51.444770Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:51.445208Z :MVP DEBUG: oidc_protected_page.cpp:53: Incoming incomplete response for protected resource: 200 2025-06-24T20:27:51.445354Z :MVP DEBUG: oidc_protected_page.cpp:73: Incoming data chunk for protected resource: 59 bytes 2025-06-24T20:27:51.445510Z :MVP DEBUG: oidc_protected_page.cpp:73: Incoming data chunk for protected resource: 59 bytes 2025-06-24T20:27:51.445586Z :MVP DEBUG: oidc_protected_page.cpp:73: Incoming data chunk for protected resource: 14 bytes 2025-06-24T20:27:51.445659Z :MVP DEBUG: oidc_protected_page.cpp:73: Incoming data chunk for protected resource: 0 bytes 2025-06-24T20:27:51.485332Z :MVP DEBUG: oidc_protected_page.cpp:130: Forward user request bypass OIDC 2025-06-24T20:27:51.485760Z :MVP DEBUG: oidc_protected_page.cpp:53: Incoming incomplete response for protected resource: 200 2025-06-24T20:27:51.485878Z :MVP DEBUG: oidc_protected_page.cpp:73: Incoming data chunk for protected resource: 59 bytes 2025-06-24T20:27:51.486028Z :MVP DEBUG: oidc_protected_page.cpp:73: Incoming data chunk for protected resource: 59 bytes 2025-06-24T20:27:51.486113Z :MVP DEBUG: oidc_protected_page.cpp:73: Incoming data chunk for protected resource: 14 bytes 2025-06-24T20:27:51.486186Z :MVP DEBUG: 
oidc_protected_page.cpp:73: Incoming data chunk for protected resource: 0 bytes |87.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |87.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> TPDiskTest::PlainChunksWriteReadALot [GOOD] Test command err: ... Awaiting EvPDiskStateUpdate SlotSizeInUnits# 0 NumActiveSlots# 0 (TWithBackTrace) Event queue is still empty.ydb/library/actors/testlib/test_runtime.cpp:1375: TBackTrace::Capture()+28 (0x2B5DF9C) TWithBackTrace::TWithBackTrace<>()+65 (0x6F8A381) NActors::TTestActorRuntimeBase::DispatchEventsInternal(NActors::TDispatchOptions const&, TInstant)+29118 (0x6F8637E) NActors::TTestActorRuntimeBase::WaitForEdgeEvents(std::__y1::function&)>, TSet, std::__y1::allocator> const&, TDuration)+1076 (0x6F8C674) NKikimr::NNodeWhiteboard::TEvWhiteboard::TEvPDiskStateUpdate* NActors::TTestActorRuntimeBase::GrabEdgeEventIf(TAutoPtr&, std::__y1::function, TDuration)+643 (0x23A1963) THolder NActors::TTestActorRuntimeBase::GrabEdgeEvent(TDuration)+332 (0x23A11AC) NKikimr::NTestSuiteTPDiskTest::AwaitAndCheckEvPDiskStateUpdate(NKikimr::TActorTestContext&, unsigned int, unsigned int)+1329 (0x23066C1) NKikimr::NTestSuiteTPDiskTest::TTestCasePDiskSlotSizeInUnits::Execute_(NUnitTest::TTestContext&)+1159 (0x2309B07) std::__y1::__function::__func, void ()>::operator()()+280 (0x235F698) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x2C3E546) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x2C0D969) NKikimr::NTestSuiteTPDiskTest::TCurrentTest::Execute()+1204 (0x235E864) NUnitTest::TTestFactory::Execute()+2438 (0x2C0F236) NUnitTest::RunMain(int, char**)+5213 (0x2C38A2D) ??+0 (0x7FA6266E5D90) __libc_start_main+128 (0x7FA6266E5E40) _start+41 (0x222A029) seed# 1750796855434266 total_speed# 0.2147483648 GB/s |87.2%| [TA] {RESULT} $(B)/ydb/core/util/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.2%| [TS] {RESULT} ydb/core/fq/libs/db_id_async_resolver_impl/ut/unittest |87.2%| [TS] {RESULT} ydb/core/config/validation/ut/unittest |87.2%| [TS] {RESULT} ydb/library/yaml_config/ut/unittest |87.3%| [TM] {RESULT} ydb/core/tablet_flat/ut_large/unittest |87.3%| [TS] {RESULT} ydb/core/ymq/base/ut/unittest |87.3%| [TS] {RESULT} ydb/core/kqp/federated_query/ut/unittest |87.3%| [TS] {RESULT} ydb/core/fq/libs/test_connection/ut/unittest |87.3%| [TS] {RESULT} ydb/core/backup/common/ut/unittest |87.3%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.3%| [TM] {RESULT} ydb/library/yql/dq/comp_nodes/ut/unittest |87.3%| [TS] {RESULT} ydb/core/tx/long_tx_service/public/ut/unittest |87.3%| [TM] {RESULT} ydb/core/tablet_flat/ut_util/unittest |87.3%| [TS] {RESULT} ydb/library/yql/providers/generic/provider/ut/pushdown/unittest |87.3%| [TA] {RESULT} $(B)/ydb/core/base/ut_auth/test-results/unittest/{meta.json ... results_accumulator.log} |87.3%| [TM] {RESULT} ydb/core/blobstorage/ut_group/unittest |87.3%| [TA] {RESULT} $(B)/ydb/core/sys_view/service/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |87.3%| [TS] {RESULT} ydb/mvp/meta/ut/unittest |87.3%| [TS] {RESULT} ydb/core/log_backend/ut/unittest |87.3%| [TS] {RESULT} ydb/library/yql/providers/s3/common/ut/unittest |87.3%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.3%| [TS] {RESULT} ydb/library/yql/providers/s3/provider/ut/unittest |87.3%| [TS] {RESULT} ydb/core/external_sources/object_storage/inference/ut/gtest |87.3%| [TS] {RESULT} ydb/core/pgproxy/ut/unittest |87.3%| [TS] {RESULT} ydb/library/yql/providers/s3/actors/ut/unittest |87.3%| [TS] {RESULT} ydb/mvp/core/ut/unittest |87.3%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/query/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.3%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.3%| [TS] {RESULT} ydb/core/blobstorage/vdisk/defrag/ut/unittest |87.3%| [TS] {RESULT} ydb/core/io_formats/arrow/scheme/ut/unittest |87.3%| [TS] {RESULT} ydb/core/tx/sequenceshard/public/ut/unittest |87.3%| [TS] {RESULT} ydb/core/blobstorage/base/ut/gtest |87.3%| [TS] {RESULT} ydb/core/tx/scheme_board/ut_double_indexed/unittest |87.3%| [TS] {RESULT} ydb/core/metering/ut/unittest |87.3%| [TM] {RESULT} ydb/core/fq/libs/control_plane_storage/internal/ut/unittest |87.3%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/huge/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.3%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/common/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.3%| [TS] {RESULT} ydb/core/external_sources/ut/unittest |87.3%| [TS] {RESULT} ydb/core/config/validation/column_shard_config_validator_ut/unittest |87.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |87.3%| [TS] {RESULT} ydb/mvp/oidc_proxy/ut/unittest |87.3%| [TS] {RESULT} ydb/core/config/validation/auth_config_validator_ut/unittest |87.3%| [TS] {RESULT} ydb/core/base/generated/ut/unittest |87.3%| [TS] {RESULT} ydb/core/config/init/ut/unittest >> TStateStorageConfig::UniformityTest [GOOD] |87.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/libtx-columnshard-engines.a |87.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/libtx-columnshard-engines.a |87.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/libtx-columnshard-engines.a >> BlobDepotWithTestShard::PlainGroup [GOOD] |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TStateStorageConfig::UniformityTest [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_basic[args0-dump] [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_basic[args0-dump_ds_init] |87.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/libengines-changes-abstract.a |87.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/libengines-changes-abstract.a |87.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/libengines-changes-abstract.a >> TBsOther1::PoisonPill [GOOD] >> TBsOther1::ChaoticParallelWrite |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_testshard/unittest >> BlobDepotWithTestShard::PlainGroup [GOOD] |87.4%| [TA] $(B)/ydb/core/base/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |87.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot |87.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot |87.4%| [TM] {RESULT} ydb/core/blobstorage/ut_testshard/unittest |87.4%| [TA] {RESULT} $(B)/ydb/core/base/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot |87.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |87.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |87.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |87.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/libcore-tx-columnshard.a |87.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/libcore-tx-columnshard.a |87.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/libcore-tx-columnshard.a |87.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |87.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |87.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |87.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/executer_actor/libcore-kqp-executer_actor.a >> BlobDepot::BasicPutAndGet |87.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/core-blobstorage-ut_blobstorage-ut_statestorage >> TYardTest::TestSysLogReordering [GOOD] >> TYardTest::TestStartingPoints |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |87.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/core-blobstorage-ut_blobstorage-ut_statestorage >> test.py::test[solomon-BadDownsamplingAggregation-] [GOOD] |87.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/core-blobstorage-ut_blobstorage-ut_statestorage >> test.py::test[solomon-BadDownsamplingDisabled-] >> BSCStopPDisk::PDiskStop >> TPDiskRaces::Decommit [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> TPDiskRaces::DecommitWithInflight |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |87.4%| [AR] {RESULT} $(B)/ydb/core/kqp/executer_actor/libcore-kqp-executer_actor.a |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |87.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |87.4%| [LD] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |87.4%| [LD] 
{BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |87.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/executer_actor/libcore-kqp-executer_actor.a >> test_transform.py::TestYamlConfigTransformations::test_basic[args0-dump_ds_init] [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> CheckIntegrityMirror3dc::PlacementOk >> test_transform.py::TestYamlConfigTransformations::test_basic[args1-dump] >> TYardTest::TestStartingPoints [GOOD] >> TYardTest::TestWhiteboard >> BSCStopPDisk::PDiskStop [GOOD] >> Dictionary::Simple [GOOD] >> Dictionary::ComparePayloadAndFull >> CheckIntegrityMirror3dc::PlacementOkWithErrors >> CheckIntegrityBlock42::PlacementOkWithErrors >> CheckIntegrityBlock42::DataOk >> CheckIntegrityMirror3dc::PlacementBlobIsLost >> TBsVDiskRepl3::ReplPerf [GOOD] >> TStateStorageRingGroupState::TestProxyNotifyReplicaConfigChanged1 >> CheckIntegrityBlock42::DataErrorAdditionalUnequalParts >> BlobDepot::BasicPutAndGet [GOOD] >> BlobDepot::TestBlockedEvGetRequest >> CheckIntegrityBlock42::PlacementOk >> CheckIntegrityBlock42::PlacementWrongDisks >> CheckIntegrityMirror3of4::PlacementOk ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> BSCStopPDisk::PDiskStop [GOOD] Test command err: RandomSeed# 17373889083741053673 >> TYardTest::TestWhiteboard [GOOD] >> TYardTest::TestMultiYardLogLatency >> TStateStorageRingGroupState::TestProxyNotifyReplicaConfigChanged1 [GOOD] >> CheckIntegrityBlock42::PlacementBlobIsLost |87.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |87.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |87.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication >> TStateStorageRingGroupState::TestProxyConfigMismatch |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> CheckIntegrityBlock42::DataOk [GOOD] >> CheckIntegrityBlock42::DataOkAdditionalEqualParts |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> CheckIntegrityMirror3dc::PlacementOk [GOOD] >> CheckIntegrityMirror3dc::PlacementOkHandoff ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRepl3::ReplPerf [GOOD] Test command err: 2025-06-24T20:27:24.466407Z :BS_SYNCER ERROR: guid_recovery.cpp:714: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:27:24.587323Z :BS_SYNCER ERROR: guid_recovery.cpp:767: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 1672061976270585840] 2025-06-24T20:27:25.711611Z :BS_SYNCER ERROR: blobstorage_osiris.cpp:203: PDiskId# 4 VDISK[0:_:0:1:1]: (0) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-24T20:27:38.151828Z :BS_SYNCER ERROR: 
guid_recovery.cpp:714: PDiskId# 4 VDISK[0:_:0:3:0]: (0) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:27:38.231260Z :BS_SYNCER ERROR: guid_recovery.cpp:767: PDiskId# 4 VDISK[0:_:0:3:0]: (0) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 11180000377908980758] 2025-06-24T20:27:38.287796Z :BS_SYNCER ERROR: blobstorage_osiris.cpp:203: PDiskId# 4 VDISK[0:_:0:3:0]: (0) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-24T20:27:55.507827Z :BS_SYNCER ERROR: guid_recovery.cpp:714: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:27:55.635452Z :BS_SYNCER ERROR: guid_recovery.cpp:767: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 3053654359292016325] 2025-06-24T20:27:56.186713Z :BS_SYNCER ERROR: blobstorage_osiris.cpp:203: PDiskId# 4 VDISK[0:_:0:1:1]: (0) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> SelfHealActorTest::SingleErrorDisk |87.4%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/test-results/unittest/{meta.json ... results_accumulator.log} >> BsControllerTest::SelfHealBlock4Plus2 >> BsControllerTest::TestLocalBrokenRelocation >> TStateStorageRingGroupState::TestProxyConfigMismatch [GOOD] >> TStateStorageRingGroupState::TestProxyConfigMismatchNotSent >> SelfHealActorTest::SingleErrorDisk [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> TStateStorageRingGroupState::TestBoardConfigMismatch ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TStateStorageRingGroupState::TestProxyNotifyReplicaConfigChanged1 [GOOD] Test command err: RandomSeed# 11661436848280525362 2025-06-24T20:28:03.940932Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639238 Sender# [1:214:30] SessionId# [2:131:1] Cookie# 2820632152673126976 2025-06-24T20:28:03.941001Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 1 SessionId# [2:131:1] Inserted# false Subscription# {SessionId# [2:131:1] SubscriptionCookie# 0} NextSubscribeCookie# 3 2025-06-24T20:28:03.954688Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:407} TEvNodeConfigPush NodeId# 1 Cookie# 2820632152673126976 SessionId# [2:131:1] Binding# {9.5/18320475540771259230@[2:126:8]} Record# {BoundNodes { NodeId { Host: "127.0.0.7" Port: 19001 NodeId: 7 } Meta { Fingerprint: "\3403\207\365\032>ClusterStateGeneration=0 msgGeneration=0 Info->ClusterStateGuid=2 msgGuid=0 2025-06-24T20:28:04.214826Z 1 00h00m23.635116s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:04.214862Z 1 00h00m23.635116s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:04.214904Z 1 00h00m23.635116s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 0 ClusterStateGuid: 0 2025-06-24T20:28:04.214942Z 1 00h00m23.635116s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup 
ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 2} 2025-06-24T20:28:04.214966Z 1 00h00m23.635116s :STATESTORAGE DEBUG: StateStorageProxy TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 clusterStateGeneration=0 Info->ClusterStateGuid=0 clusterStateGuid=2 2025-06-24T20:28:04.215022Z 1 00h00m23.635116s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 0 ClusterStateGuid: 2 2025-06-24T20:28:04.235499Z 1 00h00m30.200000s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:04.235588Z 1 00h00m30.200000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-24T20:28:04.235632Z 1 00h00m30.200000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-24T20:28:04.235659Z 1 00h00m30.200000s :STATESTORAGE DEBUG: Replica TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 msgGeneration=0 Info->ClusterStateGuid=2 msgGuid=0 2025-06-24T20:28:04.235712Z 1 00h00m30.200000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-24T20:28:04.235760Z 1 00h00m30.200000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-24T20:28:04.235821Z 1 00h00m30.200000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 0 ClusterStateGuid: 0 2025-06-24T20:28:04.235875Z 1 00h00m30.200000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 2 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-24T20:28:04.235902Z 1 00h00m30.200000s :STATESTORAGE DEBUG: StateStorageProxy TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 clusterStateGeneration=0 Info->ClusterStateGuid=0 clusterStateGuid=2 2025-06-24T20:28:04.235978Z 1 00h00m30.200000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 0 ClusterStateGuid: 2 2025-06-24T20:28:04.251825Z 1 00h00m40.300000s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:04.251916Z 1 00h00m40.300000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-24T20:28:04.251977Z 1 00h00m40.300000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-24T20:28:04.252007Z 1 00h00m40.300000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-24T20:28:04.252062Z 1 00h00m40.300000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-24T20:28:04.252120Z 1 00h00m40.300000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup 
ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-24T20:28:04.264226Z 1 00h00m50.252524s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:04.264327Z 1 00h00m50.252524s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:04.264404Z 1 00h00m50.252524s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:04.264444Z 1 00h00m50.252524s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:04.264478Z 1 00h00m50.252524s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:04.264534Z 1 00h00m50.252524s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 1 ClusterStateGuid: 0} 2025-06-24T20:28:04.264557Z 1 00h00m50.252524s :STATESTORAGE DEBUG: StateStorageProxy TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 clusterStateGeneration=1 Info->ClusterStateGuid=0 clusterStateGuid=0 2025-06-24T20:28:04.264621Z 1 00h00m50.252524s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 1 ClusterStateGuid: 0 2025-06-24T20:28:04.264839Z 1 00h00m50.400000s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:04.264896Z 1 00h00m50.400000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-24T20:28:04.264933Z 1 00h00m50.400000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-24T20:28:04.264986Z 1 00h00m50.400000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-24T20:28:04.265037Z 1 00h00m50.400000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-24T20:28:04.265076Z 1 00h00m50.400000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 1 ClusterStateGuid: 0 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-24T20:28:04.265098Z 1 00h00m50.400000s :STATESTORAGE DEBUG: StateStorageProxy TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 clusterStateGeneration=1 Info->ClusterStateGuid=0 clusterStateGuid=0 2025-06-24T20:28:04.265155Z 1 00h00m50.400000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 1 ClusterStateGuid: 0 |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest |87.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor >> 
BlobDepot::TestBlockedEvGetRequest [GOOD] >> BlobDepot::BasicRange |87.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk |87.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor |87.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor >> CheckIntegrityBlock42::PlacementWrongDisks [GOOD] >> TStateStorageRingGroupState::TestProxyConfigMismatchNotSent [GOOD] >> CheckIntegrityMirror3dc::DataErrorOneCopy >> TStateStorageRingGroupState::TestBoardConfigMismatch [GOOD] >> CheckIntegrityBlock42::DataErrorAdditionalUnequalParts [GOOD] >> CheckIntegrityBlock42::DataErrorSixPartsOneBroken |87.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest |87.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk |87.5%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/test-results/unittest/{meta.json ... results_accumulator.log} >> CheckIntegrityMirror3of4::PlacementOk [GOOD] >> CheckIntegrityMirror3of4::PlacementMissingParts >> CheckIntegrityBlock42::PlacementOkWithErrors [GOOD] >> CheckIntegrityBlock42::PlacementWithErrorsOnBlobDisks >> CheckIntegrityBlock42::PlacementOk [GOOD] >> CheckIntegrityBlock42::PlacementOkHandoff |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> SelfHealActorTest::SingleErrorDisk [GOOD] >> Dictionary::ComparePayloadAndFull [GOOD] >> Hash::ScalarBinaryHash [GOOD] >> Hash::ScalarCTypeHash [GOOD] >> Hash::ScalarCompositeHash [GOOD] >> ProgramStep::Round0 [GOOD] >> ProgramStep::Round1 [GOOD] >> ProgramStep::Filter >> CheckIntegrityMirror3dc::PlacementOkWithErrors [GOOD] >> CheckIntegrityMirror3dc::PlacementOkWithErrorsOnBlobDisks >> CheckIntegrityMirror3dc::PlacementBlobIsLost [GOOD] >> CheckIntegrityMirror3dc::PlacementDisintegrated >> CheckIntegrityBlock42::PlacementBlobIsLost [GOOD] >> CheckIntegrityBlock42::PlacementAllOnHandoff ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TStateStorageRingGroupState::TestProxyConfigMismatch [GOOD] Test command err: RandomSeed# 17176958395194957203 2025-06-24T20:28:05.082263Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [2:221:20] SessionId# [6:118:2] Cookie# 12518999677662716532 2025-06-24T20:28:05.082345Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 2 SessionId# [6:118:2] Inserted# false Subscription# {SessionId# [6:118:2] SubscriptionCookie# 0} NextSubscribeCookie# 2 2025-06-24T20:28:05.098678Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 2 Cookie# 12518999677662716532 SessionId# [6:118:2] Binding# {2.2/12518999677662716532@[6:118:2]} Record# {RootNodeId: 3 } 2025-06-24T20:28:05.098801Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:319} Binding updated Binding# {2.3/12518999677662716532@[6:118:2]} PrevRootNodeId# 2 ConfigUpdate# false 2025-06-24T20:28:05.098894Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639238 
Sender# [5:242:20] SessionId# [3:91:4] Cookie# 5552613666136286376 2025-06-24T20:28:05.098953Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 5 SessionId# [3:91:4] Inserted# false Subscription# {SessionId# [3:91:4] SubscriptionCookie# 0} NextSubscribeCookie# 6 2025-06-24T20:28:05.099257Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:407} TEvNodeConfigPush NodeId# 5 Cookie# 5552613666136286376 SessionId# [3:91:4] Binding# {6.0/13936628741451892838@[0:0:0]} Record# {BoundNodes { NodeId { Host: "127.0.0.6" Port: 19001 NodeId: 6 } Meta { Fingerprint: "\3403\207\365\032>ClusterStateGeneration=0 clusterStateGeneration=1 Info->ClusterStateGuid=0 clusterStateGuid=2 2025-06-24T20:28:05.427911Z 1 00h00m19.319493s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 1 ClusterStateGuid: 2 2025-06-24T20:28:05.429601Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:05.429697Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-24T20:28:05.429746Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-24T20:28:05.429778Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-24T20:28:05.429854Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-24T20:28:05.429915Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 1 ClusterStateGuid: 2 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-24T20:28:05.429945Z 1 00h00m20.100000s :STATESTORAGE DEBUG: StateStorageProxy TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 clusterStateGeneration=1 Info->ClusterStateGuid=0 clusterStateGuid=2 2025-06-24T20:28:05.430031Z 1 00h00m20.100000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 1 ClusterStateGuid: 2 2025-06-24T20:28:05.430183Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-24T20:28:05.430227Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=1 msgGeneration=3 Info->ClusterStateGuid=2 msgGuid=4 2025-06-24T20:28:05.430285Z 1 00h00m20.100000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 3 ClusterStateGuid: 4 2025-06-24T20:28:05.430377Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaUpdate TabletID: 72057594037932033} 2025-06-24T20:28:05.430414Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=1 msgGeneration=3 Info->ClusterStateGuid=2 msgGuid=4 2025-06-24T20:28:05.430516Z 1 00h00m20.100000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 
ClusterStateGeneration: 3 ClusterStateGuid: 4 2025-06-24T20:28:05.452188Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: NKikimrStateStorage.TEvCleanup TabletID: 72057594037932033 ProposedLeader { RawX1: 0 RawX2: 0 } ClusterStateGeneration: 3 ClusterStateGuid: 4 2025-06-24T20:28:05.452273Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=1 msgGeneration=3 Info->ClusterStateGuid=2 msgGuid=4 2025-06-24T20:28:05.452356Z 1 00h00m20.100000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 3 ClusterStateGuid: 4 >> ProgramStep::Filter [GOOD] >> ProgramStep::Add [GOOD] >> ProgramStep::Substract [GOOD] >> ProgramStep::Multiply [GOOD] >> ProgramStep::Divide >> BsControllerTest::SelfHealMirror3dc >> TYardTest::TestMultiYardLogLatency [GOOD] >> TYardTest::TestMultiYardStartingPoints >> ProgramStep::Divide [GOOD] >> ProgramStep::Gcd [GOOD] >> ProgramStep::Lcm [GOOD] >> ProgramStep::Mod [GOOD] >> ProgramStep::ModOrZero [GOOD] >> ProgramStep::Abs [GOOD] >> ProgramStep::Negate [GOOD] >> ProgramStep::Compares >> ProgramStep::Compares [GOOD] >> ProgramStep::Logic0 [GOOD] >> ProgramStep::Logic1 >> CheckIntegrityBlock42::DataOkAdditionalEqualParts [GOOD] >> CheckIntegrityBlock42::DataErrorSixPartsTwoBroken |87.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |87.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |87.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk >> ProgramStep::Logic1 [GOOD] >> ProgramStep::StartsWith [GOOD] >> ProgramStep::EndsWith [GOOD] >> ProgramStep::MatchSubstring [GOOD] >> ProgramStep::StartsWithIgnoreCase [GOOD] >> ProgramStep::EndsWithIgnoreCase >> ProgramStep::EndsWithIgnoreCase [GOOD] >> ProgramStep::MatchSubstringIgnoreCase [GOOD] >> ProgramStep::ScalarTest [GOOD] >> ProgramStep::TestValueFromNull >> CheckIntegrityMirror3dc::PlacementOkHandoff [GOOD] >> CheckIntegrityMirror3dc::PlacementMissingParts >> ProgramStep::TestValueFromNull [GOOD] >> ProgramStep::MergeFilterSimple >> TBsLocalRecovery::StartStopNotEmptyDB [GOOD] >> TBsLocalRecovery::WriteRestartRead >> ProgramStep::MergeFilterSimple [GOOD] >> ProgramStep::Projection [GOOD] >> ProgramStep::MinMax [GOOD] >> ProgramStep::Sum [GOOD] >> ProgramStep::SumGroupBy ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TStateStorageRingGroupState::TestBoardConfigMismatch [GOOD] Test command err: RandomSeed# 6021088541599882106 2025-06-24T20:28:06.039690Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639238 Sender# [2:221:20] SessionId# [8:124:2] Cookie# 7944479148857429332 2025-06-24T20:28:06.039773Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 2 SessionId# [8:124:2] Inserted# false Subscription# {SessionId# [8:124:2] SubscriptionCookie# 0} NextSubscribeCookie# 6 2025-06-24T20:28:06.045813Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:407} TEvNodeConfigPush NodeId# 2 Cookie# 7944479148857429332 SessionId# [8:124:2] Binding# Record# {DeletedBoundNodeIds { Host: "127.0.0.6" Port: 19001 NodeId: 6 } DeletedBoundNodeIds { Host: "127.0.0.4" Port: 19001 NodeId: 4 } } 2025-06-24T20:28:06.045924Z 8 
00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 2 SessionId# [8:124:2] Inserted# false Subscription# {SessionId# [8:124:2] SubscriptionCookie# 0} NextSubscribeCookie# 6 2025-06-24T20:28:06.045973Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 2 NodeId# 127.0.0.6:19001/6 2025-06-24T20:28:06.046016Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 2 NodeId# 127.0.0.4:19001/4 2025-06-24T20:28:06.058850Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-24T20:28:06.058953Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-24T20:28:06.058986Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-24T20:28:06.062334Z 1 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639257 Sender# [1:304:36] SessionId# [0:0:0] Cookie# 0 2025-06-24T20:28:06.062563Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:06.062627Z 7 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639238 Sender# [1:214:30] SessionId# [7:146:1] Cookie# 14613221473284509051 2025-06-24T20:28:06.062678Z 7 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 1 SessionId# [7:146:1] Inserted# false Subscription# {SessionId# [7:146:1] SubscriptionCookie# 0} NextSubscribeCookie# 5 2025-06-24T20:28:06.062774Z 7 00h00m00.002048s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:407} TEvNodeConfigPush NodeId# 1 Cookie# 14613221473284509051 SessionId# [7:146:1] Binding# {2.8/2556410023781839816@[7:121:2]} Record# {CacheUpdate { } } 2025-06-24T20:28:06.062826Z 7 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 1 SessionId# [7:146:1] Inserted# false Subscription# {SessionId# [7:146:1] SubscriptionCookie# 0} NextSubscribeCookie# 5 2025-06-24T20:28:06.062894Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:06.062930Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:06.062956Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:06.063069Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.063119Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.063161Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.063267Z 2 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639238 Sender# [7:256:20] SessionId# [2:120:6] Cookie# 2556410023781839816 2025-06-24T20:28:06.063300Z 2 00h00m00.002048s :BS_NODE DEBUG: 
{NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 7 SessionId# [2:120:6] Inserted# false Subscription# {SessionId# [2:120:6] SubscriptionCookie# 0} NextSubscribeCookie# 5 2025-06-24T20:28:06.063356Z 2 00h00m00.002048s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:407} TEvNodeConfigPush NodeId# 7 Cookie# 2556410023781839816 SessionId# [2:120:6] Binding# {8.8/7944479148857429332@[2:123:7]} Record# {CacheUpdate { } } 2025-06-24T20:28:06.063394Z 2 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 7 SessionId# [2:120:6] Inserted# false Subscription# {SessionId# [2:120:6] SubscriptionCookie# 0} NextSubscribeCookie# 5 2025-06-24T20:28:06.063445Z 1 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [7:256:20] SessionId# [1:145:6] Cookie# 14613221473284509051 2025-06-24T20:28:06.063496Z 1 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 7 SessionId# [1:145:6] Inserted# false Subscription# {SessionId# [1:145:6] SubscriptionCookie# 0} NextSubscribeCookie# 2 2025-06-24T20:28:06.063568Z 1 00h00m00.002048s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 7 Cookie# 14613221473284509051 SessionId# [1:145:6] Binding# {7.8/14613221473284509051@[1:145:6]} Record# {RootNodeId: 8 CacheUpdate { } } 2025-06-24T20:28:06.063623Z 3 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [7:256:20] SessionId# [3:97:6] Cookie# 1734054898340593477 2025-06-24T20:28:06.063651Z 3 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 7 SessionId# [3:97:6] Inserted# false Subscription# {SessionId# [3:97:6] SubscriptionCookie# 0} NextSubscribeCookie# 2 2025-06-24T20:28:06.063692Z 3 00h00m00.002048s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 7 Cookie# 1734054898340593477 SessionId# [3:97:6] Binding# {7.8/1734054898340593477@[3:97:6]} Record# {RootNodeId: 8 CacheUpdate { } } 2025-06-24T20:28:06.063722Z 5 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [7:256:20] SessionId# [5:57:6] Cookie# 3900998518498012161 2025-06-24T20:28:06.063747Z 5 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 7 SessionId# [5:57:6] Inserted# false Subscription# {SessionId# [5:57:6] SubscriptionCookie# 0} NextSubscribeCookie# 3 2025-06-24T20:28:06.063802Z 5 00h00m00.002048s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 7 Cookie# 3900998518498012161 SessionId# [5:57:6] Binding# {7.8/3900998518498012161@[5:57:6]} Record# {RootNodeId: 8 CacheUpdate { } } 2025-06-24T20:28:06.063946Z 8 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639238 Sender# [2:221:20] SessionId# [8:124:2] Cookie# 7944479148857429332 2025-06-24T20:28:06.063973Z 8 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 2 SessionId# [8:124:2] Inserted# false Subscription# {SessionId# [8:124:2] SubscriptionCookie# 0} NextSubscribeCookie# 6 2025-06-24T20:28:06.064008Z 8 00h00m00.002048s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:407} TEvNodeConfigPush NodeId# 2 Cookie# 7944479148857429332 SessionId# [8:124:2] Binding# Record# {CacheUpdate { } } 2025-06-24T20:28:06.064037Z 8 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 2 SessionId# [8:124:2] Inserted# 
false Subscription# {SessionId# [8:124:2] SubscriptionCookie# 0} NextSubscribeCookie# 6 2025-06-24T20:28:06.064114Z 7 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [2:221:20] SessionId# [7:121:2] Cookie# 2556410023781839816 2025-06-24T20:28:06.064145Z 7 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 2 SessionId# [7:121:2] Inserted# false Subscription# {SessionId# [7:121:2] SubscriptionCookie# 0} NextSubscribeCookie# 5 2025-06-24T20:28:06.064194Z 7 00h00m00.002048s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 2 Cookie# 2556410023781839816 SessionId# [7:121:2] Binding# {2.8/2556410023781839816@[7:121:2]} Record# {RootNodeId: 8 CacheUpdate { } } 2025-06-24T20:28:06.064261Z 9 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [5:242:20] SessionId# [9:64:5] Cookie# 4306583299964691626 2025-06-24T20:28:06.064291Z 9 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 5 SessionId# [9:64:5] Inserted# false Subscription# {SessionId# [9:64:5] SubscriptionCookie# 0} NextSubscribeCookie# 3 2025-06-24T20:28:06.064335Z 9 00h00m00.002048s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 5 Cookie# 4306583299964691626 SessionId# [9:64:5] Binding# {5.8/4306583299964691626@[9:64:5]} Record# {RootNodeId: 8 CacheUpdate { } } 2025-06-24T20:28:06.064367Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:06.064496Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:06.064528Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:06.064555Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:06.064601Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.064638Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.064663Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.064708Z 2 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [8:263:20] SessionId# [2:123:7] Cookie# 7944479148857429332 2025-06-24T20:28:06.064749Z 2 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 8 SessionId# [2:123:7] Inserted# false Subscription# {SessionId# [2:123:7] SubscriptionCookie# 0} NextSubscribeCookie# 5 2025-06-24T20:28:06.064793Z 2 00h00m00.002048s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 8 Cookie# 7944479148857429332 SessionId# [2:123:7] Binding# {8.8/7944479148857429332@[2:123:7]} Record# {RootNodeId: 8 CacheUpdate { } } 2025-06-24T20:28:06.064833Z 4 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [8:263:20] 
SessionId# [4:79:7] Cookie# 7807688575265139950 2025-06-24T20:28:06.064859Z 4 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 8 SessionId# [4:79:7] Inserted# false Subscription# {SessionId# [4:79:7] SubscriptionCookie# 0} NextSubscribeCookie# 6 2025-06-24T20:28:06.064946Z 4 00h00m00.002048s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 8 Cookie# 7807688575265139950 SessionId# [4:79:7] Binding# {8.8/7807688575265139950@[4:79:7]} Record# {RootNodeId: 8 Ca ... tateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.074320Z 1 00h00m00.082032s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:06.074361Z 1 00h00m00.082032s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:06.074386Z 1 00h00m00.082032s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:06.074410Z 1 00h00m00.082032s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:06.074438Z 1 00h00m00.082032s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.074474Z 1 00h00m00.082032s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.074512Z 1 00h00m00.082032s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.074639Z 1 00h00m00.182670s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:06.074690Z 1 00h00m00.182670s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:06.074722Z 1 00h00m00.182670s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:06.074768Z 1 00h00m00.182670s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:06.074797Z 1 00h00m00.182670s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.074821Z 1 00h00m00.182670s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.074845Z 1 00h00m00.182670s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.075051Z 1 00h00m00.408099s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:06.075086Z 1 00h00m00.408099s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:06.075111Z 1 00h00m00.408099s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 
2025-06-24T20:28:06.075141Z 1 00h00m00.408099s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:06.075523Z 1 00h00m00.408099s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.075567Z 1 00h00m00.408099s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.075597Z 1 00h00m00.408099s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.075866Z 1 00h00m00.881499s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:06.075924Z 1 00h00m00.881499s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:06.075951Z 1 00h00m00.881499s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:06.075974Z 1 00h00m00.881499s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:06.076009Z 1 00h00m00.881499s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.076043Z 1 00h00m00.881499s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.076073Z 1 00h00m00.881499s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.077129Z 1 00h00m01.875639s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:06.077194Z 1 00h00m01.875639s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:06.077226Z 1 00h00m01.875639s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:06.077251Z 1 00h00m01.875639s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:06.077280Z 1 00h00m01.875639s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.077309Z 1 00h00m01.875639s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.077363Z 1 00h00m01.875639s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.078980Z 1 00h00m03.943450s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:06.079048Z 1 00h00m03.943450s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup 
TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:06.079094Z 1 00h00m03.943450s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:06.079119Z 1 00h00m03.943450s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:06.079279Z 1 00h00m03.943450s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.079311Z 1 00h00m03.943450s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.079345Z 1 00h00m03.943450s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.082883Z 1 00h00m08.327209s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:06.082943Z 1 00h00m08.327209s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:06.082977Z 1 00h00m08.327209s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:06.083006Z 1 00h00m08.327209s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:06.083039Z 1 00h00m08.327209s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.083078Z 1 00h00m08.327209s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.083107Z 1 00h00m08.327209s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.086245Z 1 00h00m10.002048s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 1 ClusterStateGuid: 0 2025-06-24T20:28:06.086339Z 1 00h00m10.002048s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 0 ClusterStateGuid: 1 2025-06-24T20:28:06.093300Z 1 00h00m17.533102s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:06.093426Z 1 00h00m17.533102s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:06.093477Z 1 00h00m17.533102s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:06.093510Z 1 00h00m17.533102s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:06.093556Z 1 00h00m17.533102s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.093612Z 1 00h00m17.533102s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 
72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.093665Z 1 00h00m17.533102s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.097157Z 1 00h00m20.100000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 5 ClusterStateGuid: 6 2025-06-24T20:28:06.106425Z 1 00h00m30.182670s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 5 ClusterStateGuid: 6 2025-06-24T20:28:06.113197Z 1 00h00m38.338420s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:06.113316Z 1 00h00m38.338420s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:06.113374Z 1 00h00m38.338420s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:06.113405Z 1 00h00m38.338420s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:06.113463Z 1 00h00m38.338420s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.113520Z 1 00h00m38.338420s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:06.113561Z 1 00h00m38.338420s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TStateStorageRingGroupState::TestProxyConfigMismatchNotSent [GOOD] Test command err: RandomSeed# 15977618788613196105 2025-06-24T20:28:05.821031Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639240 Sender# [1:214:30] SessionId# [7:146:1] Cookie# 4450129049012009741 2025-06-24T20:28:05.821109Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 1 SessionId# [7:146:1] Inserted# false Subscription# {SessionId# [7:146:1] SubscriptionCookie# 0} NextSubscribeCookie# 3 2025-06-24T20:28:05.821162Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC16@distconf_binding.cpp:529} TEvNodeConfigUnbind NodeId# 1 Cookie# 4450129049012009741 SessionId# [7:146:1] Binding# {9.1/10766564161079319924@[7:31:8]} 2025-06-24T20:28:05.821220Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC06@distconf_binding.cpp:538} UnbindNode NodeId# 1 Reason# explicit unbind request 2025-06-24T20:28:05.821263Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.6:19001/6 2025-06-24T20:28:05.821335Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.7:19001/7 2025-06-24T20:28:05.821362Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.9:19001/9 2025-06-24T20:28:05.821389Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.4:19001/4 
2025-06-24T20:28:05.821426Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.8:19001/8 2025-06-24T20:28:05.821454Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.1:19001/1 2025-06-24T20:28:05.821492Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.2:19001/2 2025-06-24T20:28:05.821518Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.5:19001/5 2025-06-24T20:28:05.821542Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.3:19001/3 2025-06-24T20:28:05.821608Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC55@distconf_binding.cpp:219} UnsubscribeInterconnect NodeId# 1 Subscription# {SessionId# [7:146:1] SubscriptionCookie# 0} 2025-06-24T20:28:05.821660Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [1:214:30] SessionId# [2:131:1] Cookie# 2624042491857690228 2025-06-24T20:28:05.821691Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 1 SessionId# [2:131:1] Inserted# false Subscription# {SessionId# [2:131:1] SubscriptionCookie# 0} NextSubscribeCookie# 3 2025-06-24T20:28:05.828967Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 1 Cookie# 2624042491857690228 SessionId# [2:131:1] Binding# {1.9/2624042491857690228@[2:131:1]} Record# {RootNodeId: 1 } 2025-06-24T20:28:05.829073Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:319} Binding updated Binding# {1.1/2624042491857690228@[2:131:1]} PrevRootNodeId# 9 ConfigUpdate# false 2025-06-24T20:28:05.829165Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [1:214:30] SessionId# [6:143:1] Cookie# 6832257350717492539 2025-06-24T20:28:05.829233Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 1 SessionId# [6:143:1] Inserted# false Subscription# {SessionId# [6:143:1] SubscriptionCookie# 0} NextSubscribeCookie# 5 2025-06-24T20:28:05.829321Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 1 Cookie# 6832257350717492539 SessionId# [6:143:1] Binding# {1.9/6832257350717492539@[6:143:1]} Record# {RootNodeId: 1 } 2025-06-24T20:28:05.829356Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:319} Binding updated Binding# {1.1/6832257350717492539@[6:143:1]} PrevRootNodeId# 9 ConfigUpdate# false 2025-06-24T20:28:05.829477Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639240 Sender# [9:270:20] SessionId# [2:126:8] Cookie# 18372537391714817499 2025-06-24T20:28:05.829506Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 9 SessionId# [2:126:8] Inserted# false Subscription# {SessionId# [2:126:8] SubscriptionCookie# 0} NextSubscribeCookie# 3 2025-06-24T20:28:05.829567Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC16@distconf_binding.cpp:529} TEvNodeConfigUnbind NodeId# 9 Cookie# 18372537391714817499 SessionId# [2:126:8] Binding# {1.1/2624042491857690228@[2:131:1]} 2025-06-24T20:28:05.829604Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC06@distconf_binding.cpp:538} UnbindNode NodeId# 9 Reason# explicit unbind request 2025-06-24T20:28:05.829640Z 2 00h00m00.000000s 
:BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 9 NodeId# 127.0.0.6:19001/6 2025-06-24T20:28:05.829682Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 9 NodeId# 127.0.0.7:19001/7 2025-06-24T20:28:05.829718Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 9 NodeId# 127.0.0.9:19001/9 2025-06-24T20:28:05.829743Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 9 NodeId# 127.0.0.4:19001/4 2025-06-24T20:28:05.829776Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 9 NodeId# 127.0.0.1:19001/1 2025-06-24T20:28:05.829805Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 9 NodeId# 127.0.0.8:19001/8 2025-06-24T20:28:05.829832Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 9 NodeId# 127.0.0.5:19001/5 2025-06-24T20:28:05.829856Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 9 NodeId# 127.0.0.2:19001/2 2025-06-24T20:28:05.829881Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 9 NodeId# 127.0.0.3:19001/3 2025-06-24T20:28:05.829944Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC55@distconf_binding.cpp:219} UnsubscribeInterconnect NodeId# 9 Subscription# {SessionId# [2:126:8] SubscriptionCookie# 0} 2025-06-24T20:28:05.829991Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [9:270:20] SessionId# [7:31:8] Cookie# 10766564161079319924 2025-06-24T20:28:05.830021Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 9 SessionId# [7:31:8] Inserted# false Subscription# {SessionId# [7:31:8] SubscriptionCookie# 0} NextSubscribeCookie# 3 2025-06-24T20:28:05.830079Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 9 Cookie# 10766564161079319924 SessionId# [7:31:8] Binding# {9.1/10766564161079319924@[7:31:8]} Record# {RootNodeId: 9 } 2025-06-24T20:28:05.830134Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:319} Binding updated Binding# {9.9/10766564161079319924@[7:31:8]} PrevRootNodeId# 1 ConfigUpdate# false 2025-06-24T20:28:05.830180Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [9:270:20] SessionId# [8:18:8] Cookie# 168168214297003245 2025-06-24T20:28:05.830215Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 9 SessionId# [8:18:8] Inserted# false Subscription# {SessionId# [8:18:8] SubscriptionCookie# 0} NextSubscribeCookie# 3 2025-06-24T20:28:05.830264Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 9 Cookie# 168168214297003245 SessionId# [8:18:8] Binding# {9.1/168168214297003245@[8:18:8]} Record# {RootNodeId: 9 } 2025-06-24T20:28:05.830304Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:319} Binding updated Binding# {9.9/168168214297003245@[8:18:8]} PrevRootNodeId# 1 ConfigUpdate# false 2025-06-24T20:28:05.830729Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639238 Sender# [7:256:20] SessionId# [9:32:7] Cookie# 10766564161079319924 2025-06-24T20:28:05.830762Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode 
NodeId# 7 SessionId# [9:32:7] Inserted# false Subscription# {SessionId# [9:32:7] SubscriptionCookie# 0} NextSubscribeCookie# 6 2025-06-24T20:28:05.830946Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:407} TEvNodeConfigPush NodeId# 7 Cookie# 10766564161079319924 SessionId# [9:32:7] Binding# Record# {DeletedBoundNodeIds { Host: "127.0.0.6" Port: 19001 NodeId: 6 } DeletedBoundNodeIds { Host: "127.0.0.9" Port: 19001 NodeId: 9 } DeletedBoundNodeIds { Host: "127.0.0.4" Port: 19001 NodeId: 4 } DeletedBoundNodeIds { Host: "127.0.0.8" Port: 19001 NodeId: 8 } DeletedBoundNodeIds { Host: "127.0.0.1" Port: 19001 NodeId: 1 } DeletedBoundNodeIds { Host: "127.0.0.2" Port: 19001 NodeId: 2 } DeletedBoundNodeIds { Host: "127.0.0.5" Port: 19001 NodeId: 5 } DeletedBoundNodeIds { Host: "127.0.0.3" Port: 19001 NodeId: 3 } } 2025-06-24T20:28:05.831004Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 7 SessionId# [9:32:7] Inserted# false Subscription# {SessionId# [9:32:7] SubscriptionCookie# 0} NextSubscribeCookie# 6 2025-06-24T20:28:05.831045Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 7 NodeId# 127.0.0.6:19001/6 2025-06-24T20:28:05.831090Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 7 NodeId# 127.0.0.9:19001/9 2025-06-24T20:28:05.831126Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 7 NodeId# 127.0.0.4:19001/4 2025-06-24T20:28:05.831179Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 7 NodeId# 127.0.0.8:19001/8 2025-06-24T20:28:05.831204Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 7 NodeId# 127.0.0.1:19001/1 2025-06-24T20:28:05.831229Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 7 NodeId# 127.0.0.2:19001/2 2025-06-24T20:28:05.831252Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 7 NodeId# 127.0.0.5:19001/5 2025-06-24T20:28:05.831276Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 7 NodeId# 127.0.0.3:19001/3 2025-06-24T20:28:05.831353Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 5 SessionId# [0:0:0] Inserted# true Subscription# {SessionId# [0:0:0] SubscriptionCookie# 0} NextSubscribeCookie# 6 2025-06-24T20:28:05.831427Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC29@distconf_binding.cpp:80} Initiated bind NodeId# 5 Binding# {5.0/18372537391714817500@[0:0:0]} SessionId# [0:0:0] 2025-06-24T20:28:05.831474Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [2:221:20] SessionId# [9:127:2] Cookie# 18372537391714817499 2025-06-24T20:28:05.831524Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 2 Cookie# 18372537391714817499 SessionId# [9:127:2] Binding# {5.0/18372537391714817500@[0:0:0]} Record# {RootNodeId: 1 } 2025-06-24T20:28:05.831575Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [6:249:20] SessionId# [4:73:5] Cookie# 10488801031758692526 2025-06-24T20:28:05.831615Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNo ... 
p TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:05.986644Z 1 00h00m00.012048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:05.986708Z 1 00h00m00.012048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.986770Z 1 00h00m00.012048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.986801Z 1 00h00m00.012048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.986919Z 1 00h00m00.033848s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:05.986971Z 1 00h00m00.033848s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:05.987010Z 1 00h00m00.033848s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:05.987043Z 1 00h00m00.033848s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:05.987072Z 1 00h00m00.033848s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.987102Z 1 00h00m00.033848s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.987175Z 1 00h00m00.033848s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.987304Z 1 00h00m00.080064s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:05.987363Z 1 00h00m00.080064s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:05.987393Z 1 00h00m00.080064s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:05.987434Z 1 00h00m00.080064s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:05.987468Z 1 00h00m00.080064s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.987505Z 1 00h00m00.080064s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.987547Z 1 00h00m00.080064s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.987748Z 1 00h00m00.185436s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:05.987791Z 1 00h00m00.185436s :STATESTORAGE 
DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:05.987820Z 1 00h00m00.185436s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:05.987864Z 1 00h00m00.185436s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:05.987895Z 1 00h00m00.185436s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.987923Z 1 00h00m00.185436s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.987951Z 1 00h00m00.185436s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.988362Z 1 00h00m00.408824s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:05.988409Z 1 00h00m00.408824s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:05.988446Z 1 00h00m00.408824s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:05.988474Z 1 00h00m00.408824s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:05.988518Z 1 00h00m00.408824s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.988565Z 1 00h00m00.408824s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.988609Z 1 00h00m00.408824s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.988952Z 1 00h00m00.904745s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:05.988997Z 1 00h00m00.904745s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:05.989026Z 1 00h00m00.904745s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:05.989054Z 1 00h00m00.904745s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:05.989084Z 1 00h00m00.904745s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.989112Z 1 00h00m00.904745s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.989152Z 1 00h00m00.904745s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.990201Z 1 00h00m01.946179s :STATESTORAGE 
DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:05.990280Z 1 00h00m01.946179s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:05.990317Z 1 00h00m01.946179s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:05.990346Z 1 00h00m01.946179s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:05.990382Z 1 00h00m01.946179s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.990590Z 1 00h00m01.946179s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.990664Z 1 00h00m01.946179s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.992949Z 1 00h00m04.320648s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:05.993026Z 1 00h00m04.320648s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:05.993060Z 1 00h00m04.320648s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:05.993102Z 1 00h00m04.320648s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:05.993134Z 1 00h00m04.320648s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.993166Z 1 00h00m04.320648s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.993196Z 1 00h00m04.320648s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.997331Z 1 00h00m09.686947s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:05.997401Z 1 00h00m09.686947s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-24T20:28:05.997436Z 1 00h00m09.686947s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-24T20:28:05.997463Z 1 00h00m09.686947s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-24T20:28:05.997496Z 1 00h00m09.686947s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.997539Z 1 00h00m09.686947s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.997583Z 1 00h00m09.686947s :STATESTORAGE DEBUG: 
ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:28:05.999205Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:28:05.999303Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-24T20:28:05.999376Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-24T20:28:05.999408Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-24T20:28:05.999481Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-24T20:28:05.999557Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} >> ProgramStep::SumGroupBy [GOOD] >> ProgramStep::SumGroupByNotNull [GOOD] >> ProgramStep::MinMaxSomeGroupBy [GOOD] >> ProgramStep::MinMaxSomeGroupByNotNull >> ProgramStep::MinMaxSomeGroupByNotNull [GOOD] >> SortableBatchPosition::FindPosition [GOOD] >> CheckIntegrityMirror3of4::PlacementMissingParts [GOOD] >> CheckIntegrityMirror3of4::PlacementDisintegrated |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest |87.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing |87.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing |87.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing >> BsControllerTest::DecommitRejected >> TYardTest::TestMultiYardStartingPoints [GOOD] >> TYardTest::TestMultiYardLogMultipleWriteRead >> CheckIntegrityBlock42::PlacementOkHandoff [GOOD] >> CheckIntegrityBlock42::PlacementMissingParts >> CheckIntegrityMirror3dc::DataErrorOneCopy [GOOD] >> CheckIntegrityMirror3dc::DataErrorManyCopies >> SelfHealActorTest::NoMoreThanOneReplicating >> CheckIntegrityBlock42::DataErrorSixPartsTwoBroken [GOOD] >> CheckIntegrityBlock42::DataOkErasureFiveParts >> SelfHealActorTest::NoMoreThanOneReplicating [GOOD] >> CheckIntegrityBlock42::PlacementAllOnHandoff [GOOD] >> CheckIntegrityBlock42::PlacementDisintegrated >> BsControllerTest::TestLocalSelfHeal |87.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage |87.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage >> CheckIntegrityBlock42::DataErrorSixPartsOneBroken [GOOD] >> CheckIntegrityBlock42::DataErrorFivePartsOneBroken >> Donor::ContinueWithFaultyDonor >> CheckIntegrityBlock42::PlacementWithErrorsOnBlobDisks [GOOD] >> CheckIntegrityBlock42::PlacementStatusUnknown >> BlobDepot::BasicRange [GOOD] >> BlobDepot::BasicDiscover >> CheckIntegrityMirror3dc::PlacementDisintegrated [GOOD] >> 
CheckIntegrityMirror3dc::DataOk >> CheckIntegrityMirror3dc::PlacementMissingParts [GOOD] |87.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage >> Donor::CheckOnlineReadRequestToDonor >> BsControllerTest::DecommitRejected [GOOD] |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/core/formats/arrow/ut/unittest >> SortableBatchPosition::FindPosition [GOOD] Test command err: Process: 100000d;/100000; 10000d;/10000; NO_CODEC(poolsize=1024;keylen=1) 0.2021203448 0.2210911404 NO_CODEC(poolsize=1024;keylen=10) 0.1534132783 0.2482180533 NO_CODEC(poolsize=1024;keylen=16) 0.1104676508 0.2045372848 NO_CODEC(poolsize=1024;keylen=32) 0.06592569055 0.1591802296 NO_CODEC(poolsize=1024;keylen=64) 0.03972180035 0.1324717476 NO_CODEC(poolsize=128;keylen=1) 0.2016566193 0.2164784476 NO_CODEC(poolsize=128;keylen=10) 0.07304169975 0.08752922393 NO_CODEC(poolsize=128;keylen=16) 0.05151637558 0.06514358749 NO_CODEC(poolsize=128;keylen=32) 0.02919093319 0.04189888314 NO_CODEC(poolsize=128;keylen=64) 0.01605694811 0.02821124922 NO_CODEC(poolsize=16;keylen=1) 0.2010010074 0.2099570542 NO_CODEC(poolsize=16;keylen=10) 0.0719219365 0.07635285397 NO_CODEC(poolsize=16;keylen=16) 0.05039654131 0.05396013899 NO_CODEC(poolsize=16;keylen=32) 0.02807102527 0.03070808446 NO_CODEC(poolsize=16;keylen=64) 0.01493699686 0.01701612239 NO_CODEC(poolsize=1;keylen=1) 0.2008730831 0.2086845872 NO_CODEC(poolsize=1;keylen=10) 0.07177339648 0.07487027428 NO_CODEC(poolsize=1;keylen=16) 0.0502445638 0.05244238527 NO_CODEC(poolsize=1;keylen=32) 0.02791992658 0.0291982148 NO_CODEC(poolsize=1;keylen=64) 0.01478641518 0.01551089526 NO_CODEC(poolsize=512;keylen=1) 0.2021203448 0.2210911404 NO_CODEC(poolsize=512;keylen=10) 0.1482943606 0.1971260763 NO_CODEC(poolsize=512;keylen=16) 0.1053484084 0.1534129488 NO_CODEC(poolsize=512;keylen=32) 0.0608061115 0.1080222928 NO_CODEC(poolsize=512;keylen=64) 0.03460202321 0.08129402495 NO_CODEC(poolsize=64;keylen=1) 0.2013687897 0.2136153969 NO_CODEC(poolsize=64;keylen=10) 0.07240183504 0.08114272681 NO_CODEC(poolsize=64;keylen=16) 0.05087647028 0.05875304549 NO_CODEC(poolsize=64;keylen=32) 0.02855098581 0.03550414104 NO_CODEC(poolsize=64;keylen=64) 0.01541697597 0.02181403389 lz4(poolsize=1024;keylen=1) 0.006629768257 0.05541610349 lz4(poolsize=1024;keylen=10) 0.04233951498 0.3344832994 lz4(poolsize=1024;keylen=16) 0.05657489465 0.404264214 lz4(poolsize=1024;keylen=32) 0.09037137941 0.5318074361 lz4(poolsize=1024;keylen=64) 0.01074936154 0.1063492063 lz4(poolsize=128;keylen=1) 0.003831111821 0.02881389382 lz4(poolsize=128;keylen=10) 0.00718182175 0.06087121933 lz4(poolsize=128;keylen=16) 0.008735936466 0.07523964551 lz4(poolsize=128;keylen=32) 0.01375268158 0.117441454 lz4(poolsize=128;keylen=64) 0.02262360212 0.1850289108 lz4(poolsize=16;keylen=1) 0.00273442178 0.01820340324 lz4(poolsize=16;keylen=10) 0.003078137332 0.02169239789 lz4(poolsize=16;keylen=16) 0.003266503667 0.02356577168 lz4(poolsize=16;keylen=32) 0.003742685614 0.02844311377 lz4(poolsize=16;keylen=64) 0.004937163375 0.03979647465 lz4(poolsize=1;keylen=1) 0.00251497006 0.01603325416 lz4(poolsize=1;keylen=10) 0.002531395234 0.01628089447 lz4(poolsize=1;keylen=16) 0.002515970516 0.01617933723 lz4(poolsize=1;keylen=32) 0.00251450677 0.01630226314 lz4(poolsize=1;keylen=64) 0.002511620933 0.01653353149 lz4(poolsize=512;keylen=1) 0.005362411291 0.04359726295 
lz4(poolsize=512;keylen=10) 0.02347472854 0.1933066062 lz4(poolsize=512;keylen=16) 0.03056053336 0.2426853056 lz4(poolsize=512;keylen=32) 0.04856356058 0.3467897492 lz4(poolsize=512;keylen=64) 0.04102771881 0.3228658321 lz4(poolsize=64;keylen=1) 0.003312844256 0.02372010279 lz4(poolsize=64;keylen=10) 0.004839661617 0.03863241259 lz4(poolsize=64;keylen=16) 0.005715507689 0.04687204687 lz4(poolsize=64;keylen=32) 0.007821957352 0.06669044223 lz4(poolsize=64;keylen=64) 0.01258912656 0.1073551894 zstd(poolsize=1024;keylen=1) 0.007324840764 0.0754840827 zstd(poolsize=1024;keylen=10) 0.04506846012 0.3776978417 zstd(poolsize=1024;keylen=16) 0.0655640205 0.4694540288 zstd(poolsize=1024;keylen=32) 0.1110720087 0.6098141264 zstd(poolsize=1024;keylen=64) 0.1914108287 0.7447345433 zstd(poolsize=128;keylen=1) 0.003769847609 0.04002713704 zstd(poolsize=128;keylen=10) 0.007456731695 0.07809798271 zstd(poolsize=128;keylen=16) 0.0102539786 0.1029455519 zstd(poolsize=128;keylen=32) 0.01677217062 0.1578947368 zstd(poolsize=128;keylen=64) 0.03005940945 0.2517949988 zstd(poolsize=16;keylen=1) 0.002620896858 0.02794819359 zstd(poolsize=16;keylen=10) 0.002816201441 0.03048416019 zstd(poolsize=16;keylen=16) 0.003368308096 0.03570300158 zstd(poolsize=16;keylen=32) 0.004159808469 0.0434375 zstd(poolsize=16;keylen=64) 0.005779996974 0.05875115349 zstd(poolsize=1;keylen=1) 0.002461243407 0.02626193724 zstd(poolsize=1;keylen=10) 0.002154636612 0.0234375 zstd(poolsize=1;keylen=16) 0.002356872222 0.02519132653 zstd(poolsize=1;keylen=32) 0.002427911996 0.02573879886 zstd(poolsize=1;keylen=64) 0.00258021431 0.02699269609 zstd(poolsize=512;keylen=1) 0.005583027596 0.05848930481 zstd(poolsize=512;keylen=10) 0.0236929438 0.2237078941 zstd(poolsize=512;keylen=16) 0.03443366072 0.2936507937 zstd(poolsize=512;keylen=32) 0.05917328099 0.4212765957 zstd(poolsize=512;keylen=64) 0.1058929843 0.5749553837 zstd(poolsize=64;keylen=1) 0.00319560285 0.03401360544 zstd(poolsize=64;keylen=10) 0.004852093844 0.05176470588 zstd(poolsize=64;keylen=16) 0.00633344236 0.06557881773 zstd(poolsize=64;keylen=32) 0.009647738439 0.09619952494 zstd(poolsize=64;keylen=64) 0.01626771323 0.1514644351 NO_CODEC --1000 ----1 ------1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1; --------5168 / 5296 = 2.416918429% ------10 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1; --------14168 / 14296 = 0.8953553442% ------16 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; --------20168 / 20296 = 0.6306661411% ------32 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1; --------36168 / 36296 = 0.35265594% ------64 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1; --------68168 / 68296 = 0.1874194682% ----16 ------1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1; --------5168 / 5296 = 2.416918429% ------10 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1; --------14168 / 14296 = 0.8953553442% ------16 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; --------20168 / 20296 = 0.6306661411% ------32 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1; --------36168 / 36296 = 0.35265594% ------64 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1; --------68168 / 68296 = 0.1874194682% ----64 ------1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1; --------5168 / 5296 = 2.416918429% ------10 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1; --------14168 / 14296 = 0.8953553442% ------16 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; --------20168 / 20296 = 0.6306661411% ------32 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1; --------36168 / 36296 = 0.35265594% ------64 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1; --------68168 / 68296 = 0.1874194682% ----128 ------1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1; --------5168 / 5296 = 2.416918429% ------10 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1; --------14168 / 14296 = 0.8953553442% ------16 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; --------20168 / 20296 = 0.6306661411% ------32 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1; --------36168 / 36296 = 0.35265594% ------64 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1; --------68168 / 68296 = 0.1874194682% ----512 ------1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1; --------5168 / 5296 = 2.416918429% ------10 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1; --------14168 / 14296 = 0.8953553442% ------16 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; --------20168 / 20296 = 0.6306661411% ------32 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1; --------36168 / 36296 = 0.35265594% ------64 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1; --------68168 / 68296 = 0.1874194682% ----1024 ------1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1; --------5168 / 5296 = 2.416918429% ------10 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1; --------14168 / 14296 = 0.8953553442% ------16 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; --------20168 / 20296 = 0.6306661411% ------32 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1; --------36168 / 36296 = 0.35265594% ------64 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1; --------68168 / 68296 = 0.1874194682% --10000 ---- ... 
"N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(36):{\"i\":\"1,2,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N4 -> N5[label="2"]; N0 -> N5[label="3"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=int16;records=0;count=0; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, 
label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::ConsistentWritesWhenSwitchingToDonorMode |87.5%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/test-results/unittest/{meta.json ... 
results_accumulator.log} >> CheckIntegrityMirror3of4::PlacementDisintegrated [GOOD] |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> SelfHealActorTest::NoMoreThanOneReplicating [GOOD] >> CheckIntegrityMirror3dc::PlacementOkWithErrorsOnBlobDisks [GOOD] >> CheckIntegrityMirror3of4::PlacementBlobIsLost >> CheckIntegrityBlock42::PlacementMissingParts [GOOD] |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> Donor::SlayAfterWiping >> Donor::MultipleEvicts ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3dc::PlacementMissingParts [GOOD] Test command err: RandomSeed# 2718575619986144589 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** >> BlobDepot::BasicDiscover [GOOD] >> BlobDepot::BasicBlock >> CheckIntegrityBlock42::DataOkErasureFiveParts [GOOD] >> CheckIntegrityMirror3dc::DataErrorManyCopies [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::DecommitRejected [GOOD] Test command err: 2025-06-24T20:28:08.398919Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2025-06-24T20:28:08.398978Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2025-06-24T20:28:08.399084Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2025-06-24T20:28:08.399110Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2025-06-24T20:28:08.400404Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2025-06-24T20:28:08.400440Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2025-06-24T20:28:08.400489Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2025-06-24T20:28:08.400525Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2025-06-24T20:28:08.400564Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2025-06-24T20:28:08.400592Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2025-06-24T20:28:08.400649Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2025-06-24T20:28:08.400681Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2025-06-24T20:28:08.400718Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2025-06-24T20:28:08.400742Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2025-06-24T20:28:08.400774Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2025-06-24T20:28:08.400795Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2025-06-24T20:28:08.400831Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2025-06-24T20:28:08.400859Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2025-06-24T20:28:08.400892Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2025-06-24T20:28:08.400912Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2025-06-24T20:28:08.400960Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 
2025-06-24T20:28:08.400983Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2025-06-24T20:28:08.401028Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2025-06-24T20:28:08.401052Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2025-06-24T20:28:08.401086Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2025-06-24T20:28:08.401106Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2025-06-24T20:28:08.401136Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2025-06-24T20:28:08.401168Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2025-06-24T20:28:08.401219Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2025-06-24T20:28:08.401239Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2025-06-24T20:28:08.414866Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:508:32] Status# ERROR ClientId# [1:508:32] ServerId# [0:0:0] PipeClient# [1:508:32] 2025-06-24T20:28:08.416718Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:509:20] Status# ERROR ClientId# [2:509:20] ServerId# [0:0:0] PipeClient# [2:509:20] 2025-06-24T20:28:08.416785Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:510:20] Status# ERROR ClientId# [3:510:20] ServerId# [0:0:0] PipeClient# [3:510:20] 2025-06-24T20:28:08.416857Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:511:20] Status# ERROR ClientId# [4:511:20] ServerId# [0:0:0] PipeClient# [4:511:20] 2025-06-24T20:28:08.416903Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:512:20] Status# ERROR ClientId# [5:512:20] ServerId# [0:0:0] PipeClient# [5:512:20] 2025-06-24T20:28:08.416952Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:513:20] Status# ERROR ClientId# [6:513:20] ServerId# [0:0:0] PipeClient# [6:513:20] 2025-06-24T20:28:08.416993Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:514:20] Status# ERROR ClientId# [7:514:20] ServerId# [0:0:0] PipeClient# [7:514:20] 2025-06-24T20:28:08.417043Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:515:20] Status# ERROR ClientId# [8:515:20] ServerId# [0:0:0] PipeClient# [8:515:20] 2025-06-24T20:28:08.417089Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:516:20] Status# ERROR ClientId# [9:516:20] ServerId# [0:0:0] PipeClient# [9:516:20] 2025-06-24T20:28:08.417139Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:517:20] Status# ERROR ClientId# [10:517:20] ServerId# [0:0:0] PipeClient# [10:517:20] 2025-06-24T20:28:08.417194Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:518:20] Status# ERROR ClientId# [11:518:20] ServerId# [0:0:0] PipeClient# [11:518:20] 2025-06-24T20:28:08.417253Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:519:20] Status# ERROR ClientId# [12:519:20] ServerId# [0:0:0] PipeClient# [12:519:20] 2025-06-24T20:28:08.417300Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:520:20] Status# ERROR ClientId# [13:520:20] ServerId# [0:0:0] PipeClient# [13:520:20] 2025-06-24T20:28:08.417367Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:521:20] Status# ERROR ClientId# [14:521:20] ServerId# [0:0:0] PipeClient# [14:521:20] 2025-06-24T20:28:08.417408Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:522:20] Status# ERROR ClientId# [15:522:20] ServerId# [0:0:0] PipeClient# [15:522:20] 2025-06-24T20:28:08.522977Z 1 00h00m00.100000s :BS_NODE DEBUG: [1] Connect 2025-06-24T20:28:08.523074Z 2 00h00m00.100000s :BS_NODE DEBUG: [2] Connect 
2025-06-24T20:28:08.523163Z 3 00h00m00.100000s :BS_NODE DEBUG: [3] Connect 2025-06-24T20:28:08.523224Z 4 00h00m00.100000s :BS_NODE DEBUG: [4] Connect 2025-06-24T20:28:08.523283Z 5 00h00m00.100000s :BS_NODE DEBUG: [5] Connect 2025-06-24T20:28:08.523323Z 6 00h00m00.100000s :BS_NODE DEBUG: [6] Connect 2025-06-24T20:28:08.523365Z 7 00h00m00.100000s :BS_NODE DEBUG: [7] Connect 2025-06-24T20:28:08.523401Z 8 00h00m00.100000s :BS_NODE DEBUG: [8] Connect 2025-06-24T20:28:08.523449Z 9 00h00m00.100000s :BS_NODE DEBUG: [9] Connect 2025-06-24T20:28:08.523487Z 10 00h00m00.100000s :BS_NODE DEBUG: [10] Connect 2025-06-24T20:28:08.523526Z 11 00h00m00.100000s :BS_NODE DEBUG: [11] Connect 2025-06-24T20:28:08.523563Z 12 00h00m00.100000s :BS_NODE DEBUG: [12] Connect 2025-06-24T20:28:08.523613Z 13 00h00m00.100000s :BS_NODE DEBUG: [13] Connect 2025-06-24T20:28:08.523648Z 14 00h00m00.100000s :BS_NODE DEBUG: [14] Connect 2025-06-24T20:28:08.523692Z 15 00h00m00.100000s :BS_NODE DEBUG: [15] Connect 2025-06-24T20:28:08.525916Z 1 00h00m00.100000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:581:60] Status# OK ClientId# [1:581:60] ServerId# [1:610:61] PipeClient# [1:581:60] 2025-06-24T20:28:08.525980Z 1 00h00m00.100000s :BS_NODE DEBUG: [1] State switched from 0 to 1 2025-06-24T20:28:08.529965Z 2 00h00m00.100000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:582:21] Status# OK ClientId# [2:582:21] ServerId# [1:611:62] PipeClient# [2:582:21] 2025-06-24T20:28:08.530027Z 2 00h00m00.100000s :BS_NODE DEBUG: [2] State switched from 0 to 1 2025-06-24T20:28:08.530090Z 3 00h00m00.100000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:583:21] Status# OK ClientId# [3:583:21] ServerId# [1:612:63] PipeClient# [3:583:21] 2025-06-24T20:28:08.530119Z 3 00h00m00.100000s :BS_NODE DEBUG: [3] State switched from 0 to 1 2025-06-24T20:28:08.530169Z 4 00h00m00.100000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:584:21] Status# OK ClientId# [4:584:21] ServerId# [1:613:64] PipeClient# [4:584:21] 2025-06-24T20:28:08.530193Z 4 00h00m00.100000s :BS_NODE DEBUG: [4] State switched from 0 to 1 2025-06-24T20:28:08.530227Z 5 00h00m00.100000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:585:21] Status# OK ClientId# [5:585:21] ServerId# [1:614:65] PipeClient# [5:585:21] 2025-06-24T20:28:08.530250Z 5 00h00m00.100000s :BS_NODE DEBUG: [5] State switched from 0 to 1 2025-06-24T20:28:08.530284Z 6 00h00m00.100000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:586:21] Status# OK ClientId# [6:586:21] ServerId# [1:615:66] PipeClient# [6:586:21] 2025-06-24T20:28:08.530308Z 6 00h00m00.100000s :BS_NODE DEBUG: [6] State switched from 0 to 1 2025-06-24T20:28:08.530350Z 7 00h00m00.100000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:587:21] Status# OK ClientId# [7:587:21] ServerId# [1:616:67] PipeClient# [7:587:21] 2025-06-24T20:28:08.530384Z 7 00h00m00.100000s :BS_NODE DEBUG: [7] State switched from 0 to 1 2025-06-24T20:28:08.530424Z 8 00h00m00.100000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:588:21] Status# OK ClientId# [8:588:21] ServerId# [1:617:68] PipeClient# [8:588:21] 2025-06-24T20:28:08.530449Z 8 00h00m00.100000s :BS_NODE DEBUG: [8] State switched from 0 to 1 2025-06-24T20:28:08.530508Z 9 00h00m00.100000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:589:21] Status# OK ClientId# [9:589:21] ServerId# [1:618:69] PipeClient# [9:589:21] 2025-06-24T20:28:08.530536Z 9 00h00m00.100000s :BS_NODE DEBUG: [9] State switched from 0 to 1 2025-06-24T20:28:08.530576Z 10 00h00m00.100000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:590:21] Status# OK 
ClientId# [10:590:21] ServerId# [1:619:70] PipeClient# [10:590:21] 2025-06-24T20:28:08.530607Z 10 00h00m00.100000s :BS_NODE DEBUG: [10] State switched from 0 to 1 2025-06-24T20:28:08.530665Z 11 00h00m00.100000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:591:21] Status# OK ClientId# [11:591:21] ServerId# [1:620:71] PipeClient# [11:591:21] 2025-06-24T20:28:08.530698Z 11 00h00m00.100000s :BS_NODE DEBUG: [11] State switched from 0 to 1 2025-06-24T20:28:08.530768Z 12 00h00m00.100000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:592:21] Status# OK ClientId# [12:592:21] ServerId# [1:621:72] PipeClient# [12:592:21] 2025-06-24T20:28:08.530798Z 12 00h00m00.100000s :BS_NODE DEBUG: [12] State switched from 0 to 1 2025-06-24T20:28:08.530840Z 13 00h00m00.100000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:593:21] Status# OK ClientId# [13:593:21] ServerId# [1:622:73] PipeClient# [13:593:21] 2025-06-24T20:28:08.530864Z 13 00h00m00.100000s :BS_NODE DEBUG: [13] State switched from 0 to 1 2025-06-24T20:28:08.530900Z 14 00h00m00.100000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:594:21] Status# OK ClientId# [14:594:21] ServerId# [1:623:74] PipeClient# [14:594:21] 2025-06-24T20:28:08.530925Z 14 00h00m00.100000s :BS_NODE DEBUG: [14] State switched from 0 to 1 2025-06-24T20:28:08.530961Z 15 00h00m00.100000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:595:21] Status# OK ClientId# [15:595:21] ServerId# [1:624:75] PipeClient# [15:595:21] 2025-06-24T20:28:08.530985Z 15 00h00m00.100000s :BS_NODE DEBUG: [15] State switched from 0 to 1 2025-06-24T20:28:08.533836Z 1 00h00m00.100512s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:08.533916Z 1 00h00m00.100512s :BS_NODE DEBUG: [1] VDiskId# [80000000:1:0:0:0] PDiskId# 1000 VSlotId# 1000 created 2025-06-24T20:28:08.573766Z 1 00h00m00.100512s :BS_NODE DEBUG: [1] VDiskId# [80000000:1:0:0:0] status changed to INIT_PENDING 2025-06-24T20:28:08.588390Z 2 00h00m00.100512s :BS_NODE DEBUG: [2] NodeServiceSetUpdate 2025-06-24T20:28:08.588474Z 2 00h00m00.100512s :BS_NODE DEBUG: [2] VDiskId# [80000000:1:0:1:0] PDiskId# 1000 VSlotId# 1000 created 2025-06-24T20:28:08.588584Z 2 00h00m00.100512s :BS_NODE DEBUG: [2] VDiskId# [80000000:1:0:1:0] status changed to INIT_PENDING 2025-06-24T20:28:08.588741Z 3 00h00m00.100512s :BS_NODE DEBUG: [3] NodeServiceSetUpdate 2025-06-24T20:28:08.588779Z 3 00h00m00.100512s :BS_NODE DEBUG: [3] VDiskId# [80000000:1:0:2:0] PDiskId# 1000 VSlotId# 1000 created 2025-06-24T20:28:08.588846Z 3 00h00m00.100512s :BS_NODE DEBUG: [3] VDiskId# [80000000:1:0:2:0] status changed to INIT_PENDING 2025-06-24T20:28:08.588962Z 4 00h00m00.100512s :BS_NODE DEBUG: [4] NodeServiceSetUpdate 2025-06-24T20:28:08.589006Z 4 00h00m00.100512s :BS_NODE DEBUG: [4] VDiskId# [80000000:1:1:0:0] PDiskId# 1000 VSlotId# 1000 created 2025-06-24T20:28:08.589068Z 4 00h00m00.100512s :BS_NODE DEBUG: [4] VDiskId# [80000000:1:1:0:0] status changed to INIT_PENDING 2025-06-24T20:28:08.589193Z 5 00h00m00.100512s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:08.589231Z 5 00h00m00.100512s :BS_NODE DEBUG: [5] VDiskId# [80000000:1:1:1:0] PDiskId# 1000 VSlotId# 1000 created 2025-06-24T20:28:08.589283Z 5 00h00m00.100512s :BS_NODE DEBUG: [5] VDiskId# [80000000:1:1:1:0] status changed to INIT_PENDING 2025-06-24T2 ... 
[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-24T20:28:09.011123Z 3 00h01m05.964512s :BS_NODE DEBUG: [3] VDiskId# [80000001:1:2:2:0] status changed to REPLICATING 2025-06-24T20:28:09.012203Z 1 00h01m05.964512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-24T20:28:09.012476Z 14 00h01m06.872512s :BS_NODE DEBUG: [14] VDiskId# [80000001:1:1:1:0] status changed to REPLICATING 2025-06-24T20:28:09.012857Z 1 00h01m06.872512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-24T20:28:09.013167Z 1 00h01m10.000000s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-24T20:28:09.013959Z 10 00h01m16.029512s :BS_NODE DEBUG: [10] VDiskId# [80000001:1:0:0:0] status changed to READY 2025-06-24T20:28:09.014305Z 1 00h01m16.029512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-24T20:28:09.014521Z 2 00h01m17.090512s :BS_NODE DEBUG: [2] VDiskId# [80000001:1:2:1:0] status changed to READY 2025-06-24T20:28:09.014909Z 1 00h01m17.090512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-24T20:28:09.023286Z 1 00h01m20.000000s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-24T20:28:09.023744Z 11 00h01m21.798512s :BS_NODE DEBUG: [11] VDiskId# [80000001:1:0:1:0] status changed to READY 2025-06-24T20:28:09.024148Z 1 00h01m21.798512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] 
Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-24T20:28:09.024424Z 14 00h01m24.059536s :BS_NODE DEBUG: [14] VDiskId# [80000000:3:2:1:0] status changed to READY 2025-06-24T20:28:09.024806Z 1 00h01m24.059536s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-24T20:28:09.025460Z 8 00h01m24.060048s :BS_NODE DEBUG: [8] NodeServiceSetUpdate 2025-06-24T20:28:09.025516Z 8 00h01m24.060048s :BS_NODE DEBUG: [8] VDiskId# [80000000:2:2:1:0] destroyed 2025-06-24T20:28:09.025799Z 14 00h01m26.191512s :BS_NODE DEBUG: [14] VDiskId# [80000001:1:1:1:0] status changed to READY 2025-06-24T20:28:09.026118Z 1 00h01m26.191512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-24T20:28:09.026295Z 13 00h01m26.981024s :BS_NODE DEBUG: [13] VDiskId# [80000000:3:2:0:0] status changed to READY 2025-06-24T20:28:09.026743Z 1 00h01m26.981024s :BS_SELFHEAL DEBUG: {BSSH01@self_heal.cpp:71} Reassigner starting GroupId# 2147483648 2025-06-24T20:28:09.027302Z 1 00h01m26.981024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:09.027366Z 1 00h01m26.981024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:0:0:0] DiskIsOk# true 2025-06-24T20:28:09.027625Z 1 00h01m26.981024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:09.027670Z 1 00h01m26.981024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:0:1:0] DiskIsOk# true 2025-06-24T20:28:09.027706Z 1 00h01m26.981024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:09.027733Z 1 00h01m26.981024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:0:2:0] DiskIsOk# true 2025-06-24T20:28:09.027766Z 1 00h01m26.981024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:09.027793Z 1 00h01m26.981024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:1:0:0] DiskIsOk# true 2025-06-24T20:28:09.027818Z 1 00h01m26.981024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:09.027843Z 1 00h01m26.981024s :BS_SELFHEAL DEBUG: 
{BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:1:1:0] DiskIsOk# true 2025-06-24T20:28:09.027869Z 1 00h01m26.981024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:09.027924Z 1 00h01m26.981024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:1:2:0] DiskIsOk# true 2025-06-24T20:28:09.027959Z 1 00h01m26.981024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:09.027995Z 1 00h01m26.981024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:2:0:0] DiskIsOk# true 2025-06-24T20:28:09.028027Z 1 00h01m26.981024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:09.028070Z 1 00h01m26.981024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:2:1:0] DiskIsOk# true 2025-06-24T20:28:09.030378Z 1 00h01m26.981536s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:09.030460Z 1 00h01m26.981536s :BS_NODE DEBUG: [1] VDiskId# [80000000:3:0:0:0] -> [80000000:4:0:0:0] 2025-06-24T20:28:09.031045Z 1 00h01m26.981536s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483648 Items# [80000000:3:2:2:0]: 9:1000:1000 -> 15:1000:1001 ConfigTxSeqNo# 23 2025-06-24T20:28:09.031081Z 1 00h01m26.981536s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483648 Success# true 2025-06-24T20:28:09.039362Z 7 00h01m26.981536s :BS_NODE DEBUG: [7] NodeServiceSetUpdate 2025-06-24T20:28:09.039448Z 7 00h01m26.981536s :BS_NODE DEBUG: [7] VDiskId# [80000000:1:2:0:0] destroyed 2025-06-24T20:28:09.039613Z 2 00h01m26.981536s :BS_NODE DEBUG: [2] NodeServiceSetUpdate 2025-06-24T20:28:09.039700Z 2 00h01m26.981536s :BS_NODE DEBUG: [2] VDiskId# [80000000:3:0:1:0] -> [80000000:4:0:1:0] 2025-06-24T20:28:09.039813Z 3 00h01m26.981536s :BS_NODE DEBUG: [3] NodeServiceSetUpdate 2025-06-24T20:28:09.039861Z 3 00h01m26.981536s :BS_NODE DEBUG: [3] VDiskId# [80000000:3:0:2:0] -> [80000000:4:0:2:0] 2025-06-24T20:28:09.039983Z 4 00h01m26.981536s :BS_NODE DEBUG: [4] NodeServiceSetUpdate 2025-06-24T20:28:09.040036Z 4 00h01m26.981536s :BS_NODE DEBUG: [4] VDiskId# [80000000:3:1:0:0] -> [80000000:4:1:0:0] 2025-06-24T20:28:09.040131Z 5 00h01m26.981536s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:09.040185Z 5 00h01m26.981536s :BS_NODE DEBUG: [5] VDiskId# [80000000:3:1:1:0] -> [80000000:4:1:1:0] 2025-06-24T20:28:09.040285Z 6 00h01m26.981536s :BS_NODE DEBUG: [6] NodeServiceSetUpdate 2025-06-24T20:28:09.040329Z 6 00h01m26.981536s :BS_NODE DEBUG: [6] VDiskId# [80000000:3:1:2:0] -> [80000000:4:1:2:0] 2025-06-24T20:28:09.040393Z 9 00h01m26.981536s :BS_NODE DEBUG: [9] NodeServiceSetUpdate 2025-06-24T20:28:09.040458Z 13 00h01m26.981536s :BS_NODE DEBUG: [13] NodeServiceSetUpdate 2025-06-24T20:28:09.040516Z 13 00h01m26.981536s :BS_NODE DEBUG: [13] VDiskId# [80000000:3:2:0:0] -> [80000000:4:2:0:0] 2025-06-24T20:28:09.040603Z 14 00h01m26.981536s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-06-24T20:28:09.040666Z 14 00h01m26.981536s :BS_NODE DEBUG: [14] VDiskId# [80000000:3:2:1:0] -> [80000000:4:2:1:0] 2025-06-24T20:28:09.040768Z 15 
00h01m26.981536s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2025-06-24T20:28:09.040812Z 15 00h01m26.981536s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] PDiskId# 1000 VSlotId# 1001 created 2025-06-24T20:28:09.040911Z 15 00h01m26.981536s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] status changed to INIT_PENDING 2025-06-24T20:28:09.042078Z 15 00h01m29.756512s :BS_NODE DEBUG: [15] VDiskId# [80000001:1:1:2:0] status changed to READY 2025-06-24T20:28:09.043003Z 15 00h01m30.698536s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] status changed to REPLICATING 2025-06-24T20:28:09.043696Z 12 00h01m31.070512s :BS_NODE DEBUG: [12] VDiskId# [80000001:1:0:2:0] status changed to READY 2025-06-24T20:28:09.044258Z 1 00h01m32.123512s :BS_NODE DEBUG: [1] VDiskId# [80000001:1:2:0:0] status changed to READY 2025-06-24T20:28:09.044770Z 13 00h01m35.948512s :BS_NODE DEBUG: [13] VDiskId# [80000001:1:1:0:0] status changed to READY 2025-06-24T20:28:09.045358Z 3 00h01m36.674512s :BS_NODE DEBUG: [3] VDiskId# [80000001:1:2:2:0] status changed to READY 2025-06-24T20:28:09.046195Z 15 00h01m37.823536s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] status changed to READY 2025-06-24T20:28:09.047053Z 9 00h01m37.824048s :BS_NODE DEBUG: [9] NodeServiceSetUpdate 2025-06-24T20:28:09.047108Z 9 00h01m37.824048s :BS_NODE DEBUG: [9] VDiskId# [80000000:3:2:2:0] destroyed >> test_transform.py::TestYamlConfigTransformations::test_basic[args1-dump] [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_basic[args1-dump_ds_init] >> CheckIntegrityBlock42::DataErrorFivePartsOneBroken [GOOD] >> CheckIntegrityBlock42::DataErrorHeavySixPartsWithManyBroken >> BSCRestartPDisk::RestartGoodDiskInBrokenGroupNotAllowed >> CheckIntegrityBlock42::PlacementStatusUnknown [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3of4::PlacementDisintegrated [GOOD] Test command err: RandomSeed# 5451439969047006709 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** Group is disintegrated or has network problems >> CheckIntegrityBlock42::PlacementDisintegrated [GOOD] >> CheckIntegrityBlock42::DataStatusUnknown >> TYardTest::TestMultiYardLogMultipleWriteRead [GOOD] >> TYardTest::TestSysLogOverwrite ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::PlacementMissingParts [GOOD] Test command err: RandomSeed# 4499890766737632567 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** 
PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:4:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |87.5%| [TS] {RESULT} ydb/core/formats/arrow/ut/unittest |87.5%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/test-results/unittest/{meta.json ... results_accumulator.log} >> CheckIntegrityMirror3of4::PlacementBlobIsLost [GOOD] >> test.py::test[solomon-BadDownsamplingDisabled-] [GOOD] >> test.py::test[solomon-BadDownsamplingFill-] |87.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/libpy3tests-stability-tool.global.a >> BSCRestartPDisk::RestartBrokenDiskInBrokenGroup |87.5%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/libpy3tests-stability-tool.global.a |87.5%| [AR] {RESULT} $(B)/ydb/tests/stability/tool/libpy3tests-stability-tool.global.a ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3dc::DataErrorManyCopies [GOOD] Test command err: RandomSeed# 9201656460624041742 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** Disks: 0: [82000000:1:1:0:0] 1: [82000000:1:2:0:0] 2: [82000000:1:0:0:0] 3: [82000000:1:1:1:0] 4: [82000000:1:2:1:0] 5: [82000000:1:0:1:0] 6: [82000000:1:1:2:0] 7: [82000000:1:2:2:0] 8: [82000000:1:0:2:0] Layout info: ver0 disks [ 0 1 ], ver1 disks [ 2 ] ERROR: There are unequal parts *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB 
[72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK *** Disks: 0: [82000000:1:1:0:0] 1: [82000000:1:2:0:0] 2: [82000000:1:0:0:0] 3: [82000000:1:1:1:0] 4: [82000000:1:2:1:0] 5: [82000000:1:0:1:0] 6: [82000000:1:1:2:0] 7: [82000000:1:2:2:0] 8: [82000000:1:0:2:0] Layout info: ver0 disks [ 0 1 2 ], ver1 disks [ 3 4 5 ] ERROR: There are unequal parts ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::DataOkErasureFiveParts [GOOD] Test command err: RandomSeed# 7488750503465802443 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:4:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 6 0 ] part 2: ver0 disks [ 7 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: { part 1 disks [ 6 0 ]; part 2 disks [ 7 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 0 ]; part 2 disks [ 7 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO 
[82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: ERROR: There are erasure restore fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: Erasure info: { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::PlacementStatusUnknown [GOOD] Test command err: RandomSeed# 837053873165422603 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB 
[72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** >> CheckIntegrityMirror3dc::DataOk [GOOD] >> BSCReadOnlyPDisk::ReadOnlyNotAllowed |87.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart |87.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3of4::PlacementBlobIsLost [GOOD] Test command err: RandomSeed# 9863576176534320285 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:2:0] FINISHED WITH OK *** |87.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart >> Donor::CheckOnlineReadRequestToDonor [GOOD] >> VDiskBalancing::TestStopOneNode_Block42 >> BSCReadOnlyPDisk::ReadOnlyOneByOne >> BlobDepot::BasicBlock [GOOD] >> BSCReadOnlyPDisk::RestartAndReadOnlyConsecutive >> BlobDepot::BasicCollectGarbage >> VDiskBalancing::TestStopOneNode_Mirror3dc_HugeBlob >> VDiskBalancing::TestStopOneNode_Mirror3dc >> BSCRestartPDisk::RestartBrokenDiskInBrokenGroup [GOOD] >> VDiskBalancing::TestRandom_Block42 |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> CheckIntegrityBlock42::DataErrorHeavySixPartsWithManyBroken [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3dc::DataOk [GOOD] Test command err: RandomSeed# 2052633641046706939 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** Group is disintegrated or has network problems *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** Disks: 0: [82000000:1:1:0:0] 1: [82000000:1:2:0:0] 2: [82000000:1:0:0:0] 3: [82000000:1:1:1:0] 4: [82000000:1:2:1:0] 5: [82000000:1:0:1:0] 6: [82000000:1:1:2:0] 7: [82000000:1:2:2:0] 8: [82000000:1:0:2:0] Layout info: ver0 disks [ 0 1 2 ] >> 
BSCRestartPDisk::RestartGoodDiskInBrokenGroupNotAllowed [GOOD] >> VDiskBalancing::TestDontSendToReadOnlyTest_Block42 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::CheckOnlineReadRequestToDonor [GOOD] Test command err: RandomSeed# 16604987565860432063 2025-06-24T20:28:12.167789Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:28:12.174509Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 5465683109671747757] 2025-06-24T20:28:12.230086Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:0:0:0:2097152:1] 2025-06-24T20:28:12.230412Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 1 PartsResurrected# 1 >> BsControllerTest::TestLocalSelfHeal [GOOD] >> VDiskBalancing::TestRandom_Mirror3dc >> CheckIntegrityBlock42::DataStatusUnknown [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartBrokenDiskInBrokenGroup [GOOD] Test command err: RandomSeed# 5730123821934752135 2025-06-24T20:28:13.083599Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.083794Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.083888Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.083969Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.084046Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.084115Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.084187Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.084287Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.085334Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# 
NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.085435Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.085489Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.085556Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.085610Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.085683Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.085737Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.085788Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.085864Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.085927Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.085989Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.086032Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.086092Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.086136Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.086207Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.086244Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' 
StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.107437Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.107546Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.107599Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.107650Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.107696Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.107784Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.107836Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.107886Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42 >> TYardTest::TestSysLogOverwrite [GOOD] >> TYardTest::TestUpsAndDownsAtTheBoundary ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartGoodDiskInBrokenGroupNotAllowed [GOOD] Test command err: RandomSeed# 11969965985860429667 2025-06-24T20:28:13.096591Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.096774Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.096870Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.096953Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.097047Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.097110Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 
VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.097178Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.098317Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.098440Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.098513Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.098565Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.098618Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.098674Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.098728Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.098806Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.098864Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.098915Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.099009Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.099048Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.099083Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.099117Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 
VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:13.113127Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.113218Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.113268Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.113337Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.113400Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.113447Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:13.113511Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 >> Donor::ContinueWithFaultyDonor [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::DataErrorHeavySixPartsWithManyBroken [GOOD] Test command err: RandomSeed# 14885232583377881555 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:4:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 6 ], ver1 disks [ 7 ], ver2 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] ERROR: There are unequal parts Erasure info: { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 
5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK ERROR: There are erasure restore fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO 
[82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 1 disks [ 0 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK ERROR: There are erasure restore fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: Erasure info: ERROR: There are erasure restore fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:3:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: 
[82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 6 ], ver1 disks [ 0 ] part 2: ver0 disks [ 6 ], ver1 disks [ 1 ] part 3: ver0 disks [ 6 ], ver1 disks [ 2 ] part 4: ver0 disks [ 3 ], ver1 disks [ 6 ] part 5: ver0 disks [ 4 ], ver1 disks [ 6 ] part 6: ver0 disks [ 5 ], ver1 disks [ 6 ] ERROR: There are unequal parts Erasure info: { part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK 
part 4 disks [ 3 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 
]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK ERROR: There are erasure restore fails >> TBsLocalRecovery::WriteRestartRead [GOOD] >> TBsLocalRecovery::MultiPutWriteRestartRead >> Donor::MultipleEvicts [GOOD] >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42_HugeBlob >> test_transform.py::TestYamlConfigTransformations::test_basic[args1-dump_ds_init] [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_simplified[dump] >> TBsDbStat::ChaoticParallelWrite_DbStat [GOOD] >> TBsHuge::Simple >> VDiskBalancing::TestStopOneNode_Block42_HugeBlob ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::TestLocalSelfHeal [GOOD] Test command err: 2025-06-24T20:28:09.136339Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2025-06-24T20:28:09.136396Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2025-06-24T20:28:09.136486Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2025-06-24T20:28:09.136508Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2025-06-24T20:28:09.136559Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2025-06-24T20:28:09.136583Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2025-06-24T20:28:09.136625Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2025-06-24T20:28:09.136646Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2025-06-24T20:28:09.136694Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2025-06-24T20:28:09.136715Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2025-06-24T20:28:09.136770Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2025-06-24T20:28:09.136790Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2025-06-24T20:28:09.136831Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2025-06-24T20:28:09.136851Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2025-06-24T20:28:09.136882Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2025-06-24T20:28:09.136906Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2025-06-24T20:28:09.136938Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2025-06-24T20:28:09.136961Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2025-06-24T20:28:09.136997Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2025-06-24T20:28:09.137019Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2025-06-24T20:28:09.137070Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2025-06-24T20:28:09.137093Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2025-06-24T20:28:09.137132Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2025-06-24T20:28:09.137153Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2025-06-24T20:28:09.137219Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2025-06-24T20:28:09.137244Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2025-06-24T20:28:09.137277Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2025-06-24T20:28:09.137298Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2025-06-24T20:28:09.137336Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2025-06-24T20:28:09.137358Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2025-06-24T20:28:09.137403Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Bootstrap 2025-06-24T20:28:09.137438Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Connect 2025-06-24T20:28:09.137487Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Bootstrap 2025-06-24T20:28:09.137510Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Connect 2025-06-24T20:28:09.137543Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Bootstrap 2025-06-24T20:28:09.137563Z 18 
00h00m00.000000s :BS_NODE DEBUG: [18] Connect 2025-06-24T20:28:09.137618Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Bootstrap 2025-06-24T20:28:09.137645Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Connect 2025-06-24T20:28:09.137682Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Bootstrap 2025-06-24T20:28:09.137702Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Connect 2025-06-24T20:28:09.137757Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Bootstrap 2025-06-24T20:28:09.137787Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Connect 2025-06-24T20:28:09.137824Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Bootstrap 2025-06-24T20:28:09.137844Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Connect 2025-06-24T20:28:09.137882Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Bootstrap 2025-06-24T20:28:09.137902Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Connect 2025-06-24T20:28:09.137939Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Bootstrap 2025-06-24T20:28:09.137958Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Connect 2025-06-24T20:28:09.137994Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Bootstrap 2025-06-24T20:28:09.138013Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Connect 2025-06-24T20:28:09.138047Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Bootstrap 2025-06-24T20:28:09.138070Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Connect 2025-06-24T20:28:09.138131Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Bootstrap 2025-06-24T20:28:09.138174Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Connect 2025-06-24T20:28:09.138211Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Bootstrap 2025-06-24T20:28:09.138230Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Connect 2025-06-24T20:28:09.138285Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Bootstrap 2025-06-24T20:28:09.138306Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Connect 2025-06-24T20:28:09.138339Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Bootstrap 2025-06-24T20:28:09.138360Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Connect 2025-06-24T20:28:09.138394Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Bootstrap 2025-06-24T20:28:09.138413Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Connect 2025-06-24T20:28:09.138453Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Bootstrap 2025-06-24T20:28:09.138473Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Connect 2025-06-24T20:28:09.138519Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Bootstrap 2025-06-24T20:28:09.138544Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Connect 2025-06-24T20:28:09.138579Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Bootstrap 2025-06-24T20:28:09.138599Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Connect 2025-06-24T20:28:09.138644Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Bootstrap 2025-06-24T20:28:09.138666Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Connect 2025-06-24T20:28:09.138701Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Bootstrap 2025-06-24T20:28:09.138722Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Connect 2025-06-24T20:28:09.182651Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:2713:53] Status# ERROR ClientId# [1:2713:53] ServerId# [0:0:0] PipeClient# [1:2713:53] 2025-06-24T20:28:09.196271Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:2714:41] Status# ERROR ClientId# [2:2714:41] ServerId# [0:0:0] PipeClient# [2:2714:41] 2025-06-24T20:28:09.196348Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:2715:41] Status# ERROR ClientId# [3:2715:41] ServerId# [0:0:0] PipeClient# [3:2715:41] 2025-06-24T20:28:09.196399Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:2716:41] Status# ERROR ClientId# [4:2716:41] ServerId# [0:0:0] 
PipeClient# [4:2716:41] 2025-06-24T20:28:09.196451Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:2717:41] Status# ERROR ClientId# [5:2717:41] ServerId# [0:0:0] PipeClient# [5:2717:41] 2025-06-24T20:28:09.196489Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:2718:41] Status# ERROR ClientId# [6:2718:41] ServerId# [0:0:0] PipeClient# [6:2718:41] 2025-06-24T20:28:09.196537Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:2719:41] Status# ERROR ClientId# [7:2719:41] ServerId# [0:0:0] PipeClient# [7:2719:41] 2025-06-24T20:28:09.196590Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:2720:41] Status# ERROR ClientId# [8:2720:41] ServerId# [0:0:0] PipeClient# [8:2720:41] 2025-06-24T20:28:09.196635Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:2721:41] Status# ERROR ClientId# [9:2721:41] ServerId# [0:0:0] PipeClient# [9:2721:41] 2025-06-24T20:28:09.196679Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:2722:41] Status# ERROR ClientId# [10:2722:41] ServerId# [0:0:0] PipeClient# [10:2722:41] 2025-06-24T20:28:09.196724Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:2723:41] Status# ERROR ClientId# [11:2723:41] ServerId# [0:0:0] PipeClient# [11:2723:41] 2025-06-24T20:28:09.196766Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:2724:41] Status# ERROR ClientId# [12:2724:41] ServerId# [0:0:0] PipeClient# [12:2724:41] 2025-06-24T20:28:09.196802Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:2725:41] Status# ERROR ClientId# [13:2725:41] ServerId# [0:0:0] PipeClient# [13:2725:41] 2025-06-24T20:28:09.196838Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:2726:41] Status# ERROR ClientId# [14:2726:41] ServerId# [0:0:0] PipeClient# [14:2726:41] 2025-06-24T20:28:09.196880Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:2727:41] Status# ERROR ClientId# [15:2727:41] ServerId# [0:0:0] PipeClient# [15:2727:41] 2025-06-24T20:28:09.196919Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] ClientConnected Sender# [16:2728:41] Status# ERROR ClientId# [16:2728:41] ServerId# [0:0:0] PipeClient# [16:2728:41] 2025-06-24T20:28:09.196957Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] ClientConnected Sender# [17:2729:41] Status# ERROR ClientId# [17:2729:41] ServerId# [0:0:0] PipeClient# [17:2729:41] 2025-06-24T20:28:09.196994Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] ClientConnected Sender# [18:2730:41] Status# ERROR ClientId# [18:2730:41] ServerId# [0:0:0] PipeClient# [18:2730:41] 2025-06-24T20:28:09.197031Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] ClientConnected Sender# [19:2731:41] Status# ERROR ClientId# [19:2731:41] ServerId# [0:0:0] PipeClient# [19:2731:41] 2025-06-24T20:28:09.197070Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] ClientConnected Sender# [20:2732:41] Status# ERROR ClientId# [20:2732:41] ServerId# [0:0:0] PipeClient# [20:2732:41] 2025-06-24T20:28:09.197120Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] ClientConnected Sender# [21:2733:41] Status# ERROR ClientId# [21:2733:41] ServerId# [0:0:0] PipeClient# [21:2733:41] 2025-06-24T20:28:09.197162Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] ClientConnected Sender# [22:2734:41] Status# ERROR ClientId# [22:2734:41] ServerId# [0:0:0] PipeClient# [22:2734:41] 2025-06-24T20:28:09.197199Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] ClientConnected Sender# [23:2735:41] Status# ERROR ClientId# [23:2735:41] ServerId# [0:0:0] PipeClient# [23:2735:41] 
2025-06-24T20:28:09.197255Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] ClientConnected Sender# [24:2736:41] Status# ERROR ClientId# [24:2736:41] ServerId# [0:0:0] PipeClient# [24:2736:41] 2025-06-24T20:28:09.197308Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] ClientConnected Sender# [25:2737:41] Status# ERROR ClientId# [25:2737:41] ServerId# [0:0:0] PipeClient# [25:2737:41] 2025-06-24T20:28:09.197345Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] ClientConnected Sender# [26:2738:41] Status# ERROR ClientId# [26:2738:41] ServerId# [0:0:0] PipeClient# [26:2738:41] 2025-06-24T20:28:09.197382Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] ClientConnected Sender# [27:2739:41] Status# ERROR ClientId# [27:2739:41] ServerId# [0:0:0] PipeClient# [27:2739:41] 2025-06-24T20:28:09.197427Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] ClientConnected Sender# [28:2740:41] Status# ERROR ClientId# [28:2740:41] ServerId# [0:0:0] PipeClient# [28:2740:41] 2025-06-24T20:28:09.197466Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] ClientConnected Sender# [29:2741:41] Status# ERROR ClientId# [29:2741:41] ServerId# [0:0:0] PipeClient# [29:2741:41] 2025-06-24T20:28:09.197504Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] ClientConnected Sender# [30:2742:41] Status# ERROR ClientId# [30:2742:41] ServerId# [0:0:0] PipeClient# [30:2742:41] 2025-06-24T20:28:09.197560Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] ClientConnected Sender# [31:2743:41] Status# ERROR ClientId# [31:2743:41] ServerId# [0:0:0] PipeClient# [31:2743:41] 2025-06-24T20:28:09.197601Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] ClientConnected Sender# [32:2744:41] Status# ERROR ClientId# [32:2744:41] ServerId# [0:0:0] PipeClient# [32:2744:41] 2025-06-24T20:28:09.197637Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] ClientConnected Sender# [33:2745:41] Status# ERROR ClientId# [33:2745:41] ServerId# [0:0:0] PipeClient# [33:2745:41] 2025-06-24T20:28:09.197675Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] ClientConnected Sender# [34:2746:41] Status# ERROR ClientId# [34:2746:41] ServerId# [0:0:0] PipeClient# [34:2746:41] 2025-06-24T20:28:09.197711Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] ClientConnected Sender# [35:2747:41] Status# ERROR ClientId# [35:2747:41 ... 
Reassigner TEvVStatusResult GroupId# 2147483671 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:13.028142Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483671 VDiskId# [80000017:1:2:1:0] DiskIsOk# true 2025-06-24T20:28:13.028188Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483671 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:13.028216Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483671 VDiskId# [80000017:1:2:2:0] DiskIsOk# true 2025-06-24T20:28:13.034402Z 1 00h05m00.105120s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483671 Items# [80000017:1:0:0:0]: 5:1001:1001 -> 5:1000:1010 ConfigTxSeqNo# 48 2025-06-24T20:28:13.034473Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483671 Success# true 2025-06-24T20:28:13.034670Z 17 00h05m00.105120s :BS_NODE DEBUG: [17] NodeServiceSetUpdate 2025-06-24T20:28:13.034762Z 17 00h05m00.105120s :BS_NODE DEBUG: [17] VDiskId# [80000017:1:1:0:0] -> [80000017:2:1:0:0] 2025-06-24T20:28:13.034911Z 35 00h05m00.105120s :BS_NODE DEBUG: [35] NodeServiceSetUpdate 2025-06-24T20:28:13.034970Z 35 00h05m00.105120s :BS_NODE DEBUG: [35] VDiskId# [80000017:1:2:2:0] -> [80000017:2:2:2:0] 2025-06-24T20:28:13.035068Z 20 00h05m00.105120s :BS_NODE DEBUG: [20] NodeServiceSetUpdate 2025-06-24T20:28:13.035131Z 20 00h05m00.105120s :BS_NODE DEBUG: [20] VDiskId# [80000017:1:1:1:0] -> [80000017:2:1:1:0] 2025-06-24T20:28:13.035286Z 5 00h05m00.105120s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:13.035334Z 5 00h05m00.105120s :BS_NODE DEBUG: [5] VDiskId# [80000017:2:0:0:0] PDiskId# 1000 VSlotId# 1010 created 2025-06-24T20:28:13.035422Z 5 00h05m00.105120s :BS_NODE DEBUG: [5] VDiskId# [80000017:2:0:0:0] status changed to INIT_PENDING 2025-06-24T20:28:13.035540Z 23 00h05m00.105120s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2025-06-24T20:28:13.035606Z 23 00h05m00.105120s :BS_NODE DEBUG: [23] VDiskId# [80000017:1:1:2:0] -> [80000017:2:1:2:0] 2025-06-24T20:28:13.035740Z 8 00h05m00.105120s :BS_NODE DEBUG: [8] NodeServiceSetUpdate 2025-06-24T20:28:13.035799Z 8 00h05m00.105120s :BS_NODE DEBUG: [8] VDiskId# [80000017:1:0:1:0] -> [80000017:2:0:1:0] 2025-06-24T20:28:13.035890Z 11 00h05m00.105120s :BS_NODE DEBUG: [11] NodeServiceSetUpdate 2025-06-24T20:28:13.035955Z 11 00h05m00.105120s :BS_NODE DEBUG: [11] VDiskId# [80000017:1:0:2:0] -> [80000017:2:0:2:0] 2025-06-24T20:28:13.036062Z 29 00h05m00.105120s :BS_NODE DEBUG: [29] NodeServiceSetUpdate 2025-06-24T20:28:13.036112Z 29 00h05m00.105120s :BS_NODE DEBUG: [29] VDiskId# [80000017:1:2:0:0] -> [80000017:2:2:0:0] 2025-06-24T20:28:13.036213Z 32 00h05m00.105120s :BS_NODE DEBUG: [32] NodeServiceSetUpdate 2025-06-24T20:28:13.036274Z 32 00h05m00.105120s :BS_NODE DEBUG: [32] VDiskId# [80000017:1:2:1:0] -> [80000017:2:2:1:0] 2025-06-24T20:28:13.036647Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH01@self_heal.cpp:71} Reassigner starting GroupId# 2147483655 2025-06-24T20:28:13.037788Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:13.037846Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:1:0:1:0] DiskIsOk# true 2025-06-24T20:28:13.037891Z 1 
00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:13.037924Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:1:0:2:0] DiskIsOk# true 2025-06-24T20:28:13.037959Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:13.038003Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:1:1:0:0] DiskIsOk# true 2025-06-24T20:28:13.038046Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:13.038087Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:1:1:1:0] DiskIsOk# true 2025-06-24T20:28:13.038133Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:13.038164Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:1:1:2:0] DiskIsOk# true 2025-06-24T20:28:13.038215Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:13.038274Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:1:2:0:0] DiskIsOk# true 2025-06-24T20:28:13.038311Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:13.038364Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:1:2:1:0] DiskIsOk# true 2025-06-24T20:28:13.038405Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:13.038436Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:1:2:2:0] DiskIsOk# true 2025-06-24T20:28:13.044873Z 1 00h05m00.105632s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483655 Items# [80000007:1:0:0:0]: 5:1001:1000 -> 5:1002:1010 ConfigTxSeqNo# 49 2025-06-24T20:28:13.044933Z 1 00h05m00.105632s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483655 Success# true 2025-06-24T20:28:13.045142Z 17 00h05m00.105632s :BS_NODE DEBUG: [17] NodeServiceSetUpdate 2025-06-24T20:28:13.045224Z 17 00h05m00.105632s :BS_NODE DEBUG: [17] VDiskId# [80000007:1:1:0:0] -> [80000007:2:1:0:0] 2025-06-24T20:28:13.045353Z 35 00h05m00.105632s :BS_NODE DEBUG: [35] NodeServiceSetUpdate 2025-06-24T20:28:13.045406Z 35 00h05m00.105632s :BS_NODE DEBUG: [35] VDiskId# [80000007:1:2:2:0] -> [80000007:2:2:2:0] 2025-06-24T20:28:13.045509Z 20 00h05m00.105632s :BS_NODE DEBUG: [20] NodeServiceSetUpdate 2025-06-24T20:28:13.045563Z 20 00h05m00.105632s :BS_NODE DEBUG: [20] VDiskId# [80000007:1:1:1:0] -> 
[80000007:2:1:1:0] 2025-06-24T20:28:13.045664Z 5 00h05m00.105632s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:13.045738Z 5 00h05m00.105632s :BS_NODE DEBUG: [5] VDiskId# [80000007:2:0:0:0] PDiskId# 1002 VSlotId# 1010 created 2025-06-24T20:28:13.045828Z 5 00h05m00.105632s :BS_NODE DEBUG: [5] VDiskId# [80000007:2:0:0:0] status changed to INIT_PENDING 2025-06-24T20:28:13.045960Z 23 00h05m00.105632s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2025-06-24T20:28:13.046012Z 23 00h05m00.105632s :BS_NODE DEBUG: [23] VDiskId# [80000007:1:1:2:0] -> [80000007:2:1:2:0] 2025-06-24T20:28:13.046128Z 8 00h05m00.105632s :BS_NODE DEBUG: [8] NodeServiceSetUpdate 2025-06-24T20:28:13.046199Z 8 00h05m00.105632s :BS_NODE DEBUG: [8] VDiskId# [80000007:1:0:1:0] -> [80000007:2:0:1:0] 2025-06-24T20:28:13.046301Z 11 00h05m00.105632s :BS_NODE DEBUG: [11] NodeServiceSetUpdate 2025-06-24T20:28:13.046351Z 11 00h05m00.105632s :BS_NODE DEBUG: [11] VDiskId# [80000007:1:0:2:0] -> [80000007:2:0:2:0] 2025-06-24T20:28:13.046455Z 29 00h05m00.105632s :BS_NODE DEBUG: [29] NodeServiceSetUpdate 2025-06-24T20:28:13.046504Z 29 00h05m00.105632s :BS_NODE DEBUG: [29] VDiskId# [80000007:1:2:0:0] -> [80000007:2:2:0:0] 2025-06-24T20:28:13.046590Z 32 00h05m00.105632s :BS_NODE DEBUG: [32] NodeServiceSetUpdate 2025-06-24T20:28:13.046637Z 32 00h05m00.105632s :BS_NODE DEBUG: [32] VDiskId# [80000007:1:2:1:0] -> [80000007:2:2:1:0] 2025-06-24T20:28:13.047927Z 5 00h05m01.715120s :BS_NODE DEBUG: [5] VDiskId# [80000017:2:0:0:0] status changed to REPLICATING 2025-06-24T20:28:13.048584Z 5 00h05m03.305048s :BS_NODE DEBUG: [5] VDiskId# [80000077:2:0:0:0] status changed to REPLICATING 2025-06-24T20:28:13.049736Z 5 00h05m03.496632s :BS_NODE DEBUG: [5] VDiskId# [80000007:2:0:0:0] status changed to REPLICATING 2025-06-24T20:28:13.050618Z 5 00h05m03.700584s :BS_NODE DEBUG: [5] VDiskId# [80000047:2:0:0:0] status changed to REPLICATING 2025-06-24T20:28:13.051520Z 5 00h05m03.712560s :BS_NODE DEBUG: [5] VDiskId# [80000067:2:0:0:0] status changed to REPLICATING 2025-06-24T20:28:13.052593Z 5 00h05m04.123072s :BS_NODE DEBUG: [5] VDiskId# [80000057:2:0:0:0] status changed to REPLICATING 2025-06-24T20:28:13.054484Z 5 00h05m05.307608s :BS_NODE DEBUG: [5] VDiskId# [80000027:2:0:0:0] status changed to REPLICATING 2025-06-24T20:28:13.055783Z 5 00h05m05.363096s :BS_NODE DEBUG: [5] VDiskId# [80000037:2:0:0:0] status changed to REPLICATING 2025-06-24T20:28:13.056922Z 5 00h05m11.836632s :BS_NODE DEBUG: [5] VDiskId# [80000007:2:0:0:0] status changed to READY 2025-06-24T20:28:13.058514Z 5 00h05m11.837144s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:13.058589Z 5 00h05m11.837144s :BS_NODE DEBUG: [5] VDiskId# [80000007:1:0:0:0] destroyed 2025-06-24T20:28:13.058783Z 5 00h05m14.137560s :BS_NODE DEBUG: [5] VDiskId# [80000067:2:0:0:0] status changed to READY 2025-06-24T20:28:13.060350Z 5 00h05m14.138072s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:13.060418Z 5 00h05m14.138072s :BS_NODE DEBUG: [5] VDiskId# [80000067:1:0:0:0] destroyed 2025-06-24T20:28:13.061478Z 5 00h05m16.404608s :BS_NODE DEBUG: [5] VDiskId# [80000027:2:0:0:0] status changed to READY 2025-06-24T20:28:13.063123Z 5 00h05m16.405120s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:13.063210Z 5 00h05m16.405120s :BS_NODE DEBUG: [5] VDiskId# [80000027:1:0:0:0] destroyed 2025-06-24T20:28:13.063412Z 5 00h05m19.804096s :BS_NODE DEBUG: [5] VDiskId# [80000037:2:0:0:0] status changed to READY 2025-06-24T20:28:13.064863Z 5 00h05m19.804608s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 
2025-06-24T20:28:13.064919Z 5 00h05m19.804608s :BS_NODE DEBUG: [5] VDiskId# [80000037:1:0:0:0] destroyed 2025-06-24T20:28:13.078465Z 5 00h05m27.365120s :BS_NODE DEBUG: [5] VDiskId# [80000017:2:0:0:0] status changed to READY 2025-06-24T20:28:13.080648Z 5 00h05m27.365632s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:13.080726Z 5 00h05m27.365632s :BS_NODE DEBUG: [5] VDiskId# [80000017:1:0:0:0] destroyed 2025-06-24T20:28:13.080925Z 5 00h05m27.806584s :BS_NODE DEBUG: [5] VDiskId# [80000047:2:0:0:0] status changed to READY 2025-06-24T20:28:13.083077Z 5 00h05m27.807096s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:13.083177Z 5 00h05m27.807096s :BS_NODE DEBUG: [5] VDiskId# [80000047:1:0:0:0] destroyed 2025-06-24T20:28:13.083398Z 5 00h05m28.398072s :BS_NODE DEBUG: [5] VDiskId# [80000057:2:0:0:0] status changed to READY 2025-06-24T20:28:13.085766Z 5 00h05m28.398584s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:13.085830Z 5 00h05m28.398584s :BS_NODE DEBUG: [5] VDiskId# [80000057:1:0:0:0] destroyed 2025-06-24T20:28:13.087816Z 5 00h05m34.830048s :BS_NODE DEBUG: [5] VDiskId# [80000077:2:0:0:0] status changed to READY 2025-06-24T20:28:13.095620Z 5 00h05m34.830560s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:13.095701Z 5 00h05m34.830560s :BS_NODE DEBUG: [5] VDiskId# [80000077:1:0:0:0] destroyed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::DataStatusUnknown [GOOD] Test command err: RandomSeed# 6023466986052782596 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** Group is disintegrated or has network problems *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB 
[72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: part 2: part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: >> BsControllerTest::TestLocalBrokenRelocation [GOOD] >> BSCReadOnlyPDisk::SetGoodDiskInBrokenGroupReadOnlyNotAllowed >> TBsOther1::ChaoticParallelWrite [GOOD] >> TBsOther2::ChaoticParallelWrite_SkeletonFrontQueuesOverload ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::ContinueWithFaultyDonor [GOOD] Test command err: RandomSeed# 18337221001264354812 2025-06-24T20:28:12.387707Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:28:12.389891Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 13807801409477180148] 2025-06-24T20:28:12.429657Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> Donor::SlayAfterWiping [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::MultipleEvicts [GOOD] Test command err: RandomSeed# 2866023280685318564 0 donors: 2025-06-24T20:28:13.671075Z 15 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:28:13.677804Z 15 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 16302282691599133321] 2025-06-24T20:28:13.700086Z 15 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 14:1000 2025-06-24T20:28:13.835549Z 14 00h00m20.012048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:28:13.842079Z 14 00h00m20.012048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 16302282691599133321] 2025-06-24T20:28:13.860481Z 14 00h00m20.012048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 15:1000 2025-06-24T20:28:13.946285Z 15 00h00m20.013072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:28:13.951403Z 15 00h00m20.013072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 16302282691599133321] 2025-06-24T20:28:13.966001Z 15 00h00m20.013072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 14:1000 2025-06-24T20:28:14.047091Z 14 00h00m20.014096s 
:BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:28:14.052540Z 14 00h00m20.014096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 16302282691599133321] 2025-06-24T20:28:14.067212Z 14 00h00m20.014096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 15:1000 2025-06-24T20:28:14.150197Z 15 00h00m20.015120s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:28:14.155973Z 15 00h00m20.015120s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 16302282691599133321] 2025-06-24T20:28:14.171079Z 15 00h00m20.015120s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 14:1000 2025-06-24T20:28:14.292399Z 14 00h00m20.016144s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:28:14.298751Z 14 00h00m20.016144s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 16302282691599133321] 2025-06-24T20:28:14.313878Z 14 00h00m20.016144s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 15:1000 2025-06-24T20:28:14.412881Z 15 00h00m20.017168s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:28:14.418832Z 15 00h00m20.017168s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 16302282691599133321] 2025-06-24T20:28:14.434018Z 15 00h00m20.017168s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 14:1000 2025-06-24T20:28:14.517981Z 14 00h00m20.018192s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:28:14.524391Z 14 00h00m20.018192s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 16302282691599133321] 2025-06-24T20:28:14.545182Z 14 00h00m20.018192s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 15:1000 2025-06-24T20:28:14.690102Z 15 00h00m20.019216s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:28:14.697083Z 15 00h00m20.019216s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 16302282691599133321] 2025-06-24T20:28:14.714227Z 15 00h00m20.019216s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 14:1000 >> 
BSCReadOnlyPDisk::ReadOnlySlay |87.5%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/test-results/unittest/{meta.json ... results_accumulator.log} |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> TBsHuge::Simple [GOOD] >> TBsHuge::SimpleErasureNone >> BSCReadOnlyPDisk::SetGoodDiskInBrokenGroupReadOnlyNotAllowed [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::TestLocalBrokenRelocation [GOOD] Test command err: 2025-06-24T20:28:05.889983Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2025-06-24T20:28:05.890046Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2025-06-24T20:28:05.890169Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2025-06-24T20:28:05.890196Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2025-06-24T20:28:05.890262Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2025-06-24T20:28:05.890289Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2025-06-24T20:28:05.890340Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2025-06-24T20:28:05.890364Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2025-06-24T20:28:05.890421Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2025-06-24T20:28:05.890459Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2025-06-24T20:28:05.890513Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2025-06-24T20:28:05.890538Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2025-06-24T20:28:05.890578Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2025-06-24T20:28:05.890601Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2025-06-24T20:28:05.890645Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2025-06-24T20:28:05.890667Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2025-06-24T20:28:05.890703Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2025-06-24T20:28:05.890729Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2025-06-24T20:28:05.890772Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2025-06-24T20:28:05.890795Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2025-06-24T20:28:05.890855Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2025-06-24T20:28:05.890882Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2025-06-24T20:28:05.890919Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2025-06-24T20:28:05.890941Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2025-06-24T20:28:05.891024Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2025-06-24T20:28:05.891051Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2025-06-24T20:28:05.891089Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2025-06-24T20:28:05.891114Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2025-06-24T20:28:05.891177Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2025-06-24T20:28:05.891200Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2025-06-24T20:28:05.891261Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Bootstrap 2025-06-24T20:28:05.891305Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Connect 2025-06-24T20:28:05.891366Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Bootstrap 2025-06-24T20:28:05.891392Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Connect 2025-06-24T20:28:05.891433Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Bootstrap 2025-06-24T20:28:05.891453Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Connect 2025-06-24T20:28:05.891515Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Bootstrap 
2025-06-24T20:28:05.891541Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Connect 2025-06-24T20:28:05.891588Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Bootstrap 2025-06-24T20:28:05.891611Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Connect 2025-06-24T20:28:05.891672Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Bootstrap 2025-06-24T20:28:05.891707Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Connect 2025-06-24T20:28:05.891756Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Bootstrap 2025-06-24T20:28:05.891777Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Connect 2025-06-24T20:28:05.891817Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Bootstrap 2025-06-24T20:28:05.891846Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Connect 2025-06-24T20:28:05.891884Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Bootstrap 2025-06-24T20:28:05.891908Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Connect 2025-06-24T20:28:05.891951Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Bootstrap 2025-06-24T20:28:05.891975Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Connect 2025-06-24T20:28:05.892018Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Bootstrap 2025-06-24T20:28:05.892046Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Connect 2025-06-24T20:28:05.892128Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Bootstrap 2025-06-24T20:28:05.892162Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Connect 2025-06-24T20:28:05.892207Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Bootstrap 2025-06-24T20:28:05.892227Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Connect 2025-06-24T20:28:05.892280Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Bootstrap 2025-06-24T20:28:05.892299Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Connect 2025-06-24T20:28:05.892330Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Bootstrap 2025-06-24T20:28:05.892350Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Connect 2025-06-24T20:28:05.892387Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Bootstrap 2025-06-24T20:28:05.892408Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Connect 2025-06-24T20:28:05.892449Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Bootstrap 2025-06-24T20:28:05.892470Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Connect 2025-06-24T20:28:05.892524Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Bootstrap 2025-06-24T20:28:05.892550Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Connect 2025-06-24T20:28:05.892586Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Bootstrap 2025-06-24T20:28:05.892606Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Connect 2025-06-24T20:28:05.892662Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Bootstrap 2025-06-24T20:28:05.892686Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Connect 2025-06-24T20:28:05.892727Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Bootstrap 2025-06-24T20:28:05.892750Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Connect 2025-06-24T20:28:05.938912Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:2713:53] Status# ERROR ClientId# [1:2713:53] ServerId# [0:0:0] PipeClient# [1:2713:53] 2025-06-24T20:28:05.948627Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:2714:41] Status# ERROR ClientId# [2:2714:41] ServerId# [0:0:0] PipeClient# [2:2714:41] 2025-06-24T20:28:05.948710Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:2715:41] Status# ERROR ClientId# [3:2715:41] ServerId# [0:0:0] PipeClient# [3:2715:41] 2025-06-24T20:28:05.948768Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:2716:41] Status# ERROR ClientId# [4:2716:41] ServerId# [0:0:0] PipeClient# [4:2716:41] 2025-06-24T20:28:05.948826Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# 
[5:2717:41] Status# ERROR ClientId# [5:2717:41] ServerId# [0:0:0] PipeClient# [5:2717:41] 2025-06-24T20:28:05.948880Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:2718:41] Status# ERROR ClientId# [6:2718:41] ServerId# [0:0:0] PipeClient# [6:2718:41] 2025-06-24T20:28:05.948930Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:2719:41] Status# ERROR ClientId# [7:2719:41] ServerId# [0:0:0] PipeClient# [7:2719:41] 2025-06-24T20:28:05.948987Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:2720:41] Status# ERROR ClientId# [8:2720:41] ServerId# [0:0:0] PipeClient# [8:2720:41] 2025-06-24T20:28:05.949045Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:2721:41] Status# ERROR ClientId# [9:2721:41] ServerId# [0:0:0] PipeClient# [9:2721:41] 2025-06-24T20:28:05.949082Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:2722:41] Status# ERROR ClientId# [10:2722:41] ServerId# [0:0:0] PipeClient# [10:2722:41] 2025-06-24T20:28:05.949124Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:2723:41] Status# ERROR ClientId# [11:2723:41] ServerId# [0:0:0] PipeClient# [11:2723:41] 2025-06-24T20:28:05.949164Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:2724:41] Status# ERROR ClientId# [12:2724:41] ServerId# [0:0:0] PipeClient# [12:2724:41] 2025-06-24T20:28:05.949202Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:2725:41] Status# ERROR ClientId# [13:2725:41] ServerId# [0:0:0] PipeClient# [13:2725:41] 2025-06-24T20:28:05.949238Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:2726:41] Status# ERROR ClientId# [14:2726:41] ServerId# [0:0:0] PipeClient# [14:2726:41] 2025-06-24T20:28:05.949282Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:2727:41] Status# ERROR ClientId# [15:2727:41] ServerId# [0:0:0] PipeClient# [15:2727:41] 2025-06-24T20:28:05.949319Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] ClientConnected Sender# [16:2728:41] Status# ERROR ClientId# [16:2728:41] ServerId# [0:0:0] PipeClient# [16:2728:41] 2025-06-24T20:28:05.949357Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] ClientConnected Sender# [17:2729:41] Status# ERROR ClientId# [17:2729:41] ServerId# [0:0:0] PipeClient# [17:2729:41] 2025-06-24T20:28:05.949395Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] ClientConnected Sender# [18:2730:41] Status# ERROR ClientId# [18:2730:41] ServerId# [0:0:0] PipeClient# [18:2730:41] 2025-06-24T20:28:05.949436Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] ClientConnected Sender# [19:2731:41] Status# ERROR ClientId# [19:2731:41] ServerId# [0:0:0] PipeClient# [19:2731:41] 2025-06-24T20:28:05.949476Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] ClientConnected Sender# [20:2732:41] Status# ERROR ClientId# [20:2732:41] ServerId# [0:0:0] PipeClient# [20:2732:41] 2025-06-24T20:28:05.949545Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] ClientConnected Sender# [21:2733:41] Status# ERROR ClientId# [21:2733:41] ServerId# [0:0:0] PipeClient# [21:2733:41] 2025-06-24T20:28:05.949591Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] ClientConnected Sender# [22:2734:41] Status# ERROR ClientId# [22:2734:41] ServerId# [0:0:0] PipeClient# [22:2734:41] 2025-06-24T20:28:05.949630Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] ClientConnected Sender# [23:2735:41] Status# ERROR ClientId# [23:2735:41] ServerId# [0:0:0] PipeClient# [23:2735:41] 2025-06-24T20:28:05.949705Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] ClientConnected Sender# [24:2736:41] Status# ERROR ClientId# 
[24:2736:41] ServerId# [0:0:0] PipeClient# [24:2736:41] 2025-06-24T20:28:05.949749Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] ClientConnected Sender# [25:2737:41] Status# ERROR ClientId# [25:2737:41] ServerId# [0:0:0] PipeClient# [25:2737:41] 2025-06-24T20:28:05.949790Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] ClientConnected Sender# [26:2738:41] Status# ERROR ClientId# [26:2738:41] ServerId# [0:0:0] PipeClient# [26:2738:41] 2025-06-24T20:28:05.949831Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] ClientConnected Sender# [27:2739:41] Status# ERROR ClientId# [27:2739:41] ServerId# [0:0:0] PipeClient# [27:2739:41] 2025-06-24T20:28:05.949872Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] ClientConnected Sender# [28:2740:41] Status# ERROR ClientId# [28:2740:41] ServerId# [0:0:0] PipeClient# [28:2740:41] 2025-06-24T20:28:05.949910Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] ClientConnected Sender# [29:2741:41] Status# ERROR ClientId# [29:2741:41] ServerId# [0:0:0] PipeClient# [29:2741:41] 2025-06-24T20:28:05.949964Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] ClientConnected Sender# [30:2742:41] Status# ERROR ClientId# [30:2742:41] ServerId# [0:0:0] PipeClient# [30:2742:41] 2025-06-24T20:28:05.950012Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] ClientConnected Sender# [31:2743:41] Status# ERROR ClientId# [31:2743:41] ServerId# [0:0:0] PipeClient# [31:2743:41] 2025-06-24T20:28:05.950056Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] ClientConnected Sender# [32:2744:41] Status# ERROR ClientId# [32:2744:41] ServerId# [0:0:0] PipeClient# [32:2744:41] 2025-06-24T20:28:05.950094Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] ClientConnected Sender# [33:2745:41] Status# ERROR ClientId# [33:2745:41] ServerId# [0:0:0] PipeClient# [33:2745:41] 2025-06-24T20:28:05.950150Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] ClientConnected Sender# [34:2746:41] Status# ERROR ClientId# [34:2746:41] ServerId# [0:0:0] PipeClient# [34:2746:41] 2025-06-24T20:28:05.950186Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] ClientConnected Sender# [35:2747:41] Status# ERROR ClientId# [35:2747:41 ... 
25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000001:2:2:2:0] -> [80000001:3:2:2:0] 2025-06-24T20:28:13.779767Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000021:2:2:2:0] -> [80000021:3:2:2:0] 2025-06-24T20:28:13.779828Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000031:2:2:2:0] -> [80000031:3:2:2:0] 2025-06-24T20:28:13.779872Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000051:2:2:2:0] -> [80000051:3:2:2:0] 2025-06-24T20:28:13.779911Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000061:2:2:2:0] -> [80000061:3:2:2:0] 2025-06-24T20:28:13.780552Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] NodeServiceSetUpdate 2025-06-24T20:28:13.780611Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000010:2:1:0:0] -> [80000010:3:1:0:0] 2025-06-24T20:28:13.780657Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000040:2:1:0:0] -> [80000040:3:1:0:0] 2025-06-24T20:28:13.780696Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000070:2:1:0:0] -> [80000070:3:1:0:0] 2025-06-24T20:28:13.780745Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000001:2:1:1:0] -> [80000001:3:1:1:0] 2025-06-24T20:28:13.780813Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000021:2:1:1:0] -> [80000021:3:1:1:0] 2025-06-24T20:28:13.780858Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000031:2:1:1:0] -> [80000031:3:1:1:0] 2025-06-24T20:28:13.780898Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000051:2:1:1:0] -> [80000051:3:1:1:0] 2025-06-24T20:28:13.780942Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000061:2:1:1:0] -> [80000061:3:1:1:0] 2025-06-24T20:28:13.780987Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000002:1:1:2:0] -> [80000002:2:1:2:0] 2025-06-24T20:28:13.781032Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000012:1:1:2:0] -> [80000012:2:1:2:0] 2025-06-24T20:28:13.781071Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000022:1:1:2:0] -> [80000022:2:1:2:0] 2025-06-24T20:28:13.781132Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000032:1:1:2:0] -> [80000032:2:1:2:0] 2025-06-24T20:28:13.781183Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000042:1:1:2:0] -> [80000042:2:1:2:0] 2025-06-24T20:28:13.781237Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000052:1:1:2:0] -> [80000052:2:1:2:0] 2025-06-24T20:28:13.781276Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000062:1:1:2:0] -> [80000062:2:1:2:0] 2025-06-24T20:28:13.781339Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000072:1:1:2:0] -> [80000072:2:1:2:0] 2025-06-24T20:28:13.781946Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] NodeServiceSetUpdate 2025-06-24T20:28:13.782000Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000010:2:2:2:0] -> [80000010:3:2:2:0] 2025-06-24T20:28:13.782041Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000040:2:2:2:0] -> [80000040:3:2:2:0] 2025-06-24T20:28:13.782081Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000070:2:2:2:0] -> [80000070:3:2:2:0] 2025-06-24T20:28:13.782133Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000002:1:2:0:0] -> [80000002:2:2:0:0] 2025-06-24T20:28:13.782195Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000012:1:2:0:0] -> [80000012:2:2:0:0] 2025-06-24T20:28:13.782236Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000022:1:2:0:0] -> [80000022:2:2:0:0] 2025-06-24T20:28:13.782285Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000032:1:2:0:0] -> [80000032:2:2:0:0] 2025-06-24T20:28:13.782338Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] 
VDiskId# [80000042:1:2:0:0] -> [80000042:2:2:0:0] 2025-06-24T20:28:13.782378Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000052:1:2:0:0] -> [80000052:2:2:0:0] 2025-06-24T20:28:13.782415Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000062:1:2:0:0] -> [80000062:2:2:0:0] 2025-06-24T20:28:13.782470Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000072:1:2:0:0] -> [80000072:2:2:0:0] 2025-06-24T20:28:13.782921Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] NodeServiceSetUpdate 2025-06-24T20:28:13.782982Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000010:2:1:1:0] -> [80000010:3:1:1:0] 2025-06-24T20:28:13.783023Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000040:2:1:1:0] -> [80000040:3:1:1:0] 2025-06-24T20:28:13.783072Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000070:2:1:1:0] -> [80000070:3:1:1:0] 2025-06-24T20:28:13.783122Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000001:2:1:2:0] -> [80000001:3:1:2:0] 2025-06-24T20:28:13.783315Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000021:2:1:2:0] -> [80000021:3:1:2:0] 2025-06-24T20:28:13.783361Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000031:2:1:2:0] -> [80000031:3:1:2:0] 2025-06-24T20:28:13.783401Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000051:2:1:2:0] -> [80000051:3:1:2:0] 2025-06-24T20:28:13.783446Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000061:2:1:2:0] -> [80000061:3:1:2:0] 2025-06-24T20:28:13.785859Z 7 01h25m01.501560s :BS_NODE DEBUG: [7] VDiskId# [80000001:3:0:1:0] status changed to REPLICATING 2025-06-24T20:28:13.786376Z 7 01h25m02.019560s :BS_NODE DEBUG: [7] VDiskId# [80000021:3:0:1:0] status changed to REPLICATING 2025-06-24T20:28:13.786854Z 5 01h25m02.268560s :BS_NODE DEBUG: [5] VDiskId# [80000072:2:0:2:0] status changed to REPLICATING 2025-06-24T20:28:13.787338Z 7 01h25m03.345560s :BS_NODE DEBUG: [7] VDiskId# [80000031:3:0:1:0] status changed to REPLICATING 2025-06-24T20:28:13.787737Z 2 01h25m03.494560s :BS_NODE DEBUG: [2] VDiskId# [80000042:2:0:2:0] status changed to REPLICATING 2025-06-24T20:28:13.788066Z 4 01h25m03.508560s :BS_NODE DEBUG: [4] VDiskId# [80000022:2:0:2:0] status changed to REPLICATING 2025-06-24T20:28:13.788402Z 2 01h25m03.700560s :BS_NODE DEBUG: [2] VDiskId# [80000062:2:0:2:0] status changed to REPLICATING 2025-06-24T20:28:13.788726Z 4 01h25m03.726560s :BS_NODE DEBUG: [4] VDiskId# [80000002:2:0:2:0] status changed to REPLICATING 2025-06-24T20:28:13.789067Z 4 01h25m03.794560s :BS_NODE DEBUG: [4] VDiskId# [80000032:2:0:2:0] status changed to REPLICATING 2025-06-24T20:28:13.789379Z 5 01h25m03.828560s :BS_NODE DEBUG: [5] VDiskId# [80000052:2:0:2:0] status changed to REPLICATING 2025-06-24T20:28:13.789747Z 4 01h25m03.850560s :BS_NODE DEBUG: [4] VDiskId# [80000012:2:0:2:0] status changed to REPLICATING 2025-06-24T20:28:13.790083Z 10 01h25m04.114560s :BS_NODE DEBUG: [10] VDiskId# [80000040:3:0:0:0] status changed to REPLICATING 2025-06-24T20:28:13.790466Z 8 01h25m04.180560s :BS_NODE DEBUG: [8] VDiskId# [80000061:3:0:1:0] status changed to REPLICATING 2025-06-24T20:28:13.790762Z 10 01h25m04.788560s :BS_NODE DEBUG: [10] VDiskId# [80000070:3:0:0:0] status changed to REPLICATING 2025-06-24T20:28:13.791089Z 7 01h25m04.968560s :BS_NODE DEBUG: [7] VDiskId# [80000051:3:0:1:0] status changed to REPLICATING 2025-06-24T20:28:13.792898Z 10 01h25m06.007560s :BS_NODE DEBUG: [10] VDiskId# [80000010:3:0:0:0] status changed to REPLICATING 2025-06-24T20:28:13.793590Z 2 01h25m10.815560s :BS_NODE DEBUG: [2] VDiskId# [80000042:2:0:2:0] 
status changed to READY 2025-06-24T20:28:13.794488Z 1 01h25m10.816072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:13.794551Z 1 01h25m10.816072s :BS_NODE DEBUG: [1] VDiskId# [80000042:1:0:2:0] destroyed 2025-06-24T20:28:13.794688Z 7 01h25m12.777560s :BS_NODE DEBUG: [7] VDiskId# [80000021:3:0:1:0] status changed to READY 2025-06-24T20:28:13.795500Z 1 01h25m12.778072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:13.795558Z 1 01h25m12.778072s :BS_NODE DEBUG: [1] VDiskId# [80000021:2:0:1:0] destroyed 2025-06-24T20:28:13.795709Z 7 01h25m14.888560s :BS_NODE DEBUG: [7] VDiskId# [80000051:3:0:1:0] status changed to READY 2025-06-24T20:28:13.796365Z 1 01h25m14.889072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:13.796408Z 1 01h25m14.889072s :BS_NODE DEBUG: [1] VDiskId# [80000051:2:0:1:0] destroyed 2025-06-24T20:28:13.797189Z 8 01h25m16.884560s :BS_NODE DEBUG: [8] VDiskId# [80000061:3:0:1:0] status changed to READY 2025-06-24T20:28:13.798003Z 1 01h25m16.885072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:13.798046Z 1 01h25m16.885072s :BS_NODE DEBUG: [1] VDiskId# [80000061:2:0:1:0] destroyed 2025-06-24T20:28:13.798156Z 5 01h25m17.389560s :BS_NODE DEBUG: [5] VDiskId# [80000072:2:0:2:0] status changed to READY 2025-06-24T20:28:13.798878Z 1 01h25m17.390072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:13.798926Z 1 01h25m17.390072s :BS_NODE DEBUG: [1] VDiskId# [80000072:1:0:2:0] destroyed 2025-06-24T20:28:13.799031Z 4 01h25m18.324560s :BS_NODE DEBUG: [4] VDiskId# [80000002:2:0:2:0] status changed to READY 2025-06-24T20:28:13.799753Z 1 01h25m18.325072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:13.799799Z 1 01h25m18.325072s :BS_NODE DEBUG: [1] VDiskId# [80000002:1:0:2:0] destroyed 2025-06-24T20:28:13.799894Z 7 01h25m19.880560s :BS_NODE DEBUG: [7] VDiskId# [80000031:3:0:1:0] status changed to READY 2025-06-24T20:28:13.800542Z 1 01h25m19.881072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:13.800586Z 1 01h25m19.881072s :BS_NODE DEBUG: [1] VDiskId# [80000031:2:0:1:0] destroyed 2025-06-24T20:28:13.800675Z 4 01h25m19.974560s :BS_NODE DEBUG: [4] VDiskId# [80000022:2:0:2:0] status changed to READY 2025-06-24T20:28:13.801372Z 1 01h25m19.975072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:13.801418Z 1 01h25m19.975072s :BS_NODE DEBUG: [1] VDiskId# [80000022:1:0:2:0] destroyed 2025-06-24T20:28:13.801892Z 10 01h25m20.507560s :BS_NODE DEBUG: [10] VDiskId# [80000070:3:0:0:0] status changed to READY 2025-06-24T20:28:13.802671Z 1 01h25m20.508072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:13.802718Z 1 01h25m20.508072s :BS_NODE DEBUG: [1] VDiskId# [80000070:2:0:0:0] destroyed 2025-06-24T20:28:13.803060Z 4 01h25m25.146560s :BS_NODE DEBUG: [4] VDiskId# [80000032:2:0:2:0] status changed to READY 2025-06-24T20:28:13.803864Z 1 01h25m25.147072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:13.803910Z 1 01h25m25.147072s :BS_NODE DEBUG: [1] VDiskId# [80000032:1:0:2:0] destroyed 2025-06-24T20:28:13.804008Z 4 01h25m25.566560s :BS_NODE DEBUG: [4] VDiskId# [80000012:2:0:2:0] status changed to READY 2025-06-24T20:28:13.804603Z 1 01h25m25.567072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:13.804644Z 1 01h25m25.567072s :BS_NODE DEBUG: [1] VDiskId# [80000012:1:0:2:0] destroyed 2025-06-24T20:28:13.805010Z 7 01h25m26.233560s :BS_NODE DEBUG: [7] VDiskId# [80000001:3:0:1:0] status changed to READY 2025-06-24T20:28:13.805663Z 1 01h25m26.234072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 
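The entries above trace the lifecycle of a VDisk slot during group reconfiguration: NodeServiceSetUpdate re-points each VDiskId at the next group generation (for example [80000001:2:2:2:0] -> [80000001:3:2:2:0]), the new replica reports "status changed to REPLICATING" and later "READY", and the replica of the superseded generation is then destroyed. The short C++ sketch below is only an illustrative model of that state machine as it appears in the log; the names (TMiniVDiskSlot, ApplyServiceSetUpdate, FinishReplication) are hypothetical and are not YDB APIs.

#include <cstdint>
#include <iostream>

// Hypothetical, simplified model of the transitions visible in the log above.
enum class EVDiskStatus { INIT, REPLICATING, READY, DESTROYED };

struct TMiniVDiskSlot {
    uint32_t GroupId;       // e.g. 0x80000001 in the log
    uint32_t Generation;    // bumped on reconfiguration, 2 -> 3 above
    EVDiskStatus Status = EVDiskStatus::INIT;
};

// Reconfiguration: the slot moves to the next group generation and must
// replicate its data before it can serve reads as READY.
void ApplyServiceSetUpdate(TMiniVDiskSlot& slot) {
    ++slot.Generation;
    slot.Status = EVDiskStatus::REPLICATING;
}

// Once replication finishes, the new replica becomes READY and the replica of
// the previous generation can be destroyed, as the "destroyed" entries show.
void FinishReplication(TMiniVDiskSlot& newSlot, TMiniVDiskSlot& oldSlot) {
    newSlot.Status = EVDiskStatus::READY;
    oldSlot.Status = EVDiskStatus::DESTROYED;
}

int main() {
    TMiniVDiskSlot oldSlot{0x80000001u, 2, EVDiskStatus::READY};
    TMiniVDiskSlot newSlot{0x80000001u, 2};
    ApplyServiceSetUpdate(newSlot);      // [80000001:2:...] -> [80000001:3:...]
    FinishReplication(newSlot, oldSlot); // new generation READY, old one destroyed
    std::cout << "new generation " << newSlot.Generation << " ready\n";
}

This is a sketch of the observable ordering only (update -> replicate -> ready -> destroy old), not of the actual BS_NODE warden code.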
2025-06-24T20:28:13.805712Z 1 01h25m26.234072s :BS_NODE DEBUG: [1] VDiskId# [80000001:2:0:1:0] destroyed 2025-06-24T20:28:13.807049Z 5 01h25m31.353560s :BS_NODE DEBUG: [5] VDiskId# [80000052:2:0:2:0] status changed to READY 2025-06-24T20:28:13.807900Z 1 01h25m31.354072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:13.807949Z 1 01h25m31.354072s :BS_NODE DEBUG: [1] VDiskId# [80000052:1:0:2:0] destroyed 2025-06-24T20:28:13.808685Z 10 01h25m34.343560s :BS_NODE DEBUG: [10] VDiskId# [80000010:3:0:0:0] status changed to READY 2025-06-24T20:28:13.809363Z 1 01h25m34.344072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:13.809409Z 1 01h25m34.344072s :BS_NODE DEBUG: [1] VDiskId# [80000010:2:0:0:0] destroyed 2025-06-24T20:28:13.810220Z 2 01h25m35.459560s :BS_NODE DEBUG: [2] VDiskId# [80000062:2:0:2:0] status changed to READY 2025-06-24T20:28:13.810911Z 1 01h25m35.460072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:13.810953Z 1 01h25m35.460072s :BS_NODE DEBUG: [1] VDiskId# [80000062:1:0:2:0] destroyed 2025-06-24T20:28:13.811368Z 10 01h25m38.521560s :BS_NODE DEBUG: [10] VDiskId# [80000040:3:0:0:0] status changed to READY 2025-06-24T20:28:13.812178Z 1 01h25m38.522072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:13.812226Z 1 01h25m38.522072s :BS_NODE DEBUG: [1] VDiskId# [80000040:2:0:0:0] destroyed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::SlayAfterWiping [GOOD] Test command err: RandomSeed# 17806488488847473750 2025-06-24T20:28:13.794199Z 1 00h01m14.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:28:13.800324Z 1 00h01m14.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 1120479626733549999] 2025-06-24T20:28:13.840109Z 1 00h01m14.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest >> BlobDepot::BasicCollectGarbage [GOOD] >> BlobDepot::VerifiedRandom |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42 [GOOD] >> TBsLocalRecovery::MultiPutWriteRestartRead [GOOD] >> TBsLocalRecovery::MultiPutWriteRestartReadHuge |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |87.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/federated_topic/ut/basic_usage_ut.cpp |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::SetGoodDiskInBrokenGroupReadOnlyNotAllowed [GOOD] Test command err: RandomSeed# 12519454402329984315 2025-06-24T20:28:17.303742Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:17.303916Z 2 
00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:17.304021Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:17.304089Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:17.304159Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:17.304230Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:17.304293Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:17.305243Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:17.305322Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:17.305374Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:17.305421Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:17.305496Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:17.305551Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:17.305601Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:17.305674Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:17.305727Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:17.305762Z 7 
00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:17.305832Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:17.305867Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:17.305908Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:17.305948Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:17.324026Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:17.324127Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:17.324189Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:17.324279Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:17.324350Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:17.324405Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:17.324452Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 |87.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/federated_topic/ut/basic_usage_ut.cpp |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest >> VDiskRestart::Simple [GOOD] >> TBsHuge::SimpleErasureNone [GOOD] >> TBsLocalRecovery::ChaoticWriteRestart |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42 [GOOD] Test command err: RandomSeed# 9247001090530117718 SEND TEvPut with key [1:1:1:0:0:100:0] 2025-06-24T20:28:16.340439Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] 
NodeDisconnected NodeId# 6 2025-06-24T20:28:16.340864Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 Node 4: Node 5: Node 6: 2 Node 7: 3 2025-06-24T20:28:16.417617Z 1 00h01m00.011024s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 7 Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 2 Node 4: Node 5: 1 Node 6: Node 7: 3 Start compaction 1 Finish compaction 1 >> VDiskBalancing::TestStopOneNode_Block42 [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest >> VDiskRestart::Simple [GOOD] >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42_HugeBlob [GOOD] >> TBsLocalRecovery::MultiPutWriteRestartReadHuge [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeXXX >> BSCReadOnlyPDisk::SetBrokenDiskInBrokenGroupReadOnly >> TIncrHugeBasicTest::Defrag [GOOD] |87.6%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/test-results/unittest/{meta.json ... results_accumulator.log} >> TPDiskRaces::DecommitWithInflight [GOOD] >> TPDiskRaces::DecommitWithInflightMock |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Block42 [GOOD] Test command err: RandomSeed# 13694239141997354448 SEND TEvPut with key [1:1:1:0:0:100:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:100:0] 2025-06-24T20:28:14.946169Z 3 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:188:17] ServerId# [1:296:58] TabletId# 72057594037932033 PipeClientId# [3:188:17] 2025-06-24T20:28:14.946428Z 8 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [8:223:17] ServerId# [1:301:63] TabletId# 72057594037932033 PipeClientId# [8:223:17] 2025-06-24T20:28:14.946563Z 6 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:209:17] ServerId# [1:299:61] TabletId# 72057594037932033 PipeClientId# [6:209:17] 2025-06-24T20:28:14.946679Z 5 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:202:17] ServerId# [1:298:60] TabletId# 72057594037932033 PipeClientId# [5:202:17] 2025-06-24T20:28:14.946846Z 4 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:195:17] ServerId# [1:297:59] TabletId# 72057594037932033 PipeClientId# [4:195:17] 2025-06-24T20:28:14.946975Z 2 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:181:17] ServerId# [1:295:57] TabletId# 72057594037932033 PipeClientId# [2:181:17] 2025-06-24T20:28:14.947083Z 7 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:216:17] ServerId# [1:300:62] TabletId# 72057594037932033 PipeClientId# [7:216:17] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:100:0] Status# OK 
StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start compaction Finish compaction >> BSCRestartPDisk::RestartNotAllowed >> VDiskBalancing::TestStopOneNode_Mirror3dc_HugeBlob [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42_HugeBlob [GOOD] Test command err: RandomSeed# 11337842153689220709 SEND TEvPut with key [1:1:1:0:0:3201024:0] 2025-06-24T20:28:17.449270Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 6 2025-06-24T20:28:17.449747Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:3201024:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 Node 4: Node 5: Node 6: 2 Node 7: 3 2025-06-24T20:28:17.554485Z 1 00h01m00.011024s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 7 Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 2 Node 4: Node 5: 1 Node 6: Node 7: 3 Start compaction 1 Finish compaction 1 |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> VDiskBalancing::TestStopOneNode_Block42_HugeBlob [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBasicTest::Defrag [GOOD] Test command err: 2025-06-24T20:27:12.075707Z :BS_INCRHUGE DEBUG: incrhuge_keeper.cpp:72: BlockSize# 8128 BlocksInChunk# 2304 BlocksInMinBlob# 65 MaxBlobsPerChunk# 35 BlocksInDataSection# 2303 BlocksInIndexSection# 1 2025-06-24T20:27:12.075841Z :BS_INCRHUGE INFO: incrhuge_keeper_recovery.cpp:152: [PDisk# 000000001 Recovery] [IncrHugeKeeper PDisk# 000000001] starting ReadLog 2025-06-24T20:27:12.082883Z :BS_INCRHUGE INFO: incrhuge_keeper_recovery.cpp:161: [PDisk# 000000001 Recovery] [IncrHugeKeeper PDisk# 000000001] finished ReadLog 2025-06-24T20:27:12.082948Z :BS_INCRHUGE DEBUG: incrhuge_keeper_recovery.cpp:200: [PDisk# 000000001 Recovery] ApplyReadLog Chunks# [] Deletes# [] Owners# {} CurrentSerNum# 0 NextLsn# 1 2025-06-24T20:27:12.082994Z :BS_INCRHUGE INFO: incrhuge_keeper_recovery.cpp:515: [PDisk# 000000001 Recovery] [IncrHugeKeeper PDisk# 000000001] ready 2025-06-24T20:27:12.083021Z :TEST DEBUG: test_actor_concurrent.h:153: finished Init Reference# [] Enumerated# [] InFlightDeletes# [] 2025-06-24T20:27:12.083032Z :TEST DEBUG: test_actor_concurrent.h:209: ActionsTaken# 1 2025-06-24T20:27:12.083045Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 0 InFlightWritesSize# 0 2025-06-24T20:27:12.084536Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:811717:0:0] Lsn# 0 NumReq# 0 2025-06-24T20:27:12.086293Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 1 InFlightWritesSize# 1 2025-06-24T20:27:12.087379Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 0 HandleWrite Lsn# 0 DataSize# 811717 WriteQueueSize# 1 WriteInProgressItemsSize# 0 2025-06-24T20:27:12.087410Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 0 2025-06-24T20:27:12.087441Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] 
QueryId# 0 ProcessWriteItem entry 2025-06-24T20:27:12.087466Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2025-06-24T20:27:12.091260Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1745495:1:0] Lsn# 1 NumReq# 1 2025-06-24T20:27:12.094781Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 2 InFlightWritesSize# 2 2025-06-24T20:27:12.095273Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 1 HandleWrite Lsn# 1 DataSize# 1745495 WriteQueueSize# 2 WriteInProgressItemsSize# 0 2025-06-24T20:27:12.095294Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 2 WriteInProgressItemsSize# 0 2025-06-24T20:27:12.095322Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2025-06-24T20:27:12.095341Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2025-06-24T20:27:12.107407Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:602037:2:0] Lsn# 2 NumReq# 2 2025-06-24T20:27:12.108609Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 3 InFlightWritesSize# 3 2025-06-24T20:27:12.110088Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1287465:3:0] Lsn# 3 NumReq# 3 2025-06-24T20:27:12.110507Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 2 HandleWrite Lsn# 2 DataSize# 602037 WriteQueueSize# 3 WriteInProgressItemsSize# 0 2025-06-24T20:27:12.110526Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 3 WriteInProgressItemsSize# 0 2025-06-24T20:27:12.110543Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2025-06-24T20:27:12.110572Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2025-06-24T20:27:12.110599Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 3 HandleWrite Lsn# 3 DataSize# 1287465 WriteQueueSize# 4 WriteInProgressItemsSize# 0 2025-06-24T20:27:12.110620Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 4 WriteInProgressItemsSize# 0 2025-06-24T20:27:12.110635Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2025-06-24T20:27:12.110643Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2025-06-24T20:27:12.119181Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 4 InFlightWritesSize# 4 2025-06-24T20:27:12.121171Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1501676:4:0] Lsn# 4 NumReq# 4 2025-06-24T20:27:12.124172Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 5 InFlightWritesSize# 5 2025-06-24T20:27:12.124508Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 4 HandleWrite Lsn# 4 DataSize# 1501676 WriteQueueSize# 5 WriteInProgressItemsSize# 0 2025-06-24T20:27:12.124531Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 5 WriteInProgressItemsSize# 0 2025-06-24T20:27:12.124552Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 
0 ProcessWriteItem entry 2025-06-24T20:27:12.124570Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2025-06-24T20:27:12.135597Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:460: [PDisk# 000000001 Logger] ApplyLogChunkItem Lsn# 1 Status# OK 2025-06-24T20:27:12.135655Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 2 ChunkSerNum# 1000 2025-06-24T20:27:12.135690Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 3 ChunkSerNum# 1001 2025-06-24T20:27:12.135723Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 4 ChunkSerNum# 1002 2025-06-24T20:27:12.135735Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 5 ChunkSerNum# 1003 2025-06-24T20:27:12.135744Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 6 ChunkSerNum# 1004 2025-06-24T20:27:12.135754Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 7 ChunkSerNum# 1005 2025-06-24T20:27:12.135774Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 8 ChunkSerNum# 1006 2025-06-24T20:27:12.135788Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 9 ChunkSerNum# 1007 2025-06-24T20:27:12.135811Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 5 WriteInProgressItemsSize# 0 2025-06-24T20:27:12.135834Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2025-06-24T20:27:12.136622Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem OffsetInBlocks# 0 IndexInsideChunk# 0 SizeInBlocks# 100 SizeInBytes# 812800 Offset# 0 Size# 812800 End# 812800 Id# 0000000000000000 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-06-24T20:27:12.136648Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 1 ProcessWriteItem entry 2025-06-24T20:27:12.137038Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 1 ProcessWriteItem OffsetInBlocks# 100 IndexInsideChunk# 1 SizeInBlocks# 215 SizeInBytes# 1747520 Offset# 812800 Size# 1747520 End# 2560320 Id# 0000000000000001 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-06-24T20:27:12.137053Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 2 ProcessWriteItem entry 2025-06-24T20:27:12.137207Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 2 ProcessWriteItem OffsetInBlocks# 315 IndexInsideChunk# 2 SizeInBlocks# 75 SizeInBytes# 609600 Offset# 2560320 Size# 609600 End# 3169920 Id# 0000000000000002 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-06-24T20:27:12.137218Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 3 ProcessWriteItem entry 2025-06-24T20:27:12.137518Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 3 ProcessWriteItem OffsetInBlocks# 390 IndexInsideChunk# 3 SizeInBlocks# 159 SizeInBytes# 1292352 Offset# 3169920 Size# 1292352 End# 4462272 Id# 0000000000000003 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-06-24T20:27:12.137546Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 4 ProcessWriteItem entry 2025-06-24T20:27:12.137909Z 
:BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 4 ProcessWriteItem OffsetInBlocks# 549 IndexInsideChunk# 4 SizeInBlocks# 185 SizeInBytes# 1503680 Offset# 4462272 Size# 1503680 End# 5965952 Id# 0000000000000004 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-06-24T20:27:12.143823Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:687721:5:0] Lsn# 5 NumReq# 5 2025-06-24T20:27:12.145197Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 6 InFlightWritesSize# 6 2025-06-24T20:27:12.151759Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 5 HandleWrite Lsn# 5 DataSize# 687721 WriteQueueSize# 1 WriteInProgressItemsSize# 5 2025-06-24T20:27:12.151789Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 5 2025-06-24T20:27:12.151832Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 0 ApplyBlobWrite Status# OK 2025-06-24T20:27:12.151967Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 4 2025-06-24T20:27:12.151985Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 5 ProcessWriteItem entry 2025-06-24T20:27:12.152192Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 5 ProcessWriteItem OffsetInBlocks# 734 IndexInsideChunk# 5 SizeInBlocks# 85 SizeInBytes# 690880 Offset# 5965952 Size# 690880 End# 6656832 Id# 0000000000000005 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-06-24T20:27:12.155852Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1957662:6:0] Lsn# 6 NumReq# 6 2025-06-24T20:27:12.160072Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 6 HandleWrite Lsn# 6 DataSize# 1957662 WriteQueueSize# 1 WriteInProgressItemsSize# 5 2025-06-24T20:27:12.160104Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 5 2025-06-24T20:27:12.161860Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 7 InFlightWritesSize# 7 2025-06-24T20:27:12.163805Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1824284:7:0] Lsn# 7 NumReq# 7 2025-06-24T20:27:12.164531Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 7 HandleWrite Lsn# 7 DataSize# 1824284 WriteQueueSize# 2 WriteInProgressItemsSize# 5 2025-06-24T20:27:12.164555Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 2 WriteInProgressItemsSize# 5 2025-06-24T20:27:12.164600Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 1 ApplyBlobWrite Status# OK 2025-06-24T20:27:12.164863Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 2 WriteInProgressItem ... 
0001 Logger] ProcessDeleteQueueItem Lsn# 778 Status# OK 2025-06-24T20:28:20.272763Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:686: [PDisk# 000000001 Logger] ApplyLogDeleteItem Entrypoint# false Lsn# 778 Virtual# false 2025-06-24T20:28:20.272776Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:196: [PDisk# 000000001 Deleter] Owner# 1 SeqNo# 1189 finished Status# OK 2025-06-24T20:28:20.272787Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:249: [PDisk# 000000001 Deleter] deleting 0000000000000002 from lookup table 2025-06-24T20:28:20.272807Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:686: [PDisk# 000000001 Logger] ApplyLogDeleteItem Entrypoint# false Lsn# 779 Virtual# true 2025-06-24T20:28:20.272827Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 535 ApplyBlobWrite Status# OK 2025-06-24T20:28:20.273138Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 2 WriteInProgressItemsSize# 4 2025-06-24T20:28:20.273154Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 540 ProcessWriteItem entry 2025-06-24T20:28:20.273385Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 540 ProcessWriteItem OffsetInBlocks# 599 IndexInsideChunk# 5 SizeInBlocks# 115 SizeInBytes# 934720 Offset# 4868672 Size# 934720 End# 5803392 Id# 0000000000000002 ChunkIdx# 33 ChunkSerNum# 1107 Defrag# false 2025-06-24T20:28:20.275382Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:2093185:1190:0] Lsn# 1190 NumReq# 42 2025-06-24T20:28:20.279281Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 536 ApplyBlobWrite Status# OK 2025-06-24T20:28:20.279546Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 4 2025-06-24T20:28:20.279564Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 541 ProcessWriteItem entry 2025-06-24T20:28:20.279872Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 541 ProcessWriteItem OffsetInBlocks# 714 IndexInsideChunk# 6 SizeInBlocks# 153 SizeInBytes# 1243584 Offset# 5803392 Size# 1243584 End# 7046976 Id# 0000000000000028 ChunkIdx# 33 ChunkSerNum# 1107 Defrag# false 2025-06-24T20:28:20.279912Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 542 HandleWrite Lsn# 1190 DataSize# 2093185 WriteQueueSize# 1 WriteInProgressItemsSize# 5 2025-06-24T20:28:20.279927Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 5 2025-06-24T20:28:20.279952Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:460: [PDisk# 000000001 Logger] ApplyLogChunkItem Lsn# 779 Status# OK 2025-06-24T20:28:20.279978Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:474: [PDisk# 000000001 Logger] DeleteChunk ChunkIdx# 27 ChunkSerNum# 1101 2025-06-24T20:28:20.280001Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:686: [PDisk# 000000001 Logger] ApplyLogDeleteItem Entrypoint# false Lsn# 782 Virtual# true 2025-06-24T20:28:20.280021Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:309: [PDisk# 000000001 Deleter] finished chunk delete ChunkIdx# 27 Status# OK 2025-06-24T20:28:20.280039Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:460: [PDisk# 000000001 Logger] ApplyLogChunkItem Lsn# 780 Status# OK 2025-06-24T20:28:20.280057Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:460: [PDisk# 000000001 Logger] 
ApplyLogChunkItem Lsn# 781 Status# OK 2025-06-24T20:28:20.280075Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:474: [PDisk# 000000001 Logger] DeleteChunk ChunkIdx# 30 ChunkSerNum# 1104 2025-06-24T20:28:20.280090Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:686: [PDisk# 000000001 Logger] ApplyLogDeleteItem Entrypoint# false Lsn# 782 Virtual# true 2025-06-24T20:28:20.280106Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:309: [PDisk# 000000001 Deleter] finished chunk delete ChunkIdx# 30 Status# OK 2025-06-24T20:28:20.280125Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 537 ApplyBlobWrite Status# OK 2025-06-24T20:28:20.280480Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 4 2025-06-24T20:28:20.280495Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 542 ProcessWriteItem entry 2025-06-24T20:28:20.280941Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 542 ProcessWriteItem OffsetInBlocks# 867 IndexInsideChunk# 7 SizeInBlocks# 258 SizeInBytes# 2097024 Offset# 7046976 Size# 2097024 End# 9144000 Id# 000000000000002f ChunkIdx# 33 ChunkSerNum# 1107 Defrag# false 2025-06-24T20:28:20.280963Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 538 ApplyBlobWrite Status# OK 2025-06-24T20:28:20.281143Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 0 WriteInProgressItemsSize# 4 2025-06-24T20:28:20.281162Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 539 ApplyBlobWrite Status# OK 2025-06-24T20:28:20.281435Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 0 WriteInProgressItemsSize# 3 2025-06-24T20:28:20.281462Z :BS_INCRHUGE DEBUG: incrhuge_keeper_defrag.cpp:46: [PDisk# 000000001 Defragmenter] overall efficiency 0.205 2025-06-24T20:28:20.281479Z :BS_INCRHUGE DEBUG: incrhuge_keeper_defrag.cpp:77: [PDisk# 000000001 Defragmenter] starting ChunkInProgress# 31 efficiency# 0.196 2025-06-24T20:28:20.287213Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 540 ApplyBlobWrite Status# OK 2025-06-24T20:28:20.287491Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 0 WriteInProgressItemsSize# 2 2025-06-24T20:28:20.287522Z :BS_INCRHUGE DEBUG: incrhuge_keeper_defrag.cpp:105: [PDisk# 000000001 Defragmenter] ApplyScan received 2025-06-24T20:28:20.287558Z :BS_INCRHUGE DEBUG: incrhuge_keeper_defrag.cpp:163: [PDisk# 000000001 Defragmenter] sending TEvChunkRead ChunkIdx# 31 OffsetInBlocks# 487 sizeInBlocks# 162 2025-06-24T20:28:20.287576Z :BS_INCRHUGE DEBUG: incrhuge_keeper_defrag.cpp:163: [PDisk# 000000001 Defragmenter] sending TEvChunkRead ChunkIdx# 31 OffsetInBlocks# 1231 sizeInBlocks# 205 2025-06-24T20:28:20.287593Z :BS_INCRHUGE DEBUG: incrhuge_keeper_defrag.cpp:163: [PDisk# 000000001 Defragmenter] sending TEvChunkRead ChunkIdx# 31 OffsetInBlocks# 1436 sizeInBlocks# 85 2025-06-24T20:28:20.287608Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 541 ApplyBlobWrite Status# OK 2025-06-24T20:28:20.288010Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 0 WriteInProgressItemsSize# 1 2025-06-24T20:28:20.288531Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 43 InFlightWritesSize# 19 
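The TIncrHugeBasicTest::Defrag output above shows the keeper's write path: HandleWrite queues a blob, ProcessWriteItem rounds its DataSize up to whole 8128-byte blocks and assigns it an offset inside the current chunk, and ApplyBlobWrite reports completion. The sketch below reproduces only the block-rounding arithmetic implied by those numbers (DataSize# 811717 -> SizeInBlocks# 100 -> SizeInBytes# 812800, then DataSize# 1745495 at OffsetInBlocks# 100); the queue model is a simplified stand-in, not the real incrhuge_keeper_write.cpp logic.

#include <cstdint>
#include <deque>
#include <iostream>

constexpr uint64_t BlockSize = 8128;   // "BlockSize# 8128" from the keeper's startup line

struct TWriteItem {
    uint64_t Lsn;
    uint64_t DataSize;
};

// Round a blob up to whole blocks, as ProcessWriteItem appears to do.
uint64_t SizeInBlocks(uint64_t dataSize) {
    return (dataSize + BlockSize - 1) / BlockSize;
}

int main() {
    // The first two writes from the log; offsets advance by whole blocks.
    std::deque<TWriteItem> writeQueue{{0, 811717}, {1, 1745495}};
    uint64_t offsetInBlocks = 0;
    while (!writeQueue.empty()) {
        TWriteItem item = writeQueue.front();
        writeQueue.pop_front();
        uint64_t blocks = SizeInBlocks(item.DataSize);
        std::cout << "Lsn# " << item.Lsn
                  << " OffsetInBlocks# " << offsetInBlocks
                  << " SizeInBlocks# " << blocks
                  << " SizeInBytes# " << blocks * BlockSize << "\n";
        offsetInBlocks += blocks;
    }
}

Running this prints SizeInBlocks# 100 / SizeInBytes# 812800 and SizeInBlocks# 215 / SizeInBytes# 1747520 at OffsetInBlocks# 100, matching the ProcessWriteItem entries above.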
2025-06-24T20:28:20.296774Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:2044453:1191:0] Lsn# 1191 NumReq# 43 2025-06-24T20:28:20.299242Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 543 HandleWrite Lsn# 1191 DataSize# 2044453 WriteQueueSize# 1 WriteInProgressItemsSize# 1 2025-06-24T20:28:20.299264Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 1 2025-06-24T20:28:20.299282Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 543 ProcessWriteItem entry 2025-06-24T20:28:20.299724Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 543 ProcessWriteItem OffsetInBlocks# 1125 IndexInsideChunk# 8 SizeInBlocks# 252 SizeInBytes# 2048256 Offset# 9144000 Size# 2048256 End# 11192256 Id# 0000000000000023 ChunkIdx# 33 ChunkSerNum# 1107 Defrag# false 2025-06-24T20:28:20.301402Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 44 InFlightWritesSize# 20 2025-06-24T20:28:20.303711Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:1815657:1192:0] Lsn# 1192 NumReq# 44 2025-06-24T20:28:20.307223Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 542 ApplyBlobWrite Status# OK 2025-06-24T20:28:20.315582Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 45 InFlightWritesSize# 21 2025-06-24T20:28:20.324840Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:1564457:1193:0] Lsn# 1193 NumReq# 45 2025-06-24T20:28:20.335967Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 46 InFlightWritesSize# 22 2025-06-24T20:28:20.346913Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 0 WriteInProgressItemsSize# 1 2025-06-24T20:28:20.348415Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:1579253:1194:0] Lsn# 1194 NumReq# 46 2025-06-24T20:28:20.351602Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 47 InFlightWritesSize# 23 2025-06-24T20:28:20.352233Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:584806:1195:0] Lsn# 1195 NumReq# 47 2025-06-24T20:28:20.353385Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 48 InFlightWritesSize# 24 2025-06-24T20:28:20.354216Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:831121:1196:0] Lsn# 1196 NumReq# 48 2025-06-24T20:28:20.359267Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 544 HandleWrite Lsn# 1192 DataSize# 1815657 WriteQueueSize# 1 WriteInProgressItemsSize# 1 2025-06-24T20:28:20.359296Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 1 2025-06-24T20:28:20.359313Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 544 ProcessWriteItem entry 2025-06-24T20:28:20.359734Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 544 ProcessWriteItem OffsetInBlocks# 1377 IndexInsideChunk# 9 SizeInBlocks# 224 SizeInBytes# 1820672 Offset# 11192256 Size# 1820672 End# 13012928 Id# 0000000000000011 ChunkIdx# 33 ChunkSerNum# 1107 Defrag# false 2025-06-24T20:28:20.359769Z :BS_INCRHUGE DEBUG: incrhuge_keeper_defrag.cpp:191: [PDisk# 000000001 Defragmenter] ApplyRead offsetInBlocks# 487 index# 3 Status# OK 
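The defragmenter entries above suggest how a chunk is picked for compaction: a chunk's efficiency is the share of its 2304 blocks still occupied by live blobs (the reads of 162 + 205 + 85 = 452 blocks account for the reported efficiency# 0.196 of chunk 31), the least efficient chunk is scanned and read back, and its live blobs are re-enqueued as Defrag# true writes into the currently open chunk, after which the emptied chunk is deleted. The sketch below models only that selection step under these assumptions; it is not the actual incrhuge_keeper_defrag.cpp implementation.

#include <cstdint>
#include <iostream>
#include <vector>

constexpr uint64_t BlocksInChunk = 2304;   // "BlocksInChunk# 2304" from the startup line

struct TChunk {
    uint32_t Idx;
    std::vector<uint64_t> LiveBlobSizesInBlocks;   // blobs still referenced in this chunk
    double Efficiency() const {
        uint64_t used = 0;
        for (uint64_t b : LiveBlobSizesInBlocks) used += b;
        return double(used) / BlocksInChunk;
    }
};

int main() {
    std::vector<TChunk> chunks{
        {31, {162, 205, 85}},   // 452 / 2304 ~= 0.196 -- chosen for defrag above
        {33, {2304}},           // freshly filled chunk, efficiency ~1.0
    };
    const TChunk* victim = &chunks.front();
    for (const TChunk& c : chunks)
        if (c.Efficiency() < victim->Efficiency()) victim = &c;
    std::cout << "defragmenting ChunkIdx# " << victim->Idx
              << " efficiency# " << victim->Efficiency() << "\n";
    // Each live blob would now be read and re-written (Defrag# true) into the
    // open chunk; once the old chunk holds no live blobs it is deleted.
    for (uint64_t blocks : victim->LiveBlobSizesInBlocks)
        std::cout << "rewrite blob of " << blocks << " blocks\n";
}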
2025-06-24T20:28:20.361037Z :BS_INCRHUGE DEBUG: incrhuge_keeper_defrag.cpp:251: [PDisk# 000000001 Defragmenter] EnqueueDefragWrite chunkIdx# 31 index# 3 Id# 0000000000000025 2025-06-24T20:28:20.361063Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 2 2025-06-24T20:28:20.361082Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 545 ProcessWriteItem entry 2025-06-24T20:28:20.361104Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:399: [PDisk# 000000001 Writer] QueryId# 545 DeleteInProgress# false 2025-06-24T20:28:20.361396Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 545 ProcessWriteItem OffsetInBlocks# 1601 IndexInsideChunk# 10 SizeInBlocks# 162 SizeInBytes# 1316736 Offset# 13012928 Size# 1316736 End# 14329664 Id# 0000000000000025 ChunkIdx# 33 ChunkSerNum# 1107 Defrag# true 2025-06-24T20:28:20.361617Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 543 ApplyBlobWrite Status# OK 2025-06-24T20:28:20.361901Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 0 WriteInProgressItemsSize# 2 2025-06-24T20:28:20.361928Z :BS_INCRHUGE DEBUG: incrhuge_keeper_defrag.cpp:191: [PDisk# 000000001 Defragmenter] ApplyRead offsetInBlocks# 1231 index# 7 Status# OK >> Donor::SkipBadDonor >> TYardTest::TestUpsAndDownsAtTheBoundary [GOOD] >> TYardTest::TestUnflushedChunk >> BsControllerTest::SelfHealBlock4Plus2 [GOOD] >> BSCRestartPDisk::RestartOneByOneWithReconnects >> VDiskBalancing::TestStopOneNode_Mirror3dc [GOOD] >> BSCRestartPDisk::RestartOneByOne |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> TBsVDiskOutOfSpace::WriteUntilYellowZone [GOOD] >> TBsVDiskRange::RangeGetFromEmptyDB >> BSCReadOnlyPDisk::SetBrokenDiskInBrokenGroupReadOnly [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Block42_HugeBlob [GOOD] Test command err: RandomSeed# 12423252868616514660 SEND TEvPut with key [1:1:1:0:0:3201024:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:3201024:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:3201024:0] 2025-06-24T20:28:18.584921Z 3 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:188:17] ServerId# [1:296:58] TabletId# 72057594037932033 PipeClientId# [3:188:17] 2025-06-24T20:28:18.585166Z 8 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [8:223:17] ServerId# [1:301:63] TabletId# 72057594037932033 PipeClientId# [8:223:17] 2025-06-24T20:28:18.585264Z 6 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:209:17] ServerId# [1:299:61] TabletId# 72057594037932033 PipeClientId# [6:209:17] 2025-06-24T20:28:18.585360Z 5 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:202:17] ServerId# [1:298:60] TabletId# 72057594037932033 PipeClientId# [5:202:17] 2025-06-24T20:28:18.585463Z 4 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# 
[4:195:17] ServerId# [1:297:59] TabletId# 72057594037932033 PipeClientId# [4:195:17] 2025-06-24T20:28:18.585560Z 2 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:181:17] ServerId# [1:295:57] TabletId# 72057594037932033 PipeClientId# [2:181:17] 2025-06-24T20:28:18.585655Z 7 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:216:17] ServerId# [1:300:62] TabletId# 72057594037932033 PipeClientId# [7:216:17] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:3201024:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start compaction Finish compaction >> test.py::test[solomon-BadDownsamplingFill-] [GOOD] >> test.py::test[solomon-BadDownsamplingInterval-] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Mirror3dc_HugeBlob [GOOD] Test command err: RandomSeed# 10929581438049938288 SEND TEvPut with key [1:1:1:0:0:533504:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:533504:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:533504:0] 2025-06-24T20:28:16.654110Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:533504:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start compaction Finish compaction >> TBsOther2::ChaoticParallelWrite_SkeletonFrontQueuesOverload [GOOD] >> Mirror3of4::ReplicationSmall [GOOD] >> Mirror3of4::ReplicationHuge |87.6%| [TA] $(B)/ydb/core/blobstorage/incrhuge/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::SetBrokenDiskInBrokenGroupReadOnly [GOOD] Test command err: RandomSeed# 6703787728475325175 2025-06-24T20:28:22.004498Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:22.004673Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:22.004753Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:22.004816Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:22.004886Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:22.004952Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:22.005014Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 
VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:22.005082Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:22.005955Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:22.006031Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:22.006083Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:22.006127Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:22.006187Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:22.006269Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:22.006337Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:22.006387Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:22.006457Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:22.006504Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:22.006562Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:22.006593Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:22.006623Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:22.006668Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: 
(2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:22.006712Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:22.006750Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-24T20:28:22.016760Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:22.016872Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:22.016924Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:22.016973Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:22.017017Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:22.017067Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:22.017112Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:22.017165Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-24T20:28:22.343385Z 1 00h01m30.011024s :BS_LOCALRECOVERY CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 
BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "Some error reason" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Mirror3dc [GOOD] Test command err: RandomSeed# 3866175774660942080 SEND TEvPut with key [1:1:1:0:0:100:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:100:0] 2025-06-24T20:28:17.047534Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start compaction Finish compaction >> TBsVDiskRange::RangeGetFromEmptyDB [GOOD] >> TBsVDiskRange::Simple3PutRangeGetAllBackwardFresh ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::SelfHealBlock4Plus2 [GOOD] Test command err: 2025-06-24T20:28:05.860740Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2025-06-24T20:28:05.860793Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2025-06-24T20:28:05.860874Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2025-06-24T20:28:05.860896Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2025-06-24T20:28:05.860944Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2025-06-24T20:28:05.860966Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2025-06-24T20:28:05.861018Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2025-06-24T20:28:05.861040Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2025-06-24T20:28:05.861085Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2025-06-24T20:28:05.861106Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2025-06-24T20:28:05.861139Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2025-06-24T20:28:05.861160Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2025-06-24T20:28:05.861210Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2025-06-24T20:28:05.861230Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2025-06-24T20:28:05.861261Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2025-06-24T20:28:05.861282Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2025-06-24T20:28:05.861348Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2025-06-24T20:28:05.861368Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2025-06-24T20:28:05.861400Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2025-06-24T20:28:05.861426Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2025-06-24T20:28:05.861471Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2025-06-24T20:28:05.861494Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2025-06-24T20:28:05.861543Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2025-06-24T20:28:05.861564Z 
12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2025-06-24T20:28:05.861595Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2025-06-24T20:28:05.861615Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2025-06-24T20:28:05.861653Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2025-06-24T20:28:05.861675Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2025-06-24T20:28:05.861720Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2025-06-24T20:28:05.861741Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2025-06-24T20:28:05.861789Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Bootstrap 2025-06-24T20:28:05.861811Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Connect 2025-06-24T20:28:05.861842Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Bootstrap 2025-06-24T20:28:05.861871Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Connect 2025-06-24T20:28:05.861909Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Bootstrap 2025-06-24T20:28:05.861930Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Connect 2025-06-24T20:28:05.861981Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Bootstrap 2025-06-24T20:28:05.862003Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Connect 2025-06-24T20:28:05.862049Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Bootstrap 2025-06-24T20:28:05.862070Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Connect 2025-06-24T20:28:05.862101Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Bootstrap 2025-06-24T20:28:05.862140Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Connect 2025-06-24T20:28:05.862198Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Bootstrap 2025-06-24T20:28:05.862218Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Connect 2025-06-24T20:28:05.862250Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Bootstrap 2025-06-24T20:28:05.862271Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Connect 2025-06-24T20:28:05.862304Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Bootstrap 2025-06-24T20:28:05.862324Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Connect 2025-06-24T20:28:05.862359Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Bootstrap 2025-06-24T20:28:05.862380Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Connect 2025-06-24T20:28:05.862411Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Bootstrap 2025-06-24T20:28:05.862431Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Connect 2025-06-24T20:28:05.862475Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Bootstrap 2025-06-24T20:28:05.862506Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Connect 2025-06-24T20:28:05.862565Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Bootstrap 2025-06-24T20:28:05.862597Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Connect 2025-06-24T20:28:05.862636Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Bootstrap 2025-06-24T20:28:05.862658Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Connect 2025-06-24T20:28:05.862690Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Bootstrap 2025-06-24T20:28:05.862709Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Connect 2025-06-24T20:28:05.862741Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Bootstrap 2025-06-24T20:28:05.862761Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Connect 2025-06-24T20:28:05.862805Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Bootstrap 2025-06-24T20:28:05.862827Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Connect 2025-06-24T20:28:05.880424Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:2157:49] Status# ERROR ClientId# [1:2157:49] ServerId# [0:0:0] PipeClient# [1:2157:49] 2025-06-24T20:28:05.881584Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:2158:37] Status# ERROR ClientId# [2:2158:37] ServerId# [0:0:0] PipeClient# [2:2158:37] 
2025-06-24T20:28:05.881635Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:2159:37] Status# ERROR ClientId# [3:2159:37] ServerId# [0:0:0] PipeClient# [3:2159:37] 2025-06-24T20:28:05.881679Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:2160:37] Status# ERROR ClientId# [4:2160:37] ServerId# [0:0:0] PipeClient# [4:2160:37] 2025-06-24T20:28:05.881739Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:2161:37] Status# ERROR ClientId# [5:2161:37] ServerId# [0:0:0] PipeClient# [5:2161:37] 2025-06-24T20:28:05.881792Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:2162:37] Status# ERROR ClientId# [6:2162:37] ServerId# [0:0:0] PipeClient# [6:2162:37] 2025-06-24T20:28:05.881841Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:2163:37] Status# ERROR ClientId# [7:2163:37] ServerId# [0:0:0] PipeClient# [7:2163:37] 2025-06-24T20:28:05.881885Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:2164:37] Status# ERROR ClientId# [8:2164:37] ServerId# [0:0:0] PipeClient# [8:2164:37] 2025-06-24T20:28:05.881925Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:2165:37] Status# ERROR ClientId# [9:2165:37] ServerId# [0:0:0] PipeClient# [9:2165:37] 2025-06-24T20:28:05.881964Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:2166:37] Status# ERROR ClientId# [10:2166:37] ServerId# [0:0:0] PipeClient# [10:2166:37] 2025-06-24T20:28:05.882003Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:2167:37] Status# ERROR ClientId# [11:2167:37] ServerId# [0:0:0] PipeClient# [11:2167:37] 2025-06-24T20:28:05.882060Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:2168:37] Status# ERROR ClientId# [12:2168:37] ServerId# [0:0:0] PipeClient# [12:2168:37] 2025-06-24T20:28:05.882123Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:2169:37] Status# ERROR ClientId# [13:2169:37] ServerId# [0:0:0] PipeClient# [13:2169:37] 2025-06-24T20:28:05.882177Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:2170:37] Status# ERROR ClientId# [14:2170:37] ServerId# [0:0:0] PipeClient# [14:2170:37] 2025-06-24T20:28:05.882217Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:2171:37] Status# ERROR ClientId# [15:2171:37] ServerId# [0:0:0] PipeClient# [15:2171:37] 2025-06-24T20:28:05.882254Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] ClientConnected Sender# [16:2172:37] Status# ERROR ClientId# [16:2172:37] ServerId# [0:0:0] PipeClient# [16:2172:37] 2025-06-24T20:28:05.882296Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] ClientConnected Sender# [17:2173:37] Status# ERROR ClientId# [17:2173:37] ServerId# [0:0:0] PipeClient# [17:2173:37] 2025-06-24T20:28:05.882332Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] ClientConnected Sender# [18:2174:37] Status# ERROR ClientId# [18:2174:37] ServerId# [0:0:0] PipeClient# [18:2174:37] 2025-06-24T20:28:05.882374Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] ClientConnected Sender# [19:2175:37] Status# ERROR ClientId# [19:2175:37] ServerId# [0:0:0] PipeClient# [19:2175:37] 2025-06-24T20:28:05.882413Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] ClientConnected Sender# [20:2176:37] Status# ERROR ClientId# [20:2176:37] ServerId# [0:0:0] PipeClient# [20:2176:37] 2025-06-24T20:28:05.882464Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] ClientConnected Sender# [21:2177:37] Status# ERROR ClientId# [21:2177:37] ServerId# [0:0:0] PipeClient# [21:2177:37] 2025-06-24T20:28:05.882519Z 22 00h00m00.000000s 
:BS_NODE DEBUG: [22] ClientConnected Sender# [22:2178:37] Status# ERROR ClientId# [22:2178:37] ServerId# [0:0:0] PipeClient# [22:2178:37] 2025-06-24T20:28:05.882559Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] ClientConnected Sender# [23:2179:37] Status# ERROR ClientId# [23:2179:37] ServerId# [0:0:0] PipeClient# [23:2179:37] 2025-06-24T20:28:05.882597Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] ClientConnected Sender# [24:2180:37] Status# ERROR ClientId# [24:2180:37] ServerId# [0:0:0] PipeClient# [24:2180:37] 2025-06-24T20:28:05.882639Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] ClientConnected Sender# [25:2181:37] Status# ERROR ClientId# [25:2181:37] ServerId# [0:0:0] PipeClient# [25:2181:37] 2025-06-24T20:28:05.882691Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] ClientConnected Sender# [26:2182:37] Status# ERROR ClientId# [26:2182:37] ServerId# [0:0:0] PipeClient# [26:2182:37] 2025-06-24T20:28:05.882739Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] ClientConnected Sender# [27:2183:37] Status# ERROR ClientId# [27:2183:37] ServerId# [0:0:0] PipeClient# [27:2183:37] 2025-06-24T20:28:05.882779Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] ClientConnected Sender# [28:2184:37] Status# ERROR ClientId# [28:2184:37] ServerId# [0:0:0] PipeClient# [28:2184:37] 2025-06-24T20:28:05.882817Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] ClientConnected Sender# [29:2185:37] Status# ERROR ClientId# [29:2185:37] ServerId# [0:0:0] PipeClient# [29:2185:37] 2025-06-24T20:28:05.882880Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] ClientConnected Sender# [30:2186:37] Status# ERROR ClientId# [30:2186:37] ServerId# [0:0:0] PipeClient# [30:2186:37] 2025-06-24T20:28:05.882923Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] ClientConnected Sender# [31:2187:37] Status# ERROR ClientId# [31:2187:37] ServerId# [0:0:0] PipeClient# [31:2187:37] 2025-06-24T20:28:05.882961Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] ClientConnected Sender# [32:2188:37] Status# ERROR ClientId# [32:2188:37] ServerId# [0:0:0] PipeClient# [32:2188:37] 2025-06-24T20:28:06.168361Z 1 00h00m00.002048s :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.222922s 2025-06-24T20:28:06.168495Z 1 00h00m00.002048s :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.223076s 2025-06-24T20:28:06.200975Z 1 00h00m00.002560s :BS_NODE DEBUG: [1] CheckState from [1:2257:73] expected 1 current 0 2025-06-24T20:28:06.201058Z 2 00h00m00.002560s :BS_NODE DEBUG: [2] CheckState from [2:2258:38] expected 1 current 0 2025-06-24T20:28:06.201105Z 3 00h00m00.002560s :BS_NODE DEBUG: [3] CheckState from [3:2259:38] expected 1 current 0 2025-06-24T20:28:06.201140Z 4 00h00m00.002560s :BS_NODE DEBUG: [4] CheckState from [4:2260:38] expected 1 current 0 2025-06-24T20:28:06.201169Z 5 00h00m00.002560s :BS_NODE DEBUG: [5] CheckState from [5:2261:38] expected 1 current 0 2025-06-24T20:28:06.201197Z 6 00h00m00.002560s :BS_NODE DEBUG: [6] CheckState from [6:2262:38] expected 1 current 0 2025-06-24T20:28:06.201225Z 7 00h00m00.002560s :BS_NODE DEBUG: [7] CheckState from [7 ... 
Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483688 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:21.401069Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483688 VDiskId# [80000028:5:0:7:0] DiskIsOk# true 2025-06-24T20:28:21.405962Z 1 05h15m00.122016s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:21.406044Z 1 05h15m00.122016s :BS_NODE DEBUG: [1] VDiskId# [80000028:5:0:0:0] -> [80000028:6:0:0:0] 2025-06-24T20:28:21.406574Z 1 05h15m00.122016s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483688 Items# [80000028:5:0:5:0]: 26:1000:1008 -> 9:1001:1015 ConfigTxSeqNo# 470 2025-06-24T20:28:21.406611Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483688 Success# true 2025-06-24T20:28:21.406759Z 19 05h15m00.122016s :BS_NODE DEBUG: [19] NodeServiceSetUpdate 2025-06-24T20:28:21.406811Z 19 05h15m00.122016s :BS_NODE DEBUG: [19] VDiskId# [80000028:5:0:2:0] -> [80000028:6:0:2:0] 2025-06-24T20:28:21.406897Z 4 05h15m00.122016s :BS_NODE DEBUG: [4] NodeServiceSetUpdate 2025-06-24T20:28:21.406942Z 4 05h15m00.122016s :BS_NODE DEBUG: [4] VDiskId# [80000028:5:0:3:0] -> [80000028:6:0:3:0] 2025-06-24T20:28:21.407019Z 5 05h15m00.122016s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:21.407061Z 5 05h15m00.122016s :BS_NODE DEBUG: [5] VDiskId# [80000028:5:0:4:0] -> [80000028:6:0:4:0] 2025-06-24T20:28:21.407138Z 7 05h15m00.122016s :BS_NODE DEBUG: [7] NodeServiceSetUpdate 2025-06-24T20:28:21.407204Z 7 05h15m00.122016s :BS_NODE DEBUG: [7] VDiskId# [80000028:5:0:6:0] -> [80000028:6:0:6:0] 2025-06-24T20:28:21.407284Z 8 05h15m00.122016s :BS_NODE DEBUG: [8] NodeServiceSetUpdate 2025-06-24T20:28:21.407330Z 8 05h15m00.122016s :BS_NODE DEBUG: [8] VDiskId# [80000028:5:0:7:0] -> [80000028:6:0:7:0] 2025-06-24T20:28:21.407390Z 26 05h15m00.122016s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2025-06-24T20:28:21.407465Z 9 05h15m00.122016s :BS_NODE DEBUG: [9] NodeServiceSetUpdate 2025-06-24T20:28:21.407508Z 9 05h15m00.122016s :BS_NODE DEBUG: [9] VDiskId# [80000028:6:0:5:0] PDiskId# 1001 VSlotId# 1015 created 2025-06-24T20:28:21.407591Z 9 05h15m00.122016s :BS_NODE DEBUG: [9] VDiskId# [80000028:6:0:5:0] status changed to INIT_PENDING 2025-06-24T20:28:21.407701Z 27 05h15m00.122016s :BS_NODE DEBUG: [27] NodeServiceSetUpdate 2025-06-24T20:28:21.407750Z 27 05h15m00.122016s :BS_NODE DEBUG: [27] VDiskId# [80000028:5:0:1:0] -> [80000028:6:0:1:0] 2025-06-24T20:28:21.408004Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH01@self_heal.cpp:71} Reassigner starting GroupId# 2147483672 2025-06-24T20:28:21.408441Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483672 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:21.408484Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483672 VDiskId# [80000018:4:0:0:0] DiskIsOk# true 2025-06-24T20:28:21.408708Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483672 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:21.408739Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483672 VDiskId# [80000018:4:0:2:0] DiskIsOk# true 2025-06-24T20:28:21.408770Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} 
Reassigner TEvVStatusResult GroupId# 2147483672 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:21.408797Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483672 VDiskId# [80000018:4:0:3:0] DiskIsOk# true 2025-06-24T20:28:21.408831Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483672 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:21.408863Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483672 VDiskId# [80000018:4:0:4:0] DiskIsOk# true 2025-06-24T20:28:21.408895Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483672 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:21.408922Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483672 VDiskId# [80000018:4:0:5:0] DiskIsOk# true 2025-06-24T20:28:21.408950Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483672 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:21.408978Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483672 VDiskId# [80000018:4:0:6:0] DiskIsOk# true 2025-06-24T20:28:21.409006Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483672 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:21.409033Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483672 VDiskId# [80000018:4:0:7:0] DiskIsOk# true 2025-06-24T20:28:21.412525Z 1 05h15m00.122528s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:21.412600Z 1 05h15m00.122528s :BS_NODE DEBUG: [1] VDiskId# [80000018:4:0:0:0] -> [80000018:5:0:0:0] 2025-06-24T20:28:21.413113Z 1 05h15m00.122528s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483672 Items# [80000018:4:0:1:0]: 26:1000:1009 -> 22:1001:1015 ConfigTxSeqNo# 471 2025-06-24T20:28:21.413149Z 1 05h15m00.122528s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483672 Success# true 2025-06-24T20:28:21.413301Z 4 05h15m00.122528s :BS_NODE DEBUG: [4] NodeServiceSetUpdate 2025-06-24T20:28:21.413353Z 4 05h15m00.122528s :BS_NODE DEBUG: [4] VDiskId# [80000018:4:0:3:0] -> [80000018:5:0:3:0] 2025-06-24T20:28:21.413441Z 5 05h15m00.122528s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:21.413487Z 5 05h15m00.122528s :BS_NODE DEBUG: [5] VDiskId# [80000018:4:0:4:0] -> [80000018:5:0:4:0] 2025-06-24T20:28:21.413573Z 22 05h15m00.122528s :BS_NODE DEBUG: [22] NodeServiceSetUpdate 2025-06-24T20:28:21.413616Z 22 05h15m00.122528s :BS_NODE DEBUG: [22] VDiskId# [80000018:5:0:1:0] PDiskId# 1001 VSlotId# 1015 created 2025-06-24T20:28:21.413684Z 22 05h15m00.122528s :BS_NODE DEBUG: [22] VDiskId# [80000018:5:0:1:0] status changed to INIT_PENDING 2025-06-24T20:28:21.413777Z 6 05h15m00.122528s :BS_NODE DEBUG: [6] NodeServiceSetUpdate 2025-06-24T20:28:21.413825Z 6 05h15m00.122528s :BS_NODE DEBUG: [6] VDiskId# [80000018:4:0:2:0] -> [80000018:5:0:2:0] 2025-06-24T20:28:21.413910Z 7 05h15m00.122528s :BS_NODE DEBUG: [7] NodeServiceSetUpdate 2025-06-24T20:28:21.413955Z 7 05h15m00.122528s :BS_NODE DEBUG: [7] VDiskId# [80000018:4:0:6:0] -> [80000018:5:0:6:0] 2025-06-24T20:28:21.414038Z 8 05h15m00.122528s 
:BS_NODE DEBUG: [8] NodeServiceSetUpdate 2025-06-24T20:28:21.414084Z 8 05h15m00.122528s :BS_NODE DEBUG: [8] VDiskId# [80000018:4:0:7:0] -> [80000018:5:0:7:0] 2025-06-24T20:28:21.414147Z 26 05h15m00.122528s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2025-06-24T20:28:21.414219Z 27 05h15m00.122528s :BS_NODE DEBUG: [27] NodeServiceSetUpdate 2025-06-24T20:28:21.414283Z 27 05h15m00.122528s :BS_NODE DEBUG: [27] VDiskId# [80000018:4:0:5:0] -> [80000018:5:0:5:0] 2025-06-24T20:28:21.415126Z 6 05h15m01.135992s :BS_NODE DEBUG: [6] VDiskId# [80000013:4:0:1:0] status changed to REPLICATING 2025-06-24T20:28:21.415657Z 22 05h15m01.197920s :BS_NODE DEBUG: [22] VDiskId# [8000003b:4:0:1:0] status changed to REPLICATING 2025-06-24T20:28:21.416271Z 22 05h15m01.728480s :BS_NODE DEBUG: [22] VDiskId# [80000023:4:0:1:0] status changed to REPLICATING 2025-06-24T20:28:21.416847Z 22 05h15m01.803944s :BS_NODE DEBUG: [22] VDiskId# [8000001b:4:0:1:0] status changed to REPLICATING 2025-06-24T20:28:21.417678Z 22 05h15m02.280528s :BS_NODE DEBUG: [22] VDiskId# [80000018:5:0:1:0] status changed to REPLICATING 2025-06-24T20:28:21.418291Z 22 05h15m02.451432s :BS_NODE DEBUG: [22] VDiskId# [8000002b:4:0:1:0] status changed to REPLICATING 2025-06-24T20:28:21.418875Z 9 05h15m02.939016s :BS_NODE DEBUG: [9] VDiskId# [80000028:6:0:5:0] status changed to REPLICATING 2025-06-24T20:28:21.419190Z 9 05h15m03.580504s :BS_NODE DEBUG: [9] VDiskId# [80000003:4:0:1:0] status changed to REPLICATING 2025-06-24T20:28:21.419527Z 22 05h15m03.652968s :BS_NODE DEBUG: [22] VDiskId# [80000033:4:0:1:0] status changed to REPLICATING 2025-06-24T20:28:21.420089Z 22 05h15m03.713456s :BS_NODE DEBUG: [22] VDiskId# [8000000b:4:0:1:0] status changed to REPLICATING 2025-06-24T20:28:21.421499Z 22 05h15m09.051968s :BS_NODE DEBUG: [22] VDiskId# [80000033:4:0:1:0] status changed to READY 2025-06-24T20:28:21.422599Z 26 05h15m09.052480s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2025-06-24T20:28:21.422655Z 26 05h15m09.052480s :BS_NODE DEBUG: [26] VDiskId# [80000033:3:0:1:0] destroyed 2025-06-24T20:28:21.422784Z 22 05h15m09.622456s :BS_NODE DEBUG: [22] VDiskId# [8000000b:4:0:1:0] status changed to READY 2025-06-24T20:28:21.425553Z 26 05h15m09.622968s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2025-06-24T20:28:21.425607Z 26 05h15m09.622968s :BS_NODE DEBUG: [26] VDiskId# [8000000b:3:0:1:0] destroyed 2025-06-24T20:28:21.425969Z 22 05h15m10.045480s :BS_NODE DEBUG: [22] VDiskId# [80000023:4:0:1:0] status changed to READY 2025-06-24T20:28:21.426923Z 26 05h15m10.045992s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2025-06-24T20:28:21.426971Z 26 05h15m10.045992s :BS_NODE DEBUG: [26] VDiskId# [80000023:3:0:1:0] destroyed 2025-06-24T20:28:21.427100Z 22 05h15m11.018944s :BS_NODE DEBUG: [22] VDiskId# [8000001b:4:0:1:0] status changed to READY 2025-06-24T20:28:21.428018Z 26 05h15m11.019456s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2025-06-24T20:28:21.428064Z 26 05h15m11.019456s :BS_NODE DEBUG: [26] VDiskId# [8000001b:3:0:1:0] destroyed 2025-06-24T20:28:21.428203Z 22 05h15m13.433528s :BS_NODE DEBUG: [22] VDiskId# [80000018:5:0:1:0] status changed to READY 2025-06-24T20:28:21.429090Z 26 05h15m13.434040s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2025-06-24T20:28:21.429139Z 26 05h15m13.434040s :BS_NODE DEBUG: [26] VDiskId# [80000018:4:0:1:0] destroyed 2025-06-24T20:28:21.429273Z 6 05h15m14.800992s :BS_NODE DEBUG: [6] VDiskId# [80000013:4:0:1:0] status changed to READY 2025-06-24T20:28:21.430041Z 26 05h15m14.801504s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2025-06-24T20:28:21.430087Z 26 
05h15m14.801504s :BS_NODE DEBUG: [26] VDiskId# [80000013:3:0:1:0] destroyed 2025-06-24T20:28:21.431138Z 9 05h15m24.544016s :BS_NODE DEBUG: [9] VDiskId# [80000028:6:0:5:0] status changed to READY 2025-06-24T20:28:21.431856Z 26 05h15m24.544528s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2025-06-24T20:28:21.431900Z 26 05h15m24.544528s :BS_NODE DEBUG: [26] VDiskId# [80000028:5:0:5:0] destroyed 2025-06-24T20:28:21.432586Z 22 05h15m25.302920s :BS_NODE DEBUG: [22] VDiskId# [8000003b:4:0:1:0] status changed to READY 2025-06-24T20:28:21.433473Z 26 05h15m25.303432s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2025-06-24T20:28:21.433520Z 26 05h15m25.303432s :BS_NODE DEBUG: [26] VDiskId# [8000003b:3:0:1:0] destroyed 2025-06-24T20:28:21.434660Z 9 05h15m30.477504s :BS_NODE DEBUG: [9] VDiskId# [80000003:4:0:1:0] status changed to READY 2025-06-24T20:28:21.435451Z 26 05h15m30.478016s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2025-06-24T20:28:21.435511Z 26 05h15m30.478016s :BS_NODE DEBUG: [26] VDiskId# [80000003:3:0:1:0] destroyed 2025-06-24T20:28:21.435639Z 22 05h15m31.946432s :BS_NODE DEBUG: [22] VDiskId# [8000002b:4:0:1:0] status changed to READY 2025-06-24T20:28:21.436534Z 26 05h15m31.946944s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2025-06-24T20:28:21.436579Z 26 05h15m31.946944s :BS_NODE DEBUG: [26] VDiskId# [8000002b:3:0:1:0] destroyed >> TYardTest::TestUnflushedChunk [GOOD] >> TYardTest::TestRedZoneSurvivability |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsOther2::ChaoticParallelWrite_SkeletonFrontQueuesOverload [GOOD] >> VDiskBalancing::TestDontSendToReadOnlyTest_Block42 [GOOD] >> TBsVDiskRange::Simple3PutRangeGetAllBackwardFresh [GOOD] >> TBsVDiskRange::Simple3PutRangeGetAllBackwardCompaction ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestDontSendToReadOnlyTest_Block42 [GOOD] Test command err: RandomSeed# 10220062591791725212 SEND TEvPut with key [1:1:1:0:0:100:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] SEND TEvPut with key [1:1:2:0:0:100:0] 2025-06-24T20:28:18.183514Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:6339:830] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] Start compaction Finish compaction >> test_transform.py::TestYamlConfigTransformations::test_simplified[dump] [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_simplified[dump_ds_init] >> Donor::SkipBadDonor [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::SkipBadDonor [GOOD] Test command err: RandomSeed# 10065400179292956690 2025-06-24T20:28:25.089610Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:28:25.091688Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 12580676226693766123] 2025-06-24T20:28:25.112059Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 
VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 >> TBsVDiskRange::Simple3PutRangeGetAllBackwardCompaction [GOOD] >> BlobDepot::VerifiedRandom [GOOD] >> BlobDepot::LoadPutAndRead >> TYardTest::TestRedZoneSurvivability [GOOD] >> TYardTest::TestSlay >> TBsVDiskManyPutGetCheckSize::ManyPutGetCheckSize [GOOD] >> BSCReadOnlyPDisk::ReadOnlyNotAllowed [GOOD] >> BSCReadOnlyPDisk::RestartAndReadOnlyConsecutive [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRange::Simple3PutRangeGetAllBackwardCompaction [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskManyPutGetCheckSize::ManyPutGetCheckSize [GOOD] Test command err: 2025-06-24T20:28:28.599073Z :BS_VDISK_GET CRIT: query_base.h:102: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVGetResult: Result message is too large; size# 67108001 orig# {ExtrQuery# [5000:1:0:0:0:100000:1] sh# 257 sz# 99743 c# 0}{ExtrQuery# [5000:1:1:0:0:100000:1] sh# 257 sz# 99743 c# 1}{ExtrQuery# [5000:1:2:0:0:100000:1] sh# 257 sz# 99743 c# 2}{ExtrQuery# [5000:1:3:0:0:100000:1] sh# 257 sz# 99743 c# 3}{ExtrQuery# [5000:1:4:0:0:100000:1] sh# 257 sz# 99743 c# 4}{ExtrQuery# [5000:1:5:0:0:100000:1] sh# 257 sz# 99743 c# 5}{ExtrQuery# [5000:1:6:0:0:100000:1] sh# 257 sz# 99743 c# 6}{ExtrQuery# [5000:1:7:0:0:100000:1] sh# 257 sz# 99743 c# 7}{ExtrQuery# [5000:1:8:0:0:100000:1] sh# 257 sz# 99743 c# 8}{ExtrQuery# [5000:1:9:0:0:100000:1] sh# 257 sz# 99743 c# 9}{ExtrQuery# [5000:1:10:0:0:100000:1] sh# 257 sz# 99743 c# 10}{ExtrQuery# [5000:1:11:0:0:100000:1] sh# 257 sz# 99743 c# 11}{ExtrQuery# [5000:1:12:0:0:100000:1] sh# 257 sz# 99743 c# 12}{ExtrQuery# [5000:1:13:0:0:100000:1] sh# 257 sz# 99743 c# 13}{ExtrQuery# [5000:1:14:0:0:100000:1] sh# 257 sz# 99743 c# 14}{ExtrQuery# [5000:1:15:0:0:100000:1] sh# 257 sz# 99743 c# 15}{ExtrQuery# [5000:1:16:0:0:100000:1] sh# 257 sz# 99743 c# 16}{ExtrQuery# [5000:1:17:0:0:100000:1] sh# 257 sz# 99743 c# 17}{ExtrQuery# [5000:1:18:0:0:100000:1] sh# 257 sz# 99743 c# 18}{ExtrQuery# [5000:1:19:0:0:100000:1] sh# 257 sz# 99743 c# 19}{ExtrQuery# [5000:1:20:0:0:100000:1] sh# 257 sz# 99743 c# 20}{ExtrQuery# [5000:1:21:0:0:100000:1] sh# 257 sz# 99743 c# 21}{ExtrQuery# [5000:1:22:0:0:100000:1] sh# 257 sz# 99743 c# 22}{ExtrQuery# [5000:1:23:0:0:100000:1] sh# 257 sz# 99743 c# 23}{ExtrQuery# [5000:1:24:0:0:100000:1] sh# 257 sz# 99743 c# 24}{ExtrQuery# [5000:1:25:0:0:100000:1] sh# 257 sz# 99743 c# 25}{ExtrQuery# [5000:1:26:0:0:100000:1] sh# 257 sz# 99743 c# 26}{ExtrQuery# [5000:1:27:0:0:100000:1] sh# 257 sz# 99743 c# 27}{ExtrQuery# [5000:1:28:0:0:100000:1] sh# 257 sz# 99743 c# 28}{ExtrQuery# [5000:1:29:0:0:100000:1] sh# 257 sz# 99743 c# 29}{ExtrQuery# [5000:1:30:0:0:100000:1] sh# 257 sz# 99743 c# 30}{ExtrQuery# [5000:1:31:0:0:100000:1] sh# 257 sz# 99743 c# 31}{ExtrQuery# [5000:1:32:0:0:100000:1] sh# 257 sz# 99743 c# 32}{ExtrQuery# [5000:1:33:0:0:100000:1] sh# 257 sz# 99743 c# 33}{ExtrQuery# [5000:1:34:0:0:100000:1] sh# 257 sz# 99743 c# 34}{ExtrQuery# [5000:1:35:0:0:100000:1] sh# 257 sz# 99743 c# 35}{ExtrQuery# [5000:1:36:0:0:100000:1] sh# 257 sz# 99743 c# 36}{ExtrQuery# [5000:1:37:0:0:100000:1] sh# 257 sz# 99743 c# 37}{ExtrQuery# [5000:1:38:0:0:100000:1] sh# 257 sz# 99743 c# 38}{ExtrQuery# [5000:1:39:0:0:100000:1] sh# 257 sz# 99743 c# 39}{ExtrQuery# [5000:1:40:0:0:100000:1] sh# 257 sz# 99743 c# 40}{ExtrQuery# [5000:1:41:0:0:100000:1] sh# 257 sz# 99743 c# 41}{ExtrQuery# [5000:1:42:0:0:100000:1] sh# 257 sz# 99743 c# 
42}{ExtrQuery# [5000:1:43:0:0:100000:1] sh# 257 sz# 99743 c# 43}{ExtrQuery# [5000:1:44:0:0:100000:1] sh# 257 sz# 99743 c# 44}{ExtrQuery# [5000:1:45:0:0:100000:1] sh# 257 sz# 99743 c# 45}{ExtrQuery# [5000:1:46:0:0:100000:1] sh# 257 sz# 99743 c# 46}{ExtrQuery# [5000:1:47:0:0:100000:1] sh# 257 sz# 99743 c# 47}{ExtrQuery# [5000:1:48:0:0:100000:1] sh# 257 sz# 99743 c# 48}{ExtrQuery# [5000:1:49:0:0:100000:1] sh# 257 sz# 99743 c# 49}{ExtrQuery# [5000:1:50:0:0:100000:1] sh# 257 sz# 99743 c# 50}{ExtrQuery# [5000:1:51:0:0:100000:1] sh# 257 sz# 99743 c# 51}{ExtrQuery# [5000:1:52:0:0:100000:1] sh# 257 sz# 99743 c# 52}{ExtrQuery# [5000:1:53:0:0:100000:1] sh# 257 sz# 99743 c# 53}{ExtrQuery# [5000:1:54:0:0:100000:1] sh# 257 sz# 99743 c# 54}{ExtrQuery# [5000:1:55:0:0:100000:1] sh# 257 sz# 99743 c# 55}{ExtrQuery# [5000:1:56:0:0:100000:1] sh# 257 sz# 99743 c# 56}{ExtrQuery# [5000:1:57:0:0:100000:1] sh# 257 sz# 99743 c# 57}{ExtrQuery# [5000:1:58:0:0:100000:1] sh# 257 sz# 99743 c# 58}{ExtrQuery# [5000:1:59:0:0:100000:1] sh# 257 sz# 99743 c# 59}{ExtrQuery# [5000:1:60:0:0:100000:1] sh# 257 sz# 99743 c# 60}{ExtrQuery# [5000:1:61:0:0:100000:1] sh# 257 sz# 99743 c# 61}{ExtrQuery# [5000:1:62:0:0:100000:1] sh# 257 sz# 99743 c# 62}{ExtrQuery# [5000:1:63:0:0:100000:1] sh# 257 sz# 99743 c# 63}{ExtrQuery# [5000:1:64:0:0:100000:1] sh# 257 sz# 99743 c# 64}{ExtrQuery# [5000:1:65:0:0:100000:1] sh# 257 sz# 99743 c# 65}{ExtrQuery# [5000:1:66:0:0:100000:1] sh# 257 sz# 99743 c# 66}{ExtrQuery# [5000:1:67:0:0:100000:1] sh# 257 sz# 99743 c# 67}{ExtrQuery# [5000:1:68:0:0:100000:1] sh# 257 sz# 99743 c# 68}{ExtrQuery# [5000:1:69:0:0:100000:1] sh# 257 sz# 99743 c# 69}{ExtrQuery# [5000:1:70:0:0:100000:1] sh# 257 sz# 99743 c# 70}{ExtrQuery# [5000:1:71:0:0:100000:1] sh# 257 sz# 99743 c# 71}{ExtrQuery# [5000:1:72:0:0:100000:1] sh# 257 sz# 99743 c# 72}{ExtrQuery# [5000:1:73:0:0:100000:1] sh# 257 sz# 99743 c# 73}{ExtrQuery# [5000:1:74:0:0:100000:1] sh# 257 sz# 99743 c# 74}{ExtrQuery# [5000:1:75:0:0:100000:1] sh# 257 sz# 99743 c# 75}{ExtrQuery# [5000:1:76:0:0:100000:1] sh# 257 sz# 99743 c# 76}{ExtrQuery# [5000:1:77:0:0:100000:1] sh# 257 sz# 99743 c# 77}{ExtrQuery# [5000:1:78:0:0:100000:1] sh# 257 sz# 99743 c# 78}{ExtrQuery# [5000:1:79:0:0:100000:1] sh# 257 sz# 99743 c# 79}{ExtrQuery# [5000:1:80:0:0:100000:1] sh# 257 sz# 99743 c# 80}{ExtrQuery# [5000:1:81:0:0:100000:1] sh# 257 sz# 99743 c# 81}{ExtrQuery# [5000:1:82:0:0:100000:1] sh# 257 sz# 99743 c# 82}{ExtrQuery# [5000:1:83:0:0:100000:1] sh# 257 sz# 99743 c# 83}{ExtrQuery# [5000:1:84:0:0:100000:1] sh# 257 sz# 99743 c# 84}{ExtrQuery# [5000:1:85:0:0:100000:1] sh# 257 sz# 99743 c# 85}{ExtrQuery# [5000:1:86:0:0:100000:1] sh# 257 sz# 99743 c# 86}{ExtrQuery# [5000:1:87:0:0:100000:1] sh# 257 sz# 99743 c# 87}{ExtrQuery# [5000:1:88:0:0:100000:1] sh# 257 sz# 99743 c# 88}{ExtrQuery# [5000:1:89:0:0:100000:1] sh# 257 sz# 99743 c# 89}{ExtrQuery# [5000:1:90:0:0:100000:1] sh# 257 sz# 99743 c# 90}{ExtrQuery# [5000:1:91:0:0:100000:1] sh# 257 sz# 99743 c# 91}{ExtrQuery# [5000:1:92:0:0:100000:1] sh# 257 sz# 99743 c# 92}{ExtrQuery# [5000:1:93:0:0:100000:1] sh# 257 sz# 99743 c# 93}{ExtrQuery# [5000:1:94:0:0:100000:1] sh# 257 sz# 99743 c# 94}{ExtrQuery# [5000:1:95:0:0:100000:1] sh# 257 sz# 99743 c# 95}{ExtrQuery# [5000:1:96:0:0:100000:1] sh# 257 sz# 99743 c# 96}{ExtrQuery# [5000:1:97:0:0:100000:1] sh# 257 sz# 99743 c# 97}{ExtrQuery# [5000:1:98:0:0:100000:1] sh# 257 sz# 99743 c# 98}{ExtrQuery# [5000:1:99:0:0:100000:1] sh# 257 sz# 99743 c# 99}{ExtrQuery# [5000:1:100:0:0:100000:1] sh# 257 sz# 99743 c# 100}{ExtrQuery# 
[5000:1:101:0:0:100000:1] sh# 257 sz# 99743 c# 101}{ExtrQuery# [5000:1:102:0:0:100000:1] sh# 257 sz# 99743 c# 102}{ExtrQuery# [5000:1:103:0:0:100000:1] sh# 257 sz# 99743 c# 103}{ExtrQuery# [5000:1:104:0:0:100000:1] sh# 257 sz# 99743 c# 104}{ExtrQuery# [5000:1:105:0:0:100000:1] sh# 257 sz# 99743 c# 105}{ExtrQuery# [5000:1:106:0:0:100000:1] sh# 257 sz# 99743 c# 106}{ExtrQuery# [5000:1:107:0:0:100000:1] sh# 257 sz# 99743 c# 107}{ExtrQuery# [5000:1:108:0:0:100000:1] sh# 257 sz# 99743 c# 108}{ExtrQuery# [5000:1:109:0:0:100000:1] sh# 257 sz# 99743 c# 109}{ExtrQuery# [5000:1:110:0:0:100000:1] sh# 257 sz# 99743 c# 110}{ExtrQuery# [5000:1:111:0:0:100000:1] sh# 257 sz# 99743 c# 111}{ExtrQuery# [5000:1:112:0:0:100000:1] sh# 257 sz# 99743 c# 112}{ExtrQuery# [5000:1:113:0:0:100000:1] sh# 257 sz# 99743 c# 113}{ExtrQuery# [5000:1:114:0:0:100000:1] sh# 257 sz# 99743 c# 114}{ExtrQuery# [5000:1:115:0:0:100000:1] sh# 257 sz# 99743 c# 115}{ExtrQuery# [5000:1:116:0:0:100000:1] sh# 257 sz# 99743 c# 116}{ExtrQuery# [5000:1:117:0:0:100000:1] sh# 257 sz# 99743 c# 117}{ExtrQuery# [5000:1:118:0:0:100000:1] sh# 257 sz# 99743 c# 118}{ExtrQuery# [5000:1:119:0:0:100000:1] sh# 257 sz# 99743 c# 119}{ExtrQuery# [5000:1:120:0:0:100000:1] sh# 257 sz# 99743 c# 120}{ExtrQuery# [5000:1:121:0:0:100000:1] sh# 257 sz# 99743 c# 121}{ExtrQuery# [5000:1:122:0:0:100000:1] sh# 257 sz# 99743 c# 122}{ExtrQuery# [5000:1:123:0:0:100000:1] sh# 257 sz# 99743 c# 123}{ExtrQuery# [5000:1:124:0:0:100000:1] sh# 257 sz# 99743 c# 124}{ExtrQuery# [5000:1:125:0:0:100000:1] sh# 257 sz# 99743 c# 125}{ExtrQuery# [5000:1:126:0:0:100000:1] sh# 257 sz# 99743 c# 126}{ExtrQuery# [5000:1:127:0:0:100000:1] sh# 257 sz# 99743 c# 127}{ExtrQuery# [5000:1:128:0:0:100000:1] sh# 257 sz# 99743 c# 128}{ExtrQuery# [5000:1:129:0:0:100000:1] sh# 257 sz# 99743 c# 129}{ExtrQuery# [5000:1:130:0:0:100000:1] sh# 257 sz# 99743 c# 130}{ExtrQuery# [5000:1:131:0:0:100000:1] sh# 257 sz# 99743 c# 131}{ExtrQuery# [5000:1:132:0:0:100000:1] sh# 257 sz# 99743 c# 132}{ExtrQuery# [5000:1:133:0:0:100000:1] sh# 257 sz# 99743 c# 133}{ExtrQuery# [5000:1:134:0:0:100000:1] sh# 257 sz# 99743 c# 134}{ExtrQuery# [5000:1:135:0:0:100000:1] sh# 257 sz# 99743 c# 135}{ExtrQuery# [5000:1:136:0:0:100000:1] sh# 257 sz# 99743 c# 136}{ExtrQuery# [5000:1:137:0:0:100000:1] sh# 257 sz# 99743 c# 137}{ExtrQuery# [5000:1:138:0:0:100000:1] sh# 257 sz# 99743 c# 138}{ExtrQuery# [5000:1:139:0:0:100000:1] sh# 257 sz# 99743 c# 139}{ExtrQuery# [5000:1:140:0:0:100000:1] sh# 257 sz# 99743 c# 140}{ExtrQuery# [5000:1:141:0:0:100000:1] sh# 257 sz# 99743 c# 141}{ExtrQuery# [5000:1:142:0:0:100000:1] sh# 257 sz# 99743 c# 142}{ExtrQuery# [5000:1:143:0:0:100000:1] sh# 257 sz# 99743 c# 143}{ExtrQuery# [5000:1:144:0:0:100000:1] sh# 257 sz# 99743 c# 144}{ExtrQuery# [5000:1:145:0:0:100000:1] sh# 257 sz# 99743 c# 145}{ExtrQuery# [5000:1:146:0:0:100000:1] sh# 257 sz# 99743 c# 146}{ExtrQuery# [5000:1:147:0:0:100000:1] sh# 257 sz# 99743 c# 147}{ExtrQuery# [5000:1:148:0:0:100000:1] sh# 257 sz# 99743 c# 148}{ExtrQuery# [5000:1:149:0:0:100000:1] sh# 257 sz# 99743 c# 149}{ExtrQuery# [5000:1:150:0:0:100000:1] sh# 257 sz# 99743 c# 150}{ExtrQuery# [5000:1:151:0:0:100000:1] sh# 257 sz# 99743 c# 151}{ExtrQuery# [5000:1:152:0:0:100000:1] sh# 257 sz# 99743 c# 152}{ExtrQuery# [5000:1:153:0:0:100000:1] sh# 257 sz# 99743 c# 153}{ExtrQuery# [5000:1:154:0:0:100000:1] sh# 257 sz# 99743 c# 154}{ExtrQuery# [5000:1:155:0:0:100000:1] sh# 257 sz# 99743 c# 155}{ExtrQuery# [5000:1:156:0:0:100000:1] sh# 257 sz# 99743 c# 156}{ExtrQuery# [5000:1:157:0:0:100000:1] 
sh# 257 sz# 99743 c# 157}{ExtrQuery# [5000:1:158:0:0:100000:1] sh# 257 sz# 99743 c# 158}{ExtrQuery# [5000:1:159:0:0:100000:1] sh# 257 sz# 99743 c# 159}{ExtrQuery# [5000:1:160:0:0:100000:1] sh# 257 sz# 99743 c# 160}{ExtrQuery# [5000:1:161:0:0:100000:1] sh# 257 sz# 99743 c# 161}{ExtrQuery# [5000:1:162:0:0:100000:1] sh# 257 sz# 99743 c# 162}{ExtrQuery# [5000:1:163:0:0:100000:1] sh# 257 sz# 99743 c# 163}{ExtrQuery# [5000:1:164:0:0:100000:1] sh# 257 sz# 99743 c# 164}{ExtrQuery# [5000:1:165:0:0:100000:1] sh# 257 sz# 99743 c# 165}{ExtrQuery# [5000:1:166:0:0:100000:1] sh# 257 sz# 99743 c# 166}{ExtrQuery# [5000:1:167:0:0:100000:1] sh# 257 sz# 99743 c# 167}{ExtrQuery# [5000:1:168:0:0:100000:1] sh# 257 sz# 99743 c# 168}{ExtrQuery# [5000:1:169:0:0:100000:1] sh# 257 sz# 99743 c# 169}{ExtrQuery# [5000:1:170:0:0:100000:1] sh# 257 sz# 99743 c# 170}{ExtrQuery# [5000:1:171:0:0:100000:1] sh# 257 sz# 99743 c# 171}{ExtrQuery# [5000:1:172:0:0:100000:1] sh# 257 sz# 99743 c# 172}{ExtrQuery# [5000:1:173:0:0:100000:1] sh# 257 sz# 99743 c# 173}{ExtrQuery# [5000:1:174:0:0:100000:1] sh# 257 sz# 99743 c# 174}{ExtrQuery# [5000:1:175:0:0:100000:1] sh# 257 sz# 99743 c# 175}{ExtrQuery# [5000:1:176:0:0:100000:1] sh# 257 sz# 99743 c# 176}{ExtrQuery# [5000:1:177:0:0:100000:1] sh# 257 sz# 99743 c# 177}{ExtrQuery# [5000:1:178:0:0:100000:1] sh# 257 sz# 99743 c# 178}{ExtrQuery# [5000:1:179:0:0:100000:1] sh# 257 sz# 99743 c# 179}{ExtrQuery# [5000:1:180:0:0:100000:1] sh# 257 sz# 99743 c# 180}{ExtrQuery# [5000:1:181:0:0:100000:1] sh# 257 sz# 99743 c# 181}{ExtrQuery# [5000:1:182:0:0:100000:1] sh# 257 sz# 99743 c# 182}{ExtrQuery# [5000:1:183:0:0:100000:1] sh# 257 sz# 99743 c# 183}{ExtrQuery# [5000:1:184:0:0:100000:1] sh# 257 sz# 99743 c# 184}{ExtrQuery# [5000:1:185:0:0:100000:1] sh# 257 sz# 99743 c# 185}{ExtrQuery# [5000:1:186:0:0:100000:1] sh# 257 sz# 99743 c# 186}{ExtrQuery# [5000:1:187:0:0:100000:1] sh# 257 sz# 99743 c# 187}{ExtrQuery# [5000:1:188:0:0:100000:1] sh# 257 sz# 99743 c# 188}{ExtrQuery# [5000:1:189:0:0:100000:1] sh# 257 sz# 99743 c# 189}{ExtrQuery# [5000:1:190:0:0:100000:1] sh# 257 sz# 99743 c# 190}{ExtrQuery# [5000:1:191 ... 
sz# 99743 c# 484}{ExtrQuery# [5000:1:485:0:0:100000:1] sh# 257 sz# 99743 c# 485}{ExtrQuery# [5000:1:486:0:0:100000:1] sh# 257 sz# 99743 c# 486}{ExtrQuery# [5000:1:487:0:0:100000:1] sh# 257 sz# 99743 c# 487}{ExtrQuery# [5000:1:488:0:0:100000:1] sh# 257 sz# 99743 c# 488}{ExtrQuery# [5000:1:489:0:0:100000:1] sh# 257 sz# 99743 c# 489}{ExtrQuery# [5000:1:490:0:0:100000:1] sh# 257 sz# 99743 c# 490}{ExtrQuery# [5000:1:491:0:0:100000:1] sh# 257 sz# 99743 c# 491}{ExtrQuery# [5000:1:492:0:0:100000:1] sh# 257 sz# 99743 c# 492}{ExtrQuery# [5000:1:493:0:0:100000:1] sh# 257 sz# 99743 c# 493}{ExtrQuery# [5000:1:494:0:0:100000:1] sh# 257 sz# 99743 c# 494}{ExtrQuery# [5000:1:495:0:0:100000:1] sh# 257 sz# 99743 c# 495}{ExtrQuery# [5000:1:496:0:0:100000:1] sh# 257 sz# 99743 c# 496}{ExtrQuery# [5000:1:497:0:0:100000:1] sh# 257 sz# 99743 c# 497}{ExtrQuery# [5000:1:498:0:0:100000:1] sh# 257 sz# 99743 c# 498}{ExtrQuery# [5000:1:499:0:0:100000:1] sh# 257 sz# 99743 c# 499}{ExtrQuery# [5000:1:500:0:0:100000:1] sh# 257 sz# 99743 c# 500}{ExtrQuery# [5000:1:501:0:0:100000:1] sh# 257 sz# 99743 c# 501}{ExtrQuery# [5000:1:502:0:0:100000:1] sh# 257 sz# 99743 c# 502}{ExtrQuery# [5000:1:503:0:0:100000:1] sh# 257 sz# 99743 c# 503}{ExtrQuery# [5000:1:504:0:0:100000:1] sh# 257 sz# 99743 c# 504}{ExtrQuery# [5000:1:505:0:0:100000:1] sh# 257 sz# 99743 c# 505}{ExtrQuery# [5000:1:506:0:0:100000:1] sh# 257 sz# 99743 c# 506}{ExtrQuery# [5000:1:507:0:0:100000:1] sh# 257 sz# 99743 c# 507}{ExtrQuery# [5000:1:508:0:0:100000:1] sh# 257 sz# 99743 c# 508}{ExtrQuery# [5000:1:509:0:0:100000:1] sh# 257 sz# 99743 c# 509}{ExtrQuery# [5000:1:510:0:0:100000:1] sh# 257 sz# 99743 c# 510}{ExtrQuery# [5000:1:511:0:0:100000:1] sh# 257 sz# 99743 c# 511}{ExtrQuery# [5000:1:512:0:0:100000:1] sh# 257 sz# 99743 c# 512}{ExtrQuery# [5000:1:513:0:0:100000:1] sh# 257 sz# 99743 c# 513}{ExtrQuery# [5000:1:514:0:0:100000:1] sh# 257 sz# 99743 c# 514}{ExtrQuery# [5000:1:515:0:0:100000:1] sh# 257 sz# 99743 c# 515}{ExtrQuery# [5000:1:516:0:0:100000:1] sh# 257 sz# 99743 c# 516}{ExtrQuery# [5000:1:517:0:0:100000:1] sh# 257 sz# 99743 c# 517}{ExtrQuery# [5000:1:518:0:0:100000:1] sh# 257 sz# 99743 c# 518}{ExtrQuery# [5000:1:519:0:0:100000:1] sh# 257 sz# 99743 c# 519}{ExtrQuery# [5000:1:520:0:0:100000:1] sh# 257 sz# 99743 c# 520}{ExtrQuery# [5000:1:521:0:0:100000:1] sh# 257 sz# 99743 c# 521}{ExtrQuery# [5000:1:522:0:0:100000:1] sh# 257 sz# 99743 c# 522}{ExtrQuery# [5000:1:523:0:0:100000:1] sh# 257 sz# 99743 c# 523}{ExtrQuery# [5000:1:524:0:0:100000:1] sh# 257 sz# 99743 c# 524}{ExtrQuery# [5000:1:525:0:0:100000:1] sh# 257 sz# 99743 c# 525}{ExtrQuery# [5000:1:526:0:0:100000:1] sh# 257 sz# 99743 c# 526}{ExtrQuery# [5000:1:527:0:0:100000:1] sh# 257 sz# 99743 c# 527}{ExtrQuery# [5000:1:528:0:0:100000:1] sh# 257 sz# 99743 c# 528}{ExtrQuery# [5000:1:529:0:0:100000:1] sh# 257 sz# 99743 c# 529}{ExtrQuery# [5000:1:530:0:0:100000:1] sh# 257 sz# 99743 c# 530}{ExtrQuery# [5000:1:531:0:0:100000:1] sh# 257 sz# 99743 c# 531}{ExtrQuery# [5000:1:532:0:0:100000:1] sh# 257 sz# 99743 c# 532}{ExtrQuery# [5000:1:533:0:0:100000:1] sh# 257 sz# 99743 c# 533}{ExtrQuery# [5000:1:534:0:0:100000:1] sh# 257 sz# 99743 c# 534}{ExtrQuery# [5000:1:535:0:0:100000:1] sh# 257 sz# 99743 c# 535}{ExtrQuery# [5000:1:536:0:0:100000:1] sh# 257 sz# 99743 c# 536}{ExtrQuery# [5000:1:537:0:0:100000:1] sh# 257 sz# 99743 c# 537}{ExtrQuery# [5000:1:538:0:0:100000:1] sh# 257 sz# 99743 c# 538}{ExtrQuery# [5000:1:539:0:0:100000:1] sh# 257 sz# 99743 c# 539}{ExtrQuery# [5000:1:540:0:0:100000:1] sh# 257 sz# 99743 c# 
540}{ExtrQuery# [5000:1:541:0:0:100000:1] sh# 257 sz# 99743 c# 541}{ExtrQuery# [5000:1:542:0:0:100000:1] sh# 257 sz# 99743 c# 542}{ExtrQuery# [5000:1:543:0:0:100000:1] sh# 257 sz# 99743 c# 543}{ExtrQuery# [5000:1:544:0:0:100000:1] sh# 257 sz# 99743 c# 544}{ExtrQuery# [5000:1:545:0:0:100000:1] sh# 257 sz# 99743 c# 545}{ExtrQuery# [5000:1:546:0:0:100000:1] sh# 257 sz# 99743 c# 546}{ExtrQuery# [5000:1:547:0:0:100000:1] sh# 257 sz# 99743 c# 547}{ExtrQuery# [5000:1:548:0:0:100000:1] sh# 257 sz# 99743 c# 548}{ExtrQuery# [5000:1:549:0:0:100000:1] sh# 257 sz# 99743 c# 549}{ExtrQuery# [5000:1:550:0:0:100000:1] sh# 257 sz# 99743 c# 550}{ExtrQuery# [5000:1:551:0:0:100000:1] sh# 257 sz# 99743 c# 551}{ExtrQuery# [5000:1:552:0:0:100000:1] sh# 257 sz# 99743 c# 552}{ExtrQuery# [5000:1:553:0:0:100000:1] sh# 257 sz# 99743 c# 553}{ExtrQuery# [5000:1:554:0:0:100000:1] sh# 257 sz# 99743 c# 554}{ExtrQuery# [5000:1:555:0:0:100000:1] sh# 257 sz# 99743 c# 555}{ExtrQuery# [5000:1:556:0:0:100000:1] sh# 257 sz# 99743 c# 556}{ExtrQuery# [5000:1:557:0:0:100000:1] sh# 257 sz# 99743 c# 557}{ExtrQuery# [5000:1:558:0:0:100000:1] sh# 257 sz# 99743 c# 558}{ExtrQuery# [5000:1:559:0:0:100000:1] sh# 257 sz# 99743 c# 559}{ExtrQuery# [5000:1:560:0:0:100000:1] sh# 257 sz# 99743 c# 560}{ExtrQuery# [5000:1:561:0:0:100000:1] sh# 257 sz# 99743 c# 561}{ExtrQuery# [5000:1:562:0:0:100000:1] sh# 257 sz# 99743 c# 562}{ExtrQuery# [5000:1:563:0:0:100000:1] sh# 257 sz# 99743 c# 563}{ExtrQuery# [5000:1:564:0:0:100000:1] sh# 257 sz# 99743 c# 564}{ExtrQuery# [5000:1:565:0:0:100000:1] sh# 257 sz# 99743 c# 565}{ExtrQuery# [5000:1:566:0:0:100000:1] sh# 257 sz# 99743 c# 566}{ExtrQuery# [5000:1:567:0:0:100000:1] sh# 257 sz# 99743 c# 567}{ExtrQuery# [5000:1:568:0:0:100000:1] sh# 257 sz# 99743 c# 568}{ExtrQuery# [5000:1:569:0:0:100000:1] sh# 257 sz# 99743 c# 569}{ExtrQuery# [5000:1:570:0:0:100000:1] sh# 257 sz# 99743 c# 570}{ExtrQuery# [5000:1:571:0:0:100000:1] sh# 257 sz# 99743 c# 571}{ExtrQuery# [5000:1:572:0:0:100000:1] sh# 257 sz# 99743 c# 572}{ExtrQuery# [5000:1:573:0:0:100000:1] sh# 257 sz# 99743 c# 573}{ExtrQuery# [5000:1:574:0:0:100000:1] sh# 257 sz# 99743 c# 574}{ExtrQuery# [5000:1:575:0:0:100000:1] sh# 257 sz# 99743 c# 575}{ExtrQuery# [5000:1:576:0:0:100000:1] sh# 257 sz# 99743 c# 576}{ExtrQuery# [5000:1:577:0:0:100000:1] sh# 257 sz# 99743 c# 577}{ExtrQuery# [5000:1:578:0:0:100000:1] sh# 257 sz# 99743 c# 578}{ExtrQuery# [5000:1:579:0:0:100000:1] sh# 257 sz# 99743 c# 579}{ExtrQuery# [5000:1:580:0:0:100000:1] sh# 257 sz# 99743 c# 580}{ExtrQuery# [5000:1:581:0:0:100000:1] sh# 257 sz# 99743 c# 581}{ExtrQuery# [5000:1:582:0:0:100000:1] sh# 257 sz# 99743 c# 582}{ExtrQuery# [5000:1:583:0:0:100000:1] sh# 257 sz# 99743 c# 583}{ExtrQuery# [5000:1:584:0:0:100000:1] sh# 257 sz# 99743 c# 584}{ExtrQuery# [5000:1:585:0:0:100000:1] sh# 257 sz# 99743 c# 585}{ExtrQuery# [5000:1:586:0:0:100000:1] sh# 257 sz# 99743 c# 586}{ExtrQuery# [5000:1:587:0:0:100000:1] sh# 257 sz# 99743 c# 587}{ExtrQuery# [5000:1:588:0:0:100000:1] sh# 257 sz# 99743 c# 588}{ExtrQuery# [5000:1:589:0:0:100000:1] sh# 257 sz# 99743 c# 589}{ExtrQuery# [5000:1:590:0:0:100000:1] sh# 257 sz# 99743 c# 590}{ExtrQuery# [5000:1:591:0:0:100000:1] sh# 257 sz# 99743 c# 591}{ExtrQuery# [5000:1:592:0:0:100000:1] sh# 257 sz# 99743 c# 592}{ExtrQuery# [5000:1:593:0:0:100000:1] sh# 257 sz# 99743 c# 593}{ExtrQuery# [5000:1:594:0:0:100000:1] sh# 257 sz# 99743 c# 594}{ExtrQuery# [5000:1:595:0:0:100000:1] sh# 257 sz# 99743 c# 595}{ExtrQuery# [5000:1:596:0:0:100000:1] sh# 257 sz# 99743 c# 596}{ExtrQuery# 
[5000:1:597:0:0:100000:1] sh# 257 sz# 99743 c# 597}{ExtrQuery# [5000:1:598:0:0:100000:1] sh# 257 sz# 99743 c# 598}{ExtrQuery# [5000:1:599:0:0:100000:1] sh# 257 sz# 99743 c# 599}{ExtrQuery# [5000:1:600:0:0:100000:1] sh# 257 sz# 99743 c# 600}{ExtrQuery# [5000:1:601:0:0:100000:1] sh# 257 sz# 99743 c# 601}{ExtrQuery# [5000:1:602:0:0:100000:1] sh# 257 sz# 99743 c# 602}{ExtrQuery# [5000:1:603:0:0:100000:1] sh# 257 sz# 99743 c# 603}{ExtrQuery# [5000:1:604:0:0:100000:1] sh# 257 sz# 99743 c# 604}{ExtrQuery# [5000:1:605:0:0:100000:1] sh# 257 sz# 99743 c# 605}{ExtrQuery# [5000:1:606:0:0:100000:1] sh# 257 sz# 99743 c# 606}{ExtrQuery# [5000:1:607:0:0:100000:1] sh# 257 sz# 99743 c# 607}{ExtrQuery# [5000:1:608:0:0:100000:1] sh# 257 sz# 99743 c# 608}{ExtrQuery# [5000:1:609:0:0:100000:1] sh# 257 sz# 99743 c# 609}{ExtrQuery# [5000:1:610:0:0:100000:1] sh# 257 sz# 99743 c# 610}{ExtrQuery# [5000:1:611:0:0:100000:1] sh# 257 sz# 99743 c# 611}{ExtrQuery# [5000:1:612:0:0:100000:1] sh# 257 sz# 99743 c# 612}{ExtrQuery# [5000:1:613:0:0:100000:1] sh# 257 sz# 99743 c# 613}{ExtrQuery# [5000:1:614:0:0:100000:1] sh# 257 sz# 99743 c# 614}{ExtrQuery# [5000:1:615:0:0:100000:1] sh# 257 sz# 99743 c# 615}{ExtrQuery# [5000:1:616:0:0:100000:1] sh# 257 sz# 99743 c# 616}{ExtrQuery# [5000:1:617:0:0:100000:1] sh# 257 sz# 99743 c# 617}{ExtrQuery# [5000:1:618:0:0:100000:1] sh# 257 sz# 99743 c# 618}{ExtrQuery# [5000:1:619:0:0:100000:1] sh# 257 sz# 99743 c# 619}{ExtrQuery# [5000:1:620:0:0:100000:1] sh# 257 sz# 99743 c# 620}{ExtrQuery# [5000:1:621:0:0:100000:1] sh# 257 sz# 99743 c# 621}{ExtrQuery# [5000:1:622:0:0:100000:1] sh# 257 sz# 99743 c# 622}{ExtrQuery# [5000:1:623:0:0:100000:1] sh# 257 sz# 99743 c# 623}{ExtrQuery# [5000:1:624:0:0:100000:1] sh# 257 sz# 99743 c# 624}{ExtrQuery# [5000:1:625:0:0:100000:1] sh# 257 sz# 99743 c# 625}{ExtrQuery# [5000:1:626:0:0:100000:1] sh# 257 sz# 99743 c# 626}{ExtrQuery# [5000:1:627:0:0:100000:1] sh# 257 sz# 99743 c# 627}{ExtrQuery# [5000:1:628:0:0:100000:1] sh# 257 sz# 99743 c# 628}{ExtrQuery# [5000:1:629:0:0:100000:1] sh# 257 sz# 99743 c# 629}{ExtrQuery# [5000:1:630:0:0:100000:1] sh# 257 sz# 99743 c# 630}{ExtrQuery# [5000:1:631:0:0:100000:1] sh# 257 sz# 99743 c# 631}{ExtrQuery# [5000:1:632:0:0:100000:1] sh# 257 sz# 99743 c# 632}{ExtrQuery# [5000:1:633:0:0:100000:1] sh# 257 sz# 99743 c# 633}{ExtrQuery# [5000:1:634:0:0:100000:1] sh# 257 sz# 99743 c# 634}{ExtrQuery# [5000:1:635:0:0:100000:1] sh# 257 sz# 99743 c# 635}{ExtrQuery# [5000:1:636:0:0:100000:1] sh# 257 sz# 99743 c# 636}{ExtrQuery# [5000:1:637:0:0:100000:1] sh# 257 sz# 99743 c# 637}{ExtrQuery# [5000:1:638:0:0:100000:1] sh# 257 sz# 99743 c# 638}{ExtrQuery# [5000:1:639:0:0:100000:1] sh# 257 sz# 99743 c# 639}{ExtrQuery# [5000:1:640:0:0:100000:1] sh# 257 sz# 99743 c# 640}{ExtrQuery# [5000:1:641:0:0:100000:1] sh# 257 sz# 99743 c# 641}{ExtrQuery# [5000:1:642:0:0:100000:1] sh# 257 sz# 99743 c# 642}{ExtrQuery# [5000:1:643:0:0:100000:1] sh# 257 sz# 99743 c# 643}{ExtrQuery# [5000:1:644:0:0:100000:1] sh# 257 sz# 99743 c# 644}{ExtrQuery# [5000:1:645:0:0:100000:1] sh# 257 sz# 99743 c# 645}{ExtrQuery# [5000:1:646:0:0:100000:1] sh# 257 sz# 99743 c# 646}{ExtrQuery# [5000:1:647:0:0:100000:1] sh# 257 sz# 99743 c# 647}{ExtrQuery# [5000:1:648:0:0:100000:1] sh# 257 sz# 99743 c# 648}{ExtrQuery# [5000:1:649:0:0:100000:1] sh# 257 sz# 99743 c# 649}{ExtrQuery# [5000:1:650:0:0:100000:1] sh# 257 sz# 99743 c# 650}{ExtrQuery# [5000:1:651:0:0:100000:1] sh# 257 sz# 99743 c# 651}{ExtrQuery# [5000:1:652:0:0:100000:1] sh# 257 sz# 99743 c# 652}{ExtrQuery# [5000:1:653:0:0:100000:1] 
sh# 257 sz# 99743 c# 653}{ExtrQuery# [5000:1:654:0:0:100000:1] sh# 257 sz# 99743 c# 654}{ExtrQuery# [5000:1:655:0:0:100000:1] sh# 257 sz# 99743 c# 655}{ExtrQuery# [5000:1:656:0:0:100000:1] sh# 257 sz# 99743 c# 656}{ExtrQuery# [5000:1:657:0:0:100000:1] sh# 257 sz# 99743 c# 657}{ExtrQuery# [5000:1:658:0:0:100000:1] sh# 257 sz# 99743 c# 658}{ExtrQuery# [5000:1:659:0:0:100000:1] sh# 257 sz# 99743 c# 659}{ExtrQuery# [5000:1:660:0:0:100000:1] sh# 257 sz# 99743 c# 660}{ExtrQuery# [5000:1:661:0:0:100000:1] sh# 257 sz# 99743 c# 661}{ExtrQuery# [5000:1:662:0:0:100000:1] sh# 257 sz# 99743 c# 662}{ExtrQuery# [5000:1:663:0:0:100000:1] sh# 257 sz# 99743 c# 663}{ExtrQuery# [5000:1:664:0:0:100000:1] sh# 257 sz# 99743 c# 664}{ExtrQuery# [5000:1:665:0:0:100000:1] sh# 257 sz# 99743 c# 665}{ExtrQuery# [5000:1:666:0:0:100000:1] sh# 257 sz# 99743 c# 666}{ExtrQuery# [5000:1:667:0:0:100000:1] sh# 257 sz# 99743 c# 667}{ExtrQuery# [5000:1:668:0:0:100000:1] sh# 257 sz# 99743 c# 668}{ExtrQuery# [5000:1:669:0:0:100000:1] sh# 257 sz# 99743 c# 669}{ExtrQuery# [5000:1:670:0:0:100000:1] sh# 257 sz# 99743 c# 670}{ExtrQuery# [5000:1:671:0:0:100000:1] sh# 257 sz# 99743 c# 671}{ExtrQuery# [5000:1:672:0:0:17027:1] sh# 257 sz# 16770 c# 672} {MsgQoS} Notify# 0 Internals# 0 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0}; VDISK CAN NOT REPLY ON TEvVGet REQUEST >> TYardTest::TestSlay [GOOD] >> TYardTest::TestSlayRace ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlyNotAllowed [GOOD] Test command err: RandomSeed# 17650313294823250851 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::RestartAndReadOnlyConsecutive [GOOD] Test command err: RandomSeed# 10355277871811774800 >> test.py::test[solomon-BadDownsamplingInterval-] [GOOD] >> test.py::test[solomon-Basic-default.txt] >> TYardTest::TestSlayRace [GOOD] >> TYardTest::TestSlayRecreate >> TYardTest::TestSlayRecreate [GOOD] >> TYardTest::TestSlayLogWriteRaceActor >> BSCReadOnlyPDisk::ReadOnlySlay [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlySlay [GOOD] Test command err: RandomSeed# 5917034249687049138 2025-06-24T20:28:20.623426Z 1 00h01m14.361536s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:28:20.625501Z 1 00h01m14.361536s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 16618389303239639894] 2025-06-24T20:28:20.654088Z 1 00h01m14.361536s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 >> TBsLocalRecovery::ChaoticWriteRestart [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHuge [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeDecreased >> test_transform.py::TestYamlConfigTransformations::test_simplified[dump_ds_init] [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_domains_config[dump] >> BSCRestartPDisk::RestartNotAllowed [GOOD] >> BsControllerTest::SelfHealMirror3dc [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeXXX [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeIncreased ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartNotAllowed [GOOD] Test command err: RandomSeed# 12077697145649733388 |87.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/scheme/kqp_scheme_ut.cpp ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::SelfHealMirror3dc [GOOD] Test command err: 2025-06-24T20:28:07.013727Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2025-06-24T20:28:07.013777Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2025-06-24T20:28:07.013853Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2025-06-24T20:28:07.013876Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2025-06-24T20:28:07.013925Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2025-06-24T20:28:07.013949Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2025-06-24T20:28:07.014002Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2025-06-24T20:28:07.014025Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2025-06-24T20:28:07.014075Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2025-06-24T20:28:07.014097Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2025-06-24T20:28:07.014154Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2025-06-24T20:28:07.014178Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2025-06-24T20:28:07.014217Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2025-06-24T20:28:07.014237Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2025-06-24T20:28:07.014270Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2025-06-24T20:28:07.014291Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2025-06-24T20:28:07.014327Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2025-06-24T20:28:07.014350Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2025-06-24T20:28:07.014387Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2025-06-24T20:28:07.014417Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2025-06-24T20:28:07.014464Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2025-06-24T20:28:07.014488Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2025-06-24T20:28:07.014521Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2025-06-24T20:28:07.014542Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2025-06-24T20:28:07.014604Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2025-06-24T20:28:07.014629Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2025-06-24T20:28:07.014662Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2025-06-24T20:28:07.014683Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2025-06-24T20:28:07.014722Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2025-06-24T20:28:07.014743Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2025-06-24T20:28:07.014780Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Bootstrap 2025-06-24T20:28:07.014801Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Connect 2025-06-24T20:28:07.014859Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Bootstrap 2025-06-24T20:28:07.014880Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Connect 2025-06-24T20:28:07.014913Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Bootstrap 2025-06-24T20:28:07.014933Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Connect 2025-06-24T20:28:07.014980Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Bootstrap 2025-06-24T20:28:07.015003Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Connect 2025-06-24T20:28:07.015049Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Bootstrap 2025-06-24T20:28:07.015076Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Connect 2025-06-24T20:28:07.015128Z 21 00h00m00.000000s 
:BS_NODE DEBUG: [21] Bootstrap 2025-06-24T20:28:07.018631Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Connect 2025-06-24T20:28:07.019255Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Bootstrap 2025-06-24T20:28:07.019460Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Connect 2025-06-24T20:28:07.019757Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Bootstrap 2025-06-24T20:28:07.019850Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Connect 2025-06-24T20:28:07.020075Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Bootstrap 2025-06-24T20:28:07.020186Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Connect 2025-06-24T20:28:07.020236Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Bootstrap 2025-06-24T20:28:07.020258Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Connect 2025-06-24T20:28:07.020297Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Bootstrap 2025-06-24T20:28:07.020322Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Connect 2025-06-24T20:28:07.020379Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Bootstrap 2025-06-24T20:28:07.020417Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Connect 2025-06-24T20:28:07.020459Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Bootstrap 2025-06-24T20:28:07.020482Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Connect 2025-06-24T20:28:07.020539Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Bootstrap 2025-06-24T20:28:07.020561Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Connect 2025-06-24T20:28:07.020594Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Bootstrap 2025-06-24T20:28:07.020618Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Connect 2025-06-24T20:28:07.020651Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Bootstrap 2025-06-24T20:28:07.020680Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Connect 2025-06-24T20:28:07.020724Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Bootstrap 2025-06-24T20:28:07.020745Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Connect 2025-06-24T20:28:07.020778Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Bootstrap 2025-06-24T20:28:07.020799Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Connect 2025-06-24T20:28:07.020833Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Bootstrap 2025-06-24T20:28:07.020855Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Connect 2025-06-24T20:28:07.020902Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Bootstrap 2025-06-24T20:28:07.020926Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Connect 2025-06-24T20:28:07.020974Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Bootstrap 2025-06-24T20:28:07.020997Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Connect 2025-06-24T20:28:07.037409Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:2713:53] Status# ERROR ClientId# [1:2713:53] ServerId# [0:0:0] PipeClient# [1:2713:53] 2025-06-24T20:28:07.038658Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:2714:41] Status# ERROR ClientId# [2:2714:41] ServerId# [0:0:0] PipeClient# [2:2714:41] 2025-06-24T20:28:07.038715Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:2715:41] Status# ERROR ClientId# [3:2715:41] ServerId# [0:0:0] PipeClient# [3:2715:41] 2025-06-24T20:28:07.038765Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:2716:41] Status# ERROR ClientId# [4:2716:41] ServerId# [0:0:0] PipeClient# [4:2716:41] 2025-06-24T20:28:07.038827Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:2717:41] Status# ERROR ClientId# [5:2717:41] ServerId# [0:0:0] PipeClient# [5:2717:41] 2025-06-24T20:28:07.038862Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:2718:41] Status# ERROR ClientId# [6:2718:41] ServerId# [0:0:0] PipeClient# [6:2718:41] 
2025-06-24T20:28:07.038910Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:2719:41] Status# ERROR ClientId# [7:2719:41] ServerId# [0:0:0] PipeClient# [7:2719:41] 2025-06-24T20:28:07.038961Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:2720:41] Status# ERROR ClientId# [8:2720:41] ServerId# [0:0:0] PipeClient# [8:2720:41] 2025-06-24T20:28:07.038997Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:2721:41] Status# ERROR ClientId# [9:2721:41] ServerId# [0:0:0] PipeClient# [9:2721:41] 2025-06-24T20:28:07.039045Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:2722:41] Status# ERROR ClientId# [10:2722:41] ServerId# [0:0:0] PipeClient# [10:2722:41] 2025-06-24T20:28:07.039086Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:2723:41] Status# ERROR ClientId# [11:2723:41] ServerId# [0:0:0] PipeClient# [11:2723:41] 2025-06-24T20:28:07.039125Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:2724:41] Status# ERROR ClientId# [12:2724:41] ServerId# [0:0:0] PipeClient# [12:2724:41] 2025-06-24T20:28:07.039179Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:2725:41] Status# ERROR ClientId# [13:2725:41] ServerId# [0:0:0] PipeClient# [13:2725:41] 2025-06-24T20:28:07.039222Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:2726:41] Status# ERROR ClientId# [14:2726:41] ServerId# [0:0:0] PipeClient# [14:2726:41] 2025-06-24T20:28:07.039263Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:2727:41] Status# ERROR ClientId# [15:2727:41] ServerId# [0:0:0] PipeClient# [15:2727:41] 2025-06-24T20:28:07.039298Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] ClientConnected Sender# [16:2728:41] Status# ERROR ClientId# [16:2728:41] ServerId# [0:0:0] PipeClient# [16:2728:41] 2025-06-24T20:28:07.039332Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] ClientConnected Sender# [17:2729:41] Status# ERROR ClientId# [17:2729:41] ServerId# [0:0:0] PipeClient# [17:2729:41] 2025-06-24T20:28:07.039366Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] ClientConnected Sender# [18:2730:41] Status# ERROR ClientId# [18:2730:41] ServerId# [0:0:0] PipeClient# [18:2730:41] 2025-06-24T20:28:07.039401Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] ClientConnected Sender# [19:2731:41] Status# ERROR ClientId# [19:2731:41] ServerId# [0:0:0] PipeClient# [19:2731:41] 2025-06-24T20:28:07.039440Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] ClientConnected Sender# [20:2732:41] Status# ERROR ClientId# [20:2732:41] ServerId# [0:0:0] PipeClient# [20:2732:41] 2025-06-24T20:28:07.039489Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] ClientConnected Sender# [21:2733:41] Status# ERROR ClientId# [21:2733:41] ServerId# [0:0:0] PipeClient# [21:2733:41] 2025-06-24T20:28:07.039528Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] ClientConnected Sender# [22:2734:41] Status# ERROR ClientId# [22:2734:41] ServerId# [0:0:0] PipeClient# [22:2734:41] 2025-06-24T20:28:07.039563Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] ClientConnected Sender# [23:2735:41] Status# ERROR ClientId# [23:2735:41] ServerId# [0:0:0] PipeClient# [23:2735:41] 2025-06-24T20:28:07.039616Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] ClientConnected Sender# [24:2736:41] Status# ERROR ClientId# [24:2736:41] ServerId# [0:0:0] PipeClient# [24:2736:41] 2025-06-24T20:28:07.039651Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] ClientConnected Sender# [25:2737:41] Status# ERROR ClientId# [25:2737:41] ServerId# [0:0:0] PipeClient# [25:2737:41] 2025-06-24T20:28:07.039702Z 26 
00h00m00.000000s :BS_NODE DEBUG: [26] ClientConnected Sender# [26:2738:41] Status# ERROR ClientId# [26:2738:41] ServerId# [0:0:0] PipeClient# [26:2738:41] 2025-06-24T20:28:07.039737Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] ClientConnected Sender# [27:2739:41] Status# ERROR ClientId# [27:2739:41] ServerId# [0:0:0] PipeClient# [27:2739:41] 2025-06-24T20:28:07.039779Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] ClientConnected Sender# [28:2740:41] Status# ERROR ClientId# [28:2740:41] ServerId# [0:0:0] PipeClient# [28:2740:41] 2025-06-24T20:28:07.039815Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] ClientConnected Sender# [29:2741:41] Status# ERROR ClientId# [29:2741:41] ServerId# [0:0:0] PipeClient# [29:2741:41] 2025-06-24T20:28:07.039852Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] ClientConnected Sender# [30:2742:41] Status# ERROR ClientId# [30:2742:41] ServerId# [0:0:0] PipeClient# [30:2742:41] 2025-06-24T20:28:07.039896Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] ClientConnected Sender# [31:2743:41] Status# ERROR ClientId# [31:2743:41] ServerId# [0:0:0] PipeClient# [31:2743:41] 2025-06-24T20:28:07.039942Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] ClientConnected Sender# [32:2744:41] Status# ERROR ClientId# [32:2744:41] ServerId# [0:0:0] PipeClient# [32:2744:41] 2025-06-24T20:28:07.039977Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] ClientConnected Sender# [33:2745:41] Status# ERROR ClientId# [33:2745:41] ServerId# [0:0:0] PipeClient# [33:2745:41] 2025-06-24T20:28:07.040014Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] ClientConnected Sender# [34:2746:41] Status# ERROR ClientId# [34:2746:41] ServerId# [0:0:0] PipeClient# [34:2746:41] 2025-06-24T20:28:07.040050Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] ClientConnected Sender# [35:2747:41] Status# ERROR ClientId# [35:2747:41 ... 
00004b:5:2:2:0] DiskIsOk# true 2025-06-24T20:28:35.163077Z 1 05h45m00.123040s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:35.163202Z 1 05h45m00.123040s :BS_NODE DEBUG: [1] VDiskId# [8000004b:5:0:2:0] -> [8000004b:6:0:2:0] 2025-06-24T20:28:35.163821Z 1 05h45m00.123040s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483723 Items# [8000004b:5:0:0:0]: 5:1001:1008 -> 12:1000:1009 ConfigTxSeqNo# 545 2025-06-24T20:28:35.163865Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483723 Success# true 2025-06-24T20:28:35.164056Z 35 05h45m00.123040s :BS_NODE DEBUG: [35] NodeServiceSetUpdate 2025-06-24T20:28:35.164119Z 35 05h45m00.123040s :BS_NODE DEBUG: [35] VDiskId# [8000004b:5:2:2:0] -> [8000004b:6:2:2:0] 2025-06-24T20:28:35.164232Z 18 05h45m00.123040s :BS_NODE DEBUG: [18] NodeServiceSetUpdate 2025-06-24T20:28:35.164286Z 18 05h45m00.123040s :BS_NODE DEBUG: [18] VDiskId# [8000004b:5:1:0:0] -> [8000004b:6:1:0:0] 2025-06-24T20:28:35.164408Z 21 05h45m00.123040s :BS_NODE DEBUG: [21] NodeServiceSetUpdate 2025-06-24T20:28:35.164467Z 21 05h45m00.123040s :BS_NODE DEBUG: [21] VDiskId# [8000004b:5:1:1:0] -> [8000004b:6:1:1:0] 2025-06-24T20:28:35.164551Z 5 05h45m00.123040s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:35.164654Z 9 05h45m00.123040s :BS_NODE DEBUG: [9] NodeServiceSetUpdate 2025-06-24T20:28:35.164707Z 9 05h45m00.123040s :BS_NODE DEBUG: [9] VDiskId# [8000004b:5:0:1:0] -> [8000004b:6:0:1:0] 2025-06-24T20:28:35.164826Z 12 05h45m00.123040s :BS_NODE DEBUG: [12] NodeServiceSetUpdate 2025-06-24T20:28:35.164875Z 12 05h45m00.123040s :BS_NODE DEBUG: [12] VDiskId# [8000004b:6:0:0:0] PDiskId# 1000 VSlotId# 1009 created 2025-06-24T20:28:35.164964Z 12 05h45m00.123040s :BS_NODE DEBUG: [12] VDiskId# [8000004b:6:0:0:0] status changed to INIT_PENDING 2025-06-24T20:28:35.165093Z 30 05h45m00.123040s :BS_NODE DEBUG: [30] NodeServiceSetUpdate 2025-06-24T20:28:35.165151Z 30 05h45m00.123040s :BS_NODE DEBUG: [30] VDiskId# [8000004b:5:2:0:0] -> [8000004b:6:2:0:0] 2025-06-24T20:28:35.165259Z 15 05h45m00.123040s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2025-06-24T20:28:35.165316Z 15 05h45m00.123040s :BS_NODE DEBUG: [15] VDiskId# [8000004b:5:1:2:0] -> [8000004b:6:1:2:0] 2025-06-24T20:28:35.165421Z 33 05h45m00.123040s :BS_NODE DEBUG: [33] NodeServiceSetUpdate 2025-06-24T20:28:35.165473Z 33 05h45m00.123040s :BS_NODE DEBUG: [33] VDiskId# [8000004b:5:2:1:0] -> [8000004b:6:2:1:0] 2025-06-24T20:28:35.165834Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH01@self_heal.cpp:71} Reassigner starting GroupId# 2147483655 2025-06-24T20:28:35.166805Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:35.166863Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:5:0:1:0] DiskIsOk# true 2025-06-24T20:28:35.166908Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:35.166982Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:5:0:2:0] DiskIsOk# true 2025-06-24T20:28:35.167022Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# 
true 2025-06-24T20:28:35.167054Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:5:1:0:0] DiskIsOk# true 2025-06-24T20:28:35.167091Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:35.167125Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:5:1:1:0] DiskIsOk# true 2025-06-24T20:28:35.167175Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:35.167209Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:5:1:2:0] DiskIsOk# true 2025-06-24T20:28:35.167243Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:35.167274Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:5:2:0:0] DiskIsOk# true 2025-06-24T20:28:35.167308Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:35.167339Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:5:2:1:0] DiskIsOk# true 2025-06-24T20:28:35.167367Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-24T20:28:35.167397Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:5:2:2:0] DiskIsOk# true 2025-06-24T20:28:35.173686Z 1 05h45m00.123552s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-24T20:28:35.173775Z 1 05h45m00.123552s :BS_NODE DEBUG: [1] VDiskId# [80000007:6:0:0:0] PDiskId# 1000 VSlotId# 1016 created 2025-06-24T20:28:35.173857Z 1 05h45m00.123552s :BS_NODE DEBUG: [1] VDiskId# [80000007:6:0:0:0] status changed to INIT_PENDING 2025-06-24T20:28:35.174481Z 1 05h45m00.123552s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483655 Items# [80000007:5:0:0:0]: 5:1001:1000 -> 1:1000:1016 ConfigTxSeqNo# 546 2025-06-24T20:28:35.174521Z 1 05h45m00.123552s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483655 Success# true 2025-06-24T20:28:35.174726Z 17 05h45m00.123552s :BS_NODE DEBUG: [17] NodeServiceSetUpdate 2025-06-24T20:28:35.174799Z 17 05h45m00.123552s :BS_NODE DEBUG: [17] VDiskId# [80000007:5:1:0:0] -> [80000007:6:1:0:0] 2025-06-24T20:28:35.174915Z 35 05h45m00.123552s :BS_NODE DEBUG: [35] NodeServiceSetUpdate 2025-06-24T20:28:35.174971Z 35 05h45m00.123552s :BS_NODE DEBUG: [35] VDiskId# [80000007:5:2:0:0] -> [80000007:6:2:0:0] 2025-06-24T20:28:35.175097Z 20 05h45m00.123552s :BS_NODE DEBUG: [20] NodeServiceSetUpdate 2025-06-24T20:28:35.175163Z 20 05h45m00.123552s :BS_NODE DEBUG: [20] VDiskId# [80000007:5:1:1:0] -> [80000007:6:1:1:0] 2025-06-24T20:28:35.175242Z 5 05h45m00.123552s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:35.175339Z 23 05h45m00.123552s :BS_NODE DEBUG: [23] 
NodeServiceSetUpdate 2025-06-24T20:28:35.175391Z 23 05h45m00.123552s :BS_NODE DEBUG: [23] VDiskId# [80000007:5:1:2:0] -> [80000007:6:1:2:0] 2025-06-24T20:28:35.175487Z 26 05h45m00.123552s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2025-06-24T20:28:35.175542Z 26 05h45m00.123552s :BS_NODE DEBUG: [26] VDiskId# [80000007:5:2:1:0] -> [80000007:6:2:1:0] 2025-06-24T20:28:35.175643Z 9 05h45m00.123552s :BS_NODE DEBUG: [9] NodeServiceSetUpdate 2025-06-24T20:28:35.175696Z 9 05h45m00.123552s :BS_NODE DEBUG: [9] VDiskId# [80000007:5:0:1:0] -> [80000007:6:0:1:0] 2025-06-24T20:28:35.175799Z 11 05h45m00.123552s :BS_NODE DEBUG: [11] NodeServiceSetUpdate 2025-06-24T20:28:35.175852Z 11 05h45m00.123552s :BS_NODE DEBUG: [11] VDiskId# [80000007:5:0:2:0] -> [80000007:6:0:2:0] 2025-06-24T20:28:35.175951Z 33 05h45m00.123552s :BS_NODE DEBUG: [33] NodeServiceSetUpdate 2025-06-24T20:28:35.176007Z 33 05h45m00.123552s :BS_NODE DEBUG: [33] VDiskId# [80000007:5:2:2:0] -> [80000007:6:2:2:0] 2025-06-24T20:28:35.177292Z 4 05h45m02.033968s :BS_NODE DEBUG: [4] VDiskId# [80000067:6:0:0:0] status changed to REPLICATING 2025-06-24T20:28:35.177990Z 4 05h45m02.659480s :BS_NODE DEBUG: [4] VDiskId# [80000057:6:0:0:0] status changed to REPLICATING 2025-06-24T20:28:35.178652Z 1 05h45m03.107552s :BS_NODE DEBUG: [1] VDiskId# [80000007:6:0:0:0] status changed to REPLICATING 2025-06-24T20:28:35.178949Z 4 05h45m03.158528s :BS_NODE DEBUG: [4] VDiskId# [80000017:6:0:0:0] status changed to REPLICATING 2025-06-24T20:28:35.179630Z 4 05h45m03.386016s :BS_NODE DEBUG: [4] VDiskId# [80000027:6:0:0:0] status changed to REPLICATING 2025-06-24T20:28:35.180253Z 4 05h45m03.433456s :BS_NODE DEBUG: [4] VDiskId# [80000077:6:0:0:0] status changed to REPLICATING 2025-06-24T20:28:35.180936Z 4 05h45m03.518992s :BS_NODE DEBUG: [4] VDiskId# [80000047:6:0:0:0] status changed to REPLICATING 2025-06-24T20:28:35.181610Z 4 05h45m03.530504s :BS_NODE DEBUG: [4] VDiskId# [80000037:6:0:0:0] status changed to REPLICATING 2025-06-24T20:28:35.182309Z 12 05h45m04.428040s :BS_NODE DEBUG: [12] VDiskId# [8000004b:6:0:0:0] status changed to REPLICATING 2025-06-24T20:28:35.185209Z 1 05h45m16.777552s :BS_NODE DEBUG: [1] VDiskId# [80000007:6:0:0:0] status changed to READY 2025-06-24T20:28:35.186219Z 5 05h45m16.778064s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:35.186291Z 5 05h45m16.778064s :BS_NODE DEBUG: [5] VDiskId# [80000007:5:0:0:0] destroyed 2025-06-24T20:28:35.186531Z 4 05h45m18.032480s :BS_NODE DEBUG: [4] VDiskId# [80000057:6:0:0:0] status changed to READY 2025-06-24T20:28:35.187746Z 5 05h45m18.032992s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:35.187805Z 5 05h45m18.032992s :BS_NODE DEBUG: [5] VDiskId# [80000057:5:0:0:0] destroyed 2025-06-24T20:28:35.188432Z 12 05h45m20.805040s :BS_NODE DEBUG: [12] VDiskId# [8000004b:6:0:0:0] status changed to READY 2025-06-24T20:28:35.189247Z 5 05h45m20.805552s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:35.189305Z 5 05h45m20.805552s :BS_NODE DEBUG: [5] VDiskId# [8000004b:5:0:0:0] destroyed 2025-06-24T20:28:35.189478Z 4 05h45m22.701456s :BS_NODE DEBUG: [4] VDiskId# [80000077:6:0:0:0] status changed to READY 2025-06-24T20:28:35.190514Z 5 05h45m22.701968s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:35.190566Z 5 05h45m22.701968s :BS_NODE DEBUG: [5] VDiskId# [80000077:5:0:0:0] destroyed 2025-06-24T20:28:35.191106Z 4 05h45m27.721016s :BS_NODE DEBUG: [4] VDiskId# [80000027:6:0:0:0] status changed to READY 2025-06-24T20:28:35.192174Z 5 05h45m27.721528s :BS_NODE DEBUG: [5] 
NodeServiceSetUpdate 2025-06-24T20:28:35.192230Z 5 05h45m27.721528s :BS_NODE DEBUG: [5] VDiskId# [80000027:5:0:0:0] destroyed 2025-06-24T20:28:35.192361Z 4 05h45m27.745968s :BS_NODE DEBUG: [4] VDiskId# [80000067:6:0:0:0] status changed to READY 2025-06-24T20:28:35.193369Z 5 05h45m27.746480s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:35.193418Z 5 05h45m27.746480s :BS_NODE DEBUG: [5] VDiskId# [80000067:5:0:0:0] destroyed 2025-06-24T20:28:35.193561Z 4 05h45m28.794992s :BS_NODE DEBUG: [4] VDiskId# [80000047:6:0:0:0] status changed to READY 2025-06-24T20:28:35.194576Z 5 05h45m28.795504s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:35.194630Z 5 05h45m28.795504s :BS_NODE DEBUG: [5] VDiskId# [80000047:5:0:0:0] destroyed 2025-06-24T20:28:35.196070Z 4 05h45m32.041504s :BS_NODE DEBUG: [4] VDiskId# [80000037:6:0:0:0] status changed to READY 2025-06-24T20:28:35.197134Z 5 05h45m32.042016s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:35.197189Z 5 05h45m32.042016s :BS_NODE DEBUG: [5] VDiskId# [80000037:5:0:0:0] destroyed 2025-06-24T20:28:35.198194Z 4 05h45m36.402528s :BS_NODE DEBUG: [4] VDiskId# [80000017:6:0:0:0] status changed to READY 2025-06-24T20:28:35.199301Z 5 05h45m36.403040s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-24T20:28:35.199357Z 5 05h45m36.403040s :BS_NODE DEBUG: [5] VDiskId# [80000017:5:0:0:0] destroyed |87.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_scheme_ut.cpp |87.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |87.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |87.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |87.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |87.6%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/test-results/unittest/{meta.json ... results_accumulator.log} |87.6%| [TA] $(B)/ydb/core/mind/bscontroller/ut_selfheal/test-results/unittest/{meta.json ... results_accumulator.log} >> BlobDepot::LoadPutAndRead [GOOD] >> BlobDepot::DecommitPutAndRead |87.6%| [TA] {RESULT} $(B)/ydb/core/blobstorage/incrhuge/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.6%| [LD] {RESULT} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |87.6%| [LD] {RESULT} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |87.6%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/test-results/unittest/{meta.json ... results_accumulator.log} |87.6%| [TA] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_selfheal/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TPDiskRaces::DecommitWithInflightMock [GOOD] >> TPDiskRaces::KillOwnerWhileDecommitting >> TS3WrapperTests::AbortUnknownUpload >> test_transform.py::TestYamlConfigTransformations::test_domains_config[dump] [GOOD] >> TS3WrapperTests::AbortUnknownUpload [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_domains_config[dump_ds_init] >> TS3WrapperTests::CopyPartUpload >> TInterconnectTest::OldFormat >> test.py::test[solomon-Basic-default.txt] [GOOD] >> TSubgroupPartLayoutTest::CountEffectiveReplicas4of4 [GOOD] >> test.py::test[solomon-BasicExtractMembers-default.txt] >> TestProtocols::TestConnectProtocol >> ClosedIntervalSet::Difference [GOOD] >> ClosedIntervalSet::Contains >> TInterconnectTest::TestSimplePingPong >> TS3WrapperTests::MultipartUpload >> TS3WrapperTests::CopyPartUpload [GOOD] >> TInterconnectTest::TestNotifyUndelivered |87.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::AbortUnknownUpload [GOOD] Test command err: 2025-06-24T20:28:42.794688Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# AD2D55CC-5ABE-43DB-98DA-0A50B37C9E51, request# AbortMultipartUpload { Bucket: TEST Key: key UploadId: uploadId } REQUEST: DELETE /TEST/key?uploadId=uploadId HTTP/1.1 HEADERS: Host: localhost:6783 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: CE113B65-937C-48F5-BA76-6186F9D4B6EB amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 6 / /TEST/key / uploadId=uploadId 2025-06-24T20:28:42.812130Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# AD2D55CC-5ABE-43DB-98DA-0A50B37C9E51, response# >> ClosedIntervalSet::Contains [GOOD] >> ClosedIntervalSet::EnumInRange >> TS3WrapperTests::CompleteUnknownUpload >> TS3WrapperTests::HeadObject >> TS3WrapperTests::HeadUnknownObject >> TInterconnectTest::OldFormat [GOOD] >> TInterconnectTest::OldFormatSuppressVersionCheckOnNew |87.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest |87.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest |87.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest |87.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::AbortMultipartUpload >> TS3WrapperTests::MultipartUpload [GOOD] >> TS3WrapperTests::UploadUnknownPart >> TestProtocols::TestConnectProtocol [GOOD] >> TestProtocols::TestHTTPCollected |87.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest |87.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TInterconnectTest::TestSimplePingPong [GOOD] >> TInterconnectTest::TestSubscribeByFlag >> TS3WrapperTests::GetUnknownObject >> TS3WrapperTests::HeadObject [GOOD] >> TInterconnectTest::TestBlobEvent >> TS3WrapperTests::CompleteUnknownUpload [GOOD] >> TS3WrapperTests::HeadUnknownObject [GOOD] >> TInterconnectTest::TestNotifyUndelivered [GOOD] >> TInterconnectTest::TestBlobEvent220BytesPreSerialized >> TInterconnectTest::TestNotifyUndeliveredOnMissedActor >> TestProtocols::TestHTTPCollected [GOOD] >> TInterconnectTest::TestTraceIdPassThrough |87.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest ------- [TM] {asan, default-linux-x86_64, 
release} ydb/core/blobstorage/groupinfo/ut/unittest >> TSubgroupPartLayoutTest::CountEffectiveReplicas4of4 [GOOD] Test command err: testing erasure block-3-1 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 Checked 256 cases, took 95 us testing erasure stripe-4-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 main# 16 main# 17 main# 18 main# 19 main# 20 main# 21 main# 22 main# 23 main# 24 main# 25 main# 26 main# 27 main# 28 main# 29 main# 30 main# 31 main# 32 main# 33 main# 34 main# 35 main# 36 main# 37 main# 38 main# 39 main# 40 main# 41 main# 42 main# 43 main# 44 main# 45 main# 46 main# 47 main# 48 main# 49 main# 50 main# 51 main# 52 main# 53 main# 54 main# 55 main# 56 main# 57 main# 58 main# 59 main# 60 main# 61 main# 62 main# 63 Checked 262144 cases, took 1397824 us testing erasure block-2-3 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 main# 16 main# 17 main# 18 main# 19 main# 20 main# 21 main# 22 main# 23 main# 24 main# 25 main# 26 main# 27 main# 28 main# 29 main# 30 main# 31 Checked 1048576 cases, took 3221437 us testing erasure stripe-3-1 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 Checked 256 cases, took 80480 us testing erasure stripe-3-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 main# 16 main# 17 main# 18 main# 19 main# 20 main# 21 main# 22 main# 23 main# 24 main# 25 main# 26 main# 27 main# 28 main# 29 main# 30 main# 31 Checked 32768 cases, took 251587 us testing erasure stripe-2-3 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 main# 16 main# 17 main# 18 main# 19 main# 20 main# 21 main# 22 main# 23 main# 24 main# 25 main# 26 main# 27 main# 28 main# 29 main# 30 main# 31 Checked 1048576 cases, took 3215686 us |87.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::AbortMultipartUpload [GOOD] >> TInterconnectTest::OldFormatSuppressVersionCheckOnNew [GOOD] >> TInterconnectTest::OldFormatSuppressVersionCheckOnOld >> TS3WrapperTests::UploadUnknownPart [GOOD] >> TS3WrapperTests::GetObject ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::CopyPartUpload [GOOD] Test command err: 2025-06-24T20:28:43.477911Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 782721F8-C7B9-4B9B-ACEF-2A4B2F9C3A16, request# PutObject { Bucket: TEST Key: key } REQUEST: PUT /TEST/key HTTP/1.1 HEADERS: Host: localhost:22078 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: CFB7637A-051A-40FC-93A7-FFBBB0EF6659 amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /TEST/key / / 4 2025-06-24T20:28:43.484003Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 782721F8-C7B9-4B9B-ACEF-2A4B2F9C3A16, response# PutObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc } 2025-06-24T20:28:43.484487Z node 1 :S3_WRAPPER NOTICE: 
s3_storage.h:75: Request: uuid# A2F53D54-36DC-4545-989E-28023FD1322C, request# CreateMultipartUpload { Bucket: TEST Key: key1 } REQUEST: POST /TEST/key1?uploads HTTP/1.1 HEADERS: Host: localhost:22078 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 96E652BA-C152-4A39-BD67-DAC38348529D amz-sdk-request: attempt=1 content-length: 0 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-storage-class: STANDARD S3_MOCK::HttpServeAction: 4 / /TEST/key1 / uploads= 2025-06-24T20:28:43.490551Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# A2F53D54-36DC-4545-989E-28023FD1322C, response# CreateMultipartUploadResult { Bucket: Key: TEST/key1 UploadId: 1 } 2025-06-24T20:28:43.495410Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 061DEAB3-FB75-4E8F-B711-2C780C6AF057, request# UploadPartCopy { Bucket: TEST Key: key1 UploadId: 1 PartNumber: 1 } REQUEST: PUT /TEST/key1?partNumber=1&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:22078 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: E1ACE4E6-BE2D-48C9-9AE7-EACE8A3F3D56 amz-sdk-request: attempt=1 content-length: 0 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-copy-source: /TEST/key x-amz-copy-source-range: bytes=1-2 S3_MOCK::HttpServeWrite: /TEST/key1 / partNumber=1&uploadId=1 / 0 2025-06-24T20:28:43.503882Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 061DEAB3-FB75-4E8F-B711-2C780C6AF057, response# UploadPartCopyResult { } 2025-06-24T20:28:43.504516Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 3DD3D148-3F39-4B0F-811C-E3E521262963, request# CompleteMultipartUpload { Bucket: TEST Key: key1 UploadId: 1 MultipartUpload: { Parts: [afc7e8a98f75755e513d9d5ead888e1d] } } REQUEST: POST /TEST/key1?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:22078 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: C3D7FAC9-1722-457D-9431-923E5ED7DC90 amz-sdk-request: attempt=1 content-length: 235 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /TEST/key1 / uploadId=1 2025-06-24T20:28:43.511183Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 3DD3D148-3F39-4B0F-811C-E3E521262963, response# CompleteMultipartUploadResult { Bucket: Key: TEST/key1 ETag: afc7e8a98f75755e513d9d5ead888e1d } 2025-06-24T20:28:43.515422Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# A3EDEA3A-56A8-4461-8531-0549018B4358, request# GetObject { Bucket: TEST Key: key1 Range: bytes=0-1 } REQUEST: GET /TEST/key1 HTTP/1.1 HEADERS: Host: localhost:22078 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: AEC5CEFE-4311-47AF-B201-1B28FB1661C5 amz-sdk-request: attempt=1 content-type: application/xml range: bytes=0-1 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /TEST/key1 / 2 2025-06-24T20:28:43.522854Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# A3EDEA3A-56A8-4461-8531-0549018B4358, response# GetObjectResult { } >> TS3WrapperTests::GetUnknownObject 
[GOOD] >> TS3WrapperTests::PutObject |87.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/pg/kqp_pg_ut.cpp >> TestProtocols::TestResolveProtocol >> TInterconnectTest::TestBlobEvent [GOOD] >> TInterconnectTest::TestBlobEvent220Bytes >> TInterconnectTest::TestSubscribeByFlag [GOOD] >> TInterconnectTest::TestReconnect >> TInterconnectTest::TestTraceIdPassThrough [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::MultipartUpload [GOOD] Test command err: 2025-06-24T20:28:43.880569Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 4543F15C-90CD-4DF8-895B-A346BF30F281, request# CreateMultipartUpload { Bucket: TEST Key: key } REQUEST: POST /TEST/key?uploads HTTP/1.1 HEADERS: Host: localhost:8592 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 6E890ED8-4BDB-4381-B525-44A620858DE3 amz-sdk-request: attempt=1 content-length: 0 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-storage-class: STANDARD S3_MOCK::HttpServeAction: 4 / /TEST/key / uploads= 2025-06-24T20:28:43.901354Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 4543F15C-90CD-4DF8-895B-A346BF30F281, response# CreateMultipartUploadResult { Bucket: Key: TEST/key UploadId: 1 } 2025-06-24T20:28:43.903400Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# EC34247E-F2F1-4368-ABE4-80044A791009, request# UploadPart { Bucket: TEST Key: key UploadId: 1 PartNumber: 1 } REQUEST: PUT /TEST/key?partNumber=1&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:8592 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 99F4821D-345C-4289-AA2D-074A05EEA243 amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /TEST/key / partNumber=1&uploadId=1 / 4 2025-06-24T20:28:43.920308Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# EC34247E-F2F1-4368-ABE4-80044A791009, response# UploadPartResult { ETag: 841a2d689ad86bd1611447453c22c6fc } 2025-06-24T20:28:43.923435Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 1420EC61-CB3C-4F3E-A9E8-1D12389545CB, request# CompleteMultipartUpload { Bucket: TEST Key: key UploadId: 1 MultipartUpload: { Parts: [841a2d689ad86bd1611447453c22c6fc] } } REQUEST: POST /TEST/key?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:8592 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: C8FBE0AD-DA1E-499B-9BDB-2F19D9C2E748 amz-sdk-request: attempt=1 content-length: 235 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /TEST/key / uploadId=1 2025-06-24T20:28:43.943239Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 1420EC61-CB3C-4F3E-A9E8-1D12389545CB, response# CompleteMultipartUploadResult { Bucket: Key: TEST/key ETag: 841a2d689ad86bd1611447453c22c6fc } 2025-06-24T20:28:43.943827Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# D2D8517B-673B-4579-8222-B9A485F132CD, request# GetObject { Bucket: TEST Key: key Range: bytes=0-3 } REQUEST: GET /TEST/key HTTP/1.1 HEADERS: Host: localhost:8592 Accept: */* Connection: Upgrade, 
HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: D6152F75-8773-4A21-BB96-F9D3BF1A73B4 amz-sdk-request: attempt=1 content-type: application/xml range: bytes=0-3 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /TEST/key / 4 2025-06-24T20:28:43.949103Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# D2D8517B-673B-4579-8222-B9A485F132CD, response# GetObjectResult { } >> TInterconnectTest::TestNotifyUndeliveredOnMissedActor [GOOD] >> TInterconnectTest::TestPreSerializedBlobEventUpToMebibytes >> TS3WrapperTests::GetObject [GOOD] >> TInterconnectTest::TestBlobEvent220BytesPreSerialized [GOOD] >> TInterconnectTest::TestBlobEventDifferentSizes >> TInterconnectTest::OldFormatSuppressVersionCheckOnOld [GOOD] >> TInterconnectTest::OldFormatSuppressVersionCheck >> TS3WrapperTests::PutObject [GOOD] |87.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/pg/kqp_pg_ut.cpp >> TestProtocols::TestResolveProtocol [GOOD] >> TestProtocols::TestHTTPCollectedVerySlow >> TInterconnectTest::TestReconnect [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::UploadUnknownPart [GOOD] >> TInterconnectTest::TestSubscribeAndUnsubsribeByEvent Test command err: 2025-06-24T20:28:44.305864Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 263F0218-BA60-445D-833F-50D99F449810, request# UploadPart { Bucket: TEST Key: key UploadId: uploadId PartNumber: 1 } REQUEST: PUT /TEST/key?partNumber=1&uploadId=uploadId HTTP/1.1 HEADERS: Host: localhost:17492 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 7D87B6D7-DC87-4B5B-BA69-C381750C3D15 amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /TEST/key / partNumber=1&uploadId=uploadId / 4 2025-06-24T20:28:44.327460Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 263F0218-BA60-445D-833F-50D99F449810, response# ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::HeadObject [GOOD] Test command err: 2025-06-24T20:28:44.153080Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# C487A406-5B86-4FAD-9DA0-C1D7E490544F, request# PutObject { Bucket: TEST Key: key } REQUEST: PUT /TEST/key HTTP/1.1 HEADERS: Host: localhost:29453 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: C6937921-A11D-474F-B233-619B7566F7F7 amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /TEST/key / / 4 2025-06-24T20:28:44.175749Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# C487A406-5B86-4FAD-9DA0-C1D7E490544F, response# PutObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc } 2025-06-24T20:28:44.176361Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 8BDEA014-C4D3-4802-898A-4A29C344F04A, request# HeadObject { Bucket: TEST Key: key } REQUEST: HEAD /TEST/key HTTP/1.1 HEADERS: Host: localhost:29453 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 
805C1F9C-7B2C-4202-ABA7-D0F1D7B19E98 amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /TEST/key / 4 2025-06-24T20:28:44.187239Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 8BDEA014-C4D3-4802-898A-4A29C344F04A, response# HeadObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc ContentLength: 4 } >> TInterconnectTest::TestBlobEvent220Bytes [GOOD] >> TInterconnectTest::TestAddressResolve >> TInterconnectTest::TestPreSerializedBlobEventUpToMebibytes [GOOD] >> TInterconnectTest::TestPingPongThroughSubChannel ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::HeadUnknownObject [GOOD] Test command err: 2025-06-24T20:28:44.246520Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# CBE2DCBA-A4FF-4F85-AEF3-3170EB98930C, request# HeadObject { Bucket: TEST Key: key } REQUEST: HEAD /TEST/key HTTP/1.1 HEADERS: Host: localhost:61451 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 0EFA08CA-FD12-4788-B1E7-051C4F7D008C amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 2025-06-24T20:28:44.263722Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# CBE2DCBA-A4FF-4F85-AEF3-3170EB98930C, response# No response body. ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::CompleteUnknownUpload [GOOD] Test command err: 2025-06-24T20:28:44.203161Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 3EB7A5D9-93E5-4E2F-B242-F50A41D69A6B, request# CompleteMultipartUpload { Bucket: TEST Key: key UploadId: uploadId MultipartUpload: { Parts: [ETag] } } REQUEST: POST /TEST/key?uploadId=uploadId HTTP/1.1 HEADERS: Host: localhost:12606 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 616CB1A5-7E8B-4BAF-A009-369FE71D2D93 amz-sdk-request: attempt=1 content-length: 207 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /TEST/key / uploadId=uploadId 2025-06-24T20:28:44.209322Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 3EB7A5D9-93E5-4E2F-B242-F50A41D69A6B, response# ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::AbortMultipartUpload [GOOD] Test command err: 2025-06-24T20:28:44.296922Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# B0078A76-9635-453B-87E4-B9D76E4D8B97, request# CreateMultipartUpload { Bucket: TEST Key: key } REQUEST: POST /TEST/key?uploads HTTP/1.1 HEADERS: Host: localhost:1529 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: B611EFE4-A910-4BC6-9726-E5C22D1D499C amz-sdk-request: attempt=1 content-length: 0 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-storage-class: STANDARD S3_MOCK::HttpServeAction: 4 / /TEST/key / uploads= 2025-06-24T20:28:44.325260Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# B0078A76-9635-453B-87E4-B9D76E4D8B97, response# CreateMultipartUploadResult { Bucket: Key: TEST/key 
UploadId: 1 } 2025-06-24T20:28:44.325870Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 5E358EE3-CC48-49FB-B0FD-E5CBD34F001E, request# AbortMultipartUpload { Bucket: TEST Key: key UploadId: 1 } REQUEST: DELETE /TEST/key?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:1529 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 583FD435-8160-481B-BE62-2D8ED6E52120 amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 6 / /TEST/key / uploadId=1 2025-06-24T20:28:44.336126Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 5E358EE3-CC48-49FB-B0FD-E5CBD34F001E, response# AbortMultipartUploadResult { } 2025-06-24T20:28:44.336626Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# EDA8CCC2-BCEC-4142-B562-63879E9320ED, request# HeadObject { Bucket: TEST Key: key } REQUEST: HEAD /TEST/key HTTP/1.1 HEADERS: Host: localhost:1529 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: B18E7F94-3E0F-486C-B211-2433A470A720 amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 2025-06-24T20:28:44.344099Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# EDA8CCC2-BCEC-4142-B562-63879E9320ED, response# No response body. >> TInterconnectTest::TestBlobEventDifferentSizes [GOOD] >> TInterconnectTest::TestBlobEventDifferentSizesPreSerialized |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestTraceIdPassThrough [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::GetUnknownObject [GOOD] Test command err: 2025-06-24T20:28:44.416323Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# A74A89AD-273E-446E-B96B-5DE68C11623B, request# GetObject { Bucket: TEST Key: key Range: bytes=0-3 } REQUEST: GET /TEST/key HTTP/1.1 HEADERS: Host: localhost:64254 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 32D2B799-D759-42B8-8C8C-4B082873A3A5 amz-sdk-request: attempt=1 content-type: application/xml range: bytes=0-3 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 2025-06-24T20:28:44.441022Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# A74A89AD-273E-446E-B96B-5DE68C11623B, response# No response body. 
>> TInterconnectTest::OldFormatSuppressVersionCheck [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::GetObject [GOOD] Test command err: 2025-06-24T20:28:44.726284Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 701E38C3-C875-4C20-90A2-8DAB4B4163FB, request# PutObject { Bucket: TEST Key: key } REQUEST: PUT /TEST/key HTTP/1.1 HEADERS: Host: localhost:29312 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 519F027A-9BEF-49D3-B165-C3788A6B41D6 amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /TEST/key / / 4 2025-06-24T20:28:44.745172Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 701E38C3-C875-4C20-90A2-8DAB4B4163FB, response# PutObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc } 2025-06-24T20:28:44.745805Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# A05EF926-6E40-42DD-8A58-BD18A06F7081, request# GetObject { Bucket: TEST Key: key Range: bytes=0-3 } REQUEST: GET /TEST/key HTTP/1.1 HEADERS: Host: localhost:29312 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 61FD1272-052D-4EE1-881C-E828FF275C1A amz-sdk-request: attempt=1 content-type: application/xml range: bytes=0-3 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /TEST/key / 4 2025-06-24T20:28:44.766448Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# A05EF926-6E40-42DD-8A58-BD18A06F7081, response# GetObjectResult { } >> TInterconnectTest::TestAddressResolve [GOOD] >> TInterconnectTest::OldNbs ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::PutObject [GOOD] Test command err: 2025-06-24T20:28:44.942155Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 6650A0DA-BC3B-4F6B-AD38-ABD2E92850E2, request# PutObject { Bucket: TEST Key: key } REQUEST: PUT /TEST/key HTTP/1.1 HEADERS: Host: localhost:23935 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 9D96C666-6771-448E-8CC6-0FE61C5180A9 amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /TEST/key / / 4 2025-06-24T20:28:44.963505Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 6650A0DA-BC3B-4F6B-AD38-ABD2E92850E2, response# PutObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc } >> TInterconnectTest::TestPingPongThroughSubChannel [GOOD] >> TInterconnectTest::TestSubscribeAndUnsubsribeByEvent [GOOD] >> TInterconnectTest::TestBlobEventDifferentSizesPreSerialized [GOOD] >> TInterconnectTest::TestBlobEventDifferentSizesPreSerializedAndRaw >> TActorActivity::Basic [GOOD] >> ActorBootstrapped::TestBootstrapped >> TInterconnectTest::TestManyEvents >> ActorBootstrapped::TestBootstrapped [GOOD] >> ActorBootstrapped::TestBootstrappedParent ------- [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::OldFormatSuppressVersionCheck [GOOD] Test command err: 
2025-06-24T20:28:44.091721Z node 4 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [4:22:2057] [node 3] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2025-06-24T20:28:44.702323Z node 5 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [5:20:2058] [node 6] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2025-06-24T20:28:45.291190Z node 8 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [8:22:2057] [node 7] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2025-06-24T20:28:45.299746Z node 7 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [7:20:2058] [node 8] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default >> TInterconnectTest::TestConnectAndDisconnect >> ActorBootstrapped::TestBootstrappedParent [GOOD] >> TActorTracker::Basic |87.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain |87.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain >> TInterconnectTest::TestConnectAndDisconnect [GOOD] >> TInterconnectTest::TestBlobEventPreSerialized >> TInterconnectTest::TestBlobEventDifferentSizesPreSerializedAndRaw [GOOD] >> TActorTracker::Basic [GOOD] |87.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table |87.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain |87.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestSubscribeAndUnsubsribeByEvent [GOOD] Test command err: 2025-06-24T20:28:44.984547Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [6:10:2048] [node 5] ICP77 @206 (null) -> PendingActivation 2025-06-24T20:28:44.984619Z node 6 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [6:10:2048] [node 5] ICP01 ready to work 2025-06-24T20:28:44.984823Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [5:1:2048] [node 6] ICP77 @206 (null) -> PendingActivation 2025-06-24T20:28:44.984853Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [5:1:2048] [node 6] ICP01 ready to work 2025-06-24T20:28:44.985219Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [5:1:2048] [node 6] ICP77 @99 PendingActivation -> PendingNodeInfo 2025-06-24T20:28:44.986659Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.cpp:119: Proxy [5:1:2048] [node 6] ICP02 configured for host ::1:11456 2025-06-24T20:28:44.986806Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [5:1:2048] [node 6] ICP77 @488 PendingNodeInfo -> PendingConnection 2025-06-24T20:28:44.987281Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:677: Handshake [5:21:2058] [node 6] ICH01 starting outgoing handshake 2025-06-24T20:28:44.987591Z node 5 :INTERCONNECT DEBUG: interconnect_resolve.cpp:128: ICR04 Host: ::1, RESOLVED address 2025-06-24T20:28:44.989028Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:682: Handshake [5:21:2058] [node 6] ICH05 connected to peer 2025-06-24T20:28:45.003508Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_server.cpp:104: ICListener: [0:0:0] ICL04 
Accepted from: ::1:56418 2025-06-24T20:28:45.004220Z node 6 :INTERCONNECT DEBUG: interconnect_handshake.cpp:914: Handshake [6:23:2058] [node 0] ICH02 starting incoming handshake 2025-06-24T20:28:45.006310Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:612: Handshake [5:21:2058] [node 6] ICH07 SendExBlock ExRequest Protocol: 2 ProgramPID: 95806 ProgramStartTime: 6384393146854 Serial: 269013337 ReceiverNodeId: 6 SenderActorId: "[5:269013337:0]" SenderHostName: "::1" ReceiverHostName: "::1" UUID: "Cluster for process with id: 95806" ClusterUUIDs { ClusterUUID: "Cluster for process with id: 95806" AcceptUUID: "Cluster for process with id: 95806" } RequestModernFrame: true RequestAuthOnly: false RequestExtendedTraceFmt: true RequestExternalDataChannel: true HandshakeId: "\313J\221\353\216\262~\034\210\364\322|\215D\022\277\371\316g\200 \341*YB\000\316\223\356a1B" RequestXxhash: true RequestXdcShuffle: true 2025-06-24T20:28:45.011894Z node 6 :INTERCONNECT DEBUG: interconnect_handshake.cpp:612: Handshake [6:23:2058] [node 5] ICH07 ReceiveExBlock ExRequest Protocol: 2 ProgramPID: 95806 ProgramStartTime: 6384393146854 Serial: 269013337 ReceiverNodeId: 6 SenderActorId: "[5:269013337:0]" SenderHostName: "::1" ReceiverHostName: "::1" UUID: "Cluster for process with id: 95806" ClusterUUIDs { ClusterUUID: "Cluster for process with id: 95806" AcceptUUID: "Cluster for process with id: 95806" } RequestModernFrame: true RequestAuthOnly: false RequestExtendedTraceFmt: true RequestExternalDataChannel: true HandshakeId: "\313J\221\353\216\262~\034\210\364\322|\215D\022\277\371\316g\200 \341*YB\000\316\223\356a1B" RequestXxhash: true RequestXdcShuffle: true 2025-06-24T20:28:45.011991Z node 6 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [6:23:2058] [node 5] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2025-06-24T20:28:45.012522Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [6:10:2048] [node 5] ICP77 @99 PendingActivation -> PendingNodeInfo 2025-06-24T20:28:45.014197Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.cpp:119: Proxy [6:10:2048] [node 5] ICP02 configured for host ::1:23740 2025-06-24T20:28:45.014261Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.cpp:262: Proxy [6:10:2048] [node 5] ICP17 incoming handshake (actor [6:23:2058]) 2025-06-24T20:28:45.014322Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [6:10:2048] [node 5] ICP77 @488 PendingNodeInfo -> PendingConnection 2025-06-24T20:28:45.014406Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.cpp:210: Proxy [6:10:2048] [node 5] ICP07 issued incoming handshake reply 2025-06-24T20:28:45.014489Z node 6 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:219: Proxy [6:10:2048] [node 5] ICP08 No active sessions, becoming PendingConnection 2025-06-24T20:28:45.014541Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [6:10:2048] [node 5] ICP77 @220 PendingConnection -> PendingConnection 2025-06-24T20:28:45.015076Z node 6 :INTERCONNECT DEBUG: interconnect_handshake.cpp:612: Handshake [6:23:2058] [node 5] ICH07 SendExBlock ExReply Success { Protocol: 2 ProgramPID: 95806 ProgramStartTime: 6384433035492 Serial: 3943831144 SenderActorId: "[6:3943831144:0]" ClusterUUIDs { ClusterUUID: "Cluster for process with id: 95806" AcceptUUID: "Cluster for process with id: 95806" } StartEncryption: false UseModernFrame: true AuthOnly: false UseExtendedTraceFmt: true UseExternalDataChannel: true UseXxhash: true UseXdcShuffle: true } 
2025-06-24T20:28:45.019775Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:612: Handshake [5:21:2058] [node 6] ICH07 ReceiveExBlock ExReply Success { Protocol: 2 ProgramPID: 95806 ProgramStartTime: 6384433035492 Serial: 3943831144 SenderActorId: "[6:3943831144:0]" ClusterUUIDs { ClusterUUID: "Cluster for process with id: 95806" AcceptUUID: "Cluster for process with id: 95806" } StartEncryption: false UseModernFrame: true AuthOnly: false UseExtendedTraceFmt: true UseExternalDataChannel: true UseXxhash: true UseXdcShuffle: true } 2025-06-24T20:28:45.019852Z node 5 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [5:21:2058] [node 6] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2025-06-24T20:28:45.020010Z node 5 :INTERCONNECT DEBUG: interconnect_resolve.cpp:128: ICR04 Host: ::1, RESOLVED address 2025-06-24T20:28:45.023303Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_server.cpp:104: ICListener: [0:0:0] ICL04 Accepted from: ::1:56432 2025-06-24T20:28:45.023692Z node 6 :INTERCONNECT DEBUG: interconnect_handshake.cpp:914: Handshake [6:25:2059] [node 0] ICH02 starting incoming handshake 2025-06-24T20:28:45.027936Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:612: Handshake [5:21:2058] [node 6] ICH07 SendExBlock ExternalDataChannelParams HandshakeId: "\313J\221\353\216\262~\034\210\364\322|\215D\022\277\371\316g\200 \341*YB\000\316\223\356a1B" 2025-06-24T20:28:45.028080Z node 5 :INTERCONNECT INFO: interconnect_handshake.cpp:375: Handshake [5:21:2058] [node 6] ICH04 handshake succeeded 2025-06-24T20:28:45.035468Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:338: Proxy [5:1:2048] [node 6] ICP20 outgoing handshake succeeded 2025-06-24T20:28:45.035562Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:460: Proxy [5:1:2048] [node 6] ICP052 dropped outgoing handshake: [5:21:2058] poison: false 2025-06-24T20:28:45.035618Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [5:1:2048] [node 6] ICP77 @350 PendingConnection -> StateWork 2025-06-24T20:28:45.035792Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:377: Proxy [5:1:2048] [node 6] ICP22 created new session: [5:26:2048] 2025-06-24T20:28:45.035873Z node 5 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:259: Session [5:26:2048] [node 6] ICS09 handshake done sender: [5:21:2058] self: [5:269013337:0] peer: [6:3943831144:0] socket: 24 2025-06-24T20:28:45.035940Z node 5 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:281: Session [5:26:2048] [node 6] ICS10 traffic start 2025-06-24T20:28:45.036028Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:297: Session [5:26:2048] [node 6] ICS11 registering socket in PollerActor 2025-06-24T20:28:45.036106Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [5:26:2048] [node 6] ICS23 confirm count: 0 2025-06-24T20:28:45.036170Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:336: Session [5:26:2048] [node 6] ICS06 rewind SendQueue size# 0 LastConfirmed# 0 NextSerial# 1 2025-06-24T20:28:45.036227Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [5:26:2048] [node 6] ICS23 confirm count: 0 2025-06-24T20:28:45.036286Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:222: Session [5:26:2048] [node 6] ICS04 subscribe for session state for [5:19:2057] 2025-06-24T20:28:45.043644Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:143: InputSession [5:27:2048] [node 6] 
ICIS01 InputSession created 2025-06-24T20:28:45.043802Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [5:27:2048] [node 6] ICIS02 ReceiveData called 2025-06-24T20:28:45.043897Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [5:27:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2025-06-24T20:28:45.044287Z node 6 :INTERCONNECT INFO: interconnect_handshake.cpp:375: Handshake [6:23:2058] [node 5] ICH04 handshake succeeded 2025-06-24T20:28:45.047891Z node 6 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:334: Proxy [6:10:2048] [node 5] ICP19 incoming handshake succeeded 2025-06-24T20:28:45.047976Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:445: Proxy [6:10:2048] [node 5] ICP111 dropped incoming handshake: [6:23:2058] poison: false 2025-06-24T20:28:45.048025Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [6:10:2048] [node 5] ICP77 @350 PendingConnection -> StateWork 2025-06-24T20:28:45.048142Z node 6 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:377: Proxy [6:10:2048] [node 5] ICP22 created new session: [6:28:2048] 2025-06-24T20:28:45.048198Z node 6 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:259: Session [6:28:2048] [node 5] ICS09 handshake done sender: [6:23:2058] self: [6:3943831144:0] peer: [5:269013337:0] socket: 25 2025-06-24T20:28:45.048256Z node 6 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:281: Session [6:28:2048] [node 5] ICS10 traffic start 2025-06-24T20:28:45.048317Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:297: Session [6:28:2048] [node 5] ICS11 registering socket in PollerActor 2025-06-24T20:28:45.048371Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [6:28:2048] [node 5] ICS23 confirm count: 0 2025-06-24T20:28:45.048406Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:336: Session [6:28:2048] [node 5] ICS06 rewind SendQueue size# 0 LastConfirmed# 0 NextSerial# 1 2025-06-24T20:28:45.048444Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [6:28:2048] [node 5] ICS23 confirm count: 0 2025-06-24T20:28:45.048516Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:143: InputSession [6:29:2048] [node 5] ICIS01 InputSession created 2025-06-24T20:28:45.048575Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [5:27:2048] [node 6] ICIS02 ReceiveData called 2025-06-24T20:28:45.048651Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [5:27:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2025-06-24T20:28:45.048728Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [5:27:2048] [node 6] ICIS02 ReceiveData called 2025-06-24T20:28:45.048769Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [5:27:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2025-06-24T20:28:45.048816Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [6:29:2048] [node 5] ICIS02 ReceiveData called 2025-06-24T20:28:45.048870Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [6:29:2048] [node 5] ICIS12 Read r ... 
37\360S\350" RequestXxhash: true RequestXdcShuffle: true 2025-06-24T20:28:45.090617Z node 5 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [5:42:2061] [node 6] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2025-06-24T20:28:45.091080Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.cpp:262: Proxy [5:1:2048] [node 6] ICP17 incoming handshake (actor [5:42:2061]) 2025-06-24T20:28:45.091162Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:445: Proxy [5:1:2048] [node 6] ICP111 dropped incoming handshake: [5:36:2060] poison: true 2025-06-24T20:28:45.091220Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.h:480: Proxy [5:1:2048] [node 6] ICP04 terminating current session as we are negotiating a new one 2025-06-24T20:28:45.091278Z node 5 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:96: Session [5:26:2048] [node 6] ICS01 socket: -1 reason# NewSession 2025-06-24T20:28:45.091338Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:542: Proxy [5:1:2048] [node 6] ICP30 unregister session Session# [5:26:2048] VirtualId# [5:269013337:0] 2025-06-24T20:28:45.091394Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [5:1:2048] [node 6] ICP77 @206 StateWork -> PendingActivation 2025-06-24T20:28:45.091469Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_channel.cpp:337: OutputChannel 0 [node 6] ICOCH89 Notyfying about Undelivered messages! NotYetConfirmed size: 1, Queue size: 0 2025-06-24T20:28:45.091545Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [5:1:2048] [node 6] ICP77 @488 PendingActivation -> PendingConnection 2025-06-24T20:28:45.091618Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.cpp:210: Proxy [5:1:2048] [node 6] ICP07 issued incoming handshake reply 2025-06-24T20:28:45.091694Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:219: Proxy [5:1:2048] [node 6] ICP08 No active sessions, becoming PendingConnection 2025-06-24T20:28:45.091743Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [5:1:2048] [node 6] ICP77 @220 PendingConnection -> PendingConnection 2025-06-24T20:28:45.092785Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:612: Handshake [5:42:2061] [node 6] ICH07 SendExBlock ExReply Success { Protocol: 2 ProgramPID: 95806 ProgramStartTime: 6384393146854 Serial: 269013338 SenderActorId: "[5:269013338:0]" ClusterUUIDs { ClusterUUID: "Cluster for process with id: 95806" AcceptUUID: "Cluster for process with id: 95806" } StartEncryption: false UseModernFrame: true AuthOnly: false UseExtendedTraceFmt: true UseExternalDataChannel: true UseXxhash: true UseXdcShuffle: true } 2025-06-24T20:28:45.094080Z node 6 :INTERCONNECT DEBUG: interconnect_handshake.cpp:612: Handshake [6:40:2063] [node 5] ICH07 ReceiveExBlock ExReply Success { Protocol: 2 ProgramPID: 95806 ProgramStartTime: 6384393146854 Serial: 269013338 SenderActorId: "[5:269013338:0]" ClusterUUIDs { ClusterUUID: "Cluster for process with id: 95806" AcceptUUID: "Cluster for process with id: 95806" } StartEncryption: false UseModernFrame: true AuthOnly: false UseExtendedTraceFmt: true UseExternalDataChannel: true UseXxhash: true UseXdcShuffle: true } 2025-06-24T20:28:45.094147Z node 6 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [6:40:2063] [node 5] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2025-06-24T20:28:45.094317Z node 6 :INTERCONNECT DEBUG: interconnect_resolve.cpp:128: ICR04 Host: ::1, RESOLVED address 
2025-06-24T20:28:45.096976Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_server.cpp:104: ICListener: [0:0:0] ICL04 Accepted from: ::1:54200 2025-06-24T20:28:45.097597Z node 6 :INTERCONNECT DEBUG: interconnect_handshake.cpp:612: Handshake [6:40:2063] [node 5] ICH07 SendExBlock ExternalDataChannelParams HandshakeId: "\350\320\225\211\037\372\216h\360\024\026}\305qR\222\366s\326\241\260\003\345q\261\244;\223\037\360S\350" 2025-06-24T20:28:45.097719Z node 6 :INTERCONNECT INFO: interconnect_handshake.cpp:375: Handshake [6:40:2063] [node 5] ICH04 handshake succeeded 2025-06-24T20:28:45.097972Z node 6 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:338: Proxy [6:10:2048] [node 5] ICP20 outgoing handshake succeeded 2025-06-24T20:28:45.098028Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:460: Proxy [6:10:2048] [node 5] ICP052 dropped outgoing handshake: [6:40:2063] poison: false 2025-06-24T20:28:45.098093Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [6:10:2048] [node 5] ICP77 @350 PendingConnection -> StateWork 2025-06-24T20:28:45.098231Z node 6 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:377: Proxy [6:10:2048] [node 5] ICP22 created new session: [6:45:2048] 2025-06-24T20:28:45.098319Z node 6 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:259: Session [6:45:2048] [node 5] ICS09 handshake done sender: [6:40:2063] self: [6:3943831145:0] peer: [5:269013338:0] socket: 25 2025-06-24T20:28:45.098378Z node 6 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:281: Session [6:45:2048] [node 5] ICS10 traffic start 2025-06-24T20:28:45.098444Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:297: Session [6:45:2048] [node 5] ICS11 registering socket in PollerActor 2025-06-24T20:28:45.098519Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [6:45:2048] [node 5] ICS23 confirm count: 0 2025-06-24T20:28:45.098563Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:336: Session [6:45:2048] [node 5] ICS06 rewind SendQueue size# 0 LastConfirmed# 0 NextSerial# 1 2025-06-24T20:28:45.098614Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [6:45:2048] [node 5] ICS23 confirm count: 0 2025-06-24T20:28:45.098690Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:147: Session [6:45:2048] [node 5] ICS02 send event from: [6:20:2057] to: [5:19:2057] 2025-06-24T20:28:45.099171Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:914: Handshake [5:44:2062] [node 0] ICH02 starting incoming handshake 2025-06-24T20:28:45.099302Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:143: InputSession [6:46:2048] [node 5] ICIS01 InputSession created 2025-06-24T20:28:45.100302Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:929: Session [6:45:2048] [node 5] ICS22 outgoing packet Serial# 1 Confirm# 0 DataSize# 84 InflightDataAmount# 84 2025-06-24T20:28:45.100409Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [6:45:2048] [node 5] ICS23 confirm count: 0 2025-06-24T20:28:45.100528Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [6:46:2048] [node 5] ICIS02 ReceiveData called 2025-06-24T20:28:45.100605Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [6:46:2048] [node 5] ICIS12 Read recvres# -11 num# 1 err# 2025-06-24T20:28:45.100975Z node 5 :INTERCONNECT INFO: interconnect_handshake.cpp:375: Handshake [5:42:2061] [node 6] ICH04 handshake 
succeeded 2025-06-24T20:28:45.101261Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [6:46:2048] [node 5] ICIS02 ReceiveData called 2025-06-24T20:28:45.101322Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [6:46:2048] [node 5] ICIS12 Read recvres# -11 num# 1 err# 2025-06-24T20:28:45.101380Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:334: Proxy [5:1:2048] [node 6] ICP19 incoming handshake succeeded 2025-06-24T20:28:45.101426Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:445: Proxy [5:1:2048] [node 6] ICP111 dropped incoming handshake: [5:42:2061] poison: false 2025-06-24T20:28:45.101472Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [5:1:2048] [node 6] ICP77 @350 PendingConnection -> StateWork 2025-06-24T20:28:45.101578Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:377: Proxy [5:1:2048] [node 6] ICP22 created new session: [5:47:2048] 2025-06-24T20:28:45.101626Z node 5 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:259: Session [5:47:2048] [node 6] ICS09 handshake done sender: [5:42:2061] self: [5:269013338:0] peer: [6:3943831145:0] socket: 26 2025-06-24T20:28:45.101673Z node 5 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:281: Session [5:47:2048] [node 6] ICS10 traffic start 2025-06-24T20:28:45.101748Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:297: Session [5:47:2048] [node 6] ICS11 registering socket in PollerActor 2025-06-24T20:28:45.101855Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [5:47:2048] [node 6] ICS23 confirm count: 0 2025-06-24T20:28:45.101892Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:336: Session [5:47:2048] [node 6] ICS06 rewind SendQueue size# 0 LastConfirmed# 0 NextSerial# 1 2025-06-24T20:28:45.101927Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [5:47:2048] [node 6] ICS23 confirm count: 0 2025-06-24T20:28:45.102082Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:143: InputSession [5:48:2048] [node 6] ICIS01 InputSession created 2025-06-24T20:28:45.102122Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [5:48:2048] [node 6] ICIS02 ReceiveData called 2025-06-24T20:28:45.102213Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [5:48:2048] [node 6] ICIS12 Read recvres# 106 num# 1 err# 2025-06-24T20:28:45.102280Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [5:48:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2025-06-24T20:28:45.102326Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [6:46:2048] [node 5] ICIS02 ReceiveData called 2025-06-24T20:28:45.102371Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [6:46:2048] [node 5] ICIS12 Read recvres# -11 num# 1 err# 2025-06-24T20:28:45.102419Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [5:48:2048] [node 6] ICIS02 ReceiveData called 2025-06-24T20:28:45.102485Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [5:48:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2025-06-24T20:28:45.102544Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [6:45:2048] [node 5] ICS23 confirm count: 0 2025-06-24T20:28:45.102590Z node 6 
:INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [6:45:2048] [node 5] ICS23 confirm count: 0 2025-06-24T20:28:45.102657Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [6:45:2048] [node 5] ICS23 confirm count: 0 2025-06-24T20:28:45.102683Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [6:45:2048] [node 5] ICS23 confirm count: 0 2025-06-24T20:28:45.102730Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [5:48:2048] [node 6] ICIS02 ReceiveData called 2025-06-24T20:28:45.102767Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [5:48:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2025-06-24T20:28:45.102823Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [5:47:2048] [node 6] ICS23 confirm count: 0 2025-06-24T20:28:45.102854Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [5:47:2048] [node 6] ICS23 confirm count: 0 2025-06-24T20:28:45.102910Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [5:47:2048] [node 6] ICS23 confirm count: 0 2025-06-24T20:28:45.102936Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [5:47:2048] [node 6] ICS23 confirm count: 0 |87.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table |87.7%| [TA] $(B)/ydb/core/wrappers/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestPingPongThroughSubChannel [GOOD] >> TInterconnectTest::TestBlobEventPreSerialized [GOOD] >> TInterconnectTest::OldNbs [GOOD] >> TInterconnectTest::TestBlobEventUpToMebibytes |87.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning |87.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning >> test_transform.py::TestYamlConfigTransformations::test_domains_config[dump_ds_init] [GOOD] |87.7%| [TA] {RESULT} $(B)/ydb/core/wrappers/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |87.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning ------- [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TActorTracker::Basic [GOOD] Test command err: ASYNC_DESTROYER |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestBlobEventDifferentSizesPreSerializedAndRaw [GOOD] >> TestProtocols::TestHTTPCollectedVerySlow [GOOD] >> TestProtocols::TestHTTPRequest >> TInterconnectTest::TestBlobEventUpToMebibytes [GOOD] >> TInterconnectTest::TestBlobEventsThroughSubChannels >> ClosedIntervalSet::EnumInRange [GOOD] >> ClosedIntervalSet::EnumInRangeReverse |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::OldNbs [GOOD] >> TYardTest::TestSlayLogWriteRaceActor [GOOD] >> TYardTest::TestMultiYardHarakiri >> TestProtocols::TestHTTPRequest [GOOD] >> TInterconnectTest::TestBlobEventsThroughSubChannels [GOOD] >> TInterconnectTest::TestManyEvents [GOOD] >> TInterconnectTest::TestCrossConnect >> BlobDepot::DecommitPutAndRead [GOOD] >> BlobDepot::DecommitVerifiedRandom |87.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication |87.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication |87.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TestProtocols::TestHTTPRequest [GOOD] |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/library/yaml_config/ut_transform/py3test >> test_transform.py::TestYamlConfigTransformations::test_domains_config[dump_ds_init] [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx >> TSchemeShardExtSubDomainTest::CreateItemsInsideExtSubdomainAtGSSwithoutTSS >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain >> TSchemeShardExtSubDomainTest::Fake [GOOD] >> TSchemeShardExtSubDomainTest::CreateWithOnlyDotsNotAllowed >> TSchemeShardExtSubDomainTest::Create |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestBlobEventsThroughSubChannels [GOOD] >> TExternalTableTest::ParallelCreateSameExternalTable >> TExternalTableTest::ParallelCreateExternalTable >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain >> TExternalTableTest::CreateExternalTable >> TExternalTableTest::SchemeErrors >> TSchemeShardExtSubDomainTest::CreateAndWait >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-false |87.7%| [TM] {RESULT} ydb/library/yaml_config/ut_transform/py3test >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-ExternalHive >> TExternalTableTest::ReplaceExternalTableIfNotExistsShouldFailIfFeatureFlagIsNotSet >> TSchemeShardExtSubDomainTest::NothingInsideGSS-AlterDatabaseCreateHiveFirst-true |87.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant |87.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant |87.7%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant >> TExternalTableTest::DropExternalTable >> 
TExternalTableTest::ReplaceExternalTableShouldFailIfEntityOfAnotherTypeWithSameNameExists >> TExternalTableTest::ReadOnlyMode >> TExternalTableTest::ReplaceExternalTableIfNotExists >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDrop >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCreateClean >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanWithRetry >> TExternalTableTest::DropTableTwice >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-AlterDatabaseCreateHiveFirst >> TExternalTableTest::ParallelCreateSameExternalTable [GOOD] |87.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer |87.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer |87.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer >> TSchemeShardExtSubDomainTest::CreateAndWait [GOOD] >> TSchemeShardExtSubDomainTest::Create [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlter >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-ExternalHive >> TSchemeShardExtSubDomainTest::CreateItemsInsideExtSubdomainAtGSSwithoutTSS [GOOD] >> TSchemeShardExtSubDomainTest::CreateWithExtraPathSymbolsAllowed-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain [GOOD] >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-ExternalHive >> TExternalTableTest::SchemeErrors [GOOD] >> TExternalTableTest::CreateExternalTable [GOOD] >> TExternalTableTest::CreateExternalTableShouldFailIfSuchEntityAlreadyExists >> TSchemeShardExtSubDomainTest::CreateWithOnlyDotsNotAllowed [GOOD] >> TSchemeShardExtSubDomainTest::NothingInsideGSS-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-false >> TExternalTableTest::ReplaceExternalTableIfNotExistsShouldFailIfFeatureFlagIsNotSet [GOOD] >> TExternalTableTest::DropExternalTable [GOOD] >> TExternalTableTest::Decimal >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-true >> TExternalTableTest::ParallelCreateExternalTable [GOOD] >> TExternalTableTest::ReplaceExternalTableIfNotExists [GOOD] >> TExternalTableTest::ReplaceExternalTableShouldFailIfEntityOfAnotherTypeWithSameNameExists [GOOD] >> TExternalTableTest::ReadOnlyMode [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeIncreased [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ParallelCreateSameExternalTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:112:2142] 2025-06-24T20:28:50.313383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.313506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.313554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:50.313613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.313674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.313712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.313792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.313894Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.314837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.323448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:50.530424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T20:28:50.530501Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:50.531421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.548845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:50.549859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:50.550056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:50.557620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:50.557883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:50.558629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.558911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:50.561620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.561823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:50.563075Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.563158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.563250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:50.563304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:50.563350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:50.563624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.571092Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:28:50.734192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:50.734483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.734779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:50.734837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:50.735124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:50.735399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:50.737960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.738215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: 
StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:50.738661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.738793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:50.738872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:50.738947Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:50.741767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.741860Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:50.741922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:50.744244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.744309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.744375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.744444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:50.748649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:50.751333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:50.751594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:50.752827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.752992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.753056Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.753397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:50.753469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.753701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:50.753813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28: ... ated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "NilNoviSubLuna" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.874636Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/NilNoviSubLuna" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:50.874817Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/NilNoviSubLuna" took 180us result status StatusSuccess 2025-06-24T20:28:50.875209Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/NilNoviSubLuna" PathDescription { Self { Name: "NilNoviSubLuna" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 125 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 
0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "NilNoviSubLuna" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 125 2025-06-24T20:28:50.875576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 125: send EvNotifyTxCompletion 2025-06-24T20:28:50.875620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 125 TestWaitNotification wait txId: 126 2025-06-24T20:28:50.875729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 126: send EvNotifyTxCompletion 2025-06-24T20:28:50.875755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 126 TestWaitNotification wait txId: 127 2025-06-24T20:28:50.875804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 127: send EvNotifyTxCompletion 2025-06-24T20:28:50.875840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 127 2025-06-24T20:28:50.876394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 125, at schemeshard: 72057594046678944 2025-06-24T20:28:50.876547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 125: got EvNotifyTxCompletionResult 2025-06-24T20:28:50.876586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 125: satisfy waiter [1:342:2331] 2025-06-24T20:28:50.876787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 126, at schemeshard: 72057594046678944 2025-06-24T20:28:50.876891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 126: got EvNotifyTxCompletionResult 2025-06-24T20:28:50.876917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 126: satisfy waiter [1:342:2331] 2025-06-24T20:28:50.877030Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 127, at schemeshard: 72057594046678944 2025-06-24T20:28:50.877128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 127: got EvNotifyTxCompletionResult 2025-06-24T20:28:50.877156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 127: satisfy waiter [1:342:2331] TestWaitNotification: OK eventTxId 125 TestWaitNotification: OK eventTxId 126 TestWaitNotification: OK eventTxId 127 2025-06-24T20:28:50.877684Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/NilNoviSubLuna" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:50.877892Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/NilNoviSubLuna" took 224us result status StatusSuccess 2025-06-24T20:28:50.878275Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/NilNoviSubLuna" PathDescription { Self { Name: "NilNoviSubLuna" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 125 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "NilNoviSubLuna" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 128 2025-06-24T20:28:50.881675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "NilNoviSubLuna" 
SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Uint64" } } } TxId: 128 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:50.882039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 128:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "NilNoviSubLuna" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Uint64" } } 2025-06-24T20:28:50.882144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 128:0, path# /MyRoot/NilNoviSubLuna 2025-06-24T20:28:50.882294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 128:1, propose status:StatusAlreadyExists, reason: Check failed: path: '/MyRoot/NilNoviSubLuna', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-06-24T20:28:50.884713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 128, response: Status: StatusAlreadyExists Reason: "Check failed: path: \'/MyRoot/NilNoviSubLuna\', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges)" TxId: 128 SchemeshardId: 72057594046678944 PathId: 3 PathCreateTxId: 125, at schemeshard: 72057594046678944 2025-06-24T20:28:50.884975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 128, database: /MyRoot, subject: , status: StatusAlreadyExists, reason: Check failed: path: '/MyRoot/NilNoviSubLuna', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges), operation: CREATE EXTERNAL TABLE, path: /MyRoot/NilNoviSubLuna TestModificationResult got TxId: 128, wait until txId: 128 >> TSchemeShardExtSubDomainTest::NothingInsideGSS-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::SysViewProcessorSync-AlterDatabaseCreateHiveFirst-false >> test.py::test[solomon-BasicExtractMembers-default.txt] [GOOD] >> test.py::test[solomon-Downsampling-default.txt] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-false >> ClosedIntervalSet::EnumInRangeReverse [GOOD] >> GivenIdRange::IssueNewRange [GOOD] >> GivenIdRange::Trim >> TReplicationTests::Create >> TReplicationTests::CreateSequential >> TExternalTableTest::DropTableTwice [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::SchemeErrors [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 
72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:112:2142] 2025-06-24T20:28:50.392736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.392844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.392889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:50.392928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.392984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.393037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.393121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.393222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.394043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.394439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:50.535788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T20:28:50.535858Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:50.536610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.552946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:50.553809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:50.553973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:50.560386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:50.560616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:50.561235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.561467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:50.564024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.564207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:50.565376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.565434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.565525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:50.565575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:50.565616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:50.565835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.572028Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:28:50.713899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:50.714189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.714422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:50.714476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:50.714722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:50.714872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:50.725622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted 
TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.725908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:50.726198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.726271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:50.726316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:50.726360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:50.737933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.738012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:50.738051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:50.746554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.746643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.746716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.746805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:50.751312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:50.757566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:50.757896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:50.759159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.759345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions 
{ TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.759405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.759748Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:50.759829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.760042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:50.760151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28: ... 025-06-24T20:28:50.915604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 126:0, path# /MyRoot/DirA/Table2 2025-06-24T20:28:50.915895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 126:1, propose status:StatusSchemeError, reason: Type 'BlaBlaType' specified for column 'RowId' is not supported by storage, at schemeshard: 72057594046678944 2025-06-24T20:28:50.917980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 126, response: Status: StatusSchemeError Reason: "Type \'BlaBlaType\' specified for column \'RowId\' is not supported by storage" TxId: 126 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.918230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 126, database: /MyRoot, subject: , status: StatusSchemeError, reason: Type 'BlaBlaType' specified for column 'RowId' is not supported by storage, operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 126, wait until txId: 126 TestModificationResults wait txId: 127 2025-06-24T20:28:50.921498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "" Type: "Uint64" } } } TxId: 127 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:50.921865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 127:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "" Type: "Uint64" } } 2025-06-24T20:28:50.922013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 
127:0, path# /MyRoot/DirA/Table2 2025-06-24T20:28:50.922189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 127:1, propose status:StatusSchemeError, reason: Columns cannot have an empty name, at schemeshard: 72057594046678944 2025-06-24T20:28:50.924521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 127, response: Status: StatusSchemeError Reason: "Columns cannot have an empty name" TxId: 127 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.924811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 127, database: /MyRoot, subject: , status: StatusSchemeError, reason: Columns cannot have an empty name, operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 127, wait until txId: 127 TestModificationResults wait txId: 128 2025-06-24T20:28:50.928082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" TypeId: 27 } } } TxId: 128 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:50.928397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 128:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" TypeId: 27 } } 2025-06-24T20:28:50.928483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 128:0, path# /MyRoot/DirA/Table2 2025-06-24T20:28:50.928608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 128:1, propose status:StatusSchemeError, reason: Cannot set TypeId for column 'RowId', use Type, at schemeshard: 72057594046678944 2025-06-24T20:28:50.930881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 128, response: Status: StatusSchemeError Reason: "Cannot set TypeId for column \'RowId\', use Type" TxId: 128 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.931136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 128, database: /MyRoot, subject: , status: StatusSchemeError, reason: Cannot set TypeId for column 'RowId', use Type, operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 128, wait until txId: 128 TestModificationResults wait txId: 129 2025-06-24T20:28:50.934364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" } } } TxId: 129 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:50.934740Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 129:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" } } 2025-06-24T20:28:50.934834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 129:0, path# /MyRoot/DirA/Table2 2025-06-24T20:28:50.934970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 129:1, propose status:StatusSchemeError, reason: Missing Type for column 'RowId', at schemeshard: 72057594046678944 2025-06-24T20:28:50.937429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 129, response: Status: StatusSchemeError Reason: "Missing Type for column \'RowId\'" TxId: 129 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.937672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 129, database: /MyRoot, subject: , status: StatusSchemeError, reason: Missing Type for column 'RowId', operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 129, wait until txId: 129 TestModificationResults wait txId: 130 2025-06-24T20:28:50.941068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" Type: "Uint64" Id: 2 } Columns { Name: "RowId2" Type: "Uint64" Id: 2 } } } TxId: 130 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:50.941420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 130:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" Type: "Uint64" Id: 2 } Columns { Name: "RowId2" Type: "Uint64" Id: 2 } } 2025-06-24T20:28:50.941505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 130:0, path# /MyRoot/DirA/Table2 2025-06-24T20:28:50.941699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 130:1, propose status:StatusSchemeError, reason: Duplicate column id: 2, at schemeshard: 72057594046678944 2025-06-24T20:28:50.944066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 130, response: Status: StatusSchemeError Reason: "Duplicate column id: 2" TxId: 130 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.944300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 130, database: /MyRoot, subject: , status: StatusSchemeError, reason: Duplicate column id: 2, operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 
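Editor's note: the SchemeErrors output above walks through the column-level checks that a CreateExternalTable proposal must pass: an unsupported type name, an empty column name, a TypeId supplied instead of Type, a missing Type, and a duplicate column id. As an illustration only, a minimal standalone validator over a simplified column struct is sketched below; the struct and function names are hypothetical and this is not the schemeshard implementation (which lives in schemeshard__operation_create_external_table.cpp).

```cpp
#include <optional>
#include <set>
#include <string>
#include <vector>

// Simplified stand-in for the column entries seen in the CreateExternalTable
// proposals above; the field names mirror the protobuf text in the log, but
// the struct itself is hypothetical.
struct TColumnSpec {
    std::string Name;
    std::string Type;            // e.g. "Uint64", "Utf8"
    std::optional<int> TypeId;   // rejected: callers must use Type instead
    std::optional<int> Id;
};

// Returns the first validation error, mirroring the StatusSchemeError reasons
// observed in the test output; an empty string means the columns are acceptable.
std::string ValidateColumns(const std::vector<TColumnSpec>& columns,
                            const std::set<std::string>& supportedTypes) {
    std::set<int> seenIds;
    for (const auto& c : columns) {
        if (c.Name.empty())
            return "Columns cannot have an empty name";
        if (c.TypeId)
            return "Cannot set TypeId for column '" + c.Name + "', use Type";
        if (c.Type.empty())
            return "Missing Type for column '" + c.Name + "'";
        if (!supportedTypes.count(c.Type))
            return "Type '" + c.Type + "' specified for column '" + c.Name +
                   "' is not supported by storage";
        if (c.Id && !seenIds.insert(*c.Id).second)
            return "Duplicate column id: " + std::to_string(*c.Id);
    }
    return "";
}
```

Each branch corresponds to one of the rejected proposals in the log (txId 126 through 130); the ordering of checks is inferred from the error each proposal actually produced.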
TestModificationResult got TxId: 130, wait until txId: 130 TestModificationResults wait txId: 131 2025-06-24T20:28:50.947838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource1" Location: "/" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value" Type: "Utf8" } } } TxId: 131 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:50.948167Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 131:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource1" Location: "/" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value" Type: "Utf8" } } 2025-06-24T20:28:50.948239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 131:0, path# /MyRoot/DirA/Table2 2025-06-24T20:28:50.948365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 131:1, propose status:StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/ExternalDataSource1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), at schemeshard: 72057594046678944 2025-06-24T20:28:50.950352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 131, response: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalDataSource1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" TxId: 131 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.950560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 131, database: /MyRoot, subject: , status: StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/ExternalDataSource1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 131, wait until txId: 131 >> TExternalTableTest::CreateExternalTableShouldFailIfSuchEntityAlreadyExists [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ReplaceExternalTableIfNotExistsShouldFailIfFeatureFlagIsNotSet [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:112:2142] 2025-06-24T20:28:50.819707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.819808Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.819852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:50.819888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.819932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.819961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.820025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.820111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.820927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.821310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:50.936109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T20:28:50.936177Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:50.936866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.957003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:50.959091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:50.959294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:50.974958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:50.975234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:50.975874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.976104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:50.982823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.983038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:50.984192Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.984249Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.984319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:50.984363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:50.984398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:50.984640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.991204Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:28:51.138891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:51.139193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.139409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:51.139456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:51.139663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:51.139841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:51.141818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.142007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:51.142281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.142361Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:51.142455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:51.142484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:51.145840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.145895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:51.145935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:51.147696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.147752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.147812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.147887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:51.151663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:51.153557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:51.153747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:51.154768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.154902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:51.154954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.155256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 
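Editor's note: the repeated "Change state for txid 1:0 2 -> 3 ... 3 -> 128 ... 128 -> 240" lines trace the per-suboperation state machine for the initial AlterSubDomain: create parts, configure parts, propose to the coordinator, done. The sketch below is inferred only from the numbers and progress messages in this log; the identifiers are guesses for illustration and are not the schemeshard enum.

```cpp
#include <cstdint>
#include <iostream>

// Hypothetical names for the numeric states seen in the log ("2 -> 3",
// "3 -> 128", "128 -> 240"); the values come from the log, the names do not.
enum class EOpState : uint8_t {
    CreateParts    = 2,    // "TCreateParts ... ProgressState"
    ConfigureParts = 3,    // "NSubDomainState::TConfigureParts ... ProgressState"
    Propose        = 128,  // "NSubDomainState::TPropose ProgressState"
    Done           = 240,  // reached after TEvOperationPlan is handled
};

// Advances through the same sequence the log records for txid 1:0.
EOpState Next(EOpState s) {
    switch (s) {
        case EOpState::CreateParts:    return EOpState::ConfigureParts;
        case EOpState::ConfigureParts: return EOpState::Propose;
        case EOpState::Propose:        return EOpState::Done;
        case EOpState::Done:           return EOpState::Done;
    }
    return s; // unreachable with the states above
}

int main() {
    EOpState s = EOpState::CreateParts;
    while (s != EOpState::Done) {
        EOpState n = Next(s);
        std::cout << "Change state " << static_cast<int>(s)
                  << " -> " << static_cast<int>(n) << "\n";
        s = n;
    }
}
```

Running this prints the same three transitions the log records before FAKE_COORDINATOR plans the transaction.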
2025-06-24T20:28:51.155328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.155519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:51.155602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28: ... schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T20:28:51.237341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T20:28:51.237379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-24T20:28:51.237409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-24T20:28:51.237439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-24T20:28:51.238366Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:28:51.238454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:28:51.238519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-24T20:28:51.238571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T20:28:51.238612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:28:51.239503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:28:51.239577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:28:51.239604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T20:28:51.239638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at 
schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-24T20:28:51.239668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:28:51.239725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T20:28:51.245648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T20:28:51.246501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T20:28:51.246729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T20:28:51.246777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T20:28:51.247229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T20:28:51.247338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T20:28:51.247390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:306:2295] TestWaitNotification: OK eventTxId 101 2025-06-24T20:28:51.247882Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:51.248075Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 214us result status StatusSuccess 2025-06-24T20:28:51.248357Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalDataSource" PathDescription { Self { Name: "ExternalDataSource" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 
AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "ExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 102 2025-06-24T20:28:51.250832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "ExternalTable" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } ReplaceIfExists: true } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:51.251070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 102:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "ExternalTable" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } ReplaceIfExists: true } 2025-06-24T20:28:51.251123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 102:0, explain: Invalid TCreateExternalTable request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, at schemeshard: 72057594046678944 2025-06-24T20:28:51.251173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 102:1, propose status:StatusPreconditionFailed, reason: Invalid TCreateExternalTable request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, at schemeshard: 72057594046678944 2025-06-24T20:28:51.254461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 102, response: Status: StatusPreconditionFailed Reason: "Invalid TCreateExternalTable request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:51.254719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Invalid TCreateExternalTable request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, operation: CREATE EXTERNAL TABLE, path: /MyRoot/ExternalTable TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T20:28:51.255055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T20:28:51.255095Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T20:28:51.255542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T20:28:51.255668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:28:51.255701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:314:2303] TestWaitNotification: OK eventTxId 102 2025-06-24T20:28:51.256162Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:51.256324Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 187us result status StatusPathDoesNotExist 2025-06-24T20:28:51.256480Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ExternalTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TExternalTableTest::Decimal [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ReplaceExternalTableShouldFailIfEntityOfAnotherTypeWithSameNameExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:112:2142] 2025-06-24T20:28:50.927118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.927230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.927261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:50.927291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.927322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.927368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.927419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.927484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.928070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.928347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:51.000521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T20:28:51.000588Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:51.001369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:51.024121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:51.027676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:51.027913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:51.037196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:51.037464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:51.038202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.038450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:51.041631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:51.041853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:51.043269Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:51.043344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:51.043489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:51.043539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain 
is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:51.043584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:51.043854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.051246Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:28:51.266832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:51.267058Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.267305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:51.267361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:51.267580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:51.267714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:51.270468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.270680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:51.270930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.271000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:51.271045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:51.271085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:51.273760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-24T20:28:51.273834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:51.273878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:51.275731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.275792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.275850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.275924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:51.278550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:51.280533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:51.280760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:51.281875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.282009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:51.282087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.282350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:51.282411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.282635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:51.282743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 
72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28: ... # 102:0 ProgressState 2025-06-24T20:28:51.387422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T20:28:51.387477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:28:51.387523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T20:28:51.387562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:28:51.387602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-24T20:28:51.387659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:28:51.387704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T20:28:51.387750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T20:28:51.387834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T20:28:51.387877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-24T20:28:51.387916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 6 2025-06-24T20:28:51.387950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-24T20:28:51.389003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:28:51.389104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:28:51.389141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:28:51.389187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 6 2025-06-24T20:28:51.389231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T20:28:51.390092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 
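Editor's note: the TEvUpdateAck handling here (and in the txId 101 sequence earlier) decrements a per-transaction "publication in-flight" counter, one ack per published path version; only when the counter reaches zero does the log show "Publication complete, notify & remove" and the waiting test subscriber is satisfied. Below is a minimal counter sketch with hypothetical names, assuming one ack per published path, modelled on those log lines rather than on the schemeshard source.

```cpp
#include <cstdint>
#include <map>

// Hypothetical bookkeeping for scheme-board publications, modelled on the
// "Publication in-flight, count: N" / "Publication complete" lines in the log.
struct TPublicationTracker {
    // txId -> number of path versions still waiting for TEvUpdateAck.
    std::map<uint64_t, int> InFlight;

    void StartPublication(uint64_t txId, int pathCount) {
        InFlight[txId] = pathCount;        // e.g. 2 paths for txId 102 above
    }

    // Returns true when the last ack arrives and the publication is complete.
    bool HandleUpdateAck(uint64_t txId) {
        auto it = InFlight.find(txId);
        if (it == InFlight.end())
            return false;                  // unknown or already completed tx
        if (--it->second > 0)
            return false;                  // still "Publication in-flight"
        InFlight.erase(it);                // "Publication complete, notify & remove"
        return true;
    }
};
```

For txId 102 the log shows exactly this shape: two acks (LocalPathId 1 at version 6, then LocalPathId 3 at version 2), the in-flight count going from 2 to 1, and then the completion notice that releases the test's waiter.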
2025-06-24T20:28:51.390211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:28:51.390257Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:28:51.390292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-24T20:28:51.390327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T20:28:51.390398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-24T20:28:51.394230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:28:51.394734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T20:28:51.394995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T20:28:51.395042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T20:28:51.395510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T20:28:51.395618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:28:51.395671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:331:2320] TestWaitNotification: OK eventTxId 102 2025-06-24T20:28:51.396189Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:51.396429Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 253us result status StatusSuccess 2025-06-24T20:28:51.396811Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalDataSource" PathDescription { Self { Name: "ExternalDataSource" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 
0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "ExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 103 2025-06-24T20:28:51.404056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "UniqueName" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } ReplaceIfExists: true } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:51.404418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 103:0, feature flag EnableReplaceIfExistsForExternalEntities 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "UniqueName" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } ReplaceIfExists: true } 2025-06-24T20:28:51.404541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_external_table.cpp:312: [72057594046678944] TAlterExternalTable Propose: opId# 103:0, path# /MyRoot/UniqueName, ReplaceIfExists: 1 2025-06-24T20:28:51.404683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 103:1, propose status:StatusNameConflict, reason: Check failed: path: '/MyRoot/UniqueName', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalTable, at schemeshard: 72057594046678944 2025-06-24T20:28:51.408161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 103, response: Status: StatusNameConflict Reason: "Check failed: path: \'/MyRoot/UniqueName\', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalTable" TxId: 103 
SchemeshardId: 72057594046678944 PathId: 2 PathCreateTxId: 101, at schemeshard: 72057594046678944 2025-06-24T20:28:51.408405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusNameConflict, reason: Check failed: path: '/MyRoot/UniqueName', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalTable, operation: CREATE EXTERNAL TABLE, path: /MyRoot/UniqueName TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-24T20:28:51.408779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T20:28:51.408825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T20:28:51.409219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T20:28:51.409304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:28:51.409341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:339:2328] TestWaitNotification: OK eventTxId 103 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ReadOnlyMode [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:112:2142] 2025-06-24T20:28:50.784346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.784451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.784487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:50.784522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.784571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.784615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.784685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.784777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, 
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.785522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.785887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:50.880361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T20:28:50.880405Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:50.881100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.901631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:50.902695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:50.902900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:50.910165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:50.910406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:50.911086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.911353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:50.914018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.914222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:50.915435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.915492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.915589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:50.915637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:50.915675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:50.915885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.924294Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:28:51.068601Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:51.068864Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.069082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:51.069130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:51.069343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:51.069496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:51.080138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.080339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:51.080586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.080647Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:51.080685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:51.080724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:51.083574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.083655Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:51.083700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:51.087318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.087375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, 
operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.087432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.087489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:51.091092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:51.095758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:51.096069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:51.097174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.097332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:51.097386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.097744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:51.097816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.098014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:51.098119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28: ... 
ogressState, at schemeshard: 72057594046678944 2025-06-24T20:28:51.555593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 129 ready parts: 1/1 2025-06-24T20:28:51.555721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 129 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:51.556290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 10 PathOwnerId: 72057594046678944, cookie: 129 2025-06-24T20:28:51.556386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 10 PathOwnerId: 72057594046678944, cookie: 129 2025-06-24T20:28:51.556431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 129 2025-06-24T20:28:51.556478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 10 2025-06-24T20:28:51.556520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 5 2025-06-24T20:28:51.557415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 129 2025-06-24T20:28:51.557492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 129 2025-06-24T20:28:51.557532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 129 2025-06-24T20:28:51.557564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 2 2025-06-24T20:28:51.557590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-24T20:28:51.557654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 129, ready parts: 0/1, is published: true 2025-06-24T20:28:51.559922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 129:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:129 msg type: 269090816 2025-06-24T20:28:51.560099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 129, partId: 4294967295, tablet: 72057594046316545 2025-06-24T20:28:51.560568Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 FAKE_COORDINATOR: Add transaction: 129 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 129 at step: 5000005 2025-06-24T20:28:51.562701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.562831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 129 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:51.562883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:33: MkDir::TPropose operationId# 129:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000005, at schemeshard: 72057594046678944 2025-06-24T20:28:51.563041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 129:0 128 -> 240 2025-06-24T20:28:51.563280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-24T20:28:51.563366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-24T20:28:51.563807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 2025-06-24T20:28:51.565913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:51.565959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 129, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:51.566131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 129, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-24T20:28:51.566263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:51.566310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:488:2443], at schemeshard: 72057594046678944, txId: 129, path id: 1 2025-06-24T20:28:51.566348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:488:2443], at schemeshard: 72057594046678944, txId: 129, path id: 5 FAKE_COORDINATOR: Erasing txId 129 2025-06-24T20:28:51.566664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 129:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.566706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 129:0 ProgressState 2025-06-24T20:28:51.566797Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:907: Part operation is done id#129:0 progress is 1/1 2025-06-24T20:28:51.566829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-06-24T20:28:51.566887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#129:0 progress is 1/1 2025-06-24T20:28:51.566918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-06-24T20:28:51.566953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 129, ready parts: 1/1, is published: false 2025-06-24T20:28:51.566995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-06-24T20:28:51.567028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 129:0 2025-06-24T20:28:51.567063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 129:0 2025-06-24T20:28:51.567135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-24T20:28:51.567202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 129, publications: 2, subscribers: 0 2025-06-24T20:28:51.567254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 129, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-24T20:28:51.567294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 129, [OwnerId: 72057594046678944, LocalPathId: 5], 3 2025-06-24T20:28:51.568197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 129 2025-06-24T20:28:51.568289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 129 2025-06-24T20:28:51.568323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 129 2025-06-24T20:28:51.568363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-24T20:28:51.568413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 5 2025-06-24T20:28:51.569262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 5 Version: 3 PathOwnerId: 72057594046678944, cookie: 129 2025-06-24T20:28:51.569347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 4 LocalPathId: 5 Version: 3 PathOwnerId: 72057594046678944, cookie: 129 2025-06-24T20:28:51.569387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 129 2025-06-24T20:28:51.569418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 3 2025-06-24T20:28:51.569447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-24T20:28:51.569509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 129, subscribers: 0 2025-06-24T20:28:51.574989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 2025-06-24T20:28:51.575306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 TestModificationResult got TxId: 129, wait until txId: 129 >> TSchemeShardExtSubDomainTest::NothingInsideGSS-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::Drop >> TStorageTenantTest::CreateTableInsideSubDomain2 >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst >> TStorageTenantTest::RemoveStoragePoolBeforeDroppingTablet >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::CreateAndAlter [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlter-ExternalHive >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-false >> TStorageTenantTest::CreateSolomonInsideSubDomain ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ParallelCreateExternalTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:112:2142] 2025-06-24T20:28:50.856526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.856638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.856679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 
2025-06-24T20:28:50.856740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.856794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.856831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.856899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.856991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.857826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.858216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:50.947686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T20:28:50.947776Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:50.948603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.965027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:50.965915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:50.966080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:50.973104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:50.973402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:50.974108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.974377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:50.977265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.977461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:50.978717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.978783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.978875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:50.978921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:50.978966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:50.979242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.986359Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:28:51.161771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:51.162006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.162225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:51.162274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:51.162485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:51.162651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:51.165613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.165835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:51.166086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.166158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:51.166202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:51.166244Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:51.169064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.169153Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:51.169205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:51.176095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.176165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.176222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.176286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:51.186192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:51.192167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:51.192438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:51.193449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.193613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:51.193672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.193947Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:51.194000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.194185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path 
for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:51.194286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28: ... 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable1" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false } Content: "" } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:51.370125Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/ExternalTable2" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:51.370363Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/ExternalTable2" took 246us result status StatusSuccess 2025-06-24T20:28:51.370795Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/ExternalTable2" PathDescription { Self { Name: "ExternalTable2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 127 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable2" PathId { OwnerId: 
72057594046678944 LocalId: 5 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key1" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false } Columns { Name: "key2" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false } Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 3 NotNull: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 4 NotNull: false } Content: "" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:51.372125Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:51.372326Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA" took 204us result status StatusSuccess 2025-06-24T20:28:51.373558Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA" PathDescription { Self { Name: "DirA" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 125 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 } ChildrenExist: true } Children { Name: "ExternalTable1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 126 CreateStep: 5000005 ParentPathId: 3 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "ExternalTable2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 127 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:51.374327Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/ExternalTable1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false 
ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:51.374574Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/ExternalTable1" took 228us result status StatusSuccess 2025-06-24T20:28:51.374954Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/ExternalTable1" PathDescription { Self { Name: "ExternalTable1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 126 CreateStep: 5000005 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable1" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false } Content: "" } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:51.375637Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/ExternalTable2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:51.375910Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/ExternalTable2" took 224us result status StatusSuccess 2025-06-24T20:28:51.376298Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/ExternalTable2" PathDescription { Self { Name: "ExternalTable2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 127 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable2" PathId { OwnerId: 72057594046678944 LocalId: 5 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key1" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false } Columns { Name: "key2" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false } Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 3 NotNull: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 4 NotNull: false } Content: "" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ReplaceExternalTableIfNotExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-24T20:28:50.915250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.915356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.915424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:50.915472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.915517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.915551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.915640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, 
WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.915723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.916583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.916973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:51.022593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T20:28:51.022666Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:51.023495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:51.053975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:51.054389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:51.054591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:51.084969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:51.085423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:51.086258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.086587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:51.093908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:51.094150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:51.095351Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:51.095420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:51.095504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:51.095549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:51.095591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:51.095811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 
72057594046678944 2025-06-24T20:28:51.120381Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:28:51.298425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:51.298682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.298845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:51.298882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:51.299071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:51.299129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:51.304301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.304537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:51.304780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.304853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:51.304898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:51.304939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:51.307098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.307208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:51.307258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:51.312275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.312348Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.312414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.312498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:51.315841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:51.320127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:51.320368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:51.321422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.321573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:51.321628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.321953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:51.322011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.322197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:51.322302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28: ... 
MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:51.512496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_external_table.cpp:58: [72057594046678944] TAlterExternalTable TPropose, operationId: 104:0 HandleReply TEvOperationPlan: step# 5000005 2025-06-24T20:28:51.512683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 104:0 128 -> 240 2025-06-24T20:28:51.513015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:28:51.513137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T20:28:51.514825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T20:28:51.515835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 FAKE_COORDINATOR: Erasing txId 104 2025-06-24T20:28:51.518168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:51.518222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:51.518417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T20:28:51.518496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T20:28:51.518610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:51.518664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 104, path id: 1 2025-06-24T20:28:51.518721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-24T20:28:51.518762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-24T20:28:51.519111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.519188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-24T20:28:51.519316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T20:28:51.519357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T20:28:51.519401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T20:28:51.519441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T20:28:51.519480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-24T20:28:51.519535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T20:28:51.519585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:0 2025-06-24T20:28:51.519627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:0 2025-06-24T20:28:51.519714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T20:28:51.519760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:28:51.519827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-06-24T20:28:51.519883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-24T20:28:51.519917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 4 2025-06-24T20:28:51.521413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T20:28:51.521515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T20:28:51.521612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-24T20:28:51.521666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-24T20:28:51.521719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T20:28:51.524130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T20:28:51.524248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 
72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T20:28:51.524285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-24T20:28:51.524331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-24T20:28:51.524376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T20:28:51.524475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-24T20:28:51.528078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T20:28:51.529322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-24T20:28:51.529602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-24T20:28:51.529662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-24T20:28:51.530188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-24T20:28:51.530304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T20:28:51.530347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:392:2381] TestWaitNotification: OK eventTxId 104 2025-06-24T20:28:51.531011Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:51.531312Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 310us result status StatusSuccess 2025-06-24T20:28:51.531755Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 3 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 
PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 3 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/other_location" Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsLocalRecovery::ChaoticWriteRestartHugeIncreased [GOOD] Test command err: 2025-06-24T20:28:19.968721Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:6:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.968757Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:720:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.968784Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:21:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.968808Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:794:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.968833Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:274:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.968855Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:229:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.968875Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:594:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.968901Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:774:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.968923Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:200:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.968945Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:716:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.969474Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) 
TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:298:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.969496Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:322:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.969524Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:764:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.969545Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:667:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.969567Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:69:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.969590Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:584:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.969616Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:648:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.969637Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:837:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.969658Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:531:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.969676Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:99:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.970366Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:36:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.970389Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:847:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.970410Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:701:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.970428Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:482:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.970456Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:866:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.970474Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:103:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.970494Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:681:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.970515Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:895:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.970537Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:171:0:0:66560:1] Marker# BSVS08 
2025-06-24T20:28:19.970555Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:609:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.972203Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:516:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.972227Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:244:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.972252Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:64:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.972271Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:337:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.972291Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:958:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.972311Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:614:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.972331Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:750:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.972352Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:380:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.972370Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:506:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.972386Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:65:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973021Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:541:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973042Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:108:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973062Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:546:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973079Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:113:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973100Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:861:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973118Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:613:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973134Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:31:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973149Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: 
PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:891:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973170Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:832:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973189Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:419:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973756Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:983:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973774Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:997:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973792Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:579:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973810Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:725:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973830Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:438:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973852Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:672:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973870Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:502:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973885Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:779:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973903Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:74:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.973919Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:871:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.974548Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:857:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.974564Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:682:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.974586Z :BS_VDISK_PUT CR ... 
[5000:1:137:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.977688Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:404:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.977707Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:161:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.977724Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:45:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.977743Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:949:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.977761Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:133:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.977779Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:434:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.977813Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:652:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.977830Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:852:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.977847Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:929:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.978525Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:361:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.978543Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:239:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.978562Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:371:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.978583Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:954:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.978601Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:346:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.978618Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:162:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.978637Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:511:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.978658Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:176:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.978676Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:409:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.978708Z :BS_VDISK_PUT 
CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:963:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.979371Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:118:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.979390Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:370:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.979412Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:551:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.979435Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:395:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.979454Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:424:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.979471Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:225:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.979499Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:711:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.979521Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:890:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.979540Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:195:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.979559Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:98:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980082Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:580:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980099Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:30:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980115Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:842:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980133Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:205:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980154Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:784:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980171Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:463:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980193Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:939:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980210Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: 
TEvVMultiPut has huge blob# [5000:1:696:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980235Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:147:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980254Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:448:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980740Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:439:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980773Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:341:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980818Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:234:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980837Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:920:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980855Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:618:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980873Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:157:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980896Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:905:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980914Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:308:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980930Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:823:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.980948Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:973:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.981503Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:278:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.981527Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:492:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.981544Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:16:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.981561Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:915:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.981578Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:191:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.981593Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:376:0:0:66560:1] Marker# BSVS08 
2025-06-24T20:28:19.981605Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:472:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.981617Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:706:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.981629Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:924:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.981644Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:264:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.982122Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:585:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.982143Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:1:0:0:66560:1] Marker# BSVS08 2025-06-24T20:28:19.982157Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:623: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:210:0:0:66560:1] Marker# BSVS08 >> TSchemeShardExtSubDomainTest::CreateWithExtraPathSymbolsAllowed-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::CreateWithExtraPathSymbolsAllowed-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-true >> GivenIdRange::Trim [GOOD] >> GivenIdRange::Subtract ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::DropTableTwice [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:112:2142] 2025-06-24T20:28:51.264399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:51.264486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:51.264525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:51.264559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:51.264606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:51.264648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:51.264714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:51.264794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:51.265540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:51.265876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:51.351100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T20:28:51.351184Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:51.351898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:51.368556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:51.369479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:51.369648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:51.376601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:51.376865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:51.377491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.377717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:51.380328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:51.380508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:51.381636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:51.381694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:51.381775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:51.381831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:51.381871Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:51.382091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.388829Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:28:51.534012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:51.534244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.534451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:51.534515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:51.534741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:51.534912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:51.537408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.537605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:51.537849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.537913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:51.537959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:51.538002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:51.540276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.540341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: 
NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:51.540384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:51.542520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.542592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.542653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.542711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:51.546475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:51.549220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:51.549456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:51.550448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.550596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:51.550643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.550894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:51.550947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.551117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:51.551232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28: ... 
D INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:51.769238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:51.769392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T20:28:51.769530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:28:51.769670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:51.769716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 103, path id: 1 2025-06-24T20:28:51.769761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-24T20:28:51.769797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 103, path id: 2 FAKE_COORDINATOR: Erasing txId 103 2025-06-24T20:28:51.770140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.770188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-24T20:28:51.770301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:28:51.770335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:28:51.770371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:28:51.770403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:28:51.770462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-24T20:28:51.770517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:28:51.770576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:0 2025-06-24T20:28:51.770608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T20:28:51.770693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T20:28:51.770740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T20:28:51.770781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 103, publications: 3, subscribers: 0 2025-06-24T20:28:51.770817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-24T20:28:51.770845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-24T20:28:51.770870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-24T20:28:51.771404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:28:51.771495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:28:51.771526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:28:51.771572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-24T20:28:51.771619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T20:28:51.772225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T20:28:51.772270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T20:28:51.772358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T20:28:51.772871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:28:51.772965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:28:51.772999Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:28:51.773026Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-24T20:28:51.773053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:28:51.774816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:28:51.774909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:28:51.774941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:28:51.774969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-24T20:28:51.775004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:28:51.775074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-24T20:28:51.777892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T20:28:51.777990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T20:28:51.779894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T20:28:51.779981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-24T20:28:51.780270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T20:28:51.780317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T20:28:51.780747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T20:28:51.780857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:28:51.780893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:370:2359] TestWaitNotification: OK eventTxId 103 2025-06-24T20:28:51.781416Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:51.781651Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 225us result status StatusPathDoesNotExist 2025-06-24T20:28:51.781833Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ExternalTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::CreateExternalTableShouldFailIfSuchEntityAlreadyExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:112:2142] 2025-06-24T20:28:50.464199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.464308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.464361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:50.464412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.464451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.464487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.464561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.464641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: 
Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.465432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.465780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:50.565934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T20:28:50.566005Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:50.566747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.581260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:50.582173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:50.582359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:50.594735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:50.595073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:50.595793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.596023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:50.608308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.608572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:50.609943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.610026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.610116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:50.610163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:50.610232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:50.610469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.621958Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] 
sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:28:50.889805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:50.889996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.890164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:50.890216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:50.890436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:50.890599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:50.896059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.896247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:50.896496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.896563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:50.896602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:50.896661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:50.898458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.898514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:50.898574Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:50.900260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.900309Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.900361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.900416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:50.904072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:50.905744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:50.905930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:50.906888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.907009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.907055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.907307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:50.907358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.907520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:50.907613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28: ... 
RD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-24T20:28:52.159957Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T20:28:52.160841Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:28:52.160934Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:28:52.160967Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:28:52.160995Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-24T20:28:52.161027Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:28:52.161106Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-24T20:28:52.162804Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:28:52.164688Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:28:52.164777Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T20:28:52.165025Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T20:28:52.165088Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T20:28:52.165550Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T20:28:52.165657Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:28:52.165698Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:337:2326] TestWaitNotification: OK eventTxId 102 2025-06-24T20:28:52.166235Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false 
ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:52.166455Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 258us result status StatusSuccess 2025-06-24T20:28:52.166865Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 103 2025-06-24T20:28:52.170451Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "ExternalTable" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/new_location" Columns { Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Uint64" } } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:52.170854Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 103:0, feature flag EnableReplaceIfExistsForExternalEntities 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "ExternalTable" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/new_location" Columns { Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Uint64" } } 2025-06-24T20:28:52.170948Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] 
TCreateExternalTable Propose: opId# 103:0, path# /MyRoot/ExternalTable 2025-06-24T20:28:52.171090Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 103:1, propose status:StatusAlreadyExists, reason: Check failed: path: '/MyRoot/ExternalTable', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-06-24T20:28:52.173889Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 103, response: Status: StatusAlreadyExists Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges)" TxId: 103 SchemeshardId: 72057594046678944 PathId: 3 PathCreateTxId: 102, at schemeshard: 72057594046678944 2025-06-24T20:28:52.174157Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusAlreadyExists, reason: Check failed: path: '/MyRoot/ExternalTable', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges), operation: CREATE EXTERNAL TABLE, path: /MyRoot/ExternalTable TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-24T20:28:52.174559Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T20:28:52.174614Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T20:28:52.175060Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T20:28:52.175194Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:28:52.175238Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [2:345:2334] TestWaitNotification: OK eventTxId 103 2025-06-24T20:28:52.175758Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:52.175976Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 248us result status StatusSuccess 2025-06-24T20:28:52.176332Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { 
SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> GivenIdRange::Subtract [GOOD] >> GivenIdRange::Points >> TReplicationTests::Create [GOOD] >> TReplicationTests::ConsistencyLevel ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::Decimal [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:112:2142] 2025-06-24T20:28:50.846234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.846320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.846356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:50.846388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.846435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.846475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.846568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.846648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.851562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.851932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:50.948739Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T20:28:50.948799Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:50.949447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.964540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:50.965599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:50.965745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:50.977699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:50.977932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:50.978592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.978808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:50.985077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.985249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:50.986310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.986362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.986433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:50.986474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:50.986537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:50.986717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.999940Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 
2025-06-24T20:28:51.135167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:51.135405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.135610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:51.135656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:51.135890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:51.136022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:51.138756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.138968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:51.139255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.139319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:51.139360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:51.139403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:51.141537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.141594Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:51.141649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:51.143591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.143636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.143687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.143751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:51.147390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:51.149044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:51.149248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:51.150187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.150306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:51.150358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.150645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:51.150703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.150865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:51.150960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28: ... 
at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T20:28:52.275496Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:28:52.275662Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:52.275699Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:210:2210], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-24T20:28:52.275741Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:210:2210], at schemeshard: 72057594046678944, txId: 101, path id: 3 2025-06-24T20:28:52.275771Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:210:2210], at schemeshard: 72057594046678944, txId: 101, path id: 3 2025-06-24T20:28:52.275808Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:210:2210], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-24T20:28:52.276087Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:28:52.276141Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T20:28:52.276267Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T20:28:52.276310Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:28:52.276356Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T20:28:52.276401Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:28:52.276447Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-24T20:28:52.276517Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:28:52.276566Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T20:28:52.276604Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T20:28:52.276688Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T20:28:52.276732Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T20:28:52.276776Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 101, publications: 3, subscribers: 0 
2025-06-24T20:28:52.276813Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-24T20:28:52.276848Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-24T20:28:52.276876Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-24T20:28:52.278016Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:28:52.278127Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:28:52.278177Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 101 2025-06-24T20:28:52.278227Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-24T20:28:52.278275Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T20:28:52.279840Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:28:52.279967Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:28:52.280025Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-24T20:28:52.280067Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-24T20:28:52.280105Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T20:28:52.281101Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:28:52.281185Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:28:52.281219Z node 2 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T20:28:52.281252Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-24T20:28:52.281302Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:28:52.281392Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T20:28:52.283310Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T20:28:52.285172Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T20:28:52.285273Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T20:28:52.285508Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T20:28:52.285577Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T20:28:52.286048Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T20:28:52.286167Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T20:28:52.286215Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:337:2326] TestWaitNotification: OK eventTxId 101 2025-06-24T20:28:52.286764Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:52.287040Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 321us result status StatusSuccess 2025-06-24T20:28:52.287525Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 
PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Decimal(35,9)" TypeId: 4865 Id: 1 NotNull: false TypeInfo { DecimalPrecision: 35 DecimalScale: 9 } } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive >> TReplicationTests::CreateSequential [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeDecreased [GOOD] >> TReplicationTests::CreateInParallel >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-true >> GivenIdRange::Points [GOOD] >> GivenIdRange::Runs >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-false >> GivenIdRange::Runs [GOOD] >> GivenIdRange::Allocate >> TSchemeShardExtSubDomainTest::CreateAndAlter-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlter-AlterDatabaseCreateHiveFirst |87.8%| [TA] $(B)/ydb/core/tx/schemeshard/ut_external_table/test-results/unittest/{meta.json ... 
results_accumulator.log} >> GivenIdRange::Allocate [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-ExternalHive >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-ExternalHive >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-ExternalHive >> TSchemeShardExtSubDomainTest::SysViewProcessorSync-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::SysViewProcessorSync-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::Drop [GOOD] >> TSchemeShardExtSubDomainTest::Drop-ExternalHive >> TStorageTenantTest::LsLs |87.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_change_exchange.cpp |87.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-ExternalHive |87.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut |87.8%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_table/test-results/unittest/{meta.json ... results_accumulator.log} |87.8%| [LD] {RESULT} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut >> TStorageTenantTest::CreateDummyTabletsInDifferentDomains >> TSchemeShardExtSubDomainTest::CreateWithExtraPathSymbolsAllowed-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::CreateNameConflicts-AlterDatabaseCreateHiveFirst-false >> TReplicationTests::ConsistencyLevel [GOOD] >> TReplicationTests::Alter >> TStorageTenantTest::Boot |87.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_change_exchange.cpp ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsLocalRecovery::ChaoticWriteRestartHugeDecreased [GOOD] Test command err: 2025-06-24T20:28:18.901999Z :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2989} PDiskId# 1 ownerId# 5 invalid OwnerRound, got# 101 expected# 151 error in TLogWrite for ownerId# 5 ownerRound# 101 lsn# 16 PDiskId# 1 >> TReplicationTests::CreateInParallel [GOOD] >> TReplicationTests::CreateDropRecreate >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-false >> TTransferTests::Create >> TStorageTenantTest::RemoveStoragePoolAndCreateOneMore >> TTransferTests::Create_Disabled >> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst |87.8%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |87.8%| [LD] {RESULT} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |87.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration >> TStorageTenantTest::DeclareAndDefine >> TStorageTenantTest::CreateTableInsideSubDomain >> TStorageTenantTest::GenericCases |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blob_depot/ut/unittest >> GivenIdRange::Allocate [GOOD] |87.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace >> Mirror3of4::ReplicationHuge [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlter-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlter-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst |87.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace |87.8%| [TM] {RESULT} ydb/core/blob_depot/ut/unittest |87.8%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace |87.8%| [TA] $(B)/ydb/core/blobstorage/ut_vdisk/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst >> TSchemeShardExtSubDomainTest::Drop-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst >> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::CreateNameConflicts-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::CreateNameConflicts-AlterDatabaseCreateHiveFirst-true >> TReplicationTests::Alter [GOOD] >> TReplicationTests::CannotAddReplicationConfig >> BlobDepot::DecommitVerifiedRandom [GOOD] |87.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup >> TReplicationTests::CreateDropRecreate [GOOD] |87.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup >> TReplicationTests::CreateWithoutCredentials |87.8%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk/test-results/unittest/{meta.json ... 
results_accumulator.log} |87.8%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::SysViewProcessorSync-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::StatisticsAggregatorSync-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst-ExternalHive ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_mirror3of4/unittest >> Mirror3of4::ReplicationHuge [GOOD] Test command err: 2025-06-24T20:27:47.255992Z 1 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:0:0]: (0) SKELETON START Marker# BSVS37 2025-06-24T20:27:47.256312Z 2 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:1:0]: (0) SKELETON START Marker# BSVS37 2025-06-24T20:27:47.256496Z 3 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:2:0]: (0) SKELETON START Marker# BSVS37 2025-06-24T20:27:47.256676Z 4 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:3:0]: (0) SKELETON START Marker# BSVS37 2025-06-24T20:27:47.256844Z 5 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:4:0]: (0) SKELETON START Marker# BSVS37 2025-06-24T20:27:47.257035Z 6 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:5:0]: (0) SKELETON START Marker# BSVS37 2025-06-24T20:27:47.257169Z 7 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:6:0]: (0) SKELETON START Marker# BSVS37 2025-06-24T20:27:47.257334Z 8 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:7:0]: (0) SKELETON START Marker# BSVS37 2025-06-24T20:27:47.257778Z 1 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:0:0]: (0) LocalRecovery START 2025-06-24T20:27:47.257863Z 1 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Sending TEvYardInit: pdiskGuid# 17962622651541064853 skeletonid# [1:139:13] selfid# [1:155:22] delay 0.000000 sec 2025-06-24T20:27:47.257935Z 2 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:1:0]: (0) LocalRecovery START 2025-06-24T20:27:47.257989Z 2 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) Sending TEvYardInit: pdiskGuid# 3012415997019635033 skeletonid# [2:140:11] selfid# [2:156:12] delay 0.000000 sec 2025-06-24T20:27:47.258028Z 3 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:2:0]: (0) LocalRecovery START 2025-06-24T20:27:47.258070Z 3 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) Sending TEvYardInit: pdiskGuid# 3685541132163410450 skeletonid# [3:141:11] selfid# [3:157:12] delay 0.000000 sec 2025-06-24T20:27:47.258130Z 4 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:3:0]: (0) LocalRecovery START 2025-06-24T20:27:47.258167Z 4 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:3:0]: (0) Sending TEvYardInit: pdiskGuid# 15059534241133457905 skeletonid# [4:142:11] selfid# [4:158:12] delay 0.000000 sec 
2025-06-24T20:27:47.258201Z 5 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:4:0]: (0) LocalRecovery START 2025-06-24T20:27:47.258235Z 5 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:4:0]: (0) Sending TEvYardInit: pdiskGuid# 1254591372743085457 skeletonid# [5:143:11] selfid# [5:159:12] delay 0.000000 sec 2025-06-24T20:27:47.258269Z 6 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:5:0]: (0) LocalRecovery START 2025-06-24T20:27:47.258302Z 6 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) Sending TEvYardInit: pdiskGuid# 3428540879204156296 skeletonid# [6:144:11] selfid# [6:160:12] delay 0.000000 sec 2025-06-24T20:27:47.258335Z 7 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:6:0]: (0) LocalRecovery START 2025-06-24T20:27:47.258398Z 7 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:6:0]: (0) Sending TEvYardInit: pdiskGuid# 10683358342047120937 skeletonid# [7:145:11] selfid# [7:161:12] delay 0.000000 sec 2025-06-24T20:27:47.258433Z 8 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:7:0]: (0) LocalRecovery START 2025-06-24T20:27:47.258467Z 8 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:7:0]: (0) Sending TEvYardInit: pdiskGuid# 2379803874306131016 skeletonid# [8:146:11] selfid# [8:162:12] delay 0.000000 sec 2025-06-24T20:27:47.258851Z 1 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:463} PDiskMock[1:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:0:0] PDiskGuid# 17962622651541064853 CutLogID# [1:139:13] WhiteboardProxyId# [1:122:10] SlotId# 0 GroupSizeInUnits# 0} 2025-06-24T20:27:47.259798Z 1 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:500} PDiskMock[1:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 OwnerWeight# 1 SlotSizeInUnits# 0 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-06-24T20:27:47.259925Z 2 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:463} PDiskMock[2:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:1:0] PDiskGuid# 3012415997019635033 CutLogID# [2:140:11] WhiteboardProxyId# [2:124:10] SlotId# 0 GroupSizeInUnits# 0} 2025-06-24T20:27:47.259985Z 2 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:500} PDiskMock[2:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 OwnerWeight# 1 SlotSizeInUnits# 0 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-06-24T20:27:47.260046Z 3 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:463} PDiskMock[3:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:2:0] PDiskGuid# 3685541132163410450 CutLogID# [3:141:11] WhiteboardProxyId# [3:126:10] SlotId# 0 GroupSizeInUnits# 0} 2025-06-24T20:27:47.260110Z 3 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:500} PDiskMock[3:1] sending 
TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 OwnerWeight# 1 SlotSizeInUnits# 0 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-06-24T20:27:47.260168Z 4 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:463} PDiskMock[4:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:3:0] PDiskGuid# 15059534241133457905 CutLogID# [4:142:11] WhiteboardProxyId# [4:128:10] SlotId# 0 GroupSizeInUnits# 0} 2025-06-24T20:27:47.260233Z 4 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:500} PDiskMock[4:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 OwnerWeight# 1 SlotSizeInUnits# 0 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-06-24T20:27:47.260287Z 5 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:463} PDiskMock[5:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:4:0] PDiskGuid# 1254591372743085457 CutLogID# [5:143:11] WhiteboardProxyId# [5:130:10] SlotId# 0 GroupSizeInUnits# 0} 2025-06-24T20:27:47.260343Z 5 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:500} PDiskMock[5:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 OwnerWeight# 1 SlotSizeInUnits# 0 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-06-24T20:27:47.260392Z 6 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:463} PDiskMock[6:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:5:0] PDiskGuid# 3428540879204156296 CutLogID# [6:144:11] WhiteboardProxyId# [6:132:10] SlotId# 0 GroupSizeInUnits# 0} 2025-06-24T20:27:47.260439Z 6 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:500} PDiskMock[6:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 OwnerWeight# 1 SlotSizeInUnits# 0 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-06-24T20:27:47.260500Z 7 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:463} PDiskMock[7:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:6:0] PDiskGuid# 10683358342047120937 CutLogID# [7:145:11] WhiteboardProxyId# [7:134:10] SlotId# 0 GroupSizeInUnits# 0} 2025-06-24T20:27:47.260563Z 7 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:500} PDiskMock[7:1] sending TEvYardInitResult Msg# 
{EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 OwnerWeight# 1 SlotSizeInUnits# 0 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-06-24T20:27:47.260612Z 8 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:463} PDiskMock[8:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:7:0] PDiskGuid# 2379803874306131016 CutLogID# [8:146:11] WhiteboardProxyId# [8:136:10] SlotId# 0 GroupSizeInUnits# 0} 2025-06-24T20:27:47.260665Z 8 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:500} PDiskMock[8:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 OwnerWeight# 1 SlotSizeInUnits# 0 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-06-24T20:27:47.262178Z 1 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:0:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# false Lsn# NotSet] Blocks# [ExplicitlySet# true Derived# false Lsn# NotSet] Barriers# [ExplicitlySet# true Derived# false Lsn# NotSet] SyncLog# 0 2025-06-24T20:27:47.263345Z 2 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:1:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# false Lsn# NotSet] Blocks# [ExplicitlySet# true Derived# false Lsn# NotSet] Barriers# [ExplicitlySet# true Derived# false Lsn# NotSet] SyncLog# 0 2025-06-24T20:27:47.264340Z 3 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:2:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# false Lsn# NotSet] Blocks# [ExplicitlySet# true Derived# false Lsn# NotSet] Barriers# [ExplicitlySet# true Derived# false Lsn# NotSet] SyncLog# 0 2025-06-24T20:27:47.265214Z 4 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:3:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# false Lsn# NotSet] Blocks# [ExplicitlySet# true Derived# false Lsn# NotSet] Barriers# [E ... 
PDISK DEBUG: {PDM12@pdisk_mock.cpp:671} PDiskMock[7:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 25 Cookie# 0}} Recipient# [7:345:29] 2025-06-24T20:28:55.166134Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM11@pdisk_mock.cpp:609} PDiskMock[8:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 583 Lsn# 25 LsnSegmentStart# 25 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:7:0] 2025-06-24T20:28:55.166174Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:671} PDiskMock[8:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 25 Cookie# 0}} Recipient# [8:355:29] 2025-06-24T20:28:55.169203Z 7 00h00m00.000000s :BS_PDISK DEBUG: {PDM11@pdisk_mock.cpp:609} PDiskMock[7:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 584 Lsn# 26 LsnSegmentStart# 26 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:6:0] 2025-06-24T20:28:55.169270Z 7 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:671} PDiskMock[7:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 26 Cookie# 0}} Recipient# [7:345:29] 2025-06-24T20:28:55.169359Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM11@pdisk_mock.cpp:609} PDiskMock[8:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 583 Lsn# 26 LsnSegmentStart# 26 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:7:0] 2025-06-24T20:28:55.169397Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:671} PDiskMock[8:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 26 Cookie# 0}} Recipient# [8:355:29] 2025-06-24T20:28:55.169653Z 2 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-06-24T20:28:55.169930Z 7 00h00m00.000000s :BS_PDISK DEBUG: {PDM11@pdisk_mock.cpp:609} PDiskMock[7:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 584 Lsn# 27 LsnSegmentStart# 27 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:6:0] 2025-06-24T20:28:55.169970Z 7 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:671} PDiskMock[7:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 27 Cookie# 0}} Recipient# [7:345:29] 2025-06-24T20:28:55.170020Z 2 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) GLUEREAD(0x51100052ef80): {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 89335319806528} 2025-06-24T20:28:55.170080Z 8 00h00m00.000000s 
:BS_PDISK DEBUG: {PDM11@pdisk_mock.cpp:609} PDiskMock[8:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 583 Lsn# 27 LsnSegmentStart# 27 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:7:0] 2025-06-24T20:28:55.170117Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:671} PDiskMock[8:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 27 Cookie# 0}} Recipient# [8:355:29] 2025-06-24T20:28:55.170230Z 2 00h00m00.000000s :BS_PDISK DEBUG: {PDM13@pdisk_mock.cpp:761} PDiskMock[2:1] received TEvChunkRead Msg# {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 89335319806528} VDiskId# [0:4294967295:0:1:0] 2025-06-24T20:28:55.171218Z 2 00h00m00.000000s :BS_PDISK DEBUG: {PDM14@pdisk_mock.cpp:801} PDiskMock[2:1] sending TEvChunkReadResult Msg# {EvChunkReadres Status# OK ErrorReason# "" chunkIdx# 1 Offset# 5 DataSize# 1048576 Cookie# 89335319806528 StatusFlags# None} 2025-06-24T20:28:55.171410Z 2 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) GLUEREAD FINISHED(0x51100052ef80): actualReadN# 1 origReadN# 1 2025-06-24T20:28:55.171805Z 2 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:2] OK Size# 1048576 FullDataSize# 1048576 PayloadId# 0 Data# 1048576b Ingress# 1369701526376808448} BlockedGeneration# 0} 2025-06-24T20:28:55.175558Z 3 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-06-24T20:28:55.176677Z 3 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) GLUEREAD(0x511000538bc0): {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 89335319967808} 2025-06-24T20:28:55.177202Z 3 00h00m00.000000s :BS_PDISK DEBUG: {PDM13@pdisk_mock.cpp:761} PDiskMock[3:1] received TEvChunkRead Msg# {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 89335319967808} VDiskId# [0:4294967295:0:2:0] 2025-06-24T20:28:55.178332Z 3 00h00m00.000000s :BS_PDISK DEBUG: {PDM14@pdisk_mock.cpp:801} PDiskMock[3:1] sending TEvChunkReadResult Msg# {EvChunkReadres Status# OK ErrorReason# "" chunkIdx# 1 Offset# 5 DataSize# 1048576 Cookie# 89335319967808 StatusFlags# None} 2025-06-24T20:28:55.178511Z 3 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) GLUEREAD FINISHED(0x511000538bc0): actualReadN# 1 origReadN# 1 2025-06-24T20:28:55.178659Z 3 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:1] OK Size# 1048576 FullDataSize# 1048576 PayloadId# 0 Data# 1048576b Ingress# 2522623030983655424} BlockedGeneration# 0} 2025-06-24T20:28:55.181469Z 4 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:3:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 
MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-06-24T20:28:55.181797Z 4 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:3:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:3] OK Size# 0 FullDataSize# 1048576 BufferData# Ingress# 793240774073384960} BlockedGeneration# 0} 2025-06-24T20:28:55.182748Z 5 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:4:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-06-24T20:28:55.182999Z 5 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:4:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:3] OK Size# 0 FullDataSize# 1048576 BufferData# Ingress# 793240774073384960} BlockedGeneration# 0} 2025-06-24T20:28:55.183881Z 6 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-06-24T20:28:55.184154Z 6 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) GLUEREAD(0x5110005bd3c0): {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 89335319821888} 2025-06-24T20:28:55.184256Z 6 00h00m00.000000s :BS_PDISK DEBUG: {PDM13@pdisk_mock.cpp:761} PDiskMock[6:1] received TEvChunkRead Msg# {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 89335319821888} VDiskId# [0:4294967295:0:5:0] 2025-06-24T20:28:55.185431Z 6 00h00m00.000000s :BS_PDISK DEBUG: {PDM14@pdisk_mock.cpp:801} PDiskMock[6:1] sending TEvChunkReadResult Msg# {EvChunkReadres Status# OK ErrorReason# "" chunkIdx# 1 Offset# 5 DataSize# 1048576 Cookie# 89335319821888 StatusFlags# None} 2025-06-24T20:28:55.185520Z 6 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) GLUEREAD FINISHED(0x5110005bd3c0): actualReadN# 1 origReadN# 1 2025-06-24T20:28:55.185678Z 6 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:2] OK Size# 1048576 FullDataSize# 1048576 PayloadId# 0 Data# 1048576b Ingress# 1946162278680231936} {[1:1:1:0:0:1048576:3] OK Size# 0 FullDataSize# 1048576 BufferData# Ingress# 1946162278680231936} BlockedGeneration# 0} 2025-06-24T20:28:55.194325Z 7 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:6:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead 
IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-06-24T20:28:55.194706Z 7 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:6:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:0] NODATA Ingress# 216780021769961472} BlockedGeneration# 0} 2025-06-24T20:28:55.195806Z 8 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:7:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-06-24T20:28:55.196090Z 8 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:7:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:0] NODATA Ingress# 216780021769961472} BlockedGeneration# 0} |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-true >> TTransferTests::Create_Disabled [GOOD] >> TTransferTests::CreateWithoutCredentials |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> TTransferTests::Create [GOOD] >> TTransferTests::CreateSequential >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> TSchemeShardExtSubDomainTest::CreateAndAlter-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool ------- [TS] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/unittest >> BlobDepot::DecommitVerifiedRandom [GOOD] Test command err: Mersenne random seed 817558118 RandomSeed# 5145173874831068798 Mersenne random seed 612049996 Mersenne random seed 2395594773 Mersenne random seed 4248497498 Mersenne random seed 1862657749 2025-06-24T20:28:12.386437Z 1 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.386618Z 3 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.386681Z 4 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 
2025-06-24T20:28:12.386740Z 7 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.386796Z 8 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.386852Z 2 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:2] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.386921Z 6 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.386978Z 5 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.395427Z 1 00h00m25.012048s :BS_PROXY_PUT ERROR: [bbc5f8e4ec8bb573] Result# TEvPutResult {Id# [15:1:1:0:1:100:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000000:1:0:0:0]" ApproximateFreeSpaceShare# 0} GroupId# 2181038080 Marker# BPP12 2025-06-24T20:28:12.396882Z 1 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:2] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.397087Z 2 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.397146Z 3 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.397201Z 6 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.397256Z 7 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.397327Z 5 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.397382Z 8 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.397438Z 4 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.457426Z 1 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.457670Z 5 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: 
(2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.457730Z 6 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.457802Z 4 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:2] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.457858Z 3 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.457913Z 8 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.457969Z 7 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.458025Z 2 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:28:12.458299Z 1 00h00m25.012048s :BS_PROXY_PUT ERROR: [8cecb5d29727fe1d] Result# TEvPutResult {Id# [16:2:2:0:2:100:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000000:1:0:0:0]" ApproximateFreeSpaceShare# 0} GroupId# 2181038080 Marker# BPP12 Mersenne random seed 3996460083 Read over the barrier, blob id# [15:1:1:0:1:100:0] Read over the barrier, blob id# [15:1:2:0:1:100:0] 2025-06-24T20:28:15.860992Z 1 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-06-24T20:28:15.861372Z 2 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-06-24T20:28:15.861459Z 3 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-06-24T20:28:15.861532Z 4 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-06-24T20:28:15.861606Z 5 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-06-24T20:28:15.861680Z 6 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-06-24T20:28:15.861775Z 7 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: 
existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-06-24T20:28:15.861870Z 8 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 Put over the barrier, blob id# [15:1:1:0:99:100:0] Put over the barrier, blob id# [15:1:3:0:99:100:0] 2025-06-24T20:28:15.969777Z 1 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-06-24T20:28:15.970131Z 2 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-06-24T20:28:15.970244Z 3 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-06-24T20:28:15.970348Z 4 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-06-24T20:28:15.970442Z 5 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-06-24T20:28:15.970532Z 6 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-06-24T20:28:15.970616Z 7 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-06-24T20:28:15.970709Z 8 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 Read over the barrier, blob id# [15:1:5:0:1:100:0] Read over the barrier, blob id# [15:1:6:0:1:100:0] Read over the barrier, blob id# [15:1:19:0:1:100:0] Read over the barrier, blob id# [15:2:1:0:1:100:0] Read over the barrier, blob id# [15:2:2:0:1:100:0] TEvRange returned collected blob with id# [15:1:17:0:1:100:0] TEvRange returned collected blob with id# [15:1:19:0:1:100:0] TEvRange returned collected blob with id# [15:2:1:0:1:100:0] TEvRange returned collected blob with id# [15:2:2:0:1:100:0] TEvRange returned collected blob with id# [15:2:3:0:1:100:0] TEvRange returned collected blob with id# [15:2:4:0:1:100:0] TEvRange returned collected blob with id# [15:2:5:0:1:100:0] TEvRange returned collected blob with id# [15:2:6:0:1:100:0] Read over the barrier, blob id# [100:1:3:0:1:100:0] Read over the barrier, blob id# [100:1:5:0:1:100:0] Read over the barrier, blob id# [100:1:6:0:1:100:0] Read over the barrier, blob id# [100:2:1:0:1:100:0] Read over the barrier, blob id# [100:2:2:0:1:100:0] TEvRange returned collected blob with id# [100:2:2:0:1:100:0] TEvRange returned collected blob with id# [100:2:3:0:1:100:0] TEvRange returned 
collected blob with id# [100:2:4:0:1:100:0] TEvRange returned collected blob with id# [100:2:5:0:1:100:0] TEvRange returned collected blob with id# [100:2:6:0:1:100:0] Mersenne random seed 722566741 TEvRange returned collected blob with id# [101:1:1:2:15639855:118:0] TEvRange returned collected blob with id# [101:1:1:2:15639855:118:0] TEvRange returned collected blob with id# [101:1:1:2:15639855:118:0] Read over the barrier, blob id# [101:1:1:2:15639855:118:0] Read over the bar ... :12732348:530:0] TEvRange returned collected blob with id# [15:2:3:2:13420720:453:0] Read over the barrier, blob id# [17:2:2:2:209812:336:0] Read over the barrier, blob id# [17:2:2:2:9211028:112:0] 2025-06-24T20:28:53.335347Z 2 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [15 2 25 5 soft] new key# [15 2 25 4 soft] new barrier# 4:1 2025-06-24T20:28:53.337066Z 1 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [15 2 25 5 soft] new key# [15 2 25 4 soft] new barrier# 4:1 2025-06-24T20:28:53.337351Z 3 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [15 2 25 5 soft] new key# [15 2 25 4 soft] new barrier# 4:1 2025-06-24T20:28:53.337539Z 4 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [15 2 25 5 soft] new key# [15 2 25 4 soft] new barrier# 4:1 2025-06-24T20:28:53.337714Z 5 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [15 2 25 5 soft] new key# [15 2 25 4 soft] new barrier# 4:1 2025-06-24T20:28:53.350495Z 6 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [15 2 25 5 soft] new key# [15 2 25 4 soft] new barrier# 4:1 2025-06-24T20:28:53.350874Z 7 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [15 2 25 5 soft] new key# [15 2 25 4 soft] new barrier# 4:1 2025-06-24T20:28:53.351053Z 8 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [15 2 25 5 soft] new key# [15 2 25 4 soft] new barrier# 4:1 Read over the barrier, blob id# [15:2:3:1:616642:797:0] Read over the barrier, blob id# [15:4:10:0:12348641:978:0] Read over the barrier, blob id# [15:2:3:1:616642:797:0] Read over the barrier, blob id# [15:2:6:1:13970261:531:0] Read over the barrier, blob id# [16:1:2:1:15025604:147:0] Read over the barrier, blob id# [15:2:6:1:13970261:531:0] Read over the barrier, blob id# [15:2:3:1:616642:797:0] Read over the barrier, blob id# [15:2:3:0:12605821:515:0] Read over the barrier, blob id# [16:1:2:1:15025604:147:0] Read over the barrier, blob id# [17:4:5:0:895401:821:0] Read over the barrier, blob id# [15:2:3:1:616642:797:0] Read over the barrier, blob id# [15:2:6:1:6697616:957:0] Read over the barrier, blob id# [15:2:6:1:13970261:531:0] Read over the barrier, blob id# [15:2:6:1:13970261:531:0] Read over the barrier, blob id# [15:2:6:1:6697616:957:0] Read over the barrier, blob id# [15:1:1:2:12732348:530:0] 
Read over the barrier, blob id# [15:2:6:0:2512877:809:0] Read over the barrier, blob id# [15:4:10:0:12348641:978:0] Read over the barrier, blob id# [15:1:1:2:8443799:244:0] Read over the barrier, blob id# [15:1:1:2:12732348:530:0] Read over the barrier, blob id# [15:2:3:2:13420720:453:0] 2025-06-24T20:28:54.064483Z 1 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [16 2 19 3 hard] new key# [16 2 19 0 hard] new barrier# 4:0 2025-06-24T20:28:54.065503Z 2 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [16 2 19 3 hard] new key# [16 2 19 0 hard] new barrier# 4:0 2025-06-24T20:28:54.065689Z 3 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [16 2 19 3 hard] new key# [16 2 19 0 hard] new barrier# 4:0 2025-06-24T20:28:54.065843Z 4 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [16 2 19 3 hard] new key# [16 2 19 0 hard] new barrier# 4:0 2025-06-24T20:28:54.065998Z 5 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [16 2 19 3 hard] new key# [16 2 19 0 hard] new barrier# 4:0 2025-06-24T20:28:54.066150Z 6 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [16 2 19 3 hard] new key# [16 2 19 0 hard] new barrier# 4:0 2025-06-24T20:28:54.066312Z 7 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [16 2 19 3 hard] new key# [16 2 19 0 hard] new barrier# 4:0 2025-06-24T20:28:54.066461Z 8 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [16 2 19 3 hard] new key# [16 2 19 0 hard] new barrier# 4:0 TEvRange returned collected blob with id# [15:2:3:1:616642:797:0] TEvRange returned collected blob with id# [15:2:6:1:6697616:957:0] TEvRange returned collected blob with id# [15:2:6:1:13970261:531:0] Read over the barrier, blob id# [15:2:3:1:616642:797:0] Read over the barrier, blob id# [15:1:1:2:12732348:530:0] Read over the barrier, blob id# [15:4:10:0:12348641:978:0] Read over the barrier, blob id# [15:2:3:2:13420720:453:0] Read over the barrier, blob id# [15:1:1:2:8443799:244:0] Read over the barrier, blob id# [15:4:10:0:12348641:978:0] Read over the barrier, blob id# [15:1:1:2:12732348:530:0] Read over the barrier, blob id# [15:1:1:2:12732348:530:0] Read over the barrier, blob id# [15:1:1:2:8443799:244:0] Read over the barrier, blob id# [15:2:6:1:6697616:957:0] 2025-06-24T20:28:54.294366Z 1 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 25 5 hard] barrier# 4:2 new key# [15 0 34 0 hard] barrier# 4:1 2025-06-24T20:28:54.295718Z 2 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 25 5 hard] barrier# 4:2 new key# [15 0 34 0 hard] barrier# 4:1 2025-06-24T20:28:54.295911Z 3 
00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 25 5 hard] barrier# 4:2 new key# [15 0 34 0 hard] barrier# 4:1 2025-06-24T20:28:54.296099Z 4 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 25 5 hard] barrier# 4:2 new key# [15 0 34 0 hard] barrier# 4:1 2025-06-24T20:28:54.296286Z 5 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 25 5 hard] barrier# 4:2 new key# [15 0 34 0 hard] barrier# 4:1 2025-06-24T20:28:54.296493Z 6 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 25 5 hard] barrier# 4:2 new key# [15 0 34 0 hard] barrier# 4:1 2025-06-24T20:28:54.296669Z 7 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 25 5 hard] barrier# 4:2 new key# [15 0 34 0 hard] barrier# 4:1 2025-06-24T20:28:54.296829Z 8 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 25 5 hard] barrier# 4:2 new key# [15 0 34 0 hard] barrier# 4:1 TEvRange returned collected blob with id# [15:2:3:1:616642:797:0] TEvRange returned collected blob with id# [15:2:6:1:6697616:957:0] TEvRange returned collected blob with id# [15:2:6:1:13970261:531:0] TEvRange returned collected blob with id# [15:1:1:2:8443799:244:0] TEvRange returned collected blob with id# [15:1:1:2:12732348:530:0] TEvRange returned collected blob with id# [15:2:3:2:13420720:453:0] Read over the barrier, blob id# [16:2:4:2:1201355:811:0] Read over the barrier, blob id# [16:2:4:2:3018133:420:0] Read over the barrier, blob id# [16:2:4:2:1201355:811:0] Read over the barrier, blob id# [17:3:2:2:14295292:878:0] Read over the barrier, blob id# [16:1:2:1:15025604:147:0] Read over the barrier, blob id# [16:2:4:0:15027508:344:0] Read over the barrier, blob id# [16:2:4:2:1201355:811:0] Read over the barrier, blob id# [17:3:3:2:15667175:501:0] Read over the barrier, blob id# [16:2:4:2:1201355:811:0] Read over the barrier, blob id# [16:2:4:0:15027508:344:0] Read over the barrier, blob id# [15:1:1:2:8443799:244:0] Read over the barrier, blob id# [15:2:6:0:2512877:809:0] Read over the barrier, blob id# [15:2:6:0:2512877:809:0] TEvRange returned collected blob with id# [15:1:1:2:8443799:244:0] TEvRange returned collected blob with id# [15:1:1:2:12732348:530:0] TEvRange returned collected blob with id# [15:2:3:2:13420720:453:0] Read over the barrier, blob id# [15:2:6:0:2512877:809:0] Read over the barrier, blob id# [15:2:3:1:616642:797:0] Read over the barrier, blob id# [15:4:10:0:12348641:978:0] Read over the barrier, blob id# [15:2:6:1:13970261:531:0] Read over the barrier, blob id# [15:1:1:2:12732348:530:0] Read over the barrier, blob id# [16:2:4:1:15878092:802:0] Read over the barrier, blob id# [15:1:1:2:8443799:244:0] Read over the barrier, blob id# [15:4:10:0:12348641:978:0] Read over the barrier, blob id# [16:2:4:0:7863003:685:0] 2025-06-24T20:28:55.216992Z 6 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 2 26 0 soft] barrier# 1:0 new key# 
[17 2 37 0 soft] barrier# 0:1 2025-06-24T20:28:55.217494Z 1 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 2 26 0 soft] barrier# 1:0 new key# [17 2 37 0 soft] barrier# 0:1 2025-06-24T20:28:55.217739Z 2 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 2 26 0 soft] barrier# 1:0 new key# [17 2 37 0 soft] barrier# 0:1 2025-06-24T20:28:55.217899Z 3 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 2 26 0 soft] barrier# 1:0 new key# [17 2 37 0 soft] barrier# 0:1 2025-06-24T20:28:55.218054Z 4 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 2 26 0 soft] barrier# 1:0 new key# [17 2 37 0 soft] barrier# 0:1 2025-06-24T20:28:55.218213Z 5 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 2 26 0 soft] barrier# 1:0 new key# [17 2 37 0 soft] barrier# 0:1 2025-06-24T20:28:55.218433Z 7 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 2 26 0 soft] barrier# 1:0 new key# [17 2 37 0 soft] barrier# 0:1 2025-06-24T20:28:55.218604Z 8 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 2 26 0 soft] barrier# 1:0 new key# [17 2 37 0 soft] barrier# 0:1 Read over the barrier, blob id# [15:5:11:1:7716766:273:0] >> TReplicationTests::CreateWithoutCredentials [GOOD] >> TReplicationTests::Describe >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst-ExternalHive >> TReplicationTests::CannotAddReplicationConfig [GOOD] >> TReplicationTests::CannotSetAsyncReplicaAttribute >> TSchemeShardExtSubDomainTest::CreateNameConflicts-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::CreateThenDropChangesParent-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-false |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TTransferTests::CreateWithoutCredentials [GOOD] >> TTransferTests::CreateWrongConfig >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> test.py::test[solomon-Downsampling-default.txt] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:28:50.447760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.447850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.447899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:50.447937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.447996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.448050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.448125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.448212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.449061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.449409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:50.576396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:28:50.576458Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:50.590228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:50.594742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:50.594958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:50.616984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:50.617239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:50.617973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 
2025-06-24T20:28:50.618256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:50.629417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.629612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:50.630920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.630982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.631107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:50.631186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:50.631236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:50.631399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.653605Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:28:50.892712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:50.892944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.893200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:50.893251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:50.893465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:50.893550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:50.896014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: 
TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.896209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:50.896439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.896490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:50.896549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:50.896585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:50.900335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.900420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:50.900461Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:50.904777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.904836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.904895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.904944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:50.908535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:50.913281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:50.913553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:50.914604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.914767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.914828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.915202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:50.915267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.915436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:50.915516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28:50.918002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.918054Z node 1 :FLAT_TX_SCHEMESHARD ... n_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:57.082472Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:57.084770Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:57.084842Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:57.084908Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:57.086997Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:57.087059Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:57.087120Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:57.087214Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:57.087422Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:57.089295Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 
cookie: 0:1 msg type: 269090816 2025-06-24T20:28:57.089510Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:57.090556Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:57.090727Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 30064773230 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:57.090798Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:57.091100Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:57.091193Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:57.091417Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:57.091515Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28:57.094326Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:57.094394Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:57.094661Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:57.094723Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:208:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-24T20:28:57.095163Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:57.095232Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T20:28:57.095375Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T20:28:57.095423Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:28:57.095478Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done 
id#1:0 progress is 1/1 2025-06-24T20:28:57.095521Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:28:57.095575Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T20:28:57.095630Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:28:57.095698Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T20:28:57.095744Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T20:28:57.095839Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:28:57.095896Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T20:28:57.095942Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T20:28:57.096619Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T20:28:57.096755Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T20:28:57.096814Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T20:28:57.096877Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T20:28:57.096934Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:57.097052Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T20:28:57.100368Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T20:28:57.100960Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-24T20:28:57.101632Z node 7 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [7:271:2260] Bootstrap 2025-06-24T20:28:57.128860Z node 7 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [7:271:2260] Become StateWork (SchemeCache [7:276:2265]) 2025-06-24T20:28:57.132316Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: 
ESchemeOpAlterExtSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "USER_1" ExternalSchemeShard: true } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:57.132541Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1079: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 101:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "USER_1" ExternalSchemeShard: true } 2025-06-24T20:28:57.132606Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1085: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 101:0, path /MyRoot/USER_1 2025-06-24T20:28:57.132785Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 101:0, explain: Invalid AlterExtSubDomain request: Check failed: path: '/MyRoot/USER_1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), at schemeshard: 72057594046678944 2025-06-24T20:28:57.132848Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 101:1, propose status:StatusPathDoesNotExist, reason: Invalid AlterExtSubDomain request: Check failed: path: '/MyRoot/USER_1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), at schemeshard: 72057594046678944 2025-06-24T20:28:57.133852Z node 7 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [7:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T20:28:57.136976Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusPathDoesNotExist Reason: "Invalid AlterExtSubDomain request: Check failed: path: \'/MyRoot/USER_1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:57.137272Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusPathDoesNotExist, reason: Invalid AlterExtSubDomain request: Check failed: path: '/MyRoot/USER_1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), operation: ALTER DATABASE, path: /MyRoot/USER_1 2025-06-24T20:28:57.137977Z node 7 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 >> test.py::test[solomon-DownsamplingValidSettings-default.txt] >> TReplicationTests::Describe [GOOD] >> TReplicationTests::CreateReplicatedTable >> TTransferTests::CreateSequential [GOOD] >> TTransferTests::CreateInParallel >> TSchemeShardExtSubDomainTest::StatisticsAggregatorSync-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::StatisticsAggregatorSync-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::CreateThenDropChangesParent-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::CreateThenDropChangesParent-AlterDatabaseCreateHiveFirst-true >> TReplicationTests::CannotSetAsyncReplicaAttribute [GOOD] >> 
TReplicationTests::AlterReplicatedTable >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:28:50.439677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.439777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.439819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:50.439878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.439927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.439956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.440027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.440107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.440932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.441296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:50.543687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:28:50.543734Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:50.561609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:50.566466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:50.566664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:50.574721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:50.574963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:50.575715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.576013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:50.579031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.579242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:50.580328Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.580398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.580525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:50.580596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:50.580654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:50.580799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.588160Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:28:50.740677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:50.740934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.741211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:50.741279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:50.741540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:50.741628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, 
but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:50.748352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.748613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:50.748894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.748968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:50.749024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:50.749084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:50.756069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.756146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:50.756203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:50.764364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.764470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.764542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.764628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:50.775708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:50.784077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:50.784353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 
State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:50.785592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.785761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.785846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.786205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:50.786269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.786459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:50.786580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28:50.792298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.792366Z node 1 :FLAT_TX_SCHEMESHARD ... 
678944, LocalPathId: 2], Generation: 2, ActorId:[7:400:2368], EffectiveACLVersion: 0, SubdomainVersion: 4, UserAttributesVersion: 1, TenantHive: 72075186233409546, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 4, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 72075186233409546, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T20:28:57.437801Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186234409546 2025-06-24T20:28:57.437837Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186234409546, txId: 0, path id: [OwnerId: 72075186234409546, LocalPathId: 1] 2025-06-24T20:28:57.437977Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186234409546 2025-06-24T20:28:57.438027Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:496:2436], at schemeshard: 72075186234409546, txId: 0, path id: 1 2025-06-24T20:28:57.439515Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72075186234409546, msg: Owner: 72075186234409546 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186234409546, cookie: 0 2025-06-24T20:28:57.439827Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:28:57.439880Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-24T20:28:57.440040Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:28:57.440093Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:28:57.440145Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:28:57.440191Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:28:57.440244Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-24T20:28:57.440308Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:28:57.440354Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:0 2025-06-24T20:28:57.440397Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T20:28:57.440490Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-24T20:28:57.442449Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 
2025-06-24T20:28:57.442555Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-24T20:28:57.443963Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T20:28:57.444024Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T20:28:57.444533Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T20:28:57.444659Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:28:57.444723Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:573:2511] TestWaitNotification: OK eventTxId 103 2025-06-24T20:28:57.445318Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:57.445536Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 252us result status StatusSuccess 2025-06-24T20:28:57.446018Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409548 SchemeShard: 72075186234409546 Hive: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at 
schemeshard: 72057594046678944 2025-06-24T20:28:57.446598Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:57.446796Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 243us result status StatusSuccess 2025-06-24T20:28:57.447250Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409548 SchemeShard: 72075186234409546 Hive: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:57.447862Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186234409546 2025-06-24T20:28:57.448066Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186234409546 describe path "/MyRoot/USER_0" took 227us result status StatusSuccess 2025-06-24T20:28:57.448509Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "MyRoot/USER_0" PathId: 1 SchemeshardId: 72075186234409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409548 SchemeShard: 72075186234409546 Hive: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot/USER_0" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72075186234409546, at schemeshard: 72075186234409546 >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:28:50.236974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.237062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.237107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:50.237143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.237200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.237237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.237303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.237381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, 
InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.238132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.238455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:50.323636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:28:50.323687Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:50.337181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:50.341681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:50.341866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:50.350101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:50.350365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:50.351085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.351442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:50.354078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.354256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:50.355393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.355453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.355605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:50.355667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:50.355709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:50.355841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.363416Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:28:50.494142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:50.494400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.494698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:50.494745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:50.494966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:50.495047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:50.497490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.497693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:50.497995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.498062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:50.498109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:50.498143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:50.500172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.500242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:50.500290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:50.501707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.501769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.501808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.501857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:50.504450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:50.506021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:50.506200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:50.506963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.507089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.507132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.507404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:50.507456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.507604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:50.507713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28:50.510148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.510230Z node 1 :FLAT_TX_SCHEMESHARD ... 
p Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:57.611863Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 103 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 30064773230 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:57.611935Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 103:0, at tablet# 72057594046678944 2025-06-24T20:28:57.612268Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 103:0 128 -> 240 2025-06-24T20:28:57.612344Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 103:0, at tablet# 72057594046678944 2025-06-24T20:28:57.612517Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-24T20:28:57.612788Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:576: Send TEvUpdateTenantSchemeShard, to actor: [7:396:2363], msg: TabletId: 72057594046678944 Generation: 2 StoragePools { Name: "pool-1" Kind: "hdd" } SubdomainVersion: 4 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 }, at schemeshard: 72057594046678944 2025-06-24T20:28:57.615864Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5902: Handle TEvUpdateTenantSchemeShard, at schemeshard: 72075186234409546, msg: TabletId: 72057594046678944 Generation: 2 StoragePools { Name: "pool-1" Kind: "hdd" } SubdomainVersion: 4 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } 2025-06-24T20:28:57.616091Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:79: TTxUpdateTenant DoExecute, msg: TabletId: 72057594046678944 Generation: 2 StoragePools { Name: "pool-1" Kind: "hdd" } SubdomainVersion: 4 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 }, at schemeshard: 72075186234409546 2025-06-24T20:28:57.616308Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:589: Cannot publish paths for unknown operation id#0 
FAKE_COORDINATOR: Erasing txId 103 2025-06-24T20:28:57.616691Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:57.616752Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:28:57.616990Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:57.617051Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:208:2208], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-24T20:28:57.617543Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:28:57.617614Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:761: [72057594046678944] TSyncHive, operationId 103:0, ProgressState, NeedSyncHive: 0 2025-06-24T20:28:57.617671Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 103:0 240 -> 240 2025-06-24T20:28:57.618744Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:28:57.618881Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:28:57.618935Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:28:57.618995Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 6 2025-06-24T20:28:57.619058Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-24T20:28:57.619390Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-24T20:28:57.623270Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5889: Handle TEvSyncTenantSchemeShard, at schemeshard: 72057594046678944, msg: DomainSchemeShard: 72057594046678944 DomainPathId: 2 TabletID: 72075186234409546 Generation: 2 EffectiveACLVersion: 0 SubdomainVersion: 4 UserAttributesVersion: 1 TenantHive: 72075186233409546 TenantSysViewProcessor: 18446744073709551615 TenantRootACL: "" TenantStatisticsAggregator: 18446744073709551615 TenantGraphShard: 18446744073709551615 2025-06-24T20:28:57.623381Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:26: TTxSyncTenant DoExecute, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T20:28:57.623497Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:568: DoUpdateTenant no hasChanges, 
pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[7:396:2363], EffectiveACLVersion: 0, SubdomainVersion: 4, UserAttributesVersion: 1, TenantHive: 72075186233409546, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 4, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 72075186233409546, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T20:28:57.623698Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186234409546 2025-06-24T20:28:57.623738Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186234409546, txId: 0, path id: [OwnerId: 72075186234409546, LocalPathId: 1] 2025-06-24T20:28:57.623894Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186234409546 2025-06-24T20:28:57.623930Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:489:2429], at schemeshard: 72075186234409546, txId: 0, path id: 1 2025-06-24T20:28:57.624827Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72075186234409546, msg: Owner: 72075186234409546 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186234409546, cookie: 0 2025-06-24T20:28:57.625153Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T20:28:57.625243Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T20:28:57.625362Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:28:57.625432Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-24T20:28:57.625585Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:28:57.625641Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:28:57.625690Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:28:57.625736Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:28:57.625783Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-24T20:28:57.625839Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:28:57.625886Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:0 
2025-06-24T20:28:57.625932Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T20:28:57.626020Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 TestModificationResult got TxId: 103, wait until txId: 104 TestModificationResults wait txId: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 103 2025-06-24T20:28:57.628382Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T20:28:57.628447Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T20:28:57.628990Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T20:28:57.629142Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:28:57.629196Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:564:2502] TestWaitNotification: OK eventTxId 103 |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> TStorageTenantTest::Boot [GOOD] >> TStorageTenantTest::CopyTableAndConcurrentSplit >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:28:50.213560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.213668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.213713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:50.213755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.213807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.213864Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.213933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.214027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.214866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.215265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:50.315477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:28:50.315548Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:50.345405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:50.345919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:50.346097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:50.354085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:50.354280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:50.355071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.355397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:50.358394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.358593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:50.359855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.359936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.360162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:50.360213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:50.360260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:50.360377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.368217Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary 
subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:28:50.513687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:50.513973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.514252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:50.514306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:50.514576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:50.514682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:50.517039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.517264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:50.517477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.517560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:50.517618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:50.517656Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:50.520090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.520164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:50.520219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:50.522319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-24T20:28:50.522378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.522429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.522491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:50.526756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:50.529254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:50.529463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:50.530602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.530760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.530818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.531162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:50.531237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.531423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:50.531507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28:50.534103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.534157Z node 1 :FLAT_TX_SCHEMESHARD ... 
594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:28:57.372574Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:28:57.372605Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-24T20:28:57.372637Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-24T20:28:57.372704Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-24T20:28:57.381881Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72075186233409546 at ss 72057594046678944 2025-06-24T20:28:57.381952Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72075186233409546 at ss 72057594046678944 2025-06-24T20:28:57.381984Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72075186233409546 at ss 72057594046678944 2025-06-24T20:28:57.382012Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72075186233409546 at ss 72057594046678944 2025-06-24T20:28:57.382392Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:28:57.382468Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-24T20:28:57.382643Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:28:57.382697Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:28:57.382748Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:28:57.382796Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:28:57.382851Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-24T20:28:57.382908Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:28:57.382962Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:0 2025-06-24T20:28:57.383006Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T20:28:57.383258Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-24T20:28:57.392031Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 
72057594046678944, cookie: 103 2025-06-24T20:28:57.392595Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-24T20:28:57.392911Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:57.393270Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T20:28:57.393576Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186234409547 2025-06-24T20:28:57.393918Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T20:28:57.394021Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186234409546 2025-06-24T20:28:57.394410Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-24T20:28:57.394683Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T20:28:57.395847Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Forgetting tablet 72075186234409547 2025-06-24T20:28:57.401664Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T20:28:57.401902Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186234409546 2025-06-24T20:28:57.403033Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186234409548 Forgetting tablet 72075186234409548 2025-06-24T20:28:57.404656Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-24T20:28:57.404877Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:28:57.405375Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T20:28:57.405440Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 
72057594046678944 2025-06-24T20:28:57.405578Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T20:28:57.406207Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T20:28:57.406273Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T20:28:57.406369Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:57.408917Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T20:28:57.408989Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T20:28:57.409165Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T20:28:57.409197Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186234409547 2025-06-24T20:28:57.409260Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T20:28:57.409286Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186234409546 2025-06-24T20:28:57.411228Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-24T20:28:57.411291Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186234409548 2025-06-24T20:28:57.411523Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T20:28:57.411630Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-24T20:28:57.411910Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T20:28:57.411965Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T20:28:57.412464Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T20:28:57.412563Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:28:57.412610Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:579:2517] TestWaitNotification: OK eventTxId 103 2025-06-24T20:28:57.413184Z node 7 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:57.413401Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 238us result status StatusPathDoesNotExist 2025-06-24T20:28:57.413593Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent [GOOD] >> TTransferTests::CreateWrongConfig [GOOD] >> TTransferTests::CreateWrongBatchSize >> TSchemeShardExtSubDomainTest::CreateThenDropChangesParent-AlterDatabaseCreateHiveFirst-true [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:28:50.445278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.445374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.445416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:50.445452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.445496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.445547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.445615Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.445684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.446476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.446859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:50.571940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:28:50.571990Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:50.591216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:50.595334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:50.595478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:50.602949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:50.603213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:50.603887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.604274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:50.607247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.607423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:50.608619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.608726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.608845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:50.608895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:50.608939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:50.609106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.615814Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, 
primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:28:50.829233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:50.829521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.829832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:50.829895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:50.830179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:50.830288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:50.841172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.841426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:50.841684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.841746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:50.841804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:50.841849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:50.848252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.848324Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:50.848400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:50.850414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-24T20:28:50.850462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.850541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.850623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:50.861996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:50.867883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:50.868061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:50.868806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.868912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.868956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.869229Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:50.869293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.869453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:50.869527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28:50.871674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.871723Z node 1 :FLAT_TX_SCHEMESHARD ... 
ated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:58.405583Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 Forgetting tablet 72075186234409547 2025-06-24T20:28:58.406055Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T20:28:58.407035Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-24T20:28:58.407316Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T20:28:58.407794Z node 6 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186234409546 2025-06-24T20:28:58.408663Z node 6 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186234409548 2025-06-24T20:28:58.409865Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Forgetting tablet 72075186234409546 2025-06-24T20:28:58.414731Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T20:28:58.414970Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186234409548 2025-06-24T20:28:58.416501Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-24T20:28:58.416706Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:28:58.417268Z node 6 :TX_DATASHARD ERROR: datashard.cpp:3573: Datashard's schemeshard pipe destroyed while no messages to sent at 72075186234409549 2025-06-24T20:28:58.417332Z node 6 :TX_DATASHARD ERROR: datashard.cpp:3573: Datashard's schemeshard pipe destroyed while no messages to sent at 72075186234409550 2025-06-24T20:28:58.417761Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T20:28:58.417832Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T20:28:58.417966Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T20:28:58.418885Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: 
TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T20:28:58.418956Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T20:28:58.419037Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:58.420962Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T20:28:58.421026Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T20:28:58.421119Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T20:28:58.421145Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186234409547 2025-06-24T20:28:58.423516Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T20:28:58.423563Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186234409546 2025-06-24T20:28:58.423643Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-24T20:28:58.423708Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186234409548 2025-06-24T20:28:58.423903Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T20:28:58.423995Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-24T20:28:58.424341Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-24T20:28:58.424392Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-24T20:28:58.424905Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-24T20:28:58.425026Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-24T20:28:58.425089Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [6:781:2686] TestWaitNotification: OK eventTxId 105 2025-06-24T20:28:58.425763Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir/table_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 
72057594046678944 2025-06-24T20:28:58.425983Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir/table_1" took 269us result status StatusPathDoesNotExist 2025-06-24T20:28:58.426151Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/dir/table_1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/dir/table_1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T20:28:58.426757Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:58.426936Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 200us result status StatusPathDoesNotExist 2025-06-24T20:28:58.427090Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T20:28:58.427690Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:58.427870Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 207us result status StatusSuccess 2025-06-24T20:28:58.428308Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> Yq_1::DeleteConnections >> TStorageTenantTest::CreateSolomonInsideSubDomain [GOOD] >> TReplicationTests::AlterReplicatedTable [GOOD] >> TReplicationTests::AlterReplicatedIndexTable >> TYardTest::TestMultiYardHarakiri [GOOD] >> TYardTest::TestStartingPointReboots ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:28:50.588534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.588614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.588653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:50.588685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.588726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.588756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.588807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.588868Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.589588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.589909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:50.673912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:28:50.673979Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:50.714354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:50.725256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:50.725477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:50.767782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:50.768099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:50.768990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.769453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:50.777488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.777724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:50.779115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.779244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.779373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:50.779442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:50.779495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:50.779670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.796674Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:28:50.955350Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:50.955604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.955895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:50.955952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:50.956219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:50.956321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:50.958463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.958671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:50.958885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.958944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:50.959001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:50.959040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:50.960990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.961353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:50.961399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:50.963413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.963462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-06-24T20:28:50.963524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.963590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:50.967539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:50.969859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:50.970087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:50.971294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.971440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.971505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.971871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:50.971944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.972111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:50.972254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28:50.974895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.974944Z node 1 :FLAT_TX_SCHEMESHARD ... 
, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:28:58.763922Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:28:58.763954Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-24T20:28:58.763990Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:28:58.764461Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:28:58.764541Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:28:58.764569Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:28:58.764599Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-24T20:28:58.764631Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-24T20:28:58.764687Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-24T20:28:58.766465Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-24T20:28:58.766529Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-24T20:28:58.766560Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-24T20:28:58.767184Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:28:58.767244Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-24T20:28:58.767422Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:28:58.767468Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:28:58.767514Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:28:58.767559Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 
ready parts: 1/1 2025-06-24T20:28:58.767606Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-24T20:28:58.767660Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:28:58.767711Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:0 2025-06-24T20:28:58.767749Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T20:28:58.767930Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T20:28:58.769097Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T20:28:58.769907Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 Forgetting tablet 72075186233409546 2025-06-24T20:28:58.771272Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:58.782146Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:58.782617Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T20:28:58.783039Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-24T20:28:58.783390Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-24T20:28:58.783624Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409548 2025-06-24T20:28:58.785330Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409547 2025-06-24T20:28:58.786022Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T20:28:58.786209Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:28:58.789245Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T20:28:58.789644Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T20:28:58.789713Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T20:28:58.789860Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T20:28:58.790620Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T20:28:58.790684Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T20:28:58.790765Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:58.795157Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T20:28:58.795240Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T20:28:58.795354Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T20:28:58.795378Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-24T20:28:58.795749Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T20:28:58.795804Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T20:28:58.802391Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T20:28:58.802524Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-24T20:28:58.802863Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T20:28:58.802918Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T20:28:58.803523Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T20:28:58.803642Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:28:58.803689Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:548:2495] TestWaitNotification: OK eventTxId 103 
2025-06-24T20:28:58.804304Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:58.804539Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 279us result status StatusPathDoesNotExist 2025-06-24T20:28:58.804752Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TTransferTests::CreateInParallel [GOOD] >> TTransferTests::CreateDropRecreate >> TStorageTenantTest::RemoveStoragePoolBeforeDroppingTablet [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-true [GOOD] >> IncrementalBackup::SimpleRestore >> Yq_1::ModifyConnections ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateThenDropChangesParent-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:28:50.141216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.141302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.141340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:50.141379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.141427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.141457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.141539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 
15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.141602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.142279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.142637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:50.331374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:28:50.331438Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:50.369161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:50.374023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:50.374189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:50.396725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:50.396963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:50.397701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.398032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:50.405719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.405906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:50.407025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.407089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.407245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:50.407296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:50.407340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:50.407490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.428055Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] 
sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:28:50.615746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:50.615965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.616263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:50.616316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:50.616543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:50.616619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:50.624636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.624860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:50.625082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.625140Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:50.625187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:50.625224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:50.630781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.630857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:50.630915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:50.637012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.637083Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.637140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.637204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:50.661134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:50.668355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:50.668672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:50.670043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.670220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.670291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.670731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:50.670813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.671027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:50.671446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28:50.683288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.683371Z node 1 :FLAT_TX_SCHEMESHARD ... 
.cpp:5314: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:28:59.070340Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 128 -> 134 2025-06-24T20:28:59.071493Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:28:59.078039Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:28:59.083013Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:28:59.083112Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:104: TDropExtSubdomain TDeleteExternalShards, operationId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:59.083316Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 134 -> 135 2025-06-24T20:28:59.083673Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:59.083762Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 FAKE_COORDINATOR: Erasing txId 102 2025-06-24T20:28:59.088071Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:59.088145Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:59.088309Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:28:59.088496Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:59.088536Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-24T20:28:59.088583Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-24T20:28:59.088927Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:28:59.088995Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:399: [72057594046678944] TDeleteParts opId# 102:0 ProgressState 2025-06-24T20:28:59.089051Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 135 -> 240 2025-06-24T20:28:59.090117Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 
72057594046678944, cookie: 102 2025-06-24T20:28:59.090220Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:28:59.090258Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:28:59.090298Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-24T20:28:59.090336Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:28:59.091070Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:28:59.091247Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:28:59.091283Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:28:59.091315Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-24T20:28:59.091352Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T20:28:59.091427Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-24T20:28:59.093422Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:28:59.093484Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T20:28:59.093659Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T20:28:59.093709Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:28:59.093773Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T20:28:59.093818Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:28:59.093873Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T20:28:59.093939Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 
102 ready parts: 1/1 2025-06-24T20:28:59.093995Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T20:28:59.094042Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T20:28:59.094158Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:28:59.094865Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T20:28:59.094944Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T20:28:59.095031Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T20:28:59.095706Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T20:28:59.095778Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T20:28:59.095871Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:59.097651Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:28:59.097852Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:28:59.100380Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T20:28:59.100512Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T20:28:59.100839Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T20:28:59.100905Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T20:28:59.101446Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T20:28:59.101586Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:28:59.101645Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [7:341:2330] TestWaitNotification: OK eventTxId 102 2025-06-24T20:28:59.102322Z 
node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:28:59.102563Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 307us result status StatusPathDoesNotExist 2025-06-24T20:28:59.102788Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst [GOOD] >> IncrementalBackup::SimpleBackup >> TStorageTenantTest::CreateTableInsideSubDomain2 [GOOD] >> TSchemeShardExtSubDomainTest::StatisticsAggregatorSync-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-false >> TTransferTests::CreateWrongBatchSize [GOOD] >> TTransferTests::CreateWrongFlushIntervalIsSmall >> TReplicationTests::CreateReplicatedTable [GOOD] >> TReplicationTests::DropReplicationWithInvalidCredentials >> IncrementalBackup::BackupRestore >> IncrementalBackup::SimpleRestoreBackupCollection+WithIncremental ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CreateSolomonInsideSubDomain [GOOD] Test command err: 2025-06-24T20:28:53.164541Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615569839097795:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:28:53.164608Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003973/r3tmp/tmpKPX6aF/pdisk_1.dat 2025-06-24T20:28:53.670779Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:53.687631Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:28:53.687748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:28:53.694605Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15371 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T20:28:53.991322Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519615569839097976:2142] Handle TEvNavigate describe path dc-1 2025-06-24T20:28:54.019412Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519615574134065712:2445] HANDLE EvNavigateScheme dc-1 2025-06-24T20:28:54.019571Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615569839098002:2156], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:28:54.019597Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519615569839098002:2156], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-24T20:28:54.019771Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519615574134065713:2446][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:28:54.022562Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519615569839097653:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519615574134065717:2446] 2025-06-24T20:28:54.022635Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519615569839097653:2051] Subscribe: subscriber# [1:7519615574134065717:2446], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:28:54.022716Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519615569839097656:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519615574134065718:2446] 2025-06-24T20:28:54.022736Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519615569839097656:2054] Subscribe: subscriber# [1:7519615574134065718:2446], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:28:54.022758Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519615569839097659:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519615574134065719:2446] 2025-06-24T20:28:54.022775Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519615569839097659:2057] Subscribe: subscriber# [1:7519615574134065719:2446], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:28:54.022837Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519615574134065717:2446][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615569839097653:2051] 2025-06-24T20:28:54.022867Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519615574134065718:2446][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615569839097656:2054] 2025-06-24T20:28:54.022888Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519615574134065719:2446][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615569839097659:2057] 2025-06-24T20:28:54.022941Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: 
[main][1:7519615574134065713:2446][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615574134065714:2446] 2025-06-24T20:28:54.022985Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519615574134065713:2446][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615574134065715:2446] 2025-06-24T20:28:54.023045Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519615574134065713:2446][/dc-1] Set up state: owner# [1:7519615569839098002:2156], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:28:54.023220Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519615574134065713:2446][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615574134065716:2446] 2025-06-24T20:28:54.023275Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519615574134065713:2446][/dc-1] Path was already updated: owner# [1:7519615569839098002:2156], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:28:54.023316Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615574134065717:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615574134065714:2446], cookie# 1 2025-06-24T20:28:54.023338Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615574134065718:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615574134065715:2446], cookie# 1 2025-06-24T20:28:54.023352Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615574134065719:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615574134065716:2446], cookie# 1 2025-06-24T20:28:54.023393Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615569839097653:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519615574134065717:2446] 2025-06-24T20:28:54.023425Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615569839097653:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615574134065717:2446], cookie# 1 2025-06-24T20:28:54.023465Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615569839097656:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519615574134065718:2446] 2025-06-24T20:28:54.023480Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615569839097656:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615574134065718:2446], cookie# 1 2025-06-24T20:28:54.023495Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615569839097659:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519615574134065719:2446] 
2025-06-24T20:28:54.023511Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615569839097659:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615574134065719:2446], cookie# 1 2025-06-24T20:28:54.097611Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615574134065717:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615569839097653:2051], cookie# 1 2025-06-24T20:28:54.097650Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615574134065718:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615569839097656:2054], cookie# 1 2025-06-24T20:28:54.097668Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615574134065719:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615569839097659:2057], cookie# 1 2025-06-24T20:28:54.097709Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615574134065713:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615574134065714:2446], cookie# 1 2025-06-24T20:28:54.097734Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519615574134065713:2446][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:28:54.097751Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615574134065713:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615574134065715:2446], cookie# 1 2025-06-24T20:28:54.097764Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519615574134065713:2446][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:28:54.097777Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615574134065713:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615574134065716:2446], cookie# 1 2025-06-24T20:28:54.097805Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519615574134065713:2446][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:28:54.139247Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519615569839098002:2156], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 
200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Data ... reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-24T20:28:55.672811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 8 ShardOwnerId: 72057594046644480 ShardLocalIdx: 8, at schemeshard: 72057594046644480 2025-06-24T20:28:55.672963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-24T20:28:55.673119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046644480 ShardLocalIdx: 5, at schemeshard: 72057594046644480 2025-06-24T20:28:55.673277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-24T20:28:55.673540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T20:28:55.673570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-24T20:28:55.673616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-24T20:28:55.673819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T20:28:55.673840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-24T20:28:55.673989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-24T20:28:55.681814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715661 2025-06-24T20:28:55.682602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-24T20:28:55.682630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-24T20:28:55.682681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:7 2025-06-24T20:28:55.682697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:7 tabletId 72075186224037894 2025-06-24T20:28:55.682721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-24T20:28:55.682728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 
tabletId 72075186224037891 2025-06-24T20:28:55.682763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-24T20:28:55.682778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-24T20:28:55.682829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:6 2025-06-24T20:28:55.682838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:6 tabletId 72075186224037893 2025-06-24T20:28:55.682877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-24T20:28:55.682888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-24T20:28:55.682910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:8 2025-06-24T20:28:55.682917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:8 tabletId 72075186224037895 2025-06-24T20:28:55.682934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-24T20:28:55.682948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:5 tabletId 72075186224037892 2025-06-24T20:28:55.683020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046644480 2025-06-24T20:28:55.683080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-24T20:28:55.683107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T20:28:55.683127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-24T20:28:55.683207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-24T20:28:55.720689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-24T20:28:56.067740Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:28:56.283479Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519615578829696517:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath 
RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:28:56.283667Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519615578829696517:2109], cacheItem# { Subscriber: { Subscriber: [3:7519615578829696572:2134] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:28:56.283776Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519615583124664139:2305], recipient# [3:7519615583124664138:2277], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:28:57.287614Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519615578829696517:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:28:57.287753Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519615578829696517:2109], cacheItem# { Subscriber: { Subscriber: [3:7519615578829696572:2134] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:28:57.287833Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519615587419631438:2307], recipient# [3:7519615587419631437:2278], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:28:58.291486Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519615578829696517:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] 
RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:28:58.291599Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519615578829696517:2109], cacheItem# { Subscriber: { Subscriber: [3:7519615578829696572:2134] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:28:58.291703Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519615591714598736:2308], recipient# [3:7519615591714598735:2279], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:28:50.500338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.500436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.500488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:50.500549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.500604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.500634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.500698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.500775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.501571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.501887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:50.592594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:28:50.592660Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:50.611718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:50.616973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:50.617172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:50.634291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:50.634568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:50.635389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.635768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:50.639684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.639908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:50.641182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.641250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.641389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:50.641461Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:50.641505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:50.641673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.651127Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:28:50.865764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" 
StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:50.865998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.866273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:50.866332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:50.866590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:50.866681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:50.868932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.869117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:50.869337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.869403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:50.869476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:50.869515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:50.871373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.871419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:50.871449Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:50.872655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.872699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.872729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState 
leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.872764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:50.875291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:50.876631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:50.876797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:50.877483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.877591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.877632Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.877876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:50.877916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.878033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:50.878104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28:50.879569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.879600Z node 1 :FLAT_TX_SCHEMESHARD ... 
D DEBUG: schemeshard__operation_common_subdomain.cpp:120: NSubDomainState::TConfigureParts operationId# 102:0 Got OK TEvConfigureStatus from tablet# 72075186233409549 shardIdx# 72057594046678944:4 at schemeshard# 72057594046678944 2025-06-24T20:29:00.012469Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 3 -> 128 2025-06-24T20:29:00.015531Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:29:00.015768Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:29:00.015837Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:29:00.015909Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 102:0, at tablet# 72057594046678944 2025-06-24T20:29:00.015980Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 102 ready parts: 1/1 2025-06-24T20:29:00.016165Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 102 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:29:00.020451Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-06-24T20:29:00.020664Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 2025-06-24T20:29:00.021144Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:29:00.021323Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 34359740526 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:29:00.021395Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 102:0, at tablet# 72057594046678944 2025-06-24T20:29:00.021818Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 128 -> 240 2025-06-24T20:29:00.021899Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 102:0, at tablet# 72057594046678944 2025-06-24T20:29:00.022061Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 
72057594046678944, LocalPathId: 2] was 6 2025-06-24T20:29:00.022190Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:568: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[8:361:2333], EffectiveACLVersion: 0, SubdomainVersion: 2, UserAttributesVersion: 1, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 72075186233409549, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 102 2025-06-24T20:29:00.028006Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:29:00.028089Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:29:00.028394Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:29:00.028465Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [8:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-24T20:29:00.028934Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:29:00.029015Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:761: [72057594046678944] TSyncHive, operationId 102:0, ProgressState, NeedSyncHive: 0 2025-06-24T20:29:00.029072Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 240 -> 240 2025-06-24T20:29:00.029993Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:29:00.030136Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:29:00.030197Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:29:00.030263Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-24T20:29:00.030322Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-24T20:29:00.030433Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-24T20:29:00.040171Z node 8 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:29:00.040253Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T20:29:00.040452Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T20:29:00.040507Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:29:00.040563Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T20:29:00.040630Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:29:00.040682Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T20:29:00.040780Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [8:302:2291] message: TxId: 102 2025-06-24T20:29:00.040850Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:29:00.040905Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T20:29:00.040947Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T20:29:00.041184Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-24T20:29:00.041932Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:29:00.043807Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:29:00.043873Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [8:508:2445] TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 2025-06-24T20:29:00.052309Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "USER_0" ExternalStatisticsAggregator: false } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:29:00.052517Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1079: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 103:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "USER_0" ExternalStatisticsAggregator: false } 2025-06-24T20:29:00.052590Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1085: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 103:0, path /MyRoot/USER_0 2025-06-24T20:29:00.052764Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 103:0, 
explain: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed, at schemeshard: 72057594046678944 2025-06-24T20:29:00.052822Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 103:1, propose status:StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed, at schemeshard: 72057594046678944 2025-06-24T20:29:00.056444Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 103, response: Status: StatusInvalidParameter Reason: "Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed" TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:29:00.056769Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed, operation: ALTER DATABASE, path: /MyRoot/USER_0 TestModificationResult got TxId: 103, wait until txId: 103 >> TStorageTenantTest::LsLs [GOOD] >> TPDiskRaces::KillOwnerWhileDecommitting [GOOD] >> TPDiskRaces::KillOwnerWhileDecommittingWithInflight ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:28:50.099760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.099837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.099883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:50.099913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.099951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.099976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.100012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.100056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, 
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.100591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.100847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:50.227091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:28:50.235606Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:50.278230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:50.278774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:50.278986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:50.323314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:50.323540Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:50.324358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.324704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:50.328093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.328301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:50.329511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.329598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.329875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:50.329935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:50.330006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:50.330117Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.347000Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:28:50.547996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { 
Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:50.548235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.548477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:50.548529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:50.548793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:50.548870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:50.551307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.551486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:50.551631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.551671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:50.551710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:50.551732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:50.558818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.558883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:50.558939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:50.562093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.562153Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.562210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: 
NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.562262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:50.573104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:50.575177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:50.575384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:50.576484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.576625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:50.576694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.577028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:50.577090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:50.577261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:50.577358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28:50.579240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.579277Z node 1 :FLAT_TX_SCHEMESHARD ... 
ails: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 6 2025-06-24T20:29:00.165341Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5902: Handle TEvUpdateTenantSchemeShard, at schemeshard: 72075186233409546, msg: TabletId: 72057594046678944 Generation: 2 UserAttributes { Key: "user__attr_1" Value: "value" } UserAttributesVersion: 2 2025-06-24T20:29:00.168162Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:79: TTxUpdateTenant DoExecute, msg: TabletId: 72057594046678944 Generation: 2 UserAttributes { Key: "user__attr_1" Value: "value" } UserAttributesVersion: 2, at schemeshard: 72075186233409546 2025-06-24T20:29:00.171451Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:589: Cannot publish paths for unknown operation id#0 2025-06-24T20:29:00.175709Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:29:00.176619Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:29:00.180397Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:29:00.181079Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:208:2208], at schemeshard: 72057594046678944, txId: 104, path id: 2 FAKE_COORDINATOR: Erasing txId 104 2025-06-24T20:29:00.183579Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T20:29:00.183928Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T20:29:00.184032Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-24T20:29:00.184154Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 6 2025-06-24T20:29:00.184292Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T20:29:00.184527Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-24T20:29:00.187867Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-24T20:29:00.187914Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 0, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-24T20:29:00.188075Z node 7 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-24T20:29:00.188112Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:447:2397], at schemeshard: 72075186233409546, txId: 0, path id: 1 2025-06-24T20:29:00.188326Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5889: Handle TEvSyncTenantSchemeShard, at schemeshard: 72057594046678944, msg: DomainSchemeShard: 72057594046678944 DomainPathId: 2 TabletID: 72075186233409546 Generation: 2 EffectiveACLVersion: 0 SubdomainVersion: 3 UserAttributesVersion: 2 TenantHive: 18446744073709551615 TenantSysViewProcessor: 18446744073709551615 TenantRootACL: "" TenantStatisticsAggregator: 18446744073709551615 TenantGraphShard: 18446744073709551615 2025-06-24T20:29:00.188625Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:26: TTxSyncTenant DoExecute, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T20:29:00.188795Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:568: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[7:353:2332], EffectiveACLVersion: 0, SubdomainVersion: 3, UserAttributesVersion: 2, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 3, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 2, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T20:29:00.189694Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186233409546, cookie: 0 2025-06-24T20:29:00.189808Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T20:29:00.190572Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-24T20:29:00.193201Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-24T20:29:00.193757Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-24T20:29:00.197751Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-24T20:29:00.198377Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T20:29:00.198689Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [7:551:2499] TestWaitNotification: OK eventTxId 104 2025-06-24T20:29:00.202396Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: 
TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:29:00.203699Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 1.33ms result status StatusSuccess 2025-06-24T20:29:00.206961Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 2 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 3 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } StoragePools { Name: "pool-2" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "user__attr_1" Value: "value" } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:29:00.211004Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409546 2025-06-24T20:29:00.212519Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409546 describe path "/MyRoot/USER_0" took 1.51ms result status StatusSuccess 2025-06-24T20:29:00.213390Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "MyRoot/USER_0" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 2 ChildrenVersion: 1 SubDomainVersion: 3 
SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 3 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } StoragePools { Name: "pool-2" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot/USER_0" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "user__attr_1" Value: "value" } } PathId: 1 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::RemoveStoragePoolBeforeDroppingTablet [GOOD] Test command err: 2025-06-24T20:28:53.115186Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615569748699450:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:28:53.115407Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003961/r3tmp/tmpvGVKl4/pdisk_1.dat 2025-06-24T20:28:53.556025Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:53.568711Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:28:53.568854Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:28:53.579049Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7240 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T20:28:53.812566Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519615569748699586:2136] Handle TEvNavigate describe path dc-1 2025-06-24T20:28:53.834395Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519615569748700024:2435] HANDLE EvNavigateScheme dc-1 2025-06-24T20:28:53.834526Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615569748699610:2150], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:28:53.834582Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519615569748699610:2150], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-24T20:28:53.834773Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519615569748700025:2436][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:28:53.836716Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519615565453731976:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519615569748700029:2436] 2025-06-24T20:28:53.836772Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519615565453731976:2051] Subscribe: subscriber# [1:7519615569748700029:2436], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:28:53.836822Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519615565453731979:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519615569748700030:2436] 2025-06-24T20:28:53.836837Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519615565453731979:2054] Subscribe: subscriber# [1:7519615569748700030:2436], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:28:53.836856Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519615565453731982:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519615569748700031:2436] 2025-06-24T20:28:53.836870Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519615565453731982:2057] Subscribe: subscriber# [1:7519615569748700031:2436], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:28:53.836948Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519615569748700029:2436][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615565453731976:2051] 2025-06-24T20:28:53.836999Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519615569748700030:2436][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615565453731979:2054] 2025-06-24T20:28:53.837022Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519615569748700031:2436][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615565453731982:2057] 2025-06-24T20:28:53.837069Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: 
[main][1:7519615569748700025:2436][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615569748700026:2436] 2025-06-24T20:28:53.837102Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519615569748700025:2436][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615569748700027:2436] 2025-06-24T20:28:53.837149Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519615569748700025:2436][/dc-1] Set up state: owner# [1:7519615569748699610:2150], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:28:53.837274Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519615569748700025:2436][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615569748700028:2436] 2025-06-24T20:28:53.837334Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519615569748700025:2436][/dc-1] Path was already updated: owner# [1:7519615569748699610:2150], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:28:53.837364Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615565453731976:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519615569748700029:2436] 2025-06-24T20:28:53.837394Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615565453731979:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519615569748700030:2436] 2025-06-24T20:28:53.837413Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615565453731982:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519615569748700031:2436] 2025-06-24T20:28:53.838524Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519615569748700025:2436][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519615569748699610:2150], cookie# 1 2025-06-24T20:28:53.838609Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615569748700029:2436][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615569748700026:2436], cookie# 1 2025-06-24T20:28:53.838629Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615569748700030:2436][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615569748700027:2436], cookie# 1 2025-06-24T20:28:53.838643Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615569748700031:2436][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615569748700028:2436], cookie# 1 2025-06-24T20:28:53.838672Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615565453731976:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# 
[1:7519615569748700029:2436], cookie# 1 2025-06-24T20:28:53.838705Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615565453731979:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615569748700030:2436], cookie# 1 2025-06-24T20:28:53.838728Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615565453731982:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615569748700031:2436], cookie# 1 2025-06-24T20:28:53.838763Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615569748700029:2436][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615565453731976:2051], cookie# 1 2025-06-24T20:28:53.838779Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615569748700030:2436][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615565453731979:2054], cookie# 1 2025-06-24T20:28:53.838792Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615569748700031:2436][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615565453731982:2057], cookie# 1 2025-06-24T20:28:53.838819Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615569748700025:2436][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615569748700026:2436], cookie# 1 2025-06-24T20:28:53.838844Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519615569748700025:2436][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:28:53.838859Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615569748700025:2436][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615569748700027:2436], cookie# 1 2025-06-24T20:28:53.838874Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519615569748700025:2436][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:28:53.839125Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615569748700025:2436][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615569748700028:2436], cookie# 1 2025-06-24T20:28:53.844342Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519615569748700025:2436][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:28:53.940386Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519615569748699610:2150], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 
PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } Pat ... thId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 } 2025-06-24T20:28:54.758740Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][3:7519615575346774927:2225][/dc-1/USER_0] Path was already updated: owner# [3:7519615575346774549:2109], state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 2], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 2], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:28:54.757920Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615565453731979:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 18446744073709551615 }: sender# [3:7519615575346774932:2225] 2025-06-24T20:28:54.757961Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615565453731982:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 18446744073709551615 }: sender# [3:7519615575346774933:2225] 2025-06-24T20:28:54.757996Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615565453731976:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 18446744073709551615 }: sender# [3:7519615575346774931:2225] 2025-06-24T20:28:54.758982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046644480, cookie: 281474976715660 2025-06-24T20:28:54.759071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046644480, cookie: 281474976715660 2025-06-24T20:28:54.759084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046644480, txId: 281474976715660 2025-06-24T20:28:54.759100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715660, pathId: [OwnerId: 72057594046644480, LocalPathId: 1], version: 7 2025-06-24T20:28:54.759116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-24T20:28:54.759388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046644480, cookie: 281474976715660 2025-06-24T20:28:54.759450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046644480, cookie: 281474976715660 2025-06-24T20:28:54.759458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976715660 
2025-06-24T20:28:54.759471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715660, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 18446744073709551615 2025-06-24T20:28:54.759486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 6 2025-06-24T20:28:54.759527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715660, subscribers: 1 2025-06-24T20:28:54.759539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [1:7519615574043667802:2276] 2025-06-24T20:28:54.759821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 72057594046644480 2025-06-24T20:28:54.759840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-24T20:28:54.759851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:1 hive 72057594037968897 at ss 72057594046644480 2025-06-24T20:28:54.759861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-24T20:28:54.761671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715660 2025-06-24T20:28:54.761734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715660 2025-06-24T20:28:54.762777Z node 1 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037888) 2025-06-24T20:28:54.768664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-24T20:28:54.769053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 5 2025-06-24T20:28:54.769335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-24T20:28:54.769515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 4 2025-06-24T20:28:54.769636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-24T20:28:54.769777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, 
LocalPathId: 2] was 3 2025-06-24T20:28:54.769890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-24T20:28:54.770014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-24T20:28:54.770198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T20:28:54.770224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-24T20:28:54.770354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-24T20:28:54.770490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T20:28:54.770507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-24T20:28:54.770550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-24T20:28:54.772467Z node 1 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72057594037968897 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037888 2025-06-24T20:28:54.773488Z node 1 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72057594037968897 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037891 2025-06-24T20:28:54.773597Z node 1 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72057594037968897 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037889 2025-06-24T20:28:54.782588Z node 1 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72057594037968897 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037890 2025-06-24T20:28:54.793486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-24T20:28:54.793532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-24T20:28:54.793573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-24T20:28:54.793582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-24T20:28:54.793612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-24T20:28:54.793623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-24T20:28:54.793648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: 
Deleted shardIdx 72057594046644480:3 2025-06-24T20:28:54.793666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-24T20:28:54.793712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-24T20:28:54.793746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 TabletID: 72075186224037888 Status: OK Info { TabletID: 72075186224037888 Channels { Channel: 0 ChannelType: 0 History { FromGeneration: 0 GroupID: 2181038081 } StoragePool: "name_USER_0_kind_storage-pool-number-2" } Channels { Channel: 1 ChannelType: 0 History { FromGeneration: 0 GroupID: 2181038081 } StoragePool: "name_USER_0_kind_storage-pool-number-2" } Channels { Channel: 2 ChannelType: 0 History { FromGeneration: 0 GroupID: 2181038081 } StoragePool: "name_USER_0_kind_storage-pool-number-2" } TabletType: Coordinator Version: 1 TenantIdOwner: 72057594046644480 TenantIdLocalId: 2 } 2025-06-24T20:28:54.796248Z node 1 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037888) >> TDataShardTrace::TestTraceDistributedUpsert-UseSink >> TDataShardTrace::TestTraceDistributedUpsert+UseSink >> TReplicationTests::AlterReplicatedIndexTable [GOOD] >> TReplicationTests::CopyReplicatedTable >> TDataShardTrace::TestTraceDistributedSelectViaReadActors >> TDataShardTrace::TestTraceWriteImmediateOnShard ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CreateTableInsideSubDomain2 [GOOD] Test command err: 2025-06-24T20:28:53.119835Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615572707795707:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:28:53.124265Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039a9/r3tmp/tmpyqtRC1/pdisk_1.dat 2025-06-24T20:28:53.762267Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:53.776671Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:28:53.776790Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:28:53.785648Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19869 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T20:28:54.106940Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519615572707795738:2142] Handle TEvNavigate describe path dc-1 2025-06-24T20:28:54.129541Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:28:54.130582Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615572707795761:2155], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:28:54.130717Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615572707795761:2155], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 72057594046644480 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:28:54.130742Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519615572707795761:2155], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-24T20:28:54.135295Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519615577002763477:2445][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:28:54.149861Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519615568412828125:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519615577002763483:2445] 2025-06-24T20:28:54.149961Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519615568412828125:2057] Subscribe: subscriber# [1:7519615577002763483:2445], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:28:54.150127Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519615577002763483:2445][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615568412828125:2057] 2025-06-24T20:28:54.150204Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519615577002763477:2445][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615577002763480:2445] 2025-06-24T20:28:54.150277Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615568412828125:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519615577002763483:2445] 2025-06-24T20:28:54.150308Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519615568412828119:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519615577002763481:2445] 2025-06-24T20:28:54.150326Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519615568412828119:2051] Subscribe: subscriber# [1:7519615577002763481:2445], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:28:54.150344Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519615568412828122:2054] Handle 
NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519615577002763482:2445] 2025-06-24T20:28:54.150354Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519615568412828122:2054] Subscribe: subscriber# [1:7519615577002763482:2445], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:28:54.150376Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519615577002763481:2445][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615568412828119:2051] 2025-06-24T20:28:54.150417Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519615577002763482:2445][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615568412828122:2054] 2025-06-24T20:28:54.150447Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519615577002763477:2445][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615577002763478:2445] 2025-06-24T20:28:54.150524Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519615577002763477:2445][/dc-1] Set up state: owner# [1:7519615572707795761:2155], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:28:54.150670Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519615577002763477:2445][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615577002763479:2445] 2025-06-24T20:28:54.150736Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519615577002763477:2445][/dc-1] Path was already updated: owner# [1:7519615572707795761:2155], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:28:54.161837Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615568412828119:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519615577002763481:2445] 2025-06-24T20:28:54.163709Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615568412828122:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519615577002763482:2445] 2025-06-24T20:28:54.163792Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519615577002763484:2446] HANDLE EvNavigateScheme dc-1 2025-06-24T20:28:54.202276Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519615572707795761:2155], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges 
Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 } 2025-06-24T20:28:54.202656Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519615572707795761:2155], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 }, by path# { Subscriber: { Subscriber: [1:7519615577002763477:2445] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: 
IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:28:54.202868Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp ... 615568412828119:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519615589887665995:2882] 2025-06-24T20:28:57.695355Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615568412828122:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519615589887665996:2882] 2025-06-24T20:28:57.695449Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615568412828125:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519615589887665997:2882] 2025-06-24T20:28:57.695517Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519615572707795761:2155], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-24T20:28:57.695587Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519615572707795761:2155], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [1:7519615589887665987:2882] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:28:57.695683Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519615568412828119:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/workload_manager/running_requests DomainOwnerId: 72057594046644480 }: sender# [1:7519615589887665998:2883] 2025-06-24T20:28:57.695691Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615572707795761:2155], cacheItem# { Subscriber: { Subscriber: [1:7519615589887665987:2882] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:28:57.695698Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7519615568412828119:2051] Upsert description: path# /dc-1/.metadata/workload_manager/running_requests 2025-06-24T20:28:57.695726Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519615568412828125:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/workload_manager/running_requests DomainOwnerId: 72057594046644480 }: sender# [1:7519615589887666000:2883] 2025-06-24T20:28:57.695741Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7519615568412828125:2057] Upsert description: path# /dc-1/.metadata/workload_manager/running_requests 2025-06-24T20:28:57.695770Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519615568412828119:2051] Subscribe: subscriber# [1:7519615589887665998:2883], path# /dc-1/.metadata/workload_manager/running_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:28:57.695794Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519615568412828125:2057] 
Subscribe: subscriber# [1:7519615589887666000:2883], path# /dc-1/.metadata/workload_manager/running_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:28:57.695823Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519615568412828122:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/workload_manager/running_requests DomainOwnerId: 72057594046644480 }: sender# [1:7519615589887665999:2883] 2025-06-24T20:28:57.695835Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7519615568412828122:2054] Upsert description: path# /dc-1/.metadata/workload_manager/running_requests 2025-06-24T20:28:57.695849Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519615589887665998:2883][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [1:7519615568412828119:2051] 2025-06-24T20:28:57.695863Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519615568412828122:2054] Subscribe: subscriber# [1:7519615589887665999:2883], path# /dc-1/.metadata/workload_manager/running_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:28:57.695875Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519615589887666000:2883][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [1:7519615568412828125:2057] 2025-06-24T20:28:57.695892Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615568412828119:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519615589887665998:2883] 2025-06-24T20:28:57.695904Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519615589887665999:2883][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [1:7519615568412828122:2054] 2025-06-24T20:28:57.695905Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615568412828125:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519615589887666000:2883] 2025-06-24T20:28:57.695919Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615568412828122:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519615589887665999:2883] 2025-06-24T20:28:57.695935Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519615589887665988:2883][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [1:7519615589887665989:2883] 2025-06-24T20:28:57.695993Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519615589887665988:2883][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [1:7519615589887665991:2883] 2025-06-24T20:28:57.696027Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519615589887665988:2883][/dc-1/.metadata/workload_manager/running_requests] Set up state: owner# [1:7519615572707795761:2155], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:28:57.696062Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: 
[main][1:7519615589887665988:2883][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [1:7519615589887665990:2883] 2025-06-24T20:28:57.696086Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519615572707795761:2155], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-24T20:28:57.696090Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519615589887665988:2883][/dc-1/.metadata/workload_manager/running_requests] Ignore empty state: owner# [1:7519615572707795761:2155], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:28:57.696151Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519615572707795761:2155], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [1:7519615589887665988:2883] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:28:57.696217Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615572707795761:2155], cacheItem# { Subscriber: { Subscriber: [1:7519615589887665988:2883] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:28:57.696314Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519615589887666001:2884], recipient# [1:7519615589887665986:2281], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:28:57.868307Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519615580777968405:2113], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:28:57.868434Z 
node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519615580777968405:2113], cacheItem# { Subscriber: { Subscriber: [3:7519615585072935903:2239] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:28:57.868522Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519615589367903402:2364], recipient# [3:7519615589367903401:2284], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> Yq_1::DescribeJob >> TReplicationTests::DropReplicationWithInvalidCredentials [GOOD] >> TReplicationTests::DropReplicationWithUnknownSecret >> TDataShardTrace::TestTraceDistributedSelect >> TTransferTests::CreateDropRecreate [GOOD] >> TTransferTests::ConsistencyLevel |87.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut |87.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut >> TTransferTests::CreateWrongFlushIntervalIsSmall [GOOD] >> TTransferTests::CreateWrongFlushIntervalIsBig ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::LsLs [GOOD] Test command err: 2025-06-24T20:28:54.924575Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615573566729076:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:28:54.924710Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:28:55.056520Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519615578896221878:2202];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:28:55.355053Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039d7/r3tmp/tmpuQvrVO/pdisk_1.dat 2025-06-24T20:28:55.731229Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:28:55.735233Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:28:55.743392Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:28:55.743469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> 
Connecting 2025-06-24T20:28:55.770100Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:55.816911Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:28:55.817032Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:28:55.817925Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:28:55.903270Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:28:56.027347Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11552 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T20:28:56.139034Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519615573566729087:2143] Handle TEvNavigate describe path dc-1 2025-06-24T20:28:56.177787Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519615582156664148:2458] HANDLE EvNavigateScheme dc-1 2025-06-24T20:28:56.177925Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615577861696410:2157], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:28:56.178008Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519615577861696831:2449][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519615577861696410:2157], cookie# 1 2025-06-24T20:28:56.188017Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615577861696835:2449][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615577861696832:2449], cookie# 1 2025-06-24T20:28:56.188102Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615573566728763:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615577861696835:2449], cookie# 1 2025-06-24T20:28:56.188129Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615577861696836:2449][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615577861696833:2449], cookie# 1 2025-06-24T20:28:56.188141Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615577861696837:2449][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615577861696834:2449], cookie# 1 2025-06-24T20:28:56.188157Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615577861696835:2449][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615573566728763:2052], cookie# 1 2025-06-24T20:28:56.188182Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615577861696831:2449][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615577861696832:2449], cookie# 1 2025-06-24T20:28:56.188202Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: 
[main][1:7519615577861696831:2449][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:28:56.188214Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615573566728766:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615577861696836:2449], cookie# 1 2025-06-24T20:28:56.188224Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615573566728769:2058] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615577861696837:2449], cookie# 1 2025-06-24T20:28:56.188235Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615577861696836:2449][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615573566728766:2055], cookie# 1 2025-06-24T20:28:56.188253Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615577861696837:2449][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615573566728769:2058], cookie# 1 2025-06-24T20:28:56.188273Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615577861696831:2449][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615577861696833:2449], cookie# 1 2025-06-24T20:28:56.188282Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519615577861696831:2449][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:28:56.188290Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615577861696831:2449][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615577861696834:2449], cookie# 1 2025-06-24T20:28:56.188304Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519615577861696831:2449][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:28:56.188361Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519615577861696410:2157], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T20:28:56.202835Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519615577861696410:2157], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519615577861696831:2449] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:28:56.203069Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615577861696410:2157], cacheItem# { Subscriber: { Subscriber: [1:7519615577861696831:2449] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true 
ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-24T20:28:56.212117Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519615582156664149:2459], recipient# [1:7519615582156664148:2458], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T20:28:56.212217Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519615582156664148:2458] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T20:28:56.284345Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519615582156664148:2458] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } TClient::Ls response: 2025-06-24T20:28:56.295280Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519615582156664148:2458] Handle TEvDescribeSchemeResult Forward to# [1:7519615582156664147:2457] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { ...
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:29:00.315495Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519615578896221937:2108], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:00.315626Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519615578896221937:2108], cacheItem# { Subscriber: { Subscriber: [2:7519615600371058481:2120] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:00.315684Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519615578896221937:2108], cacheItem# { Subscriber: { Subscriber: [2:7519615600371058482:2121] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:00.315785Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519615600371058497:2123], recipient# [2:7519615600371058479:2268], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:00.316182Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7519615600371058479:2268], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:29:00.384129Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519615578896221937:2108], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:00.384986Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519615578896221937:2108], cacheItem# { Subscriber: { Subscriber: [2:7519615583191189257:2113] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:00.385292Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519615600371058501:2124], recipient# [2:7519615600371058500:2269], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:00.433773Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519615578896221937:2108], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:00.433938Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519615578896221937:2108], cacheItem# { Subscriber: { Subscriber: [2:7519615600371058481:2120] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:00.433996Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519615578896221937:2108], cacheItem# { Subscriber: { Subscriber: [2:7519615600371058482:2121] 
DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:00.434114Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519615600371058502:2125], recipient# [2:7519615600371058479:2268], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:00.434683Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7519615600371058479:2268], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:29:00.691103Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519615578896221937:2108], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:00.691255Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519615578896221937:2108], cacheItem# { Subscriber: { Subscriber: [2:7519615600371058481:2120] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:00.691309Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519615578896221937:2108], cacheItem# { Subscriber: { Subscriber: [2:7519615600371058482:2121] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:00.691435Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519615600371058504:2126], recipient# [2:7519615600371058479:2268], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:00.692537Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7519615600371058479:2268], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } |87.9%| [TM] {RESULT} ydb/core/blobstorage/ut_mirror3of4/unittest |87.9%| [TS] {RESULT} ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/unittest |87.9%| [LD] {RESULT} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut >> TStorageTenantTest::CreateTableInsideSubDomain [GOOD] >> TStorageTenantTest::DeclareAndDefine [GOOD] >> TReplicationTests::DropReplicationWithUnknownSecret [GOOD] >> test.py::test[solomon-DownsamplingValidSettings-default.txt] [GOOD] >> test.py::test[solomon-HistResponse-default.txt] >> TReplicationTests::CopyReplicatedTable [GOOD] >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-true >> Yq_1::DescribeConnection >> Yq_1::ListConnections ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CreateTableInsideSubDomain [GOOD] Test command err: 2025-06-24T20:28:55.993675Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615581301786237:2238];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:28:55.993727Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003983/r3tmp/tmpYykOTB/pdisk_1.dat 2025-06-24T20:28:56.680828Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:56.751754Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:28:56.751847Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:28:56.763427Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:28:56.995387Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19440 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T20:28:57.068077Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519615585596753548:2142] Handle TEvNavigate describe path dc-1 2025-06-24T20:28:57.118927Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519615589891721296:2442] HANDLE EvNavigateScheme dc-1 2025-06-24T20:28:57.119061Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615585596753571:2155], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:28:57.119135Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519615585596753983:2437][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519615585596753571:2155], cookie# 1 2025-06-24T20:28:57.128189Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615585596753987:2437][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615585596753984:2437], cookie# 1 2025-06-24T20:28:57.128242Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615585596753988:2437][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615585596753985:2437], cookie# 1 2025-06-24T20:28:57.128259Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615585596753989:2437][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615585596753986:2437], cookie# 1 2025-06-24T20:28:57.128289Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615581301785926:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615585596753987:2437], cookie# 1 2025-06-24T20:28:57.128315Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615581301785929:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615585596753988:2437], cookie# 1 2025-06-24T20:28:57.128331Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615581301785932:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615585596753989:2437], cookie# 1 2025-06-24T20:28:57.128359Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615585596753987:2437][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615581301785926:2051], cookie# 1 2025-06-24T20:28:57.128383Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615585596753988:2437][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615581301785929:2054], cookie# 1 2025-06-24T20:28:57.128396Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615585596753989:2437][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615581301785932:2057], cookie# 1 2025-06-24T20:28:57.128435Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615585596753983:2437][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615585596753984:2437], cookie# 1 2025-06-24T20:28:57.128470Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: 
[main][1:7519615585596753983:2437][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:28:57.128489Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615585596753983:2437][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615585596753985:2437], cookie# 1 2025-06-24T20:28:57.128499Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519615585596753983:2437][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:28:57.128552Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615585596753983:2437][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615585596753986:2437], cookie# 1 2025-06-24T20:28:57.128581Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519615585596753983:2437][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:28:57.128665Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519615585596753571:2155], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T20:28:57.148928Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519615585596753571:2155], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519615585596753983:2437] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:28:57.149197Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615585596753571:2155], cacheItem# { Subscriber: { Subscriber: [1:7519615585596753983:2437] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-24T20:28:57.156098Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519615589891721297:2443], recipient# [1:7519615589891721296:2442], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T20:28:57.156173Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# 
[1:7519615589891721296:2442] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T20:28:57.199083Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519615589891721296:2442] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-24T20:28:57.215200Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519615589891721296:2442] Handle TEvDescribeSchemeResult Forward to# [1:7519615589891721295:2441] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T20:28:57.275612Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519615585596753548:2142] Handle TEvProposeTransaction 2025-06-24T20:28:57.275638Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:75196155855 ... 
g { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } FollowerCount: 2 PartitioningPolicy { MinPartitionsCount: 2 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186224037888 Coordinators: 72075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037890 Mediators: 72075186224037891 } DomainKey { SchemeShard: 72057594046644480 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 
MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "SimpleTable" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1750796939400 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "SimpleTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "k... (TRUNCATED) 2025-06-24T20:28:59.682997Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7519615581301785926:2051] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [3:7519615592243710382:2107] 2025-06-24T20:28:59.683035Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7519615581301785926:2051] Unsubscribe: subscriber# [3:7519615592243710382:2107], path# /dc-1/USER_0 2025-06-24T20:28:59.683076Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7519615581301785929:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [3:7519615592243710383:2107] 2025-06-24T20:28:59.683087Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7519615581301785929:2054] Unsubscribe: subscriber# [3:7519615592243710383:2107], path# /dc-1/USER_0 2025-06-24T20:28:59.683118Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7519615581301785932:2057] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [3:7519615592243710384:2107] 2025-06-24T20:28:59.683159Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7519615581301785932:2057] Unsubscribe: subscriber# [3:7519615592243710384:2107], path# /dc-1/USER_0 2025-06-24T20:28:59.690593Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-24T20:28:59.691667Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T20:29:00.491590Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519615592243710389:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:00.491694Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519615592243710389:2109], cacheItem# { Subscriber: { Subscriber: [3:7519615592243710434:2134] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: 
StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:00.491767Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519615600833645389:2356], recipient# [3:7519615600833645388:2284], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:01.495562Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519615592243710389:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:01.495735Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519615592243710389:2109], cacheItem# { Subscriber: { Subscriber: [3:7519615592243710434:2134] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:01.495849Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519615605128612687:2357], recipient# [3:7519615605128612686:2285], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:02.498900Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519615592243710389:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:02.499012Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519615592243710389:2109], cacheItem# { Subscriber: { Subscriber: [3:7519615592243710434:2134] DomainOwnerId: 72057594046644480 Type: 2 
SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:02.503262Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519615609423579985:2358], recipient# [3:7519615609423579984:2286], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> TStorageTenantTest::GenericCases [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_replication/unittest >> TReplicationTests::DropReplicationWithUnknownSecret [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:28:52.665655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:52.665753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:52.665793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:52.665829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:52.665946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:52.665987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:52.666074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:52.666152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:52.667034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:52.667404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 
2025-06-24T20:28:52.766917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:28:52.766984Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:52.787274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:52.787755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:52.787948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:52.797007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:52.797207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:52.798137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:52.798478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:52.801929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:52.802113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:52.803380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:52.803453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:52.803687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:52.803740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:52.803790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:52.803892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:52.811457Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:28:52.959360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:52.959593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:52.959868Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:52.959932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:52.960186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:52.960305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:52.969015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:52.969269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:52.969553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:52.969618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:52.969673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:52.969715Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:52.977394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:52.977518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:52.977644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:52.981566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:52.981640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:52.981704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:52.981783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:52.986408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { 
TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:52.988975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:52.989206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:52.990397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:52.990588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:52.990654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:52.990983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:52.991058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:52.991267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:52.991353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28:52.994973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:52.995054Z node 1 :FLAT_TX_SCHEMESHARD ... 
ToDone TxId: 102 ready parts: 1/1 2025-06-24T20:29:05.265379Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T20:29:05.265435Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:29:05.265489Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-24T20:29:05.265546Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:29:05.265608Z node 9 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T20:29:05.265661Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T20:29:05.265833Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:29:05.265903Z node 9 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-24T20:29:05.265956Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-24T20:29:05.266001Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-24T20:29:05.267525Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 274137603, Sender [9:210:2210], Recipient [9:127:2151]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 7 } 2025-06-24T20:29:05.267580Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5035: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-24T20:29:05.267669Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:29:05.267757Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:29:05.267800Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:29:05.267863Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-24T20:29:05.267921Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:29:05.268039Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 
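The captured blocks in this log are newline-stripped, so dozens of entries run together on a single line; every entry, however, begins with a UTC timestamp of the form `2025-06-24T20:29:05.268917Z node N :COMPONENT SEVERITY: file.cpp:line: message`. A minimal sketch, assuming only that timestamp prefix as a delimiter (the function name is illustrative, not part of any YDB tooling), that re-splits such a blob into one entry per line for easier grepping:

```python
import re

# Entries such as "2025-06-24T20:29:05.268917Z node 9 :FLAT_TX_SCHEMESHARD TRACE: ..."
# always start with a UTC timestamp followed by " node ", so the timestamp works
# as an entry delimiter. Lines without a timestamp (e.g. "FAKE_COORDINATOR: ...")
# stay attached to the entry that preceded them.
TS = re.compile(r"(?=\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z node )")

def split_entries(blob: str) -> list[str]:
    """Split a newline-stripped log blob into individual timestamped entries."""
    return [part.strip() for part in TS.split(blob) if part.strip()]

# Usage: print one entry per line.
# for entry in split_entries(open("test_err.log").read()):
#     print(entry)
```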
2025-06-24T20:29:05.268917Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 274137603, Sender [9:210:2210], Recipient [9:127:2151]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 } 2025-06-24T20:29:05.268963Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5035: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-24T20:29:05.269041Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:29:05.269130Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:29:05.269163Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:29:05.269195Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-24T20:29:05.269230Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T20:29:05.269328Z node 9 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-24T20:29:05.269387Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T20:29:05.269660Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435084, Sender [9:127:2151], Recipient [9:127:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvCleanDroppedPaths 2025-06-24T20:29:05.269706Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5123: StateWork, processing event TEvPrivate::TEvCleanDroppedPaths 2025-06-24T20:29:05.269792Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T20:29:05.269857Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T20:29:05.269950Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:29:05.272654Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:29:05.273674Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:29:05.273720Z node 9 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:29:05.274792Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:29:05.274837Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:29:05.274942Z node 9 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T20:29:05.275242Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T20:29:05.275311Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T20:29:05.275800Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [9:461:2413], Recipient [9:127:2151]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:29:05.275885Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:29:05.275940Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T20:29:05.276136Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124996, Sender [9:408:2360], Recipient [9:127:2151]: NKikimrScheme.TEvNotifyTxCompletion TxId: 102 2025-06-24T20:29:05.276178Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4964: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-24T20:29:05.276274Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T20:29:05.276401Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:29:05.276466Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [9:459:2411] 2025-06-24T20:29:05.276685Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [9:461:2413], Recipient [9:127:2151]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:29:05.276729Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:29:05.276780Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 2025-06-24T20:29:05.277256Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [9:462:2414], Recipient [9:127:2151]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Replication" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-24T20:29:05.277322Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 
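The ack sequence above (`Publication in-flight, count: 2` → `count: 1` → `Publication complete, notify & remove`) is what the test is effectively waiting on before it reports `TestWaitNotification: OK`. A small sketch under the same formatting assumption, following that countdown per txId once the blob has been split into entries; the helper name and the per-txId map are illustrative:

```python
import re
from collections import defaultdict

IN_FLIGHT = re.compile(r"Publication in-flight, count: (\d+), at schemeshard: \d+, txId: (\d+)")
COMPLETE = re.compile(r"Publication complete, notify & remove, at schemeshard: \d+, txId: (\d+)")

def publication_progress(entries):
    """Map txId -> list of in-flight counts, appending 0 once the publication completes."""
    counts = defaultdict(list)
    for entry in entries:
        m = IN_FLIGHT.search(entry)
        if m:
            counts[m.group(2)].append(int(m.group(1)))
        m = COMPLETE.search(entry)
        if m:
            counts[m.group(1)].append(0)
    return counts

# For the block above this yields {"102": [2, 1, 0]}: two path versions published,
# then the publication marked complete with no remaining subscribers.
```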
2025-06-24T20:29:05.277450Z node 9 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Replication" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:29:05.277684Z node 9 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Replication" took 245us result status StatusPathDoesNotExist 2025-06-24T20:29:05.277874Z node 9 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Replication\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Replication" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::DeclareAndDefine [GOOD] Test command err: 2025-06-24T20:28:55.505566Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615581087236648:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:28:55.505640Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003998/r3tmp/tmp2GVVTd/pdisk_1.dat 2025-06-24T20:28:56.307049Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:56.452305Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:28:56.452392Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:28:56.456596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:28:56.583370Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19304 WaitRootIsUp 'dc-1'... 
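Interleaved with the schemeshard traces are the runner's own markers: `------- [TM] {...} <path>/unittest >> Suite::Test [GOOD] Test command err:` opens a per-test stderr block, and bare `>> Suite::Test [GOOD]` lines report verdicts as they arrive (a `>> Suite::Test` line with no bracketed verdict is still pending). A sketch, assuming only that marker shape, that tallies the verdict strings found in the raw output; the names are illustrative:

```python
import re
from collections import Counter

# Matches ">> TStorageTenantTest::DeclareAndDefine [GOOD]" and similar markers.
VERDICT = re.compile(r">> (?P<test>[\w:]+::\w+) \[(?P<verdict>[A-Z]+)\]")

def tally_verdicts(raw: str) -> Counter:
    """Count how many tests finished with each verdict in a raw ya-make test log."""
    return Counter(m.group("verdict") for m in VERDICT.finditer(raw))

# In this fragment, TStorageTenantTest::DeclareAndDefine, TStorageTenantTest::GenericCases,
# TReplicationTests::CopyReplicatedTable and TTransferTests::CreateWrongFlushIntervalIsBig
# all contribute GOOD; Yq_1::Basic_Null appears without a verdict and is not counted.
```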
TClient::Ls request: dc-1 2025-06-24T20:28:56.771345Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519615581087236844:2117] Handle TEvNavigate describe path dc-1 2025-06-24T20:28:56.807948Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519615585382204626:2440] HANDLE EvNavigateScheme dc-1 2025-06-24T20:28:56.808093Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615585382204163:2130], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:28:56.808166Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519615585382204607:2433][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519615585382204163:2130], cookie# 1 2025-06-24T20:28:56.820193Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615585382204611:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615585382204608:2433], cookie# 1 2025-06-24T20:28:56.820283Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615585382204612:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615585382204609:2433], cookie# 1 2025-06-24T20:28:56.820302Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615585382204613:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615585382204610:2433], cookie# 1 2025-06-24T20:28:56.820335Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615581087236558:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615585382204611:2433], cookie# 1 2025-06-24T20:28:56.820366Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615581087236561:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615585382204612:2433], cookie# 1 2025-06-24T20:28:56.820386Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615581087236564:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615585382204613:2433], cookie# 1 2025-06-24T20:28:56.820417Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615585382204611:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615581087236558:2050], cookie# 1 2025-06-24T20:28:56.820433Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615585382204612:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615581087236561:2053], cookie# 1 2025-06-24T20:28:56.820447Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615585382204613:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615581087236564:2056], cookie# 1 2025-06-24T20:28:56.822273Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615585382204607:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615585382204608:2433], cookie# 1 2025-06-24T20:28:56.822321Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: 
[main][1:7519615585382204607:2433][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:28:56.822351Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615585382204607:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615585382204609:2433], cookie# 1 2025-06-24T20:28:56.822364Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519615585382204607:2433][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:28:56.822379Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615585382204607:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615585382204610:2433], cookie# 1 2025-06-24T20:28:56.822408Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519615585382204607:2433][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:28:56.822497Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519615585382204163:2130], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T20:28:56.829984Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519615585382204163:2130], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519615585382204607:2433] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:28:56.832813Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615585382204163:2130], cacheItem# { Subscriber: { Subscriber: [1:7519615585382204607:2433] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-24T20:28:56.835809Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519615585382204627:2441], recipient# [1:7519615585382204626:2440], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T20:28:56.835886Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# 
[1:7519615585382204626:2440] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T20:28:56.881150Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519615585382204626:2440] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-24T20:28:56.884864Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519615585382204626:2440] Handle TEvDescribeSchemeResult Forward to# [1:7519615585382204625:2439] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T20:28:56.911671Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519615581087236844:2117] Handle TEvProposeTransaction 2025-06-24T20:28:56.911708Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:75196155810 ... 
nder# [1:7519615602562074618:3033] 2025-06-24T20:29:00.978076Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615581087236564:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519615602562074624:3034] 2025-06-24T20:29:00.978120Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519615585382204163:2130], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-24T20:29:00.978174Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519615585382204163:2130], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [1:7519615602562074604:3032] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:29:00.978272Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615585382204163:2130], cacheItem# { Subscriber: { Subscriber: [1:7519615602562074604:3032] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:00.978302Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519615585382204163:2130], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-24T20:29:00.978357Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519615585382204163:2130], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [1:7519615602562074605:3033] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:29:00.978407Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615585382204163:2130], cacheItem# { Subscriber: { Subscriber: [1:7519615602562074605:3033] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:00.978465Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519615585382204163:2130], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: 
/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 } 2025-06-24T20:29:00.978505Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519615585382204163:2130], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [1:7519615602562074606:3034] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:29:00.978552Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615585382204163:2130], cacheItem# { Subscriber: { Subscriber: [1:7519615602562074606:3034] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:00.978660Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519615602562074625:3035], recipient# [1:7519615602562074599:2296], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:00.978726Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519615602562074626:3036], recipient# [1:7519615602562074603:2297], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:01.508207Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615585382204163:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:01.508356Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615585382204163:2130], cacheItem# { Subscriber: { Subscriber: [1:7519615585382204616:2436] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 
Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:01.508472Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519615606857041941:3039], recipient# [1:7519615606857041940:2298], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:01.542978Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615585382204163:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:01.543102Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615585382204163:2130], cacheItem# { Subscriber: { Subscriber: [1:7519615585382204616:2436] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:01.543208Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519615606857041943:3040], recipient# [1:7519615606857041942:2299], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:01.983696Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615585382204163:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:01.983969Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615585382204163:2130], cacheItem# { Subscriber: { Subscriber: [1:7519615602562074606:3034] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: 
StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:01.984117Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519615606857041948:3044], recipient# [1:7519615606857041947:2300], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_replication/unittest >> TReplicationTests::CopyReplicatedTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:28:52.580016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:52.580100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:52.580139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:52.580175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:52.580216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:52.580246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:52.580331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:52.580404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:52.581132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:52.581440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:52.667964Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:28:52.668014Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:52.684890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:52.685339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:52.685524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:52.694230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:52.694406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:52.695021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:52.695350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:52.697882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:52.698008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:52.698864Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:52.698929Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:52.699072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:52.699103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:52.699131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:52.699215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:52.705342Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:28:52.827049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:52.827288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:52.827609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: 
IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:52.827656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:52.827889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:52.827955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:52.830983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:52.831214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:52.831408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:52.831456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:52.831496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:52.831531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:52.840430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:52.840515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:52.840574Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:52.842775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:52.842829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:52.842882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:52.842929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:52.846875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 
MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:52.850281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:52.850568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:52.851664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:52.851810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:52.851864Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:52.852203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:52.852267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:52.852443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:52.852533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28:52.855327Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:52.855376Z node 1 :FLAT_TX_SCHEMESHARD ... 
Id: 102 } 2025-06-24T20:29:05.885547Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 408 RawX2: 34359740743 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T20:29:05.886167Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-24T20:29:05.887088Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 408 RawX2: 34359740743 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T20:29:05.903569Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T20:29:05.904248Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 408 RawX2: 34359740743 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T20:29:05.904889Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 1, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T20:29:05.905476Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1055: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged CollectSchemaChanged: false 2025-06-24T20:29:05.928060Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:29:05.933708Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:29:05.960789Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 34359740662 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T20:29:05.960839Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T20:29:05.960929Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 34359740662 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T20:29:05.960966Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T20:29:05.961027Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 309 RawX2: 
34359740662 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T20:29:05.961072Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T20:29:05.961346Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:29:05.961383Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T20:29:05.961653Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T20:29:05.961680Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T20:29:05.975923Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:29:05.976075Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:29:05.976318Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:93: TCopyTable::TWaitCopyTableBarrier operationId: 102:0ProgressState, operation type TxCopyTable 2025-06-24T20:29:05.976498Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1060: Set barrier, OperationId: 102:0, name: CopyTableBarrier, done: 0, blocked: 1, parts count: 1 2025-06-24T20:29:05.976858Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1104: All parts have reached barrier, tx: 102, done: 0, blocked: 1 2025-06-24T20:29:05.977407Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:76: TCopyTable::TWaitCopyTableBarrier operationId: 102:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 102 Name: CopyTableBarrier }, at tablet# 72057594046678944 2025-06-24T20:29:05.977608Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 240 -> 240 2025-06-24T20:29:05.991998Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:29:05.992074Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T20:29:05.992225Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T20:29:05.992268Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:29:05.992320Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T20:29:05.992377Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:29:05.992433Z node 8 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T20:29:05.992517Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [8:337:2314] message: TxId: 102 2025-06-24T20:29:05.992785Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:29:05.992969Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T20:29:05.993137Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T20:29:05.993504Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T20:29:05.993705Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T20:29:06.009381Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:29:06.009700Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [8:439:2398] TestWaitNotification: OK eventTxId 102 2025-06-24T20:29:06.016225Z node 8 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/CopyTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:29:06.017461Z node 8 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/CopyTable" took 1.01ms result status StatusSuccess 2025-06-24T20:29:06.018965Z node 8 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/CopyTable" PathDescription { Self { Name: "CopyTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "CopyTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 
WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> Yq_1::CreateConnection_With_Existing_Name >> TTransferTests::CreateWrongFlushIntervalIsBig [GOOD] >> Yq_1::Basic_Null |87.9%| [TA] $(B)/ydb/core/tx/schemeshard/ut_replication/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::GenericCases [GOOD] Test command err: 2025-06-24T20:28:55.739340Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615579664351867:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:28:55.739397Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003965/r3tmp/tmpzxXCLd/pdisk_1.dat 2025-06-24T20:28:56.419795Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:56.531556Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:28:56.531676Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:28:56.550028Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:28:56.795953Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6540 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T20:28:56.827410Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519615579664352052:2117] Handle TEvNavigate describe path dc-1 2025-06-24T20:28:56.874229Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519615583959319833:2438] HANDLE EvNavigateScheme dc-1 2025-06-24T20:28:56.874370Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615583959319374:2131], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:28:56.874470Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519615583959319815:2432][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519615583959319374:2131], cookie# 1 2025-06-24T20:28:56.876317Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615583959319819:2432][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615583959319816:2432], cookie# 1 2025-06-24T20:28:56.876356Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615583959319820:2432][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615583959319817:2432], cookie# 1 2025-06-24T20:28:56.876370Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615583959319821:2432][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615583959319818:2432], cookie# 1 2025-06-24T20:28:56.876401Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615579664351765:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615583959319819:2432], cookie# 1 2025-06-24T20:28:56.876425Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615579664351768:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615583959319820:2432], cookie# 1 2025-06-24T20:28:56.876454Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615579664351771:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615583959319821:2432], cookie# 1 2025-06-24T20:28:56.876495Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615583959319819:2432][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615579664351765:2050], cookie# 1 2025-06-24T20:28:56.876510Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615583959319820:2432][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615579664351768:2053], cookie# 1 2025-06-24T20:28:56.876522Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615583959319821:2432][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615579664351771:2056], cookie# 1 2025-06-24T20:28:56.876555Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615583959319815:2432][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615583959319816:2432], cookie# 1 2025-06-24T20:28:56.876578Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: 
[main][1:7519615583959319815:2432][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:28:56.876593Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615583959319815:2432][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615583959319817:2432], cookie# 1 2025-06-24T20:28:56.876603Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519615583959319815:2432][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:28:56.876615Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615583959319815:2432][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615583959319818:2432], cookie# 1 2025-06-24T20:28:56.876647Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519615583959319815:2432][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:28:56.876704Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519615583959319374:2131], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T20:28:56.894843Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519615583959319374:2131], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519615583959319815:2432] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:28:56.894988Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615583959319374:2131], cacheItem# { Subscriber: { Subscriber: [1:7519615583959319815:2432] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-24T20:28:56.897642Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519615583959319834:2439], recipient# [1:7519615583959319833:2438], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T20:28:56.897708Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# 
[1:7519615583959319833:2438] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T20:28:56.939384Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519615583959319833:2438] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-24T20:28:56.942770Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519615583959319833:2438] Handle TEvDescribeSchemeResult Forward to# [1:7519615583959319832:2437] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T20:28:56.971131Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519615579664352052:2117] Handle TEvProposeTransaction 2025-06-24T20:28:56.971179Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:751961557966 ... 
480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:02.853184Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519615609729124621:3152], recipient# [1:7519615609729124620:2305], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:03.409935Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615583959319374:2131], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:03.410044Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615583959319374:2131], cacheItem# { Subscriber: { Subscriber: [1:7519615605434157258:3138] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:03.410115Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519615614024091934:3156], recipient# [1:7519615614024091933:2306], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:03.734804Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615583959319374:2131], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:03.735572Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615583959319374:2131], cacheItem# { Subscriber: { Subscriber: 
[1:7519615583959319823:2434] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:03.735642Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519615614024091939:3157], recipient# [1:7519615614024091938:2307], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:03.856195Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615583959319374:2131], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:03.856611Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615583959319374:2131], cacheItem# { Subscriber: { Subscriber: [1:7519615583959319823:2434] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:03.857160Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519615614024091941:3158], recipient# [1:7519615614024091940:2308], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:04.419307Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615583959319374:2131], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:04.419426Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615583959319374:2131], cacheItem# { Subscriber: { Subscriber: 
[1:7519615605434157258:3138] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:04.419510Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519615618319059254:3162], recipient# [1:7519615618319059253:2309], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:04.755910Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615583959319374:2131], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:04.756020Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615583959319374:2131], cacheItem# { Subscriber: { Subscriber: [1:7519615583959319823:2434] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:04.756129Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519615618319059259:3163], recipient# [1:7519615618319059258:2310], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:04.863514Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615583959319374:2131], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:04.863614Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615583959319374:2131], cacheItem# { 
Subscriber: { Subscriber: [1:7519615583959319823:2434] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:04.863697Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519615618319059261:3164], recipient# [1:7519615618319059260:2311], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> Yq_1::Basic >> Yq_1::CreateQuery_With_Idempotency >> TTransferTests::ConsistencyLevel [GOOD] >> TTransferTests::Alter |87.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut >> PrivateApi::PingTask |87.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_transfer/unittest >> TTransferTests::CreateWrongFlushIntervalIsBig [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:28:55.519883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:55.519975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:55.520015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:55.520064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:55.520111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:55.520142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:55.520210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:55.520299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 
600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:55.521035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:55.521395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:55.609330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:28:55.609390Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:55.627104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:55.627602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:55.627769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:55.671429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:55.671714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:55.672405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:55.672812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:55.683790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:55.684034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:55.685367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:55.685462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:55.685732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:55.685782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:55.685825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:55.685923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:55.702404Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:28:55.836829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: 
"pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:55.837093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:55.837366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:55.837427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:55.837686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:55.837760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:55.840355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:55.840563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:55.840763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:55.840813Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:55.840855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:55.840893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:55.842923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:55.842986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:55.843028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:55.845458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:55.845509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:55.845556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-24T20:28:55.845616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:55.849021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:55.852268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:55.852463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:55.853507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:55.853660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:55.853711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:55.854052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:55.854105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:55.854278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:55.854354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28:55.856967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:55.857014Z node 1 :FLAT_TX_SCHEMESHARD ... 
:309:2294], Recipient [6:126:2150]: NKikimrTxColumnShard.TEvNotifyTxCompletionResult Origin: 72075186233409546 TxId: 101 2025-06-24T20:29:08.177925Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4994: StateWork, processing event TEvColumnShard::TEvNotifyTxCompletionResult 2025-06-24T20:29:08.178004Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6226: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 101 2025-06-24T20:29:08.178051Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-24T20:29:08.178202Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 101 2025-06-24T20:29:08.178380Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T20:29:08.183812Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:29:08.183878Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:29:08.183920Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 101:0 2025-06-24T20:29:08.184066Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [6:126:2150], Recipient [6:126:2150]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:08.184096Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:08.184147Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:29:08.184184Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T20:29:08.184322Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T20:29:08.184358Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T20:29:08.184397Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:29:08.184443Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T20:29:08.184480Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:29:08.184523Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-24T20:29:08.184607Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [6:343:2319] message: TxId: 101 2025-06-24T20:29:08.184660Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready 
parts: 1/1 2025-06-24T20:29:08.184701Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T20:29:08.184739Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T20:29:08.184899Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T20:29:08.191769Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:29:08.191887Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [6:343:2319] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 101 at schemeshard: 72057594046678944 2025-06-24T20:29:08.192122Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T20:29:08.192171Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [6:344:2320] 2025-06-24T20:29:08.192392Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [6:346:2322], Recipient [6:126:2150]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:29:08.192440Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:29:08.192484Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-24T20:29:08.193317Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [6:390:2359], Recipient [6:126:2150]: {TEvModifySchemeTransaction txid# 102 TabletId# 72057594046678944} 2025-06-24T20:29:08.193383Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T20:29:08.195969Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTransfer Replication { Name: "Transfer" Config { TransferSpecific { Target { SrcPath: "/MyRoot1/Table" DstPath: "/MyRoot/Table" } Batching { FlushIntervalMilliSeconds: 86400001 } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:29:08.197469Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_replication.cpp:349: [72057594046678944] TCreateReplication Propose: opId# 102:0, path# /MyRoot/Transfer 2025-06-24T20:29:08.197593Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Flush interval must be less than or equal to 24 hours, at schemeshard: 72057594046678944 2025-06-24T20:29:08.197868Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T20:29:08.204056Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Flush interval must be less than or equal to 24 
hours" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:29:08.204357Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Flush interval must be less than or equal to 24 hours, operation: CREATE TRANSFER, path: /MyRoot/Transfer 2025-06-24T20:29:08.204420Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T20:29:08.204712Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T20:29:08.204771Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T20:29:08.205213Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [6:396:2365], Recipient [6:126:2150]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:29:08.205288Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:29:08.205329Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T20:29:08.205478Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124996, Sender [6:343:2319], Recipient [6:126:2150]: NKikimrScheme.TEvNotifyTxCompletion TxId: 102 2025-06-24T20:29:08.205511Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4964: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-24T20:29:08.205596Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T20:29:08.205710Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:29:08.205751Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [6:394:2363] 2025-06-24T20:29:08.205933Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [6:396:2365], Recipient [6:126:2150]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:29:08.205967Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:29:08.206007Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 2025-06-24T20:29:08.206364Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [6:397:2366], Recipient [6:126:2150]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Transfer" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-24T20:29:08.206431Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T20:29:08.206553Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/Transfer" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:29:08.206802Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Transfer" took 238us result status StatusPathDoesNotExist 2025-06-24T20:29:08.206983Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Transfer\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Transfer" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |87.9%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_replication/test-results/unittest/{meta.json ... results_accumulator.log} |87.9%| [LD] {RESULT} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-true [GOOD] >> TStorageTenantTest::CopyTableAndConcurrentSplit [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:28:50.800069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:50.800169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.800240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:50.800291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:50.800345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:50.800374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:50.800464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:50.800537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:50.801372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:50.801765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:50.890400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:28:50.890458Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:50.906429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:50.906839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:50.907022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:50.913414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:50.913551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:50.914022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:50.914261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:50.916895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.917054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:50.918112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:50.918187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:50.918423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:50.918471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:50.918514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:50.918602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:50.925244Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary 
subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:28:51.090661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:51.090929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.091260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:51.091320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:51.091543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:51.091619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:51.096951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.097171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:51.097388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.097449Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:51.097500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:51.097541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:51.103986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.104052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:51.104098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:51.113704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-24T20:28:51.113780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.113834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.113912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:51.129194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:51.132030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:51.132284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:51.133337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.133491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:51.133552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.133848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:51.133930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.134100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:51.134169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28:51.141086Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:51.141145Z node 1 :FLAT_TX_SCHEMESHARD ... 
HARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 116, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-24T20:29:11.894570Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 116, path id: [OwnerId: 72075186233409546, LocalPathId: 9] 2025-06-24T20:29:11.894684Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-24T20:29:11.894754Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:732:2632], at schemeshard: 72075186233409546, txId: 116, path id: 1 2025-06-24T20:29:11.894813Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:732:2632], at schemeshard: 72075186233409546, txId: 116, path id: 9 2025-06-24T20:29:11.895536Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 116:0, at schemeshard: 72075186233409546 2025-06-24T20:29:11.895602Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 116:0 ProgressState, operation type: TxCreateTable, at tablet# 72075186233409546 2025-06-24T20:29:11.895861Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:358: TCreateParts opId# 116:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72075186233409546 OwnerIdx: 11 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 2 } ObjectId: 9 BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 2 } 2025-06-24T20:29:11.896464Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 3 LocalPathId: 1 Version: 16 PathOwnerId: 72075186233409546, cookie: 116 2025-06-24T20:29:11.896568Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 3 LocalPathId: 1 Version: 16 PathOwnerId: 72075186233409546, cookie: 116 2025-06-24T20:29:11.896623Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 116 2025-06-24T20:29:11.896669Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 116, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 16 2025-06-24T20:29:11.896725Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 13 2025-06-24T20:29:11.897640Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 3 LocalPathId: 9 Version: 1 PathOwnerId: 72075186233409546, cookie: 116 2025-06-24T20:29:11.897710Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: 
TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 3 LocalPathId: 9 Version: 1 PathOwnerId: 72075186233409546, cookie: 116 2025-06-24T20:29:11.897737Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 116 2025-06-24T20:29:11.897767Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 116, pathId: [OwnerId: 72075186233409546, LocalPathId: 9], version: 1 2025-06-24T20:29:11.897796Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 9] was 4 2025-06-24T20:29:11.897860Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 116, ready parts: 0/1, is published: true 2025-06-24T20:29:11.900251Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 116:0 from tablet: 72075186233409546 to tablet: 72057594037968897 cookie: 72075186233409546:11 msg type: 268697601 2025-06-24T20:29:11.900394Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 116, partId: 0, tablet: 72057594037968897 2025-06-24T20:29:11.900441Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1810: TOperation RegisterRelationByShardIdx, TxId: 116, shardIdx: 72075186233409546:11, partId: 0 2025-06-24T20:29:11.901248Z node 7 :HIVE INFO: tablet_helpers.cpp:1181: [72057594037968897] TEvCreateTablet, msg: Owner: 72075186233409546 OwnerIdx: 11 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 2 } ObjectId: 9 BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 2 } 2025-06-24T20:29:11.901482Z node 7 :HIVE INFO: tablet_helpers.cpp:1245: [72057594037968897] TEvCreateTablet, Owner 72075186233409546, OwnerIdx 11, type DataShard, boot OK, tablet id 72075186233409556 2025-06-24T20:29:11.901693Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5947: Handle TEvCreateTabletReply at schemeshard: 72075186233409546 message: Status: OK Owner: 72075186233409546 OwnerIdx: 11 TabletID: 72075186233409556 Origin: 72057594037968897 2025-06-24T20:29:11.901747Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation FindRelatedPartByShardIdx, TxId: 116, shardIdx: 72075186233409546:11, partId: 0 2025-06-24T20:29:11.901886Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 116:0, at schemeshard: 72075186233409546, message: Status: OK Owner: 72075186233409546 OwnerIdx: 11 TabletID: 72075186233409556 Origin: 72057594037968897 2025-06-24T20:29:11.901967Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:176: TCreateParts opId# 116:0 HandleReply TEvCreateTabletReply, at tabletId: 72075186233409546 2025-06-24T20:29:11.902060Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:179: TCreateParts opId# 116:0 HandleReply TEvCreateTabletReply, message: Status: OK Owner: 72075186233409546 OwnerIdx: 11 TabletID: 72075186233409556 Origin: 72057594037968897 2025-06-24T20:29:11.902160Z node 7 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2559: Change state for txid 116:0 2 -> 3 2025-06-24T20:29:11.903553Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 116 2025-06-24T20:29:11.903718Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 116 2025-06-24T20:29:11.906322Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 116:0, at schemeshard: 72075186233409546 2025-06-24T20:29:11.908327Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 116:0, at schemeshard: 72075186233409546 2025-06-24T20:29:11.908888Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_table.cpp:200: TCreateTable TConfigureParts operationId# 116:0 ProgressState at tabletId# 72075186233409546 2025-06-24T20:29:11.909481Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:220: TCreateTable TConfigureParts operationId# 116:0 ProgressState Propose modify scheme on datashard datashardId: 72075186233409556 seqNo: 3:8 2025-06-24T20:29:11.911780Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:236: TCreateTable TConfigureParts operationId# 116:0 ProgressState Propose modify scheme on datashard datashardId: 72075186233409556 message: TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 680 RawX2: 30064773663 } TxBody: "\n\236\004\n\007Table11\020\t\032\r\n\003key\030\002 \001(\000@\000\032\020\n\005Value\030\200$ \002(\000@\000(\001:\262\003\022\253\003\010\200\200\200\002\020\254\002\030\364\003 \200\200\200\010(\0000\200\200\200 8\200\200\200\010@\2008H\000RX\010\000\020\000\030\010 \010(\200\200\200@0\377\377\377\377\0178\001B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen1P\nX\200\200\001`nh\000p\000Rb\010\001\020\200\200\200\024\030\005 \020(\200\200\200\200\0020\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen2P\nX\200\200\001`nh\200\200\200\004p\200\200\200\004Rc\010\002\020\200\200\200\310\001\030\005 \020(\200\200\200\200@0\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen3P\nX\200\200\001`nh\200\200\200(p\200\200\200(X\001`\005j$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionr\017compaction_gen0z\017compaction_gen0\202\001\004scan\210\001\200\200\200\010\220\001\364\003\230\0012\270\001\2008\300\001\006R\002\020\001J\026/MyRoot/USER_0/Table11\242\001\006\001\000\000\000\000\200\252\001\000\260\001\001\270\001\000\210\002\001\222\002\013\t\n\000\220\000\000\020\000\001\020\t:\004\010\003\020\010" TxId: 116 ExecLevel: 0 Flags: 0 SchemeShardId: 72075186233409546 ProcessingParams { Version: 3 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } SubDomainPathId: 1 2025-06-24T20:29:11.925661Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 116:0 from tablet: 72075186233409546 to tablet: 72075186233409556 cookie: 72075186233409546:11 msg type: 269549568 2025-06-24T20:29:11.926377Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 116, partId: 
0, tablet: 72075186233409556 TestModificationResult got TxId: 116, wait until txId: 116 TestModificationResults wait txId: 117 2025-06-24T20:29:12.050947Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_0" OperationType: ESchemeOpCreateTable CreateTable { Name: "Table12" Columns { Name: "key" Type: "Uint32" } Columns { Name: "Value" Type: "Utf8" } KeyColumnNames: "key" } } TxId: 117 TabletId: 72075186233409546 , at schemeshard: 72075186233409546 2025-06-24T20:29:12.054478Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 117, response: Status: StatusQuotaExceeded Reason: "Request exceeded a limit on the number of schema operations, try again later." TxId: 117 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-24T20:29:12.054908Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 117, database: /MyRoot/USER_0, subject: , status: StatusQuotaExceeded, reason: Request exceeded a limit on the number of schema operations, try again later., operation: CREATE TABLE, path: /MyRoot/USER_0/Table12 TestModificationResult got TxId: 117, wait until txId: 117 |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> TYardTest::TestLogWriteCutEqual [GOOD] >> TYardTest::TestLogWriteCutEqualRandomWait ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CopyTableAndConcurrentSplit [GOOD] Test command err: 2025-06-24T20:28:55.026947Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615578838750223:2210];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:28:55.027012Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039bb/r3tmp/tmpbzwKOq/pdisk_1.dat 2025-06-24T20:28:55.552085Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:28:55.552204Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:28:55.557170Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:55.568448Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13575 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T20:28:55.817585Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519615578838750274:2116] Handle TEvNavigate describe path dc-1 2025-06-24T20:28:55.841260Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519615578838750737:2429] HANDLE EvNavigateScheme dc-1 2025-06-24T20:28:55.841396Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615578838750300:2130], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:28:55.841426Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519615578838750300:2130], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-24T20:28:55.841617Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519615578838750738:2430][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:28:55.843896Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519615574543782698:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519615578838750742:2430] 2025-06-24T20:28:55.843966Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519615574543782698:2049] Subscribe: subscriber# [1:7519615578838750742:2430], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:28:55.844027Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519615574543782701:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519615578838750743:2430] 2025-06-24T20:28:55.844044Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519615574543782701:2052] Subscribe: subscriber# [1:7519615578838750743:2430], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:28:55.844067Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519615574543782704:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519615578838750744:2430] 2025-06-24T20:28:55.844082Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519615574543782704:2055] Subscribe: subscriber# [1:7519615578838750744:2430], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:28:55.844121Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519615578838750742:2430][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615574543782698:2049] 2025-06-24T20:28:55.844165Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519615578838750743:2430][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615574543782701:2052] 2025-06-24T20:28:55.844189Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519615578838750744:2430][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615574543782704:2055] 2025-06-24T20:28:55.844236Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: 
[main][1:7519615578838750738:2430][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615578838750739:2430] 2025-06-24T20:28:55.844268Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519615578838750738:2430][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615578838750740:2430] 2025-06-24T20:28:55.844331Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519615578838750738:2430][/dc-1] Set up state: owner# [1:7519615578838750300:2130], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:28:55.844452Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519615578838750738:2430][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615578838750741:2430] 2025-06-24T20:28:55.844500Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519615578838750738:2430][/dc-1] Path was already updated: owner# [1:7519615578838750300:2130], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:28:55.844540Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615578838750742:2430][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615578838750739:2430], cookie# 1 2025-06-24T20:28:55.844555Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615578838750743:2430][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615578838750740:2430], cookie# 1 2025-06-24T20:28:55.844569Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615578838750744:2430][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615578838750741:2430], cookie# 1 2025-06-24T20:28:55.844599Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615574543782698:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519615578838750742:2430] 2025-06-24T20:28:55.844645Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615574543782698:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615578838750742:2430], cookie# 1 2025-06-24T20:28:55.844678Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615574543782701:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519615578838750743:2430] 2025-06-24T20:28:55.844697Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615574543782701:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615578838750743:2430], cookie# 1 2025-06-24T20:28:55.844713Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615574543782704:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519615578838750744:2430] 
2025-06-24T20:28:55.844727Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615574543782704:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615578838750744:2430], cookie# 1 2025-06-24T20:28:55.847232Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615578838750742:2430][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615574543782698:2049], cookie# 1 2025-06-24T20:28:55.847266Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615578838750743:2430][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615574543782701:2052], cookie# 1 2025-06-24T20:28:55.847295Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615578838750744:2430][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615574543782704:2055], cookie# 1 2025-06-24T20:28:55.847327Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615578838750738:2430][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615578838750739:2430], cookie# 1 2025-06-24T20:28:55.847366Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519615578838750738:2430][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:28:55.847386Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615578838750738:2430][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615578838750740:2430], cookie# 1 2025-06-24T20:28:55.847397Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519615578838750738:2430][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:28:55.847409Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615578838750738:2430][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615578838750741:2430], cookie# 1 2025-06-24T20:28:55.847434Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519615578838750738:2430][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:28:55.904863Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519615578838750300:2130], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 
200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Data ... 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:10.246497Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519615642436244762:2760], recipient# [4:7519615642436244760:2340], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:10.246639Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:29:10.511648Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519615599486570793:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:10.511760Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519615599486570793:2109], cacheItem# { Subscriber: { Subscriber: [4:7519615638141277430:2750] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:10.512087Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519615599486570793:2109], cacheItem# { Subscriber: { Subscriber: [4:7519615638141277431:2751] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: 
false Partial: 0 } 2025-06-24T20:29:10.512720Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519615642436244763:2761], recipient# [4:7519615638141277426:2335], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:10.516587Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:7519615638141277426:2335], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:29:11.245486Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519615599486570793:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:11.245578Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519615599486570793:2109], cacheItem# { Subscriber: { Subscriber: [4:7519615638141277430:2750] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:11.245617Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519615599486570793:2109], cacheItem# { Subscriber: { Subscriber: [4:7519615638141277431:2751] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:11.245661Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519615599486570793:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:11.245731Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519615599486570793:2109], cacheItem# { Subscriber: { Subscriber: [4:7519615603781538455:2342] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo 
}, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:11.245766Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519615599486570793:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:11.245810Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519615599486570793:2109], cacheItem# { Subscriber: { Subscriber: [4:7519615638141277433:2753] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:11.245866Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519615646731212063:2762], recipient# [4:7519615638141277426:2335], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:11.245909Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519615646731212064:2763], recipient# [4:7519615646731212061:2341], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:11.245939Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519615646731212065:2764], recipient# [4:7519615646731212062:2342], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:11.246342Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:7519615638141277426:2335], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:29:11.246469Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; >> TInterconnectTest::TestCrossConnect [GOOD] |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest >> TInterconnectTest::TestManyEventsWithReconnect >> Sharding::XXUsage >> Sharding::XXUsage [GOOD] >> TTransferTests::Alter [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest >> Sharding::XXUsage [GOOD] Test command err: 2865397997825499194 1649023207403899303 909013562825850868 7201223470256597761 2267473837310157707 11947650537982585993 12568442826584460683 7288151640938152200 13214453100711282600 8629651860627115771 13589239153090479527 18242045530461954106 15752905914277594648 3559124316294962897 13847021369492626086 12700886757760142925 16945054259601516301 7511482479018136689 11346063335320548505 1090586296517175420 11600933043584302347 14127878442584930586 6225185466783120262 4315863869814574796 8015748288314335531 6442930255141068622 9199472602652279751 11111345963115464354 14707636174201661920 18270251457069575918 10116856886307834793 10432653728132690579 11836737778469218661 11431530881135024953 10325890137615021551 5125002744768050039 2508052638655939467 974752213156838339 17441327050259872846 13940135469183137679 17552047500025573814 11238000814704302280 10178038168628967530 11264020744949824160 4597364651565266150 17483934405102407657 10309951052161163581 752366733063705305 6214830592767535102 11738663044317480836 7674613650421074157 9616000879802319826 4812962856018850749 9271709336368307094 7366984393740591077 4596324992319746398 10826662350408295983 7658007961291494056 2203197662762389973 16888915609583923014 2865397997825499194 16596107091067199952 12046146610050994480 10064612833499959786 8350180275438075825 4056203507557425070 17512523816873226029 11953324670668286625 8868881213553857001 2341501299353233737 10464417414901951369 732462200727397916 8028516483887087832 9138470217491443070 7566224042881078760 2666924672213776608 4206493774857233822 6981557604514290795 818724823379332747 1694450281102550562 14678481893083507874 13245206414591816112 7638383067359831197 8425627889989971873 14447577754270341021 4748101351376377848 2198740298522298320 5137277087570613681 12195398431746077686 5768570360462736565 >> TDataShardTrace::TestTraceWriteImmediateOnShard [GOOD] >> TInterconnectTest::TestManyEventsWithReconnect [GOOD] >> TInterconnectTest::TestEventWithPayloadSerialization ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_transfer/unittest >> TTransferTests::Alter [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:28:55.505285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, 
MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:55.505378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:55.505421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:55.505459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:55.505522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:55.505565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:55.505643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:55.505724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:55.506541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:55.506935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:55.602948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:28:55.603034Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:55.622738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:55.627515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:55.627726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:55.637178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:55.637446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:55.638201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:55.638634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:55.642348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:55.642601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:55.643902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:55.643970Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:55.644109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:55.644160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:55.644211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:55.644382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:55.652235Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:28:55.804100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:55.804341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:55.804615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:55.804675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:55.804948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:55.805035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:55.807696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:55.807980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:55.808222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:55.808277Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 
2025-06-24T20:28:55.808310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:55.808343Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:55.811362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:55.811423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:55.811462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:55.813640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:55.813687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:55.813732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:55.813795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:55.817256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:55.819633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:55.819817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:55.821011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:55.821159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:55.821206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:55.821546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:55.821605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose 
HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:55.821782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:55.821856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28:55.824383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:55.824429Z node 1 :FLAT_TX_SCHEMESHARD ... lterReplication TConfigureParts opId# 104:0 HandleReply NKikimrReplication.TEvAlterReplicationResult OperationId { TxId: 104 PartId: 0 } Origin: 72075186233409547 Status: SUCCESS 2025-06-24T20:29:16.061650Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 104:0 3 -> 128 2025-06-24T20:29:16.062267Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T20:29:16.062547Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:694: Ack tablet strongly msg opId: 104:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:3 2025-06-24T20:29:16.087907Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T20:29:16.088274Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:29:16.088589Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 104:0 2025-06-24T20:29:16.089346Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [6:126:2150], Recipient [6:126:2150]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:16.089381Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:16.089435Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T20:29:16.089474Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_replication.cpp:189: [72057594046678944] TAlterReplication TPropose opId# 104:0 ProgressState 2025-06-24T20:29:16.089809Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T20:29:16.090136Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 104 ready parts: 1/1 2025-06-24T20:29:16.090852Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 104 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:29:16.101339Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 
2025-06-24T20:29:16.101423Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 104:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:104 msg type: 269090816 2025-06-24T20:29:16.101870Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 104, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 104 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 104 at step: 5000005 2025-06-24T20:29:16.102438Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269287424, Sender [6:137:2158], Recipient [6:259:2248] 2025-06-24T20:29:16.103054Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4971: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T20:29:16.103681Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:29:16.104648Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 25769805934 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:29:16.104974Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_replication.cpp:203: [72057594046678944] TAlterReplication TPropose opId# 104:0 HandleReply TEvOperationPlan: step# 5000005 2025-06-24T20:29:16.105737Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 104:0 128 -> 240 2025-06-24T20:29:16.111586Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T20:29:16.111722Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T20:29:16.111830Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:694: Ack tablet strongly msg opId: 104:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:104 2025-06-24T20:29:16.119970Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:29:16.120044Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:384: Ack coordinator stepId#5000005 first txId#104 countTxs#1 2025-06-24T20:29:16.120117Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:354: Ack mediator stepId#5000005 2025-06-24T20:29:16.120155Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 104:0 2025-06-24T20:29:16.120394Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [6:126:2150], Recipient [6:126:2150]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:16.120429Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:16.120499Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard 
DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:29:16.120544Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T20:29:16.120798Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:29:16.120852Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [6:208:2208], at schemeshard: 72057594046678944, txId: 104, path id: 3 FAKE_COORDINATOR: Erasing txId 104 2025-06-24T20:29:16.121352Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T20:29:16.121428Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-24T20:29:16.121555Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T20:29:16.121593Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T20:29:16.121630Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T20:29:16.121676Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T20:29:16.121709Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T20:29:16.121749Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-24T20:29:16.121791Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T20:29:16.121836Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:0 2025-06-24T20:29:16.121871Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:0 2025-06-24T20:29:16.122004Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T20:29:16.122053Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 104, publications: 1, subscribers: 0 2025-06-24T20:29:16.122088Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 4 2025-06-24T20:29:16.122773Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 274137603, Sender [6:208:2208], Recipient [6:126:2150]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 3] Version: 4 } 2025-06-24T20:29:16.122814Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5035: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-24T20:29:16.122907Z node 6 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T20:29:16.122999Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T20:29:16.123044Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-24T20:29:16.124816Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-24T20:29:16.125211Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T20:29:16.125894Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-24T20:29:16.126244Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T20:29:16.147133Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:29:16.148234Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T20:29:16.148584Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 |87.9%| [TA] $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceWriteImmediateOnShard [GOOD] Test command err: 2025-06-24T20:29:11.597534Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:29:11.598513Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:11.598906Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00492d/r3tmp/tmpdQYEBS/pdisk_1.dat 2025-06-24T20:29:13.290773Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:29:13.349988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:29:13.477489Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:29:13.549302Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750796942309137 != 1750796942309141 2025-06-24T20:29:13.661549Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:29:13.661714Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:29:13.676243Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:29:13.780746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:29:15.016481Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest >> TInterconnectTest::TestEventWithPayloadSerialization [GOOD] |87.9%| [TA] $(B)/ydb/core/tx/schemeshard/ut_transfer/test-results/unittest/{meta.json ... 
results_accumulator.log} |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest >> IncrementalBackup::SimpleRestoreBackupCollection+WithIncremental [FAIL] >> IncrementalBackup::SimpleRestoreBackupCollection-WithIncremental |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest >> IncrementalBackup::SimpleBackup [GOOD] >> Initializer::Simple >> IncrementalBackup::MultiRestore >> TDataShardTrace::TestTraceDistributedUpsert+UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestEventWithPayloadSerialization [GOOD] Test command err: Starting iteration 0 Starting iteration 1 Starting iteration 2 Starting iteration 3 Starting iteration 4 Starting iteration 5 Starting iteration 6 Starting iteration 7 Starting iteration 8 Starting iteration 9 Starting iteration 10 Starting iteration 11 Starting iteration 12 Starting iteration 13 Starting iteration 14 Starting iteration 15 Starting iteration 16 Starting iteration 17 Starting iteration 18 Starting iteration 19 Starting iteration 20 Starting iteration 21 Starting iteration 22 Starting iteration 23 Starting iteration 24 Starting iteration 25 Starting iteration 26 Starting iteration 27 Starting iteration 28 Starting iteration 29 Starting iteration 30 Starting iteration 31 Starting iteration 32 Starting iteration 33 Starting iteration 34 Starting iteration 35 Starting iteration 36 Starting iteration 37 Starting iteration 38 Starting iteration 39 Starting iteration 40 Starting iteration 41 Starting iteration 42 Starting iteration 43 Starting iteration 44 Starting iteration 45 Starting iteration 46 Starting iteration 47 Starting iteration 48 Starting iteration 49 0 0 0 1 0 3 0 7 0 15 0 31 0 63 0 127 0 255 0 511 0 1023 0 2047 0 4095 0 8191 0 16383 0 32767 0 65535 1 0 1 1 1 3 1 7 1 15 1 31 1 63 1 127 1 255 1 511 1 1023 1 2047 1 4095 1 8191 1 16383 1 32767 1 65535 3 0 3 1 3 3 3 7 3 15 3 31 3 63 3 127 3 255 3 511 3 1023 3 2047 3 4095 3 8191 3 16383 3 32767 3 65535 7 0 7 1 7 3 7 7 7 15 7 31 7 63 7 127 7 255 7 511 7 1023 7 2047 7 4095 7 8191 7 16383 7 32767 7 65535 15 0 15 1 15 3 15 7 15 15 15 31 15 63 15 127 15 255 15 511 15 1023 15 2047 15 4095 15 8191 15 16383 15 32767 15 65535 31 0 31 1 31 3 31 7 31 15 31 31 31 63 31 127 31 255 31 511 31 1023 31 2047 31 4095 31 8191 31 16383 31 32767 31 65535 63 0 63 1 63 3 63 7 63 15 63 31 63 63 63 127 63 255 63 511 63 1023 63 2047 63 4095 63 8191 63 16383 63 32767 63 65535 127 0 127 1 127 3 127 7 127 15 127 31 127 63 127 127 127 255 127 511 127 1023 127 2047 127 4095 127 8191 127 16383 127 32767 127 65535 255 0 255 1 255 3 255 7 255 15 255 31 255 63 255 127 255 255 255 511 255 1023 255 2047 255 4095 255 8191 255 16383 255 32767 255 65535 511 0 511 1 511 3 511 7 511 15 511 31 511 63 511 127 511 255 511 511 511 1023 511 2047 511 4095 511 8191 511 16383 511 32767 511 65535 1023 0 1023 1 1023 3 1023 7 1023 15 1023 31 1023 63 1023 127 1023 255 1023 511 1023 1023 1023 2047 1023 4095 1023 8191 1023 16383 1023 32767 1023 65535 2047 0 2047 1 2047 3 2047 7 2047 15 2047 31 2047 63 2047 127 2047 255 2047 511 2047 1023 2047 2047 2047 4095 2047 8191 2047 16383 2047 32767 2047 65535 4095 0 4095 1 4095 3 4095 7 4095 15 4095 31 4095 63 4095 127 4095 255 4095 511 4095 1023 4095 2047 4095 4095 4095 8191 4095 16383 4095 32767 4095 65535 8191 0 8191 1 8191 3 8191 7 8191 15 8191 31 8191 63 8191 127 8191 255 8191 511 8191 1023 8191 2047 8191 4095 8191 8191 8191 16383 8191 32767 8191 65535 16383 0 16383 1 16383 3 
16383 7 16383 15 16383 31 16383 63 16383 127 16383 255 16383 511 16383 1023 16383 2047 16383 4095 16383 8191 16383 16383 16383 32767 16383 65535 32767 0 32767 1 32767 3 32767 7 32767 15 32767 31 32767 63 32767 127 32767 255 32767 511 32767 1023 32767 2047 32767 4095 32767 8191 32767 16383 32767 32767 32767 65535 65535 0 65535 1 65535 3 65535 7 65535 15 65535 31 65535 63 65535 127 65535 255 65535 511 65535 1023 65535 2047 65535 4095 65535 8191 65535 16383 65535 32767 65535 65535 >> IncrementalBackup::BackupRestore [GOOD] >> IncrementalBackup::ComplexRestoreBackupCollection+WithIncremental ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedUpsert+UseSink [GOOD] Test command err: 2025-06-24T20:29:13.824826Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:29:13.825194Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:13.825368Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0048f3/r3tmp/tmp2a4kks/pdisk_1.dat 2025-06-24T20:29:15.129963Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:29:15.133367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:29:15.170410Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:29:15.174775Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750796942471289 != 1750796942471293 2025-06-24T20:29:15.223522Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:29:15.223694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:29:15.235444Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:29:15.317444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:29:16.065060Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:29:19.341734Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:886:2728], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:29:19.341866Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:897:2733], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:29:19.341983Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:29:19.348181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:29:19.375456Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-24T20:29:19.560714Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:900:2736], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T20:29:19.908301Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:962:2778] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:29:21.215120Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyht2zkb7b9dfdh2xh58xkrh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmY5NzZlODItMWNlZTFhNy00ODY5NTg5OS1hMzFkNDE4Ng==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Trace: (Session.query.QUERY_ACTION_EXECUTE -> [(CompileService -> [(CompileActor)]) , (DataExecuter -> [(WaitForTableResolve) , (ComputeActor -> [(ForwardWriteActor)]) , (RunTasks) , (Commit -> [(Datashard.WriteTransaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWriteResult)]) , (Datashard.WriteTransaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWriteResult)])])])]) >> TDataShardTrace::TestTraceDistributedSelectViaReadActors [GOOD] |87.9%| [TA] $(B)/ydb/core/tx/sharding/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.9%| [TA] $(B)/ydb/core/actorlib_impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TPDiskTest::TestStartEncryptedOrPlainAndRestart [FAIL] >> TPDiskUtil::AtomicBlockCounterFunctional [GOOD] >> TPDiskUtil::AtomicBlockCounterSeqno [GOOD] >> TPDiskUtil::Light >> TPDiskUtil::Light [GOOD] >> TPDiskUtil::LightOverflow [GOOD] >> TPDiskUtil::DriveEstimator >> TDatabaseResolverTests::ClickHouseNative >> TPDiskRaces::KillOwnerWhileDecommittingWithInflight [GOOD] >> TPDiskRaces::KillOwnerWhileDecommittingWithInflightMock ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedSelectViaReadActors [GOOD] Test command err: 2025-06-24T20:29:12.745795Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:29:12.748444Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:12.749639Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0048f6/r3tmp/tmp8p4bO1/pdisk_1.dat 2025-06-24T20:29:14.190528Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:29:14.219328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:29:14.372483Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:29:14.416018Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750796942276503 != 1750796942276507 2025-06-24T20:29:14.539211Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:29:14.540894Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:29:14.566741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:29:14.735440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:29:15.498995Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:29:20.115337Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:886:2728], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:29:20.115487Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:897:2733], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:29:20.115592Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:29:20.145611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:29:20.293474Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-24T20:29:20.545975Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:900:2736], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T20:29:21.095092Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:962:2778] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:29:23.500958Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyht30baf9fq7y6jtrh3g0ax, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmMzZmFiYmYtODgzMzdmNzMtYjYzNmUxOWItMWQ2YTRhYmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:29:24.329375Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyht33wmbekztzewbmcvm99s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjQyYzJhZGItZmU0OTE2Ni1iZmNjNGUzLWI5YmEyNDM5, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:29:25.081300Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyht34g2edaskbyypba5rgfr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTkzMDA1ZjktYWVjZmY5MzEtZjZjOGUyNzEtYThmZTQyNWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCreateClean [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestReboot >> IncrementalBackup::SimpleRestore [GOOD] >> IncrementalBackup::SimpleBackupBackupCollection+WithIncremental >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDrop [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDropIndex >> TDatabaseResolverTests::ClickHouseNative [GOOD] >> TDatabaseResolverTests::ClickHouseHttp >> TDatabaseResolverTests::ResolveTwoDataStreamsFirstError >> TDatabaseResolverTests::ClickHouseHttp [GOOD] >> TDataShardTrace::TestTraceDistributedSelect [GOOD] >> TDatabaseResolverTests::ResolveTwoDataStreamsFirstError [GOOD] >> TDatabaseResolverTests::DataStreams_Serverless >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanWithRetry [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanManyTables >> TDatabaseResolverTests::DataStreams_Serverless [GOOD] >> TDatabaseResolverTests::DataStreams_PermissionDenied >> TDatabaseResolverTests::Greenplum_MasterNode >> TDatabaseResolverTests::DataStreams_PermissionDenied [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::ResolveTwoDataStreamsFirstError [GOOD] Test command err: 2025-06-24T20:29:33.932025Z node 1 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed DataStreams database with id etn021us5r9rhld1vgb1 via HTTP request to: endpoint 'ydbc.ydb.cloud.yandex.net:8789', url '/ydbc/cloud-prod/database?databaseId=etn021us5r9rhld1vgb1': Status: 404 Response body: {"message":"Database not found"} >> TDatabaseResolverTests::Greenplum_MasterNode [GOOD] >> TDatabaseResolverTests::Greenplum_PermissionDenied >> TDatabaseResolverTests::Greenplum_PermissionDenied [GOOD] >> 
TDatabaseResolverTests::MySQL >> TDatabaseResolverTests::MySQL [GOOD] >> TDatabaseResolverTests::MySQL_PermissionDenied |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::ClickHouseHttp [GOOD] |87.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut |87.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::DataStreams_PermissionDenied [GOOD] Test command err: 2025-06-24T20:29:34.611519Z node 2 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed DataStreams database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'ydbc.ydb.cloud.yandex.net:8789', url '/ydbc/cloud-prod/database?databaseId=etn021us5r9rhld1vgbh': you have no permission to resolve database id into database endpoint. >> TDatabaseResolverTests::DataStreams_Dedicated >> TDatabaseResolverTests::DataStreams_Dedicated [GOOD] >> TDatabaseResolverTests::ClickHouse_PermissionDenied |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::Greenplum_PermissionDenied [GOOD] Test command err: 2025-06-24T20:29:35.169936Z node 2 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed Greenplum database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'mdb.api.cloud.yandex.net:443', url '/managed-greenplum/v1/clusters/etn021us5r9rhld1vgbh/master-hosts': you have no permission to resolve database id into database endpoint. >> TDatabaseResolverTests::Ydb_Dedicated >> TDatabaseResolverTests::ClickHouse_PermissionDenied [GOOD] >> test.py::test[solomon-HistResponse-default.txt] [GOOD] >> test.py::test[solomon-InvalidProject-] >> TDatabaseResolverTests::Ydb_Dedicated [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedSelect [GOOD] Test command err: 2025-06-24T20:29:15.576643Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:29:15.579947Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:15.581039Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004954/r3tmp/tmpnNcv2d/pdisk_1.dat 2025-06-24T20:29:17.566925Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:29:17.580871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:29:17.737418Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:29:17.767413Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750796942783778 != 1750796942783782 2025-06-24T20:29:17.859162Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:29:17.859319Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:29:17.873580Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:29:17.984419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:29:19.312002Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:29:22.313335Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:886:2728], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:29:22.313485Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:897:2733], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:29:22.313582Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:29:22.348686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:29:22.432294Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-24T20:29:22.802681Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:900:2736], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T20:29:23.132230Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:962:2778] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:29:26.186157Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyht32g6deeaz352kadt49x7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzZhYmJkNzgtZDAyYzYyNzgtMjk3Y2UyZjUtOTMxYjk1Nzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:29:26.804325Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyht36becax4phwvsys9tx46, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzVlMTlmZTctNTJjOWI2NDEtYjNiZmEzNjctNGE2Y2Q4ODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:29:31.531183Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyht38pc8pk30vg01xj0430c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODY0NjIwNTItMjM3MDMyOGUtYjY1ODA0NmYtYjk2Y2Y2NjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root |87.9%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/test-results/unittest/{meta.json ... results_accumulator.log} |87.9%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_transfer/test-results/unittest/{meta.json ... results_accumulator.log} |87.9%| [TA] {RESULT} $(B)/ydb/core/tx/sharding/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.9%| [TA] {RESULT} $(B)/ydb/core/actorlib_impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.9%| [LD] {RESULT} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut >> TDataShardTrace::TestTraceDistributedUpsert-UseSink [GOOD] >> TDatabaseResolverTests::MySQL_PermissionDenied [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::ClickHouse_PermissionDenied [GOOD] Test command err: 2025-06-24T20:29:37.022259Z node 2 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed ClickHouse database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'mdb.api.cloud.yandex.net:443', url '/managed-clickhouse/v1/clusters/etn021us5r9rhld1vgbh/hosts': you have no permission to resolve database id into database endpoint. Please check that your service account has role `managed-clickhouse.viewer`. 
|87.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::Ydb_Dedicated [GOOD] >> IncrementalBackup::SimpleRestoreBackupCollection-WithIncremental [GOOD] >> TDatabaseResolverTests::PostgreSQL |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedUpsert-UseSink [GOOD] Test command err: 2025-06-24T20:29:17.449356Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:29:17.452607Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:17.454154Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0048f9/r3tmp/tmpLNhvPv/pdisk_1.dat 2025-06-24T20:29:21.244321Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:29:21.367920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:29:21.694506Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:29:21.770416Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750796942204816 != 1750796942204820 2025-06-24T20:29:21.960820Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:29:21.961342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:29:21.980277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:29:22.388098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:29:26.961152Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:29:33.787861Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:886:2728], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:29:33.787994Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:897:2733], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:29:33.788119Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:29:33.802210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:29:33.878642Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-24T20:29:34.121545Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:900:2736], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T20:29:34.310059Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:962:2778] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:29:36.398731Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyht3dpse4d0r7pt3y78fqfy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzRhOTFjNjEtZDVmYWY2N2QtNGI1OTc0NjQtMTA4MWU3ZDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Trace: (Session.query.QUERY_ACTION_EXECUTE -> [(CompileService -> [(CompileActor)]) , (LiteralExecuter) , (DataExecuter -> [(WaitForTableResolve) , (RunTasks) , (Datashard.Transaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendResult)]) , (Datashard.Transaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendResult)])])]) >> TDatabaseResolverTests::PostgreSQL [GOOD] >> TDatabaseResolverTests::PostgreSQL_PermissionDenied >> TDatabaseResolverTests::Ydb_Serverless >> TDatabaseResolverTests::Ydb_Serverless [GOOD] >> TDatabaseResolverTests::PostgreSQL_PermissionDenied [GOOD] |88.0%| [TA] $(B)/ydb/core/tx/datashard/ut_trace/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::MySQL_PermissionDenied [GOOD] Test command err: 2025-06-24T20:29:37.773552Z node 2 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed MySQL database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'mdb.api.cloud.yandex.net:443', url '/managed-mysql/v1/clusters/etn021us5r9rhld1vgbh/hosts': you have no permission to resolve database id into database endpoint. 
>> TDatabaseResolverTests::Ydb_Serverless_Timeout |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::Ydb_Serverless [GOOD] |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::PostgreSQL_PermissionDenied [GOOD] Test command err: 2025-06-24T20:29:39.840050Z node 2 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed PostgreSQL database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'mdb.api.cloud.yandex.net:443', url '/managed-postgresql/v1/clusters/etn021us5r9rhld1vgbh/hosts': you have no permission to resolve database id into database endpoint. Please check that your service account has role `managed-postgresql.viewer`. |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> TDatabaseResolverTests::Ydb_Serverless_Timeout [GOOD] |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::Ydb_Serverless_Timeout [GOOD] Test command err: 2025-06-24T20:29:41.992277Z node 1 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed Ydb database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'ydbc.ydb.cloud.yandex.net:8789', url '/ydbc/cloud-prod/database?databaseId=etn021us5r9rhld1vgbh': Connection timeout |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest >> BSCRestartPDisk::RestartOneByOneWithReconnects [GOOD] |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest >> TYardTest::TestStartingPointReboots [GOOD] >> TYardTest::TestRestartAtNonceJump |88.0%| [TA] $(B)/ydb/core/fq/libs/actors/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartOneByOneWithReconnects [GOOD] Test command err: RandomSeed# 15964626652064844076 >> TStorageTenantTest::RemoveStoragePoolAndCreateOneMore [GOOD] |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest |88.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat |88.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat |88.0%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_trace/test-results/unittest/{meta.json ... results_accumulator.log} |88.0%| [TA] {RESULT} $(B)/ydb/core/fq/libs/actors/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.0%| [LD] {RESULT} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest >> IncrementalBackup::SimpleBackupBackupCollection+WithIncremental [GOOD] >> IncrementalBackup::SimpleBackupBackupCollection-WithIncremental >> QueryStats::Ranges [GOOD] |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest >> Yq_1::DeleteConnections [GOOD] >> Yq_1::Create_And_Modify_The_Same_Connection >> BSCRestartPDisk::RestartOneByOne [GOOD] |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> IncrementalBackup::E2EBackupCollection >> IncrementalBackup::ComplexRestoreBackupCollection+WithIncremental [FAIL] >> IncrementalBackup::ComplexRestoreBackupCollection-WithIncremental |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest >> QueryStats::Ranges [GOOD] |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::RemoveStoragePoolAndCreateOneMore [GOOD] Test command err: 2025-06-24T20:28:55.366906Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615580771118860:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:28:55.366953Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00399a/r3tmp/tmpVWG1Sn/pdisk_1.dat 2025-06-24T20:28:55.912005Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:55.927045Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:28:55.928503Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:28:55.938410Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30870 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T20:28:56.201895Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519615580771119057:2130] Handle TEvNavigate describe path dc-1 2025-06-24T20:28:56.227688Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519615585066086813:2441] HANDLE EvNavigateScheme dc-1 2025-06-24T20:28:56.227877Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615580771119087:2145], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:28:56.227930Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519615580771119087:2145], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-24T20:28:56.228137Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519615585066086814:2442][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:28:56.229930Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519615580771118759:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519615585066086818:2442] 2025-06-24T20:28:56.229983Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519615580771118759:2051] Subscribe: subscriber# [1:7519615585066086818:2442], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:28:56.230040Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519615580771118762:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519615585066086819:2442] 2025-06-24T20:28:56.230055Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519615580771118762:2054] Subscribe: subscriber# [1:7519615585066086819:2442], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:28:56.230077Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519615580771118765:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519615585066086820:2442] 2025-06-24T20:28:56.230092Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519615580771118765:2057] Subscribe: subscriber# [1:7519615585066086820:2442], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:28:56.230517Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519615585066086818:2442][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615580771118759:2051] 2025-06-24T20:28:56.230543Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519615585066086819:2442][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615580771118762:2054] 2025-06-24T20:28:56.230592Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519615585066086820:2442][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615580771118765:2057] 2025-06-24T20:28:56.230639Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: 
[main][1:7519615585066086814:2442][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615585066086815:2442] 2025-06-24T20:28:56.230670Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519615585066086814:2442][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615585066086816:2442] 2025-06-24T20:28:56.230748Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519615585066086814:2442][/dc-1] Set up state: owner# [1:7519615580771119087:2145], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:28:56.230851Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519615585066086814:2442][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519615585066086817:2442] 2025-06-24T20:28:56.230895Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519615585066086814:2442][/dc-1] Path was already updated: owner# [1:7519615580771119087:2145], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:28:56.230929Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615585066086818:2442][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615585066086815:2442], cookie# 1 2025-06-24T20:28:56.230943Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615585066086819:2442][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615585066086816:2442], cookie# 1 2025-06-24T20:28:56.230955Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615585066086820:2442][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615585066086817:2442], cookie# 1 2025-06-24T20:28:56.230982Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615580771118759:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519615585066086818:2442] 2025-06-24T20:28:56.231009Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615580771118759:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615585066086818:2442], cookie# 1 2025-06-24T20:28:56.231041Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615580771118762:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519615585066086819:2442] 2025-06-24T20:28:56.231064Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615580771118762:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615585066086819:2442], cookie# 1 2025-06-24T20:28:56.231078Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519615580771118765:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519615585066086820:2442] 
2025-06-24T20:28:56.231090Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615580771118765:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615585066086820:2442], cookie# 1 2025-06-24T20:28:56.232306Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615585066086818:2442][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615580771118759:2051], cookie# 1 2025-06-24T20:28:56.232327Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615585066086819:2442][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615580771118762:2054], cookie# 1 2025-06-24T20:28:56.232341Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615585066086820:2442][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615580771118765:2057], cookie# 1 2025-06-24T20:28:56.232391Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615585066086814:2442][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615585066086815:2442], cookie# 1 2025-06-24T20:28:56.232419Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519615585066086814:2442][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:28:56.232439Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615585066086814:2442][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615585066086816:2442], cookie# 1 2025-06-24T20:28:56.232450Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519615585066086814:2442][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:28:56.232463Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615585066086814:2442][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615585066086817:2442], cookie# 1 2025-06-24T20:28:56.232485Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519615585066086814:2442][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:28:56.322413Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519615580771119087:2145], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 
200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Data ... metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:43.991533Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519615787989493295:5981], recipient# [3:7519615787989493294:4948], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:44.059513Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519615636324737077:2267], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:44.059625Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519615636324737077:2267], cacheItem# { Subscriber: { Subscriber: [2:7519615773763690632:2306] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:44.059682Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519615636324737077:2267], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:44.059735Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519615636324737077:2267], cacheItem# { Subscriber: { Subscriber: [2:7519615773763690632:2306] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:44.059770Z node 
2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519615636324737077:2267], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:44.059825Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519615636324737077:2267], cacheItem# { Subscriber: { Subscriber: [2:7519615773763690631:2305] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:44.059877Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519615790943559952:2361], recipient# [2:7519615790943559949:2499], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:44.063355Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519615790943559954:2363], recipient# [2:7519615790943559951:2501], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:44.064190Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519615790943559953:2362], recipient# [2:7519615790943559950:2500], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:45.061868Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519615636324737077:2267], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:45.061969Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# 
[2:7519615636324737077:2267], cacheItem# { Subscriber: { Subscriber: [2:7519615773763690632:2306] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:45.062033Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519615795238527252:2364], recipient# [2:7519615795238527251:2502], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:45.071517Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519615636324737077:2267], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:45.071615Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519615636324737077:2267], cacheItem# { Subscriber: { Subscriber: [2:7519615773763690632:2306] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:45.071661Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519615636324737077:2267], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:45.071720Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519615636324737077:2267], cacheItem# { Subscriber: { Subscriber: [2:7519615773763690631:2305] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath 
Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:45.071767Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519615795238527255:2365], recipient# [2:7519615795238527253:2503], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:45.071812Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519615795238527256:2366], recipient# [2:7519615795238527254:2504], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation |88.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation |88.0%| [LD] {RESULT} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation >> TPDiskRaces::KillOwnerWhileDecommittingWithInflightMock [GOOD] >> TPDiskRaces::OwnerRecreationRaces |88.0%| [TA] $(B)/ydb/core/sys_view/query_stats/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb |88.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb |88.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut |88.0%| [LD] {RESULT} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb |88.0%| [LD] {RESULT} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut |88.0%| [TA] {RESULT} $(B)/ydb/core/sys_view/query_stats/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut >> TStorageTenantTest::CreateDummyTabletsInDifferentDomains [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartOneByOne [GOOD] Test command err: RandomSeed# 17613855692673651240 |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> Yq_1::DescribeJob [FAIL] >> Yq_1::DescribeQuery |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest |88.0%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/test-results/unittest/{meta.json ... results_accumulator.log} >> TYardTest::TestRestartAtNonceJump [GOOD] >> TYardTest::TestRestartAtChunkEnd |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_backup/unittest >> IncrementalBackup::SimpleRestoreBackupCollection-WithIncremental [GOOD] Test command err: 2025-06-24T20:29:08.527768Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:29:08.528195Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:08.528388Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004027/r3tmp/tmp2sD20V/pdisk_1.dat 2025-06-24T20:29:09.117289Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:29:09.148269Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:555:2480], Recipient [1:371:2365]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:29:09.148848Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:29:09.149405Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T20:29:09.150593Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:552:2478], Recipient [1:371:2365]: {TEvModifySchemeTransaction txid# 1 TabletId# 72057594046644480} 2025-06-24T20:29:09.150856Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T20:29:10.119591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-24T20:29:10.121012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:10.122377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T20:29:10.126095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T20:29:10.127348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:29:10.128961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:29:10.139430Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:10.152422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T20:29:10.155269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T20:29:10.156015Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:10.156349Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-06-24T20:29:10.158509Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [1:371:2365], Recipient [1:371:2365]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:10.158844Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:10.159844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:10.160499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T20:29:10.160964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:29:10.160998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:29:10.161839Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:10.165639Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:10.166295Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-06-24T20:29:10.170866Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [1:371:2365], Recipient [1:371:2365]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:10.173941Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:10.174904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:10.175407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T20:29:10.175733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:29:10.176408Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:10.179370Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:10.179402Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 
2025-06-24T20:29:10.180517Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [1:371:2365], Recipient [1:371:2365]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:10.180550Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:10.180596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:10.180935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:10.181308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-24T20:29:10.181345Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:10.181378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:29:10.236399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:29:10.239628Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:10.239985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:29:10.242043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-24T20:29:10.250440Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877760, Sender [1:560:2485], Recipient [1:371:2365]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046316545 Status: OK ServerId: [1:562:2486] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-24T20:29:10.250498Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5046: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-24T20:29:10.250877Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5783: Handle TEvClientConnected, tabletId: 72057594046316545, status: OK, at schemeshard: 72057594046644480 2025-06-24T20:29:10.251598Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269091328, Sender [1:367:2361], Recipient [1:371:2365]: NKikimrTx.TEvProposeTransactionStatus Status: 16 StepId: 500 TxId: 1 2025-06-24T20:29:10.254735Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:564:2488], Recipient [1:371:2365]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:29:10.255317Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 
2025-06-24T20:29:10.255352Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T20:29:10.255972Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124996, Sender [1:552:2478], Recipient [1:371:2365]: NKikimrScheme.TEvNotifyTxCompletion TxId: 1 2025-06-24T20:29:10.256271Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4964: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-24T20:29:10.257129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-24T20:29:10.257425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-24T20:29:10.257682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-06-24T20:29:10.443058Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 273285138, Sender [1:44:2091], Recipient [1:371:2365]: ... _TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:29:37.184157Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T20:29:37.184299Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269551620, Sender [2:696:2567], Recipient [2:371:2365]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 696 RawX2: 8589937159 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-24T20:29:37.184336Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4983: StateWork, processing event TEvDataShard::TEvSchemaChanged 2025-06-24T20:29:37.184404Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046644480, at schemeshard: 72057594046644480, message: Source { RawX1: 696 RawX2: 8589937159 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-24T20:29:37.184443Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 281474976715662, tablet: 72075186224037888, partId: 0 2025-06-24T20:29:37.184681Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 281474976715662:0, at schemeshard: 72057594046644480, message: Source { RawX1: 696 RawX2: 8589937159 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-24T20:29:37.184774Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 281474976715662:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 2025-06-24T20:29:37.184884Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 281474976715662:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 696 RawX2: 8589937159 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-24T20:29:37.185089Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 
281474976715662:0, shardIdx: 72057594046644480:1, shard: 72075186224037888, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-24T20:29:37.185144Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-24T20:29:37.185196Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 281474976715662:0, datashard: 72075186224037889, at schemeshard: 72057594046644480 2025-06-24T20:29:37.185237Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 281474976715662:0, datashard: 72075186224037888, at schemeshard: 72057594046644480 2025-06-24T20:29:37.185269Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715662:0 129 -> 240 2025-06-24T20:29:37.185384Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:37.185882Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-24T20:29:37.185916Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:37.185947Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715662:0 2025-06-24T20:29:37.186039Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [2:905:2703] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-06-24T20:29:37.186098Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [2:696:2567] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-06-24T20:29:37.186200Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037888 state Ready 2025-06-24T20:29:37.186279Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T20:29:37.186450Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037889 state Ready 2025-06-24T20:29:37.186491Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-24T20:29:37.186631Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [2:371:2365], Recipient [2:371:2365]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:37.186664Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:37.186708Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-24T20:29:37.186761Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:93: TCopyTable::TWaitCopyTableBarrier operationId: 
281474976715662:0ProgressState, operation type TxCopyTable 2025-06-24T20:29:37.186826Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:37.186870Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1060: Set barrier, OperationId: 281474976715662:0, name: CopyTableBarrier, done: 0, blocked: 1, parts count: 1 2025-06-24T20:29:37.186919Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1104: All parts have reached barrier, tx: 281474976715662, done: 0, blocked: 1 2025-06-24T20:29:37.187014Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:76: TCopyTable::TWaitCopyTableBarrier operationId: 281474976715662:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 281474976715662 Name: CopyTableBarrier }, at tablet# 72057594046644480 2025-06-24T20:29:37.187056Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715662:0 240 -> 240 2025-06-24T20:29:37.187598Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:37.187635Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715662:0 2025-06-24T20:29:37.187763Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [2:371:2365], Recipient [2:371:2365]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:37.187802Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:37.187865Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-24T20:29:37.187909Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976715662:0 ProgressState 2025-06-24T20:29:37.188033Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:37.188069Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715662:0 progress is 1/1 2025-06-24T20:29:37.188104Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 1/1 2025-06-24T20:29:37.188153Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715662:0 progress is 1/1 2025-06-24T20:29:37.188192Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 1/1 2025-06-24T20:29:37.188233Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715662, ready parts: 1/1, is published: true 2025-06-24T20:29:37.188305Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:885:2687] message: TxId: 281474976715662 2025-06-24T20:29:37.188355Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 1/1 2025-06-24T20:29:37.188400Z node 
2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715662:0 2025-06-24T20:29:37.188433Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715662:0 2025-06-24T20:29:37.188590Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 11] was 3 2025-06-24T20:29:37.188630Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 6] was 3 2025-06-24T20:29:37.189073Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:37.189169Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [2:885:2687] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-06-24T20:29:37.189528Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [2:892:2693], Recipient [2:371:2365]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:29:37.189565Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:29:37.189594Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T20:29:37.556327Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [2:983:2762], serverId# [2:984:2763], sessionId# [0:0:0] 2025-06-24T20:29:37.557467Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyht3h1rfs95mgp8wf1grd5t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NGM2OTcwZjktOTAxZGNkYy03N2QxYzQ0Zi0yYzUyNmI0ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }, { items { uint32_value: 4 } items { uint32_value: 40 } }, { items { uint32_value: 5 } items { uint32_value: 50 } } |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CreateDummyTabletsInDifferentDomains [GOOD] Test command err: 2025-06-24T20:28:55.311908Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615578192478821:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:28:55.311961Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039c4/r3tmp/tmpIUGqJm/pdisk_1.dat 2025-06-24T20:28:55.785723Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519615580359977103:2249];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:28:55.785850Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:28:56.321716Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:28:56.483293Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:28:56.527389Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:28:56.556949Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:56.580470Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:28:56.580573Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:28:56.585609Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:28:56.585700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:28:56.597847Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:28:56.620374Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle 
TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:28:56.622437Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22841 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T20:28:57.020324Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519615578192478997:2143] Handle TEvNavigate describe path dc-1 2025-06-24T20:28:57.075885Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519615586782414050:2455] HANDLE EvNavigateScheme dc-1 2025-06-24T20:28:57.076321Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519615578192479032:2159], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:28:57.076407Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519615582487446591:2322][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519615578192479032:2159], cookie# 1 2025-06-24T20:28:57.091333Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615582487446610:2322][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615582487446607:2322], cookie# 1 2025-06-24T20:28:57.091416Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615582487446611:2322][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615582487446608:2322], cookie# 1 2025-06-24T20:28:57.091437Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519615582487446612:2322][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615582487446609:2322], cookie# 1 2025-06-24T20:28:57.091505Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615573897511378:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615582487446610:2322], cookie# 1 2025-06-24T20:28:57.091559Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615573897511381:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615582487446611:2322], cookie# 1 2025-06-24T20:28:57.091585Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519615573897511384:2058] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519615582487446612:2322], cookie# 1 2025-06-24T20:28:57.091616Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615582487446610:2322][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615573897511378:2052], cookie# 1 2025-06-24T20:28:57.091630Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615582487446611:2322][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615573897511381:2055], cookie# 1 2025-06-24T20:28:57.091642Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519615582487446612:2322][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615573897511384:2058], cookie# 1 2025-06-24T20:28:57.091680Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: 
[main][1:7519615582487446591:2322][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615582487446607:2322], cookie# 1 2025-06-24T20:28:57.091707Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519615582487446591:2322][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:28:57.091722Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615582487446591:2322][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615582487446608:2322], cookie# 1 2025-06-24T20:28:57.091733Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519615582487446591:2322][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:28:57.091746Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519615582487446591:2322][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519615582487446609:2322], cookie# 1 2025-06-24T20:28:57.091771Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519615582487446591:2322][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:28:57.091849Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519615578192479032:2159], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T20:28:57.129390Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519615578192479032:2159], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519615582487446591:2322] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:28:57.129551Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519615578192479032:2159], cacheItem# { Subscriber: { Subscriber: [1:7519615582487446591:2322] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-24T20:28:57.144385Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519615586782414051:2456], recipient# [1:7519615586782414050:2455], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 
PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T20:28:57.144487Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519615586782414050:2455] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T20:28:57.273797Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519615586782414050:2455] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-24T20:28:57.286030Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519615586782414050:2455] Handle TEvDescribeSchemeResult Forward to# [1:7519615586782414049:2454] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxP ... 
th RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:48.467029Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519615580359977107:2108], cacheItem# { Subscriber: { Subscriber: [2:7519615653374421221:2133] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:48.467247Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519615807993244014:2189], recipient# [2:7519615807993244013:2335], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:48.495433Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519615608482455148:2176], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:48.495563Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519615608482455148:2176], cacheItem# { Subscriber: { Subscriber: [3:7519615763101277970:2205] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:48.495617Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519615608482455148:2176], cacheItem# { Subscriber: { Subscriber: [3:7519615763101277971:2206] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable 
RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:48.495778Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519615806050951005:2230], recipient# [3:7519615806050951002:2485], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:48.845481Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519615608482455148:2176], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:48.845605Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519615608482455148:2176], cacheItem# { Subscriber: { Subscriber: [3:7519615771691212582:2209] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:48.845672Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519615608482455148:2176], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:48.845731Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519615608482455148:2176], cacheItem# { Subscriber: { Subscriber: [3:7519615771691212582:2209] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:48.845785Z node 3 :TX_PROXY_SCHEME_CACHE 
DEBUG: cache.cpp:265: Send result: self# [3:7519615806050951010:2231], recipient# [3:7519615806050951008:2486], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:48.845905Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519615806050951011:2232], recipient# [3:7519615806050951009:2487], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:49.371593Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519615580359977107:2108], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:49.371728Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519615580359977107:2108], cacheItem# { Subscriber: { Subscriber: [2:7519615584654944426:2113] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:49.371822Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519615812288211315:2190], recipient# [2:7519615812288211314:2336], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:49.475645Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519615580359977107:2108], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:29:49.475772Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519615580359977107:2108], cacheItem# { Subscriber: { Subscriber: [2:7519615653374421221:2133] DomainOwnerId: 72057594046644480 Type: 2 
SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:29:49.475848Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519615812288211318:2191], recipient# [2:7519615812288211317:2337], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest |88.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity |88.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity |88.1%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/test-results/unittest/{meta.json ... results_accumulator.log} |88.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> TBlobStorageWardenTest::TestCreatePDiskAndGroup >> TBlobStorageWardenTest::TestCreatePDiskAndEncryptedGroup |88.1%| [TA] $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/test-results/unittest/{meta.json ... results_accumulator.log} |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut |88.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanManyTables [GOOD] >> TSchemeshardBackgroundCleaningTest::CreateTableInTemp >> UpsertLoad::ShouldWriteDataBulkUpsertBatch >> IncrementalBackup::SimpleBackupBackupCollection-WithIncremental [GOOD] >> Yq_1::DescribeConnection [GOOD] >> Yq_1::DeleteQuery >> ReadLoad::ShouldReadKqp |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest |88.1%| [TA] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.1%| [LD] {RESULT} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut >> AggregateStatistics::ShouldBePings >> UpsertLoad::ShouldCreateTable >> AggregateStatistics::ShouldBePings [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_backup/unittest >> IncrementalBackup::SimpleBackupBackupCollection-WithIncremental [GOOD] Test command err: 2025-06-24T20:29:11.509515Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:29:11.523317Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:11.524490Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004039/r3tmp/tmpHeErPc/pdisk_1.dat 2025-06-24T20:29:13.879932Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:29:13.896153Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:555:2480], Recipient [1:371:2365]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:29:13.896773Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:29:13.897087Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T20:29:13.898721Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:552:2478], Recipient [1:371:2365]: {TEvModifySchemeTransaction txid# 1 TabletId# 72057594046644480} 2025-06-24T20:29:13.898789Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T20:29:15.003241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-24T20:29:15.003982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:15.004651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T20:29:15.005045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T20:29:15.005476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:29:15.005616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:29:15.005720Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:15.006400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T20:29:15.006630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T20:29:15.006688Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:15.006734Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-06-24T20:29:15.006969Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [1:371:2365], Recipient [1:371:2365]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:15.007010Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:15.007081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:15.007195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T20:29:15.007376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:29:15.007563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:29:15.008083Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:15.009390Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:15.009537Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-06-24T20:29:15.014241Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [1:371:2365], Recipient [1:371:2365]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:15.014301Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:15.014382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:15.014436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T20:29:15.014482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:29:15.014571Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:15.015070Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:15.015107Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 
2025-06-24T20:29:15.015254Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [1:371:2365], Recipient [1:371:2365]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:15.015288Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:15.015333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:15.015368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:15.015435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-24T20:29:15.015469Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:15.015508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:29:15.035984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:29:15.036576Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:15.036653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:29:15.036860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-24T20:29:15.038164Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877760, Sender [1:560:2485], Recipient [1:371:2365]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046316545 Status: OK ServerId: [1:562:2486] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-24T20:29:15.038214Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5046: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-24T20:29:15.038255Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5783: Handle TEvClientConnected, tabletId: 72057594046316545, status: OK, at schemeshard: 72057594046644480 2025-06-24T20:29:15.038365Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269091328, Sender [1:367:2361], Recipient [1:371:2365]: NKikimrTx.TEvProposeTransactionStatus Status: 16 StepId: 500 TxId: 1 2025-06-24T20:29:15.038792Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:564:2488], Recipient [1:371:2365]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:29:15.038845Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 
2025-06-24T20:29:15.038893Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T20:29:15.039030Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124996, Sender [1:552:2478], Recipient [1:371:2365]: NKikimrScheme.TEvNotifyTxCompletion TxId: 1 2025-06-24T20:29:15.039065Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4964: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-24T20:29:15.039130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-24T20:29:15.039282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-24T20:29:15.039321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-06-24T20:29:15.111159Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 273285138, Sender [1:44:2091], Recipient [1:371:2365]: ... awX1: 626 RawX2: 12884904418 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-24T20:29:57.092877Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 281474976715662, tablet: 72075186224037888, partId: 1 2025-06-24T20:29:57.092967Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 281474976715662:1, at schemeshard: 72057594046644480, message: Source { RawX1: 626 RawX2: 12884904418 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-24T20:29:57.093001Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 281474976715662:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 2025-06-24T20:29:57.093061Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 281474976715662:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 626 RawX2: 12884904418 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-24T20:29:57.093101Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715662:1, shardIdx: 72057594046644480:1, shard: 72075186224037888, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-24T20:29:57.093132Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 281474976715662:1, at schemeshard: 72057594046644480 2025-06-24T20:29:57.093482Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 281474976715662:1, datashard: 72075186224037889, at schemeshard: 72057594046644480 2025-06-24T20:29:57.093852Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 281474976715662:1, datashard: 72075186224037888, at schemeshard: 72057594046644480 2025-06-24T20:29:57.093885Z node 3 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2559: Change state for txid 281474976715662:1 129 -> 240 2025-06-24T20:29:57.094300Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:57.095222Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715662:1, at schemeshard: 72057594046644480 2025-06-24T20:29:57.095261Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:57.095296Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715662:1 2025-06-24T20:29:57.107191Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:891:2695] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-06-24T20:29:57.107583Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:626:2530] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-06-24T20:29:57.108166Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037889 state Ready 2025-06-24T20:29:57.108528Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-24T20:29:57.110003Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [3:372:2366], Recipient [3:372:2366]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:57.110039Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:57.110085Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715662:1, at schemeshard: 72057594046644480 2025-06-24T20:29:57.110136Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:93: TCopyTable::TWaitCopyTableBarrier operationId: 281474976715662:1ProgressState, operation type TxCopyTable 2025-06-24T20:29:57.110486Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:57.110830Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1060: Set barrier, OperationId: 281474976715662:1, name: CopyTableBarrier, done: 1, blocked: 1, parts count: 2 2025-06-24T20:29:57.123436Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1104: All parts have reached barrier, tx: 281474976715662, done: 1, blocked: 1 2025-06-24T20:29:57.124589Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:76: TCopyTable::TWaitCopyTableBarrier operationId: 281474976715662:1 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 281474976715662 Name: CopyTableBarrier }, at tablet# 72057594046644480 2025-06-24T20:29:57.124641Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715662:1 240 -> 240 2025-06-24T20:29:57.131663Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 
72075186224037888 state Ready 2025-06-24T20:29:57.131737Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T20:29:57.132142Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:57.132176Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715662:1 2025-06-24T20:29:57.132293Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [3:372:2366], Recipient [3:372:2366]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:57.132324Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:57.132362Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715662:1, at schemeshard: 72057594046644480 2025-06-24T20:29:57.132716Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976715662:1 ProgressState 2025-06-24T20:29:57.134146Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:57.134229Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715662:1 progress is 2/2 2025-06-24T20:29:57.134682Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 2/2 2025-06-24T20:29:57.135073Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715662:1 progress is 2/2 2025-06-24T20:29:57.135498Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 2/2 2025-06-24T20:29:57.135877Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715662, ready parts: 2/2, is published: true 2025-06-24T20:29:57.140066Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:861:2675] message: TxId: 281474976715662 2025-06-24T20:29:57.140824Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 2/2 2025-06-24T20:29:57.141184Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715662:0 2025-06-24T20:29:57.141597Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715662:0 2025-06-24T20:29:57.142311Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 2 2025-06-24T20:29:57.142349Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715662:1 2025-06-24T20:29:57.142368Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715662:1 2025-06-24T20:29:57.142784Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove 
txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 11] was 3 2025-06-24T20:29:57.142815Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-24T20:29:57.143783Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:57.143885Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:861:2675] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-06-24T20:29:57.144217Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:875:2682], Recipient [3:372:2366]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:29:57.144249Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:29:57.144271Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T20:29:57.855045Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [3:975:2760], serverId# [3:976:2761], sessionId# [0:0:0] 2025-06-24T20:29:57.855650Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyht44jcfre96y8e9cxhpm3n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjdhNzcxMzQtODRjOWNkMy1lOWU0Y2FjOC0xZjA3NTgwYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } } 2025-06-24T20:29:58.214495Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyht457sc7zzaf0pnph2n501, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MjEyZjI3YWMtNzI2Yzk1NjAtYjM0Zjc2OGItZWQ5OTNiNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } } >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkqlKeyFrom >> TYardTest::TestRestartAtChunkEnd [GOOD] >> TYardTestRestore::TestRestore15 >> AggregateStatistics::RootNodeShouldBeInvalidateByTimeout >> TBlobStorageGroupInfoBlobMapTest::BelongsToSubgroupBenchmark [GOOD] >> TBlobStorageGroupInfoBlobMapTest::BasicChecks >> AggregateStatistics::RootNodeShouldBeInvalidateByTimeout [GOOD] >> ReadLoad::ShouldReadIterate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBePings [GOOD] Test command err: 2025-06-24T20:30:02.980022Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-24T20:30:03.156595Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-24T20:30:03.315593Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-06-24T20:30:03.315711Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 2 2025-06-24T20:30:03.315749Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-24T20:30:03.316361Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:16:2056], server id = [0:0:0], tablet id = 1, status = ERROR 2025-06-24T20:30:03.316395Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:03.316460Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:19:2055], server id = [0:0:0], tablet id = 2, status = ERROR 2025-06-24T20:30:03.316477Z node 2 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:03.316532Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 2 2025-06-24T20:30:03.316564Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 >> UpsertLoad::ShouldWriteKqpUpsert >> UpsertLoad::ShouldWriteDataBulkUpsert >> Yq_1::CreateConnection_With_Existing_Name [GOOD] >> Yq_1::CreateConnections_With_Idempotency ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::RootNodeShouldBeInvalidateByTimeout [GOOD] Test command err: 2025-06-24T20:30:03.989482Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-24T20:30:04.017365Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [1:38:2058], tablet id = 1, status = OK 2025-06-24T20:30:04.026726Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:38:2058], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:04.032024Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-06-24T20:30:04.034680Z node 3 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-24T20:30:04.035652Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 
2025-06-24T20:30:04.036139Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [0:0:0], tablet id = 1, status = ERROR 2025-06-24T20:30:04.036497Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:04.043670Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:42:2057], server id = [3:42:2057], tablet id = 3, status = OK 2025-06-24T20:30:04.044845Z node 3 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [3:42:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:04.046427Z node 4 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 1, current Round: 0 2025-06-24T20:30:04.046505Z node 3 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-06-24T20:30:04.046929Z node 3 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-24T20:30:04.048431Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:42:2057], server id = [0:0:0], tablet id = 3, status = ERROR 2025-06-24T20:30:04.048548Z node 3 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:04.049686Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:47:2057], server id = [4:47:2057], tablet id = 4, status = OK 2025-06-24T20:30:04.049745Z node 4 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [4:47:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:04.051672Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 3 2025-06-24T20:30:04.051724Z node 4 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 4 2025-06-24T20:30:04.052407Z node 4 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T20:30:04.054531Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:47:2057], server id = [0:0:0], tablet id = 4, status = ERROR 2025-06-24T20:30:04.054561Z node 4 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:04.055832Z node 2 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 4 2025-06-24T20:30:04.068373Z node 4 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-24T20:30:04.069338Z node 4 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T20:30:04.069703Z node 3 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-24T20:30:04.069723Z node 3 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T20:30:04.088660Z node 2 :STATISTICS DEBUG: service_impl.cpp:401: Skip TEvKeepAliveTimeout 2025-06-24T20:30:04.089150Z node 1 :STATISTICS INFO: service_impl.cpp:416: Node 2 is unavailable 2025-06-24T20:30:04.089855Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-24T20:30:04.090624Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-24T20:30:04.090963Z node 1 :STATISTICS DEBUG: service_impl.cpp:393: Skip TEvKeepAliveTimeout 2025-06-24T20:30:04.091654Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the 
current 0 2025-06-24T20:30:04.091677Z node 1 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T20:30:04.092822Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-24T20:30:04.093514Z node 1 :STATISTICS DEBUG: service_impl.cpp:428: Skip TEvAggregateKeepAlive >> AggregateStatistics::ShouldBeCorrectlyAggregateStatisticsFromAllNodes >> AggregateStatistics::ShouldBeCcorrectProcessingOfLocalTablets >> IncrementalBackup::ComplexRestoreBackupCollection-WithIncremental [GOOD] >> AggregateStatistics::ShouldBeCcorrectProcessingOfLocalTablets [GOOD] >> UpsertLoad::ShouldWriteKqpUpsertKeyFrom >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql >> UpsertLoad::ShouldWriteKqpUpsert2 >> AggregateStatistics::ShouldBeCorrectlyAggregateStatisticsFromAllNodes [GOOD] >> TYardTestRestore::TestRestore15 [GOOD] >> AggregateStatistics::ChildNodesShouldBeInvalidateByTimeout ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBeCcorrectProcessingOfLocalTablets [GOOD] Test command err: 2025-06-24T20:30:07.930356Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-24T20:30:07.930629Z node 1 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 1, client id = [1:9:2056], server id = [1:9:2056], tablet id = 2 2025-06-24T20:30:07.930656Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 2 is not local. 2025-06-24T20:30:07.930718Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-06-24T20:30:07.954003Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:8:2055], server id = [1:8:2055], tablet id = 1, status = ERROR 2025-06-24T20:30:07.954078Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 1 is not local. 2025-06-24T20:30:07.956021Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:10:2057], server id = [0:0:0], tablet id = 3, status = ERROR 2025-06-24T20:30:07.956046Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:07.957164Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:11:2058], server id = [1:11:2058], tablet id = 4, status = ERROR 2025-06-24T20:30:07.957526Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 4 is not local. 2025-06-24T20:30:07.957918Z node 1 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 1, client id = [1:12:2059], server id = [1:12:2059], tablet id = 5 2025-06-24T20:30:07.957945Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 5 is not local. 2025-06-24T20:30:07.958627Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 6 2025-06-24T20:30:07.958691Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:14:2061], server id = [1:14:2061], tablet id = 7, status = ERROR 2025-06-24T20:30:07.958708Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 7 is not local. 
2025-06-24T20:30:07.974040Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:13:2060], server id = [0:0:0], tablet id = 6, status = ERROR 2025-06-24T20:30:07.974472Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:07.974857Z node 1 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 1, client id = [1:15:2062], server id = [1:15:2062], tablet id = 8 2025-06-24T20:30:07.974885Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 8 is not local. 2025-06-24T20:30:07.975311Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 >> AggregateStatistics::ChildNodesShouldBeInvalidateByTimeout [GOOD] >> PrivateApi::PingTask [GOOD] >> PrivateApi::GetTask >> AggregateStatistics::ShouldBeCcorrectProcessingTabletTimeout ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBeCorrectlyAggregateStatisticsFromAllNodes [GOOD] Test command err: 2025-06-24T20:30:08.622286Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-24T20:30:08.634122Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [1:38:2058], tablet id = 1, status = OK 2025-06-24T20:30:08.652329Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:38:2058], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:08.664581Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:39:2059], server id = [1:39:2059], tablet id = 2, status = OK 2025-06-24T20:30:08.664676Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:39:2059], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:08.665124Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:40:2060], server id = [1:40:2060], tablet id = 3, status = OK 2025-06-24T20:30:08.665151Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:40:2060], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:08.681543Z node 3 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-24T20:30:08.682828Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-24T20:30:08.682922Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-06-24T20:30:08.701854Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 2 2025-06-24T20:30:08.703092Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-06-24T20:30:08.708164Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:44:2057], server id = [3:44:2057], tablet id = 5, status = OK 2025-06-24T20:30:08.708260Z node 3 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [3:44:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:08.708702Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:46:2057], server id = [2:46:2057], tablet id = 4, status = OK 2025-06-24T20:30:08.709575Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest 
send, client id = [2:46:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:08.712526Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 4 2025-06-24T20:30:08.714812Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [0:0:0], tablet id = 1, status = ERROR 2025-06-24T20:30:08.723375Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:08.724773Z node 4 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 1, current Round: 0 2025-06-24T20:30:08.724848Z node 3 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 5 2025-06-24T20:30:08.724916Z node 3 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-24T20:30:08.728732Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:39:2059], server id = [0:0:0], tablet id = 2, status = ERROR 2025-06-24T20:30:08.728760Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:08.729661Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:44:2057], server id = [0:0:0], tablet id = 5, status = ERROR 2025-06-24T20:30:08.729679Z node 3 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:08.729713Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:46:2057], server id = [0:0:0], tablet id = 4, status = ERROR 2025-06-24T20:30:08.729729Z node 2 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:08.730087Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:40:2060], server id = [0:0:0], tablet id = 3, status = ERROR 2025-06-24T20:30:08.730108Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:08.730575Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:49:2057], server id = [4:49:2057], tablet id = 6, status = OK 2025-06-24T20:30:08.730638Z node 4 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [4:49:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:08.738266Z node 4 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 6 2025-06-24T20:30:08.747216Z node 4 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T20:30:08.750505Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 3 2025-06-24T20:30:08.756949Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:49:2057], server id = [0:0:0], tablet id = 6, status = ERROR 2025-06-24T20:30:08.756996Z node 4 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:08.768518Z node 2 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 4 2025-06-24T20:30:08.769333Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-24T20:30:08.775084Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 2 2025-06-24T20:30:08.775165Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 >> 
AggregateStatistics::ShouldBeCcorrectProcessingTabletTimeout [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ChildNodesShouldBeInvalidateByTimeout [GOOD] Test command err: 2025-06-24T20:30:09.519939Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-24T20:30:09.520978Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [1:38:2058], tablet id = 1, status = OK 2025-06-24T20:30:09.521319Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:38:2058], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:09.521444Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-06-24T20:30:09.521685Z node 3 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-24T20:30:09.521813Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-24T20:30:09.521919Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [0:0:0], tablet id = 1, status = ERROR 2025-06-24T20:30:09.521949Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:09.522050Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:42:2057], server id = [3:42:2057], tablet id = 3, status = OK 2025-06-24T20:30:09.522119Z node 3 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [3:42:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:09.522246Z node 4 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 1, current Round: 0 2025-06-24T20:30:09.522309Z node 3 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-06-24T20:30:09.522340Z node 3 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-24T20:30:09.522463Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:42:2057], server id = [0:0:0], tablet id = 3, status = ERROR 2025-06-24T20:30:09.522486Z node 3 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:09.522561Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:47:2057], server id = [4:47:2057], tablet id = 4, status = OK 2025-06-24T20:30:09.522604Z node 4 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [4:47:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:09.522957Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 3 2025-06-24T20:30:09.523015Z node 4 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 4 2025-06-24T20:30:09.523054Z node 4 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T20:30:09.523332Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:47:2057], server id = [0:0:0], tablet id = 4, status = ERROR 2025-06-24T20:30:09.523361Z node 4 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:09.523475Z node 2 :STATISTICS DEBUG: 
service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 4 2025-06-24T20:30:09.534425Z node 4 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-24T20:30:09.534499Z node 4 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T20:30:09.534544Z node 3 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-24T20:30:09.534571Z node 3 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T20:30:09.545240Z node 2 :STATISTICS DEBUG: service_impl.cpp:401: Skip TEvKeepAliveTimeout 2025-06-24T20:30:09.545334Z node 1 :STATISTICS INFO: service_impl.cpp:416: Node 2 is unavailable 2025-06-24T20:30:09.545379Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-24T20:30:09.545509Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-24T20:30:09.545549Z node 1 :STATISTICS DEBUG: service_impl.cpp:393: Skip TEvKeepAliveTimeout 2025-06-24T20:30:09.545627Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-24T20:30:09.545656Z node 1 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T20:30:09.545777Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-24T20:30:09.545826Z node 1 :STATISTICS DEBUG: service_impl.cpp:428: Skip TEvAggregateKeepAlive >> TPDiskRaces::OwnerRecreationRaces [GOOD] >> TPDiskRaces::OwnerKilledWhileReadingLog >> Yq_1::ModifyConnections [GOOD] >> test.py::test[solomon-InvalidProject-] [GOOD] >> test.py::test[solomon-LabelColumns-default.txt] >> Yq_1::ModifyQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBeCcorrectProcessingTabletTimeout [GOOD] Test command err: 2025-06-24T20:30:10.748455Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-24T20:30:10.749486Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:8:2055], server id = [1:8:2055], tablet id = 1, status = OK 2025-06-24T20:30:10.749785Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:8:2055], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:10.749913Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:9:2056], server id = [1:9:2056], tablet id = 2, status = OK 2025-06-24T20:30:10.749985Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:9:2056], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:10.750052Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-06-24T20:30:10.750238Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:10:2057], server id = [1:10:2057], tablet id = 3, status = OK 2025-06-24T20:30:10.750290Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:10:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:10.750338Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:11:2058], server id = [1:11:2058], tablet id = 4, status = OK 2025-06-24T20:30:10.750368Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, 
client id = [1:11:2058], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:10.750407Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:8:2055], server id = [0:0:0], tablet id = 1, status = ERROR 2025-06-24T20:30:10.750426Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:10.750458Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:12:2059], server id = [1:12:2059], tablet id = 5, status = OK 2025-06-24T20:30:10.750503Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:12:2059], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:10.750556Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-06-24T20:30:10.750662Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:13:2060], server id = [1:13:2060], tablet id = 6, status = OK 2025-06-24T20:30:10.750696Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:13:2060], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:10.750749Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 5 2025-06-24T20:30:10.750801Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:10:2057], server id = [0:0:0], tablet id = 3, status = ERROR 2025-06-24T20:30:10.750834Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:10.750878Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:14:2061], server id = [1:14:2061], tablet id = 7, status = OK 2025-06-24T20:30:10.750916Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:14:2061], path = { OwnerId: 3 LocalId: 3 } 2025-06-24T20:30:10.750961Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:12:2059], server id = [0:0:0], tablet id = 5, status = ERROR 2025-06-24T20:30:10.750992Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:10.751017Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 7 2025-06-24T20:30:10.751070Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:14:2061], server id = [0:0:0], tablet id = 7, status = ERROR 2025-06-24T20:30:10.751100Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:10.762678Z node 1 :STATISTICS DEBUG: service_impl.cpp:1028: Tablet 1 has already been processed 2025-06-24T20:30:10.762766Z node 1 :STATISTICS ERROR: service_impl.cpp:1032: No result was received from the tablet 2 2025-06-24T20:30:10.762831Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 2 is not local. 2025-06-24T20:30:10.762971Z node 1 :STATISTICS DEBUG: service_impl.cpp:1028: Tablet 3 has already been processed 2025-06-24T20:30:10.763011Z node 1 :STATISTICS ERROR: service_impl.cpp:1032: No result was received from the tablet 4 2025-06-24T20:30:10.763035Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 4 is not local. 
2025-06-24T20:30:10.763071Z node 1 :STATISTICS DEBUG: service_impl.cpp:1028: Tablet 5 has already been processed 2025-06-24T20:30:10.763128Z node 1 :STATISTICS ERROR: service_impl.cpp:1032: No result was received from the tablet 6 2025-06-24T20:30:10.763163Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 6 is not local. 2025-06-24T20:30:10.763191Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-24T20:30:10.763315Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-24T20:30:10.763348Z node 1 :STATISTICS DEBUG: service_impl.cpp:1021: Skip TEvStatisticsRequestTimeout 2025-06-24T20:30:10.763434Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:9:2056], server id = [0:0:0], tablet id = 2, status = ERROR 2025-06-24T20:30:10.763459Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:10.763518Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:11:2058], server id = [0:0:0], tablet id = 4, status = ERROR 2025-06-24T20:30:10.763538Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-24T20:30:10.763565Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:13:2060], server id = [0:0:0], tablet id = 6, status = ERROR 2025-06-24T20:30:10.763583Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected >> TBlobStorageWardenTest::TestCreatePDiskAndGroup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> TYardTestRestore::TestRestore15 [GOOD] Test command err: (PDisk.ChunkRead -> [(PDisk.CompletionChunkRead) , (PDisk.ChunkRead.CompletionPart) , (PDisk.ChunkRead.CompletionPart) , (PDisk.ChunkRead.CompletionPart)]) (PDisk.ChunkWrite -> [(PDisk.CompletionChunkWrite) , (PDisk.ChunkWritePiece) , (PDisk.ChunkWritePiece) , (PDisk.ChunkWritePiece)]) (PDisk.LogWrite) (PDisk.LogWrite) (PDisk.LogRead) |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest |88.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> TBlobStorageGroupInfoBlobMapTest::BasicChecks [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDropIndex [GOOD] >> TSchemeshardBackgroundCleaningTest::TempInTemp ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest >> TBlobStorageWardenTest::TestCreatePDiskAndGroup [GOOD] Test command err: 2025-06-24T20:30:01.206585Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:0:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:2:0] targetVDisk# [3e000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:30:01.225507Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:2:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:1:0] targetVDisk# [3e000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:30:01.228497Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:0:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:1:0] targetVDisk# [3e000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:30:01.722113Z node 1 :BS_SYNCLOG WARN: 
blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:0:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:3:0] targetVDisk# [3e000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:30:01.722241Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:1:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:3:0] targetVDisk# [3e000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:30:01.723048Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:2:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:3:0] targetVDisk# [3e000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 Sending TEvPut Sending TEvGet Sending TEvVGet Sending TEvPut Sending TEvGet ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoBlobMapTest::BasicChecks [GOOD] Test command err: None domains 1 new (ns): 227.9994275 None domains 1 old (ns): 122.8926075 None domains 9 new (ns): 314.1385521 None domains 9 old (ns): 94.75068481 Mirror3 domains 4 new (ns): 222.7642831 Mirror3 domains 4 old (ns): 104.0758151 Mirror3 domains 9 new (ns): 209.2673283 Mirror3 domains 9 old (ns): 127.6147866 4Plus2Block domains 8 new (ns): 275.1820909 4Plus2Block domains 8 old (ns): 114.8169184 4Plus2Block domains 9 new (ns): 135.2555642 4Plus2Block domains 9 old (ns): 88.66402833 ErasureMirror3of4 domains 8 new (ns): 162.5932357 ErasureMirror3of4 domains 8 old (ns): 153.5312264 ErasureMirror3of4 domains 9 new (ns): 196.2619131 ErasureMirror3of4 domains 9 old (ns): 122.9617381 >> TBlobStorageWardenTest::TestCreatePDiskAndEncryptedGroup [GOOD] >> KqpDataIntegrityTrails::BrokenReadLock-UseSink |88.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |88.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest >> TBlobStorageWardenTest::TestCreatePDiskAndEncryptedGroup [GOOD] Test command err: 2025-06-24T20:30:03.013659Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:0:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:2:0] targetVDisk# [3e000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:30:03.079341Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:2:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:1:0] targetVDisk# [3e000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:30:03.079903Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:0:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:1:0] targetVDisk# [3e000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:30:05.921540Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:0:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:3:0] targetVDisk# [3e000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:30:05.923411Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:1:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:3:0] targetVDisk# [3e000000:1:0:1:0] 
oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:30:05.924895Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:2:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:3:0] targetVDisk# [3e000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 Sending TEvPut Sending TEvGet Sending TEvVGet Sending TEvPut 2025-06-24T20:30:12.169097Z node 1 :BS_CONTROLLER ERROR: {BSCTXPGK04@propose_group_key.cpp:47} Group LifeCyclePhase does not match ELCP_INITIAL GroupId.GetRawId()# 3187671040 LifeCyclePhase# 3 2025-06-24T20:30:12.169523Z node 1 :BS_CONTROLLER ERROR: {BSCTXPGK10@propose_group_key.cpp:108} TTxProposeGroupKey error GroupId# 3187671040 Status# ERROR Request# {NodeId: 2 GroupId: 3187671040 LifeCyclePhase: 1 MainKeyId: "/home/runner/.ya/build/build_root/2btm/004485/r3tmp/tmpsPtOnD//key.txt" EncryptedGroupKey: "\276q\032\3035\037\254\246\353f\274\027\267\215?\241\030Ee\030iI\2654u\305\211\316\321\347m\211\250\301>r" MainKeyVersion: 1 GroupKeyNonce: 3187671040 } Sending TEvGet |88.1%| [TA] $(B)/ydb/core/blobstorage/groupinfo/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest |88.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest |88.1%| [TA] $(B)/ydb/core/mind/ut_fat/test-results/unittest/{meta.json ... results_accumulator.log} >> IncrementalBackup::E2EBackupCollection [FAIL] |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest |88.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest |88.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg |88.2%| [TA] {RESULT} $(B)/ydb/core/blobstorage/groupinfo/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.2%| [TA] {RESULT} $(B)/ydb/core/mind/ut_fat/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.2%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg |88.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview |88.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview |88.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview |88.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange |88.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange |88.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange >> TableCreator::CreateTables |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest >> Yq_1::CreateQuery_With_Idempotency [FAIL] >> Yq_1::CreateQuery_Without_Connection |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_backup/unittest >> IncrementalBackup::ComplexRestoreBackupCollection-WithIncremental [GOOD] Test command err: 2025-06-24T20:29:08.591048Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:29:08.591502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:08.591693Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00402b/r3tmp/tmpPoVJQi/pdisk_1.dat 2025-06-24T20:29:08.942032Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:29:08.948182Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:555:2480], Recipient [1:371:2365]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:29:08.948286Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:29:08.948330Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T20:29:08.948461Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:552:2478], Recipient [1:371:2365]: {TEvModifySchemeTransaction txid# 1 TabletId# 72057594046644480} 2025-06-24T20:29:08.948493Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T20:29:09.590450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-24T20:29:09.600517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:09.602528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T20:29:09.607585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T20:29:09.610421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:29:09.638991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:29:09.640192Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:09.649805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T20:29:09.660694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T20:29:09.661030Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:09.661349Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-06-24T20:29:09.676303Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [1:371:2365], Recipient [1:371:2365]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:09.676365Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:09.680227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:09.681178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T20:29:09.682090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:29:09.682377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:29:09.695118Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:09.698989Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:09.699733Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-06-24T20:29:09.705313Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [1:371:2365], Recipient [1:371:2365]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:09.705372Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:09.705454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:09.705568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T20:29:09.705613Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:29:09.705713Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:09.706159Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:09.706187Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 
2025-06-24T20:29:09.706275Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [1:371:2365], Recipient [1:371:2365]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:09.706296Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:09.706327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:09.706358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:09.706404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-24T20:29:09.706431Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:09.706472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:29:09.709480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:29:09.709951Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:09.710003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:29:09.710182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-24T20:29:09.711284Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877760, Sender [1:560:2485], Recipient [1:371:2365]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046316545 Status: OK ServerId: [1:562:2486] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-24T20:29:09.711333Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5046: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-24T20:29:09.711970Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5783: Handle TEvClientConnected, tabletId: 72057594046316545, status: OK, at schemeshard: 72057594046644480 2025-06-24T20:29:09.713320Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269091328, Sender [1:367:2361], Recipient [1:371:2365]: NKikimrTx.TEvProposeTransactionStatus Status: 16 StepId: 500 TxId: 1 2025-06-24T20:29:09.730734Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:564:2488], Recipient [1:371:2365]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:29:09.731182Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 
2025-06-24T20:29:09.731820Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T20:29:09.733949Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124996, Sender [1:552:2478], Recipient [1:371:2365]: NKikimrScheme.TEvNotifyTxCompletion TxId: 1 2025-06-24T20:29:09.734003Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4964: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-24T20:29:09.734951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-24T20:29:09.743541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-24T20:29:09.743889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-06-24T20:29:09.995000Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 273285138, Sender [1:44:2091], Recipient [1:371:2365]: ... meshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 6/7 2025-06-24T20:30:03.851966Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715668:6 progress is 6/7 2025-06-24T20:30:03.851990Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 6/7 2025-06-24T20:30:03.852013Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715668, ready parts: 6/7, is published: true 2025-06-24T20:30:03.852226Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [3:372:2366], Recipient [3:372:2366]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:30:03.852254Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:30:03.852288Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715668:4, at schemeshard: 72057594046644480 2025-06-24T20:30:03.852318Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976715668:4 ProgressState 2025-06-24T20:30:03.852383Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:30:03.852412Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715668:4 progress is 7/7 2025-06-24T20:30:03.852437Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 7/7 2025-06-24T20:30:03.852475Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715668:4 progress is 7/7 2025-06-24T20:30:03.852496Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 7/7 2025-06-24T20:30:03.852846Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715668, ready parts: 7/7, is published: 
true 2025-06-24T20:30:03.853240Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:1196:2903] message: TxId: 281474976715668 2025-06-24T20:30:03.853619Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 7/7 2025-06-24T20:30:03.854015Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715668:0 2025-06-24T20:30:03.854422Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715668:0 2025-06-24T20:30:03.854805Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 2 2025-06-24T20:30:03.855189Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715668:1 2025-06-24T20:30:03.855214Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715668:1 2025-06-24T20:30:03.855255Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 2 2025-06-24T20:30:03.855276Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715668:2 2025-06-24T20:30:03.855295Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715668:2 2025-06-24T20:30:03.855322Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-24T20:30:03.855342Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715668:3 2025-06-24T20:30:03.855361Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715668:3 2025-06-24T20:30:03.855450Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 20] was 3 2025-06-24T20:30:03.855873Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 6] was 3 2025-06-24T20:30:03.855924Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715668:4 2025-06-24T20:30:03.855946Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715668:4 2025-06-24T20:30:03.856005Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 21] was 3 2025-06-24T20:30:03.856034Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 12] was 3 2025-06-24T20:30:03.856060Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715668:5 
2025-06-24T20:30:03.856082Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715668:5 2025-06-24T20:30:03.856129Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 22] was 3 2025-06-24T20:30:03.856445Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 3 2025-06-24T20:30:03.856811Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715668:6 2025-06-24T20:30:03.856852Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715668:6 2025-06-24T20:30:03.856911Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 23] was 3 2025-06-24T20:30:03.856938Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 16] was 3 2025-06-24T20:30:03.858926Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:30:03.859075Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:30:03.859330Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:30:03.859489Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:30:03.859611Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:1196:2903] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715668 at schemeshard: 72057594046644480 2025-06-24T20:30:03.860883Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:1203:2909], Recipient [3:372:2366]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:30:03.860930Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:30:03.860958Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T20:30:04.437582Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037893, clientId# [3:1487:3134], serverId# [3:1488:3135], sessionId# [0:0:0] 2025-06-24T20:30:04.439035Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jyht4b3t1vtskyw3tb2c2h2f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MzAyOWE1ZDgtMzQwNTY3YzctOWFiYzY2MS1jNTc0NzYyMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }, { items { uint32_value: 4 } items { uint32_value: 40 } }, { items { uint32_value: 5 } items { uint32_value: 50 } } 2025-06-24T20:30:04.956658Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037895, clientId# [3:1516:3151], serverId# [3:1517:3152], sessionId# [0:0:0] 2025-06-24T20:30:04.958168Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jyht4bsyad08xrzhzj5gs9rf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=M2UwOWQ2ODctNTQxZWNiMGQtMmVkZjg1NWYtNThlODI4YWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 11 } items { uint32_value: 101 } }, { items { uint32_value: 21 } items { uint32_value: 201 } }, { items { uint32_value: 31 } items { uint32_value: 301 } }, { items { uint32_value: 41 } items { uint32_value: 401 } }, { items { uint32_value: 51 } items { uint32_value: 501 } } 2025-06-24T20:30:05.301172Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037892, clientId# [3:1545:3168], serverId# [3:1546:3169], sessionId# [0:0:0] 2025-06-24T20:30:05.301414Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jyht4c68c6e7j9thpqykdq7e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NDY5ODg5NjMtZGQxZmVjNzgtZWI1ZjVkNWQtMWM1ZWFjNTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 12 } items { uint32_value: 102 } }, { items { uint32_value: 22 } items { uint32_value: 202 } }, { items { uint32_value: 32 } items { uint32_value: 302 } }, { items { uint32_value: 42 } items { uint32_value: 402 } }, { items { uint32_value: 52 } items { uint32_value: 502 } } 2025-06-24T20:30:05.937254Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [3:1574:3185], serverId# [3:1575:3186], sessionId# [0:0:0] 2025-06-24T20:30:05.938065Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jyht4cgne4pm4xtsgcyw6cy4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MWU2ZDBiMWYtYjM5NzE0MmMtMzAyYzhlZmUtZDQ5YTk5NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root { items { uint32_value: 13 } items { uint32_value: 103 } }, { items { uint32_value: 23 } items { uint32_value: 203 } }, { items { uint32_value: 33 } items { uint32_value: 303 } }, { items { uint32_value: 43 } items { uint32_value: 403 } }, { items { uint32_value: 53 } items { uint32_value: 503 } } >> UpsertLoad::ShouldWriteDataBulkUpsertBatch [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsertKeyFrom >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkqlKeyFrom [GOOD] |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest >> Yq_1::ListConnections [GOOD] >> Yq_1::ListConnectionsOnEmptyConnectionsTable >> Yq_1::Basic_Null [GOOD] >> Yq_1::Basic_TaggedLiteral >> UpsertLoad::ShouldWriteDataBulkUpsert [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsert2 >> TSchemeshardBackgroundCleaningTest::CreateTableInTemp [GOOD] |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkqlKeyFrom [GOOD] Test command err: 2025-06-24T20:30:14.290971Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:30:14.291440Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:14.291686Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003352/r3tmp/tmpLEii0M/pdisk_1.dat 2025-06-24T20:30:14.683871Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:30:14.700298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:14.768903Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:14.775206Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797004708024 != 1750797004708028 2025-06-24T20:30:14.832699Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:14.832853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:14.848424Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:14.937157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:16.358280Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertLocalMkqlStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertLocalMkqlStart { RowCount: 10 Inflight: 3 KeyFrom: 12345 } 2025-06-24T20:30:16.358457Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:693:2575], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 KeyFrom: 12345 with type# 1, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-06-24T20:30:16.610691Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:693:2575], subTag: 2} TUpsertActor finished in 0.251364s, errors=0 2025-06-24T20:30:16.611175Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:694:2576] with tag# 2 >> UpsertLoad::ShouldCreateTable [GOOD] >> UpsertLoad::ShouldDropCreateTable >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql2 |88.2%| [TM] {asan, default-linux-x86_64, release} 
ydb/library/table_creator/ut/unittest >> UpsertLoad::ShouldWriteKqpUpsert2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_background_cleaning/unittest >> TSchemeshardBackgroundCleaningTest::CreateTableInTemp [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:231:2060] recipient: [1:225:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:231:2060] recipient: [1:225:2143] Leader for TabletID 72057594046678944 is [1:242:2154] sender: [1:243:2060] recipient: [1:225:2143] 2025-06-24T20:28:51.016448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:51.016544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:51.016586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:51.016624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:51.016666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:51.016704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:51.016753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:51.016825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:51.017719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:51.018060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:51.108426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:28:51.108513Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:51.145607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:51.145958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:51.146130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:51.156254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:51.156502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:51.157222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.157517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:51.163906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:51.164129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:51.165456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:51.165526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:51.165698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:51.165745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:51.165789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:51.165930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.184202Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:242:2154] sender: [1:355:2060] recipient: [1:17:2064] 2025-06-24T20:28:51.363325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:51.363624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.363859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:51.363906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:51.364171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:51.364264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:51.373763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.374024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:51.374301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.374365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:51.374433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:51.374480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:51.383978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.384089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:51.384167Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:51.390943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.391035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.391113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.391201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:51.403401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:51.408013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:51.408230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:51.409253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.409398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 250 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:51.409460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.409749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:51.409809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.410014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:51.410093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28:51.413412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:51.413484Z node 1 :FLAT_TX_SCHEMESHARD ... ady parts: 2/3 2025-06-24T20:30:19.518095Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:2 progress is 2/3 2025-06-24T20:30:19.518148Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 2/3 2025-06-24T20:30:19.518190Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 2/3, is published: true 2025-06-24T20:30:19.518465Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T20:30:19.518503Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:30:19.518533Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 104:0 2025-06-24T20:30:19.518815Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [7:985:2744] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 104 at schemeshard: 72057594046678944 2025-06-24T20:30:19.519303Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552132, Sender [7:242:2154], Recipient [7:985:2744]: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 104 2025-06-24T20:30:19.519343Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvSchemaChangedResult 2025-06-24T20:30:19.519371Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 104 datashard 72075186233409551 state Ready 2025-06-24T20:30:19.519445Z node 7 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186233409551 Got 
TEvSchemaChangedResult from SS at 72075186233409551 2025-06-24T20:30:19.519632Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [7:242:2154], Recipient [7:242:2154]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:30:19.519658Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:30:19.519729Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T20:30:19.519757Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-24T20:30:19.519811Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T20:30:19.519838Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 3/3 2025-06-24T20:30:19.519876Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-24T20:30:19.519903Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 3/3 2025-06-24T20:30:19.519922Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-24T20:30:19.519955Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 3/3, is published: true 2025-06-24T20:30:19.520003Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [7:584:2401] message: TxId: 104 2025-06-24T20:30:19.520050Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-24T20:30:19.520094Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:0 2025-06-24T20:30:19.520125Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:0 2025-06-24T20:30:19.520249Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 10] was 4 2025-06-24T20:30:19.520284Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:1 2025-06-24T20:30:19.520303Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:1 2025-06-24T20:30:19.520344Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 11] was 3 2025-06-24T20:30:19.520371Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:2 2025-06-24T20:30:19.520391Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:2 2025-06-24T20:30:19.520448Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 12] was 3 2025-06-24T20:30:19.522642Z node 7 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:30:19.522731Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:30:19.522793Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [7:584:2401] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 104 at schemeshard: 72057594046678944 2025-06-24T20:30:19.522916Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T20:30:19.522951Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [7:1037:2781] 2025-06-24T20:30:19.523195Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [7:1039:2783], Recipient [7:242:2154]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:30:19.523223Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:30:19.523245Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-06-24T20:30:19.524036Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [8:560:2103], Recipient [7:242:2154] 2025-06-24T20:30:19.524113Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T20:30:19.526468Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/tmp" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "NotTempTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Utf8" } KeyColumnNames: "key" } IndexDescription { Name: "ValueIndex" KeyColumnNames: "value" } } AllowCreateInTempDir: false } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:19.533722Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 105:0, explain: Check failed: path: '/MyRoot/tmp', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-06-24T20:30:19.534198Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 105:1, propose status:StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/tmp', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-06-24T20:30:19.536561Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T20:30:19.562960Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 105, response: Status: StatusPreconditionFailed Reason: "Check failed: path: \'/MyRoot/tmp\', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: 
EPathStateNoChanges)" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:30:19.565732Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/tmp', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges), operation: CREATE TABLE WITH INDEXES, path: /MyRoot/tmp/NotTempTable 2025-06-24T20:30:19.567369Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-24T20:30:19.571266Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-24T20:30:19.571662Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-24T20:30:19.576519Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [7:1109:2853], Recipient [7:242:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:30:19.576993Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:30:19.577471Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T20:30:19.578380Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124996, Sender [7:584:2401], Recipient [7:242:2154]: NKikimrScheme.TEvNotifyTxCompletion TxId: 105 2025-06-24T20:30:19.579716Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4964: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-24T20:30:19.580215Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-24T20:30:19.581102Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-24T20:30:19.581513Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [7:1107:2851] 2025-06-24T20:30:19.584303Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [7:1109:2853], Recipient [7:242:2154]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:30:19.585177Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:30:19.585561Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 105 >> ReadLoad::ShouldReadIterate [GOOD] >> ReadLoad::ShouldReadIterateMoreThanRows >> TSchemeshardBackgroundCleaningTest::TempInTemp [GOOD] >> UpsertLoad::ShouldWriteKqpUpsertKeyFrom [GOOD] |88.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |88.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteKqpUpsert2 [GOOD] Test command err: 
2025-06-24T20:30:14.801376Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:30:14.801800Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:14.802054Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003378/r3tmp/tmpETfTUQ/pdisk_1.dat 2025-06-24T20:30:16.201183Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:30:16.228430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:16.340918Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:16.346301Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797009673286 != 1750797009673290 2025-06-24T20:30:16.413773Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:16.413923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:16.427551Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:16.665717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:19.247661Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertKqpStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "JustTable" } UpsertKqpStart { RowCount: 20 Inflight: 5 } 2025-06-24T20:30:19.248811Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:298: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:693:2575], subTag: 2} Bootstrap called: RowCount: 20 Inflight: 5 2025-06-24T20:30:19.308235Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:361: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:693:2575], subTag: 2} started# 5 actors each with inflight# 4 2025-06-24T20:30:19.308972Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 1} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-24T20:30:19.309372Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 2} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-24T20:30:19.309399Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: 
[1:694:2576], subTag: 3} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-24T20:30:19.311359Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 4} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-24T20:30:19.311426Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 5} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-24T20:30:19.349938Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 1} session: ydb://session/3?node_id=1&id=ZWFmZDNjMjktNGQ0ZmIwYjctZDYwMGQ5MDMtNGI0ODBkMGU= 2025-06-24T20:30:19.373010Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 2} session: ydb://session/3?node_id=1&id=OWUwNGE1ZjItZGRkMjIwZmItNTQyMDQxZmQtN2Q5OGQyY2U= 2025-06-24T20:30:19.400707Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 3} session: ydb://session/3?node_id=1&id=YmMzZjEyOTQtMTkwYjU3NDAtMjUwMmZjOWEtNzUwMGEwYzY= 2025-06-24T20:30:19.413892Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 4} session: ydb://session/3?node_id=1&id=OWM1Y2Y0YTEtZDk4ZjFhNWYtMTgzZjRhNmEtODJkNmVlMzY= 2025-06-24T20:30:19.427020Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 5} session: ydb://session/3?node_id=1&id=OWViMTlhY2UtM2E5YWFjODItODE1MTRjY2YtNWE5NWRhMGM= 2025-06-24T20:30:19.490108Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:737:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:19.495227Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:708:2590], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:19.497555Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:734:2610], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:19.497617Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:735:2611], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:19.497660Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:736:2612], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:19.498358Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:738:2614], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:19.504784Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:19.611083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:30:19.718082Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:754:2630] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:30:19.718300Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:755:2631] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:30:19.719064Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:756:2632] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:30:19.726882Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:763:2633] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:30:19.894619Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:30:20.081848Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:20.088205Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:747:2623], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:20.089003Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:748:2624], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:20.089054Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:749:2625], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:20.089434Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:750:2626], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:20.201945Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:851:2692] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:22.643990Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 1} finished in 1750797022.643623s, errors=0 2025-06-24T20:30:22.645591Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:693:2575], subTag: 2} finished: 1 { Tag: 1 DurationMs: 1750797022643 OperationsOK: 4 OperationsError: 0 } 2025-06-24T20:30:22.680514Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:924:2730] txid# 281474976715668, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:22.944115Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 2} finished in 1750797022.943704s, errors=0 2025-06-24T20:30:22.944814Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:693:2575], subTag: 2} finished: 2 { Tag: 2 DurationMs: 1750797022943 OperationsOK: 4 OperationsError: 0 } 2025-06-24T20:30:23.016967Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:975:2752] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:23.308082Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 5} finished in 1750797023.307662s, errors=0 2025-06-24T20:30:23.309192Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:693:2575], subTag: 2} finished: 5 { Tag: 5 DurationMs: 1750797023307 OperationsOK: 4 OperationsError: 0 } 2025-06-24T20:30:23.338579Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1026:2774] txid# 281474976715678, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:23.832876Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 3} finished in 1750797023.832842s, errors=0 2025-06-24T20:30:23.833123Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:693:2575], subTag: 2} finished: 3 { Tag: 3 DurationMs: 1750797023832 OperationsOK: 4 OperationsError: 0 } 2025-06-24T20:30:23.875175Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1077:2796] txid# 281474976715683, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:24.125298Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: 
[1:694:2576], subTag: 4} finished in 1750797024.124917s, errors=0 2025-06-24T20:30:24.125935Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:693:2575], subTag: 2} finished: 4 { Tag: 4 DurationMs: 1750797024124 OperationsOK: 4 OperationsError: 0 } 2025-06-24T20:30:24.126712Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:395: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:693:2575], subTag: 2} finished in 4.820225s, oks# 20, errors# 0 2025-06-24T20:30:24.137399Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:694:2576] with tag# 2 >> KqpDataIntegrityTrails::Upsert+LogEnabled-UseSink |88.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> UpsertLoad::ShouldWriteKqpUpsert [GOOD] >> KqpDataIntegrityTrails::BrokenReadLockAbortedTx |88.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteKqpUpsertKeyFrom [GOOD] Test command err: 2025-06-24T20:30:15.047186Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:30:15.047597Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:15.047814Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0033fd/r3tmp/tmpvokpxY/pdisk_1.dat 2025-06-24T20:30:17.426263Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:30:17.443119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:17.789470Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:17.838830Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797010193020 != 1750797010193024 2025-06-24T20:30:17.946428Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:17.946581Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:17.960258Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:18.094159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:19.708017Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertKqpStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } UpsertKqpStart { RowCount: 20 Inflight: 5 KeyFrom: 12345 } 2025-06-24T20:30:19.709644Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:298: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:693:2575], subTag: 2} Bootstrap called: RowCount: 20 Inflight: 5 KeyFrom: 12345 2025-06-24T20:30:19.774542Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:361: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:693:2575], subTag: 2} started# 5 actors each with inflight# 4 2025-06-24T20:30:19.775313Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 1} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-06-24T20:30:19.775724Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 2} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-06-24T20:30:19.776574Z node 1 :DS_LOAD_TEST NOTICE: 
kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 3} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-06-24T20:30:19.777370Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 4} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-06-24T20:30:19.777766Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 5} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-06-24T20:30:19.816249Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 1} session: ydb://session/3?node_id=1&id=YjVhNGE3NGYtYWQyZjU5YTEtNWQyNDY1NzctMTNiMWJkNDY= 2025-06-24T20:30:19.829358Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 2} session: ydb://session/3?node_id=1&id=ZWIwYzYxN2ItMjgzYTNhMDgtMzBmNzUwYzAtYzlhMzEwMg== 2025-06-24T20:30:19.840825Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 3} session: ydb://session/3?node_id=1&id=NDhjMzc3YzItODQxY2Q4OTgtNmUxNmNmYzEtZTUyN2M3ZmQ= 2025-06-24T20:30:19.851087Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 4} session: ydb://session/3?node_id=1&id=YjM2OTg0MGUtZDU4NDZkOWEtNTcwMThiZDctZDM5NDZmZWE= 2025-06-24T20:30:19.860850Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 5} session: ydb://session/3?node_id=1&id=NmZhNzFiOGQtYmUwNTBkNTgtOGNlMjQxMGEtOTFiNTIwODk= 2025-06-24T20:30:19.903081Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:735:2611], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:19.904783Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:708:2590], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:19.906902Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:734:2610], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:19.906954Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:736:2612], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:19.907356Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:737:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:19.908223Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:738:2614], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:19.911094Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:20.024670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:30:20.356098Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:754:2630] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:30:20.356286Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:755:2631] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:30:20.357009Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:756:2632] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:30:20.357552Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:763:2633] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:30:20.408668Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:30:20.670908Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:20.673557Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:747:2623], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:20.674350Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:748:2624], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:20.674394Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:749:2625], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:20.674816Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:750:2626], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:20.733196Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:851:2692] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:25.151842Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 4} finished in 1750797025.151798s, errors=0 2025-06-24T20:30:25.153956Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:693:2575], subTag: 2} finished: 4 { Tag: 4 DurationMs: 1750797025151 OperationsOK: 4 OperationsError: 0 } 2025-06-24T20:30:25.242800Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:924:2730] txid# 281474976715668, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:25.621090Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 3} finished in 1750797025.621050s, errors=0 2025-06-24T20:30:25.621766Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:693:2575], subTag: 2} finished: 3 { Tag: 3 DurationMs: 1750797025621 OperationsOK: 4 OperationsError: 0 } 2025-06-24T20:30:25.727751Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:975:2752] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:25.806529Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:992:2761] txid# 281474976715675, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:26.139511Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 1} finished in 1750797026.127088s, errors=0 2025-06-24T20:30:26.140794Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:693:2575], subTag: 2} finished: 1 { Tag: 1 DurationMs: 1750797026127 OperationsOK: 4 OperationsError: 0 } 2025-06-24T20:30:26.141203Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 2} finished in 1750797026.140827s, errors=0 2025-06-24T20:30:26.141660Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:693:2575], subTag: 2} finished: 2 { Tag: 2 DurationMs: 1750797026140 OperationsOK: 4 OperationsError: 0 } 2025-06-24T20:30:26.226076Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1073:2792] txid# 281474976715683, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:26.671993Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: 
[1:694:2576], subTag: 5} finished in 1750797026.671636s, errors=0 2025-06-24T20:30:26.673039Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:693:2575], subTag: 2} finished: 5 { Tag: 5 DurationMs: 1750797026671 OperationsOK: 4 OperationsError: 0 } 2025-06-24T20:30:26.673761Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:395: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:693:2575], subTag: 2} finished in 6.901090s, oks# 20, errors# 0 2025-06-24T20:30:26.675822Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:694:2576] with tag# 2 |88.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert-LogEnabled+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_background_cleaning/unittest >> TSchemeshardBackgroundCleaningTest::TempInTemp [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:231:2060] recipient: [1:225:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:231:2060] recipient: [1:225:2143] Leader for TabletID 72057594046678944 is [1:241:2153] sender: [1:242:2060] recipient: [1:225:2143] 2025-06-24T20:28:51.301620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:51.301709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:51.301744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:51.301775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:51.301815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:51.301840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:51.301894Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:51.301955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:51.302700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:51.303038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:51.399204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:28:51.399273Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:51.413304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: 
TxInitSchema.Complete 2025-06-24T20:28:51.420005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:51.420225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:51.446132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:51.446828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:51.447608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.448195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:51.453134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:51.453378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:51.454879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:51.454953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:51.455188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:51.455265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:51.455313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:51.455494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.478953Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:241:2153] sender: [1:355:2060] recipient: [1:17:2064] 2025-06-24T20:28:51.767288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:51.767637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.767895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:51.767951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: 
[OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:51.768226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:51.768310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:51.780257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.780497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:51.780755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.780826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:51.780873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:51.780909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:51.792238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.792321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:51.792362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:51.804140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.804208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.804272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.804339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:51.808102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:51.811665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from 
tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:51.811880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:51.812971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.813138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 250 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:51.813192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.813493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:51.813559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.813740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:51.813819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28:51.824627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:51.824703Z node 1 :FLAT_TX_SCHEMESHARD ... 
46678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-24T20:30:27.115897Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-24T20:30:27.115949Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-24T20:30:27.116343Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [7:688:2505], Recipient [7:242:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:30:27.116625Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:30:27.116994Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T20:30:27.117738Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124996, Sender [7:587:2404], Recipient [7:242:2154]: NKikimrScheme.TEvNotifyTxCompletion TxId: 106 2025-06-24T20:30:27.117781Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4964: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-24T20:30:27.118164Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-24T20:30:27.118667Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-24T20:30:27.118715Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [7:686:2503] 2025-06-24T20:30:27.118913Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [7:688:2505], Recipient [7:242:2154]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:30:27.118954Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:30:27.118993Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 TestModificationResults wait txId: 107 2025-06-24T20:30:27.120880Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [8:563:2103], Recipient [7:242:2154] 2025-06-24T20:30:27.121939Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T20:30:27.134454Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/test/tmp/a/b" OperationType: ESchemeOpMkDir MkDir { Name: "tmp2" } TempDirOwnerActorId { RawX1: 563 RawX2: 34359740471 } AllowCreateInTempDir: false } TxId: 107 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:27.139214Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/test/tmp/a/b/tmp2, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-24T20:30:27.140506Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 107:1, propose status:StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/test/tmp/a/b', error: path is 
temporary (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeDir, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-06-24T20:30:27.142206Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T20:30:27.148124Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 107, response: Status: StatusPreconditionFailed Reason: "Check failed: path: \'/MyRoot/test/tmp/a/b\', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeDir, state: EPathStateNoChanges)" TxId: 107 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:30:27.148441Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 107, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/test/tmp/a/b', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeDir, state: EPathStateNoChanges), operation: CREATE DIRECTORY, path: /MyRoot/test/tmp/a/b/tmp2 2025-06-24T20:30:27.148503Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 107, wait until txId: 107 TestWaitNotification wait txId: 107 2025-06-24T20:30:27.148888Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 107: send EvNotifyTxCompletion 2025-06-24T20:30:27.148930Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 107 2025-06-24T20:30:27.149266Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [7:694:2511], Recipient [7:242:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:30:27.149314Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:30:27.149355Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T20:30:27.149472Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124996, Sender [7:587:2404], Recipient [7:242:2154]: NKikimrScheme.TEvNotifyTxCompletion TxId: 107 2025-06-24T20:30:27.149504Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4964: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-24T20:30:27.149583Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 107, at schemeshard: 72057594046678944 2025-06-24T20:30:27.149690Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-24T20:30:27.149730Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [7:692:2509] 2025-06-24T20:30:27.149882Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [7:694:2511], Recipient [7:242:2154]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:30:27.149918Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:30:27.149956Z node 7 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 107 TestModificationResults wait txId: 108 2025-06-24T20:30:27.150339Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [8:563:2103], Recipient [7:242:2154] 2025-06-24T20:30:27.150388Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T20:30:27.156821Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/test/tmp/a/b" OperationType: ESchemeOpMkDir MkDir { Name: "tmp2" } TempDirOwnerActorId { RawX1: 563 RawX2: 34359740471 } AllowCreateInTempDir: true } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:27.157120Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/test/tmp/a/b/tmp2, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-24T20:30:27.157188Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 108:1, propose status:StatusPreconditionFailed, reason: Can't create temporary directory while flag AllowCreateInTempDir is set. Temporary directory can't be created in another temporary directory., at schemeshard: 72057594046678944 2025-06-24T20:30:27.157376Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T20:30:27.165292Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 108, response: Status: StatusPreconditionFailed Reason: "Can\'t create temporary directory while flag AllowCreateInTempDir is set. Temporary directory can\'t be created in another temporary directory." TxId: 108 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:30:27.166285Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Can't create temporary directory while flag AllowCreateInTempDir is set. 
Temporary directory can't be created in another temporary directory., operation: CREATE DIRECTORY, path: /MyRoot/test/tmp/a/b/tmp2 2025-06-24T20:30:27.166360Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 2025-06-24T20:30:27.166856Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 108: send EvNotifyTxCompletion 2025-06-24T20:30:27.166907Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 108 2025-06-24T20:30:27.167449Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [7:700:2517], Recipient [7:242:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:30:27.167532Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:30:27.167577Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T20:30:27.167742Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124996, Sender [7:587:2404], Recipient [7:242:2154]: NKikimrScheme.TEvNotifyTxCompletion TxId: 108 2025-06-24T20:30:27.167784Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4964: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-24T20:30:27.167864Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 108, at schemeshard: 72057594046678944 2025-06-24T20:30:27.167987Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-24T20:30:27.168040Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [7:698:2515] 2025-06-24T20:30:27.168228Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [7:700:2517], Recipient [7:242:2154]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:30:27.168283Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:30:27.168322Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 108 |88.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |88.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpPg::JoinWithQueryService+StreamLookup >> Cdc::UuidExchange[PqRunner] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteKqpUpsert [GOOD] Test command err: 2025-06-24T20:30:14.841755Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:30:14.842117Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:14.842309Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00334d/r3tmp/tmpkDPkhg/pdisk_1.dat 2025-06-24T20:30:17.993733Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.145002s 2025-06-24T20:30:17.993866Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.145143s 2025-06-24T20:30:18.008790Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:30:18.030496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:18.364827Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:18.580781Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797008297064 != 1750797008297068 2025-06-24T20:30:18.697084Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:18.719799Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:18.768906Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:19.328831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:21.628475Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertKqpStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } UpsertKqpStart { RowCount: 20 Inflight: 5 } 2025-06-24T20:30:21.628573Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:298: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:693:2575], subTag: 2} Bootstrap called: RowCount: 20 Inflight: 5 2025-06-24T20:30:21.631425Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:361: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:693:2575], subTag: 2} started# 5 actors each with inflight# 4 2025-06-24T20:30:21.631473Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 1} 
Bootstrap called: RowCount: 4 Inflight: 1 2025-06-24T20:30:21.631522Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 2} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-24T20:30:21.631567Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 3} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-24T20:30:21.631623Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 4} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-24T20:30:21.631647Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 5} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-24T20:30:21.820304Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 1} session: ydb://session/3?node_id=1&id=YjA5MzU2YTAtNjllZWI1YzktYzE0ZDM2NGMtMWFhNzNiOTU= 2025-06-24T20:30:21.850520Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 2} session: ydb://session/3?node_id=1&id=ZjA3ZTEwNTItNDZlNmM5MWYtZGMwYmQwNGEtNTVmOWMyNDU= 2025-06-24T20:30:21.912294Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 3} session: ydb://session/3?node_id=1&id=NDhjZjE1ZmItYzc3MGFjZTgtYjgzZWRkMjItMmViNjUwMg== 2025-06-24T20:30:21.963225Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 4} session: ydb://session/3?node_id=1&id=NjU0NDQ4MmMtNTM2OWMzNjEtNDMwM2QwMzctMmFlNjVlZTU= 2025-06-24T20:30:21.982353Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 5} session: ydb://session/3?node_id=1&id=ZjNkZDVmN2QtN2QzMWU4M2UtMzBiMzZjZS1mMmQ1ZTRhZQ== 2025-06-24T20:30:22.073980Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:734:2610], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:22.084461Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:708:2590], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:22.099750Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:735:2611], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:22.100276Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:736:2612], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:22.100667Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:737:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:22.100717Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:738:2614], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:22.101671Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:22.337862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:30:22.518371Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:754:2630] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:30:22.518544Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:755:2631] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:30:22.519593Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:756:2632] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:30:22.520121Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:763:2633] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:30:22.574202Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:30:22.870912Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:22.877026Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:747:2623], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:22.877113Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:748:2624], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:22.877501Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:749:2625], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:22.878255Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:750:2626], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:22.968666Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:851:2692] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:27.988466Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 5} finished in 1750797027.988414s, errors=0 2025-06-24T20:30:27.988812Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:693:2575], subTag: 2} finished: 5 { Tag: 5 DurationMs: 1750797027988 OperationsOK: 4 OperationsError: 0 } 2025-06-24T20:30:28.002670Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:924:2730] txid# 281474976715668, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:28.086036Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 2} finished in 1750797028.085990s, errors=0 2025-06-24T20:30:28.086354Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:693:2575], subTag: 2} finished: 2 { Tag: 2 DurationMs: 1750797028085 OperationsOK: 4 OperationsError: 0 } 2025-06-24T20:30:28.100629Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:975:2752] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:28.202882Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1025:2773] txid# 281474976715678, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:28.225009Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 4} finished in 1750797028.224971s, errors=0 2025-06-24T20:30:28.225417Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:693:2575], subTag: 2} finished: 4 { Tag: 4 DurationMs: 1750797028224 OperationsOK: 4 OperationsError: 0 } 2025-06-24T20:30:28.308969Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:694:2576], subTag: 3} finished in 1750797028.308925s, errors=0 2025-06-24T20:30:28.309139Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:693:2575], subTag: 2} finished: 3 { Tag: 3 DurationMs: 1750797028308 OperationsOK: 4 OperationsError: 0 } 2025-06-24T20:30:28.322884Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1076:2795] txid# 281474976715683, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:28.405724Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: 
[1:694:2576], subTag: 1} finished in 1750797028.405682s, errors=0 2025-06-24T20:30:28.406042Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:693:2575], subTag: 2} finished: 1 { Tag: 1 DurationMs: 1750797028405 OperationsOK: 4 OperationsError: 0 } 2025-06-24T20:30:28.406176Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:395: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:693:2575], subTag: 2} finished in 6.774807s, oks# 20, errors# 0 2025-06-24T20:30:28.406352Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:694:2576] with tag# 2 |88.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/control/ut/ydb-core-control-ut |88.2%| [LD] {RESULT} $(B)/ydb/core/control/ut/ydb-core-control-ut >> Cdc::KeysOnlyLogDebezium |88.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/control/ut/ydb-core-control-ut >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWoIndexes |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> test.py::test[solomon-LabelColumns-default.txt] [GOOD] >> test.py::test[solomon-Subquery-default.txt] >> Cdc::KeysOnlyLog[PqRunner] |88.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume |88.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume |88.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume >> TSchemeShardSysViewTest::AsyncCreateDifferentSysViews >> TableCreator::CreateTables [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_backup/unittest >> IncrementalBackup::E2EBackupCollection [FAIL] Test command err: 2025-06-24T20:29:08.458377Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:29:08.458675Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:08.458837Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004033/r3tmp/tmpoPaRTs/pdisk_1.dat 2025-06-24T20:29:08.818305Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:29:08.819647Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:555:2480], Recipient [1:371:2365]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:29:08.819718Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:29:08.819755Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T20:29:08.819841Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:552:2478], Recipient [1:371:2365]: {TEvModifySchemeTransaction txid# 1 TabletId# 72057594046644480} 2025-06-24T20:29:08.819885Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T20:29:08.940980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-24T20:29:08.941210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:08.941407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T20:29:08.941463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T20:29:08.941633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:29:08.941690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:29:08.941755Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:08.942239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T20:29:08.942396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T20:29:08.942449Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:08.942482Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-06-24T20:29:08.942666Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [1:371:2365], Recipient [1:371:2365]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:08.942726Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:08.942841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:08.942900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T20:29:08.942965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:29:08.943000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:29:08.943076Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:08.943544Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:08.943597Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-06-24T20:29:08.943744Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [1:371:2365], Recipient [1:371:2365]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:08.943769Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:08.943812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:08.943885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T20:29:08.943913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:29:08.943981Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:08.944224Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:08.944242Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 
2025-06-24T20:29:08.944307Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [1:371:2365], Recipient [1:371:2365]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:29:08.944333Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:29:08.944377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:08.944408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:29:08.944450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-24T20:29:08.944495Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:29:08.944526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:29:08.947423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:29:08.947777Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:29:08.947815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:29:08.947950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-24T20:29:08.949166Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877760, Sender [1:560:2485], Recipient [1:371:2365]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046316545 Status: OK ServerId: [1:562:2486] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-24T20:29:08.949213Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5046: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-24T20:29:08.949273Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5783: Handle TEvClientConnected, tabletId: 72057594046316545, status: OK, at schemeshard: 72057594046644480 2025-06-24T20:29:08.949395Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269091328, Sender [1:367:2361], Recipient [1:371:2365]: NKikimrTx.TEvProposeTransactionStatus Status: 16 StepId: 500 TxId: 1 2025-06-24T20:29:08.949800Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:564:2488], Recipient [1:371:2365]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:29:08.949849Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 
2025-06-24T20:29:08.949886Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T20:29:08.950042Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124996, Sender [1:552:2478], Recipient [1:371:2365]: NKikimrScheme.TEvNotifyTxCompletion TxId: 1 2025-06-24T20:29:08.950082Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4964: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-24T20:29:08.950139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-24T20:29:08.950174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-24T20:29:08.950215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-06-24T20:29:09.000466Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 273285138, Sender [1:44:2091], Recipient [1:371:2365]: ... le TEvSchemaChangedResult 281474976715668 datashard 72075186224037893 state Ready 2025-06-24T20:30:13.266867Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037893 Got TEvSchemaChangedResult from SS at 72075186224037893 2025-06-24T20:30:13.267394Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:30:13.267439Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715668:0 2025-06-24T20:30:13.267470Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715668:2 2025-06-24T20:30:13.267637Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [1:371:2365], Recipient [1:371:2365]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:30:13.267686Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:30:13.267745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-24T20:30:13.267788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976715668:0 ProgressState 2025-06-24T20:30:13.267908Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:30:13.267944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715668:0 progress is 2/3 2025-06-24T20:30:13.267983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 2/3 2025-06-24T20:30:13.268029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715668:0 progress is 2/3 2025-06-24T20:30:13.268059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 2/3 2025-06-24T20:30:13.268108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715668, ready parts: 2/3, is published: true 2025-06-24T20:30:13.268365Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [1:371:2365], Recipient [1:371:2365]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:30:13.268402Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:30:13.268455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715668:2, at schemeshard: 72057594046644480 2025-06-24T20:30:13.268489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976715668:2 ProgressState 2025-06-24T20:30:13.268531Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:30:13.268563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715668:2 progress is 3/3 2025-06-24T20:30:13.268594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 3/3 2025-06-24T20:30:13.268632Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715668:2 progress is 3/3 2025-06-24T20:30:13.268659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 3/3 2025-06-24T20:30:13.268687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715668, ready parts: 3/3, is published: true 2025-06-24T20:30:13.268764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:1379:3048] message: TxId: 281474976715668 2025-06-24T20:30:13.268820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 3/3 2025-06-24T20:30:13.268872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715668:0 2025-06-24T20:30:13.268935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715668:0 2025-06-24T20:30:13.269100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 16] was 3 2025-06-24T20:30:13.269142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 12] was 3 2025-06-24T20:30:13.269194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715668:1 2025-06-24T20:30:13.269222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715668:1 2025-06-24T20:30:13.269303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 15] was 2 2025-06-24T20:30:13.269330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715668:2 2025-06-24T20:30:13.269355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715668:2 2025-06-24T20:30:13.269385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 9] was 3 2025-06-24T20:30:13.270106Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:30:13.270296Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:30:13.270382Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [1:1379:3048] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715668 at schemeshard: 72057594046644480 2025-06-24T20:30:13.270739Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [1:1386:3054], Recipient [1:371:2365]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:30:13.270779Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:30:13.270806Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T20:30:13.300508Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [1:1458:3108], Recipient [1:371:2365]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:30:13.300617Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:30:13.300664Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T20:30:13.300788Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [1:1462:3112], Recipient [1:371:2365]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:30:13.300817Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:30:13.300841Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T20:30:13.467902Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:371:2365]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:30:13.467984Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:30:13.468198Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:371:2365], Recipient [1:371:2365]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:30:13.468243Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:30:13.755679Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037893, clientId# 
[1:1494:3140], serverId# [1:1495:3141], sessionId# [0:0:0] 2025-06-24T20:30:13.755986Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jyht4mjx3fkbed28zwabrpr4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjA2Yjk4YmYtZTRhODRkYjItMTM4YzVkNTUtYjJlMzY0ZWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } } assertion failed at ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp:925, virtual void NKikimr::NTestSuiteIncrementalBackup::TTestCaseE2EBackupCollection::Execute_(NUnitTest::TTestContext &): (expected == actual) failed: ("{ items { uint32_value: 2 } items { uint32_value: 200 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }" != "{ items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }") , with diff: "{ items { uint32_value: (|1 } items { uint3)2(|_value:) (|10 )} (|}, { )items { uint32_value: 2(0| } items { uint32_value: 2)0 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }" TBackTrace::Capture()+28 (0x19D0423C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x1A1C3300) NKikimr::NTestSuiteIncrementalBackup::TTestCaseE2EBackupCollection::Execute_(NUnitTest::TTestContext&)+10537 (0x1990E9D9) std::__y1::__function::__func, void ()>::operator()()+280 (0x19917A38) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x1A1FA4E6) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x1A1C9E89) NKikimr::NTestSuiteIncrementalBackup::TCurrentTest::Execute()+1204 (0x199168E4) NUnitTest::TTestFactory::Execute()+2438 (0x1A1CB756) NUnitTest::RunMain(int, char**)+5213 (0x1A1F4A5D) ??+0 (0x7FEA3D876D90) __libc_start_main+128 (0x7FEA3D876E40) _start+41 (0x16F04029) >> UpsertLoad::ShouldWriteDataBulkUpsertKeyFrom [GOOD] >> TSchemeShardSysViewTest::AsyncCreateSameSysView >> TSchemeShardSysViewTest::CreateSysView >> TSchemeShardSysViewTest::DropSysView >> TSchemeShardSysViewTest::AsyncCreateDirWithSysView >> UpsertLoad::ShouldWriteDataBulkUpsert2 [GOOD] >> TSchemeShardSysViewTest::AsyncDropSameSysView >> TPDiskRaces::OwnerKilledWhileReadingLog [GOOD] >> TPDiskRaces::OwnerKilledWhileReadingLogAndThenKillLastOwner >> TSchemeShardSysViewTest::CreateExistingSysView |88.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut |88.2%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut |88.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut >> TSchemeShardSysViewTest::EmptyName |88.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index |88.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index |88.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index ------- [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest >> TableCreator::CreateTables [GOOD] Test command err: 
2025-06-24T20:30:17.610935Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615932913071680:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:17.612064Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030f6/r3tmp/tmpvO88me/pdisk_1.dat 2025-06-24T20:30:22.487724Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:30:22.633230Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519615932913071680:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:22.633279Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:30:25.269215Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:25.269584Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:27.170706Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:27.295926Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:27.882882Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:27.882988Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:27.909868Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:27.936064Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519615932913071663:2079] 1750797017569620 != 1750797017569623 2025-06-24T20:30:27.960041Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TClient is connected to server localhost:25748 TServer::EnableGrpc on GrpcPort 64281, node 1 2025-06-24T20:30:28.475869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:30:28.475898Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:30:28.475908Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:30:28.476024Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T20:30:28.761173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:30:28.783764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:30:28.899219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:28.901721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpDataIntegrityTrails::BrokenReadLock-UseSink [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql2 [GOOD] >> TSchemeShardSysViewTest::AsyncCreateDifferentSysViews [GOOD] >> ReadLoad::ShouldReadKqp [GOOD] >> ReadLoad::ShouldReadKqpMoreThanRows >> ReadLoad::ShouldReadIterateMoreThanRows [GOOD] >> TSchemeShardSysViewTest::ReadOnlyMode >> Yq_1::Basic [GOOD] >> Yq_1::Basic_EmptyList >> TSchemeShardSysViewTest::DropSysView [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteDataBulkUpsertKeyFrom [GOOD] Test command err: 2025-06-24T20:30:13.938069Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:30:13.938539Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:13.938734Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0033a3/r3tmp/tmpWYebkB/pdisk_1.dat 2025-06-24T20:30:14.290450Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:30:14.294404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:14.360252Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:14.366209Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797001938096 != 1750797001938100 2025-06-24T20:30:14.421594Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:14.421762Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:14.436558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:14.537387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:15.922999Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertBulkStart { RowCount: 100 Inflight: 3 BatchSize: 7 } 2025-06-24T20:30:15.924509Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:693:2575], subTag: 2} TUpsertActor Bootstrap called: RowCount: 100 Inflight: 3 BatchSize: 7 with type# 0, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-06-24T20:30:16.250791Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:693:2575], subTag: 2} TUpsertActor finished in 0.302631s, errors=0 2025-06-24T20:30:16.251646Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:694:2576] with tag# 2 2025-06-24T20:30:30.566362Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:30:30.566808Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:30.566944Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0033a3/r3tmp/tmp6ugYk5/pdisk_1.dat 2025-06-24T20:30:30.959943Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-24T20:30:30.961784Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:30.990720Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:30.993045Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1750797020738315 != 1750797020738319 2025-06-24T20:30:31.042714Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:31.042856Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:31.055302Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:31.148912Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:31.512754Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertBulkStart { RowCount: 10 Inflight: 3 KeyFrom: 12345 } 2025-06-24T20:30:31.512902Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:692:2575], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 KeyFrom: 12345 with type# 0, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-06-24T20:30:31.595905Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:692:2575], subTag: 2} TUpsertActor finished in 0.082643s, errors=0 2025-06-24T20:30:31.596022Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:693:2576] with tag# 2 |88.2%| [TA] $(B)/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/{meta.json ... 
results_accumulator.log} >> UpsertLoad::ShouldDropCreateTable [GOOD] >> TSchemeShardSysViewTest::AsyncCreateDirWithSysView [GOOD] >> TSchemeShardSysViewTest::AsyncCreateSameSysView [GOOD] >> TSchemeShardSysViewTest::CreateSysView [GOOD] >> TSchemeShardSysViewTest::AsyncDropSameSysView [GOOD] >> TSchemeShardSysViewTest::EmptyName [GOOD] >> TSchemeShardSysViewTest::CreateExistingSysView [GOOD] |88.2%| [TA] $(B)/ydb/library/table_creator/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteDataBulkUpsert2 [GOOD] Test command err: 2025-06-24T20:30:15.456288Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:30:15.456720Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:15.456960Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00339b/r3tmp/tmpTB8ySz/pdisk_1.dat 2025-06-24T20:30:16.652536Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:30:16.690875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:16.854904Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:16.859962Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797008880625 != 1750797008880629 2025-06-24T20:30:16.949942Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:16.951613Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:16.979599Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:17.185621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:19.155238Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertBulkStart { RowCount: 10 Inflight: 3 } 2025-06-24T20:30:19.156900Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:693:2575], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 0, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-06-24T20:30:19.321182Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:693:2575], subTag: 2} TUpsertActor finished in 0.152544s, errors=0 2025-06-24T20:30:19.322390Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:694:2576] with tag# 2 2025-06-24T20:30:31.198479Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:30:31.198932Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:31.199065Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00339b/r3tmp/tmp1I6MV5/pdisk_1.dat 2025-06-24T20:30:31.515940Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-24T20:30:31.517781Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:31.568898Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:31.574377Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1750797023560077 != 1750797023560081 2025-06-24T20:30:31.734864Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:31.735030Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:31.760596Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:31.851487Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:32.197948Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "JustTable" } UpsertBulkStart { RowCount: 10 Inflight: 3 } 2025-06-24T20:30:32.198105Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:692:2575], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 0, target# TabletId: 72075186224037888 TableId: 2 TableName: "JustTable" 2025-06-24T20:30:32.275972Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:692:2575], subTag: 2} TUpsertActor finished in 0.077416s, errors=0 2025-06-24T20:30:32.276092Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:693:2576] with tag# 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::AsyncCreateDifferentSysViews [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE 
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:30:33.117224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:30:33.117312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:33.117349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:30:33.117393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:30:33.117450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:30:33.117476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:30:33.117531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:33.117608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:30:33.118363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:30:33.118707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:30:33.202215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:30:33.202281Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:33.215878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:30:33.220257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:30:33.220449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:30:33.228829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:30:33.229172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:30:33.229820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:33.230136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:30:33.232940Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:33.233149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:30:33.234263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:33.234325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:33.234449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:30:33.234502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:30:33.234553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:30:33.234693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.241562Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:30:33.404489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:33.404756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.405041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:30:33.405090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:30:33.405326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:30:33.405403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:33.413538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:33.413758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: 
txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:30:33.413996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.414059Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:30:33.414115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:30:33.414169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:30:33.419990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.420086Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:33.420145Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:30:33.428317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.428385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.428454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:33.428503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:30:33.432428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:30:33.436593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:30:33.436925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:30:33.440245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:33.440468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 
2025-06-24T20:30:33.440560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:33.441588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:30:33.441695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:33.441905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:30:33.442007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:30:33.446781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:33.446835Z node 1 :FLAT_TX_SCHEMESHARD ... 4T20:30:33.562381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:30:33.562418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-24T20:30:33.562450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:30:33.562475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:0 2025-06-24T20:30:33.562498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T20:30:33.562555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-24T20:30:33.562584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 103, publications: 2, subscribers: 0 2025-06-24T20:30:33.562616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-24T20:30:33.562645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 4], 2 2025-06-24T20:30:33.563305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:30:33.563357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:33.563407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-24T20:30:33.563443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, 
txId: 103, path id: 4 2025-06-24T20:30:33.564406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:30:33.564487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:30:33.564517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:30:33.564546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-24T20:30:33.564576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T20:30:33.566117Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:30:33.566211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:30:33.566241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:30:33.566268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 2 2025-06-24T20:30:33.566300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-24T20:30:33.566366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-24T20:30:33.569454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T20:30:33.569698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 102 2025-06-24T20:30:33.569972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T20:30:33.570012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 TestWaitNotification wait txId: 103 2025-06-24T20:30:33.570124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T20:30:33.570147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T20:30:33.570623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T20:30:33.570726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:30:33.570768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:344:2333] 2025-06-24T20:30:33.570932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T20:30:33.571021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:30:33.571054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:344:2333] TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 2025-06-24T20:30:33.571574Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/sys_view_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:30:33.571783Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/sys_view_1" took 250us result status StatusSuccess 2025-06-24T20:30:33.572132Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys/sys_view_1" PathDescription { Self { Name: "sys_view_1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } 
SysViewDescription { Name: "sys_view_1" Type: EPartitionStats SourceObject { OwnerId: 72057594046678944 LocalId: 1 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:30:33.572627Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/sys_view_2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:30:33.572796Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/sys_view_2" took 182us result status StatusSuccess 2025-06-24T20:30:33.573066Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys/sys_view_2" PathDescription { Self { Name: "sys_view_2" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SysViewDescription { Name: "sys_view_2" Type: ENodes SourceObject { OwnerId: 72057594046678944 LocalId: 1 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::BrokenReadLock-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 18655, MsgBus: 16286 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044f6/r3tmp/tmp1ZQBjR/pdisk_1.dat TServer::EnableGrpc on GrpcPort 18655, node 1 TClient is connected to server localhost:16286 TClient is connected to server localhost:16286 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::DropSysView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:30:33.405642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:30:33.405718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:33.405762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:30:33.405805Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:30:33.405845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:30:33.405889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:30:33.405945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:33.406013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:30:33.406696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:30:33.407014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:30:33.526887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 
2025-06-24T20:30:33.526938Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:33.548491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:30:33.553229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:30:33.553411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:30:33.561461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:30:33.561691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:30:33.562401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:33.562702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:30:33.565625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:33.565809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:30:33.567028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:33.567095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:33.567263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:30:33.567325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:30:33.567373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:30:33.567544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.574677Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:30:33.729077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:33.729324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.729614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId 
[OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:30:33.729667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:30:33.729892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:30:33.729986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:33.732497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:33.732695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:30:33.732914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.732974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:30:33.733026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:30:33.733065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:30:33.735200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.735267Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:33.735325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:30:33.737299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.737363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.737405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:33.737457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:30:33.748128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } 
CoordinatorID: 72057594046316545 2025-06-24T20:30:33.750576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:30:33.750819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:30:33.751969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:33.752114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:30:33.752178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:33.752537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:30:33.752607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:33.752793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:30:33.752874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:30:33.755417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:33.755467Z node 1 :FLAT_TX_SCHEMESHARD ... 
se BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:30:33.863990Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 224us result status StatusPathDoesNotExist 2025-06-24T20:30:33.864159Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/.sys/new_sys_view\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/.sys\' (id: [OwnerId: 72057594046678944, LocalPathId: 2])" Path: "/MyRoot/.sys/new_sys_view" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/.sys" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: ".sys" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T20:30:33.865181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:359:2058] recipient: [1:105:2138] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:362:2058] recipient: [1:15:2062] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:363:2058] recipient: [1:361:2348] Leader for TabletID 72057594046678944 is [1:364:2349] sender: [1:365:2058] recipient: [1:361:2348] 2025-06-24T20:30:33.905368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:30:33.905470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:33.905546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:30:33.905597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:30:33.905633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:30:33.905664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:30:33.905716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:33.905794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:30:33.906601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: 
ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:30:33.906927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:30:33.922258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:30:33.923767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:30:33.923954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:30:33.924130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:30:33.924164Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:33.924370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:30:33.925231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 2, at schemeshard: 72057594046678944 2025-06-24T20:30:33.925329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: .sys, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T20:30:33.925423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.925495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.925947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.926039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-24T20:30:33.926282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.926372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.926465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.926557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.926660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.926869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.927194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.927383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 
2025-06-24T20:30:33.927783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.927875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.928041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.928121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.928222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.928440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.928523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.928710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.928957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.929041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.929167Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.929230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.929296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.935754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:30:33.937646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:33.937727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:33.938193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:30:33.938263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:30:33.938317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:30:33.940582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:364:2349] sender: [1:425:2058] recipient: [1:15:2062] 2025-06-24T20:30:34.007567Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/new_sys_view" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:30:34.007821Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 309us result status StatusPathDoesNotExist 2025-06-24T20:30:34.007977Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/.sys/new_sys_view\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/.sys\' (id: [OwnerId: 72057594046678944, LocalPathId: 2])" Path: "/MyRoot/.sys/new_sys_view" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/.sys" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: ".sys" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::AsyncCreateDirWithSysView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:30:33.669473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:30:33.669567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:33.669616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:30:33.669658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:30:33.669697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:30:33.669726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:30:33.669779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:33.669856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-24T20:30:33.670645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:30:33.671005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:30:33.748975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:30:33.749038Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:33.764167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:30:33.764528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:30:33.764671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:30:33.770890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:30:33.771074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:30:33.771621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:33.771869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:30:33.774006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:33.774177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:30:33.775085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:33.775170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:33.775361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:30:33.775396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:30:33.775428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:30:33.775498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.780866Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:30:33.914752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:33.915060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.923557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:30:33.923686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:30:33.923983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:30:33.924079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:33.928222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:33.928436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:30:33.928658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.928717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:30:33.928781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:30:33.928816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:30:33.936174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.936240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:33.936555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:30:33.939012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.939076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.939119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:33.939182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:30:33.955409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:30:33.964096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:30:33.964332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:30:33.965462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:33.965628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:30:33.965690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:33.965973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:30:33.966028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:33.966242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:30:33.966329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:30:33.971822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:33.971874Z node 1 :FLAT_TX_SCHEMESHARD ... 
eration.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.083104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T20:30:34.083187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T20:30:34.083215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:30:34.083241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T20:30:34.083277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:30:34.083322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-24T20:30:34.083353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:30:34.083378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T20:30:34.083409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T20:30:34.083466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T20:30:34.083497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-24T20:30:34.083519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 4 2025-06-24T20:30:34.083558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-24T20:30:34.084282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:30:34.084390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:30:34.084424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:30:34.084451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-24T20:30:34.084486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:30:34.085309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 
72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:30:34.085400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:30:34.085440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:30:34.085466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-24T20:30:34.085492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T20:30:34.085556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-24T20:30:34.088736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:30:34.089902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 2025-06-24T20:30:34.090183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T20:30:34.090237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-24T20:30:34.090326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T20:30:34.090349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T20:30:34.090799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T20:30:34.090933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T20:30:34.090972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:327:2316] 2025-06-24T20:30:34.091285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T20:30:34.091365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:30:34.091423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:327:2316] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 2025-06-24T20:30:34.091954Z node 1 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:30:34.092230Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys" took 257us result status StatusSuccess 2025-06-24T20:30:34.092723Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys" PathDescription { Self { Name: ".sys" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 } ChildrenExist: true } Children { Name: "new_sys_view" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:30:34.093273Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/new_sys_view" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:30:34.093483Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 205us result status StatusSuccess 2025-06-24T20:30:34.093798Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys/new_sys_view" PathDescription { Self { Name: "new_sys_view" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 
ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SysViewDescription { Name: "new_sys_view" Type: EPartitionStats SourceObject { OwnerId: 72057594046678944 LocalId: 1 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql2 [GOOD] Test command err: 2025-06-24T20:30:14.790505Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:30:14.790960Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:14.791218Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003375/r3tmp/tmpsx70gW/pdisk_1.dat 2025-06-24T20:30:15.938852Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:30:15.973000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:16.019259Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:16.025608Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797009426327 != 1750797009426331 2025-06-24T20:30:16.129917Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:16.143485Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:16.177010Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:16.322993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:20.325945Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertLocalMkqlStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertLocalMkqlStart { RowCount: 10 Inflight: 3 } 2025-06-24T20:30:20.326098Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:693:2575], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 1, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-06-24T20:30:20.728757Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:693:2575], subTag: 2} TUpsertActor finished in 0.401869s, errors=0 2025-06-24T20:30:20.729562Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:694:2576] with tag# 2 2025-06-24T20:30:31.738862Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:30:31.741402Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:31.741591Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003375/r3tmp/tmpgJaWjO/pdisk_1.dat 2025-06-24T20:30:32.391983Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-24T20:30:32.393921Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:32.444159Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:32.446505Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1750797025706180 != 1750797025706184 2025-06-24T20:30:32.506326Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:32.506481Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:32.518154Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:32.630668Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:33.010301Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertLocalMkqlStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "JustTable" } UpsertLocalMkqlStart { RowCount: 10 Inflight: 3 } 2025-06-24T20:30:33.011749Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:692:2575], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 1, target# TabletId: 72075186224037888 TableId: 2 TableName: "JustTable" 2025-06-24T20:30:33.133020Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:692:2575], subTag: 2} TUpsertActor finished in 0.120763s, errors=0 2025-06-24T20:30:33.133142Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:693:2576] with tag# 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::AsyncCreateSameSysView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE 
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:30:33.559491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:30:33.559585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:33.559634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:30:33.559668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:30:33.559706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:30:33.559733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:30:33.559789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:33.559855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:30:33.560537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:30:33.560859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:30:33.652384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:30:33.652442Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:33.683579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:30:33.684052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:30:33.684297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:30:33.708890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:30:33.709121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:30:33.709818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:33.710169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:30:33.713514Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:33.713756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:30:33.715097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:33.715208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:33.715489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:30:33.715563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:30:33.715612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:30:33.715712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.723594Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:30:33.873403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:33.873668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.873954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:30:33.874016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:30:33.874252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:30:33.874336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:33.877251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:33.877440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: 
txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:30:33.877666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.877719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:30:33.877764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:30:33.877822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:30:33.879942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.880002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:33.880061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:30:33.882073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.882127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.882173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:33.882226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:30:33.886154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:30:33.888395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:30:33.888625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:30:33.889758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:33.889911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 
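Each schemeshard operation in these dumps advances through numeric sub-states (for example "Change state for txid 1:0 2 -> 3", then "3 -> 128", then "128 -> 240"), so an operation's state chain can be recovered from the text alone when comparing runs. A minimal sketch under the same assumptions as above (Python 3, standard library only; names are illustrative):

    import re
    from collections import defaultdict

    # Matches schemeshard progress lines such as:
    #   Change state for txid 1:0 128 -> 240
    STATE_RE = re.compile(r"Change state for txid (?P<op>\d+:\d+) (?P<src>\d+) -> (?P<dst>\d+)")

    def state_transitions(log_text):
        """Group 'Change state' transitions by operation id, in order of appearance."""
        chains = defaultdict(list)
        for m in STATE_RE.finditer(log_text):
            chains[m["op"]].append((int(m["src"]), int(m["dst"])))
        return dict(chains)

    if __name__ == "__main__":
        sample = ("Change state for txid 1:0 2 -> 3\n"
                  "Change state for txid 1:0 3 -> 128\n"
                  "Change state for txid 1:0 128 -> 240\n")
        print(state_transitions(sample))  # {'1:0': [(2, 3), (3, 128), (128, 240)]}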
2025-06-24T20:30:33.889977Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:33.890440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:30:33.890519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:33.890726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:30:33.890815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:30:33.893452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:33.893540Z node 1 :FLAT_TX_SCHEMESHARD ... at schemeshard: 72057594046678944 2025-06-24T20:30:33.986618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:30:33.986806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_sysview.cpp:45: [72057594046678944] TCreateSysView::TPropose, opId: 102:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000003 2025-06-24T20:30:33.986960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 128 -> 240 2025-06-24T20:30:33.987131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T20:30:33.987243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T20:30:33.989714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:33.989765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:30:33.989934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T20:30:33.990074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:33.990113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-24T20:30:33.990163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard 
Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 3 FAKE_COORDINATOR: Erasing txId 102 2025-06-24T20:30:33.990632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.990677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T20:30:33.990816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T20:30:33.990853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:30:33.990923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T20:30:33.991019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:30:33.991064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-24T20:30:33.991110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:30:33.991167Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T20:30:33.991201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T20:30:33.991285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T20:30:33.991339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-24T20:30:33.991390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 4 2025-06-24T20:30:33.991422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-24T20:30:33.992224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:30:33.992322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:30:33.992367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:30:33.992413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-24T20:30:33.992457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 
72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:30:33.993274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:30:33.993354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:30:33.993387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:30:33.993416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-24T20:30:33.993465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T20:30:33.993551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-24T20:30:33.997655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:30:33.998082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 102 2025-06-24T20:30:33.998383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T20:30:33.998467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 TestWaitNotification wait txId: 103 2025-06-24T20:30:33.998574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T20:30:33.998614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T20:30:33.999059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T20:30:33.999300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:30:33.999351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:336:2325] 2025-06-24T20:30:33.999602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T20:30:33.999722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:30:33.999750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: 
tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:336:2325] TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 2025-06-24T20:30:34.000428Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/new_sys_view" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:30:34.000697Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 285us result status StatusSuccess 2025-06-24T20:30:34.001112Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys/new_sys_view" PathDescription { Self { Name: "new_sys_view" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SysViewDescription { Name: "new_sys_view" Type: EPartitionStats SourceObject { OwnerId: 72057594046678944 LocalId: 1 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::AsyncDropSameSysView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:30:33.741752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:30:33.741854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 
1.000000s, InflightLimit# 10 2025-06-24T20:30:33.741915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:30:33.741956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:30:33.742000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:30:33.742044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:30:33.742106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:33.742191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:30:33.742957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:30:33.743342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:30:33.832425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:30:33.832499Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:33.849305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:30:33.849754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:30:33.849919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:30:33.857039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:30:33.857189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:30:33.857698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:33.857959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:30:33.860542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:33.860748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:30:33.861961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:33.862034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:33.862289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:30:33.862346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:30:33.862396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:30:33.862483Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.869472Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:30:34.066290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:34.066567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.066866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:30:34.066928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:30:34.067185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:30:34.067261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:34.072772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:34.072952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:30:34.073220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.073316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:30:34.073369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:30:34.073402Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:30:34.076839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.076898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:34.076960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:30:34.078723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.078778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.078820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:34.078877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:30:34.082845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:30:34.085087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:30:34.085282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:30:34.086260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:34.086381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:30:34.086454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:34.086740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:30:34.086799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:34.086943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path 
for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:30:34.087006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:30:34.092559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:34.092622Z node 1 :FLAT_TX_SCHEMESHARD ... ePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T20:30:34.202271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:34.202315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 103, path id: 1 2025-06-24T20:30:34.202360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-24T20:30:34.202388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-24T20:30:34.202814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.202885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-24T20:30:34.202983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:30:34.203034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:30:34.203079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:30:34.203108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:30:34.203158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-24T20:30:34.203202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:30:34.203237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:0 2025-06-24T20:30:34.203268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T20:30:34.203361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T20:30:34.203402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 103, publications: 3, subscribers: 0 2025-06-24T20:30:34.203451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:990: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-24T20:30:34.203482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-24T20:30:34.203508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-24T20:30:34.204554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:30:34.204693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:30:34.204743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:30:34.204798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-24T20:30:34.204854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:30:34.206421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:30:34.206536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:30:34.206583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:30:34.206622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-24T20:30:34.206660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:30:34.207630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:30:34.207737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:30:34.207768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: 
Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:30:34.207798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-24T20:30:34.207829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T20:30:34.207908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-24T20:30:34.208446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T20:30:34.208492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T20:30:34.208569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T20:30:34.210401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T20:30:34.212824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T20:30:34.213444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T20:30:34.213914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestModificationResults wait txId: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 103 2025-06-24T20:30:34.214282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T20:30:34.214326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 TestWaitNotification wait txId: 104 2025-06-24T20:30:34.214428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-24T20:30:34.214464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-24T20:30:34.214892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T20:30:34.215053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:30:34.215125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:363:2352] 2025-06-24T20:30:34.215268Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-24T20:30:34.215352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T20:30:34.215373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:363:2352] TestWaitNotification: OK eventTxId 103 TestWaitNotification: OK eventTxId 104 2025-06-24T20:30:34.215905Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/new_sys_view" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:30:34.216116Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 244us result status StatusPathDoesNotExist 2025-06-24T20:30:34.216290Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/.sys/new_sys_view\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/.sys\' (id: [OwnerId: 72057594046678944, LocalPathId: 2])" Path: "/MyRoot/.sys/new_sys_view" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/.sys" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: ".sys" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::CreateSysView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:30:33.596875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:30:33.596985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:33.597032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:30:33.597070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:30:33.597120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:30:33.597154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: 
type TxSplitTablePartition, limit 10000 2025-06-24T20:30:33.597204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:33.597286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:30:33.598064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:30:33.598412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:30:33.684254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:30:33.684316Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:33.707951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:30:33.708387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:30:33.708589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:30:33.726815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:30:33.727093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:30:33.727881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:33.728240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:30:33.734443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:33.734685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:30:33.735947Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:33.736038Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:33.736261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:30:33.736314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:30:33.736372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:30:33.736461Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 
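The trace in this report is the raw schemeshard log for the sysview unit tests, flattened into one stream: every record starts with an ISO-8601 timestamp, a node id, a component tag (FLAT_TX_SCHEMESHARD, SCHEMESHARD_DESCRIBE, HIVE, IMPORT) and a severity, usually followed by a source-file prefix. Below is a minimal sketch of splitting such a blob back into individual records for inspection; it assumes only the record shape visible in this output, and the helper names, the regex and the command-line usage are illustrative rather than part of the YDB tooling.

# Minimal sketch: split a flattened "Test command err" stream back into
# individual schemeshard log records. Assumed record shape (taken from the
# trace above):
#   <ISO-8601 timestamp>Z node <N> :<COMPONENT> <LEVEL>: <message...>
# The source-file prefix inside <message> (e.g. "schemeshard__operation.cpp:502:")
# is optional and captured separately when present.
import re
from typing import Dict, Iterator

RECORD_START = re.compile(
    r"(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z) "
    r"node (?P<node>\d+) "
    r":(?P<component>[A-Z_]+) (?P<level>TRACE|DEBUG|INFO|NOTICE|WARN|ERROR|CRIT):"
)

def iter_records(raw: str) -> Iterator[Dict[str, str]]:
    """Yield one dict per log record found in a flattened log blob."""
    starts = list(RECORD_START.finditer(raw))
    for i, m in enumerate(starts):
        end = starts[i + 1].start() if i + 1 < len(starts) else len(raw)
        body = raw[m.end():end].strip()
        rec = m.groupdict()
        src = re.match(r"(?P<file>[\w./]+\.cpp:\d+):\s*", body)
        if src:
            rec["source"] = src.group("file")
            body = body[src.end():]
        rec["message"] = body
        yield rec

if __name__ == "__main__":
    # Feed the captured stderr of a single test on stdin; prints a
    # per-severity record count as a quick sanity check.
    import collections
    import sys
    blob = sys.stdin.read()
    print(dict(collections.Counter(r["level"] for r in iter_records(blob))))

Fed the stderr blob of one of the [GOOD] tests above, this yields the DEBUG/INFO/NOTICE/WARN breakdown without re-running the suite; the per-record dicts can likewise be filtered by component or source file when tracing a single operation id through the log.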
2025-06-24T20:30:33.744254Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:30:33.882165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:33.882433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.882728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:30:33.882778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:30:33.882990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:30:33.883063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:33.894065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:33.894318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:30:33.894568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.894633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:30:33.894691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:30:33.894729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:30:33.902688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.902765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:33.902832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:30:33.905290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.905359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:33.905406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:33.905465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:30:33.916518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:30:33.925477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:30:33.925729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:30:33.926955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:33.927109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:30:33.927193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:33.927504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:30:33.927583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:33.927790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:30:33.927894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:30:33.930457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:33.930508Z node 1 :FLAT_TX_SCHEMESHARD ... 
ient: [1:15:2062] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:341:2058] recipient: [1:339:2326] Leader for TabletID 72057594046678944 is [1:342:2327] sender: [1:343:2058] recipient: [1:339:2326] 2025-06-24T20:30:34.096776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:30:34.096880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:34.096931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:30:34.096987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:30:34.097028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:30:34.097065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:30:34.097134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:34.097200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:30:34.098071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:30:34.098479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:30:34.133792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:30:34.135455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:30:34.135680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:30:34.135834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:30:34.135871Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:34.136131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:30:34.136932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 3, at schemeshard: 72057594046678944 2025-06-24T20:30:34.137034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: .sys, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T20:30:34.137082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: 
AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 2], parent name: .sys, child name: new_sys_view, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T20:30:34.137181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.137270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.137747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.137840Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-24T20:30:34.138014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-06-24T20:30:34.138157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.138250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.138341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.138468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.138546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.138721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.139062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.139316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.139786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.139877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.140119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.140221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.140316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.140521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 
72057594046678944 2025-06-24T20:30:34.140617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.140860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.141129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.141206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.141355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.141435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.141491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.147494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:30:34.150509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:34.150576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:34.150669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:30:34.150722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:30:34.150763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:30:34.151808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:342:2327] sender: [1:403:2058] recipient: [1:15:2062] 2025-06-24T20:30:34.223229Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/new_sys_view" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:30:34.223494Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 282us result status StatusSuccess 2025-06-24T20:30:34.223772Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys/new_sys_view" PathDescription { Self { Name: "new_sys_view" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 
ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SysViewDescription { Name: "new_sys_view" Type: EPartitionStats SourceObject { OwnerId: 72057594046678944 LocalId: 1 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::EmptyName [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:30:33.966632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:30:33.966750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:33.966795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:30:33.966866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:30:33.966922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:30:33.966959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:30:33.967027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:33.967118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:30:33.968021Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:30:33.968437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:30:34.085275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:30:34.085334Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:34.110311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:30:34.110803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:30:34.110985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:30:34.125828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:30:34.126112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:30:34.126873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:34.127209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:30:34.131355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:34.131614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:30:34.132978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:34.133068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:34.133343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:30:34.133409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:30:34.133462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:30:34.133557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.143293Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:30:34.287341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-24T20:30:34.287560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.287837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:30:34.287880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:30:34.288109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:30:34.288183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:34.290745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:34.290927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:30:34.291090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.291134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:30:34.291206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:30:34.291235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:30:34.294131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.294190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:34.294236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:30:34.296340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.296404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.296449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:34.296500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:30:34.299722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:30:34.301585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:30:34.301756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:30:34.302661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:34.302782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:30:34.302837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:34.303112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:30:34.303197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:34.303334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:30:34.303396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:30:34.309003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:34.309059Z node 1 :FLAT_TX_SCHEMESHARD ... 
Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 2025-06-24T20:30:34.352262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:34.352371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:30:34.352442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:33: MkDir::TPropose operationId# 101:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000002, at schemeshard: 72057594046678944 2025-06-24T20:30:34.352625Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 128 -> 240 2025-06-24T20:30:34.352759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:30:34.352834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T20:30:34.353701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T20:30:34.354158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T20:30:34.355880Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:34.355914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:30:34.356030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:30:34.356094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:34.356122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-24T20:30:34.356168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-24T20:30:34.356554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.356607Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T20:30:34.356705Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T20:30:34.356742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:30:34.356785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T20:30:34.356812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:30:34.356846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-24T20:30:34.356960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:30:34.357015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T20:30:34.357058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T20:30:34.357145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:30:34.357182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-24T20:30:34.357213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-24T20:30:34.357235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-24T20:30:34.357768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:30:34.357860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:30:34.357916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-24T20:30:34.357971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T20:30:34.358022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:30:34.358652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:30:34.358733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 
72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:30:34.358764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T20:30:34.358786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T20:30:34.358816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T20:30:34.358871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T20:30:34.361960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T20:30:34.362256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T20:30:34.362448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T20:30:34.362490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T20:30:34.362784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T20:30:34.362869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T20:30:34.362900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:307:2296] TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-24T20:30:34.365082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView CreateSysView { Name: "" Type: EPartitionStats } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:34.365249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_sysview.cpp:116: [72057594046678944] TCreateSysView Propose, path: /MyRoot/.sys/, opId: 102:0 2025-06-24T20:30:34.365309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_sysview.cpp:122: [72057594046678944] TCreateSysView Propose, path: /MyRoot/.sys/, opId: 102:0, sysViewDescription: Name: "" Type: EPartitionStats 2025-06-24T20:30:34.365431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 102:1, propose status:StatusSchemeError, reason: Check failed: path: '/MyRoot/.sys/', error: path part shouldn't be empty, at schemeshard: 72057594046678944 2025-06-24T20:30:34.367469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 102, response: Status: StatusSchemeError Reason: "Check failed: path: \'/MyRoot/.sys/\', error: 
path part shouldn\'t be empty" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:30:34.367713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusSchemeError, reason: Check failed: path: '/MyRoot/.sys/', error: path part shouldn't be empty, operation: CREATE SYSTEM VIEW, path: /MyRoot/.sys/ TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T20:30:34.367970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T20:30:34.368010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T20:30:34.368360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T20:30:34.368484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:30:34.368525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:314:2303] TestWaitNotification: OK eventTxId 102 >> KqpPg::TypeCoercionBulkUpsert ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> ReadLoad::ShouldReadIterateMoreThanRows [GOOD] Test command err: 2025-06-24T20:30:14.428480Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:30:14.428897Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:14.429097Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00333d/r3tmp/tmp5Y7xQL/pdisk_1.dat 2025-06-24T20:30:14.940768Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:30:14.944181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:15.393440Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:15.460496Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797005788262 != 1750797005788266 2025-06-24T20:30:15.559443Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:15.560648Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:15.584997Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:15.766203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:17.958840Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# usertable in dir# /Root with rows# 1000 2025-06-24T20:30:17.968212Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:693:2575], subTag: 1} TUpsertActor Bootstrap called: RowCount: 1000 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" 2025-06-24T20:30:18.637429Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:693:2575], subTag: 1} TUpsertActor finished in 0.666365s, errors=0 2025-06-24T20:30:18.649695Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kReadIteratorStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "usertable" } TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } ReadIteratorStart { RowCount: 1000 Inflights: 1 Chunks: 0 Chunks: 1 Chunks: 10 } 2025-06-24T20:30:18.651715Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:334: ReadIteratorLoadScenario# [1:702:2584] 
with id# {Tag: 0, parent: [1:693:2575], subTag: 3} Bootstrap called: RowCount: 1000 Inflights: 1 Chunks: 0 Chunks: 1 Chunks: 10 2025-06-24T20:30:18.910044Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:396: ReadIteratorLoadScenario# {Tag: 0, parent: [1:693:2575], subTag: 3} will work with tablet# 72075186224037888 with ownerId# 72057594046644480 with tableId# 2 resolved for path# /Root/usertable with columnsCount# 11, keyColumnCount# 1 2025-06-24T20:30:18.911304Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [1:705:2587] 2025-06-24T20:30:18.912436Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 1} Bootstrap called, sample# 0 2025-06-24T20:30:18.913598Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 1} Connect to# 72075186224037888 called 2025-06-24T20:30:18.915330Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 1} Handle TEvClientConnected called, Status# OK 2025-06-24T20:30:19.040313Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 1} finished in 0.124521s, read# 1000 2025-06-24T20:30:19.041576Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [1:705:2587] with chunkSize# 0 finished: 0 { DurationMs: 124 OperationsOK: 1000 OperationsError: 0 } 2025-06-24T20:30:19.042748Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [1:708:2590] 2025-06-24T20:30:19.043132Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 2} Bootstrap called, sample# 0 2025-06-24T20:30:19.043558Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 2} Connect to# 72075186224037888 called 2025-06-24T20:30:19.044929Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 2} Handle TEvClientConnected called, Status# OK 2025-06-24T20:30:22.348964Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 2} finished in 3.303983s, read# 1000 2025-06-24T20:30:22.349370Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [1:708:2590] with chunkSize# 1 finished: 0 { DurationMs: 3303 OperationsOK: 1000 OperationsError: 0 } 2025-06-24T20:30:22.349466Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [1:711:2593] 2025-06-24T20:30:22.349505Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 3} Bootstrap called, sample# 0 2025-06-24T20:30:22.349535Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 3} Connect to# 72075186224037888 called 2025-06-24T20:30:22.350044Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 3} Handle TEvClientConnected called, Status# OK 2025-06-24T20:30:22.431775Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 3} finished in 0.081677s, read# 1000 2025-06-24T20:30:22.431928Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [1:711:2593] with chunkSize# 10 finished: 0 { DurationMs: 81 OperationsOK: 1000 OperationsError: 0 } 2025-06-24T20:30:22.432664Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started 
fullscan actor# [1:714:2596] 2025-06-24T20:30:22.433273Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 4} Bootstrap called, sample# 1000 2025-06-24T20:30:22.433304Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 4} Connect to# 72075186224037888 called 2025-06-24T20:30:22.434136Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 4} Handle TEvClientConnected called, Status# OK 2025-06-24T20:30:22.437715Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:137: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 4} finished in 0.002960s, sampled# 1000, iter finished# 1, oks# 1000 2025-06-24T20:30:22.437831Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:506: ReadIteratorLoadScenario# {Tag: 0, parent: [1:693:2575], subTag: 3} received keyCount# 1000 2025-06-24T20:30:22.437988Z node 1 :DS_LOAD_TEST DEBUG: test_load_read_iterator.cpp:551: ReadIteratorLoadScenario# {Tag: 0, parent: [1:693:2575], subTag: 3} started read actor with id# [1:717:2599] 2025-06-24T20:30:22.438043Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:79: TReadIteratorPoints# {Tag: 0, parent: [1:702:2584], subTag: 5} Bootstrap called, will read keys# 1000 2025-06-24T20:30:26.740208Z node 1 :DS_LOAD_TEST DEBUG: test_load_read_iterator.cpp:559: ReadIteratorLoadScenario# {Tag: 0, parent: [1:693:2575], subTag: 3} received point times# 1000, Inflight left# 0 2025-06-24T20:30:26.741797Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:482: headread with inflight# 1 finished: 0 { DurationMs: 4302 OperationsOK: 1000 OperationsError: 0 Info: "single row head read hist (ms):\n50%: 2\n95%: 13\n99%: 32\n99.9%: 251\n" } 2025-06-24T20:30:26.742679Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:616: ReadIteratorLoadScenario# {Tag: 0, parent: [1:693:2575], subTag: 3} finished in 8.090193s with report: { DurationMs: 124 OperationsOK: 1000 OperationsError: 0 PrefixInfo: "Test run# 1, type# FullScan with chunk# inf" } { DurationMs: 3303 OperationsOK: 1000 OperationsError: 0 PrefixInfo: "Test run# 2, type# FullScan with chunk# 1" } { DurationMs: 81 OperationsOK: 1000 OperationsError: 0 PrefixInfo: "Test run# 3, type# FullScan with chunk# 10" } { DurationMs: 4302 OperationsOK: 1000 OperationsError: 0 Info: "single row head read hist (ms):\n50%: 2\n95%: 13\n99%: 32\n99.9%: 251\n" PrefixInfo: "Test run# 4, type# ReadHeadPoints with inflight# 1" } 2025-06-24T20:30:26.744477Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:702:2584] with tag# 3 2025-06-24T20:30:31.988031Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:30:31.988536Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:31.988693Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00333d/r3tmp/tmpY3UCez/pdisk_1.dat 2025-06-24T20:30:32.329226Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-24T20:30:32.330744Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:32.368221Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:32.370145Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1750797028001722 != 1750797028001726 2025-06-24T20:30:32.421895Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:32.422049Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:32.433677Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:32.542051Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:32.951349Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# usertable in dir# /Root with rows# 10 2025-06-24T20:30:32.951753Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:692:2575], subTag: 1} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" 2025-06-24T20:30:32.974764Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:692:2575], subTag: 1} TUpsertActor finished in 0.022620s, errors=0 2025-06-24T20:30:32.979919Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kReadIteratorStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "usertable" } TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } ReadIteratorStart { RowCount: 10 ReadCount: 1000 Inflights: 1 Chunks: 0 Chunks: 1 Chunks: 10 } 2025-06-24T20:30:32.980127Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:334: ReadIteratorLoadScenario# 
[2:701:2584] with id# {Tag: 0, parent: [2:692:2575], subTag: 3} Bootstrap called: RowCount: 10 ReadCount: 1000 Inflights: 1 Chunks: 0 Chunks: 1 Chunks: 10 2025-06-24T20:30:33.002912Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:396: ReadIteratorLoadScenario# {Tag: 0, parent: [2:692:2575], subTag: 3} will work with tablet# 72075186224037888 with ownerId# 72057594046644480 with tableId# 2 resolved for path# /Root/usertable with columnsCount# 11, keyColumnCount# 1 2025-06-24T20:30:33.003081Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [2:704:2587] 2025-06-24T20:30:33.003420Z node 2 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [2:701:2584], subTag: 1} Bootstrap called, sample# 0 2025-06-24T20:30:33.003471Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [2:701:2584], subTag: 1} Connect to# 72075186224037888 called 2025-06-24T20:30:33.003804Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [2:701:2584], subTag: 1} Handle TEvClientConnected called, Status# OK 2025-06-24T20:30:33.004568Z node 2 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [2:701:2584], subTag: 1} finished in 0.000710s, read# 10 2025-06-24T20:30:33.004737Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [2:704:2587] with chunkSize# 0 finished: 0 { DurationMs: 0 OperationsOK: 10 OperationsError: 0 } 2025-06-24T20:30:33.004878Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [2:707:2590] 2025-06-24T20:30:33.004942Z node 2 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [2:701:2584], subTag: 2} Bootstrap called, sample# 0 2025-06-24T20:30:33.004971Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [2:701:2584], subTag: 2} Connect to# 72075186224037888 called 2025-06-24T20:30:33.005190Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [2:701:2584], subTag: 2} Handle TEvClientConnected called, Status# OK 2025-06-24T20:30:33.008055Z node 2 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [2:701:2584], subTag: 2} finished in 0.002823s, read# 10 2025-06-24T20:30:33.008176Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [2:707:2590] with chunkSize# 1 finished: 0 { DurationMs: 2 OperationsOK: 10 OperationsError: 0 } 2025-06-24T20:30:33.008281Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [2:710:2593] 2025-06-24T20:30:33.008322Z node 2 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [2:701:2584], subTag: 3} Bootstrap called, sample# 0 2025-06-24T20:30:33.008361Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [2:701:2584], subTag: 3} Connect to# 72075186224037888 called 2025-06-24T20:30:33.008615Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [2:701:2584], subTag: 3} Handle TEvClientConnected called, Status# OK 2025-06-24T20:30:33.009258Z node 2 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [2:701:2584], subTag: 3} finished in 0.000596s, read# 10 2025-06-24T20:30:33.009375Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [2:710:2593] with chunkSize# 10 finished: 0 { DurationMs: 0 OperationsOK: 10 OperationsError: 0 } 2025-06-24T20:30:33.009509Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: 
started fullscan actor# [2:713:2596] 2025-06-24T20:30:33.009583Z node 2 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [2:701:2584], subTag: 4} Bootstrap called, sample# 10 2025-06-24T20:30:33.009622Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [2:701:2584], subTag: 4} Connect to# 72075186224037888 called 2025-06-24T20:30:33.009830Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [2:701:2584], subTag: 4} Handle TEvClientConnected called, Status# OK 2025-06-24T20:30:33.010231Z node 2 :DS_LOAD_TEST NOTICE: common.cpp:137: ReadIteratorScan# {Tag: 0, parent: [2:701:2584], subTag: 4} finished in 0.000336s, sampled# 10, iter finished# 1, oks# 10 2025-06-24T20:30:33.010333Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:506: ReadIteratorLoadScenario# {Tag: 0, parent: [2:692:2575], subTag: 3} received keyCount# 10 2025-06-24T20:30:33.010480Z node 2 :DS_LOAD_TEST DEBUG: test_load_read_iterator.cpp:551: ReadIteratorLoadScenario# {Tag: 0, parent: [2:692:2575], subTag: 3} started read actor with id# [2:716:2599] 2025-06-24T20:30:33.010552Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:79: TReadIteratorPoints# {Tag: 0, parent: [2:701:2584], subTag: 5} Bootstrap called, will read keys# 10 2025-06-24T20:30:33.442966Z node 2 :DS_LOAD_TEST DEBUG: test_load_read_iterator.cpp:559: ReadIteratorLoadScenario# {Tag: 0, parent: [2:692:2575], subTag: 3} received point times# 1000, Inflight left# 0 2025-06-24T20:30:33.443199Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:482: headread with inflight# 1 finished: 0 { DurationMs: 432 OperationsOK: 1000 OperationsError: 0 Info: "single row head read hist (ms):\n50%: 1\n95%: 1\n99%: 2\n99.9%: 27\n" } 2025-06-24T20:30:33.443397Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:616: ReadIteratorLoadScenario# {Tag: 0, parent: [2:692:2575], subTag: 3} finished in 0.463073s with report: { DurationMs: 0 OperationsOK: 10 OperationsError: 0 PrefixInfo: "Test run# 1, type# FullScan with chunk# inf" } { DurationMs: 2 OperationsOK: 10 OperationsError: 0 PrefixInfo: "Test run# 2, type# FullScan with chunk# 1" } { DurationMs: 0 OperationsOK: 10 OperationsError: 0 PrefixInfo: "Test run# 3, type# FullScan with chunk# 10" } { DurationMs: 432 OperationsOK: 1000 OperationsError: 0 Info: "single row head read hist (ms):\n50%: 1\n95%: 1\n99%: 2\n99.9%: 27\n" PrefixInfo: "Test run# 4, type# ReadHeadPoints with inflight# 1" } 2025-06-24T20:30:33.443514Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:701:2584] with tag# 3 >> TSchemeShardSysViewTest::ReadOnlyMode [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::CreateExistingSysView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:30:33.810449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:30:33.810536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:33.810585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:30:33.810641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:30:33.810680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:30:33.810715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:30:33.810769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:33.810858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:30:33.811760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:30:33.812127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:30:33.910935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:30:33.911008Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:33.949887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:30:33.950574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:30:33.950798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:30:33.993251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:30:33.993542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:30:33.994276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:33.994659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:30:34.004838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:34.005071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:30:34.008450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:34.008550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at 
schemeshard: 72057594046678944 2025-06-24T20:30:34.008796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:30:34.008864Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:30:34.008917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:30:34.009022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.024025Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:30:34.157446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:34.157695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.157962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:30:34.158011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:30:34.158236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:30:34.158299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:34.160630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:34.160826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:30:34.161026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.161078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:30:34.161122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts 
opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:30:34.161162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:30:34.168340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.168419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:34.168488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:30:34.173102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.173169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.173223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:34.173273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:30:34.177504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:30:34.179740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:30:34.179941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:30:34.180920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:34.181060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:30:34.181118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:34.181418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:30:34.181476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:34.181653Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:30:34.181746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:30:34.186431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:34.186492Z node 1 :FLAT_TX_SCHEMESHARD ... 34.272576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-24T20:30:34.273651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:30:34.273751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:30:34.273785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:30:34.273835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-24T20:30:34.273882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:30:34.275123Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:30:34.275233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:30:34.275266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:30:34.275296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-24T20:30:34.275330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T20:30:34.275415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-24T20:30:34.278890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 
72057594046678944, cookie: 102 2025-06-24T20:30:34.280409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T20:30:34.280634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T20:30:34.280685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T20:30:34.281129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T20:30:34.281248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:30:34.281291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:332:2321] TestWaitNotification: OK eventTxId 102 2025-06-24T20:30:34.281788Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/new_sys_view" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:30:34.282029Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 261us result status StatusSuccess 2025-06-24T20:30:34.282436Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys/new_sys_view" PathDescription { Self { Name: "new_sys_view" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SysViewDescription { Name: "new_sys_view" Type: EPartitionStats SourceObject { OwnerId: 72057594046678944 LocalId: 1 } } } PathId: 3 PathOwnerId: 
72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 103 2025-06-24T20:30:34.286268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView CreateSysView { Name: "new_sys_view" Type: ENodes } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:34.286462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_sysview.cpp:116: [72057594046678944] TCreateSysView Propose, path: /MyRoot/.sys/new_sys_view, opId: 103:0 2025-06-24T20:30:34.286520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_sysview.cpp:122: [72057594046678944] TCreateSysView Propose, path: /MyRoot/.sys/new_sys_view, opId: 103:0, sysViewDescription: Name: "new_sys_view" Type: ENodes 2025-06-24T20:30:34.286675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 103:1, propose status:StatusSchemeError, reason: Check failed: path: '/MyRoot/.sys/new_sys_view', error: path exist, request doesn't accept it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeSysView, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-06-24T20:30:34.290916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 103, response: Status: StatusSchemeError Reason: "Check failed: path: \'/MyRoot/.sys/new_sys_view\', error: path exist, request doesn\'t accept it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeSysView, state: EPathStateNoChanges)" TxId: 103 SchemeshardId: 72057594046678944 PathId: 3 PathCreateTxId: 102, at schemeshard: 72057594046678944 2025-06-24T20:30:34.291203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusSchemeError, reason: Check failed: path: '/MyRoot/.sys/new_sys_view', error: path exist, request doesn't accept it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeSysView, state: EPathStateNoChanges), operation: CREATE SYSTEM VIEW, path: /MyRoot/.sys/new_sys_view TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-24T20:30:34.291558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T20:30:34.291603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T20:30:34.292063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T20:30:34.292217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:30:34.292262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:340:2329] TestWaitNotification: OK eventTxId 103 2025-06-24T20:30:34.292833Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/new_sys_view" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:30:34.293081Z node 1 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 261us result status StatusSuccess 2025-06-24T20:30:34.293663Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys/new_sys_view" PathDescription { Self { Name: "new_sys_view" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SysViewDescription { Name: "new_sys_view" Type: EPartitionStats SourceObject { OwnerId: 72057594046678944 LocalId: 1 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpPg::JoinWithQueryService+StreamLookup [GOOD] >> KqpPg::Insert_Serial+useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldDropCreateTable [GOOD] Test command err: 2025-06-24T20:30:14.100556Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:30:14.100998Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:14.101247Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00338f/r3tmp/tmpywQlZh/pdisk_1.dat 2025-06-24T20:30:14.503455Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:30:14.512469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:14.595528Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:14.601776Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797004557878 != 1750797004557882 2025-06-24T20:30:14.657388Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:14.657556Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:14.672577Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:14.781727Z node 1 :DS_LOAD_TEST NOTICE: test_load_actor.cpp:194: TLoad# 0 creates table# BrandNewTable in dir# /Root 2025-06-24T20:30:15.813947Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:604:2511], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:15.815088Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:15.977304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:18.982873Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# BrandNewTable in dir# /Root with rows# 10 2025-06-24T20:30:19.044297Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:600:2508], subTag: 1} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "BrandNewTable" 2025-06-24T20:30:19.116179Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:600:2508], subTag: 1} TUpsertActor finished in 0.068485s, errors=0 2025-06-24T20:30:19.117949Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "BrandNewTable" CreateTable: true MinParts: 11 MaxParts: 13 MaxPartSizeMb: 1234 } TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "BrandNewTable" } UpsertBulkStart { RowCount: 10 Inflight: 3 } 2025-06-24T20:30:19.127265Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:600:2508], subTag: 3} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "BrandNewTable" 2025-06-24T20:30:19.290067Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:600:2508], subTag: 3} TUpsertActor finished in 0.160714s, errors=0 2025-06-24T20:30:19.290501Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:710:2585] with tag# 3 2025-06-24T20:30:31.600724Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:30:31.601121Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:31.601253Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00338f/r3tmp/tmpYA4ma7/pdisk_1.dat 2025-06-24T20:30:31.952361Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-24T20:30:31.954134Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:31.997639Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:32.000121Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1750797025979975 != 1750797025979979 2025-06-24T20:30:32.046792Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:32.046936Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:32.059746Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:32.148414Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:32.461550Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 } UpsertBulkStart { RowCount: 100 Inflight: 3 } 2025-06-24T20:30:32.461696Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:692:2575], subTag: 2} TUpsertActor Bootstrap called: RowCount: 100 Inflight: 3 with type# 0, target# TabletId: 72075186224037888 TableId: 2 2025-06-24T20:30:32.963876Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:692:2575], subTag: 2} TUpsertActor finished in 0.501735s, errors=0 2025-06-24T20:30:32.963987Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:693:2576] with tag# 2 2025-06-24T20:30:32.970915Z node 2 :DS_LOAD_TEST NOTICE: test_load_actor.cpp:174: TLoad# 0 drops table# table in dir# /Root 2025-06-24T20:30:32.986967Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:735:2617], DatabaseId: /Root, PoolId: default, Failed to fetch pool 
info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:32.987096Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:33.038380Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:30:33.300563Z node 2 :DS_LOAD_TEST NOTICE: test_load_actor.cpp:194: TLoad# 0 creates table# table in dir# /Root 2025-06-24T20:30:33.321585Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:798:2660], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:33.321694Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:33.335132Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:33.389715Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2025-06-24T20:30:33.625071Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# table in dir# /Root with rows# 10 2025-06-24T20:30:33.625451Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:731:2614], subTag: 1} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037889 TableId: 3 WorkingDir: "/Root" TableName: "table" 2025-06-24T20:30:33.637644Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:731:2614], subTag: 1} TUpsertActor finished in 0.011828s, errors=0 2025-06-24T20:30:33.637955Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "table" DropTable: true } TargetShard { TabletId: 72075186224037889 TableId: 3 WorkingDir: "/Root" TableName: "table" } UpsertBulkStart { RowCount: 10 Inflight: 3 } 2025-06-24T20:30:33.638109Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:731:2614], subTag: 3} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 0, target# TabletId: 72075186224037889 TableId: 3 WorkingDir: "/Root" TableName: "table" 2025-06-24T20:30:33.695880Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:731:2614], subTag: 3} TUpsertActor finished in 0.057490s, errors=0 2025-06-24T20:30:33.695981Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:889:2732] with tag# 3 >> Cdc::UuidExchange[PqRunner] [GOOD] >> Cdc::UuidExchange[YdsRunner] >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWoIndexes [GOOD] >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWithSyncIndex >> KqpPg::InsertFromSelect_Simple+useSink |88.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat |88.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat >> KqpDataIntegrityTrails::Upsert+LogEnabled-UseSink [GOOD] |88.3%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/{meta.json ... results_accumulator.log} |88.3%| [TA] {RESULT} $(B)/ydb/library/table_creator/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::ReadOnlyMode [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:30:34.580419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:30:34.580523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:34.580566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:30:34.580619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:30:34.580704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:30:34.580748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:30:34.580826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:34.580899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:30:34.581602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:30:34.581975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:30:34.670548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:30:34.670593Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:34.695883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:30:34.696399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:30:34.696570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:30:34.706376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:30:34.706587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:30:34.707215Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:34.707507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:30:34.710890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:34.711098Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:30:34.712382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:34.712506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:34.712751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:30:34.712819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:30:34.712876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:30:34.712966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.721539Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:30:34.895066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:34.895381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.895690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:30:34.895749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:30:34.895998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:30:34.896078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:34.904598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:34.904838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:30:34.905041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.905105Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:30:34.905158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:30:34.905191Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:30:34.908349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.908417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:34.908475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:30:34.912736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.912806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:34.912855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:34.912910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:30:34.917015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:30:34.920233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:30:34.920434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:30:34.921354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:34.921496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:30:34.921549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:34.921868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:30:34.921928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:34.922087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:30:34.922154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:30:34.928703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:34.928768Z node 1 :FLAT_TX_SCHEMESHARD ... DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 103:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:103 msg type: 269090816 2025-06-24T20:30:35.434363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 103, partId: 4294967295, tablet: 72057594046316545 2025-06-24T20:30:35.434927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 FAKE_COORDINATOR: Add transaction: 103 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000003 2025-06-24T20:30:35.435497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:35.435639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 103 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:30:35.435691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_sysview.cpp:45: [72057594046678944] TCreateSysView::TPropose, opId: 103:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000003 2025-06-24T20:30:35.435829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 103:0 128 -> 240 2025-06-24T20:30:35.436000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 
72057594046678944, LocalPathId: 2] was 1 2025-06-24T20:30:35.436091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T20:30:35.438215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:35.438252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:30:35.438371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T20:30:35.438533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:35.438585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:440:2396], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-24T20:30:35.438635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:440:2396], at schemeshard: 72057594046678944, txId: 103, path id: 3 FAKE_COORDINATOR: Erasing txId 103 2025-06-24T20:30:35.438967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:30:35.439014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-24T20:30:35.439102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:30:35.439168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:30:35.439208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:30:35.439241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:30:35.439274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-24T20:30:35.439313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:30:35.439346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:0 2025-06-24T20:30:35.439378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T20:30:35.439446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T20:30:35.439494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 103, publications: 2, subscribers: 0 2025-06-24T20:30:35.439531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:990: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 2], 4 2025-06-24T20:30:35.439578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-24T20:30:35.440051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:30:35.440107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:30:35.440128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:30:35.440157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-24T20:30:35.440185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:30:35.440524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:30:35.440566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:30:35.440587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:30:35.440614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-24T20:30:35.440632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T20:30:35.440678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-24T20:30:35.452650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T20:30:35.452776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-24T20:30:35.453065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T20:30:35.453110Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T20:30:35.453587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T20:30:35.453691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:30:35.453732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:473:2426] TestWaitNotification: OK eventTxId 103 2025-06-24T20:30:35.454272Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/new_sys_view" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:30:35.454500Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 238us result status StatusSuccess 2025-06-24T20:30:35.454863Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys/new_sys_view" PathDescription { Self { Name: "new_sys_view" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 103 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SysViewDescription { Name: "new_sys_view" Type: EPartitionStats SourceObject { OwnerId: 72057594046678944 LocalId: 1 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpPg::EmptyQuery+useSink >> KqpPg::ReadPgArray |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> KqpPg::ReadPgArray [GOOD] >> KqpPg::TableArrayInsert+useSink >> Cdc::KeysOnlyLog[PqRunner] [GOOD] >> Cdc::KeysOnlyLog[YdsRunner] >> KqpPg::CreateTableBulkUpsertAndRead |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> Cdc::KeysOnlyLogDebezium [GOOD] >> Cdc::DocApi[PqRunner] >> 
TAsyncIndexTests::CdcAndMergeWithReboots[PipeResets] |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest |88.3%| [TA] $(B)/ydb/core/tx/schemeshard/ut_sysview/test-results/unittest/{meta.json ... results_accumulator.log} |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert+LogEnabled-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 21517, MsgBus: 9114 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004517/r3tmp/tmpJaaQgD/pdisk_1.dat TServer::EnableGrpc on GrpcPort 21517, node 1 TClient is connected to server localhost:9114 TClient is connected to server localhost:9114 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... >> KqpDataIntegrityTrails::Upsert-LogEnabled+UseSink [GOOD] >> TAsyncIndexTests::SplitIndexWithReboots[PipeResets] >> KqpDataIntegrityTrails::BrokenReadLockAbortedTx [GOOD] >> TAsyncIndexTests::SplitIndexWithReboots[TabletReboots] >> Yq_1::Create_And_Modify_The_Same_Connection [GOOD] >> TVectorIndexTests::CreateTablePrefix |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |88.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema |88.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema |88.3%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sysview/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.3%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema >> test.py::test[solomon-Subquery-default.txt] [GOOD] >> test.py::test[solomon-UnknownSetting-] >> TAsyncIndexTests::SplitBothWithReboots[TabletReboots] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert-LogEnabled+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 16381, MsgBus: 2339 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004510/r3tmp/tmpogWPeq/pdisk_1.dat TServer::EnableGrpc on GrpcPort 16381, node 1 TClient is connected to server localhost:2339 TClient is connected to server localhost:2339 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::CreateTable ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::BrokenReadLockAbortedTx [GOOD] Test command err: Trying to start YDB, gRPC: 26667, MsgBus: 20911 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004516/r3tmp/tmp1np6Mm/pdisk_1.dat TServer::EnableGrpc on GrpcPort 26667, node 1 TClient is connected to server localhost:20911 TClient is connected to server localhost:20911 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... 
------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::Create_And_Modify_The_Same_Connection [GOOD] Test command err: 2025-06-24T20:29:00.396106Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615599297954968:2180];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:00.396492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0624 20:29:00.969056007 103144 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:00.969220478 103144 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:01.158905Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21108: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:21108 } ] 2025-06-24T20:29:01.391377Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:29:01.404521Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:02.417040Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21108: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:21108 } ] 2025-06-24T20:29:02.436087Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21108: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:21108 2025-06-24T20:29:02.539289Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:03.537718Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:04.555480Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:05.415546Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519615599297954968:2180];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:05.417625Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:29:05.568986Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:06.089613352 103271 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:06.089755894 103271 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:06.227859Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21108: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:21108 } ] 2025-06-24T20:29:06.423777Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:06.568176Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:07.435886Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:07.591824Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:08.443600Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:08.595564Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:09.455985Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:09.599660Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:10.327862Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21108: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:21108 2025-06-24T20:29:10.482458Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:10.605677Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:11.128042436 103271 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:11.128154290 103271 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:11.487715Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:11.634374Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:12.505876Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:12.642618Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:13.508189Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:13.657129Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003486/r3tmp/tmpkOjLTL/pdisk_1.dat 2025-06-24T20:29:14.279907Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:29:15.013778Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519615659427497652:2292], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:29:15.013900Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:15.013935Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:15.085833Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:29:15.085942Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:29:15.102206Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21108, node 1 2025-06-24T20:29:15.249593Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:29:15.249620Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:29:15.249630Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:29:15.249741Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:29:15.249910Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded E0624 20:29:16.190397938 103270 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:16.193825250 103270 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:17.987007Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/nodes". Create session OK 2025-06-24T20:29:17.987036Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/nodes" 2025-06-24T20:29:17.987044Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/nodes" 2025-06-24T20:29:18.452190Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/bindings". Create session OK 2025-06-24T20:29:18.452217Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/bindings" 2025-06-24T20:29:18.452442Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/bindings" 2025-06-24T20:29:18.521353Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table " ... d=4&id=ZmVhNjM3ZTAtNDEzNWYxYTAtOWZmOGJlNGMtYzkyZWZlNg==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Drain async output 0. Free space decreased: -9223372036787666944, sent data from buffer: 234 2025-06-24T20:30:33.158828Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715732, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-24T20:30:33.158840Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715732, task: 2. Tasks execution finished 2025-06-24T20:30:33.158854Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1587: SelfId: [4:7519616001174867050:2726], TxId: 281474976715732, task: 2. 
Ctx: { TraceId : 01jyht57c997xq68kyabcf4tvt. SessionId : ydb://session/3?node_id=4&id=ZmVhNjM3ZTAtNDEzNWYxYTAtOWZmOGJlNGMtYzkyZWZlNg==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Waiting finish of sink[0] 2025-06-24T20:30:33.158878Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616001174867049:2725], TxId: 281474976715732, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=ZmVhNjM3ZTAtNDEzNWYxYTAtOWZmOGJlNGMtYzkyZWZlNg==. TraceId : 01jyht57c997xq68kyabcf4tvt. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646927 2025-06-24T20:30:33.158907Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616001174867049:2725], TxId: 281474976715732, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=ZmVhNjM3ZTAtNDEzNWYxYTAtOWZmOGJlNGMtYzkyZWZlNg==. TraceId : 01jyht57c997xq68kyabcf4tvt. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-24T20:30:33.158924Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715732, task: 1. Tasks execution finished 2025-06-24T20:30:33.158938Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7519616001174867049:2725], TxId: 281474976715732, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=ZmVhNjM3ZTAtNDEzNWYxYTAtOWZmOGJlNGMtYzkyZWZlNg==. TraceId : 01jyht57c997xq68kyabcf4tvt. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-06-24T20:30:33.159064Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715732, task: 1. pass away 2025-06-24T20:30:33.159191Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715732;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T20:30:33.159454Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1860: SelfId: [4:7519616001174867044:2412], SessionActorId: [4:7519615949635256745:2412], Create new TableWriteActor for table `Root/yq/connections` ([72057594046644480:5:1]). lockId=281474976715723. ActorId=[4:7519616001174867056:2412] 2025-06-24T20:30:33.159502Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:388: Table: `Root/yq/connections` ([72057594046644480:5:1]), SessionActorId: [4:7519615949635256745:2412]Open: token=0 2025-06-24T20:30:33.159542Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1987: SelfId: [4:7519616001174867044:2412], SessionActorId: [4:7519615949635256745:2412], ProcessRequestQueue [OwnerId: 72057594046644480, LocalPathId: 5] NOT READY queue=1 2025-06-24T20:30:33.159771Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:394: SelfId: [4:7519616001174867056:2412], Table: `Root/yq/connections` ([72057594046644480:5:1]), SessionActorId: [4:7519615949635256745:2412]Write: token=0 2025-06-24T20:30:33.159862Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:402: SelfId: [4:7519616001174867056:2412], Table: `Root/yq/connections` ([72057594046644480:5:1]), SessionActorId: [4:7519615949635256745:2412]Close: token=0 2025-06-24T20:30:33.159906Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3180: SelfId: [4:7519616001174867054:2726], TxId: 281474976715732, task: 2. TKqpForwardWriteActor recieve EvBufferWriteResult from [4:7519616001174867044:2412] 2025-06-24T20:30:33.159924Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3198: SelfId: [4:7519616001174867054:2726], TxId: 281474976715732, task: 2. Finished 2025-06-24T20:30:33.159944Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616001174867050:2726], TxId: 281474976715732, task: 2. Ctx: { TraceId : 01jyht57c997xq68kyabcf4tvt. SessionId : ydb://session/3?node_id=4&id=ZmVhNjM3ZTAtNDEzNWYxYTAtOWZmOGJlNGMtYzkyZWZlNg==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:30:33.159980Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715732, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-24T20:30:33.159991Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715732, task: 2. Tasks execution finished 2025-06-24T20:30:33.160005Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7519616001174867050:2726], TxId: 281474976715732, task: 2. Ctx: { TraceId : 01jyht57c997xq68kyabcf4tvt. SessionId : ydb://session/3?node_id=4&id=ZmVhNjM3ZTAtNDEzNWYxYTAtOWZmOGJlNGMtYzkyZWZlNg==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-24T20:30:33.160059Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715732, task: 2. pass away 2025-06-24T20:30:33.160117Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715732;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T20:30:33.160498Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2087: SelfId: [4:7519616001174867044:2412], SessionActorId: [4:7519615949635256745:2412], Start prepare for distributed commit 2025-06-24T20:30:33.160511Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:909: SelfId: [4:7519616001174867056:2412], Table: `Root/yq/connections` ([72057594046644480:5:1]), SessionActorId: [4:7519615949635256745:2412]SetPrepare; txId=281474976715732 2025-06-24T20:30:33.160526Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2052: SelfId: [4:7519616001174867044:2412], SessionActorId: [4:7519615949635256745:2412], Flush data 2025-06-24T20:30:33.160678Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1050: SelfId: [4:7519616001174867056:2412], Table: `Root/yq/connections` ([72057594046644480:5:1]), SessionActorId: [4:7519615949635256745:2412]Send EvWrite to ShardID=72075186224037890, isPrepare=1, isImmediateCommit=0, TxId=281474976715732, LockTxId=0, LockNodeId=0, Locks= LockId: 281474976715723 DataShard: 72075186224037890 Generation: 1 Counter: 1 SchemeShard: 72057594046644480 PathId: 5, Size=324, Cookie=1, OperationsCount=1, IsFinal=1, Attempts=0, Mode=1, BufferMemory=324 2025-06-24T20:30:33.160778Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2196: SelfId: [4:7519616001174867044:2412], SessionActorId: [4:7519615949635256745:2412], Send EvWrite (external) to ShardID=72075186224037900, isPrepare=1, isImmediateCommit=0, TxId=281474976715732, LockTxId=0, LockNodeId=0, Locks= LockId: 281474976715723 DataShard: 72075186224037900 Generation: 1 Counter: 1 SchemeShard: 72057594046644480 PathId: 15, Size=0, Cookie=0, OperationsCount=0, IsFinal=1, Attempts=0 2025-06-24T20:30:33.163497Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2586: SelfId: [4:7519616001174867044:2412], SessionActorId: [4:7519615949635256745:2412], Recv EvWriteResult (external) from ShardID=72075186224037900, Status=STATUS_PREPARED, TxId=281474976715732, Locks= , Cookie=0 2025-06-24T20:30:33.163554Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2840: SelfId: [4:7519616001174867044:2412], SessionActorId: [4:7519615949635256745:2412], Got prepared result TxId=281474976715732, TabletId=72075186224037900, Cookie=0 2025-06-24T20:30:33.163611Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2052: SelfId: [4:7519616001174867044:2412], SessionActorId: [4:7519615949635256745:2412], Flush data 2025-06-24T20:30:33.164029Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:617: SelfId: [4:7519616001174867056:2412], Table: `Root/yq/connections` ([72057594046644480:5:1]), SessionActorId: [4:7519615949635256745:2412]Recv EvWriteResult from ShardID=72075186224037890, Status=STATUS_PREPARED, TxId=281474976715732, Locks= , Cookie=1 2025-06-24T20:30:33.164095Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2130: SelfId: [4:7519616001174867044:2412], SessionActorId: [4:7519615949635256745:2412], Start distributed commit with TxId=281474976715732 2025-06-24T20:30:33.164111Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:917: SelfId: [4:7519616001174867056:2412], Table: `Root/yq/connections` ([72057594046644480:5:1]), SessionActorId: [4:7519615949635256745:2412]SetDistributedCommit; txId=281474976715732 2025-06-24T20:30:33.164144Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2297: SelfId: [4:7519616001174867044:2412], SessionActorId: [4:7519615949635256745:2412], Execute planned transaction, coordinator: 72057594046316545, volitale: 1, shards: 2 2025-06-24T20:30:33.166159Z node 4 :KQP_COMPUTE DEBUG: 
kqp_write_actor.cpp:2353: SelfId: [4:7519616001174867044:2412], SessionActorId: [4:7519615949635256745:2412], Got transaction status, status: 16 2025-06-24T20:30:33.166185Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2353: SelfId: [4:7519616001174867044:2412], SessionActorId: [4:7519615949635256745:2412], Got transaction status, status: 17 2025-06-24T20:30:33.172387Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2586: SelfId: [4:7519616001174867044:2412], SessionActorId: [4:7519615949635256745:2412], Recv EvWriteResult (external) from ShardID=72075186224037900, Status=STATUS_COMPLETED, TxId=281474976715732, Locks= , Cookie=0 2025-06-24T20:30:33.172421Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2873: SelfId: [4:7519616001174867044:2412], SessionActorId: [4:7519615949635256745:2412], Got completed result TxId=281474976715732, TabletId=72075186224037900, Cookie=0, Locks= 2025-06-24T20:30:33.173125Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:617: SelfId: [4:7519616001174867056:2412], Table: `Root/yq/connections` ([72057594046644480:5:1]), SessionActorId: [4:7519615949635256745:2412]Recv EvWriteResult from ShardID=72075186224037890, Status=STATUS_COMPLETED, TxId=281474976715732, Locks= , Cookie=0 2025-06-24T20:30:33.173156Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:869: SelfId: [4:7519616001174867056:2412], Table: `Root/yq/connections` ([72057594046644480:5:1]), SessionActorId: [4:7519615949635256745:2412]Got completed result TxId=281474976715732, TabletId=72075186224037890, Cookie=0, Mode=2, Locks= 2025-06-24T20:30:33.173174Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2912: SelfId: [4:7519616001174867044:2412], SessionActorId: [4:7519615949635256745:2412], Committed TxId=281474976715732 E0624 20:30:33.323972336 108467 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:30:33.324139462 108467 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:30:33.630511Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:7649: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:7649 2025-06-24T20:30:34.629552Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: Client is stopped |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TBlobStorageProxyTest::TestPartialGetBlock >> TAsyncIndexTests::MergeMainWithReboots[TabletReboots] >> TBlobStorageProxyTest::TestVPutVGet >> TVectorIndexTests::CreateTablePrefix [GOOD] >> TBlobStorageProxyTest::TestDoubleFailure >> KqpPg::Insert_Serial+useSink [GOOD] >> KqpPg::Insert_Serial-useSink >> Cdc::UuidExchange[YdsRunner] [GOOD] >> Cdc::UuidExchange[TopicRunner] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestReboot [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCleanIndex >> Yq_1::CreateConnections_With_Idempotency [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTablePrefix [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:30:39.773337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:30:39.773425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:39.773470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:30:39.773523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:30:39.773575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:30:39.773625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:30:39.773690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:39.773773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:30:39.774654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:30:39.775078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:30:39.872593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:30:39.872661Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:39.913729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:30:39.914268Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:30:39.917878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:30:39.936682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:30:39.936958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:30:39.937720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:39.938135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:30:39.948779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:39.949081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:30:39.950485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:39.950574Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:39.950814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:30:39.950870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:30:39.950915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:30:39.951022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:30:39.972104Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:30:40.125328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:40.125605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:40.125912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:30:40.125964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source 
path: 2025-06-24T20:30:40.126210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:30:40.126300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:40.132835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:40.133104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:30:40.133367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:40.133463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:30:40.133515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:30:40.133568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:30:40.136734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:40.136813Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:40.136858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:30:40.140484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:40.140564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:40.140619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:40.140709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:30:40.152081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:30:40.156633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 
72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:30:40.156864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:30:40.158005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:40.158178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:30:40.158240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:40.159404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:30:40.159526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:40.159768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:30:40.159868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:30:40.165189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:40.165252Z node 1 :FLAT_TX_SCHEMESHARD ... 
ementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-24T20:30:40.916907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:30:40.916993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:30:40.917028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:30:40.917057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 6], version: 18446744073709551615 2025-06-24T20:30:40.917102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 4 2025-06-24T20:30:40.917176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/5, is published: true 2025-06-24T20:30:40.921773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:3, at schemeshard: 72057594046678944 2025-06-24T20:30:40.921845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:3 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:40.922206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-24T20:30:40.922371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:3 progress is 2/5 2025-06-24T20:30:40.922411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 2/5 2025-06-24T20:30:40.922505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:3 progress is 2/5 2025-06-24T20:30:40.922539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 2/5 2025-06-24T20:30:40.922583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 2/5, is published: true 2025-06-24T20:30:40.924055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:30:40.924203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:30:40.924257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:30:40.924301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: 
TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:30:40.924489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:4, at schemeshard: 72057594046678944 2025-06-24T20:30:40.924548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:4 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:40.924799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-24T20:30:40.924927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:4 progress is 3/5 2025-06-24T20:30:40.924961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 3/5 2025-06-24T20:30:40.924998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:4 progress is 3/5 2025-06-24T20:30:40.925027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 3/5 2025-06-24T20:30:40.925073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 3/5, is published: true 2025-06-24T20:30:40.925326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-24T20:30:40.925363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:2 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:40.925541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T20:30:40.925647Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:2 progress is 4/5 2025-06-24T20:30:40.925677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 4/5 2025-06-24T20:30:40.925706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:2 progress is 4/5 2025-06-24T20:30:40.925730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 4/5 2025-06-24T20:30:40.925758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 4/5, is published: true 2025-06-24T20:30:40.925985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:30:40.926025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:40.926187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T20:30:40.926276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 5/5 
2025-06-24T20:30:40.926304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 5/5 2025-06-24T20:30:40.926355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 5/5 2025-06-24T20:30:40.926397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 5/5 2025-06-24T20:30:40.926428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 5/5, is published: true 2025-06-24T20:30:40.926504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:448:2392] message: TxId: 102 2025-06-24T20:30:40.926563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 5/5 2025-06-24T20:30:40.926609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T20:30:40.926643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T20:30:40.926752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T20:30:40.926804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:1 2025-06-24T20:30:40.926833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:1 2025-06-24T20:30:40.926865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T20:30:40.926902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:2 2025-06-24T20:30:40.926926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:2 2025-06-24T20:30:40.927001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-24T20:30:40.927032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:3 2025-06-24T20:30:40.927069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:3 2025-06-24T20:30:40.927116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-24T20:30:40.927197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:4 2025-06-24T20:30:40.927221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:4 2025-06-24T20:30:40.927268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 2 2025-06-24T20:30:40.927673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, 
at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:30:40.927740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:30:40.927780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:30:40.927952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:30:40.928104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:30:40.930284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:30:40.933229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:30:40.933297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:590:2527] TestWaitNotification: OK eventTxId 102 >> TBlobStorageProxyTest::TestPutGetStatusErasureMirror3 >> TBlobStorageProxyTest::TestGetMultipart >> Yq_1::DescribeQuery [GOOD] >> TBlobStorageProxyTest::TestGetAndRangeGetManyBlobs >> Cdc::KeysOnlyLog[YdsRunner] [GOOD] >> Cdc::KeysOnlyLog[TopicRunner] >> Yq_1::ListConnectionsOnEmptyConnectionsTable [GOOD] >> TBlobStorageProxyTest::TestProxyGetSingleTimeout >> TAsyncIndexTests::CreateTable [GOOD] >> TBlobStorageProxyTest::TestPartialGetBlock [GOOD] >> TBlobStorageProxyTest::TestPartialGetMirror >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWithSyncIndex [GOOD] >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWithAsyncIndex >> TBlobStorageProxyTest::TestVPutVGet [GOOD] >> TBlobStorageProxyTest::TestVPutVGetLimit >> TBlobStorageProxyTest::TestProxyPutInvalidSize >> KqpPg::EmptyQuery+useSink [GOOD] >> KqpPg::EmptyQuery-useSink >> TBlobStorageProxyTest::TestProxyRestoreOnGetBlock ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::CreateTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:30:42.052651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:30:42.052746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:42.052786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:30:42.052823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: 
OperationsProcessing config: using default configuration 2025-06-24T20:30:42.052873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:30:42.052928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:30:42.053000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:42.053068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:30:42.053853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:30:42.054223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:30:42.220999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:30:42.221066Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:42.234696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:30:42.239254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:30:42.239458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:30:42.247587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:30:42.247846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:30:42.248546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:42.248893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:30:42.252069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:42.252244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:30:42.253382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:42.253438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:42.253567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:30:42.253620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 
72057594046678944, LocalPathId: 1] 2025-06-24T20:30:42.253665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:30:42.253823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:30:42.260762Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:30:42.410811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:42.411063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:42.411394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:30:42.411453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:30:42.411719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:30:42.411830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:42.414094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:42.414296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:30:42.414533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:42.414598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:30:42.414643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:30:42.414701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:30:42.416939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:42.417001Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:42.417052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:30:42.418791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:42.418837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:42.418884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:42.418950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:30:42.429755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:30:42.431790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:30:42.431999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:30:42.433050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:42.433206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:30:42.433253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:42.433562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:30:42.433623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:42.433807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:30:42.433889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing 
txId 1 2025-06-24T20:30:42.436235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:42.436285Z node 1 :FLAT_TX_SCHEMESHARD ... 2075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:42.830422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:30:42.830458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 101:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T20:30:42.830492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 129 -> 240 2025-06-24T20:30:42.838225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T20:30:42.838691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T20:30:42.849218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T20:30:42.850069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-24T20:30:42.850316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:30:42.850521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T20:30:42.850778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-24T20:30:42.851456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:30:42.851832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-24T20:30:42.851900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:2 ProgressState 2025-06-24T20:30:42.852014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:2 progress is 2/3 2025-06-24T20:30:42.852066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-24T20:30:42.852130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:2 progress is 2/3 2025-06-24T20:30:42.852168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-24T20:30:42.852206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 2/3, is published: true 2025-06-24T20:30:42.852711Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:30:42.852751Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T20:30:42.852816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 3/3 2025-06-24T20:30:42.852843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-24T20:30:42.852877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 3/3 2025-06-24T20:30:42.852933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-24T20:30:42.852970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 3/3, is published: true 2025-06-24T20:30:42.853049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:382:2348] message: TxId: 101 2025-06-24T20:30:42.853096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-24T20:30:42.853147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T20:30:42.853181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T20:30:42.853325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T20:30:42.853376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:1 2025-06-24T20:30:42.853416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:1 2025-06-24T20:30:42.853511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T20:30:42.853540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:2 2025-06-24T20:30:42.853563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:2 2025-06-24T20:30:42.853641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T20:30:42.856860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T20:30:42.856925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:383:2349] TestWaitNotification: OK eventTxId 101 2025-06-24T20:30:42.857566Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, 
at schemeshard: 72057594046678944 2025-06-24T20:30:42.857839Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex" took 304us result status StatusSuccess 2025-06-24T20:30:42.858791Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex" PathDescription { Self { Name: "UserDefinedIndex" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "UserDefinedIndex" LocalPathId: 3 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false 
BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::CreateConnections_With_Idempotency [GOOD] Test command err: 2025-06-24T20:29:11.087867Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615648854225254:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:11.129399Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:29:14.197874Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; E0624 20:29:15.862680171 104923 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:15.863365723 104923 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:15.968542Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:16.080273Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519615648854225254:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:16.080325Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:29:16.949843Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:15499: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:15499 } ] 2025-06-24T20:29:17.415775Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:17.446335Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:17.923531Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:15499: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:15499 2025-06-24T20:29:18.020482Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:15499: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:15499 } ] 2025-06-24T20:29:18.418172Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:18.456064Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:19.426338Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:19.456559Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:20.434193Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:20.475807Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:20.943049552 105377 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:20.944495105 105377 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:21.232161Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:15499: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:15499 } ] 2025-06-24T20:29:21.455331Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:21.479496Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:22.468754Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:22.484137Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:23.483721Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:23.483749Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:24.499993Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:24.500054Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:25.001333Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:15499: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:15499 2025-06-24T20:29:25.237675Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:15499: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:15499 } ] 2025-06-24T20:29:25.503830Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:25.531801Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:26.045983096 105377 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:26.063856365 105377 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:26.528277Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:26.528620Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:27.533618Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:27.533647Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:28.535792Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:28.535819Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:29.540659Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:29.541015Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:30.577608Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:30.592064Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:31.202165289 105377 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:31.203200464 105377 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:31.483420Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/tenant_acks". Create session error: [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:15499: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:15499 } ] 2025-06-24T20:29:31.507252Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:15499: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:15499 } ] 2025-06-24T20:29:31.515552Z node 1 :YQL_NODES_MANAGER ER ... omerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Create sink for output 0 { Sink { Type: "KqpTableSink" Settings { type_url: "type.googleapis.com/NKikimrKqp.TKqpTableSinkSettings" value: "\032\036\n\016Root/yq/quotas\020\200\202\224\204\200\200\200\200\001\030\003(\001\"\023\n\014subject_type\020\001 \201 \"\021\n\nsubject_id\020\002 \201 \"\022\n\013metric_name\020\003 \201 *\026\n\020limit_updated_at\020\005 2*\022\n\014metric_limit\020\004 \004*\022\n\013metric_name\020\003 \201 *\022\n\014metric_usage\020\006 \004*\021\n\nsubject_id\020\002 \201 *\023\n\014subject_type\020\001 \201 *\026\n\020usage_updated_at\020\007 20\274\247\200\200\200\200@8\004@\000H\001R\022\t;\215d\207\357\n[h\021=\t\000\000\004\000\020\000X\000`\000h\004h\003h\002h\005h\001h\000h\006x\000" } } } 2025-06-24T20:30:39.059634Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616026710150463:2365], TxId: 281474976715710, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzNlZDJkOWItNmEyNmI4ODEtZDU5MWEzYy1kYmQ5YTM3NA==. TraceId : 01jyht5dede0b27x476mjj72hw. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646926 2025-06-24T20:30:39.059664Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [4:7519616026710150463:2365], TxId: 281474976715710, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzNlZDJkOWItNmEyNmI4ODEtZDU5MWEzYy1kYmQ5YTM3NA==. TraceId : 01jyht5dede0b27x476mjj72hw. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Received channels info: 2025-06-24T20:30:39.059749Z node 4 :KQP_COMPUTE DEBUG: dq_sync_compute_actor_base.h:358: SelfId: [4:7519616026710150463:2365], TxId: 281474976715710, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzNlZDJkOWItNmEyNmI4ODEtZDU5MWEzYy1kYmQ5YTM3NA==. TraceId : 01jyht5dede0b27x476mjj72hw. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. About to drain async output 0. FreeSpace: 67108864, allowedOvercommit: 4194304, toSend: 71303168, finished: 0 2025-06-24T20:30:39.059871Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3311: TxId: 281474976715710, task: 1. Add data: 78 / 78 2025-06-24T20:30:39.059930Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3280: TxId: 281474976715710, task: 1. Send data=78, closed=1, bufferActorId=[4:7519616026710150459:2365] 2025-06-24T20:30:39.059955Z node 4 :KQP_COMPUTE DEBUG: dq_sync_compute_actor_base.h:372: SelfId: [4:7519616026710150463:2365], TxId: 281474976715710, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzNlZDJkOWItNmEyNmI4ODEtZDU5MWEzYy1kYmQ5YTM3NA==. TraceId : 01jyht5dede0b27x476mjj72hw. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Drain async output 0. Free space decreased: -9223372036787666944, sent data from buffer: 78 2025-06-24T20:30:39.059975Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715710, task: 1. Tasks execution finished 2025-06-24T20:30:39.059987Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1587: SelfId: [4:7519616026710150463:2365], TxId: 281474976715710, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzNlZDJkOWItNmEyNmI4ODEtZDU5MWEzYy1kYmQ5YTM3NA==. TraceId : 01jyht5dede0b27x476mjj72hw. CustomerSuppliedId : . 
CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Waiting finish of sink[0] 2025-06-24T20:30:39.060012Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616026710150463:2365], TxId: 281474976715710, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzNlZDJkOWItNmEyNmI4ODEtZDU5MWEzYy1kYmQ5YTM3NA==. TraceId : 01jyht5dede0b27x476mjj72hw. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646926 2025-06-24T20:30:39.060032Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [4:7519616026710150463:2365], TxId: 281474976715710, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzNlZDJkOWItNmEyNmI4ODEtZDU5MWEzYy1kYmQ5YTM3NA==. TraceId : 01jyht5dede0b27x476mjj72hw. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Received channels info: 2025-06-24T20:30:39.060047Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715710, task: 1. Tasks execution finished 2025-06-24T20:30:39.060055Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1587: SelfId: [4:7519616026710150463:2365], TxId: 281474976715710, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzNlZDJkOWItNmEyNmI4ODEtZDU5MWEzYy1kYmQ5YTM3NA==. TraceId : 01jyht5dede0b27x476mjj72hw. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Waiting finish of sink[0] 2025-06-24T20:30:39.060105Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616026710150463:2365], TxId: 281474976715710, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzNlZDJkOWItNmEyNmI4ODEtZDU5MWEzYy1kYmQ5YTM3NA==. TraceId : 01jyht5dede0b27x476mjj72hw. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:30:39.060123Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715710, task: 1. Tasks execution finished 2025-06-24T20:30:39.060134Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1587: SelfId: [4:7519616026710150463:2365], TxId: 281474976715710, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzNlZDJkOWItNmEyNmI4ODEtZDU5MWEzYy1kYmQ5YTM3NA==. TraceId : 01jyht5dede0b27x476mjj72hw. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Waiting finish of sink[0] 2025-06-24T20:30:39.060203Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1860: SelfId: [4:7519616026710150459:2365], SessionActorId: [4:7519615988055441969:2365], Create new TableWriteActor for table `Root/yq/quotas` ([72057594046644480:3:1]). lockId=281474976715708. 
ActorId=[4:7519616026710150466:2365] 2025-06-24T20:30:39.060245Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:388: Table: `Root/yq/quotas` ([72057594046644480:3:1]), SessionActorId: [4:7519615988055441969:2365]Open: token=0 2025-06-24T20:30:39.060266Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1987: SelfId: [4:7519616026710150459:2365], SessionActorId: [4:7519615988055441969:2365], ProcessRequestQueue [OwnerId: 72057594046644480, LocalPathId: 3] NOT READY queue=1 2025-06-24T20:30:39.060317Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:394: SelfId: [4:7519616026710150466:2365], Table: `Root/yq/quotas` ([72057594046644480:3:1]), SessionActorId: [4:7519615988055441969:2365]Write: token=0 2025-06-24T20:30:39.060415Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:402: SelfId: [4:7519616026710150466:2365], Table: `Root/yq/quotas` ([72057594046644480:3:1]), SessionActorId: [4:7519615988055441969:2365]Close: token=0 2025-06-24T20:30:39.060451Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3180: SelfId: [4:7519616026710150465:2365], TxId: 281474976715710, task: 1. TKqpForwardWriteActor recieve EvBufferWriteResult from [4:7519616026710150459:2365] 2025-06-24T20:30:39.060466Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3198: SelfId: [4:7519616026710150465:2365], TxId: 281474976715710, task: 1. Finished 2025-06-24T20:30:39.060482Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616026710150463:2365], TxId: 281474976715710, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzNlZDJkOWItNmEyNmI4ODEtZDU5MWEzYy1kYmQ5YTM3NA==. TraceId : 01jyht5dede0b27x476mjj72hw. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:30:39.060499Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715710, task: 1. Tasks execution finished 2025-06-24T20:30:39.060512Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7519616026710150463:2365], TxId: 281474976715710, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzNlZDJkOWItNmEyNmI4ODEtZDU5MWEzYy1kYmQ5YTM3NA==. TraceId : 01jyht5dede0b27x476mjj72hw. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-24T20:30:39.060587Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715710, task: 1. pass away 2025-06-24T20:30:39.060666Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715710;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T20:30:39.061019Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2110: SelfId: [4:7519616026710150459:2365], SessionActorId: [4:7519615988055441969:2365], Start immediate commit 2025-06-24T20:30:39.061035Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:923: SelfId: [4:7519616026710150466:2365], Table: `Root/yq/quotas` ([72057594046644480:3:1]), SessionActorId: [4:7519615988055441969:2365]SetImmediateCommit 2025-06-24T20:30:39.061051Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2052: SelfId: [4:7519616026710150459:2365], SessionActorId: [4:7519615988055441969:2365], Flush data 2025-06-24T20:30:39.061191Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1050: SelfId: [4:7519616026710150466:2365], Table: `Root/yq/quotas` ([72057594046644480:3:1]), SessionActorId: [4:7519615988055441969:2365]Send EvWrite to ShardID=72075186224037895, isPrepare=0, isImmediateCommit=1, TxId=0, LockTxId=0, LockNodeId=0, Locks= LockId: 281474976715708 DataShard: 72075186224037895 Generation: 1 Counter: 5 SchemeShard: 72057594046644480 PathId: 3, Size=136, Cookie=1, OperationsCount=1, IsFinal=1, Attempts=0, Mode=3, BufferMemory=136 2025-06-24T20:30:39.061458Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:617: SelfId: [4:7519616026710150453:2364], Table: `Root/yq/quotas` ([72057594046644480:3:1]), SessionActorId: [4:7519615988055441965:2364]Recv EvWriteResult from ShardID=72075186224037895, Status=STATUS_COMPLETED, TxId=13, Locks= , Cookie=1 2025-06-24T20:30:39.061480Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:869: SelfId: [4:7519616026710150453:2364], Table: `Root/yq/quotas` ([72057594046644480:3:1]), SessionActorId: [4:7519615988055441965:2364]Got completed result TxId=13, TabletId=72075186224037895, Cookie=1, Mode=3, Locks= 2025-06-24T20:30:39.061521Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2912: SelfId: [4:7519616026710150446:2364], SessionActorId: [4:7519615988055441965:2364], Committed TxId=0 2025-06-24T20:30:39.072237Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:617: SelfId: [4:7519616026710150466:2365], Table: `Root/yq/quotas` ([72057594046644480:3:1]), SessionActorId: [4:7519615988055441969:2365]Recv EvWriteResult from ShardID=72075186224037895, Status=STATUS_COMPLETED, TxId=14, Locks= , Cookie=1 2025-06-24T20:30:39.072287Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:869: SelfId: [4:7519616026710150466:2365], Table: `Root/yq/quotas` ([72057594046644480:3:1]), SessionActorId: [4:7519615988055441969:2365]Got completed result TxId=14, TabletId=72075186224037895, Cookie=1, Mode=3, Locks= 2025-06-24T20:30:39.072340Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2912: SelfId: [4:7519616026710150459:2365], SessionActorId: [4:7519615988055441969:2365], Committed TxId=0 2025-06-24T20:30:39.607554Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:27156: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:27156 |88.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup |88.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup |88.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup >> TBlobStorageProxyTest::TestNormal |88.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut |88.3%| [LD] {RESULT} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut |88.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::ListConnectionsOnEmptyConnectionsTable [GOOD] Test command err: 2025-06-24T20:29:09.597050Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615639030153736:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:09.597688Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:29:14.161472Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:29:14.161546Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615660504990255:2253];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:14.161726Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:29:15.074629Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519615639030153736:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:15.074764Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; E0624 20:29:16.364320776 104818 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:16.364455949 104818 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:17.719458Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:27219: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27219 } ] 2025-06-24T20:29:17.937383Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:17.937414Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:18.896941Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:27219: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:27219 2025-06-24T20:29:18.971678Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:18.971701Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:19.148262Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:27219: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27219 } ] 2025-06-24T20:29:19.163931Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519615660504990255:2253];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:19.163970Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:29:19.979621Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:19.979641Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:20.168732Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:20.988549Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:20.988579Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:21.179485Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:21.199565Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:27219: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27219 } ] E0624 20:29:21.359870681 105106 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:21.360032400 105106 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:21.995467Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:21.995986Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:22.181554Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:23.030265Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:23.030289Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:23.188661Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:24.254030Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:24.254050Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:24.254060Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:25.292478Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:25.292504Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:25.292515Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:26.321062Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:26.321086Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:26.321097Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:27.056138Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:27219: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:27219 E0624 20:29:27.076149551 105106 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:27.077700966 105106 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:27.072747Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:27219: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27219 } ] 2025-06-24T20:29:27.363884Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:27.363958Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:27.364468Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:28.407317Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:28.437565Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migration ... ished=1, RowCount=0, TxLocks= , BrokenTxLocks= 2025-06-24T20:30:39.029438Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1050: TxId: 281474976715692, task: 1, CA Id [4:7519616028695252569:2535]. Taken 0 locks 2025-06-24T20:30:39.029456Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1064: TxId: 281474976715692, task: 1, CA Id [4:7519616028695252569:2535]. new data for read #0 seqno = 1 finished = 1 2025-06-24T20:30:39.029486Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616028695252569:2535], TxId: 281474976715692, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YTRiMjZmN2MtODU3ZGU1NzgtYTJlNWU1MjUtMzBiMDgwNDg=. TraceId : 01jyht5d3b6928x4ksja2tzrny. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 276037645 2025-06-24T20:30:39.029520Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616028695252569:2535], TxId: 281474976715692, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YTRiMjZmN2MtODU3ZGU1NzgtYTJlNWU1MjUtMzBiMDgwNDg=. TraceId : 01jyht5d3b6928x4ksja2tzrny. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:30:39.029544Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1331: TxId: 281474976715692, task: 1, CA Id [4:7519616028695252569:2535]. enter getasyncinputdata results size 1, freeSpace 8388608 2025-06-24T20:30:39.029565Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1227: TxId: 281474976715692, task: 1, CA Id [4:7519616028695252569:2535]. enter pack cells method shardId: 72075186224037900 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-06-24T20:30:39.029593Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1308: TxId: 281474976715692, task: 1, CA Id [4:7519616028695252569:2535]. exit pack cells method shardId: 72075186224037900 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-06-24T20:30:39.029608Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1365: TxId: 281474976715692, task: 1, CA Id [4:7519616028695252569:2535]. returned 0 rows; processed 0 rows 2025-06-24T20:30:39.029648Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1402: TxId: 281474976715692, task: 1, CA Id [4:7519616028695252569:2535]. 
dropping batch for read #0 2025-06-24T20:30:39.029660Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:458: TxId: 281474976715692, task: 1, CA Id [4:7519616028695252569:2535]. effective maxinflight 1 sorted 1 2025-06-24T20:30:39.029675Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:480: TxId: 281474976715692, task: 1, CA Id [4:7519616028695252569:2535]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, 2025-06-24T20:30:39.029695Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976715692, task: 1, CA Id [4:7519616028695252569:2535]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2025-06-24T20:30:39.029812Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7519616028695252569:2535], TxId: 281474976715692, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YTRiMjZmN2MtODU3ZGU1NzgtYTJlNWU1MjUtMzBiMDgwNDg=. TraceId : 01jyht5d3b6928x4ksja2tzrny. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-24T20:30:39.032198Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616028695252570:2536], TxId: 281474976715692, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YTRiMjZmN2MtODU3ZGU1NzgtYTJlNWU1MjUtMzBiMDgwNDg=. TraceId : 01jyht5d3b6928x4ksja2tzrny. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646926 2025-06-24T20:30:39.032350Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [4:7519616028695252570:2536], TxId: 281474976715692, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YTRiMjZmN2MtODU3ZGU1NzgtYTJlNWU1MjUtMzBiMDgwNDg=. TraceId : 01jyht5d3b6928x4ksja2tzrny. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Received channels info: Update { Id: 1 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 1 DstTaskId: 2 SrcEndpoint { ActorId { RawX1: 7519616028695252569 RawX2: 4503616807242215 } } DstEndpoint { ActorId { RawX1: 7519616028695252570 RawX2: 4503616807242216 } } InMemory: true DstStageId: 1 } Update { Id: 2 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 2 SrcEndpoint { ActorId { RawX1: 7519616028695252570 RawX2: 4503616807242216 } } DstEndpoint { ActorId { RawX1: 7519616028695252563 RawX2: 4503616807242037 } } InMemory: true } 2025-06-24T20:30:39.032364Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1081: SelfId: [4:7519616028695252570:2536], TxId: 281474976715692, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YTRiMjZmN2MtODU3ZGU1NzgtYTJlNWU1MjUtMzBiMDgwNDg=. TraceId : 01jyht5d3b6928x4ksja2tzrny. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Update input channelId: 1, peer: [4:7519616028695252569:2535] 2025-06-24T20:30:39.032440Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616028695252570:2536], TxId: 281474976715692, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YTRiMjZmN2MtODU3ZGU1NzgtYTJlNWU1MjUtMzBiMDgwNDg=. TraceId : 01jyht5d3b6928x4ksja2tzrny. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. 
CA StateFunc 271646926 2025-06-24T20:30:39.032534Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [4:7519616028695252570:2536], TxId: 281474976715692, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YTRiMjZmN2MtODU3ZGU1NzgtYTJlNWU1MjUtMzBiMDgwNDg=. TraceId : 01jyht5d3b6928x4ksja2tzrny. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Received channels info: Update { Id: 1 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 1 DstTaskId: 2 SrcEndpoint { ActorId { RawX1: 7519616028695252569 RawX2: 4503616807242215 } } DstEndpoint { ActorId { RawX1: 7519616028695252570 RawX2: 4503616807242216 } } InMemory: true DstStageId: 1 } Update { Id: 2 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 2 SrcEndpoint { ActorId { RawX1: 7519616028695252570 RawX2: 4503616807242216 } } DstEndpoint { ActorId { RawX1: 7519616028695252563 RawX2: 4503616807242037 } } InMemory: true } 2025-06-24T20:30:39.032558Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616028695252570:2536], TxId: 281474976715692, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YTRiMjZmN2MtODU3ZGU1NzgtYTJlNWU1MjUtMzBiMDgwNDg=. TraceId : 01jyht5d3b6928x4ksja2tzrny. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646923 2025-06-24T20:30:39.032575Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976715692, task: 2. Finish input channelId: 1, from: [4:7519616028695252569:2535] 2025-06-24T20:30:39.032611Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616028695252570:2536], TxId: 281474976715692, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YTRiMjZmN2MtODU3ZGU1NzgtYTJlNWU1MjUtMzBiMDgwNDg=. TraceId : 01jyht5d3b6928x4ksja2tzrny. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:30:39.032666Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7519616028695252570:2536], TxId: 281474976715692, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YTRiMjZmN2MtODU3ZGU1NzgtYTJlNWU1MjUtMzBiMDgwNDg=. TraceId : 01jyht5d3b6928x4ksja2tzrny. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-24T20:30:39.032689Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616028695252569:2535], TxId: 281474976715692, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YTRiMjZmN2MtODU3ZGU1NzgtYTJlNWU1MjUtMzBiMDgwNDg=. TraceId : 01jyht5d3b6928x4ksja2tzrny. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646927 2025-06-24T20:30:39.032707Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616028695252569:2535], TxId: 281474976715692, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YTRiMjZmN2MtODU3ZGU1NzgtYTJlNWU1MjUtMzBiMDgwNDg=. TraceId : 01jyht5d3b6928x4ksja2tzrny. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:30:39.032730Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715692, task: 1. 
Tasks execution finished 2025-06-24T20:30:39.032764Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7519616028695252569:2535], TxId: 281474976715692, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YTRiMjZmN2MtODU3ZGU1NzgtYTJlNWU1MjUtMzBiMDgwNDg=. TraceId : 01jyht5d3b6928x4ksja2tzrny. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-24T20:30:39.032862Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715692, task: 1. pass away 2025-06-24T20:30:39.040981Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715692;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T20:30:39.041237Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616028695252570:2536], TxId: 281474976715692, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YTRiMjZmN2MtODU3ZGU1NzgtYTJlNWU1MjUtMzBiMDgwNDg=. TraceId : 01jyht5d3b6928x4ksja2tzrny. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:30:39.041277Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715692, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-24T20:30:39.041290Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715692, task: 2. Tasks execution finished 2025-06-24T20:30:39.041300Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7519616028695252570:2536], TxId: 281474976715692, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YTRiMjZmN2MtODU3ZGU1NzgtYTJlNWU1MjUtMzBiMDgwNDg=. TraceId : 01jyht5d3b6928x4ksja2tzrny. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-24T20:30:39.047482Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715692, task: 2. pass away 2025-06-24T20:30:39.048984Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715692;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T20:30:39.835349Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:14220: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:14220 >> TBlobStorageProxyTest::TestPartialGetMirror [GOOD] >> TBlobStorageProxyTest::TestVPutVGetLimit [GOOD] >> PrivateApi::GetTask [GOOD] >> PrivateApi::Nodes >> TBlobStorageProxyTest::TestProxyPutInvalidSize [GOOD] >> TBlobStorageProxyTest::TestProxyLongTailDiscoverSingleFailure |88.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut |88.3%| [LD] {RESULT} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut |88.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut >> TBlobStorageProxyTest::TestDoubleFailure [GOOD] >> TBlobStorageProxyTest::TestDoubleFailureMirror3Plus2 |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestPartialGetMirror [GOOD] |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestVPutVGetLimit [GOOD] >> TColumnShardTestSchema::CreateTable+Reboots+GenerateInternalPathId |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TBlobStorageProxyTest::TestProxyGetSingleTimeout [GOOD] >> TBlobStorageProxyTest::TestProxyDiscoverSingleTimeout >> TBlobStorageProxyTest::TestGetMultipart [GOOD] >> TBlobStorageProxyTest::TestGetFail >> test.py::test[solomon-UnknownSetting-] [GOOD] >> Cdc::UuidExchange[TopicRunner] [GOOD] >> Cdc::UpdatesLog[PqRunner] >> TColumnShardTestSchema::TTL-Reboot-Internal+FirstPkColumn >> Cdc::KeysOnlyLog[TopicRunner] [GOOD] >> Cdc::NewAndOldImagesLog[PqRunner] >> TBlobStorageProxyTest::TestPutGetStatusErasureMirror3 [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasureMirror3Plus2 >> KqpPg::EmptyQuery-useSink [GOOD] >> KqpPg::DuplicatedColumns+useSink >> ReadLoad::ShouldReadKqpMoreThanRows [GOOD] >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWithAsyncIndex [GOOD] >> AsyncIndexChangeExchange::SenderShouldShakeHandsOnce >> KqpPg::Insert_Serial-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultText+useSink >> TColumnShardTestSchema::CreateTable+Reboots+GenerateInternalPathId [GOOD] >> TColumnShardTestSchema::HotTiers >> TBlobStorageProxyTest::TestNormal [GOOD] >> TBlobStorageProxyTest::TestNormalMirror >> TBlobStorageProxyTest::TestDoubleFailureMirror3Plus2 [GOOD] |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::ExportWithLostAnswer >> TBlobStorageProxyTest::TestGetFail [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::CreateTable+Reboots+GenerateInternalPathId [GOOD] Test command err: 2025-06-24T20:30:47.480692Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T20:30:47.537350Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T20:30:47.537679Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T20:30:47.545530Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T20:30:47.545795Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T20:30:47.546071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T20:30:47.546212Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T20:30:47.546350Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T20:30:47.546475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T20:30:47.546602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T20:30:47.546730Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T20:30:47.546839Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T20:30:47.546957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T20:30:47.547084Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T20:30:47.579596Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T20:30:47.579793Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T20:30:47.579849Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T20:30:47.580040Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T20:30:47.580217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T20:30:47.580293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T20:30:47.580333Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T20:30:47.580440Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T20:30:47.580494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T20:30:47.580548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T20:30:47.580595Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T20:30:47.580768Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T20:30:47.580845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T20:30:47.580891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T20:30:47.580922Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T20:30:47.581036Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T20:30:47.581109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T20:30:47.581177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T20:30:47.581213Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T20:30:47.581270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T20:30:47.581320Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T20:30:47.581352Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 
2025-06-24T20:30:47.581558Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T20:30:47.581611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T20:30:47.581638Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T20:30:47.581895Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T20:30:47.582217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T20:30:47.582280Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T20:30:47.582442Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T20:30:47.582505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T20:30:47.582534Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T20:30:47.582619Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T20:30:47.582704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T20:30:47.582758Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T20:30:47.582792Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T20:30:47.583278Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=47; 2025-06-24T20:30:47.583368Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=37; 2025-06-24T20:30:47.583461Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=44; 2025-06-24T20:30:47.583541Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=36; 
2025-06-24T20:30:47.583651Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T20:30:47.583756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T20:30:47.583799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T20:30:47.583847Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 8;method=TTxController::StartProposeOnExecute;tx_info=119:TX_KIND_SCHEMA;min=1750797048535;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=20;result=not_found; 2025-06-24T20:30:49.378265Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:322:2330];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=119;this=88923004925888;method=TTxController::StartProposeOnExecute;tx_info=119:TX_KIND_SCHEMA;min=1750797048535;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;fline=schema.h:38;event=sync_schema; 2025-06-24T20:30:49.390528Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750797048535;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;this=88923004925888;op_tx=119:TX_KIND_SCHEMA;min=1750797048535;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750797048535;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_this=89129166052416;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-24T20:30:49.390620Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750797048535;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;this=88923004925888;op_tx=119:TX_KIND_SCHEMA;min=1750797048535;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750797048535;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_this=89129166052416;method=TTxController::FinishProposeOnComplete;tx_id=119;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:157:2179]; 2025-06-24T20:30:49.390683Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750797048535;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;this=88923004925888;op_tx=119:TX_KIND_SCHEMA;min=1750797048535;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750797048535;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_this=89129166052416;method=TTxController::FinishProposeOnComplete;tx_id=119;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=119; 2025-06-24T20:30:49.391061Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-24T20:30:49.391234Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750797048535 at tablet 9437184, mediator 0 2025-06-24T20:30:49.391301Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[4] execute at tablet 9437184 2025-06-24T20:30:49.391582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=20;result=not_found; 2025-06-24T20:30:49.391642Z node 1 :TX_COLUMNSHARD INFO: ctor_logger.h:56: EnsureTable for pathId: {internal: 9438184000018, ss: 20} ttl settings: { Version: 1 } at tablet 9437184 2025-06-24T20:30:49.391728Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tables_manager.cpp:304;method=RegisterTable;path_id=9438184000018; 2025-06-24T20:30:49.391806Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=column_engine.h:144;event=RegisterTable;path_id=9438184000018; 2025-06-24T20:30:49.392285Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=9438184000018; 2025-06-24T20:30:49.392406Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tx_controller.cpp:215;event=finished_tx;tx_id=119; 2025-06-24T20:30:49.412668Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[4] complete at tablet 9437184 CreateTable: { SeqNo { Generation: 20 } EnsureTables { Tables { PathId: 21 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4609 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { Version: 1 } } } } 2025-06-24T20:30:49.414318Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:322:2330];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=120;this=88923004928576;method=TTxController::StartProposeOnExecute;tx_info=120:TX_KIND_SCHEMA;min=1750797048538;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-24T20:30:49.433093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=120:TX_KIND_SCHEMA;min=1750797048538;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;this=88923004928576;op_tx=120:TX_KIND_SCHEMA;min=1750797048538;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:157:2179]; 2025-06-24T20:30:49.433239Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: 
tablet_id=9437184;request_tx=120:TX_KIND_SCHEMA;min=1750797048538;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;this=88923004928576;op_tx=120:TX_KIND_SCHEMA;min=1750797048538;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=120; CreateTable: { SeqNo { Generation: 21 } EnsureTables { Tables { PathId: 22 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4610 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { Version: 1 } } } } 2025-06-24T20:30:49.434695Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:322:2330];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=121;this=88923004930368;method=TTxController::StartProposeOnExecute;tx_info=121:TX_KIND_SCHEMA;min=1750797048540;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-24T20:30:49.446992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=121:TX_KIND_SCHEMA;min=1750797048540;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;this=88923004930368;op_tx=121:TX_KIND_SCHEMA;min=1750797048540;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:157:2179]; 2025-06-24T20:30:49.447094Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=121:TX_KIND_SCHEMA;min=1750797048540;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;this=88923004930368;op_tx=121:TX_KIND_SCHEMA;min=1750797048540;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=121; CreateTable: { SeqNo { Generation: 22 } EnsureTables { Tables { PathId: 23 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4612 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { 
Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { Version: 1 } } } } 2025-06-24T20:30:49.448462Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:322:2330];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=122;this=88923004932160;method=TTxController::StartProposeOnExecute;tx_info=122:TX_KIND_SCHEMA;min=1750797048541;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-24T20:30:49.462026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=122:TX_KIND_SCHEMA;min=1750797048541;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;this=88923004932160;op_tx=122:TX_KIND_SCHEMA;min=1750797048541;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:157:2179]; 2025-06-24T20:30:49.462130Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=122:TX_KIND_SCHEMA;min=1750797048541;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;this=88923004932160;op_tx=122:TX_KIND_SCHEMA;min=1750797048541;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=122; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestDoubleFailureMirror3Plus2 [GOOD] Test command err: 2025-06-24T20:30:43.433066Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/00208c/r3tmp/tmpu7WAOu//vdisk_bad_0/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 1 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1 2025-06-24T20:30:43.433603Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/00208c/r3tmp/tmpu7WAOu//vdisk_bad_1/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 2 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 2 2025-06-24T20:30:43.472897Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 2 VDISK[0:_:0:1:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 
SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T20:30:43.518958Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1 VDISK[0:_:0:0:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T20:30:47.801260Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/00208c/r3tmp/tmpam2Ve8//vdisk_bad_0/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 1 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1 2025-06-24T20:30:47.839495Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/00208c/r3tmp/tmpam2Ve8//vdisk_bad_1/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 2 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 2 2025-06-24T20:30:47.853460Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1 VDISK[0:_:0:0:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 
SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T20:30:47.853615Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 2 VDISK[0:_:0:1:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> ReadLoad::ShouldReadKqpMoreThanRows [GOOD] Test command err: 2025-06-24T20:30:13.844844Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:30:13.845271Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:13.845485Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003384/r3tmp/tmpAOA844/pdisk_1.dat 2025-06-24T20:30:14.254948Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:30:14.258582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:14.321513Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:14.328128Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797002046524 != 1750797002046528 2025-06-24T20:30:14.380202Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:14.380364Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:14.392485Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:14.495969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:15.438115Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# usertable in dir# /Root with rows# 100 2025-06-24T20:30:15.487828Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:693:2575], subTag: 1} TUpsertActor Bootstrap called: RowCount: 100 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" 2025-06-24T20:30:15.757707Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:693:2575], subTag: 1} TUpsertActor finished in 0.267596s, errors=0 2025-06-24T20:30:15.767417Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kReadKqpStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "usertable" } TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } ReadKqpStart { RowCount: 100 Inflights: 10 } 2025-06-24T20:30:15.769121Z node 1 :DS_LOAD_TEST NOTICE: kqp_select.cpp:322: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:693:2575], subTag: 3} Bootstrap called: 
RowCount: 100 Inflights: 10 2025-06-24T20:30:16.036630Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:366: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:693:2575], subTag: 3} will work with tablet# 72075186224037888 with ownerId# 72057594046644480 with tableId# 2 resolved for path# /Root/usertable with columnsCount# 11, keyColumnCount# 1 2025-06-24T20:30:16.036799Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:400: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:693:2575], subTag: 3} started fullscan actor# [1:705:2587] 2025-06-24T20:30:16.036893Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 1} Bootstrap called, sample# 100 2025-06-24T20:30:16.036940Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 1} Connect to# 72075186224037888 called 2025-06-24T20:30:16.037183Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 1} Handle TEvClientConnected called, Status# OK 2025-06-24T20:30:16.049354Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:137: ReadIteratorScan# {Tag: 0, parent: [1:702:2584], subTag: 1} finished in 0.011056s, sampled# 100, iter finished# 1, oks# 100 2025-06-24T20:30:16.054600Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:416: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:693:2575], subTag: 3} received keyCount# 100 2025-06-24T20:30:16.056576Z node 1 :DS_LOAD_TEST NOTICE: kqp_select.cpp:445: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:693:2575], subTag: 3} started# 10 actors each with inflight# 1 2025-06-24T20:30:16.056940Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 2} Bootstrap called 2025-06-24T20:30:16.057640Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 2} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-24T20:30:16.057683Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 3} Bootstrap called 2025-06-24T20:30:16.057705Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 3} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-24T20:30:16.058031Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 4} Bootstrap called 2025-06-24T20:30:16.058051Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 4} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-24T20:30:16.058560Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 5} Bootstrap called 2025-06-24T20:30:16.058927Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 5} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-24T20:30:16.058953Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 6} Bootstrap called 2025-06-24T20:30:16.058972Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 6} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-24T20:30:16.068877Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 7} Bootstrap called 2025-06-24T20:30:16.068914Z node 1 
:DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 7} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-24T20:30:16.068937Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 8} Bootstrap called 2025-06-24T20:30:16.068956Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 8} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-24T20:30:16.069596Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 9} Bootstrap called 2025-06-24T20:30:16.069990Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 9} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-24T20:30:16.070416Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 10} Bootstrap called 2025-06-24T20:30:16.070435Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 10} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-24T20:30:16.070455Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 11} Bootstrap called 2025-06-24T20:30:16.070816Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 11} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-24T20:30:16.104812Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 2} session: ydb://session/3?node_id=1&id=YzYyNjk0NjMtMzY3NWNlZjMtNjVkMWM5MzQtODI4N2E3MjU= 2025-06-24T20:30:16.106934Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 3} session: ydb://session/3?node_id=1&id=MjFjY2YxYzEtZWMzNGMyZS03ZDkyZTA3YS00MTUyOTY3 2025-06-24T20:30:16.130145Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 4} session: ydb://session/3?node_id=1&id=YjYzYzJlNWItYmU1ODhkZDYtMzhlNTYyZDMtMTVlMThmYjc= 2025-06-24T20:30:16.131370Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 5} session: ydb://session/3?node_id=1&id=YjNiZWNkY2UtNDRiOGUzNWItMTNlNzA2MjUtYjcyYWU5ZTM= 2025-06-24T20:30:16.146902Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 6} session: ydb://session/3?node_id=1&id=NTRhMDZkMGUtNjU2ZjhlN2UtNjAyMjlkY2YtMWQxYmQ1ZjE= 2025-06-24T20:30:16.194034Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 7} session: ydb://session/3?node_id=1&id=ZjIxYmM4YjMtNGMyZjQwNTgtMjg2NzM0ZjEtZmM0MTQ2YTU= 2025-06-24T20:30:16.215734Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 8} session: ydb://session/3?node_id=1&id=M2Y4MmJiNjgtYzY3MzI0NDktZDliZDEzZjItNWE4MTgyZTA= 2025-06-24T20:30:16.256128Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 9} session: ydb://session/3?node_id=1&id=NjIxNzRkNzQtNjcyOWU2NzktYWMwNzc2ZDYtYjBmMWI3YTk= 2025-06-24T20:30:16.280993Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 10} session: 
ydb://session/3?node_id=1&id=MmQ2MjU0ZTQtMmZiODA2OTUtYTUxMDcwMTQtMTJlOWQ5NDA= 2025-06-24T20:30:16.297427Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:702:2584], subTag: 11} session: ydb://session/3?node_id=1&id=ZmRjZWRjNzQtMmRkMjVkY2QtODc4Y2Q2ZTYtZThhMWY1MzI= 2025-06-24T20:30:16.307138Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:729:2611], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:16.307315Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:761:2637], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:16.307403Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:762:2638], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: R ... cePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:30:39.436847Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:795:2672] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:30:39.437877Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:799:2676] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:30:39.438668Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:800:2677] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:30:39.465086Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:30:39.605823Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:780:2657], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:39.605970Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:781:2658], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:39.606034Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:782:2659], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:39.606100Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:783:2660], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:39.606169Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:784:2661], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:39.606247Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:787:2664], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:39.606314Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:788:2665], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:39.606387Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:796:2673], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:39.606442Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:810:2681], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:39.606520Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:815:2683], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:39.651218Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:936:2773] txid# 281474976715668, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:40.366775Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:701:2584], subTag: 5} finished in 0.983624s, errors=0 2025-06-24T20:30:40.367087Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:692:2575], subTag: 3} finished: 5 { Tag: 5 DurationMs: 983 OperationsOK: 100 OperationsError: 0 } 2025-06-24T20:30:40.381145Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:1965:3195] txid# 281474976715769, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:41.025222Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:701:2584], subTag: 10} finished in 1.633232s, errors=0 2025-06-24T20:30:41.025385Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:692:2575], subTag: 3} finished: 10 { Tag: 10 DurationMs: 1633 OperationsOK: 100 OperationsError: 0 } 2025-06-24T20:30:41.040147Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:2972:3601] txid# 281474976715870, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:41.516848Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:701:2584], subTag: 2} finished in 2.137444s, errors=0 2025-06-24T20:30:41.517206Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:692:2575], subTag: 3} finished: 2 { Tag: 2 DurationMs: 2137 OperationsOK: 100 OperationsError: 0 } 2025-06-24T20:30:41.531768Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:3979:4007] txid# 281474976715971, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:42.382484Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:701:2584], subTag: 11} finished in 2.988745s, errors=0 2025-06-24T20:30:42.382812Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:692:2575], subTag: 3} finished: 11 { Tag: 11 DurationMs: 2988 OperationsOK: 100 OperationsError: 0 } 2025-06-24T20:30:42.398627Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:4986:4413] txid# 281474976716072, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:43.006267Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: 
TKqpSelectActor# {Tag: 0, parent: [2:701:2584], subTag: 4} finished in 3.623262s, errors=0 2025-06-24T20:30:43.006544Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:692:2575], subTag: 3} finished: 4 { Tag: 4 DurationMs: 3623 OperationsOK: 100 OperationsError: 0 } 2025-06-24T20:30:43.022619Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:5993:4819] txid# 281474976716173, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:43.780231Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:701:2584], subTag: 8} finished in 4.391771s, errors=0 2025-06-24T20:30:43.780642Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:692:2575], subTag: 3} finished: 8 { Tag: 8 DurationMs: 4391 OperationsOK: 100 OperationsError: 0 } 2025-06-24T20:30:43.797764Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7000:5225] txid# 281474976716274, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:44.504085Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:701:2584], subTag: 6} finished in 5.119058s, errors=0 2025-06-24T20:30:44.504532Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:692:2575], subTag: 3} finished: 6 { Tag: 6 DurationMs: 5119 OperationsOK: 100 OperationsError: 0 } 2025-06-24T20:30:44.522456Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:8007:5631] txid# 281474976716375, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:45.276886Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:701:2584], subTag: 7} finished in 5.890120s, errors=0 2025-06-24T20:30:45.277321Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:692:2575], subTag: 3} finished: 7 { Tag: 7 DurationMs: 5890 OperationsOK: 100 OperationsError: 0 } 2025-06-24T20:30:45.301800Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:9014:6037] txid# 281474976716476, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:46.639769Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:701:2584], subTag: 3} finished in 7.260171s, errors=0 2025-06-24T20:30:46.640288Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:692:2575], subTag: 3} finished: 3 { Tag: 3 DurationMs: 7260 OperationsOK: 100 OperationsError: 0 } 2025-06-24T20:30:46.662103Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:10021:6443] txid# 281474976716577, issues: { message: "Check failed: path: 
\'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:47.916117Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:701:2584], subTag: 9} finished in 8.525812s, errors=0 2025-06-24T20:30:47.916672Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:692:2575], subTag: 3} finished: 9 { Tag: 9 DurationMs: 8525 OperationsOK: 100 OperationsError: 0 } 2025-06-24T20:30:47.916750Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:480: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:692:2575], subTag: 3} finished in 8.542392s, oks# 1000, errors# 0 2025-06-24T20:30:47.917114Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:701:2584] with tag# 3 >> TBlobStorageProxyTest::TestProxyDiscoverSingleTimeout [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::DescribeQuery [GOOD] Test command err: 2025-06-24T20:29:06.266360Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615625175726004:2204];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:06.266395Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0624 20:29:08.300957102 104166 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:08.301121618 104166 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:08.404343Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:29:08.434046Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:14845: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:14845 } ] 2025-06-24T20:29:08.514230Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:09.579653Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:10.602638Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:11.194108Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:14845: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:14845 } ] 2025-06-24T20:29:11.277542Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519615625175726004:2204];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:11.277592Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:29:11.620178Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:14845: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:14845 2025-06-24T20:29:11.691336Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:12.292237Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:12.734620Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:13.315593Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:13.390506590 104482 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:13.392070173 104482 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:13.747333Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:14.316602Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:14.425174Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:14845: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:14845 } ] 2025-06-24T20:29:14.551891Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:14845: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:14845 2025-06-24T20:29:14.903779Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:15.327279Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:15.912254Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:16.367645Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:16.943644Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:17.391606Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:17.959962Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:18.397136Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:18.546886753 104480 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:18.574058112 104480 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:18.971512Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:19.235418Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:14845: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:14845 } ] 2025-06-24T20:29:19.344918Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:14845: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:14845 2025-06-24T20:29:19.410438Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:19.976191Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:20.407927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:20.990886Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:21.411702Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:22.319658Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:22.414207Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:23.419570Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:23.423473Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:23.692526711 104480 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:23.692644420 104480 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:24.424405Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:24.424429Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:24.932289Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:14845: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:14845 } ] 2025-06-24T20:29:25.056260Z node 1 :YQL_NODES_MANAGER ER ... BUG: kqp_read_actor.cpp:632: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. Processing resolved ShardId# 72075186224037894, partition range: [(String : yandexcloud://some_folder_id, String : utque7l36fit61tk5mkc) ; ()), i: 0, state ranges: 0, points: 1 2025-06-24T20:30:39.547547Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:670: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. Add point to new shardId: 72075186224037894 2025-06-24T20:30:39.547663Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:714: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. Pending shards States: TShardState{ TabletId: 72075186224037894, Last Key , Ranges: [], Points: [# 0: (String : yandexcloud://some_folder_id, String : utque7l36fit61tk5mkc)], RetryAttempt: 0, ResolveAttempt: 0 }; In Flight shards States: TShardState{ TabletId: 0, Last Key , Ranges: [], Points: [# 0: (String : yandexcloud://some_folder_id, String : utque7l36fit61tk5mkc)], RetryAttempt: 0, ResolveAttempt: 1 }; 2025-06-24T20:30:39.547677Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:458: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. effective maxinflight 1024 sorted 0 2025-06-24T20:30:39.547688Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:462: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. BEFORE: 1.0 2025-06-24T20:30:39.547736Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:884: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. Send EvRead to shardId: 72075186224037894, tablePath: Root/yq/queries, ranges: , limit: (empty maybe), readId = 0, reverse = 0, snapshot = (txid=0,step=0), lockTxId = 0, lockNodeId = 0 2025-06-24T20:30:39.547766Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:476: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. AFTER: 0.1 2025-06-24T20:30:39.547778Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:480: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. Scheduled table scans, in flight: 1 shards. pending shards to read: 0, 2025-06-24T20:30:39.549558Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:958: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. Recv TEvReadResult from ShardID=72075186224037894, ReadId=0, Status=SUCCESS, Finished=1, RowCount=1, TxLocks= , BrokenTxLocks= 2025-06-24T20:30:39.549586Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1050: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. Taken 0 locks 2025-06-24T20:30:39.549600Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1064: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. new data for read #0 seqno = 1 finished = 1 2025-06-24T20:30:39.549625Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616024710093671:3049], TxId: 281474976710809, task: 1. Ctx: { TraceId : 01jyht5dd48v26mxn42w0argnm. SessionId : ydb://session/3?node_id=4&id=NTg2ODUxZjEtMzIxYzJlOS05NjdiMjI3NS1lYmVmZjcxMg==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 276037645 2025-06-24T20:30:39.549646Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616024710093671:3049], TxId: 281474976710809, task: 1. Ctx: { TraceId : 01jyht5dd48v26mxn42w0argnm. SessionId : ydb://session/3?node_id=4&id=NTg2ODUxZjEtMzIxYzJlOS05NjdiMjI3NS1lYmVmZjcxMg==. 
CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:30:39.549662Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1331: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. enter getasyncinputdata results size 1, freeSpace 8388608 2025-06-24T20:30:39.549681Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1227: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. enter pack cells method shardId: 72075186224037894 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-06-24T20:30:39.549708Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1308: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. exit pack cells method shardId: 72075186224037894 processedRows: 0 packed rows: 1 freeSpace: 8386364 2025-06-24T20:30:39.549726Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1365: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. returned 1 rows; processed 1 rows 2025-06-24T20:30:39.549761Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1402: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. dropping batch for read #0 2025-06-24T20:30:39.549773Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:458: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. effective maxinflight 1024 sorted 0 2025-06-24T20:30:39.549787Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:480: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, 2025-06-24T20:30:39.549804Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976710809, task: 1, CA Id [4:7519616024710093671:3049]. returned async data processed rows 1 left freeSpace 8386364 received rows 1 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2025-06-24T20:30:39.550008Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7519616024710093671:3049], TxId: 281474976710809, task: 1. Ctx: { TraceId : 01jyht5dd48v26mxn42w0argnm. SessionId : ydb://session/3?node_id=4&id=NTg2ODUxZjEtMzIxYzJlOS05NjdiMjI3NS1lYmVmZjcxMg==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-24T20:30:39.550027Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616024710093671:3049], TxId: 281474976710809, task: 1. Ctx: { TraceId : 01jyht5dd48v26mxn42w0argnm. SessionId : ydb://session/3?node_id=4&id=NTg2ODUxZjEtMzIxYzJlOS05NjdiMjI3NS1lYmVmZjcxMg==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:30:39.550060Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976710809, task: 1. Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-06-24T20:30:39.550080Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616024710093672:3050], TxId: 281474976710809, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=NTg2ODUxZjEtMzIxYzJlOS05NjdiMjI3NS1lYmVmZjcxMg==. CustomerSuppliedId : . TraceId : 01jyht5dd48v26mxn42w0argnm. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. 
CA StateFunc 271646923 2025-06-24T20:30:39.550104Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976710809, task: 2. Finish input channelId: 1, from: [4:7519616024710093671:3049] 2025-06-24T20:30:39.550135Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616024710093672:3050], TxId: 281474976710809, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=NTg2ODUxZjEtMzIxYzJlOS05NjdiMjI3NS1lYmVmZjcxMg==. CustomerSuppliedId : . TraceId : 01jyht5dd48v26mxn42w0argnm. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-24T20:30:39.550294Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7519616024710093672:3050], TxId: 281474976710809, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=NTg2ODUxZjEtMzIxYzJlOS05NjdiMjI3NS1lYmVmZjcxMg==. CustomerSuppliedId : . TraceId : 01jyht5dd48v26mxn42w0argnm. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-24T20:30:39.550310Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616024710093671:3049], TxId: 281474976710809, task: 1. Ctx: { TraceId : 01jyht5dd48v26mxn42w0argnm. SessionId : ydb://session/3?node_id=4&id=NTg2ODUxZjEtMzIxYzJlOS05NjdiMjI3NS1lYmVmZjcxMg==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646927 2025-06-24T20:30:39.550334Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616024710093671:3049], TxId: 281474976710809, task: 1. Ctx: { TraceId : 01jyht5dd48v26mxn42w0argnm. SessionId : ydb://session/3?node_id=4&id=NTg2ODUxZjEtMzIxYzJlOS05NjdiMjI3NS1lYmVmZjcxMg==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:30:39.550354Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710809, task: 1. Tasks execution finished 2025-06-24T20:30:39.550368Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7519616024710093671:3049], TxId: 281474976710809, task: 1. Ctx: { TraceId : 01jyht5dd48v26mxn42w0argnm. SessionId : ydb://session/3?node_id=4&id=NTg2ODUxZjEtMzIxYzJlOS05NjdiMjI3NS1lYmVmZjcxMg==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-24T20:30:39.550471Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710809, task: 1. pass away 2025-06-24T20:30:39.550555Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976710809;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T20:30:39.550919Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616024710093672:3050], TxId: 281474976710809, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=NTg2ODUxZjEtMzIxYzJlOS05NjdiMjI3NS1lYmVmZjcxMg==. CustomerSuppliedId : . TraceId : 01jyht5dd48v26mxn42w0argnm. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-24T20:30:39.550952Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710809, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-24T20:30:39.550962Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710809, task: 2. Tasks execution finished 2025-06-24T20:30:39.550973Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7519616024710093672:3050], TxId: 281474976710809, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=NTg2ODUxZjEtMzIxYzJlOS05NjdiMjI3NS1lYmVmZjcxMg==. CustomerSuppliedId : . TraceId : 01jyht5dd48v26mxn42w0argnm. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-06-24T20:30:39.551023Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710809, task: 2. pass away 2025-06-24T20:30:39.551070Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976710809;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; E0624 20:30:39.847796012 108745 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:30:39.847959794 108745 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:30:40.274224Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:64182: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:64182 |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestGetFail [GOOD] |88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/conveyor_composite/ut/ydb-core-tx-conveyor_composite-ut |88.4%| [LD] {RESULT} $(B)/ydb/core/tx/conveyor_composite/ut/ydb-core-tx-conveyor_composite-ut |88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/conveyor_composite/ut/ydb-core-tx-conveyor_composite-ut |88.4%| [TA] $(B)/ydb/core/load_test/ut_ycsb/test-results/unittest/{meta.json ... results_accumulator.log} |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestProxyDiscoverSingleTimeout [GOOD] >> Cdc::DocApi[PqRunner] [GOOD] >> Cdc::DocApi[YdsRunner] >> Yq_1::ModifyQuery [GOOD] >> TColumnShardTestSchema::RebootColdTiers |88.4%| [TM] {asan, default-linux-x86_64, pic, release} ydb/library/yql/tests/sql/solomon/pytest >> test.py::test[solomon-UnknownSetting-] [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasureMirror3Plus2 [GOOD] |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> YdbTableSplit::MergeByNoLoadAfterSplit >> TBlobStorageProxyTest::TestNormalMirror [GOOD] >> TColumnShardTestSchema::CreateTable-Reboots-GenerateInternalPathId >> BSCReadOnlyPDisk::ReadOnlyOneByOne [GOOD] >> TPDiskRaces::OwnerKilledWhileReadingLogAndThenKillLastOwner [GOOD] >> TPDiskTest::PDiskOwnerSlayRace |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestPutGetStatusErasureMirror3Plus2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::ModifyQuery [GOOD] Test command err: 2025-06-24T20:29:01.358694Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615605457928023:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:01.378899Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:29:02.553640Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; E0624 20:29:02.672304865 103483 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:02.672444262 103483 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:02.766032Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:8057: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:8057 } ] 2025-06-24T20:29:04.382337Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:8057: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:8057 2025-06-24T20:29:04.495586Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:8057: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:8057 } ] 2025-06-24T20:29:04.808616Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:05.794755Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:8057: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:8057 } ] 2025-06-24T20:29:05.814185Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:06.331269Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519615605457928023:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:06.420523Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:29:06.811608Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:07.439199Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:07.746737043 103740 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:07.753081712 103740 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:07.835929Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:08.259420Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:8057: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:8057 } ] 2025-06-24T20:29:08.271538Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:8057: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:8057 2025-06-24T20:29:08.440272Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:08.843499Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:09.447090Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:09.849592Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:10.459382Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:10.851875Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:11.495258Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:11.815517Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:8057: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:8057 } ] 2025-06-24T20:29:11.868578Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:12.496253Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:12.846393157 103740 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:12.851542226 103740 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:12.879776Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:13.499065Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:13.890299Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:14.500344Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:14.894647Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:15.503309Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:15.902657Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:16.513007Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:16.905928Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:17.521170Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:17.877685705 103741 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:17.877777928 103741 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:17.879860Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:8057: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:8057 2025-06-24T20:29:17.880012Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:8057: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:8057 2025-06-24T20:29:17.909485Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:17.946959Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:8057: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:8057 } ] 2025-06-24T20:29:18.530296Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:18.908 ... 37f980a8-64d37865-49d961b4-347fcfb816#mock_cloud} Creating finalizing transformer, output #199865 2025-06-24T20:30:48.972010Z node 4 :YQL_PROXY DEBUG: log.cpp:64: SessionId: 37f980a8-64d37865-49d961b4-347fcfb8 2025-06-24 20:30:48.971 DEBUG ydb-services-fq-ut_integration(pid=102871, tid=0x00007EFC7D4B7640) [DQ] yql_dq_provider.cpp:126: {utque7l36bb7hak40k33#utrue7l35t98bv682m5d#yandexcloud://Execute_folder_id#37f980a8-64d37865-49d961b4-347fcfb816#mock_cloud} CloseSession utque7l36bb7hak40k33#utrue7l35t98bv682m5d#yandexcloud://Execute_folder_id#37f980a8-64d37865-49d961b4-347fcfb816#mock_cloud 2025-06-24T20:30:48.975815Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519616063410017729:2367] TxId: 281474976715835. Ctx: { TraceId: 01jyht5q316812nvgq59cs5kkh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=OWVkYjEwZS0zMjFhMmMtOGI2OGU2NmQtYmEyNjRiZmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-24T20:30:48.983870Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3311: SelfId: [4:7519616063410017750:3231], TxId: 281474976715835, task: 5. Add data: 1189 / 1189 2025-06-24T20:30:48.983963Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3280: SelfId: [4:7519616063410017750:3231], TxId: 281474976715835, task: 5. Send data=1189, closed=1, bufferActorId=[4:7519616063410017728:2367] 2025-06-24T20:30:48.984005Z node 4 :KQP_COMPUTE DEBUG: dq_sync_compute_actor_base.h:372: SelfId: [4:7519616063410017737:3231], TxId: 281474976715835, task: 5. Ctx: { SessionId : ydb://session/3?node_id=4&id=OWVkYjEwZS0zMjFhMmMtOGI2OGU2NmQtYmEyNjRiZmU=. CustomerSuppliedId : . TraceId : 01jyht5q316812nvgq59cs5kkh. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Drain async output 0. Free space decreased: -9223372036787666944, sent data from buffer: 1189 2025-06-24T20:30:48.984045Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715835, task: 5. Tasks execution finished, don't wait for ack delivery in input channelId: 2, seqNo: [1] 2025-06-24T20:30:48.984061Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715835, task: 5. Tasks execution finished 2025-06-24T20:30:48.984073Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1587: SelfId: [4:7519616063410017737:3231], TxId: 281474976715835, task: 5. Ctx: { SessionId : ydb://session/3?node_id=4&id=OWVkYjEwZS0zMjFhMmMtOGI2OGU2NmQtYmEyNjRiZmU=. CustomerSuppliedId : . TraceId : 01jyht5q316812nvgq59cs5kkh. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Waiting finish of sink[0] 2025-06-24T20:30:48.984174Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1860: SelfId: [4:7519616063410017728:2367], SessionActorId: [4:7519615986100600438:2367], Create new TableWriteActor for table `Root/yq/queries` ([72057594046644480:9:1]). lockId=281474976715832. ActorId=[4:7519616063410017752:2367] 2025-06-24T20:30:48.984222Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:388: Table: `Root/yq/queries` ([72057594046644480:9:1]), SessionActorId: [4:7519615986100600438:2367]Open: token=2 2025-06-24T20:30:48.984255Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1987: SelfId: [4:7519616063410017728:2367], SessionActorId: [4:7519615986100600438:2367], ProcessRequestQueue [OwnerId: 72057594046644480, LocalPathId: 9] NOT READY queue=1 2025-06-24T20:30:48.984320Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:394: SelfId: [4:7519616063410017752:2367], Table: `Root/yq/queries` ([72057594046644480:9:1]), SessionActorId: [4:7519615986100600438:2367]Write: token=2 2025-06-24T20:30:48.985586Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:402: SelfId: [4:7519616063410017752:2367], Table: `Root/yq/queries` ([72057594046644480:9:1]), SessionActorId: [4:7519615986100600438:2367]Close: token=2 2025-06-24T20:30:48.985654Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616063410017737:3231], TxId: 281474976715835, task: 5. Ctx: { SessionId : ydb://session/3?node_id=4&id=OWVkYjEwZS0zMjFhMmMtOGI2OGU2NmQtYmEyNjRiZmU=. CustomerSuppliedId : . TraceId : 01jyht5q316812nvgq59cs5kkh. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646735 2025-06-24T20:30:48.985746Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519616063410017737:3231], TxId: 281474976715835, task: 5. Ctx: { SessionId : ydb://session/3?node_id=4&id=OWVkYjEwZS0zMjFhMmMtOGI2OGU2NmQtYmEyNjRiZmU=. 
CustomerSuppliedId : . TraceId : 01jyht5q316812nvgq59cs5kkh. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Handle abort execution event from: [4:7519616063410017729:2367], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T20:30:48.985858Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715835, task: 5. pass away 2025-06-24T20:30:48.985942Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715835;task_id=5;success=0;message={
: Error: COMPUTE_STATE_FAILURE }; 2025-06-24T20:30:48.986655Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OWVkYjEwZS0zMjFhMmMtOGI2OGU2NmQtYmEyNjRiZmU=, ActorId: [4:7519615986100600438:2367], ActorState: ExecuteState, TraceId: 01jyht5q316812nvgq59cs5kkh, Create QueryResponse for error on request, msg: 2025-06-24T20:30:48.987844Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2144: SelfId: [4:7519616063410017728:2367], SessionActorId: [4:7519615986100600438:2367], Start rollback 2025-06-24T20:30:48.994963Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2196: SelfId: [4:7519616063410017728:2367], SessionActorId: [4:7519615986100600438:2367], Send EvWrite (external) to ShardID=72075186224037899, isPrepare=0, isImmediateCommit=1, TxId=0, LockTxId=0, LockNodeId=0, Locks= LockId: 281474976715832 DataShard: 72075186224037899 Generation: 1 Counter: 9 SchemeShard: 72057594046644480 PathId: 14, Size=0, Cookie=0, OperationsCount=0, IsFinal=1, Attempts=0 2025-06-24T20:30:48.995096Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2196: SelfId: [4:7519616063410017728:2367], SessionActorId: [4:7519615986100600438:2367], Send EvWrite (external) to ShardID=72075186224037894, isPrepare=0, isImmediateCommit=1, TxId=0, LockTxId=0, LockNodeId=0, Locks= LockId: 281474976715832 DataShard: 72075186224037894 Generation: 1 Counter: 11 SchemeShard: 72057594046644480 PathId: 9, Size=0, Cookie=0, OperationsCount=0, IsFinal=1, Attempts=0 2025-06-24T20:30:48.995169Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2196: SelfId: [4:7519616063410017728:2367], SessionActorId: [4:7519615986100600438:2367], Send EvWrite (external) to ShardID=72075186224037898, isPrepare=0, isImmediateCommit=1, TxId=0, LockTxId=0, LockNodeId=0, Locks= LockId: 281474976715832 DataShard: 72075186224037898 Generation: 1 Counter: 11 SchemeShard: 72057594046644480 PathId: 13, Size=0, Cookie=0, OperationsCount=0, IsFinal=1, Attempts=0 2025-06-24T20:30:48.996360Z node 4 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage.cpp:599: DB Error, Status: CLIENT_CANCELLED, Issues: [ {
: Error: GRpc error: (1): Cancelled on the server side } {
: Error: Grpc error response on endpoint localhost:28225 } ], Query: --!syntax_v1 -- Query name: HardPingTask(write) PRAGMA TablePathPrefix("Root/yq"); DECLARE $tenant as String; DECLARE $scope as String; DECLARE $job_id as String; DECLARE $job as String; DECLARE $query as String; DECLARE $status as Int64; DECLARE $internal as String; DECLARE $result_id as String; DECLARE $query_id as String; DECLARE $now as Timestamp; DECLARE $ttl as Timestamp; DECLARE $retry_counter_update_time as Timestamp; DECLARE $retry_rate as Double; DECLARE $retry_counter as Uint64; DECLARE $owner as String; UPDATE `pending_small` SET `last_seen_at` = $now, `assigned_until` = $ttl, `retry_counter` = $retry_counter, `retry_counter_updated_at` = $retry_counter_update_time, `retry_rate` = $retry_rate, `owner` = $owner WHERE `tenant` = $tenant AND `scope` = $scope AND `query_id` = $query_id; UPSERT INTO `jobs` (`scope`, `query_id`, `job_id`, `job`) VALUES($scope, $query_id, $job_id, $job); UPDATE `queries` SET `query` = $query, `status` = $status, `internal` = $internal, `result_sets_expire_at` = NULL, `expire_at` = NULL, `meta_revision` = `meta_revision` + 1 WHERE `scope` = $scope AND `query_id` = $query_id; 2025-06-24T20:30:48.998289Z node 4 :FQ_PINGER WARN: pinger.cpp:358: QueryId: utque7l36bb7hak40k33, Owner: 37f980a8-64d37865-49d961b4-347fcfb816 Ping response error: [ {
: Error: GRpc error: (1): Cancelled on the server side } {
: Error: Grpc error response on endpoint [::]:28225 } ]. Retry after: 0.000000s 2025-06-24T20:30:48.999599Z node 4 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: PingTaskRequest - PingTaskResult: {owner_id: "37f980a8-64d37865-49d961b4-347fcfb816" query_id { value: "utque7l36bb7hak40k33" } scope: "yandexcloud://Execute_folder_id" started_at { seconds: 1750797048 nanos: 831000000 } deadline { seconds: 1750883448 nanos: 759667000 } tenant: "TestTenant" } ERROR: [ {
: Error: GRpc error: (1): Cancelled on the server side } {
: Error: Grpc error response on endpoint localhost:28225 } ] 2025-06-24T20:30:49.003438Z node 4 :YQL_PRIVATE_PROXY ERROR: task_ping.cpp:69: PrivatePingTask - QueryId: utque7l36bb7hak40k33, Owner: 37f980a8-64d37865-49d961b4-347fcfb816, Failed with code: GENERIC_ERROR Details:
: Error: GRpc error: (1): Cancelled on the server side
: Error: Grpc error response on endpoint localhost:28225
: Error: ControlPlane PingTaskError 2025-06-24T20:30:49.011037Z node 4 :FQ_PINGER WARN: pinger.cpp:358: QueryId: utque7l36bb7hak40k33, Owner: 37f980a8-64d37865-49d961b4-347fcfb816 Ping response error: [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:28225: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint [::]:28225 } ]. Retry after: 0.084118s 2025-06-24T20:30:49.103277Z node 4 :FQ_PINGER WARN: pinger.cpp:358: QueryId: utque7l36bb7hak40k33, Owner: 37f980a8-64d37865-49d961b4-347fcfb816 Ping response error: [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:28225: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint [::]:28225 } ]. Retry after: 0.148180s 2025-06-24T20:30:49.283398Z node 4 :FQ_PINGER WARN: pinger.cpp:358: QueryId: utque7l36bb7hak40k33, Owner: 37f980a8-64d37865-49d961b4-347fcfb816 Ping response error: [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:28225: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint [::]:28225 } ]. Retry after: 0.218643s 2025-06-24T20:30:49.507443Z node 4 :FQ_PINGER WARN: pinger.cpp:358: QueryId: utque7l36bb7hak40k33, Owner: 37f980a8-64d37865-49d961b4-347fcfb816 Ping response error: [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:28225: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint [::]:28225 } ]. Retry after: 0.653315s 2025-06-24T20:30:49.646693Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:28225: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:28225 2025-06-24T20:30:50.163390Z node 4 :FQ_PINGER WARN: pinger.cpp:358: QueryId: utque7l36bb7hak40k33, Owner: 37f980a8-64d37865-49d961b4-347fcfb816 Ping response error: {
: Error: Client is stopped }. Retry after: 0.897896s >> YdbTableSplit::RenameTablesAndSplit >> TPDiskTest::PDiskOwnerSlayRace [GOOD] >> TPDiskTest::CommitDeleteChunks |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestNormalMirror [GOOD] >> Cdc::UpdatesLog[PqRunner] [GOOD] >> Cdc::UpdatesLog[YdsRunner] >> YdbTableSplit::SplitByLoadWithNonEmptyRangeReads >> Yq_1::CreateQuery_Without_Connection [GOOD] >> Initializer::Simple [GOOD] >> TPDiskTest::CommitDeleteChunks [GOOD] >> TPDiskTest::DeviceHaltTooLong >> KqpPg::DuplicatedColumns+useSink [GOOD] >> KqpPg::DuplicatedColumns-useSink >> YdbTableSplit::SplitByLoadWithReads ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlyOneByOne [GOOD] Test command err: RandomSeed# 3177318705015053682 |88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut |88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut |88.4%| [TA] {RESULT} $(B)/ydb/core/load_test/ut_ycsb/test-results/unittest/{meta.json ... results_accumulator.log} |88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut >> KqpPg::TypeCoercionBulkUpsert [GOOD] >> KqpPg::TypeCoercionInsert+useSink |88.4%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut |88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut |88.4%| [TM] {RESULT} ydb/library/yql/tests/sql/solomon/pytest |88.4%| [LD] {RESULT} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut >> TBlobStorageProxyTest::TestProxyRestoreOnGetBlock [GOOD] >> TBlobStorageProxyTest::TestProxyRestoreOnGetMirror >> Cdc::NewAndOldImagesLog[PqRunner] [GOOD] >> Cdc::NewAndOldImagesLog[YdsRunner] >> TColumnShardTestSchema::CreateTable-Reboots-GenerateInternalPathId [GOOD] >> YdbTableSplit::SplitByLoadWithReadsMultipleSplitsWithData |88.4%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/test-results/unittest/{meta.json ... results_accumulator.log} >> Yq_1::DeleteQuery [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> Initializer::Simple [GOOD] Test command err: 2025-06-24T20:29:37.690111Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:29:37.690505Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:37.690687Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004879/r3tmp/tmpzgAuE4/pdisk_1.dat 2025-06-24T20:29:38.898922Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 7350, node 1 TClient is connected to server localhost:26090 2025-06-24T20:29:39.754016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:29:39.804257Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:29:39.804314Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:29:39.804373Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:29:39.804690Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:29:39.810359Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:29:39.811752Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750796964629023 != 1750796964629027 2025-06-24T20:29:39.859462Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:29:39.859595Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:29:39.873011Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:29:50.146137Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:642:2533], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:29:50.146410Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:652:2538], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:29:50.146892Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:29:50.174091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:29:50.273101Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:656:2541], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-24T20:29:50.306044Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:29:50.938785Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:726:2580] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:29:51.786552Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:736:2589], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/Root/.metadata/test]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:29:51.790118Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZTczMGZjNDAtNjgzZGUyZC00MzVhYmEyZS03N2QwNjg2Mg==, ActorId: [1:633:2525], ActorState: ExecuteState, TraceId: 01jyht3xnteyk9f5kfhzsey9s3, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: REQUEST=SELECT * FROM `/Root/.metadata/test`;RESULT=
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/Root/.metadata/test]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 ;EXPECTATION=0 REQUEST=SELECT * FROM `/Root/.metadata/test`;EXPECTATION=0 2025-06-24T20:29:51.880807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:29:52.808327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:29:53.754660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:29:55.037785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Initialization finished 2025-06-24T20:30:10.156280Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jyht4f311hetpm8kmvq31pqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Mzc0YzgxZjItM2JiN2QwNDItY2I2NThjYTEtMjFlZmIyZTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT * FROM `/Root/.metadata/test`;RESULT=;EXPECTATION=1 REQUEST=SELECT * FROM `/Root/.metadata/test`;EXPECTATION=1 REQUEST=DROP TABLE `/Root/.metadata/test`;EXPECTATION=0;WAITING=1 2025-06-24T20:30:21.071576Z node 1 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [1:1286:2980] txid# 281474976715678, Access denied for root@builtin on path /Root/.metadata/test, with access RemoveSchema 2025-06-24T20:30:21.073147Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1286:2980] txid# 281474976715678, issues: { message: "Access denied for root@builtin on path /Root/.metadata/test" issue_code: 200000 severity: 1 } REQUEST=DROP TABLE `/Root/.metadata/test`;RESULT=
: Error: Execution, code: 1060
:1:12: Error: Executing DROP TABLE
: Error: Access denied., code: 2018
: Error: Access denied for root@builtin on path /Root/.metadata/test, code: 200000 ;EXPECTATION=0 FINISHED_REQUEST=DROP TABLE `/Root/.metadata/test`;EXPECTATION=0;WAITING=1 2025-06-24T20:30:32.344425Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715681. Ctx: { TraceId: 01jyht56qz2m2va42ydwbshjx8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmUyMjI2OTctZjhjMTcxYy01YTk3M2Y3OC0yZmZhZDRjOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT * FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT * FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 REQUEST=DELETE FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=0;WAITING=1 REQUEST=DELETE FROM `/Root/.metadata/initialization/migrations`;RESULT=
: Fatal: ydb/core/kqp/host/kqp_host.cpp:977 ExecuteDataQuery(): requirement false failed, message: Unexpected query type for execute script action: Ddl, code: 1 ;EXPECTATION=0 FINISHED_REQUEST=DELETE FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=0;WAITING=1 REQUEST=DROP TABLE `/Root/.metadata/initialization/migrations`;EXPECTATION=0;WAITING=1 2025-06-24T20:30:53.920428Z node 1 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [1:1444:3088] txid# 281474976715686, Access denied for root@builtin on path /Root/.metadata/initialization/migrations, with access RemoveSchema 2025-06-24T20:30:53.920627Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1444:3088] txid# 281474976715686, issues: { message: "Access denied for root@builtin on path /Root/.metadata/initialization/migrations" issue_code: 200000 severity: 1 } REQUEST=DROP TABLE `/Root/.metadata/initialization/migrations`;RESULT=
: Error: Execution, code: 1060
:1:12: Error: Executing DROP TABLE
: Error: Access denied., code: 2018
: Error: Access denied for root@builtin on path /Root/.metadata/initialization/migrations, code: 200000 ;EXPECTATION=0 FINISHED_REQUEST=DROP TABLE `/Root/.metadata/initialization/migrations`;EXPECTATION=0;WAITING=1 >> KqpPg::InsertValuesFromTableWithDefaultText+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultText-useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::CreateTable-Reboots-GenerateInternalPathId [GOOD] Test command err: 2025-06-24T20:30:54.566529Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T20:30:54.586976Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T20:30:54.587486Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T20:30:54.593639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T20:30:54.593841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T20:30:54.594047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T20:30:54.594156Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T20:30:54.594237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T20:30:54.594335Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T20:30:54.594434Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T20:30:54.594504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T20:30:54.594582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T20:30:54.594665Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T20:30:54.594747Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T20:30:54.622418Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T20:30:54.622558Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T20:30:54.622602Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T20:30:54.622756Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T20:30:54.622890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T20:30:54.622949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T20:30:54.622985Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T20:30:54.623054Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T20:30:54.623092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T20:30:54.623122Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T20:30:54.623186Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T20:30:54.623331Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T20:30:54.623392Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T20:30:54.623423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T20:30:54.623441Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T20:30:54.623512Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T20:30:54.623551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T20:30:54.623601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T20:30:54.623624Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T20:30:54.623669Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T20:30:54.623708Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T20:30:54.623759Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T20:30:54.623978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T20:30:54.624073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T20:30:54.624104Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T20:30:54.624318Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T20:30:54.624365Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T20:30:54.624413Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T20:30:54.624590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T20:30:54.624638Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T20:30:54.624668Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T20:30:54.624775Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T20:30:54.624873Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T20:30:54.624913Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T20:30:54.624948Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T20:30:54.625307Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=40; 2025-06-24T20:30:54.625423Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=47; 2025-06-24T20:30:54.625505Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=34; 2025-06-24T20:30:54.625592Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=42; 2025-06-24T20:30:54.625694Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T20:30:54.625815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T20:30:54.625851Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T20:30:54.625897Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
et_id=9437184;tx_id=119;this=88923004878848;method=TTxController::StartProposeOnExecute;tx_info=119:TX_KIND_SCHEMA;min=1750797055588;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=20;result=not_found; 2025-06-24T20:30:56.131808Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=119;this=88923004878848;method=TTxController::StartProposeOnExecute;tx_info=119:TX_KIND_SCHEMA;min=1750797055588;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;fline=schema.h:38;event=sync_schema; 2025-06-24T20:30:56.146171Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750797055588;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;this=88923004878848;op_tx=119:TX_KIND_SCHEMA;min=1750797055588;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750797055588;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_this=89129165874176;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-24T20:30:56.146279Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750797055588;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;this=88923004878848;op_tx=119:TX_KIND_SCHEMA;min=1750797055588;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750797055588;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_this=89129165874176;method=TTxController::FinishProposeOnComplete;tx_id=119;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:157:2179]; 2025-06-24T20:30:56.146341Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750797055588;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;this=88923004878848;op_tx=119:TX_KIND_SCHEMA;min=1750797055588;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750797055588;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_this=89129165874176;method=TTxController::FinishProposeOnComplete;tx_id=119;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=119; 2025-06-24T20:30:56.146723Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-24T20:30:56.146877Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750797055588 at tablet 9437184, mediator 0 2025-06-24T20:30:56.146932Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[36] execute at tablet 9437184 2025-06-24T20:30:56.147305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=20;result=not_found; 2025-06-24T20:30:56.147393Z node 1 :TX_COLUMNSHARD INFO: ctor_logger.h:56: EnsureTable for pathId: {internal: 20, ss: 20} ttl settings: { Version: 1 } at tablet 9437184 2025-06-24T20:30:56.147491Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tables_manager.cpp:304;method=RegisterTable;path_id=20; 2025-06-24T20:30:56.147570Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=column_engine.h:144;event=RegisterTable;path_id=20; 2025-06-24T20:30:56.148088Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=20; 2025-06-24T20:30:56.148223Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tx_controller.cpp:215;event=finished_tx;tx_id=119; 2025-06-24T20:30:56.161700Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[36] complete at tablet 9437184 CreateTable: { SeqNo { Generation: 20 } EnsureTables { Tables { PathId: 21 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4609 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { Version: 1 } } } } 2025-06-24T20:30:56.163619Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=120;this=88923004881536;method=TTxController::StartProposeOnExecute;tx_info=120:TX_KIND_SCHEMA;min=1750797055591;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-24T20:30:56.180069Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=120:TX_KIND_SCHEMA;min=1750797055591;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;this=88923004881536;op_tx=120:TX_KIND_SCHEMA;min=1750797055591;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:157:2179]; 2025-06-24T20:30:56.180165Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=120:TX_KIND_SCHEMA;min=1750797055591;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;this=88923004881536;op_tx=120:TX_KIND_SCHEMA;min=1750797055591;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=120; CreateTable: { SeqNo { Generation: 21 } EnsureTables { Tables { PathId: 22 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4610 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 
4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { Version: 1 } } } } 2025-06-24T20:30:56.181729Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=121;this=88923004883328;method=TTxController::StartProposeOnExecute;tx_info=121:TX_KIND_SCHEMA;min=1750797055593;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-24T20:30:56.195222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=121:TX_KIND_SCHEMA;min=1750797055593;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;this=88923004883328;op_tx=121:TX_KIND_SCHEMA;min=1750797055593;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:157:2179]; 2025-06-24T20:30:56.195326Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=121:TX_KIND_SCHEMA;min=1750797055593;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;this=88923004883328;op_tx=121:TX_KIND_SCHEMA;min=1750797055593;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=121; CreateTable: { SeqNo { Generation: 22 } EnsureTables { Tables { PathId: 23 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4612 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { 
Version: 1 } } } } 2025-06-24T20:30:56.196951Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=122;this=88923004885120;method=TTxController::StartProposeOnExecute;tx_info=122:TX_KIND_SCHEMA;min=1750797055594;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-24T20:30:56.210204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=122:TX_KIND_SCHEMA;min=1750797055594;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;this=88923004885120;op_tx=122:TX_KIND_SCHEMA;min=1750797055594;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:157:2179]; 2025-06-24T20:30:56.210286Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=122:TX_KIND_SCHEMA;min=1750797055594;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;this=88923004885120;op_tx=122:TX_KIND_SCHEMA;min=1750797055594;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=122; >> YdbTableSplit::SplitByLoadWithUpdates >> AsyncIndexChangeExchange::SenderShouldShakeHandsOnce [GOOD] >> AsyncIndexChangeExchange::SenderShouldShakeHandsTwice >> YdbTableSplit::SplitByLoadWithDeletes ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::DeleteQuery [GOOD] Test command err: 2025-06-24T20:29:08.711392Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615635778333161:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:08.711544Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:29:10.246274Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; E0624 20:29:12.552010910 104748 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:12.552191083 104748 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:12.746211Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:13.715248Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519615635778333161:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:13.715321Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:29:13.719330Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21225: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:21225 } ] 2025-06-24T20:29:13.991847Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:14.743466Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:14.871218Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21225: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:21225 2025-06-24T20:29:14.958542Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21225: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:21225 } ] 2025-06-24T20:29:15.024028Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:15.747993Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:16.028297Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:16.759863Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:17.033416Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:17.581122566 104907 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:17.581278853 104907 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:17.740072Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21225: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:21225 } ] 2025-06-24T20:29:17.928700Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:18.032683Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:18.934163Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:19.051486Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:19.936061Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:20.053919Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:20.937346Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:21.055707Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:21.940823Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:22.057939Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:22.308846Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21225: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:21225 2025-06-24T20:29:22.308937Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21225: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:21225 } ] E0624 20:29:22.616181718 104905 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:22.616886029 104905 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:22.945731Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:23.064181Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:23.964080Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:24.068141Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:24.971970Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:25.072096Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:26.047452Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:26.093535Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:27.047874Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:27.096061Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:27.661484483 104907 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:27.663227211 104907 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:28.052328Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:28.112554Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:28.417337Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/quotas". Create session error: [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21225: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:21225 } ] 2025-06-24T20:29:28.428687Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all add ... lt. Database : . }. CA StateFunc 271646922 2025-06-24T20:30:53.308325Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:527: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. Received TEvResolveKeySetResult update for table 'Root/yq/queries' 2025-06-24T20:30:53.308389Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:632: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. Processing resolved ShardId# 72075186224037891, partition range: [(String : yandexcloud://Execute_folder_id, String : utque7l366gbb4b5s07i) ; ()), i: 0, state ranges: 0, points: 1 2025-06-24T20:30:53.308409Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:670: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. Add point to new shardId: 72075186224037891 2025-06-24T20:30:53.308501Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:714: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. Pending shards States: TShardState{ TabletId: 72075186224037891, Last Key , Ranges: [], Points: [# 0: (String : yandexcloud://Execute_folder_id, String : utque7l366gbb4b5s07i)], RetryAttempt: 0, ResolveAttempt: 0 }; In Flight shards States: TShardState{ TabletId: 0, Last Key , Ranges: [], Points: [# 0: (String : yandexcloud://Execute_folder_id, String : utque7l366gbb4b5s07i)], RetryAttempt: 0, ResolveAttempt: 1 }; 2025-06-24T20:30:53.308515Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:458: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. effective maxinflight 1024 sorted 0 2025-06-24T20:30:53.308528Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:462: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. BEFORE: 1.0 2025-06-24T20:30:53.308570Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:884: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. Send EvRead to shardId: 72075186224037891, tablePath: Root/yq/queries, ranges: , limit: (empty maybe), readId = 0, reverse = 0, snapshot = (txid=0,step=0), lockTxId = 0, lockNodeId = 0 2025-06-24T20:30:53.308610Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:476: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. AFTER: 0.1 2025-06-24T20:30:53.308622Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:480: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. Scheduled table scans, in flight: 1 shards. pending shards to read: 0, 2025-06-24T20:30:53.312901Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:958: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. Recv TEvReadResult from ShardID=72075186224037891, ReadId=0, Status=SUCCESS, Finished=1, RowCount=0, TxLocks= , BrokenTxLocks= 2025-06-24T20:30:53.312943Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1050: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. Taken 0 locks 2025-06-24T20:30:53.312960Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1064: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. new data for read #0 seqno = 1 finished = 1 2025-06-24T20:30:53.312988Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616086297000239:3079], TxId: 281474976715812, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5ty74by2j352k6nnq42x. SessionId : ydb://session/3?node_id=4&id=Y2MyMDNiMjItNDdkYTk1ZDItMzM4YTc5YjAtNmMyMTIyYzc=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. 
CA StateFunc 276037645 2025-06-24T20:30:53.313009Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616086297000239:3079], TxId: 281474976715812, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5ty74by2j352k6nnq42x. SessionId : ydb://session/3?node_id=4&id=Y2MyMDNiMjItNDdkYTk1ZDItMzM4YTc5YjAtNmMyMTIyYzc=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-24T20:30:53.313034Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1331: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. enter getasyncinputdata results size 1, freeSpace 8388608 2025-06-24T20:30:53.313055Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1227: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. enter pack cells method shardId: 72075186224037891 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-06-24T20:30:53.313071Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1308: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. exit pack cells method shardId: 72075186224037891 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-06-24T20:30:53.313084Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1365: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. returned 0 rows; processed 0 rows 2025-06-24T20:30:53.313124Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1402: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. dropping batch for read #0 2025-06-24T20:30:53.313135Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:458: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. effective maxinflight 1024 sorted 0 2025-06-24T20:30:53.313148Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:480: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, 2025-06-24T20:30:53.313166Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976715812, task: 1, CA Id [4:7519616086297000239:3079]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2025-06-24T20:30:53.313255Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7519616086297000239:3079], TxId: 281474976715812, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5ty74by2j352k6nnq42x. SessionId : ydb://session/3?node_id=4&id=Y2MyMDNiMjItNDdkYTk1ZDItMzM4YTc5YjAtNmMyMTIyYzc=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-24T20:30:53.313275Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616086297000240:3080], TxId: 281474976715812, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=Y2MyMDNiMjItNDdkYTk1ZDItMzM4YTc5YjAtNmMyMTIyYzc=. TraceId : 01jyht5ty74by2j352k6nnq42x. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646923 2025-06-24T20:30:53.313294Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976715812, task: 2. Finish input channelId: 1, from: [4:7519616086297000239:3079] 2025-06-24T20:30:53.313319Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616086297000240:3080], TxId: 281474976715812, task: 2. 
Ctx: { SessionId : ydb://session/3?node_id=4&id=Y2MyMDNiMjItNDdkYTk1ZDItMzM4YTc5YjAtNmMyMTIyYzc=. TraceId : 01jyht5ty74by2j352k6nnq42x. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-24T20:30:53.313366Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7519616086297000240:3080], TxId: 281474976715812, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=Y2MyMDNiMjItNDdkYTk1ZDItMzM4YTc5YjAtNmMyMTIyYzc=. TraceId : 01jyht5ty74by2j352k6nnq42x. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-24T20:30:53.313390Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616086297000239:3079], TxId: 281474976715812, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5ty74by2j352k6nnq42x. SessionId : ydb://session/3?node_id=4&id=Y2MyMDNiMjItNDdkYTk1ZDItMzM4YTc5YjAtNmMyMTIyYzc=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646927 2025-06-24T20:30:53.313405Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616086297000239:3079], TxId: 281474976715812, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5ty74by2j352k6nnq42x. SessionId : ydb://session/3?node_id=4&id=Y2MyMDNiMjItNDdkYTk1ZDItMzM4YTc5YjAtNmMyMTIyYzc=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-24T20:30:53.313426Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715812, task: 1. Tasks execution finished 2025-06-24T20:30:53.313437Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7519616086297000239:3079], TxId: 281474976715812, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5ty74by2j352k6nnq42x. SessionId : ydb://session/3?node_id=4&id=Y2MyMDNiMjItNDdkYTk1ZDItMzM4YTc5YjAtNmMyMTIyYzc=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-06-24T20:30:53.313544Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715812, task: 1. pass away 2025-06-24T20:30:53.313638Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715812;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T20:30:53.314005Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519616086297000240:3080], TxId: 281474976715812, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=Y2MyMDNiMjItNDdkYTk1ZDItMzM4YTc5YjAtNmMyMTIyYzc=. TraceId : 01jyht5ty74by2j352k6nnq42x. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-24T20:30:53.314041Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715812, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-24T20:30:53.314052Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715812, task: 2. Tasks execution finished 2025-06-24T20:30:53.314064Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7519616086297000240:3080], TxId: 281474976715812, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=Y2MyMDNiMjItNDdkYTk1ZDItMzM4YTc5YjAtNmMyMTIyYzc=. TraceId : 01jyht5ty74by2j352k6nnq42x. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-06-24T20:30:53.314120Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715812, task: 2. pass away 2025-06-24T20:30:53.314168Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715812;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T20:30:53.320263Z node 4 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: DescribeQueryRequest - DescribeQueryResult: {query_id: "utque7l366gbb4b5s07i" } ERROR: {
: Error: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_queries.cpp:664: Query does not exist or permission denied. Please check the id of the query or your access rights, code: 1000 } 2025-06-24T20:30:53.438647Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:3213: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:3213 2025-06-24T20:30:54.432329Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: Client is stopped |88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut |88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut |88.4%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/test-results/unittest/{meta.json ... results_accumulator.log} |88.4%| [LD] {RESULT} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut >> Yq_1::Basic_EmptyList [GOOD] >> Yq_1::Basic_EmptyDict >> PrivateApi::Nodes [GOOD] >> KqpPg::DuplicatedColumns-useSink [GOOD] >> KqpPg::InsertFromSelect_NoReorder+useSink |88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup |88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup |88.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup >> Cdc::UpdatesLog[YdsRunner] [GOOD] >> Cdc::UpdatesLog[TopicRunner] >> TPDiskUtil::DriveEstimator [GOOD] >> TPDiskUtil::OffsetParsingCorrectness ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::CreateQuery_Without_Connection [GOOD] Test command err: 2025-06-24T20:29:15.271021Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615666241967055:2207];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:15.271095Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0624 20:29:19.422845513 105547 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:19.445879992 105547 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:21.556760Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:10095: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:10095 } ] 2025-06-24T20:29:23.272241Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:10095: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:10095 2025-06-24T20:29:23.854230Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:10095: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:10095 } ] E0624 20:29:24.093334653 106112 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:24.095695394 106112 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:26.640834Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:10095: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:10095 } ] E0624 20:29:29.274634243 106112 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:29.276395878 106112 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:30.919862Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:29:30.963464Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:31.236605Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:10095: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:10095 2025-06-24T20:29:31.237772Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:10095: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:10095 2025-06-24T20:29:31.272555Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:10095: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:10095 } ] 2025-06-24T20:29:32.735059Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519615666241967055:2207];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:32.735101Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:29:32.746130Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:33.789516Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:33.789562Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:34.313877085 106111 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:34.314361019 106111 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:34.856531Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:34.856558Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:35.860847Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:35.860871Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:36.863706Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:36.863732Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:37.159459Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:10095: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:10095 } ] 2025-06-24T20:29:37.215371Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/queries". Create session error: [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:10095: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:10095 } ] 2025-06-24T20:29:37.874502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:37.874810Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:38.882931Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:38.882967Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:39.444979680 106111 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:39.459666214 106111 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:39.895432Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:39.895462Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:40.901134Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:40.901158Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:41.904902Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:41.904926Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:42.915894Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:42.915920Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:43.925978Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:43.926028Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:44.516120171 106111 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:44.516542383 106111 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:44.935857Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:44.936212Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action= ... : TxId: 281474976715796, task: 1, CA Id [1:7519616072089647138:3012]. enter pack cells method shardId: 72075186224037900 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-06-24T20:30:50.294328Z node 1 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1308: TxId: 281474976715796, task: 1, CA Id [1:7519616072089647138:3012]. exit pack cells method shardId: 72075186224037900 processedRows: 0 packed rows: 1 freeSpace: 8388508 2025-06-24T20:30:50.294346Z node 1 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1365: TxId: 281474976715796, task: 1, CA Id [1:7519616072089647138:3012]. returned 1 rows; processed 1 rows 2025-06-24T20:30:50.294384Z node 1 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1402: TxId: 281474976715796, task: 1, CA Id [1:7519616072089647138:3012]. dropping batch for read #0 2025-06-24T20:30:50.294395Z node 1 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:458: TxId: 281474976715796, task: 1, CA Id [1:7519616072089647138:3012]. effective maxinflight 1024 sorted 0 2025-06-24T20:30:50.294404Z node 1 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:480: TxId: 281474976715796, task: 1, CA Id [1:7519616072089647138:3012]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, 2025-06-24T20:30:50.294419Z node 1 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976715796, task: 1, CA Id [1:7519616072089647138:3012]. 
returned async data processed rows 1 left freeSpace 8388508 received rows 1 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2025-06-24T20:30:50.300318Z node 1 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:617: SelfId: [1:7519616072089647117:2342], Table: `Root/yq/quotas` ([72057594046644480:13:1]), SessionActorId: [1:7519616007665132808:2342]Recv EvWriteResult from ShardID=72075186224037894, Status=STATUS_COMPLETED, TxId=55, Locks= , Cookie=1 2025-06-24T20:30:50.300361Z node 1 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:869: SelfId: [1:7519616072089647117:2342], Table: `Root/yq/quotas` ([72057594046644480:13:1]), SessionActorId: [1:7519616007665132808:2342]Got completed result TxId=55, TabletId=72075186224037894, Cookie=1, Mode=3, Locks= 2025-06-24T20:30:50.300415Z node 1 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2912: SelfId: [1:7519616072089647109:2342], SessionActorId: [1:7519616007665132808:2342], Committed TxId=0 2025-06-24T20:30:50.304048Z node 1 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:617: SelfId: [1:7519616072089647143:2535], Table: `Root/yq/quotas` ([72057594046644480:13:1]), SessionActorId: [1:7519616024845003968:2535]Recv EvWriteResult from ShardID=72075186224037894, Status=STATUS_COMPLETED, TxId=56, Locks= , Cookie=1 2025-06-24T20:30:50.304088Z node 1 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:869: SelfId: [1:7519616072089647143:2535], Table: `Root/yq/quotas` ([72057594046644480:13:1]), SessionActorId: [1:7519616024845003968:2535]Got completed result TxId=56, TabletId=72075186224037894, Cookie=1, Mode=3, Locks= 2025-06-24T20:30:50.304153Z node 1 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2912: SelfId: [1:7519616072089647127:2535], SessionActorId: [1:7519616024845003968:2535], Committed TxId=0 2025-06-24T20:30:50.328548Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519616072089647138:3012], TxId: 281474976715796, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=NzNlOTM1NDQtZjZmYWZjMDMtOTI2Mjg0NTAtZWFkZmUwNjc=. CustomerSuppliedId : . TraceId : 01jyht5rcwc079wdxf9krd4txw. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-24T20:30:50.328608Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519616072089647139:3013], TxId: 281474976715796, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzNlOTM1NDQtZjZmYWZjMDMtOTI2Mjg0NTAtZWFkZmUwNjc=. TraceId : 01jyht5rcwc079wdxf9krd4txw. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646923 2025-06-24T20:30:50.328640Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976715796, task: 2. Finish input channelId: 1, from: [1:7519616072089647138:3012] 2025-06-24T20:30:50.331324Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519616072089647138:3012], TxId: 281474976715796, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=NzNlOTM1NDQtZjZmYWZjMDMtOTI2Mjg0NTAtZWFkZmUwNjc=. CustomerSuppliedId : . TraceId : 01jyht5rcwc079wdxf9krd4txw. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:30:50.331405Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976715796, task: 1. 
Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-06-24T20:30:50.331428Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519616072089647138:3012], TxId: 281474976715796, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=NzNlOTM1NDQtZjZmYWZjMDMtOTI2Mjg0NTAtZWFkZmUwNjc=. CustomerSuppliedId : . TraceId : 01jyht5rcwc079wdxf9krd4txw. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646927 2025-06-24T20:30:50.332629Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519616072089647139:3013], TxId: 281474976715796, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzNlOTM1NDQtZjZmYWZjMDMtOTI2Mjg0NTAtZWFkZmUwNjc=. TraceId : 01jyht5rcwc079wdxf9krd4txw. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-24T20:30:50.335228Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519616072089647138:3012], TxId: 281474976715796, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=NzNlOTM1NDQtZjZmYWZjMDMtOTI2Mjg0NTAtZWFkZmUwNjc=. CustomerSuppliedId : . TraceId : 01jyht5rcwc079wdxf9krd4txw. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:30:50.335294Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715796, task: 1. Tasks execution finished 2025-06-24T20:30:50.335311Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7519616072089647138:3012], TxId: 281474976715796, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=NzNlOTM1NDQtZjZmYWZjMDMtOTI2Mjg0NTAtZWFkZmUwNjc=. CustomerSuppliedId : . TraceId : 01jyht5rcwc079wdxf9krd4txw. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-24T20:30:50.335460Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715796, task: 1. pass away 2025-06-24T20:30:50.335591Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715796;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T20:30:50.344784Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519616072089647139:3013], TxId: 281474976715796, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzNlOTM1NDQtZjZmYWZjMDMtOTI2Mjg0NTAtZWFkZmUwNjc=. TraceId : 01jyht5rcwc079wdxf9krd4txw. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-24T20:30:50.344934Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519616072089647139:3013], TxId: 281474976715796, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzNlOTM1NDQtZjZmYWZjMDMtOTI2Mjg0NTAtZWFkZmUwNjc=. TraceId : 01jyht5rcwc079wdxf9krd4txw. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-24T20:30:50.344974Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715796, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-24T20:30:50.344986Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715796, task: 2. Tasks execution finished 2025-06-24T20:30:50.344999Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7519616072089647139:3013], TxId: 281474976715796, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzNlOTM1NDQtZjZmYWZjMDMtOTI2Mjg0NTAtZWFkZmUwNjc=. TraceId : 01jyht5rcwc079wdxf9krd4txw. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-06-24T20:30:50.345097Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715796, task: 2. pass away 2025-06-24T20:30:50.345168Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715796;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T20:30:50.347354Z node 1 :KQP_SESSION ERROR: kqp_session_actor.cpp:2865: SessionId: ydb://session/3?node_id=1&id=NzNlOTM1NDQtZjZmYWZjMDMtOTI2Mjg0NTAtZWFkZmUwNjc=, ActorId: [1:7519616007665132973:2365], ActorState: ReadyState, Internal error, message: TKqpSessionActor in state ReadyState received unexpected event NKikimr::NGRpcService::TEvClientLost(0x108c0001) sender: [1:8320808721877066593:7169396] 2025-06-24T20:30:50.352203Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715797. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=1&id=NzNlOTM1NDQtZjZmYWZjMDMtOTI2Mjg0NTAtZWFkZmUwNjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Database not set, use /Root 2025-06-24T20:30:50.360025Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: CLIENT_CANCELLED
: Error: GRpc error: (1): Cancelled on the server side
: Error: Grpc error response on endpoint [::]:14358 2025-06-24T20:30:50.370114Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage.cpp:561: DB Error, Status: CLIENT_CANCELLED, Issues: [ {
: Error: GRpc error: (1): Cancelled on the server side } {
: Error: Grpc error response on endpoint localhost:14358 } ], Query: --!syntax_v1 -- Query name: NodesHealthCheck(read) PRAGMA TablePathPrefix("Root/yq"); DECLARE $now as Timestamp; DECLARE $tenant as String; SELECT `node_id`, `instance_id`, `hostname`, `active_workers`, `memory_limit`, `memory_allocated`, `interconnect_port`, `node_address`, `data_center` FROM `nodes` WHERE `tenant` = $tenant AND `expire_at` >= $now; 2025-06-24T20:30:50.386396Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: NodesHealthCheckRequest - NodesHealthCheckResult: {tenant: "TestTenant" node { node_id: 1 instance_id: "b095cf68-4be6ae7c-4f9b808a-d5580778" hostname: "ghrun-4pjfmvffsq" node_address: "127.0.1.1" } } ERROR: [ {
: Error: GRpc error: (1): Cancelled on the server side } {
: Error: Grpc error response on endpoint localhost:14358 } ] 2025-06-24T20:30:50.386988Z node 1 :YQL_NODES_MANAGER ERROR: nodes_health_check.cpp:65: Failed with code: INTERNAL_ERROR Details:
: Error: Can't do NodesHealthCheck: (yexception) ydb/core/fq/libs/actors/nodes_health_check.cpp:95:
: Error: GRpc error: (1): Cancelled on the server side
: Error: Grpc error response on endpoint localhost:14358 2025-06-24T20:30:50.855947Z node 1 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:14358: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:14358 [good] Yq_1::CreateQuery_Without_Connection |88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |88.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> PrivateApi::Nodes [GOOD] Test command err: 2025-06-24T20:29:14.099758Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615661104848942:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:14.100405Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0624 20:29:15.412109802 105575 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:15.412283199 105575 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:15.418307Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:29:15.425675Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:16.471457Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:16.626199Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:2104: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:2104 2025-06-24T20:29:17.215132Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:2104: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:2104 } ] 2025-06-24T20:29:17.551589Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:17.961893Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:2104: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:2104 } ] 2025-06-24T20:29:18.555921Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:19.079566Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519615661104848942:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:19.079853Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:29:19.558168Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:20.088906Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:20.415906720 105814 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:20.416088088 105814 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:20.575248Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:21.084606Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:2104: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:2104 } ] 2025-06-24T20:29:21.109883Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:21.123297Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:2104: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:2104 2025-06-24T20:29:22.304462Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:22.304490Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:23.309076Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:23.310047Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:24.329491Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:24.329523Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:24.843693Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:2104: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:2104 } ] 2025-06-24T20:29:25.336372Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:25.337354Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:25.561722031 105814 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:25.562415829 105814 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:26.340291Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:26.340322Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:27.345805Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:27.345840Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:28.399684Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:28.400981Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:29.420808Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:29.450332Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:30.416264Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:30.450015Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:30.652549654 105814 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:30.653343338 105814 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:31.431688Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: 
LookupError; 2025-06-24T20:29:31.511749Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:32.489412Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:32.536955Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:32.698659Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:2104: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:2104 2025-06-24T20:29:32.710191Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all add ... 7519616105963850795:2348], task: 1 2025-06-24T20:30:57.713115Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:141: SelfId: [7:7519616105963850795:2348], TxId: 281474976715686, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5znd6kv69yv8taxf4w7t. SessionId : ydb://session/3?node_id=7&id=NTE0ODc1NjEtNTNkNzliZS0yYmY2NWZjLWJmYWM3Mzlm. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Set execution timeout 299.997505s 2025-06-24T20:30:57.713511Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1452: SelfId: [7:7519616105963850795:2348], TxId: 281474976715686, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5znd6kv69yv8taxf4w7t. SessionId : ydb://session/3?node_id=7&id=NTE0ODc1NjEtNTNkNzliZS0yYmY2NWZjLWJmYWM3Mzlm. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Create sink for output 0 { Sink { Type: "KqpTableSink" Settings { type_url: "type.googleapis.com/NKikimrKqp.TKqpTableSinkSettings" value: "\032\035\n\rRoot/yq/nodes\020\200\202\224\204\200\200\200\200\001\030\t(\001\"\r\n\006tenant\020\001 \201 \"\r\n\007node_id\020\003 \002*\024\n\016active_workers\020\005 \004*\022\n\013data_center\020\013 \201 *\017\n\texpire_at\020\010 2*\017\n\010hostname\020\004 \201 *\022\n\013instance_id\020\002 \201 *\027\n\021interconnect_port\020\t \002*\026\n\020memory_allocated\020\007 \004*\022\n\014memory_limit\020\006 \004*\023\n\014node_address\020\n \201 *\r\n\007node_id\020\003 \002*\r\n\006tenant\020\001 \201 0\245\247\200\200\200\200@8\007@\000H\001R\022\t\'\014H\373\001\013[h\021,\t\000\000\007\000\020\000X\000`\000h\004h\nh\007h\003h\002h\010h\006h\005h\th\001h\000x\000" } } } 2025-06-24T20:30:57.713656Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [7:7519616105963850795:2348], TxId: 281474976715686, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5znd6kv69yv8taxf4w7t. SessionId : ydb://session/3?node_id=7&id=NTE0ODc1NjEtNTNkNzliZS0yYmY2NWZjLWJmYWM3Mzlm. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646926 2025-06-24T20:30:57.713691Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [7:7519616105963850795:2348], TxId: 281474976715686, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5znd6kv69yv8taxf4w7t. SessionId : ydb://session/3?node_id=7&id=NTE0ODc1NjEtNTNkNzliZS0yYmY2NWZjLWJmYWM3Mzlm. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Received channels info: 2025-06-24T20:30:57.713776Z node 7 :KQP_COMPUTE DEBUG: dq_sync_compute_actor_base.h:358: SelfId: [7:7519616105963850795:2348], TxId: 281474976715686, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5znd6kv69yv8taxf4w7t. SessionId : ydb://session/3?node_id=7&id=NTE0ODc1NjEtNTNkNzliZS0yYmY2NWZjLWJmYWM3Mzlm. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. About to drain async output 0. FreeSpace: 67108864, allowedOvercommit: 4194304, toSend: 71303168, finished: 0 2025-06-24T20:30:57.713892Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3311: TxId: 281474976715686, task: 1. Add data: 122 / 122 2025-06-24T20:30:57.713949Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3280: TxId: 281474976715686, task: 1. Send data=122, closed=1, bufferActorId=[7:7519616105963850791:2348] 2025-06-24T20:30:57.713969Z node 7 :KQP_COMPUTE DEBUG: dq_sync_compute_actor_base.h:372: SelfId: [7:7519616105963850795:2348], TxId: 281474976715686, task: 1. 
Ctx: { CustomerSuppliedId : . TraceId : 01jyht5znd6kv69yv8taxf4w7t. SessionId : ydb://session/3?node_id=7&id=NTE0ODc1NjEtNTNkNzliZS0yYmY2NWZjLWJmYWM3Mzlm. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Drain async output 0. Free space decreased: -9223372036787666944, sent data from buffer: 122 2025-06-24T20:30:57.713987Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715686, task: 1. Tasks execution finished 2025-06-24T20:30:57.714001Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1587: SelfId: [7:7519616105963850795:2348], TxId: 281474976715686, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5znd6kv69yv8taxf4w7t. SessionId : ydb://session/3?node_id=7&id=NTE0ODc1NjEtNTNkNzliZS0yYmY2NWZjLWJmYWM3Mzlm. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Waiting finish of sink[0] 2025-06-24T20:30:57.714022Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [7:7519616105963850795:2348], TxId: 281474976715686, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5znd6kv69yv8taxf4w7t. SessionId : ydb://session/3?node_id=7&id=NTE0ODc1NjEtNTNkNzliZS0yYmY2NWZjLWJmYWM3Mzlm. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646926 2025-06-24T20:30:57.714042Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [7:7519616105963850795:2348], TxId: 281474976715686, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5znd6kv69yv8taxf4w7t. SessionId : ydb://session/3?node_id=7&id=NTE0ODc1NjEtNTNkNzliZS0yYmY2NWZjLWJmYWM3Mzlm. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Received channels info: 2025-06-24T20:30:57.714058Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715686, task: 1. Tasks execution finished 2025-06-24T20:30:57.714069Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1587: SelfId: [7:7519616105963850795:2348], TxId: 281474976715686, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5znd6kv69yv8taxf4w7t. SessionId : ydb://session/3?node_id=7&id=NTE0ODc1NjEtNTNkNzliZS0yYmY2NWZjLWJmYWM3Mzlm. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Waiting finish of sink[0] 2025-06-24T20:30:57.714113Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [7:7519616105963850795:2348], TxId: 281474976715686, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5znd6kv69yv8taxf4w7t. SessionId : ydb://session/3?node_id=7&id=NTE0ODc1NjEtNTNkNzliZS0yYmY2NWZjLWJmYWM3Mzlm. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:30:57.714135Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715686, task: 1. Tasks execution finished 2025-06-24T20:30:57.714146Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1587: SelfId: [7:7519616105963850795:2348], TxId: 281474976715686, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5znd6kv69yv8taxf4w7t. SessionId : ydb://session/3?node_id=7&id=NTE0ODc1NjEtNTNkNzliZS0yYmY2NWZjLWJmYWM3Mzlm. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Waiting finish of sink[0] 2025-06-24T20:30:57.714207Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1860: SelfId: [7:7519616105963850791:2348], SessionActorId: [7:7519616097373914300:2348], Create new TableWriteActor for table `Root/yq/nodes` ([72057594046644480:9:1]). lockId=281474976715685. 
ActorId=[7:7519616105963850798:2348] 2025-06-24T20:30:57.714257Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:388: Table: `Root/yq/nodes` ([72057594046644480:9:1]), SessionActorId: [7:7519616097373914300:2348]Open: token=0 2025-06-24T20:30:57.714281Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1987: SelfId: [7:7519616105963850791:2348], SessionActorId: [7:7519616097373914300:2348], ProcessRequestQueue [OwnerId: 72057594046644480, LocalPathId: 9] NOT READY queue=1 2025-06-24T20:30:57.714337Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:394: SelfId: [7:7519616105963850798:2348], Table: `Root/yq/nodes` ([72057594046644480:9:1]), SessionActorId: [7:7519616097373914300:2348]Write: token=0 2025-06-24T20:30:57.714437Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:402: SelfId: [7:7519616105963850798:2348], Table: `Root/yq/nodes` ([72057594046644480:9:1]), SessionActorId: [7:7519616097373914300:2348]Close: token=0 2025-06-24T20:30:57.714472Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3180: SelfId: [7:7519616105963850797:2348], TxId: 281474976715686, task: 1. TKqpForwardWriteActor recieve EvBufferWriteResult from [7:7519616105963850791:2348] 2025-06-24T20:30:57.714487Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3198: SelfId: [7:7519616105963850797:2348], TxId: 281474976715686, task: 1. Finished 2025-06-24T20:30:57.714503Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [7:7519616105963850795:2348], TxId: 281474976715686, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5znd6kv69yv8taxf4w7t. SessionId : ydb://session/3?node_id=7&id=NTE0ODc1NjEtNTNkNzliZS0yYmY2NWZjLWJmYWM3Mzlm. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:30:57.714521Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715686, task: 1. Tasks execution finished 2025-06-24T20:30:57.714537Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [7:7519616105963850795:2348], TxId: 281474976715686, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyht5znd6kv69yv8taxf4w7t. SessionId : ydb://session/3?node_id=7&id=NTE0ODc1NjEtNTNkNzliZS0yYmY2NWZjLWJmYWM3Mzlm. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-24T20:30:57.714620Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715686, task: 1. pass away 2025-06-24T20:30:57.714695Z node 7 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715686;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T20:30:57.715093Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2110: SelfId: [7:7519616105963850791:2348], SessionActorId: [7:7519616097373914300:2348], Start immediate commit 2025-06-24T20:30:57.715108Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:923: SelfId: [7:7519616105963850798:2348], Table: `Root/yq/nodes` ([72057594046644480:9:1]), SessionActorId: [7:7519616097373914300:2348]SetImmediateCommit 2025-06-24T20:30:57.715124Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2052: SelfId: [7:7519616105963850791:2348], SessionActorId: [7:7519616097373914300:2348], Flush data 2025-06-24T20:30:57.715290Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1050: SelfId: [7:7519616105963850798:2348], Table: `Root/yq/nodes` ([72057594046644480:9:1]), SessionActorId: [7:7519616097373914300:2348]Send EvWrite to ShardID=72075186224037894, isPrepare=0, isImmediateCommit=1, TxId=0, LockTxId=0, LockNodeId=0, Locks= LockId: 281474976715685 DataShard: 72075186224037894 Generation: 1 Counter: 3 SchemeShard: 72057594046644480 PathId: 9, Size=228, Cookie=1, OperationsCount=1, IsFinal=1, Attempts=0, Mode=3, BufferMemory=228 2025-06-24T20:30:57.724123Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:617: SelfId: [7:7519616105963850798:2348], Table: `Root/yq/nodes` ([72057594046644480:9:1]), SessionActorId: [7:7519616097373914300:2348]Recv EvWriteResult from ShardID=72075186224037894, Status=STATUS_COMPLETED, TxId=10, Locks= , Cookie=1 2025-06-24T20:30:57.724168Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:869: SelfId: [7:7519616105963850798:2348], Table: `Root/yq/nodes` ([72057594046644480:9:1]), SessionActorId: [7:7519616097373914300:2348]Got completed result TxId=10, TabletId=72075186224037894, Cookie=1, Mode=3, Locks= 2025-06-24T20:30:57.724225Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2912: SelfId: [7:7519616105963850791:2348], SessionActorId: [7:7519616097373914300:2348], Committed TxId=0 2025-06-24T20:30:58.083398Z node 7 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:20670: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:20670 >> Cdc::NewAndOldImagesLog[YdsRunner] [GOOD] >> Cdc::NewAndOldImagesLog[TopicRunner] >> TPDiskUtil::OffsetParsingCorrectness [GOOD] >> TPDiskUtil::PayloadParsingTest [GOOD] >> TPDiskUtil::FormatSectorMap >> TPDiskUtil::FormatSectorMap [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultText-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultTextNotNull+useSink >> Cdc::DocApi[YdsRunner] [GOOD] >> Cdc::DocApi[TopicRunner] >> TBlobStorageProxyTest::TestProxyLongTailDiscoverSingleFailure [GOOD] >> AsyncIndexChangeExchange::SenderShouldShakeHandsTwice [GOOD] >> AsyncIndexChangeExchange::SenderShouldShakeHandsAfterAddingIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> TPDiskUtil::FormatSectorMap [GOOD] Test command err: Path# /home/runner/.ya/build/build_root/2btm/0031d6/r3tmp/tmpV4pAig//pdisk/data.bin testCase# 0 plainDataChunk# 0 all chunk reads are received all chunk writes are received all log writes are received testCase# 1 plainDataChunk# 1 all chunk reads are received all chunk writes are received all log writes are received testCase# 2 plainDataChunk# 0 restart all chunk reads are received all chunk writes are received all log writes are received testCase# 3 plainDataChunk# 1 restart all chunk reads are received all chunk writes are received all log writes are received reformat testCase# 0 plainDataChunk# 0 all chunk reads are received all chunk writes are received all log writes are received testCase# 1 plainDataChunk# 1 all chunk reads are received all chunk writes are received all log writes are received testCase# 2 plainDataChunk# 0 restart all chunk reads are received all chunk writes are received (TWithBackTrace) Event queue is still empty.ydb/library/actors/testlib/test_runtime.cpp:1375: TBackTrace::Capture()+28 (0x2B5DF9C) TWithBackTrace::TWithBackTrace<>()+65 (0x6F8A381) NActors::TTestActorRuntimeBase::DispatchEventsInternal(NActors::TDispatchOptions const&, TInstant)+29118 (0x6F8637E) NActors::TTestActorRuntimeBase::WaitForEdgeEvents(std::__y1::function&)>, TSet, std::__y1::allocator> const&, TDuration)+1076 (0x6F8C674) NKikimr::NPDisk::TEvLogResult* NActors::TTestActorRuntimeBase::GrabEdgeEventIf(TAutoPtr&, std::__y1::function, TDuration)+643 (0x2395DB3) THolder NActors::TTestActorRuntimeBase::GrabEdgeEvent(TDuration)+332 (0x23955FC) NKikimr::NTestSuiteTPDiskTest::TTestCaseTestStartEncryptedOrPlainAndRestart::Execute_(NUnitTest::TTestContext&)+9993 (0x2322DC9) std::__y1::__function::__func, void ()>::operator()()+280 (0x235F698) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x2C3E546) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x2C0D969) NKikimr::NTestSuiteTPDiskTest::TCurrentTest::Execute()+1204 (0x235E864) NUnitTest::TTestFactory::Execute()+2438 (0x2C0F236) NUnitTest::RunMain(int, char**)+5213 (0x2C38A2D) ??+0 (0x7F71D658AD90) __libc_start_main+128 (0x7F71D658AE40) _start+41 (0x222A029) |88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut |88.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut |88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> 
TBlobStorageProxyTest::TestProxyLongTailDiscoverSingleFailure [GOOD] Test command err: 2025-06-24T20:30:52.499402Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/001f78/r3tmp/tmphufUCK//vdisk_bad_0/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 1 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1 2025-06-24T20:30:52.582511Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1 VDISK[0:_:0:0:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 
0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T20:30:54.314252Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/001f78/r3tmp/tmphufUCK//vdisk_bad_1/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 2 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 2 2025-06-24T20:30:54.362619Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 2 VDISK[0:_:0:1:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 
TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T20:30:56.986501Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 3 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/001f78/r3tmp/tmphufUCK//vdisk_bad_2/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 3 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 3 2025-06-24T20:30:57.014644Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 3 VDISK[0:_:0:2:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 
BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 3 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T20:30:58.962043Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 4 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/001f78/r3tmp/tmphufUCK//vdisk_bad_3/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 4 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 4 2025-06-24T20:30:58.966458Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 4 VDISK[0:_:0:3:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 
LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 4 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T20:31:01.014522Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 5 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/001f78/r3tmp/tmphufUCK//vdisk_bad_4/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 5 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 5 2025-06-24T20:31:01.130746Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 5 VDISK[0:_:0:4:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 
LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 5 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T20:31:03.155886Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 6 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/001f78/r3tmp/tmphufUCK//vdisk_bad_5/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 6 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 6 2025-06-24T20:31:03.174553Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 6 VDISK[0:_:0:5:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# 
true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 6 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR >> TBlobStorageProxyTest::TestProxyRestoreOnGetMirror [GOOD] >> THiveTest::TestNoMigrationToSelf |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestProxyRestoreOnGetMirror [GOOD] >> THiveTest::TestNoMigrationToSelf [GOOD] >> THiveTest::TestReCreateTablet |88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut |88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut |88.4%| [LD] {RESULT} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut >> THiveTest::TestReCreateTablet [GOOD] >> THiveTest::TestReCreateTabletError >> KqpPg::InsertFromSelect_NoReorder+useSink [GOOD] >> KqpPg::DropTablePg >> THiveTest::TestFollowers >> Cdc::UpdatesLog[TopicRunner] [GOOD] >> Cdc::VirtualTimestamps[PqRunner] >> THiveTest::TestReCreateTabletError [GOOD] >> THiveTest::TestNodeDisconnect >> TCutHistoryRestrictions::BasicTest [GOOD] >> TCutHistoryRestrictions::EmptyAllowList [GOOD] >> TCutHistoryRestrictions::EmptyDenyList [GOOD] >> TCutHistoryRestrictions::BothListsEmpty [GOOD] >> ObjectDistribution::TestImbalanceCalcualtion >> ObjectDistribution::TestImbalanceCalcualtion [GOOD] >> ObjectDistribution::TestAllowedDomainsAndDown >> ObjectDistribution::TestAllowedDomainsAndDown [GOOD] >> ObjectDistribution::TestAddSameNode [GOOD] >> ObjectDistribution::TestManyIrrelevantNodes |88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader |88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader 
|88.4%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader >> Cdc::NewAndOldImagesLog[TopicRunner] [GOOD] >> Cdc::NewAndOldImagesLogDebezium >> THiveTest::TestDrain >> KqpPg::TableArrayInsert+useSink [GOOD] >> KqpPg::TableArrayInsert-useSink >> THiveTest::TestNodeDisconnect [GOOD] >> THiveTest::TestReassignGroupsWithRecreateTablet >> TCutHistoryRestrictions::SameTabletInBothLists [GOOD] >> THeavyPerfTest::TTestLoadEverything >> THiveTest::TestFollowers [GOOD] >> THiveTest::TestFollowersReconfiguration >> THiveTest::TestReassignGroupsWithRecreateTablet [GOOD] >> THiveTest::TestReassignUseRelativeSpace >> KqpPg::InsertFromSelect_Simple+useSink [GOOD] >> KqpPg::InsertFromSelect_Simple-useSink >> AsyncIndexChangeExchange::SenderShouldShakeHandsAfterAddingIndex [GOOD] >> AsyncIndexChangeExchange::ShouldDeliverChangesOnFreshTable >> THiveTest::TestUpdateChannelValues >> KqpPg::InsertValuesFromTableWithDefaultTextNotNull+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultTextNotNull-useSink >> Donor::ConsistentWritesWhenSwitchingToDonorMode [GOOD] >> THiveTest::TestFollowersReconfiguration [GOOD] >> THiveTest::TestFollowerPromotion >> THiveTest::TestReassignUseRelativeSpace [GOOD] >> THiveTest::TestManyFollowersOnOneNode >> KqpPg::CreateTableBulkUpsertAndRead [GOOD] >> KqpPg::CopyTableSerialColumns+useSink >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCleanIndex [GOOD] >> THiveTest::TestUpdateChannelValues [GOOD] >> THiveTest::TestStorageBalancer ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::ConsistentWritesWhenSwitchingToDonorMode [GOOD] Test command err: RandomSeed# 12051741388102308441 Reassign# 2 -- VSlotId { NodeId: 3 PDiskId: 1000 VSlotId: 1000 } GroupId: 2181038080 GroupGeneration: 1 VDiskKind: "Default" FailDomainIdx: 2 VDiskMetrics { SatisfactionRank: 0 VSlotId { NodeId: 3 PDiskId: 1000 VSlotId: 1000 } State: OK Replicated: true DiskSpace: Green IsThrottling: false ThrottlingRate: 1000 } Status: "READY" Ready: true Put# [1:1:1:0:0:21:0] Put# [1:1:2:0:0:6:0] Put# [1:1:3:0:0:19:0] 2025-06-24T20:28:11.081624Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:28:11.088518Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 13529189485853287491] 2025-06-24T20:28:11.112164Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:1:0:0:21:6] 2025-06-24T20:28:11.112414Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 1 PartsResurrected# 1 Put# [1:1:4:0:0:99:0] Put# [1:1:5:0:0:91:0] Put# [1:1:6:0:0:63:0] Put# [1:1:7:0:0:77:0] Put# [1:1:8:0:0:49:0] Put# [1:1:9:0:0:51:0] Put# [1:1:10:0:0:3:0] Put# [1:1:11:0:0:40:0] Put# [1:1:12:0:0:45:0] Put# [1:1:13:0:0:20:0] Put# [1:1:14:0:0:12:0] Put# [1:1:15:0:0:93:0] Put# [1:1:16:0:0:16:0] Put# [1:1:17:0:0:53:0] Put# [1:1:18:0:0:37:0] Put# [1:1:19:0:0:69:0] Put# [1:1:20:0:0:79:0] Put# [1:1:21:0:0:80:0] Put# [1:1:22:0:0:32:0] Put# [1:1:23:0:0:13:0] Put# [1:1:24:0:0:57:0] Put# [1:1:25:0:0:87:0] Put# [1:1:26:0:0:58:0] Put# [1:1:27:0:0:31:0] Put# [1:1:28:0:0:13:0] Put# [1:1:29:0:0:64:0] Put# [1:1:30:0:0:86:0] Put# 
[1:1:31:0:0:65:0] Put# [1:1:32:0:0:29:0] Put# [1:1:33:0:0:19:0] Put# [1:1:34:0:0:27:0] Put# [1:1:35:0:0:37:0] Put# [1:1:36:0:0:18:0] Put# [1:1:37:0:0:12:0] Put# [1:1:38:0:0:64:0] Put# [1:1:39:0:0:13:0] Put# [1:1:40:0:0:64:0] Put# [1:1:41:0:0:80:0] Put# [1:1:42:0:0:28:0] Put# [1:1:43:0:0:88:0] Put# [1:1:44:0:0:97:0] Put# [1:1:45:0:0:5:0] Put# [1:1:46:0:0:85:0] Put# [1:1:47:0:0:30:0] Put# [1:1:48:0:0:94:0] Put# [1:1:49:0:0:16:0] Put# [1:1:50:0:0:72:0] Put# [1:1:51:0:0:45:0] Put# [1:1:52:0:0:44:0] Put# [1:1:53:0:0:1:0] Put# [1:1:54:0:0:14:0] Put# [1:1:55:0:0:30:0] Put# [1:1:56:0:0:39:0] Put# [1:1:57:0:0:20:0] Put# [1:1:58:0:0:56:0] Put# [1:1:59:0:0:95:0] Put# [1:1:60:0:0:5:0] Put# [1:1:61:0:0:19:0] Put# [1:1:62:0:0:50:0] Put# [1:1:63:0:0:52:0] Put# [1:1:64:0:0:99:0] Put# [1:1:65:0:0:25:0] Put# [1:1:66:0:0:54:0] Put# [1:1:67:0:0:62:0] Put# [1:1:68:0:0:97:0] Put# [1:1:69:0:0:57:0] Put# [1:1:70:0:0:41:0] Put# [1:1:71:0:0:46:0] Put# [1:1:72:0:0:16:0] Put# [1:1:73:0:0:53:0] Put# [1:1:74:0:0:11:0] Put# [1:1:75:0:0:58:0] Put# [1:1:76:0:0:53:0] Put# [1:1:77:0:0:82:0] Put# [1:1:78:0:0:78:0] Put# [1:1:79:0:0:44:0] Put# [1:1:80:0:0:83:0] Put# [1:1:81:0:0:50:0] Put# [1:1:82:0:0:33:0] Put# [1:1:83:0:0:61:0] Put# [1:1:84:0:0:98:0] Put# [1:1:85:0:0:17:0] Put# [1:1:86:0:0:63:0] Put# [1:1:87:0:0:66:0] Put# [1:1:88:0:0:26:0] Put# [1:1:89:0:0:28:0] Put# [1:1:90:0:0:20:0] Put# [1:1:91:0:0:94:0] Put# [1:1:92:0:0:76:0] Put# [1:1:93:0:0:62:0] Put# [1:1:94:0:0:86:0] Put# [1:1:95:0:0:44:0] Put# [1:1:96:0:0:43:0] Put# [1:1:97:0:0:2:0] Put# [1:1:98:0:0:59:0] Put# [1:1:99:0:0:69:0] Put# [1:1:100:0:0:2:0] Put# [1:1:101:0:0:27:0] Put# [1:1:102:0:0:78:0] Put# [1:1:103:0:0:21:0] Put# [1:1:104:0:0:53:0] Put# [1:1:105:0:0:74:0] Put# [1:1:106:0:0:84:0] Put# [1:1:107:0:0:75:0] Put# [1:1:108:0:0:15:0] Put# [1:1:109:0:0:35:0] Put# [1:1:110:0:0:32:0] Put# [1:1:111:0:0:76:0] Put# [1:1:112:0:0:95:0] Put# [1:1:113:0:0:66:0] Put# [1:1:114:0:0:25:0] Put# [1:1:115:0:0:12:0] Put# [1:1:116:0:0:34:0] Put# [1:1:117:0:0:73:0] Put# [1:1:118:0:0:84:0] Put# [1:1:119:0:0:51:0] Put# [1:1:120:0:0:99:0] Put# [1:1:121:0:0:61:0] Put# [1:1:122:0:0:49:0] Put# [1:1:123:0:0:6:0] Put# [1:1:124:0:0:1:0] Put# [1:1:125:0:0:46:0] Put# [1:1:126:0:0:43:0] Put# [1:1:127:0:0:65:0] Put# [1:1:128:0:0:4:0] Put# [1:1:129:0:0:13:0] Put# [1:1:130:0:0:35:0] Put# [1:1:131:0:0:20:0] Put# [1:1:132:0:0:62:0] Put# [1:1:133:0:0:64:0] Put# [1:1:134:0:0:7:0] Put# [1:1:135:0:0:94:0] Put# [1:1:136:0:0:7:0] Put# [1:1:137:0:0:91:0] Put# [1:1:138:0:0:7:0] Put# [1:1:139:0:0:20:0] Put# [1:1:140:0:0:31:0] Put# [1:1:141:0:0:51:0] Put# [1:1:142:0:0:79:0] Put# [1:1:143:0:0:54:0] Put# [1:1:144:0:0:9:0] Put# [1:1:145:0:0:14:0] Put# [1:1:146:0:0:71:0] Put# [1:1:147:0:0:71:0] Put# [1:1:148:0:0:8:0] Put# [1:1:149:0:0:31:0] Put# [1:1:150:0:0:83:0] Put# [1:1:151:0:0:87:0] Put# [1:1:152:0:0:93:0] Put# [1:1:153:0:0:70:0] Put# [1:1:154:0:0:5:0] Put# [1:1:155:0:0:65:0] Put# [1:1:156:0:0:75:0] Put# [1:1:157:0:0:27:0] Put# [1:1:158:0:0:50:0] Put# [1:1:159:0:0:100:0] Put# [1:1:160:0:0:67:0] Put# [1:1:161:0:0:23:0] Put# [1:1:162:0:0:44:0] Put# [1:1:163:0:0:35:0] Put# [1:1:164:0:0:25:0] Put# [1:1:165:0:0:98:0] Put# [1:1:166:0:0:35:0] Put# [1:1:167:0:0:4:0] Put# [1:1:168:0:0:25:0] Put# [1:1:169:0:0:22:0] Put# [1:1:170:0:0:36:0] Put# [1:1:171:0:0:96:0] Put# [1:1:172:0:0:3:0] Put# [1:1:173:0:0:12:0] Put# [1:1:174:0:0:89:0] Put# [1:1:175:0:0:4:0] Put# [1:1:176:0:0:22:0] Put# [1:1:177:0:0:3:0] Put# [1:1:178:0:0:11:0] Put# [1:1:179:0:0:7:0] Put# [1:1:180:0:0:31:0] Put# [1:1:181:0:0:31:0] Put# 
[1:1:182:0:0:64:0] Put# [1:1:183:0:0:70:0] Put# [1:1:184:0:0:84:0] Put# [1:1:185:0:0:48:0] Put# [1:1:186:0:0:91:0] Put# [1:1:187:0:0:85:0] Put# [1:1:188:0:0:95:0] Put# [1:1:189:0:0:93:0] Put# [1:1:190:0:0:98:0] Put# [1:1:191:0:0:56:0] Put# [1:1:192:0:0:61:0] Put# [1:1:193:0:0:80:0] Put# [1:1:194:0:0:52:0] Put# [1:1:195:0:0:49:0] Put# [1:1:196:0:0:8:0] Put# [1:1:197:0:0:39:0] Put# [1:1:198:0:0:88:0] Put# [1:1:199:0:0:78:0] Put# [1:1:200:0:0:16:0] Put# [1:1:201:0:0:3:0] Put# [1:1:202:0:0:89:0] Put# [1:1:203:0:0:51:0] Put# [1:1:204:0:0:72:0] Put# [1:1:205:0:0:46:0] Put# [1:1:206:0:0:43:0] Put# [1:1:207:0:0:49:0] Put# [1:1:208:0:0:63:0] Put# [1:1:209:0:0:39:0] Put# [1:1:210:0:0:48:0] Put# [1:1:211:0:0:91:0] Put# [1:1:212:0:0:5:0] Put# [1:1:213:0:0:91:0] Put# [1:1:214:0:0:7:0] Put# [1:1:215:0:0:28:0] Put# [1:1:216:0:0:32:0] Put# [1:1:217:0:0:32:0] Put# [1:1:218:0:0:30:0] Put# [1:1:219:0:0:93:0] Put# [1:1:220:0:0:74:0] Put# [1:1:221:0:0:32:0] Put# [1:1:222:0:0:61:0] Put# [1:1:223:0:0:59:0] Put# [1:1:224:0:0:54:0] Put# [1:1:225:0:0:49:0] Put# [1:1:226:0:0:79:0] Put# [1:1:227:0:0:73:0] Put# [1:1:228:0:0:56:0] Put# [1:1:229:0:0:75:0] Put# [1:1:230:0:0:93:0] Put# [1:1:231:0:0:39:0] Put# [1:1:232:0:0:41:0] Put# [1:1:233:0:0:38:0] Put# [1:1:234:0:0:53:0] Put# [1:1:235:0:0:7:0] Put# [1:1:236:0:0:44:0] Put# [1:1:237:0:0:60:0] Put# [1:1:238:0:0:19:0] Put# [1:1:239:0:0:70:0] Put# [1:1:240:0:0:79:0] Put# [1:1:241:0:0:10:0] Put# [1:1:242:0:0:39:0] Put# [1:1:243:0:0:58:0] Put# [1:1:244:0:0:69:0] Put# [1:1:245:0:0:37:0] Put# [1:1:246:0:0:82:0] Put# [1:1:247:0:0:84:0] Put# [1:1:248:0:0:23:0] Put# [1:1:249:0:0:18:0] Put# [1:1:250:0:0:72:0] Put# [1:1:251:0:0:21:0] Put# [1:1:252:0:0:92:0] Put# [1:1:253:0:0:18:0] Put# [1:1:254:0:0:88:0] Put# [1:1:255:0:0:55:0] Put# [1:1:256:0:0:81:0] Put# [1:1:257:0:0:26:0] Put# [1:1:258:0:0:21:0] Put# [1:1:259:0:0:11:0] Put# [1:1:260:0:0:100:0] Put# [1:1:261:0:0:13:0] Put# [1:1:262:0:0:96:0] Put# [1:1:263:0:0:75:0] Put# [1:1:264:0:0:80:0] Put# [1:1:265:0:0:90:0] Put# [1:1:266:0:0:44:0] Put# [1:1:267:0:0:29:0] Put# [1:1:268:0:0:65:0] Put# [1:1:269:0:0:68:0] Put# [1:1:270:0:0:14:0] Put# [1:1:271:0:0:39:0] Put# [1:1:272:0:0:69:0] Put# [1:1:273:0:0:12:0] Put# [1:1:274:0:0:25:0] Put# [1:1:275:0:0:17:0] Put# [1:1:276:0:0:67:0] Put# [1:1:277:0:0:4:0] Put# [1:1:278:0:0:10:0] Put# [1:1:279:0:0:77:0] Put# [1:1:280:0:0:64:0] Put# [1:1:281:0:0:92:0] Put# [1:1:282:0:0:50:0] Put# [1:1:283:0:0:20:0] Put# [1:1:284:0:0:23:0] Put# [1:1:285:0:0:11:0] Put# [1:1:286:0:0:7:0] Put# [1:1:287:0:0:82:0] Put# [1:1:288:0:0:36:0] Put# [1:1:289:0:0:25:0] Put# [1:1:290:0:0:61:0] Put# [1:1:291:0:0:48:0] Put# [1:1:292:0:0:61:0] Put# [1:1:293:0:0:87:0] Put# [1:1:294:0:0:68:0] Put# [1:1:295:0:0:26:0] Put# [1:1:296:0:0:10:0] Put# [1:1:297:0:0:2:0] Put# [1:1:298:0:0:23:0] Put# [1:1:299:0:0:57:0] Put# [1:1:300:0:0:27:0] Put# [1:1:301:0:0:87:0] Put# [1:1:302:0:0:61:0] Put# [1:1:303:0:0:39:0] Put# [1:1:304:0:0:87:0] Put# [1:1:305:0:0:51:0] Put# [1:1:306:0:0:81:0] Put# [1:1:307:0:0:37:0] Put# [1:1:308:0:0:79:0] Put# [1:1:309:0:0:14:0] Put# [1:1:310:0:0:48:0] Put# [1:1:311:0:0:15:0] Put# [1:1:312:0:0:40:0] Put# [1:1:313:0:0:25:0] Put# [1:1:314:0:0:3:0] Put# [1:1:315:0:0:99:0] Put# [1:1:316:0:0:43:0] Put# [1:1:317:0:0:11:0] Put# [1:1:318:0:0:41:0] Put# [1:1:319:0:0:3:0] Put# [1:1:320:0:0:62:0] Put# [1:1:321:0:0:17:0] Put# [1:1:322:0:0:20:0] Put# [1:1:323:0:0:100:0] Put# [1:1:324:0:0:11:0] Put# [1:1:325:0:0:2:0] Put# [1:1:326:0:0:96:0] Put# [1:1:327:0:0:73:0] Put# [1:1:328:0:0:25:0] Put# [1:1:329:0:0:100:0] Put# 
[1:1:330:0:0:23:0] Put# [1:1:331:0:0:49:0] Put# [1:1:332:0:0:51:0] Put# [1:1:333:0:0:73:0] Put# [1:1:334:0:0:33:0] Put# [1:1:335:0:0:42:0] Put# [1:1:336:0:0:43:0] Put# [1:1:337:0:0:20:0] Put# [1:1:338:0:0:38:0] Put# [1:1:339:0:0:50:0] Put# [1:1:340:0:0:13:0] Put# [1:1:341:0:0:26:0] Put# [1:1:342:0:0:66:0] Put# [1:1:343:0:0:91:0] Put# [1:1:344:0:0:26:0] Put# [1:1:345:0:0:76:0] Put# [1:1:346:0:0:97:0] Put# [1:1:347:0:0:57:0] Put# [1:1:348:0:0:60:0] Put# [1:1:349:0:0:9:0] Put# [1:1:350:0:0:54:0] Put# [1:1:351:0:0:96:0] Put# [1:1:352:0:0:98:0] Put# [1:1:353:0:0:51:0] Put# [1:1:354:0:0:88:0] Put# [1:1:355:0:0:96:0] Put# [1:1:356:0:0:95:0] Put# [1:1:357:0:0:36:0] Put# [1:1:358:0:0:35:0] Put# [1:1:359:0:0:71:0] Put# [1:1:360:0:0:21:0] Put# [1:1:361:0:0:74:0] Put# [1:1:362:0:0:6:0] Put# [1:1:363:0:0:55:0] Put# [1:1:364:0:0:25:0] Put# [1:1:365:0:0:6:0] Put# [1:1:366:0:0:67:0] Put# [1:1:367:0:0:35:0] Put# [1:1:368:0:0:75:0] Put# [1:1:369:0:0:32:0] Put# [1:1:370:0:0:6:0] Put# [1:1:371:0:0:48:0] Put# [1:1:372:0:0:36:0] Put# [1:1:373:0:0:7:0] Put# [1:1:374:0:0:42:0] Put# [1:1:375:0:0:15:0] Put# [1:1:376:0:0:90:0] Put# [1:1:377:0:0:27:0] Put# [1:1:378:0:0:10:0] Put# [1:1:379:0:0:24:0] Put# [1:1:380:0:0:3:0] Put# [1:1:381:0:0:22:0] Put# [1:1:382:0:0:73:0] Put# [1:1:383:0:0:45:0] Put# [1:1:384:0:0:73:0] Put# [1:1:385:0:0:77:0] Put# [1:1:386:0:0:71:0] Put# [1:1:387:0:0:30:0] Put# [1:1:388:0:0:26:0] Put# [1:1:389:0:0:50:0] Put# [1:1:390:0:0:51:0] Put# [1:1:391:0:0:40:0] Put# [1:1:392:0:0:5:0] Put# [1:1:393:0:0:32:0] Put# [1:1:394:0:0:75:0] Put# [1:1:395:0:0:78:0] Put# [1:1:396:0:0:21:0] Put# [1:1:397:0:0:41:0] Put# [1:1:398:0:0:57:0] Put# [1:1:399:0:0:3:0] Put# [1:1:400:0:0:75:0] Put# [1:1:401:0:0:92:0] Put# [1:1:402:0:0:38:0] Put# [1:1:403:0:0:93:0] Put# [1:1:404:0:0:94:0] Put# [1:1:405:0:0:10:0] Put# [1:1:406:0:0:58:0] Put# [1:1:407:0:0:82:0] Put# [1:1:408:0:0:36:0] Put# [1:1:409:0:0:88:0] Put# [1:1:410:0:0:55:0] Put# [1:1:411:0:0:64:0] Put# [1:1:412:0:0:84:0] Put# [1:1:413:0:0:58:0] Put# [1:1:414:0:0:91:0] Put# [1:1:415:0:0:19:0] Put# [1:1:416:0:0:67:0] Put# [1:1:417:0:0:38:0] Put# [1:1:418:0:0:12:0] Put# [1:1:419:0:0:54:0] Put# [1:1:420:0:0:67:0] Put# [1:1:421:0:0:43:0] Put# [1:1:422:0:0:59:0] Put# [1:1:423:0:0:53:0] Put# [1:1:424:0:0:42:0] Put# [1:1:425:0:0:64:0] Put# [1:1:426:0:0:31:0] Put# [1:1:427:0:0:97:0] Put# [1:1:428:0:0:18:0] Put# [1:1:429:0:0:92:0] Put# [1:1:430:0:0:38:0] Put# [1:1:431:0:0:60:0] Put# [1:1:432:0:0:66:0] Put# [1:1:433:0:0:97:0] Put# [1:1:434:0:0:60:0] Put# [1:1:435:0:0:13:0] Put# [1:1:436:0:0:85:0] Put# [1:1:437:0:0:4:0] Put# [1:1:438:0:0:60:0] Put# [1:1:439:0:0:42:0] Put# [1:1:440:0:0:64:0] Put# [1:1:441:0:0:46:0] Put# [1:1:442:0:0:5:0] Put# [1:1:443:0:0:45:0] Put# [1:1:444:0:0:99:0] Put# [1:1:445:0:0:59:0] Put# [1:1:446:0:0:1:0] Put# [1:1:447:0:0:11:0] Put# [1:1:448:0:0:59:0] Put# [1:1:449:0:0:27:0] Put# [1:1:450:0:0:63:0] Put# [1:1:451:0:0:40:0] Put# [1:1:452:0:0:25:0] Put# [1:1:453:0:0:8:0] Put# [1:1:454:0:0:64:0] Put# [1:1:455:0:0:21:0] Put# [1:1:456:0:0:54:0] Put# [1:1:457:0:0:65:0] Put# [1:1:458:0:0:2:0] Put# [1:1:459:0:0:80:0] Put# [1:1:460:0 ... 
[1:3:3000:0:0:45:0] Put# [1:3:3001:0:0:68:0] Put# [1:3:3002:0:0:84:0] Put# [1:3:3003:0:0:41:0] Put# [1:3:3004:0:0:31:0] Put# [1:3:3005:0:0:94:0] Put# [1:3:3006:0:0:3:0] Put# [1:3:3007:0:0:31:0] Put# [1:3:3008:0:0:45:0] Put# [1:3:3009:0:0:16:0] Put# [1:3:3010:0:0:51:0] Put# [1:3:3011:0:0:95:0] Put# [1:3:3012:0:0:43:0] Put# [1:3:3013:0:0:91:0] Put# [1:3:3014:0:0:4:0] Put# [1:3:3015:0:0:11:0] Put# [1:3:3016:0:0:86:0] Put# [1:3:3017:0:0:47:0] Put# [1:3:3018:0:0:94:0] Put# [1:3:3019:0:0:51:0] Put# [1:3:3020:0:0:73:0] Put# [1:3:3021:0:0:46:0] Put# [1:3:3022:0:0:55:0] Put# [1:3:3023:0:0:72:0] Put# [1:3:3024:0:0:50:0] Put# [1:3:3025:0:0:34:0] Put# [1:3:3026:0:0:11:0] Put# [1:3:3027:0:0:95:0] Put# [1:3:3028:0:0:48:0] Put# [1:3:3029:0:0:30:0] Put# [1:3:3030:0:0:33:0] Put# [1:3:3031:0:0:25:0] Put# [1:3:3032:0:0:98:0] Put# [1:3:3033:0:0:16:0] Put# [1:3:3034:0:0:84:0] Put# [1:3:3035:0:0:53:0] Put# [1:3:3036:0:0:41:0] Put# [1:3:3037:0:0:31:0] Put# [1:3:3038:0:0:43:0] Put# [1:3:3039:0:0:69:0] Put# [1:3:3040:0:0:48:0] Put# [1:3:3041:0:0:74:0] Put# [1:3:3042:0:0:92:0] Put# [1:3:3043:0:0:65:0] Put# [1:3:3044:0:0:89:0] Put# [1:3:3045:0:0:59:0] Put# [1:3:3046:0:0:12:0] Put# [1:3:3047:0:0:17:0] Put# [1:3:3048:0:0:21:0] Put# [1:3:3049:0:0:1:0] Put# [1:3:3050:0:0:69:0] Put# [1:3:3051:0:0:19:0] Put# [1:3:3052:0:0:93:0] Put# [1:3:3053:0:0:87:0] Put# [1:3:3054:0:0:6:0] Put# [1:3:3055:0:0:24:0] Put# [1:3:3056:0:0:78:0] Put# [1:3:3057:0:0:6:0] Put# [1:3:3058:0:0:90:0] Put# [1:3:3059:0:0:85:0] Put# [1:3:3060:0:0:35:0] Put# [1:3:3061:0:0:45:0] Put# [1:3:3062:0:0:99:0] Put# [1:3:3063:0:0:73:0] Put# [1:3:3064:0:0:4:0] Put# [1:3:3065:0:0:2:0] Put# [1:3:3066:0:0:6:0] Put# [1:3:3067:0:0:75:0] Put# [1:3:3068:0:0:20:0] Put# [1:3:3069:0:0:44:0] Put# [1:3:3070:0:0:90:0] Put# [1:3:3071:0:0:41:0] Put# [1:3:3072:0:0:19:0] Put# [1:3:3073:0:0:68:0] Put# [1:3:3074:0:0:84:0] Put# [1:3:3075:0:0:45:0] Put# [1:3:3076:0:0:51:0] Put# [1:3:3077:0:0:87:0] Put# [1:3:3078:0:0:7:0] Put# [1:3:3079:0:0:17:0] Put# [1:3:3080:0:0:40:0] Put# [1:3:3081:0:0:12:0] Put# [1:3:3082:0:0:19:0] Put# [1:3:3083:0:0:80:0] Put# [1:3:3084:0:0:14:0] Put# [1:3:3085:0:0:52:0] Put# [1:3:3086:0:0:36:0] Put# [1:3:3087:0:0:29:0] Put# [1:3:3088:0:0:43:0] Put# [1:3:3089:0:0:15:0] Put# [1:3:3090:0:0:52:0] Put# [1:3:3091:0:0:1:0] Put# [1:3:3092:0:0:98:0] Put# [1:3:3093:0:0:97:0] Put# [1:3:3094:0:0:42:0] Put# [1:3:3095:0:0:77:0] Put# [1:3:3096:0:0:55:0] Put# [1:3:3097:0:0:62:0] Put# [1:3:3098:0:0:58:0] Put# [1:3:3099:0:0:4:0] Put# [1:3:3100:0:0:20:0] Put# [1:3:3101:0:0:22:0] Put# [1:3:3102:0:0:53:0] Put# [1:3:3103:0:0:78:0] Put# [1:3:3104:0:0:64:0] Put# [1:3:3105:0:0:83:0] Put# [1:3:3106:0:0:10:0] Put# [1:3:3107:0:0:90:0] Put# [1:3:3108:0:0:76:0] Put# [1:3:3109:0:0:16:0] Put# [1:3:3110:0:0:76:0] Put# [1:3:3111:0:0:51:0] Put# [1:3:3112:0:0:43:0] Put# [1:3:3113:0:0:56:0] Put# [1:3:3114:0:0:83:0] Put# [1:3:3115:0:0:74:0] Put# [1:3:3116:0:0:85:0] Put# [1:3:3117:0:0:19:0] Put# [1:3:3118:0:0:12:0] Put# [1:3:3119:0:0:81:0] Put# [1:3:3120:0:0:45:0] Put# [1:3:3121:0:0:62:0] Put# [1:3:3122:0:0:88:0] Put# [1:3:3123:0:0:14:0] Put# [1:3:3124:0:0:76:0] Put# [1:3:3125:0:0:93:0] Put# [1:3:3126:0:0:28:0] Put# [1:3:3127:0:0:88:0] Put# [1:3:3128:0:0:3:0] Put# [1:3:3129:0:0:32:0] Put# [1:3:3130:0:0:61:0] Put# [1:3:3131:0:0:53:0] Put# [1:3:3132:0:0:55:0] Put# [1:3:3133:0:0:10:0] Put# [1:3:3134:0:0:97:0] Put# [1:3:3135:0:0:6:0] Put# [1:3:3136:0:0:89:0] Put# [1:3:3137:0:0:12:0] Put# [1:3:3138:0:0:60:0] Put# [1:3:3139:0:0:70:0] Put# [1:3:3140:0:0:49:0] Put# [1:3:3141:0:0:64:0] Put# 
[1:3:3142:0:0:58:0] Put# [1:3:3143:0:0:10:0] Put# [1:3:3144:0:0:93:0] Put# [1:3:3145:0:0:75:0] Put# [1:3:3146:0:0:65:0] Put# [1:3:3147:0:0:83:0] Put# [1:3:3148:0:0:28:0] Put# [1:3:3149:0:0:76:0] Put# [1:3:3150:0:0:6:0] Put# [1:3:3151:0:0:17:0] Put# [1:3:3152:0:0:75:0] Put# [1:3:3153:0:0:81:0] Put# [1:3:3154:0:0:16:0] Put# [1:3:3155:0:0:5:0] Put# [1:3:3156:0:0:83:0] Put# [1:3:3157:0:0:40:0] Put# [1:3:3158:0:0:9:0] Put# [1:3:3159:0:0:93:0] Put# [1:3:3160:0:0:74:0] Put# [1:3:3161:0:0:97:0] Put# [1:3:3162:0:0:3:0] Put# [1:3:3163:0:0:48:0] Put# [1:3:3164:0:0:79:0] Put# [1:3:3165:0:0:68:0] Put# [1:3:3166:0:0:4:0] Put# [1:3:3167:0:0:42:0] Put# [1:3:3168:0:0:62:0] Put# [1:3:3169:0:0:19:0] Put# [1:3:3170:0:0:19:0] Put# [1:3:3171:0:0:19:0] Put# [1:3:3172:0:0:100:0] Put# [1:3:3173:0:0:86:0] Put# [1:3:3174:0:0:92:0] Put# [1:3:3175:0:0:79:0] Put# [1:3:3176:0:0:18:0] Put# [1:3:3177:0:0:48:0] Put# [1:3:3178:0:0:44:0] Put# [1:3:3179:0:0:23:0] Put# [1:3:3180:0:0:65:0] Put# [1:3:3181:0:0:80:0] Put# [1:3:3182:0:0:95:0] Put# [1:3:3183:0:0:44:0] Put# [1:3:3184:0:0:93:0] Put# [1:3:3185:0:0:1:0] Put# [1:3:3186:0:0:59:0] Put# [1:3:3187:0:0:29:0] Put# [1:3:3188:0:0:66:0] Put# [1:3:3189:0:0:100:0] Put# [1:3:3190:0:0:30:0] Put# [1:3:3191:0:0:42:0] Put# [1:3:3192:0:0:53:0] Put# [1:3:3193:0:0:35:0] Put# [1:3:3194:0:0:91:0] Put# [1:3:3195:0:0:89:0] Put# [1:3:3196:0:0:85:0] Put# [1:3:3197:0:0:19:0] Put# [1:3:3198:0:0:34:0] Put# [1:3:3199:0:0:78:0] Put# [1:3:3200:0:0:52:0] Put# [1:3:3201:0:0:79:0] Put# [1:3:3202:0:0:51:0] Put# [1:3:3203:0:0:91:0] Put# [1:3:3204:0:0:17:0] Put# [1:3:3205:0:0:18:0] Put# [1:3:3206:0:0:79:0] Put# [1:3:3207:0:0:3:0] Put# [1:3:3208:0:0:63:0] Put# [1:3:3209:0:0:38:0] Put# [1:3:3210:0:0:76:0] Put# [1:3:3211:0:0:60:0] Put# [1:3:3212:0:0:69:0] Put# [1:3:3213:0:0:33:0] Put# [1:3:3214:0:0:61:0] Put# [1:3:3215:0:0:41:0] Put# [1:3:3216:0:0:73:0] Put# [1:3:3217:0:0:80:0] Put# [1:3:3218:0:0:34:0] Put# [1:3:3219:0:0:8:0] Put# [1:3:3220:0:0:72:0] Put# [1:3:3221:0:0:3:0] Put# [1:3:3222:0:0:98:0] Put# [1:3:3223:0:0:70:0] Put# [1:3:3224:0:0:63:0] Put# [1:3:3225:0:0:74:0] Put# [1:3:3226:0:0:5:0] Put# [1:3:3227:0:0:30:0] Put# [1:3:3228:0:0:68:0] Put# [1:3:3229:0:0:68:0] Put# [1:3:3230:0:0:54:0] Put# [1:3:3231:0:0:24:0] Put# [1:3:3232:0:0:66:0] Put# [1:3:3233:0:0:79:0] Put# [1:3:3234:0:0:55:0] Put# [1:3:3235:0:0:86:0] Put# [1:3:3236:0:0:36:0] Put# [1:3:3237:0:0:91:0] Put# [1:3:3238:0:0:63:0] Put# [1:3:3239:0:0:43:0] Put# [1:3:3240:0:0:9:0] Put# [1:3:3241:0:0:63:0] Put# [1:3:3242:0:0:7:0] Put# [1:3:3243:0:0:80:0] Put# [1:3:3244:0:0:44:0] Put# [1:3:3245:0:0:69:0] Put# [1:3:3246:0:0:30:0] Put# [1:3:3247:0:0:99:0] Put# [1:3:3248:0:0:23:0] Put# [1:3:3249:0:0:66:0] Put# [1:3:3250:0:0:52:0] Put# [1:3:3251:0:0:65:0] Put# [1:3:3252:0:0:56:0] Put# [1:3:3253:0:0:65:0] Put# [1:3:3254:0:0:84:0] Put# [1:3:3255:0:0:70:0] Put# [1:3:3256:0:0:74:0] Put# [1:3:3257:0:0:51:0] Put# [1:3:3258:0:0:79:0] Put# [1:3:3259:0:0:92:0] Put# [1:3:3260:0:0:65:0] Put# [1:3:3261:0:0:74:0] Put# [1:3:3262:0:0:94:0] Put# [1:3:3263:0:0:1:0] Put# [1:3:3264:0:0:85:0] Put# [1:3:3265:0:0:25:0] Put# [1:3:3266:0:0:55:0] Put# [1:3:3267:0:0:28:0] Put# [1:3:3268:0:0:81:0] Put# [1:3:3269:0:0:27:0] Put# [1:3:3270:0:0:69:0] Put# [1:3:3271:0:0:62:0] Put# [1:3:3272:0:0:32:0] Put# [1:3:3273:0:0:99:0] Put# [1:3:3274:0:0:3:0] Put# [1:3:3275:0:0:38:0] Put# [1:3:3276:0:0:86:0] Put# [1:3:3277:0:0:15:0] Put# [1:3:3278:0:0:34:0] Put# [1:3:3279:0:0:62:0] Put# [1:3:3280:0:0:60:0] Put# [1:3:3281:0:0:76:0] Put# [1:3:3282:0:0:13:0] Put# [1:3:3283:0:0:55:0] Put# 
[1:3:3284:0:0:95:0] Put# [1:3:3285:0:0:6:0] Put# [1:3:3286:0:0:86:0] Put# [1:3:3287:0:0:54:0] Put# [1:3:3288:0:0:66:0] Put# [1:3:3289:0:0:19:0] Put# [1:3:3290:0:0:18:0] Put# [1:3:3291:0:0:27:0] Put# [1:3:3292:0:0:22:0] Put# [1:3:3293:0:0:72:0] Put# [1:3:3294:0:0:74:0] Put# [1:3:3295:0:0:93:0] Put# [1:3:3296:0:0:67:0] Put# [1:3:3297:0:0:47:0] Put# [1:3:3298:0:0:63:0] Put# [1:3:3299:0:0:92:0] Put# [1:3:3300:0:0:1:0] Put# [1:3:3301:0:0:49:0] Put# [1:3:3302:0:0:18:0] Put# [1:3:3303:0:0:12:0] Put# [1:3:3304:0:0:93:0] Put# [1:3:3305:0:0:15:0] Put# [1:3:3306:0:0:48:0] Put# [1:3:3307:0:0:28:0] Put# [1:3:3308:0:0:48:0] Put# [1:3:3309:0:0:98:0] Put# [1:3:3310:0:0:96:0] Put# [1:3:3311:0:0:82:0] Put# [1:3:3312:0:0:71:0] Put# [1:3:3313:0:0:63:0] Put# [1:3:3314:0:0:96:0] Put# [1:3:3315:0:0:6:0] Put# [1:3:3316:0:0:85:0] Put# [1:3:3317:0:0:100:0] Put# [1:3:3318:0:0:88:0] Put# [1:3:3319:0:0:24:0] Put# [1:3:3320:0:0:56:0] Put# [1:3:3321:0:0:5:0] Put# [1:3:3322:0:0:81:0] Put# [1:3:3323:0:0:94:0] Put# [1:3:3324:0:0:8:0] Put# [1:3:3325:0:0:46:0] Put# [1:3:3326:0:0:64:0] Put# [1:3:3327:0:0:65:0] Put# [1:3:3328:0:0:19:0] Put# [1:3:3329:0:0:55:0] Put# [1:3:3330:0:0:84:0] Put# [1:3:3331:0:0:63:0] Put# [1:3:3332:0:0:75:0] Put# [1:3:3333:0:0:94:0] Put# [1:3:3334:0:0:67:0] Put# [1:3:3335:0:0:7:0] Put# [1:3:3336:0:0:81:0] Put# [1:3:3337:0:0:44:0] Put# [1:3:3338:0:0:92:0] Put# [1:3:3339:0:0:35:0] Put# [1:3:3340:0:0:84:0] Put# [1:3:3341:0:0:72:0] Put# [1:3:3342:0:0:77:0] Put# [1:3:3343:0:0:48:0] Put# [1:3:3344:0:0:97:0] Put# [1:3:3345:0:0:37:0] Put# [1:3:3346:0:0:39:0] Put# [1:3:3347:0:0:39:0] Put# [1:3:3348:0:0:81:0] Put# [1:3:3349:0:0:2:0] Put# [1:3:3350:0:0:39:0] Put# [1:3:3351:0:0:35:0] Put# [1:3:3352:0:0:24:0] Put# [1:3:3353:0:0:15:0] Put# [1:3:3354:0:0:73:0] Put# [1:3:3355:0:0:60:0] Put# [1:3:3356:0:0:2:0] Put# [1:3:3357:0:0:29:0] Put# [1:3:3358:0:0:86:0] Put# [1:3:3359:0:0:29:0] Put# [1:3:3360:0:0:10:0] Put# [1:3:3361:0:0:93:0] Put# [1:3:3362:0:0:51:0] Put# [1:3:3363:0:0:46:0] Put# [1:3:3364:0:0:27:0] Put# [1:3:3365:0:0:6:0] Put# [1:3:3366:0:0:87:0] Put# [1:3:3367:0:0:77:0] Put# [1:3:3368:0:0:27:0] Put# [1:3:3369:0:0:5:0] Put# [1:3:3370:0:0:75:0] Put# [1:3:3371:0:0:43:0] Put# [1:3:3372:0:0:33:0] Put# [1:3:3373:0:0:50:0] Put# [1:3:3374:0:0:33:0] Put# [1:3:3375:0:0:11:0] Put# [1:3:3376:0:0:30:0] Put# [1:3:3377:0:0:77:0] Put# [1:3:3378:0:0:37:0] Put# [1:3:3379:0:0:65:0] Put# [1:3:3380:0:0:10:0] Put# [1:3:3381:0:0:29:0] Put# [1:3:3382:0:0:97:0] Put# [1:3:3383:0:0:60:0] Put# [1:3:3384:0:0:85:0] Put# [1:3:3385:0:0:98:0] Put# [1:3:3386:0:0:96:0] Put# [1:3:3387:0:0:43:0] Put# [1:3:3388:0:0:84:0] Put# [1:3:3389:0:0:68:0] Put# [1:3:3390:0:0:45:0] Put# [1:3:3391:0:0:46:0] Put# [1:3:3392:0:0:89:0] Put# [1:3:3393:0:0:40:0] Put# [1:3:3394:0:0:61:0] Put# [1:3:3395:0:0:13:0] Put# [1:3:3396:0:0:68:0] Put# [1:3:3397:0:0:44:0] Put# [1:3:3398:0:0:68:0] Put# [1:3:3399:0:0:4:0] Put# [1:3:3400:0:0:91:0] Put# [1:3:3401:0:0:50:0] Put# [1:3:3402:0:0:25:0] Put# [1:3:3403:0:0:31:0] Put# [1:3:3404:0:0:14:0] Put# [1:3:3405:0:0:9:0] Put# [1:3:3406:0:0:94:0] Put# [1:3:3407:0:0:89:0] Put# [1:3:3408:0:0:76:0] Put# [1:3:3409:0:0:19:0] Put# [1:3:3410:0:0:66:0] Put# [1:3:3411:0:0:85:0] Put# [1:3:3412:0:0:24:0] Put# [1:3:3413:0:0:72:0] Put# [1:3:3414:0:0:76:0] Put# [1:3:3415:0:0:59:0] Put# [1:3:3416:0:0:97:0] Put# [1:3:3417:0:0:96:0] Put# [1:3:3418:0:0:24:0] Put# [1:3:3419:0:0:86:0] Put# [1:3:3420:0:0:79:0] Put# [1:3:3421:0:0:91:0] Put# [1:3:3422:0:0:54:0] Put# [1:3:3423:0:0:3:0] Put# [1:3:3424:0:0:28:0] Put# [1:3:3425:0:0:68:0] Put# 
[1:3:3426:0:0:100:0] Put# [1:3:3427:0:0:24:0] Put# [1:3:3428:0:0:69:0] Put# [1:3:3429:0:0:36:0] Put# [1:3:3430:0:0:20:0] Put# [1:3:3431:0:0:26:0] Put# [1:3:3432:0:0:99:0] Put# [1:3:3433:0:0:36:0] Put# [1:3:3434:0:0:68:0] Put# [1:3:3435:0:0:98:0] Put# [1:3:3436:0:0:10:0] Put# [1:3:3437:0:0:8:0] Put# [1:3:3438:0:0:54:0] Put# [1:3:3439:0:0:91:0] Put# [1:3:3440:0:0:2:0] Put# [1:3:3441:0:0:59:0] Put# [1:3:3442:0:0:69:0] Put# [1:3:3443:0:0:73:0] Put# [1:3:3444:0:0:2:0] Put# [1:3:3445:0:0:75:0] Put# [1:3:3446:0:0:40:0] Put# [1:3:3447:0:0:9:0] Put# [1:3:3448:0:0:13:0] Put# [1:3:3449:0:0:14:0] Put# [1:3:3450:0:0:99:0] Put# [1:3:3451:0:0:60:0] Put# [1:3:3452:0:0:54:0] Put# [1:3:3453:0:0:86:0] Put# [1:3:3454:0:0:37:0] Put# [1:3:3455:0:0:22:0] Put# [1:3:3456:0:0:57:0] Put# [1:3:3457:0:0:37:0] Put# [1:3:3458:0:0:53:0] Put# [1:3:3459:0:0:36:0] Put# [1:3:3460:0:0:21:0] Put# [1:3:3461:0:0:25:0] Put# [1:3:3462:0:0:66:0] Put# [1:3:3463:0:0:29:0] Put# [1:3:3464:0:0:51:0] Put# [1:3:3465:0:0:99:0] Reassign# 3 -- VSlotId { NodeId: 5 PDiskId: 1000 VSlotId: 1000 } GroupId: 2181038080 GroupGeneration: 3 VDiskKind: "Default" FailDomainIdx: 4 VDiskMetrics { SatisfactionRank: 12 VSlotId { NodeId: 5 PDiskId: 1000 VSlotId: 1000 } State: OK Replicated: true DiskSpace: Green IsThrottling: false ThrottlingRate: 1000 } Status: "READY" Ready: true Put# [1:3:3466:0:0:29:0] Put# [1:3:3467:0:0:29:0] >> Cdc::DocApi[TopicRunner] [GOOD] >> Cdc::HugeKey[PqRunner] |88.4%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/test-results/unittest/{meta.json ... results_accumulator.log} >> BuildStatsHistogram::Three_Serial_Small_2_Levels ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_background_cleaning/unittest >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCleanIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:231:2060] recipient: [1:225:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:231:2060] recipient: [1:225:2143] Leader for TabletID 72057594046678944 is [1:242:2154] sender: [1:243:2060] recipient: [1:225:2143] 2025-06-24T20:28:51.273170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:28:51.273321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:28:51.273368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:28:51.273413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:28:51.273451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:28:51.273481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:28:51.273535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-24T20:28:51.273604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:28:51.274351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:28:51.274708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:28:51.361265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:28:51.361336Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:28:51.381764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:28:51.383526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:28:51.383703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:28:51.390999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:28:51.391225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:28:51.391969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.392320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:28:51.395282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:51.395488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:28:51.396685Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:51.396752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:28:51.396976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:28:51.397027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:28:51.397073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:28:51.397253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.404050Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:242:2154] sender: [1:355:2060] recipient: [1:17:2064] 
2025-06-24T20:28:51.545382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:28:51.545646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.545860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:28:51.545922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:28:51.546168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:28:51.546240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:28:51.548704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.548903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:28:51.549166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.549250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:28:51.549298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:28:51.549338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:28:51.551643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.551705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:28:51.551750Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:28:51.553821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.553876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:28:51.553937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.554017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:28:51.564640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:28:51.567335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:28:51.567536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:28:51.568604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:28:51.568760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 250 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:28:51.568813Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.569117Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:28:51.569179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:28:51.569362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:28:51.569489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:28:51.571808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:28:51.571880Z node 1 :FLAT_TX_SCHEMESHARD ... 
impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:242:2154]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:31:13.964493Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:31:13.964599Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [7:242:2154], Recipient [7:242:2154]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:31:13.964631Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:31:14.375465Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:242:2154]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:31:14.375560Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:31:14.375646Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [7:242:2154], Recipient [7:242:2154]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:31:14.375679Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:31:14.804396Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:242:2154]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:31:14.804482Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:31:14.804563Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [7:242:2154], Recipient [7:242:2154]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:31:14.804602Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:31:15.248428Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:242:2154]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:31:15.248527Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:31:15.248608Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [7:242:2154], Recipient [7:242:2154]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:31:15.248643Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:31:15.647664Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:242:2154]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:31:15.647756Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event 
TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:31:15.647898Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [7:242:2154], Recipient [7:242:2154]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:31:15.647933Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:31:16.059544Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:242:2154]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:31:16.059648Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:31:16.059730Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [7:242:2154], Recipient [7:242:2154]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:31:16.059762Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:31:16.459884Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:242:2154]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:31:16.459987Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:31:16.460080Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [7:242:2154], Recipient [7:242:2154]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:31:16.460113Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:31:16.883743Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:242:2154]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:31:16.883837Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:31:16.883932Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [7:242:2154], Recipient [7:242:2154]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:31:16.883984Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:31:16.931860Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [7:1097:2845], Recipient [7:242:2154]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/tmp/TempTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2025-06-24T20:31:16.931980Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T20:31:16.932157Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/tmp/TempTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T20:31:16.932422Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/tmp/TempTable" took 255us result status StatusPathDoesNotExist 2025-06-24T20:31:16.932611Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/tmp/TempTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/tmp/TempTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T20:31:16.933277Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [7:1098:2846], Recipient [7:242:2154]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/tmp" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2025-06-24T20:31:16.933351Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T20:31:16.933485Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/tmp" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T20:31:16.933710Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/tmp" took 226us result status StatusPathDoesNotExist 2025-06-24T20:31:16.933891Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/tmp\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/tmp" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T20:31:16.934515Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [7:1099:2847], Recipient [7:242:2154]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/tmp/TempTable/ValueIndex" Options { ShowPrivateTable: true } 2025-06-24T20:31:16.934578Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event 
TEvSchemeShard::TEvDescribeScheme 2025-06-24T20:31:16.934694Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/tmp/TempTable/ValueIndex" Options { ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T20:31:16.934940Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/tmp/TempTable/ValueIndex" took 239us result status StatusPathDoesNotExist 2025-06-24T20:31:16.935131Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/tmp/TempTable/ValueIndex\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/tmp/TempTable/ValueIndex" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> THiveTest::TestManyFollowersOnOneNode [GOOD] >> THiveTest::TestLockTabletExecutionTimeout >> BuildStatsHistogram::Three_Serial_Small_2_Levels [GOOD] >> BuildStatsHistogram::Three_Serial_Small_2_Levels_3_Buckets >> BuildStatsHistogram::Three_Serial_Small_2_Levels_3_Buckets [GOOD] >> BuildStatsHistogram::Three_Serial_Small_1_Level >> BuildStatsHistogram::Three_Serial_Small_1_Level [GOOD] >> BuildStatsHistogram::Three_Serial_Small_0_Levels [GOOD] >> BuildStatsMixedIndex::Single >> Cdc::NewAndOldImagesLogDebezium [GOOD] >> Cdc::OldImageLogDebezium >> BuildStatsMixedIndex::Single [GOOD] >> BuildStatsMixedIndex::Single_Slices >> THiveTest::TestCreateTablet >> BuildStatsMixedIndex::Single_Slices [GOOD] >> BuildStatsMixedIndex::Single_History >> TColumnShardTestSchema::TTL-Reboot-Internal+FirstPkColumn [GOOD] |88.4%| [TA] $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/test-results/unittest/{meta.json ... results_accumulator.log} >> BuildStatsMixedIndex::Single_History [GOOD] >> BuildStatsMixedIndex::Single_History_Slices >> Cdc::VirtualTimestamps[PqRunner] [GOOD] >> Cdc::VirtualTimestamps[YdsRunner] >> THiveTest::TestFollowerPromotion [GOOD] >> THiveTest::TestFollowerPromotionFollowerDies |88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings |88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings |88.4%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/test-results/unittest/{meta.json ... 
results_accumulator.log} >> BuildStatsMixedIndex::Single_History_Slices [GOOD] >> BuildStatsMixedIndex::Single_Groups >> TargetTrackingScaleRecommenderPolicy::ScaleOut [GOOD] >> TargetTrackingScaleRecommenderPolicy::ScaleIn [GOOD] >> TargetTrackingScaleRecommenderPolicy::BigNumbersScaleOut [GOOD] >> TargetTrackingScaleRecommenderPolicy::BigNumbersScaleIn |88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage |88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage |88.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings |88.4%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage |88.5%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/test-results/unittest/{meta.json ... results_accumulator.log} |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf >> BuildStatsMixedIndex::Single_Groups [GOOD] >> BuildStatsMixedIndex::Single_Groups_Slices >> TargetTrackingScaleRecommenderPolicy::BigNumbersScaleIn [GOOD] >> TargetTrackingScaleRecommenderPolicy::SpikeResistance [GOOD] >> TargetTrackingScaleRecommenderPolicy::NearTarget [GOOD] >> TargetTrackingScaleRecommenderPolicy::AtTarget [GOOD] >> TargetTrackingScaleRecommenderPolicy::Fluctuations [GOOD] >> TargetTrackingScaleRecommenderPolicy::FluctuationsBigNumbers [GOOD] >> TargetTrackingScaleRecommenderPolicy::ScaleInToMaxSeen [GOOD] >> TargetTrackingScaleRecommenderPolicy::Idle [GOOD] >> TStorageBalanceTest::TestScenario1 |88.5%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service |88.5%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service >> THiveTest::TestStorageBalancer [GOOD] >> THiveTest::TestRestartsWithFollower >> BuildStatsMixedIndex::Single_Groups_Slices [GOOD] >> BuildStatsMixedIndex::Single_Groups_History >> BuildStatsMixedIndex::Single_Groups_History [GOOD] >> BuildStatsMixedIndex::Single_Groups_History_Slices >> THiveTest::TestLocalDisconnect ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL-Reboot-Internal+FirstPkColumn [GOOD] Test command err: Running TestTtl ttlColumnType=Timestamp 2025-06-24T20:30:48.976912Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828672, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:30:48.982292Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828673, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:30:48.982792Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T20:30:49.017047Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T20:30:49.017410Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 
2025-06-24T20:30:49.025954Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T20:30:49.026242Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T20:30:49.026537Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T20:30:49.026748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T20:30:49.026890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T20:30:49.027040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T20:30:49.027348Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T20:30:49.027508Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T20:30:49.027651Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T20:30:49.027813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T20:30:49.027948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T20:30:49.059498Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828684, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:30:49.064798Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T20:30:49.065108Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T20:30:49.065189Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T20:30:49.065445Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T20:30:49.065636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T20:30:49.065717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T20:30:49.065767Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T20:30:49.065872Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T20:30:49.065952Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T20:30:49.066003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T20:30:49.066069Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T20:30:49.066276Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T20:30:49.066386Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T20:30:49.066446Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T20:30:49.066482Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T20:30:49.066584Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T20:30:49.066690Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T20:30:49.066754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T20:30:49.066788Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T20:30:49.066856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T20:30:49.066904Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T20:30:49.066941Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T20:30:49.067215Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T20:30:49.067279Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T20:30:49.067316Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T20:30:49.067555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T20:30:49.067632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T20:30:49.067696Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T20:30:49.067907Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T20:30:49.067971Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T20:30:49.068007Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T20:30:49.068101Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T20:30:49.068180Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T20:30:49.068239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T20:30:49.068290Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T20:30:49.068807Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=78; 2025-06-24T20:30:49.068907Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=37; 2025-06-24T20:30:49.069014Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=52; 2025-06-24T20:30:49.069103Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=38; 2025-06-24T20:30:49.069215Z node 1 : ... al_result_received;interval_idx=0;intervalId=90; 2025-06-24T20:31:20.357037Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1000;merger=0;interval_id=90; 2025-06-24T20:31:20.357136Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-24T20:31:20.357260Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:20.357302Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=1000;finished=1; 2025-06-24T20:31:20.357386Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T20:31:20.357886Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T20:31:20.358121Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:1000;schema=timestamp: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:20.358191Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T20:31:20.358375Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready 
result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);columns=1;rows=1000; 2025-06-24T20:31:20.358475Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=8000;num_rows=1000;batch_columns=timestamp; 2025-06-24T20:31:20.358902Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:478:2482];bytes=8000;rows=1000;faults=0;finished=0;fault=0;schema=timestamp: uint64; 2025-06-24T20:31:20.359117Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:20.362601Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:20.362793Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:20.363095Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T20:31:20.363411Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:20.363632Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:20.363706Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [5:479:2483] finished for tablet 9437184 2025-06-24T20:31:20.364409Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[5:478:2482];stats={"p":[{"events":["f_bootstrap"],"t":0.012},{"events":["f_ProduceResults"],"t":0.015},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.021},{"events":["f_ack","l_task_result"],"t":0.038},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.044}],"full":{"a":1750797080319110,"name":"_full_task","f":1750797080319110,"d_finished":0,"c":0,"l":1750797080363786,"d":44676},"events":[{"name":"bootstrap","f":1750797080331870,"d_finished":8619,"c":1,"l":1750797080340489,"d":8619},{"a":1750797080363057,"name":"ack","f":1750797080357842,"d_finished":4992,"c":1,"l":1750797080362834,"d":5721},{"a":1750797080363030,"name":"processing","f":1750797080340627,"d_finished":14074,"c":8,"l":1750797080362838,"d":14830},{"name":"ProduceResults","f":1750797080334377,"d_finished":7126,"c":11,"l":1750797080363673,"d":7126},{"a":1750797080363679,"name":"Finish","f":1750797080363679,"d_finished":0,"c":0,"l":1750797080363786,"d":107},{"name":"task_result","f":1750797080340661,"d_finished":8866,"c":7,"l":1750797080357549,"d":8866}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:20.364530Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:478:2482];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T20:31:20.365173Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[5:478:2482];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0.012},{"events":["f_ProduceResults"],"t":0.015},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.021},{"events":["f_ack","l_task_result"],"t":0.038},{"events":["l_ProduceResults","f_Finish"],"t":0.044},{"events":["l_ack","l_processing","l_Finish"],"t":0.045}],"full":{"a":1750797080319110,"name":"_full_task","f":1750797080319110,"d_finished":0,"c":0,"l":1750797080364595,"d":45485},"events":[{"name":"bootstrap","f":1750797080331870,"d_finished":8619,"c":1,"l":1750797080340489,"d":8619},{"a":1750797080363057,"name":"ack","f":1750797080357842,"d_finished":4992,"c":1,"l":1750797080362834,"d":6530},{"a":1750797080363030,"name":"processing","f":1750797080340627,"d_finished":14074,"c":8,"l":1750797080362838,"d":15639},{"name":"ProduceResults","f":1750797080334377,"d_finished":7126,"c":11,"l":1750797080363673,"d":7126},{"a":1750797080363679,"name":"Finish","f":1750797080363679,"d_finished":0,"c":0,"l":1750797080364595,"d":916},{"name":"task_result","f":1750797080340661,"d_finished":8866,"c":7,"l":1750797080357549,"d":8866}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:20.365294Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T20:31:20.316725Z;index_granules=0;index_portions=1;index_batches=10;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=59184;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=59184;selected_rows=0; 2025-06-24T20:31:20.365356Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T20:31:20.365853Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; >> THiveTest::TestHiveBalancerWithPrefferedDC1 >> BuildStatsMixedIndex::Single_Groups_History_Slices [GOOD] >> BuildStatsMixedIndex::Mixed >> BuildStatsMixedIndex::Mixed [GOOD] >> BuildStatsMixedIndex::Mixed_Groups >> THiveTest::TestLockTabletExecutionTimeout [GOOD] >> THiveTest::TestLockTabletExecutionRebootTimeout >> TPersqueueControlPlaneTestSuite::SetupReadLockSessionWithDatabase >> THiveTest::TestFollowerPromotionFollowerDies [GOOD] >> THiveTest::TestHiveBalancer >> BuildStatsMixedIndex::Mixed_Groups [GOOD] >> BuildStatsMixedIndex::Mixed_Groups_History >> Yq_1::Basic_TaggedLiteral [GOOD] >> KqpPg::DropTablePg [GOOD] >> KqpPg::DropTablePgMultiple >> 
TPersQueueCommonTest::Auth_CreateGrpcStreamWithInvalidTokenInInitialMetadata_SessionClosedWithUnauthenticatedError >> THiveTest::TestCreateTablet [GOOD] >> THiveTest::TestCreate100Tablets >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithInvalidToken_SessionClosedWithUnauthenticatedError >> AsyncIndexChangeExchange::ShouldDeliverChangesOnFreshTable [GOOD] >> AsyncIndexChangeExchange::ShouldDeliverChangesOnAlteredTable >> BuildStatsMixedIndex::Mixed_Groups_History [GOOD] >> BuildStatsMixedIndex::Serial >> BuildStatsMixedIndex::Serial [GOOD] >> BuildStatsMixedIndex::Serial_Groups >> BuildStatsMixedIndex::Serial_Groups [GOOD] >> BuildStatsMixedIndex::Serial_Groups_History >> THiveTest::TestRestartsWithFollower [GOOD] >> THiveTest::TestStartTabletTwiceInARow >> BuildStatsMixedIndex::Serial_Groups_History [GOOD] >> BuildStatsMixedIndex::Single_LowResolution >> DSProxyStrategyTest::Restore_mirror3dc [GOOD] >> BuildStatsMixedIndex::Single_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Slices_LowResolution >> BuildStatsMixedIndex::Single_Slices_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Groups_LowResolution >> BuildStatsMixedIndex::Single_Groups_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Groups_Slices_LowResolution ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest >> DSProxyStrategyTest::Restore_mirror3dc [GOOD] Test command err: diskMask# 401 nonWorkingDomain# 0 78444 diskMask# 401 nonWorkingDomain# 1 8640 diskMask# 402 nonWorkingDomain# 0 191520 diskMask# 402 nonWorkingDomain# 1 8640 diskMask# 403 nonWorkingDomain# 0 69270 diskMask# 403 nonWorkingDomain# 1 4320 diskMask# 404 nonWorkingDomain# 0 128424 diskMask# 404 nonWorkingDomain# 1 64800 diskMask# 405 nonWorkingDomain# 0 63264 diskMask# 405 nonWorkingDomain# 1 8640 diskMask# 406 nonWorkingDomain# 0 91512 diskMask# 406 nonWorkingDomain# 1 8640 diskMask# 407 nonWorkingDomain# 0 43620 diskMask# 407 nonWorkingDomain# 1 4320 diskMask# 408 nonWorkingDomain# 0 129324 >> Cdc::OldImageLogDebezium [GOOD] >> Cdc::NewImageLogDebezium >> BuildStatsMixedIndex::Single_Groups_Slices_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Groups_History_LowResolution >> THiveTest::TestStartTabletTwiceInARow [GOOD] >> THiveTest::TestSpreadNeighboursWithUpdateTabletsObject >> YdbTableSplit::SplitByLoadWithUpdates [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::Basic_TaggedLiteral [GOOD] Test command err: 2025-06-24T20:29:12.268585Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615653600717256:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:12.268827Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:29:14.842971Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:29:14.844707Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615662190651986:2253];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:15.131090Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0624 20:29:15.299796561 105156 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:15.299950331 105156 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:16.163823Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:17.133024Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:25065: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:25065 } ] 2025-06-24T20:29:17.186783Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:17.295474Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519615653600717256:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:17.303799Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:29:17.600175Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:25065: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:25065 2025-06-24T20:29:18.207121Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:18.314156Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:18.934616Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:25065: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:25065 } ] 2025-06-24T20:29:19.239384Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:19.337986Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:19.847233Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519615662190651986:2253];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:19.847300Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:29:20.248796Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:20.350494Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:20.416478976 105506 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:20.416609268 105506 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:20.885848Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:21.140896Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:25065: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:25065 2025-06-24T20:29:21.151472Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:25065: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:25065 } ] 2025-06-24T20:29:21.284966Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:21.362492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:21.872587Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:22.289909Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:22.367439Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:22.868012Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:23.305521Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:23.379345Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:23.876096Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:24.315569Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:24.363166Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:25065: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:25065 } ] 2025-06-24T20:29:24.404135Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:24.880532Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:25.329506Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:25.406298Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:25.455132963 105506 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:25.455254936 105506 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:25.894931Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:26.352043Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:26.435656Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:26.898331Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:27.360129Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:27.436531Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:27.901204Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migration ... 
pp:648: SyncQuota finished with error: 2025-06-24T20:31:22.533780Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.533960Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.534080Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.534125Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.534243Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.534362Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.534477Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.534670Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.534704Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.534879Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.534989Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.535096Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.535189Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.535312Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.535421Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.535529Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.535633Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.535745Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.535851Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.535951Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.536078Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.536186Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.537710Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.537772Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.537969Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.538003Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.538110Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.538288Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.538406Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.538593Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.538623Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota 
finished with error: 2025-06-24T20:31:22.538796Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.538826Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.539002Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.539033Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.539274Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.539308Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.539404Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.539503Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.539600Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.539763Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.539859Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.539936Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.540070Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.540165Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.540340Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.540370Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.540575Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.540605Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.540763Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.540874Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.540972Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.541099Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.541144Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.541251Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.541358Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.541455Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.541626Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.541732Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.541773Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.541884Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.542484Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 
2025-06-24T20:31:22.542618Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.542731Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.542841Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.542945Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.543044Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.543165Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.543745Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.543908Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.544037Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.544148Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.544255Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.544362Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.544476Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.544627Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.551518Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.551765Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.552336Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.552448Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.552541Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.552631Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.552730Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.552768Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.552873Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.553045Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.553152Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.553271Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.553379Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.553432Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.553589Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.553730Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.553856Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 
2025-06-24T20:31:22.554044Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.554088Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.554263Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.554290Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.554473Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.554506Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.554687Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.554718Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.554833Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.554946Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:22.555104Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt |88.5%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt >> KqpPg::InsertValuesFromTableWithDefaultTextNotNull-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull+useSink >> BuildStatsMixedIndex::Single_Groups_History_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Groups_History_Slices_LowResolution >> Cdc::VirtualTimestamps[YdsRunner] [GOOD] >> Cdc::VirtualTimestamps[TopicRunner] >> KqpPg::CopyTableSerialColumns+useSink [GOOD] >> KqpPg::CopyTableSerialColumns-useSink >> THiveTest::TestLocalDisconnect [GOOD] >> THiveTest::TestLocalReplacement >> THiveTest::TestDrain [GOOD] >> THiveTest::TestDrainWithMaxTabletsScheduled >> BuildStatsMixedIndex::Single_Groups_History_Slices_LowResolution [GOOD] >> Charge::Lookups [GOOD] >> Charge::ByKeysBasics [GOOD] >> Charge::ByKeysGroups [GOOD] >> Charge::ByKeysGroupsLimits [GOOD] >> Charge::ByKeysLimits [GOOD] >> Charge::ByKeysReverse >> YdbTableSplit::SplitByLoadWithDeletes [GOOD] >> Charge::ByKeysReverse [GOOD] >> Charge::ByKeysHistory [GOOD] >> Charge::ByKeysIndex [GOOD] >> Charge::ByRows [GOOD] >> Charge::ByRowsReverse [GOOD] >> Charge::ByRowsLimits [GOOD] >> Charge::ByRowsLimitsReverse [GOOD] >> DBase::Basics [GOOD] >> DBase::Select [GOOD] >> DBase::Defaults [GOOD] >> DBase::Subsets [GOOD] >> DBase::Garbage [GOOD] >> DBase::Affects [GOOD] >> DBase::Annex [GOOD] >> DBase::AnnexRollbackChanges [GOOD] >> DBase::Outer [GOOD] >> DBase::VersionBasics [GOOD] >> DBase::KIKIMR_15506_MissingSnapshotKeys [GOOD] >> DBase::EraseCacheWithUncommittedChanges [GOOD] >> DBase::EraseCacheWithUncommittedChangesCompacted [GOOD] >> DBase::AlterAndUpsertChangesVisibility >> DBase::AlterAndUpsertChangesVisibility [GOOD] >> DBase::UncommittedChangesVisibility [GOOD] >> DBase::UncommittedChangesCommitWithUpdates [GOOD] >> DBase::ReplayNewTable [GOOD] >> DBase::SnapshotNewTable [GOOD] >> DBase::DropModifiedTable [GOOD] >> DBase::KIKIMR_15598_Many_MemTables >> Cdc::HugeKey[PqRunner] [GOOD] >> Cdc::HugeKey[YdsRunner] >> YdbTableSplit::SplitByLoadWithReads [GOOD] |88.5%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view |88.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithUpdates [GOOD] Test command err: 2025-06-24T20:30:58.627202Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616107458260946:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:58.627305Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00303b/r3tmp/tmp6CKnpU/pdisk_1.dat 2025-06-24T20:30:59.511818Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:59.529174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:59.529580Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:59.537543Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3459, node 1 2025-06-24T20:30:59.729888Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:30:59.771874Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:30:59.771897Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:30:59.771905Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:30:59.772048Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6906 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:31:00.288579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:6906 2025-06-24T20:31:03.296392Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616128933098433:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.296535Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.626161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:03.628660Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616107458260946:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:03.628768Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:31:03.874569Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616128933098626:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.874671Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.901584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750797063778 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 1 shards TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750797063778 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-24T20:31:04.151532Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616133228066020:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:04.155299Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:04.159374Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616133228066028:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:04.159485Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616133228066032:2350], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:04.159543Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616133228066035:2353], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:04.159579Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616133228066033:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:04.159618Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616133228066037:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:04.160022Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:04.165554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-24T20:31:04.165806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715660:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:04.165832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager, operationId: 281474976715660:1, at schemeshard: 72057594046644480 2025-06-24T20:31:04.165923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715660:2, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:04.165941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager/pools, operationId: 281474976715660:2, at schemeshard: 72057594046644480 2025-06-24T20:31:04.166011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715660:3, propose status:StatusAccepted, reason: , at schemeshard: 72057594 ... stomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.443774Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718586. Ctx: { TraceId: 01jyht6srm5d27jtxhmx3wq236, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTcyMmUwNjAtNWY1OTdmOTktM2NmZGVlOTUtZTE4ZTIwYTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.444538Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718588. Ctx: { TraceId: 01jyht6srm4wjw2nxkj0vkb0rk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTMwOTE3ZjAtZTVjM2I0NTEtMTYxZDU2YWYtYmZlZWQwYjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.445242Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718587. Ctx: { TraceId: 01jyht6srm9tm1j93zh9phrrsn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzEyZTIwM2ItNDkwZDRlNjMtMTFlZGQyYzQtOWVlZjU1NGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.450230Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718589. Ctx: { TraceId: 01jyht6srxc8ccfd5nzjh6wbbe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWNjM2I1ODMtNTlkOGRhNDYtMjUyZTgxZTctNjhhZWU5MTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.451846Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718590. Ctx: { TraceId: 01jyht6ss0c47xz86eye41hsr2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTMzY2EzYzctNzYxYjZhMGEtNTkyNDY5ZDAtMTM3NjQwZDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.462461Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718591. Ctx: { TraceId: 01jyht6ss72hgm1j2636yr8gjr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzEwZDA2MTEtZjU5MmFkNTMtYTAxNTBlYjAtN2IyZmUyOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:31:24.462665Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718592. Ctx: { TraceId: 01jyht6ss7aj3st4p63m720vp9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjQ0Yjc0OTMtMzk0Mzk5NDYtNzJhMjVkZTYtYzlkZGQyNjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.464079Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718593. Ctx: { TraceId: 01jyht6ss7atcawtxrj6sgc5cy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmVjOGQ5ODktZjJmYjA5ZjgtMzQ0YThiYTUtYzAwMDc5MDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.471941Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718595. Ctx: { TraceId: 01jyht6ssj49z9e1q7htqj34yy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWJiYjE1MGItYjg0ZGY5YzAtYTYzMGVkM2ItNWU4N2U0ZDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.474050Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718596. Ctx: { TraceId: 01jyht6ssn8tk7r0jg295ve9p7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTMwOTE3ZjAtZTVjM2I0NTEtMTYxZDU2YWYtYmZlZWQwYjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.476498Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718594. Ctx: { TraceId: 01jyht6ssk3vt2rsxket47yrty, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjI1ZGE1YzItYWQ5OTIwMGQtMzJjMzAzYzgtM2FhMWNlNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.481696Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718597. Ctx: { TraceId: 01jyht6ssze9vsrj8cftj70hz2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTcyMmUwNjAtNWY1OTdmOTktM2NmZGVlOTUtZTE4ZTIwYTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.493458Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718599. Ctx: { TraceId: 01jyht6st928h1t1sqh4fg7ehp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmVjOGQ5ODktZjJmYjA5ZjgtMzQ0YThiYTUtYzAwMDc5MDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.493492Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718598. Ctx: { TraceId: 01jyht6st96jfe7kgnc054e1a0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTMzY2EzYzctNzYxYjZhMGEtNTkyNDY5ZDAtMTM3NjQwZDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.495657Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718600. Ctx: { TraceId: 01jyht6st9f74pyw3f6pdryx24, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzEyZTIwM2ItNDkwZDRlNjMtMTFlZGQyYzQtOWVlZjU1NGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.497254Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718601. Ctx: { TraceId: 01jyht6st9em7p35ahvtj55jvn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjQ0Yjc0OTMtMzk0Mzk5NDYtNzJhMjVkZTYtYzlkZGQyNjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root TClient::Ls request: /Root/Foo 2025-06-24T20:31:24.505142Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718602. Ctx: { TraceId: 01jyht6st9fyj214amhge6h24k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzEwZDA2MTEtZjU5MmFkNTMtYTAxNTBlYjAtN2IyZmUyOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750797063778 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-24T20:31:24.535384Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718603. Ctx: { TraceId: 01jyht6str423z2a7jvgeqkn4h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWJiYjE1MGItYjg0ZGY5YzAtYTYzMGVkM2ItNWU4N2U0ZDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.535691Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718605. Ctx: { TraceId: 01jyht6sv1ch14cmdx3k120p4r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjI1ZGE1YzItYWQ5OTIwMGQtMzJjMzAzYzgtM2FhMWNlNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.542208Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718606. Ctx: { TraceId: 01jyht6stw7abxck4cb3k6z4n6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTMwOTE3ZjAtZTVjM2I0NTEtMTYxZDU2YWYtYmZlZWQwYjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.542401Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718604. Ctx: { TraceId: 01jyht6sth92ddb54prb38wh5n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWNjM2I1ODMtNTlkOGRhNDYtMjUyZTgxZTctNjhhZWU5MTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.547624Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718607. Ctx: { TraceId: 01jyht6svrdbgkhdy2jd884hyw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmVjOGQ5ODktZjJmYjA5ZjgtMzQ0YThiYTUtYzAwMDc5MDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.552923Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718608. Ctx: { TraceId: 01jyht6svxdgqpr6swxvzvr100, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzEwZDA2MTEtZjU5MmFkNTMtYTAxNTBlYjAtN2IyZmUyOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:24.628471Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718609. 
Ctx: { TraceId: 01jyht6syjfnjnx3enk35xsv12, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTcyMmUwNjAtNWY1OTdmOTktM2NmZGVlOTUtZTE4ZTIwYTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750797063778 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-24T20:31:24.923515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_compaction.cpp:31: [BackgroundCompaction] [Start] Compacting for pathId# [OwnerId: 72057594046644480, LocalPathId: 2], datashard# 72075186224037889, compactionInfo# {72057594046644480:2, SH# 3, Rows# 1351, Deletes# 0, Compaction# 1970-01-01T00:00:00.000000Z}, next wakeup in# 587.585170s, rate# 1.157407407e-05, in queue# 2 shards, waiting after compaction# 1 shards, running# 0 shards at schemeshard 72057594046644480 2025-06-24T20:31:24.924218Z node 1 :TX_DATASHARD INFO: datashard__compaction.cpp:141: Started background compaction# 2 of 72075186224037889 tableId# 2 localTid# 1001, requested from [1:7519616111753228542:2199], partsCount# 2, memtableSize# 52176, memtableWaste# 4656, memtableRows# 379 2025-06-24T20:31:24.992109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_compaction.cpp:112: [BackgroundCompaction] [Finished] Compaction completed for pathId# [OwnerId: 72057594046644480, LocalPathId: 2], datashard# 72075186224037889, shardIdx# 72057594046644480:2 in# 68 ms, with status# 0, next wakeup in# 587.516586s, rate# 1.157407407e-05, in queue# 2 shards, waiting after compaction# 2 shards, running# 0 shards at schemeshard 72057594046644480 Table has 2 shards >> Yq_1::Basic_EmptyDict [GOOD] |88.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> TContinuousBackupTests::TakeIncrementalBackup ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithDeletes [GOOD] Test command err: 2025-06-24T20:30:59.256755Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616110870253534:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:59.256808Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ff5/r3tmp/tmpzPVe2u/pdisk_1.dat 2025-06-24T20:31:00.337316Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:00.370186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:31:00.370320Z node 
1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:31:00.379309Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:31:00.380138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2439, node 1 2025-06-24T20:31:00.676926Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:31:00.676949Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:31:00.676956Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:31:00.677061Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4246 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:31:01.154747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:4246 2025-06-24T20:31:04.237165Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616132345091064:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:04.237290Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:04.262191Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616110870253534:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:04.262271Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:31:04.689417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:05.023495Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616136640058556:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:05.023575Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:05.040171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750797064856 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 1 shards TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750797064856 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-24T20:31:05.553200Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616136640058666:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:05.553278Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:05.555339Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616136640058671:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:05.580112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-24T20:31:05.580341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715660:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:05.580371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager, operationId: 281474976715660:1, at schemeshard: 72057594046644480 2025-06-24T20:31:05.580471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715660:2, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:05.580492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager/pools, operationId: 281474976715660:2, at schemeshard: 72057594046644480 2025-06-24T20:31:05.580563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715660:3, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:05.580624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_resource_pool.cpp:148: [72057594046644480] TCreateResourcePool Propose: opId# 281474976715660:3, path# /Root/.metadata/workload_manager/pools/default 2025-06-24T20:31:05.580895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715660:3 1 -> 128 2025-06-24T20:31:05.581148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715660:4, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:05.581173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:31:05.596898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715660, database: /Root, subject: metadata@system, status: StatusAccepted, operation: CREATE RESOURCE POOL, path: .metadata/workload_manager/pools/default, set owner:metadata@system, add access: +(SR|DS):all-users@well-known, add access: +(SR|DS):root@builtin 2025-06-24T20:31:05.597183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-24T20:31:05.597969Z ... 6. Ctx: { TraceId: 01jyht6tw7bv939bb0tgca9v2h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWY1ODg3NTAtZmI4NTExYjAtYjUxNzIzZi04M2ExNDMzZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.581747Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718847. 
Ctx: { TraceId: 01jyht6tw69zafzh549ddbdcgm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmRmZjkyZC1mMGMxMTc4OS01OGI5ODM4OC05OWNkMzY0MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.584495Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718848. Ctx: { TraceId: 01jyht6twab2yxqz28fx1vwq2f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjdjNTk2Mi00Y2Q2ZmRiNi0yMTcyMzk4NS03YTcyNWFhYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.587478Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718849. Ctx: { TraceId: 01jyht6twh5nm13z8kb34msmmj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2U0N2JjOWUtZDVkM2FhMDUtOTY3NWI3NmUtNzUzZGMwYmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.587783Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718850. Ctx: { TraceId: 01jyht6twhfdf1kpy3wy62we6w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTNlZDhjY2MtNTc4ZDJhODQtNTAwZGM4YTUtYmVmNjI3ZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.591522Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718852. Ctx: { TraceId: 01jyht6twmfhqtyvjq9pqqsxqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWM2MDNhZjYtYzZiZDViOTYtNGJhMjJjNmQtZGU1YjcwZWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.592794Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718851. Ctx: { TraceId: 01jyht6twm0p19h7f2zrdxk7bv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzMzN2I3MzItMjVjZTY0NTctZjg5NzJjNjctZWVkZWZiMzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.598538Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718853. Ctx: { TraceId: 01jyht6twpcj755p08kbdgf20w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFlMTFmMmEtMTZjMWZlMmMtM2UyZWRjYTgtOWE5NWMzN2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.602618Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718854. Ctx: { TraceId: 01jyht6twq69s8cdjyaah03a8k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWJkMTM4MWQtNTQ4NWU4Ny1kM2E5OWE1OS0zYjc4YTY5Ng==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.608490Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718855. Ctx: { TraceId: 01jyht6tx47c8fn22rp6zyjf37, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTUwNmY4NmMtZWY1NjI5MDMtZmFmODRiYzAtNDNlZDYyMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.610617Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718856. Ctx: { TraceId: 01jyht6tx4be4pd7983sx7eqyn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWY1ODg3NTAtZmI4NTExYjAtYjUxNzIzZi04M2ExNDMzZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.616903Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718858. 
Ctx: { TraceId: 01jyht6txc83x3vydcdj5a713q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTNlZDhjY2MtNTc4ZDJhODQtNTAwZGM4YTUtYmVmNjI3ZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.616953Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718857. Ctx: { TraceId: 01jyht6tx7c1j2md2yfagbmvxs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjdjNTk2Mi00Y2Q2ZmRiNi0yMTcyMzk4NS03YTcyNWFhYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.618382Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718859. Ctx: { TraceId: 01jyht6txcemy9s9qhprvjjedb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmRmZjkyZC1mMGMxMTc4OS01OGI5ODM4OC05OWNkMzY0MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.625441Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718860. Ctx: { TraceId: 01jyht6txqd98wvpppg7brq4vy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFlMTFmMmEtMTZjMWZlMmMtM2UyZWRjYTgtOWE5NWMzN2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.625758Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718861. Ctx: { TraceId: 01jyht6txq1zctw18zx0jwe1wq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWM2MDNhZjYtYzZiZDViOTYtNGJhMjJjNmQtZGU1YjcwZWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.627111Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718862. Ctx: { TraceId: 01jyht6txq6wfk62nbnnqejmz9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2U0N2JjOWUtZDVkM2FhMDUtOTY3NWI3NmUtNzUzZGMwYmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo 2025-06-24T20:31:25.636204Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718863. Ctx: { TraceId: 01jyht6txv1a1gjrxjvwjjpq87, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWJkMTM4MWQtNTQ4NWU4Ny1kM2E5OWE1OS0zYjc4YTY5Ng==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.651717Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718865. Ctx: { TraceId: 01jyht6ty1cc7tn9ktbmwwjbmh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTUwNmY4NmMtZWY1NjI5MDMtZmFmODRiYzAtNDNlZDYyMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.652113Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718864. Ctx: { TraceId: 01jyht6ty24d9hjtzc6kkfc8h1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWY1ODg3NTAtZmI4NTExYjAtYjUxNzIzZi04M2ExNDMzZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.654461Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718866. Ctx: { TraceId: 01jyht6tye0zkwjf7dcyxz9wsm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTNlZDhjY2MtNTc4ZDJhODQtNTAwZGM4YTUtYmVmNjI3ZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:31:25.656993Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718867. Ctx: { TraceId: 01jyht6tyea7yrcpk78ezy7shk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmRmZjkyZC1mMGMxMTc4OS01OGI5ODM4OC05OWNkMzY0MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750797064856 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-24T20:31:25.664146Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718869. Ctx: { TraceId: 01jyht6tyg0z3bx5dzv1d9q1ye, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzMzN2I3MzItMjVjZTY0NTctZjg5NzJjNjctZWVkZWZiMzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.665944Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718872. Ctx: { TraceId: 01jyht6tyh9n71ew9kzmfzy2fg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2U0N2JjOWUtZDVkM2FhMDUtOTY3NWI3NmUtNzUzZGMwYmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.667382Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718868. Ctx: { TraceId: 01jyht6tyg8dsekvxbkj03hedt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWM2MDNhZjYtYzZiZDViOTYtNGJhMjJjNmQtZGU1YjcwZWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.668597Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718870. Ctx: { TraceId: 01jyht6tyg2sbr6d2v8payyjk6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjdjNTk2Mi00Y2Q2ZmRiNi0yMTcyMzk4NS03YTcyNWFhYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.669722Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718871. Ctx: { TraceId: 01jyht6tyg7g2nyj7dsj7kkn36, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFlMTFmMmEtMTZjMWZlMmMtM2UyZWRjYTgtOWE5NWMzN2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:25.675022Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976718873. Ctx: { TraceId: 01jyht6tz2etp3e0wp1fe59rb7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWJkMTM4MWQtNTQ4NWU4Ny1kM2E5OWE1OS0zYjc4YTY5Ng==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750797064856 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 2 shards ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithReads [GOOD] Test command err: 2025-06-24T20:30:56.782449Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616101591993247:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:56.782531Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003049/r3tmp/tmpbUzIQo/pdisk_1.dat 2025-06-24T20:30:57.953224Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:57.987388Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:30:58.193016Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:58.193124Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:58.199320Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:58.244749Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9177, node 1 2025-06-24T20:30:58.759702Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:30:58.759726Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:30:58.759734Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:30:58.759880Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11966 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:30:59.361352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:11966 2025-06-24T20:31:01.783576Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616101591993247:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:01.783667Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:31:02.289946Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616127361798050:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:02.290114Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:02.691078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:03.075866Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616131656765533:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.075948Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.105624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750797062945 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 1 shards TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750797062945 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-24T20:31:03.427682Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616131656765635:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.427776Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.428477Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616131656765643:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.428523Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616131656765644:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.433813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976710660:0, at schemeshard: 72057594046644480 2025-06-24T20:31:03.434018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710660:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:03.434056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager, operationId: 281474976710660:1, at schemeshard: 72057594046644480 2025-06-24T20:31:03.434152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710660:2, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:03.434171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager/pools, operationId: 281474976710660:2, at schemeshard: 72057594046644480 2025-06-24T20:31:03.434238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710660:3, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:03.434425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_resource_pool.cpp:148: [72057594046644480] TCreateResourcePool Propose: opId# 281474976710660:3, path# /Root/.metadata/workload_manager/pools/default 2025-06-24T20:31:03.435116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710660:3 1 -> 128 2025-06-24T20:31:03.443543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710660:4, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:03.443585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-2 ... sjrymgq14kj50pp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjYxNzllNzAtODBkN2Q0YmQtMjBjNzE2NzYtMTJkMzg5YTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:23.566351Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976714764, task: 1, CA Id [1:7519616217556157632:2335]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-24T20:31:23.566354Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976714763, task: 1, CA Id [1:7519616217556157622:2334]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-24T20:31:23.566843Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714897. Ctx: { TraceId: 01jyht6rxae25ez8dfka5qd2y4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGJhZDI1MjQtZWQ5ZWE4OTctY2U5ZDM4N2ItNzFlYzEzODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:23.577661Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714898. 
Ctx: { TraceId: 01jyht6rxkbnqpg1m3ds40kzg4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Njk3NDBjZWItZWJiNzFjMDEtNzRkOWY3Y2ItYThmYWU2NTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:23.577918Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714899. Ctx: { TraceId: 01jyht6rxka7q7zb5hzr5rmew7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODc3ZTgyYzQtODQ0NjMwMjktYmI0ZjAyMTgtY2ExNTVjZmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:23.580159Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714900. Ctx: { TraceId: 01jyht6rxq45zhp1n97g95es1y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTExNDdkNjAtMTFjM2NlY2ItZWY5ZWRhYzUtZmM2MmM4MDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:23.580178Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714901. Ctx: { TraceId: 01jyht6rxq895422p6qe88g32y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjQ0MTUzYjMtODcwYmM0M2QtZTY3Y2IwMDctOWRmZjAwZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:23.581725Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714903. Ctx: { TraceId: 01jyht6rxqf2sb55ags1tmh9mp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjYxNzllNzAtODBkN2Q0YmQtMjBjNzE2NzYtMTJkMzg5YTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:23.581919Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714902. Ctx: { TraceId: 01jyht6rxq9fbx0hksq078kva1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTRkMDI1NjktZjZhZjQ0N2ItOTVkNWViZmYtYTBjZWIwNGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:23.587870Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714904. Ctx: { TraceId: 01jyht6ry0ehb8wfv04hagmkhe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGJhZDI1MjQtZWQ5ZWE4OTctY2U5ZDM4N2ItNzFlYzEzODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:23.589065Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714905. Ctx: { TraceId: 01jyht6ry279ffpjn0t0nw6hdz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODc3ZTgyYzQtODQ0NjMwMjktYmI0ZjAyMTgtY2ExNTVjZmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo 2025-06-24T20:31:23.593049Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714906. Ctx: { TraceId: 01jyht6ry61a4ggsnbqc126r44, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Njk3NDBjZWItZWJiNzFjMDEtNzRkOWY3Y2ItYThmYWU2NTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:23.593667Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714907. Ctx: { TraceId: 01jyht6ry7czs79e3negf6m0vf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjYxNzllNzAtODBkN2Q0YmQtMjBjNzE2NzYtMTJkMzg5YTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:31:23.623099Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714913. Ctx: { TraceId: 01jyht6ryrehv0rf34zxracfn5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjYxNzllNzAtODBkN2Q0YmQtMjBjNzE2NzYtMTJkMzg5YTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:23.623393Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714909. Ctx: { TraceId: 01jyht6ryh6tzfa0981qyjn85h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTExNDdkNjAtMTFjM2NlY2ItZWY5ZWRhYzUtZmM2MmM4MDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:23.623828Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714912. Ctx: { TraceId: 01jyht6ryr86dx0tmxea7q340x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Njk3NDBjZWItZWJiNzFjMDEtNzRkOWY3Y2ItYThmYWU2NTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:23.624080Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714910. Ctx: { TraceId: 01jyht6ryr7wgsfhekcg0964sv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGJhZDI1MjQtZWQ5ZWE4OTctY2U5ZDM4N2ItNzFlYzEzODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:23.624544Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714914. Ctx: { TraceId: 01jyht6ryz0djf8yrm8c95bvjr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODc3ZTgyYzQtODQ0NjMwMjktYmI0ZjAyMTgtY2ExNTVjZmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:23.624636Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714911. Ctx: { TraceId: 01jyht6ryraj42h1qdg4nqc789, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjQ0MTUzYjMtODcwYmM0M2QtZTY3Y2IwMDctOWRmZjAwZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:23.625072Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714908. Ctx: { TraceId: 01jyht6ryhbv8d0rzhtehj9a1t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTRkMDI1NjktZjZhZjQ0N2ItOTVkNWViZmYtYTBjZWIwNGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750797062945 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-24T20:31:23.664061Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976714765, task: 1, CA Id [1:7519616217556157639:2341]. 
Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-24T20:31:23.993086Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976714764, task: 1, CA Id [1:7519616217556157632:2335]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-24T20:31:24.203189Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976714765, task: 1, CA Id [1:7519616217556157639:2341]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-24T20:31:24.203256Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976714763, task: 1, CA Id [1:7519616217556157622:2334]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-24T20:31:24.569368Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976714764, task: 1, CA Id [1:7519616217556157632:2335]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-24T20:31:24.803422Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976714763, task: 1, CA Id [1:7519616217556157622:2334]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-24T20:31:25.092236Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976714765, task: 1, CA Id [1:7519616217556157639:2341]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-24T20:31:25.459259Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976714763, task: 1, CA Id [1:7519616217556157622:2334]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-24T20:31:25.459616Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976714764, task: 1, CA Id [1:7519616217556157632:2335]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-24T20:31:25.794324Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976714765, task: 1, CA Id [1:7519616217556157639:2341]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-24T20:31:26.023733Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976714764, task: 1, CA Id [1:7519616217556157632:2335]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-24T20:31:26.291608Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976714763, task: 1, CA Id [1:7519616217556157622:2334]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-24T20:31:26.508820Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976714765, task: 1, CA Id [1:7519616217556157639:2341]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750797062945 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... 
(TRUNCATED) Table has 2 shards >> THiveTest::TestHiveBalancer [GOOD] >> THiveTest::TestFollowersCrossDC_Easy |88.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> KqpPg::DropTablePgMultiple [GOOD] >> KqpPg::DropTableIfExists |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/cms/ut/ydb-services-cms-ut |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/cms/ut/ydb-services-cms-ut |88.5%| [LD] {RESULT} $(B)/ydb/services/cms/ut/ydb-services-cms-ut >> KqpBatchDelete::Returning >> TContinuousBackupTests::TakeIncrementalBackup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::Basic_EmptyDict [GOOD] Test command err: 2025-06-24T20:29:13.791977Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615659089230091:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:13.792030Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0624 20:29:15.230014978 105396 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:15.230220066 105396 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:15.318823Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:29:15.331998Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:16.472344Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:61429: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:61429 2025-06-24T20:29:16.535385Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:61429: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:61429 } ] 2025-06-24T20:29:18.597894Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:61429: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:61429 } ] 2025-06-24T20:29:19.429394Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519615659089230091:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:29:19.433814Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:29:19.435211Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:20.312819417 105765 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:20.312942573 105765 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:20.495111Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:20.501294Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:21.514995Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:61429: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:61429 } ] 2025-06-24T20:29:21.731323Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:61429: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:61429 2025-06-24T20:29:23.637352Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:23.639204Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:24.639460Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:24.640824Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:25.596788509 105765 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:25.597893272 105765 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:26.072856Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:61429: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:61429 2025-06-24T20:29:26.084508Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:61429: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:61429 } ] 2025-06-24T20:29:28.442605Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:28.442640Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:29.444173Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:29.444200Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:30.447767Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:30.447800Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:30.693301020 105764 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:30.693666057 105764 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:31.592171Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:31.592193Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:32.603645Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:32.603671Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:33.142402Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:61429: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:61429 } ] 2025-06-24T20:29:33.378721Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:61429: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:61429 2025-06-24T20:29:33.799833Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:33.799862Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:34.807761Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:34.808075Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0624 20:29:35.769737600 105764 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 20:29:35.771391986 105764 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T20:29:36.067269Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:36.067311Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:37.075688Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:29:37.083380Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00327a/r3tmp/tmpGMqhHl/pdisk_1.dat 2025-06-24T20:29:37.407975Z n ... 
pp:648: SyncQuota finished with error: 2025-06-24T20:31:26.817431Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.817464Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.817653Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.817686Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.817795Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.817916Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.818091Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.818147Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.818261Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.818370Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.818476Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.818591Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.818703Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.818815Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.818924Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.819035Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.819170Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.821256Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.821348Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.821384Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.821706Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.821741Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.821767Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.822032Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.822065Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.822092Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.822201Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.822305Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.822414Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.822596Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.822708Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota 
finished with error: 2025-06-24T20:31:26.822868Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.822982Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.823008Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.823185Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.823246Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.823277Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.823381Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.823568Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.823776Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.823812Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.823945Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.823993Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.824136Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.824252Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.824387Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.824676Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.824712Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.824745Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.825064Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.825107Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.825140Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.825433Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.825470Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.825501Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.825626Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.825738Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.825862Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.826107Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.826145Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.826190Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.826306Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 
2025-06-24T20:31:26.826599Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.826636Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.826667Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.826995Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.827031Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.827059Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.827300Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.827337Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.827523Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.827557Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.827677Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.827838Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.827943Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.828051Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.828182Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.828227Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.828335Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.828444Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.828556Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.828718Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.828758Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.828862Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.828967Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.829146Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.829177Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.829378Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.829406Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.829512Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.829670Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.829774Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.829869Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 
2025-06-24T20:31:26.829966Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.830008Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.830106Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.830201Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.830312Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.830475Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.830526Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.830663Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.830886Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.830921Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-24T20:31:26.831125Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: >> THiveTest::TestSpreadNeighboursWithUpdateTabletsObject [GOOD] >> THiveTest::TestSpreadNeighboursDifferentOwners |88.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |88.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut |88.5%| [LD] {RESULT} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut >> DBase::KIKIMR_15598_Many_MemTables [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup/unittest >> TContinuousBackupTests::TakeIncrementalBackup [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:31:30.805883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:31:30.805975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:30.806029Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:31:30.806074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:31:30.806136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:31:30.806165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: 
OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:31:30.806227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:30.806302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:31:30.807055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:31:30.807529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:31:30.889578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:31:30.889644Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:30.910852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:31:30.915190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:31:30.915382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:31:30.927691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:31:30.927944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:31:30.928602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:30.928999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:31:30.932733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:30.932904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:31:30.933916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:30.934008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:30.934119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:31:30.934160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:30.934201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:31:30.934327Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at 
schemeshard: 72057594046678944 2025-06-24T20:31:30.942224Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:31:31.082289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:31.082561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:31.082786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:31:31.082830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:31:31.083087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:31:31.083183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:31.087713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:31.087946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:31:31.088238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:31.088303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:31:31.088348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:31:31.088384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:31:31.096311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:31.096392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:31:31.096439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:31:31.102901Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:31.103014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:31.103072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:31.103140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:31:31.107122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:31.109769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:31:31.109972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:31:31.111050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:31.111244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:31.111314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:31.111619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:31:31.111669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:31.111853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:31.111938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:31:31.115424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:31.115490Z node 1 :FLAT_TX_SCHEMESHARD ... 
alPathId: 3] was 3 2025-06-24T20:31:32.493046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:1 2025-06-24T20:31:32.493066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:1 2025-06-24T20:31:32.493141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T20:31:32.493169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:2 2025-06-24T20:31:32.493187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:2 2025-06-24T20:31:32.493233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-24T20:31:32.493277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:3 2025-06-24T20:31:32.493303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:3 2025-06-24T20:31:32.493360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-24T20:31:32.500415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:31:32.500506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:727:2629] TestWaitNotification: OK eventTxId 103 2025-06-24T20:31:32.501204Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/IncrBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T20:31:32.501528Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/IncrBackupImpl" took 342us result status StatusSuccess 2025-06-24T20:31:32.502092Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/IncrBackupImpl" PathDescription { Self { Name: "IncrBackupImpl" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupImpl" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY 
ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:32.502747Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T20:31:32.503080Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/continuousBackupImpl/streamImpl" took 278us result status StatusSuccess 2025-06-24T20:31:32.515029Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" PathDescription { Self { Name: "streamImpl" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeStreamImpl Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409548 } PersQueueGroup { Name: "streamImpl" PathId: 4 TotalGroupCount: 1 PartitionPerTablet: 2 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 } TopicName: "continuousBackupImpl" TopicPath: "/MyRoot/Table/continuousBackupImpl/streamImpl" YdbDatabasePath: "/MyRoot" PartitionKeySchema { Name: "key" TypeId: 4 } MeteringMode: 
METERING_MODE_REQUEST_UNITS OffloadConfig { IncrementalBackup { DstPath: "/MyRoot/IncrBackupImpl" DstPathId { OwnerId: 72057594046678944 LocalId: 5 } } } } Partitions { PartitionId: 0 TabletId: 72075186233409547 Status: Active } AlterVersion: 2 BalancerTabletID: 72075186233409548 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:32.516600Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/IncrBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T20:31:32.516876Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/IncrBackupImpl" took 272us result status StatusSuccess 2025-06-24T20:31:32.528002Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/IncrBackupImpl" PathDescription { Self { Name: "IncrBackupImpl" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupImpl" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 
LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> AsyncIndexChangeExchange::ShouldDeliverChangesOnAlteredTable [GOOD] >> AsyncIndexChangeExchange::ShouldRemoveRecordsAfterDroppingIndex |88.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest >> THiveTest::TestLocalReplacement [GOOD] >> THiveTest::TestHiveRestart >> Backpressure::MonteCarlo [GOOD] >> Cdc::NewImageLogDebezium [GOOD] >> Cdc::NaN[PqRunner] >> KqpQueryService::TableSink_HtapComplex+withOltpSink >> TPersqueueControlPlaneTestSuite::SetupReadLockSessionWithDatabase [GOOD] >> TPersqueueControlPlaneTestSuite::SetupWriteLockSessionWithDatabase >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithInvalidToken_SessionClosedWithUnauthenticatedError [GOOD] >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithValidTokenButWithoutACL_SessionClosedWithUnauthorizedError >> TPersQueueCommonTest::Auth_CreateGrpcStreamWithInvalidTokenInInitialMetadata_SessionClosedWithUnauthenticatedError [GOOD] >> TPersQueueCommonTest::Auth_MultipleInflightWriteUpdateTokenRequestWithDifferentValidToken_SessionClosedWithOverloadedError >> Cdc::VirtualTimestamps[TopicRunner] [GOOD] >> Cdc::Write[PqRunner] >> KqpQueryService::AlterTempTable >> KqpQueryService::PeriodicTaskInSessionPool ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> DBase::KIKIMR_15598_Many_MemTables [GOOD] Test command err: 3 parts: [0:0:1:0:0:0:0] 167 rows, 7 pages, 1 levels: (91, 38) (166, 63) (325, 116) (394, 139) (481, 168) [0:0:2:0:0:0:0] 166 rows, 8 pages, 2 levels: (631, 218) (709, 244) (853, 292) (934, 319) (1087, 370) [0:0:3:0:0:0:0] 167 rows, 8 pages, 2 levels: (1156, 393) (1246, 423) (1396, 473) (1471, 498) (1633, 552) Checking BTree: Touched 100% bytes, 7 pages RowCountHistogram: 5% (actual 5%) key = (91, 38) value = 25 (actual 25 - 0% error) 5% (actual 5%) key = (166, 63) value = 50 (actual 50 - 0% error) 4% (actual 4%) key = (253, 92) value = 74 (actual 74 - 0% error) 4% (actual 4%) key = (325, 116) value = 96 (actual 96 - 0% error) 4% (actual 4%) key = (394, 139) value = 119 (actual 119 - 0% error) 5% (actual 5%) key = (481, 168) value = 144 (actual 144 - 0% error) 4% (actual 
4%) key = (553, 192) value = 167 (actual 166 - 0% error) 4% (actual 5%) key = (631, 218) value = 191 (actual 191 - 0% error) 4% (actual 4%) key = (709, 244) value = 215 (actual 215 - 0% error) 3% (actual 3%) key = (766, 263) value = 234 (actual 234 - 0% error) 5% (actual 5%) key = (853, 292) value = 261 (actual 261 - 0% error) 4% (actual 4%) key = (934, 319) value = 285 (actual 285 - 0% error) 4% (actual 4%) key = (1006, 343) value = 309 (actual 309 - 0% error) 4% (actual 4%) key = (1087, 370) value = 333 (actual 332 - 0% error) 4% (actual 4%) key = (1156, 393) value = 354 (actual 354 - 0% error) 5% (actual 5%) key = (1246, 423) value = 380 (actual 380 - 0% error) 4% (actual 4%) key = (1324, 449) value = 404 (actual 404 - 0% error) 4% (actual 4%) key = (1396, 473) value = 426 (actual 426 - 0% error) 4% (actual 4%) key = (1471, 498) value = 448 (actual 448 - 0% error) 4% (actual 4%) key = (1543, 522) value = 470 (actual 470 - 0% error) 5% (actual 5%) key = (1633, 552) value = 496 (actual 496 - 0% error) 0% (actual 0%) DataSizeHistogram: 4% (actual 4%) key = (91, 38) value = 1974 (actual 1974 - 0% error) 4% (actual 4%) key = (166, 63) value = 3992 (actual 3992 - 0% error) 4% (actual 4%) key = (253, 92) value = 5889 (actual 5889 - 0% error) 4% (actual 4%) key = (325, 116) value = 7868 (actual 7868 - 0% error) 4% (actual 4%) key = (394, 139) value = 9910 (actual 9910 - 0% error) 4% (actual 4%) key = (481, 168) value = 11938 (actual 11938 - 0% error) 4% (actual 4%) key = (553, 192) value = 13685 (actual 13685 - 0% error) 4% (actual 4%) key = (631, 218) value = 15674 (actual 15674 - 0% error) 4% (actual 4%) key = (709, 244) value = 17709 (actual 17709 - 0% error) 4% (actual 4%) key = (766, 263) value = 19664 (actual 19664 - 0% error) 4% (actual 4%) key = (853, 292) value = 21673 (actual 21673 - 0% error) 4% (actual 4%) key = (934, 319) value = 23712 (actual 23712 - 0% error) 4% (actual 4%) key = (1006, 343) value = 25687 (actual 25687 - 0% error) 4% (actual 4%) key = (1087, 370) value = 27765 (actual 27678 - 0% error) 4% (actual 4%) key = (1156, 393) value = 29741 (actual 29741 - 0% error) 4% (actual 4%) key = (1246, 423) value = 31726 (actual 31726 - 0% error) 4% (actual 4%) key = (1324, 449) value = 33698 (actual 33698 - 0% error) 4% (actual 4%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 4% (actual 4%) key = (1471, 498) value = 37620 (actual 37620 - 0% error) 4% (actual 4%) key = (1543, 522) value = 39641 (actual 39641 - 0% error) 4% (actual 4%) key = (1633, 552) value = 41669 (actual 41669 - 0% error) 0% (actual 0%) Checking Flat: Touched 100% bytes, 3 pages RowCountHistogram: 5% (actual 5%) key = (91, 38) value = 25 (actual 25 - 0% error) 5% (actual 5%) key = (166, 63) value = 50 (actual 50 - 0% error) 4% (actual 4%) key = (253, 92) value = 74 (actual 74 - 0% error) 4% (actual 4%) key = (325, 116) value = 96 (actual 96 - 0% error) 4% (actual 4%) key = (394, 139) value = 119 (actual 119 - 0% error) 5% (actual 5%) key = (481, 168) value = 144 (actual 144 - 0% error) 4% (actual 4%) key = (556, 193) value = 167 (actual 167 - 0% error) 4% (actual 4%) key = (631, 218) value = 191 (actual 191 - 0% error) 4% (actual 4%) key = (709, 244) value = 215 (actual 215 - 0% error) 3% (actual 3%) key = (766, 263) value = 234 (actual 234 - 0% error) 5% (actual 5%) key = (853, 292) value = 261 (actual 261 - 0% error) 4% (actual 4%) key = (934, 319) value = 285 (actual 285 - 0% error) 4% (actual 4%) key = (1006, 343) value = 309 (actual 309 - 0% error) 4% (actual 4%) key = (1087, 370) value = 332 
(actual 332 - 0% error) 0% (actual 0%) key = (1090, 371) value = 333 (actual 333 - 0% error) 4% (actual 4%) key = (1156, 393) value = 354 (actual 354 - 0% error) 5% (actual 5%) key = (1246, 423) value = 380 (actual 380 - 0% error) 4% (actual 4%) key = (1324, 449) value = 404 (actual 404 - 0% error) 4% (actual 4%) key = (1396, 473) value = 426 (actual 426 - 0% error) 4% (actual 4%) key = (1471, 498) value = 448 (actual 448 - 0% error) 4% (actual 4%) key = (1543, 522) value = 470 (actual 470 - 0% error) 5% (actual 5%) key = (1633, 552) value = 496 (actual 496 - 0% error) 0% (actual 0%) DataSizeHistogram: 4% (actual 4%) key = (91, 38) value = 1974 (actual 1974 - 0% error) 4% (actual 4%) key = (166, 63) value = 3992 (actual 3992 - 0% error) 4% (actual 4%) key = (253, 92) value = 5889 (actual 5889 - 0% error) 4% (actual 4%) key = (325, 116) value = 7868 (actual 7868 - 0% error) 4% (actual 4%) key = (394, 139) value = 9910 (actual 9910 - 0% error) 4% (actual 4%) key = (481, 168) value = 11938 (actual 11938 - 0% error) 4% (actual 4%) key = (556, 193) value = 13685 (actual 13685 - 0% error) 4% (actual 4%) key = (631, 218) value = 15674 (actual 15674 - 0% error) 4% (actual 4%) key = (709, 244) value = 17709 (actual 17709 - 0% error) 4% (actual 4%) key = (766, 263) value = 19664 (actual 19664 - 0% error) 4% (actual 4%) key = (853, 292) value = 21673 (actual 21673 - 0% error) 4% (actual 4%) key = (934, 319) value = 23712 (actual 23712 - 0% error) 4% (actual 4%) key = (1006, 343) value = 25687 (actual 25687 - 0% error) 4% (actual 4%) key = (1087, 370) value = 27678 (actual 27678 - 0% error) 0% (actual 0%) key = (1090, 371) value = 27765 (actual 27765 - 0% error) 4% (actual 4%) key = (1156, 393) value = 29741 (actual 29741 - 0% error) 4% (actual 4%) key = (1246, 423) value = 31726 (actual 31726 - 0% error) 4% (actual 4%) key = (1324, 449) value = 33698 (actual 33698 - 0% error) 4% (actual 4%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 4% (actual 4%) key = (1471, 498) value = 37620 (actual 37620 - 0% error) 4% (actual 4%) key = (1543, 522) value = 39641 (actual 39641 - 0% error) 4% (actual 4%) key = (1633, 552) value = 41669 (actual 41669 - 0% error) 0% (actual 0%) Checking Mixed: Touched 100% bytes, 7 pages RowCountHistogram: 14% (actual 5%) key = (91, 38) value = 70 (actual 25 - 9% error) 5% (actual 5%) key = (166, 63) value = 95 (actual 50 - 9% error) 4% (actual 4%) key = (253, 92) value = 119 (actual 74 - 9% error) 4% (actual 4%) key = (325, 116) value = 141 (actual 96 - 9% error) 4% (actual 4%) key = (394, 139) value = 164 (actual 119 - 9% error) 5% (actual 5%) key = (481, 168) value = 189 (actual 144 - 9% error) 4% (actual 9%) key = (631, 218) value = 212 (actual 191 - 4% error) 4% (actual 4%) key = (709, 244) value = 236 (actual 215 - 4% error) 3% (actual 3%) key = (766, 263) value = 255 (actual 234 - 4% error) 5% (actual 5%) key = (853, 292) value = 282 (actual 261 - 4% error) 4% (actual 4%) key = (934, 319) value = 306 (actual 285 - 4% error) 4% (actual 4%) key = (1006, 343) value = 330 (actual 309 - 4% error) 4% (actual 4%) key = (1087, 370) value = 353 (actual 332 - 4% error) 0% (actual 4%) key = (1156, 393) value = 354 (actual 354 - 0% error) 5% (actual 5%) key = (1246, 423) value = 380 (actual 380 - 0% error) 4% (actual 4%) key = (1324, 449) value = 404 (actual 404 - 0% error) 4% (actual 4%) key = (1396, 473) value = 426 (actual 426 - 0% error) 4% (actual 4%) key = (1471, 498) value = 448 (actual 448 - 0% error) 4% (actual 4%) key = (1543, 522) value = 470 (actual 470 - 0% 
error) 5% (actual 5%) key = (1633, 552) value = 496 (actual 496 - 0% error) 0% (actual 0%) DataSizeHistogram: 14% (actual 4%) key = (91, 38) value = 5939 (actual 1974 - 9% error) 4% (actual 4%) key = (166, 63) value = 7957 (actual 3992 - 9% error) 4% (actual 4%) key = (253, 92) value = 9854 (actual 5889 - 9% error) 4% (actual 4%) key = (325, 116) value = 11833 (actual 7868 - 9% error) 4% (actual 4%) key = (394, 139) value = 13875 (actual 9910 - 9% error) 4% (actual 4%) key = (481, 168) value = 15903 (actual 11938 - 9% error) 4% (actual 8%) key = (631, 218) value = 17650 (actual 15674 - 4% error) 4% (actual 4%) key = (709, 244) value = 19685 (actual 17709 - 4% error) 4% (actual 4%) key = (766, 263) value = 21640 (actual 19664 - 4% error) 4% (actual 4%) key = (853, 292) value = 23649 (actual 21673 - 4% error) 4% (actual 4%) key = (934, 319) value = 25688 (actual 23712 - 4% error) 4% (actual 4%) key = (1006, 343) value = 27663 (actual 25687 - 4% error) 4% (actual 4%) key = (1087, 370) value = 29654 (actual 27678 - 4% error) 0% (actual 4%) key = (1156, 393) value = 29741 (actual 29741 - 0% error) 4% (actual 4%) key = (1246, 423) value = 31726 (actual 31726 - 0% error) 4% (actual 4%) key = (1324, 449) value = 33698 (actual 33698 - 0% error) 4% (actual 4%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 4% (actual 4%) key = (1471, 498) value = 37620 (actual 37620 - 0% error) 4% (actual 4%) key = (1543, 522) value = 39641 (actual 39641 - 0% error) 4% (actual 4%) key = (1633, 552) value = 41669 (actual 41669 - 0% error) 0% (actual 0%) 3 parts: [0:0:1:0:0:0:0] 167 rows, 7 pages, 1 levels: (91, 38) (166, 63) (325, 116) (394, 139) (481, 168) [0:0:2:0:0:0:0] 166 rows, 8 pages, 2 levels: (631, 218) (709, 244) (853, 292) (934, 319) (1087, 370) [0:0:3:0:0:0:0] 167 rows, 8 pages, 2 levels: (1156, 393) (1246, 423) (1396, 473) (1471, 498) (1633, 552) Checking BTree: Touched 33% bytes, 2 pages RowCountHistogram: 14% (actual 14%) key = (253, 92) value = 74 (actual 74 - 0% error) 18% (actual 18%) key = (553, 192) value = 167 (actual 166 - 0% error) 33% (actual 33%) key = (1087, 370) value = 333 (actual 332 - 0% error) 18% (actual 18%) key = (1396, 473) value = 426 (actual 426 - 0% error) 14% (actual 14%) DataSizeHistogram: 14% (actual 14%) key = (253, 92) value = 5889 (actual 5889 - 0% error) 18% (actual 18%) key = (553, 192) value = 13685 (actual 13685 - 0% error) 33% (actual 33%) key = (1087, 370) value = 27765 (actual 27678 - 0% error) 18% (actual 19%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 15% (actual 15%) Checking Flat: Touched 100% bytes, 3 pages RowCountHistogram: 23% (actual 23%) key = (394, 139) value = 119 (actual 119 - 0% error) 23% (actual 23%) key = (766, 263) value = 234 (actual 234 - 0% error) 24% (actual 24%) key = (1156, 393) value = 354 (actual 354 - 0% error) 23% (actual 23%) key = (1543, 522) value = 470 (actual 470 - 0% error) 6% (actual 6%) DataSi ... 
85 - 0% error) 4% (actual 4%) key = (631, 218) value = 15674 (actual 15674 - 0% error) 4% (actual 4%) key = (709, 244) value = 17709 (actual 17709 - 0% error) 4% (actual 4%) key = (766, 263) value = 19664 (actual 19664 - 0% error) 4% (actual 4%) key = (853, 292) value = 21673 (actual 21673 - 0% error) 4% (actual 4%) key = (934, 319) value = 23712 (actual 23712 - 0% error) 4% (actual 4%) key = (1006, 343) value = 25687 (actual 25687 - 0% error) 4% (actual 4%) key = (1087, 370) value = 27765 (actual 27678 - 0% error) 4% (actual 4%) key = (1156, 393) value = 29741 (actual 29741 - 0% error) 4% (actual 4%) key = (1246, 423) value = 31726 (actual 31726 - 0% error) 4% (actual 4%) key = (1324, 449) value = 33698 (actual 33698 - 0% error) 4% (actual 4%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 4% (actual 4%) key = (1471, 498) value = 37620 (actual 37620 - 0% error) 4% (actual 4%) key = (1543, 522) value = 39641 (actual 39641 - 0% error) 4% (actual 4%) key = (1633, 552) value = 41669 (actual 41669 - 0% error) 0% (actual 0%) Checking Flat: Touched 100% bytes, 3 pages RowCountHistogram: 5% (actual 5%) key = (91, 38) value = 25 (actual 25 - 0% error) 5% (actual 5%) key = (166, 63) value = 50 (actual 50 - 0% error) 4% (actual 4%) key = (253, 92) value = 74 (actual 74 - 0% error) 4% (actual 4%) key = (325, 116) value = 96 (actual 96 - 0% error) 4% (actual 4%) key = (394, 139) value = 119 (actual 119 - 0% error) 5% (actual 5%) key = (481, 168) value = 144 (actual 144 - 0% error) 4% (actual 4%) key = (556, 193) value = 167 (actual 167 - 0% error) 4% (actual 4%) key = (631, 218) value = 191 (actual 191 - 0% error) 4% (actual 4%) key = (709, 244) value = 215 (actual 215 - 0% error) 3% (actual 3%) key = (766, 263) value = 234 (actual 234 - 0% error) 5% (actual 5%) key = (853, 292) value = 261 (actual 261 - 0% error) 4% (actual 4%) key = (934, 319) value = 285 (actual 285 - 0% error) 4% (actual 4%) key = (1006, 343) value = 309 (actual 309 - 0% error) 4% (actual 4%) key = (1087, 370) value = 332 (actual 332 - 0% error) 0% (actual 0%) key = (1090, 371) value = 333 (actual 333 - 0% error) 4% (actual 4%) key = (1156, 393) value = 354 (actual 354 - 0% error) 5% (actual 5%) key = (1246, 423) value = 380 (actual 380 - 0% error) 4% (actual 4%) key = (1324, 449) value = 404 (actual 404 - 0% error) 4% (actual 4%) key = (1396, 473) value = 426 (actual 426 - 0% error) 4% (actual 4%) key = (1471, 498) value = 448 (actual 448 - 0% error) 4% (actual 4%) key = (1543, 522) value = 470 (actual 470 - 0% error) 5% (actual 5%) key = (1633, 552) value = 496 (actual 496 - 0% error) 0% (actual 0%) DataSizeHistogram: 4% (actual 4%) key = (91, 38) value = 1974 (actual 1974 - 0% error) 4% (actual 4%) key = (166, 63) value = 3992 (actual 3992 - 0% error) 4% (actual 4%) key = (253, 92) value = 5889 (actual 5889 - 0% error) 4% (actual 4%) key = (325, 116) value = 7868 (actual 7868 - 0% error) 4% (actual 4%) key = (394, 139) value = 9910 (actual 9910 - 0% error) 4% (actual 4%) key = (481, 168) value = 11938 (actual 11938 - 0% error) 4% (actual 4%) key = (556, 193) value = 13685 (actual 13685 - 0% error) 4% (actual 4%) key = (631, 218) value = 15674 (actual 15674 - 0% error) 4% (actual 4%) key = (709, 244) value = 17709 (actual 17709 - 0% error) 4% (actual 4%) key = (766, 263) value = 19664 (actual 19664 - 0% error) 4% (actual 4%) key = (853, 292) value = 21673 (actual 21673 - 0% error) 4% (actual 4%) key = (934, 319) value = 23712 (actual 23712 - 0% error) 4% (actual 4%) key = (1006, 343) value = 25687 (actual 25687 - 0% 
error) 4% (actual 4%) key = (1087, 370) value = 27678 (actual 27678 - 0% error) 0% (actual 0%) key = (1090, 371) value = 27765 (actual 27765 - 0% error) 4% (actual 4%) key = (1156, 393) value = 29741 (actual 29741 - 0% error) 4% (actual 4%) key = (1246, 423) value = 31726 (actual 31726 - 0% error) 4% (actual 4%) key = (1324, 449) value = 33698 (actual 33698 - 0% error) 4% (actual 4%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 4% (actual 4%) key = (1471, 498) value = 37620 (actual 37620 - 0% error) 4% (actual 4%) key = (1543, 522) value = 39641 (actual 39641 - 0% error) 4% (actual 4%) key = (1633, 552) value = 41669 (actual 41669 - 0% error) 0% (actual 0%) Checking Mixed: Touched 100% bytes, 3 pages RowCountHistogram: 14% (actual 5%) key = (91, 38) value = 70 (actual 25 - 9% error) 5% (actual 5%) key = (166, 63) value = 95 (actual 50 - 9% error) 4% (actual 4%) key = (253, 92) value = 119 (actual 74 - 9% error) 4% (actual 4%) key = (325, 116) value = 141 (actual 96 - 9% error) 4% (actual 4%) key = (394, 139) value = 164 (actual 119 - 9% error) 5% (actual 5%) key = (481, 168) value = 189 (actual 144 - 9% error) 4% (actual 9%) key = (631, 218) value = 212 (actual 191 - 4% error) 4% (actual 4%) key = (709, 244) value = 236 (actual 215 - 4% error) 3% (actual 3%) key = (766, 263) value = 255 (actual 234 - 4% error) 5% (actual 5%) key = (853, 292) value = 282 (actual 261 - 4% error) 4% (actual 4%) key = (934, 319) value = 306 (actual 285 - 4% error) 4% (actual 4%) key = (1006, 343) value = 330 (actual 309 - 4% error) 4% (actual 4%) key = (1087, 370) value = 353 (actual 332 - 4% error) 0% (actual 4%) key = (1156, 393) value = 354 (actual 354 - 0% error) 5% (actual 5%) key = (1246, 423) value = 380 (actual 380 - 0% error) 4% (actual 4%) key = (1324, 449) value = 404 (actual 404 - 0% error) 4% (actual 4%) key = (1396, 473) value = 426 (actual 426 - 0% error) 4% (actual 4%) key = (1471, 498) value = 448 (actual 448 - 0% error) 4% (actual 4%) key = (1543, 522) value = 470 (actual 470 - 0% error) 5% (actual 5%) key = (1633, 552) value = 496 (actual 496 - 0% error) 0% (actual 0%) DataSizeHistogram: 14% (actual 4%) key = (91, 38) value = 5939 (actual 1974 - 9% error) 4% (actual 4%) key = (166, 63) value = 7957 (actual 3992 - 9% error) 4% (actual 4%) key = (253, 92) value = 9854 (actual 5889 - 9% error) 4% (actual 4%) key = (325, 116) value = 11833 (actual 7868 - 9% error) 4% (actual 4%) key = (394, 139) value = 13875 (actual 9910 - 9% error) 4% (actual 4%) key = (481, 168) value = 15903 (actual 11938 - 9% error) 4% (actual 8%) key = (631, 218) value = 17650 (actual 15674 - 4% error) 4% (actual 4%) key = (709, 244) value = 19685 (actual 17709 - 4% error) 4% (actual 4%) key = (766, 263) value = 21640 (actual 19664 - 4% error) 4% (actual 4%) key = (853, 292) value = 23649 (actual 21673 - 4% error) 4% (actual 4%) key = (934, 319) value = 25688 (actual 23712 - 4% error) 4% (actual 4%) key = (1006, 343) value = 27663 (actual 25687 - 4% error) 4% (actual 4%) key = (1087, 370) value = 29654 (actual 27678 - 4% error) 0% (actual 4%) key = (1156, 393) value = 29741 (actual 29741 - 0% error) 4% (actual 4%) key = (1246, 423) value = 31726 (actual 31726 - 0% error) 4% (actual 4%) key = (1324, 449) value = 33698 (actual 33698 - 0% error) 4% (actual 4%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 4% (actual 4%) key = (1471, 498) value = 37620 (actual 37620 - 0% error) 4% (actual 4%) key = (1543, 522) value = 39641 (actual 39641 - 0% error) 4% (actual 4%) key = (1633, 552) value = 41669 
(actual 41669 - 0% error) 0% (actual 0%) 3 parts: [0:0:1:0:0:0:0] 167 rows, 1 pages, 0 levels: () () () () () [0:0:2:0:0:0:0] 166 rows, 1 pages, 0 levels: () () () () () [0:0:3:0:0:0:0] 167 rows, 1 pages, 0 levels: () () () () () Checking BTree: Touched 0% bytes, 0 pages RowCountHistogram: 33% (actual 33%) key = (553, 192) value = 167 (actual 166 - 0% error) 33% (actual 33%) key = (1087, 370) value = 333 (actual 332 - 0% error) 33% (actual 33%) DataSizeHistogram: 32% (actual 32%) key = (553, 192) value = 13565 (actual 13565 - 0% error) 33% (actual 33%) key = (1087, 370) value = 27505 (actual 27505 - 0% error) 33% (actual 33%) Checking Flat: Touched 100% bytes, 3 pages RowCountHistogram: 33% (actual 33%) key = (556, 193) value = 167 (actual 167 - 0% error) 33% (actual 33%) key = (1090, 371) value = 333 (actual 333 - 0% error) 33% (actual 33%) DataSizeHistogram: 32% (actual 32%) key = (556, 193) value = 13565 (actual 13565 - 0% error) 33% (actual 33%) key = (1090, 371) value = 27505 (actual 27505 - 0% error) 33% (actual 33%) Checking Mixed: Touched 0% bytes, 0 pages RowCountHistogram: 100% (actual 100%) DataSizeHistogram: 100% (actual 100%) Got : 24000 2106439 49449 38 44 Expected: 24000 2106439 49449 38 44 { [2455, 2599), [2798, 3624), [4540, 4713), [5654, 7161), [8509, 8794), [8936, 9973), [11888, 14280), [14337, 14882), [15507, 16365), [17368, 19451), [19536, 20135), [20790, 21503), [21589, 23243) } Got : 12816 1121048 49449 20 23 Expected: 12816 1121048 49449 20 23 Got : 24000 3547100 81694 64 44 Expected: 24000 3547100 81694 64 44 { [1012, 1475), [1682, 1985), [2727, 3553), [3599, 3992), [5397, 7244), [9181, 9807), [9993, 10178), [12209, 14029), [15089, 15342), [16198, 16984), [17238, 18436), [21087, 21876), [23701, 23794) } Got : 9582 1425198 81694 26 17 Expected: 9582 1425198 81694 26 17 Got : 24000 2460139 23760 42 41 Expected: 24000 2460139 23760 42 41 { [1296, 2520), [3888, 4320), [5040, 6840), [6912, 7272), [10872, 11160), [11520, 12096), [12096, 13824), [15192, 15624), [17064, 17856), [18216, 19296), [19800, 20160), [20736, 21096), [21096, 22104) } Got : 10440 1060798 23760 18 18 Expected: 10440 1060798 23760 18 18 Got : 24000 4054050 46562 68 43 Expected: 24000 4054050 46562 68 43 { [460, 1518), [2300, 2484), [2760, 4002), [4600, 5842), [6302, 9752), [11178, 12328), [14582, 14858), [16790, 18032), [18216, 18446), [18722, 19504), [19504, 19964), [20378, 20470), [21344, 23506) } Got : 13570 2277890 46562 38 24 Expected: 13570 2277890 46562 38 24 Got : 24000 2106459 49449 38 44 Expected: 24000 2106459 49449 38 44 Got : 24000 2460219 23555 41 41 Expected: 24000 2460219 23555 41 41 Got : 24000 4054270 46543 66 43 Expected: 24000 4054270 46543 66 43 Got : 24000 2106479 49555 38 44 Expected: 24000 2106479 49555 38 44 Got : 24000 2460259 23628 41 41 Expected: 24000 2460259 23628 41 41 Got : 24000 4054290 46640 65 43 Expected: 24000 4054290 46640 65 43 Got : 24000 2106439 66674 3 4 Expected: 24000 2106439 66674 3 4 { [2455, 2599), [2798, 3624), [4540, 4713), [5654, 7161), [8509, 8794), [8936, 9973), [11888, 14280), [14337, 14882), [15507, 16365), [17368, 19451), [19536, 20135), [20790, 21503), [21589, 23243) } Got : 12816 1121048 66674 2 2 Expected: 12816 1121048 66674 2 2 Got : 24000 2460139 33541 4 4 Expected: 24000 2460139 33541 4 4 { [1296, 2520), [3888, 4320), [5040, 6840), [6912, 7272), [10872, 11160), [11520, 12096), [12096, 13824), [15192, 15624), [17064, 17856), [18216, 19296), [19800, 20160), [20736, 21096), [21096, 22104) } Got : 10440 1060798 33541 1 1 Expected: 10440 1060798 
33541 1 1 Got : 24000 4054050 64742 7 4 Expected: 24000 4054050 64742 7 4 { [460, 1518), [2300, 2484), [2760, 4002), [4600, 5842), [6302, 9752), [11178, 12328), [14582, 14858), [16790, 18032), [18216, 18446), [18722, 19504), [19504, 19964), [20378, 20470), [21344, 23506) } Got : 13570 2234982 64742 4 2 Expected: 13570 2234982 64742 4 2 >> THiveTest::TestSpreadNeighboursDifferentOwners [GOOD] >> THiveTest::TestUpdateTabletsObjectUpdatesMetrics ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut_client/unittest >> Backpressure::MonteCarlo [GOOD] Test command err: Clock# 1970-01-01T00:00:00.000000Z elapsed# 0.000032s EventsProcessed# 0 clients.size# 0 Clock# 1970-01-01T00:00:18.286894Z elapsed# 0.000169s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:00:32.865214Z elapsed# 0.000198s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:00:51.438943Z elapsed# 0.000219s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:01:05.732745Z elapsed# 0.000244s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:01:21.532306Z elapsed# 0.045566s EventsProcessed# 1954 clients.size# 1 Clock# 1970-01-01T00:01:35.775099Z elapsed# 0.045798s EventsProcessed# 1956 clients.size# 0 Clock# 1970-01-01T00:01:50.860722Z elapsed# 0.045818s EventsProcessed# 1956 clients.size# 0 Clock# 1970-01-01T00:02:07.264784Z elapsed# 0.045840s EventsProcessed# 1956 clients.size# 0 Clock# 1970-01-01T00:02:17.770628Z elapsed# 0.045860s EventsProcessed# 1956 clients.size# 0 Clock# 1970-01-01T00:02:33.995922Z elapsed# 0.045878s EventsProcessed# 1956 clients.size# 0 Clock# 1970-01-01T00:02:47.637345Z elapsed# 0.045895s EventsProcessed# 1956 clients.size# 0 Clock# 1970-01-01T00:03:03.214806Z elapsed# 0.045916s EventsProcessed# 1956 clients.size# 0 Clock# 1970-01-01T00:03:15.925410Z elapsed# 0.045934s EventsProcessed# 1956 clients.size# 0 Clock# 1970-01-01T00:03:33.011048Z elapsed# 0.045953s EventsProcessed# 1956 clients.size# 0 Clock# 1970-01-01T00:03:49.475219Z elapsed# 0.045972s EventsProcessed# 1956 clients.size# 0 Clock# 1970-01-01T00:04:01.196597Z elapsed# 0.045990s EventsProcessed# 1956 clients.size# 0 Clock# 1970-01-01T00:04:20.218411Z elapsed# 0.046006s EventsProcessed# 1956 clients.size# 0 Clock# 1970-01-01T00:04:31.377242Z elapsed# 0.046024s EventsProcessed# 1956 clients.size# 0 Clock# 1970-01-01T00:04:50.467802Z elapsed# 0.046057s EventsProcessed# 1956 clients.size# 0 Clock# 1970-01-01T00:05:07.671574Z elapsed# 0.046137s EventsProcessed# 1956 clients.size# 0 Clock# 1970-01-01T00:05:21.827606Z elapsed# 0.046154s EventsProcessed# 1956 clients.size# 0 Clock# 1970-01-01T00:05:40.351427Z elapsed# 0.079023s EventsProcessed# 4093 clients.size# 1 Clock# 1970-01-01T00:05:52.409788Z elapsed# 0.093510s EventsProcessed# 5512 clients.size# 1 Clock# 1970-01-01T00:06:06.167037Z elapsed# 0.109787s EventsProcessed# 7150 clients.size# 1 Clock# 1970-01-01T00:06:22.027551Z elapsed# 0.129166s EventsProcessed# 9113 clients.size# 1 Clock# 1970-01-01T00:06:35.297527Z elapsed# 0.145689s EventsProcessed# 10701 clients.size# 1 Clock# 1970-01-01T00:06:46.257791Z elapsed# 0.159912s EventsProcessed# 12042 clients.size# 1 Clock# 1970-01-01T00:07:02.620548Z elapsed# 0.181175s EventsProcessed# 14004 clients.size# 1 Clock# 1970-01-01T00:07:17.805952Z elapsed# 0.198859s EventsProcessed# 15692 clients.size# 1 Clock# 1970-01-01T00:07:37.088934Z elapsed# 0.220824s EventsProcessed# 17911 clients.size# 1 Clock# 1970-01-01T00:07:48.251451Z elapsed# 0.233743s EventsProcessed# 19322 clients.size# 1 Clock# 
1970-01-01T00:08:05.671784Z elapsed# 0.270886s EventsProcessed# 23295 clients.size# 2 Clock# 1970-01-01T00:08:17.925606Z elapsed# 0.315226s EventsProcessed# 26274 clients.size# 2 Clock# 1970-01-01T00:08:32.667919Z elapsed# 0.357617s EventsProcessed# 29790 clients.size# 2 Clock# 1970-01-01T00:08:49.267404Z elapsed# 0.397058s EventsProcessed# 33744 clients.size# 2 Clock# 1970-01-01T00:09:04.407483Z elapsed# 0.431383s EventsProcessed# 37401 clients.size# 2 Clock# 1970-01-01T00:09:19.403302Z elapsed# 0.462749s EventsProcessed# 40819 clients.size# 2 Clock# 1970-01-01T00:09:32.439078Z elapsed# 0.492004s EventsProcessed# 43961 clients.size# 2 Clock# 1970-01-01T00:09:51.139036Z elapsed# 0.533533s EventsProcessed# 48430 clients.size# 2 Clock# 1970-01-01T00:10:06.993663Z elapsed# 0.569320s EventsProcessed# 52165 clients.size# 2 Clock# 1970-01-01T00:10:25.239401Z elapsed# 0.591949s EventsProcessed# 54292 clients.size# 1 Clock# 1970-01-01T00:10:43.170455Z elapsed# 0.614492s EventsProcessed# 56336 clients.size# 1 Clock# 1970-01-01T00:11:02.572543Z elapsed# 0.615374s EventsProcessed# 56339 clients.size# 0 Clock# 1970-01-01T00:11:18.236662Z elapsed# 0.615399s EventsProcessed# 56339 clients.size# 0 Clock# 1970-01-01T00:11:35.807926Z elapsed# 0.615422s EventsProcessed# 56339 clients.size# 0 Clock# 1970-01-01T00:11:53.740850Z elapsed# 0.615441s EventsProcessed# 56339 clients.size# 0 Clock# 1970-01-01T00:12:05.441181Z elapsed# 0.615459s EventsProcessed# 56339 clients.size# 0 Clock# 1970-01-01T00:12:18.417945Z elapsed# 0.615478s EventsProcessed# 56339 clients.size# 0 Clock# 1970-01-01T00:12:37.377824Z elapsed# 0.615497s EventsProcessed# 56339 clients.size# 0 Clock# 1970-01-01T00:12:54.127982Z elapsed# 0.615514s EventsProcessed# 56339 clients.size# 0 Clock# 1970-01-01T00:13:09.636943Z elapsed# 0.615534s EventsProcessed# 56339 clients.size# 0 Clock# 1970-01-01T00:13:26.550064Z elapsed# 0.637627s EventsProcessed# 58292 clients.size# 1 Clock# 1970-01-01T00:13:37.308695Z elapsed# 0.651788s EventsProcessed# 59577 clients.size# 1 Clock# 1970-01-01T00:13:48.367173Z elapsed# 0.679993s EventsProcessed# 62327 clients.size# 2 Clock# 1970-01-01T00:13:59.114045Z elapsed# 0.707695s EventsProcessed# 64994 clients.size# 2 Clock# 1970-01-01T00:14:13.111235Z elapsed# 0.743506s EventsProcessed# 68271 clients.size# 2 Clock# 1970-01-01T00:14:31.224847Z elapsed# 0.788587s EventsProcessed# 72409 clients.size# 2 Clock# 1970-01-01T00:14:49.018665Z elapsed# 0.810537s EventsProcessed# 74431 clients.size# 1 Clock# 1970-01-01T00:15:02.568116Z elapsed# 0.829036s EventsProcessed# 76103 clients.size# 1 Clock# 1970-01-01T00:15:21.348191Z elapsed# 0.862481s EventsProcessed# 78279 clients.size# 1 Clock# 1970-01-01T00:15:39.963848Z elapsed# 0.886976s EventsProcessed# 80475 clients.size# 1 Clock# 1970-01-01T00:15:54.463227Z elapsed# 0.906897s EventsProcessed# 82281 clients.size# 1 Clock# 1970-01-01T00:16:09.053408Z elapsed# 0.925040s EventsProcessed# 83976 clients.size# 1 Clock# 1970-01-01T00:16:27.582652Z elapsed# 0.925323s EventsProcessed# 83978 clients.size# 0 Clock# 1970-01-01T00:16:44.718285Z elapsed# 0.925346s EventsProcessed# 83978 clients.size# 0 Clock# 1970-01-01T00:17:04.556690Z elapsed# 0.957980s EventsProcessed# 86319 clients.size# 1 Clock# 1970-01-01T00:17:16.749185Z elapsed# 0.973793s EventsProcessed# 87746 clients.size# 1 Clock# 1970-01-01T00:17:34.442592Z elapsed# 0.997947s EventsProcessed# 89877 clients.size# 1 Clock# 1970-01-01T00:17:50.797865Z elapsed# 1.019381s EventsProcessed# 91823 clients.size# 1 Clock# 
1970-01-01T00:18:06.928208Z elapsed# 1.041383s EventsProcessed# 93964 clients.size# 1 Clock# 1970-01-01T00:18:21.741618Z elapsed# 1.060280s EventsProcessed# 95722 clients.size# 1 Clock# 1970-01-01T00:18:38.503073Z elapsed# 1.102656s EventsProcessed# 99708 clients.size# 2 Clock# 1970-01-01T00:18:52.985261Z elapsed# 1.168265s EventsProcessed# 103064 clients.size# 2 Clock# 1970-01-01T00:19:12.790558Z elapsed# 1.228992s EventsProcessed# 107604 clients.size# 2 Clock# 1970-01-01T00:19:25.941087Z elapsed# 1.255749s EventsProcessed# 109196 clients.size# 1 Clock# 1970-01-01T00:19:43.678118Z elapsed# 1.277636s EventsProcessed# 111229 clients.size# 1 Clock# 1970-01-01T00:19:55.051046Z elapsed# 1.291734s EventsProcessed# 112585 clients.size# 1 Clock# 1970-01-01T00:20:12.351652Z elapsed# 1.314541s EventsProcessed# 114709 clients.size# 1 Clock# 1970-01-01T00:20:31.103730Z elapsed# 1.337333s EventsProcessed# 116927 clients.size# 1 Clock# 1970-01-01T00:20:46.965855Z elapsed# 1.357590s EventsProcessed# 118854 clients.size# 1 Clock# 1970-01-01T00:20:58.049099Z elapsed# 1.371232s EventsProcessed# 120160 clients.size# 1 Clock# 1970-01-01T00:21:15.771836Z elapsed# 1.398882s EventsProcessed# 122277 clients.size# 1 Clock# 1970-01-01T00:21:35.045062Z elapsed# 1.425891s EventsProcessed# 124524 clients.size# 1 Clock# 1970-01-01T00:21:47.494037Z elapsed# 1.438612s EventsProcessed# 126006 clients.size# 1 Clock# 1970-01-01T00:22:02.334423Z elapsed# 1.454527s EventsProcessed# 127701 clients.size# 1 Clock# 1970-01-01T00:22:18.946800Z elapsed# 1.472761s EventsProcessed# 129650 clients.size# 1 Clock# 1970-01-01T00:22:29.301981Z elapsed# 1.484780s EventsProcessed# 130900 clients.size# 1 Clock# 1970-01-01T00:22:43.125458Z elapsed# 1.500051s EventsProcessed# 132496 clients.size# 1 Clock# 1970-01-01T00:22:55.869624Z elapsed# 1.514230s EventsProcessed# 133972 clients.size# 1 Clock# 1970-01-01T00:23:14.476586Z elapsed# 1.534842s EventsProcessed# 136132 clients.size# 1 Clock# 1970-01-01T00:23:27.229978Z elapsed# 1.549758s EventsProcessed# 137686 clients.size# 1 Clock# 1970-01-01T00:23:40.993230Z elapsed# 1.564462s EventsProcessed# 139212 clients.size# 1 Clock# 1970-01-01T00:23:56.794444Z elapsed# 1.582742s EventsProcessed# 141075 clients.size# 1 Clock# 1970-01-01T00:24:16.334125Z elapsed# 1.607264s EventsProcessed# 143447 clients.size# 1 Clock# 1970-01-01T00:24:30.437866Z elapsed# 1.626293s EventsProcessed# 145121 clients.size# 1 Clock# 1970-01-01T00:24:48.738979Z elapsed# 1.649818s EventsProcessed# 147239 clients.size# 1 Clock# 1970-01-01T00:25:03.633322Z elapsed# 1.668888s EventsProcessed# 148941 clients.size# 1 Clock# 1970-01-01T00:25:15.090170Z elapsed# 1.683138s EventsProcessed# 150276 clients.size# 1 Clock# 1970-01-01T00:25:34.373916Z elapsed# 1.683305s EventsProcessed# 150278 clients.size# 0 Clock# 1970-01-01T00:25:52.365068Z elapsed# 1.683325s EventsProcessed# 150278 clients.size# 0 Clock# 1970-01-01T00:26:06.770778Z elapsed# 1.683343s EventsProcessed# 150278 clients.size# 0 Clock# 1970-01-01T00:26:22.755818Z elapsed# 1.683363s EventsProcessed# 150278 clients.size# 0 Clock# 1970-01-01T00:26:38.180655Z elapsed# 1.702288s EventsProcessed# 152034 clients.size# 1 Clock# 1970-01-01T00:26:48.720463Z elapsed# 1.702447s EventsProcessed# 152036 clients.size# 0 Clock# 1970-01-01T00:27:04.002911Z elapsed# 1.722927s EventsProcessed# 153876 clients.size# 1 Clock# 1970-01-01T00:27:14.968609Z elapsed# 1.736756s EventsProcessed# 155126 clients.size# 1 Clock# 1970-01-01T00:27:31.957546Z elapsed# 1.758449s EventsProcessed# 157117 
clients.size# 1 Clock# 1970-01-01T00:27:49.541541Z elapsed# 1.782274s EventsProcessed# 159182 clients.size# 1 Clock# 1970-01-01T00:28:01.949351Z elapsed# 1.799520s EventsProcessed# 160676 clients.size# 1 Clock# 1970-01-01T00:28:17.293134Z elapsed# 1.820638s EventsProcessed# 162490 clients.size# 1 Clock# 1970-01-01T00:28:28.345099Z elapsed# 1.841087s EventsProcessed# 163782 clients.size# 1 Clock# 1970-01-01T00:28:43.329041Z elapsed# 1.860042s EventsProcessed# 165491 clients.size# 1 Clock# 1970-01-01T00:29:00.721933Z elapsed# 1.883060s EventsProcessed# 167553 clients.size# 1 Clock# 1970-01-01T00:29:16.034218Z elapsed# 1.903118s EventsProcessed# 169382 clients.size# 1 Clock# 1970-01-01T00:29:30.539446Z elapsed# 1.903276s EventsProcessed# 169384 clients.size# 0 Clock# 1970-01-01T00:29:45.932405Z elapsed# 1.903297s EventsProcessed# 169384 clients.size# 0 Clock# 1970-01-01T00:29:58.741809Z elapsed# 1.903318s EventsProcessed# 169384 clients.size# 0 Clock# 1970-01-01T00:30:16.871606Z elapsed# 1.903342s EventsProcessed# 169384 clients.size# 0 Clock# 1970-01-01T00:30:29.387341Z elapsed# 1.903361s EventsProcessed# 169384 clients.size# 0 Clock# 1970-01-01T00:30:46.714578Z elapsed# 1.903380s EventsProcessed# 169384 clients.size# 0 Clock# 1970-01-01T00:31:01.608099Z elapsed# 1.903398s EventsProcessed# 169384 clients.size# 0 Clock# 1970-01-01T00:31:21.113176Z elapsed# 1.903417s EventsProcessed# 169384 clients.size# 0 Clock# 1970-01-01T00:31:39.736500Z elapsed# 1.903437s EventsProcessed# 169384 clients.size# 0 Clock# 1970-01-01T00:31:57.672955Z elapsed# 1.903458s EventsProcessed# 169384 clients.size# 0 Clock# 1970-01-01T00:32:10.550389Z elapsed# 1.903477s EventsProcessed# 169384 clients.size# 0 Clock# 1970-01-01T00:32:27.832140Z elapsed# 1.903495s EventsProcessed# 169384 clients.size# 0 Clock# 1970-01-01T00:32:41.794361Z elapsed# 1.903514s EventsProcessed# 169384 clients.size# 0 Clock# 1970-01-01T00:32:59.835276Z elapsed# 1.903531s EventsProcessed# 169384 clie ... 
s EventsProcessed# 12619922 clients.size# 2 Clock# 1970-01-01T05:29:38.483690Z elapsed# 223.593427s EventsProcessed# 12624471 clients.size# 2 Clock# 1970-01-01T05:29:54.099357Z elapsed# 223.632835s EventsProcessed# 12628162 clients.size# 2 Clock# 1970-01-01T05:30:10.741465Z elapsed# 223.670773s EventsProcessed# 12631984 clients.size# 2 Clock# 1970-01-01T05:30:26.925113Z elapsed# 223.708190s EventsProcessed# 12635810 clients.size# 2 Clock# 1970-01-01T05:30:38.622802Z elapsed# 223.721652s EventsProcessed# 12637154 clients.size# 1 Clock# 1970-01-01T05:30:53.540274Z elapsed# 223.738961s EventsProcessed# 12638898 clients.size# 1 Clock# 1970-01-01T05:31:13.235374Z elapsed# 223.761451s EventsProcessed# 12641172 clients.size# 1 Clock# 1970-01-01T05:31:32.756882Z elapsed# 223.761624s EventsProcessed# 12641174 clients.size# 0 Clock# 1970-01-01T05:31:48.090047Z elapsed# 223.761649s EventsProcessed# 12641174 clients.size# 0 Clock# 1970-01-01T05:32:06.421562Z elapsed# 223.761671s EventsProcessed# 12641174 clients.size# 0 Clock# 1970-01-01T05:32:26.005573Z elapsed# 223.761691s EventsProcessed# 12641174 clients.size# 0 Clock# 1970-01-01T05:32:40.451708Z elapsed# 223.761710s EventsProcessed# 12641174 clients.size# 0 Clock# 1970-01-01T05:32:56.996758Z elapsed# 223.781617s EventsProcessed# 12643106 clients.size# 1 Clock# 1970-01-01T05:33:13.959898Z elapsed# 223.801703s EventsProcessed# 12645174 clients.size# 1 Clock# 1970-01-01T05:33:29.138501Z elapsed# 223.818584s EventsProcessed# 12646939 clients.size# 1 Clock# 1970-01-01T05:33:40.878083Z elapsed# 223.833274s EventsProcessed# 12648436 clients.size# 1 Clock# 1970-01-01T05:33:51.373073Z elapsed# 223.846108s EventsProcessed# 12649727 clients.size# 1 Clock# 1970-01-01T05:34:03.561061Z elapsed# 223.884450s EventsProcessed# 12652600 clients.size# 2 Clock# 1970-01-01T05:34:23.276241Z elapsed# 223.964683s EventsProcessed# 12657202 clients.size# 2 Clock# 1970-01-01T05:34:37.147685Z elapsed# 223.998186s EventsProcessed# 12660492 clients.size# 2 Clock# 1970-01-01T05:34:51.643722Z elapsed# 224.054410s EventsProcessed# 12665784 clients.size# 3 Clock# 1970-01-01T05:35:06.231131Z elapsed# 224.103676s EventsProcessed# 12670751 clients.size# 3 Clock# 1970-01-01T05:35:18.900783Z elapsed# 224.148234s EventsProcessed# 12675235 clients.size# 3 Clock# 1970-01-01T05:35:38.685029Z elapsed# 224.216593s EventsProcessed# 12682149 clients.size# 3 Clock# 1970-01-01T05:35:53.812555Z elapsed# 224.270525s EventsProcessed# 12687617 clients.size# 3 Clock# 1970-01-01T05:36:10.513587Z elapsed# 224.366694s EventsProcessed# 12693720 clients.size# 3 Clock# 1970-01-01T05:36:27.048891Z elapsed# 224.431846s EventsProcessed# 12699732 clients.size# 3 Clock# 1970-01-01T05:36:38.289185Z elapsed# 224.472034s EventsProcessed# 12703666 clients.size# 3 Clock# 1970-01-01T05:36:57.140135Z elapsed# 224.540571s EventsProcessed# 12710416 clients.size# 3 Clock# 1970-01-01T05:37:09.843526Z elapsed# 224.587094s EventsProcessed# 12715019 clients.size# 3 Clock# 1970-01-01T05:37:21.221515Z elapsed# 224.627780s EventsProcessed# 12719063 clients.size# 3 Clock# 1970-01-01T05:37:32.414188Z elapsed# 224.671630s EventsProcessed# 12723039 clients.size# 3 Clock# 1970-01-01T05:37:42.928187Z elapsed# 224.715575s EventsProcessed# 12726866 clients.size# 3 Clock# 1970-01-01T05:38:01.016982Z elapsed# 224.827744s EventsProcessed# 12733578 clients.size# 3 Clock# 1970-01-01T05:38:12.483408Z elapsed# 224.874251s EventsProcessed# 12737715 clients.size# 3 Clock# 1970-01-01T05:38:31.500477Z elapsed# 224.947379s EventsProcessed# 
12744584 clients.size# 3 Clock# 1970-01-01T05:38:50.722955Z elapsed# 225.027096s EventsProcessed# 12751485 clients.size# 3 Clock# 1970-01-01T05:39:03.887360Z elapsed# 225.076911s EventsProcessed# 12756156 clients.size# 3 Clock# 1970-01-01T05:39:15.169015Z elapsed# 225.120222s EventsProcessed# 12760189 clients.size# 3 Clock# 1970-01-01T05:39:34.143576Z elapsed# 225.235478s EventsProcessed# 12767082 clients.size# 3 Clock# 1970-01-01T05:39:52.384335Z elapsed# 225.308578s EventsProcessed# 12773604 clients.size# 3 Clock# 1970-01-01T05:40:02.712025Z elapsed# 225.351426s EventsProcessed# 12777252 clients.size# 3 Clock# 1970-01-01T05:40:20.790344Z elapsed# 225.432435s EventsProcessed# 12783823 clients.size# 3 Clock# 1970-01-01T05:40:31.964459Z elapsed# 225.496227s EventsProcessed# 12789188 clients.size# 4 Clock# 1970-01-01T05:40:51.157843Z elapsed# 225.604402s EventsProcessed# 12798380 clients.size# 4 Clock# 1970-01-01T05:41:10.279020Z elapsed# 225.747447s EventsProcessed# 12807376 clients.size# 4 Clock# 1970-01-01T05:41:23.331191Z elapsed# 225.856767s EventsProcessed# 12813722 clients.size# 4 Clock# 1970-01-01T05:41:35.251900Z elapsed# 225.903663s EventsProcessed# 12817911 clients.size# 3 Clock# 1970-01-01T05:41:47.301474Z elapsed# 225.949650s EventsProcessed# 12822127 clients.size# 3 Clock# 1970-01-01T05:41:59.418423Z elapsed# 225.995372s EventsProcessed# 12826554 clients.size# 3 Clock# 1970-01-01T05:42:11.525839Z elapsed# 226.043939s EventsProcessed# 12830846 clients.size# 3 Clock# 1970-01-01T05:42:31.416947Z elapsed# 226.127158s EventsProcessed# 12837914 clients.size# 3 Clock# 1970-01-01T05:42:48.946056Z elapsed# 226.260731s EventsProcessed# 12844267 clients.size# 3 Clock# 1970-01-01T05:43:04.330537Z elapsed# 226.412658s EventsProcessed# 12851470 clients.size# 4 Clock# 1970-01-01T05:43:16.281702Z elapsed# 226.466316s EventsProcessed# 12855764 clients.size# 3 Clock# 1970-01-01T05:43:36.092467Z elapsed# 226.563230s EventsProcessed# 12863055 clients.size# 3 Clock# 1970-01-01T05:43:46.694060Z elapsed# 226.600721s EventsProcessed# 12866909 clients.size# 3 Clock# 1970-01-01T05:44:05.802057Z elapsed# 226.667313s EventsProcessed# 12873771 clients.size# 3 Clock# 1970-01-01T05:44:25.726345Z elapsed# 226.766673s EventsProcessed# 12880657 clients.size# 3 Clock# 1970-01-01T05:44:43.431450Z elapsed# 226.826817s EventsProcessed# 12886774 clients.size# 3 Clock# 1970-01-01T05:45:00.036457Z elapsed# 226.921499s EventsProcessed# 12892543 clients.size# 3 Clock# 1970-01-01T05:45:16.283208Z elapsed# 227.000194s EventsProcessed# 12898305 clients.size# 3 Clock# 1970-01-01T05:45:34.897327Z elapsed# 227.134846s EventsProcessed# 12907291 clients.size# 4 Clock# 1970-01-01T05:45:45.953359Z elapsed# 227.206123s EventsProcessed# 12912607 clients.size# 4 Clock# 1970-01-01T05:46:04.456086Z elapsed# 227.355720s EventsProcessed# 12921412 clients.size# 4 Clock# 1970-01-01T05:46:20.352952Z elapsed# 227.440684s EventsProcessed# 12928827 clients.size# 4 Clock# 1970-01-01T05:46:32.619753Z elapsed# 227.504994s EventsProcessed# 12934791 clients.size# 4 Clock# 1970-01-01T05:46:47.292289Z elapsed# 227.585938s EventsProcessed# 12941774 clients.size# 4 Clock# 1970-01-01T05:46:57.671555Z elapsed# 227.650873s EventsProcessed# 12946730 clients.size# 4 Clock# 1970-01-01T05:47:13.045402Z elapsed# 227.772944s EventsProcessed# 12954155 clients.size# 4 Clock# 1970-01-01T05:47:32.523076Z elapsed# 227.876375s EventsProcessed# 12963567 clients.size# 4 Clock# 1970-01-01T05:47:50.118751Z elapsed# 227.965369s EventsProcessed# 12971906 clients.size# 4 
Clock# 1970-01-01T05:48:03.003910Z elapsed# 228.029393s EventsProcessed# 12978005 clients.size# 4 Clock# 1970-01-01T05:48:16.185283Z elapsed# 228.093480s EventsProcessed# 12984211 clients.size# 4 Clock# 1970-01-01T05:48:27.539501Z elapsed# 228.182846s EventsProcessed# 12989656 clients.size# 4 Clock# 1970-01-01T05:48:45.619973Z elapsed# 228.274498s EventsProcessed# 12998202 clients.size# 4 Clock# 1970-01-01T05:48:59.232257Z elapsed# 228.323930s EventsProcessed# 13002924 clients.size# 3 Clock# 1970-01-01T05:49:14.247111Z elapsed# 228.380975s EventsProcessed# 13008297 clients.size# 3 Clock# 1970-01-01T05:49:25.915783Z elapsed# 228.427709s EventsProcessed# 13012506 clients.size# 3 Clock# 1970-01-01T05:49:41.232980Z elapsed# 228.484566s EventsProcessed# 13017774 clients.size# 3 Clock# 1970-01-01T05:49:51.356550Z elapsed# 228.520957s EventsProcessed# 13021299 clients.size# 3 Clock# 1970-01-01T05:50:10.664244Z elapsed# 228.655685s EventsProcessed# 13030686 clients.size# 4 Clock# 1970-01-01T05:50:21.679767Z elapsed# 228.699303s EventsProcessed# 13034622 clients.size# 3 Clock# 1970-01-01T05:50:38.880106Z elapsed# 228.755986s EventsProcessed# 13040912 clients.size# 3 Clock# 1970-01-01T05:50:50.478856Z elapsed# 228.795963s EventsProcessed# 13045122 clients.size# 3 Clock# 1970-01-01T05:51:05.672598Z elapsed# 228.852838s EventsProcessed# 13050526 clients.size# 3 Clock# 1970-01-01T05:51:15.979218Z elapsed# 228.892076s EventsProcessed# 13054204 clients.size# 3 Clock# 1970-01-01T05:51:27.993397Z elapsed# 228.936915s EventsProcessed# 13058526 clients.size# 3 Clock# 1970-01-01T05:51:45.875485Z elapsed# 229.042792s EventsProcessed# 13065063 clients.size# 3 Clock# 1970-01-01T05:52:04.254038Z elapsed# 229.115349s EventsProcessed# 13071614 clients.size# 3 Clock# 1970-01-01T05:52:16.073599Z elapsed# 229.160868s EventsProcessed# 13075800 clients.size# 3 Clock# 1970-01-01T05:52:26.216854Z elapsed# 229.210075s EventsProcessed# 13080560 clients.size# 4 Clock# 1970-01-01T05:52:46.215078Z elapsed# 229.299569s EventsProcessed# 13089958 clients.size# 4 Clock# 1970-01-01T05:52:58.081284Z elapsed# 229.355081s EventsProcessed# 13095587 clients.size# 4 Clock# 1970-01-01T05:53:14.116670Z elapsed# 229.473192s EventsProcessed# 13103110 clients.size# 4 Clock# 1970-01-01T05:53:33.260416Z elapsed# 229.602867s EventsProcessed# 13112263 clients.size# 4 Clock# 1970-01-01T05:53:44.884787Z elapsed# 229.690816s EventsProcessed# 13117700 clients.size# 4 Clock# 1970-01-01T05:54:03.692486Z elapsed# 229.840082s EventsProcessed# 13126749 clients.size# 4 Clock# 1970-01-01T05:54:15.375508Z elapsed# 229.969926s EventsProcessed# 13132292 clients.size# 4 Clock# 1970-01-01T05:54:26.340725Z elapsed# 230.106997s EventsProcessed# 13138864 clients.size# 5 Clock# 1970-01-01T05:54:37.615483Z elapsed# 230.204660s EventsProcessed# 13145644 clients.size# 5 Clock# 1970-01-01T05:54:55.253242Z elapsed# 230.332533s EventsProcessed# 13156226 clients.size# 5 Clock# 1970-01-01T05:55:06.635744Z elapsed# 230.437547s EventsProcessed# 13163038 clients.size# 5 Clock# 1970-01-01T05:55:20.628553Z elapsed# 230.585819s EventsProcessed# 13171259 clients.size# 5 Clock# 1970-01-01T05:55:38.208010Z elapsed# 230.747711s EventsProcessed# 13181792 clients.size# 5 Clock# 1970-01-01T05:55:55.045088Z elapsed# 230.858201s EventsProcessed# 13191935 clients.size# 5 Clock# 1970-01-01T05:56:07.352678Z elapsed# 230.960359s EventsProcessed# 13199166 clients.size# 5 Clock# 1970-01-01T05:56:23.709095Z elapsed# 231.125053s EventsProcessed# 13208879 clients.size# 5 Clock# 
1970-01-01T05:56:43.670961Z elapsed# 231.312388s EventsProcessed# 13220527 clients.size# 5 Clock# 1970-01-01T05:57:03.231373Z elapsed# 231.532617s EventsProcessed# 13234394 clients.size# 6 Clock# 1970-01-01T05:57:15.456427Z elapsed# 231.729901s EventsProcessed# 13243199 clients.size# 6 Clock# 1970-01-01T05:57:32.734580Z elapsed# 231.994993s EventsProcessed# 13255478 clients.size# 6 Clock# 1970-01-01T05:57:44.038764Z elapsed# 232.108537s EventsProcessed# 13262195 clients.size# 5 Clock# 1970-01-01T05:57:56.219055Z elapsed# 232.210730s EventsProcessed# 13269364 clients.size# 5 Clock# 1970-01-01T05:58:11.604534Z elapsed# 232.328293s EventsProcessed# 13278622 clients.size# 5 Clock# 1970-01-01T05:58:28.240215Z elapsed# 232.456046s EventsProcessed# 13286478 clients.size# 4 Clock# 1970-01-01T05:58:46.259665Z elapsed# 232.543962s EventsProcessed# 13295147 clients.size# 4 Clock# 1970-01-01T05:58:58.942466Z elapsed# 232.607996s EventsProcessed# 13301190 clients.size# 4 Clock# 1970-01-01T05:59:13.236811Z elapsed# 232.683220s EventsProcessed# 13307967 clients.size# 4 Clock# 1970-01-01T05:59:29.771576Z elapsed# 232.764520s EventsProcessed# 13315834 clients.size# 4 Clock# 1970-01-01T05:59:41.618580Z elapsed# 232.881656s EventsProcessed# 13321439 clients.size# 4 Clock# 1970-01-01T05:59:52.022675Z elapsed# 232.934061s EventsProcessed# 13326382 clients.size# 4 >> KqpNewEngine::PkSelect1 >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull-useSink >> KqpPg::CopyTableSerialColumns-useSink [GOOD] >> KqpPg::CreateIndex >> TSchemeShardViewTest::ReadOnlyMode >> THiveTest::TestFollowersCrossDC_Easy [GOOD] >> THiveTest::TestFollowers_LocalNodeOnly >> TSchemeShardViewTest::AsyncCreateDifferentViews >> THiveTest::TestUpdateTabletsObjectUpdatesMetrics [GOOD] >> THiveTest::TestRestartTablets >> KqpPg::TypeCoercionInsert+useSink [GOOD] >> KqpPg::TableSelect+useSink |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut |88.5%| [TM] {RESULT} ydb/core/blobstorage/backpressure/ut_client/unittest |88.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut >> TSchemeShardViewTest::ReadOnlyMode [GOOD] >> TSchemeShardViewTest::CreateView >> TColumnShardTestSchema::ExportWithLostAnswer [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::ReadOnlyMode [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:31:37.392111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:31:37.392242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:37.392285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:31:37.392321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:31:37.392365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:31:37.392417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:31:37.392487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:37.392562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:31:37.393329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:31:37.393709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:31:37.514572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:31:37.514638Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:37.531974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:31:37.535928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:31:37.536130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:31:37.544104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:31:37.544309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:31:37.544849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:37.545111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:31:37.547694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:37.547892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:31:37.549131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:37.549195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:37.549310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:31:37.549355Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:37.549396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:31:37.549544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:31:37.560671Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:31:37.738816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:37.739139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:37.739843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:31:37.739908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:31:37.740274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:31:37.740446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:37.743971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:37.744224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:31:37.744484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:37.744550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:31:37.744595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:31:37.744632Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:31:37.749764Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:37.749856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:31:37.749921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:31:37.752630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:37.752692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:37.752753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:37.752806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:31:37.757067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:37.759933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:31:37.760175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:31:37.761486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:37.761651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:37.761741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:37.762056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:31:37.762132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:37.762372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:37.762482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:31:37.766532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:37.766612Z node 1 :FLAT_TX_SCHEMESHARD ... meshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:31:38.183437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 TestModificationResults wait txId: 103 Leader for TabletID 72057594046678944 is [1:381:2348] sender: [1:439:2058] recipient: [1:15:2062] 2025-06-24T20:31:38.241910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateView CreateView { Name: "ThirdView" QueryText: "Some query" } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:38.242267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_view.cpp:118: [72057594046678944] TCreateView Propose, path: /MyRoot/ThirdView, opId: 103:0 2025-06-24T20:31:38.242390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_view.cpp:124: [72057594046678944] TCreateView Propose, path: /MyRoot/ThirdView, opId: 103:0, viewDescription: Name: "ThirdView" QueryText: "Some query" 2025-06-24T20:31:38.242578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: ThirdView, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T20:31:38.242671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-06-24T20:31:38.242731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 103:0 type: TxCreateView target path: [OwnerId: 72057594046678944, LocalPathId: 3] source path: 2025-06-24T20:31:38.242796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 103:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:31:38.249918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 103, response: Status: StatusAccepted TxId: 103 SchemeshardId: 72057594046678944 PathId: 3, at schemeshard: 72057594046678944 2025-06-24T20:31:38.250174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusAccepted, operation: CREATE VIEW, path: /MyRoot/ThirdView 2025-06-24T20:31:38.250419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:31:38.250482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:30: [72057594046678944] TCreateView::TPropose, opId: 103:0 ProgressState 2025-06-24T20:31:38.250542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 103 ready parts: 1/1 2025-06-24T20:31:38.250661Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 103 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:38.255294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 103:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:103 msg type: 269090816 2025-06-24T20:31:38.255593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 103, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 103 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000003 2025-06-24T20:31:38.256774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:38.257110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 103 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:38.257175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:45: [72057594046678944] TCreateView::TPropose, opId: 103:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000003 2025-06-24T20:31:38.259684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 103:0 128 -> 240 2025-06-24T20:31:38.259960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:31:38.260054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 FAKE_COORDINATOR: Erasing txId 103 2025-06-24T20:31:38.264161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:38.264228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:38.264467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T20:31:38.264571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:38.264615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:432:2388], at schemeshard: 72057594046678944, txId: 103, path id: 1 2025-06-24T20:31:38.264695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:432:2388], at schemeshard: 
72057594046678944, txId: 103, path id: 3 2025-06-24T20:31:38.264769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:31:38.264814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-24T20:31:38.264926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:31:38.264962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:31:38.265006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:31:38.265039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:31:38.265075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-24T20:31:38.265118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:31:38.265160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:0 2025-06-24T20:31:38.265219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T20:31:38.265368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T20:31:38.265416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 103, publications: 2, subscribers: 0 2025-06-24T20:31:38.265458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-24T20:31:38.265489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-24T20:31:38.266456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:31:38.266596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:31:38.266665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:31:38.266701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T20:31:38.266753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T20:31:38.268527Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 103
2025-06-24T20:31:38.268654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 103
2025-06-24T20:31:38.268705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103
2025-06-24T20:31:38.268747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2
2025-06-24T20:31:38.268799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1
2025-06-24T20:31:38.268907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0
2025-06-24T20:31:38.283948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103
2025-06-24T20:31:38.284256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103
TestModificationResult got TxId: 103, wait until txId: 103
>> THiveTest::TestHiveRestart [GOOD]
>> THiveTest::TestLimitedNodeList
>> TSchemeShardViewTest::AsyncCreateDifferentViews [GOOD]
>> Cdc::HugeKey[YdsRunner] [GOOD]
>> Cdc::HugeKey[TopicRunner]
>> TSchemeShardViewTest::CreateView [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::ExportWithLostAnswer [GOOD]
Test command err:
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10;
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10;
WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=};
WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150797649.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=150797649.000000s;Name=;Codec=};
WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130797649.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130797649.000000s;Name=;Codec=};
WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130796449.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130796449.000000s;Name=;Codec=};
2025-06-24T20:30:53.254712Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor;
2025-06-24T20:30:53.335867Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784:
tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T20:30:53.336304Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T20:30:53.357218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T20:30:53.357496Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T20:30:53.357820Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T20:30:53.357974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T20:30:53.358091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T20:30:53.358220Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T20:30:53.358333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T20:30:53.358454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T20:30:53.358576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T20:30:53.358731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T20:30:53.358857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T20:30:53.443135Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T20:30:53.443486Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T20:30:53.443584Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T20:30:53.443867Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T20:30:53.444136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T20:30:53.444240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T20:30:53.444300Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T20:30:53.444417Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T20:30:53.444486Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T20:30:53.444539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T20:30:53.444575Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T20:30:53.444760Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T20:30:53.444849Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T20:30:53.444895Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T20:30:53.444930Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T20:30:53.445025Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T20:30:53.445086Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T20:30:53.445139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T20:30:53.445176Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T20:30:53.445240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T20:30:53.445284Z node 
1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T20:30:53.445318Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T20:30:53.445562Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T20:30:53.445607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T20:30:53.445641Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T20:30:53.445875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T20:30:53.445945Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T20:30:53.445988Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T20:30:53.446142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T20:30:53.446211Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T20:30:53.446247Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T20:30:53.446331Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T20:30:53.446412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T20:30:53.446457Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T20:30:53.446492Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T20:30:53.447008Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=51; 2025-06-24T20:30:53.447121Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=54; 2025-06-24T20:30:53.454 ... anData [1:871:2829]->[1:870:2828] 2025-06-24T20:31:38.618573Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:871:2829];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T20:31:38.260313Z;index_granules=0;index_portions=1;index_batches=522;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=4873744;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=4873744;selected_rows=0; 2025-06-24T20:31:38.618603Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:871:2829];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T20:31:38.618834Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:871:2829];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-24T20:31:38.619575Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Finished read cookie: 7 at tablet 9437184 2025-06-24T20:31:38.619795Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750797089335:max} readable: {1750797089335:max} at tablet 9437184 2025-06-24T20:31:38.619923Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-24T20:31:38.620056Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750797089335:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T20:31:38.620119Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750797089335:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T20:31:38.620613Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750797089335:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-24T20:31:38.620694Z node 
1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750797089335:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-24T20:31:38.621136Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750797089335:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:878:2836];trace_detailed=; 2025-06-24T20:31:38.621505Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-24T20:31:38.621687Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-24T20:31:38.621816Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:38.621923Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:38.622209Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T20:31:38.622327Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:38.622420Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:38.622451Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:878:2836] finished for tablet 9437184 2025-06-24T20:31:38.622864Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:877:2835];stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_ProduceResults"],"t":0},{"events":["f_ack","l_ack","f_processing","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750797098621076,"name":"_full_task","f":1750797098621076,"d_finished":0,"c":0,"l":1750797098622530,"d":1454},"events":[{"name":"bootstrap","f":1750797098621248,"d_finished":708,"c":1,"l":1750797098621956,"d":708},{"a":1750797098622183,"name":"ack","f":1750797098622183,"d_finished":0,"c":0,"l":1750797098622530,"d":347},{"a":1750797098622161,"name":"processing","f":1750797098622161,"d_finished":0,"c":0,"l":1750797098622530,"d":369},{"name":"ProduceResults","f":1750797098621758,"d_finished":390,"c":2,"l":1750797098622437,"d":390},{"a":1750797098622439,"name":"Finish","f":1750797098622439,"d_finished":0,"c":0,"l":1750797098622530,"d":91}],"id":"9437184::8"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:38.622943Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:877:2835];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T20:31:38.623378Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:877:2835];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_ProduceResults"],"t":0},{"events":["f_ack","l_ack","f_processing","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750797098621076,"name":"_full_task","f":1750797098621076,"d_finished":0,"c":0,"l":1750797098622992,"d":1916},"events":[{"name":"bootstrap","f":1750797098621248,"d_finished":708,"c":1,"l":1750797098621956,"d":708},{"a":1750797098622183,"name":"ack","f":1750797098622183,"d_finished":0,"c":0,"l":1750797098622992,"d":809},{"a":1750797098622161,"name":"processing","f":1750797098622161,"d_finished":0,"c":0,"l":1750797098622992,"d":831},{"name":"ProduceResults","f":1750797098621758,"d_finished":390,"c":2,"l":1750797098622437,"d":390},{"a":1750797098622439,"name":"Finish","f":1750797098622439,"d_finished":0,"c":0,"l":1750797098622992,"d":553}],"id":"9437184::8"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);
Got TEvKqpCompute::TEvScanData [1:878:2836]->[1:877:2835]
2025-06-24T20:31:38.623465Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T20:31:38.620671Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0;
2025-06-24T20:31:38.623504Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor;
2025-06-24T20:31:38.623603Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184
160000/9739224 160000/9739224 80000/4873744 0/0
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::AsyncCreateDifferentViews [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141]
Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141]
2025-06-24T20:31:38.133018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE:
schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:31:38.133125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:38.133168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:31:38.133214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:31:38.133282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:31:38.133313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:31:38.133375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:38.133488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:31:38.134133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:31:38.134393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:31:38.223687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:31:38.223751Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:38.241953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:31:38.246926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:31:38.247261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:31:38.258147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:31:38.258414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:31:38.259230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:38.259604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:31:38.262948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:38.263177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 
2025-06-24T20:31:38.264465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:38.264536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:38.264686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:31:38.264778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:38.264838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:31:38.264981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:31:38.281650Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:31:38.477798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:38.478076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:38.478293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:31:38.478341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:31:38.478611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:31:38.478748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:38.482872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:38.483100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:31:38.483377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-24T20:31:38.483452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:31:38.483501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:31:38.483543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:31:38.489750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:38.489858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:31:38.489922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:31:38.498634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:38.498709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:38.498774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:38.498830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:31:38.503030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:38.506493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:31:38.506716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:31:38.507849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:38.508002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:38.508062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:38.508395Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:31:38.508453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:38.508648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:38.508739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:31:38.511660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:38.511716Z node 1 :FLAT_TX_SCHEMESHARD ... X_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:31:39.037047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:31:39.037075Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-24T20:31:39.037109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T20:31:39.037205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-24T20:31:39.040787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:31:39.042385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 101 2025-06-24T20:31:39.042690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T20:31:39.042739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-24T20:31:39.042825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T20:31:39.042869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 TestWaitNotification wait txId: 103 2025-06-24T20:31:39.042926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send 
EvNotifyTxCompletion 2025-06-24T20:31:39.042949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T20:31:39.043564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T20:31:39.043690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T20:31:39.043737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T20:31:39.043770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:337:2326] 2025-06-24T20:31:39.044012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T20:31:39.044063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:31:39.044086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:337:2326] 2025-06-24T20:31:39.044230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:31:39.044251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:337:2326] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 2025-06-24T20:31:39.044813Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDir" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:31:39.045009Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDir" took 236us result status StatusSuccess 2025-06-24T20:31:39.045525Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDir" PathDescription { Self { Name: "SomeDir" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "FirstView" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "SecondView" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 103 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 
50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:39.144122Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDir/FirstView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:31:39.144437Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDir/FirstView" took 356us result status StatusSuccess 2025-06-24T20:31:39.144839Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDir/FirstView" PathDescription { Self { Name: "FirstView" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ViewDescription { Name: "FirstView" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 QueryText: "First query" CapturedContext { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:39.145490Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme 
DoExecute, record: Path: "/MyRoot/SomeDir/SecondView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:31:39.145674Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDir/SecondView" took 191us result status StatusSuccess 2025-06-24T20:31:39.145983Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDir/SecondView" PathDescription { Self { Name: "SecondView" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 103 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ViewDescription { Name: "SecondView" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 QueryText: "Second query" CapturedContext { } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpPg::DropTableIfExists [GOOD] >> KqpPg::DropTableIfExists_GenericQuery >> THiveTest::TestRestartTablets [GOOD] >> THiveTest::TestServerlessComputeResourcesMode >> AsyncIndexChangeExchange::ShouldRemoveRecordsAfterDroppingIndex [GOOD] >> AsyncIndexChangeExchange::ShouldRemoveRecordsAfterCancelIndexBuild ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::CreateView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:31:39.477763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:31:39.477869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 
15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:39.477916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:31:39.477952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:31:39.478013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:31:39.478049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:31:39.478108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:39.478183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:31:39.478980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:31:39.479362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:31:39.563977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:31:39.564038Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:39.580952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:31:39.581439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:31:39.581631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:31:39.590270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:31:39.590481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:31:39.591120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:39.591503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:31:39.594828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:39.595046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:31:39.596322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:39.596390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:39.596633Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:31:39.596682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:39.596740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:31:39.596826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:31:39.604334Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:31:39.740604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:39.740863Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:39.741046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:31:39.741088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:31:39.741310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:31:39.741411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:39.743916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:39.744153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:31:39.744384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:39.744448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:31:39.744490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 
2025-06-24T20:31:39.744525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:31:39.747066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:39.747135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:31:39.747213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:31:39.749620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:39.749679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:39.749742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:39.749793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:31:39.753496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:39.755715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:31:39.755927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:31:39.756971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:39.757129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:39.757174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:39.757504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:31:39.757560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:39.757772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: 
IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:39.757868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:31:39.760409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:39.760459Z node 1 :FLAT_TX_SCHEMESHARD ... 39.829305Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T20:31:39.830844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 101:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2025-06-24T20:31:39.830963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 2025-06-24T20:31:39.831293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:39.831407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:39.831464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:45: [72057594046678944] TCreateView::TPropose, opId: 101:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000002 2025-06-24T20:31:39.831593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 128 -> 240 2025-06-24T20:31:39.831721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:39.831776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T20:31:39.833609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:39.833642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:39.833809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:31:39.883853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, 
at schemeshard: 72057594046678944 2025-06-24T20:31:39.883993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-24T20:31:39.884145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-24T20:31:39.884621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:31:39.884677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T20:31:39.884789Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T20:31:39.884825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:31:39.884865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T20:31:39.884912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:31:39.884950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-24T20:31:39.885002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:31:39.885047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T20:31:39.885078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T20:31:39.885190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:31:39.885236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-24T20:31:39.885270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 4 2025-06-24T20:31:39.885298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-24T20:31:39.886689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:31:39.886809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:31:39.886868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-24T20:31:39.886972Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-06-24T20:31:39.887020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:31:39.888240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:31:39.888346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:31:39.888383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T20:31:39.888409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-24T20:31:39.888439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T20:31:39.888503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T20:31:39.896950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T20:31:39.902640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T20:31:39.902982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T20:31:39.903030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T20:31:39.903540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T20:31:39.903689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T20:31:39.903755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:301:2290] TestWaitNotification: OK eventTxId 101 2025-06-24T20:31:39.904704Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:31:39.904989Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 
describe path "/MyRoot/MyView" took 309us result status StatusSuccess 2025-06-24T20:31:39.905424Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyView" PathDescription { Self { Name: "MyView" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ViewDescription { Name: "MyView" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 QueryText: "Some query" CapturedContext { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> Cdc::Write[PqRunner] [GOOD] >> Cdc::Write[YdsRunner] |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap |88.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap >> THiveTest::TestFollowers_LocalNodeOnly [GOOD] >> THiveTest::TestFollowersCrossDC_Tight >> TSchemeShardViewTest::AsyncCreateSameView >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithValidTokenButWithoutACL_SessionClosedWithUnauthorizedError [GOOD] >> TGRpcCmsTest::DescribeOptionsTest >> KqpQueryService::AlterTempTable [GOOD] >> KqpQueryService::CTASWithoutPerStatement >> TAsyncIndexTests::SplitIndexWithReboots[PipeResets] [GOOD] >> KqpPg::CreateIndex [GOOD] >> KqpPg::CreateNotNullPgColumn >> KqpBatchDelete::Returning [GOOD] >> TSchemeShardViewTest::EmptyName >> TPersqueueControlPlaneTestSuite::SetupWriteLockSessionWithDatabase [GOOD] >> TGRpcCmsTest::SimpleTenantsTestSyncOperation ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithValidTokenButWithoutACL_SessionClosedWithUnauthorizedError [GOOD] Test command err: === Server->StartServer(false); 2025-06-24T20:31:25.277425Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616222134268546:2215];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:25.277575Z 
node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:31:25.558989Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002460/r3tmp/tmpkxIxpe/pdisk_1.dat 2025-06-24T20:31:25.569307Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616222750187666:2203];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:25.569778Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:31:25.693307Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:31:25.928592Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:31:25.928696Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:31:25.929805Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:31:25.929873Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:31:25.938048Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:31:25.938191Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:31:25.938714Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:31:25.952861Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17236, node 1 2025-06-24T20:31:26.270832Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:31:26.344563Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/002460/r3tmp/yandexJIi2TZ.tmp 2025-06-24T20:31:26.344612Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/002460/r3tmp/yandexJIi2TZ.tmp 2025-06-24T20:31:26.344806Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/002460/r3tmp/yandexJIi2TZ.tmp 2025-06-24T20:31:26.363733Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:31:26.363354Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:31:26.582508Z INFO: TTestServer started on Port 18305 GrpcPort 17236 TClient is connected to server localhost:18305 PQClient connected to localhost:17236 === TenantModeEnabled() = 1 === Init PQ - start server on port 17236 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:31:27.489887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T20:31:27.490124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:31:27.490396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T20:31:27.490426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976715657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T20:31:27.490734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:27.490821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:27.499543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T20:31:27.499895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T20:31:27.500156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:31:27.500228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T20:31:27.500247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: 
TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-06-24T20:31:27.500262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 2 -> 3 waiting... 2025-06-24T20:31:27.504748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:31:27.504778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 0/1, is published: true 2025-06-24T20:31:27.504820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:31:27.512065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:31:27.512160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T20:31:27.512186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 3 -> 128 2025-06-24T20:31:27.520165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:31:27.520206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:31:27.520226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T20:31:27.520279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-06-24T20:31:27.525783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:27.535997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976715657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976715657 msg type: 269090816 2025-06-24T20:31:27.536189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976715657, partId: 4294967295, tablet: 72057594046316545 2025-06-24T20:31:27.540837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750797087585, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T20:31:27.541088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750797087585 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 
2025-06-24T20:31:27.541125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T20:31:27.541461Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 128 -> 240 2025-06-24T20:31:27.541502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T20:31:27.541695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-24T20:31:27.543273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, Loc ... propose status:StatusSuccess, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:41.420521Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:31:41.420586Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715665:0 progress is 1/1 2025-06-24T20:31:41.420594Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715665 ready parts: 1/1 2025-06-24T20:31:41.420612Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715665:0 progress is 1/1 2025-06-24T20:31:41.420621Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715665 ready parts: 1/1 2025-06-24T20:31:41.420656Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 3 2025-06-24T20:31:41.420696Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715665, ready parts: 1/1, is published: false 2025-06-24T20:31:41.420711Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 10], at schemeshard: 72057594046644480 2025-06-24T20:31:41.420721Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715665 ready parts: 1/1 2025-06-24T20:31:41.420732Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715665:0 2025-06-24T20:31:41.420742Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 281474976715665, publications: 1, subscribers: 0 2025-06-24T20:31:41.420752Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 281474976715665, [OwnerId: 72057594046644480, LocalPathId: 10], 3 2025-06-24T20:31:41.426009Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976715665, response: Status: StatusSuccess TxId: 281474976715665 SchemeshardId: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T20:31:41.426251Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715665, database: /Root, subject: , status: StatusSuccess, operation: MODIFY ACL, path: /Root/acc/topic1, add access: +W:test_user@builtin, remove access: -():test_user@builtin:- 2025-06-24T20:31:41.426423Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-24T20:31:41.426437Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715665, path id: [OwnerId: 72057594046644480, LocalPathId: 10] 2025-06-24T20:31:41.426626Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-24T20:31:41.426649Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:7519616272460428875:2384], at schemeshard: 72057594046644480, txId: 281474976715665, path id: 10 2025-06-24T20:31:41.427137Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976715665 2025-06-24T20:31:41.427226Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976715665 2025-06-24T20:31:41.427240Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976715665 2025-06-24T20:31:41.427255Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715665, pathId: [OwnerId: 72057594046644480, LocalPathId: 10], version: 3 2025-06-24T20:31:41.427269Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 4 2025-06-24T20:31:41.427349Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715665, subscribers: 0 2025-06-24T20:31:41.429540Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715665 2025-06-24T20:31:41.430318Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-24T20:31:41.430350Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 2 2025-06-24T20:31:41.430628Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: grpc read done: success: 1 data: init_request { topic: "/Root/acc/topic1" message_group_id: "test-message-group" } 2025-06-24T20:31:41.430707Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session 
request cookie: 2 topic: "/Root/acc/topic1" message_group_id: "test-message-group" from ipv6:[::1]:55528 2025-06-24T20:31:41.430724Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=2 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:55528 proto=v1 topic=/Root/acc/topic1 durationSec=0 2025-06-24T20:31:41.430731Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-24T20:31:41.431607Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: describe result for acl check 2025-06-24T20:31:41.431761Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-24T20:31:41.431770Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-24T20:31:41.431777Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-24T20:31:41.431806Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7519616293935266334:2341] (SourceId=test-message-group, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-24T20:31:41.431822Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
session cookie: 2 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-24T20:31:41.432698Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037889 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037889, NodeId 4, Generation: 1 2025-06-24T20:31:41.433832Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 2 partition: 0 MaxSeqNo: 0 sessionId: test-message-group|3054699b-cd0a33a4-ea7f4e92-ff969167_0 2025-06-24T20:31:41.433052Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie test-message-group|3054699b-cd0a33a4-ea7f4e92-ff969167_0 generated for partition 0 topic 'acc/topic1' owner test-message-group 2025-06-24T20:31:41.439677Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: test-message-group|3054699b-cd0a33a4-ea7f4e92-ff969167_0 grpc read done: success: 1 data: update_token_request [content omitted] 2025-06-24T20:31:41.440079Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1346: updating token 2025-06-24T20:31:41.440155Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-24T20:31:41.441172Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: test-message-group|3054699b-cd0a33a4-ea7f4e92-ff969167_0 describe result for acl check 2025-06-24T20:31:41.441280Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 2 reason: access to topic 'Topic /Root/acc/topic1 in database: /Root' denied for 'test_user_2@builtin' due to 'no WriteTopic rights', Marker# PQ1125 sessionId: test-message-group|3054699b-cd0a33a4-ea7f4e92-ff969167_0 2025-06-24T20:31:41.441587Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: test-message-group|3054699b-cd0a33a4-ea7f4e92-ff969167_0 is DEAD 2025-06-24T20:31:41.441854Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:31:41.783740Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519616293935266350:2350], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:31:41.785433Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=MmY0YjY5YTUtOWFlMWM2MTEtNWVmNWE2MjMtMTU1NjY0MTY=, ActorId: [3:7519616293935266343:2346], ActorState: ExecuteState, TraceId: 01jyht7and3bh50ajrcfzhkdn9, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:31:41.785833Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } >> TSchemeShardViewTest::AsyncCreateSameView [GOOD] >> TPersQueueCommonTest::Auth_MultipleInflightWriteUpdateTokenRequestWithDifferentValidToken_SessionClosedWithOverloadedError [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase+useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitIndexWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T20:30:39.787717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:30:39.787848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:39.787898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:30:39.787952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:30:39.788007Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:30:39.788087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:30:39.788159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:39.788262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:30:39.789172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:30:39.789711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:30:39.953263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T20:30:39.953365Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:39.954241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T20:30:39.984587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:30:39.985094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:30:39.985304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:30:40.024136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:30:40.024490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:30:40.025285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:40.031537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:30:40.035513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:40.035763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:30:40.037068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:40.037149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:40.037388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:30:40.037451Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:30:40.037515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:30:40.037695Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T20:30:40.055960Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:30:40.322441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:40.322725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:40.322990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:30:40.323054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:30:40.327540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:30:40.327778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:40.336172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:40.336402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:30:40.336659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:40.336733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:30:40.336780Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:30:40.336849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:30:40.344302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:40.344401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:40.344454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:30:40.351402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:40.351487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:40.351540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:40.351668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:30:40.375630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:30:40.384183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:30:40.384486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:30:40.385759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:40.386006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969454 } } Step: 5000001 Media ... 
ntToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableIndexes { Name: "UserDefinedIndex" LocalPathId: 4 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false 
BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409547 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:42.815976Z node 22 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T20:31:42.816324Z node 22 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 388us result status StatusSuccess 2025-06-24T20:31:42.817303Z node 22 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: 
EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\002\000\004\000\000\0002\000\000\000\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 
PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::AsyncCreateSameView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:31:43.187974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:31:43.188081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:43.188121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:31:43.188161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:31:43.188230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:31:43.188286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:31:43.188352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:43.188460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-24T20:31:43.189253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:31:43.189629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:31:43.271993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:31:43.272052Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:43.282825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:31:43.286435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:31:43.286596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:31:43.295713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:31:43.295909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:31:43.296564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:43.296933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:31:43.299870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:43.300025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:31:43.300943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:43.300995Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:43.301079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:31:43.301114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:43.301154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:31:43.301255Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:31:43.306723Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:31:43.492090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:43.492465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:43.492719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:31:43.492763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:31:43.493013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:31:43.493158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:43.495875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:43.496122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:31:43.496390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:43.496458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:31:43.496498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:31:43.496535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:31:43.498888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:43.498965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:31:43.499048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:31:43.501926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:43.501991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:43.502082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:43.502138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:31:43.506467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:43.508923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:31:43.509176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:31:43.510298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:43.510441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:43.510492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:43.510791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:31:43.510846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:43.511049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:43.511123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:31:43.514051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:43.514100Z node 1 :FLAT_TX_SCHEMESHARD ... 
TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T20:31:43.620356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:43.620416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:43.620608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:31:43.620739Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:43.620786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-24T20:31:43.620863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-24T20:31:43.621421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:31:43.621485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T20:31:43.621641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T20:31:43.621680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:31:43.621726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T20:31:43.621760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:31:43.621807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-24T20:31:43.621860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:31:43.621916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T20:31:43.621951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T20:31:43.622030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:31:43.622073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-24T20:31:43.622116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, 
[OwnerId: 72057594046678944, LocalPathId: 1], 4 2025-06-24T20:31:43.622146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-24T20:31:43.622856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:31:43.622978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:31:43.623039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-24T20:31:43.623085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-06-24T20:31:43.623136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:31:43.625230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:31:43.625328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:31:43.625403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T20:31:43.625448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-24T20:31:43.625491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T20:31:43.625583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T20:31:43.631994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T20:31:43.633397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 101 2025-06-24T20:31:43.633826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- 
TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T20:31:43.633897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-24T20:31:43.633994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T20:31:43.634022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 TestWaitNotification wait txId: 103 2025-06-24T20:31:43.634098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T20:31:43.634120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T20:31:43.634694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T20:31:43.634902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T20:31:43.634954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T20:31:43.634995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:308:2297] 2025-06-24T20:31:43.635258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:31:43.635302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:308:2297] 2025-06-24T20:31:43.635495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T20:31:43.635594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:31:43.635622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:308:2297] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 2025-06-24T20:31:43.636169Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:31:43.636433Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyView" took 248us result status StatusSuccess 2025-06-24T20:31:43.636881Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyView" PathDescription { Self { Name: "MyView" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ViewDescription { Name: "MyView" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 QueryText: "Some query" CapturedContext { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::Returning [GOOD] Test command err: Trying to start YDB, gRPC: 19755, MsgBus: 5858 2025-06-24T20:31:32.626855Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616253385528113:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:32.626913Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ce4/r3tmp/tmpM6ADKO/pdisk_1.dat 2025-06-24T20:31:33.150717Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:31:33.150842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:31:33.156225Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:31:33.168116Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:33.183576Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616253385528079:2079] 1750797092621523 != 1750797092621526 TServer::EnableGrpc on GrpcPort 19755, node 1 2025-06-24T20:31:33.479738Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:31:33.479768Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:31:33.479775Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:31:33.479905Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:31:33.654724Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5858 TClient is connected to server localhost:5858 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:31:34.460319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:31:34.642569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:31:35.258410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:31:35.492875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:31:35.607728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:31:37.366520Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616274860366213:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:37.366702Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:37.627542Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616253385528113:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:37.688227Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:31:38.563819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:38.639307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:38.702585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:38.785369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:38.830547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:38.888717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:38.937827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:39.131738Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519616283450301479:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T20:31:39.131873Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T20:31:39.132217Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616283450301484:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T20:31:39.140732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-24T20:31:39.180521Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616283450301486:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking }
2025-06-24T20:31:39.342123Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616283450301539:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-06-24T20:31:42.662853Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519616296335203744:2492], status: GENERIC_ERROR, issues:
:2:22: Error: BATCH DELETE is unsupported with RETURNING 2025-06-24T20:31:42.664714Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ODI1OTI2ZDctN2YwYjg5YTAtMTczMWNjMWItOWJkNGRlNDA=, ActorId: [1:7519616296335203734:2486], ActorState: ExecuteState, TraceId: 01jyht7bhf9fs4amvx8tc7pr27, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> TSchemeShardViewTest::EmptyName [GOOD] >> THiveTest::TestServerlessComputeResourcesMode [GOOD] >> THiveTest::TestSkipBadNode ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersqueueControlPlaneTestSuite::SetupWriteLockSessionWithDatabase [GOOD] Test command err: === Server->StartServer(false); 2025-06-24T20:31:24.885720Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616219066448797:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:24.885767Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:31:24.968692Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616220706744797:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:24.972182Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00247b/r3tmp/tmpYikfNc/pdisk_1.dat 2025-06-24T20:31:25.271885Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:31:25.276561Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:31:25.863531Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:31:25.889267Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:31:25.903004Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:31:25.903097Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:31:25.910098Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:31:25.913807Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:25.939392Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:31:25.942185Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:31:25.942295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:31:25.965294Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 
2025-06-24T20:31:25.990548Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:31:26.003431Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 25029, node 1 2025-06-24T20:31:26.343840Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00247b/r3tmp/yandexdroHEf.tmp 2025-06-24T20:31:26.343905Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00247b/r3tmp/yandexdroHEf.tmp 2025-06-24T20:31:26.344203Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00247b/r3tmp/yandexdroHEf.tmp 2025-06-24T20:31:26.355436Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:31:26.581373Z INFO: TTestServer started on Port 21441 GrpcPort 25029 TClient is connected to server localhost:21441 PQClient connected to localhost:25029 === TenantModeEnabled() = 1 === Init PQ - start server on port 25029 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:31:27.344974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T20:31:27.345318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:31:27.359555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T20:31:27.359612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976715657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T20:31:27.359933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:27.360106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:27.372687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T20:31:27.373018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T20:31:27.373254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:31:27.373374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T20:31:27.373392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-06-24T20:31:27.373411Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 2 -> 3 waiting... 
2025-06-24T20:31:27.380042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:31:27.380170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T20:31:27.380187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 3 -> 128 2025-06-24T20:31:27.383392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:31:27.383425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 0/1, is published: true 2025-06-24T20:31:27.383459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:31:27.384189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:31:27.384209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:31:27.384252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T20:31:27.384319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-06-24T20:31:27.420446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:27.432245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976715657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976715657 msg type: 269090816 2025-06-24T20:31:27.432439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976715657, partId: 4294967295, tablet: 72057594046316545 2025-06-24T20:31:27.457513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750797087487, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T20:31:27.457717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750797087487 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T20:31:27.457751Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T20:31:27.458102Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 128 -> 240 2025-06-24T20:31:27.458140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose Handle ... 2057594046644480, txId: 281474976710664 2025-06-24T20:31:42.034471Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710664, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], version: 2 2025-06-24T20:31:42.034479Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 12] was 4 2025-06-24T20:31:42.034514Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976710664, subscribers: 1 2025-06-24T20:31:42.034526Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [3:7519616294304050370:2329] 2025-06-24T20:31:42.042468Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710664 2025-06-24T20:31:42.042516Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710664 Create topic result: 1 === EnablePQLogs === CreateChannel === NewStub === InitializeWritePQService === InitializeWritePQService start iteration === InitializeWritePQService create streamingWriter === InitializeWritePQService Write 2025-06-24T20:31:42.150754Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-24T20:31:42.150791Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1 2025-06-24T20:31:42.151079Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: grpc read done: success: 1 data: init_request { topic: "Root/acc/topic1" message_group_id: "12345678" } 2025-06-24T20:31:42.151204Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1 topic: "Root/acc/topic1" message_group_id: "12345678" from ipv6:[::1]:48472 2025-06-24T20:31:42.151227Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:48472 proto=v1 topic=Root/acc/topic1 durationSec=0 2025-06-24T20:31:42.151236Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-24T20:31:42.157182Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-24T20:31:42.157329Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-24T20:31:42.157350Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; 
DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-24T20:31:42.157358Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-24T20:31:42.157390Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7519616298599017837:2332] (SourceId=12345678, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-24T20:31:42.157410Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-24T20:31:42.161734Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037889 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037889, NodeId 4, Generation: 1 2025-06-24T20:31:42.162121Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 12345678|d9953094-29ce3181-2502c6e6-25918ae2_0 generated for partition 0 topic 'acc/topic1' owner 12345678 2025-06-24T20:31:42.163110Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: 12345678|d9953094-29ce3181-2502c6e6-25918ae2_0 2025-06-24T20:31:42.164071Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: 12345678|d9953094-29ce3181-2502c6e6-25918ae2_0 grpc read done: success: 0 data: 2025-06-24T20:31:42.164091Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: 12345678|d9953094-29ce3181-2502c6e6-25918ae2_0 grpc read failed 2025-06-24T20:31:42.164259Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:818: session v1 closed cookie: 1 sessionId: 12345678|d9953094-29ce3181-2502c6e6-25918ae2_0 2025-06-24T20:31:42.164283Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: 12345678|d9953094-29ce3181-2502c6e6-25918ae2_0 is DEAD Finish: 0 === InitializeWritePQService done === PersQueueClient 2025-06-24T20:31:42.164542Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison === InitializePQ completed 2025-06-24T20:31:42.177982Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-24T20:31:42.178007Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 2 2025-06-24T20:31:42.178295Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: grpc read done: success: 1 data: init_request { topic: "topic1" message_group_id: "12345678" } 2025-06-24T20:31:42.178388Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 2 topic: "topic1" message_group_id: "12345678" from ipv6:[::1]:48472 2025-06-24T20:31:42.178408Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=2 sessionId= userAgent="pqv1 server" 
ip=ipv6:[::1]:48472 proto=v1 topic=topic1 durationSec=0 2025-06-24T20:31:42.178420Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-24T20:31:42.181035Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: describe result for acl check 2025-06-24T20:31:42.181184Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-24T20:31:42.181196Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-24T20:31:42.181204Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-24T20:31:42.181233Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7519616298599017856:2341] (SourceId=12345678, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-24T20:31:42.181268Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
session cookie: 2 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-24T20:31:42.183786Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037889 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037889, NodeId 4, Generation: 1 2025-06-24T20:31:42.184069Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 12345678|9c681aff-470b7747-e7bd8ce7-59eef2ed_0 generated for partition 0 topic 'acc/topic1' owner 12345678 2025-06-24T20:31:42.191505Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 2 partition: 0 MaxSeqNo: 0 sessionId: 12345678|9c681aff-470b7747-e7bd8ce7-59eef2ed_0 2025-06-24T20:31:42.199309Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: 12345678|9c681aff-470b7747-e7bd8ce7-59eef2ed_0 grpc read done: success: 0 data: 2025-06-24T20:31:42.199333Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 2 sessionId: 12345678|9c681aff-470b7747-e7bd8ce7-59eef2ed_0 grpc read failed 2025-06-24T20:31:42.199363Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: 12345678|9c681aff-470b7747-e7bd8ce7-59eef2ed_0 grpc closed 2025-06-24T20:31:42.199378Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: 12345678|9c681aff-470b7747-e7bd8ce7-59eef2ed_0 is DEAD 2025-06-24T20:31:42.205045Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:31:42.727368Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519616298599017874:2351], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:31:42.729926Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=M2FhMzZjMDctYTUzMWY0MTMtZjJmZTI2ZjgtYmFlZGQ4YTk=, ActorId: [3:7519616298599017867:2347], ActorState: ExecuteState, TraceId: 01jyht7bk22r31ytqk13bsqf2j, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:31:42.730460Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } >> THiveTest::TestLimitedNodeList [GOOD] >> THiveTest::TestHiveFollowersWithChangingDC ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::EmptyName [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:31:44.293580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:31:44.293874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:44.293919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:31:44.293958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:31:44.294018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:31:44.294061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:31:44.294115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:44.294192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:31:44.295002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:31:44.295467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:31:44.404111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:31:44.404215Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:44.435405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:31:44.435906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:31:44.436143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:31:44.445445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:31:44.445664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:31:44.446378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:44.446746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:31:44.450375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:44.450691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:31:44.452056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:44.452156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:44.452557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:31:44.452618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:44.452693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:31:44.452803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:31:44.460704Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:31:44.706875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-06-24T20:31:44.707268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:44.707519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:31:44.707566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:31:44.707781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:31:44.707926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:44.710659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:44.710938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:31:44.711177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:44.711242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:31:44.711287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:31:44.711321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:31:44.718684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:44.718767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:31:44.718830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:31:44.723612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:44.723674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:44.723740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:44.723793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , 
TxId: 1 ready parts: 1/1 2025-06-24T20:31:44.730855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:44.740076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:31:44.740369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:31:44.748580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:44.748757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:44.748839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:44.749173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:31:44.749247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:44.749466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:44.749564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:31:44.756545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:44.756614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:44.756903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:44.756955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-24T20:31:44.757339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-24T20:31:44.757404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T20:31:44.757524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T20:31:44.757565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:31:44.757608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T20:31:44.757660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:31:44.757717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T20:31:44.757762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:31:44.757799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T20:31:44.757863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T20:31:44.757956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:31:44.757996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T20:31:44.758032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T20:31:44.763294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T20:31:44.763463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T20:31:44.763508Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T20:31:44.763551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T20:31:44.763619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:44.763778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T20:31:44.783911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T20:31:44.784667Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-24T20:31:44.785322Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:271:2260] Bootstrap 2025-06-24T20:31:44.813782Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:271:2260] Become StateWork (SchemeCache [1:276:2265]) 2025-06-24T20:31:44.816760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateView CreateView { Name: "" QueryText: "Some query" } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:44.816944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_view.cpp:118: [72057594046678944] TCreateView Propose, path: /MyRoot/, opId: 101:0 2025-06-24T20:31:44.817002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_view.cpp:124: [72057594046678944] TCreateView Propose, path: /MyRoot/, opId: 101:0, viewDescription: Name: "" QueryText: "Some query" 2025-06-24T20:31:44.817124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Check failed: path: '/MyRoot/', error: path part shouldn't be empty, at schemeshard: 72057594046678944 2025-06-24T20:31:44.817985Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T20:31:44.829078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Check failed: path: \'/MyRoot/\', error: path part shouldn\'t be empty" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:44.829367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Check failed: path: '/MyRoot/', error: path part shouldn't be empty, operation: CREATE VIEW, path: /MyRoot/ 2025-06-24T20:31:44.829939Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::Auth_MultipleInflightWriteUpdateTokenRequestWithDifferentValidToken_SessionClosedWithOverloadedError [GOOD] Test command err: === Server->StartServer(false); 2025-06-24T20:31:25.618099Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616224638286285:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:25.618284Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:31:25.757928Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616223301953658:2161];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:26.053720Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:31:26.053820Z node 2 :METADATA_PROVIDER ERROR: 
log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:31:26.060378Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0024a2/r3tmp/tmpLYIBw6/pdisk_1.dat 2025-06-24T20:31:26.622707Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:31:26.622825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:31:26.626480Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:31:26.626569Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:31:26.646000Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:31:26.654175Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:26.725867Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:31:26.726022Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:31:26.729285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:31:26.759308Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 9307, node 1 2025-06-24T20:31:27.106792Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0024a2/r3tmp/yandexEGWwwj.tmp 2025-06-24T20:31:27.106821Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0024a2/r3tmp/yandexEGWwwj.tmp 2025-06-24T20:31:27.106990Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0024a2/r3tmp/yandexEGWwwj.tmp 2025-06-24T20:31:27.107156Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:31:27.281332Z INFO: TTestServer started on Port 21212 GrpcPort 9307 TClient is connected to server localhost:21212 PQClient connected to localhost:9307 === TenantModeEnabled() = 1 === Init PQ - start server on port 9307 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:31:28.326341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T20:31:28.326579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:31:28.326805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T20:31:28.326826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976715657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T20:31:28.327050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:28.327102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:28.340406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T20:31:28.340669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root waiting... 
2025-06-24T20:31:28.340864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:31:28.340923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T20:31:28.340937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-06-24T20:31:28.340955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 2 -> 3 2025-06-24T20:31:28.343947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:31:28.344005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T20:31:28.344024Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 3 -> 128 2025-06-24T20:31:28.349163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:31:28.349192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 0/1, is published: true 2025-06-24T20:31:28.349211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:31:28.357752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:31:28.357785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:31:28.357823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T20:31:28.357878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-06-24T20:31:28.373991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:28.376392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976715657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976715657 msg type: 269090816 2025-06-24T20:31:28.376536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976715657, partId: 4294967295, tablet: 72057594046316545 2025-06-24T20:31:28.380215Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750797088425, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T20:31:28.380376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750797088425 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T20:31:28.380404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T20:31:28.380700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 128 -> 240 2025-06-24T20:31:28.380729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T20:31:28.380890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-24T20:31:28.380952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPa ... NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976720665:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:42.460048Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976720665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:31:42.460125Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976720665:0 progress is 1/1 2025-06-24T20:31:42.460139Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976720665 ready parts: 1/1 2025-06-24T20:31:42.460162Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976720665:0 progress is 1/1 2025-06-24T20:31:42.460177Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976720665 ready parts: 1/1 2025-06-24T20:31:42.460220Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 3 2025-06-24T20:31:42.460286Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976720665, ready parts: 1/1, is published: false 2025-06-24T20:31:42.460307Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 10], at schemeshard: 72057594046644480 2025-06-24T20:31:42.460320Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976720665 ready parts: 1/1 2025-06-24T20:31:42.460337Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976720665:0 2025-06-24T20:31:42.460365Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 281474976720665, publications: 1, subscribers: 0 2025-06-24T20:31:42.460401Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 281474976720665, [OwnerId: 72057594046644480, LocalPathId: 10], 3 2025-06-24T20:31:42.468158Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976720665, response: Status: StatusSuccess TxId: 281474976720665 SchemeshardId: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T20:31:42.468636Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976720665, database: /Root, subject: , status: StatusSuccess, operation: MODIFY ACL, path: /Root/acc/topic1, add access: +W:test_user_0@builtin, add access: +W:test_user_1@builtin, add access: +W:test_user_2@builtin, remove access: -():test_user_0@builtin:-, remove access: -():test_user_1@builtin:-, remove access: -():test_user_2@builtin:- 2025-06-24T20:31:42.468830Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-24T20:31:42.468848Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976720665, path id: [OwnerId: 72057594046644480, LocalPathId: 10] 2025-06-24T20:31:42.469081Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-24T20:31:42.469093Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:7519616271129435613:2401], at schemeshard: 72057594046644480, txId: 281474976720665, path id: 10 2025-06-24T20:31:42.470452Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976720665 2025-06-24T20:31:42.470556Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976720665 2025-06-24T20:31:42.470570Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976720665 2025-06-24T20:31:42.470586Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976720665, pathId: [OwnerId: 72057594046644480, LocalPathId: 10], version: 3 2025-06-24T20:31:42.470602Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 4 2025-06-24T20:31:42.470699Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976720665, subscribers: 0 
2025-06-24T20:31:42.478740Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-24T20:31:42.478770Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 2 2025-06-24T20:31:42.482788Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: grpc read done: success: 1 data: init_request { topic: "/Root/acc/topic1" message_group_id: "test-group-id" } 2025-06-24T20:31:42.482919Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 2 topic: "/Root/acc/topic1" message_group_id: "test-group-id" from ipv6:[::1]:58328 2025-06-24T20:31:42.482937Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=2 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:58328 proto=v1 topic=/Root/acc/topic1 durationSec=0 2025-06-24T20:31:42.482945Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-24T20:31:42.484097Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: describe result for acl check 2025-06-24T20:31:42.484324Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-24T20:31:42.484336Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-24T20:31:42.484352Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-24T20:31:42.484387Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7519616296899240479:2356] (SourceId=test-group-id, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-24T20:31:42.484407Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
session cookie: 2 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-24T20:31:42.487352Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037889 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037889, NodeId 3, Generation: 1 2025-06-24T20:31:42.487735Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie test-group-id|98b0ef24-2028d30-122d400d-71c89d5e_0 generated for partition 0 topic 'acc/topic1' owner test-group-id 2025-06-24T20:31:42.488249Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 2 partition: 0 MaxSeqNo: 0 sessionId: test-group-id|98b0ef24-2028d30-122d400d-71c89d5e_0 2025-06-24T20:31:42.494828Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: test-group-id|98b0ef24-2028d30-122d400d-71c89d5e_0 grpc read done: success: 1 data: update_token_request [content omitted] 2025-06-24T20:31:42.498811Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: test-group-id|98b0ef24-2028d30-122d400d-71c89d5e_0 grpc read done: success: 1 data: update_token_request [content omitted] 2025-06-24T20:31:42.498919Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 2 reason: got another 'update_token_request' while previous still in progress, only single token update is allowed at a time sessionId: test-group-id|98b0ef24-2028d30-122d400d-71c89d5e_0 2025-06-24T20:31:42.499265Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: test-group-id|98b0ef24-2028d30-122d400d-71c89d5e_0 is DEAD 2025-06-24T20:31:42.499610Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:31:42.500333Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976720665 2025-06-24T20:31:42.915318Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519616296899240499:2363], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:31:42.917702Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=MTFmNWYzYmEtZWNmYWJhYjktZWRlNzJiZWMtZWI2ZGU4MTQ=, ActorId: [3:7519616296899240492:2359], ActorState: ExecuteState, TraceId: 01jyht7bs250necrr1gens83pq, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:31:42.918180Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } >> KqpNewEngine::PkSelect1 [GOOD] >> KqpNewEngine::PkSelect2 >> TGRpcCmsTest::RemoveWithAnotherTokenTest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest >> THiveTest::TestSkipBadNode [GOOD] >> THiveTest::TestStopTenant >> THiveTest::TestFollowersCrossDC_Tight [GOOD] >> THiveTest::TestFollowersCrossDC_MovingLeader >> KqpQueryService::TableSink_HtapComplex+withOltpSink [GOOD] >> KqpQueryService::TableSink_HtapComplex-withOltpSink |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest >> TEvLocalSyncDataTests::SqueezeBlocks1 [GOOD] >> TEvLocalSyncDataTests::SqueezeBlocks2 [GOOD] >> Cdc::NaN[PqRunner] [GOOD] >> Cdc::NaN[YdsRunner] >> TEvLocalSyncDataTests::SqueezeBlocks3 [GOOD] >> TQuorumTrackerTests::Erasure4Plus2BlockIncludingMyFailDomain_8_2 [GOOD] >> KqpPg::DropTableIfExists_GenericQuery [GOOD] >> KqpPg::EquiJoin+useSink >> Cdc::Write[YdsRunner] [GOOD] >> Cdc::Write[TopicRunner] |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TEvLocalSyncDataTests::SqueezeBlocks2 [GOOD] >> YdbTableSplit::SplitByLoadWithReadsMultipleSplitsWithData [GOOD] >> TGRpcCmsTest::DescribeOptionsTest [GOOD] >> TSyncBrokerTests::ShouldEnqueue >> KqpQueryService::CTASWithoutPerStatement [GOOD] >> KqpQueryService::CheckIsolationLevelFroPerStatementMode |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TQuorumTrackerTests::Erasure4Plus2BlockIncludingMyFailDomain_8_2 [GOOD] >> TSyncBrokerTests::ShouldEnqueue [GOOD] >> TSyncBrokerTests::ShouldEnqueueWithSameVDiskId >> TSyncBrokerTests::ShouldEnqueueWithSameVDiskId [GOOD] >> TSyncNeighborsTests::SerDes3 [GOOD] >> TOlap::CreateTableWithNullableKeysNotAllowed >> THiveTest::TestStopTenant [GOOD] >> THiveTest::TestTabletAvailability ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncBrokerTests::ShouldEnqueueWithSameVDiskId [GOOD] Test command err: 2025-06-24T20:31:48.825307Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, 
VDisk actor id: [0:1:1], actor id: [1:5:2052], token sent, active: 1, waiting: 0 2025-06-24T20:31:48.825448Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:90: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [1:6:2053], enqueued, active: 1, waiting: 1 2025-06-24T20:31:48.934319Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [2:5:2052], token sent, active: 1, waiting: 0 2025-06-24T20:31:48.934462Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:90: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [2:6:2053], enqueued, active: 1, waiting: 1 2025-06-24T20:31:48.934556Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:79: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [2:7:2054], enqueued, active: 1, waiting: 1 >> Cdc::HugeKey[TopicRunner] [GOOD] >> Cdc::HugeKeyDebezium |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncNeighborsTests::SerDes3 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::DescribeOptionsTest [GOOD] Test command err: 2025-06-24T20:31:43.250162Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616299834766562:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:43.267847Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ea4/r3tmp/tmpQkonod/pdisk_1.dat 2025-06-24T20:31:44.048712Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:44.084372Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:31:44.084485Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:31:44.094728Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3870, node 1 2025-06-24T20:31:44.125788Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:31:44.344142Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:31:44.389623Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:31:44.389655Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:31:44.389663Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:31:44.389805Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63682 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:31:44.915049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:63682 2025-06-24T20:31:45.405328Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:131: TTxProcessor(tenants) is now locking 2025-06-24T20:31:45.405353Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:143: TTxProcessor(tenants) is now locked by parent 2025-06-24T20:31:45.408330Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:102: TTxProcessor(tenants) is now active 2025-06-24T20:31:45.457958Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285140, Sender [1:7519616308424701886:2275], Recipient [1:7519616299834766975:2204]: NKikimr::NConsole::TEvConsole::TEvDescribeTenantOptionsRequest { Request { } UserToken: "" PeerName: "ipv6:[::1]:50906" } 2025-06-24T20:31:45.458027Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:964: StateWork, processing event TEvConsole::TEvDescribeTenantOptionsRequest 2025-06-24T20:31:45.460914Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3335: Send TEvConsole::TEvDescribeTenantOptionsResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.DescribeDatabaseOptionsResult] { storage_units { kind: "hdd2" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "hdd" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "hdd1" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "ssd" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "test" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } availability_zones { name: "dc-1" labels { key: "collocation" value: "disabled" } labels { key: "fixed_data_center" value: "DC-1" } } availability_zones { name: "any" labels { key: "any_data_center" value: "true" } labels { key: "collocation" value: "disabled" } } computational_units { kind: "slot" labels { key: "slot_type" value: "default" } labels { key: "type" value: "dynamic_slot" } allowed_availability_zones: "any" allowed_availability_zones: "dc-1" } } } } } >> 
AsyncIndexChangeExchange::ShouldRemoveRecordsAfterCancelIndexBuild [GOOD] >> AsyncIndexChangeExchange::ShouldDeliverChangesOnSplitMerge ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithReadsMultipleSplitsWithData [GOOD] Test command err: 2025-06-24T20:30:57.690356Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616103429139854:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:57.690434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003042/r3tmp/tmpoXG5Zb/pdisk_1.dat 2025-06-24T20:30:58.418121Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:58.442229Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:58.442331Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:58.451359Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:58.509469Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 28720, node 1 2025-06-24T20:30:58.731460Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:30:58.765137Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:30:58.765159Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:30:58.765166Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:30:58.765307Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8672 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:30:59.286639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:8672 2025-06-24T20:31:02.371018Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616124903977381:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:02.371165Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:02.691999Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616103429139854:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:02.692060Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:31:02.703108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:03.079839Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616129198944895:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.079942Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.092059Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616129198944911:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.092112Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616129198944912:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.092165Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616129198944913:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.092192Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616129198944914:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.092224Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616129198944915:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.106878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:31:03.107316Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616129198944963:2360], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.107379Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616129198944964:2361], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.107414Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616129198944960:2357], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.107758Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.115492Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616129198945003:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.115551Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616129198945005:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.115672Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616129198945000:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.115910Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.127270Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616129198945029:2374], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.127339Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616129198945031:2376], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.127382Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:03.127613Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616129198944926:2774] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:31:03.127722Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616129198944927:2775] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046 ... 8. Ctx: { TraceId: 01jyht7e6t79swd7vhhmfhxmvr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTRjMTFlNDUtZjAyYjVkNmEtNmQ5YzdkYzYtY2IzMzUwYWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.386909Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725381. Ctx: { TraceId: 01jyht7e756tfre03wcpdj7daw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDE0ZmRjMDAtYzA0ZjZmMTYtZmFlZDEwMWMtYjJkYTZhN2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.387671Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725380. Ctx: { TraceId: 01jyht7e7585e0ka5xhnd06bd8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDkwNGEyMC04NmY3OTNiNi0zZmQxMDg5NC0zNTBhMzAwYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.388313Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725382. Ctx: { TraceId: 01jyht7e751rb8hr1fjzahgt6d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDJmMjc5MTEtZTlhNWQ3NWEtMmEwZWZjOTUtM2Y3YjQ4ZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.395085Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725383. Ctx: { TraceId: 01jyht7e7ga1mcf7xtwcxgta2g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2Y0NDNhYy04NmQ3NjlmMi1iMWZlYThhYS0zYjBjOWJlMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.395871Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725384. Ctx: { TraceId: 01jyht7e7g3etya16ej8h4xc4a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTY0NmZiOTYtZDVhNzMxODUtMWFhNDZjN2ItMjY5Y2E1MWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.397003Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725385. Ctx: { TraceId: 01jyht7e7g1nr0zpf8jge56zr8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGY2YTlmYTItYmMzMTkyMDAtNzRjNmJmODgtY2NjZDhjMzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.411284Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725391. Ctx: { TraceId: 01jyht7e7y31xzwafz9zfzrvs5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzEwMmE0NDMtYzZjZWY4YjEtMWQ2ZWU3YS0zYzI3MDI1ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:31:45.412085Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725392. Ctx: { TraceId: 01jyht7e7z2j29pfz9e5krmbxs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDJmMjc5MTEtZTlhNWQ3NWEtMmEwZWZjOTUtM2Y3YjQ4ZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.412654Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725386. Ctx: { TraceId: 01jyht7e7y13x96w8cmvshhyhq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTBlMmZkNjEtYTE1MGM2MDQtYTA0MTMyYWEtYzFkY2NkNTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.413165Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725387. Ctx: { TraceId: 01jyht7e7y98k8wahezd5cr87j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTRjMTFlNDUtZjAyYjVkNmEtNmQ5YzdkYzYtY2IzMzUwYWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.413659Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725388. Ctx: { TraceId: 01jyht7e7y39yt4ta12c3yxzgz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDE0ZmRjMDAtYzA0ZjZmMTYtZmFlZDEwMWMtYjJkYTZhN2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.414149Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725389. Ctx: { TraceId: 01jyht7e7z8rwwqrw4nd7gv8bd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDkwNGEyMC04NmY3OTNiNi0zZmQxMDg5NC0zNTBhMzAwYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.414659Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725390. Ctx: { TraceId: 01jyht7e7y8xmyxsmkwqfbexba, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWFiMTk1NzAtYWZlZWI0MS1iZTkzZjVmYy1mYzdkZGYzOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.426960Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725394. Ctx: { TraceId: 01jyht7e88ck08ybjz3s16gk41, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTY0NmZiOTYtZDVhNzMxODUtMWFhNDZjN2ItMjY5Y2E1MWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.427987Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725393. Ctx: { TraceId: 01jyht7e874g6vnj4ge3dcp1jg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2Y0NDNhYy04NmQ3NjlmMi1iMWZlYThhYS0zYjBjOWJlMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.430409Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725395. Ctx: { TraceId: 01jyht7e886qx5h7a76w6k75ff, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGY2YTlmYTItYmMzMTkyMDAtNzRjNmJmODgtY2NjZDhjMzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.442913Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725396. Ctx: { TraceId: 01jyht7e8p63023w6h1rz89bts, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDJmMjc5MTEtZTlhNWQ3NWEtMmEwZWZjOTUtM2Y3YjQ4ZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root TClient::Ls request: /Root/Foo 2025-06-24T20:31:45.473940Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725397. Ctx: { TraceId: 01jyht7e92ej0srzjh9z1mb2f1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzEwMmE0NDMtYzZjZWY4YjEtMWQ2ZWU3YS0zYzI3MDI1ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.474889Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725398. Ctx: { TraceId: 01jyht7e8z7k25emf52zrj6fzt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTBlMmZkNjEtYTE1MGM2MDQtYTA0MTMyYWEtYzFkY2NkNTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750797062938 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 3 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-24T20:31:45.475283Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725401. Ctx: { TraceId: 01jyht7e8zdzwqw4xxhbsjyv0a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDkwNGEyMC04NmY3OTNiNi0zZmQxMDg5NC0zNTBhMzAwYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.475495Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725399. Ctx: { TraceId: 01jyht7e9287esp4b34j6dm9pa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDE0ZmRjMDAtYzA0ZjZmMTYtZmFlZDEwMWMtYjJkYTZhN2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.476097Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725400. Ctx: { TraceId: 01jyht7e92baqvb3t9vky99b7j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTRjMTFlNDUtZjAyYjVkNmEtNmQ5YzdkYzYtY2IzMzUwYWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.476989Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725402. Ctx: { TraceId: 01jyht7e93esqt3egm3tf5jf8p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2Y0NDNhYy04NmQ3NjlmMi1iMWZlYThhYS0zYjBjOWJlMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.477071Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725404. Ctx: { TraceId: 01jyht7e8zdnkw8xfezdvfw0gs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTY0NmZiOTYtZDVhNzMxODUtMWFhNDZjN2ItMjY5Y2E1MWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.477586Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725405. 
Ctx: { TraceId: 01jyht7e97a9b9hdr7jejs99br, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGY2YTlmYTItYmMzMTkyMDAtNzRjNmJmODgtY2NjZDhjMzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.477609Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725403. Ctx: { TraceId: 01jyht7e8zdnrnddxsmprdf5ym, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWFiMTk1NzAtYWZlZWI0MS1iZTkzZjVmYy1mYzdkZGYzOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:45.486274Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976725406. Ctx: { TraceId: 01jyht7e9y9gj9cawrft1jpx62, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDJmMjc5MTEtZTlhNWQ3NWEtMmEwZWZjOTUtM2Y3YjQ4ZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750797062938 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 3 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... 
(TRUNCATED) Table has 3 shards >> KqpPg::CreateNotNullPgColumn [GOOD] >> TOlapNaming::CreateColumnTableOk >> KqpPg::CreateSequence >> TOlap::CreateTableWithNullableKeysNotAllowed [GOOD] >> TOlap::CreateTableWithNullableKeys >> THiveTest::TestTabletAvailability [GOOD] >> THiveTest::TestTabletsStartingCounter >> TOlapNaming::AlterColumnTableOk >> TOlap::CreateStore |88.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut |88.6%| [LD] {RESULT} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut |88.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut >> TOlapNaming::AlterColumnStoreOk >> TOlap::CreateStoreWithDirs >> TGRpcCmsTest::SimpleTenantsTestSyncOperation [GOOD] >> TOlapNaming::CreateColumnStoreOk >> TOlap::CreateTableWithNullableKeys [GOOD] >> THiveTest::TestTabletsStartingCounter [GOOD] >> THiveTest::TestTabletsStartingCounterExternalBoot >> TOlap::CustomDefaultPresets >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase-useSink >> TOlap::CreateStore [GOOD] >> TOlap::CreateDropTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlap::CreateTableWithNullableKeys [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:31:49.850527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:31:49.850617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:49.850674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:31:49.850718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:31:49.850781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:31:49.850810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:31:49.850863Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:49.850940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:31:49.851759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:31:49.852116Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:31:49.954405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:31:49.954468Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:49.969209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:31:49.969690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:31:49.969870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:31:49.981984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:31:49.982207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:31:49.982929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:49.983284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:31:49.986769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:49.987007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:31:49.988301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:49.988395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:49.988652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:31:49.988706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:49.988773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:31:49.988871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:31:49.998114Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:31:50.165053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:50.165315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: 
//MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:50.165597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:31:50.165655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:31:50.165924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:31:50.166144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:50.168691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:50.168898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:31:50.169121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:50.169189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:31:50.169228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:31:50.169258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:31:50.172086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:50.172141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:31:50.172169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:31:50.174415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:50.174476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:50.174525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:50.174597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:31:50.178748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose 
send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:50.181276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:31:50.181485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:31:50.182580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:50.182767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:50.182817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:50.183169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:31:50.183236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:50.183418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:50.183508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:31:50.188051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:50.188115Z node 1 :FLAT_TX_SCHEMESHARD ... 
: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T20:31:51.738527Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 2025-06-24T20:31:51.738571Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-24T20:31:51.738615Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-24T20:31:51.739341Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T20:31:51.739434Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T20:31:51.739467Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 2025-06-24T20:31:51.739512Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-06-24T20:31:51.739545Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T20:31:51.740041Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T20:31:51.740105Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T20:31:51.740142Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 2025-06-24T20:31:51.740169Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 7 2025-06-24T20:31:51.740194Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T20:31:51.740251Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 106, ready parts: 0/1, is published: true 2025-06-24T20:31:51.743256Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 106:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 275382275 2025-06-24T20:31:51.743960Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: 
TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-24T20:31:51.744660Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-24T20:31:51.744791Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-24T20:31:51.757221Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6226: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409547 TxId: 106 2025-06-24T20:31:51.757305Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 106, tablet: 72075186233409547, partId: 0 2025-06-24T20:31:51.757456Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 106:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409547 TxId: 106 2025-06-24T20:31:51.757516Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 106:0 129 -> 130 FAKE_COORDINATOR: Erasing txId 106 2025-06-24T20:31:51.759782Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.759984Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.760036Z node 2 :FLAT_TX_SCHEMESHARD INFO: drop_table.cpp:315: TDropColumnTable TProposedDeleteParts operationId# 106:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:31:51.760146Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-24T20:31:51.760294Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#106:0 progress is 1/1 2025-06-24T20:31:51.760376Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T20:31:51.760433Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#106:0 progress is 1/1 2025-06-24T20:31:51.760467Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T20:31:51.760507Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: true 2025-06-24T20:31:51.760642Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:362:2338] message: TxId: 106 2025-06-24T20:31:51.760701Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T20:31:51.760751Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 106:0 2025-06-24T20:31:51.760787Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 106:0 2025-06-24T20:31:51.762455Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, 
LocalPathId: 5] was 2 2025-06-24T20:31:51.765181Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-24T20:31:51.765385Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-24T20:31:51.765432Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [2:669:2625] 2025-06-24T20:31:51.765980Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-24T20:31:51.766755Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186233409547;self_id=[2:515:2481];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:865;event=tablet_die; 2025-06-24T20:31:51.769982Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T20:31:51.771059Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-24T20:31:51.772679Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T20:31:51.772745Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-24T20:31:51.772811Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 Forgetting tablet 72075186233409547 2025-06-24T20:31:51.775355Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T20:31:51.775449Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T20:31:51.775655Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 2025-06-24T20:31:51.776305Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyDir/MyTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:31:51.776540Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyDir/MyTable" took 274us result status StatusPathDoesNotExist 2025-06-24T20:31:51.776707Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyDir/MyTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/MyDir\' (id: [OwnerId: 72057594046678944, LocalPathId: 3])" Path: 
"/MyRoot/MyDir/MyTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/MyDir" LastExistedPrefixPathId: 3 LastExistedPrefixDescription { Self { Name: "MyDir" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T20:31:51.777304Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 5 SchemeshardId: 72057594046678944 Options { }, at schemeshard: 72057594046678944 2025-06-24T20:31:51.777389Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 5 took 95us result status StatusPathDoesNotExist 2025-06-24T20:31:51.777482Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'\', error: path is empty" Path: "" PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> ObjectDistribution::TestManyIrrelevantNodes [GOOD] >> Sequencer::Basic1 [GOOD] >> StoragePool::TestDistributionRandomProbability >> THiveTest::TestTabletsStartingCounterExternalBoot [GOOD] >> TScaleRecommenderTest::BasicTest ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::SimpleTenantsTestSyncOperation [GOOD] Test command err: 2025-06-24T20:31:44.769205Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616306490724636:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:44.769287Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003e98/r3tmp/tmpB49PJG/pdisk_1.dat 2025-06-24T20:31:45.509900Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:45.563905Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:31:45.564061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:31:45.588600Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:31:45.631688Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 65360, node 1 2025-06-24T20:31:45.771607Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:31:45.889432Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:31:45.889457Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:31:45.889479Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:31:45.889615Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25254 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:31:46.419599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:31:46.514569Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7519616315080659816:2273], Recipient [1:7519616310785692242:2205]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { operation_params { operation_mode: SYNC } path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:60844" } 2025-06-24T20:31:46.514630Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-24T20:31:46.514658Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T20:31:46.514671Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T20:31:46.514835Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { operation_params { operation_mode: SYNC } path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:60844" 2025-06-24T20:31:46.515033Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1750797106514514) 2025-06-24T20:31:46.515644Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1750797106514514 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-24T20:31:46.516018Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 
state=NOT_ALLOCATED 2025-06-24T20:31:46.524349Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2025-06-24T20:31:46.525252Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750797106514514&action=1" } } } 2025-06-24T20:31:46.525477Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T20:31:46.525560Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-24T20:31:46.525743Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-24T20:31:46.525987Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285139, Sender [1:7519616315080659816:2273], Recipient [1:7519616310785692242:2205]: NKikimr::NConsole::TEvConsole::TEvNotifyOperationCompletionRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750797106514514&action=1" } UserToken: "" PeerName: "ipv6:[::1]:60844" } 2025-06-24T20:31:46.526017Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:968: StateWork, processing event TEvConsole::TEvNotifyOperationCompletionRequest 2025-06-24T20:31:46.526309Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:3443: Add subscription to /Root/users/user-1 for [1:7519616315080659816:2273] 2025-06-24T20:31:46.526405Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3451: Send TEvConsole::TEvNotifyOperationCompletionResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750797106514514&action=1" } } 2025-06-24T20:31:46.527067Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-06-24T20:31:46.527912Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-24T20:31:46.533957Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-24T20:31:46.534025Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-24T20:31:46.534115Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7519616315080659821:2205], Recipient [1:7519616310785692242:2205]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-24T20:31:46.534141Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-24T20:31:46.534157Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T20:31:46.534192Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T20:31:46.534243Z node 1 :CMS_TENANTS 
DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-06-24T20:31:46.534274Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-06-24T20:31:46.534363Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3206: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-06-24T20:31:46.538222Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-24T20:31:46.538255Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T20:31:46.538263Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T20:31:46.538280Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T20:31:46.538370Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-06-24T20:31:46.538404Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1750797106514514 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-24T20:31:46.542627Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-24T20:31:46.542822Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T20:31:46.542872Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-24T20:31:46.542890Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-24T20:31:46.571964Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" 2025-06-24T20:31:46.573915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemesh ... 
ager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_UNITS txid=1750797107501756 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-24T20:31:47.641008Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1750797107501756 issue= 2025-06-24T20:31:47.640117Z node 3 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72075186224037888 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found - using supplied 72075186224037888 2025-06-24T20:31:47.643799Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:34: TTxRemoveComputationalUnits Complete /Root/users/user-1 2025-06-24T20:31:47.643901Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2114: Send TEvTenantSlotBroker::TEvAlterTenant: TenantName: "/Root/users/user-1" 2025-06-24T20:31:47.643919Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T20:31:47.644151Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7519616310785692116:2207], Recipient [1:7519616310785692242:2205]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-24T20:31:47.644187Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-24T20:31:47.644209Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T20:31:47.644224Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T20:31:47.644257Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to REMOVING_POOLS 2025-06-24T20:31:47.644303Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_POOLS txid=1750797107501756 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-24T20:31:47.649881Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-24T20:31:47.649928Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T20:31:47.649968Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-24T20:31:47.650098Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-24T20:31:47.650598Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 2 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 3 } ItemConfigGeneration: 3 } } Success: true ConfigTxSeqNo: 13 2025-06-24T20:31:47.650668Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:151: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 3 } } } 2025-06-24T20:31:47.656381Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:31:47.660216Z node 1 :CMS_TENANTS DEBUG: 
console_tenants_manager.cpp:306: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 14 2025-06-24T20:31:47.660353Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435081, Sender [1:7519616319375627749:2205], Recipient [1:7519616310785692242:2205]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2025-06-24T20:31:47.660392Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:973: StateWork, processing event TEvPrivate::TEvPoolDeleted 2025-06-24T20:31:47.660407Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T20:31:47.660416Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T20:31:47.660460Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2025-06-24T20:31:47.660492Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=DELETED allocatednumgroups=0 2025-06-24T20:31:47.716064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:39: Got DeleteTabletReply with Forward response from Hive 72075186224037888 to Hive 72057594037968897 shardIdx 72057594046644480:1 2025-06-24T20:31:47.729449Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-24T20:31:47.729711Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T20:31:47.733845Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T20:31:47.733887Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T20:31:47.738547Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1750797107501756 2025-06-24T20:31:47.748384Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1750797107501756 issue= 2025-06-24T20:31:47.759226Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2958: Remove tenant /Root/users/user-1 from database txid=1750797107501756 issue= 2025-06-24T20:31:47.759272Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2963: Remove pool /Root/users/user-1:hdd from database 2025-06-24T20:31:47.759378Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3083: Add tenant removal info for /Root/users/user-1 txid=1750797107501756 code=SUCCESS errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-24T20:31:47.770275Z node 1 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72057594037968897 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037888 2025-06-24T20:31:47.786983Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037897 not found 2025-06-24T20:31:47.787037Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037893 not found 2025-06-24T20:31:47.787061Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037894 not found 2025-06-24T20:31:47.847360Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 
72075186224037888 not found 2025-06-24T20:31:47.847732Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2025-06-24T20:31:47.847971Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2431: Send /Root/users/user-1 notification to [1:7519616319375627647:2334]: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750797107501756&action=2" ready: true status: SUCCESS } } 2025-06-24T20:31:47.848088Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T20:31:47.871468Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7519616319375627819:2340], Recipient [1:7519616310785692242:2205]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "" PeerName: "ipv6:[::1]:60844" } 2025-06-24T20:31:47.871500Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-24T20:31:47.871658Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3377: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: NOT_FOUND issues { message: "Unknown tenant /Root/users/user-1" severity: 1 } } } 2025-06-24T20:31:47.787075Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037891 not found 2025-06-24T20:31:47.787088Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037892 not found 2025-06-24T20:31:47.787101Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037895 not found 2025-06-24T20:31:47.787115Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037889 not found 2025-06-24T20:31:47.787128Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037896 not found 2025-06-24T20:31:47.787194Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037890 not found 2025-06-24T20:31:47.787289Z node 3 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72075186224037888 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037897 2025-06-24T20:31:47.787784Z node 3 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72075186224037888 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037890 2025-06-24T20:31:47.787835Z node 3 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72075186224037888 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037894 2025-06-24T20:31:47.787867Z node 3 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72075186224037888 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037891 2025-06-24T20:31:47.787907Z node 3 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72075186224037888 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037892 2025-06-24T20:31:47.787937Z node 3 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72075186224037888 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037895 2025-06-24T20:31:47.787994Z 
node 3 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72075186224037888 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037896 2025-06-24T20:31:47.788520Z node 3 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72075186224037888 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037889 2025-06-24T20:31:47.889605Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285123, Sender [1:7519616319375627822:2341], Recipient [1:7519616310785692242:2205]: NKikimr::NConsole::TEvConsole::TEvListTenantsRequest { Request { } UserToken: "" PeerName: "ipv6:[::1]:60844" } 2025-06-24T20:31:47.889668Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:967: StateWork, processing event TEvConsole::TEvListTenantsRequest 2025-06-24T20:31:47.889908Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3421: Send TEvConsole::TEvListTenantsResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.ListDatabasesResult] { } } } } 2025-06-24T20:31:47.934760Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-24T20:31:47.936514Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected >> TOlapNaming::AlterColumnStoreOk [GOOD] >> TOlapNaming::AlterColumnStoreFailed >> TGRpcCmsTest::RemoveWithAnotherTokenTest [GOOD] >> KqpNewEngine::PkSelect2 [GOOD] >> KqpNewEngine::PkRangeSelect3 >> TOlapNaming::CreateColumnStoreOk [GOOD] >> TOlapNaming::CreateColumnStoreFailed >> TOlap::CustomDefaultPresets [GOOD] >> TOlap::Decimal >> Cdc::Write[TopicRunner] [GOOD] >> Cdc::UpdateStream >> TOlap::CreateDropTable [GOOD] >> TOlap::CreateDropStandaloneTableDefaultSharding >> TOlap::CreateStoreWithDirs [GOOD] >> TOlap::CreateTable >> TOlapNaming::AlterColumnStoreFailed [GOOD] >> TOlapNaming::CreateColumnStoreFailed [GOOD] >> TOlap::Decimal [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::RemoveWithAnotherTokenTest [GOOD] Test command err: 2025-06-24T20:31:46.674386Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616312287454082:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:46.674439Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003e87/r3tmp/tmpxqJZR8/pdisk_1.dat 2025-06-24T20:31:47.353043Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:47.373293Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:31:47.373462Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:31:47.388691Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29441, node 1 2025-06-24T20:31:47.699623Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:31:47.699848Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:31:47.699855Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:31:47.699864Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:31:47.699987Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22808 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:31:48.227937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
TClient is connected to server localhost:22808 2025-06-24T20:31:48.677021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:31:48.751590Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7519616320877389412:2274], Recipient [1:7519616316582421797:2206]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "\n\016user-1@builtin\022\030\022\026\n\024all-users@well-known\032\016user-1@builtin\"\007Builtin*\017**** (E3DE7296)" PeerName: "ipv6:[::1]:45928" } 2025-06-24T20:31:48.751641Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-24T20:31:48.751664Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T20:31:48.751679Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T20:31:48.751825Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "\n\016user-1@builtin\022\030\022\026\n\024all-users@well-known\032\016user-1@builtin\"\007Builtin*\017**** (E3DE7296)" PeerName: "ipv6:[::1]:45928" 2025-06-24T20:31:48.752045Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1750797108749886) 2025-06-24T20:31:48.752606Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1750797108749886 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-24T20:31:48.752860Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2025-06-24T20:31:48.756203Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2025-06-24T20:31:48.757185Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750797108749886&action=1" } } } 2025-06-24T20:31:48.757336Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T20:31:48.757410Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-24T20:31:48.757540Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: 
"/Root/users/user-1:hdd" } } } 2025-06-24T20:31:48.757997Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-06-24T20:31:48.758132Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-24T20:31:48.766524Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-24T20:31:48.766596Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-24T20:31:48.766677Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7519616320877389417:2206], Recipient [1:7519616316582421797:2206]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-24T20:31:48.766698Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-24T20:31:48.766710Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T20:31:48.766719Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T20:31:48.766798Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-06-24T20:31:48.766835Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-06-24T20:31:48.766905Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3206: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-06-24T20:31:48.771004Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519616320877389425:2275], Recipient [1:7519616316582421797:2206]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750797108749886&action=1" } UserToken: "\n\016user-1@builtin\022\030\022\026\n\024all-users@well-known\032\016user-1@builtin\"\007Builtin*\017**** (E3DE7296)" } 2025-06-24T20:31:48.771041Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-24T20:31:48.771316Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750797108749886&action=1" } } 2025-06-24T20:31:48.774539Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-24T20:31:48.774568Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T20:31:48.774578Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T20:31:48.774586Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 
2025-06-24T20:31:48.774640Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-06-24T20:31:48.774663Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1750797108749886 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-24T20:31:48.776886Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-24T20:31:48.777060Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T20:31:48.777112Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-24T20:31:48.777122Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-24T20:31:48.787850Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 UserToken: "\n\016user-1@builtin\022\ ... 663 2025-06-24T20:31:49.913488Z node 3 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72075186224037888 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found - using supplied 72075186224037888 2025-06-24T20:31:49.916609Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976715663 2025-06-24T20:31:49.916631Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-24T20:31:49.916678Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-24T20:31:49.916861Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435077, Sender [1:7519616325172357367:2206], Recipient [1:7519616316582421797:2206]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-24T20:31:49.916888Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:980: StateWork, processing event TEvPrivate::TEvSubdomainRemoved 2025-06-24T20:31:49.916905Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T20:31:49.916913Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T20:31:49.916956Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:20: TTxRemoveComputationalUnits Execute /Root/users/user-1 2025-06-24T20:31:49.916991Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_UNITS txid=1750797109844719 errorcode=UNAUTHORIZED issue=AccessDenied: Access denied for request 2025-06-24T20:31:49.917081Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1750797109844719 issue=AccessDenied: Access denied for request 2025-06-24T20:31:49.918424Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received 
event# 273285131, Sender [1:7519616325172357423:2356], Recipient [1:7519616316582421797:2206]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750797109844719&action=2" } UserToken: "" } 2025-06-24T20:31:49.918461Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-24T20:31:49.918695Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750797109844719&action=2" } } 2025-06-24T20:31:49.922942Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037897 not found 2025-06-24T20:31:49.922982Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037890 not found 2025-06-24T20:31:49.923025Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037893 not found 2025-06-24T20:31:49.923038Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037894 not found 2025-06-24T20:31:49.923059Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037891 not found 2025-06-24T20:31:49.923074Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037892 not found 2025-06-24T20:31:49.920741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:39: Got DeleteTabletReply with Forward response from Hive 72075186224037888 to Hive 72057594037968897 shardIdx 72057594046644480:1 2025-06-24T20:31:49.931267Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:34: TTxRemoveComputationalUnits Complete /Root/users/user-1 2025-06-24T20:31:49.931362Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2114: Send TEvTenantSlotBroker::TEvAlterTenant: TenantName: "/Root/users/user-1" 2025-06-24T20:31:49.931384Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T20:31:49.931640Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7519616316582421680:2201], Recipient [1:7519616316582421797:2206]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-24T20:31:49.931690Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-24T20:31:49.931718Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T20:31:49.931726Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T20:31:49.931760Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to REMOVING_POOLS 2025-06-24T20:31:49.931791Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_POOLS txid=1750797109844719 errorcode=UNAUTHORIZED issue=AccessDenied: Access denied for request 2025-06-24T20:31:49.935339Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle 
TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037889 not found 2025-06-24T20:31:49.955630Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2025-06-24T20:31:49.968135Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-24T20:31:49.968200Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T20:31:49.968247Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-24T20:31:49.968375Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-24T20:31:49.968891Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 3 } ItemConfigGeneration: 2 } } Success: true ConfigTxSeqNo: 10 2025-06-24T20:31:49.968956Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:151: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 2 } } } 2025-06-24T20:31:49.977473Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:306: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 11 2025-06-24T20:31:49.977677Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519616325172357482:2359], Recipient [1:7519616316582421797:2206]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750797109844719&action=2" } UserToken: "" } 2025-06-24T20:31:49.977698Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-24T20:31:49.977872Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750797109844719&action=2" } } 2025-06-24T20:31:49.977935Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435081, Sender [1:7519616325172357474:2206], Recipient [1:7519616316582421797:2206]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2025-06-24T20:31:49.977970Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:973: StateWork, processing event TEvPrivate::TEvPoolDeleted 2025-06-24T20:31:49.977992Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T20:31:49.978003Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T20:31:49.978050Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2025-06-24T20:31:49.978069Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=DELETED 
allocatednumgroups=0 2025-06-24T20:31:49.993981Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-24T20:31:49.994043Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T20:31:49.994059Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T20:31:49.994073Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T20:31:49.994172Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1750797109844719 2025-06-24T20:31:49.994208Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1750797109844719 issue=AccessDenied: Access denied for request 2025-06-24T20:31:49.994228Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2958: Remove tenant /Root/users/user-1 from database txid=1750797109844719 issue=AccessDenied: Access denied for request 2025-06-24T20:31:49.994245Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2963: Remove pool /Root/users/user-1:hdd from database 2025-06-24T20:31:49.994354Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3083: Add tenant removal info for /Root/users/user-1 txid=1750797109844719 code=SUCCESS errorcode=UNAUTHORIZED issue=AccessDenied: Access denied for request 2025-06-24T20:31:50.006214Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2025-06-24T20:31:50.006276Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T20:31:50.037305Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519616329467324790:2361], Recipient [1:7519616316582421797:2206]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750797109844719&action=2" } UserToken: "" } 2025-06-24T20:31:50.037354Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-24T20:31:50.037557Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750797109844719&action=2" ready: true status: SUCCESS } } 2025-06-24T20:31:50.047139Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-24T20:31:50.047348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected >> KqpQueryService::PeriodicTaskInSessionPool [GOOD] >> KqpQueryService::PeriodicTaskInSessionPoolSessionCloseByIdle >> TScaleRecommenderTest::BasicTest [GOOD] >> TOlap::CreateDropStandaloneTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlapNaming::AlterColumnStoreFailed [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:31:52.172865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:31:52.172980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:52.173031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:31:52.173084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:31:52.173163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:31:52.173198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:31:52.173271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:52.173352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:31:52.174239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:31:52.174703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:31:52.266858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:31:52.266925Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:52.286177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:31:52.286686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:31:52.286901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:31:52.300467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:31:52.300714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:31:52.301434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:52.301791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:31:52.305625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:52.305847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 
2025-06-24T20:31:52.307210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:52.307299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:52.307543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:31:52.307650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:52.307722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:31:52.307830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.315954Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:31:52.466612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:52.466875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.467196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:31:52.467254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:31:52.467577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:31:52.467662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:52.470270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:52.470547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:31:52.470791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-24T20:31:52.470868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:31:52.470908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:31:52.470942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:31:52.473173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.473237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:31:52.473287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:31:52.475443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.475502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.475545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:52.475611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:31:52.479403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:52.481852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:31:52.482076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:31:52.483205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:52.483387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:52.483438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:52.483769Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:31:52.483838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:52.484037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:52.484126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:31:52.486771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:52.486828Z node 1 :FLAT_TX_SCHEMESHARD ... 54.011880Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:54.011957Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:31:54.012193Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T20:31:54.012411Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:54.012483Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-24T20:31:54.012551Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 102, path id: 3 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-24T20:31:54.012963Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:31:54.013028Z node 2 :FLAT_TX_SCHEMESHARD INFO: create_table.cpp:459: TCreateColumnTable TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-06-24T20:31:54.013094Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: create_table.cpp:485: TCreateColumnTable TProposedWaitParts operationId# 102:0 ProgressState wait for NotifyTxCompletionResult tabletId: 72075186233409546 2025-06-24T20:31:54.013880Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:31:54.013989Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:31:54.014043Z node 2 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:31:54.014100Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-24T20:31:54.014159Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T20:31:54.014912Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:31:54.014996Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:31:54.015026Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:31:54.015056Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-24T20:31:54.015088Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T20:31:54.015318Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-24T20:31:54.017257Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 102:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275382275 2025-06-24T20:31:54.017347Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 102:0 from tablet: 72057594046678944 to tablet: 72057594037968897 cookie: 72057594046678944:3 msg type: 268697639 2025-06-24T20:31:54.017426Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 102, partId: 0, tablet: 72057594037968897 2025-06-24T20:31:54.018398Z node 2 :HIVE INFO: tablet_helpers.cpp:1441: [72057594037968897] TEvUpdateTabletsObject, msg: ObjectId: 7726343884038809171 TabletIds: 72075186233409546 TxId: 102 TxPartId: 0 2025-06-24T20:31:54.018622Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6052: Update tablets object reply, message: Status: OK TxId: 102 TxPartId: 0, at schemeshard: 72057594046678944 2025-06-24T20:31:54.018728Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Status: OK TxId: 102 TxPartId: 0 2025-06-24T20:31:54.019690Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:31:54.019792Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 
72057594046678944, cookie: 102 2025-06-24T20:31:54.021203Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:31:54.035188Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6226: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 102 2025-06-24T20:31:54.035275Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T20:31:54.035413Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 102 FAKE_COORDINATOR: Erasing txId 102 2025-06-24T20:31:54.037497Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:31:54.037655Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:31:54.037722Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T20:31:54.037872Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T20:31:54.037913Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:31:54.037956Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T20:31:54.037992Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:31:54.038036Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T20:31:54.038115Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:344:2320] message: TxId: 102 2025-06-24T20:31:54.038183Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:31:54.038229Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T20:31:54.038268Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T20:31:54.038421Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T20:31:54.040383Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:31:54.040441Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:403:2372] TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 2025-06-24T20:31:54.044322Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: 
ESchemeOpAlterColumnStore AlterColumnStore { Name: "OlapStore" AlterSchemaPresets { Name: "default" AlterSchema { AddColumns { Name: "mess age" Type: "Utf8" } } } } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:54.044567Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: alter_store.cpp:465: TAlterOlapStore Propose, path: /MyRoot/OlapStore, opId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:31:54.044848Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 103:1, propose status:StatusSchemeError, reason: Invalid name for column 'mess age', at schemeshard: 72057594046678944 2025-06-24T20:31:54.047210Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 103, response: Status: StatusSchemeError Reason: "Invalid name for column \'mess age\'" TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:54.047432Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusSchemeError, reason: Invalid name for column 'mess age', operation: ALTER COLUMN STORE, path: /MyRoot/OlapStore TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-24T20:31:54.047751Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T20:31:54.047798Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T20:31:54.048228Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T20:31:54.048327Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:31:54.048405Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [2:439:2408] TestWaitNotification: OK eventTxId 103 >> TOlap::StoreStats ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlapNaming::CreateColumnStoreFailed [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:31:52.439995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:31:52.440092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:52.440134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:31:52.440171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:31:52.440249Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:31:52.440285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:31:52.440361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:52.440446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:31:52.441187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:31:52.441543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:31:52.519464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:31:52.519519Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:52.542398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:31:52.542814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:31:52.542992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:31:52.551869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:31:52.552072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:31:52.552647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:52.552933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:31:52.557391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:52.557613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:31:52.558920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:52.558992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:52.559276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:31:52.559335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:52.559417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:31:52.559538Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.566582Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:31:52.712094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:52.712318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.712596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:31:52.712642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:31:52.712877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:31:52.712970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:52.716190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:52.716471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:31:52.716720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.716784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:31:52.716829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:31:52.716863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:31:52.718988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.719061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 
ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:31:52.719104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:31:52.721490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.721546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.721599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:52.721673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:31:52.725061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:52.727315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:31:52.727555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:31:52.728705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:52.728890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:52.728951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:52.729273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:31:52.729334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:52.729537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:52.729636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:31:52.732205Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:52.732260Z node 1 :FLAT_TX_SCHEMESHARD ... hemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:54.129721Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:31:54.129875Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:54.131477Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:31:54.131713Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:31:54.132857Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:54.133003Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 8589936750 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:54.192238Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:54.192631Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:31:54.192700Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:54.192900Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:54.192982Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:31:54.196643Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:54.196715Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:54.196975Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at 
schemeshard: 72057594046678944 2025-06-24T20:31:54.197025Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 1, path id: 1 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:31:54.197469Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:54.197507Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T20:31:54.197613Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T20:31:54.197654Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:31:54.197697Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T20:31:54.197729Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:31:54.197770Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T20:31:54.197815Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:31:54.197854Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T20:31:54.197887Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T20:31:54.197974Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:31:54.198020Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T20:31:54.198059Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T20:31:54.198602Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T20:31:54.198714Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T20:31:54.198758Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T20:31:54.198805Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T20:31:54.198851Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 
2025-06-24T20:31:54.198953Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T20:31:54.212307Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T20:31:54.212997Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-24T20:31:54.213945Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:272:2261] Bootstrap 2025-06-24T20:31:54.234067Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:272:2261] Become StateWork (SchemeCache [2:277:2266]) 2025-06-24T20:31:54.236537Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnStore CreateColumnStore { Name: "OlapStore" ColumnShardCount: 1 SchemaPresets { Name: "default" Schema { Columns { Name: "timestamp" Type: "Timestamp" NotNull: true } Columns { Name: "data" Type: "Utf8" } Columns { Name: "mess age" Type: "Utf8" } KeyColumnNames: "timestamp" } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:54.236841Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: create_store.cpp:331: TCreateOlapStore Propose, path: /MyRoot/OlapStore, opId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:31:54.236984Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Invalid name for column 'mess age', at schemeshard: 72057594046678944 2025-06-24T20:31:54.237818Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [2:272:2261] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T20:31:54.241645Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Invalid name for column \'mess age\'" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:54.241895Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Invalid name for column 'mess age', operation: CREATE COLUMN STORE, path: /MyRoot/OlapStore 2025-06-24T20:31:54.242364Z node 2 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T20:31:54.296499Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T20:31:54.296571Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T20:31:54.297036Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T20:31:54.297153Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T20:31:54.297196Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:287:2276] TestWaitNotification: OK eventTxId 101 2025-06-24T20:31:54.297683Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:31:54.297898Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore" took 251us result status StatusPathDoesNotExist 2025-06-24T20:31:54.298087Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/OlapStore\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/OlapStore" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> THiveTest::TestHiveBalancerWithPrefferedDC1 [GOOD] >> THiveTest::TestHiveBalancerWithPrefferedDC2 >> TColumnShardTestSchema::HotTiers [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlap::Decimal [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:31:52.635780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:31:52.635876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:52.635921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:31:52.635983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:31:52.636056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:31:52.636091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:31:52.636152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:52.636283Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:31:52.637271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:31:52.637669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:31:52.727585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:31:52.727648Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:52.743246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:31:52.743596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:31:52.743778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:31:52.752130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:31:52.752333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:31:52.753103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:52.753475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:31:52.758950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:52.759178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:31:52.760462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:52.760526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:52.760718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:31:52.760775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:52.760827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:31:52.760903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.767328Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:31:52.921409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:52.921676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.921976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:31:52.922032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:31:52.922389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:31:52.922469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:52.927881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:52.928107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:31:52.928309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.928396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:31:52.928442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:31:52.928475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:31:52.930918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.930985Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:31:52.931031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:31:52.933486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.933558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-24T20:31:52.933662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:52.933743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:31:52.944904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:52.946763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:31:52.946927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:31:52.947721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:52.947829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:52.947865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:52.948104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:31:52.948163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:52.948322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:52.948413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:31:52.950032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:52.950069Z node 1 :FLAT_TX_SCHEMESHARD ... 
h path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T20:31:54.339342Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186233409546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=101;fline=tx_controller.cpp:215;event=finished_tx;tx_id=101; FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 2025-06-24T20:31:54.341103Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:54.341158Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:54.341352Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:31:54.341505Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:54.341551Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-24T20:31:54.341603Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-24T20:31:54.341707Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:31:54.341759Z node 2 :FLAT_TX_SCHEMESHARD INFO: create_store.cpp:245: TCreateOlapStore TProposedWaitParts operationId# 101:0 ProgressState at tablet: 72057594046678944 2025-06-24T20:31:54.341822Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: create_store.cpp:268: TCreateOlapStore TProposedWaitParts operationId# 101:0 ProgressState wait for NotifyTxCompletionResult tabletId: 72075186233409546 2025-06-24T20:31:54.343003Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:31:54.343104Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:31:54.343168Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-24T20:31:54.343215Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T20:31:54.343262Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:31:54.343928Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:31:54.344006Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T20:31:54.344036Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-24T20:31:54.344067Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T20:31:54.344102Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T20:31:54.344173Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-06-24T20:31:54.346530Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 101:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275382275 2025-06-24T20:31:54.348132Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T20:31:54.348243Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T20:31:54.370939Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6226: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 101 2025-06-24T20:31:54.371016Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-24T20:31:54.371173Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 101 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T20:31:54.373694Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:31:54.373922Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:31:54.373986Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T20:31:54.374113Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T20:31:54.374157Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:31:54.374207Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T20:31:54.374246Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready 
parts: 1/1 2025-06-24T20:31:54.374291Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-24T20:31:54.374381Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:344:2320] message: TxId: 101 2025-06-24T20:31:54.374447Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:31:54.374491Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T20:31:54.374528Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T20:31:54.374666Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T20:31:54.377048Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T20:31:54.377114Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:345:2321] TestWaitNotification: OK eventTxId 101 2025-06-24T20:31:54.377565Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:31:54.377807Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore" took 260us result status StatusSuccess 2025-06-24T20:31:54.378321Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/OlapStore" PathDescription { Self { Name: "OlapStore" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnStore CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnStoreVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { 
ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnStoreDescription { Name: "OlapStore" ColumnShardCount: 1 ColumnShards: 72075186233409546 SchemaPresets { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "timestamp" Type: "Timestamp" TypeId: 50 NotNull: true StorageId: "" DefaultValue { } } Columns { Id: 2 Name: "data" Type: "Decimal(35,9)" TypeId: 4865 TypeInfo { DecimalPrecision: 35 DecimalScale: 9 } NotNull: false StorageId: "" DefaultValue { } } KeyColumnNames: "timestamp" NextColumnId: 3 Version: 1 Options { SchemeNeedActualization: false } NextColumnFamilyId: 1 } } NextSchemaPresetId: 2 NextTtlSettingsPresetId: 1 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpPg::CreateSequence [GOOD] >> KqpPg::AlterSequence >> KqpPg::EquiJoin+useSink [GOOD] >> KqpPg::EquiJoin-useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> TScaleRecommenderTest::BasicTest [GOOD] Test command err: 2025-06-24T20:31:15.403916Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-24T20:31:15.444184Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:15.444484Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-24T20:31:15.445541Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:31:15.445882Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-24T20:31:15.447037Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-24T20:31:15.447094Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-24T20:31:15.448564Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:31:2076] ControllerId# 72057594037932033 2025-06-24T20:31:15.448621Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-24T20:31:15.448746Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-24T20:31:15.448885Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} 
StartRequestReportingThrottler 2025-06-24T20:31:15.461204Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-24T20:31:15.461274Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-24T20:31:15.467311Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:38:2081] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:15.468144Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:39:2082] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:15.468382Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:40:2083] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:15.468565Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:41:2084] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:15.468695Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:42:2085] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:15.468840Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:43:2086] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:15.468988Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:44:2087] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:15.469015Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-24T20:31:15.469107Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:31:2076] 2025-06-24T20:31:15.469157Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:31:2076] 2025-06-24T20:31:15.469219Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-24T20:31:15.469363Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-24T20:31:15.470068Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-24T20:31:15.470253Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:15.502331Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [1:21:2063] 2025-06-24T20:31:15.502389Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [1:21:2063] 2025-06-24T20:31:15.502593Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:31:2076] 2025-06-24T20:31:15.502686Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:15.502753Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-24T20:31:15.502895Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:15.545129Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:15.545190Z 
node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-24T20:31:15.563928Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-24T20:31:15.564262Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:31:15.566623Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-24T20:31:15.566728Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-24T20:31:15.566789Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-24T20:31:15.567282Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:31:2076] 2025-06-24T20:31:15.567407Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-24T20:31:15.567451Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:15.567553Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:15.568099Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-24T20:31:15.568140Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-24T20:31:15.568169Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-24T20:31:15.568308Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:342} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\363\365\\\016\336\205\240m2\241c\3010\003\261\342\227\n\267}" } 2025-06-24T20:31:15.568409Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:15.568637Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936129 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:31:15.568903Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:15.569113Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 0} 2025-06-24T20:31:15.569169Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit 
event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-24T20:31:15.569210Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 2025-06-24T20:31:15.569288Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 2} 2025-06-24T20:31:15.569337Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:15.569507Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72057594037932033 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72057594037932033 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[1:24343667:0] : 2}, {[1:2199047599219:0] : 8}, {[1:1099535971443:0] : 5}}}} 2025-06-24T20:31:15.569549Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72057594037932033 followers: 0 2025-06-24T20:31:15.569775Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72057594037932033] forward result error, check reconnect [1:31:2076] 2025-06-24T20:31:15.569823Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:562: TClient[72057594037932033] schedule retry [1:31:2076] 2025-06-24T20:31:15.569867Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639248 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-24T20:31:15.573158Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 1} 2025-06-24T20:31:15.573248Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [1:54:2093] 2025-06-24T20:31:15.573277Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [1:54:2093] 2025-06-24T20:31:15.574516Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:15.574636Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StInit ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:15.574729Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:15.575096Z node 1 :BS_NODE DEBUG: {NWDC ... 
72075186224037888] connected with status OK role: Leader [24:569:2483] 2025-06-24T20:31:54.461988Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037888] send queued [24:569:2483] 2025-06-24T20:31:54.462057Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [24:569:2483] 2025-06-24T20:31:54.462140Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72075186224037888] shutdown pipe due to pending shutdown request [24:569:2483] 2025-06-24T20:31:54.462204Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72075186224037888] notify reset [24:569:2483] 2025-06-24T20:31:54.462292Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72075186224037888] HandleSend Sender# [24:568:2482] EventType# 268697642 2025-06-24T20:31:54.463043Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037888] ::Bootstrap [24:572:2486] 2025-06-24T20:31:54.463106Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037888] lookup [24:572:2486] 2025-06-24T20:31:54.464759Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72075186224037888] queue send [24:572:2486] 2025-06-24T20:31:54.464874Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037888 entry.State: StNormal ev: {EvForward TabletID: 72075186224037888 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:54.464960Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 24 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [24:420:2365] 2025-06-24T20:31:54.465075Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72075186224037888] received pending shutdown [24:572:2486] 2025-06-24T20:31:54.465149Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037888] forward result local node, try to connect [24:572:2486] 2025-06-24T20:31:54.465250Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [24:572:2486] 2025-06-24T20:31:54.465394Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037888] Accept Connect Originator# [24:572:2486] 2025-06-24T20:31:54.465587Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037888] connected with status OK role: Leader [24:572:2486] 2025-06-24T20:31:54.465662Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037888] send queued [24:572:2486] 2025-06-24T20:31:54.465719Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [24:572:2486] 2025-06-24T20:31:54.465804Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72075186224037888] shutdown pipe due to pending shutdown request [24:572:2486] 2025-06-24T20:31:54.465865Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72075186224037888] notify reset [24:572:2486] 2025-06-24T20:31:54.465945Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72075186224037888] HandleSend Sender# [24:571:2485] EventType# 268697612 2025-06-24T20:31:54.466208Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:12} Tx{22, NKikimr::NHive::TTxUpdateTabletMetrics} queued, type NKikimr::NHive::TTxUpdateTabletMetrics 2025-06-24T20:31:54.466330Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:12} Tx{22, NKikimr::NHive::TTxUpdateTabletMetrics} took 
4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:31:54.466561Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:12} Tx{22, NKikimr::NHive::TTxUpdateTabletMetrics} hope 1 -> done Change{15, redo 82b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2025-06-24T20:31:54.466671Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:12} Tx{22, NKikimr::NHive::TTxUpdateTabletMetrics} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:31:54.478263Z node 24 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [ced0949902bd0a34] bootstrap ActorId# [24:575:2489] Group# 2147483648 BlobCount# 1 BlobIDs# [[72075186224037888:1:12:0:0:92:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-24T20:31:54.478471Z node 24 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [ced0949902bd0a34] Id# [72075186224037888:1:12:0:0:92:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:31:54.478562Z node 24 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [ced0949902bd0a34] restore Id# [72075186224037888:1:12:0:0:92:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T20:31:54.478663Z node 24 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [ced0949902bd0a34] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224037888:1:12:0:0:92:1] Marker# BPG33 2025-06-24T20:31:54.478749Z node 24 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [ced0949902bd0a34] Sending missing VPut part# 0 to# 0 blob Id# [72075186224037888:1:12:0:0:92:1] Marker# BPG32 2025-06-24T20:31:54.478966Z node 24 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [24:426:2368] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72075186224037888:1:12:0:0:92:1] FDS# 92 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T20:31:54.481757Z node 24 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [ced0949902bd0a34] received {EvVPutResult Status# OK ID# [72075186224037888:1:12:0:0:92:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 19 } Cost# 80724 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 20 }}}} from# [80000000:1:0:0:0] Marker# BPP01 2025-06-24T20:31:54.481932Z node 24 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [ced0949902bd0a34] Result# TEvPutResult {Id# [72075186224037888:1:12:0:0:92:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 2147483648 Marker# BPP12 2025-06-24T20:31:54.482076Z node 24 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [ced0949902bd0a34] SendReply putResult# TEvPutResult {Id# [72075186224037888:1:12:0:0:92:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-24T20:31:54.482396Z node 24 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2147483648 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 1.01 sample PartId# [72075186224037888:1:12:0:0:92:1] QueryCount# 1 VDiskId# [80000000:1:0:0:0] NodeId# 24 } TEvVPutResult{ TimestampMs# 3.843 VDiskId# [80000000:1:0:0:0] NodeId# 24 Status# OK } ] } 2025-06-24T20:31:54.482687Z node 24 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72075186224037888:1:12:0:0:92:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-24T20:31:54.482878Z node 24 :TABLET_EXECUTOR DEBUG: 
Leader{72075186224037888:1:13} commited cookie 1 for step 12 2025-06-24T20:31:54.483568Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037888] ::Bootstrap [24:577:2491] 2025-06-24T20:31:54.483634Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037888] lookup [24:577:2491] 2025-06-24T20:31:54.483755Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037888 entry.State: StNormal ev: {EvForward TabletID: 72075186224037888 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:54.483841Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 24 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [24:420:2365] 2025-06-24T20:31:54.483925Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72075186224037888] queue send [24:577:2491] 2025-06-24T20:31:54.484002Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72075186224037888] received pending shutdown [24:577:2491] 2025-06-24T20:31:54.484081Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037888] forward result local node, try to connect [24:577:2491] 2025-06-24T20:31:54.484164Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [24:577:2491] 2025-06-24T20:31:54.484304Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037888] Accept Connect Originator# [24:577:2491] 2025-06-24T20:31:54.484534Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037888] connected with status OK role: Leader [24:577:2491] 2025-06-24T20:31:54.484946Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037888] send queued [24:577:2491] 2025-06-24T20:31:54.485026Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [24:577:2491] 2025-06-24T20:31:54.485119Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72075186224037888] shutdown pipe due to pending shutdown request [24:577:2491] 2025-06-24T20:31:54.485185Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72075186224037888] notify reset [24:577:2491] 2025-06-24T20:31:54.485285Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72075186224037888] HandleSend Sender# [24:576:2490] EventType# 2146435094 2025-06-24T20:31:54.486121Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037888] ::Bootstrap [24:580:2494] 2025-06-24T20:31:54.486192Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037888] lookup [24:580:2494] 2025-06-24T20:31:54.486312Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037888 entry.State: StNormal ev: {EvForward TabletID: 72075186224037888 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:54.486393Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 24 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [24:420:2365] 2025-06-24T20:31:54.486486Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72075186224037888] queue send [24:580:2494] 2025-06-24T20:31:54.486569Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72075186224037888] received pending shutdown [24:580:2494] 2025-06-24T20:31:54.486643Z node 24 :PIPE_CLIENT 
DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037888] forward result local node, try to connect [24:580:2494] 2025-06-24T20:31:54.486722Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [24:580:2494] 2025-06-24T20:31:54.486868Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037888] Accept Connect Originator# [24:580:2494] 2025-06-24T20:31:54.487074Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037888] connected with status OK role: Leader [24:580:2494] 2025-06-24T20:31:54.487164Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037888] send queued [24:580:2494] 2025-06-24T20:31:54.487223Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [24:580:2494] 2025-06-24T20:31:54.487302Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72075186224037888] shutdown pipe due to pending shutdown request [24:580:2494] 2025-06-24T20:31:54.487359Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72075186224037888] notify reset [24:580:2494] 2025-06-24T20:31:54.487448Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72075186224037888] HandleSend Sender# [24:579:2493] EventType# 268697642 >> TQuorumTrackerTests::ErasureNoneNeverHasQuorum_4_1 [GOOD] >> TQuorumTrackerTests::ErasureMirror3IncludingMyFailDomain_5_2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::HotTiers [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150797654.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150797654.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150797654.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130797654.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150797654.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150797654.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130796454.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130797654.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130797654.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130796454.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130796454.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130796454.000000s;Name=;Codec=}; 2025-06-24T20:30:55.259367Z node 1 
:TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T20:30:55.309536Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T20:30:55.309954Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T20:30:55.318533Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T20:30:55.318812Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T20:30:55.319106Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T20:30:55.319268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T20:30:55.319396Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T20:30:55.319574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T20:30:55.319698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T20:30:55.319833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T20:30:55.319948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T20:30:55.320087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T20:30:55.320208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T20:30:55.355845Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T20:30:55.356205Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T20:30:55.356288Z node 1 
:TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T20:30:55.356519Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T20:30:55.356749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T20:30:55.356840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T20:30:55.356892Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T20:30:55.357001Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T20:30:55.357073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T20:30:55.357121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T20:30:55.357155Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T20:30:55.357378Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T20:30:55.357458Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T20:30:55.357514Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T20:30:55.357554Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T20:30:55.357667Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T20:30:55.357737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T20:30:55.357783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T20:30:55.357817Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T20:30:55.357886Z 
node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T20:30:55.357930Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T20:30:55.357961Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T20:30:55.358220Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T20:30:55.358272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T20:30:55.358311Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T20:30:55.358543Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T20:30:55.358609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T20:30:55.358648Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T20:30:55.358802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T20:30:55.358860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T20:30:55.358899Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T20:30:55.359000Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T20:30:55.359082Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=ab ... 
STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T20:31:55.029632Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T20:31:55.029683Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T20:31:55.029819Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T20:31:55.030066Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750797103567:max} readable: {1750797103567:max} at tablet 9437184 2025-06-24T20:31:55.030215Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-24T20:31:55.030409Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750797103567:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T20:31:55.030484Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750797103567:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T20:31:55.031076Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750797103567:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-24T20:31:55.031218Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750797103567:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-24T20:31:55.031750Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750797103567:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:1448:3392];trace_detailed=; 2025-06-24T20:31:55.032231Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-24T20:31:55.032539Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-24T20:31:55.032763Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:55.032923Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:55.033320Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1448:3392];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T20:31:55.033453Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1448:3392];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:55.033563Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1448:3392];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:55.033611Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:1448:3392] finished for tablet 9437184 2025-06-24T20:31:55.034157Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=4;SelfId=[1:1448:3392];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:1447:3391];stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","l_ProduceResults","f_Finish"],"t":0.001},{"events":["l_ack","l_processing","l_Finish"],"t":0.002}],"full":{"a":1750797115031671,"name":"_full_task","f":1750797115031671,"d_finished":0,"c":0,"l":1750797115033692,"d":2021},"events":[{"name":"bootstrap","f":1750797115031894,"d_finished":1071,"c":1,"l":1750797115032965,"d":1071},{"a":1750797115033290,"name":"ack","f":1750797115033290,"d_finished":0,"c":0,"l":1750797115033692,"d":402},{"a":1750797115033265,"name":"processing","f":1750797115033265,"d_finished":0,"c":0,"l":1750797115033692,"d":427},{"name":"ProduceResults","f":1750797115032654,"d_finished":540,"c":2,"l":1750797115033590,"d":540},{"a":1750797115033594,"name":"Finish","f":1750797115033594,"d_finished":0,"c":0,"l":1750797115033692,"d":98}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T20:31:55.034264Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1448:3392];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1447:3391];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T20:31:55.034775Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:1448:3392];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:1447:3391];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","l_ProduceResults","f_Finish"],"t":0.001},{"events":["l_ack","l_processing","l_Finish"],"t":0.002}],"full":{"a":1750797115031671,"name":"_full_task","f":1750797115031671,"d_finished":0,"c":0,"l":1750797115034321,"d":2650},"events":[{"name":"bootstrap","f":1750797115031894,"d_finished":1071,"c":1,"l":1750797115032965,"d":1071},{"a":1750797115033290,"name":"ack","f":1750797115033290,"d_finished":0,"c":0,"l":1750797115034321,"d":1031},{"a":1750797115033265,"name":"processing","f":1750797115033265,"d_finished":0,"c":0,"l":1750797115034321,"d":1056},{"name":"ProduceResults","f":1750797115032654,"d_finished":540,"c":2,"l":1750797115033590,"d":540},{"a":1750797115033594,"name":"Finish","f":1750797115033594,"d_finished":0,"c":0,"l":1750797115034321,"d":727}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:1448:3392]->[1:1447:3391] 2025-06-24T20:31:55.034880Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=4;SelfId=[1:1448:3392];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T20:31:55.031182Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-24T20:31:55.034935Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1448:3392];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T20:31:55.035057Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:1448:3392];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 240000/14617704 160000/9752224 160000/9752224 80000/4886744 0/0 >> TSyncBrokerTests::ShouldReturnTokensWithSameVDiskId >> TSyncBrokerTests::ShouldReturnTokensWithSameVDiskId [GOOD] >> TSyncNeighborsTests::SerDes1 [GOOD] |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TQuorumTrackerTests::ErasureMirror3IncludingMyFailDomain_5_2 [GOOD] >> TQuorumTrackerTests::Erasure4Plus2BlockNotIncludingMyFailDomain_8_2 [GOOD] >> TQuorumTrackerTests::ErasureMirror3IncludingMyFailDomain_4_2 [GOOD] >> TOlap::CreateTable [GOOD] >> TOlap::CreateTableTtl >> KqpQueryService::CheckIsolationLevelFroPerStatementMode [GOOD] >> KqpQueryService::AlterTable_DropNotNull_WithSetFamily_Valid >> TSyncBrokerTests::ShouldProcessAfterRelease >> TSyncBrokerTests::ShouldProcessAfterRelease [GOOD] >> TSyncBrokerTests::ShouldReleaseInQueue ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncNeighborsTests::SerDes1 [GOOD] Test command err: 2025-06-24T20:31:56.718458Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token sent, active: 1, waiting: 0 2025-06-24T20:31:56.718593Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:50: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:6:2053], token sent, active: 1, waiting: 0 |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TQuorumTrackerTests::ErasureMirror3IncludingMyFailDomain_4_2 [GOOD] >> TOlap::CreateDropStandaloneTable [GOOD] >> TOlap::AlterStore >> TSyncBrokerTests::ShouldReleaseInQueue [GOOD] >> TSyncNeighborsTests::SerDes2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncBrokerTests::ShouldReleaseInQueue [GOOD] Test command err: 2025-06-24T20:31:57.202067Z node 1 :BS_SYNCER DEBUG: 
blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token sent, active: 1, waiting: 0 2025-06-24T20:31:57.202208Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:90: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [1:6:2053], enqueued, active: 1, waiting: 1 2025-06-24T20:31:57.202269Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:123: TEvReleaseSyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token released, active: 1, waiting: 1 2025-06-24T20:31:57.202361Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:105: ProcessQueue(), VDisk actor id: [0:1:2], actor id: [1:6:2053], token sent, active: 0, waiting: 1 2025-06-24T20:31:57.302995Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [2:5:2052], token sent, active: 1, waiting: 0 2025-06-24T20:31:57.303171Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:90: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [2:6:2053], enqueued, active: 1, waiting: 1 2025-06-24T20:31:57.303251Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:146: TEvReleaseSyncToken, VDisk actor id: [0:1:2], actor id: [2:6:2053], removed from queue, active: 1, waiting: 0 >> TSyncBrokerTests::ShouldReturnToken >> TColumnShardTestSchema::RebootColdTiers [GOOD] >> TSyncBrokerTests::ShouldReturnToken [GOOD] >> TSyncBrokerTests::ShouldReleaseToken >> TSyncBrokerTests::ShouldReleaseToken [GOOD] |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncNeighborsTests::SerDes2 [GOOD] >> TOlapNaming::CreateColumnTableOk [GOOD] >> TOlapNaming::CreateColumnTableFailed >> TOlap::AlterStore [GOOD] >> TOlap::AlterTtl >> Cdc::HugeKeyDebezium [GOOD] >> Cdc::Drop[PqRunner] >> TTxAllocatorClientTest::Boot >> StoragePool::TestDistributionRandomProbability [GOOD] >> StoragePool::TestDistributionRandomProbabilityWithOverflow [GOOD] >> StoragePool::TestDistributionExactMin >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase-useSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncBrokerTests::ShouldReleaseToken [GOOD] Test command err: 2025-06-24T20:31:58.169352Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token sent, active: 1, waiting: 0 2025-06-24T20:31:58.260191Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [2:5:2052], token sent, active: 1, waiting: 0 2025-06-24T20:31:58.260294Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:123: TEvReleaseSyncToken, VDisk actor id: [0:1:1], actor id: [2:5:2052], token released, active: 1, waiting: 0 >> TTxAllocatorClientTest::Boot [GOOD] >> KqpQueryService::TableSink_HtapComplex-withOltpSink [GOOD] >> KqpQueryService::TableSink_HtapInteractive+withOltpSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootColdTiers [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150797654.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150797654.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150797654.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130797654.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150797654.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150797654.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130796454.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130797654.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130797654.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130796454.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130796454.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130796454.000000s;Name=;Codec=}; 2025-06-24T20:30:54.959964Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T20:30:54.982981Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T20:30:54.983578Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T20:30:54.994946Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T20:30:55.001293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T20:30:55.001667Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T20:30:55.001797Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T20:30:55.001914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T20:30:55.002041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T20:30:55.002152Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T20:30:55.002271Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 
2025-06-24T20:30:55.002393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T20:30:55.002519Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T20:30:55.002639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T20:30:55.036788Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T20:30:55.037130Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T20:30:55.037203Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T20:30:55.037449Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T20:30:55.037694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T20:30:55.037787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T20:30:55.037840Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T20:30:55.037950Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T20:30:55.038024Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T20:30:55.038076Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T20:30:55.038110Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T20:30:55.038293Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T20:30:55.038375Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T20:30:55.038427Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T20:30:55.038463Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T20:30:55.038564Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T20:30:55.038627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T20:30:55.038669Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T20:30:55.038697Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T20:30:55.038763Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T20:30:55.038804Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T20:30:55.038838Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T20:30:55.039027Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T20:30:55.039065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T20:30:55.039093Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T20:30:55.039351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T20:30:55.039444Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T20:30:55.039478Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T20:30:55.039623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T20:30:55.039679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T20:30:55.039711Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T20:30:55.039818Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T20:30:55.039888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T20:30:55.039931Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T20:30:55.039963Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:1 ... SHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=11; 2025-06-24T20:31:57.583766Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=102; 2025-06-24T20:31:57.583815Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=5463; 2025-06-24T20:31:57.583859Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=5589; 2025-06-24T20:31:57.583919Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=12; 2025-06-24T20:31:57.584029Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=69; 2025-06-24T20:31:57.584062Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=6268; 2025-06-24T20:31:57.584209Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=97; 2025-06-24T20:31:57.584332Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=64; 2025-06-24T20:31:57.584503Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=120; 2025-06-24T20:31:57.584639Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=86; 2025-06-24T20:31:57.586712Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=2018; 2025-06-24T20:31:57.588934Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=2132; 2025-06-24T20:31:57.589019Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=14; 2025-06-24T20:31:57.589083Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=10; 2025-06-24T20:31:57.589125Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-24T20:31:57.589230Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=38; 2025-06-24T20:31:57.589277Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=6; 2025-06-24T20:31:57.589376Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=65; 2025-06-24T20:31:57.589422Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=5; 2025-06-24T20:31:57.589495Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=39; 2025-06-24T20:31:57.589622Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=79; 2025-06-24T20:31:57.589963Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=302; 2025-06-24T20:31:57.590002Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=20055; 2025-06-24T20:31:57.590133Z node 1 
:TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=9739224;raw_bytes=13544452;count=2;records=160000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T20:31:57.590260Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1536:3345];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T20:31:57.590316Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1536:3345];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T20:31:57.590417Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1536:3345];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T20:31:57.608270Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1536:3345];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-24T20:31:57.608439Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T20:31:57.608530Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-24T20:31:57.608643Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750796804604;tx_id=18446744073709551615;;current_snapshot_ts=1750797092491; 2025-06-24T20:31:57.608692Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T20:31:57.608748Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T20:31:57.608787Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T20:31:57.608867Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T20:31:57.609849Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;self_id=[1:1575:3376];tablet_id=9437184;parent=[1:1536:3345];fline=manager.cpp:88;event=ask_data;request=request_id=66;9438184000001={portions_count=2};; 2025-06-24T20:31:57.610152Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1536:3345];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T20:31:57.610523Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1536:3345];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T20:31:57.610562Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T20:31:57.610588Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T20:31:57.610641Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1536:3345];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T20:31:57.610727Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1536:3345];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-24T20:31:57.610795Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1536:3345];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750796804604;tx_id=18446744073709551615;;current_snapshot_ts=1750797092491; 2025-06-24T20:31:57.610841Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1536:3345];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T20:31:57.610908Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1536:3345];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T20:31:57.610943Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1536:3345];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T20:31:57.611026Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1536:3345];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 160000/9739224 160000/9739224 160000/9739224 80000/4873744 0/0 >> TOlapNaming::CreateColumnTableFailed [GOOD] |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> Cdc::UpdateStream [GOOD] >> Cdc::UpdateShardCount |88.6%| [TA] $(B)/ydb/core/blobstorage/vdisk/syncer/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest >> TTxAllocatorClientTest::Boot [GOOD] Test command err: 2025-06-24T20:31:58.700420Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-24T20:31:58.701030Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-24T20:31:58.701824Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-24T20:31:58.703670Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T20:31:58.704231Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-24T20:31:58.715179Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T20:31:58.715381Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T20:31:58.715501Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-24T20:31:58.715691Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T20:31:58.715847Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T20:31:58.715946Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-24T20:31:58.716053Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! 
Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 >> TOlapNaming::AlterColumnTableOk [GOOD] >> TOlapNaming::AlterColumnTableFailed |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> TOlap::CreateTableTtl [GOOD] >> THiveTest::TestHiveFollowersWithChangingDC [GOOD] >> THiveTest::TestHiveBalancerWithSystemTablets >> TOlap::AlterTtl [GOOD] >> KqpNewEngine::PkRangeSelect3 [GOOD] >> KqpNewEngine::PkRangeSelect4 |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 17545, MsgBus: 32676 2025-06-24T20:30:29.886822Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615985608721038:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:29.887459Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003f81/r3tmp/tmpA6sKln/pdisk_1.dat 2025-06-24T20:30:30.472103Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:30.472224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:30.475509Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:30.485860Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:30.491758Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519615985608721018:2079] 1750797029883470 != 1750797029883473 TServer::EnableGrpc on GrpcPort 17545, node 1 2025-06-24T20:30:30.700179Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:30:30.700203Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:30:30.700211Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:30:30.700352Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:30:30.940799Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:32676 TClient is connected to server localhost:32676 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:30:31.604188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:30:31.661279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:30:34.012191Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616002788590849:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:34.012370Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:34.082201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:34.256143Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616007083558251:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:34.256259Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:34.278628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:34.382675Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616007083558332:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:34.382804Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:34.383087Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616007083558337:2318], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:34.388518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:30:34.424297Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616007083558339:2319], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-24T20:30:34.517342Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616007083558390:2444] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:30:34.887428Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519615985608721038:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:34.887479Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 4965, MsgBus: 3310 2025-06-24T20:30:36.048441Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616015125745741:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:36.104961Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003f81/r3tmp/tmpSaISkL/pdisk_1.dat 2025-06-24T20:30:36.357311Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:36.357982Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:36.400425Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:36.408287Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4965, node 2 2025-06-24T20:30:36.615885Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:30:36.615917Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:30:36.615927Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:30:36.616082Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3310 2025-06-24T20:30:37.105993Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3310 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:30:37.582020Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdom ... CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:31:46.315780Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:31:46.332056Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:31:49.824247Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519616306548329284:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:49.824380Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:31:51.127635Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519616336613100958:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:51.127989Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:51.129798Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519616336613100993:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:51.137639Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:31:51.160620Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519616336613100995:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:31:51.262476Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519616336613101046:2342] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:31:51.294199Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7519616336613101063:2302], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" 2025-06-24T20:31:51.294534Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=10&id=YTg3OWNhYi1iYzk4ODM1NS1iNTc0MDhjMi04NDY0NDg5YQ==, ActorId: [10:7519616336613100955:2292], ActorState: ExecuteState, TraceId: 01jyht7f5j8sksa41ss994avyg, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" Trying to start YDB, gRPC: 2062, MsgBus: 30602 2025-06-24T20:31:52.516228Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7519616340683867365:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:52.516354Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003f81/r3tmp/tmpIOmxzC/pdisk_1.dat 2025-06-24T20:31:52.720771Z node 11 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:52.721201Z node 11 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [11:7519616340683867346:2079] 1750797112515650 != 1750797112515653 2025-06-24T20:31:52.727201Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:31:52.727344Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:31:52.729652Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2062, node 11 2025-06-24T20:31:52.800735Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:31:52.800763Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:31:52.800777Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:31:52.801012Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30602 2025-06-24T20:31:53.527074Z node 11 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30602 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:31:53.807200Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:31:57.516745Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7519616340683867365:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:57.516866Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:31:57.582093Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519616362158704478:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:57.582093Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519616362158704467:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:57.582213Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:57.587107Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:31:57.601034Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7519616362158704481:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:31:57.681330Z node 11 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [11:7519616362158704532:2338] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:31:57.710295Z node 11 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [11:7519616362158704549:2300], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" 2025-06-24T20:31:57.710588Z node 11 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=11&id=ZmUwYTcyMDMtMzg3MzlmNzEtZjUzODY0YzgtNjM4NDFkYjE=, ActorId: [11:7519616362158704465:2291], ActorState: ExecuteState, TraceId: 01jyht7pfdek5740e3ntb2yhq7, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlapNaming::CreateColumnTableFailed [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:31:50.747791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:31:50.747887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:50.747929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:31:50.747972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:31:50.748074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:31:50.748104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:31:50.748165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:50.748254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:31:50.749193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:31:50.749647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:31:50.835878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:31:50.835951Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:50.868136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:31:50.868687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:31:50.868910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:31:50.880094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:31:50.880358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear 
TempDirsState with owners number: 0 2025-06-24T20:31:50.881138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:50.881482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:31:50.887173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:50.887434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:31:50.889125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:50.889228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:50.889547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:31:50.889629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:50.889725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:31:50.889866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:31:50.898835Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:31:51.070967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:51.071399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.071713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:31:51.071767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:31:51.072039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:31:51.072135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:51.074784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:51.075231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:31:51.075497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.075568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:31:51.075613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:31:51.075649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:31:51.077957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.078034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:31:51.078076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:31:51.080095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.080138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.080179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:51.080233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:31:51.083422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:51.085550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:31:51.085764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:31:51.086657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:51.086778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:51.086819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:51.087093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:31:51.087141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:51.087335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:51.087403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:31:51.089440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:51.089479Z node 1 :FLAT_TX_SCHEMESHARD ... node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:31:58.943316Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:58.943460Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 8589936750 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:58.943517Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:58.943804Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:31:58.943862Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:58.944043Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:58.944125Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, 
LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:31:58.948314Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:58.948381Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:58.948594Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:58.948649Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 1, path id: 1 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:31:58.949103Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:58.949159Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T20:31:58.949271Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T20:31:58.949311Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:31:58.949365Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T20:31:58.949402Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:31:58.949444Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T20:31:58.949492Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:31:58.949536Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T20:31:58.949596Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T20:31:58.949685Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:31:58.949732Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T20:31:58.949770Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T20:31:58.950270Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T20:31:58.950375Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 
2025-06-24T20:31:58.950420Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T20:31:58.950461Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T20:31:58.950503Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:58.950594Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T20:31:58.953671Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T20:31:58.954157Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-24T20:31:58.954981Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:272:2261] Bootstrap 2025-06-24T20:31:58.979182Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:272:2261] Become StateWork (SchemeCache [2:277:2266]) 2025-06-24T20:31:58.982348Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnTable CreateColumnTable { Name: "TestTable" Schema { Columns { Name: "Id" Type: "Int32" NotNull: true } Columns { Name: "mess age" Type: "Utf8" } KeyColumnNames: "Id" } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:58.982697Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: create_table.cpp:593: TCreateColumnTable Propose, path: /MyRoot/TestTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:31:58.982924Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Invalid name for column 'mess age', at schemeshard: 72057594046678944 2025-06-24T20:31:58.983811Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [2:272:2261] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T20:31:58.988437Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Invalid name for column \'mess age\'" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:58.988722Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Invalid name for column 'mess age', operation: CREATE COLUMN TABLE, path: /MyRoot/ 2025-06-24T20:31:58.989243Z node 2 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T20:31:58.989484Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T20:31:58.989529Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T20:31:58.989953Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T20:31:58.990061Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T20:31:58.990106Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:287:2276] TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-24T20:31:58.993344Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnTable CreateColumnTable { Name: "TestTable" Schema { Columns { Name: "Id" Type: "Int32" NotNull: true } Columns { Name: "~!@#$%^&*()+=asdfa" Type: "Utf8" } KeyColumnNames: "Id" } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:58.993648Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: create_table.cpp:593: TCreateColumnTable Propose, path: /MyRoot/TestTable, opId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:31:58.993848Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 102:1, propose status:StatusSchemeError, reason: Invalid name for column '~!@#$%^&*()+=asdfa', at schemeshard: 72057594046678944 2025-06-24T20:31:58.999129Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 102, response: Status: StatusSchemeError Reason: "Invalid name for column \'~!@#$%^&*()+=asdfa\'" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:58.999402Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusSchemeError, reason: Invalid name for column '~!@#$%^&*()+=asdfa', operation: CREATE COLUMN TABLE, path: /MyRoot/ TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T20:31:58.999741Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T20:31:58.999780Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T20:31:59.000127Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T20:31:59.000220Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:31:59.000253Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:294:2283] TestWaitNotification: OK eventTxId 102 >> AsyncIndexChangeExchange::ShouldDeliverChangesOnSplitMerge [GOOD] >> AsyncIndexChangeExchange::ShouldRejectChangesOnQueueOverflowByCount ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlap::CreateTableTtl [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: 
[1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:31:52.332397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:31:52.332501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:52.332550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:31:52.332604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:31:52.332679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:31:52.332715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:31:52.332781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:52.332861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:31:52.333842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:31:52.334303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:31:52.428991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:31:52.429060Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:52.447651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:31:52.448145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:31:52.448374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:31:52.473543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:31:52.473803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:31:52.474690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:52.475086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:31:52.482747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at 
schemeshard: 72057594046678944 2025-06-24T20:31:52.483008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:31:52.484520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:52.484628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:52.484919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:31:52.484997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:52.485074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:31:52.485187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.496407Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:31:52.662618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:52.662880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.663208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:31:52.663269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:31:52.663559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:31:52.663672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:52.666584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:52.666846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 
2025-06-24T20:31:52.667133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.667236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:31:52.667289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:31:52.667328Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:31:52.669869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.669938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:31:52.669984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:31:52.672547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.672616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:52.672668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:52.672737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:31:52.676831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:52.680179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:31:52.680429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:31:52.681586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:52.681757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:52.681812Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:52.682208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:31:52.682290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:52.682513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:52.682611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:31:52.685558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:52.685620Z node 1 :FLAT_TX_SCHEMESHARD ... PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 105 CreateStep: 5000006 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnTableVersion: 1 ColumnTableSchemaVersion: 1 ColumnTableTtlSettingsVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnTableDescription { Name: "Table3" Schema { Columns { Id: 1 Name: "timestamp" Type: "Timestamp" TypeId: 50 NotNull: true StorageId: "" DefaultValue { } } Columns { Id: 2 Name: "data" Type: "Utf8" TypeId: 4608 NotNull: false StorageId: "" DefaultValue { } } KeyColumnNames: "timestamp" NextColumnId: 3 Version: 1 Options { SchemeNeedActualization: 
false } NextColumnFamilyId: 1 } TtlSettings { Enabled { ColumnName: "timestamp" ColumnUnit: UNIT_AUTO Tiers { ApplyAfterSeconds: 360 EvictToExternalStorage { Storage: "/MyRoot/Tier1" } } } Version: 1 } SchemaPresetId: 1 SchemaPresetName: "default" ColumnStorePathId { OwnerId: 72057594046678944 LocalId: 2 } ColumnShardCount: 1 Sharding { ColumnShards: 72075186233409546 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "timestamp" } } } } PathId: 6 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 106 2025-06-24T20:31:59.316998Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/OlapStore" OperationType: ESchemeOpCreateColumnTable CreateColumnTable { Name: "Table4" TtlSettings { Enabled { ColumnName: "timestamp" ColumnUnit: UNIT_AUTO Tiers { ApplyAfterSeconds: 3600000000 EvictToExternalStorage { Storage: "/MyRoot/Tier1" } } } } ColumnShardCount: 1 } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:59.317436Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: create_table.cpp:593: TCreateColumnTable Propose, path: /MyRoot/OlapStore/Table4, opId: 106:0, at schemeshard: 72057594046678944 2025-06-24T20:31:59.317911Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 2], parent name: OlapStore, child name: Table4, child id: [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-24T20:31:59.317984Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 0 2025-06-24T20:31:59.318033Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 106:0 type: TxCreateColumnTable target path: [OwnerId: 72057594046678944, LocalPathId: 7] source path: 2025-06-24T20:31:59.318289Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 1 2025-06-24T20:31:59.318641Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 106:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:31:59.318727Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 106:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:738) 2025-06-24T20:31:59.318893Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-24T20:31:59.318985Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 2 2025-06-24T20:31:59.324652Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 106, response: Status: StatusAccepted TxId: 106 SchemeshardId: 72057594046678944 PathId: 7, at schemeshard: 72057594046678944 2025-06-24T20:31:59.324988Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusAccepted, operation: CREATE COLUMN TABLE, path: /MyRoot/OlapStore/ 2025-06-24T20:31:59.325305Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:59.325358Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:31:59.325593Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 7] 2025-06-24T20:31:59.325714Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:59.325761Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:210:2210], at schemeshard: 72057594046678944, txId: 106, path id: 2 2025-06-24T20:31:59.325813Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:210:2210], at schemeshard: 72057594046678944, txId: 106, path id: 7 2025-06-24T20:31:59.326294Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-24T20:31:59.326362Z node 3 :FLAT_TX_SCHEMESHARD INFO: create_table.cpp:235: TCreateColumnTable TConfigureParts operationId# 106:0 ProgressState at tabletId# 72057594046678944 2025-06-24T20:31:59.326547Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: create_table.cpp:321: TCreateColumnTable TConfigureParts operationId# 106:0 ProgressState Propose modify scheme on shard tabletId: 72075186233409546 2025-06-24T20:31:59.327648Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 10 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T20:31:59.327778Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 10 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T20:31:59.327825Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 2025-06-24T20:31:59.327874Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 10 2025-06-24T20:31:59.327924Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-24T20:31:59.328453Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 1 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T20:31:59.328536Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: 
TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 1 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T20:31:59.328565Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 2025-06-24T20:31:59.328592Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 7], version: 1 2025-06-24T20:31:59.328629Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-24T20:31:59.328695Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 106, ready parts: 0/1, is published: true 2025-06-24T20:31:59.336249Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 106:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275382272 2025-06-24T20:31:59.336472Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 106, partId: 0, tablet: 72075186233409546 2025-06-24T20:31:59.337432Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186233409546;self_id=[3:311:2296];ev=NActors::IEventHandle;tablet_id=72075186233409546;tx_id=106;this=88923003994496;method=TTxController::StartProposeOnExecute;tx_info=106:TX_KIND_SCHEMA;min=5000007;max=18446744073709551615;plan=0;src=[3:129:2153];cookie=12:5;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T20:31:59.415553Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-24T20:31:59.416024Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 TestModificationResult got TxId: 106, wait until txId: 106 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/MyRoot/Tier1' stopped at tablet 72075186233409546 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/MyRoot/Tier1' stopped at tablet 72075186233409546 >> TYardTest::TestEnormousDisk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlap::AlterTtl [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:31:55.419499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:31:55.419594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-24T20:31:55.419637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:31:55.419681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:31:55.419747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:31:55.419778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:31:55.419831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:55.419902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:31:55.420738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:31:55.421110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:31:55.512635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:31:55.512698Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:55.532241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:31:55.532689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:31:55.532846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:31:55.542181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:31:55.542392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:31:55.542997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:55.543410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:31:55.546485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:55.546675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:31:55.548021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:55.548102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:55.548385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: 
TTxServerlessStorageBilling.Execute 2025-06-24T20:31:55.548458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:55.548538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:31:55.548656Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:31:55.555734Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:31:55.667471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:55.667691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:55.667930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:31:55.667967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:31:55.668178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:31:55.668244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:55.671028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:55.671308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:31:55.671515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:55.671587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:31:55.671635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:31:55.671670Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:31:55.675408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:55.675483Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:31:55.675526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:31:55.677833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:55.677887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:55.677933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:55.678000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:31:55.681716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:55.684146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:31:55.684410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:31:55.685485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:55.685625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:55.685676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:55.685960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:31:55.686020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:55.686216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 
72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:55.686303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:31:55.688713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:55.688759Z node 1 :FLAT_TX_SCHEMESHARD ... NKikimr::TEvColumnShard::TEvProposeTransactionResult> complete, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-24T20:31:59.587846Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-24T20:31:59.587922Z node 3 :FLAT_TX_SCHEMESHARD INFO: alter_table.cpp:148: TAlterColumnTable TPropose operationId# 106:0 HandleReply ProgressState at tablet: 72057594046678944 2025-06-24T20:31:59.588015Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 106 ready parts: 1/1 2025-06-24T20:31:59.588182Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } AffectedSet { TabletId: 72075186233409546 Flags: 2 } ExecLevel: 0 TxId: 106 MinStep: 5000006 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:59.592504Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 106:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:106 msg type: 269090816 2025-06-24T20:31:59.592636Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 106, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 106 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 106 at step: 5000007 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 106 at step: 5000007 2025-06-24T20:31:59.593001Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000007, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:59.593110Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 106 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 12884904045 } } Step: 5000007 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:59.593164Z node 3 :FLAT_TX_SCHEMESHARD INFO: alter_table.cpp:109: TAlterColumnTable TPropose operationId# 106:0 HandleReply TEvOperationPlan at tablet: 72057594046678944, stepId: 5000007 2025-06-24T20:31:59.593875Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 106:0 128 -> 129 2025-06-24T20:31:59.594144Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T20:31:59.594244Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId 
[OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T20:31:59.606899Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186233409546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=106;fline=tx_controller.cpp:215;event=finished_tx;tx_id=106; FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000007 2025-06-24T20:31:59.615519Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:59.615602Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:31:59.615862Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T20:31:59.616131Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:59.616187Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:210:2210], at schemeshard: 72057594046678944, txId: 106, path id: 2 2025-06-24T20:31:59.616243Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:210:2210], at schemeshard: 72057594046678944, txId: 106, path id: 3 2025-06-24T20:31:59.616348Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-24T20:31:59.616426Z node 3 :FLAT_TX_SCHEMESHARD INFO: alter_table.cpp:199: TAlterColumnTable TProposedWaitParts operationId# 106:0 ProgressState at tablet: 72057594046678944 2025-06-24T20:31:59.616506Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: alter_table.cpp:222: TAlterColumnTable TProposedWaitParts operationId# 106:0 ProgressState wait for NotifyTxCompletionResult tabletId: 72075186233409546 2025-06-24T20:31:59.618164Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T20:31:59.618415Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T20:31:59.618464Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 2025-06-24T20:31:59.618512Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8 2025-06-24T20:31:59.618564Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T20:31:59.622985Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 
LocalPathId: 3 Version: 14 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T20:31:59.623098Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 14 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T20:31:59.623132Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 2025-06-24T20:31:59.623190Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 14 2025-06-24T20:31:59.623228Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T20:31:59.623335Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 106, ready parts: 0/1, is published: true 2025-06-24T20:31:59.625350Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 106:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275382275 2025-06-24T20:31:59.628275Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-24T20:31:59.629146Z node 3 :TX_TIERING ERROR: log.cpp:784: fline=manager.cpp:158;error=cannot_read_secrets;reason=Can't read access key: No such secret: SId:secret; 2025-06-24T20:31:59.629510Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-24T20:31:59.642159Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6226: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 106 2025-06-24T20:31:59.642237Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 106, tablet: 72075186233409546, partId: 0 2025-06-24T20:31:59.642410Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 106:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 106 FAKE_COORDINATOR: Erasing txId 106 2025-06-24T20:31:59.645016Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-24T20:31:59.645210Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-24T20:31:59.645257Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 106:0 ProgressState 2025-06-24T20:31:59.645416Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#106:0 progress is 1/1 2025-06-24T20:31:59.645463Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T20:31:59.645516Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done 
id#106:0 progress is 1/1 2025-06-24T20:31:59.645556Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T20:31:59.645603Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: true 2025-06-24T20:31:59.645692Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:345:2321] message: TxId: 106 2025-06-24T20:31:59.645758Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T20:31:59.645808Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 106:0 2025-06-24T20:31:59.645849Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 106:0 2025-06-24T20:31:59.646006Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T20:31:59.652242Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-24T20:31:59.652321Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [3:554:2522] TestWaitNotification: OK eventTxId 106 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/MyRoot/Tier1' stopped at tablet 72075186233409546 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/MyRoot/Tier1' stopped at tablet 72075186233409546 >> THiveTest::TestDrainWithMaxTabletsScheduled [GOOD] >> THiveTest::TestDownAfterDrain |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> THiveTest::TestFollowersCrossDC_MovingLeader [GOOD] >> THiveTest::TestFollowersCrossDC_KillingHiveAndFollower |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> Cdc::NaN[YdsRunner] [GOOD] >> Cdc::NaN[TopicRunner] |88.6%| [TA] $(B)/ydb/services/fq/ut_integration/test-results/unittest/{meta.json ... results_accumulator.log} |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut |88.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut |88.7%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpPg::AlterSequence [GOOD] >> KqpPg::AlterColumnSetDefaultFromSequence >> KqpQueryService::AlterTable_DropNotNull_WithSetFamily_Valid [GOOD] >> KqpPg::InsertFromSelect_Simple-useSink [GOOD] |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> THiveTest::TestFollowersCrossDC_KillingHiveAndFollower [GOOD] >> THiveTest::TestFollowerCompatability1 >> StoragePool::TestDistributionExactMin [GOOD] |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> TYardTest::TestEnormousDisk [GOOD] >> KqpPg::InsertFromSelect_NoReorder-useSink >> StoragePool::TestDistributionExactMinWithOverflow [GOOD] >> StoragePool::TestDistributionRandomMin7p >> TOlap::CreateDropStandaloneTableDefaultSharding [GOOD] >> Cdc::Drop[PqRunner] [GOOD] >> VDiskBalancing::TestRandom_Mirror3dc [GOOD] |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> TBlobStorageProxyTest::TestGetAndRangeGetManyBlobs [GOOD] >> THiveTest::TestDownAfterDrain [GOOD] >> KqpPg::EquiJoin-useSink [GOOD] >> Cdc::Drop[YdsRunner] >> DSProxyStrategyTest::Restore_block42 [GOOD] |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> Cdc::UpdateShardCount [GOOD] >> TBlobStorageProxyTest::TestEmptyRange >> Cdc::UpdateRetentionPeriod |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> THiveTest::TestCreateTabletsWithRaceForStoragePoolsKIKIMR_9659 >> KqpPg::ExplainColumnsReorder >> THiveTest::TestCreateTabletsWithRaceForStoragePoolsKIKIMR_9659 [GOOD] >> KqpQueryService::TableSink_HtapInteractive+withOltpSink [GOOD] >> TOlapNaming::AlterColumnTableFailed [GOOD] >> THiveTest::TestDeleteTablet >> TBlobStorageProxyTest::TestEmptyRange [GOOD] >> THiveTest::TestFollowerCompatability1 [GOOD] >> KqpNewEngine::PkRangeSelect4 [GOOD] >> StoragePool::TestDistributionRandomMin7p [GOOD] >> YdbTableSplit::MergeByNoLoadAfterSplit [GOOD] >> StoragePool::TestDistributionRandomMin7pWithOverflow [GOOD] >> AsyncIndexChangeExchange::ShouldRejectChangesOnQueueOverflowByCount [GOOD] >> KqpPg::InsertFromSelect_NoReorder-useSink [GOOD] >> KqpPg::TableSelect+useSink [GOOD] >> Cdc::Drop[YdsRunner] [GOOD] >> Cdc::Drop[TopicRunner] >> Cdc::UpdateRetentionPeriod [GOOD] >> KqpPg::ExplainColumnsReorder [GOOD] >> KqpPg::AlterColumnSetDefaultFromSequence [GOOD] >> KqpPg::CreateTableIfNotExists_GenericQuery >> Cdc::NaN[TopicRunner] [GOOD] >> THiveTest::TestCreate100Tablets [GOOD] >> TOlap::StoreStats [GOOD] >> KqpQueryService::PeriodicTaskInSessionPoolSessionCloseByIdle [GOOD] >> KqpQueryService::ReadDatashardAndColumnshard |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest >> DSProxyStrategyTest::Restore_block42 [GOOD] |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} 
ydb/services/ext_index/ut/unittest >> THiveTest::TestDeleteTablet [GOOD] >> THiveTest::TestFollowerCompatability2 >> AsyncIndexChangeExchange::ShouldRejectChangesOnQueueOverflowBySize >> KqpPg::InsertFromSelect_Serial+useSink >> KqpPg::TableSelect-useSink >> Cdc::SupportedTypes >> KqpNewEngine::PruneEffectPartitions+UseSink >> Cdc::Drop[TopicRunner] [GOOD] |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> Cdc::RacyRebootAndSplitWithTxInflight >> THiveTest::TestCreateSubHiveCreateTablet |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> THiveTest::TestDeleteOwnerTablets >> THiveTest::TestFollowerCompatability2 [GOOD] >> AsyncIndexChangeExchange::ShouldRejectChangesOnQueueOverflowBySize [GOOD] >> KqpPg::InsertFromSelect_Serial+useSink [GOOD] >> KqpPg::InsertFromSelect_Serial-useSink >> TOlap::StoreStatsQuota >> Cdc::DropColumn >> KqpNewEngine::PruneEffectPartitions+UseSink [GOOD] >> KqpNewEngine::PrecomputeKey >> KqpPg::CreateTableIfNotExists_GenericQuery [GOOD] |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> KqpPg::TableArrayInsert-useSink [GOOD] >> Cdc::SupportedTypes [GOOD] >> Cdc::SplitTopicPartition_TopicAutoPartitioning >> KqpQueryService::ReadDatashardAndColumnshard [GOOD] >> THiveTest::TestHiveBalancerWithSystemTablets [GOOD] |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestEmptyRange [GOOD] |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> StoragePool::TestDistributionRandomMin7pWithOverflow [GOOD] Test command err: Took 10.485211 seconds >> THiveTest::TestDeleteOwnerTablets [GOOD] >> THiveTest::TestFollowerCompatability3 >> AsyncIndexChangeExchange::ShouldNotReorderChangesOnRace >> THiveTest::TestCreateSubHiveCreateTablet [GOOD] >> KqpPg::InsertFromSelect_Serial-useSink [GOOD] >> Cdc::RacyRebootAndSplitWithTxInflight [GOOD] >> KqpPg::InsertNoTargetColumns_ColumnOrder+useSink >> Cdc::RacyActivateAndEnqueue >> Cdc::DropColumn [GOOD] >> Cdc::DropIndex >> KqpNewEngine::PrecomputeKey [GOOD] >> KqpPg::Returning+useSink >> KqpPg::AlterColumnSetDefaultFromSequenceWithSchemaname >> THiveTest::TestHiveNoBalancingWithLowResourceUsage >> THiveTest::TestDeleteOwnerTabletsMany >> THiveTest::TestFollowerCompatability3 [GOOD] >> KqpNewEngine::PrimaryView >> THiveTest::TestCheckSubHiveMigrationManyTablets >> THiveTest::TestDeleteOwnerTabletsMany [GOOD] >> THiveTest::TestGetStorageInfo >> THiveTest::TestDeleteTabletWithFollowers >> AsyncIndexChangeExchange::ShouldNotReorderChangesOnRace [GOOD] >> THiveTest::TestGetStorageInfo [GOOD] >> THiveTest::TestGetStorageInfoDeleteTabletBeforeAssigned |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest >> THiveTest::TestDeleteTabletWithFollowers [GOOD] >> 
Cdc::AreJsonsEqualReturnsTrueOnEqual [GOOD] >> THiveTest::TestCreateTabletBeforeLocal |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest >> Cdc::AreJsonsEqualReturnsFalseOnDifferent [GOOD] >> THiveTest::TestGetStorageInfoDeleteTabletBeforeAssigned [GOOD] >> Cdc::AreJsonsEqualFailsOnWildcardInArray [GOOD] >> Cdc::AlterViaTopicService >> THiveTest::TestCreateTabletBeforeLocal [GOOD] >> THiveTest::TestLockTabletExecutionRebootTimeout [GOOD] >> KqpPg::AlterColumnSetDefaultFromSequenceWithSchemaname [GOOD] >> PartitionStats::CollectorOverload [GOOD] >> PartitionStats::Collector [GOOD] >> THiveTest::TestCreateTabletReboots >> THiveTest::TestLockTabletExecutionReconnect >> ExternalIndex::Simple |88.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut |88.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant |88.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut |88.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant |88.7%| [LD] {RESULT} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut |88.7%| [LD] {RESULT} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut |88.7%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TA] {RESULT} $(B)/ydb/services/fq/ut_integration/test-results/unittest/{meta.json ... results_accumulator.log} |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest >> THiveTest::TestCreateTabletReboots [GOOD] >> KqpPg::CheckPgAutoParams+useSink >> THiveTest::TestCreateTabletWithWrongSPoolsAndReassignGroupsFailButDeletionIsOk >> THiveTest::TestCreateTabletWithWrongSPoolsAndReassignGroupsFailButDeletionIsOk [GOOD] >> THiveTest::TestCreateTabletAndReassignGroupsWithReboots |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest >> PartitionStats::Collector [GOOD] |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest >> PartitionStats::CollectorOverload [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::MergeByNoLoadAfterSplit [GOOD] Test command err: 2025-06-24T20:30:54.418422Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616091255729814:2079];send_to=[0:7307199536658146131:7762515]; 
2025-06-24T20:30:54.418548Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002fe3/r3tmp/tmpJe7QLE/pdisk_1.dat 2025-06-24T20:30:55.344398Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:55.396079Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:55.396173Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:55.430515Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:55.433250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:55.463362Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 24087, node 1 2025-06-24T20:30:55.793203Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:30:55.793237Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:30:55.793245Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:30:55.793375Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31378 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:30:56.552246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
Triggering split by load TClient is connected to server localhost:31378 2025-06-24T20:30:59.423331Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616091255729814:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:59.423407Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:30:59.833721Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616112730567349:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:59.833830Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:00.196171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:00.471095Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616117025534826:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:00.471193Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:00.511944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750797060376 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 1 shards TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750797060376 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-24T20:31:00.848199Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616117025534943:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:00.848290Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:00.853974Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616117025534956:2354], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:00.854026Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616117025534957:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:00.854053Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616117025534958:2356], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:00.883221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976710661:0, at schemeshard: 72057594046644480 2025-06-24T20:31:00.883448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710661:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:00.883473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager, operationId: 281474976710661:1, at schemeshard: 72057594046644480 2025-06-24T20:31:00.883576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710661:2, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:00.883600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager/pools, operationId: 281474976710661:2, at schemeshard: 72057594046644480 2025-06-24T20:31:00.883659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710661:3, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:00.883728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_resource_pool.cpp:148: [72057594046644480] TCreateResourcePool Propose: opId# 281474976710661:3, path# /Root/.metadata/workload_manager/pools/default 2025-06-24T20:31:00.884069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710661:3 1 -> 128 2025-06-24T20:31:00.884387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710661:4, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:00.884413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operati ... tPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... 
(TRUNCATED) 2025-06-24T20:32:05.612366Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__table_stats.cpp:450: Propose merge request : Transaction { WorkingDir: "/Root" OperationType: ESchemeOpSplitMergeTablePartitions SplitMergeTablePartitions { TablePath: "/Root/Foo" SourceTabletId: 72075186224037889 SourceTabletId: 72075186224037890 SchemeshardId: 72057594046644480 } Internal: true FailOnExist: false } TxId: 281474976715658 TabletId: 72057594046644480, reason: shard with tabletId: 72075186224037889 merge by load (shardLoad: 0.02), shardToMergeCount: 2, totalSize: 0, sizeToMerge: 0, totalLoad: 0.04, loadThreshold: 0.07 2025-06-24T20:32:05.612591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:804: TSplitMerge Propose, tableStr: /Root/Foo, tableId: , opId: 281474976715658:0, at schemeshard: 72057594046644480, request: TablePath: "/Root/Foo" SourceTabletId: 72075186224037889 SourceTabletId: 72075186224037890 SchemeshardId: 72057594046644480 2025-06-24T20:32:05.613290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:1083: TSplitMerge Propose accepted, tableStr: /Root/Foo, tableId: , opId: 281474976715658:0, at schemeshard: 72057594046644480, op: SourceRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "\002\000\004\000\000\000\262g\007\206\000\000\000\200" TabletID: 72075186224037889 ShardIdx: 2 } SourceRanges { KeyRangeBegin: "\002\000\004\000\000\000\262g\007\206\000\000\000\200" KeyRangeEnd: "" TabletID: 72075186224037890 ShardIdx: 3 } DestinationRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "" ShardIdx: 4 }, request: TablePath: "/Root/Foo" SourceTabletId: 72075186224037889 SourceTabletId: 72075186224037890 SchemeshardId: 72057594046644480 2025-06-24T20:32:05.613336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:32:05.615941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976715658:0 ProgressState, operation type: TxSplitTablePartition, at tablet# 72057594046644480 2025-06-24T20:32:05.620188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:176: TCreateParts opId# 281474976715658:0 HandleReply TEvCreateTabletReply, at tabletId: 72057594046644480 2025-06-24T20:32:05.620260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715658:0 2 -> 3 2025-06-24T20:32:05.621725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:84: TSplitMerge TConfigureDestination ProgressState, operationId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-24T20:32:05.624567Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037891 actor [1:7519616396198487565:9440] 2025-06-24T20:32:05.639711Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037891 2025-06-24T20:32:05.639843Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037891, state: WaitScheme 2025-06-24T20:32:05.640049Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-06-24T20:32:05.643446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:38: TSplitMerge TConfigureDestination operationId# 281474976715658:0 HandleReply 
TEvInitSplitMergeDestinationAck, operationId: 281474976715658:0, at schemeshard: 72057594046644480 message# OperationCookie: 281474976715658 TabletId: 72075186224037891 2025-06-24T20:32:05.643498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715658:0 3 -> 131 2025-06-24T20:32:05.644992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:334: TSplitMerge TTransferData operationId# 281474976715658:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T20:32:05.654820Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037891 2025-06-24T20:32:05.654936Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037891 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T20:32:05.654992Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-24T20:32:05.655025Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037891 2025-06-24T20:32:05.655303Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-06-24T20:32:05.656822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:207: TSplitMerge TTransferData operationId# 281474976715658:0 HandleReply TEvSplitAck, at schemeshard: 72057594046644480, message: OperationCookie: 281474976715658 TabletId: 72075186224037889 2025-06-24T20:32:05.657034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:207: TSplitMerge TTransferData operationId# 281474976715658:0 HandleReply TEvSplitAck, at schemeshard: 72057594046644480, message: OperationCookie: 281474976715658 TabletId: 72075186224037890 2025-06-24T20:32:05.657242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715658:0 131 -> 132 2025-06-24T20:32:05.658185Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-24T20:32:05.658355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-24T20:32:05.658394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:437: TSplitMerge TNotifySrc, operationId: 281474976715658:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T20:32:05.658760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046644480, cookie: 281474976715658 2025-06-24T20:32:05.658792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976715658 2025-06-24T20:32:05.658814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715658, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 6 2025-06-24T20:32:05.663127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:392: TSplitMerge TNotifySrc, operationId: 281474976715658:0 HandleReply TEvSplitPartitioningChangedAck, from datashard: 72075186224037890, at schemeshard: 72057594046644480 2025-06-24T20:32:05.663162Z node 1 
:TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037890 Initiating switch from PreOffline to Offline state 2025-06-24T20:32:05.663202Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037889 Initiating switch from PreOffline to Offline state 2025-06-24T20:32:05.663420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:392: TSplitMerge TNotifySrc, operationId: 281474976715658:0 HandleReply TEvSplitPartitioningChangedAck, from datashard: 72075186224037889, at schemeshard: 72057594046644480 2025-06-24T20:32:05.663476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715658:0 progress is 1/1 2025-06-24T20:32:05.663500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715658:0 progress is 1/1 2025-06-24T20:32:05.663533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715658:0 2025-06-24T20:32:05.664746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:260: Unable to activate 281474976715658:0 2025-06-24T20:32:05.666371Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037890 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T20:32:05.666384Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037889 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T20:32:05.666654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-24T20:32:05.666837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037889, state: Offline, at schemeshard: 72057594046644480 2025-06-24T20:32:05.670363Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-06-24T20:32:05.670403Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037889 reason = ReasonStop 2025-06-24T20:32:05.670797Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-24T20:32:05.670911Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-24T20:32:05.671008Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 2025-06-24T20:32:05.671244Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-06-24T20:32:05.671315Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-24T20:32:05.671338Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037889 TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750797060376 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 3 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::AlterTable_DropNotNull_WithSetFamily_Valid [GOOD] Test command err: Trying to start YDB, gRPC: 13684, MsgBus: 28598 2025-06-24T20:31:35.883255Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616265755155014:2175];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:35.883330Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003150/r3tmp/tmpZnrDo6/pdisk_1.dat 2025-06-24T20:31:36.631030Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:31:36.632068Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:31:36.636144Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:31:36.678818Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:36.679690Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616265755154877:2079] 1750797095870002 != 1750797095870005 TServer::EnableGrpc on GrpcPort 13684, node 1 2025-06-24T20:31:36.888634Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:31:36.978736Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:31:36.978773Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:31:36.978782Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:31:36.978907Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28598 TClient is connected to server localhost:28598 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:31:37.945934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:31:40.547057Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616287229992008:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:40.547189Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:40.547706Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616287229992020:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:40.552694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:31:40.570302Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616287229992022:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:31:40.632588Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616287229992073:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:31:40.878772Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616265755155014:2175];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:40.878843Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:31:41.058139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-24T20:31:41.077308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:41.293559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T20:31:41.350315Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616291524959617:2486] txid# 281474976710664, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:31:41.367525Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616291524959624:2491] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/ZWVmNGMwMy03ODYwYTIyMi1kNWIyZDU5Yi03NDBmOGVjZQ==\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:31:41.396856Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-24T20:31:41.424055Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616291524959684:2538] txid# 281474976710667, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:31:41.429748Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616291524959691:2543] txid# 281474976710668, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/ZWVmNGMwMy03ODYwYTIyMi1kNWIyZDU5Yi03NDBmOGVjZQ==\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 
2025-06-24T20:31:41.434009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:41.828463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T20:31:42.017054Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616295819927171:2650] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:31:42.019525Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616295819927178:2655] txid# 281474976710675, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/ZWVmNGMwMy03ODYwYTIyMi1kNWIyZDU5Yi03NDBmOGVjZQ==\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:31:42.074730Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-24T20:31:42.092722Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519616295819927234:2371], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:21: Error: At function: KiReadTable!
:3:21: Error: Cannot find table 'db.[/Root/Temp]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:31:42.094511Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YjQ2YWU4ODItMWI5MGUwMGEtZTdjMmY1OGYtZDc0MTRmM2M=, ActorId: [1:7519616295819927232:2370], ActorState: ExecuteState, TraceId: 01jyht7azmde1eg5kkqnspr0bb, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:31:42.149300Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519616295819927244:2376], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:17: Error: At function: KiReadTable!
:3:17: Error: Cannot find table 'db.[/Root/Temp]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:31:42.151193Z node 1 :KQP_SESSION WARN: kqp_session_ ... k/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:54.725606Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:54.782578Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:55.286620Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:55.479766Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:55.595405Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:55.761837Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710685:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:56.059618Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710688:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:56.206003Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710690:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 23433, MsgBus: 22947 2025-06-24T20:31:57.166466Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519616361806134705:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:57.166563Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003150/r3tmp/tmp5O05zR/pdisk_1.dat 2025-06-24T20:31:57.320695Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:57.321436Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519616361806134682:2079] 1750797117165250 != 1750797117165253 2025-06-24T20:31:57.346159Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:31:57.346284Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:31:57.348770Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23433, node 4 2025-06-24T20:31:57.407744Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:31:57.407775Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:31:57.407788Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:31:57.407980Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22947 TClient is connected to server localhost:22947 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:31:58.027340Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:31:58.037047Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:31:58.193023Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:32:01.005213Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519616378986004500:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:01.005326Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:01.006204Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519616378986004512:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:01.012753Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:32:01.034119Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519616378986004514:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:32:01.123955Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519616378986004565:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:32:01.393928Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:32:01.650897Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519616378986004717:2316], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:30: Error: At function: KiWriteTable!
:2:84: Error: Failed to convert type: Struct<'id':Int32,'val1':Null,'val2':Int32> to Struct<'id':Int32,'val1':Int32,'val2':Int32?>
:2:84: Error: Failed to convert 'val1': Null to Int32
:2:84: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-24T20:32:01.651174Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=MmNjOTRkZmQtNjc5NzUyNmEtYWQ2ZDM0NzItOTBjNDc0YTA=, ActorId: [4:7519616378986004715:2315], ActorState: ExecuteState, TraceId: 01jyht7y2pchfy1tsfnqwj5azc, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T20:32:01.698901Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T20:32:01.746116Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T20:32:02.167372Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519616361806134705:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:02.167488Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ReadDatashardAndColumnshard [GOOD] Test command err: Trying to start YDB, gRPC: 17938, MsgBus: 13715 2025-06-24T20:31:35.718871Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616265772917136:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:35.718939Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00313e/r3tmp/tmpf48AF8/pdisk_1.dat 2025-06-24T20:31:36.156716Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:36.172596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:31:36.172714Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:31:36.211650Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17938, node 1 2025-06-24T20:31:36.287479Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:31:36.287494Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:31:36.287498Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:31:36.287583Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration TClient is connected to server localhost:13715 2025-06-24T20:31:36.728581Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13715 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:31:37.050985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:31:37.089739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:31:37.245627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:31:37.444389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:31:37.528593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:31:39.769145Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616282952787909:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:39.769258Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:40.270920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:40.352809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:40.429917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:40.483049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:40.548545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:40.621940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:40.719441Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616265772917136:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:40.719552Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:31:40.768780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:40.864099Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519616287247755871:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:40.864194Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:40.864667Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616287247755876:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:40.870101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:31:40.895027Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616287247755878:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:31:40.990423Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616287247755929:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:31:51.149781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:31:51.149814Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded Trying to start YDB, gRPC: 10152, MsgBus: 3618 2025-06-24T20:31:54.927901Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616347102275803:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:54.927986Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; te ... :32:15.821730Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T20:32:15.821755Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T20:32:15.821778Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T20:32:15.821799Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T20:32:15.821922Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T20:32:15.821947Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T20:32:15.822067Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T20:32:15.822098Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T20:32:15.822112Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T20:32:15.822133Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T20:32:15.822178Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T20:32:15.822207Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T20:32:15.822256Z node 3 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T20:32:15.822291Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T20:32:15.822315Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T20:32:15.822740Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T20:32:15.822784Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T20:32:15.824575Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[3:7519616440857227264:2311];ev=NActors::IEventHandle;tablet_id=72075186224037894;tx_id=281474976715661;this=88923050789888;method=TTxController::StartProposeOnExecute;tx_info=281474976715661:TX_KIND_SCHEMA;min=1750797135824;max=18446744073709551615;plan=0;src=[3:7519616427972324752:2148];cookie=72:2;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T20:32:15.825041Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[3:7519616440857227275:2313];ev=NActors::IEventHandle;tablet_id=72075186224037892;tx_id=281474976715661;this=88923050803328;method=TTxController::StartProposeOnExecute;tx_info=281474976715661:TX_KIND_SCHEMA;min=1750797135824;max=18446744073709551615;plan=0;src=[3:7519616427972324752:2148];cookie=52:2;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T20:32:15.827334Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[3:7519616440857227377:2316];ev=NActors::IEventHandle;tablet_id=72075186224037891;tx_id=281474976715661;this=88923050805120;method=TTxController::StartProposeOnExecute;tx_info=281474976715661:TX_KIND_SCHEMA;min=1750797135827;max=18446744073709551615;plan=0;src=[3:7519616427972324752:2148];cookie=42:2;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T20:32:15.828040Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[3:7519616440857227262:2309];ev=NActors::IEventHandle;tablet_id=72075186224037896;tx_id=281474976715661;this=88923050792128;method=TTxController::StartProposeOnExecute;tx_info=281474976715661:TX_KIND_SCHEMA;min=1750797135827;max=18446744073709551615;plan=0;src=[3:7519616427972324752:2148];cookie=92:2;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T20:32:15.829278Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[3:7519616440857227239:2308];ev=NActors::IEventHandle;tablet_id=72075186224037890;tx_id=281474976715661;this=88923050806912;method=TTxController::StartProposeOnExecute;tx_info=281474976715661:TX_KIND_SCHEMA;min=1750797135829;max=18446744073709551615;plan=0;src=[3:7519616427972324752:2148];cookie=32:2;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T20:32:15.830798Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[3:7519616440857227305:2314];ev=NActors::IEventHandle;tablet_id=72075186224037895;tx_id=281474976715661;this=88923050794144;method=TTxController::StartProposeOnExecute;tx_info=281474976715661:TX_KIND_SCHEMA;min=1750797135830;max=18446744073709551615;plan=0;src=[3:7519616427972324752:2148];cookie=82:2;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T20:32:15.834296Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T20:32:15.834295Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T20:32:15.845749Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T20:32:15.845960Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T20:32:15.846176Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T20:32:15.846399Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T20:32:15.849661Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T20:32:15.850128Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037898;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T20:32:15.850157Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T20:32:15.850588Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T20:32:15.854672Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T20:32:15.855193Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T20:32:15.855387Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T20:32:15.855865Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T20:32:15.858837Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T20:32:15.859450Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T20:32:15.859838Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T20:32:15.859898Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T20:32:15.863655Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T20:32:15.863680Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T20:32:16.007616Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037898;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_HtapInteractive+withOltpSink [GOOD] Test command err: Trying to start YDB, gRPC: 32296, MsgBus: 8438 2025-06-24T20:31:35.360990Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616265732471735:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:35.361063Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00315d/r3tmp/tmpqvMUv2/pdisk_1.dat 2025-06-24T20:31:35.843996Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-24T20:31:35.844084Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:31:35.850268Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616265732471714:2079] 1750797095359593 != 1750797095359596 2025-06-24T20:31:35.863415Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:35.863737Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32296, node 1 2025-06-24T20:31:35.984788Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:31:35.984814Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:31:35.984821Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:31:35.984946Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8438 2025-06-24T20:31:36.396722Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8438 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:31:36.606604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:31:36.636026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:31:38.768438Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616278617374241:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:38.768580Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:39.091594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T20:31:39.284150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519616282912341706:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T20:31:39.284391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519616282912341706:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T20:31:39.284620Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519616282912341706:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T20:31:39.284709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519616282912341706:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T20:31:39.284785Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519616282912341706:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T20:31:39.284910Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519616282912341706:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T20:31:39.285087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519616282912341706:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T20:31:39.285217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519616282912341706:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T20:31:39.285367Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519616282912341706:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T20:31:39.285490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519616282912341706:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T20:31:39.285607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[1:7519616282912341706:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T20:31:39.291389Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519616282912341709:2303];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T20:31:39.291514Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519616282912341709:2303];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T20:31:39.291702Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519616282912341709:2303];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T20:31:39.291811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519616282912341709:2303];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T20:31:39.291926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519616282912341709:2303];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T20:31:39.292054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519616282912341709:2303];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T20:31:39.292156Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519616282912341709:2303];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T20:31:39.292299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519616282912341709:2303];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T20:31:39.292395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519616282912341709:2303];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T20:31:39.292513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519616282912341709:2303];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T20:31:39.292639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519616282912341709:2303];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T20:31:39.320715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037895;self_id=[1:7519616282912341708:2302];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T20:31:39.320786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519616282912341708:2302];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T20:31:39.320933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519616282912341708:2302];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T20:31:39.321012Z node 1 :TX_COLUMNSHARD WARN ... V0ChunksMeta;id=18; 2025-06-24T20:32:03.242749Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T20:32:03.243927Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[3:7519616382349879267:2305];ev=NActors::IEventHandle;tablet_id=72075186224037895;tx_id=281474976715658;this=88923055256000;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1750797123243;max=18446744073709551615;plan=0;src=[3:7519616369464976869:2152];cookie=82:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T20:32:03.244326Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[3:7519616382349879206:2303];ev=NActors::IEventHandle;tablet_id=72075186224037894;tx_id=281474976715658;this=88923004094400;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1750797123243;max=18446744073709551615;plan=0;src=[3:7519616369464976869:2152];cookie=72:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T20:32:03.244575Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[3:7519616382349879188:2296];ev=NActors::IEventHandle;tablet_id=72075186224037888;tx_id=281474976715658;this=88923055264512;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1750797123244;max=18446744073709551615;plan=0;src=[3:7519616369464976869:2152];cookie=12:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T20:32:03.247021Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[3:7519616382349879200:2301];ev=NActors::IEventHandle;tablet_id=72075186224037893;tx_id=281474976715658;this=88923053219616;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1750797123246;max=18446744073709551615;plan=0;src=[3:7519616369464976869:2152];cookie=62:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T20:32:03.247679Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7519616382349879204:2302];ev=NActors::IEventHandle;tablet_id=72075186224037890;tx_id=281474976715658;this=88923055279296;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1750797123247;max=18446744073709551615;plan=0;src=[3:7519616369464976869:2152];cookie=32:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T20:32:03.250500Z node 3 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037891;self_id=[3:7519616382349879210:2304];ev=NActors::IEventHandle;tablet_id=72075186224037891;tx_id=281474976715658;this=88923057592768;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1750797123250;max=18446744073709551615;plan=0;src=[3:7519616369464976869:2152];cookie=42:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T20:32:03.251115Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519616382349879199:2300];ev=NActors::IEventHandle;tablet_id=72075186224037897;tx_id=281474976715658;this=88923055257120;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1750797123250;max=18446744073709551615;plan=0;src=[3:7519616369464976869:2152];cookie=102:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T20:32:03.254424Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T20:32:03.254435Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T20:32:03.260853Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-24T20:32:03.261470Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-24T20:32:03.261609Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T20:32:03.262511Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T20:32:03.268007Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-24T20:32:03.268640Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-24T20:32:03.268759Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T20:32:03.269601Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T20:32:03.274699Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-24T20:32:03.275427Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T20:32:03.275501Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-24T20:32:03.276372Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T20:32:03.281157Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-24T20:32:03.281860Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T20:32:03.282228Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-24T20:32:03.282957Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T20:32:03.287646Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-24T20:32:03.288940Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-24T20:32:03.295887Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:32:03.355123Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519616386644846939:2373], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:03.355223Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:03.355258Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519616386644846944:2376], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:03.359047Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:32:03.369298Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519616386644846946:2377], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-24T20:32:03.436349Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519616386644846997:2642] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:32:03.606879Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T20:32:03.804313Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T20:32:04.112634Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519616369464976555:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:04.112720Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlap::CreateDropStandaloneTableDefaultSharding [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:31:51.376110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:31:51.376203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:51.376300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:31:51.376361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:31:51.376428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:31:51.376463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:31:51.376533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:51.376628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-24T20:31:51.377505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:31:51.377922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:31:51.467375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:31:51.467445Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:51.485210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:31:51.485697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:31:51.485891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:31:51.494586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:31:51.494818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:31:51.495608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:51.495936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:31:51.499418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:51.499681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:31:51.501095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:51.501184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:51.501490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:31:51.501552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:51.501618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:31:51.501749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.510321Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:31:51.680924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:51.681195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.681492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:31:51.681570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:31:51.681867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:31:51.681965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:51.684744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:51.684958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:31:51.685221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.685300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:31:51.685350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:31:51.685390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:31:51.687494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.687555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:31:51.687602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:31:51.689539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.689591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.689631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:51.689684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:31:51.693401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:51.695865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:31:51.696072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:31:51.697151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:51.697312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:51.697397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:51.697735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:31:51.697796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:51.698048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:51.698160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:31:51.700690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:51.700744Z node 1 :FLAT_TX_SCHEMESHARD ... 
p:180: Close pipe to deleted shardIdx 72057594046678944:59 tabletId 72075186233409604 2025-06-24T20:32:03.422687Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-24T20:32:03.422714Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-24T20:32:03.425829Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T20:32:03.425862Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T20:32:03.425976Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-24T20:32:03.425992Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-24T20:32:03.426056Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:23 2025-06-24T20:32:03.426073Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:23 tabletId 72075186233409568 2025-06-24T20:32:03.426133Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:21 2025-06-24T20:32:03.426149Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:21 tabletId 72075186233409566 2025-06-24T20:32:03.426208Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:19 2025-06-24T20:32:03.426225Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:19 tabletId 72075186233409564 2025-06-24T20:32:03.426281Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:17 2025-06-24T20:32:03.426298Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:17 tabletId 72075186233409562 2025-06-24T20:32:03.426364Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:15 2025-06-24T20:32:03.426382Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:15 tabletId 72075186233409560 2025-06-24T20:32:03.426420Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:13 2025-06-24T20:32:03.426435Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:13 tabletId 72075186233409558 2025-06-24T20:32:03.427201Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:11 2025-06-24T20:32:03.427242Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:11 tabletId 72075186233409556 2025-06-24T20:32:03.427316Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:9 2025-06-24T20:32:03.427345Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:9 tabletId 72075186233409554 2025-06-24T20:32:03.427431Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:36 2025-06-24T20:32:03.427459Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:36 tabletId 72075186233409581 2025-06-24T20:32:03.427547Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:38 2025-06-24T20:32:03.427574Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:38 tabletId 72075186233409583 2025-06-24T20:32:03.427649Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:40 2025-06-24T20:32:03.427677Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:40 tabletId 72075186233409585 2025-06-24T20:32:03.427737Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:32 2025-06-24T20:32:03.427762Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:32 tabletId 72075186233409577 2025-06-24T20:32:03.427837Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:34 2025-06-24T20:32:03.427862Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:34 tabletId 72075186233409579 2025-06-24T20:32:03.431117Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:28 2025-06-24T20:32:03.431172Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:28 tabletId 72075186233409573 2025-06-24T20:32:03.431244Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:30 2025-06-24T20:32:03.431267Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:30 tabletId 72075186233409575 2025-06-24T20:32:03.431339Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:26 2025-06-24T20:32:03.431357Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:26 tabletId 72075186233409571 2025-06-24T20:32:03.431417Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:24 2025-06-24T20:32:03.431433Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:24 tabletId 72075186233409569 2025-06-24T20:32:03.431483Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:57 2025-06-24T20:32:03.431501Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:57 tabletId 72075186233409602 2025-06-24T20:32:03.431543Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:53 
2025-06-24T20:32:03.431558Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:53 tabletId 72075186233409598 2025-06-24T20:32:03.431599Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:55 2025-06-24T20:32:03.431615Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:55 tabletId 72075186233409600 2025-06-24T20:32:03.431661Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:49 2025-06-24T20:32:03.431679Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:49 tabletId 72075186233409594 2025-06-24T20:32:03.434150Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:47 2025-06-24T20:32:03.434184Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:47 tabletId 72075186233409592 2025-06-24T20:32:03.434242Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:51 2025-06-24T20:32:03.434257Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:51 tabletId 72075186233409596 2025-06-24T20:32:03.434315Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:45 2025-06-24T20:32:03.434331Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:45 tabletId 72075186233409590 2025-06-24T20:32:03.434369Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:43 2025-06-24T20:32:03.434384Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:43 tabletId 72075186233409588 2025-06-24T20:32:03.434423Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:41 2025-06-24T20:32:03.434454Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:41 tabletId 72075186233409586 2025-06-24T20:32:03.434550Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 109 2025-06-24T20:32:03.435611Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyDir/ColumnTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:32:03.435866Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyDir/ColumnTable" took 296us result status StatusPathDoesNotExist 2025-06-24T20:32:03.436064Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyDir/ColumnTable\', error: path hasn\'t 
been resolved, nearest resolved path: \'/MyRoot/MyDir\' (id: [OwnerId: 72057594046678944, LocalPathId: 2])" Path: "/MyRoot/MyDir/ColumnTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/MyDir" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "MyDir" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T20:32:03.436777Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 6 SchemeshardId: 72057594046678944 Options { }, at schemeshard: 72057594046678944 2025-06-24T20:32:03.436862Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 6 took 93us result status StatusPathDoesNotExist 2025-06-24T20:32:03.436931Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'\', error: path is empty" Path: "" PathId: 6 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> THiveTest::TestGetStorageInfoDeleteTabletBeforeAssigned [GOOD] Test command err: 2025-06-24T20:31:12.001198Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-24T20:31:12.038677Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:12.038971Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-24T20:31:12.041112Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:31:12.041542Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-24T20:31:12.042682Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-24T20:31:12.042742Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-24T20:31:12.043716Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:76:2077] ControllerId# 72057594037932033 2025-06-24T20:31:12.043763Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-24T20:31:12.043888Z node 1 :BS_NODE DEBUG: 
{NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-24T20:31:12.044047Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-24T20:31:12.056757Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-24T20:31:12.056837Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-24T20:31:12.058935Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:75:2076] Create Queue# [1:84:2082] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.059117Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:75:2076] Create Queue# [1:85:2083] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.060060Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:75:2076] Create Queue# [1:86:2084] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.060256Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:75:2076] Create Queue# [1:87:2085] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.060430Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:75:2076] Create Queue# [1:88:2086] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.060572Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:75:2076] Create Queue# [1:89:2087] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.060749Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:75:2076] Create Queue# [1:90:2088] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.060804Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-24T20:31:12.060900Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:76:2077] 2025-06-24T20:31:12.060938Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:76:2077] 2025-06-24T20:31:12.060988Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-24T20:31:12.061033Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-24T20:31:12.061879Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-24T20:31:12.061992Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-24T20:31:12.064816Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:12.064971Z node 2 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 2 PDiskId# 1 Path# "SectorMap:1:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-24T20:31:12.065516Z node 2 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# 
/Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:31:12.065766Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-24T20:31:12.066619Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:101:2077] ControllerId# 72057594037932033 2025-06-24T20:31:12.066653Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-24T20:31:12.066713Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-24T20:31:12.066806Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-24T20:31:12.084481Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-24T20:31:12.084545Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-24T20:31:12.086400Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:100:2076] Create Queue# [2:108:2081] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.086576Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:100:2076] Create Queue# [2:109:2082] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.086735Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:100:2076] Create Queue# [2:110:2083] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.086859Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:100:2076] Create Queue# [2:111:2084] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.087003Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:100:2076] Create Queue# [2:112:2085] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.087129Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:100:2076] Create Queue# [2:113:2086] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.107205Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:100:2076] Create Queue# [2:114:2087] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.107332Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-24T20:31:12.107429Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [2:101:2077] 2025-06-24T20:31:12.107461Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [2:101:2077] 2025-06-24T20:31:12.107516Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-24T20:31:12.107557Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-24T20:31:12.108070Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-24T20:31:12.108149Z node 3 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-24T20:31:12.110888Z node 3 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { 
FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:12.111032Z node 3 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 3 PDiskId# 1 Path# "SectorMap:2:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-24T20:31:12.111488Z node 3 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:31:12.111683Z node 3 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-24T20:31:12.112499Z node 3 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [3:122:2077] ControllerId# 72057594037932033 2025-06-24T20:31:12.112533Z node 3 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-24T20:31:12.112592Z node 3 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-24T20:31:12.112694Z node 3 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-24T20:31:12.130566Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-24T20:31:12.130625Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-24T20:31:12.135222Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:121:2076] Create Queue# [3:129:2081] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.135400Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:121:2076] Create Queue# [3:130:2082] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.135530Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:121:2076] Create Queue# [3:131:2083] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.135701Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:121:2076] Create Queue# [3:132:2084] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.135846Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:121:2076] Create Queue# [3:133:2085] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.135987Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:121:2076] Create Queue# [3:134:2086] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.136121Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:121:2076] Create Queue# [3:135:2087] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.136150Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-24T20:31:12.136218Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [3:122:2077] 2025-06-24T20:31:12.136247Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72 ... 
strategy_restore.h:65: [8729fbeaec2f6015] restore Id# [72057594037927937:2:4:0:0:483:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T20:32:12.984980Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [8729fbeaec2f6015] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:4:0:0:483:1] Marker# BPG33 2025-06-24T20:32:12.985009Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [8729fbeaec2f6015] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:4:0:0:483:1] Marker# BPG32 2025-06-24T20:32:12.985096Z node 65 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [65:39:2082] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:4:0:0:483:1] FDS# 483 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T20:32:12.985996Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [8729fbeaec2f6015] received {EvVPutResult Status# OK ID# [72057594037927937:2:4:0:0:483:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 19 } Cost# 83803 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 20 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-24T20:32:12.986064Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [8729fbeaec2f6015] Result# TEvPutResult {Id# [72057594037927937:2:4:0:0:483:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-24T20:32:12.986100Z node 65 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [8729fbeaec2f6015] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:4:0:0:483:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-24T20:32:12.986185Z node 65 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.415 sample PartId# [72057594037927937:2:4:0:0:483:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 65 } TEvVPutResult{ TimestampMs# 1.33 VDiskId# [0:1:0:0:0] NodeId# 65 Status# OK } ] } 2025-06-24T20:32:12.986281Z node 65 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:4:0:0:483:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-24T20:32:12.986362Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} commited cookie 1 for step 4 2025-06-24T20:32:12.986630Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [65:316:2294] 2025-06-24T20:32:12.986672Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [65:316:2294] 2025-06-24T20:32:12.986754Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:32:12.986821Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 65 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [65:272:2262] 2025-06-24T20:32:12.986897Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [65:316:2294] 2025-06-24T20:32:12.986941Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: 
TClient[72057594037927937] received pending shutdown [65:316:2294] 2025-06-24T20:32:12.987003Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [65:316:2294] 2025-06-24T20:32:12.987061Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [65:316:2294] 2025-06-24T20:32:12.987189Z node 65 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [65:316:2294] 2025-06-24T20:32:12.987320Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [65:316:2294] 2025-06-24T20:32:12.987388Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [65:316:2294] 2025-06-24T20:32:12.987422Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [65:316:2294] 2025-06-24T20:32:12.987475Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [65:316:2294] 2025-06-24T20:32:12.987516Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [65:316:2294] 2025-06-24T20:32:12.987563Z node 65 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [65:315:2293] EventType# 268697621 2025-06-24T20:32:12.994926Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [65:319:2297] 2025-06-24T20:32:12.994986Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [65:319:2297] 2025-06-24T20:32:12.995087Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:32:12.995189Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 65 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [65:272:2262] 2025-06-24T20:32:12.995270Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [65:319:2297] 2025-06-24T20:32:12.995320Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [65:319:2297] 2025-06-24T20:32:12.995371Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [65:319:2297] 2025-06-24T20:32:12.995431Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [65:319:2297] 2025-06-24T20:32:12.995533Z node 65 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [65:319:2297] 2025-06-24T20:32:12.995663Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [65:319:2297] 2025-06-24T20:32:12.995718Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [65:319:2297] 2025-06-24T20:32:12.995760Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [65:319:2297] 2025-06-24T20:32:12.995834Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [65:319:2297] 
2025-06-24T20:32:12.995875Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [65:319:2297] 2025-06-24T20:32:12.995929Z node 65 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [65:318:2296] EventType# 268697615 2025-06-24T20:32:12.996079Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxDeleteTablet} queued, type NKikimr::NHive::TTxDeleteTablet 2025-06-24T20:32:12.996148Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxDeleteTablet} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:32:12.996344Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxDeleteTablet} hope 1 -> done Change{5, redo 102b alter 0b annex 0, ~{ 1 } -{ }, 0 gb} 2025-06-24T20:32:12.996412Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxDeleteTablet} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:32:12.996584Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:6} Tx{6, NKikimr::NHive::TTxDeleteTabletResult} queued, type NKikimr::NHive::TTxDeleteTabletResult 2025-06-24T20:32:12.996638Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:6} Tx{6, NKikimr::NHive::TTxDeleteTabletResult} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:32:12.996849Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:6} Tx{6, NKikimr::NHive::TTxDeleteTabletResult} hope 1 -> done Change{6, redo 106b alter 0b annex 0, ~{ 16, 1 } -{ }, 0 gb} 2025-06-24T20:32:12.996925Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:6} Tx{6, NKikimr::NHive::TTxDeleteTabletResult} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:32:13.008018Z node 65 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [185eac4b9c06d110] bootstrap ActorId# [65:322:2300] Group# 0 BlobCount# 1 BlobIDs# [[72057594037927937:2:5:0:0:157:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-24T20:32:13.008183Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [185eac4b9c06d110] Id# [72057594037927937:2:5:0:0:157:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:32:13.008250Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [185eac4b9c06d110] restore Id# [72057594037927937:2:5:0:0:157:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T20:32:13.008329Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [185eac4b9c06d110] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:5:0:0:157:1] Marker# BPG33 2025-06-24T20:32:13.008388Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [185eac4b9c06d110] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:5:0:0:157:1] Marker# BPG32 2025-06-24T20:32:13.008574Z node 65 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [65:39:2082] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:5:0:0:157:1] FDS# 157 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T20:32:13.009689Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [185eac4b9c06d110] received {EvVPutResult Status# OK ID# [72057594037927937:2:5:0:0:157:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 20 } Cost# 81236 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { 
SequenceId: 1 MsgId: 21 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-24T20:32:13.009810Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [185eac4b9c06d110] Result# TEvPutResult {Id# [72057594037927937:2:5:0:0:157:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-24T20:32:13.009880Z node 65 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [185eac4b9c06d110] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:5:0:0:157:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-24T20:32:13.010038Z node 65 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.812 sample PartId# [72057594037927937:2:5:0:0:157:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 65 } TEvVPutResult{ TimestampMs# 1.946 VDiskId# [0:1:0:0:0] NodeId# 65 Status# OK } ] } 2025-06-24T20:32:13.010230Z node 65 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:5:0:0:157:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-24T20:32:13.010400Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:6} commited cookie 1 for step 5 >> HullReplWriteSst::Basic [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlapNaming::AlterColumnTableFailed [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:31:51.315592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:31:51.315681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:51.315739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:31:51.315790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:31:51.315857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:31:51.315890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:31:51.315948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:51.316021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-24T20:31:51.316776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:31:51.317166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:31:51.406558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:31:51.406632Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:51.421705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:31:51.422085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:31:51.422286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:31:51.437166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:31:51.437428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:31:51.438148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:51.438512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:31:51.442441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:51.442652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:31:51.443982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:51.444077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:51.444319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:31:51.444401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:51.444472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:31:51.444575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.458073Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:31:51.605210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:51.605456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.605774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:31:51.605831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:31:51.606088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:31:51.606166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:51.612227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:51.612505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:31:51.612759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.612838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:31:51.612885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:31:51.612919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:31:51.615518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.615580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:31:51.615631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:31:51.618141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.618202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:51.618254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:51.618319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:31:51.622298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:51.624849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:31:51.625075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:31:51.626175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:51.626332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:51.626380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:51.626764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:31:51.626831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:51.627032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:51.627126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:31:51.629719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:51.629773Z node 1 :FLAT_TX_SCHEMESHARD ... 
>FrontStep: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T20:32:05.832672Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.832892Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.833040Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.833155Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.833272Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.833377Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.833459Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.833529Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.836586Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.836801Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.836923Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.837177Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.837694Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.837925Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.839101Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.839225Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.840236Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.840348Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.840515Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.840631Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.840673Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.840730Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.840789Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.840890Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.840942Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.840986Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.841023Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.841069Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.841110Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.841185Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.841220Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T20:32:05.841332Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T20:32:05.841364Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:32:05.841397Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T20:32:05.841424Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:32:05.841456Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-24T20:32:05.841528Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:2780:3979] message: TxId: 101 2025-06-24T20:32:05.841579Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T20:32:05.841641Z node 2 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T20:32:05.841671Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T20:32:05.842663Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 66 2025-06-24T20:32:05.846130Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T20:32:05.846190Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:2781:3980] TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-24T20:32:05.849019Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterColumnTable AlterColumnTable { Name: "TestTable" AlterSchema { AddColumns { Name: "New Column" Type: "Int32" } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:32:05.849243Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: alter_table.cpp:282: TAlterColumnTable Propose, path: /MyRoot/TestTable, opId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:32:05.849504Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 102:1, propose status:StatusSchemeError, reason: update parse error: Invalid name for column 'New Column'. in alter constructor STANDALONE_UPDATE, at schemeshard: 72057594046678944 2025-06-24T20:32:05.852731Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 102, response: Status: StatusSchemeError Reason: "update parse error: Invalid name for column \'New Column\'. in alter constructor STANDALONE_UPDATE" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:32:05.852913Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusSchemeError, reason: update parse error: Invalid name for column 'New Column'. 
in alter constructor STANDALONE_UPDATE, operation: ALTER COLUMN TABLE, path: /MyRoot/TestTable TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T20:32:05.853197Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T20:32:05.853233Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T20:32:05.853587Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T20:32:05.853665Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:32:05.853692Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:3580:4709] TestWaitNotification: OK eventTxId 102 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestRandom_Mirror3dc [GOOD] Test command err: RandomSeed# 16651718668671270572 Step = 0 SEND TEvPut with key [1:1:0:0:0:51943:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:51943:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 1 SEND TEvPut with key [1:1:1:0:0:37868:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:37868:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 2 SEND TEvPut with key [1:1:2:0:0:85877:0] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:85877:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 3 SEND TEvPut with key [1:1:3:0:0:192081:0] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:192081:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 4 SEND TEvPut with key [1:1:4:0:0:267203:0] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:267203:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Stop node 3 2025-06-24T20:28:18.454676Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Step = 5 SEND TEvPut with key [1:1:5:0:0:502135:0] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:502135:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 6 SEND TEvPut with key [1:1:6:0:0:377427:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:377427:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Stop node 4 2025-06-24T20:28:18.680731Z 1 00h01m10.060512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 Step = 7 SEND TEvPut with key [1:1:7:0:0:48850:0] TEvPutResult: TEvPutResult {Id# [1:1:7:0:0:48850:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 8 SEND TEvPut with key [1:1:8:0:0:411812:0] TEvPutResult: TEvPutResult {Id# [1:1:8:0:0:411812:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 9 SEND TEvPut with key [1:1:9:0:0:293766:0] TEvPutResult: TEvPutResult {Id# [1:1:9:0:0:293766:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start node 3 Step = 10 SEND TEvPut with key [1:1:10:0:0:127358:0] TEvPutResult: TEvPutResult {Id# [1:1:10:0:0:127358:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 11 SEND TEvPut with key [1:1:11:0:0:282945:0] TEvPutResult: TEvPutResult {Id# [1:1:11:0:0:282945:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 12 SEND TEvPut with 
key [1:1:12:0:0:34864:0] TEvPutResult: TEvPutResult {Id# [1:1:12:0:0:34864:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 13 SEND TEvPut with key [1:1:13:0:0:363096:0] TEvPutResult: TEvPutResult {Id# [1:1:13:0:0:363096:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 14 SEND TEvPut with key [1:1:14:0:0:179270:0] TEvPutResult: TEvPutResult {Id# [1:1:14:0:0:179270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 15 SEND TEvPut with key [1:1:15:0:0:358611:0] TEvPutResult: TEvPutResult {Id# [1:1:15:0:0:358611:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 16 SEND TEvPut with key [1:1:16:0:0:136892:0] TEvPutResult: TEvPutResult {Id# [1:1:16:0:0:136892:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 17 SEND TEvPut with key [1:1:17:0:0:517733:0] TEvPutResult: TEvPutResult {Id# [1:1:17:0:0:517733:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 18 SEND TEvPut with key [1:1:18:0:0:250802:0] TEvPutResult: TEvPutResult {Id# [1:1:18:0:0:250802:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 19 SEND TEvPut with key [1:1:19:0:0:199490:0] TEvPutResult: TEvPutResult {Id# [1:1:19:0:0:199490:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 20 SEND TEvPut with key [1:1:20:0:0:244269:0] TEvPutResult: TEvPutResult {Id# [1:1:20:0:0:244269:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 21 SEND TEvPut with key [1:1:21:0:0:329606:0] TEvPutResult: TEvPutResult {Id# [1:1:21:0:0:329606:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 22 SEND TEvPut with key [1:1:22:0:0:322338:0] TEvPutResult: TEvPutResult {Id# [1:1:22:0:0:322338:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 23 SEND TEvPut with key [1:1:23:0:0:519258:0] TEvPutResult: TEvPutResult {Id# [1:1:23:0:0:519258:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 24 SEND TEvPut with key [1:1:24:0:0:56036:0] TEvPutResult: TEvPutResult {Id# [1:1:24:0:0:56036:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 25 SEND TEvPut with key [1:1:25:0:0:514591:0] TEvPutResult: TEvPutResult {Id# [1:1:25:0:0:514591:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Stop node 7 2025-06-24T20:28:20.402366Z 1 00h01m30.100512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 8 Step = 26 SEND TEvPut with key [1:1:26:0:0:5927:0] TEvPutResult: TEvPutResult {Id# [1:1:26:0:0:5927:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 27 SEND TEvPut with key [1:1:27:0:0:148482:0] TEvPutResult: TEvPutResult {Id# [1:1:27:0:0:148482:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 28 SEND TEvPut with key [1:1:28:0:0:6043:0] TEvPutResult: TEvPutResult {Id# [1:1:28:0:0:6043:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 29 SEND TEvPut with key [1:1:29:0:0:265170:0] TEvPutResult: TEvPutResult {Id# [1:1:29:0:0:265170:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 30 SEND TEvPut with key [1:1:30:0:0:264716:0] TEvPutResult: TEvPutResult {Id# [1:1:30:0:0:264716:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Compact vdisk 3 Step = 31 SEND TEvPut with key [1:1:31:0:0:168116:0] TEvPutResult: TEvPutResult {Id# [1:1:31:0:0:168116:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 32 
SEND TEvPut with key [1:1:32:0:0:444749:0] TEvPutResult: TEvPutResult {Id# [1:1:32:0:0:444749:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 33 SEND TEvPut with key [1:1:33:0:0:350254:0] TEvPutResult: TEvPutResult {Id# [1:1:33:0:0:350254:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 34 SEND TEvPut with key [1:1:34:0:0:145950:0] TEvPutResult: TEvPutResult {Id# [1:1:34:0:0:145950:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 35 SEND TEvPut with key [1:1:35:0:0:358611:0] TEvPutResult: TEvPutResult {Id# [1:1:35:0:0:358611:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 36 SEND TEvPut with key [1:1:36:0:0:139148:0] TEvPutResult: TEvPutResult {Id# [1:1:36:0:0:139148:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 37 SEND TEvPut with key [1:1:37:0:0:200198:0] TEvPutResult: TEvPutResult {Id# [1:1:37:0:0:200198:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 38 SEND TEvPut with key [1:1:38:0:0:185170:0] TEvPutResult: TEvPutResult {Id# [1:1:38:0:0:185170:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 39 SEND TEvPut with key [1:1:39:0:0:297271:0] TEvPutResult: TEvPutResult {Id# [1:1:39:0:0:297271:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 40 SEND TEvPut with key [1:1:40:0:0:419670:0] TEvPutResult: TEvPutResult {Id# [1:1:40:0:0:419670:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 41 SEND TEvPut with key [1:1:41:0:0:218956:0] TEvPutResult: TEvPutResult {Id# [1:1:41:0:0:218956:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 42 SEND TEvPut with key [1:1:42:0:0:154723:0] TEvPutResult: TEvPutResult {Id# [1:1:42:0:0:154723:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 43 SEND TEvPut with key [1:1:43:0:0:13332:0] TEvPutResult: TEvPutResult {Id# [1:1:43:0:0:13332:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 44 SEND TEvPut with key [1:1:44:0:0:448892:0] TEvPutResult: TEvPutResult {Id# [1:1:44:0:0:448892:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 45 SEND TEvPut with key [1:1:45:0:0:103231:0] TEvPutResult: TEvPutResult {Id# [1:1:45:0:0:103231:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 46 SEND TEvPut with key [1:1:46:0:0:295973:0] TEvPutResult: TEvPutResult {Id# [1:1:46:0:0:295973:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 47 SEND TEvPut with key [1:1:47:0:0:402799:0] TEvPutResult: TEvPutResult {Id# [1:1:47:0:0:402799:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 48 SEND TEvPut with key [1:1:48:0:0:165045:0] TEvPutResult: TEvPutResult {Id# [1:1:48:0:0:165045:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 49 SEND TEvPut with key [1:1:49:0:0:360099:0] TEvPutResult: TEvPutResult {Id# [1:1:49:0:0:360099:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 50 SEND TEvPut with key [1:1:50:0:0:97222:0] TEvPutResult: TEvPutResult {Id# [1:1:50:0:0:97222:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 51 SEND TEvPut with key [1:1:51:0:0:303396:0] TEvPutResult: TEvPutResult {Id# [1:1:51:0:0:303396:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 52 SEND TEvPut with key [1:1:52:0:0:304876:0] TEvPutResult: TEvPutResult {Id# [1:1:52:0:0:304876:0] Status# OK 
StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 53 SEND TEvPut with key [1:1:53:0:0:375063:0] TEvPutResult: TEvPutResult {Id# [1:1:53:0:0:375063:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Start node 4 Step = 54 SEND TEvPut with key [1:1:54:0:0:288044:0] TEvPutResult: TEvPutResult {Id# [1:1:54:0:0:288044:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999866} Step = 55 SEND TEvPut with key [1:1:55:0:0:181559:0] TEvPutResult: TEvPutResult {Id# [1:1:55:0:0:181559:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 56 SEND TEvPut with key [1:1:56:0:0:42993:0] TEvPutResult: TEvPutResult {Id# [1:1:56:0:0:42993:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999866} Step = 57 SEND TEvPut with key [1:1:57:0:0:424399:0] TEvPutResult: TEvPutResult {Id# [1:1:57:0:0:424399:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 58 SEND TEvPut with key [1:1:58:0:0:169341:0] TEvPutResult: TEvPutResult {Id# [1:1:58:0:0:169341:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999902} Step = 59 SEND TEvPut with key [1:1:59:0:0:405932:0] TEvPutResult: TEvPutResult {Id# [1:1:59:0:0:405932:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999902} Step = 60 SEND TEvPut with key [1:1:60:0:0:190148:0] TEvPutResult: TEvPutResult {Id# [1:1:60:0:0:190148:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Stop node 3 2025-06-24T20:28:22.613888Z 1 00h02m00.150512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Wipe node 0 2025-06-24T20:28:22.791530Z 1 00h02m10.161024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-24T20:28:22.793568Z 1 00h02m10.161024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4112017852504209790] Step = 61 SEND TEvPut with key [1:1:61:0:0:500240:0] 2025-06-24T20:28:24.940492Z 1 00h03m50.161024s :BS_PROXY ERROR: Group# 2181038080 StateEstablishingSessions Wakeup TIMEOUT Marker# DSP12 TEvPutResult: TEvPutResult {Id# [1:1:61:0:0:500240:0] Status# ERROR StatusFlags# { } ErrorReason# "Timeout while establishing sessions (DSPE4)." ApproximateFreeSpaceShare# 0} Step = 62 SEND TEvPut with key [1:1:62:0:0:354994:0] TEvPutResult: TEvPutResult {Id# [1:1:62:0:0:354994:0] Status# ERROR StatusFlags# { } ErrorReason# "Timeout while establishing sessions (DSPE4)." ApproximateFreeSpac ... 
1:1:945:0:0:76599:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 946 SEND TEvPut with key [1:1:946:0:0:24822:0] TEvPutResult: TEvPutResult {Id# [1:1:946:0:0:24822:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999744} Compact vdisk 2 Step = 947 SEND TEvPut with key [1:1:947:0:0:100167:0] TEvPutResult: TEvPutResult {Id# [1:1:947:0:0:100167:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 948 SEND TEvPut with key [1:1:948:0:0:112126:0] TEvPutResult: TEvPutResult {Id# [1:1:948:0:0:112126:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 949 SEND TEvPut with key [1:1:949:0:0:525378:0] TEvPutResult: TEvPutResult {Id# [1:1:949:0:0:525378:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Step = 950 SEND TEvPut with key [1:1:950:0:0:410875:0] TEvPutResult: TEvPutResult {Id# [1:1:950:0:0:410875:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 951 SEND TEvPut with key [1:1:951:0:0:113503:0] TEvPutResult: TEvPutResult {Id# [1:1:951:0:0:113503:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 952 SEND TEvPut with key [1:1:952:0:0:431140:0] TEvPutResult: TEvPutResult {Id# [1:1:952:0:0:431140:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Step = 953 SEND TEvPut with key [1:1:953:0:0:509293:0] TEvPutResult: TEvPutResult {Id# [1:1:953:0:0:509293:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Stop node 3 2025-06-24T20:31:32.006189Z 1 00h28m00.802048s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Step = 954 SEND TEvPut with key [1:1:954:0:0:286395:0] TEvPutResult: TEvPutResult {Id# [1:1:954:0:0:286395:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999768} Stop node 1 2025-06-24T20:31:32.557327Z 1 00h28m10.803584s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 Step = 955 SEND TEvPut with key [1:1:955:0:0:219270:0] TEvPutResult: TEvPutResult {Id# [1:1:955:0:0:219270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999792} Start node 1 Step = 956 SEND TEvPut with key [1:1:956:0:0:274971:0] TEvPutResult: TEvPutResult {Id# [1:1:956:0:0:274971:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999792} Step = 957 SEND TEvPut with key [1:1:957:0:0:487884:0] TEvPutResult: TEvPutResult {Id# [1:1:957:0:0:487884:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999744} Start node 3 Step = 958 SEND TEvPut with key [1:1:958:0:0:327302:0] TEvPutResult: TEvPutResult {Id# [1:1:958:0:0:327302:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 959 SEND TEvPut with key [1:1:959:0:0:385917:0] TEvPutResult: TEvPutResult {Id# [1:1:959:0:0:385917:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999744} Step = 960 SEND TEvPut with key [1:1:960:0:0:200998:0] TEvPutResult: TEvPutResult {Id# [1:1:960:0:0:200998:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 961 SEND TEvPut with key [1:1:961:0:0:61147:0] TEvPutResult: TEvPutResult {Id# [1:1:961:0:0:61147:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 962 SEND TEvPut with key [1:1:962:0:0:237906:0] TEvPutResult: TEvPutResult {Id# [1:1:962:0:0:237906:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999744} Step = 963 SEND TEvPut with key [1:1:963:0:0:347273:0] TEvPutResult: TEvPutResult {Id# [1:1:963:0:0:347273:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 964 SEND TEvPut with 
key [1:1:964:0:0:181317:0] TEvPutResult: TEvPutResult {Id# [1:1:964:0:0:181317:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999805} Step = 965 SEND TEvPut with key [1:1:965:0:0:456096:0] TEvPutResult: TEvPutResult {Id# [1:1:965:0:0:456096:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 966 SEND TEvPut with key [1:1:966:0:0:93776:0] TEvPutResult: TEvPutResult {Id# [1:1:966:0:0:93776:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999805} Step = 967 SEND TEvPut with key [1:1:967:0:0:447659:0] TEvPutResult: TEvPutResult {Id# [1:1:967:0:0:447659:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999805} Step = 968 SEND TEvPut with key [1:1:968:0:0:14298:0] TEvPutResult: TEvPutResult {Id# [1:1:968:0:0:14298:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 969 SEND TEvPut with key [1:1:969:0:0:92781:0] TEvPutResult: TEvPutResult {Id# [1:1:969:0:0:92781:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999805} Step = 970 SEND TEvPut with key [1:1:970:0:0:334566:0] TEvPutResult: TEvPutResult {Id# [1:1:970:0:0:334566:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999744} Stop node 0 2025-06-24T20:31:34.217305Z 9 00h28m40.805120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [9:127437:347] ServerId# [1:128490:169] TabletId# 72057594037932033 PipeClientId# [9:127437:347] 2025-06-24T20:31:34.217643Z 8 00h28m40.805120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [8:158073:17] ServerId# [1:158083:4095] TabletId# 72057594037932033 PipeClientId# [8:158073:17] 2025-06-24T20:31:34.217805Z 7 00h28m40.805120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:157015:17] ServerId# [1:157022:3968] TabletId# 72057594037932033 PipeClientId# [7:157015:17] 2025-06-24T20:31:34.217978Z 6 00h28m40.805120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:134102:17] ServerId# [1:134109:1009] TabletId# 72057594037932033 PipeClientId# [6:134102:17] 2025-06-24T20:31:34.218130Z 5 00h28m40.805120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:154110:17] ServerId# [1:154118:3587] TabletId# 72057594037932033 PipeClientId# [5:154110:17] 2025-06-24T20:31:34.218262Z 4 00h28m40.805120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:163042:17] ServerId# [1:163052:4693] TabletId# 72057594037932033 PipeClientId# [4:163042:17] 2025-06-24T20:31:34.218386Z 3 00h28m40.805120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:153018:17] ServerId# [1:153028:3464] TabletId# 72057594037932033 PipeClientId# [3:153018:17] 2025-06-24T20:31:34.218524Z 2 00h28m40.805120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:162079:17] ServerId# [1:162086:4584] TabletId# 72057594037932033 PipeClientId# [2:162079:17] Step = 971 SEND TEvPut with key [1:1:971:0:0:439384:0] TEvPutResult: TEvPutResult {Id# [1:1:971:0:0:439384:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99978} Step = 972 SEND TEvPut with key [1:1:972:0:0:252551:0] TEvPutResult: TEvPutResult {Id# [1:1:972:0:0:252551:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 973 SEND TEvPut with key [1:1:973:0:0:39982:0] 
TEvPutResult: TEvPutResult {Id# [1:1:973:0:0:39982:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Stop node 2 Step = 974 SEND TEvPut with key [1:1:974:0:0:526796:0] TEvPutResult: TEvPutResult {Id# [1:1:974:0:0:526796:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999756} Start node 0 Step = 975 SEND TEvPut with key [1:1:975:0:0:337763:0] TEvPutResult: TEvPutResult {Id# [1:1:975:0:0:337763:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Stop node 2 Step = 976 SEND TEvPut with key [1:1:976:0:0:475740:0] TEvPutResult: TEvPutResult {Id# [1:1:976:0:0:475740:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 977 SEND TEvPut with key [1:1:977:0:0:169780:0] TEvPutResult: TEvPutResult {Id# [1:1:977:0:0:169780:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 978 SEND TEvPut with key [1:1:978:0:0:481535:0] TEvPutResult: TEvPutResult {Id# [1:1:978:0:0:481535:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 979 SEND TEvPut with key [1:1:979:0:0:24668:0] TEvPutResult: TEvPutResult {Id# [1:1:979:0:0:24668:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 980 SEND TEvPut with key [1:1:980:0:0:159890:0] TEvPutResult: TEvPutResult {Id# [1:1:980:0:0:159890:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 981 SEND TEvPut with key [1:1:981:0:0:111300:0] TEvPutResult: TEvPutResult {Id# [1:1:981:0:0:111300:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 982 SEND TEvPut with key [1:1:982:0:0:355914:0] TEvPutResult: TEvPutResult {Id# [1:1:982:0:0:355914:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 983 SEND TEvPut with key [1:1:983:0:0:399106:0] TEvPutResult: TEvPutResult {Id# [1:1:983:0:0:399106:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 984 SEND TEvPut with key [1:1:984:0:0:347759:0] TEvPutResult: TEvPutResult {Id# [1:1:984:0:0:347759:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 985 SEND TEvPut with key [1:1:985:0:0:261994:0] TEvPutResult: TEvPutResult {Id# [1:1:985:0:0:261994:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 986 SEND TEvPut with key [1:1:986:0:0:101043:0] TEvPutResult: TEvPutResult {Id# [1:1:986:0:0:101043:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 987 SEND TEvPut with key [1:1:987:0:0:138774:0] TEvPutResult: TEvPutResult {Id# [1:1:987:0:0:138774:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 988 SEND TEvPut with key [1:1:988:0:0:441913:0] TEvPutResult: TEvPutResult {Id# [1:1:988:0:0:441913:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 989 SEND TEvPut with key [1:1:989:0:0:134469:0] TEvPutResult: TEvPutResult {Id# [1:1:989:0:0:134469:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 990 SEND TEvPut with key [1:1:990:0:0:123825:0] TEvPutResult: TEvPutResult {Id# [1:1:990:0:0:123825:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 991 SEND TEvPut with key [1:1:991:0:0:40387:0] TEvPutResult: TEvPutResult {Id# [1:1:991:0:0:40387:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 992 SEND TEvPut with key [1:1:992:0:0:193000:0] TEvPutResult: TEvPutResult {Id# [1:1:992:0:0:193000:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Stop node 7 2025-06-24T20:31:36.838202Z 1 00h29m20.816656s :PIPE_SERVER 
ERROR: [72057594037932033] NodeDisconnected NodeId# 8 Step = 993 SEND TEvPut with key [1:1:993:0:0:455894:0] TEvPutResult: TEvPutResult {Id# [1:1:993:0:0:455894:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Compact vdisk 0 Step = 994 SEND TEvPut with key [1:1:994:0:0:54378:0] TEvPutResult: TEvPutResult {Id# [1:1:994:0:0:54378:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Compact vdisk 6 Step = 995 SEND TEvPut with key [1:1:995:0:0:487669:0] TEvPutResult: TEvPutResult {Id# [1:1:995:0:0:487669:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 996 SEND TEvPut with key [1:1:996:0:0:194641:0] TEvPutResult: TEvPutResult {Id# [1:1:996:0:0:194641:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 997 SEND TEvPut with key [1:1:997:0:0:74188:0] TEvPutResult: TEvPutResult {Id# [1:1:997:0:0:74188:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 998 SEND TEvPut with key [1:1:998:0:0:136082:0] TEvPutResult: TEvPutResult {Id# [1:1:998:0:0:136082:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 999 SEND TEvPut with key [1:1:999:0:0:145518:0] TEvPutResult: TEvPutResult {Id# [1:1:999:0:0:145518:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Starting nodes Start compaction 1 Start checking ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::ExplainColumnsReorder [GOOD] Test command err: Trying to start YDB, gRPC: 6996, MsgBus: 13292 2025-06-24T20:30:37.429684Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616016617998749:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:37.429745Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003f0c/r3tmp/tmpHelNhM/pdisk_1.dat 2025-06-24T20:30:37.994726Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:37.997033Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:37.997118Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:38.002948Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6996, node 1 2025-06-24T20:30:38.245259Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:30:38.245280Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:30:38.245288Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:30:38.245438Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:30:38.454284Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13292 TClient is connected to server localhost:13292 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:30:39.399492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:30:39.487397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:30:42.431353Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616016617998749:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:42.431426Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:30:42.648517Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616038092835839:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:42.648664Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:42.648804Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616038092835851:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:42.653911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:30:42.675101Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616038092835853:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:30:42.772116Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616038092835905:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 65107, MsgBus: 8641 2025-06-24T20:30:43.843729Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616045295274385:2134];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003f0c/r3tmp/tmpZzDTCh/pdisk_1.dat 2025-06-24T20:30:43.912340Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:30:43.987365Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:43.993922Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519616045295274289:2079] 1750797043780879 != 1750797043780882 2025-06-24T20:30:44.001906Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:44.002000Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:44.003861Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 65107, node 2 2025-06-24T20:30:44.073202Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:30:44.073228Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:30:44.073236Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:30:44.073360Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8641 TClient is connected to server localhost:8641 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:30:44.696898Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:30:44.849060Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:30:47.715395Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519616062475144106:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:47.715493Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:47.715967Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519616062475144118:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:47.720352Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:30:47.741278Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T20:30:47.742063Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519616062475144120:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:30:47.826868Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519616062475144171:2331] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metad ... 204: failed to initialize from file: (empty maybe) 2025-06-24T20:31:56.523735Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19579 2025-06-24T20:31:57.287046Z node 11 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19579 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:31:57.396952Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:31:57.408513Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:32:01.273050Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7519616357840457941:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:01.273156Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:32:01.613598Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519616379315295041:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:01.613872Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:01.628169Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:32:01.708343Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:32:01.787719Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519616379315295217:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:01.787885Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:01.787973Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519616379315295222:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:01.796384Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:32:01.810401Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7519616379315295224:2315], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-24T20:32:01.912057Z node 11 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [11:7519616379315295275:2444] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:32:08.801475Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:106:2152], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:32:08.801689Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:32:08.801868Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003f0c/r3tmp/tmpEB4IRv/pdisk_1.dat 2025-06-24T20:32:09.148328Z node 12 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 12 Type# 268639257 2025-06-24T20:32:09.151085Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:32:09.186086Z node 12 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:09.189465Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:32:2079] 1750797124605912 != 1750797124605916 2025-06-24T20:32:09.237040Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:09.237230Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:09.248705Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:32:09.343308Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:604:2512], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:09.343443Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:614:2517], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:09.343585Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:09.351398Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:32:09.480176Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:618:2520], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-24T20:32:09.501902Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:32:09.540772Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:687:2558] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } PreparedQuery: "52360337-a7e129d8-b3e2fd2b-7cef63b9" QueryAst: "(\n(let $1 (PgType \'int4))\n(let $2 \'(\'(\'\"_logical_id\" \'218) \'(\'\"_id\" \'\"142d0516-44d5241e-84716657-95931cc9\") \'(\'\"_partition_mode\" \'\"single\")))\n(let $3 (DqPhyStage \'() (lambda \'() (Iterator (AsList (AsStruct \'(\'\"x\" (PgConst \'1 $1)) \'(\'\"y\" (PgConst \'2 $1)))))) $2))\n(let $4 (DqCnResult (TDqOutput $3 \'\"0\") \'(\'\"y\" \'\"x\")))\n(return (KqpPhysicalQuery \'((KqpPhysicalTx \'($3) \'($4) \'() \'(\'(\'\"type\" \'\"generic\")))) \'((KqpTxResultBinding (ListType (StructType \'(\'\"x\" $1) \'(\'\"y\" $1))) \'\"0\" \'\"0\")) \'(\'(\'\"type\" \'\"query\"))))\n)\n" QueryPlan: "{\"Plan\":{\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"PlanNodeId\":1,\"Operators\":[{\"Inputs\":[],\"Iterator\":\"[{x: \\\"1\\\",y: \\\"2\\\"}]\",\"Name\":\"Iterator\"}],\"Node Type\":\"ConstantExpr\"}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"Stats\":{\"ResourcePoolId\":\"default\"},\"PlanNodeType\":\"Query\"},\"meta\":{\"version\":\"0.2\",\"type\":\"query\"},\"tables\":[],\"SimplifiedPlan\":{\"PlanNodeId\":0,\"Plans\":[{\"PlanNodeId\":1,\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"OptimizerStats\":{\"EquiJoinsCount\":0,\"JoinsCount\":0},\"PlanNodeType\":\"Query\"}}" YdbResults { columns { name: "y" type { pg_type { oid: 23 } } } columns { name: "x" type { pg_type { oid: 23 } } } } QueryDiagnostics: "" >> THiveTest::TestLockTabletExecutionReconnect [GOOD] >> THiveTest::TestLockTabletExecutionRebootReconnect >> THiveTest::TestCreateTabletAndReassignGroupsWithReboots [GOOD] >> THiveTest::TestCreateTabletChangeToExternal >> THiveTest::TestHiveBalancerWithPrefferedDC2 [GOOD] >> THiveTest::TestHiveBalancerWithPreferredDC3 |88.8%| [TA] $(B)/ydb/core/sys_view/partition_stats/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TStorageBalanceTest::TestScenario1 [GOOD] >> TStorageBalanceTest::TestScenario2 |88.8%| [TA] $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/test-results/unittest/{meta.json ... results_accumulator.log} |88.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut |88.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut |88.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut |88.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut |88.8%| [TA] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/test-results/unittest/{meta.json ... results_accumulator.log} |88.8%| [TA] {RESULT} $(B)/ydb/core/sys_view/partition_stats/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut |88.8%| [LD] {RESULT} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut >> KqpPg::InsertNoTargetColumns_ColumnOrder+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_ColumnOrder-useSink |88.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber |88.8%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber |88.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest >> HullReplWriteSst::Basic [GOOD] Test command err: commit chunk# 1 {ChunkIdx: 1 Offset: 101208064 Size: 33007268} 750163 commit chunk# 2 {ChunkIdx: 2 Offset: 101187584 Size: 33028608} 750648 commit chunk# 3 {ChunkIdx: 3 Offset: 101257216 Size: 32959660} 749081 commit chunk# 4 {ChunkIdx: 4 Offset: 101220352 Size: 32993320} 749846 commit chunk# 5 {ChunkIdx: 5 Offset: 101208064 Size: 33009644} 750217 commit chunk# 6 {ChunkIdx: 6 Offset: 101228544 Size: 32987820} 749721 commit chunk# 7 {ChunkIdx: 7 Offset: 101228544 Size: 32989184} 749752 commit chunk# 8 {ChunkIdx: 8 Offset: 101232640 Size: 32985048} 749658 commit chunk# 9 {ChunkIdx: 9 Offset: 101203968 Size: 33011404} 750257 commit chunk# 10 {ChunkIdx: 10 Offset: 101208064 Size: 33007312} 750164 commit chunk# 11 {ChunkIdx: 11 Offset: 101212160 Size: 33004144} 750092 commit chunk# 12 {ChunkIdx: 12 Offset: 101220352 Size: 32993408} 749848 commit chunk# 13 {ChunkIdx: 13 Offset: 101220352 Size: 32995564} 749897 commit chunk# 14 {ChunkIdx: 14 Offset: 101208064 Size: 33009644} 750217 commit chunk# 15 {ChunkIdx: 15 Offset: 101249024 Size: 32968680} 749286 commit chunk# 16 {ChunkIdx: 16 Offset: 101216256 Size: 33001460} 750031 commit chunk# 17 {ChunkIdx: 17 Offset: 101224448 Size: 32989932} 749769 commit chunk# 18 {ChunkIdx: 18 Offset: 101236736 Size: 32980780} 749561 commit chunk# 19 {ChunkIdx: 19 Offset: 101212160 Size: 33005552} 750124 commit chunk# 20 {ChunkIdx: 20 Offset: 101199872 Size: 33017828} 750403 commit chunk# 21 {ChunkIdx: 21 Offset: 101236736 Size: 32978888} 749518 commit chunk# 22 {ChunkIdx: 22 Offset: 101224448 Size: 32992176} 749820 commit chunk# 23 {ChunkIdx: 23 Offset: 101228544 Size: 32989184} 749752 commit chunk# 24 {ChunkIdx: 24 Offset: 101224448 Size: 32989800} 749766 commit chunk# 25 {ChunkIdx: 25 Offset: 101224448 Size: 32992396} 749825 commit chunk# 26 {ChunkIdx: 26 Offset: 101228544 Size: 32989184} 749752 commit chunk# 27 {ChunkIdx: 27 Offset: 101212160 Size: 33001900} 750041 commit chunk# 28 {ChunkIdx: 28 Offset: 101216256 Size: 32997676} 749945 >> KqpPg::Returning+useSink [GOOD] >> KqpPg::Returning-useSink >> Cdc::SplitTopicPartition_TopicAutoPartitioning [GOOD] >> Cdc::ShouldDeliverChangesOnSplitMerge |88.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk |88.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk |88.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest 
|88.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base |88.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base |88.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure |88.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure |88.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnFailedGetAllTopicsRequest >> TSubDomainTest::StartAndStopTenanNode >> THiveTest::TestCreateTabletChangeToExternal [GOOD] >> THiveTest::TestExternalBoot >> TSubDomainTest::CreateDummyTabletsInDifferentDomains >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest >> TSubDomainTest::CreateTablet |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> TSubDomainTest::LsLs >> TSubDomainTest::DeleteTableAndThenForceDeleteSubDomain >> TSubDomainTest::FailIfAffectedSetNotInterior >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest >> TSubDomainTest::UserAttributes >> TModifyUserTest::ModifyUser >> TSubDomainTest::CreateTableInsideAndForceDeleteSubDomain >> TSubDomainTest::Boot >> THiveTest::TestLockTabletExecutionRebootReconnect [GOOD] >> THiveTest::TestLockTabletExecutionReconnectExpire >> Cdc::AlterViaTopicService [GOOD] >> Cdc::Alter |88.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut |88.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut |88.8%| [LD] {RESULT} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnFailedGetAllTopicsRequest [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNoBalancerInGetNodeRequest |88.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/engine/ut/ydb-core-engine-ut |88.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/engine/ut/ydb-core-engine-ut |88.8%| [LD] {RESULT} $(B)/ydb/core/engine/ut/ydb-core-engine-ut |88.8%| [TA] $(B)/ydb/core/blobstorage/vdisk/repl/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnEmptyTopicName >> THiveTest::TestExternalBoot [GOOD] >> THiveTest::TestExternalBootWhenLocked >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNoClientSpecified |88.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/security/ut/ydb-core-security-ut >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::HandlesTimeout >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst |88.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/security/ut/ydb-core-security-ut |88.8%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/repl/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.8%| [LD] {RESULT} $(B)/ydb/core/security/ut/ydb-core-security-ut >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnEmptyTopicName [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnFailedGetAllTopicsRequest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnFailedGetAllTopicsRequest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNoClientSpecified [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::HandlesTimeout [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesTimeout >> TSubscriberCombinationsTest::CombinationsRootDomain >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesTimeout >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst >> ReadOnlyVDisk::TestDiscover >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnFailedGetAllTopicsRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnFailedGetAllTopicsRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnNoBalancerInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst ------- [TM] 
{asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnEmptyTopicName [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'/Root/PQ/\', Marker# PQ17" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' has no balancer, Marker# PQ193" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "empty topic in GetReadSessionsInfo request" ErrorCode: BAD_REQUEST } >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::SuccessfullyReplies >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesTimeout [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::SuccessfullyPassesResponsesFromTablets >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesTimeout [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::SuccessfullyPassesResponsesFromTablets ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNoClientSpecified [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'Root/PQ\', Marker# PQ150" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' is not created, Marker# PQ94" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "No clientId specified in CmdGetReadSessionsInfo" ErrorCode: BAD_REQUEST } >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailesOnNotATopic >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnNoBalancerInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnEmptyTopicName >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::SuccessfullyReplies [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnEmptyTopicName >> THiveTest::TestLockTabletExecutionReconnectExpire [GOOD] >> THiveTest::TestLockTabletExecutionStealLock >> THiveTest::TestExternalBootWhenLocked [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailesOnNotATopic [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly >> Cdc::RacyActivateAndEnqueue [GOOD] >> Cdc::RacyCreateAndSend >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] >> 
TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::SuccessfullyPassesResponsesFromTablets [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::SuccessfullyPassesResponsesFromTablets [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnEmptyTopicName >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnEmptyTopicName [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedTopicName >> Cdc::DropIndex [GOOD] >> Cdc::DisableStream >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnEmptyTopicName [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedPartition ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::SuccessfullyReplies [GOOD] Test command err: Assert failed: Check response: { Status: 130 ErrorReason: "Timeout while waiting for response, may be just slow, Marker# PQ16" ErrorCode: ERROR } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' is not created, Marker# PQ94" ErrorCode: UNKNOWN_TOPIC } >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailesOnNotATopic ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'Root/PQ\', Marker# PQ150" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' is not created, Marker# PQ94" ErrorCode: UNKNOWN_TOPIC } 2025-06-24T20:32:27.412723Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:27.416737Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:27.417197Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928037] doesn't have tx info 2025-06-24T20:32:27.417256Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:27.417299Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-24T20:32:27.417345Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:27.417393Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:27.417495Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-24T20:32:27.418441Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [3:264:2254], now have 1 active actors on pipe 2025-06-24T20:32:27.418588Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:32:27.437892Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [3:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:27.448568Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:27.448751Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:27.449690Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928037] Config applied version 1 actor [3:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:27.449879Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:27.450418Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:27.450745Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [3:272:2260] 2025-06-24T20:32:27.453583Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 
2025-06-24T20:32:27.453680Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [3:272:2260] 2025-06-24T20:32:27.453740Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:27.453791Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T20:32:27.454201Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928037, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:32:27.454767Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [3:275:2262], now have 1 active actors on pipe 2025-06-24T20:32:27.557664Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928137] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:27.561626Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928137] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:27.561947Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928137] doesn't have tx info 2025-06-24T20:32:27.561994Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928137] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:27.562050Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928137] no config, start with empty partitions and default config 2025-06-24T20:32:27.562092Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928137] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:27.562154Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:27.562230Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928137] doesn't have tx writes info 2025-06-24T20:32:27.562937Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928137] server connected, pipe [3:409:2362], now have 1 active actors on pipe 2025-06-24T20:32:27.563064Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928137] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:32:27.563275Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928137] Config update version 2(current 0) received from actor [3:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:27.565835Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928137] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:27.565966Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:27.566834Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1489: 
[PQ: 72057594037928137] Config applied version 2 actor [3:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:27.566978Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:27.567381Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:27.567656Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928137, Partition: 0, State: StateInit] bootstrapping 0 [3:417:2368] 2025-06-24T20:32:27.569886Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:0:Initializer] Initializing completed. 2025-06-24T20:32:27.569954Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928137, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 0 generation 2 [3:417:2368] 2025-06-24T20:32:27.570015Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928137, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:27.570069Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928137, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T20:32:27.570336Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928137, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:32:27.570893Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928137] server connected, pipe [3:420:2370], now have 1 active actors on pipe 2025-06-24T20:32:27.589036Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928138] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:27.592979Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928138] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:27.593378Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928138] doesn't have tx info 2025-06-24T20:32:27.593429Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928138] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:27.593481Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928138] no config, start with empty partitions and default config 2025-06-24T20:32:27.593526Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928138] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:27.593575Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:27.593634Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928138] doesn't have tx writes info 2025-06-24T20:32:27.594391Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928138] server connected, pipe [3:469:2407], now have 1 active actors on pipe 2025-06-24T20:32:27.594515Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928138] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:32:27.594716Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928138] Config update version 3(current 0) received from actor [3:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-24T20:32:27.597314Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-24T20:32:27.597464Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025- ... 
Seconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 7 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-24T20:32:28.378298Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.379010Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928138] Config applied version 7 actor [4:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 7 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-24T20:32:28.379164Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:28.379556Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:28.379789Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [4:476:2412] 2025-06-24T20:32:28.381973Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed. 2025-06-24T20:32:28.382049Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [4:476:2412] 2025-06-24T20:32:28.382114Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:28.382170Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-24T20:32:28.382477Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928138, Partition: 1, State: StateIdle] no data for compaction 2025-06-24T20:32:28.383194Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928138] server connected, pipe [4:479:2414], now have 1 active actors on pipe 2025-06-24T20:32:28.399250Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:28.403057Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:28.403421Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info 2025-06-24T20:32:28.403476Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:28.403521Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-24T20:32:28.403581Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:28.403635Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.403706Z node 4 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-24T20:32:28.404401Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [4:528:2451], now have 1 active actors on pipe 2025-06-24T20:32:28.404514Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:32:28.404743Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928139] Config update version 8(current 0) received from actor [4:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 8 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T20:32:28.407201Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 8 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T20:32:28.407350Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.408303Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928139] Config applied version 8 actor [4:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 8 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T20:32:28.408439Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:28.408872Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:28.409120Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [4:536:2457] 2025-06-24T20:32:28.411349Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 
2025-06-24T20:32:28.411428Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [4:536:2457] 2025-06-24T20:32:28.411491Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:28.411548Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-24T20:32:28.411866Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T20:32:28.412455Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [4:539:2459], now have 1 active actors on pipe 2025-06-24T20:32:28.413725Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [4:545:2462], now have 1 active actors on pipe 2025-06-24T20:32:28.413852Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928138] server connected, pipe [4:546:2463], now have 1 active actors on pipe 2025-06-24T20:32:28.413952Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [4:547:2463], now have 1 active actors on pipe 2025-06-24T20:32:28.427774Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [4:552:2467], now have 1 active actors on pipe 2025-06-24T20:32:28.458828Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:28.461753Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:28.462149Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info 2025-06-24T20:32:28.462202Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:28.462351Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:28.463560Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.463639Z node 4 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-24T20:32:28.463768Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:28.464184Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:28.464448Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [4:609:2512] 2025-06-24T20:32:28.466645Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-06-24T20:32:28.468897Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-06-24T20:32:28.469227Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-06-24T20:32:28.469575Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-06-24T20:32:28.469848Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-06-24T20:32:28.469894Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T20:32:28.469935Z node 4 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T20:32:28.469978Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-24T20:32:28.470030Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [4:609:2512] 2025-06-24T20:32:28.470091Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:28.470143Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-06-24T20:32:28.470360Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T20:32:28.471137Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928138] server disconnected, pipe [4:546:2463] destroyed 2025-06-24T20:32:28.471275Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928037] server disconnected, pipe [4:545:2462] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionLocationsResult { TopicResult { Topic: "rt3.dc1--topic1" PartitionLocation { Partition: 0 Host: "::1" HostId: 4 ErrorCode: OK } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic2" PartitionLocation { Partition: 1 Host: "::1" HostId: 4 ErrorCode: OK } PartitionLocation { Partition: 2 ErrorCode: INITIALIZING ErrorReason: "Tablet for that partition is not running" } ErrorCode: OK } } } >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedTopicName [GOOD] >> YdbTableSplit::RenameTablesAndSplit [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnEmptyTopicName [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedPartition [GOOD] >> ReadOnlyVDisk::TestStorageLoad >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailesOnNotATopic [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnEmptyTopicName [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR } Assert failed: Check response: { Status: 128 ErrorReason: "empty topic in GetTopicMetadata request" ErrorCode: BAD_REQUEST } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedTopicName [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'/Root/PQ/\', Marker# PQ17" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' has no balancer, Marker# PQ193" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "TopicRequest must have Topic field." 
ErrorCode: BAD_REQUEST } Assert failed: Check response: { Status: 128 ErrorReason: "multiple TopicRequest for topic \'rt3.dc1--topic1\'" ErrorCode: BAD_REQUEST } >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] >> TSubDomainTest::LsLs [GOOD] >> TSubDomainTest::LsAltered >> KqpNewEngine::PrimaryView [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnEmptyTopicName [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'/Root/PQ/\', Marker# PQ17" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "no path \'Root/PQ\', Marker# PQ150" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' has no balancer, Marker# PQ193" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "TopicRequest must have Topic field." ErrorCode: BAD_REQUEST } >> TSubDomainTest::Boot [GOOD] >> TSubDomainTest::CheckAccessCopyTable >> TSubDomainTest::UserAttributes [GOOD] >> TSubDomainTest::UserAttributesApplyIf >> TModifyUserTest::ModifyUser [GOOD] >> TModifyUserTest::ModifyLdapUser >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] >> YdbTableSplit::SplitByLoadWithNonEmptyRangeReads [GOOD] >> ReadOnlyVDisk::TestReads >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedPartition [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR } Assert failed: Check response: { Status: 128 ErrorReason: "multiple partition 2 in TopicRequest for topic \'rt3.dc1--topic2\'" ErrorCode: BAD_REQUEST } >> THiveTest::TestLockTabletExecutionStealLock [GOOD] >> THiveTest::TestProgressWithMaxTabletsScheduled ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailesOnNotATopic [GOOD] Test command err: Assert failed: Check response: { Status: 130 ErrorReason: "Timeout while waiting for response, may be just slow, Marker# PQ16" ErrorCode: ERROR } 2025-06-24T20:32:28.385543Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:28.395585Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:28.395838Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928037] doesn't have tx info 2025-06-24T20:32:28.395883Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:28.395922Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-24T20:32:28.395955Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:28.396030Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.396097Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-24T20:32:28.396672Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [2:262:2252], now have 1 active actors on pipe 2025-06-24T20:32:28.396825Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:32:28.417628Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:28.420541Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:28.420700Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.421550Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928037] Config applied version 1 actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:28.421707Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:28.422047Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:28.422383Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:270:2258] 2025-06-24T20:32:28.426198Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 
2025-06-24T20:32:28.426280Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:270:2258] 2025-06-24T20:32:28.426341Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:28.426393Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T20:32:28.426750Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928037, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:32:28.427298Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [2:273:2260], now have 1 active actors on pipe 2025-06-24T20:32:28.540489Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:28.545115Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:28.545431Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info 2025-06-24T20:32:28.545480Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:28.545519Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-24T20:32:28.545569Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:28.545615Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.545671Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-24T20:32:28.547217Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [2:408:2361], now have 1 active actors on pipe 2025-06-24T20:32:28.547356Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:32:28.547646Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928139] Config update version 2(current 0) received from actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T20:32:28.550355Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T20:32:28.550478Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.551380Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: 
[PQ: 72057594037928139] Config applied version 2 actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T20:32:28.551493Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:28.551847Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:28.552051Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [2:416:2367] 2025-06-24T20:32:28.554203Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-24T20:32:28.554273Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [2:416:2367] 2025-06-24T20:32:28.554333Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:28.554413Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-24T20:32:28.554669Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T20:32:28.555209Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [2:419:2369], now have 1 active actors on pipe 2025-06-24T20:32:28.556362Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [2:425:2372], now have 1 active actors on pipe 2025-06-24T20:32:28.556709Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [2:427:2373], now have 1 active actors on pipe 2025-06-24T20:32:28.557137Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928037] server disconnected, pipe [2:425:2372] destroyed 2025-06-24T20:32:28.557439Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928139] server disconnected, pipe [2:427:2373] destroyed Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> THiveTest::TestExternalBootWhenLocked [GOOD] Test command err: 2025-06-24T20:31:12.242352Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-24T20:31:12.271640Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 
GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:12.271946Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-24T20:31:12.273010Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:31:12.273412Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-24T20:31:12.274513Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-24T20:31:12.274569Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-24T20:31:12.275542Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:76:2077] ControllerId# 72057594037932033 2025-06-24T20:31:12.275587Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-24T20:31:12.275711Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-24T20:31:12.275859Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-24T20:31:12.294279Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-24T20:31:12.294352Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-24T20:31:12.297378Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:75:2076] Create Queue# [1:84:2082] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.297570Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:75:2076] Create Queue# [1:85:2083] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.297710Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:75:2076] Create Queue# [1:86:2084] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.297887Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:75:2076] Create Queue# [1:87:2085] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.298046Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:75:2076] Create Queue# [1:88:2086] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.298183Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:75:2076] Create Queue# [1:89:2087] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.298318Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:75:2076] Create Queue# [1:90:2088] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.298348Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-24T20:31:12.298448Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:76:2077] 2025-06-24T20:31:12.298481Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:76:2077] 2025-06-24T20:31:12.298535Z 
node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-24T20:31:12.298596Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-24T20:31:12.299511Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-24T20:31:12.299647Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-24T20:31:12.302544Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:12.302711Z node 2 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 2 PDiskId# 1 Path# "SectorMap:1:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-24T20:31:12.303274Z node 2 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:31:12.303536Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-24T20:31:12.304601Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:101:2077] ControllerId# 72057594037932033 2025-06-24T20:31:12.304639Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-24T20:31:12.304705Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-24T20:31:12.304807Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-24T20:31:12.318802Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-24T20:31:12.318867Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-24T20:31:12.321206Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:100:2076] Create Queue# [2:108:2081] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.321376Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:100:2076] Create Queue# [2:109:2082] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.321528Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:100:2076] Create Queue# [2:110:2083] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.321659Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:100:2076] Create Queue# [2:111:2084] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.321797Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:100:2076] Create Queue# [2:112:2085] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.321939Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:100:2076] Create Queue# [2:113:2086] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.322086Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:100:2076] Create Queue# 
[2:114:2087] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.322127Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-24T20:31:12.322204Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [2:101:2077] 2025-06-24T20:31:12.322235Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [2:101:2077] 2025-06-24T20:31:12.322282Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-24T20:31:12.322320Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-24T20:31:12.322790Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-24T20:31:12.322868Z node 3 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-24T20:31:12.325702Z node 3 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:12.325864Z node 3 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 3 PDiskId# 1 Path# "SectorMap:2:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-24T20:31:12.326346Z node 3 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:31:12.326592Z node 3 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-24T20:31:12.327760Z node 3 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [3:122:2077] ControllerId# 72057594037932033 2025-06-24T20:31:12.327803Z node 3 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-24T20:31:12.327873Z node 3 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-24T20:31:12.328013Z node 3 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-24T20:31:12.337765Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-24T20:31:12.337822Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-24T20:31:12.339399Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:121:2076] Create Queue# [3:129:2081] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.339601Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:121:2076] Create Queue# [3:130:2082] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.339758Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:121:2076] Create Queue# [3:131:2083] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.340030Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:121:2076] Create Queue# [3:132:2084] targetNodeId# 1 
Marker# DSP01 2025-06-24T20:31:12.340212Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:121:2076] Create Queue# [3:133:2085] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.340294Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:121:2076] Create Queue# [3:134:2086] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.340406Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:121:2076] Create Queue# [3:135:2087] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:12.340441Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-24T20:31:12.340511Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [3:122:2077] 2025-06-24T20:31:12.340540Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72 ... e 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72075186224037888] notify reset [29:449:2286] 2025-06-24T20:32:28.087128Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594037927937] send [29:129:2094] 2025-06-24T20:32:28.087193Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [29:129:2094] 2025-06-24T20:32:28.087319Z node 29 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [29:100:2094] EventType# 268960257 2025-06-24T20:32:28.087549Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{22, NKikimr::NHive::TTxUpdateTabletStatus} queued, type NKikimr::NHive::TTxUpdateTabletStatus 2025-06-24T20:32:28.087632Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{22, NKikimr::NHive::TTxUpdateTabletStatus} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:32:28.087787Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{22, NKikimr::NHive::TTxUpdateTabletStatus} hope 1 -> done Change{13, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T20:32:28.087888Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{22, NKikimr::NHive::TTxUpdateTabletStatus} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:32:28.088175Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{23, NKikimr::NHive::TTxProcessBootQueue} queued, type NKikimr::NHive::TTxProcessBootQueue 2025-06-24T20:32:28.088249Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{23, NKikimr::NHive::TTxProcessBootQueue} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:32:28.088362Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{23, NKikimr::NHive::TTxProcessBootQueue} hope 1 -> done Change{13, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T20:32:28.088434Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{23, NKikimr::NHive::TTxProcessBootQueue} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:32:28.088922Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037888] ::Bootstrap [29:462:2293] 2025-06-24T20:32:28.088999Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037888] lookup [29:462:2293] 2025-06-24T20:32:28.089133Z node 29 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037888 entry.State: StNormal ev: {EvForward TabletID: 72075186224037888 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:32:28.089217Z node 29 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 29 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 
disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [29:380:2236] 2025-06-24T20:32:28.089325Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037888] forward result local node, try to connect [29:462:2293] 2025-06-24T20:32:28.089405Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [29:462:2293] 2025-06-24T20:32:28.089556Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:349: TClient[72075186224037888] connect request undelivered [29:462:2293] 2025-06-24T20:32:28.089618Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: TClient[72075186224037888] connect failed [29:462:2293] 2025-06-24T20:32:28.089725Z node 29 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:536: Handle TEvTabletProblem tabletId: 72075186224037888 entry.State: StNormal 2025-06-24T20:32:28.089923Z node 29 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037888 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:32:28.090114Z node 29 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 0} 2025-06-24T20:32:28.090206Z node 29 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 1} 2025-06-24T20:32:28.090246Z node 29 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 2} 2025-06-24T20:32:28.090323Z node 29 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [29:380:2236] CurrentLeaderTablet: [29:396:2247] CurrentGeneration: 1 CurrentStep: 0} 2025-06-24T20:32:28.090419Z node 29 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [29:380:2236] CurrentLeaderTablet: [29:396:2247] CurrentGeneration: 1 CurrentStep: 0} 2025-06-24T20:32:28.090572Z node 29 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037888 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037888 Cookie: 0 CurrentLeader: [29:380:2236] CurrentLeaderTablet: [29:396:2247] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[29:24343667:0] : 3}, {[29:1099535971443:0] : 6}}}} 2025-06-24T20:32:28.090715Z node 29 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72075186224037888 followers: 0 2025-06-24T20:32:28.091068Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [30:464:2162] 2025-06-24T20:32:28.091122Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [30:464:2162] 2025-06-24T20:32:28.091249Z node 30 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:32:28.091347Z node 30 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 30 selfDC 2 leaderDC 1 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [29:330:2200] 
2025-06-24T20:32:28.091450Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [30:464:2162] 2025-06-24T20:32:28.091512Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [30:464:2162] 2025-06-24T20:32:28.091567Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72057594037927937] forward result remote node 29 [30:464:2162] 2025-06-24T20:32:28.091685Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72057594037927937] remote node connected [30:464:2162] 2025-06-24T20:32:28.091758Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [30:464:2162] 2025-06-24T20:32:28.092038Z node 29 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [30:464:2162] 2025-06-24T20:32:28.092359Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [30:464:2162] 2025-06-24T20:32:28.092429Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [30:464:2162] 2025-06-24T20:32:28.092483Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [30:464:2162] 2025-06-24T20:32:28.092576Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [30:464:2162] 2025-06-24T20:32:28.092644Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [30:464:2162] 2025-06-24T20:32:28.092699Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [30:464:2162] 2025-06-24T20:32:28.093031Z node 29 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72057594037927937] Push Sender# [30:452:2157] EventType# 268697624 2025-06-24T20:32:28.093250Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{24, NKikimr::NHive::TTxStartTablet} queued, type NKikimr::NHive::TTxStartTablet 2025-06-24T20:32:28.093334Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{24, NKikimr::NHive::TTxStartTablet} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:32:28.093597Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{24, NKikimr::NHive::TTxStartTablet} hope 1 -> done Change{13, redo 144b alter 0b annex 0, ~{ 1, 16 } -{ }, 0 gb} 2025-06-24T20:32:28.093730Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{24, NKikimr::NHive::TTxStartTablet} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:32:28.106672Z node 29 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [db158bc7997c188e] bootstrap ActorId# [29:467:2296] Group# 0 BlobCount# 1 BlobIDs# [[72057594037927937:2:9:0:0:127:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-24T20:32:28.106912Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [db158bc7997c188e] Id# [72057594037927937:2:9:0:0:127:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:32:28.107010Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [db158bc7997c188e] restore Id# [72057594037927937:2:9:0:0:127:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T20:32:28.107117Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [db158bc7997c188e] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:9:0:0:127:1] 
Marker# BPG33 2025-06-24T20:32:28.107254Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [db158bc7997c188e] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:9:0:0:127:1] Marker# BPG32 2025-06-24T20:32:28.107539Z node 29 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [29:81:2082] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:9:0:0:127:1] FDS# 127 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T20:32:28.109305Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [db158bc7997c188e] received {EvVPutResult Status# OK ID# [72057594037927937:2:9:0:0:127:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 24 } Cost# 81000 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 25 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-24T20:32:28.109471Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [db158bc7997c188e] Result# TEvPutResult {Id# [72057594037927937:2:9:0:0:127:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-24T20:32:28.109596Z node 29 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [db158bc7997c188e] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:9:0:0:127:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-24T20:32:28.109813Z node 29 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 1.219 sample PartId# [72057594037927937:2:9:0:0:127:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 29 } TEvVPutResult{ TimestampMs# 2.992 VDiskId# [0:1:0:0:0] NodeId# 29 Status# OK } ] } 2025-06-24T20:32:28.110057Z node 29 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:9:0:0:127:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-24T20:32:28.110272Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} commited cookie 1 for step 9 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::RenameTablesAndSplit [GOOD] Test command err: 2025-06-24T20:30:55.372529Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616097443707269:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:55.372586Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00305d/r3tmp/tmpHOKjkv/pdisk_1.dat 2025-06-24T20:30:56.303272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:56.303372Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:56.316016Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:56.357025Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3152, node 1 2025-06-24T20:30:56.424211Z node 1 
:TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:30:56.673422Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:30:56.673443Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:30:56.673449Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:30:56.673564Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28794 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:30:57.466484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:31:00.268100Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616118918544795:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:00.268233Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:00.379094Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616097443707269:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:00.379195Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:31:00.707427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/Foo, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-24T20:31:00.710901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:00.711029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:00.716362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715658, database: /Root, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /Root/Foo 2025-06-24T20:31:00.890065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750797060929, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T20:31:00.949145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715658:0 2025-06-24T20:31:00.973813Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616118918545045:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:00.973891Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:01.010625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:508: TAlterTable Propose, path: /Root/Foo, pathId: , opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-24T20:31:01.011260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715659:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:01.011295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T20:31:01.014669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715659, database: /Root, subject: , status: StatusAccepted, operation: ALTER TABLE, path: /Root/Foo 2025-06-24T20:31:01.035978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750797061083, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T20:31:01.052741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715659:0 Fast forward 1m partitions 2 Fast forward 1m partitions 2 Fast forward 1m partitions 2 Fast forward 1m 2025-06-24T20:31:10.948161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:804: TSplitMerge Propose, tableStr: /Root/Foo, tableId: , opId: 281474976710657:0, at schemeshard: 72057594046644480, request: TablePath: "/Root/Foo" SourceTabletId: 72075186224037888 SourceTabletId: 72075186224037889 SchemeshardId: 72057594046644480 2025-06-24T20:31:10.948761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:1083: TSplitMerge Propose accepted, tableStr: /Root/Foo, tableId: , opId: 281474976710657:0, at schemeshard: 72057594046644480, op: SourceRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "\002\000\004\000\000\000\377\377\377\177\000\000\000\200" TabletID: 72075186224037888 ShardIdx: 1 } SourceRanges { KeyRangeBegin: "\002\000\004\000\000\000\377\377\377\177\000\000\000\200" KeyRangeEnd: "" TabletID: 72075186224037889 ShardIdx: 2 } DestinationRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "" ShardIdx: 3 }, request: TablePath: "/Root/Foo" SourceTabletId: 72075186224037888 SourceTabletId: 72075186224037889 SchemeshardId: 72057594046644480 2025-06-24T20:31:10.948802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:11.086517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710657:0 2025-06-24T20:31:11.105319Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-24T20:31:11.105357Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 
2025-06-24T20:31:11.252292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:31:11.252317Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded partitions 1 2025-06-24T20:31:13.303971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_move_table.cpp:651: TMoveTable Propose, from: /Root/Foo, to: /Root/Bar, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-24T20:31:13.309399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715660:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:31:13.313006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715660, database: /Root, subject: , status: StatusAccepted, operation: ALTER TABLE RENAME, dst path: /Root/Foo, dst path: /Root/Bar 2025-06-24T20:31:13.338549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750797553386, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T20:31:13.355055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1104: All parts have reached barrier, tx: 281474976715660, done: 0, blocked: 1 2025-06-24T20:31:13.380100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715660:0 2025-06-24T20:31:13.386919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 Fast forward 1m 2025-06-24T20:31:21.008228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037890 followerId 0 pathId [OwnerId: 72057594046644 ... 
rver disconnected at leader tablet# 72075186224037890, clientId# [1:7519616496875671278:2785], serverId# [1:7519616496875671295:4829], sessionId# [0:0:0] 2025-06-24T20:32:28.128886Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037890, clientId# [1:7519616496875671280:2787], serverId# [1:7519616496875671296:4830], sessionId# [0:0:0] 2025-06-24T20:32:28.130088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519616161868218428 RawX2: 4503603922340152 } TabletId: 72075186224037890 State: 4 2025-06-24T20:32:28.130181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-24T20:32:28.130423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519616161868218428 RawX2: 4503603922340152 } TabletId: 72075186224037890 State: 4 2025-06-24T20:32:28.130471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-24T20:32:28.132677Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037892 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T20:32:28.132690Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037891 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T20:32:28.132729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-24T20:32:28.132751Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037891 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T20:32:28.132756Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037892 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T20:32:28.132821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-24T20:32:28.132839Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-06-24T20:32:28.132850Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-06-24T20:32:28.133106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519616488285736259 RawX2: 4503603922340542 } TabletId: 72075186224037892 State: 4 2025-06-24T20:32:28.133187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037892, state: Offline, at schemeshard: 72057594046644480 2025-06-24T20:32:28.133405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519616488285736259 RawX2: 4503603922340542 } TabletId: 72075186224037892 State: 4 2025-06-24T20:32:28.133427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: 
TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037892, state: Offline, at schemeshard: 72057594046644480 2025-06-24T20:32:28.133583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519616488285736258 RawX2: 4503603922340541 } TabletId: 72075186224037891 State: 4 2025-06-24T20:32:28.133601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-06-24T20:32:28.133698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519616488285736258 RawX2: 4503603922340541 } TabletId: 72075186224037891 State: 4 2025-06-24T20:32:28.133715Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-06-24T20:32:28.135643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:5 hive 72057594037968897 at ss 72057594046644480 2025-06-24T20:32:28.135643Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037892 state Offline 2025-06-24T20:32:28.135728Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037892 state Offline 2025-06-24T20:32:28.135735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:5 hive 72057594037968897 at ss 72057594046644480 2025-06-24T20:32:28.135789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-24T20:32:28.135822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-24T20:32:28.135825Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037891 state Offline 2025-06-24T20:32:28.135838Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037891 state Offline 2025-06-24T20:32:28.137008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-24T20:32:28.137172Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-06-24T20:32:28.137258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-24T20:32:28.137478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-24T20:32:28.137747Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-24T20:32:28.138585Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: 
OnTabletDead: 72075186224037890 2025-06-24T20:32:28.138715Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 2025-06-24T20:32:28.140125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046644480 ShardLocalIdx: 5, at schemeshard: 72057594046644480 2025-06-24T20:32:28.140336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-24T20:32:28.140499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046644480 ShardLocalIdx: 5, at schemeshard: 72057594046644480 2025-06-24T20:32:28.140619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-24T20:32:28.140756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-24T20:32:28.140872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-24T20:32:28.140964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-24T20:32:28.140990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-24T20:32:28.141020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-24T20:32:28.141064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T20:32:28.141083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-24T20:32:28.141155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-24T20:32:28.141326Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037892 reason = ReasonStop 2025-06-24T20:32:28.141349Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037891 reason = ReasonStop 2025-06-24T20:32:28.141809Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037892 2025-06-24T20:32:28.141869Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037891 2025-06-24T20:32:28.141872Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037892 2025-06-24T20:32:28.141924Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-24T20:32:28.142866Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037892 not found 
2025-06-24T20:32:28.142919Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-06-24T20:32:28.146168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-24T20:32:28.146200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:5 tabletId 72075186224037892 2025-06-24T20:32:28.146463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-24T20:32:28.146507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-24T20:32:28.146526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-24T20:32:28.146547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-24T20:32:28.146586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] Test command err: Assert failed: Check response: { Status: 130 ErrorReason: "Timeout while waiting for response, may be just slow, Marker# PQ16" ErrorCode: ERROR } 2025-06-24T20:32:28.404099Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:28.408150Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:28.408473Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928037] doesn't have tx info 2025-06-24T20:32:28.408521Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:28.408557Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-24T20:32:28.408611Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:28.408674Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.408764Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-24T20:32:28.409425Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [2:263:2253], now have 1 active actors on pipe 2025-06-24T20:32:28.409572Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:32:28.432136Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:28.434626Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:28.434791Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.435673Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928037] Config applied version 1 actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:28.435819Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:28.436216Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:28.436506Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:271:2259] 2025-06-24T20:32:28.439090Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 
2025-06-24T20:32:28.439191Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:271:2259] 2025-06-24T20:32:28.439256Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:28.439305Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T20:32:28.439662Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928037, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:32:28.440222Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [2:274:2261], now have 1 active actors on pipe 2025-06-24T20:32:28.495495Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:28.498904Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:28.499263Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info 2025-06-24T20:32:28.499311Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:28.499347Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-24T20:32:28.499384Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:28.499455Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.499513Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-24T20:32:28.500148Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [2:409:2362], now have 1 active actors on pipe 2025-06-24T20:32:28.500267Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:32:28.500439Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928139] Config update version 2(current 0) received from actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T20:32:28.502827Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T20:32:28.502945Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.503773Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: 
[PQ: 72057594037928139] Config applied version 2 actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T20:32:28.503875Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:28.504183Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:28.504379Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [2:417:2368] 2025-06-24T20:32:28.506342Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-24T20:32:28.506407Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [2:417:2368] 2025-06-24T20:32:28.506461Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:28.506514Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-24T20:32:28.506772Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T20:32:28.507338Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [2:420:2370], now have 1 active actors on pipe 2025-06-24T20:32:28.508469Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [2:426:2373], now have 1 active actors on pipe 2025-06-24T20:32:28.508801Z node 2 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T20:32:28.508946Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [2:428:2374], now have 1 active actors on pipe 2025-06-24T20:32:28.509238Z node 2 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T20:32:28.509541Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928037] server disconnected, pipe [2:426:2373] destroyed 2025-06-24T20:32:28.509826Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928139] server disconnected, pipe [2:428:2374] destroyed 2025-06-24T20:32:29.220472Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:29.224046Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 
72057594037928037] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:29.224322Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928037] doesn't have tx info 2025-06-24T20:32:29.224377Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:29.224444Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-24T20:32:29.224498Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:29.224552Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:29.224618Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-24T20:32:29.225290Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [3:264:2254], now have 1 active actors on ... or topic 'rt3.dc1--topic2' partition 2 generation 2 [3:538:2459] 2025-06-24T20:32:29.377285Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:29.377350Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-24T20:32:29.377748Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T20:32:29.378404Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [3:541:2461], now have 1 active actors on pipe 2025-06-24T20:32:29.379717Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [3:547:2464], now have 1 active actors on pipe 2025-06-24T20:32:29.380096Z node 3 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T20:32:29.380274Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928138] server connected, pipe [3:548:2465], now have 1 active actors on pipe 2025-06-24T20:32:29.380599Z node 3 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T20:32:29.380664Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [3:549:2465], now have 1 active actors on pipe 2025-06-24T20:32:29.380881Z node 3 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 
2025-06-24T20:32:29.392168Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [3:557:2472], now have 1 active actors on pipe 2025-06-24T20:32:29.420497Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:29.423935Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:29.424354Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info 2025-06-24T20:32:29.424409Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:29.424587Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:29.425509Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:29.425574Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-24T20:32:29.425694Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:29.426141Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:29.426418Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:614:2517] 2025-06-24T20:32:29.428648Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-06-24T20:32:29.430188Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-06-24T20:32:29.430556Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-06-24T20:32:29.430903Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-06-24T20:32:29.431265Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-06-24T20:32:29.431320Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T20:32:29.431374Z node 3 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T20:32:29.431425Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-24T20:32:29.431496Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [3:614:2517] 2025-06-24T20:32:29.431587Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:29.431653Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-06-24T20:32:29.431960Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T20:32:29.432752Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928138] server disconnected, pipe [3:548:2465] destroyed 2025-06-24T20:32:29.432848Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928037] server disconnected, pipe [3:547:2464] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionStatusResult { TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 1 Status: STATUS_OK LastInitDurationSeconds: 0 CreationTimestamp: 0 GapCount: 0 GapSize: 0 AvgWriteSpeedPerSec: 0 AvgWriteSpeedPerMin: 0 AvgWriteSpeedPerHour: 0 AvgWriteSpeedPerDay: 0 AvgReadSpeedPerSec: 0 AvgReadSpeedPerMin: 0 AvgReadSpeedPerHour: 0 AvgReadSpeedPerDay: 0 ReadBytesQuota: 0 WriteBytesQuota: 50000000 PartitionSize: 0 StartOffset: 0 EndOffset: 0 LastWriteTimestampMs: 79 WriteLagMs: 0 AvgQuotaSpeedPerSec: 0 AvgQuotaSpeedPerMin: 0 AvgQuotaSpeedPerHour: 0 AvgQuotaSpeedPerDay: 0 SourceIdCount: 0 SourceIdRetentionPeriodSec: 0 UsedReserveSize: 0 AggregatedCounters { Values: 79 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 50000000 Values: 0 Values: 9223372036854775807 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 } Generation: 2 Cookie: 1 ScaleStatus: NORMAL } PartitionResult { Partition: 2 Status: STATUS_OK LastInitDurationSeconds: 0 CreationTimestamp: 0 GapCount: 0 GapSize: 0 AvgWriteSpeedPerSec: 0 AvgWriteSpeedPerMin: 0 AvgWriteSpeedPerHour: 0 AvgWriteSpeedPerDay: 0 AvgReadSpeedPerSec: 0 AvgReadSpeedPerMin: 0 AvgReadSpeedPerHour: 0 AvgReadSpeedPerDay: 0 ReadBytesQuota: 0 WriteBytesQuota: 50000000 PartitionSize: 0 StartOffset: 0 EndOffset: 0 LastWriteTimestampMs: 93 WriteLagMs: 0 AvgQuotaSpeedPerSec: 0 AvgQuotaSpeedPerMin: 0 AvgQuotaSpeedPerHour: 0 AvgQuotaSpeedPerDay: 0 SourceIdCount: 0 SourceIdRetentionPeriodSec: 0 UsedReserveSize: 0 AggregatedCounters { Values: 93 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 50000000 Values: 0 Values: 9223372036854775807 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 } Generation: 2 Cookie: 1 ScaleStatus: NORMAL } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 Status: STATUS_OK LastInitDurationSeconds: 0 CreationTimestamp: 0 GapCount: 0 GapSize: 0 AvgWriteSpeedPerSec: 0 AvgWriteSpeedPerMin: 0 AvgWriteSpeedPerHour: 0 AvgWriteSpeedPerDay: 0 AvgReadSpeedPerSec: 0 AvgReadSpeedPerMin: 0 AvgReadSpeedPerHour: 0 AvgReadSpeedPerDay: 0 ReadBytesQuota: 0 WriteBytesQuota: 50000000 PartitionSize: 0 StartOffset: 0 EndOffset: 0 LastWriteTimestampMs: 39 WriteLagMs: 0 AvgQuotaSpeedPerSec: 0 AvgQuotaSpeedPerMin: 0 AvgQuotaSpeedPerHour: 0 AvgQuotaSpeedPerDay: 0 SourceIdCount: 0 SourceIdRetentionPeriodSec: 0 UsedReserveSize: 0 AggregatedCounters { Values: 39 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 
Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 50000000 Values: 0 Values: 9223372036854775807 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 } Generation: 2 Cookie: 1 ScaleStatus: NORMAL } ErrorCode: OK } } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] Test command err: 2025-06-24T20:32:28.198263Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:28.202041Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928037] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:28.202490Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928037] doesn't have tx info 2025-06-24T20:32:28.202549Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:28.202589Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-24T20:32:28.202654Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:28.202710Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.202777Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-24T20:32:28.203514Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [1:264:2254], now have 1 active actors on pipe 2025-06-24T20:32:28.203660Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:32:28.222053Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [1:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:28.224582Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:28.224798Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.225853Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928037] Config applied version 1 actor [1:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: 
"rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:28.226005Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:28.226425Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:28.226758Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [1:272:2260] 2025-06-24T20:32:28.229197Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 2025-06-24T20:32:28.229287Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [1:272:2260] 2025-06-24T20:32:28.229345Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:28.229393Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T20:32:28.229735Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928037, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:32:28.230546Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [1:275:2262], now have 1 active actors on pipe 2025-06-24T20:32:28.275704Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928137] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:28.284131Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928137] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:28.284481Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928137] doesn't have tx info 2025-06-24T20:32:28.284530Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928137] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:28.284589Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928137] no config, start with empty partitions and default config 2025-06-24T20:32:28.284632Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928137] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:28.284727Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.284794Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928137] doesn't have tx writes info 2025-06-24T20:32:28.285489Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928137] server connected, pipe [1:407:2360], now have 1 active actors on pipe 2025-06-24T20:32:28.285613Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928137] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:32:28.285824Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928137] Config update version 2(current 0) received from actor [1:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:28.292124Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928137] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:28.292273Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.293240Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928137] Config applied version 2 actor [1:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:28.293360Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:28.293789Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:28.294053Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928137, Partition: 0, State: StateInit] bootstrapping 0 [1:415:2366] 2025-06-24T20:32:28.298055Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:0:Initializer] Initializing completed. 
2025-06-24T20:32:28.298135Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928137, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 0 generation 2 [1:415:2366] 2025-06-24T20:32:28.298189Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928137, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:28.298239Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928137, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T20:32:28.298555Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928137, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:32:28.299141Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928137] server connected, pipe [1:418:2368], now have 1 active actors on pipe 2025-06-24T20:32:28.319707Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928138] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:28.322346Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928138] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:28.322589Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928138] doesn't have tx info 2025-06-24T20:32:28.322622Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928138] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:28.322648Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928138] no config, start with empty partitions and default config 2025-06-24T20:32:28.322673Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928138] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:28.322714Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.322754Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928138] doesn't have tx writes info 2025-06-24T20:32:28.323375Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928138] server connected, pipe [1:467:2405], now have 1 active actors on pipe 2025-06-24T20:32:28.323429Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928138] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:32:28.323581Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928138] Config update version 3(current 0) received from actor [1:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-24T20:32:28.325263Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-24T20:32:28.325375Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.326118Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: 
[PQ: 72057594037928138] Config applied version 3 actor [1:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 Lifetime ... :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:29.919193Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:29.919424Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [3:477:2413] 2025-06-24T20:32:29.921721Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed. 2025-06-24T20:32:29.921794Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [3:477:2413] 2025-06-24T20:32:29.921853Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:29.921908Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-24T20:32:29.922221Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928138, Partition: 1, State: StateIdle] no data for compaction 2025-06-24T20:32:29.922820Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928138] server connected, pipe [3:480:2415], now have 1 active actors on pipe 2025-06-24T20:32:29.962485Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:29.966404Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:29.966778Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info 2025-06-24T20:32:29.966836Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:29.966893Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-24T20:32:29.966936Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:29.966988Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:29.967052Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-24T20:32:29.967869Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [3:529:2452], now have 1 active actors on pipe 2025-06-24T20:32:29.967989Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:32:29.968197Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928139] Config update version 12(current 0) received from actor [3:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T20:32:29.970780Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T20:32:29.970930Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:29.971636Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928139] Config applied version 12 actor [3:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T20:32:29.971772Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:29.972178Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:29.972393Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:537:2458] 2025-06-24T20:32:29.974572Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 
2025-06-24T20:32:29.974648Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [3:537:2458] 2025-06-24T20:32:29.974714Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:29.974764Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-24T20:32:29.975070Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T20:32:29.975654Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [3:540:2460], now have 1 active actors on pipe 2025-06-24T20:32:29.977219Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [3:547:2463], now have 1 active actors on pipe 2025-06-24T20:32:29.977917Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928137] server connected, pipe [3:549:2464], now have 1 active actors on pipe 2025-06-24T20:32:29.978067Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928138] server connected, pipe [3:550:2464], now have 1 active actors on pipe 2025-06-24T20:32:29.978107Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [3:551:2464], now have 1 active actors on pipe 2025-06-24T20:32:29.978899Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [3:564:2475], now have 1 active actors on pipe 2025-06-24T20:32:30.004427Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:30.006947Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:30.007360Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info 2025-06-24T20:32:30.007419Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:30.007573Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:30.008114Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:30.008164Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-24T20:32:30.008275Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:30.008622Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:30.008846Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:621:2520] 2025-06-24T20:32:30.011082Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-06-24T20:32:30.012413Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-06-24T20:32:30.012704Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-06-24T20:32:30.013107Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-06-24T20:32:30.013391Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-06-24T20:32:30.013439Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T20:32:30.013481Z node 3 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T20:32:30.013522Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-24T20:32:30.013580Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [3:621:2520] 2025-06-24T20:32:30.013638Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:30.013688Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-06-24T20:32:30.013934Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T20:32:30.014789Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928037] server disconnected, pipe [3:547:2463] destroyed 2025-06-24T20:32:30.014855Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928137] server disconnected, pipe [3:549:2464] destroyed 2025-06-24T20:32:30.014938Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928138] server disconnected, pipe [3:550:2464] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetReadSessionsInfoResult { TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 3 ErrorCode: OK } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 0 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 3 ErrorCode: OK } PartitionResult { Partition: 1 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 3 ErrorCode: OK } PartitionResult { Partition: 2 ErrorCode: INITIALIZING ErrorReason: "tablet for partition is not running" } ErrorCode: OK } } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' is not created, Marker# PQ94" ErrorCode: UNKNOWN_TOPIC } 2025-06-24T20:32:28.240416Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:28.244984Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:28.245387Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928037] doesn't have tx info 2025-06-24T20:32:28.245442Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:28.245483Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-24T20:32:28.245528Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:28.245593Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.245684Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-24T20:32:28.246364Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [2:264:2254], now have 1 active actors on pipe 2025-06-24T20:32:28.246499Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:32:28.271704Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:28.274745Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:28.274919Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.275858Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928037] Config applied version 1 actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:28.276036Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:28.276527Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:28.276877Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:272:2260] 2025-06-24T20:32:28.279511Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 
2025-06-24T20:32:28.279586Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:272:2260] 2025-06-24T20:32:28.279651Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:28.279717Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T20:32:28.280088Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928037, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:32:28.280670Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [2:275:2262], now have 1 active actors on pipe 2025-06-24T20:32:28.369920Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928137] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:28.386146Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928137] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:28.386498Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928137] doesn't have tx info 2025-06-24T20:32:28.386547Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928137] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:28.386591Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928137] no config, start with empty partitions and default config 2025-06-24T20:32:28.386630Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928137] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:28.386674Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.386730Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928137] doesn't have tx writes info 2025-06-24T20:32:28.387429Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928137] server connected, pipe [2:408:2361], now have 1 active actors on pipe 2025-06-24T20:32:28.387542Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928137] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:32:28.387729Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928137] Config update version 2(current 0) received from actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:28.390354Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928137] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:28.390484Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.391707Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: 
[PQ: 72057594037928137] Config applied version 2 actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T20:32:28.391830Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:28.392192Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:28.392443Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928137, Partition: 0, State: StateInit] bootstrapping 0 [2:416:2367] 2025-06-24T20:32:28.394601Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:0:Initializer] Initializing completed. 2025-06-24T20:32:28.394674Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928137, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 0 generation 2 [2:416:2367] 2025-06-24T20:32:28.394732Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928137, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:28.394778Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928137, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T20:32:28.395019Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928137, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:32:28.395649Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928137] server connected, pipe [2:419:2369], now have 1 active actors on pipe 2025-06-24T20:32:28.417154Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928138] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:28.420815Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928138] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:28.421134Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928138] doesn't have tx info 2025-06-24T20:32:28.421181Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928138] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:28.421219Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928138] no config, start with empty partitions and default config 2025-06-24T20:32:28.421256Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928138] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:28.421303Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.421371Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928138] doesn't have tx writes info 2025-06-24T20:32:28.421992Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928138] server connected, pipe [2:468:2406], now have 1 active actors on pipe 2025-06-24T20:32:28.422108Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928138] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:32:28.422279Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928138] Config update version 3(current 0) received from actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-24T20:32:28.424661Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-24T20:32:28.424826Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:28.425610Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928138] Config applied version 3 actor [2 ... 
me: "rt3.dc1--topic2" Version: 11 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-24T20:32:29.850384Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:29.851064Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928138] Config applied version 11 actor [4:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 11 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-24T20:32:29.851201Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:29.851579Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:29.851817Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [4:476:2412] 2025-06-24T20:32:29.853893Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed. 2025-06-24T20:32:29.853969Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [4:476:2412] 2025-06-24T20:32:29.854035Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:29.854083Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-24T20:32:29.854372Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928138, Partition: 1, State: StateIdle] no data for compaction 2025-06-24T20:32:29.854934Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928138] server connected, pipe [4:479:2414], now have 1 active actors on pipe 2025-06-24T20:32:29.870100Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:29.873891Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:29.874201Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info 2025-06-24T20:32:29.874250Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:29.874293Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-24T20:32:29.874335Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:29.874383Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:29.874456Z node 4 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-24T20:32:29.875088Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [4:528:2451], now have 1 active actors on pipe 2025-06-24T20:32:29.875212Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:32:29.875391Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928139] Config update version 12(current 0) received from actor [4:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T20:32:29.877646Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T20:32:29.877781Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:29.878363Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928139] Config applied version 12 actor [4:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T20:32:29.878477Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:29.878836Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:29.879017Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [4:536:2457] 2025-06-24T20:32:29.881128Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 
2025-06-24T20:32:29.881214Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [4:536:2457] 2025-06-24T20:32:29.881270Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:29.881318Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-24T20:32:29.881629Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T20:32:29.882142Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [4:539:2459], now have 1 active actors on pipe 2025-06-24T20:32:29.883260Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [4:545:2462], now have 1 active actors on pipe 2025-06-24T20:32:29.883425Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928138] server connected, pipe [4:546:2463], now have 1 active actors on pipe 2025-06-24T20:32:29.883645Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [4:547:2463], now have 1 active actors on pipe 2025-06-24T20:32:29.896007Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [4:555:2470], now have 1 active actors on pipe 2025-06-24T20:32:29.918898Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:32:29.921437Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:32:29.921773Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info 2025-06-24T20:32:29.921829Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:32:29.921986Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:32:29.922534Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:32:29.922584Z node 4 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-24T20:32:29.922686Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:32:29.923036Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:32:29.923252Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [4:612:2515] 2025-06-24T20:32:29.925359Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-06-24T20:32:29.926560Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-06-24T20:32:29.926803Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-06-24T20:32:29.927190Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-06-24T20:32:29.927424Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-06-24T20:32:29.927468Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T20:32:29.927511Z node 4 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T20:32:29.927553Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-24T20:32:29.927605Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [4:612:2515] 2025-06-24T20:32:29.927665Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:32:29.927723Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-06-24T20:32:29.927961Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T20:32:29.928606Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928138] server disconnected, pipe [4:546:2463] destroyed 2025-06-24T20:32:29.928678Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928037] server disconnected, pipe [4:545:2462] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionOffsetsResult { TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 StartOffset: 0 EndOffset: 0 ErrorCode: OK WriteTimestampEstimateMS: 0 } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 1 StartOffset: 0 EndOffset: 0 ErrorCode: OK WriteTimestampEstimateMS: 0 } PartitionResult { Partition: 2 ErrorCode: INITIALIZING ErrorReason: "partition is not ready yet" } ErrorCode: OK } } } |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> TSchemeShardTest::Boot ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::PrimaryView [GOOD] Test command err: Trying to start YDB, gRPC: 8840, MsgBus: 12457 2025-06-24T20:31:36.612559Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616271839119859:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:36.612624Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003238/r3tmp/tmpwhLGMe/pdisk_1.dat 2025-06-24T20:31:37.425056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:31:37.425173Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:31:37.427933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:31:37.469246Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:37.491367Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616271839119841:2079] 1750797096610123 != 1750797096610126 TServer::EnableGrpc on GrpcPort 8840, node 1 2025-06-24T20:31:37.692026Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:31:37.703518Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:31:37.703541Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:31:37.703549Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:31:37.703686Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12457 TClient is connected to server localhost:12457 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:31:38.571476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:31:38.602381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:31:38.833003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:39.099395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:31:39.203060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:31:41.612671Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616271839119859:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:41.612786Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:31:42.281258Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616297608925266:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:42.281373Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:42.650737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:42.702828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:42.798999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:42.842449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:42.886228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:42.935943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:43.008685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:43.089285Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616301903893224:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:43.089365Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:43.089784Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616301903893229:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:43.095232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:31:43.110532Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616301903893231:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:31:43.177956Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616301903893282:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 65141, MsgBus: 13939 2025-06-24T20:31:46.291623Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616313016224693:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:46.291668Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath ... 9.562162Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519616454091519980:2079] 1750797139435705 != 1750797139435708 2025-06-24T20:32:19.579072Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:19.579195Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:19.580692Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31742, node 7 2025-06-24T20:32:19.649300Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:19.649324Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:19.649334Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:19.649475Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20431 TClient is connected to server localhost:20431 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:32:20.210780Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:20.226839Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:32:20.299132Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:32:20.448117Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:32:20.455975Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:32:20.520608Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:32:23.236762Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519616471271390795:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:23.236890Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:23.278321Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:32:23.313695Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:32:23.349825Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:32:23.382841Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:32:23.422379Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:32:23.467594Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:32:23.542101Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:32:23.604920Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519616471271391456:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:23.605040Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:23.605141Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519616471271391461:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:32:23.608923Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:32:23.620134Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519616471271391463:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:32:23.721680Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519616471271391514:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:32:24.443355Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519616454091519999:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:24.443504Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:32:25.450738Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:32:25.534099Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:32:25.584404Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TSubDomainTest::StartAndStopTenanNode [GOOD] >> TSubDomainTest::StartTenanNodeAndStopAtDestructor >> TSchemeShardCheckProposeSize::CopyTable ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithNonEmptyRangeReads [GOOD] Test command err: 2025-06-24T20:30:55.944623Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616093668902704:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:55.944824Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003064/r3tmp/tmptaX901/pdisk_1.dat 2025-06-24T20:30:56.929151Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:56.929249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:56.961415Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:56.968529Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:30:57.073990Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:57.087573Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:57.289760Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.140387s 2025-06-24T20:30:57.289893Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.140532s TServer::EnableGrpc on GrpcPort 22410, node 1 2025-06-24T20:30:57.699592Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:30:57.699619Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:30:57.699633Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:30:57.699760Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7262 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:30:58.617394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:7262 2025-06-24T20:31:00.951282Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616093668902704:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:31:00.951360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:31:01.315641Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616119438707351:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:01.315790Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:01.869325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:31:02.147570Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616123733674838:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:02.147665Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:02.148337Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616123733674843:2317], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:31:02.153502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:31:02.211396Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616123733674845:2318], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T20:31:02.301764Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616123733674921:2801] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:31:02.489497Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyht63zyajzfh0r6ek9dqtph, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmMwMzI4ZDMtYzE5ODcyYi1mMzA2OTE3NS1lMTRhODhlZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:02.531017Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyht64c07fw54sr4c5hfxas8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmMwMzI4ZDMtYzE5ODcyYi1mMzA2OTE3NS1lMTRhODhlZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:02.567609Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyht64d458ae3np5tzawxkpg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmMwMzI4ZDMtYzE5ODcyYi1mMzA2OTE3NS1lMTRhODhlZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:02.615970Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyht64ed6mmck6ecatk4t928, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmMwMzI4ZDMtYzE5ODcyYi1mMzA2OTE3NS1lMTRhODhlZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:02.677456Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyht64gj3raftmkrnw1q3nsy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmMwMzI4ZDMtYzE5ODcyYi1mMzA2OTE3NS1lMTRhODhlZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:02.717355Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyht64ht4enh1gys7srnmd1n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmMwMzI4ZDMtYzE5ODcyYi1mMzA2OTE3NS1lMTRhODhlZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:02.764085Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyht64k4629vv8465dkvxb7y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmMwMzI4ZDMtYzE5ODcyYi1mMzA2OTE3NS1lMTRhODhlZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:02.824003Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. Ctx: { TraceId: 01jyht64n24zr6ze8hk9wrfm4a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmMwMzI4ZDMtYzE5ODcyYi1mMzA2OTE3NS1lMTRhODhlZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:02.851088Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. 
Ctx: { TraceId: 01jyht64p1dg0c1dnf7ycec8d1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmMwMzI4ZDMtYzE5ODcyYi1mMzA2OTE3NS1lMTRhODhlZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:02.889466Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jyht64ptfab9yza3dsqap7en, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmMwMzI4ZDMtYzE5ODcyYi1mMzA2OTE3NS1lMTRhODhlZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:02.929178Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jyht64rd15k661p968hkaw31, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmMwMzI4ZDMtYzE5ODcyYi1mMzA2OTE3NS1lMTRhODhlZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:02.961879Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jyht64se2ntf139fg74hf4c9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmMwMzI4ZDMtYzE5ODcyYi1mMzA2OTE3NS1lMTRhODhlZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:31:02.996175Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jyht64tafzej1g4m0vzhqbvh, Database: , Databas ... 976731321. Ctx: { TraceId: 01jyht8pgjfcmhp0vxyj5sacav, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWQ2Njk2MWUtYzJiYjg5ZjgtMjc5YWY2MDItNWUyMmU3ODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.663616Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731322. Ctx: { TraceId: 01jyht8pgh1zndefftwawt0gc2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjdiNjdjYzYtZmQ3N2U1ZDktMmZlMjI1NDctNjQ1YmUyN2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.664064Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731323. Ctx: { TraceId: 01jyht8pgj8409t02h9n7zdfkr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGMwMTM0MTItZjYxZTg0NGUtMWIwZmQ2NWUtMTg1NTJjNjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.666749Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731326. Ctx: { TraceId: 01jyht8pgtck0k0e7jmzy0d3k2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzBjMzQwZmItZDk5ZTEwOTctNjk1MzkwNzMtYTczZGRkZTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.673235Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731328. Ctx: { TraceId: 01jyht8phf68q91wd69efb89pf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDBlMmQ3NTgtMjA5NGQ3YTctYjI0MjAxZTMtZTAwMGE5ZTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.673258Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731327. Ctx: { TraceId: 01jyht8phe7rmxrwc9twqbxfer, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2Q2NDkwNTItZmFkM2IwOWItYTkyZjJiMC03M2QwZGNkZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:32:26.673708Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731329. Ctx: { TraceId: 01jyht8phe5hf33dm6ry5ac1xd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmUzZmRmOWEtZTc2MjE1ZWYtZjQwODA4ZmEtMmE4N2ZjYjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.680983Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731332. Ctx: { TraceId: 01jyht8phmbqjjb3n7mrzj0vz0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGI3NGY4ODYtYzUxMGZhMWEtOWNiOTljNjAtZGY1ZGRkNWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.681479Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731330. Ctx: { TraceId: 01jyht8phk653swzg7te8nj7pg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTY1YTIzN2QtZjZkZWNlYTItNDRlZmIzMTAtODRkZjE4YmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.682385Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731331. Ctx: { TraceId: 01jyht8phm6bsrvbt4hnv2xst9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDVkYzgzY2MtNjZkMzQ3NzYtNzhmZWQ0YS1lMGJiNWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.683744Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731333. Ctx: { TraceId: 01jyht8phre38err291tay8zjk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjdiNjdjYzYtZmQ3N2U1ZDktMmZlMjI1NDctNjQ1YmUyN2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.689497Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731334. Ctx: { TraceId: 01jyht8phvdbtp7dwf786p6kbf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWQ2Njk2MWUtYzJiYjg5ZjgtMjc5YWY2MDItNWUyMmU3ODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.692716Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731335. Ctx: { TraceId: 01jyht8phvan7kc4rye5pc9wqy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzBjMzQwZmItZDk5ZTEwOTctNjk1MzkwNzMtYTczZGRkZTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.694092Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731337. Ctx: { TraceId: 01jyht8phz9zrxf4bjnc644ddf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmUzZmRmOWEtZTc2MjE1ZWYtZjQwODA4ZmEtMmE4N2ZjYjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.694243Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731336. Ctx: { TraceId: 01jyht8phydk4e0wzvjn8zzcc6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGMwMTM0MTItZjYxZTg0NGUtMWIwZmQ2NWUtMTg1NTJjNjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.694885Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731338. Ctx: { TraceId: 01jyht8pj38nsr670qkvcyyvh2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDBlMmQ3NTgtMjA5NGQ3YTctYjI0MjAxZTMtZTAwMGE5ZTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:32:26.695374Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731339. Ctx: { TraceId: 01jyht8pj38v70rv585344hx88, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2Q2NDkwNTItZmFkM2IwOWItYTkyZjJiMC03M2QwZGNkZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.703019Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731340. Ctx: { TraceId: 01jyht8pj65sqadaqvgz729r31, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTY1YTIzN2QtZjZkZWNlYTItNDRlZmIzMTAtODRkZjE4YmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo 2025-06-24T20:32:26.732944Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731343. Ctx: { TraceId: 01jyht8pjk3nvwxpx621yh64yd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2Q2NDkwNTItZmFkM2IwOWItYTkyZjJiMC03M2QwZGNkZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.733079Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731344. Ctx: { TraceId: 01jyht8pjk3mrppnshden1zv5g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDVkYzgzY2MtNjZkMzQ3NzYtNzhmZWQ0YS1lMGJiNWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.733670Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731345. Ctx: { TraceId: 01jyht8pjnd90825ass2wx527g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDBlMmQ3NTgtMjA5NGQ3YTctYjI0MjAxZTMtZTAwMGE5ZTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750797062028 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-24T20:32:26.736849Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731347. Ctx: { TraceId: 01jyht8pjz2mrt610r1hx63t5x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTY1YTIzN2QtZjZkZWNlYTItNDRlZmIzMTAtODRkZjE4YmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.737546Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731348. Ctx: { TraceId: 01jyht8pjz159zwg05v2bv2bzc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGI3NGY4ODYtYzUxMGZhMWEtOWNiOTljNjAtZGY1ZGRkNWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.737961Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731349. 
Ctx: { TraceId: 01jyht8pjz8wr0p9xtyjyfsa2x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzBjMzQwZmItZDk5ZTEwOTctNjk1MzkwNzMtYTczZGRkZTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.738105Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731346. Ctx: { TraceId: 01jyht8pjq56xjht9k4fnkvvkt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmUzZmRmOWEtZTc2MjE1ZWYtZjQwODA4ZmEtMmE4N2ZjYjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.738354Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731341. Ctx: { TraceId: 01jyht8pjg85mrd5svsfbnh2rq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWQ2Njk2MWUtYzJiYjg5ZjgtMjc5YWY2MDItNWUyMmU3ODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.741120Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731342. Ctx: { TraceId: 01jyht8pjg65yaqtn8mjk42rm7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGMwMTM0MTItZjYxZTg0NGUtMWIwZmQ2NWUtMTg1NTJjNjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:32:26.756746Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976731350. Ctx: { TraceId: 01jyht8pm0ej3d9sda20tzy391, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjdiNjdjYzYtZmQ3N2U1ZDktMmZlMjI1NDctNjQ1YmUyN2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750797062028 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 2 shards >> TestDataErasure::Run3CyclesForAllSupportedObjects >> Cdc::Alter [GOOD] >> TestDataErasure::SchemeShardCounterDoesNotConsistWithBscCounter >> Cdc::DescribeStream >> TSubDomainTest::DeleteTableAndThenForceDeleteSubDomain [GOOD] >> TSubDomainTest::DatashardRunAtOtherNodeWhenOneNodeIsStopped >> TestDataErasure::ManualLaunch3Cycles >> KqpPg::InsertNoTargetColumns_ColumnOrder-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_NotOneSize+useSink >> TestDataErasure::DataErasureManualLaunch >> TSchemeShardTest::Boot [GOOD] >> TSchemeShardTest::AlterTableKeyColumns >> TTicketParserTest::LoginGood >> TTicketParserTest::AccessServiceAuthenticationOk |88.9%| [TA] $(B)/ydb/services/ydb/table_split_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TTicketParserTest::BulkAuthorizationRetryError >> TTicketParserTest::TicketFromCertificateCheckIssuerGood >> TTicketParserTest::NebiusAuthenticationUnavailable >> TSubDomainTest::CreateTableInsideAndForceDeleteSubDomain [GOOD] >> TSubDomainTest::CreateTableInsidetThenStopTenantAndForceDeleteSubDomain >> TSubDomainTest::CreateTablet [GOOD] >> TSubDomainTest::CreateTabletForUnknownDomain >> TTicketParserTest::TicketFromCertificateWithValidationGood >> THiveTest::TestProgressWithMaxTabletsScheduled [GOOD] >> THiveTest::TestResetServerlessComputeResourcesMode >> TSchemeShardCheckProposeSize::CopyTable [GOOD] >> TSchemeShardCheckProposeSize::CopyTables >> TTicketParserTest::AuthenticationWithUserAccount >> TSubDomainTest::LsAltered [GOOD] >> TModifyUserTest::ModifyLdapUser [GOOD] >> TModifyUserTest::ModifyUserIsEnabled >> TTicketParserTest::AuthorizationRetryError >> ReadOnlyVDisk::TestDiscover [GOOD] >> TSchemeShardTest::AlterTableKeyColumns [GOOD] >> TSchemeShardTest::AlterTableFollowers >> TSubDomainTest::UserAttributesApplyIf [GOOD] >> TSubDomainTest::FailIfAffectedSetNotInterior [GOOD] >> TSubDomainTest::GenericCases >> Cdc::ShouldDeliverChangesOnSplitMerge [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentAlterTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestDiscover [GOOD] Test command err: RandomSeed# 1352386974189556733 SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:1:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 3 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] SEND TEvPut with key [1:1:3:0:0:32768:0] 2025-06-24T20:32:29.474653Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5318:698] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] SEND TEvPut with key [1:1:4:0:0:131072:0] 2025-06-24T20:32:29.823583Z 1 00h02m00.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5318:698] 2025-06-24T20:32:29.825101Z 2 00h02m00.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5325:705] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] SEND TEvPut 
with key [1:1:5:0:0:32768:0] 2025-06-24T20:32:30.116215Z 3 00h02m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5332:712] 2025-06-24T20:32:30.117305Z 1 00h02m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5318:698] 2025-06-24T20:32:30.118087Z 2 00h02m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5325:705] 2025-06-24T20:32:30.118488Z 1 00h02m30.110512s :BS_PROXY_PUT ERROR: [dbc76cab64ff99f1] Result# TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:5:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 2 Situations# EUUUUU } { OrderNumber# 3 Situations# UPUUUU } { OrderNumber# 4 Situations# UUPUUU } { OrderNumber# 5 Situations# UUUPUU } { OrderNumber# 6 Situations# UUUUPU } { OrderNumber# 7 Situations# UUUUUP } { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0.999988} GroupId# 2181038080 Marker# BPP12 TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:5:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 2 Situations# EUUUUU } { OrderNumber# 3 Situations# UPUUUU } { OrderNumber# 4 Situations# UUPUUU } { OrderNumber# 5 Situations# UUUPUU } { OrderNumber# 6 Situations# UUUUPU } { OrderNumber# 7 Situations# UUUUUP } { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0.999988} === Putting VDisk #3 to read-only === Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] === Read all 6 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} === 
Putting VDisk #4 to read-only === Setting VDisk read-only to 1 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] === Read all 6 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} === Putting VDisk #5 to read-only === Setting VDisk read-only to 1 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] === Read all 6 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} === Putting VDisk #6 to read-only === Setting VDisk read-only to 1 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] === Read all 6 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} === Putting VDisk #0 to normal === Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Putting VDisk #1 to normal === Setting VDisk read-only to 0 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] === 
Putting VDisk #2 to normal === Setting VDisk read-only to 0 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] === Putting VDisk #3 to normal === Setting VDisk read-only to 0 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] === Putting VDisk #4 to normal === Setting VDisk read-only to 0 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] === Putting VDisk #5 to normal === Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] === Putting VDisk #6 to normal === Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] SEND TEvPut with key [1:1:6:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::LsAltered [GOOD] Test command err: 2025-06-24T20:32:26.111311Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616485335888678:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:26.111538Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047a2/r3tmp/tmp1YEK7v/pdisk_1.dat 2025-06-24T20:32:26.685409Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:26.687135Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616485335888648:2079] 1750797146110440 != 1750797146110443 2025-06-24T20:32:26.724715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:26.724818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:26.726921Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13310 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T20:32:26.943175Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519616485335888884:2104] Handle TEvNavigate describe path dc-1 2025-06-24T20:32:26.983830Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519616485335889163:2259] HANDLE EvNavigateScheme dc-1 2025-06-24T20:32:26.984029Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519616485335888908:2117], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:26.984093Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519616485335888908:2117], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-24T20:32:26.984284Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519616485335889164:2260][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:32:26.986261Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616485335888618:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616485335889168:2260] 2025-06-24T20:32:26.986325Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616485335888618:2049] Subscribe: subscriber# [1:7519616485335889168:2260], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:26.986383Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616485335888621:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616485335889169:2260] 2025-06-24T20:32:26.986413Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616485335888621:2052] Subscribe: subscriber# [1:7519616485335889169:2260], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:26.986436Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616485335888624:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616485335889170:2260] 2025-06-24T20:32:26.986450Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616485335888624:2055] Subscribe: subscriber# [1:7519616485335889170:2260], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:26.986542Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616485335889168:2260][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616485335888618:2049] 2025-06-24T20:32:26.986566Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616485335889169:2260][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616485335888621:2052] 2025-06-24T20:32:26.986593Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616485335889170:2260][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616485335888624:2055] 2025-06-24T20:32:26.986655Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: 
[main][1:7519616485335889164:2260][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616485335889165:2260] 2025-06-24T20:32:26.986701Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616485335889164:2260][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616485335889166:2260] 2025-06-24T20:32:26.986758Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519616485335889164:2260][/dc-1] Set up state: owner# [1:7519616485335888908:2117], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:26.986875Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616485335889164:2260][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616485335889167:2260] 2025-06-24T20:32:26.986918Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519616485335889164:2260][/dc-1] Path was already updated: owner# [1:7519616485335888908:2117], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:26.986976Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616485335889168:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616485335889165:2260], cookie# 1 2025-06-24T20:32:26.986994Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616485335889169:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616485335889166:2260], cookie# 1 2025-06-24T20:32:26.987017Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616485335889170:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616485335889167:2260], cookie# 1 2025-06-24T20:32:26.987048Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616485335888618:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616485335889168:2260] 2025-06-24T20:32:26.987072Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616485335888618:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616485335889168:2260], cookie# 1 2025-06-24T20:32:26.987089Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616485335888621:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616485335889169:2260] 2025-06-24T20:32:26.987102Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616485335888621:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616485335889169:2260], cookie# 1 2025-06-24T20:32:26.987115Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616485335888624:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616485335889170:2260] 
2025-06-24T20:32:26.987126Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616485335888624:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616485335889170:2260], cookie# 1 2025-06-24T20:32:26.987221Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616485335889168:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616485335888618:2049], cookie# 1 2025-06-24T20:32:26.987238Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616485335889169:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616485335888621:2052], cookie# 1 2025-06-24T20:32:26.987251Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616485335889170:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616485335888624:2055], cookie# 1 2025-06-24T20:32:26.987281Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616485335889164:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616485335889165:2260], cookie# 1 2025-06-24T20:32:26.987305Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616485335889164:2260][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:32:26.987320Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616485335889164:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616485335889166:2260], cookie# 1 2025-06-24T20:32:26.987332Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616485335889164:2260][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:32:26.987362Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616485335889164:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616485335889167:2260], cookie# 1 2025-06-24T20:32:26.987412Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519616485335889164:2260][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:32:27.051836Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519616485335888908:2117], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 Shards ... 
axPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750797150599 ParentPathId: 1 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 ... (TRUNCATED) TClient::Ls request: /dc-1 2025-06-24T20:32:31.608429Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [2:7519616497516242680:2096] Handle TEvNavigate describe path /dc-1 2025-06-24T20:32:31.625644Z node 2 :TX_PROXY DEBUG: describe.cpp:272: Actor# [2:7519616506106177854:2329] HANDLE EvNavigateScheme /dc-1 2025-06-24T20:32:31.625758Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519616501811210224:2129], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:31.625838Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][2:7519616501811210432:2259][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [2:7519616501811210224:2129], cookie# 4 2025-06-24T20:32:31.625902Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:7519616501811210436:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7519616501811210433:2259], cookie# 4 2025-06-24T20:32:31.625923Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:7519616501811210437:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7519616501811210434:2259], cookie# 4 2025-06-24T20:32:31.625944Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:7519616501811210438:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7519616501811210435:2259], cookie# 4 2025-06-24T20:32:31.625973Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [2:7519616497516242600:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7519616501811210436:2259], cookie# 4 2025-06-24T20:32:31.626001Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [2:7519616497516242603:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7519616501811210437:2259], cookie# 4 
2025-06-24T20:32:31.626021Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [2:7519616497516242606:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7519616501811210438:2259], cookie# 4 2025-06-24T20:32:31.626047Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:7519616501811210436:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7519616497516242600:2049], cookie# 4 2025-06-24T20:32:31.626064Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:7519616501811210437:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7519616497516242603:2052], cookie# 4 2025-06-24T20:32:31.626081Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:7519616501811210438:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7519616497516242606:2055], cookie# 4 2025-06-24T20:32:31.626110Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][2:7519616501811210432:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7519616501811210433:2259], cookie# 4 2025-06-24T20:32:31.626133Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][2:7519616501811210432:2259][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:32:31.626154Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][2:7519616501811210432:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7519616501811210434:2259], cookie# 4 2025-06-24T20:32:31.626166Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][2:7519616501811210432:2259][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:32:31.626181Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][2:7519616501811210432:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7519616501811210435:2259], cookie# 4 2025-06-24T20:32:31.626203Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][2:7519616501811210432:2259][/dc-1] Sync is done in the ring group: cookie# 4, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:32:31.626254Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [2:7519616501811210224:2129], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T20:32:31.626320Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [2:7519616501811210224:2129], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [2:7519616501811210432:2259] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750797150564 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:32:31.626388Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519616501811210224:2129], cacheItem# { Subscriber: { Subscriber: [2:7519616501811210432:2259] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 
} Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750797150564 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 4 IsSync: true Partial: 0 } 2025-06-24T20:32:31.626569Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519616506106177855:2330], recipient# [2:7519616506106177854:2329], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T20:32:31.626606Z node 2 :TX_PROXY DEBUG: describe.cpp:356: Actor# [2:7519616506106177854:2329] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T20:32:31.626683Z node 2 :TX_PROXY DEBUG: describe.cpp:435: Actor# [2:7519616506106177854:2329] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1" Options { ShowPrivateTable: true } 2025-06-24T20:32:31.628094Z node 2 :TX_PROXY DEBUG: describe.cpp:448: Actor# [2:7519616506106177854:2329] Handle TEvDescribeSchemeResult Forward to# [2:7519616506106177853:2328] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 63 Record# Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750797150564 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 
MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750797150564 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750797150599 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } Children { Name: ".sys" PathId: 18446744073709551615 ... (TRUNCATED) >> KqpPg::TableSelect-useSink [GOOD] >> KqpPg::TableInsert+useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::UserAttributesApplyIf [GOOD] Test command err: 2025-06-24T20:32:26.217128Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616488158606090:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:26.217482Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00479e/r3tmp/tmpeZQXE0/pdisk_1.dat 2025-06-24T20:32:26.676443Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:26.679692Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616488158605931:2079] 1750797146202172 != 1750797146202175 2025-06-24T20:32:26.705910Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:26.706028Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:26.711569Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2589 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T20:32:26.966699Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519616488158606166:2104] Handle TEvNavigate describe path dc-1 2025-06-24T20:32:27.009377Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519616492453573741:2258] HANDLE EvNavigateScheme dc-1 2025-06-24T20:32:27.009550Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519616488158606189:2117], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:27.009608Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519616488158606189:2117], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-24T20:32:27.009802Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519616492453573742:2259][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:32:27.013079Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616488158605901:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616492453573746:2259] 2025-06-24T20:32:27.013155Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616488158605901:2049] Subscribe: subscriber# [1:7519616492453573746:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.013231Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616488158605904:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616492453573747:2259] 2025-06-24T20:32:27.013273Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616488158605904:2052] Subscribe: subscriber# [1:7519616492453573747:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.013335Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616492453573746:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616488158605901:2049] 2025-06-24T20:32:27.013393Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616492453573747:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616488158605904:2052] 2025-06-24T20:32:27.013440Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616492453573742:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616492453573743:2259] 2025-06-24T20:32:27.013490Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616492453573742:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616492453573744:2259] 2025-06-24T20:32:27.013576Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519616492453573742:2259][/dc-1] Set up state: owner# [1:7519616488158606189:2117], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 
72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:27.013601Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616488158605907:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616492453573748:2259] 2025-06-24T20:32:27.013656Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616488158605907:2055] Subscribe: subscriber# [1:7519616492453573748:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.013721Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616488158605901:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616492453573746:2259] 2025-06-24T20:32:27.013729Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616492453573748:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616488158605907:2055] 2025-06-24T20:32:27.013738Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616488158605904:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616492453573747:2259] 2025-06-24T20:32:27.013755Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616492453573746:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492453573743:2259], cookie# 1 2025-06-24T20:32:27.013771Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616492453573747:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492453573744:2259], cookie# 1 2025-06-24T20:32:27.013783Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616492453573748:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492453573745:2259], cookie# 1 2025-06-24T20:32:27.013805Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616492453573742:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616492453573745:2259] 2025-06-24T20:32:27.013875Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519616492453573742:2259][/dc-1] Path was already updated: owner# [1:7519616488158606189:2117], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:27.013928Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616488158605901:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492453573746:2259], cookie# 1 2025-06-24T20:32:27.013978Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616488158605904:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492453573747:2259], cookie# 1 2025-06-24T20:32:27.014012Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616492453573746:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# 
[1:7519616488158605901:2049], cookie# 1 2025-06-24T20:32:27.014030Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616492453573747:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616488158605904:2052], cookie# 1 2025-06-24T20:32:27.014062Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616492453573742:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616492453573743:2259], cookie# 1 2025-06-24T20:32:27.014102Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616492453573742:2259][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:32:27.014123Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616492453573742:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616492453573744:2259], cookie# 1 2025-06-24T20:32:27.014133Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616492453573742:2259][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:32:27.014157Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616488158605907:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616492453573748:2259] 2025-06-24T20:32:27.014184Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616488158605907:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492453573748:2259], cookie# 1 2025-06-24T20:32:27.014228Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616492453573748:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616488158605907:2055], cookie# 1 2025-06-24T20:32:27.014270Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616492453573742:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616492453573745:2259], cookie# 1 2025-06-24T20:32:27.014292Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519616492453573742:2259][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:32:27.114467Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519616488158606189:2117], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsI ... 
6: Actor# [2:7519616501595435597:2353] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T20:32:30.973755Z node 2 :TX_PROXY DEBUG: describe.cpp:435: Actor# [2:7519616501595435597:2353] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1/USER_0" Options { ShowPrivateTable: true } 2025-06-24T20:32:30.974378Z node 2 :TX_PROXY DEBUG: describe.cpp:448: Actor# [2:7519616501595435597:2353] Handle TEvDescribeSchemeResult Forward to# [2:7519616501595435596:2352] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "/dc-1/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750797150760 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 4 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046644480 PathId: 2 } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrA3" Value: "ValA3" } } PathId: 2 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750797150760 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 4 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1... 
(TRUNCATED) 2025-06-24T20:32:31.036788Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:32:31.103006Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519616501595435333:2175], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:31.103088Z node 2 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [2:7519616501595435333:2175], path# /dc-1/.metadata/initialization/migrations, domainOwnerId# 72057594046644480 2025-06-24T20:32:31.103369Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][2:7519616505890402896:2355][/dc-1/.metadata/initialization/migrations] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:32:31.103846Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7519616497300467639:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [2:7519616505890402900:2355] 2025-06-24T20:32:31.103860Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7519616497300467639:2049] Upsert description: path# /dc-1/.metadata/initialization/migrations 2025-06-24T20:32:31.103952Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7519616497300467639:2049] Subscribe: subscriber# [2:7519616505890402900:2355], path# /dc-1/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:31.103992Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7519616497300467642:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [2:7519616505890402901:2355] 2025-06-24T20:32:31.104000Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7519616497300467642:2052] Upsert description: path# /dc-1/.metadata/initialization/migrations 2025-06-24T20:32:31.104022Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7519616497300467642:2052] Subscribe: subscriber# [2:7519616505890402901:2355], path# /dc-1/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:31.104044Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7519616497300467645:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [2:7519616505890402902:2355] 2025-06-24T20:32:31.104051Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7519616497300467645:2055] Upsert description: path# /dc-1/.metadata/initialization/migrations 2025-06-24T20:32:31.104070Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7519616497300467645:2055] Subscribe: subscriber# [2:7519616505890402902:2355], path# /dc-1/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:31.104107Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:7519616505890402900:2355][/dc-1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: 
/dc-1/.metadata/initialization/migrations Version: 0 }: sender# [2:7519616497300467639:2049] 2025-06-24T20:32:31.104128Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:7519616505890402901:2355][/dc-1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/initialization/migrations Version: 0 }: sender# [2:7519616497300467642:2052] 2025-06-24T20:32:31.104145Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:7519616505890402902:2355][/dc-1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/initialization/migrations Version: 0 }: sender# [2:7519616497300467645:2055] 2025-06-24T20:32:31.104179Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:7519616505890402896:2355][/dc-1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/initialization/migrations Version: 0 }: sender# [2:7519616505890402897:2355] 2025-06-24T20:32:31.104223Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:7519616505890402896:2355][/dc-1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/initialization/migrations Version: 0 }: sender# [2:7519616505890402898:2355] 2025-06-24T20:32:31.104285Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][2:7519616505890402896:2355][/dc-1/.metadata/initialization/migrations] Set up state: owner# [2:7519616501595435333:2175], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:31.104313Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:7519616505890402896:2355][/dc-1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/initialization/migrations Version: 0 }: sender# [2:7519616505890402899:2355] 2025-06-24T20:32:31.104341Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][2:7519616505890402896:2355][/dc-1/.metadata/initialization/migrations] Ignore empty state: owner# [2:7519616501595435333:2175], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:31.104368Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7519616497300467639:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:7519616505890402900:2355] 2025-06-24T20:32:31.104385Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7519616497300467642:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:7519616505890402901:2355] 2025-06-24T20:32:31.104397Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7519616497300467645:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:7519616505890402902:2355] 2025-06-24T20:32:31.104460Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [2:7519616501595435333:2175], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/initialization/migrations PathId: Strong: 1 } 2025-06-24T20:32:31.104545Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [2:7519616501595435333:2175], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/initialization/migrations PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [2:7519616505890402896:2355] DomainOwnerId: 
72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:32:31.104635Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519616501595435333:2175], cacheItem# { Subscriber: { Subscriber: [2:7519616505890402896:2355] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:31.104712Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519616505890402903:2356], recipient# [2:7519616505890402895:2259], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> THiveTest::TestResetServerlessComputeResourcesMode [GOOD] >> TSchemeShardTest::AlterTableFollowers [GOOD] >> TSchemeShardTest::AlterTableSizeToSplit >> TTicketParserTest::LoginGood [GOOD] >> TTicketParserTest::LoginGoodWithGroups >> TTicketParserTest::NebiusAuthenticationUnavailable [GOOD] >> TTicketParserTest::NebiusAuthorizationRetryError >> TTicketParserTest::TicketFromCertificateCheckIssuerGood [GOOD] >> TTicketParserTest::TicketFromCertificateCheckIssuerBad >> Cdc::RacyCreateAndSend [GOOD] >> Cdc::RacySplitAndDropTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> THiveTest::TestResetServerlessComputeResourcesMode [GOOD] Test command err: 2025-06-24T20:31:06.765045Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-24T20:31:06.796995Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:06.797316Z node 2 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 2 PDiskId# 1 Path# "SectorMap:1:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-24T20:31:06.798399Z node 2 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:31:06.798771Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-24T20:31:06.799915Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:49:2075] ControllerId# 72057594037932033 
2025-06-24T20:31:06.799968Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-24T20:31:06.800096Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-24T20:31:06.800247Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-24T20:31:06.815734Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-24T20:31:06.815816Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-24T20:31:06.818385Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:48:2074] Create Queue# [2:58:2079] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:06.818605Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:48:2074] Create Queue# [2:59:2080] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:06.818758Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:48:2074] Create Queue# [2:60:2081] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:06.818894Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:48:2074] Create Queue# [2:61:2082] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:06.819038Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:48:2074] Create Queue# [2:62:2083] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:06.819341Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:48:2074] Create Queue# [2:63:2084] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:06.819530Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:48:2074] Create Queue# [2:64:2085] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:06.819563Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-24T20:31:06.819694Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [2:49:2075] 2025-06-24T20:31:06.819747Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [2:49:2075] 2025-06-24T20:31:06.819812Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-24T20:31:06.819920Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-24T20:31:06.823198Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-24T20:31:06.826111Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:06.826302Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-24T20:31:06.826897Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:31:06.835230Z node 1 :BS_NODE 
DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-24T20:31:06.836824Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-24T20:31:06.836912Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-24T20:31:06.837845Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:73:2077] ControllerId# 72057594037932033 2025-06-24T20:31:06.837882Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-24T20:31:06.837977Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-24T20:31:06.838107Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-24T20:31:06.861024Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-24T20:31:06.861098Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-24T20:31:06.863098Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2076] Create Queue# [1:81:2082] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:06.863441Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2076] Create Queue# [1:82:2083] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:06.867653Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2076] Create Queue# [1:83:2084] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:06.867876Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2076] Create Queue# [1:84:2085] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:06.871573Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2076] Create Queue# [1:85:2086] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:06.871804Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2076] Create Queue# [1:86:2087] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:06.876171Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2076] Create Queue# [1:87:2088] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:06.876265Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-24T20:31:06.876365Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:73:2077] 2025-06-24T20:31:06.876401Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:73:2077] 2025-06-24T20:31:06.876451Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-24T20:31:06.876496Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-24T20:31:06.877525Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-24T20:31:06.877828Z node 2 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:06.892935Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [2:49:2075] 2025-06-24T20:31:06.893046Z 
node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:06.893105Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-24T20:31:06.893342Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-24T20:31:06.893502Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:06.947459Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:73:2077] 2025-06-24T20:31:06.947544Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:06.947580Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-24T20:31:06.947732Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:06.947771Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-24T20:31:06.954880Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-24T20:31:06.961099Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-24T20:31:06.961392Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:06.961769Z node 2 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:31:06.962316Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 1 2025-06-24T20:31:06.962356Z node 2 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-24T20:31:06.962476Z node 2 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-24T20:31:06.962792Z node 2 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-24T20:31:06.962864Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:31:06.963097Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:73:2077] 2025-06-24T20:31:06.963133Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:06.971528Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-24T20:31:06.973706Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [2:53:2064] 2025-06-24T20:31:06.973762Z node 2 :PIPE_CLIENT D ... 
T_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594046678944 entry.State: StNormal ev: {EvForward TabletID: 72057594046678944 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:32:35.328810Z node 25 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 25 selfDC 2 leaderDC 1 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72057594046678944 followers: 0 countLeader 1 allowFollowers 0 winner: [24:333:2202] 2025-06-24T20:32:35.328877Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72057594046678944] forward result remote node 24 [25:696:2221] 2025-06-24T20:32:35.328938Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72057594046678944] remote node connected [25:696:2221] 2025-06-24T20:32:35.328966Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594046678944]::SendEvent [25:696:2221] 2025-06-24T20:32:35.329059Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:349: TClient[72075186224037888] connect request undelivered [24:692:2409] 2025-06-24T20:32:35.329113Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:559: TClient[72075186224037888] immediate retry [24:692:2409] 2025-06-24T20:32:35.329163Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037888] lookup [24:692:2409] 2025-06-24T20:32:35.329237Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:536: Handle TEvTabletProblem tabletId: 72075186224037888 entry.State: StNormal 2025-06-24T20:32:35.329463Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037888 entry.State: StProblemResolve ev: {EvForward TabletID: 72075186224037888 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:32:35.329560Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594046678944] Accept Connect Originator# [25:696:2221] 2025-06-24T20:32:35.329734Z node 24 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037888 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:32:35.329894Z node 24 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 0} 2025-06-24T20:32:35.329958Z node 24 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 1} 2025-06-24T20:32:35.329985Z node 24 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 2} 2025-06-24T20:32:35.330044Z node 24 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [24:653:2383] CurrentLeaderTablet: [24:655:2384] CurrentGeneration: 3 CurrentStep: 0} 2025-06-24T20:32:35.330142Z node 24 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [24:653:2383] CurrentLeaderTablet: [24:655:2384] CurrentGeneration: 3 CurrentStep: 0} 2025-06-24T20:32:35.330263Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037888 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037888 Cookie: 0 CurrentLeader: [24:653:2383] CurrentLeaderTablet: [24:655:2384] CurrentGeneration: 3 CurrentStep: 0 Locked: false LockedFor: 0 
Signature: { Size: 2 Signature: {{[24:24343667:0] : 3}, {[24:1099535971443:0] : 6}}}} 2025-06-24T20:32:35.330318Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 72075186224037888 followers: 0 2025-06-24T20:32:35.330387Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 24 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [24:653:2383] 2025-06-24T20:32:35.330521Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037888] forward result local node, try to connect [24:692:2409] 2025-06-24T20:32:35.330597Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [24:692:2409] 2025-06-24T20:32:35.330681Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594046678944] connected with status OK role: Leader [25:696:2221] 2025-06-24T20:32:35.330734Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594046678944] send queued [25:696:2221] 2025-06-24T20:32:35.330907Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037888] Accept Connect Originator# [24:692:2409] 2025-06-24T20:32:35.331019Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046678944] send [25:696:2221] 2025-06-24T20:32:35.331066Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046678944] push event to server [25:696:2221] 2025-06-24T20:32:35.331478Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594046678944]::SendEvent [25:696:2221] 2025-06-24T20:32:35.331641Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037888] connected with status OK role: Leader [24:692:2409] 2025-06-24T20:32:35.331686Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037888] send queued [24:692:2409] 2025-06-24T20:32:35.331847Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72057594046678944] Push Sender# [25:695:2221] EventType# 271122945 2025-06-24T20:32:35.332008Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{17, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme 2025-06-24T20:32:35.332071Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{17, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:32:35.332308Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{17, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} hope 1 -> done Change{11, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T20:32:35.332377Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{17, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:32:35.333535Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [25:702:2222] 2025-06-24T20:32:35.333563Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [25:702:2222] 2025-06-24T20:32:35.333761Z node 25 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:32:35.333798Z node 25 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 25 
selfDC 2 leaderDC 1 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [24:332:2201] 2025-06-24T20:32:35.333858Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [25:702:2222] 2025-06-24T20:32:35.334664Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72057594037927937] forward result remote node 24 [25:702:2222] 2025-06-24T20:32:35.335050Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72057594037927937] remote node connected [25:702:2222] 2025-06-24T20:32:35.335085Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [25:702:2222] 2025-06-24T20:32:35.337659Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [25:702:2222] 2025-06-24T20:32:35.340165Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [25:702:2222] 2025-06-24T20:32:35.340233Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [25:702:2222] 2025-06-24T20:32:35.340282Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [25:702:2222] 2025-06-24T20:32:35.340430Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [25:702:2222] 2025-06-24T20:32:35.340809Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72057594037927937] Push Sender# [25:700:2222] EventType# 268959744 2025-06-24T20:32:35.341101Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:14} Tx{41, NKikimr::NHive::TTxRegisterNode} queued, type NKikimr::NHive::TTxRegisterNode 2025-06-24T20:32:35.341204Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:14} Tx{41, NKikimr::NHive::TTxRegisterNode} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:32:35.341442Z node 24 :HIVE WARN: node_info.cpp:25: HIVE#72057594037927937 Node(25, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:35.341602Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:14} Tx{41, NKikimr::NHive::TTxRegisterNode} hope 1 -> done Change{24, redo 152b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2025-06-24T20:32:35.341727Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:14} Tx{41, NKikimr::NHive::TTxRegisterNode} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:32:35.342454Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [24:711:2414] 2025-06-24T20:32:35.342527Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [24:711:2414] 2025-06-24T20:32:35.342666Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:32:35.342765Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 24 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [24:332:2201] 2025-06-24T20:32:35.342883Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [24:711:2414] 2025-06-24T20:32:35.342974Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [24:711:2414] 
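A minimal sketch of the preference order the SelectForward lines above suggest: for each lookup the resolver counts how many known replicas sit on the local node ("local"), in the local data center ("localDc"), or elsewhere ("other"), and the winner in this trace is always the local node when one exists, otherwise the remote leader. This is not the actual NKikimr tablet_resolver code; every type, name, and data-center string below is hypothetical.

#include <cstdio>
#include <optional>
#include <string>
#include <vector>

struct TCandidate {
    unsigned NodeId;          // node that currently hosts the tablet leader
    std::string DataCenter;   // data center of that node
};

// Preference order suggested by the log: local node, then local DC, then anything else.
std::optional<TCandidate> SelectForward(const std::vector<TCandidate>& candidates,
                                        unsigned selfNodeId,
                                        const std::string& selfDC) {
    std::optional<TCandidate> sameDc;
    std::optional<TCandidate> other;
    for (const auto& c : candidates) {
        if (c.NodeId == selfNodeId) {
            return c;                    // counted as "local" in the log line
        }
        if (c.DataCenter == selfDC && !sameDc) {
            sameDc = c;                  // counted as "localDc"
        } else if (!other) {
            other = c;                   // counted as "other"
        }
    }
    return sameDc ? sameDc : other;
}

int main() {
    const std::vector<TCandidate> leader = {{24, "dc-A"}};
    // Node 24 resolving the tablet: local 1, localDc 1, other 0 -> forwards locally.
    const auto fromNode24 = SelectForward(leader, 24, "dc-A");
    // Node 25 in another DC: local 0, localDc 0, other 1 -> forwards to the remote leader.
    const auto fromNode25 = SelectForward(leader, 25, "dc-B");
    std::printf("winner seen from node 24: %u\n", fromNode24 ? fromNode24->NodeId : 0u);
    std::printf("winner seen from node 25: %u\n", fromNode25 ? fromNode25->NodeId : 0u);
    return 0;
}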
2025-06-24T20:32:35.343063Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [24:711:2414]
2025-06-24T20:32:35.343593Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [24:711:2414]
2025-06-24T20:32:35.343831Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [24:711:2414]
2025-06-24T20:32:35.344057Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [24:711:2414]
2025-06-24T20:32:35.344170Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [24:711:2414]
2025-06-24T20:32:35.344245Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [24:711:2414]
2025-06-24T20:32:35.344347Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [24:711:2414]
2025-06-24T20:32:35.344425Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [24:711:2414]
2025-06-24T20:32:35.344537Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [24:710:2413] EventType# 268697616
>> TTicketParserTest::LoginBad
>> TTicketParserTest::AccessServiceAuthenticationOk [GOOD]
>> TTicketParserTest::AccessServiceAuthenticationApiKeyOk
>> TSubDomainTest::CheckAccessCopyTable [GOOD]
>> TSubDomainTest::ConsistentCopyTable
>> TTicketParserTest::TicketFromCertificateWithValidationGood [GOOD]
>> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersGood
>> TSchemeShardTest::AlterTableSizeToSplit [GOOD]
>> TSchemeShardTest::AlterTableSplitSchema
>> TTicketParserTest::LoginRefreshGroupsWithError
>> TSubDomainTest::StartTenanNodeAndStopAtDestructor [GOOD]
>> Cdc::DisableStream [GOOD]
>> Cdc::InitialScan
>> TestDataErasure::SchemeShardCounterDoesNotConsistWithBscCounter [GOOD]
>> TestDataErasure::DataErasureManualLaunch [GOOD]
>> TestDataErasure::DataErasureWithCopyTable
>> TMiniKQLEngineFlatHostTest::ShardId [GOOD]
>> TMiniKQLEngineFlatHostTest::Basic [GOOD]
>> TMiniKQLEngineFlatTest::TestAbort
>> KqpPg::Returning-useSink [GOOD]
>> KqpPg::SelectIndex+useSink
>> TMiniKQLEngineFlatTest::TestAbort [GOOD]
>> TMiniKQLEngineFlatTest::TestCASBoth2Fail1
>> TMiniKQLEngineFlatTest::TestCASBoth2Fail1 [GOOD]
>> TMiniKQLEngineFlatTest::TestCASBoth2Fail2
>> TModifyUserTest::ModifyUserIsEnabled [GOOD]
>> TMiniKQLEngineFlatTest::TestCASBoth2Fail2 [GOOD]
>> TMiniKQLEngineFlatTest::TestCASBoth2Fail12
>> TMiniKQLEngineFlatTest::TestCASBoth2Fail12 [GOOD]
>> TMiniKQLEngineFlatTest::TestBug998
>> TMiniKQLEngineFlatTest::TestBug998 [GOOD]
>> TMiniKQLEngineFlatTest::TestAcquireLocks
>> TMiniKQLEngineFlatTest::TestAcquireLocks [GOOD]
>> TMiniKQLEngineFlatTest::NoMapPushdownMultipleConsumers
>> Cdc::DescribeStream [GOOD]
>> Cdc::DecimalKey
>> TTicketParserTest::AuthenticationWithUserAccount [GOOD]
>> TTicketParserTest::AuthenticationUnsupported
>> TMiniKQLEngineFlatTest::NoMapPushdownMultipleConsumers [GOOD]
>> TMiniKQLEngineFlatTest::NoMapPushdownNonPureLambda
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::SchemeShardCounterDoesNotConsistWithBscCounter [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for 
TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:32:32.480971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:32:32.481078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:32:32.481122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:32:32.481176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:32:32.481241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:32:32.481277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:32:32.481364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:32:32.481460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:32:32.482320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:32:32.482742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:32:32.583517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:32:32.583586Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:32.599553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:32:32.604553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:32:32.604954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:32:32.615377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:32:32.615669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:32:32.616399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:32.616821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:32:32.620210Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:32:32.620443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:32:32.621770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:32:32.621839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:32:32.621972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:32:32.622076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:32:32.622135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:32:32.622344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.630353Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:32:32.790765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:32:32.791027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.791276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:32:32.791326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:32:32.791628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:32:32.791769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:32:32.796535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:32.796797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , 
status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:32:32.797103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.797162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:32:32.797206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:32:32.797241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:32:32.799678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.799758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:32:32.799808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:32:32.802150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.802211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.802257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:32.802324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:32:32.806402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:32:32.809055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:32:32.809297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:32:32.810476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:32.810653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:32:32.810712Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:32.811026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:32:32.811075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:32.811333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:32:32.811450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:32:32.813987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:32:32.814046Z node 1 :FLAT_TX_SCHEMESHARD ... HEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-24T20:32:36.193097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:646: TTxCompleteDataErasureBSC Unknown generation#47, Expected gen# 1 at schemestard: 72057594046678944 2025-06-24T20:32:36.193208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 48 2025-06-24T20:32:36.193614Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 268637738, Sender [1:303:2281], Recipient [1:298:2278]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 48 Completed: false Progress10k: 0 2025-06-24T20:32:36.193655Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5151: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-24T20:32:36.193697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7879: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-24T20:32:36.193749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-24T20:32:36.193787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 0% 2025-06-24T20:32:36.196498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-24T20:32:36.196584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-24T20:32:36.196670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-24T20:32:36.687551Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:461:2412]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:36.687642Z 
node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:36.687742Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:843:2722]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:36.687767Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:36.687815Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:298:2278]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:36.687844Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:36.687899Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:461:2412], Recipient [1:461:2412]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:36.687928Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:36.687995Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:843:2722], Recipient [1:843:2722]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:36.688019Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:36.688099Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:298:2278], Recipient [1:298:2278]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:36.688125Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:36.735669Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:298:2278]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:32:36.735757Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5152: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:32:36.735815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 48 2025-06-24T20:32:36.736124Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 268637738, Sender [1:303:2281], Recipient [1:298:2278]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 48 Completed: false Progress10k: 5000 2025-06-24T20:32:36.736170Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5151: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-24T20:32:36.736214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7879: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-24T20:32:36.736293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-24T20:32:36.736341Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-24T20:32:36.736407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-24T20:32:36.736484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-24T20:32:37.123832Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:843:2722]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:37.123925Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:37.124020Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:298:2278]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:37.124054Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:37.124105Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:461:2412]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:37.124129Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:37.124181Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:843:2722], Recipient [1:843:2722]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:37.124209Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:37.124274Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:298:2278], Recipient [1:298:2278]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:37.124298Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:37.124342Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:461:2412], Recipient [1:461:2412]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:37.124366Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:37.165594Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:298:2278]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:32:37.165677Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5152: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:32:37.165711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 48 
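The TEvControllerShredResponse messages in this run carry a Progress10k field that the schemeshard reports as a percentage: 0 is logged as "0%", 5000 as "50%", and the final response (just below) arrives with Completed: true and Progress10k: 10000. A minimal sketch of that mapping, assuming only that Progress10k counts in 1/10000th parts; the function name and output format here are illustrative, not the real schemeshard code.

#include <cstdio>

// Progress10k counts in 1/10000th parts, so dividing by 100 yields whole percent.
void ReportShredProgress(unsigned progress10k, bool completed) {
    if (completed) {
        std::printf("Data shred in BSC is completed\n");
    } else {
        std::printf("Progress data shred in BSC %u%%\n", progress10k / 100);
    }
}

int main() {
    ReportShredProgress(0, false);      // first response above: 0%
    ReportShredProgress(5000, false);   // second response above: 50%
    ReportShredProgress(10000, true);   // final response below: completed
    return 0;
}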
2025-06-24T20:32:37.165983Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 268637738, Sender [1:303:2281], Recipient [1:298:2278]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 48 Completed: true Progress10k: 10000
2025-06-24T20:32:37.166020Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5151: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse
2025-06-24T20:32:37.166051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7879: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944
2025-06-24T20:32:37.166148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944
2025-06-24T20:32:37.166191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed
2025-06-24T20:32:37.166244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.931000s, Timestamp# 1970-01-01T00:00:05.114000Z
2025-06-24T20:32:37.166297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 48, duration# 2 s
2025-06-24T20:32:37.169341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false
2025-06-24T20:32:37.170035Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:2001:3666], Recipient [1:298:2278]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-06-24T20:32:37.170100Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected
2025-06-24T20:32:37.170151Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944
2025-06-24T20:32:37.170310Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125519, Sender [1:277:2266], Recipient [1:298:2278]: NKikimrScheme.TEvDataErasureInfoRequest
2025-06-24T20:32:37.170345Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5149: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest
2025-06-24T20:32:37.170379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7830: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944
>> TSubDomainTest::CreateTabletForUnknownDomain [GOOD]
>> TSubDomainTest::DatashardNotRunAtAllWhenSubDomainNodesIsStopped
>> TMiniKQLEngineFlatTest::NoMapPushdownNonPureLambda [GOOD]
>> TMiniKQLEngineFlatTest::NoOrderedMapPushdown
>> TMiniKQLEngineFlatTest::NoOrderedMapPushdown [GOOD]
>> TMiniKQLEngineFlatTest::NoMapPushdownWriteToTable
>> TMiniKQLEngineFlatTest::NoMapPushdownWriteToTable [GOOD]
>> TMiniKQLEngineFlatTest::NoMapPushdownArgClosure
>> TMiniKQLEngineFlatTest::NoMapPushdownArgClosure [GOOD]
>> TSubDomainTest::DatashardRunAtOtherNodeWhenOneNodeIsStopped [GOOD]
>> TSchemeShardTest::AlterTableSplitSchema [GOOD]
>> TSchemeShardTest::AlterTableSettings
>> TSubDomainTest::CreateTableInsidetThenStopTenantAndForceDeleteSubDomain [GOOD]
>> TSubDomainTest::CreateTableInsideSubDomain
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::StartTenanNodeAndStopAtDestructor [GOOD] 
Test command err: 2025-06-24T20:32:26.127660Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616486912328458:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:26.136814Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00478c/r3tmp/tmpUNfzEt/pdisk_1.dat 2025-06-24T20:32:26.636634Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:26.636756Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:26.645985Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:26.657635Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7096 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T20:32:26.917390Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519616486912328679:2134] Handle TEvNavigate describe path dc-1 2025-06-24T20:32:26.949717Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519616486912329116:2430] HANDLE EvNavigateScheme dc-1 2025-06-24T20:32:26.949823Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519616486912328702:2147], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:26.949847Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519616486912328702:2147], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-24T20:32:26.949961Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519616486912329117:2431][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:32:26.951576Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616486912328367:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616486912329121:2431] 2025-06-24T20:32:26.951624Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616486912328367:2050] Subscribe: subscriber# [1:7519616486912329121:2431], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:26.951707Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616486912328370:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616486912329122:2431] 2025-06-24T20:32:26.951718Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616486912328370:2053] Subscribe: subscriber# [1:7519616486912329122:2431], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:26.951732Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616486912328373:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616486912329123:2431] 
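In the describe flow captured here, the scheme cache subscriber fans the request out to three SCHEME_BOARD_REPLICA actors, and during the SyncVersion exchange a little further below it keeps printing "size# 3, half# 1" together with the running successes/failures counts until all three replicas have answered and the sync is reported done. The sketch below only mirrors that bookkeeping as seen in the log; it is not the real scheme board subscriber code, and treating half# as the failure budget that would make a completed sync "partial" is an assumption.

#include <cstdio>

struct TRingGroupSyncState {
    unsigned Size = 3;        // replicas in ring group 0, printed as "size# 3"
    unsigned Half = 1;        // Size / 2, printed as "half# 1"
    unsigned Successes = 0;   // successful TEvSyncVersionResponse replies
    unsigned Failures = 0;

    // In the log the sync is reported done only once every replica has answered.
    bool Done() const { return Successes + Failures == Size; }
    // Assumption: more failures than Half would mark the completed sync as partial.
    bool Partial() const { return Failures > Half; }
};

int main() {
    TRingGroupSyncState sync;
    while (!sync.Done()) {
        ++sync.Successes;  // in this run every replica answered successfully
        std::printf("successes# %u failures# %u done# %d partial# %d\n",
                    sync.Successes, sync.Failures, sync.Done(), sync.Partial());
    }
    return 0;
}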
2025-06-24T20:32:26.951742Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616486912328373:2056] Subscribe: subscriber# [1:7519616486912329123:2431], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:26.951856Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616486912329121:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616486912328367:2050] 2025-06-24T20:32:26.951899Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616486912329122:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616486912328370:2053] 2025-06-24T20:32:26.951917Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616486912329123:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616486912328373:2056] 2025-06-24T20:32:26.951981Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616486912329117:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616486912329118:2431] 2025-06-24T20:32:26.951987Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616486912328367:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616486912329121:2431] 2025-06-24T20:32:26.952016Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616486912328370:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616486912329122:2431] 2025-06-24T20:32:26.952030Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616486912328373:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616486912329123:2431] 2025-06-24T20:32:26.952033Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616486912329117:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616486912329119:2431] 2025-06-24T20:32:26.952093Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519616486912329117:2431][/dc-1] Set up state: owner# [1:7519616486912328702:2147], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:26.953060Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616486912329117:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616486912329120:2431] 2025-06-24T20:32:26.953113Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519616486912329117:2431][/dc-1] Path was already updated: owner# [1:7519616486912328702:2147], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 
elements } 2025-06-24T20:32:26.953173Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616486912329121:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616486912329118:2431], cookie# 1 2025-06-24T20:32:26.953194Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616486912329122:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616486912329119:2431], cookie# 1 2025-06-24T20:32:26.953208Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616486912329123:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616486912329120:2431], cookie# 1 2025-06-24T20:32:26.955219Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616486912328367:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616486912329121:2431], cookie# 1 2025-06-24T20:32:26.955272Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616486912328370:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616486912329122:2431], cookie# 1 2025-06-24T20:32:26.955595Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616486912328373:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616486912329123:2431], cookie# 1 2025-06-24T20:32:26.955864Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616486912329121:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616486912328367:2050], cookie# 1 2025-06-24T20:32:26.955888Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616486912329122:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616486912328370:2053], cookie# 1 2025-06-24T20:32:26.955902Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616486912329123:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616486912328373:2056], cookie# 1 2025-06-24T20:32:26.955932Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616486912329117:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616486912329118:2431], cookie# 1 2025-06-24T20:32:26.955964Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616486912329117:2431][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:32:26.955982Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616486912329117:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616486912329119:2431], cookie# 1 2025-06-24T20:32:26.955994Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616486912329117:2431][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:32:26.956006Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616486912329117:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616486912329120:2431], cookie# 1 2025-06-24T20:32:26.956026Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519616486912329117:2431][/dc-1] Sync is done in the ring group: cookie# 
1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:32:27.011611Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519616486912328702:2147], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataS ... ager/running_requests] Set up state: owner# [3:7519616512007216770:2128], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:35.919783Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519616524892119271:2544][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519616524892119281:2544] 2025-06-24T20:32:35.919812Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][3:7519616524892119271:2544][/dc-1/.metadata/workload_manager/running_requests] Ignore empty state: owner# [3:7519616512007216770:2128], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:35.919833Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519616512007216471:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519616524892119277:2543] 2025-06-24T20:32:35.919876Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519616512007216471:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519616524892119289:2545] 2025-06-24T20:32:35.919915Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519616512007216471:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519616524892119283:2544] 2025-06-24T20:32:35.920011Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519616512007216770:2128], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-24T20:32:35.920120Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519616512007216770:2128], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519616524892119270:2543] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } 
Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:32:35.920241Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519616512007216770:2128], cacheItem# { Subscriber: { Subscriber: [3:7519616524892119270:2543] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:35.920302Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519616512007216770:2128], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-24T20:32:35.920364Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519616512007216770:2128], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519616524892119271:2544] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:32:35.920410Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519616512007216770:2128], cacheItem# { Subscriber: { Subscriber: [3:7519616524892119271:2544] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:35.920456Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519616512007216474:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519616524892119284:2544] 2025-06-24T20:32:35.920461Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519616524892119290:2545][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [3:7519616512007216474:2056] 2025-06-24T20:32:35.920502Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519616524892119272:2545][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [3:7519616524892119286:2545] 2025-06-24T20:32:35.920540Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: 
[main][3:7519616524892119272:2545][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Set up state: owner# [3:7519616512007216770:2128], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:35.920542Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519616524892119291:2546], recipient# [3:7519616524892119268:2273], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:35.920574Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519616512007216474:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519616524892119290:2545] 2025-06-24T20:32:35.920611Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519616512007216770:2128], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 } 2025-06-24T20:32:35.928294Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519616512007216770:2128], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519616524892119272:2545] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:32:35.928438Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519616512007216770:2128], cacheItem# { Subscriber: { Subscriber: [3:7519616524892119272:2545] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:35.928776Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519616524892119272:2545][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [3:7519616524892119287:2545] 2025-06-24T20:32:35.928874Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][3:7519616524892119272:2545][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Ignore empty state: owner# [3:7519616512007216770:2128], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: 
there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:35.935305Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519616524892119292:2547], recipient# [3:7519616524892119269:2274], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:36.227386Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519616512007216770:2128], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:36.227539Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519616512007216770:2128], cacheItem# { Subscriber: { Subscriber: [3:7519616516302184626:2531] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:36.227624Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519616529187086593:2551], recipient# [3:7519616529187086592:2275], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> THiveTest::TestHiveBalancerWithPreferredDC3 [GOOD] >> THiveTest::TestHiveBalancerWithFollowers >> TMiniKQLProgramBuilderTest::TestUpdateRowStaticKey >> TMiniKQLProgramBuilderTest::TestUpdateRowStaticKey [GOOD] >> TMiniKQLProtoTestYdb::TestExportDataTypeYdb >> TMiniKQLProtoTestYdb::TestExportDataTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDecimalTypeYdb >> TMiniKQLProtoTestYdb::TestExportDecimalTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDictTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportBoolYdb ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TModifyUserTest::ModifyUserIsEnabled [GOOD] Test command err: 2025-06-24T20:32:26.262680Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616484566801763:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:26.262931Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004797/r3tmp/tmpoWBCyb/pdisk_1.dat 2025-06-24T20:32:26.753212Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:26.784844Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:26.784952Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:26.787780Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3464 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T20:32:27.052808Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519616484566801964:2104] Handle TEvNavigate describe path dc-1 2025-06-24T20:32:27.076052Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519616488861769536:2258] HANDLE EvNavigateScheme dc-1 2025-06-24T20:32:27.076184Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519616484566802026:2128], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:27.076224Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519616484566802026:2128], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-24T20:32:27.076377Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519616488861769537:2259][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:32:27.078303Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616484566801698:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616488861769541:2259] 2025-06-24T20:32:27.078355Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616484566801698:2049] Subscribe: subscriber# [1:7519616488861769541:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.078419Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616484566801701:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616488861769542:2259] 2025-06-24T20:32:27.078436Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616484566801701:2052] Subscribe: subscriber# [1:7519616488861769542:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.078468Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616484566801704:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616488861769543:2259] 2025-06-24T20:32:27.078486Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616484566801704:2055] Subscribe: subscriber# [1:7519616488861769543:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.078527Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616488861769541:2259][/dc-1] Handle 
NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616484566801698:2049] 2025-06-24T20:32:27.078565Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616488861769542:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616484566801701:2052] 2025-06-24T20:32:27.078587Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616488861769543:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616484566801704:2055] 2025-06-24T20:32:27.078647Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616488861769537:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616488861769538:2259] 2025-06-24T20:32:27.078687Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616488861769537:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616488861769539:2259] 2025-06-24T20:32:27.078746Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519616488861769537:2259][/dc-1] Set up state: owner# [1:7519616484566802026:2128], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:27.078879Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616488861769537:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616488861769540:2259] 2025-06-24T20:32:27.078931Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519616488861769537:2259][/dc-1] Path was already updated: owner# [1:7519616484566802026:2128], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:27.078965Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616488861769541:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616488861769538:2259], cookie# 1 2025-06-24T20:32:27.078983Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616488861769542:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616488861769539:2259], cookie# 1 2025-06-24T20:32:27.078995Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616488861769543:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616488861769540:2259], cookie# 1 2025-06-24T20:32:27.079019Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616484566801698:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616488861769541:2259] 2025-06-24T20:32:27.079061Z node 1 
:SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616484566801698:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616488861769541:2259], cookie# 1 2025-06-24T20:32:27.079097Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616484566801701:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616488861769542:2259] 2025-06-24T20:32:27.079121Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616484566801701:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616488861769542:2259], cookie# 1 2025-06-24T20:32:27.079139Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616484566801704:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616488861769543:2259] 2025-06-24T20:32:27.080106Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616484566801704:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616488861769543:2259], cookie# 1 2025-06-24T20:32:27.080160Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616488861769541:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616484566801698:2049], cookie# 1 2025-06-24T20:32:27.080182Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616488861769542:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616484566801701:2052], cookie# 1 2025-06-24T20:32:27.080202Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616488861769543:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616484566801704:2055], cookie# 1 2025-06-24T20:32:27.080232Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616488861769537:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616488861769538:2259], cookie# 1 2025-06-24T20:32:27.080282Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616488861769537:2259][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:32:27.080304Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616488861769537:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616488861769539:2259], cookie# 1 2025-06-24T20:32:27.080314Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616488861769537:2259][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:32:27.080377Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616488861769537:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616488861769540:2259], cookie# 1 2025-06-24T20:32:27.080402Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519616488861769537:2259][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:32:27.129550Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519616484566802026:2128], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: 
"/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataS ... 6519898670401:2112], cacheItem# { Subscriber: { Subscriber: [3:7519616519898670447:2131] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 9 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750797154547 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 9 IsSync: true Partial: 0 } 2025-06-24T20:32:34.864147Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519616519898670763:2331], recipient# [3:7519616519898670762:2330], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 },{ Sid: user1 }] Groups: [] } }] } 2025-06-24T20:32:34.864183Z node 3 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [3:7519616519898670762:2330] txid# 281474976710662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:32:34.864224Z node 3 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [3:7519616519898670762:2330] txid# 281474976710662, Access denied for user2 on path /dc-1, with access AlterSchema 2025-06-24T20:32:34.864308Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519616519898670762:2330] txid# 281474976710662, issues: { message: "Access denied for user2 on path /dc-1" issue_code: 200000 severity: 1 } 2025-06-24T20:32:34.864333Z node 3 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [3:7519616519898670762:2330] txid# 281474976710662 SEND to# [3:7519616519898670761:2329] Source {TEvProposeTransactionStatus Status# 5} 2025-06-24T20:32:34.866853Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [3:7519616519898670192:2087] Handle TEvProposeTransaction 2025-06-24T20:32:34.866876Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [3:7519616519898670192:2087] TxId# 281474976710663 
ProcessProposeTransaction 2025-06-24T20:32:34.866909Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [3:7519616519898670192:2087] Cookie# 0 userReqId# "" txid# 281474976710663 SEND to# [3:7519616519898670765:2333] 2025-06-24T20:32:34.869461Z node 3 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [3:7519616519898670765:2333] txid# 281474976710663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { ModifyUser { User: "user2" Password: "password" CanLogin: false } } } } UserToken: "\n\005user2\022\030\022\026\n\024all-users@well-known\032\322\003eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDg0MDM1NCwiaWF0IjoxNzUwNzk3MTU0LCJzdWIiOiJ1c2VyMiJ9.Twl-3ioxgwghxBHaVBxjec5lSkXFAGN7UMVXFj1JdS4JS1wmmKfGvItjy69ajlFuRj-K8BPOQwLlgKeTlYyPQ97N3plxHqLl-If88T2UDimHI5imq35X4QxWcWebzX_7qqOGWp8-IrfpXy5iaBpoIZlyzz43Nxs-RD7W6vlF3Qvj1__GfyzZ0oUGbDFHpW14RebqaLLlXiq1WzybRTV1eypJcP56BJg4ZvOrucPGrdxDSDoc_GcrY8F1WOgfE7L2IbXa59SOrm0CboZWH3uXvr9oWBq8ILN2QbrgDyJfoU8P2eJEyMmcpP6VhO9n4--xdrrqvjSY3P7RI-E8ZZh7-g\"\005Login*~eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDg0MDM1NCwiaWF0IjoxNzUwNzk3MTU0LCJzdWIiOiJ1c2VyMiJ9.**" PeerName: "" 2025-06-24T20:32:34.869517Z node 3 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [3:7519616519898670765:2333] txid# 281474976710663 Bootstrap, UserSID: user2 CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T20:32:34.869534Z node 3 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [3:7519616519898670765:2333] txid# 281474976710663 Bootstrap, UserSID: user2 IsClusterAdministrator: 1 2025-06-24T20:32:34.869580Z node 3 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [3:7519616519898670765:2333] txid# 281474976710663 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:32:34.869652Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519616519898670401:2112], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:34.869734Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][3:7519616519898670447:2131][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [3:7519616519898670401:2112], cookie# 10 2025-06-24T20:32:34.869787Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][3:7519616519898670460:2131][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7519616519898670456:2131], cookie# 10 2025-06-24T20:32:34.869804Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][3:7519616519898670461:2131][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7519616519898670457:2131], cookie# 10 2025-06-24T20:32:34.869819Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][3:7519616519898670462:2131][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7519616519898670459:2131], cookie# 10 2025-06-24T20:32:34.869846Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [3:7519616519898670135:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7519616519898670460:2131], cookie# 10 2025-06-24T20:32:34.869868Z node 3 :SCHEME_BOARD_REPLICA DEBUG: 
replica.cpp:1128: [3:7519616519898670138:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7519616519898670461:2131], cookie# 10 2025-06-24T20:32:34.869886Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [3:7519616519898670141:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7519616519898670462:2131], cookie# 10 2025-06-24T20:32:34.869915Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][3:7519616519898670460:2131][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7519616519898670135:2049], cookie# 10 2025-06-24T20:32:34.869930Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][3:7519616519898670461:2131][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7519616519898670138:2052], cookie# 10 2025-06-24T20:32:34.869944Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][3:7519616519898670462:2131][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7519616519898670141:2055], cookie# 10 2025-06-24T20:32:34.869976Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][3:7519616519898670447:2131][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7519616519898670456:2131], cookie# 10 2025-06-24T20:32:34.869996Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][3:7519616519898670447:2131][/dc-1] Sync is in progress: cookie# 10, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:32:34.870010Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][3:7519616519898670447:2131][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7519616519898670457:2131], cookie# 10 2025-06-24T20:32:34.870022Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][3:7519616519898670447:2131][/dc-1] Sync is in progress: cookie# 10, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:32:34.870038Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][3:7519616519898670447:2131][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7519616519898670459:2131], cookie# 10 2025-06-24T20:32:34.870060Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][3:7519616519898670447:2131][/dc-1] Sync is done in the ring group: cookie# 10, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:32:34.870099Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519616519898670401:2112], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T20:32:34.870163Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519616519898670401:2112], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [3:7519616519898670447:2131] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 10 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750797154547 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:32:34.871706Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry 
for TNavigate: self# [3:7519616519898670401:2112], cacheItem# { Subscriber: { Subscriber: [3:7519616519898670447:2131] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 10 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750797154547 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 10 IsSync: true Partial: 0 } 2025-06-24T20:32:34.871915Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519616519898670766:2334], recipient# [3:7519616519898670765:2333], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 },{ Sid: user1 }] Groups: [] } }] } 2025-06-24T20:32:34.871950Z node 3 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [3:7519616519898670765:2333] txid# 281474976710663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:32:34.871991Z node 3 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [3:7519616519898670765:2333] txid# 281474976710663, Access denied for user2 on path /dc-1, with access AlterSchema 2025-06-24T20:32:34.872075Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519616519898670765:2333] txid# 281474976710663, issues: { message: "Access denied for user2 on path /dc-1" issue_code: 200000 severity: 1 } 2025-06-24T20:32:34.872103Z node 3 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [3:7519616519898670765:2333] txid# 281474976710663 SEND to# [3:7519616519898670764:2332] Source {TEvProposeTransactionStatus Status# 5} >> TMiniKQLProtoTestYdb::TestExportBoolYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDoubleYdb |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::NoMapPushdownArgClosure [GOOD] >> TMiniKQLProtoTestYdb::TestExportDoubleYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDecimalYdb >> TMiniKQLProtoTestYdb::TestExportDecimalYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDecimalNegativeYdb >> TMiniKQLProtoTestYdb::TestExportDecimalNegativeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDecimalHugeYdb >> TMiniKQLProtoTestYdb::TestExportDecimalHugeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportEmptyOptionalOptionalYdb >> TMiniKQLProtoTestYdb::TestExportEmptyOptionalOptionalYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDictYdb >> TMiniKQLProtoTestYdb::TestExportDictYdb [GOOD] >> TMiniKQLProtoTestYdb::TestCellsFromTuple [GOOD] >> TestDataErasure::ManualLaunch3Cycles [GOOD] >> TestDataErasure::ManualLaunch3CyclesWithNotConsistentCountersInSchemeShardAndBSC >> TSubDomainTest::CreateDummyTabletsInDifferentDomains [GOOD] >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable >> TTicketParserTest::LoginGoodWithGroups [GOOD] >> TTicketParserTest::LoginRefreshGroupsGood ------- [TM] 
{asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::DatashardRunAtOtherNodeWhenOneNodeIsStopped [GOOD] Test command err: 2025-06-24T20:32:26.377453Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616486250215435:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:26.377534Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047a6/r3tmp/tmph1K8ck/pdisk_1.dat 2025-06-24T20:32:27.064065Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:27.076097Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:27.076210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:27.085217Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25776 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T20:32:27.319291Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519616486250215635:2141] Handle TEvNavigate describe path dc-1 2025-06-24T20:32:27.346896Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519616490545183358:2431] HANDLE EvNavigateScheme dc-1 2025-06-24T20:32:27.347041Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519616486250215677:2157], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:27.347076Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519616486250215677:2157], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-24T20:32:27.347264Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519616490545183359:2432][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:32:27.357506Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616486250215313:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616490545183363:2432] 2025-06-24T20:32:27.358357Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616486250215313:2050] Subscribe: subscriber# [1:7519616490545183363:2432], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.358447Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616490545183363:2432][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616486250215313:2050] 2025-06-24T20:32:27.358489Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616490545183359:2432][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616490545183360:2432] 
2025-06-24T20:32:27.358528Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616486250215313:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616490545183363:2432] 2025-06-24T20:32:27.358554Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616486250215319:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616490545183365:2432] 2025-06-24T20:32:27.358575Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616486250215319:2056] Subscribe: subscriber# [1:7519616490545183365:2432], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.358669Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616490545183365:2432][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616486250215319:2056] 2025-06-24T20:32:27.358698Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616490545183359:2432][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616490545183362:2432] 2025-06-24T20:32:27.358751Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519616490545183359:2432][/dc-1] Set up state: owner# [1:7519616486250215677:2157], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:27.358873Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616490545183363:2432][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616490545183360:2432], cookie# 1 2025-06-24T20:32:27.358893Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616490545183364:2432][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616490545183361:2432], cookie# 1 2025-06-24T20:32:27.358907Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616490545183365:2432][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616490545183362:2432], cookie# 1 2025-06-24T20:32:27.358935Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616486250215313:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616490545183363:2432], cookie# 1 2025-06-24T20:32:27.358969Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616490545183363:2432][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616486250215313:2050], cookie# 1 2025-06-24T20:32:27.359002Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616490545183359:2432][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616490545183360:2432], cookie# 1 2025-06-24T20:32:27.359025Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616490545183359:2432][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:32:27.359077Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616486250215319:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616490545183365:2432] 
2025-06-24T20:32:27.359091Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616486250215319:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616490545183365:2432], cookie# 1 2025-06-24T20:32:27.359208Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616486250215316:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616490545183364:2432] 2025-06-24T20:32:27.359246Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616486250215316:2053] Subscribe: subscriber# [1:7519616490545183364:2432], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.359296Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616486250215316:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616490545183364:2432], cookie# 1 2025-06-24T20:32:27.359344Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616490545183365:2432][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616486250215319:2056], cookie# 1 2025-06-24T20:32:27.359369Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616490545183364:2432][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616486250215316:2053] 2025-06-24T20:32:27.359393Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616490545183364:2432][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616486250215316:2053], cookie# 1 2025-06-24T20:32:27.359422Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616490545183359:2432][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616490545183362:2432], cookie# 1 2025-06-24T20:32:27.359439Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616490545183359:2432][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:32:27.359468Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616490545183359:2432][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616490545183361:2432] 2025-06-24T20:32:27.359520Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519616490545183359:2432][/dc-1] Path was already updated: owner# [1:7519616486250215677:2157], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:27.359568Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616490545183359:2432][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616490545183361:2432], cookie# 1 2025-06-24T20:32:27.359589Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519616490545183359:2432][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, 
successes# 3, failures# 0, partial# 0 2025-06-24T20:32:27.359634Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616486250215316:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616490545183364:2432] 2025-06-24T20:32:27.392130Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:32:27.430856Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519616486250215677:2157], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 Resour ... 37], cacheItem# { Subscriber: { Subscriber: [4:7519616536273297064:2311] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:38.152733Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519616536273297077:2312], recipient# [4:7519616536273297052:2293], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:38.162261Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:7519616536273297052:2293], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:32:38.223323Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519616519093427578:2137], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:38.223470Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519616519093427578:2137], cacheItem# { Subscriber: { Subscriber: [4:7519616536273297063:2310] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:38.223542Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519616519093427578:2137], cacheItem# { Subscriber: { Subscriber: [4:7519616536273297064:2311] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:38.223651Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519616536273297078:2313], recipient# [4:7519616536273297052:2293], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:38.223852Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:7519616536273297052:2293], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:32:38.371478Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519616519093427578:2137], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:38.371615Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519616519093427578:2137], cacheItem# { Subscriber: { Subscriber: [4:7519616536273297063:2310] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:38.371664Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519616519093427578:2137], cacheItem# { Subscriber: { Subscriber: [4:7519616536273297064:2311] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:38.371776Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519616536273297080:2314], recipient# [4:7519616536273297052:2293], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:38.372148Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:7519616536273297052:2293], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:32:38.627607Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519616519093427578:2137], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:38.627749Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519616519093427578:2137], cacheItem# { Subscriber: { Subscriber: [4:7519616536273297063:2310] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:38.627803Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519616519093427578:2137], cacheItem# { Subscriber: { Subscriber: [4:7519616536273297064:2311] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:38.628059Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519616536273297082:2315], recipient# [4:7519616536273297052:2293], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:38.628257Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:7519616536273297052:2293], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } >> TMiniKQLEngineFlatTest::TestEmptyProgram >> TMiniKQLEngineFlatTest::TestEmptyProgram [GOOD] >> TMiniKQLEngineFlatTest::TestEraseRow [GOOD] >> TMiniKQLEngineFlatTest::TestEraseRowNullKey >> TMiniKQLEngineFlatTest::TestEraseRowNullKey [GOOD] >> TMiniKQLEngineFlatTest::TestEraseRowManyShards >> TSchemeShardTest::AlterTableSettings [GOOD] >> TSchemeShardTest::AssignBlockStoreVolume >> TMiniKQLEngineFlatTest::TestEraseRowManyShards [GOOD] >> TMiniKQLEngineFlatTest::TestCASBoth2Success >> TTicketParserTest::TicketFromCertificateCheckIssuerBad [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationBad |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLProtoTestYdb::TestCellsFromTuple [GOOD] >> TMiniKQLEngineFlatTest::TestCASBoth2Success [GOOD] >> TMiniKQLEngineFlatTest::TestEraseRowNoShards >> TTicketParserTest::AccessServiceAuthenticationApiKeyOk [GOOD] >> TTicketParserTest::AuthenticationUnavailable >> TMiniKQLEngineFlatTest::TestEraseRowNoShards [GOOD] >> TMiniKQLEngineFlatTest::TestDiagnostics [GOOD] >> TMiniKQLEngineFlatTest::TestCombineByKeyPushdown >> TMiniKQLEngineFlatTest::TestSelectRangeFullWithoutColumnsNotExists [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFullWithoutColumnsNotExistsNullKey >> TMiniKQLEngineFlatTest::TestCombineByKeyPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestCombineByKeyNoPushdown >> TTicketParserTest::LoginBad [GOOD] >> TTicketParserTest::BulkAuthorizationWithRequiredPermissions >> TMiniKQLEngineFlatTest::TestSelectRangeFullWithoutColumnsNotExistsNullKey [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFullExistsTruncatedByItems >> TMiniKQLEngineFlatTest::TestCombineByKeyNoPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestLengthPushdown >> TMiniKQLEngineFlatTest::TestSelectRowWithoutColumnsNotExists >> TMiniKQLEngineFlatTest::TestSelectRangeFullExistsTruncatedByItems [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFullExistsTruncatedByItemsFromNull >> TMiniKQLEngineFlatTest::TestLengthPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestInternalResult >> TMiniKQLEngineFlatTest::TestSelectRangeFullExistsTruncatedByItemsFromNull [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFullExistsTruncatedByBytes >> TMiniKQLEngineFlatTest::TestInternalResult [GOOD] >> TMiniKQLEngineFlatTest::TestIndependentSelects >> TMiniKQLEngineFlatTest::TestSelectRowWithoutColumnsNotExists [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowWithoutColumnsExists [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowPayload >> TMiniKQLEngineFlatTest::TestSelectRangeFullExistsTruncatedByBytes [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeNullNull >> TMiniKQLEngineFlatTest::TestIndependentSelects [GOOD] >> TMiniKQLEngineFlatTest::TestCrossTableRs >> TSchemeShardTest::AssignBlockStoreVolume [GOOD] >> TSchemeShardTest::AssignBlockStoreVolumeDuringAlter >> TMiniKQLEngineFlatTest::TestSelectRowPayload [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowPayloadNullKey >> TMiniKQLProgramBuilderTest::TestEraseRowStaticKey >> TMiniKQLEngineFlatTest::TestSelectRangeNullNull [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeToExclusive [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeNoShards >> TMiniKQLEngineFlatTest::TestCrossTableRs [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowPayloadNullKey [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeToInclusive >> TMiniKQLProgramBuilderTest::TestEraseRowStaticKey [GOOD] >> TMiniKQLProgramBuilderTest::TestEraseRowPartialDynamicKey >> 
TMiniKQLEngineFlatTest::TestSelectRangeNoShards [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitions >> TMiniKQLEngineFlatTest::TestSelectRangeToInclusive [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowManyShards >> KqpPg::InsertNoTargetColumns_NotOneSize+useSink [GOOD] >> TSubDomainTest::GenericCases [GOOD] >> KqpPg::InsertNoTargetColumns_NotOneSize-useSink >> TMiniKQLProgramBuilderTest::TestEraseRowPartialDynamicKey [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectRow >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitions [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitionsTruncatedByItems1 >> TMiniKQLEngineFlatTest::TestSelectRowManyShards [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowNoShards >> TMiniKQLProgramBuilderTest::TestSelectRow [GOOD] >> TMiniKQLProgramBuilderTest::TestUpdateRowDynamicKey >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitionsTruncatedByItems1 [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitionsTruncatedByItems2 >> TMiniKQLEngineFlatTest::TestSelectRowNoShards [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitions [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitionsTruncatedByItems >> TMiniKQLProgramBuilderTest::TestUpdateRowDynamicKey [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectFromInclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectFromExclusiveRange >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitionsTruncatedByItems2 [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitionsTruncatedByItems3 >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitionsTruncatedByItems [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitionsTruncatedByBytes >> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersGood [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersBad >> TMiniKQLProgramBuilderTest::TestSelectFromExclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectToInclusiveRange >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitionsTruncatedByBytes [GOOD] >> TMiniKQLEngineFlatTest::TestSomePushDown >> TTicketParserTest::AuthenticationUnsupported [GOOD] >> TTicketParserTest::AuthenticationUnknown >> KqpPg::CheckPgAutoParams+useSink [GOOD] >> KqpPg::CheckPgAutoParams-useSink >> TMiniKQLProgramBuilderTest::TestSelectToInclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectToExclusiveRange >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitionsTruncatedByItems3 [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeNoColumns >> TMiniKQLEngineFlatTest::TestSomePushDown [GOOD] >> TMiniKQLEngineFlatTest::TestTakePushdown >> TMiniKQLProgramBuilderTest::TestSelectToExclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectBothFromInclusiveToInclusiveRange >> TMiniKQLEngineFlatTest::TestSelectRangeNoColumns [GOOD] >> TMiniKQLEngineFlatTest::TestTakePushdown [GOOD] >> TMiniKQLEngineFlatTest::TestTopSortNonImmediatePushdown >> TMiniKQLProgramBuilderTest::TestSelectBothFromInclusiveToInclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectBothFromExclusiveToExclusiveRange ------- [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::TestCrossTableRs [GOOD] Test command err: SetProgram (370): ydb/core/engine/mkql_engine_flat.cpp:183: ExtractResultType(): requirement !label.StartsWith(TxInternalResultPrefix) failed. 
Label can't be used in SetResult as it's reserved for internal purposes: __cantuse PrepareShardPrograms (491): too many shard readsets (1 > 0), src tables: [200:301:0], dst tables: [200:302:0] Type { Kind: Struct } >> TMiniKQLEngineFlatTest::TestTopSortNonImmediatePushdown [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectBothFromExclusiveToExclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestInvalidParameterName >> TMiniKQLProgramBuilderTest::TestInvalidParameterName [GOOD] >> TMiniKQLProgramBuilderTest::TestInvalidParameterType >> TMiniKQLProtoTestYdb::TestExportVoidTypeYdb >> TMiniKQLProgramBuilderTest::TestInvalidParameterType [GOOD] >> TMiniKQLProtoTestYdb::TestExportVoidTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportUuidTypeYdb >> TMiniKQLProtoTestYdb::TestExportUuidTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportTupleTypeYdb >> TMiniKQLProtoTestYdb::TestExportTupleTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportStructTypeYdb >> TSchemeShardTest::AssignBlockStoreVolumeDuringAlter [GOOD] >> TSchemeShardTest::AssignBlockStoreCheckVersionInAlter >> TMiniKQLProtoTestYdb::TestExportStructTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportVariantTupleTypeYdb >> TMiniKQLProtoTestYdb::TestExportVariantTupleTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportVariantStructTypeYdb >> TMiniKQLProtoTestYdb::TestExportVariantStructTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportVoidYdb >> TMiniKQLProtoTestYdb::TestExportVoidYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportStringYdb >> TestDataErasure::Run3CyclesForAllSupportedObjects [GOOD] |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::TestSelectRangeNoColumns [GOOD] |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::TestTopSortNonImmediatePushdown [GOOD] >> TMiniKQLProtoTestYdb::TestExportStringYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportUuidYdb >> Cdc::ShouldBreakLocksOnConcurrentAlterTable [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentMoveTable [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentMoveIndex >> TMiniKQLProtoTestYdb::TestExportUuidYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportTupleYdb >> TMiniKQLProtoTestYdb::TestExportTupleYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportStructYdb >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistWithoutColumns >> TMiniKQLProtoTestYdb::TestExportStructYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportVariantYdb |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLProgramBuilderTest::TestInvalidParameterType [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::GenericCases [GOOD] Test command err: 2025-06-24T20:32:26.599570Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616484500200270:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:26.599635Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004791/r3tmp/tmpH0FhCj/pdisk_1.dat 2025-06-24T20:32:27.188180Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:27.188303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Disconnected -> Connecting 2025-06-24T20:32:27.206081Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:27.215411Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:32:27.264252Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TClient is connected to server localhost:27825 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T20:32:27.507347Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519616484500200283:2139] Handle TEvNavigate describe path dc-1 2025-06-24T20:32:27.547354Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519616488795168031:2447] HANDLE EvNavigateScheme dc-1 2025-06-24T20:32:27.547482Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519616484500200308:2153], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:27.547519Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519616484500200308:2153], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-24T20:32:27.547793Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519616488795168032:2448][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:32:27.554376Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616484500199968:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616488795168036:2448] 2025-06-24T20:32:27.554432Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616484500199968:2051] Subscribe: subscriber# [1:7519616488795168036:2448], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.554496Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616484500199974:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616488795168038:2448] 2025-06-24T20:32:27.554512Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616484500199974:2057] Subscribe: subscriber# [1:7519616488795168038:2448], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.554568Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616488795168036:2448][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616484500199968:2051] 2025-06-24T20:32:27.554592Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616488795168038:2448][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616484500199974:2057] 2025-06-24T20:32:27.559519Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616484500199971:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616488795168037:2448] 2025-06-24T20:32:27.559613Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616484500199971:2054] Subscribe: subscriber# 
[1:7519616488795168037:2448], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.559725Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616484500199968:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616488795168036:2448] 2025-06-24T20:32:27.559752Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616484500199974:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616488795168038:2448] 2025-06-24T20:32:27.559995Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616488795168032:2448][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616488795168033:2448] 2025-06-24T20:32:27.560056Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616488795168032:2448][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616488795168035:2448] 2025-06-24T20:32:27.563271Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:32:27.563503Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519616488795168032:2448][/dc-1] Set up state: owner# [1:7519616484500200308:2153], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:27.563664Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616488795168037:2448][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616484500199971:2054] 2025-06-24T20:32:27.563735Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616488795168036:2448][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616488795168033:2448], cookie# 1 2025-06-24T20:32:27.563776Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616488795168037:2448][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616488795168034:2448], cookie# 1 2025-06-24T20:32:27.563833Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616488795168038:2448][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616488795168035:2448], cookie# 1 2025-06-24T20:32:27.563895Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616488795168032:2448][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616488795168034:2448] 2025-06-24T20:32:27.563968Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519616488795168032:2448][/dc-1] Path was already updated: owner# [1:7519616484500200308:2153], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements 
} 2025-06-24T20:32:27.567864Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616484500199971:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616488795168037:2448] 2025-06-24T20:32:27.567917Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616484500199971:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616488795168037:2448], cookie# 1 2025-06-24T20:32:27.567946Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616484500199968:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616488795168036:2448], cookie# 1 2025-06-24T20:32:27.567962Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616484500199974:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616488795168038:2448], cookie# 1 2025-06-24T20:32:27.568007Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616488795168037:2448][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616484500199971:2054], cookie# 1 2025-06-24T20:32:27.568023Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616488795168036:2448][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616484500199968:2051], cookie# 1 2025-06-24T20:32:27.568036Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616488795168038:2448][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616484500199974:2057], cookie# 1 2025-06-24T20:32:27.568077Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616488795168032:2448][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616488795168034:2448], cookie# 1 2025-06-24T20:32:27.568104Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616488795168032:2448][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:32:27.568154Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616488795168032:2448][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616488795168033:2448], cookie# 1 2025-06-24T20:32:27.568177Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616488795168032:2448][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:32:27.568203Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616488795168032:2448][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616488795168035:2448], cookie# 1 2025-06-24T20:32:27.568240Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519616488795168032:2448][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:32:27.644095Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519616484500200308:2153], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 
ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046 ... rsion: 0 }: sender# [4:7519616547676510353:3043] 2025-06-24T20:32:40.086888Z node 4 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [4:7519616521906705030:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [4:7519616547676510359:3044] 2025-06-24T20:32:40.086962Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [4:7519616526201672614:2127], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-24T20:32:40.087042Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [4:7519616526201672614:2127], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [4:7519616547676510346:3043] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:32:40.087161Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519616526201672614:2127], cacheItem# { Subscriber: { Subscriber: [4:7519616547676510346:3043] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:40.087195Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [4:7519616526201672614:2127], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-24T20:32:40.087256Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [4:7519616526201672614:2127], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [4:7519616547676510347:3044] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:32:40.087310Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519616526201672614:2127], cacheItem# { Subscriber: { Subscriber: [4:7519616547676510347:3044] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 
SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:40.087537Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519616547676510360:3045], recipient# [4:7519616547676510337:2293], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:40.146315Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519616526201672469:2118];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:40.146420Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:32:40.270039Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519616526201672614:2127], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:40.270239Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519616526201672614:2127], cacheItem# { Subscriber: { Subscriber: [4:7519616530496640532:2561] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:40.270364Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519616547676510367:3048], recipient# [4:7519616547676510366:2294], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:41.087795Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519616526201672614:2127], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 
ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:41.092732Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519616526201672614:2127], cacheItem# { Subscriber: { Subscriber: [4:7519616547676510338:3041] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:41.092918Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519616551971477673:3054], recipient# [4:7519616551971477672:2295], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:41.151309Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519616526201672614:2127], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:41.151467Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519616526201672614:2127], cacheItem# { Subscriber: { Subscriber: [4:7519616530496640532:2561] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:41.151586Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519616551971477675:3055], recipient# [4:7519616551971477674:2296], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:41.272596Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519616526201672614:2127], request# { ErrorCount: 0 
DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:41.272797Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519616526201672614:2127], cacheItem# { Subscriber: { Subscriber: [4:7519616530496640532:2561] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:41.272951Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519616551971477680:3056], recipient# [4:7519616551971477679:2297], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistWithoutColumns [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistSetPayload >> ReadOnlyVDisk::TestReads [GOOD] >> TMiniKQLProtoTestYdb::TestExportVariantYdb [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistSetPayload [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistSetPayloadNullValue >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistSetPayloadNullValue [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistErasePayload >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistErasePayload [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowExistChangePayload >> TSchemeShardTest::AssignBlockStoreCheckVersionInAlter [GOOD] >> TSchemeShardTest::AssignBlockStoreCheckFillGenerationInAlter ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::Run3CyclesForAllSupportedObjects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:32:32.493843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:32:32.493936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:32:32.493979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:32:32.494018Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:32:32.494083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:32:32.494118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:32:32.494190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:32:32.494277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:32:32.495109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:32:32.495615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:32:32.572745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:32:32.572828Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:32.588958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:32:32.593686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:32:32.593913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:32:32.603473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:32:32.603730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:32:32.604413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:32.604751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:32:32.607590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:32:32.607786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:32:32.608966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:32:32.609034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:32:32.609160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:32:32.609210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, 
schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:32:32.609267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:32:32.609453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.619999Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:32:32.758398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:32:32.758631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.758833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:32:32.758879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:32:32.759129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:32:32.759290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:32:32.761677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:32.761933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:32:32.762216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.762277Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:32:32.762318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:32:32.762357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:32:32.764529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-24T20:32:32.764634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:32:32.764704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:32:32.766796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.766855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.766908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:32.766975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:32:32.770739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:32:32.774111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:32:32.774348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:32:32.775428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:32.775592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:32:32.775650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:32.775977Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:32:32.776033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:32.776264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:32:32.776344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 
1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:32:32.778630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:32:32.778699Z node 1 :FLAT_TX_SCHEMESHARD ... 7: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-24T20:32:43.860844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-24T20:32:43.860913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-24T20:32:43.943567Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:458:2408]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:43.943662Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:43.943910Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:458:2408], Recipient [1:458:2408]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:43.943962Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:43.944124Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [1:801:2686], Recipient [1:458:2408]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409546 2025-06-24T20:32:43.944161Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T20:32:43.944288Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-24T20:32:43.944509Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 3 took 180us result status StatusSuccess 2025-06-24T20:32:43.945076Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409552 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409551 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409552 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 
TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-24T20:32:44.079596Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:958:2816]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:44.079689Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:44.079811Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:958:2816], Recipient [1:958:2816]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:44.079845Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:44.080029Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [1:1383:3165], Recipient [1:958:2816]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409553 2025-06-24T20:32:44.080065Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T20:32:44.080170Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409553, at schemeshard: 72075186233409553 2025-06-24T20:32:44.080385Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409553 describe pathId 3 took 175us result status StatusSuccess 2025-06-24T20:32:44.080960Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409553 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 108 CreateStep: 350 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409559 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409558 Status: Active } 
AlterVersion: 1 BalancerTabletID: 72075186233409559 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 SchemeShard: 72075186233409553 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72075186233409553, at schemeshard: 72075186233409553 2025-06-24T20:32:44.561394Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:44.561496Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:44.561596Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:295:2277], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:44.561632Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:44.611714Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:32:44.611797Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5152: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:32:44.611841Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-06-24T20:32:44.612141Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 268637738, Sender [1:298:2279], Recipient [1:295:2277]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: true Progress10k: 10000 2025-06-24T20:32:44.612188Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5151: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-24T20:32:44.612248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7879: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-24T20:32:44.612342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-24T20:32:44.612382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC 
is completed 2025-06-24T20:32:44.612443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.919000s, Timestamp# 1970-01-01T00:00:11.126000Z 2025-06-24T20:32:44.612481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 3, duration# 2 s 2025-06-24T20:32:44.615782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-24T20:32:44.616443Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:4098:5371], Recipient [1:295:2277]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:32:44.616546Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:32:44.616585Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T20:32:44.616741Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125519, Sender [1:277:2266], Recipient [1:295:2277]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-24T20:32:44.616773Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5149: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-24T20:32:44.616814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7830: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> TMiniKQLEngineFlatTest::TestUpdateRowExistChangePayload [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowExistErasePayload >> TMiniKQLEngineFlatTest::TestUpdateRowExistErasePayload [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowManyShards |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLProtoTestYdb::TestExportVariantYdb [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowManyShards [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowNoShards >> TTicketParserTest::BulkAuthorizationRetryError [GOOD] >> TTicketParserTest::BulkAuthorizationRetryErrorImmediately >> TMiniKQLEngineFlatTest::TestUpdateRowNoShards [GOOD] >> TMiniKQLEngineFlatTest::TestTopSortPushdownPk >> TMiniKQLProtoTestYdb::TestExportOptionalTypeYdb >> TMiniKQLEngineFlatTest::TestTopSortPushdownPk [GOOD] >> TMiniKQLEngineFlatTest::TestTopSortPushdown >> TMiniKQLProtoTestYdb::TestExportOptionalTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportListTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportIntegralYdb >> TMiniKQLEngineFlatTest::TestTopSortPushdown [GOOD] >> TMiniKQLProgramBuilderTest::TestEraseRowDynamicKey >> TMiniKQLProtoTestYdb::TestExportIntegralYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportEmptyOptionalYdb >> TMiniKQLProgramBuilderTest::TestEraseRowDynamicKey [GOOD] >> TMiniKQLProgramBuilderTest::TestAcquireLocks [GOOD] >> TMiniKQLProgramBuilderTest::TestDiagnostics >> TMiniKQLProtoTestYdb::TestExportEmptyOptionalYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalNotEmptyYdb >> TMiniKQLProgramBuilderTest::TestDiagnostics [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestReads [GOOD] Test command err: RandomSeed# 4078354925792288439 === Trying to put and get a blob === SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# 
[1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #0 to read-only === Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #1 to read-only === Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #2 to read-only === Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #3 to read-only === Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #4 to read-only === Setting VDisk read-only to 1 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #5 to read-only === Setting VDisk read-only to 1 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #6 to read-only === Setting VDisk read-only to 1 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #0 === Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #1 === Setting VDisk read-only to 0 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #2 === Setting VDisk read-only to 0 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #3 === Setting VDisk read-only to 0 for 
position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #4 === Setting VDisk read-only to 0 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #5 === Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #6 === Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalNotEmptyYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportOptionalYdb >> TSubDomainTest::ConsistentCopyTable [GOOD] >> TMiniKQLProtoTestYdb::TestExportOptionalYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportListYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantNotNullYdb >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantNotNullYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportOptionalVariantOptionalNullYdb >> TSchemeShardTest::AssignBlockStoreCheckFillGenerationInAlter [GOOD] >> TSchemeShardTest::BlockStoreVolumeLimits >> TMiniKQLProtoTestYdb::TestExportOptionalVariantOptionalNullYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantOptionalNullYdb >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantOptionalNullYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantOptionalNotNullYdb >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantOptionalNotNullYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportOptionalVariantOptionalYdbType |88.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut |88.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut >> TMiniKQLProtoTestYdb::TestExportOptionalVariantOptionalYdbType [GOOD] |88.9%| [TA] {RESULT} $(B)/ydb/services/ydb/table_split_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.9%| [LD] {RESULT} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLProgramBuilderTest::TestDiagnostics [GOOD] |88.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica |88.9%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica |88.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica >> TTicketParserTest::AuthorizationRetryError [GOOD] >> TTicketParserTest::AuthorizationRetryErrorImmediately >> TTicketParserTest::TicketFromCertificateWithValidationBad [GOOD] >> TTicketParserTest::NebiusAuthorizationWithRequiredPermissions >> Cdc::DecimalKey [GOOD] >> Cdc::AddColumn >> TMiniKQLEngineFlatTest::TestPureProgram >> TMiniKQLEngineFlatTest::TestPureProgram [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFullExists [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFromInclusive >> TSubDomainTest::CreateTableInsideSubDomain [GOOD] >> TTicketParserTest::AuthenticationUnavailable [GOOD] >> TTicketParserTest::AuthenticationRetryError >> TMiniKQLEngineFlatTest::TestSelectRangeFromInclusive [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFromExclusive |88.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut |88.9%| [LD] {RESULT} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut |88.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut |88.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |88.9%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |88.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLProtoTestYdb::TestExportOptionalVariantOptionalYdbType [GOOD] >> TTicketParserTest::BulkAuthorizationWithRequiredPermissions [GOOD] >> TTicketParserTest::BulkAuthorizationWithUserAccount >> TestDataErasure::DataErasureWithCopyTable [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFromExclusive [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeBothIncFromIncTo >> KqpPg::SelectIndex+useSink [GOOD] >> KqpPg::SelectIndex-useSink >> TMiniKQLEngineFlatTest::TestSelectRangeBothIncFromIncTo [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeBothExcFromIncTo [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeBothIncFromExcTo >> TMiniKQLEngineFlatTest::TestSelectRangeBothIncFromExcTo [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeBothExcFromExcTo >> TMiniKQLEngineFlatTest::TestSelectRangeBothExcFromExcTo [GOOD] >> TMiniKQLEngineFlatTest::TestMapsPushdown >> TMiniKQLEngineFlatTest::TestMapsPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestNoOrderedTakePushdown >> TMiniKQLEngineFlatTest::TestNoOrderedTakePushdown [GOOD] >> TMiniKQLEngineFlatTest::TestNoAggregatedPushdown >> TMiniKQLEngineFlatTest::TestNoAggregatedPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestNoPartialSortPushdown ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::ConsistentCopyTable [GOOD] Test command err: 2025-06-24T20:32:26.390234Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616488044666651:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:26.390293Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004786/r3tmp/tmpZQpyh2/pdisk_1.dat 2025-06-24T20:32:26.800930Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:26.837528Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:26.837729Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:26.839748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14551 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T20:32:27.038316Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519616488044666840:2104] Handle TEvNavigate describe path dc-1 2025-06-24T20:32:27.061323Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519616492339634411:2258] HANDLE EvNavigateScheme dc-1 2025-06-24T20:32:27.061462Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519616488044666885:2126], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:27.061505Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519616488044666885:2126], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-24T20:32:27.061738Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519616492339634412:2259][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:32:27.065167Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616488044666572:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616492339634416:2259] 2025-06-24T20:32:27.065264Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616488044666572:2049] Subscribe: subscriber# [1:7519616492339634416:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.065375Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616488044666578:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616492339634418:2259] 2025-06-24T20:32:27.065396Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616488044666578:2055] Subscribe: subscriber# [1:7519616492339634418:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.065448Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616492339634416:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616488044666572:2049] 2025-06-24T20:32:27.065502Z node 1 :SCHEME_BOARD_SUBSCRIBER 
DEBUG: subscriber.cpp:365: [replica][1:7519616492339634418:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616488044666578:2055] 2025-06-24T20:32:27.065579Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616492339634412:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616492339634413:2259] 2025-06-24T20:32:27.065626Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616492339634412:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616492339634415:2259] 2025-06-24T20:32:27.065679Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519616492339634412:2259][/dc-1] Set up state: owner# [1:7519616488044666885:2126], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:27.065809Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616492339634416:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492339634413:2259], cookie# 1 2025-06-24T20:32:27.065822Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616492339634417:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492339634414:2259], cookie# 1 2025-06-24T20:32:27.065848Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616492339634418:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492339634415:2259], cookie# 1 2025-06-24T20:32:27.065875Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616488044666572:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616492339634416:2259] 2025-06-24T20:32:27.065899Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616488044666572:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492339634416:2259], cookie# 1 2025-06-24T20:32:27.065918Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616488044666578:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616492339634418:2259] 2025-06-24T20:32:27.065928Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616488044666578:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492339634418:2259], cookie# 1 2025-06-24T20:32:27.070655Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616488044666575:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616492339634417:2259] 2025-06-24T20:32:27.070711Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616488044666575:2052] Subscribe: subscriber# [1:7519616492339634417:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.070765Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616488044666575:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492339634417:2259], cookie# 1 2025-06-24T20:32:27.070813Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: 
subscriber.cpp:390: [replica][1:7519616492339634416:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616488044666572:2049], cookie# 1 2025-06-24T20:32:27.070829Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616492339634418:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616488044666578:2055], cookie# 1 2025-06-24T20:32:27.070859Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616492339634417:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616488044666575:2052] 2025-06-24T20:32:27.070885Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616492339634417:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616488044666575:2052], cookie# 1 2025-06-24T20:32:27.070922Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616492339634412:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616492339634413:2259], cookie# 1 2025-06-24T20:32:27.070960Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616492339634412:2259][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:32:27.070996Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616492339634412:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616492339634415:2259], cookie# 1 2025-06-24T20:32:27.071010Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616492339634412:2259][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:32:27.071041Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616492339634412:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616492339634414:2259] 2025-06-24T20:32:27.071107Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519616492339634412:2259][/dc-1] Path was already updated: owner# [1:7519616488044666885:2126], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:27.071129Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616492339634412:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616492339634414:2259], cookie# 1 2025-06-24T20:32:27.071172Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519616492339634412:2259][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:32:27.071204Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616488044666575:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616492339634417:2259] 2025-06-24T20:32:27.152378Z 
node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519616488044666885:2126], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Data ... RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:46.023541Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [6:7519616544057735063:2124], cacheItem# { Subscriber: { Subscriber: [6:7519616569827539062:2248] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:46.023594Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [6:7519616544057735063:2124], cacheItem# { Subscriber: { Subscriber: [6:7519616569827539063:2249] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:46.023739Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [6:7519616574122506379:2254], recipient# [6:7519616569827539059:2285], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable 
RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:46.024215Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:7519616569827539059:2285], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:32:46.332028Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [6:7519616544057735063:2124], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:46.332185Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [6:7519616544057735063:2124], cacheItem# { Subscriber: { Subscriber: [6:7519616548352702522:2228] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:46.332306Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [6:7519616574122506382:2255], recipient# [6:7519616574122506381:2287], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:46.427272Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [6:7519616544057735063:2124], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:46.427421Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [6:7519616544057735063:2124], cacheItem# { Subscriber: { Subscriber: [6:7519616569827539049:2246] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:46.427548Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [6:7519616574122506384:2256], recipient# [6:7519616574122506383:2288], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: 
[18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:46.427880Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:32:46.493002Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [6:7519616544057735063:2124], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:46.493192Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [6:7519616544057735063:2124], cacheItem# { Subscriber: { Subscriber: [6:7519616548352702522:2228] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:46.493329Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [6:7519616574122506386:2257], recipient# [6:7519616574122506385:2289], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:46.523385Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [6:7519616544057735063:2124], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:46.523548Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [6:7519616544057735063:2124], cacheItem# { Subscriber: { Subscriber: [6:7519616569827539062:2248] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: 
[18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:46.523655Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [6:7519616544057735063:2124], cacheItem# { Subscriber: { Subscriber: [6:7519616569827539063:2249] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:46.523795Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [6:7519616574122506387:2258], recipient# [6:7519616569827539059:2285], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:46.524278Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:7519616569827539059:2285], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } >> TMiniKQLEngineFlatTest::TestNoPartialSortPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestMultiRSPerDestination >> TMiniKQLEngineFlatTest::TestMultiRSPerDestination [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::DataErasureWithCopyTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:32:33.035585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:32:33.035711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:32:33.035776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:32:33.035828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:32:33.035885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:32:33.035923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:32:33.036011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:32:33.036113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:32:33.037092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:32:33.037538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:32:33.139656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:32:33.139738Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:33.166937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:32:33.172993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:32:33.173238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:32:33.195861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:32:33.196176Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:32:33.197021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:33.197460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:32:33.201533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:32:33.201790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:32:33.203575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:32:33.203656Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:32:33.203811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:32:33.203904Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:32:33.203960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:32:33.204148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:32:33.212401Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:32:33.391074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:32:33.391398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:33.391682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:32:33.391775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:32:33.392089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:32:33.392244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, 
first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:32:33.396586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:33.396911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:32:33.397219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:33.397292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:32:33.397346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:32:33.397392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:32:33.400092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:33.400179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:32:33.400231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:32:33.402670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:33.402741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:33.402820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:33.403243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:32:33.408017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:32:33.410682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:32:33.410958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:32:33.412292Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:33.412488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:32:33.412562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:33.412928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:32:33.412991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:33.413271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:32:33.413383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:32:33.416289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:32:33.416368Z node 1 :FLAT_TX_SCHEMESHARD ... d(TabletID)=72075186233409552 maps to shardIdx: 72075186233409546:7 followerId=0, pathId: [OwnerId: 72075186233409546, LocalPathId: 3], pathId map=SimpleCopy, is column=0, is olap=0, RowCount 50, DataSize 5121950 2025-06-24T20:32:47.992150Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409552, followerId 0 2025-06-24T20:32:47.992190Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72075186233409546:7 with partCount# 1, rowCount# 50, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:50.000000Z at schemeshard 72075186233409546 2025-06-24T20:32:47.992235Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409552 2025-06-24T20:32:47.992299Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72075186233409546 2025-06-24T20:32:48.007478Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:280:2239]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T20:32:48.007551Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T20:32:48.007582Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72075186233409546, queue size# 0 2025-06-24T20:32:48.031536Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 
2025-06-24T20:32:48.031607Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:48.031698Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:185:2177], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:48.031726Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:48.045254Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:280:2239]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:48.045336Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:48.045430Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:280:2239], Recipient [2:280:2239]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:48.045464Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:48.081522Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:48.081612Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:48.081729Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:185:2177], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:48.081764Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:48.097265Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:280:2239]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:48.097354Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:48.097467Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:280:2239], Recipient [2:280:2239]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:48.097502Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:48.132669Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:48.132753Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:48.132866Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender 
[2:185:2177], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:48.132898Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:48.143528Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:280:2239]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:48.143620Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:48.143719Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:280:2239], Recipient [2:280:2239]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:48.143755Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:48.179681Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:48.179768Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:48.179875Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:185:2177], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:48.179915Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:48.193514Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:280:2239]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:48.193610Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:48.193727Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:280:2239], Recipient [2:280:2239]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:48.193764Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:48.227625Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:48.227714Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:48.227872Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:185:2177], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:48.227909Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:48.238966Z node 2 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125517, Sender [0:0:0], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:32:48.239047Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5152: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:32:48.239084Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-24T20:32:48.239365Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 268637738, Sender [2:188:2179], Recipient [2:185:2177]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-06-24T20:32:48.239407Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5151: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-24T20:32:48.239437Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7879: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-24T20:32:48.239509Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-24T20:32:48.239559Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-24T20:32:48.239630Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 14.999500s, Timestamp# 1970-01-01T00:01:25.000500Z 2025-06-24T20:32:48.239697Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 1, duration# 35 s 2025-06-24T20:32:48.240347Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-24T20:32:48.243766Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [2:1735:3440], Recipient [2:185:2177]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:32:48.243846Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:32:48.243892Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T20:32:48.244103Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125519, Sender [2:171:2170], Recipient [2:185:2177]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-24T20:32:48.244144Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5149: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-24T20:32:48.244193Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7830: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersBad [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationDefaultGroupGood ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::CreateTableInsideSubDomain [GOOD] Test command err: 2025-06-24T20:32:26.477139Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616488274121944:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:26.477594Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004782/r3tmp/tmpCUpKWl/pdisk_1.dat 2025-06-24T20:32:26.998523Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:27.019938Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:27.020048Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:27.034137Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22345 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T20:32:27.270413Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519616488274122073:2133] Handle TEvNavigate describe path dc-1 2025-06-24T20:32:27.296138Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519616492569089819:2440] HANDLE EvNavigateScheme dc-1 2025-06-24T20:32:27.296294Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519616488274122104:2147], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:27.296336Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519616488274122104:2147], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-24T20:32:27.296534Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519616492569089820:2441][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:32:27.298900Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616488274121767:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616492569089824:2441] 2025-06-24T20:32:27.298969Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616488274121767:2050] Subscribe: subscriber# [1:7519616492569089824:2441], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.299060Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616488274121773:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616492569089826:2441] 2025-06-24T20:32:27.299082Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616488274121773:2056] Subscribe: subscriber# [1:7519616492569089826:2441], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.299135Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616492569089824:2441][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616488274121767:2050] 2025-06-24T20:32:27.299175Z node 1 :SCHEME_BOARD_SUBSCRIBER 
DEBUG: subscriber.cpp:365: [replica][1:7519616492569089826:2441][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616488274121773:2056] 2025-06-24T20:32:27.299221Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616492569089820:2441][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616492569089821:2441] 2025-06-24T20:32:27.299257Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616492569089820:2441][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616492569089823:2441] 2025-06-24T20:32:27.299321Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519616492569089820:2441][/dc-1] Set up state: owner# [1:7519616488274122104:2147], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:27.299516Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616492569089824:2441][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492569089821:2441], cookie# 1 2025-06-24T20:32:27.299536Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616492569089825:2441][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492569089822:2441], cookie# 1 2025-06-24T20:32:27.299551Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616492569089826:2441][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492569089823:2441], cookie# 1 2025-06-24T20:32:27.299598Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616488274121767:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616492569089824:2441] 2025-06-24T20:32:27.299633Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616488274121767:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492569089824:2441], cookie# 1 2025-06-24T20:32:27.299654Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616488274121773:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616492569089826:2441] 2025-06-24T20:32:27.299670Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616488274121773:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492569089826:2441], cookie# 1 2025-06-24T20:32:27.301425Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616488274121770:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616492569089825:2441] 2025-06-24T20:32:27.301471Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616488274121770:2053] Subscribe: subscriber# [1:7519616492569089825:2441], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.301535Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616488274121770:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492569089825:2441], cookie# 1 2025-06-24T20:32:27.301592Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: 
subscriber.cpp:390: [replica][1:7519616492569089824:2441][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616488274121767:2050], cookie# 1 2025-06-24T20:32:27.301610Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616492569089826:2441][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616488274121773:2056], cookie# 1 2025-06-24T20:32:27.301640Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616492569089825:2441][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616488274121770:2053] 2025-06-24T20:32:27.301680Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616492569089825:2441][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616488274121770:2053], cookie# 1 2025-06-24T20:32:27.301720Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616492569089820:2441][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616492569089821:2441], cookie# 1 2025-06-24T20:32:27.301748Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616492569089820:2441][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:32:27.301764Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616492569089820:2441][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616492569089823:2441], cookie# 1 2025-06-24T20:32:27.301776Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616492569089820:2441][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:32:27.301805Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616492569089820:2441][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616492569089822:2441] 2025-06-24T20:32:27.301879Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519616492569089820:2441][/dc-1] Path was already updated: owner# [1:7519616488274122104:2147], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:27.301902Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616492569089820:2441][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616492569089822:2441], cookie# 1 2025-06-24T20:32:27.301924Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519616492569089820:2441][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:32:27.301954Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616488274121770:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616492569089825:2441] 2025-06-24T20:32:27.367716Z 
node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519616488274122104:2147], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Data ... RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:44.994526Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [5:7519616561744291897:2752], recipient# [5:7519616561744291874:2276], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:44.994586Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [5:7519616561744291898:2753], recipient# [5:7519616561744291875:2277], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:44.994615Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][5:7519616561744291888:2749][/dc-1/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [5:7519616544564421532:2050] 2025-06-24T20:32:44.994641Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][5:7519616561744291876:2749][/dc-1/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [5:7519616561744291885:2749] 2025-06-24T20:32:44.994667Z node 5 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: 
[main][5:7519616561744291876:2749][/dc-1/.metadata/workload_manager/delayed_requests] Ignore empty state: owner# [5:7519616544564421823:2126], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:44.994683Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7519616544564421532:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [5:7519616561744291888:2749] 2025-06-24T20:32:45.426384Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [5:7519616544564421823:2126], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:45.426545Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [5:7519616544564421823:2126], cacheItem# { Subscriber: { Subscriber: [5:7519616548859389844:2655] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:45.426655Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [5:7519616566039259201:2756], recipient# [5:7519616566039259200:2278], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:46.003667Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [5:7519616544564421823:2126], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:46.003836Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [5:7519616544564421823:2126], cacheItem# { Subscriber: { Subscriber: [5:7519616561744291878:2751] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# 
{ Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:46.003933Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [5:7519616570334226507:2762], recipient# [5:7519616570334226506:2279], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:46.427324Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [5:7519616544564421823:2126], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:46.427475Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [5:7519616544564421823:2126], cacheItem# { Subscriber: { Subscriber: [5:7519616548859389844:2655] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:46.427569Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [5:7519616570334226512:2763], recipient# [5:7519616570334226511:2280], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:47.005197Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [5:7519616544564421823:2126], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:47.005370Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [5:7519616544564421823:2126], cacheItem# { Subscriber: { Subscriber: [5:7519616561744291878:2751] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: 
Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:47.005462Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [5:7519616574629193816:2767], recipient# [5:7519616574629193815:2281], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:47.431380Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [5:7519616544564421823:2126], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:47.431548Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [5:7519616544564421823:2126], cacheItem# { Subscriber: { Subscriber: [5:7519616548859389844:2655] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:32:47.431670Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [5:7519616574629193821:2768], recipient# [5:7519616574629193820:2282], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> Cdc::RacySplitAndDropTable [GOOD] >> Cdc::RenameTable >> TTicketParserTest::AuthenticationUnknown [GOOD] >> TTicketParserTest::Authorization >> Cdc::InitialScan [GOOD] >> Cdc::InitialScan_WithTopicSchemeTx >> TTicketParserTest::LoginRefreshGroupsWithError [GOOD] >> TTicketParserTest::NebiusAccessServiceAuthenticationOk >> TestDataErasure::ManualLaunch3CyclesWithNotConsistentCountersInSchemeShardAndBSC [GOOD] >> TSchemeShardTest::BlockStoreVolumeLimits [GOOD] >> TSchemeShardTest::BlockStoreNonreplVolumeLimits ------- [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::TestMultiRSPerDestination [GOOD] Test command err: PrepareShardPrograms (491): too many shard readsets (2 > 1), src tables: [200:301:0], dst tables: [200:301:0] Type { Kind: Struct } >> TTicketParserTest::NebiusAuthorizationRetryError [GOOD] >> TTicketParserTest::NebiusAuthorizationRetryErrorImmediately |89.0%| [TA] $(B)/ydb/core/engine/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |89.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache |89.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache |89.0%| [TA] {RESULT} $(B)/ydb/core/engine/ut/test-results/unittest/{meta.json ... results_accumulator.log} |89.0%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache |89.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker |89.0%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker |89.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::ManualLaunch3CyclesWithNotConsistentCountersInSchemeShardAndBSC [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:32:32.841920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:32:32.842012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:32:32.842053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:32:32.842091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:32:32.842133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:32:32.842162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:32:32.842252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:32:32.842333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:32:32.843074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:32:32.843443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:32:32.929148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:32:32.929206Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table 
profiles were not loaded 2025-06-24T20:32:32.946186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:32:32.946624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:32:32.946800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:32:32.954502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:32:32.954683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:32:32.955356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:32.955682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:32:32.958608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:32:32.958797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:32:32.959897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:32:32.959967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:32:32.960175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:32:32.960226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:32:32.960275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:32:32.960362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.980487Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:32:33.119277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:32:33.119508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:33.119707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:32:33.119769Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:32:33.120009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:32:33.120133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:32:33.128238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:33.128498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:32:33.128713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:33.128787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:32:33.128838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:32:33.128874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:32:33.135943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:33.136128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:32:33.136176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:32:33.143954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:33.144018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:33.144066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:33.144135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:32:33.147886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:32:33.152264Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:32:33.152482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:32:33.153459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:33.153609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:32:33.153652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:33.153932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:32:33.153983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:33.154138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:32:33.154221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:32:33.160436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:32:33.160492Z node 1 :FLAT_TX_SCHEMESHARD ... 
hard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-24T20:32:49.252097Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-24T20:32:49.252160Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-24T20:32:49.252222Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-24T20:32:49.624903Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:49.624979Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:49.625099Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:464:2414], Recipient [2:464:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:49.625123Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:49.625238Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [2:801:2685], Recipient [2:464:2414]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409546 2025-06-24T20:32:49.625269Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T20:32:49.625334Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-24T20:32:49.625452Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 3 took 98us result status StatusSuccess 2025-06-24T20:32:49.625796Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 250 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409552 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409551 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409552 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { 
SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-24T20:32:49.704998Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:965:2820]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:49.705075Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:49.705213Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:965:2820], Recipient [2:965:2820]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:49.705247Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:49.705372Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [2:1373:3157], Recipient [2:965:2820]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409553 2025-06-24T20:32:49.705402Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T20:32:49.705486Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409553, at schemeshard: 72075186233409553 2025-06-24T20:32:49.705659Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409553 describe pathId 3 took 134us result status StatusSuccess 2025-06-24T20:32:49.706663Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409553 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 108 CreateStep: 400 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409559 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409558 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409559 NextPartitionId: 1 } DomainDescription { 
SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 SchemeShard: 72075186233409553 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72075186233409553, at schemeshard: 72075186233409553 2025-06-24T20:32:49.791664Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:49.791766Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:49.791880Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:295:2277], Recipient [2:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:49.791919Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:49.869911Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125517, Sender [0:0:0], Recipient [2:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:32:49.869992Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5152: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:32:49.870026Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 101 2025-06-24T20:32:49.870296Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 268637738, Sender [2:300:2280], Recipient [2:295:2277]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 101 Completed: true Progress10k: 10000 2025-06-24T20:32:49.870323Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5151: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-24T20:32:49.870352Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7879: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-24T20:32:49.870418Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-24T20:32:49.870464Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-24T20:32:49.870504Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 101, duration# 2 s 2025-06-24T20:32:49.873463Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-24T20:32:49.874206Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [2:4058:5330], Recipient [2:295:2277]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:32:49.874268Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:32:49.874347Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T20:32:49.874479Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125519, Sender [2:3216:4658], Recipient [2:295:2277]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-24T20:32:49.874512Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5149: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-24T20:32:49.874548Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7830: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> TTicketParserTest::BulkAuthorizationRetryErrorImmediately [GOOD] >> TTicketParserTest::BulkAuthorization >> TestDataErasure::Run3CyclesForTopics >> TSchemeShardTest::BlockStoreNonreplVolumeLimits [GOOD] >> TSchemeShardTest::BlockStoreSystemVolumeLimits >> TestDataErasure::Run3CyclesForTables >> TTicketParserTest::BulkAuthorizationWithUserAccount [GOOD] >> TTicketParserTest::BulkAuthorizationWithUserAccount2 >> TTicketParserTest::AuthorizationRetryErrorImmediately [GOOD] >> TTicketParserTest::AuthorizationWithRequiredPermissions >> TTicketParserTest::NebiusAuthorizationWithRequiredPermissions [GOOD] >> TTicketParserTest::NebiusAuthorizationUnavailable >> TSchemeShardTest::BlockStoreSystemVolumeLimits [GOOD] >> TSchemeShardTest::AlterTableWithCompactionStrategies >> TestDataErasure::DataErasureWithSplit >> TTicketParserTest::NebiusAccessServiceAuthenticationOk [GOOD] >> TTicketParserTest::NebiusAuthenticationRetryError >> TTicketParserTest::Authorization [GOOD] >> TTicketParserTest::AuthorizationModify >> KqpPg::InsertNoTargetColumns_NotOneSize-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Alter+useSink >> TTicketParserTest::TicketFromCertificateWithValidationDefaultGroupGood [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationCheckIssuerBad >> TTicketParserTest::NebiusAuthorizationRetryErrorImmediately [GOOD] >> TTicketParserTest::NebiusAuthorization >> TSchemeShardTest::AlterTableWithCompactionStrategies [GOOD] >> TSchemeShardTest::BackupBackupCollection-WithIncremental-false >> TTicketParserTest::BulkAuthorization [GOOD] >> TTicketParserTest::AuthorizationWithUserAccount2 |89.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut |89.0%| [LD] {RESULT} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut |89.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut |89.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan |89.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan |89.0%| 
[LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan >> TTicketParserTest::BulkAuthorizationWithUserAccount2 [GOOD] >> TTicketParserTest::BulkAuthorizationUnavailable >> TTicketParserTest::AuthorizationWithRequiredPermissions [GOOD] >> TTicketParserTest::AuthorizationWithUserAccount >> KqpPg::SelectIndex-useSink [GOOD] >> KqpPg::TableDeleteAllData+useSink >> Cdc::AddColumn [GOOD] >> Cdc::AddColumn_TopicAutoPartitioning >> TSchemeShardTest::BackupBackupCollection-WithIncremental-false [GOOD] >> TSchemeShardTest::BackupBackupCollection-WithIncremental-true >> TTicketParserTest::NebiusAuthorizationUnavailable [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentMoveIndex [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentDropIndex >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable [GOOD] >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable2 >> TTicketParserTest::AuthorizationModify [GOOD] >> Cdc::RenameTable [GOOD] >> Cdc::ResolvedTimestamps >> Viewer::JsonAutocompleteSimilarDatabaseName >> TTicketParserTest::NebiusAuthorization [GOOD] >> TTicketParserTest::NebiusAuthorizationModify ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::NebiusAuthorizationUnavailable [GOOD] Test command err: 2025-06-24T20:32:33.431016Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616515118620380:2129];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:33.431089Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00469e/r3tmp/tmpuhogRB/pdisk_1.dat 2025-06-24T20:32:33.957930Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:33.959063Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616515118620288:2079] 1750797153427295 != 1750797153427298 TServer::EnableGrpc on GrpcPort 1247, node 1 2025-06-24T20:32:33.980818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:33.980913Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:33.984570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:32:34.087310Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:34.087335Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:34.087348Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:34.087479Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61130 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:34.439866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:32:34.447431Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T20:32:34.454791Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket 812AB7EB30A9E2EAF37B85264A8ED3D84F653ACC7AC6B48E34EFC39055576497 () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-24T20:32:37.323875Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616531451268765:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:37.324078Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00469e/r3tmp/tmpRMzkBj/pdisk_1.dat 2025-06-24T20:32:37.526477Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:37.529347Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519616531451268739:2079] 1750797157293027 != 1750797157293030 2025-06-24T20:32:37.545283Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:37.545394Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:37.548578Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1062, node 2 2025-06-24T20:32:37.635824Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:37.635847Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:37.635854Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:37.635944Z node 2 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1881 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:37.959642Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:37.968122Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:32:37.971630Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket B8BC7A621DE35EE2D8235B0AAF13A619C5B0EFAA293E34EE3DEE2D6F520972CB () has now permanent error message 'Cannot create token from certificate. Client`s certificate and server`s certificate have different issuers' 2025-06-24T20:32:37.972206Z node 2 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket B8BC7A621DE35EE2D8235B0AAF13A619C5B0EFAA293E34EE3DEE2D6F520972CB: Cannot create token from certificate. 
Client`s certificate and server`s certificate have different issuers 2025-06-24T20:32:42.671723Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519616555059155160:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:42.674947Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00469e/r3tmp/tmp1CK07S/pdisk_1.dat 2025-06-24T20:32:42.957510Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:42.983486Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519616555059154952:2079] 1750797162608078 != 1750797162608081 2025-06-24T20:32:42.991779Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:42.991892Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:43.002200Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27762, node 3 2025-06-24T20:32:43.252195Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:43.252232Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:43.252243Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:43.252423Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:32:43.671387Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15470 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:43.972520Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:32:43.983503Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:32:43.989962Z node 3 :TICKET_ ... 1: [517000023b88] Connect to grpc://localhost:11591 2025-06-24T20:32:49.017702Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000023b88] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } 0: "OK" 2025-06-24T20:32:49.035160Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000023b88] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } } 2025-06-24T20:32:49.035478Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1219: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-06-24T20:32:49.035627Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-24T20:32:49.036266Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:49.036284Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:49.036291Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:49.036351Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-06-24T20:32:49.036623Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000023b88] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } 0: "OK" 2025-06-24T20:32:49.038931Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000023b88] Response AuthorizeResponse { results { key: 0 
value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } } 2025-06-24T20:32:49.039171Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1219: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-06-24T20:32:49.039234Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (8E120919) () has now permanent error message 'something.write for aaaa1234 bbbb4554 - PERMISSION_DENIED' 2025-06-24T20:32:54.590455Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519616606714094991:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:54.632979Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00469e/r3tmp/tmpsQ2gQY/pdisk_1.dat 2025-06-24T20:32:54.806160Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519616606714094971:2079] 1750797174581934 != 1750797174581937 2025-06-24T20:32:54.806253Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:54.817598Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:54.817726Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:54.820378Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64503, node 5 2025-06-24T20:32:54.935969Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:54.935998Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:54.936007Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:54.936174Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3371 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:32:55.404322Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:55.414260Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:32:55.420778Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:55.420816Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:55.420826Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:55.420905Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-06-24T20:32:55.420956Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700010d588] Connect to grpc://localhost:7343 2025-06-24T20:32:55.422356Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700010d588] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response 14: "Service Unavailable" 2025-06-24T20:32:55.446101Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700010d588] Status 14 Service Unavailable 2025-06-24T20:32:55.446289Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-24T20:32:55.446309Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.write now has a retryable error "Service Unavailable" retryable: 1 2025-06-24T20:32:55.446643Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-24T20:32:55.446750Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-06-24T20:32:55.447128Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700010d588] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize 
request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response 14: "Service Unavailable" 2025-06-24T20:32:55.454225Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700010d588] Status 14 Service Unavailable 2025-06-24T20:32:55.455379Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-24T20:32:55.455417Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.write now has a retryable error "Service Unavailable" retryable: 1 2025-06-24T20:32:55.455447Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' >> Viewer::TabletMerging >> Cdc::InitialScan_WithTopicSchemeTx [GOOD] >> Cdc::InitialScan_TopicAutoPartitioning >> TTicketParserTest::TicketFromCertificateWithValidationCheckIssuerBad [GOOD] >> Viewer::PDiskMerging >> Viewer::LevenshteinDistance [GOOD] >> Viewer::JsonStorageListingV2 >> Viewer::FuzzySearcherLimit3OutOf4 [GOOD] >> Viewer::FuzzySearcherLimit4OutOf4 [GOOD] >> Viewer::FuzzySearcherLongWord [GOOD] >> Viewer::FuzzySearcherPriority [GOOD] >> Viewer::JsonAutocompleteColumns >> Viewer::SelectStringWithNoBase64Encoding >> TTicketParserTest::AuthorizationWithUserAccount2 [GOOD] >> TTicketParserTest::BulkAuthorizationModify >> Viewer::JsonAutocompleteEmpty >> TestDataErasure::Run3CyclesForTopics [GOOD] >> Viewer::PDiskMerging [GOOD] >> Viewer::SelectStringWithBase64Encoding ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::AuthorizationModify [GOOD] Test command err: 2025-06-24T20:32:34.073465Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616520176195161:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:34.074138Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004694/r3tmp/tmpVPukwh/pdisk_1.dat 2025-06-24T20:32:34.919459Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:34.949955Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616520176195130:2079] 1750797154049949 != 1750797154049952 2025-06-24T20:32:34.984647Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:34.984762Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:34.990195Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25096, node 1 2025-06-24T20:32:35.093941Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:32:35.214679Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:35.214712Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:35.214719Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:35.214830Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7647 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:35.823557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:32:35.848108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:32:35.852892Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:35.852940Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:35.852948Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:35.853371Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-24T20:32:35.853467Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000010e88] Connect to grpc://localhost:21357 2025-06-24T20:32:35.857861Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010e88] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-06-24T20:32:35.891428Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000010e88] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-06-24T20:32:35.891825Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-24T20:32:35.893119Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000011588] Connect to grpc://localhost:29686 2025-06-24T20:32:35.893975Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000011588] Request GetUserAccountRequest { user_account_id: "user1" } 2025-06-24T20:32:35.956201Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000011588] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-06-24T20:32:35.959565Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-24T20:32:38.755904Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616536568642470:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:38.756194Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004694/r3tmp/tmp8eiVUq/pdisk_1.dat 2025-06-24T20:32:39.149817Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:39.149908Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:39.167432Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519616536568642266:2079] 1750797158703809 != 1750797158703812 2025-06-24T20:32:39.178724Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:39.182724Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25330, node 2 2025-06-24T20:32:39.363495Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:39.363521Z node 2 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:39.363530Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:39.363650Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15633 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T20:32:39.730156Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:39.776705Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:32:39.797319Z node 2 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (8E120919): Token is not supported 2025-06-24T20:32:43.715111Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519616558675212403:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:43.715208Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004694/r3tmp/tmpTJyRQS/pdisk_1.dat 2025-06-24T20:32:43.953052Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:43.953145Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:43.958274Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27449, node 3 2025-06-24T20:32:44.004330Z node 3 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-24T20:32:44.130112Z node 3 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-24T20:32:44.188817Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:44.271900Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:44.271924Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:44.271933Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:44.272077Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:32:44.736057Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1250 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Vers ... 
(8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-24T20:32:51.158156Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000cec88] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "XXXXXXXX" type: "ydb.database" } resource_path { id: "XXXXXXXX" type: "resource-manager.folder" } } 2025-06-24T20:32:51.160804Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000cec88] Status 16 Access Denied 2025-06-24T20:32:51.166968Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.read now has a permanent error "Access Denied" retryable:0 2025-06-24T20:32:51.167019Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (8E120919) () has now permanent error message 'Access Denied' 2025-06-24T20:32:51.171518Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:51.171570Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:51.171580Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:51.171616Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-24T20:32:51.171839Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000cec88] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "XXXXXXXX" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:32:51.174446Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [5170000cec88] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-24T20:32:51.177278Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-24T20:32:51.177396Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-24T20:32:51.180726Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:51.180768Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:51.180778Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:51.180812Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-24T20:32:51.181067Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000cec88] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "XXXXXXXX" type: "resource-manager.folder" } } 2025-06-24T20:32:51.185091Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [5170000cec88] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-24T20:32:51.187419Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-24T20:32:51.187518Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid 
token of user1@as 2025-06-24T20:32:51.188355Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:51.188383Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:51.188393Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:51.188419Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(monitoring.view) 2025-06-24T20:32:51.188608Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000cec88] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "monitoring.view" resource_path { id: "gizmo" type: "iam.gizmo" } } 2025-06-24T20:32:51.191913Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [5170000cec88] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-24T20:32:51.195458Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission monitoring.view now has a valid subject "user1@as" 2025-06-24T20:32:51.195592Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004694/r3tmp/tmpsDEoWw/pdisk_1.dat 2025-06-24T20:32:55.441577Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:32:55.518170Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:55.521674Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519616609179865764:2079] 1750797175331770 != 1750797175331773 2025-06-24T20:32:55.534628Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:55.534744Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:55.538279Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13873, node 5 2025-06-24T20:32:55.621111Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:55.621135Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:55.621145Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:55.622649Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9149 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:55.946759Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:55.954254Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:32:55.956447Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:55.956627Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:55.956645Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:55.956686Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-24T20:32:55.956771Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700011c008] Connect to grpc://localhost:7631 2025-06-24T20:32:55.957723Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700011c008] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:32:55.968184Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700011c008] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-24T20:32:55.968482Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-24T20:32:55.968575Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-24T20:32:55.969828Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:55.969865Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:55.969876Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:55.969905Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket 
**** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-24T20:32:55.969945Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-24T20:32:55.970121Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700011c008] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:32:55.970651Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700011c008] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:32:55.972832Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700011c008] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-24T20:32:55.974426Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700011c008] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-24T20:32:55.974617Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-24T20:32:55.974666Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.write now has a valid subject "user1@as" 2025-06-24T20:32:55.974769Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::Run3CyclesForTopics [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:32:53.342597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:32:53.342688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:32:53.342726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:32:53.342762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:32:53.342818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:32:53.342857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:32:53.342931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:32:53.343015Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:32:53.347894Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:32:53.348314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:32:53.444685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:32:53.444754Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:53.466112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:32:53.466553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:32:53.466741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:32:53.474884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:32:53.475054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:32:53.475771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:53.476105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:32:53.479351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:32:53.479559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:32:53.480704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:32:53.480767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:32:53.481036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:32:53.481084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:32:53.481144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:32:53.481231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:32:53.488254Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:32:53.642193Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:32:53.642420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:53.642627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:32:53.642677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:32:53.642955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:32:53.643121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:32:53.645625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:53.645888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:32:53.646133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:53.646188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:32:53.646229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:32:53.646260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:32:53.648360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:53.648430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:32:53.648472Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:32:53.650371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:53.650414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-06-24T20:32:53.650458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:53.650512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:32:53.654350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:32:53.656317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:32:53.656518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:32:53.657443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:53.657563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:32:53.657617Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:53.657900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:32:53.657951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:53.658113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:32:53.658190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:32:53.660433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:32:53.660484Z node 1 :FLAT_TX_SCHEMESHARD ... 
7: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-24T20:32:59.876236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-24T20:32:59.876271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-24T20:32:59.951223Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:460:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:59.951318Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:32:59.951431Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:460:2410], Recipient [1:460:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:59.951471Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:32:59.951701Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [1:638:2552], Recipient [1:460:2410]: NKikimrSchemeOp.TDescribePath PathId: 2 SchemeshardId: 72075186233409546 2025-06-24T20:32:59.951741Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T20:32:59.951864Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-24T20:32:59.952073Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 2 took 176us result status StatusSuccess 2025-06-24T20:32:59.952628Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409550 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409549 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409550 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 
DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-24T20:33:00.038349Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:886:2759]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:00.038445Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:00.038610Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:886:2759], Recipient [1:886:2759]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:00.038655Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:00.038818Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [1:1065:2900], Recipient [1:886:2759]: NKikimrSchemeOp.TDescribePath PathId: 2 SchemeshardId: 72075186233409551 2025-06-24T20:33:00.038850Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T20:33:00.038982Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72075186233409551, at schemeshard: 72075186233409551 2025-06-24T20:33:00.039218Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409551 describe pathId 2 took 192us result status StatusSuccess 2025-06-24T20:33:00.040337Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72075186233409551 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 106 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409555 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409554 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409555 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409552 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409553 SchemeShard: 
72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72075186233409551, at schemeshard: 72075186233409551 2025-06-24T20:33:00.393171Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:00.393254Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:00.393373Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:294:2276], Recipient [1:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:00.393409Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:00.404221Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:33:00.404314Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5152: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:33:00.404356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-06-24T20:33:00.404727Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 268637738, Sender [1:297:2278], Recipient [1:294:2276]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: true Progress10k: 10000 2025-06-24T20:33:00.404784Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5151: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-24T20:33:00.404949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7879: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-24T20:33:00.405031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-24T20:33:00.405086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-24T20:33:00.405144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.979000s, Timestamp# 1970-01-01T00:00:11.065000Z 2025-06-24T20:33:00.405190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 3, duration# 2 s 2025-06-24T20:33:00.412125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-24T20:33:00.412909Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:1540:3326], Recipient [1:294:2276]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:33:00.412990Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:33:00.413039Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T20:33:00.413310Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125519, Sender [1:276:2265], Recipient [1:294:2276]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-24T20:33:00.413359Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5149: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-24T20:33:00.413404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7830: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::TicketFromCertificateWithValidationCheckIssuerBad [GOOD] Test command err: 2025-06-24T20:32:33.956167Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616518045862978:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:33.956202Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004699/r3tmp/tmpuinkLi/pdisk_1.dat 2025-06-24T20:32:34.456219Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:34.456872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:34.466036Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:34.469660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:32:34.470369Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616518045862762:2079] 1750797153942114 != 1750797153942117 TServer::EnableGrpc on GrpcPort 21532, node 1 2025-06-24T20:32:34.596338Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:34.596377Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:34.596387Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:34.596565Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21739 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T20:32:34.955502Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:34.962816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:34.979053Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket 8C7CBC72D5D7D26C910F1DC0D4C2468803EB23E1B13CABC278B22A27F2301D6F () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-24T20:32:37.862998Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616532158159930:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:37.863256Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004699/r3tmp/tmp1kZpy8/pdisk_1.dat 2025-06-24T20:32:38.152205Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11321, node 2 2025-06-24T20:32:38.193811Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:38.193907Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:38.223817Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:32:38.367998Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:38.368023Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:38.368030Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:38.368163Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:32:38.837318Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7830 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:39.146247Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:39.163482Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:32:39.171953Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket 34F671267A7D4322169B7ECB86FED5B03D9BBACE510027B18D242D7E08F4F448 () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-24T20:32:44.027093Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519616565085420513:2237];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004699/r3tmp/tmpr2gR79/pdisk_1.dat 2025-06-24T20:32:44.168044Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:32:44.367185Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:44.371405Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519616560790453000:2079] 1750797163996584 != 1750797163996587 2025-06-24T20:32:44.393423Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:44.393518Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:44.400225Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11585, node 3 2025-06-24T20:32:44.600178Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:44.600203Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T20:32:44.600212Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:44.600346Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:32:45.027465Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19914 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:45.253480Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:45.283623Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:32:45.286642Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket A8ABD181C8CB174DFE8E36021EE14161B8FD4D5FE06C7CF228D399B5810552FF () has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-06-24T20:32:45.287315Z node 3 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket A8ABD181C8CB174DFE8E36021EE14161B8FD4D5FE06C7CF228D399B5810552FF: Cannot create token from certificate. 
Client certificate failed verification 2025-06-24T20:32:50.433093Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519616587368397790:2182];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:50.433175Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004699/r3tmp/tmpjZ0TCw/pdisk_1.dat 2025-06-24T20:32:50.888252Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:50.888373Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:50.889336Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:50.954066Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15750, node 4 2025-06-24T20:32:51.158068Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:51.158111Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:51.158122Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:51.158304Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:32:51.467413Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17897 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:52.141819Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:32:52.155850Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:32:52.257934Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket BA9D8545532F7FD775E46C066EE41C896562D50A36D5AE144943407D8ED3801F () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-24T20:32:56.515273Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519616615484950846:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:56.515381Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004699/r3tmp/tmpeG0q8f/pdisk_1.dat 2025-06-24T20:32:56.719051Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:56.719172Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:56.720011Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:56.725553Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519616615484950826:2079] 1750797176511827 != 1750797176511830 2025-06-24T20:32:56.743821Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20437, node 5 2025-06-24T20:32:56.790992Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:56.791027Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:56.791037Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:56.791244Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32417 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:32:57.073587Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:57.083773Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket C0DDBD04F1AFC4059FA826347FB6D8FB67C09F2E7426A37A859609D9F63D223C () has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-06-24T20:32:57.084383Z node 5 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket C0DDBD04F1AFC4059FA826347FB6D8FB67C09F2E7426A37A859609D9F63D223C: Cannot create token from certificate. Client certificate failed verification >> TTicketParserTest::BulkAuthorizationUnavailable [GOOD] >> TTicketParserTest::AuthorizationWithUserAccount [GOOD] >> TTicketParserTest::AuthorizationUnavailable |89.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut |89.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut |89.0%| [LD] {RESULT} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut >> TestDataErasure::DataErasureWithSplit [GOOD] >> TestDataErasure::DataErasureWithMerge >> Viewer::JsonAutocompleteStartOfDatabaseName >> TestDataErasure::Run3CyclesForTables [GOOD] >> TTicketParserTest::AuthenticationRetryError [GOOD] >> TTicketParserTest::AuthenticationRetryErrorImmediately >> TestDataErasure::SimpleTestForTopic >> TSchemeShardTest::BackupBackupCollection-WithIncremental-true [GOOD] >> Viewer::Cluster10000Tablets >> TTicketParserTest::LoginRefreshGroupsGood [GOOD] >> TTicketParserTest::LoginCheckRemovedUser ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::Run3CyclesForTables [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:32:53.696170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:32:53.696278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:32:53.696322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:32:53.696364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:32:53.696432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:32:53.696475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing 
config: type TxSplitTablePartition, limit 10000 2025-06-24T20:32:53.696546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:32:53.696647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:32:53.697521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:32:53.697973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:32:53.819781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:32:53.819848Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:53.851583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:32:53.852109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:32:53.852313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:32:53.865368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:32:53.865602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:32:53.866402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:53.866764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:32:53.870437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:32:53.870678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:32:53.871958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:32:53.872025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:32:53.872320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:32:53.872379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:32:53.872440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:32:53.872539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 
72057594046678944 2025-06-24T20:32:53.880567Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:32:54.037036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:32:54.037313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:54.037532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:32:54.037581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:32:54.038041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:32:54.038206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:32:54.040959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:54.041216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:32:54.041465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:54.041541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:32:54.041592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:32:54.041626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:32:54.044109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:54.044191Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:32:54.044235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:32:54.046430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:54.046486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:54.046542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:54.046610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:32:54.050639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:32:54.053275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:32:54.053515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:32:54.054680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:54.054862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:32:54.054932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:54.055303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:32:54.055361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:54.055552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:32:54.055641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:32:54.060764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:32:54.060832Z node 1 :FLAT_TX_SCHEMESHARD ... 
594046678944, LocalPathId: 3] in# 63 ms, next wakeup# 593.937000s, rate# 0, in queue# 0 tenants, running# 0 tenants at schemeshard 72057594046678944 2025-06-24T20:33:00.830894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:327: [RootDataErasureManager] Data erasure in tenants is completed. Send request to BS controller 2025-06-24T20:33:00.832683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:615: TTxCompleteDataErasureTenant Complete at schemeshard: 72057594046678944, NeedSendRequestToBSC# true 2025-06-24T20:33:00.832732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-06-24T20:33:00.832979Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 268637738, Sender [1:297:2278], Recipient [1:294:2276]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: false Progress10k: 0 2025-06-24T20:33:00.833022Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5151: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-24T20:33:00.833052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7879: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-24T20:33:00.833125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-24T20:33:00.833161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 0% 2025-06-24T20:33:00.833231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-24T20:33:00.833283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-24T20:33:01.345129Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:460:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:01.345215Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:01.345289Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:838:2718]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:01.345318Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:01.345371Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:01.345399Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:01.345455Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:838:2718], Recipient [1:838:2718]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 
2025-06-24T20:33:01.345486Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:01.345562Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:294:2276], Recipient [1:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:01.345603Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:01.345656Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:460:2410], Recipient [1:460:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:01.345682Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:01.376772Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:33:01.376868Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5152: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:33:01.376913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-06-24T20:33:01.377157Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 268637738, Sender [1:297:2278], Recipient [1:294:2276]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: false Progress10k: 5000 2025-06-24T20:33:01.377200Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5151: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-24T20:33:01.377237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7879: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-24T20:33:01.377323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-24T20:33:01.377377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-24T20:33:01.377445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-24T20:33:01.377502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-24T20:33:01.809391Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:01.809496Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:01.809599Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:460:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:01.809633Z node 1 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:01.809703Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:838:2718]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:01.809732Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:01.809800Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:294:2276], Recipient [1:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:01.809836Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:01.810300Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:460:2410], Recipient [1:460:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:01.810347Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:01.810419Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:838:2718], Recipient [1:838:2718]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:01.810448Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:01.842357Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:33:01.842441Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5152: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:33:01.842481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-06-24T20:33:01.842732Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 268637738, Sender [1:297:2278], Recipient [1:294:2276]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: true Progress10k: 10000 2025-06-24T20:33:01.842771Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5151: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-24T20:33:01.842806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7879: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-24T20:33:01.842884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-24T20:33:01.842919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-24T20:33:01.842971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.936000s, Timestamp# 1970-01-01T00:00:11.108000Z 2025-06-24T20:33:01.843011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 3, duration# 2 s 2025-06-24T20:33:01.848318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-24T20:33:01.848958Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:3574:4928], Recipient [1:294:2276]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:33:01.849021Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:33:01.849063Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T20:33:01.849245Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125519, Sender [1:276:2265], Recipient [1:294:2276]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-24T20:33:01.849280Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5149: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-24T20:33:01.849316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7830: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> TestDataErasure::SimpleTestForAllSupportedObjects ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::BulkAuthorizationUnavailable [GOOD] Test command err: 2025-06-24T20:32:37.377649Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616532225620989:2213];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:37.377736Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00467e/r3tmp/tmpt3bnvF/pdisk_1.dat 2025-06-24T20:32:38.111027Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:38.119337Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:38.124858Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:32:38.144916Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:38.147339Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616532225620782:2079] 1750797157329938 != 1750797157329941 TServer::EnableGrpc on GrpcPort 16957, node 1 2025-06-24T20:32:38.381496Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:32:38.422067Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:38.422093Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:38.422112Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:38.422259Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6533 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:39.094608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:39.230925Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T20:32:39.231312Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:39.231347Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:39.231902Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (5DAB89DE) () has now permanent error message 'Token is not in correct format' 2025-06-24T20:32:39.231921Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:773: CanInitLoginToken, database /Root, A2 error Token is not in correct format 2025-06-24T20:32:39.231950Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (5DAB89DE): Token is not in correct format 2025-06-24T20:32:42.636470Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616553710824566:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:42.636538Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00467e/r3tmp/tmpoCbHn8/pdisk_1.dat 2025-06-24T20:32:43.015455Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:43.017303Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519616553710824548:2079] 1750797162635479 != 1750797162635482 2025-06-24T20:32:43.028678Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:43.028772Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-24T20:32:43.033331Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9309, node 2 2025-06-24T20:32:43.227744Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:43.227768Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:43.227775Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:43.227883Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2292 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:43.623050Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:32:43.651718Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:32:43.664830Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:32:43.665091Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:43.665110Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:43.665119Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:43.665238Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-06-24T20:32:43.665320Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700001bd88] Connect to grpc://localhost:9971 2025-06-24T20:32:43.674273Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700001bd88] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-24T20:32:43.689440Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700001bd88] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } results { items { permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } } } 2025-06-24T20:32:43.689760Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-06-24T20:32:43.689873Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-24T20:32:43.695513Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:43.695553Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:43.695563Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:43.695646Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-06-24T20:32:43.695897Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700001bd88] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-24T20:32:43.707915Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700001bd88] Response 
BulkAuthorizeResponse { subject { user_account { id: "user1" } } results { items { permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } } } 2025-06-24T20:32:43.711285Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.write access denie ... 4046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:53.909741Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:53.915542Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:32:53.921653Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:53.921685Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:53.921695Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:53.921823Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read somewhere.sleep something.list something.write something.eat) 2025-06-24T20:32:53.921869Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700007a908] Connect to grpc://localhost:26818 2025-06-24T20:32:53.923071Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700007a908] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "somewhere.sleep" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.list" ...(truncated) } 2025-06-24T20:32:53.947283Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700007a908] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } results { items { permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: 
"aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } items { permission: "somewhere.sleep" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } items { permission: "something.list" r...(truncated) } 2025-06-24T20:32:53.947740Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.read access denied for subject "user1@as" 2025-06-24T20:32:53.947763Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission somewhere.sleep access denied for subject "user1@as" 2025-06-24T20:32:53.947775Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.list access denied for subject "user1@as" 2025-06-24T20:32:53.947790Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.eat access denied for subject "user1@as" 2025-06-24T20:32:53.947807Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-24T20:32:53.947966Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700007ac88] Connect to grpc://localhost:62693 2025-06-24T20:32:53.948758Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700007ac88] Request GetUserAccountRequest { user_account_id: "user1" } 2025-06-24T20:32:53.963482Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700007ac88] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-06-24T20:32:53.963967Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-24T20:32:57.609572Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519616618244096933:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:57.609630Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00467e/r3tmp/tmp9Vb1Cd/pdisk_1.dat 2025-06-24T20:32:57.755265Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:57.759305Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519616618244096913:2079] 1750797177608927 != 1750797177608930 2025-06-24T20:32:57.772920Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:57.773044Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:57.775340Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5773, node 5 2025-06-24T20:32:57.835571Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:57.835596Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:57.835606Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty 
maybe) 2025-06-24T20:32:57.835748Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31188 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:58.202050Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:58.207983Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:32:58.210439Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:58.210479Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:58.210490Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:58.210596Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-06-24T20:32:58.210668Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700009ea88] Connect to grpc://localhost:22761 2025-06-24T20:32:58.212078Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700009ea88] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-24T20:32:58.219852Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700009ea88] Status 14 Service Unavailable 2025-06-24T20:32:58.220030Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-24T20:32:58.220064Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.write now has a retryable error "Service Unavailable" retryable: 1 2025-06-24T20:32:58.220105Z node 5 :TICKET_PARSER 
DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-24T20:32:58.220229Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-06-24T20:32:58.220546Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700009ea88] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-24T20:32:58.226565Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700009ea88] Status 1 CANCELLED 2025-06-24T20:32:58.227126Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "CANCELLED" retryable: 1 2025-06-24T20:32:58.227172Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.write now has a retryable error "CANCELLED" retryable: 1 2025-06-24T20:32:58.227211Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'CANCELLED' >> Viewer::TabletMergingPacked >> TSchemeShardCheckProposeSize::CopyTables [GOOD] >> TSchemeShardDecimalTypesInTables::Parameterless |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::BackupBackupCollection-WithIncremental-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:32:31.984149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:32:31.984240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:32:31.984281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:32:31.984326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:32:31.984382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:32:31.984412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:32:31.984476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-24T20:32:31.984566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:32:31.985414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:32:31.985809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:32:32.086261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:32:32.086320Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:32.105614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:32:32.106152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:32:32.106403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:32:32.116617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:32:32.116832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:32:32.117660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:32.118003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:32:32.121492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:32:32.121685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:32:32.123088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:32:32.123191Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:32:32.123435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:32:32.123486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:32:32.123555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:32:32.123673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.131688Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 
2025-06-24T20:32:32.289319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:32:32.289585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.289864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:32:32.289928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:32:32.290225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:32:32.290371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:32:32.293189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:32.293419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:32:32.293809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.293876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:32:32.293953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:32:32.294002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:32:32.296455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.296524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:32:32.296581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:32:32.298990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.299066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.299120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:32.299216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:32:32.303957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:32:32.306419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:32:32.306616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:32:32.307875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:32.308048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:32:32.308107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:32.308432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:32:32.308497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:32.308675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:32:32.308784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:32:32.311325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:32:32.311383Z node 1 :FLAT_TX_SCHEMESHARD ... 
"" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 } ChildrenExist: true } Children { Name: "DirB" PathId: 30 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: "Table2" PathId: 32 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 29 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:33:02.109326Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/Table2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:33:02.109848Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/Table2" took 556us result status StatusSuccess 2025-06-24T20:33:02.110561Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/Table2" PathDescription { Self { Name: "Table2" PathId: 32 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: 
"__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 4 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 32 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:33:02.112329Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:33:02.112781Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB" took 471us result status StatusSuccess 2025-06-24T20:33:02.113554Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB" PathDescription { Self { Name: "DirB" PathId: 30 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "Table3" PathId: 33 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 
ParentPathId: 30 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 30 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:33:02.115061Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB/Table3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:33:02.115579Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB/Table3" took 534us result status StatusSuccess 2025-06-24T20:33:02.116289Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB/Table3" PathDescription { Self { Name: "Table3" PathId: 33 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 30 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table3" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 
LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 33 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> ResultFormatter::Utf8WithQuotes [GOOD] >> ResultFormatter::VariantStruct [GOOD] >> KqpPg::InsertNoTargetColumns_Alter+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Alter-useSink |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::VariantStruct [GOOD] >> Viewer::TabletMergingPacked [GOOD] >> Viewer::VDiskMerging |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest >> TSchemeShardDecimalTypesInTables::Parameterless [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-false >> TTicketParserTest::NebiusAuthorizationModify [GOOD] >> TMonitoringTests::ValidActorId >> TMonitoringTests::ValidActorId [GOOD] |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest >> VDiskBalancing::TestRandom_Block42 [GOOD] >> TTicketParserTest::BulkAuthorizationModify [GOOD] |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest >> TMonitoringTests::ValidActorId [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-false [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::NebiusAuthorizationModify [GOOD] Test command err: 2025-06-24T20:32:33.158655Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616515652140453:2091];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:33.168885Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046a5/r3tmp/tmpBwnupJ/pdisk_1.dat 2025-06-24T20:32:33.551492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2025-06-24T20:32:33.551588Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:33.557373Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:32:33.579381Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616515652140388:2079] 1750797153112650 != 1750797153112653 2025-06-24T20:32:33.593187Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5690, node 1 2025-06-24T20:32:33.779824Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:33.779851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:33.779858Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:33.779981Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25928 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T20:32:34.171186Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:34.238300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:32:34.267451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:32:34.272601Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:34.272640Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:34.272647Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:34.273140Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-24T20:32:34.273215Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000010408] Connect to grpc://localhost:9345 2025-06-24T20:32:34.275394Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010408] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response 14: "Service Unavailable" 2025-06-24T20:32:34.289225Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000010408] Status 14 Service Unavailable 2025-06-24T20:32:34.291131Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-24T20:32:34.291172Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-24T20:32:34.291338Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010408] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-06-24T20:32:34.294772Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000010408] Status 1 CANCELLED 2025-06-24T20:32:34.296100Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'CANCELLED' 2025-06-24T20:32:37.008985Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616527923006098:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:37.010610Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046a5/r3tmp/tmpGN1Jk2/pdisk_1.dat 2025-06-24T20:32:37.172398Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:37.172460Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:37.172629Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:37.174242Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519616527923005905:2079] 1750797156908837 != 1750797156908840 2025-06-24T20:32:37.184635Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12833, node 2 2025-06-24T20:32:37.319849Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-24T20:32:37.319873Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:37.319881Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:37.320009Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32301 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:37.669673Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:32:37.675403Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:32:37.679360Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:37.679391Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:37.679399Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:37.679467Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-24T20:32:37.679549Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [5170000b2908] Connect to grpc://localhost:18779 2025-06-24T20:32:37.680856Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000b2908] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response 14: "Service Unavailable" 2025-06-24T20:32:37.695311Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000b2908] Status 14 Service Unavailable 2025-06-24T20:32:37.695474Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-24T20:32:37.695514Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-24T20:32:37.695590Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-24T20:32:37.695875Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000b2908] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response 14: "Service Unavailable" 2025-06-24T20:32:37.700739Z node 2 :GRPC_CLIENT DEBUG: grpc_service_c ... 
nt { user_account { id: "user1" } } } } 0: "OK" 2025-06-24T20:32:56.887879Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:56.887901Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:56.887911Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:56.887966Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-24T20:32:56.888203Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000015808] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "XXXXXXXX" } } iam_token: "**** (8E120919)" } } } 2025-06-24T20:32:56.891695Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000015808] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } 2025-06-24T20:32:56.891990Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-24T20:32:56.894081Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:56.894104Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:56.894114Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:56.894181Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-24T20:32:56.894446Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000015808] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "XXXXXXXX" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "XXXXXXXX" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "monitoring.view" } container_id: "folder" iam_token: "user1" } } NebiusAccessService::Authorize response 2025-06-24T20:32:56.896885Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000015808] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-06-24T20:32:56.897154Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-24T20:32:56.897886Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:56.897908Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:56.897915Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:56.897954Z node 4 :TICKET_PARSER TRACE: 
ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( monitoring.view) 2025-06-24T20:32:56.898102Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000015808] Request AuthorizeRequest { checks { key: 0 value { permission { name: "monitoring.view" } container_id: "folder" iam_token: "**** (8E120919)" } } } 2025-06-24T20:32:56.900097Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000015808] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } 2025-06-24T20:32:56.901130Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-24T20:33:00.275073Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519616632676427841:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:00.275177Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046a5/r3tmp/tmpcRWmhP/pdisk_1.dat 2025-06-24T20:33:00.487798Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:00.488968Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:00.489047Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:00.489191Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519616632676427823:2079] 1750797180273772 != 1750797180273775 2025-06-24T20:33:00.505366Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13682, node 5 2025-06-24T20:33:00.575652Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:00.575682Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:00.575695Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:00.575887Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1048 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:00.870183Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:00.877265Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:33:00.879340Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:33:00.879369Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:33:00.879378Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:33:00.879436Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-24T20:33:00.879472Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700004ed08] Connect to grpc://localhost:8433 2025-06-24T20:33:00.880442Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700004ed08] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-06-24T20:33:00.895493Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700004ed08] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } 2025-06-24T20:33:00.895785Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-24T20:33:00.897207Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:33:00.897233Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:33:00.897242Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:33:00.897314Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-06-24T20:33:00.897645Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700004ed08] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: 
"user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-06-24T20:33:00.903502Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700004ed08] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { account { user_account { id: "user1" } } } } } 2025-06-24T20:33:00.903848Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest >> TTicketParserTest::AuthorizationUnavailable [GOOD] >> Viewer::VDiskMerging [GOOD] >> Viewer::TenantInfo5kkTablets >> TestDataErasure::SimpleTestForTopic [GOOD] >> KqpPg::CheckPgAutoParams-useSink [GOOD] >> TTicketParserTest::AuthenticationRetryErrorImmediately [GOOD] >> IncrementalRestoreScan::Empty ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::BulkAuthorizationModify [GOOD] Test command err: 2025-06-24T20:32:33.042015Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616515769860025:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:33.042062Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046a8/r3tmp/tmpjVQrsU/pdisk_1.dat 2025-06-24T20:32:33.526585Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:33.526694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:33.535836Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:32:33.561038Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:33.563181Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616511474892503:2079] 1750797153012464 != 1750797153012467 TServer::EnableGrpc on GrpcPort 11320, node 1 2025-06-24T20:32:33.655647Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:33.655667Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:33.655674Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:33.655800Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26237 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:34.029865Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:32:34.030636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:34.056939Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-06-24T20:32:34.063243Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000020388] Connect to grpc://localhost:8735 2025-06-24T20:32:34.067813Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000020388] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-24T20:32:34.078670Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000020388] Status 14 Service Unavailable 2025-06-24T20:32:34.079332Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-24T20:32:34.079392Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-24T20:32:34.079487Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-06-24T20:32:34.079814Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000020388] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-24T20:32:34.081933Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000020388] Status 14 Service Unavailable 2025-06-24T20:32:34.082272Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket AKIA****MPLE (B3EDC139) 
permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-24T20:32:34.082303Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-24T20:32:35.055261Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-24T20:32:35.055415Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-06-24T20:32:35.055792Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000020388] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-24T20:32:35.062147Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000020388] Status 14 Service Unavailable 2025-06-24T20:32:35.063353Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-24T20:32:35.063399Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-24T20:32:37.061933Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-24T20:32:37.062046Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-06-24T20:32:37.062373Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000020388] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-24T20:32:37.064599Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000020388] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-24T20:32:37.065133Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as 2025-06-24T20:32:38.051863Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616515769860025:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:38.051944Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:32:46.907766Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616570006488376:2225];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046a8/r3tmp/tmpsfipEI/pdisk_1.dat 2025-06-24T20:32:47.030348Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:32:47.134114Z node 2 :IMPORT WARN: 
schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:47.155870Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:47.155951Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:47.156843Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20869, node 2 2025-06-24T20:32:47.383888Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:47.383916Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:47.383926Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:47.384080Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8572 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T20:32:47.895889Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:48.093400Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemesha ... rst called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:32:57.221046Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:57.221079Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:57.221088Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:57.221136Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-24T20:32:57.221202Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(somewhere.sleep) 2025-06-24T20:32:57.221226Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.list) 2025-06-24T20:32:57.221248Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-24T20:32:57.221273Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.eat) 2025-06-24T20:32:57.221330Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000117308] Connect to grpc://localhost:3140 2025-06-24T20:32:57.224907Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000117308] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:32:57.228917Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000117308] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "somewhere.sleep" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:32:57.229266Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000117308] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.list" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:32:57.229482Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000117308] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:32:57.229665Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000117308] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.eat" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:32:57.233758Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000117308] Status 16 Access Denied 2025-06-24T20:32:57.233948Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.eat now has a permanent error "Access Denied" retryable:0 2025-06-24T20:32:57.234419Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000117308] Status 16 Access Denied 2025-06-24T20:32:57.234592Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.list now has a permanent error "Access Denied" retryable:0 2025-06-24T20:32:57.235219Z node 4 :GRPC_CLIENT DEBUG: 
grpc_service_client.h:109: [517000117308] Status 16 Access Denied 2025-06-24T20:32:57.235330Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000117308] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-24T20:32:57.235404Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000117308] Status 16 Access Denied 2025-06-24T20:32:57.235548Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission somewhere.sleep now has a permanent error "Access Denied" retryable:0 2025-06-24T20:32:57.235618Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.write now has a valid subject "user1@as" 2025-06-24T20:32:57.235650Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.read now has a permanent error "Access Denied" retryable:0 2025-06-24T20:32:57.235678Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-24T20:32:57.237172Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000117a08] Connect to grpc://localhost:21871 2025-06-24T20:32:57.238136Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000117a08] Request GetUserAccountRequest { user_account_id: "user1" } 2025-06-24T20:32:57.247223Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000117a08] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-06-24T20:32:57.248009Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-24T20:33:00.741580Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519616632493772015:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:00.757286Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046a8/r3tmp/tmpUqAooG/pdisk_1.dat 2025-06-24T20:33:00.974174Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:00.974291Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:00.978175Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:00.987257Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519616632493771977:2079] 1750797180736137 != 1750797180736140 2025-06-24T20:33:01.000751Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18624, node 5 2025-06-24T20:33:01.103946Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:01.103975Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:01.103988Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:01.104146Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected 
to server localhost:25993 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:01.554003Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:01.563361Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:33:01.565682Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:33:01.565713Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:33:01.565723Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:33:01.565815Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read) 2025-06-24T20:33:01.565867Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000135d08] Connect to grpc://localhost:32594 2025-06-24T20:33:01.567001Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000135d08] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-24T20:33:01.580319Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000135d08] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-24T20:33:01.580816Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-24T20:33:01.581605Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:33:01.581642Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:33:01.581651Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:33:01.581776Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( 
something.read something.write) 2025-06-24T20:33:01.582086Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000135d08] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-24T20:33:01.584663Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000135d08] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-24T20:33:01.585047Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-24T20:33:01.783017Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |89.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut |89.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut |89.0%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut >> TTicketParserTest::NebiusAuthenticationRetryError [GOOD] >> TTicketParserTest::NebiusAuthenticationRetryErrorImmediately >> IncrementalRestoreScan::ChangeSenderSimple ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::SimpleTestForTopic [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:33:02.946392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:33:02.946484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:33:02.946520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:33:02.946562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:33:02.946618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:33:02.946652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:33:02.946714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:33:02.946799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: 
[RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:33:02.947751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:33:02.948108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:33:03.032729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:33:03.032798Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:03.050316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:33:03.050777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:33:03.050963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:33:03.059054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:33:03.059281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:33:03.059972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:33:03.060289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:33:03.063334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:33:03.063529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:33:03.064685Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:33:03.064752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:33:03.065012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:33:03.065077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:33:03.065213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:33:03.065308Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:33:03.077896Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:33:03.202641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: 
Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:33:03.202897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:33:03.203103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:33:03.203166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:33:03.203423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:33:03.203587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:33:03.206023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:33:03.206217Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:33:03.206447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:33:03.206500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:33:03.206543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:33:03.206579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:33:03.208518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:33:03.208591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:33:03.208624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:33:03.210218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:33:03.210259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:33:03.210294Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:33:03.210341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:33:03.213227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:33:03.215601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:33:03.215788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:33:03.216552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:33:03.216663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:33:03.216703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:33:03.216942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:33:03.216981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:33:03.217160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:33:03.217235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:33:03.219928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:33:03.219968Z node 1 :FLAT_TX_SCHEMESHARD ... 
7: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-24T20:33:06.512707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-24T20:33:06.512756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-24T20:33:06.583544Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:460:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:06.583618Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:06.583689Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:460:2410], Recipient [1:460:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:06.583722Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:06.583886Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [1:638:2552], Recipient [1:460:2410]: NKikimrSchemeOp.TDescribePath PathId: 2 SchemeshardId: 72075186233409546 2025-06-24T20:33:06.583919Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T20:33:06.584017Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-24T20:33:06.584224Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 2 took 160us result status StatusSuccess 2025-06-24T20:33:06.584706Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409550 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409549 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409550 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 
DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-24T20:33:06.644712Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:886:2759]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:06.644781Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:06.644911Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:886:2759], Recipient [1:886:2759]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:06.644942Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:06.645095Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [1:1065:2900], Recipient [1:886:2759]: NKikimrSchemeOp.TDescribePath PathId: 2 SchemeshardId: 72075186233409551 2025-06-24T20:33:06.645147Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T20:33:06.645232Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72075186233409551, at schemeshard: 72075186233409551 2025-06-24T20:33:06.645410Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409551 describe pathId 2 took 137us result status StatusSuccess 2025-06-24T20:33:06.645903Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72075186233409551 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 106 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409555 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409554 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409555 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409552 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409553 SchemeShard: 
72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72075186233409551, at schemeshard: 72075186233409551 2025-06-24T20:33:07.011514Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:07.011594Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:07.011673Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:294:2276], Recipient [1:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:07.011709Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:07.047520Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:33:07.047603Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5152: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:33:07.047637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-24T20:33:07.047854Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 268637738, Sender [1:297:2278], Recipient [1:294:2276]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-06-24T20:33:07.047891Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5151: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-24T20:33:07.047929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7879: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-24T20:33:07.048003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-24T20:33:07.048039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-24T20:33:07.048113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.981000s, Timestamp# 1970-01-01T00:00:05.063000Z 2025-06-24T20:33:07.048167Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 1, duration# 2 s 2025-06-24T20:33:07.056073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-24T20:33:07.056985Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:1348:3150], Recipient [1:294:2276]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:33:07.057051Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:33:07.057101Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T20:33:07.057243Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125519, Sender [1:276:2265], Recipient [1:294:2276]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-24T20:33:07.057278Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5149: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-24T20:33:07.057484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7830: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-true [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-false |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::AuthorizationUnavailable [GOOD] Test command err: 2025-06-24T20:32:34.226106Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616520982254694:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:34.245756Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004687/r3tmp/tmp5J54UR/pdisk_1.dat 2025-06-24T20:32:34.639003Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:34.639126Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:34.655916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:32:34.688276Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26680, node 1 2025-06-24T20:32:34.908769Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:34.908808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:34.908820Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:34.908956Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10856 WaitRootIsUp 
'Root'... TClient::Ls request: Root 2025-06-24T20:32:35.159972Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:35.278683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:35.300312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:32:35.305475Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-24T20:32:35.305635Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000011208] Connect to grpc://localhost:1816 2025-06-24T20:32:35.309295Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000011208] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:32:35.336757Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000011208] Status 14 Service Unavailable 2025-06-24T20:32:35.337074Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a permanent error "Service Unavailable" retryable:1 2025-06-24T20:32:35.337100Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-24T20:32:35.337126Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-24T20:32:35.337380Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000011208] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:32:35.339342Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000011208] Status 14 Service Unavailable 2025-06-24T20:32:35.339544Z 
node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a permanent error "Service Unavailable" retryable:1 2025-06-24T20:32:35.339573Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-24T20:32:36.227387Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-24T20:32:36.227461Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-24T20:32:36.228132Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000011208] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:32:36.239261Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000011208] Status 14 Service Unavailable 2025-06-24T20:32:36.242230Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a permanent error "Service Unavailable" retryable:1 2025-06-24T20:32:36.242290Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-24T20:32:37.229472Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-24T20:32:37.229535Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-24T20:32:37.230216Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000011208] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:32:37.234316Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000011208] Status 14 Service Unavailable 2025-06-24T20:32:37.234673Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a permanent error "Service Unavailable" retryable:1 2025-06-24T20:32:37.234703Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-24T20:32:39.219235Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616520982254694:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:39.219363Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:32:39.247193Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-24T20:32:39.247252Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-24T20:32:39.247756Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000011208] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" 
v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:32:39.253277Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000011208] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-24T20:32:39.253459Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a valid subject "user1@as" 2025-06-24T20:32:39.253561Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as 2025-06-24T20:32:48.403649Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616582772117049:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:48.403750Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004687/r3tmp/tmpl5cloT/pdisk_1.dat 2025-06-24T20:32:48.612851Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:48.616210Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519616582772117031:2079] 1750797168398934 != 1750797168398937 2025-06-24T20:32:48.616919Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:48.617017Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:48.630626Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8284, node 2 2025-06-24T20:32:48.759867Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:48.759891Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:48.759899Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:48.760021Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29294 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "ro ... 
RACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-24T20:32:58.312819Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000074e08] Connect to grpc://localhost:27191 2025-06-24T20:32:58.313806Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000074e08] Request GetUserAccountRequest { user_account_id: "user1" } 2025-06-24T20:32:58.325904Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000074e08] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-06-24T20:32:58.326324Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-24T20:32:58.326993Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:58.327019Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:58.327029Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:58.327071Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-24T20:32:58.327313Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000075508] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:32:58.329526Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000075508] Status 16 Access Denied 2025-06-24T20:32:58.329678Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.write now has a permanent error "Access Denied" retryable:0 2025-06-24T20:32:58.329706Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (8E120919) () has now permanent error message 'Access Denied' 2025-06-24T20:32:58.330340Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:32:58.330362Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:58.330368Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:32:58.330394Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-24T20:32:58.330427Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-24T20:32:58.330587Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000075508] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:32:58.331036Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000075508] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:32:58.332038Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000075508] Response AuthorizeResponse { subject { 
user_account { id: "user1" } } } 2025-06-24T20:32:58.332186Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-24T20:32:58.332815Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000075508] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-24T20:32:58.332918Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.write now has a valid subject "user1@as" 2025-06-24T20:32:58.332944Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-24T20:32:58.333100Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-24T20:33:02.043969Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519616636048689742:2228];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004687/r3tmp/tmpVjLWK2/pdisk_1.dat 2025-06-24T20:33:02.117823Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:33:02.355925Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:02.356063Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:02.356774Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:02.358463Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519616636048689550:2079] 1750797181970926 != 1750797181970929 2025-06-24T20:33:02.389072Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20706, node 5 2025-06-24T20:33:02.486509Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:02.486538Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:02.486550Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:02.486710Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25706 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:02.858939Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:02.867521Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:33:02.867561Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:33:02.867573Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:33:02.867619Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-24T20:33:02.867686Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-24T20:33:02.867747Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000029308] Connect to grpc://localhost:6404 2025-06-24T20:33:02.868707Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000029308] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:33:02.872329Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000029308] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:33:02.891522Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000029308] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-24T20:33:02.891789Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000029308] Status 14 Service Unavailable 2025-06-24T20:33:02.892999Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-24T20:33:02.893110Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.write now has a permanent error "Service Unavailable" retryable:1 2025-06-24T20:33:02.893149Z node 5 :TICKET_PARSER DEBUG: 
ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-24T20:33:02.893186Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-24T20:33:02.893271Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-24T20:33:02.893541Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000029308] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:33:02.894339Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000029308] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-24T20:33:02.899307Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000029308] Status 1 CANCELLED 2025-06-24T20:33:02.899584Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000029308] Status 1 CANCELLED 2025-06-24T20:33:02.900391Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1415: Ticket **** (8E120919) permission something.read now has a retryable error "CANCELLED" 2025-06-24T20:33:02.900477Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.write now has a permanent error "CANCELLED" retryable:1 2025-06-24T20:33:02.900511Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'CANCELLED' 2025-06-24T20:33:02.985708Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> IncrementalRestoreScan::ChangeSenderEmpty >> Cdc::AddColumn_TopicAutoPartitioning [GOOD] >> Cdc::AddIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::AuthenticationRetryErrorImmediately [GOOD] Test command err: 2025-06-24T20:32:32.978877Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616512463365790:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:32.978936Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046ab/r3tmp/tmp3Vx5TU/pdisk_1.dat 2025-06-24T20:32:33.590461Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:33.590608Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:33.605502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:32:33.664767Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:33.671381Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616512463365772:2079] 1750797152969684 != 1750797152969687 TServer::EnableGrpc on GrpcPort 
5211, node 1 2025-06-24T20:32:33.799896Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:33.799918Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:33.799926Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:33.800068Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:32:34.029362Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15964 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:34.232688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:32:34.251885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:32:34.256043Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-24T20:32:34.256144Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000010408] Connect to grpc://localhost:10785 2025-06-24T20:32:34.262537Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010408] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-06-24T20:32:34.282824Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000010408] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-06-24T20:32:34.283311Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-24T20:32:37.333960Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616533463286900:2125];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:37.334002Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046ab/r3tmp/tmpTxLd7t/pdisk_1.dat 2025-06-24T20:32:37.663875Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519616533463286815:2079] 1750797157327987 != 1750797157327990 2025-06-24T20:32:37.665336Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:37.669356Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:37.669439Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:37.674214Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21147, node 2 2025-06-24T20:32:37.787914Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:37.787942Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:37.787949Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:37.788071Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16112 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:38.176441Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:38.187882Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:32:38.192987Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket ApiK****alid (AB5B5EA8) asking for AccessServiceAuthentication 2025-06-24T20:32:38.193045Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [5170000a1b88] Connect to grpc://localhost:22566 2025-06-24T20:32:38.193932Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000a1b88] Request AuthenticateRequest { api_key: "ApiK****alid (AB5B5EA8)" } 2025-06-24T20:32:38.212839Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [5170000a1b88] Response AuthenticateResponse { subject { user_account { id: "ApiKey-value-valid" } } } 2025-06-24T20:32:38.219374Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket ApiK****alid (AB5B5EA8) () has now valid token of ApiKey-value-valid@as 2025-06-24T20:32:38.392868Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:32:42.233718Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519616556597865274:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:42.233779Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046ab/r3tmp/tmpP7l713/pdisk_1.dat 2025-06-24T20:32:42.688037Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:42.691395Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519616556597865254:2079] 1750797162222675 != 1750797162222678 2025-06-24T20:32:42.717528Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:42.717628Z node 3 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:42.722911Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25216, node 3 2025-06-24T20:32:42.987861Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:42.987886Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:42.987894Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:42.988034Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:32:43.264587Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63104 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } Childr ... nt.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046ab/r3tmp/tmpIXYFA0/pdisk_1.dat 2025-06-24T20:32:48.861051Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:48.862455Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519616582180777188:2079] 1750797168635792 != 1750797168635795 TServer::EnableGrpc on GrpcPort 8765, node 4 2025-06-24T20:32:48.944612Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:48.944639Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:48.944648Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:48.944793Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:32:48.963733Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:48.963852Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:48.968301Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29577 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:49.476661Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:49.483919Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:32:49.487040Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-24T20:32:49.487169Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700011d508] Connect to grpc://localhost:5759 2025-06-24T20:32:49.488834Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700011d508] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-24T20:32:49.501819Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700011d508] Status 14 Service Unavailable 2025-06-24T20:32:49.502045Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-24T20:32:49.502103Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-24T20:32:49.502366Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700011d508] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-24T20:32:49.505296Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700011d508] Status 14 Service Unavailable 2025-06-24T20:32:49.505663Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-24T20:32:49.654548Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:32:50.666322Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-24T20:32:50.666395Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-24T20:32:50.666638Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700011d508] Request 
AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-24T20:32:50.669504Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700011d508] Status 14 Service Unavailable 2025-06-24T20:32:50.674970Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-24T20:32:51.667400Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-24T20:32:51.667443Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-24T20:32:51.667637Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700011d508] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-24T20:32:51.675378Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700011d508] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-06-24T20:32:51.676010Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as 2025-06-24T20:32:53.639484Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519616582180777222:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:53.639598Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:02.504442Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519616640773118399:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:02.504521Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046ab/r3tmp/tmpi70kgO/pdisk_1.dat 2025-06-24T20:33:02.722759Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:02.725697Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519616640773118378:2079] 1750797182499080 != 1750797182499083 2025-06-24T20:33:02.740690Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:02.740792Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:02.745725Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12847, node 5 2025-06-24T20:33:02.827875Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:02.827915Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:02.827924Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:02.828064Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected 
to server localhost:14741 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:03.214138Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:03.225280Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-24T20:33:03.225359Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000054808] Connect to grpc://localhost:28884 2025-06-24T20:33:03.226143Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000054808] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-24T20:33:03.239351Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000054808] Status 14 Service Unavailable 2025-06-24T20:33:03.239774Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-24T20:33:03.239799Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-24T20:33:03.240020Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000054808] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-24T20:33:03.243879Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000054808] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-06-24T20:33:03.244077Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as 2025-06-24T20:33:03.558172Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> Viewer::JsonAutocompleteSimilarDatabaseName [GOOD] >> Viewer::JsonAutocompleteSimilarDatabaseNameWithLimit >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-false [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-true >> KqpWorkloadService::WorkloadServiceDisabledByFeatureFlag >> KqpWorkloadService::TestQueueSizeSimple ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::CheckPgAutoParams-useSink 
[GOOD] Test command err: Trying to start YDB, gRPC: 18551, MsgBus: 5035 2025-06-24T20:30:38.067886Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616017004049968:2238];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:38.068677Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ef6/r3tmp/tmpigEla7/pdisk_1.dat 2025-06-24T20:30:38.387185Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:38.387295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:38.390478Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616017004049754:2079] 1750797037763042 != 1750797037763045 2025-06-24T20:30:38.402054Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:38.408977Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18551, node 1 2025-06-24T20:30:38.547966Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:30:38.547984Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:30:38.547991Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:30:38.548088Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:30:38.831831Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5035 TClient is connected to server localhost:5035 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:30:39.602740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:30:39.628190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:30:42.819486Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616017004049968:2238];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:42.819583Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:30:42.943386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:43.140436Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill \x62797465612030 \x62797465612030 \x62797465612031 \x62797465612031 \x62797465612032 \x62797465612032 \x62797465612033 \x62797465612033 \x62797465612034 \x62797465612034 \x62797465612035 \x62797465612035 \x62797465612036 \x62797465612036 \x62797465612037 \x62797465612037 \x62797465612038 \x62797465612038 \x62797465612039 \x62797465612039 2025-06-24T20:30:43.218914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:43.319682Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:30:43.321770Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:245: Prepare transaction failed. 
txid 281474976715664 at tablet 72075186224037889 errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976715664] at 72075186224037889 while waiting for scan finish) | 2025-06-24T20:30:43.324059Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715664 at tablet 72075186224037889 status: ERROR errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976715664] at 72075186224037889 while waiting for scan finish) | \x62797465612030 \x62797465612030 \x62797465612031 \x62797465612031 \x62797465612032 \x62797465612032 \x62797465612033 \x62797465612033 \x62797465612034 \x62797465612034 \x62797465612035 \x62797465612035 \x62797465612036 \x62797465612036 \x62797465612037 \x62797465612037 \x62797465612038 \x62797465612038 \x62797465612039 \x62797465612039 2025-06-24T20:30:43.421629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:43.528812Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {"\\x6130","\\x623130"} {"\\x6130","\\x623130"} {"\\x6131","\\x623131"} {"\\x6131","\\x623131"} {"\\x6132","\\x623132"} {"\\x6132","\\x623132"} {"\\x6133","\\x623133"} {"\\x6133","\\x623133"} {"\\x6134","\\x623134"} {"\\x6134","\\x623134"} {"\\x6135","\\x623135"} {"\\x6135","\\x623135"} {"\\x6136","\\x623136"} {"\\x6136","\\x623136"} {"\\x6137","\\x623137"} {"\\x6137","\\x623137"} {"\\x6138","\\x623138"} {"\\x6138","\\x623138"} {"\\x6139","\\x623139"} {"\\x6139","\\x623139"} 2025-06-24T20:30:43.644027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:43.765146Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:30:43.766068Z node 1 :TX_DATASHARD ERROR: read_table_scan.cpp:460: TReadTableScan: undelivered event TxId: 281474976715673 2025-06-24T20:30:43.767135Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:245: Prepare transaction failed. 
txid 281474976715673 at tablet 72075186224037891 errors: WRONG_SHARD_STATE (cannot reach sink actor) | 2025-06-24T20:30:43.768133Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715673 at tablet 72075186224037891 status: ERROR errors: WRONG_SHARD_STATE (cannot reach sink actor) | {"\\x6130","\\x623130"} {"\\x6130","\\x623130"} {"\\x6131","\\x623131"} {"\\x6131","\\x623131"} {"\\x6132","\\x623132"} {"\\x6132","\\x623132"} {"\\x6133","\\x623133"} {"\\x6133","\\x623133"} {"\\x6134","\\x623134"} {"\\x6134","\\x623134"} {"\\x6135","\\x623135"} {"\\x6135","\\x623135"} {"\\x6136","\\x623136"} {"\\x6136","\\x623136"} {"\\x6137","\\x623137"} {"\\x6137","\\x623137"} {"\\x6138","\\x623138"} {"\\x6138","\\x623138"} {"\\x6139","\\x623139"} {"\\x6139","\\x623139"} 2025-06-24T20:30:43.831845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:43.921096Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill f f t t 2025-06-24T20:30:44.050210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) f f t t 2025-06-24T20:30:44.216538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:44.314044Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {f,f} {f,f} {t,t} {t,t} 2025-06-24T20:30:44.361514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715687:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:44.427284Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {f,f} {f,f} {t,t} {t,t} 2025-06-24T20:30:44.477262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715691:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:44.534262Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 ... 
on: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:56.400414Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:59.769849Z node 14 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[14:7519616604567875773:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:59.770000Z node 14 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:01.173589Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7519616634632647449:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:01.173588Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7519616634632647457:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:01.173717Z node 14 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:01.181425Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:01.203360Z node 14 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [14:7519616634632647463:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:33:01.290417Z node 14 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [14:7519616634632647514:2342] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:01.339426Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:01.794527Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:02.800057Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:468: Get parsing result with error, self: [14:7519616638927615156:2362], owner: [14:7519616634632647414:2285], statement id: 0 2025-06-24T20:33:02.800364Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=14&id=Nzk3NzAxODQtYmEzZDUyYjctNGRmMjM4MWQtZTA1YTJiN2I=, ActorId: [14:7519616638927615154:2361], ActorState: ExecuteState, TraceId: 01jyht9st94wsbtk5y5f0f5c5s, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T20:33:03.158167Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [14:7519616643222582479:2372], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: RemovePrefixMembers, At function: PgSelect, At tuple, At tuple, At tuple, At function: PgSetItem, At tuple
: Error: At tuple
:1:1: Error: At function: PgWhere, At lambda
:2:55: Error: At function: PgOp
:2:55: Error: Unable to find an overload for operator = with given argument type(s): (text,int4) 2025-06-24T20:33:03.158658Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=14&id=NTkzM2JjMzktNzA5ZTBmNDctYjg0Zjc1ZDktN2QxZDM0ZQ==, ActorId: [14:7519616643222582476:2370], ActorState: ExecuteState, TraceId: 01jyht9t48egdq3ep19qdcvv49, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T20:33:03.217993Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [14:7519616643222582493:2379], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: RemovePrefixMembers, At function: PgSelect, At tuple, At tuple, At tuple, At function: PgSetItem, At tuple
: Error: At tuple
:1:1: Error: At function: PgWhere, At lambda
:2:57: Error: At function: PgAnd
:2:67: Error: At function: PgOp
:2:67: Error: Unable to find an overload for operator = with given argument type(s): (text,int4) 2025-06-24T20:33:03.219556Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=14&id=NmUzM2Q1YTUtZGQ5Mjc3NGQtOTkxYmRlZWMtZWI4MzU3NGE=, ActorId: [14:7519616643222582488:2376], ActorState: ExecuteState, TraceId: 01jyht9t65bqb2m5wvcn9df951, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T20:33:03.244674Z node 14 :KQP_EXECUTER CRIT: kqp_literal_executer.cpp:112: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyht9t806h4rqjyspvzk9kr9, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=ZTQ3NDQ5ZTQtNjc2OWEwNjAtNTVjZGY1NTQtYzlhNjgzZmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, unexpected exception caught: (NKikimr::NMiniKQL::TTerminateException) Terminate was called, reason(51): ERROR: invalid input syntax for type integer: "a" 2025-06-24T20:33:03.245027Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=14&id=ZTQ3NDQ5ZTQtNjc2OWEwNjAtNTVjZGY1NTQtYzlhNjgzZmY=, ActorId: [14:7519616643222582502:2383], ActorState: ExecuteState, TraceId: 01jyht9t806h4rqjyspvzk9kr9, Create QueryResponse for error on request, msg: 2025-06-24T20:33:03.318289Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:03.479512Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:03.604887Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [14:7519616643222582670:2408], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: values have 3 columns, INSERT INTO expects: 2 2025-06-24T20:33:03.611548Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=14&id=ZDFiMDg4NjAtMTkxODhhYTUtOGMzYzEzNDgtNGQxYTIwZjg=, ActorId: [14:7519616643222582667:2406], ActorState: ExecuteState, TraceId: 01jyht9tjk6kycmwgwrp2qq4t7, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T20:33:03.767078Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [14:7519616643222582682:2414], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Failed to convert type: List> to List>
:1:1: Error: Failed to convert 'id': pgunknown to Optional
:1:1: Error: Row type mismatch for table: db.[/Root/PgTable2] 2025-06-24T20:33:03.767516Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=14&id=YzQyOTJlYmQtMWRiMWNhN2UtZTdjYmE4NWYtZjQ1ZmQ1ZTA=, ActorId: [14:7519616643222582679:2412], ActorState: ExecuteState, TraceId: 01jyht9tmk97k7ew27dt7998c1, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T20:33:04.412214Z node 14 :KQP_EXECUTER CRIT: kqp_literal_executer.cpp:112: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyht9tsv3szmhfhqf0bj8qhh, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=ZTZjZmM0MWMtNzY4NjE4M2UtNjdjYjNlOWEtNzhlOTFhMTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, unexpected exception caught: (NKikimr::NMiniKQL::TTerminateException) Terminate was called, reason(51): ERROR: invalid input syntax for type integer: "a" 2025-06-24T20:33:04.412830Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=14&id=ZTZjZmM0MWMtNzY4NjE4M2UtNjdjYjNlOWEtNzhlOTFhMTM=, ActorId: [14:7519616643222582695:2420], ActorState: ExecuteState, TraceId: 01jyht9tsv3szmhfhqf0bj8qhh, Create QueryResponse for error on request, msg: 2025-06-24T20:33:04.473279Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:05.293093Z node 14 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 14, TabletId: 72075186224037892 not found 2025-06-24T20:33:05.365264Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestRandom_Block42 [GOOD] Test command err: RandomSeed# 12862100260901856477 Step = 0 SEND TEvPut with key [1:1:0:0:0:585447:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:585447:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 1 SEND TEvPut with key [1:1:1:0:0:37868:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:37868:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 2 SEND TEvPut with key [1:1:2:0:0:619381:0] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:619381:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 3 SEND TEvPut with key [1:1:3:0:0:725585:0] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:725585:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 4 SEND TEvPut with key [1:1:4:0:0:2934723:0] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:2934723:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Stop node 4 2025-06-24T20:28:16.145350Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 Step = 5 SEND TEvPut with key [1:1:5:0:0:502135:0] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:502135:0] Status# OK 
StatusFlags# { } ApproximateFreeSpaceShare# 0.999976} Step = 6 SEND TEvPut with key [1:1:6:0:0:3044947:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:3044947:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999976} Stop node 7 2025-06-24T20:28:16.361718Z 1 00h01m10.060512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 8 Step = 7 SEND TEvPut with key [1:1:7:0:0:582354:0] TEvPutResult: TEvPutResult {Id# [1:1:7:0:0:582354:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 8 SEND TEvPut with key [1:1:8:0:0:1478820:0] TEvPutResult: TEvPutResult {Id# [1:1:8:0:0:1478820:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 9 SEND TEvPut with key [1:1:9:0:0:1360774:0] TEvPutResult: TEvPutResult {Id# [1:1:9:0:0:1360774:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Start node 4 Step = 10 SEND TEvPut with key [1:1:10:0:0:1727870:0] TEvPutResult: TEvPutResult {Id# [1:1:10:0:0:1727870:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 11 SEND TEvPut with key [1:1:11:0:0:1883457:0] TEvPutResult: TEvPutResult {Id# [1:1:11:0:0:1883457:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 12 SEND TEvPut with key [1:1:12:0:0:568368:0] TEvPutResult: TEvPutResult {Id# [1:1:12:0:0:568368:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 13 SEND TEvPut with key [1:1:13:0:0:896600:0] TEvPutResult: TEvPutResult {Id# [1:1:13:0:0:896600:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 14 SEND TEvPut with key [1:1:14:0:0:179270:0] TEvPutResult: TEvPutResult {Id# [1:1:14:0:0:179270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 15 SEND TEvPut with key [1:1:15:0:0:3026131:0] TEvPutResult: TEvPutResult {Id# [1:1:15:0:0:3026131:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 16 SEND TEvPut with key [1:1:16:0:0:670396:0] TEvPutResult: TEvPutResult {Id# [1:1:16:0:0:670396:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 17 SEND TEvPut with key [1:1:17:0:0:1584741:0] TEvPutResult: TEvPutResult {Id# [1:1:17:0:0:1584741:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 18 SEND TEvPut with key [1:1:18:0:0:2384818:0] TEvPutResult: TEvPutResult {Id# [1:1:18:0:0:2384818:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 19 SEND TEvPut with key [1:1:19:0:0:2867010:0] TEvPutResult: TEvPutResult {Id# [1:1:19:0:0:2867010:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 20 SEND TEvPut with key [1:1:20:0:0:2911789:0] TEvPutResult: TEvPutResult {Id# [1:1:20:0:0:2911789:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 21 SEND TEvPut with key [1:1:21:0:0:2463622:0] TEvPutResult: TEvPutResult {Id# [1:1:21:0:0:2463622:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 22 SEND TEvPut with key [1:1:22:0:0:322338:0] TEvPutResult: TEvPutResult {Id# [1:1:22:0:0:322338:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 23 SEND TEvPut with key [1:1:23:0:0:2119770:0] TEvPutResult: TEvPutResult {Id# [1:1:23:0:0:2119770:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 24 SEND TEvPut with key [1:1:24:0:0:56036:0] TEvPutResult: TEvPutResult {Id# [1:1:24:0:0:56036:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 25 SEND TEvPut with key [1:1:25:0:0:2648607:0] TEvPutResult: TEvPutResult 
{Id# [1:1:25:0:0:2648607:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Stop node 0 2025-06-24T20:28:17.828549Z 3 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:188:17] ServerId# [1:296:58] TabletId# 72057594037932033 PipeClientId# [3:188:17] 2025-06-24T20:28:17.828762Z 6 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:209:17] ServerId# [1:299:61] TabletId# 72057594037932033 PipeClientId# [6:209:17] 2025-06-24T20:28:17.828877Z 5 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:7670:16] ServerId# [1:7679:1093] TabletId# 72057594037932033 PipeClientId# [5:7670:16] 2025-06-24T20:28:17.829000Z 4 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:195:17] ServerId# [1:297:59] TabletId# 72057594037932033 PipeClientId# [4:195:17] 2025-06-24T20:28:17.829121Z 2 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:181:17] ServerId# [1:295:57] TabletId# 72057594037932033 PipeClientId# [2:181:17] 2025-06-24T20:28:17.829231Z 7 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:216:17] ServerId# [1:300:62] TabletId# 72057594037932033 PipeClientId# [7:216:17] Step = 26 SEND TEvPut with key [1:1:26:0:0:539431:0] TEvPutResult: TEvPutResult {Id# [1:1:26:0:0:539431:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 27 SEND TEvPut with key [1:1:27:0:0:148482:0] TEvPutResult: TEvPutResult {Id# [1:1:27:0:0:148482:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 28 SEND TEvPut with key [1:1:28:0:0:2673563:0] TEvPutResult: TEvPutResult {Id# [1:1:28:0:0:2673563:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 29 SEND TEvPut with key [1:1:29:0:0:265170:0] TEvPutResult: TEvPutResult {Id# [1:1:29:0:0:265170:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 30 SEND TEvPut with key [1:1:30:0:0:2398732:0] TEvPutResult: TEvPutResult {Id# [1:1:30:0:0:2398732:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Compact vdisk 2 Step = 31 SEND TEvPut with key [1:1:31:0:0:2302132:0] TEvPutResult: TEvPutResult {Id# [1:1:31:0:0:2302132:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 32 SEND TEvPut with key [1:1:32:0:0:3112269:0] TEvPutResult: TEvPutResult {Id# [1:1:32:0:0:3112269:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 33 SEND TEvPut with key [1:1:33:0:0:883758:0] TEvPutResult: TEvPutResult {Id# [1:1:33:0:0:883758:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 34 SEND TEvPut with key [1:1:34:0:0:1212958:0] TEvPutResult: TEvPutResult {Id# [1:1:34:0:0:1212958:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 35 SEND TEvPut with key [1:1:35:0:0:3026131:0] TEvPutResult: TEvPutResult {Id# [1:1:35:0:0:3026131:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 36 SEND TEvPut with key [1:1:36:0:0:139148:0] TEvPutResult: TEvPutResult {Id# [1:1:36:0:0:139148:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 37 SEND TEvPut with key [1:1:37:0:0:200198:0] TEvPutResult: TEvPutResult {Id# [1:1:37:0:0:200198:0] Status# OK StatusFlags# { } 
ApproximateFreeSpaceShare# 0.99989} Step = 38 SEND TEvPut with key [1:1:38:0:0:1252178:0] TEvPutResult: TEvPutResult {Id# [1:1:38:0:0:1252178:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 39 SEND TEvPut with key [1:1:39:0:0:1897783:0] TEvPutResult: TEvPutResult {Id# [1:1:39:0:0:1897783:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 40 SEND TEvPut with key [1:1:40:0:0:1486678:0] TEvPutResult: TEvPutResult {Id# [1:1:40:0:0:1486678:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 41 SEND TEvPut with key [1:1:41:0:0:1285964:0] TEvPutResult: TEvPutResult {Id# [1:1:41:0:0:1285964:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 42 SEND TEvPut with key [1:1:42:0:0:1221731:0] TEvPutResult: TEvPutResult {Id# [1:1:42:0:0:1221731:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 43 SEND TEvPut with key [1:1:43:0:0:1613844:0] TEvPutResult: TEvPutResult {Id# [1:1:43:0:0:1613844:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 44 SEND TEvPut with key [1:1:44:0:0:2582908:0] TEvPutResult: TEvPutResult {Id# [1:1:44:0:0:2582908:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 45 SEND TEvPut with key [1:1:45:0:0:1703743:0] TEvPutResult: TEvPutResult {Id# [1:1:45:0:0:1703743:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 46 SEND TEvPut with key [1:1:46:0:0:1362981:0] TEvPutResult: TEvPutResult {Id# [1:1:46:0:0:1362981:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 47 SEND TEvPut with key [1:1:47:0:0:1469807:0] TEvPutResult: TEvPutResult {Id# [1:1:47:0:0:1469807:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 48 SEND TEvPut with key [1:1:48:0:0:2832565:0] TEvPutResult: TEvPutResult {Id# [1:1:48:0:0:2832565:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 49 SEND TEvPut with key [1:1:49:0:0:1960611:0] TEvPutResult: TEvPutResult {Id# [1:1:49:0:0:1960611:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 50 SEND TEvPut with key [1:1:50:0:0:1164230:0] TEvPutResult: TEvPutResult {Id# [1:1:50:0:0:1164230:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 51 SEND TEvPut with key [1:1:51:0:0:836900:0] TEvPutResult: TEvPutResult {Id# [1:1:51:0:0:836900:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 52 SEND TEvPut with key [1:1:52:0:0:838380:0] TEvPutResult: TEvPutResult {Id# [1:1:52:0:0:838380:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 53 SEND TEvPut with key [1:1:53:0:0:1975575:0] TEvPutResult: TEvPutResult {Id# [1:1:53:0:0:1975575:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Start node 0 Step = 54 SEND TEvPut with key [1:1:54:0:0:1888556:0] TEvPutResult: TEvPutResult {Id# [1:1:54:0:0:1888556:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 55 SEND TEvPut with key [1:1:55:0:0:715063:0] TEvPutResult: TEvPutResult {Id# [1:1:55:0:0:715063:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 56 SEND TEvPut with key [1:1:56:0:0:42993:0] TEvPutResult: TEvPutResult {Id# [1:1:56:0:0:42993:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 57 SEND TEvPut with key [1:1:57:0:0:1491407:0] TEvPutResult: TEvPutResult {Id# [1:1:57:0:0:1491407:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 58 SEND TEvPut with key 
[1:1:58:0:0:702845:0] TEvPutResult: TEvPutResult {Id# [1:1:58:0:0:702845:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 59 SEND TEvPut with key [1:1:59:0:0:2539948:0] TEvPutResult: TEvPutResult {Id# [1:1:59:0:0:2539948:0] Statu ... ND TEvPut with key [1:1:936:0:0:2748248:0] TEvPutResult: TEvPutResult {Id# [1:1:936:0:0:2748248:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999744} Step = 937 SEND TEvPut with key [1:1:937:0:0:112302:0] TEvPutResult: TEvPutResult {Id# [1:1:937:0:0:112302:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 938 SEND TEvPut with key [1:1:938:0:0:800417:0] TEvPutResult: TEvPutResult {Id# [1:1:938:0:0:800417:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 939 SEND TEvPut with key [1:1:939:0:0:2336442:0] TEvPutResult: TEvPutResult {Id# [1:1:939:0:0:2336442:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 940 SEND TEvPut with key [1:1:940:0:0:982070:0] TEvPutResult: TEvPutResult {Id# [1:1:940:0:0:982070:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Start node 4 Step = 941 SEND TEvPut with key [1:1:941:0:0:713632:0] TEvPutResult: TEvPutResult {Id# [1:1:941:0:0:713632:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 942 SEND TEvPut with key [1:1:942:0:0:1644191:0] TEvPutResult: TEvPutResult {Id# [1:1:942:0:0:1644191:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 943 SEND TEvPut with key [1:1:943:0:0:254634:0] TEvPutResult: TEvPutResult {Id# [1:1:943:0:0:254634:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 944 SEND TEvPut with key [1:1:944:0:0:1141270:0] TEvPutResult: TEvPutResult {Id# [1:1:944:0:0:1141270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 945 SEND TEvPut with key [1:1:945:0:0:610103:0] TEvPutResult: TEvPutResult {Id# [1:1:945:0:0:610103:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999744} Step = 946 SEND TEvPut with key [1:1:946:0:0:24822:0] TEvPutResult: TEvPutResult {Id# [1:1:946:0:0:24822:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999744} Compact vdisk 6 Step = 947 SEND TEvPut with key [1:1:947:0:0:100167:0] TEvPutResult: TEvPutResult {Id# [1:1:947:0:0:100167:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999561} Step = 948 SEND TEvPut with key [1:1:948:0:0:645630:0] TEvPutResult: TEvPutResult {Id# [1:1:948:0:0:645630:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999561} Step = 949 SEND TEvPut with key [1:1:949:0:0:2125890:0] TEvPutResult: TEvPutResult {Id# [1:1:949:0:0:2125890:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999561} Step = 950 SEND TEvPut with key [1:1:950:0:0:2544891:0] TEvPutResult: TEvPutResult {Id# [1:1:950:0:0:2544891:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999561} Step = 951 SEND TEvPut with key [1:1:951:0:0:647007:0] TEvPutResult: TEvPutResult {Id# [1:1:951:0:0:647007:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999561} Step = 952 SEND TEvPut with key [1:1:952:0:0:2031652:0] TEvPutResult: TEvPutResult {Id# [1:1:952:0:0:2031652:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999561} Step = 953 SEND TEvPut with key [1:1:953:0:0:2109805:0] TEvPutResult: TEvPutResult {Id# [1:1:953:0:0:2109805:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999561} Stop node 3 2025-06-24T20:32:16.453146Z 1 00h28m30.748557s :PIPE_SERVER ERROR: [72057594037932033] 
NodeDisconnected NodeId# 4 Step = 954 SEND TEvPut with key [1:1:954:0:0:1353403:0] TEvPutResult: TEvPutResult {Id# [1:1:954:0:0:1353403:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Stop node 4 2025-06-24T20:32:17.423263Z 1 00h28m40.751024s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 Step = 955 SEND TEvPut with key [1:1:955:0:0:1286278:0] TEvPutResult: TEvPutResult {Id# [1:1:955:0:0:1286278:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Start node 3 Step = 956 SEND TEvPut with key [1:1:956:0:0:1875483:0] TEvPutResult: TEvPutResult {Id# [1:1:956:0:0:1875483:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 957 SEND TEvPut with key [1:1:957:0:0:1021388:0] TEvPutResult: TEvPutResult {Id# [1:1:957:0:0:1021388:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Start node 4 Step = 958 SEND TEvPut with key [1:1:958:0:0:860806:0] TEvPutResult: TEvPutResult {Id# [1:1:958:0:0:860806:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 959 SEND TEvPut with key [1:1:959:0:0:385917:0] TEvPutResult: TEvPutResult {Id# [1:1:959:0:0:385917:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 960 SEND TEvPut with key [1:1:960:0:0:200998:0] TEvPutResult: TEvPutResult {Id# [1:1:960:0:0:200998:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 961 SEND TEvPut with key [1:1:961:0:0:1661659:0] TEvPutResult: TEvPutResult {Id# [1:1:961:0:0:1661659:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 962 SEND TEvPut with key [1:1:962:0:0:771410:0] TEvPutResult: TEvPutResult {Id# [1:1:962:0:0:771410:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 963 SEND TEvPut with key [1:1:963:0:0:1414281:0] TEvPutResult: TEvPutResult {Id# [1:1:963:0:0:1414281:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 964 SEND TEvPut with key [1:1:964:0:0:2848837:0] TEvPutResult: TEvPutResult {Id# [1:1:964:0:0:2848837:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 965 SEND TEvPut with key [1:1:965:0:0:989600:0] TEvPutResult: TEvPutResult {Id# [1:1:965:0:0:989600:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 966 SEND TEvPut with key [1:1:966:0:0:2761296:0] TEvPutResult: TEvPutResult {Id# [1:1:966:0:0:2761296:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 967 SEND TEvPut with key [1:1:967:0:0:981163:0] TEvPutResult: TEvPutResult {Id# [1:1:967:0:0:981163:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 968 SEND TEvPut with key [1:1:968:0:0:14298:0] TEvPutResult: TEvPutResult {Id# [1:1:968:0:0:14298:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 969 SEND TEvPut with key [1:1:969:0:0:626285:0] TEvPutResult: TEvPutResult {Id# [1:1:969:0:0:626285:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 970 SEND TEvPut with key [1:1:970:0:0:334566:0] TEvPutResult: TEvPutResult {Id# [1:1:970:0:0:334566:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Stop node 7 2025-06-24T20:32:19.019677Z 1 00h29m10.752560s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 8 Step = 971 SEND TEvPut with key [1:1:971:0:0:972888:0] TEvPutResult: TEvPutResult {Id# [1:1:971:0:0:972888:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999609} Step = 972 SEND TEvPut with key [1:1:972:0:0:786055:0] TEvPutResult: TEvPutResult 
{Id# [1:1:972:0:0:786055:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999609} Step = 973 SEND TEvPut with key [1:1:973:0:0:2707502:0] TEvPutResult: TEvPutResult {Id# [1:1:973:0:0:2707502:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999609} Stop node 1 2025-06-24T20:32:19.348306Z 1 00h29m20.784272s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 Step = 974 SEND TEvPut with key [1:1:974:0:0:2660812:0] TEvPutResult: TEvPutResult {Id# [1:1:974:0:0:2660812:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Start node 1 Step = 975 SEND TEvPut with key [1:1:975:0:0:3005283:0] TEvPutResult: TEvPutResult {Id# [1:1:975:0:0:3005283:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Stop node 1 2025-06-24T20:32:19.816965Z 1 00h29m40.785296s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 Step = 976 SEND TEvPut with key [1:1:976:0:0:1542748:0] TEvPutResult: TEvPutResult {Id# [1:1:976:0:0:1542748:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 977 SEND TEvPut with key [1:1:977:0:0:2837300:0] TEvPutResult: TEvPutResult {Id# [1:1:977:0:0:2837300:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 978 SEND TEvPut with key [1:1:978:0:0:481535:0] TEvPutResult: TEvPutResult {Id# [1:1:978:0:0:481535:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 979 SEND TEvPut with key [1:1:979:0:0:24668:0] TEvPutResult: TEvPutResult {Id# [1:1:979:0:0:24668:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 980 SEND TEvPut with key [1:1:980:0:0:1760402:0] TEvPutResult: TEvPutResult {Id# [1:1:980:0:0:1760402:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 981 SEND TEvPut with key [1:1:981:0:0:1711812:0] TEvPutResult: TEvPutResult {Id# [1:1:981:0:0:1711812:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 982 SEND TEvPut with key [1:1:982:0:0:1422922:0] TEvPutResult: TEvPutResult {Id# [1:1:982:0:0:1422922:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 983 SEND TEvPut with key [1:1:983:0:0:2533122:0] TEvPutResult: TEvPutResult {Id# [1:1:983:0:0:2533122:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 984 SEND TEvPut with key [1:1:984:0:0:347759:0] TEvPutResult: TEvPutResult {Id# [1:1:984:0:0:347759:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 985 SEND TEvPut with key [1:1:985:0:0:1862506:0] TEvPutResult: TEvPutResult {Id# [1:1:985:0:0:1862506:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 986 SEND TEvPut with key [1:1:986:0:0:101043:0] TEvPutResult: TEvPutResult {Id# [1:1:986:0:0:101043:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 987 SEND TEvPut with key [1:1:987:0:0:672278:0] TEvPutResult: TEvPutResult {Id# [1:1:987:0:0:672278:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 988 SEND TEvPut with key [1:1:988:0:0:2042425:0] TEvPutResult: TEvPutResult {Id# [1:1:988:0:0:2042425:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 989 SEND TEvPut with key [1:1:989:0:0:1201477:0] TEvPutResult: TEvPutResult {Id# [1:1:989:0:0:1201477:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 990 SEND TEvPut with key [1:1:990:0:0:1724337:0] TEvPutResult: TEvPutResult {Id# [1:1:990:0:0:1724337:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 991 SEND TEvPut 
with key [1:1:991:0:0:2174403:0] TEvPutResult: TEvPutResult {Id# [1:1:991:0:0:2174403:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 992 SEND TEvPut with key [1:1:992:0:0:193000:0] TEvPutResult: TEvPutResult {Id# [1:1:992:0:0:193000:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 993 SEND TEvPut with key [1:1:993:0:0:618508:0] TEvPutResult: TEvPutResult {Id# [1:1:993:0:0:618508:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 994 SEND TEvPut with key [1:1:994:0:0:2278246:0] TEvPutResult: TEvPutResult {Id# [1:1:994:0:0:2278246:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 995 SEND TEvPut with key [1:1:995:0:0:2001881:0] TEvPutResult: TEvPutResult {Id# [1:1:995:0:0:2001881:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 996 SEND TEvPut with key [1:1:996:0:0:1759634:0] TEvPutResult: TEvPutResult {Id# [1:1:996:0:0:1759634:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 997 SEND TEvPut with key [1:1:997:0:0:2469234:0] TEvPutResult: TEvPutResult {Id# [1:1:997:0:0:2469234:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 998 SEND TEvPut with key [1:1:998:0:0:1329395:0] TEvPutResult: TEvPutResult {Id# [1:1:998:0:0:1329395:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 999 SEND TEvPut with key [1:1:999:0:0:1243807:0] TEvPutResult: TEvPutResult {Id# [1:1:999:0:0:1243807:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Starting nodes Start compaction 1 Start checking >> Viewer::JsonAutocompleteColumns [GOOD] >> Viewer::JsonAutocompleteEmpty [GOOD] >> Viewer::JsonAutocompleteEndOfDatabaseName >> Cdc::InitialScan_TopicAutoPartitioning [GOOD] >> Cdc::InitialScanDebezium >> TestDataErasure::DataErasureWithMerge [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-true [GOOD] >> TSchemeShardDecimalTypesInTables::CopyTableShouldNotFailOnDisabledFeatureFlag >> KqpWorkloadServiceActors::TestPoolFetcher |89.0%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpWorkloadService::TestQueryCancelAfterPoolWithLimits >> TestDataErasure::SimpleTestForAllSupportedObjects [GOOD] >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterDrop >> ResourcePoolsDdl::TestPoolSwitchToUnlimitedState ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::JsonAutocompleteColumns [GOOD] Test command err: 2025-06-24T20:33:07.322792Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:297:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:07.323586Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:07.323691Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:07.687416Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 12540, node 1 TClient is connected to server localhost:29576 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::DataErasureWithMerge [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:67:2058] recipient: [1:59:2101] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:67:2058] recipient: [1:59:2101] Leader for TabletID 72057594046678944 is [1:71:2105] sender: [1:75:2058] recipient: [1:59:2101] 2025-06-24T20:32:54.946218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:32:54.946320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:32:54.946371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:32:54.946415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:32:54.946463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:32:54.946499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:32:54.946584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:32:54.946716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:32:54.947663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:32:54.948094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:32:55.047183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:32:55.047257Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not 
loaded 2025-06-24T20:32:55.048093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:32:55.048246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:32:55.048436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:32:55.055094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:32:55.055725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:32:55.056384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:55.056696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:32:55.057768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:32:55.058076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:32:55.059273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:32:55.059344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:32:55.059456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:32:55.059529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:32:55.059579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:32:55.059782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:32:55.063010Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:71:2105] sender: [1:150:2058] recipient: [1:16:2063] 2025-06-24T20:32:55.229951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:32:55.230195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:55.230442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:32:55.230513Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:32:55.230836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:32:55.230946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:32:55.231767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:55.231990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:32:55.232275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:55.232335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:32:55.232400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:32:55.232447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:32:55.233050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:55.233114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:32:55.233158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:32:55.233702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:55.233745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:55.233792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:55.233859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:32:55.238040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:32:55.238611Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:32:55.238814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:32:55.239951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:55.240099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 76 RawX2: 4294969405 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:32:55.240163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:55.240482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:32:55.240549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:55.240750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:32:55.240853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:32:55.241605Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:32:55.241666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_boa ... 
1: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.627826Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.627921Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:185:2177], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.627956Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.639379Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:280:2239]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.639466Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.639576Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:280:2239], Recipient [2:280:2239]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.639609Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.674306Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.674365Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.674421Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:185:2177], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.674442Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.686016Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:280:2239]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.686107Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.686201Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:280:2239], Recipient [2:280:2239]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.686232Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.727992Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.728073Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event 
TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.728169Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:185:2177], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.728202Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.741657Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:280:2239]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.741728Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.741793Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:280:2239], Recipient [2:280:2239]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.741812Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.779786Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.779870Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.780088Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:185:2177], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.780125Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.792483Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [2:1208:3019], Recipient [2:280:2239]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409551 TableLocalId: 2 Generation: 2 Round: 1 TableStats { DataSize: 10141461 RowCount: 99 IndexSize: 4463 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 50 HasLoanedParts: false Channels { Channel: 1 DataSize: 10141461 IndexSize: 4463 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 3199 Memory: 90317 Storage: 10149823 } ShardState: 2 UserTablePartOwners: 72075186233409551 NodeId: 2 StartTime: 50000 TableOwnerId: 72075186233409546 IsDstSplit: true FollowerId: 0 2025-06-24T20:33:09.792558Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T20:33:09.792611Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72075186233409546 from shard 72075186233409551 followerId 0 pathId [OwnerId: 72075186233409546, LocalPathId: 2] state 'Ready' dataSize 10141461 rowCount 99 cpuUsage 
0.3199 2025-06-24T20:33:09.792726Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72075186233409546 from shard 72075186233409551 followerId 0 pathId [OwnerId: 72075186233409546, LocalPathId: 2] raw table stats: DataSize: 10141461 RowCount: 99 IndexSize: 4463 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 50 HasLoanedParts: false Channels { Channel: 1 DataSize: 10141461 IndexSize: 4463 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T20:33:09.792768Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-24T20:33:09.803648Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:280:2239]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.803731Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.803822Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:280:2239], Recipient [2:280:2239]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.803853Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.816865Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125517, Sender [0:0:0], Recipient [2:185:2177]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:33:09.816931Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5152: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:33:09.816956Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-24T20:33:09.817132Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 268637738, Sender [2:188:2179], Recipient [2:185:2177]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-06-24T20:33:09.817153Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5151: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-24T20:33:09.817198Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7879: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-24T20:33:09.817257Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-24T20:33:09.817283Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-24T20:33:09.817339Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 29.948500s, Timestamp# 1970-01-01T00:01:10.051500Z 2025-06-24T20:33:09.817377Z node 2 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 1, duration# 20 s 2025-06-24T20:33:09.817858Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-24T20:33:09.820238Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [2:1522:3274], Recipient [2:185:2177]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:33:09.820298Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:33:09.820331Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T20:33:09.820453Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125519, Sender [2:171:2170], Recipient [2:185:2177]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-24T20:33:09.820481Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5149: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-24T20:33:09.820510Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7830: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> Viewer::JsonAutocompleteStartOfDatabaseName [GOOD] >> Viewer::JsonStorageListingV1 >> KqpWorkloadServiceTables::TestTablesIsNotCreatingForUnlimitedPool >> KqpWorkloadServiceDistributed::TestDistributedQueue >> Cdc::ShouldBreakLocksOnConcurrentDropIndex [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentCancelBuildIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::SimpleTestForAllSupportedObjects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:33:03.855847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:33:03.855930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:33:03.855969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:33:03.856005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:33:03.856068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:33:03.856096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:33:03.856159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-24T20:33:03.856263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:33:03.857057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:33:03.857441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:33:03.951798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:33:03.951845Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:03.966880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:33:03.971252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:33:03.971433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:33:03.980511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:33:03.980770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:33:03.981471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:33:03.981829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:33:03.984508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:33:03.984699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:33:03.985755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:33:03.985810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:33:03.985911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:33:03.985966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:33:03.986024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:33:03.986179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:33:03.992995Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 
2025-06-24T20:33:04.128636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:33:04.128851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:33:04.129022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:33:04.129065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:33:04.129327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:33:04.129466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:33:04.135933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:33:04.136146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:33:04.136385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:33:04.136466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:33:04.136512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:33:04.136553Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:33:04.140387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:33:04.140465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:33:04.140507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:33:04.145307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:33:04.145371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:33:04.145415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:33:04.145497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:33:04.149074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:33:04.150985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:33:04.151208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:33:04.152353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:33:04.152491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:33:04.152548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:33:04.152846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:33:04.152902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:33:04.153066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:33:04.153159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:33:04.155733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:33:04.155790Z node 1 :FLAT_TX_SCHEMESHARD ... 
7: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-24T20:33:09.736103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-24T20:33:09.736151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-24T20:33:09.769613Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:458:2408]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.769691Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.769814Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:458:2408], Recipient [1:458:2408]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.769847Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.769972Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [1:801:2686], Recipient [1:458:2408]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409546 2025-06-24T20:33:09.770013Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T20:33:09.770109Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-24T20:33:09.770335Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 3 took 194us result status StatusSuccess 2025-06-24T20:33:09.770818Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409552 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409551 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409552 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 
DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-24T20:33:09.850027Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:958:2816]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.850106Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:09.850262Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:958:2816], Recipient [1:958:2816]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.850293Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:09.850425Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [1:1383:3167], Recipient [1:958:2816]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409553 2025-06-24T20:33:09.850456Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T20:33:09.850569Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409553, at schemeshard: 72075186233409553 2025-06-24T20:33:09.850764Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409553 describe pathId 3 took 179us result status StatusSuccess 2025-06-24T20:33:09.851230Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409553 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 108 CreateStep: 350 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409559 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409558 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409559 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 SchemeShard: 
72075186233409553 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72075186233409553, at schemeshard: 72075186233409553 2025-06-24T20:33:10.283064Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:10.283171Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:33:10.283286Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:295:2277], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:10.283332Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:33:10.345819Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:295:2277]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:33:10.345903Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5152: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T20:33:10.345941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-24T20:33:10.346208Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 268637738, Sender [1:298:2279], Recipient [1:295:2277]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-06-24T20:33:10.346251Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5151: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-24T20:33:10.346283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7879: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-24T20:33:10.346350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-24T20:33:10.346387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-24T20:33:10.346459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.923000s, Timestamp# 1970-01-01T00:00:05.122000Z 2025-06-24T20:33:10.346507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 1, duration# 2 s 2025-06-24T20:33:10.352250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-24T20:33:10.352972Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:2425:4028], Recipient [1:295:2277]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:33:10.353030Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:33:10.353071Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T20:33:10.353253Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125519, Sender [1:277:2266], Recipient [1:295:2277]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-24T20:33:10.353290Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5149: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-24T20:33:10.353327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7830: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> TSchemeShardDecimalTypesInTables::CopyTableShouldNotFailOnDisabledFeatureFlag [GOOD] >> TSchemeShardDecimalTypesInTables::CreateWithWrongParameters >> ResourcePoolClassifiersDdl::TestResourcePoolClassifiersPermissions >> IncrementalRestoreScan::Empty [GOOD] >> TSchemeShardDecimalTypesInTables::CreateWithWrongParameters [GOOD] >> TSchemeShardDecimalTypesInTables::AlterWithWrongParameters ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> IncrementalRestoreScan::Empty [GOOD] Test command err: 2025-06-24T20:33:11.331321Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:11.331839Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:11.332060Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046f5/r3tmp/tmpidJQb9/pdisk_1.dat 2025-06-24T20:33:11.771186Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:33:11.772284Z node 1 :CHANGE_EXCHANGE DEBUG: incr_restore_scan.cpp:178: [TIncrementalRestoreScan][1337][OwnerId: 1, LocalPathId: 2][OwnerId: 3, LocalPathId: 4][1:556:2481] Exhausted 2025-06-24T20:33:11.772412Z node 1 :CHANGE_EXCHANGE DEBUG: incr_restore_scan.cpp:127: [TIncrementalRestoreScan][1337][OwnerId: 1, LocalPathId: 2][OwnerId: 3, LocalPathId: 4][1:556:2481] Handle TEvIncrementalRestoreScan::TEvFinished NKikimr::NDataShard::TEvIncrementalRestoreScan::TEvFinished 2025-06-24T20:33:11.772479Z node 1 :CHANGE_EXCHANGE DEBUG: incr_restore_scan.cpp:191: [TIncrementalRestoreScan][1337][OwnerId: 1, LocalPathId: 2][OwnerId: 3, LocalPathId: 4][1:556:2481] Finish Done >> TTicketParserTest::NebiusAuthenticationRetryErrorImmediately [GOOD] >> TTicketParserTest::NebiusAccessKeySignatureUnsupported >> Cdc::ResolvedTimestamps [GOOD] >> Cdc::ResolvedTimestampsMultiplePartitions >> KqpPg::InsertNoTargetColumns_Alter-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Serial+useSink >> ResourcePoolsDdl::TestCreateResourcePool >> THealthCheckTest::OrangeGroupIssueWhenDegradedGroupStatus >> IncrementalRestoreScan::ChangeSenderSimple [GOOD] >> ReadOnlyVDisk::TestStorageLoad [GOOD] >> TSchemeShardDecimalTypesInTables::AlterWithWrongParameters [GOOD] >> TSchemeShardInfoTypesTest::EmptyFamilies [GOOD] >> TSchemeShardInfoTypesTest::LostId [GOOD] >> TSchemeShardInfoTypesTest::DeduplicationOrder [GOOD] >> TSchemeShardInfoTypesTest::MultipleDeduplications [GOOD] >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-false |89.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer |89.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer |89.1%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/test-results/unittest/{meta.json ... 
results_accumulator.log} |89.1%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer >> THealthCheckTest::YellowGroupIssueWhenPartialGroupStatus >> IncrementalRestoreScan::ChangeSenderEmpty [GOOD] >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable2 [GOOD] |89.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut |89.1%| [LD] {RESULT} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut |89.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut >> TTicketParserTest::LoginCheckRemovedUser [GOOD] >> TTicketParserTest::LoginEmptyTicketBad >> KqpWorkloadServiceActors::TestPoolFetcher [GOOD] >> KqpWorkloadServiceActors::TestPoolFetcherAclValidation >> KqpWorkloadService::WorkloadServiceDisabledByFeatureFlag [GOOD] >> KqpWorkloadService::WorkloadServiceDisabledByFeatureFlagOnServerless ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestStorageLoad [GOOD] Test command err: RandomSeed# 14957981135435145695 Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] 2025-06-24T20:32:36.728557Z 1 00h02m38.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:36.731375Z 1 00h02m38.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:36.734354Z 1 00h02m38.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:36.739311Z 1 00h02m38.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:36.739493Z 1 00h02m38.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:36.942372Z 1 00h02m38.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:36.955353Z 1 00h02m38.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:36.981001Z 1 00h02m38.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:36.992665Z 1 00h02m38.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:37.145114Z 1 00h02m38.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:37.157199Z 1 00h02m38.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:37.203535Z 1 00h02m39.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:37.205890Z 1 00h02m39.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:37.221692Z 1 00h02m39.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 
2025-06-24T20:32:37.426477Z 1 00h02m39.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:37.443199Z 1 00h02m39.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:37.866046Z 1 00h02m39.700000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:37.881532Z 1 00h02m39.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:37.971992Z 1 00h02m40.000000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:38.215865Z 1 00h02m40.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:38.218154Z 1 00h02m40.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:38.235644Z 1 00h02m40.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:38.250484Z 1 00h02m40.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:38.345185Z 1 00h02m40.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:38.498433Z 1 00h02m40.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:38.512813Z 1 00h02m40.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:38.586126Z 1 00h02m40.700000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:38.602466Z 1 00h02m40.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:38.618106Z 1 00h02m40.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:38.857493Z 1 00h02m41.000000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:38.875161Z 1 00h02m41.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:38.878523Z 1 00h02m41.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:38.919763Z 1 00h02m41.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:39.021956Z 1 00h02m41.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:39.053277Z 1 00h02m41.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:39.373927Z 1 00h02m41.700000s :BS_SKELETON ERROR: PDiskId# 1000 
VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:39.418173Z 1 00h02m41.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:39.679774Z 1 00h02m42.000000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:39.753931Z 1 00h02m42.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:39.754727Z 1 00h02m42.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:39.873788Z 1 00h02m42.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:40.066638Z 1 00h02m42.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:40.233119Z 1 00h02m42.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:40.320007Z 1 00h02m42.700000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:40.503415Z 1 00h02m42.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:40.525596Z 1 00h02m43.000000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:40.560831Z 1 00h02m43.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:40.564520Z 1 00h02m43.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:40.595519Z 1 00h02m43.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:40.616024Z 1 00h02m43.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:40.644022Z 1 00h02m43.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:40.760624Z 1 00h02m43.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:40.953834Z 1 00h02m43.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:41.080547Z 1 00h02m43.700000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:41.098301Z 1 00h02m43.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:41.117155Z 1 00h02m43.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:41.193557Z 1 00h02m44.000000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 
2025-06-24T20:32:41.262949Z 1 00h02m44.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:41.453486Z 1 00h02m44.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:41.476674Z 1 00h02m44.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:41.508280Z 1 00h02m44.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:41.795996Z 1 00h02m44.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:42.193741Z 1 00h02m44.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:42.251501Z 1 00h02m44.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:42.539385Z 1 00h02m45.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:42.559132Z 1 00h02m45.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:42.578036Z 1 00h02m45.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:42.609377Z 1 00h02m45.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:42.623427Z 1 00h02m45.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:42.668560Z 1 00h02m45.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:42.698453Z 1 00h02m45.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:700] 2025-06-24T20:32:42.765985Z 1 00h02m46.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [ ... 
k [82000000:1:0:5:0] Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] 2025-06-24T20:33:00.469606Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:00.476558Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:00.494960Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:00.504380Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:00.506094Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:00.536488Z 8 00h20m54.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:00.747186Z 8 00h20m54.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:00.779584Z 8 00h20m54.812560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:00.802787Z 8 00h20m54.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:00.849949Z 8 00h20m55.112560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:01.095815Z 8 00h20m55.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:01.241559Z 8 00h20m55.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:01.244217Z 8 00h20m55.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:01.264636Z 8 00h20m55.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:01.296956Z 8 00h20m55.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:01.445656Z 8 00h20m55.812560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:01.592139Z 8 00h20m56.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:01.611449Z 8 00h20m56.112560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:01.646124Z 8 00h20m56.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:01.856162Z 8 00h20m56.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:01.858932Z 8 00h20m56.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in 
read-only Sender# [1:5368:749] 2025-06-24T20:33:01.878864Z 8 00h20m56.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:02.016044Z 8 00h20m56.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:02.039856Z 8 00h20m56.812560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:02.093724Z 8 00h20m56.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:02.108250Z 8 00h20m57.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:02.288329Z 8 00h20m57.112560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:02.304804Z 8 00h20m57.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:02.328088Z 8 00h20m57.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:02.392035Z 8 00h20m57.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:02.393627Z 8 00h20m57.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:02.634359Z 8 00h20m57.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:02.841445Z 8 00h20m57.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:02.873510Z 8 00h20m57.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:02.890517Z 8 00h20m58.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:02.924831Z 8 00h20m58.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:02.950987Z 8 00h20m58.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:02.983064Z 8 00h20m58.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:02.986192Z 8 00h20m58.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:03.040736Z 8 00h20m58.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:03.062531Z 8 00h20m58.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:03.117450Z 8 00h20m58.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:03.265434Z 8 00h20m59.012560s 
:BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:03.299099Z 8 00h20m59.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:03.316762Z 8 00h20m59.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:03.339600Z 8 00h20m59.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:03.342436Z 8 00h20m59.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:03.386262Z 8 00h20m59.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:03.406132Z 8 00h20m59.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:03.685144Z 8 00h20m59.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:03.709352Z 8 00h21m00.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:03.810604Z 8 00h21m00.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:03.826725Z 8 00h21m00.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:04.007608Z 8 00h21m00.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:04.010156Z 8 00h21m00.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:04.031807Z 8 00h21m00.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:04.045952Z 8 00h21m00.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:04.162290Z 8 00h21m00.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:04.319054Z 8 00h21m00.812560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:04.333973Z 8 00h21m00.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:04.364363Z 8 00h21m01.112560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:04.402304Z 8 00h21m01.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:04.647656Z 8 00h21m01.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:04.677650Z 8 00h21m01.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in 
read-only Sender# [1:5368:749] 2025-06-24T20:33:04.695132Z 8 00h21m01.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:04.959457Z 8 00h21m01.812560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:05.078241Z 8 00h21m01.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:05.173028Z 8 00h21m02.112560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:05.221810Z 8 00h21m02.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:05.359633Z 8 00h21m02.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:05.360062Z 8 00h21m02.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] 2025-06-24T20:33:05.363044Z 8 00h21m02.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5368:749] >> GenericFederatedQuery::IcebergHadoopSaSelectAll ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> IncrementalRestoreScan::ChangeSenderSimple [GOOD] Test command err: 2025-06-24T20:33:11.739067Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:11.739801Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:11.740023Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046bf/r3tmp/tmpSaW67H/pdisk_1.dat 2025-06-24T20:33:12.122268Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:33:12.246258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-24T20:33:12.246500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:33:12.246675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T20:33:12.246715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T20:33:12.246954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:33:12.247052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:33:12.247897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T20:33:12.248119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T20:33:12.248313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:33:12.248365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T20:33:12.248415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:33:12.248446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:33:12.248998Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:33:12.249048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T20:33:12.249083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:33:12.249524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:33:12.249577Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:33:12.249623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-24T20:33:12.249688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:33:12.253045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:33:12.253681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:33:12.253898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-24T20:33:12.254970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-24T20:33:12.255013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-24T20:33:12.255047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-06-24T20:33:12.291893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } 2025-06-24T20:33:12.291996Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:12.293007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage 2025-06-24T20:33:12.293106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:33:12.298617Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797188196253 != 1750797188196257 2025-06-24T20:33:12.348012Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T20:33:12.349601Z node 1 :TX_PROXY DEBUG: 
client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T20:33:12.349955Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:12.350112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:12.362916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:33:12.454694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T20:33:12.454969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 500 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T20:33:12.455083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2025-06-24T20:33:12.455444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:33:12.455530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2025-06-24T20:33:12.455779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-24T20:33:12.455885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-24T20:33:12.457265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-24T20:33:12.457324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 1, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-24T20:33:12.457561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-24T20:33:12.457617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:533:2461], at schemeshard: 72057594046644480, txId: 1, path id: 1 2025-06-24T20:33:12.457998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:33:12.458058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 1:0 ProgressState 2025-06-24T20:33:12.458170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T20:33:12.458212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:33:12.458275Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T20:33:12.458324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:33:12.458378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T20:33:12.458432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:33:12.458471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T20:33:12.458504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T20:33:12.458574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-24T20:33:12.458617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publicatio ... :0 2025-06-24T20:33:13.394929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-24T20:33:13.395339Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037889 state Ready 2025-06-24T20:33:13.395422Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-24T20:33:13.396138Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:59:2106] Handle TEvNavigate describe path /Root/IncrBackupTable 2025-06-24T20:33:13.425783Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:768:2626] HANDLE EvNavigateScheme /Root/IncrBackupTable 2025-06-24T20:33:13.427647Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:768:2626] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T20:33:13.427792Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:768:2626] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/IncrBackupTable" Options { ShowPrivateTable: true } 2025-06-24T20:33:13.428874Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:768:2626] Handle TEvDescribeSchemeResult Forward to# [1:552:2478] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/IncrBackupTable" PathDescription { Self { Name: "IncrBackupTable" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1500 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } 
KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 
MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-24T20:33:13.431319Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:778:2630], serverId# [1:779:2631], sessionId# [0:0:0] 2025-06-24T20:33:13.432206Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:65: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:780:2632] HandleUserTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/IncrBackupTable TableId: [72057594046644480:3:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T20:33:13.432413Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:131: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:780:2632] HandleTargetTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T20:33:13.432680Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:227: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:780:2632] HandleKeys TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-24T20:33:13.432881Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:139: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:780:2632] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] BodySize: 18 }] } 
2025-06-24T20:33:13.432988Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:144: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:780:2632] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 0 Group: 0 Step: 0 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Kind: IncrementalRestore Source: InitialScan Body: 18b TableId: [OwnerId: 72057594046644480, LocalPathId: 3] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-24T20:33:13.433188Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:392: actor# [1:59:2106] Handle TEvGetProxyServicesRequest 2025-06-24T20:33:13.433281Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:40: [TableChangeSenderShard][0:0][72075186224037888][1:784:2632] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-24T20:33:13.433578Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:785:2636], serverId# [1:786:2637], sessionId# [0:0:0] 2025-06-24T20:33:13.483830Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][0:0][72075186224037888][1:784:2632] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-24T20:33:13.483992Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:154: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:780:2632] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T20:33:13.484153Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][0:0][72075186224037888][1:784:2632] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 0 Group: 0 Step: 0 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Kind: IncrementalRestore Source: InitialScan Body: 18b TableId: [OwnerId: 72057594046644480, LocalPathId: 3] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-24T20:33:13.484232Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:154: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:780:2632] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T20:33:13.484388Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:176: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:780:2632] Handle NKikimr::NDataShard::TEvIncrementalRestoreScan::TEvNoMoreData >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-false [GOOD] >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> IncrementalRestoreScan::ChangeSenderEmpty [GOOD] Test command err: 2025-06-24T20:33:12.610651Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:12.611057Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:12.611530Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046bc/r3tmp/tmpIYz7aO/pdisk_1.dat 2025-06-24T20:33:12.952531Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:33:13.081930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-24T20:33:13.082170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:33:13.082361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T20:33:13.082407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T20:33:13.082681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:33:13.082772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:33:13.083563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T20:33:13.083765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T20:33:13.083988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:33:13.084042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T20:33:13.084085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:33:13.084121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:33:13.084731Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:33:13.084786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T20:33:13.084827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:33:13.085358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:33:13.085412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:33:13.085464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-24T20:33:13.085541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:33:13.089377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:33:13.089975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:33:13.090149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-24T20:33:13.091270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-24T20:33:13.091323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-24T20:33:13.091363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-06-24T20:33:13.122471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } 2025-06-24T20:33:13.122546Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:13.124290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage 2025-06-24T20:33:13.124381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:33:13.148560Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797188752310 != 1750797188752314 2025-06-24T20:33:13.194336Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T20:33:13.195671Z node 1 :TX_PROXY DEBUG: 
client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T20:33:13.195982Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:13.196084Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:13.207832Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:33:13.282873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T20:33:13.283050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 500 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T20:33:13.283116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2025-06-24T20:33:13.283467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:33:13.283522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2025-06-24T20:33:13.283699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-24T20:33:13.283785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-24T20:33:13.284744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-24T20:33:13.284793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 1, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-24T20:33:13.284975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-24T20:33:13.285024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:533:2461], at schemeshard: 72057594046644480, txId: 1, path id: 1 2025-06-24T20:33:13.285349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:33:13.285390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 1:0 ProgressState 2025-06-24T20:33:13.285478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T20:33:13.285523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:33:13.285564Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T20:33:13.285613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:33:13.285671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T20:33:13.285718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:33:13.285753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T20:33:13.285781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T20:33:13.285837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-24T20:33:13.285877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publicatio ... r: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { 
SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046644480 2025-06-24T20:33:14.091109Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:59:2106] Handle TEvNavigate describe path /Root/IncrBackupTable 2025-06-24T20:33:14.111446Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:780:2632] HANDLE EvNavigateScheme /Root/IncrBackupTable 2025-06-24T20:33:14.111950Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:780:2632] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T20:33:14.112052Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:780:2632] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/IncrBackupTable" 2025-06-24T20:33:14.113162Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:780:2632] Handle TEvDescribeSchemeResult Forward to# [1:552:2478] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/IncrBackupTable" PathDescription { Self { Name: "IncrBackupTable" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1500 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 
ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-24T20:33:14.114956Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:65: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:782:2634] HandleUserTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: 
result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/IncrBackupTable TableId: [72057594046644480:3:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T20:33:14.115193Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:131: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:782:2634] HandleTargetTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T20:33:14.115430Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:227: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:782:2634] HandleKeys TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-24T20:33:14.115538Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:176: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:782:2634] Handle NKikimr::NDataShard::TEvIncrementalRestoreScan::TEvNoMoreData ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable2 [GOOD] Test command err: 2025-06-24T20:32:26.317365Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616486895230004:2182];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:26.317800Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047ae/r3tmp/tmpZ4rBeO/pdisk_1.dat 2025-06-24T20:32:26.794975Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:26.802321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-24T20:32:26.802419Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:26.815117Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31015 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T20:32:27.044584Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519616486895230078:2142] Handle TEvNavigate describe path dc-1 2025-06-24T20:32:27.067729Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519616491190197806:2437] HANDLE EvNavigateScheme dc-1 2025-06-24T20:32:27.067861Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519616486895230102:2155], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:27.067898Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519616486895230102:2155], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-24T20:32:27.068067Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519616491190197807:2438][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:32:27.070163Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616486895229755:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616491190197811:2438] 2025-06-24T20:32:27.070221Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616486895229755:2051] Subscribe: subscriber# [1:7519616491190197811:2438], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.070287Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616486895229758:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616491190197812:2438] 2025-06-24T20:32:27.070305Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616486895229758:2054] Subscribe: subscriber# [1:7519616491190197812:2438], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.070334Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519616486895229761:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519616491190197813:2438] 2025-06-24T20:32:27.070352Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519616486895229761:2057] Subscribe: subscriber# [1:7519616491190197813:2438], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T20:32:27.070403Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616491190197811:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616486895229755:2051] 2025-06-24T20:32:27.070423Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616491190197812:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616486895229758:2054] 2025-06-24T20:32:27.070446Z 
node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519616491190197813:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616486895229761:2057] 2025-06-24T20:32:27.070483Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616491190197807:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616491190197808:2438] 2025-06-24T20:32:27.070526Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616491190197807:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616491190197809:2438] 2025-06-24T20:32:27.070600Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519616491190197807:2438][/dc-1] Set up state: owner# [1:7519616486895230102:2155], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:27.070719Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519616491190197807:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519616491190197810:2438] 2025-06-24T20:32:27.070781Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519616491190197807:2438][/dc-1] Path was already updated: owner# [1:7519616486895230102:2155], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:27.070835Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616491190197811:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616491190197808:2438], cookie# 1 2025-06-24T20:32:27.070850Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616491190197812:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616491190197809:2438], cookie# 1 2025-06-24T20:32:27.070871Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616491190197813:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616491190197810:2438], cookie# 1 2025-06-24T20:32:27.070898Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616486895229755:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616491190197811:2438] 2025-06-24T20:32:27.070922Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616486895229755:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616491190197811:2438], cookie# 1 2025-06-24T20:32:27.070946Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616486895229758:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616491190197812:2438] 2025-06-24T20:32:27.070965Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: 
[1:7519616486895229758:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616491190197812:2438], cookie# 1 2025-06-24T20:32:27.070980Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519616486895229761:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519616491190197813:2438] 2025-06-24T20:32:27.070992Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616486895229761:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616491190197813:2438], cookie# 1 2025-06-24T20:32:27.078628Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616491190197811:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616486895229755:2051], cookie# 1 2025-06-24T20:32:27.078687Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616491190197812:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616486895229758:2054], cookie# 1 2025-06-24T20:32:27.078713Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616491190197813:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616486895229761:2057], cookie# 1 2025-06-24T20:32:27.078919Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616491190197807:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616491190197808:2438], cookie# 1 2025-06-24T20:32:27.078952Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616491190197807:2438][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:32:27.078978Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616491190197807:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616491190197809:2438], cookie# 1 2025-06-24T20:32:27.078996Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616491190197807:2438][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:32:27.079013Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616491190197807:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616491190197810:2438], cookie# 1 2025-06-24T20:32:27.079036Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519616491190197807:2438][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:32:27.128314Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519616486895230102:2155], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } 
DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Data ... Version: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:33:13.359497Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [13:7519616642850516334:2111], cacheItem# { Subscriber: { Subscriber: [13:7519616685800189387:2144] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:33:13.359561Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [13:7519616642850516334:2111], cacheItem# { Subscriber: { Subscriber: [13:7519616685800189388:2145] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:33:13.359687Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [13:7519616690095156713:2154], recipient# [13:7519616685800189385:2279], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-24T20:33:13.359916Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [13:7519616685800189385:2279], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:13.420399Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [13:7519616642850516334:2111], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:33:13.420586Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [13:7519616642850516334:2111], cacheItem# { Subscriber: { Subscriber: [13:7519616647145483646:2116] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:33:13.420713Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [13:7519616690095156715:2155], recipient# [13:7519616690095156714:2284], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:33:13.782614Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [11:7519616642025166926:2112], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:33:13.782804Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [11:7519616642025166926:2112], cacheItem# { Subscriber: { Subscriber: [11:7519616680679872749:2197] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:33:13.782933Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [11:7519616642025166926:2112], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: 
[18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:33:13.783035Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [11:7519616642025166926:2112], cacheItem# { Subscriber: { Subscriber: [11:7519616642025166939:2117] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:33:13.783040Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [11:7519616689269807382:2209], recipient# [11:7519616689269807380:2281], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-24T20:33:13.783175Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [11:7519616689269807383:2210], recipient# [11:7519616689269807381:2282], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:33:13.783342Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:13.843373Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [11:7519616642025166926:2112], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:33:13.843587Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [11:7519616642025166926:2112], cacheItem# { Subscriber: { Subscriber: [11:7519616680679872750:2198] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: 
dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:33:13.843669Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [11:7519616642025166926:2112], cacheItem# { Subscriber: { Subscriber: [11:7519616680679872751:2199] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:33:13.843812Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [11:7519616689269807384:2211], recipient# [11:7519616680679872748:2277], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-24T20:33:13.844155Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:7519616680679872748:2277], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } |89.1%| [TA] $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterDrop [GOOD] >> KqpWorkloadServiceTables::TestCreateWorkloadSerivceTables >> KqpWorkloadServiceTables::TestTablesIsNotCreatingForUnlimitedPool [GOOD] >> ResourcePoolClassifiersDdl::TestCreateResourcePoolClassifier >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-true [GOOD] >> GenericFederatedQuery::IcebergHiveTokenSelectAll >> GenericFederatedQuery::IcebergHiveBasicSelectAll >> TTicketParserTest::NebiusAccessKeySignatureUnsupported [GOOD] >> TSubscriberCombinationsTest::CombinationsRootDomain [GOOD] >> TSubscriberCombinationsTest::CombinationsMigratedPath >> GenericFederatedQuery::IcebergHadoopTokenSelectAll >> Viewer::JsonAutocompleteSimilarDatabaseNameWithLimit [GOOD] >> Viewer::JsonAutocompleteSimilarDatabaseNamePOST >> KqpWorkloadService::TestQueueSizeSimple [GOOD] >> KqpWorkloadService::TestQueueSizeManyQueries >> GenericFederatedQuery::ClickHouseManagedSelectAll ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:32:32.381090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:32:32.381190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:32:32.381261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:32:32.381305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:32:32.381351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:32:32.381381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:32:32.381441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:32:32.381518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:32:32.382373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , 
AvailableExternalDataSources# 2025-06-24T20:32:32.382802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:32:32.465937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:32:32.466004Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:32.483675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:32:32.484233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:32:32.484442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:32:32.493765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:32:32.494016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:32:32.494769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:32.495134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:32:32.515857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:32:32.516106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:32:32.517460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:32:32.517531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:32:32.517768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:32:32.517816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:32:32.517859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:32:32.517955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.527731Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:32:32.674467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:32:32.674720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.674987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:32:32.675043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:32:32.675293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:32:32.675425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:32:32.677843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:32.678009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:32:32.678158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.678198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:32:32.678239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:32:32.678275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:32:32.681255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.681307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:32:32.681339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:32:32.682898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.682940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:32:32.682982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:32.683029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:32:32.695745Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:32:32.697790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:32:32.697939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:32:32.698809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:32:32.698934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:32:32.698980Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:32.699370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:32:32.699429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:32:32.699618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:32:32.699693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:32:32.703938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:32:32.704014Z node 1 :FLAT_TX_SCHEMESHARD ... 
6678944 2025-06-24T20:33:16.989098Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 102 ready parts: 1/1 2025-06-24T20:33:16.989377Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } AffectedSet { TabletId: 72075186233409546 Flags: 2 } ExecLevel: 0 TxId: 102 MinStep: 5000003 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:33:16.993423Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-06-24T20:33:16.993660Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 102 at step: 5000003 2025-06-24T20:33:16.994794Z node 12 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:33:16.994997Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 51539609708 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:33:16.995113Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_table.cpp:359: TAlterTable TPropose operationId# 102:0 HandleReply TEvOperationPlan, operationId: 102:0, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-24T20:33:16.995663Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 128 -> 129 2025-06-24T20:33:16.995930Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-24T20:33:17.007309Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:33:17.007413Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:33:17.007918Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:33:17.008054Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [12:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-24T20:33:17.009018Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:33:17.009127Z node 12 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-06-24T20:33:17.011071Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:33:17.011281Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T20:33:17.011385Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T20:33:17.011485Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-24T20:33:17.011571Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T20:33:17.011728Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 102 2025-06-24T20:33:17.012459Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 2026 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-24T20:33:17.012538Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T20:33:17.012723Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 2026 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-24T20:33:17.012882Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 2026 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-24T20:33:17.013752Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 51539609846 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T20:33:17.013824Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation 
FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T20:33:17.014108Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 51539609846 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T20:33:17.014226Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T20:33:17.014404Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 309 RawX2: 51539609846 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T20:33:17.014522Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T20:33:17.014596Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:33:17.014658Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T20:33:17.014736Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T20:33:17.024066Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T20:33:17.024329Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:33:17.024972Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:33:17.025445Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T20:33:17.025519Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T20:33:17.025737Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T20:33:17.025819Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:33:17.025895Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T20:33:17.025991Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:33:17.026070Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T20:33:17.026183Z node 12 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [12:337:2314] message: TxId: 102 2025-06-24T20:33:17.026296Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T20:33:17.026388Z node 12 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T20:33:17.026458Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T20:33:17.026671Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T20:33:17.032406Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:33:17.032517Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [12:395:2365] TestWaitNotification: OK eventTxId 102 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::NebiusAccessKeySignatureUnsupported [GOOD] Test command err: 2025-06-24T20:32:37.711032Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616532462222278:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:37.711080Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004678/r3tmp/tmp1g6BDC/pdisk_1.dat 2025-06-24T20:32:38.434241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:38.434340Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:38.448638Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:32:38.491402Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616532462222256:2079] 1750797157689783 != 1750797157689786 2025-06-24T20:32:38.496906Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7081, node 1 2025-06-24T20:32:38.733037Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:32:38.741932Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:38.741957Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:38.741966Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:38.742090Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22361 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:39.318850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:39.507840Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 2025-06-24T20:32:39.518027Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T20:32:39.518072Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:39.518672Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****2NEQ (A44A3F55) () has now retryable error message 'Security state is empty' 2025-06-24T20:32:39.518940Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T20:32:39.518963Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:39.519187Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****2NEQ (A44A3F55) () has now retryable error message 'Security state is empty' 2025-06-24T20:32:39.519200Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:773: CanInitLoginToken, database /Root, A2 error Security state is empty 2025-06-24T20:32:39.519214Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:773: CanInitLoginToken, database /Root, A2 error Security state is empty 2025-06-24T20:32:39.519237Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket eyJh****2NEQ (A44A3F55): Security state is empty 2025-06-24T20:32:41.739599Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****2NEQ (A44A3F55) 2025-06-24T20:32:41.739987Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T20:32:41.740014Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:41.740291Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****2NEQ (A44A3F55) () has now retryable error message 'Security state is empty' 2025-06-24T20:32:41.740304Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:773: 
CanInitLoginToken, database /Root, A2 error Security state is empty 2025-06-24T20:32:42.527358Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T20:32:42.715336Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616532462222278:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:42.715447Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:32:44.747347Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****2NEQ (A44A3F55) 2025-06-24T20:32:44.747591Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T20:32:44.747606Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:44.748473Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****2NEQ (A44A3F55) () has now valid token of user1 2025-06-24T20:32:44.748485Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-24T20:32:47.755729Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****2NEQ (A44A3F55) 2025-06-24T20:32:47.756380Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****2NEQ (A44A3F55) () has now valid token of user1 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004678/r3tmp/tmp1GeyF9/pdisk_1.dat 2025-06-24T20:32:50.453829Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:32:50.545514Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:50.545853Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519616588910383016:2079] 1750797170306031 != 1750797170306034 2025-06-24T20:32:50.558772Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:50.558871Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:50.564647Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25331, node 2 2025-06-24T20:32:50.775824Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:50.775854Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:50.775861Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:50.775968Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64215 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:51.284885Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:51.303580Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:32:51.308935Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-24T20:32:51.309044Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700004c308] Connect to grpc://localhost:26124 2025-06-24T20:32:51.311552Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700004c308] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-06-24T20:32:51.349959Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response account { user_account { id: "user1" } } 0: "" 2025-06-24T20:32:51.360306Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700004c308] Response AuthenticateResponse { account { user_account { id: "user1" } } } 2025-06-24T20:32:51.360878Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-24T20:32:54.897631 ... 
IENT DEBUG: grpc_service_client.h:120: [517000100708] Request AuthenticateRequest { iam_token: "**** (8E120919)" } iam_token: "user1" 2025-06-24T20:32:55.483837Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000100708] Status 14 Service Unavailable 2025-06-24T20:32:55.484186Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-24T20:32:55.889225Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:32:56.877135Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket **** (8E120919) 2025-06-24T20:32:56.877191Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-24T20:32:56.877350Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000100708] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" 2025-06-24T20:32:56.880424Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000100708] Status 14 Service Unavailable 2025-06-24T20:32:56.880734Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-24T20:32:58.878690Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket **** (8E120919) 2025-06-24T20:32:58.878729Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-24T20:32:58.878874Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000100708] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response account { user_account { id: "user1" } } 0: "" 2025-06-24T20:32:58.883961Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000100708] Response AuthenticateResponse { account { user_account { id: "user1" } } } 2025-06-24T20:32:58.884409Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-24T20:32:59.866715Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519616607528372529:2161];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:59.866798Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:08.183498Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519616666225369229:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:08.183566Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004678/r3tmp/tmp2zwZLS/pdisk_1.dat 2025-06-24T20:33:08.428128Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:08.428247Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:08.428771Z 
node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:08.447295Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519616666225369211:2079] 1750797188182064 != 1750797188182067 2025-06-24T20:33:08.449497Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2387, node 4 2025-06-24T20:33:08.591883Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:08.591910Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:08.591923Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:08.592097Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12335 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:09.029047Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:33:09.039093Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:33:09.045855Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-24T20:33:09.045888Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:33:09.045897Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T20:33:09.045928Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-24T20:33:09.045985Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [5170000a2d08] Connect to grpc://localhost:13661 2025-06-24T20:33:09.046895Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000a2d08] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response 14: "Service Unavailable" 2025-06-24T20:33:09.067522Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000a2d08] Status 14 Service Unavailable 2025-06-24T20:33:09.071436Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-24T20:33:09.071468Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-24T20:33:09.071646Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000a2d08] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response account { user_account { id: "user1" } } 0: "" 2025-06-24T20:33:09.075006Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [5170000a2d08] Response AuthenticateResponse { account { user_account { id: "user1" } } } 2025-06-24T20:33:09.079468Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-24T20:33:09.225649Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004678/r3tmp/tmp24Wonf/pdisk_1.dat 2025-06-24T20:33:13.503899Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:13.519130Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519616686547986632:2079] 1750797193326010 != 1750797193326013 2025-06-24T20:33:13.521586Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:13.526273Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:13.526374Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:13.536255Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27673, node 5 
2025-06-24T20:33:13.590400Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:13.590430Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:13.590447Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:13.590594Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14414 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:13.893569Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
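
In the node 4 TICKET_PARSER/GRPC_CLIENT trace above, AccessService authentication is retried when the gRPC call returns status 14 (Service Unavailable): the ticket is marked as having a retryable error and the same AuthenticateRequest is re-issued until it succeeds and a valid token for user1@as is cached. The sketch below shows only that retry-on-UNAVAILABLE shape; AuthResult, the authenticate callable and the retry limits are invented for illustration and are not the ticket parser's real interface, which keeps the ticket in a cache and re-schedules the request asynchronously.

#include <chrono>
#include <functional>
#include <optional>
#include <string>
#include <thread>

// Hypothetical result type mirroring the gRPC-style codes seen in the trace:
// 14 == UNAVAILABLE means "retry later", 0 == OK carries the account id.
struct AuthResult {
    int status = 0;            // 0 = OK, 14 = UNAVAILABLE, anything else = permanent error
    std::string account;       // filled on success, e.g. "user1"
};

// Retry an authenticate call while the error is retryable (UNAVAILABLE),
// sleeping between attempts; stop immediately on success or a permanent error.
std::optional<std::string> AuthenticateWithRetry(
        const std::function<AuthResult()>& authenticate,
        int maxAttempts = 5,
        std::chrono::milliseconds delay = std::chrono::milliseconds(100)) {
    for (int attempt = 0; attempt < maxAttempts; ++attempt) {
        AuthResult res = authenticate();
        if (res.status == 0) {
            return res.account;          // valid token, e.g. "user1@as" in the log
        }
        if (res.status != 14) {
            return std::nullopt;         // permanent error: do not retry
        }
        std::this_thread::sleep_for(delay);  // retryable: wait and try again
    }
    return std::nullopt;                 // gave up after maxAttempts
}
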
2025-06-24T20:33:13.902122Z node 5 :TICKET_PARSER ERROR: ticket_parser_impl.h:908: Ticket AKIA****MPLE (B3EDC139): Access key signature is not supported >> Viewer::Cluster10000Tablets [GOOD] >> Viewer::FuzzySearcherLimit1OutOf4 [GOOD] >> Viewer::FuzzySearcherLimit2OutOf4 [GOOD] >> Viewer::ExecuteQueryDoesntExecuteSchemeOperationsInsideTransation >> KqpWorkloadServiceActors::TestPoolFetcherAclValidation [GOOD] >> KqpWorkloadServiceActors::TestPoolFetcherNotExistingPool >> ResourcePoolsDdl::TestPoolSwitchToUnlimitedState [GOOD] >> ResourcePoolsDdl::TestResourcePoolAcl >> TTicketParserTest::LoginEmptyTicketBad [GOOD] >> Viewer::JsonAutocompleteEndOfDatabaseName [GOOD] >> Viewer::JsonAutocompleteScheme >> Cdc::InitialScanDebezium [GOOD] >> Cdc::InitialScanRacyCompleteAndRequest >> GenericFederatedQuery::PostgreSQLOnPremSelectAll ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::LoginEmptyTicketBad [GOOD] Test command err: 2025-06-24T20:32:32.901287Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616513302900033:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:32.903738Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046b0/r3tmp/tmp9leDwH/pdisk_1.dat 2025-06-24T20:32:33.355386Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:33.379328Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616513302899939:2079] 1750797152844647 != 1750797152844650 TServer::EnableGrpc on GrpcPort 27611, node 1 2025-06-24T20:32:33.391862Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:33.391991Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:33.400678Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:32:33.543790Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:33.543811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:33.543818Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:33.543921Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6848 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T20:32:33.910249Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:33.946599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:32:33.980398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:32:34.063311Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T20:32:34.086594Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T20:32:34.086633Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:34.087814Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****xVHA (55AA7204) () has now valid token of user1 2025-06-24T20:32:34.087838Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-24T20:32:36.516364Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616530520213945:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:36.516465Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046b0/r3tmp/tmpv2jsdg/pdisk_1.dat 2025-06-24T20:32:36.684272Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:36.687103Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519616530520213926:2079] 1750797156514061 != 1750797156514064 TServer::EnableGrpc on GrpcPort 19747, node 2 2025-06-24T20:32:36.729085Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:36.731449Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, 
(0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:36.738831Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:32:36.799370Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:36.799398Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:36.799407Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:36.799557Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3246 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:32:37.153310Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:32:37.161198Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:32:37.259280Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T20:32:37.266073Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T20:32:37.266100Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:37.266831Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****0APQ (2B58639A) () has now valid token of user1 2025-06-24T20:32:37.266844Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046b0/r3tmp/tmpwmO5Mb/pdisk_1.dat 2025-06-24T20:32:41.506447Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:32:41.558537Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:41.560084Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519616549464396202:2079] 1750797161373350 != 1750797161373353 2025-06-24T20:32:41.578927Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:41.579016Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:41.587380Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21522, node 3 2025-06-24T20:32:41.642604Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:41.642626Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:41.642631Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:41.642716Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5952 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:32:41.981510Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, fir ... d: 72057594046644480 2025-06-24T20:32:42.167319Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T20:32:42.179512Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T20:32:42.179547Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:32:42.180237Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****pyfA (7A995FFD) () has now valid token of user1 2025-06-24T20:32:42.180250Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-24T20:32:42.181209Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T20:32:42.403377Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:32:46.410558Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****pyfA (7A995FFD) 2025-06-24T20:32:46.411070Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****pyfA (7A995FFD) () has now valid token of user1 2025-06-24T20:32:50.419317Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****pyfA (7A995FFD) 2025-06-24T20:32:50.419766Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****pyfA (7A995FFD) () has now valid token of user1 2025-06-24T20:32:52.182014Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T20:32:54.435441Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****pyfA (7A995FFD) 2025-06-24T20:32:54.435940Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****pyfA (7A995FFD) () has now valid token of user1 2025-06-24T20:32:56.537935Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:32:56.537983Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:59.439303Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****pyfA (7A995FFD) 2025-06-24T20:32:59.439712Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****pyfA (7A995FFD) () has now valid token of user1 2025-06-24T20:33:02.835212Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519616640499865878:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:02.835278Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046b0/r3tmp/tmpTh8TIn/pdisk_1.dat 2025-06-24T20:33:03.057696Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:03.058122Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519616640499865857:2079] 1750797182834188 != 
1750797182834191 2025-06-24T20:33:03.081872Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:03.081976Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:03.086268Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15897, node 4 2025-06-24T20:33:03.235762Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:03.235796Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:03.235809Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:03.235984Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7365 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:03.743884Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
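
Throughout these TICKET_PARSER traces, tickets are printed only in masked form (for example eyJh****pyfA with a short code in parentheses) and are periodically refreshed; a refresh can confirm the token again, fail with a retryable error, or fail permanently (as with "User not found" further down), after which the cached token stops being valid. The sketch below mirrors that shape under stated assumptions: the masking rule, the refresh callable and the refresh period (loosely following the few-second cadence visible in the trace) are guesses for illustration, not the parser's actual behaviour.

#include <chrono>
#include <iostream>
#include <string>

// Masked form for logging: first 4 and last 4 characters with "****" between,
// so raw credentials never appear in the trace. The "(7A995FFD)"-style suffix
// in the real log looks like a short code for the ticket; its exact scheme is
// not visible in the output, so it is omitted here.
std::string MaskTicket(const std::string& ticket) {
    if (ticket.size() <= 8) {
        return "****";
    }
    return ticket.substr(0, 4) + "****" + ticket.substr(ticket.size() - 4);
}

// Possible refresh outcomes, mirroring the trace: re-validated, retryable
// failure, or permanent failure ("User not found") that should not be retried.
enum class ERefreshResult { Valid, Retryable, Permanent };

struct TCachedTicket {
    std::string ticket;
    bool valid = false;
    std::chrono::steady_clock::time_point nextRefresh;
};

// Illustrative refresh step: the refresh() callable and the period are
// hypothetical; the real ticket parser schedules refreshes internally.
template <class TRefreshFn>
void RefreshTicket(TCachedTicket& entry, TRefreshFn refresh,
                   std::chrono::seconds period = std::chrono::seconds(4)) {
    std::cout << "Refreshing ticket " << MaskTicket(entry.ticket) << "\n";
    switch (refresh(entry.ticket)) {
        case ERefreshResult::Valid:
            entry.valid = true;
            entry.nextRefresh = std::chrono::steady_clock::now() + period;
            break;
        case ERefreshResult::Retryable:
            entry.nextRefresh = std::chrono::steady_clock::now() + period;  // try again later
            break;
        case ERefreshResult::Permanent:
            entry.valid = false;  // e.g. "User not found": drop the token, stop retrying
            break;
    }
}
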
2025-06-24T20:33:03.754817Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:33:03.863452Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:33:04.163374Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T20:33:04.194054Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T20:33:04.194101Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T20:33:04.195124Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****Uimw (33A89367) () has now valid token of user1 2025-06-24T20:33:04.195164Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-24T20:33:04.196105Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T20:33:07.835739Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519616640499865878:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:07.835835Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:07.855780Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****Uimw (33A89367) 2025-06-24T20:33:07.856061Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****Uimw (33A89367) () has now permanent error message 'User not found' 2025-06-24T20:33:11.871339Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****Uimw (33A89367) 2025-06-24T20:33:15.028326Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519616697360619727:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:15.028392Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046b0/r3tmp/tmpb48dNT/pdisk_1.dat 2025-06-24T20:33:15.264958Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:15.265068Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:15.271890Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519616697360619706:2079] 1750797195027562 != 1750797195027565 2025-06-24T20:33:15.282725Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:15.285580Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1321, node 5 2025-06-24T20:33:15.370996Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:15.371026Z 
node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:15.371037Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:15.371241Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4511 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T20:33:15.866100Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:33:15.880752Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:33:16.007393Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T20:33:16.047672Z node 5 :TICKET_PARSER ERROR: ticket_parser_impl.h:916: Ticket **** (00000000): Ticket is empty 2025-06-24T20:33:16.051355Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> ResourcePoolsDdl::TestCreateResourcePool [GOOD] >> ResourcePoolsDdl::TestCreateResourcePoolOnServerless >> Cdc::AddIndex [GOOD] >> Cdc::AddStream >> GenericFederatedQuery::YdbManagedSelectAll >> ResourcePoolClassifiersDdl::TestResourcePoolClassifiersPermissions [GOOD] >> ResourcePoolClassifiersDdl::TestResourcePoolClassifierRanks |89.1%| [TA] $(B)/ydb/core/security/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpPg::InsertNoTargetColumns_Serial+useSink [GOOD] >> THealthCheckTest::OrangeGroupIssueWhenDegradedGroupStatus [GOOD] >> THealthCheckTest::OnlyDiskIssueOnSpaceIssues >> Cdc::ResolvedTimestampsMultiplePartitions [GOOD] >> Cdc::ResolvedTimestampsVolatileOutOfOrder >> THealthCheckTest::YellowGroupIssueWhenPartialGroupStatus [GOOD] >> THealthCheckTest::TestTabletIsDead >> GenericFederatedQuery::IcebergHiveSaSelectAll >> KqpWorkloadServiceActors::TestPoolFetcherNotExistingPool [GOOD] >> KqpWorkloadServiceActors::TestDefaultPoolUsePermissions >> KqpPg::TableInsert+useSink [GOOD] >> KqpPg::TableInsert-useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::InsertNoTargetColumns_Serial+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 15247, MsgBus: 17707 2025-06-24T20:30:36.662923Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616014000958822:2161];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:36.671954Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003f36/r3tmp/tmpVJCeVR/pdisk_1.dat 2025-06-24T20:30:37.238155Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:37.238260Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:37.242574Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:37.264941Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616014000958698:2079] 1750797036608309 != 1750797036608312 2025-06-24T20:30:37.278460Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15247, node 1 2025-06-24T20:30:37.416478Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:30:37.416513Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:30:37.416520Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:30:37.416601Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:30:37.662283Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17707 TClient is connected to server localhost:17707 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:30:38.101269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:30:38.115947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 16 2025-06-24T20:30:41.635496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:41.647440Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616014000958822:2161];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:41.647555Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:30:42.000632Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:30:42.013088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:42.127269Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:30:42.168263Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616039770763300:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:42.168390Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:42.168603Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616039770763312:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:42.173506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:30:42.194398Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616039770763314:2315], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T20:30:42.272701Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616039770763366:2457] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } f f t t 18 2025-06-24T20:30:42.957172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:43.036154Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:30:43.041278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:43.133013Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 21 2025-06-24T20:30:43.589708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:43.683262Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:30:43.688255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:43.690166Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:245: Prepare transaction failed. 
txid 281474976710674 at tablet 72075186224037892 errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976710674] at 72075186224037892 while waiting for scan finish) | 2025-06-24T20:30:43.700174Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976710674 at tablet 72075186224037892 status: ERROR errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976710674] at 72075186224037892 while waiting for scan finish) | 2025-06-24T20:30:43.781274Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 23 2025-06-24T20:30:44.242146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:44.365103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:44.445155Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 20 2025-06-24T20:30:44.912442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710684:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:44.991370Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:30:44.995538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part prop ... Connecting -> Connected TServer::EnableGrpc on GrpcPort 10510, node 11 2025-06-24T20:33:04.367926Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:04.367957Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:04.367972Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:04.368169Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15470 2025-06-24T20:33:04.998879Z node 11 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15470 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:05.706087Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:10.442834Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519616675043975256:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:10.443125Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:10.444432Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519616675043975283:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:10.451738Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:10.477865Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7519616675043975285:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:33:10.548138Z node 11 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [11:7519616675043975337:2343] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:10.612413Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:11.565736Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) Trying to start YDB, gRPC: 31136, MsgBus: 31687 2025-06-24T20:33:13.924738Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519616686791654050:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:13.924864Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003f36/r3tmp/tmpB2njKA/pdisk_1.dat 2025-06-24T20:33:14.157972Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:14.158140Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:14.162709Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31136, node 12 2025-06-24T20:33:14.215242Z node 12 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:14.220560Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:7519616686791654031:2079] 1750797193924098 != 1750797193924101 2025-06-24T20:33:14.323912Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:14.323947Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:14.323964Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:14.324158Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31687 2025-06-24T20:33:14.939469Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31687 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:15.360347Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:18.924832Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519616686791654050:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:18.924954Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:20.357660Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519616716856425752:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:20.357829Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:20.360596Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519616716856425764:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:20.369250Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:20.386235Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519616716856425766:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:33:20.491308Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7519616716856425817:2342] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:20.535014Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> Cdc::ShouldBreakLocksOnConcurrentCancelBuildIndex [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentAddStream >> KqpWorkloadServiceDistributed::TestDistributedQueue [GOOD] >> KqpWorkloadServiceDistributed::TestNodeDisconnect >> Viewer::ExecuteQueryDoesntExecuteSchemeOperationsInsideTransation [GOOD] >> Viewer::FloatPointJsonQuery >> KqpWorkloadService::WorkloadServiceDisabledByFeatureFlagOnServerless [GOOD] >> KqpWorkloadService::WorkloadServiceDisabledByInvalidDatabasePath >> GenericFederatedQuery::IcebergHadoopBasicSelectAll |89.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut >> ResourcePoolClassifiersDdl::TestCreateResourcePoolClassifier [GOOD] |89.1%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/test-results/unittest/{meta.json ... results_accumulator.log} |89.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut >> ResourcePoolClassifiersDdl::TestCreateResourcePoolClassifierOnServerless |89.1%| [TA] {RESULT} $(B)/ydb/core/security/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |89.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut >> KqpWorkloadService::TestQueryCancelAfterPoolWithLimits [GOOD] >> KqpWorkloadService::TestLargeConcurrentQueryLimit >> KqpWorkloadService::TestQueueSizeManyQueries [GOOD] >> KqpWorkloadService::TestZeroQueueSize >> Viewer::SelectStringWithBase64Encoding [GOOD] >> Viewer::QueryExecuteScript >> ResourcePoolsDdl::TestResourcePoolAcl [GOOD] >> ResourcePoolsDdl::TestWorkloadConfigOnServerless >> Cdc::InitialScanRacyCompleteAndRequest [GOOD] >> Cdc::InitialScanUpdatedRows >> KqpWorkloadServiceActors::TestDefaultPoolUsePermissions [GOOD] >> KqpWorkloadServiceActors::TestDefaultPoolAdminPermissions >> Viewer::JsonAutocompleteSimilarDatabaseNamePOST [GOOD] >> Viewer::JsonAutocompleteSimilarDatabaseNameLowerCase >> GenericFederatedQuery::IcebergHadoopSaSelectAll [GOOD] >> GenericFederatedQuery::IcebergHadoopSaSelectConstant >> Viewer::SelectStringWithNoBase64Encoding [GOOD] >> Viewer::ServerlessNodesPage >> Viewer::JsonAutocompleteScheme [GOOD] >> Viewer::JsonAutocompleteEmptyColumns >> KqpWorkloadService::WorkloadServiceDisabledByInvalidDatabasePath [GOOD] >> KqpWorkloadService::TestZeroQueueSizeManyQueries >> GenericFederatedQuery::IcebergHiveTokenSelectAll [GOOD] >> GenericFederatedQuery::IcebergHiveTokenSelectConstant >> GenericFederatedQuery::IcebergHiveBasicSelectAll [GOOD] >> GenericFederatedQuery::IcebergHiveBasicSelectConstant >> TAsyncIndexTests::CdcAndMergeWithReboots[PipeResets] [GOOD] >> GenericFederatedQuery::IcebergHadoopTokenSelectAll [GOOD] >> GenericFederatedQuery::IcebergHadoopTokenSelectConstant >> Cdc::AddStream [GOOD] >> Cdc::AwsRegion >> THealthCheckTest::OnlyDiskIssueOnSpaceIssues [GOOD] >> THealthCheckTest::OnlyDiskIssueOnInitialPDisks >> Viewer::FloatPointJsonQuery [GOOD] >> Viewer::AuthorizeYdbTokenWithDatabaseAttributes >> GenericFederatedQuery::ClickHouseManagedSelectAll [GOOD] >> GenericFederatedQuery::ClickHouseManagedSelectConstant >> GenericFederatedQuery::PostgreSQLOnPremSelectAll [GOOD] >> GenericFederatedQuery::PostgreSQLOnPremSelectConstant >> KqpWorkloadServiceActors::TestDefaultPoolAdminPermissions [GOOD] >> KqpWorkloadServiceDistributed::TestDistributedLargeConcurrentQueryLimit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::CdcAndMergeWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T20:30:38.564317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, 
WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:30:38.564440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:38.564482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:30:38.564521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:30:38.564576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:30:38.564625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:30:38.564709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:38.564799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:30:38.565684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:30:38.566077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:30:38.743186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T20:30:38.743262Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:38.744194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T20:30:38.763862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:30:38.764491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:30:38.764680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:30:38.773818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:30:38.774146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:30:38.774973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:38.775258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:30:38.778609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, 
at schemeshard: 72057594046678944 2025-06-24T20:30:38.778827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:30:38.780142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:38.780214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:38.780433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:30:38.780504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:30:38.780555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:30:38.780683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T20:30:38.788698Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:30:38.944141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:38.944417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:38.944668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:30:38.944734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:30:38.945109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:30:38.945256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:38.954305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: 
Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:38.954575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:30:38.954842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:38.954922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:30:38.954993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:30:38.955044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:30:38.957985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:38.958073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:38.958150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:30:38.960810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:38.960878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:38.960949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:38.961021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:30:38.965290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:30:38.968374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:30:38.968594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:30:38.969793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 
2025-06-24T20:30:38.969959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969454 } } Step: 5000001 Media ... ion { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 2 IsBackup: false CdcStreams { Name: "Stream" Mode: ECdcStreamModeKeysOnly PathId { OwnerId: 72057594046678944 LocalId: 6 } State: ECdcStreamStateReady SchemaVersion: 1 Format: ECdcStreamFormatProto VirtualTimestamps: false AwsRegion: "" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 
PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 2 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:33:31.727753Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409551:2][72075186233409546][54:1151:2930] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-24T20:33:31.727910Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409551:2][54:1120:2930] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-24T20:33:31.728118Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409551:2][72075186233409546][54:1151:2930] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750797211671164 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750797211671164 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 },{ Order: 5 Group: 1750797211671164 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 }] } 2025-06-24T20:33:31.732218Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409551:2][72075186233409546][54:1151:2930] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 5 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 5 2025-06-24T20:33:31.732369Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409551:2][54:1120:2930] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-24T20:33:32.048169Z node 54 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T20:33:32.048509Z node 54 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path 
"/MyRoot/Table/UserDefinedIndex/indexImplTable" took 389us result status StatusSuccess 2025-06-24T20:33:32.049561Z node 54 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 
RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 2 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpWorkloadService::TestZeroQueueSize [GOOD] >> KqpWorkloadService::TestQueryCancelAfterUnlimitedPool >> GenericFederatedQuery::YdbManagedSelectAll [GOOD] >> GenericFederatedQuery::YdbManagedSelectConstant >> Cdc::ShouldBreakLocksOnConcurrentAddStream [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentAlterStream >> ResourcePoolsDdl::TestCreateResourcePoolOnServerless [GOOD] >> ResourcePoolsDdl::TestDefaultPoolRestrictions >> THealthCheckTest::TestTabletIsDead [GOOD] >> THealthCheckTest::TestReBootingTabletIsDead >> GenericFederatedQuery::IcebergHiveSaSelectAll [GOOD] >> GenericFederatedQuery::IcebergHiveSaSelectConstant >> KqpWorkloadServiceTables::TestCreateWorkloadSerivceTables [GOOD] >> KqpWorkloadServiceTables::TestPoolStateFetcherActor >> GenericFederatedQuery::YdbFilterPushdown >> Cdc::InitialScanUpdatedRows [GOOD] >> Cdc::InitialScanAndLimits >> Cdc::ResolvedTimestampsVolatileOutOfOrder [GOOD] >> Cdc::SequentialSplitMerge >> THiveTest::TestHiveNoBalancingWithLowResourceUsage [GOOD] >> THiveTest::TestLockTabletExecution >> ResourcePoolClassifiersDdl::TestResourcePoolClassifierRanks [GOOD] >> ResourcePoolClassifiersDdl::TestExplicitPoolId >> GenericFederatedQuery::IcebergHadoopSaSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHadoopSaSelectCount >> GenericFederatedQuery::IcebergHadoopBasicSelectAll [GOOD] >> GenericFederatedQuery::IcebergHadoopBasicSelectConstant >> KqpWorkloadService::TestZeroQueueSizeManyQueries [GOOD] >> KqpWorkloadServiceActors::TestCreateDefaultPool >> Viewer::AuthorizeYdbTokenWithDatabaseAttributes [GOOD] >> Viewer::JsonAutocompleteSimilarDatabaseNameLowerCase [GOOD] >> Viewer::JsonAutocompleteSchemePOST >> THealthCheckTest::OnlyDiskIssueOnInitialPDisks [GOOD] >> THealthCheckTest::ProtobufBelowLimitFor10VdisksIssues >> ResourcePoolsDdl::TestDefaultPoolRestrictions [GOOD] >> ResourcePoolsDdl::TestAlterResourcePool >> Viewer::JsonAutocompleteEmptyColumns [GOOD] >> 
Viewer::JsonAutocompleteColumnsPOST ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::AuthorizeYdbTokenWithDatabaseAttributes [GOOD] Test command err: 2025-06-24T20:33:14.338132Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:487:2387], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:14.338509Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:14.338981Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:470:2169], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:14.339077Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:14.339439Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:14.339547Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:14.817776Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:15.051667Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:33:15.066315Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:434} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-24T20:33:15.647169Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 62360, node 1 TClient is connected to server localhost:19537 2025-06-24T20:33:16.089809Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:16.089875Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:16.089913Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:16.090459Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:33:19.523633Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519616715867154031:2132];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:19.585241Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:33:19.763580Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519616715867153937:2079] 1750797199512219 != 1750797199512222 2025-06-24T20:33:19.776617Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:19.789058Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:19.789179Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:19.792357Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13947, node 3 2025-06-24T20:33:19.871215Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will 
use file: (empty maybe) 2025-06-24T20:33:19.871243Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:19.871256Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:19.871433Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16319 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:20.292479Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:20.306160Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:33:20.317891Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:20.321508Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-24T20:33:20.331931Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-24T20:33:20.525958Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:33:22.995969Z node 3 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-24T20:33:22.996052Z node 3 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-24T20:33:23.579691Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519616733047023794:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:23.579914Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:23.589146Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519616733047023806:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:23.595024Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:23.611778Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519616733047023808:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-24T20:33:23.692031Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519616733047023862:2355] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:23.979610Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=YzYxMWJmYWMtOTU2YWFhOTctN2U0MzVjNzEtYjAxMWE5MGQ=, ActorId: [3:7519616733047023792:2300], ActorState: ExecuteState, TraceId: 01jyhtae3v8g8gkeh54eex760t, Create QueryResponse for error on request, msg: Scheme operations cannot be executed inside transaction 2025-06-24T20:33:26.220643Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519616743686562138:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:26.220708Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:26.497687Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:26.500034Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:26.500112Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:26.502612Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9680, node 4 2025-06-24T20:33:26.723832Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:26.723855Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:26.723863Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:26.723988Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21274 2025-06-24T2 ... 
4a80] received request Name# ListDatabases ok# false data# peer# 2025-06-24T20:33:38.854109Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000541580] received request Name# RemoveDatabase ok# false data# peer# 2025-06-24T20:33:38.854366Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00053c180] received request Name# DescribeDatabaseOptions ok# false data# peer# 2025-06-24T20:33:38.854519Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00053dd80] received request Name# GetScaleRecommendation ok# false data# peer# 2025-06-24T20:33:38.854612Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00052e880] received request Name# ListEndpoints ok# false data# peer# 2025-06-24T20:33:38.854750Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00053cf80] received request Name# WhoAmI ok# false data# peer# 2025-06-24T20:33:38.854851Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00052e180] received request Name# NodeRegistration ok# false data# peer# 2025-06-24T20:33:38.854977Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00052ef80] received request Name# Scan ok# false data# peer# 2025-06-24T20:33:38.855096Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0002b2680] received request Name# GetShardLocations ok# false data# peer# 2025-06-24T20:33:38.855242Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00052fd80] received request Name# DescribeTable ok# false data# peer# 2025-06-24T20:33:38.855512Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000530480] received request Name# CreateSnapshot ok# false data# peer# 2025-06-24T20:33:38.855749Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000532780] received request Name# RefreshSnapshot ok# false data# peer# 2025-06-24T20:33:38.856000Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000531280] received request Name# DiscardSnapshot ok# false data# peer# 2025-06-24T20:33:38.856262Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000531980] received request Name# List ok# false data# peer# 2025-06-24T20:33:38.856440Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000532e80] received request Name# RateLimiter/CreateResource ok# false data# peer# 2025-06-24T20:33:38.856515Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000533c80] received request Name# RateLimiter/AlterResource ok# false data# peer# 2025-06-24T20:33:38.856683Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000534380] received request Name# RateLimiter/DropResource ok# false data# peer# 2025-06-24T20:33:38.856758Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000535180] received request Name# RateLimiter/ListResources ok# false data# peer# 2025-06-24T20:33:38.856931Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000535880] received request Name# RateLimiter/DescribeResource ok# false data# peer# 2025-06-24T20:33:38.856987Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000536680] received request Name# RateLimiter/AcquireResource ok# false data# peer# 2025-06-24T20:33:38.857154Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000537b80] received request Name# CreateStream ok# false data# peer# 2025-06-24T20:33:38.857225Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000539080] received request Name# ListStreams ok# false data# peer# 2025-06-24T20:33:38.857415Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000539780] received request Name# DeleteStream ok# false data# peer# 2025-06-24T20:33:38.857723Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000536d80] received request Name# DescribeStream ok# false data# peer# 2025-06-24T20:33:38.857978Z node 5 :GRPC_SERVER 
DEBUG: logger.cpp:36: [0x51b000540780] received request Name# ListShards ok# false data# peer# 2025-06-24T20:33:38.858241Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f6880] received request Name# SetWriteQuota ok# false data# peer# 2025-06-24T20:33:38.858502Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f6f80] received request Name# UpdateStream ok# false data# peer# 2025-06-24T20:33:38.858529Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000539e80] received request Name# PutRecord ok# false data# peer# 2025-06-24T20:33:38.858743Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00053a580] received request Name# PutRecords ok# false data# peer# 2025-06-24T20:33:38.858774Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00053c880] received request Name# GetRecords ok# false data# peer# 2025-06-24T20:33:38.858988Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00053d680] received request Name# GetShardIterator ok# false data# peer# 2025-06-24T20:33:38.859025Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00053e480] received request Name# SubscribeToShard ok# false data# peer# 2025-06-24T20:33:38.859249Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000540e80] received request Name# DescribeLimits ok# false data# peer# 2025-06-24T20:33:38.859305Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004fa780] received request Name# DescribeStreamSummary ok# false data# peer# 2025-06-24T20:33:38.859477Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004fae80] received request Name# DecreaseStreamRetentionPeriod ok# false data# peer# 2025-06-24T20:33:38.859552Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004fa080] received request Name# IncreaseStreamRetentionPeriod ok# false data# peer# 2025-06-24T20:33:38.859736Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f9980] received request Name# UpdateShardCount ok# false data# peer# 2025-06-24T20:33:38.859785Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f9280] received request Name# UpdateStreamMode ok# false data# peer# 2025-06-24T20:33:38.859977Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f8b80] received request Name# RegisterStreamConsumer ok# false data# peer# 2025-06-24T20:33:38.860040Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00050aa80] received request Name# DeregisterStreamConsumer ok# false data# peer# 2025-06-24T20:33:38.860238Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000509c80] received request Name# DescribeStreamConsumer ok# false data# peer# 2025-06-24T20:33:38.860289Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000508e80] received request Name# ListStreamConsumers ok# false data# peer# 2025-06-24T20:33:38.860498Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000508780] received request Name# AddTagsToStream ok# false data# peer# 2025-06-24T20:33:38.860526Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000507980] received request Name# DisableEnhancedMonitoring ok# false data# peer# 2025-06-24T20:33:38.860752Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000506b80] received request Name# EnableEnhancedMonitoring ok# false data# peer# 2025-06-24T20:33:38.860769Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000519f80] received request Name# ListTagsForStream ok# false data# peer# 2025-06-24T20:33:38.860999Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000504880] received request Name# MergeShards ok# false data# peer# 2025-06-24T20:33:38.861130Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000504180] received request Name# RemoveTagsFromStream ok# false 
data# peer# 2025-06-24T20:33:38.861243Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f8480] received request Name# SplitShard ok# false data# peer# 2025-06-24T20:33:38.861359Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f7d80] received request Name# StartStreamEncryption ok# false data# peer# 2025-06-24T20:33:38.861505Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f7680] received request Name# StopStreamEncryption ok# false data# peer# 2025-06-24T20:33:38.861621Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f6180] received request Name# SelfCheck ok# false data# peer# 2025-06-24T20:33:38.861757Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f5a80] received request Name# NodeCheck ok# false data# peer# 2025-06-24T20:33:38.861859Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f3780] received request Name# CreateSession ok# false data# peer# 2025-06-24T20:33:38.861987Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f3080] received request Name# DeleteSession ok# false data# peer# 2025-06-24T20:33:38.862095Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f2980] received request Name# AttachSession ok# false data# peer# 2025-06-24T20:33:38.862265Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f1b80] received request Name# BeginTransaction ok# false data# peer# 2025-06-24T20:33:38.862397Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f1480] received request Name# CommitTransaction ok# false data# peer# 2025-06-24T20:33:38.862567Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f0d80] received request Name# RollbackTransaction ok# false data# peer# 2025-06-24T20:33:38.862641Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f5380] received request Name# ExecuteQuery ok# false data# peer# 2025-06-24T20:33:38.862815Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f4580] received request Name# ExecuteScript ok# false data# peer# 2025-06-24T20:33:38.862865Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f3e80] received request Name# FetchScriptResults ok# false data# peer# 2025-06-24T20:33:38.863081Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004f0680] received request Name# ExecuteTabletMiniKQL ok# false data# peer# 2025-06-24T20:33:38.863128Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004eff80] received request Name# ChangeTabletSchema ok# false data# peer# 2025-06-24T20:33:38.863415Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004ef880] received request Name# RestartTablet ok# false data# peer# 2025-06-24T20:33:38.863680Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004ef180] received request Name# CreateLogStore ok# false data# peer# 2025-06-24T20:33:38.863941Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004eea80] received request Name# DescribeLogStore ok# false data# peer# 2025-06-24T20:33:38.864213Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004ee380] received request Name# DropLogStore ok# false data# peer# 2025-06-24T20:33:38.864519Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004edc80] received request Name# AlterLogStore ok# false data# peer# 2025-06-24T20:33:38.864803Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004ed580] received request Name# CreateLogTable ok# false data# peer# 2025-06-24T20:33:38.864974Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004ece80] received request Name# DescribeLogTable ok# false data# peer# 2025-06-24T20:33:38.865073Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004ec780] received request Name# DropLogTable ok# false data# peer# 
2025-06-24T20:33:38.865232Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004ec080] received request Name# AlterLogTable ok# false data# peer# 2025-06-24T20:33:38.865337Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004eb980] received request Name# Login ok# false data# peer# 2025-06-24T20:33:38.865514Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004db680] received request Name# DescribeReplication ok# false data# peer# 2025-06-24T20:33:38.865631Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004daf80] received request Name# DescribeView ok# false data# peer# >> Cdc::AwsRegion [GOOD] >> GenericFederatedQuery::IcebergHiveBasicSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHiveBasicSelectCount >> GenericFederatedQuery::IcebergHadoopTokenSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHadoopTokenSelectCount >> ResourcePoolsDdl::TestWorkloadConfigOnServerless [GOOD] >> ResourcePoolsSysView::TestResourcePoolsSysViewOnServerless >> GenericFederatedQuery::IcebergHiveTokenSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHiveTokenSelectCount >> Viewer::ServerlessNodesPage [GOOD] >> Viewer::ServerlessWithExclusiveNodes >> TPartitionTests::CorrectRange_Commit >> THiveTest::TestHiveBalancerWithFollowers [GOOD] >> THiveTest::TestHiveBalancerWithLimit >> KqpWorkloadService::TestLargeConcurrentQueryLimit [GOOD] >> KqpWorkloadService::TestLessConcurrentQueryLimit >> GenericFederatedQuery::PostgreSQLOnPremSelectConstant [GOOD] >> GenericFederatedQuery::PostgreSQLSelectCount ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_exchange/unittest >> Cdc::AwsRegion [GOOD] Test command err: 2025-06-24T20:30:34.764486Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:30:34.764789Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:30:34.764944Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004761/r3tmp/tmpAgBAl6/pdisk_1.dat 2025-06-24T20:30:35.184048Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:30:35.189197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:35.250871Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:35.257105Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797030945283 != 1750797030945287 2025-06-24T20:30:35.308691Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:35.308858Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:35.321025Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:35.410192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:35.455327Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T20:30:35.455588Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:30:35.495779Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:30:35.495904Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:30:35.497633Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T20:30:35.497718Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:30:35.497765Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:30:35.498123Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:30:35.498272Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:30:35.498371Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T20:30:35.509296Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:30:35.555354Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:30:35.555664Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:30:35.555830Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T20:30:35.555890Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:30:35.555941Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:30:35.555992Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:30:35.556609Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:30:35.556731Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:30:35.556843Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:30:35.556895Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:30:35.556942Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:30:35.557000Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:30:35.557166Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T20:30:35.557782Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:30:35.558073Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:30:35.558185Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:30:35.560393Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:30:35.571388Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:30:35.571564Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T20:30:35.732373Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:659:2549], serverId# [1:660:2550], sessionId# [0:0:0] 2025-06-24T20:30:35.741232Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T20:30:35.741355Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:30:35.742627Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:30:35.742702Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T20:30:35.742794Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T20:30:35.743092Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T20:30:35.745981Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T20:30:35.746583Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:30:35.746682Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T20:30:35.763356Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T20:30:35.763986Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:30:35.767139Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T20:30:35.767232Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:30:35.768418Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T20:30:35.768523Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:30:35.772319Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:30:35.772400Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:30:35.772485Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T20:30:35.772563Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T20:30:35.772648Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T20:30:35.772772Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:30:35.774391Z node 1 :CHANGE_EXCHANGE 
DEBUG: change_sender.cpp:153: [ChangeSender][72075186224037888:1][1:643:2539][Inactive] Handle NKikimrChangeExchange.TEvActivateSender 2025-06-24T20:30:35.779796Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:30:35.782616Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T20:30:35.782708Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T20:30:35.783924Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T20:30:41.173234Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:30:41.173826Z node 2 :METADATA ... 037891' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' size 426 >>>>> GetRecords path=/Root/Table/Stream1 partitionId=0 2025-06-24T20:33:42.240284Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-24T20:33:42.240421Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream1/streamImpl' partition 0 2025-06-24T20:33:42.241652Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 2 Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 0 max time lag 0ms effective offset 0 2025-06-24T20:33:42.241778Z node 23 :PERSQUEUE DEBUG: subscriber.cpp:68: waiting read cookie 2 partition 0 user $without_consumer offset 0 count 10000 size 26214400 timeout 0 2025-06-24T20:33:42.241983Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 0 blobs 2025-06-24T20:33:42.242136Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:678: [PQ: 72075186224037889, Partition: 0, State: StateIdle] waiting read cookie 2 partition 0 read timeout for $without_consumer offset 0 2025-06-24T20:33:42.242303Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T20:33:42.255731Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037891, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 341 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T20:33:42.255974Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037891, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:33:42.256202Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037891, Partition: 0, State: StateIdle] Answering for message sourceid: '\00072075186224037888', Topic: 'Table/Stream2/streamImpl', Partition: 0, SeqNo: 2, partNo: 0, Offset: 0 is stored on disk 2025-06-24T20:33:42.256835Z node 23 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037891, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=426, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:33:42.257002Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 342 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T20:33:42.257065Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-24T20:33:42.257132Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Answering for message sourceid: '\00072075186224037888', Topic: 'Table/Stream1/streamImpl', Partition: 0, SeqNo: 1, partNo: 0, Offset: 0 is stored on disk 2025-06-24T20:33:42.257366Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 1 2025-06-24T20:33:42.257539Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 1 2025-06-24T20:33:42.257666Z node 23 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037889, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=427, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:33:42.257803Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-24T20:33:42.258078Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-24T20:33:42.258247Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 3 Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer offset 0 count 1 size 1024000 endOffset 1 max time lag 0ms effective offset 0 2025-06-24T20:33:42.258897Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 3 added 1 blobs, size 427 count 1 last offset 0, current partition end offset: 1 2025-06-24T20:33:42.259038Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 3. Send blob request. 2025-06-24T20:33:42.262088Z node 23 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:160: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][23:1124:2664] Handle NKikimrClient.TResponse { SessionId: TxId: Success { Response: Status: 1 ErrorCode: OK PartitionResponse { CmdWriteResult { AlreadyWritten: false SourceId: "\00072075186224037888" SeqNo: 1 Offset: 0 WriteTimestampMS: 2501 PartitionQuotedTimeMs: 0 TotalTimeInPartitionQueueMs: 0 WriteTimeMs: 0 TopicQuotedTimeMs: 0 WrittenInTx: false } Cookie: 1 } } } 2025-06-24T20:33:42.262356Z node 23 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:160: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037891][23:1125:2763] Handle NKikimrClient.TResponse { SessionId: TxId: Success { Response: Status: 1 ErrorCode: OK PartitionResponse { CmdWriteResult { AlreadyWritten: false SourceId: "\00072075186224037888" SeqNo: 2 Offset: 0 WriteTimestampMS: 2501 PartitionQuotedTimeMs: 0 TotalTimeInPartitionQueueMs: 0 WriteTimeMs: 0 TopicQuotedTimeMs: 0 WrittenInTx: false } Cookie: 1 } } } 2025-06-24T20:33:42.262492Z node 23 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 1 parts_count 0 source 1 size 427 accessed 0 times before, last time 1970-01-01T00:00:02.000000Z 2025-06-24T20:33:42.262644Z node 23 :PERSQUEUE DEBUG: read.h:121: Reading cookie 3. All 1 blobs are from cache. 
2025-06-24T20:33:42.262826Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-24T20:33:42.263267Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 407 from pos 0 cbcount 1 2025-06-24T20:33:42.263603Z node 23 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037888:1][23:827:2664] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-06-24T20:33:42.263751Z node 23 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037888:1][23:982:2763] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-06-24T20:33:42.263926Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:964: Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer readTimeStamp done, result 2501 queuesize 0 startOffset 0 2025-06-24T20:33:42.264213Z node 23 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037889' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-24T20:33:42.264486Z node 23 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 2, at tablet# 72075186224037888 2025-06-24T20:33:42.264586Z node 23 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 1, at tablet: 72075186224037888 2025-06-24T20:33:42.264821Z node 23 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 2, at tablet: 72075186224037888 2025-06-24T20:33:42.276303Z node 23 :TX_DATASHARD INFO: datashard_change_sending.cpp:335: TTxRemoveChangeRecords Complete: removed# 2, left# 0, at tablet# 72075186224037888 >>>>> GetRecords path=/Root/Table/Stream1 partitionId=0 2025-06-24T20:33:42.566190Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-24T20:33:42.566271Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream1/streamImpl' partition 0 2025-06-24T20:33:42.566439Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 4 Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 1 max time lag 0ms effective offset 0 2025-06-24T20:33:42.567114Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 4 added 1 blobs, size 427 count 1 last offset 0, current partition end offset: 1 2025-06-24T20:33:42.567291Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 4. Send blob request. 2025-06-24T20:33:42.567526Z node 23 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 1 parts_count 0 source 1 size 427 accessed 1 times before, last time 1970-01-01T00:00:02.000000Z 2025-06-24T20:33:42.567662Z node 23 :PERSQUEUE DEBUG: read.h:121: Reading cookie 4. All 1 blobs are from cache. 2025-06-24T20:33:42.567815Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-24T20:33:42.568216Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 407 from pos 0 cbcount 1 2025-06-24T20:33:42.569382Z node 23 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037889' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-24T20:33:42.569612Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 >>>>> GetRecords path=/Root/Table/Stream2 partitionId=0 2025-06-24T20:33:42.571863Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-24T20:33:42.572014Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037891] got client message batch for topic 'Table/Stream2/streamImpl' partition 0 2025-06-24T20:33:42.573610Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037891, Partition: 0, State: StateIdle] read cookie 2 Topic 'Table/Stream2/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 1 max time lag 0ms effective offset 0 2025-06-24T20:33:42.574369Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037891, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 426 count 1 last offset 0, current partition end offset: 1 2025-06-24T20:33:42.574519Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037891, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 2025-06-24T20:33:42.574736Z node 23 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 1 parts_count 0 source 1 size 426 accessed 0 times before, last time 1970-01-01T00:00:02.000000Z 2025-06-24T20:33:42.574854Z node 23 :PERSQUEUE DEBUG: read.h:121: Reading cookie 2. All 1 blobs are from cache. 2025-06-24T20:33:42.575011Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-24T20:33:42.575420Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 406 from pos 0 cbcount 1 2025-06-24T20:33:42.576336Z node 23 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037891' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-24T20:33:42.576564Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 >> THiveTest::TestLockTabletExecution [GOOD] >> THiveTest::TestLockTabletExecutionBadOwner >> TPartitionTests::CorrectRange_Commit [GOOD] >> GenericFederatedQuery::ClickHouseManagedSelectConstant [GOOD] >> GenericFederatedQuery::ClickHouseSelectCount >> TPartitionTests::CorrectRange_Multiple_Consumers >> Cdc::ShouldBreakLocksOnConcurrentAlterStream [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentDropStream >> Viewer::QueryExecuteScript [FAIL] >> Viewer::Plan2SvgOK >> ResourcePoolClassifiersDdl::TestCreateResourcePoolClassifierOnServerless [GOOD] >> ResourcePoolClassifiersDdl::TestAlterResourcePoolClassifier >> KqpWorkloadServiceActors::TestCreateDefaultPool [GOOD] >> KqpWorkloadServiceActors::TestCpuLoadActor >> TPartitionTests::CorrectRange_Multiple_Consumers [GOOD] >> TPartitionTests::ConflictingTxIsAborted >> GenericFederatedQuery::YdbManagedSelectConstant [GOOD] >> GenericFederatedQuery::YdbSelectCount >> TPartitionTests::ConflictingTxIsAborted [GOOD] >> TPartitionTests::ConflictingTxProceedAfterRollback >> KqpWorkloadServiceTables::TestPoolStateFetcherActor [GOOD] >> KqpWorkloadServiceTables::TestCleanupOnServiceRestart >> Cdc::SequentialSplitMerge [GOOD] >> Cdc::MustNotLoseSchemaSnapshot >> TPQTest::TestWaitInOwners >> Cdc::InitialScanAndLimits [GOOD] >> Cdc::InitialScanComplete >> GenericFederatedQuery::YdbFilterPushdown [GOOD] >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionOperationId1 >> VDiskTest::HugeBlobWrite [GOOD] >> GenericFederatedQuery::IcebergHiveSaSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHiveSaSelectCount ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest >> VDiskTest::HugeBlobWrite [GOOD] Test command err: Put id# [29:1:1:0:0:1048576:1] totalSize# 0 blobValueIndex# 45 Trim Put id# [25:1:1:0:0:1572864:1] totalSize# 1048576 blobValueIndex# 56 Put id# [8:1:1:0:0:40960:1] totalSize# 2621440 blobValueIndex# 20 Put id# [70:1:1:0:0:589824:1] totalSize# 2662400 blobValueIndex# 30 Change MinHugeBlobSize# 8192 Put id# [84:1:1:0:0:10:1] totalSize# 3252224 blobValueIndex# 7 Put id# [68:1:1:0:0:1048576:1] totalSize# 3252234 blobValueIndex# 47 Put id# [40:1:1:0:0:589824:1] totalSize# 4300810 blobValueIndex# 37 Put id# [31:1:1:0:0:10:1] totalSize# 4890634 blobValueIndex# 3 Put id# [38:1:1:0:0:10:1] totalSize# 4890644 blobValueIndex# 8 Put id# [5:1:1:0:0:1572864:1] totalSize# 4890654 blobValueIndex# 54 Put id# [30:1:1:0:0:1048576:1] totalSize# 6463518 blobValueIndex# 40 Put id# [29:1:2:0:0:1048576:1] totalSize# 7512094 blobValueIndex# 44 Put id# [100:1:1:0:0:40960:1] totalSize# 8560670 blobValueIndex# 26 Change MinHugeBlobSize# 524288 Restart Put id# [14:1:1:0:0:40960:1] totalSize# 8601630 blobValueIndex# 29 Change MinHugeBlobSize# 8192 Trim Put id# [23:1:1:0:0:1572864:1] totalSize# 8642590 blobValueIndex# 52 Put id# [36:1:1:0:0:1572864:1] totalSize# 10215454 blobValueIndex# 59 Trim Put id# [14:1:2:0:0:589824:1] totalSize# 11788318 blobValueIndex# 37 Change MinHugeBlobSize# 61440 Put id# [18:1:1:0:0:40960:1] totalSize# 12378142 blobValueIndex# 25 Trim Put id# [61:1:1:0:0:10:1] totalSize# 12419102 blobValueIndex# 0 Trim Put id# [89:1:1:0:0:1572864:1] totalSize# 12419112 blobValueIndex# 51 Put id# [5:1:2:0:0:40960:1] totalSize# 13991976 blobValueIndex# 20 Change MinHugeBlobSize# 65536 Put id# 
[81:1:1:0:0:1048576:1] totalSize# 14032936 blobValueIndex# 41 Change MinHugeBlobSize# 61440 Put id# [68:1:2:0:0:10:1] totalSize# 15081512 blobValueIndex# 2 Put id# [79:1:1:0:0:40960:1] totalSize# 15081522 blobValueIndex# 29 Trim Put id# [18:1:2:0:0:40960:1] totalSize# 15122482 blobValueIndex# 27 Trim Put id# [9:1:1:0:0:1572864:1] totalSize# 15163442 blobValueIndex# 51 Put id# [90:1:1:0:0:40960:1] totalSize# 16736306 blobValueIndex# 23 Put id# [18:1:3:0:0:1572864:1] totalSize# 16777266 blobValueIndex# 59 Put id# [31:1:2:0:0:1024:1] totalSize# 18350130 blobValueIndex# 15 Put id# [98:1:1:0:0:1024:1] totalSize# 18351154 blobValueIndex# 11 Change MinHugeBlobSize# 524288 Put id# [79:1:2:0:0:1048576:1] totalSize# 18352178 blobValueIndex# 46 Put id# [15:1:1:0:0:10:1] totalSize# 19400754 blobValueIndex# 5 Put id# [37:1:1:0:0:1048576:1] totalSize# 19400764 blobValueIndex# 40 Change MinHugeBlobSize# 65536 Put id# [27:1:1:0:0:1048576:1] totalSize# 20449340 blobValueIndex# 47 Put id# [84:1:2:0:0:1572864:1] totalSize# 21497916 blobValueIndex# 52 Put id# [56:1:1:0:0:1024:1] totalSize# 23070780 blobValueIndex# 15 Restart Put id# [25:1:2:0:0:1048576:1] totalSize# 23071804 blobValueIndex# 49 Put id# [65:1:1:0:0:40960:1] totalSize# 24120380 blobValueIndex# 25 Put id# [68:1:3:0:0:10:1] totalSize# 24161340 blobValueIndex# 6 Put id# [2:1:1:0:0:1048576:1] totalSize# 24161350 blobValueIndex# 45 Put id# [76:1:1:0:0:589824:1] totalSize# 25209926 blobValueIndex# 36 Put id# [23:1:2:0:0:1024:1] totalSize# 25799750 blobValueIndex# 14 Trim Put id# [20:1:1:0:0:1024:1] totalSize# 25800774 blobValueIndex# 18 Put id# [17:1:1:0:0:1024:1] totalSize# 25801798 blobValueIndex# 10 Trim Put id# [59:1:1:0:0:1048576:1] totalSize# 25802822 blobValueIndex# 41 Put id# [47:1:1:0:0:589824:1] totalSize# 26851398 blobValueIndex# 34 Change MinHugeBlobSize# 12288 Put id# [99:1:1:0:0:10:1] totalSize# 27441222 blobValueIndex# 7 Trim Put id# [61:1:2:0:0:1048576:1] totalSize# 27441232 blobValueIndex# 49 Change MinHugeBlobSize# 65536 Put id# [89:1:2:0:0:1048576:1] totalSize# 28489808 blobValueIndex# 44 Put id# [82:1:1:0:0:1024:1] totalSize# 29538384 blobValueIndex# 11 Put id# [2:1:2:0:0:589824:1] totalSize# 29539408 blobValueIndex# 30 Put id# [62:1:1:0:0:40960:1] totalSize# 30129232 blobValueIndex# 25 Restart Put id# [45:1:1:0:0:40960:1] totalSize# 30170192 blobValueIndex# 28 Trim Put id# [47:1:2:0:0:1572864:1] totalSize# 30211152 blobValueIndex# 53 Put id# [93:1:1:0:0:589824:1] totalSize# 31784016 blobValueIndex# 32 Put id# [4:1:1:0:0:1572864:1] totalSize# 32373840 blobValueIndex# 55 Change MinHugeBlobSize# 12288 Put id# [19:1:1:0:0:589824:1] totalSize# 33946704 blobValueIndex# 32 Change MinHugeBlobSize# 8192 Put id# [28:1:1:0:0:1572864:1] totalSize# 34536528 blobValueIndex# 58 Put id# [47:1:3:0:0:1048576:1] totalSize# 36109392 blobValueIndex# 42 Put id# [64:1:1:0:0:1024:1] totalSize# 37157968 blobValueIndex# 16 Trim Put id# [15:1:2:0:0:1572864:1] totalSize# 37158992 blobValueIndex# 52 Put id# [60:1:1:0:0:1048576:1] totalSize# 38731856 blobValueIndex# 40 Put id# [89:1:3:0:0:1572864:1] totalSize# 39780432 blobValueIndex# 58 Put id# [24:1:1:0:0:10:1] totalSize# 41353296 blobValueIndex# 0 Put id# [28:1:2:0:0:10:1] totalSize# 41353306 blobValueIndex# 9 Put id# [96:1:1:0:0:40960:1] totalSize# 41353316 blobValueIndex# 24 Put id# [37:1:2:0:0:1572864:1] totalSize# 41394276 blobValueIndex# 51 Put id# [92:1:1:0:0:1024:1] totalSize# 42967140 blobValueIndex# 15 Put id# [92:1:2:0:0:1572864:1] totalSize# 42968164 blobValueIndex# 56 Put id# 
[32:1:1:0:0:1048576:1] totalSize# 44541028 blobValueIndex# 48 Put id# [75:1:1:0:0:1024:1] totalSize# 45589604 blobValueIndex# 15 Put id# [62:1:2:0:0:589824:1] totalSize# 45590628 blobValueIndex# 31 Put id# [82:1:2:0:0:1024:1] totalSize# 46180452 blobValueIndex# 15 Put id# [52:1:1:0:0:1024:1] totalSize# 46181476 blobValueIndex# 18 Put id# [83:1:1:0:0:589824:1] totalSize# 46182500 blobValueIndex# 34 Put id# [51:1:1:0:0:10:1] totalSize# 46772324 blobValueIndex# 2 Put id# [37:1:3:0:0:10:1] totalSize# 46772334 blobValueIndex# 7 Trim Put id# [16:1:1:0:0:10:1] totalSize# 46772344 blobValueIndex# 9 Put id# [34:1:1:0:0:1572864:1] totalSize# 46772354 blobValueIndex# 55 Change MinHugeBlobSize# 12288 Put id# [44:1:1:0:0:589824:1] totalSize# 48345218 blobValueIndex# 36 Restart Put id# [80:1:1:0:0:10:1] totalSize# 48935042 blobValueIndex# 7 Put id# [13:1:1:0:0:1572864:1] totalSize# 48935052 blobValueIndex# 52 Put id# [88:1:1:0:0:40960:1] totalSize# 50507916 blobValueIndex# 21 Trim Put id# [89:1:4:0:0:1572864:1] totalSize# 50548876 blobValueIndex# 50 Put id# [66:1:1:0:0:10:1] totalSize# 52121740 blobValueIndex# 3 Trim Put id# [100:1:2:0:0:40960:1] totalSize# 52121750 blobValueIndex# 23 Change MinHugeBlobSize# 524288 Put id# [75:1:2:0:0:1024:1] totalSize# 52162710 blobValueIndex# 11 Put id# [57:1:1:0:0:1024:1] totalSize# 52163734 blobValueIndex# 16 Change MinHugeBlobSize# 65536 Put id# [53:1:1:0:0:1572864:1] totalSize# 52164758 blobValueIndex# 58 Put id# [62:1:3:0:0:1048576:1] totalSize# 53737622 blobValueIndex# 42 Put id# [72:1:1:0:0:589824:1] totalSize# 54786198 blobValueIndex# 39 Put id# [41:1:1:0:0:1048576:1] totalSize# 55376022 blobValueIndex# 42 Put id# [89:1:5:0:0:1048576:1] totalSize# 56424598 blobValueIndex# 48 Put id# [72:1:2:0:0:589824:1] totalSize# 57473174 blobValueIndex# 39 Put id# [17:1:2:0:0:1572864:1] totalSize# 58062998 blobValueIndex# 51 Put id# [83:1:2:0:0:589824:1] totalSize# 59635862 blobValueIndex# 31 Put id# [55:1:1:0:0:589824:1] totalSize# 60225686 blobValueIndex# 32 Change MinHugeBlobSize# 61440 Put id# [91:1:1:0:0:1048576:1] totalSize# 60815510 blobValueIndex# 46 Put id# [34:1:2:0:0:1048576:1] totalSize# 61864086 blobValueIndex# 45 Put id# [64:1:2:0:0:1572864:1] totalSize# 62912662 blobValueIndex# 55 Put id# [31:1:3:0:0:1024:1] totalSize# 64485526 blobValueIndex# 15 Change MinHugeBlobSize# 12288 Put id# [59:1:2:0:0:1048576:1] totalSize# 64486550 blobValueIndex# 49 Trim Put id# [89:1:6:0:0:1024:1] totalSize# 65535126 blobValueIndex# 18 Put id# [49:1:1:0:0:40960:1] totalSize# 65536150 blobValueIndex# 21 Put id# [84:1:3:0:0:10:1] totalSize# 65577110 blobValueIndex# 4 Put id# [52:1:2:0:0:40960:1] totalSize# 65577120 blobValueIndex# 29 Trim Put id# [65:1:2:0:0:1024:1] totalSize# 65618080 blobValueIndex# 15 Trim Put id# [62:1:4:0:0:40960:1] totalSize# 65619104 blobValueIndex# 21 Trim Put id# [24:1:2:0:0:10:1] totalSize# 65660064 blobValueIndex# 4 Trim Put id# [99:1:2:0:0:40960:1] totalSize# 65660074 blobValueIndex# 24 Put id# [96:1:2:0:0:589824:1] totalSize# 65701034 blobValueIndex# 32 Put id# [45:1:2:0:0:589824:1] totalSize# 66290858 blobValueIndex# 36 Put id# [62:1:5:0:0:1048576:1] totalSize# 66880682 blobValueIndex# 45 Put id# [47:1:4:0:0:10:1] totalSize# 67929258 blobValueIndex# 7 Put id# [16:1:2:0:0:40960:1] totalSize# 67929268 blobValueIndex# 25 Trim Put id# [6:1:1:0:0:1048576:1] totalSize# 67970228 blobValueIndex# 49 Put id# [33:1:1:0:0:1024:1] totalSize# 69018804 blobValueIndex# 10 Put id# [11:1:1:0:0:1572864:1] totalSize# 69019828 blobValueIndex# 53 Put id# 
[43:1:1:0:0:589824:1] totalSize# 70592692 blobValueIndex# 30 Put id# [76:1:2:0:0:40960:1] totalSize# 71182516 blobValueIndex# 28 Put id# [56:1:2:0:0:589824:1] totalSize# 71223476 blobValueIndex# 33 Change MinHugeBlobSize# 65536 Put id# [7:1:1:0:0:10:1] totalSize# 71813300 blobValueIndex# 0 Trim Put id# [52:1:3:0:0:1048576:1] totalSize# 71813310 blobValueIndex# 41 Put id# [1:1:1:0:0:589824:1] totalSize# 72861886 blobValueIndex# 34 Put id# [3:1:1:0:0:1024:1] totalSize# 73451710 blobValueIndex# 16 Put id# [39:1:1:0:0:40960:1] totalSize# 73452734 blobValueIndex# 22 Put id# [100:1:3:0:0:1572864:1] totalSize# 73493694 blobValueIndex# 53 Put id# [17:1:3:0:0:10:1] totalSize# 75066558 blobValueIndex# 0 Put id# [2:1:3:0:0:1048576:1] totalSize# 75066568 blobValueIndex# 47 Put id# [34:1:3:0:0:1048576:1] totalSize# 76115144 blobValueIndex# 41 Change MinHugeBlobSize# 8192 Put id# [23:1:3:0:0:1572864:1] totalSize# 77163720 blobValueIndex# 58 Put id# [44:1:2:0:0:589824:1] totalSize# 78736584 blobValueIndex# 31 Change MinHugeBlobSize# 61440 Trim Put id# [31:1:4:0:0:40960:1] totalSize# 79326408 blobValueIndex# 23 Put id# [22:1:1:0:0:40960:1] totalSize# 79367368 blobValueIndex# 20 Put id# [83:1:3:0:0:10:1] totalSize# 79408328 blobValueIndex# 2 Trim Put id# [90:1:2:0:0:10:1] totalSize# 79408338 blobValueIndex# 7 Trim Restart Put id# [77:1:1:0:0:1572864:1] totalSize# 79408348 blobValueIndex# 58 Put id# [9:1:2:0:0:40960:1] totalSize# 80981212 blobValueIndex# 21 Put id# [79:1:3:0:0:1572864:1] totalSize# 81022172 blobValueIndex# 50 Change MinHugeBlobSize# 524288 Put id# [49:1:2:0:0:10:1] totalSize# 82595036 blobValueIndex# 8 Put id# [74:1:1:0:0:1048576:1] totalSize# 82595046 blobValueIndex# 42 Restart Put id# [90:1:3:0:0:1572864:1] totalSize# 83643622 blobValueIndex# 58 Put id# [56:1:3:0:0:1024:1] totalSize# 85216486 blobValueIndex# 18 Put id# [86:1:1:0:0:1048576:1] totalSize# 85217510 blobValueIndex# 40 Put id# [30:1:2:0:0:40960:1] totalSize# 86266086 blobValueIndex# 27 Put id# [35:1:1:0:0:10:1] totalSize# 86307046 blobValueIndex# 7 Put id# [46:1:1:0:0:40960:1] totalSize# 86307056 blobValueIndex# 25 Put id# [87:1:1:0:0:40960:1] totalSize# 86348016 blobValueIndex# 29 Trim Put id# [42:1:1:0:0:1572864:1] totalSize# 86388976 blobValueIndex# 56 Trim Put id# [3:1:2:0:0:1024:1] totalSize# 87961840 blobValueIndex# 18 Put id# [28:1:3:0:0:1572864:1] totalSize# 87962864 blobValueIndex# 59 Trim Put id# [73:1:1:0:0:1024:1] totalSize# 89535728 blobValueIndex# 19 Put id# [95:1:1:0:0:1572864:1] totalSize# 89536752 blobValueIndex# 55 Put id# [94:1:1:0:0:1572864:1] totalSize# 91109616 blobValueIndex# 57 Put id# [79:1:4:0:0:10:1] totalSize# 92682480 blobValueIndex# 1 Put id# [66:1:2:0:0:1048576:1] totalSize# 92682490 blobValueIndex# 47 Restart Put id# [59:1:3:0:0:40960:1] totalSize# 93731066 blobValueIndex# 25 Put id# [30:1:3:0:0:1024:1] totalSize# 93772026 blobValueIndex# 19 Put id# [72:1:3:0:0:1572864:1] totalSize# 93773050 blobValueIndex# 56 Put id# [24:1:3:0:0:1048576:1] totalSize# 95345914 blobValueIndex# 47 Restart Put id# [84:1:4:0:0:1024:1] totalSize# 96394490 blobValueIndex# 13 Put id# [6:1:2:0:0:1048576:1] totalSize# 96395514 blobValueIndex# 41 Put id# [58:1:1:0:0:10:1] totalSize# 97444090 blobValueIndex# 0 Put id# [30:1:4:0:0:1024:1] totalSize# 97444100 blobValueIndex# 10 Change MinHugeBlobSize# 819 ... 
obValueIndex# 0 Put id# [36:1:19:0:0:40960:1] totalSize# 1021470898 blobValueIndex# 21 Put id# [44:1:16:0:0:1024:1] totalSize# 1021511858 blobValueIndex# 14 Put id# [80:1:19:0:0:1048576:1] totalSize# 1021512882 blobValueIndex# 48 Put id# [92:1:16:0:0:1572864:1] totalSize# 1022561458 blobValueIndex# 52 Trim Put id# [100:1:17:0:0:40960:1] totalSize# 1024134322 blobValueIndex# 27 Trim Put id# [27:1:20:0:0:1048576:1] totalSize# 1024175282 blobValueIndex# 49 Put id# [12:1:16:0:0:589824:1] totalSize# 1025223858 blobValueIndex# 30 Put id# [98:1:15:0:0:1572864:1] totalSize# 1025813682 blobValueIndex# 55 Put id# [11:1:23:0:0:1024:1] totalSize# 1027386546 blobValueIndex# 18 Put id# [89:1:28:0:0:10:1] totalSize# 1027387570 blobValueIndex# 5 Put id# [46:1:25:0:0:10:1] totalSize# 1027387580 blobValueIndex# 0 Put id# [95:1:20:0:0:1048576:1] totalSize# 1027387590 blobValueIndex# 46 Put id# [41:1:19:0:0:40960:1] totalSize# 1028436166 blobValueIndex# 28 Restart Put id# [77:1:21:0:0:1024:1] totalSize# 1028477126 blobValueIndex# 17 Put id# [31:1:21:0:0:10:1] totalSize# 1028478150 blobValueIndex# 5 Put id# [9:1:25:0:0:40960:1] totalSize# 1028478160 blobValueIndex# 21 Put id# [24:1:23:0:0:10:1] totalSize# 1028519120 blobValueIndex# 5 Put id# [70:1:13:0:0:1048576:1] totalSize# 1028519130 blobValueIndex# 49 Trim Put id# [27:1:21:0:0:1024:1] totalSize# 1029567706 blobValueIndex# 18 Change MinHugeBlobSize# 65536 Put id# [88:1:22:0:0:1024:1] totalSize# 1029568730 blobValueIndex# 13 Put id# [54:1:25:0:0:589824:1] totalSize# 1029569754 blobValueIndex# 38 Put id# [23:1:32:0:0:1572864:1] totalSize# 1030159578 blobValueIndex# 52 Put id# [7:1:15:0:0:589824:1] totalSize# 1031732442 blobValueIndex# 32 Put id# [52:1:20:0:0:40960:1] totalSize# 1032322266 blobValueIndex# 27 Put id# [73:1:13:0:0:1572864:1] totalSize# 1032363226 blobValueIndex# 58 Trim Put id# [78:1:24:0:0:40960:1] totalSize# 1033936090 blobValueIndex# 21 Put id# [16:1:17:0:0:1024:1] totalSize# 1033977050 blobValueIndex# 14 Put id# [19:1:21:0:0:1572864:1] totalSize# 1033978074 blobValueIndex# 54 Put id# [16:1:18:0:0:1024:1] totalSize# 1035550938 blobValueIndex# 14 Put id# [99:1:18:0:0:1048576:1] totalSize# 1035551962 blobValueIndex# 40 Restart Put id# [17:1:19:0:0:40960:1] totalSize# 1036600538 blobValueIndex# 22 Trim Put id# [5:1:19:0:0:40960:1] totalSize# 1036641498 blobValueIndex# 20 Put id# [48:1:22:0:0:40960:1] totalSize# 1036682458 blobValueIndex# 25 Put id# [34:1:20:0:0:10:1] totalSize# 1036723418 blobValueIndex# 2 Put id# [34:1:21:0:0:10:1] totalSize# 1036723428 blobValueIndex# 1 Put id# [98:1:16:0:0:40960:1] totalSize# 1036723438 blobValueIndex# 24 Put id# [53:1:28:0:0:589824:1] totalSize# 1036764398 blobValueIndex# 31 Put id# [7:1:16:0:0:589824:1] totalSize# 1037354222 blobValueIndex# 33 Put id# [40:1:19:0:0:1048576:1] totalSize# 1037944046 blobValueIndex# 44 Put id# [1:1:26:0:0:1572864:1] totalSize# 1038992622 blobValueIndex# 57 Trim Put id# [1:1:27:0:0:40960:1] totalSize# 1040565486 blobValueIndex# 22 Put id# [41:1:20:0:0:589824:1] totalSize# 1040606446 blobValueIndex# 32 Put id# [30:1:22:0:0:40960:1] totalSize# 1041196270 blobValueIndex# 21 Trim Put id# [2:1:27:0:0:10:1] totalSize# 1041237230 blobValueIndex# 7 Trim Put id# [15:1:13:0:0:1048576:1] totalSize# 1041237240 blobValueIndex# 44 Change MinHugeBlobSize# 61440 Put id# [35:1:26:0:0:1024:1] totalSize# 1042285816 blobValueIndex# 11 Put id# [88:1:23:0:0:10:1] totalSize# 1042286840 blobValueIndex# 0 Put id# [79:1:21:0:0:40960:1] totalSize# 1042286850 blobValueIndex# 29 Put id# [4:1:22:0:0:10:1] 
totalSize# 1042327810 blobValueIndex# 7 Put id# [64:1:28:0:0:1024:1] totalSize# 1042327820 blobValueIndex# 14 Put id# [86:1:12:0:0:589824:1] totalSize# 1042328844 blobValueIndex# 37 Put id# [74:1:20:0:0:1048576:1] totalSize# 1042918668 blobValueIndex# 43 Put id# [55:1:27:0:0:589824:1] totalSize# 1043967244 blobValueIndex# 37 Put id# [46:1:26:0:0:589824:1] totalSize# 1044557068 blobValueIndex# 37 Put id# [24:1:24:0:0:40960:1] totalSize# 1045146892 blobValueIndex# 23 Put id# [5:1:20:0:0:589824:1] totalSize# 1045187852 blobValueIndex# 37 Put id# [63:1:15:0:0:40960:1] totalSize# 1045777676 blobValueIndex# 29 Change MinHugeBlobSize# 65536 Put id# [5:1:21:0:0:1572864:1] totalSize# 1045818636 blobValueIndex# 58 Put id# [76:1:18:0:0:1572864:1] totalSize# 1047391500 blobValueIndex# 50 Put id# [65:1:17:0:0:1572864:1] totalSize# 1048964364 blobValueIndex# 55 Put id# [61:1:25:0:0:1024:1] totalSize# 1050537228 blobValueIndex# 15 Change MinHugeBlobSize# 12288 Trim Put id# [75:1:17:0:0:10:1] totalSize# 1050538252 blobValueIndex# 6 Put id# [41:1:21:0:0:40960:1] totalSize# 1050538262 blobValueIndex# 21 Put id# [88:1:24:0:0:1572864:1] totalSize# 1050579222 blobValueIndex# 52 Put id# [6:1:21:0:0:1048576:1] totalSize# 1052152086 blobValueIndex# 46 Restart Put id# [6:1:22:0:0:1572864:1] totalSize# 1053200662 blobValueIndex# 53 Trim Put id# [27:1:22:0:0:40960:1] totalSize# 1054773526 blobValueIndex# 24 Trim Put id# [3:1:18:0:0:40960:1] totalSize# 1054814486 blobValueIndex# 24 Put id# [99:1:19:0:0:10:1] totalSize# 1054855446 blobValueIndex# 2 Put id# [1:1:28:0:0:1572864:1] totalSize# 1054855456 blobValueIndex# 51 Put id# [71:1:16:0:0:1572864:1] totalSize# 1056428320 blobValueIndex# 53 Put id# [23:1:33:0:0:589824:1] totalSize# 1058001184 blobValueIndex# 36 Put id# [93:1:20:0:0:1024:1] totalSize# 1058591008 blobValueIndex# 15 Put id# [36:1:20:0:0:1572864:1] totalSize# 1058592032 blobValueIndex# 53 Put id# [61:1:26:0:0:589824:1] totalSize# 1060164896 blobValueIndex# 39 Change MinHugeBlobSize# 61440 Put id# [64:1:29:0:0:1048576:1] totalSize# 1060754720 blobValueIndex# 49 Restart Put id# [2:1:28:0:0:10:1] totalSize# 1061803296 blobValueIndex# 0 Put id# [88:1:25:0:0:40960:1] totalSize# 1061803306 blobValueIndex# 23 Put id# [94:1:15:0:0:1024:1] totalSize# 1061844266 blobValueIndex# 15 Put id# [78:1:25:0:0:589824:1] totalSize# 1061845290 blobValueIndex# 30 Trim Put id# [69:1:23:0:0:1048576:1] totalSize# 1062435114 blobValueIndex# 40 Put id# [9:1:26:0:0:1572864:1] totalSize# 1063483690 blobValueIndex# 58 Put id# [34:1:22:0:0:1048576:1] totalSize# 1065056554 blobValueIndex# 40 Restart Put id# [30:1:23:0:0:589824:1] totalSize# 1066105130 blobValueIndex# 37 Put id# [94:1:16:0:0:40960:1] totalSize# 1066694954 blobValueIndex# 24 Put id# [76:1:19:0:0:1572864:1] totalSize# 1066735914 blobValueIndex# 53 Trim Put id# [69:1:24:0:0:10:1] totalSize# 1068308778 blobValueIndex# 4 Put id# [41:1:22:0:0:10:1] totalSize# 1068308788 blobValueIndex# 6 Trim Put id# [17:1:20:0:0:10:1] totalSize# 1068308798 blobValueIndex# 9 Put id# [19:1:22:0:0:1572864:1] totalSize# 1068308808 blobValueIndex# 57 Put id# [13:1:14:0:0:1024:1] totalSize# 1069881672 blobValueIndex# 15 Put id# [74:1:21:0:0:10:1] totalSize# 1069882696 blobValueIndex# 2 Trim Put id# [46:1:27:0:0:1024:1] totalSize# 1069882706 blobValueIndex# 19 Put id# [93:1:21:0:0:40960:1] totalSize# 1069883730 blobValueIndex# 25 Put id# [93:1:22:0:0:40960:1] totalSize# 1069924690 blobValueIndex# 23 Restart Put id# [62:1:24:0:0:589824:1] totalSize# 1069965650 blobValueIndex# 35 Restart Put id# 
[65:1:18:0:0:1024:1] totalSize# 1070555474 blobValueIndex# 11 Change MinHugeBlobSize# 12288 Put id# [86:1:13:0:0:1572864:1] totalSize# 1070556498 blobValueIndex# 56 Put id# [65:1:19:0:0:10:1] totalSize# 1072129362 blobValueIndex# 2 Restart Put id# [60:1:26:0:0:40960:1] totalSize# 1072129372 blobValueIndex# 25 Put id# [49:1:21:0:0:10:1] totalSize# 1072170332 blobValueIndex# 6 Put id# [71:1:17:0:0:1048576:1] totalSize# 1072170342 blobValueIndex# 42 Put id# [12:1:17:0:0:1024:1] totalSize# 1073218918 blobValueIndex# 14 Put id# [42:1:27:0:0:589824:1] totalSize# 1073219942 blobValueIndex# 36 Put id# [13:1:15:0:0:1048576:1] totalSize# 1073809766 blobValueIndex# 49 Put id# [58:1:18:0:0:40960:1] totalSize# 1074858342 blobValueIndex# 22 Trim Put id# [98:1:17:0:0:40960:1] totalSize# 1074899302 blobValueIndex# 25 Put id# [73:1:14:0:0:10:1] totalSize# 1074940262 blobValueIndex# 1 Put id# [36:1:21:0:0:1024:1] totalSize# 1074940272 blobValueIndex# 11 Put id# [78:1:26:0:0:1572864:1] totalSize# 1074941296 blobValueIndex# 50 Put id# [58:1:19:0:0:1024:1] totalSize# 1076514160 blobValueIndex# 16 Put id# [62:1:25:0:0:40960:1] totalSize# 1076515184 blobValueIndex# 29 Put id# [83:1:24:0:0:10:1] totalSize# 1076556144 blobValueIndex# 3 Trim Restart Put id# [98:1:18:0:0:1048576:1] totalSize# 1076556154 blobValueIndex# 44 Restart Put id# [13:1:16:0:0:1048576:1] totalSize# 1077604730 blobValueIndex# 48 Put id# [11:1:24:0:0:1048576:1] totalSize# 1078653306 blobValueIndex# 46 Put id# [53:1:29:0:0:10:1] totalSize# 1079701882 blobValueIndex# 3 Put id# [32:1:20:0:0:10:1] totalSize# 1079701892 blobValueIndex# 9 Put id# [61:1:27:0:0:40960:1] totalSize# 1079701902 blobValueIndex# 20 Change MinHugeBlobSize# 8192 Put id# [55:1:28:0:0:589824:1] totalSize# 1079742862 blobValueIndex# 36 Put id# [100:1:18:0:0:1572864:1] totalSize# 1080332686 blobValueIndex# 52 Put id# [41:1:23:0:0:589824:1] totalSize# 1081905550 blobValueIndex# 32 Put id# [73:1:15:0:0:1572864:1] totalSize# 1082495374 blobValueIndex# 57 Put id# [10:1:18:0:0:10:1] totalSize# 1084068238 blobValueIndex# 4 Put id# [43:1:25:0:0:589824:1] totalSize# 1084068248 blobValueIndex# 37 Change MinHugeBlobSize# 524288 Trim Put id# [29:1:17:0:0:1024:1] totalSize# 1084658072 blobValueIndex# 10 Put id# [68:1:21:0:0:1572864:1] totalSize# 1084659096 blobValueIndex# 51 Put id# [7:1:17:0:0:1572864:1] totalSize# 1086231960 blobValueIndex# 54 Trim Put id# [81:1:20:0:0:1024:1] totalSize# 1087804824 blobValueIndex# 16 Put id# [83:1:25:0:0:589824:1] totalSize# 1087805848 blobValueIndex# 30 Put id# [43:1:26:0:0:589824:1] totalSize# 1088395672 blobValueIndex# 30 Change MinHugeBlobSize# 12288 Put id# [36:1:22:0:0:589824:1] totalSize# 1088985496 blobValueIndex# 38 Put id# [95:1:21:0:0:1048576:1] totalSize# 1089575320 blobValueIndex# 47 Put id# [20:1:11:0:0:40960:1] totalSize# 1090623896 blobValueIndex# 24 Trim Put id# [20:1:12:0:0:1024:1] totalSize# 1090664856 blobValueIndex# 16 Restart Put id# [23:1:34:0:0:1572864:1] totalSize# 1090665880 blobValueIndex# 53 Put id# [15:1:14:0:0:1024:1] totalSize# 1092238744 blobValueIndex# 15 Put id# [16:1:19:0:0:589824:1] totalSize# 1092239768 blobValueIndex# 37 Trim Put id# [3:1:19:0:0:1572864:1] totalSize# 1092829592 blobValueIndex# 58 Put id# [27:1:23:0:0:10:1] totalSize# 1094402456 blobValueIndex# 2 Trim Put id# [55:1:29:0:0:1048576:1] totalSize# 1094402466 blobValueIndex# 44 Put id# [45:1:16:0:0:40960:1] totalSize# 1095451042 blobValueIndex# 23 Trim Put id# [87:1:10:0:0:40960:1] totalSize# 1095492002 blobValueIndex# 22 Trim Put id# [47:1:16:0:0:1024:1] 
totalSize# 1095532962 blobValueIndex# 14 Put id# [36:1:23:0:0:40960:1] totalSize# 1095533986 blobValueIndex# 22 Trim Put id# [63:1:16:0:0:1572864:1] totalSize# 1095574946 blobValueIndex# 59 Put id# [4:1:23:0:0:1572864:1] totalSize# 1097147810 blobValueIndex# 56 Put id# [76:1:20:0:0:1572864:1] totalSize# 1098720674 blobValueIndex# 50 Trim Put id# [28:1:26:0:0:589824:1] totalSize# 1100293538 blobValueIndex# 36 Put id# [22:1:17:0:0:10:1] totalSize# 1100883362 blobValueIndex# 5 Trim Put id# [80:1:20:0:0:1572864:1] totalSize# 1100883372 blobValueIndex# 50 Put id# [23:1:35:0:0:1572864:1] totalSize# 1102456236 blobValueIndex# 55 Put id# [40:1:20:0:0:40960:1] totalSize# 1104029100 blobValueIndex# 29 Put id# [77:1:22:0:0:40960:1] totalSize# 1104070060 blobValueIndex# 27 Trim Put id# [63:1:17:0:0:1048576:1] totalSize# 1104111020 blobValueIndex# 45 Restart Put id# [69:1:25:0:0:589824:1] totalSize# 1105159596 blobValueIndex# 32 Put id# [74:1:22:0:0:1024:1] totalSize# 1105749420 blobValueIndex# 16 Change MinHugeBlobSize# 65536 Put id# [53:1:30:0:0:589824:1] totalSize# 1105750444 blobValueIndex# 35 Put id# [37:1:25:0:0:1048576:1] totalSize# 1106340268 blobValueIndex# 45 Put id# [16:1:20:0:0:10:1] totalSize# 1107388844 blobValueIndex# 9 Put id# [37:1:26:0:0:10:1] totalSize# 1107388854 blobValueIndex# 9 Change MinHugeBlobSize# 61440 Restart |89.1%| [TA] $(B)/ydb/core/blobstorage/ut_vdisk2/test-results/unittest/{meta.json ... results_accumulator.log} >> THiveTest::TestLockTabletExecutionBadOwner [GOOD] >> THiveTest::TestLockTabletExecutionDelete >> KqpWorkloadService::TestQueryCancelAfterUnlimitedPool [GOOD] >> KqpWorkloadService::TestStartQueryAfterCancel |89.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow |89.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow |89.1%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk2/test-results/unittest/{meta.json ... 
results_accumulator.log} |89.1%| [LD] {RESULT} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow >> ResourcePoolClassifiersDdl::TestExplicitPoolId [GOOD] >> ResourcePoolClassifiersDdl::TestMultiGroupClassification >> TPartitionTests::ConflictingTxProceedAfterRollback [GOOD] >> TPartitionTests::Batching >> THealthCheckTest::ProtobufBelowLimitFor10VdisksIssues [GOOD] >> THealthCheckTest::ProtobufUnderLimitFor100LargeVdisksIssues >> TPartitionTests::ConflictingSrcIdTxAndWritesDifferentBatches >> ResourcePoolsDdl::TestAlterResourcePool [GOOD] >> ResourcePoolsDdl::TestPoolSwitchToLimitedState >> GenericFederatedQuery::IcebergHadoopBasicSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHadoopBasicSelectCount >> TPartitionTests::Batching [GOOD] >> TPartitionTests::CommitOffsetRanges >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionOperationId1 [GOOD] >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionOperationId2 >> GenericFederatedQuery::IcebergHadoopSaSelectCount [GOOD] >> GenericFederatedQuery::IcebergHadoopSaFilterPushdown |89.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut |89.1%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut |89.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut >> TPartitionTests::CommitOffsetRanges [GOOD] >> TPartitionTests::ChangeConfig >> Viewer::JsonAutocompleteSchemePOST [GOOD] >> TPartitionTests::ChangeConfig [GOOD] >> TPartitionTests::ConflictingActsInSeveralBatches ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::JsonAutocompleteSchemePOST [GOOD] Test command err: 2025-06-24T20:33:06.360410Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:297:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:06.360996Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:06.361071Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:06.720006Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 25157, node 1 TClient is connected to server localhost:16000 2025-06-24T20:33:16.414758Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:16.414868Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:16.415160Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:118:2164], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:16.726491Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 TServer::EnableGrpc on GrpcPort 3502, node 2 TClient is connected to server localhost:3218 2025-06-24T20:33:26.128112Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:280:2323], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:26.128616Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:26.128715Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:26.503443Z node 3 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 3 Type# 268639257 TServer::EnableGrpc on GrpcPort 23983, node 3 TClient is connected to server localhost:23733 2025-06-24T20:33:38.244621Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:296:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:38.244847Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:38.245101Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:38.574265Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 7493, node 4 TClient is connected to server localhost:20383 2025-06-24T20:33:50.317783Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:115:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:50.318029Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:50.318127Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:50.739933Z node 5 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 5 Type# 268639257 TServer::EnableGrpc on GrpcPort 6386, node 5 TClient is connected to server localhost:23791 >> Viewer::Plan2SvgOK [GOOD] >> Viewer::Plan2SvgBad >> GenericFederatedQuery::IcebergHiveBasicSelectCount [GOOD] >> GenericFederatedQuery::IcebergHiveBasicFilterPushdown >> THealthCheckTest::TestReBootingTabletIsDead [GOOD] >> THealthCheckTest::UnknowPDiskState >> TPartitionTests::ConflictingSrcIdTxAndWritesDifferentBatches [GOOD] >> GenericFederatedQuery::IcebergHadoopTokenSelectCount [GOOD] >> GenericFederatedQuery::IcebergHadoopTokenFilterPushdown >> Viewer::JsonAutocompleteColumnsPOST [GOOD] >> GenericFederatedQuery::IcebergHiveTokenSelectCount [GOOD] >> GenericFederatedQuery::IcebergHiveTokenFilterPushdown >> THiveTest::TestLockTabletExecutionDelete [GOOD] >> THiveTest::TestLockTabletExecutionDeleteReboot ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::ConflictingSrcIdTxAndWritesDifferentBatches [GOOD] Test command err: 2025-06-24T20:33:45.892776Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:45.892888Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:33:45.920210Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [1:182:2195] 2025-06-24T20:33:45.922705Z node 1 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-24T20:33:45.000000Z 2025-06-24T20:33:45.922826Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [1:182:2195] Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\250\230\226\235\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\000\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\250\230\226\235\3722" StorageChannel: INLINE } CmdWrite { Key: "I0000000003" Value: "\010\271`\020\262\222\004" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\002\020\001\030\001\"\007session(\0000\001@\001" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\002\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } 2025-06-24T20:33:46.920522Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:46.920613Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:33:46.965393Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [2:182:2195] 2025-06-24T20:33:46.967892Z node 2 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T20:33:46.000000Z 2025-06-24T20:33:46.967991Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [2:182:2195] Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\220\240\226\235\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-1" Value: "\010\000\020\001\030\001\"\tsession-1(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-1" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session-1" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\220\240\226\235\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-2" Value: "\010\000\020\001\030\001\"\tsession-2(\0000\003@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-2" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session-2" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\220\240\226\235\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-1" Value: "\010\003\020\001\030\001\"\tsession-1(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-1" Value: "\003\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session-1" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\220\240\226\235\3722" StorageChannel: INLINE } CmdWrite { Key: "I0000000003" Value: "\010\271`\020\262\222\004" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-2" Value: "\010\001\020\001\030\001\"\tsession-2(\0000\003@\001" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-2" Value: 
"\001\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session-2" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-1" Value: "\010\006\020\001\030\001\"\tsession-1(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-1" Value: "\006\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session-1" StorageChannel: INLINE } 2025-06-24T20:33:48.033823Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:48.033905Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:33:48.428164Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:48.428241Z node 4 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:33:48.446176Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-24T20:33:48.446383Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:33:48.446639Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [4:181:2194] 2025-06-24T20:33:48.447715Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-24T20:33:48.447892Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-24T20:33:48.448053Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-24T20:33:48.448240Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-24T20:33:48.448489Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-24T20:33:48.448707Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-24T20:33:48.448844Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-24T20:33:48.448884Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T20:33:48.448948Z node 4 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T20:33:48.000000Z 2025-06-24T20:33:48.448985Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 
2025-06-24T20:33:48.449037Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [4:181:2194] 2025-06-24T20:33:48.449091Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 50 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-24T20:33:48.449133Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T20:33:48.449366Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:33:49.868242Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 Create distr tx with id = 2 and act no: 3 Create immediate tx with id = 4 and act no: 5 2025-06-24T20:33:49.868621Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-24T20:33:49.868740Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 2 2025-06-24T20:33:51.283672Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:33:51.283928Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T20:33:51.284046Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T20:33:51.284104Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 1 Wait batch completion Got batch complete: 2 Wait batch completion Wait kv request 2025-06-24T20:33:51.557891Z node 4 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 2 2025-06-24T20:33:51.557986Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 2 2025-06-24T20:33:51.558078Z node 4 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-24T20:33:51.558148Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-24T20:33:51.558205Z node 4 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-24T20:33:51.573271Z node 4 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= Got KV request Wait tx committed for tx 2 
2025-06-24T20:33:51.610978Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 2 2025-06-24T20:33:51.611088Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Wait immediate tx complete 4 Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 4 2025-06-24T20:33:52.216792Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:52.216883Z node 5 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:33:52.238845Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request Got KV request 2025-06-24T20:33:52.239104Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:33:52.239397Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [5:181:2194] 2025-06-24T20:33:52.240674Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-24T20:33:52.240912Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-24T20:33:52.241103Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-24T20:33:52.241361Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-24T20:33:52.241677Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-24T20:33:52.241937Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 1 size 684 so 0 eo 1 d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-24T20:33:52.242060Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-24T20:33:52.242096Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T20:33:52.242138Z node 5 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T20:33:52.000000Z 2025-06-24T20:33:52.242173Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 
2025-06-24T20:33:52.242215Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [5:181:2194] 2025-06-24T20:33:52.242266Z node 5 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 1 endOffset 1 Head Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 2025-06-24T20:33:52.242306Z node 5 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T20:33:52.242706Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:33:52.599690Z node 5 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src1|2682d4ee-f1647492-6abd968d-52e4f616_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src1 2025-06-24T20:33:52.599884Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 Got batch complete: 1 2025-06-24T20:33:53.688693Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 Create distr tx with id = 2 and act no: 3 Create distr tx with id = 4 and act no: 5 2025-06-24T20:33:53.689098Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-24T20:33:53.689228Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 2 2025-06-24T20:33:53.689301Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 4 2025-06-24T20:33:55.134087Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:33:55.134267Z node 5 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T20:33:55.134342Z node 5 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T20:33:55.134374Z node 5 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 1 Wait batch completion 2025-06-24T20:33:55.134485Z node 5 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 0 2025-06-24T20:33:55.134542Z node 5 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 0 2025-06-24T20:33:55.134616Z node 5 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0, NewHead=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 Got batch complete: 2 Wait batch completion Wait for no tx committed 2025-06-24T20:33:55.398000Z node 5 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle 
TEvPQ::TEvTxCommit Step 1, TxId 4 2025-06-24T20:33:55.398081Z node 5 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 4 2025-06-24T20:33:55.398150Z node 5 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0, NewHead=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 Got KV request Got KV request Wait kv request Wait tx committed for tx 0 2025-06-24T20:33:55.650839Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 2 2025-06-24T20:33:55.651030Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:1257: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Already written message. Topic: 'Root/PQ/rt3.dc1--account--topic' Partition: 0 SourceId: 'src1'. Message seqNo: 1. Committed seqNo: 6. Writing seqNo: (NULL). EndOffset: 1. CurOffset: 1. Offset: 60 2025-06-24T20:33:55.651208Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src1' seqNo 7 partNo 0 2025-06-24T20:33:55.653529Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src1' seqNo 7 partNo 0 FormedBlobsCount 0 NewHead: Offset 70 PartNo 0 PackedSize 84 count 1 nextOffset 71 batches 1 2025-06-24T20:33:55.653710Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:1257: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Already written message. Topic: 'Root/PQ/rt3.dc1--account--topic' Partition: 0 SourceId: 'src1'. Message seqNo: 7. Committed seqNo: 6. Writing seqNo: 7. EndOffset: 1. CurOffset: 71. Offset: 80 2025-06-24T20:33:55.654996Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 0 compactOffset 70,1 HeadOffset 1 endOffset 1 curOffset 71 d0000000000_00000000000000000070_00000_0000000001_00000? size 70 WTime 12141 2025-06-24T20:33:55.655207Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Got KV request Got batch complete: 3 Got KV request Got KV request Wait tx committed for tx 4 Wait batch completion Wait kv request 2025-06-24T20:33:55.688127Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 17 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T20:33:55.688255Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:33:55.688359Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 1, partNo: 0, Offset: 1 is already written 2025-06-24T20:33:55.688429Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-24T20:33:55.688470Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 70 is stored on disk 2025-06-24T20:33:55.688495Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:33:55.688528Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 71 is already written 2025-06-24T20:33:55.688800Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=70, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 >> ResourcePoolsSysView::TestResourcePoolsSysViewOnServerless [GOOD] >> ResourcePoolsSysView::TestResourcePoolsSysViewFilters >> TPQTabletTests::DropTablet >> Cdc::ShouldBreakLocksOnConcurrentDropStream [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::JsonAutocompleteColumnsPOST [GOOD] Test command err: 2025-06-24T20:33:07.014127Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:297:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:07.014811Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:07.014880Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:07.432047Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 4685, node 1 TClient is connected to server localhost:2467 2025-06-24T20:33:17.542757Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:17.542855Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:17.543125Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:118:2164], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:17.850244Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 TServer::EnableGrpc on GrpcPort 17542, node 2 TClient is connected to server localhost:30241 2025-06-24T20:33:27.797122Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:280:2323], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:27.797468Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:27.797548Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:28.211894Z node 3 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 3 Type# 268639257 TServer::EnableGrpc on GrpcPort 18240, node 3 TClient is connected to server localhost:16754 2025-06-24T20:33:39.877136Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:296:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:39.877304Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:39.877530Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:40.250320Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 25723, node 4 TClient is connected to server localhost:18990 2025-06-24T20:33:52.528535Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:115:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:52.528791Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:52.528905Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:52.941621Z node 5 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 5 Type# 268639257 TServer::EnableGrpc on GrpcPort 14072, node 5 TClient is connected to server localhost:9420 >> TPQTabletTests::DropTablet [GOOD] >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionOperationId2 [GOOD] >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionFetchToken >> TPQTest::TestWaitInOwners [GOOD] >> TPQTest::TestWritePQCompact >> THiveTest::TestHiveBalancerWithLimit [GOOD] >> THiveTest::TestHiveBalancerIgnoreTablet >> Cdc::MustNotLoseSchemaSnapshot [GOOD] >> Cdc::MustNotLoseSchemaSnapshotWithVolatileTx >> TPQTabletTests::DropTablet_And_PlannedConfigTransaction >> TPartitionTests::ConflictingActsInSeveralBatches [GOOD] >> TPartitionTests::After_TEvGetWriteInfoError_Comes_TEvTxCalcPredicateResult >> GenericFederatedQuery::PostgreSQLSelectCount [GOOD] >> GenericFederatedQuery::PostgreSQLFilterPushdown >> TPQTabletTests::DropTablet_And_PlannedConfigTransaction [GOOD] >> TPartitionTests::After_TEvGetWriteInfoError_Comes_TEvTxCalcPredicateResult [GOOD] >> TPQTabletTests::UpdateConfig_1 >> Cdc::InitialScanComplete [GOOD] >> Cdc::InitialScanEnqueuesZeroRecords >> TPQTabletTests::Cancel_Tx >> KqpWorkloadServiceActors::TestCpuLoadActor [GOOD] >> TPQTabletTests::UpdateConfig_1 [GOOD] >> TPQTabletTests::Cancel_Tx [GOOD] >> TPQTabletTests::Config_TEvTxCommit_After_Restart ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::After_TEvGetWriteInfoError_Comes_TEvTxCalcPredicateResult [GOOD] Test command err: 2025-06-24T20:33:52.252155Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:52.252245Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:33:52.275376Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:182:2195] 2025-06-24T20:33:52.276497Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 1 generation 0 [1:182:2195] Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient-1" Value: "\010\000\020\002\030\003\"\014session-id-1(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient-1" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id-1" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient-2" Value: "\010\000\020\004\030\005\"\014session-id-2(\0000\003@\000" StorageChannel: INLINE } CmdWrite { Key: 
"m0000000001uclient-2" Value: "\000\000\000\000\000\000\000\000\004\000\000\000\005\000\000\000session-id-2" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient-3" Value: "\010\000\020\006\030\007\"\014session-id-3(\0000\004@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient-3" Value: "\000\000\000\000\000\000\000\000\006\000\000\000\007\000\000\000session-id-3" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient-1" Value: "\010\000\020\010\030\t\"\014session-id-2(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient-1" Value: "\000\000\000\000\000\000\000\000\010\000\000\000\t\000\000\000session-id-2" StorageChannel: INLINE } 2025-06-24T20:33:52.887829Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:52.887912Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:33:52.915330Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:182:2195] 2025-06-24T20:33:52.917463Z node 2 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T20:33:52.000000Z 2025-06-24T20:33:52.917534Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [2:182:2195] Got cmd write: CmdWrite { Key: "i0000000000" Value: "\030\000(\200\317\226\235\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient" Value: "\010\000\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000000" Value: "\030\000(\200\317\226\235\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient" Value: "\010\002\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient" Value: "\002\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000000" Value: "\030\000(\200\317\226\235\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient" Value: "\010\004\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient" Value: "\004\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } 2025-06-24T20:33:53.798600Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:53.798683Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:33:53.818601Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [3:180:2193] 2025-06-24T20:33:53.822146Z node 3 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-24T20:33:53.000000Z 2025-06-24T20:33:53.822229Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [3:180:2193] Send change config Wait cmd write (initial) Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\350\326\226\235\3722" StorageChannel: INLINE } CmdWrite { Key: "I0000000003" Value: "\010\271`\020\262\222\004" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-1" Value: "\010\002\020\000\030\000\"\tsession-1(\0000\000@\001" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-1" Value: "\002\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000session-1" StorageChannel: INLINE } Wait commit 1 done Wait cmd write (change config) Got cmd write: CmdDeleteRange { Range { From: "m0000000003cclient-2" IncludeFrom: true To: "m0000000003cclient-2" IncludeTo: true } } CmdDeleteRange { Range { From: "m0000000003uclient-2" IncludeFrom: true To: "m0000000003uclient-2" IncludeTo: true } } CmdWrite { Key: "i0000000003" Value: "\030\000(\350\326\226\235\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-1" Value: "\010\002\020\000\030\000\"\tsession-1(\0000\000@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-1" Value: "\002\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000session-1" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-3" Value: "\010\000\020\000\030\000\"\000(\0000\007@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-3" Value: "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" StorageChannel: INLINE } CmdWrite { Key: "_config_3" Value: "\022\t\030\200\243\0058\200\200\200\005\030\000\"\027rt3.dc1--account--topic(\0020\001\272\001 /Root/PQ/rt3.dc1--account--topic\352\001\000\372\001\002\010\000\212\002\007account\220\002\001\242\002\002\010\000\252\002\016\n\010client-1@\000H\000\252\002\016\n\010client-3@\007H\000" StorageChannel: INLINE } Wait config changed 2025-06-24T20:33:54.654831Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:54.654917Z node 4 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:33:54.674248Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-24T20:33:54.674477Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:33:54.674765Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [4:181:2194] 2025-06-24T20:33:54.675862Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-24T20:33:54.676076Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-24T20:33:54.676264Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-24T20:33:54.676460Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start 
initializing step TInitDataRangeStep Got KV request 2025-06-24T20:33:54.676732Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-24T20:33:54.676938Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 1 size 684 so 0 eo 1 d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-24T20:33:54.677084Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-24T20:33:54.677127Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T20:33:54.677177Z node 4 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T20:33:54.000000Z 2025-06-24T20:33:54.677217Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 2025-06-24T20:33:54.677268Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [4:181:2194] 2025-06-24T20:33:54.677327Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 1 endOffset 1 Head Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 2025-06-24T20:33:54.677378Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T20:33:54.677631Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:33:55.021974Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src1|22987e89-e685b05f-e9b089e1-95b15f07_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src1 2025-06-24T20:33:55.022159Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 Got batch complete: 1 2025-06-24T20:33:55.022341Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src4|4a10c98-ec8d2dc0-37a00e39-842526cd_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src4 2025-06-24T20:33:55.022397Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 Got batch complete: 1 2025-06-24T20:33:56.035859Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 Create distr tx with id = 2 and act no: 3 Create distr tx with id = 4 and act no: 5 Create distr tx with id = 8 and act no: 9 Create immediate tx with id = 11 and act no: 12 2025-06-24T20:33:56.036474Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-24T20:33:56.036588Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 2 2 ... 
5Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 9, partNo: 0, Offset: 62 is stored on disk 2025-06-24T20:33:57.990865Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:33:57.990900Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 10, partNo: 0, Offset: 63 is stored on disk 2025-06-24T20:33:57.990932Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:33:57.990999Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 11, partNo: 0, Offset: 64 is stored on disk 2025-06-24T20:33:57.991040Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:33:57.991079Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 12, partNo: 0, Offset: 65 is stored on disk 2025-06-24T20:33:57.991114Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:33:57.991404Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 1, partNo: 0, Offset: 70 is stored on disk 2025-06-24T20:33:57.991455Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:33:57.991511Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 2, partNo: 0, Offset: 71 is stored on disk 2025-06-24T20:33:57.991851Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=332, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Got batch complete: 1 Wait tx committed for tx 4 Wait batch completion Wait batch completion 2025-06-24T20:33:57.992152Z node 4 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 8 2025-06-24T20:33:57.992216Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 8 2025-06-24T20:33:57.992307Z node 4 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 72 PartNo 0 PackedSize 0 count 0 nextOffset 72 batches 0, NewHead=Offset 72 PartNo 0 PackedSize 0 count 0 nextOffset 72 batches 0 2025-06-24T20:33:57.992511Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 7 partNo 0 2025-06-24T20:33:57.993486Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 7 partNo 0 FormedBlobsCount 0 NewHead: Offset 100 PartNo 0 PackedSize 84 count 1 nextOffset 101 batches 1 2025-06-24T20:33:57.993585Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 8 partNo 0 2025-06-24T20:33:57.993638Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 8 partNo 0 FormedBlobsCount 0 NewHead: Offset 100 PartNo 0 PackedSize 136 count 2 nextOffset 102 batches 1 2025-06-24T20:33:57.993677Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 9 partNo 0 2025-06-24T20:33:57.993734Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 9 partNo 0 FormedBlobsCount 0 NewHead: Offset 100 PartNo 0 PackedSize 188 count 3 nextOffset 103 batches 1 2025-06-24T20:33:57.993772Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 10 partNo 0 2025-06-24T20:33:57.993807Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 10 partNo 0 FormedBlobsCount 0 NewHead: Offset 100 PartNo 0 PackedSize 240 count 4 nextOffset 104 batches 1 2025-06-24T20:33:57.993842Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 11 partNo 0 2025-06-24T20:33:57.993901Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' 
seqNo 11 partNo 0 FormedBlobsCount 0 NewHead: Offset 100 PartNo 0 PackedSize 292 count 5 nextOffset 105 batches 1 2025-06-24T20:33:57.993948Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 12 partNo 0 2025-06-24T20:33:57.993990Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 12 partNo 0 FormedBlobsCount 0 NewHead: Offset 100 PartNo 0 PackedSize 344 count 6 nextOffset 106 batches 1 2025-06-24T20:33:57.994038Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-24T20:33:57.994091Z node 4 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 72 PartNo 0 PackedSize 0 count 0 nextOffset 72 batches 0, NewHead=Offset 100 PartNo 0 PackedSize 344 count 6 nextOffset 106 batches 1 2025-06-24T20:33:58.012123Z node 4 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-24T20:33:58.012764Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 0 compactOffset 100,6 HeadOffset 72 endOffset 72 curOffset 106 d0000000000_00000000000000000100_00000_0000000006_00000? size 211 WTime 12141 Got KV request Got batch complete: 7 Got KV request Got KV request Wait kv request Wait tx committed for tx 8 2025-06-24T20:33:58.035547Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 102 WriteNewSizeFromSupportivePartitions# 2 2025-06-24T20:33:58.035641Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:33:58.035723Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 72 is already written 2025-06-24T20:33:58.035763Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:33:58.035804Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 8, partNo: 0, Offset: 72 is already written 2025-06-24T20:33:58.035831Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:33:58.035865Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 9, partNo: 0, Offset: 72 is already written 2025-06-24T20:33:58.035891Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-24T20:33:58.035948Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 10, partNo: 0, Offset: 72 is already written 2025-06-24T20:33:58.035981Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:33:58.036015Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 11, partNo: 0, Offset: 72 is already written 2025-06-24T20:33:58.036040Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:33:58.036076Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 12, partNo: 0, Offset: 72 is already written 2025-06-24T20:33:58.036310Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=543, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Wait immediate tx complete 11 Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 11 2025-06-24T20:33:58.537741Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:58.537823Z node 5 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:33:58.557019Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [5:182:2195] 2025-06-24T20:33:58.558802Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 1 generation 0 [5:182:2195] >> TPQTabletTests::UpdateConfig_2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_exchange/unittest >> Cdc::ShouldBreakLocksOnConcurrentDropStream [GOOD] Test command err: 2025-06-24T20:30:30.799679Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615986396003576:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:30.822583Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004783/r3tmp/tmpeBd2oW/pdisk_1.dat 2025-06-24T20:30:31.432927Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519615986396003550:2079] 1750797030796431 != 1750797030796434 2025-06-24T20:30:31.443708Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:31.453018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:31.457533Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Disconnected -> Connecting 2025-06-24T20:30:31.462282Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28144, node 1 2025-06-24T20:30:31.592481Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:30:31.592530Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:30:31.592550Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:30:31.592715Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:30:31.665043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:31.688588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:31.721473Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:7519615990690971439:2267] 2025-06-24T20:30:31.721727Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:30:31.744105Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:30:31.744174Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:30:31.746265Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T20:30:31.746358Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:30:31.746402Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:30:31.746816Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:30:31.746880Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:30:31.746913Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:7519615990690971453:2267] in generation 1 2025-06-24T20:30:31.748418Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:30:31.822513Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:30:31.823742Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:30:31.823828Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:7519615990690971457:2268] 2025-06-24T20:30:31.823843Z node 1 :TX_DATASHARD DEBUG: 
datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:30:31.823855Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:30:31.823866Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:30:31.824052Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7519615990690971434:2297], serverId# [1:7519615990690971456:2308], sessionId# [0:0:0] 2025-06-24T20:30:31.824155Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:30:31.824240Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:30:31.824261Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:30:31.824276Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:30:31.824292Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:30:31.824307Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:30:31.824331Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:30:31.824621Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976710657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:30:31.824749Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976710657 at tablet 72075186224037888 2025-06-24T20:30:31.827715Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:30:31.828694Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:30:31.828784Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T20:30:31.835186Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7519615990690971480:2318], serverId# [1:7519615990690971482:2320], sessionId# [0:0:0] 2025-06-24T20:30:31.835255Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:30:31.849797Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976710657 at step 1750797031879 at tablet 72075186224037888 { Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750797031879 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T20:30:31.849854Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:30:31.850108Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: 
TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:30:31.850130Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T20:30:31.850160Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1750797031879:281474976710657] in PlanQueue unit at 72075186224037888 2025-06-24T20:30:31.850488Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1750797031879:281474976710657 keys extracted: 0 2025-06-24T20:30:31.850645Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T20:30:31.850754Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:30:31.850793Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T20:30:31.853604Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T20:30:31.854130Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:30:31.857071Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 1750797031878 2025-06-24T20:30:31.857098Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:30:31.857139Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1750797031879} 2025-06-24T20:30:31.857210Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:30:31.857260Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1750797031893 2025-06-24T20:30:31.857838Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:30:31.859125Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:30:31.859164Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:30:31.859213Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T20:30:31.859277Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750797031879 : 281474976710657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:7519615990690971231:2181], exec latency: 3 ms, propose latency: 8 ms 2025-06-24T20:30:31.859313Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976710657 state Ready TxInFly 0 2025-06-24T20:30:31.859353Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:30:31.863493Z node 1 
:CHANGE_EXCHANGE DEBUG: change_sender.cpp:153: [ChangeSender][72075186224037888:1][1:7519615990690971457:2268][Inactive] Handle NKikimrChangeExchange.TEvActivateSender 2025-06-24T20:30:31.868534Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSche ... ][72075186224037888:1][0][72075186224037891][25:1126:2764] Disconnected 2025-06-24T20:33:56.192165Z node 25 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:648: [CdcChangeSenderMain][72075186224037888:1][25:982:2764] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvGone { PartitionId: 0 HardError: 0 } 2025-06-24T20:33:56.193207Z node 25 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:468: [CdcChangeSenderMain][72075186224037888:1][25:982:2764] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table/Stream2 TableId: [72057594046644480:5:0] RequestType: ByTableId Operation: OpList RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindCdcStream DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [streamImpl] }] } 2025-06-24T20:33:56.193314Z node 25 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:497: [CdcChangeSenderMain][72075186224037888:1][25:982:2764] Stream is planned to drop, waiting for the EvRemoveSender command 2025-06-24T20:33:56.193610Z node 25 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 25, TabletId: 72075186224037891 not found 2025-06-24T20:33:56.194160Z node 25 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 25, TabletId: 72075186224037892 not found 2025-06-24T20:33:56.369907Z node 25 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715665 at step 3000 at tablet 72075186224037888 { Transactions { TxId: 281474976715665 AckTo { RawX1: 0 RawX2: 0 } } Step: 3000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T20:33:56.370082Z node 25 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:33:56.370468Z node 25 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:33:56.370576Z node 25 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T20:33:56.370686Z node 25 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [3000:281474976715665] in PlanQueue unit at 72075186224037888 2025-06-24T20:33:56.371088Z node 25 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 3000:281474976715665 keys extracted: 0 2025-06-24T20:33:56.371353Z node 25 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T20:33:56.371762Z node 25 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:33:56.374665Z node 25 :TX_DATASHARD DEBUG: datashard.cpp:1822: Add schema snapshot: pathId# [OwnerId: 
72057594046644480, LocalPathId: 2], version# 4, step# 3000, txId# 281474976715665, at tablet# 72075186224037888 2025-06-24T20:33:56.375249Z node 25 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:33:56.381136Z node 25 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,5) wasn't found 2025-06-24T20:33:56.382132Z node 25 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found 2025-06-24T20:33:56.420550Z node 25 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3000} 2025-06-24T20:33:56.420754Z node 25 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:33:56.420844Z node 25 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:33:56.420977Z node 25 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3000 : 281474976715665] from 72075186224037888 at tablet 72075186224037888 send result to client [25:372:2366], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T20:33:56.421098Z node 25 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715665 state Ready TxInFly 0 2025-06-24T20:33:56.421332Z node 25 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:33:56.421725Z node 25 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:131: [ChangeSender][72075186224037888:1][25:644:2540] Handle NKikimr::NDataShard::TEvChangeExchange::TEvRemoveSender { PathId: [OwnerId: 72057594046644480, LocalPathId: 5] } 2025-06-24T20:33:56.421844Z node 25 :CHANGE_EXCHANGE NOTICE: change_sender.cpp:143: [ChangeSender][72075186224037888:1][25:644:2540] Remove sender: type# CdcStream, pathId# [OwnerId: 72057594046644480, LocalPathId: 5] 2025-06-24T20:33:56.422632Z node 25 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:653: [CdcChangeSenderMain][72075186224037888:1][25:982:2764] Handle NKikimr::NDataShard::TEvChangeExchange::TEvRemoveSender { PathId: [OwnerId: 72057594046644480, LocalPathId: 5] } 2025-06-24T20:33:56.425402Z node 25 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715665 datashard 72075186224037888 state Ready 2025-06-24T20:33:56.425568Z node 25 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T20:33:56.563762Z node 25 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyhtbe6c66p5ebthbtfrz5vj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=25&id=MTUyMWRmYWYtYjFkNjFiOTMtMjZmODNkNDctZTg4NmU1NGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:33:56.564816Z node 25 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:8] at 72075186224037888 2025-06-24T20:33:56.565029Z node 25 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=8; 2025-06-24T20:33:56.565196Z node 25 :TX_DATASHARD INFO: datashard_write_operation.cpp:707: Write transaction 8 at 72075186224037888 has an error: Operation is aborting because locks are not valid 2025-06-24T20:33:56.565544Z node 25 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 8 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-24T20:33:56.578131Z node 25 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 8 at tablet 72075186224037888 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-24T20:33:56.578313Z node 25 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:33:56.578881Z node 25 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [25:1175:2848], Table: `/Root/Table` ([72057594046644480:2:3]), SessionActorId: [25:1138:2848]Got LOCKS BROKEN for table `/Root/Table`. ShardID=72075186224037888, Sink=[25:1175:2848].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-24T20:33:56.579184Z node 25 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [25:1168:2848], SessionActorId: [25:1138:2848], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Table`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[25:1138:2848]. isRollback=0 2025-06-24T20:33:56.579917Z node 25 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=25&id=MTUyMWRmYWYtYjFkNjFiOTMtMjZmODNkNDctZTg4NmU1NGQ=, ActorId: [25:1138:2848], ActorState: ExecuteState, TraceId: 01jyhtbe6c66p5ebthbtfrz5vj, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [25:1249:2848] from: [25:1168:2848] 2025-06-24T20:33:56.580234Z node 25 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:9] at 72075186224037888 2025-06-24T20:33:56.580315Z node 25 :TX_DATASHARD DEBUG: execute_write_unit.cpp:420: Skip empty write operation for [0:9] at 72075186224037888 2025-06-24T20:33:56.580572Z node 25 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:33:56.580902Z node 25 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [25:1249:2848] TxId: 281474976715666. Ctx: { TraceId: 01jyhtbe6c66p5ebthbtfrz5vj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=25&id=MTUyMWRmYWYtYjFkNjFiOTMtMjZmODNkNDctZTg4NmU1NGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Table`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-24T20:33:56.581359Z node 25 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=25&id=MTUyMWRmYWYtYjFkNjFiOTMtMjZmODNkNDctZTg4NmU1NGQ=, ActorId: [25:1138:2848], ActorState: ExecuteState, TraceId: 01jyhtbe6c66p5ebthbtfrz5vj, Create QueryResponse for error on request, msg: >>>>> GetRecords path=/Root/Table/Stream partitionId=0 2025-06-24T20:33:56.586460Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-24T20:33:56.586609Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-24T20:33:56.587992Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 2 Topic 'Table/Stream/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 1 max time lag 0ms effective offset 0 2025-06-24T20:33:56.588672Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 139 count 1 last offset 0, current partition end offset: 1 2025-06-24T20:33:56.588801Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 2025-06-24T20:33:56.588990Z node 25 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 1 parts_count 0 source 1 size 139 accessed 0 times before, last time 1970-01-01T00:00:02.000000Z 2025-06-24T20:33:56.589111Z node 25 :PERSQUEUE DEBUG: read.h:121: Reading cookie 2. All 1 blobs are from cache. 2025-06-24T20:33:56.589247Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-24T20:33:56.589606Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 121 from pos 0 cbcount 1 2025-06-24T20:33:56.590524Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T20:33:56.590827Z node 25 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037889' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' >> TPQTest::TestUserInfoCompatibility >> TPQTabletTests::UpdateConfig_2 [GOOD] >> TPQTabletTests::Config_TEvTxCommit_After_Restart [GOOD] >> TPQTabletTests::All_New_Partitions_In_Another_Tablet >> TPQTabletTests::Test_Waiting_For_TEvReadSet_When_There_Are_More_Senders_Than_Recipients >> TPQTabletTests::All_New_Partitions_In_Another_Tablet [GOOD] >> TPQTabletTests::Test_Waiting_For_TEvReadSet_When_There_Are_More_Senders_Than_Recipients [GOOD] >> GenericFederatedQuery::ClickHouseSelectCount [GOOD] >> GenericFederatedQuery::ClickHouseFilterPushdown >> TPQTabletTests::After_Restarting_The_Tablet_Sends_A_TEvReadSet_For_Transactions_In_The_EXECUTED_State >> GenericFederatedQuery::YdbSelectCount [GOOD] >> TPQTabletTests::Test_Waiting_For_TEvReadSet_When_There_Are_Fewer_Senders_Than_Recipients >> KqpWorkloadServiceTables::TestCleanupOnServiceRestart [GOOD] >> KqpWorkloadServiceTables::TestLeaseExpiration ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadServiceActors::TestCpuLoadActor [GOOD] Test command err: 2025-06-24T20:33:09.490920Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616671452039792:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:09.491048Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004733/r3tmp/tmpuTrAZE/pdisk_1.dat 2025-06-24T20:33:10.078540Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:10.078654Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:10.095245Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:33:10.196372Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:10.198073Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616671452039761:2079] 1750797189490027 != 1750797189490030 TServer::EnableGrpc on GrpcPort 8710, node 1 2025-06-24T20:33:10.463747Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:10.463780Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:10.463789Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:10.463905Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:33:10.506422Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26891 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:11.032935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:11.076017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:33:14.056368Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=OTQxYWQxZmUtMWI2MmI2MjUtM2Q1MTZhMDUtYTZkZDZiZDM=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id OTQxYWQxZmUtMWI2MmI2MjUtM2Q1MTZhMDUtYTZkZDZiZDM= 2025-06-24T20:33:14.056897Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-24T20:33:14.056920Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:114: [WorkloadService] [Service] Resource pools was disabled 2025-06-24T20:33:14.059996Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=OTQxYWQxZmUtMWI2MmI2MjUtM2Q1MTZhMDUtYTZkZDZiZDM=, ActorId: [1:7519616692926876859:2288], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:33:14.082709Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=NDUyYzg3YjItYzBjZTJmNC04MmQ4MjFlMS05ODk5ZWNlZg==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NDUyYzg3YjItYzBjZTJmNC04MmQ4MjFlMS05ODk5ZWNlZg== 2025-06-24T20:33:14.083046Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=NDUyYzg3YjItYzBjZTJmNC04MmQ4MjFlMS05ODk5ZWNlZg==, ActorId: [1:7519616692926876861:2289], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:33:14.083245Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=NDUyYzg3YjItYzBjZTJmNC04MmQ4MjFlMS05ODk5ZWNlZg==, ActorId: [1:7519616692926876861:2289], ActorState: ReadyState, TraceId: 01jyhta4v302tfhm4vmsn2p634, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [1:7519616692926876860:2296] database: Root databaseId: /Root pool id: 2025-06-24T20:33:14.083366Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:575: SessionId: 
ydb://session/3?node_id=1&id=NDUyYzg3YjItYzBjZTJmNC04MmQ4MjFlMS05ODk5ZWNlZg==, ActorId: [1:7519616692926876861:2289], ActorState: ExecuteState, TraceId: 01jyhta4v302tfhm4vmsn2p634, Sending CompileQuery request 2025-06-24T20:33:14.439632Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: SessionId: ydb://session/3?node_id=1&id=NDUyYzg3YjItYzBjZTJmNC04MmQ4MjFlMS05ODk5ZWNlZg==, ActorId: [1:7519616692926876861:2289], ActorState: ExecuteState, TraceId: 01jyhta4v302tfhm4vmsn2p634, ExecutePhyTx, tx: 0x000050C0002FC458 literal: 0 commit: 1 txCtx.DeferredEffects.size(): 0 2025-06-24T20:33:14.439698Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1503: SessionId: ydb://session/3?node_id=1&id=NDUyYzg3YjItYzBjZTJmNC04MmQ4MjFlMS05ODk5ZWNlZg==, ActorId: [1:7519616692926876861:2289], ActorState: ExecuteState, TraceId: 01jyhta4v302tfhm4vmsn2p634, Sending to Executer TraceId: 0 8 2025-06-24T20:33:14.439871Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1561: SessionId: ydb://session/3?node_id=1&id=NDUyYzg3YjItYzBjZTJmNC04MmQ4MjFlMS05ODk5ZWNlZg==, ActorId: [1:7519616692926876861:2289], ActorState: ExecuteState, TraceId: 01jyhta4v302tfhm4vmsn2p634, Created new KQP executer: [1:7519616692926876865:2289] isRollback: 0 2025-06-24T20:33:14.486260Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1852: SessionId: ydb://session/3?node_id=1&id=NDUyYzg3YjItYzBjZTJmNC04MmQ4MjFlMS05ODk5ZWNlZg==, ActorId: [1:7519616692926876861:2289], ActorState: ExecuteState, TraceId: 01jyhta4v302tfhm4vmsn2p634, Forwarded TEvStreamData to [1:7519616692926876860:2296] 2025-06-24T20:33:14.491071Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616671452039792:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:14.491168Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:14.492641Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=1&id=NDUyYzg3YjItYzBjZTJmNC04MmQ4MjFlMS05ODk5ZWNlZg==, ActorId: [1:7519616692926876861:2289], ActorState: ExecuteState, TraceId: 01jyhta4v302tfhm4vmsn2p634, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-06-24T20:33:14.492869Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=1&id=NDUyYzg3YjItYzBjZTJmNC04MmQ4MjFlMS05ODk5ZWNlZg==, ActorId: [1:7519616692926876861:2289], ActorState: ExecuteState, TraceId: 01jyhta4v302tfhm4vmsn2p634, txInfo Status: Committed Kind: Pure TotalDuration: 53.354 ServerDuration: 53.277 QueriesCount: 2 2025-06-24T20:33:14.492930Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=1&id=NDUyYzg3YjItYzBjZTJmNC04MmQ4MjFlMS05ODk5ZWNlZg==, ActorId: [1:7519616692926876861:2289], ActorState: ExecuteState, TraceId: 01jyhta4v302tfhm4vmsn2p634, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-24T20:33:14.493118Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=1&id=NDUyYzg3YjItYzBjZTJmNC04MmQ4MjFlMS05ODk5ZWNlZg==, ActorId: [1:7519616692926876861:2289], ActorState: ExecuteState, TraceId: 01jyhta4v302tfhm4vmsn2p634, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:33:14.493149Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: 
ydb://session/3?node_id=1&id=NDUyYzg3YjItYzBjZTJmNC04MmQ4MjFlMS05ODk5ZWNlZg==, ActorId: [1:7519616692926876861:2289], ActorState: ExecuteState, TraceId: 01jyhta4v302tfhm4vmsn2p634, EndCleanup, isFinal: 1 2025-06-24T20:33:14.493201Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=1&id=NDUyYzg3YjItYzBjZTJmNC04MmQ4MjFlMS05ODk5ZWNlZg==, ActorId: [1:7519616692926876861:2289], ActorState: ExecuteState, TraceId: 01jyhta4v302tfhm4vmsn2p634, Sent query response back to proxy, proxyRequestId: 3, proxyId: [1:7519616671452039993:2236] 2025-06-24T20:33:14.493245Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=1&id=NDUyYzg3YjItYzBjZTJmNC04MmQ4MjFlMS05ODk5ZWNlZg==, ActorId: [1:7519616692926876861:2289], ActorState: unknown state, TraceId: 01jyhta4v302tfhm4vmsn2p634, Cleanup temp tables: 0 2025-06-24T20:33:14.503531Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=1&id=NDUyYzg3YjItYzBjZTJmNC04MmQ4MjFlMS05ODk5ZWNlZg==, ActorId: [1:7519616692926876861:2289], ActorState: unknown state, TraceId: 01jyhta4v302tfhm4vmsn2p634, Session actor destroyed 2025-06-24T20:33:14.513286Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=1&id=OTQxYWQxZmUtMWI2MmI2MjUtM2Q1MTZhMDUtYTZkZDZiZDM=, ActorId: [1:7519616692926876859:2288], ActorState: ReadyState, Session closed due to explicit close event 2025-06-24T20:33:14.513343Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=1&id=OTQxYWQxZmUtMWI2MmI2MjUtM2Q1MTZhMDUtYTZkZDZiZDM=, ActorId: [1:7519616692926876859:2288], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:33:14.513359Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=1&id=OTQxYWQxZmUtMWI2MmI2MjUtM2Q1MTZhMDUtYTZkZDZiZDM=, ActorId: [1:7519616692926876859:2288], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T20:33:14.513386Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=1&id=OTQxYWQxZmUtMWI2MmI2MjUtM2Q1MTZhMDUtYTZkZDZiZDM=, ActorId: [1:7519616692926876859:2288], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T20:33:14.513434Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=1&id=OTQxYWQxZmUtMWI2MmI2MjUtM2Q1MTZhMDUtYTZkZDZiZDM=, ActorId: [1:7519616692926876859:2288], ActorState: unknown state, Session actor destroyed 2025-06-24T20:33:15.931636Z node 2 :METADATA_ ... 
RunQuery SELECT SUM(CpuThreads) AS ThreadsCount, SUM(CpuThreads * (1.0 - CpuIdle)) AS TotalLoad FROM `.sys/nodes`; rpcActor: [8:7519616868542420232:2322] database: /Root databaseId: /Root pool id: default 2025-06-24T20:33:55.628627Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [8:7519616868542420231:2321], DatabaseId: /Root, PoolId: default, SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc= 2025-06-24T20:33:55.628674Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [8:7519616868542420235:2324], DatabaseId: /Root, PoolId: default, SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, Start pool fetching 2025-06-24T20:33:55.628698Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519616868542420236:2325], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-06-24T20:33:55.629369Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519616868542420236:2325], DatabaseId: /Root, PoolId: default, Pool info successfully fetched 2025-06-24T20:33:55.629436Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:107: [WorkloadService] [TPoolResolverActor] ActorId: [8:7519616868542420235:2324], DatabaseId: /Root, PoolId: default, SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, Pool info successfully resolved 2025-06-24T20:33:55.629470Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:279: [WorkloadService] [Service] Successfully fetched pool default, DatabaseId: /Root, SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc= 2025-06-24T20:33:55.629545Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:203: [WorkloadService] [TPoolHandlerActorBase] ActorId: [8:7519616864247452886:2309], DatabaseId: /Root, PoolId: default, Received new request, worker id: [8:7519616868542420231:2321], session id: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc= 2025-06-24T20:33:55.629593Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:313: [WorkloadService] [TPoolHandlerActorBase] ActorId: [8:7519616864247452886:2309], DatabaseId: /Root, PoolId: default, Reply continue success to [8:7519616868542420231:2321], session id: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, local in flight: 1 2025-06-24T20:33:55.629622Z node 8 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:290: [WorkloadService] [Service] Request placed into pool, DatabaseId: /Root, PoolId: default, SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc= 2025-06-24T20:33:55.629686Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:527: SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, ActorId: [8:7519616868542420231:2321], ActorState: ExecuteState, TraceId: 01jyhtbddcbz02xjztb5jsv0t9, continue request, pool id: default 2025-06-24T20:33:55.629894Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519616868542420234:2323], DatabaseId: /Root, PoolId: default, Pool info successfully fetched 2025-06-24T20:33:55.629930Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched 
pool default, DatabaseId: /Root 2025-06-24T20:33:55.630201Z node 8 :KQP_SESSION INFO: kqp_query_state.cpp:78: Scheme error, pathId: [OwnerId: 72057594046644480, LocalPathId: 1], status: PathNotTable 2025-06-24T20:33:56.227000Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, ActorId: [8:7519616868542420231:2321], ActorState: ExecuteState, TraceId: 01jyhtbddcbz02xjztb5jsv0t9, ExecutePhyTx, tx: 0x000050C0005D7E58 literal: 0 commit: 0 txCtx.DeferredEffects.size(): 0 2025-06-24T20:33:56.227067Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1503: SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, ActorId: [8:7519616868542420231:2321], ActorState: ExecuteState, TraceId: 01jyhtbddcbz02xjztb5jsv0t9, Sending to Executer TraceId: 0 8 2025-06-24T20:33:56.227196Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1561: SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, ActorId: [8:7519616868542420231:2321], ActorState: ExecuteState, TraceId: 01jyhtbddcbz02xjztb5jsv0t9, Created new KQP executer: [8:7519616872837387553:2321] isRollback: 0 2025-06-24T20:33:56.235368Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, ActorId: [8:7519616868542420231:2321], ActorState: ExecuteState, TraceId: 01jyhtbddcbz02xjztb5jsv0t9, TEvTxResponse, CurrentTx: 1/2 response.status: SUCCESS 2025-06-24T20:33:56.235437Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, ActorId: [8:7519616868542420231:2321], ActorState: ExecuteState, TraceId: 01jyhtbddcbz02xjztb5jsv0t9, ExecutePhyTx, tx: 0x000050C000606058 literal: 1 commit: 1 txCtx.DeferredEffects.size(): 0 2025-06-24T20:33:56.236023Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, ActorId: [8:7519616868542420231:2321], ActorState: ExecuteState, TraceId: 01jyhtbddcbz02xjztb5jsv0t9, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-06-24T20:33:56.236171Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, ActorId: [8:7519616868542420231:2321], ActorState: ExecuteState, TraceId: 01jyhtbddcbz02xjztb5jsv0t9, txInfo Status: Committed Kind: ReadOnly TotalDuration: 9.31 ServerDuration: 9.19 QueriesCount: 2 2025-06-24T20:33:56.236265Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, ActorId: [8:7519616868542420231:2321], ActorState: ExecuteState, TraceId: 01jyhtbddcbz02xjztb5jsv0t9, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-24T20:33:56.236338Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, ActorId: [8:7519616868542420231:2321], ActorState: ExecuteState, TraceId: 01jyhtbddcbz02xjztb5jsv0t9, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 1 2025-06-24T20:33:56.236389Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:233: [WorkloadService] [TPoolHandlerActorBase] ActorId: [8:7519616864247452886:2309], DatabaseId: /Root, PoolId: 
default, Received cleanup request, worker id: [8:7519616868542420231:2321], session id: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, duration: 0.607562s, cpu consumed: 0.001798s 2025-06-24T20:33:56.236441Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:437: [WorkloadService] [TPoolHandlerActorBase] ActorId: [8:7519616864247452886:2309], DatabaseId: /Root, PoolId: default, Reply cleanup success to [8:7519616868542420231:2321], session id: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, local in flight: 0 2025-06-24T20:33:56.236509Z node 8 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:335: [WorkloadService] [Service] Request finished in pool, DatabaseId: /Root, PoolId: default, Duration: 0.607562s, CpuConsumed: 0.001798s, AdjustCpuQuota: 0 2025-06-24T20:33:56.236627Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, ActorId: [8:7519616868542420231:2321], ActorState: CleanupState, TraceId: 01jyhtbddcbz02xjztb5jsv0t9, EndCleanup, isFinal: 0 2025-06-24T20:33:56.236694Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, ActorId: [8:7519616868542420231:2321], ActorState: CleanupState, TraceId: 01jyhtbddcbz02xjztb5jsv0t9, Sent query response back to proxy, proxyRequestId: 6, proxyId: [8:7519616838477648177:2147] 2025-06-24T20:33:56.237010Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TCpuLoadFetcherActor] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, TxId: 2025-06-24T20:33:56.237107Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:367: [TQueryBase] [TCpuLoadFetcherActor] Finish with SUCCESS, SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, TxId: 2025-06-24T20:33:56.239825Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, ActorId: [8:7519616868542420231:2321], ActorState: ReadyState, Session closed due to explicit close event 2025-06-24T20:33:56.239872Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, ActorId: [8:7519616868542420231:2321], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:33:56.239902Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, ActorId: [8:7519616868542420231:2321], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T20:33:56.239928Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, ActorId: [8:7519616868542420231:2321], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T20:33:56.240019Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=8&id=M2M2ZGY5Y2EtYTRmMGQwYzAtNmFjYzcwMWQtZTczYzI4NDc=, ActorId: [8:7519616868542420231:2321], ActorState: unknown state, Session actor destroyed 2025-06-24T20:33:56.304216Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: 
ydb://session/3?node_id=8&id=YjZlYzAwODYtODM0N2VmZmMtNGE1NTM2ZmYtYTI4M2NhYmE=, ActorId: [8:7519616864247452734:2298], ActorState: ReadyState, Session closed due to explicit close event 2025-06-24T20:33:56.304276Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=YjZlYzAwODYtODM0N2VmZmMtNGE1NTM2ZmYtYTI4M2NhYmE=, ActorId: [8:7519616864247452734:2298], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:33:56.304314Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=YjZlYzAwODYtODM0N2VmZmMtNGE1NTM2ZmYtYTI4M2NhYmE=, ActorId: [8:7519616864247452734:2298], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T20:33:56.304346Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=8&id=YjZlYzAwODYtODM0N2VmZmMtNGE1NTM2ZmYtYTI4M2NhYmE=, ActorId: [8:7519616864247452734:2298], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T20:33:56.304457Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=8&id=YjZlYzAwODYtODM0N2VmZmMtNGE1NTM2ZmYtYTI4M2NhYmE=, ActorId: [8:7519616864247452734:2298], ActorState: unknown state, Session actor destroyed >> TPQTabletTests::Test_Waiting_For_TEvReadSet_When_There_Are_Fewer_Senders_Than_Recipients [GOOD] >> TPQTabletTests::After_Restarting_The_Tablet_Sends_A_TEvReadSet_For_Transactions_In_The_EXECUTED_State [GOOD] >> TPQTabletTests::Test_Waiting_For_TEvReadSet_Without_Recipients >> Viewer::ServerlessWithExclusiveNodes [GOOD] >> Viewer::SharedDoesntShowExclusiveNodes >> TPartitionTests::DifferentWriteTxBatchingOptions >> TFetchRequestTests::HappyWay >> TPQTabletTests::Test_Waiting_For_TEvReadSet_Without_Recipients [GOOD] >> TPQTabletTests::Test_Waiting_For_TEvReadSet_Without_Senders >> THealthCheckTest::ProtobufUnderLimitFor100LargeVdisksIssues [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTabletTests::After_Restarting_The_Tablet_Sends_A_TEvReadSet_For_Transactions_In_The_EXECUTED_State [GOOD] Test command err: 2025-06-24T20:33:57.715503Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:33:57.720073Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037927937] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:33:57.720440Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037927937] doesn't have tx info 2025-06-24T20:33:57.720493Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:33:57.720562Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-24T20:33:57.720607Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:33:57.720647Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:57.720700Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:33:57.735402Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:181:2194], now have 1 active actors on pipe 2025-06-24T20:33:57.735503Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:33:57.751209Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-24T20:33:57.757029Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-24T20:33:57.757195Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:57.758346Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1 actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 FederationAccount: 
"federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-24T20:33:57.758495Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:33:57.759039Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:33:57.759543Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:189:2200] 2025-06-24T20:33:57.760619Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-24T20:33:57.760717Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:189:2200] 2025-06-24T20:33:57.760788Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:33:57.761335Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T20:33:57.761476Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 1 2025-06-24T20:33:57.761526Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 1 done 2025-06-24T20:33:57.761823Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T20:33:57.762044Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T20:33:57.762270Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:33:57.765527Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T20:33:57.765644Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:33:57.766090Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:196:2205], now have 1 active actors on pipe 2025-06-24T20:33:57.766754Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:199:2207], now have 1 active actors on pipe 2025-06-24T20:33:57.766831Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1713: [PQ: 72057594037927937] Handle TEvPersQueue::TEvDropTablet 2025-06-24T20:33:58.360504Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:33:58.364551Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037927937] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:33:58.364881Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037927937] doesn't have tx info 2025-06-24T20:33:58.364922Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:33:58.364956Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-24T20:33:58.364991Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:33:58.365051Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:58.365102Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:33:58.383730Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [2:181:2194], now have 1 active actors on pipe 2025-06-24T20:33:58.383868Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:33:58.384179Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037927937] Config update version 2(current 0) received from actor [2:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-24T20:33:58.387092Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-24T20:33:58.387272Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:58.388675Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 2 actor [2:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: 
"/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-24T20:33:58.388810Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:33:58.388880Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:1:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:33:58.389390Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:33:58.389675Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:189:2200] 2025-06-24T20:33:58.390657Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-24T20:33:58.390720Z node 2 :PERSQUEUE ... 37659Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:34:01.137762Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state CALCULATED 2025-06-24T20:34:01.137805Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67891, State CALCULATED 2025-06-24T20:34:01.137854Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72057594037927937] TxId 67891 State CALCULATED FrontTxId 67891 2025-06-24T20:34:01.137912Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67891, NewState WAIT_RS 2025-06-24T20:34:01.137958Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67891 moved from CALCULATED to WAIT_RS 2025-06-24T20:34:01.138006Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3988: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSet to 1 receivers. Wait TEvTxProcessing::TEvReadSet from 1 senders. 2025-06-24T20:34:01.138051Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3998: [PQ: 72057594037927937] Send TEvReadSet to tablet 22222 2025-06-24T20:34:01.138118Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4482: [PQ: 72057594037927937] HaveParticipantsDecision 0 2025-06-24T20:34:01.149409Z node 6 :PERSQUEUE DEBUG: pqtablet_mock.cpp:87: Client pipe to tablet 72057594037927937 from 22222 is reset 2025-06-24T20:34:01.194021Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:34:01.196773Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037927937] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:34:01.198197Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:741: [PQ: 72057594037927937] has a tx info 2025-06-24T20:34:01.198270Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037927937] PlanStep 110, PlanTxId 67891, ExecStep 110, ExecTxId 67891 2025-06-24T20:34:01.198430Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:999: [PQ: 72057594037927937] ReadRange pair. Key tx_00000000000000067890, Status 0 2025-06-24T20:34:01.198541Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1008: [PQ: 72057594037927937] Restore Tx. TxId: 67890, Step: 100, State: EXECUTED, WriteId: 2025-06-24T20:34:01.198632Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:999: [PQ: 72057594037927937] ReadRange pair. 
Key tx_00000000000000067891, Status 0 2025-06-24T20:34:01.198682Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1008: [PQ: 72057594037927937] Restore Tx. TxId: 67891, Step: 110, State: CALCULATED, WriteId: 2025-06-24T20:34:01.198718Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1011: [PQ: 72057594037927937] Fix tx state 2025-06-24T20:34:01.198779Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037927937] Txs.size=2, PlannedTxs.size=2 2025-06-24T20:34:01.198829Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4918: [PQ: 72057594037927937] top tx queue (100, 67890) 2025-06-24T20:34:01.198891Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4938: [PQ: 72057594037927937] TxsOrder: 67890 EXECUTED 0 2025-06-24T20:34:01.198937Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4938: [PQ: 72057594037927937] TxsOrder: 67891 PLANNED 0 2025-06-24T20:34:01.199839Z node 6 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:34:01.199909Z node 6 :PERSQUEUE INFO: pq_impl.cpp:787: [PQ: 72057594037927937] has a tx writes info 2025-06-24T20:34:01.200071Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:34:01.200462Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:34:01.200797Z node 6 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [6:365:2341] 2025-06-24T20:34:01.201978Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitDiskStatusStep 2025-06-24T20:34:01.207348Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitMetaStep 2025-06-24T20:34:01.207723Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInfoRangeStep 2025-06-24T20:34:01.208462Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitDataRangeStep 2025-06-24T20:34:01.208761Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitDataStep 2025-06-24T20:34:01.208807Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T20:34:01.208852Z node 6 :PERSQUEUE INFO: partition_init.cpp:895: [topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T20:34:01.208891Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-24T20:34:01.208943Z node 6 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 3 [6:365:2341] 2025-06-24T20:34:01.209000Z node 6 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:34:01.209061Z node 6 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-06-24T20:34:01.209140Z node 6 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 6 2025-06-24T20:34:01.209315Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3988: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSet to 1 receivers. Wait TEvTxProcessing::TEvReadSet from 1 senders. 2025-06-24T20:34:01.209364Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3998: [PQ: 72057594037927937] Send TEvReadSet to tablet 22222 2025-06-24T20:34:01.209481Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state EXECUTED 2025-06-24T20:34:01.209525Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State EXECUTED 2025-06-24T20:34:01.209566Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72057594037927937] TxId 67890 State EXECUTED FrontTxId 67890 2025-06-24T20:34:01.209607Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4007: [PQ: 72057594037927937] TPersQueue::SendEvReadSetAckToSenders 2025-06-24T20:34:01.209647Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState WAIT_RS_ACKS 2025-06-24T20:34:01.209718Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from EXECUTED to WAIT_RS_ACKS 2025-06-24T20:34:01.209785Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 67890] PredicateAcks: 0/1 2025-06-24T20:34:01.209824Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4560: [PQ: 72057594037927937] HaveAllRecipientsReceive 0, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-24T20:34:01.209861Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 67890] PredicateAcks: 0/1 2025-06-24T20:34:01.209920Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PLANNED 2025-06-24T20:34:01.209947Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67891, State PLANNED 2025-06-24T20:34:01.209975Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72057594037927937] TxId 67891 State PLANNED FrontTxId 67891 2025-06-24T20:34:01.210002Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4427: [PQ: 72057594037927937] TxQueue.size 1 2025-06-24T20:34:01.210045Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:836: [PQ: 72057594037927937] New ExecStep 110, ExecTxId 67891 2025-06-24T20:34:01.210115Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67891, NewState CALCULATING 2025-06-24T20:34:01.210157Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67891 moved from PLANNED to CALCULATING 2025-06-24T20:34:01.210327Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:34:01.210480Z node 6 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 110, TxId 67891 2025-06-24T20:34:01.211045Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3507: [PQ: 72057594037927937] Handle TEvPQ::TEvTxCalcPredicateResult Step 110, TxId 67891, Partition 0, Predicate 1 2025-06-24T20:34:01.211091Z node 6 :PERSQUEUE DEBUG: transaction.cpp:218: [TxId: 67891] Handle TEvTxCalcPredicateResult 2025-06-24T20:34:01.211128Z node 6 :PERSQUEUE DEBUG: transaction.cpp:267: [TxId: 67891] Partition responses 1/1 2025-06-24T20:34:01.211224Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state CALCULATING 
2025-06-24T20:34:01.211277Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67891, State CALCULATING 2025-06-24T20:34:01.211322Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72057594037927937] TxId 67891 State CALCULATING FrontTxId 67891 2025-06-24T20:34:01.211361Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4443: [PQ: 72057594037927937] Received 1, Expected 1 2025-06-24T20:34:01.211406Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67891, NewState CALCULATED 2025-06-24T20:34:01.211448Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67891 moved from CALCULATING to CALCULATED 2025-06-24T20:34:01.211502Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72057594037927937] write key for TxId 67891 2025-06-24T20:34:01.211745Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67891] save tx TxId: 67891 State: CALCULATED MinStep: 153 MaxStep: 30153 PredicatesReceived { TabletId: 22222 } PredicateRecipients: 22222 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Step: 110 Predicate: true Kind: KIND_DATA SourceActor { RawX1: 180 RawX2: 25769805969 } Partitions { } 2025-06-24T20:34:01.211859Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:34:01.211945Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2918: [PQ: 72057594037927937] Handle TEvTabletPipe::TEvClientConnected 2025-06-24T20:34:01.211992Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037927937] Connected to tablet 22222 2025-06-24T20:34:01.216119Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:34:01.216184Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state CALCULATED 2025-06-24T20:34:01.216222Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67891, State CALCULATED 2025-06-24T20:34:01.216269Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72057594037927937] TxId 67891 State CALCULATED FrontTxId 67891 2025-06-24T20:34:01.216325Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67891, NewState WAIT_RS 2025-06-24T20:34:01.216387Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67891 moved from CALCULATED to WAIT_RS 2025-06-24T20:34:01.216432Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3988: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSet to 1 receivers. Wait TEvTxProcessing::TEvReadSet from 1 senders. 
2025-06-24T20:34:01.216470Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3998: [PQ: 72057594037927937] Send TEvReadSet to tablet 22222 2025-06-24T20:34:01.216540Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4482: [PQ: 72057594037927937] HaveParticipantsDecision 0 ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::YdbSelectCount [GOOD] Test command err: Trying to start YDB, gRPC: 8194, MsgBus: 20667 2025-06-24T20:33:21.678131Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616721464112002:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:21.678373Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002790/r3tmp/tmpIQfc5n/pdisk_1.dat 2025-06-24T20:33:22.155571Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616721464111902:2079] 1750797201653040 != 1750797201653043 2025-06-24T20:33:22.215700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:22.215863Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:22.216595Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:22.242566Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8194, node 1 2025-06-24T20:33:22.321437Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:33:22.547984Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:22.548011Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:22.548019Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:22.548176Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:33:22.672216Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20667 TClient is connected to server localhost:20667 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:23.207042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:25.548074Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616738643981731:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:25.548235Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:25.883385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:26.042527Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616742938949151:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:26.042630Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:26.042883Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616742938949156:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:26.046274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:26.058768Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616742938949158:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T20:33:26.147959Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616742938949198:2399] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:26.659251Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616721464112002:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:26.659363Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:26.910374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:27.516250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:28.057944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:28.589270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:29.141218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:29.689487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:29.752690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:33:31.940937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710704:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-24T20:33:31.985483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710705:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:31.987085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710706:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:31.989275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710707:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/sch ... eACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:48.861969Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:48.947539Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:33:52.224184Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519616857196215659:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:52.224315Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:52.256987Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:52.338381Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519616857196215781:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:52.338489Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:52.338793Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519616857196215786:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:52.343101Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:52.360858Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519616857196215788:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T20:33:52.458181Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519616857196215828:2394] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:52.850817Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519616835721378567:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:52.859684Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:53.161955Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:53.826304Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:54.563516Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:55.167454Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:55.773463Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715685:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:56.391239Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:56.437662Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:33:58.970977Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715709:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } columns { name: "col2" type { type_id: DOUBLE } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. GRpcStatusCode: 0 >> TPQTest::TestUserInfoCompatibility [GOOD] >> TPQTest::TestSourceIdDropByUserWrites >> ResourcePoolsDdl::TestPoolSwitchToLimitedState [GOOD] >> ResourcePoolsDdl::TestDropResourcePool >> TPQTabletTests::Test_Waiting_For_TEvReadSet_Without_Senders [GOOD] >> THealthCheckTest::UnknowPDiskState [GOOD] >> THealthCheckTest::TestSystemStateRetriesAfterReceivingResponse >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionFetchToken [GOOD] >> GenericFederatedQuery::IcebergHiveSaSelectCount [GOOD] >> GenericFederatedQuery::IcebergHiveSaFilterPushdown |89.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/ut/ydb-core-client-ut |89.1%| [LD] {RESULT} $(B)/ydb/core/client/ut/ydb-core-client-ut |89.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/ut/ydb-core-client-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTabletTests::Test_Waiting_For_TEvReadSet_Without_Senders [GOOD] Test command err: 2025-06-24T20:33:59.049720Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:33:59.053808Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037927937] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:33:59.054127Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037927937] doesn't have tx info 2025-06-24T20:33:59.054202Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:33:59.054244Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-24T20:33:59.054294Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:33:59.054331Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:59.054397Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:33:59.084127Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:181:2194], now have 1 active actors on pipe 2025-06-24T20:33:59.084243Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:33:59.107277Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-24T20:33:59.110290Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-24T20:33:59.110429Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:59.112159Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1 actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: 
"/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-24T20:33:59.112317Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:33:59.112401Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:1:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:33:59.113015Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:33:59.113462Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:189:2200] 2025-06-24T20:33:59.114443Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-24T20:33:59.114503Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:189:2200] 2025-06-24T20:33:59.114556Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:33:59.115085Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T20:33:59.115240Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 1 2025-06-24T20:33:59.115283Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 1 done 2025-06-24T20:33:59.115448Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T20:33:59.115601Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T20:33:59.115907Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:33:59.116144Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:191:2202] 2025-06-24T20:33:59.116881Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:1:Initializer] Initializing completed. 2025-06-24T20:33:59.116938Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'topic' partition 1 generation 2 [1:191:2202] 2025-06-24T20:33:59.116983Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 1, State: StateInit] SYNC INIT topic topic partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:33:59.117312Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Process pending events. 
Count 0 2025-06-24T20:33:59.117387Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user reinit request with generation 1 2025-06-24T20:33:59.117421Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user reinit with generation 1 done 2025-06-24T20:33:59.117529Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T20:33:59.117640Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T20:33:59.117838Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:33:59.117948Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 1, State: StateIdle] no data for compaction 2025-06-24T20:33:59.122275Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T20:33:59.122362Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:33:59.123057Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 1, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T20:33:59.123110Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 1, State: StateIdle] no data for compaction 2025-06-24T20:33:59.123518Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:204:2211], now have 1 active actors on pipe 2025-06-24T20:33:59.124310Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:207:2213], now have 1 active actors on pipe 2025-06-24T20:33:59.125223Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3225: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 180 RawX2: 4294969489 } TxId: 67890 Config { TabletConfig { PartitionConfig { LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 10485760 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--account--topic" Version: 2 LocalDC: true TopicPath: "/Root/PQ/rt3.dc1--account--topic" YdbDatabasePath: "" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } FederationAccount: "account" MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "client-1" Generation: 0 Important: false } Consumers { Name: "client-3" Generation: 7 Important: false } } BootstrapConfig { } } 2025-06-24T20:33:59.125404Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3715: [PQ: 72057594037927937] Propose TxId 67890, WriteId (empty maybe) 2025-06-24T20:33:59.125503Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-06-24T20:33:59.125539Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State UNKNOWN 2025-06-24T20:33:59.125580Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3940: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-24T20:33:59.125616Z 
node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState PREPARING 2025-06-24T20:33:59.125678Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72057594037927937] write key for TxId 67890 2025-06-24T20:33:59.125978Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: PREPARED MinStep: 231 MaxStep: 18446744073709551615 Kind: KIND_CONFIG TabletConfig { PartitionConfig { LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 10485760 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--account--topic" Version: 2 LocalDC: true TopicPath: "/Root/PQ/rt3.dc1--account--topic" YdbDatabasePath: "" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 ReadRuleGenerations: 2 FederationAccount: "account" MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "client-1" Generation: 2 Important: false } Consumers { Name: "client-3" Generation: 2 Important: false } } BootstrapConfig { } SourceActor { RawX1: 180 ... x TxId: 67890 State: PREPARED MinStep: 138 MaxStep: 30138 PredicateRecipients: 33334 PredicateRecipients: 33333 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Kind: KIND_DATA SourceActor { RawX1: 180 RawX2: 25769805969 } Partitions { } 2025-06-24T20:34:02.301912Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:34:02.305831Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:34:02.305894Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T20:34:02.305927Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State PREPARING 2025-06-24T20:34:02.305960Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState PREPARED 2025-06-24T20:34:02.306187Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3427: [PQ: 72057594037927937] Handle TEvTxProcessing::TEvPlanStep Transactions { TxId: 67890 AckTo { RawX1: 180 RawX2: 25769805969 } } Step: 100 2025-06-24T20:34:02.306251Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARED 2025-06-24T20:34:02.306283Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State PREPARED 2025-06-24T20:34:02.306315Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState PLANNING 2025-06-24T20:34:02.306349Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3816: [PQ: 72057594037927937] PlanStep 100, PlanTxId 67890 2025-06-24T20:34:02.306391Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72057594037927937] write key for TxId 67890 2025-06-24T20:34:02.306508Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: PLANNED MinStep: 138 MaxStep: 30138 PredicateRecipients: 33334 PredicateRecipients: 33333 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Step: 100 Kind: KIND_DATA SourceActor { RawX1: 180 RawX2: 25769805969 } Partitions { } 2025-06-24T20:34:02.306579Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:34:02.309664Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 
2025-06-24T20:34:02.309759Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PLANNING 2025-06-24T20:34:02.309796Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State PLANNING 2025-06-24T20:34:02.309835Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState PLANNED 2025-06-24T20:34:02.309874Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from PLANNING to PLANNED 2025-06-24T20:34:02.309904Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4427: [PQ: 72057594037927937] TxQueue.size 1 2025-06-24T20:34:02.309937Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:836: [PQ: 72057594037927937] New ExecStep 100, ExecTxId 67890 2025-06-24T20:34:02.309990Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState CALCULATING 2025-06-24T20:34:02.310024Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from PLANNED to CALCULATING 2025-06-24T20:34:02.310083Z node 6 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 100, TxId 67890 2025-06-24T20:34:02.310449Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3507: [PQ: 72057594037927937] Handle TEvPQ::TEvTxCalcPredicateResult Step 100, TxId 67890, Partition 0, Predicate 1 2025-06-24T20:34:02.310483Z node 6 :PERSQUEUE DEBUG: transaction.cpp:218: [TxId: 67890] Handle TEvTxCalcPredicateResult 2025-06-24T20:34:02.310515Z node 6 :PERSQUEUE DEBUG: transaction.cpp:267: [TxId: 67890] Partition responses 1/1 2025-06-24T20:34:02.310550Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state CALCULATING 2025-06-24T20:34:02.310582Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State CALCULATING 2025-06-24T20:34:02.310615Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72057594037927937] TxId 67890 State CALCULATING FrontTxId 67890 2025-06-24T20:34:02.310649Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4443: [PQ: 72057594037927937] Received 1, Expected 1 2025-06-24T20:34:02.310682Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState CALCULATED 2025-06-24T20:34:02.310718Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from CALCULATING to CALCULATED 2025-06-24T20:34:02.310755Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72057594037927937] write key for TxId 67890 2025-06-24T20:34:02.310912Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: CALCULATED MinStep: 138 MaxStep: 30138 PredicateRecipients: 33334 PredicateRecipients: 33333 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Step: 100 Predicate: true Kind: KIND_DATA SourceActor { RawX1: 180 RawX2: 25769805969 } Partitions { } 2025-06-24T20:34:02.310986Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:34:02.317252Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:34:02.317319Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state CALCULATED 2025-06-24T20:34:02.317354Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State CALCULATED 2025-06-24T20:34:02.317388Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4306: 
[PQ: 72057594037927937] TxId 67890 State CALCULATED FrontTxId 67890 2025-06-24T20:34:02.317421Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState WAIT_RS 2025-06-24T20:34:02.317458Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from CALCULATED to WAIT_RS 2025-06-24T20:34:02.317498Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3988: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSet to 2 receivers. Wait TEvTxProcessing::TEvReadSet from 0 senders. 2025-06-24T20:34:02.317531Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3998: [PQ: 72057594037927937] Send TEvReadSet to tablet 33334 2025-06-24T20:34:02.317631Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3998: [PQ: 72057594037927937] Send TEvReadSet to tablet 33333 2025-06-24T20:34:02.317686Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4482: [PQ: 72057594037927937] HaveParticipantsDecision 1 2025-06-24T20:34:02.317734Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState EXECUTING 2025-06-24T20:34:02.317788Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from WAIT_RS to EXECUTING 2025-06-24T20:34:02.317817Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4512: [PQ: 72057594037927937] Received 0, Expected 1 2025-06-24T20:34:02.317873Z node 6 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 100, TxId 67890 2025-06-24T20:34:02.317924Z node 6 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 67890 2025-06-24T20:34:02.318083Z node 6 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T20:34:02.320157Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2918: [PQ: 72057594037927937] Handle TEvTabletPipe::TEvClientConnected 2025-06-24T20:34:02.320215Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037927937] Connected to tablet 33334 2025-06-24T20:34:02.320821Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2918: [PQ: 72057594037927937] Handle TEvTabletPipe::TEvClientConnected 2025-06-24T20:34:02.320861Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037927937] Connected to tablet 33333 2025-06-24T20:34:02.329581Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T20:34:02.329673Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:34:02.329829Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3553: [PQ: 72057594037927937] Handle TEvPQ::TEvTxCommitDone Step 100, TxId 67890, Partition 0 2025-06-24T20:34:02.329869Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state EXECUTING 2025-06-24T20:34:02.329900Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State EXECUTING 2025-06-24T20:34:02.329932Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72057594037927937] TxId 67890 State EXECUTING FrontTxId 67890 2025-06-24T20:34:02.329962Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4512: [PQ: 72057594037927937] Received 1, Expected 1 2025-06-24T20:34:02.330011Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4185: [PQ: 72057594037927937] TxId: 67890 send TEvPersQueue::TEvProposeTransactionResult(COMPLETE) 2025-06-24T20:34:02.330047Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4516: [PQ: 
72057594037927937] complete TxId 67890 2025-06-24T20:34:02.330082Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4534: [PQ: 72057594037927937] delete partitions for TxId 67890 2025-06-24T20:34:02.330115Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState EXECUTED 2025-06-24T20:34:02.330151Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from EXECUTING to EXECUTED 2025-06-24T20:34:02.330190Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72057594037927937] write key for TxId 67890 2025-06-24T20:34:02.330352Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: EXECUTED MinStep: 138 MaxStep: 30138 PredicateRecipients: 33334 PredicateRecipients: 33333 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Step: 100 Predicate: true Kind: KIND_DATA SourceActor { RawX1: 180 RawX2: 25769805969 } Partitions { } 2025-06-24T20:34:02.330429Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:34:02.333588Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:34:02.333650Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state EXECUTED 2025-06-24T20:34:02.333685Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State EXECUTED 2025-06-24T20:34:02.333722Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72057594037927937] TxId 67890 State EXECUTED FrontTxId 67890 2025-06-24T20:34:02.333773Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4007: [PQ: 72057594037927937] TPersQueue::SendEvReadSetAckToSenders 2025-06-24T20:34:02.333809Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState WAIT_RS_ACKS 2025-06-24T20:34:02.333839Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from EXECUTED to WAIT_RS_ACKS 2025-06-24T20:34:02.333873Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 67890] PredicateAcks: 0/2 2025-06-24T20:34:02.333898Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4560: [PQ: 72057594037927937] HaveAllRecipientsReceive 0, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-24T20:34:02.333926Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 67890] PredicateAcks: 0/2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::ProtobufUnderLimitFor100LargeVdisksIssues [GOOD] Test command err: 2025-06-24T20:33:20.677040Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:20.677605Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:20.677714Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:20.677973Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:20.678048Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:20.678195Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043f5/r3tmp/tmpivpMTx/pdisk_1.dat 2025-06-24T20:33:21.083281Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21953, node 1 TClient is connected to server localhost:9432 2025-06-24T20:33:21.506101Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:21.506172Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:21.506210Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:21.507061Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:33:30.298911Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:446:2372], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:30.299598Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:30.299818Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:30.301675Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:30.302115Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:30.302253Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043f5/r3tmp/tmpw4tr8c/pdisk_1.dat 2025-06-24T20:33:30.805105Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25310, node 3 TClient is connected to server localhost:6740 2025-06-24T20:33:31.250475Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:31.250538Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:31.250567Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:31.250996Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-3" reason: "YELLOW-e9e2-1231c6b1-4" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-3" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 3 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-4" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 4 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e463-3-3-42" status: YELLOW message: "Available size is less than 12%" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-42" path: "/home/runner/.ya/build/build_root/2btm/0043f5/r3tmp/tmpw4tr8c/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "YELLOW-e463-3-3-43" status: YELLOW message: "Available size is less than 12%" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-43" path: "/home/runner/.ya/build/build_root/2btm/0043f5/r3tmp/tmpw4tr8c/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "YELLOW-e463-3-3-44" status: YELLOW message: "Available size is less than 12%" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-44" path: "/home/runner/.ya/build/build_root/2btm/0043f5/r3tmp/tmpw4tr8c/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } location { id: 3 host: "::1" port: 12001 } 2025-06-24T20:33:39.678703Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:39.679301Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:39.679418Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:39.679674Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:39.679904Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:39.680115Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043f5/r3tmp/tmp64v6cT/pdisk_1.dat 2025-06-24T20:33:40.068982Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16934, node 5 TClient is connected to server localhost:61328 2025-06-24T20:33:40.565859Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:40.565916Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:40.565948Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:40.566488Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-5" reason: "YELLOW-e9e2-1231c6b1-6" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-6" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 6 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "RED-b783-5-5-42" status: RED message: "PDisk state is DeviceIoError" location { storage { node { id: 5 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "5-42" path: "/home/runner/.ya/build/build_root/2btm/0043f5/r3tmp/tmp64v6cT/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "RED-b783-5-5-43" status: RED message: "PDisk state is DeviceIoError" location { storage { node { id: 5 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "5-43" path: "/home/runner/.ya/build/build_root/2btm/0043f5/r3tmp/tmp64v6cT/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "RED-b783-5-5-44" status: RED message: "PDisk state is DeviceIoError" location { storage { node { id: 5 host: "::1" port: 12001 } pool { 
group { vdisk { pdisk { id: "5-44" path: "/home/runner/.ya/build/build_root/2btm/0043f5/r3tmp/tmp64v6cT/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } location { id: 5 host: "::1" port: 12001 } 2025-06-24T20:33:49.300205Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:49.300628Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:49.301103Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:49.301266Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:49.301335Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:49.301506Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043f5/r3tmp/tmplyCT81/pdisk_1.dat 2025-06-24T20:33:49.711821Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31011, node 7 TClient is connected to server localhost:25304 2025-06-24T20:33:50.206569Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:50.206647Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:50.206687Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:50.207736Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:33:59.570918Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:59.571604Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:59.572024Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:59.572114Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:59.572214Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:59.572495Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043f5/r3tmp/tmp4iZoDL/pdisk_1.dat 2025-06-24T20:34:00.030288Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3760, node 9 TClient is connected to server localhost:10738 2025-06-24T20:34:00.564352Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:00.564435Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:00.564483Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:00.564845Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> THiveTest::TestLockTabletExecutionDeleteReboot [GOOD] >> THiveTest::TestLockTabletExecutionBadUnlock ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionFetchToken [GOOD] Test command err: Trying to start YDB, gRPC: 17073, MsgBus: 12462 2025-06-24T20:33:38.616061Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616795221158447:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:38.616094Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002720/r3tmp/tmpOy3N9C/pdisk_1.dat 2025-06-24T20:33:39.129265Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:39.130956Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616795221158427:2079] 1750797218566309 != 1750797218566312 2025-06-24T20:33:39.189534Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:39.189660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:39.192197Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17073, node 1 2025-06-24T20:33:39.250506Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:39.250539Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:39.250554Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:39.250692Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12462 2025-06-24T20:33:39.633081Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12462 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:39.875574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:39.895642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:33:41.998532Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616808106060953:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:41.998666Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:42.262674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:42.409207Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616812401028370:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:42.409329Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:42.409669Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616812401028375:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:42.413394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:42.429615Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616812401028377:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T20:33:42.492115Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616812401028417:2395] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:43.221241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:43.616424Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616795221158447:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:43.616488Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:43.717448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:44.310879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:44.809013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:45.339835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:45.819310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:45.857548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:33:47.655980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710706:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 ... RT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27680, node 2 2025-06-24T20:33:49.071939Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:49.071968Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:49.071979Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:49.072104Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62583 TClient is connected to server localhost:62583 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:33:49.625306Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:49.753880Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 63972, MsgBus: 13902 2025-06-24T20:33:53.002169Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519616853883496816:2216];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:53.002964Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002720/r3tmp/tmpE9Sp1Z/pdisk_1.dat 2025-06-24T20:33:53.236605Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:53.264018Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:53.264149Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:53.267479Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63972, node 3 2025-06-24T20:33:53.364063Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:53.364111Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:53.364129Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:53.364337Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13902 TClient is connected to server localhost:13902 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:33:53.993167Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:33:53.995287Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T20:33:54.001882Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:33:56.916159Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:56.919638Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:56.921899Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:57.155900Z node 3 :KQP_PROXY WARN: kqp_script_executions.cpp:1077: [ScriptExecutions] [TForgetScriptExecutionOperationActor] ExecutionId: , reply BAD_REQUEST, issues: {
: Error: Invalid operation id: ydb/public/sdk/cpp/src/library/operation_id/operation_id.cpp:184: Unable to find key: id } 2025-06-24T20:33:57.158656Z node 3 :KQP_PROXY WARN: kqp_script_executions.cpp:1366: [ScriptExecutions] [TGetScriptExecutionOperationActor] ExecutionId: , reply BAD_REQUEST, issues: {
: Error: Invalid operation id: ydb/public/sdk/cpp/src/library/operation_id/operation_id.cpp:184: Unable to find key: id } Trying to start YDB, gRPC: 14901, MsgBus: 9508 2025-06-24T20:33:58.077773Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519616880157273771:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:58.077899Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002720/r3tmp/tmpJadmqg/pdisk_1.dat 2025-06-24T20:33:58.240935Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519616880157273752:2079] 1750797238077250 != 1750797238077253 2025-06-24T20:33:58.253646Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:58.258501Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:58.258602Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:58.264200Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14901, node 4 2025-06-24T20:33:58.317795Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:58.317826Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:58.317837Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:58.317981Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9508 TClient is connected to server localhost:9508 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:58.986493Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:33:58.995516Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:33:59.121452Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> Viewer::TabletMerging [GOOD] >> Viewer::StorageGroupOutputWithoutFilterNoDepends >> ResourcePoolClassifiersDdl::TestAlterResourcePoolClassifier [GOOD] >> ResourcePoolClassifiersDdl::TestDropResourcePoolClassifier |89.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer |89.1%| [LD] {RESULT} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer |89.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer |89.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut |89.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut |89.2%| [LD] {RESULT} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut |89.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build |89.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build |89.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build |89.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut |89.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut |89.2%| [LD] {RESULT} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut |89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> KqpPg::TableDeleteAllData+useSink [GOOD] >> KqpPg::TableDeleteAllData-useSink >> GenericFederatedQuery::IcebergHadoopBasicSelectCount [GOOD] >> GenericFederatedQuery::IcebergHadoopBasicFilterPushdown >> Viewer::Plan2SvgBad [FAIL] |89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> ResourcePoolClassifiersDdl::TestMultiGroupClassification [GOOD] >> ResourcePoolClassifiersSysView::TestResourcePoolClassifiersSysViewOnServerless >> Cdc::MustNotLoseSchemaSnapshotWithVolatileTx [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentAddIndex |89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> KqpWorkloadService::TestStartQueryAfterCancel [GOOD] >> KqpWorkloadService::TestZeroConcurrentQueryLimit >> GenericFederatedQuery::IcebergHadoopSaFilterPushdown [GOOD] |89.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut |89.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut |89.2%| [LD] {RESULT} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut |89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TSubscriberCombinationsTest::CombinationsMigratedPath [GOOD] >> THiveTest::TestLockTabletExecutionBadUnlock [GOOD] >> THiveTest::TestLockTabletExecutionGoodUnlock >> GenericFederatedQuery::IcebergHiveBasicFilterPushdown [GOOD] >> KqpWorkloadServiceDistributed::TestNodeDisconnect [GOOD] >> KqpWorkloadServiceDistributed::TestDistributedLessConcurrentQueryLimit ------- [TM] {asan, 
default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberCombinationsTest::CombinationsMigratedPath [GOOD] Test command err: =========== Path: "/root/tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/root/tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 2025-06-24T20:32:27.702262Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:3:2050] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:36:2066] 2025-06-24T20:32:27.702350Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:3:2050] Successful handshake: owner# 800, generation# 1 2025-06-24T20:32:27.702517Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:3:2050] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:36:2066] 2025-06-24T20:32:27.702553Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:3:2050] Commit generation: owner# 800, generation# 1 2025-06-24T20:32:27.702625Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:37:2067] 2025-06-24T20:32:27.702657Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:6:2053] Successful handshake: owner# 800, generation# 1 2025-06-24T20:32:27.702905Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:37:2067] 2025-06-24T20:32:27.702936Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:6:2053] Commit generation: owner# 800, generation# 1 2025-06-24T20:32:27.703016Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:39:2069][/root/tenant] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:32:27.703452Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [1:43:2069] 2025-06-24T20:32:27.703490Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:3:2050] Upsert description: path# /root/tenant 2025-06-24T20:32:27.703587Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:3:2050] Subscribe: subscriber# [1:43:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-24T20:32:27.703741Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [1:44:2069] 2025-06-24T20:32:27.703766Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:6:2053] Upsert description: path# /root/tenant 2025-06-24T20:32:27.703812Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:6:2053] Subscribe: subscriber# [1:44:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-24T20:32:27.703923Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [1:45:2069] 2025-06-24T20:32:27.703941Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:9:2056] Upsert description: path# /root/tenant 2025-06-24T20:32:27.703970Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:9:2056] Subscribe: subscriber# [1:45:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 
2025-06-24T20:32:27.704065Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:3:2050] 2025-06-24T20:32:27.704133Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:43:2069] 2025-06-24T20:32:27.704172Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:44:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:6:2053] 2025-06-24T20:32:27.704215Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:44:2069] 2025-06-24T20:32:27.704252Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:45:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:9:2056] 2025-06-24T20:32:27.704281Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:9:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:45:2069] 2025-06-24T20:32:27.704350Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:39:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:40:2069] 2025-06-24T20:32:27.704413Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:39:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:41:2069] 2025-06-24T20:32:27.704455Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:39:2069][/root/tenant] Set up state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:32:27.704544Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:39:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:42:2069] 2025-06-24T20:32:27.704582Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:39:2069][/root/tenant] Ignore empty state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } =========== !argsLeft.IsDeletion 2025-06-24T20:32:27.704797Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:3:2050] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:36:2066], cookie# 0, event size# 103 2025-06-24T20:32:27.704844Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:3:2050] Update description: path# /root/tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2025-06-24T20:32:27.704910Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:3:2050] Upsert description: path# /root/tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /root/tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-06-24T20:32:27.705058Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant PathId: [OwnerId: 800, LocalPathId: 2] Version: 1 }: sender# [1:3:2050] 2025-06-24T20:32:27.705106Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: 
[1:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 1 }: sender# [1:43:2069] 2025-06-24T20:32:27.705177Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:39:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant PathId: [OwnerId: 800, LocalPathId: 2] Version: 1 }: sender# [1:40:2069] 2025-06-24T20:32:27.705234Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:858: [main][1:39:2069][/root/tenant] Update to strong state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 800, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 800, LocalPathId: 2] AbandonedSchemeShards: there are 0 elements } =========== argsLeft.GetSuperId() >= argsRight.GetSuperId() =========== Path: "/root/tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/root/tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 1 PathOwnerId: 900 2025-06-24T20:32:28.174368Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:3:2050] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [3:36:2066] 2025-06-24T20:32:28.174430Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:3:2050] Successful handshake: owner# 800, generation# 1 2025-06-24T20:32:28.174581Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [3:3:2050] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [3:36:2066] 2025-06-24T20:32:28.174620Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [3:3:2050] Commit generation: owner# 800, generation# 1 2025-06-24T20:32:28.174665Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Generation: 1 }: sender# [3:37:2067] 2025-06-24T20:32:28.174692Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:6:2053] Successful handshake: owner# 900, generation# 1 2025-06-24T20:32:28.174881Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [3:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 900 Generation: 1 }: sender# [3:37:2067] 2025-06-24T20:32:28.174908Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [3:6:2053] Commit generation: owner# 900, generation# 1 2025-06-24T20:32:28.174979Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][3:39:2069][/root/tenant] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:32:28.175427Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [3:43:2069] 2025-06-24T20:32:28.175473Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:3:2050] Upsert description: path# /root/tenant 2025-06-24T20:32:28.175543Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:3:2050] Subscribe: subscriber# [3:43:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-24T20:32:28.175646Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [3:44:2069] 2025-06-24T20:32:28.175665Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:6:2053] Upsert description: path# /root/tenant 2025-06-24T20:32:28.175695Z node 3 :SCHEME_BOARD_REPLICA INFO: 
replica.cpp:646: [3:6:2053] Subscribe: subscriber# [3:44:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-24T20:32:28.175825Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [3:45:2069] 2025-06-24T20:32:28.175858Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:9:2056] Upsert description: path# /root/tenant 2025-06-24T20:32:28.175901Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:9:2056] Subscribe: subscriber# [3:45:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-24T20:32:28.175966Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:43:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [3:3:2050] 2025-06-24T20:32:28.176012Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:43:2069] 2025-06-24T20:32:28.176079Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:44:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [3:6:2053] 2025-06-24T20:32:28.176122Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:44:2069] 2025-06-24T2 ... 50] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [397:36:2066] 2025-06-24T20:34:07.590506Z node 397 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [397:3:2050] Commit generation: owner# 910, generation# 1 2025-06-24T20:34:07.590566Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [397:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [397:37:2067] 2025-06-24T20:34:07.590602Z node 397 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [397:6:2053] Successful handshake: owner# 910, generation# 1 2025-06-24T20:34:07.590816Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [397:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [397:37:2067] 2025-06-24T20:34:07.590851Z node 397 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [397:6:2053] Commit generation: owner# 910, generation# 1 2025-06-24T20:34:07.590959Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][397:39:2069][/Root/Tenant/table_inside] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:34:07.591459Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [397:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [397:43:2069] 2025-06-24T20:34:07.591505Z node 397 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [397:3:2050] Upsert description: path# /Root/Tenant/table_inside 2025-06-24T20:34:07.591602Z node 397 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [397:3:2050] Subscribe: subscriber# [397:43:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-24T20:34:07.591774Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [397:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [397:44:2069] 2025-06-24T20:34:07.591832Z node 397 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [397:6:2053] Upsert description: path# /Root/Tenant/table_inside 2025-06-24T20:34:07.591895Z node 397 :SCHEME_BOARD_REPLICA INFO: 
replica.cpp:646: [397:6:2053] Subscribe: subscriber# [397:44:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-24T20:34:07.592067Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [397:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [397:45:2069] 2025-06-24T20:34:07.592097Z node 397 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [397:9:2056] Upsert description: path# /Root/Tenant/table_inside 2025-06-24T20:34:07.592144Z node 397 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [397:9:2056] Subscribe: subscriber# [397:45:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-24T20:34:07.592223Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][397:43:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [397:3:2050] 2025-06-24T20:34:07.592281Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [397:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [397:43:2069] 2025-06-24T20:34:07.592332Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][397:44:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [397:6:2053] 2025-06-24T20:34:07.592377Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [397:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [397:44:2069] 2025-06-24T20:34:07.592423Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][397:45:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [397:9:2056] 2025-06-24T20:34:07.592466Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [397:9:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [397:45:2069] 2025-06-24T20:34:07.592554Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][397:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [397:40:2069] 2025-06-24T20:34:07.592639Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][397:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [397:41:2069] 2025-06-24T20:34:07.592699Z node 397 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][397:39:2069][/Root/Tenant/table_inside] Set up state: owner# [397:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:34:07.592764Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][397:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [397:42:2069] 2025-06-24T20:34:07.592812Z node 397 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][397:39:2069][/Root/Tenant/table_inside] Ignore empty state: owner# [397:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } =========== argsLeft.GetSuperId() >= argsRight.GetSuperId() =========== Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey 
{ SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 =========== Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 2025-06-24T20:34:08.104307Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [399:3:2050] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [399:36:2066] 2025-06-24T20:34:08.104379Z node 399 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [399:3:2050] Successful handshake: owner# 910, generation# 1 2025-06-24T20:34:08.104517Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [399:3:2050] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [399:36:2066] 2025-06-24T20:34:08.104550Z node 399 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [399:3:2050] Commit generation: owner# 910, generation# 1 2025-06-24T20:34:08.104604Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [399:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [399:37:2067] 2025-06-24T20:34:08.104639Z node 399 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [399:6:2053] Successful handshake: owner# 910, generation# 1 2025-06-24T20:34:08.104837Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [399:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [399:37:2067] 2025-06-24T20:34:08.104872Z node 399 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [399:6:2053] Commit generation: owner# 910, generation# 1 2025-06-24T20:34:08.104961Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][399:39:2069][/Root/Tenant/table_inside] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T20:34:08.105431Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [399:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [399:43:2069] 2025-06-24T20:34:08.105469Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [399:3:2050] Upsert description: path# /Root/Tenant/table_inside 2025-06-24T20:34:08.105564Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [399:3:2050] Subscribe: subscriber# [399:43:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-24T20:34:08.105724Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [399:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [399:44:2069] 2025-06-24T20:34:08.105751Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [399:6:2053] Upsert description: path# /Root/Tenant/table_inside 2025-06-24T20:34:08.105794Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [399:6:2053] Subscribe: subscriber# [399:44:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-24T20:34:08.105948Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [399:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [399:45:2069] 2025-06-24T20:34:08.105976Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [399:9:2056] Upsert description: path# /Root/Tenant/table_inside 2025-06-24T20:34:08.106022Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [399:9:2056] Subscribe: subscriber# [399:45:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 
2025-06-24T20:34:08.106102Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][399:43:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:3:2050] 2025-06-24T20:34:08.106161Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [399:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [399:43:2069] 2025-06-24T20:34:08.106211Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][399:44:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:6:2053] 2025-06-24T20:34:08.106255Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [399:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [399:44:2069] 2025-06-24T20:34:08.106301Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][399:45:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:9:2056] 2025-06-24T20:34:08.106344Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [399:9:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [399:45:2069] 2025-06-24T20:34:08.106442Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][399:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:40:2069] 2025-06-24T20:34:08.106523Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][399:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:41:2069] 2025-06-24T20:34:08.106581Z node 399 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][399:39:2069][/Root/Tenant/table_inside] Set up state: owner# [399:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T20:34:08.106643Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][399:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:42:2069] 2025-06-24T20:34:08.106687Z node 399 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][399:39:2069][/Root/Tenant/table_inside] Ignore empty state: owner# [399:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } =========== argsLeft.GetSuperId() >= argsRight.GetSuperId() |89.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep |89.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep |89.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep |89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHadoopSaFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 6982, MsgBus: 17626 2025-06-24T20:33:15.686060Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616695814495139:2067];send_to=[0:7307199536658146131:7762515]; 
2025-06-24T20:33:15.686106Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002aa5/r3tmp/tmpMzCELu/pdisk_1.dat 2025-06-24T20:33:16.252235Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:16.252413Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:16.252902Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616695814495111:2079] 1750797195683648 != 1750797195683651 2025-06-24T20:33:16.264930Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:16.267953Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6982, node 1 2025-06-24T20:33:16.370207Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:16.370230Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:16.370243Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:16.370409Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17626 2025-06-24T20:33:16.705354Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17626 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:16.930985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:19.272099Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616712994364938:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:19.272227Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:19.627918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:19.849994Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616712994365061:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:19.850079Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:19.850297Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616712994365066:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:19.853702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:19.864067Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616712994365068:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T20:33:19.924230Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616712994365108:2397] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:20.612288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:20.686364Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616695814495139:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:20.686424Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:21.069225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:21.621660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:22.152386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:22.791632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:23.273241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:23.354528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:33:25.412838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710706:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-24T20:33:25.621259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710707:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:25.626730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710708:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:25.628445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710709:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" ... 7.915051Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519616875933051014:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T20:33:57.990265Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519616875933051054:2397] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:58.691888Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:59.432902Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:00.132566Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:00.812103Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:01.496397Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:02.208632Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:02.262019Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:34:05.284830Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation 
type: ESchemeOpCreateExternalDataSource, opId: 281474976715701:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 >> Cdc::InitialScanEnqueuesZeroRecords [GOOD] >> Cdc::InitialScanRacyProgressAndDrop >> GenericFederatedQuery::IcebergHadoopTokenFilterPushdown [GOOD] >> TYardTest::TestLogWriteCutEqualRandomWait [GOOD] >> TYardTest::TestLogWriteCutUnequal >> THealthCheckTest::TestSystemStateRetriesAfterReceivingResponse [GOOD] |89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |89.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats |89.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats |89.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats |89.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut |89.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHiveBasicFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 25920, MsgBus: 11362 2025-06-24T20:33:18.066940Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616707677001164:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:18.066983Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002924/r3tmp/tmpLCTCfl/pdisk_1.dat 2025-06-24T20:33:18.520861Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25920, node 1 2025-06-24T20:33:18.673918Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:18.673954Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:18.673962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:18.674118Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:33:18.816044Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:18.816166Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:18.817709Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11362 2025-06-24T20:33:19.105145Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11362 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:19.467085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:21.508087Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616720561903646:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:21.508218Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:21.823645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:22.003439Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616720561903767:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:22.003513Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:22.004167Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616720561903772:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:22.007861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:22.023506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-24T20:33:22.023919Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616720561903774:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T20:33:22.094221Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616724856871120:2399] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:22.868328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:23.104637Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616707677001164:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:23.104693Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:23.319294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:23.835116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:24.355455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:25.040255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:25.588025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:25.649968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:33:28.082826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710708:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-24T20:33:28.197961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710709:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:28.199853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710710:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:28.201484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710711:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials ... 
3881Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:01.169285Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:01.872577Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:02.671191Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:03.709861Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:04.545744Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:05.301058Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:05.391620Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:34:08.176014Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710703:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 |89.2%| [LD] {RESULT} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut |89.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile |89.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile |89.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile |89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::CountUIDByVAT ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHadoopTokenFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 24527, MsgBus: 29746 2025-06-24T20:33:18.422698Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616707494955937:2124];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:18.422751Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002838/r3tmp/tmpVAd25m/pdisk_1.dat 2025-06-24T20:33:18.979240Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:18.982292Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:18.982713Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616707494955854:2079] 1750797198419648 != 1750797198419651 2025-06-24T20:33:18.984115Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:19.035923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24527, node 1 2025-06-24T20:33:19.163859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:19.163899Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:19.163910Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:19.164026Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29746 2025-06-24T20:33:19.454312Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29746 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:19.837169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:19.855836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:33:21.957192Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616720379858387:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:21.957317Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:22.263570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:22.428883Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616724674825800:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:22.428992Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:22.429346Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616724674825806:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:22.434762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:22.466804Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616724674825808:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T20:33:22.552128Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616724674825874:2404] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:23.208355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:23.422826Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616707494955937:2124];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:23.422897Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:23.673939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:24.302032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:24.867910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:25.423642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:25.969835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:26.011261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:33:28.233835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715708:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-24T20:33:28.264366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715709:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:28.267269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715710:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:28.268806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715711:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/acti ... 
ck failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:00.966612Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519616868712885381:2098];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:00.966712Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:01.434077Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:02.367874Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:03.155826Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:04.079265Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710682:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:04.741163Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710687:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:05.625878Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:05.675138Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:34:08.328268Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710711:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. GRpcStatusCode: 0 >> TestProgram::CountUIDByVAT [GOOD] >> Viewer::JsonStorageListingV2 [GOOD] >> Viewer::JsonStorageListingV2GroupIdFilter ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::TestSystemStateRetriesAfterReceivingResponse [GOOD] Test command err: 2025-06-24T20:33:21.052286Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:21.052902Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:21.053015Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:21.053336Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:21.053442Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:21.053629Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043ef/r3tmp/tmp2JRvXA/pdisk_1.dat 2025-06-24T20:33:21.460512Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28405, node 1 TClient is connected to server localhost:24526 2025-06-24T20:33:21.872129Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:21.872184Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:21.872216Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:21.873052Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:33:30.582150Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:696:2375], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:30.582672Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:30.585227Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:30.587187Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:693:2315], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:30.587641Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:30.587769Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043ef/r3tmp/tmp8I8Y7D/pdisk_1.dat 2025-06-24T20:33:31.133287Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63710, node 3 TClient is connected to server localhost:30237 2025-06-24T20:33:35.145063Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:35.145126Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:35.145155Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:35.146012Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:33:35.164635Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:35.164808Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:35.214469Z node 3 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-24T20:33:35.215309Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:33:35.340358Z node 3 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 5 2025-06-24T20:33:35.341244Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connected -> Disconnected self_check_result: EMERGENCY issue_log { id: "RED-f489-1231c6b1" status: RED message: "Database has compute issues" location { database { name: "/Root" } } reason: "RED-6fa7-1231c6b1" reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "RED-6fa7-1231c6b1" status: RED message: "Compute has issues with tablets" location { database { name: "/Root" } } reason: "RED-e5e3-1231c6b1-PersQueue" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-3" reason: "YELLOW-e9e2-1231c6b1-4" reason: "YELLOW-e9e2-1231c6b1-5" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-3" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 3 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-4" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 4 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: 
"::1" port: 12003 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "RED-e5e3-1231c6b1-PersQueue" status: RED message: "Tablets are dead" location { compute { tablet { type: "PersQueue" id: "72075186224037888" count: 1 } } database { name: "/Root" } node { } } type: "TABLET" level: 4 } location { id: 3 host: "::1" port: 12001 } 2025-06-24T20:33:44.758820Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:447:2221], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:44.759647Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:44.759940Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:783:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:44.760044Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:44.760501Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:44.760733Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043ef/r3tmp/tmpyB93qw/pdisk_1.dat 2025-06-24T20:33:45.135022Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8219, node 6 TClient is connected to server localhost:29823 2025-06-24T20:33:49.388929Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:49.389008Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:49.389057Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:49.390218Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:33:49.391852Z node 6 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerConnected([6:1260:2661]) [6:1486:2666] 2025-06-24T20:33:49.392036Z node 6 :HIVE DEBUG: hive_impl.cpp:34: HIVE#72057594037968897 Handle TEvHive::TEvCreateTablet(PersQueue(72057594046578946,0)) 2025-06-24T20:33:49.407706Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:200: HIVE#72057594037968897 THive::TTxCreateTablet::Execute Owner: 72057594046578946 OwnerIdx: 0 TabletType: PersQueue BindedChannels { StoragePoolName: "/Root:test" } BindedChannels { StoragePoolName: "/Root:test" } BindedChannels { StoragePoolName: "/Root:test" } 2025-06-24T20:33:49.407846Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:348: HIVE#72057594037968897 Hive 72057594037968897 allocated TabletId 72075186224037888 from TabletIdIndex 65536 2025-06-24T20:33:49.408217Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:440: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for type PersQueue: {} 2025-06-24T20:33:49.408316Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:447: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for profile 'default': {Memory: 1048576} 2025-06-24T20:33:49.408580Z node 6 :HIVE DEBUG: hive_impl.cpp:2817: HIVE#72057594037968897 CreateTabletFollowers Tablet PersQueue.72075186224037888.Leader.0 2025-06-24T20:33:49.408672Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:173: HIVE#72057594037968897 THive::TTxCreateTablet::Execute TabletId: 72075186224037888 Status: OK 2025-06-24T20:33:49.408853Z node 6 :HIVE DEBUG: hive_impl.cpp:1075: HIVE#72057594037968897 THive::AssignTabletGroups TEvControllerSelectGroups tablet 72075186224037888 GroupParameters { StoragePoolSpecifier { Name: "/Root:test" } } ReturnAllMatchingGroups: true 2025-06-24T20:33:49.411011Z node 6 :HIVE DEBUG: hive_impl.cpp:72: HIVE#72057594037968897 
Connected to tablet 72057594037932033 from tablet 72057594037968897 2025-06-24T20:33:49.418908Z node 6 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerConnected([8:1470:2326]) [6:1525:2671] 2025-06-24T20:33:49.434879Z node 6 :HIVE DEBUG: hive_impl.cpp:141: HIVE#72057594037968897 Handle TEvLocal::TEvRegisterNode from [8:1462:2326] HiveId: 72057594037968897 ServicedDomains { SchemeShard: 72057594046644480 PathId: 1 } TabletAvailability { Type: Mediator Priority: 0 } TabletAvailability { Type: Dummy Priori ... 25-06-24T20:33:49.594527Z node 6 :HIVE TRACE: hive_impl.cpp:2567: HIVE#72057594037968897 UpdateTotalResources: ObjectId (72057594046578946,0): {} -> {Memory: 1048576} 2025-06-24T20:33:49.594643Z node 6 :HIVE TRACE: hive_impl.cpp:2573: HIVE#72057594037968897 UpdateTotalResources: Type PersQueue: {} -> {Memory: 1048576} 2025-06-24T20:33:49.594795Z node 6 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-24T20:33:49.594847Z node 6 :HIVE TRACE: hive_impl.cpp:344: HIVE#72057594037968897 ProcessBootQueue - sending 2025-06-24T20:33:49.595199Z node 6 :HIVE TRACE: hive_impl.cpp:328: HIVE#72057594037968897 ProcessBootQueue - executing 2025-06-24T20:33:49.595300Z node 6 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-24T20:33:49.595365Z node 6 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-24T20:33:49.595425Z node 6 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-24T20:33:49.608787Z node 6 :HIVE DEBUG: tx__update_tablet_status.cpp:216: HIVE#72057594037968897 THive::TTxUpdateTabletStatus::Complete TabletId: 72075186224037888 SideEffects: {Notifications: 0x10040207 [6:1259:2660] {EvTabletCreationResult Status: OK TabletID: 72075186224037888}} 2025-06-24T20:33:49.608891Z node 6 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-24T20:33:53.502937Z node 6 :HIVE DEBUG: hive_impl.cpp:731: HIVE#72057594037968897 Handle TEvLocal::TEvStatus for Node 8: Status: 2 2025-06-24T20:33:53.503090Z node 6 :HIVE DEBUG: tx__status.cpp:22: HIVE#72057594037968897 THive::TTxStatus(8)::Execute 2025-06-24T20:33:53.503187Z node 6 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 8 2025-06-24T20:33:53.503340Z node 6 :HIVE DEBUG: tx__status.cpp:65: HIVE#72057594037968897 THive::TTxStatus(8)::Complete 2025-06-24T20:33:53.503743Z node 6 :HIVE DEBUG: tx__restart_tablet.cpp:32: HIVE#72057594037968897 THive::TTxRestartTablet(PersQueue.72075186224037888.Leader.1)::Execute 2025-06-24T20:33:53.503891Z node 6 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037968897 Tablet(PersQueue.72075186224037888.Leader.1) VolatileState: Running -> Stopped (Node 8) 2025-06-24T20:33:53.503965Z node 6 :HIVE TRACE: node_info.cpp:118: HIVE#72057594037968897 Node(8, (0,1048576,0,0)->(0,0,0,0)) 2025-06-24T20:33:53.504110Z node 6 :HIVE TRACE: hive_impl.cpp:2567: HIVE#72057594037968897 UpdateTotalResources: ObjectId (72057594046578946,0): {Memory: 1048576} -> {} 2025-06-24T20:33:53.504227Z node 6 :HIVE TRACE: hive_impl.cpp:2573: HIVE#72057594037968897 UpdateTotalResources: Type PersQueue: {Memory: 1048576} -> {} 2025-06-24T20:33:53.504309Z node 6 :HIVE DEBUG: tablet_info.cpp:523: HIVE#72057594037968897 Sending TEvStopTablet(PersQueue.72075186224037888.Leader.1 gen 1) to node 8 
2025-06-24T20:33:53.504403Z node 6 :HIVE DEBUG: tablet_info.cpp:125: HIVE#72057594037968897 Tablet(PersQueue.72075186224037888.Leader.1) VolatileState: Stopped -> Booting 2025-06-24T20:33:53.504458Z node 6 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (1) 2025-06-24T20:33:53.504511Z node 6 :HIVE TRACE: hive_impl.cpp:344: HIVE#72057594037968897 ProcessBootQueue - sending 2025-06-24T20:33:53.504836Z node 6 :HIVE DEBUG: tx__kill_node.cpp:22: HIVE#72057594037968897 THive::TTxKillNode(8)::Execute 2025-06-24T20:33:53.504962Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T20:33:53.505023Z node 6 :HIVE TRACE: hive_domains.cpp:16: Node(8) DeregisterInDomains (72057594046644480:1) : 1 -> 0 2025-06-24T20:33:53.505091Z node 6 :HIVE DEBUG: hive_impl.cpp:2804: HIVE#72057594037968897 RemoveRegisteredDataCentersNode(3, 8) 2025-06-24T20:33:53.505160Z node 6 :HIVE TRACE: tx__kill_node.cpp:50: HIVE#72057594037968897 THive::TTxKillNode - killing pipe server [6:1525:2671] 2025-06-24T20:33:53.505220Z node 6 :HIVE DEBUG: hive_impl.cpp:105: HIVE#72057594037968897 TryToDeleteNode(8): waiting 3600.000000s 2025-06-24T20:33:53.508336Z node 6 :HIVE TRACE: hive_impl.cpp:122: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerDisconnected([8:1470:2326]) [6:1525:2671] 2025-06-24T20:33:53.514419Z node 6 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerConnected([6:1849:2689]) [6:1853:2694] 2025-06-24T20:33:53.521678Z node 6 :HIVE TRACE: hive_impl.cpp:122: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerDisconnected([6:1849:2689]) [6:1853:2694] 2025-06-24T20:33:53.523634Z node 6 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerConnected([9:1830:2325]) [6:1884:2697] 2025-06-24T20:33:53.528361Z node 6 :HIVE DEBUG: hive_impl.cpp:141: HIVE#72057594037968897 Handle TEvLocal::TEvRegisterNode from [9:1823:2325] HiveId: 72057594037968897 ServicedDomains { SchemeShard: 72057594046644480 PathId: 1 } TabletAvailability { Type: Mediator Priority: 0 } TabletAvailability { Type: Dummy Priority: 0 } TabletAvailability { Type: KeyValue Priority: 0 } TabletAvailability { Type: Coordinator Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } TabletAvailability { Type: SchemeShard Priority: 0 } TabletAvailability { Type: DataShard Priority: 0 } TabletAvailability { Type: PersQueue Priority: 0 } TabletAvailability { Type: PersQueueReadBalancer Priority: 0 } TabletAvailability { Type: Kesus Priority: 0 } TabletAvailability { Type: SysViewProcessor Priority: 0 } TabletAvailability { Type: ColumnShard Priority: 0 } TabletAvailability { Type: SequenceShard Priority: 0 } TabletAvailability { Type: ReplicationController Priority: 0 } TabletAvailability { Type: StatisticsAggregator Priority: 0 } 2025-06-24T20:33:53.528505Z node 6 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72057594037968897 THive::TTxRegisterNode(9)::Execute 2025-06-24T20:33:53.528648Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:53.528716Z node 6 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-24T20:33:53.528770Z node 6 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (1) 2025-06-24T20:33:53.528821Z node 6 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-24T20:33:53.528867Z node 6 :HIVE DEBUG: 
hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (1) 2025-06-24T20:33:53.528969Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:53.529793Z node 6 :HIVE DEBUG: hive_impl.cpp:808: HIVE#72057594037968897 TEvInterconnect::TEvNodeInfo NodeId 9 Location DataCenter: "4" Module: "4" Rack: "4" Unit: "4" self_check_result: EMERGENCY issue_log { id: "RED-f489-1231c6b1" status: RED message: "Database has compute issues" location { database { name: "/Root" } } reason: "RED-6fa7-1231c6b1" reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "RED-6fa7-1231c6b1" status: RED message: "Compute has issues with tablets" location { database { name: "/Root" } } reason: "RED-e5e3-1231c6b1-PersQueue" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-6" reason: "YELLOW-e9e2-1231c6b1-7" reason: "YELLOW-e9e2-1231c6b1-8" reason: "YELLOW-e9e2-1231c6b1-9" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-6" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 6 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-7" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 7 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-8" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 8 host: "::1" port: 12003 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-9" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 9 host: "::1" port: 12004 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "RED-e5e3-1231c6b1-PersQueue" status: RED message: "Tablets are dead" location { compute { tablet { type: "PersQueue" id: "72075186224037888" count: 1 } } database { name: "/Root" } node { } } type: "TABLET" level: 4 } location { id: 6 host: "::1" port: 12001 } 2025-06-24T20:34:00.229269Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:417:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:00.229558Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:00.229781Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043ef/r3tmp/tmp0wAEvm/pdisk_1.dat 2025-06-24T20:34:00.711706Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6963, node 10 TClient is connected to server localhost:25226 2025-06-24T20:34:01.262700Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:01.262786Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:01.263316Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:01.263863Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:07.764949Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:07.765453Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:07.765674Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043ef/r3tmp/tmpkEjkg6/pdisk_1.dat 2025-06-24T20:34:08.313321Z node 12 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25418, node 12 TClient is connected to server localhost:24471 2025-06-24T20:34:08.848766Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:08.848857Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:08.848938Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:08.850461Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> TBlobStorageWardenTest::TestReceivedPDiskRestartNotAllowed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::CountUIDByVAT [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { GroupBy { Aggregates { Column { Id: 10001 } Function { Id: 2 Arguments { Id: 2 } } } KeyColumns { Id: 4 } } } Command { Projection { Columns { Id: 10001 } Columns { Id: 4 } } } ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { GroupBy { Aggregates { Column { Id: 10001 } Function { Id: 2 Arguments { Id: 2 } } } KeyColumns { Id: 4 } } } Command { Projection { Columns { Id: 10001 } Columns { Id: 4 } } } ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2},{\"name\":\"vat\",\"id\":4}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"2,4\",\"o\":\"10001\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"uid\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N2(9):{\"i\":\"4\",\"p\":{\"address\":{\"name\":\"vat\",\"id\":4}},\"o\":\"4\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"10001,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N4 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2},{\"name\":\"vat\",\"id\":4}]},\"o\":\"2,4\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N4->N2->N0->N5[color=red]; }; 
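For orientation, the program dumped above for TestProgram::CountUIDByVAT groups by column `vat` (id 4), counts `uid` (id 2) per group, and then projects the pair (count, vat). Below is a minimal standalone C++ sketch of that same count-by-key semantics; it uses only the standard library over assumed in-memory sample rows and is not the columnshard program executor itself.

```cpp
// Illustrative only: reproduces the CountUIDByVAT semantics (count of uid per vat)
// over hypothetical in-memory rows; this is not the columnshard program engine.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct Row {
    std::string Uid;  // column id 2 in the dumped program
    int32_t Vat;      // column id 4 in the dumped program
};

int main() {
    // Sample rows are assumptions for illustration.
    std::vector<Row> rows = {
        {"a", 1}, {"b", 1}, {"c", 2},
    };

    // GroupBy { Aggregates: count(uid), KeyColumns: vat }
    std::map<int32_t, uint64_t> countByVat;
    for (const auto& row : rows) {
        ++countByVat[row.Vat];  // count() over uid, keyed by vat
    }

    // Projection { Columns: [count, vat] }
    for (const auto& [vat, count] : countByVat) {
        std::cout << count << "\t" << vat << "\n";
    }
    return 0;
}
```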
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":4},{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"2","p":{"address":{"name":"uid","id":2}},"o":"2","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"uid","id":2},{"name":"vat","id":4}]},"o":"2,4","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"uid","id":2},{"name":"vat","id":4}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"10001,4","t":"Projection"},"w":27,"id":5},"4":{"p":{"i":"4","p":{"address":{"name":"vat","id":4}},"o":"4","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"2,4","o":"10001","t":"Aggregation"},"w":18,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; >> TBlobStorageWardenTest::ObtainTenantKeySamePin [GOOD] >> TBlobStorageWardenTest::ObtainTenantKeyDifferentPin [GOOD] >> TBlobStorageWardenTest::TestDeleteStoragePool >> TBlobStorageWardenTest::TestFilterBadSerials [GOOD] >> TBlobStorageWardenTest::TestGivenPDiskFormatedWithGuid1AndCreatedWithGuid2WhenYardInitThenError >> TBlobStorageWardenTest::TestReceivedPDiskRestartNotAllowed [GOOD] >> GenericFederatedQuery::PostgreSQLFilterPushdown [GOOD] >> GenericFederatedQuery::IcebergHiveTokenFilterPushdown [GOOD] |89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::ObtainTenantKeyDifferentPin [GOOD] |89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestReceivedPDiskRestartNotAllowed [GOOD] |89.2%| [LD] 
{default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut |89.2%| [LD] {RESULT} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut |89.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut >> KqpWorkloadService::TestLessConcurrentQueryLimit [GOOD] >> KqpWorkloadService::TestCpuLoadThreshold >> ResourcePoolsDdl::TestDropResourcePool [GOOD] >> ResourcePoolsSysView::TestResourcePoolsSysViewFilters [GOOD] >> TFlatTest::WriteMergeAndRead >> TFlatTest::SelectRangeForbidNullArgs2 >> SystemView::CollectPreparedQueries >> SystemView::VSlotsFields ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::PostgreSQLFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 24452, MsgBus: 64133 2025-06-24T20:33:21.115648Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616722846770065:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:21.117463Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027ae/r3tmp/tmpLrreVx/pdisk_1.dat 2025-06-24T20:33:21.538419Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:21.538551Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:21.540943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:33:21.569134Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:21.571349Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616722846770048:2079] 1750797201114277 != 1750797201114280 TServer::EnableGrpc on GrpcPort 24452, node 1 2025-06-24T20:33:21.659952Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:21.659979Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:21.659987Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:21.660095Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64133 TClient is connected to server localhost:64133 WaitRootIsUp 'Root'... 
2025-06-24T20:33:22.187724Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:22.422398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:24.539884Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616735731672576:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:24.540023Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:24.847530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:25.042646Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616740026639994:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:25.042751Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:25.043088Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616740026639999:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:25.049124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:25.059283Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616740026640001:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T20:33:25.118523Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616740026640041:2397] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:25.772084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:26.118354Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616722846770065:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:26.118419Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:26.326496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:26.958915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:27.541149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:28.050028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:28.547420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:28.609904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:33:30.776433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710708:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-24T20:33:30.828817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710711:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:30.830896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710709:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:30.847953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710710:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Call DescribeTable. data_source_instance { kind: POSTGRESQL endpoint { host: "localhost ... 519616902003142700:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:03.204260Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:03.224100Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519616902003142702:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T20:34:03.322616Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519616902003142742:2394] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:03.595454Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519616880528305479:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:03.595558Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:04.205459Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:04.998270Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:05.982117Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:06.733339Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715682:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:07.526219Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715687:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:08.286114Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:08.341568Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:34:11.233755Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715714:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 >> TLocksFatTest::PointSetNotBreak >> SystemView::ShowCreateTablePartitionAtKeys >> KqpWorkloadService::TestZeroConcurrentQueryLimit [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHiveTokenFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 11657, MsgBus: 23203 2025-06-24T20:33:17.911040Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616705883714841:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:17.912206Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0029d5/r3tmp/tmpHwfvyc/pdisk_1.dat 2025-06-24T20:33:18.314122Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11657, node 1 2025-06-24T20:33:18.342062Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:18.342189Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:18.346444Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:33:18.433402Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:18.433425Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:18.433434Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:18.433579Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23203 2025-06-24T20:33:18.915595Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23203 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
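The FilterPushdown tests logged above exercise the connector call sequence DescribeTable → ListSplits → ReadSplits, where the ReadSplits request carries the predicate `filtered_column = 42` (operation EQ, FILTERING_OPTIONAL) so the source can filter before returning `data_column`. The following plain-C++ sketch only mimics the row-level effect being verified; it is not the YDB generic connector API, and the sample rows are assumptions for illustration.

```cpp
// Illustrative only: mimics the effect of the EQ pushdown seen in the ReadSplits
// request above (filtered_column = 42, projecting data_column and filtered_column).
// Not the connector API; rows below are assumed sample data.
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct SourceRow {
    std::optional<int32_t> FilteredColumn;  // optional INT32, as in the dumped schema
    std::optional<std::string> DataColumn;  // optional STRING, as in the dumped schema
};

int main() {
    std::vector<SourceRow> table = {
        {42, std::string("kept")},
        {7, std::string("dropped")},
        {std::nullopt, std::string("dropped too")},
    };

    // "where { filter_typed { comparison { operation: EQ ... int32_value: 42 } } }"
    for (const auto& row : table) {
        if (row.FilteredColumn && *row.FilteredColumn == 42) {
            std::cout << (row.DataColumn ? *row.DataColumn : "<null>") << "\n";
        }
    }
    return 0;
}
```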
2025-06-24T20:33:19.050884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:19.076363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:33:21.389242Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616723063584560:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:21.389369Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:21.640494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:21.764618Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616723063584680:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:21.764801Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:21.765358Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616723063584686:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:21.769481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:21.780243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-24T20:33:21.780819Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616723063584688:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T20:33:21.843057Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616723063584728:2397] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:22.628363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:22.921725Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616705883714841:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:22.922031Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:23.185198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:23.752535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:24.400738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:24.911976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:25.435782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:25.545668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:33:27.600760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715706:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-24T20:33:27.636377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715709:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:27.638033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715710:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:27.639161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715711:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... 
90:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:01.519208Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:02.157585Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:03.057571Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:04.115995Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:05.014867Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710682:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:05.877649Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710687:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:06.655044Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:06.738003Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:34:09.459587Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710708:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 >> KikimrProvider::TestFillAuthPropertiesBasic [GOOD] >> KikimrProvider::TestFillAuthPropertiesAws [GOOD] >> KikimrProvider::AlterTableAddIndexWithTableSettings [GOOD] >> THiveTest::TestLockTabletExecutionGoodUnlock [GOOD] >> THiveTest::TestLockTabletExecutionLocalGone >> KikimrIcGateway::TestCreateSameExternalTable >> TBlobStorageWardenTest::TestDeleteStoragePool [GOOD] >> TBlobStorageWardenTest::TestBlockEncriptedGroup >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW+VolatileTxs |89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrProvider::AlterTableAddIndexWithTableSettings [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> ResourcePoolsDdl::TestDropResourcePool [GOOD] Test command err: 2025-06-24T20:33:14.018320Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616694321169422:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:14.019017Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046dd/r3tmp/tmpMQlDb6/pdisk_1.dat 2025-06-24T20:33:14.455573Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616690026202024:2079] 1750797193991559 != 1750797193991562 2025-06-24T20:33:14.458077Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25026, node 1 2025-06-24T20:33:14.485116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:14.485253Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:14.489160Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:33:14.553738Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:14.553762Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:14.553776Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:14.553920Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5535 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:14.926137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:15.018478Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:33:17.433429Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-24T20:33:17.448421Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=NmEwZTBlMTQtYmRhNzUwZjAtZDRmZmQxOTUtMWMyYWZkY2U=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NmEwZTBlMTQtYmRhNzUwZjAtZDRmZmQxOTUtMWMyYWZkY2U= 2025-06-24T20:33:17.448902Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-24T20:33:17.448919Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-24T20:33:17.448943Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-24T20:33:17.448974Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616707206071823:2289], Start check tables existence, number paths: 2 2025-06-24T20:33:17.451061Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=NmEwZTBlMTQtYmRhNzUwZjAtZDRmZmQxOTUtMWMyYWZkY2U=, ActorId: [1:7519616707206071832:2290], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:33:17.451354Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616707206071823:2289], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-24T20:33:17.451410Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616707206071823:2289], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-24T20:33:17.451441Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: 
[1:7519616707206071823:2289], Successfully finished 2025-06-24T20:33:17.451569Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-24T20:33:17.463796Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616707206071849:2297], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:33:17.467811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:17.469165Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616707206071849:2297], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-06-24T20:33:17.469406Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616707206071849:2297], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-24T20:33:17.480358Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616707206071849:2297], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:33:17.552900Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616707206071849:2297], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:33:17.557182Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616707206071900:2329] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:17.557360Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616707206071849:2297], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-24T20:33:17.566370Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: default 2025-06-24T20:33:17.566407Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-24T20:33:17.566473Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616707206071909:2292], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-06-24T20:33:17.566643Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=NmEwZTBlMTQtYmRhNzUwZjAtZDRmZmQxOTUtMWMyYWZkY2U=, ActorId: [1:7519616707206071832:2290], ActorState: ReadyState, TraceId: 01jyhta87x1wf17rs60nm83e66, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: CREATE RESOURCE POOL my_pool WITH ( CONCURRENT_QUERY_LIMIT=1, QUEUE_SIZE=0 ); rpcActor: [0:0:0] database: /Root databaseId: /Root pool id: default 2025-06-24T20:33:17.569989Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616707206071909:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:17.570112Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:17.822089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:17.829739Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=1&id=NmEwZTBlMTQtYmRhNzUwZjAtZDRmZmQxOTUtMWMyYWZkY2U=, ActorId: [1:7519616707206071832:2290], ActorState: ExecuteState, TraceId: 01jyhta87x1wf17rs60nm83e66, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [1:7519616707206071917:2290] WorkloadServiceCleanup: 0 2025-06-24T20:33:17.831399Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=1&id=NmEwZTBlMTQtYmRhNzUwZjAtZDRmZmQxOTUtMWMyYWZkY2U=, ActorId: [1:7519616707206071832:2290], ActorState: CleanupState, TraceId: 01jyhta87x1wf17rs60nm83e66, EndCleanup, isFinal: 0 2025-06-24T20:33:17.831494Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=1&id=NmEwZTBlMTQtYmRhNzUwZjAtZDRmZmQxOTUtMWMyYWZkY2U=, ActorId: [1:7519616707206071832:2290], ActorState: CleanupState, TraceId: 01jyhta87x1wf17rs60nm83e66, Sent query response back to proxy, proxyRequestId: 3, proxyId: [1:7519616694321169551:2236] 2025-06-24T20:33:17.839120Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=NWViZDY4YjQtZTkxOTQ2NTAtMjFlNGNjOTUtYTcxMTNjMGQ=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NWViZDY4YjQtZTkxOTQ2NTAtMjFlNGNjOTUtYTcxMTNjMGQ= 2025-06-24T20:33:17.839259Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=NWViZDY4YjQtZTkxOTQ2NTAtMjFlNGNjOTUtYTcxMTNjMGQ=, ActorId: [1:7519616707206071940:229 ... 
34:12.508373Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=8&id=Y2UxOTI4YzMtYjhkNTJjNjItNzQ3NmRhODItMzNiNjg3YjU=, ActorId: [8:7519616941385841836:2409], ActorState: ReadyState, Session closed due to explicit close event 2025-06-24T20:34:12.508421Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=Y2UxOTI4YzMtYjhkNTJjNjItNzQ3NmRhODItMzNiNjg3YjU=, ActorId: [8:7519616941385841836:2409], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:34:12.508453Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=Y2UxOTI4YzMtYjhkNTJjNjItNzQ3NmRhODItMzNiNjg3YjU=, ActorId: [8:7519616941385841836:2409], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T20:34:12.508479Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=8&id=Y2UxOTI4YzMtYjhkNTJjNjItNzQ3NmRhODItMzNiNjg3YjU=, ActorId: [8:7519616941385841836:2409], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T20:34:12.508551Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=8&id=Y2UxOTI4YzMtYjhkNTJjNjItNzQ3NmRhODItMzNiNjg3YjU=, ActorId: [8:7519616941385841836:2409], ActorState: unknown state, Session actor destroyed 2025-06-24T20:34:12.511311Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:294: [WorkloadService] [TPoolHandlerActorBase] ActorId: [8:7519616919911004916:2299], DatabaseId: /Root, PoolId: my_pool, Got delete notification 2025-06-24T20:34:12.511413Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: my_pool 2025-06-24T20:34:12.511522Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519616941385841897:2423], DatabaseId: /Root, PoolId: my_pool, Start pool fetching 2025-06-24T20:34:12.513346Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519616941385841897:2423], DatabaseId: /Root, PoolId: my_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-06-24T20:34:12.513533Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool my_pool, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-06-24T20:34:12.514292Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=YjA5Yjk1NTItMzU1Y2FlYzMtMjFjMjNhMTAtMWE0ZjI4MGE=, ActorId: [8:7519616919911004809:2292], ActorState: ExecuteState, TraceId: 01jyhtbxw08z1a94zmds7vqayt, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [8:7519616941385841864:2292] WorkloadServiceCleanup: 0 2025-06-24T20:34:12.517727Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=YjA5Yjk1NTItMzU1Y2FlYzMtMjFjMjNhMTAtMWE0ZjI4MGE=, ActorId: [8:7519616919911004809:2292], ActorState: CleanupState, TraceId: 01jyhtbxw08z1a94zmds7vqayt, EndCleanup, isFinal: 0 2025-06-24T20:34:12.517808Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=8&id=YjA5Yjk1NTItMzU1Y2FlYzMtMjFjMjNhMTAtMWE0ZjI4MGE=, ActorId: [8:7519616919911004809:2292], ActorState: CleanupState, TraceId: 01jyhtbxw08z1a94zmds7vqayt, Sent query response back to proxy, proxyRequestId: 18, proxyId: [8:7519616898436167797:2132] 2025-06-24T20:34:12.528289Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=8&id=ZTk5ZjFmNWUtOWE3NGM1NzktNzQxMzQ1ZDQtYzkxYTJiYjg=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZTk5ZjFmNWUtOWE3NGM1NzktNzQxMzQ1ZDQtYzkxYTJiYjg= 2025-06-24T20:34:12.528931Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=8&id=ZTk5ZjFmNWUtOWE3NGM1NzktNzQxMzQ1ZDQtYzkxYTJiYjg=, ActorId: [8:7519616941385841907:2424], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:34:12.529113Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=8&id=ZTk5ZjFmNWUtOWE3NGM1NzktNzQxMzQ1ZDQtYzkxYTJiYjg=, ActorId: [8:7519616941385841907:2424], ActorState: ReadyState, TraceId: 01jyhtbxxh70yk91gt6ncdqn91, received request, proxyRequestId: 19 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [8:7519616941385841906:2617] database: Root databaseId: /Root pool id: my_pool 2025-06-24T20:34:12.529155Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: my_pool 2025-06-24T20:34:12.529204Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [8:7519616941385841907:2424], DatabaseId: /Root, PoolId: my_pool, SessionId: ydb://session/3?node_id=8&id=ZTk5ZjFmNWUtOWE3NGM1NzktNzQxMzQ1ZDQtYzkxYTJiYjg= 2025-06-24T20:34:12.529246Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519616941385841909:2425], DatabaseId: /Root, PoolId: my_pool, Start pool fetching 2025-06-24T20:34:12.529348Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [8:7519616941385841910:2426], DatabaseId: /Root, PoolId: my_pool, SessionId: ydb://session/3?node_id=8&id=ZTk5ZjFmNWUtOWE3NGM1NzktNzQxMzQ1ZDQtYzkxYTJiYjg=, Start pool fetching 2025-06-24T20:34:12.529370Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519616941385841911:2427], DatabaseId: /Root, PoolId: my_pool, Start pool fetching 2025-06-24T20:34:12.531584Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: 
[WorkloadService] [TPoolFetcherActor] ActorId: [8:7519616941385841911:2427], DatabaseId: /Root, PoolId: my_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-06-24T20:34:12.531635Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519616941385841909:2425], DatabaseId: /Root, PoolId: my_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-06-24T20:34:12.531729Z node 8 :KQP_WORKLOAD_SERVICE ERROR: scheme_actors.cpp:56: [WorkloadService] [TPoolResolverActor] ActorId: [8:7519616941385841910:2426], DatabaseId: /Root, PoolId: my_pool, SessionId: ydb://session/3?node_id=8&id=ZTk5ZjFmNWUtOWE3NGM1NzktNzQxMzQ1ZDQtYzkxYTJiYjg=, Failed to fetch pool info NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-06-24T20:34:12.531733Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool my_pool, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-06-24T20:34:12.531873Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:114: [WorkloadService] [TPoolResolverActor] ActorId: [8:7519616941385841910:2426], DatabaseId: /Root, PoolId: my_pool, SessionId: ydb://session/3?node_id=8&id=ZTk5ZjFmNWUtOWE3NGM1NzktNzQxMzQ1ZDQtYzkxYTJiYjg=, Failed to resolve pool, NOT_FOUND, issues: {
: Error: Failed to resolve pool id my_pool subissue: {
: Error: Resource pool my_pool not found or you don't have access permissions } } 2025-06-24T20:34:12.532014Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:546: [WorkloadService] [Service] Reply continue error NOT_FOUND to [8:7519616941385841907:2424]: {
: Error: Failed to resolve pool id my_pool subissue: {
: Error: Resource pool my_pool not found or you don't have access permissions } } 2025-06-24T20:34:12.532129Z node 8 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=8&id=ZTk5ZjFmNWUtOWE3NGM1NzktNzQxMzQ1ZDQtYzkxYTJiYjg=, ActorId: [8:7519616941385841907:2424], ActorState: ExecuteState, TraceId: 01jyhtbxxh70yk91gt6ncdqn91, Create QueryResponse for error on request, msg: Query failed during adding/waiting in workload pool 2025-06-24T20:34:12.532317Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=ZTk5ZjFmNWUtOWE3NGM1NzktNzQxMzQ1ZDQtYzkxYTJiYjg=, ActorId: [8:7519616941385841907:2424], ActorState: ExecuteState, TraceId: 01jyhtbxxh70yk91gt6ncdqn91, Cleanup start, isFinal: 1 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 1 2025-06-24T20:34:12.532412Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:189: [WorkloadService] [Service] Finished request with worker actor [8:7519616941385841907:2424], DatabaseId: /Root, PoolId: my_pool, SessionId: ydb://session/3?node_id=8&id=ZTk5ZjFmNWUtOWE3NGM1NzktNzQxMzQ1ZDQtYzkxYTJiYjg= 2025-06-24T20:34:12.532491Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=ZTk5ZjFmNWUtOWE3NGM1NzktNzQxMzQ1ZDQtYzkxYTJiYjg=, ActorId: [8:7519616941385841907:2424], ActorState: CleanupState, TraceId: 01jyhtbxxh70yk91gt6ncdqn91, EndCleanup, isFinal: 1 2025-06-24T20:34:12.532610Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=8&id=ZTk5ZjFmNWUtOWE3NGM1NzktNzQxMzQ1ZDQtYzkxYTJiYjg=, ActorId: [8:7519616941385841907:2424], ActorState: CleanupState, TraceId: 01jyhtbxxh70yk91gt6ncdqn91, Sent query response back to proxy, proxyRequestId: 19, proxyId: [8:7519616898436167797:2132] 2025-06-24T20:34:12.532645Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=8&id=ZTk5ZjFmNWUtOWE3NGM1NzktNzQxMzQ1ZDQtYzkxYTJiYjg=, ActorId: [8:7519616941385841907:2424], ActorState: unknown state, TraceId: 01jyhtbxxh70yk91gt6ncdqn91, Cleanup temp tables: 0 2025-06-24T20:34:12.532798Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=8&id=ZTk5ZjFmNWUtOWE3NGM1NzktNzQxMzQ1ZDQtYzkxYTJiYjg=, ActorId: [8:7519616941385841907:2424], ActorState: unknown state, TraceId: 01jyhtbxxh70yk91gt6ncdqn91, Session actor destroyed 2025-06-24T20:34:12.543411Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=8&id=YjA5Yjk1NTItMzU1Y2FlYzMtMjFjMjNhMTAtMWE0ZjI4MGE=, ActorId: [8:7519616919911004809:2292], ActorState: ReadyState, Session closed due to explicit close event 2025-06-24T20:34:12.543468Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=YjA5Yjk1NTItMzU1Y2FlYzMtMjFjMjNhMTAtMWE0ZjI4MGE=, ActorId: [8:7519616919911004809:2292], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:34:12.543505Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=YjA5Yjk1NTItMzU1Y2FlYzMtMjFjMjNhMTAtMWE0ZjI4MGE=, ActorId: [8:7519616919911004809:2292], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T20:34:12.543539Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=8&id=YjA5Yjk1NTItMzU1Y2FlYzMtMjFjMjNhMTAtMWE0ZjI4MGE=, ActorId: [8:7519616919911004809:2292], ActorState: unknown state, 
Cleanup temp tables: 0 2025-06-24T20:34:12.543640Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=8&id=YjA5Yjk1NTItMzU1Y2FlYzMtMjFjMjNhMTAtMWE0ZjI4MGE=, ActorId: [8:7519616919911004809:2292], ActorState: unknown state, Session actor destroyed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> ResourcePoolsSysView::TestResourcePoolsSysViewFilters [GOOD] Test command err: 2025-06-24T20:33:11.303908Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616678612608158:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:11.304529Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00470f/r3tmp/tmpDTsIHl/pdisk_1.dat 2025-06-24T20:33:11.791104Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:11.794828Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616678612607942:2079] 1750797191280270 != 1750797191280273 2025-06-24T20:33:11.844743Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:11.844854Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 14490, node 1 2025-06-24T20:33:11.860813Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:33:11.947708Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:11.947730Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:11.947738Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:11.947847Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25943 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T20:33:12.296786Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:33:12.345828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:12.370344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:33:14.846027Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-24T20:33:14.862642Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=YWRlNTI1Y2UtNTlmMmE5ZmMtZGJkNTZiYzktNGE4M2JkM2E=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id YWRlNTI1Y2UtNTlmMmE5ZmMtZGJkNTZiYzktNGE4M2JkM2E= 2025-06-24T20:33:14.863379Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=YWRlNTI1Y2UtNTlmMmE5ZmMtZGJkNTZiYzktNGE4M2JkM2E=, ActorId: [1:7519616691497510443:2288], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:33:14.863488Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616691497510442:2287], Start check tables existence, number paths: 2 2025-06-24T20:33:14.863599Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-24T20:33:14.863633Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-24T20:33:14.863653Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-24T20:33:14.869576Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616691497510442:2287], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-24T20:33:14.869660Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616691497510442:2287], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-24T20:33:14.869690Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616691497510442:2287], Successfully finished 2025-06-24T20:33:14.869772Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-24T20:33:14.899522Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616691497510470:2298], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:33:14.903398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:14.906371Z 
node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616691497510470:2298], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-06-24T20:33:14.906566Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616691497510470:2298], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-24T20:33:14.922696Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616691497510470:2298], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:33:15.009583Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616691497510470:2298], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:33:15.015522Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616695792477817:2330] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:15.015697Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616691497510470:2298], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-24T20:33:15.018523Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=ZTU4NWQ0NjctNWZkZDM4MDAtZWRjOTM5MTMtZDkwNDJjOTY=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZTU4NWQ0NjctNWZkZDM4MDAtZWRjOTM5MTMtZDkwNDJjOTY= 2025-06-24T20:33:15.018903Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=ZTU4NWQ0NjctNWZkZDM4MDAtZWRjOTM5MTMtZDkwNDJjOTY=, ActorId: [1:7519616695792477824:2291], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:33:15.019082Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=ZTU4NWQ0NjctNWZkZDM4MDAtZWRjOTM5MTMtZDkwNDJjOTY=, ActorId: [1:7519616695792477824:2291], ActorState: ReadyState, TraceId: 01jyhta5rbev6tsgvz46g7azm9, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [1:7519616695792477823:2335] database: Root databaseId: /Root pool id: sample_pool_id 2025-06-24T20:33:15.019559Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-06-24T20:33:15.019583Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-24T20:33:15.019665Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [1:7519616695792477824:2291], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=ZTU4NWQ0NjctNWZkZDM4MDAtZWRjOTM5MTMtZDkwNDJjOTY= 2025-06-24T20:33:15.019714Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616695792477826:2292], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-06-24T20:33:15.019787Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7519616695792477827:2293], Database: /Root, Start database fetching 2025-06-24T20:33:15.021138Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7519616695792477827:2293], Database: /Root, Database info successfully fetched, serverless: 0 2025-06-24T20:33:15.021289Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616695792477826:2292], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully 
fetched 2025-06-24T20:33:15.021335Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:240: [WorkloadService] [Service] Successfully fetched database info, DatabaseId: /Root, Serverless: 0 2025-06-24T20:33:15.021365Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: /Root 2025-06-24T20:33:15.021387Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-06-24T20:33:15.021657Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:466: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7519616695792477838:2295], DatabaseId: /Root, PoolId: sample_pool_id, Subscribed on schemeboard notifications for path: [OwnerId: 72057594046644480, LocalPathId: 5] 2025-06-24T20:33:15.021720Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolRe ... 20:34:10.838901Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: SessionId: ydb://session/3?node_id=10&id=MmU3MDBmMjgtYTcwZjlkODQtYzAyM2Y3N2YtZTY5NGMxNTc=, ActorId: [10:7519616932368716309:2370], ActorState: ExecuteState, TraceId: 01jyhtbw2p6ybq6svs8rqke4ff, ExecutePhyTx, tx: 0x000050C0000D3F18 literal: 0 commit: 1 txCtx.DeferredEffects.size(): 0 2025-06-24T20:34:10.838964Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1503: SessionId: ydb://session/3?node_id=10&id=MmU3MDBmMjgtYTcwZjlkODQtYzAyM2Y3N2YtZTY5NGMxNTc=, ActorId: [10:7519616932368716309:2370], ActorState: ExecuteState, TraceId: 01jyhtbw2p6ybq6svs8rqke4ff, Sending to Executer TraceId: 0 8 2025-06-24T20:34:10.839056Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1561: SessionId: ydb://session/3?node_id=10&id=MmU3MDBmMjgtYTcwZjlkODQtYzAyM2Y3N2YtZTY5NGMxNTc=, ActorId: [10:7519616932368716309:2370], ActorState: ExecuteState, TraceId: 01jyhtbw2p6ybq6svs8rqke4ff, Created new KQP executer: [10:7519616932368716315:2370] isRollback: 0 2025-06-24T20:34:10.869855Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1852: SessionId: ydb://session/3?node_id=10&id=MmU3MDBmMjgtYTcwZjlkODQtYzAyM2Y3N2YtZTY5NGMxNTc=, ActorId: [10:7519616932368716309:2370], ActorState: ExecuteState, TraceId: 01jyhtbw2p6ybq6svs8rqke4ff, Forwarded TEvStreamData to [9:7519616931306316113:3376] 2025-06-24T20:34:10.871466Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=10&id=MmU3MDBmMjgtYTcwZjlkODQtYzAyM2Y3N2YtZTY5NGMxNTc=, ActorId: [10:7519616932368716309:2370], ActorState: ExecuteState, TraceId: 01jyhtbw2p6ybq6svs8rqke4ff, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-06-24T20:34:10.871615Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=10&id=MmU3MDBmMjgtYTcwZjlkODQtYzAyM2Y3N2YtZTY5NGMxNTc=, ActorId: [10:7519616932368716309:2370], ActorState: ExecuteState, TraceId: 01jyhtbw2p6ybq6svs8rqke4ff, txInfo Status: Committed Kind: ReadOnly TotalDuration: 32.825 ServerDuration: 32.736 QueriesCount: 2 2025-06-24T20:34:10.871680Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=10&id=MmU3MDBmMjgtYTcwZjlkODQtYzAyM2Y3N2YtZTY5NGMxNTc=, ActorId: [10:7519616932368716309:2370], ActorState: ExecuteState, TraceId: 01jyhtbw2p6ybq6svs8rqke4ff, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-24T20:34:10.872112Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: 
ydb://session/3?node_id=10&id=MmU3MDBmMjgtYTcwZjlkODQtYzAyM2Y3N2YtZTY5NGMxNTc=, ActorId: [10:7519616932368716309:2370], ActorState: ExecuteState, TraceId: 01jyhtbw2p6ybq6svs8rqke4ff, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:34:10.872142Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=10&id=MmU3MDBmMjgtYTcwZjlkODQtYzAyM2Y3N2YtZTY5NGMxNTc=, ActorId: [10:7519616932368716309:2370], ActorState: ExecuteState, TraceId: 01jyhtbw2p6ybq6svs8rqke4ff, EndCleanup, isFinal: 1 2025-06-24T20:34:10.872189Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=10&id=MmU3MDBmMjgtYTcwZjlkODQtYzAyM2Y3N2YtZTY5NGMxNTc=, ActorId: [10:7519616932368716309:2370], ActorState: ExecuteState, TraceId: 01jyhtbw2p6ybq6svs8rqke4ff, Sent query response back to proxy, proxyRequestId: 5, proxyId: [10:7519616902303944061:2154] 2025-06-24T20:34:10.872208Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=10&id=MmU3MDBmMjgtYTcwZjlkODQtYzAyM2Y3N2YtZTY5NGMxNTc=, ActorId: [10:7519616932368716309:2370], ActorState: unknown state, TraceId: 01jyhtbw2p6ybq6svs8rqke4ff, Cleanup temp tables: 0 2025-06-24T20:34:10.872765Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=10&id=MmU3MDBmMjgtYTcwZjlkODQtYzAyM2Y3N2YtZTY5NGMxNTc=, ActorId: [10:7519616932368716309:2370], ActorState: unknown state, TraceId: 01jyhtbw2p6ybq6svs8rqke4ff, Session actor destroyed 2025-06-24T20:34:10.886752Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=10&id=NTI1OTQwOTItZTJjNWQ1NDgtZmYxMWIwYzYtZTk0NDg1Yzk=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NTI1OTQwOTItZTJjNWQ1NDgtZmYxMWIwYzYtZTk0NDg1Yzk= 2025-06-24T20:34:10.887241Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=10&id=NTI1OTQwOTItZTJjNWQ1NDgtZmYxMWIwYzYtZTk0NDg1Yzk=, ActorId: [10:7519616932368716329:2378], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:34:10.887480Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=10&id=NTI1OTQwOTItZTJjNWQ1NDgtZmYxMWIwYzYtZTk0NDg1Yzk=, ActorId: [10:7519616932368716329:2378], ActorState: ReadyState, TraceId: 01jyhtbwa7emna3cx6yjy7d0mj, received request, proxyRequestId: 6 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT * FROM `.sys/resource_pools` WHERE Name >= "default" rpcActor: [9:7519616931306316117:3377] database: /Root/test-dedicated databaseId: /Root/test-dedicated pool id: default 2025-06-24T20:34:10.887521Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=10&id=NTI1OTQwOTItZTJjNWQ1NDgtZmYxMWIwYzYtZTk0NDg1Yzk=, ActorId: [10:7519616932368716329:2378], ActorState: ReadyState, TraceId: 01jyhtbwa7emna3cx6yjy7d0mj, request placed into pool from cache: default 2025-06-24T20:34:10.887648Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:575: SessionId: ydb://session/3?node_id=10&id=NTI1OTQwOTItZTJjNWQ1NDgtZmYxMWIwYzYtZTk0NDg1Yzk=, ActorId: [10:7519616932368716329:2378], ActorState: ExecuteState, TraceId: 01jyhtbwa7emna3cx6yjy7d0mj, Sending CompileQuery request 2025-06-24T20:34:11.087629Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: SessionId: ydb://session/3?node_id=10&id=NTI1OTQwOTItZTJjNWQ1NDgtZmYxMWIwYzYtZTk0NDg1Yzk=, ActorId: 
[10:7519616932368716329:2378], ActorState: ExecuteState, TraceId: 01jyhtbwa7emna3cx6yjy7d0mj, ExecutePhyTx, tx: 0x000050C0004F1218 literal: 0 commit: 1 txCtx.DeferredEffects.size(): 0 2025-06-24T20:34:11.087698Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1503: SessionId: ydb://session/3?node_id=10&id=NTI1OTQwOTItZTJjNWQ1NDgtZmYxMWIwYzYtZTk0NDg1Yzk=, ActorId: [10:7519616932368716329:2378], ActorState: ExecuteState, TraceId: 01jyhtbwa7emna3cx6yjy7d0mj, Sending to Executer TraceId: 0 8 2025-06-24T20:34:11.087796Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1561: SessionId: ydb://session/3?node_id=10&id=NTI1OTQwOTItZTJjNWQ1NDgtZmYxMWIwYzYtZTk0NDg1Yzk=, ActorId: [10:7519616932368716329:2378], ActorState: ExecuteState, TraceId: 01jyhtbwa7emna3cx6yjy7d0mj, Created new KQP executer: [10:7519616936663683638:2378] isRollback: 0 2025-06-24T20:34:11.104792Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1852: SessionId: ydb://session/3?node_id=10&id=NTI1OTQwOTItZTJjNWQ1NDgtZmYxMWIwYzYtZTk0NDg1Yzk=, ActorId: [10:7519616932368716329:2378], ActorState: ExecuteState, TraceId: 01jyhtbwa7emna3cx6yjy7d0mj, Forwarded TEvStreamData to [9:7519616931306316117:3377] 2025-06-24T20:34:11.108550Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=10&id=NTI1OTQwOTItZTJjNWQ1NDgtZmYxMWIwYzYtZTk0NDg1Yzk=, ActorId: [10:7519616932368716329:2378], ActorState: ExecuteState, TraceId: 01jyhtbwa7emna3cx6yjy7d0mj, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-06-24T20:34:11.108860Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=10&id=NTI1OTQwOTItZTJjNWQ1NDgtZmYxMWIwYzYtZTk0NDg1Yzk=, ActorId: [10:7519616932368716329:2378], ActorState: ExecuteState, TraceId: 01jyhtbwa7emna3cx6yjy7d0mj, txInfo Status: Committed Kind: ReadOnly TotalDuration: 21.467 ServerDuration: 21.3 QueriesCount: 2 2025-06-24T20:34:11.108946Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=10&id=NTI1OTQwOTItZTJjNWQ1NDgtZmYxMWIwYzYtZTk0NDg1Yzk=, ActorId: [10:7519616932368716329:2378], ActorState: ExecuteState, TraceId: 01jyhtbwa7emna3cx6yjy7d0mj, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-24T20:34:11.109414Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=10&id=NTI1OTQwOTItZTJjNWQ1NDgtZmYxMWIwYzYtZTk0NDg1Yzk=, ActorId: [10:7519616932368716329:2378], ActorState: ExecuteState, TraceId: 01jyhtbwa7emna3cx6yjy7d0mj, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:34:11.109464Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=10&id=NTI1OTQwOTItZTJjNWQ1NDgtZmYxMWIwYzYtZTk0NDg1Yzk=, ActorId: [10:7519616932368716329:2378], ActorState: ExecuteState, TraceId: 01jyhtbwa7emna3cx6yjy7d0mj, EndCleanup, isFinal: 1 2025-06-24T20:34:11.109529Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=10&id=NTI1OTQwOTItZTJjNWQ1NDgtZmYxMWIwYzYtZTk0NDg1Yzk=, ActorId: [10:7519616932368716329:2378], ActorState: ExecuteState, TraceId: 01jyhtbwa7emna3cx6yjy7d0mj, Sent query response back to proxy, proxyRequestId: 6, proxyId: [10:7519616902303944061:2154] 2025-06-24T20:34:11.109549Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=10&id=NTI1OTQwOTItZTJjNWQ1NDgtZmYxMWIwYzYtZTk0NDg1Yzk=, ActorId: [10:7519616932368716329:2378], ActorState: unknown state, 
TraceId: 01jyhtbwa7emna3cx6yjy7d0mj, Cleanup temp tables: 0 2025-06-24T20:34:11.110087Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=10&id=NTI1OTQwOTItZTJjNWQ1NDgtZmYxMWIwYzYtZTk0NDg1Yzk=, ActorId: [10:7519616932368716329:2378], ActorState: unknown state, TraceId: 01jyhtbwa7emna3cx6yjy7d0mj, Session actor destroyed 2025-06-24T20:34:11.136940Z node 9 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 10 2025-06-24T20:34:11.137331Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T20:34:11.137472Z node 9 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 11 2025-06-24T20:34:11.137684Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T20:34:11.144480Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=9&id=YzFkODg1MzUtNjllMjc4NzgtYzJiMzM5ZGItNmU2MzljN2U=, ActorId: [9:7519616896946576567:2294], ActorState: ReadyState, Session closed due to explicit close event 2025-06-24T20:34:11.144539Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=9&id=YzFkODg1MzUtNjllMjc4NzgtYzJiMzM5ZGItNmU2MzljN2U=, ActorId: [9:7519616896946576567:2294], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:34:11.144571Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=9&id=YzFkODg1MzUtNjllMjc4NzgtYzJiMzM5ZGItNmU2MzljN2U=, ActorId: [9:7519616896946576567:2294], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T20:34:11.144597Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=9&id=YzFkODg1MzUtNjllMjc4NzgtYzJiMzM5ZGItNmU2MzljN2U=, ActorId: [9:7519616896946576567:2294], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T20:34:11.144682Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=9&id=YzFkODg1MzUtNjllMjc4NzgtYzJiMzM5ZGItNmU2MzljN2U=, ActorId: [9:7519616896946576567:2294], ActorState: unknown state, Session actor destroyed |89.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs |89.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs |89.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadService::TestZeroConcurrentQueryLimit [GOOD] Test command err: 2025-06-24T20:33:09.562738Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616670210174261:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:09.562889Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00473d/r3tmp/tmpA8qAOs/pdisk_1.dat 2025-06-24T20:33:10.245455Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles 
were not loaded 2025-06-24T20:33:10.247293Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616670210174098:2079] 1750797189539825 != 1750797189539828 2025-06-24T20:33:10.352895Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:10.352994Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 16316, node 1 2025-06-24T20:33:10.384577Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:33:10.579465Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:33:10.655841Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:10.655862Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:10.655869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:10.655968Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14289 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:11.147666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:33:13.891343Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-24T20:33:13.891753Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-24T20:33:13.892508Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-24T20:33:13.892537Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-24T20:33:13.915267Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616687390043906:2290], Start check tables existence, number paths: 2 2025-06-24T20:33:13.918619Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=ZTRlN2IxMjMtZWY4MzA5YzAtMWZmMGM2ZTItZjA1OTc3NGE=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZTRlN2IxMjMtZWY4MzA5YzAtMWZmMGM2ZTItZjA1OTc3NGE= 2025-06-24T20:33:13.918792Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=ZTRlN2IxMjMtZWY4MzA5YzAtMWZmMGM2ZTItZjA1OTc3NGE=, ActorId: [1:7519616687390043907:2291], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:33:13.921769Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616687390043906:2290], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-24T20:33:13.921845Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616687390043906:2290], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-24T20:33:13.921907Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616687390043906:2290], Successfully finished 2025-06-24T20:33:13.922076Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-24T20:33:13.939917Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616687390043924:2302], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:33:13.946226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:13.957485Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616687390043924:2302], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-06-24T20:33:13.963413Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616687390043924:2302], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-24T20:33:13.975037Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616687390043924:2302], 
DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:33:14.030843Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616687390043924:2302], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:33:14.038197Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616691685011272:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:14.038378Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616687390043924:2302], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-24T20:33:14.041047Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=OTkwZmExZmItZWIyMTliMjItOWNkOTFhZDktZWMyNTJiNjM=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id OTkwZmExZmItZWIyMTliMjItOWNkOTFhZDktZWMyNTJiNjM= 2025-06-24T20:33:14.041425Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-06-24T20:33:14.041436Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-24T20:33:14.041502Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=OTkwZmExZmItZWIyMTliMjItOWNkOTFhZDktZWMyNTJiNjM=, ActorId: [1:7519616691685011280:2292], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:33:14.041696Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=OTkwZmExZmItZWIyMTliMjItOWNkOTFhZDktZWMyNTJiNjM=, ActorId: [1:7519616691685011280:2292], ActorState: ReadyState, TraceId: 01jyhta4ssf04xaqmevg7175j5, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [1:7519616691685011279:2341] database: Root databaseId: /Root pool id: sample_pool_id 2025-06-24T20:33:14.041730Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [1:7519616691685011280:2292], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=OTkwZmExZmItZWIyMTliMjItOWNkOTFhZDktZWMyNTJiNjM= 2025-06-24T20:33:14.041769Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616691685011282:2293], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-06-24T20:33:14.041841Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7519616691685011283:2294], Database: /Root, Start database fetching 2025-06-24T20:33:14.043216Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7519616691685011283:2294], Database: /Root, Database info successfully fetched, serverless: 0 2025-06-24T20:33:14.043293Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:240: [WorkloadService] [Service] Successfully fetched database info, DatabaseId: /Root, Serverless: 0 2025-06-24T20:33:14.043345Z node 1 
:KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [1:7519616691685011292:2295], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=OTkwZmExZmItZWIyMTliMjItOWNkOTFhZDktZWMyNTJiNjM=, Start pool fetching 2025-06-24T20:33:14.043367Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616691685011293:2296], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-06-24T20:33:14.043523Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616691685011282:2293], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-24T20:33:14.043581Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616691685011293:2296], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-24T20:33:14.043604Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: /Root 2025 ... orkload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-24T20:34:12.971270Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519616939307995889:2299], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:34:12.977112Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:12.978778Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519616939307995889:2299], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-06-24T20:34:12.979018Z node 6 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519616939307995889:2299], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-24T20:34:12.996078Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519616917833158809:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:12.996297Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:12.997364Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519616939307995889:2299], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:34:13.076329Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519616939307995889:2299], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:34:13.081255Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7519616943602963239:2333] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:13.081429Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519616939307995889:2299], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-24T20:34:13.090572Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=6&id=ZTc3YmNkNjctNDZiNWU1N2MtMWYwMjBhYTktNzJkNDg5ZDg=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZTc3YmNkNjctNDZiNWU1N2MtMWYwMjBhYTktNzJkNDg5ZDg= 2025-06-24T20:34:13.091280Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=6&id=ZTc3YmNkNjctNDZiNWU1N2MtMWYwMjBhYTktNzJkNDg5ZDg=, ActorId: [6:7519616943602963246:2293], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:34:13.091433Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-06-24T20:34:13.091472Z node 6 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-24T20:34:13.091555Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519616943602963248:2294], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-06-24T20:34:13.092787Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=6&id=ZTc3YmNkNjctNDZiNWU1N2MtMWYwMjBhYTktNzJkNDg5ZDg=, ActorId: [6:7519616943602963246:2293], ActorState: ReadyState, TraceId: 01jyhtbyf40589zdzqh5tmr3kz, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [6:7519616943602963245:2338] database: Root databaseId: /Root pool id: sample_pool_id 2025-06-24T20:34:13.092855Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [6:7519616943602963246:2293], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=6&id=ZTc3YmNkNjctNDZiNWU1N2MtMWYwMjBhYTktNzJkNDg5ZDg= 2025-06-24T20:34:13.092922Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [6:7519616943602963256:2295], Database: /Root, Start database fetching 2025-06-24T20:34:13.093513Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519616943602963248:2294], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-24T20:34:13.093598Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [6:7519616943602963256:2295], Database: /Root, Database info successfully fetched, 
serverless: 0 2025-06-24T20:34:13.093647Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: /Root 2025-06-24T20:34:13.093674Z node 6 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-06-24T20:34:13.093925Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:240: [WorkloadService] [Service] Successfully fetched database info, DatabaseId: /Root, Serverless: 0 2025-06-24T20:34:13.093997Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:466: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7519616943602963259:2296], DatabaseId: /Root, PoolId: sample_pool_id, Subscribed on schemeboard notifications for path: [OwnerId: 72057594046644480, LocalPathId: 5] 2025-06-24T20:34:13.094058Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [6:7519616943602963260:2297], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=6&id=ZTc3YmNkNjctNDZiNWU1N2MtMWYwMjBhYTktNzJkNDg5ZDg=, Start pool fetching 2025-06-24T20:34:13.094102Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519616943602963261:2298], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-06-24T20:34:13.097093Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:274: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7519616943602963259:2296], DatabaseId: /Root, PoolId: sample_pool_id, Got watch notification 2025-06-24T20:34:13.097213Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519616943602963261:2298], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-24T20:34:13.097299Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:107: [WorkloadService] [TPoolResolverActor] ActorId: [6:7519616943602963260:2297], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=6&id=ZTc3YmNkNjctNDZiNWU1N2MtMWYwMjBhYTktNzJkNDg5ZDg=, Pool info successfully resolved 2025-06-24T20:34:13.097355Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:279: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: /Root, SessionId: ydb://session/3?node_id=6&id=ZTc3YmNkNjctNDZiNWU1N2MtMWYwMjBhYTktNzJkNDg5ZDg= 2025-06-24T20:34:13.097474Z node 6 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:290: [WorkloadService] [Service] Request placed into pool, DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=6&id=ZTc3YmNkNjctNDZiNWU1N2MtMWYwMjBhYTktNzJkNDg5ZDg= 2025-06-24T20:34:13.097591Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=6&id=ZTc3YmNkNjctNDZiNWU1N2MtMWYwMjBhYTktNzJkNDg5ZDg=, ActorId: [6:7519616943602963246:2293], ActorState: ExecuteState, TraceId: 01jyhtbyf40589zdzqh5tmr3kz, Create QueryResponse for error on request, msg: Query failed during adding/waiting in workload pool sample_pool_id 2025-06-24T20:34:13.097780Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=6&id=ZTc3YmNkNjctNDZiNWU1N2MtMWYwMjBhYTktNzJkNDg5ZDg=, ActorId: [6:7519616943602963246:2293], ActorState: ExecuteState, TraceId: 01jyhtbyf40589zdzqh5tmr3kz, Cleanup start, isFinal: 1 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] 
WorkloadServiceCleanup: 1 2025-06-24T20:34:13.098073Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:189: [WorkloadService] [Service] Finished request with worker actor [6:7519616943602963246:2293], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=6&id=ZTc3YmNkNjctNDZiNWU1N2MtMWYwMjBhYTktNzJkNDg5ZDg= 2025-06-24T20:34:13.098160Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=6&id=ZTc3YmNkNjctNDZiNWU1N2MtMWYwMjBhYTktNzJkNDg5ZDg=, ActorId: [6:7519616943602963246:2293], ActorState: CleanupState, TraceId: 01jyhtbyf40589zdzqh5tmr3kz, EndCleanup, isFinal: 1 2025-06-24T20:34:13.098302Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=6&id=ZTc3YmNkNjctNDZiNWU1N2MtMWYwMjBhYTktNzJkNDg5ZDg=, ActorId: [6:7519616943602963246:2293], ActorState: CleanupState, TraceId: 01jyhtbyf40589zdzqh5tmr3kz, Sent query response back to proxy, proxyRequestId: 3, proxyId: [6:7519616917833158892:2144] 2025-06-24T20:34:13.098357Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=6&id=ZTc3YmNkNjctNDZiNWU1N2MtMWYwMjBhYTktNzJkNDg5ZDg=, ActorId: [6:7519616943602963246:2293], ActorState: unknown state, TraceId: 01jyhtbyf40589zdzqh5tmr3kz, Cleanup temp tables: 0 2025-06-24T20:34:13.098511Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=6&id=ZTc3YmNkNjctNDZiNWU1N2MtMWYwMjBhYTktNzJkNDg5ZDg=, ActorId: [6:7519616943602963246:2293], ActorState: unknown state, TraceId: 01jyhtbyf40589zdzqh5tmr3kz, Session actor destroyed 2025-06-24T20:34:13.139447Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=6&id=NDM1Y2YxYWMtZjJhMjE0MjgtYmNmYmU1NGQtZjlhZTlkYTk=, ActorId: [6:7519616939307995887:2291], ActorState: ReadyState, Session closed due to explicit close event 2025-06-24T20:34:13.139508Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=6&id=NDM1Y2YxYWMtZjJhMjE0MjgtYmNmYmU1NGQtZjlhZTlkYTk=, ActorId: [6:7519616939307995887:2291], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:34:13.139543Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=6&id=NDM1Y2YxYWMtZjJhMjE0MjgtYmNmYmU1NGQtZjlhZTlkYTk=, ActorId: [6:7519616939307995887:2291], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T20:34:13.139571Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=6&id=NDM1Y2YxYWMtZjJhMjE0MjgtYmNmYmU1NGQtZjlhZTlkYTk=, ActorId: [6:7519616939307995887:2291], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T20:34:13.139677Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=6&id=NDM1Y2YxYWMtZjJhMjE0MjgtYmNmYmU1NGQtZjlhZTlkYTk=, ActorId: [6:7519616939307995887:2291], ActorState: unknown state, Session actor destroyed >> TBlobStorageWardenTest::TestGivenPDiskFormatedWithGuid1AndCreatedWithGuid2WhenYardInitThenError [GOOD] >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW+VolatileTxs ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestGivenPDiskFormatedWithGuid1AndCreatedWithGuid2WhenYardInitThenError [GOOD] Test command err: 2025-06-24T20:34:13.176691Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) 
Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:34:13.177967Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:34:13.178045Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:34:13.181377Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:34:13.182032Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:34:13.183801Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b39/r3tmp/tmpmGoqnh/pdisk_1.dat 2025-06-24T20:34:13.756445Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.120223s 2025-06-24T20:34:13.756582Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.120401s Formatting PDisk with guid1 8402139303055281551 Creating PDisk with guid2 4435767157998124351 Creating pdisk 2025-06-24T20:34:14.517435Z node 1 :BS_PDISK ERROR: {BSP01@blobstorage_pdisk_actor.cpp:586} PDiskId# 1001 Can't start due to a guid error expected# 4435767157998124351 on-disk# 8402139303055281551 PDiskId# 1001 2025-06-24T20:34:14.555939Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [abc2fc901918ac71] bootstrap ActorId# [1:484:2461] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:8:0:0:347:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-24T20:34:14.556092Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [abc2fc901918ac71] Id# [72057594037932033:2:8:0:0:347:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:14.556138Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [abc2fc901918ac71] Id# [72057594037932033:2:8:0:0:347:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:14.556168Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [abc2fc901918ac71] Id# [72057594037932033:2:8:0:0:347:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:14.556195Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [abc2fc901918ac71] Id# [72057594037932033:2:8:0:0:347:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:14.556221Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [abc2fc901918ac71] Id# [72057594037932033:2:8:0:0:347:0] 
restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:14.556250Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [abc2fc901918ac71] Id# [72057594037932033:2:8:0:0:347:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:14.556310Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [abc2fc901918ac71] restore Id# [72057594037932033:2:8:0:0:347:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T20:34:14.556393Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [abc2fc901918ac71] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:8:0:0:347:1] Marker# BPG33 2025-06-24T20:34:14.556441Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [abc2fc901918ac71] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:8:0:0:347:1] Marker# BPG32 2025-06-24T20:34:14.556488Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [abc2fc901918ac71] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:8:0:0:347:2] Marker# BPG33 2025-06-24T20:34:14.556523Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [abc2fc901918ac71] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:8:0:0:347:2] Marker# BPG32 2025-06-24T20:34:14.556553Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [abc2fc901918ac71] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:8:0:0:347:3] Marker# BPG33 2025-06-24T20:34:14.556579Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [abc2fc901918ac71] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:8:0:0:347:3] Marker# BPG32 2025-06-24T20:34:14.556738Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:42:2087] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:347:3] FDS# 347 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T20:34:14.556815Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:35:2080] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:347:2] FDS# 347 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T20:34:14.556866Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:56:2101] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:347:1] FDS# 347 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T20:34:14.571914Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [abc2fc901918ac71] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:347:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 9 } Cost# 82732 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 10 }}}} from# [2000000:1:0:0:0] Marker# BPP01 2025-06-24T20:34:14.572126Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [abc2fc901918ac71] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:347:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 82732 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:1:0] Marker# BPP01 2025-06-24T20:34:14.572223Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [abc2fc901918ac71] received 
{EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:347:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 8 } Cost# 82732 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 9 }}}} from# [2000000:1:0:3:0] Marker# BPP01 2025-06-24T20:34:14.572335Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [abc2fc901918ac71] Result# TEvPutResult {Id# [72057594037932033:2:8:0:0:347:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 33554432 Marker# BPP12 2025-06-24T20:34:14.572416Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [abc2fc901918ac71] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:8:0:0:347:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-24T20:34:14.572612Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 1.105 sample PartId# [72057594037932033:2:8:0:0:347:3] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.105 sample PartId# [72057594037932033:2:8:0:0:347:2] QueryCount# 1 VDiskId# [2000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.106 sample PartId# [72057594037932033:2:8:0:0:347:1] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 16.199 VDiskId# [2000000:1:0:0:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 16.365 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 16.48 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } ] } Verify that PDisk returns ERROR YardInitResult: {EvYardInitResult Status# CORRUPTED ErrorReason# "PDisk is in StateError, reason# PDiskId# 1001 Can't start due to a guid error expected# 4435767157998124351 on-disk# 8402139303055281551" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 0 ownerRound# 0 OwnerWeight# 0 SlotSizeInUnits# 0 ChunkSize# 0 AppendBlockSize# 0 RecommendedReadSize# 0 SeekTimeUs# 0 ReadSpeedBps# 0 WriteSpeedBps# 0 ReadBlockSize# 0 WriteBlockSize# 0 BulkWriteBlockSize# 0 PrefetchSizeBytes# 0 GlueRequestDistanceBytes# 0}} OwnedChunks# {}} >> KqpWorkloadServiceDistributed::TestDistributedLargeConcurrentQueryLimit [GOOD] >> KikimrIcGateway::TestCreateExternalTable >> TPartitionTests::DifferentWriteTxBatchingOptions [GOOD] >> TBlobStorageWardenTest::TestBlockEncriptedGroup [GOOD] >> TPartitionTests::FailedTxsDontBlock >> Viewer::TenantInfo5kkTablets [GOOD] >> Viewer::UseTransactionWhenExecuteDataActionQuery >> DataShardVolatile::DistributedWriteThenImmediateUpsert >> YdbProxy::DescribePath >> GenericFederatedQuery::IcebergHiveSaFilterPushdown [GOOD] >> DataShardVolatile::DistributedWrite >> TFlatTest::SelectRangeForbidNullArgs2 [GOOD] >> TFlatTest::SelectRangeForbidNullArgs3 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestBlockEncriptedGroup [GOOD] Test command err: 2025-06-24T20:34:13.166948Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:34:13.167571Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): 
FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:34:13.169054Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:34:13.170120Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:34:13.170421Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:34:13.171342Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T20:34:13.172394Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b3e/r3tmp/tmpQFyWrT/pdisk_1.dat 2025-06-24T20:34:13.753307Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.119670s 2025-06-24T20:34:13.753460Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.119853s 2025-06-24T20:34:14.319025Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [e2e5f1b9c917f854] bootstrap ActorId# [1:485:2463] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:8:0:0:1330:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-24T20:34:14.319214Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1330:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:14.319266Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1330:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:14.319295Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1330:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:14.319322Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1330:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:14.319348Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1330:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:14.319375Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1330:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:14.319414Z node 1 :BS_PROXY_PUT 
DEBUG: dsproxy_strategy_restore.h:65: [e2e5f1b9c917f854] restore Id# [72057594037932033:2:8:0:0:1330:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T20:34:14.319515Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [e2e5f1b9c917f854] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:8:0:0:1330:1] Marker# BPG33 2025-06-24T20:34:14.319569Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2e5f1b9c917f854] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:8:0:0:1330:1] Marker# BPG32 2025-06-24T20:34:14.319613Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [e2e5f1b9c917f854] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:8:0:0:1330:2] Marker# BPG33 2025-06-24T20:34:14.319643Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2e5f1b9c917f854] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:8:0:0:1330:2] Marker# BPG32 2025-06-24T20:34:14.319674Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [e2e5f1b9c917f854] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:8:0:0:1330:3] Marker# BPG33 2025-06-24T20:34:14.319699Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2e5f1b9c917f854] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:8:0:0:1330:3] Marker# BPG32 2025-06-24T20:34:14.319865Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:42:2087] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1330:3] FDS# 1330 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T20:34:14.319936Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:35:2080] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1330:2] FDS# 1330 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T20:34:14.319985Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:56:2101] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1330:1] FDS# 1330 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T20:34:14.339627Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2e5f1b9c917f854] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1330:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 8 } Cost# 90472 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 9 }}}} from# [2000000:1:0:0:0] Marker# BPP01 2025-06-24T20:34:14.340050Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2e5f1b9c917f854] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1330:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 90472 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:1:0] Marker# BPP01 2025-06-24T20:34:14.340204Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2e5f1b9c917f854] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1330:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 9 } Cost# 90472 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 10 }}}} from# [2000000:1:0:3:0] Marker# 
BPP01 2025-06-24T20:34:14.340314Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [e2e5f1b9c917f854] Result# TEvPutResult {Id# [72057594037932033:2:8:0:0:1330:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 33554432 Marker# BPP12 2025-06-24T20:34:14.340375Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [e2e5f1b9c917f854] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:8:0:0:1330:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-24T20:34:14.340590Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 1.118 sample PartId# [72057594037932033:2:8:0:0:1330:3] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.119 sample PartId# [72057594037932033:2:8:0:0:1330:2] QueryCount# 1 VDiskId# [2000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.12 sample PartId# [72057594037932033:2:8:0:0:1330:1] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 20.832 VDiskId# [2000000:1:0:0:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 21.196 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 21.362 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } ] } 2025-06-24T20:34:14.399901Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [a55b41de52eb2a08] bootstrap ActorId# [1:531:2500] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:9:0:0:224:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-24T20:34:14.400048Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [a55b41de52eb2a08] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:14.400084Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [a55b41de52eb2a08] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:14.400111Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [a55b41de52eb2a08] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:14.400139Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [a55b41de52eb2a08] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:14.400166Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [a55b41de52eb2a08] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:14.400191Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [a55b41de52eb2a08] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:14.400237Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [a55b41de52eb2a08] restore Id# [72057594037932033:2:9:0:0:224:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T20:34:14.400303Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [a55b41de52eb2a08] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:9:0:0:224:1] Marker# BPG33 2025-06-24T20:34:14.400350Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [a55b41de52eb2a08] Sending missing VPut part# 0 to# 0 blob 
Id# [72057594037932033:2:9:0:0:224:1] Marker# BPG32 2025-06-24T20:34:14.400396Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [a55b41de52eb2a08] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:9:0:0:224:2] Marker# BPG33 2025-06-24T20:34:14.400422Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [a55b41de52eb2a08] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:9:0:0:224:2] Marker# BPG32 2025-06-24T20:34:14.400450Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [a55b41de52eb2a08] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:9:0:0:224:3] Marker# BPG33 2025-06-24T20:34:14.400475Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [a55b41de52eb2a08] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:9:0:0:224:3] Marker# BPG32 2025-06-24T20: ... PutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 3 }}}} from# [82000002:1:0:0:0] Marker# BPP01 2025-06-24T20:34:16.833927Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [cd65997ea3b51537] Result# TEvPutResult {Id# [1234:2:0:0:0:5:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 2181038082 Marker# BPP12 2025-06-24T20:34:16.834007Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [cd65997ea3b51537] SendReply putResult# TEvPutResult {Id# [1234:2:0:0:0:5:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-24T20:34:16.834158Z node 2 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2181038082 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.916 sample PartId# [1234:2:0:0:0:5:1] QueryCount# 1 VDiskId# [82000002:1:0:0:0] NodeId# 2 } TEvVPutResult{ TimestampMs# 7.1 VDiskId# [82000002:1:0:0:0] NodeId# 2 Status# OK } ] } 2025-06-24T20:34:16.834748Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 2181038082 TEvConfigureProxy received GroupGeneration# IsLimitedKeyless# false Marker# DSP02 2025-06-24T20:34:16.834786Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:57: Group# 2181038082 SetStateUnconfigured Marker# DSP07 2025-06-24T20:34:16.834884Z node 3 :BS_PROXY DEBUG: dsproxy_impl.h:205: Group# 2181038082 HandleEnqueue# TEvBlock {TabletId# 1234 Generation# 3 Deadline# 18446744073709551 IsMonitored# 1} Marker# DSP17 2025-06-24T20:34:16.836192Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 2181038082 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# true Marker# DSP02 2025-06-24T20:34:16.836260Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 1 fullIfPossible# 0 Marker# DSP58 2025-06-24T20:34:16.838773Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:613:2106] Create Queue# [3:615:2107] targetNodeId# 2 Marker# DSP01 2025-06-24T20:34:16.838948Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:613:2106] Create Queue# [3:616:2108] targetNodeId# 2 Marker# DSP01 2025-06-24T20:34:16.839081Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:613:2106] Create Queue# [3:617:2109] targetNodeId# 2 Marker# DSP01 2025-06-24T20:34:16.841317Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:613:2106] Create Queue# [3:618:2110] targetNodeId# 2 Marker# DSP01 
2025-06-24T20:34:16.841497Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:613:2106] Create Queue# [3:619:2111] targetNodeId# 2 Marker# DSP01 2025-06-24T20:34:16.841627Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:613:2106] Create Queue# [3:620:2112] targetNodeId# 2 Marker# DSP01 2025-06-24T20:34:16.841817Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:613:2106] Create Queue# [3:621:2113] targetNodeId# 2 Marker# DSP01 2025-06-24T20:34:16.841857Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 2181038082 SetStateEstablishingSessions Marker# DSP03 2025-06-24T20:34:16.843603Z node 3 :BS_NODE ERROR: {NW19@node_warden_group.cpp:212} error while parsing group GroupId# 2181038082 Err# LifeCyclePhase# KEY_NOT_LOADED Key.Id# "" Key.Version# 0 MainKey.Id# "/home/runner/.ya/build/build_root/2btm/004b3e/r3tmp/tmpvtp48l//key.txt" MainKey.Version# 1 GroupKeyNonce# 2181038082 2025-06-24T20:34:16.844123Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 1 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T20:34:16.844361Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 2 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T20:34:16.844466Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 3 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T20:34:16.844641Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 4 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T20:34:16.844741Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 5 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T20:34:16.844825Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 6 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T20:34:16.844883Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] 
QueueId# 7 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T20:34:16.844915Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:194: Group# 2181038082 -> StateWork Marker# DSP11 2025-06-24T20:34:16.844967Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:80: Group# 2181038082 SetStateWork Marker# DSP15 2025-06-24T20:34:16.845155Z node 3 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:150: [efc53170c63234c6] bootstrap ActorId# [3:622:2114] Group# 2181038082 TabletId# 1234 Generation# 3 Deadline# 586524-01-19T08:01:49.551615Z RestartCounter# 0 Marker# DSPB05 2025-06-24T20:34:16.845224Z node 3 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:111: [efc53170c63234c6] Sending TEvVBlock Tablet# 1234 Generation# 3 vdiskId# [82000002:1:0:0:0] node# 2 Marker# DSPB03 2025-06-24T20:34:16.845437Z node 3 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [3:615:2107] NKikimr::TEvBlobStorage::TEvVBlock# NKikimrBlobStorage.TEvVBlock TabletId: 1234 Generation: 3 VDiskID { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } IssuerGuid: 2038994835514474485 MsgQoS { ExtQueueId: PutTabletLog } cookie# 0 2025-06-24T20:34:16.864631Z node 3 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:43: [efc53170c63234c6] Handle TEvVBlockResult status# OK From# [82000002:1:0:0:0] NodeId# 2 Marker# DSPB01 2025-06-24T20:34:16.864738Z node 3 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:100: [efc53170c63234c6] Result# TEvBlockResult {Status# OK} Marker# DSPB04 Sending TEvPut 2025-06-24T20:34:16.865181Z node 3 :BS_PROXY INFO: dsproxy_impl.h:309: Group# 2181038082 HandleError ev# TEvPut {Id# [1234:3:0:0:0:10:0] Size# 10 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} Response# TEvPutResult {Id# [1234:3:0:0:0:10:0] Status# ERROR StatusFlags# { } ErrorReason# "Created as LIMITED without keys. It happens when tenant keys are missing on the node." ApproximateFreeSpaceShare# 0} Marker# DSP31 Sending TEvPut 2025-06-24T20:34:16.865432Z node 3 :BS_PROXY DEBUG: dsproxy_impl.h:309: Group# 2181038082 HandleError ev# TEvPut {Id# [1234:4:0:0:0:10:0] Size# 10 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} Response# TEvPutResult {Id# [1234:4:0:0:0:10:0] Status# ERROR StatusFlags# { } ErrorReason# "Created as LIMITED without keys. It happens when tenant keys are missing on the node." 
ApproximateFreeSpaceShare# 0} Marker# DSP31 Sending TEvPut 2025-06-24T20:34:16.865888Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [c85e1a21dcb31b54] bootstrap ActorId# [2:623:2514] Group# 2181038082 BlobCount# 1 BlobIDs# [[1234:2:0:0:0:11:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-24T20:34:16.866092Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [c85e1a21dcb31b54] Id# [1234:2:0:0:0:11:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:34:16.866159Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [c85e1a21dcb31b54] restore Id# [1234:2:0:0:0:11:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T20:34:16.866244Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [c85e1a21dcb31b54] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [1234:2:0:0:0:11:1] Marker# BPG33 2025-06-24T20:34:16.866298Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [c85e1a21dcb31b54] Sending missing VPut part# 0 to# 0 blob Id# [1234:2:0:0:0:11:1] Marker# BPG32 2025-06-24T20:34:16.866469Z node 2 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [2:602:2504] NKikimr::TEvBlobStorage::TEvVPut# {ID# [1234:2:0:0:0:11:1] FDS# 11 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T20:34:16.866850Z node 2 :BS_VDISK_PUT ERROR: blobstorage_skeleton.cpp:569: PDiskId# 1000 VDISK[82000002:_:0:0:0]: (2181038082) TEvVPut: failed to pass the Hull check; id# [1234:2:0:0:0:11:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T20:34:16.869685Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [c85e1a21dcb31b54] received {EvVPutResult Status# BLOCKED ErrorReason# "blocked" ID# [1234:2:0:0:0:11:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 3 } Cost# 80086 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 4 }}}} from# [82000002:1:0:0:0] Marker# BPP01 2025-06-24T20:34:16.869877Z node 2 :BS_PROXY_PUT ERROR: dsproxy_put_impl.cpp:72: [c85e1a21dcb31b54] Result# TEvPutResult {Id# [1234:2:0:0:0:11:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000002:1:0:0:0]" ApproximateFreeSpaceShare# 0} GroupId# 2181038082 Marker# BPP12 2025-06-24T20:34:16.869968Z node 2 :BS_PROXY_PUT NOTICE: dsproxy_put.cpp:486: [c85e1a21dcb31b54] SendReply putResult# TEvPutResult {Id# [1234:2:0:0:0:11:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000002:1:0:0:0]" ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-24T20:34:16.870136Z node 2 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2181038082 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.815 sample PartId# [1234:2:0:0:0:11:1] QueryCount# 1 VDiskId# [82000002:1:0:0:0] NodeId# 2 } ] } 2025-06-24T20:34:16.870806Z node 3 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [3:615:2107] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[1234:4294967295:4294967295:0] collect=[4294967295:4294967295] cookie# 0 >> TFlatTest::WriteMergeAndRead [GOOD] >> TFlatTest::WriteSplitAndRead >> YdbProxy::DropTable >> YdbProxy::ReadTopic ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> 
KqpWorkloadServiceDistributed::TestDistributedLargeConcurrentQueryLimit [GOOD] Test command err: 2025-06-24T20:33:10.568667Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616674704237099:2056];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:10.568729Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004728/r3tmp/tmp5xw6Fz/pdisk_1.dat 2025-06-24T20:33:11.154200Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:11.156597Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616674704237084:2079] 1750797190554358 != 1750797190554361 2025-06-24T20:33:11.194088Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:11.194183Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:11.207084Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21109, node 1 2025-06-24T20:33:11.379864Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:11.379891Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:11.379911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:11.380059Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:33:11.594379Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22058 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:33:11.966125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:14.320061Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-24T20:33:14.336609Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-24T20:33:14.336647Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-24T20:33:14.336668Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-24T20:33:14.336802Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616691884106893:2289], Start check tables existence, number paths: 2 2025-06-24T20:33:14.336904Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=ODdiZTRiMDgtZGE3M2ExNS0yYjBhYzM1MS00MGEzMGJh, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ODdiZTRiMDgtZGE3M2ExNS0yYjBhYzM1MS00MGEzMGJh 2025-06-24T20:33:14.336962Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=ODdiZTRiMDgtZGE3M2ExNS0yYjBhYzM1MS00MGEzMGJh, ActorId: [1:7519616691884106894:2290], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:33:14.339967Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616691884106893:2289], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-24T20:33:14.340040Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616691884106893:2289], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-24T20:33:14.340102Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616691884106893:2289], Successfully finished 2025-06-24T20:33:14.340179Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-24T20:33:14.351447Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616691884106911:2301], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:33:14.357159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:14.359794Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616691884106911:2301], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool 
tx: 281474976715658 2025-06-24T20:33:14.361540Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616691884106911:2301], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-24T20:33:14.369942Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616691884106911:2301], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:33:14.447264Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616691884106911:2301], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:33:14.451129Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616691884106963:2334] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:14.451254Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616691884106911:2301], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-24T20:33:14.451584Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616691884106970:2340], DatabaseId: Root, PoolId: sample_pool_id, Start pool fetching 2025-06-24T20:33:14.452684Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616691884106970:2340], DatabaseId: Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-24T20:33:14.462942Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=1&id=ODdiZTRiMDgtZGE3M2ExNS0yYjBhYzM1MS00MGEzMGJh, ActorId: [1:7519616691884106894:2290], ActorState: ReadyState, Session closed due to explicit close event 2025-06-24T20:33:14.462988Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=1&id=ODdiZTRiMDgtZGE3M2ExNS0yYjBhYzM1MS00MGEzMGJh, ActorId: [1:7519616691884106894:2290], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:33:14.463014Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=1&id=ODdiZTRiMDgtZGE3M2ExNS0yYjBhYzM1MS00MGEzMGJh, ActorId: [1:7519616691884106894:2290], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T20:33:14.463090Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=1&id=ODdiZTRiMDgtZGE3M2ExNS0yYjBhYzM1MS00MGEzMGJh, ActorId: [1:7519616691884106894:2290], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T20:33:14.463731Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=1&id=ODdiZTRiMDgtZGE3M2ExNS0yYjBhYzM1MS00MGEzMGJh, ActorId: [1:7519616691884106894:2290], ActorState: unknown state, Session actor destroyed 2025-06-24T20:33:15.047785Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616697275186882:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:15.050356Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004728/r3tmp/tmp6faBYS/pdisk_1.dat 2025-06-24T20:33:15.240627Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:15.255338Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch 
for subscription [2:7519616697275186863:2079] 1750797195046346 != 1750797195046349 2025-06-24T20:33:15.256142Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:15.256212Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:15.258868Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4757, node 2 2025-06-24T20:33:15.324043Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:15.324069Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:15.324098Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:15.324243Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10093 WaitRootIsUp 'Root'... TCli ... sactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:34:13.853389Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=6&id=MTc4OGMwNzYtYTFmZGI2NTAtYmNhYzdmMGYtZGM4MDJhZDM=, ActorId: [6:7519616803748082364:2295], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T20:34:13.853420Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=6&id=MTc4OGMwNzYtYTFmZGI2NTAtYmNhYzdmMGYtZGM4MDJhZDM=, ActorId: [6:7519616803748082364:2295], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T20:34:13.853504Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=6&id=MTc4OGMwNzYtYTFmZGI2NTAtYmNhYzdmMGYtZGM4MDJhZDM=, ActorId: [6:7519616803748082364:2295], ActorState: unknown state, Session actor destroyed 2025-06-24T20:34:13.859216Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ExecuteState, TraceId: 01jyhtbz5r5382hvvgg83vvd3h, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-06-24T20:34:13.859405Z node 7 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ExecuteState, TraceId: 01jyhtbz5r5382hvvgg83vvd3h, txInfo Status: Committed Kind: ReadWrite TotalDuration: 40.776 ServerDuration: 40.24 QueriesCount: 2 2025-06-24T20:34:13.859543Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ExecuteState, TraceId: 01jyhtbz5r5382hvvgg83vvd3h, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-24T20:34:13.859604Z node 7 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ExecuteState, TraceId: 01jyhtbz5r5382hvvgg83vvd3h, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:34:13.859638Z node 7 :KQP_SESSION DEBUG: 
kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ExecuteState, TraceId: 01jyhtbz5r5382hvvgg83vvd3h, EndCleanup, isFinal: 0 2025-06-24T20:34:13.859697Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ExecuteState, TraceId: 01jyhtbz5r5382hvvgg83vvd3h, Sent query response back to proxy, proxyRequestId: 596, proxyId: [7:7519616783563929474:2170] 2025-06-24T20:34:13.860883Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Update lease, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, TxId: 2025-06-24T20:34:13.861034Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:197: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Update lease, RunDataQuery: -- TRefreshPoolStateQuery::OnLeaseUpdated DECLARE $database_id AS Text; DECLARE $pool_id AS Text; SELECT COUNT(*) AS delayed_requests FROM `.metadata/workload_manager/delayed_requests` WHERE database = $database_id AND pool_id = $pool_id AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp()) AND lease_deadline >= CurrentUtcTimestamp(); SELECT COUNT(*) AS running_requests FROM `.metadata/workload_manager/running_requests` WHERE database = $database_id AND pool_id = $pool_id AND lease_deadline >= CurrentUtcTimestamp(); 2025-06-24T20:34:13.861548Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ReadyState, TraceId: 01jyhtbz75d0wt7zj063f59pkw, received request, proxyRequestId: 597 prepared: 0 tx_control: 1 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DML text: -- TRefreshPoolStateQuery::OnLeaseUpdated DECLARE $database_id AS Text; DECLARE $pool_id AS Text; SELECT COUNT(*) AS delayed_requests FROM `.metadata/workload_manager/delayed_requests` WHERE database = $database_id AND pool_id = $pool_id AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp()) AND lease_deadline >= CurrentUtcTimestamp(); SELECT COUNT(*) AS running_requests FROM `.metadata/workload_manager/running_requests` WHERE database = $database_id AND pool_id = $pool_id AND lease_deadline >= CurrentUtcTimestamp(); rpcActor: [7:7519616946772696353:5096] database: /Root databaseId: /Root pool id: default 2025-06-24T20:34:13.861575Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ReadyState, TraceId: 01jyhtbz75d0wt7zj063f59pkw, request placed into pool from cache: default 2025-06-24T20:34:13.868085Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ExecuteState, TraceId: 01jyhtbz75d0wt7zj063f59pkw, ExecutePhyTx, tx: 0x000050C0004D41D8 literal: 0 commit: 0 txCtx.DeferredEffects.size(): 0 2025-06-24T20:34:13.868222Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1503: SessionId: 
ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ExecuteState, TraceId: 01jyhtbz75d0wt7zj063f59pkw, Sending to Executer TraceId: 0 8 2025-06-24T20:34:13.868374Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1561: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ExecuteState, TraceId: 01jyhtbz75d0wt7zj063f59pkw, Created new KQP executer: [7:7519616946772696356:5091] isRollback: 0 2025-06-24T20:34:13.919407Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ExecuteState, TraceId: 01jyhtbz75d0wt7zj063f59pkw, TEvTxResponse, CurrentTx: 1/2 response.status: SUCCESS 2025-06-24T20:34:13.919540Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ExecuteState, TraceId: 01jyhtbz75d0wt7zj063f59pkw, ExecutePhyTx, tx: 0x000050C0004D4118 literal: 1 commit: 1 txCtx.DeferredEffects.size(): 0 2025-06-24T20:34:13.920711Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ExecuteState, TraceId: 01jyhtbz75d0wt7zj063f59pkw, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-06-24T20:34:13.920877Z node 7 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ExecuteState, TraceId: 01jyhtbz75d0wt7zj063f59pkw, txInfo Status: Committed Kind: ReadOnly TotalDuration: 53.115 ServerDuration: 52.881 QueriesCount: 2 2025-06-24T20:34:13.921024Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ExecuteState, TraceId: 01jyhtbz75d0wt7zj063f59pkw, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-24T20:34:13.921088Z node 7 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ExecuteState, TraceId: 01jyhtbz75d0wt7zj063f59pkw, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:34:13.921122Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ExecuteState, TraceId: 01jyhtbz75d0wt7zj063f59pkw, EndCleanup, isFinal: 0 2025-06-24T20:34:13.921173Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ExecuteState, TraceId: 01jyhtbz75d0wt7zj063f59pkw, Sent query response back to proxy, proxyRequestId: 597, proxyId: [7:7519616783563929474:2170] 2025-06-24T20:34:13.922193Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, 
RequestSessionId: , State: Describe pool, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, TxId: 2025-06-24T20:34:13.922279Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:367: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Describe pool, Finish with SUCCESS, SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, TxId: 2025-06-24T20:34:13.922484Z node 7 :KQP_WORKLOAD_SERVICE TRACE: pool_handlers_actors.cpp:746: [WorkloadService] [TPoolHandlerActorBase] ActorId: [7:7519616805038766170:2293], DatabaseId: /Root, PoolId: sample_pool_id, succefully refreshed pool state, in flight: 0, delayed: 0 2025-06-24T20:34:13.922528Z node 7 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ReadyState, Session closed due to explicit close event 2025-06-24T20:34:13.922573Z node 7 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:34:13.922605Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T20:34:13.922664Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T20:34:13.922747Z node 7 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=7&id=MmViZGRkMWUtNDE1NDRjODAtMjBkNWI4MjItYWIzZTBmZTk=, ActorId: [7:7519616946772696333:5091], ActorState: unknown state, Session actor destroyed >> Cdc::ShouldBreakLocksOnConcurrentAddIndex [GOOD] >> Cdc::ResolvedTimestampsContinueAfterMerge ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHiveSaFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 30828, MsgBus: 17990 2025-06-24T20:33:23.804092Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616731344946709:2188];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:23.805008Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00275e/r3tmp/tmp1XxxPq/pdisk_1.dat 2025-06-24T20:33:24.267839Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:24.270688Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616731344946546:2079] 1750797203758292 != 1750797203758295 2025-06-24T20:33:24.278269Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-24T20:33:24.278371Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:24.280187Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30828, node 1 2025-06-24T20:33:24.381483Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:24.381505Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:24.381519Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:24.381664Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17990 2025-06-24T20:33:24.803334Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17990 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:25.149191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:25.164970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:33:27.365143Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616748524816369:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:27.365287Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:27.736376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:27.949468Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616748524816493:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:27.949637Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:27.949992Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616748524816498:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:27.953642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:27.965127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-24T20:33:27.965483Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616748524816500:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T20:33:28.057101Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616752819783836:2398] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:28.815419Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616731344946709:2188];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:28.815497Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:28.861858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:29.584975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:30.189600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:30.943989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:31.485601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715685:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:31.987583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:32.045397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:33:34.143756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715708:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-24T20:33:34.194612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715709:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:34.200282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715710:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:34.202305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose ... 5660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:08.826914Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:09.512710Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:10.430725Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:11.490207Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715682:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:12.261157Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715689:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:12.959735Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:13.025854Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:34:15.925249Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715716:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. 
selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: 
INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. GRpcStatusCode: 0 >> THiveTest::TestHiveBalancerIgnoreTablet [GOOD] >> THiveTest::TestHiveBalancerNodeRestarts >> Cdc::InitialScanRacyProgressAndDrop [GOOD] >> TPQTest::TestWritePQCompact [GOOD] >> Cdc::EnqueueRequestProcessSend >> TPQTest::TestWritePQBigMessage [GOOD] >> TPQTest::TestWritePQ >> KikimrIcGateway::TestCreateSameExternalTable [GOOD] >> KikimrIcGateway::TestDropExternalTable >> SystemView::CollectPreparedQueries [GOOD] >> SystemView::CollectScanQueries >> SystemView::VSlotsFields [GOOD] >> SystemView::TopPartitionsByCpuTables >> PartitionEndWatcher::EmptyPartition [GOOD] >> PartitionEndWatcher::AfterCommit [GOOD] >> YdbProxy::AlterTable >> Viewer::SharedDoesntShowExclusiveNodes [GOOD] >> Viewer::ServerlessWithExclusiveNodesCheckTable >> GenericFederatedQuery::IcebergHadoopBasicFilterPushdown [GOOD] >> TFetchRequestTests::HappyWay [GOOD] >> TFetchRequestTests::BadTopicName >> KikimrIcGateway::TestCreateExternalTable [GOOD] >> KikimrIcGateway::TestCreateResourcePool >> YdbProxy::MakeDirectory >> KqpRm::SnapshotSharingByExchanger >> TPartitionTests::FailedTxsDontBlock [GOOD] >> TFlatTest::SelectRangeForbidNullArgs3 [GOOD] >> TPartitionTests::EndWriteTimestamp_DataKeysBody >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW+VolatileTxs [GOOD] >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW-VolatileTxs >> YdbProxy::DescribePath [GOOD] >> YdbProxy::DescribeTable >> TFlatTest::WriteSplitAndRead [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHadoopBasicFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 27862, MsgBus: 9147 2025-06-24T20:33:27.216500Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616747563671831:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:27.216536Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002744/r3tmp/tmpA1kgYp/pdisk_1.dat 2025-06-24T20:33:27.769058Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616747563671811:2079] 1750797207196096 != 1750797207196099 2025-06-24T20:33:27.780282Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:27.786399Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:27.786492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:27.791575Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27862, node 1 2025-06-24T20:33:27.939842Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:27.939869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:27.939888Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:27.940027Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9147 2025-06-24T20:33:28.286145Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9147 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:28.591514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:28.624983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:33:30.972984Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616760448574344:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:30.973126Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:31.312710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:31.429830Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616764743541763:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:31.430007Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:31.430398Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616764743541769:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:31.433923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:31.448263Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616764743541771:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T20:33:31.514097Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616764743541811:2400] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:32.219276Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616747563671831:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:32.219352Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:32.249830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:32.826310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:33.461382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:33.997380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:34.508648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:35.006354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:35.049912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:33:37.022785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710706:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-24T20:33:37.065768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710707:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:37.068000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710709:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:37.069790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710708:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions ... 
or: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:11.496561Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519616916769102962:2174];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:11.496654Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:11.979407Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:12.661745Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:13.445421Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:14.241521Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:15.104065Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710685:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:16.009046Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:16.065975Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:34:19.662248Z node 4 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710710:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 >> THiveTest::TestLockTabletExecutionLocalGone [GOOD] >> THiveTest::TestLocalRegistrationInSharedHive >> TPartitionTests::EndWriteTimestamp_DataKeysBody [GOOD] >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW+VolatileTxs [GOOD] >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW-VolatileTxs >> ResourcePoolClassifiersDdl::TestDropResourcePoolClassifier [GOOD] >> ResourcePoolClassifiersDdl::TestDropResourcePool >> TPartitionTests::EndWriteTimestamp_FromMeta >> YdbProxy::DropTable [GOOD] >> YdbProxy::DescribeTopic >> TPartitionTests::EndWriteTimestamp_FromMeta [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SelectRangeForbidNullArgs3 [GOOD] Test command err: 2025-06-24T20:34:13.800452Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616944918307226:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:13.800984Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ab4/r3tmp/tmpz1j7rI/pdisk_1.dat 2025-06-24T20:34:14.243261Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616944918307128:2079] 1750797253761271 != 1750797253761274 2025-06-24T20:34:14.250820Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:14.261214Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:14.261455Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:14.263772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8784 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: 2025-06-24T20:34:14.801449Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T20:34:14.846520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:34:14.902187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ab4/r3tmp/tmpTWP5bf/pdisk_1.dat 2025-06-24T20:34:18.643273Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616967594928751:2240];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:18.716846Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:34:18.804910Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:18.805022Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:18.827419Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519616967594928523:2079] 1750797258383001 != 1750797258383004 2025-06-24T20:34:18.866824Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:18.870297Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14115 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T20:34:19.172788Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:19.179976Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T20:34:19.196331Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> TPartitionTests::EndWriteTimestamp_HeadKeys >> Viewer::StorageGroupOutputWithoutFilterNoDepends [GOOD] >> Viewer::StorageGroupOutputWithSpaceCheckDependsOnVDiskSpaceStatus ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::WriteSplitAndRead [GOOD] Test command err: 2025-06-24T20:34:13.736615Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616945552809039:2152];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:13.736764Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ab3/r3tmp/tmpCCucsr/pdisk_1.dat 2025-06-24T20:34:14.267568Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616945552808925:2079] 1750797253717355 != 1750797253717358 2025-06-24T20:34:14.290901Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:14.293785Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:14.293868Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:14.300563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5217 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T20:34:14.745935Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:14.747637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:14.762736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T20:34:14.772303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T20:34:14.780984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:34:15.017292Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.005s,wait=0.003s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-24T20:34:15.021262Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.013s,wait=0.003s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-06-24T20:34:15.063356Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1573 647 6413)b }, ecr=1.000 2025-06-24T20:34:15.082549Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.006s,wait=0.001s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 2025-06-24T20:34:15.128595Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:34:15.132546Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.21, eph 3} end=Done, 4 blobs 8r (max 9), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (3250 2180 6413)b }, ecr=1.000 2025-06-24T20:34:15.134564Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:34:15.134648Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:34:15.135191Z node 1 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037888, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-24T20:34:15.135217Z node 1 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001, finished edge# 0, front# 0 2025-06-24T20:34:15.143379Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:34:15.148805Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:34:15.148875Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:34:15.152083Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T20:34:15.155896Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.21, eph 3} end=Done, 4 blobs 9r (max 9), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 4 +0, (4073 2983 5183)b }, ecr=1.000 2025-06-24T20:34:15.157716Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-24T20:34:15.157792Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T20:34:15.158386Z node 1 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037889, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-24T20:34:15.158401Z node 1 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037889, table# 1001, finished edge# 0, front# 0 2025-06-24T20:34:15.167411Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: 
TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T20:34:15.176617Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-24T20:34:15.176700Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750797254920 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... (TRUNCATED) 2025-06-24T20:34:15.216914Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T20:34:15.218886Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710680 released its data 2025-06-24T20:34:15.219062Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:34:15.220457Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710680 released its data 2025-06-24T20:34:15.225825Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:34:15.226600Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710680 at 72075186224037888 restored its data 2025-06-24T20:34:15.227612Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710680 released its data 2025-06-24T20:34:15.227775Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T20:34:15.228234Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710680 at 72075186224037889 restored its data 2025-06-24T20:34:15.229005Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710680 released its data 2025-06-24T20:34:15.235335Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T20:34:15.239763Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710680 at 72075186224037889 restored its data 2025-06-24T20:34:15.240718Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710680 released its data 2025-06-24T20:34:15.241447Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:34:15.242058Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710680 at 72075186224037888 restored its data 2025-06-24T20:34:15.243070Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710680 released its data 2025-06-24T20:34:15.243754Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:34:15.244353Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710680 at 72075186224037888 restored its data 2025-06-24T20:34:15.245150Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710680 released its data 2025-06-24T20:34:15.245301Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T20:34:15.245817Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710680 at 72075186224037889 restored its data 2025-06-24T20:34:15.246641Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710680 released its data 2025- ... ard: 72057594046644480, message: Source { RawX1: 7519616971963383680 RawX2: 4503608217307417 } TabletId: 72075186224037891 State: 4 2025-06-24T20:34:20.022672Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-06-24T20:34:20.023717Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-24T20:34:20.023746Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-24T20:34:20.023832Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-24T20:34:20.023879Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-24T20:34:20.025056Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-24T20:34:20.025242Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-06-24T20:34:20.025680Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-24T20:34:20.025693Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-24T20:34:20.026732Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037891 state Offline 2025-06-24T20:34:20.026815Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037888 reason = ReasonStop 2025-06-24T20:34:20.026843Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:7519616971963383443:2386], serverId# [2:7519616971963383450:2387], sessionId# [0:0:0] 2025-06-24T20:34:20.026902Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-06-24T20:34:20.026915Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-06-24T20:34:20.026931Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 
72075186224037890, clientId# [2:7519616971963383822:2617], serverId# [2:7519616971963383823:2618], sessionId# [0:0:0] 2025-06-24T20:34:20.027491Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519616971963383691 RawX2: 4503608217307418 } TabletId: 72075186224037892 State: 4 2025-06-24T20:34:20.027531Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037892, state: Offline, at schemeshard: 72057594046644480 2025-06-24T20:34:20.027737Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519616971963383691 RawX2: 4503608217307418 } TabletId: 72075186224037892 State: 4 2025-06-24T20:34:20.027756Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037892, state: Offline, at schemeshard: 72057594046644480 2025-06-24T20:34:20.028255Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2025-06-24T20:34:20.028274Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037890 not found 2025-06-24T20:34:20.028408Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:5 hive 72057594037968897 at ss 72057594046644480 2025-06-24T20:34:20.028470Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:5 hive 72057594037968897 at ss 72057594046644480 2025-06-24T20:34:20.029024Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037888 2025-06-24T20:34:20.029096Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-06-24T20:34:20.030501Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-24T20:34:20.030555Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 2025-06-24T20:34:20.031467Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-24T20:34:20.031673Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-24T20:34:20.031727Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037892 state Offline 2025-06-24T20:34:20.031751Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037892 state Offline 2025-06-24T20:34:20.031843Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046644480 ShardLocalIdx: 5, at schemeshard: 72057594046644480 2025-06-24T20:34:20.031955Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-24T20:34:20.032035Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046644480 ShardLocalIdx: 5, at schemeshard: 72057594046644480 TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Path not found" 2025-06-24T20:34:20.032645Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519616971963383337 RawX2: 4503608217307346 } TabletId: 72075186224037889 State: 4 2025-06-24T20:34:20.032708Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037889, state: Offline, at schemeshard: 72057594046644480 2025-06-24T20:34:20.033789Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-24T20:34:20.033808Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-24T20:34:20.033850Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-24T20:34:20.033864Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:5 tabletId 72075186224037892 2025-06-24T20:34:20.033884Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-24T20:34:20.033976Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 72057594046644480 2025-06-24T20:34:20.035431Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037891 reason = ReasonStop 2025-06-24T20:34:20.035472Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037891, clientId# [2:7519616971963383824:2619], serverId# [2:7519616971963383825:2620], sessionId# [0:0:0] 2025-06-24T20:34:20.035491Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037892 reason = ReasonStop 2025-06-24T20:34:20.035507Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037892, clientId# [2:7519616971963383826:2621], serverId# [2:7519616971963383827:2622], sessionId# [0:0:0] 2025-06-24T20:34:20.035525Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2025-06-24T20:34:20.036157Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037892 not found 2025-06-24T20:34:20.036449Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037891 not found 2025-06-24T20:34:20.036654Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-24T20:34:20.036844Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 
2025-06-24T20:34:20.036994Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T20:34:20.037018Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-24T20:34:20.037054Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-24T20:34:20.037482Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037892 2025-06-24T20:34:20.037560Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037892 2025-06-24T20:34:20.039001Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037891 2025-06-24T20:34:20.039065Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-24T20:34:20.040192Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037889 reason = ReasonStop 2025-06-24T20:34:20.040792Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037889 not found 2025-06-24T20:34:20.043355Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-06-24T20:34:20.043448Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037889 2025-06-24T20:34:20.045337Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-24T20:34:20.045370Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-24T20:34:20.045432Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 >> KqpRm::NotEnoughExecutionUnits |89.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut |89.3%| [LD] {RESULT} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut |89.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut >> KikimrIcGateway::TestDropExternalTable [GOOD] >> KikimrIcGateway::TestDropExternalDataSource >> DataShardVolatile::DistributedWriteThenImmediateUpsert [GOOD] >> DataShardVolatile::DistributedWriteThenSplit >> TPartitionTests::EndWriteTimestamp_HeadKeys [GOOD] >> TLocksFatTest::PointSetNotBreak [GOOD] >> TLocksFatTest::PointSetRemove >> SystemView::CollectScanQueries [GOOD] >> SystemView::AuthUsers >> TPDiskTest::DeviceHaltTooLong [GOOD] >> TPDiskTest::ChangePDiskKey >> KqpRm::SnapshotSharingByExchanger [GOOD] >> YdbProxy::AlterTable [GOOD] >> DataShardVolatile::DistributedWrite [GOOD] >> DataShardVolatile::DistributedWriteBrokenLock ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::EndWriteTimestamp_HeadKeys [GOOD] Test command err: 2025-06-24T20:34:02.189769Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:34:02.189883Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 
72057594037927937] doesn't have tx writes info 2025-06-24T20:34:02.236335Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-24T20:34:02.236682Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:34:02.237157Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:181:2194] 2025-06-24T20:34:02.238639Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-24T20:34:02.238866Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-24T20:34:02.239061Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-24T20:34:02.239295Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-24T20:34:02.239728Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-24T20:34:02.239962Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 1 size 684 so 0 eo 1 d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-24T20:34:02.240119Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-24T20:34:02.240160Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T20:34:02.240209Z node 1 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T20:34:02.000000Z 2025-06-24T20:34:02.240254Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 2025-06-24T20:34:02.240305Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [1:181:2194] 2025-06-24T20:34:02.240373Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 1 endOffset 1 Head Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 2025-06-24T20:34:02.240440Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T20:34:02.240737Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:34:02.599843Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src1|262bd1ee-6e4ed1c9-1085dd6-1694e490_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src1 2025-06-24T20:34:02.600048Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 Got batch complete: 1 2025-06-24T20:34:03.678249Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 Create immediate tx with id = 3 and act no: 4 Create immediate tx with id = 6 and act no: 7 2025-06-24T20:34:03.678692Z node 1 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-24T20:34:05.111590Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:34:05.111793Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Wait batch completion 2025-06-24T20:34:05.112020Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T20:34:05.112085Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T20:34:05.112254Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src1' seqNo 1 partNo 0 2025-06-24T20:34:05.113172Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src1' seqNo 1 partNo 0 FormedBlobsCount 0 NewHead: Offset 20 PartNo 0 PackedSize 84 count 1 nextOffset 21 batches 1 2025-06-24T20:34:05.113275Z node 1 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-24T20:34:05.113335Z node 1 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0, NewHead=Offset 20 PartNo 0 PackedSize 84 count 1 nextOffset 21 batches 1 2025-06-24T20:34:05.131388Z node 1 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-24T20:34:05.131573Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src1' seqNo 3 partNo 0 2025-06-24T20:34:05.133133Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1425: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob sourceId 'src1' seqNo 3 partNo 0 result is x0000000000_00000000000000000020_00000_0000000001_00000? size 70 2025-06-24T20:34:05.133251Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1117: [PQ: 72057594037927937, Partition: 0, State: StateIdle] writing blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 0 old key x0000000000_00000000000000000020_00000_0000000001_00000? new key d0000000000_00000000000000000020_00000_0000000001_00000? 
size 70 WTime 10139 2025-06-24T20:34:05.134294Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src1' seqNo 3 partNo 0 FormedBlobsCount 1 NewHead: Offset 50 PartNo 0 PackedSize 84 count 1 nextOffset 51 batches 1 2025-06-24T20:34:05.134379Z node 1 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-24T20:34:05.134462Z node 1 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 84 count 1 nextOffset 51 batches 1 2025-06-24T20:34:05.134522Z node 1 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-24T20:34:05.135034Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 0 compactOffset 50,1 HeadOffset 1 endOffset 1 curOffset 51 d0000000000_00000000000000000050_00000_0000000001_00000? size 70 WTime 10139 Got batch complete: 5 Got KV request Got KV request Got KV request Send disk status response with cookie: 0 Wait immediate tx complete 3 2025-06-24T20:34:05.157356Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 34 WriteNewSizeFromSupportivePartitions# 2 2025-06-24T20:34:05.157485Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:34:05.157569Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 1, partNo: 0, Offset: 1 is already written 2025-06-24T20:34:05.157612Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:34:05.157663Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 1 is already written 2025-06-24T20:34:05.157995Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=140, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 3 Wait immediate tx complete 6 Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 6 2025-06-24T20:34:06.513885Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=140, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Create distr tx with id = 8 and act no: 9 Create immediate tx with id = 10 and act no: 11 Create distr tx with id = 12 and act no: 13 2025-06-24T20:34:06.514258Z node 1 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 8 2025-06-24T20:34:06.514394Z node 1 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 12 2025-06-24T20:34:07.877948Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=140, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:34:07.879246Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Wait batch completion 2025-06-24T20:34:07.879911Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T20:34:07.880074Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T20:34:07.880205Z node 1 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-24T20:34:07.880464Z node 1 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 51 PartNo 0 PackedSize 0 count 0 nextOffset 51 ba ... /rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src2' seqNo 10 partNo 0 2025-06-24T20:34:21.235600Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src2' seqNo 10 partNo 0 FormedBlobsCount 0 NewHead: Offset 70 PartNo 0 PackedSize 552 count 10 nextOffset 80 batches 1 2025-06-24T20:34:21.236272Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 0 compactOffset 70,10 HeadOffset 25 endOffset 25 curOffset 80 d0000000000_00000000000000000070_00000_0000000010_00000? size 299 WTime 11240 Got KV request Got batch complete: 10 Got KV request Got KV request Send disk status response with cookie: 0 2025-06-24T20:34:21.259033Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 170 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T20:34:21.259160Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:34:21.259255Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 1, partNo: 0, Offset: 70 is stored on disk 2025-06-24T20:34:21.259313Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-24T20:34:21.259354Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 2, partNo: 0, Offset: 71 is stored on disk 2025-06-24T20:34:21.259385Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:34:21.259425Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 72 is stored on disk 2025-06-24T20:34:21.259456Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:34:21.259492Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 4, partNo: 0, Offset: 73 is stored on disk 2025-06-24T20:34:21.259523Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:34:21.259558Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 5, partNo: 0, Offset: 74 is stored on disk 2025-06-24T20:34:21.259588Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:34:21.259629Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 6, partNo: 0, Offset: 75 is stored on disk 2025-06-24T20:34:21.259663Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:34:21.259703Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 76 is stored on disk 2025-06-24T20:34:21.259737Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:34:21.259776Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 8, partNo: 0, Offset: 77 is stored on disk 2025-06-24T20:34:21.259806Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-24T20:34:21.259845Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 9, partNo: 0, Offset: 78 is stored on disk 2025-06-24T20:34:21.259877Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:34:21.259914Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 10, partNo: 0, Offset: 79 is stored on disk 2025-06-24T20:34:21.260182Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=488, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Create distr tx with id = 8 and act no: 9 Create immediate tx with id = 10 and act no: 11 Create distr tx with id = 12 and act no: 13 2025-06-24T20:34:21.260514Z node 2 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 8 2025-06-24T20:34:21.260636Z node 2 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 12 2025-06-24T20:34:22.351641Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=488, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:34:22.351857Z node 2 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Wait batch completion 2025-06-24T20:34:22.352052Z node 2 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T20:34:22.352120Z node 2 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T20:34:22.352201Z node 2 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(ABORTED), reason=MinSeqNo violation failure on src2 Got batch complete: 3 2025-06-24T20:34:22.624717Z node 2 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 12 2025-06-24T20:34:22.624816Z node 2 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 12 2025-06-24T20:34:22.624900Z node 2 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 80 PartNo 0 PackedSize 0 count 0 nextOffset 80 batches 0, NewHead=Offset 80 PartNo 0 PackedSize 0 count 0 nextOffset 80 batches 0 Got KV request Send disk status response with cookie: 0 Wait immediate tx complete 10 2025-06-24T20:34:22.659764Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 1 2025-06-24T20:34:22.659894Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, 
State: StateIdle] need more data for compaction. cumulativeSize=488, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Got propose resutl: Origin: 72057594037927937 Status: ABORTED TxId: 10 Errors { Kind: BAD_REQUEST Reason: "MinSeqNo violation failure on src2" } Wait tx committed for tx 12 2025-06-24T20:34:23.472736Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:34:23.472829Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:34:23.524204Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [3:180:2193] 2025-06-24T20:34:23.526253Z node 3 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T20:34:23.000000Z 2025-06-24T20:34:23.526331Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 2 generation 0 [3:180:2193] 2025-06-24T20:34:24.655109Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:34:24.655217Z node 4 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:34:24.699099Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [4:182:2195] 2025-06-24T20:34:24.701374Z node 4 :PERSQUEUE INFO: partition_init.cpp:895: [Root/PQ/rt3.dc1--account--topic:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T20:34:24.701455Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 2 generation 0 [4:182:2195] 2025-06-24T20:34:25.778365Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:34:25.778456Z node 5 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:34:25.796809Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [5:182:2195] >>>> ADD BLOB 0 writeTimestamp=2025-06-24T20:34:25.788951Z >>>> ADD BLOB 1 writeTimestamp=2025-06-24T20:34:25.788980Z >>>> ADD BLOB 2 writeTimestamp=2025-06-24T20:34:25.788999Z >>>> ADD BLOB 3 writeTimestamp=2025-06-24T20:34:25.789016Z >>>> ADD BLOB 4 writeTimestamp=2025-06-24T20:34:25.789033Z >>>> ADD BLOB 5 writeTimestamp=2025-06-24T20:34:25.789049Z >>>> ADD BLOB 6 writeTimestamp=2025-06-24T20:34:25.789062Z >>>> ADD BLOB 7 writeTimestamp=2025-06-24T20:34:25.789076Z >>>> ADD BLOB 8 writeTimestamp=2025-06-24T20:34:25.789090Z >>>> ADD BLOB 9 writeTimestamp=2025-06-24T20:34:25.789108Z 2025-06-24T20:34:25.804147Z node 5 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-24T20:34:25.000000Z 2025-06-24T20:34:25.804235Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 2 generation 0 [5:182:2195] >> KqpRm::NotEnoughExecutionUnits [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::SnapshotSharingByExchanger [GOOD] Test command err: 2025-06-24T20:34:23.629238Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T20:34:23.629842Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/0046ad/r3tmp/tmpRbLL1D/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T20:34:23.630688Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/0046ad/r3tmp/tmpRbLL1D/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/0046ad/r3tmp/tmpRbLL1D/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 9817709833072020221 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T20:34:23.678319Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T20:34:23.678641Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T20:34:23.727951Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:457:2101] with ResourceBroker at [2:428:2100] 2025-06-24T20:34:23.728105Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:458:2102] 2025-06-24T20:34:23.728270Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:456:2336] with ResourceBroker at [1:427:2317] 2025-06-24T20:34:23.728389Z node 1 :KQP_RESOURCE_MANAGER DEBUG: 
kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:459:2337] 2025-06-24T20:34:23.728520Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T20:34:23.728555Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-24T20:34:23.728587Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T20:34:23.728607Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-24T20:34:23.728776Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:23.757513Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750797263 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:23.757924Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:23.758061Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750797263 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:23.758371Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T20:34:23.758476Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T20:34:23.758521Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:23.758625Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750797263 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:23.758759Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T20:34:23.758786Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:23.758857Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750797263 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:23.758974Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T20:34:23.759836Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from 
subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2025-06-24T20:34:23.759980Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T20:34:23.760422Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T20:34:23.761061Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T20:34:23.761202Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T20:34:23.761377Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-06-24T20:34:23.761603Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-24T20:34:23.761771Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T20:34:24.957669Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-24T20:34:24.957800Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-24T20:34:24.957948Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-1-1 (1 by [1:456:2336]) priority=0 resources={0, 100} 2025-06-24T20:34:24.958022Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-1-1 (1 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:24.958085Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-1-1 (1 by [1:456:2336]) from queue queue_kqp_resource_manager 2025-06-24T20:34:24.958140Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-1-1 (1 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:24.958187Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.250000 (insert task kqp-1-1-1 (1 by [1:456:2336])) 2025-06-24T20:34:24.958453Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 1. 
Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-24T20:34:24.958552Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-2-1-2 (2 by [1:456:2336]) priority=0 resources={0, 100} 2025-06-24T20:34:24.958601Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-2-1-2 (2 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:24.958647Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-2-1-2 (2 by [1:456:2336]) from queue queue_kqp_resource_manager 2025-06-24T20:34:24.958700Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-2-1-2 (2 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:24.958742Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.250000 to 0.500000 (insert task kqp-2-1-2 (2 by [1:456:2336])) 2025-06-24T20:34:24.958843Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 2, taskId: 1. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-24T20:34:24.958924Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:24.959080Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: alloc, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750797264 AvailableComputeActors: 80 UsedMemory: 200 TotalMemory: 1000 Memory { Pool: 1 Available: 800 } ExecutionUnits: 80 2025-06-24T20:34:24.959429Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T20:34:25.292150Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-24T20:34:25.292341Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-1-1 (1 by [2:457:2101]) priority=0 resources={0, 100} 2025-06-24T20:34:25.292416Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-1-1 (1 by [2:457:2101]) to queue queue_kqp_resource_manager 2025-06-24T20:34:25.292468Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-1-1 (1 by [2:457:2101]) from queue queue_kqp_resource_manager 2025-06-24T20:34:25.292515Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-1-1 (1 by [2:457:2101]) to queue queue_kqp_resource_manager 2025-06-24T20:34:25.292563Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.250000 (insert task kqp-1-1-1 (1 by [2:457:2101])) 2025-06-24T20:34:25.292700Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 1. 
Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-24T20:34:25.292783Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-2-2-2 (2 by [2:457:2101]) priority=0 resources={0, 100} 2025-06-24T20:34:25.292825Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-2-2-2 (2 by [2:457:2101]) to queue queue_kqp_resource_manager 2025-06-24T20:34:25.292868Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-2-2-2 (2 by [2:457:2101]) from queue queue_kqp_resource_manager 2025-06-24T20:34:25.292907Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-2-2-2 (2 by [2:457:2101]) to queue queue_kqp_resource_manager 2025-06-24T20:34:25.292960Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.250000 to 0.500000 (insert task kqp-2-2-2 (2 by [2:457:2101])) 2025-06-24T20:34:25.293041Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 2, taskId: 2. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-24T20:34:25.293107Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:25.293249Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: alloc, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750797265 AvailableComputeActors: 80 UsedMemory: 200 TotalMemory: 1000 Memory { Pool: 1 Available: 800 } ExecutionUnits: 80 2025-06-24T20:34:25.293563Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-24T20:34:25.624092Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-24T20:34:25.624249Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-1-1-1 (1 by [1:456:2336]) (release resources {0, 100}) 2025-06-24T20:34:25.624313Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.500000 to 0.350200 (remove task kqp-1-1-1 (1 by [1:456:2336])) 2025-06-24T20:34:25.624351Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.200400 2025-06-24T20:34:25.624400Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 1, taskId: 1. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 10. 2025-06-24T20:34:25.624446Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-2-1-2 (2 by [1:456:2336]) (release resources {0, 100}) 2025-06-24T20:34:25.624496Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.350200 to 0.200400 (remove task kqp-2-1-2 (2 by [1:456:2336])) 2025-06-24T20:34:25.624539Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 2, taskId: 1. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 10. 
2025-06-24T20:34:25.624605Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:25.624771Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: alloc, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750797266 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:25.625107Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T20:34:25.941309Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-24T20:34:25.941445Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-1-1-1 (1 by [2:457:2101]) (release resources {0, 100}) 2025-06-24T20:34:25.941508Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.500000 to 0.350200 (remove task kqp-1-1-1 (1 by [2:457:2101])) 2025-06-24T20:34:25.941549Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.200400 2025-06-24T20:34:25.941597Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 1, taskId: 1. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 10. 2025-06-24T20:34:25.941642Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-2-2-2 (2 by [2:457:2101]) (release resources {0, 100}) 2025-06-24T20:34:25.941691Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.350200 to 0.200400 (remove task kqp-2-2-2 (2 by [2:457:2101])) 2025-06-24T20:34:25.941741Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 2, taskId: 2. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 10. 
2025-06-24T20:34:25.941811Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:25.941952Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: alloc, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750797267 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:25.942289Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-24T20:34:26.248567Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request >> KqpRm::ResourceBrokerNotEnoughResources >> KikimrIcGateway::TestCreateResourcePool [GOOD] >> KikimrIcGateway::TestALterResourcePool >> KqpRm::NodesMembershipByExchanger >> YdbProxy::MakeDirectory [GOOD] >> YdbProxy::OAuthToken |89.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut |89.3%| [LD] {RESULT} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut |89.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut |89.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes >> TPDiskTest::ChangePDiskKey [GOOD] >> TPDiskTest::PDiskIncreaseLogChunksLimitAfterRestart |89.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes |89.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::NotEnoughExecutionUnits [GOOD] Test command err: 2025-06-24T20:34:26.659582Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T20:34:26.660125Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/0046a9/r3tmp/tmpbTzfh5/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T20:34:26.660745Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/0046a9/r3tmp/tmpbTzfh5/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/0046a9/r3tmp/tmpbTzfh5/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 15122477181607980009 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T20:34:26.739881Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T20:34:26.740178Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T20:34:26.770703Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:457:2101] with ResourceBroker at [2:428:2100] 2025-06-24T20:34:26.770859Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:458:2102] 2025-06-24T20:34:26.771022Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:456:2336] with ResourceBroker at [1:427:2317] 2025-06-24T20:34:26.771106Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:459:2337] 2025-06-24T20:34:26.791544Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T20:34:26.791641Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-24T20:34:26.791734Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T20:34:26.791763Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
2025-06-24T20:34:26.791962Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:26.822588Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750797266 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:26.823008Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:26.823137Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750797266 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:26.824336Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T20:34:26.824485Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T20:34:26.824530Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:26.824657Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750797266 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:26.824788Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T20:34:26.824830Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:26.824915Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750797266 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:26.825039Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T20:34:26.825885Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2025-06-24T20:34:26.826007Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T20:34:26.826512Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T20:34:26.827032Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T20:34:26.828044Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info 
from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T20:34:26.828283Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-06-24T20:34:26.828531Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-24T20:34:26.828827Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::AlterTable [GOOD] Test command err: 2025-06-24T20:34:21.052302Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616978446182468:2241];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:21.061661Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002767/r3tmp/tmpfmsi5f/pdisk_1.dat 2025-06-24T20:34:21.639967Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:21.687419Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616974151214951:2079] 1750797260942481 != 1750797260942484 2025-06-24T20:34:21.709747Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:21.709865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:21.719529Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30663 TServer::EnableGrpc on GrpcPort 30164, node 1 2025-06-24T20:34:22.050752Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:22.071589Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:22.071621Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:22.071628Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:22.071765Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30663 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:22.695713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:22.723068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:34:25.690892Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616995626052072:2304] txid# 281474976710658, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-24T20:34:25.723022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:25.858540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T20:34:25.891902Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616995626052188:2383] txid# 281474976710661, issues: { message: "Can\'t drop unknown column: \'extra\'" severity: 1 } 2025-06-24T20:34:26.037822Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616978446182468:2241];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:26.037951Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpRm::ResourceBrokerNotEnoughResources [GOOD] >> YdbProxy::DescribeTable [GOOD] >> KqpRm::ManyTasks >> Viewer::UseTransactionWhenExecuteDataActionQuery [GOOD] >> ViewerTopicDataTests::TopicDataTest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::ResourceBrokerNotEnoughResources [GOOD] Test command err: 
2025-06-24T20:34:28.583141Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T20:34:28.595910Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/0046a3/r3tmp/tmpEYq9vV/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T20:34:28.596634Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/0046a3/r3tmp/tmpEYq9vV/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/0046a3/r3tmp/tmpEYq9vV/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 2605931984429788125 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T20:34:28.648953Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T20:34:28.649268Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T20:34:28.697908Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:457:2101] with ResourceBroker at [2:428:2100] 2025-06-24T20:34:28.698075Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:458:2102] 2025-06-24T20:34:28.698257Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:456:2336] with ResourceBroker at [1:427:2317] 2025-06-24T20:34:28.698352Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:459:2337] 2025-06-24T20:34:28.698483Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T20:34:28.698520Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
2025-06-24T20:34:28.698559Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T20:34:28.698580Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-24T20:34:28.698743Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:28.715510Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750797268 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:28.715880Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:28.716000Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750797268 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 100000000 Memory { Pool: 1 Available: 100000000 } ExecutionUnits: 100 2025-06-24T20:34:28.716273Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T20:34:28.716371Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T20:34:28.716412Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:28.716516Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750797268 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:28.716634Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T20:34:28.716659Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:28.716738Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750797268 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 100000000 Memory { Pool: 1 Available: 100000000 } ExecutionUnits: 100 2025-06-24T20:34:28.716841Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T20:34:28.717624Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2025-06-24T20:34:28.717721Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T20:34:28.718153Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info 
update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T20:34:28.718660Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T20:34:28.718803Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T20:34:28.718961Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-06-24T20:34:28.719492Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-24T20:34:28.719704Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T20:34:28.723870Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-2-1 (1 by [1:456:2336]) priority=0 resources={0, 1000} 2025-06-24T20:34:28.723949Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-2-1 (1 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:28.724011Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 1000} for task kqp-1-2-1 (1 by [1:456:2336]) from queue queue_kqp_resource_manager 2025-06-24T20:34:28.724057Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-2-1 (1 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:28.724113Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 2.500000 (insert task kqp-1-2-1 (1 by [1:456:2336])) 2025-06-24T20:34:28.724352Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 2. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 1000ExternalMemory: 0 } 2025-06-24T20:34:28.724434Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-2-2 (2 by [1:456:2336]) priority=0 resources={0, 100000} 2025-06-24T20:34:28.724478Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-2-2 (2 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:28.724538Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task kqp-1-2-2 (2 by [1:456:2336]) 2025-06-24T20:34:28.724580Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:499: Removing task kqp-1-2-2 (2 by [1:456:2336]) 2025-06-24T20:34:28.724665Z node 1 :KQP_RESOURCE_MANAGER NOTICE: kqp_rm_service.cpp:338: TxId: 1, taskId: 2. Not enough memory for query, requested: 100000. 
TxResourcesInfo { TxId: 1, Database: , tx initially granted memory: 0B, tx total memory allocations: 1000B, tx largest successful memory allocation: 1000B, tx last failed memory allocation: 0B, tx total execution units: 0, started at: 2025-06-24T20:34:28.723768Z } >> YdbProxy::ReadTopic [GOOD] >> YdbProxy::ReadNonExistentTopic >> ExternalBlobsMultipleChannels::WithCompaction >> KqpRm::NodesMembershipByExchanger [GOOD] >> YdbProxy::DescribeTopic [GOOD] >> TAsyncIndexTests::SplitIndexWithReboots[TabletReboots] [GOOD] >> KqpWorkloadService::TestCpuLoadThreshold [GOOD] >> KqpWorkloadService::TestCpuLoadThresholdRefresh ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::DescribeTable [GOOD] Test command err: 2025-06-24T20:34:18.162751Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616968713999809:2163];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:18.164345Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027d2/r3tmp/tmpwTOO0H/pdisk_1.dat 2025-06-24T20:34:18.915339Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:18.930466Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:18.931297Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616968713999683:2079] 1750797258093106 != 1750797258093109 2025-06-24T20:34:18.933528Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:18.937515Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25660 TServer::EnableGrpc on GrpcPort 25841, node 1 2025-06-24T20:34:19.224820Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:19.303731Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:19.303757Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:19.303778Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:19.303916Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25660 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:19.843463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:19.875840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:34:19.980225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T20:34:23.415254Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616988558529254:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:23.531057Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027d2/r3tmp/tmpjKXP2i/pdisk_1.dat 2025-06-24T20:34:23.742309Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:23.762428Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519616988558529220:2079] 1750797263411287 != 1750797263411290 2025-06-24T20:34:23.772914Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:23.773004Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:23.780515Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12422 TServer::EnableGrpc on GrpcPort 27153, node 2 2025-06-24T20:34:24.216564Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:24.216587Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:24.216596Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
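
The resource-broker trace near the top of this excerpt (Submitted new kqp_query task, Assigning waiting task, Allocate resources, then "Not enough resources to start task" followed by the NOTICE "Not enough memory for query, requested: 100000") shows a plain admission check against a fixed per-node pool: task kqp-1-2-1 takes the whole 1000-unit pool, so the 100000-unit request for kqp-1-2-2 is rejected and the task is removed. A minimal sketch of that admission decision, assuming a single memory pool per node as in this run (TotalMemory: 1000); names and structure here are illustrative, not the actual kqp_rm_service or resource_broker code:

    // Illustrative only: the admission decision visible in the trace above.
    #include <cstdint>
    #include <iostream>

    struct MemoryPool {
        uint64_t Total;
        uint64_t Used = 0;

        // Admit the task only if the request still fits into the pool.
        bool TryAllocate(uint64_t request) {
            if (Used + request > Total) {
                return false;              // "Not enough memory for query"
            }
            Used += request;
            return true;
        }

        void Release(uint64_t amount) {
            Used -= amount;                // mirrors "Released resources, Memory: ..."
        }
    };

    int main() {
        MemoryPool pool{1000};                       // TotalMemory: 1000 in this run
        bool first = pool.TryAllocate(1000);         // task kqp-1-2-1: granted
        bool second = pool.TryAllocate(100000);      // task kqp-1-2-2: rejected
        std::cout << first << " " << second << "\n"; // prints "1 0"
    }

The TxResourcesInfo line that follows the rejection reports the same accounting per transaction: 1000B granted in total, largest successful allocation 1000B, last failed allocation the oversized request.
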
2025-06-24T20:34:24.216725Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:24.440834Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12422 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:24.656214Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:24.664379Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:34:28.494675Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519616988558529254:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:28.495037Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:28.506413Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> ExternalBlobsMultipleChannels::Simple >> KqpRm::ManyTasks [GOOD] >> THiveTest::TestLocalRegistrationInSharedHive [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::NodesMembershipByExchanger [GOOD] Test command err: 2025-06-24T20:34:28.845524Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T20:34:28.846189Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/00469c/r3tmp/tmpmecpWR/pdisk_1.dat": unknown reason, errno# 0. 
PDiskId# 1000 2025-06-24T20:34:28.847236Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/00469c/r3tmp/tmpmecpWR/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/00469c/r3tmp/tmpmecpWR/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 1128942879121100034 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T20:34:28.893716Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T20:34:28.894013Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T20:34:28.912256Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:457:2101] with ResourceBroker at [2:428:2100] 2025-06-24T20:34:28.912420Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:458:2102] 2025-06-24T20:34:28.912587Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:456:2336] with ResourceBroker at [1:427:2317] 2025-06-24T20:34:28.912670Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:459:2337] 2025-06-24T20:34:28.912800Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T20:34:28.912833Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-24T20:34:28.912863Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T20:34:28.912884Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
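
The KqpRm::NodesMembershipByExchanger trace that follows illustrates the membership pattern in its name: each node's KqpResourceInfoExchangerActor publishes its resource info to a shared board ("kqpexch+/dc-1") and subscribes to it, so subscribers first see "board info update ... with size: 1" and then "with size: 2" once the second node appears, after which each side pulls "resources info from node: N". A minimal stand-in for that publish/subscribe board, using a plain map rather than the actual YDB scheme-board machinery; the type and method names are assumptions for illustration:

    #include <cstdint>
    #include <iostream>
    #include <map>

    struct ResourceInfo {
        uint64_t TotalMemory = 0;
        uint64_t ExecutionUnits = 0;
    };

    class Board {
    public:
        void Publish(uint32_t nodeId, ResourceInfo info) {
            Members_[nodeId] = info;
            // Subscribers observe the membership size growing: 1, then 2.
            std::cout << "board update, size: " << Members_.size() << "\n";
        }
        const ResourceInfo* Lookup(uint32_t nodeId) const {
            auto it = Members_.find(nodeId);
            return it == Members_.end() ? nullptr : &it->second;
        }
    private:
        std::map<uint32_t, ResourceInfo> Members_;
    };

    int main() {
        Board board;                          // one logical board per tenant
        board.Publish(1, {1000, 100});        // node 1 -> "with size: 1"
        board.Publish(2, {1000, 100});        // node 2 -> "with size: 2"
        if (const ResourceInfo* info = board.Lookup(1)) {   // "Get resources info from node: 1"
            std::cout << "node 1 memory: " << info->TotalMemory << "\n";
        }
    }
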
2025-06-24T20:34:28.913055Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:28.928647Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750797268 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:28.929035Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:28.929144Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750797268 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:28.929474Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T20:34:28.929606Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T20:34:28.929663Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:28.929768Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750797268 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:28.929875Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T20:34:28.929909Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:28.929983Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750797268 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:28.930114Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T20:34:28.930939Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2025-06-24T20:34:28.931062Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T20:34:28.931655Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T20:34:28.932201Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T20:34:28.932382Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info 
from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T20:34:28.932552Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-06-24T20:34:28.932774Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-24T20:34:28.932982Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T20:34:30.128658Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-24T20:34:30.128787Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-24T20:34:30.129859Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T20:34:30.464153Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request |89.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest |89.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut |89.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut |89.3%| [LD] {RESULT} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::ManyTasks [GOOD] Test command err: 2025-06-24T20:34:31.038297Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T20:34:31.038962Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/00469b/r3tmp/tmprA5fHn/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T20:34:31.039642Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/00469b/r3tmp/tmprA5fHn/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/00469b/r3tmp/tmprA5fHn/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 11333770205515428239 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T20:34:31.092197Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T20:34:31.092496Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T20:34:31.114946Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:457:2101] with ResourceBroker at [2:428:2100] 2025-06-24T20:34:31.115101Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:458:2102] 2025-06-24T20:34:31.121260Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:456:2336] with ResourceBroker at [1:427:2317] 2025-06-24T20:34:31.121446Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:459:2337] 2025-06-24T20:34:31.121618Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T20:34:31.121662Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-24T20:34:31.121702Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T20:34:31.121722Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
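
The "Send to publish resource usage" lines above and below carry a per-node snapshot (NodeId, Timestamp, AvailableComputeActors, UsedMemory, TotalMemory, per-pool Memory, ExecutionUnits), re-published whenever a trigger such as "data_center update" or "tenant updated" fires even if the values are unchanged. A minimal stand-in for that snapshot, only to show the shape of the data being exchanged; the real payload is a protobuf inside kqp_rm_service, and this struct and function are assumptions for illustration:

    #include <cstdint>
    #include <iostream>
    #include <string>

    struct NodeResourceSnapshot {
        uint32_t NodeId = 0;
        uint64_t Timestamp = 0;
        uint32_t AvailableComputeActors = 0;
        uint64_t UsedMemory = 0;
        uint64_t TotalMemory = 0;
        uint32_t ExecutionUnits = 0;
    };

    // Mirrors the "Send to publish resource usage for reason: ..." log lines.
    void PublishUsage(const std::string& reason, const NodeResourceSnapshot& s) {
        std::cout << "publish (" << reason << "): node " << s.NodeId
                  << " used " << s.UsedMemory << "/" << s.TotalMemory
                  << " exec units " << s.ExecutionUnits << "\n";
    }

    int main() {
        NodeResourceSnapshot node2{2, 1750797271, 100, 0, 1000, 100};
        PublishUsage("data_center update", node2);   // first publish after start
        PublishUsage("tenant updated", node2);       // re-publish on tenant status
    }
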
2025-06-24T20:34:31.121906Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:31.140331Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750797271 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:31.140817Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:31.140960Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750797271 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:31.141339Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T20:34:31.141483Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T20:34:31.141532Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:31.141696Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750797271 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:31.141844Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T20:34:31.141881Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T20:34:31.141949Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750797271 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T20:34:31.142085Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T20:34:31.142926Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2025-06-24T20:34:31.143066Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T20:34:31.143790Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T20:34:31.144423Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T20:34:31.144609Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info 
from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T20:34:31.144828Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-06-24T20:34:31.145078Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-24T20:34:31.145302Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T20:34:31.152252Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-1-1 (1 by [1:456:2336]) priority=0 resources={0, 100} 2025-06-24T20:34:31.152352Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-1-1 (1 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:31.152419Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-1-1 (1 by [1:456:2336]) from queue queue_kqp_resource_manager 2025-06-24T20:34:31.152476Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-1-1 (1 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:31.152531Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.250000 (insert task kqp-1-1-1 (1 by [1:456:2336])) 2025-06-24T20:34:31.152751Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 1. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-24T20:34:31.153000Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-2-2 (2 by [1:456:2336]) priority=0 resources={0, 100} 2025-06-24T20:34:31.153029Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-2-2 (2 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:31.153094Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-2-2 (2 by [1:456:2336]) from queue queue_kqp_resource_manager 2025-06-24T20:34:31.153126Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-2-2 (2 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:31.153162Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.250000 to 0.500000 (insert task kqp-1-2-2 (2 by [1:456:2336])) 2025-06-24T20:34:31.153196Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 2. 
Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-24T20:34:31.153305Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-3-3 (3 by [1:456:2336]) priority=0 resources={0, 100} 2025-06-24T20:34:31.153328Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-3-3 (3 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:31.153356Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-3-3 (3 by [1:456:2336]) from queue queue_kqp_resource_manager 2025-06-24T20:34:31.153379Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-3-3 (3 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:31.153423Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.500000 to 0.750000 (insert task kqp-1-3-3 (3 by [1:456:2336])) 2025-06-24T20:34:31.153448Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 3. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-24T20:34:31.153547Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-4-4 (4 by [1:456:2336]) priority=0 resources={0, 100} 2025-06-24T20:34:31.153589Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-4-4 (4 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:31.153623Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-4-4 (4 by [1:456:2336]) from queue queue_kqp_resource_manager 2025-06-24T20:34:31.153646Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-4-4 (4 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:31.153680Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.750000 to 1.000000 (insert task kqp-1-4-4 (4 by [1:456:2336])) 2025-06-24T20:34:31.153710Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 4. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-24T20:34:31.153816Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-5-5 (5 by [1:456:2336]) priority=0 resources={0, 100} 2025-06-24T20:34:31.153845Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-5-5 (5 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:31.153869Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-5-5 (5 by [1:456:2336]) from queue queue_kqp_resource_manager 2025-06-24T20:34:31.153890Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-5-5 (5 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:31.153912Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 1.000000 to 1.250000 (insert task kqp-1-5-5 (5 by [1:456:2336])) 2025-06-24T20:34:31.153934Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 5. 
Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-24T20:34:31.154033Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-6-6 (6 by [1:456:2336]) priority=0 resources={0, 100} 2025-06-24T20:34:31.154095Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-6-6 (6 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:31.154121Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-6-6 (6 by [1:456:2336]) from queue queue_kqp_resource_manager 2025-06-24T20:34:31.154147Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-6-6 (6 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:31.154179Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 1.250000 to 1.500000 (insert task kqp-1-6-6 (6 by [1:456:2336])) 2025-06-24T20:34:31.154212Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 6. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-24T20:34:31.154299Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-7-7 (7 by [1:456:2336]) priority=0 resources={0, 100} 2025-06-24T20:34:31.154320Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-7-7 (7 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:31.154343Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-7-7 (7 by [1:456:2336]) from queue queue_kqp_resource_manager 2025-06-24T20:34:31.154377Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-7-7 (7 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:31.154415Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 1.500000 to 1.750000 (insert task kqp-1-7-7 (7 by [1:456:2336])) 2025-06-24T20:34:31.154440Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 7. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-24T20:34:31.154526Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-8-8 (8 by [1:456:2336]) priority=0 resources={0, 100} 2025-06-24T20:34:31.154559Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-8-8 (8 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:31.154585Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-8-8 (8 by [1:456:2336]) from queue queue_kqp_resource_manager 2025-06-24T20:34:31.154610Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-8-8 (8 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:31.154641Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 1.750000 to 2.000000 (insert task kqp-1-8-8 (8 by [1:456:2336])) 2025-06-24T20:34:31.154682Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 8. 
Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-24T20:34:31.154773Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-9-9 (9 by [1:456:2336]) priority=0 resources={0, 100} 2025-06-24T20:34:31.154807Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-9-9 (9 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:31.154840Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-9-9 (9 by [1:456:2336]) from queue queue_kqp_resource_manager 2025-06-24T20:34:31.154874Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-9-9 (9 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T20:34:31.154903Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 2.000000 to 2.250000 (insert task kqp-1-9-9 (9 by [1:456:2336])) 2025-06-24T20:34:31.154926Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 9. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-24T20:34:31.155030Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-1-1-1 (1 by [1:456:2336]) (release resources {0, 100}) 2025-06-24T20:34:31.155098Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 2.250000 to 2.000000 (remove task kqp-1-1-1 (1 by [1:456:2336])) 2025-06-24T20:34:31.155375Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 1, taskId: 1. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 0. >> KikimrIcGateway::TestDropExternalDataSource [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::DescribeTopic [GOOD] Test command err: 2025-06-24T20:34:18.933268Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616967131533224:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:18.944737Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027a7/r3tmp/tmpR21PXM/pdisk_1.dat 2025-06-24T20:34:19.508562Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616967131533040:2079] 1750797258887281 != 1750797258887284 2025-06-24T20:34:19.520433Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:19.556481Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:19.556590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:19.560595Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9303 TServer::EnableGrpc on GrpcPort 8885, node 1 2025-06-24T20:34:19.915873Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:19.915915Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:19.915921Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:19.916077Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:19.931319Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9303 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:20.476776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
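
The KqpRm::ManyTasks trace above shows the planned usage for queue_kqp_resource_manager rising by 0.25 per 100-unit allocation (0.25, 0.50, ... 2.25 over nine tasks kqp-1-1-1 through kqp-1-9-9) and dropping back to 2.0 when task 1 finishes, while the earlier 1000-unit allocation raised it by 2.5. The numbers in this particular run are therefore consistent with planned_usage changing by memory/400 per task; that constant is only what these traces imply, not a documented resource-broker formula. A small sketch reproducing the observed arithmetic:

    #include <iostream>

    int main() {
        double plannedUsage = 0.0;
        const double costPerUnit = 1.0 / 400.0;   // inferred from this trace only

        for (int task = 1; task <= 9; ++task) {   // kqp-1-1-1 .. kqp-1-9-9
            plannedUsage += 100 * costPerUnit;
            std::cout << "insert task " << task << " -> " << plannedUsage << "\n";
        }
        plannedUsage -= 100 * costPerUnit;        // finish kqp-1-1-1
        std::cout << "remove task 1 -> " << plannedUsage << "\n";   // back to 2 (2.000000 in the trace)
    }
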
2025-06-24T20:34:23.710021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:23.885970Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-24T20:34:23.894039Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616988606370293:2397] txid# 281474976710660, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-24T20:34:23.931794Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616967131533224:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:23.931876Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:24.973313Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616994164243336:2178];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:25.037144Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027a7/r3tmp/tmpg43GtL/pdisk_1.dat 2025-06-24T20:34:25.220076Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:25.220152Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:25.221917Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:25.226397Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:25.227443Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519616994164243183:2079] 1750797264902150 != 1750797264902153 TClient is connected to server localhost:5287 TServer::EnableGrpc on GrpcPort 11272, node 2 2025-06-24T20:34:25.575823Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:25.575845Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:25.575853Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:25.575954Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5287 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:25.974403Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:25.974820Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T20:34:25.986736Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:34:26.050542Z node 2 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-24T20:34:26.270069Z node 2 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW-VolatileTxs [GOOD] >> YdbProxy::OAuthToken [GOOD] >> KikimrIcGateway::TestALterResourcePool [GOOD] |89.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut |89.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut |89.3%| [LD] {RESULT} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitIndexWithReboots[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T20:30:39.656882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:30:39.656987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:39.657029Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:30:39.657070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:30:39.657120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:30:39.657173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:30:39.657242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:39.657326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:30:39.658150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:30:39.658519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:30:39.747233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T20:30:39.747304Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:39.748239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T20:30:39.771673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:30:39.772244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:30:39.772418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:30:39.788901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:30:39.789186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:30:39.789951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:39.790257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 
72057594046678944 2025-06-24T20:30:39.793947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:39.794154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:30:39.795670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:39.795745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:39.795983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:30:39.796053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:30:39.796100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:30:39.796246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T20:30:39.811114Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:30:39.981202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:39.981438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:39.981658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:30:39.981707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:30:39.981931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:30:39.982066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 
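
The AlterSubDomain trace above and below walks one sub-operation through the state codes 2 -> 3 -> 128: TCreateParts ("no shards to create, do next state"), then NSubDomainState::TConfigureParts, then TPropose, which sends the transaction to the coordinator and waits for a plan step (5000001 here). The enum values in the sketch below are copied from the trace and the names from the log messages; the driver itself is only an illustration of the observed sequence, not schemeshard code:

    #include <iostream>

    enum class OpState {
        CreateParts    = 2,     // "Change state for txid 1:0 2 -> 3"
        ConfigureParts = 3,     // "... 3 -> 128"
        Propose        = 128,   // "TOperation DoPropose send propose to coordinator"
    };

    OpState Next(OpState s) {
        switch (s) {
            case OpState::CreateParts:    return OpState::ConfigureParts;
            case OpState::ConfigureParts: return OpState::Propose;
            default:                      return s;   // Propose waits for the plan step
        }
    }

    int main() {
        OpState s = OpState::CreateParts;
        while (s != OpState::Propose) {
            s = Next(s);
            std::cout << "state -> " << static_cast<int>(s) << "\n";   // 3, then 128
        }
    }
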
2025-06-24T20:30:39.992468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:39.992752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:30:39.993132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:39.993244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:30:39.993346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:30:39.993420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:30:40.000645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:40.000752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:40.000796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:30:40.004097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:40.004186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:40.004233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:40.004323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:30:40.012708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:30:40.016248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:30:40.016512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:30:40.017738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:40.017899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969454 } } Step: 5000001 Media ... ntToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableIndexes { Name: "UserDefinedIndex" LocalPathId: 4 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: 
"background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409547 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:34:30.736165Z node 93 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T20:34:30.736547Z node 93 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path 
"/MyRoot/Table/UserDefinedIndex/indexImplTable" took 414us result status StatusSuccess 2025-06-24T20:34:30.737618Z node 93 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\002\000\004\000\000\0002\000\000\000\000\000\000\200" IsPoint: false IsInclusive: false 
DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TPDiskTest::PDiskIncreaseLogChunksLimitAfterRestart [GOOD] >> TPDiskTest::AllRequestsAreAnsweredOnPDiskRestart >> ExternalBlobsMultipleChannels::ExtBlobsMultipleColumns |89.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestDropExternalDataSource [GOOD] Test command err: Trying to start YDB, gRPC: 9558, MsgBus: 19288 2025-06-24T20:34:15.393002Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616955800487197:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:15.393327Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003722/r3tmp/tmpmMJEVS/pdisk_1.dat 2025-06-24T20:34:15.954675Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:15.963383Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616955800486977:2079] 1750797255308410 != 1750797255308413 2025-06-24T20:34:15.975009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:15.975122Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:15.983998Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting 
-> Connected TServer::EnableGrpc on GrpcPort 9558, node 1 2025-06-24T20:34:16.162986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:16.163009Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:16.163017Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:16.163167Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:16.388392Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19288 TClient is connected to server localhost:19288 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:16.866617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:34:16.892657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:34:16.915256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-24T20:34:16.940875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:351) 2025-06-24T20:34:16.968178Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616960095454951:2341] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/f1/f2/external_table\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeExternalTable, state: EPathStateNoChanges)" severity: 1 }
: Error: Scheme operation failed, status: ExecComplete, reason: Check failed: path: '/Root/f1/f2/external_table', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeExternalTable, state: EPathStateNoChanges) 2025-06-24T20:34:16.968761Z node 1 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976710660, ProxyStatus: ExecComplete, SchemeShardReason: Check failed: path: '/Root/f1/f2/external_table', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeExternalTable, state: EPathStateNoChanges) Trying to start YDB, gRPC: 2317, MsgBus: 17255 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003722/r3tmp/tmp5mGAor/pdisk_1.dat 2025-06-24T20:34:20.497056Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:20.499514Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519616975918331846:2079] 1750797260191171 != 1750797260191174 2025-06-24T20:34:20.511393Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:20.520645Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:20.520734Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:20.528554Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2317, node 2 2025-06-24T20:34:20.787778Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:20.787804Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:20.787812Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:20.787929Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17255 2025-06-24T20:34:21.236091Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17255 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:34:21.615511Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:21.622690Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:34:21.640785Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-24T20:34:21.669076Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:351) 2025-06-24T20:34:21.677577Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-24T20:34:21.695565Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 19774, MsgBus: 23086 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003722/r3tmp/tmpj0idYi/pdisk_1.dat 2025-06-24T20:34:26.264242Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519616999462000341:2173];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:26.396446Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:34:26.494070Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:26.499338Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519616999462000203:2079] 1750797266169035 != 1750797266169038 2025-06-24T20:34:26.519782Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:26.519875Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:26.522332Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19774, node 3 2025-06-24T20:34:26.727751Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:26.727774Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: 
(empty maybe) 2025-06-24T20:34:26.727781Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:26.727914Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:27.217172Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23086 TClient is connected to server localhost:23086 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:27.654138Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:27.664474Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:34:27.678044Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) >> DataShardVolatile::DistributedWriteThenSplit [GOOD] >> DataShardVolatile::DistributedWriteThenReadIterator ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minstep/unittest >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW-VolatileTxs [GOOD] Test command err: 2025-06-24T20:34:19.918725Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:19.919101Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:19.919386Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002e02/r3tmp/tmpidXJGH/pdisk_1.dat 2025-06-24T20:34:20.275841Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:34:20.277190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:34:20.305828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:34:20.307859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:20.309112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T20:34:20.319996Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:183: tablet# 72057594046316545 txid# 1 HANDLE EvProposeTransaction marker# C0 2025-06-24T20:34:20.320094Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:29: tablet# 72057594046316545 txid# 1 step# 500 Status# 16 SEND to# [1:371:2365] Proxy marker# C1 2025-06-24T20:34:20.367430Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:20.367566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:34:20.378327Z node 1 :HIVE DEBUG: hive_impl.cpp:2275: HIVE#72057594037968897 Merged config: { } 2025-06-24T20:34:20.378630Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797256577871 != 1750797256577875 2025-06-24T20:34:20.454501Z node 1 :HIVE DEBUG: hive_impl.cpp:141: HIVE#72057594037968897 Handle TEvLocal::TEvRegisterNode from [1:295:2335] HiveId: 72057594037968897 ServicedDomains { SchemeShard: 72057594046644480 PathId: 1 } TabletAvailability { Type: Mediator Priority: 0 } TabletAvailability { Type: Dummy Priority: 0 } TabletAvailability { Type: KeyValue Priority: 0 } TabletAvailability { Type: Coordinator Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } TabletAvailability { Type: SchemeShard Priority: 0 } TabletAvailability { Type: DataShard Priority: 0 } TabletAvailability { Type: PersQueue Priority: 0 } TabletAvailability { Type: PersQueueReadBalancer Priority: 0 } TabletAvailability { Type: Kesus Priority: 0 } TabletAvailability { Type: SysViewProcessor Priority: 0 } TabletAvailability { Type: ColumnShard Priority: 0 } 
TabletAvailability { Type: SequenceShard Priority: 0 } TabletAvailability { Type: ReplicationController Priority: 0 } TabletAvailability { Type: StatisticsAggregator Priority: 0 } 2025-06-24T20:34:20.454721Z node 1 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Execute 2025-06-24T20:34:20.454885Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:20.454956Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-24T20:34:20.455003Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-24T20:34:20.455055Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-24T20:34:20.455102Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-24T20:34:20.460025Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:20.460641Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-24T20:34:20.460701Z node 1 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-24T20:34:20.460773Z node 1 :HIVE DEBUG: hive_impl.cpp:225: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-06-24T20:34:20.460848Z node 1 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-24T20:34:20.461101Z node 1 :HIVE DEBUG: hive_impl.cpp:808: HIVE#72057594037968897 TEvInterconnect::TEvNodeInfo NodeId 1 Location DataCenter: "1" Module: "1" Rack: "1" Unit: "1" 2025-06-24T20:34:20.472036Z node 1 :HIVE DEBUG: tx__register_node.cpp:95: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Complete 2025-06-24T20:34:20.472143Z node 1 :HIVE DEBUG: node_info.cpp:373: HIVE#72057594037968897 Node(1) Ping([1:295:2335]) 2025-06-24T20:34:20.472259Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-24T20:34:20.472870Z node 1 :HIVE DEBUG: hive_impl.cpp:737: HIVE#72057594037968897 THive::Handle::TEvSyncTablets 2025-06-24T20:34:20.472965Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:41: HIVE#72057594037968897 THive::TTxSyncTablets([1:295:2335])::Execute 2025-06-24T20:34:20.473035Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-24T20:34:20.473142Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:130: HIVE#72057594037968897 THive::TTxSyncTablets([1:295:2335])::Complete 2025-06-24T20:34:20.473299Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-24T20:34:20.473341Z node 1 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-24T20:34:20.473384Z node 1 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-24T20:34:20.473438Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-24T20:34:20.473593Z node 1 :HIVE DEBUG: hive_impl.cpp:731: HIVE#72057594037968897 Handle TEvLocal::TEvStatus for Node 1: Status: 0 StartTime: 0 ResourceMaximum { Memory: 270443339776 } 2025-06-24T20:34:20.473651Z node 1 :HIVE DEBUG: tx__status.cpp:22: HIVE#72057594037968897 THive::TTxStatus(1)::Execute 2025-06-24T20:34:20.473705Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:20.473884Z node 1 :HIVE DEBUG: hive_impl.cpp:2791: HIVE#72057594037968897 AddRegisteredDataCentersNode(1, 1) 2025-06-24T20:34:20.473968Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-24T20:34:20.474045Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-24T20:34:20.474267Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-24T20:34:20.474303Z node 1 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-24T20:34:20.474332Z node 1 :HIVE DEBUG: hive_impl.cpp:225: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-06-24T20:34:20.474367Z node 1 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-24T20:34:20.486632Z node 1 :HIVE DEBUG: tx__status.cpp:65: HIVE#72057594037968897 THive::TTxStatus(1)::Complete 2025-06-24T20:34:20.486758Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-24T20:34:20.555704Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 1 has been planned 2025-06-24T20:34:20.555844Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 1 for mediator 72057594046382081 tablet 72057594046644480 2025-06-24T20:34:20.556299Z node 1 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 1000 in 0.500000s at 0.950000s 2025-06-24T20:34:20.556743Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 500, txid# 1 marker# C2 2025-06-24T20:34:20.556855Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 1 stepId# 500 Status# 17 SEND EvProposeTransactionStatus to# [1:371:2365] Proxy 2025-06-24T20:34:20.557859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T20:34:20.564011Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2025-06-24T20:34:20.564206Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:6] persistent tx 1 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2025-06-24T20:34:20.564272Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:6] persistent tx 1 for mediator 72057594046382081 acknowledged 2025-06-24T20:34:20.564322Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:6] persistent tx 1 acknowledged 2025-06-24T20:34:20.565372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T20:34:20.565466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 1 2025-06-24T20:34:20.566793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 
72057594046644480, txId: 1, subscribers: 1 2025-06-24T20:34:20.570117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/table-1, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:34:20.580194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:34:20.580340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:20.581684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, database: /Root, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /Root/table-1 2025-06-24T20:34:20.598176Z node 1 :HIVE DEBUG: hive_impl.cpp:34: HIVE# ... 74976715665 HANDLE EvProposeTransaction marker# C0 2025-06-24T20:34:32.134304Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:29: tablet# 72057594046316545 txid# 281474976715665 step# 3500 Status# 16 SEND to# [2:371:2365] Proxy marker# C1 2025-06-24T20:34:32.147289Z node 2 :HIVE DEBUG: tx__delete_tablet_result.cpp:72: HIVE#72057594037968897 THive::TTxDeleteTabletResult(72075186224037888)::Complete SideEffects {} 2025-06-24T20:34:32.227894Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 281474976715665 has been planned 2025-06-24T20:34:32.228027Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 281474976715665 for mediator 72057594046382081 tablet 72057594046644480 2025-06-24T20:34:32.228077Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 281474976715665 for mediator 72057594046382081 tablet 72075186224037889 2025-06-24T20:34:32.228391Z node 2 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 4000 in 0.500000s at 3.950000s 2025-06-24T20:34:32.228912Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 3500, txid# 281474976715665 marker# C2 2025-06-24T20:34:32.229029Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 281474976715665 stepId# 3500 Status# 17 SEND EvProposeTransactionStatus to# [2:371:2365] Proxy 2025-06-24T20:34:32.229592Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 3500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T20:34:32.230371Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715665 at step 3500 at tablet 72075186224037889 { Transactions { TxId: 281474976715665 AckTo { RawX1: 0 RawX2: 0 } } Step: 3500 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-06-24T20:34:32.230451Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T20:34:32.230859Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T20:34:32.230915Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: 
GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2025-06-24T20:34:32.230975Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [3500:281474976715665] in PlanQueue unit at 72075186224037889 2025-06-24T20:34:32.231965Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 3500:281474976715665 keys extracted: 0 2025-06-24T20:34:32.232180Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T20:34:32.232436Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T20:34:32.232547Z node 2 :TX_DATASHARD INFO: drop_table_unit.cpp:72: Trying to DROP TABLE at 72075186224037889 2025-06-24T20:34:32.233075Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:34:32.236114Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 3500} 2025-06-24T20:34:32.236220Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T20:34:32.236611Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2025-06-24T20:34:32.236742Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:20] persistent tx 281474976715665 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2025-06-24T20:34:32.236799Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:20] persistent tx 281474976715665 for mediator 72057594046382081 tablet 72075186224037889 removed=1 2025-06-24T20:34:32.236834Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:20] persistent tx 281474976715665 for mediator 72057594046382081 acknowledged 2025-06-24T20:34:32.236896Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:20] persistent tx 281474976715665 acknowledged 2025-06-24T20:34:32.237458Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T20:34:32.237546Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715665] from 72075186224037889 at tablet 72075186224037889 send result to client [2:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T20:34:32.237614Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715665 state PreOffline TxInFly 0 2025-06-24T20:34:32.237712Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T20:34:32.237998Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1104: All parts have reached barrier, tx: 281474976715665, done: 0, blocked: 1 2025-06-24T20:34:32.243625Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715665 datashard 72075186224037889 state PreOffline 2025-06-24T20:34:32.243749Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 
72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-24T20:34:32.244098Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T20:34:32.244948Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715665:0 2025-06-24T20:34:32.246625Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NDcwZmU1OS1jYWVkZTVjMC0yZDk0M2U4Mi1lYTk4MWRkZg== 2025-06-24 20:34:32.246 INFO ydb-core-tx-datashard-ut_minstep(pid=177152, tid=0x00007F2A20E32D40) [core exec] yql_execution.cpp:133: Completed async execution for node #42 2025-06-24T20:34:32.246921Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NDcwZmU1OS1jYWVkZTVjMC0yZDk0M2U4Mi1lYTk4MWRkZg== 2025-06-24 20:34:32.246 INFO ydb-core-tx-datashard-ut_minstep(pid=177152, tid=0x00007F2A20E32D40) [core exec] yql_execution.cpp:153: State is ExecutionComplete after apply async changes for node #42 2025-06-24T20:34:32.247017Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NDcwZmU1OS1jYWVkZTVjMC0yZDk0M2U4Mi1lYTk4MWRkZg== 2025-06-24 20:34:32.246 INFO ydb-core-tx-datashard-ut_minstep(pid=177152, tid=0x00007F2A20E32D40) [core exec] yql_execution.cpp:59: Begin, root #43 2025-06-24T20:34:32.247082Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NDcwZmU1OS1jYWVkZTVjMC0yZDk0M2U4Mi1lYTk4MWRkZg== 2025-06-24 20:34:32.247 INFO ydb-core-tx-datashard-ut_minstep(pid=177152, tid=0x00007F2A20E32D40) [core exec] yql_execution.cpp:72: Collect unused nodes for root #43, status: Ok 2025-06-24T20:34:32.247157Z node 2 :KQP_YQL TRACE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NDcwZmU1OS1jYWVkZTVjMC0yZDk0M2U4Mi1lYTk4MWRkZg== 2025-06-24 20:34:32.247 TRACE ydb-core-tx-datashard-ut_minstep(pid=177152, tid=0x00007F2A20E32D40) [core exec] yql_execution.cpp:387: {0}, callable #43 2025-06-24T20:34:32.247253Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NDcwZmU1OS1jYWVkZTVjMC0yZDk0M2U4Mi1lYTk4MWRkZg== 2025-06-24 20:34:32.247 INFO ydb-core-tx-datashard-ut_minstep(pid=177152, tid=0x00007F2A20E32D40) [core exec] yql_execution.cpp:577: Node #43 finished execution 2025-06-24T20:34:32.247353Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NDcwZmU1OS1jYWVkZTVjMC0yZDk0M2U4Mi1lYTk4MWRkZg== 2025-06-24 20:34:32.247 INFO ydb-core-tx-datashard-ut_minstep(pid=177152, tid=0x00007F2A20E32D40) [core exec] yql_execution.cpp:594: Node #43 created 0 trackable nodes: 2025-06-24T20:34:32.247434Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NDcwZmU1OS1jYWVkZTVjMC0yZDk0M2U4Mi1lYTk4MWRkZg== 2025-06-24 20:34:32.247 INFO ydb-core-tx-datashard-ut_minstep(pid=177152, tid=0x00007F2A20E32D40) [core exec] yql_execution.cpp:87: Finish, output #43, status: Ok 2025-06-24T20:34:32.247496Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NDcwZmU1OS1jYWVkZTVjMC0yZDk0M2U4Mi1lYTk4MWRkZg== 2025-06-24 20:34:32.247 INFO ydb-core-tx-datashard-ut_minstep(pid=177152, tid=0x00007F2A20E32D40) [core exec] yql_execution.cpp:93: Creating finalizing transformer, output #43 2025-06-24T20:34:32.247706Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NDcwZmU1OS1jYWVkZTVjMC0yZDk0M2U4Mi1lYTk4MWRkZg== 2025-06-24 20:34:32.247 NOTE 
ydb-core-tx-datashard-ut_minstep(pid=177152, tid=0x00007F2A20E32D40) [common provider] yql_provider_gateway.cpp:21:
: Info: Execution, code: 1060 2025-06-24T20:34:32.247787Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NDcwZmU1OS1jYWVkZTVjMC0yZDk0M2U4Mi1lYTk4MWRkZg== 2025-06-24 20:34:32.247 NOTE ydb-core-tx-datashard-ut_minstep(pid=177152, tid=0x00007F2A20E32D40) [common provider] yql_provider_gateway.cpp:21:
:1:12: Info: Executing DROP TABLE 2025-06-24T20:34:32.247847Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NDcwZmU1OS1jYWVkZTVjMC0yZDk0M2U4Mi1lYTk4MWRkZg== 2025-06-24 20:34:32.247 NOTE ydb-core-tx-datashard-ut_minstep(pid=177152, tid=0x00007F2A20E32D40) [common provider] yql_provider_gateway.cpp:21:
: Info: Success, code: 4 2025-06-24T20:34:32.263302Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T20:34:32.263542Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037889 Initiating switch from PreOffline to Offline state 2025-06-24T20:34:32.266147Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037889 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T20:34:32.272288Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2025-06-24T20:34:32.272901Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:74: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() ShardOwnerId: 72057594046644480 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186224037889 2025-06-24T20:34:32.273003Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:19: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute Tablet 72075186224037889 2025-06-24T20:34:32.273139Z node 2 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037968897 Tablet(DataShard.72075186224037889.Leader.1) VolatileState: Running -> Stopped (Node 2) 2025-06-24T20:34:32.273290Z node 2 :HIVE DEBUG: tablet_info.cpp:523: HIVE#72057594037968897 Sending TEvStopTablet(DataShard.72075186224037889.Leader.1 gen 1) to node 2 2025-06-24T20:34:32.273439Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:67: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() result Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2 |89.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::OAuthToken [GOOD] Test command err: 2025-06-24T20:34:22.303838Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616985491995014:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:22.303880Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00274e/r3tmp/tmpdkmHmg/pdisk_1.dat 2025-06-24T20:34:23.095703Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:23.095815Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:23.103694Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616985491994994:2079] 1750797262278657 != 1750797262278660 2025-06-24T20:34:23.105347Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:23.116407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:23.399579Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1422 TServer::EnableGrpc on GrpcPort 15428, node 1 
2025-06-24T20:34:23.614816Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:23.614851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:23.614858Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:23.614973Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1422 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:24.303188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:34:24.336878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00274e/r3tmp/tmplSctpj/pdisk_1.dat 2025-06-24T20:34:28.343366Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:28.384742Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519617011715903891:2079] 1750797268009139 != 1750797268009142 2025-06-24T20:34:28.395057Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:28.395135Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:28.403975Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:28.416682Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25371 TServer::EnableGrpc on GrpcPort 13487, node 2 2025-06-24T20:34:28.819796Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:28.819819Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:28.819827Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:28.819946Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:29.011298Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25371 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:29.226267Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:34:29.234551Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:34:29.280608Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 >> ExternalBlobsMultipleChannels::SingleChannel |89.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> THiveTest::TestLocalRegistrationInSharedHive [GOOD] Test command err: 2025-06-24T20:31:23.051817Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-24T20:31:23.179122Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:23.187847Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-24T20:31:23.189019Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:31:23.189396Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-24T20:31:23.190555Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-24T20:31:23.190620Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-24T20:31:23.203184Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:31:2076] ControllerId# 72057594037932033 2025-06-24T20:31:23.203247Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-24T20:31:23.203366Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-24T20:31:23.203529Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-24T20:31:23.261254Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-24T20:31:23.261315Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-24T20:31:23.268329Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:38:2081] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:23.268492Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:39:2082] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:23.268695Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:40:2083] targetNodeId# 1 Marker# DSP01 
2025-06-24T20:31:23.268938Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:41:2084] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:23.269121Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:42:2085] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:23.269315Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:43:2086] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:23.269489Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:44:2087] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:23.269519Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-24T20:31:23.269612Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:31:2076] 2025-06-24T20:31:23.269664Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:31:2076] 2025-06-24T20:31:23.269718Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-24T20:31:23.269838Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-24T20:31:23.270683Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-24T20:31:23.270852Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:23.326602Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [1:21:2063] 2025-06-24T20:31:23.326656Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [1:21:2063] 2025-06-24T20:31:23.326841Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:31:2076] 2025-06-24T20:31:23.326918Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:23.326971Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-24T20:31:23.327118Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:23.375464Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:23.375507Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-24T20:31:23.381734Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-24T20:31:23.381951Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:31:23.388096Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-24T20:31:23.388206Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-24T20:31:23.388267Z node 1 :STATESTORAGE DEBUG: 
statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-24T20:31:23.388601Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:31:2076] 2025-06-24T20:31:23.388649Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-24T20:31:23.388686Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:23.388776Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:23.389368Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-24T20:31:23.389408Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-24T20:31:23.389457Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-24T20:31:23.389602Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:342} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\363\365\\\016\336\205\240m2\241c\3010\003\261\342\227\n\267}" } 2025-06-24T20:31:23.389708Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:23.389871Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936129 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:31:23.390115Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:23.390304Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 0} 2025-06-24T20:31:23.390359Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-24T20:31:23.390400Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 2025-06-24T20:31:23.390485Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 2} 2025-06-24T20:31:23.390527Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:23.390684Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72057594037932033 entry.State: StInitResolve success: false ev: 
{EvInfo Status: 5 TabletID: 72057594037932033 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[1:24343667:0] : 2}, {[1:2199047599219:0] : 8}, {[1:1099535971443:0] : 5}}}} 2025-06-24T20:31:23.390741Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72057594037932033 followers: 0 2025-06-24T20:31:23.390977Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72057594037932033] forward result error, check reconnect [1:31:2076] 2025-06-24T20:31:23.391023Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:562: TClient[72057594037932033] schedule retry [1:31:2076] 2025-06-24T20:31:23.391064Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639248 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-24T20:31:23.393460Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 1} 2025-06-24T20:31:23.393513Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [1:54:2093] 2025-06-24T20:31:23.393533Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [1:54:2093] 2025-06-24T20:31:23.394898Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:23.394980Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StInit ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:23.395035Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:23.395845Z node 1 :BS_NODE DEBUG: {NWDC ... 
DEBUG: tablet_pipe_client.cpp:195: TClient[72057594046678944] forward result remote node 40 [41:557:2159] 2025-06-24T20:34:30.939549Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72057594046678944] remote node connected [41:557:2159] 2025-06-24T20:34:30.939639Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594046678944]::SendEvent [41:557:2159] 2025-06-24T20:34:30.939994Z node 40 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594046678944] Accept Connect Originator# [41:557:2159] 2025-06-24T20:34:30.940378Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594046678944] connected with status OK role: Leader [41:557:2159] 2025-06-24T20:34:30.940453Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594046678944] send queued [41:557:2159] 2025-06-24T20:34:30.940597Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046678944] send [41:557:2159] 2025-06-24T20:34:30.940631Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046678944] push event to server [41:557:2159] 2025-06-24T20:34:30.940703Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594046678944]::SendEvent [41:557:2159] 2025-06-24T20:34:30.940862Z node 40 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72057594046678944] Push Sender# [41:556:2159] EventType# 271122945 2025-06-24T20:34:30.941040Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{16, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme 2025-06-24T20:34:30.941162Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{16, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:34:30.941498Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{16, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} hope 1 -> done Change{11, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T20:34:30.941612Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{16, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:34:30.952101Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [41:563:2160] 2025-06-24T20:34:30.952179Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [41:563:2160] 2025-06-24T20:34:30.952532Z node 41 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:34:30.952603Z node 41 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 41 selfDC 2 leaderDC 1 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [40:332:2201] 2025-06-24T20:34:30.952712Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [41:563:2160] 2025-06-24T20:34:30.952771Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037888] ::Bootstrap [41:567:2161] 2025-06-24T20:34:30.952800Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037888] lookup [41:567:2161] 2025-06-24T20:34:30.953004Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72075186224037888] queue send [41:567:2161] 
2025-06-24T20:34:30.953163Z node 41 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037888 entry.State: StInit ev: {EvForward TabletID: 72075186224037888 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:34:30.953311Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72057594037927937] forward result remote node 40 [41:563:2160] 2025-06-24T20:34:30.953957Z node 41 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037888 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:34:30.954051Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72057594037927937] remote node connected [41:563:2160] 2025-06-24T20:34:30.954118Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [41:563:2160] 2025-06-24T20:34:30.954741Z node 40 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 0} 2025-06-24T20:34:30.954805Z node 40 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 1} 2025-06-24T20:34:30.954850Z node 40 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 2} 2025-06-24T20:34:30.955096Z node 40 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [41:563:2160] 2025-06-24T20:34:30.958870Z node 41 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [40:469:2302] CurrentLeaderTablet: [40:485:2313] CurrentGeneration: 1 CurrentStep: 0} 2025-06-24T20:34:30.959173Z node 41 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [40:469:2302] CurrentLeaderTablet: [40:485:2313] CurrentGeneration: 1 CurrentStep: 0} 2025-06-24T20:34:30.959300Z node 41 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037888 entry.State: StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037888 Cookie: 0 CurrentLeader: [40:469:2302] CurrentLeaderTablet: [40:485:2313] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[40:24343667:0] : 3}, {[40:1099535971443:0] : 6}}}} 2025-06-24T20:34:30.959357Z node 41 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 72075186224037888 followers: 0 2025-06-24T20:34:30.959425Z node 41 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 41 selfDC 2 leaderDC 1 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [40:469:2302] 2025-06-24T20:34:30.959542Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037888] forward result remote node 40 [41:567:2161] 2025-06-24T20:34:30.959797Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [41:563:2160] 2025-06-24T20:34:30.959835Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [41:563:2160] 2025-06-24T20:34:30.959873Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: 
TClient[72057594037927937] push event to server [41:563:2160] 2025-06-24T20:34:30.960014Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [41:563:2160] 2025-06-24T20:34:31.040423Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037888] remote node connected [41:567:2161] 2025-06-24T20:34:31.040521Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [41:567:2161] 2025-06-24T20:34:31.040962Z node 40 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037888] Accept Connect Originator# [41:567:2161] 2025-06-24T20:34:31.041077Z node 40 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72057594037927937] Push Sender# [41:560:2160] EventType# 268959744 2025-06-24T20:34:31.041399Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{24, NKikimr::NHive::TTxRegisterNode} queued, type NKikimr::NHive::TTxRegisterNode 2025-06-24T20:34:31.041500Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{24, NKikimr::NHive::TTxRegisterNode} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:34:31.041775Z node 40 :HIVE WARN: node_info.cpp:25: HIVE#72057594037927937 Node(41, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:31.041950Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{24, NKikimr::NHive::TTxRegisterNode} hope 1 -> done Change{14, redo 208b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2025-06-24T20:34:31.042088Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{24, NKikimr::NHive::TTxRegisterNode} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:34:31.042583Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037888] connected with status OK role: Leader [41:567:2161] 2025-06-24T20:34:31.042639Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037888] send queued [41:567:2161] 2025-06-24T20:34:31.042676Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [41:567:2161] 2025-06-24T20:34:31.042751Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [41:567:2161] 2025-06-24T20:34:31.042913Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{25, NKikimr::NHive::TTxProcessBootQueue} queued, type NKikimr::NHive::TTxProcessBootQueue 2025-06-24T20:34:31.043006Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{25, NKikimr::NHive::TTxProcessBootQueue} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:34:31.043137Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{25, NKikimr::NHive::TTxProcessBootQueue} hope 1 -> done Change{15, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T20:34:31.043249Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{25, NKikimr::NHive::TTxProcessBootQueue} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:34:31.043523Z node 40 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72075186224037888] Push Sender# [41:561:2161] EventType# 268959744 2025-06-24T20:34:31.043724Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:6} Tx{6, NKikimr::NHive::TTxRegisterNode} queued, type NKikimr::NHive::TTxRegisterNode 2025-06-24T20:34:31.043774Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:6} Tx{6, NKikimr::NHive::TTxRegisterNode} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:34:31.043928Z node 40 :HIVE WARN: 
node_info.cpp:25: HIVE#72075186224037888 Node(41, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:31.044038Z node 40 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(41, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:31.044113Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:6} Tx{6, NKikimr::NHive::TTxRegisterNode} hope 1 -> done Change{6, redo 199b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2025-06-24T20:34:31.044174Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:6} Tx{6, NKikimr::NHive::TTxRegisterNode} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:34:31.044364Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:7} Tx{7, NKikimr::NHive::TTxProcessBootQueue} queued, type NKikimr::NHive::TTxProcessBootQueue 2025-06-24T20:34:31.044415Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:7} Tx{7, NKikimr::NHive::TTxProcessBootQueue} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:34:31.044503Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:7} Tx{7, NKikimr::NHive::TTxProcessBootQueue} hope 1 -> done Change{7, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T20:34:31.044549Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:7} Tx{7, NKikimr::NHive::TTxProcessBootQueue} release 4194304b of static, Memory{0 dyn 0} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestALterResourcePool [GOOD] Test command err: Trying to start YDB, gRPC: 63718, MsgBus: 24430 2025-06-24T20:34:17.154145Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616963756338031:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:17.154191Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036fd/r3tmp/tmpgf6APU/pdisk_1.dat 2025-06-24T20:34:17.838278Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:17.838462Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:17.841406Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:17.870908Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:17.879585Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616963756338013:2079] 1750797257143036 != 1750797257143039 TServer::EnableGrpc on GrpcPort 63718, node 1 2025-06-24T20:34:18.077403Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:18.077447Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:18.077456Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:18.077595Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:18.259586Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24430 TClient is connected to server localhost:24430 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:18.981953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:18.996969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:34:19.013175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-24T20:34:19.033881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:351) Trying to start YDB, gRPC: 12552, MsgBus: 24408 2025-06-24T20:34:22.001266Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616985781520734:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:22.006245Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036fd/r3tmp/tmpevOIsK/pdisk_1.dat 2025-06-24T20:34:22.247815Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:22.251693Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519616981486553390:2079] 1750797261996906 != 1750797261996909 
2025-06-24T20:34:22.280630Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:22.280720Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:22.285212Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12552, node 2 2025-06-24T20:34:22.555334Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:22.555357Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:22.555367Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:22.555496Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24408 2025-06-24T20:34:23.009308Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24408 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:23.545468Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:34:23.552934Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:34:23.585123Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) Trying to start YDB, gRPC: 27660, MsgBus: 19289 2025-06-24T20:34:27.792550Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519617005552208777:2153];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:27.863181Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036fd/r3tmp/tmppZVLI0/pdisk_1.dat 2025-06-24T20:34:28.032633Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:28.035372Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519617005552208637:2079] 1750797267739518 != 1750797267739521 2025-06-24T20:34:28.065714Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:28.065806Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:28.068598Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27660, node 3 2025-06-24T20:34:28.179779Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:28.179801Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:28.179811Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:28.179972Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19289 2025-06-24T20:34:28.803756Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19289 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:29.145031Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:29.159468Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:34:29.179950Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:29.207935Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterResourcePool, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_resource_pool.cpp:159) >> TGroupMapperTest::NonUniformClusterMirror3dc >> TPDiskTest::AllRequestsAreAnsweredOnPDiskRestart [GOOD] >> TPDiskTest::ChunkWriteDifferentOffsetAndSize >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW-VolatileTxs [GOOD] |89.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime |89.3%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime |89.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime >> TGroupMapperTest::NonUniformClusterMirror3dc [GOOD] >> Cdc::ResolvedTimestampsContinueAfterMerge [GOOD] >> Cdc::ResolvedTimestampForDisplacedUpsert >> Cdc::EnqueueRequestProcessSend [GOOD] >> Cdc::InitialScanAndResolvedTimestamps >> TSchemeShardUserAttrsTest::UserConditionsAtCreateDropOps |89.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest |89.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut |89.3%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut |89.3%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut >> TGroupMapperTest::DifferentGroupSizeInUnits [GOOD] >> YdbProxy::ReadNonExistentTopic [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::ClickHouseFilterPushdown 2025-06-24 20:34:17,080 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 20:34:17,319 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 60 secs timeout. Process tree before termination: pid rss ref pdirt 161330 47.4M 47.1M 24.0M test_tool run_ut @/home/runner/.ya/build/build_root/2btm/0027dc/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk0/testing_out_stuff/tes 161707 1.5G 1.5G 1.0G └─ ydb-core-kqp-ut-federated_query-generic_ut --trace-path-append /home/runner/.ya/build/build_root/2btm/0027dc/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unit Test command err: Trying to start YDB, gRPC: 14633, MsgBus: 30238 2025-06-24T20:33:19.011869Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616715883927262:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:19.018392Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027dc/r3tmp/tmpGJtw1Q/pdisk_1.dat 2025-06-24T20:33:19.398664Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:19.412848Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616711588959918:2079] 1750797198987753 != 1750797198987756 TServer::EnableGrpc on GrpcPort 14633, node 1 2025-06-24T20:33:19.469438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:19.469582Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:19.475524Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:33:19.539927Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:19.539950Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:19.539961Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:19.540086Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30238 TClient is connected to server localhost:30238 2025-06-24T20:33:20.054123Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:20.214309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:22.383531Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616728768829744:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:22.383662Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:22.641612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:22.768990Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616728768829866:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:22.769067Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:22.769266Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616728768829871:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:22.773291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:22.786420Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616728768829873:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T20:33:22.871641Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616728768829913:2397] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:23.405709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:23.921414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:24.008535Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616715883927262:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:24.008596Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:24.476397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:25.089141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:25.653647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:26.126295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:26.164486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:33:29.579113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710719:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-24T20:33:29.645614Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710720:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:29.647330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710722:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:29.648527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710721:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Call DescribeTable. data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv ... 
txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:07.120520Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:07.896942Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:08.648611Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:09.406099Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:10.136209Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:10.996699Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:11.056335Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T20:34:15.837745Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715717:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-24T20:34:15.951360Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:34:15.951392Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded Call DescribeTable. data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0
Traceback (most recent call last):
  File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait
    wait_for(
  File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for
    raise TimeoutError(truncate(message, MAX_MESSAGE_LEN))
yatest.common.process.TimeoutError: 60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/0027dc/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk0/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main
    res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback)
  File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait
    raise ExecutionTimeoutError(self, str(e))
yatest.common.process.ExecutionTimeoutError: (("60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/0027dc/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk0/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout",), {})
|89.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::NonUniformClusterMirror3dc [GOOD]
|89.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::UserConditionsAtAlter
>> DataShardVolatile::DistributedWriteBrokenLock [GOOD]
>> DataShardVolatile::DistributedWriteShardRestartBeforePlan+UseSink
>> TPDiskTest::ChunkWriteDifferentOffsetAndSize [GOOD]
>> TPDiskTest::ChunkWriteBadOffset
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minstep/unittest >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW-VolatileTxs [GOOD]
Test command err: 2025-06-24T20:34:18.913992Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:18.914408Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:18.914613Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002e32/r3tmp/tmpPYhaWd/pdisk_1.dat 2025-06-24T20:34:19.295229Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:34:19.296824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-24T20:34:19.311160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:34:19.315599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:19.316700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T20:34:19.318975Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:183: tablet# 72057594046316545 txid# 1 HANDLE EvProposeTransaction marker# C0 2025-06-24T20:34:19.319035Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:29: tablet# 72057594046316545 txid# 1 step# 500 Status# 16 SEND to# [1:371:2365] Proxy marker# C1 2025-06-24T20:34:19.346413Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:19.346513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:34:19.351005Z node 1 :HIVE DEBUG: hive_impl.cpp:2275: HIVE#72057594037968897 Merged config: { } 2025-06-24T20:34:19.351306Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797255391347 != 1750797255391351 2025-06-24T20:34:19.405696Z node 1 :HIVE DEBUG: hive_impl.cpp:141: HIVE#72057594037968897 Handle TEvLocal::TEvRegisterNode from [1:295:2335] HiveId: 72057594037968897 ServicedDomains { SchemeShard: 72057594046644480 PathId: 1 } TabletAvailability { Type: Mediator Priority: 0 } TabletAvailability { Type: Dummy Priority: 0 } TabletAvailability { Type: KeyValue Priority: 0 } TabletAvailability { Type: Coordinator Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } TabletAvailability { Type: SchemeShard Priority: 0 } TabletAvailability { Type: DataShard Priority: 0 } TabletAvailability { Type: PersQueue Priority: 0 } TabletAvailability { Type: PersQueueReadBalancer Priority: 0 } TabletAvailability { Type: Kesus Priority: 0 } TabletAvailability { Type: SysViewProcessor Priority: 0 } TabletAvailability { Type: ColumnShard Priority: 0 } 
TabletAvailability { Type: SequenceShard Priority: 0 } TabletAvailability { Type: ReplicationController Priority: 0 } TabletAvailability { Type: StatisticsAggregator Priority: 0 } 2025-06-24T20:34:19.405857Z node 1 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Execute 2025-06-24T20:34:19.406056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:19.406119Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-24T20:34:19.406169Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-24T20:34:19.406222Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-24T20:34:19.406253Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-24T20:34:19.406334Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:19.406699Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-24T20:34:19.406747Z node 1 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-24T20:34:19.406805Z node 1 :HIVE DEBUG: hive_impl.cpp:225: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-06-24T20:34:19.406851Z node 1 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-24T20:34:19.407060Z node 1 :HIVE DEBUG: hive_impl.cpp:808: HIVE#72057594037968897 TEvInterconnect::TEvNodeInfo NodeId 1 Location DataCenter: "1" Module: "1" Rack: "1" Unit: "1" 2025-06-24T20:34:19.419821Z node 1 :HIVE DEBUG: tx__register_node.cpp:95: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Complete 2025-06-24T20:34:19.419937Z node 1 :HIVE DEBUG: node_info.cpp:373: HIVE#72057594037968897 Node(1) Ping([1:295:2335]) 2025-06-24T20:34:19.420031Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-24T20:34:19.420556Z node 1 :HIVE DEBUG: hive_impl.cpp:737: HIVE#72057594037968897 THive::Handle::TEvSyncTablets 2025-06-24T20:34:19.420642Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:41: HIVE#72057594037968897 THive::TTxSyncTablets([1:295:2335])::Execute 2025-06-24T20:34:19.420700Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-24T20:34:19.420809Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:130: HIVE#72057594037968897 THive::TTxSyncTablets([1:295:2335])::Complete 2025-06-24T20:34:19.420957Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-24T20:34:19.420993Z node 1 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-24T20:34:19.421027Z node 1 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-24T20:34:19.421071Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-24T20:34:19.421213Z node 1 :HIVE DEBUG: hive_impl.cpp:731: HIVE#72057594037968897 Handle TEvLocal::TEvStatus for Node 1: Status: 0 StartTime: 0 ResourceMaximum { Memory: 270443339776 } 2025-06-24T20:34:19.421269Z node 1 :HIVE DEBUG: tx__status.cpp:22: HIVE#72057594037968897 THive::TTxStatus(1)::Execute 2025-06-24T20:34:19.421316Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:19.421480Z node 1 :HIVE DEBUG: hive_impl.cpp:2791: HIVE#72057594037968897 AddRegisteredDataCentersNode(1, 1) 2025-06-24T20:34:19.421539Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-24T20:34:19.421600Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-24T20:34:19.421819Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-24T20:34:19.421851Z node 1 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-24T20:34:19.421877Z node 1 :HIVE DEBUG: hive_impl.cpp:225: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-06-24T20:34:19.421905Z node 1 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-24T20:34:19.432730Z node 1 :HIVE DEBUG: tx__status.cpp:65: HIVE#72057594037968897 THive::TTxStatus(1)::Complete 2025-06-24T20:34:19.432825Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-24T20:34:19.503638Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 1 has been planned 2025-06-24T20:34:19.503755Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 1 for mediator 72057594046382081 tablet 72057594046644480 2025-06-24T20:34:19.504327Z node 1 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 1000 in 0.500000s at 0.950000s 2025-06-24T20:34:19.504760Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 500, txid# 1 marker# C2 2025-06-24T20:34:19.504838Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 1 stepId# 500 Status# 17 SEND EvProposeTransactionStatus to# [1:371:2365] Proxy 2025-06-24T20:34:19.505816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T20:34:19.507498Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2025-06-24T20:34:19.507653Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:6] persistent tx 1 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2025-06-24T20:34:19.507864Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:6] persistent tx 1 for mediator 72057594046382081 acknowledged 2025-06-24T20:34:19.507912Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:6] persistent tx 1 acknowledged 2025-06-24T20:34:19.508835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T20:34:19.508917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 1 2025-06-24T20:34:19.510056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 
72057594046644480, txId: 1, subscribers: 1 2025-06-24T20:34:19.517443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/table-1, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:34:19.518776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:34:19.518921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:19.524179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, database: /Root, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /Root/table-1 2025-06-24T20:34:19.534548Z node 1 :HIVE DEBUG: hive_impl.cpp:34: HIVE# ... 6-24T20:34:35.207612Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 281474976715665 has been planned 2025-06-24T20:34:35.207729Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 281474976715665 for mediator 72057594046382081 tablet 72057594046644480 2025-06-24T20:34:35.207780Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 281474976715665 for mediator 72057594046382081 tablet 72075186224037889 2025-06-24T20:34:35.208049Z node 2 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 33500 in 0.500000s at 33.450000s 2025-06-24T20:34:35.208515Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 33000, txid# 281474976715665 marker# C2 2025-06-24T20:34:35.208611Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 281474976715665 stepId# 33000 Status# 17 SEND EvProposeTransactionStatus to# [2:371:2365] Proxy 2025-06-24T20:34:35.209161Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 33000, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T20:34:35.210168Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715665 at step 33000 at tablet 72075186224037889 { Transactions { TxId: 281474976715665 AckTo { RawX1: 0 RawX2: 0 } } Step: 33000 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-06-24T20:34:35.210230Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T20:34:35.210461Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T20:34:35.210521Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2025-06-24T20:34:35.210580Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [33000:281474976715665] in PlanQueue unit at 72075186224037889 2025-06-24T20:34:35.210802Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 33000:281474976715665 keys 
extracted: 0 2025-06-24T20:34:35.210953Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T20:34:35.211184Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T20:34:35.211267Z node 2 :TX_DATASHARD INFO: drop_table_unit.cpp:72: Trying to DROP TABLE at 72075186224037889 2025-06-24T20:34:35.211727Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:34:35.214045Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 33000} 2025-06-24T20:34:35.214156Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T20:34:35.214438Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2025-06-24T20:34:35.214554Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:49] persistent tx 281474976715665 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2025-06-24T20:34:35.214608Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:49] persistent tx 281474976715665 for mediator 72057594046382081 tablet 72075186224037889 removed=1 2025-06-24T20:34:35.214662Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:49] persistent tx 281474976715665 for mediator 72057594046382081 acknowledged 2025-06-24T20:34:35.215022Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:49] persistent tx 281474976715665 acknowledged 2025-06-24T20:34:35.215661Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T20:34:35.215758Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [33000 : 281474976715665] from 72075186224037889 at tablet 72075186224037889 send result to client [2:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T20:34:35.215873Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715665 state PreOffline TxInFly 0 2025-06-24T20:34:35.215970Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T20:34:35.216231Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1104: All parts have reached barrier, tx: 281474976715665, done: 0, blocked: 1 2025-06-24T20:34:35.219638Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715665 datashard 72075186224037889 state PreOffline 2025-06-24T20:34:35.219729Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-24T20:34:35.220394Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715665:0 2025-06-24T20:34:35.220503Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 281474976715665, 
publications: 2, subscribers: 1 2025-06-24T20:34:35.221946Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715665, subscribers: 1 2025-06-24T20:34:35.222378Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T20:34:35.223783Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MjRlOGNkMTctZTljZTQwN2UtOWQyMTkwNS1lZGI0OTgzMw== 2025-06-24 20:34:35.223 INFO ydb-core-tx-datashard-ut_minstep(pid=176577, tid=0x00007F1546D34D40) [core exec] yql_execution.cpp:133: Completed async execution for node #42 2025-06-24T20:34:35.223922Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MjRlOGNkMTctZTljZTQwN2UtOWQyMTkwNS1lZGI0OTgzMw== 2025-06-24 20:34:35.223 INFO ydb-core-tx-datashard-ut_minstep(pid=176577, tid=0x00007F1546D34D40) [core exec] yql_execution.cpp:153: State is ExecutionComplete after apply async changes for node #42 2025-06-24T20:34:35.224056Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MjRlOGNkMTctZTljZTQwN2UtOWQyMTkwNS1lZGI0OTgzMw== 2025-06-24 20:34:35.224 INFO ydb-core-tx-datashard-ut_minstep(pid=176577, tid=0x00007F1546D34D40) [core exec] yql_execution.cpp:59: Begin, root #43 2025-06-24T20:34:35.224128Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MjRlOGNkMTctZTljZTQwN2UtOWQyMTkwNS1lZGI0OTgzMw== 2025-06-24 20:34:35.224 INFO ydb-core-tx-datashard-ut_minstep(pid=176577, tid=0x00007F1546D34D40) [core exec] yql_execution.cpp:72: Collect unused nodes for root #43, status: Ok 2025-06-24T20:34:35.224199Z node 2 :KQP_YQL TRACE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MjRlOGNkMTctZTljZTQwN2UtOWQyMTkwNS1lZGI0OTgzMw== 2025-06-24 20:34:35.224 TRACE ydb-core-tx-datashard-ut_minstep(pid=176577, tid=0x00007F1546D34D40) [core exec] yql_execution.cpp:387: {0}, callable #43 2025-06-24T20:34:35.224331Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MjRlOGNkMTctZTljZTQwN2UtOWQyMTkwNS1lZGI0OTgzMw== 2025-06-24 20:34:35.224 INFO ydb-core-tx-datashard-ut_minstep(pid=176577, tid=0x00007F1546D34D40) [core exec] yql_execution.cpp:577: Node #43 finished execution 2025-06-24T20:34:35.224432Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MjRlOGNkMTctZTljZTQwN2UtOWQyMTkwNS1lZGI0OTgzMw== 2025-06-24 20:34:35.224 INFO ydb-core-tx-datashard-ut_minstep(pid=176577, tid=0x00007F1546D34D40) [core exec] yql_execution.cpp:594: Node #43 created 0 trackable nodes: 2025-06-24T20:34:35.224523Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MjRlOGNkMTctZTljZTQwN2UtOWQyMTkwNS1lZGI0OTgzMw== 2025-06-24 20:34:35.224 INFO ydb-core-tx-datashard-ut_minstep(pid=176577, tid=0x00007F1546D34D40) [core exec] yql_execution.cpp:87: Finish, output #43, status: Ok 2025-06-24T20:34:35.224636Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MjRlOGNkMTctZTljZTQwN2UtOWQyMTkwNS1lZGI0OTgzMw== 2025-06-24 20:34:35.224 INFO ydb-core-tx-datashard-ut_minstep(pid=176577, tid=0x00007F1546D34D40) [core exec] yql_execution.cpp:93: Creating finalizing transformer, output #43 2025-06-24T20:34:35.224866Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MjRlOGNkMTctZTljZTQwN2UtOWQyMTkwNS1lZGI0OTgzMw== 2025-06-24 20:34:35.224 NOTE 
ydb-core-tx-datashard-ut_minstep(pid=176577, tid=0x00007F1546D34D40) [common provider] yql_provider_gateway.cpp:21:
: Info: Execution, code: 1060 2025-06-24T20:34:35.224943Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MjRlOGNkMTctZTljZTQwN2UtOWQyMTkwNS1lZGI0OTgzMw== 2025-06-24 20:34:35.224 NOTE ydb-core-tx-datashard-ut_minstep(pid=176577, tid=0x00007F1546D34D40) [common provider] yql_provider_gateway.cpp:21:
:1:12: Info: Executing DROP TABLE 2025-06-24T20:34:35.225056Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MjRlOGNkMTctZTljZTQwN2UtOWQyMTkwNS1lZGI0OTgzMw== 2025-06-24 20:34:35.225 NOTE ydb-core-tx-datashard-ut_minstep(pid=176577, tid=0x00007F1546D34D40) [common provider] yql_provider_gateway.cpp:21:
: Info: Success, code: 4 2025-06-24T20:34:35.239959Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T20:34:35.240240Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037889 Initiating switch from PreOffline to Offline state 2025-06-24T20:34:35.242655Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037889 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T20:34:35.243751Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2025-06-24T20:34:35.244257Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:74: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() ShardOwnerId: 72057594046644480 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186224037889 2025-06-24T20:34:35.244330Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:19: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute Tablet 72075186224037889 2025-06-24T20:34:35.244455Z node 2 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037968897 Tablet(DataShard.72075186224037889.Leader.1) VolatileState: Running -> Stopped (Node 2) 2025-06-24T20:34:35.244597Z node 2 :HIVE DEBUG: tablet_info.cpp:523: HIVE#72057594037968897 Sending TEvStopTablet(DataShard.72075186224037889.Leader.1 gen 1) to node 2 2025-06-24T20:34:35.244754Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:67: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() result Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2 |89.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::DifferentGroupSizeInUnits [GOOD] >> TPDiskTest::ChunkWriteBadOffset [GOOD] >> TYardTest::TestLogWriteCutUnequal [GOOD] >> TYardTest::TestLogMultipleWriteRead |89.3%| [TA] $(B)/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeShardUserAttrsTest::UserConditionsAtCreateDropOps [GOOD] >> TConsoleTests::TestRestartConsoleAndPoolsExtSubdomain |89.3%| [TA] $(B)/ydb/core/tx/datashard/ut_minstep/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TSchemeShardUserAttrsTest::SetAttrs >> TSchemeShardUserAttrsTest::VariousUse >> TSchemeShardUserAttrsTest::UserConditionsAtAlter [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::ReadNonExistentTopic [GOOD] Test command err: 2025-06-24T20:34:19.192305Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616971883472503:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:19.197828Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002788/r3tmp/tmpPw8ylW/pdisk_1.dat 2025-06-24T20:34:19.687747Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:19.736515Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:19.741428Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:19.745450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9992 TServer::EnableGrpc on GrpcPort 2723, node 1 2025-06-24T20:34:20.226224Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:20.235884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:20.235917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:20.235926Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:20.236048Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9992 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:34:20.931576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:20.967490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:34:21.249358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:23.771678Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616989063342570:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:23.771778Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616989063342571:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:23.776264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:23.780222Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616989063342581:2448] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:34:23.782825Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616989063342559:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:23.783043Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:23.787522Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616989063342578:2335], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-24T20:34:23.787654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-24T20:34:23.787762Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616989063342579:2336], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-24T20:34:23.858737Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616989063342627:2478] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:23.893601Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616989063342645:2486] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:24.252777Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616971883472503:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:24.276735Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:24.877671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:25.563948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:26.313083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T20:34:27.094755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710685:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T20:34:27.851192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710688:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002788/r3tmp/tmpasLRBL/pdisk_1.dat 2025-06-24T20:34:31.446523Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect 
path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:31.639323Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:31.639453Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:31.643041Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:31.670844Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29407 TServer::EnableGrpc on GrpcPort 8553, node 2 2025-06-24T20:34:32.265889Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:32.265911Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:32.265920Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:32.266069Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:32.287368Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29407 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:32.844898Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:34:32.852156Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 >> TSchemeShardUserAttrsTest::Boot >> TYardTest::TestLogMultipleWriteRead [GOOD] >> TYardTest::TestLogContinuityPersistence ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> TPDiskTest::ChunkWriteBadOffset [GOOD] Test command err: restart# 0 start with noop scheduler# 0 end with noop scheduler# 0 all chunk reads are received all chunk writes are received all log writes are received restart# 0 start with noop scheduler# 1 end with noop scheduler# 0 all chunk reads are received all chunk writes are received all log writes are received restart# 0 start with noop scheduler# 0 end with noop scheduler# 1 all chunk reads are received all chunk writes are received all log writes are received restart# 0 start with noop scheduler# 1 end with noop scheduler# 1 all chunk reads are received all chunk writes are received all log writes are received restart# 1 start with noop scheduler# 0 end with noop scheduler# 0 restart all chunk reads are received all chunk writes are received all log writes are received restart# 1 start with noop scheduler# 1 end with noop scheduler# 0 restart all chunk reads are received all chunk writes are received all log writes are received restart# 1 start with noop scheduler# 0 end with noop scheduler# 1 restart all chunk reads are received all chunk writes are received all log writes are received restart# 1 start with noop scheduler# 1 end with noop scheduler# 1 restart all chunk reads are received all chunk writes are received all log writes are received plainDataChunks# 0 seed# 1750797276179545 offset# 0 size# 29154 29154 ?= 29154 offset# 625856 size# 14753 14753 ?= 14753 offset# 1398016 size# 2137 2137 ?= 2137 offset# 2340864 size# 30408 30408 ?= 30408 offset# 2755392 size# 8109 8109 ?= 8109 offset# 2865120 size# 21126 21126 ?= 21126 offset# 3048000 size# 31034 31034 ?= 31034 offset# 3848608 size# 19780 19780 ?= 19780 offset# 4986528 size# 21916 21916 ?= 21916 offset# 5327904 size# 18129 18129 ?= 18129 offset# 6181344 size# 10611 10611 ?= 10611 offset# 6408928 size# 23906 23906 ?= 23906 offset# 6632448 size# 32100 32100 ?= 32100 offset# 7107936 size# 6742 6742 ?= 6742 offset# 7538720 size# 3267 3267 ?= 3267 offset# 7863840 size# 8146 8146 ?= 8146 offset# 8278368 size# 12680 12680 ?= 12680 offset# 9363456 size# 24760 24760 ?= 24760 offset# 10107168 size# 22474 22474 ?= 22474 offset# 10509504 size# 24926 24926 ?= 24926 offset# 11846560 size# 19913 19913 ?= 19913 offset# 13094208 size# 8937 8937 ?= 8937 offset# 14134592 size# 23569 23569 ?= 23569 offset# 14219936 size# 12213 12213 ?= 12213 offset# 14427200 size# 10800 10800 ?= 10800 offset# 14841728 size# 12551 12551 ?= 12551 offset# 15597632 size# 21759 21759 ?= 21759 offset# 16040608 size# 9080 9080 ?= 9080 offset# 17324832 size# 2071 2071 ?= 2071 offset# 17723104 size# 29706 29706 ?= 29706 offset# 18320512 size# 30590 30590 ?= 30590 offset# 18409920 size# 24785 24785 ?= 24785 offset# 18560288 size# 28978 28978 ?= 28978 offset# 18893536 size# 10295 10295 ?= 10295 offset# 19872960 size# 13452 13452 ?= 13452 offset# 20307808 size# 24207 24207 ?= 24207 offset# 21486368 size# 26863 26863 ?= 26863 offset# 21734272 size# 19681 19681 ?= 19681 offset# 22884384 size# 9476 9476 ?= 9476 offset# 22941280 size# 7334 7334 ?= 7334 offset# 23863808 size# 24812 24812 ?= 24812 offset# 25196800 size# 13296 13296 
?= 13296 offset# 25798272 size# 1793 1793 ?= 1793 offset# 27025600 size# 30708 30708 ?= 30708 offset# 28338272 size# 29480 29480 ?= 29480 offset# 28504896 size# 18063 18063 ?= 18063 offset# 28760928 size# 27292 27292 ?= 27292 offset# 28793440 size# 17113 17113 ?= 17113 offset# 29549344 size# 3400 3400 ?= 3400 offset# 30317440 size# 904 904 ?= 904 offset# 31130240 size# 30020 30020 ?= 30020 offset# 31841440 size# 31863 31863 ?= 31863 offset# 32325056 size# 12239 12239 ?= 12239 offset# 33093152 size# 5995 5995 ?= 5995 offset# 33926272 size# 32474 32474 ?= 32474 offset# 34649664 size# 27854 27854 ?= 27854 offset# 35381184 size# 27332 27332 ?= 27332 offset# 36454080 size# 873 873 ?= 873 offset# 37592000 size# 6326 6326 ?= 6326 offset# 38644576 size# 12168 12168 ?= 12168 offset# 39908480 size# 24112 24112 ?= 24112 offset# 40603424 size# 2875 2875 ?= 2875 offset# 41688512 size# 13532 13532 ?= 13532 offset# 42570400 size# 12992 12992 ?= 12992 offset# 43488864 size# 2228 2228 ?= 2228 offset# 44809664 size# 599 599 ?= 599 offset# 44874688 size# 4481 4481 ?= 4481 offset# 45277024 size# 26018 26018 ?= 26018 offset# 45610272 size# 16304 16304 ?= 16304 offset# 46650656 size# 8906 8906 ?= 8906 offset# 47691040 size# 2616 2616 ?= 2616 offset# 48816768 size# 19080 19080 ?= 19080 offset# 49914048 size# 328 328 ?= 328 offset# 50942240 size# 16221 16221 ?= 16221 offset# 52263040 size# 14286 14286 ?= 14286 offset# 52742592 size# 22209 22209 ?= 22209 offset# 53722016 size# 2751 2751 ?= 2751 offset# 54490112 size# 20706 20706 ?= 20706 offset# 55241952 size# 12131 12131 ?= 12131 offset# 55758080 size# 2719 2719 ?= 2719 offset# 55953152 size# 8293 8293 ?= 8293 offset# 56725312 size# 541 541 ?= 541 offset# 57351168 size# 29313 29313 ?= 29313 offset# 58224928 size# 10666 10666 ?= 10666 offset# 59244992 size# 9350 9350 ?= 9350 offset# 60395104 size# 9249 9249 ?= 9249 offset# 61049408 size# 23145 23145 ?= 23145 offset# 61622432 size# 27913 27913 ?= 27913 offset# 61898784 size# 32321 32321 ?= 32321 offset# 61910976 size# 21373 21373 ?= 21373 offset# 62366144 size# 5807 5807 ?= 5807 offset# 63036704 size# 8355 8355 ?= 8355 offset# 63548768 size# 5179 5179 ?= 5179 offset# 64507872 size# 14302 14302 ?= 14302 offset# 65475104 size# 4750 4750 ?= 4750 offset# 66795904 size# 11901 11901 ?= 11901 offset# 68015104 size# 27806 27806 ?= 27806 offset# 68823840 size# 9937 9937 ?= 9937 offset# 69348096 size# 17529 17529 ?= 17529 offset# 69945504 size# 8314 8314 ?= 8314 offset# 69945504 size# 23810 23810 ?= 23810 offset# 70420992 size# 17206 17206 ?= 17206 offset# 71542656 size# 24336 24336 ?= 24336 offset# 72022208 size# 13498 13498 ?= 13498 offset# 72583040 size# 30841 30841 ?= 30841 offset# 73123552 size# 16369 16369 ?= 16369 offset# 73208896 size# 22462 22462 ?= 22462 offset# 74375264 size# 18155 18155 ?= 18155 offset# 75106784 size# 13043 13043 ?= 13043 offset# 75423776 size# 6274 6274 ?= 6274 offset# 75891136 size# 20691 20691 ?= 20691 offset# 76699872 size# 2192 2192 ?= 2192 offset# 77175360 size# 29641 29641 ?= 29641 offset# 77894688 size# 32279 32279 ?= 32279 offset# 78171040 size# 15591 15591 ?= 15591 offset# 78829408 size# 9779 9779 ?= 9779 offset# 80068928 size# 5824 5824 ?= 5824 offset# 80670400 size# 21913 21913 ?= 21913 offset# 81462880 size# 7297 7297 ?= 7297 offset# 81601056 size# 15547 15547 ?= 15547 offset# 82369152 size# 19842 19842 ?= 19842 offset# 83661504 size# 31020 31020 ?= 31020 offset# 84523072 size# 16667 16667 ?= 16667 offset# 84746592 size# 26828 26828 ?= 26828 offset# 85258656 size# 10639 10639 ?= 
10639 offset# 86002368 size# 2454 2454 ?= 2454 offset# 87197184 size# 10134 10134 ?= 10134 offset# 88290400 size# 19008 19008 ?= 19008 offset# 88448896 size# 5823 5823 ?= 5823 offset# 89233248 size# 20845 20845 ?= 20845 offset# 89542112 size# 15868 15868 ?= 15868 offset# 89635584 size# 19364 19364 ?= 19364 offset# 90700352 size# 22986 22986 ?= 22986 offset# 91399360 size# 20807 20807 ?= 20807 offset# 91655392 size# 2416 2416 ?= 2416 offset# 92541344 size# 25411 25411 ?= 25411 offset# 93675200 size# 26834 26834 ?= 26834 offset# 94658688 size# 2318 2318 ?= 2318 offset# 95520256 size# 28978 28978 ?= 28978 offset# 95784416 size# 24180 24180 ?= 24180 offset# 96320864 size# 10783 10783 ?= 10783 offset# 97308416 size# 5781 5781 ?= 5781 offset# 98604832 size# 4868 4868 ?= 4868 offset# 99287584 size# 1990 1990 ?= 1990 offset# 99958144 size# 27850 27850 ?= 27850 offset# 100051616 size# 31317 31317 ?= 31317 offset# 100246688 size# 22771 22771 ?= 22771 offset# 101547168 size# 15456 15456 ?= 15456 offset# 102246176 size# 9163 9163 ?= 9163 offset# 102965504 size# 25951 25951 ?= 25951 offset# 103640128 size# 7088 7088 ?= 7088 offset# 104237536 size# 337 337 ?= 337 offset# 104290368 size# 9917 9917 ?= 9917 offset# 104928416 size# 8124 8124 ?= 8124 offset# 105651808 size# 22556 22556 ?= 22556 offset# 105932224 size# 10686 10686 ?= 10686 offset# 107098592 size# 11075 11075 ?= 11075 offset# 108118656 size# 9830 9830 ?= 9830 offset# 109183424 size# 30477 30477 ?= 30477 offset# 109195616 size# 13652 13652 ?= 13652 offset# 110207552 size# 22789 22789 ?= 22789 offset# 110821216 size# 16351 16351 ?= 16351 offset# 111544608 size# 15949 15949 ?= 15949 offset# 112280192 size# 29489 29489 ?= 29489 offset# 112316768 size# 17628 17628 ?= 17628 offset# 113661952 size# 16448 16448 ?= 16448 offset# 114852704 size# 10312 10312 ?= 10312 offset# 114852704 size# 19381 19381 ?= 19381 offset# 115104672 size# 14766 14766 ?= 14766 offset# 115978432 size# 25784 25784 ?= 25784 offset# 116417344 size# 25193 25193 ?= 25193 offset# 117591840 size# 9471 9471 ?= 9471 offset# 118254272 size# 30948 30948 ?= 30948 offset# 118571264 size# 25316 25316 ?= 25316 offset# 119753888 size# 20264 20264 ?= 20264 offset# 120891808 size# 9364 9364 ?= 9364 offset# 121651776 size# 28508 28508 ?= 28508 offset# 122407680 size# 30356 30356 ?= 30356 offset# 123257056 size# 23189 23189 ?= 23189 offset# 124025152 size# 11161 11161 ?= 11161 offset# 124439680 size# 14091 14091 ?= 14091 offset# 125679200 size# 24845 24845 ?= 24845 offset# 126422912 size# 10786 10786 ?= 10786 offset# 126650496 size# 17115 17115 ?= 17115 offset# 127406400 size# 231 231 ?= 231 offset# 127853440 size# 1899 1899 ?= 1899 offset# 128759712 size# 15004 15004 ?= 15004 offset# 129694432 size# 26082 26082 ?= 26082 offset# 129885440 size# 16163 16163 ?= 16163 offset# 130889248 size# 19425 19425 ?= 19425 offset# 131563872 size# 28302 28302 ?= 28302 offset# 131958080 size# 26792 26792 ?= 26792 offset# 133071616 size# 10510 10510 ?= 10510 offset# 133734048 size# 1576 1576 ?= 1576 offset# 134831328 size# 11424 11424 ?= 11424 offset# 134985760 size# 21578 21578 ?= 21578 plainDataChunks# 1 seed# 1750797276971832 offset# 0 size# 12123 12123 ?= 12123 offset# 614400 size# 25961 25961 ?= 25961 offset# 835584 size# 10462 10462 ?= 10462 offset# 1814528 size# 126 126 ?= 126 offset# 2285568 size# 26593 26593 ?= 26593 offset# 2707456 size# 6841 6841 ?= 6841 offset# 3076096 size# 19115 19115 ?= 19115 offset# 3112960 size# 9270 9270 ?= 9270 offset# 3784704 size# 28470 28470 ?= 28470 offset# 4734976 size# 
8071 8071 ?= 8071 offset# 4853760 size# 9439 9439 ?= 9439 offset# 5484544 size# 4080 4080 ?= 4080 offset# 5574656 size# 9352 9352 ?= 9352 offset# 6737920 size# 23620 23620 ?= 23620 offset# 7487488 size# 32734 32734 ?= 32734 offset# 7495680 size# 7911 7911 ?= 7911 offset# 8163328 size# 21052 21052 ?= 21052 offset# 8343552 size# 8015 8015 ?= 8015 offset# 9220096 size# 28300 28300 ?= 28300 offset# 9990144 size# 31767 31767 ?= 31767 offset# 11329536 size# 14345 14345 ?= 14345 offset# 11886592 size# 8846 8846 ?= 8846 offset# 12259328 size# 29584 29584 ?= 29584 offset# 12820480 size# 23788 23788 ?= 23788 offset# 14077952 size# 23977 23977 ?= 23977 offset# 15245312 size# 30274 30274 ?= 30274 offset# 16343040 size# 11049 11049 ?= 11049 offset# 17346560 size# 27818 27818 ?= 27818 offset# 18644992 size# 21157 21157 ?= 21157 offset# 19099648 size# 17894 17894 ?= 17894 offset# 20406272 size# 40 40 ?= 40 offset# 20410368 size# 4944 4944 ?= 4944 offset# 21344256 size# 27460 27460 ?= 27460 offset# 21917696 size# 8618 8618 ?= 8618 offset# 22720512 size# 31412 31412 ?= 31412 offset# 23404544 size# 23768 23768 ?= 23768 offset# 24145920 size# 3367 3367 ?= 3367 offset# 24330240 size# 20421 20421 ?= 20421 offset# 25260032 size# 13291 13291 ?= 13291 offset# 25690112 size# 7558 7558 ?= 7558 offset# 26689536 size# 23292 23292 ?= 23292 offset# 27918336 size# 13305 13305 ?= 13305 offset# 27934720 size# 3063 3063 ?= 3063 offset# 28254208 size# 22033 22033 ?= 22033 offset# 28553216 size# 21339 21339 ?= 21339 offset# 28749824 size# 24090 24090 ?= 24090 offset# 29487104 size# 21706 21706 ?= 21706 offset# 30662656 size# 10931 10931 ?= 10931 offset# 30670848 size# 13667 13667 ?= 13667 offset# 30994432 size# 23161 23161 ?= 23161 offset# 32055296 size# 5336 5336 ?= 5336 offset# 32186368 size# 29328 29328 ?= 29328 offset# 32370688 size# 12295 12295 ?= 12295 offset# 33525760 size# 2994 2994 ?= 2994 offset# 34635776 size# 31539 31539 ?= 31539 offset# 35594240 size# 16157 16157 ?= 16157 offset# 36483072 size# 27873 27873 ?= 27873 offset# 37339136 size# 14569 14569 ?= 14569 offset# 38219776 size# 22427 22427 ?= 22427 offset# 38825984 size# 512 512 ?= 512 offset# 39874560 size# 3483 3483 ?= 3483 offset# 41103360 size# 7796 7796 ?= 7796 offset# 41979904 size# 31045 31045 ?= 31045 offset# 42647552 size# 31422 31422 ?= 31422 offset# 43204608 size# 20960 20960 ?= 20960 offset# 44351488 size# 18102 18102 ?= 18102 offset# 44994560 size# 6677 6677 ?= 6677 offset# 45539328 size# 4744 4744 ?= 4744 offset# 46780416 size# 98 98 ?= 98 offset# 47599616 size# 19622 19622 ?= 19622 offset# 48734208 size# 2682 2682 ?= 2682 offset# 49442816 size# 7727 7727 ?= 7727 offset# 50634752 size# 17270 17270 ?= 17270 offset# 51482624 size# 25740 25740 ?= 25740 offset# 51945472 size# 17600 17600 ?= 17600 offset# 52248576 size# 16960 16960 ?= 16960 offset# 52469760 size# 871 871 ?= 871 offset# 52625408 size# 16954 16954 ?= 16954 offset# 53235712 size# 12845 12845 ?= 12845 offset# 53260288 size# 25074 25074 ?= 25074 offset# 54247424 size# 29362 29362 ?= 29362 offset# 55095296 size# 20093 20093 ?= 20093 offset# 55246848 size# 1336 1336 ?= 1336 offset# 55582720 size# 4840 4840 ?= 4840 offset# 56795136 size# 9323 9323 ?= 9323 offset# 57749504 size# 3010 3010 ?= 3010 offset# 58363904 size# 12690 12690 ?= 12690 offset# 59154432 size# 7259 7259 ?= 7259 offset# 60174336 size# 24102 24102 ?= 24102 offset# 61444096 size# 15058 15058 ?= 15058 offset# 62152704 size# 31893 31893 ?= 31893 offset# 63492096 size# 26409 26409 ?= 26409 offset# 64380928 size# 1729 1729 ?= 1729 
offset# 65310720 size# 26036 26036 ?= 26036 offset# 65687552 size# 26012 26012 ?= 26012 offset# 66039808 size# 28184 28184 ?= 28184 offset# 66166784 size# 11455 11455 ?= 11455 offset# 66904064 size# 6995 6995 ?= 6995 offset# 68079616 size# 31284 31284 ?= 31284 offset# 68972544 size# 13735 13735 ?= 13735 offset# 69480448 size# 8259 8259 ?= 8259 offset# 70156288 size# 31212 31212 ?= 31212 offset# 70156288 size# 594 594 ?= 594 offset# 71057408 size# 23658 23658 ?= 23658 offset# 71397376 size# 14088 14088 ?= 14088 offset# 71675904 size# 3522 3522 ?= 3522 offset# 71942144 size# 21228 21228 ?= 21228 offset# 72024064 size# 22051 22051 ?= 22051 offset# 72646656 size# 29237 29237 ?= 29237 offset# 73363456 size# 8314 8314 ?= 8314 offset# 74067968 size# 24362 24362 ?= 24362 offset# 75341824 size# 27265 27265 ?= 27265 offset# 76472320 size# 7870 7870 ?= 7870 offset# 77406208 size# 15142 15142 ?= 15142 offset# 78622720 size# 22830 22830 ?= 22830 offset# 79847424 size# 27324 27324 ?= 27324 offset# 80039936 size# 2888 2888 ?= 2888 offset# 81158144 size# 28022 28022 ?= 28022 offset# 81190912 size# 9609 9609 ?= 9609 offset# 81670144 size# 30269 30269 ?= 30269 offset# 82219008 size# 20246 20246 ?= 20246 offset# 82898944 size# 23002 23002 ?= 23002 offset# 83755008 size# 16192 16192 ?= 16192 offset# 84496384 size# 29667 29667 ?= 29667 offset# 85688320 size# 14193 14193 ?= 14193 offset# 85852160 size# 256 256 ?= 256 offset# 86736896 size# 23387 23387 ?= 23387 offset# 87699456 size# 11529 11529 ?= 11529 offset# 88838144 size# 17243 17243 ?= 17243 offset# 89440256 size# 12545 12545 ?= 12545 offset# 90521600 size# 29796 29796 ?= 29796 offset# 90578944 size# 30303 30303 ?= 30303 offset# 91250688 size# 19741 19741 ?= 19741 offset# 91729920 size# 15384 15384 ?= 15384 offset# 93003776 size# 16878 16878 ?= 16878 offset# 94105600 size# 16019 16019 ?= 16019 offset# 95100928 size# 15342 15342 ?= 15342 offset# 96358400 size# 16279 16279 ?= 16279 offset# 96686080 size# 29584 29584 ?= 29584 offset# 96993280 size# 15113 15113 ?= 15113 offset# 97955840 size# 27859 27859 ?= 27859 offset# 99229696 size# 22351 22351 ?= 22351 offset# 100007936 size# 9506 9506 ?= 9506 offset# 100986880 size# 26448 26448 ?= 26448 offset# 102256640 size# 25900 25900 ?= 25900 offset# 102608896 size# 15230 15230 ?= 15230 offset# 103903232 size# 21427 21427 ?= 21427 offset# 105136128 size# 5458 5458 ?= 5458 offset# 105820160 size# 12067 12067 ?= 12067 offset# 106627072 size# 16626 16626 ?= 16626 offset# 107827200 size# 29631 29631 ?= 29631 offset# 108503040 size# 13720 13720 ?= 13720 offset# 109027328 size# 3626 3626 ?= 3626 offset# 109834240 size# 24710 24710 ?= 24710 offset# 110338048 size# 216 216 ?= 216 offset# 110530560 size# 25056 25056 ?= 25056 offset# 111824896 size# 22467 22467 ?= 22467 offset# 112537600 size# 1932 1932 ?= 1932 offset# 112762880 size# 23241 23241 ?= 23241 offset# 113397760 size# 5425 5425 ?= 5425 offset# 113778688 size# 11343 11343 ?= 11343 offset# 114814976 size# 7582 7582 ?= 7582 offset# 114827264 size# 3231 3231 ?= 3231 offset# 115421184 size# 4582 4582 ?= 4582 offset# 115556352 size# 10143 10143 ?= 10143 offset# 115920896 size# 19253 19253 ?= 19253 offset# 116125696 size# 6787 6787 ?= 6787 offset# 116264960 size# 12231 12231 ?= 12231 offset# 116277248 size# 5490 5490 ?= 5490 offset# 116326400 size# 17001 17001 ?= 17001 offset# 116469760 size# 7050 7050 ?= 7050 offset# 117776384 size# 23557 23557 ?= 23557 offset# 119046144 size# 12159 12159 ?= 12159 offset# 119980032 size# 2228 2228 ?= 2228 offset# 120537088 size# 31390 
31390 ?= 31390 offset# 120889344 size# 898 898 ?= 898 offset# 121028608 size# 5250 5250 ?= 5250 offset# 122163200 size# 29830 29830 ?= 29830 offset# 122728448 size# 23583 23583 ?= 23583 offset# 123457536 size# 21860 21860 ?= 21860 offset# 124788736 size# 9272 9272 ?= 9272 offset# 126136320 size# 11802 11802 ?= 11802 offset# 126619648 size# 10958 10958 ?= 10958 offset# 127123456 size# 1031 1031 ?= 1031 offset# 127827968 size# 19790 19790 ?= 19790 offset# 128868352 size# 27164 27164 ?= 27164 offset# 129495040 size# 18750 18750 ?= 18750 offset# 129593344 size# 8709 8709 ?= 8709 offset# 130420736 size# 17734 17734 ?= 17734 offset# 130576384 size# 1468 1468 ?= 1468 offset# 130711552 size# 6312 6312 ?= 6312 offset# 131887104 size# 418 418 ?= 418 offset# 132595712 size# 18233 18233 ?= 18233 offset# 133107712 size# 16258 16258 ?= 16258 offset# 133423104 size# 5858 5858 ?= 5858 offset# 134676480 size# 8615 8615 ?= 8615 offset# 135069696 size# 4412 4412 ?= 4412 offset# 135159808 size# 27889 27889 ?= 27889 seed# 1750797277624749 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::UserConditionsAtCreateDropOps [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:34:37.124810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:34:37.124913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:34:37.124956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:34:37.125014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:34:37.125066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:34:37.125098Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:34:37.125156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:34:37.125234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:34:37.126071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:34:37.126466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:34:37.212544Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:34:37.212609Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:37.229176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:34:37.233969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:34:37.234209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:34:37.247013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:34:37.247328Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:34:37.248107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:34:37.248520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:34:37.258949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:34:37.259173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:34:37.260527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:34:37.260603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:34:37.260756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:34:37.260829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:34:37.260879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:34:37.261043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:34:37.268628Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:34:37.452178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:34:37.452432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:37.452665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:34:37.452729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:34:37.452980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:34:37.453124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:37.457333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:34:37.457568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:34:37.457817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:37.457872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:34:37.457912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:34:37.457947Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:34:37.460701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:37.460767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:34:37.460836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:34:37.463395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:37.463453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:37.463496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:34:37.463544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:34:37.469049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 
2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:34:37.470961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:34:37.471210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:34:37.472332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:34:37.472487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:34:37.472557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:34:37.472934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:34:37.473000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:34:37.473204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:34:37.473339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:34:37.479822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:34:37.479881Z node 1 :FLAT_TX_SCHEMESHARD ... 
is published: false 2025-06-24T20:34:37.653048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-24T20:34:37.653072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 105:0 2025-06-24T20:34:37.653095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 105:0 2025-06-24T20:34:37.653138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-24T20:34:37.653169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 105, publications: 2, subscribers: 0 2025-06-24T20:34:37.653194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 105, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-24T20:34:37.653215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 105, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-24T20:34:37.654289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T20:34:37.654604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T20:34:37.656284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:34:37.656334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:34:37.656500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-24T20:34:37.656633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:34:37.656671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 105, path id: 1 2025-06-24T20:34:37.656694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 105, path id: 4 FAKE_COORDINATOR: Erasing txId 105 2025-06-24T20:34:37.657177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:34:37.657248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:34:37.657280Z node 1 :FLAT_TX_SCHEMESHARD 
INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 105 2025-06-24T20:34:37.657312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-24T20:34:37.657345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-24T20:34:37.657832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:34:37.657913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:34:37.657957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-24T20:34:37.657989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-24T20:34:37.658029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-24T20:34:37.658120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-24T20:34:37.658396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T20:34:37.658440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-24T20:34:37.658521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T20:34:37.660570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T20:34:37.661669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T20:34:37.661762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-24T20:34:37.662088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-24T20:34:37.662163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-24T20:34:37.662734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-24T20:34:37.662837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-24T20:34:37.662871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [1:402:2391] TestWaitNotification: OK eventTxId 105 2025-06-24T20:34:37.663525Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirC" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:34:37.663761Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirC" took 207us result status StatusPathDoesNotExist 2025-06-24T20:34:37.663941Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/DirC\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/DirC" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T20:34:37.664369Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:34:37.664497Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 129us result status StatusSuccess 2025-06-24T20:34:37.664803Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 11 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 11 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 9 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "DirB" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000003 
ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::UserConditionsAtAlter [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:34:37.876821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:34:37.876916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:34:37.876963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:34:37.877006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:34:37.877051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:34:37.877083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:34:37.877136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:34:37.877207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, 
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:34:37.877988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:34:37.878377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:34:37.973865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:34:37.973911Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:37.991958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:34:37.992444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:34:37.992635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:34:38.002186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:34:38.002397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:34:38.003134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:34:38.003478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:34:38.006547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:34:38.006750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:34:38.007984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:34:38.008062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:34:38.008292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:34:38.008345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:34:38.008397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:34:38.008503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:34:38.017039Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:34:38.165726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: 
"pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:34:38.166035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:38.166270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:34:38.166318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:34:38.166575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:34:38.166701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:38.169693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:34:38.169943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:34:38.170201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:38.170269Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:34:38.170318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:34:38.170358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:34:38.172971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:38.173041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:34:38.173115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:34:38.175519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:38.175580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:38.175625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 
1:0, at tablet# 72057594046678944 2025-06-24T20:34:38.175679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:34:38.179617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:34:38.182231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:34:38.182510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:34:38.183795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:34:38.183970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:34:38.184028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:34:38.184395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:34:38.184476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:34:38.184695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:34:38.184789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:34:38.187633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:34:38.187683Z node 1 :FLAT_TX_SCHEMESHARD ... 
{ PathId: 2 PathVersion: 4 } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:34:38.284730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_user_attrs.cpp:26: TAlterUserAttrs Propose, path: /MyRoot/DirA, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:34:38.284844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-24T20:34:38.284908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 103:0 type: TxAlterUserAttributes target path: [OwnerId: 72057594046678944, LocalPathId: 2] source path: 2025-06-24T20:34:38.285058Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 103:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:34:38.285133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 103:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2025-06-24T20:34:38.287522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 103, response: Status: StatusAccepted TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:34:38.287773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusAccepted, operation: ALTER USER ATTRIBUTES, path: /MyRoot/DirA 2025-06-24T20:34:38.288004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:34:38.288043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_user_attrs.cpp:97: TAlterUserAttrs ProgressState, opId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:34:38.288117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 103 ready parts: 1/1 2025-06-24T20:34:38.288245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 103 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:34:38.290234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 103:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:103 msg type: 269090816 2025-06-24T20:34:38.290403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 103, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 103 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000004 2025-06-24T20:34:38.290753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 
72057594046678944 2025-06-24T20:34:38.290873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 103 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:34:38.290940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_user_attrs.cpp:114: TAlterUserAttrs HandleReply TEvOperationPlan, opId: 103:0, stepId:5000004, at schemeshard: 72057594046678944 2025-06-24T20:34:38.291170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:34:38.291209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:34:38.291253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:34:38.291300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:34:38.291370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T20:34:38.291426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-24T20:34:38.291477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T20:34:38.291535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:34:38.291577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:0 2025-06-24T20:34:38.291611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T20:34:38.291661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:34:38.291714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 103, publications: 1, subscribers: 0 2025-06-24T20:34:38.291752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-24T20:34:38.293986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:34:38.294049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:34:38.294271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:34:38.294322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at 
schemeshard: 72057594046678944, txId: 103, path id: 2 FAKE_COORDINATOR: Erasing txId 103 2025-06-24T20:34:38.294902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:34:38.295009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:34:38.295045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:34:38.295087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-24T20:34:38.295166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T20:34:38.295273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-24T20:34:38.297184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-24T20:34:38.297478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T20:34:38.297541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T20:34:38.298012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T20:34:38.298111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:34:38.298188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:351:2340] TestWaitNotification: OK eventTxId 103 2025-06-24T20:34:38.298735Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:34:38.298922Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA" took 210us result status StatusSuccess 2025-06-24T20:34:38.299330Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA" PathDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" 
PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 3 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrA2" Value: "ValA2" } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest >> TConsoleTests::TestGetUnknownTenantStatus >> TSchemeShardUserAttrsTest::SetAttrs [GOOD] >> TYardTest::TestLogContinuityPersistence [GOOD] >> TYardTest::TestLogContinuityPersistenceLarge >> TSchemeShardUserAttrsTest::Boot [GOOD] |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest >> TSchemeShardUserAttrsTest::VariousUse [GOOD] >> TGRpcConsoleTest::SimpleConfigTest [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::SetAttrs [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:34:39.087999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:34:39.088117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:34:39.088156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:34:39.088196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:34:39.088241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:34:39.088287Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:34:39.088350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:34:39.088417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:34:39.089217Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:34:39.089579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:34:39.175006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:34:39.175067Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:39.193286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:34:39.197668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:34:39.197871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:34:39.208170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:34:39.208395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:34:39.209070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:34:39.209421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:34:39.212574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:34:39.212761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:34:39.214052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:34:39.214138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:34:39.214284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:34:39.214333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:34:39.214374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:34:39.214551Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.222322Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:34:39.414067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:34:39.414335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.414550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:34:39.414597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:34:39.414852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:34:39.415004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:39.424148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:34:39.424416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:34:39.424664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.424719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:34:39.424758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:34:39.424793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:34:39.431990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.432074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:34:39.432132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for 
txid 1:0 3 -> 128 2025-06-24T20:34:39.440222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.440293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.440341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:34:39.440396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:34:39.444363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:34:39.452121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:34:39.452460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:34:39.453654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:34:39.453819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:34:39.453872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:34:39.454203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:34:39.454292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:34:39.454526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:34:39.454616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:34:39.464376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:34:39.464443Z node 1 :FLAT_TX_SCHEMESHARD ... 
.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:34:39.578173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 103:0 type: TxAlterUserAttributes target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:34:39.578305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 103:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:34:39.578365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 103:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2025-06-24T20:34:39.581842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 103, response: Status: StatusAccepted TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:34:39.582032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, subject: , status: StatusAccepted, operation: ALTER USER ATTRIBUTES, path: MyRoot 2025-06-24T20:34:39.582278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.582331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_user_attrs.cpp:97: TAlterUserAttrs ProgressState, opId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.582433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 103 ready parts: 1/1 2025-06-24T20:34:39.582582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 103 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:34:39.584805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 103:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:103 msg type: 269090816 2025-06-24T20:34:39.584984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 103, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 103 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000004 2025-06-24T20:34:39.585382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:34:39.585518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 103 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:34:39.585566Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_alter_user_attrs.cpp:114: TAlterUserAttrs HandleReply TEvOperationPlan, opId: 103:0, stepId:5000004, at schemeshard: 72057594046678944 2025-06-24T20:34:39.585786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:34:39.585829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:34:39.585895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:34:39.585929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:34:39.586001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:34:39.586077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-24T20:34:39.586141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:34:39.586195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:34:39.586232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:0 2025-06-24T20:34:39.586264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T20:34:39.586318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T20:34:39.586354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 103, publications: 1, subscribers: 0 2025-06-24T20:34:39.586390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 1], 6 2025-06-24T20:34:39.588975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:34:39.589032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:34:39.589295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:34:39.589353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 103, path id: 1 FAKE_COORDINATOR: Erasing txId 103 2025-06-24T20:34:39.589975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:34:39.590099Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:34:39.590142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:34:39.590210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 6 2025-06-24T20:34:39.590271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:34:39.590363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-24T20:34:39.592969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-24T20:34:39.593252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T20:34:39.593299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T20:34:39.593800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T20:34:39.593920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:34:39.593958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:340:2329] TestWaitNotification: OK eventTxId 103 2025-06-24T20:34:39.594576Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:34:39.594843Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 264us result status StatusSuccess 2025-06-24T20:34:39.595457Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 2 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: 
"root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrRoot" Value: "ValRoot" } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::Plan2SvgBad [FAIL] Test command err: 2025-06-24T20:33:01.045102Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616635953225575:2151];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:01.045160Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:01.566735Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616635953225464:2079] 1750797181040209 != 1750797181040212 2025-06-24T20:33:01.567723Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:01.578722Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:01.578866Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 14875, node 1 2025-06-24T20:33:01.586220Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:33:01.643652Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:01.643678Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:01.643685Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:01.643846Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23779 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T20:33:02.128482Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:02.231647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:02.306100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:02.313319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T20:33:02.322427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-24T20:33:05.183803Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616653133095323:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:05.184002Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:05.184587Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616653133095343:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:05.189867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:05.233423Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616653133095345:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-24T20:33:05.300785Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616653133095396:2358] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:06.052026Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616635953225575:2151];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:06.052122Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:08.124662Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616665893595531:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:08.124746Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:08.351738Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:08.356480Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:08.356596Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:08.359323Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22376, node 2 2025-06-24T20:33:08.483390Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:08.483413Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:08.483421Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:08.483551Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26648 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:33:08.872628Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:08.879964Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:33:09.010723Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:09.020691Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T20:33:09.135699Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:33:13.138203Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519616665893595531:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:13.138432Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 202 ... 
:TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-24T20:33:36.297959Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-24T20:33:36.298016Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-24T20:33:36.395781Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-24T20:33:36.395842Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-24T20:33:36.496761Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-24T20:33:36.496812Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-24T20:33:36.615637Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-24T20:33:36.615694Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-24T20:33:36.779240Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-24T20:33:36.779306Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-24T20:33:36.885706Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-24T20:33:36.885780Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-24T20:33:37.000250Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-24T20:33:37.000333Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-24T20:33:37.139567Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-24T20:33:37.139622Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-24T20:33:37.246851Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-24T20:33:37.246909Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-24T20:33:37.367854Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-24T20:33:37.367916Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-24T20:33:37.482218Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-24T20:33:37.482272Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-24T20:33:37.612463Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-24T20:33:37.612528Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-24T20:33:37.660022Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:37.662275Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:37.664471Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:33:39.553035Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-24T20:33:39.553100Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-24T20:33:44.017512Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:33:44.017610Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded assertion failed at ydb/core/viewer/viewer_ut.cpp:1914, virtual void NTestSuiteViewer::TTestCaseQueryExecuteScript::Execute_(NUnitTest::TTestContext &): (json["metadata"].GetMap().at("exec_stats").GetMap().contains("process_cpu_time_us")) {"metadata":{"result_sets_meta":[{"finished":true,"columns":[{"name":"Key","type":{"optional_type":{"item":{"type_id":"UINT64"}}}},{"name":"Value","type":{"optional_type":{"item":{"type_id":"STRING"}}}}],"number_rows":"15"}],"execution_id":"60bb3d8-d65e588-55f8ea2-9c063a28","exec_stats":{"query_plan":"{}"},"script_content":{"text":"SELECT * FROM `/Root/Test`;"},"exec_mode":"EXEC_MODE_EXECUTE","exec_status":"EXEC_STATUS_RUNNING","@type":"type.googleapis.com/Ydb.Query.ExecuteScriptMetadata"},"status":"SUCCESS","id":"ydb://scriptexec/9?id=60bb3d8-d65e588-55f8ea2-9c063a28"} TBackTrace::Capture()+28 (0x19953A5C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x19E0F510) NTestSuiteViewer::TTestCaseQueryExecuteScript::Execute_(NUnitTest::TTestContext&)+11601 (0x194F3241) std::__y1::__function::__func, void ()>::operator()()+280 (0x19508FC8) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x19E466F6) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x19E16099) NTestSuiteViewer::TCurrentTest::Execute()+1204 (0x19507E74) NUnitTest::TTestFactory::Execute()+2438 (0x19E17966) NUnitTest::RunMain(int, char**)+5213 (0x19E40C6D) ??+0 (0x7FB65EE98D90) __libc_start_main+128 (0x7FB65EE98E40) _start+41 (0x16D34029) 2025-06-24T20:33:47.244100Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519616834998636763:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:47.244191Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:47.483753Z node 6 :CONFIGS_DISPATCHER 
ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519616834998636744:2079] 1750797227243447 != 1750797227243450 2025-06-24T20:33:47.497729Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:47.508049Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:47.508178Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:47.511570Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27352, node 6 2025-06-24T20:33:47.559951Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:47.559997Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:47.560008Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:47.560188Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21995 2025-06-24T20:33:48.271543Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:33:52.053255Z node 6 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (8C3E2D8D): Could not find correct token validator 2025-06-24T20:33:52.245144Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519616834998636763:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:52.245274Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:55.769743Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519616866626122903:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:55.769821Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:56.039208Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:56.039349Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:56.039939Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:56.041735Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519616866626122879:2079] 1750797235767253 != 1750797235767256 2025-06-24T20:33:56.043524Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14267, node 7 2025-06-24T20:33:56.132092Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-24T20:33:56.132124Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:56.132139Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:56.132350Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19564 2025-06-24T20:33:56.795748Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:00.775314Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519616866626122903:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:00.775437Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; (TSystemError) (Error 11: Resource temporarily unavailable) util/network/socket.cpp:910: can not read from socket input stream >> ResourcePoolClassifiersSysView::TestResourcePoolClassifiersSysViewOnServerless [GOOD] >> ResourcePoolClassifiersSysView::TestResourcePoolClassifiersSysViewFilters |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::Boot [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:34:39.577027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:34:39.577119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:34:39.577156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:34:39.577193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:34:39.577240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:34:39.577293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:34:39.577359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:34:39.577435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:34:39.578279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:34:39.578634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:34:39.670459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:34:39.670517Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:39.688904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:34:39.693488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:34:39.693675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:34:39.702844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:34:39.703075Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:34:39.703840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:34:39.704211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:34:39.707569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:34:39.707766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:34:39.709111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:34:39.709178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:34:39.709313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:34:39.709375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:34:39.709421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:34:39.709601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.717444Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:34:39.876632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" 
StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:34:39.876902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.877145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:34:39.877197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:34:39.877498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:34:39.877695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:39.880716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:34:39.881016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:34:39.881320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.881390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:34:39.881436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:34:39.881480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:34:39.884385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.884455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:34:39.884524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:34:39.886949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.887018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.887067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState 
leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:34:39.887155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:34:39.890506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:34:39.892436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:34:39.892667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:34:39.893689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:34:39.893806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:34:39.893843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:34:39.894092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:34:39.894139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:34:39.894398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:34:39.894478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:34:39.896878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:34:39.896929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:34:39.897138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:34:39.897182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, 
txId: 1, path id: 1 2025-06-24T20:34:39.897698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.897758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T20:34:39.897859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T20:34:39.897917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:34:39.897971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T20:34:39.898006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:34:39.898045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T20:34:39.898090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T20:34:39.898124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T20:34:39.898187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T20:34:39.898260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:34:39.898308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T20:34:39.898359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T20:34:39.900509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T20:34:39.900646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T20:34:39.900696Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T20:34:39.900735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T20:34:39.900807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:34:39.900973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T20:34:39.904726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T20:34:39.905207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 >> TLocksFatTest::PointSetRemove [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::VariousUse [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:34:39.273366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:34:39.273469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:34:39.273511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:34:39.273546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:34:39.273606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:34:39.273646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:34:39.273700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:34:39.273783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:34:39.274562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:34:39.274930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:34:39.363353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:34:39.363418Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:39.392882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:34:39.397392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:34:39.397607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:34:39.410952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: 
TTxUpgradeSchema.Complete 2025-06-24T20:34:39.411204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:34:39.411918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:34:39.412367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:34:39.415657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:34:39.415845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:34:39.417185Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:34:39.417251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:34:39.417386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:34:39.417435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:34:39.417490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:34:39.417660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.425172Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:34:39.586629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:34:39.586869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.587071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:34:39.587321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:34:39.587623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:34:39.587768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, 
suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:39.592277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:34:39.592542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:34:39.592769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.592825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:34:39.592864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:34:39.592900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:34:39.596207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.596276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:34:39.596362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:34:39.600784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.600854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.600915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:34:39.600968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:34:39.604738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:34:39.612093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:34:39.612387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send 
Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:34:39.613488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:34:39.613642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:34:39.613690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:34:39.613998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:34:39.614061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:34:39.614270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:34:39.614377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:34:39.617248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:34:39.617333Z node 1 :FLAT_TX_SCHEMESHARD ... 
shToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-24T20:34:39.976077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:34:39.976123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:34:39.976314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T20:34:39.976389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-24T20:34:39.976570Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:34:39.976609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 112, path id: 1 2025-06-24T20:34:39.976650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 112, path id: 3 2025-06-24T20:34:39.976684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 112, path id: 4 FAKE_COORDINATOR: Erasing txId 112 2025-06-24T20:34:39.977556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 112 2025-06-24T20:34:39.977637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 112 2025-06-24T20:34:39.977686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 112 2025-06-24T20:34:39.977734Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-24T20:34:39.977775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T20:34:39.978424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 112 2025-06-24T20:34:39.978506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 
PathOwnerId: 72057594046678944, cookie: 112 2025-06-24T20:34:39.978548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 112 2025-06-24T20:34:39.978578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 7 2025-06-24T20:34:39.978605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T20:34:39.979262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-06-24T20:34:39.979350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-06-24T20:34:39.979377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 112 2025-06-24T20:34:39.979412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-24T20:34:39.979445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-24T20:34:39.979526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 112, subscribers: 0 2025-06-24T20:34:39.979986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T20:34:39.980038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-24T20:34:39.980120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T20:34:39.982806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-24T20:34:39.983803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-24T20:34:39.984149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-24T20:34:39.985143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 
112, wait until txId: 112 TestWaitNotification wait txId: 112 2025-06-24T20:34:39.985625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 112: send EvNotifyTxCompletion 2025-06-24T20:34:39.985689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 112 2025-06-24T20:34:39.986378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 112, at schemeshard: 72057594046678944 2025-06-24T20:34:39.986496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 112: got EvNotifyTxCompletionResult 2025-06-24T20:34:39.986535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 112: satisfy waiter [1:496:2485] TestWaitNotification: OK eventTxId 112 2025-06-24T20:34:39.987449Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirB" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:34:39.987685Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirB" took 283us result status StatusSuccess 2025-06-24T20:34:39.988113Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirB" PathDescription { Self { Name: "DirB" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 109 CreateStep: 5000008 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrB1" Value: "ValB1" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 113 2025-06-24T20:34:39.991660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpRmDir Drop { Name: "DirB" } ApplyIf { PathId: 2 PathVersion: 8 } ApplyIf { PathId: 3 PathVersion: 7 } ApplyIf { PathId: 4 
PathVersion: 3 } } TxId: 113 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:34:39.991854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_rmdir.cpp:29: TRmDir Propose, path: /MyRoot/DirB, pathId: 0, opId: 113:0, at schemeshard: 72057594046678944 2025-06-24T20:34:39.991971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 113:1, propose status:StatusPreconditionFailed, reason: fail user constraint: ApplyIf section: no path with id [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-24T20:34:39.995087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 113, response: Status: StatusPreconditionFailed Reason: "fail user constraint: ApplyIf section: no path with id [OwnerId: 72057594046678944, LocalPathId: 4]" TxId: 113 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:34:39.995344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 113, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: fail user constraint: ApplyIf section: no path with id [OwnerId: 72057594046678944, LocalPathId: 4], operation: DROP DIRECTORY, path: /MyRoot/DirB TestModificationResult got TxId: 113, wait until txId: 113 >> TFetchRequestTests::BadTopicName [GOOD] >> TFetchRequestTests::CheckAccess |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest >> TGRpcConsoleTest::SimpleConfigTest [GOOD] |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest >> KqpScanSpilling::SelfJoinQueryService |89.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut |89.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut |89.4%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpWorkloadServiceTables::TestLeaseExpiration [GOOD] >> KqpWorkloadServiceTables::TestLeaseUpdates >> Viewer::JsonStorageListingV1 [GOOD] >> Viewer::JsonStorageListingV1GroupIdFilter >> TxUsage::WriteToTopic_Demo_20_RestartNo_Table >> TxUsage::Sinks_Oltp_WriteToTopic_1_Table |89.4%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_minstep/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TxUsage::Sinks_Oltp_WriteToTopic_2_Table |89.4%| [LD] {RESULT} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut >> DataShardVolatile::DistributedWriteThenReadIterator [GOOD] >> DataShardVolatile::DistributedWriteThenReadIteratorStream >> TxUsage::WriteToTopic_Invalid_Session_Table >> ResourcePoolClassifiersDdl::TestDropResourcePool [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksFatTest::PointSetRemove [GOOD] Test command err: 2025-06-24T20:34:14.162216Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616949287882350:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:14.163230Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ab2/r3tmp/tmpBmZzAQ/pdisk_1.dat 2025-06-24T20:34:14.688761Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:14.688863Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:14.692573Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:14.715992Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616949287882330:2079] 1750797254160552 != 1750797254160555 2025-06-24T20:34:14.728149Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TClient is connected to server localhost:24732 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T20:34:15.098772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-24T20:34:15.133847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T20:34:15.140211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:15.214746Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:15.316921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:15.402238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:19.163360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616949287882350:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:19.163492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:26.547480Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519617002478258770:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:26.547539Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ab2/r3tmp/tmpHm7hHP/pdisk_1.dat 2025-06-24T20:34:27.016550Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:27.016640Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:27.027612Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:27.031466Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519617002478258747:2079] 1750797266536661 != 1750797266536664 2025-06-24T20:34:27.067888Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:27.603658Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected 
to server localhost:27191 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T20:34:27.721456Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:27.728707Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T20:34:27.755952Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:27.882253Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:28.002128Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:34:31.550594Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519617002478258770:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:31.550674Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:34.235527Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519617035544642585:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:34.235581Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ab2/r3tmp/tmp55inYQ/pdisk_1.dat 2025-06-24T20:34:34.398430Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:34.403444Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519617035544642567:2079] 1750797274234845 != 1750797274234848 2025-06-24T20:34:34.415119Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:34.415671Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:34.417330Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8495 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-24T20:34:34.732630Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:34:34.755379Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T20:34:34.765065Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:34.839468Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:34.938700Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:35.251355Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:39.240183Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519617035544642585:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:39.240244Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; >> TStorageBalanceTest::TestScenario2 [GOOD] >> TStorageBalanceTest::TestScenario3 >> ViewerTopicDataTests::TopicDataTest [GOOD] >> TxUsage::WriteToTopic_Demo_1_Table >> TxUsage::WriteToTopic_Demo_2_Table >> TxUsage::TwoSessionOneConsumer_Table >> BasicUsage::ReadWithoutConsumerWithRestarts [GOOD] >> BasicUsage::ReadWithRestarts >> TxUsage::WriteToTopic_Demo_3_Table >> KqpScanSpilling::SpillingInRuntimeNodes+EnabledSpilling >> TYardTest::TestLogContinuityPersistenceLarge [GOOD] >> TYardTest::TestLogWriteLsnConsistency >> TConsoleTests::TestGetUnknownTenantStatus [GOOD] >> TConsoleTests::TestGetUnknownTenantStatusExtSubdomain >> TConsoleTests::TestRestartConsoleAndPoolsExtSubdomain [GOOD] >> TConsoleTests::TestSetDefaultStorageUnitsQuota >> TYardTest::TestLogWriteLsnConsistency [GOOD] >> TYardTest::TestLotsOfTinyAsyncLogLatency >> TxUsage::WriteToTopic_Demo_4_Table >> KqpScanLogs::WideCombine-EnabledLogs |89.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow |89.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow |89.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow >> KqpScanSpilling::HandleErrorsCorrectly ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> ResourcePoolClassifiersDdl::TestDropResourcePool [GOOD] Test command err: 2025-06-24T20:33:11.870828Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616681084367208:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:11.878578Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00470b/r3tmp/tmpcjs66S/pdisk_1.dat 2025-06-24T20:33:12.436811Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616681084367094:2079] 1750797191806201 != 1750797191806204 2025-06-24T20:33:12.445930Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:12.463694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:12.463850Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:12.467583Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15820, node 1 2025-06-24T20:33:12.563373Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:12.563401Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:12.563427Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:12.563584Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1362 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T20:33:12.871320Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:13.052782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:33:13.072730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:33:15.499648Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-24T20:33:15.530858Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-24T20:33:15.530889Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-24T20:33:15.530919Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-24T20:33:15.534320Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=ZTdmZTU5YjctNmRmMmY3MGMtY2UzZDk3MGItZWYyOTg0MTE=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZTdmZTU5YjctNmRmMmY3MGMtY2UzZDk3MGItZWYyOTg0MTE= 2025-06-24T20:33:15.534981Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616698264236893:2287], Start check tables existence, number paths: 2 2025-06-24T20:33:15.535129Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=ZTdmZTU5YjctNmRmMmY3MGMtY2UzZDk3MGItZWYyOTg0MTE=, ActorId: [1:7519616698264236895:2289], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:33:15.540758Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616698264236893:2287], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-24T20:33:15.540971Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616698264236893:2287], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-24T20:33:15.541010Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616698264236893:2287], Successfully finished 2025-06-24T20:33:15.541082Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-24T20:33:15.556344Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616698264236921:2300], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:33:15.562104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:15.566404Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616698264236921:2300], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-06-24T20:33:15.566609Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616698264236921:2300], DatabaseId: Root, PoolId: sample_pool_id, Tablet 
to pipe successfully connected 2025-06-24T20:33:15.587081Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616698264236921:2300], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:33:15.653083Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616698264236921:2300], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:33:15.658460Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616698264236972:2332] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:15.658603Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616698264236921:2300], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-24T20:33:15.665474Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=NjIyNDA1NjYtZGI4MGY3ODctNDA1MGRhNmEtMmI5YzUwZjg=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NjIyNDA1NjYtZGI4MGY3ODctNDA1MGRhNmEtMmI5YzUwZjg= 2025-06-24T20:33:15.665826Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-06-24T20:33:15.665838Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-24T20:33:15.665879Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=NjIyNDA1NjYtZGI4MGY3ODctNDA1MGRhNmEtMmI5YzUwZjg=, ActorId: [1:7519616698264236979:2291], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:33:15.666051Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=NjIyNDA1NjYtZGI4MGY3ODctNDA1MGRhNmEtMmI5YzUwZjg=, ActorId: [1:7519616698264236979:2291], ActorState: ReadyState, TraceId: 01jyhta6chbj31vqz0xrp40fkh, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [1:7519616698264236978:2337] database: Root databaseId: /Root pool id: sample_pool_id 2025-06-24T20:33:15.666084Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [1:7519616698264236979:2291], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=NjIyNDA1NjYtZGI4MGY3ODctNDA1MGRhNmEtMmI5YzUwZjg= 2025-06-24T20:33:15.666126Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616698264236981:2292], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-06-24T20:33:15.666188Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7519616698264236982:2293], Database: /Root, Start database fetching 2025-06-24T20:33:15.666624Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7519616698264236982:2293], Database: /Root, Database info successfully fetched, serverless: 0 2025-06-24T20:33:15.666686Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:240: [WorkloadService] [Service] Successfully fetched database info, DatabaseId: /Root, Serverless: 0 2025-06-24T20:33:15.666758Z node 1 
:KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [1:7519616698264236991:2294], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=NjIyNDA1NjYtZGI4MGY3ODctNDA1MGRhNmEtMmI5YzUwZjg=, Start pool fetching 2025-06-24T20:33:15.666790Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616698264236992:2295], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-06-24T20:33:15.667586Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616698264236992:2295], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-24T20:33:15.667673Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616698264236981:2292], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully fetche ... tx.DeferredEffects.size(): 0 2025-06-24T20:34:42.040291Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1503: SessionId: ydb://session/3?node_id=8&id=ZTM4N2E3ZDktN2ZjZmQyZDMtM2VlNDI4My04ZDRiNTdjYw==, ActorId: [8:7519617064007272442:2629], ActorState: ExecuteState, TraceId: 01jyhtctg7bdye0eb06k2vr2ps, Sending to Executer TraceId: 0 8 2025-06-24T20:34:42.040374Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1561: SessionId: ydb://session/3?node_id=8&id=ZTM4N2E3ZDktN2ZjZmQyZDMtM2VlNDI4My04ZDRiNTdjYw==, ActorId: [8:7519617064007272442:2629], ActorState: ExecuteState, TraceId: 01jyhtctg7bdye0eb06k2vr2ps, Created new KQP executer: [8:7519617068302239791:2629] isRollback: 0 2025-06-24T20:34:42.045859Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=8&id=ZTM4N2E3ZDktN2ZjZmQyZDMtM2VlNDI4My04ZDRiNTdjYw==, ActorId: [8:7519617064007272442:2629], ActorState: ExecuteState, TraceId: 01jyhtctg7bdye0eb06k2vr2ps, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-06-24T20:34:42.046012Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=8&id=ZTM4N2E3ZDktN2ZjZmQyZDMtM2VlNDI4My04ZDRiNTdjYw==, ActorId: [8:7519617064007272442:2629], ActorState: ExecuteState, TraceId: 01jyhtctg7bdye0eb06k2vr2ps, txInfo Status: Active Kind: ReadOnly TotalDuration: 0 ServerDuration: 9.215 QueriesCount: 2 2025-06-24T20:34:42.046139Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=8&id=ZTM4N2E3ZDktN2ZjZmQyZDMtM2VlNDI4My04ZDRiNTdjYw==, ActorId: [8:7519617064007272442:2629], ActorState: ExecuteState, TraceId: 01jyhtctg7bdye0eb06k2vr2ps, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-24T20:34:42.046504Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=ZTM4N2E3ZDktN2ZjZmQyZDMtM2VlNDI4My04ZDRiNTdjYw==, ActorId: [8:7519617064007272442:2629], ActorState: ExecuteState, TraceId: 01jyhtctg7bdye0eb06k2vr2ps, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:34:42.046545Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=ZTM4N2E3ZDktN2ZjZmQyZDMtM2VlNDI4My04ZDRiNTdjYw==, ActorId: [8:7519617064007272442:2629], ActorState: ExecuteState, TraceId: 01jyhtctg7bdye0eb06k2vr2ps, EndCleanup, isFinal: 0 2025-06-24T20:34:42.046608Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: 
ydb://session/3?node_id=8&id=ZTM4N2E3ZDktN2ZjZmQyZDMtM2VlNDI4My04ZDRiNTdjYw==, ActorId: [8:7519617064007272442:2629], ActorState: ExecuteState, TraceId: 01jyhtctg7bdye0eb06k2vr2ps, Sent query response back to proxy, proxyRequestId: 55, proxyId: [8:7519616990992826598:2162] 2025-06-24T20:34:42.060172Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=8&id=ZTM4N2E3ZDktN2ZjZmQyZDMtM2VlNDI4My04ZDRiNTdjYw==, ActorId: [8:7519617064007272442:2629], ActorState: ReadyState, Session closed due to explicit close event 2025-06-24T20:34:42.060268Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1503: SessionId: ydb://session/3?node_id=8&id=ZTM4N2E3ZDktN2ZjZmQyZDMtM2VlNDI4My04ZDRiNTdjYw==, ActorId: [8:7519617064007272442:2629], ActorState: ReadyState, Sending to Executer TraceId: 0 8 2025-06-24T20:34:42.060342Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1561: SessionId: ydb://session/3?node_id=8&id=ZTM4N2E3ZDktN2ZjZmQyZDMtM2VlNDI4My04ZDRiNTdjYw==, ActorId: [8:7519617064007272442:2629], ActorState: ReadyState, Created new KQP executer: [8:7519617068302239801:2629] isRollback: 1 2025-06-24T20:34:42.060390Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=ZTM4N2E3ZDktN2ZjZmQyZDMtM2VlNDI4My04ZDRiNTdjYw==, ActorId: [8:7519617064007272442:2629], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 1 TransactionsToBeAborted.size(): 1 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:34:42.073622Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=ZTM4N2E3ZDktN2ZjZmQyZDMtM2VlNDI4My04ZDRiNTdjYw==, ActorId: [8:7519617064007272442:2629], ActorState: CleanupState, EndCleanup, isFinal: 1 2025-06-24T20:34:42.073667Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=8&id=ZTM4N2E3ZDktN2ZjZmQyZDMtM2VlNDI4My04ZDRiNTdjYw==, ActorId: [8:7519617064007272442:2629], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T20:34:42.073808Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=8&id=ZTM4N2E3ZDktN2ZjZmQyZDMtM2VlNDI4My04ZDRiNTdjYw==, ActorId: [8:7519617064007272442:2629], ActorState: unknown state, Session actor destroyed 2025-06-24T20:34:42.099503Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: SessionId: ydb://session/3?node_id=8&id=YWY5MjNlZWMtZDBlZDQxYjItYmViMjQ3OS02MjE0YWQ1Nw==, ActorId: [8:7519617068302239781:2639], ActorState: ExecuteState, TraceId: 01jyhtctpp0z9az6s0536e8t91, ExecutePhyTx, tx: 0x000050C000574858 literal: 0 commit: 1 txCtx.DeferredEffects.size(): 0 2025-06-24T20:34:42.099571Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1503: SessionId: ydb://session/3?node_id=8&id=YWY5MjNlZWMtZDBlZDQxYjItYmViMjQ3OS02MjE0YWQ1Nw==, ActorId: [8:7519617068302239781:2639], ActorState: ExecuteState, TraceId: 01jyhtctpp0z9az6s0536e8t91, Sending to Executer TraceId: 0 8 2025-06-24T20:34:42.099686Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1561: SessionId: ydb://session/3?node_id=8&id=YWY5MjNlZWMtZDBlZDQxYjItYmViMjQ3OS02MjE0YWQ1Nw==, ActorId: [8:7519617068302239781:2639], ActorState: ExecuteState, TraceId: 01jyhtctpp0z9az6s0536e8t91, Created new KQP executer: [8:7519617068302239805:2639] isRollback: 0 2025-06-24T20:34:42.101572Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1852: SessionId: ydb://session/3?node_id=8&id=YWY5MjNlZWMtZDBlZDQxYjItYmViMjQ3OS02MjE0YWQ1Nw==, ActorId: [8:7519617068302239781:2639], ActorState: ExecuteState, TraceId: 
01jyhtctpp0z9az6s0536e8t91, Forwarded TEvStreamData to [8:7519617064007272484:2914] 2025-06-24T20:34:42.102425Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=8&id=YWY5MjNlZWMtZDBlZDQxYjItYmViMjQ3OS02MjE0YWQ1Nw==, ActorId: [8:7519617068302239781:2639], ActorState: ExecuteState, TraceId: 01jyhtctpp0z9az6s0536e8t91, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-06-24T20:34:42.102577Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=8&id=YWY5MjNlZWMtZDBlZDQxYjItYmViMjQ3OS02MjE0YWQ1Nw==, ActorId: [8:7519617068302239781:2639], ActorState: ExecuteState, TraceId: 01jyhtctpp0z9az6s0536e8t91, txInfo Status: Committed Kind: Pure TotalDuration: 3.167 ServerDuration: 3.101 QueriesCount: 2 2025-06-24T20:34:42.102653Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=8&id=YWY5MjNlZWMtZDBlZDQxYjItYmViMjQ3OS02MjE0YWQ1Nw==, ActorId: [8:7519617068302239781:2639], ActorState: ExecuteState, TraceId: 01jyhtctpp0z9az6s0536e8t91, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-24T20:34:42.102859Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=YWY5MjNlZWMtZDBlZDQxYjItYmViMjQ3OS02MjE0YWQ1Nw==, ActorId: [8:7519617068302239781:2639], ActorState: ExecuteState, TraceId: 01jyhtctpp0z9az6s0536e8t91, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:34:42.102897Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=YWY5MjNlZWMtZDBlZDQxYjItYmViMjQ3OS02MjE0YWQ1Nw==, ActorId: [8:7519617068302239781:2639], ActorState: ExecuteState, TraceId: 01jyhtctpp0z9az6s0536e8t91, EndCleanup, isFinal: 1 2025-06-24T20:34:42.102951Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=8&id=YWY5MjNlZWMtZDBlZDQxYjItYmViMjQ3OS02MjE0YWQ1Nw==, ActorId: [8:7519617068302239781:2639], ActorState: ExecuteState, TraceId: 01jyhtctpp0z9az6s0536e8t91, Sent query response back to proxy, proxyRequestId: 58, proxyId: [8:7519616990992826598:2162] 2025-06-24T20:34:42.102982Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=8&id=YWY5MjNlZWMtZDBlZDQxYjItYmViMjQ3OS02MjE0YWQ1Nw==, ActorId: [8:7519617068302239781:2639], ActorState: unknown state, TraceId: 01jyhtctpp0z9az6s0536e8t91, Cleanup temp tables: 0 2025-06-24T20:34:42.103296Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=8&id=YWY5MjNlZWMtZDBlZDQxYjItYmViMjQ3OS02MjE0YWQ1Nw==, ActorId: [8:7519617068302239781:2639], ActorState: unknown state, TraceId: 01jyhtctpp0z9az6s0536e8t91, Session actor destroyed 2025-06-24T20:34:42.119061Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=8&id=OTUxNzgyYjUtOTYyMWViYzktNzExY2ZiZTktMTBiZTc1YjY=, ActorId: [8:7519617021057598194:2294], ActorState: ReadyState, Session closed due to explicit close event 2025-06-24T20:34:42.119122Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=OTUxNzgyYjUtOTYyMWViYzktNzExY2ZiZTktMTBiZTc1YjY=, ActorId: [8:7519617021057598194:2294], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:34:42.119179Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: 
ydb://session/3?node_id=8&id=OTUxNzgyYjUtOTYyMWViYzktNzExY2ZiZTktMTBiZTc1YjY=, ActorId: [8:7519617021057598194:2294], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T20:34:42.119209Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=8&id=OTUxNzgyYjUtOTYyMWViYzktNzExY2ZiZTktMTBiZTc1YjY=, ActorId: [8:7519617021057598194:2294], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T20:34:42.119288Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=8&id=OTUxNzgyYjUtOTYyMWViYzktNzExY2ZiZTktMTBiZTc1YjY=, ActorId: [8:7519617021057598194:2294], ActorState: unknown state, Session actor destroyed 2025-06-24T20:34:42.382054Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=8&id=OGQ4NjMxMWEtMzJjZTgwNTEtNjAxZDEwYTUtNjhmODY1OTM=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id OGQ4NjMxMWEtMzJjZTgwNTEtNjAxZDEwYTUtNjhmODY1OTM= 2025-06-24T20:34:42.382602Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=8&id=OGQ4NjMxMWEtMzJjZTgwNTEtNjAxZDEwYTUtNjhmODY1OTM=, ActorId: [8:7519617068302239815:2648], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:34:42.383297Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=8&id=OGQ4NjMxMWEtMzJjZTgwNTEtNjAxZDEwYTUtNjhmODY1OTM=, ActorId: [8:7519617068302239815:2648], ActorState: ReadyState, TraceId: 01jyhtcv2f4tyjdfsapd1xsmef, received request, proxyRequestId: 60 prepared: 0 tx_control: 1 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DML text: SELECT * FROM `//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers`; rpcActor: [8:7519617068302239816:2649] database: /Root databaseId: /Root pool id: default 2025-06-24T20:34:42.383327Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=8&id=OGQ4NjMxMWEtMzJjZTgwNTEtNjAxZDEwYTUtNjhmODY1OTM=, ActorId: [8:7519617068302239815:2648], ActorState: ReadyState, TraceId: 01jyhtcv2f4tyjdfsapd1xsmef, request placed into pool from cache: default 2025-06-24T20:34:42.383442Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:575: SessionId: ydb://session/3?node_id=8&id=OGQ4NjMxMWEtMzJjZTgwNTEtNjAxZDEwYTUtNjhmODY1OTM=, ActorId: [8:7519617068302239815:2648], ActorState: ExecuteState, TraceId: 01jyhtcv2f4tyjdfsapd1xsmef, Sending CompileQuery request >> Viewer::ServerlessWithExclusiveNodesCheckTable [GOOD] >> DataShardVolatile::DistributedWriteShardRestartBeforePlan+UseSink [GOOD] >> DataShardVolatile::DistributedWriteShardRestartBeforePlan-UseSink >> SystemView::TopPartitionsByCpuTables [GOOD] >> SystemView::TopPartitionsByCpuRanges >> KqpWorkloadService::TestCpuLoadThresholdRefresh [GOOD] >> KqpWorkloadService::TestHandlerActorCleanup >> ExternalBlobsMultipleChannels::Simple [GOOD] >> KqpRe2::IncorrectRegexNoError ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::ServerlessWithExclusiveNodesCheckTable [GOOD] Test command err: 2025-06-24T20:33:00.813550Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616631812530644:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:00.813619Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
SectorMap:test-client[:2000] 2025-06-24T20:33:01.415868Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:01.416025Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:01.419177Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:33:01.479952Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9700, node 1 2025-06-24T20:33:01.554605Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:01.554629Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:01.554636Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:01.554782Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:33:01.842420Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3915 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:02.154872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
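A minimal client-side sketch of the round trip that the KQP_SESSION traces earlier in this section record (session created, request received, CompileQuery, ExecutePhyTx, commit, cleanup, session actor destroyed). It assumes the YDB Python SDK's table-service API (ydb.Driver, ydb.SessionPool); the endpoint and database values are placeholders and the snippet is illustrative only, not part of the captured test output.

    import ydb

    # Placeholder connection parameters; a real test cluster supplies its own
    # endpoint and database root.
    driver = ydb.Driver(endpoint="grpc://localhost:2136", database="/Root")
    driver.wait(timeout=5)

    def read_classifiers(session):
        # On the server this single request shows up as the KQP_SESSION sequence
        # seen in the log: received request -> Sending CompileQuery request ->
        # ExecutePhyTx -> txInfo Status: Committed -> Cleanup -> Session actor destroyed.
        return session.transaction(ydb.SerializableReadWrite()).execute(
            "SELECT * FROM `//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers`;",
            commit_tx=True,
        )

    pool = ydb.SessionPool(driver)
    result_sets = pool.retry_operation_sync(read_classifiers)
    print(len(result_sets[0].rows), "classifier rows")
    driver.stop()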
2025-06-24T20:33:02.192690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:33:02.228569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:02.232959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T20:33:02.242029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-24T20:33:05.191135Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616653287367763:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:05.191268Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:05.195260Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616653287367789:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:05.200829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:05.231411Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616653287367791:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-24T20:33:05.309859Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616653287367842:2360] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:05.817405Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616631812530644:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:05.817496Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:33:08.482718Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616665334925064:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:08.483679Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:08.705531Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:08.708846Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519616665334925041:2079] 1750797188443318 != 1750797188443321 TServer::EnableGrpc on GrpcPort 12901, node 2 2025-06-24T20:33:08.811859Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:08.811890Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:08.811898Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:08.812030Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:33:08.831965Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:08.832075Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:08.837865Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7057 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:09.312087Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:09.326689Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:33:09.371485Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:09.376300Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T20:33:09.382594Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-24T20:33:09.505457Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:33:12.736968Z node 2 :KQP_WORKLOAD_SERV ... 46744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:23.185007Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:33:23.202143Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:33:23.212319Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:23.216687Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T20:33:27.141918Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519616748341678333:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:27.142089Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:27.142509Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519616748341678345:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:27.148948Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:27.175694Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519616748341678347:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-24T20:33:27.273752Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519616748341678399:2359] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:39.282373Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:269:2229], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:39.282869Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:39.283052Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:39.790118Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:39.987672Z node 5 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:33:40.022049Z node 5 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:434} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-24T20:33:40.802441Z node 5 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 22181, node 5 TClient is connected to server localhost:65102 2025-06-24T20:33:41.388358Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:41.388467Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:41.388539Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:41.389275Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:33:55.213702Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:507:2388], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:55.214204Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:55.214453Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:55.705568Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:55.882794Z node 7 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:33:55.923834Z node 7 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:434} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-24T20:33:56.790141Z node 7 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 8131, node 7 TClient is connected to server localhost:22228 2025-06-24T20:33:57.482266Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:57.482375Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:57.482454Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:57.483425Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:13.860185Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:506:2388], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:13.861083Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:13.861329Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:34:14.476165Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:14.626218Z node 10 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:34:14.669530Z node 10 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:434} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-24T20:34:15.703459Z node 10 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 7850, node 10 TClient is connected to server localhost:24485 2025-06-24T20:34:16.462972Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:16.463092Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:16.463210Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:16.464055Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:36.499236Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [13:585:2389], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:36.499878Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:36.500202Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:34:37.351567Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:37.524597Z node 13 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:34:37.562363Z node 13 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:434} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-24T20:34:38.729871Z node 13 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 8025, node 13 TClient is connected to server localhost:19802 2025-06-24T20:34:39.857591Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:39.857715Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:39.857812Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:39.858986Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> ViewerTopicDataTests::TopicDataTest [GOOD] Test command err: 2025-06-24T20:33:37.545093Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:1559:2389], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:37.545412Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:1171:2333], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:37.545624Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:1174:2333], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:37.547255Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:37.547482Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:37.547573Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:37.547919Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:37.547984Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:37.548038Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:37.548851Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:1552:2333], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:37.549524Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:37.549992Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:37.550935Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:1168:2330], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:37.551861Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:37.552268Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:37.974599Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:38.180205Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:33:38.207775Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:434} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-24T20:33:38.879540Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 17204, node 1 TClient is connected to server localhost:19534 2025-06-24T20:33:39.231457Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:39.231535Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:39.231578Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:39.231900Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:18.049884Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519616963461721943:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:18.050558Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:34:18.355576Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:18.374758Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:18.374924Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:18.377809Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8716, node 6 2025-06-24T20:34:18.654765Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:18.654801Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:18.654814Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:18.655015Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:18.985936Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13458 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:19.323875Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:19.392260Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:34:19.430934Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:34:19.436238Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T20:34:22.799903Z node 6 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-24T20:34:22.799979Z node 6 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-24T20:34:23.050783Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519616963461721943:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:23.050888Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:24.591633Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519616993526493565:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:24.591847Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:24.592595Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519616993526493585:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:24.598987Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:24.623310Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-24T20:34:24.623711Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519616993526493587:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction ... T20:34:37.886277Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Send 1 message(s) (4 left), first sequence number is 16 2025-06-24T20:34:37.896166Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session: try to update token 2025-06-24T20:34:37.896222Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Send 1 message(s) (3 left), first sequence number is 17 2025-06-24T20:34:37.932704Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session: try to update token 2025-06-24T20:34:37.932772Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Send 1 message(s) (2 left), first sequence number is 18 2025-06-24T20:34:37.949977Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session: try to update token 2025-06-24T20:34:37.950058Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Send 1 message(s) (1 left), first sequence number is 19 2025-06-24T20:34:37.963389Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session got write response: sequence_numbers: 14 sequence_numbers: 15 offsets: 53 offsets: 54 already_written: false already_written: false write_statistics { persist_duration_ms: 3 queued_in_partition_duration_ms: 115 } 2025-06-24T20:34:37.963566Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session: acknoledged message 14 2025-06-24T20:34:37.963605Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session: acknoledged message 15 2025-06-24T20:34:37.972471Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session: try to update token 2025-06-24T20:34:37.972528Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Send 1 message(s) (0 left), first sequence number is 20 2025-06-24T20:34:38.030932Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session got write response: sequence_numbers: 16 offsets: 55 already_written: false write_statistics { persist_duration_ms: 1 queued_in_partition_duration_ms: 1 } 2025-06-24T20:34:38.030984Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session: acknoledged message 16 2025-06-24T20:34:38.051499Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session got write response: sequence_numbers: 17 offsets: 56 already_written: false write_statistics { persist_duration_ms: 14 queued_in_partition_duration_ms: 67 } 2025-06-24T20:34:38.051560Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session: acknoledged message 17 2025-06-24T20:34:38.051839Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session got write response: sequence_numbers: 18 offsets: 57 already_written: false write_statistics { persist_duration_ms: 14 queued_in_partition_duration_ms: 67 } 2025-06-24T20:34:38.051862Z :DEBUG: [] MessageGroupId [producer3] SessionId 
[producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session: acknoledged message 18 2025-06-24T20:34:38.071413Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session got write response: sequence_numbers: 19 offsets: 58 already_written: false write_statistics { persist_duration_ms: 19 queued_in_partition_duration_ms: 10 } 2025-06-24T20:34:38.071475Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session: acknoledged message 19 2025-06-24T20:34:38.071765Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session got write response: sequence_numbers: 20 offsets: 59 already_written: false write_statistics { persist_duration_ms: 19 } 2025-06-24T20:34:38.071794Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session: acknoledged message 20 2025-06-24T20:34:38.119246Z :INFO: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session will now close 2025-06-24T20:34:38.119370Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session: aborting 2025-06-24T20:34:38.120119Z :INFO: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session: gracefully shut down, all writes complete 2025-06-24T20:34:38.120496Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|3c3e8198-3cc014b9-b0963f4e-6f9baf47_0] Write session: destroy 2025-06-24T20:34:39.300983Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519617057948428388:2354], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:39.301098Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:39.301599Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519617057948428400:2357], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:39.306309Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:39.323605Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519617057948428402:2358], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 2025-06-24T20:34:39.375862Z :DEBUG: [] MessageGroupId [producer4] SessionId [] Write session: try to update token 2025-06-24T20:34:39.376355Z :INFO: [] MessageGroupId [producer4] SessionId [] Write session: Do CDS request 2025-06-24T20:34:39.376419Z :INFO: [] MessageGroupId [producer4] SessionId [] Start write session. Will connect to endpoint: localhost:14094 2025-06-24T20:34:39.395405Z :DEBUG: [] MessageGroupId [producer4] SessionId [] Write session: send init request: init_request { topic: "/Root/topic1" message_group_id: "producer4" } 2025-06-24T20:34:39.407341Z :INFO: [] MessageGroupId [producer4] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750797279407 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T20:34:39.407451Z :INFO: [] MessageGroupId [producer4] SessionId [] Write session established. Init response: session_id: "producer4|c71dc382-e82ecf2e-6232bfc-448a7b33_0" topic: "topic1" 2025-06-24T20:34:39.416618Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|c71dc382-e82ecf2e-6232bfc-448a7b33_0] Write 1 messages with Id from 1 to 1 2025-06-24T20:34:39.417319Z :INFO: [] MessageGroupId [producer4] SessionId [producer4|c71dc382-e82ecf2e-6232bfc-448a7b33_0] Write session: close. Timeout = 18446744073709551 ms 2025-06-24T20:34:39.427086Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519617057948428463:2527] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:39.497991Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|c71dc382-e82ecf2e-6232bfc-448a7b33_0] Write session: try to update token 2025-06-24T20:34:39.498064Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|c71dc382-e82ecf2e-6232bfc-448a7b33_0] Send 1 message(s) (0 left), first sequence number is 1 2025-06-24T20:34:39.548328Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7519617057948428472:2366], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:34:39.548795Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=7&id=Nzk0NmRlOC0yMGFmMzZmMS0xZjIxZjMzOC1iNDRhMjczYQ==, ActorId: [7:7519617057948428386:2353], ActorState: ExecuteState, TraceId: 01jyhtcr2247waw1r70mk94krg, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:34:39.549947Z node 7 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:34:39.555429Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|c71dc382-e82ecf2e-6232bfc-448a7b33_0] Write session got write response: sequence_numbers: 1 offsets: 60 already_written: false write_statistics { persist_duration_ms: 5 queued_in_partition_duration_ms: 40 } 2025-06-24T20:34:39.555486Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|c71dc382-e82ecf2e-6232bfc-448a7b33_0] Write session: acknoledged message 1 2025-06-24T20:34:39.619778Z :INFO: [] MessageGroupId [producer4] SessionId [producer4|c71dc382-e82ecf2e-6232bfc-448a7b33_0] Write session will now close 2025-06-24T20:34:39.619898Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|c71dc382-e82ecf2e-6232bfc-448a7b33_0] Write session: aborting 2025-06-24T20:34:39.620709Z :INFO: [] MessageGroupId [producer4] SessionId [producer4|c71dc382-e82ecf2e-6232bfc-448a7b33_0] Write session: gracefully shut down, all writes complete 2025-06-24T20:34:39.621302Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|c71dc382-e82ecf2e-6232bfc-448a7b33_0] Write session: destroy Size: 4194320 Got response:400: PathErrorUnknown Got response:400: No such partition in topic 2025-06-24T20:34:39.900294Z node 7 :PERSQUEUE ERROR: partition_read.cpp:780: [PQ: 72075186224037889, Partition: 0, State: StateIdle] reading from too big offset - topic topic1 partition 0 client $without_consumer EndOffset 61 offset 10000 Got response:400: Bad offset >> TConsoleTests::TestGetUnknownTenantStatusExtSubdomain [GOOD] >> TConsoleTests::TestRestartConsoleAndPools >> SystemView::AuthUsers [GOOD] >> SystemView::AuthUsers_LockUnlock >> ExternalBlobsMultipleChannels::WithCompaction [GOOD] >> Cdc::InitialScanAndResolvedTimestamps [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> ExternalBlobsMultipleChannels::Simple [GOOD] Test command err: 2025-06-24T20:34:35.982713Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:35.983252Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:35.983492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045e6/r3tmp/tmph8hoTr/pdisk_1.dat 2025-06-24T20:34:36.431952Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:34:36.435860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:36.524692Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:36.530616Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797272071025 != 1750797272071029 2025-06-24T20:34:36.585402Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:36.585584Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:36.600615Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:36.708587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:37.155114Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:697:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:37.155268Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:707:2583], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:37.155378Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:37.161556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:37.224052Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:37.374049Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:711:2586], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:34:37.470743Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:781:2625] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:37.939502Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhtcnz0dp969wdjw2ac9egj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGQ1YTQyOTYtYTdkZDc3NDktNjMzOGFkZWEtMmE3MDkyOWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:38.089876Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhtcprqenpnjwc8z9bmbfqj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmQwYmNlMjEtM2Q2NDQyNzQtMmI5YmViNmEtMzMzYmEwMjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:38.191836Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhtcpwgbvx6e3p13cxpx2kt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWNjYmNhMTctMzA0YTIzMWUtYWI3ZDE0YTgtMWU3ZTUwNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:38.280925Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhtcq03dgah2yjja3tts6n5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzlmMzE4NDMtNTc3MTBmM2ItNzIyNGQ5NWItZmQ5ZjgxMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:38.392774Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhtcq2gf0sgtvab88sw9jjq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWQ1YjI5MTYtNzIwYTliNGMtZGRiOGNmZTYtYjVhNGJhMTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:38.466772Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhtcq61ex1y8dw50spmfpn0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjJiYmUxYmMtOGQwODY4MDQtNzAyM2Q1MDktYzFkZmM0MTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:38.546882Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyhtcq8998e5k3pkxcqhe0wa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmZhYWIyMTctZTMyZjU1YzEtMjVlMzM4MjAtODNmNzk5YTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:38.636183Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyhtcqat7at1ydcx0bswhj2v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2JlNTRiNDEtODVhYTQwNTYtZDM2ZDVmMS0zMzRmYzYzMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:38.738696Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. 
Ctx: { TraceId: 01jyhtcqdk2tgg5xxg31eegmga, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmVkODQwYWYtNDBiY2U5ZDQtYmQzMmMxZDEtOGI1YTk1YmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:38.880325Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jyhtcqh371pb5m8n3a5mxfff, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmZhMzllNmYtZWY4OGVmOTctYTVhNWEzMmMtNjE3ZmNiZTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:38.963139Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jyhtcqnd8yy5b94dgmf18rdt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTE4ZDU5OWItOWMzZDhkZTctMTkzYjMxNzgtNzI3ODAzNjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.116686Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jyhtcqqtdr2bcs2kr623dy73, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzQ2NGU4M2EtYjUwZDBhZGEtMmVjYTg4NTgtNTg1YTJiNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.222225Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jyhtcqx46c2v5v8hkb2hffsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGI5ODEyYmEtZjBhYjlhMmQtODc5NDEyYzEtYzMwOWEzODk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.302052Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jyhtcqzwej1ftz7yrgggfnwy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTg2NWFjNGQtMzI5YjhiOTEtN2Q4MTI3YzAtZDg0ZTQyMmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.372068Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715674. Ctx: { TraceId: 01jyhtcr2c3kxv9btdhn5e9ztz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzYyOWNjNWMtYjBjNWRjODgtMmZhYWFmYjAtMzZmZWI4NWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.436410Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jyhtcr4jb3eb0extatxe7qd7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2RkNzVkNzUtYzA4MmExYWItYjJkMjVhNWYtMTllYzNmODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.508658Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715676. Ctx: { TraceId: 01jyhtcr6j20sahe69cymkgq56, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjZjNWRiZDUtOWNlODY4MTMtZDhlNjQ1NmUtOWFiODczZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.581030Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715677. Ctx: { TraceId: 01jyhtcr8v1cs43vh1j909ghbx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTgzM2E3NjEtNmQ1ZmZkYWQtZjUwMTlmZGItYTBhNTdkZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.653316Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715678. 
Ctx: { TraceId: 01jyhtcrb3dhf2gc6sns7hnze5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTIwYWIwMGEtNmJmNDQwNi03NTM4ZDIzOS01MzdhNDNiMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.726428Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715679. Ctx: { TraceId: 01jyhtcrdc1k59vfn0wzantqmd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTg5OGFiNGYtNWRhYzk4ODAtNTZmZTExMzQtNWM5MTAwMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.796977Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715680. Ctx: { TraceId: 01jyhtcrfn8vfpvrr3yn0v90ef, Database: , DatabaseId: /Root, Sessio ... r.cpp:120: TxId: 281474976715727. Ctx: { TraceId: 01jyhtcwc0eg5jarxp07v9mhfb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWJiZGFmOC1jYWVhZTFhNy1kNzY2ZjdjMi00Y2NmNDhiYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:43.868775Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715728. Ctx: { TraceId: 01jyhtcwe88wjscree9nx9b14z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDY5NGM1ZTMtNGY0MGI0OGMtODA4MmE0MmEtOGIyMTIwZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:43.934821Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715729. Ctx: { TraceId: 01jyhtcwh409n493pnxxrxyd16, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjhmNjBjMGYtY2QzZGQ3MWYtOTkzYTBmNjMtZmI1MjQ0ZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:44.004233Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715730. Ctx: { TraceId: 01jyhtcwk40a2awxd3m1pvehsf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M1YzJlMmEtYzM4MDJhMzUtMzQzOWI1MDYtYzRlMGE0NzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:44.117853Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715731. Ctx: { TraceId: 01jyhtcwp96pdkmbrwhdw6j6a0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTE1N2Y1ODUtOGM2MGRiMTMtZTkyNDgxOTItMmI2Y2ZkZWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:44.241290Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715732. Ctx: { TraceId: 01jyhtcwrya9gwz5ntsfvgmp70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDEzNzQ3NDYtNGYxN2ZlZTgtMzZlODE2NWItZDkzODM3Y2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:44.315776Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715733. Ctx: { TraceId: 01jyhtcwwrbxqvx0bn9y0ct851, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDNhMGRjNmQtODA3Y2NhOGQtYjQwZDIzZGYtY2FlMWJjY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:44.411635Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715734. Ctx: { TraceId: 01jyhtcwz2brr3mrjpwxjratwc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmY3MzU4ZTYtMjEzNjc4MjMtZWQ0ODJmODctZTA5YzYyYWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:34:44.490458Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715735. Ctx: { TraceId: 01jyhtcx22d3ffbw27cmxtse0p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjQ2NGJkOWUtNzZjZmVkZmUtZmJiYWQ5N2MtOGUxOWU3YTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:44.587120Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715736. Ctx: { TraceId: 01jyhtcx4hb9n830jb11fdx0aa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWQwMmE0NWEtM2FkMTNlZTctN2QxYmEzODQtNGY5ZmQ0NjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:44.669813Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715737. Ctx: { TraceId: 01jyhtcx7k1ye5qdzkkezta3zx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2MzOGEwNTYtYTcyY2YyNC0zMjgyNzFhOS1iYzg5NDMxZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:44.738249Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715738. Ctx: { TraceId: 01jyhtcxa4f9axz84sct32c9zw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTRmYzg0N2QtOTU2YjczZDYtM2Q4NTA1ZmYtNjFkYWUzNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:44.804320Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715739. Ctx: { TraceId: 01jyhtcxc8c2zd773ve7x592e2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjY4NjgzZDItOTNmZDFkYi03ZDUxZjZlZS1lODk5ZGJiMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:44.908093Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715740. Ctx: { TraceId: 01jyhtcxeb2pm1r7v5kqcpghxm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODllNjcwMjEtOTk1OWIxNjEtMzNmMjU0MzMtNDA1YjVmYjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:44.985697Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715741. Ctx: { TraceId: 01jyhtcxhp5k2gzpxyzdygcgvr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTY3OTJiNDktYmQzY2E4NzMtMWU3YzAwZmEtMWVmMzc0YjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.076403Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715742. Ctx: { TraceId: 01jyhtcxm04y8rpy59thxcarnr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmQyOGI3ZmUtMjM2NzhhMS0zNWY4M2UyLTRiNmU4YmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.151729Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715743. Ctx: { TraceId: 01jyhtcxpvc14q64t6yxeag17m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzNlNjhmYjQtYjAxYzJmZjYtNGIwMzFlMTgtMWFiNDZhN2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.262606Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715744. Ctx: { TraceId: 01jyhtcxs74yvpehxx26mmy98z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmRjMjcwYWQtOWE3Mjg5NjktODcyYWE4NDMtNzJjODkxMWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:34:45.337310Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715745. Ctx: { TraceId: 01jyhtcxwp90y18xy3sffbeggr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTUxOGU2M2EtYzc1N2QxZjEtMWE2MTcwMzQtMmU0ZGQ1NzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.447931Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715746. Ctx: { TraceId: 01jyhtcxz07bttfsyve3ay3bz7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzcwOWQwZmMtZjA3M2IxYTQtZWI3NjRlZTYtMjE2NmE1NWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.522948Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715747. Ctx: { TraceId: 01jyhtcy2f0y5ytq1gjv91y80x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTIwYzA5YzEtNTA3MGQ1ODYtNDE5ZmMxYWUtOWUyNmJmODA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.663179Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715748. Ctx: { TraceId: 01jyhtcy4vft98a8sx4k4xftnd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTAyOGM0NzQtNTdkMzM2NzEtOWM4MmIzYTgtNDAzZGU2ODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.735717Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715749. Ctx: { TraceId: 01jyhtcy966emkjhp0d4ew6hme, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWIzZmIzNDYtNDcyYjBjMDAtZjAxYzVhOTYtODZlODY4OTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.809367Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715750. Ctx: { TraceId: 01jyhtcybf2bm554nmqs5stcbh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTQ5YTI1MDQtZDhlMzc3ZTItMWEyZGY4ODUtYTY1MjNkNjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.881332Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715751. Ctx: { TraceId: 01jyhtcydr825g0cf45vbeapbs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjRhZjlkZGItZDgwMGJkN2ItZjg0MGJlODAtYjMzNzMyNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.952620Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715752. Ctx: { TraceId: 01jyhtcyg0cqyggtywgm40yv5z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmZjMDgwNjEtY2JmN2I0OGQtNjVhMzAwZTItYzBhYjFhMWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.024452Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715753. Ctx: { TraceId: 01jyhtcyj8b1ej6qhppg1emasf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmYzZTc0M2QtZmE2MDcwOTctZjM0ZTNiOWMtOGM5YjZmNzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.103709Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715754. Ctx: { TraceId: 01jyhtcymf2afcpsk4hs9s2wbx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzU5ZmUyYzMtOTIwMzBjY2EtYTdlYTcxOWUtZDIxNTEyMzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:34:46.183035Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715755. Ctx: { TraceId: 01jyhtcypy6r0bwrgf2bdsnf83, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Mzk2ZWIwMzEtMmRkZTM0ZGMtNjk3OWE2MzgtMjdlYWZmYmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.263676Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715756. Ctx: { TraceId: 01jyhtcyse9npna7vd2t0rb4we, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTA2MWQ2M2QtNWE3OTViNWQtYjhmODE4NjctNTNmYzlhODA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.369703Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715757. Ctx: { TraceId: 01jyhtcywz2fv222tsstgkpbk1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDlhODk4YmItN2EyNmU2YWYtNDZiNjUxNmMtMjU2YzMyOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.443699Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715758. Ctx: { TraceId: 01jyhtcyz966z1m9vykwkm6g75, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWQyY2Y5M2ItNDI3Mzk5ZTItNDY4NjZhZjUtYzY0NjNiOTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.517275Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715759. Ctx: { TraceId: 01jyhtcz1k8h58haefze44rywp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzA4Mjk0ZGQtMTNkNzRkOWMtM2M0YTIzODktMWUxNDg1Yw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.812944Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715760. Ctx: { TraceId: 01jyhtcz7p9jr1zjgbrvb7x4vc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjRiMDM4NTEtODM1ZDZhNS03MGY0YWNjZS1mMmYyYzliMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> ExternalBlobsMultipleChannels::ExtBlobsMultipleColumns [GOOD] >> ExternalBlobsMultipleChannels::SingleChannel [GOOD] >> KqpScanLogs::GraceJoin+EnabledLogs >> KqpScanSpilling::SelfJoinQueryService [GOOD] >> KqpScanSpilling::SelfJoin ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> ExternalBlobsMultipleChannels::WithCompaction [GOOD] Test command err: 2025-06-24T20:34:35.897475Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:35.897886Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:35.898101Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00461f/r3tmp/tmpqPfX2a/pdisk_1.dat 2025-06-24T20:34:36.492339Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:34:36.503545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:36.629951Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:36.638650Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797271158858 != 1750797271158862 2025-06-24T20:34:36.694198Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:36.694363Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:36.712674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:36.805187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:37.342899Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:697:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:37.343083Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:707:2583], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:37.343344Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:37.349707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:37.410408Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:37.561434Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:711:2586], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:34:37.781722Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:781:2625] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:38.367040Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhtcp4weebnq3f1s3v787bf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDY4MWJiYjItYWIzNWM4MTYtOWE3NGNiYy0yMjcyODJh, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:38.466772Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhtcq5yc4e1s1t6q8deaqm4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDc4MTliZi03NTU2YzViNC0zNDg2YTJmNS00OTg1Y2M2Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:38.563991Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhtcq8q0jy9d41fxbcsn5h3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGJmZjI1M2QtMmI5MWQxZWEtNTI0NzE3YmUtMjI0YmVmYWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:38.647259Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhtcqbv133ejt8ng6a18a0x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjljNmY1MmYtMTVlMmVjOTctNGQyOTUxYjUtYzcwZmNkMjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:38.752551Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhtcqemb9yttctmd9xs0wp5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTFiYzg3NGItNmJiNjdiMC1lZTg1NmRkYy1lNWUwNzU2NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:38.931015Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhtcqhnb7q3yytckxcfccr9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTY4N2UxMmItNWEwZDMxMWEtYmM4NDU2MWYtNjU4ZTUxZDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.043509Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyhtcqq5fhejm8ahw4yjqfc2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmY4NTgyZTYtYjgyYzJmNWEtODczMjU2N2UtOWI4MTY2YTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.144185Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyhtcqtn510trvjnev05mmtr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGU1OWNhZWEtMWU4NWI0NmEtNjg1N2FhMTMtNDNhMzY0OTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.237596Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. 
Ctx: { TraceId: 01jyhtcqxs69edk1r56g5x4zys, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTFiM2JhNjctZTQ5Y2IzZi0yMzg1YWZjZC1hMDk5ZDMxZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.342283Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jyhtcr0v5fxqb1vxysspsgfs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzA4ODE4ZS1mMTVkNWFlOS00NDIyYTNiZC05YmI2NjI3Yw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.467421Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jyhtcr438nhan4v7mf9s507z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmZlMzVhZWYtMjA3MDRhMGMtZGQ1MDhiYzEtYmM3ODEyMzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.542925Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jyhtcr7ye1e58qsk8547d2fw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTgzZGU1OWMtNTAzYzhhMjctYmRhMmY5NS1iYjIzZGQ2MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.624967Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jyhtcrab5hjaz4bkyskktka8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmJkM2ZiZi1lYjRlN2VlNC02MDUyMzNlMi00ZjkyZDBiMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.700691Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jyhtcrcta1zzvwrawn57mj4w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTdjYmM3ZmMtZTRiNDkyYjctOGIyZmZjNjctZGQzNDQzMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.797595Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715674. Ctx: { TraceId: 01jyhtcrfb5f0e8ajaj82gzexm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThmMThhMmQtYjYxMzIyNTgtZGY4YWExODctMmQ1OGYzNDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.886613Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jyhtcrjbbppa605m1zq4pn10, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzZlMGYzOGUtMmNhYzhkMjYtYzJhYTBhN2EtZTk5NWU4NGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.966252Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715676. Ctx: { TraceId: 01jyhtcrn47c2km84g3dmds23f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2M4NjhlNzktNGQzY2VhMWItOWRiMjY2MTUtMjVhYTc1ZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:40.040386Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715677. Ctx: { TraceId: 01jyhtcrqg31b77dc10pj9avj7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTM5OGVjZjQtM2IyYzg5YjgtNzUyMjg1NjAtYjY3ZDY2ZTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:40.137489Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715678. 
Ctx: { TraceId: 01jyhtcrss6apaar4537cpvkqp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzAzMGEwNmUtOTE2MTBkNDgtMzQ1NWQ1ZjYtZjhkMGE0YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:40.231205Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715679. Ctx: { TraceId: 01jyhtcrwv2aczv5sgq3cg557a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzE2NTFjZDAtNjg4M2M1ZDctZjdhYTU3MDAtMmQyOTQ3NjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:40.379894Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715680. Ctx: { TraceId: 01jyhtcrzv0fgp4r295a2dyndj, Database: , DatabaseId: /Root, SessionId: ... ceId: 01jyhtcxja6xgfnxjf90f2940v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjY3NGE0My0yY2Y3MGFjYi1mYmJjZGJkZS1mYzEzN2Y3NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.077775Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715729. Ctx: { TraceId: 01jyhtcxmr1s19111r06rhsvpn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGZjMWY1YWMtNDFkZmE4YjYtODkyNTJhMi05ZmNlOGIwYQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.198752Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715730. Ctx: { TraceId: 01jyhtcxq7cjyb76hrnsdsmnr1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDcyYjU0Y2EtYzFkZGQyYjMtMTE0OGViNzItNjA2ZmVhMjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.302308Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715731. Ctx: { TraceId: 01jyhtcxv1e64mvq88threa4qb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjY2NTVmZS1hNGVmZDRiMS1jNDJiODEyOS1iNGFjNDY4Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.387886Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715732. Ctx: { TraceId: 01jyhtcxycew7dqc3vgg5w3az6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2M1ZDY2MTAtZDQwZTY1Y2EtNmU2N2NjZTEtOTYxYmM1ZmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.474095Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715733. Ctx: { TraceId: 01jyhtcy14f7hgwy1enrdhw16x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTJkNzg3Y2QtM2U0MmVjNmQtYzAzNWFiNjEtNTUyNTVmYjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.562835Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715734. Ctx: { TraceId: 01jyhtcy3p2yc4p75zbtqc832k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmNjMDdlZmYtZGNmZmRmMzctYjdmMWY3ZjItNGM2Njg5N2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.645626Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715735. Ctx: { TraceId: 01jyhtcy6e806rv69m8p452mta, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWFiYzM0NmMtNThmMzY4YWEtYmI0Njc5N2QtYWI4ZmMwNWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:34:45.721886Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715736. Ctx: { TraceId: 01jyhtcy90cwyg6fe14j2ahyvr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2M2ZTZjNmQtZWZlNjUzZGItMWRjN2Q2MWYtOTBlODM2Mzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.804606Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715737. Ctx: { TraceId: 01jyhtcybf1rxzczzhre195w27, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTgyM2NkZC1mZmJlNWUxNy03YmYxZDc2NC05MTlmMmIyNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:45.901134Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715738. Ctx: { TraceId: 01jyhtcye02ykvcms3n47y7nsb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzNlYTQwZjYtMWQxNTA1MjctMzg4MWQ4ZWItYjUwM2NlMTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.014139Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715739. Ctx: { TraceId: 01jyhtcyh496f5k2dprh5xmc8q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjcwOWJkYTEtMTUxMWRhNDMtZmQ3MDk0YzItZWU5ZTkwOGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.118773Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715740. Ctx: { TraceId: 01jyhtcymfdmh8zywznh5rp3kw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjQ4ZGJmMjYtZWI3NDM0NDctMjI2ZTMyNmEtYjc3MzM4Nzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.222333Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715741. Ctx: { TraceId: 01jyhtcyqx73xgk9wee3rd8fz1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjdlMWM5MTItYWE1NzMwNS02NGRmZTc1NS0yYWI2MzA2NQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.325255Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715742. Ctx: { TraceId: 01jyhtcyv35xt01qthcezj85ap, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzhkYzRkZmMtNzU5MWRkNDYtYmZlZTM0NWUtZWM4MDZlMDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.414541Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715743. Ctx: { TraceId: 01jyhtcyy8fhg8x00ajp8x8ewv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmU3OTBlZDUtNGVjOGJhZjMtYjQ0Zjc5ZTUtNWQ1MDJl, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.500683Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715744. Ctx: { TraceId: 01jyhtcz136vs2s8ayg4505n7m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGRlNDNlYmEtOTZlMzE5MjktNGY4ODNhYTEtM2VhNTUzOWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.580942Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715745. Ctx: { TraceId: 01jyhtcz3p63cz89kp8r9hhr4z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmMwNWFhYjMtOWFmMTQ3OC03N2NmNzhkNS01OGFjY2RlOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:34:46.674215Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715746. Ctx: { TraceId: 01jyhtcz690m236nsg7z2c17dd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2UxYjFlMWMtODJkMGFiYTctNTBjOGNmYjQtYWQ4MDRlYjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.757890Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715747. Ctx: { TraceId: 01jyhtcz94fm9gm3hrabrsbepw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Yjc1MDAxMGItZGE2NDc4ZDMtZDllNGI1YjMtZjUzZWFhNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.859000Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715748. Ctx: { TraceId: 01jyhtczc76qgszmgc579swkmy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzAzY2I4NTAtNzhmY2UxZGMtYzI4ZGRmYTktM2U5NDc4NTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.950058Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715749. Ctx: { TraceId: 01jyhtczf2e477tmhf9vwewm7m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGQ0NjY4Ni03MDkzZTk3Ny1hZDg1ZmVhMi0yNjhlZGE4, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.104897Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715750. Ctx: { TraceId: 01jyhtczhsbjgpx1magb3egv94, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTdkMmQzZmItMzg2YzJjYjgtZTZiMWEwM2QtZDI5MjRhZDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.193441Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715751. Ctx: { TraceId: 01jyhtczpn9j5nn6xb5tmdady4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTdmYjMyZmQtMjBhYmI2YTUtYjdhNjM1ZjMtZDFjYzNjYzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.281859Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715752. Ctx: { TraceId: 01jyhtczsd9svdms90ey2yapky, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmZhNzFjNmQtOWMwMTViMTItM2VkYWI0ZjktOGIyYWQyNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.382583Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715753. Ctx: { TraceId: 01jyhtczw65pmk1hkph43tbxvv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjIxMWZjNjItYzMxNzk5ODMtNzc3ZjE0ZGItOWVhZDIyZTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.464748Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715754. Ctx: { TraceId: 01jyhtczzbctp2jxn2hw15amek, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGNkOTVlZjgtYTNjZGZiYmItOGM4NjE2MzUtM2E3OTljYjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.540188Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715755. Ctx: { TraceId: 01jyhtd01tbb4vnvgysyppnem0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTRiNTJkMjktYWY5YmNlZDktNjI3NWFhNC1mOGY0YWEzYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:34:47.641112Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715756. Ctx: { TraceId: 01jyhtd047dncenv8x6hbaq96x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTY5Mjc2NTQtYzIzOWU0N2MtZGFiZTM4OGUtZTMxZWU0MDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.764851Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715757. Ctx: { TraceId: 01jyhtd07bbxrnrngwhsmfrwd9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWYzOGM1OC0yMzQyMTE5Ni02OTA3MTZiMi00NWQ2ZThhNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.894444Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715758. Ctx: { TraceId: 01jyhtd0bgdg48f6xdh5cqnxnt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2MxOTZmODgtMzRhYjQ2NWQtM2U0YWJiMTMtZGQwMTdmY2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.008277Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715759. Ctx: { TraceId: 01jyhtd0fgcp56qndtatv1n54x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmMxN2NlMTctZTcxMDdiMDYtZmVkNzIwNy03OTEwMmYwYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.106464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 100:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T20:34:48.626111Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715760. Ctx: { TraceId: 01jyhtd0z8166q18xxpcxw6z82, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGJjZTEzYWQtMzNjZDliZTgtMTRjMGExNS1hNzBjNWU5Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> TConsoleTests::TestSetDefaultStorageUnitsQuota [GOOD] >> TConsoleTests::TestSetDefaultComputationalUnitsQuota >> Cdc::ResolvedTimestampForDisplacedUpsert [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_exchange/unittest >> Cdc::InitialScanAndResolvedTimestamps [GOOD] Test command err: 2025-06-24T20:30:31.011988Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615993647004968:2206];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:31.012272Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00473f/r3tmp/tmpbJsgys/pdisk_1.dat 2025-06-24T20:30:31.708806Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:31.708915Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:31.718723Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:31.727411Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519615989352037503:2079] 1750797030932399 != 1750797030932402 2025-06-24T20:30:31.748420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1989, node 1 2025-06-24T20:30:32.015668Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:30:32.031381Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:30:32.031405Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:30:32.031412Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:30:32.031571Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:30:32.173968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:32.221743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:32.282297Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:7519615997941972696:2268] 2025-06-24T20:30:32.291599Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:30:32.319675Z node 1 :TX_DATASHARD 
DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:30:32.319765Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:30:32.322100Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T20:30:32.322157Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:30:32.322196Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:30:32.322655Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:30:32.322733Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:30:32.322765Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:7519615997941972712:2268] in generation 1 2025-06-24T20:30:32.328963Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:30:32.425035Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:30:32.425189Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:30:32.425249Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:7519615997941972714:2269] 2025-06-24T20:30:32.425261Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:30:32.425279Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:30:32.425308Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:30:32.425495Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:30:32.425568Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:30:32.425587Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:30:32.425626Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:30:32.425645Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:30:32.425673Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:30:32.425725Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7519615997941972693:2304], serverId# [1:7519615997941972710:2311], sessionId# [0:0:0] 2025-06-24T20:30:32.427607Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:30:32.427893Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976710657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:30:32.427984Z node 1 :TX_DATASHARD DEBUG: 
check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976710657 at tablet 72075186224037888 2025-06-24T20:30:32.429715Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:30:32.436227Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:30:32.436342Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T20:30:32.442269Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7519615997941972728:2321], serverId# [1:7519615997941972729:2322], sessionId# [0:0:0] 2025-06-24T20:30:32.453810Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976710657 at step 1750797032481 at tablet 72075186224037888 { Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750797032481 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T20:30:32.453868Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:30:32.454558Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:30:32.454650Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:30:32.454669Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T20:30:32.454703Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1750797032481:281474976710657] in PlanQueue unit at 72075186224037888 2025-06-24T20:30:32.454976Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1750797032481:281474976710657 keys extracted: 0 2025-06-24T20:30:32.455109Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T20:30:32.463805Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:30:32.463891Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T20:30:32.466454Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T20:30:32.466917Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:30:32.472524Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1750797032481} 2025-06-24T20:30:32.472581Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:30:32.472623Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult 
at 72075186224037888 time 1750797032480 2025-06-24T20:30:32.472639Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:30:32.472677Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1750797032488 2025-06-24T20:30:32.483383Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:30:32.483440Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:30:32.483528Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T20:30:32.483591Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750797032481 : 281474976710657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:7519615993647005188:2186], exec latency: 11 ms, propose latency: 28 ms 2025-06-24T20:30:32.483655Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976710657 state Ready TxInFly 0 2025-06-24T20:30:32.483709Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:30:32.486122Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:153: [ChangeSender][72075186224037888:1][1:7519615997941972714:2269][Inactive] Handle NKikimrChangeExchange.TEvActivateSender 2025-06-24T20:30:32.498925Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSch ... pp:42: [CdcStreamHeartbeat] Emit change records: edge# v7500/18446744073709551615, at tablet# 72075186224037888 2025-06-24T20:34:47.933010Z node 29 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 4 Group: 0 Step: 6000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-24T20:34:47.933614Z node 29 :TX_DATASHARD INFO: cdc_stream_heartbeat.cpp:42: [CdcStreamHeartbeat] Emit change records: edge# v7500/18446744073709551615, at tablet# 72075186224037888 2025-06-24T20:34:47.944940Z node 29 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037888 state Ready 2025-06-24T20:34:47.945149Z node 29 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T20:34:47.960905Z node 29 :TX_DATASHARD INFO: cdc_stream_heartbeat.cpp:78: [CdcStreamHeartbeat] Enqueue 1 change record(s): at tablet# 72075186224037888 2025-06-24T20:34:47.961176Z node 29 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] BodySize: 0 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 } 2025-06-24T20:34:47.961355Z node 29 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:34:47.961545Z node 29 :TX_DATASHARD DEBUG: datashard.cpp:3812: Waiting for PlanStep# 9000 from mediator time cast 2025-06-24T20:34:47.961684Z node 29 :TX_DATASHARD INFO: cdc_stream_heartbeat.cpp:78: [CdcStreamHeartbeat] 
Enqueue 0 change record(s): at tablet# 72075186224037888 2025-06-24T20:34:47.961786Z node 29 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:34:47.962043Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:71: [ChangeSender][72075186224037888:1][29:643:2539] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] BodySize: 0 }] } 2025-06-24T20:34:47.962399Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:628: [CdcChangeSenderMain][72075186224037888:1][29:932:2730] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] BodySize: 0 }] } 2025-06-24T20:34:47.962805Z node 29 :TX_DATASHARD INFO: datashard_change_sending.cpp:215: TTxRequestChangeRecords Execute: at tablet# 72075186224037888 2025-06-24T20:34:47.963119Z node 29 :TX_DATASHARD DEBUG: datashard_change_sending.cpp:235: Send 1 change records: to# [29:932:2730], at tablet# 72075186224037888 2025-06-24T20:34:47.963208Z node 29 :TX_DATASHARD INFO: datashard_change_sending.cpp:260: TTxRequestChangeRecords Complete: sent# 1, forgotten# 0, left# 0, at tablet# 72075186224037888 2025-06-24T20:34:47.963419Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:633: [CdcChangeSenderMain][72075186224037888:1][29:932:2730] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 4 Group: 0 Step: 6000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-24T20:34:47.963769Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:111: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][29:1014:2730] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 4 Group: 0 Step: 6000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-24T20:34:47.964354Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-24T20:34:47.964411Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-24T20:34:47.964563Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 2 requestId: cookie: 2 2025-06-24T20:34:47.964720Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-24T20:34:47.964759Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-24T20:34:47.964817Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72075186224037889] got client message topic: Table/Stream/streamImpl partition: 0 SourceId: '\00072075186224037888' SeqNo: 4 partNo : 0 messageNo: 3 size 26 offset: -1 2025-06-24T20:34:47.965060Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1293: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 process heartbeat sourceId '\00072075186224037888' version v6000/0 2025-06-24T20:34:47.965263Z node 29 :PERSQUEUE INFO: 
partition_write.cpp:1797: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 emit heartbeat v6000/0 2025-06-24T20:34:47.965590Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob processing sourceId '\00072075186224037889' seqNo 0 partNo 0 2025-06-24T20:34:48.014223Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob complete sourceId '\00072075186224037889' seqNo 0 partNo 0 FormedBlobsCount 0 NewHead: Offset 3 PartNo 0 PackedSize 107 count 1 nextOffset 4 batches 1 2025-06-24T20:34:48.014907Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Add new write blob: topic 'Table/Stream/streamImpl' partition 0 compactOffset 3,1 HeadOffset 3 endOffset 3 curOffset 4 d0000000000_00000000000000000003_00000_0000000001_00000? size 93 WTime 7451 2025-06-24T20:34:48.015329Z node 29 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T20:34:48.015445Z node 29 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 3 partNo 0 count 1 size 93 2025-06-24T20:34:48.016555Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 3 count 1 size 93 actorID [29:873:2685] 2025-06-24T20:34:48.016714Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037889' partition 0 offset 3 partno 0 count 1 parts 0 suffix '63' size 93 2025-06-24T20:34:48.027638Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 44 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T20:34:48.027804Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:34:48.027891Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Answering for message sourceid: '\00072075186224037888', Topic: 'Table/Stream/streamImpl', Partition: 0, SeqNo: 4, partNo: 0, Offset: 3 is stored on disk 2025-06-24T20:34:48.028157Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037889, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=452, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:34:48.028259Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 3 requestId: cookie: 2 2025-06-24T20:34:48.028533Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:160: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][29:1014:2730] Handle NKikimrClient.TResponse { SessionId: TxId: Success { Response: Status: 1 ErrorCode: OK PartitionResponse { CmdWriteResult { AlreadyWritten: false SourceId: "\00072075186224037888" SeqNo: 4 Offset: 3 WriteTimestampMS: 7451 PartitionQuotedTimeMs: 0 TotalTimeInPartitionQueueMs: 0 WriteTimeMs: 0 TopicQuotedTimeMs: 0 WrittenInTx: false } Cookie: 2 } } } 2025-06-24T20:34:48.028632Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037888:1][29:932:2730] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-06-24T20:34:48.028853Z node 29 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 1, at tablet# 72075186224037888 2025-06-24T20:34:48.028896Z node 29 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 4, at tablet: 72075186224037888 2025-06-24T20:34:48.040946Z node 29 :TX_DATASHARD INFO: datashard_change_sending.cpp:335: TTxRemoveChangeRecords Complete: removed# 1, left# 0, at tablet# 72075186224037888 >>>>> GetRecords path=/Root/Table/Stream partitionId=0 2025-06-24T20:34:48.203097Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-24T20:34:48.203180Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-24T20:34:48.203323Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 4 Topic 'Table/Stream/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 4 max time lag 0ms effective offset 0 2025-06-24T20:34:48.205742Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 4 added 2 blobs, size 452 count 4 last offset 3, current partition end offset: 4 2025-06-24T20:34:48.205945Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 4. Send blob request. 2025-06-24T20:34:48.206222Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 3 parts_count 0 source 1 size 359 accessed 1 times before, last time 1970-01-01T00:00:06.000000Z 2025-06-24T20:34:48.206348Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 3 partno 0 count 1 parts_count 0 source 1 size 93 accessed 0 times before, last time 1970-01-01T00:00:07.000000Z 2025-06-24T20:34:48.206426Z node 29 :PERSQUEUE DEBUG: read.h:121: Reading cookie 4. All 2 blobs are from cache. 
2025-06-24T20:34:48.206621Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 2 blobs 2025-06-24T20:34:48.207409Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 3 count 3 size 339 from pos 0 cbcount 3 2025-06-24T20:34:48.207663Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 3 totakecount 1 count 1 size 75 from pos 0 cbcount 1 2025-06-24T20:34:48.208708Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037889' partition 0 offset 0 partno 0 count 3 parts 0 suffix '63' 2025-06-24T20:34:48.208818Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037889' partition 0 offset 3 partno 0 count 1 parts 0 suffix '63' 2025-06-24T20:34:48.208983Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> ExternalBlobsMultipleChannels::ExtBlobsMultipleColumns [GOOD] Test command err: 2025-06-24T20:34:37.571427Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:37.571883Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:37.572133Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045d0/r3tmp/tmpkIpY2X/pdisk_1.dat 2025-06-24T20:34:38.036028Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:34:38.041992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:38.088378Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:38.099243Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797274078234 != 1750797274078238 2025-06-24T20:34:38.146838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:38.146977Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:38.171120Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:38.273662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:38.747170Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:697:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:38.749127Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:707:2583], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:38.749327Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:38.759702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:38.817144Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:38.960783Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:711:2586], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:34:39.081064Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:781:2625] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:39.668956Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhtcqgr68rvhdebmgcb7rwz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGVjZThhOGYtODY5ODIxZjItZmRmNTZmMzgtNzQyNDAyODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.788168Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhtcre32we3q7seegvj3acv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTVjNjVjNjMtMTI2MTJlMjEtYWY1OTM4YzYtZDg1NDQ4NWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.889072Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhtcrhma9k2w2ah03mx5q25, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDIwMjBjZGEtNTEzZThlMWItNWUxMmZjNzAtM2ZlNmE3MzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:39.979849Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhtcrmy8b5v2r59e969xfct, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2Y0ODllMDAtYjExNDUxNTMtOTE3Y2Y0OTEtYzI1YzY4ZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:40.060786Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhtcrqk4tmpg2ev6cjmptmk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmQyZTVkYTctNDM4NmMxYWYtZGNkNWU3NGQtNjk4ZDRkNjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:40.146014Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhtcrt4bs09mfmy68f4n7e7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDdiN2Q0ZS01MGM4YmRlMi04NjhjOWVhMC1iMmIxMjJlMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:40.241153Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyhtcrws5z4fmett2qppy3c4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTc5ZjU3MmItN2M1NTExODItZTQ5ZDNiMmEtODFjZWRlM2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:40.329736Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyhtcrzs6gw8ja30hevz1t4v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmFmOGNkODctNjYyNTA2YmItMTA5OTRhZDEtY2NhYzVhZDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:40.418360Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. 
Ctx: { TraceId: 01jyhtcs2h8pds3s70cs6rjvax, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTA3ZWQxNTItNmQzODQ2MTktYzM5ZTE4NGEtMTM4NDhmZjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:40.571026Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jyhtcs5942r01vc9ykqk027r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGM1NzJhNGUtNGEzYzFkZGUtZjc3ZjM1NmItZTIzODhiZDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:40.673589Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jyhtcsa21cmb923q4496tgrj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGU3NjA0OWUtMzJmNGRiOWEtOTIxMDNmMWMtNjdmYWRhZDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:40.825029Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jyhtcsd99nfb8823wqh35zjq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzdiYTlkOC00ZDU1MWJmMy1mOGQ2NGQ3Yi1mN2NlMTFkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:40.990099Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jyhtcsjhbbq8ahpbxen8bjbj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmQwZDVjYmMtYjM5N2MzYjItZGZkMTk5ZTktZDBjZjI2YTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:41.075000Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jyhtcsq6822ht0gkng47hxeq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDk4ODNkOTYtOGMxOGY4YWItMzZlYTcyODMtYTZiYTk5ZjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:41.158201Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715674. Ctx: { TraceId: 01jyhtcsssc01xnj8rd519fwwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTBiZDJmYTUtODViZTg1ZWItMmJiNjZmNTktNWFlNmY5YjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:41.244892Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jyhtcswe609ab9qkfqn7c98x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTg1Njc2MGYtMWNlY2JjZjEtOTc2YmNlNGMtZGQ0MzZhMTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:41.332066Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715676. Ctx: { TraceId: 01jyhtcsz408gaj2qfaf2pgty6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTliYTNiYTctYzFlZDgyYzctMTk0NmJjOGItYmQwNjUxYTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:41.419107Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715677. Ctx: { TraceId: 01jyhtct1v4ba7gn44gd0xv46y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjJmYmQ0YzktMmNmMDg2MGQtZWUxZDI2YmMtOWExYmRlMjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:41.525458Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715678. 
Ctx: { TraceId: 01jyhtct4j4d072hkvzx1mf73e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmQ2Yzc1ZWQtZTQyMzYzNDQtMWIwYzEyMzktYWRjZGY2MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:41.718340Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715679. Ctx: { TraceId: 01jyhtct8823kxwqrww2ewth23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzAyMjNlOTQtYTU5YzYyYmQtZjBhNjgwNTgtNTg5M2FhZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:41.928308Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715680. Ctx: { TraceId: 01jyhtctdy16h0ksmqsnsvhd6n, Database: , DatabaseId: /Root, Sessio ... anner.cpp:120: TxId: 281474976715727. Ctx: { TraceId: 01jyhtcyxc2kn64n69qae83mer, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzNmYzI0OGItZDI0MjRhNTUtZWU5OTI0NS1mMDlkNzk0ZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.454883Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715728. Ctx: { TraceId: 01jyhtcyzndp8z4wyrefw6wpa4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzdkZmRlZGYtZTcwZTEwOWUtOWE3ZDNmZjEtMWE4ZDRlODk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.527800Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715729. Ctx: { TraceId: 01jyhtcz1xbewwgmv6qaqnqdj7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDljZTg0M2YtYmE3ZDA1MWUtNzc2MWQyZGEtODhlOTM2NmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.601704Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715730. Ctx: { TraceId: 01jyhtcz46atxqj6ag658ynyr0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTA0ZDgyNGEtNzYxNzExMTYtOWYzYTIxMjMtYjRkZDc5MTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.675932Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715731. Ctx: { TraceId: 01jyhtcz6f77x901dpw169gr6a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmNiY2NhMjYtZDc4YzRkZWMtY2IyZjU4NDQtOTI1YWVmMWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.776328Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715732. Ctx: { TraceId: 01jyhtcz8tdbhvdd3wn44a6nyf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjBlOGNlNmYtNjI0OGEzZGUtYjFhZDk1Mi00ZTNmMDg4MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.851138Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715733. Ctx: { TraceId: 01jyhtczbyfnejg3352xv8h50e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDMxOThjYzAtNDkxODMwM2UtMmRmNzU2NmQtOTc0NjlmZjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.924867Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715734. Ctx: { TraceId: 01jyhtcze912vkxmp2phre0swy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTliMWNjZTQtNDFlYWNhOTgtM2U2YTAwZjQtNjA2NWQxY2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:34:46.998296Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715735. Ctx: { TraceId: 01jyhtczgk3amnhpv440xxyr7m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWMxMTM1YmItNDQ0NDVmODYtM2EyMzU1MGQtNGM5MjM2Yzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.072236Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715736. Ctx: { TraceId: 01jyhtczjwepxhg7dc3v5cdh68, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWZkZmJkNjAtODhhNDk2NzUtYTc5OGEzNjYtMWQ4ODA4OTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.143977Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715737. Ctx: { TraceId: 01jyhtczn7839r6managw2ctfh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjNlZjU5M2QtM2QxYzUzNTUtM2ZjOWUwZGEtMWYyOWUxYWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.200766Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715738. Ctx: { TraceId: 01jyhtczqc178eccr6dj35fev9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWYyODEyOGUtMmM0MTZiYWYtMWY4ZGQxYTQtYjk1YzFjMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.272395Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715739. Ctx: { TraceId: 01jyhtczs692n7nkm6gaqqzr8n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDRiNWI4ZmUtOTE5NWQ1NjEtOTRiY2Y0NjUtYjQxNzZmZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.448447Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715740. Ctx: { TraceId: 01jyhtczyfa03by68akagxy4ek, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWEwZTQ4ZjYtNTcxMjk5ZmEtNjQxZTg5OTctZTg3MTVjNDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.527499Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715741. Ctx: { TraceId: 01jyhtd00z7eqj3c3pegayqfnw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2JiNjk4ZmMtNzE0NWExMTItMzIxNWQ3ZTItYmE4OTliOTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.606964Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715742. Ctx: { TraceId: 01jyhtd03e62dgd661jd22bsmv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmRhYzA4YmEtOWE2YTYwYWUtNWM0NjMxMjAtODI2NDYxZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.690323Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715743. Ctx: { TraceId: 01jyhtd05y76c7rkecxb86pdv2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjA2OWQ4OTktNjUzNDYxZi1lZjk4OTgzOS03ZTBlYzNm, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.772493Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715744. Ctx: { TraceId: 01jyhtd08h2wvv6nxxceqfraq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjJlMjA5ZmMtZjY1Njk5NTEtNjQ3MzE0NjctOWJiZTIwMTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:34:47.852699Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715745. Ctx: { TraceId: 01jyhtd0b38p22cggc0rg584ca, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTg4MWY3NTktODlkOTM4NDAtYzYzN2UxZjQtYWI1MGJmOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.933536Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715746. Ctx: { TraceId: 01jyhtd0dk7seama3zreag1678, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWVlNDgyNzgtMjhiMDJkNS01ZTBmZDg3MS00YTdkNDNlZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.049123Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715747. Ctx: { TraceId: 01jyhtd0h4dcdbxc1qpctgervr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjhhZTkzNjQtM2I4MTU1MTQtOGYzN2QyY2EtOTdlYmYyNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.132075Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715748. Ctx: { TraceId: 01jyhtd0kraecexxpv2zrz2rnv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjE5M2M3ZTktZmNjMzQ3OGUtNGFjMDFhZTUtOTkxZDVmYjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.213510Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715749. Ctx: { TraceId: 01jyhtd0pbaezkydt7qhxdbthj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2U5MjFlZGQtZDJmM2QyM2MtM2ZiYzA2LWJiM2JmMmQz, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.293776Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715750. Ctx: { TraceId: 01jyhtd0rx0366w23az74gb48h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTFhODA4MWUtOGRmYmI0MzctNTg5OGNmODMtNjdmOTJhMjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.373759Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715751. Ctx: { TraceId: 01jyhtd0vd7rrc8czk0khcakt3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzM2NzUwM2EtZDY0ZGM0YjUtODI0Y2FjYTQtNDc2MTk1N2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.455972Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715752. Ctx: { TraceId: 01jyhtd0xx4kddx4sa3v65cd2t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWNhMGY3YjYtNDhmZTg4ZWQtZmIxZWU4NDAtYmFjZjE3NTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.537311Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715753. Ctx: { TraceId: 01jyhtd10gd16dc2c9dxt73mng, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzcxNDJiYzEtZTFkMzYwODktMjNjZTkyY2QtNGVmZWI4ZjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.652639Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715754. Ctx: { TraceId: 01jyhtd131ekejy822wz8m447y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTBlNDU1Yy1hYmJmNGI0ZS04ODcyODFhYy0zMjRkMDU1OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:34:48.734794Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715755. Ctx: { TraceId: 01jyhtd16k2zemfpph40swqdzb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NThhMzZhNWMtZDc4ZWRkOWEtODBiNTgxYmItNDhiNzBhMTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.815254Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715756. Ctx: { TraceId: 01jyhtd1966abmzktzbyzw5p1k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjY1ZDExNzYtMWIwOGY2ODItZDlkYjBkMmEtNGE5OWM3OWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.893590Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715757. Ctx: { TraceId: 01jyhtd1bpa95mmm8ksqrjz276, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODNhZGMyNTgtZjYwZjY2YzQtOWUwYjQ1YzItZWU5NmU5YmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.971085Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715758. Ctx: { TraceId: 01jyhtd1e45gmzs1qg8zgkbfjb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWI4MmNlYzAtZDY5YzUyMGQtZDAzNzhkOWMtN2UyNjljNmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:49.051597Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715759. Ctx: { TraceId: 01jyhtd1gj48qad9arwt6c7608, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjUwYTU2NjMtMmI2ZmU1MDQtZDQxNTI3ZGEtNTQ5NGIzNjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:49.240972Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715760. Ctx: { TraceId: 01jyhtd1kgab85kc4hnkgen619, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGEzNGRlMDItYmQyNzU3MTAtNTdlMWNhYTctZGE0MzY2Mzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> ExternalBlobsMultipleChannels::SingleChannel [GOOD] Test command err: 2025-06-24T20:34:38.437071Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:38.437496Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:38.437734Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045be/r3tmp/tmpvvfihl/pdisk_1.dat 2025-06-24T20:34:38.844618Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:34:38.853126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:38.930249Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:38.946924Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797275153628 != 1750797275153632 2025-06-24T20:34:39.005628Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:39.005786Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:39.020743Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:39.137822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:39.634741Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:697:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:39.634876Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:707:2583], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:39.634965Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:39.640700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:39.695017Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:39.859423Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:711:2586], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:34:39.938841Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:781:2625] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:40.394416Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhtcrcg9p831hkq2cj6jwt7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmY3NzRiNzktMzI5NGRlMDAtMjA2YTJhZmMtZjZjNDcwZWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:40.507748Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhtcs4t31bfawewxgk1xan1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzIyM2I3YmMtNmJmMmMzNC0xYjNjOGI2YS0xZmFmMmM3OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:40.612567Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhtcs83fkedbz8es9er4t6w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjZiM2ZlZGYtNDZhNDc5NC1lMzM1ZTI1ZS1iMDkwNzMyYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:40.750222Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhtcsbbb5sq1nb9ytjz57vx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2M0ZGZmOGMtMjc4MDNkYjgtZjBmMjhiMDItYTY2NDljNDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:40.848245Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhtcsfn7ds4z96v2m4wgabx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGQ3N2U4YTUtMTQ4ODQ3ZC0xZjcyY2YwOS1kYzcyYzBkOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:40.967749Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhtcsk46kgqj5ddwfq7f4cb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWU1OTg0ZTEtNTAzYzg4NDctNDc2YjBiMDgtMWIxY2UzNDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:41.048789Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyhtcspe337ncvtpwf7531f8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTFjNjg5NGQtNjBlMmY1YTUtMmRhYTUzNjUtYWNhZWYwMjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:41.143089Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyhtcsrz6st0wgazdcpat9ap, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODM0ZmM3OTItOTAzYmE4MWItZmU0YTc1M2YtZDc4Mjg1MjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:41.229356Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. 
Ctx: { TraceId: 01jyhtcsw4684tc4633xt9j8y7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmI4ZmI2NGEtNDJkOTg2MTgtNzA1N2YwZjUtYTNmMTZmOTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:41.315345Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jyhtcsym263ct7r4tnq6xwrr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjM2MTQ4MDctYjkzZjk1ZjMtYjJhNjViMTItZjdjODAwNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:41.479757Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jyhtct1aa6zz5c46pctf8ghn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzNhZDEwMWMtYmY1ZjhhZmMtMjJiNzM0NmEtZTE5NzI4NGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:41.699704Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jyhtct6ecba9fzhsxm3fd858, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzgyMzM5ZGMtOGNiOGU5OWQtYjU0NWIzM2UtZDRlZjVkY2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:41.810367Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jyhtctdhb5rz7db3x09s9b42, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmNjNTM1NzktOTM0NzY2NDEtM2UzMjkxNTEtMWMzYTc3YzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:41.882649Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jyhtctgsasr1k3xhb5sp6ww8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDJkNDM0YTYtNzY3YmJlNjAtNmNhOWM1MDYtM2Y0M2MwNDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:41.960189Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715674. Ctx: { TraceId: 01jyhtctk3aafkft4x7hamjtz9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWEzZDc0MWItZGIzY2I4MGQtYTYwMTg0OGItZGI1OGNjNWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:42.034761Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jyhtctng7djjn16x5ds449dy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTk5Y2QzNS00OGUyMDExNC0yNmZmMzg1Ni1iYWViY2ZhMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:42.108120Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715676. Ctx: { TraceId: 01jyhtctqs0tfyzchvdcrcer2w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmRiM2MwN2UtY2Q0YmY4N2QtNjgxYjg5M2ItZjMyODIwYjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:42.179255Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715677. Ctx: { TraceId: 01jyhtctt22cd6x8p3ctvcgqk3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTQxODdlNzItYjkyMTU2MTMtZGE3ZjZlNzItZWQyMjhkN2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:42.252448Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715678. 
Ctx: { TraceId: 01jyhtctw9ffs01e35pnsaab23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmZlYTUyMjEtZDU1Y2RiZWUtOTcyNTM3ODktMzljMzA0MmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:42.328035Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715679. Ctx: { TraceId: 01jyhtctyj2k1tfpgkyetxzs24, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjgyMTdjZWEtZjkxOTEwOGItMjA5ZjllNDktYTM2YTM0Mjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:42.411560Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715680. Ctx: { TraceId: 01jyhtcv0zekar6y3cbnk7a7wa, Database: , DatabaseId: /Root, Sessio ... p:120: TxId: 281474976715727. Ctx: { TraceId: 01jyhtcyntb2987s4hwdn7qrbn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzZlZDU3OTAtM2RiMTA4YzktMmJhMzVkYzctODM3MjZjODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.257342Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715728. Ctx: { TraceId: 01jyhtcyr78zfd7xa267tr8en8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTc3NGFmYTUtNjU1OGM1ZS01M2FkYjk4YS0yNjE3NmMyMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.388785Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715729. Ctx: { TraceId: 01jyhtcyvy4pdvphefarw4v9fm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmUwNzQxNjYtMzk4ZTkzNWYtYTAyZGQwYTYtMzI4OTk0Njg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.526382Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715730. Ctx: { TraceId: 01jyhtcyzy0d819c1xkpz6xcsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Mjk2YWFjMTItN2ZkZDA3MmYtYTdhYTRlYjItOWIyZjY1MTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.600491Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715731. Ctx: { TraceId: 01jyhtcz46c5r7cdrmrm9mwkat, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGQ2NzY5ZjAtNDE0YjgzYTAtYTBiNGU0NTMtYWFhODVhM2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.692448Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715732. Ctx: { TraceId: 01jyhtcz6f9h6dv332s858k608, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjA1ZDEwZTItZDM1NzhjZDgtYTI2NzVmNzItNmRiOWE2NmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.794744Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715733. Ctx: { TraceId: 01jyhtcz9g5xszdk3e96xg8yjx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTg0MmI5MzItZDk4ZjBjYmMtYzI1Y2FmYWMtYjFiZmZkNjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:46.950482Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715734. Ctx: { TraceId: 01jyhtczczayq01gb5cdtvv15h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWU2NWFkMjItY2Q4MGZiNC1iODZkYTVhOC1jMTE4OGU0NQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:34:47.033950Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715735. Ctx: { TraceId: 01jyhtczhd90etkwnve0jt3x8k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjBhNjEyYzctNmVlZWIyMjktYzI1Y2MxMWYtNmQ1YTUxMTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.139577Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715736. Ctx: { TraceId: 01jyhtczm38gr5v9h0d0c5weje, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2Q2NmVkYjMtYzQ5ZjJlNzUtNWEzMjBlNTgtN2EyZWMyNDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.215571Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715737. Ctx: { TraceId: 01jyhtczqa7ehpkha2t6cy2tw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzU0MDcyZS1mZWRmMTJiNS00ZWVmZjI5My1hNDdkMjMxOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.289068Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715738. Ctx: { TraceId: 01jyhtczsp62qbggmv3en285aj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFjNGI5ZWEtMjdkMmY5MGYtYTM3MDcxNDAtNWQ0ODk3NTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.396718Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715739. Ctx: { TraceId: 01jyhtczw05sgqyes0h3adq9dj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGM4OTU5ZDUtNThiZjIxYzctMmIwNDZjNzYtMTljNTljNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.516630Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715740. Ctx: { TraceId: 01jyhtczzb2snq5bwykjf9nbch, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTlmYjQ1OGItYjg3MjE4NzgtNGMwOTc2OWItNGEwZDYyZWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.646288Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715741. Ctx: { TraceId: 01jyhtd03d19dr2wrdsz6fbwhn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTg0NTU2YWMtNWM2ZGUyNmUtZDM3MzM0M2YtZTYzMWNiY2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.754854Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715742. Ctx: { TraceId: 01jyhtd0791g56kzqrtcr3ec3h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTdkODZhYTItZWMxODMwMGUtMjc0YTk3MDMtOGM1ZjYyYjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.858490Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715743. Ctx: { TraceId: 01jyhtd0an5cjzeyx7pw3ah62y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDVlNWE2MmYtYmMzNTVhODYtMTNkODBkZmItNTQ0ODM5Yjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:47.931502Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715744. Ctx: { TraceId: 01jyhtd0dt4v5721f57c7qstd5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhmMGFiZDktNDJiNjI2NzUtZDM1N2ZlMzQtN2VjYWVkMGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:34:48.006875Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715745. Ctx: { TraceId: 01jyhtd0g33hnr4vaskkqxryk8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjIzYzI0ODktZjVjMzIxM2ItZmQ4MDVmNjItYWUxODY2NDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.081897Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715746. Ctx: { TraceId: 01jyhtd0je429qay6kap4szpxq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzUwZTgyY2UtYTI2NTdkNmItZTlhNWY4ZTctODQ5YTk3YmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.213510Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715747. Ctx: { TraceId: 01jyhtd0ms0002neze2e3crn10, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDhhMTQxYmQtOGQ3OGY3NDUtZTlhMWJhZDAtZTNlMzQ5OGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.286981Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715748. Ctx: { TraceId: 01jyhtd0rx7enafty6bmdfb70g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2FkNDk0YmEtNjgxNDc2Y2MtMWQ1NTA1OGEtY2UyZGZiZTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.358606Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715749. Ctx: { TraceId: 01jyhtd0v60q70t4ftjndaxnzs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODQ4ZWZkZDgtNDJkNTlkNTgtNWExMGQ3NDctMWQ2MDAzYTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.430973Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715750. Ctx: { TraceId: 01jyhtd0xdeh5ew9vn6y2x72bc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmVjMmZkZGItZjdkZGIyYTktM2ZiNjhlM2MtYWEwNTQzZTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.502838Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715751. Ctx: { TraceId: 01jyhtd0zpe92jrarfcbmyymm5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2M2MjVmZDItMTg5OTc0Mi1hOWFiZjE4ZC1kNWY0YjBkMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.576997Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715752. Ctx: { TraceId: 01jyhtd121a5qenhckr493fd03, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWFiMWFiNzgtZWRjMThmMjUtNThlNzAyOTktZDk0MTE2Mjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.652162Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715753. Ctx: { TraceId: 01jyhtd14857mtdf5b4pzajhaz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTU2NWFjZDgtODhlYmEyNTUtNjJlZTQwMWUtM2E2NDZkZDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.722752Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715754. Ctx: { TraceId: 01jyhtd16k8ts36r4kc8j3534v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTU1YTk5ZTAtMjMwNTExYTEtODYzN2ZjODgtNTllZWEyNWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:34:48.793632Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715755. Ctx: { TraceId: 01jyhtd18s5ym3m7z4m2jka82e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWQyNDc3NWYtNGNiOGE5N2YtODU5OTgwNzgtZmM1ODQyODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.893421Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715756. Ctx: { TraceId: 01jyhtd1b1b6tsgrhdjr9b1sk2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjBjNWY2ZDktN2VhMWU1MDMtMTNhODc3OTUtNGZmMzNjZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:48.962618Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715757. Ctx: { TraceId: 01jyhtd1e4cq213n19m8xtepk5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWM4ZmMxOTUtZDVjODJlM2MtYmJmNGQ1MjAtZWNhYjE0NTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:49.032933Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715758. Ctx: { TraceId: 01jyhtd1gad7vffkmh1y93n2ch, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDc5MGJiMS1mMTc2MjI5Ny0yYjQ5ZTRiZC0yMzcwYTc5OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:49.102728Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715759. Ctx: { TraceId: 01jyhtd1jgfa1mzrdjqvhkjwp8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODcyMzg3MmYtN2NhYjg2ZjItYWM1NTNkYjYtZWQ0ZDVjM2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:49.309381Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715760. Ctx: { TraceId: 01jyhtd1nhedw10gd7k0jmn5b7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2Q4Y2VjYmMtZDEwNTc5NDAtYjM2Mzg1MDEtYjdmYzQxNDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> DataShardVolatile::DistributedWriteThenReadIteratorStream [GOOD] >> DataShardVolatile::DistributedWriteThenScanQuery >> KqpScanLogs::WideCombine+EnabledLogs ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::SelfJoinQueryService [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/2btm/00378c/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk6 Trying to start YDB, gRPC: 15840, MsgBus: 62336 2025-06-24T20:34:41.923686Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617066484126665:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:41.923739Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00378c/r3tmp/tmpINJGZS/pdisk_1.dat 2025-06-24T20:34:42.490492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:42.490604Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:42.495096Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:42.528055Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:42.528804Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617066484126639:2079] 1750797281915920 != 1750797281915923 TServer::EnableGrpc on GrpcPort 15840, node 1 2025-06-24T20:34:42.617174Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:42.617194Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:42.617211Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:42.617344Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62336 2025-06-24T20:34:42.957434Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:62336 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:43.522050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:43.564268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:34:43.579970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:43.761208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:43.951046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:44.041236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:46.211551Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617087958964773:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:46.211671Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:46.608742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:46.658744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:46.696511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:46.736798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:46.791027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:46.875429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:46.927098Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617066484126665:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:46.927298Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:46.975364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:47.091373Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519617092253932740:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:47.091444Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:47.091620Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617092253932745:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:47.095414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:47.111011Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617092253932747:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:34:47.211014Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617092253932800:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (OptionalType (DataType 'Uint64))) (let $4 (DataType 'String)) (let $5 (OptionalType $4)) (let $6 (StructType '('"Key" $3) '('"Value" $5))) (let $7 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($18) (block '( (let $19 (lambda '($20) (block '( (let $21 (VariantType (TupleType $6 $6))) (let $22 (Variant $20 '0 $21)) (let $23 (Variant $20 '1 $21)) (return $22 $23) )))) (return (FromFlow (MultiMap (ToFlow $18) $19))) ))) '('('"_logical_id" '689) '('"_id" '"fc0bfba4-489a25fd-a544bbb2-73089b47")))) (let $8 (DqCnMap (TDqOutput $7 '0))) (let $9 (DqCnBroadcast (TDqOutput $7 '1))) (let $10 (StructType '('"t1.Key" $3) '('"t1.Value" $5) '('"t2.Key" $3) '('"t2.Value" $5))) (let $11 '('('"_logical_id" '608) '('"_id" '"e2a01aa-c4e516b6-b7fbbfe4-55e4e447") '('"_wide_channels" $10))) (let $12 (DqPhyStage '($8 $9) (lambda '($24 $25) (block '( (let $26 '('Many 'Hashed 'Compact)) (let $27 (SqueezeToDict (FlatMap (ToFlow $25) (lambda '($30) (block '( (let $31 (Member $30 '"Value")) (let $32 (Nothing (OptionalType (TupleType $4 $6)))) (let $33 (IfPresent $31 (lambda '($34) (Just '($34 $30))) $32)) (return (If (Exists $31) $33 $32)) )))) (lambda '($35) (Nth $35 '0)) (lambda '($36) (Nth $36 '1)) $26)) (let $28 (Sort (FlatMap $27 (lambda '($37) (block '( (let $38 '('"Value")) (let $39 '('"Key" '"t1.Key" '"Value" '"t1.Value")) (let $40 '('"Key" '"t2.Key" '"Value" '"t2.Value")) (return (MapJoinCore (OrderedFilter (ToFlow $24) (lambda '($41) (Exists (Member $41 '"Value")))) $37 'Inner $38 $38 $39 $40 '('"t1.Value") '('"t2.Value"))) )))) (Bool 'true) (lambda '($42) (Member $42 '"t1.Key")))) (let $29 (lambda '($43) (Member $43 '"t1.Key") (Member $43 '"t1.Value") (Member $43 '"t2.Key") (Member $43 '"t2.Value"))) (return (FromFlow (ExpandMap $28 $29))) ))) $11)) (let $13 (DqCnMerge (TDqOutput $12 '0) '('('0 '"Asc")))) (let $14 (DqPhyStage '($13) (lambda '($44) (FromFlow (NarrowMap (ToFlow $44) (lambda '($45 $46 $47 $48) (AsStruct '('"t1.Key" $45) '('"t1.Value" $46) '('"t2.Key" $47) '('"t2.Value" $48)))))) '('('"_logical_id" '620) '('"_id" '"5975f70d-438e217c-428596c0-f059f395")))) (let $15 '($7 $12 $14)) (let $16 '('"t1.Key" '"t1.Value" '"t2.Key" '"t2.Value")) (let $17 (DqCnResult (TDqOutput $14 '0) $16)) (return (KqpPhysicalQuery '((KqpPhysicalTx $15 '($17) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType $10) '0 '0)) '('('"type" '"query")))) ) >> KqpPg::TableInsert-useSink [GOOD] >> KqpPg::TempTablesSessionsIsolation ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_exchange/unittest >> Cdc::ResolvedTimestampForDisplacedUpsert [GOOD] Test command err: 2025-06-24T20:30:31.943011Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519615992329994556:2194];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:31.944339Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00474b/r3tmp/tmpHfBVFJ/pdisk_1.dat 2025-06-24T20:30:32.549503Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:32.566699Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:32.566835Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:32.570364Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23080, node 1 2025-06-24T20:30:32.803619Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:30:32.803647Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:30:32.803654Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:30:32.816493Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:30:32.919665Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:30:32.944895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:32.976490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:33.018344Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:7519616000919929571:2268] 2025-06-24T20:30:33.018613Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:30:33.031948Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:30:33.032016Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:30:33.039589Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T20:30:33.039659Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:30:33.039702Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:30:33.040146Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:30:33.040215Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:30:33.040251Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:7519616000919929585:2268] in generation 1 2025-06-24T20:30:33.047857Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:30:33.156404Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:30:33.156572Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:30:33.156618Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:7519616000919929589:2269] 2025-06-24T20:30:33.156629Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:30:33.156662Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:30:33.156675Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:30:33.156839Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:30:33.156913Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:30:33.156952Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7519615996624962271:2302], serverId# [1:7519616000919929587:2311], sessionId# [0:0:0] 2025-06-24T20:30:33.157022Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:30:33.157041Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:30:33.157060Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:30:33.157084Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:30:33.157733Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:30:33.158003Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976710657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:30:33.158105Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976710657 at tablet 72075186224037888 2025-06-24T20:30:33.159953Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:30:33.161228Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:30:33.161340Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T20:30:33.164567Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7519616000919929603:2320], serverId# [1:7519616000919929604:2321], sessionId# [0:0:0] 
2025-06-24T20:30:33.174241Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976710657 at step 1750797033209 at tablet 72075186224037888 { Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750797033209 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T20:30:33.174286Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:30:33.174474Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:30:33.174556Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:30:33.174577Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T20:30:33.174602Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1750797033209:281474976710657] in PlanQueue unit at 72075186224037888 2025-06-24T20:30:33.174896Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1750797033209:281474976710657 keys extracted: 0 2025-06-24T20:30:33.175040Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T20:30:33.175769Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:30:33.175813Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T20:30:33.178206Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T20:30:33.179834Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:30:33.181004Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1750797033209} 2025-06-24T20:30:33.181061Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:30:33.181104Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 1750797033208 2025-06-24T20:30:33.181116Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:30:33.184130Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:30:33.184160Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:30:33.184192Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T20:30:33.184242Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750797033209 : 281474976710657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:7519615996624962015:2159], exec latency: 4 ms, propose 
latency: 9 ms 2025-06-24T20:30:33.184275Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976710657 state Ready TxInFly 0 2025-06-24T20:30:33.184321Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:30:33.184421Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1750797033216 2025-06-24T20:30:33.186577Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:153: [ChangeSender][72075186224037888:1][1:7519616000919929589:2269][Inactive] Handle NKikimrChangeExchange.TEvActivateSender 2025-06-24T20:30:33.192913Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976710657 datashard 72075186224037888 state Ready 2025-06-24T20:30:33.192967Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedRe ... GE DEBUG: change_sender_cdc_stream.cpp:628: [CdcChangeSenderMain][72075186224037888:1][29:804:2647] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 0 }] } 2025-06-24T20:34:49.928289Z node 29 :TX_DATASHARD INFO: datashard_change_sending.cpp:215: TTxRequestChangeRecords Execute: at tablet# 72075186224037888 2025-06-24T20:34:49.928891Z node 29 :TX_DATASHARD DEBUG: datashard_change_sending.cpp:235: Send 1 change records: to# [29:804:2647], at tablet# 72075186224037888 2025-06-24T20:34:49.929024Z node 29 :TX_DATASHARD INFO: datashard_change_sending.cpp:260: TTxRequestChangeRecords Complete: sent# 1, forgotten# 0, left# 0, at tablet# 72075186224037888 2025-06-24T20:34:49.929378Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:633: [CdcChangeSenderMain][72075186224037888:1][29:804:2647] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 6 Group: 0 Step: 9000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-24T20:34:49.929832Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:111: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][29:889:2647] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 6 Group: 0 Step: 9000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-24T20:34:49.930691Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-24T20:34:49.930856Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-24T20:34:49.931112Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 10 requestId: cookie: 6 2025-06-24T20:34:49.931432Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-24T20:34:49.931485Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-24T20:34:49.931619Z node 29 :PERSQUEUE 
DEBUG: pq_impl.cpp:2201: [PQ: 72075186224037889] got client message topic: Table/Stream/streamImpl partition: 0 SourceId: '\00072075186224037888' SeqNo: 6 partNo : 0 messageNo: 11 size 26 offset: -1 2025-06-24T20:34:49.931947Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1293: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 process heartbeat sourceId '\00072075186224037888' version v9000/0 2025-06-24T20:34:49.932150Z node 29 :PERSQUEUE INFO: partition_write.cpp:1797: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 emit heartbeat v9000/0 2025-06-24T20:34:49.932470Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob processing sourceId '\00072075186224037889' seqNo 0 partNo 0 2025-06-24T20:34:50.043108Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob complete sourceId '\00072075186224037889' seqNo 0 partNo 0 FormedBlobsCount 0 NewHead: Offset 5 PartNo 0 PackedSize 107 count 1 nextOffset 6 batches 1 2025-06-24T20:34:50.046705Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Add new write blob: topic 'Table/Stream/streamImpl' partition 0 compactOffset 5,1 HeadOffset 5 endOffset 5 curOffset 6 d0000000000_00000000000000000005_00000_0000000001_00000? size 93 WTime 8979 2025-06-24T20:34:50.047397Z node 29 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T20:34:50.047729Z node 29 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 5 partNo 0 count 1 size 93 2025-06-24T20:34:50.049489Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 5 count 1 size 93 actorID [29:753:2617] 2025-06-24T20:34:50.049945Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037889' partition 0 offset 5 partno 0 count 1 parts 0 suffix '63' size 93 2025-06-24T20:34:50.063759Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 44 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T20:34:50.064062Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:34:50.064311Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Answering for message sourceid: '\00072075186224037888', Topic: 'Table/Stream/streamImpl', Partition: 0, SeqNo: 6, partNo: 0, Offset: 5 is stored on disk 2025-06-24T20:34:50.064993Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037889, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=763, count=6, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:34:50.065215Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 11 requestId: cookie: 6 2025-06-24T20:34:50.065712Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:160: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][29:889:2647] Handle NKikimrClient.TResponse { SessionId: TxId: Success { Response: Status: 1 ErrorCode: OK PartitionResponse { CmdWriteResult { AlreadyWritten: false SourceId: "\00072075186224037888" SeqNo: 6 Offset: 5 WriteTimestampMS: 8979 PartitionQuotedTimeMs: 0 TotalTimeInPartitionQueueMs: 0 WriteTimeMs: 0 TopicQuotedTimeMs: 0 WrittenInTx: false } Cookie: 6 } } } 2025-06-24T20:34:50.065962Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037888:1][29:804:2647] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-06-24T20:34:50.066338Z node 29 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 1, at tablet# 72075186224037888 2025-06-24T20:34:50.066466Z node 29 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 6, at tablet: 72075186224037888 2025-06-24T20:34:50.067423Z node 29 :TX_DATASHARD INFO: datashard_change_sending.cpp:335: TTxRemoveChangeRecords Complete: removed# 1, left# 0, at tablet# 72075186224037888 ... checking the update is logged before the new resolved timestamp >>>>> GetRecords path=/Root/Table/Stream partitionId=0 2025-06-24T20:34:50.195659Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-24T20:34:50.195882Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-24T20:34:50.196283Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 10 Topic 'Table/Stream/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 6 max time lag 0ms effective offset 0 2025-06-24T20:34:50.198881Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 10 added 6 blobs, size 763 count 6 last offset 5, current partition end offset: 6 2025-06-24T20:34:50.199062Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 10. Send blob request. 2025-06-24T20:34:50.200878Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 1 parts_count 0 source 1 size 93 accessed 6 times before, last time 1970-01-01T00:00:06.000000Z 2025-06-24T20:34:50.200995Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 1 partno 0 count 1 parts_count 0 source 1 size 174 accessed 3 times before, last time 1970-01-01T00:00:06.000000Z 2025-06-24T20:34:50.201045Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 2 partno 0 count 1 parts_count 0 source 1 size 93 accessed 1 times before, last time 1970-01-01T00:00:06.000000Z 2025-06-24T20:34:50.201093Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. 
Partition 0 offset 3 partno 0 count 1 parts_count 0 source 1 size 155 accessed 0 times before, last time 1970-01-01T00:00:08.000000Z 2025-06-24T20:34:50.201141Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 4 partno 0 count 1 parts_count 0 source 1 size 155 accessed 0 times before, last time 1970-01-01T00:00:08.000000Z 2025-06-24T20:34:50.201188Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 5 partno 0 count 1 parts_count 0 source 1 size 93 accessed 0 times before, last time 1970-01-01T00:00:08.000000Z 2025-06-24T20:34:50.201309Z node 29 :PERSQUEUE DEBUG: read.h:121: Reading cookie 10. All 6 blobs are from cache. 2025-06-24T20:34:50.201536Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 6 blobs 2025-06-24T20:34:50.202223Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 75 from pos 0 cbcount 1 2025-06-24T20:34:50.202482Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 1 totakecount 1 count 1 size 154 from pos 0 cbcount 1 2025-06-24T20:34:50.202591Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 2 totakecount 1 count 1 size 75 from pos 0 cbcount 1 2025-06-24T20:34:50.202694Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 3 totakecount 1 count 1 size 135 from pos 0 cbcount 1 2025-06-24T20:34:50.202796Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 4 totakecount 1 count 1 size 135 from pos 0 cbcount 1 2025-06-24T20:34:50.202888Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 5 totakecount 1 count 1 size 75 from pos 0 cbcount 1 2025-06-24T20:34:50.203291Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037889' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-24T20:34:50.203419Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037889' partition 0 offset 1 partno 0 count 1 parts 0 suffix '63' 2025-06-24T20:34:50.203476Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037889' partition 0 offset 2 partno 0 count 1 parts 0 suffix '63' 2025-06-24T20:34:50.203537Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037889' partition 0 offset 3 partno 0 count 1 parts 0 suffix '63' 2025-06-24T20:34:50.207650Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037889' partition 0 offset 4 partno 0 count 1 parts 0 suffix '63' 2025-06-24T20:34:50.207773Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037889' partition 0 offset 5 partno 0 count 1 parts 0 suffix '63' 2025-06-24T20:34:50.208097Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 >> KqpScanSpilling::SpillingPragmaParseError >> TConsoleConfigTests::TestModifyConfigItem |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest >> TConsoleTests::TestRestartConsoleAndPools [GOOD] >> TConsoleTests::TestRemoveTenantWithBorrowedStorageUnits >> KqpScanSpilling::SpillingInRuntimeNodes-EnabledSpilling >> TxUsage::WriteToTopic_Invalid_Session_Table [GOOD] >> DataShardVolatile::DistributedWriteShardRestartBeforePlan-UseSink [GOOD] >> DataShardVolatile::DistributedWriteShardRestartAfterExpectation+UseSink >> Viewer::StorageGroupOutputWithSpaceCheckDependsOnVDiskSpaceStatus [GOOD] >> Viewer::StorageGroupOutputWithSpaceCheckDependsOnUsage |89.4%| [TA] $(B)/ydb/services/dynamic_config/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> SystemView::ShowCreateTablePartitionAtKeys [GOOD] >> SystemView::ShowCreateTablePartitionByHash >> TxUsage::WriteToTopic_Invalid_Session_Query >> TModificationsValidatorTests::TestIsValidationRequired_NONE [GOOD] >> TModificationsValidatorTests::TestIsValidationRequired_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIsValidationRequired_TENANTS [GOOD] >> TModificationsValidatorTests::TestIsValidationRequired_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_RemoveItems_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_RemoveItems_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_RemoveItems_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_RemoveItems_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsSameScope_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsSameScope_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsSameScope_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsSameScope_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsNarrowScope_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsNarrowScope_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsNarrowScope_TENANTS_AND_NODE_TYPES [GOOD] >> TNetClassifierUpdaterTest::TestGetUpdatesFromHttpServer >> KqpRe2::IncorrectRegexNoError [GOOD] >> KqpRe2::IncorrectRegexWithoutExecutionNoError >> TopicService::OneConsumer_TheRangesDoNotOverlap >> TConsoleConfigTests::TestModifyConfigItem [GOOD] >> TConsoleConfigTests::TestRemoveConfigItem >> TxUsage::Sinks_Oltp_WriteToTopic_2_Table [GOOD] >> TxUsage::TwoSessionOneConsumer_Table [GOOD] >> TConsoleTests::TestSetDefaultComputationalUnitsQuota [GOOD] >> TConsoleTests::TestTenantConfigConsistency >> TxUsage::TwoSessionOneConsumer_Query >> TxUsage::WriteToTopic_Demo_20_RestartNo_Table [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_2_Query >> KqpScanArrowInChanels::AllTypesColumns >> TConsoleConfigTests::TestRemoveConfigItem [GOOD] >> TConsoleConfigTests::TestRemoveConfigItems >> TYardTest::TestLotsOfTinyAsyncLogLatency [GOOD] >> TYardTest::TestHugeChunkAndLotsOfTinyAsyncLogOrder >> TxUsage::Sinks_Oltp_WriteToTopic_1_Table [GOOD] >> 
BasicUsage::ReadWithRestarts [GOOD] >> BasicUsage::ConflictingWrites >> TConsoleConfigTests::TestRemoveConfigItems [GOOD] >> TConsoleConfigTests::TestConfigureOrderConflicts >> TSubDomainTest::DatashardNotRunAtAllWhenSubDomainNodesIsStopped [GOOD] >> TConsoleConfigTests::TestConfigureOrderConflicts [GOOD] >> TConsoleConfigTests::TestGetItems >> TxUsage::Sinks_Oltp_WriteToTopic_1_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::DatashardNotRunAtAllWhenSubDomainNodesIsStopped [GOOD] Test command err: 2025-06-24T20:32:26.456915Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616488137857809:2238];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:26.456973Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:32:26.517016Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616487803381394:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:26.517062Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:32:26.572331Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519616485378030992:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:32:26.572495Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047b8/r3tmp/tmpgdcJU8/pdisk_1.dat 2025-06-24T20:32:27.455389Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:32:27.519266Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:32:27.523305Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:32:27.556766Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:32:27.567910Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:32:27.581524Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:27.585022Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:32:27.601267Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:27.601406Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:27.606225Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:27.606321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:27.606502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:27.606541Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:27.622271Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:32:27.630541Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-24T20:32:27.630584Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:32:27.631671Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:32:27.633697Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63038 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T20:32:28.009008Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519616488137857824:2144] Handle TEvNavigate describe path dc-1 2025-06-24T20:32:28.053741Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519616496727792883:2462] HANDLE EvNavigateScheme dc-1 2025-06-24T20:32:28.053901Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519616488137857847:2157], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:32:28.057215Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519616492432825424:2325][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519616488137857847:2157], cookie# 1 2025-06-24T20:32:28.059195Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616492432825439:2325][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492432825436:2325], cookie# 1 2025-06-24T20:32:28.059258Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616492432825440:2325][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492432825437:2325], cookie# 1 2025-06-24T20:32:28.059304Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519616492432825441:2325][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492432825438:2325], cookie# 1 2025-06-24T20:32:28.059352Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616488137857498:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492432825439:2325], cookie# 1 2025-06-24T20:32:28.059377Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: 
[1:7519616488137857501:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492432825440:2325], cookie# 1 2025-06-24T20:32:28.059394Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519616488137857504:2059] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519616492432825441:2325], cookie# 1 2025-06-24T20:32:28.059425Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616492432825439:2325][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616488137857498:2053], cookie# 1 2025-06-24T20:32:28.059475Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616492432825440:2325][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616488137857501:2056], cookie# 1 2025-06-24T20:32:28.059491Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519616492432825441:2325][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616488137857504:2059], cookie# 1 2025-06-24T20:32:28.059524Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616492432825424:2325][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616492432825436:2325], cookie# 1 2025-06-24T20:32:28.059549Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616492432825424:2325][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T20:32:28.059583Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616492432825424:2325][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616492432825437:2325], cookie# 1 2025-06-24T20:32:28.059597Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519616492432825424:2325][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T20:32:28.059612Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519616492432825424:2325][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519616492432825438:2325], cookie# 1 2025-06-24T20:32:28.059635Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519616492432825424:2325][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T20:32:28.059710Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519616488137857847:2157], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T20:32:28.067254Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519616488137857847:2157], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519616492432825424:2325] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T20:32:28.067420Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# 
[1:7519616488137857847:2157], cacheItem# { Subscriber: { Subscriber: [1:7519616492432825424:2325] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-24T20:32:28.070728Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519616496727792884:2463], recipient# [1:7519616496727792883:2462], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T20:32:28.070845Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519616496727792883:2462] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T20:32:28.114845Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519616496727792883:2462] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-24T20:32:28.119009Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:75196164967277 ... 
teStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:34:57.086571Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519617135394026226:3671], recipient# [7:7519617135394026225:2674], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:34:57.479718Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [8:7519616549782526809:2108], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:34:57.479899Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [8:7519616549782526809:2108], cacheItem# { Subscriber: { Subscriber: [8:7519616554077494509:2354] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:34:57.479974Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [8:7519616549782526809:2108], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:34:57.480067Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [8:7519616549782526809:2108], cacheItem# { Subscriber: { Subscriber: [8:7519616571257363721:2359] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:34:57.480161Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: 
cache.cpp:265: Send result: self# [8:7519617133898080317:2755], recipient# [8:7519617133898080315:2690], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:34:57.480227Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [8:7519617133898080318:2756], recipient# [8:7519617133898080316:2691], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:34:57.737878Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [8:7519616549782526809:2108], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:34:57.738086Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [8:7519616549782526809:2108], cacheItem# { Subscriber: { Subscriber: [8:7519616554077494509:2354] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:34:57.738227Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [8:7519617133898080320:2757], recipient# [8:7519617133898080319:2692], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:34:57.891357Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7519616542688536398:2151], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:34:57.891542Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7519616542688536398:2151], cacheItem# { Subscriber: { Subscriber: 
[7:7519616564163373722:2767] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:34:57.891676Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519617135394026234:3672], recipient# [7:7519617135394026233:2675], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:34:58.023436Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7519616542688536398:2151], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:34:58.023625Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7519616542688536398:2151], cacheItem# { Subscriber: { Subscriber: [7:7519616546983504235:2559] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:34:58.023740Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519617139688993535:3676], recipient# [7:7519617139688993534:2676], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:34:58.085939Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7519616542688536398:2151], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:34:58.086183Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7519616542688536398:2151], cacheItem# { 
Subscriber: { Subscriber: [7:7519616546983504235:2559] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T20:34:58.086332Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519617139688993537:3677], recipient# [7:7519617139688993536:2677], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> TConsoleTests::TestTenantConfigConsistency [GOOD] >> TConsoleTests::TestSetConfig >> TYardTest::TestHugeChunkAndLotsOfTinyAsyncLogOrder [GOOD] >> TYardTest::TestLogLatency >> DataShardVolatile::DistributedWriteThenScanQuery [GOOD] >> DataShardVolatile::DistributedWriteWithAsyncIndex >> KqpScanSpilling::SelfJoin [GOOD] >> KqpPg::TempTablesSessionsIsolation [FAIL] >> KqpPg::TempTablesDrop >> TConsoleConfigTests::TestGetItems [GOOD] >> TConsoleConfigTests::TestGetNodeItems >> TConsoleTests::TestRemoveTenantWithBorrowedStorageUnits [GOOD] >> TConsoleTests::TestListTenants |89.4%| [TA] $(B)/ydb/core/tx/datashard/ut_change_exchange/test-results/unittest/{meta.json ... results_accumulator.log} >> TYardTest::TestLogLatency [GOOD] >> TYardTest::TestMultiYardFirstRecordToKeep >> KqpScanSpilling::SpillingPragmaParseError [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::SelfJoin [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/2btm/00375d/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk5 Trying to start YDB, gRPC: 25424, MsgBus: 9664 2025-06-24T20:34:51.437216Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617107453458235:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:51.437596Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00375d/r3tmp/tmpp4Dtf8/pdisk_1.dat 2025-06-24T20:34:52.435007Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:52.435105Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:52.442337Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:52.452724Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:52.455315Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617107453458032:2079] 1750797291389557 != 1750797291389560 
2025-06-24T20:34:52.455537Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 25424, node 1 2025-06-24T20:34:52.719512Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:52.719535Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:52.719547Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:52.719661Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9664 TClient is connected to server localhost:9664 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:53.794639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:53.840808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:34:53.854998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:54.051625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:34:54.271597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:54.366291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:56.463908Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617107453458235:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:56.464051Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:56.510835Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617128928296148:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:56.510967Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:57.217420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:57.284306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:57.341987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:57.408923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:57.448805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:57.503797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:57.613023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:57.681893Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617133223264112:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:57.681977Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:57.682238Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617133223264117:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:57.685864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:57.703073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T20:34:57.703271Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617133223264119:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:34:57.799290Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617133223264170:3430] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:59.066455 ... }. CA StateFunc 271646922 2025-06-24T20:35:00.562991Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519617146108166571:2532], TxId: 281474976710683, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-24T20:35:00.563340Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519617146108166572:2533], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:35:00.563430Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519617146108166572:2533], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:35:00.563609Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519617146108166571:2532], TxId: 281474976710683, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646927 2025-06-24T20:35:00.563622Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519617146108166571:2532], TxId: 281474976710683, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:35:00.563656Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519617146108166571:2532], TxId: 281474976710683, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-24T20:35:00.563664Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519617146108166572:2533], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CustomerSuppliedId : . 
CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646923 2025-06-24T20:35:00.563682Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976710683, task: 3. Finish input channelId: 3, from: [1:7519617146108166571:2532] 2025-06-24T20:35:00.563699Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519617146108166572:2533], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:35:00.563716Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519617146108166571:2532], TxId: 281474976710683, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646927 2025-06-24T20:35:00.563727Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519617146108166571:2532], TxId: 281474976710683, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:35:00.563771Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710683, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [10] 2025-06-24T20:35:00.563783Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710683, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 2, seqNo: [10] 2025-06-24T20:35:00.563792Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710683, task: 2. Tasks execution finished 2025-06-24T20:35:00.563803Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7519617146108166571:2532], TxId: 281474976710683, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-24T20:35:00.563878Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710683, task: 2. pass away 2025-06-24T20:35:00.563961Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976710683;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T20:35:00.563982Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519617146108166572:2533], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:35:00.564046Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519617146108166572:2533], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:35:00.564556Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519617146108166572:2533], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:35:00.564621Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519617146108166572:2533], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:35:00.564648Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519617146108166572:2533], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-24T20:35:00.565021Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519617146108166572:2533], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:35:00.565035Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519617146108166572:2533], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-24T20:35:00.565048Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519617146108166572:2533], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. 
PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:35:00.565081Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519617146108166572:2533], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-24T20:35:00.565387Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519617146108166572:2533], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-24T20:35:00.565424Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710683, task: 3. Tasks execution finished, don't wait for ack delivery in input channelId: 3, seqNo: [11] 2025-06-24T20:35:00.565433Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710683, task: 3. Tasks execution finished 2025-06-24T20:35:00.565444Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7519617146108166572:2533], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODI0OWViMDAtN2M3ZDg5YTItMmYwNmZjOTctYmViYmEzYjE=. TraceId : 01jyhtdc6fbme885kb9gm14dyk. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-24T20:35:00.565516Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710683, task: 3. pass away 2025-06-24T20:35:00.565586Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976710683;task_id=3;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T20:35:00.566570Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797300420, txId: 281474976710682] shutting down >> TConsoleConfigTests::TestGetNodeItems [GOOD] >> TConsoleConfigTests::TestGetNodeConfig >> TYardTest::TestMultiYardFirstRecordToKeep [GOOD] >> TYardTest::TestLogOverwriteRestarts >> KqpScanArrowFormat::AggregateCountStar >> TxUsage::WriteToTopic_Demo_2_Table [GOOD] >> DataShardVolatile::DistributedWriteShardRestartAfterExpectation+UseSink [GOOD] >> DataShardVolatile::DistributedWriteShardRestartAfterExpectation-UseSink >> TxUsage::WriteToTopic_Demo_1_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::SpillingPragmaParseError [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/2btm/003720/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk9 Trying to start YDB, gRPC: 1258, MsgBus: 11058 2025-06-24T20:34:54.495767Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617123828225878:2214];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003720/r3tmp/tmpBhVMBB/pdisk_1.dat 2025-06-24T20:34:54.788739Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:34:55.019947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:55.020056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:55.024901Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:55.056886Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:55.059326Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617123828225689:2079] 1750797294433537 != 1750797294433540 TServer::EnableGrpc on GrpcPort 1258, node 1 2025-06-24T20:34:55.317317Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:55.317338Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:55.317346Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:55.331814Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:55.519021Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11058 TClient is connected to server localhost:11058 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:56.665288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:56.721768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:57.104222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:57.337719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:57.442690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:34:59.482618Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617123828225878:2214];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:59.482692Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:59.610622Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617145303063814:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:59.610741Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:59.987342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:00.061967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:00.134204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:00.182239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:00.220572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:00.310001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:00.366189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:00.479600Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617149598031781:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:00.479708Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:00.479808Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617149598031786:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:00.485281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:00.494491Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617149598031788:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:35:00.580429Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617149598031843:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:01.804350Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617153892999416:2482], status: GENERIC_ERROR, issues:
: Error: Pre type annotation, code: 1020
:3:40: Error: Bad "EnableSpillingNodes" setting for "$all" cluster: (yexception) tools/enum_parser/enum_serialization_runtime/enum_runtime.cpp:70: Key 'GraceJoin1' not found in enum NYql::NDq::EEnabledSpillingNodes. Valid options are: 'None', 'GraceJoin', 'Aggregation', 'All'. 2025-06-24T20:35:01.804612Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NjkyYmU3ODctNTc2NDZlNWEtM2Q3NWViZWMtMTkzNTM2OGI=, ActorId: [1:7519617153892999409:2478], ActorState: ExecuteState, TraceId: 01jyhtddxh5ck9qs4jc6m4cpzn, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> TxUsage::WriteToTopic_Demo_3_Table [GOOD] >> TNetClassifierUpdaterTest::TestGetUpdatesFromHttpServer [GOOD] >> TNetClassifierUpdaterTest::TestFiltrationByNetboxCustomFieldsAndTags >> TConsoleConfigTests::TestGetNodeConfig [GOOD] >> TConsoleConfigTests::TestAutoOrder >> KqpRe2::IncorrectRegexWithoutExecutionNoError [GOOD] >> KqpScanArrowInChanels::AllTypesColumns [GOOD] >> KqpScanArrowInChanels::SingleKey >> KqpScanArrowInChanels::AggregateNoColumn >> TConsoleConfigTests::TestAutoOrder [GOOD] >> TConsoleConfigTests::TestAutoSplit >> TxUsage::WriteToTopic_Demo_4_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpRe2::IncorrectRegexWithoutExecutionNoError [GOOD] Test command err: Trying to start YDB, gRPC: 26233, MsgBus: 10608 2025-06-24T20:34:48.575441Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617097480615267:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:48.634501Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00376c/r3tmp/tmpDc02lA/pdisk_1.dat 2025-06-24T20:34:49.136407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:49.136528Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:49.139127Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:49.147303Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617097480615236:2079] 1750797288522425 != 1750797288522428 2025-06-24T20:34:49.158624Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26233, node 1 2025-06-24T20:34:49.243855Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:49.243882Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:49.243890Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:49.244059Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10608 2025-06-24T20:34:49.632314Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server 
localhost:10608 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:50.078869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:50.141052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:50.433713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:50.631172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:50.717619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:52.748297Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617114660486047:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:52.748462Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:53.337309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:53.372390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:53.424114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:53.501358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:53.544388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:53.570668Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617097480615267:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:53.570760Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:53.636021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:53.717482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:53.834979Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519617118955454013:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:53.835069Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:53.838219Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617118955454018:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:53.844107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:53.871972Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617118955454020:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:34:53.928507Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617118955454071:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ( (let $1 (DataType 'Bool)) (let $2 '('('"_logical_id" '235) '('"_id" '"187be2d2-f3ba46ae-a3c19996-a4113b37") '('"_partition_mode" '"single"))) (let $3 (DqPhyStage '() (lambda '() (block '( (let $5 (String '"a[x")) (let $6 (OptionalType (StructType '('"CaseSensitive" $1) '('"DotNl" $1) '('"Literal" $1) '('"LogErrors" $1) '('"LongestMatch" $1) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $1) '('"NeverNl" $1) '('"OneLine" $1) '('"PerlClasses" $1) '('"PosixSyntax" $1) '('"Utf8" $1) '('"WordBoundary" $1)))) (le ... d_root/2btm/00376c/r3tmp/tmphBZMPy/pdisk_1.dat 2025-06-24T20:34:56.503339Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:56.507129Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:56.519951Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:56.520038Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:56.523787Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10858, node 2 2025-06-24T20:34:56.647875Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:56.647899Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:56.647908Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:56.648022Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19829 TClient is connected to server localhost:19829 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:34:57.217302Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:57.242205Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:57.322380Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:57.332004Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:57.490871Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:57.558855Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:00.233689Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617146703559916:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:00.233799Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:00.292478Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:00.375106Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:00.414357Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:00.454668Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:00.488231Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:00.525915Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:00.561436Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:00.643699Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617146703560572:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:00.643813Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:00.644173Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617146703560577:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:00.648156Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:00.658802Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519617146703560579:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:35:00.754725Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519617146703560630:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (DataType 'Uint64)) (let $4 (DataType 'String)) (let $5 (OptionalType $4)) (let $6 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($10) (FromFlow (Filter (ToFlow $10) (lambda '($11) (block '( (let $12 (DataType 'Bool)) (let $13 (OptionalType (StructType '('"CaseSensitive" $12) '('"DotNl" $12) '('"Literal" $12) '('"LogErrors" $12) '('"LongestMatch" $12) '('"MaxMem" $3) '('"NeverCapture" $12) '('"NeverNl" $12) '('"OneLine" $12) '('"PerlClasses" $12) '('"PosixSyntax" $12) '('"Utf8" $12) '('"WordBoundary" $12)))) (let $14 (CallableType '() '($12) '($5))) (let $15 (Udf '"Re2.Grep" '((String '"[") (Nothing $13)) (VoidType) '"" $14 (TupleType $4 $13) '"" '())) (return (Or (Coalesce (== (Member $11 '"Key") (Int32 '1)) (Bool 'false)) (Apply $15 (Member $11 '"Value")))) )))))) '('('"_logical_id" '493) '('"_id" '"17ae3357-13b26d63-6b79aa80-b2f4efcb")))) (let $7 (DqCnUnionAll (TDqOutput $6 '"0"))) (let $8 (DqPhyStage '($7) (lambda '($16) $16) '('('"_logical_id" '573) '('"_id" '"ba19325e-ad0eed3f-f035f06c-76105477")))) (let $9 (DqCnResult (TDqOutput $8 '"0") '())) (return (KqpPhysicalQuery '((KqpPhysicalTx '($6 $8) '($9) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType (StructType '('"Key" (OptionalType $3)) '('"Value" $5))) '"0" '"0")) '('('"type" '"query")))) ) >> TxUsage::WriteToTopic_Demo_3_Query >> TxUsage::WriteToTopic_Demo_2_Query >> KqpScanArrowFormat::AllTypesColumns >> TxUsage::WriteToTopic_Demo_21_RestartNo_Table >> TxUsage::WriteToTopic_Demo_1_Query >> TConsoleTests::TestSetConfig [GOOD] >> TConsoleTests::TestTenantGeneration >> TxUsage::WriteToTopic_Demo_4_Query >> TConsoleConfigTests::TestAutoSplit [GOOD] >> TConsoleConfigTests::TestValidation >> TConsoleTests::TestListTenants [GOOD] >> TConsoleTests::TestListTenantsExtSubdomain |89.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join |89.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join |89.4%| [TA] {RESULT} $(B)/ydb/services/dynamic_config/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> THiveTest::TestHiveBalancerNodeRestarts [GOOD] >> TxUsage::WriteToTopic_Invalid_Session_Query [GOOD] >> TxUsage::TwoSessionOneConsumer_Query [GOOD] >> TConsoleConfigTests::TestValidation [GOOD] >> DataShardVolatile::DistributedWriteWithAsyncIndex [GOOD] >> TxUsage::WriteToTopic_Two_WriteSession_Table >> THiveTest::TestHiveBalancerDifferentResources >> KqpScanArrowFormat::SingleKey >> TxUsage::WriteToTopic_Demo_10_Table >> TConsoleConfigTests::TestCheckConfigUpdates |89.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query >> TFetchRequestTests::CheckAccess [GOOD] >> PQCountersSimple::PartitionWriteQuota >> SystemView::AuthUsers_LockUnlock [GOOD] >> TConsoleTests::TestTenantGeneration [GOOD] >> KqpWorkloadServiceTables::TestLeaseUpdates [GOOD] >> KqpPg::TempTablesDrop [GOOD] >> DataShardVolatile::DistributedWriteThenLateWriteReadCommit >> TConsoleTests::TestListTenantsExtSubdomain [GOOD] >> KqpScanArrowInChanels::SingleKey [GOOD] >> KqpScanArrowFormat::AggregateCountStar [GOOD] >> DataShardVolatile::DistributedWriteShardRestartAfterExpectation-UseSink [GOOD] >> TopicService::OneConsumer_TheRangesDoNotOverlap [GOOD] >> TConsoleConfigTests::TestCheckConfigUpdates [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_2_Query [GOOD] >> SystemView::TopPartitionsByCpuRanges [GOOD] >> SystemView::AuthUsers_Access >> TConsoleTests::TestTenantGenerationExtSubdomain >> KqpScanArrowInChanels::JoinWithParams >> KqpPg::TempTablesWithCache >> DataShardVolatile::DistributedWriteEarlierSnapshotNotBlocked >> KqpScanArrowFormat::AggregateByColumn >> TConsoleConfigTests::TestManageValidators >> TxUsage::Sinks_Oltp_WriteToTopic_3_Table >> TConsoleTests::TestModifyUsedZoneKind >> SystemView::TopPartitionsByTliFields >> TConsoleConfigTests::TestManageValidators [GOOD] >> TConsoleConfigTests::TestDryRun |89.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query |89.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join >> TopicService::OneConsumer_TheRangesOverlap |89.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query >> TConsoleConfigTests::TestDryRun [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestNoYamlWithoutFlag >> PQCountersSimple::PartitionWriteQuota [GOOD] >> PQCountersSimple::PartitionFirstClass >> KqpScanArrowFormat::AllTypesColumns [GOOD] >> KqpScanArrowFormat::AllTypesColumnsCellvec ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadServiceTables::TestLeaseUpdates [GOOD] Test command err: 2025-06-24T20:33:11.235439Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616680061206219:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:11.236362Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004727/r3tmp/tmp2On1vx/pdisk_1.dat 2025-06-24T20:33:11.968247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:11.968405Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:11.971381Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Connecting -> Connected 2025-06-24T20:33:12.028393Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:12.031694Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616680061206122:2079] 1750797191144469 != 1750797191144472 TServer::EnableGrpc on GrpcPort 30582, node 1 2025-06-24T20:33:12.235899Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:33:12.280842Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:12.280876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:12.280902Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:12.281024Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7291 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:12.879860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:33:15.491595Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-24T20:33:15.520304Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616697241075932:2290], Start check tables existence, number paths: 2 2025-06-24T20:33:15.525599Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-24T20:33:15.525639Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-24T20:33:15.525660Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-24T20:33:15.525786Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616697241075932:2290], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-24T20:33:15.525841Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616697241075932:2290], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-24T20:33:15.525892Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616697241075932:2290], Successfully finished 2025-06-24T20:33:15.525964Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-24T20:33:15.532036Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=N2Q0OGMwODAtNGU2NTE3MWUtN2ZmMjQ3YTMtN2VjMTgyMjQ=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id N2Q0OGMwODAtNGU2NTE3MWUtN2ZmMjQ3YTMtN2VjMTgyMjQ= 2025-06-24T20:33:15.532395Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=N2Q0OGMwODAtNGU2NTE3MWUtN2ZmMjQ3YTMtN2VjMTgyMjQ=, ActorId: [1:7519616697241075948:2291], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:33:15.566364Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616697241075950:2300], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:33:15.570391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:15.575451Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616697241075950:2300], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-06-24T20:33:15.577826Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616697241075950:2300], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-24T20:33:15.601110Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616697241075950:2300], 
DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:33:15.703280Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616697241075950:2300], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:33:15.708605Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616697241076001:2332] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:15.708751Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616697241075950:2300], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-24T20:33:15.709080Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: Root, PoolId: sample_pool_id 2025-06-24T20:33:15.709097Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id Root 2025-06-24T20:33:15.709168Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616697241076008:2292], DatabaseId: Root, PoolId: sample_pool_id, Start pool fetching 2025-06-24T20:33:15.710728Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616697241076008:2292], DatabaseId: Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-24T20:33:15.710801Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: Root 2025-06-24T20:33:15.710825Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-06-24T20:33:15.711123Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:466: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7519616697241076017:2293], DatabaseId: Root, PoolId: sample_pool_id, Subscribed on schemeboard notifications for path: [OwnerId: 72057594046644480, LocalPathId: 5] 2025-06-24T20:33:15.712349Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:274: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7519616697241076017:2293], DatabaseId: Root, PoolId: sample_pool_id, Got watch notification 2025-06-24T20:33:15.725331Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: default 2025-06-24T20:33:15.725360Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-24T20:33:15.725546Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=N2Q0OGMwODAtNGU2NTE3MWUtN2ZmMjQ3YTMtN2VjMTgyMjQ=, ActorId: [1:7519616697241075948:2291], ActorState: ReadyState, TraceId: 01jyhta6eca7ns5qba0hxxy4my, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: DROP RESOURCE POOL sample_pool_id; rpcActor: [0:0:0] database: /Root databaseId: /Root pool id: default 2025-06-24T20:33:15.732060Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: 
scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616697241076029:2295], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-06-24T20:33:15.733967Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616697241076029:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:15.734075Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:16.139231Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:294: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7519616697241076017:2293], DatabaseId: Root, PoolId: sample_pool_id, Got delete notification 2025-06-24T20:33:16.148051Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=1&id=N2Q0OGMwODAtN ... SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ExecuteState, TraceId: 01jyhtdnyqe01ks4dq3k55nnad, txInfo Status: Committed Kind: ReadWrite TotalDuration: 14.693 ServerDuration: 14.028 QueriesCount: 2 2025-06-24T20:35:09.926856Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ExecuteState, TraceId: 01jyhtdnyqe01ks4dq3k55nnad, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-24T20:35:09.926930Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ExecuteState, TraceId: 01jyhtdnyqe01ks4dq3k55nnad, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:35:09.926972Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ExecuteState, TraceId: 01jyhtdnyqe01ks4dq3k55nnad, EndCleanup, isFinal: 0 2025-06-24T20:35:09.927033Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ExecuteState, TraceId: 01jyhtdnyqe01ks4dq3k55nnad, Sent query response back to proxy, proxyRequestId: 28, proxyId: [10:7519617068179020898:2169] 2025-06-24T20:35:09.927476Z node 10 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Update lease, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, TxId: 2025-06-24T20:35:09.927705Z node 10 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:197: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Update lease, RunDataQuery: -- TRefreshPoolStateQuery::OnLeaseUpdated DECLARE $database_id AS Text; DECLARE $pool_id AS Text; SELECT COUNT(*) AS delayed_requests FROM `.metadata/workload_manager/delayed_requests` WHERE database = $database_id AND pool_id = $pool_id AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp()) AND lease_deadline >= CurrentUtcTimestamp(); SELECT COUNT(*) AS running_requests FROM `.metadata/workload_manager/running_requests` WHERE database = $database_id AND pool_id = $pool_id AND lease_deadline >= CurrentUtcTimestamp(); 2025-06-24T20:35:09.928164Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], 
ActorState: ReadyState, TraceId: 01jyhtdnz8c67w52f3gkrcb0ac, received request, proxyRequestId: 29 prepared: 0 tx_control: 1 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DML text: -- TRefreshPoolStateQuery::OnLeaseUpdated DECLARE $database_id AS Text; DECLARE $pool_id AS Text; SELECT COUNT(*) AS delayed_requests FROM `.metadata/workload_manager/delayed_requests` WHERE database = $database_id AND pool_id = $pool_id AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp()) AND lease_deadline >= CurrentUtcTimestamp(); SELECT COUNT(*) AS running_requests FROM `.metadata/workload_manager/running_requests` WHERE database = $database_id AND pool_id = $pool_id AND lease_deadline >= CurrentUtcTimestamp(); rpcActor: [10:7519617184143139261:2518] database: /Root databaseId: /Root pool id: default 2025-06-24T20:35:09.928212Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ReadyState, TraceId: 01jyhtdnz8c67w52f3gkrcb0ac, request placed into pool from cache: default 2025-06-24T20:35:09.928897Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ExecuteState, TraceId: 01jyhtdnz8c67w52f3gkrcb0ac, ExecutePhyTx, tx: 0x000050C0001A0E18 literal: 0 commit: 0 txCtx.DeferredEffects.size(): 0 2025-06-24T20:35:09.928982Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1503: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ExecuteState, TraceId: 01jyhtdnz8c67w52f3gkrcb0ac, Sending to Executer TraceId: 0 8 2025-06-24T20:35:09.929073Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1561: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ExecuteState, TraceId: 01jyhtdnz8c67w52f3gkrcb0ac, Created new KQP executer: [10:7519617184143139264:2512] isRollback: 0 2025-06-24T20:35:09.936410Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ExecuteState, TraceId: 01jyhtdnz8c67w52f3gkrcb0ac, TEvTxResponse, CurrentTx: 1/2 response.status: SUCCESS 2025-06-24T20:35:09.936489Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ExecuteState, TraceId: 01jyhtdnz8c67w52f3gkrcb0ac, ExecutePhyTx, tx: 0x000050C0001A1298 literal: 1 commit: 1 txCtx.DeferredEffects.size(): 0 2025-06-24T20:35:09.937216Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ExecuteState, TraceId: 01jyhtdnz8c67w52f3gkrcb0ac, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-06-24T20:35:09.937335Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ExecuteState, TraceId: 01jyhtdnz8c67w52f3gkrcb0ac, txInfo Status: Committed Kind: ReadOnly TotalDuration: 8.624 ServerDuration: 8.47 
QueriesCount: 2 2025-06-24T20:35:09.937438Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ExecuteState, TraceId: 01jyhtdnz8c67w52f3gkrcb0ac, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-24T20:35:09.937490Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ExecuteState, TraceId: 01jyhtdnz8c67w52f3gkrcb0ac, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:35:09.937511Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ExecuteState, TraceId: 01jyhtdnz8c67w52f3gkrcb0ac, EndCleanup, isFinal: 0 2025-06-24T20:35:09.937552Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ExecuteState, TraceId: 01jyhtdnz8c67w52f3gkrcb0ac, Sent query response back to proxy, proxyRequestId: 29, proxyId: [10:7519617068179020898:2169] 2025-06-24T20:35:09.937998Z node 10 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Describe pool, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, TxId: 2025-06-24T20:35:09.938099Z node 10 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:367: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Describe pool, Finish with SUCCESS, SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, TxId: 2025-06-24T20:35:09.943809Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ReadyState, Session closed due to explicit close event 2025-06-24T20:35:09.943861Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:35:09.943899Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T20:35:09.943944Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T20:35:09.944056Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=10&id=ZDA5N2RhZDItNDdhZGRkZjQtNjY3ZTgwYWQtOTk3YTI4ZTY=, ActorId: [10:7519617184143139233:2512], ActorState: unknown state, Session actor destroyed 
2025-06-24T20:35:09.950324Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=10&id=Nzk0OTFjMzItZGIyNTRlMjMtODQwNWRlODctYTY5OGYyMjQ=, ActorId: [10:7519617098243792466:2296], ActorState: ReadyState, Session closed due to explicit close event 2025-06-24T20:35:09.950381Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=10&id=Nzk0OTFjMzItZGIyNTRlMjMtODQwNWRlODctYTY5OGYyMjQ=, ActorId: [10:7519617098243792466:2296], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:35:09.950415Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=10&id=Nzk0OTFjMzItZGIyNTRlMjMtODQwNWRlODctYTY5OGYyMjQ=, ActorId: [10:7519617098243792466:2296], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T20:35:09.950443Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=10&id=Nzk0OTFjMzItZGIyNTRlMjMtODQwNWRlODctYTY5OGYyMjQ=, ActorId: [10:7519617098243792466:2296], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T20:35:09.950549Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=10&id=Nzk0OTFjMzItZGIyNTRlMjMtODQwNWRlODctYTY5OGYyMjQ=, ActorId: [10:7519617098243792466:2296], ActorState: unknown state, Session actor destroyed >> PQCountersSimple::PartitionFirstClass [GOOD] >> ResourcePoolClassifiersSysView::TestResourcePoolClassifiersSysViewFilters [GOOD] >> PQCountersSimple::SupportivePartitionCountersPersist >> KqpScanArrowInChanels::AggregateNoColumn [GOOD] >> KqpScanArrowInChanels::AggregateNoColumnNoRemaps >> KqpScanArrowFormat::SingleKey [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestNoYamlWithoutFlag [GOOD] >> TAsyncIndexTests::MergeMainWithReboots[TabletReboots] [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_1_Query [GOOD] >> DemoTx::Scenario_1 >> PQCountersSimple::SupportivePartitionCountersPersist [GOOD] >> TConsoleTests::TestTenantGenerationExtSubdomain [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestConsoleRestart >> KqpScanArrowFormat::JoinWithParams >> KqpWorkloadServiceDistributed::TestDistributedLessConcurrentQueryLimit [GOOD] >> TConsoleTests::TestModifyUsedZoneKind [GOOD] >> TConsoleTests::TestSchemeShardErrorForwarding >> TConsoleInMemoryConfigSubscriptionTests::TestConsoleRestart [GOOD] >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscription >> TConsoleInMemoryConfigSubscriptionTests::TestConsoleRestartSimplified >> TConsoleTests::TestMergeConfig >> KqpScanArrowInChanels::JoinWithParams [GOOD] >> KqpScanArrowFormat::AggregateByColumn [GOOD] >> TxUsage::WriteToTopic_Two_WriteSession_Table [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Table >> TConsoleInMemoryConfigSubscriptionTests::TestConsoleRestartSimplified [GOOD] >> DataShardVolatile::DistributedWriteThenLateWriteReadCommit [GOOD] >> KqpScanArrowFormat::AggregateNoColumn >> DataShardVolatile::TwoAppendsMustBeVolatile+UseSink >> TConsoleInMemoryConfigSubscriptionTests::TestComplexYamlConfigChanges >> TxUsage::WriteToTopic_Two_WriteSession_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::MergeMainWithReboots[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: 
[1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T20:30:42.424486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:30:42.424583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:42.424630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:30:42.424667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:30:42.424716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:30:42.424765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:30:42.424836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:42.424919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:30:42.425672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:30:42.426032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:30:42.516778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T20:30:42.516843Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:42.517643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T20:30:42.536622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:30:42.537117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:30:42.537285Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:30:42.558405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:30:42.558705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:30:42.559475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:42.559776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:30:42.564958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:42.565187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:30:42.566588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:42.566659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:42.566898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:30:42.566987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:30:42.567038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:30:42.567212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T20:30:42.582277Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:30:42.857724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:42.857975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:42.858205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:30:42.858273Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:30:42.858523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:30:42.858734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:42.875297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:42.875536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:30:42.875826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:42.875908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:30:42.875969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:30:42.876012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:30:42.878645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:42.878712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:42.878776Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:30:42.881337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:42.881408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:42.881451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:42.881525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:30:42.885369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:30:42.887415Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:30:42.887720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:30:42.888961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:42.889105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969454 } } Step: 5000001 Media ... icy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } 
TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:35:13.793786Z node 107 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409549:2][72075186233409546][107:827:2669] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-24T20:35:13.793898Z node 107 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409549:2][107:780:2669] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-24T20:35:13.794045Z node 107 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409549:2][72075186233409546][107:827:2669] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750797313769145 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1750797313769145 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750797313769145 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-24T20:35:13.802196Z node 107 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409549:2][72075186233409546][107:827:2669] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 
Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 3 2025-06-24T20:35:13.802325Z node 107 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409549:2][107:780:2669] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-24T20:35:13.997672Z node 107 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T20:35:13.998010Z node 107 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 374us result status StatusSuccess 2025-06-24T20:35:13.998973Z node 107 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } 
ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |89.5%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_exchange/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> ResourcePoolClassifiersSysView::TestResourcePoolClassifiersSysViewFilters [GOOD] Test command err: 2025-06-24T20:33:12.193455Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616682089789996:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:12.193509Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046e2/r3tmp/tmpcm1zBT/pdisk_1.dat 2025-06-24T20:33:12.629432Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:12.631399Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616682089789956:2079] 1750797192180434 != 1750797192180437 TServer::EnableGrpc on GrpcPort 29395, node 1 2025-06-24T20:33:12.717128Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:12.717258Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:12.725128Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:33:12.806952Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:12.806979Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:12.806992Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:12.807137Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8245 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T20:33:13.227333Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:33:13.278842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:33:13.294097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:33:15.634544Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-24T20:33:15.634912Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616694974692461:2289], Start check tables existence, number paths: 2 2025-06-24T20:33:15.649697Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=ZTYyOTZjODItMWNjMmRhNzItYzFiM2U1YmYtN2FjOTJjNQ==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZTYyOTZjODItMWNjMmRhNzItYzFiM2U1YmYtN2FjOTJjNQ== 2025-06-24T20:33:15.650265Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-24T20:33:15.650302Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-24T20:33:15.650341Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-24T20:33:15.650434Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616694974692461:2289], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-24T20:33:15.650507Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616694974692461:2289], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-24T20:33:15.650544Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616694974692461:2289], Successfully finished 2025-06-24T20:33:15.657392Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=ZTYyOTZjODItMWNjMmRhNzItYzFiM2U1YmYtN2FjOTJjNQ==, ActorId: [1:7519616694974692485:2290], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:33:15.657687Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-24T20:33:15.667205Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616694974692487:2298], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:33:15.675806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:15.677280Z 
node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616694974692487:2298], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-06-24T20:33:15.677478Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616694974692487:2298], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-24T20:33:15.692035Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616694974692487:2298], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:33:15.771693Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616694974692487:2298], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:33:15.776069Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616694974692538:2330] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:15.776204Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616694974692487:2298], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-24T20:33:15.784988Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: default 2025-06-24T20:33:15.785023Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-24T20:33:15.785246Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=ZTYyOTZjODItMWNjMmRhNzItYzFiM2U1YmYtN2FjOTJjNQ==, ActorId: [1:7519616694974692485:2290], ActorState: ReadyState, TraceId: 01jyhta6g8b8qf91bh6npzp1c7, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: GRANT DESCRIBE SCHEMA ON `/Root` TO `user@test`; GRANT DESCRIBE SCHEMA, SELECT ROW ON `/Root/.metadata/workload_manager/pools/sample_pool_id` TO `user@test`; rpcActor: [0:0:0] database: /Root databaseId: /Root pool id: default 2025-06-24T20:33:15.791244Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616694974692547:2292], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-06-24T20:33:15.795907Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616694974692547:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:15.796029Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:33:16.155538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:16.164268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:33:16.168386Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=1&id=ZTYyOTZjODItMWNjMmRhNzItYzFiM2U1YmYtN2FjOTJjNQ==, ActorId: [1:7519616694974692485:2290], ActorState: ExecuteState, TraceId: 01jyhta6g8b8qf91bh6npzp1c7, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [1:7519616694974692548:2290] WorkloadServiceCleanup: 0 2025-06-24T20:33:16.170655Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=1&id=ZTYyOTZjODItMWNjMmRhNzItYzFiM2U1YmYtN2FjOTJjNQ==, ActorId: [1:7519616694974692485:2290], ActorState: CleanupState, TraceId: 01jyhta6g8b8qf91bh6npzp1c7, EndCleanup, isFinal: 0 2025-06-24T20:33:16.170721Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=1&id=ZTYyOTZjODItMWNjMmRhNzItYzFiM2U1YmYtN2FjOTJjNQ==, ActorId: [1:7519616694974692485:2290], ActorState: CleanupState, TraceId: ... _id=9&id=NzMxNGU5NzAtYzdmMzc5Yi01MmUyYTkwNy1iMTQxMjk1OQ==, ActorId: [9:7519617196346484333:2979], ActorState: ReadyState, TraceId: 01jyhtdqtydmn9mbggzsxhkakd, request placed into pool from cache: default 2025-06-24T20:35:11.838700Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:575: SessionId: ydb://session/3?node_id=9&id=NzMxNGU5NzAtYzdmMzc5Yi01MmUyYTkwNy1iMTQxMjk1OQ==, ActorId: [9:7519617196346484333:2979], ActorState: ExecuteState, TraceId: 01jyhtdqtydmn9mbggzsxhkakd, Sending CompileQuery request 2025-06-24T20:35:11.854309Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:991: [main][9:7519617101857200818:2522][/Root/test-shared/.metadata/initialization/migrations] Sync is incomplete in one of the ring groups: cookie# 50 2025-06-24T20:35:11.854423Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:991: [main][9:7519617101857200818:2522][/Root/test-shared/.metadata/initialization/migrations] Sync is incomplete in one of the ring groups: cookie# 51 2025-06-24T20:35:11.855579Z node 9 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [9:7519617196346484336:2981], status: UNAVAILABLE, issues:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/initialization/migrations]
: Error: LookupError, code: 2005 2025-06-24T20:35:11.855814Z node 9 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=9&id=NzMxNGU5NzAtYzdmMzc5Yi01MmUyYTkwNy1iMTQxMjk1OQ==, ActorId: [9:7519617196346484333:2979], ActorState: ExecuteState, TraceId: 01jyhtdqtydmn9mbggzsxhkakd, ReplyQueryCompileError, status UNAVAILABLE remove tx with tx_id: 2025-06-24T20:35:11.855848Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=9&id=NzMxNGU5NzAtYzdmMzc5Yi01MmUyYTkwNy1iMTQxMjk1OQ==, ActorId: [9:7519617196346484333:2979], ActorState: ExecuteState, TraceId: 01jyhtdqtydmn9mbggzsxhkakd, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:35:11.855908Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=9&id=NzMxNGU5NzAtYzdmMzc5Yi01MmUyYTkwNy1iMTQxMjk1OQ==, ActorId: [9:7519617196346484333:2979], ActorState: ExecuteState, TraceId: 01jyhtdqtydmn9mbggzsxhkakd, EndCleanup, isFinal: 0 2025-06-24T20:35:11.856067Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=9&id=NzMxNGU5NzAtYzdmMzc5Yi01MmUyYTkwNy1iMTQxMjk1OQ==, ActorId: [9:7519617196346484333:2979], ActorState: ExecuteState, TraceId: 01jyhtdqtydmn9mbggzsxhkakd, Sent query response back to proxy, proxyRequestId: 108, proxyId: [9:7519617097562232708:2148] 2025-06-24T20:35:11.856910Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor_cb.h:34;event=unexpected reply;response=operation { ready: true status: UNAVAILABLE issues { message: "Table metadata loading" issue_code: 1050 severity: 1 issues { position { row: 1 column: 1 } message: "Failed to load metadata for table: db.[//Root/test-shared/.metadata/initialization/migrations]" end_position { row: 1 column: 1 } severity: 1 issues { message: "LookupError" issue_code: 2005 severity: 1 } } } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { } } } } ; 2025-06-24T20:35:11.857284Z node 9 :METADATA_PROVIDER ERROR: log.h:466: accessor_snapshot_base.cpp:16 :cannot construct snapshot: on request failed:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/initialization/migrations]
: Error: LookupError, code: 2005 2025-06-24T20:35:11.857352Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=9&id=NzMxNGU5NzAtYzdmMzc5Yi01MmUyYTkwNy1iMTQxMjk1OQ==, ActorId: [9:7519617196346484333:2979], ActorState: ReadyState, Session closed due to explicit close event 2025-06-24T20:35:11.857392Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=9&id=NzMxNGU5NzAtYzdmMzc5Yi01MmUyYTkwNy1iMTQxMjk1OQ==, ActorId: [9:7519617196346484333:2979], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:35:11.857423Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=9&id=NzMxNGU5NzAtYzdmMzc5Yi01MmUyYTkwNy1iMTQxMjk1OQ==, ActorId: [9:7519617196346484333:2979], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T20:35:11.857447Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=9&id=NzMxNGU5NzAtYzdmMzc5Yi01MmUyYTkwNy1iMTQxMjk1OQ==, ActorId: [9:7519617196346484333:2979], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T20:35:11.857526Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=9&id=NzMxNGU5NzAtYzdmMzc5Yi01MmUyYTkwNy1iMTQxMjk1OQ==, ActorId: [9:7519617196346484333:2979], ActorState: unknown state, Session actor destroyed 2025-06-24T20:35:12.009630Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=9&id=YzQ5NjRiNjAtOTNlYzI1ODQtZmFlYmRkNmUtZjI0M2NmOA==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id YzQ5NjRiNjAtOTNlYzI1ODQtZmFlYmRkNmUtZjI0M2NmOA== 2025-06-24T20:35:12.009781Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=9&id=YzQ5NjRiNjAtOTNlYzI1ODQtZmFlYmRkNmUtZjI0M2NmOA==, ActorId: [9:7519617200641451639:2985], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:35:12.011303Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=9&id=YzQ5NjRiNjAtOTNlYzI1ODQtZmFlYmRkNmUtZjI0M2NmOA==, ActorId: [9:7519617200641451639:2985], ActorState: ReadyState, TraceId: 01jyhtdr0b1yjj4dwwwx1cxcfw, received request, proxyRequestId: 110 prepared: 0 tx_control: 1 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DML text: SELECT * FROM `//Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers`; rpcActor: [9:7519617200641451640:2986] database: /Root/test-shared databaseId: /Root/test-shared pool id: default 2025-06-24T20:35:12.011348Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=9&id=YzQ5NjRiNjAtOTNlYzI1ODQtZmFlYmRkNmUtZjI0M2NmOA==, ActorId: [9:7519617200641451639:2985], ActorState: ReadyState, TraceId: 01jyhtdr0b1yjj4dwwwx1cxcfw, request placed into pool from cache: default 2025-06-24T20:35:12.011478Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:575: SessionId: ydb://session/3?node_id=9&id=YzQ5NjRiNjAtOTNlYzI1ODQtZmFlYmRkNmUtZjI0M2NmOA==, ActorId: [9:7519617200641451639:2985], ActorState: ExecuteState, TraceId: 01jyhtdr0b1yjj4dwwwx1cxcfw, Sending CompileQuery request 2025-06-24T20:35:12.032641Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:991: [main][9:7519617127627004734:2632][/Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers] Sync is incomplete in one of the ring groups: cookie# 58 2025-06-24T20:35:12.032762Z node 9 
:SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:991: [main][9:7519617127627004734:2632][/Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers] Sync is incomplete in one of the ring groups: cookie# 59 2025-06-24T20:35:12.033831Z node 9 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [9:7519617200641451642:2987], status: UNAVAILABLE, issues:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers]
: Error: LookupError, code: 2005 2025-06-24T20:35:12.034170Z node 9 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=9&id=YzQ5NjRiNjAtOTNlYzI1ODQtZmFlYmRkNmUtZjI0M2NmOA==, ActorId: [9:7519617200641451639:2985], ActorState: ExecuteState, TraceId: 01jyhtdr0b1yjj4dwwwx1cxcfw, ReplyQueryCompileError, status UNAVAILABLE remove tx with tx_id: 2025-06-24T20:35:12.034224Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=9&id=YzQ5NjRiNjAtOTNlYzI1ODQtZmFlYmRkNmUtZjI0M2NmOA==, ActorId: [9:7519617200641451639:2985], ActorState: ExecuteState, TraceId: 01jyhtdr0b1yjj4dwwwx1cxcfw, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:35:12.034248Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=9&id=YzQ5NjRiNjAtOTNlYzI1ODQtZmFlYmRkNmUtZjI0M2NmOA==, ActorId: [9:7519617200641451639:2985], ActorState: ExecuteState, TraceId: 01jyhtdr0b1yjj4dwwwx1cxcfw, EndCleanup, isFinal: 0 2025-06-24T20:35:12.034391Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=9&id=YzQ5NjRiNjAtOTNlYzI1ODQtZmFlYmRkNmUtZjI0M2NmOA==, ActorId: [9:7519617200641451639:2985], ActorState: ExecuteState, TraceId: 01jyhtdr0b1yjj4dwwwx1cxcfw, Sent query response back to proxy, proxyRequestId: 110, proxyId: [9:7519617097562232708:2148] 2025-06-24T20:35:12.035196Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor_cb.h:34;event=unexpected reply;response=operation { ready: true status: UNAVAILABLE issues { message: "Table metadata loading" issue_code: 1050 severity: 1 issues { position { row: 1 column: 1 } message: "Failed to load metadata for table: db.[//Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers]" end_position { row: 1 column: 1 } severity: 1 issues { message: "LookupError" issue_code: 2005 severity: 1 } } } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { } } } } ; 2025-06-24T20:35:12.035503Z node 9 :METADATA_PROVIDER ERROR: log.h:466: accessor_snapshot_base.cpp:16 :cannot construct snapshot: on request failed:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers]
: Error: LookupError, code: 2005 2025-06-24T20:35:12.035630Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=9&id=YzQ5NjRiNjAtOTNlYzI1ODQtZmFlYmRkNmUtZjI0M2NmOA==, ActorId: [9:7519617200641451639:2985], ActorState: ReadyState, Session closed due to explicit close event 2025-06-24T20:35:12.035679Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=9&id=YzQ5NjRiNjAtOTNlYzI1ODQtZmFlYmRkNmUtZjI0M2NmOA==, ActorId: [9:7519617200641451639:2985], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:35:12.035710Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=9&id=YzQ5NjRiNjAtOTNlYzI1ODQtZmFlYmRkNmUtZjI0M2NmOA==, ActorId: [9:7519617200641451639:2985], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T20:35:12.035734Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=9&id=YzQ5NjRiNjAtOTNlYzI1ODQtZmFlYmRkNmUtZjI0M2NmOA==, ActorId: [9:7519617200641451639:2985], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T20:35:12.035808Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=9&id=YzQ5NjRiNjAtOTNlYzI1ODQtZmFlYmRkNmUtZjI0M2NmOA==, ActorId: [9:7519617200641451639:2985], ActorState: unknown state, Session actor destroyed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> PQCountersSimple::SupportivePartitionCountersPersist [GOOD] Test command err: 2025-06-24T20:34:01.643437Z :HappyWay INFO: Random seed for debugging is 1750797241643399 2025-06-24T20:34:02.112667Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616898822693717:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:02.112703Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:34:02.231589Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616896885754760:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:02.231649Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b79/r3tmp/tmp5PXAaA/pdisk_1.dat 2025-06-24T20:34:02.574313Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:34:02.604797Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:34:03.008760Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:03.011297Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:03.016050Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:03.016138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:03.024713Z node 1 
:HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:34:03.035796Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:03.075849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:03.099837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:03.151226Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:03.266401Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 9577, node 1 2025-06-24T20:34:03.628117Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/003b79/r3tmp/yandexRpYHWN.tmp 2025-06-24T20:34:03.628151Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/003b79/r3tmp/yandexRpYHWN.tmp 2025-06-24T20:34:03.628362Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/003b79/r3tmp/yandexRpYHWN.tmp 2025-06-24T20:34:03.635233Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:03.881545Z INFO: TTestServer started on Port 18394 GrpcPort 9577 TClient is connected to server localhost:18394 PQClient connected to localhost:9577 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:04.528966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-24T20:34:06.873530Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616916002563870:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:06.875242Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616916002563883:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:06.875297Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:06.873530Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519616914065624220:2270], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:06.873628Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:06.873703Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519616914065624235:2273], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:06.883463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:06.910537Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519616914065624239:2126] txid# 281474976715657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:34:06.934639Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616916002563885:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-24T20:34:06.934728Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519616914065624238:2274], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-24T20:34:07.011355Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616920297531278:2684] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:07.042129Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519616918360591562:2132] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:07.232456Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519616896885754760:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:07.232569Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:07.571745Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616898822693717:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:07.571888Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:07.656475Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519616918360591569:2278], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:34:07.658669Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NDdjNzVjOTMtZTcyYjlhZWYtOWM5NzhkODYtNjY2MTBiMmI=, ActorId: [2:7519616914065624207:2269], ActorState: ExecuteState, TraceId: 01jyhtbrcke8h5jp2pf43jtwma, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:34:07.656474Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519616920297531299:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:34:07.663383Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permi ... 5-06-24T20:35:08.188369Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037894] got client message batch for topic 'rt3.dc1--topic1' partition 1 2025-06-24T20:35:08.188453Z node 6 :PERSQUEUE ERROR: partition_read.cpp:780: [PQ: 72075186224037894, Partition: 1, State: StateIdle] reading from too big offset - topic rt3.dc1--topic1 partition 1 client $without_consumer EndOffset 0 offset 1 2025-06-24T20:35:08.188491Z node 6 :PERSQUEUE DEBUG: event_helpers.cpp:40: tablet 72075186224037894 topic 'rt3.dc1--topic1' partition 1 error: trying to read from future. ReadOffset 1, 0 EndOffset 0 2025-06-24T20:35:08.188526Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1426: [PQ: 72075186224037894] Handle TEvPQ::TEvError Cookie 1, Error trying to read from future. ReadOffset 1, 0 EndOffset 0 2025-06-24T20:35:08.188542Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:401: Answer error topic: 'rt3.dc1--topic1' partition: 1 messageNo: 0 requestId: request-id-0-1 error: trying to read from future. ReadOffset 1, 0 EndOffset 0 2025-06-24T20:35:08.189290Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [5:7519617140945672791:3611] destroyed 2025-06-24T20:35:08.548362Z node 5 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [5:7519617183895346184:2570] TxId: 281474976710708. Ctx: { TraceId: 01jyhtdm8222d5qr26gbtb30g4, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=ZDFkYThlNTktY2MxYjQyM2EtMjNjYjA1ZWQtNTlkNzMxNjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 6 2025-06-24T20:35:08.549188Z node 5 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [5:7519617183895346188:2570], TxId: 281474976710708, task: 3. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=5&id=ZDFkYThlNTktY2MxYjQyM2EtMjNjYjA1ZWQtNTlkNzMxNjQ=. TraceId : 01jyhtdm8222d5qr26gbtb30g4. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [5:7519617183895346184:2570], status: UNAVAILABLE, reason: {
: Error: Terminate execution } 2025-06-24T20:35:10.269008Z node 7 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:35:10.269146Z node 7 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:35:10.301618Z node 7 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:35:10.302813Z node 7 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1 actor [7:201:2214] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 30720 BurstSize: 30720 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-24T20:35:10.303783Z node 7 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [7:210:2221] 2025-06-24T20:35:10.310681Z node 7 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [7:210:2221] 2025-06-24T20:35:10.319270Z node 7 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|1d8c7516-ad311cfe-5a9d1f5e-f28431c8_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured kesus quota request event from [7:226:2234] 2025-06-24T20:35:10.331291Z node 7 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|ea217e57-1daf6291-b23226d0-2851fcee_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured kesus quota request event from [7:226:2234] 2025-06-24T20:35:10.709957Z node 7 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|f275a400-69f15f93-67aed135-c7035fae_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured kesus quota request event from [7:226:2234] 2025-06-24T20:35:10.991188Z node 7 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|e66be385-60f156cf-e66c7c72-24db2c8e_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured kesus quota request event from [7:226:2234] 2025-06-24T20:35:11.262292Z node 7 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|6d38dd11-33cee70b-d73db643-4e496b97_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured kesus quota request event from [7:226:2234] 2025-06-24T20:35:11.541196Z node 7 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|243de661-6d6ca29e-e363cbe0-ff658420_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured kesus quota request event from [7:226:2234] **** Total histogram: ****
Interval=0ms: 1
Interval=10000ms: 0
Interval=1000ms: 1
Interval=100ms: 0
Interval=10ms: 0
Interval=1ms: 0
Interval=20ms: 0
Interval=2500ms: 4
Interval=5000ms: 0
Interval=500ms: 0
Interval=50ms: 0
Interval=5ms: 0
Interval=999999ms: 0
**** **** **** **** 2025-06-24T20:35:12.572268Z node 8 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:35:12.572397Z node 8 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:35:12.609311Z node 8 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:35:12.610731Z node 8 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 2 actor [8:201:2214] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-24T20:35:12.611965Z node 8 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [8:209:2220] 2025-06-24T20:35:12.613590Z node 8 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [8:209:2220] 2025-06-24T20:35:12.614796Z node 8 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [8:210:2221] 2025-06-24T20:35:12.615906Z node 8 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'topic' partition 1 generation 2 [8:210:2221] 2025-06-24T20:35:12.623261Z node 8 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|857c3057-2c2e15de-a0d26e3c-51be05f1_0 generated for partition 0 topic 'topic' owner default 2025-06-24T20:35:12.633671Z node 8 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d7ec398f-ecea093c-95646f95-ff8bf394_1 generated for partition 0 topic 'topic' owner default 2025-06-24T20:35:12.643616Z node 8 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|2b4bb9d2-bc950f18-a3567e85-2908ea06_2 generated for partition 0 topic 'topic' owner default 2025-06-24T20:35:12.658654Z node 8 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|aff759b9-67c912f7-60dd1799-b9578285_3 generated for partition 0 topic 'topic' owner default 2025-06-24T20:35:13.393162Z node 9 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:35:13.393311Z node 9 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:35:13.436745Z node 9 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:35:13.438001Z node 9 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 3 actor [9:201:2214] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 
6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 30720 BurstSize: 30720 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } 2025-06-24T20:35:13.439044Z node 9 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [9:210:2221] 2025-06-24T20:35:13.448564Z node 9 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [9:210:2221] 2025-06-24T20:35:13.464605Z node 9 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|a4bf53bb-11cdc364-bafbe247-ebb13226_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured TEvRequest, cmd write size: 3 Captured TEvRequest, cmd write size: 3 Captured kesus quota request event from [9:226:2234] 2025-06-24T20:35:13.478028Z node 9 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|74984815-ef39c2e0-d675e2ad-6033c2ff_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured TEvRequest, cmd write size: 3 Captured TEvRequest, cmd write size: 3 Captured kesus quota request event from [9:226:2234] 2025-06-24T20:35:13.854330Z node 9 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|7e64d0c4-6e7bcdee-b33b82e5-31001172_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured TEvRequest, cmd write size: 3 Captured TEvRequest, cmd write size: 3 Captured kesus quota request event from [9:226:2234] 2025-06-24T20:35:14.231079Z node 9 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|dd6c788e-cc43c674-66f155ad-addb9be2_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured TEvRequest, cmd write size: 3 Captured TEvRequest, cmd write size: 3 Captured kesus quota request event from [9:226:2234] 2025-06-24T20:35:14.511106Z node 9 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|8e4b2276-b6159e2-c0c3bbcb-3f309a6_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured TEvRequest, cmd write size: 3 Captured TEvRequest, cmd write size: 3 Captured kesus quota request event from [9:226:2234] 2025-06-24T20:35:14.771035Z node 9 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|306a139f-ae2bb2aa-c7f107b9-408c57f0_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured TEvRequest, cmd write size: 3 Captured TEvRequest, cmd write size: 3 Captured kesus quota request event from [9:226:2234] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowInChanels::JoinWithParams [GOOD] Test command err: Trying to start YDB, gRPC: 22348, MsgBus: 8878 2025-06-24T20:34:57.666405Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617135762163146:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:57.666498Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/2btm/002259/r3tmp/tmpLY00wQ/pdisk_1.dat 2025-06-24T20:34:58.124203Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:58.124304Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:58.159096Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617135762163047:2079] 1750797297631432 != 1750797297631435 2025-06-24T20:34:58.180020Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:58.180268Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22348, node 1 2025-06-24T20:34:58.287810Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:58.287833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:58.287854Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:58.287952Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8878 2025-06-24T20:34:58.692520Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8878 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:59.063924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:59.091349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:34:59.269861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:59.463435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:34:59.552221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:01.365363Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617152942033851:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:01.365505Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:01.700137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:01.753517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:01.809086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:01.853851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:01.885225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:01.915540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:01.944247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:02.001015Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617157237001804:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:02.001117Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:02.001219Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617157237001809:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:02.005071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:02.022037Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617157237001811:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:35:02.110275Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617157237001862:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:02.660451Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617135762163146:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:02.671278Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:03.128458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 864000000000 2025-06-24T20:35:03.570753Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: Kqp ... ode 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519617190413632129:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:10.058955Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002259/r3tmp/tmp50DMZL/pdisk_1.dat 2025-06-24T20:35:10.201014Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:10.202227Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519617190413632110:2079] 1750797310058430 != 1750797310058433 2025-06-24T20:35:10.220805Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:10.220900Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:10.222450Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5046, node 3 2025-06-24T20:35:10.259031Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:35:10.259056Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:35:10.259066Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:35:10.259225Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6544 TClient is connected to server localhost:6544 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:10.784951Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:35:10.800514Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:10.888214Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:11.035920Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:11.117501Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:11.154078Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:13.692482Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519617203298535626:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:13.692616Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:13.757046Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:13.798977Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:13.829995Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:13.868047Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:13.903408Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:13.977946Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:14.020051Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:14.098027Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519617207593503581:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:14.098149Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:14.098847Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519617207593503586:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:14.104047Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:14.117510Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-24T20:35:14.117758Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519617207593503588:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:35:14.216997Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519617207593503641:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:15.059046Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519617190413632129:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:15.059119Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:15.721740Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797315708, txId: 281474976715672] shutting down 2025-06-24T20:35:15.962445Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797315995, txId: 281474976715674] shutting down >> TYardTest::TestLogOverwriteRestarts [GOOD] >> TYardTest::TestLogOwerwrite >> KqpPg::TempTablesWithCache [GOOD] >> KqpPg::TableDeleteWhere+useSink >> KqpScanArrowFormat::AllTypesColumnsCellvec [GOOD] >> KqpScanArrowFormat::AggregateNoColumnNoRemaps >> DataShardVolatile::DistributedWriteEarlierSnapshotNotBlocked [GOOD] >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit+UseSink >> TxUsage::WriteToTopic_Demo_21_RestartNo_Table [GOOD] >> TYardTest::TestLogOwerwrite [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestComplexYamlConfigChanges [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestNoYamlResend >> TConsoleTests::TestSchemeShardErrorForwarding [GOOD] >> TPersQueueTest::FetchRequest >> TConsoleTests::TestScaleRecommenderPolicies >> TPersQueueTest::BadTopic >> TPersQueueTest::SetupLockSession2 >> TxUsage::WriteToTopic_Demo_21_RestartNo_Query >> TConsoleTests::TestMergeConfig [GOOD] >> TConsoleTests::TestRemoveTenant |89.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> TYardTest::TestLogOwerwrite [GOOD] >> TPersQueueTest::WriteExisting >> THiveTest::TestCheckSubHiveMigrationManyTablets [GOOD] >> THiveTest::TestCreateSubHiveCreateManyTablets >> KqpScanArrowFormat::JoinWithParams [GOOD] >> KqpScanArrowInChanels::AggregateCountStar >> TConsoleInMemoryConfigSubscriptionTests::TestNoYamlResend [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApply |89.5%| [TA] $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/test-results/unittest/{meta.json ... results_accumulator.log} >> THiveTest::TestHiveBalancerDifferentResources [GOOD] >> THiveTest::TestHiveBalancerDifferentResources2 |89.5%| [TA] $(B)/ydb/core/blobstorage/pdisk/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TxUsage::WriteToTopic_Demo_10_Table [GOOD] >> KqpScanArrowInChanels::AggregateNoColumnNoRemaps [GOOD] >> KqpScanArrowInChanels::AggregateWithFunction >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApply [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithDb >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscription [GOOD] >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterAlter >> TxUsage::WriteToTopic_Demo_3_Query [GOOD] >> TPersQueueTest::ReadFromSeveralPartitions >> TxUsage::WriteToTopic_Demo_10_Query >> TopicService::OneConsumer_TheRangesOverlap [GOOD] >> TxUsage::WriteToTopic_Demo_2_Query [GOOD] >> KqpPg::TableDeleteAllData-useSink [GOOD] >> KqpPg::PgUpdateCompoundKey+useSink >> TxUsage::WriteToTopic_Demo_32_Table >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithDb [GOOD] >> SystemView::AuthUsers_Access [GOOD] >> SystemView::AuthUsers_ResultOrder >> TxUsage::WriteToTopic_Demo_1_Query [GOOD] >> TxUsage::WriteToTopic_Demo_24_Table >> BasicUsage::ConflictingWrites [GOOD] >> Describe::LocationWithKillTablets ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithDb [GOOD] Test command err: 2025-06-24T20:34:55.095245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:34:55.095317Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:55.160769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:56.767261Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:34:56.767349Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:56.813929Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:58.047128Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:34:58.047234Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:58.092484Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:59.155903Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:34:59.155970Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:59.211087Z node 4 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:00.646229Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:00.646297Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:00.698352Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:01.917614Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:01.917693Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:01.957853Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:03.056672Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:03.056757Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:03.111767Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:04.383782Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:04.383861Z node 8 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:04.435587Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:05.515295Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:05.515356Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:05.555974Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:06.694734Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:06.694818Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:06.748640Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:08.070674Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:08.070751Z node 11 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:08.134856Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:09.264983Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:09.265078Z node 12 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:09.313790Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:10.901311Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:10.901398Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:10.956575Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:12.313121Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:12.313232Z node 14 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:12.387941Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:14.121210Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:14.121293Z node 16 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were 
not loaded 2025-06-24T20:35:14.205545Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:15.939771Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:15.939866Z node 18 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:16.009039Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:17.755838Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:17.755927Z node 20 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:17.834180Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:19.552155Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:19.552262Z node 22 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:19.605482Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:20.951603Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:20.951698Z node 23 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:21.020705Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:21.670377Z node 23 :BS_CONTROLLER ERROR: {BSC26@console_interaction.cpp:112} failed to parse config obtained from Console ErrorReason# ydb/library/yaml_config/yaml_config_parser.cpp:1362: Condition violated: `config.HasDomainsConfig()' Yaml# --- metadata: kind: MainConfig cluster: "" version: 1 config: log_config: cluster_name: cluster1 allowed_labels: test: type: enum values: ? 
true selector_config: [] >> TopicService::DifferentConsumers_TheRangesOverlap >> TxUsage::WriteToTopic_Demo_18_RestartNo_Table >> TxUsage::Sinks_Oltp_WriteToTopic_3_Table [GOOD] >> TxUsage::WriteToTopic_Demo_4_Query [GOOD] >> TConsoleTests::TestScaleRecommenderPolicies [GOOD] >> TConsoleTests::TestScaleRecommenderPoliciesValidation >> TxUsage::Sinks_Oltp_WriteToTopic_3_Query >> TxUsage::WriteToTopic_Demo_41_Table >> TPartitionWriterCacheActorTests::WriteReplyOrder >> TxUsage::WriteToTopic_Two_WriteSession_Query [GOOD] >> TPartitionWriterCacheActorTests::WriteReplyOrder [GOOD] >> TPartitionWriterCacheActorTests::DropOldWriter >> KqpScanArrowFormat::AggregateNoColumn [GOOD] >> KqpScanArrowFormat::AggregateEmptySum >> TPartitionWriterCacheActorTests::DropOldWriter [GOOD] >> TPersQueueCommonTest::Auth_CreateGrpcStreamWithInvalidTokenInInitialMetadata_SessionClosedWithUnauthenticatedError >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit+UseSink [GOOD] >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit-UseSink >> TxUsage::WriteToTopic_Demo_5_Table >> KqpScanArrowFormat::AggregateNoColumnNoRemaps [GOOD] >> KqpScanArrowFormat::AggregateWithFunction >> KqpScanArrowInChanels::AggregateCountStar [GOOD] >> KqpScanArrowInChanels::AggregateByColumn >> TConsoleTests::TestScaleRecommenderPoliciesValidation [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorSingle >> DemoTx::Scenario_1 [GOOD] |89.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream |89.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterAlter [GOOD] >> TConsoleTests::TestRemoveTenant [GOOD] >> TConsoleTests::TestRemoveTenantExtSubdomain >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterAclChange |89.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer |89.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer >> TOlap::StoreStatsQuota [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Table [GOOD] >> KqpScanArrowInChanels::AggregateWithFunction [GOOD] >> KqpScanArrowInChanels::AggregateEmptySum >> DemoTx::Scenario_2 >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Query >> SystemView::TopPartitionsByTliFields [GOOD] >> ViewQuerySplit::Basic [GOOD] >> ViewQuerySplit::WithPragmaTablePathPrefix [GOOD] >> ViewQuerySplit::WithPairedPragmaTablePathPrefix [GOOD] >> ViewQuerySplit::WithComments [GOOD] >> ViewQuerySplit::Joins [GOOD] >> TNetClassifierUpdaterTest::TestFiltrationByNetboxCustomFieldsAndTags [GOOD] >> THeavyPerfTest::TTestLoadEverything [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartNo_Query [GOOD] >> TNetClassifierUpdaterTest::TestFiltrationByNetboxCustomFieldsOnly >> THiveImplTest::BootQueueSpeed >> KqpPg::PgUpdateCompoundKey+useSink [GOOD] >> KqpPg::PgUpdateCompoundKey-useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlap::StoreStatsQuota [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 
2025-06-24T20:31:55.646475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:31:55.646551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:55.646582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:31:55.646618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:31:55.646666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:31:55.646691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:31:55.646731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:31:55.646790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:31:55.647486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:31:55.647800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:31:55.723305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:31:55.723353Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:31:55.740713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:31:55.741078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:31:55.741304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:31:55.750012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:31:55.750227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:31:55.751018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:55.751385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:31:55.755226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:55.755458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:31:55.756826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:55.756910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:31:55.757166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:31:55.757231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:31:55.757303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:31:55.757415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:31:55.784046Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:31:55.918039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:31:55.918305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:55.918581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:31:55.918633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:31:55.918900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:31:55.919009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:31:55.921317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:55.921543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:31:55.921761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:55.921836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:31:55.921893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:31:55.921931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:31:55.924058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:55.924116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:31:55.924168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:31:55.926330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:55.926382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:31:55.926428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:55.926497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:31:55.936553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:31:55.938667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:31:55.938904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:31:55.939918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:31:55.940075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:31:55.940128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-24T20:31:55.940452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:31:55.940544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:31:55.940759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:31:55.940851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:31:55.943557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:31:55.943604Z node 1 :FLAT_TX_SCHEMESHARD ... tablet_id=72075186233409546;self_id=[2:465:2431];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186233409546; 2025-06-24T20:35:28.111604Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546; 2025-06-24T20:35:28.111713Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-24T20:35:28.111785Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T20:35:28.111900Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-24T20:35:28.111981Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T20:35:28.112075Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T20:35:28.112126Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T20:35:28.112230Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T20:35:28.403502Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;parent=[2:465:2431];fline=actor.cpp:33;event=skip_flush_writing; 
2025-06-24T20:35:28.681218Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;parent=[2:465:2431];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-24T20:35:28.682750Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186233409546; 2025-06-24T20:35:28.693204Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546; 2025-06-24T20:35:28.693321Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-24T20:35:28.693407Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T20:35:28.693527Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-24T20:35:28.693612Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T20:35:28.693683Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T20:35:28.693745Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T20:35:28.693855Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T20:35:28.978086Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;parent=[2:465:2431];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-24T20:35:29.224608Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;parent=[2:465:2431];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-24T20:35:29.227002Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186233409546; 2025-06-24T20:35:29.239689Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546; 2025-06-24T20:35:29.239787Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T20:35:29.239920Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: There are stats for 1 tables 2025-06-24T20:35:29.240076Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T20:35:29.240182Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-24T20:35:29.240269Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T20:35:29.240335Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T20:35:29.240384Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T20:35:29.240496Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:465:2431];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T20:35:29.240832Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 424 rowCount 1 cpuUsage 2.4121 2025-06-24T20:35:29.247845Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:1 data size 424 row count 1 2025-06-24T20:35:29.248057Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=OlapStore, is column=0, is olap=1, RowCount 1, DataSize 424 2025-06-24T20:35:29.248199Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:353: OLAP store contains 1 tables. 
2025-06-24T20:35:29.248256Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:188: PrepareStats: SchemeShard has no info on DataShard 72075186233409546 channel 2 binding 2025-06-24T20:35:29.248344Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:397: Aggregated stats for pathId 4: RowCount 1, DataSize 424 2025-06-24T20:35:29.248881Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:35:29.249155Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDatabase" took 359us result status StatusSuccess 2025-06-24T20:35:29.249813Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "SomeDatabase" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } Children { Name: "OlapStore" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnStore CreateFinished: true CreateTxId: 104 CreateStep: 1750796531816 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 424 DataSize: 424 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 424 DataSize: 424 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 1000000 data_size_soft_quota: 900000 } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TxUsage::WriteToTopic_Demo_32_Table [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorSingle [GOOD] >> DataShardVolatile::TwoAppendsMustBeVolatile+UseSink [GOOD] >> TPersQueueTest::BadTopic [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Table >> DataShardVolatile::TwoAppendsMustBeVolatile-UseSink >> 
TConsoleTxProcessorTests::TestTxProcessorSubProcessor >> TPersQueueTest::CloseActiveWriteSessionOnClusterDisable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> ViewQuerySplit::Joins [GOOD] Test command err: 2025-06-24T20:34:13.922572Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616947161194356:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:13.922612Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b0c/r3tmp/tmpEnraum/pdisk_1.dat 2025-06-24T20:34:14.402358Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:14.402444Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:14.408199Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:14.438672Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:14.443395Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616947161194338:2079] 1750797253922091 != 1750797253922094 TServer::EnableGrpc on GrpcPort 23314, node 1 2025-06-24T20:34:14.571531Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:14.571551Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:14.571557Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:14.571665Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16605 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:14.930564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:34:14.951283Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:17.640714Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616964341064132:2288], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:17.640718Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616964341064146:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:17.640958Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:17.646284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:17.663044Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616964341064165:2292], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:34:17.766747Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616964341064216:2331] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:18.578004Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhtc0b7cnye70j4trrfxfh7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDg2M2Y2ODMtZGY0OTk1ZjYtM2ZlOWIwMzYtOWVlNzE1Mzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:18.645136Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519616968636031556:2301], owner: [1:7519616968636031553:2299], scan id: 0, sys view info: Type: EVSlots SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T20:34:18.677529Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519616968636031556:2301], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-24T20:34:18.701567Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519616968636031556:2301], row count: 1, finished: 1 2025-06-24T20:34:18.701714Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519616968636031556:2301], owner: [1:7519616968636031553:2299], scan id: 0, sys view info: Type: EVSlots SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T20:34:18.720244Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797258575, txId: 281474976710660] shutting down 2025-06-24T20:34:18.923230Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616947161194356:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:18.923322Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:19.966954Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. Ctx: { TraceId: 01jyhtc4yt56mdgqa4s9gdcd0a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDY4MWI3LWY4ZTJiNzcyLWM0ZGY2MzFiLTMyYjhjMWYy, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:34:19.970871Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519616972930998907:2319], owner: [1:7519616972930998903:2317], scan id: 0, sys view info: Type: EVSlots SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T20:34:19.973609Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519616972930998907:2319], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-24T20:34:19.973968Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519616972930998907:2319], row count: 2, finished: 1 2025-06-24T20:34:19.974059Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519616972930998907:2319], owner: [1:7519616972930998903:2317], scan id: 0, sys view info: Type: EVSlots SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T20:34:19.977245Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797259960, txId: 281474976710662] shutting down 2025-06-24T20:34:21.585330Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616981213998500:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:21.585428Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b0c/r3tmp/tmplVdsbr/pdisk_1.dat 2025-06-24T20:34:21.938798Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:21.945269Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:21.945347Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:21.954941Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:21.968308Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 TServer::EnableGrpc on GrpcPort 6232, node 2 2025-06-24T20:34:22.277724Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:22.277748Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:22.277757Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:22.277892Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:22.606763Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14457 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: ... RCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797325934, txId: 281474976710679] shutting down 2025-06-24T20:35:26.560990Z node 14 :SYSTEM_VIEWS DEBUG: tx_aggregate.cpp:14: [72075186224037899] TTxAggregate::Execute 2025-06-24T20:35:26.561041Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:136: [72075186224037899] PersistQueryResults: interval end# 2025-06-24T20:35:26.000000Z, query count# 0 2025-06-24T20:35:26.561077Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 8, interval end# 2025-06-24T20:35:26.000000Z, query count# 0, persisted# 0 2025-06-24T20:35:26.561102Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 10, interval end# 2025-06-24T20:35:26.000000Z, query count# 0, persisted# 0 2025-06-24T20:35:26.561125Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 12, interval end# 2025-06-24T20:35:26.000000Z, query count# 0, persisted# 0 2025-06-24T20:35:26.561150Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 14, interval end# 2025-06-24T20:35:26.000000Z, query count# 0, persisted# 0 2025-06-24T20:35:26.561171Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 9, interval end# 2025-06-24T21:00:00.000000Z, query count# 0, persisted# 0 2025-06-24T20:35:26.561193Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 11, interval end# 2025-06-24T21:00:00.000000Z, query count# 0, persisted# 0 2025-06-24T20:35:26.561214Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 13, interval end# 2025-06-24T21:00:00.000000Z, query count# 0, persisted# 0 2025-06-24T20:35:26.561237Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 15, interval end# 2025-06-24T21:00:00.000000Z, query count# 0, persisted# 0 2025-06-24T20:35:26.568596Z node 14 :SYSTEM_VIEWS DEBUG: tx_aggregate.cpp:110: [72075186224037899] TTxAggregate::Complete 2025-06-24T20:35:27.021200Z node 12 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710683. Ctx: { TraceId: 01jyhte67h62n7z2bh3r7t5hhn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=12&id=OGQ4M2MyZDgtMzI0YmE5MGEtZTQ1NDNjZDMtZjY0Mzg3NTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:35:27.050899Z node 14 :SYSTEM_VIEWS DEBUG: tx_collect.cpp:14: [72075186224037899] TTxCollect::Execute 2025-06-24T20:35:27.051041Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:182: [72075186224037899] PersistPartitionTopResults: table id# 17, partition interval end# 2025-06-24T20:35:27.000000Z, partition count# 1 2025-06-24T20:35:27.051114Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:182: [72075186224037899] PersistPartitionTopResults: table id# 20, partition interval end# 2025-06-24T20:35:27.000000Z, partition count# 1 2025-06-24T20:35:27.051196Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:182: [72075186224037899] PersistPartitionTopResults: table id# 18, partition interval end# 2025-06-24T21:00:00.000000Z, partition count# 1 2025-06-24T20:35:27.051274Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:182: [72075186224037899] PersistPartitionTopResults: table id# 21, partition interval end# 2025-06-24T21:00:00.000000Z, partition count# 1 2025-06-24T20:35:27.051371Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:333: [72075186224037899] Reset: interval end# 2025-06-24T20:35:27.000000Z 2025-06-24T20:35:27.030269Z node 12 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [12:7519617262991481361:2418], owner: [12:7519617262991481357:2416], scan id: 0, sys view info: Type: ETopPartitionsByTliOneMinute SourceObject { OwnerId: 72075186224037888 LocalId: 1 } 2025-06-24T20:35:27.069321Z node 12 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [12:7519617262991481361:2418], schemeshard id: 72075186224037888, hive id: 72057594037968897, database: /Root/Tenant1, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 2], database node count: 2 2025-06-24T20:35:27.071408Z node 16 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:641: [72075186224037893] Reply batch: range# From { IntervalEndUs: 1750797326000000 Rank: 0 } InclusiveFrom: true To { IntervalEndUs: 1750797326000000 Rank: 4294967295 } InclusiveTo: true Type: TOP_PARTITIONS_BY_TLI_ONE_MINUTE , rows# 1, bytes# 63, next# 2025-06-24T20:35:27.075592Z node 12 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [12:7519617262991481361:2418], row count: 1, finished: 1 2025-06-24T20:35:27.075719Z node 12 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [12:7519617262991481361:2418], owner: [12:7519617262991481357:2416], scan id: 0, sys view info: Type: ETopPartitionsByTliOneMinute SourceObject { OwnerId: 72075186224037888 LocalId: 1 } 2025-06-24T20:35:27.076063Z node 14 :SYSTEM_VIEWS DEBUG: tx_collect.cpp:29: [72075186224037899] TTxCollect::Complete 2025-06-24T20:35:27.081809Z node 12 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797327019, txId: 281474976710682] shutting down 2025-06-24T20:35:27.082668Z node 16 :SYSTEM_VIEWS DEBUG: tx_collect.cpp:14: [72075186224037893] TTxCollect::Execute 2025-06-24T20:35:27.082801Z node 16 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:182: [72075186224037893] PersistPartitionTopResults: table id# 17, partition interval end# 2025-06-24T20:35:27.000000Z, partition count# 1 2025-06-24T20:35:27.082860Z node 16 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:182: [72075186224037893] PersistPartitionTopResults: table id# 20, partition interval end# 2025-06-24T20:35:27.000000Z, partition count# 1 2025-06-24T20:35:27.082914Z node 16 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:182: [72075186224037893] 
PersistPartitionTopResults: table id# 18, partition interval end# 2025-06-24T21:00:00.000000Z, partition count# 1 2025-06-24T20:35:27.082962Z node 16 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:182: [72075186224037893] PersistPartitionTopResults: table id# 21, partition interval end# 2025-06-24T21:00:00.000000Z, partition count# 1 2025-06-24T20:35:27.083039Z node 16 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:333: [72075186224037893] Reset: interval end# 2025-06-24T20:35:27.000000Z 2025-06-24T20:35:27.086890Z node 16 :SYSTEM_VIEWS DEBUG: tx_collect.cpp:29: [72075186224037893] TTxCollect::Complete 2025-06-24T20:35:27.157657Z node 12 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 14 2025-06-24T20:35:27.158598Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T20:35:27.172033Z node 12 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 15 2025-06-24T20:35:27.172479Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T20:35:27.175549Z node 13 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [13:7519617201447978775:2073], processor id# 72075186224037899, database# /Root/Tenant2 2025-06-24T20:35:27.176333Z node 13 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [13:7519617201447978775:2073], database# /Root/Tenant2, processor id# 72075186224037899 2025-06-24T20:35:27.175584Z node 12 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 13 2025-06-24T20:35:27.176448Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T20:35:27.196533Z node 14 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [14:7519617205307565620:2086], processor id# 72075186224037899, database# /Root/Tenant2 2025-06-24T20:35:27.198116Z node 14 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [14:7519617205307565620:2086], database# /Root/Tenant2, processor id# 72075186224037899 2025-06-24T20:35:27.191763Z node 12 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 16 2025-06-24T20:35:27.203265Z node 15 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [15:7519617198584814802:2084], processor id# 72075186224037893, database# /Root/Tenant1 2025-06-24T20:35:27.216354Z node 16 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [16:7519617200600048560:2094], processor id# 72075186224037893, database# /Root/Tenant1 2025-06-24T20:35:27.192773Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T20:35:27.197050Z node 12 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[16:7519617200600048614:2105], Type=268959746 2025-06-24T20:35:27.219671Z node 12 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[13:7519617201447978950:2105], Type=268959746 2025-06-24T20:35:27.219704Z node 12 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[13:7519617201447978950:2105], 
Type=268959746 2025-06-24T20:35:27.219727Z node 12 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[13:7519617201447978950:2105], Type=268959746 2025-06-24T20:35:27.219753Z node 12 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[13:7519617201447978950:2105], Type=268959746 2025-06-24T20:35:27.219782Z node 12 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[13:7519617201447978950:2105], Type=268959746 2025-06-24T20:35:27.219808Z node 12 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[13:7519617201447978950:2105], Type=268959746 2025-06-24T20:35:27.214806Z node 15 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [15:7519617198584814802:2084], database# /Root/Tenant1, processor id# 72075186224037893 2025-06-24T20:35:27.233280Z node 16 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [16:7519617200600048560:2094], database# /Root/Tenant1, processor id# 72075186224037893 2025-06-24T20:35:27.287554Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:35:27.287597Z node 12 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:30.004486Z node 13 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:593: Handle TEvPrivate::TEvProcessInterval: service id# [13:7519617201447978775:2073], interval end# 2025-06-24T20:35:30.000000Z, event interval end# 2025-06-24T20:35:30.000000Z 2025-06-24T20:35:30.004534Z node 13 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:408: Rotate logs: service id# [13:7519617201447978775:2073], query logs count# 0, processor ids count# 1, processor id to database count# 1 2025-06-24T20:35:30.006521Z node 13 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:593: Handle TEvPrivate::TEvProcessInterval: service id# [13:7519617192858044105:2063], interval end# 2025-06-24T20:35:30.000000Z, event interval end# 2025-06-24T20:35:30.000000Z 2025-06-24T20:35:30.006558Z node 13 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:408: Rotate logs: service id# [13:7519617192858044105:2063], query logs count# 0, processor ids count# 0, processor id to database count# 0 |89.5%| [TA] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/test-results/unittest/{meta.json ... results_accumulator.log} |89.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream |89.5%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer |89.5%| [TA] {RESULT} $(B)/ydb/core/blobstorage/pdisk/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TxUsage::WriteToTopic_Demo_33_Table >> TxUsage::WriteToTopic_Demo_10_Query [GOOD] >> TPersQueueTest::WriteExisting [GOOD] >> TPersQueueTest::WriteExistingBigValue >> TopicService::DifferentConsumers_TheRangesOverlap [GOOD] >> TPersQueueTest::SetupLockSession2 [GOOD] >> TPersQueueTest::SetupLockSession |89.5%| [TA] $(B)/ydb/core/tx/schemeshard/ut_olap/test-results/unittest/{meta.json ... 
results_accumulator.log} |89.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw |89.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw |89.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut |89.5%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw >> TxUsage::WriteToTopic_Demo_11_Table |89.5%| [LD] {RESULT} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut |89.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut >> SystemView::ShowCreateTablePartitionByHash [GOOD] >> SystemView::ShowCreateTablePartitionSettings |89.5%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_olap/test-results/unittest/{meta.json ... results_accumulator.log} >> TPersQueueTest::UpdatePartitionLocation >> TPersQueueTest::DirectReadPreCached >> TxUsage::WriteToTopic_Demo_24_Table [GOOD] >> TopicService::UnknownConsumer >> KqpScanArrowFormat::AggregateEmptySum [GOOD] |89.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |89.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |89.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence |89.5%| [LD] {RESULT} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |89.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence |89.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence >> TxUsage::WriteToTopic_Demo_18_RestartNo_Table [GOOD] |89.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing |89.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing |89.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing >> TConsoleTxProcessorTests::TestTxProcessorSubProcessor [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorTemporary >> SystemView::AuthUsers_ResultOrder [GOOD] >> SystemView::AuthUsers_TableRange >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit-UseSink [GOOD] >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenAbort |89.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut |89.5%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut |89.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut >> TxUsage::WriteToTopic_Demo_24_Query >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterAclChange [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartNo_Query |89.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview |89.5%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview |89.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview |89.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut |89.5%| [LD] {RESULT} 
$(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut |89.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut >> KqpScanArrowFormat::AggregateWithFunction [GOOD] >> KqpScanArrowInChanels::AggregateByColumn [GOOD] >> THiveTest::TestHiveBalancerDifferentResources2 [GOOD] >> THiveTest::TestHiveBalancerUselessNeighbourMoves ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowFormat::AggregateEmptySum [GOOD] Test command err: Trying to start YDB, gRPC: 11776, MsgBus: 64775 2025-06-24T20:35:03.013876Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617158960542603:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:03.013967Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002200/r3tmp/tmp2Du5LW/pdisk_1.dat 2025-06-24T20:35:03.346489Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:03.349448Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617154665575284:2079] 1750797303008807 != 1750797303008810 TServer::EnableGrpc on GrpcPort 11776, node 1 2025-06-24T20:35:03.433630Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:03.433749Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:03.435636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:03.438541Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:35:03.438570Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:35:03.438587Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:35:03.438755Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64775 TClient is connected to server localhost:64775 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T20:35:04.026042Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:04.066042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:35:04.098197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:04.234417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:04.383170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:04.462842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:06.126552Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617171845446112:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:06.126694Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:06.419627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:06.456041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:06.531394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:06.602380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:06.637001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:06.691462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:06.751102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:06.827128Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617171845446774:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:06.827274Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:06.827604Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617171845446779:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:06.831721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:06.843515Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617171845446781:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:35:06.905245Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617171845446832:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:08.011780Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617158960542603:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:08.011899Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:09.155927Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797308365, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 22810, MsgBus: 28564 2025-06-24T20:35:09.940875Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519617184285581336:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:09.940972Z node 2 :METADATA_PROV ... se.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:26.728606Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797324143, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 24316, MsgBus: 16065 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002200/r3tmp/tmpcFrbLJ/pdisk_1.dat 2025-06-24T20:35:28.025617Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519617263939446295:2216];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:28.025929Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:35:28.144431Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:28.158404Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519617263939446098:2079] 1750797327865602 != 1750797327865605 2025-06-24T20:35:28.160488Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:28.160589Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:28.167809Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24316, node 4 2025-06-24T20:35:28.275806Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:35:28.275837Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:35:28.275848Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T20:35:28.276003Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16065 2025-06-24T20:35:28.877319Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16065 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:29.161773Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:35:29.178022Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:29.272138Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:29.494289Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:29.600592Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:35:32.651342Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617285414284227:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:32.651463Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:32.728132Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:32.806142Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:32.881606Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519617263939446295:2216];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:32.881686Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:32.905679Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:32.983274Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:33.057078Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:33.150324Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:33.233769Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:33.350995Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[4:7519617289709252196:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:33.351063Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:33.351273Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617289709252201:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:33.420521Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:33.442162Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519617289709252203:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:35:33.523350Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519617289709252254:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:36.400954Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797336036, txId: 281474976715672] shutting down >> THiveImplTest::BootQueueSpeed [GOOD] >> THiveImplTest::BalancerSpeedAndDistribution >> KqpScanArrowInChanels::AggregateEmptySum [GOOD] >> TConsoleTests::TestRemoveTenantExtSubdomain [GOOD] >> TConsoleTests::TestRemoveSharedTenantWoServerlessTenants >> Viewer::StorageGroupOutputWithSpaceCheckDependsOnUsage [GOOD] >> Viewer::SimpleFeatureFlags ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowFormat::AggregateWithFunction [GOOD] Test command err: Trying to start YDB, gRPC: 14290, MsgBus: 16512 2025-06-24T20:35:05.899252Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617168291042750:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:05.900182Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00216f/r3tmp/tmphaGFvm/pdisk_1.dat 2025-06-24T20:35:06.359068Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:06.360422Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:06.360544Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:06.372460Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14290, node 1 2025-06-24T20:35:06.432231Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:35:06.432258Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:35:06.432266Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:35:06.432411Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16512 2025-06-24T20:35:06.887671Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16512 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:07.085000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:35:07.099454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:35:07.109512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:07.251275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:07.402297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:07.486215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:09.227351Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617185470913384:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:09.227466Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:09.510779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:09.542238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:09.570833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:09.611675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:09.647346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:09.701135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:09.773760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:09.835673Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617185470914044:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:09.835717Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617185470914049:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:09.835752Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:09.839396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:09.854810Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617185470914051:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:35:09.940863Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617185470914104:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:10.891269Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617168291042750:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:10.891337Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:11.041345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:11.466609Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discard ... ger.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797325179, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 12204, MsgBus: 31462 2025-06-24T20:35:28.624146Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519617266164648574:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:28.624216Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00216f/r3tmp/tmpCMFjFb/pdisk_1.dat 2025-06-24T20:35:28.870856Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:28.873954Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519617266164648554:2079] 1750797328620216 != 1750797328620219 2025-06-24T20:35:28.898625Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:28.899499Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:28.901739Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12204, node 4 2025-06-24T20:35:28.958792Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:35:28.958838Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:35:28.958850Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:35:28.958999Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31462 TClient is connected to server 
localhost:31462 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:29.629052Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:35:29.643414Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:29.659897Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T20:35:29.680757Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:29.769813Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:29.984692Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:30.099729Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:35:33.480496Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617287639486679:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:33.480606Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:33.554148Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:33.618252Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:33.626024Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519617266164648574:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:33.628246Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:33.671291Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:33.746306Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:33.837209Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:33.922861Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:34.024857Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:34.143226Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[4:7519617291934454645:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:34.143354Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:34.143425Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617291934454650:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:34.147081Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:34.161444Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519617291934454652:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:35:34.231308Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519617291934454703:3431] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:37.393510Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797336533, txId: 281474976715672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowInChanels::AggregateByColumn [GOOD] Test command err: Trying to start YDB, gRPC: 4527, MsgBus: 16305 2025-06-24T20:35:07.712466Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617178035778498:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:07.712853Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002076/r3tmp/tmpWJUQp4/pdisk_1.dat 2025-06-24T20:35:08.012432Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:08.013622Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617178035778311:2079] 1750797307703517 != 1750797307703520 2025-06-24T20:35:08.024233Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:08.024357Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:08.027196Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4527, node 1 2025-06-24T20:35:08.121237Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:35:08.121266Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:35:08.121279Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:35:08.121388Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16305 TClient is connected to server localhost:16305 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:08.658834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:35:08.690450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:08.711692Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:08.838481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:09.029278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:09.109341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:10.898290Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617190920681832:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:10.898433Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:11.261495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:11.295021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:11.367799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:11.398694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:11.467169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:11.541369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:11.582108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:11.645398Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617195215649795:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:11.645523Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:11.645927Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617195215649800:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:11.650247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:11.667302Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617195215649802:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:35:11.763538Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617195215649855:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:12.711942Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617178035778498:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:12.712033Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:13.120532Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797313160, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 1683, MsgBus: 11482 2025-06-24T20:35:13.986441Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519617203074346118:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:13.986470Z node 2 :METADATA_PROVIDE ... ot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:27.584166Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797326859, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 9377, MsgBus: 5454 2025-06-24T20:35:28.967241Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519617267832365387:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:28.967418Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002076/r3tmp/tmpM0Wxng/pdisk_1.dat 2025-06-24T20:35:29.268982Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519617267832365332:2079] 1750797328936579 != 1750797328936582 2025-06-24T20:35:29.281052Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9377, node 4 2025-06-24T20:35:29.314870Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:29.315043Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:29.411828Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:29.463951Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:35:29.463991Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:35:29.464002Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T20:35:29.464174Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5454 2025-06-24T20:35:30.007483Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5454 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:30.317130Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:35:30.344594Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:30.439273Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:30.674078Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:30.782664Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:35:33.816557Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617289307203467:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:33.816655Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:33.922129Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:33.974191Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519617267832365387:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:33.974268Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:34.011358Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:34.091749Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:34.172615Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:34.230141Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:34.308642Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:34.402540Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:34.561752Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[4:7519617293602171430:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:34.561886Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:34.562064Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617293602171435:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:34.567532Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:34.580012Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519617293602171437:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:35:34.675459Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519617293602171488:3430] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:37.606465Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797336722, txId: 281474976710672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterAclChange [GOOD] Test command err: 2025-06-24T20:33:12.281389Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616684057881696:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:12.281430Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:33:12.414064Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616685778141753:2157];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004707/r3tmp/tmp74JRkg/pdisk_1.dat 2025-06-24T20:33:12.884818Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:33:13.219883Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:13.230006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:13.230109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:13.232910Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:13.233024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:13.242613Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:33:13.242772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:33:13.250825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:33:13.297895Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 20612, node 1 2025-06-24T20:33:13.418636Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:33:13.516161Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-24T20:33:13.516192Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:13.516207Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:13.516361Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24838 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:14.007235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:33:16.688757Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-24T20:33:16.713837Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=MTIxMGQwY2ItYzgwMDI5MjYtODQ3MGY0MDAtYzYzOWZlOWE=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id MTIxMGQwY2ItYzgwMDI5MjYtODQ3MGY0MDAtYzYzOWZlOWE= 2025-06-24T20:33:16.714429Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616701237751714:2294], Start check tables existence, number paths: 2 2025-06-24T20:33:16.714575Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=MTIxMGQwY2ItYzgwMDI5MjYtODQ3MGY0MDAtYzYzOWZlOWE=, ActorId: [1:7519616701237751715:2295], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:33:16.714845Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 2 2025-06-24T20:33:16.714858Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-24T20:33:16.714867Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-24T20:33:16.727454Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616701237751714:2294], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-24T20:33:16.727530Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616701237751714:2294], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-24T20:33:16.727558Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616701237751714:2294], Successfully finished 2025-06-24T20:33:16.727642Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-24T20:33:16.744298Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616701237751732:2519], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:33:16.748896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:16.751889Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616701237751732:2519], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-06-24T20:33:16.752086Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616701237751732:2519], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-24T20:33:16.767452Z node 2 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service 
initialization 2025-06-24T20:33:16.767667Z node 2 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-24T20:33:16.767683Z node 2 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-24T20:33:16.774332Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616701237751732:2519], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:33:16.780306Z node 2 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 2 2025-06-24T20:33:16.780377Z node 2 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7519616702958011091:2266], Start check tables existence, number paths: 2 2025-06-24T20:33:16.781986Z node 2 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7519616702958011091:2266], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-24T20:33:16.782039Z node 2 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7519616702958011091:2266], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-24T20:33:16.782070Z node 2 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7519616702958011091:2266], Successfully finished 2025-06-24T20:33:16.782107Z node 2 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-24T20:33:16.849104Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616701237751732:2519], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:33:16.855315Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616701237751806:2571] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:16.855513Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616701237751732:2519], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-24T20:33:16.861565Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=NGY4Njk2YWItNDk0ZDc5MWEtNjc1NTliNzUtODMzNWViNGI=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NGY4Njk2YWItNDk0ZDc5MWEtNjc1NTliNzUtODMzNWViNGI= 2025-06-24T20:33:16.861941Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-06-24T20:33:16.861962Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-24T20:33:16.862012Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=NGY4Njk2YWItNDk0ZDc5MWEtNjc1NTliNzUtODMzNWViNGI=, ActorId: [1:7519616701237751816:2296], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:33:16.862143Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616701237751818:2297], DatabaseId: /Root, PoolId: sampl ... (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:35:30.947321Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:34.935333Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519617274055092863:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:34.935440Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:36.919438Z node 12 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-24T20:35:36.940239Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=12&id=ZDQ3YmVkOTMtMjMwZTUzYTUtOWVjYjg2Y2UtZGU4YmIxYjc=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZDQ3YmVkOTMtMjMwZTUzYTUtOWVjYjg2Y2UtZGU4YmIxYjc= 2025-06-24T20:35:36.941564Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [12:7519617304119864498:2291], Start check tables existence, number paths: 2 2025-06-24T20:35:36.941690Z node 12 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-24T20:35:36.941716Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-24T20:35:36.941743Z node 12 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-24T20:35:36.941809Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=12&id=ZDQ3YmVkOTMtMjMwZTUzYTUtOWVjYjg2Y2UtZGU4YmIxYjc=, ActorId: [12:7519617304119864499:2292], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:35:36.947811Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [12:7519617304119864498:2291], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-24T20:35:36.947927Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [12:7519617304119864498:2291], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-24T20:35:36.947976Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [12:7519617304119864498:2291], Successfully finished 2025-06-24T20:35:36.948082Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-24T20:35:36.956276Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519617304119864528:2306], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:35:36.984372Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:36.987520Z node 12 
:KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519617304119864528:2306], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-06-24T20:35:36.987756Z node 12 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519617304119864528:2306], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-24T20:35:37.017369Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519617304119864528:2306], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:35:37.087320Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519617304119864528:2306], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:35:37.091927Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7519617308414831876:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:37.092116Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519617304119864528:2306], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-24T20:35:37.092759Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: Root, PoolId: sample_pool_id 2025-06-24T20:35:37.092798Z node 12 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id Root 2025-06-24T20:35:37.092905Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519617308414831883:2296], DatabaseId: Root, PoolId: sample_pool_id, Start pool fetching 2025-06-24T20:35:37.095106Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519617308414831883:2296], DatabaseId: Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-24T20:35:37.095248Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: Root 2025-06-24T20:35:37.095296Z node 12 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-06-24T20:35:37.095706Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:466: [WorkloadService] [TPoolHandlerActorBase] ActorId: [12:7519617308414831892:2297], DatabaseId: Root, PoolId: sample_pool_id, Subscribed on schemeboard notifications for path: [OwnerId: 72057594046644480, LocalPathId: 5] 2025-06-24T20:35:37.100244Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:274: [WorkloadService] [TPoolHandlerActorBase] ActorId: [12:7519617308414831892:2297], DatabaseId: Root, PoolId: sample_pool_id, Got watch notification 2025-06-24T20:35:37.112091Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: default 2025-06-24T20:35:37.112127Z node 12 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-24T20:35:37.112260Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=12&id=ZDQ3YmVkOTMtMjMwZTUzYTUtOWVjYjg2Y2UtZGU4YmIxYjc=, ActorId: [12:7519617304119864499:2292], ActorState: ReadyState, TraceId: 01jyhteggq5mq7kmfyaysxr78c, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: GRANT ALL ON `/Root/.metadata/workload_manager/pools/sample_pool_id` TO `test@user`; rpcActor: [0:0:0] database: /Root databaseId: /Root pool id: 
default 2025-06-24T20:35:37.115247Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519617308414831904:2299], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-06-24T20:35:37.123337Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519617308414831904:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:37.123489Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:37.133518Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:35:37.136555Z node 12 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=12&id=ZDQ3YmVkOTMtMjMwZTUzYTUtOWVjYjg2Y2UtZGU4YmIxYjc=, ActorId: [12:7519617304119864499:2292], ActorState: ExecuteState, TraceId: 01jyhteggq5mq7kmfyaysxr78c, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [12:7519617308414831905:2292] WorkloadServiceCleanup: 0 2025-06-24T20:35:37.142776Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:274: [WorkloadService] [TPoolHandlerActorBase] ActorId: [12:7519617308414831892:2297], DatabaseId: Root, PoolId: sample_pool_id, Got watch notification 2025-06-24T20:35:37.143431Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=12&id=ZDQ3YmVkOTMtMjMwZTUzYTUtOWVjYjg2Y2UtZGU4YmIxYjc=, ActorId: [12:7519617304119864499:2292], ActorState: CleanupState, TraceId: 01jyhteggq5mq7kmfyaysxr78c, EndCleanup, isFinal: 0 2025-06-24T20:35:37.143494Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=12&id=ZDQ3YmVkOTMtMjMwZTUzYTUtOWVjYjg2Y2UtZGU4YmIxYjc=, ActorId: [12:7519617304119864499:2292], ActorState: CleanupState, TraceId: 01jyhteggq5mq7kmfyaysxr78c, Sent query response back to proxy, proxyRequestId: 3, proxyId: [12:7519617274055092833:2064] 2025-06-24T20:35:37.157087Z node 12 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=12&id=ZDQ3YmVkOTMtMjMwZTUzYTUtOWVjYjg2Y2UtZGU4YmIxYjc=, ActorId: [12:7519617304119864499:2292], ActorState: ReadyState, Session closed due to explicit close event 2025-06-24T20:35:37.157143Z node 12 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=12&id=ZDQ3YmVkOTMtMjMwZTUzYTUtOWVjYjg2Y2UtZGU4YmIxYjc=, ActorId: [12:7519617304119864499:2292], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:35:37.157179Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=12&id=ZDQ3YmVkOTMtMjMwZTUzYTUtOWVjYjg2Y2UtZGU4YmIxYjc=, ActorId: [12:7519617304119864499:2292], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T20:35:37.157207Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=12&id=ZDQ3YmVkOTMtMjMwZTUzYTUtOWVjYjg2Y2UtZGU4YmIxYjc=, ActorId: [12:7519617304119864499:2292], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T20:35:37.157299Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=12&id=ZDQ3YmVkOTMtMjMwZTUzYTUtOWVjYjg2Y2UtZGU4YmIxYjc=, ActorId: [12:7519617304119864499:2292], ActorState: unknown state, Session actor destroyed >> TPersQueueTest::FetchRequest [GOOD] >> TPersQueueTest::Init >> TxUsage::Sinks_Oltp_WriteToTopic_3_Query [GOOD] >> TxUsage::WriteToTopic_Demo_41_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowInChanels::AggregateEmptySum [GOOD] Test 
command err: Trying to start YDB, gRPC: 8239, MsgBus: 3042 2025-06-24T20:35:04.797017Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617164254444979:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:04.797192Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0021a3/r3tmp/tmpyfC5je/pdisk_1.dat 2025-06-24T20:35:05.133244Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:05.137399Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617164254444959:2079] 1750797304793407 != 1750797304793410 TServer::EnableGrpc on GrpcPort 8239, node 1 2025-06-24T20:35:05.183156Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:05.186222Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:05.194352Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:05.208830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:35:05.208857Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:35:05.208868Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:35:05.209005Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3042 TClient is connected to server localhost:3042 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:35:05.800372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:05.803945Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T20:35:05.832828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:05.964696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:06.136918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:06.220922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:07.841699Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617177139348487:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:07.841829Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:08.115826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:08.195722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:08.226085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:08.257597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:08.289226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:08.366923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:08.436288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:08.494581Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617181434316449:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:08.494661Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:08.494888Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617181434316454:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:08.499746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:08.511483Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617181434316456:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:35:08.594076Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617181434316507:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:09.795445Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617164254444979:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:09.795513Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:12.445465Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797310535, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 6515, MsgBus: 29013 2025-06-24T20:35:13.198416Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519617205088074025:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:13.198714Z node 2 :METADATA_PROVIDER E ... nager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797328735, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 7903, MsgBus: 26971 2025-06-24T20:35:30.369427Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519617274721933438:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:30.371574Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0021a3/r3tmp/tmpD4ZiGj/pdisk_1.dat 2025-06-24T20:35:30.528571Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:30.542809Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519617274721933418:2079] 1750797330360653 != 1750797330360656 2025-06-24T20:35:30.555082Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:30.555200Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 7903, node 4 2025-06-24T20:35:30.560960Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:30.607209Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:35:30.607232Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:35:30.607242Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:35:30.607396Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26971 TClient is connected to server 
localhost:26971 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T20:35:31.348566Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:31.378531Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:31.383921Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:31.518559Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:31.721653Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:31.817587Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:35.173328Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617296196771540:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:35.173450Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:35.274666Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:35.323021Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:35.368839Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:35.374054Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519617274721933438:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:35.374599Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:35.417865Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:35.508234Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:35.602158Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:35.659950Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:35.814920Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[4:7519617296196772205:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:35.815052Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:35.815389Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617296196772210:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:35.821965Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:35.842584Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-24T20:35:35.844232Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519617296196772212:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:35:35.899360Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519617296196772263:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:38.590074Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797338227, txId: 281474976715672] shutting down |89.6%| [TA] $(B)/ydb/core/kqp/ut/arrow/test-results/unittest/{meta.json ... results_accumulator.log} >> TConsoleTxProcessorTests::TestTxProcessorTemporary [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorRandom >> Describe::LocationWithKillTablets [GOOD] >> Describe::DescribePartitionPermissions >> TConsoleConfigSubscriptionTests::TestAddConfigSubscription >> TxUsage::WriteToTopic_Demo_41_Query >> KqpTypes::UnsafeTimestampCastV0 >> TxUsage::WriteToTopic_Demo_5_Table [GOOD] >> THiveImplTest::BalancerSpeedAndDistribution [GOOD] >> THiveImplTest::TestShortTabletTypes [GOOD] >> THiveImplTest::TestStDev [GOOD] >> THiveImplTest::BootQueueConfigurePriorities [GOOD] >> THiveTest::TestBlockCreateTablet >> KqpParams::CheckQueryCacheForPreparedQuery >> TPersQueueTest::ReadFromSeveralPartitions [GOOD] >> TPersQueueTest::ReadFromSeveralPartitionsMigrated >> KqpStats::JoinNoStatsYql >> KqpPg::PgUpdateCompoundKey-useSink [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_4_Table >> TConsoleConfigSubscriptionTests::TestAddConfigSubscription [GOOD] >> TConsoleConfigSubscriptionTests::TestRemoveConfigSubscription >> KqpLimits::TooBigQuery+useSink >> DemoTx::Scenario_2 [GOOD] >> THiveTest::TestBlockCreateTablet [GOOD] >> THiveTest::DrainWithHiveRestart >> TxUsage::WriteToTopic_Demo_33_Table [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Query [GOOD] >> TConsoleConfigSubscriptionTests::TestRemoveConfigSubscription [GOOD] >> TConsoleConfigSubscriptionTests::TestRemoveConfigSubscriptions >> TxUsage::WriteToTopic_Demo_33_Query |89.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id |89.6%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/arrow/test-results/unittest/{meta.json ... 
results_accumulator.log} |89.6%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id >> TPersQueueCommonTest::Auth_CreateGrpcStreamWithInvalidTokenInInitialMetadata_SessionClosedWithUnauthenticatedError [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Table >> TPersQueueCommonTest::Auth_MultipleUpdateTokenRequestIterationsWithValidToken_GotUpdateTokenResponseForEachRequest >> TConsoleConfigSubscriptionTests::TestRemoveConfigSubscriptions [GOOD] >> TConsoleConfigSubscriptionTests::TestListConfigSubscriptions >> TPQTest::TestSourceIdDropByUserWrites [GOOD] >> TPQTest::TestTimeRetention >> DemoTx::Scenario_3 >> TxUsage::WriteToTopic_Demo_5_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::PgUpdateCompoundKey-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 4602, MsgBus: 3363 2025-06-24T20:30:37.708403Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616019915543433:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:37.708485Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003f22/r3tmp/tmpyFvzn3/pdisk_1.dat 2025-06-24T20:30:38.117363Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:38.117473Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:30:38.120345Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:38.182617Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4602, node 1 2025-06-24T20:30:38.430772Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:30:38.430801Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:30:38.430814Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:30:38.430937Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:30:38.729549Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3363 TClient is connected to server localhost:3363 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:30:39.410509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:30:39.445142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 16 2025-06-24T20:30:41.640342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:41.882631Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1000_b (key, value) VALUES ( '0'::int2, ARRAY ['false'::bool, 'false'::bool] ); 2025-06-24T20:30:41.940612Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616037095413336:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:41.940714Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:41.941119Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616037095413348:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:30:41.946140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:30:41.971576Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616037095413350:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-24T20:30:42.078270Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616041390380697:2405] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } --!syntax_pg INSERT INTO Pg1000_b (key, value) VALUES ( '1'::int2, ARRAY ['true'::bool, 'true'::bool] ); 18 2025-06-24T20:30:42.715199Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616019915543433:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:42.715574Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:30:42.722115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:42.861950Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1002_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::"char", '0'::"char"] ); --!syntax_pg INSERT INTO Pg1002_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::"char", '1'::"char"] ); --!syntax_pg INSERT INTO Pg1002_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::"char", '2'::"char"] ); 21 2025-06-24T20:30:43.401812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:43.488566Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1005_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::int2, '0'::int2] ); --!syntax_pg INSERT INTO Pg1005_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::int2, '1'::int2] ); --!syntax_pg INSERT INTO Pg1005_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::int2, '2'::int2] ); 23 2025-06-24T20:30:44.097453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:44.157694Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:30:44.160454Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:245: Prepare transaction failed. 
txid 281474976710679 at tablet 72075186224037891 errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976710679] at 72075186224037891 while waiting for scan finish) | 2025-06-24T20:30:44.161906Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976710679 at tablet 72075186224037891 status: ERROR errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976710679] at 72075186224037891 while waiting for scan finish) | --!syntax_pg INSERT INTO Pg1007_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::int4, '0'::int4] ); --!syntax_pg INSERT INTO Pg1007_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::int4, '1'::int4] ); --!syntax_pg INSERT INTO Pg1007_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::int4, '2'::int4] ); 20 2025-06-24T20:30:44.688714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710684:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:30:44.754619Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1016_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::int8, '0'::int8] ); --!syntax_pg INSERT INTO Pg1016_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::int8, '1'::int8] ); --!syntax_pg INSERT INTO Pg1016_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::int8, '2'::int8] ); 700 2025-06-24T20:30:45.316347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710690:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) --!syntax_pg INSERT INTO Pg1021_b (key, value) VALUES ( '0'::int2, ARRAY ['0.5'::float4, '0.5'::float4] ); ... at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:29.622132Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7519617270427580639:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:29.622324Z node 9 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:29.622960Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7519617270427580645:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:29.628797Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:29.661962Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [9:7519617270427580647:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T20:35:29.731280Z node 9 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [9:7519617270427580698:2401] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:30.160098Z node 9 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [9:7519617274722548043:2324], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Cannot update primary key column: key1
:1:1: Error: Cannot update primary key column: key2 2025-06-24T20:35:30.164457Z node 9 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=9&id=NzdkZjdkZmUtNGVlODJmOTMtZTI3MTk5NzItOWY0MzBkMTg=, ActorId: [9:7519617274722548036:2320], ActorState: ExecuteState, TraceId: 01jyhte9p1ft0qebmyjxv47n2h, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T20:35:30.224919Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... Trying to start YDB, gRPC: 12537, MsgBus: 23025 2025-06-24T20:35:33.043620Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519617288306271830:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:33.043719Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003f22/r3tmp/tmpKTEeMi/pdisk_1.dat 2025-06-24T20:35:33.280284Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:33.282930Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519617288306271807:2079] 1750797333042377 != 1750797333042380 2025-06-24T20:35:33.301129Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:33.301259Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:33.305033Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12537, node 10 2025-06-24T20:35:33.391962Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:35:33.392001Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:35:33.392014Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:35:33.392193Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23025 2025-06-24T20:35:34.070907Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23025 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:34.434490Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:35:34.463666Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:35:38.045912Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519617288306271830:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:38.046099Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:39.771672Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519617314076076232:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:39.771809Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:39.786267Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:39.870404Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519617314076076336:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:39.870528Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:39.870568Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519617314076076342:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:39.876505Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:39.893822Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519617314076076344:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T20:35:39.959358Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519617314076076395:2401] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:40.686309Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7519617318371043760:2331], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Cannot update primary key column: key1
:1:1: Error: Cannot update primary key column: key2 2025-06-24T20:35:40.687650Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=10&id=ZmMzMjU2LTMxZjE2MjM3LWU5MGVlOTc3LTc1YTZiMmQy, ActorId: [10:7519617318371043753:2327], ActorState: ExecuteState, TraceId: 01jyhtekyyedh7pxz19fv17e3w, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T20:35:40.699777Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> TConsoleTests::TestRemoveSharedTenantWoServerlessTenants [GOOD] >> TConsoleTests::TestRemoveSharedTenantWithServerlessTenants >> TConsoleConfigSubscriptionTests::TestListConfigSubscriptions [GOOD] >> TConsoleConfigSubscriptionTests::TestReplaceConfigSubscriptions >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Table [GOOD] >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenAbort [GOOD] >> DataShardVolatile::DistributedWriteAsymmetricExecute |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo |89.6%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo >> TConsoleConfigSubscriptionTests::TestReplaceConfigSubscriptions [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForNewSubscription >> KqpQuery::ExecuteDataQueryCollectMeta >> TPQTest::TestTimeRetention [GOOD] >> TPQTest::TestStorageRetention >> TopicService::UnknownConsumer [GOOD] >> THiveTest::TestHiveBalancerUselessNeighbourMoves [GOOD] >> THiveTest::TestHiveBalancerWithImmovableTablets >> KqpTypes::UnsafeTimestampCastV0 [GOOD] >> KqpTypes::UnsafeTimestampCastV1 >> TxUsage::WriteToTopic_Demo_24_Query [GOOD] >> TPersQueueTest::CloseActiveWriteSessionOnClusterDisable [GOOD] >> TPersQueueTest::Cache >> DataShardVolatile::TwoAppendsMustBeVolatile-UseSink [GOOD] >> DataShardVolatile::VolatileCommitOnBlobStorageFailure+UseSink >> KqpStats::JoinNoStatsYql [GOOD] >> KqpStats::JoinNoStatsScan >> THiveTest::DrainWithHiveRestart [GOOD] >> THiveTest::TestCheckSubHiveForwarding >> KqpParams::CheckQueryCacheForPreparedQuery [GOOD] >> KqpParams::CheckQueryCacheForExecuteAndPreparedQueries >> TxUsage::WriteToTopic_Demo_25_Table >> TopicService::UnknownTopic >> TConsoleConfigSubscriptionTests::TestNotificationForNewSubscription [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForNewConfigItem >> Viewer::SimpleFeatureFlags [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartNo_Query [GOOD] >> TPersQueueTest::DirectReadPreCached [GOOD] >> TPersQueueTest::DirectReadNotCached |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut |89.6%| [LD] {RESULT} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut >> TPQTest::TestStorageRetention [GOOD] >> TPQTest::TestStatusWithMultipleConsumers |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut >> THiveTest::TestCheckSubHiveForwarding [GOOD] >> THiveTest::TestCheckSubHiveDrain >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Query >> TPersQueueTest::UpdatePartitionLocation [GOOD] >> TPersQueueTest::TopicServiceCommitOffset >> 
TConsoleTests::TestRemoveSharedTenantWithServerlessTenants [GOOD] >> TConsoleTests::TestRemoveSharedTenantAfterRemoveServerlessTenant >> TPQTest::TestStatusWithMultipleConsumers [GOOD] >> TPQTest::TestTabletRestoreEventsOrder >> Describe::DescribePartitionPermissions [GOOD] >> DirectReadWithServer::KillPQTablet >> TPQTest::TestTabletRestoreEventsOrder [GOOD] |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |89.6%| [LD] {RESULT} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut >> TxUsage::WriteToTopic_Demo_33_Query [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForNewConfigItem [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForModifiedConfigItem >> THiveTest::TestCheckSubHiveDrain [GOOD] >> THiveTest::TestCheckSubHiveMigration >> TxUsage::WriteToTopic_Demo_34_Table |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator |89.6%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::TestTabletRestoreEventsOrder [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:111:2057] recipient: [1:104:2136] 2025-06-24T20:34:00.204602Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:34:00.204705Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927938 is [1:156:2175] sender: [1:157:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:182:2057] recipient: [1:14:2061] 2025-06-24T20:34:00.238271Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:181:2194], now have 1 active actors on pipe 2025-06-24T20:34:00.238448Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T20:34:00.266912Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 PartitionIds: 2 PartitionIds: 3 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } Partitions { PartitionId: 2 } Partitions { PartitionId: 3 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } 
AllPartitions { PartitionId: 1 } AllPartitions { PartitionId: 2 } AllPartitions { PartitionId: 3 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "test" Generation: 1 Important: false } 2025-06-24T20:34:00.272699Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 PartitionIds: 2 PartitionIds: 3 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } Partitions { PartitionId: 2 } Partitions { PartitionId: 3 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } AllPartitions { PartitionId: 2 } AllPartitions { PartitionId: 3 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "test" Generation: 1 Important: false } 2025-06-24T20:34:00.272914Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:34:00.275954Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1 actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 PartitionIds: 2 PartitionIds: 3 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } Partitions { PartitionId: 2 } Partitions { PartitionId: 3 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } AllPartitions { PartitionId: 2 } AllPartitions { PartitionId: 3 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "test" Generation: 1 Important: false } 2025-06-24T20:34:00.276087Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:34:00.276167Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:1:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:34:00.276209Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:2:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:34:00.276299Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:3:Initializer] Start initializing step TInitConfigStep 2025-06-24T20:34:00.277226Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:34:00.277638Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:188:2199] 2025-06-24T20:34:00.280680Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--asdfgs--topic:0:Initializer] Initializing completed. 
2025-06-24T20:34:00.280759Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:188:2199] 2025-06-24T20:34:00.280840Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--asdfgs--topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:34:00.283494Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T20:34:00.283632Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user reinit request with generation 1 2025-06-24T20:34:00.283691Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user reinit with generation 1 done 2025-06-24T20:34:00.283741Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user test reinit request with generation 1 2025-06-24T20:34:00.283792Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user test reinit with generation 1 done 2025-06-24T20:34:00.284028Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T20:34:00.284078Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user test readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T20:34:00.284270Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T20:34:00.284621Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:34:00.284897Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:189:2200] 2025-06-24T20:34:00.287138Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--asdfgs--topic:1:Initializer] Initializing completed. 2025-06-24T20:34:00.287226Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:189:2200] 2025-06-24T20:34:00.287272Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--asdfgs--topic partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:34:00.289329Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Process pending events. 
Count 0 2025-06-24T20:34:00.289439Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 1 user user reinit request with generation 1 2025-06-24T20:34:00.289485Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 1 user user reinit with generation 1 done 2025-06-24T20:34:00.289531Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 1 user test reinit request with generation 1 2025-06-24T20:34:00.289578Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 1 user test reinit with generation 1 done 2025-06-24T20:34:00.289771Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 1 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T20:34:00.289808Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 1 user test readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T20:34:00.290271Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:34:00.290408Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T20:34:00.290696Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [1:190:2201] 2025-06-24T20:34:00.292832Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--asdfgs--topic:2:Initializer] Initializing completed. 2025-06-24T20:34:00.292889Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 2 generation 2 [1:190:2201] 2025-06-24T20:34:00.292933Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--asdfgs--topic partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T20:34:00.294990Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-24T20:34:00.295096Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 2, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 2 user user reinit request with generation 1 2025-06-24T20:34:00.295173Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 2, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 2 user user r ... 
cer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:562:2554] connected; active server actors: 1 2025-06-24T20:35:56.033211Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:567:2559] connected; active server actors: 1 2025-06-24T20:35:56.040461Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:572:2564] connected; active server actors: 1 2025-06-24T20:35:56.048027Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:577:2569] connected; active server actors: 1 2025-06-24T20:35:56.060842Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:582:2574] connected; active server actors: 1 2025-06-24T20:35:56.064487Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:587:2579] connected; active server actors: 1 2025-06-24T20:35:56.077309Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:592:2584] connected; active server actors: 1 2025-06-24T20:35:56.080720Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:597:2589] connected; active server actors: 1 2025-06-24T20:35:56.088336Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:602:2594] connected; active server actors: 1 2025-06-24T20:35:56.091335Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:607:2599] connected; active server actors: 1 2025-06-24T20:35:56.100495Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:612:2604] connected; active server actors: 1 2025-06-24T20:35:56.103370Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:617:2609] connected; active server actors: 1 2025-06-24T20:35:56.113014Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:622:2614] connected; active server actors: 1 2025-06-24T20:35:56.116267Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:627:2619] connected; active server actors: 1 2025-06-24T20:35:56.120716Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:632:2624] connected; active server actors: 1 2025-06-24T20:35:56.128809Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:637:2629] connected; active server actors: 1 2025-06-24T20:35:56.133027Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:642:2634] connected; active server actors: 1 2025-06-24T20:35:56.141507Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:647:2639] connected; active server actors: 1 2025-06-24T20:35:56.148597Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:652:2644] connected; active 
server actors: 1 2025-06-24T20:35:56.155673Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:657:2649] connected; active server actors: 1 2025-06-24T20:35:56.161058Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:662:2654] connected; active server actors: 1 2025-06-24T20:35:56.168656Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:667:2659] connected; active server actors: 1 2025-06-24T20:35:56.180776Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:672:2664] connected; active server actors: 1 2025-06-24T20:35:56.188784Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:677:2669] connected; active server actors: 1 2025-06-24T20:35:56.196817Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:682:2674] connected; active server actors: 1 2025-06-24T20:35:56.212660Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:687:2679] connected; active server actors: 1 2025-06-24T20:35:56.220070Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:692:2684] connected; active server actors: 1 2025-06-24T20:35:56.232754Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:697:2689] connected; active server actors: 1 2025-06-24T20:35:56.240597Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:702:2694] connected; active server actors: 1 2025-06-24T20:35:56.255178Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:707:2699] connected; active server actors: 1 2025-06-24T20:35:56.264757Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:712:2704] connected; active server actors: 1 2025-06-24T20:35:56.276896Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:717:2709] connected; active server actors: 1 2025-06-24T20:35:56.287475Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:722:2714] connected; active server actors: 1 2025-06-24T20:35:56.294460Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:727:2719] connected; active server actors: 1 2025-06-24T20:35:56.308374Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:732:2724] connected; active server actors: 1 2025-06-24T20:35:56.316805Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:737:2729] connected; active server actors: 1 2025-06-24T20:35:56.328736Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:742:2734] connected; active server actors: 1 2025-06-24T20:35:56.336654Z node 67 :PERSQUEUE_READ_BALANCER INFO: 
read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:747:2739] connected; active server actors: 1 2025-06-24T20:35:56.344652Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:752:2744] connected; active server actors: 1 2025-06-24T20:35:56.363971Z node 67 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [67:757:2749], now have 1 active actors on pipe 2025-06-24T20:35:56.365573Z node 67 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [67:760:2752], now have 1 active actors on pipe 2025-06-24T20:35:56.366873Z node 67 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [67:763:2755], now have 1 active actors on pipe 2025-06-24T20:35:56.372540Z node 67 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [67:766:2758] connected; active server actors: 1 2025-06-24T20:35:57.482713Z node 68 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:35:57.482836Z node 68 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:35:57.620635Z node 68 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:35:57.620754Z node 68 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:35:57.624865Z node 68 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:35:57.626588Z node 68 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 72 actor [68:178:2191] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "aaa" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 72 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 72 ReadRuleGenerations: 72 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 72 Important: false } Consumers { Name: "aaa" Generation: 72 Important: true } 2025-06-24T20:35:57.627817Z node 68 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [68:246:2244] 2025-06-24T20:35:57.629294Z node 68 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 3 [68:246:2244] 2025-06-24T20:35:57.630745Z node 68 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [68:248:2246] 2025-06-24T20:35:57.631931Z node 68 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'topic' partition 1 generation 3 [68:248:2246] 2025-06-24T20:35:57.681377Z node 68 :PERSQUEUE NOTICE: 
pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:35:57.681480Z node 68 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:35:57.682569Z node 68 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [68:325:2306] 2025-06-24T20:35:57.684930Z node 68 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [68:327:2308] 2025-06-24T20:35:57.691620Z node 68 :PERSQUEUE INFO: partition_init.cpp:895: [topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T20:35:57.691739Z node 68 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 4 [68:325:2306] 2025-06-24T20:35:57.692216Z node 68 :PERSQUEUE INFO: partition_init.cpp:895: [topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T20:35:57.692268Z node 68 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'topic' partition 1 generation 4 [68:327:2308] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::SimpleFeatureFlags [GOOD] Test command err: BASE_PERF = 3.671600697 2025-06-24T20:34:10.722407Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:297:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:10.723017Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:10.723081Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:34:11.202255Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 11461, node 1 TClient is connected to server localhost:8213 2025-06-24T20:34:22.220046Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:34:22.220167Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:22.220601Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:118:2164], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:34:22.572236Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 TServer::EnableGrpc on GrpcPort 25094, node 2 TClient is connected to server localhost:17479 2025-06-24T20:34:36.725450Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:280:2323], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:36.725915Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:36.726017Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:34:37.085035Z node 3 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 3 Type# 268639257 TServer::EnableGrpc on GrpcPort 30453, node 3 TClient is connected to server localhost:1958 2025-06-24T20:34:50.412907Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:296:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:50.413186Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:34:50.413485Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:34:51.187792Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 6340, node 4 TClient is connected to server localhost:62608 2025-06-24T20:35:05.281697Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:115:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:35:05.281961Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:35:05.282072Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:35:05.678934Z node 5 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 5 Type# 268639257 TServer::EnableGrpc on GrpcPort 18997, node 5 TClient is connected to server localhost:17227 2025-06-24T20:35:18.679444Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:280:2323], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:35:18.680008Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:35:18.680129Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:35:19.096028Z node 6 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 6 Type# 268639257 TServer::EnableGrpc on GrpcPort 22413, node 6 TClient is connected to server localhost:28378 2025-06-24T20:35:34.488024Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:298:2341], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:35:34.488536Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:35:34.488785Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:35:35.151417Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 24952, node 7 TClient is connected to server localhost:23683 2025-06-24T20:35:40.684521Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7519617318294356800:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:40.684760Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:35:41.039365Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7519617318294356781:2079] 1750797340671865 != 1750797340671868 2025-06-24T20:35:41.050112Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:41.050329Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:41.053750Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14155, node 8 2025-06-24T20:35:41.096467Z node 8 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-24T20:35:41.099560Z node 8 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-24T20:35:41.124011Z node 8 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:41.243557Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:35:41.243602Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:35:41.243625Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:35:41.243865Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:35:41.719417Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27711 2025-06-24T20:35:45.687517Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[8:7519617318294356800:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:45.687635Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TPersQueueTest::WriteExistingBigValue [GOOD] >> TPersQueueTest::WriteEmptyData >> KqpTypes::UnsafeTimestampCastV1 [GOOD] >> KqpTypes::Time64Columns+EnableTableDatetime64 >> TxUsage::WriteToTopic_Demo_41_Query [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Table >> TPersQueueTest::Init [GOOD] >> TPersQueueTest::EventBatching >> TxUsage::Sinks_Oltp_WriteToTopic_4_Table [GOOD] >> KqpQuery::ExecuteDataQueryCollectMeta [GOOD] >> KqpQuery::DeleteWhereInSubquery >> THiveTest::TestCheckSubHiveMigration [GOOD] >> THiveTest::PipeAlivenessOfDeadTablet >> DataShardVolatile::DistributedWriteAsymmetricExecute [GOOD] >> DataShardVolatile::DistributedWriteThenDropTable >> TxUsage::Sinks_Oltp_WriteToTopic_4_Query >> TxUsage::WriteToTopic_Demo_42_Table >> KqpParams::CheckQueryCacheForExecuteAndPreparedQueries [GOOD] >> KqpParams::CheckCacheByAst >> KqpQuery::PreparedQueryInvalidate |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut |89.6%| [LD] {RESULT} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut >> TConsoleConfigSubscriptionTests::TestNotificationForModifiedConfigItem [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForModifiedConfigItemScope >> KqpStats::MultiTxStatsFullExpYql |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless |89.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless >> THiveTest::PipeAlivenessOfDeadTablet [GOOD] >> THiveTest::TestBootProgress >> TxUsage::WriteToTopic_Demo_5_Query [GOOD] >> TPersQueueCommonTest::Auth_MultipleUpdateTokenRequestIterationsWithValidToken_GotUpdateTokenResponseForEachRequest [GOOD] >> TPersQueueCommonTest::Auth_WriteSessionWithValidTokenAndACEAndThenRemoveACEAndSendWriteRequest_SessionClosedWithUnauthorizedErrorAfterSuccessfullWriteResponse >> DataShardVolatile::VolatileCommitOnBlobStorageFailure+UseSink [GOOD] >> DataShardVolatile::VolatileCommitOnBlobStorageFailure-UseSink >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Table [GOOD] >> TPersQueueTest::SetupLockSession [GOOD] >> TPersQueueTest::StreamReadCreateAndDestroyMsgs >> DemoTx::Scenario_3 [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Query >> SystemView::AuthUsers_TableRange [GOOD] >> SystemView::AuthPermissions_ResultOrder >> TxUsage::WriteToTopic_Demo_6_Table >> THiveTest::TestBootProgress [GOOD] >> THiveTest::TestBridgeCreateTablet >> TConsoleTests::TestRemoveSharedTenantAfterRemoveServerlessTenant [GOOD] >> TConsoleTests::TestRemoveServerlessTenant >> TPersQueueTest::ReadFromSeveralPartitionsMigrated [GOOD] >> TPersQueueTest::SchemeshardRestart >> KqpStats::JoinNoStatsScan [GOOD] >> KqpStats::JoinStatsBasicScan >> KqpTypes::Time64Columns+EnableTableDatetime64 [GOOD] >> KqpTypes::Time64Columns-EnableTableDatetime64 |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut |89.6%| [LD] {RESULT} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut >> DemoTx::Scenario_4 >> TConsoleConfigSubscriptionTests::TestNotificationForModifiedConfigItemScope [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForRemovedConfigItem >> TopicService::UnknownTopic [GOOD] >> KqpPg::TableDeleteWhere+useSink [GOOD] >> KqpPg::TableDeleteWhere-useSink |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector |89.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector >> THiveTest::TestBridgeCreateTablet [GOOD] >> TxUsage::WriteToTopic_Demo_25_Table [GOOD] >> KqpQuery::DeleteWhereInSubquery [GOOD] >> KqpQuery::DictJoin >> TopicService::UseDoubleSlashInTopicPath >> KqpQuery::PreparedQueryInvalidate [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Query [GOOD] >> KqpQuery::QueryCache >> KqpStats::MultiTxStatsFullExpYql [GOOD] >> KqpStats::MultiTxStatsFullExpScan >> TxUsage::WriteToTopic_Demo_25_Query >> DirectReadWithServer::KillPQTablet [GOOD] >> DirectReadWithServer::KillPQRBTablet [GOOD] >> LocalPartition::Restarts >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Table >> TxUsage::WriteToTopic_Demo_34_Table [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForRemovedConfigItem [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForRestartedClient >> KqpTypes::Time64Columns-EnableTableDatetime64 [GOOD] >> DataShardVolatile::DistributedWriteThenDropTable [GOOD] >> DataShardVolatile::DistributedWriteThenCopyTable >> Viewer::JsonStorageListingV2GroupIdFilter [GOOD] >> Viewer::JsonStorageListingV2NodeIdFilter >> TxUsage::WriteToTopic_Demo_34_Query >> TConsoleTests::TestRemoveServerlessTenant [GOOD] >> TConsoleTests::TestRegisterComputationalUnitsForPending |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk |89.6%| [LD] {RESULT} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpTypes::Time64Columns-EnableTableDatetime64 [GOOD] Test command err: Trying to start YDB, gRPC: 17299, MsgBus: 13599 2025-06-24T20:35:43.267405Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617331542015402:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:43.269866Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0033c6/r3tmp/tmpPZj78H/pdisk_1.dat 2025-06-24T20:35:43.741330Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:43.741942Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617331542015220:2079] 1750797343249416 != 1750797343249419 TServer::EnableGrpc on GrpcPort 17299, node 1 2025-06-24T20:35:43.786492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2025-06-24T20:35:43.786614Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:43.791063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:43.883763Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:35:43.883790Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:35:43.883797Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:35:43.883912Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13599 2025-06-24T20:35:44.261964Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13599 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:44.703653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:35:44.729141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:35:44.742362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:44.949769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:35:45.219406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:45.343136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:47.322842Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617348721886042:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:47.323021Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:47.890019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:47.970870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:48.038149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:48.122439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:48.218523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:48.266199Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617331542015402:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:48.266270Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:48.310450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:48.420557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:48.527337Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519617353016854011:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:48.527433Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:48.527713Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617353016854016:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:48.532432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:48.545920Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617353016854018:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:35:48.601085Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617353016854069:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:50.122737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... ted 2025-06-24T20:36:00.906412Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:36:00.913233Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519617405522356385:2079] 1750797360616290 != 1750797360616293 2025-06-24T20:36:00.930249Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:00.932218Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21780, node 3 2025-06-24T20:36:01.135750Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:36:01.135775Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:36:01.135782Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:36:01.135922Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28493 2025-06-24T20:36:01.699334Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28493 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-24T20:36:02.300710Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:36:02.315506Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:36:05.627577Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519617405522356404:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:05.627665Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:36:06.408416Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519617431292160805:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:06.408507Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:06.439521Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:06.526783Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519617431292160905:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:06.526894Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:06.527296Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519617431292160910:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:06.535598Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:36:06.563378Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519617431292160912:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T20:36:06.666543Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519617431292160965:2398] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 9393, MsgBus: 11322 2025-06-24T20:36:08.313888Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519617438500724836:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:08.334617Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0033c6/r3tmp/tmpSCBP8n/pdisk_1.dat 2025-06-24T20:36:08.563008Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:08.567519Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519617438500724817:2079] 1750797368301574 != 1750797368301577 2025-06-24T20:36:08.577818Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:36:08.577917Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:36:08.581384Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9393, node 4 2025-06-24T20:36:08.735785Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:36:08.735811Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:36:08.735821Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:36:08.735964Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11322 2025-06-24T20:36:09.327291Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11322 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:36:09.632702Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:36:09.647432Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:36:13.306608Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519617438500724836:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:13.306711Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:36:13.681980Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617459975561941:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:13.682067Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:13.709942Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519617459975561962:2309] txid# 281474976710658, issues: { message: "Type \'Datetime64\' specified for column \'DatetimePK\', but support for new date/time 64 types is disabled (EnableTableDatetime64 feature flag is off)" severity: 1 } |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |89.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge >> TPersQueueTest::TopicServiceCommitOffset [GOOD] >> TPersQueueTest::TopicServiceCommitOffsetBadOffsets >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Table [GOOD] >> DataShardVolatile::VolatileCommitOnBlobStorageFailure-UseSink [GOOD] >> DataShardVolatile::VolatileTxAbortedOnSplit >> TxUsage::WriteToTopic_Demo_11_Table [GOOD] |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut |89.6%| [LD] {RESULT} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut >> TPersQueueTest::DirectReadNotCached [GOOD] >> TPersQueueTest::DirectReadBudgetOnRestart >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Query >> TxUsage::WriteToTopic_Demo_11_Query >> TNetClassifierUpdaterTest::TestFiltrationByNetboxCustomFieldsOnly [GOOD] >> TNetClassifierUpdaterTest::TestFiltrationByNetboxTags >> TxUsage::Sinks_Oltp_WriteToTopic_4_Query [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForRestartedClient [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForTimeoutedNotificationResponse >> TPersQueueTest::WriteEmptyData [GOOD] >> TPersQueueTest::WriteNonExistingPartition >> TxUsage::Sinks_Oltp_WriteToTopic_5_Table >> THiveTest::TestHiveBalancerWithImmovableTablets [GOOD] >> THiveTest::TestHiveBalancerHighUsage |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp |89.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp >> TPersQueueTest::EventBatching [GOOD] >> TPersQueueTest::NoDecompressionMemoryLeaks >> KqpQuery::DictJoin [GOOD] >> KqpQuery::ExecuteWriteQuery >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Query [GOOD] >> KqpQuery::QueryCache [GOOD] >> KqpQuery::QueryCacheInvalidate >> KqpStats::MultiTxStatsFullExpScan [GOOD] >> KqpStats::JoinStatsBasicYql+StreamLookupJoin >> TxUsage::WriteToTopic_Demo_6_Table [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForTimeoutedNotificationResponse [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForRestartedServer >> TConsoleTests::TestRegisterComputationalUnitsForPending [GOOD] >> TConsoleTests::TestNotifyOperationCompletion >> TPersQueueTest::SchemeshardRestart [GOOD] >> TPersQueueTest::SameOffset >> KqpStats::JoinStatsBasicScan [GOOD] >> KqpStats::DeferredEffects-UseSink >> TxUsage::WriteToTopic_Demo_6_Query >> SystemView::ShowCreateTablePartitionSettings [GOOD] >> SystemView::ShowCreateTableReadReplicas >> KqpWorkloadService::TestHandlerActorCleanup [GOOD] >> 
TxUsage::WriteToTopic_Demo_42_Table [GOOD] >> TPersQueueCommonTest::Auth_WriteSessionWithValidTokenAndACEAndThenRemoveACEAndSendWriteRequest_SessionClosedWithUnauthorizedErrorAfterSuccessfullWriteResponse [GOOD] >> TPersQueueCommonTest::Auth_MultipleInflightWriteUpdateTokenRequestWithDifferentValidToken_SessionClosedWithOverloadedError >> TPersQueueTest::Cache [GOOD] >> TPersQueueTest::CacheHead >> DemoTx::Scenario_4 [GOOD] >> TopicService::UseDoubleSlashInTopicPath [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Table >> THiveTest::TestHiveBalancerHighUsage [GOOD] >> THiveTest::TestHiveBalancerHighUsageAndColumnShards >> TxUsage::WriteToTopic_Demo_42_Query >> DataShardVolatile::DistributedWriteThenCopyTable [GOOD] >> DataShardVolatile::DistributedWriteThenBulkUpsert |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |89.6%| [LD] {RESULT} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test |89.6%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test >> TPersQueueTest::StreamReadCreateAndDestroyMsgs [GOOD] >> TPersQueueTest::StreamReadCommitAndStatusMsgs >> DemoTx::Scenario_5 |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut |89.6%| [LD] {RESULT} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut >> TxUsage::WriteToTopic_Demo_25_Query [GOOD] >> TopicService::RelativePath ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadService::TestHandlerActorCleanup [GOOD] Test command err: 2025-06-24T20:33:11.026772Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616680199916046:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:33:11.027101Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004713/r3tmp/tmpFZja3h/pdisk_1.dat 2025-06-24T20:33:11.561607Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616675904948554:2079] 1750797191002217 != 1750797191002220 2025-06-24T20:33:11.572459Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:11.593625Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:33:11.593755Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:33:11.599975Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24982, node 1 2025-06-24T20:33:11.739699Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:11.739720Z node 1 :NET_CLASSIFIER 
WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:11.739728Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:11.739837Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29853 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T20:33:12.027586Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:33:12.182969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:33:14.325257Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-24T20:33:14.342463Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616693084818357:2288], Start check tables existence, number paths: 2 2025-06-24T20:33:14.342543Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-24T20:33:14.342556Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-24T20:33:14.342568Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-24T20:33:14.345364Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=YmY2MmY2MDAtMzBiNTc3NTctZjBlMmEyOWYtNDEzMDQzNWU=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id YmY2MmY2MDAtMzBiNTc3NTctZjBlMmEyOWYtNDEzMDQzNWU= 2025-06-24T20:33:14.345878Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616693084818357:2288], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-24T20:33:14.345919Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616693084818357:2288], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-24T20:33:14.345953Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519616693084818357:2288], Successfully finished 2025-06-24T20:33:14.347194Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=YmY2MmY2MDAtMzBiNTc3NTctZjBlMmEyOWYtNDEzMDQzNWU=, ActorId: [1:7519616693084818382:2290], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:33:14.347302Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-24T20:33:14.363258Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616693084818384:2301], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:33:14.367016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:33:14.368130Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616693084818384:2301], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-06-24T20:33:14.368281Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616693084818384:2301], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-24T20:33:14.379162Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616693084818384:2301], 
DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:33:14.444922Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616693084818384:2301], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-24T20:33:14.449326Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616693084818435:2333] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:33:14.449461Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616693084818384:2301], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-24T20:33:14.457777Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=ZTRkMGRkNzEtMmJjODJhYWYtZDcwZWRlNWUtMjgyODAxMQ==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZTRkMGRkNzEtMmJjODJhYWYtZDcwZWRlNWUtMjgyODAxMQ== 2025-06-24T20:33:14.457938Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=ZTRkMGRkNzEtMmJjODJhYWYtZDcwZWRlNWUtMjgyODAxMQ==, ActorId: [1:7519616693084818443:2291], ActorState: unknown state, session actor bootstrapped 2025-06-24T20:33:14.458151Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-06-24T20:33:14.458165Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-24T20:33:14.458250Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616693084818445:2292], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-06-24T20:33:14.458272Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=ZTRkMGRkNzEtMmJjODJhYWYtZDcwZWRlNWUtMjgyODAxMQ==, ActorId: [1:7519616693084818443:2291], ActorState: ReadyState, TraceId: 01jyhta56t161fs0nesgfs2880, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [1:7519616693084818442:2339] database: Root databaseId: /Root pool id: sample_pool_id 2025-06-24T20:33:14.458317Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [1:7519616693084818443:2291], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=ZTRkMGRkNzEtMmJjODJhYWYtZDcwZWRlNWUtMjgyODAxMQ== 2025-06-24T20:33:14.458375Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7519616693084818446:2293], Database: /Root, Start database fetching 2025-06-24T20:33:14.459937Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7519616693084818446:2293], Database: /Root, Database info successfully fetched, serverless: 0 2025-06-24T20:33:14.460036Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:240: [WorkloadService] [Service] Successfully fetched database info, DatabaseId: /Root, Serverless: 0 2025-06-24T20:33:14.460041Z node 1 
:KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616693084818445:2292], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-24T20:33:14.460093Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: /Root 2025-06-24T20:33:14.460114Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-06-24T20:33:14.460122Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [1:7519616693084818456:2294], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=ZTRkMGRkNzEtMmJjODJhYWYtZDcwZWRlNWUtMjgyODAxMQ==, Start pool fetching 2025-06-24T20:33:14.460149Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616693084818457:2295], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-06-24T20:33:14.460401Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_h ... roxyRequestId: 16, proxyId: [6:7519617091464542541:2064] 2025-06-24T20:35:00.359033Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=6&id=MTQwOGQ4ZjYtY2I4OTc4OGMtYjY3MjVkZWMtYzlmNWZjM2U=, ActorId: [6:7519617147299118556:2416], ActorState: unknown state, TraceId: 01jyhtdcfpd9hwn7eya6jbt6va, Cleanup temp tables: 0 2025-06-24T20:35:00.359507Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=6&id=MTQwOGQ4ZjYtY2I4OTc4OGMtYjY3MjVkZWMtYzlmNWZjM2U=, ActorId: [6:7519617147299118556:2416], ActorState: unknown state, TraceId: 01jyhtdcfpd9hwn7eya6jbt6va, Session actor destroyed 2025-06-24T20:35:00.374888Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=6&id=MmU4YzMzZTgtYTVmYzJkYTctMzkzMDM4NjItZTRmYmRmNDE=, ActorId: [6:7519617121529314234:2294], ActorState: ReadyState, TraceId: 01jyhtdcmpcd4hb80mbqg8t4cp, received request, proxyRequestId: 18 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: DROP RESOURCE POOL sample_pool_id; DROP RESOURCE POOL default; rpcActor: [0:0:0] database: /Root databaseId: /Root pool id: default 2025-06-24T20:35:00.475346Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:294: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7519617121529314307:2298], DatabaseId: /Root, PoolId: sample_pool_id, Got delete notification 2025-06-24T20:35:00.475471Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-06-24T20:35:00.475547Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519617147299118616:2428], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-06-24T20:35:00.479308Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519617147299118616:2428], DatabaseId: /Root, PoolId: sample_pool_id, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool sample_pool_id not found or you don't have access permissions } 2025-06-24T20:35:00.479434Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool sample_pool_id, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool sample_pool_id not found or you don't have access permissions } 2025-06-24T20:35:00.486601Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:294: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7519617125824281809:2325], DatabaseId: /Root, PoolId: default, Got delete notification 2025-06-24T20:35:00.486678Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: default 2025-06-24T20:35:00.486732Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519617147299118630:2429], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-06-24T20:35:00.489934Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519617147299118630:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:00.490071Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:00.494645Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=6&id=MmU4YzMzZTgtYTVmYzJkYTctMzkzMDM4NjItZTRmYmRmNDE=, ActorId: [6:7519617121529314234:2294], ActorState: ExecuteState, TraceId: 01jyhtdcmpcd4hb80mbqg8t4cp, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [6:7519617147299118602:2294] WorkloadServiceCleanup: 0 2025-06-24T20:35:00.498200Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=6&id=MmU4YzMzZTgtYTVmYzJkYTctMzkzMDM4NjItZTRmYmRmNDE=, ActorId: [6:7519617121529314234:2294], ActorState: CleanupState, TraceId: 01jyhtdcmpcd4hb80mbqg8t4cp, EndCleanup, isFinal: 0 2025-06-24T20:35:00.498449Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=6&id=MmU4YzMzZTgtYTVmYzJkYTctMzkzMDM4NjItZTRmYmRmNDE=, ActorId: [6:7519617121529314234:2294], ActorState: CleanupState, TraceId: 01jyhtdcmpcd4hb80mbqg8t4cp, Sent query response back to proxy, proxyRequestId: 18, proxyId: [6:7519617091464542541:2064] Wait pool handlers 0.000020s: number handlers = 2 Wait pool handlers 1.000561s: number handlers = 2 Wait pool handlers 2.001023s: number handlers = 2 2025-06-24T20:35:02.918742Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:35:02.918792Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded Wait pool handlers 3.001172s: number handlers = 2 Wait pool handlers 4.001298s: number handlers = 2 Wait pool handlers 5.001429s: number handlers = 2 Wait pool handlers 6.001943s: number handlers = 2 Wait pool handlers 7.002048s: number handlers = 2 Wait pool handlers 8.002144s: number handlers = 2 Wait pool handlers 9.002271s: number handlers = 2 Wait pool handlers 10.004048s: number handlers = 2 Wait pool handlers 11.006647s: number handlers = 2 Wait pool handlers 12.006773s: number handlers = 2 2025-06-24T20:35:13.100366Z node 6 :KQP_WORKLOAD_SERVICE TRACE: pool_handlers_actors.cpp:689: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7519617121529314307:2298], DatabaseId: /Root, PoolId: sample_pool_id, Try to start scheduled refresh Wait pool handlers 13.006912s: number handlers = 2 Wait pool handlers 14.007051s: number handlers = 2 Wait pool handlers 15.008060s: number handlers = 2 Wait pool handlers 16.008212s: number handlers = 2 Wait pool handlers 17.008404s: number handlers = 2 Wait pool handlers 18.016067s: number handlers = 2 Wait pool handlers 19.020062s: number handlers = 2 Wait pool handlers 20.024142s: number handlers = 2 Wait pool handlers 21.024303s: number handlers = 2 Wait pool handlers 22.025421s: number handlers = 2 Wait pool handlers 23.025550s: number handlers = 2 Wait pool handlers 24.026710s: number handlers = 2 Wait pool handlers 25.026844s: number handlers = 2 Wait pool handlers 26.028195s: number handlers = 2 Wait pool handlers 27.031440s: number handlers = 2 Wait pool handlers 28.031925s: number handlers = 2 Wait pool handlers 29.032074s: number handlers = 2 Wait pool handlers 30.032891s: number handlers = 2 Wait pool handlers 31.034433s: number handlers = 2 Wait pool handlers 32.034570s: number handlers = 2 Wait pool handlers 33.035867s: number handlers = 2 Wait pool handlers 34.036043s: number handlers = 2 Wait pool handlers 35.040053s: number handlers = 2 Wait pool handlers 36.044065s: number handlers = 2 Wait pool handlers 37.044233s: number 
handlers = 2 Wait pool handlers 38.048055s: number handlers = 2 Wait pool handlers 39.052065s: number handlers = 2 Wait pool handlers 40.052193s: number handlers = 2 Wait pool handlers 41.054255s: number handlers = 2 Wait pool handlers 42.055773s: number handlers = 2 Wait pool handlers 43.056044s: number handlers = 2 Wait pool handlers 44.056467s: number handlers = 2 Wait pool handlers 45.059636s: number handlers = 2 Wait pool handlers 46.059807s: number handlers = 2 Wait pool handlers 47.060069s: number handlers = 2 Wait pool handlers 48.060332s: number handlers = 2 Wait pool handlers 49.064048s: number handlers = 2 Wait pool handlers 50.064189s: number handlers = 2 Wait pool handlers 51.064324s: number handlers = 2 Wait pool handlers 52.068053s: number handlers = 2 Wait pool handlers 53.068177s: number handlers = 2 Wait pool handlers 54.072052s: number handlers = 2 Wait pool handlers 55.072274s: number handlers = 2 Wait pool handlers 56.072542s: number handlers = 2 Wait pool handlers 57.073848s: number handlers = 2 Wait pool handlers 58.076077s: number handlers = 2 Wait pool handlers 59.078304s: number handlers = 2 Wait pool handlers 60.080114s: number handlers = 2 Wait pool handlers 61.080340s: number handlers = 2 Wait pool handlers 62.084183s: number handlers = 2 Wait pool handlers 63.088057s: number handlers = 2 Wait pool handlers 64.092056s: number handlers = 2 Wait pool handlers 65.095594s: number handlers = 2 Wait pool handlers 66.095740s: number handlers = 2 Wait pool handlers 67.096064s: number handlers = 2 Wait pool handlers 68.100124s: number handlers = 2 Wait pool handlers 69.102741s: number handlers = 2 Wait pool handlers 70.104224s: number handlers = 2 Wait pool handlers 71.107043s: number handlers = 2 Wait pool handlers 72.108068s: number handlers = 2 Wait pool handlers 73.109665s: number handlers = 2 Wait pool handlers 74.112218s: number handlers = 2 Wait pool handlers 75.116151s: number handlers = 2 Wait pool handlers 76.118149s: number handlers = 2 Wait pool handlers 77.120089s: number handlers = 2 Wait pool handlers 78.123108s: number handlers = 2 Wait pool handlers 79.128448s: number handlers = 2 Wait pool handlers 80.130107s: number handlers = 2 Wait pool handlers 81.132147s: number handlers = 2 Wait pool handlers 82.133129s: number handlers = 2 Wait pool handlers 83.136101s: number handlers = 2 Wait pool handlers 84.137741s: number handlers = 2 2025-06-24T20:36:24.759937Z node 6 :KQP_WORKLOAD_SERVICE INFO: pool_handlers_actors.cpp:178: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7519617125824281809:2325], DatabaseId: /Root, PoolId: default, Got stop pool handler request, waiting for 0 requests 2025-06-24T20:36:24.760134Z node 6 :KQP_WORKLOAD_SERVICE INFO: pool_handlers_actors.cpp:178: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7519617121529314307:2298], DatabaseId: /Root, PoolId: sample_pool_id, Got stop pool handler request, waiting for 0 requests 2025-06-24T20:36:24.760291Z node 6 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:425: [WorkloadService] [Service] Got stop pool handler response, DatabaseId: /Root, PoolId: default 2025-06-24T20:36:24.760326Z node 6 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:425: [WorkloadService] [Service] Got stop pool handler response, DatabaseId: /Root, PoolId: sample_pool_id 2025-06-24T20:36:25.658323Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=6&id=MmU4YzMzZTgtYTVmYzJkYTctMzkzMDM4NjItZTRmYmRmNDE=, ActorId: [6:7519617121529314234:2294], ActorState: 
ReadyState, Session closed due to explicit close event 2025-06-24T20:36:25.658392Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=6&id=MmU4YzMzZTgtYTVmYzJkYTctMzkzMDM4NjItZTRmYmRmNDE=, ActorId: [6:7519617121529314234:2294], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T20:36:25.658432Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=6&id=MmU4YzMzZTgtYTVmYzJkYTctMzkzMDM4NjItZTRmYmRmNDE=, ActorId: [6:7519617121529314234:2294], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T20:36:25.658466Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=6&id=MmU4YzMzZTgtYTVmYzJkYTctMzkzMDM4NjItZTRmYmRmNDE=, ActorId: [6:7519617121529314234:2294], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T20:36:25.658581Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=6&id=MmU4YzMzZTgtYTVmYzJkYTctMzkzMDM4NjItZTRmYmRmNDE=, ActorId: [6:7519617121529314234:2294], ActorState: unknown state, Session actor destroyed |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows |89.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows >> DataShardVolatile::VolatileTxAbortedOnSplit [GOOD] >> DataShardVolatile::VolatileTxAbortedOnDrop >> TxUsage::WriteToTopic_Demo_34_Query [GOOD] >> TxUsage::WriteToTopic_Demo_26_Table >> KqpQuery::QueryCacheInvalidate [GOOD] >> KqpQuery::Pure >> TConsoleTests::TestNotifyOperationCompletion [GOOD] >> TConsoleTests::TestNotifyOperationCompletionExtSubdomain >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Table [GOOD] >> TxUsage::WriteToTopic_Demo_35_Table |89.7%| [TA] $(B)/ydb/core/kqp/workload_service/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> SystemView::AuthPermissions_ResultOrder [GOOD] >> SystemView::AuthPermissions_Selects >> TConsoleConfigSubscriptionTests::TestNotificationForRestartedServer [GOOD] >> TConsoleConfigSubscriptionTests::TestAddSubscriptionIdempotency |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut |89.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut >> KqpStats::JoinStatsBasicYql+StreamLookupJoin [GOOD] >> KqpStats::JoinStatsBasicYql-StreamLookupJoin |89.7%| [TA] {RESULT} $(B)/ydb/core/kqp/workload_service/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |89.7%| [LD] {RESULT} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> THiveTest::TestBridgeCreateTablet [GOOD] Test command err: 2025-06-24T20:31:13.640811Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-24T20:31:13.678497Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:13.678727Z node 2 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 2 PDiskId# 1 Path# "SectorMap:1:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-24T20:31:13.679563Z node 2 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:31:13.679832Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-24T20:31:13.680494Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:49:2075] ControllerId# 72057594037932033 2025-06-24T20:31:13.680527Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-24T20:31:13.680626Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-24T20:31:13.680751Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-24T20:31:13.691774Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-24T20:31:13.691841Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-24T20:31:13.694038Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:48:2074] Create Queue# [2:58:2079] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:13.694221Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:48:2074] Create Queue# [2:59:2080] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:13.694341Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:48:2074] Create Queue# [2:60:2081] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:13.694473Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:48:2074] Create Queue# [2:61:2082] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:13.694600Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:48:2074] Create Queue# [2:62:2083] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:13.694740Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:48:2074] Create Queue# [2:63:2084] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:13.694872Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:48:2074] Create Queue# [2:64:2085] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:13.694897Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 
2025-06-24T20:31:13.695005Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [2:49:2075] 2025-06-24T20:31:13.695037Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [2:49:2075] 2025-06-24T20:31:13.695091Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-24T20:31:13.695581Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-24T20:31:13.695990Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-24T20:31:13.698689Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:13.698850Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-24T20:31:13.701432Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:31:13.701733Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-24T20:31:13.702955Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-24T20:31:13.703014Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-24T20:31:13.703863Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:73:2077] ControllerId# 72057594037932033 2025-06-24T20:31:13.703897Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-24T20:31:13.704001Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-24T20:31:13.704237Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-24T20:31:13.718758Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-24T20:31:13.718811Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-24T20:31:13.720537Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2076] Create Queue# [1:81:2082] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:13.720700Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2076] Create Queue# [1:82:2083] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:13.720826Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2076] Create Queue# [1:83:2084] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:13.720945Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2076] Create Queue# 
[1:84:2085] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:13.721070Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2076] Create Queue# [1:85:2086] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:13.721208Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2076] Create Queue# [1:86:2087] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:13.721336Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2076] Create Queue# [1:87:2088] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:13.721373Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-24T20:31:13.721438Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:73:2077] 2025-06-24T20:31:13.721462Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:73:2077] 2025-06-24T20:31:13.721499Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-24T20:31:13.721533Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-24T20:31:13.722407Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-24T20:31:13.722653Z node 2 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:13.731362Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [2:49:2075] 2025-06-24T20:31:13.731443Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:13.731507Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-24T20:31:13.731707Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-24T20:31:13.731852Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:13.771447Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:73:2077] 2025-06-24T20:31:13.771514Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:13.771547Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-24T20:31:13.771645Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:13.771677Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-24T20:31:13.777951Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-24T20:31:13.781158Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-24T20:31:13.781404Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:13.781766Z node 2 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: 
ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:31:13.782269Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 1 2025-06-24T20:31:13.782304Z node 2 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-24T20:31:13.782405Z node 2 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-24T20:31:13.782730Z node 2 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-24T20:31:13.782807Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:31:13.783036Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:73:2077] 2025-06-24T20:31:13.783078Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:13.783170Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-24T20:31:13.785244Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [2:53:2064] 2025-06-24T20:31:13.785290Z node 2 :PIPE_CLIENT D ... 94037927937:2:19} Tx{50, NKikimr::NHive::TTxProcessBootQueue} queued, type NKikimr::NHive::TTxProcessBootQueue 2025-06-24T20:36:09.724101Z node 14 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:19} Tx{50, NKikimr::NHive::TTxProcessBootQueue} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:36:09.724168Z node 14 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:19} Tx{50, NKikimr::NHive::TTxProcessBootQueue} hope 1 -> done Change{33, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T20:36:09.724223Z node 14 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:19} Tx{50, NKikimr::NHive::TTxProcessBootQueue} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:36:09.724702Z node 15 :BS_PROXY_COLLECT DEBUG: dsproxy_collect.cpp:44: [14773c3bc185cab4] received TEvVCollectGarbageResult# {EvVCollectGarbageResult Status# OK TabletId# 72075186224037893 RecordGeneration# 1 Channel# 0 VDisk# [80000001:1:0:0:0]} Marker# DSPC01 2025-06-24T20:36:09.724775Z node 15 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:112: [14773c3bc185cab4] Result# TEvCollectGarbageResult {TabletId# 72075186224037893 RecordGeneration# 1 PerGenerationCounter# 1 Channel# 0 Status# OK} Marker# DSPC02 2025-06-24T20:36:09.724907Z node 15 :BS_PROXY_COLLECT DEBUG: dsproxy_collect.cpp:44: [8a5cae695f6b7442] received TEvVCollectGarbageResult# {EvVCollectGarbageResult Status# OK TabletId# 72075186224037893 RecordGeneration# 1 Channel# 1 VDisk# [80000004:1:0:0:0]} Marker# DSPC01 2025-06-24T20:36:09.724962Z node 15 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:112: [8a5cae695f6b7442] Result# TEvCollectGarbageResult {TabletId# 72075186224037893 RecordGeneration# 1 PerGenerationCounter# 1 Channel# 1 Status# OK} Marker# DSPC02 2025-06-24T20:36:09.725125Z node 15 :BS_PROXY_BRIDGE NOTICE: {BPB02@bridge.cpp:295} intermediate response RequestId# 261566cae509d58c GroupId# 2147483649 Status# OK PileState# SYNCHRONIZED Response# 
TEvCollectGarbageResult {TabletId# 72075186224037893 RecordGeneration# 1 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-24T20:36:09.725202Z node 15 :BS_PROXY_BRIDGE DEBUG: {BPB01@bridge.cpp:318} request finished RequestId# 261566cae509d58c Response# TEvCollectGarbageResult {TabletId# 72075186224037893 RecordGeneration# 1 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-24T20:36:09.725354Z node 15 :BS_PROXY_BRIDGE NOTICE: {BPB02@bridge.cpp:295} intermediate response RequestId# b7605224579dbbde GroupId# 2147483652 Status# OK PileState# SYNCHRONIZED Response# TEvCollectGarbageResult {TabletId# 72075186224037893 RecordGeneration# 1 PerGenerationCounter# 1 Channel# 1 Status# OK} 2025-06-24T20:36:09.725399Z node 15 :BS_PROXY_BRIDGE DEBUG: {BPB01@bridge.cpp:318} request finished RequestId# b7605224579dbbde Response# TEvCollectGarbageResult {TabletId# 72075186224037893 RecordGeneration# 1 PerGenerationCounter# 1 Channel# 1 Status# OK} 2025-06-24T20:36:09.747445Z node 14 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [83027ed5f9e3eb97] bootstrap ActorId# [14:1003:2573] Group# 0 BlobCount# 1 BlobIDs# [[72057594037927937:2:18:0:0:154:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-24T20:36:09.747667Z node 14 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [83027ed5f9e3eb97] Id# [72057594037927937:2:18:0:0:154:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:36:09.747754Z node 14 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [83027ed5f9e3eb97] restore Id# [72057594037927937:2:18:0:0:154:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T20:36:09.747866Z node 14 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [83027ed5f9e3eb97] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:18:0:0:154:1] Marker# BPG33 2025-06-24T20:36:09.747926Z node 14 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [83027ed5f9e3eb97] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:18:0:0:154:1] Marker# BPG32 2025-06-24T20:36:09.748140Z node 14 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [14:58:2081] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:18:0:0:154:1] FDS# 154 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T20:36:09.754333Z node 14 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [83027ed5f9e3eb97] received {EvVPutResult Status# OK ID# [72057594037927937:2:18:0:0:154:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 33 } Cost# 81212 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 34 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-24T20:36:09.754547Z node 14 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [83027ed5f9e3eb97] Result# TEvPutResult {Id# [72057594037927937:2:18:0:0:154:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-24T20:36:09.754667Z node 14 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [83027ed5f9e3eb97] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:18:0:0:154:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-24T20:36:09.754920Z node 14 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ 
TimestampMs# 1.118 sample PartId# [72057594037927937:2:18:0:0:154:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 14 } TEvVPutResult{ TimestampMs# 7.336 VDiskId# [0:1:0:0:0] NodeId# 14 Status# OK } ] } 2025-06-24T20:36:09.755284Z node 14 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:18:0:0:154:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-24T20:36:09.755549Z node 14 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:19} commited cookie 1 for step 18 2025-06-24T20:36:09.757070Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037893] ::Bootstrap [14:1005:2575] 2025-06-24T20:36:09.757150Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037893] lookup [14:1005:2575] 2025-06-24T20:36:09.757319Z node 14 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037893 entry.State: StInit ev: {EvForward TabletID: 72075186224037893 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:36:09.757563Z node 14 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037893 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:36:09.757826Z node 14 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037893 Cookie: 0} 2025-06-24T20:36:09.757945Z node 14 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037893 Cookie: 1} 2025-06-24T20:36:09.758014Z node 14 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037893 Cookie: 2} 2025-06-24T20:36:09.758127Z node 14 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037893 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [15:954:2283] CurrentLeaderTablet: [15:962:2288] CurrentGeneration: 1 CurrentStep: 0} 2025-06-24T20:36:09.758255Z node 14 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037893 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [15:954:2283] CurrentLeaderTablet: [15:962:2288] CurrentGeneration: 1 CurrentStep: 0} 2025-06-24T20:36:09.758404Z node 14 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037893 entry.State: StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037893 Cookie: 0 CurrentLeader: [15:954:2283] CurrentLeaderTablet: [15:962:2288] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[14:24343667:0] : 3}, {[14:1099535971443:0] : 6}}}} 2025-06-24T20:36:09.758467Z node 14 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 72075186224037893 followers: 0 2025-06-24T20:36:09.758540Z node 14 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 14 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037893 followers: 0 countLeader 1 allowFollowers 0 winner: [15:954:2283] 2025-06-24T20:36:09.758732Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037893] forward result remote node 15 [14:1005:2575] 2025-06-24T20:36:09.758944Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037893] remote node connected [14:1005:2575] 
2025-06-24T20:36:09.759055Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037893]::SendEvent [14:1005:2575] 2025-06-24T20:36:09.763809Z node 15 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037893] Accept Connect Originator# [14:1005:2575] 2025-06-24T20:36:09.764470Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037893] connected with status OK role: Leader [14:1005:2575] 2025-06-24T20:36:09.764569Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037893] send queued [14:1005:2575] 2025-06-24T20:36:09.765725Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [14:1009:2577] 2025-06-24T20:36:09.765795Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [14:1009:2577] 2025-06-24T20:36:09.766018Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [14:1009:2577] 2025-06-24T20:36:09.766131Z node 14 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:36:09.766221Z node 14 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 14 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [14:450:2264] 2025-06-24T20:36:09.766323Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [14:1009:2577] 2025-06-24T20:36:09.766388Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [14:1009:2577] 2025-06-24T20:36:09.766454Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [14:1009:2577] 2025-06-24T20:36:09.766647Z node 14 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [14:1009:2577] 2025-06-24T20:36:09.766920Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [14:1009:2577] 2025-06-24T20:36:09.766991Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [14:1009:2577] 2025-06-24T20:36:09.767051Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [14:1009:2577] 2025-06-24T20:36:09.767140Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [14:1009:2577] 2025-06-24T20:36:09.767229Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [14:1009:2577] 2025-06-24T20:36:09.767318Z node 14 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [14:1008:2576] EventType# 268697616 >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Query [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_5_Table [GOOD] >> KqpParams::CheckCacheByAst [GOOD] >> KqpParams::CheckCacheWithRecompilationQuery >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Table >> TxUsage::Sinks_Oltp_WriteToTopic_5_Query |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::ApplyInCorrectOrder >> TConsoleConfigSubscriptionTests::TestAddSubscriptionIdempotency [GOOD] >> 
TConsoleConfigSubscriptionTests::TestConfigNotificationRetries |89.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut |89.7%| [LD] {RESULT} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut |89.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut >> KqpStats::DeferredEffects-UseSink [GOOD] >> SystemView::ShowCreateTableReadReplicas [FAIL] >> SystemView::ShowCreateTableTtlSettings >> TPersQueueTest::WriteNonExistingPartition [GOOD] >> TPersQueueTest::WriteNonExistingTopic >> TConsoleTests::TestNotifyOperationCompletionExtSubdomain [GOOD] >> TConsoleTests::TestRemoveAttributes >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Query >> TConsoleTxProcessorTests::TestTxProcessorRandom [GOOD] >> TImmediateControlsConfiguratorTests::TestControlsInitialization >> TPersQueueTest::TopicServiceCommitOffsetBadOffsets [GOOD] >> TPersQueueTest::TopicServiceReadBudget ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpStats::DeferredEffects-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 6946, MsgBus: 21734 2025-06-24T20:35:43.992095Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617330668588812:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:44.000435Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0033c0/r3tmp/tmpht8uwb/pdisk_1.dat 2025-06-24T20:35:44.445657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:44.445750Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:44.447177Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:44.463710Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:44.483376Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617330668588784:2079] 1750797343984498 != 1750797343984501 TServer::EnableGrpc on GrpcPort 6946, node 1 2025-06-24T20:35:44.579836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:35:44.579867Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:35:44.579875Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:35:44.580050Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21734 2025-06-24T20:35:45.029531Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21734 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:45.318948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:35:45.343806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:45.510276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:45.672467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:45.751187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:47.545894Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617347848459620:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:47.546059Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:47.921213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:47.973200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:48.033566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:48.109376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:48.165773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:48.210421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:48.301196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:48.395510Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617352143427579:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:48.395570Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617352143427584:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:48.395611Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:48.399843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:48.418088Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617352143427586:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:35:48.495791Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617352143427637:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:49.053086Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617330668588812:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:49.053388Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 9082, MsgBus: 23238 2025-06-24T20:35:53.078638Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519617372911689679:2241];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0033c0/r3tmp/tmpO5yWh2/pdisk_1.dat 2025-06-24T20:35:53.153023Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ ... n Trying to start YDB, gRPC: 9195, MsgBus: 26678 2025-06-24T20:36:25.800751Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519617511640339580:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:25.800812Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0033c0/r3tmp/tmpZ6XrDU/pdisk_1.dat 2025-06-24T20:36:26.612609Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:26.615361Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519617511640339557:2079] 1750797385773976 != 1750797385773979 2025-06-24T20:36:26.711782Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:36:26.711885Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:36:26.744693Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9195, node 4 2025-06-24T20:36:26.858580Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:36:27.047835Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:36:27.047867Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:36:27.047881Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:36:27.048047Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26678 
TClient is connected to server localhost:26678 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:36:28.214406Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:36:28.222102Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:36:28.231291Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:36:28.333090Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:36:28.597544Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T20:36:28.711224Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:30.802273Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519617511640339580:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:30.802372Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:36:33.176659Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617546000079562:2372], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:33.176773Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:33.337978Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:33.419219Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:33.510474Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:33.640459Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:33.740449Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:33.846789Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:33.954680Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:34.128316Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617550295047554:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:34.128411Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:34.128842Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617550295047559:2441], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:34.134407Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:36:34.151735Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519617550295047561:2442], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:36:34.260011Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519617550295047612:3434] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
: Warning: Type annotation, code: 1030
:3:46: Warning: At lambda, At function: Coalesce
:3:58: Warning: At function: SqlIn
:3:58: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 |89.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut |89.7%| [LD] {RESULT} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut |89.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut >> KqpQuery::Pure [GOOD] >> TxUsage::WriteToTopic_Demo_6_Query [GOOD] >> DataShardVolatile::DistributedWriteThenBulkUpsert [GOOD] >> DataShardVolatile::DistributedWriteThenBulkUpsertWithCdc >> THiveTest::TestHiveBalancerHighUsageAndColumnShards [GOOD] >> THiveTest::TestHiveBalancerOneTabletHighUsage >> TImmediateControlsConfiguratorTests::TestControlsInitialization [GOOD] >> TImmediateControlsConfiguratorTests::TestModifiedControls >> DataShardVolatile::VolatileTxAbortedOnDrop [GOOD] >> DataShardVolatile::UpsertNoLocksArbiter+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::Pure [GOOD] Test command err: Trying to start YDB, gRPC: 11263, MsgBus: 32653 2025-06-24T20:36:03.763989Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617418715822068:2165];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:03.764198Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003350/r3tmp/tmp5JAmgH/pdisk_1.dat 2025-06-24T20:36:04.402314Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617418715821915:2079] 1750797363682123 != 1750797363682126 2025-06-24T20:36:04.412981Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:04.431446Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:36:04.431584Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:36:04.447842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11263, node 1 2025-06-24T20:36:04.763325Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:36:04.823766Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:36:04.823792Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:36:04.823803Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:36:04.823935Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32653 TClient is connected to server localhost:32653 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:36:05.793267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:36:05.838609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:36:06.099931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:36:06.361934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:36:06.460806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:36:08.600106Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617440190660031:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:08.600257Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:08.755428Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617418715822068:2165];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:08.755505Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:36:09.021760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:09.064567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:09.099285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:09.179391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:09.219520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:09.305081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:09.370713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:09.522105Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519617444485627991:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:09.522188Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:09.522483Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617444485627996:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:09.527910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:36:09.555053Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617444485627998:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:36:09.610563Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617444485628051:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:36:11.840339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) Trying to start YDB, gRPC: 29758, MsgBus: 11177 2025-06-24T20:36:13.076039Z node 2 :METADATA_PROVIDER WARN: ... ] ActorId: [3:7519617529926413562:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:36:29.338086Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519617529926413613:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:36:31.314086Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) Trying to start YDB, gRPC: 23840, MsgBus: 28352 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003350/r3tmp/tmpRBgkS5/pdisk_1.dat 2025-06-24T20:36:33.055115Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:36:33.064647Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:36:33.072764Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:36:33.082773Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:33.083101Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:36:33.087390Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519617540580732593:2079] 1750797392710393 != 1750797392710396 TServer::EnableGrpc on GrpcPort 23840, node 4 2025-06-24T20:36:33.256587Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:36:33.256618Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:36:33.256632Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:36:33.256788Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28352 2025-06-24T20:36:33.709415Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28352 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:36:34.054120Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:36:34.066212Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:36:34.080563Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:36:34.175102Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:36:34.427947Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:36:34.546754Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:36:39.047337Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617570645505300:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:39.047466Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:39.148094Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:39.229602Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:39.296201Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:39.350964Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:39.435166Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:39.530824Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:39.612210Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:39.739715Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617570645505981:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:39.739833Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:39.740317Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617570645505986:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:39.745078Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:36:39.770377Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519617570645505988:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:36:39.838758Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519617570645506049:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TxUsage::WriteToTopic_Demo_7_Table |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> TPersQueueTest::NoDecompressionMemoryLeaks [GOOD] >> TPersQueueTest::PreferredCluster_TwoEnabledClustersAndWriteSessionsWithDifferentPreferredCluster_SessionWithMismatchedClusterDiesAndOthersAlive >> S3SettingsConversion::StyleDeduction [GOOD] >> TopicService::RelativePath [GOOD] >> LocalTableWriter::ApplyInCorrectOrder [GOOD] >> TImmediateControlsConfiguratorTests::TestModifiedControls [GOOD] >> TImmediateControlsConfiguratorTests::TestResetToDefault >> KqpStats::JoinStatsBasicYql-StreamLookupJoin [GOOD] |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::StyleDeduction [GOOD] >> LocalPartition::Restarts [GOOD] >> LocalPartition::WithoutPartitionWithRestart >> TxUsage::WriteToTopic_Demo_35_Table [GOOD] |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> DemoTx::Scenario_5 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::ApplyInCorrectOrder [GOOD] Test command err: 2025-06-24T20:36:39.081282Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617572977492241:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:39.081364Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00226c/r3tmp/tmpVvuG8U/pdisk_1.dat 2025-06-24T20:36:40.091311Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:36:40.543340Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:36:40.724939Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617568682524758:2079] 1750797398995511 != 1750797398995514 2025-06-24T20:36:40.735411Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:40.775302Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:36:40.775455Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:36:40.835515Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:36:41.113895Z node 1 :BS_CONTROLLER 
ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.198394s 2025-06-24T20:36:41.114839Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.199322s TClient is connected to server localhost:29459 TServer::EnableGrpc on GrpcPort 2587, node 1 2025-06-24T20:36:42.059782Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:36:42.059814Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:36:42.059841Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:36:42.060042Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29459 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:36:43.749340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:36:43.955356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:36:44.072083Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617572977492241:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:44.072171Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750797404503 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) 2025-06-24T20:36:44.644714Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519617594452329273:2369] Handshake: worker# [1:7519617590157361879:2305] 2025-06-24T20:36:44.661827Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519617594452329273:2369] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T20:36:44.662162Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519617594452329273:2369] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-24T20:36:44.666633Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519617594452329273:2369] Send handshake: worker# [1:7519617590157361879:2305] 2025-06-24T20:36:44.667923Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, 
LocalPathId: 2][1:7519617594452329273:2369] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 48b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-24T20:36:44.691047Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519617594452329273:2369] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 10 TxId: 0 } TxId: 1 } 2025-06-24T20:36:44.703366Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519617594452329273:2369] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 48 }] } 2025-06-24T20:36:44.707607Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519617594452329276:2369] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-24T20:36:44.707703Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519617594452329273:2369] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T20:36:44.707842Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519617594452329276:2369] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 1 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-24T20:36:44.748167Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519617594452329276:2369] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-24T20:36:44.748244Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519617594452329273:2369] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T20:36:44.748292Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519617594452329273:2369] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1] } 2025-06-24T20:36:44.748688Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519617594452329273:2369] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 49b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 48b Offset: 3 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 19b Offset: 4 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-24T20:36:44.749143Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519617594452329273:2369] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 20 TxId: 0 } TxId: 2 } 2025-06-24T20:36:44.749237Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 
2][1:7519617594452329273:2369] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 2 BodySize: 49 },{ Order: 3 BodySize: 48 }] } 2025-06-24T20:36:44.749352Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519617594452329276:2369] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 2 Group: 0 Step: 11 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 3 Group: 0 Step: 2 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-24T20:36:44.767951Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519617594452329276:2369] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-24T20:36:44.768019Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519617594452329273:2369] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T20:36:44.768056Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519617594452329273:2369] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [2,3] } |89.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost |89.7%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost |89.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest >> TPersQueueTest::SameOffset [GOOD] >> TPersQueueTest::SchemeOperationsTest >> TPersQueueCommonTest::Auth_MultipleInflightWriteUpdateTokenRequestWithDifferentValidToken_SessionClosedWithOverloadedError [GOOD] >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithInvalidToken_SessionClosedWithUnauthenticatedError >> TxUsage::WriteToTopic_Demo_35_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpStats::JoinStatsBasicYql-StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 29700, MsgBus: 12308 2025-06-24T20:36:03.758231Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617419310459847:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:03.758306Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003313/r3tmp/tmpjO1agE/pdisk_1.dat 2025-06-24T20:36:04.602134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:36:04.602241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:36:04.668651Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:04.671347Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617419310459709:2079] 1750797363719291 != 1750797363719294 2025-06-24T20:36:04.748956Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:36:04.767812Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 29700, node 1 2025-06-24T20:36:04.843665Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:36:04.987673Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:36:04.987704Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:36:04.987715Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:36:04.987809Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12308 TClient is connected to server localhost:12308 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:36:05.941127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:36:05.977027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:36:06.196120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:36:06.431728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:36:06.532365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:36:08.748152Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617419310459847:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:08.748212Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:36:08.765245Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617440785297814:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:08.765604Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:09.197853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:09.250798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:09.295015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:09.374806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:09.435095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:09.493620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:09.553078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:09.640947Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617445080265772:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:09.641046Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:09.641339Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617445080265777:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:09.645891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:36:09.663533Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617445080265779:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:36:09.748389Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617445080265832:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:36:11.803649Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797371757, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 29080, MsgBus: 20099 2025-06-24T20:36:13.060097Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event= ... ROR: schemereq.cpp:553: Actor# [3:7519617536119810935:3430] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 29476, MsgBus: 4708 2025-06-24T20:36:35.326629Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519617554190743803:2238];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003313/r3tmp/tmp3AIlvG/pdisk_1.dat 2025-06-24T20:36:35.359947Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:36:35.469228Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:35.470257Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519617554190743589:2079] 1750797395218562 != 1750797395218565 2025-06-24T20:36:35.494131Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:36:35.494235Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:36:35.500995Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29476, node 4 2025-06-24T20:36:35.697912Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:36:35.697935Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:36:35.697944Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:36:35.698113Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4708 2025-06-24T20:36:36.227347Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4708 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:36:36.787246Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:36:36.815355Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:36:36.986042Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:36:37.120642Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:36:37.419731Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:36:37.519763Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:36:40.255304Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519617554190743803:2238];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:40.255405Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:36:40.473037Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617575665581706:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:40.473111Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:40.590558Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:40.669423Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:40.744487Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:40.823011Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:40.902303Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:41.006006Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:41.121589Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:41.250795Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617579960549673:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:41.250904Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:41.251237Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617579960549678:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:41.256728Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:36:41.269232Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519617579960549680:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:36:41.374316Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519617579960549733:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TopicService::AccessRights >> TImmediateControlsConfiguratorTests::TestResetToDefault [GOOD] >> TImmediateControlsConfiguratorTests::TestMaxLimit >> KqpParams::CheckCacheWithRecompilationQuery [GOOD] |89.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows |89.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows |89.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows >> TPersQueueTest::DirectReadBudgetOnRestart [GOOD] >> TPersQueueTest::DirectReadCorrectOffsetsOnRestart >> TPQTest::TestWritePQ [GOOD] >> TPQTest::TestWriteOffsetWithBigMessage >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Table [GOOD] >> SystemView::ShowCreateTableTtlSettings [FAIL] >> SystemView::ShowCreateTableTemporary >> TxUsage::WriteToTopic_Demo_26_Table [GOOD] >> TImmediateControlsConfiguratorTests::TestMaxLimit [GOOD] >> TImmediateControlsConfiguratorTests::TestDynamicMap >> TTxLocatorTest::Boot ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpParams::CheckCacheWithRecompilationQuery [GOOD] Test command err: Trying to start YDB, gRPC: 10226, MsgBus: 27395 2025-06-24T20:35:43.689640Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617331473842146:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:43.689693Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0033c9/r3tmp/tmpTIg7uu/pdisk_1.dat 2025-06-24T20:35:44.349447Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:44.351344Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617331473842124:2079] 1750797343688567 != 1750797343688570 2025-06-24T20:35:44.372984Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:44.373135Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:44.380317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10226, node 1 2025-06-24T20:35:44.599826Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:35:44.599850Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:35:44.599858Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:35:44.599970Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:35:44.747225Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27395 TClient is connected to server localhost:27395 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:46.010945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:35:46.049349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:46.304507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:46.555193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:46.647101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:35:48.720564Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617331473842146:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:48.720639Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:49.150715Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617357243647545:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:49.150843Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:49.502900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:49.544962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:49.615856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:49.700664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:49.755010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:49.835998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:49.912580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:50.003636Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617357243648212:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:50.003733Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:50.004109Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617361538615513:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:50.008363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:50.038867Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617361538615515:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:35:50.139113Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617361538615568:3434] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 29495, MsgBus: 21520 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0033c9/r3tmp/tmpdbcG1G/pdisk_1.dat 2025-06-24T20:35:53.690178Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:53.690281Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:53.691316Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline ... 2a80] received request Name# ListDatabases ok# false data# peer# 2025-06-24T20:36:48.194061Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000fff80] received request Name# RemoveDatabase ok# false data# peer# 2025-06-24T20:36:48.194150Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000380780] received request Name# DescribeDatabaseOptions ok# false data# peer# 2025-06-24T20:36:48.194266Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001f9580] received request Name# GetScaleRecommendation ok# false data# peer# 2025-06-24T20:36:48.194356Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00042a380] received request Name# ListEndpoints ok# false data# peer# 2025-06-24T20:36:48.194445Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001fe980] received request Name# WhoAmI ok# false data# peer# 2025-06-24T20:36:48.194648Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00035d780] received request Name# NodeRegistration ok# false data# peer# 2025-06-24T20:36:48.194856Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000107680] received request Name# Scan ok# false data# peer# 2025-06-24T20:36:48.195076Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000415380] received request Name# GetShardLocations ok# false data# peer# 2025-06-24T20:36:48.195405Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000259280] received request Name# DescribeTable ok# false data# peer# 2025-06-24T20:36:48.195701Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000200580] received request Name# CreateSnapshot ok# false data# peer# 2025-06-24T20:36:48.195916Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000200c80] received request Name# RefreshSnapshot ok# false data# peer# 2025-06-24T20:36:48.196126Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000201380] received request Name# DiscardSnapshot ok# false data# peer# 2025-06-24T20:36:48.196347Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000201a80] received request Name# List ok# false data# peer# 2025-06-24T20:36:48.196551Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000202180] received request Name# RateLimiter/CreateResource ok# false data# peer# 2025-06-24T20:36:48.198723Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000202f80] received request Name# RateLimiter/AlterResource ok# false data# peer# 2025-06-24T20:36:48.198957Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000202880] received request Name# RateLimiter/DropResource ok# false data# peer# 2025-06-24T20:36:48.199198Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000203680] received request Name# 
RateLimiter/ListResources ok# false data# peer# 2025-06-24T20:36:48.199430Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000203d80] received request Name# RateLimiter/DescribeResource ok# false data# peer# 2025-06-24T20:36:48.199667Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000204480] received request Name# RateLimiter/AcquireResource ok# false data# peer# 2025-06-24T20:36:48.199886Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000247380] received request Name# CreateStream ok# false data# peer# 2025-06-24T20:36:48.200098Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000261e80] received request Name# ListStreams ok# false data# peer# 2025-06-24T20:36:48.200310Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001caf80] received request Name# DeleteStream ok# false data# peer# 2025-06-24T20:36:48.200514Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000204b80] received request Name# DescribeStream ok# false data# peer# 2025-06-24T20:36:48.200727Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000250680] received request Name# ListShards ok# false data# peer# 2025-06-24T20:36:48.200913Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000137f80] received request Name# SetWriteQuota ok# false data# peer# 2025-06-24T20:36:48.201137Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000302080] received request Name# UpdateStream ok# false data# peer# 2025-06-24T20:36:48.201329Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00007d280] received request Name# PutRecord ok# false data# peer# 2025-06-24T20:36:48.201514Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00032ab80] received request Name# PutRecords ok# false data# peer# 2025-06-24T20:36:48.201698Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00032b280] received request Name# GetRecords ok# false data# peer# 2025-06-24T20:36:48.201901Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00032a480] received request Name# GetShardIterator ok# false data# peer# 2025-06-24T20:36:48.202077Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00032c080] received request Name# SubscribeToShard ok# false data# peer# 2025-06-24T20:36:48.202282Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001f2c80] received request Name# DescribeLimits ok# false data# peer# 2025-06-24T20:36:48.202460Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0002cc380] received request Name# DescribeStreamSummary ok# false data# peer# 2025-06-24T20:36:48.202659Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0002cca80] received request Name# DecreaseStreamRetentionPeriod ok# false data# peer# 2025-06-24T20:36:48.202865Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0002cd180] received request Name# IncreaseStreamRetentionPeriod ok# false data# peer# 2025-06-24T20:36:48.203067Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000012680] received request Name# UpdateShardCount ok# false data# peer# 2025-06-24T20:36:48.207471Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00000f580] received request Name# UpdateStreamMode ok# false data# peer# 2025-06-24T20:36:48.207764Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000010a80] received request Name# RegisterStreamConsumer ok# false data# peer# 2025-06-24T20:36:48.207993Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00024ea80] received request Name# DeregisterStreamConsumer ok# false data# peer# 2025-06-24T20:36:48.208213Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0003fe080] received request Name# DescribeStreamConsumer ok# false data# peer# 2025-06-24T20:36:48.208424Z node 6 :GRPC_SERVER DEBUG: 
logger.cpp:36: [0x51b00020fa80] received request Name# ListStreamConsumers ok# false data# peer# 2025-06-24T20:36:48.208624Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0002dd480] received request Name# AddTagsToStream ok# false data# peer# 2025-06-24T20:36:48.208833Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0002db880] received request Name# DisableEnhancedMonitoring ok# false data# peer# 2025-06-24T20:36:48.209029Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00031ee80] received request Name# EnableEnhancedMonitoring ok# false data# peer# 2025-06-24T20:36:48.209238Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000070e80] received request Name# ListTagsForStream ok# false data# peer# 2025-06-24T20:36:48.209456Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000266b80] received request Name# MergeShards ok# false data# peer# 2025-06-24T20:36:48.209670Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000267980] received request Name# RemoveTagsFromStream ok# false data# peer# 2025-06-24T20:36:48.209863Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000ad480] received request Name# SplitShard ok# false data# peer# 2025-06-24T20:36:48.210064Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000291280] received request Name# StartStreamEncryption ok# false data# peer# 2025-06-24T20:36:48.210265Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00039ab80] received request Name# StopStreamEncryption ok# false data# peer# 2025-06-24T20:36:48.210486Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000305880] received request Name# SelfCheck ok# false data# peer# 2025-06-24T20:36:48.210693Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000303580] received request Name# NodeCheck ok# false data# peer# 2025-06-24T20:36:48.210904Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000114180] received request Name# CreateSession ok# false data# peer# 2025-06-24T20:36:48.211100Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000205980] received request Name# DeleteSession ok# false data# peer# 2025-06-24T20:36:48.215246Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000206080] received request Name# AttachSession ok# false data# peer# 2025-06-24T20:36:48.215560Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00011b880] received request Name# BeginTransaction ok# false data# peer# 2025-06-24T20:36:48.215849Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000117980] received request Name# CommitTransaction ok# false data# peer# 2025-06-24T20:36:48.216113Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0002fe880] received request Name# RollbackTransaction ok# false data# peer# 2025-06-24T20:36:48.216341Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000302780] received request Name# ExecuteQuery ok# false data# peer# 2025-06-24T20:36:48.216554Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000304a80] received request Name# ExecuteScript ok# false data# peer# 2025-06-24T20:36:48.216959Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000205280] received request Name# FetchScriptResults ok# false data# peer# 2025-06-24T20:36:48.217190Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00011e280] received request Name# ExecuteTabletMiniKQL ok# false data# peer# 2025-06-24T20:36:48.217403Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000cd380] received request Name# ChangeTabletSchema ok# false data# peer# 2025-06-24T20:36:48.217637Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000343a80] received request Name# RestartTablet ok# false data# peer# 2025-06-24T20:36:48.217838Z node 
6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00015f580] received request Name# CreateLogStore ok# false data# peer# 2025-06-24T20:36:48.218051Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000342c80] received request Name# DescribeLogStore ok# false data# peer# 2025-06-24T20:36:48.218267Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000343380] received request Name# DropLogStore ok# false data# peer# 2025-06-24T20:36:48.218477Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000341e80] received request Name# AlterLogStore ok# false data# peer# 2025-06-24T20:36:48.218687Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000341780] received request Name# CreateLogTable ok# false data# peer# 2025-06-24T20:36:48.218883Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000341080] received request Name# DescribeLogTable ok# false data# peer# 2025-06-24T20:36:48.219073Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000340980] received request Name# DropLogTable ok# false data# peer# 2025-06-24T20:36:48.219294Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000340280] received request Name# AlterLogTable ok# false data# peer# 2025-06-24T20:36:48.219531Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00033fb80] received request Name# Login ok# false data# peer# 2025-06-24T20:36:48.219772Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000283280] received request Name# DescribeReplication ok# false data# peer# 2025-06-24T20:36:48.219991Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000254580] received request Name# DescribeView ok# false data# peer# >> TConsoleTests::TestRemoveAttributes [GOOD] >> TConsoleTests::TestRemoveAttributesExtSubdomain >> TTxLocatorTest::Boot [GOOD] >> Viewer::JsonStorageListingV1GroupIdFilter [GOOD] >> Viewer::JsonStorageListingV1NodeIdFilter >> TPersQueueTest::StreamReadCommitAndStatusMsgs [GOOD] >> TPersQueueTest::StreamReadManyUpdateTokenAndRead >> TTxLocatorTest::TestZeroRange >> TopicAutoscaling::ReadingAfterSplitTest_BeforeAutoscaleAwareSDK |89.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq >> TBalanceCoverageBuilderTest::TestOneSplit [GOOD] |89.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq |89.7%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq >> TxUsage::Sinks_Oltp_WriteToTopic_5_Query [GOOD] >> TTxLocatorTest::TestZeroRange [GOOD] >> TxUsage::WriteToTopic_Demo_26_Query >> TImmediateControlsConfiguratorTests::TestDynamicMap [GOOD] >> TFstClassSrcIdPQTest::TestTableCreated >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::Boot [GOOD] Test command err: 2025-06-24T20:36:51.251012Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-24T20:36:51.251892Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-24T20:36:51.252774Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-24T20:36:51.254659Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T20:36:51.255451Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-24T20:36:51.269362Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T20:36:51.269531Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T20:36:51.269644Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-24T20:36:51.269780Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T20:36:51.269929Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T20:36:51.270025Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-24T20:36:51.270126Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestOneSplit [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestZeroRange [GOOD] Test command err: 2025-06-24T20:36:52.268635Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-24T20:36:52.269190Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-24T20:36:52.269958Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-24T20:36:52.272015Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T20:36:52.272541Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-24T20:36:52.283617Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T20:36:52.283827Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T20:36:52.283940Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-24T20:36:52.284085Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T20:36:52.284220Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T20:36:52.284320Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-24T20:36:52.284427Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-24T20:36:52.285217Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:72:2106] requested range size#0 2025-06-24T20:36:52.285722Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T20:36:52.285789Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T20:36:52.285877Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 0 2025-06-24T20:36:52.285915Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:72:2106] TEvAllocateResult from# 0 to# 0 expected SUCCESS >> TxUsage::WriteToTopic_Demo_42_Query [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_1_Table >> TPQCDTest::TestCloudClientsAreConsistentlyDistributed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> TImmediateControlsConfiguratorTests::TestDynamicMap [GOOD] Test command err: 2025-06-24T20:34:39.068457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:34:39.068527Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:39.147330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:40.558399Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T20:34:40.558998Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/002943/r3tmp/tmpC6jfye/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T20:34:40.559787Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/002943/r3tmp/tmpC6jfye/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/002943/r3tmp/tmpC6jfye/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 7814695940717562827 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T20:34:40.704140Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:34:40.710400Z node 2 :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1000 VDISK[80000000:_:0:0:0]: (2147483648) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 
HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/002943/r3tmp/tmpC6jfye/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T20:34:40.797869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:1, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T20:34:40.961579Z node 7 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T20:34:40.962219Z node 7 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/002943/r3tmp/tmpC6jfye/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T20:34:40.962527Z node 7 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/002943/r3tmp/tmpC6jfye/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/002943/r3tmp/tmpC6jfye/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 2181875835963143125 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T20:34:41.056485Z node 6 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T20:34:41.057039Z node 6 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/002943/r3tmp/tmpC6jfye/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T20:34:41.057265Z node 6 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/002943/r3tmp/tmpC6jfye/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/002943/r3tmp/tmpC6jfye/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 13371093036260859230 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T20:34:41.104786Z node 8 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T20:34:41.105357Z node 8 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/002943/r3tmp/tmpC6jfye/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T20:34:41.109438Z node 8 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/002943/r3tmp/tmpC6jfye/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/002943/r3tmp/tmpC6jfye/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 4308728146961105875 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpe ... istered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TxLimitControls.PerRequestDataSizeLimit was registered before TImmediateControlsConfigurator creation. 
A default value may have been used before it was configured. WARNING: immediate control TxLimitControls.PerShardReadSizeLimit was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TxLimitControls.PerShardIncomingReadSetSizeLimit was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TxLimitControls.DefaultTimeoutMs was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control CoordinatorControls.EnableLeaderLeases was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control CoordinatorControls.MinLeaderLeaseDurationUs was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control CoordinatorControls.VolatilePlanLeaseMs was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control CoordinatorControls.PlanAheadTimeShiftMs was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control CoordinatorControls.MinPlanResolutionMs was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control SchemeShardControls.ForceShardSplitDataSize was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control SchemeShardControls.DisableForceShardSplit was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TCMallocControls.ProfileSamplingRate was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TCMallocControls.GuardedSamplingRate was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TCMallocControls.PageCacheTargetSize was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TCMallocControls.PageCacheReleaseRate was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.EnableLocalSyncLogDataCutting was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.EnableSyncLogChunkCompressionHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.EnableSyncLogChunkCompressionSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.MaxSyncLogChunksInFlightHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. 
WARNING: immediate control VDiskControls.MaxSyncLogChunksInFlightSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.BurstThresholdNsHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.BurstThresholdNsSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.BurstThresholdNsNVME was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.DiskTimeAvailableScaleHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.DiskTimeAvailableScaleSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.DiskTimeAvailableScaleNVME was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.DefaultHugeGarbagePerMille was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.HugeDefragFreeSpaceBorderPerMille was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.MaxChunksToDefragInflight was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingDryRun was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMinLevel0SstCount was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMaxLevel0SstCount was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMinInplacedSizeHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMaxInplacedSizeHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMinInplacedSizeSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMaxInplacedSizeSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMinOccupancyPerMille was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. 
WARNING: immediate control VDiskControls.ThrottlingMaxOccupancyPerMille was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMinLogChunkCount was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMaxLogChunkCount was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.MaxInProgressSyncCount was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TabletControls.MaxCommitRedoMB was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.SlowDiskThreshold was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.PredictedDelayMultiplier was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.LongRequestThresholdMs was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.MaxNumOfSlowDisks was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.SlowDiskThresholdHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.PredictedDelayMultiplierHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.MaxNumOfSlowDisksHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.SlowDiskThresholdSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.PredictedDelayMultiplierSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.MaxNumOfSlowDisksSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.RequestReportingSettings.BucketSize was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.RequestReportingSettings.LeakDurationMs was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.RequestReportingSettings.LeakRate was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. 
WARNING: immediate control PDiskControls.MaxCommonLogChunksHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control PDiskControls.MaxCommonLogChunksSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control PDiskControls.UseNoopSchedulerHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control PDiskControls.UseNoopSchedulerSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control BlobStorageControllerControls.EnableSelfHealWithDegraded was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TableServiceControls.EnableMergeDatashardReads was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TestShardControls.DisableWrites was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. |89.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data |89.7%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data |89.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest >> TTopicApiDescribes::GetLocalDescribe >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Table [GOOD] |89.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects |89.7%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects |89.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects >> DataShardVolatile::UpsertNoLocksArbiter+UseSink [GOOD] >> DataShardVolatile::UpsertNoLocksArbiter-UseSink |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest >> TTopicApiDescribes::DescribeConsumer >> TTopicApiDescribes::DescribeTopic >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Query [GOOD] >> TConsoleTests::TestRemoveAttributesExtSubdomain [GOOD] >> TTopicApiDescribes::GetPartitionDescribe >> TxUsage::WriteToTopic_Demo_22_RestartNo_Table >> TxUsage::WriteToTopic_Demo_43_Table >> TPersQueueTest::WriteNonExistingTopic [GOOD] >> TPersQueueTest::WriteAfterAlter ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> TConsoleTests::TestRemoveAttributesExtSubdomain [GOOD] Test command err: 2025-06-24T20:34:40.841809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:34:40.841888Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:40.931841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:42.414183Z node 6 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T20:34:42.414856Z node 6 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/0028e8/r3tmp/tmpbQENxY/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T20:34:42.415764Z node 6 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/0028e8/r3tmp/tmpbQENxY/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/0028e8/r3tmp/tmpbQENxY/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 622267506916184959 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T20:34:45.157645Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:34:45.157719Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:45.234800Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:49.114054Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:34:49.114123Z node 19 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:49.198394Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB 
called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:50.415375Z node 23 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T20:34:50.416024Z node 23 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/0028e8/r3tmp/tmpEz4y5e/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T20:34:50.416318Z node 23 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/0028e8/r3tmp/tmpEz4y5e/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/0028e8/r3tmp/tmpEz4y5e/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 13935018820600180673 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T20:34:50.696040Z node 19 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 19 Type# 268639257 2025-06-24T20:34:50.702753Z node 23 :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1000 VDISK[80000000:_:0:0:0]: (2147483648) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 
HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/0028e8/r3tmp/tmpEz4y5e/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T20:34:50.817667Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:1, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T20:34:51.041827Z node 22 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T20:34:51.042467Z node 22 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/0028e8/r3tmp/tmpEz4y5e/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T20:34:51.042726Z node 22 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/0028e8/r3tmp/tmpEz4y5e/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/0028e8/r3tmp/tmpEz4y5e/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 4956549070963037708 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T20:34:51.147452Z node 20 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T20:34:51.148021Z node 20 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/0028e8/r3tmp/tmpEz4y ... 
2057594046578944, txId: 281474976715661, path id: [OwnerId: 72057594046578944, LocalPathId: 3] 2025-06-24T20:36:57.997671Z node 163 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046578944 2025-06-24T20:36:57.997726Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [163:709:2238], at schemeshard: 72057594046578944, txId: 281474976715661, path id: 3 2025-06-24T20:36:57.997924Z node 163 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5902: Handle TEvUpdateTenantSchemeShard, at schemeshard: 72075186233409546, msg: TabletId: 72057594046578944 Generation: 2 UserAttributes { Key: "name1" Value: "value1" } UserAttributesVersion: 3 2025-06-24T20:36:57.998043Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:79: TTxUpdateTenant DoExecute, msg: TabletId: 72057594046578944 Generation: 2 UserAttributes { Key: "name1" Value: "value1" } UserAttributesVersion: 3, at schemeshard: 72075186233409546 2025-06-24T20:36:57.998397Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:589: Cannot publish paths for unknown operation id#0 2025-06-24T20:36:58.001054Z node 163 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2025-06-24T20:36:58.001216Z node 163 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:16] persistent tx 281474976715661 for mediator 72057594046382081 tablet 72057594046578944 removed=1 2025-06-24T20:36:58.001265Z node 163 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:16] persistent tx 281474976715661 for mediator 72057594046382081 acknowledged 2025-06-24T20:36:58.001307Z node 163 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:16] persistent tx 281474976715661 acknowledged 2025-06-24T20:36:58.003623Z node 163 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046578944, msg: Owner: 72057594046578944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046578944, cookie: 281474976715661 2025-06-24T20:36:58.003829Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046578944, msg: Owner: 72057594046578944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046578944, cookie: 281474976715661 2025-06-24T20:36:58.003902Z node 163 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046578944, txId: 281474976715661 2025-06-24T20:36:58.006421Z node 163 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046578944, txId: 281474976715661, pathId: [OwnerId: 72057594046578944, LocalPathId: 3], version: 7 2025-06-24T20:36:58.006615Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046578944, LocalPathId: 3] was 11 2025-06-24T20:36:58.006856Z node 163 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046578944, txId: 281474976715661, subscribers: 1 2025-06-24T20:36:58.007005Z node 163 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046578944, to actorId: [163:1967:2385] 2025-06-24T20:36:58.016085Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5889: Handle TEvSyncTenantSchemeShard, at schemeshard: 72057594046578944, msg: DomainSchemeShard: 72057594046578944 DomainPathId: 3 TabletID: 72075186233409546 Generation: 2 EffectiveACLVersion: 0 SubdomainVersion: 3 UserAttributesVersion: 3 TenantHive: 18446744073709551615 TenantSysViewProcessor: 72075186233409553 TenantRootACL: "" TenantStatisticsAggregator: 72075186233409554 TenantGraphShard: 18446744073709551615 2025-06-24T20:36:58.016227Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:26: TTxSyncTenant DoExecute, pathId: [OwnerId: 72057594046578944, LocalPathId: 3], at schemeshard: 72057594046578944 2025-06-24T20:36:58.016362Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:568: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046578944, LocalPathId: 3], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046578944, LocalPathId: 3], Generation: 2, ActorId:[163:1424:2609], EffectiveACLVersion: 0, SubdomainVersion: 3, UserAttributesVersion: 3, TenantHive: 18446744073709551615, TenantSysViewProcessor: 72075186233409553, TenantStatisticsAggregator: 72075186233409554, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 3, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 3, tenantHive: 18446744073709551615, tenantSysViewProcessor: 72075186233409553, at schemeshard: 72057594046578944 2025-06-24T20:36:58.016543Z node 163 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-24T20:36:58.016595Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 0, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-24T20:36:58.016818Z node 163 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-24T20:36:58.016875Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [163:1628:2754], at schemeshard: 72075186233409546, txId: 0, path id: 1 2025-06-24T20:36:58.020492Z node 163 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72075186233409546, cookie: 0 2025-06-24T20:36:58.024401Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046578944, cookie: 281474976715661 2025-06-24T20:36:58.024575Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046578944, LocalPathId: 3], at schemeshard: 72057594046578944 Reply: Status: StatusSuccess Path: "/dc-1/users/tenant-1" PathDescription { Self { Name: "tenant-1" PathId: 3 SchemeshardId: 72057594046578944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 281474976715657 CreateStep: 1000 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: 
"" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 3 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046578944 PathId_Depricated: 1 ProcessingParams { Version: 3 PlanResolution: 10 Coordinators: 72075186233409547 Coordinators: 72075186233409548 Coordinators: 72075186233409549 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409550 Mediators: 72075186233409551 Mediators: 72075186233409552 SchemeShard: 72075186233409546 SysViewProcessor: 72075186233409553 StatisticsAggregator: 72075186233409554 } DomainKey { SchemeShard: 72057594046578944 PathId: 3 } StoragePools { Name: "/dc-1/users/tenant-1:hdd" Kind: "hdd" } StoragePools { Name: "/dc-1/users/tenant-1:hdd-1" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 9 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046578944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "name1" Value: "value1" } } PathId: 3 PathOwnerId: 72057594046578944 Reply: Status: StatusSuccess Path: "/dc-1/users/tenant-1" PathDescription { Self { Name: "tenant-1" PathId: 3 SchemeshardId: 72057594046578944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 281474976715657 CreateStep: 1000 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 3 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046578944 PathId_Depricated: 1 ProcessingParams { Version: 3 PlanResolution: 10 Coordinators: 72075186233409547 Coordinators: 72075186233409548 Coordinators: 72075186233409549 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409550 Mediators: 72075186233409551 Mediators: 72075186233409552 SchemeShard: 72075186233409546 SysViewProcessor: 72075186233409553 StatisticsAggregator: 72075186233409554 } DomainKey { SchemeShard: 72057594046578944 PathId: 3 } StoragePools { Name: "/dc-1/users/tenant-1:hdd" Kind: "hdd" } StoragePools { Name: "/dc-1/users/tenant-1:hdd-1" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 9 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046578944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 
MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "name1" Value: "value1" } } PathId: 3 PathOwnerId: 72057594046578944 |89.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login |89.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login |89.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login >> DataShardVolatile::DistributedWriteThenBulkUpsertWithCdc [GOOD] >> DataShardVolatile::DistributedWriteLostPlanThenDrop >> TPQCDTest::TestCloudClientsAreConsistentlyDistributed [GOOD] >> SystemView::AuthPermissions_Selects [GOOD] >> TxUsage::WriteToTopic_Demo_7_Table [GOOD] >> KqpCost::OltpWriteRow-isSink >> SystemView::ShowCreateTableTemporary [FAIL] >> SystemView::ShowCreateTableSequences ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestCloudClientsAreConsistentlyDistributed [GOOD] Test command err: 2025-06-24T20:36:55.421349Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617642873896235:2177];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:55.436020Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002486/r3tmp/tmpjWB7zZ/pdisk_1.dat 2025-06-24T20:36:56.164920Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:36:56.165030Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:36:56.192506Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:36:56.204161Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:56.223931Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617642873896096:2079] 1750797415395061 != 1750797415395064 TServer::EnableGrpc on GrpcPort 64485, node 1 2025-06-24T20:36:56.423377Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:36:56.479843Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/002486/r3tmp/yandexFxj2wI.tmp 2025-06-24T20:36:56.479874Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/002486/r3tmp/yandexFxj2wI.tmp 2025-06-24T20:36:56.492969Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/002486/r3tmp/yandexFxj2wI.tmp 2025-06-24T20:36:56.493181Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15422 PQClient connected to localhost:64485 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:36:57.037541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-24T20:36:57.133882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-24T20:37:00.419311Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617642873896235:2177];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:00.441547Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:37:00.517240Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617664348733294:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:00.517360Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:00.517620Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617664348733306:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:00.522526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:37:00.553477Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617664348733308:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-24T20:37:00.644656Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617664348733374:2392] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:37:00.934811Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617664348733382:2306], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:37:00.935161Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YzdlMzNmODctNGRiMzVjMTQtYTZhMzVmNjYtZjM0N2QwMTI=, ActorId: [1:7519617664348733292:2294], ActorState: ExecuteState, TraceId: 01jyhth1z27ppe56aqvyr3h30a, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:37:00.939853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:00.971813Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:37:01.091025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:01.215303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-24T20:37:01.583287Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyhth2tafjq182arjn62nvrm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODBjZThkNTktMTVjOGUxYTEtYzFlZDM3MDItZGJlOGQ0NDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> TxUsage::WriteToTopic_Demo_35_Query [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Query >> TxUsage::WriteToTopic_Demo_7_Query >> LocalPartition::WithoutPartitionWithRestart [GOOD] >> LocalPartition::WithoutPartitionUnknownEndpoint |89.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl |89.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl |89.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl >> TopicService::AccessRights [GOOD] >> TPopulatorTest::RemoveDir >> TPopulatorTest::MakeDir >> TxUsage::WriteToTopic_Demo_11_Query [GOOD] >> TPersQueueTest::TopicServiceReadBudget [GOOD] >> TPersQueueTest::TopicServiceSimpleHappyWrites >> TxUsage::WriteToTopic_Demo_12_Table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::AuthPermissions_Selects [GOOD] Test command err: 2025-06-24T20:34:13.969790Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616944982654448:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:13.979263Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b11/r3tmp/tmp50DJ81/pdisk_1.dat 2025-06-24T20:34:14.419341Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616944982654406:2079] 1750797253961103 != 1750797253961106 2025-06-24T20:34:14.517824Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:14.517958Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:14.527859Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:14.599763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10444, node 1 2025-06-24T20:34:14.787410Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:14.787434Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:14.787442Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:14.787594Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:14.983795Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3137 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:15.510485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:15.527697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:34:15.534335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:18.331130Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616966457491606:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:18.331271Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:18.331496Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616966457491618:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:18.336577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:18.355104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-24T20:34:18.375476Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616966457491620:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T20:34:18.458423Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616966457491671:2390] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:18.967292Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616944982654448:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:18.967366Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:19.122729Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhtc3jrc37xtfwn9qd3b726, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTg1NGFjNzUtNmYwNmYxZjQtYjM2MmVlMWItZjIxNThhY2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:19.362974Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jyhtc4k0bnkt28jm0gmy2s6z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjU5NTdiMmYtMTEzYTNmYzQtZWMyNmIzZGUtY2RhYTgwZjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:34:19.567679Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710664. Ctx: { TraceId: 01jyhtc4ky9c4zw2m7stjptzb1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzQwZjg2NDMtNWZjYWI2ZC1iNjdmZjlhYS00ZWFkODQwOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:34:19.570912Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519616970752459075:2327], owner: [1:7519616970752459071:2325], scan id: 0, sys view info: Type: ETopQueriesByReadBytesOneMinute SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T20:34:19.571729Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519616970752459075:2327], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-24T20:34:19.572274Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519616970752459075:2327], row count: 2, finished: 1 2025-06-24T20:34:19.572362Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519616970752459075:2327], owner: [1:7519616970752459071:2325], scan id: 0, sys view info: Type: ETopQueriesByReadBytesOneMinute SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T20:34:19.577199Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797259565, txId: 281474976710663] shutting down 2025-06-24T20:34:20.635314Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519616975357330532:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:20.635416Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b11/r3tmp/tmpCcfYjV/pdisk_1.dat 2025-06-24T20:34:20.907336Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519616975357330504:2079] 1750797260579772 != 1750797260579775 2025-06-24T20:34:20.963375Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:20.970451Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:20.970533Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:20.973166Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15805, node 2 2025-06-24T20:34:21.174844Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:21.174866Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:21.174872Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:21.175000Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31197 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 7205 ... owPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 },{ Sid: user1 }] Groups: [] } Children [.metadata,Dir1,Table0,Tenant1,Tenant2] }] } 2025-06-24T20:36:57.497195Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [33:7519617651835636729:2418], row count: 0, finished: 0 2025-06-24T20:36:57.498903Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:36:57.500799Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [72057594046644480:9:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [SubDir1,SubDir2] }] } 2025-06-24T20:36:57.500882Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [33:7519617651835636729:2418], row count: 0, finished: 0 2025-06-24T20:36:57.501327Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:36:57.506134Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: [72057594046644480:10:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 
72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-24T20:36:57.506247Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [33:7519617651835636729:2418], row count: 2, finished: 0 2025-06-24T20:36:57.507979Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [33:7519617651835636729:2418], owner: [33:7519617651835636726:2416], scan id: 0, sys view info: Type: EAuthPermissions SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T20:36:57.513117Z node 33 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797417487, txId: 281474976715687] shutting down 2025-06-24T20:36:57.513898Z node 33 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [33:7519617561641321145:2079], database# , query hash# 3187945588805523718, cpu time# 430222 2025-06-24T20:36:57.758702Z node 36 :SYSTEM_VIEWS DEBUG: partition_stats.cpp:510: NSysView::TPartitionStatsCollector: TEvProcessOverloaded , top size by CPU # 0, top size by TLI # 0, time# 2025-06-24T20:36:57.758574Z 2025-06-24T20:36:57.884702Z node 33 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715690. Ctx: { TraceId: 01jyhtgz2w4hvr23tq54bdcm43, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=33&id=OThmYTg3ZmQtOTY2YzhlMWEtM2VkZmRlYWItNDYyZDI0YTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:36:57.896579Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [33:7519617651835636771:2428], owner: [33:7519617651835636768:2426], scan id: 0, sys view info: Type: EAuthPermissions SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T20:36:57.898733Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [33:7519617651835636771:2428], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-24T20:36:57.898768Z node 33 :SYSTEM_VIEWS DEBUG: auth_scan_base.h:100: ProceedToScan, tenant name: /Root tenant owner: root@builtin subject sid: empty require admin access: 0 is admin: 1 2025-06-24T20:36:57.898850Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:36:57.899393Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 },{ Sid: user1 }] Groups: [] } Children [.metadata,Dir1,Table0,Tenant1,Tenant2] }] } 2025-06-24T20:36:57.899456Z node 33 :SYSTEM_VIEWS DEBUG: 
scan_actor_base_impl.h:67: Sending scan batch, actor: [33:7519617651835636771:2428], row count: 0, finished: 0 2025-06-24T20:36:57.903466Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:36:57.903999Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [72057594046644480:9:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [SubDir1,SubDir2] }] } 2025-06-24T20:36:57.904075Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [33:7519617651835636771:2428], row count: 0, finished: 0 2025-06-24T20:36:57.904165Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T20:36:57.908970Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: [72057594046644480:10:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-24T20:36:57.909069Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [33:7519617651835636771:2428], row count: 1, finished: 0 2025-06-24T20:36:57.911070Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [33:7519617651835636771:2428], owner: [33:7519617651835636768:2426], scan id: 0, sys view info: Type: EAuthPermissions SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T20:36:57.915286Z node 33 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [33:7519617561641321145:2079], database# , query hash# 15123460272068726277, cpu time# 306654 2025-06-24T20:36:57.916879Z node 33 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797417879, txId: 281474976715689] shutting down 2025-06-24T20:36:57.965483Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 37 2025-06-24T20:36:57.975399Z node 33 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T20:36:57.976031Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 36 2025-06-24T20:36:57.976809Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(36, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T20:36:57.977290Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 35 2025-06-24T20:36:57.985122Z node 34 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:36:57.981411Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(35, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T20:36:57.997128Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 34 2025-06-24T20:36:58.003135Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(34, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T20:36:58.009986Z node 36 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:36:58.030839Z node 33 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[34:7519617575277938504:2100], Type=268959746 2025-06-24T20:36:58.030893Z node 33 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[34:7519617575277938504:2100], Type=268959746 2025-06-24T20:36:58.030920Z node 33 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[34:7519617575277938504:2100], Type=268959746 2025-06-24T20:36:58.030954Z node 33 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[34:7519617575277938504:2100], Type=268959746 >> TPersQueueTest::PreferredCluster_TwoEnabledClustersAndWriteSessionsWithDifferentPreferredCluster_SessionWithMismatchedClusterDiesAndOthersAlive [GOOD] >> TPersQueueTest::PreferredCluster_DisabledRemoteClusterAndWriteSessionsWithDifferentPreferredClusterAndLaterRemoteClusterEnabled_SessionWithMismatchedClusterDiesAfterPreferredClusterEnabledAndOtherSessionsAlive >> KqpCost::OlapRange >> TopicService::ThereAreGapsInTheOffsetRanges >> KqpPg::TableDeleteWhere-useSink [GOOD] >> TxUsage::WriteToTopic_Demo_26_Query [GOOD] >> DataShardVolatile::UpsertNoLocksArbiter-UseSink [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiter+UseSink >> TPopulatorTest::RemoveDir [GOOD] >> TPopulatorTest::MakeDir [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorTest::MakeDir [GOOD] Test command err: 2025-06-24T20:37:08.188843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:37:08.206516Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 100 2025-06-24T20:37:08.928402Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:97:2122] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: 
EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:72:2110], cookie# 100, event size# 419, preserialized size# 51 2025-06-24T20:37:08.928522Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:97:2122] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 3 2025-06-24T20:37:08.930063Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T20:37:08.930156Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T20:37:08.930192Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T20:37:08.961241Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:97:2122] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/DirA" PathDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: false CreateTxId: 100 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 
MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944 }: sender# [1:72:2110], cookie# 100, event size# 309, preserialized size# 2 2025-06-24T20:37:08.961325Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:97:2122] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 100, is deletion# false, version: 2 2025-06-24T20:37:08.961499Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:12:2059], cookie# 100 2025-06-24T20:37:08.961589Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:15:2062], cookie# 100 2025-06-24T20:37:08.961628Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:18:2065], cookie# 100 2025-06-24T20:37:08.961802Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:98:2123], cookie# 100 2025-06-24T20:37:08.973372Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T20:37:08.973611Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:99:2124], cookie# 100 2025-06-24T20:37:08.973656Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:97:2122] Ack update: ack to# [1:72:2110], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 3 2025-06-24T20:37:08.973806Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:12:2059], cookie# 100 2025-06-24T20:37:08.973873Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T20:37:08.973909Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T20:37:08.974351Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:100:2125], cookie# 100 2025-06-24T20:37:08.974402Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: 
populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:15:2062], cookie# 100 2025-06-24T20:37:08.974443Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:18:2065], cookie# 100 2025-06-24T20:37:08.974698Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:98:2123], cookie# 100 2025-06-24T20:37:09.003420Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:99:2124], cookie# 100 2025-06-24T20:37:09.003510Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:97:2122] Ack update: ack to# [1:72:2110], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 2 2025-06-24T20:37:09.003600Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:100:2125], cookie# 100 2025-06-24T20:37:09.003640Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:97:2122] Ack for unknown update (already acked?): sender# [1:100:2125], cookie# 100 FAKE_COORDINATOR: Add transaction: 100 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000001 2025-06-24T20:37:09.071530Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:97:2122] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 
MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:72:2110], cookie# 100, event size# 429, preserialized size# 56 2025-06-24T20:37:09.071619Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:97:2122] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 4 2025-06-24T20:37:09.071785Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T20:37:09.071860Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T20:37:09.071914Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 FAKE_COORDINATOR: Erasing txId 100 2025-06-24T20:37:09.072786Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:97:2122] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/DirA" PathDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944 }: sender# [1:72:2110], cookie# 100, event size# 314, preserialized size# 2 2025-06-24T20:37:09.072850Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:97:2122] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 100, is deletion# false, version: 3 2025-06-24T20:37:09.072978Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:12:2059], cookie# 100 2025-06-24T20:37:09.073030Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 
72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:15:2062], cookie# 100 2025-06-24T20:37:09.073071Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:18:2065], cookie# 100 2025-06-24T20:37:09.073246Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:98:2123], cookie# 100 2025-06-24T20:37:09.073285Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T20:37:09.073360Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T20:37:09.073392Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T20:37:09.073523Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:99:2124], cookie# 100 2025-06-24T20:37:09.073555Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:97:2122] Ack update: ack to# [1:72:2110], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 4 2025-06-24T20:37:09.073597Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:12:2059], cookie# 100 2025-06-24T20:37:09.073633Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:15:2062], cookie# 100 2025-06-24T20:37:09.073678Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:18:2065], cookie# 100 2025-06-24T20:37:09.073790Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:100:2125], cookie# 100 2025-06-24T20:37:09.074190Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:98:2123], cookie# 100 2025-06-24T20:37:09.074250Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:99:2124], cookie# 100 2025-06-24T20:37:09.074288Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: 
[1:97:2122] Ack update: ack to# [1:72:2110], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 3 2025-06-24T20:37:09.084840Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:100:2125], cookie# 100 2025-06-24T20:37:09.084915Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:97:2122] Ack for unknown update (already acked?): sender# [1:100:2125], cookie# 100 TestModificationResult got TxId: 100, wait until txId: 100 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorTest::RemoveDir [GOOD] Test command err: 2025-06-24T20:37:08.189050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:37:08.207176Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 100 2025-06-24T20:37:08.928419Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:97:2122] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:72:2110], cookie# 100, event size# 419, preserialized size# 51 2025-06-24T20:37:08.928537Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:97:2122] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 3 2025-06-24T20:37:08.930239Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T20:37:08.930356Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 
2025-06-24T20:37:08.930391Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T20:37:08.959863Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:97:2122] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/DirB" PathDescription { Self { Name: "DirB" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: false CreateTxId: 100 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944 }: sender# [1:72:2110], cookie# 100, event size# 309, preserialized size# 2 2025-06-24T20:37:08.960008Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:97:2122] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 100, is deletion# false, version: 2 2025-06-24T20:37:08.960239Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:12:2059], cookie# 100 2025-06-24T20:37:08.960382Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:15:2062], cookie# 100 2025-06-24T20:37:08.960426Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:18:2065], cookie# 100 2025-06-24T20:37:08.960628Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:98:2123], cookie# 100 2025-06-24T20:37:08.970866Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T20:37:08.972708Z node 1 
:SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:99:2124], cookie# 100 2025-06-24T20:37:08.972803Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:97:2122] Ack update: ack to# [1:72:2110], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 3 2025-06-24T20:37:08.972895Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:12:2059], cookie# 100 2025-06-24T20:37:08.972986Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T20:37:08.973044Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T20:37:08.973653Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:100:2125], cookie# 100 2025-06-24T20:37:08.973705Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:15:2062], cookie# 100 2025-06-24T20:37:08.973757Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:18:2065], cookie# 100 2025-06-24T20:37:08.974067Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:98:2123], cookie# 100 2025-06-24T20:37:09.001808Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:99:2124], cookie# 100 2025-06-24T20:37:09.001917Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:97:2122] Ack update: ack to# [1:72:2110], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 2 2025-06-24T20:37:09.002133Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:100:2125], cookie# 100 2025-06-24T20:37:09.002173Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:97:2122] Ack for unknown update (already acked?): sender# [1:100:2125], cookie# 100 FAKE_COORDINATOR: Add transaction: 100 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000001 2025-06-24T20:37:09.067475Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:97:2122] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: 
StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:72:2110], cookie# 100, event size# 429, preserialized size# 56 2025-06-24T20:37:09.067577Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:97:2122] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 4 2025-06-24T20:37:09.067772Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], co ... 
populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 101 2025-06-24T20:37:09.143106Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 101 2025-06-24T20:37:09.143228Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 5 }: sender# [1:99:2124], cookie# 101 2025-06-24T20:37:09.143253Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:97:2122] Ack update: ack to# [1:72:2110], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 5 2025-06-24T20:37:09.143310Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:12:2059], cookie# 101 2025-06-24T20:37:09.143395Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:15:2062], cookie# 101 2025-06-24T20:37:09.143422Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:18:2065], cookie# 101 2025-06-24T20:37:09.143458Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 5 }: sender# [1:100:2125], cookie# 101 2025-06-24T20:37:09.144112Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:98:2123], cookie# 101 2025-06-24T20:37:09.144226Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:99:2124], cookie# 101 2025-06-24T20:37:09.144247Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:97:2122] Ack update: ack to# [1:72:2110], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 3 2025-06-24T20:37:09.144293Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:100:2125], cookie# 101 2025-06-24T20:37:09.144319Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:97:2122] Ack for unknown update (already acked?): sender# [1:100:2125], cookie# 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 2025-06-24T20:37:09.146201Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:97:2122] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: 
"/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:72:2110], cookie# 101, event size# 321, preserialized size# 2 2025-06-24T20:37:09.146264Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:97:2122] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 101, is deletion# false, version: 6 2025-06-24T20:37:09.146410Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 101 2025-06-24T20:37:09.146479Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 101 2025-06-24T20:37:09.146540Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 101 2025-06-24T20:37:09.146796Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:97:2122] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/Root/DirB\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000002, drop txId: 101" Path: "/Root/DirB" PathId: 2 LastExistedPrefixPath: "/Root" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 72057594046678944 }: sender# [1:72:2110], cookie# 101, event size# 306, preserialized size# 0 2025-06-24T20:37:09.146829Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:97:2122] Update description: 
owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 101, is deletion# true, version: 0 2025-06-24T20:37:09.146900Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 101 2025-06-24T20:37:09.146941Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:15:2062], cookie# 101 2025-06-24T20:37:09.147006Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:18:2065], cookie# 101 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T20:37:09.147519Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:99:2124], cookie# 101 2025-06-24T20:37:09.147594Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:12:2059], cookie# 101 2025-06-24T20:37:09.147660Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 101 2025-06-24T20:37:09.147705Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 101 2025-06-24T20:37:09.147746Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:12:2059], cookie# 101 2025-06-24T20:37:09.147898Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:100:2125], cookie# 101 2025-06-24T20:37:09.147937Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:97:2122] Ack update: ack to# [1:72:2110], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 6 2025-06-24T20:37:09.148010Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:98:2123], cookie# 101 2025-06-24T20:37:09.148042Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:15:2062], cookie# 101 2025-06-24T20:37:09.148143Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:18:2065], cookie# 101 
2025-06-24T20:37:09.148580Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:98:2123], cookie# 101 2025-06-24T20:37:09.148703Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:99:2124], cookie# 101 2025-06-24T20:37:09.148738Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:97:2122] Ack update: ack to# [1:72:2110], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 18446744073709551615 2025-06-24T20:37:09.149247Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:100:2125], cookie# 101 2025-06-24T20:37:09.149288Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:97:2122] Ack for unknown update (already acked?): sender# [1:100:2125], cookie# 101 TestModificationResult got TxId: 101, wait until txId: 101 >> TxUsage::WriteToTopic_Demo_27_Table >> TPersQueueTest::CacheHead [GOOD] >> TPersQueueTest::CheckACLForGrpcWrite >> TNetClassifierUpdaterTest::TestFiltrationByNetboxTags [GOOD] >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithInvalidToken_SessionClosedWithUnauthenticatedError [GOOD] >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithValidTokenButWithoutACL_SessionClosedWithUnauthorizedError >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Query [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-25 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> TNetClassifierUpdaterTest::TestFiltrationByNetboxTags [GOOD] Test command err: 2025-06-24T20:34:55.988701Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617127512782584:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:55.988756Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027ff/r3tmp/tmpNRF6eI/pdisk_1.dat 2025-06-24T20:34:56.940206Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:57.009853Z node 1 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#26,[::1]:23585) connection closed with error: Connection refused 2025-06-24T20:34:57.010627Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:57.010708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:57.013330Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:57.059446Z node 1 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-24T20:34:57.119879Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:00.599117Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519617147936076841:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:00.599196Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027ff/r3tmp/tmpdjoudI/pdisk_1.dat 2025-06-24T20:35:00.758951Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:00.772767Z node 2 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#28,[::1]:26003) connection closed with error: Connection refused 2025-06-24T20:35:00.775831Z node 2 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-24T20:35:00.777003Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:00.777107Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:00.786051Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:01.610312Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:04.066133Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519617166345594461:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:04.066210Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027ff/r3tmp/tmpP8oDgM/pdisk_1.dat 2025-06-24T20:35:04.170161Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519617166345594429:2079] 1750797304065433 != 1750797304065436 2025-06-24T20:35:04.183964Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:04.193135Z node 3 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#26,[::1]:5323) connection closed with error: Connection refused 2025-06-24T20:35:04.193975Z node 3 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-24T20:35:04.195130Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:04.195222Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:04.196545Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:05.073584Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:07.577844Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519617177968052887:2147];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027ff/r3tmp/tmpbezoCR/pdisk_1.dat 2025-06-24T20:35:07.645096Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:35:07.706714Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:07.720990Z node 4 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#28,[::1]:1798) connection closed with error: Connection refused 2025-06-24T20:35:07.721444Z node 4 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-24T20:35:07.730386Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:07.730483Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:07.731939Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:08.578101Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:11.120461Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519617195259602773:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:11.120516Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027ff/r3tmp/tmpFjHAGo/pdisk_1.dat 2025-06-24T20:35:11.241485Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:11.248349Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519617195259602754:2079] 1750797311120022 != 1750797311120025 2025-06-24T20:35:11.270569Z node 5 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#30,[::1]:5068) connection closed with error: Connection refused 2025-06-24T20:35:11.272019Z node 5 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-24T20:35:11.272735Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:11.272802Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:11.274917Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:12.131486Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:14.875220Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519617206879073110:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:14.875292Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027ff/r3tmp/tmpOLoURY/pdisk_1.dat 2025-06-24T20:35:15.010663Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519617206879073083:2079] 1750797314874803 != 1750797314874806 2025-06-24T20:35:15.057280Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:15.063862Z node 6 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#32,[::1]:17691) connection closed with error: Connection refused 2025-06-24T20:35:15.064233Z node 6 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-24T20:35:15.079073Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:15.079195Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:15.080846Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:15.887607Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:18.853059Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519617226769468486:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:18.853121Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027ff/r3tmp/tmp7GotEZ/pdisk_1.dat 2025-06-24T20:35:18.976287Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:18.994947Z node 7 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#34,[::1]:15962) connection closed with error: Connection refused 2025-06-24T20:35:18.995655Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:18.995733Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:18.995980Z node 7 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-24T20:35:18.998588Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:19.871323Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:23.195234Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7519617246828537153:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:23.204906Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_me ... 
g.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[17:7519617488906724073:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:19.865241Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027ff/r3tmp/tmptfRsSI/pdisk_1.dat 2025-06-24T20:36:20.093503Z node 17 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:20.093725Z node 17 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [17:7519617488906723876:2079] 1750797379840169 != 1750797379840172 2025-06-24T20:36:20.141453Z node 17 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#26,[::1]:62191) connection closed with error: Connection refused 2025-06-24T20:36:20.144778Z node 17 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-24T20:36:20.146495Z node 17 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(17, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:36:20.146588Z node 17 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(17, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:36:20.149418Z node 17 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(17, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:36:20.851331Z node 17 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:36:27.032464Z node 18 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[18:7519617514873348442:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:27.032538Z node 18 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027ff/r3tmp/tmpIPyIov/pdisk_1.dat 2025-06-24T20:36:27.282560Z node 18 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [18:7519617514873348424:2079] 1750797387024552 != 1750797387024555 2025-06-24T20:36:27.303456Z node 18 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:27.314728Z node 18 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(18, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:36:27.315727Z node 18 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(18, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:36:27.319886Z node 18 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#28,[::1]:31048) connection closed with error: Connection refused 2025-06-24T20:36:27.320325Z node 18 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-24T20:36:27.323178Z node 18 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(18, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:36:28.083288Z node 18 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:36:33.028493Z node 19 
:METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[19:7519617548605924336:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:33.028555Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027ff/r3tmp/tmpI4nDFG/pdisk_1.dat 2025-06-24T20:36:33.321140Z node 19 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:33.323318Z node 19 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [19:7519617548605924318:2079] 1750797393019810 != 1750797393019813 2025-06-24T20:36:33.359339Z node 19 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#30,[::1]:12887) connection closed with error: Connection refused 2025-06-24T20:36:33.360440Z node 19 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-24T20:36:33.363524Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:36:33.363621Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:36:33.365741Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:36:34.075752Z node 19 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:36:39.638382Z node 20 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[20:7519617572544755630:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:39.638425Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027ff/r3tmp/tmp14WWZQ/pdisk_1.dat 2025-06-24T20:36:39.856647Z node 20 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:39.875023Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:36:39.875164Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:36:39.878524Z node 20 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#32,[::1]:16052) connection closed with error: Connection refused 2025-06-24T20:36:39.879662Z node 20 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-24T20:36:39.882288Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:36:40.741573Z node 20 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:36:46.738188Z node 21 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[21:7519617602675998251:2244];send_to=[0:7307199536658146131:7762515]; 
2025-06-24T20:36:46.740962Z node 21 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027ff/r3tmp/tmp3sT1mS/pdisk_1.dat 2025-06-24T20:36:47.218332Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:36:47.218458Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:36:47.248919Z node 21 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:47.266148Z node 21 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#34,[::1]:61384) connection closed with error: Connection refused 2025-06-24T20:36:47.269293Z node 21 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-24T20:36:47.270775Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:36:47.719344Z node 21 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:36:56.443864Z node 22 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[22:7519617647018266832:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:56.443952Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027ff/r3tmp/tmp2VopLq/pdisk_1.dat 2025-06-24T20:36:56.709131Z node 22 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:56.710919Z node 22 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [22:7519617647018266809:2079] 1750797416440982 != 1750797416440985 2025-06-24T20:36:56.713118Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:36:56.713376Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:36:56.713819Z node 22 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#36,[::1]:25717) connection closed with error: Connection refused 2025-06-24T20:36:56.726566Z node 22 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-24T20:36:56.730387Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:36:57.565340Z node 22 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:37:04.405426Z node 23 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[23:7519617681770379549:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:04.405491Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027ff/r3tmp/tmp8VKTSR/pdisk_1.dat 2025-06-24T20:37:04.668017Z node 23 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:04.669793Z node 23 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [23:7519617681770379520:2079] 1750797424393884 != 1750797424393887 2025-06-24T20:37:04.686585Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:04.686707Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:04.690442Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:37:04.692161Z node 23 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#38,[::1]:8943) connection closed with error: Connection refused 2025-06-24T20:37:04.695625Z node 23 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-24T20:37:05.451357Z node 23 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TPersQueueTest::StreamReadManyUpdateTokenAndRead [GOOD] >> TPersQueueTest::SetupWriteSession >> TPersQueueTest::DirectReadCorrectOffsetsOnRestart [GOOD] >> TPersQueueTest::DirectReadBadCases >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-25 >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Table >> TxUsage::WriteToTopic_Demo_36_Table >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-43 >> TTopicApiDescribes::GetLocalDescribe [GOOD] >> DataShardVolatile::DistributedWriteLostPlanThenDrop [GOOD] >> DataShardVolatile::DistributedWriteLostPlanThenSplit >> TxUsage::WriteToTopic_Demo_22_RestartNo_Table [GOOD] >> TPersQueueTest::SchemeOperationsTest [GOOD] >> TPersQueueTest::SchemeOperationFirstClassCitizen |89.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut |89.8%| [LD] {RESULT} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut |89.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut >> KqpCost::OltpWriteRow-isSink [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_1_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::TableDeleteWhere-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 11097, MsgBus: 8814 2025-06-24T20:30:35.638169Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616007998476639:2128];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:35.638208Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003f47/r3tmp/tmpCMCQnG/pdisk_1.dat 2025-06-24T20:30:36.178572Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:30:36.178674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Disconnected -> Connecting 2025-06-24T20:30:36.180831Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:36.197819Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:30:36.199269Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519616007998476546:2079] 1750797035634077 != 1750797035634080 TServer::EnableGrpc on GrpcPort 11097, node 1 2025-06-24T20:30:36.313436Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:30:36.313457Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:30:36.313465Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:30:36.313610Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8814 2025-06-24T20:30:36.598904Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8814 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:30:37.078075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:30:39.289264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667)
: Error: Bulk upsert to table '/Root/Coerce_pgbpchar_17472595041006102391_17823623939509273229' Unable to coerce value for pgbpchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-24T20:30:39.602636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667)
: Error: Bulk upsert to table '/Root/Coerce__pgbpchar_17472595041006102391_5352544928909966465' Unable to coerce value for _pgbpchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-24T20:30:39.790567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) abcd 2025-06-24T20:30:39.986993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) {abcd,abcd} 2025-06-24T20:30:40.200114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) abcd 2025-06-24T20:30:40.439258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) {"abcd ","abcd "} 2025-06-24T20:30:40.643336Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616007998476639:2128];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:30:40.643389Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:30:40.829916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667)
: Error: Bulk upsert to table '/Root/Coerce_pgvarchar_17472595041006102391_17823623939509273229' Unable to coerce value for pgvarchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character varying(2) 2025-06-24T20:30:40.946166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667)
: Error: Bulk upsert to table '/Root/Coerce__pgvarchar_17472595041006102391_5352544928909966465' Unable to coerce value for _pgvarchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character varying(2) 2025-06-24T20:30:41.039478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) abcd 2025-06-24T20:30:41.264108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) {abcd,abcd} 2025-06-24T20:30:41.471578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) abcd 2025-06-24T20:30:41.630803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) {abcd,abcd} 2025-06-24T20:30:41.840698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710686:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667)
: Error: Bulk upsert to table '/Root/Coerce_pgbit_17472595041006102391_5866627432374416336' Unable to coerce value for pgbit: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: bit string length 4 does not match type bit(2) 2025-06-24T20:30:41.955838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710687:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667)
: Error: Bulk upsert to table '/Root/Coerce__pgbit_17472595041006102391_11087201080355820517' Unable to coerce value for _pgbit: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: bit string length 4 does not match type bit(2) 2025-06-24T20:30:42.086145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710688:0, at schemeshard: 72057594046644480, first GetDB called at: ... d 281474976710845 at tablet 72075186224037947 status: ERROR errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976710845] at 72075186224037947 while waiting for scan finish) | 2025-06-24T20:37:02.567471Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710846:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:02.696436Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:37:02.748969Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710848:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 829 2025-06-24T20:37:02.896402Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:37:02.939391Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710850:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:03.136769Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:37:03.177121Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710852:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:03.305623Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 774 2025-06-24T20:37:03.356359Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710854:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:03.557239Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710855:0, at 
schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:03.711309Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2950 2025-06-24T20:37:03.752248Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710857:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:03.899230Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:37:03.939057Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710859:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 114 2025-06-24T20:37:04.086354Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710860:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:04.211544Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:37:04.214462Z node 11 :TX_DATASHARD ERROR: finish_propose_unit.cpp:245: Prepare transaction failed. 
txid 281474976710862 at tablet 72075186224037956 errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976710862] at 72075186224037956 while waiting for scan finish) | 2025-06-24T20:37:04.220977Z node 11 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976710862 at tablet 72075186224037956 status: ERROR errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976710862] at 72075186224037956 while waiting for scan finish) | 2025-06-24T20:37:04.235035Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710863:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 3802 2025-06-24T20:37:04.342103Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:37:04.377428Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710865:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:04.567021Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:37:04.623688Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710867:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 4072 2025-06-24T20:37:04.831061Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710868:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:05.014484Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:37:05.056171Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710871:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:05.209416Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 142 2025-06-24T20:37:05.248443Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710873:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:05.346924Z node 11 :READ_TABLE_API WARN: 
rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:37:05.396280Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710875:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:05.571951Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 3615 2025-06-24T20:37:05.609988Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710877:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:05.732311Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:37:05.766478Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710879:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 3614 2025-06-24T20:37:05.949163Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710880:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:06.080157Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:37:06.114037Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710882:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 22 2025-06-24T20:37:06.250660Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710883:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:06.443713Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:37:06.497554Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710885:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TxUsage::WriteToTopic_Demo_22_RestartNo_Query >> 
SystemView::ShowCreateTableSequences [FAIL] >> SystemView::ShowCreateTablePartitionPolicyIndexTable ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest >> TTopicApiDescribes::GetLocalDescribe [GOOD] Test command err: 2025-06-24T20:36:57.265766Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617650453220231:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:57.265811Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0048dd/r3tmp/tmpSfPbxw/pdisk_1.dat 2025-06-24T20:36:57.750877Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:36:57.754453Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:36:57.912231Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:36:58.319283Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:36:58.336812Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:36:58.410402Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:36:58.437373Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:36:58.437490Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:36:58.437847Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:36:58.437886Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:36:58.448786Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:36:58.449189Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:36:58.449669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:36:58.491136Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17096, node 1 2025-06-24T20:36:58.740663Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-24T20:36:58.747326Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-24T20:36:59.243857Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0048dd/r3tmp/yandexlZp68Z.tmp 2025-06-24T20:36:59.243879Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0048dd/r3tmp/yandexlZp68Z.tmp 2025-06-24T20:36:59.256497Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0048dd/r3tmp/yandexlZp68Z.tmp 2025-06-24T20:36:59.257137Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:36:59.516778Z INFO: TTestServer started on Port 21805 GrpcPort 17096 TClient is connected to server localhost:21805 PQClient connected to localhost:17096 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:37:00.437173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:37:00.598316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T20:37:02.263267Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617650453220231:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:02.263365Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:37:04.621103Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617682008275136:2272], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:04.621191Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617682008275147:2275], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:04.621249Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:04.628904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:37:04.682495Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519617682008275150:2276], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-24T20:37:04.772978Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519617682008275178:2137] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:37:05.291530Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617680517992373:2314], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:37:05.293877Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MmEyNzVlZDUtYmM1ZWMyNDktNWExNTkwYWYtYzUzM2Y2OTU=, ActorId: [1:7519617680517992341:2307], ActorState: ExecuteState, TraceId: 01jyhth62c7jxw9ch8kc6yw5mc, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:37:05.296250Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:37:05.294411Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519617682008275185:2280], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:37:05.297649Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NjViMzI1ZmUtOWI5MzM0MjQtYTVlYTA3MjEtNmI1MzA1NzQ=, ActorId: [2:7519617682008275134:2271], ActorState: ExecuteState, TraceId: 01jyhth5z7cdxkpjgfbn583nt9, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:37:05.298084Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:37:05.302913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:05.517364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 28147497 ... 
ate: StateInit] bootstrapping 14 [2:7519617720662981635:2392] 2025-06-24T20:37:13.472594Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037898, Partition: 14, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 14 generation 1 [2:7519617720662981635:2392] 2025-06-24T20:37:13.456148Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037897, Partition: 13, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 13 generation 1 [1:7519617719172699332:2469] 2025-06-24T20:37:13.471946Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037892, Partition: 0, State: StateInit] bootstrapping 0 [1:7519617719172699348:2473] 2025-06-24T20:37:13.476926Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037895, Partition: 9, State: StateInit] bootstrapping 9 [2:7519617720662981632:2389] 2025-06-24T20:37:13.477644Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037895, Partition: 2, State: StateInit] bootstrapping 2 [2:7519617720662981633:2390] 2025-06-24T20:37:13.478269Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037895, Partition: 9, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 9 generation 1 [2:7519617720662981632:2389] 2025-06-24T20:37:13.479701Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037895, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 2 generation 1 [2:7519617720662981633:2390] 2025-06-24T20:37:13.480646Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037899, Partition: 4, State: StateInit] bootstrapping 4 [1:7519617719172699334:2471] 2025-06-24T20:37:13.482864Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037899, Partition: 4, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 4 generation 1 [1:7519617719172699334:2471] 2025-06-24T20:37:13.482911Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037892, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 0 generation 1 [1:7519617719172699348:2473] 2025-06-24T20:37:13.487392Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037896, Partition: 5, State: StateInit] bootstrapping 5 [1:7519617719172699385:2480] 2025-06-24T20:37:13.489695Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037896, Partition: 5, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 5 generation 1 [1:7519617719172699385:2480] 2025-06-24T20:37:13.490424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:37:13.495087Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:13.503694Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037892, Partition: 3, State: StateInit] bootstrapping 3 [1:7519617719172699336:2472] 2025-06-24T20:37:13.505514Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037892, Partition: 3, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 3 generation 1 [1:7519617719172699336:2472] 2025-06-24T20:37:13.515428Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037896, Partition: 10, State: StateInit] bootstrapping 10 [1:7519617719172699386:2481] 2025-06-24T20:37:13.517771Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037896, Partition: 10, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 10 generation 1 
[1:7519617719172699386:2481] 2025-06-24T20:37:13.547522Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72075186224037893] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:37:13.548534Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72075186224037898] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:37:13.549033Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72075186224037895] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:37:13.550882Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72075186224037899] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:37:13.553919Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:37:13.554592Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72075186224037894] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:37:13.559123Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72075186224037896] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:37:13.559948Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72075186224037897] disable metering: reason# billing is not enabled in BillingMeteringConfig Create topic result: 1 2025-06-24T20:37:13.609473Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7519617719172699697:3955]: Request location 2025-06-24T20:37:13.609921Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519617719172699708:3960] connected; active server actors: 1 2025-06-24T20:37:13.610235Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 0, NodeId 1, Generation 1 2025-06-24T20:37:13.610262Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 1, NodeId 2, Generation 1 2025-06-24T20:37:13.610273Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037895, partitionId 2, NodeId 2, Generation 1 2025-06-24T20:37:13.610286Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 1, Generation 1 2025-06-24T20:37:13.610306Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037899, partitionId 4, NodeId 1, Generation 1 2025-06-24T20:37:13.610320Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 5, NodeId 1, Generation 1 2025-06-24T20:37:13.610330Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 6, NodeId 2, Generation 1 2025-06-24T20:37:13.610345Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037897, partitionId 7, NodeId 1, Generation 1 2025-06-24T20:37:13.610354Z node 2 
:PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037894, partitionId 8, NodeId 1, Generation 1 2025-06-24T20:37:13.610365Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037895, partitionId 9, NodeId 2, Generation 1 2025-06-24T20:37:13.610375Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 10, NodeId 1, Generation 1 2025-06-24T20:37:13.610386Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037898, partitionId 11, NodeId 2, Generation 1 2025-06-24T20:37:13.610396Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037894, partitionId 12, NodeId 1, Generation 1 2025-06-24T20:37:13.610407Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037897, partitionId 13, NodeId 1, Generation 1 2025-06-24T20:37:13.610417Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037898, partitionId 14, NodeId 2, Generation 1 2025-06-24T20:37:13.610745Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [1:7519617719172699697:3955]: Got location 2025-06-24T20:37:13.611109Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519617719172699708:3960] disconnected; active server actors: 1 2025-06-24T20:37:13.611167Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519617719172699708:3960] disconnected no session 2025-06-24T20:37:13.616020Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7519617719172699709:3961]: Request location 2025-06-24T20:37:13.616627Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519617719172699711:3963] connected; active server actors: 1 2025-06-24T20:37:13.617307Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [1:7519617719172699709:3961]: Got location 2025-06-24T20:37:13.617067Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 1, NodeId 2, Generation 1 2025-06-24T20:37:13.618097Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7519617719172699712:3964]: Request location 2025-06-24T20:37:13.617085Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 1, Generation 1 2025-06-24T20:37:13.617100Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 5, NodeId 1, Generation 1 2025-06-24T20:37:13.617595Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519617719172699711:3963] disconnected; active server actors: 1 
2025-06-24T20:37:13.617613Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519617719172699711:3963] disconnected no session 2025-06-24T20:37:13.618480Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519617719172699714:3966] connected; active server actors: 1 2025-06-24T20:37:14.323067Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [1:7519617723467667095:2515] TxId: 281474976710684. Ctx: { TraceId: 01jyhthf1a3hs2nm3qe2ka3y78, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzMyMDFjNjItYWY4YjM2Y2YtMmMyOTAxMTItZWE2YjFjMWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 2 2025-06-24T20:37:14.323823Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7519617723467667099:2515], TxId: 281474976710684, task: 3. Ctx: { CustomerSuppliedId : . TraceId : 01jyhthf1a3hs2nm3qe2ka3y78. SessionId : ydb://session/3?node_id=1&id=MzMyMDFjNjItYWY4YjM2Y2YtMmMyOTAxMTItZWE2YjFjMWM=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [1:7519617723467667095:2515], status: UNAVAILABLE, reason: {
: Error: Terminate execution } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::OltpWriteRow-isSink [GOOD] Test command err: Trying to start YDB, gRPC: 22421, MsgBus: 11436 2025-06-24T20:37:04.661104Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617681278524137:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:04.661309Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a4d/r3tmp/tmpZCKklI/pdisk_1.dat 2025-06-24T20:37:05.270479Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617681278524024:2079] 1750797424625639 != 1750797424625642 2025-06-24T20:37:05.290217Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:05.301382Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:05.301627Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:05.305771Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22421, node 1 2025-06-24T20:37:05.549021Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:37:05.549048Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:37:05.549067Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:37:05.549197Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:37:05.658189Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11436 TClient is connected to server localhost:11436 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:37:06.366263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:37:06.416163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:37:06.428058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:37:06.738314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:37:07.040263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:37:07.206689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:37:09.436822Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617702753362130:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:09.436986Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:09.651475Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617681278524137:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:09.651528Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:37:09.982857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:10.051094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:10.127216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:10.210500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:10.269238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:10.356750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:10.462748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:10.588882Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519617707048330093:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:10.589029Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:10.589409Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617707048330098:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:10.594852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:37:10.645337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T20:37:10.645584Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617707048330100:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:37:10.717043Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617707048330153:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:37:13.065745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) query_phases { duration_us: 711 cpu_time_us: 711 } query_phases { duration_us: 10801 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 1350 affected_shards: 1 } compilation { duration_us: 111865 cpu_time_us: 107674 } process_cpu_time_us: 1133 total_duration_us: 126989 total_cpu_time_us: 110868 query_phases { duration_us: 723 cpu_time_us: 723 } query_phases { duration_us: 7676 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 1417 affected_shards: 1 } compilation { duration_us: 112394 cpu_time_us: 108210 } process_cpu_time_us: 1280 total_duration_us: 125266 total_cpu_time_us: 111630 2025-06-24T20:37:14.122577Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519617724228199763:2513], TxId: 281474976710677, task: 1. Ctx: { TraceId : 01jyhther85527mk20xcq8jnjt. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=YjIzN2UzMmEtOTY1NDY0MjItYTFmZWE2NTItZjA5N2MzNTY=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }. 2025-06-24T20:37:14.123740Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7519617724228199764:2514], TxId: 281474976710677, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=YjIzN2UzMmEtOTY1NDY0MjItYTFmZWE2NTItZjA5N2MzNTY=. TraceId : 01jyhther85527mk20xcq8jnjt. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [1:7519617724228199760:2473], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-24T20:37:14.124339Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YjIzN2UzMmEtOTY1NDY0MjItYTFmZWE2NTItZjA5N2MzNTY=, ActorId: [1:7519617715638265007:2473], ActorState: ExecuteState, TraceId: 01jyhther85527mk20xcq8jnjt, Create QueryResponse for error on request, msg: query_phases { duration_us: 1035 cpu_time_us: 1035 } query_phases { duration_us: 7558 table_access { name: "/Root/TestTable" reads { rows: 1 bytes: 8 } partitions_count: 1 } cpu_time_us: 6031 affected_shards: 1 } query_phases { duration_us: 37686 cpu_time_us: 37838 } compilation { duration_us: 458747 cpu_time_us: 452753 } process_cpu_time_us: 2192 total_duration_us: 515320 total_cpu_time_us: 499849 query_phases { duration_us: 859 cpu_time_us: 859 } query_phases { duration_us: 5881 table_access { name: "/Root/TestTable" partitions_count: 1 } cpu_time_us: 4364 affected_shards: 1 } query_phases { duration_us: 2358 cpu_time_us: 2019 } query_phases { duration_us: 5904 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 1839 affected_shards: 1 } compilation { duration_us: 378340 cpu_time_us: 367237 } process_cpu_time_us: 2609 total_duration_us: 408653 total_cpu_time_us: 378927 query_phases { duration_us: 854 cpu_time_us: 854 } query_phases { duration_us: 6469 table_access { name: "/Root/TestTable" partitions_count: 1 } cpu_time_us: 4865 affected_shards: 1 } query_phases { duration_us: 10189 cpu_time_us: 1063 affected_shards: 1 } compilation { duration_us: 388378 cpu_time_us: 382749 } process_cpu_time_us: 2123 total_duration_us: 422617 total_cpu_time_us: 391654 query_phases { duration_us: 649 cpu_time_us: 649 } query_phases { duration_us: 36876 table_access { name: "/Root/TestTable" reads { rows: 1 bytes: 8 } partitions_count: 1 } cpu_time_us: 35315 affected_shards: 1 } query_phases { duration_us: 6150 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 1871 affected_shards: 1 } compilation { duration_us: 250871 cpu_time_us: 244852 } process_cpu_time_us: 1474 total_duration_us: 305118 total_cpu_time_us: 284161 query_phases { duration_us: 564 cpu_time_us: 564 } query_phases { duration_us: 5762 table_access { name: "/Root/TestTable" deletes { rows: 1 } partitions_count: 1 } cpu_time_us: 1097 affected_shards: 1 } compilation { duration_us: 78080 cpu_time_us: 72392 } process_cpu_time_us: 918 total_duration_us: 87729 total_cpu_time_us: 74971 query_phases { duration_us: 566 cpu_time_us: 566 } query_phases { duration_us: 3515 table_access { name: "/Root/TestTable" deletes { rows: 1 } partitions_count: 1 } cpu_time_us: 986 affected_shards: 1 } compilation { duration_us: 98238 cpu_time_us: 94149 } process_cpu_time_us: 1085 total_duration_us: 107327 total_cpu_time_us: 96786 >> TopicAutoscaling::ReadingAfterSplitTest_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK >> TxUsage::Sinks_Oltp_WriteToTopics_1_Query >> KqpQuery::ExecuteWriteQuery [GOOD] >> TTopicApiDescribes::DescribeTopic [GOOD] >> TTopicApiDescribes::DescribeConsumer [GOOD] >> TxUsage::WriteToTopic_Demo_12_Table [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-4 >> KqpCost::OlapRange [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-25 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-26 >> TxUsage::WriteToTopic_Demo_12_Query >> 
TTopicApiDescribes::GetPartitionDescribe [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-25 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-26 |89.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write |89.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write |89.8%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest >> TTopicApiDescribes::DescribeTopic [GOOD] Test command err: 2025-06-24T20:37:00.746397Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617661844709598:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:00.749760Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:37:00.791290Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519617664939586176:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:00.791345Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:37:01.053799Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:37:01.070470Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004836/r3tmp/tmpXRY6by/pdisk_1.dat 2025-06-24T20:37:01.713583Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:01.755163Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:37:01.812174Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:37:01.833843Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:37:01.838656Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:37:01.848984Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:01.849074Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:01.852228Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:01.859396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:01.874758Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 
Cookie 2 2025-06-24T20:37:01.874934Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:37:01.879379Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11852, node 1 2025-06-24T20:37:02.123490Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/004836/r3tmp/yandexO8CsJU.tmp 2025-06-24T20:37:02.123517Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/004836/r3tmp/yandexO8CsJU.tmp 2025-06-24T20:37:02.123699Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/004836/r3tmp/yandexO8CsJU.tmp 2025-06-24T20:37:02.123879Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:37:02.334281Z INFO: TTestServer started on Port 4990 GrpcPort 11852 TClient is connected to server localhost:4990 PQClient connected to localhost:11852 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:37:03.098770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:37:03.164484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 
2025-06-24T20:37:05.736992Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617661844709598:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:05.737064Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:37:05.795901Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519617664939586176:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:05.832172Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:37:06.092406Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617687614514342:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:06.092495Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:06.099481Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617687614514379:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:06.104452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:37:06.155398Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617687614514381:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-24T20:37:06.607442Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617687614514467:2755] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:37:06.637538Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519617690709390333:2280], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:37:06.637956Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NzNlZWRiNzctNmRhNGFlMDAtOWI4NTMyMzAtMjU1MDkwNTE=, ActorId: [2:7519617690709390285:2273], ActorState: ExecuteState, TraceId: 01jyhth7qb4gs84ths01qbxdtq, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:37:06.645182Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:37:06.643957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:06.656896Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617687614514503:2314], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:37:06.657897Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YmRlNmI4YWUtOTg0MzQ5MS0zMmZkNWZiLTk4NDU0OGQy, ActorId: [1:7519617687614514339:2297], ActorState: ExecuteState, TraceId: 01jyhth7da2fbxr6342780ceph, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:37:06.658332Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: ... } attributes { key: "_message_group_seqno_retention_period_ms" value: "1382400000" } consumers { name: "shared/user" read_from { } attributes { key: "_service_type" value: "data-streams" } consumer_stats { min_partitions_last_read_time { seconds: 1750797435 nanos: 95000000 } max_read_time_lag { } max_write_time_lag { } bytes_read { } max_committed_time_lag { } } } topic_stats { min_last_write_time { seconds: 1750797435 nanos: 344000000 } max_write_time_lag { } bytes_written { } } } } } Describe topic with location 2025-06-24T20:37:15.414293Z node 1 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-24T20:37:15.414423Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1186: Describe topic actor for path /Root/PQ//rt3.dc1--topic-x 2025-06-24T20:37:15.415696Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7519617726269222414:2562]: Request location 2025-06-24T20:37:15.418751Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519617726269222416:2563] connected; active server actors: 1 2025-06-24T20:37:15.419177Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 0, NodeId 2, Generation 2 2025-06-24T20:37:15.419196Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 1, NodeId 1, Generation 2 2025-06-24T20:37:15.419209Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037895, partitionId 2, NodeId 2, Generation 2 2025-06-24T20:37:15.419224Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 2, Generation 2 2025-06-24T20:37:15.419237Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037899, partitionId 4, NodeId 1, Generation 2 2025-06-24T20:37:15.419250Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 5, NodeId 1, Generation 2 2025-06-24T20:37:15.419283Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 6, NodeId 1, Generation 2 2025-06-24T20:37:15.419310Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: 
[72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037897, partitionId 7, NodeId 2, Generation 2 2025-06-24T20:37:15.419321Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037894, partitionId 8, NodeId 2, Generation 2 2025-06-24T20:37:15.419334Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037895, partitionId 9, NodeId 2, Generation 2 2025-06-24T20:37:15.419346Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 10, NodeId 1, Generation 2 2025-06-24T20:37:15.419367Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037898, partitionId 11, NodeId 1, Generation 2 2025-06-24T20:37:15.419379Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037894, partitionId 12, NodeId 2, Generation 2 2025-06-24T20:37:15.419390Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037897, partitionId 13, NodeId 2, Generation 2 2025-06-24T20:37:15.419401Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037898, partitionId 14, NodeId 1, Generation 2 2025-06-24T20:37:15.422204Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [1:7519617726269222414:2562]: Got location Got response: operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Topic.DescribeTopicResult] { self { name: "rt3.dc1--topic-x" owner: "root@builtin" type: TOPIC created_at { plan_step: 1750797433868 tx_id: 281474976715676 } } partitioning_settings { min_active_partitions: 15 max_active_partitions: 1 auto_partitioning_settings { strategy: AUTO_PARTITIONING_STRATEGY_DISABLED partition_write_speed { stabilization_window { seconds: 300 } up_utilization_percent: 80 down_utilization_percent: 20 } } } partitions { active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 1 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 2 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 3 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 4 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 5 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 6 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 7 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 8 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 9 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 10 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 11 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 12 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 13 active: true 
partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 14 active: true partition_location { node_id: 1 generation: 2 } } retention_period { seconds: 64800 } partition_write_speed_bytes_per_second: 2097152 partition_write_burst_bytes: 2097152 attributes { key: "__max_partition_message_groups_seqno_stored" value: "6000000" } attributes { key: "_message_group_seqno_retention_period_ms" value: "1382400000" } consumers { name: "shared/user" read_from { } attributes { key: "_service_type" value: "data-streams" } } } } } Describe topic with no stats or location 2025-06-24T20:37:15.426024Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519617726269222416:2563] disconnected; active server actors: 1 2025-06-24T20:37:15.426053Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519617726269222416:2563] disconnected no session Got response: 2025-06-24T20:37:15.427811Z node 1 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-24T20:37:15.427925Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1186: Describe topic actor for path /Root/PQ//rt3.dc1--topic-x operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Topic.DescribeTopicResult] { self { name: "rt3.dc1--topic-x" owner: "root@builtin" type: TOPIC created_at { plan_step: 1750797433868 tx_id: 281474976715676 } } partitioning_settings { min_active_partitions: 15 max_active_partitions: 1 auto_partitioning_settings { strategy: AUTO_PARTITIONING_STRATEGY_DISABLED partition_write_speed { stabilization_window { seconds: 300 } up_utilization_percent: 80 down_utilization_percent: 20 } } } partitions { active: true } partitions { partition_id: 1 active: true } partitions { partition_id: 2 active: true } partitions { partition_id: 3 active: true } partitions { partition_id: 4 active: true } partitions { partition_id: 5 active: true } partitions { partition_id: 6 active: true } partitions { partition_id: 7 active: true } partitions { partition_id: 8 active: true } partitions { partition_id: 9 active: true } partitions { partition_id: 10 active: true } partitions { partition_id: 11 active: true } partitions { partition_id: 12 active: true } partitions { partition_id: 13 active: true } partitions { partition_id: 14 active: true } retention_period { seconds: 64800 } partition_write_speed_bytes_per_second: 2097152 partition_write_burst_bytes: 2097152 attributes { key: "__max_partition_message_groups_seqno_stored" value: "6000000" } attributes { key: "_message_group_seqno_retention_period_ms" value: "1382400000" } consumers { name: "shared/user" read_from { } attributes { key: "_service_type" value: "data-streams" } } } } } Describe bad topic 2025-06-24T20:37:15.433555Z node 1 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-24T20:37:15.433672Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1186: Describe topic actor for path /Root/PQ//bad-topic Got response: operation { ready: true status: SCHEME_ERROR issues { message: "path \'Root/PQ/bad-topic\' does not exist or you do not have access rights" issue_code: 500018 severity: 1 } } ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest >> TTopicApiDescribes::DescribeConsumer [GOOD] Test command err: 2025-06-24T20:36:59.979577Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617658791589879:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:59.979632Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:37:00.122643Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519617662401450850:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:00.199641Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:37:00.466625Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:37:00.469604Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004888/r3tmp/tmpf5S2lS/pdisk_1.dat 2025-06-24T20:37:00.877572Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:00.877712Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:00.880789Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:00.899762Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:00.899839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:00.917891Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:37:00.919095Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:37:00.923267Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19596, node 1 2025-06-24T20:37:01.011732Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:37:01.075886Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/004888/r3tmp/yandex67bW3z.tmp 2025-06-24T20:37:01.075910Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/004888/r3tmp/yandex67bW3z.tmp 2025-06-24T20:37:01.076084Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/004888/r3tmp/yandex67bW3z.tmp 2025-06-24T20:37:01.076215Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:37:01.128500Z INFO: TTestServer started on Port 14541 GrpcPort 19596 2025-06-24T20:37:01.131892Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14541 PQClient connected to localhost:19596 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:37:01.988264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:37:02.137088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:37:02.606981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976720660, at schemeshard: 72057594046644480 waiting... 2025-06-24T20:37:04.983219Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617658791589879:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:04.983313Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:37:05.081593Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617683876287712:2277], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:05.081591Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617683876287701:2274], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:05.081740Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:05.088881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:37:05.094143Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519617662401450850:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:05.094215Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:37:05.119499Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519617683876287715:2278], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-24T20:37:05.204704Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519617683876287742:2175] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:37:05.701396Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617684561394771:2306], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:37:05.702217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:05.703488Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NWE5YTQ3OS04ZWY4NzU0ZC0zYTU1ZmVjNy1lOGUzNDZjNQ==, ActorId: [1:7519617684561394745:2299], ActorState: ExecuteState, TraceId: 01jyhth6nw857zfgmqe0kyh5vr, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:37:05.705277Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519617683876287749:2282], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:37:05.705552Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:37:05.707241Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZGU1Yjg4OWEtM2E3MmIzMDktOWM3NTRiNGUtMzg3NmNmOWY=, ActorId: [2:7519617683876287699:2273], ActorState: ExecuteState, TraceId: 01jyhth6df7mf1ms99asn66wqv, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:37:05.707628Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of tabl ... e_lag { } max_write_time_lag { } bytes_read { } max_committed_time_lag { } } } partitions { partition_id: 12 active: true partition_stats { partition_offsets { } last_write_time { seconds: 1750797435 nanos: 789000000 } max_write_time_lag { } bytes_written { } partition_node_id: 2 } partition_consumer_stats { last_read_time { seconds: 1750797435 nanos: 856000000 } max_read_time_lag { } max_write_time_lag { } bytes_read { } max_committed_time_lag { } } } partitions { partition_id: 13 active: true partition_stats { partition_offsets { } last_write_time { seconds: 1750797435 nanos: 789000000 } max_write_time_lag { } bytes_written { } partition_node_id: 2 } partition_consumer_stats { last_read_time { seconds: 1750797435 nanos: 831000000 } max_read_time_lag { } max_write_time_lag { } bytes_read { } max_committed_time_lag { } } } partitions { partition_id: 14 active: true partition_stats { partition_offsets { } last_write_time { seconds: 1750797435 nanos: 563000000 } max_write_time_lag { } bytes_written { } partition_node_id: 1 } partition_consumer_stats { last_read_time { seconds: 1750797435 nanos: 585000000 } max_read_time_lag { } max_write_time_lag { } bytes_read { } max_committed_time_lag { } } } } } } 2025-06-24T20:37:16.660120Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:157: new Describe consumer request 2025-06-24T20:37:16.660190Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:473: TDescribeConsumerActor for request path: "/Root/PQ//rt3.dc1--topic-x" consumer: "my-consumer" include_location: true 2025-06-24T20:37:16.660844Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7519617731806037842:2605]: Request location 2025-06-24T20:37:16.661311Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: 
[72075186224037900][rt3.dc1--topic-x] pipe [1:7519617731806037844:2606] connected; active server actors: 1 2025-06-24T20:37:16.661723Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 0, NodeId 2, Generation 2 2025-06-24T20:37:16.661738Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 1, NodeId 1, Generation 2 2025-06-24T20:37:16.661747Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037895, partitionId 2, NodeId 2, Generation 2 2025-06-24T20:37:16.661755Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 2, Generation 2 2025-06-24T20:37:16.661762Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037899, partitionId 4, NodeId 1, Generation 2 2025-06-24T20:37:16.661769Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 5, NodeId 1, Generation 2 2025-06-24T20:37:16.661776Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 6, NodeId 1, Generation 2 2025-06-24T20:37:16.661784Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037897, partitionId 7, NodeId 2, Generation 2 2025-06-24T20:37:16.661793Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037894, partitionId 8, NodeId 2, Generation 2 2025-06-24T20:37:16.661800Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037895, partitionId 9, NodeId 2, Generation 2 2025-06-24T20:37:16.661807Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 10, NodeId 1, Generation 2 2025-06-24T20:37:16.661815Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037898, partitionId 11, NodeId 1, Generation 2 2025-06-24T20:37:16.661821Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037894, partitionId 12, NodeId 2, Generation 2 2025-06-24T20:37:16.661827Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037897, partitionId 13, NodeId 2, Generation 2 2025-06-24T20:37:16.661834Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037898, partitionId 14, NodeId 1, Generation 2 Got response: 2025-06-24T20:37:16.662616Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [1:7519617731806037842:2605]: Got location 2025-06-24T20:37:16.664778Z node 
2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519617731806037844:2606] disconnected; active server actors: 1 2025-06-24T20:37:16.664809Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519617731806037844:2606] disconnected no session operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Topic.DescribeConsumerResult] { self { name: "rt3.dc1--topic-x/my-consumer" owner: "root@builtin" type: TOPIC created_at { plan_step: 1750797433644 tx_id: 281474976720679 } } consumer { name: "shared/my-consumer" important: true read_from { } attributes { key: "_service_type" value: "data-streams" } } partitions { active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 1 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 2 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 3 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 4 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 5 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 6 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 7 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 8 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 9 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 10 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 11 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 12 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 13 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 14 active: true partition_location { node_id: 1 generation: 2 } } } } } 2025-06-24T20:37:16.670000Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:157: new Describe consumer request 2025-06-24T20:37:16.670073Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:473: TDescribeConsumerActor for request path: "/Root/PQ//rt3.dc1--topic-x" consumer: "my-consumer" Got response: operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Topic.DescribeConsumerResult] { self { name: "rt3.dc1--topic-x/my-consumer" owner: "root@builtin" type: TOPIC created_at { plan_step: 1750797433644 tx_id: 281474976720679 } } consumer { name: "shared/my-consumer" important: true read_from { } attributes { key: "_service_type" value: "data-streams" } } partitions { active: true } partitions { partition_id: 1 active: true } partitions { partition_id: 2 active: true } partitions { partition_id: 3 active: true } partitions { partition_id: 4 active: true } partitions { partition_id: 5 active: true } partitions { partition_id: 6 active: true } partitions { partition_id: 7 active: true } partitions { partition_id: 8 active: true } partitions { partition_id: 9 active: true } partitions { partition_id: 10 active: true } partitions { partition_id: 11 active: true } partitions { partition_id: 12 active: true } partitions { partition_id: 13 active: true } partitions { partition_id: 14 active: true } } } } 2025-06-24T20:37:16.678983Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:157: new Describe 
consumer request 2025-06-24T20:37:16.679087Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:473: TDescribeConsumerActor for request path: "/Root/PQ//bad-topic" consumer: "my-consumer" include_stats: true include_location: true Got response: operation { ready: true status: SCHEME_ERROR issues { message: "path \'Root/PQ/bad-topic\' does not exist or you do not have access rights" issue_code: 500018 severity: 1 } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::ExecuteWriteQuery [GOOD] Test command err: Trying to start YDB, gRPC: 31671, MsgBus: 17733 2025-06-24T20:35:51.082683Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617367764373555:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:51.083140Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00336c/r3tmp/tmpCqChx0/pdisk_1.dat 2025-06-24T20:35:51.768393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:51.768497Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:51.780561Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:51.793123Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:51.827310Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617363469406078:2079] 1750797351003404 != 1750797351003407 TServer::EnableGrpc on GrpcPort 31671, node 1 2025-06-24T20:35:52.046545Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:52.107694Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:35:52.107717Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:35:52.107724Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:35:52.107825Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17733 TClient is connected to server localhost:17733 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:53.362502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:35:53.388403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:35:53.405768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:53.754613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:53.990999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:54.097949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:35:56.071265Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617367764373555:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:56.071333Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:56.885019Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617389239211488:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:56.885150Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:57.419130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:57.473019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:57.519114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:57.563914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:57.624427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:57.676806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:57.755685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:57.842754Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617393534179449:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:57.842877Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:57.843299Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617393534179454:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:57.849192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:57.865841Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617393534179456:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:35:57.933714Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617393534179507:3430] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:59.384328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... lled at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:30.651014Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617532695251539:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:30.651130Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:30.651660Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519617532695251544:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:30.656831Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:36:30.680716Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519617532695251546:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:36:30.769433Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519617532695251597:3428] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:36:33.734865Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:39.141130Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:36:39.141192Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:40.735369Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1097: SelfId: [4:7519617558465055937:2479], Table: `/Root/.tmp/sessions/OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==/Root/test_table_cas_14714297871758353938` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Timeout shardID=72075186224037922 2025-06-24T20:36:43.352292Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1097: SelfId: [4:7519617558465055937:2479], Table: `/Root/.tmp/sessions/OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==/Root/test_table_cas_14714297871758353938` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Timeout shardID=72075186224037922 2025-06-24T20:36:47.799429Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1097: SelfId: [4:7519617558465055937:2479], Table: `/Root/.tmp/sessions/OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==/Root/test_table_cas_14714297871758353938` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Timeout shardID=72075186224037922 2025-06-24T20:36:50.292416Z node 4 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_OVERLOADED;details=Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability;tx_id=0; 2025-06-24T20:36:50.295480Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:734: SelfId: [4:7519617558465055937:2479], Table: `/Root/.tmp/sessions/OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==/Root/test_table_cas_14714297871758353938` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Got OVERLOADED for table `/Root/.tmp/sessions/OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==/Root/test_table_cas_14714297871758353938`. ShardID=72075186224037922, Sink=[4:7519617558465055937:2479]. Ignored this error.{
: Error: Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability, code: 2006 } 2025-06-24T20:36:50.359816Z node 4 :TX_DATASHARD ERROR: datashard__stats.cpp:649: CPU usage 61.0225 is higher than threshold of 60 in-flight Tx: 0 immediate Tx: 0 readIterators: 0 at datashard: 72075186224037922 table: [/Root/.tmp/sessions/OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==/Root/test_table_cas_14714297871758353938] 2025-06-24T20:36:50.359945Z node 4 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_OVERLOADED;details=Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability;tx_id=0; 2025-06-24T20:36:50.363290Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:734: SelfId: [4:7519617558465055937:2479], Table: `/Root/.tmp/sessions/OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==/Root/test_table_cas_14714297871758353938` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Got OVERLOADED for table `/Root/.tmp/sessions/OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==/Root/test_table_cas_14714297871758353938`. ShardID=72075186224037922, Sink=[4:7519617558465055937:2479]. Ignored this error.{
: Error: Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability, code: 2006 } 2025-06-24T20:36:50.369800Z node 4 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_OVERLOADED;details=Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability;tx_id=0; 2025-06-24T20:36:50.369948Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:734: SelfId: [4:7519617558465055937:2479], Table: `/Root/.tmp/sessions/OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==/Root/test_table_cas_14714297871758353938` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Got OVERLOADED for table `/Root/.tmp/sessions/OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==/Root/test_table_cas_14714297871758353938`. ShardID=72075186224037922, Sink=[4:7519617558465055937:2479]. Ignored this error.{
: Error: Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability, code: 2006 } 2025-06-24T20:36:52.230143Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1097: SelfId: [4:7519617558465055937:2479], Table: `/Root/.tmp/sessions/OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==/Root/test_table_cas_14714297871758353938` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Timeout shardID=72075186224037922 2025-06-24T20:36:52.538471Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1097: SelfId: [4:7519617558465055937:2479], Table: `/Root/.tmp/sessions/OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==/Root/test_table_cas_14714297871758353938` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Timeout shardID=72075186224037922 2025-06-24T20:36:52.538519Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1081: SelfId: [4:7519617558465055937:2479], Table: `/Root/.tmp/sessions/OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==/Root/test_table_cas_14714297871758353938` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Retry failed: not found ShardID=72075186224037922 with Cookie=1 2025-06-24T20:36:53.135735Z node 4 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_OVERLOADED;details=Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability;tx_id=0; 2025-06-24T20:36:53.139282Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:734: SelfId: [4:7519617558465055937:2479], Table: `/Root/.tmp/sessions/OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==/Root/test_table_cas_14714297871758353938` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Got OVERLOADED for table `/Root/.tmp/sessions/OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==/Root/test_table_cas_14714297871758353938`. ShardID=72075186224037922, Sink=[4:7519617558465055937:2479]. Ignored this error.{
: Error: Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability, code: 2006 } 2025-06-24T20:36:53.214255Z node 4 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_OVERLOADED;details=Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability;tx_id=0; 2025-06-24T20:36:53.215789Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:734: SelfId: [4:7519617558465055937:2479], Table: `/Root/.tmp/sessions/OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==/Root/test_table_cas_14714297871758353938` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Got OVERLOADED for table `/Root/.tmp/sessions/OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==/Root/test_table_cas_14714297871758353938`. ShardID=72075186224037922, Sink=[4:7519617558465055937:2479]. Ignored this error.{
: Error: Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability, code: 2006 } 2025-06-24T20:36:54.671288Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1097: SelfId: [4:7519617558465055937:2479], Table: `/Root/.tmp/sessions/OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==/Root/test_table_cas_14714297871758353938` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Timeout shardID=72075186224037922 2025-06-24T20:36:56.686481Z node 4 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhtg7eg26qmamvc8kxns3fh", SessionId: ydb://session/3?node_id=4&id=OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==, Slow query, duration: 23.323017s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "\n CREATE TABLE test_table (\n PRIMARY KEY (id)\n ) AS SELECT\n ROW_NUMBER() OVER w AS id, data\n FROM\n AS_TABLE(ListReplicate(<|data: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'|>, 500000))\n WINDOW\n w AS (ORDER BY data)", parameters: 0b 2025-06-24T20:36:57.103720Z node 4 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhtg7eg26qmamvc8kxns3fh", SessionId: ydb://session/3?node_id=4&id=OGVjZjM1OGYtN2IwNTU4Mi0zNzM5ZTkzNy03NzQ3NGFiZg==, Slow query, duration: 23.740255s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "\n CREATE TABLE test_table (\n PRIMARY KEY (id)\n ) AS SELECT\n ROW_NUMBER() OVER w AS id, data\n FROM\n AS_TABLE(ListReplicate(<|data: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'|>, 500000))\n WINDOW\n w AS (ORDER BY data)", parameters: 0b 2025-06-24T20:36:57.111212Z --------------- Start update --------------- 2025-06-24T20:37:15.150595Z node 4 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhtgymzbscjkezygnzb3we0", SessionId: ydb://session/3?node_id=4&id=ZmNmZDIyYTMtZGEyYTgwMzUtNzllZDc3ZjktN2U4MmYxOWU=, Slow query, duration: 18.026619s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "\n UPDATE test_table SET data = \"a\"\n ", parameters: 0b ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapRange [GOOD] Test command err: Trying to start YDB, gRPC: 18202, MsgBus: 8709 2025-06-24T20:37:07.941634Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617694278927691:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:07.941690Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a44/r3tmp/tmp6Muw9c/pdisk_1.dat 2025-06-24T20:37:08.646846Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:08.646949Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:08.666094Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:08.667429Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617694278927667:2079] 1750797427934222 != 
1750797427934225 2025-06-24T20:37:08.728230Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18202, node 1 2025-06-24T20:37:08.940239Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:37:08.940261Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:37:08.940268Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:37:08.940396Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:37:09.019815Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8709 TClient is connected to server localhost:8709 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:37:10.128127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:37:10.153269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:37:10.173877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:37:10.357056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:37:10.706850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:37:10.915210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:37:12.947594Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617694278927691:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:13.017615Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:37:13.797644Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617720048733088:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:13.797795Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:14.266924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:14.334502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:14.382690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:14.464042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:14.514813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:14.560078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:14.622786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:14.796417Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617724343701051:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:14.796505Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:14.796930Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617724343701056:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:14.801904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:37:14.834453Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617724343701058:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:37:14.906664Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617724343701109:3431] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:37:17.074667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runn ... : log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T20:37:17.598251Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T20:37:17.598364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T20:37:17.598393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T20:37:17.598453Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T20:37:17.598493Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T20:37:17.598527Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T20:37:17.599075Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T20:37:17.599104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T20:37:17.604562Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;self_id=[1:7519617737228603399:2483];ev=NActors::IEventHandle;tablet_id=72075186224037923;tx_id=281474976710672;this=88923053486624;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750797437604;max=18446744073709551615;plan=0;src=[1:7519617698573895277:2139];cookie=362:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T20:37:17.610172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037930;self_id=[1:7519617737228603406:2486];ev=NActors::IEventHandle;tablet_id=72075186224037930;tx_id=281474976710672;this=88923029927424;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750797437609;max=18446744073709551615;plan=0;src=[1:7519617698573895277:2139];cookie=432:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T20:37:17.611623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;self_id=[1:7519617737228603408:2487];ev=NActors::IEventHandle;tablet_id=72075186224037926;tx_id=281474976710672;this=88923053488192;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750797437610;max=18446744073709551615;plan=0;src=[1:7519617698573895277:2139];cookie=392:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T20:37:17.613039Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7519617737228603400:2484];ev=NActors::IEventHandle;tablet_id=72075186224037929;tx_id=281474976710672;this=88923054472896;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750797437612;max=18446744073709551615;plan=0;src=[1:7519617698573895277:2139];cookie=422:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T20:37:17.614423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;self_id=[1:7519617737228603404:2485];ev=NActors::IEventHandle;tablet_id=72075186224037925;tx_id=281474976710672;this=88923053489760;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750797437614;max=18446744073709551615;plan=0;src=[1:7519617698573895277:2139];cookie=382:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T20:37:17.619440Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;self_id=[1:7519617737228603409:2488];ev=NActors::IEventHandle;tablet_id=72075186224037927;tx_id=281474976710672;this=88923054475584;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750797437618;max=18446744073709551615;plan=0;src=[1:7519617698573895277:2139];cookie=402:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T20:37:17.620999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;self_id=[1:7519617737228603418:2490];ev=NActors::IEventHandle;tablet_id=72075186224037928;tx_id=281474976710672;this=88923053224992;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750797437620;max=18446744073709551615;plan=0;src=[1:7519617698573895277:2139];cookie=412:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T20:37:17.621626Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;self_id=[1:7519617737228603417:2489];ev=NActors::IEventHandle;tablet_id=72075186224037924;tx_id=281474976710672;this=88923053225664;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750797437621;max=18446744073709551615;plan=0;src=[1:7519617698573895277:2139];cookie=372:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T20:37:17.630964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T20:37:17.631134Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T20:37:17.652178Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T20:37:17.653568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T20:37:17.659504Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T20:37:17.660293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T20:37:17.664480Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T20:37:17.665607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T20:37:17.666231Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T20:37:17.666867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T20:37:17.677669Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T20:37:17.680766Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T20:37:17.681489Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T20:37:17.684307Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T20:37:17.689942Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T20:37:17.698352Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T20:37:17.703058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T20:37:17.705594Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T20:37:17.714936Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T20:37:17.721241Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T20:37:17.908622Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; 2025-06-24T20:37:17.909337Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; 2025-06-24T20:37:17.910172Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;self_id=[1:7519617737228603406:2486];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037930;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037926;receive=72075186224037931; 2025-06-24T20:37:17.910694Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; >> TFstClassSrcIdPQTest::TestTableCreated [GOOD] >> TFstClassSrcIdPQTest::NoMapping >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-43 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-44 >> DataShardVolatile::UpsertBrokenLockArbiter+UseSink [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiter-UseSink >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest >> TTopicApiDescribes::GetPartitionDescribe [GOOD] Test command err: 2025-06-24T20:37:02.202226Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617670238220725:2172];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:02.239482Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 
2025-06-24T20:37:02.778714Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:37:02.785295Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519617670159420115:2244];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:02.785911Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:37:02.785940Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047f6/r3tmp/tmpuAgiaz/pdisk_1.dat 2025-06-24T20:37:03.187451Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:37:03.299374Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:03.307690Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:03.307830Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:03.308054Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:03.308103Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:03.308428Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:37:03.333299Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:37:03.333548Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:37:03.336143Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:37:03.347422Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 3536, node 1 2025-06-24T20:37:03.665266Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0047f6/r3tmp/yandexLTaP2H.tmp 2025-06-24T20:37:03.665308Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0047f6/r3tmp/yandexLTaP2H.tmp 2025-06-24T20:37:03.665500Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0047f6/r3tmp/yandexLTaP2H.tmp 2025-06-24T20:37:03.665662Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:37:03.831399Z INFO: TTestServer started on Port 61289 GrpcPort 3536 TClient is connected to server localhost:61289 PQClient connected to localhost:3536 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:37:04.645545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:37:04.980168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T20:37:07.195405Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617670238220725:2172];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:07.195521Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:37:07.356139Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519617670159420115:2244];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:07.356268Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:37:08.470756Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617695929223996:2272], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:08.470831Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:08.471163Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617695929224007:2275], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:08.495219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:37:08.529001Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519617695929224010:2276], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-24T20:37:08.615976Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519617695929224039:2134] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:37:09.002435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:09.007220Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519617695929224046:2280], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:37:09.009505Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=OTEwY2EyYS1jYzQ5NTU1Mi05ZjkzNjFmMC1jZTMwOTQ3NQ==, ActorId: [2:7519617695929223994:2271], ActorState: ExecuteState, TraceId: 01jyhth9qk9g4j286xbgesdq03, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:37:09.007988Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617696008025613:2312], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:37:09.009925Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OWFlY2IzNDYtMWViMDQzMDktYjlmZWVhM2MtMjMzNGI0ODc=, ActorId: [1:7519617696008025588:2305], ActorState: ExecuteState, TraceId: 01jyhth9tz14ax2nd0m31zqfq4, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:37:09.012132Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:37:09.013313Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access p ... : 6, State: StateInit] bootstrapping 6 [2:7519617734583930986:2445] 2025-06-24T20:37:17.057826Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72075186224037896] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:37:17.057861Z node 2 :PERSQUEUE INFO: pq_impl.cpp:787: [PQ: 72075186224037896] has a tx writes info 2025-06-24T20:37:17.058867Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037896, Partition: 5, State: StateInit] bootstrapping 5 [2:7519617734583930995:2448] 2025-06-24T20:37:17.067175Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037896, Partition: 10, State: StateInit] bootstrapping 10 [2:7519617734583930996:2449] 2025-06-24T20:37:17.067647Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72075186224037898] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:37:17.067672Z node 2 :PERSQUEUE INFO: pq_impl.cpp:787: [PQ: 72075186224037898] has a tx writes info 2025-06-24T20:37:17.068137Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037898, Partition: 14, State: StateInit] bootstrapping 14 [2:7519617734583931030:2451] 2025-06-24T20:37:17.069437Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037898, Partition: 11, State: StateInit] bootstrapping 11 [2:7519617734583931037:2453] 2025-06-24T20:37:17.073036Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic-x:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-24T20:37:17.073076Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037893, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 1 generation 2 [2:7519617734583930985:2444] 2025-06-24T20:37:17.074495Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic-x:6:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T20:37:17.074527Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037893, Partition: 6, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 6 generation 2 [2:7519617734583930986:2445] 2025-06-24T20:37:17.074722Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic-x:5:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T20:37:17.074744Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037896, Partition: 5, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 5 generation 2 [2:7519617734583930995:2448] 2025-06-24T20:37:17.074767Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic-x:10:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T20:37:17.074792Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037896, Partition: 10, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 10 generation 2 [2:7519617734583930996:2449] 2025-06-24T20:37:17.075087Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic-x:14:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T20:37:17.075112Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037898, Partition: 14, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 14 generation 2 [2:7519617734583931030:2451] 2025-06-24T20:37:17.076531Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037900][rt3.dc1--topic-x] TEvClientConnected TabletId 72075186224037896, NodeId 2, Generation 2 2025-06-24T20:37:17.076562Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037900][rt3.dc1--topic-x] TEvClientConnected TabletId 72075186224037893, NodeId 2, Generation 2 2025-06-24T20:37:17.076864Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic-x:11:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-24T20:37:17.076926Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037898, Partition: 11, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 11 generation 2 [2:7519617734583931037:2453] 2025-06-24T20:37:17.078813Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037900][rt3.dc1--topic-x] TEvClientConnected TabletId 72075186224037898, NodeId 2, Generation 2 2025-06-24T20:37:17.113017Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7519617730367765847:2512]: Request location 2025-06-24T20:37:17.113552Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 1, NodeId 2, Generation 2 2025-06-24T20:37:17.113610Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [1:7519617730367765847:2512]: Got location 2025-06-24T20:37:17.114405Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519617730367765866:2516] disconnected; active server actors: 1 2025-06-24T20:37:17.114451Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519617730367765866:2516] disconnected no session Got response: operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Topic.DescribePartitionResult] { partition { partition_id: 1 active: true partition_location { node_id: 2 generation: 2 } } } } } 2025-06-24T20:37:17.118868Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:166: new Describe partition request 2025-06-24T20:37:17.118964Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1209: TDescribePartitionActor for request path: "/Root/PQ//rt3.dc1--topic-x" partition_id: 3 include_stats: true include_location: true 2025-06-24T20:37:17.119008Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1219: TDescribePartitionActor[1:7519617734662733435:2534]: Bootstrap 2025-06-24T20:37:17.119586Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7519617734662733435:2534]: Request location 2025-06-24T20:37:17.119823Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519617734662733438:2536] connected; active server actors: 1 2025-06-24T20:37:17.119872Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 1, Generation 2 2025-06-24T20:37:17.120019Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [1:7519617734662733435:2534]: Got location 2025-06-24T20:37:17.120102Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519617734662733438:2536] disconnected; active server actors: 1 2025-06-24T20:37:17.120115Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519617734662733438:2536] disconnected no session Got response: operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Topic.DescribePartitionResult] { partition { partition_id: 3 active: true partition_stats { partition_offsets { } last_write_time { seconds: 1750797436 nanos: 932000000 } max_write_time_lag { } bytes_written { } partition_node_id: 1 } partition_location { node_id: 1 generation: 2 } } } } } 2025-06-24T20:37:17.122872Z node 1 
:PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:166: new Describe partition request 2025-06-24T20:37:17.122967Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1209: TDescribePartitionActor for request path: "/Root/PQ//bad-topic" include_stats: true include_location: true 2025-06-24T20:37:17.123013Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1219: TDescribePartitionActor[1:7519617734662733442:2538]: Bootstrap Got response: operation { ready: true status: SCHEME_ERROR issues { message: "path \'Root/PQ/bad-topic\' does not exist or you do not have access rights" issue_code: 500018 severity: 1 } } 2025-06-24T20:37:17.741534Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976720682, task: 1, CA Id [1:7519617734662733467:2541]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 0 2025-06-24T20:37:17.776752Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976720682, task: 1, CA Id [1:7519617734662733467:2541]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-24T20:37:17.835945Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976720682, task: 1, CA Id [1:7519617734662733467:2541]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-24T20:37:17.919251Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976720682, task: 1, CA Id [1:7519617734662733467:2541]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-24T20:37:17.999474Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976720682, task: 1, CA Id [1:7519617734662733467:2541]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-24T20:37:18.158278Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976720682, task: 1, CA Id [1:7519617734662733467:2541]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-24T20:37:18.190889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:37:18.190926Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:18.373247Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976720682, task: 1, CA Id [1:7519617734662733467:2541]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-24T20:37:18.496747Z node 1 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720683. Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-24T20:37:18.496886Z node 1 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [1:7519617738957700824:2542] TxId: 281474976720683. Ctx: { TraceId: 01jyhthjyh0rrw03hqs7e66g4z, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmQwNmU0NTQtYmU4NTFjNzAtZjFjNzk4NGItMzZmY2MxYjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-24T20:37:18.497116Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MmQwNmU0NTQtYmU4NTFjNzAtZjFjNzk4NGItMzZmY2MxYjM=, ActorId: [1:7519617734662733490:2542], ActorState: ExecuteState, TraceId: 01jyhthjyh0rrw03hqs7e66g4z, Create QueryResponse for error on request, msg: 2025-06-24T20:37:18.498879Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jyhthkavfd30jzpavjrre9zm" } } YdbStatus: UNAVAILABLE ConsumedRu: 258 } 2025-06-24T20:37:18.739567Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976720682, task: 1, CA Id [1:7519617734662733467:2541]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-24T20:37:19.363844Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976720682, task: 1, CA Id [1:7519617734662733467:2541]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 >> TxUsage::WriteToTopic_Demo_43_Table [GOOD] |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest >> EraseRowsTests::ConditionalEraseRowsShouldNotErase >> TSchemeShardLoginTest::RemoveUser_NonExisting-StrictAclCheck-false >> TxUsage::WriteToTopic_Demo_7_Query [GOOD] >> LocalPartition::WithoutPartitionUnknownEndpoint [GOOD] >> LocalPartition::WithoutPartitionPartitionRelocation |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TxUsage::WriteToTopic_Demo_43_Query >> TopicService::ThereAreGapsInTheOffsetRanges [GOOD] >> TSchemeShardTTLTests::ShouldCheckQuotas >> TSchemeShardLoginTest::RemoveUser_NonExisting-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveUser_NonExisting-StrictAclCheck-true >> TxUsage::WriteToTopic_Demo_8_Table >> TPersQueueTest::WriteAfterAlter [GOOD] >> TPersQueueTest::WhenTheTopicIsDeletedBeforeDataIsDecompressed_Compressed >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-4 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-5 |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::BuildAsyncIndexShouldSucceed >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-26 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-27 >> TSchemeShardLoginTest::RemoveUser_NonExisting-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveUser_Groups-StrictAclCheck-false >> TPersQueueTest::TopicServiceSimpleHappyWrites [GOOD] >> TPersQueueTest::WhenDisableNodeAndCreateTopic_ThenAllPartitionsAreOnOtherNode |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TxUsage::WriteToTopic_Demo_19_RestartNo_Table >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-26 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-27 >> TSchemeShardTTLTests::ShouldCheckQuotas [GOOD] >> TopicService::OnePartitionAndNoGapsInTheOffsets >> TSchemeShardLoginTest::RemoveUser_Groups-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveUser_Groups-StrictAclCheck-true >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-44 [GOOD] >> 
SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-45 |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> THiveTest::TestHiveBalancerOneTabletHighUsage [GOOD] >> THiveTest::TestHiveBalancerWithSpareNodes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::ShouldCheckQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:37:25.202656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:37:25.202761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:37:25.202814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:37:25.202864Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:37:25.202925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:37:25.202961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:37:25.203035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:37:25.203141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:37:25.204095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:37:25.204534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:37:25.294238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:37:25.294314Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:25.312731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:37:25.313240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:37:25.313449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:37:25.322793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:37:25.323014Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:37:25.323814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:37:25.324224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:37:25.328194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:37:25.328398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:37:25.329655Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:37:25.329726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:37:25.330007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:37:25.330081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:37:25.330202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:37:25.330312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:37:25.340090Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:37:25.505467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:37:25.505773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:37:25.505999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:37:25.506058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:37:25.506384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:37:25.506463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, 
first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:37:25.511366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:37:25.511620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:37:25.511849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:37:25.511905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:37:25.511968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:37:25.512005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:37:25.514495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:37:25.514583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:37:25.514626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:37:25.516901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:37:25.516963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:37:25.517026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:37:25.517089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:37:25.521121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:37:25.523586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:37:25.523832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:37:25.524933Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:37:25.525103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:37:25.525171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:37:25.525536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:37:25.525602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:37:25.525789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:37:25.525893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:37:25.529369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:37:25.529425Z node 1 :FLAT_TX_SCHEMESHARD ... 46678944 2025-06-24T20:37:26.185303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:37:26.185490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:37:26.185548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:37:26.185603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 7 2025-06-24T20:37:26.185655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-24T20:37:26.186865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:37:26.186968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T20:37:26.187007Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-24T20:37:26.187057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3 2025-06-24T20:37:26.187100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-24T20:37:26.187217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-24T20:37:26.201364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 103 Step: 200 OrderId: 103 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72075186233409546 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 1584 } } CommitVersion { Step: 200 TxId: 103 } 2025-06-24T20:37:26.201469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 103, tablet: 72075186233409549, partId: 0 2025-06-24T20:37:26.201740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 103:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 103 Step: 200 OrderId: 103 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72075186233409546 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 1584 } } CommitVersion { Step: 200 TxId: 103 } 2025-06-24T20:37:26.201902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 103 Step: 200 OrderId: 103 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72075186233409546 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 1584 } } CommitVersion { Step: 200 TxId: 103 } 2025-06-24T20:37:26.203341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 549 RawX2: 4294969789 } Origin: 72075186233409549 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-24T20:37:26.203430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 103, tablet: 72075186233409549, partId: 0 2025-06-24T20:37:26.203598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 103:0, at schemeshard: 72057594046678944, message: Source { RawX1: 549 RawX2: 4294969789 } Origin: 72075186233409549 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-24T20:37:26.203661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 103:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T20:37:26.203753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 103:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 549 
RawX2: 4294969789 } Origin: 72075186233409549 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-24T20:37:26.203825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 103:0, shardIdx: 72057594046678944:4, shard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T20:37:26.203869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:37:26.203910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 103:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-06-24T20:37:26.203976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 103:0 129 -> 240 2025-06-24T20:37:26.209676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T20:37:26.209847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T20:37:26.210161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:37:26.211372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:37:26.214408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T20:37:26.214478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-24T20:37:26.214617Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:37:26.214669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:37:26.214735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T20:37:26.214792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:37:26.214843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-24T20:37:26.214925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:410:2376] message: TxId: 103 2025-06-24T20:37:26.214989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T20:37:26.215038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:0 2025-06-24T20:37:26.215112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T20:37:26.217186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: 
DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T20:37:26.220395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T20:37:26.220485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:577:2513] TestWaitNotification: OK eventTxId 103 W0000 00:00:1750797446.221178 223853 text_format.cc:398] Warning parsing text-format NKikimrSchemeOp.TTableDescription: 9:35: text format contains deprecated field "ExpireAfterSeconds" TestModificationResults wait txId: 104 2025-06-24T20:37:26.224928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/SubDomain" OperationType: ESchemeOpCreateTable CreateTable { Name: "Table4" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 SysSettings { RunInterval: 1799999999 } Tiers { ApplyAfterSeconds: 3600 Delete { } } } } } } TxId: 104 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:37:26.225420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /MyRoot/SubDomain/Table4, opId: 104:0, at schemeshard: 72057594046678944 2025-06-24T20:37:26.225581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /MyRoot/SubDomain/Table4, opId: 104:0, schema: Name: "Table4" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 SysSettings { RunInterval: 1799999999 } Tiers { ApplyAfterSeconds: 3600 Delete { } } } }, at schemeshard: 72057594046678944 2025-06-24T20:37:26.226033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 104:1, propose status:StatusSchemeError, reason: TTL run interval cannot be less than limit: 1800, at schemeshard: 72057594046678944 2025-06-24T20:37:26.229657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 104, response: Status: StatusSchemeError Reason: "TTL run interval cannot be less than limit: 1800" TxId: 104 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:37:26.229972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 104, database: /MyRoot/SubDomain, subject: , status: StatusSchemeError, reason: TTL run interval cannot be less than limit: 1800, operation: CREATE TABLE, path: /MyRoot/SubDomain/Table4 TestModificationResult got TxId: 104, wait until txId: 104 >> TxUsage::WriteToTopic_Demo_36_Table [GOOD] >> TPersQueueTest::CheckACLForGrpcWrite [GOOD] >> TPersQueueTest::CheckACLForGrpcRead >> DataShardVolatile::DistributedWriteLostPlanThenSplit [GOOD] >> DataShardVolatile::DistributedOutOfOrderFollowerConsistency |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TSchemeShardTTLTests::BuildAsyncIndexShouldSucceed [GOOD] >> TSchemeShardLoginTest::RemoveUser_Groups-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-false >> TxUsage::WriteToTopic_Demo_36_Query ------- [TM] {asan, default-linux-x86_64, 
release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::BuildAsyncIndexShouldSucceed [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:37:25.818124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:37:25.818282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:37:25.818330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:37:25.818391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:37:25.818467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:37:25.818503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:37:25.818572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:37:25.818660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:37:25.819567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:37:25.819965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:37:25.940901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:37:25.940967Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:25.980276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:37:25.980835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:37:25.981045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:37:25.992097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:37:25.992350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:37:25.993174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:37:25.993565Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:37:25.997631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:37:25.997851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:37:25.999231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:37:25.999314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:37:25.999559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:37:25.999637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:37:25.999757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:37:26.000153Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:37:26.010342Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:37:26.207009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:37:26.207319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:37:26.207552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:37:26.207609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:37:26.207960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:37:26.208120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:37:26.214001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, 
response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:37:26.214271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:37:26.214509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:37:26.214590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:37:26.214660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:37:26.214699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:37:26.220485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:37:26.220592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:37:26.220642Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:37:26.223221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:37:26.223288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:37:26.223381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:37:26.223440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:37:26.227661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:37:26.230096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:37:26.230301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:37:26.231490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:37:26.231671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: 
TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:37:26.231743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:37:26.232102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:37:26.232184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:37:26.232376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:37:26.232465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:37:26.235093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:37:26.235173Z node 1 :FLAT_TX_SCHEMESHARD ... d: 72057594046678944 FAKE_COORDINATOR: Add transaction: 281474976710760 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710760 at step: 5000006 2025-06-24T20:37:27.359290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000006, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:37:27.359432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710760 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000006 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:37:27.359514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710760:0 HandleReply TEvOperationPlan: step# 5000006 2025-06-24T20:37:27.359584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710760:0 128 -> 240 2025-06-24T20:37:27.368371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710760:0, at schemeshard: 72057594046678944 2025-06-24T20:37:27.368492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 281474976710760:0 ProgressState 2025-06-24T20:37:27.368604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-24T20:37:27.368639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-24T20:37:27.368680Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-24T20:37:27.368723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-24T20:37:27.368758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-06-24T20:37:27.368844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:127:2151] message: TxId: 281474976710760 2025-06-24T20:37:27.368895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-24T20:37:27.368946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710760:0 2025-06-24T20:37:27.368981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710760:0 2025-06-24T20:37:27.369072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 FAKE_COORDINATOR: Erasing txId 281474976710760 2025-06-24T20:37:27.377793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-06-24T20:37:27.377927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976710760 2025-06-24T20:37:27.378021Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 102, txId# 281474976710760 2025-06-24T20:37:27.378148Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:388:2358], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, txId# 281474976710760 2025-06-24T20:37:27.380834Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking 2025-06-24T20:37:27.380950Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Unlocking, IsBroken: 0, IsCancellationRequested: 
0, Issue: , SubscribersCount: 1, CreateSender: [1:388:2358], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T20:37:27.381076Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-24T20:37:27.387875Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done 2025-06-24T20:37:27.388006Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:388:2358], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T20:37:27.388072Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:334: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-06-24T20:37:27.388264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T20:37:27.388331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:475:2434] TestWaitNotification: OK eventTxId 102 2025-06-24T20:37:27.388999Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:37:27.389314Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 352us result status StatusSuccess 2025-06-24T20:37:27.389869Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 
72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByValue" LocalPathId: 3 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Table [GOOD] >> TPersQueueTest::DirectReadBadCases [GOOD] >> TPersQueueTest::DirectReadWrongGeneration >> SystemView::ShowCreateTablePartitionPolicyIndexTable [FAIL] >> SystemView::StoragePoolsFields >> DataShardWrite::ExecSQLUpsertImmediate+EvWrite >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-false [GOOD] >> DataShardWrite::UpsertImmediate >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Query >> DataShardWrite::UpsertImmediateManyColumns ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-false [GOOD] Test command err: Leader for TabletID 
72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:37:23.981728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:37:23.981859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:37:23.981915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:37:23.981960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:37:23.982009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:37:23.982053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:37:23.982139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:37:23.982239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:37:23.983073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:37:23.983486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:37:24.083755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:37:24.083830Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:24.114740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:37:24.115249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:37:24.115455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:37:24.125030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:37:24.125254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:37:24.125971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:37:24.126329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at 
schemeshard: 72057594046678944 2025-06-24T20:37:24.129837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:37:24.130050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:37:24.131252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:37:24.131314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:37:24.131523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:37:24.131591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:37:24.131642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:37:24.131740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:37:24.145972Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:37:24.283424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:37:24.283626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:37:24.283807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:37:24.283843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:37:24.293686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:37:24.293839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:37:24.300310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:37:24.300569Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:37:24.300816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:37:24.300885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:37:24.300928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:37:24.300976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:37:24.303297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:37:24.303353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:37:24.303389Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:37:24.307055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:37:24.307130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:37:24.307232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:37:24.307283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:37:24.311034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:37:24.319119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:37:24.319384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:37:24.320433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:37:24.320589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 
72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:37:24.320652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:37:24.320960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:37:24.321018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:37:24.329670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:37:24.329835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:37:24.332767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:37:24.332849Z node 1 :FLAT_TX_SCHEMESHARD ... 06-24T20:37:29.117711Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:37:29.117770Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T20:37:29.117961Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T20:37:29.118141Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:37:29.118202Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:209:2209], at schemeshard: 72057594046678944, txId: 105, path id: 3 2025-06-24T20:37:29.118271Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:209:2209], at schemeshard: 72057594046678944, txId: 105, path id: 2 2025-06-24T20:37:29.119259Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:37:29.119414Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:37:29.119477Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 105 2025-06-24T20:37:29.119522Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], 
version: 5 2025-06-24T20:37:29.119578Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T20:37:29.120218Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:37:29.120335Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:37:29.120386Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-24T20:37:29.120416Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 7 2025-06-24T20:37:29.120483Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T20:37:29.120597Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-24T20:37:29.124921Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T20:37:29.125261Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 TestModificationResult got TxId: 105, wait until txId: 105 TestModificationResults wait txId: 106 2025-06-24T20:37:29.128572Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveUser { User: "user1" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:37:29.132144Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 106:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-06-24T20:37:29.132318Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#106:0 progress is 1/1 2025-06-24T20:37:29.132376Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T20:37:29.132427Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#106:0 progress is 1/1 2025-06-24T20:37:29.132466Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T20:37:29.132541Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:37:29.132608Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is 
published: false 2025-06-24T20:37:29.132650Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T20:37:29.132687Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 106:0 2025-06-24T20:37:29.132733Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 106, publications: 1, subscribers: 0 2025-06-24T20:37:29.132773Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 106, [OwnerId: 72057594046678944, LocalPathId: 1], 10 2025-06-24T20:37:29.140279Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 106, response: Status: StatusSuccess TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:37:29.140435Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusSuccess, operation: REMOVE USER, path: /MyRoot 2025-06-24T20:37:29.140677Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:37:29.140753Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:37:29.141025Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:37:29.141089Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:209:2209], at schemeshard: 72057594046678944, txId: 106, path id: 1 2025-06-24T20:37:29.141731Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 10 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T20:37:29.141868Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 10 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T20:37:29.141934Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 106 2025-06-24T20:37:29.141989Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 10 2025-06-24T20:37:29.142046Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:37:29.142196Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 106, subscribers: 0 2025-06-24T20:37:29.150158Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 
TestModificationResult got TxId: 106, wait until txId: 106 2025-06-24T20:37:29.150815Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Dir1/DirSub1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:37:29.151075Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Dir1/DirSub1" took 283us result status StatusSuccess 2025-06-24T20:37:29.151519Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Dir1/DirSub1" PathDescription { Self { Name: "DirSub1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 103 CreateStep: 5000002 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "user2" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:37:29.152173Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-24T20:37:29.152287Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Cannot find user: user1", at schemeshard: 72057594046678944 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-27 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-28 >> TxUsage::WriteToTopic_Demo_22_RestartNo_Query [GOOD] >> DataShardWrite::IncrementImmediate >> TPersQueueTest::PreferredCluster_DisabledRemoteClusterAndWriteSessionsWithDifferentPreferredClusterAndLaterRemoteClusterEnabled_SessionWithMismatchedClusterDiesAfterPreferredClusterEnabledAndOtherSessionsAlive [GOOD] >> TPersQueueTest::PreferredCluster_EnabledRemotePreferredClusterAndCloseClientSessionWithEnabledRemotePreferredClusterDelaySec_SessionDiesOnlyAfterDelay >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-5 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-6 >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Table >> 
SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-45 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-46 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-27 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-28 >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithValidTokenButWithoutACL_SessionClosedWithUnauthorizedError [GOOD] >> TPersQueueCommonTest::TestWriteWithRateLimiterWithBlobsRateLimit [GOOD] >> TPersQueueCommonTest::TestWriteWithRateLimiterWithUserPayloadRateLimit >> DataShardWrite::WriteImmediateBadRequest >> TxUsage::WriteToTopic_Demo_12_Query [GOOD] >> TPersQueueTest::SchemeOperationFirstClassCitizen [GOOD] >> TPersQueueTest::SchemeOperationsCheckPropValues >> TxUsage::WriteToTopic_Demo_13_Table >> DataShardVolatile::UpsertBrokenLockArbiter-UseSink [GOOD] >> DataShardVolatile::UpsertNoLocksArbiterRestart+UseSink >> EraseRowsTests::ConditionalEraseRowsShouldNotErase [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldFailOnVariousErrors |89.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut |89.8%| [LD] {RESULT} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut |89.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut >> DataShardWrite::UpsertImmediate [GOOD] >> DataShardWrite::ReplaceImmediate >> DataShardWrite::ExecSQLUpsertImmediate+EvWrite [GOOD] >> DataShardWrite::ExecSQLUpsertImmediate-EvWrite |89.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |89.8%| [LD] {RESULT} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |89.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut >> DataShardWrite::IncrementImmediate [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared-EvWrite-Volatile >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-6 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-7 >> DataShardWrite::UpsertImmediateManyColumns [GOOD] >> DataShardWrite::UpsertPrepared+Volatile >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-28 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-29 >> TxUsage::WriteToTopic_Demo_27_Table [GOOD] >> TxUsage::WriteToTopic_Demo_27_Query |89.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut |89.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut |89.8%| [LD] {RESULT} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut >> DataShardWrite::WriteImmediateBadRequest [GOOD] >> DataShardWrite::WriteImmediateSeveralOperations >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-46 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-47 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-28 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-29 >> TFstClassSrcIdPQTest::NoMapping [GOOD] >> TFstClassSrcIdPQTest::ProperPartitionSelected >> SystemView::StoragePoolsFields [GOOD] >> TPersQueueTest::SetupWriteSession [GOOD] >> TPersQueueTest::StoreNoMoreThanXSourceIDs >> TxUsage::Sinks_Oltp_WriteToTopics_1_Query [GOOD] >> KqpEffects::InsertAbort_Params_Success >> KqpInplaceUpdate::SingleRowArithm+UseSink >> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK 
[GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK_AutoCommit >> TxUsage::Sinks_Oltp_WriteToTopics_2_Table >> DataShardWrite::ReplaceImmediate [GOOD] >> DataShardWrite::ReplaceImmediate_DefaultValue >> TxUsage::WriteToTopic_Demo_19_RestartNo_Table [GOOD] >> DataShardWrite::UpsertPrepared+Volatile [GOOD] >> DataShardWrite::UpsertPrepared-Volatile >> DataShardWrite::ExecSQLUpsertImmediate-EvWrite [GOOD] >> DataShardWrite::DeleteImmediate >> TxUsage::WriteToTopic_Demo_36_Query [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartNo_Query >> DataShardVolatile::DistributedOutOfOrderFollowerConsistency [GOOD] >> DataShardVolatile::DistributedWriteRSNotAckedBeforeCommit >> TopicService::OnePartitionAndNoGapsInTheOffsets [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-7 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-8 >> TxUsage::WriteToTopic_Demo_8_Table [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-29 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-30 >> DataShardWrite::ExecSQLUpsertPrepared-EvWrite-Volatile [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared+EvWrite-Volatile >> EraseRowsTests::ConditionalEraseRowsShouldFailOnVariousErrors [GOOD] >> DataShardWrite::WriteImmediateSeveralOperations [GOOD] >> DataShardWrite::UpsertPreparedManyTables+Volatile >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-47 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-48 >> TxUsage::WriteToTopic_Demo_8_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldFailOnVariousErrors [GOOD] Test command err: 2025-06-24T20:37:28.462845Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:37:28.474796Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:37:28.477705Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003781/r3tmp/tmpOm1d0a/pdisk_1.dat 2025-06-24T20:37:29.402585Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:37:29.427384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:37:29.683732Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:29.703848Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797444151202 != 1750797444151206 2025-06-24T20:37:29.769096Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:29.769239Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:29.789483Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:37:30.007889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:30.205305Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T20:37:30.205619Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:37:30.334119Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:37:30.334299Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:37:30.377329Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T20:37:30.377448Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:37:30.377526Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:37:30.405107Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:37:30.405352Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:37:30.405452Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T20:37:30.418516Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:37:30.475578Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:37:30.484180Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:37:30.484397Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T20:37:30.484451Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:37:30.484509Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:37:30.484558Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:37:30.495851Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:37:30.496011Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:37:30.496090Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:37:30.496156Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:37:30.496202Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:37:30.496247Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:37:30.496375Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T20:37:30.511391Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:37:30.515509Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:37:30.515703Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:37:30.548333Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:37:30.559891Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:37:30.560022Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T20:37:30.741961Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:659:2549], serverId# [1:660:2550], sessionId# [0:0:0] 2025-06-24T20:37:30.759099Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T20:37:30.759232Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:37:30.760404Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:37:30.760461Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T20:37:30.760524Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T20:37:30.760804Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T20:37:30.760952Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T20:37:30.761410Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:37:30.761476Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T20:37:30.785208Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T20:37:30.803594Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:37:30.811502Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T20:37:30.811580Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:37:30.812832Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T20:37:30.812923Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:37:30.813808Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:37:30.813854Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:37:30.813929Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T20:37:30.824455Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T20:37:30.824630Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T20:37:30.824800Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:37:30.829906Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:37:30.838420Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T20:37:30.838517Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T20:37:30.854817Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T20:37:31.072766Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:31.072880Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:702:2579], DatabaseId: /Root, PoolId: ... main_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:37:40.547022Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037891 2025-06-24T20:37:40.547107Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037892 2025-06-24T20:37:40.594599Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037894 actor [2:1210:2989] 2025-06-24T20:37:40.594884Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:37:40.610733Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:37:40.610897Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:37:40.612631Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037894 2025-06-24T20:37:40.612730Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037894 2025-06-24T20:37:40.612786Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037894 2025-06-24T20:37:40.613164Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:37:40.613312Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:37:40.613392Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037894 persisting started state actor id [2:1226:2989] in generation 1 2025-06-24T20:37:40.639886Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:37:40.639983Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037894 2025-06-24T20:37:40.640093Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037894 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:37:40.640175Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037894, actorId: [2:1228:2999] 2025-06-24T20:37:40.640208Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037894 2025-06-24T20:37:40.640276Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037894, state: WaitScheme 2025-06-24T20:37:40.640313Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-24T20:37:40.640757Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037894 2025-06-24T20:37:40.640859Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037894 2025-06-24T20:37:40.640943Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 
72075186224037894 2025-06-24T20:37:40.640983Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:37:40.641022Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037894 TxInFly 0 2025-06-24T20:37:40.641059Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037894 2025-06-24T20:37:40.641439Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1207:2987], serverId# [2:1217:2993], sessionId# [0:0:0] 2025-06-24T20:37:40.641549Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-24T20:37:40.641757Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037894 txId 281474976715663 ssId 72057594046644480 seqNo 2:7 2025-06-24T20:37:40.641843Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715663 at tablet 72075186224037894 2025-06-24T20:37:40.642400Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037894 2025-06-24T20:37:40.654692Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-24T20:37:40.654832Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037894 not sending time cast registration request in state WaitScheme 2025-06-24T20:37:40.804685Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1234:3005], serverId# [2:1236:3007], sessionId# [0:0:0] 2025-06-24T20:37:40.805319Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715663 at step 4000 at tablet 72075186224037894 { Transactions { TxId: 281474976715663 AckTo { RawX1: 0 RawX2: 0 } } Step: 4000 MediatorID: 72057594046382081 TabletID: 72075186224037894 } 2025-06-24T20:37:40.805372Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-24T20:37:40.805895Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037894 2025-06-24T20:37:40.805940Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 active 0 active planned 0 immediate 0 planned 1 2025-06-24T20:37:40.805986Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [4000:281474976715663] in PlanQueue unit at 72075186224037894 2025-06-24T20:37:40.806236Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037894 loaded tx from db 4000:281474976715663 keys extracted: 0 2025-06-24T20:37:40.806362Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T20:37:40.806866Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037894 2025-06-24T20:37:40.806934Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037894 tableId# [OwnerId: 72057594046644480, LocalPathId: 8] schema version# 1 2025-06-24T20:37:40.807413Z node 2 
:TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037894 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T20:37:40.807801Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:37:40.809644Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037894 time 3500 2025-06-24T20:37:40.809692Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-24T20:37:40.810861Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037894 step# 4000} 2025-06-24T20:37:40.810927Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037894 2025-06-24T20:37:40.813354Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037894 2025-06-24T20:37:40.813449Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T20:37:40.813580Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-24T20:37:40.813900Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037891 2025-06-24T20:37:40.813966Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037892 2025-06-24T20:37:40.814045Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037893 2025-06-24T20:37:40.814101Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:37:40.814229Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037894 2025-06-24T20:37:40.814268Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037894 2025-06-24T20:37:40.814321Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037894 2025-06-24T20:37:40.814401Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [4000 : 281474976715663] from 72075186224037894 at tablet 72075186224037894 send result to client [2:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T20:37:40.814453Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037894 Sending notify to schemeshard 72057594046644480 txId 281474976715663 state Ready TxInFly 0 2025-06-24T20:37:40.814525Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-24T20:37:40.816754Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got 
TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037894 coordinator 72057594046316545 last step 0 next step 4000 2025-06-24T20:37:40.818239Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715663 datashard 72075186224037894 state Ready 2025-06-24T20:37:40.818305Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037894 Got TEvSchemaChangedResult from SS at 72075186224037894 2025-06-24T20:37:40.863081Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1263:3028], serverId# [2:1264:3029], sessionId# [0:0:0] 2025-06-24T20:37:40.863439Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037894, clientId# [2:1263:3028], serverId# [2:1264:3029], sessionId# [0:0:0] 2025-06-24T20:37:40.883792Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1268:3033], serverId# [2:1269:3034], sessionId# [0:0:0] 2025-06-24T20:37:40.884077Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037894, clientId# [2:1268:3033], serverId# [2:1269:3034], sessionId# [0:0:0] 2025-06-24T20:37:40.910963Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1273:3038], serverId# [2:1274:3039], sessionId# [0:0:0] 2025-06-24T20:37:40.915490Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037894, clientId# [2:1273:3038], serverId# [2:1274:3039], sessionId# [0:0:0] >> TopicService::MultiplePartitionsAndNoGapsInTheOffsets >> TPersQueueTest::DirectReadWrongGeneration [GOOD] >> TPersQueueTest::DirectReadStop >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-29 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-30 >> LocalPartition::WithoutPartitionPartitionRelocation [GOOD] >> LocalPartition::DirectWriteWithoutDescribeResourcesPermission >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Query [GOOD] |89.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace |89.8%| [LD] {RESULT} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace |89.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace >> TPersQueueTest::WhenDisableNodeAndCreateTopic_ThenAllPartitionsAreOnOtherNode [GOOD] >> TPersQueueTest::WhenTheTopicIsDeletedAfterDecompressingTheData_Compressed |89.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql |89.8%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql |89.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Table >> DataShardVolatile::UpsertNoLocksArbiterRestart+UseSink [GOOD] >> DataShardVolatile::UpsertNoLocksArbiterRestart-UseSink |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> DataShardWrite::DeleteImmediate [GOOD] >> DataShardWrite::CancelImmediate >> DataShardWrite::ReplaceImmediate_DefaultValue [GOOD] >> DataShardWrite::UpdateImmediate >> TPersQueueTest::CheckACLForGrpcRead [GOOD] >> TPersQueueTest::CheckKillBalancer >> TxUsage::WriteToTopic_Demo_13_Table [GOOD] >> TxUsage::WriteToTopic_Demo_43_Query [GOOD] >> 
DataShardWrite::UpsertPrepared-Volatile [GOOD] >> DataShardWrite::UpsertNoLocksArbiter >> TPersQueueTest::WhenTheTopicIsDeletedBeforeDataIsDecompressed_Compressed [GOOD] >> TPersQueueTest::WhenTheTopicIsDeletedAfterReadingTheData_Compressed >> TxUsage::WriteToTopic_Demo_13_Query |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TxUsage::WriteToTopic_Demo_44_Table >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-30 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-49 >> DataShardWrite::UpsertPreparedManyTables+Volatile [GOOD] >> DataShardWrite::UpsertPreparedManyTables-Volatile |89.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order |89.8%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order |89.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-48 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-1 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-8 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-9 >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Table [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared+EvWrite-Volatile [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared-EvWrite+Volatile >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Query |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-30 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-31 >> TxUsage::WriteToTopic_Demo_37_Table >> DataShardWrite::UpdateImmediate [GOOD] >> DataShardWrite::RejectOnChangeQueueOverflow >> DataShardWrite::CancelImmediate [GOOD] >> DataShardWrite::DeletePrepared+Volatile |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TPersQueueTest::PreferredCluster_EnabledRemotePreferredClusterAndCloseClientSessionWithEnabledRemotePreferredClusterDelaySec_SessionDiesOnlyAfterDelay [GOOD] >> TPersQueueTest::PreferredCluster_NonExistentPreferredCluster_SessionDiesOnlyAfterDelay >> ExternalIndex::Simple [GOOD] >> TPersQueueTest::SchemeOperationsCheckPropValues [GOOD] >> TPersQueueTest::ReadRuleServiceType >> DataShardWrite::UpsertNoLocksArbiter [GOOD] >> DataShardWrite::UpsertLostPrepareArbiter >> DataShardWrite::UpsertPreparedManyTables-Volatile [GOOD] >> DataShardWrite::UpsertPreparedNoTxCache+Volatile |89.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record |89.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record |89.9%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-49 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-50 >> KqpEffects::InsertAbort_Params_Success [GOOD] >> KqpEffects::InsertAbort_Params_Duplicates+UseSink >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-1 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-2 >> KqpInplaceUpdate::SingleRowArithm+UseSink 
[GOOD] >> KqpInplaceUpdate::Negative_SingleRowWithValueCast-UseSink |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest |89.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan |89.9%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan >> DataShardVolatile::DistributedWriteRSNotAckedBeforeCommit [GOOD] >> DataShardVolatile::DistributedUpsertRestartBeforePrepare+UseSink |89.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::StoragePoolsFields [GOOD] Test command err: 2025-06-24T20:34:14.831828Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519616951821009762:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:14.832097Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b08/r3tmp/tmpnvXLKi/pdisk_1.dat 2025-06-24T20:34:15.351211Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:15.365053Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:15.365187Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:15.384951Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10513, node 1 2025-06-24T20:34:15.967251Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:16.007344Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:16.007368Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:16.007377Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:16.007538Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24494 TClient is connected to server localhost:24494 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:17.162792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:19.493647Z node 1 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:276: Subscribed for config changes 2025-06-24T20:34:19.493684Z node 1 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:331: Updated config 2025-06-24T20:34:19.585417Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616973295847278:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:19.585527Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:19.589565Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519616973295847290:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:19.595913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:19.631224Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519616973295847292:2307], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:34:19.718354Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519616973295847365:2752] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:19.719700Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1186: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-24T20:34:19.719879Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:413: Perform request, TraceId.SpanIdPtr: 0x000050F000254A38 2025-06-24T20:34:19.719935Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:423: Received compile request, sender: [1:7519616973295847249:2298], queryUid: , queryText: "\n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n ", keepInCache: 1, split: 0{ TraceId: 01jyhtc4sy9tpwm2qbecrxbmyf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjE0YjlhNmYtN2JlYzMzNGUtYTcyZjM2NDItMTlmZGI3NjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default} 2025-06-24T20:34:19.720074Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1186: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-24T20:34:19.720147Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:519: Added request to queue, sender: [1:7519616973295847249:2298], queueSize: 1 
2025-06-24T20:34:19.720898Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:880: Created compile actor, sender: [1:7519616973295847249:2298], compileActor: [1:7519616973295847379:2311] 2025-06-24T20:34:19.825610Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519616951821009762:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:19.825695Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:20.065602Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtc4sy9tpwm2qbecrxbmyf, SessionId: CompileActor 2025-06-24 20:34:20.065 INFO ydb-core-sys_view-ut(pid=175978, tid=0x00007FB91A49D640) [core dq] kqp_host.cpp:1375: Good place to weld in 2025-06-24T20:34:20.067502Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtc4sy9tpwm2qbecrxbmyf, SessionId: CompileActor 2025-06-24 20:34:20.066 INFO ydb-core-sys_view-ut(pid=175978, tid=0x00007FB91A49D640) [core dq] kqp_host.cpp:1380: Compiled query: ( (let $1 (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"accessKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"secretAccessKey")))))) (let $2 (Write! $1 (DataSink '"kikimr" '"db") (Key '('objectId (String '"secretKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"fakeSecret")))))) (let $3 '('('"auth_method" '"AWS") '('"aws_access_key_id_secret_name" '"accessKey") '('"aws_region" '"ru-central1") '('"aws_secret_access_key_secret_name" '"secretKey") '('"location" '"http://fake.fake/olap-tier1") '('"source_type" '"ObjectStorage"))) (return (Write! $2 (DataSink '"kikimr" '"db") (Key '('objectId (String '"/Root/tier1")) '('typeId (String '"EXTERNAL_DATA_SOURCE"))) (Void) '('('mode 'createObject) '('features $3)))) ) 2025-06-24T20:34:20.068372Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtc4sy9tpwm2qbecrxbmyf, SessionId: CompileActor 2025-06-24 20:34:20.067 INFO ydb-core-sys_view-ut(pid=175978, tid=0x00007FB91A49D640) [KQP] kqp_host.cpp:1386: Compiled query: ( (let $1 (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"accessKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"secretAccessKey")))))) (let $2 (Write! $1 (DataSink ' ... 
igrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b08/r3tmp/tmpMNtEBM/pdisk_1.dat 2025-06-24T20:37:19.743063Z node 36 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:19.787762Z node 36 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(36, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:19.787924Z node 36 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(36, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:19.803396Z node 36 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(36, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:37:19.840959Z node 36 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 36 Type# 268639257 TServer::EnableGrpc on GrpcPort 14211, node 36 2025-06-24T20:37:19.926125Z node 36 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:37:19.926163Z node 36 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:37:19.926176Z node 36 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:37:19.926414Z node 36 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:37:20.179306Z node 36 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26203 TClient is connected to server localhost:26203 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:37:21.196909Z node 36 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:37:23.951321Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[36:7519617738991885292:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:23.951472Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:37:26.896290Z node 36 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:276: Subscribed for config changes 2025-06-24T20:37:26.896345Z node 36 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:331: Updated config 2025-06-24T20:37:26.933414Z node 36 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1482: TraceId: "01jyhthp8v9m09zpbnz16jccsx", Request deadline has expired for 0.627220s seconds (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession 2025-06-24T20:37:29.754949Z node 41 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[41:7519617785486422926:2151];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b08/r3tmp/tmpmGH9j7/pdisk_1.dat 2025-06-24T20:37:29.891241Z node 41 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:37:29.999106Z node 41 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:30.004375Z node 41 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [41:7519617785486422788:2079] 1750797449653845 != 1750797449653848 2025-06-24T20:37:30.028049Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(41, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:30.028222Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(41, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:30.033447Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(41, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13796, node 41 2025-06-24T20:37:30.130843Z node 41 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:37:30.130888Z node 41 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:37:30.130906Z node 41 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:37:30.131182Z node 41 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29495 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T20:37:30.743769Z node 41 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:37:30.806116Z node 41 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:37:34.726243Z node 41 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[41:7519617785486422926:2151];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:34.726364Z node 41 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:37:36.389337Z node 41 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [41:7519617815551194504:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:36.389540Z node 41 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:36.390138Z node 41 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [41:7519617815551194516:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:36.398961Z node 41 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:37:36.430235Z node 41 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [41:7519617815551194518:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:37:36.515992Z node 41 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [41:7519617815551194569:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:37:36.742639Z node 41 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhthzka0wbyv39kyvggrdc1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=41&id=NzA3ZjE4MzQtZWM0NmQyOTgtMWYzMWY0NTQtNzIyOTBhZTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:37:36.751373Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [41:7519617815551194613:2307], owner: [41:7519617815551194609:2305], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T20:37:36.753849Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [41:7519617815551194613:2307], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-24T20:37:36.819102Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519617815551194613:2307], row count: 1, finished: 1 2025-06-24T20:37:36.819510Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [41:7519617815551194613:2307], owner: [41:7519617815551194609:2305], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T20:37:36.826739Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797456737, txId: 281474976710660] shutting down >> TPQTest::TestWriteOffsetWithBigMessage [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-9 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-49 >> TFstClassSrcIdPQTest::ProperPartitionSelected [GOOD] >> TPQCompatTest::DiscoverTopics >> TxUsage::WriteToTopic_Demo_19_RestartNo_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::TestWriteOffsetWithBigMessage [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:111:2057] recipient: [1:104:2136] 2025-06-24T20:33:48.907783Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:48.907888Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927938 is [1:156:2175] sender: [1:157:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:182:2057] recipient: [1:14:2061] 
2025-06-24T20:33:48.947555Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:48.978476Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1 actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-24T20:33:48.979573Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:188:2199] 2025-06-24T20:33:48.982439Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:188:2199] 2025-06-24T20:33:48.984785Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:189:2200] 2025-06-24T20:33:48.986710Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:189:2200] 2025-06-24T20:33:48.994053Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner|8fdac69c-8d2ef424-88e3261-6abcde40_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner 2025-06-24T20:33:48.994610Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner|e8c28db2-caa29ffc-c705973-1dde1f9_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner 2025-06-24T20:33:48.999602Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner|323d6f14-5d8fbddd-2b94814d-95039d78_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner 2025-06-24T20:33:50.237914Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|bb57f8e1-b7a9ed61-54256a29-32c8a53c_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T20:33:50.238652Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner2|29e0d353-1caaef8e-5ba3171-73ec3ad7_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner2 2025-06-24T20:33:50.239772Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner|50188cac-73b26c95-f89e702e-23da5025_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner 2025-06-24T20:33:51.368722Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner|d5ce3417-1a1eccca-c9d7d16-a8a8a007_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:106:2057] recipient: [2:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:106:2057] recipient: [2:104:2136] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:111:2057] recipient: [2:104:2136] 2025-06-24T20:33:52.019974Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 
2025-06-24T20:33:52.020066Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:152:2057] recipient: [2:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:152:2057] recipient: [2:150:2171] Leader for TabletID 72057594037927938 is [2:156:2175] sender: [2:157:2057] recipient: [2:150:2171] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:180:2057] recipient: [2:14:2061] 2025-06-24T20:33:52.039417Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:52.040585Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 2 actor [2:178:2191] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-24T20:33:52.041264Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:186:2197] 2025-06-24T20:33:52.044173Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [2:186:2197] 2025-06-24T20:33:52.046166Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:187:2198] 2025-06-24T20:33:52.049082Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [2:187:2198] 2025-06-24T20:33:52.059219Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner|9c323c05-4dd9249b-14c221d2-c1f49bf7_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner 2025-06-24T20:33:52.059790Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner|64f9d4cc-c7928e95-7086e3d-19f49359_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner 2025-06-24T20:33:52.060432Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner|81dd6e9a-e51bb3f8-97677faf-48a4e7ed_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner 2025-06-24T20:33:53.368076Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|85ab71f9-80e5bb95-f85ef7f6-f63ea897_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T20:33:53.368754Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner2|d3d64d4c-c8870319-780de624-84dc8680_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner2 2025-06-24T20:33:53.369697Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner|3f6a53e-261f2437-b615d410-8ff35a9c_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner 2025-06-24T20:33:54.692069Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner|245223b0-82aacecd-33ce792-1e2896ec_0 generated for partition 0 topic 
'rt3.dc1--asdfgs--topic' owner owner Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:106:2057] recipient: [3:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:106:2057] recipient: [3:104:2136] Leader for TabletID 72057594037927937 is [3:110:2140] sender: [3:111:2057] recipient: [3:104:2136] 2025-06-24T20:33:55.168405Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:55.168487Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:152:2057] recipient: [3:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:152:2057] recipient: [3:150:2171] Leader for TabletID 72057594037927938 is [3:156:2175] sender: [3:157:2057] recipient: [3:150:2171] Leader for TabletID 72057594037927937 is [3:110:2140] sender: [3:182:2057] recipient: [3:14:2061] 2025-06-24T20:33:55.198620Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:33:55.199323Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 3 actor [3:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } 2025-06-24T20:33:55.199864Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [3:188:2199] 2025-06-24T20:33:55.202080Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [3:188:2199] 2025-06-24T20:33:55.203723Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [3:189:2200] 2025-06-24T20:33:55.205119Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [3:189:2200] 2025-06-24T20:33:55.214603Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner|76153e30-4b5f4e48-7178eba1-723a662c_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner 2025-06-24T20:33:55.215060Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner|6c705133-13428818-d6912f3a-6f1b6bdc_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner 2025-06-24T20:33:55.215614Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner|b3b70181-bd875f61-eccc5c69-eab9c339_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner 2025-06-24T20:33:56.484107Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|18ac3128-298454e5-95424f2b-ee395864_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T20:33:56.484701Z node 3 :PERSQUEUE 
INFO: ownerinfo.cpp:30: new Cookie owner2|608138be-5c466627-e7495379-34100483_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner2 2025-06-24T20:33:56.485371Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner|c1000f75-95099896-ccd26648-4f11506f_0 generated for partition 0 topic ... .dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T20:37:46.923689Z node 82 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [82:268:2261] 2025-06-24T20:37:46.924714Z node 82 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T20:37:46.924771Z node 82 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 2 generation 3 [82:269:2262] !Reboot 72057594037927937 (actor [82:110:2140]) rebooted! !Reboot 72057594037927937 (actor [82:110:2140]) tablet resolver refreshed! new actor is[82:214:2215] Leader for TabletID 72057594037927937 is [82:214:2215] sender: [82:328:2057] recipient: [82:14:2061] 2025-06-24T20:37:48.436363Z node 82 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|45f14549-d218c0e5-62a79efb-d1d45f76_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T20:37:48.734384Z node 82 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|cc74df3b-e1e83a3-c1c4163a-9f334ae4_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T20:37:49.073190Z node 82 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T20:37:49.098085Z node 82 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|6b56e5ad-6d89a57e-4cde44ad-71ae0c1c_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T20:37:49.458823Z node 82 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|19e220b7-ea5ceb9d-67e32adf-e9cb84b5_0 generated for partition 2 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T20:37:49.541282Z node 82 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|2510cf6a-11b7e6bd-e15cc092-25642545_1 generated for partition 2 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T20:37:49.688021Z node 82 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T20:37:50.030703Z node 82 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. 
Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 Leader for TabletID 72057594037927937 is [0:0:0] sender: [83:106:2057] recipient: [83:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [83:106:2057] recipient: [83:104:2136] Leader for TabletID 72057594037927937 is [83:110:2140] sender: [83:111:2057] recipient: [83:104:2136] 2025-06-24T20:37:50.815330Z node 83 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:37:50.815420Z node 83 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [83:152:2057] recipient: [83:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [83:152:2057] recipient: [83:150:2171] Leader for TabletID 72057594037927938 is [83:156:2175] sender: [83:157:2057] recipient: [83:150:2171] Leader for TabletID 72057594037927937 is [83:110:2140] sender: [83:182:2057] recipient: [83:14:2061] 2025-06-24T20:37:50.841776Z node 83 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:37:50.843684Z node 83 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 83 actor [83:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 PartitionIds: 2 TopicName: "rt3.dc1--asdfgs--topic" Version: 83 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } Partitions { PartitionId: 2 } ReadRuleGenerations: 83 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } AllPartitions { PartitionId: 2 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 83 Important: true } 2025-06-24T20:37:50.844999Z node 83 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [83:188:2199] 2025-06-24T20:37:50.848538Z node 83 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [83:188:2199] 2025-06-24T20:37:50.850757Z node 83 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [83:189:2200] 2025-06-24T20:37:50.853344Z node 83 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [83:189:2200] 2025-06-24T20:37:50.855439Z node 83 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [83:190:2201] 2025-06-24T20:37:50.857897Z node 83 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 2 generation 2 [83:190:2201] 2025-06-24T20:37:50.889644Z node 83 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|4ddd256c-6b51260-579031a2-bc34257d_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T20:37:51.104196Z node 83 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 
default|be00129c-355b8da2-dcecbdc1-311b2244_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T20:37:51.314933Z node 83 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T20:37:51.343428Z node 83 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|69f3f1c3-88ec424c-ecc93a67-93fe83ef_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T20:37:51.639687Z node 83 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|831eaff3-24730ec3-e1e4d596-35b05a10_0 generated for partition 2 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T20:37:51.712716Z node 83 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|8cce9ff1-e8de58a7-462846d9-eef32f9a_1 generated for partition 2 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T20:37:51.775736Z node 83 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T20:37:51.995077Z node 83 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 Leader for TabletID 72057594037927937 is [0:0:0] sender: [84:106:2057] recipient: [84:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [84:106:2057] recipient: [84:104:2136] Leader for TabletID 72057594037927937 is [84:110:2140] sender: [84:111:2057] recipient: [84:104:2136] 2025-06-24T20:37:52.675695Z node 84 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:37:52.675807Z node 84 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [84:152:2057] recipient: [84:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [84:152:2057] recipient: [84:150:2171] Leader for TabletID 72057594037927938 is [84:156:2175] sender: [84:157:2057] recipient: [84:150:2171] Leader for TabletID 72057594037927937 is [84:110:2140] sender: [84:182:2057] recipient: [84:14:2061] 2025-06-24T20:37:52.707706Z node 84 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:37:52.709168Z node 84 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 84 actor [84:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 PartitionIds: 2 TopicName: "rt3.dc1--asdfgs--topic" Version: 84 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } Partitions { PartitionId: 2 } ReadRuleGenerations: 84 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } AllPartitions { PartitionId: 2 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 84 Important: true } 2025-06-24T20:37:52.710403Z node 84 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [84:188:2199] 2025-06-24T20:37:52.713788Z node 84 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, 
Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [84:188:2199] 2025-06-24T20:37:52.716219Z node 84 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [84:189:2200] 2025-06-24T20:37:52.718705Z node 84 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [84:189:2200] 2025-06-24T20:37:52.721236Z node 84 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [84:190:2201] 2025-06-24T20:37:52.723736Z node 84 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 2 generation 2 [84:190:2201] 2025-06-24T20:37:52.759833Z node 84 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|73367b3e-e1198576-c83858a0-4b594423_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T20:37:53.077110Z node 84 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d07988db-a10f7ca-32225890-24b8ee97_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T20:37:53.311929Z node 84 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T20:37:53.338081Z node 84 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|cbeace7-23ab3e08-f244e9cf-87d04af0_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T20:37:53.673028Z node 84 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|589f2c4b-21f18a0f-e8ddc84a-4472abec_0 generated for partition 2 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T20:37:53.745557Z node 84 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|eb7f6034-fb2ddb99-9052079-f45c16de_1 generated for partition 2 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T20:37:53.863185Z node 84 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T20:37:54.137841Z node 84 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. 
Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Table |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> DataShardWrite::ExecSQLUpsertPrepared-EvWrite+Volatile [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared+EvWrite+Volatile >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-31 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-32 >> DataShardVolatile::UpsertNoLocksArbiterRestart-UseSink [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiterRestart+UseSink >> DataShardWrite::RejectOnChangeQueueOverflow [GOOD] >> DataShardWrite::UpsertBrokenLockArbiter >> LocalPartition::DirectWriteWithoutDescribeResourcesPermission [GOOD] >> LocalPartition::WithoutPartitionWithSplit >> DataShardWrite::UpsertPreparedNoTxCache+Volatile [GOOD] >> DataShardWrite::UpsertPreparedNoTxCache-Volatile >> TopicService::MultiplePartitionsAndNoGapsInTheOffsets [GOOD] >> TProxyActorTest::TestDisconnectWhileAttaching |89.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |89.9%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |89.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks >> DataShardWrite::DeletePrepared+Volatile [GOOD] >> DataShardWrite::DeletePrepared-Volatile >> THiveTest::TestHiveBalancerWithSpareNodes [GOOD] |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest |89.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut |89.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut |89.9%| [LD] {RESULT} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Table [GOOD] |89.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx |89.9%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx |89.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-2 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-3 >> TxUsage::WriteToTopic_Demo_8_Query [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK_AutoCommit [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_PQv1 >> TxUsage::WriteToTopic_Demo_13_Query [GOOD] >> DataShardWrite::UpsertLostPrepareArbiter [GOOD] >> DataShardWrite::UpsertNoLocksArbiterRestart >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-50 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-51 >> TTopicYqlTest::DropTopicYql >> TPersQueueTest::DirectReadStop [GOOD] >> TPersQueueTest::DirectReadCleanCache >> TProxyActorTest::TestDisconnectWhileAttaching [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Query >> TxUsage::WriteToTopic_Demo_27_Query [GOOD] >> DataStreams::TestGetRecordsStreamWithSingleShard >> TxUsage::WriteToTopic_Demo_9_Table >> TTxDataShardMiniKQL::WriteKeyTooLarge ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> 
TProxyActorTest::TestDisconnectWhileAttaching [GOOD] Test command err: ... waiting for blocked registrations ... blocking NKikimr::NKesus::TEvKesus::TEvRegisterProxy from KESUS_PROXY_ACTOR to KESUS_TABLET_ACTOR cookie 0 ... waiting for blocked registrations (done) 2025-06-24T20:37:59.331272Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037927937] NodeDisconnected NodeId# 2 ... unblocking NKikimr::NKesus::TEvKesus::TEvRegisterProxy from KESUS_PROXY_ACTOR to KESUS_TABLET_ACTOR >> TxUsage::WriteToTopic_Demo_14_Table >> TxUsage::WriteToTopic_Demo_28_Table >> KqpEffects::InsertAbort_Params_Duplicates+UseSink [GOOD] >> KqpEffects::InsertAbort_Params_Duplicates-UseSink >> DataShardWrite::ExecSQLUpsertPrepared+EvWrite+Volatile [GOOD] >> DataShardWrite::InsertImmediate >> TTxDataShardMiniKQL::WriteKeyTooLarge [GOOD] >> TTxDataShardMiniKQL::WriteValueTooLarge >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-32 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-33 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-49 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-50 >> KqpInplaceUpdate::Negative_SingleRowWithValueCast-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> THiveTest::TestHiveBalancerWithSpareNodes [GOOD] Test command err: 2025-06-24T20:31:24.311010Z node 5 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-24T20:31:24.346446Z node 5 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } PDisks { NodeID: 4 PDiskID: 1 Path: "SectorMap:3:3200" PDiskGuid: 4 } PDisks { NodeID: 5 PDiskID: 1 Path: "SectorMap:4:3200" PDiskGuid: 5 } PDisks { NodeID: 6 PDiskID: 1 Path: "SectorMap:5:3200" PDiskGuid: 6 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:24.346722Z node 5 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 5 PDiskId# 1 Path# "SectorMap:4:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-24T20:31:24.351282Z node 5 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:31:24.351644Z node 5 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-24T20:31:24.352607Z node 5 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [5:159:2079] ControllerId# 72057594037932033 2025-06-24T20:31:24.352647Z node 5 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-24T20:31:24.352762Z node 5 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-24T20:31:24.352909Z node 5 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-24T20:31:24.364720Z node 5 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 
IsLimitedKeyless# false Marker# DSP02 2025-06-24T20:31:24.364791Z node 5 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-24T20:31:24.366932Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:158:2078] Create Queue# [5:166:2083] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.367093Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:158:2078] Create Queue# [5:167:2084] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.367263Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:158:2078] Create Queue# [5:168:2085] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.367448Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:158:2078] Create Queue# [5:169:2086] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.367606Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:158:2078] Create Queue# [5:170:2087] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.367762Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:158:2078] Create Queue# [5:171:2088] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.367896Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:158:2078] Create Queue# [5:172:2089] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.367924Z node 5 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-24T20:31:24.368016Z node 5 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [5:159:2079] 2025-06-24T20:31:24.368075Z node 5 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [5:159:2079] 2025-06-24T20:31:24.368123Z node 5 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-24T20:31:24.368167Z node 5 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-24T20:31:24.368802Z node 5 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-24T20:31:24.368929Z node 6 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-24T20:31:24.371594Z node 6 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } PDisks { NodeID: 4 PDiskID: 1 Path: "SectorMap:3:3200" PDiskGuid: 4 } PDisks { NodeID: 5 PDiskID: 1 Path: "SectorMap:4:3200" PDiskGuid: 5 } PDisks { NodeID: 6 PDiskID: 1 Path: "SectorMap:5:3200" PDiskGuid: 6 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:24.371742Z node 6 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 6 PDiskId# 1 Path# "SectorMap:5:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-24T20:31:24.372202Z node 6 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:31:24.372399Z node 6 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} 
StartLocalProxy GroupId# 0 2025-06-24T20:31:24.373272Z node 6 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [6:182:2080] ControllerId# 72057594037932033 2025-06-24T20:31:24.373305Z node 6 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-24T20:31:24.373376Z node 6 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-24T20:31:24.373484Z node 6 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-24T20:31:24.390539Z node 6 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-24T20:31:24.390601Z node 6 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-24T20:31:24.392495Z node 6 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [6:181:2079] Create Queue# [6:189:2084] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.392656Z node 6 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [6:181:2079] Create Queue# [6:190:2085] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.392819Z node 6 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [6:181:2079] Create Queue# [6:191:2086] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.392964Z node 6 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [6:181:2079] Create Queue# [6:192:2087] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.393126Z node 6 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [6:181:2079] Create Queue# [6:193:2088] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.393318Z node 6 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [6:181:2079] Create Queue# [6:194:2089] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.393455Z node 6 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [6:181:2079] Create Queue# [6:195:2090] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.393480Z node 6 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-24T20:31:24.393547Z node 6 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [6:182:2080] 2025-06-24T20:31:24.393576Z node 6 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [6:182:2080] 2025-06-24T20:31:24.393622Z node 6 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-24T20:31:24.393675Z node 6 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-24T20:31:24.394069Z node 6 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-24T20:31:24.394317Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-24T20:31:24.397029Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } PDisks { NodeID: 4 PDiskID: 1 Path: "SectorMap:3:3200" PDiskGuid: 4 } PDisks { NodeID: 5 PDiskID: 1 Path: "SectorMap:4:3200" PDiskGuid: 5 } PDisks { NodeID: 6 PDiskID: 1 Path: "SectorMap:5:3200" PDiskGuid: 6 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 
PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:24.450802Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-24T20:31:24.451372Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:31:24.451687Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-24T20:31:24.452905Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-24T20:31:24.453011Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-24T20:31:24.453888Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:206:2081] ControllerId# 72057594037932033 2025-06-24T20:31:24.453932Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-24T20:31:24.454021Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-24T20:31:24.454138Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-24T20:31:24.497764Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-24T20:31:24.497830Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-24T20:31:24.500727Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:205:2080] Create Queue# [1:214:2086] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.500924Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:205:2080] Create Queue# [1:215:2087] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.501085Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:205:2080] Create Queue# [1:216:2088] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.501250Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:205:2080] Create Queue# [1:217:2089] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.501396Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:205:2080] Create Queue# [1:218:2090] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:24.501562Z ... 
llptr Flags: 1:2:0} 2025-06-24T20:37:56.510575Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 58 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037893 followers: 0 countLeader 1 allowFollowers 0 winner: [63:1325:2101] 2025-06-24T20:37:56.510689Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037893] forward result remote node 63 [58:2121:2499] 2025-06-24T20:37:56.510818Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037893] remote node connected [58:2121:2499] 2025-06-24T20:37:56.604458Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037893]::SendEvent [58:2121:2499] 2025-06-24T20:37:56.605090Z node 63 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037893] Accept Connect Originator# [58:2121:2499] 2025-06-24T20:37:56.605530Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037893] connected with status OK role: Leader [58:2121:2499] 2025-06-24T20:37:56.605598Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037893] send queued [58:2121:2499] 2025-06-24T20:37:56.606746Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037894] ::Bootstrap [58:2124:2501] 2025-06-24T20:37:56.606792Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037894] lookup [58:2124:2501] 2025-06-24T20:37:56.606872Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037894 entry.State: StNormal ev: {EvForward TabletID: 72075186224037894 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:37:56.606925Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 58 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037894 followers: 0 countLeader 1 allowFollowers 0 winner: [62:1332:2143] 2025-06-24T20:37:56.607019Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037894] forward result remote node 62 [58:2124:2501] 2025-06-24T20:37:56.607126Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037894] remote node connected [58:2124:2501] 2025-06-24T20:37:56.607198Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037894]::SendEvent [58:2124:2501] 2025-06-24T20:37:56.607564Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:349: TClient[72075186224037894] connect request undelivered [58:2124:2501] 2025-06-24T20:37:56.607625Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:559: TClient[72075186224037894] immediate retry [58:2124:2501] 2025-06-24T20:37:56.607659Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037894] lookup [58:2124:2501] 2025-06-24T20:37:56.607713Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:536: Handle TEvTabletProblem tabletId: 72075186224037894 entry.State: StNormal 2025-06-24T20:37:56.607890Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037894 entry.State: StProblemResolve ev: {EvForward TabletID: 72075186224037894 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:37:56.607972Z node 58 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037894 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:37:56.608100Z node 58 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037894 Cookie: 0} 
2025-06-24T20:37:56.608164Z node 58 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037894 Cookie: 1} 2025-06-24T20:37:56.608206Z node 58 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037894 Cookie: 2} 2025-06-24T20:37:56.608269Z node 58 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037894 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [63:1990:2269] CurrentLeaderTablet: [63:1995:2272] CurrentGeneration: 3 CurrentStep: 0} 2025-06-24T20:37:56.608404Z node 58 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037894 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [63:1990:2269] CurrentLeaderTablet: [63:1995:2272] CurrentGeneration: 3 CurrentStep: 0} 2025-06-24T20:37:56.608527Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037894 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037894 Cookie: 0 CurrentLeader: [63:1990:2269] CurrentLeaderTablet: [63:1995:2272] CurrentGeneration: 3 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[58:24343667:0] : 7}, {[58:1099535971443:0] : 10}}}} 2025-06-24T20:37:56.608567Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 72075186224037894 followers: 0 2025-06-24T20:37:56.608617Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 58 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037894 followers: 0 countLeader 1 allowFollowers 0 winner: [63:1990:2269] 2025-06-24T20:37:56.608722Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037894] forward result remote node 63 [58:2124:2501] 2025-06-24T20:37:56.608853Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037894] remote node connected [58:2124:2501] 2025-06-24T20:37:56.608896Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037894]::SendEvent [58:2124:2501] 2025-06-24T20:37:56.609138Z node 63 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037894] Accept Connect Originator# [58:2124:2501] 2025-06-24T20:37:56.609502Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037894] connected with status OK role: Leader [58:2124:2501] 2025-06-24T20:37:56.609545Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037894] send queued [58:2124:2501] 2025-06-24T20:37:56.610618Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037895] ::Bootstrap [58:2128:2503] 2025-06-24T20:37:56.610658Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037895] lookup [58:2128:2503] 2025-06-24T20:37:56.610734Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037895 entry.State: StNormal ev: {EvForward TabletID: 72075186224037895 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:37:56.610787Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 58 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037895 followers: 0 countLeader 1 allowFollowers 0 winner: [63:1833:2196] 2025-06-24T20:37:56.610898Z node 58 :PIPE_CLIENT DEBUG: 
tablet_pipe_client.cpp:195: TClient[72075186224037895] forward result remote node 63 [58:2128:2503] 2025-06-24T20:37:56.610989Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037895] remote node connected [58:2128:2503] 2025-06-24T20:37:56.611029Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037895]::SendEvent [58:2128:2503] 2025-06-24T20:37:56.619693Z node 63 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037895] Accept Connect Originator# [58:2128:2503] 2025-06-24T20:37:56.620174Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037895] connected with status OK role: Leader [58:2128:2503] 2025-06-24T20:37:56.620225Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037895] send queued [58:2128:2503] 2025-06-24T20:37:56.621395Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037896] ::Bootstrap [58:2131:2505] 2025-06-24T20:37:56.621440Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037896] lookup [58:2131:2505] 2025-06-24T20:37:56.621546Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037896 entry.State: StNormal ev: {EvForward TabletID: 72075186224037896 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:37:56.703331Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 58 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037896 followers: 0 countLeader 1 allowFollowers 0 winner: [63:1836:2198] 2025-06-24T20:37:56.704939Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037896] forward result remote node 63 [58:2131:2505] 2025-06-24T20:37:56.705274Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037896] remote node connected [58:2131:2505] 2025-06-24T20:37:56.705339Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037896]::SendEvent [58:2131:2505] 2025-06-24T20:37:56.705696Z node 63 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037896] Accept Connect Originator# [58:2131:2505] 2025-06-24T20:37:56.706137Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037896] connected with status OK role: Leader [58:2131:2505] 2025-06-24T20:37:56.706193Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037896] send queued [58:2131:2505] 2025-06-24T20:37:56.707487Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [58:2133:2506] 2025-06-24T20:37:56.707575Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [58:2133:2506] 2025-06-24T20:37:56.707709Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:37:56.707800Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 58 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [58:621:2179] 2025-06-24T20:37:56.707944Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [58:2133:2506] 2025-06-24T20:37:56.708039Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [58:2133:2506] 
2025-06-24T20:37:56.708120Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [58:2133:2506] 2025-06-24T20:37:56.708213Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [58:2133:2506] 2025-06-24T20:37:56.708354Z node 58 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [58:2133:2506] 2025-06-24T20:37:56.708593Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [58:2133:2506] 2025-06-24T20:37:56.708669Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [58:2133:2506] 2025-06-24T20:37:56.708726Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [58:2133:2506] 2025-06-24T20:37:56.708810Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [58:2133:2506] 2025-06-24T20:37:56.708869Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [58:2133:2506] 2025-06-24T20:37:56.708953Z node 58 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [58:592:2174] EventType# 268697616 |89.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut |89.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut |89.9%| [LD] {RESULT} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> ExternalIndex::Simple [GOOD] Test command err: 2025-06-24T20:32:26.384869Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2327], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:32:26.385132Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:32:26.385287Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/cs_index/external;error=incorrect path status: LookupError; 2025-06-24T20:32:26.385360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004718/r3tmp/tmpQfWNPl/pdisk_1.dat 2025-06-24T20:32:26.759871Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 1350, node 1 TClient is connected to server localhost:29162 2025-06-24T20:32:27.018206Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:392: actor# [1:60:2107] Handle TEvGetProxyServicesRequest 2025-06-24T20:32:27.018563Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:392: actor# [1:60:2107] Handle TEvGetProxyServicesRequest 2025-06-24T20:32:27.023286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:32:27.076266Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:32:27.077948Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:32:27.077998Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:32:27.078029Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:32:27.078350Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:32:27.078738Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797143211089 != 1750797143211093 2025-06-24T20:32:27.126185Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:60:2107] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T20:32:27.127362Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T20:32:27.127676Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:32:27.127839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:32:27.139927Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:32:27.292565Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:60:2107] Handle TEvProposeTransaction 
2025-06-24T20:32:27.292666Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:60:2107] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T20:32:27.292956Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:60:2107] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:637:2530] 2025-06-24T20:32:27.402531Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:637:2530] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateColumnStore CreateColumnStore { Name: "olapStore" ColumnShardCount: 4 SchemaPresets { Name: "default" Schema { Columns { Name: "timestamp" Type: "Timestamp" NotNull: true } Columns { Name: "resource_id" Type: "Utf8" DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Name: "uid" Type: "Utf8" NotNull: true StorageId: "__MEMORY" } Columns { Name: "level" Type: "Int32" } Columns { Name: "message" Type: "Utf8" StorageId: "__MEMORY" } Columns { Name: "json_payload" Type: "JsonDocument" } KeyColumnNames: "timestamp" KeyColumnNames: "uid" } } } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T20:32:27.402633Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:637:2530] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T20:32:27.403307Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:637:2530] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T20:32:27.403424Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:637:2530] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:32:27.403728Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:637:2530] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:32:27.403960Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:637:2530] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:32:27.404062Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:637:2530] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T20:32:27.407790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_store.cpp:451) 2025-06-24T20:32:27.408411Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:637:2530] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T20:32:27.409247Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:637:2530] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T20:32:27.409318Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:637:2530] txid# 281474976715657 SEND to# [1:636:2529] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T20:32:27.517972Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:695:2576];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T20:32:27.548321Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:695:2576];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T20:32:27.548688Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037888 2025-06-24T20:32:27.556748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:695:2576];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T20:32:27.557022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:695:2576];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T20:32:27.557347Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:695:2576];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T20:32:27.557510Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:695:2576];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T20:32:27.557639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:695:2576];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T20:32:27.557769Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:695:2576];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T20:32:27.557897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:695:2576];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T20:32:27.558012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:695:2576];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T20:32:27.558143Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:695:2576];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T20:32:27.558280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:695:2576];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T20:32:27.558404Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:695:2576];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T20:32:27.584544Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 72075186224037888 2025-06-24T20:32:27.585029Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T20:32:27.585098Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T20:32:27.585299Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T20:32:27.585454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T20:32:27.585529Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T20:32:27.585574Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;e ... form.cpp:33: PhysicalPeepholeTransformer: ( (let $1 (KqpTable '"//Root/.metadata/initialization/migrations" '"72057594046644480:6" '"" '1)) (let $2 '('"componentId" '"instant" '"modificationId")) (let $3 (Uint64 '"1001")) (let $4 (KqpRowsSourceSettings $1 $2 '('('"ItemsLimit" $3) '('"Sequential" '1)) (Void) '())) (let $5 (OptionalType (DataType 'Utf8))) (let $6 (StructType '('"componentId" $5) '('"instant" (OptionalType (DataType 'Uint32))) '('"modificationId" $5))) (let $7 '('('"_logical_id" '338) '('"_id" '"c8266f3c-ec2ffc8a-1a20dc86-b9a07926") '('"_wide_channels" $6))) (let $8 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $4)) (lambda '($12) (block '( (let $13 (lambda '($14) (Member $14 '"componentId") (Member $14 '"instant") (Member $14 '"modificationId"))) (return (FromFlow (ExpandMap (Take (ToFlow $12) $3) $13))) ))) $7)) (let $9 (DqCnUnionAll (TDqOutput $8 '"0"))) (let $10 (DqPhyStage '($9) (lambda '($15) (FromFlow (NarrowMap (Take (ToFlow $15) $3) (lambda '($16 $17 $18) (AsStruct '('"componentId" $16) '('"instant" $17) '('"modificationId" $18)))))) '('('"_logical_id" '351) '('"_id" '"e5dc10a2-3651a6a1-ca198b02-a1f68c87")))) (let $11 (DqCnResult (TDqOutput $10 '"0") '())) (return (KqpPhysicalQuery '((KqpPhysicalTx '($8 $10) '($11) '() '('('"type" '"data")))) '((KqpTxResultBinding (ListType $6) '"0" '"0")) '('('"type" '"data_query")))) ) 2025-06-24T20:37:46.672998Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.672 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:466: Register async execution for node #268 2025-06-24T20:37:46.673152Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.673 TRACE ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:387: {3}, callable #277 2025-06-24T20:37:46.673262Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.673 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:577: Node #277 finished execution 2025-06-24T20:37:46.673326Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 
2025-06-24 20:37:46.673 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:594: Node #277 created 0 trackable nodes: 2025-06-24T20:37:46.673414Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.673 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:87: Finish, output #280, status: Async 2025-06-24T20:37:46.673993Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.673 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:133: Completed async execution for node #268 2025-06-24T20:37:46.674066Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.674 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:153: State is ExecutionRequired after apply async changes for node #268 2025-06-24T20:37:46.674121Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.674 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:59: Begin, root #280 2025-06-24T20:37:46.674162Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.674 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:72: Collect unused nodes for root #280, status: Ok 2025-06-24T20:37:46.674223Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.674 TRACE ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:387: {0}, callable #280 2025-06-24T20:37:46.674269Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.674 TRACE ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:387: {1}, callable #279 2025-06-24T20:37:46.674313Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.674 TRACE ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:387: {2}, callable #278 2025-06-24T20:37:46.674409Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.674 TRACE ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:387: {3}, callable #275 2025-06-24T20:37:46.674475Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.674 TRACE ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:387: {4}, callable #268 2025-06-24T20:37:46.674629Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.674 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:577: Node #268 finished execution 2025-06-24T20:37:46.674704Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.674 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:594: Node #268 created 0 trackable nodes: 2025-06-24T20:37:46.674767Z 
node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.674 TRACE ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:387: {3}, callable #275 2025-06-24T20:37:46.674825Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.674 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:577: Node #275 finished execution 2025-06-24T20:37:46.674894Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.674 TRACE ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:387: {2}, callable #278 2025-06-24T20:37:46.675125Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.675 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:577: Node #278 finished execution 2025-06-24T20:37:46.675202Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.675 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:594: Node #278 created 0 trackable nodes: 2025-06-24T20:37:46.675269Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.675 TRACE ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:387: {1}, callable #279 2025-06-24T20:37:46.675402Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.675 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:577: Node #279 finished execution 2025-06-24T20:37:46.675460Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.675 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:594: Node #279 created 0 trackable nodes: 2025-06-24T20:37:46.675520Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.675 TRACE ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:387: {0}, callable #280 2025-06-24T20:37:46.675583Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.675 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:577: Node #280 finished execution 2025-06-24T20:37:46.675639Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.675 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:594: Node #280 created 0 trackable nodes: 2025-06-24T20:37:46.675693Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.675 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:87: Finish, output #280, status: Ok 2025-06-24T20:37:46.675748Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhtjes7ahkd9ymvg9pdjvmh, SessionId: CompileActor 2025-06-24 20:37:46.675 INFO ydb-services-ext_index-ut(pid=143770, tid=0x00007FC3995DCD40) [core exec] yql_execution.cpp:93: 
Creating finalizing transformer, output #280 2025-06-24T20:37:46.697235Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [1:60:2107] Handle TEvExecuteKqpTransaction 2025-06-24T20:37:46.697313Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [1:60:2107] TxId# 281474976716232 ProcessProposeKqpTransaction 2025-06-24T20:37:46.711702Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [1:60:2107] Handle TEvExecuteKqpTransaction 2025-06-24T20:37:46.711776Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [1:60:2107] TxId# 281474976716233 ProcessProposeKqpTransaction 2025-06-24T20:37:46.879295Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;parent=[1:695:2576];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-24T20:37:46.879625Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037889;parent=[1:698:2578];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-24T20:37:46.879696Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037891;parent=[1:701:2580];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-24T20:37:46.879751Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037890;parent=[1:707:2582];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-24T20:37:46.892466Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:695:2576];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037888; 2025-06-24T20:37:46.892630Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:698:2578];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037889; 2025-06-24T20:37:46.892700Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037891;self_id=[1:701:2580];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037891; 2025-06-24T20:37:46.892770Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:707:2582];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037890; REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 >> TxUsage::Sinks_Oltp_WriteToTopics_2_Table [GOOD] >> DataShardWrite::DeletePrepared-Volatile [GOOD] >> DataShardWrite::DelayedVolatileTxAndEvWrite ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::Negative_SingleRowWithValueCast-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 14282, MsgBus: 6881 2025-06-24T20:37:39.918133Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617832574249002:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:39.918188Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001bb5/r3tmp/tmpqNLdJg/pdisk_1.dat 2025-06-24T20:37:40.968478Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:37:41.140203Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:37:41.151399Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:41.151494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:41.195119Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617832574248974:2079] 1750797459896052 != 1750797459896055 2025-06-24T20:37:41.196487Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:41.222943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:37:41.561124Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.177193s 2025-06-24T20:37:41.561194Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.177287s TServer::EnableGrpc on GrpcPort 14282, node 1 2025-06-24T20:37:42.341170Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:37:42.341193Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:37:42.341200Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:37:42.341697Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6881 TClient is connected to server localhost:6881 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:37:43.909255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:37:43.992959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:37:44.500291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:37:44.634776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:37:44.727623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:37:44.919280Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617832574249002:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:44.919368Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:37:45.084237Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617858344054405:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:45.084370Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:46.329559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:46.419553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:46.452452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:46.485743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:46.523504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:46.595738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:46.632613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:46.803340Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617862639022383:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:46.803496Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:46.823331Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617862639022388:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:46.863296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:37:46.883552Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617862639022390:2441], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:37:46.980105Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617862639022443:3433] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, s ... :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519617890395172521:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:53.768967Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001bb5/r3tmp/tmptn92q8/pdisk_1.dat 2025-06-24T20:37:54.005604Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:54.007283Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519617890395172498:2079] 1750797473749738 != 1750797473749741 2025-06-24T20:37:54.018253Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:54.018367Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:54.024316Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7972, node 2 2025-06-24T20:37:54.095811Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:37:54.095845Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:37:54.095853Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:37:54.095994Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16624 TClient is connected to server localhost:16624 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:37:54.706092Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:37:54.711664Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:37:54.723552Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:37:54.799456Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T20:37:54.811243Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:55.029309Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:37:55.167807Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:37:58.124582Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617911870010603:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:58.124663Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:58.200367Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:58.258053Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:58.308635Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:58.350136Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:58.391239Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:58.485901Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:58.570614Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:58.670784Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617911870011257:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:58.670864Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:58.671351Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617911870011262:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:58.676217Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:37:58.697407Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519617911870011264:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:37:58.769276Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519617890395172521:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:58.769362Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:37:58.790887Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519617911870011316:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:38:01.009804Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TTxDataShardMiniKQL::WriteValueTooLarge [GOOD] >> TTxDataShardMiniKQL::WriteLargeExternalBlob >> TxUsage::Sinks_Oltp_WriteToTopics_2_Query >> TTxDataShardMiniKQL::WriteEraseRead >> DataShardWrite::UpsertPreparedNoTxCache-Volatile [GOOD] >> DataShardWrite::WriteCommitVersion >> TxUsage::WriteToTopic_Demo_37_Table [GOOD] >> DataShardWrite::UpsertBrokenLockArbiter [GOOD] >> DataShardWrite::PreparedDistributedWritePageFault |89.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |89.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |89.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |89.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut >> TPersQueueCommonTest::TestWriteWithRateLimiterWithUserPayloadRateLimit [GOOD] >> TPersQueueCommonTest::TestLimiterLimitsWithBlobsRateLimit |89.9%| [LD] {RESULT} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |89.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |89.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut |89.9%| [LD] {RESULT} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut |89.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Query [GOOD] >> DataShardTxOrder::RandomPointsAndRanges >> DataShardVolatile::DistributedUpsertRestartBeforePrepare+UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartBeforePrepare-UseSink >> TxUsage::WriteToTopic_Demo_37_Query >> TTxDataShardMiniKQL::WriteEraseRead [GOOD] >> TTxDataShardMiniKQL::WriteAndReadMultipleShards >> DataShardOutOfOrder::TestShardRestartNoUndeterminedImmediate >> JsonChangeRecord::DataChangeVersion [GOOD] >> DataStreams::TestGetRecordsStreamWithSingleShard [GOOD] >> DataStreams::TestGetRecords1MBMessagesOneByOneByTS >> DataShardWrite::UpsertNoLocksArbiterRestart [GOOD] >> DataShardWrite::UpsertLostPrepareArbiterRestart >> 
TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Table |89.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest >> JsonChangeRecord::DataChangeVersion [GOOD] >> TPersQueueTest::WhenTheTopicIsDeletedAfterDecompressingTheData_Compressed [GOOD] >> TPersQueueTest::WhenTheTopicIsDeletedAfterDecompressingTheData_Uncompressed >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-51 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-52 >> TTxDataShardMiniKQL::WriteAndReadMultipleShards [GOOD] >> TTxDataShardMiniKQL::WriteAndReadMany >> TTxDataShardMiniKQL::WriteLargeExternalBlob [GOOD] >> DataShardWrite::InsertImmediate [GOOD] >> DataShardWrite::ImmediateAndPlannedCommittedOpsRace >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-3 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-4 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-33 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-34 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::WriteLargeExternalBlob [GOOD] Test command err: 2025-06-24T20:38:02.176288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:38:02.176358Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:02.180102Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:38:02.198668Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:38:02.199251Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T20:38:02.199587Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:38:02.262470Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:38:02.302355Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:38:02.302712Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:38:02.304885Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T20:38:02.304983Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T20:38:02.305042Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T20:38:02.305536Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:38:02.305660Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:38:02.305767Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T20:38:02.399229Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:38:02.439443Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 
2025-06-24T20:38:02.439687Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:38:02.439801Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T20:38:02.439860Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T20:38:02.439895Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T20:38:02.439930Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T20:38:02.440084Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:38:02.440128Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:38:02.440414Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T20:38:02.440523Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T20:38:02.440673Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T20:38:02.440712Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:38:02.440763Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T20:38:02.440810Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T20:38:02.440854Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T20:38:02.440887Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T20:38:02.440942Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T20:38:02.441046Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:02.441095Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:02.441147Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T20:38:02.444218Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nY\n\006table2\032\n\n\004key1\030\002 \"\032\013\n\004key2\030\200$ #\032\014\n\005value\030\200$ 8(\"(#:\010Z\006\010\000\030\000(\000J\014/Root/table2\222\002\013\th\020\000\000\000\000\000\000\020\016" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T20:38:02.444293Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 
2025-06-24T20:38:02.444389Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T20:38:02.444590Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T20:38:02.444637Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T20:38:02.444699Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T20:38:02.444750Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T20:38:02.444805Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T20:38:02.444847Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T20:38:02.444879Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T20:38:02.445208Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T20:38:02.445245Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T20:38:02.445280Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T20:38:02.445340Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T20:38:02.445394Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T20:38:02.445432Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T20:38:02.445463Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T20:38:02.445501Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T20:38:02.445530Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T20:38:02.463835Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T20:38:02.463940Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T20:38:02.463975Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T20:38:02.464022Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T20:38:02.464110Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T20:38:02.464709Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:02.464763Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:02.464808Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T20:38:02.464931Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T20:38:02.464966Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T20:38:02.465098Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T20:38:02.465147Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T20:38:02.465202Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T20:38:02.465237Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T20:38:02.468922Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T20:38:02.469002Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T20:38:02.469272Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:38:02.469309Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:38:02.469365Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T20:38:02.469425Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T20:38:02.469459Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T20:38:02.469498Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T20:38:02.469530Z node 1 :TX_DATASHARD TRACE: dat ... 
s: 0, InvisibleRowSkips: 0} 2025-06-24T20:38:06.894355Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T20:38:06.894409Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit ExecuteDataTx 2025-06-24T20:38:06.894453Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit FinishPropose 2025-06-24T20:38:06.894492Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit FinishPropose 2025-06-24T20:38:06.894590Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T20:38:06.894621Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit FinishPropose 2025-06-24T20:38:06.894656Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit CompletedOperations 2025-06-24T20:38:06.894692Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CompletedOperations 2025-06-24T20:38:06.894738Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-24T20:38:06.894790Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CompletedOperations 2025-06-24T20:38:06.894832Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 9437184 has finished 2025-06-24T20:38:06.958596Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T20:38:06.958685Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 9437184 on unit FinishPropose 2025-06-24T20:38:06.958739Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 2 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 5 ms, status: COMPLETE 2025-06-24T20:38:06.958859Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T20:38:07.889632Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269551617, Sender [3:102:2135], Recipient [3:238:2229]: NKikimrTxDataShard.TEvGetShardState Source { RawX1: 102 RawX2: 12884904023 } 2025-06-24T20:38:07.889722Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvGetShardState 2025-06-24T20:38:07.890158Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:303:2282], Recipient [3:238:2229]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:07.890201Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:07.890252Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [3:302:2281], serverId# [3:303:2282], sessionId# [0:0:0] 2025-06-24T20:38:08.075848Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [3:102:2135], Recipient [3:238:2229]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 102 RawX2: 12884904023 } TxBody: 
"\032\332\201\200\010\037\000\005\205\n\205\000\205\004?\000\205\002\202\0041\034MyReads MyWrites\205\004?\000\206\202\024Reply\024Write?\000?\000 AllReads\030MyKeys\014Run4ShardsForRead4ShardsToWrite\005?\000\005?\004?\014\005?\002)\211\006\202\203\005\004\213\004\203\004\203\001H\205\002\203\001H\01056$UpdateRow\000\003?\016 h\020\000\000\000\000\000\000\016\000\000\000\000\000\000\000\013?\024\003?\020\251\003\003?\022\006bar\003\005?\030\003?\026\007\000\000\000\001xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 2025-06-24T20:38:08.078349Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:38:08.078514Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T20:38:08.138990Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit CheckDataTx 2025-06-24T20:38:08.139100Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is Executed 2025-06-24T20:38:08.139184Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 9437184 executing on unit CheckDataTx 2025-06-24T20:38:08.139225Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-24T20:38:08.139262Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit BuildAndWaitDependencies 2025-06-24T20:38:08.139309Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-24T20:38:08.139366Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 9437184 2025-06-24T20:38:08.139405Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is Executed 2025-06-24T20:38:08.139427Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-24T20:38:08.139449Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 9437184 to execution unit ExecuteDataTx 2025-06-24T20:38:08.139472Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit ExecuteDataTx 2025-06-24T20:38:08.139526Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-24T20:38:08.139571Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:124: Operation [0:3] at 9437184 requested 46269670 more memory 2025-06-24T20:38:08.139613Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is Restart 2025-06-24T20:38:08.139758Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: 
TTxProposeTransactionBase::Execute at 9437184 2025-06-24T20:38:08.139796Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit ExecuteDataTx 2025-06-24T20:38:08.139842Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-24T20:38:08.183446Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:3] at 9437184 exceeded memory limit 50463974 and requests 403711792 more for the next try 2025-06-24T20:38:08.191494Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 3 released its data 2025-06-24T20:38:08.191596Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is Restart 2025-06-24T20:38:08.191930Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T20:38:08.191966Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit ExecuteDataTx 2025-06-24T20:38:08.260777Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 3 at 9437184 restored its data 2025-06-24T20:38:08.260870Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-24T20:38:08.382180Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:3] at tablet 9437184 with status COMPLETE 2025-06-24T20:38:08.382318Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:3] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 16777223, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-24T20:38:08.382421Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T20:38:08.382466Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 9437184 executing on unit ExecuteDataTx 2025-06-24T20:38:08.382518Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 9437184 to execution unit FinishPropose 2025-06-24T20:38:08.382567Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit FinishPropose 2025-06-24T20:38:08.382621Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is DelayComplete 2025-06-24T20:38:08.382652Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 9437184 executing on unit FinishPropose 2025-06-24T20:38:08.382693Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 9437184 to execution unit CompletedOperations 2025-06-24T20:38:08.382731Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit CompletedOperations 2025-06-24T20:38:08.382794Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is Executed 2025-06-24T20:38:08.382827Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution 
plan for [0:3] at 9437184 executing on unit CompletedOperations 2025-06-24T20:38:08.382879Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:3] at 9437184 has finished 2025-06-24T20:38:08.470600Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T20:38:08.470688Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:3] at 9437184 on unit FinishPropose 2025-06-24T20:38:08.470747Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 3 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 2 ms, status: COMPLETE 2025-06-24T20:38:08.470879Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T20:38:08.626582Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 9437184, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-24T20:38:08.626688Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 9437184, table# 1001, finished edge# 0, front# 0 2025-06-24T20:38:08.640760Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [3:235:2228], Recipient [3:238:2229]: NKikimr::TEvTablet::TEvFollowerGcApplied >> KqpScan::ScanRetryRead >> DataShardVolatile::UpsertBrokenLockArbiterRestart+UseSink [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiterRestart-UseSink |89.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator |89.9%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator |89.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator >> DataShardWrite::DelayedVolatileTxAndEvWrite [GOOD] >> DataShardWrite::DoubleWriteUncommittedThenDoubleReadWithCommit >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-50 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-51 >> TPersQueueTest::WhenTheTopicIsDeletedAfterReadingTheData_Compressed [GOOD] >> TPersQueueTest::WhenTheTopicIsDeletedBeforeDataIsDecompressed_Uncompressed >> TPersQueueTest::ReadRuleServiceType [GOOD] >> TPersQueueTest::ReadRuleServiceTypeLimit >> DataShardWrite::PreparedDistributedWritePageFault [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Table [GOOD] >> KqpEffects::InsertAbort_Params_Duplicates-UseSink [GOOD] >> KqpScan::ScanDuringSplit10 >> DataShardWrite::WriteCommitVersion [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::PreparedDistributedWritePageFault [GOOD] Test command err: 2025-06-24T20:37:33.128343Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:37:33.128811Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:37:33.129019Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004601/r3tmp/tmpRpRkLD/pdisk_1.dat 2025-06-24T20:37:33.505243Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:37:33.508841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:37:33.568979Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:33.579675Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797449891953 != 1750797449891957 2025-06-24T20:37:33.632341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:33.632521Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:33.644452Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:37:33.727674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:33.774912Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:37:33.776257Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:37:33.776787Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T20:37:33.777111Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:37:33.828493Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:37:33.829335Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:37:33.829499Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:37:33.831439Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T20:37:33.831558Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:37:33.831633Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:37:33.832120Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:37:33.832336Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:37:33.832453Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T20:37:33.844594Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:37:33.866401Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:37:33.866605Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:37:33.866748Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T20:37:33.866784Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:37:33.866839Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:37:33.866871Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:37:33.867114Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:37:33.867182Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:37:33.867470Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:37:33.867549Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:37:33.867601Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:37:33.867648Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:37:33.867738Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:37:33.867771Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:37:33.867810Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:37:33.867868Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:37:33.867923Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:37:33.868037Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:37:33.868080Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:37:33.868130Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T20:37:33.868571Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T20:37:33.868610Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:37:33.868715Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:37:33.868900Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T20:37:33.868953Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:37:33.869051Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:37:33.869101Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20:37:33.869155Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T20:37:33.869193Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T20:37:33.869236Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:37:33.869515Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T20:37:33.869563Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T20:37:33.869599Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T20:37:33.869630Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:37:33.869670Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T20:37:33.869697Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T20:37:33.869730Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T20:37:33.869759Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T20:37:33.869786Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T20:37:33.871404Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T20:37:33.871477Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:37:33.882383Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:37:33.882472Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:37:33.882512Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:37:33.882562Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... Add [3500:1234567890011] at 72075186224037888 to execution unit LoadWriteDetails 2025-06-24T20:38:12.342532Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit LoadTxDetails 2025-06-24T20:38:12.343019Z node 7 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: Parsing write transaction for 1234567890011 at 72075186224037888, record: Operations { Type: OPERATION_UPSERT TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } ColumnIds: 1 ColumnIds: 2 PayloadIndex: 0 PayloadFormat: FORMAT_CELLVEC } TxId: 1234567890011 TxMode: MODE_PREPARE Locks { Op: Commit } 2025-06-24T20:38:12.354951Z node 7 :TX_DATASHARD TRACE: datashard_write_operation.cpp:213: Table /Root/table, shard: 72075186224037888, write point (Int32 : 1) 2025-06-24T20:38:12.355168Z node 7 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Int32 : 1) table: [72057594046644480:2:1] 2025-06-24T20:38:12.355327Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:683: LoadWriteDetails at 72075186224037888 loaded writeOp from db 3500:1234567890011 keys extracted: 1 2025-06-24T20:38:12.355404Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-24T20:38:12.355451Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit LoadWriteDetails 2025-06-24T20:38:12.355487Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T20:38:12.355536Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T20:38:12.355629Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [3500:1234567890011] is the new logically complete end at 72075186224037888 2025-06-24T20:38:12.355686Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [3500:1234567890011] is the new logically incomplete end at 72075186224037888 2025-06-24T20:38:12.355750Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [3500:1234567890011] at 72075186224037888 2025-06-24T20:38:12.355820Z node 7 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-24T20:38:12.355849Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T20:38:12.355887Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit BuildWriteOutRS 2025-06-24T20:38:12.355916Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit BuildWriteOutRS 2025-06-24T20:38:12.355967Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-24T20:38:12.355995Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit BuildWriteOutRS 2025-06-24T20:38:12.356019Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit StoreAndSendWriteOutRS 2025-06-24T20:38:12.356048Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit StoreAndSendWriteOutRS 2025-06-24T20:38:12.356077Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-24T20:38:12.356101Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit StoreAndSendWriteOutRS 2025-06-24T20:38:12.356126Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit PrepareWriteTxInRS 2025-06-24T20:38:12.356154Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit PrepareWriteTxInRS 2025-06-24T20:38:12.356189Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-24T20:38:12.356223Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit PrepareWriteTxInRS 2025-06-24T20:38:12.356249Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit LoadAndWaitInRS 2025-06-24T20:38:12.356277Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit LoadAndWaitInRS 2025-06-24T20:38:12.356310Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-24T20:38:12.356337Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit LoadAndWaitInRS 2025-06-24T20:38:12.356362Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit ExecuteWrite 2025-06-24T20:38:12.356389Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit ExecuteWrite 2025-06-24T20:38:12.356435Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [3500:1234567890011] at 72075186224037888 
2025-06-24T20:38:12.356988Z node 7 :TX_DATASHARD TRACE: execute_write_unit.cpp:122: Tablet 72075186224037888 is not ready for [3500:1234567890011] execution 2025-06-24T20:38:12.357163Z node 7 :TX_DATASHARD DEBUG: datashard_write_operation.cpp:454: tx 1234567890011 at 72075186224037888 released its data 2025-06-24T20:38:12.357236Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Restart 2025-06-24T20:38:12.357281Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T20:38:12.357349Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-24T20:38:12.357417Z node 7 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:38:12.357473Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:38:12.358116Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:38:12.358192Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit ExecuteWrite 2025-06-24T20:38:12.358265Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [3500:1234567890011] at 72075186224037888 2025-06-24T20:38:12.358767Z node 7 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: Parsing write transaction for 1234567890011 at 72075186224037888, record: Operations { Type: OPERATION_UPSERT TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } ColumnIds: 1 ColumnIds: 2 PayloadIndex: 0 PayloadFormat: FORMAT_CELLVEC } TxId: 1234567890011 TxMode: MODE_PREPARE Locks { Op: Commit } 2025-06-24T20:38:12.358895Z node 7 :TX_DATASHARD TRACE: datashard_write_operation.cpp:213: Table /Root/table, shard: 72075186224037888, write point (Int32 : 1) 2025-06-24T20:38:12.358984Z node 7 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Int32 : 1) table: [72057594046644480:2:1] 2025-06-24T20:38:12.359093Z node 7 :TX_DATASHARD DEBUG: datashard_write_operation.cpp:547: tx 1234567890011 at 72075186224037888 restored its data 2025-06-24T20:38:12.359576Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [3500:1234567890011] at 72075186224037888, row count=1 2025-06-24T20:38:12.359661Z node 7 :TX_DATASHARD TRACE: locks.cpp:194: Lock 1234567890001 marked broken at v{min} 2025-06-24T20:38:12.359817Z node 7 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-24T20:38:12.359942Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20:38:12.360015Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit ExecuteWrite 2025-06-24T20:38:12.360072Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit CompleteWrite 2025-06-24T20:38:12.360134Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit CompleteWrite 2025-06-24T20:38:12.360454Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status 
for [3500:1234567890011] at 72075186224037888 is DelayComplete 2025-06-24T20:38:12.360507Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit CompleteWrite 2025-06-24T20:38:12.360559Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T20:38:12.360608Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit CompletedOperations 2025-06-24T20:38:12.360668Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-24T20:38:12.360709Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T20:38:12.360772Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:1234567890011] at 72075186224037888 has finished 2025-06-24T20:38:12.360831Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:38:12.360887Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-24T20:38:12.360943Z node 7 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:38:12.360995Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:38:12.361885Z node 7 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3500} 2025-06-24T20:38:12.362277Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:38:12.362349Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:1234567890011] at 72075186224037888 on unit CompleteWrite 2025-06-24T20:38:12.362441Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [3500 : 1234567890011] from 72075186224037888 at tablet 72075186224037888 send result to client [7:752:2610] 2025-06-24T20:38:12.362514Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Query >> TPersQueueTest::PreferredCluster_NonExistentPreferredCluster_SessionDiesOnlyAfterDelay [GOOD] >> TPersQueueTest::PreferredCluster_EnabledRemotePreferredClusterAndRemoteClusterEnabledDelaySec_SessionDiesOnlyAfterDelay ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::InsertAbort_Params_Duplicates-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 27950, MsgBus: 27319 2025-06-24T20:37:39.923634Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617829692047307:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:39.923716Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/2btm/001beb/r3tmp/tmp0I9tGv/pdisk_1.dat 2025-06-24T20:37:40.968405Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:37:41.140424Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:37:41.163688Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:41.163775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:41.164247Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617829692047274:2079] 1750797459899280 != 1750797459899283 2025-06-24T20:37:41.194796Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:41.223514Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:37:41.560475Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.178197s 2025-06-24T20:37:41.560558Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.178315s TServer::EnableGrpc on GrpcPort 27950, node 1 2025-06-24T20:37:42.339760Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:37:42.339783Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:37:42.339790Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:37:42.339938Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27319 TClient is connected to server localhost:27319 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:37:43.946717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:37:43.997701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:37:44.500255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:37:44.655787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:37:44.735937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:37:44.934514Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617829692047307:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:44.936042Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:37:45.083909Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617855461852692:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:45.084074Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:46.327711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:46.398213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:46.442644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:46.468513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:46.524050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:46.559517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:46.644277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:46.797250Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617859756820660:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:46.797361Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:46.812201Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617859756820665:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:37:46.864313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:37:46.885663Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617859756820667:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:37:46.974537Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617859756820721:3430] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool ... SPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519617931475801078:2079] 1750797483131846 != 1750797483131849 2025-06-24T20:38:03.518382Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5001, node 3 2025-06-24T20:38:03.739802Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:38:03.739843Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:38:03.739852Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:38:03.740030Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:38:04.149176Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63383 TClient is connected to server localhost:63383 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:38:04.801126Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:38:04.815877Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:38:04.829959Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:38:05.038532Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:05.280117Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:05.401299Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:08.158215Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519617931475801291:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:08.158302Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:38:08.511358Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519617952950639203:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:08.511479Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:08.559619Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:08.623912Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:08.711197Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:08.762597Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:08.820418Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:08.919605Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:09.007615Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:09.133585Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519617957245607162:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:09.133647Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:09.134075Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519617957245607167:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:09.144886Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:09.170331Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519617957245607169:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:38:09.268183Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519617957245607222:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:38:12.059255Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [3:7519617965835542112:2485], TxId: 281474976710673, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=3&id=OGU2NjA2MDMtMWVkZjVlMzEtMjJhZmFiNTgtY2U5OWY1NjY=. TraceId : 01jyhtk72e9ej85gmahbm8hq40. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Duplicated keys found., code: 2012 }. 2025-06-24T20:38:12.060017Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7519617965835542113:2486], TxId: 281474976710673, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=3&id=OGU2NjA2MDMtMWVkZjVlMzEtMjJhZmFiNTgtY2U5OWY1NjY=. TraceId : 01jyhtk72e9ej85gmahbm8hq40. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [3:7519617965835542109:2471], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-24T20:38:12.060604Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=OGU2NjA2MDMtMWVkZjVlMzEtMjJhZmFiNTgtY2U5OWY1NjY=, ActorId: [3:7519617965835542074:2471], ActorState: ExecuteState, TraceId: 01jyhtk72e9ej85gmahbm8hq40, Create QueryResponse for error on request, msg: >> DataShardOutOfOrder::TestShardRestartNoUndeterminedImmediate [GOOD] >> DataShardOutOfOrder::TestShardRestartDuringWaitingRead >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Query [GOOD] |89.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes |89.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes |89.9%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes >> DataShardWrite::UpsertLostPrepareArbiterRestart [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::WriteCommitVersion [GOOD] Test command err: 2025-06-24T20:37:35.706653Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:37:35.707052Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:37:35.707306Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045f3/r3tmp/tmpYaOR8B/pdisk_1.dat 2025-06-24T20:37:36.056137Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:37:36.059624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:37:36.109953Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:36.118966Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797452751921 != 1750797452751925 2025-06-24T20:37:36.169622Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:36.169816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:36.187479Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:37:36.296530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:36.379381Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:37:36.380940Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:37:36.381450Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T20:37:36.381733Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:37:36.439593Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:37:36.440422Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:37:36.440581Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:37:36.442327Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T20:37:36.442417Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:37:36.442473Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:37:36.442851Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:37:36.443017Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:37:36.443097Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T20:37:36.455869Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:37:36.506620Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:37:36.506863Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:37:36.506979Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T20:37:36.507028Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:37:36.507081Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:37:36.507117Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:37:36.507450Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:37:36.507507Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:37:36.507835Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:37:36.507955Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:37:36.508024Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:37:36.508060Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:37:36.508099Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:37:36.508145Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:37:36.508183Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:37:36.508244Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:37:36.508287Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:37:36.508417Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:37:36.508450Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:37:36.508494Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T20:37:36.508911Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T20:37:36.508949Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:37:36.509053Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:37:36.509260Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T20:37:36.509326Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:37:36.509444Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:37:36.509494Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20:37:36.509547Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T20:37:36.509585Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T20:37:36.509617Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:37:36.509898Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T20:37:36.509935Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T20:37:36.509971Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T20:37:36.510001Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:37:36.510055Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T20:37:36.510090Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T20:37:36.510134Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T20:37:36.510172Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T20:37:36.510200Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T20:37:36.512131Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T20:37:36.512192Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:37:36.523124Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:37:36.523227Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:37:36.523273Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:37:36.523321Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... dWaitDependencies 2025-06-24T20:38:13.987062Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1531:281474976715666] at 72075186224037890 on unit BuildAndWaitDependencies 2025-06-24T20:38:13.987111Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [1531:281474976715666] is the new logically complete end at 72075186224037890 2025-06-24T20:38:13.987136Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [1531:281474976715666] is the new logically incomplete end at 72075186224037890 2025-06-24T20:38:13.987181Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [1531:281474976715666] at 72075186224037890 2025-06-24T20:38:13.987215Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1531:281474976715666] at 72075186224037890 is Executed 2025-06-24T20:38:13.987238Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1531:281474976715666] at 72075186224037890 executing on unit BuildAndWaitDependencies 2025-06-24T20:38:13.987277Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1531:281474976715666] at 72075186224037890 to execution unit ExecuteWrite 2025-06-24T20:38:13.987303Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1531:281474976715666] at 72075186224037890 on unit ExecuteWrite 2025-06-24T20:38:13.987331Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [1531:281474976715666] at 72075186224037890 2025-06-24T20:38:13.987370Z node 7 :TX_DATASHARD TRACE: datashard_kqp.cpp:694: Send commit decision from 72075186224037890 to 72075186224037889 2025-06-24T20:38:13.987402Z node 7 :TX_DATASHARD TRACE: datashard_kqp.cpp:725: Will wait for volatile decision from 72075186224037889 to 72075186224037890 2025-06-24T20:38:13.987517Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [1531:281474976715666] at 72075186224037890, row count=1 2025-06-24T20:38:13.987667Z node 7 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:180: Deleted RS at 72075186224037890 source 72075186224037890 dest 72075186224037888 consumer 72075186224037888 seqno 1 txId 281474976715660 2025-06-24T20:38:13.987719Z node 7 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:180: Deleted RS at 72075186224037890 source 72075186224037890 dest 
72075186224037889 consumer 72075186224037889 seqno 2 txId 281474976715660 2025-06-24T20:38:13.987791Z node 7 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-24T20:38:13.987865Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1531:281474976715666] at 72075186224037890 is DelayCompleteNoMoreRestarts 2025-06-24T20:38:13.987900Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1531:281474976715666] at 72075186224037890 executing on unit ExecuteWrite 2025-06-24T20:38:13.987927Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1531:281474976715666] at 72075186224037890 to execution unit CompleteWrite 2025-06-24T20:38:13.987952Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1531:281474976715666] at 72075186224037890 on unit CompleteWrite 2025-06-24T20:38:13.988069Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1531:281474976715666] at 72075186224037890 is DelayComplete 2025-06-24T20:38:13.988096Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1531:281474976715666] at 72075186224037890 executing on unit CompleteWrite 2025-06-24T20:38:13.988122Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1531:281474976715666] at 72075186224037890 to execution unit CompletedOperations 2025-06-24T20:38:13.988150Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1531:281474976715666] at 72075186224037890 on unit CompletedOperations 2025-06-24T20:38:13.988181Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1531:281474976715666] at 72075186224037890 is Executed 2025-06-24T20:38:13.988203Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1531:281474976715666] at 72075186224037890 executing on unit CompletedOperations 2025-06-24T20:38:13.988225Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1531:281474976715666] at 72075186224037890 has finished 2025-06-24T20:38:13.988252Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:38:13.988279Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037890 2025-06-24T20:38:13.988310Z node 7 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2025-06-24T20:38:13.988335Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-24T20:38:13.989160Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [7:668:2551], Recipient [7:664:2549]: {TEvReadSet step# 1531 txid# 281474976715666 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-24T20:38:13.989211Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T20:38:13.989282Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037890 dest 72075186224037889 producer 72075186224037890 txId 281474976715666 2025-06-24T20:38:13.989393Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 1531 txid# 
281474976715666 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-24T20:38:13.989505Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [7:664:2549], Recipient [7:668:2551]: {TEvReadSet step# 1531 txid# 281474976715666 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-24T20:38:13.989530Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T20:38:13.989555Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037889 dest 72075186224037890 producer 72075186224037889 txId 281474976715666 2025-06-24T20:38:13.989598Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 1531 txid# 281474976715666 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-24T20:38:13.989756Z node 7 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 1531} 2025-06-24T20:38:13.990080Z node 7 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037890 step# 1531} 2025-06-24T20:38:13.990546Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T20:38:13.990607Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1531:281474976715666] at 72075186224037889 on unit ExecuteWrite 2025-06-24T20:38:13.990659Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 3 at 72075186224037889 from 72075186224037889 to 72075186224037890 txId 281474976715666 2025-06-24T20:38:13.990721Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1531:281474976715666] at 72075186224037889 on unit CompleteWrite 2025-06-24T20:38:13.990781Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T20:38:13.990909Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-24T20:38:13.991094Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-24T20:38:13.991121Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1531:281474976715666] at 72075186224037890 on unit ExecuteWrite 2025-06-24T20:38:13.991214Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 3 at 72075186224037890 from 72075186224037890 to 72075186224037889 txId 281474976715666 2025-06-24T20:38:13.991266Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1531:281474976715666] at 72075186224037890 on unit CompleteWrite 2025-06-24T20:38:13.991306Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T20:38:13.991353Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-24T20:38:13.991485Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [7:664:2549], Recipient [7:668:2551]: {TEvReadSet step# 1531 txid# 281474976715666 
TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-24T20:38:13.991517Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T20:38:13.991549Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037889 dest 72075186224037890 producer 72075186224037889 txId 281474976715666 2025-06-24T20:38:13.991614Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 1531 txid# 281474976715666 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-24T20:38:13.991766Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:755: Complete volatile write [1531 : 281474976715666] from 72075186224037890 at tablet 72075186224037890 send result to client [7:1029:2789] 2025-06-24T20:38:13.992250Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T20:38:13.992499Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [7:668:2551], Recipient [7:664:2549]: {TEvReadSet step# 1531 txid# 281474976715666 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-24T20:38:13.992549Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T20:38:13.992582Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037890 dest 72075186224037889 producer 72075186224037890 txId 281474976715666 2025-06-24T20:38:13.992632Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 1531 txid# 281474976715666 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-24T20:38:13.992739Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:755: Complete volatile write [1531 : 281474976715666] from 72075186224037889 at tablet 72075186224037889 send result to client [7:1029:2789] 2025-06-24T20:38:13.993353Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 >> TxUsage::WriteToTopic_Demo_14_Table [GOOD] >> TxUsage::WriteToTopic_Demo_28_Table [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-52 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-53 >> KqpScan::RemoteShardScan >> TPQCompatTest::DiscoverTopics [GOOD] >> TPQCompatTest::SetupLockSession >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-51 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-52 >> TPersQueueTest::CheckKillBalancer [GOOD] >> TPersQueueTest::CheckDeleteTopic >> TxUsage::WriteToTopic_Demo_14_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::UpsertLostPrepareArbiterRestart [GOOD] Test command err: 2025-06-24T20:37:34.030908Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:37:34.031559Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:37:34.031798Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045fa/r3tmp/tmpeYCgLe/pdisk_1.dat 2025-06-24T20:37:34.415593Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:37:34.418796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:37:34.464083Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:34.469712Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797450353195 != 1750797450353199 2025-06-24T20:37:34.524501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:34.524682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:34.537254Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:37:34.621920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:34.675391Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:37:34.676619Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:37:34.677084Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T20:37:34.677320Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:37:34.724629Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:37:34.725440Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:37:34.725629Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:37:34.727514Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T20:37:34.727594Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:37:34.727635Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:37:34.727936Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:37:34.728067Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:37:34.728142Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T20:37:34.739158Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:37:34.765461Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:37:34.765708Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:37:34.765822Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T20:37:34.765918Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:37:34.765975Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:37:34.766020Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:37:34.766255Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:37:34.766304Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:37:34.766636Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:37:34.766750Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:37:34.766821Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:37:34.766869Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:37:34.766912Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:37:34.766947Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:37:34.766981Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:37:34.767028Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:37:34.767077Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:37:34.767252Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:37:34.767304Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:37:34.767367Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T20:37:34.767811Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T20:37:34.767868Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:37:34.767984Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:37:34.768236Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T20:37:34.768308Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:37:34.768421Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:37:34.768480Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20:37:34.768532Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T20:37:34.768572Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T20:37:34.768603Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:37:34.768898Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T20:37:34.768941Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T20:37:34.768979Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T20:37:34.769010Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:37:34.769055Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T20:37:34.769082Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T20:37:34.769130Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T20:37:34.769162Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T20:37:34.769188Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T20:37:34.770797Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T20:37:34.770851Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:37:34.781598Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:37:34.781671Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:37:34.781711Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:37:34.781771Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... : NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:15.625274Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:15.625322Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037890, clientId# [7:923:2739], serverId# [7:924:2740], sessionId# [0:0:0] 2025-06-24T20:38:15.625427Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553169, Sender [7:922:2738], Recipient [7:676:2556]: NKikimrTxDataShard.TEvGetInfoRequest 2025-06-24T20:38:15.626270Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [7:927:2743], Recipient [7:676:2556]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:15.626324Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:15.626363Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037890, clientId# [7:926:2742], serverId# [7:927:2743], sessionId# [0:0:0] 2025-06-24T20:38:15.626577Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [7:925:2741], Recipient [7:676:2556]: NKikimrTxDataShard.TEvRead ReadId: 1002 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC RangesSize: 1 2025-06-24T20:38:15.626705Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037890, FollowerId 0 2025-06-24T20:38:15.626750Z node 7 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037890 CompleteEdge# v1001/1000001 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-24T20:38:15.626786Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037890 changed HEAD read to non-repeatable v4000/18446744073709551615 2025-06-24T20:38:15.626837Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit CheckRead 2025-06-24T20:38:15.626919Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-06-24T20:38:15.626952Z node 7 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit CheckRead 2025-06-24T20:38:15.626983Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037890 to execution unit BuildAndWaitDependencies 2025-06-24T20:38:15.627013Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit BuildAndWaitDependencies 2025-06-24T20:38:15.627064Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 72075186224037890 2025-06-24T20:38:15.627101Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-06-24T20:38:15.627127Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit BuildAndWaitDependencies 2025-06-24T20:38:15.651292Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037890 to execution unit ExecuteRead 2025-06-24T20:38:15.651399Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit ExecuteRead 2025-06-24T20:38:15.651551Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037890 Execute read# 1, request: { ReadId: 1002 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC } 2025-06-24T20:38:15.651745Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037890 Complete read# {[7:925:2741], 1002} after executionsCount# 1 2025-06-24T20:38:15.651798Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037890 read iterator# {[7:925:2741], 1002} sends rowCount# 0, bytes# 0, quota rows left# 18446744073709551615, quota bytes left# 18446744073709551615, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T20:38:15.651879Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037890 read iterator# {[7:925:2741], 1002} finished in read 2025-06-24T20:38:15.651944Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-06-24T20:38:15.651979Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit ExecuteRead 2025-06-24T20:38:15.652008Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037890 to execution unit CompletedOperations 2025-06-24T20:38:15.652036Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit CompletedOperations 2025-06-24T20:38:15.652089Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-06-24T20:38:15.652114Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit CompletedOperations 2025-06-24T20:38:15.652139Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:3] at 72075186224037890 has finished 2025-06-24T20:38:15.652172Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037890 2025-06-24T20:38:15.652277Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037890 2025-06-24T20:38:15.653229Z 
node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [7:930:2746], Recipient [7:674:2554]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:15.653290Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:15.653333Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037891, clientId# [7:929:2745], serverId# [7:930:2746], sessionId# [0:0:0] 2025-06-24T20:38:15.653472Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553169, Sender [7:928:2744], Recipient [7:674:2554]: NKikimrTxDataShard.TEvGetInfoRequest 2025-06-24T20:38:15.654329Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [7:933:2749], Recipient [7:674:2554]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:15.654377Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:15.654413Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037891, clientId# [7:932:2748], serverId# [7:933:2749], sessionId# [0:0:0] 2025-06-24T20:38:15.654560Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [7:931:2747], Recipient [7:674:2554]: NKikimrTxDataShard.TEvRead ReadId: 1003 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC RangesSize: 1 2025-06-24T20:38:15.654687Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037891, FollowerId 0 2025-06-24T20:38:15.654737Z node 7 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037891 CompleteEdge# v1000/281474976715657 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-24T20:38:15.654772Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037891 changed HEAD read to non-repeatable v4000/18446744073709551615 2025-06-24T20:38:15.654820Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit CheckRead 2025-06-24T20:38:15.654894Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-06-24T20:38:15.654927Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit CheckRead 2025-06-24T20:38:15.654957Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037891 to execution unit BuildAndWaitDependencies 2025-06-24T20:38:15.654985Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit BuildAndWaitDependencies 2025-06-24T20:38:15.655033Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 72075186224037891 2025-06-24T20:38:15.655071Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-06-24T20:38:15.655096Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit BuildAndWaitDependencies 2025-06-24T20:38:15.655120Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037891 to 
execution unit ExecuteRead 2025-06-24T20:38:15.658894Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit ExecuteRead 2025-06-24T20:38:15.659100Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037891 Execute read# 1, request: { ReadId: 1003 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC } 2025-06-24T20:38:15.659279Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037891 Complete read# {[7:931:2747], 1003} after executionsCount# 1 2025-06-24T20:38:15.659335Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037891 read iterator# {[7:931:2747], 1003} sends rowCount# 0, bytes# 0, quota rows left# 18446744073709551615, quota bytes left# 18446744073709551615, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T20:38:15.659415Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037891 read iterator# {[7:931:2747], 1003} finished in read 2025-06-24T20:38:15.659476Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-06-24T20:38:15.659511Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit ExecuteRead 2025-06-24T20:38:15.659542Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037891 to execution unit CompletedOperations 2025-06-24T20:38:15.659574Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit CompletedOperations 2025-06-24T20:38:15.659626Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-06-24T20:38:15.659656Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit CompletedOperations 2025-06-24T20:38:15.659682Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 72075186224037891 has finished 2025-06-24T20:38:15.659717Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037891 2025-06-24T20:38:15.659814Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037891 |90.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest >> TxUsage::WriteToTopic_Demo_28_Query >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-34 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-35 >> TxUsage::WriteToTopic_Demo_9_Table [GOOD] >> DataShardWrite::ImmediateAndPlannedCommittedOpsRace [GOOD] >> TDataShardLocksTest::Points_OneTx >> TDataShardLocksTest::Points_OneTx [GOOD] >> TDataShardLocksTest::Points_ManyTx_RemoveAll >> DataShardVolatile::DistributedUpsertRestartBeforePrepare-UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartAfterPrepare+UseSink >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-4 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-5 >> TDataShardLocksTest::Points_ManyTx_RemoveAll [GOOD] >> TDataShardLocksTest::UseLocksCache >> TxUsage::WriteToTopic_Demo_9_Query >> 
TDataShardLocksTest::MvccTestOooTxDoesntBreakPrecedingReadersLocks [GOOD] >> TDataShardLocksTest::MvccTestOutdatedLocksRemove [GOOD] >> TDataShardLocksTest::MvccTestBreakEdge [GOOD] >> TDataShardLocksTest::MvccTestAlreadyBrokenLocks [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::ImmediateAndPlannedCommittedOpsRace [GOOD] Test command err: 2025-06-24T20:37:33.822736Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:37:33.823211Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:37:33.823418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045f6/r3tmp/tmpSLTAG7/pdisk_1.dat 2025-06-24T20:37:34.182156Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:37:34.185685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:37:34.229858Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:34.235191Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797450832350 != 1750797450832354 2025-06-24T20:37:34.286464Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:34.286654Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:34.298749Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:37:34.393786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:34.439974Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:37:34.441483Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:37:34.442060Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T20:37:34.442372Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:37:34.486992Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:37:34.487832Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:37:34.488017Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:37:34.489773Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T20:37:34.489849Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:37:34.489893Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:37:34.490248Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:37:34.490390Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:37:34.490480Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T20:37:34.501516Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:37:34.545112Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:37:34.545391Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:37:34.545528Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T20:37:34.545571Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:37:34.545632Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:37:34.545673Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:37:34.546142Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:37:34.546196Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:37:34.546530Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:37:34.546651Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:37:34.546746Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:37:34.546805Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:37:34.546857Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:37:34.546896Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:37:34.546939Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:37:34.546991Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:37:34.547053Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:37:34.547243Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:37:34.547287Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:37:34.547333Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T20:37:34.547776Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T20:37:34.547841Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:37:34.547955Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:37:34.548221Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T20:37:34.548300Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:37:34.548421Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:37:34.548485Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20:37:34.548547Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T20:37:34.548600Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T20:37:34.548646Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:37:34.548968Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T20:37:34.549007Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T20:37:34.549052Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T20:37:34.549087Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:37:34.549137Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T20:37:34.549172Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T20:37:34.549213Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T20:37:34.549247Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T20:37:34.549277Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T20:37:34.550897Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T20:37:34.550957Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:37:34.561860Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:37:34.561968Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:37:34.562012Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:37:34.562060Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 1234567890011 ... observed 2 more commits after readset unblock ... unblocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR ... unblocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR ... unblocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR ... unblocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR 2025-06-24T20:38:18.822734Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:38:18.822843Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [2000:1234567890012] at 72075186224037888 on unit StoreAndSendWriteOutRS 2025-06-24T20:38:18.822908Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 2 at 72075186224037888 from 72075186224037888 to 72075186224037889 txId 1234567890012 2025-06-24T20:38:18.823280Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:38:18.823325Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [2000:1234567890012] at 72075186224037888 on unit CompleteWrite 2025-06-24T20:38:18.823403Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [2000 : 1234567890012] from 72075186224037888 at tablet 72075186224037888 send result to client [7:797:2649] 2025-06-24T20:38:18.823478Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:38:18.823637Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2025-06-24T20:38:18.823884Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:38:18.823918Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1500:1234567890011] at 72075186224037888 on unit CompleteWrite 2025-06-24T20:38:18.823968Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [1500 : 1234567890011] from 72075186224037888 at tablet 72075186224037888 send result to client [7:760:2624] 2025-06-24T20:38:18.824059Z 
node 7 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 72075186224037888 {TEvReadSet step# 1500 txid# 1234567890011 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2025-06-24T20:38:18.824126Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:38:18.824319Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [7:656:2544], Recipient [7:659:2546]: {TEvReadSet step# 2000 txid# 1234567890012 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 2 Flags# 0} 2025-06-24T20:38:18.824393Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T20:38:18.824472Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037888 dest 72075186224037889 producer 72075186224037888 txId 1234567890012 2025-06-24T20:38:18.824596Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 2000 txid# 1234567890012 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 2 Flags# 0} 2025-06-24T20:38:18.824939Z node 7 :TX_DATASHARD TRACE: operation.cpp:67: Filled readset for [2000:1234567890012] from=72075186224037888 to=72075186224037889origin=72075186224037888 2025-06-24T20:38:18.825059Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-24T20:38:18.825210Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [7:656:2544], Recipient [7:659:2546]: {TEvReadSet step# 1500 txid# 1234567890011 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2025-06-24T20:38:18.825256Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T20:38:18.825323Z node 7 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 1234567890011 2025-06-24T20:38:18.825496Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:38:18.825548Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:8] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T20:38:18.825604Z node 7 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 8 at tablet 72075186224037888 send to client, propose latency: 3 ms, status: STATUS_COMPLETED 2025-06-24T20:38:18.825724Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:38:18.825918Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [7:659:2546], Recipient [7:659:2546]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:38:18.825970Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:38:18.826055Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 
72075186224037889 2025-06-24T20:38:18.826134Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 1 active planned 1 immediate 0 planned 1 2025-06-24T20:38:18.826198Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [2000:1234567890012] at 72075186224037889 for LoadAndWaitInRS 2025-06-24T20:38:18.826263Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2000:1234567890012] at 72075186224037889 on unit LoadAndWaitInRS 2025-06-24T20:38:18.826319Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [2000:1234567890012] at 72075186224037889 is Executed 2025-06-24T20:38:18.826385Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2000:1234567890012] at 72075186224037889 executing on unit LoadAndWaitInRS 2025-06-24T20:38:18.826438Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2000:1234567890012] at 72075186224037889 to execution unit ExecuteWrite 2025-06-24T20:38:18.826494Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2000:1234567890012] at 72075186224037889 on unit ExecuteWrite 2025-06-24T20:38:18.826543Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [2000:1234567890012] at 72075186224037889 2025-06-24T20:38:18.826611Z node 7 :TX_DATASHARD TRACE: execute_write_unit.cpp:390: Operation [2000:1234567890012] at 72075186224037889 aborting because locks are not valid 2025-06-24T20:38:18.826691Z node 7 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=1234567890012; 2025-06-24T20:38:18.826786Z node 7 :TX_DATASHARD INFO: datashard_write_operation.cpp:707: Write transaction 1234567890012 at 72075186224037889 has an error: Operation is aborting because locks are not valid 2025-06-24T20:38:18.826856Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [2000:1234567890012] at 72075186224037889 is Executed 2025-06-24T20:38:18.826886Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2000:1234567890012] at 72075186224037889 executing on unit ExecuteWrite 2025-06-24T20:38:18.826926Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2000:1234567890012] at 72075186224037889 to execution unit CompleteWrite 2025-06-24T20:38:18.826955Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2000:1234567890012] at 72075186224037889 on unit CompleteWrite 2025-06-24T20:38:18.827319Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [2000:1234567890012] at 72075186224037889 is DelayComplete 2025-06-24T20:38:18.827378Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2000:1234567890012] at 72075186224037889 executing on unit CompleteWrite 2025-06-24T20:38:18.827458Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2000:1234567890012] at 72075186224037889 to execution unit CompletedOperations 2025-06-24T20:38:18.827507Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2000:1234567890012] at 72075186224037889 on unit CompletedOperations 2025-06-24T20:38:18.827556Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [2000:1234567890012] at 72075186224037889 is Executed 2025-06-24T20:38:18.827584Z node 7 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1910: Advance execution plan for [2000:1234567890012] at 72075186224037889 executing on unit CompletedOperations 2025-06-24T20:38:18.827624Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [2000:1234567890012] at 72075186224037889 has finished 2025-06-24T20:38:18.827683Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:38:18.827738Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-06-24T20:38:18.827788Z node 7 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-24T20:38:18.827840Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-24T20:38:18.828656Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T20:38:18.828719Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [2000:1234567890012] at 72075186224037889 on unit CompleteWrite 2025-06-24T20:38:18.828772Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [2000 : 1234567890012] from 72075186224037889 at tablet 72075186224037889 send result to client [7:797:2649] 2025-06-24T20:38:18.828843Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 72075186224037889 {TEvReadSet step# 2000 txid# 1234567890012 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletConsumer# 72075186224037889 Flags# 0 Seqno# 2} 2025-06-24T20:38:18.828892Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T20:38:18.829041Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [7:659:2546], Recipient [7:656:2544]: {TEvReadSet step# 2000 txid# 1234567890012 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletConsumer# 72075186224037889 Flags# 0 Seqno# 2} 2025-06-24T20:38:18.829083Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T20:38:18.829126Z node 7 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 1234567890012 |90.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_locks/unittest >> TDataShardLocksTest::MvccTestAlreadyBrokenLocks [GOOD] >> KqpTx::DeferredEffects >> TTopicYqlTest::DropTopicYql [GOOD] >> TTopicYqlTest::CreateTopicYqlBackCompatibility |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/fqrun/fqrun |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/fqrun/fqrun |90.0%| [LD] {RESULT} $(B)/ydb/tests/tools/fqrun/fqrun >> TxUsage::WriteToTopic_Demo_37_Query [GOOD] >> TPersQueueTest::DirectReadCleanCache [GOOD] >> TPersQueueTest::DirectReadRestartPQRB >> KqpLocks::TwoPhaseTx >> DataShardOutOfOrder::TestShardRestartDuringWaitingRead [GOOD] >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite2-withSink >> TxUsage::WriteToTopic_Demo_38_Table >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-52 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-53 >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Table [GOOD] >> 
TStorageBalanceTest::TestScenario3 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestShardRestartDuringWaitingRead [GOOD] Test command err: 2025-06-24T20:38:11.649734Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:38:11.650497Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:38:11.650732Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002e63/r3tmp/tmpFzcXKD/pdisk_1.dat 2025-06-24T20:38:12.063412Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:38:12.066944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:38:12.103793Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:12.108140Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797487469197 != 1750797487469201 2025-06-24T20:38:12.156425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:12.156581Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:12.168514Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:38:12.272611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:12.809287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:12.936317Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:38:13.177787Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:870:2682], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:13.177927Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:880:2687], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:13.178008Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:13.183889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:13.352097Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:884:2690], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T20:38:13.487532Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:940:2727] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:38:13.876103Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhtk8xq157zsa5bbx28wgg9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjkxNDY2ODktZWYxNTc4MmUtNjg1MmVlM2EtZjA5NDk3OWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:38:13.966710Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhtk9mefgx5fdf8jhyqcjnr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2E3YmM5ZjItY2MwOTkyZTQtZGU1NDBlY2QtY2RiYzA5Zjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:38:14.457947Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhtk9qj9mrdycewsp9kbpee, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzFkZWI5ZDctNDQ1YjBiYmMtYzhkM2E0M2YtMzBmZmRjNDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 1 } } ... waiting for commit read sets 2025-06-24T20:38:14.593376Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhtka6z66hcb8fz827kzp1d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzFkZWI5ZDctNDQ1YjBiYmMtYzhkM2E0M2YtMzBmZmRjNDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... sending immediate upsert ... waiting for immediate propose 2025-06-24T20:38:14.699389Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhtkaanckq95w51xff1jz0t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTUzNzU4NDItZmQwZTY5OTItM2JiNzYxYjktZjI5MTIzOWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... immediate upsert is blocked 2025-06-24T20:38:14.703741Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_OVERLOADED;details=Rejecting immediate write tx 281474976715666 because datashard 72075186224037889 is restarting;tx_id=281474976715666; 2025-06-24T20:38:14.714950Z node 1 :KQP_COMPUTE WARN: kqp_write_actor.cpp:734: SelfId: [1:1138:2780], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [1:1017:2780]Got OVERLOADED for table `/Root/table-1`. ShardID=72075186224037889, Sink=[1:1138:2780]. Ignored this error.{
: Error: Rejecting immediate write tx 281474976715666 because datashard 72075186224037889 is restarting, code: 2006 } 2025-06-24T20:38:14.715604Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:1131:2780], SessionActorId: [1:1017:2780], statusCode=OVERLOADED. Issue=
: Error: Kikimr cluster or one of its subsystems is overloaded. Tablet 72075186224037889 is overloaded. Table `/Root/table-1`., code: 2006
: Error: Rejecting immediate write tx 281474976715666 because datashard 72075186224037889 is restarting, code: 2006 . sessionActorId=[1:1017:2780]. isRollback=0 2025-06-24T20:38:14.715985Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=YTUzNzU4NDItZmQwZTY5OTItM2JiNzYxYjktZjI5MTIzOWQ=, ActorId: [1:1017:2780], ActorState: ExecuteState, TraceId: 01jyhtkaanckq95w51xff1jz0t, got TEvKqpBuffer::TEvError in ExecuteState, status: OVERLOADED send to: [1:1132:2780] from: [1:1131:2780] 2025-06-24T20:38:14.716821Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:1132:2780] TxId: 281474976715665. Ctx: { TraceId: 01jyhtkaanckq95w51xff1jz0t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTUzNzU4NDItZmQwZTY5OTItM2JiNzYxYjktZjI5MTIzOWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. OVERLOADED: {
: Error: Kikimr cluster or one of its subsystems is overloaded. Tablet 72075186224037889 is overloaded. Table `/Root/table-1`., code: 2006 subissue: {
: Error: Rejecting immediate write tx 281474976715666 because datashard 72075186224037889 is restarting, code: 2006 } } 2025-06-24T20:38:14.727603Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YTUzNzU4NDItZmQwZTY5OTItM2JiNzYxYjktZjI5MTIzOWQ=, ActorId: [1:1017:2780], ActorState: ExecuteState, TraceId: 01jyhtkaanckq95w51xff1jz0t, Create QueryResponse for error on request, msg: 2025-06-24T20:38:14.729371Z node 1 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1103: SelfId: [1:1104:2782], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [1:1019:2782]TEvDeliveryProblem was received from tablet: 72075186224037889 2025-06-24T20:38:14.729528Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:1094:2782], SessionActorId: [1:1019:2782], statusCode=UNDETERMINED. Issue=
: Error: State of operation is unknown. Error writing to table `/Root/table-1`. Transaction state unknown for tablet 72075186224037889., code: 2026 . sessionActorId=[1:1019:2782]. isRollback=0 2025-06-24T20:38:14.730283Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=YzFkZWI5ZDctNDQ1YjBiYmMtYzhkM2E0M2YtMzBmZmRjNDI=, ActorId: [1:1019:2782], ActorState: ExecuteState, TraceId: 01jyhtka6z66hcb8fz827kzp1d, got TEvKqpBuffer::TEvError in ExecuteState, status: UNDETERMINED send to: [1:1095:2782] from: [1:1094:2782] 2025-06-24T20:38:14.730824Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:1095:2782] TxId: 281474976715664. Ctx: { TraceId: 01jyhtka6z66hcb8fz827kzp1d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzFkZWI5ZDctNDQ1YjBiYmMtYzhkM2E0M2YtMzBmZmRjNDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNDETERMINED: {
: Error: State of operation is unknown. Error writing to table `/Root/table-1`. Transaction state unknown for tablet 72075186224037889., code: 2026 } 2025-06-24T20:38:14.731971Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YzFkZWI5ZDctNDQ1YjBiYmMtYzhkM2E0M2YtMzBmZmRjNDI=, ActorId: [1:1019:2782], ActorState: ExecuteState, TraceId: 01jyhtka6z66hcb8fz827kzp1d, Create QueryResponse for error on request, msg: 2025-06-24T20:38:15.164439Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyhtkakz6y80161420yg627b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWRiMjhmMzktZTA1Zjc3LTkzMzgyNThlLWVkZmE0OGRj, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } } 2025-06-24T20:38:21.086506Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:38:21.087607Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:38:21.087784Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002e63/r3tmp/tmpe4stnC/pdisk_1.dat 2025-06-24T20:38:21.590104Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-24T20:38:21.592136Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:38:21.632459Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:21.633825Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1750797495961738 != 1750797495961742 2025-06-24T20:38:21.686178Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:21.686323Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:21.701650Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:38:21.813621Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:22.259765Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:22.416800Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:38:22.751464Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:781:2634], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:22.751584Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:791:2639], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:22.751666Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:22.757526Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:22.954624Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:795:2642], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T20:38:22.999422Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:851:2679] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:38:23.218630Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhtkj8x7q4ws1cgw29envam, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OWZiNzk2OGUtMTg2ZmRiNjEtYjRjZmI5MmItNjQ4ZjkxNWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:38:23.436534Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhtkjrk1fq65jntekdskpw2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTk0OTk1NjYtOWI4MGFlMGEtNjI4MWQyYjUtYmNmZWMyZGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... waiting for readsets 2025-06-24T20:38:24.252144Z node 2 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1103: SelfId: [2:937:2718], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [2:913:2718]TEvDeliveryProblem was received from tablet: 72075186224037888 2025-06-24T20:38:24.252304Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:927:2718], SessionActorId: [2:913:2718], statusCode=UNDETERMINED. Issue=
: Error: State of operation is unknown. Error writing to table `/Root/table-1`. Transaction state unknown for tablet 72075186224037888., code: 2026 . sessionActorId=[2:913:2718]. isRollback=0 2025-06-24T20:38:24.252560Z node 2 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715664, task: 1, CA Id [2:963:2756]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 0 2025-06-24T20:38:24.252939Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=NTc4YzliZDctYzYxMTczYzItNGVlZWJjZjUtNDRjY2FhMTE=, ActorId: [2:913:2718], ActorState: ExecuteState, TraceId: 01jyhtkjz2fwdx7f4y587xas5h, got TEvKqpBuffer::TEvError in ExecuteState, status: UNDETERMINED send to: [2:928:2718] from: [2:927:2718] 2025-06-24T20:38:24.253463Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:928:2718] TxId: 281474976715663. Ctx: { TraceId: 01jyhtkjz2fwdx7f4y587xas5h, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTc4YzliZDctYzYxMTczYzItNGVlZWJjZjUtNDRjY2FhMTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNDETERMINED: {
: Error: State of operation is unknown. Error writing to table `/Root/table-1`. Transaction state unknown for tablet 72075186224037888., code: 2026 } 2025-06-24T20:38:24.254493Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NTc4YzliZDctYzYxMTczYzItNGVlZWJjZjUtNDRjY2FhMTE=, ActorId: [2:913:2718], ActorState: ExecuteState, TraceId: 01jyhtkjz2fwdx7f4y587xas5h, Create QueryResponse for error on request, msg: { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 3 } items { uint32_value: 30 } } >> TopicAutoscaling::ReadingAfterSplitTest_PQv1 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-5 [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_AutoscaleAwareSDK >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-6 >> DataShardVolatile::UpsertBrokenLockArbiterRestart-UseSink [GOOD] >> DataShardVolatile::UpsertDependenciesShardsRestart+UseSink >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-35 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-36 >> KqpScan::ScanRetryRead [GOOD] >> KqpScan::ScanRetryReadRanges |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export |90.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot |90.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Query >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-53 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-54 |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source |90.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source >> TestYmqHttpProxy::TestSendMessageEmptyQueueUrl |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut |90.0%| [LD] {RESULT} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut >> LocalPartition::WithoutPartitionWithSplit [GOOD] >> TxUsage::SessionAbort_Table |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move |90.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> TStorageBalanceTest::TestScenario3 [GOOD] Test command err: c[def1] ---------------------------------------------------------------------------------------------------- (0) 2025-06-24T20:31:22.052780Z node 1 :BS_NODE DEBUG: 
{NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-24T20:31:22.167519Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:22.167849Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-24T20:31:22.169017Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:31:22.169408Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-24T20:31:22.170579Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-24T20:31:22.170641Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-24T20:31:22.191906Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:31:2076] ControllerId# 72057594037932033 2025-06-24T20:31:22.191969Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-24T20:31:22.192099Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-24T20:31:22.192236Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-24T20:31:22.203967Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-24T20:31:22.204011Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-24T20:31:22.205718Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:38:2081] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:22.205845Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:39:2082] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:22.205934Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:40:2083] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:22.206063Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:41:2084] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:22.206166Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:42:2085] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:22.206250Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:43:2086] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:22.206347Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:44:2087] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:22.206373Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 
SetStateEstablishingSessions Marker# DSP03 2025-06-24T20:31:22.206448Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:31:2076] 2025-06-24T20:31:22.206474Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:31:2076] 2025-06-24T20:31:22.206512Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-24T20:31:22.206587Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-24T20:31:22.207093Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-24T20:31:22.207384Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:22.214413Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [1:21:2063] 2025-06-24T20:31:22.214473Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [1:21:2063] 2025-06-24T20:31:22.214705Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:31:2076] 2025-06-24T20:31:22.214771Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:22.214816Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-24T20:31:22.214980Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:22.241649Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:22.241731Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-24T20:31:22.257922Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-24T20:31:22.258160Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:31:22.266495Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-24T20:31:22.266617Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-24T20:31:22.266698Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-24T20:31:22.267043Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:31:2076] 2025-06-24T20:31:22.267095Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-24T20:31:22.267137Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:22.267247Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 
TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:22.271582Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-24T20:31:22.271678Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-24T20:31:22.271724Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-24T20:31:22.271853Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:342} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\363\365\\\016\336\205\240m2\241c\3010\003\261\342\227\n\267}" } 2025-06-24T20:31:22.272055Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:22.272329Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936129 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:31:22.272618Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:22.272875Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 0} 2025-06-24T20:31:22.272932Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-24T20:31:22.273001Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 2025-06-24T20:31:22.273088Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 2} 2025-06-24T20:31:22.273150Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:22.273324Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72057594037932033 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72057594037932033 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[1:24343667:0] : 2}, {[1:2199047599219:0] : 8}, {[1:1099535971443:0] : 5}}}} 2025-06-24T20:31:22.273371Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72057594037932033 followers: 0 2025-06-24T20:31:22.273632Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72057594037932033] forward result error, check reconnect [1:31:2076] 2025-06-24T20:31:22.273684Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:562: TClient[72057594037932033] schedule retry [1:31:2076] 
2025-06-24T20:31:22.273737Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639248 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-24T20:31:22.277084Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 1} 2025-06-24T20:31:22.277193Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [1:54:2093] 2025-06-24T20:31:22.277230Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [1:54:2093] 2025-06-24T20:31:22.278552Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:22.278668Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StInit ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:22.278760Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 7205759 ... TEvVPutResult{ TimestampMs# 9.609 VDiskId# [0:1:0:0:0] NodeId# 12 Status# OK } ] } 2025-06-24T20:38:22.582647Z node 12 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:494:0:0:246:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-24T20:38:22.582826Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:495} commited cookie 1 for step 494 2025-06-24T20:38:22.589173Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:495} Tx{1497, NKikimr::NHive::TTxReassignGroups} queued, type NKikimr::NHive::TTxReassignGroups 2025-06-24T20:38:22.589262Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:495} Tx{1497, NKikimr::NHive::TTxReassignGroups} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:38:22.589506Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:495} Tx{1497, NKikimr::NHive::TTxReassignGroups} hope 1 -> done Change{1000, redo 303b alter 0b annex 0, ~{ 1, 2 } -{ }, 0 gb} 2025-06-24T20:38:22.589559Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:495} Tx{1497, NKikimr::NHive::TTxReassignGroups} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:38:22.589691Z node 12 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594037932033] send [12:1369:2260] 2025-06-24T20:38:22.589729Z node 12 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037932033] push event to server [12:1369:2260] 2025-06-24T20:38:22.589786Z node 12 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037932033] HandleSend Sender# [12:1303:2222] EventType# 268637702 c[def1] ****------------------------------------------------------------------------------------------------ (0.044) *****----------------------------------------------------------------------------------------------- (0.054) ******---------------------------------------------------------------------------------------------- (0.056) *****----------------------------------------------------------------------------------------------- (0.048) *****----------------------------------------------------------------------------------------------- (0.05) ******---------------------------------------------------------------------------------------------- (0.064) 
*******--------------------------------------------------------------------------------------------- (0.066) *****----------------------------------------------------------------------------------------------- (0.052) *******--------------------------------------------------------------------------------------------- (0.066) *****----------------------------------------------------------------------------------------------- (0.046) *****----------------------------------------------------------------------------------------------- (0.054) 2025-06-24T20:38:22.699611Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:496} Tx{1498, NKikimr::NHive::TTxUpdateTabletGroups} queued, type NKikimr::NHive::TTxUpdateTabletGroups 2025-06-24T20:38:22.699701Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:496} Tx{1498, NKikimr::NHive::TTxUpdateTabletGroups} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:38:22.699841Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:272: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923006398016}: tablet 72075186224037902 wasn't changed 2025-06-24T20:38:22.699886Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923006398016}: tablet 72075186224037902 skipped channel 0 2025-06-24T20:38:22.699976Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923006398016}: tablet 72075186224037902 skipped channel 1 2025-06-24T20:38:22.700018Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923006398016}: tablet 72075186224037902 skipped channel 2 2025-06-24T20:38:22.700097Z node 12 :HIVE NOTICE: tx__update_tablet_groups.cpp:326: HIVE#72057594037927937 THive::TTxUpdateTabletGroups{88923006398016}(72075186224037902)::Execute - TryToBoot was not successfull 2025-06-24T20:38:22.700172Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:496} Tx{1498, NKikimr::NHive::TTxUpdateTabletGroups} hope 1 -> done Change{1001, redo 257b alter 0b annex 0, ~{ 2, 1 } -{ }, 0 gb} 2025-06-24T20:38:22.700227Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:496} Tx{1498, NKikimr::NHive::TTxUpdateTabletGroups} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:38:22.720118Z node 12 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [17d188fef1315c01] bootstrap ActorId# [12:11810:4497] Group# 0 BlobCount# 1 BlobIDs# [[72057594037927937:2:495:0:0:246:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-24T20:38:22.720282Z node 12 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [17d188fef1315c01] Id# [72057594037927937:2:495:0:0:246:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T20:38:22.720332Z node 12 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [17d188fef1315c01] restore Id# [72057594037927937:2:495:0:0:246:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T20:38:22.720393Z node 12 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [17d188fef1315c01] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:495:0:0:246:1] Marker# BPG33 2025-06-24T20:38:22.720450Z node 12 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [17d188fef1315c01] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:495:0:0:246:1] Marker# BPG32 2025-06-24T20:38:22.720582Z node 12 :BS_PROXY DEBUG: 
group_sessions.h:165: Send to queueActorId# [12:333:2090] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:495:0:0:246:1] FDS# 246 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T20:38:22.729538Z node 12 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [17d188fef1315c01] received {EvVPutResult Status# OK ID# [72057594037927937:2:495:0:0:246:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 512 } Cost# 81937 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 513 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-24T20:38:22.729694Z node 12 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [17d188fef1315c01] Result# TEvPutResult {Id# [72057594037927937:2:495:0:0:246:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-24T20:38:22.729758Z node 12 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [17d188fef1315c01] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:495:0:0:246:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-24T20:38:22.729896Z node 12 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.895 sample PartId# [72057594037927937:2:495:0:0:246:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 12 } TEvVPutResult{ TimestampMs# 9.907 VDiskId# [0:1:0:0:0] NodeId# 12 Status# OK } ] } 2025-06-24T20:38:22.730551Z node 12 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:495:0:0:246:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-24T20:38:22.730723Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:496} commited cookie 1 for step 495 2025-06-24T20:38:22.732618Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:496} Tx{1499, NKikimr::NHive::TTxReassignGroups} queued, type NKikimr::NHive::TTxReassignGroups 2025-06-24T20:38:22.732686Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:496} Tx{1499, NKikimr::NHive::TTxReassignGroups} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:38:22.732946Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:496} Tx{1499, NKikimr::NHive::TTxReassignGroups} hope 1 -> done Change{1002, redo 303b alter 0b annex 0, ~{ 1, 2 } -{ }, 0 gb} 2025-06-24T20:38:22.733006Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:496} Tx{1499, NKikimr::NHive::TTxReassignGroups} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:38:22.733138Z node 12 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594037932033] send [12:1369:2260] 2025-06-24T20:38:22.733178Z node 12 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037932033] push event to server [12:1369:2260] 2025-06-24T20:38:22.733241Z node 12 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037932033] HandleSend Sender# [12:1303:2222] EventType# 268637702 c[def1] ****------------------------------------------------------------------------------------------------ (0.044) *****----------------------------------------------------------------------------------------------- (0.054) ******---------------------------------------------------------------------------------------------- (0.056) 
*****----------------------------------------------------------------------------------------------- (0.048) *****----------------------------------------------------------------------------------------------- (0.05) ******---------------------------------------------------------------------------------------------- (0.064) *******--------------------------------------------------------------------------------------------- (0.066) *****----------------------------------------------------------------------------------------------- (0.052) *******--------------------------------------------------------------------------------------------- (0.066) *****----------------------------------------------------------------------------------------------- (0.046) *****----------------------------------------------------------------------------------------------- (0.054) 2025-06-24T20:38:22.835509Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:497} Tx{1500, NKikimr::NHive::TTxUpdateTabletGroups} queued, type NKikimr::NHive::TTxUpdateTabletGroups 2025-06-24T20:38:22.835609Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:497} Tx{1500, NKikimr::NHive::TTxUpdateTabletGroups} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:38:22.835762Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:272: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923006399808}: tablet 72075186224037971 wasn't changed 2025-06-24T20:38:22.835806Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923006399808}: tablet 72075186224037971 skipped channel 0 2025-06-24T20:38:22.835890Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923006399808}: tablet 72075186224037971 skipped channel 1 2025-06-24T20:38:22.835947Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923006399808}: tablet 72075186224037971 skipped channel 2 2025-06-24T20:38:22.836030Z node 12 :HIVE NOTICE: tx__update_tablet_groups.cpp:326: HIVE#72057594037927937 THive::TTxUpdateTabletGroups{88923006399808}(72075186224037971)::Execute - TryToBoot was not successfull 2025-06-24T20:38:22.836108Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:497} Tx{1500, NKikimr::NHive::TTxUpdateTabletGroups} hope 1 -> done Change{1003, redo 257b alter 0b annex 0, ~{ 2, 1 } -{ }, 0 gb} 2025-06-24T20:38:22.836163Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:497} Tx{1500, NKikimr::NHive::TTxUpdateTabletGroups} release 4194304b of static, Memory{0 dyn 0} >> TxUsage::Sinks_Oltp_WriteToTopics_2_Query [GOOD] >> KqpScan::ScanDuringSplit10 [GOOD] >> KqpScan::ScanDuringSplitThenMerge >> KqpTx::DeferredEffects [GOOD] >> KqpTx::CommitStats >> TxUsage::Sinks_Oltp_WriteToTopics_3_Table >> DataShardWrite::DoubleWriteUncommittedThenDoubleReadWithCommit [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Query [GOOD] >> DataShardVolatile::DistributedUpsertRestartAfterPrepare+UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartAfterPrepare-UseSink |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection |90.0%| [LD] {RESULT} 
$(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection >> TPersQueueTest::WhenTheTopicIsDeletedAfterDecompressingTheData_Uncompressed [GOOD] >> TPersQueueTest::TestWriteStat >> TPersQueueTest::StoreNoMoreThanXSourceIDs [GOOD] >> TPersQueueTest::SetupWriteSessionOnDisabledCluster >> TestYmqHttpProxy::TestCreateQueueWithSameNameAndSameParams >> TPersQueueTest::ReadRuleServiceTypeLimit [GOOD] >> TPersQueueTest::ReadRuleDisallowDefaultServiceType >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Table >> KqpLocks::TwoPhaseTx [GOOD] >> KqpLocks::MixedTxFail-useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::DoubleWriteUncommittedThenDoubleReadWithCommit [GOOD] Test command err: 2025-06-24T20:37:32.629439Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:37:32.629809Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:37:32.629981Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045fe/r3tmp/tmpf7Xvlc/pdisk_1.dat 2025-06-24T20:37:32.959986Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:37:32.965213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:37:33.041525Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:33.047001Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797449514537 != 1750797449514541 2025-06-24T20:37:33.096550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:33.096713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:33.108628Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:37:33.194593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:37:33.237954Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:37:33.239126Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:37:33.239655Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T20:37:33.240110Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:37:33.294419Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:37:33.295230Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:37:33.295457Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:37:33.297414Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T20:37:33.297509Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:37:33.297573Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:37:33.298001Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:37:33.298161Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:37:33.298279Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T20:37:33.311686Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:37:33.339276Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:37:33.339479Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:37:33.339587Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T20:37:33.339624Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:37:33.339661Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:37:33.339719Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:37:33.339964Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:37:33.340013Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:37:33.340406Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:37:33.340587Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:37:33.340667Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:37:33.340708Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:37:33.340750Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:37:33.340784Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:37:33.340836Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:37:33.340883Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:37:33.340933Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:37:33.341071Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:37:33.341112Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:37:33.341174Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T20:37:33.341610Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T20:37:33.341654Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:37:33.341767Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:37:33.341982Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T20:37:33.342072Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:37:33.342213Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:37:33.342264Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20:37:33.342304Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T20:37:33.342343Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T20:37:33.342372Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:37:33.342716Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T20:37:33.342761Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T20:37:33.342798Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T20:37:33.342833Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:37:33.342880Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T20:37:33.342923Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T20:37:33.342980Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T20:37:33.343017Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T20:37:33.343042Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T20:37:33.344615Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T20:37:33.344670Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:37:33.356856Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:37:33.356938Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:37:33.359503Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:37:33.359574Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... 888 on unit CompletedOperations 2025-06-24T20:38:31.905332Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-24T20:38:31.905356Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T20:38:31.905381Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished 2025-06-24T20:38:31.905408Z node 9 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T20:38:31.905504Z node 9 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-24T20:38:31.906711Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [9:1571:2400], Recipient [9:1228:2351]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T20:38:31.906791Z node 9 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-24T20:38:31.916435Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [9:1573:2401], Recipient [9:1228:2351]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T20:38:31.916520Z node 9 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-24T20:38:31.925906Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 278003712, Sender [8:1556:2903], Recipient [9:1501:2395] 2025-06-24T20:38:31.925991Z node 9 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-24T20:38:31.926174Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435074, Sender [9:1228:2351], Recipient [9:1228:2351]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-24T20:38:31.926211Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-24T20:38:31.926281Z node 9 :TX_DATASHARD TRACE: datashard__write.cpp:28: TTxWrite:: execute at tablet# 72075186224037888 2025-06-24T20:38:31.926454Z node 9 :TX_DATASHARD TRACE: 
datashard_write_operation.cpp:64: Parsing write transaction for 0 at 72075186224037888, record: TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true } SendingShards: 72075186224037888 ReceivingShards: 72075186224037888 Op: Commit } 2025-06-24T20:38:31.926575Z node 9 :TX_DATASHARD TRACE: key_validator.cpp:33: -- AddReadRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-24T20:38:31.926691Z node 9 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-24T20:38:31.926811Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckWrite 2025-06-24T20:38:31.926872Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T20:38:31.926903Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckWrite 2025-06-24T20:38:31.926931Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T20:38:31.926958Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T20:38:31.927003Z node 9 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v1500/18446744073709551615 ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-24T20:38:31.927062Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-24T20:38:31.927091Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T20:38:31.927112Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T20:38:31.930349Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteWrite 2025-06-24T20:38:31.930439Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteWrite 2025-06-24T20:38:31.930483Z node 9 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:7] at 72075186224037888 2025-06-24T20:38:31.930551Z node 9 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v1500/18446744073709551615 ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-24T20:38:31.930756Z node 9 :TX_DATASHARD TRACE: datashard_kqp.cpp:806: KqpCommitLock LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true 2025-06-24T20:38:31.930829Z node 9 :TX_DATASHARD TRACE: datashard_user_db.cpp:435: Committing changes lockId# 281474976715661 in localTid# 1001 shard# 72075186224037888 2025-06-24T20:38:31.930940Z node 9 :TX_DATASHARD DEBUG: execute_write_unit.cpp:420: Skip 
empty write operation for [0:7] at 72075186224037888 2025-06-24T20:38:31.931137Z node 9 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-24T20:38:31.931225Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20:38:31.931257Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteWrite 2025-06-24T20:38:31.931285Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-24T20:38:31.931313Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T20:38:31.931386Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T20:38:31.931428Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-24T20:38:31.931506Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T20:38:31.931556Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-06-24T20:38:31.931714Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T20:38:31.931743Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T20:38:31.931770Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-24T20:38:31.936525Z node 9 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-24T20:38:31.936614Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:7] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T20:38:31.936650Z node 9 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 7 at tablet 72075186224037888 send to client, propose latency: 1 ms, status: STATUS_COMPLETED 2025-06-24T20:38:31.936754Z node 9 :TX_DATASHARD DEBUG: datashard.cpp:2560: Waiting for PlanStep# 1501 from mediator time cast 2025-06-24T20:38:31.936854Z node 9 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:38:31.938769Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270977, Sender [9:59:2063], Recipient [9:1228:2351]: {TEvNotifyPlanStep TabletId# 72075186224037888 PlanStep# 1501} 2025-06-24T20:38:31.938832Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3172: StateWork, processing event TEvMediatorTimecast::TEvNotifyPlanStep 2025-06-24T20:38:31.938890Z node 9 :TX_DATASHARD DEBUG: datashard.cpp:3780: Notified by mediator time cast with PlanStep# 1501 at tablet 72075186224037888 2025-06-24T20:38:31.938961Z node 9 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 { items { int64_value: 0 } items { int64_value: 1000 } }, { items { int64_value: 1 } items { int64_value: 1001 } }, { items { int64_value: 2 } items { int64_value: 1002 } 
}, { items { int64_value: 3 } items { int64_value: 1003 } }, { items { int64_value: 4 } items { int64_value: 1004 } }, { items { int64_value: 5 } items { int64_value: 1005 } }, { items { int64_value: 6 } items { int64_value: 5001 } } { items { int64_value: 0 } items { int64_value: 2000 } }, { items { int64_value: 1 } items { int64_value: 2001 } }, { items { int64_value: 2 } items { int64_value: 2002 } }, { items { int64_value: 3 } items { int64_value: 2003 } }, { items { int64_value: 4 } items { int64_value: 2004 } }, { items { int64_value: 5 } items { int64_value: 2005 } }, { items { int64_value: 6 } items { int64_value: 5002 } } result_sets { columns { name: "index" type { optional_type { item { type_id: INT64 } } } } columns { name: "value" type { optional_type { item { type_id: INT64 } } } } rows { items { int64_value: 0 } items { int64_value: 1000 } } rows { items { int64_value: 1 } items { int64_value: 1001 } } rows { items { int64_value: 2 } items { int64_value: 1002 } } rows { items { int64_value: 3 } items { int64_value: 1003 } } rows { items { int64_value: 4 } items { int64_value: 1004 } } rows { items { int64_value: 5 } items { int64_value: 1005 } } rows { items { int64_value: 6 } items { int64_value: 5001 } } } result_sets { columns { name: "index" type { optional_type { item { type_id: INT64 } } } } columns { name: "value" type { optional_type { item { type_id: INT64 } } } } rows { items { int64_value: 0 } items { int64_value: 2000 } } rows { items { int64_value: 1 } items { int64_value: 2001 } } rows { items { int64_value: 2 } items { int64_value: 2002 } } rows { items { int64_value: 3 } items { int64_value: 2003 } } rows { items { int64_value: 4 } items { int64_value: 2004 } } rows { items { int64_value: 5 } items { int64_value: 2005 } } rows { items { int64_value: 6 } items { int64_value: 5002 } } } tx_meta { } >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-53 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-54 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-6 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-7 >> DataStreams::TestGetRecords1MBMessagesOneByOneByTS [GOOD] >> DataStreams::TestGetRecordsStreamWithMultipleShards >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-36 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-55 |90.0%| [TA] $(B)/ydb/core/tx/datashard/ut_write/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TPersQueueTest::WhenTheTopicIsDeletedBeforeDataIsDecompressed_Uncompressed [GOOD] >> TPersQueueTest::WhenTheTopicIsDeletedAfterReadingTheData_Uncompressed >> TxUsage::WriteToTopic_Demo_14_Query [GOOD] >> TPersQueueCommonTest::TestLimiterLimitsWithBlobsRateLimit [GOOD] >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit >> SplitPathTests::WithoutDatabaseShouldSuccess [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-54 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-55 |90.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> SplitPathTests::WithoutDatabaseShouldSuccess [GOOD] >> TxUsage::WriteToTopic_Demo_28_Query [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Table [GOOD] >> TDataShardLocksTest::UseLocksCache [GOOD] >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite2-withSink [GOOD] >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite3+withSink >> KqpScan::RemoteShardScan [GOOD] >> KqpScan::ScanDuringSplit >> TxUsage::WriteToTopic_Demo_15_Table |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut >> TxUsage::WriteToTopic_Demo_9_Query [GOOD] >> TestYmqHttpProxy::TestSendMessageEmptyQueueUrl [GOOD] >> TxUsage::WriteToTopic_Demo_29_Table |90.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> TxUsage::WriteToTopic_Demo_50_Table >> TestYmqHttpProxy::TestSendMessageFifoQueue |90.0%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_write/test-results/unittest/{meta.json ... results_accumulator.log} |90.0%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_locks/unittest >> TDataShardLocksTest::UseLocksCache [GOOD] Test command err: 2025-06-24T20:38:27.252740Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:38:27.253281Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:38:27.253502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043f0/r3tmp/tmpgblXu2/pdisk_1.dat 2025-06-24T20:38:27.967888Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:38:27.977221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:38:28.080879Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:28.094746Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797501173231 != 1750797501173235 2025-06-24T20:38:28.146598Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:28.146773Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:28.160318Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:38:28.300489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:28.356854Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:631:2532]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:38:28.358436Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:631:2532]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:38:28.358965Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:631:2532] 2025-06-24T20:38:28.359257Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:38:28.411546Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:631:2532]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:38:28.412531Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:617:2524], Recipient [1:633:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:38:28.413679Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:38:28.413827Z node 1 
:TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:38:28.415742Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T20:38:28.415833Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:38:28.415914Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:38:28.416354Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:38:28.416632Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:38:28.416712Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:659:2532] in generation 1 2025-06-24T20:38:28.416977Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:617:2524], Recipient [1:633:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:38:28.417479Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:633:2534] 2025-06-24T20:38:28.417713Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:38:28.427137Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:617:2524], Recipient [1:633:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:38:28.428043Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:38:28.428169Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:38:28.429863Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-24T20:38:28.429939Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-24T20:38:28.429994Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-24T20:38:28.430361Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:38:28.430540Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:38:28.430632Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:665:2534] in generation 1 2025-06-24T20:38:28.444008Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:38:28.482043Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:38:28.482310Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:38:28.482457Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:668:2553] 2025-06-24T20:38:28.482520Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:38:28.482573Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:38:28.482614Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 
72075186224037888 2025-06-24T20:38:28.482976Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:631:2532], Recipient [1:631:2532]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:38:28.483040Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:38:28.483215Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:38:28.483261Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-24T20:38:28.483361Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:38:28.483431Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:669:2554] 2025-06-24T20:38:28.483457Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-24T20:38:28.483483Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-24T20:38:28.483507Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T20:38:28.483904Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:633:2534], Recipient [1:633:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:38:28.483949Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:38:28.484177Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:38:28.484303Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:38:28.484406Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:38:28.484472Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:38:28.484541Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:38:28.484585Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:38:28.484631Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:38:28.484667Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:38:28.484713Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:38:28.484763Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-24T20:38:28.484838Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-24T20:38:28.485399Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:640:2536], Recipient 
[1:631:2532]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:28.485449Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:28.485525Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:640:2536], sessionId# [0:0:0] 2025-06-24T20:38:28.485585Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T20:38:28.485618Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:38:28.485645Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037889 2025-06-24T20:38:28.485671Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-24T20:38:28.485694Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-24T20:38:28.485719Z node 1 :TX_DATASHARD ... node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CheckRead 2025-06-24T20:38:38.199710Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-24T20:38:38.199755Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CheckRead 2025-06-24T20:38:38.199797Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T20:38:38.199836Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T20:38:38.199893Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 72075186224037888 2025-06-24T20:38:38.199939Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-24T20:38:38.199973Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T20:38:38.199996Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T20:38:38.200021Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit ExecuteRead 2025-06-24T20:38:38.200182Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2500 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2025-06-24T20:38:38.200478Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v2500/18446744073709551615 2025-06-24T20:38:38.200532Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is DelayComplete 2025-06-24T20:38:38.200571Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing 
on unit ExecuteRead 2025-06-24T20:38:38.200612Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T20:38:38.200650Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CompletedOperations 2025-06-24T20:38:38.200697Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-24T20:38:38.200724Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T20:38:38.200752Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:3] at 72075186224037888 has finished 2025-06-24T20:38:38.200800Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T20:38:38.211783Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:38:38.211853Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [2500:281474976715661] at 72075186224037888 on unit CompleteWrite 2025-06-24T20:38:38.211911Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [2500 : 281474976715661] from 72075186224037888 at tablet 72075186224037888 send result to client [2:885:2672] 2025-06-24T20:38:38.211980Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 72075186224037888 {TEvReadSet step# 2500 txid# 281474976715661 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2025-06-24T20:38:38.212019Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:38:38.212110Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:38:38.212164Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T20:38:38.212218Z node 2 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 2 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2025-06-24T20:38:38.212312Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:38:38.212423Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-24T20:38:38.212486Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:3] at 72075186224037888 on unit ExecuteRead 2025-06-24T20:38:38.212535Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[2:951:2739], 0} after executionsCount# 1 2025-06-24T20:38:38.212595Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[2:951:2739], 0} sends rowCount# 2, bytes# 64, quota rows left# 999, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T20:38:38.212675Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[2:951:2739], 0} finished in read 2025-06-24T20:38:38.212962Z node 2 
:TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [2:923:2724], Recipient [2:633:2534]: {TEvReadSet step# 2500 txid# 281474976715661 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2025-06-24T20:38:38.213023Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T20:38:38.213064Z node 2 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 281474976715661 2025-06-24T20:38:38.214015Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [2:951:2739], Recipient [2:923:2724]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T20:38:38.214081Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-24T20:38:38.214513Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [2:951:2739], Recipient [2:633:2534]: NKikimrTxDataShard.TEvRead ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2500 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 RangesSize: 1 2025-06-24T20:38:38.214649Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037889, FollowerId 0 2025-06-24T20:38:38.214711Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037889 on unit CheckRead 2025-06-24T20:38:38.214774Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037889 is Executed 2025-06-24T20:38:38.214802Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037889 executing on unit CheckRead 2025-06-24T20:38:38.214838Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-24T20:38:38.214885Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-24T20:38:38.214932Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037889 2025-06-24T20:38:38.214964Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037889 is Executed 2025-06-24T20:38:38.214987Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-24T20:38:38.215009Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037889 to execution unit ExecuteRead 2025-06-24T20:38:38.215031Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037889 on unit ExecuteRead 2025-06-24T20:38:38.219267Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037889 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2500 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 } 2025-06-24T20:38:38.219547Z node 2 
:TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037889 promoting UnprotectedReadEdge to v2500/18446744073709551615 2025-06-24T20:38:38.219599Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037889 Complete read# {[2:951:2739], 1} after executionsCount# 1 2025-06-24T20:38:38.219660Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037889 read iterator# {[2:951:2739], 1} sends rowCount# 2, bytes# 64, quota rows left# 997, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T20:38:38.219761Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037889 read iterator# {[2:951:2739], 1} finished in read 2025-06-24T20:38:38.219852Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037889 is Executed 2025-06-24T20:38:38.219887Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037889 executing on unit ExecuteRead 2025-06-24T20:38:38.219916Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037889 to execution unit CompletedOperations 2025-06-24T20:38:38.219944Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037889 on unit CompletedOperations 2025-06-24T20:38:38.219990Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037889 is Executed 2025-06-24T20:38:38.220011Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037889 executing on unit CompletedOperations 2025-06-24T20:38:38.220032Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037889 has finished 2025-06-24T20:38:38.220060Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2025-06-24T20:38:38.220164Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2025-06-24T20:38:38.221145Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [2:951:2739], Recipient [2:633:2534]: NKikimrTxDataShard.TEvReadCancel ReadId: 1 2025-06-24T20:38:38.221200Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037889 ReadCancel: { ReadId: 1 } >> DataShardVolatile::UpsertDependenciesShardsRestart+UseSink [GOOD] >> DataShardVolatile::UpsertDependenciesShardsRestart-UseSink >> KqpTx::CommitStats [GOOD] >> TPQCompatTest::SetupLockSession [GOOD] >> TPQCompatTest::BadTopics >> DataStreams::TestGetRecordsStreamWithMultipleShards [GOOD] >> DataStreams::TestGetRecordsWithBigSeqno >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Query |90.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest >> TPersQueueTest::PreferredCluster_EnabledRemotePreferredClusterAndRemoteClusterEnabledDelaySec_SessionDiesOnlyAfterDelay [GOOD] >> TPersQueueTest::PreferredCluster_RemotePreferredClusterEnabledWhileSessionInitializing_SessionDiesOnlyAfterInitializationAndDelay ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::CommitStats [GOOD] Test command err: Trying to start YDB, gRPC: 20284, MsgBus: 28491 2025-06-24T20:38:22.816435Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618014611409207:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:22.816510Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031a3/r3tmp/tmpeUdKJo/pdisk_1.dat 2025-06-24T20:38:23.697732Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:23.699522Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618014611408983:2079] 1750797502733257 != 1750797502733260 2025-06-24T20:38:23.781903Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:23.782024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:23.790583Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:38:23.792613Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20284, node 1 2025-06-24T20:38:24.139740Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:38:24.139764Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:38:24.139771Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:38:24.139889Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28491 TClient is connected to server localhost:28491 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:38:25.721999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:38:25.767139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:38:25.786822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:26.214816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:26.427664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:26.534622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:27.819463Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618014611409207:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:27.819545Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:38:28.833508Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618040381214409:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:28.833613Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:29.423772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:29.474079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:29.520222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:29.582719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:29.621597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:29.708468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:29.817654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:29.991641Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618044676182375:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:29.991736Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:29.992155Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618044676182380:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:29.996633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:30.012538Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618044676182382:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:38:30.108196Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618048971149729:3430] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 27561, MsgBus: 10208 2025-06-24T20:38:33.551406Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519618060777576206:2224];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031a3/r3tmp/tmpFRVRnr/pdisk_1.dat 2025-06-24T20:38:33.695310Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:38:33.863316Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519618060777576018:2079] 1750797513436598 != 1750797513436601 2025-06-24T20:38:33.863609Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:33.869025Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:33.869126Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:33.872295Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27561, node 2 2025-06-24T20:38:34.177701Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:38:34.177728Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:38:34.177741Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:38:34.177874Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:38:34.459275Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10208 TClient is connected to server localhost:10208 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:38:34.944331Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:38:34.958928Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:35.069048Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:35.276915Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:35.366165Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:38.462536Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519618060777576206:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:38.462634Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:38:38.723312Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618082252414145:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:38.723436Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:38.800900Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:38.856145Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:38.966033Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:39.032691Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:39.100889Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:39.173109Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:39.259998Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:39.391359Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618086547382104:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:39.391481Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:39.393054Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618086547382109:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:39.397838Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:39.426146Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519618086547382111:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:38:39.529056Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519618086547382164:3430] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-55 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-56 >> StreamCreator::WithResolvedTimestamps >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-54 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-55 >> KqpScan::ScanRetryReadRanges [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-7 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-8 >> TestYmqHttpProxy::TestCreateQueueWithSameNameAndSameParams [GOOD] |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build |90.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build >> TestYmqHttpProxy::TestCreateQueueWithSameNameAndDifferentParams >> KqpUniqueIndex::UpdateOnFkSelectResultSameValue ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp_scan/unittest >> KqpScan::ScanRetryReadRanges [GOOD] Test command err: 2025-06-24T20:38:20.022547Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:38:20.023235Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:38:20.023354Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:38:20.023715Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:38:20.023818Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:38:20.024012Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004980/r3tmp/tmpuJ4qdZ/pdisk_1.dat 2025-06-24T20:38:20.498415Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:20.734326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:38:20.881930Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:20.882115Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:20.896406Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:20.896521Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:20.911575Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:38:20.912587Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:38:20.913038Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:38:21.256707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:22.009173Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1314:2791], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:22.009348Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1325:2796], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:22.009444Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:22.025036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:22.138233Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:38:22.138393Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:38:22.582681Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:1328:2799], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:38:22.782893Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1451:2867] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:38:24.470034Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhtkhhp9epfwe0nmvwe12g0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTNkZTI4ZWMtOWM3MTMzYzMtMzRjYTZhYTktMjFlZmIxNWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root -- nodeId: 2 2025-06-24T20:38:25.480430Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhtkm1w8p0zw63vz2n09q9w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFlZmQwOGEtZmI3OWJiOGMtZGRhYmJhYzctNjBiYmFiMmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root -- EvScan [1:1530:2919] -> [2:1486:2394] -- EvScanData from [2:1534:2401]: pass 2025-06-24T20:38:26.505050Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhtkm1w8p0zw63vz2n09q9w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFlZmQwOGEtZmI3OWJiOGMtZGRhYmJhYzctNjBiYmFiMmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root -- EvStreamData: {"ResultSet":{"columns":[{"name":"column0","type":{"optional_type":{"item":{"type_id":4}}}}],"rows":[{"items":[{"uint64_value":596400}]}]},"SeqNo":1,"QueryResultIndex":0,"ChannelId":1,"VirtualTimestamp":{"Step":2000,"TxId":281474976715661},"Finished":true} 2025-06-24T20:38:26.510534Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 2000, txId: 281474976715661] shutting down 2025-06-24T20:38:37.981156Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:446:2372], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:38:37.981685Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:38:37.981971Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:38:37.984726Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:38:37.985339Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:38:37.985512Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004980/r3tmp/tmpLGcfz0/pdisk_1.dat 2025-06-24T20:38:38.564764Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:38.766289Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:38:38.919845Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:38.920007Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:38.924959Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:38.925068Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:38.949082Z node 3 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-24T20:38:38.949820Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:38:38.950265Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:38:39.318434Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:40.200199Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:1313:2791], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:40.200348Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:1324:2796], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:40.200434Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:40.207526Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:40.391393Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:38:40.391523Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:38:40.820396Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:1327:2799], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:38:40.945330Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:1453:2870] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:38:42.074930Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhtm39y63ctdpfyefpxxmfk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2M4NjFhNjYtOGRmMjk3ZWQtNDBmZWVkMGYtYjc3MzZlZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root -- nodeId: 4 2025-06-24T20:38:43.287606Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhtm55wcw10cy0nvvaf1246, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MzYzM2QwZmYtYzdlZTdlZjQtYTcyOTRhMzUtNDQ0NmY5YzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root -- EvScan [3:1533:2923] -> [4:1488:2393] -- EvScanData from [4:1537:2400]: pass -- EvStreamData: {"ResultSet":{"columns":[{"name":"key","type":{"optional_type":{"item":{"type_id":2}}}},{"name":"value","type":{"optional_type":{"item":{"type_id":2}}}}],"rows":[{"items":[{"uint32_value":2},{"uint32_value":22}]},{"items":[{"uint32_value":21},{"uint32_value":2121}]},{"items":[{"uint32_value":22},{"uint32_value":2222}]},{"items":[{"uint32_value":23},{"uint32_value":2323}]},{"items":[{"uint32_value":24},{"uint32_value":2424}]},{"items":[{"uint32_value":25},{"uint32_value":2525}]},{"items":[{"uint32_value":26},{"uint32_value":2626}]},{"items":[{"uint32_value":27},{"uint32_value":2727}]},{"items":[{"uint32_value":28},{"uint32_value":2828}]},{"items":[{"uint32_value":29},{"uint32_value":2929}]},{"items":[{"uint32_value":40},{"uint32_value":4040}]},{"items":[{"uint32_value":41},{"uint32_value":4141}]},{"items":[{"uint32_value":42},{"uint32_value":4242}]},{"items":[{"uint32_value":43},{"uint32_value":4343}]},{"items":[{"uint32_value":44},{"uint32_value":4444}]},{"items":[{"uint32_value":45},{"uint32_value":4545}]},{"items":[{"uint32_value":46},{"uint32_value":4646}]},{"items":[{"uint32_value":47},{"uint32_value":4747}]},{"items":[{"uint32_value":48},{"uint32_value":4848}]},{"items":[{"uint32_value":49},{"uint32_value":4949}]},{"items":[{"uint32_value":50},{"uint32_value":5050}]}]},"SeqNo":1,"QueryResultIndex":0,"ChannelId":2,"VirtualTimestamp":{"Step":2000,"TxId":281474976715661},"Finished":false} -- EvStreamData: {"ResultSet":{"columns":[{"name":"key","type":{"optional_type":{"item":{"type_id":2}}}},{"name":"value","type":{"optional_type":{"item":{"type_id":2}}}}]},"SeqNo":2,"QueryResultIndex":0,"ChannelId":2,"VirtualTimestamp":{"Step":2000,"TxId":281474976715661},"Finished":true} 2025-06-24T20:38:43.304166Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 2000, txId: 281474976715661] shutting down >> TTopicYqlTest::CreateTopicYqlBackCompatibility [GOOD] >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Query [GOOD] |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/ut/ydb-core-cms-ut |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/ut/ydb-core-cms-ut |90.0%| [LD] {RESULT} $(B)/ydb/core/cms/ut/ydb-core-cms-ut >> 
SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-55 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-56 >> KqpIndexes::CreateTableWithImplicitSyncIndexSQL >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite3+withSink [GOOD] >> KqpScan::ScanDuringSplitThenMerge [GOOD] >> KqpScan::ScanPg >> KqpLocks::MixedTxFail-useSink [GOOD] >> KqpLocksTricky::TestNoLocksIssue+withSink >> TxUsage::WriteToTopic_Demo_23_RestartNo_Table >> TxUsage::SessionAbort_Table [GOOD] >> TxUsage::WriteToTopic_Demo_38_Table [GOOD] >> DataShardVolatile::DistributedUpsertRestartAfterPrepare-UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartAfterPlan >> KqpIndexes::SecondaryIndexOrderBy2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite3+withSink [GOOD] Test command err: Trying to start YDB, gRPC: 3356, MsgBus: 4595 2025-06-24T20:38:25.491277Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618026352029859:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:25.495112Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003356/r3tmp/tmp9t4zGb/pdisk_1.dat 2025-06-24T20:38:26.511918Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:38:26.627895Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:26.629090Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:26.629179Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:26.639551Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:38:26.642979Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 3356, node 1 2025-06-24T20:38:26.939527Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:38:26.939550Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:38:26.939556Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:38:26.939670Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4595 TClient is connected to server localhost:4595 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:38:28.276046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:38:28.330047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:28.636275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:29.102320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:29.250608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:38:30.483381Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618026352029859:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:30.483476Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:38:31.978143Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618052121835083:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:31.978247Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:32.748033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:32.808182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:32.864522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:32.920790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:32.986123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:33.059961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:33.115950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:33.187944Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618060711770340:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:33.188025Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:33.188424Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618060711770345:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:33.194547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:33.208454Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618060711770347:2441], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:38:33.304131Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618060711770398:3432] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:38:38.312690Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NmVlOWFhMTItNWY1ZTZiODktN2JmOWNlOTUtZjM0MjU2Mzk=, ActorId: [1:7519618069301705250:2475], ActorState: ExecuteState, TraceId: 01jyhtm15055amx917ftwn06se, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken Trying to start YDB, gRPC: 4349, MsgBus: 23895 2025-06-24T20:38:39.504138Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519618087635913919:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:39.504213Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003356/r3tmp/tmpdVgC4f/pdisk_1.dat 2025-06-24T20:38:39.733732Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:39.737823Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519618087635913890:2079] 1750797519502858 != 1750797519502861 2025-06-24T20:38:39.748507Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:39.748596Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:39.752269Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4349, node 2 2025-06-24T20:38:39.822516Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:38:39.822547Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:38:39.822558Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:38:39.822708Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23895 TClient is connected to server localhost:23895 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:38:40.398697Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:38:40.419563Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:40.527012Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:40.539359Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:38:40.785210Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:40.924344Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:43.450142Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618104815784704:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:43.450245Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:43.569901Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:43.620043Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:43.706292Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:43.752666Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:43.811673Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:43.910807Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:44.012241Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:44.121110Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618109110752666:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:44.121212Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:44.121652Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618109110752671:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:44.126621Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:44.147399Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519618109110752673:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:38:44.242554Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519618109110752724:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:38:44.555384Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519618087635913919:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:44.555529Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:38:46.864684Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NzZkNjFlOTktOGYxODA0YWUtYjk3NzMxYy1kZTkyODA2NQ==, ActorId: [2:7519618113405720298:2476], ActorState: ExecuteState, TraceId: 01jyhtm9r0bhf6zdt2sede7xyw, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken >> DataStreams::TestGetRecordsWithBigSeqno [GOOD] >> TxUsage::SessionAbort_Query >> TestYmqHttpProxy::TestSendMessageFifoQueue [GOOD] >> TPersQueueTest::CheckDeleteTopic [GOOD] >> TPersQueueTest::CheckDecompressionTasksWithoutSession >> TestYmqHttpProxy::TestSendMessageWithAttributes ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestGetRecordsWithBigSeqno [GOOD] Test command err: 2025-06-24T20:38:02.093000Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617927510034336:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:02.093191Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003043/r3tmp/tmpDrmJ3J/pdisk_1.dat 2025-06-24T20:38:02.852819Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:02.866989Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:02.867091Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:02.877557Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63170, node 1 2025-06-24T20:38:03.108658Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:38:03.108682Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:38:03.108689Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:38:03.108820Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:38:03.122187Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected 
to server localhost:3978 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:38:03.474135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:38:03.616763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:3978 2025-06-24T20:38:03.833465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-24T20:38:04.285415Z node 1 :PERSQUEUE ERROR: partition_read.cpp:780: [PQ: 72075186224037888, Partition: 0, State: StateIdle] reading from too big offset - topic stream_TestGetRecordsStreamWithSingleShard partition 0 client $without_consumer EndOffset 30 offset 100000 2025-06-24T20:38:07.951349Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519617949753866728:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:07.951462Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003043/r3tmp/tmp7zyALe/pdisk_1.dat 2025-06-24T20:38:08.272144Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65299, node 4 2025-06-24T20:38:08.329120Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 2025-06-24T20:38:08.343163Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:08.343282Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:08.360749Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:38:08.431906Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:38:08.431928Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:38:08.431939Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:38:08.432064Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13040 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:38:08.764613Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:38:08.852700Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:38:08.998902Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13040 2025-06-24T20:38:09.175884Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T20:38:12.946516Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519617949753866728:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:12.946603Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:38:23.227724Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:38:23.227760Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:36.958420Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519618076521245184:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:36.958503Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003043/r3tmp/tmpGdGW7p/pdisk_1.dat 2025-06-24T20:38:37.375728Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:37.414797Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:37.414910Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:37.422020Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11321, node 7 2025-06-24T20:38:37.700101Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:38:37.700129Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:38:37.700139Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:38:37.700312Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:38:38.015502Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23006 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:38:38.245587Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:38:38.263877Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:38:38.357945Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:23006 2025-06-24T20:38:38.605504Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-24T20:38:38.619950Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-24T20:38:43.815411Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519618107352843378:2204];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:43.891892Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003043/r3tmp/tmpSmHyjG/pdisk_1.dat 2025-06-24T20:38:44.145076Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:44.165515Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:44.165603Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:44.174653Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62063, node 10 2025-06-24T20:38:44.491512Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:38:44.491548Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:38:44.491561Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:38:44.491753Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:38:44.822840Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6188 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:38:45.241538Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:38:45.387383Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:6188 2025-06-24T20:38:45.821847Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... >> KqpIndexes::NullInIndexTableNoDataRead ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TTopicYqlTest::CreateTopicYqlBackCompatibility [GOOD] Test command err: 2025-06-24T20:34:56.956876Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617131561809192:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:56.956938Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:34:57.208609Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519617134551475905:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:57.231006Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:34:57.509498Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00498e/r3tmp/tmpkKSdK1/pdisk_1.dat 2025-06-24T20:34:57.616905Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:34:58.051075Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:58.133124Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:58.168693Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:58.194951Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:58.200602Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:58.200712Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:58.208663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:58.208758Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) 
VolatileState: Disconnected -> Connecting 2025-06-24T20:34:58.213005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:58.213758Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:34:58.222707Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5737, node 1 2025-06-24T20:34:58.437973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00498e/r3tmp/yandex7OUCYj.tmp 2025-06-24T20:34:58.437999Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00498e/r3tmp/yandex7OUCYj.tmp 2025-06-24T20:34:58.438122Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00498e/r3tmp/yandex7OUCYj.tmp 2025-06-24T20:34:58.438219Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:58.513624Z INFO: TTestServer started on Port 21464 GrpcPort 5737 TClient is connected to server localhost:21464 PQClient connected to localhost:5737 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:58.930711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:34:59.116282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:34:59.418307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 
2025-06-24T20:35:01.927564Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617153036646730:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:01.927837Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:01.932667Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617153036646742:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:01.936939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:01.957149Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617131561809192:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:01.957239Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:01.968466Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617153036646744:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-24T20:35:02.066861Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617157331614129:2756] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:02.195192Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519617134551475905:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:02.195269Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:02.647237Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617157331614139:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:35:02.649026Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MjcyYTU5MC1jYjY5MTJiNi02NGQ4OTVkMS03NDI0NDMyMw==, ActorId: [1:7519617153036646728:2298], ActorState: ExecuteState, TraceId: 01jyhtde4z9wx44csfstt7nwa6, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:35:02.650715Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:35:02.667610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:02.770924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:02.934159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-24T20:35:03.426824Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 28147 ... 
2.211122Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4534: [PQ: 72075186224037893] delete partitions for TxId 281474976715676 2025-06-24T20:38:42.211175Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037893] TxId 281474976715676, NewState EXECUTED 2025-06-24T20:38:42.211210Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72075186224037893] TxId 281474976715676 moved from EXECUTING to EXECUTED 2025-06-24T20:38:42.211249Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72075186224037893] write key for TxId 281474976715676 2025-06-24T20:38:42.211874Z node 25 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 281474976715676] save tx TxId: 281474976715676 State: EXECUTED MinStep: 1750797522005 MaxStep: 18446744073709551615 Step: 1750797522159 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 2 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "rt3.dc1--legacy--topic1" Version: 0 LocalDC: true RequireAuthWrite: true RequireAuthRead: true Producer: "legacy" Ident: "legacy" Topic: "topic1" DC: "dc1" FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/rt3.dc1--legacy--topic1" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 KeyRange { ToBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } Status: Active CreateVersion: 1 TabletId: 0 } ReadRuleGenerations: 0 PartitionStrategy { MinPartitionCount: 2 MaxPartitionCount: 5 ScaleThresholdSeconds: 300 ScaleUpPartitionWriteSpeedThresholdPercent: 90 ScaleDownPartitionWriteSpeedThresholdPercent: 30 PartitionStrategyType: CAN_SPLIT } AllPartitions { PartitionId: 0 KeyRange { ToBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } Status: Active CreateVersion: 1 TabletId: 0 } Consumers { Name: "c1" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } } BootstrapConfig { } SourceActor { RawX1: 7519618024503198107 RawX2: 107374184600 } Partitions { Partition { PartitionId: 0 } } 2025-06-24T20:38:42.220694Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037893] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:38:42.247383Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037893] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:38:42.247441Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037893] Try execute txs with state EXECUTED 2025-06-24T20:38:42.247471Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037893] TxId 281474976715676, State EXECUTED 2025-06-24T20:38:42.247502Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72075186224037893] TxId 281474976715676 State EXECUTED FrontTxId 281474976715676 2025-06-24T20:38:42.247535Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4007: [PQ: 72075186224037893] 
TPersQueue::SendEvReadSetAckToSenders 2025-06-24T20:38:42.247563Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037893] TxId 281474976715676, NewState WAIT_RS_ACKS 2025-06-24T20:38:42.247585Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72075186224037893] TxId 281474976715676 moved from EXECUTED to WAIT_RS_ACKS 2025-06-24T20:38:42.247623Z node 25 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715676] PredicateAcks: 0/0 2025-06-24T20:38:42.247635Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4560: [PQ: 72075186224037893] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-24T20:38:42.247653Z node 25 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715676] PredicateAcks: 0/0 2025-06-24T20:38:42.247681Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4621: [PQ: 72075186224037893] add an TxId 281474976715676 to the list for deletion 2025-06-24T20:38:42.247715Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037893] TxId 281474976715676, NewState DELETING 2025-06-24T20:38:42.247750Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:3852: [PQ: 72075186224037893] delete key for TxId 281474976715676 2025-06-24T20:38:42.247826Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037893] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:38:42.251795Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037893] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:38:42.251838Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037893] Try execute txs with state DELETING 2025-06-24T20:38:42.251860Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037893] TxId 281474976715676, State DELETING 2025-06-24T20:38:42.251896Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4571: [PQ: 72075186224037893] delete TxId 281474976715676 2025-06-24T20:38:42.252051Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:38:42.252094Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037892] Try execute txs with state EXECUTED 2025-06-24T20:38:42.252116Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037892] TxId 281474976715676, State EXECUTED 2025-06-24T20:38:42.252143Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72075186224037892] TxId 281474976715676 State EXECUTED FrontTxId 281474976715676 2025-06-24T20:38:42.252166Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4007: [PQ: 72075186224037892] TPersQueue::SendEvReadSetAckToSenders 2025-06-24T20:38:42.252190Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976715676, NewState WAIT_RS_ACKS 2025-06-24T20:38:42.252210Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72075186224037892] TxId 281474976715676 moved from EXECUTED to WAIT_RS_ACKS 2025-06-24T20:38:42.252245Z node 26 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715676] PredicateAcks: 0/0 2025-06-24T20:38:42.252256Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4560: [PQ: 72075186224037892] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-24T20:38:42.252274Z node 26 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715676] PredicateAcks: 0/0 2025-06-24T20:38:42.252295Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4621: [PQ: 72075186224037892] add an TxId 281474976715676 to the list for deletion 2025-06-24T20:38:42.252318Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976715676, NewState DELETING 2025-06-24T20:38:42.252348Z node 26 
:PERSQUEUE DEBUG: pq_impl.cpp:3852: [PQ: 72075186224037892] delete key for TxId 281474976715676 2025-06-24T20:38:42.252422Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:38:42.259421Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:38:42.259475Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-06-24T20:38:42.259500Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037892] TxId 281474976715676, State DELETING 2025-06-24T20:38:42.259529Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4571: [PQ: 72075186224037892] delete TxId 281474976715676 TClient::Ls request: /Root/PQ/rt3.dc1--legacy--topic1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "rt3.dc1--legacy--topic1" PathId: 13 SchemeshardId: 72057594046644480 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 281474976715676 CreateStep: 1750797522159 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186224037894 } PersQueueGroup { Name: "rt3.dc1--legacy--topic1" PathId: 13 TotalGroupCount: 2 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 10... (TRUNCATED) === PATH DESCRIPTION: Name: "rt3.dc1--legacy--topic1" PathId: 13 TotalGroupCount: 2 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } LocalDC: true RequireAuthWrite: true RequireAuthRead: true Producer: "legacy" Ident: "legacy" Topic: "topic1" DC: "dc1" FormatVersion: 0 Codecs { } YdbDatabasePath: "/Root" PartitionStrategy { MinPartitionCount: 2 MaxPartitionCount: 5 ScaleThresholdSeconds: 300 ScaleUpPartitionWriteSpeedThresholdPercent: 90 ScaleDownPartitionWriteSpeedThresholdPercent: 30 PartitionStrategyType: CAN_SPLIT } Consumers { Name: "c1" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 } } Partitions { PartitionId: 0 TabletId: 72075186224037893 KeyRange { ToBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } Status: Active } Partitions { PartitionId: 1 TabletId: 72075186224037892 KeyRange { FromBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } Status: Active } AlterVersion: 1 BalancerTabletID: 72075186224037894 NextPartitionId: 2 2025-06-24T20:38:43.449217Z node 25 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: 
[25:7519618106107578813:2474] TxId: 281474976715681. Ctx: { TraceId: 01jyhtm6fm5cwqrf4d51hypxkd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=25&id=Mjk4MGExOWQtYzIxZjQzZDUtYTMxODFmOGMtYzY0MGEzZDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 26 2025-06-24T20:38:43.450124Z node 25 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [25:7519618106107578817:2474], TxId: 281474976715681, task: 2. Ctx: { TraceId : 01jyhtm6fm5cwqrf4d51hypxkd. SessionId : ydb://session/3?node_id=25&id=Mjk4MGExOWQtYzIxZjQzZDUtYTMxODFmOGMtYzY0MGEzZDM=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [25:7519618106107578813:2474], status: UNAVAILABLE, reason: {
: Error: Terminate execution }
>> StreamCreator::WithResolvedTimestamps [GOOD]
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-56 [GOOD]
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-57
>> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-8 [FAIL]
>> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-9
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-55 [GOOD]
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-56
>> KqpMultishardIndex::YqWorksFineAfterAlterIndexTableDirectly
|90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber
|90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber
|90.0%| [LD] {RESULT} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber
|90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table
|90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table
|90.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest >> StreamCreator::WithResolvedTimestamps [GOOD]
Test command err:
2025-06-24T20:38:45.130655Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618112159632082:2230];send_to=[0:7307199536658146131:7762515];
2025-06-24T20:38:45.143624Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0029b7/r3tmp/tmpnml8Si/pdisk_1.dat
2025-06-24T20:38:46.010475Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-24T20:38:46.010597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-24T20:38:46.012387Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-24T20:38:46.013136Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618112159631889:2079] 1750797525054987 != 1750797525054990
2025-06-24T20:38:46.015430Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded
2025-06-24T20:38:46.111504Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
TClient is connected to server localhost:63436
TServer::EnableGrpc on GrpcPort 10625, node 1
2025-06-24T20:38:46.640019Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-24T20:38:46.640042Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-24T20:38:46.640048Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-24T20:38:46.640168Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:63436 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:38:47.526719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:38:47.569764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750797527710 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750797527598 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750797527710 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... (TRUNCATED) 2025-06-24T20:38:47.812899Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T20:38:47.813040Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T20:38:47.813066Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T20:38:47.813870Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T20:38:50.127409Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618112159632082:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:50.160405Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:38:51.302526Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750797527710, tx_id: 281474976710658 } } } 2025-06-24T20:38:51.311591Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 
2025-06-24T20:38:51.313415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:51.315091Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-06-24T20:38:51.315106Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-06-24T20:38:51.377007Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-06-24T20:38:51.377040Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] 2025-06-24T20:38:51.377624Z node 1 :REPLICATION_CONTROLLER TRACE: stream_creator.cpp:57: [StreamCreator][rid 1][tid 1] Handle NKikimr::NReplication::NController::TEvPrivate::TEvAllowCreateStream 2025-06-24T20:38:51.641827Z node 1 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037888:1][1:7519618137929436571:2309] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:5:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-24T20:38:51.783373Z node 1 :REPLICATION_CONTROLLER TRACE: stream_creator.cpp:85: [StreamCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvAlterTableResponse { Result: { status: SUCCESS, issues: } } 2025-06-24T20:38:51.783406Z node 1 :REPLICATION_CONTROLLER INFO: stream_creator.cpp:100: [StreamCreator][rid 1][tid 1] Success: issues# 2025-06-24T20:38:51.875003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T20:38:51.921904Z node 1 :REPLICATION_CONTROLLER TRACE: stream_creator.cpp:137: [StreamCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvAlterTopicResponse { Result: { status: SUCCESS, issues: } } 2025-06-24T20:38:51.921934Z node 1 :REPLICATION_CONTROLLER INFO: stream_creator.cpp:155: [StreamCreator][rid 1][tid 1] Success: issues# TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750797527710 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: 
false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyC... (TRUNCATED) >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Table [GOOD] >> TxUsage::WriteToTopic_Demo_50_Table [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithSameNameAndDifferentParams [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReadFromTimestamp_BeforeAutoscaleAwareSDK >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-56 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-57 >> TxUsage::Write_Random_Sized_Messages_In_Wide_Transactions_Table >> TestYmqHttpProxy::TestCreateQueueWithWrongBody >> TxUsage::WriteToTopic_Demo_38_Query >> TxUsage::WriteToTopic_Demo_29_Table [GOOD] >> TSchemeShardMoveTest::MoveIndex >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Query >> TPersQueueTest::ReadRuleDisallowDefaultServiceType [GOOD] >> TPersQueueTest::ReadRuleServiceTypeMigration >> TxUsage::WriteToTopic_Demo_15_Table [GOOD] >> KqpScan::ScanDuringSplit [GOOD] >> KqpScan::ScanAfterSplitSlowMetaRead >> TxUsage::WriteToTopic_Demo_29_Query |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut |90.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut |90.1%| [LD] {RESULT} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut >> TxUsage::WriteToTopic_Demo_15_Query >> DataShardVolatile::UpsertDependenciesShardsRestart-UseSink [GOOD] >> DataShardVolatile::NotCachingAbortingDeletes+UseSink >> TSchemeShardMoveTest::MoveIndex [GOOD] >> TSchemeShardMoveTest::MoveIndexDoesNonExisted >> TestYmqHttpProxy::TestSendMessageWithAttributes [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-57 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-58 >> KqpIndexes::CreateTableWithImplicitSyncIndexSQL [GOOD] >> KqpIndexes::CreateTableWithExplicitSyncIndexSQL >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-56 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-57 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-9 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-10 >> TestYmqHttpProxy::TestSetQueueAttributes >> TSchemeShardMoveTest::MoveIndexDoesNonExisted [GOOD] |90.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator |90.1%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator |90.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator >> test_sql_streaming.py::test[suites-ReadTopicWithMetadata-default.txt] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Query [GOOD] >> KqpIndexes::SecondaryIndexOrderBy2 [GOOD] >> KqpIndexes::SecondaryIndexReplace+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::MoveIndexDoesNonExisted [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: 
[1:129:2058] recipient: [1:111:2141] 2025-06-24T20:38:58.140836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:38:58.140934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:38:58.140977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:38:58.141018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:38:58.141066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:38:58.141102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:38:58.141200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:38:58.141292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:38:58.142161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:38:58.142515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:38:58.243382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:38:58.243458Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:58.257728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:38:58.262436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:38:58.262642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:38:58.271847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:38:58.272098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:38:58.272882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:38:58.273244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:38:58.276325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:38:58.276530Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:38:58.277713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:38:58.277781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:38:58.277915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:38:58.277965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:38:58.278013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:38:58.278172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:38:58.285627Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:38:58.436151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:38:58.436436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:38:58.436673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:38:58.436743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:38:58.437002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:38:58.437080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:38:58.440676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:38:58.440920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:38:58.441217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:38:58.441280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:38:58.441327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:38:58.441369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:38:58.443822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:38:58.443892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:38:58.443953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:38:58.446217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:38:58.446282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:38:58.446341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:38:58.446393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:38:58.458147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:38:58.460865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:38:58.461122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:38:58.462208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:38:58.462359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:38:58.462412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply 
TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:38:58.462711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:38:58.462764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:38:58.462979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:38:58.463080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:38:58.465673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:38:58.465727Z node 1 :FLAT_TX_SCHEMESHARD ... DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:39:01.469268Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Sync" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T20:39:01.469514Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Sync" took 276us result status StatusSuccess 2025-06-24T20:39:01.470287Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Sync" PathDescription { Self { Name: "Sync" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateCreate Owner: 
"root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "Sync" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value0" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value0" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy 
{ Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:39:01.470983Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Async" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T20:39:01.475494Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Async" took 4.46ms result status StatusSuccess 2025-06-24T20:39:01.476490Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Async" PathDescription { Self { Name: "Async" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 5 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "Async" LocalPathId: 5 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "value1" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 
NotNull: false IsBuildInProgress: false } KeyColumnNames: "value1" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpMultishardIndex::YqWorksFineAfterAlterIndexTableDirectly [GOOD] >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix+Nullable-Covered >> KqpScan::ScanPg [GOOD] >> KqpUniqueIndex::UpdateOnFkSelectResultSameValue [GOOD] >> KqpUniqueIndex::UpdateOnFkAlreadyExist >> DataShardVolatile::DistributedUpsertRestartAfterPlan [GOOD] >> DataShardVolatile::CompactedVolatileChangesCommit >> TPersQueueTest::DirectReadRestartPQRB [GOOD] >> TPersQueueTest::DirectReadRestartTablet >> TPersQueueTest::SetupWriteSessionOnDisabledCluster [GOOD] >> TPersQueueTest::SetupReadSession >> TPersQueueTest::WhenTheTopicIsDeletedAfterReadingTheData_Uncompressed [GOOD] >> TTopicYqlTest::CreateAndAlterTopicYql |90.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut |90.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut |90.1%| [LD] {RESULT} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut >> KqpIndexes::NullInIndexTableNoDataRead [GOOD] 
>> KqpIndexes::SecondaryIndexOrderBy >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-57 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-58 >> TestYmqHttpProxy::TestCreateQueueWithWrongBody [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp_scan/unittest >> KqpScan::ScanPg [GOOD] Test command err: 2025-06-24T20:38:22.808633Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:38:22.809404Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:38:22.809534Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:38:22.809894Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:38:22.810011Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:38:22.815581Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004978/r3tmp/tmpEF66D7/pdisk_1.dat 2025-06-24T20:38:23.717418Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:23.981289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:38:24.163466Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:212:2173] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T20:38:24.172295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:24.172443Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:24.174041Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T20:38:24.174852Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [2:241:2129] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T20:38:24.184162Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:24.184290Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:24.185395Z node 2 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976720656 RangeEnd# 281474976725656 txAllocator# 72057594046447617 2025-06-24T20:38:24.208051Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:38:24.209044Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:38:24.209531Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:38:24.569947Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:212:2173] Handle TEvProposeTransaction 
2025-06-24T20:38:24.570043Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:212:2173] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T20:38:24.570227Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:212:2173] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:1151:2703] 2025-06-24T20:38:24.756644Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:1151:2703] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T20:38:24.756764Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:1151:2703] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T20:38:24.757524Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:1151:2703] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T20:38:24.757630Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:1151:2703] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:38:24.758151Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:1151:2703] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:38:24.758393Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:1151:2703] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:38:24.758506Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:1151:2703] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T20:38:24.758841Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:1151:2703] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T20:38:24.760689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:24.764134Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:1151:2703] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T20:38:24.764220Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:1151:2703] txid# 281474976715657 SEND to# [1:1058:2645] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T20:38:24.928217Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:1202:2351] 2025-06-24T20:38:24.928519Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:38:25.097650Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:38:25.097791Z node 2 :TX_DATASHARD DEBUG: 
datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:38:25.105194Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T20:38:25.105333Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:38:25.105395Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:38:25.105806Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:38:25.105992Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:38:25.106107Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [2:1222:2351] in generation 1 2025-06-24T20:38:25.146614Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:38:25.190768Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:38:25.191001Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:38:25.191213Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:1227:2367] 2025-06-24T20:38:25.191266Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:38:25.191321Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:38:25.191388Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:38:25.191952Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:38:25.192058Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:38:25.192114Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:38:25.192157Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:38:25.192199Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:38:25.192248Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:38:25.264416Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:1186:2733], serverId# [2:1231:2368], sessionId# [0:0:0] 2025-06-24T20:38:25.264907Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:38:25.265205Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:38:25.265350Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:38:25.268055Z node 2 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:38:25.286419Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:38:25.286555Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T20:38:25.581367Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:1255:2750], serverId# [2:1258:2375], sessionId# [0:0:0] 2025-06-24T20:38:25.589736Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046 ... wYzYtNTg0MTAwOGU=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Do not drain channelId: 1, finished 2025-06-24T20:38:46.606288Z node 3 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715664, task: 1. Tasks execution finished 2025-06-24T20:38:46.606314Z node 3 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [3:1558:2932], TxId: 281474976715664, task: 1. Ctx: { TraceId : 01jyhtm7stebenj7ewqp03aapf. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=3&id=MjJiMjgzZDktZDg3YWNhZTEtMzk5NmQwYzYtNTg0MTAwOGU=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-06-24T20:38:46.606388Z node 3 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715664, task: 1. pass away 2025-06-24T20:38:46.606470Z node 3 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715664;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T20:38:46.606595Z node 3 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715664, taskId: 1. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-24T20:38:46.606755Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1555:2893] TxId: 281474976715664. Ctx: { TraceId: 01jyhtm7stebenj7ewqp03aapf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MjJiMjgzZDktZDg3YWNhZTEtMzk5NmQwYzYtNTg0MTAwOGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [3:1558:2932], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 2020 Tasks { TaskId: 1 CpuTimeUs: 557 FinishTimeMs: 1750797526606 OutputRows: 1 OutputBytes: 6 ResultRows: 1 ResultBytes: 6 ComputeCpuTimeUs: 61 BuildCpuTimeUs: 496 HostName: "ghrun-4pjfmvffsq" NodeId: 3 CreateTimeMs: 1750797526603 UpdateTimeMs: 1750797526606 } MaxMemoryUsage: 1048576 } 2025-06-24T20:38:46.606805Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715664. Ctx: { TraceId: 01jyhtm7stebenj7ewqp03aapf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MjJiMjgzZDktZDg3YWNhZTEtMzk5NmQwYzYtNTg0MTAwOGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [3:1558:2932] 2025-06-24T20:38:46.606899Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [3:1555:2893] TxId: 281474976715664. Ctx: { TraceId: 01jyhtm7stebenj7ewqp03aapf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MjJiMjgzZDktZDg3YWNhZTEtMzk5NmQwYzYtNTg0MTAwOGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T20:38:46.606940Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2202: ActorId: [3:1555:2893] TxId: 281474976715664. Ctx: { TraceId: 01jyhtm7stebenj7ewqp03aapf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MjJiMjgzZDktZDg3YWNhZTEtMzk5NmQwYzYtNTg0MTAwOGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Terminate, become ZombieState 2025-06-24T20:38:46.607000Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [3:1555:2893] TxId: 281474976715664. Ctx: { TraceId: 01jyhtm7stebenj7ewqp03aapf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MjJiMjgzZDktZDg3YWNhZTEtMzk5NmQwYzYtNTg0MTAwOGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.002020s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-24T20:38:46.608038Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 2000, txId: 281474976715661] shutting down 2025-06-24T20:38:46.608130Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [3:212:2173] Handle TEvProposeTransaction 2025-06-24T20:38:46.608167Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [3:212:2173] TxId# 0 ProcessProposeTransaction 2025-06-24T20:38:46.608296Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:289: actor# [3:212:2173] Cookie# 0 userReqId# "" txid# 0 reqId# [3:1560:2933] SnapshotReq marker# P0 2025-06-24T20:38:46.609253Z node 3 :TX_PROXY DEBUG: resolvereq.cpp:152: Actor# [3:1563:2933] txid# 0 HANDLE EvNavigateKeySetResult TResolveTablesActor marker# P1 ErrorCount# 0 2025-06-24T20:38:46.609487Z node 3 :TX_PROXY DEBUG: resolvereq.cpp:272: Actor# [3:1563:2933] txid# 0 HANDLE EvResolveKeySetResult TResolveTablesActor marker# P2 ErrorCount# 0 2025-06-24T20:38:46.609608Z node 3 :TX_PROXY DEBUG: snapshotreq.cpp:1453: Actor# [3:1560:2933] SEND TEvDiscardVolatileSnapshotRequest to datashard 72075186224037888 marker# P3 2025-06-24T20:38:56.475990Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:38:56.476740Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:38:56.476891Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:38:56.477248Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:38:56.477533Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:38:56.477783Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004978/r3tmp/tmptiqhZo/pdisk_1.dat 2025-06-24T20:38:56.907999Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:57.062031Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:38:57.204066Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:57.204418Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:57.210592Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:57.210759Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:57.231397Z node 5 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 6 Cookie 6 2025-06-24T20:38:57.232057Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:38:57.232522Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:38:57.570112Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:58.431959Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:1313:2790], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:58.432086Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:1324:2795], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:58.432581Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:58.447778Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:58.618253Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:38:58.618377Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:38:59.040095Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:1327:2798], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:38:59.146241Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:1454:2870] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:39:00.250220Z node 5 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhtmn3t5effgc16wwp4a01b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=ZDEwNThhMjQtYmU1MzhhM2MtNWFiYjMwNzItZjJlYTQ2Yzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:39:01.197081Z node 5 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhtmpz66xm3ydyvxc1es5b7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=YWYxN2VjM2MtY2YwMjg5NWEtNDBhYjJiZjItZmI0NmI4Zjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:39:02.067314Z node 5 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhtmpz66xm3ydyvxc1es5b7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=YWYxN2VjM2MtY2YwMjg5NWEtNDBhYjJiZjItZmI0NmI4Zjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:39:02.070635Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 2000, txId: 281474976715661] shutting down >> TBackupCollectionTests::HiddenByFeatureFlag >> TxUsage::WriteToTopic_Demo_23_RestartNo_Table [GOOD] >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit [GOOD] >> TPersQueueTest::AllEqual [GOOD] >> TPersQueueTest::BadSids >> KqpLocksTricky::TestNoLocksIssue+withSink [GOOD] >> TxUsage::SessionAbort_Query [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithWrongAttribute >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Table >> TxUsage::WriteToTopic_Demo_23_RestartNo_Query |90.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr |90.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr >> TBackupCollectionTests::HiddenByFeatureFlag [GOOD] >> TBackupCollectionTests::DisallowedPath |90.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr >> TBackupCollectionTests::DisallowedPath [GOOD] >> TBackupCollectionTests::ParallelCreate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpLocksTricky::TestNoLocksIssue+withSink [GOOD] Test command err: Trying to start YDB, gRPC: 4257, MsgBus: 23854 2025-06-24T20:38:24.592913Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618021990830967:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:24.592961Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003376/r3tmp/tmpvVM7IE/pdisk_1.dat 2025-06-24T20:38:25.547354Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch 
for subscription [1:7519618021990830948:2079] 1750797504582730 != 1750797504582733 2025-06-24T20:38:25.560065Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:25.646016Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:38:25.649126Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:25.649230Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:25.764450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4257, node 1 2025-06-24T20:38:26.019835Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:38:26.019861Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:38:26.019869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:38:26.020023Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23854 TClient is connected to server localhost:23854 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:38:27.362729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:38:27.401930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:38:27.771800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:28.168769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:28.267943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:29.595571Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618021990830967:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:29.595676Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:38:31.538168Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618052055603704:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:31.538335Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:32.153374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:32.230744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:32.309194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:32.365985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:32.423331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:32.491970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:32.543115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:32.668062Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618056350571670:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:32.668211Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:32.671568Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618056350571675:2441], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:32.676422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:32.706039Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618056350571677:2442], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:38:32.768140Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618056350571728:3447] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:38:34.813213Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976710676; 2025-06-24T20:38:34.827426Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [1:7519618064940506643:2483], Table: `/Root/Test` ([72057594046644480:9:1]), SessionActorId: [1:7519618064940506614:2483]Got LOCKS BROKEN for table `/Root/Test`. ShardID=72075186224037914, Sink=[1:7519618064940506643:2483].{ Disconnected 2025-06-24T20:38:53.878379Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:53.891690Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7415 TClient is connected to server localhost:7415 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:38:54.397274Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:38:54.425055Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:38:54.587850Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:38:54.788983Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:55.318265Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:55.733477Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:56.516944Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:1683:3279], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:56.517090Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:56.550431Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:56.754296Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:57.043121Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:57.400128Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:57.698989Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:58.211196Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:58.569547Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:59.064595Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:2353:3775], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:59.064769Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:59.065076Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:2358:3780], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:59.080838Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:59.285429Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:2360:3782], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:38:59.354376Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:2418:3821] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:39:01.282914Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:39:01.604465Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:39:02.017234Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TxUsage::Offsets_Cannot_Be_Promoted_When_Reading_In_A_Transaction_Table >> TBackupCollectionTests::CreateAbsolutePath >> TPQCompatTest::BadTopics [GOOD] >> TPQCompatTest::CommitOffsets >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-58 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-59 >> TBackupCollectionTests::ParallelCreate [GOOD] >> TBackupCollectionTests::Drop >> TxUsage::WriteToTopic_Demo_44_Table [GOOD] >> KqpIndexes::CreateTableWithExplicitSyncIndexSQL [GOOD] >> KqpIndexes::DeleteByIndex >> TBackupCollectionTests::CreateAbsolutePath [GOOD] >> TBackupCollectionTests::Create >> TestYmqHttpProxy::TestSetQueueAttributes [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-10 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-11 >> TBackupCollectionTests::Drop [GOOD] >> TBackupCollectionTests::DropTwice >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-57 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-58 >> TxUsage::WriteToTopic_Demo_44_Query >> TBackupCollectionTests::Create [GOOD] >> TBackupCollectionTests::CreateTwice >> Viewer::JsonStorageListingV2NodeIdFilter [GOOD] >> Viewer::JsonStorageListingV2PDiskIdFilter >> TestYmqHttpProxy::TestTagQueue >> BasicUsage::GetAllStartPartitionSessions >> TPersQueueTest::PreferredCluster_RemotePreferredClusterEnabledWhileSessionInitializing_SessionDiesOnlyAfterInitializationAndDelay [GOOD] >> TPersQueueTest::PartitionsMapping >> TBackupCollectionTests::DropTwice [GOOD] >> TBackupCollectionTests::TableWithSystemColumns >> TBackupCollectionTests::CreateTwice [GOOD] >> TBackupCollectionTests::BackupAbsentCollection >> KqpIndexes::SecondaryIndexReplace+UseSink [GOOD] >> KqpIndexes::SecondaryIndexReplace-UseSink >> 
SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-58 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-59 >> TBackupCollectionTests::BackupAbsentCollection [GOOD] >> TBackupCollectionTests::BackupDroppedCollection >> TBackupCollectionTests::BackupDroppedCollection [GOOD] >> TBackupCollectionTests::BackupAbsentDirs >> TxUsage::WriteToTopic_Demo_29_Query [GOOD] >> TBackupCollectionTests::TableWithSystemColumns [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithWrongAttribute [GOOD] >> TPersQueueTest::CheckDecompressionTasksWithoutSession [GOOD] >> TPersQueueTest::Codecs_InitWriteSession_DefaultTopicSupportedCodecsInInitResponse >> TBackupCollectionTests::BackupAbsentDirs [GOOD] >> TBackupCollectionTests::BackupNonIncrementalCollection >> TxUsage::WriteToTopic_Demo_30_Table >> KqpUniqueIndex::UpdateOnFkAlreadyExist [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup_collection/unittest >> TBackupCollectionTests::TableWithSystemColumns [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:39:06.358301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:39:06.358388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:39:06.358435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:39:06.358475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:39:06.358523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:39:06.358549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:39:06.358616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:39:06.358714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:39:06.359838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:39:06.360175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:39:06.439975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:39:06.440039Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table 
profiles were not loaded 2025-06-24T20:39:06.457382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:39:06.461078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:39:06.461261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:39:06.471078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:39:06.471368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:39:06.472082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:39:06.472428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:39:06.475508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:39:06.475709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:39:06.476829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:39:06.476904Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:39:06.477047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:39:06.477097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:39:06.477137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:39:06.477315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:39:06.484497Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T20:39:06.658369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:39:06.658611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:39:06.658893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:39:06.659003Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:39:06.672535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:39:06.672657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:39:06.682637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:39:06.682849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:39:06.683021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:39:06.683058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:39:06.683088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:39:06.683114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:39:06.692660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:39:06.692793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:39:06.692856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:39:06.700421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:39:06.700516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:39:06.700569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:39:06.700622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:39:06.704415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:39:06.706895Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:39:06.707131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:39:06.708247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:39:06.708407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:39:06.708471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:39:06.708748Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:39:06.708801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:39:06.708986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:39:06.709113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:39:06.711686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:39:06.711733Z node 1 :FLAT_TX_SCHEMESHARD ... 
: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409548 Status: COMPLETE TxId: 106 Step: 5000007 OrderId: 106 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409548 CpuTimeUsec: 1639 } } CommitVersion { Step: 5000007 TxId: 106 } 2025-06-24T20:39:13.401612Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 106, tablet: 72075186233409548, partId: 1 2025-06-24T20:39:13.401790Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 106:1, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409548 Status: COMPLETE TxId: 106 Step: 5000007 OrderId: 106 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409548 CpuTimeUsec: 1639 } } CommitVersion { Step: 5000007 TxId: 106 } 2025-06-24T20:39:13.401939Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409548 Status: COMPLETE TxId: 106 Step: 5000007 OrderId: 106 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409548 CpuTimeUsec: 1639 } } CommitVersion { Step: 5000007 TxId: 106 } 2025-06-24T20:39:13.402020Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T20:39:13.404906Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [6:654:2600], Recipient [6:126:2150]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:39:13.404991Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:39:13.405025Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T20:39:13.405288Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269551620, Sender [6:590:2545], Recipient [6:126:2150]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 590 RawX2: 25769806321 } Origin: 72075186233409548 State: 2 TxId: 106 Step: 0 Generation: 2 2025-06-24T20:39:13.405334Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4983: StateWork, processing event TEvDataShard::TEvSchemaChanged 2025-06-24T20:39:13.405478Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 590 RawX2: 25769806321 } Origin: 72075186233409548 State: 2 TxId: 106 Step: 0 Generation: 2 2025-06-24T20:39:13.405536Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 106, tablet: 72075186233409548, partId: 1 2025-06-24T20:39:13.405732Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 106:1, at schemeshard: 72057594046678944, message: Source { RawX1: 590 RawX2: 25769806321 } Origin: 72075186233409548 State: 2 TxId: 106 Step: 0 Generation: 2 2025-06-24T20:39:13.405843Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: 
NTableState::TProposedWaitParts operationId# 106:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T20:39:13.405975Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 106:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 590 RawX2: 25769806321 } Origin: 72075186233409548 State: 2 TxId: 106 Step: 0 Generation: 2 2025-06-24T20:39:13.406066Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 106:1, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T20:39:13.406148Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 106:1, at schemeshard: 72057594046678944 2025-06-24T20:39:13.406200Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 106:1, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-06-24T20:39:13.406275Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 106:1 129 -> 240 2025-06-24T20:39:13.406486Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T20:39:13.407791Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:39:13.407938Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:39:13.408077Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-24T20:39:13.408131Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:39:13.412694Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-24T20:39:13.412821Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:39:13.412963Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-24T20:39:13.412992Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:39:13.413078Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 106:1, at schemeshard: 72057594046678944 2025-06-24T20:39:13.413106Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:39:13.413253Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 106:1, at schemeshard: 72057594046678944 2025-06-24T20:39:13.413298Z node 6 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:39:13.413350Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 106:1 2025-06-24T20:39:13.413501Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [6:590:2545] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 106 at schemeshard: 72057594046678944 2025-06-24T20:39:13.413995Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [6:126:2150], Recipient [6:126:2150]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:39:13.414046Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:39:13.414138Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 106:1, at schemeshard: 72057594046678944 2025-06-24T20:39:13.414198Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 106:1 ProgressState 2025-06-24T20:39:13.414347Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T20:39:13.414389Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#106:1 progress is 2/2 2025-06-24T20:39:13.414459Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 2/2 2025-06-24T20:39:13.414541Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#106:1 progress is 2/2 2025-06-24T20:39:13.414595Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 2/2 2025-06-24T20:39:13.414646Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 106, ready parts: 2/2, is published: true 2025-06-24T20:39:13.414746Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [6:304:2293] message: TxId: 106 2025-06-24T20:39:13.414811Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 2/2 2025-06-24T20:39:13.414866Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 106:0 2025-06-24T20:39:13.414910Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 106:0 2025-06-24T20:39:13.415026Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 2 2025-06-24T20:39:13.415073Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 106:1 2025-06-24T20:39:13.415099Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 106:1 2025-06-24T20:39:13.415208Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 3 2025-06-24T20:39:13.422964Z node 6 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:39:13.423119Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [6:304:2293] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 106 at schemeshard: 72057594046678944 2025-06-24T20:39:13.423641Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-24T20:39:13.423713Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [6:618:2565] 2025-06-24T20:39:13.423981Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [6:620:2567], Recipient [6:126:2150]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:13.424030Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:13.424060Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 >> TestYmqHttpProxy::TestCreateQueueWithTags >> DataShardVolatile::NotCachingAbortingDeletes+UseSink [GOOD] >> DataShardVolatile::NotCachingAbortingDeletes-UseSink >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Query [GOOD] >> TxUsage::WriteToTopic_Demo_15_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::UpdateOnFkAlreadyExist [GOOD] Test command err: Trying to start YDB, gRPC: 10154, MsgBus: 27370 2025-06-24T20:38:46.660613Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618116988320392:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:46.674247Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00466f/r3tmp/tmpzL7NTO/pdisk_1.dat 2025-06-24T20:38:47.420461Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:47.420580Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:47.432732Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:38:47.443002Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:47.443489Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618116988320211:2079] 1750797526608960 != 1750797526608963 TServer::EnableGrpc on GrpcPort 10154, node 1 2025-06-24T20:38:47.675284Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:38:47.778764Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:38:47.778801Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:38:47.778807Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:38:47.778921Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27370 TClient is connected to server localhost:27370 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:38:49.043123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:38:49.084082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:49.446472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:49.834351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:50.023970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:38:51.627894Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618116988320392:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:51.627975Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:38:52.673770Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618142758125641:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:52.673909Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:53.474978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:53.555672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:53.641253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:53.743549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:53.795246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:53.883246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:53.944335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:54.064989Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618151348060913:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:54.065085Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:54.065343Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618151348060918:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:54.070235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:54.087339Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618151348060920:2441], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:38:54.161655Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618151348060971:3434] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:38:56.998685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:39:02.339529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot ... tileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11049, node 2 2025-06-24T20:39:04.291851Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:39:04.291879Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:39:04.291894Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:39:04.292052Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26159 TClient is connected to server localhost:26159 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:39:04.952771Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:39:04.979001Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:39:05.069075Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T20:39:05.130608Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:39:05.347408Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:39:05.446415Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:39:08.369812Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618212547187670:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:08.369924Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:08.444394Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:39:08.496201Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:39:08.536954Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:39:08.583976Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:39:08.669967Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:39:08.729708Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:39:08.810437Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:39:08.936471Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618212547188336:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:08.936576Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:08.936867Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618212547188341:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:08.942416Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:39:08.958649Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519618212547188343:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:39:09.046713Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519618195367316892:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:09.046833Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:39:09.052639Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519618216842155690:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:39:10.214205Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:39:12.599016Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhtn1xn7de22ykj7vhaat3z, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTBjNDY4MDAtYWUyZDdkN2MtODJmNzE4YWEtMzhjMWQ3OTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-24T20:39:12.613995Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YTBjNDY4MDAtYWUyZDdkN2MtODJmNzE4YWEtMzhjMWQ3OTc=, ActorId: [2:7519618221137124019:2534], ActorState: ExecuteState, TraceId: 01jyhtn1xn7de22ykj7vhaat3z, Create QueryResponse for error on request, msg: 2025-06-24T20:39:13.852018Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhtn304c9ytbf1epd4j4n3s, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTBjNDY4MDAtYWUyZDdkN2MtODJmNzE4YWEtMzhjMWQ3OTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 
2025-06-24T20:39:13.852329Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YTBjNDY4MDAtYWUyZDdkN2MtODJmNzE4YWEtMzhjMWQ3OTc=, ActorId: [2:7519618221137124019:2534], ActorState: ExecuteState, TraceId: 01jyhtn304c9ytbf1epd4j4n3s, Create QueryResponse for error on request, msg: >> KqpScan::ScanAfterSplitSlowMetaRead [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-11 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-12 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-59 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-60 >> TBackupCollectionTests::BackupNonIncrementalCollection [GOOD] >> TxUsage::WriteToTopic_Demo_38_Query [GOOD] >> TxUsage::WriteToTopic_Demo_20_RestartNo_Query >> TxUsage::WriteToTopic_Demo_16_Table >> BasicUsage::PropagateSessionClosed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup_collection/unittest >> TBackupCollectionTests::BackupNonIncrementalCollection [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:39:08.881522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:39:08.881607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:39:08.881645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:39:08.881679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:39:08.881723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:39:08.881752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:39:08.881808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:39:08.881886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:39:08.882640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:39:08.882982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:39:08.970341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:39:08.970419Z 
node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:39:08.989029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:39:08.989584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:39:08.989786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:39:08.998514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:39:08.998741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:39:08.999530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:39:08.999864Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:39:09.003491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:39:09.003720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:39:09.005039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:39:09.005109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:39:09.005367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:39:09.005422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:39:09.005475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:39:09.005569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:39:09.013259Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:39:09.177836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:39:09.178081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:39:09.178340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 
72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:39:09.178387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:39:09.178597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:39:09.178662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:39:09.181138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:39:09.181368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:39:09.181567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:39:09.181620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:39:09.181685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:39:09.181728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:39:09.183966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:39:09.184037Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:39:09.184077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:39:09.187346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:39:09.187402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:39:09.187449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:39:09.187498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:39:09.191389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } 
CoordinatorID: 72057594046316545 2025-06-24T20:39:09.200683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:39:09.200956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:39:09.202003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:39:09.202141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:39:09.202202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:39:09.202512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:39:09.202573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:39:09.202749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:39:09.202821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:39:09.209298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:39:09.209383Z node 1 :FLAT_TX_SCHEMESHARD ... 
hDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 2 2025-06-24T20:39:16.624595Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 105:1 2025-06-24T20:39:16.624616Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 105:1 2025-06-24T20:39:16.624734Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-24T20:39:16.624776Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-24T20:39:16.634380Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T20:39:16.634575Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [7:306:2295] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 105 at schemeshard: 72057594046678944 2025-06-24T20:39:16.634850Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-24T20:39:16.634909Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [7:530:2489] 2025-06-24T20:39:16.635214Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [7:532:2491], Recipient [7:132:2155]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:16.635276Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:16.635312Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 105 TestModificationResults wait txId: 106 2025-06-24T20:39:16.636002Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [7:598:2555], Recipient [7:132:2155]: {TEvModifySchemeTransaction txid# 106 TabletId# 72057594046678944} 2025-06-24T20:39:16.636080Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T20:39:16.644273Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpBackupIncrementalBackupCollection BackupIncrementalBackupCollection { Name: ".backups/collections/MyCollection1" } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:39:16.644921Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-24T20:39:16.645163Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 4], parent name: MyCollection1, child name: 19700101000000Z_incremental, child id: [OwnerId: 72057594046678944, LocalPathId: 8], at schemeshard: 72057594046678944 
2025-06-24T20:39:16.645247Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 0 2025-06-24T20:39:16.645308Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 106:0 type: TxMkDir target path: [OwnerId: 72057594046678944, LocalPathId: 8] source path: 2025-06-24T20:39:16.645430Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 106:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:39:16.645570Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 106:1, explain: Incremental backup is disabled on this collection, at schemeshard: 72057594046678944 2025-06-24T20:39:16.645632Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 106:2, propose status:StatusInvalidParameter, reason: Incremental backup is disabled on this collection, at schemeshard: 72057594046678944 2025-06-24T20:39:16.652974Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:160: Abort operation: IgniteOperation fail to propose a part, opId: 106:1, at schemeshard: 72057594046678944, already accepted parts: 1, propose result status: StatusInvalidParameter, with reason: Incremental backup is disabled on this collection, tx message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpBackupIncrementalBackupCollection BackupIncrementalBackupCollection { Name: ".backups/collections/MyCollection1" } } TxId: 106 TabletId: 72057594046678944 2025-06-24T20:39:16.653223Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:275: MkDir AbortPropose, opId: 106:0, at schemeshard: 72057594046678944 2025-06-24T20:39:16.659330Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T20:39:16.676090Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Incremental backup is disabled on this collection" TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:39:16.676442Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Incremental backup is disabled on this collection, operation: BACKUP INCREMENTAL, path: /MyRoot/.backups/collections/MyCollection1 2025-06-24T20:39:16.676525Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-24T20:39:16.676957Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-24T20:39:16.677010Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-24T20:39:16.677528Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [7:604:2561], Recipient [7:132:2155]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:39:16.677605Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 
2025-06-24T20:39:16.677651Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T20:39:16.677815Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124996, Sender [7:306:2295], Recipient [7:132:2155]: NKikimrScheme.TEvNotifyTxCompletion TxId: 106 2025-06-24T20:39:16.677855Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4964: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-24T20:39:16.677943Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-24T20:39:16.678061Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-24T20:39:16.678110Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [7:602:2559] 2025-06-24T20:39:16.678393Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [7:604:2561], Recipient [7:132:2155]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:16.678430Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:16.678482Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 2025-06-24T20:39:16.679023Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [7:605:2562], Recipient [7:132:2155]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/.backups/collections/MyCollection1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-24T20:39:16.679075Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T20:39:16.679491Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:39:16.679802Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1" took 594us result status StatusSuccess 2025-06-24T20:39:16.680411Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1" PathDescription { Self { Name: "MyCollection1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeBackupCollection CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 BackupCollectionVersion: 0 } ChildrenExist: true } Children { Name: "19700101000000Z_full" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 105 CreateStep: 5000006 
ParentPathId: 4 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BackupCollectionDescription { Name: "MyCollection1" ExplicitEntryList { Entries { Type: ETypeTable Path: "/MyRoot/Table1" } } Cluster { } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> DataShardVolatile::CompactedVolatileChangesCommit [GOOD] >> DataShardVolatile::CompactedVolatileChangesAbort ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp_scan/unittest >> KqpScan::ScanAfterSplitSlowMetaRead [GOOD] Test command err: 2025-06-24T20:38:28.458108Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:38:28.458899Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:38:28.459065Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:38:28.459219Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:38:28.459818Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:38:28.459872Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004964/r3tmp/tmpZcAbW4/pdisk_1.dat 2025-06-24T20:38:29.105297Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:29.389830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:38:29.559947Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:212:2173] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T20:38:29.562975Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:29.563136Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:29.564661Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T20:38:29.565910Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [2:241:2129] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T20:38:29.577131Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:29.577239Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:29.589372Z node 2 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976720656 RangeEnd# 281474976725656 txAllocator# 72057594046447617 2025-06-24T20:38:29.603654Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:38:29.604192Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:38:29.604642Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:38:29.956823Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:212:2173] Handle TEvProposeTransaction 
2025-06-24T20:38:29.956936Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:212:2173] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T20:38:29.957149Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:212:2173] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:1150:2702] 2025-06-24T20:38:30.190700Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:1150:2702] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 7 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T20:38:30.190816Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:1150:2702] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T20:38:30.191689Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:1150:2702] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T20:38:30.191793Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:1150:2702] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:38:30.192268Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:1150:2702] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:38:30.192471Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:1150:2702] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:38:30.192594Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:1150:2702] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T20:38:30.194821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:30.195509Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:1150:2702] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T20:38:30.198141Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:1150:2702] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T20:38:30.198237Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:1150:2702] txid# 281474976715657 SEND to# [1:1058:2645] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T20:38:30.357213Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:1227:2760] 2025-06-24T20:38:30.357556Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:38:30.442776Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037894 actor [1:1233:2764] 2025-06-24T20:38:30.443077Z 
node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:38:30.478731Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:38:30.479344Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:38:30.481283Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-24T20:38:30.481377Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-24T20:38:30.481445Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-24T20:38:30.481866Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:38:30.484036Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:38:30.484121Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:1321:2760] in generation 1 2025-06-24T20:38:30.488926Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037892 actor [1:1236:2765] 2025-06-24T20:38:30.489198Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:38:30.500404Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:38:30.501279Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:38:30.502834Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037894 2025-06-24T20:38:30.502909Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037894 2025-06-24T20:38:30.502973Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037894 2025-06-24T20:38:30.503328Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:38:30.503821Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:38:30.503888Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037894 persisting started state actor id [1:1334:2764] in generation 1 2025-06-24T20:38:30.519413Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:38:30.519561Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:38:30.521140Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037892 2025-06-24T20:38:30.521211Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037892 2025-06-24T20:38:30.521269Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037892 2025-06-24T20:38:30.521614Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:38:30.521784Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:38:30.521873Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037892 persisting started state actor id [1:1352:2765] in generation 1 2025-06-24T20:38:30.553724Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:1332:2360] 2025-06-24T20:38:30.554051Z node 2 :TX_DATASHARD DEBUG: 
datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:38:30.613974Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037891 actor [2:1341:2361] 2025-06-24T20:38:30.614286Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:38:30.626407Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037893 actor [2:1343:2362] 2025-06-24T20:38:30.626685Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:38:30.640590Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [2:1345:2363] 2025-06-24T20:38:30.640884Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:38:30.652373Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: Tx ... aseId : /Root. PoolId : default. Database : . }. Send stats to executor actor [5:1869:2926] TaskId: 1 Stats: CpuTimeUs: 473 Tasks { TaskId: 1 CpuTimeUs: 200 FinishTimeMs: 1750797555514 OutputRows: 1 OutputBytes: 6 ResultRows: 1 ResultBytes: 6 ComputeCpuTimeUs: 65 BuildCpuTimeUs: 135 HostName: "ghrun-4pjfmvffsq" NodeId: 5 CreateTimeMs: 1750797555513 CurrentWaitOutputTimeUs: 159 UpdateTimeMs: 1750797555515 } MaxMemoryUsage: 1048576 2025-06-24T20:39:15.515750Z node 5 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:357: ActorId: [5:1869:2926] TxId: 281474976715667. Ctx: { TraceId: 01jyhtn2c45sa54y7d564sg2sw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send TEvStreamData to [5:1554:2926], seqNo: 1, nRows: 1 2025-06-24T20:39:15.515841Z node 5 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [5:1872:3107], TxId: 281474976715667, task: 1. Ctx: { TraceId : 01jyhtn2c45sa54y7d564sg2sw. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-24T20:39:15.515891Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_impl.h:1485: SelfId: [5:1872:3107], TxId: 281474976715667, task: 1. Ctx: { TraceId : 01jyhtn2c45sa54y7d564sg2sw. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Poll inputs 2025-06-24T20:39:15.515919Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_impl.h:1500: SelfId: [5:1872:3107], TxId: 281474976715667, task: 1. Ctx: { TraceId : 01jyhtn2c45sa54y7d564sg2sw. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Poll sources 2025-06-24T20:39:15.515949Z node 5 :KQP_COMPUTE TRACE: dq_sync_compute_actor_base.h:36: SelfId: [5:1872:3107], TxId: 281474976715667, task: 1. Ctx: { TraceId : 01jyhtn2c45sa54y7d564sg2sw. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Resume execution, run status: Finished 2025-06-24T20:39:15.515977Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_impl.h:393: SelfId: [5:1872:3107], TxId: 281474976715667, task: 1. Ctx: { TraceId : 01jyhtn2c45sa54y7d564sg2sw. CustomerSuppliedId : . 
SessionId : ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. ProcessOutputsState.Inflight: 0 2025-06-24T20:39:15.516004Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_impl.h:423: SelfId: [5:1872:3107], TxId: 281474976715667, task: 1. Ctx: { TraceId : 01jyhtn2c45sa54y7d564sg2sw. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Do not drain channelId: 1, finished 2025-06-24T20:39:15.516066Z node 5 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976715667, task: 1. Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-06-24T20:39:15.516285Z node 5 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [5:1869:2926] TxId: 281474976715667. Ctx: { TraceId: 01jyhtn2c45sa54y7d564sg2sw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [5:1872:3107], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { CpuTimeUs: 473 Tasks { TaskId: 1 CpuTimeUs: 200 FinishTimeMs: 1750797555514 OutputRows: 1 OutputBytes: 6 ResultRows: 1 ResultBytes: 6 ComputeCpuTimeUs: 65 BuildCpuTimeUs: 135 HostName: "ghrun-4pjfmvffsq" NodeId: 5 CreateTimeMs: 1750797555513 CurrentWaitOutputTimeUs: 159 UpdateTimeMs: 1750797555515 } MaxMemoryUsage: 1048576 } 2025-06-24T20:39:15.516411Z node 5 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [5:1869:2926] TxId: 281474976715667. Ctx: { TraceId: 01jyhtn2c45sa54y7d564sg2sw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [5:1872:3107], ... response 271646822 NKikimr::NKqp::TEvKqpExecuter::TEvStreamData NKikimrKqp.TEvExecuterStreamData ResultSet { columns { name: "column0" type { optional_type { item { type_id: UINT64 } } } } rows { items { uint64_value: 596400 } } } SeqNo: 1 QueryResultIndex: 0 ChannelId: 1 VirtualTimestamp { Step: 2500 TxId: 281474976715664 } Finished: true 2025-06-24T20:39:15.516818Z node 5 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:423: TxId: 281474976715667, send ack to channelId: 1, seqNo: 1, enough: 0, freeSpace: 100, to: [5:1873:3107] 2025-06-24T20:39:15.516894Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_channels.cpp:179: TxId: 281474976715667, task: 1. Received channel data ack for channelId: 1, seqNo: 1, lastSentSeqNo: 1, freeSpace: 100, early finish: 0 2025-06-24T20:39:15.516941Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_channels.cpp:207: TxId: 281474976715667, task: 1. PeerState, peerState:(freeSpace:100;inFlightBytes:0;inFlightCount:0;), sentSeqNo: 1, ackSeqNo: 1 2025-06-24T20:39:15.516968Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_channels.cpp:220: TxId: 281474976715667, task: 1. Resume compute actor 2025-06-24T20:39:15.517059Z node 5 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [5:1872:3107], TxId: 281474976715667, task: 1. Ctx: { TraceId : 01jyhtn2c45sa54y7d564sg2sw. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. 
CA StateFunc 271646922 2025-06-24T20:39:15.517107Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_impl.h:1485: SelfId: [5:1872:3107], TxId: 281474976715667, task: 1. Ctx: { TraceId : 01jyhtn2c45sa54y7d564sg2sw. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Poll inputs 2025-06-24T20:39:15.517136Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_impl.h:1500: SelfId: [5:1872:3107], TxId: 281474976715667, task: 1. Ctx: { TraceId : 01jyhtn2c45sa54y7d564sg2sw. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Poll sources 2025-06-24T20:39:15.517166Z node 5 :KQP_COMPUTE TRACE: dq_sync_compute_actor_base.h:36: SelfId: [5:1872:3107], TxId: 281474976715667, task: 1. Ctx: { TraceId : 01jyhtn2c45sa54y7d564sg2sw. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Resume execution, run status: Finished 2025-06-24T20:39:15.517190Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_impl.h:393: SelfId: [5:1872:3107], TxId: 281474976715667, task: 1. Ctx: { TraceId : 01jyhtn2c45sa54y7d564sg2sw. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. ProcessOutputsState.Inflight: 0 2025-06-24T20:39:15.517227Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_impl.h:423: SelfId: [5:1872:3107], TxId: 281474976715667, task: 1. Ctx: { TraceId : 01jyhtn2c45sa54y7d564sg2sw. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Do not drain channelId: 1, finished 2025-06-24T20:39:15.517259Z node 5 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715667, task: 1. Tasks execution finished 2025-06-24T20:39:15.517287Z node 5 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [5:1872:3107], TxId: 281474976715667, task: 1. Ctx: { TraceId : 01jyhtn2c45sa54y7d564sg2sw. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-06-24T20:39:15.517370Z node 5 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715667, task: 1. pass away 2025-06-24T20:39:15.517469Z node 5 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715667;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T20:39:15.517609Z node 5 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715667, taskId: 1. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-24T20:39:15.517845Z node 5 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [5:1869:2926] TxId: 281474976715667. Ctx: { TraceId: 01jyhtn2c45sa54y7d564sg2sw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [5:1872:3107], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 2116 Tasks { TaskId: 1 CpuTimeUs: 204 FinishTimeMs: 1750797555517 OutputRows: 1 OutputBytes: 6 ResultRows: 1 ResultBytes: 6 ComputeCpuTimeUs: 69 BuildCpuTimeUs: 135 HostName: "ghrun-4pjfmvffsq" NodeId: 5 CreateTimeMs: 1750797555513 UpdateTimeMs: 1750797555517 } MaxMemoryUsage: 1048576 } 2025-06-24T20:39:15.517913Z node 5 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. Ctx: { TraceId: 01jyhtn2c45sa54y7d564sg2sw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [5:1872:3107] 2025-06-24T20:39:15.518011Z node 5 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [5:1869:2926] TxId: 281474976715667. Ctx: { TraceId: 01jyhtn2c45sa54y7d564sg2sw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T20:39:15.518050Z node 5 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2202: ActorId: [5:1869:2926] TxId: 281474976715667. Ctx: { TraceId: 01jyhtn2c45sa54y7d564sg2sw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Terminate, become ZombieState 2025-06-24T20:39:15.518087Z node 5 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [5:1869:2926] TxId: 281474976715667. Ctx: { TraceId: 01jyhtn2c45sa54y7d564sg2sw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=ODMwMjk5ZmQtY2I5MjQ2YzgtNWQ0MjZjYWYtYjUxNTU3NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.002116s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 ... response 271646721 NKikimr::NKqp::NPrivateEvents::TEvQueryResponse NKikimrKqp.TEvQueryResponse Response { TxMeta { } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 987 >> TxUsage::Sinks_Oltp_WriteToTopics_3_Table [GOOD] >> BasicUsage::BasicWriteSession >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-59 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-60 |90.1%| [TA] $(B)/ydb/core/tx/schemeshard/ut_backup_collection/test-results/unittest/{meta.json ... results_accumulator.log} >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-58 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-59 |90.1%| [TA] $(B)/ydb/core/tx/datashard/ut_kqp_scan/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TestYmqHttpProxy::TestTagQueue [GOOD] >> KqpIndexes::SecondaryIndexOrderBy [GOOD] >> KqpIndexes::SecondaryIndexInsert1 >> KqpIndexes::DeleteByIndex [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_3_Query >> TxUsage::WriteToTopic_Demo_39_Table >> BasicUsage::WriteSessionNoAvailableDatabase >> TestYmqHttpProxy::TestUntagQueue |90.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |90.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |90.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut |90.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut |90.1%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/test-results/unittest/{meta.json ... results_accumulator.log} |90.1%| [LD] {RESULT} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |90.1%| [LD] {RESULT} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::DeleteByIndex [GOOD] Test command err: Trying to start YDB, gRPC: 2380, MsgBus: 12316 2025-06-24T20:38:47.911822Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618122043000777:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:47.912109Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004661/r3tmp/tmp69DiWg/pdisk_1.dat 2025-06-24T20:38:48.600268Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:48.607484Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618122043000557:2079] 1750797527835316 != 1750797527835319 2025-06-24T20:38:48.657807Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:48.657913Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:48.661109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2380, node 1 2025-06-24T20:38:48.843309Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:38:48.883525Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:38:48.883553Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:38:48.883560Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:38:48.883698Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12316 TClient is connected to server localhost:12316 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:38:50.069615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:38:50.141914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:50.370292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:50.582283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:50.726898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:38:52.899269Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618122043000777:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:52.899353Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:38:53.460295Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618147812805977:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:53.460447Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:54.236901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:54.293488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:54.372054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:54.442354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:54.581360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:54.625857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:54.692795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:54.803367Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618152107773944:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:54.803486Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:54.811300Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618152107773949:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:54.817406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:54.829711Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618152107773951:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:38:54.908654Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618152107774004:3432] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:38:57.026376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:57.159856Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618164992676451:3751] t ... done id#281474976715672:0 progress is 3/3 2025-06-24T20:39:17.169764Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-24T20:39:17.169777Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 3/3, is published: true 2025-06-24T20:39:17.169830Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519618249192980021:2480] message: TxId: 281474976715672 2025-06-24T20:39:17.169854Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-24T20:39:17.169880Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715672:0 2025-06-24T20:39:17.169892Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715672:0 2025-06-24T20:39:17.170000Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-06-24T20:39:17.170016Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715672:1 2025-06-24T20:39:17.170022Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715672:1 2025-06-24T20:39:17.170039Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-06-24T20:39:17.170046Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715672:2 2025-06-24T20:39:17.170054Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715672:2 2025-06-24T20:39:17.170081Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-24T20:39:17.170460Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:39:17.170533Z node 3 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519618249192980021:2480] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-24T20:39:17.171819Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519618253487947329:3609], Recipient [3:7519618219128206605:2140]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:17.171841Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:17.171851Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T20:39:17.178950Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519618253487947423:3677], Recipient [3:7519618219128206605:2140]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:17.178985Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:17.179000Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T20:39:17.718900Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519618219128206605:2140]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:39:17.718952Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:39:17.719003Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519618219128206605:2140], Recipient [3:7519618219128206605:2140]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:39:17.719031Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:39:18.726888Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519618219128206605:2140]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:39:18.726933Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:39:18.726981Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519618219128206605:2140], Recipient [3:7519618219128206605:2140]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:39:18.726997Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:39:19.726606Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519618219128206605:2140]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:39:19.726646Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:39:19.726709Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: 
StateWork, received event# 271124999, Sender [3:7519618219128206605:2140], Recipient [3:7519618219128206605:2140]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:39:19.726742Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:39:20.331676Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [3:7519618266372849549:3769], Recipient [3:7519618219128206605:2140]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:39:20.331716Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:39:20.331729Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T20:39:20.331760Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [3:7519618266372849550:3770], Recipient [3:7519618219128206605:2140]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:39:20.331771Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:39:20.331778Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T20:39:20.332274Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:7519618223423174197:2273], Recipient [3:7519618219128206605:2140]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037888 TableLocalId: 2 Generation: 1 Round: 0 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750797556761 LastUpdateTime: 1750797556761 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037888 NodeId: 3 StartTime: 1750797550287 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T20:39:20.332321Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T20:39:20.332364Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-06-24T20:39:20.332485Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 2] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750797556761 LastUpdateTime: 1750797556761 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 
0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T20:39:20.332516Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.099993s, queue# 1 2025-06-24T20:39:20.332685Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:7519618223423174208:2274], Recipient [3:7519618219128206605:2140]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037889 TableLocalId: 2 Generation: 1 Round: 0 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750797556756 LastUpdateTime: 1750797556756 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037889 NodeId: 3 StartTime: 1750797550289 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T20:39:20.332699Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T20:39:20.332715Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037889 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-06-24T20:39:20.332798Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037889 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 2] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750797556756 LastUpdateTime: 1750797556756 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 >> IndexBuildTest::CancellationNotEnoughRetries |90.1%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp_scan/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpIndexes::SecondaryIndexReplace-UseSink [GOOD] >> TxUsage::WriteToTopic_Demo_23_RestartNo_Query [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-12 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-13 >> TTopicYqlTest::CreateAndAlterTopicYql [GOOD] >> TTopicYqlTest::AlterAutopartitioning >> TPersQueueTest::ReadRuleServiceTypeMigration [GOOD] >> TPersQueueTest::ReadRuleServiceTypeMigrationWithDisallowDefault >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-60 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-61 >> TCmsTenatsTest::TestClusterRatioLimit >> TestYmqHttpProxy::TestCreateQueueWithTags [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SecondaryIndexReplace-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 1387, MsgBus: 8865 2025-06-24T20:38:49.178742Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618132345389477:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:49.181612Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00465a/r3tmp/tmpmTbDdx/pdisk_1.dat 2025-06-24T20:38:50.023694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:50.023813Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:50.034245Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:38:50.045548Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:50.051298Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618132345389423:2079] 1750797529129963 != 1750797529129966 TServer::EnableGrpc on GrpcPort 1387, node 1 2025-06-24T20:38:50.201789Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:38:50.229198Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:38:50.229223Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:38:50.229229Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:38:50.229349Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8865 TClient is connected to server localhost:8865 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:38:51.120479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:38:51.159621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:38:51.203609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:51.554065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:51.874936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:52.015513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:54.072980Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618153820227538:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:54.073090Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:54.179214Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618132345389477:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:54.179294Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:38:54.412117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:54.446236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:54.487710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:54.561895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:54.602013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:54.642896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:54.717089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:54.810846Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519618153820228205:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:54.810914Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:54.811059Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618153820228210:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:54.814775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:54.825713Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618153820228212:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:38:54.913359Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618153820228263:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:38:57.047137Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519618132345389778:2161]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:38:57.047212Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.c ... AT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:39:20.829847Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269551620, Sender [3:7519618262201989353:2476], Recipient [3:7519618227842248628:2147]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 7519618262201989353 RawX2: 4503612512274860 } Origin: 72075186224037923 State: 2 TxId: 281474976710672 Step: 0 Generation: 1 2025-06-24T20:39:20.829880Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4983: StateWork, processing event TEvDataShard::TEvSchemaChanged 2025-06-24T20:39:20.829955Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046644480, at schemeshard: 72057594046644480, message: Source { RawX1: 7519618262201989353 RawX2: 4503612512274860 } Origin: 72075186224037923 State: 2 TxId: 281474976710672 Step: 0 Generation: 1 2025-06-24T20:39:20.829976Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 281474976710672, tablet: 72075186224037923, partId: 0 2025-06-24T20:39:20.830115Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 281474976710672:0, at schemeshard: 72057594046644480, message: Source { RawX1: 7519618262201989353 RawX2: 4503612512274860 } Origin: 72075186224037923 State: 2 TxId: 281474976710672 Step: 0 Generation: 1 2025-06-24T20:39:20.830151Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 281474976710672:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 2025-06-24T20:39:20.830226Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 281474976710672:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 7519618262201989353 RawX2: 4503612512274860 } Origin: 72075186224037923 State: 2 TxId: 281474976710672 Step: 0 Generation: 1 2025-06-24T20:39:20.830277Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710672:0, shardIdx: 72057594046644480:35, shard: 72075186224037923, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-24T20:39:20.830296Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-24T20:39:20.830313Z node 3 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 281474976710672:0, datashard: 72075186224037923, at schemeshard: 72057594046644480 2025-06-24T20:39:20.830338Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710672:0 129 -> 240 2025-06-24T20:39:20.830494Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:39:20.830980Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-24T20:39:20.830993Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:39:20.831008Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976710672:0 2025-06-24T20:39:20.831065Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519618262201989353:2476] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976710672 at schemeshard: 72057594046644480 2025-06-24T20:39:20.831201Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [3:7519618227842248628:2147], Recipient [3:7519618227842248628:2147]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T20:39:20.831220Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T20:39:20.831265Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-24T20:39:20.831286Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976710672:0 ProgressState 2025-06-24T20:39:20.831381Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:39:20.831395Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710672:0 progress is 3/3 2025-06-24T20:39:20.831407Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 3/3 2025-06-24T20:39:20.831428Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710672:0 progress is 3/3 2025-06-24T20:39:20.831439Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 3/3 2025-06-24T20:39:20.831453Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710672, ready parts: 3/3, is published: true 2025-06-24T20:39:20.831499Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519618262201989328:2474] message: TxId: 281474976710672 2025-06-24T20:39:20.831525Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 3/3 2025-06-24T20:39:20.831550Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all 
the parts is done, operation id: 281474976710672:0 2025-06-24T20:39:20.831561Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710672:0 2025-06-24T20:39:20.831700Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-06-24T20:39:20.831715Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710672:1 2025-06-24T20:39:20.831721Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710672:1 2025-06-24T20:39:20.831736Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-06-24T20:39:20.831743Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710672:2 2025-06-24T20:39:20.831749Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710672:2 2025-06-24T20:39:20.831785Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-24T20:39:20.832311Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:39:20.832373Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519618262201989328:2474] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710672 at schemeshard: 72057594046644480 2025-06-24T20:39:20.833987Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519618262201989340:3614], Recipient [3:7519618227842248628:2147]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:20.834015Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:20.834028Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T20:39:20.834059Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519618262201989416:3665], Recipient [3:7519618227842248628:2147]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:20.834070Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:20.834078Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T20:39:20.840687Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519618262201989432:3680], Recipient [3:7519618227842248628:2147]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:20.840731Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:20.840744Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T20:39:21.495109Z node 3 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519618227842248628:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:39:21.495176Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:39:21.495233Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519618227842248628:2147], Recipient [3:7519618227842248628:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:39:21.495250Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:39:21.783293Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:39:22.495291Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519618227842248628:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:39:22.495335Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:39:22.495387Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519618227842248628:2147], Recipient [3:7519618227842248628:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:39:22.495403Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:39:22.654397Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T20:39:22.705612Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill >> TxUsage::Offsets_Cannot_Be_Promoted_When_Reading_In_A_Transaction_Table [GOOD] >> TopicAutoscaling::ReadFromTimestamp_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReadFromTimestamp_PQv1 >> TestYmqHttpProxy::TestDeleteMessage >> TxUsage::Offsets_Cannot_Be_Promoted_When_Reading_In_A_Transaction_Query >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-60 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-61 |90.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut |90.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut |90.1%| [LD] {RESULT} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut >> TxUsage::WriteToTopic_Demo_30_Table [GOOD] >> TCmsTenatsTest::TestClusterRatioLimit [GOOD] >> TCmsTenatsTest::TestClusterRatioLimitForceRestartMode |90.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors |90.1%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors |90.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors >> TCmsTest::TestForceRestartMode >> KqpIndexes::SecondaryIndexInsert1 [GOOD] >> TxUsage::WriteToTopic_Demo_30_Query >> TPersQueueTest::BadSids [GOOD] >> BasicUsage::GetAllStartPartitionSessions [GOOD] >> 
BasicUsage::PreferredDatabaseNoFallback >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-59 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-60 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SecondaryIndexInsert1 [GOOD] Test command err: Trying to start YDB, gRPC: 4004, MsgBus: 27645 2025-06-24T20:38:52.911593Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618145649321460:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:52.923705Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00464f/r3tmp/tmpwcwOns/pdisk_1.dat 2025-06-24T20:38:53.815896Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:53.816018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:53.839874Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618145649321428:2079] 1750797532888477 != 1750797532888480 2025-06-24T20:38:53.849075Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:38:53.878318Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:53.880228Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4004, node 1 2025-06-24T20:38:54.130977Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:38:54.131007Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:38:54.131014Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:38:54.131167Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27645 TClient is connected to server localhost:27645 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:38:55.446626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:38:55.526856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:55.788589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:56.065016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:56.167566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:57.915484Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618145649321460:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:57.915585Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:38:58.364967Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618171419126849:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:58.365070Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:59.299920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:59.348219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:59.389530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:59.450996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:59.545639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:59.616319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:59.699711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:59.790125Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618175714094813:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:59.790204Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:59.790484Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618175714094818:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:59.795079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:59.823990Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618175714094820:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:38:59.903698Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618175714094871:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:39:01.604402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:39:01.676221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part ... :7519618267781160419:2139]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046644480 Generation: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 8] Version: 2 } 2025-06-24T20:39:26.898016Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5035: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-24T20:39:26.898047Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 8 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976710659 2025-06-24T20:39:26.898099Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 8 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976710659 2025-06-24T20:39:26.898109Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976710659 2025-06-24T20:39:26.898120Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710659, pathId: [OwnerId: 72057594046644480, LocalPathId: 8], version: 2 2025-06-24T20:39:26.898132Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 8] was 2 2025-06-24T20:39:26.898180Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976710659, subscribers: 1 2025-06-24T20:39:26.898203Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [3:7519618289255997373:2305] 2025-06-24T20:39:26.898221Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:39:26.898695Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:39:26.898786Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:39:26.898822Z node 3 :FLAT_TX_SCHEMESHARD 
TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:39:26.898854Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:39:26.903531Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710659 2025-06-24T20:39:26.903555Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:39:26.903630Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710659 2025-06-24T20:39:26.903650Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:39:26.903695Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710659 2025-06-24T20:39:26.903706Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:39:26.903742Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710659 2025-06-24T20:39:26.903751Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:39:26.903786Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710659 2025-06-24T20:39:26.903797Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:39:26.903939Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519618289255997373:2305] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 at schemeshard: 72057594046644480 2025-06-24T20:39:26.904546Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519618289255997373:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T20:39:26.904794Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519618289255997402:2406], Recipient [3:7519618267781160419:2139]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:26.904824Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:26.904841Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T20:39:26.960880Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [3:7519618289255997428:2431], Recipient [3:7519618267781160419:2139]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:39:26.960922Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:39:26.960933Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T20:39:26.960975Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [3:7519618289255997424:2428], Recipient [3:7519618267781160419:2139]: {TEvModifySchemeTransaction txid# 281474976710660 TabletId# 72057594046644480} 2025-06-24T20:39:26.960991Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T20:39:26.963724Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } TxId: 281474976710660 TabletId: 72057594046644480 Owner: "metadata@system" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T20:39:26.964101Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_resource_pool.cpp:148: [72057594046644480] TCreateResourcePool Propose: opId# 281474976710660:0, path# /Root/.metadata/workload_manager/pools/default 2025-06-24T20:39:26.964267Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710660:1, propose status:StatusAlreadyExists, reason: Check failed: path: '/Root/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), at schemeshard: 72057594046644480 2025-06-24T20:39:26.964477Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects 
ApplyOnExecute at tablet# 72057594046644480 2025-06-24T20:39:26.965056Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976710660, response: Status: StatusAlreadyExists Reason: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" TxId: 281474976710660 SchemeshardId: 72057594046644480 PathId: 8 PathCreateTxId: 281474976710659, at schemeshard: 72057594046644480 2025-06-24T20:39:26.965376Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710660, database: /Root, subject: metadata@system, status: StatusAlreadyExists, reason: Check failed: path: '/Root/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), operation: CREATE RESOURCE POOL, path: default, set owner:metadata@system, add access: +(SR|DS):all-users@well-known, add access: +(SR|DS):root@builtin 2025-06-24T20:39:26.965399Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:39:26.965623Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519618289255997424:2428] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:39:26.965833Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519618289255997428:2431], Recipient [3:7519618267781160419:2139]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:26.965853Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:39:26.965871Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T20:39:27.783656Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519618267781160419:2139]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:39:27.783713Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:39:27.783779Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519618267781160419:2139], Recipient [3:7519618267781160419:2139]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:39:27.783796Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:39:27.806719Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill >> TCmsTenatsTest::TestClusterRatioLimitForceRestartMode [GOOD] >> TCmsTenatsTest::TestClusterLimitForceRestartModeScheduled |90.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql |90.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql |90.1%| [LD] 
{BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-13 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-14 >> DataShardVolatile::NotCachingAbortingDeletes-UseSink [GOOD] >> DataShardVolatile::GracefulShardRestartNoEarlyReadSetAck >> TCmsTest::TestForceRestartMode [GOOD] >> TCmsTest::StateStorageTwoRings >> TestYmqHttpProxy::TestUntagQueue [GOOD] >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Table |90.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction |90.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction |90.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction >> TPQCompatTest::CommitOffsets [GOOD] >> TPQCompatTest::LongProducerAndLongMessageGroupId >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-61 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-62 >> TCmsTest::RequestRestartServicesDryRun >> TestYmqHttpProxy::TestTagQueueMultipleQueriesInflight ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::BadSids [GOOD] Test command err: 2025-06-24T20:35:26.781615Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:35:26.781724Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T20:35:27.471974Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:35:27.472066Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info === Server->StartServer(false); 2025-06-24T20:35:28.350112Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519617268214627016:2084];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:28.382485Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:35:28.516580Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519617268161604310:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:28.788609Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004962/r3tmp/tmpgi6PG2/pdisk_1.dat 2025-06-24T20:35:28.868499Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:35:28.868870Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:35:29.446834Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:29.478486Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:29.478607Z node 
3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:29.480641Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:29.480712Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:29.481461Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:35:29.491295Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:29.493446Z node 3 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-24T20:35:29.498428Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:29.499513Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:29.581305Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 20119, node 3 2025-06-24T20:35:29.733255Z node 3 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-24T20:35:29.734160Z node 3 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-24T20:35:29.808355Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/004962/r3tmp/yandex1tottT.tmp 2025-06-24T20:35:29.808382Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/004962/r3tmp/yandex1tottT.tmp 2025-06-24T20:35:29.808567Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/004962/r3tmp/yandex1tottT.tmp 2025-06-24T20:35:29.808737Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:35:30.022641Z INFO: TTestServer started on Port 5387 GrpcPort 20119 TClient is connected to server localhost:5387 PQClient connected to localhost:20119 === TenantModeEnabled() = 0 === Init PQ - start server on port 20119 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:35:30.768715Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T20:35:30.768914Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:30.769136Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T20:35:30.769167Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976710657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T20:35:30.769466Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:35:30.769582Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:30.776342Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T20:35:30.776540Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T20:35:30.776747Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:30.776802Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T20:35:30.776819Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-24T20:35:30.776858Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 2 -> 3 waiting... 
2025-06-24T20:35:30.783221Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:30.783285Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T20:35:30.783304Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 3 -> 128 2025-06-24T20:35:30.788279Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:30.788327Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:30.788353Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-24T20:35:30.788401Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-24T20:35:30.796053Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:35:30.796537Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:35:30.796554Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-24T20:35:30.796575Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:35:30.798615Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-24T20:35:30.798816Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-24T20:35:30.803595Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750797330849, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T20:35:30.803788Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750797330849 MediatorID: 72057594046382081 ... 
25-06-24T20:39:26.103353Z node 21 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037893 (partition=7) TEvClientConnected Status OK, TabletId: 72075186224037893, NodeId 21, Generation: 1 2025-06-24T20:39:26.103437Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037893] server connected, pipe [21:7519618287993999132:2529], now have 1 active actors on pipe 2025-06-24T20:39:26.103550Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-06-24T20:39:26.103585Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037893] got client message batch for topic 'rt3.dc1--topic1' partition 7 2025-06-24T20:39:26.103710Z node 21 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie base64:aa|a17d7c3-4311ef8c-26a36e22-e798230_0 generated for partition 7 topic 'rt3.dc1--topic1' owner base64:aa 2025-06-24T20:39:26.103850Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037893, Partition: 7, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 7 2025-06-24T20:39:26.103916Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--topic1' partition: 7 messageNo: 0 requestId: cookie: 0 2025-06-24T20:39:26.104187Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-06-24T20:39:26.104207Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037893] got client message batch for topic 'rt3.dc1--topic1' partition 7 2025-06-24T20:39:26.104315Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--topic1' partition: 7 messageNo: 0 requestId: cookie: 0 2025-06-24T20:39:26.104479Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 5 partition: 7 MaxSeqNo: 0 sessionId: base64:aa|a17d7c3-4311ef8c-26a36e22-e798230_0 2025-06-24T20:39:26.107987Z :INFO: [] MessageGroupId [base64:aa] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750797566107 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T20:39:26.108154Z :INFO: [] MessageGroupId [base64:aa] SessionId [] Write session established. Init response: session_id: "base64:aa|a17d7c3-4311ef8c-26a36e22-e798230_0" topic: "topic1" cluster: "dc1" partition_id: 7 supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-24T20:39:26.108428Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|a17d7c3-4311ef8c-26a36e22-e798230_0] Write 1 messages with Id from 1 to 1 2025-06-24T20:39:26.108561Z :INFO: [] MessageGroupId [base64:aa] SessionId [base64:aa|a17d7c3-4311ef8c-26a36e22-e798230_0] Write session: close. 
Timeout = 18446744073709551 ms 2025-06-24T20:39:26.109153Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|a17d7c3-4311ef8c-26a36e22-e798230_0] Write session: try to update token 2025-06-24T20:39:26.109233Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|a17d7c3-4311ef8c-26a36e22-e798230_0] Send 1 message(s) (0 left), first sequence number is 1 2025-06-24T20:39:26.115265Z node 21 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: base64:aa|a17d7c3-4311ef8c-26a36e22-e798230_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-24T20:39:26.115596Z node 21 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037893 (partition=7) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-06-24T20:39:26.115857Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-06-24T20:39:26.115919Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037893] got client message batch for topic 'rt3.dc1--topic1' partition 7 2025-06-24T20:39:26.116049Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--topic1' partition: 7 messageNo: 0 requestId: cookie: 1 2025-06-24T20:39:26.116143Z node 21 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037893 (partition=7) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-24T20:39:26.116333Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-06-24T20:39:26.116351Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037893] got client message batch for topic 'rt3.dc1--topic1' partition 7 2025-06-24T20:39:26.116878Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72075186224037893] got client message topic: rt3.dc1--topic1 partition: 7 SourceId: '\0base64:aa' SeqNo: 1 partNo : 0 messageNo: 1 size 92 offset: -1 2025-06-24T20:39:26.117177Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72075186224037893, Partition: 7, State: StateIdle] Topic 'rt3.dc1--topic1' partition 7 part blob processing sourceId '\0base64:aa' seqNo 1 partNo 0 2025-06-24T20:39:26.155652Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72075186224037893, Partition: 7, State: StateIdle] Topic 'rt3.dc1--topic1' partition 7 part blob complete sourceId '\0base64:aa' seqNo 1 partNo 0 FormedBlobsCount 0 NewHead: Offset 0 PartNo 0 PackedSize 169 count 1 nextOffset 1 batches 1 2025-06-24T20:39:26.156611Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72075186224037893, Partition: 7, State: StateIdle] Add new write blob: topic 'rt3.dc1--topic1' partition 7 compactOffset 0,1 HeadOffset 0 endOffset 0 curOffset 1 d0000000007_00000000000000000000_00000_0000000001_00000? size 157 WTime 1750797566156 2025-06-24T20:39:26.156913Z node 21 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T20:39:26.157018Z node 21 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 7 offset 0 partNo 0 count 1 size 157 2025-06-24T20:39:26.162725Z node 21 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 7 offset 0 count 1 size 157 actorID [21:7519618270814129416:2449] 2025-06-24T20:39:26.163066Z node 21 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. 
Tablet '72075186224037893' partition 7 offset 0 partno 0 count 1 parts 0 suffix '63' size 157 2025-06-24T20:39:26.163749Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037893, Partition: 7, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 102 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T20:39:26.163869Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037893, Partition: 7, State: StateIdle] TPartition::ReplyWrite. Partition: 7 2025-06-24T20:39:26.163974Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037893, Partition: 7, State: StateIdle] Answering for message sourceid: '\0base64:aa', Topic: 'rt3.dc1--topic1', Partition: 7, SeqNo: 1, partNo: 0, Offset: 0 is stored on disk 2025-06-24T20:39:26.164437Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037893, Partition: 7, State: StateIdle] Topic 'rt3.dc1--topic1' partition 7 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T20:39:26.164509Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72075186224037893, Partition: 7, State: StateIdle] Topic 'rt3.dc1--topic1' partition 7 user user send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-24T20:39:26.164597Z node 21 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037893, Partition: 7, State: StateIdle] need more data for compaction. cumulativeSize=157, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:39:26.164698Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--topic1' partition: 7 messageNo: 1 requestId: cookie: 1 2025-06-24T20:39:26.164857Z node 21 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037893 (partition=7) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-24T20:39:26.165386Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037893, Partition: 7, State: StateIdle] read cookie 2 Topic 'rt3.dc1--topic1' partition 7 user user offset 0 count 1 size 1024000 endOffset 1 max time lag 0ms effective offset 0 2025-06-24T20:39:26.165739Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037893, Partition: 7, State: StateIdle] read cookie 2 added 1 blobs, size 157 count 1 last offset 0, current partition end offset: 1 2025-06-24T20:39:26.165773Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037893, Partition: 7, State: StateIdle] Reading cookie 2. Send blob request. 2025-06-24T20:39:26.165838Z node 21 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 7 offset 0 partno 0 count 1 parts_count 0 source 1 size 157 accessed 0 times before, last time 2025-06-24T20:39:26.000000Z 2025-06-24T20:39:26.165877Z node 21 :PERSQUEUE DEBUG: read.h:121: Reading cookie 2. All 1 blobs are from cache. 2025-06-24T20:39:26.165939Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-24T20:39:26.166127Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 137 from pos 0 cbcount 1 2025-06-24T20:39:26.166218Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:964: Topic 'rt3.dc1--topic1' partition 7 user user readTimeStamp done, result 1750797566112 queuesize 0 startOffset 0 2025-06-24T20:39:26.166837Z node 21 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037893' partition 7 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-24T20:39:26.167902Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|a17d7c3-4311ef8c-26a36e22-e798230_0] Write session got write response: sequence_numbers: 1 offsets: 0 already_written: false partition_id: 7 write_statistics { persist_duration_ms: 8 queued_in_partition_duration_ms: 42 } 2025-06-24T20:39:26.167968Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|a17d7c3-4311ef8c-26a36e22-e798230_0] Write session: acknoledged message 1 2025-06-24T20:39:26.211228Z :INFO: [] MessageGroupId [base64:aa] SessionId [base64:aa|a17d7c3-4311ef8c-26a36e22-e798230_0] Write session will now close 2025-06-24T20:39:26.211325Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|a17d7c3-4311ef8c-26a36e22-e798230_0] Write session: aborting 2025-06-24T20:39:26.211920Z :INFO: [] MessageGroupId [base64:aa] SessionId [base64:aa|a17d7c3-4311ef8c-26a36e22-e798230_0] Write session: gracefully shut down, all writes complete 2025-06-24T20:39:26.211997Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|a17d7c3-4311ef8c-26a36e22-e798230_0] Write session: destroy 2025-06-24T20:39:26.222808Z node 21 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: base64:aa|a17d7c3-4311ef8c-26a36e22-e798230_0 grpc read done: success: 0 data: 2025-06-24T20:39:26.222851Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: base64:aa|a17d7c3-4311ef8c-26a36e22-e798230_0 grpc read failed 2025-06-24T20:39:26.222895Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: base64:aa|a17d7c3-4311ef8c-26a36e22-e798230_0 grpc closed 2025-06-24T20:39:26.222919Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: base64:aa|a17d7c3-4311ef8c-26a36e22-e798230_0 is DEAD 2025-06-24T20:39:26.224025Z node 21 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037893 (partition=7) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:39:26.225752Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037893] server disconnected, pipe [21:7519618287993999132:2529] destroyed 2025-06-24T20:39:26.225806Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037893, Partition: 7, State: StateIdle] TPartition::DropOwner. 
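The write-session trace above is a complete single-message cycle: an owner cookie is granted for partition 7, one 92-byte message is written and lands at offset 0 as a 157-byte blob, the blob is cached in L1/L2, the ack ("acknoledged message 1") is delivered to the client, and the session is torn down (grpc closed, DropOwner). The "Timeout = 18446744073709551 ms" printed when the session closed looks like a 64-bit "no timeout" sentinel rather than a real deadline: it is exactly UINT64_MAX integer-divided by 1000, which is what a duration stored as ui64 microseconds prints when converted to milliseconds at its maximum value. That reading is an inference from the number itself, not something the log states; a minimal C++ check of the arithmetic:

#include <cstdint>
#include <iostream>

int main() {
    // 18446744073709551615 / 1000 == 18446744073709551,
    // the exact value printed as "Timeout = ... ms" in the trace above.
    std::cout << "Timeout = " << UINT64_MAX / 1000 << " ms\n";
    return 0;
}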
>> TCmsTest::StateStorageTwoRings [GOOD] >> TCmsTest::StateStorageTwoBrokenRings >> TCmsTenatsTest::TestClusterLimitForceRestartModeScheduled [GOOD] >> TCmsTenatsTest::TestClusterRatioLimitForceRestartModeScheduled >> DataShardVolatile::CompactedVolatileChangesAbort [GOOD] |90.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller |90.2%| [LD] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller |90.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller >> TxUsage::WriteToTopic_Demo_20_RestartNo_Query [GOOD] >> TCmsTest::RequestRestartServicesDryRun [GOOD] >> TCmsTest::RequestReplacePDiskDoesntBreakGroup >> TPersQueueTest::PartitionsMapping [GOOD] >> TPersQueueTest::MessageMetadata >> TxUsage::WriteToTopic_Demo_16_Table [GOOD] >> TCmsTenatsTest::TestClusterRatioLimitForceRestartModeScheduled [GOOD] >> TestYmqHttpProxy::TestDeleteMessage [GOOD] >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Table >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-61 [GOOD] >> TCmsTest::StateStorageTwoBrokenRings [GOOD] >> TCmsTest::SysTabletsNode >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-62 |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTenatsTest::TestClusterRatioLimitForceRestartModeScheduled [GOOD] >> TxUsage::WriteToTopic_Demo_39_Table [GOOD] >> TestYmqHttpProxy::TestDeleteMessageBatch >> TxUsage::WriteToTopic_Demo_16_Query >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Table [GOOD] >> DataShardReadTableSnapshots::ReadTableSplitBefore >> TPersQueueTest::Codecs_InitWriteSession_DefaultTopicSupportedCodecsInInitResponse [GOOD] >> TPersQueueTest::Codecs_WriteMessageWithDefaultCodecs_MessagesAreAcknowledged >> TCmsTest::RequestReplacePDiskDoesntBreakGroup [GOOD] >> TCmsTest::RequestReplacePDiskConsecutiveWithDone >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-60 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-61 >> TxUsage::WriteToTopic_Demo_39_Query >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-14 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-15 >> TCmsTest::SysTabletsNode [GOOD] >> DstCreator::Basic >> TPersQueueTest::SetupReadSession [GOOD] >> TPersQueueTest::TestBigMessage >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Query >> BasicUsage::BasicWriteSession [GOOD] >> BasicUsage::CloseWriteSessionImmediately >> Viewer::JsonStorageListingV1NodeIdFilter [GOOD] >> Viewer::JsonStorageListingV1PDiskIdFilter |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::SysTabletsNode [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-62 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-63 >> BasicUsage::WriteSessionNoAvailableDatabase [GOOD] >> BasicUsage::WriteSessionSwitchDatabases >> TCmsTest::RequestReplacePDiskConsecutiveWithDone [GOOD] >> TCmsTest::RequestReplaceManyDevicesOnOneNode ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_volatile/unittest >> DataShardVolatile::CompactedVolatileChangesAbort [GOOD] Test command err: 2025-06-24T20:34:21.960939Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:21.961327Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:21.961510Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00215f/r3tmp/tmpMQ4MXM/pdisk_1.dat 2025-06-24T20:34:22.472049Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:34:22.475387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:22.522459Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:22.529426Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797258426614 != 1750797258426618 2025-06-24T20:34:22.576855Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T20:34:22.578214Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T20:34:22.578491Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:22.578618Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:22.592524Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:22.799658Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T20:34:22.799748Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T20:34:22.799908Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:601:2509] 2025-06-24T20:34:23.012696Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:601:2509] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T20:34:23.012808Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:601:2509] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T20:34:23.013455Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:601:2509] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T20:34:23.013549Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:601:2509] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:34:23.013934Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:601:2509] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:34:23.014172Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:601:2509] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 1000 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:34:23.014335Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:601:2509] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T20:34:23.014672Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:601:2509] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T20:34:23.017826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:23.019091Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:601:2509] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T20:34:23.019195Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:601:2509] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T20:34:23.058669Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:617:2524], Recipient [1:626:2530]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:34:23.065185Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:617:2524], Recipient [1:626:2530]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:34:23.065712Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:626:2530] 2025-06-24T20:34:23.066018Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:34:23.224107Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:617:2524], Recipient [1:626:2530]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:34:23.224949Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:34:23.225077Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:34:23.227086Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T20:34:23.231765Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:34:23.231886Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:34:23.232347Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-24T20:34:23.232533Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:34:23.232629Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:642:2530] in generation 1 2025-06-24T20:34:23.243588Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:34:23.301648Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:34:23.301869Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:34:23.302016Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:644:2540] 2025-06-24T20:34:23.302065Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:34:23.302103Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:34:23.302140Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:34:23.302390Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:626:2530], Recipient [1:626:2530]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:34:23.302456Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:34:23.302853Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:34:23.302970Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:34:23.303099Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:34:23.303213Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:34:23.303283Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:34:23.303324Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:34:23.303360Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:34:23.303394Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:34:23.303445Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:34:23.303548Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:633:2534], Recipient [1:626:2530]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:34:23.303584Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:34:23.303639Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:622:2527], serverId# [1:633:2534], 
sessionId# [0:0:0] 2025-06-24T20:34:23.304094Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:633:2534] 2025-06-24T20:34:23.304141Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:34:23.304253Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:34:23.304488Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T20:34:23.304543Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:34:23.304640Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:34:23.304693Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20 ... 4T20:39:32.946014Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72075186224037888] shutdown pipe due to pending shutdown request [26:928:2734] 2025-06-24T20:39:32.946150Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72075186224037888] notify reset [26:928:2734] 2025-06-24T20:39:32.946652Z node 26 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553210, Sender [26:927:2733], Recipient [26:656:2544]: NKikimrTxDataShard.TEvCompactTable PathId { OwnerId: 72057594046644480 LocalId: 2 } CompactBorrowed: false 2025-06-24T20:39:32.946902Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} queued, type NKikimr::NDataShard::TDataShard::TTxCompactTable 2025-06-24T20:39:32.947131Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:39:32.947385Z node 26 :TABLET_EXECUTOR DEBUG: TCompactionLogic PrepareForceCompaction for 72075186224037888 table 1001, mode Full, forced state None, forced mode Full 2025-06-24T20:39:32.947701Z node 26 :TX_DATASHARD INFO: datashard__compaction.cpp:141: Started background compaction# 1 of 72075186224037888 tableId# 2 localTid# 1001, requested from [26:927:2733], partsCount# 0, memtableSize# 656, memtableWaste# 3952, memtableRows# 2 2025-06-24T20:39:32.947937Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} hope 1 -> done Change{16, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T20:39:32.948140Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:39:32.948644Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy PrepareCompaction for 72075186224037888: task 1, edge 9223372036854775807/0, generation 0 2025-06-24T20:39:32.948821Z node 26 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:16} starting compaction 2025-06-24T20:39:32.949470Z node 26 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:17} starting Scan{1 on 1001, Compact{72075186224037888.1.16, eph 1}} 2025-06-24T20:39:32.949715Z node 26 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:17} 
started compaction 1 2025-06-24T20:39:32.949867Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy PrepareCompaction for 72075186224037888 started compaction 1 generation 0 ... blocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR cookie 12910281824604102047 2025-06-24T20:39:32.962152Z node 26 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:17} Compact 1 on TGenCompactionParams{1001: gen 0 epoch +inf, 0 parts} step 16, product {tx status + 1 parts epoch 2} done 2025-06-24T20:39:32.962735Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy CompactionFinished for 72075186224037888: compaction 1, generation 0 2025-06-24T20:39:32.962977Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy CheckGeneration for 72075186224037888 generation 1, state Free, final id 0, final level 0 2025-06-24T20:39:32.963106Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy CheckGeneration for 72075186224037888 generation 3, state Free, final id 0, final level 0 2025-06-24T20:39:32.964038Z node 26 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037888, table# 1001, finished edge# 1, ts 1970-01-01T00:00:01.549293Z 2025-06-24T20:39:32.964369Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} queued, type NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs 2025-06-24T20:39:32.964570Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:39:32.964775Z node 26 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001, finished edge# 1, front# 1 2025-06-24T20:39:32.964966Z node 26 :TX_DATASHARD DEBUG: datashard__compaction.cpp:260: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001 sending TEvCompactTableResult to# [26:927:2733]pathId# [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T20:39:32.966279Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} hope 1 -> done Change{17, redo 83b alter 0b annex 0, ~{ 27 } -{ }, 0 gb} 2025-06-24T20:39:32.966499Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} release 4194304b of static, Memory{0 dyn 0} ... blocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR cookie 17456505408754476522 ... blocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR cookie 5854347089185145346 ========= Starting an immediate read ========= 2025-06-24T20:39:33.285633Z node 26 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhtnpvk8zq0a1qnwezpkray, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=26&id=NDk2NmE1Yi1hYTAwMDc2Ny0xZWVjNzhiYS05NzJlZGMw, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:39:33.288363Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72075186224037888] send [26:860:2679] 2025-06-24T20:39:33.288509Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [26:860:2679] 2025-06-24T20:39:33.289055Z node 26 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [26:953:2741], Recipient [26:656:2544]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false KeysSize: 1 2025-06-24T20:39:33.289406Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} queued, type NKikimr::NDataShard::TDataShard::TTxReadViaPipeline 2025-06-24T20:39:33.289610Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:39:33.289860Z node 26 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-24T20:39:33.290024Z node 26 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1550/281474976715662 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-24T20:39:33.290188Z node 26 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v1550/18446744073709551615 2025-06-24T20:39:33.290403Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CheckRead 2025-06-24T20:39:33.290675Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-24T20:39:33.290809Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CheckRead 2025-06-24T20:39:33.290935Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T20:39:33.291041Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T20:39:33.291265Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:5] at 72075186224037888 2025-06-24T20:39:33.291390Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-24T20:39:33.291441Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T20:39:33.291477Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T20:39:33.291514Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit ExecuteRead 2025-06-24T20:39:33.291798Z node 26 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: 
false } 2025-06-24T20:39:33.292198Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is DelayComplete 2025-06-24T20:39:33.292280Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T20:39:33.292400Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T20:39:33.292499Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CompletedOperations 2025-06-24T20:39:33.292580Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-24T20:39:33.292615Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T20:39:33.292682Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:5] at 72075186224037888 has finished 2025-06-24T20:39:33.292804Z node 26 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T20:39:33.293019Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 1 -> done Change{18, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T20:39:33.293242Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:39:33.419677Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:13} Tx{20, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2025-06-24T20:39:33.419933Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:13} Tx{20, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:39:33.420409Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:13} Tx{20, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{12, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2025-06-24T20:39:33.420624Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:13} Tx{20, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:39:33.421992Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:14} commited cookie 1 for step 13 2025-06-24T20:39:33.422472Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [26:519:2451] 2025-06-24T20:39:33.422604Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [26:519:2451] |90.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |90.2%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |90.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |90.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut |90.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut |90.2%| [LD] {RESULT} 
$(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut >> TxUsage::Offsets_Cannot_Be_Promoted_When_Reading_In_A_Transaction_Query [GOOD] |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest >> TestYmqHttpProxy::TestTagQueueMultipleQueriesInflight [GOOD] |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest >> TxUsage::WriteToTopic_Demo_30_Query [GOOD] >> TxUsage::ReadRuleGeneration >> TxUsage::WriteToTopic_Demo_31_Table >> TTopicYqlTest::AlterAutopartitioning [GOOD] >> TTopicYqlTest::BadRequests |90.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut |90.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut |90.2%| [LD] {RESULT} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut >> TCmsTest::RequestReplaceManyDevicesOnOneNode [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-15 [GOOD] >> DataShardReadTableSnapshots::ReadTableSplitBefore [GOOD] >> DataShardReadTableSnapshots::ReadTableSplitFinished >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-16 >> DstCreator::Basic [GOOD] >> DstCreator::CannotFindColumn |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::RequestReplaceManyDevicesOnOneNode [GOOD] |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-62 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-63 >> DataShardVolatile::GracefulShardRestartNoEarlyReadSetAck [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestTagQueueMultipleQueriesInflight [GOOD] Test command err: 2025-06-24T20:38:31.323637Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618053155092018:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:31.324119Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044a3/r3tmp/tmp6Ty8NP/pdisk_1.dat 2025-06-24T20:38:32.069794Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618053155091880:2079] 1750797511254413 != 1750797511254416 2025-06-24T20:38:32.078234Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:32.083922Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:32.084059Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:32.088250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7044, node 1 2025-06-24T20:38:32.316130Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 
2025-06-24T20:38:32.408539Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:38:32.408565Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:38:32.408573Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:38:32.408683Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19020 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:38:33.157904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:38:33.180266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:19020 2025-06-24T20:38:33.479347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T20:38:33.490703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T20:38:33.496900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 
2025-06-24T20:38:33.509946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-24T20:38:33.564148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:33.846002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:38:33.917906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:38:33.998564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710665, at schemeshard: 72057594046644480 2025-06-24T20:38:34.004855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:34.077897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:34.137205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:34.184605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:38:34.238225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:34.286255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:34.374226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:36.273824Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618053155092018:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:36.273902Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:38:36.701152Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618074629929739:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:36.701259Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:36.701669Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618074629929751:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:36.705823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:36.723034Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618074629929753:2341], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-24T20:38:36.781889Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618074629929804:2870] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 720575940 ... e='000000000000000301v0', folder_id='folder4' 2025-06-24T20:39:44.603817Z node 7 :SQS DEBUG: proxy_actor.cpp:78: Request [12d52064-de3d638-ec6237af-ef582f06] Request proxy started 2025-06-24T20:39:44.604702Z node 7 :SQS DEBUG: service.cpp:761: Request [12d52064-de3d638-ec6237af-ef582f06] Answer configuration for queue [cloud4/000000000000000301v0] without leader 2025-06-24T20:39:44.605210Z node 7 :SQS DEBUG: proxy_actor.cpp:97: Request [12d52064-de3d638-ec6237af-ef582f06] Get configuration duration: 0ms 2025-06-24T20:39:44.605609Z node 7 :SQS DEBUG: proxy_service.cpp:246: Request [12d52064-de3d638-ec6237af-ef582f06] Send get leader node request to sqs service for cloud4/000000000000000301v0 2025-06-24T20:39:44.606815Z node 7 :SQS DEBUG: service.cpp:581: Request [12d52064-de3d638-ec6237af-ef582f06] Leader node for queue [cloud4/000000000000000301v0] is 7 2025-06-24T20:39:44.607621Z node 7 :SQS TRACE: executor.cpp:286: Request [a3f5b4ce-224ef6a7-3661c232-75e112db] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] HandleResponse { Status: 48 TxId: 281474976710922 Step: 1750797584599 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "attrs" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Struct Struct { Member { Name: "ContentBasedDeduplication" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "DelaySeconds" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "DlqArn" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "MaxReceiveCount" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MaximumMessageSize" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MessageRetentionPeriod" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ReceiveMessageWaitTime" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ShowDetailedCountersDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "VisibilityTimeout" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "queueExists" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "tags" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } Value { Struct { Optional { Optional { Struct { Optional { Bool: false } } Struct { Optional { Uint64: 0 } } Struct { Optional { Text: "" } } Struct { Optional { Text: "" } } Struct { Optional { Bool: true } } Struct { Optional { Uint64: 0 } } Struct { Optional { Uint64: 262144 } } Struct { Optional { Uint64: 345600000 } } Struct { Optional 
{ Uint64: 0 } } Struct { } Struct { Optional { Uint64: 30000 } } } } } Struct { Optional { Bool: true } } Struct { Optional { Text: "{\"k15\":\"v\"}" } } } } } 2025-06-24T20:39:44.607651Z node 7 :SQS DEBUG: executor.cpp:287: Request [a3f5b4ce-224ef6a7-3661c232-75e112db] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] Attempt 1 execution duration: 127ms 2025-06-24T20:39:44.608241Z node 7 :SQS TRACE: executor.cpp:325: Request [a3f5b4ce-224ef6a7-3661c232-75e112db] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] Sending mkql execution result: { Status: 48 TxId: 281474976710922 Step: 1750797584599 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "attrs" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Struct Struct { Member { Name: "ContentBasedDeduplication" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "DelaySeconds" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "DlqArn" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "MaxReceiveCount" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MaximumMessageSize" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MessageRetentionPeriod" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ReceiveMessageWaitTime" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ShowDetailedCountersDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "VisibilityTimeout" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "queueExists" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "tags" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } Value { Struct { Optional { Optional { Struct { Optional { Bool: false } } Struct { Optional { Uint64: 0 } } Struct { Optional { Text: "" } } Struct { Optional { Text: "" } } Struct { Optional { Bool: true } } Struct { Optional { Uint64: 0 } } Struct { Optional { Uint64: 262144 } } Struct { Optional { Uint64: 345600000 } } Struct { Optional { Uint64: 0 } } Struct { } Struct { Optional { Uint64: 30000 } } } } } Struct { Optional { Bool: true } } Struct { Optional { Text: "{\"k15\":\"v\"}" } } } } } 2025-06-24T20:39:44.608351Z node 7 :SQS TRACE: executor.cpp:327: Request [a3f5b4ce-224ef6a7-3661c232-75e112db] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] Minikql data response: {"attrs": {"ContentBasedDeduplication": false, "DelaySeconds": 0, "DlqArn": "", "DlqName": "", "FifoQueue": true, "MaxReceiveCount": 0, "MaximumMessageSize": 262144, "MessageRetentionPeriod": 345600000, "ReceiveMessageWaitTime": 0, "ShowDetailedCountersDeadline": null, "VisibilityTimeout": 30000}, "queueExists": true, "tags": "{\"k15\":\"v\"}"} 2025-06-24T20:39:44.608513Z node 7 :SQS DEBUG: executor.cpp:401: Request 
[a3f5b4ce-224ef6a7-3661c232-75e112db] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] execution duration: 131ms 2025-06-24T20:39:44.608701Z node 7 :SQS DEBUG: proxy_service.cpp:170: Request [12d52064-de3d638-ec6237af-ef582f06] Got leader node for queue response. Node id: 7. Status: 0 2025-06-24T20:39:44.608802Z node 7 :SQS TRACE: proxy_service.cpp:303: Request [12d52064-de3d638-ec6237af-ef582f06] Sending request from proxy to leader node 7: ListQueueTags { Auth { UserName: "cloud4" FolderId: "folder4" UserSID: "fake_user_sid@as" } QueueName: "000000000000000301v0" } RequestId: "12d52064-de3d638-ec6237af-ef582f06" 2025-06-24T20:39:44.608920Z node 7 :SQS DEBUG: proxy_service.cpp:70: Request [12d52064-de3d638-ec6237af-ef582f06] Received Sqs Request: ListQueueTags { Auth { UserName: "cloud4" FolderId: "folder4" UserSID: "fake_user_sid@as" } QueueName: "000000000000000301v0" } RequestId: "12d52064-de3d638-ec6237af-ef582f06" 2025-06-24T20:39:44.609318Z node 7 :SQS DEBUG: queue_leader.cpp:556: Request [a3f5b4ce-224ef6a7-3661c232-75e112db] Sending executed reply 2025-06-24T20:39:44.609851Z node 7 :SQS DEBUG: action.h:133: Request [12d52064-de3d638-ec6237af-ef582f06] Request started. Actor: [7:7519618366641866843:5368] 2025-06-24T20:39:44.609898Z node 7 :SQS TRACE: service.cpp:1472: Inc local leader ref for actor [7:7519618366641866843:5368] 2025-06-24T20:39:44.609918Z node 7 :SQS DEBUG: service.cpp:754: Request [12d52064-de3d638-ec6237af-ef582f06] Forward configuration request to queue [cloud4/000000000000000301v0] leader 2025-06-24T20:39:44.610258Z node 7 :SQS DEBUG: action.h:627: Request [12d52064-de3d638-ec6237af-ef582f06] Get configuration duration: 0ms 2025-06-24T20:39:44.610285Z node 7 :SQS TRACE: action.h:647: Request [12d52064-de3d638-ec6237af-ef582f06] Got configuration. Root url: http://ghrun-4pjfmvffsq.auto.internal:8771, Shards: 1, Fail: 0 2025-06-24T20:39:44.610315Z node 7 :SQS TRACE: action.h:427: Request [12d52064-de3d638-ec6237af-ef582f06] DoRoutine 2025-06-24T20:39:44.610460Z node 7 :SQS TRACE: action.h:264: Request [12d52064-de3d638-ec6237af-ef582f06] SendReplyAndDie from action actor { ListQueueTags { RequestId: "12d52064-de3d638-ec6237af-ef582f06" Tags { Key: "k15" Value: "v" } } } 2025-06-24T20:39:44.610621Z node 7 :SQS TRACE: proxy_service.h:35: Request [12d52064-de3d638-ec6237af-ef582f06] Sending sqs response: { ListQueueTags { RequestId: "12d52064-de3d638-ec6237af-ef582f06" Tags { Key: "k15" Value: "v" } } RequestId: "12d52064-de3d638-ec6237af-ef582f06" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true QueueTags { Key: "k15" Value: "v" } } 2025-06-24T20:39:44.610744Z node 7 :SQS TRACE: service.cpp:1483: Dec local leader ref for actor [7:7519618366641866843:5368]. 
Found: 1 2025-06-24T20:39:44.610851Z node 7 :SQS TRACE: proxy_service.cpp:194: HandleSqsResponse ListQueueTags { RequestId: "12d52064-de3d638-ec6237af-ef582f06" Tags { Key: "k15" Value: "v" } } RequestId: "12d52064-de3d638-ec6237af-ef582f06" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true QueueTags { Key: "k15" Value: "v" } 2025-06-24T20:39:44.610990Z node 7 :SQS TRACE: proxy_service.cpp:208: Sending answer to proxy actor [7:7519618366641866841:2741]: ListQueueTags { RequestId: "12d52064-de3d638-ec6237af-ef582f06" Tags { Key: "k15" Value: "v" } } RequestId: "12d52064-de3d638-ec6237af-ef582f06" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true QueueTags { Key: "k15" Value: "v" } 2025-06-24T20:39:44.611228Z node 7 :SQS TRACE: proxy_actor.cpp:178: Request [12d52064-de3d638-ec6237af-ef582f06] HandleResponse: { ListQueueTags { RequestId: "12d52064-de3d638-ec6237af-ef582f06" Tags { Key: "k15" Value: "v" } } RequestId: "12d52064-de3d638-ec6237af-ef582f06" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true QueueTags { Key: "k15" Value: "v" } }, status: OK 2025-06-24T20:39:44.611318Z node 7 :SQS DEBUG: proxy_actor.cpp:147: Request [12d52064-de3d638-ec6237af-ef582f06] Sending reply from proxy actor: { ListQueueTags { RequestId: "12d52064-de3d638-ec6237af-ef582f06" Tags { Key: "k15" Value: "v" } } RequestId: "12d52064-de3d638-ec6237af-ef582f06" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true QueueTags { Key: "k15" Value: "v" } } 2025-06-24T20:39:44.611726Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:379: http request [ListQueueTags] requestId [12d52064-de3d638-ec6237af-ef582f06] Got succesfult GRPC response. 2025-06-24T20:39:44.611833Z node 7 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ListQueueTags] requestId [12d52064-de3d638-ec6237af-ef582f06] reply ok 2025-06-24T20:39:44.611929Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:1267: http request [ListQueueTags] requestId [12d52064-de3d638-ec6237af-ef582f06] Send metering event. 
HttpStatusCode: 200 IsFifo: 1 FolderId: folder4 RequestSizeInBytes: 530 ResponseSizeInBytes: 197 SourceAddress: b8d1:5f00:6050:0:a0d1:5f00:6050:0 ResourceId: 000000000000000301v0 Action: ListQueueTags 2025-06-24T20:39:44.612063Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:38066) <- (200 , 20 bytes) 2025-06-24T20:39:44.612229Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:38066) connection closed Http output full {"Tags":{"k15":"v"}} |90.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/ydbd/ydbd |90.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/ydbd/ydbd |90.2%| [LD] {RESULT} $(B)/ydb/apps/ydbd/ydbd >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-63 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-64 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-61 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-62 |90.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader |90.2%| [LD] {RESULT} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader |90.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader >> TestYmqHttpProxy::TestDeleteMessageBatch [GOOD] |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest >> TComputeScheduler::TTotalLimits [GOOD] |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest >> TPersQueueTest::ReadRuleServiceTypeMigrationWithDisallowDefault [GOOD] >> TPersQueueTest::ReadWithoutConsumerFederation |90.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator |90.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator |90.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TComputeScheduler::TTotalLimits [GOOD] Test command err: 1610 1600 1610 1600 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_volatile/unittest >> DataShardVolatile::GracefulShardRestartNoEarlyReadSetAck [GOOD] Test command err: 2025-06-24T20:34:21.608949Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:21.609360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:21.609562Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0021ac/r3tmp/tmpPpr8dz/pdisk_1.dat 2025-06-24T20:34:22.019733Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:34:22.025374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:34:22.081256Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:22.088308Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797257993815 != 1750797257993819 2025-06-24T20:34:22.144019Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T20:34:22.145196Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T20:34:22.145447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:22.145558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:22.157327Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:22.369307Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T20:34:22.369385Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T20:34:22.369524Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:601:2509] 2025-06-24T20:34:22.626894Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:601:2509] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value2" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T20:34:22.627011Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:601:2509] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T20:34:22.627721Z node 1 :TX_PROXY DEBUG: 
schemereq.cpp:1660: Actor# [1:601:2509] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T20:34:22.627840Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:601:2509] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:34:22.628267Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:601:2509] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:34:22.628468Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:601:2509] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 1000 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:34:22.628637Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:601:2509] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T20:34:22.628960Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:601:2509] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T20:34:22.630596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:22.631903Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:601:2509] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T20:34:22.631984Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:601:2509] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T20:34:22.696111Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:617:2524], Recipient [1:626:2530]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:34:22.697297Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:617:2524], Recipient [1:626:2530]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:34:22.697772Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:626:2530] 2025-06-24T20:34:22.698050Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:34:22.805319Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:617:2524], Recipient [1:626:2530]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:34:22.812180Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:34:22.812324Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:34:22.814158Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T20:34:22.814245Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:34:22.814307Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 
2025-06-24T20:34:22.814726Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:34:22.814894Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:34:22.814984Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:642:2530] in generation 1 2025-06-24T20:34:22.826634Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:34:22.937936Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:34:22.938189Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:34:22.938291Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:644:2540] 2025-06-24T20:34:22.938339Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:34:22.938384Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:34:22.938438Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:34:22.938698Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:626:2530], Recipient [1:626:2530]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:34:22.938758Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:34:22.939086Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:34:22.939221Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:34:22.939352Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:34:22.939396Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:34:22.939451Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:34:22.939490Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:34:22.939532Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:34:22.939572Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:34:22.939614Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:34:22.939706Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:633:2534], Recipient [1:626:2530]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:34:22.939741Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:34:22.939782Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at 
leader tablet# 72075186224037888, clientId# [1:622:2527], serverId# [1:633:2534], sessionId# [0:0:0] 2025-06-24T20:34:22.940180Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:633:2534] 2025-06-24T20:34:22.940227Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:34:22.940331Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:34:22.940570Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T20:34:22.940645Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:34:22.940758Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:34:22.940816Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:28147 ... pp:1916: Add [0:7] at 72075186224037889 to execution unit ExecuteRead 2025-06-24T20:39:46.588084Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037889 on unit ExecuteRead 2025-06-24T20:39:46.588219Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037889 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 1511 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 1000 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1000 } 2025-06-24T20:39:46.588496Z node 28 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037889 promoting UnprotectedReadEdge to v1511/18446744073709551615 2025-06-24T20:39:46.588553Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037889 Complete read# {[28:1050:2819], 1} after executionsCount# 1 2025-06-24T20:39:46.588603Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037889 read iterator# {[28:1050:2819], 1} sends rowCount# 1, bytes# 32, quota rows left# 999, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T20:39:46.588679Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037889 read iterator# {[28:1050:2819], 1} finished in read 2025-06-24T20:39:46.588746Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037889 is Executed 2025-06-24T20:39:46.588781Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037889 executing on unit ExecuteRead 2025-06-24T20:39:46.588815Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037889 to execution unit CompletedOperations 2025-06-24T20:39:46.588849Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037889 on unit CompletedOperations 2025-06-24T20:39:46.588905Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037889 is Executed 2025-06-24T20:39:46.588935Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037889 executing on unit 
CompletedOperations 2025-06-24T20:39:46.588969Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037889 has finished 2025-06-24T20:39:46.589003Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2025-06-24T20:39:46.589118Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037889:1:16} Tx{33, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 1 -> done Change{16, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T20:39:46.589185Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037889:1:16} Tx{33, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:39:46.589234Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2025-06-24T20:39:46.590025Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72075186224037889] send [28:900:2707] 2025-06-24T20:39:46.590077Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037889] push event to server [28:900:2707] 2025-06-24T20:39:46.590500Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037890] ::Bootstrap [28:1053:2822] 2025-06-24T20:39:46.590547Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037890] lookup [28:1053:2822] 2025-06-24T20:39:46.590792Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72075186224037890] queue send [28:1053:2822] 2025-06-24T20:39:46.590911Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [28:1050:2819], Recipient [28:666:2550]: NKikimrTxDataShard.TEvReadCancel ReadId: 1 2025-06-24T20:39:46.590970Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037889 ReadCancel: { ReadId: 1 } 2025-06-24T20:39:46.591061Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037890] forward result local node, try to connect [28:1053:2822] 2025-06-24T20:39:46.591108Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037890]::SendEvent [28:1053:2822] 2025-06-24T20:39:46.591358Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037890] connected with status OK role: Leader [28:1053:2822] 2025-06-24T20:39:46.591409Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037890] send queued [28:1053:2822] 2025-06-24T20:39:46.591446Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037890] push event to server [28:1053:2822] 2025-06-24T20:39:46.591559Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [28:1054:2823], Recipient [28:1006:2791]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:39:46.591600Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:39:46.591647Z node 28 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037890, clientId# [28:1053:2822], serverId# [28:1054:2823], sessionId# [0:0:0] 2025-06-24T20:39:46.591841Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [28:1050:2819], Recipient [28:1006:2791]: NKikimrTxDataShard.TEvRead ReadId: 2 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 
1511 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 RangesSize: 1 2025-06-24T20:39:46.591970Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} queued, type NKikimr::NDataShard::TDataShard::TTxReadViaPipeline 2025-06-24T20:39:46.592043Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:39:46.592148Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037890, FollowerId 0 2025-06-24T20:39:46.592236Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit CheckRead 2025-06-24T20:39:46.592326Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed 2025-06-24T20:39:46.592364Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit CheckRead 2025-06-24T20:39:46.592399Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 72075186224037890 to execution unit BuildAndWaitDependencies 2025-06-24T20:39:46.592440Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit BuildAndWaitDependencies 2025-06-24T20:39:46.592512Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:1] at 72075186224037890 2025-06-24T20:39:46.592565Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed 2025-06-24T20:39:46.592596Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit BuildAndWaitDependencies 2025-06-24T20:39:46.592626Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 72075186224037890 to execution unit ExecuteRead 2025-06-24T20:39:46.592658Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit ExecuteRead 2025-06-24T20:39:46.592801Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037890 Execute read# 1, request: { ReadId: 2 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 1511 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 } 2025-06-24T20:39:46.593093Z node 28 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037890 promoting UnprotectedReadEdge to v1511/18446744073709551615 2025-06-24T20:39:46.593146Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037890 Complete read# {[28:1050:2819], 2} after executionsCount# 1 2025-06-24T20:39:46.593196Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037890 read iterator# {[28:1050:2819], 2} sends rowCount# 1, bytes# 32, quota rows left# 998, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T20:39:46.593274Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037890 read iterator# {[28:1050:2819], 2} finished in read 2025-06-24T20:39:46.593338Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution 
status for [0:1] at 72075186224037890 is Executed 2025-06-24T20:39:46.593373Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit ExecuteRead 2025-06-24T20:39:46.593405Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 72075186224037890 to execution unit CompletedOperations 2025-06-24T20:39:46.593437Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit CompletedOperations 2025-06-24T20:39:46.593494Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed 2025-06-24T20:39:46.593525Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit CompletedOperations 2025-06-24T20:39:46.593557Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:1] at 72075186224037890 has finished 2025-06-24T20:39:46.593597Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037890 2025-06-24T20:39:46.593733Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 1 -> done Change{17, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T20:39:46.593824Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:39:46.593875Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037890 2025-06-24T20:39:46.594673Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72075186224037890] send [28:1053:2822] 2025-06-24T20:39:46.594721Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037890] push event to server [28:1053:2822] 2025-06-24T20:39:46.594865Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [28:1050:2819], Recipient [28:1006:2791]: NKikimrTxDataShard.TEvReadCancel ReadId: 2 2025-06-24T20:39:46.594921Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037890 ReadCancel: { ReadId: 2 } { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 11 } items { uint32_value: 111 } }, { items { uint32_value: 21 } items { uint32_value: 21 } } >> DstCreator::CannotFindColumn [GOOD] >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestDeleteMessageBatch [GOOD] Test command err: 2025-06-24T20:38:35.212752Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618071886489393:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:35.216439Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004499/r3tmp/tmptddmVH/pdisk_1.dat 2025-06-24T20:38:35.864957Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:35.903943Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:35.904112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:38:35.919754Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21956, node 1 2025-06-24T20:38:36.179903Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:38:36.179924Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:38:36.179931Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:38:36.180037Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:38:36.223340Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20783 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:38:36.779255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:38:36.820702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:20783 2025-06-24T20:38:37.080841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-24T20:38:37.087406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T20:38:37.089585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-24T20:38:37.135416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-24T20:38:37.154949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:37.343849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:38:37.433301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2025-06-24T20:38:37.438685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:38:37.579835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710665, at schemeshard: 72057594046644480 2025-06-24T20:38:37.600209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:37.663257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:38:37.725927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:37.817289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:37.873726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:37.924960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:38:37.994289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:38:40.184540Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618071886489393:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:40.184623Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:38:41.106102Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618097656294434:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:41.106235Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:41.112136Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618097656294446:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:41.120805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:41.143302Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618097656294448:2341], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-24T20:38:41.242061Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618097656294501:2873] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, Loc ... 35763Z node 7 :SQS DEBUG: proxy_actor.cpp:147: Request [d2283d42-93910869-11ab9c95-ed860e75] Sending reply from proxy actor: { DeleteMessageBatch { RequestId: "d2283d42-93910869-11ab9c95-ed860e75" Entries { Id: "Id-0" } Entries { Id: "Id-1" } } RequestId: "d2283d42-93910869-11ab9c95-ed860e75" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false } 2025-06-24T20:39:49.740714Z node 7 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] HandleResponse { Status: 48 TxId: 281474976710712 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "messages" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Offset" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "SentTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } } } Value { Struct { Optional { } } } } } 2025-06-24T20:39:49.740749Z node 7 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Attempt 1 execution duration: 3ms 2025-06-24T20:39:49.740924Z node 7 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Sending mkql execution result: { Status: 48 TxId: 281474976710712 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "messages" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Offset" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "SentTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } } } Value { Struct { Optional { } } } } } 2025-06-24T20:39:49.740951Z node 7 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Minikql data response: {"messages": []} 2025-06-24T20:39:49.741024Z node 7 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] execution duration: 7ms 2025-06-24T20:39:49.741141Z node 7 :SQS DEBUG: queue_leader.cpp:556: Request [] Sending executed reply 2025-06-24T20:39:49.741237Z node 7 :SQS DEBUG: queue_leader.cpp:1913: Handle oldest timestamp metrics for [cloud4/000000000000000101v0/3] 2025-06-24T20:39:49.755409Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:379: http request [DeleteMessageBatch] requestId [d2283d42-93910869-11ab9c95-ed860e75] Got succesfult GRPC response. 
2025-06-24T20:39:49.755615Z node 7 :HTTP_PROXY INFO: http_req.cpp:1207: http request [DeleteMessageBatch] requestId [d2283d42-93910869-11ab9c95-ed860e75] reply ok 2025-06-24T20:39:49.755783Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:1267: http request [DeleteMessageBatch] requestId [d2283d42-93910869-11ab9c95-ed860e75] Send metering event. HttpStatusCode: 200 IsFifo: 0 FolderId: folder4 RequestSizeInBytes: 716 ResponseSizeInBytes: 222 SourceAddress: f8bc:8b00:6050:0:e0bc:8b00:6050:0 ResourceId: 000000000000000101v0 Action: DeleteMessageBatch Http output full {"Successful":[{"Id":"Id-0"},{"Id":"Id-1"}]} 2025-06-24T20:39:49.755895Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:38110) <- (200 , 44 bytes) 2025-06-24T20:39:49.756039Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:38110) connection closed 2025-06-24T20:39:49.756918Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:38118) incoming connection opened 2025-06-24T20:39:49.757011Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:38118) -> (POST /Root, 106 bytes) 2025-06-24T20:39:49.757149Z node 7 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [781f:df00:6050:0:601f:df00:6050:0] request [ReceiveMessage] url [/Root] database [/Root] requestId: ffc68061-ff91955c-a887572-485273a1 2025-06-24T20:39:49.757605Z node 7 :HTTP_PROXY INFO: http_req.cpp:520: http request [ReceiveMessage] requestId [ffc68061-ff91955c-a887572-485273a1] got new request from [781f:df00:6050:0:601f:df00:6050:0] 2025-06-24T20:39:49.758086Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:454: http request [ReceiveMessage] requestId [ffc68061-ff91955c-a887572-485273a1] Got cloud auth response. FolderId: folder4 CloudId: cloud4 UserSid: fake_user_sid@as 2025-06-24T20:39:49.758113Z node 7 :HTTP_PROXY INFO: http_req.cpp:280: http request [ReceiveMessage] requestId [ffc68061-ff91955c-a887572-485273a1] sending grpc request to '' database: '/Root' iam token size: 0 2025-06-24T20:39:49.758837Z node 7 :SQS DEBUG: ymq_proxy.cpp:148: Got new request in YMQ proxy. FolderId: folder4, CloudId: cloud4, UserSid: fake_user_sid@as, RequestId: ffc68061-ff91955c-a887572-485273a1 2025-06-24T20:39:49.758961Z node 7 :SQS DEBUG: proxy_actor.cpp:263: Request [ffc68061-ff91955c-a887572-485273a1] Proxy actor: used user_name='cloud4', queue_name='000000000000000101v0', folder_id='folder4' 2025-06-24T20:39:49.758974Z node 7 :SQS DEBUG: proxy_actor.cpp:78: Request [ffc68061-ff91955c-a887572-485273a1] Request proxy started 2025-06-24T20:39:49.759036Z node 7 :SQS DEBUG: service.cpp:761: Request [ffc68061-ff91955c-a887572-485273a1] Answer configuration for queue [cloud4/000000000000000101v0] without leader 2025-06-24T20:39:49.759164Z node 7 :SQS DEBUG: proxy_actor.cpp:97: Request [ffc68061-ff91955c-a887572-485273a1] Get configuration duration: 0ms 2025-06-24T20:39:49.759248Z node 7 :SQS DEBUG: proxy_service.cpp:246: Request [ffc68061-ff91955c-a887572-485273a1] Send get leader node request to sqs service for cloud4/000000000000000101v0 2025-06-24T20:39:49.759270Z node 7 :SQS DEBUG: service.cpp:581: Request [ffc68061-ff91955c-a887572-485273a1] Leader node for queue [cloud4/000000000000000101v0] is 7 2025-06-24T20:39:49.759295Z node 7 :SQS DEBUG: proxy_service.cpp:170: Request [ffc68061-ff91955c-a887572-485273a1] Got leader node for queue response. Node id: 7. 
Status: 0 2025-06-24T20:39:49.759409Z node 7 :SQS TRACE: proxy_service.cpp:303: Request [ffc68061-ff91955c-a887572-485273a1] Sending request from proxy to leader node 7: ReceiveMessage { Auth { UserName: "cloud4" FolderId: "folder4" UserSID: "fake_user_sid@as" } QueueName: "000000000000000101v0" } RequestId: "ffc68061-ff91955c-a887572-485273a1" 2025-06-24T20:39:49.759814Z node 7 :SQS DEBUG: proxy_service.cpp:70: Request [ffc68061-ff91955c-a887572-485273a1] Received Sqs Request: ReceiveMessage { Auth { UserName: "cloud4" FolderId: "folder4" UserSID: "fake_user_sid@as" } QueueName: "000000000000000101v0" } RequestId: "ffc68061-ff91955c-a887572-485273a1" 2025-06-24T20:39:49.759923Z node 7 :SQS DEBUG: action.h:133: Request [ffc68061-ff91955c-a887572-485273a1] Request started. Actor: [7:7519618386988551286:3716] 2025-06-24T20:39:49.759963Z node 7 :SQS TRACE: service.cpp:1472: Inc local leader ref for actor [7:7519618386988551286:3716] 2025-06-24T20:39:49.759980Z node 7 :SQS DEBUG: service.cpp:754: Request [ffc68061-ff91955c-a887572-485273a1] Forward configuration request to queue [cloud4/000000000000000101v0] leader 2025-06-24T20:39:49.760022Z node 7 :SQS DEBUG: action.h:627: Request [ffc68061-ff91955c-a887572-485273a1] Get configuration duration: 0ms 2025-06-24T20:39:49.760041Z node 7 :SQS TRACE: action.h:647: Request [ffc68061-ff91955c-a887572-485273a1] Got configuration. Root url: http://ghrun-4pjfmvffsq.auto.internal:8771, Shards: 4, Fail: 0 2025-06-24T20:39:49.760068Z node 7 :SQS TRACE: action.h:662: Request [ffc68061-ff91955c-a887572-485273a1] Got configuration. Attributes: { ContentBasedDeduplication: 0 DelaySeconds: 0.000000s FifoQueue: 0 MaximumMessageSize: 262144 MessageRetentionPeriod: 345600.000000s ReceiveMessageWaitTime: 0.000000s VisibilityTimeout: 30.000000s } 2025-06-24T20:39:49.760083Z node 7 :SQS TRACE: action.h:427: Request [ffc68061-ff91955c-a887572-485273a1] DoRoutine 2025-06-24T20:39:49.760127Z node 7 :SQS TRACE: queue_leader.cpp:2424: Increment active message requests for [cloud4/000000000000000101v0/3]. ActiveMessageRequests: 1 2025-06-24T20:39:49.760146Z node 7 :SQS DEBUG: queue_leader.cpp:938: Request [ffc68061-ff91955c-a887572-485273a1] Received empty result from shard 3 infly. Infly capacity: 0. Messages count: 0 2025-06-24T20:39:49.760158Z node 7 :SQS DEBUG: queue_leader.cpp:1162: Request [ffc68061-ff91955c-a887572-485273a1] No known messages in this shard. Skip attempt to add messages to infly 2025-06-24T20:39:49.760168Z node 7 :SQS DEBUG: queue_leader.cpp:1168: Request [ffc68061-ff91955c-a887572-485273a1] Already tried to add messages to infly 2025-06-24T20:39:49.760446Z node 7 :SQS TRACE: queue_leader.cpp:2434: Decrement active message requests for [[cloud4/000000000000000101v0/3]. 
ActiveMessageRequests: 0 2025-06-24T20:39:49.760545Z node 7 :SQS TRACE: action.h:264: Request [ffc68061-ff91955c-a887572-485273a1] SendReplyAndDie from action actor { ReceiveMessage { RequestId: "ffc68061-ff91955c-a887572-485273a1" } } 2025-06-24T20:39:49.760631Z node 7 :SQS TRACE: proxy_service.h:35: Request [ffc68061-ff91955c-a887572-485273a1] Sending sqs response: { ReceiveMessage { RequestId: "ffc68061-ff91955c-a887572-485273a1" } RequestId: "ffc68061-ff91955c-a887572-485273a1" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false } 2025-06-24T20:39:49.760770Z node 7 :SQS DEBUG: queue_leader.cpp:384: Request ReceiveMessage working duration: 1ms 2025-06-24T20:39:49.760881Z node 7 :SQS TRACE: proxy_service.cpp:194: HandleSqsResponse ReceiveMessage { RequestId: "ffc68061-ff91955c-a887572-485273a1" } RequestId: "ffc68061-ff91955c-a887572-485273a1" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false 2025-06-24T20:39:49.760936Z node 7 :SQS TRACE: proxy_service.cpp:208: Sending answer to proxy actor [7:7519618386988551285:2516]: ReceiveMessage { RequestId: "ffc68061-ff91955c-a887572-485273a1" } RequestId: "ffc68061-ff91955c-a887572-485273a1" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false 2025-06-24T20:39:49.760979Z node 7 :SQS TRACE: service.cpp:1483: Dec local leader ref for actor [7:7519618386988551286:3716]. Found: 1 2025-06-24T20:39:49.761816Z node 7 :SQS TRACE: proxy_actor.cpp:178: Request [ffc68061-ff91955c-a887572-485273a1] HandleResponse: { ReceiveMessage { RequestId: "ffc68061-ff91955c-a887572-485273a1" } RequestId: "ffc68061-ff91955c-a887572-485273a1" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false }, status: OK 2025-06-24T20:39:49.761903Z node 7 :SQS DEBUG: proxy_actor.cpp:147: Request [ffc68061-ff91955c-a887572-485273a1] Sending reply from proxy actor: { ReceiveMessage { RequestId: "ffc68061-ff91955c-a887572-485273a1" } RequestId: "ffc68061-ff91955c-a887572-485273a1" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false } Http output full {} 2025-06-24T20:39:49.762288Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:379: http request [ReceiveMessage] requestId [ffc68061-ff91955c-a887572-485273a1] Got succesfult GRPC response. 2025-06-24T20:39:49.762341Z node 7 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ReceiveMessage] requestId [ffc68061-ff91955c-a887572-485273a1] reply ok 2025-06-24T20:39:49.762430Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:1267: http request [ReceiveMessage] requestId [ffc68061-ff91955c-a887572-485273a1] Send metering event. HttpStatusCode: 200 IsFifo: 0 FolderId: folder4 RequestSizeInBytes: 526 ResponseSizeInBytes: 178 SourceAddress: 781f:df00:6050:0:601f:df00:6050:0 ResourceId: 000000000000000101v0 Action: ReceiveMessage 2025-06-24T20:39:49.762529Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:38118) <- (200 , 2 bytes) 2025-06-24T20:39:49.762661Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:38118) connection closed >> KqpErrors::ProposeResultLost_RwTx+UseSink |90.2%| [TA] $(B)/ydb/core/tx/datashard/ut_volatile/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TSchemeshardCompactionQueueTest::UpdateBelowThreshold [GOOD] >> TSchemeshardCompactionQueueTest::UpdateWithEmptyShard [GOOD] >> BsControllerConfig::AddDriveSerial |90.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/ut/ydb-core-mind-ut |90.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/ut/ydb-core-mind-ut >> BsControllerConfig::MergeIntersectingBoxes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::CannotFindColumn [GOOD] Test command err: 2025-06-24T20:39:40.193777Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618349134638004:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:40.198928Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030bd/r3tmp/tmpY8fmYS/pdisk_1.dat 2025-06-24T20:39:40.945889Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:39:40.946004Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:39:40.968544Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:39:41.079933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:39:41.227860Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29132 TServer::EnableGrpc on GrpcPort 8322, node 1 2025-06-24T20:39:41.695817Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:39:41.695849Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:39:41.695856Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:39:41.696008Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29132 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:39:42.942000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:39:43.002155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750797583150 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750797583031 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750797583150 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... 
(TRUNCATED) 2025-06-24T20:39:43.311361Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T20:39:43.311500Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T20:39:43.311523Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T20:39:43.315285Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T20:39:45.199440Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618349134638004:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:45.199536Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:39:46.358114Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750797583150, tx_id: 281474976710658 } } } 2025-06-24T20:39:46.358553Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-24T20:39:46.360305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:39:46.361807Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-06-24T20:39:46.361833Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-06-24T20:39:46.420408Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle 
NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-06-24T20:39:46.420443Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] TClient::Ls request: /Root/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750797586454 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) 2025-06-24T20:39:47.300584Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519618378662953668:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:47.300679Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030bd/r3tmp/tmpbeNo6u/pdisk_1.dat 2025-06-24T20:39:47.505055Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:39:47.506459Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519618378662953652:2079] 1750797587298573 != 1750797587298576 2025-06-24T20:39:47.518448Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:39:47.518535Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node( ... ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:39:48.313395Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:39:48.327316Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:39:48.343497Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:39:48.349538Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:39:48.462576Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750797588372 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750797588561 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750797588372 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750797588561 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-06-24T20:39:48.614591Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T20:39:48.614726Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T20:39:48.614743Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T20:39:48.615481Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T20:39:51.770841Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750797588463, tx_id: 281474976710658 } } } 2025-06-24T20:39:51.771126Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-24T20:39:51.772707Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976710660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-06-24T20:39:51.773865Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750797588561 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value2" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } 
KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 
SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-24T20:39:51.774111Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Cannot find column: name: value |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::UpdateWithEmptyShard [GOOD] |90.2%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_volatile/test-results/unittest/{meta.json ... results_accumulator.log} |90.2%| [LD] {RESULT} $(B)/ydb/core/mind/ut/ydb-core-mind-ut >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Query >> BsControllerConfig::SelectAllGroups >> DataShardReadTableSnapshots::ReadTableSplitFinished [GOOD] >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Table [GOOD] >> TxUsage::WriteToTopic_Demo_39_Query [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-16 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-17 >> TxUsage::WriteToTopic_Demo_16_Query [GOOD] >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix+Nullable-Covered [GOOD] >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix+Nullable+Covered >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_table/unittest >> DataShardReadTableSnapshots::ReadTableSplitFinished [GOOD] Test command err: 2025-06-24T20:39:43.176941Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:39:43.177348Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:39:43.177525Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002e62/r3tmp/tmp76oEOO/pdisk_1.dat 2025-06-24T20:39:43.639911Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:39:43.643582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:39:43.724356Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:39:43.729861Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797578354566 != 1750797578354570 2025-06-24T20:39:43.784034Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T20:39:43.785282Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T20:39:43.785546Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:39:43.785682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:39:43.800519Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:39:43.903491Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T20:39:43.903571Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T20:39:43.903761Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:600:2508] 2025-06-24T20:39:44.223574Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:600:2508] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T20:39:44.223717Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:600:2508] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T20:39:44.231520Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:600:2508] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T20:39:44.231683Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:600:2508] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:39:44.232270Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:39:44.232540Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:600:2508] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:39:44.232710Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:600:2508] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T20:39:44.239712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:39:44.240487Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T20:39:44.241323Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:600:2508] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T20:39:44.241412Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:600:2508] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T20:39:44.287014Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:39:44.289221Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:39:44.289838Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T20:39:44.290118Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:39:44.336199Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:39:44.337234Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:39:44.337384Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:39:44.339044Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T20:39:44.339131Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:39:44.339207Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:39:44.339586Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-24T20:39:44.339789Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:39:44.339879Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T20:39:44.351908Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:39:44.398769Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:39:44.399053Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:39:44.399209Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T20:39:44.399251Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:39:44.399303Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:39:44.399356Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:39:44.399620Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:39:44.399678Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:39:44.400051Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:39:44.400192Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:39:44.400266Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:39:44.400305Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:39:44.400363Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:39:44.400418Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:39:44.400454Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:39:44.400495Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:39:44.400540Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:39:44.400657Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:39:44.400696Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:39:44.400740Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], 
sessionId# [0:0:0] 2025-06-24T20:39:44.401203Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T20:39:44.401254Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:39:44.401379Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:39:44.401629Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T20:39:44.402155Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:39:44.402293Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:39:44.402350Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20: ... D DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037896, TxId: 281474976715664, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T20:39:54.344503Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:1306:3020], Recipient [2:1034:2808]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037896 Status: RESPONSE_DATA TxId: 281474976715664 TxResult: "\n\016\n\003key\022\007\252\006\004\n\002\010\002\n\020\n\005value\022\007\252\006\004\n\002\010\002\030\001\022\016b\005\035\006\000\000\000b\005\035B\000\000\000" RowOffsets: 36 ApiVersion: 1 DataSeqNo: 1 DataLastKey: "\001\000\004\000\000\000\006\000\000\000" 2025-06-24T20:39:54.344551Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:1034:2808] TxId# 281474976715663] Received stream data from ShardId# 72075186224037896 2025-06-24T20:39:54.344600Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1711: [ReadTable [2:1034:2808] TxId# 281474976715663] Sending TEvStreamDataAck to [2:1306:3020] ShardId# 72075186224037896 2025-06-24T20:39:54.344668Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037896, TxId: 281474976715664, PendingAcks: 0 2025-06-24T20:39:54.344748Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:1306:3020], Recipient [2:1034:2808]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715664 ShardId: 72075186224037896 2025-06-24T20:39:54.344780Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:1034:2808] TxId# 281474976715663] Received TEvStreamQuotaRequest from ShardId# 72075186224037896 2025-06-24T20:39:54.345140Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287941, Sender [2:1033:2808], Recipient [2:1034:2808]: NKikimrTx.TEvStreamQuotaResponse TxId: 281474976715663 MessageSizeLimit: 1 ReservedMessages: 1 2025-06-24T20:39:54.345169Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:1034:2808] TxId# 281474976715663] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-24T20:39:54.345194Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:1034:2808] TxId# 281474976715663] Reserving quota 1 messages for ShardId# 72075186224037896 
2025-06-24T20:39:54.345251Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037896, TxId: 281474976715664, MessageQuota: 1 2025-06-24T20:39:54.345332Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037896, TxId: 281474976715664, MessageQuota: 1 2025-06-24T20:39:54.345480Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037896 2025-06-24T20:39:54.345509Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715664, at: 72075186224037896 2025-06-24T20:39:54.345593Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287429, Sender [2:1306:3020], Recipient [2:1034:2808]: NKikimrTx.TEvStreamQuotaRelease TxId: 281474976715664 ShardId: 72075186224037896 2025-06-24T20:39:54.345619Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2120: [ReadTable [2:1034:2808] TxId# 281474976715663] Received TEvStreamQuotaRelease from ShardId# 72075186224037896 2025-06-24T20:39:54.345645Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2188: [ReadTable [2:1034:2808] TxId# 281474976715663] Released quota 1 reserved messages from ShardId# 72075186224037896 2025-06-24T20:39:54.345776Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:1205:2941], Recipient [2:1205:2941]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:39:54.345810Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:39:54.345894Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037896 2025-06-24T20:39:54.345931Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037896 active 1 active planned 0 immediate 1 planned 0 2025-06-24T20:39:54.345967Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715664] at 72075186224037896 for ReadTableScan 2025-06-24T20:39:54.346011Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715664] at 72075186224037896 on unit ReadTableScan 2025-06-24T20:39:54.346043Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715664] at 72075186224037896 error: , IsFatalError: 0 2025-06-24T20:39:54.346083Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715664] at 72075186224037896 is Executed 2025-06-24T20:39:54.346120Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715664] at 72075186224037896 executing on unit ReadTableScan 2025-06-24T20:39:54.346155Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715664] at 72075186224037896 to execution unit FinishPropose 2025-06-24T20:39:54.346180Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715664] at 72075186224037896 on unit FinishPropose 2025-06-24T20:39:54.346231Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715664] at 72075186224037896 is DelayComplete 2025-06-24T20:39:54.346256Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715664] at 72075186224037896 executing on unit FinishPropose 2025-06-24T20:39:54.346282Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715664] at 
72075186224037896 to execution unit CompletedOperations 2025-06-24T20:39:54.346306Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715664] at 72075186224037896 on unit CompletedOperations 2025-06-24T20:39:54.346357Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715664] at 72075186224037896 is Executed 2025-06-24T20:39:54.346386Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715664] at 72075186224037896 executing on unit CompletedOperations 2025-06-24T20:39:54.346408Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715664] at 72075186224037896 has finished 2025-06-24T20:39:54.346432Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037896 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:39:54.346455Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037896 2025-06-24T20:39:54.346481Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037896 has no attached operations 2025-06-24T20:39:54.346519Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037896 2025-06-24T20:39:54.346576Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037896 2025-06-24T20:39:54.346607Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715664] at 72075186224037896 on unit FinishPropose 2025-06-24T20:39:54.346644Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715664 at tablet 72075186224037896 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-24T20:39:54.346708Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037896 2025-06-24T20:39:54.346955Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:1205:2941], Recipient [2:1034:2808]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037896 Status: COMPLETE TxId: 281474976715664 Step: 0 OrderId: 281474976715664 ExecLatency: 0 ProposeLatency: 0 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186224037896 CpuTimeUsec: 368 } } CommitVersion { Step: 0 TxId: 281474976715664 } 2025-06-24T20:39:54.347039Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1850: [ReadTable [2:1034:2808] TxId# 281474976715663] Received stream complete from ShardId# 72075186224037896 2025-06-24T20:39:54.347104Z node 2 :TX_PROXY INFO: read_table_impl.cpp:2933: [ReadTable [2:1034:2808] TxId# 281474976715663] RESPONSE Status# ExecComplete prepare time: 0.035839s execute time: 0.851354s total time: 0.887193s 2025-06-24T20:39:54.347688Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:1034:2808], Recipient [2:835:2660]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 2025-06-24T20:39:54.347877Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:1034:2808], Recipient [2:945:2742]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 
2025-06-24T20:39:54.348447Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:1034:2808], Recipient [2:949:2744]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 2025-06-24T20:39:54.348862Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:1034:2808], Recipient [2:1200:2939]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 2025-06-24T20:39:54.349021Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:1034:2808], Recipient [2:1205:2941]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 2025-06-24T20:39:54.349339Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [2:1309:3023], Recipient [2:1092:2857]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:39:54.349374Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:39:54.349431Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037893, clientId# [2:1307:3021], serverId# [2:1309:3023], sessionId# [0:0:0] 2025-06-24T20:39:54.349503Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [2:1310:3024], Recipient [2:1094:2859]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:39:54.349531Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:39:54.349660Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1308:3022], serverId# [2:1310:3024], sessionId# [0:0:0] 2025-06-24T20:39:54.349874Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:1034:2808], Recipient [2:1092:2857]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 2025-06-24T20:39:54.350049Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:1034:2808], Recipient [2:1094:2859]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 >> BsControllerConfig::SelectAllGroups [GOOD] >> BackupPathTest::RecursiveDirectoryPlusExplicitTable >> TxUsage::WriteToTopic_Demo_40_Table >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeDir >> TopicAutoscaling::ReadFromTimestamp_PQv1 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-64 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-65 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::SelectAllGroups [GOOD] Test command err: 2025-06-24T20:39:55.146067Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-24T20:39:55.151935Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-24T20:39:55.152489Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-24T20:39:55.155529Z node 1 
:BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:39:55.156029Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-24T20:39:55.156165Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-24T20:39:55.156212Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-24T20:39:55.156482Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-24T20:39:55.168667Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-24T20:39:55.168816Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-24T20:39:55.169011Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-24T20:39:55.169129Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T20:39:55.169231Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T20:39:55.169319Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-24T20:39:55.390061Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.164106s 2025-06-24T20:39:55.390206Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.164282s >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-63 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-64 >> TxUsage::WriteToTopic_Demo_17_Table >> BsControllerConfig::AddDriveSerial [GOOD] >> BsControllerConfig::AddDriveSerialMassive |90.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys |90.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys |90.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |90.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut |90.2%| [LD] {RESULT} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut |90.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut >> BasicUsage::CloseWriteSessionImmediately [GOOD] >> TxUsage::WriteToTopic_Demo_31_Table [GOOD] |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> TxUsage::WriteToTopic_Demo_31_Query |90.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |90.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |90.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |90.3%| [LD] {RESULT} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |90.3%| [LD] {RESULT} 
$(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::CloseWriteSessionImmediately [GOOD] Test command err: 2025-06-24T20:39:20.162236Z :BasicWriteSession INFO: Random seed for debugging is 1750797560162196 2025-06-24T20:39:21.077154Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618266879251554:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:21.077225Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:39:21.325283Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519618267898639654:2171];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:21.325346Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047b5/r3tmp/tmpWS2PVa/pdisk_1.dat 2025-06-24T20:39:21.914622Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:39:21.981948Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:39:22.203358Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:39:22.227597Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:39:22.515366Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:39:22.551341Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:39:22.673721Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:39:22.710094Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:39:22.712211Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:39:22.724720Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:39:22.724804Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:39:22.740036Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:39:22.740186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:39:22.741778Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, 
(0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62372, node 1 2025-06-24T20:39:23.231866Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0047b5/r3tmp/yandexsQTBqf.tmp 2025-06-24T20:39:23.231886Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0047b5/r3tmp/yandexsQTBqf.tmp 2025-06-24T20:39:23.232022Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0047b5/r3tmp/yandexsQTBqf.tmp 2025-06-24T20:39:23.232110Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:39:23.455019Z INFO: TTestServer started on Port 24954 GrpcPort 62372 TClient is connected to server localhost:24954 PQClient connected to localhost:62372 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:39:23.949714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-24T20:39:26.078589Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618266879251554:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:26.078683Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:39:26.335276Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519618267898639654:2171];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:26.335364Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:39:27.919482Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618292649056300:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:27.919645Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:27.920041Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618292649056335:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:27.930722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:39:28.027387Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618292649056337:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-24T20:39:28.108369Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618296944023727:2690] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:39:28.535022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:39:28.550285Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519618296944023737:2313], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:39:28.553551Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZTY2OTg1OWItMjI5MWVjMC05NDQxNzY5ZS1hNzc0Mjg5Zg==, ActorId: [1:7519618292649056296:2299], ActorState: ExecuteState, TraceId: 01jyhtnhvc62nkrcbpkk604a7h, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:39:28.555737Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:39:28.553321Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519618297963410924:2278], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:39:28.555824Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZjUyNDAyYTMtNDhmNDJjYzItODdkNzA3NTctODJmMDMwYWE=, ActorId: [2:7519618297963410884:2272], ActorState: ExecuteState, TraceId: 01jyhtnj780p3sdqdka0y6ts3n, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:39:28.556200Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { r ... ry: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-24T20:39:53.621508Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-24T20:39:53.621519Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-24T20:39:53.621538Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [3:7519618408142516390:2446] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-06-24T20:39:53.628819Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [3:7519618408142516390:2446] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-24T20:39:53.940290Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [3:7519618408142516390:2446] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-24T20:39:53.948682Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519618408142516444:2446] connected; active server actors: 1 2025-06-24T20:39:53.948900Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [3:7519618408142516390:2446] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-24T20:39:53.948920Z node 3 :PQ_PARTITION_CHOOSER DEBUG: 
partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [3:7519618408142516390:2446] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-24T20:39:53.977974Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519618408142516444:2446] disconnected; active server actors: 1 2025-06-24T20:39:53.978013Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519618408142516444:2446] disconnected no session 2025-06-24T20:39:54.094703Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037892] server connected, pipe [3:7519618412437483765:2446], now have 1 active actors on pipe 2025-06-24T20:39:54.095121Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T20:39:54.095165Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T20:39:54.095255Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|54d8f664-87bcb338-c81903b6-13ed1d5_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-24T20:39:54.095358Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-24T20:39:54.097454Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750797594097 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T20:39:54.097621Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|54d8f664-87bcb338-c81903b6-13ed1d5_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-24T20:39:54.093073Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7519618408142516390:2446] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-24T20:39:54.095421Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T20:39:54.099255Z :INFO: [] MessageGroupId [src] SessionId [src|54d8f664-87bcb338-c81903b6-13ed1d5_0] Write session: close. 
Timeout = 0 ms 2025-06-24T20:39:54.096084Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T20:39:54.099313Z :INFO: [] MessageGroupId [src] SessionId [src|54d8f664-87bcb338-c81903b6-13ed1d5_0] Write session will now close 2025-06-24T20:39:54.096110Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T20:39:54.096178Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T20:39:54.099349Z :DEBUG: [] MessageGroupId [src] SessionId [src|54d8f664-87bcb338-c81903b6-13ed1d5_0] Write session: aborting 2025-06-24T20:39:54.099805Z :INFO: [] MessageGroupId [src] SessionId [src|54d8f664-87bcb338-c81903b6-13ed1d5_0] Write session: gracefully shut down, all writes complete 2025-06-24T20:39:54.099854Z :DEBUG: [] MessageGroupId [src] SessionId [src|54d8f664-87bcb338-c81903b6-13ed1d5_0] Write session: destroy 2025-06-24T20:39:54.093112Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7519618408142516390:2446] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-24T20:39:54.093130Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7519618408142516390:2446] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-24T20:39:54.093161Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-24T20:39:54.094892Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 4, Generation: 1 2025-06-24T20:39:54.096528Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|54d8f664-87bcb338-c81903b6-13ed1d5_0 2025-06-24T20:39:54.105129Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|54d8f664-87bcb338-c81903b6-13ed1d5_0 grpc read done: success: 0 data: 2025-06-24T20:39:54.109264Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [3:7519618412437483765:2446] destroyed 2025-06-24T20:39:54.105159Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|54d8f664-87bcb338-c81903b6-13ed1d5_0 grpc read failed 2025-06-24T20:39:54.109359Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-24T20:39:54.105189Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|54d8f664-87bcb338-c81903b6-13ed1d5_0 grpc closed 2025-06-24T20:39:54.105212Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|54d8f664-87bcb338-c81903b6-13ed1d5_0 is DEAD 2025-06-24T20:39:54.105734Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison Session was created 2025-06-24T20:39:55.272685Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710685, task: 1, CA Id [3:7519618416732451123:2467]. 
Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-06-24T20:39:55.307716Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710685, task: 1, CA Id [3:7519618416732451123:2467]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:39:55.363327Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710685, task: 1, CA Id [3:7519618416732451123:2467]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:39:55.440181Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710685, task: 1, CA Id [3:7519618416732451123:2467]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:39:55.551252Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710685, task: 1, CA Id [3:7519618416732451123:2467]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:39:55.694996Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710685, task: 1, CA Id [3:7519618416732451123:2467]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:39:55.954418Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710685, task: 1, CA Id [3:7519618416732451123:2467]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:39:56.183124Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:39:56.183163Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:39:56.232436Z node 3 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976710686. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-24T20:39:56.232553Z node 3 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [3:7519618421027418485:2468] TxId: 281474976710686. Ctx: { TraceId: 01jyhtpcm68j3s5pq0vt8xs6rs, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzM1NTE4OWMtNTk5MWJiNjItNTM3NmZlOTctYzNhOWNlZmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-24T20:39:56.232768Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NzM1NTE4OWMtNTk5MWJiNjItNTM3NmZlOTctYzNhOWNlZmY=, ActorId: [3:7519618416732451127:2468], ActorState: ExecuteState, TraceId: 01jyhtpcm68j3s5pq0vt8xs6rs, Create QueryResponse for error on request, msg: 2025-06-24T20:39:56.234244Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jyhtpdcfckqpwjtk81gq419t" } } YdbStatus: UNAVAILABLE ConsumedRu: 500 } 2025-06-24T20:39:56.480940Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710685, task: 1, CA Id [3:7519618416732451123:2467]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:39:57.064262Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710685, task: 1, CA Id [3:7519618416732451123:2467]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:39:57.659946Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710685, task: 1, CA Id [3:7519618416732451123:2467]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 >> TPQCompatTest::LongProducerAndLongMessageGroupId [GOOD] >> TPQCompatTest::ReadWriteSessions |90.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-62 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-63 >> BsControllerConfig::AddDriveSerialMassive [GOOD] |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |90.3%| [LD] {RESULT} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |90.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest |90.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest >> TPersQueueTest::MessageMetadata [GOOD] >> TPersQueueTest::LOGBROKER_7820 |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut |90.3%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::AddDriveSerialMassive [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:225:2066] recipient: [1:204:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:225:2066] recipient: [1:204:2077] Leader for TabletID 72057594037932033 is [1:234:2079] sender: [1:236:2066] recipient: [1:204:2077] 2025-06-24T20:39:54.377276Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-24T20:39:54.391774Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-24T20:39:54.392234Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 
2025-06-24T20:39:54.395241Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:39:54.395809Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-24T20:39:54.395983Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-24T20:39:54.396010Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-24T20:39:54.396295Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-24T20:39:54.414279Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-24T20:39:54.414429Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-24T20:39:54.414639Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-24T20:39:54.414777Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T20:39:54.414879Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T20:39:54.414967Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:234:2079] sender: [1:257:2066] recipient: [1:20:2067] 2025-06-24T20:39:54.426724Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-24T20:39:54.426887Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T20:39:54.439754Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T20:39:54.439907Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T20:39:54.439986Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T20:39:54.440063Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T20:39:54.440171Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T20:39:54.440241Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T20:39:54.440290Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T20:39:54.440349Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue 
Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T20:39:54.451160Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T20:39:54.451327Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T20:39:54.463922Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T20:39:54.464090Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-24T20:39:54.465476Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-24T20:39:54.465531Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-24T20:39:54.465739Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-24T20:39:54.465794Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-24T20:39:54.479727Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } 2025-06-24T20:39:54.481351Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } 2025-06-24T20:39:54.482162Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:225:2066] recipient: [11:204:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:225:2066] recipient: [11:204:2077] Leader for TabletID 72057594037932033 is [11:231:2079] sender: [11:234:2066] recipient: [11:204:2077] 2025-06-24T20:39:56.462549Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-24T20:39:56.472235Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-24T20:39:56.472540Z node 11 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-24T20:39:56.474656Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:39:56.475098Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-24T20:39:56.475476Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-24T20:39:56.475507Z node 11 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-24T20:39:56.475747Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-24T20:39:56.498470Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-24T20:39:56.498641Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-24T20:39:56.498779Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} 
Complete tx IncompatibleData# false 2025-06-24T20:39:56.498914Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T20:39:56.499012Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T20:39:56.499073Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [11:231:2079] sender: [11:257:2066] recipient: [11:20:2067] 2025-06-24T20:39:56.511839Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-24T20:39:56.512048Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T20:39:56.523834Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T20:39:56.523990Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T20:39:56.524078Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T20:39:56.524185Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T20:39:56.524361Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T20:39:56.524497Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T20:39:56.524562Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T20:39:56.524628Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T20:39:56.539771Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T20:39:56.539923Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T20:39:56.551739Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T20:39:56.551913Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-24T20:39:56.553584Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-24T20:39:56.553638Z node 11 
:BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-24T20:39:56.553857Z node 11 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-24T20:39:56.553987Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-24T20:39:56.554683Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } 2025-06-24T20:39:56.555886Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# ... ommand { AddDriveSerial { Serial: "SN_5" BoxId: 1 } } } 2025-06-24T20:39:58.602177Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_6" BoxId: 1 } } } 2025-06-24T20:39:58.602814Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_7" BoxId: 1 } } } 2025-06-24T20:39:58.607770Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_8" BoxId: 1 } } } 2025-06-24T20:39:58.608720Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_9" BoxId: 1 } } } 2025-06-24T20:39:58.609496Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_0" } } } 2025-06-24T20:39:58.610268Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_1" } } } 2025-06-24T20:39:58.611022Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_2" } } } 2025-06-24T20:39:58.611756Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_3" } } } 2025-06-24T20:39:58.612468Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_4" } } } 2025-06-24T20:39:58.613131Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_5" } } } 2025-06-24T20:39:58.613833Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_6" } } } 2025-06-24T20:39:58.614676Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_7" } } } 2025-06-24T20:39:58.624222Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_8" } } } 2025-06-24T20:39:58.625396Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_9" } } } Leader for TabletID 72057594037932033 is [0:0:0] sender: [31:230:2066] recipient: [31:206:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [31:230:2066] recipient: [31:206:2077] Leader for TabletID 72057594037932033 is 
[31:236:2079] sender: [31:237:2066] recipient: [31:206:2077] 2025-06-24T20:40:00.603689Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-24T20:40:00.604805Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-24T20:40:00.605042Z node 31 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-24T20:40:00.606477Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:40:00.606903Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-24T20:40:00.607051Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-24T20:40:00.607082Z node 31 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-24T20:40:00.607546Z node 31 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-24T20:40:00.619279Z node 31 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-24T20:40:00.619443Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-24T20:40:00.619583Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-24T20:40:00.619709Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T20:40:00.619854Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T20:40:00.619930Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [31:236:2079] sender: [31:257:2066] recipient: [31:20:2067] 2025-06-24T20:40:00.631527Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-24T20:40:00.631713Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T20:40:00.643709Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T20:40:00.643861Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T20:40:00.643983Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T20:40:00.644103Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T20:40:00.644332Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T20:40:00.644412Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T20:40:00.644453Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T20:40:00.644502Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T20:40:00.658394Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T20:40:00.658560Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T20:40:00.672112Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T20:40:00.672258Z node 31 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-24T20:40:00.673635Z node 31 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-24T20:40:00.673707Z node 31 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-24T20:40:00.673950Z node 31 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-24T20:40:00.674007Z node 31 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-24T20:40:00.674725Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_0" BoxId: 1 } } } 2025-06-24T20:40:00.680705Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_1" BoxId: 1 } } } 2025-06-24T20:40:00.681644Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_2" BoxId: 1 } } } 2025-06-24T20:40:00.682397Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_3" BoxId: 1 } } } 2025-06-24T20:40:00.683089Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_4" BoxId: 1 } } } 2025-06-24T20:40:00.692785Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_5" BoxId: 1 } } } 2025-06-24T20:40:00.693656Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_6" BoxId: 1 } } } 2025-06-24T20:40:00.694423Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_7" BoxId: 1 } } } 2025-06-24T20:40:00.704449Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: 
"SN_8" BoxId: 1 } } } 2025-06-24T20:40:00.705494Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_9" BoxId: 1 } } } 2025-06-24T20:40:00.706376Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_0" } } } 2025-06-24T20:40:00.707541Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_1" } } } 2025-06-24T20:40:00.708427Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_2" } } } 2025-06-24T20:40:00.709209Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_3" } } } 2025-06-24T20:40:00.710026Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_4" } } } 2025-06-24T20:40:00.710986Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_5" } } } 2025-06-24T20:40:00.716656Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_6" } } } 2025-06-24T20:40:00.717647Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_7" } } } 2025-06-24T20:40:00.718535Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_8" } } } 2025-06-24T20:40:00.719685Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_9" } } } >> LocalPartitionReader::Booting >> LocalPartitionReader::Booting [GOOD] >> TPersQueueTest::Codecs_WriteMessageWithDefaultCodecs_MessagesAreAcknowledged [GOOD] >> TPersQueueTest::Codecs_WriteMessageWithNonDefaultCodecThatHasToBeConfiguredAdditionally_SessionClosedWithBadRequestError |90.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest >> LocalPartitionReader::Booting [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-17 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-18 >> DataShardReadIterator::ShouldReverseReadMultipleKeys >> TNodeBrokerTest::NodesMigrationNodeName >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKey+EvWrite ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::ReadFromTimestamp_PQv1 [GOOD] Test command err: 2025-06-24T20:36:52.690548Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617628060135789:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:52.705633Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031b3/r3tmp/tmptxR9LB/pdisk_1.dat 
2025-06-24T20:36:53.063045Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:36:53.421887Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617628060135771:2079] 1750797412670099 != 1750797412670102 2025-06-24T20:36:53.432416Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:53.434088Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:36:53.434202Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:36:53.441749Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14764, node 1 2025-06-24T20:36:53.639902Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0031b3/r3tmp/yandexl3aoI9.tmp 2025-06-24T20:36:53.639933Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0031b3/r3tmp/yandexl3aoI9.tmp 2025-06-24T20:36:53.640120Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0031b3/r3tmp/yandexl3aoI9.tmp 2025-06-24T20:36:53.640226Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:36:53.724031Z INFO: TTestServer started on Port 63059 GrpcPort 14764 2025-06-24T20:36:53.743135Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63059 PQClient connected to localhost:14764 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:36:54.185361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:36:54.210490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T20:36:54.229147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T20:36:54.471440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-24T20:36:57.082482Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617649534973044:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:57.082656Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:57.085536Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617649534973057:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:36:57.090482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:36:57.105652Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617649534973059:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T20:36:57.188486Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617649534973123:2446] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:36:57.418643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:57.423875Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617649534973131:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:36:57.426282Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZWNiNDBhYjktNTAyMzgzZDUtZjA1NDcyMTktYTJlNjdhYjE=, ActorId: [1:7519617649534973042:2298], ActorState: ExecuteState, TraceId: 01jyhtgykr34f824080g28928a, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:36:57.428689Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:36:57.456978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:36:57.553734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-24T20:36:57.683777Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617628060135789:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:36:57.683852Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Subcribe to ClusterTracker from [1:7519617649534973416:2621] === CheckClustersList. 
Ok 2025-06-24T20:37:04.261179Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-24T20:37:04.300723Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519617679599744654:2689], Recipient [1:7519617632355103391:2146]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:37:04.300777Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:37:04.300792Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T20:37:04.301246Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [1:7519617636650071008:2277], Recipient [1:7519617632355103391:2146]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037888 TableLocalId: 3 Generation: 1 Round: 0 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037888 NodeId: 1 StartTime: 1750797414242 TableOwnerId: 72057594046644480 Fol ... tition.cpp:873: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 3 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2025-06-24T20:39:56.947866Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188536 (NKikimr::TEvPQ::TEvSubDomainStatus), Tablet [7:7519618410468089023:2801], Partition 1, Sender [7:7519618410468089023:2801], Recipient [7:7519618410468089092:2806], Cookie: 0 2025-06-24T20:39:56.947938Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188536, Sender [7:7519618410468089023:2801], Recipient [7:7519618410468089092:2806]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-06-24T20:39:56.947975Z node 7 :PERSQUEUE TRACE: partition.h:621: StateIdle, processing event TEvPQ::TEvSubDomainStatus 2025-06-24T20:39:56.947982Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188536 (NKikimr::TEvPQ::TEvSubDomainStatus), Tablet [7:7519618410468089021:2800], Partition 2, Sender [7:7519618410468089021:2800], Recipient [7:7519618410468089099:2810], Cookie: 0 2025-06-24T20:39:56.948037Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188491 (NKikimr::TEvPQ::TEvPartitionStatus), Tablet [7:7519618410468089023:2801], Partition 
1, Sender [7:7519618410468089023:2801], Recipient [7:7519618410468089092:2806], Cookie: 0 2025-06-24T20:39:56.948043Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188536, Sender [7:7519618410468089021:2800], Recipient [7:7519618410468089099:2810]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-06-24T20:39:56.948064Z node 7 :PERSQUEUE TRACE: partition.h:621: StateIdle, processing event TEvPQ::TEvSubDomainStatus 2025-06-24T20:39:56.948084Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188491, Sender [7:7519618410468089023:2801], Recipient [7:7519618410468089092:2806]: NKikimr::TEvPQ::TEvPartitionStatus 2025-06-24T20:39:56.948111Z node 7 :PERSQUEUE TRACE: partition.h:597: StateIdle, processing event TEvPQ::TEvPartitionStatus 2025-06-24T20:39:56.948114Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188491 (NKikimr::TEvPQ::TEvPartitionStatus), Tablet [7:7519618410468089021:2800], Partition 2, Sender [7:7519618410468089021:2800], Recipient [7:7519618410468089099:2810], Cookie: 0 2025-06-24T20:39:56.948152Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188491, Sender [7:7519618410468089021:2800], Recipient [7:7519618410468089099:2810]: NKikimr::TEvPQ::TEvPartitionStatus 2025-06-24T20:39:56.948170Z node 7 :PERSQUEUE TRACE: partition.h:597: StateIdle, processing event TEvPQ::TEvPartitionStatus 2025-06-24T20:39:56.948371Z node 7 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 3 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2025-06-24T20:39:56.948384Z node 7 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 3 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2025-06-24T20:39:56.948539Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271188503, Sender [7:7519618350338545532:2418], Recipient [7:7519618350338545475:2415]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 
2025-06-24T20:39:56.948563Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5278: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-24T20:39:56.948618Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271188503, Sender [7:7519618410468089099:2810], Recipient [7:7519618410468089021:2800]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-24T20:39:56.948633Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5278: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-24T20:39:56.948704Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271188503, Sender [7:7519618410468089092:2806], Recipient [7:7519618410468089023:2801]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-24T20:39:56.948728Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5278: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-24T20:39:56.949215Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 16 DataSize: 0 UsedReserveSize: 0 2025-06-24T20:39:56.949399Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][test-topic] ProcessPendingStats. PendingUpdates size 3 2025-06-24T20:39:56.950162Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271188001, Sender [7:7519618350338545472:2414], Recipient [7:7519618290209002288:2146]: NKikimrPQ.TEvPeriodicTopicStats PathId: 13 Generation: 1 Round: 16 DataSize: 0 UsedReserveSize: 0 SubDomainOutOfSpace: false 2025-06-24T20:39:56.950190Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4989: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2025-06-24T20:39:56.950209Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046644480, LocalPathId: 13] DataSize 0 UsedReserveSize 0 2025-06-24T20:39:56.950236Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.099995s, queue# 1 2025-06-24T20:39:56.983756Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519618410468089021:2800], Partition 2, Sender [0:0:0], Recipient [7:7519618410468089099:2810], Cookie: 0 2025-06-24T20:39:56.983827Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [7:7519618350338545472:2414], Recipient [7:7519618290209002288:2146]: NKikimrSchemeOp.TDescribePath PathId: 13 SchemeshardId: 72057594046644480 2025-06-24T20:39:56.983854Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519618410468089099:2810]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T20:39:56.983864Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T20:39:56.983885Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T20:39:56.983942Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T20:39:56.984032Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-24T20:39:56.984062Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T20:39:56.984114Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T20:39:56.984256Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519618350338545475:2415], Partition 0, Sender [0:0:0], Recipient [7:7519618350338545532:2418], Cookie: 0 2025-06-24T20:39:56.984292Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519618350338545532:2418]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T20:39:56.984310Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T20:39:56.984339Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T20:39:56.984375Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T20:39:56.984392Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T20:39:56.984431Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T20:39:56.984478Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519618410468089023:2801], Partition 1, Sender [0:0:0], Recipient [7:7519618410468089092:2806], Cookie: 0 2025-06-24T20:39:56.984512Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519618410468089092:2806]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T20:39:56.984529Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T20:39:56.984557Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T20:39:56.984590Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T20:39:56.984609Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T20:39:56.984630Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 >> DataShardReadIterator::ShouldReadRangeInclusiveEndsCellVec >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeDir [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeBlockStoreVolume [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeExtSubDomain [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeColumnStore [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeColumnTable [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeCdcStream |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup |90.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup >> BackupPathTest::RecursiveDirectoryPlusExplicitTable [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-65 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-66 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-64 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-65 >> TxUsage::Write_Random_Sized_Messages_In_Wide_Transactions_Table [GOOD] >> TPersQueueTest::DirectReadRestartTablet [GOOD] >> TPersQueueTest::EachMessageGetsExactlyOneAcknowledgementInCorrectOrder >> TNodeBrokerTest::Test999NodesSubscribers |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence |90.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence >> BackupRestore::RestoreTablePartitioningSettings >> KqpErrors::ProposeResultLost_RwTx+UseSink [GOOD] >> KqpErrors::ProposeResultLost_RwTx-UseSink >> TPersQueueTest::TestBigMessage [GOOD] >> TPersQueueTest::SetMeteringMode >> TDynamicNameserverTest::BasicFunctionality-EnableNodeBrokerDeltaProtocol-true |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut |90.3%| [LD] {RESULT} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut >> TNodeBrokerTest::NodesMigrationNodeName [GOOD] >> TxUsage::ReadRuleGeneration [GOOD] >> BasicUsage::PropagateSessionClosed [GOOD] >> BasicUsage::ReadMirrored ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationNodeName [GOOD] Test command err: 2025-06-24T20:40:06.384382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:40:06.384451Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> TxUsage::Sinks_Oltp_WriteToTopics_3_Query [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Table >> TTopicYqlTest::BadRequests [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_4_Table >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-63 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-64 >> TNodeBrokerTest::SubscribeToNodes >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-18 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-19 >> DataShardReadIterator::ShouldReverseReadMultipleKeys [GOOD] >> DataShardReadIterator::ShouldReverseReadMultipleKeysOneByOne >> TDynamicNameserverTest::BasicFunctionality-EnableNodeBrokerDeltaProtocol-true [GOOD] >> TDynamicNameserverTest::CacheMissDifferentDeadline-EnableNodeBrokerDeltaProtocol-false >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Query [GOOD] >> TxUsage::WriteToTopic_Demo_17_Table [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKey+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKey-EvWrite >> TDynamicNameserverTest::CacheMissDifferentDeadline-EnableNodeBrokerDeltaProtocol-false [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TTopicYqlTest::BadRequests [GOOD] Test command err: 2025-06-24T20:35:20.703919Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617235512801222:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:20.704026Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:35:20.754182Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519617231669264401:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:20.755239Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00496a/r3tmp/tmpH1vqB4/pdisk_1.dat 2025-06-24T20:35:21.062710Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:35:21.062254Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:35:21.273092Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:21.297502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:21.297592Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:21.299651Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:21.299727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:21.301081Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:21.316394Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle 
TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:35:21.320139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2141, node 1 2025-06-24T20:35:21.427759Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00496a/r3tmp/yandexw0uBYs.tmp 2025-06-24T20:35:21.427791Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00496a/r3tmp/yandexw0uBYs.tmp 2025-06-24T20:35:21.427966Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00496a/r3tmp/yandexw0uBYs.tmp 2025-06-24T20:35:21.428101Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:35:21.511175Z INFO: TTestServer started on Port 23362 GrpcPort 2141 2025-06-24T20:35:21.692908Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:21.765212Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23362 PQClient connected to localhost:2141 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:21.926535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:35:21.993977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:35:22.199469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 
2025-06-24T20:35:24.488541Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617248849133914:2273], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:24.488800Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:24.489593Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617248849133941:2276], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:24.496274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:24.515268Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519617248849133943:2277], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-24T20:35:24.607158Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519617248849133970:2175] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:25.027264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:25.035732Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617252692671357:2304], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:35:25.036055Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NDBlMzVhYWMtZGYxZTQ2NDctMTc4ODViYzYtNWFkNDRlNw==, ActorId: [1:7519617252692671317:2295], ActorState: ExecuteState, TraceId: 01jyhte48wen2ks2kq1ehfaqzh, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:35:25.038728Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:35:25.040345Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519617248849133977:2281], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:35:25.040613Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=OTdhOWJhMDMtNjE1YTBlMmQtMWMwMWJkZTItMzk5ODhmZDc=, ActorId: [2:7519617248849133912:2272], ActorState: ExecuteState, TraceId: 01jyhte4648kn7p2y4t6qgj3nn, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:35:25.041078Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:35:25.139802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:25.290408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: ... 
3647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "rt3.dc1--legacy--topic1" Version: 0 LocalDC: true RequireAuthWrite: true RequireAuthRead: true Producer: "legacy" Ident: "legacy" Topic: "topic1" DC: "dc1" FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/rt3.dc1--legacy--topic1" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } ReadRuleGenerations: 0 ReadRuleGenerations: 0 AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } Consumers { Name: "c1" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } Consumers { Name: "c2" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } } BootstrapConfig { } SourceActor { RawX1: 7519618382172497030 RawX2: 107374184590 } Partitions { Partition { PartitionId: 0 } } 2025-06-24T20:40:06.173379Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:40:06.183249Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:40:06.183300Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037892] Try execute txs with state CALCULATED 2025-06-24T20:40:06.183326Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037892] TxId 281474976715678, State CALCULATED 2025-06-24T20:40:06.183357Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72075186224037892] TxId 281474976715678 State CALCULATED FrontTxId 281474976715678 2025-06-24T20:40:06.183387Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976715678, NewState WAIT_RS 2025-06-24T20:40:06.183425Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72075186224037892] TxId 281474976715678 moved from CALCULATED to WAIT_RS 2025-06-24T20:40:06.183490Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3988: [PQ: 72075186224037892] Send TEvTxProcessing::TEvReadSet to 0 receivers. Wait TEvTxProcessing::TEvReadSet from 0 senders. 
2025-06-24T20:40:06.183531Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4482: [PQ: 72075186224037892] HaveParticipantsDecision 1 2025-06-24T20:40:06.183605Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976715678, NewState EXECUTING 2025-06-24T20:40:06.183635Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72075186224037892] TxId 281474976715678 moved from WAIT_RS to EXECUTING 2025-06-24T20:40:06.183661Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4512: [PQ: 72075186224037892] Received 0, Expected 1 2025-06-24T20:40:06.183731Z node 26 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1750797606173, TxId 281474976715678 2025-06-24T20:40:06.184466Z node 26 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T20:40:06.193646Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3553: [PQ: 72075186224037892] Handle TEvPQ::TEvTxCommitDone Step 1750797606173, TxId 281474976715678, Partition 0 2025-06-24T20:40:06.193705Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037892] Try execute txs with state EXECUTING 2025-06-24T20:40:06.193736Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037892] TxId 281474976715678, State EXECUTING 2025-06-24T20:40:06.193766Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72075186224037892] TxId 281474976715678 State EXECUTING FrontTxId 281474976715678 2025-06-24T20:40:06.193788Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4512: [PQ: 72075186224037892] Received 1, Expected 1 2025-06-24T20:40:06.193807Z node 26 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T20:40:06.193853Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4185: [PQ: 72075186224037892] TxId: 281474976715678 send TEvPersQueue::TEvProposeTransactionResult(COMPLETE) 2025-06-24T20:40:06.193898Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4516: [PQ: 72075186224037892] complete TxId 281474976715678 2025-06-24T20:40:06.193899Z node 26 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:40:06.194512Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72075186224037892] Apply new config PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "rt3.dc1--legacy--topic1" Version: 0 LocalDC: true RequireAuthWrite: true RequireAuthRead: true Producer: "legacy" Ident: "legacy" Topic: "topic1" DC: "dc1" FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/rt3.dc1--legacy--topic1" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } ReadRuleGenerations: 0 
ReadRuleGenerations: 0 AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } Consumers { Name: "c1" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } Consumers { Name: "c2" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } 2025-06-24T20:40:06.194685Z node 26 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:40:06.194846Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4534: [PQ: 72075186224037892] delete partitions for TxId 281474976715678 2025-06-24T20:40:06.194878Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976715678, NewState EXECUTED 2025-06-24T20:40:06.194914Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72075186224037892] TxId 281474976715678 moved from EXECUTING to EXECUTED 2025-06-24T20:40:06.194954Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72075186224037892] write key for TxId 281474976715678 2025-06-24T20:40:06.195628Z node 26 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 281474976715678] save tx TxId: 281474976715678 State: EXECUTED MinStep: 1750797606000 MaxStep: 18446744073709551615 Step: 1750797606173 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "rt3.dc1--legacy--topic1" Version: 0 LocalDC: true RequireAuthWrite: true RequireAuthRead: true Producer: "legacy" Ident: "legacy" Topic: "topic1" DC: "dc1" FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/rt3.dc1--legacy--topic1" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } ReadRuleGenerations: 0 ReadRuleGenerations: 0 AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } Consumers { Name: "c1" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } Consumers { Name: "c2" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } } BootstrapConfig { } SourceActor { RawX1: 7519618382172497030 RawX2: 107374184590 } Partitions { Partition { PartitionId: 0 } } 2025-06-24T20:40:06.196048Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:40:06.229416Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:40:06.229477Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037892] Try execute txs with state EXECUTED 2025-06-24T20:40:06.229516Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037892] TxId 281474976715678, 
State EXECUTED 2025-06-24T20:40:06.229554Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72075186224037892] TxId 281474976715678 State EXECUTED FrontTxId 281474976715678 2025-06-24T20:40:06.229594Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4007: [PQ: 72075186224037892] TPersQueue::SendEvReadSetAckToSenders 2025-06-24T20:40:06.229631Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976715678, NewState WAIT_RS_ACKS 2025-06-24T20:40:06.229659Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72075186224037892] TxId 281474976715678 moved from EXECUTED to WAIT_RS_ACKS 2025-06-24T20:40:06.229708Z node 26 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715678] PredicateAcks: 0/0 2025-06-24T20:40:06.229723Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4560: [PQ: 72075186224037892] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-24T20:40:06.229747Z node 26 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715678] PredicateAcks: 0/0 2025-06-24T20:40:06.229779Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4621: [PQ: 72075186224037892] add an TxId 281474976715678 to the list for deletion 2025-06-24T20:40:06.229817Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976715678, NewState DELETING 2025-06-24T20:40:06.229863Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3852: [PQ: 72075186224037892] delete key for TxId 281474976715678 2025-06-24T20:40:06.229966Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:40:06.248084Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:40:06.248143Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-06-24T20:40:06.248174Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037892] TxId 281474976715678, State DELETING 2025-06-24T20:40:06.248205Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4571: [PQ: 72075186224037892] delete TxId 281474976715678 >> TxUsage::WriteToTopic_Demo_17_Query >> DataShardReadIterator::ShouldReadRangeInclusiveEndsCellVec [GOOD] >> DataShardReadIterator::ShouldReadRangeInclusiveEndsArrow |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut |90.3%| [LD] {RESULT} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-65 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-66 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-66 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-67 >> BsControllerConfig::MergeIntersectingBoxes [GOOD] >> BsControllerConfig::MoveGroups ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissDifferentDeadline-EnableNodeBrokerDeltaProtocol-false [GOOD] Test command err: 2025-06-24T20:40:09.435675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:40:09.435774Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) 2025-06-24T20:40:09.839747Z node 1 :NODE_BROKER ERROR: node_broker.cpp:797: [Dirty] Configured lease duration (10.000000s) is too small. Using min. value: 300.000000s 2025-06-24T20:40:09.860061Z node 1 :NODE_BROKER ERROR: node_broker.cpp:797: [Committed] Configured lease duration (10.000000s) is too small. Using min. value: 300.000000s 2025-06-24T20:40:13.263263Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:40:13.263349Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... waiting for cache miss (done) >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Table >> TxUsage::Write_Random_Sized_Messages_In_Wide_Transactions_Query >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeCdcStream [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeBlobDepot [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeBackupCollection [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UINT8 >> BackupRestore::RestoreTablePartitioningSettings [GOOD] >> BackupRestore::RestoreIndexTablePartitioningSettings >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Query [GOOD] |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Query [GOOD] |90.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator >> KqpSnapshotRead::TestReadOnly-withSink >> TNodeBrokerTest::SubscribeToNodes [GOOD] >> BasicUsage::PreferredDatabaseNoFallback [GOOD] >> TNodeBrokerTest::ResolveScopeIdForServerless >> TxUsage::WriteToTopic_Demo_31_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::SubscribeToNodes [GOOD] Test command err: 2025-06-24T20:40:13.256889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:40:13.256965Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) 2025-06-24T20:40:15.793365Z node 1 :NODE_BROKER ERROR: node_broker.cpp:1090: [DB] Removing node with wrong ID 1025 not in range (1023, 1024] >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Table |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut |90.3%| [LD] {RESULT} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut >> DataShardReadIterator::ShouldReverseReadMultipleKeysOneByOne [GOOD] >> DataShardReadIterator::ShouldReverseReadMultipleRanges ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::PreferredDatabaseNoFallback [GOOD] Test command err: 2025-06-24T20:39:10.561078Z :GetAllStartPartitionSessions INFO: Random seed for debugging is 1750797550561033 2025-06-24T20:39:11.018883Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618226020938669:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:11.018934Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:39:11.116350Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519618223571393120:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:11.131463Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:39:11.559137Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047b9/r3tmp/tmpTak24q/pdisk_1.dat 2025-06-24T20:39:11.596564Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:39:11.971683Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:39:11.993099Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:39:11.993197Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:39:11.996644Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:39:11.996718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:39:11.998155Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:39:12.002898Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:39:12.004372Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:39:12.060196Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:39:12.120652Z node 2 :TX_CONVEYOR 
ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 26302, node 1 2025-06-24T20:39:12.238465Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0047b9/r3tmp/yandexgjO8EP.tmp 2025-06-24T20:39:12.238499Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0047b9/r3tmp/yandexgjO8EP.tmp 2025-06-24T20:39:12.238686Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0047b9/r3tmp/yandexgjO8EP.tmp 2025-06-24T20:39:12.238828Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:39:12.324334Z INFO: TTestServer started on Port 5474 GrpcPort 26302 TClient is connected to server localhost:5474 PQClient connected to localhost:26302 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:39:12.993698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-24T20:39:16.023316Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618226020938669:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:16.023396Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:39:16.122388Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519618223571393120:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:16.122458Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:39:16.167341Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618247495776118:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:16.167578Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:16.168035Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618247495776155:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:16.173226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:39:16.183440Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618247495776189:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:16.183547Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:16.231216Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618247495776157:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-24T20:39:16.544736Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618247495776233:2689] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:39:16.680316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:39:16.694003Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519618247495776252:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:39:16.699387Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ODk1MTcyMjUtYmEyYWRjNmMtODAyYjk2YjItYzc4NDdj, ActorId: [1:7519618247495776112:2296], ActorState: ExecuteState, TraceId: 01jyhtn6ctct34myszerer3djc, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:39:16.699584Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519618245046229821:2276], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:39:16.711937Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MzZlOTA5NTctZmM4ZGMzMDktYmU0MmIyYjMtNDI2NjhkODQ=, ActorId: [2:7519618245046229780:2270], ActorState: ExecuteState, TraceId: 01jyhtn6py6s07dvw2yvtjmep3, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:39:16.715295Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:39:16.716322Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Clu ... 99-696a19f-a136e405_0 grpc closed 2025-06-24T20:39:45.363502Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|769e643-a5441199-696a19f-a136e405_0 is DEAD 2025-06-24T20:39:45.366310Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:39:45.367060Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [3:7519618372164283188:2463] destroyed 2025-06-24T20:39:45.367122Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
====TYdbPqTestRetryPolicy() ====ExpectBreakDown === Session was created, waiting for retries >>> Ready to answer: ok ====CreateRetryState ====CreateRetryState Initialized Test retry state: get retry delay 2025-06-24T20:39:45.507796Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-24T20:39:46.207283Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:39:46.207311Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded Test retry state: get retry delay 2025-06-24T20:39:47.515303Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-24T20:39:48.107490Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction Test retry state: get retry delay 2025-06-24T20:39:49.519697Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s === In the next federation discovery response dc2 will be available Test retry state: get retry delay 2025-06-24T20:39:51.523592Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-24T20:39:53.111501Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction Test retry state: get retry delay 2025-06-24T20:39:53.527623Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-24T20:39:55.528443Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-24T20:39:57.535276Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-24T20:39:58.111197Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction Test retry state: get retry delay 2025-06-24T20:39:59.539416Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-24T20:40:01.543427Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-24T20:40:03.111492Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction Test retry state: get retry delay 2025-06-24T20:40:03.550604Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-24T20:40:05.551390Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-24T20:40:07.559779Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-24T20:40:08.115450Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction Test retry state: get retry delay 2025-06-24T20:40:09.564018Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-24T20:40:11.571309Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-24T20:40:13.115510Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:40:13.118168Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:157: [72075186224037893][rt3.dc1--test-topic] TPersQueueReadBalancer::HandleWakeup 2025-06-24T20:40:13.118264Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: 
[72075186224037893][rt3.dc1--test-topic] Send TEvPersQueue::TEvStatus TabletId: 72075186224037892 Cookie: 1 2025-06-24T20:40:13.135829Z node 4 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 TotalPartitions: 1 SourceIdMaxCounts: 6000000 } 2025-06-24T20:40:13.159519Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][rt3.dc1--test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 1 DataSize: 0 UsedReserveSize: 0 2025-06-24T20:40:13.160159Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][rt3.dc1--test-topic] ProcessPendingStats. PendingUpdates size 1 Test retry state: get retry delay 2025-06-24T20:40:13.578882Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s === Waiting for repair >>> Ready to answer: ok 2025-06-24T20:40:15.583584Z :INFO: [/Root] [] [] Start federated write session to database 'dc2' (previous was ) FederationState: { Status: SUCCESS SelfLocation: "fancy_datacenter" DbInfos: [ { name: "dc1" path: "/Root" id: "account-dc1" endpoint: "localhost:3165" location: "dc1" status: AVAILABLE weight: 1000 } { name: "dc2" path: "/Root" id: "account-dc2" endpoint: "localhost:3165" location: "dc2" status: AVAILABLE weight: 500 } { name: "dc3" path: "/Root" id: "account-dc3" endpoint: "localhost:3165" location: "dc3" status: AVAILABLE weight: 500 } ] ControlPlaneEndpoint: cp.logbroker-federation:2135 } === Closing the session 2025-06-24T20:40:15.595513Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: try to update token 2025-06-24T20:40:15.603762Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Start write session. Will connect to nodeId: 0 2025-06-24T20:40:15.609761Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: write to message_group: src_id 2025-06-24T20:40:15.609907Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: send init request: init_request { path: "test-topic" message_group_id: "src_id" } 2025-06-24T20:40:15.610261Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: OnWriteDone gRpcStatusCode: 0 2025-06-24T20:40:15.611369Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: close. 
Timeout 0.000000s 2025-06-24T20:40:15.611418Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session will now close 2025-06-24T20:40:15.611491Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: aborting 2025-06-24T20:40:15.611978Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: gracefully shut down, all writes complete 2025-06-24T20:40:15.612040Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: destroy 2025-06-24T20:40:15.611393Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-24T20:40:15.611430Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 2 2025-06-24T20:40:15.611828Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: grpc read done: success: 1 data: init_request { path: "test-topic" message_group_id: "src_id" } 2025-06-24T20:40:15.611994Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 2 path: "test-topic" message_group_id: "src_id" from ipv6:[::1]:47852 2025-06-24T20:40:15.612012Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=2 sessionId= userAgent="topic server" ip=ipv6:[::1]:47852 proto=topic topic=test-topic durationSec=0 2025-06-24T20:40:15.612027Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-24T20:40:15.613691Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: describe result for acl check 2025-06-24T20:40:15.613804Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-24T20:40:15.613816Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-24T20:40:15.613824Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-24T20:40:15.613840Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [3:7519618501013303301:2696] (SourceId=src_id, PreferedPartition=(NULL)) StartKqpSession 2025-06-24T20:40:15.616765Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [3:7519618501013303301:2696] (SourceId=src_id, PreferedPartition=(NULL)) Select from the table 2025-06-24T20:40:15.626375Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: grpc closed 
2025-06-24T20:40:15.626407Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: is DEAD 2025-06-24T20:40:15.984800Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715739, task: 1, CA Id [3:7519618501013303341:2701]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-06-24T20:40:16.016279Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715739, task: 1, CA Id [3:7519618501013303341:2701]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:40:16.072238Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715739, task: 1, CA Id [3:7519618501013303341:2701]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:40:16.140040Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715739, task: 1, CA Id [3:7519618501013303341:2701]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:40:16.224242Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715739, task: 1, CA Id [3:7519618501013303341:2701]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:40:16.355295Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715739, task: 1, CA Id [3:7519618501013303341:2701]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:40:16.663725Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715739, task: 1, CA Id [3:7519618501013303341:2701]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 >> TNodeBrokerTest::ResolveScopeIdForServerless [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-64 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-65 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-19 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-20 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ResolveScopeIdForServerless [GOOD] Test command err: 2025-06-24T20:40:18.923606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:40:18.923683Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) 2025-06-24T20:40:19.085847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 101 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000001 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T20:40:19.147017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 102 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000002 FAKE_COORDINATOR: Erasing txId 102 >> KqpErrors::ProposeResultLost_RwTx-UseSink [GOOD] >> TPersQueueTest::ReadWithoutConsumerFederation [GOOD] >> TPersQueueTest::ReadWithoutConsumerFirstClassCitizen |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut |90.3%| [LD] {RESULT} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKey-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRange+EvWrite >> DataShardReadIterator::ShouldReadRangeInclusiveEndsArrow [GOOD] >> DataShardReadIterator::ShouldReadRangeReverse ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp_errors/unittest >> KqpErrors::ProposeResultLost_RwTx-UseSink [GOOD] Test command err: 2025-06-24T20:40:01.759020Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:40:01.759770Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:40:01.759887Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:40:01.760252Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:40:01.760364Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:40:01.760544Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002675/r3tmp/tmpU5ddL4/pdisk_1.dat 2025-06-24T20:40:02.372518Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:02.671887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:40:02.871980Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:02.872113Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:02.882273Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:02.882403Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:02.896608Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:40:02.897298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:40:02.897683Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:40:03.220827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:04.242225Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1504:2911], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:04.242369Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1515:2916], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:04.242456Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:04.249829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:40:04.460143Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:40:04.460297Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:40:04.856811Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:1518:2919], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:40:05.116367Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1658:2999] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:40:05.718969Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:198: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhtpncfa5b5a7tcs64x0h7n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjdkOGIyMTAtYTM1N2FhOWYtODlhN2FhMzMtYmVlMzY4YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Bootstrap done, become ReadyState 2025-06-24T20:40:05.719641Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:608: ActorId: [1:1684:2909] TxId: 281474976715660. Ctx: { TraceId: 01jyhtpncfa5b5a7tcs64x0h7n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjdkOGIyMTAtYTM1N2FhOWYtODlhN2FhMzMtYmVlMzY4YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing physical tx, type: 2, stages: 1 2025-06-24T20:40:05.719883Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-06-24T20:40:05.720180Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:623: ActorId: [1:1684:2909] TxId: 281474976715660. Ctx: { TraceId: 01jyhtpncfa5b5a7tcs64x0h7n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjdkOGIyMTAtYTM1N2FhOWYtODlhN2FhMzMtYmVlMzY4YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got request, become WaitResolveState 2025-06-24T20:40:05.720692Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715660. Resolved key sets: 1 2025-06-24T20:40:05.720962Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:295: TxId: 281474976715660. Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 2 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 4 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-24T20:40:05.721152Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2035: ActorId: [1:1684:2909] TxId: 281474976715660. Ctx: { TraceId: 01jyhtpncfa5b5a7tcs64x0h7n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjdkOGIyMTAtYTM1N2FhOWYtODlhN2FhMzMtYmVlMzY4YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Stage [0,0] AST: ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (Iterator (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3))))) )))) ) 2025-06-24T20:40:05.721360Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:1512: ActorId: [1:1684:2909] TxId: 281474976715660. Ctx: { TraceId: 01jyhtpncfa5b5a7tcs64x0h7n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjdkOGIyMTAtYTM1N2FhOWYtODlhN2FhMzMtYmVlMzY4YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Stage [0,0] create compute task: 1 2025-06-24T20:40:05.721536Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhtpncfa5b5a7tcs64x0h7n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjdkOGIyMTAtYTM1N2FhOWYtODlhN2FhMzMtYmVlMzY4YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:40:05.721610Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715660. Ctx: { TraceId: 01jyhtpncfa5b5a7tcs64x0h7n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjdkOGIyMTAtYTM1N2FhOWYtODlhN2FhMzMtYmVlMzY4YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-24T20:40:05.722139Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715660. Ctx: { TraceId: 01jyhtpncfa5b5a7tcs64x0h7n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjdkOGIyMTAtYTM1N2FhOWYtODlhN2FhMzMtYmVlMzY4YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [1:1687:2909] 2025-06-24T20:40:05.722219Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715660. Ctx: { TraceId: 01jyhtpncfa5b5a7tcs64x0h7n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjdkOGIyMTAtYTM1N2FhOWYtODlhN2FhMzMtYmVlMzY4YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [1:1687:2909], channels: 0 2025-06-24T20:40:05.722306Z node 1 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [1:1684:2909] TxId: 281474976715660. Ctx: { TraceId: 01jyhtpncfa5b5a7tcs64x0h7n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjdkOGIyMTAtYTM1N2FhOWYtODlhN2FhMzMtYmVlMzY4YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 1, readonly: 0, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-24T20:40:05.722364Z node 1 :KQP_EXECUTER TRACE: kqp_data_executer.cpp:2809: ActorId: [1:1684:2909] TxId: 281474976715660. Ctx: { TraceId: 01jyhtpncfa5b5a7tcs64x0h7n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjdkOGIyMTAtYTM1N2FhOWYtODlhN2FhMzMtYmVlMzY4YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Updating channels after the creation of compute actors 2025-06-24T20:40:05.722418Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715660. Ctx: { TraceId: 01jyhtpncfa5b5a7tcs64x0h7n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjdkOGIyMTAtYTM1N2FhOWYtODlhN2FhMzMtYmVlMzY4YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [1:1687:2909] 2025-06-24T20:40:05.722496Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715660. Ctx: { TraceId: 01 ... id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CT 1, CA [3:1737:3040], 2025-06-24T20:40:19.474268Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [3:1729:3040] TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [3:1737:3040], 2025-06-24T20:40:19.474792Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:791: ActorId: [3:1729:3040] TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing task: 1 on compute actor: [4:1739:2433] 2025-06-24T20:40:19.474856Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [4:1739:2433] 2025-06-24T20:40:19.474917Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:838: TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Task: 1, output channelId: 1, dst task: 2, at actor [3:1737:3040] 2025-06-24T20:40:19.474992Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [3:1737:3040], channels: 1 2025-06-24T20:40:19.475072Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [4:1739:2433], channels: 1 2025-06-24T20:40:19.475257Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1729:3040] TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [4:1739:2433], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-24T20:40:19.475307Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [3:1729:3040] TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [4:1739:2433], CA [3:1737:3040], 2025-06-24T20:40:19.475360Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [3:1729:3040] TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [4:1739:2433], CA [3:1737:3040], 2025-06-24T20:40:19.475861Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1729:3040] TxId: 281474976715663. 
Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [4:1739:2433], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { CpuTimeUs: 651 Tasks { TaskId: 1 CpuTimeUs: 413 ComputeCpuTimeUs: 13 BuildCpuTimeUs: 400 HostName: "ghrun-4pjfmvffsq" NodeId: 4 CreateTimeMs: 1750797619472 CurrentWaitInputTimeUs: 22 UpdateTimeMs: 1750797619473 } MaxMemoryUsage: 1048576 } 2025-06-24T20:40:19.475979Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [3:1729:3040] TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [4:1739:2433], CA [3:1737:3040], 2025-06-24T20:40:19.476020Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [3:1729:3040] TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [4:1739:2433], CA [3:1737:3040], 2025-06-24T20:40:19.483694Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:391: ActorId: [3:1729:3040] TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got result, channelId: 2, shardId: 0, inputIndex: 0, from: [3:1738:3040], finished: 0 2025-06-24T20:40:19.483818Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:394: ActorId: [3:1729:3040] TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send ack to channelId: 2, seqNo: 1, to: [3:1738:3040] 2025-06-24T20:40:19.490306Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:391: ActorId: [3:1729:3040] TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got result, channelId: 2, shardId: 0, inputIndex: 0, from: [3:1738:3040], finished: 1 2025-06-24T20:40:19.490384Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:394: ActorId: [3:1729:3040] TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send ack to channelId: 2, seqNo: 2, to: [3:1738:3040] 2025-06-24T20:40:19.491523Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1729:3040] TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: ExecuteState, got execution state from compute actor: [3:1737:3040], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 1557 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 893 FinishTimeMs: 1750797619490 InputRows: 3 InputBytes: 12 OutputRows: 3 OutputBytes: 12 ResultRows: 3 ResultBytes: 12 ComputeCpuTimeUs: 248 BuildCpuTimeUs: 645 HostName: "ghrun-4pjfmvffsq" NodeId: 3 CreateTimeMs: 1750797619471 UpdateTimeMs: 1750797619490 } MaxMemoryUsage: 1048576 } 2025-06-24T20:40:19.491670Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [3:1737:3040] 2025-06-24T20:40:19.491751Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [3:1729:3040] TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [4:1739:2433], 2025-06-24T20:40:19.491796Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [3:1729:3040] TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [4:1739:2433], 2025-06-24T20:40:19.492222Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1729:3040] TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [4:1739:2433], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 1725 DurationUs: 9000 Tasks { TaskId: 1 CpuTimeUs: 585 FinishTimeMs: 1750797619490 OutputRows: 3 OutputBytes: 12 Tables { TablePath: "/Root/table-1" ReadRows: 3 ReadBytes: 24 AffectedPartitions: 4 } IngressRows: 3 ComputeCpuTimeUs: 185 BuildCpuTimeUs: 400 WaitInputTimeUs: 7628 HostName: "ghrun-4pjfmvffsq" NodeId: 4 StartTimeMs: 1750797619481 CreateTimeMs: 1750797619472 UpdateTimeMs: 1750797619490 } MaxMemoryUsage: 1048576 } 2025-06-24T20:40:19.492309Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [4:1739:2433] 2025-06-24T20:40:19.492491Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [3:1729:3040] TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T20:40:19.492578Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2202: ActorId: [3:1729:3040] TxId: 281474976715663. 
Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Terminate, become ZombieState 2025-06-24T20:40:19.492641Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [3:1729:3040] TxId: 281474976715663. Ctx: { TraceId: 01jyhtq43acpp5mvcyz7at04zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVlYzRlMTItNjBiNDhkMWQtNTg1ZTcyMjUtY2Y0MGViODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.003282s ReadRows: 3 ReadBytes: 24 ru: 3 rate limiter was not found force flag: 1 { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 3 } } >> TxUsage::WriteToTopic_Demo_40_Table [GOOD] >> TAsyncIndexTests::SplitBothWithReboots[TabletReboots] [GOOD] >> KqpSinkLocks::EmptyRange |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel |90.3%| [LD] {RESULT} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-67 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-68 >> KqpSinkLocks::OlapUncommittedRead >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-66 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-67 |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut |90.3%| [LD] {RESULT} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut >> BackupRestore::TestAllPrimitiveTypes-UINT8 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UINT16 >> TTenantPoolTests::TestForcedSensorLabelsForStaticConfig >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Table >> BackupRestore::RestoreIndexTablePartitioningSettings [GOOD] >> BackupRestore::RestoreIndexTableReadReplicasSettings >> DataShardReadIterator::ShouldReverseReadMultipleRanges [GOOD] >> DataShardReadIterator::ShouldReverseReadMultipleRangesOneByOneWithAcks >> TPQCompatTest::ReadWriteSessions [GOOD] >> TxUsage::WriteToTopic_Demo_32_Query >> TTenantPoolTests::TestForcedSensorLabelsForStaticConfig [GOOD] >> KqpSnapshotRead::TestReadOnly-withSink [GOOD] >> KqpSnapshotRead::TestSnapshotExpiration+withSink >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-65 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-66 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TTenantPoolTests::TestForcedSensorLabelsForStaticConfig [GOOD] Test command err: 2025-06-24T20:40:26.522135Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T20:40:26.522925Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/0035fa/r3tmp/tmpDXpmmg/pdisk_1.dat": unknown reason, errno# 0. 
PDiskId# 1000 2025-06-24T20:40:26.523639Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/0035fa/r3tmp/tmpDXpmmg/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/0035fa/r3tmp/tmpDXpmmg/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 2439060718685535227 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T20:40:26.534039Z node 3 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T20:40:26.534593Z node 3 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/0035fa/r3tmp/tmpDXpmmg/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T20:40:26.535226Z node 3 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/0035fa/r3tmp/tmpDXpmmg/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/0035fa/r3tmp/tmpDXpmmg/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 14146255783585556359 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 >> TPersQueueTest::Codecs_WriteMessageWithNonDefaultCodecThatHasToBeConfiguredAdditionally_SessionClosedWithBadRequestError [GOOD] >> TPersQueueTest::CreateTopicWithMeteringMode >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-20 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-21 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPQCompatTest::ReadWriteSessions [GOOD] Test command err: 2025-06-24T20:35:15.690965Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617212959524739:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:15.695218Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:35:15.785783Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519617209981620295:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:15.785876Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00498c/r3tmp/tmpPPeBLT/pdisk_1.dat 2025-06-24T20:35:16.038624Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:35:16.039370Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:35:16.299950Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:16.300036Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:16.300265Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:16.300288Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:16.310839Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:16.330901Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:35:16.331026Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:16.339859Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:16.372034Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 27278, node 1 2025-06-24T20:35:16.463641Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00498c/r3tmp/yandexpKRQvL.tmp 2025-06-24T20:35:16.463674Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00498c/r3tmp/yandexpKRQvL.tmp 2025-06-24T20:35:16.463864Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00498c/r3tmp/yandexpKRQvL.tmp 2025-06-24T20:35:16.464049Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:35:16.522281Z INFO: TTestServer started on Port 11339 GrpcPort 27278 2025-06-24T20:35:16.739689Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11339 PQClient connected to localhost:27278 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T20:35:16.798758Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:35:16.938695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:35:17.000280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T20:35:19.361628Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617227161489832:2276], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:19.361713Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617227161489821:2273], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:19.362007Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:19.369280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:19.412638Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519617227161489835:2277], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-24T20:35:19.506999Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519617227161489862:2175] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:19.730471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:19.739649Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519617227161489876:2281], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:35:19.740222Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NGUzNTVjOWUtZDQ0ZmRjZDAtOGQ0Mjk5MDEtOTgxOTNmMzc=, ActorId: [2:7519617227161489819:2272], ActorState: ExecuteState, TraceId: 01jyhtdz5z76syx9g25ax57e65, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:35:19.742417Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:35:19.765826Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617230139395027:2304], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:35:19.767925Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YzZkZGIyNDItYzBhMmNhOTctNWIwODAyZGQtNjA3MzBhYWQ=, ActorId: [1:7519617230139395001:2297], ActorState: ExecuteState, TraceId: 01jyhtdz7adngfhgrqtjhvqmxq, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:35:19.768289Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:35:19.815791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:19.980339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_run ... 
er" } } 2025-06-24T20:40:22.524079Z node 27 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 6 consumer shared/user session shared/user_27_6_11837261769267089781_v1 auth for : user 2025-06-24T20:40:22.524120Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:455: Handle describe topics 2025-06-24T20:40:22.524142Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:506: SendSchemeCacheRequest 2025-06-24T20:40:22.524215Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:541: send request for 1 topics, got 1 requests infly, db = "Root/LbCommunal" 2025-06-24T20:40:22.524975Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:548: Handle SchemeCache response: result# { ErrorCount: 0 DatabaseName: Root/LbCommunal DomainOwnerId: 0 Instant: 12 ResultSet [{ Path: Root/LbCommunal/account/topic2-mirrored-from-dc2 TableId: [72057594046644480:18:0] RequestType: ByPath Operation: OpList RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindTopic DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T20:40:22.525125Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:613: Got describe topics SC response 2025-06-24T20:40:22.525163Z node 27 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 6 consumer shared/user session shared/user_27_6_11837261769267089781_v1 Handle describe topics response 2025-06-24T20:40:22.525393Z node 27 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 6 consumer shared/user session shared/user_27_6_11837261769267089781_v1 auth is DEAD 2025-06-24T20:40:22.525519Z node 27 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 6 consumer shared/user session shared/user_27_6_11837261769267089781_v1 auth ok: topics# 1, initDone# 0 2025-06-24T20:40:22.526771Z node 27 :PQ_READ_PROXY INFO: read_session_actor.cpp:1196: session cookie 6 consumer shared/user session shared/user_27_6_11837261769267089781_v1 register session: topic# rt3.dc2--account--topic2 2025-06-24T20:40:22.527392Z node 27 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037899][topic2-mirrored-from-dc2] pipe [27:7519618530442393353:2649] connected; active server actors: 1 2025-06-24T20:40:22.527527Z node 27 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1699: [72075186224037899][topic2-mirrored-from-dc2] consumer "user" register session for pipe [27:7519618530442393353:2649] session shared/user_27_6_11837261769267089781_v1 2025-06-24T20:40:22.527600Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:635: [72075186224037899][topic2-mirrored-from-dc2] consumer user register readable partition 0 ===Got response: status: SUCCESS init_response { session_id: "shared/user_27_6_11837261769267089781_v1" } 2025-06-24T20:40:22.527697Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:665: [72075186224037899][topic2-mirrored-from-dc2] consumer user family created family=1 (Status=Free, Partitions=[0]) 2025-06-24T20:40:22.527781Z node 27 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:867: [72075186224037899][topic2-mirrored-from-dc2] consumer user register reading session ReadingSession 
"shared/user_27_6_11837261769267089781_v1" (Sender=[27:7519618530442393350:2649], Pipe=[27:7519618530442393353:2649], Partitions=[], ActiveFamilyCount=0) 2025-06-24T20:40:22.527833Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037899][topic2-mirrored-from-dc2] consumer user rebalancing was scheduled 2025-06-24T20:40:22.527922Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1255: [72075186224037899][topic2-mirrored-from-dc2] consumer user balancing. Sessions=1, Families=1, UnradableFamilies=1 [1 (0), ], RequireBalancing=0 [] 2025-06-24T20:40:22.528029Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1302: [72075186224037899][topic2-mirrored-from-dc2] consumer user balancing family=1 (Status=Free, Partitions=[0]) for ReadingSession "shared/user_27_6_11837261769267089781_v1" (Sender=[27:7519618530442393350:2649], Pipe=[27:7519618530442393353:2649], Partitions=[], ActiveFamilyCount=0) 2025-06-24T20:40:22.528141Z node 27 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:545: [72075186224037899][topic2-mirrored-from-dc2] consumer user family 1 status Active partitions [0] session "shared/user_27_6_11837261769267089781_v1" sender [27:7519618530442393350:2649] lock partition 0 for ReadingSession "shared/user_27_6_11837261769267089781_v1" (Sender=[27:7519618530442393350:2649], Pipe=[27:7519618530442393353:2649], Partitions=[], ActiveFamilyCount=1) generation 1 step 3 2025-06-24T20:40:22.528241Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037899][topic2-mirrored-from-dc2] consumer user start rebalancing. familyCount=1, sessionCount=1, desiredFamilyCount=1, allowPlusOne=0 2025-06-24T20:40:22.528295Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037899][topic2-mirrored-from-dc2] consumer user balancing duration: 0.000327s 2025-06-24T20:40:22.528993Z node 27 :PQ_READ_PROXY INFO: read_session_actor.cpp:1315: session cookie 6 consumer shared/user session shared/user_27_6_11837261769267089781_v1 assign: record# { Partition: 0 TabletId: 72075186224037898 Topic: "topic2-mirrored-from-dc2" Generation: 1 Step: 3 Session: "shared/user_27_6_11837261769267089781_v1" ClientId: "user" PipeClient { RawX1: 7519618530442393353 RawX2: 4503715591490137 } Path: "/Root/LbCommunal/account/topic2-mirrored-from-dc2" } 2025-06-24T20:40:22.529131Z node 27 :PQ_READ_PROXY INFO: partition_actor.cpp:1132: session cookie 6 consumer shared/user session shared/user_27_6_11837261769267089781_v1 INITING TopicId: Topic topic2-mirrored-from-dc2 in dc dc2 in database: Root, partition 0(assignId:1) 2025-06-24T20:40:22.530063Z node 27 :PQ_READ_PROXY INFO: partition_actor.cpp:972: session cookie 6 consumer shared/user session shared/user_27_6_11837261769267089781_v1 TopicId: Topic topic2-mirrored-from-dc2 in dc dc2 in database: Root, partition 0(assignId:1) pipe restart attempt 0 pipe creation result: OK TabletId: 72075186224037898 Generation: 1, pipe: [27:7519618530442393356:2652] 2025-06-24T20:40:22.534552Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037898] server connected, pipe [27:7519618530442393356:2652], now have 1 active actors on pipe 2025-06-24T20:40:22.534687Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic2-mirrored-from-dc2' requestId: 2025-06-24T20:40:22.534732Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037898] got client message batch for topic 'rt3.dc2--account--topic2' partition 0 
2025-06-24T20:40:22.534798Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:1938: [PQ: 72075186224037898] Created session shared/user_27_6_11837261769267089781_v1 on pipe: [27:7519618530442393356:2652] 2025-06-24T20:40:22.534906Z node 28 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: shared/user_27_6_11837261769267089781_v1:1 with generation 1 2025-06-24T20:40:22.535083Z node 28 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037898, Partition: 0, State: StateIdle] Topic 'rt3.dc2--account--topic2' partition 0 user user session is set to 0 (startOffset 0) session shared/user_27_6_11837261769267089781_v1 2025-06-24T20:40:22.535338Z node 28 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T20:40:22.542000Z node 28 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037898, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T20:40:22.542101Z node 28 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037898, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:40:22.542164Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic2-mirrored-from-dc2' partition: 0 messageNo: 0 requestId: cookie: 18446744073709551615 2025-06-24T20:40:22.542654Z node 27 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 6 consumer shared/user session shared/user_27_6_11837261769267089781_v1 TopicId: Topic topic2-mirrored-from-dc2 in dc dc2 in database: Root, partition 0(assignId:1) initDone 0 event { CmdGetClientOffsetResult { Offset: 0 EndOffset: 0 SizeLag: 0 WriteTimestampEstimateMS: 0 ClientHasAnyCommits: false } Cookie: 18446744073709551615 } 2025-06-24T20:40:22.542739Z node 27 :PQ_READ_PROXY INFO: partition_actor.cpp:683: session cookie 6 consumer shared/user session shared/user_27_6_11837261769267089781_v1 INIT DONE TopicId: Topic topic2-mirrored-from-dc2 in dc dc2 in database: Root, partition 0(assignId:1) EndOffset 0 readOffset 0 committedOffset 0 2025-06-24T20:40:22.542843Z node 27 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1413: session cookie 6 consumer shared/user session shared/user_27_6_11837261769267089781_v1 sending to client partition status ===Got response: status: SUCCESS start_partition_session_request { partition_session { partition_session_id: 1 path: "account/topic2-mirrored-from-dc2" } partition_offsets { } } 2025-06-24T20:40:22.577970Z node 27 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 6 consumer shared/user session shared/user_27_6_11837261769267089781_v1 grpc read done: success# 0, data# { } 2025-06-24T20:40:22.577999Z node 27 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 6 consumer shared/user session shared/user_27_6_11837261769267089781_v1 grpc read failed 2025-06-24T20:40:22.578029Z node 27 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 6 consumer shared/user session shared/user_27_6_11837261769267089781_v1 grpc closed 2025-06-24T20:40:22.578074Z node 27 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 6 consumer shared/user session shared/user_27_6_11837261769267089781_v1 is DEAD 2025-06-24T20:40:22.578672Z node 27 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037899][topic2-mirrored-from-dc2] pipe [27:7519618530442393353:2649] disconnected; active server actors: 1 2025-06-24T20:40:22.578711Z node 27 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: 
[72075186224037899][topic2-mirrored-from-dc2] pipe [27:7519618530442393353:2649] client user disconnected session shared/user_27_6_11837261769267089781_v1 2025-06-24T20:40:22.579354Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037898] Destroy direct read session shared/user_27_6_11837261769267089781_v1 2025-06-24T20:40:22.579411Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037898] server disconnected, pipe [27:7519618530442393356:2652] destroyed 2025-06-24T20:40:22.579471Z node 28 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_27_6_11837261769267089781_v1 2025-06-24T20:40:23.020466Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:743: Check version rescan 2025-06-24T20:40:23.041594Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:137: Metacache: reset 2025-06-24T20:40:24.016254Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:167: HandleClustersUpdate 2025-06-24T20:40:24.016286Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:169: HandleClustersUpdate LocalCluster !LocalCluster.empty() 2025-06-24T20:40:24.043840Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:743: Check version rescan 2025-06-24T20:40:24.058592Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:137: Metacache: reset ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitBothWithReboots[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T20:30:41.333679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:30:41.333786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:41.333835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:30:41.333888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:30:41.333937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:30:41.333987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:30:41.334061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:30:41.334141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:30:41.338896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:30:41.339397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:30:41.462951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T20:30:41.463032Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:30:41.463902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T20:30:41.483119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:30:41.483831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:30:41.484017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:30:41.493197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:30:41.493484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:30:41.494236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:41.494511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:30:41.498133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:41.498354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:30:41.499663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:30:41.499745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:30:41.499999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:30:41.500087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, 
domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:30:41.500134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:30:41.500275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T20:30:41.508130Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:30:41.708617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:30:41.708893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:41.709132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:30:41.709193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:30:41.709508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:30:41.709681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:30:41.716407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:41.716677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:30:41.716959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:41.717034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:30:41.717100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:30:41.717145Z node 1 :FLAT_TX_SCHEMESHARD 
INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:30:41.721289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:41.721370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:30:41.721436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:30:41.724425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:41.724499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:30:41.724555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:30:41.724649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:30:41.728643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:30:41.731304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:30:41.731546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:30:41.732719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:30:41.732876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969454 } } Step: 5000001 Media ... 
ionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\001\000\004\000\000\0002\000\000\000" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:40:22.864000Z node 180 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409548:2][72075186233409550][180:1027:2810] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-24T20:40:22.864119Z node 180 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409548:2][72075186233409551][180:1028:2810] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-24T20:40:22.864205Z node 180 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][180:970:2810] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409550 } 2025-06-24T20:40:22.864295Z node 180 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][180:970:2810] Handle 
NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409551 } 2025-06-24T20:40:22.864429Z node 180 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409548:2][72075186233409550][180:1027:2810] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750797622823975 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1750797622823975 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-24T20:40:22.864629Z node 180 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409548:2][72075186233409551][180:1028:2810] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 3 Group: 1750797622823975 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-24T20:40:22.875863Z node 180 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409548:2][72075186233409550][180:1027:2810] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 2 2025-06-24T20:40:22.876279Z node 180 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][180:970:2810] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409550 } 2025-06-24T20:40:22.877409Z node 180 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409548:2][72075186233409551][180:1028:2810] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 3 2025-06-24T20:40:22.877543Z node 180 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][180:970:2810] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409551 } 2025-06-24T20:40:23.088328Z node 180 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T20:40:23.088732Z node 180 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 468us result status StatusSuccess 2025-06-24T20:40:23.090002Z node 180 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: 
EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\002\000\004\000\000\0002\000\000\000\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409550 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 
PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> DataShardReadIterator::ShouldReadRangeReverse [GOOD] >> DataShardReadIterator::ShouldReadRangeInclusiveEndsMissingLeftRight >> TPersQueueTest::EachMessageGetsExactlyOneAcknowledgementInCorrectOrder [GOOD] >> TPersQueueTest::Delete >> BasicUsage::ReadMirrored [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRange+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRange-EvWrite >> TxUsage::WriteToTopic_Demo_17_Query [GOOD] >> TNodeBrokerTest::Test1001NodesSubscribers ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::ReadMirrored [GOOD] Test command err: 2025-06-24T20:39:18.265275Z :PropagateSessionClosed INFO: Random seed for debugging is 1750797558265234 2025-06-24T20:39:18.915422Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618257082590944:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:18.915472Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047b7/r3tmp/tmpbv90oL/pdisk_1.dat 2025-06-24T20:39:19.099086Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519618259959703869:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:19.107701Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:39:19.719642Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:39:19.783026Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:39:19.924703Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 
2025-06-24T20:39:19.928791Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:39:20.067490Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:39:20.127952Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:39:20.507559Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:39:20.539549Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:39:20.539671Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:39:20.563279Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:39:20.563363Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:39:20.582983Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:39:20.587226Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:39:20.588763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61030, node 1 2025-06-24T20:39:21.078501Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0047b7/r3tmp/yandexoRihXP.tmp 2025-06-24T20:39:21.078522Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0047b7/r3tmp/yandexoRihXP.tmp 2025-06-24T20:39:21.078708Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0047b7/r3tmp/yandexoRihXP.tmp 2025-06-24T20:39:21.078818Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:39:21.364929Z INFO: TTestServer started on Port 5370 GrpcPort 61030 TClient is connected to server localhost:5370 PQClient connected to localhost:61030 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:39:22.254059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-24T20:39:23.919295Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618257082590944:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:23.919378Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:39:24.019573Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519618259959703869:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:24.019632Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:39:25.792133Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618287147363011:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:25.792301Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:25.793242Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618287147363031:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:25.828256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:39:25.845490Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618285729507811:2276], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:25.845577Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618285729507787:2273], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:25.845895Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:25.911263Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519618285729507816:2128] txid# 281474976710657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:39:25.923335Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618287147363033:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720661 completed, doublechecking } 2025-06-24T20:39:25.923758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976720661, at schemeshard: 72057594046644480 2025-06-24T20:39:25.927443Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519618285729507815:2277], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720661 completed, doublechecking } 2025-06-24T20:39:26.016137Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618291442330420:2694] txid# 281474976720662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:39:26.047870Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519618290024475140:2135] txid# 281474976710658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:39:26.435383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:39:26.470534Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519618291442330430:2313], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:39:26.472883Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MWRjM2NhZC0zZDlmMWEwZi04YjEyMWZkNC03NTYxNDA1Yw==, ActorId: [1:7519618287147362992:2299], ActorStat ... or.cpp:122: session cookie 1 consumer shared/user session shared/user_3_1_6516710108694859081_v1 grpc read done: success# 1, data# { read_request { bytes_size: 1450 } } 2025-06-24T20:40:26.984052Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer shared/user session shared/user_3_1_6516710108694859081_v1 got read request: guid# b626ad92-7ec0afbf-6dddfe8d-4e74d57 >>> event from dataHandler: DataReceived { Partition session id: 3 Topic: "test-topic" Partition: 0 Database name: dc3 Database path: /Root Database id: account-dc3 Message { Data: ..130 bytes.. Information: { Offset: 1 ProducerId: "src_id" SeqNo: 2 CreateTime: 2025-06-24T20:40:26.952000Z WriteTime: 2025-06-24T20:40:26.964000Z MessageGroupId: "src_id" Meta: { "ident": "unknown", "server": "ipv6:[::1]:32790", "logtype": "unknown", "_ip": "ipv6:[::1]:32790" } MessageMeta: { } } Partition session id: 3 Topic: "test-topic" Partition: 0 Database name: dc3 Database path: /Root Database id: account-dc3 } Message { Data: ..240 bytes.. Information: { Offset: 2 ProducerId: "src_id" SeqNo: 3 CreateTime: 2025-06-24T20:40:26.952000Z WriteTime: 2025-06-24T20:40:26.964000Z MessageGroupId: "src_id" Meta: { "ident": "unknown", "server": "ipv6:[::1]:32790", "logtype": "unknown", "_ip": "ipv6:[::1]:32790" } MessageMeta: { } } Partition session id: 3 Topic: "test-topic" Partition: 0 Database name: dc3 Database path: /Root Database id: account-dc3 } Message { Data: ..350 bytes.. Information: { Offset: 3 ProducerId: "src_id" SeqNo: 4 CreateTime: 2025-06-24T20:40:26.952000Z WriteTime: 2025-06-24T20:40:26.964000Z MessageGroupId: "src_id" Meta: { "ident": "unknown", "server": "ipv6:[::1]:32790", "logtype": "unknown", "_ip": "ipv6:[::1]:32790" } MessageMeta: { } } Partition session id: 3 Topic: "test-topic" Partition: 0 Database name: dc3 Database path: /Root Database id: account-dc3 } Message { Data: ..460 bytes.. Information: { Offset: 4 ProducerId: "src_id" SeqNo: 5 CreateTime: 2025-06-24T20:40:26.952000Z WriteTime: 2025-06-24T20:40:26.964000Z MessageGroupId: "src_id" Meta: { "ident": "unknown", "server": "ipv6:[::1]:32790", "logtype": "unknown", "_ip": "ipv6:[::1]:32790" } MessageMeta: { } } Partition session id: 3 Topic: "test-topic" Partition: 0 Database name: dc3 Database path: /Root Database id: account-dc3 } } >>> get 4 messages in this event 2025-06-24T20:40:26.987914Z :DEBUG: [/Root] [/Root] [7bd7a8b9-b5557fd4-9d8e8649-52b29d42] [] The application data is transferred to the client. 
Number of messages 4, size 1180 bytes 2025-06-24T20:40:26.987975Z :DEBUG: [/Root] [/Root] [7bd7a8b9-b5557fd4-9d8e8649-52b29d42] [] Returning serverBytesSize = 0 to budget 2025-06-24T20:40:27.055222Z :INFO: [/Root] MessageGroupId [src_id] SessionId [src_id|4dd26fdc-c0dd1ac7-35b92315-f2518fa0_0] Write session will now close 2025-06-24T20:40:27.055313Z :DEBUG: [/Root] MessageGroupId [src_id] SessionId [src_id|4dd26fdc-c0dd1ac7-35b92315-f2518fa0_0] Write session: aborting 2025-06-24T20:40:27.055806Z :INFO: [/Root] MessageGroupId [src_id] SessionId [src_id|4dd26fdc-c0dd1ac7-35b92315-f2518fa0_0] Write session: gracefully shut down, all writes complete >>> Writes to test-topic-mirrored-from-dc3 successful 2025-06-24T20:40:27.055858Z :DEBUG: [/Root] MessageGroupId [src_id] SessionId [src_id|4dd26fdc-c0dd1ac7-35b92315-f2518fa0_0] Write session: destroy 2025-06-24T20:40:27.056014Z :INFO: [/Root] [/Root] [7bd7a8b9-b5557fd4-9d8e8649-52b29d42] Closing read session. Close timeout: 18446744073709.551615s 2025-06-24T20:40:27.056080Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic-mirrored-from-dc3:0:3:4:0 -:test-topic:0:2:4:0 -:test-topic-mirrored-from-dc2:0:1:4:0 2025-06-24T20:40:27.056127Z :INFO: [/Root] [/Root] [7bd7a8b9-b5557fd4-9d8e8649-52b29d42] Counters: { Errors: 0 CurrentSessionLifetimeMs: 538 BytesRead: 3600 MessagesRead: 15 BytesReadCompressed: 3600 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T20:40:27.056645Z :INFO: [/Root] [/Root] [7bd7a8b9-b5557fd4-9d8e8649-52b29d42] Closing read session. Close timeout: 0.000000s 2025-06-24T20:40:27.056465Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: src_id|4dd26fdc-c0dd1ac7-35b92315-f2518fa0_0 grpc read done: success: 0 data: 2025-06-24T20:40:27.056704Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic-mirrored-from-dc3:0:3:4:0 -:test-topic:0:2:4:0 -:test-topic-mirrored-from-dc2:0:1:4:0 2025-06-24T20:40:27.056494Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: src_id|4dd26fdc-c0dd1ac7-35b92315-f2518fa0_0 grpc read failed 2025-06-24T20:40:27.056747Z :INFO: [/Root] [/Root] [7bd7a8b9-b5557fd4-9d8e8649-52b29d42] Counters: { Errors: 0 CurrentSessionLifetimeMs: 539 BytesRead: 3600 MessagesRead: 15 BytesReadCompressed: 3600 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T20:40:27.056529Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: src_id|4dd26fdc-c0dd1ac7-35b92315-f2518fa0_0 grpc closed 2025-06-24T20:40:27.056550Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: src_id|4dd26fdc-c0dd1ac7-35b92315-f2518fa0_0 is DEAD 2025-06-24T20:40:27.056784Z :INFO: [/Root] [/Root] [7bd7a8b9-b5557fd4-9d8e8649-52b29d42] Closing read session. 
Close timeout: 0.000000s 2025-06-24T20:40:27.056832Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic-mirrored-from-dc3:0:3:4:0 -:test-topic:0:2:4:0 -:test-topic-mirrored-from-dc2:0:1:4:0 2025-06-24T20:40:27.056873Z :INFO: [/Root] [/Root] [7bd7a8b9-b5557fd4-9d8e8649-52b29d42] Counters: { Errors: 0 CurrentSessionLifetimeMs: 539 BytesRead: 3600 MessagesRead: 15 BytesReadCompressed: 3600 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T20:40:27.056975Z :NOTICE: [/Root] [/Root] [7bd7a8b9-b5557fd4-9d8e8649-52b29d42] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T20:40:27.057938Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037896] server disconnected, pipe [3:7519618545840000531:2566] destroyed 2025-06-24T20:40:27.057427Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:40:27.058189Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_3_1_6516710108694859081_v1 grpc read done: success# 0, data# { } 2025-06-24T20:40:27.058216Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_3_1_6516710108694859081_v1 grpc read failed 2025-06-24T20:40:27.058004Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037896, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-24T20:40:27.058261Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_3_1_6516710108694859081_v1 grpc closed 2025-06-24T20:40:27.058334Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_3_1_6516710108694859081_v1 is DEAD 2025-06-24T20:40:27.058917Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037896] Destroy direct read session shared/user_3_1_6516710108694859081_v1 2025-06-24T20:40:27.058997Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037894] Destroy direct read session shared/user_3_1_6516710108694859081_v1 2025-06-24T20:40:27.059030Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [3:7519618545840000382:2553] destroyed 2025-06-24T20:40:27.059061Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session shared/user_3_1_6516710108694859081_v1 2025-06-24T20:40:27.059087Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [3:7519618545840000381:2552] destroyed 2025-06-24T20:40:27.059224Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037896] server disconnected, pipe [3:7519618545840000380:2551] destroyed 2025-06-24T20:40:27.059261Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_3_1_6516710108694859081_v1 2025-06-24T20:40:27.059283Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_3_1_6516710108694859081_v1 2025-06-24T20:40:27.059301Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_3_1_6516710108694859081_v1 2025-06-24T20:40:27.060466Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037897][rt3.dc1--test-topic-mirrored-from-dc3] pipe [3:7519618545840000371:2548] disconnected; active server actors: 1 2025-06-24T20:40:27.060500Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037897][rt3.dc1--test-topic-mirrored-from-dc3] pipe [3:7519618545840000371:2548] client user disconnected session shared/user_3_1_6516710108694859081_v1 2025-06-24T20:40:27.060555Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][rt3.dc1--test-topic-mirrored-from-dc2] pipe [3:7519618545840000373:2548] disconnected; active server actors: 1 2025-06-24T20:40:27.060556Z node 3 :PERSQUEUE_READ_BALANCER INFO: 
read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519618545840000372:2548] disconnected; active server actors: 1 2025-06-24T20:40:27.060564Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][rt3.dc1--test-topic-mirrored-from-dc2] pipe [3:7519618545840000373:2548] client user disconnected session shared/user_3_1_6516710108694859081_v1 2025-06-24T20:40:27.060577Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519618545840000372:2548] client user disconnected session shared/user_3_1_6516710108694859081_v1 2025-06-24T20:40:27.248508Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710705, task: 1, CA Id [3:7519618550134967849:2571]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-06-24T20:40:27.287754Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710705, task: 1, CA Id [3:7519618550134967849:2571]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:40:27.344366Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710705, task: 1, CA Id [3:7519618550134967849:2571]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:40:27.427919Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710705, task: 1, CA Id [3:7519618550134967849:2571]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:40:27.515075Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710705, task: 1, CA Id [3:7519618550134967849:2571]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:40:27.660269Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710705, task: 1, CA Id [3:7519618550134967849:2571]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:40:27.981671Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710705, task: 1, CA Id [3:7519618550134967849:2571]. 
Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 >> TxUsage::Transactions_Conflict_On_SeqNo_Table >> TNodeBrokerTest::NodeNameWithDifferentTenants >> TxUsage::WriteToTopic_Demo_40_Query |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops |90.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops |90.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-68 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-69 >> DataShardReadIterator::ShouldReverseReadMultipleRangesOneByOneWithAcks [GOOD] >> DataShardReadIterator::ShouldReturnMvccSnapshotFromFuture |90.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::MkDir >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-67 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-68 >> TNodeBrokerTest::NodeNameWithDifferentTenants [GOOD] >> BsControllerConfig::MoveGroups [GOOD] >> KqpSinkLocks::EmptyRange [GOOD] >> KqpSinkLocks::EmptyRangeAlreadyBroken >> TxUsage::Sinks_Oltp_WriteToTopics_4_Table [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Table [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UINT16 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UINT32 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-66 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-67 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodeNameWithDifferentTenants [GOOD] Test command err: 2025-06-24T20:40:32.641177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:40:32.641260Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) 2025-06-24T20:40:33.010730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 101 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000001 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T20:40:33.053214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 102 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000002 FAKE_COORDINATOR: Erasing txId 102 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::MoveGroups [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3065:2106] recipient: [1:2973:2117] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3065:2106] recipient: [1:2973:2117] Leader for TabletID 72057594037932033 is [1:3113:2119] sender: [1:3115:2106] recipient: [1:2973:2117] 2025-06-24T20:39:55.941592Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-24T20:39:55.955769Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-24T20:39:55.956233Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-24T20:39:55.958549Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:39:55.959447Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-24T20:39:55.959686Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-24T20:39:55.959730Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-24T20:39:55.960081Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-24T20:39:55.984845Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-24T20:39:55.984997Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-24T20:39:55.985200Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-24T20:39:55.985440Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T20:39:55.985578Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue 
Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T20:39:55.985662Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:3113:2119] sender: [1:3138:2106] recipient: [1:60:2107] 2025-06-24T20:39:56.000382Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-24T20:39:56.000558Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T20:39:56.015863Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T20:39:56.016019Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T20:39:56.016108Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T20:39:56.016187Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T20:39:56.016304Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T20:39:56.016373Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T20:39:56.016498Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T20:39:56.016589Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T20:39:56.027740Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T20:39:56.027878Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T20:39:56.038742Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T20:39:56.038903Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-24T20:39:56.040231Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-24T20:39:56.040279Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-24T20:39:56.040482Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-24T20:39:56.040534Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-24T20:39:56.091556Z node 1 
:BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "first box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12021 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12022 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12023 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12024 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12025 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12026 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12027 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12028 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12029 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12030 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12031 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12032 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12033 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12034 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12035 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12036 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12037 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12038 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12039 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12040 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12041 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12042 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12043 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12044 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12045 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12046 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12047 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12048 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12049 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12050 } HostConfigId: 1 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "first storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 150 PDiskFilter { Property { Type: ROT } } } } } 2025-06-24T20:39:56.093195Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} 
Create new pdisk PDiskId# 1:1000 Path# /dev/disk1 2025-06-24T20:39:56.093278Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1001 Path# /dev/disk2 2025-06-24T20:39:56.093312Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1002 Path# /dev/disk3 2025-06-24T20:39:56.093357Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /dev/disk1 2025-06-24T20:39:56.093388Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1001 Path# /dev/disk2 2025-06-24T20:39:56.093415Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1002 Path# /dev/disk3 2025-06-24T20:39:56.093440Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1000 Path# /dev/disk1 2025-06-24T20:39:56.093478Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1001 Path# /dev/disk2 2025-06-24T20:39:56.093504Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1002 Path# /dev/disk3 2025-06-24T20:39:56.093539Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1000 Path# /dev/disk1 2025-06-24T20:39:56.093569Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1001 Path# /dev/disk2 2025-06-24T20:39:56.093593Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1002 Path# /dev/disk3 2025-06-24T20:39:56.093984Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1000 Path# /dev/disk1 2025-06-24T20:39:56.094024Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1001 Path# /dev/disk2 2025-06-24T20:39:56.094082Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1002 Path# /dev/disk3 2025-06-24T20:39:56.094122Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1000 Path# /dev/disk1 2025-06-24T20:39:56.094148Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1001 Path# /dev/disk2 2025-06-24T20:39:56.094170Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1002 Path# /dev/disk3 2025-06-24T20:39:56.094199Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1000 Path# /dev/disk1 2025-06-24T20:39:56.094230Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1001 Path# /dev/disk2 2025-06-24T20:39:56.094251Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:3 ... 
Id# 178:1001 Path# /dev/disk2 2025-06-24T20:40:26.636135Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 178:1002 Path# /dev/disk3 2025-06-24T20:40:26.636165Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 179:1000 Path# /dev/disk1 2025-06-24T20:40:26.636193Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 179:1001 Path# /dev/disk2 2025-06-24T20:40:26.636219Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 179:1002 Path# /dev/disk3 2025-06-24T20:40:26.636244Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 180:1000 Path# /dev/disk1 2025-06-24T20:40:26.636270Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 180:1001 Path# /dev/disk2 2025-06-24T20:40:26.636297Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 180:1002 Path# /dev/disk3 2025-06-24T20:40:26.636324Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 181:1000 Path# /dev/disk1 2025-06-24T20:40:26.636351Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 181:1001 Path# /dev/disk2 2025-06-24T20:40:26.636379Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 181:1002 Path# /dev/disk3 2025-06-24T20:40:26.636405Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 182:1000 Path# /dev/disk1 2025-06-24T20:40:26.636432Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 182:1001 Path# /dev/disk2 2025-06-24T20:40:26.636460Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 182:1002 Path# /dev/disk3 2025-06-24T20:40:26.636486Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 183:1000 Path# /dev/disk1 2025-06-24T20:40:26.636511Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 183:1001 Path# /dev/disk2 2025-06-24T20:40:26.636537Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 183:1002 Path# /dev/disk3 2025-06-24T20:40:26.636572Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 184:1000 Path# /dev/disk1 2025-06-24T20:40:26.636597Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 184:1001 Path# /dev/disk2 2025-06-24T20:40:26.636623Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 184:1002 Path# /dev/disk3 2025-06-24T20:40:26.636647Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 185:1000 Path# /dev/disk1 2025-06-24T20:40:26.636673Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 185:1001 Path# /dev/disk2 2025-06-24T20:40:26.636699Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 185:1002 Path# /dev/disk3 2025-06-24T20:40:26.636724Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 186:1000 Path# /dev/disk1 2025-06-24T20:40:26.636749Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk 
PDiskId# 186:1001 Path# /dev/disk2 2025-06-24T20:40:26.636775Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 186:1002 Path# /dev/disk3 2025-06-24T20:40:26.636803Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 187:1000 Path# /dev/disk1 2025-06-24T20:40:26.636830Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 187:1001 Path# /dev/disk2 2025-06-24T20:40:26.636858Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 187:1002 Path# /dev/disk3 2025-06-24T20:40:26.636885Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 188:1000 Path# /dev/disk1 2025-06-24T20:40:26.636914Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 188:1001 Path# /dev/disk2 2025-06-24T20:40:26.636942Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 188:1002 Path# /dev/disk3 2025-06-24T20:40:26.636966Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 189:1000 Path# /dev/disk1 2025-06-24T20:40:26.636991Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 189:1001 Path# /dev/disk2 2025-06-24T20:40:26.637018Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 189:1002 Path# /dev/disk3 2025-06-24T20:40:26.637042Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 190:1000 Path# /dev/disk1 2025-06-24T20:40:26.637069Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 190:1001 Path# /dev/disk2 2025-06-24T20:40:26.637097Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 190:1002 Path# /dev/disk3 2025-06-24T20:40:26.637123Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 191:1000 Path# /dev/disk1 2025-06-24T20:40:26.637149Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 191:1001 Path# /dev/disk2 2025-06-24T20:40:26.637174Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 191:1002 Path# /dev/disk3 2025-06-24T20:40:26.637197Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 192:1000 Path# /dev/disk1 2025-06-24T20:40:26.637224Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 192:1001 Path# /dev/disk2 2025-06-24T20:40:26.637251Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 192:1002 Path# /dev/disk3 2025-06-24T20:40:26.637278Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 193:1000 Path# /dev/disk1 2025-06-24T20:40:26.637304Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 193:1001 Path# /dev/disk2 2025-06-24T20:40:26.637332Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 193:1002 Path# /dev/disk3 2025-06-24T20:40:26.637357Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 194:1000 Path# /dev/disk1 2025-06-24T20:40:26.637382Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk 
PDiskId# 194:1001 Path# /dev/disk2 2025-06-24T20:40:26.637410Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 194:1002 Path# /dev/disk3 2025-06-24T20:40:26.637435Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 195:1000 Path# /dev/disk1 2025-06-24T20:40:26.637461Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 195:1001 Path# /dev/disk2 2025-06-24T20:40:26.637487Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 195:1002 Path# /dev/disk3 2025-06-24T20:40:26.637513Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 196:1000 Path# /dev/disk1 2025-06-24T20:40:26.637538Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 196:1001 Path# /dev/disk2 2025-06-24T20:40:26.637605Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 196:1002 Path# /dev/disk3 2025-06-24T20:40:26.637637Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 197:1000 Path# /dev/disk1 2025-06-24T20:40:26.637666Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 197:1001 Path# /dev/disk2 2025-06-24T20:40:26.637692Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 197:1002 Path# /dev/disk3 2025-06-24T20:40:26.637718Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 198:1000 Path# /dev/disk1 2025-06-24T20:40:26.637744Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 198:1001 Path# /dev/disk2 2025-06-24T20:40:26.637769Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 198:1002 Path# /dev/disk3 2025-06-24T20:40:26.637793Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 199:1000 Path# /dev/disk1 2025-06-24T20:40:26.637822Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 199:1001 Path# /dev/disk2 2025-06-24T20:40:26.637848Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 199:1002 Path# /dev/disk3 2025-06-24T20:40:26.637873Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 200:1000 Path# /dev/disk1 2025-06-24T20:40:26.637896Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 200:1001 Path# /dev/disk2 2025-06-24T20:40:26.637922Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 200:1002 Path# /dev/disk3 2025-06-24T20:40:26.946094Z node 151 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.314608s 2025-06-24T20:40:26.946347Z node 151 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.314881s 2025-06-24T20:40:26.960840Z node 151 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 151 Type# 268639257 2025-06-24T20:40:26.985066Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } } 2025-06-24T20:40:27.098244Z node 151 :BS_CONTROLLER DEBUG: 
{BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { MoveGroups { BoxId: 1 OriginStoragePoolId: 2 OriginStoragePoolGeneration: 1 TargetStoragePoolId: 1 TargetStoragePoolGeneration: 1 ExplicitGroupId: 2147483748 } } } 2025-06-24T20:40:27.115635Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } } 2025-06-24T20:40:27.242198Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { MoveGroups { BoxId: 1 OriginStoragePoolId: 2 OriginStoragePoolGeneration: 2 TargetStoragePoolId: 1 TargetStoragePoolGeneration: 2 ExplicitGroupId: 2147483749 } } } 2025-06-24T20:40:27.259406Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } } 2025-06-24T20:40:27.366920Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { MoveGroups { BoxId: 1 OriginStoragePoolId: 2 OriginStoragePoolGeneration: 3 TargetStoragePoolId: 1 TargetStoragePoolGeneration: 3 } } } 2025-06-24T20:40:27.388609Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } } >> KqpTx::TooManyTx >> BackupRestore::RestoreIndexTableReadReplicasSettings [GOOD] >> BackupRestore::RestoreTableSplitBoundaries >> DataShardReadIterator::ShouldReadRangeInclusiveEndsMissingLeftRight [GOOD] >> DataShardReadIterator::ShouldReadRangeNonInclusiveEnds >> TxUsage::Sinks_Oltp_WriteToTopics_4_Query >> TPersQueueTest::LOGBROKER_7820 [GOOD] >> TPersQueueTest::InflightLimit >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Query >> TxUsage::WriteToTopic_Demo_44_Query [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRange-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips+EvWrite >> TSchemeShardUserAttrsTest::MkDir [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-21 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-22 >> KqpTx::CommitRoTx ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::MkDir [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:40:36.457389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:40:36.457491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:40:36.457531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:40:36.457575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 
2025-06-24T20:40:36.463244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:40:36.463335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:40:36.463455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:40:36.463612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:40:36.464394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:40:36.476245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:40:36.665649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:40:36.665724Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:36.704560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T20:40:36.705050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:40:36.705335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:40:36.758772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:40:36.759099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:40:36.767701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:40:36.768416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:40:36.796206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:40:36.799899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:40:36.831783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:40:36.831895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:40:36.832156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:40:36.832210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:40:36.832320Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:40:36.832424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:40:36.846809Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:40:37.095372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:40:37.103078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:40:37.111344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:40:37.111452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:40:37.115537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:40:37.115758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:40:37.120709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:40:37.127497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:40:37.127784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:40:37.127941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:40:37.127981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:40:37.128017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:40:37.136149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:40:37.136230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: 
NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:40:37.136289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:40:37.138543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:40:37.138609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:40:37.138658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:40:37.138706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:40:37.147035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:40:37.156091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:40:37.163355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:40:37.164678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:40:37.164827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:40:37.164883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:40:37.167235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:40:37.167338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:40:37.167612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:40:37.167717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:40:37.172751Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:40:37.172822Z node 1 :FLAT_TX_SCHEMESHARD ... : 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: "DirB" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:40:37.602638Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:40:37.602949Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA" took 317us result status StatusSuccess 2025-06-24T20:40:37.603446Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA" PathDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 } ChildrenExist: true } Children { Name: "SubDirA" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000004 
ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrA1" Value: "ValA1" } UserAttributes { Key: "AttrA2" Value: "ValA2" } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:40:37.613385Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirB" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:40:37.613735Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirB" took 321us result status StatusSuccess 2025-06-24T20:40:37.614212Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirB" PathDescription { Self { Name: "DirB" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrB1" Value: "ValB1" } UserAttributes { Key: "AttrB2" 
Value: "ValB2" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:40:37.614936Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/SubDirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:40:37.615140Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/SubDirA" took 208us result status StatusSuccess 2025-06-24T20:40:37.615603Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/SubDirA" PathDescription { Self { Name: "SubDirA" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "DirB" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 103 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrAA1" Value: "ValAA1" } UserAttributes { Key: "AttrAA2" Value: "ValAA2" } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:40:37.616304Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/SubDirA/DirB" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T20:40:37.616494Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/SubDirA/DirB" took 188us result status StatusSuccess 2025-06-24T20:40:37.616879Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/SubDirA/DirB" PathDescription { Self { Name: "DirB" PathId: 5 SchemeshardId: 72057594046678944 PathType: 
EPathTypeDir CreateFinished: true CreateTxId: 103 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrAB1" Value: "ValAB1" } UserAttributes { Key: "AttrAB2" Value: "ValAB2" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Table [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Table [GOOD] >> TxUsage::WriteToTopic_Demo_45_Table |90.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |90.4%| [LD] {RESULT} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |90.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |90.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut |90.4%| [LD] {RESULT} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut |90.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut >> DataShardReadIterator::ShouldReturnMvccSnapshotFromFuture [GOOD] >> DataShardReadIterator::ShouldRollbackLocksWhenWrite |90.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/ydb-core-ymq-actor-cloud_events-cloud_events_ut |90.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/ydb-core-ymq-actor-cloud_events-cloud_events_ut |90.4%| [LD] {RESULT} $(B)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/ydb-core-ymq-actor-cloud_events-cloud_events_ut >> KqpSinkLocks::InvalidateOnCommit >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Query >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-69 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-70 >> TxUsage::WriteToTopic_Demo_32_Query [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-68 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-69 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-67 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-68 >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Query >> KqpSnapshotRead::TestSnapshotExpiration+withSink [GOOD] >> 
DataShardReadIterator::ShouldReadRangeNonInclusiveEnds [GOOD] >> DataShardReadIterator::ShouldReadRangeLeftInclusive >> test_sql_streaming.py::test[suites-ReadTopicWithMetadata-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataInsideFilter-default.txt] >> BackupRestore::TestAllPrimitiveTypes-UINT32 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UINT64 >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefix+EvWrite >> KqpTx::TooManyTx [GOOD] >> KqpTx::SnapshotROInteractive2 >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Table [GOOD] >> KqpSinkLocks::OlapUncommittedRead [GOOD] >> KqpSinkLocks::OlapInsertWithBulkUpsert-UseBulkUpsert [GOOD] >> KqpSinkLocks::OlapVisibleUncommittedRows [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotRead::TestSnapshotExpiration+withSink [GOOD] Test command err: Trying to start YDB, gRPC: 17564, MsgBus: 11328 2025-06-24T20:40:17.826405Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618507054473497:2162];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:17.826507Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00319d/r3tmp/tmp5Jvzts/pdisk_1.dat 2025-06-24T20:40:18.584331Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:18.584449Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:18.626417Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:18.647091Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618507054473373:2079] 1750797617785744 != 1750797617785747 2025-06-24T20:40:18.652566Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17564, node 1 2025-06-24T20:40:18.911003Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:40:18.963671Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:40:18.963694Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:40:18.963700Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:40:18.963812Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11328 TClient is connected to server localhost:11328 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:40:19.926725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:40:19.982137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:20.166470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:20.543554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:20.695531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:40:22.833686Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618507054473497:2162];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:22.833769Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:40:23.179587Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618532824278775:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:23.179678Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:23.604614Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:23.645640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:23.715244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:23.790937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:23.864854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:23.971734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:24.042705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:24.195978Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618537119246743:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:24.196085Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:24.196096Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618537119246748:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:24.200722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:40:24.217276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T20:40:24.218168Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618537119246750:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:40:24.297745Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618537119246801:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 25854, MsgBus: 29728 2025-06-24T20:40:27.573723Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519618549952555665:2127];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:27.574672Z node 2 :METADATA_PROVIDER ERROR: log.cp ... p:228: got bad distributable configuration TClient is connected to server localhost:29728 TClient is connected to server localhost:29728 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:40:28.419504Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:40:28.434642Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:28.514427Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:40:28.679358Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:40:28.753910Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:28.852715Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:31.345849Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618567132426386:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:31.345981Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:31.456132Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:31.540171Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:31.587671Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:31.636215Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:31.740291Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:31.818451Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:31.879626Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:31.971924Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618567132427046:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:31.972015Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:31.972233Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618567132427051:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:31.976503Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:40:31.986553Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519618567132427053:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:40:32.090077Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519618571427394400:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:40:32.566631Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519618549952555665:2127];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:32.566729Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:40:42.724614Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:40:42.724652Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:43.791222Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [2:7519618618672035306:2605], TxId: 281474976715680, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=NDgzYmIwNGEtNGE4NDdlYzUtOGU1MjdmNTAtNDU2MjI1YzA=. TraceId : 01jyhtqvkh3jwtxftx8dknthea. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Source[0] fatal error: {
: Error: Read request aborted subissue: {
: Error: Table id 2 has no snapshot at v1750797634005/18446744073709551615 shard 72075186224037888 with lowWatermark v1750797634208/18446744073709551615 (node# 2 state# Ready) } } 2025-06-24T20:40:43.791765Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [2:7519618618672035306:2605], TxId: 281474976715680, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=NDgzYmIwNGEtNGE4NDdlYzUtOGU1MjdmNTAtNDU2MjI1YzA=. TraceId : 01jyhtqvkh3jwtxftx8dknthea. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: ABORTED DEFAULT_ERROR: {
: Error: Read request aborted subissue: {
: Error: Table id 2 has no snapshot at v1750797634005/18446744073709551615 shard 72075186224037888 with lowWatermark v1750797634208/18446744073709551615 (node# 2 state# Ready) } }. 2025-06-24T20:40:43.792376Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519618618672035307:2606], TxId: 281474976715680, task: 2. Ctx: { SessionId : ydb://session/3?node_id=2&id=NDgzYmIwNGEtNGE4NDdlYzUtOGU1MjdmNTAtNDU2MjI1YzA=. CustomerSuppliedId : . TraceId : 01jyhtqvkh3jwtxftx8dknthea. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [2:7519618618672035302:2475], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T20:40:43.792804Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NDgzYmIwNGEtNGE4NDdlYzUtOGU1MjdmNTAtNDU2MjI1YzA=, ActorId: [2:7519618575722361973:2475], ActorState: ExecuteState, TraceId: 01jyhtqvkh3jwtxftx8dknthea, Create QueryResponse for error on request, msg: >> IndexBuildTest::CancellationNotEnoughRetries [GOOD] >> IndexBuildTest::CheckLimitWithDroppedIndex >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-22 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-23 >> KqpSinkLocks::EmptyRangeAlreadyBroken [GOOD] >> KqpSinkLocks::EmptyRangeAlreadyBrokenOlap >> AutoConfig::GetServicePoolsWith2CPUs [GOOD] >> KqpTx::CommitRoTx [GOOD] >> KqpTx::CommitRoTx_TLI ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkLocks::OlapVisibleUncommittedRows [GOOD] Test command err: Trying to start YDB, gRPC: 28175, MsgBus: 24228 2025-06-24T20:40:24.333436Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618539488533087:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:24.336278Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003187/r3tmp/tmpH7vnGN/pdisk_1.dat 2025-06-24T20:40:25.143357Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618539488532883:2079] 1750797624250913 != 1750797624250916 2025-06-24T20:40:25.180049Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:25.180161Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:25.180778Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:25.184988Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28175, node 1 2025-06-24T20:40:25.379870Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:40:25.459607Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:40:25.459634Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:40:25.459654Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:40:25.459771Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24228 TClient is connected to server localhost:24228 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:40:26.456757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:40:28.842519Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618556668402712:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:28.842702Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:28.843072Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618556668402724:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:28.850372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:40:28.866109Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618556668402726:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:40:28.960354Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618556668402777:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:40:29.339223Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618539488533087:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:29.339339Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:40:29.648925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T20:40:29.860542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519618560963370232:2306];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T20:40:29.860787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519618560963370232:2306];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T20:40:29.861033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519618560963370232:2306];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T20:40:29.861151Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519618560963370232:2306];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T20:40:29.861256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519618560963370232:2306];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T20:40:29.861353Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519618560963370232:2306];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T20:40:29.861452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519618560963370232:2306];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T20:40:29.861600Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519618560963370232:2306];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T20:40:29.861715Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519618560963370232:2306];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T20:40:29.861830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519618560963370232:2306];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T20:40:29.861939Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519618560963370232:2306];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T20:40:29.867200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519618560963370237:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T20:40:29.867265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519618560963370237:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T20:40:29.867465Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519618560963370237:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T20:40:29.867571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519618560963370237:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T20:40:29.867672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519618560963370237:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T20:40:29.867787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519618560963370237:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T20:40:29.867894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519618560963370237:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T20:40:29.867993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519618560963370237:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T20:40:29.868084Z node 1 :TX_COLU ... 
vent=ack_tablet_duplication;wait=72075186224037970,72075186224037981,72075186224037993,72075186224037996;receive=72075186224037969; 2025-06-24T20:40:42.724366Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=50;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981,72075186224037993,72075186224037996;receive=72075186224037970; 2025-06-24T20:40:42.724421Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=51;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981,72075186224037993,72075186224037996;receive=72075186224037970; 2025-06-24T20:40:42.724472Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=52;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981,72075186224037993,72075186224037996;receive=72075186224037970; 2025-06-24T20:40:42.724539Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=53;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981,72075186224037993,72075186224037996;receive=72075186224037970; 2025-06-24T20:40:42.724590Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=54;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981,72075186224037993,72075186224037996;receive=72075186224037970; 2025-06-24T20:40:42.724639Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=55;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981,72075186224037993,72075186224037996;receive=72075186224037970; 2025-06-24T20:40:42.724691Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=56;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981,72075186224037993,72075186224037996;receive=72075186224037970; 2025-06-24T20:40:42.724818Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=58;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993,72075186224037996;receive=72075186224037981; 2025-06-24T20:40:42.724871Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=59;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993,72075186224037996;receive=72075186224037981; 
2025-06-24T20:40:42.724928Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=60;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993,72075186224037996;receive=72075186224037981; 2025-06-24T20:40:42.724981Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=61;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993,72075186224037996;receive=72075186224037981; 2025-06-24T20:40:42.725034Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=62;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993,72075186224037996;receive=72075186224037981; 2025-06-24T20:40:42.725085Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=63;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993,72075186224037996;receive=72075186224037981; 2025-06-24T20:40:42.725133Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=64;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993,72075186224037996;receive=72075186224037981; 2025-06-24T20:40:42.725259Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=66;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037996;receive=72075186224037993; 2025-06-24T20:40:42.725308Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=67;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037996;receive=72075186224037993; 2025-06-24T20:40:42.725357Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=68;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037996;receive=72075186224037993; 2025-06-24T20:40:42.725407Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=69;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037996;receive=72075186224037993; 2025-06-24T20:40:42.725459Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=70;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037996;receive=72075186224037993; 2025-06-24T20:40:42.725507Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=71;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037996;receive=72075186224037993; 2025-06-24T20:40:42.725564Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=72;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037996;receive=72075186224037993; 2025-06-24T20:40:42.726855Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-24T20:40:42.728638Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-24T20:40:42.730420Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-24T20:40:44.642998Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519618560963370237:2307];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037895;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:40:44.645229Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519618560963370238:2308];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037888;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:40:44.654884Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519618560963370363:2315];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037896;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:40:44.654970Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519618560963370283:2313];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:40:44.655953Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519618560963370279:2309];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037889;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:40:44.656040Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7519618560963370281:2310];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037890;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:40:44.656097Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519618560963370232:2306];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037891;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:40:44.656155Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519618560963370292:2314];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037892;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:40:44.656212Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519618560963370282:2312];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037893;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:40:44.656268Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519618560963370280:2311];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037894;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; >> TPersQueueTest::ReadWithoutConsumerFirstClassCitizen [GOOD] |90.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetServicePoolsWith2CPUs [GOOD] >> BackupRestore::RestoreTableSplitBoundaries [GOOD] >> BackupRestore::ImportDataShouldHandleErrors >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite2 >> IndexBuildTest::CheckLimitWithDroppedIndex [GOOD] >> IndexBuildTest::DropIndex >> DataShardReadIterator::ShouldRollbackLocksWhenWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenWriteInSeparateTransactions+EvWrite >> KqpRanges::IsNullInValue >> KqpKv::ReadRows_SpecificKey >> TPersQueueTest::SetMeteringMode [GOOD] >> TPersQueueTest::TClusterTrackerTest ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_32_Query [GOOD] Test command err: 2025-06-24T20:34:43.920811Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617075654399016:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:43.927407Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:34:44.198428Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00454c/r3tmp/tmpnvnAK0/pdisk_1.dat 2025-06-24T20:34:44.471054Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:44.479278Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617075654398986:2079] 1750797283913126 != 1750797283913129 2025-06-24T20:34:44.480477Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:44.480585Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:44.491780Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30726, node 1 2025-06-24T20:34:44.596268Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00454c/r3tmp/yandexvekCWa.tmp 2025-06-24T20:34:44.596301Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00454c/r3tmp/yandexvekCWa.tmp 2025-06-24T20:34:44.596473Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00454c/r3tmp/yandexvekCWa.tmp 2025-06-24T20:34:44.596618Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:44.656030Z INFO: TTestServer started on Port 22924 GrpcPort 30726 TClient is connected to server localhost:22924 PQClient connected to localhost:30726 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T20:34:44.911285Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:45.077424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:34:45.095712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:34:45.113505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 
2025-06-24T20:34:45.288956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-24T20:34:47.828310Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617092834268934:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:47.828487Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:47.831215Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617092834268970:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:47.843309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:47.876124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710662, at schemeshard: 72057594046644480 2025-06-24T20:34:47.879537Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617092834268973:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T20:34:47.990700Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617092834269037:2442] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:48.320779Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617092834269047:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:34:48.321011Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NDQ1ZWRkMC01MjEwNWJjZC0zNDgyZTY5NS1hZTI3NWRlNw==, ActorId: [1:7519617092834268932:2296], ActorState: ExecuteState, TraceId: 01jyhtd0cc7x0416yx0ykttatd, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:34:48.322086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:48.323134Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:34:48.358785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:48.445005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519617097129236614:2617] 2025-06-24T20:34:48.918871Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617075654399016:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:48.918959Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-24T20:34:54.179330Z :WriteToTopic_Demo_2_Table INFO: TTopicSdkTestSetup started 2025-06-24T20:34:54.227604Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T20:34:54.312537Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:34:54.313384Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3094: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-24T20:34:54.313582Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:34:54.313756Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72075186224037892] doesn't have tx info 2025-06-24T20:34:54.313772Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:34:54.313785Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-24T20:34:54.313804Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:34:54.313828Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:34:54.313867Z node 1 :PERSQU ... ERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037894, Partition: {0, {19, 281474976710676}, 100001}, State: StateIdle] TPartition::ReplyWrite. Partition: {0, {19, 281474976710676}, 100001} 2025-06-24T20:40:43.300780Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037894, Partition: {0, {19, 281474976710676}, 100001}, State: StateIdle] Answering for message sourceid: '\0test-message_group_id', Topic: 'topic_A', Partition: {0, {19, 281474976710676}, 100001}, SeqNo: 2, partNo: 13, Offset: 0 is stored on disk 2025-06-24T20:40:43.301886Z node 19 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: {0, {19, 281474976710676}, 100001}, State: StateIdle] need more data for compaction. 
cumulativeSize=7001240, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:40:43.302002Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic_A' partition: 0 messageNo: 1 requestId: cookie: 2 2025-06-24T20:40:43.302077Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-24T20:40:43.303720Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|611d21ed-c5112db3-b173f400-34594ff7_0] PartitionId [0] Generation [1] Write session: OnReadDone gRpcStatusCode: 0 2025-06-24T20:40:43.303991Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|611d21ed-c5112db3-b173f400-34594ff7_0] PartitionId [0] Generation [1] Write session got write response: acks { seq_no: 2 written_in_tx { } } write_statistics { persisting_time { nanos: 219000000 } min_queue_wait_time { } max_queue_wait_time { } partition_quota_wait_time { } topic_quota_wait_time { } } 2025-06-24T20:40:43.304068Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|611d21ed-c5112db3-b173f400-34594ff7_0] PartitionId [0] Generation [1] OnAck: seqNo=2, txId={ydb://session/3?node_id=19&id=YWI1NWY2MzktNDdhNmE4ZWYtYjk3ZDFkNDMtZWZmM2RiMDU=, 01jyhtqv5c7gmsknczypvffrdn}, WriteCount=1, AckCount=1 2025-06-24T20:40:43.306279Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|611d21ed-c5112db3-b173f400-34594ff7_0] PartitionId [0] Generation [1] Write session: acknoledged message 2 2025-06-24T20:40:43.317607Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:3225: [PQ: 72075186224037894] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 7519618621009295259 RawX2: 4503681231751578 } TxId: 281474976710677 Data { Operations { PartitionId: 0 Path: "/Root/topic_A" SupportivePartition: 100001 } Op: Commit SendingShards: 72075186224037894 ReceivingShards: 72075186224037894 Immediate: true WriteId { NodeId: 19 KeyId: 281474976710676 } } 2025-06-24T20:40:43.317661Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:3265: [PQ: 72075186224037894] PartitionId {0, {19, 281474976710676}, 100001} for WriteId {19, 281474976710676} 2025-06-24T20:40:43.317691Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:3355: [PQ: 72075186224037894] TxId 281474976710677 has WriteId {19, 281474976710676} 2025-06-24T20:40:43.317710Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:3380: [PQ: 72075186224037894] immediate transaction 2025-06-24T20:40:43.317791Z node 19 :PERSQUEUE DEBUG: partition.cpp:1295: [PQ: 72075186224037894, Partition: {0, {19, 281474976710676}, 100001}, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoRequest 2025-06-24T20:40:43.317923Z node 19 :PERSQUEUE DEBUG: partition.cpp:1290: [PQ: 72075186224037894, Partition: {0, {19, 281474976710676}, 100001}, State: StateIdle] Send TEvPQ::TEvGetWriteInfoResponse 2025-06-24T20:40:43.317964Z node 19 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T20:40:43.318031Z node 19 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-24T20:40:43.318082Z node 19 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Head=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0, NewHead=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 2025-06-24T20:40:43.318124Z node 19 :PERSQUEUE DEBUG: partition.cpp:2547: [PQ: 72075186224037894, Partition: 0, State: 
StateIdle] add key D0000100001_00000000000000000000_00000_0000000001_00013? 2025-06-24T20:40:43.318311Z node 19 :PERSQUEUE DEBUG: partition.cpp:2558: [PQ: 72075186224037894, Partition: 0, State: StateIdle] PartitionedBlob.GetFormedBlobs().size=1 2025-06-24T20:40:43.318379Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:1117: [PQ: 72075186224037894, Partition: 0, State: StateIdle] writing blob: topic 'topic_A' partition 0 old key D0000100001_00000000000000000000_00000_0000000001_00013? new key d0000000000_00000000000000000001_00000_0000000001_00013? size 7001240 WTime 1750797643317 2025-06-24T20:40:43.318505Z node 19 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72075186224037894, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-24T20:40:43.318764Z node 19 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T20:40:43.318796Z node 19 :PERSQUEUE DEBUG: read.h:328: CacheProxy. Rename blob from D0000100001_00000000000000000000_00000_0000000001_00013? to d0000000000_00000000000000000001_00000_0000000001_00013? 2025-06-24T20:40:43.323753Z node 19 :PERSQUEUE DEBUG: cache_eviction.h:349: Renaming head blob in L1. Old partition 100001 old offset 0 old count 1 new partition 0 new offset 1 new count 1 actorID [19:7519618616714327783:2445] 2025-06-24T20:40:43.323943Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 7000382 2025-06-24T20:40:43.324354Z node 19 :PERSQUEUE DEBUG: pq_l2_cache.cpp:179: PQ Cache (L2). Renamed. old Tablet '72075186224037894' partition 100001 offset 0 partno 0 count 1 parts 13 suffix '63', new Tablet '72075186224037894' partition 0 offset 1 partno 0 count 1 parts 13 suffix '63' 2025-06-24T20:40:43.328528Z node 19 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=7017776, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:40:43.329494Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:5193: [PQ: 72075186224037894] Handle TEvPQ::TEvTransactionCompleted WriteId {19, 281474976710676} 2025-06-24T20:40:43.329527Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:5219: [PQ: 72075186224037894] send TEvPQ::TEvDeletePartition to partition {0, {19, 281474976710676}, 100001} 2025-06-24T20:40:43.329917Z node 19 :PERSQUEUE DEBUG: partition.cpp:3863: [PQ: 72075186224037894, Partition: {0, {19, 281474976710676}, 100001}, State: StateIdle] Handle TEvPQ::TEvDeletePartition 2025-06-24T20:40:43.330188Z node 19 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T20:40:43.330211Z node 19 :PERSQUEUE DEBUG: read.h:348: CacheProxy. 
Delete blobs from D0000100001(+) to D0000100002(-) 2025-06-24T20:40:43.331031Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:5095: [PQ: 72075186224037894] Handle TEvLongTxService::TEvLockStatus LockId: 281474976710676 LockNode: 19 Status: STATUS_NOT_FOUND 2025-06-24T20:40:43.331063Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:5110: [PQ: 72075186224037894] TxWriteInfo: WriteId {19, 281474976710676}, TxId 281474976710677, Status STATUS_SUBSCRIBED 2025-06-24T20:40:43.331090Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:5120: [PQ: 72075186224037894] there is already a transaction TxId 281474976710677 for WriteId {19, 281474976710676} 2025-06-24T20:40:43.335643Z node 19 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: {0, {19, 281474976710676}, 100001}, State: StateIdle] need more data for compaction. cumulativeSize=7001240, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:40:43.335711Z node 19 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: {0, {19, 281474976710676}, 100001}, State: StateIdle] need more data for compaction. cumulativeSize=7001240, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:40:43.336413Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:5157: [PQ: 72075186224037894] Handle TEvPQ::TEvDeletePartitionDone {0, {19, 281474976710676}, 100001} 2025-06-24T20:40:43.336497Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:3608: [PQ: 72075186224037894] send TEvUnsubscribeLock for WriteId {19, 281474976710676} 2025-06-24T20:40:43.336530Z node 19 :PERSQUEUE WARN: pq_impl.cpp:4230: [PQ: 72075186224037894] Unknown transaction 281474976710677 2025-06-24T20:40:43.336604Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037894] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:40:43.338104Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037894] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:40:43.423955Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|611d21ed-c5112db3-b173f400-34594ff7_0] PartitionId [0] Generation [1] Write session: close. 
Timeout 0.000000s 2025-06-24T20:40:43.424025Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|611d21ed-c5112db3-b173f400-34594ff7_0] PartitionId [0] Generation [1] Write session will now close 2025-06-24T20:40:43.424096Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|611d21ed-c5112db3-b173f400-34594ff7_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-24T20:40:43.424730Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|611d21ed-c5112db3-b173f400-34594ff7_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-24T20:40:43.424797Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|611d21ed-c5112db3-b173f400-34594ff7_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-24T20:40:43.430871Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: test-message_group_id|611d21ed-c5112db3-b173f400-34594ff7_0 grpc read done: success: 0 data: 2025-06-24T20:40:43.430908Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 3 sessionId: test-message_group_id|611d21ed-c5112db3-b173f400-34594ff7_0 grpc read failed 2025-06-24T20:40:43.430948Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 3 sessionId: test-message_group_id|611d21ed-c5112db3-b173f400-34594ff7_0 grpc closed 2025-06-24T20:40:43.430965Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 3 sessionId: test-message_group_id|611d21ed-c5112db3-b173f400-34594ff7_0 is DEAD 2025-06-24T20:40:43.431984Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:40:43.432062Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:40:43.432131Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:40:43.433098Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [19:7519618616714327874:2464] destroyed 2025-06-24T20:40:43.433139Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [19:7519618621009295231:2464] destroyed 2025-06-24T20:40:43.433163Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [19:7519618616714327877:2464] destroyed 2025-06-24T20:40:43.433422Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 
------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::ReadWithoutConsumerFirstClassCitizen [GOOD] Test command err: 2025-06-24T20:35:22.729793Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617243927956193:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:22.729841Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004967/r3tmp/tmpuxATN4/pdisk_1.dat 2025-06-24T20:35:23.088398Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519617239915861224:2244];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:23.089529Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:35:23.089564Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:35:23.102970Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:35:23.457038Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:23.484061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:23.484170Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:23.485839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:23.485911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:23.494190Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:23.496552Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:35:23.497658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25404, node 1 2025-06-24T20:35:23.840625Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:23.841813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/004967/r3tmp/yandex4VHG0I.tmp 2025-06-24T20:35:23.841826Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/004967/r3tmp/yandex4VHG0I.tmp 2025-06-24T20:35:23.842015Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/004967/r3tmp/yandex4VHG0I.tmp 2025-06-24T20:35:23.842142Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:35:23.854210Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:23.900661Z INFO: TTestServer started on Port 61396 GrpcPort 25404 TClient is connected to server localhost:61396 PQClient connected to localhost:25404 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:24.366565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:35:24.470577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T20:35:27.217165Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617261390697852:2274], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:27.217394Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:27.217593Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617261390697870:2277], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:27.227185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:27.294193Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519617261390697880:2278], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-24T20:35:27.588423Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519617261390697907:2176] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:27.622091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:27.624017Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617265402793801:2305], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:35:27.624321Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=Y2I5YWUxODMtZTM1OWI4MTYtYWM5NzE1MGUtN2VmYTNiYzg=, ActorId: [1:7519617265402793764:2298], ActorState: ExecuteState, TraceId: 01jyhte6xf6j7v9mq6mdnetegr, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:35:27.626601Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:35:27.631653Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519617261390697921:2282], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:35:27.631919Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NzUyNDhkYjEtZTE2MjgwNmEtNjUxNzVhY2YtNTM0MmQ3ZWU=, ActorId: [2:7519617261390697849:2273], ActorState: ExecuteState, TraceId: 01jyhte6v9eqqv4ge1cs6bbd8d, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:35:27.632445Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:35:27.730077Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617243927956193:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:27.730147Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:27.814482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:27.854444Z node 2 :METADATA ... 
ession _26_2_11188095057795183721_v1 got read request: guid# 6191159e-db540df4-3dca649d-ab112a76 2025-06-24T20:40:45.115923Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2243: session cookie 2 consumer session _26_2_11188095057795183721_v1 performing read request: guid# b763de0e-9de61b18-2e7f9dd1-55594215, from# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 4(assignId:5), count# 3, size# 353, partitionsAsked# 1, maxTimeLag# 0ms 2025-06-24T20:40:45.116010Z node 26 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1384: session cookie 2 consumer session _26_2_11188095057795183721_v1 READ FROM TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 4(assignId:5)maxCount 3 maxSize 353 maxTimeLagMs 0 readTimestampMs 0 readOffset 37 EndOffset 40 ClientCommitOffset 0 committedOffset 0 Guid b763de0e-9de61b18-2e7f9dd1-55594215 2025-06-24T20:40:45.116053Z node 26 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer session _26_1_12006510889199757562_v1 is DEAD 2025-06-24T20:40:45.117858Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-06-24T20:40:45.117932Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--topic1' partition 4 2025-06-24T20:40:45.118045Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session _26_1_12006510889199757562_v1 2025-06-24T20:40:45.118106Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [26:7519618626579129094:2550] destroyed 2025-06-24T20:40:45.118132Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session _26_1_12006510889199757562_v1 2025-06-24T20:40:45.118154Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [26:7519618626579129092:2549] destroyed 2025-06-24T20:40:45.118172Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session _26_1_12006510889199757562_v1 2025-06-24T20:40:45.118193Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [26:7519618626579129091:2548] destroyed 2025-06-24T20:40:45.118210Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session _26_1_12006510889199757562_v1 2025-06-24T20:40:45.118231Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [26:7519618626579129089:2546] destroyed 2025-06-24T20:40:45.118250Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session _26_1_12006510889199757562_v1 2025-06-24T20:40:45.118269Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [26:7519618626579129090:2547] destroyed 2025-06-24T20:40:45.118335Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _26_1_12006510889199757562_v1 2025-06-24T20:40:45.118361Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _26_1_12006510889199757562_v1 2025-06-24T20:40:45.118380Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _26_1_12006510889199757562_v1 2025-06-24T20:40:45.118400Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _26_1_12006510889199757562_v1 
2025-06-24T20:40:45.118419Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _26_1_12006510889199757562_v1 2025-06-24T20:40:45.118646Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 4, State: StateIdle] read cookie 36 Topic 'rt3.dc1--topic1' partition 4 user $without_consumer offset 37 count 3 size 353 endOffset 40 max time lag 0ms effective offset 37 2025-06-24T20:40:45.119789Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 4, State: StateIdle] read cookie 36 added 3 blobs, size 468 count 3 last offset 39, current partition end offset: 40 2025-06-24T20:40:45.119856Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 4, State: StateIdle] Reading cookie 36. Send blob request. 2025-06-24T20:40:45.119950Z node 27 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 4 offset 37 partno 0 count 1 parts_count 0 source 1 size 156 accessed 1 times before, last time 2025-06-24T20:40:45.000000Z 2025-06-24T20:40:45.119976Z node 27 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 4 offset 38 partno 0 count 1 parts_count 0 source 1 size 156 accessed 1 times before, last time 2025-06-24T20:40:45.000000Z 2025-06-24T20:40:45.120006Z node 27 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 4 offset 39 partno 0 count 1 parts_count 0 source 1 size 156 accessed 1 times before, last time 2025-06-24T20:40:45.000000Z 2025-06-24T20:40:45.120060Z node 27 :PERSQUEUE DEBUG: read.h:121: Reading cookie 36. All 3 blobs are from cache. 2025-06-24T20:40:45.120138Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 3 blobs 2025-06-24T20:40:45.120757Z node 27 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 4 offset 37 partno 0 count 1 parts 0 suffix '63' 2025-06-24T20:40:45.120791Z node 27 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 4 offset 38 partno 0 count 1 parts 0 suffix '63' 2025-06-24T20:40:45.120823Z node 27 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 4 offset 39 partno 0 count 1 parts 0 suffix '63' 2025-06-24T20:40:45.121058Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 37 totakecount 1 count 1 size 136 from pos 0 cbcount 1 2025-06-24T20:40:45.121186Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 38 totakecount 1 count 1 size 136 from pos 0 cbcount 1 2025-06-24T20:40:45.121255Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 39 totakecount 1 count 1 size 136 from pos 0 cbcount 1 2025-06-24T20:40:45.121416Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--topic1' partition: 4 messageNo: 0 requestId: cookie: 37 2025-06-24T20:40:45.123292Z node 26 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 2 consumer session _26_2_11188095057795183721_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 4(assignId:5) initDone 1 event { CmdReadResult { MaxOffset: 40 Result { Offset: 37 Data: "... 94 bytes ..." SourceId: "\000source" SeqNo: 39 WriteTimestampMS: 1750797644865 CreateTimestampMS: 1750797644862 UncompressedSize: 6 PartitionKey: "" ExplicitHash: "" } Result { Offset: 38 Data: "... 94 bytes ..." 
SourceId: "\000source" SeqNo: 40 WriteTimestampMS: 1750797644880 CreateTimestampMS: 1750797644878 UncompressedSize: 6 PartitionKey: "" ExplicitHash: "" } Result { Offset: 39 Data: "... 94 bytes ..." SourceId: "\000source" SeqNo: 41 WriteTimestampMS: 1750797644895 CreateTimestampMS: 1750797644892 UncompressedSize: 6 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 3 SizeLag: 60 RealReadOffset: 39 WaitQuotaTimeMs: 0 EndOffset: 40 StartOffset: 0 } Cookie: 37 } 2025-06-24T20:40:45.123678Z node 26 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 2 consumer session _26_2_11188095057795183721_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 4(assignId:5) wait data in partition inited, cookie 1 from offset 40 2025-06-24T20:40:45.123755Z node 26 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 2 consumer session _26_2_11188095057795183721_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 4(assignId:5) EndOffset 40 ReadOffset 40 ReadGuid b763de0e-9de61b18-2e7f9dd1-55594215 has messages 1 2025-06-24T20:40:45.123977Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 2 consumer session _26_2_11188095057795183721_v1 read done: guid# b763de0e-9de61b18-2e7f9dd1-55594215, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 4(assignId:5), size# 522 2025-06-24T20:40:45.124017Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 2 consumer session _26_2_11188095057795183721_v1 response to read: guid# b763de0e-9de61b18-2e7f9dd1-55594215 2025-06-24T20:40:45.124330Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 2 consumer session _26_2_11188095057795183721_v1 Process answer. 
Aval parts: 0 Bytes readed: 522 Offset: 37 from session 5 Offset: 38 from session 5 Offset: 39 from session 5 2025-06-24T20:40:45.132551Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer session _26_2_11188095057795183721_v1 grpc read done: success# 1, data# { commit_offset_request { commit_offsets { partition_session_id: 5 offsets { end: 39 } } } } 2025-06-24T20:40:45.132596Z node 26 :PQ_READ_PROXY INFO: read_session_actor.cpp:1640: session cookie 2 consumer session _26_2_11188095057795183721_v1 closed with error: reason# can't commit when reading without a consumer 2025-06-24T20:40:45.132875Z node 26 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer session _26_2_11188095057795183721_v1 is DEAD 2025-06-24T20:40:45.136885Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session _26_2_11188095057795183721_v1 2025-06-24T20:40:45.136959Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [26:7519618626579129103:2557] destroyed 2025-06-24T20:40:45.136999Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session _26_2_11188095057795183721_v1 2025-06-24T20:40:45.137020Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [26:7519618626579129102:2556] destroyed 2025-06-24T20:40:45.137039Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session _26_2_11188095057795183721_v1 2025-06-24T20:40:45.137058Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [26:7519618626579129101:2555] destroyed 2025-06-24T20:40:45.137076Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session _26_2_11188095057795183721_v1 2025-06-24T20:40:45.137096Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [26:7519618626579129105:2554] destroyed 2025-06-24T20:40:45.137113Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session _26_2_11188095057795183721_v1 2025-06-24T20:40:45.137133Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [26:7519618626579129104:2553] destroyed 2025-06-24T20:40:45.137229Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _26_2_11188095057795183721_v1 2025-06-24T20:40:45.137285Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _26_2_11188095057795183721_v1 2025-06-24T20:40:45.137325Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _26_2_11188095057795183721_v1 2025-06-24T20:40:45.137357Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _26_2_11188095057795183721_v1 2025-06-24T20:40:45.137403Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _26_2_11188095057795183721_v1 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-70 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-71 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-68 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-69 >> IndexBuildTest::DropIndex [GOOD] >> KqpSinkLocks::InvalidateOnCommit [GOOD] >> 
KqpSinkLocks::InvalidateOlapOnCommit >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-69 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-70 >> DataShardReadIterator::ShouldReadRangeLeftInclusive [GOOD] >> DataShardReadIterator::ShouldReadRangeRightInclusive >> AutoConfig::GetASPoolsWith2CPUs [GOOD] >> KqpTx::ExplicitTcl >> TxUsage::WriteToTopic_Demo_45_Table [GOOD] |90.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetASPoolsWith2CPUs [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefix+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefix-EvWrite >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::DropIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T20:39:24.670959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T20:39:24.671066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:39:24.671105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T20:39:24.674330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T20:39:24.674456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T20:39:24.674503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T20:39:24.674602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T20:39:24.674696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T20:39:24.675575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T20:39:24.675962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T20:39:24.854485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:39:24.854549Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:39:24.880723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: 
TxInitSchema.Complete 2025-06-24T20:39:24.881247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T20:39:24.881405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T20:39:24.903506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T20:39:24.903782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T20:39:24.904466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T20:39:24.904847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T20:39:24.919941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:39:24.920218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T20:39:24.921664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:39:24.921756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T20:39:24.922037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T20:39:24.922099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T20:39:24.922154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T20:39:24.922299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T20:39:24.948464Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T20:39:25.249735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T20:39:25.250001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:39:25.250307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T20:39:25.255031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: 
[OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T20:39:25.255531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T20:39:25.255670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:39:25.268898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T20:39:25.269129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T20:39:25.269375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:39:25.269458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T20:39:25.269533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T20:39:25.269589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T20:39:25.276429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:39:25.276494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:39:25.276530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T20:39:25.284436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:39:25.284522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T20:39:25.284578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:39:25.284644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T20:39:25.296097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:39:25.304482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from 
tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T20:39:25.304721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T20:39:25.305842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T20:39:25.306018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T20:39:25.306093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:39:25.306401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T20:39:25.306449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T20:39:25.306594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T20:39:25.306657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T20:39:25.310823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T20:39:25.310954Z node 1 :FLAT_TX_SCHEMESHARD ... 
5 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:40:51.356000Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-24T20:40:51.356049Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 7], version: 18446744073709551615 2025-06-24T20:40:51.356100Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 5 2025-06-24T20:40:51.357012Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:40:51.357090Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:40:51.357111Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-24T20:40:51.357135Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 8], version: 18446744073709551615 2025-06-24T20:40:51.357159Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 3 2025-06-24T20:40:51.358381Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:40:51.358445Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:40:51.358468Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-24T20:40:51.358491Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 15 2025-06-24T20:40:51.358516Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T20:40:51.359387Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:40:51.359471Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 
LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:40:51.359502Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-24T20:40:51.359846Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-24T20:40:51.359892Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 105:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:40:51.360126Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 4 2025-06-24T20:40:51.360280Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#105:0 progress is 2/3 2025-06-24T20:40:51.360321Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 105 ready parts: 2/3 2025-06-24T20:40:51.360357Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#105:0 progress is 2/3 2025-06-24T20:40:51.360394Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 105 ready parts: 2/3 2025-06-24T20:40:51.360429Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 105, ready parts: 2/3, is published: false 2025-06-24T20:40:51.362163Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:40:51.362253Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:40:51.362286Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-24T20:40:51.362478Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:40:51.362541Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T20:40:51.362582Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-24T20:40:51.362613Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 9], version: 18446744073709551615 2025-06-24T20:40:51.362649Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId 
[OwnerId: 72057594046678944, LocalPathId: 9] was 4 2025-06-24T20:40:51.362734Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 105, ready parts: 2/3, is published: true 2025-06-24T20:40:51.364005Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 105:2, at schemeshard: 72057594046678944 2025-06-24T20:40:51.364062Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 105:2 ProgressState, at schemeshard: 72057594046678944 2025-06-24T20:40:51.364312Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 3 2025-06-24T20:40:51.364439Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#105:2 progress is 3/3 2025-06-24T20:40:51.364471Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 105 ready parts: 3/3 2025-06-24T20:40:51.364516Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#105:2 progress is 3/3 2025-06-24T20:40:51.364545Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 105 ready parts: 3/3 2025-06-24T20:40:51.364582Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 105, ready parts: 3/3, is published: true 2025-06-24T20:40:51.364660Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:418:2373] message: TxId: 105 2025-06-24T20:40:51.364725Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 105 ready parts: 3/3 2025-06-24T20:40:51.364784Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 105:0 2025-06-24T20:40:51.364830Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 105:0 2025-06-24T20:40:51.364953Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-24T20:40:51.365006Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 105:1 2025-06-24T20:40:51.365032Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 105:1 2025-06-24T20:40:51.365069Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 2 2025-06-24T20:40:51.365096Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 105:2 2025-06-24T20:40:51.365124Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 105:2 2025-06-24T20:40:51.365170Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 2 2025-06-24T20:40:51.365791Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 
72057594046678944, cookie: 105 2025-06-24T20:40:51.368136Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T20:40:51.368272Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T20:40:51.368321Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T20:40:51.368463Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T20:40:51.370258Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T20:40:51.370486Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-24T20:40:51.370538Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [3:951:2872] TestWaitNotification: OK eventTxId 105 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-23 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-24 >> TPersQueueTest::Delete [GOOD] >> TPersQueueTest::DisableWrongSettings >> BackupRestore::TestAllPrimitiveTypes-UINT64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TIMESTAMP >> TxUsage::WriteToTopic_Demo_45_Query >> TxUsage::WriteToTopic_Demo_40_Query [GOOD] >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix+Nullable+Covered [GOOD] >> KqpTx::CommitRoTx_TLI [GOOD] >> KqpTx::SnapshotROInteractive2 [GOOD] >> TNodeBrokerTest::NodesMigrationRemovedChanged >> TEnumerationTest::TestPublish [GOOD] >> TLocalTests::TestAddTenant ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::CommitRoTx_TLI [GOOD] Test command err: Trying to start YDB, gRPC: 7341, MsgBus: 20776 2025-06-24T20:40:39.347515Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618605486307766:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:39.347571Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00317e/r3tmp/tmpUXJKFP/pdisk_1.dat 2025-06-24T20:40:40.307807Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:40.321814Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618605486307669:2079] 1750797639214069 != 1750797639214072 2025-06-24T20:40:40.327343Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:40:40.344463Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:40.348617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-24T20:40:40.354706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:40:40.360927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 7341, node 1 2025-06-24T20:40:40.651756Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:40:40.651783Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:40:40.651791Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:40:40.651911Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20776 TClient is connected to server localhost:20776 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:40:41.895327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:40:41.935025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:40:41.948720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:42.320518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:40:42.597574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:42.691066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:44.302643Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618605486307766:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:44.360760Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:40:44.578174Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618626961145798:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:44.578283Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:44.995343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:45.073033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:45.147454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:45.231062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:45.274505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:45.322511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:45.356787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:45.415888Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618631256113768:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:45.415990Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:45.416243Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618631256113773:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:45.421621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:40:45.432266Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618631256113775:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:40:45.487953Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618631256113826:3433] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 6762, MsgBus: 5774 2025-06-24T20:40:48.074878Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519618643339118301:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:48.074943Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00317e/r3tmp/tmpueCPzt/pdisk_1.dat 2025-06-24T20:40:48.234936Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:48.235015Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:48.239721Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:40:48.242827Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:48.243481Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519618643339118281:2079] 1750797648074473 != 1750797648074476 TServer::EnableGrpc on GrpcPort 6762, node 2 2025-06-24T20:40:48.349306Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:40:48.349329Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:40:48.349339Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:40:48.349479Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5774 TClient is connected to server localhost:5774 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:40:49.034592Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:40:49.044301Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:40:49.054458Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:49.097778Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:40:49.174442Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:49.462000Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:49.565445Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:51.960272Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618656224021803:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:51.960347Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:52.067343Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:52.148368Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:52.197085Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:52.250803Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:52.305312Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:52.362525Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:52.409554Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:52.517761Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618660518989759:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:52.517873Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:52.518155Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618660518989764:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:52.522875Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:40:52.540592Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519618660518989766:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:40:52.608844Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519618660518989817:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:40:53.087799Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519618643339118301:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:53.089838Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> DataShardReadIterator::ShouldReturnBrokenLockWhenWriteInSeparateTransactions+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenWriteInSeparateTransactions-EvWrite ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::SnapshotROInteractive2 [GOOD] Test command err: Trying to start YDB, gRPC: 12250, MsgBus: 23286 2025-06-24T20:40:36.707734Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618592645393196:2170];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:36.708744Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003185/r3tmp/tmpQVMG3K/pdisk_1.dat 2025-06-24T20:40:37.153688Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:37.159955Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618592645393056:2079] 1750797636671881 != 1750797636671884 2025-06-24T20:40:37.245012Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:37.245145Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 12250, node 1 2025-06-24T20:40:37.252542Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:40:37.403717Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:40:37.403744Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:40:37.403750Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:40:37.403878Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:40:37.707991Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23286 TClient is connected to server localhost:23286 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:40:38.941113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:40:38.992916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:39.319904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:39.753258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:39.955101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:40:41.703277Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618592645393196:2170];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:41.703369Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:40:42.736995Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618618415198475:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:42.737096Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:43.177423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:43.219586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:43.269022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:43.354754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:43.396316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:43.461620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:43.529481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:43.618834Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618622710166432:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:43.618893Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:43.619196Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618622710166437:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:43.623728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:40:43.648039Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618622710166439:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:40:43.743647Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618622710166494:3432] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:40:45.103746Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YmU5NTIxZjQtNWU3ZDVhNDgtZWM5ZDgzYmMtYWNkNTk4ODY=, ActorId: [1:7519618627005134073:2480], ActorState: ReadyState, TraceId: 01jyhtqx7jaw7by56s871n3p80, Create QueryResponse for error on request, msg: ydb/core/kqp/session_actor/kqp_session_actor.cpp:861: Too many transactions, current active: 2 MaxTxPerSession: 2 Trying to start YDB, gRPC: 13477, MsgBus: 19518 2025-06-24T20:40:46.022055Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519618632711395228:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:46.022106Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003185/r3tmp/tmpkfsBFA/pdisk_1.dat 2025-06-24T20:40:46.248904Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:46.249273Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:46.249339Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:46.253695Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519618632711395207:2079] 1750797646017417 != 1750797646017420 2025-06-24T20:40:46.277161Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13477, node 2 2025-06-24T20:40:46.479765Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:40:46.479803Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:40:46.479812Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:40:46.479949Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19518 TClient is connected to server localhost:19518 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T20:40:47.061380Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:40:47.073908Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:40:47.084164Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:47.177963Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:47.411410Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:47.522652Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:50.143371Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618649891266034:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:50.143476Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:50.216017Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:50.258797Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:50.297486Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:50.383287Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:50.424773Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:50.503222Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:50.543018Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:50.640663Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618649891266699:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:50.640782Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:50.641138Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618649891266704:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:50.645689Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:40:50.662128Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519618649891266706:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:40:50.746889Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519618649891266757:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:40:51.024831Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519618632711395228:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:51.024951Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> DataShardTxOrder::RandomPointsAndRanges [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix+Nullable+Covered [GOOD] Test command err: Trying to start YDB, gRPC: 29720, MsgBus: 28337 2025-06-24T20:38:54.592725Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618153269778267:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:38:54.597536Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004646/r3tmp/tmp3IUug3/pdisk_1.dat 2025-06-24T20:38:55.165529Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:55.191860Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:38:55.191966Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 29720, node 1 2025-06-24T20:38:55.224865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:38:55.454925Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:38:55.454953Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:38:55.454962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:38:55.455126Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:38:55.589859Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28337 TClient is connected to server localhost:28337 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:38:56.162901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:38:56.225829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:56.418833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:56.656068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:56.768869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:38:58.829366Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618170449648972:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:58.829461Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:59.660279Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618174744616940:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:59.660355Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:59.660734Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618174744616945:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:38:59.664956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:38:59.690925Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618174744616947:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:38:59.787683Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618174744616998:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:39:01.231555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:39:02.146798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710675:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first cal ... HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 60 Memory: 137784 } ShardState: 2 UserTablePartOwners: 72075186224037919 NodeId: 3 StartTime: 1750797603481 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T20:40:53.610550Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T20:40:53.610567Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037919 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] state 'Ready' dataSize 1032 rowCount 5 cpuUsage 0.006 2025-06-24T20:40:53.610653Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037919 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] raw table stats: DataSize: 1032 RowCount: 5 IndexSize: 0 InMemSize: 1032 LastAccessTime: 1750797605100 LastUpdateTime: 1750797605100 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 5 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T20:40:53.610788Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:7519618447816710112:2424], Recipient [3:7519618417751937145:2143]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037921 TableLocalId: 12 Generation: 1 Round: 4 TableStats { DataSize: 1240 RowCount: 7 IndexSize: 0 InMemSize: 1240 LastAccessTime: 1750797605100 LastUpdateTime: 1750797605100 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 7 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 70 Memory: 137784 } ShardState: 2 
UserTablePartOwners: 72075186224037921 NodeId: 3 StartTime: 1750797603480 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T20:40:53.610802Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T20:40:53.610818Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037921 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] state 'Ready' dataSize 1240 rowCount 7 cpuUsage 0.007 2025-06-24T20:40:53.610896Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037921 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] raw table stats: DataSize: 1240 RowCount: 7 IndexSize: 0 InMemSize: 1240 LastAccessTime: 1750797605100 LastUpdateTime: 1750797605100 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 7 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T20:40:53.702671Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519618417751937145:2143]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T20:40:53.702726Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T20:40:53.702744Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 5 2025-06-24T20:40:53.702807Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 5 2025-06-24T20:40:53.702828Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 5 2025-06-24T20:40:53.702905Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 11 shard idx 72057594046644480:31 data size 960 row count 4 2025-06-24T20:40:53.702987Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037918 maps to shardIdx: 72057594046644480:31 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 11], pathId map=Join2, is column=0, is olap=0, RowCount 4, DataSize 960 2025-06-24T20:40:53.703004Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037918, followerId 0 2025-06-24T20:40:53.703072Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:31 with partCount# 0, rowCount# 4, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T20:40:53.703124Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037918 2025-06-24T20:40:53.703176Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 11 shard idx 72057594046644480:30 data size 1184 row count 6 2025-06-24T20:40:53.703212Z node 3 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037917 maps to shardIdx: 72057594046644480:30 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 11], pathId map=Join2, is column=0, is olap=0, RowCount 6, DataSize 1184 2025-06-24T20:40:53.703222Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037917, followerId 0 2025-06-24T20:40:53.703256Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:30 with partCount# 0, rowCount# 6, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T20:40:53.703269Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037917 2025-06-24T20:40:53.703290Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 12 shard idx 72057594046644480:33 data size 928 row count 4 2025-06-24T20:40:53.703330Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037920 maps to shardIdx: 72057594046644480:33 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], pathId map=TuplePrimaryDescending, is column=0, is olap=0, RowCount 4, DataSize 928 2025-06-24T20:40:53.703340Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037920, followerId 0 2025-06-24T20:40:53.703372Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:33 with partCount# 0, rowCount# 4, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T20:40:53.703396Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037920 2025-06-24T20:40:53.703417Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 12 shard idx 72057594046644480:32 data size 1032 row count 5 2025-06-24T20:40:53.703454Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037919 maps to shardIdx: 72057594046644480:32 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], pathId map=TuplePrimaryDescending, is column=0, is olap=0, RowCount 5, DataSize 1032 2025-06-24T20:40:53.703463Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037919, followerId 0 2025-06-24T20:40:53.703493Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:32 with partCount# 0, rowCount# 5, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T20:40:53.703508Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037919 2025-06-24T20:40:53.703528Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 12 shard idx 72057594046644480:34 data size 1240 row count 7 2025-06-24T20:40:53.703558Z node 3 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037921 maps to shardIdx: 72057594046644480:34 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], pathId map=TuplePrimaryDescending, is column=0, is olap=0, RowCount 7, DataSize 1240 2025-06-24T20:40:53.703567Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037921, followerId 0 2025-06-24T20:40:53.703598Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:34 with partCount# 0, rowCount# 7, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T20:40:53.703612Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037921 2025-06-24T20:40:53.703679Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T20:40:53.703830Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519618417751937145:2143]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T20:40:53.703849Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T20:40:53.703875Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-24T20:40:54.092630Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519618417751937145:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:40:54.092675Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:40:54.092724Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519618417751937145:2143], Recipient [3:7519618417751937145:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:40:54.092745Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> KqpKv::ReadRows_SpecificKey [GOOD] >> KqpKv::ReadRows_NonExistentKeys >> TLocalTests::TestAddTenant [GOOD] >> KqpLimits::TooBigQuery+useSink [GOOD] >> KqpLimits::TooBigKey+useSink >> KqpSinkTx::SnapshotROInteractive2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TLocalTests::TestAddTenant [GOOD] Test command err: 2025-06-24T20:40:57.865653Z node 1 :LOCAL ERROR: local.cpp:1293: TDomainLocal(dc-1): Receive TEvDescribeSchemeResult with bad status StatusPathDoesNotExist reason is <> while resolving subdomain dc-1 2025-06-24T20:40:57.865894Z node 1 :LOCAL ERROR: local.cpp:1543: Unknown domain dc-3 >> TxUsage::Sinks_Oltp_WriteToTopics_4_Query [GOOD] >> TDynamicNameserverTest::CacheMissNoDeadline-EnableNodeBrokerDeltaProtocol-false ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_40_Query [GOOD] Test command err: 2025-06-24T20:34:44.399418Z node 1 :METADATA_PROVIDER 
WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617076910243212:2160];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:44.406770Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:34:44.774772Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004548/r3tmp/tmpnAdTbr/pdisk_1.dat 2025-06-24T20:34:45.141524Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617076910243090:2079] 1750797284360876 != 1750797284360879 2025-06-24T20:34:45.152009Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:45.181134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:45.195334Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:45.198354Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12983, node 1 2025-06-24T20:34:45.379737Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:45.583298Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/004548/r3tmp/yandexR0wo3G.tmp 2025-06-24T20:34:45.583323Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/004548/r3tmp/yandexR0wo3G.tmp 2025-06-24T20:34:45.583538Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/004548/r3tmp/yandexR0wo3G.tmp 2025-06-24T20:34:45.583671Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:45.720548Z INFO: TTestServer started on Port 61402 GrpcPort 12983 TClient is connected to server localhost:61402 PQClient connected to localhost:12983 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:34:46.310694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:34:46.341507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T20:34:46.355888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:34:46.557365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-24T20:34:49.173467Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617098385080360:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:49.173607Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:49.174142Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617098385080381:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:49.178936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:49.208680Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617098385080383:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T20:34:49.289125Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617098385080447:2445] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:49.621293Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617076910243212:2160];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:49.621432Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:49.662303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:49.665028Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617098385080455:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:34:49.665770Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=M2QxOTFhOWEtNTlmNWFjYWYtMmU4ZWRmMTQtMTRmNWNmNjM=, ActorId: [1:7519617098385080356:2296], ActorState: ExecuteState, TraceId: 01jyhtd1nz18n2kbej9mf8344k, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:34:49.668081Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:34:49.707019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:49.833822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519617102680048035:2624] === CheckClustersList. Ok 2025-06-24T20:34:56.823422Z :WriteToTopic_Demo_3_Table INFO: TTopicSdkTestSetup started 2025-06-24T20:34:56.861473Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T20:34:56.924389Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7519617128449852028:2717] connected; active server actors: 1 2025-06-24T20:34:56.924643Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-24T20:34:56.930082Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-24T20:34:56.930227Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-24T20:34:56.931570Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-24T20:34:56.954211Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:34:56.955523Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3094: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-24T20:34:56.956022Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72075186224037892] Transaction ... paction.cpp:175: [PQ: 72075186224037894, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=6001401, count=4, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:40:54.723739Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 8 2025-06-24T20:40:54.723848Z node 19 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer test-consumer session test-consumer_19_1_17627208481660929839_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 8 } 2025-06-24T20:40:54.723890Z node 19 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_19_1_17627208481660929839_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 94 endOffset 100 with cookie 8 2025-06-24T20:40:54.723936Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_19_1_17627208481660929839_v1 replying for commits: assignId# 1, from# 8, to# 8, offset# 94 2025-06-24T20:40:54.728286Z :DEBUG: [/Root] Take Data. Partition 0. Read: {1, 0} (95-95) 2025-06-24T20:40:54.732733Z :DEBUG: [/Root] Take Data. Partition 0. Read: {2, 0} (96-96) 2025-06-24T20:40:54.733819Z :DEBUG: [/Root] Take Data. Partition 0. Read: {3, 0} (97-97) 2025-06-24T20:40:54.743752Z :DEBUG: [/Root] Take Data. Partition 0. Read: {4, 0} (98-98) 2025-06-24T20:40:54.746190Z :DEBUG: [/Root] Take Data. Partition 0. Read: {5, 0} (99-99) 2025-06-24T20:40:54.746270Z :DEBUG: [/Root] [/Root] [6f18ee36-ed87ee7e-f22e10f7-952991c1] [] The application data is transferred to the client. Number of messages 6, size 6000000 bytes 2025-06-24T20:40:54.746321Z :DEBUG: [/Root] [/Root] [6f18ee36-ed87ee7e-f22e10f7-952991c1] [] Returning serverBytesSize = 0 to budget 0 6 2025-06-24T20:40:54.776568Z :DEBUG: [/Root] [/Root] [6f18ee36-ed87ee7e-f22e10f7-952991c1] [] Commit offsets [94, 100). 
Partition stream id: 1 2025-06-24T20:40:54.779438Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_19_1_17627208481660929839_v1 grpc read done: success# 1, data# { commit_offset_request { commit_offsets { partition_session_id: 1 offsets { start: 94 end: 100 } } } } 2025-06-24T20:40:54.779677Z node 19 :PQ_READ_PROXY DEBUG: partition_actor.cpp:192: session cookie 1 consumer test-consumer session test-consumer_19_1_17627208481660929839_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) committing to position 100 prev 94 end 100 by cookie 9 2025-06-24T20:40:54.780067Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic_A' requestId: 2025-06-24T20:40:54.780113Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037894] got client message batch for topic 'topic_A' partition 0 2025-06-24T20:40:54.780244Z node 19 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer offset is set to 100 (startOffset 94) session test-consumer_19_1_17627208481660929839_v1 2025-06-24T20:40:54.780428Z node 19 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T20:40:54.781620Z node 19 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer readTimeStamp for offset 100 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T20:40:54.781688Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T20:40:54.781749Z node 19 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=6001401, count=4, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:40:54.781780Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 9 2025-06-24T20:40:54.781867Z node 19 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer test-consumer session test-consumer_19_1_17627208481660929839_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 9 } 2025-06-24T20:40:54.781902Z node 19 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_19_1_17627208481660929839_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 100 endOffset 100 with cookie 9 2025-06-24T20:40:54.781942Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_19_1_17627208481660929839_v1 replying for commits: assignId# 1, from# 9, to# 9, offset# 100 2025-06-24T20:40:54.782411Z :DEBUG: [/Root] [/Root] [6f18ee36-ed87ee7e-f22e10f7-952991c1] [] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 100 } } 2025-06-24T20:40:54.987255Z :INFO: [/Root] [/Root] [6f18ee36-ed87ee7e-f22e10f7-952991c1] Closing read session. 
Close timeout: 0.000000s 2025-06-24T20:40:54.987335Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:99:100 2025-06-24T20:40:54.987391Z :INFO: [/Root] [/Root] [6f18ee36-ed87ee7e-f22e10f7-952991c1] Counters: { Errors: 0 CurrentSessionLifetimeMs: 4106 BytesRead: 100000000 MessagesRead: 100 BytesReadCompressed: 100000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T20:40:54.987518Z :NOTICE: [/Root] [/Root] [6f18ee36-ed87ee7e-f22e10f7-952991c1] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-24T20:40:54.987567Z :DEBUG: [/Root] [/Root] [6f18ee36-ed87ee7e-f22e10f7-952991c1] [] Abort session to cluster 2025-06-24T20:40:54.988066Z :DEBUG: [/Root] 0x000051E00014BD90 TDirectReadSessionManager ServerSessionId=test-consumer_19_1_17627208481660929839_v1 Close 2025-06-24T20:40:54.988478Z :DEBUG: [/Root] 0x000051E00014BD90 TDirectReadSessionManager ServerSessionId=test-consumer_19_1_17627208481660929839_v1 Close 2025-06-24T20:40:54.988610Z :NOTICE: [/Root] [/Root] [6f18ee36-ed87ee7e-f22e10f7-952991c1] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T20:40:54.991236Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_19_1_17627208481660929839_v1 grpc read done: success# 0, data# { } 2025-06-24T20:40:54.991270Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_19_1_17627208481660929839_v1 grpc read failed 2025-06-24T20:40:54.991271Z node 19 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [19:7519618656848733822:2541]: session cookie 2 consumer test-consumer session test-consumer_19_1_17627208481660929839_v1 grpc read done: success# 0, data# { } 2025-06-24T20:40:54.991296Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [19:7519618656848733822:2541]: session cookie 2 consumer test-consumer session test-consumer_19_1_17627208481660929839_v1grpc read failed 2025-06-24T20:40:54.991301Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_19_1_17627208481660929839_v1 grpc closed 2025-06-24T20:40:54.991339Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [19:7519618656848733822:2541]: session cookie 2 consumer test-consumer session test-consumer_19_1_17627208481660929839_v1 grpc closed 2025-06-24T20:40:54.991356Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_19_1_17627208481660929839_v1 is DEAD 2025-06-24T20:40:54.991366Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [19:7519618656848733822:2541]: session cookie 2 consumer test-consumer session test-consumer_19_1_17627208481660929839_v1 proxy is DEAD 2025-06-24T20:40:54.992374Z node 19 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [19:7519618652553766509:2535] disconnected; active server actors: 1 2025-06-24T20:40:54.992403Z node 19 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [19:7519618652553766509:2535] client test-consumer disconnected session test-consumer_19_1_17627208481660929839_v1 2025-06-24T20:40:54.992534Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037894] Destroy direct read session test-consumer_19_1_17627208481660929839_v1 2025-06-24T20:40:54.992574Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [19:7519618652553766513:2538] destroyed 2025-06-24T20:40:54.993007Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|b3e7b97b-437a3ac2-b8df71c7-7f58add0_0] PartitionId [0] Generation [1] Write session: close. 
Timeout 0.000000s 2025-06-24T20:40:54.992655Z node 19 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_19_1_17627208481660929839_v1 2025-06-24T20:40:54.993040Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|b3e7b97b-437a3ac2-b8df71c7-7f58add0_0] PartitionId [0] Generation [1] Write session will now close 2025-06-24T20:40:54.993988Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|b3e7b97b-437a3ac2-b8df71c7-7f58add0_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-24T20:40:54.994330Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|b3e7b97b-437a3ac2-b8df71c7-7f58add0_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-24T20:40:54.994360Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|b3e7b97b-437a3ac2-b8df71c7-7f58add0_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-24T20:40:55.011285Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: test-message_group_id|b3e7b97b-437a3ac2-b8df71c7-7f58add0_0 grpc read done: success: 0 data: 2025-06-24T20:40:55.011321Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: test-message_group_id|b3e7b97b-437a3ac2-b8df71c7-7f58add0_0 grpc read failed 2025-06-24T20:40:55.011380Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: test-message_group_id|b3e7b97b-437a3ac2-b8df71c7-7f58add0_0 grpc closed 2025-06-24T20:40:55.011408Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: test-message_group_id|b3e7b97b-437a3ac2-b8df71c7-7f58add0_0 is DEAD 2025-06-24T20:40:55.012391Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:40:55.012431Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:40:55.013031Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [19:7519618635373896958:2474] destroyed 2025-06-24T20:40:55.013075Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [19:7519618635373896961:2474] destroyed 2025-06-24T20:40:55.013119Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 
>> TNodeBrokerTest::NodeNameReuseRestart >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-70 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-71 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-71 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-72 >> DataShardReadIterator::ShouldReadRangeRightInclusive [GOOD] >> DataShardReadIterator::ShouldReadRangeOneByOne >> BackupRestore::ImportDataShouldHandleErrors [GOOD] >> BackupRestore::BackupUuid >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-69 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-70 >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite2 [GOOD] >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite3 >> TxUsage::Transactions_Conflict_On_SeqNo_Query >> TDynamicNameserverTest::CacheMissNoDeadline-EnableNodeBrokerDeltaProtocol-false [GOOD] >> TDynamicNameserverTest::CacheMissDifferentDeadlineInverseOrder-EnableNodeBrokerDeltaProtocol-true >> TNodeBrokerTest::RegistrationPipelining >> TNodeBrokerTest::NodesMigrationRemovedChanged [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-24 [GOOD] >> TDynamicNameserverTest::CacheMissDifferentDeadlineInverseOrder-EnableNodeBrokerDeltaProtocol-true [GOOD] >> AutoConfig::GetServicePoolsWith3CPUs [GOOD] >> KqpTx::ExplicitTcl [GOOD] >> KqpTx::EmptyTxOnCommit |90.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetServicePoolsWith3CPUs [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationRemovedChanged [GOOD] Test command err: 2025-06-24T20:40:57.673390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:40:57.673472Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefix-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixLeftBorder+EvWrite ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissDifferentDeadlineInverseOrder-EnableNodeBrokerDeltaProtocol-true [GOOD] Test command err: 2025-06-24T20:41:00.415816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:00.415903Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... waiting for cache miss (done) ... unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR ... unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from to NODE_BROKER_ACTOR 2025-06-24T20:41:01.382870Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:01.382930Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... 
blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR cookie 1 ... waiting for cache miss (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 >> TNodeBrokerTest::NodeNameReuseRestart [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Query [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Query [GOOD] >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodeNameReuseRestart [GOOD] Test command err: 2025-06-24T20:41:00.967463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:00.967540Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-24T20:41:01.325251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 101 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000001 FAKE_COORDINATOR: Erasing txId 101 >> KqpKv::ReadRows_NonExistentKeys [GOOD] >> KqpKv::ReadRows_NotFullPK >> KqpRanges::IsNullInValue [GOOD] >> KqpRanges::IsNullInJsonValue >> TNodeBrokerTest::RegistrationPipelining [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TIMESTAMP [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TZ_DATE [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TZ_DATETIME [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TZ_TIMESTAMP [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TIMESTAMP64 >> KqpSnapshotIsolation::TConflictReadWriteOltp [GOOD] >> KqpSnapshotIsolation::TConflictReadWriteOltpNoSink [GOOD] >> TNodeBrokerTest::ExtendLeaseRestartRace >> KqpLocks::DifferentKeyUpdate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::RandomPointsAndRanges [GOOD] Test command err: 2025-06-24T20:38:07.266574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:38:07.266642Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:07.268555Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:38:07.281799Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:38:07.282328Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T20:38:07.282620Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:38:07.330778Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, 
Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:38:07.342828Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:38:07.343071Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:38:07.344846Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T20:38:07.344924Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T20:38:07.344977Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T20:38:07.345381Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:38:07.345470Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:38:07.345544Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T20:38:07.429135Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:38:07.469042Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T20:38:07.469246Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:38:07.469358Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T20:38:07.469392Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T20:38:07.469440Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T20:38:07.469486Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T20:38:07.469643Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:38:07.469695Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:38:07.470003Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T20:38:07.470129Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T20:38:07.470295Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T20:38:07.470335Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:38:07.470390Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T20:38:07.470424Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T20:38:07.470473Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T20:38:07.470515Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T20:38:07.470570Z node 1 :TX_DATASHARD DEBUG: 
datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T20:38:07.470680Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:07.470717Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:07.470767Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T20:38:07.474124Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nK\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\n \000Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T20:38:07.474198Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:38:07.474281Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T20:38:07.474471Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T20:38:07.474519Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T20:38:07.474567Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T20:38:07.474613Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T20:38:07.474654Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T20:38:07.474687Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T20:38:07.474740Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T20:38:07.475041Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T20:38:07.475080Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T20:38:07.475128Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T20:38:07.475265Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T20:38:07.475325Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T20:38:07.475361Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T20:38:07.475400Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T20:38:07.475430Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: 
Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T20:38:07.475457Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T20:38:07.493139Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T20:38:07.493239Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T20:38:07.493280Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T20:38:07.493321Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T20:38:07.493404Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T20:38:07.493999Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:07.494052Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:07.494105Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T20:38:07.494232Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T20:38:07.494262Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T20:38:07.494417Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T20:38:07.494459Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T20:38:07.494512Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T20:38:07.494556Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T20:38:07.498062Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T20:38:07.498149Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T20:38:07.498415Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:38:07.498462Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:38:07.498511Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T20:38:07.498550Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 
2025-06-24T20:38:07.498581Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T20:38:07.498629Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T20:38:07.498671Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100 ... 9437186 has no attached operations 2025-06-24T20:40:52.492500Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437186 2025-06-24T20:40:52.521741Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T20:40:52.521840Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:402] at 9437184 on unit CompleteOperation 2025-06-24T20:40:52.521928Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 402] from 9437184 at tablet 9437184 send result to client [4:102:2135], exec latency: 3 ms, propose latency: 5 ms 2025-06-24T20:40:52.522022Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000004 txid# 402 TabletSource# 9437186 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 399} 2025-06-24T20:40:52.522068Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T20:40:52.522367Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T20:40:52.522409Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:403] at 9437184 on unit StoreAndSendOutRS 2025-06-24T20:40:52.522447Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 400 at 9437184 from 9437184 to 9437185 txId 403 2025-06-24T20:40:52.522514Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T20:40:52.522549Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:403] at 9437184 on unit CompleteOperation 2025-06-24T20:40:52.522618Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 403] from 9437184 at tablet 9437184 send result to client [4:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T20:40:52.522681Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000004 txid# 403 TabletSource# 9437186 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 400} 2025-06-24T20:40:52.522718Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T20:40:52.576109Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [4:238:2229], Recipient [4:461:2400]: {TEvReadSet step# 1000004 txid# 402 TabletSource# 9437186 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 399} 2025-06-24T20:40:52.576206Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T20:40:52.576261Z node 4 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437186 source 9437186 dest 9437184 consumer 9437184 txId 402 2025-06-24T20:40:52.577403Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [4:238:2229], Recipient [4:461:2400]: {TEvReadSet step# 1000004 txid# 403 TabletSource# 9437186 
TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 400} 2025-06-24T20:40:52.577470Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T20:40:52.577516Z node 4 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437186 source 9437186 dest 9437184 consumer 9437184 txId 403 2025-06-24T20:40:52.578262Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [4:238:2229], Recipient [4:349:2314]: {TEvReadSet step# 1000004 txid# 403 TabletSource# 9437184 TabletDest# 9437185 SetTabletProducer# 9437184 ReadSet.Size()# 7 Seqno# 400 Flags# 0} 2025-06-24T20:40:52.578317Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T20:40:52.578366Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 9437185 source 9437184 dest 9437185 producer 9437184 txId 403 2025-06-24T20:40:52.578470Z node 4 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 9437185 got read set: {TEvReadSet step# 1000004 txid# 403 TabletSource# 9437184 TabletDest# 9437185 SetTabletProducer# 9437184 ReadSet.Size()# 7 Seqno# 400 Flags# 0} 2025-06-24T20:40:52.578537Z node 4 :TX_DATASHARD TRACE: operation.cpp:67: Filled readset for [1000004:403] from=9437184 to=9437185origin=9437184 2025-06-24T20:40:52.578663Z node 4 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437185 2025-06-24T20:40:52.579198Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [4:349:2314], Recipient [4:349:2314]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:40:52.579249Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:40:52.579324Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437185 2025-06-24T20:40:52.579380Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 active 1 active planned 1 immediate 0 planned 1 2025-06-24T20:40:52.579435Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [1000004:403] at 9437185 for LoadAndWaitInRS 2025-06-24T20:40:52.579476Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000004:403] at 9437185 on unit LoadAndWaitInRS 2025-06-24T20:40:52.579521Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000004:403] at 9437185 is Executed 2025-06-24T20:40:52.579558Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000004:403] at 9437185 executing on unit LoadAndWaitInRS 2025-06-24T20:40:52.579597Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000004:403] at 9437185 to execution unit ExecuteDataTx 2025-06-24T20:40:52.579635Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000004:403] at 9437185 on unit ExecuteDataTx 2025-06-24T20:40:52.581718Z node 4 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000004:403] at tablet 9437185 with status COMPLETE 2025-06-24T20:40:52.581795Z node 4 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000004:403] at 9437185: {NSelectRow: 4, NSelectRange: 1, NUpdateRow: 5, NEraseRow: 0, SelectRowRows: 1, SelectRowBytes: 8, SelectRangeRows: 17, SelectRangeBytes: 136, UpdateRowBytes: 37, 
EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-24T20:40:52.581876Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000004:403] at 9437185 is ExecutedNoMoreRestarts 2025-06-24T20:40:52.581919Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000004:403] at 9437185 executing on unit ExecuteDataTx 2025-06-24T20:40:52.581956Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000004:403] at 9437185 to execution unit CompleteOperation 2025-06-24T20:40:52.581997Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000004:403] at 9437185 on unit CompleteOperation 2025-06-24T20:40:52.582309Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000004:403] at 9437185 is DelayComplete 2025-06-24T20:40:52.582347Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000004:403] at 9437185 executing on unit CompleteOperation 2025-06-24T20:40:52.582383Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000004:403] at 9437185 to execution unit CompletedOperations 2025-06-24T20:40:52.582423Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000004:403] at 9437185 on unit CompletedOperations 2025-06-24T20:40:52.582468Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000004:403] at 9437185 is Executed 2025-06-24T20:40:52.582503Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000004:403] at 9437185 executing on unit CompletedOperations 2025-06-24T20:40:52.582540Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000004:403] at 9437185 has finished 2025-06-24T20:40:52.582595Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:40:52.582632Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-24T20:40:52.582669Z node 4 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437185 has no attached operations 2025-06-24T20:40:52.582701Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437185 2025-06-24T20:40:52.618583Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-24T20:40:52.618679Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:403] at 9437186 on unit CompleteOperation 2025-06-24T20:40:52.618768Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 403] from 9437186 at tablet 9437186 send result to client [4:102:2135], exec latency: 3 ms, propose latency: 5 ms 2025-06-24T20:40:52.618860Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000004 txid# 403 TabletSource# 9437185 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 400} 2025-06-24T20:40:52.618908Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-24T20:40:52.620233Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [4:461:2400], Recipient [4:349:2314]: {TEvReadSet step# 1000004 txid# 403 TabletSource# 9437185 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 400} 2025-06-24T20:40:52.620311Z node 4 
:TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T20:40:52.620359Z node 4 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437186 consumer 9437186 txId 403 2025-06-24T20:40:52.653075Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-24T20:40:52.653166Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:403] at 9437185 on unit CompleteOperation 2025-06-24T20:40:52.653253Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 403] from 9437185 at tablet 9437185 send result to client [4:102:2135], exec latency: 3 ms, propose latency: 5 ms 2025-06-24T20:40:52.653341Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437185 {TEvReadSet step# 1000004 txid# 403 TabletSource# 9437184 TabletDest# 9437185 SetTabletConsumer# 9437185 Flags# 0 Seqno# 400} 2025-06-24T20:40:52.653388Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-24T20:40:52.654243Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [4:349:2314], Recipient [4:238:2229]: {TEvReadSet step# 1000004 txid# 403 TabletSource# 9437184 TabletDest# 9437185 SetTabletConsumer# 9437185 Flags# 0 Seqno# 400} 2025-06-24T20:40:52.654312Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T20:40:52.654358Z node 4 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437185 consumer 9437185 txId 403 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::RegistrationPipelining [GOOD] Test command err: 2025-06-24T20:41:02.464571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:02.464644Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for commit ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 1 ... waiting for commit (done) ... 
unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR
>> KqpScanLogs::WideCombine-EnabledLogs [GOOD]
>> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Table
|90.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotIsolation::TConflictReadWriteOltpNoSink [GOOD]
>> DataShardReadIterator::ShouldReturnBrokenLockWhenWriteInSeparateTransactions-EvWrite [GOOD]
>> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips-EvWrite
>> TNodeBrokerTest::LoadStateMoveEpoch
>> TNodeBrokerTest::UpdateNodesLog
>> test_sql_streaming.py::test[suites-ReadTopicWithMetadataInsideFilter-default.txt] [FAIL]
>> test_sql_streaming.py::test[suites-ReadTopicWithMetadataNestedDeep-default.txt]
>> TNodeBrokerTest::UpdateEpochPipelining
>> TNodeBrokerTest::NodesMigrationExpireRemoved
|90.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/kqprun/kqprun
|90.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/kqprun/kqprun
|90.4%| [LD] {RESULT} $(B)/ydb/tests/tools/kqprun/kqprun
>> KqpSinkLocks::EmptyRangeAlreadyBrokenOlap [GOOD]
>> TxUsage::WriteToTopic_Demo_45_Query [GOOD]
>> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-71 [GOOD]
>> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-72
>> DataShardReadIterator::ShouldReadRangeOneByOne [GOOD]
>> DataShardReadIterator::ShouldReadRangePrefix1
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-72 [GOOD]
>> KqpScanSpilling::HandleErrorsCorrectly [GOOD]
>> TPersQueueTest::CreateTopicWithMeteringMode [GOOD]
>> TPersQueueTest::DefaultMeteringMode
>> KqpKv::ReadRows_NotFullPK [GOOD]
>> KqpKv::ReadRows_SpecificReturnValue
>> KqpTx::EmptyTxOnCommit [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Query [GOOD]
Test command err:
2025-06-24T20:34:44.352066Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617079329062076:2184];send_to=[0:7307199536658146131:7762515];
2025-06-24T20:34:44.352353Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2025-06-24T20:34:44.666876Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004544/r3tmp/tmpKCJUAs/pdisk_1.dat
2025-06-24T20:34:44.941578Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded
2025-06-24T20:34:44.947322Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617079329061930:2079] 1750797284309003 != 1750797284309006
2025-06-24T20:34:45.014397Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-24T20:34:45.014521Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-24T20:34:45.017419Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 17486, node 1
2025-06-24T20:34:45.126941Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file:
/home/runner/.ya/build/build_root/2btm/004544/r3tmp/yandexSnHhdO.tmp 2025-06-24T20:34:45.126968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/004544/r3tmp/yandexSnHhdO.tmp 2025-06-24T20:34:45.127125Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/004544/r3tmp/yandexSnHhdO.tmp 2025-06-24T20:34:45.127267Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:45.262912Z INFO: TTestServer started on Port 13846 GrpcPort 17486 2025-06-24T20:34:45.347309Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13846 PQClient connected to localhost:17486 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:45.782613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:34:45.842729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:34:46.019426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-24T20:34:48.489627Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617096508931881:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T20:34:48.489798Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T20:34:48.489867Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617096508931918:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:48.503234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:48.557358Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617096508931920:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T20:34:48.915485Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617096508931984:2443] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:48.971768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:49.017653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:49.079317Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617096508931992:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:34:49.079669Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NGRiZTZjMzUtODE1NGMzNDgtNTIyZTEzZTgtYjEwMzU2YTU=, ActorId: [1:7519617096508931879:2297], ActorState: ExecuteState, TraceId: 01jyhtd1088fy1w0gxc4ftv8wf, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:34:49.081842Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:34:49.113668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-24T20:34:49.346691Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617079329062076:2184];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:49.346799Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Subcribe to ClusterTracker from [1:7519617100803899586:2623] === CheckClustersList. Ok 2025-06-24T20:34:55.157905Z :ReadWithRestarts INFO: TTopicSdkTestSetup started 2025-06-24T20:34:55.196212Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T20:34:55.229934Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:34:55.239867Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3094: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-24T20:34:55.240158Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72075186224037892] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:34:55.240414Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72075186224037892] doesn't have tx info 2025-06-24T20:34:55.240437Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:34:55.240454Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-24T20:34:55.240477Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:34:55.240506Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:34:55.240547Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72075186224037892] doesn't have tx writes info 2025-06-24T20:34:55.259311Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7519617126573703539:2700] connected; active server actors: 1 2025-06-24T20:34:55.259592Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224 ... ssignId:1) initDone 1 event { Cookie: 2 } 2025-06-24T20:40:59.124227Z node 20 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 1 endOffset 1 with cookie 2 2025-06-24T20:40:59.124302Z node 20 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 replying for commits: assignId# 1, from# 2, to# 2, offset# 1 2025-06-24T20:40:59.125422Z :DEBUG: [/Root] [/Root] [d676a08a-65f3ce42-284f0990-3074442] [] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 1 } } 2025-06-24T20:40:59.889785Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-24T20:40:59.891628Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 4 sessionId: test-message_group_id|d8cc2cb3-92c8caba-9246c4bc-8f1c1481_0 describe result for acl check 2025-06-24T20:41:00.027987Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:0:1 2025-06-24T20:41:00.028093Z :INFO: [/Root] [/Root] [d676a08a-65f3ce42-284f0990-3074442] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1004 BytesRead: 144 MessagesRead: 1 BytesReadCompressed: 144 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T20:41:00.043347Z node 20 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 1 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 checking auth because of timeout 2025-06-24T20:41:00.043456Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 auth for : test-consumer 2025-06-24T20:41:00.044484Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 Handle describe topics response 2025-06-24T20:41:00.044691Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 1 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 auth is DEAD 
2025-06-24T20:41:00.044838Z node 20 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 1 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 auth ok: topics# 1, initDone# 1 2025-06-24T20:41:00.085597Z node 20 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 checking auth because of timeout 2025-06-24T20:41:00.085716Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 auth for : test-consumer 2025-06-24T20:41:00.086850Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 Handle describe topics response 2025-06-24T20:41:00.087040Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 auth is DEAD 2025-06-24T20:41:00.087257Z node 20 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 auth ok: topics# 1, initDone# 1 2025-06-24T20:41:01.097467Z node 20 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 checking auth because of timeout 2025-06-24T20:41:01.097557Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 auth for : test-consumer 2025-06-24T20:41:01.101028Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 Handle describe topics response 2025-06-24T20:41:01.101234Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 auth is DEAD 2025-06-24T20:41:01.101392Z node 20 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 auth ok: topics# 1, initDone# 1 2025-06-24T20:41:03.100743Z node 20 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 checking auth because of timeout 2025-06-24T20:41:03.144905Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 auth for : test-consumer 2025-06-24T20:41:03.156783Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 Handle describe topics response 2025-06-24T20:41:03.157057Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 auth is DEAD 2025-06-24T20:41:03.157239Z node 20 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 auth ok: topics# 1, initDone# 1 2025-06-24T20:41:03.167890Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037894] server connected, pipe [20:7519618705616318629:4503], now have 1 active actors on pipe 2025-06-24T20:41:03.175332Z :INFO: [/Root] [/Root] [d676a08a-65f3ce42-284f0990-3074442] Closing read session. 
Close timeout: 0.000000s 2025-06-24T20:41:03.175471Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:0:1 2025-06-24T20:41:03.175566Z :INFO: [/Root] [/Root] [d676a08a-65f3ce42-284f0990-3074442] Counters: { Errors: 0 CurrentSessionLifetimeMs: 4152 BytesRead: 144 MessagesRead: 1 BytesReadCompressed: 144 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T20:41:03.175752Z :NOTICE: [/Root] [/Root] [d676a08a-65f3ce42-284f0990-3074442] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-24T20:41:03.175841Z :DEBUG: [/Root] [/Root] [d676a08a-65f3ce42-284f0990-3074442] [] Abort session to cluster 2025-06-24T20:41:03.176548Z :DEBUG: [/Root] 0x000051E00055C190 TDirectReadSessionManager ServerSessionId=test-consumer_20_1_7237775049464435150_v1 Close 2025-06-24T20:41:03.177118Z :DEBUG: [/Root] 0x000051E00055C190 TDirectReadSessionManager ServerSessionId=test-consumer_20_1_7237775049464435150_v1 Close 2025-06-24T20:41:03.177323Z :NOTICE: [/Root] [/Root] [d676a08a-65f3ce42-284f0990-3074442] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T20:41:03.179027Z node 20 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 grpc read done: success# 0, data# { } 2025-06-24T20:41:03.179080Z node 20 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 grpc read failed 2025-06-24T20:41:03.179124Z node 20 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 grpc closed 2025-06-24T20:41:03.179194Z node 20 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 is DEAD 2025-06-24T20:41:03.180293Z node 20 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [20:7519618688436448964:2933]: session cookie 2 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 grpc closed 2025-06-24T20:41:03.180351Z node 20 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [20:7519618688436448964:2933]: session cookie 2 consumer test-consumer session test-consumer_20_1_7237775049464435150_v1 proxy is DEAD 2025-06-24T20:41:03.183008Z node 20 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [20:7519618688436448955:2925] disconnected; active server actors: 1 2025-06-24T20:41:03.183083Z node 20 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [20:7519618688436448955:2925] client test-consumer disconnected session test-consumer_20_1_7237775049464435150_v1 2025-06-24T20:41:03.183409Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037894] Destroy direct read session test-consumer_20_1_7237775049464435150_v1 2025-06-24T20:41:03.183496Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [20:7519618688436448958:2930] destroyed 2025-06-24T20:41:03.183576Z node 20 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_20_1_7237775049464435150_v1 2025-06-24T20:41:03.184719Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|d8cc2cb3-92c8caba-9246c4bc-8f1c1481_0] PartitionId [0] Generation [1] Write session: close. 
Timeout 0.000000s 2025-06-24T20:41:03.184796Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|d8cc2cb3-92c8caba-9246c4bc-8f1c1481_0] PartitionId [0] Generation [1] Write session will now close 2025-06-24T20:41:03.184868Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|d8cc2cb3-92c8caba-9246c4bc-8f1c1481_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-24T20:41:03.185481Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|d8cc2cb3-92c8caba-9246c4bc-8f1c1481_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-24T20:41:03.185554Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|d8cc2cb3-92c8caba-9246c4bc-8f1c1481_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-24T20:41:03.191410Z node 20 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: test-message_group_id|d8cc2cb3-92c8caba-9246c4bc-8f1c1481_0 grpc read done: success: 0 data: 2025-06-24T20:41:03.191461Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: test-message_group_id|d8cc2cb3-92c8caba-9246c4bc-8f1c1481_0 grpc read failed 2025-06-24T20:41:03.191534Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: test-message_group_id|d8cc2cb3-92c8caba-9246c4bc-8f1c1481_0 grpc closed 2025-06-24T20:41:03.191564Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: test-message_group_id|d8cc2cb3-92c8caba-9246c4bc-8f1c1481_0 is DEAD 2025-06-24T20:41:03.192846Z node 20 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:41:03.192913Z node 20 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:41:03.193360Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [20:7519618684141481560:2910] destroyed 2025-06-24T20:41:03.193405Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [20:7519618684141481563:2910] destroyed 2025-06-24T20:41:03.193509Z node 20 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. >> KqpSinkTx::SnapshotROInteractive2 [GOOD] >> KqpSnapshotIsolation::TConflictReadWriteOlap [GOOD] >> TxUsage::WriteToTopic_Demo_46_Table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanLogs::WideCombine-EnabledLogs [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/2btm/003777/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk3 Trying to start YDB, gRPC: 20147, MsgBus: 17548 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003777/r3tmp/tmpB7KAb7/pdisk_1.dat TServer::EnableGrpc on GrpcPort 20147, node 1 TClient is connected to server localhost:17548 TClient is connected to server localhost:17548 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... ( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (DataType 'Uint64)) (let $4 '('('"_logical_id" '505) '('"_id" '"f01e652f-712f86dc-6d2e3c5f-de3665ff") '('"_wide_channels" (StructType '('"Value" (OptionalType (DataType 'String))) '('_yql_agg_0 $3))))) (let $5 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($12) (block '( (let $13 (lambda '($15) (Member $15 '"Key") (Member $15 '"Value"))) (let $14 (lambda '($25 $26) $25 $26)) (return (FromFlow (WideCombiner (ExpandMap (ToFlow $12) $13) '-1073741824 (lambda '($16 $17) $17) (lambda '($18 $19 $20) (AggrCountInit $19)) (lambda '($21 $22 $23 $24) (AggrCountUpdate $22 $24)) $14))) ))) $4)) (let $6 (DqCnHashShuffle (TDqOutput $5 '0) '('0) '0 '"HashV1")) (let $7 (DqPhyStage '($6) (lambda '($27) (block '( (let $28 (WideCombiner (ToFlow $27) '"" (lambda '($29 $30) $29) (lambda '($31 $32 $33) $33) (lambda '($34 $35 $36 $37) (AggrAdd $36 $37)) (lambda '($38 $39) $39))) (return (FromFlow (NarrowMap $28 (lambda '($40) (AsStruct '('"column0" $40)))))) ))) '('('"_logical_id" '1265) '('"_id" '"9d4ee778-a831f5bd-54299b34-268b61b7")))) (let $8 (DqCnUnionAll (TDqOutput $7 '0))) (let $9 (DqPhyStage '($8) (lambda '($41) $41) '('('"_logical_id" '1533) '('"_id" '"601b4665-8a4124b9-d3e4274f-cf89dd9b")))) (let $10 '($5 $7 $9)) (let $11 (DqCnResult (TDqOutput $9 '0) '('"column0"))) (return (KqpPhysicalQuery '((KqpPhysicalTx $10 '($11) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType (StructType '('"column0" $3))) '0 '0)) '('('"type" '"query")))) ) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkLocks::EmptyRangeAlreadyBrokenOlap [GOOD] Test command err: Trying to start YDB, gRPC: 17361, MsgBus: 11425 2025-06-24T20:40:24.089079Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618541220926479:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:24.089440Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003197/r3tmp/tmpSQiHXl/pdisk_1.dat 2025-06-24T20:40:24.796228Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:24.796333Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:24.805468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:40:24.813046Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:24.814653Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618541220926285:2079] 1750797624019782 != 1750797624019785 TServer::EnableGrpc on GrpcPort 17361, node 1 2025-06-24T20:40:25.117737Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:40:25.191656Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:40:25.191677Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:40:25.191684Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:40:25.191784Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11425 TClient is connected to server localhost:11425 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:40:26.214575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:40:26.273398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:40:28.448956Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618558400796108:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:28.449076Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618558400796098:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:28.449188Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:28.452916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:40:28.466800Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618558400796127:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:40:28.568768Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618558400796178:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:40:28.880577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:29.067293Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618541220926479:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:29.067387Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:40:29.108018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:30.640623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:33.007964Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=6; 2025-06-24T20:40:33.019785Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 6 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-24T20:40:33.019980Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 6 at tablet 72075186224037888 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-24T20:40:33.020231Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [1:7519618579875640719:2932], Table: `/Root/Test` ([72057594046644480:6:1]), SessionActorId: [1:7519618575580673347:2932]Got LOCKS BROKEN for table `/Root/Test`. ShardID=72075186224037888, Sink=[1:7519618579875640719:2932].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-24T20:40:33.021917Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519618579875640712:2932], SessionActorId: [1:7519618575580673347:2932], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[1:7519618575580673347:2932]. isRollback=0 2025-06-24T20:40:33.022162Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=NGFiMjgxZDktYzM4MGQ3MGMtOGE4ZDA2MzUtODMyN2E4ZQ==, ActorId: [1:7519618575580673347:2932], ActorState: ExecuteState, TraceId: 01jyhtqhdgd2q65e2ycmd1bxym, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [1:7519618579875640713:2932] from: [1:7519618579875640712:2932] 2025-06-24T20:40:33.022254Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519618579875640713:2932] TxId: 281474976710667. Ctx: { TraceId: 01jyhtqhdgd2q65e2ycmd1bxym, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGFiMjgxZDktYzM4MGQ3MGMtOGE4ZDA2MzUtODMyN2E4ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-24T20:40:33.022491Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NGFiMjgxZDktYzM4MGQ3MGMtOGE4ZDA2MzUtODMyN2E4ZQ==, ActorId: [1:7519618575580673347:2932], ActorState: ExecuteState, TraceId: 01jyhtqhdgd2q65e2ycmd1bxym, Create QueryResponse for error on request, msg:
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 Trying to start YDB, gRPC: 18502, MsgBus: 26445 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003197/r3tmp/tmppLWjyl/pdisk_1.dat 2025-06-24T20:40:35.579385Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:40:35.584507Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:35.586234Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519618585669575459:2079] 1750797635355900 != 1750797635355903 2025-06-24T20:40:35.603095Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:35.603475Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:35.605849Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18502, node 2 2025-06-24T20:40:35.803909Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken o ... ocessing::TEvReadSetAck;tablet_id=72075186224037969;local_tx_no=71;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037970; 2025-06-24T20:41:02.715006Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037993;local_tx_no=17;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976710664; 2025-06-24T20:41:02.716100Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037969;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-24T20:41:02.716274Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037981;local_tx_no=17;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976710664; 2025-06-24T20:41:04.304495Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-24T20:41:04.310886Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-24T20:41:04.311139Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-24T20:41:04.311947Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-24T20:41:04.312149Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-24T20:41:04.312338Z 
node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-24T20:41:04.312529Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-24T20:41:04.312719Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-24T20:41:04.312930Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-24T20:41:04.317284Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-24T20:41:04.864634Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[3:7519618654611006651:2307];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037891;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=tablet lock have another internal generation counter: 18446744073709551615 != 0;tx_id=281474976710671; 2025-06-24T20:41:04.865025Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2751: SelfId: [3:7519618710445591611:4112], SessionActorId: [3:7519618701855656509:4112], Got LOCKS BROKEN for table. ShardID=72075186224037891, Sink=[3:7519618710445591611:4112].{
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 } 2025-06-24T20:41:04.865180Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:7519618710445591611:4112], SessionActorId: [3:7519618701855656509:4112], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 . sessionActorId=[3:7519618701855656509:4112]. isRollback=0 2025-06-24T20:41:04.865531Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=MzdkMGRiNjEtZGY4YWJhMzgtYThjNjE2Y2ItYWI1MTJmNTg=, ActorId: [3:7519618701855656509:4112], ActorState: ExecuteState, TraceId: 01jyhtrg2f2mazrv19avwf7prm, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [3:7519618710445591795:4112] from: [3:7519618710445591611:4112] 2025-06-24T20:41:04.865624Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:7519618710445591795:4112] TxId: 281474976710671. Ctx: { TraceId: 01jyhtrg2f2mazrv19avwf7prm, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MzdkMGRiNjEtZGY4YWJhMzgtYThjNjE2Y2ItYWI1MTJmNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001 subissue: {
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 } } 2025-06-24T20:41:04.865797Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=MzdkMGRiNjEtZGY4YWJhMzgtYThjNjE2Y2ItYWI1MTJmNTg=, ActorId: [3:7519618701855656509:4112], ActorState: ExecuteState, TraceId: 01jyhtrg2f2mazrv19avwf7prm, Create QueryResponse for error on request, msg: 2025-06-24T20:41:04.865902Z node 3 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037896 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710671 2025-06-24T20:41:04.866000Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[3:7519618654611006671:2311];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037896;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:41:04.866094Z node 3 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037897 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710671 2025-06-24T20:41:04.866141Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519618654611006672:2312];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:41:04.866200Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[3:7519618654611006651:2307];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037891;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:41:04.866401Z node 3 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037888 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710671 2025-06-24T20:41:04.866447Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[3:7519618654611006643:2306];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037888;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:41:04.866506Z node 3 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037889 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710671 2025-06-24T20:41:04.866568Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7519618654611006628:2305];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037889;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:41:04.866635Z node 3 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037890 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710671 2025-06-24T20:41:04.866675Z node 3 
:TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7519618654611006655:2310];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037890;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:41:04.866753Z node 3 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037893 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710671 2025-06-24T20:41:04.866788Z node 3 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037892 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710671 2025-06-24T20:41:04.866801Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[3:7519618654611006652:2308];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037893;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:41:04.866861Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[3:7519618654611006654:2309];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037892;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:41:04.866871Z node 3 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037894 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710671 2025-06-24T20:41:04.866932Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[3:7519618654611006685:2314];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037894;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:41:04.866980Z node 3 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037895 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710671 2025-06-24T20:41:04.867027Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[3:7519618654611006684:2313];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037895;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0;
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite3 [GOOD] |90.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut |90.4%| [LD] {RESULT} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut |90.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-70 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-71 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::HandleErrorsCorrectly [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/2btm/00376d/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk4 Trying to start YDB, gRPC: 20967, MsgBus: 25764 2025-06-24T20:34:45.763033Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617082441258863:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:45.768995Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00376d/r3tmp/tmpW8qEoX/pdisk_1.dat 2025-06-24T20:34:46.452189Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:46.466194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:46.466310Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:46.470346Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20967, node 1 2025-06-24T20:34:46.639159Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:46.639193Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:46.639205Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:46.639309Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:46.770495Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25764 TClient is connected to server localhost:25764 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:47.460855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:47.503341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:34:47.517180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:47.874596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:48.135271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:34:48.224692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:49.925149Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617099621129636:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:49.925247Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:50.470862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:50.504402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:50.551906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:50.589173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:50.641132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:50.766651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:50.767253Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617082441258863:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:50.767436Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:50.883436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:50.981650Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519617103916097593:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:50.981730Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:50.981788Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617103916097598:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:50.984928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:50.994630Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617103916097600:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:34:51.086320Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617108211064947:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:01.423461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:35:01.423505Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (OptionalType (DataType 'Uint64))) (let $4 (OptionalType (DataType 'String))) (let $5 '('('"_logical_id" '787) '('"_id" '"81d71fca-b47eccd0-e15d567f-21234330") '('"_wide_channels" (StructType '('"Key" $3) '('"Value" $4))))) (let $6 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($17) (block '( (let $18 (lambda '($19) (Member $19 '"Key") (Member $19 '"Value"))) (return (FromFlow (ExpandMap (ToFlow $17) $18))) ))) $5)) (let $7 '('1)) (let $8 (DqCnHashShuffle (TDqOutput $6 '0) $7 '1 '"HashV1")) (let $9 (StructType '('"t1.Key" $3) '('"t1.Value" $4) '('"t2.Key" $3) '('"t2.Value" $4))) (let $10 '('('"_logical_id" '685) '('"_id" '"4af8e65c-6a42e61-4863a9fd-18ca7f7f") '('"_wide_channels" $9))) (let $11 (DqPhyStage '($8) (lambda '($20) (block '( (let $21 '('0 '0 '1 '1)) (let $22 '('0 '2 '1 '3)) (let $23 (GraceSelfJoinCore (ToFlow $20) 'Full $7 $7 $21 $22 '('"t1.Value") '('"t2.Value") '())) (return (FromFlow (WideSort $23 '('('1 (Bool 'true)))))) ))) $10)) (let $12 (DqCnMerge (TDqOutput $11 '0) '('('1 '"Asc")))) (let $13 (DqPhyStage '($12) (lambda '($24) (FromFlow (NarrowMap (ToFlow $24) (lambda '($25 $26 $27 $28) (AsStruct '('"t1.Key" $25) '('"t1.Value" $26) '('"t2.Key" $27) '('"t2.Value" $28)))))) '('('"_logical_id" '697) '('"_id" '"e0da23ab-419979af-b5eba184-9fe00a39")))) (let $14 '($6 $11 $13)) (let $15 '('"t1.Key" '"t1.Value" '"t2.Key" '"t2.Value")) (let $16 (DqCnResult (TDqOutput $13 '0) $15)) (return (KqpPhysicalQuery '((KqpPhysicalTx $14 '($16) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType $9) '0 '0)) '('('"type" '"query")))) ) 2025-06-24T20:41:06.794247Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519618714528843116:7848], blobId: 0, bytes: 2402376 2025-06-24T20:41:06.817521Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519618714528843116:7848], blobId: 1, bytes: 144 2025-06-24T20:41:06.817567Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519618714528843116:7848], blobId: 2, bytes: 1200936 2025-06-24T20:41:06.817765Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519618714528843116:7848], blobId: 3, bytes: 72 2025-06-24T20:41:06.817795Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519618714528843116:7848], blobId: 4, bytes: 1200744 2025-06-24T20:41:06.817973Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. 
From: [1:7519618714528843116:7848], blobId: 5, bytes: 72 2025-06-24T20:41:06.818001Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519618714528843116:7848], blobId: 6, bytes: 1601312 2025-06-24T20:41:06.818252Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519618714528843116:7848], blobId: 7, bytes: 96 2025-06-24T20:41:06.818279Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519618714528843116:7848], blobId: 8, bytes: 2001584 2025-06-24T20:41:06.818649Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519618714528843116:7848], blobId: 9, bytes: 120 2025-06-24T20:41:06.818685Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519618714528843116:7848], blobId: 10, bytes: 2001792 2025-06-24T20:41:06.819060Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519618714528843116:7848], blobId: 11, bytes: 120 2025-06-24T20:41:06.819091Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519618714528843116:7848], blobId: 12, bytes: 2202288 2025-06-24T20:41:06.819664Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519618714528843116:7848], blobId: 13, bytes: 132 2025-06-24T20:41:06.819696Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519618714528843116:7848], blobId: 14, bytes: 2002000 2025-06-24T20:41:06.820223Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519618714528843116:7848], blobId: 15, bytes: 120 2025-06-24T20:41:06.820475Z node 1 :KQP_COMPUTE ERROR: compute_storage_actor.cpp:79: TxId: 281474976710972. Error: [TEvError] File size limit exceeded: 2/0Mb 2025-06-24T20:41:06.867409Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519618714528843107:4862], TxId: 281474976710972, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=Njc2ZDdiNjgtMTk4NWMwMzAtNzRmYTUxMy00ZTNjZGZiNQ==. TraceId : 01jyhtrha44n5vd7edxdceef01. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: INTERNAL_ERROR DEFAULT_ERROR: {
: Error: [Compute spilling][TEvError] File size limit exceeded: 2/0Mb }. 2025-06-24T20:41:06.876048Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519618714528843108:4863], TxId: 281474976710972, task: 3. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=Njc2ZDdiNjgtMTk4NWMwMzAtNzRmYTUxMy00ZTNjZGZiNQ==. TraceId : 01jyhtrha44n5vd7edxdceef01. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: INTERNAL_ERROR DEFAULT_ERROR: {
: Error: Terminate execution }. 2025-06-24T20:41:07.034251Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=Njc2ZDdiNjgtMTk4NWMwMzAtNzRmYTUxMy00ZTNjZGZiNQ==, ActorId: [1:7519618714528843091:4857], ActorState: ExecuteState, TraceId: 01jyhtrha44n5vd7edxdceef01, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::EmptyTxOnCommit [GOOD] Test command err: Trying to start YDB, gRPC: 9661, MsgBus: 1202 2025-06-24T20:40:53.610133Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618663272923731:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:53.610189Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00316a/r3tmp/tmp6mwDcH/pdisk_1.dat 2025-06-24T20:40:54.091282Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618663272923711:2079] 1750797653608811 != 1750797653608814 2025-06-24T20:40:54.100736Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:54.104918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:54.105041Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:54.113385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9661, node 1 2025-06-24T20:40:54.209829Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:40:54.209860Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:40:54.209870Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:40:54.210000Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1202 2025-06-24T20:40:54.649856Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1202 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:40:54.985870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:40:55.009147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:40:55.027635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:55.184612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:55.394692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:55.493533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:57.722311Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618680452794523:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:57.722433Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:58.088380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:58.128925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:58.212590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:58.270751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:58.317807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:58.385877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:58.468630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:58.610926Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618663272923731:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:58.611016Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:40:58.615603Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519618684747762489:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:58.615730Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:58.623519Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618684747762494:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:58.633443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:40:58.674712Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618684747762496:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:40:58.734287Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618684747762549:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:41:01.288974Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=OTFhODhkMGUtZGIzMmFmYjMtOWM3N2NkOWEtYzdmZDk1ZjQ=, ActorId: [1:7519618693337697431:2478], ActorState: ReadyState, TraceId: 01jyhtrd2y6ng1xs95ay5tzc87, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 5006, MsgBus: 30983 2025-06-24T20:41:02.171450Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519618701625738580:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:02.171496Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00316a/r3tmp/tmpzY8oLK/pdisk_1.dat 2025-06-24T20:41:02.398409Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:02.412277Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:02.412392Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:02.414556Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5006, node 2 2025-06-24T20:41:02.547851Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:41:02.547879Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:41:02.547889Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:41:02.548027Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30983 TClient is connected to server localhost:30983 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T20:41:03.186111Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:41:03.194275Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:41:03.220319Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:03.324355Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:41:03.536118Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:03.634344Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:05.938748Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618714510642052:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:05.938848Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:06.029775Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:06.080636Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:06.121696Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:06.156343Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:06.197008Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:06.280442Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:06.341032Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:06.414709Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618718805610011:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:06.414816Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:06.414981Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618718805610016:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:06.418547Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:41:06.428871Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519618718805610018:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:41:06.503435Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519618718805610069:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:41:07.171199Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519618701625738580:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:07.171277Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TNodeBrokerTest::LoadStateMoveEpoch [GOOD] >> KqpSinkLocks::InvalidateOlapOnCommit [GOOD] >> KqpSinkLocks::OlapInsertWithBulkUpsert+UseBulkUpsert [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotIsolation::TConflictReadWriteOlap [GOOD] Test command err: Trying to start YDB, gRPC: 24160, MsgBus: 23942 2025-06-24T20:40:59.272392Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618691156431974:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:59.279026Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003166/r3tmp/tmpMHqDlZ/pdisk_1.dat 2025-06-24T20:40:59.778526Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:59.778635Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:59.783386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:40:59.831120Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:59.832801Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618691156431941:2079] 1750797659265173 != 1750797659265176 TServer::EnableGrpc on GrpcPort 24160, node 1 2025-06-24T20:41:00.027489Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:41:00.027514Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:41:00.027519Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:41:00.027627Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:41:00.293559Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23942 TClient is connected to server localhost:23942 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:41:01.222861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:41:01.247485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:41:03.254610Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618708336301740:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:03.254757Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618708336301776:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:03.254802Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:03.260266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:41:03.277074Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618708336301778:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:41:03.366211Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618708336301831:2337] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:41:03.659280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:03.880196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:04.763124Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618691156431974:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:04.798200Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:41:05.098999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TNodeBrokerTest::ExtendLeaseRestartRace [GOOD] >> TDynamicNameserverTest::CacheMissSimpleDeadline-EnableNodeBrokerDeltaProtocol-false ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::LoadStateMoveEpoch [GOOD] Test command err: 2025-06-24T20:41:06.910195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:06.910265Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixLeftBorder+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixLeftBorder-EvWrite >> TNodeBrokerTest::TestListNodesEpochDeltas >> KqpScanSpilling::SpillingInRuntimeNodes+EnabledSpilling [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite3 [GOOD] Test command err: Trying to start YDB, gRPC: 7677, MsgBus: 29493 2025-06-24T20:40:48.866170Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618642055603475:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:48.866222Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003172/r3tmp/tmpL0oqSf/pdisk_1.dat 2025-06-24T20:40:49.424524Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:49.424613Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:49.471337Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618642055603459:2079] 1750797648864128 != 1750797648864131 2025-06-24T20:40:49.494584Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:49.496534Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7677, node 1 2025-06-24T20:40:49.815922Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:40:49.815952Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:40:49.815965Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:40:49.816119Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:40:49.893595Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29493 TClient is connected to server localhost:29493 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:40:50.476519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:40:50.501728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:40:52.745837Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618659235473271:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:52.746113Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:52.746666Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618659235473298:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:52.751778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:40:52.799631Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618659235473300:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:40:52.872917Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618659235473351:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:40:53.283472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:53.455338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:54.253106Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618642055603475:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:54.285531Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:40:54.605666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:59.190518Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519618689300252458:2931], SessionActorId: [1:7519618676415350488:2931], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/KV2`, code: 2001 . sessionActorId=[1:7519618676415350488:2931]. isRollback=0 2025-06-24T20:40:59.190812Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=ZmQ4MWRlYmUtY2VlMDU5YTAtZmIzMDYxZWYtN2NhZTRlMTg=, ActorId: [1:7519618676415350488:2931], ActorState: ExecuteState, TraceId: 01jyhtran9348m1j565cv17etv, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [1:7519618689300252459:2931] from: [1:7519618689300252458:2931] 2025-06-24T20:40:59.190889Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519618689300252459:2931] TxId: 281474976710666. Ctx: { TraceId: 01jyhtran9348m1j565cv17etv, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmQ4MWRlYmUtY2VlMDU5YTAtZmIzMDYxZWYtN2NhZTRlMTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/KV2`, code: 2001 } 2025-06-24T20:40:59.191120Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZmQ4MWRlYmUtY2VlMDU5YTAtZmIzMDYxZWYtN2NhZTRlMTg=, ActorId: [1:7519618676415350488:2931], ActorState: ExecuteState, TraceId: 01jyhtran9348m1j565cv17etv, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 27935, MsgBus: 16754 2025-06-24T20:41:00.670146Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519618694606500973:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:00.670272Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003172/r3tmp/tmpqg4VOq/pdisk_1.dat 2025-06-24T20:41:00.829836Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:00.833492Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519618694606500942:2079] 1750797660669514 != 1750797660669517 2025-06-24T20:41:00.845005Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:00.845096Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:00.847555Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27935, node 2 2025-06-24T20:41:00.939387Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:41:00.939408Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:41:00.939416Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:41:00.939569Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16754 TClient is connected to server localhost:16754 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:41:01.592888Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:41:01.694807Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:41:04.301995Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618711786370762:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:04.302093Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:04.302489Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618711786370774:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:04.307059Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:41:04.322955Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T20:41:04.323633Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519618711786370776:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:41:04.401608Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519618711786370828:2334] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:41:04.446967Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:04.503673Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:05.699842Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519618694606500973:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:05.700786Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:41:05.756784Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:08.076894Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NzlmNGYwNjMtYjE3NGY3OC1iODU0ZmE1Yy01YjFiZjIxYQ==, ActorId: [2:7519618724671280783:2930], ActorState: ExecuteState, TraceId: 01jyhtrknb0bjtq775qszrmhab, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ExtendLeaseRestartRace [GOOD] Test command err: 2025-06-24T20:41:05.701221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:05.701309Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... rebooting node broker ... OnActivateExecutor tabletId# 72057594037936129 ... captured cache request ... sending extend lease request ... captured cache request ... captured cache request ... waiting for response ... 
waiting for epoch update >> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionEnqueue >> TNodeBrokerTest::UpdateEpochPipelining [GOOD] >> TNodeBrokerTest::ShiftIdRangeRemoveReusedID >> TDynamicNameserverTest::CacheMissSimpleDeadline-EnableNodeBrokerDeltaProtocol-false [GOOD] >> TDynamicNameserverTest::CacheMissSameDeadline-EnableNodeBrokerDeltaProtocol-true >> BackupRestore::BackupUuid [GOOD] >> BackupRestore::RestoreViewQueryText >> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionEnqueue [GOOD] >> TFlatTableExecutor_ExecutorTxLimit::TestExecutorTxLimit [GOOD] >> TFlatTableExecutor_Follower::BasicFollowerRead >> TNodeBrokerTest::NodesMigrationExpireRemoved [GOOD] >> TFlatTableExecutor_Follower::BasicFollowerRead [GOOD] >> TFlatTableExecutor_Follower::FollowerEarlyRebootHoles [GOOD] >> TFlatTableExecutor_Follower::FollowerAttachOnTxQueueScanSnapshot >> TNodeBrokerTest::RegistrationPipeliningNodeName >> KqpLocks::DifferentKeyUpdate [GOOD] >> KqpLocks::EmptyRange >> TNodeBrokerTest::NodesMigration1000Nodes >> TNodeBrokerTest::UpdateNodesLog [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkLocks::OlapInsertWithBulkUpsert+UseBulkUpsert [GOOD] Test command err: Trying to start YDB, gRPC: 16926, MsgBus: 10350 2025-06-24T20:40:42.467383Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618617429508641:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:42.467422Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003173/r3tmp/tmpHd61wf/pdisk_1.dat 2025-06-24T20:40:43.150978Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:43.155257Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:43.158149Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:40:43.225212Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16926, node 1 2025-06-24T20:40:43.399712Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:40:43.399739Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:40:43.399749Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:40:43.399829Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:40:43.507679Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10350 TClient is connected to server localhost:10350 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:40:44.400295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:40:44.447679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:40:46.754418Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618634609378443:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:46.754535Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618634609378434:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:46.754636Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:46.759231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:40:46.773723Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618634609378449:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:40:46.876376Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618634609378500:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:40:47.211483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:47.365532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:47.563112Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618617429508641:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:47.675162Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:40:48.494356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:50.642865Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519618651789255725:2931], SessionActorId: [1:7519618647494288384:2931], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`, code: 2001 . sessionActorId=[1:7519618647494288384:2931]. isRollback=0 2025-06-24T20:40:50.643080Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=NTdhN2Y1MzUtZDliNjRjNGEtNjc1OGRmYTgtYmE5Yzc5M2Y=, ActorId: [1:7519618647494288384:2931], ActorState: ExecuteState, TraceId: 01jyhtr2nv5z3rn2gm2gxkew87, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [1:7519618651789255726:2931] from: [1:7519618651789255725:2931] 2025-06-24T20:40:50.643163Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519618651789255726:2931] TxId: 281474976710665. Ctx: { TraceId: 01jyhtr2nv5z3rn2gm2gxkew87, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTdhN2Y1MzUtZDliNjRjNGEtNjc1OGRmYTgtYmE5Yzc5M2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`, code: 2001 } 2025-06-24T20:40:50.643363Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NTdhN2Y1MzUtZDliNjRjNGEtNjc1OGRmYTgtYmE5Yzc5M2Y=, ActorId: [1:7519618647494288384:2931], ActorState: ExecuteState, TraceId: 01jyhtr2nv5z3rn2gm2gxkew87, Create QueryResponse for error on request, msg:
: Error: Transaction locks invalidated. Table: `/Root/Test`, code: 2001 Trying to start YDB, gRPC: 9924, MsgBus: 1521 2025-06-24T20:40:52.084653Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519618658324472431:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:52.108861Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003173/r3tmp/tmpOSh47v/pdisk_1.dat 2025-06-24T20:40:52.329658Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:52.335368Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519618658324472391:2079] 1750797652074448 != 1750797652074451 2025-06-24T20:40:52.352630Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:52.352793Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:52.355477Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9924, node 2 2025-06-24T20:40:52.522088Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:40:52.522113Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:40:52.522120Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:40:52.522257Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1521 2025-06-24T20:40:53.136332Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1521 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 184 ... 
025-06-24T20:41:06.379269Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;self_id=[2:7519618684094279019:2560];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037993;local_tx_no=66;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892;receive=72075186224037897; 2025-06-24T20:41:06.379310Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;self_id=[2:7519618684094279019:2560];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037993;local_tx_no=67;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892;receive=72075186224037897; 2025-06-24T20:41:06.379344Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;self_id=[2:7519618684094279019:2560];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037993;local_tx_no=68;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892;receive=72075186224037897; 2025-06-24T20:41:06.379380Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;self_id=[2:7519618684094279019:2560];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037993;local_tx_no=69;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892;receive=72075186224037897; 2025-06-24T20:41:06.379415Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;self_id=[2:7519618684094279019:2560];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037993;local_tx_no=70;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892;receive=72075186224037897; 2025-06-24T20:41:06.379450Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;self_id=[2:7519618684094279019:2560];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037993;local_tx_no=71;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892;receive=72075186224037897; 2025-06-24T20:41:06.379487Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;self_id=[2:7519618684094279019:2560];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037993;local_tx_no=72;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892;receive=72075186224037897; 2025-06-24T20:41:06.380243Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T20:41:07.289399Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:41:07.289440Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:07.538307Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T20:41:07.540150Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Complete;commit_tx_id=281474976715667;commit_lock_id=281474976715666;fline=manager.cpp:94;broken_lock_id=281474976715665; 2025-06-24T20:41:07.555687Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: 
tablet_id=72075186224037897;self_id=[2:7519618675504342445:2313];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=tablet lock have another internal generation counter: 18446744073709551615 != 0;tx_id=281474976715668; 2025-06-24T20:41:07.556889Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2751: SelfId: [2:7519618722748992351:3926], SessionActorId: [2:7519618718454024559:3926], Got LOCKS BROKEN for table. ShardID=72075186224037897, Sink=[2:7519618722748992351:3926].{
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 } 2025-06-24T20:41:07.557043Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519618722748992351:3926], SessionActorId: [2:7519618718454024559:3926], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 . sessionActorId=[2:7519618718454024559:3926]. isRollback=0 2025-06-24T20:41:07.557335Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=ODk4YzI1OTUtNTMzOTNmMjAtODYxMmVkZWMtYTY4NDk0OTY=, ActorId: [2:7519618718454024559:3926], ActorState: ExecuteState, TraceId: 01jyhtrk6z9e6pkss5tahmhyq1, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7519618722748992516:3926] from: [2:7519618722748992351:3926] 2025-06-24T20:41:07.557411Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519618722748992516:3926] TxId: 281474976715668. Ctx: { TraceId: 01jyhtrk6z9e6pkss5tahmhyq1, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODk4YzI1OTUtNTMzOTNmMjAtODYxMmVkZWMtYTY4NDk0OTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001 subissue: {
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 } }
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 2025-06-24T20:41:07.557566Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ODk4YzI1OTUtNTMzOTNmMjAtODYxMmVkZWMtYTY4NDk0OTY=, ActorId: [2:7519618718454024559:3926], ActorState: ExecuteState, TraceId: 01jyhtrk6z9e6pkss5tahmhyq1, Create QueryResponse for error on request, msg: 2025-06-24T20:41:07.560728Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037894 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715668 2025-06-24T20:41:07.560808Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[2:7519618675504342423:2308];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037894;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:41:07.560972Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037890 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715668 2025-06-24T20:41:07.561015Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[2:7519618675504342424:2309];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037890;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:41:07.561173Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037888 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715668 2025-06-24T20:41:07.561223Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[2:7519618675504342552:2314];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037888;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:41:07.561589Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037891 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715668 2025-06-24T20:41:07.561638Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[2:7519618675504342426:2311];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037891;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:41:07.561750Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037893 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715668 2025-06-24T20:41:07.561791Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[2:7519618675504342422:2307];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037893;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:41:07.561843Z node 2 :TX_COLUMNSHARD_WRITE 
WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[2:7519618675504342445:2313];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:41:07.563662Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037895 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715668 2025-06-24T20:41:07.563718Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[2:7519618675504342427:2312];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037895;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:41:07.563883Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037896 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715668 2025-06-24T20:41:07.563950Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519618675504342420:2305];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037896;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:41:07.564486Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037889 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715668 2025-06-24T20:41:07.564542Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[2:7519618675504342421:2306];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037889;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:41:07.567694Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037892 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715668 2025-06-24T20:41:07.567785Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[2:7519618675504342425:2310];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037892;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::UpdateEpochPipelining [GOOD] Test command err: 2025-06-24T20:41:08.075484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:08.075558Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-24T20:41:09.627557Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host2:1001: ERROR_TEMP: No free node IDs ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 1 ... 
unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR >> TFlatTableExecutor_Follower::FollowerAttachOnTxQueueScanSnapshot [GOOD] >> TFlatTableExecutor_Follower::FollowerAttachAfterLoan >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Query [GOOD] >> TNodeBrokerTest::NodesSubscriberDisconnect >> TFlatTableExecutor_Follower::FollowerAttachAfterLoan [GOOD] >> TFlatTableExecutor_Gc::TestFailedGcAfterReboot [GOOD] >> TFlatTableExecutor_IndexLoading::CalculateReadSize_FlatIndex |90.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut |90.4%| [LD] {RESULT} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut |90.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut >> TDynamicNameserverTest::CacheMissSameDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationExpireRemoved [GOOD] Test command err: 2025-06-24T20:41:08.048197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:08.048293Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> KqpRanges::IsNullInJsonValue [GOOD] >> KqpRanges::IsNullPartial >> KqpScanLogs::WideCombine+EnabledLogs [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::UpdateNodesLog [GOOD] Test command err: 2025-06-24T20:41:07.738384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:07.738482Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 2 ... 
unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR 2025-06-24T20:41:10.457295Z node 1 :NODE_BROKER ERROR: node_broker.cpp:1090: [DB] Removing node with wrong ID 1026 not in range (1023, 1024] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::SpillingInRuntimeNodes+EnabledSpilling [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/2btm/003778/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk7 Trying to start YDB, gRPC: 18730, MsgBus: 15263 2025-06-24T20:34:44.437605Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617079248363878:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:44.437661Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003778/r3tmp/tmp6Lr2P9/pdisk_1.dat 2025-06-24T20:34:45.207086Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:45.207236Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:45.214014Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:45.230186Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:45.232579Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617079248363858:2079] 1750797284407921 != 1750797284407924 TServer::EnableGrpc on GrpcPort 18730, node 1 2025-06-24T20:34:45.467374Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:45.467637Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:45.467645Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:45.467652Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:45.467808Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15263 TClient is connected to server localhost:15263 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:46.206138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:46.248169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:46.490595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:46.709830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:46.837433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:49.249330Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617100723201983:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:49.249440Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:49.443258Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617079248363878:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:49.443339Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:49.618171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:49.716576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:49.755547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:49.785761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:49.850531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:49.898934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:49.964267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:50.066376Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519617105018169945:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:50.066463Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:50.066667Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617105018169950:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:50.070910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:50.086152Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617105018169952:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:34:50.163337Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617105018170003:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:00.152192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:35:00.152222Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (OptionalType (DataType 'Uint64))) (let $4 (OptionalType (DataType 'String))) (let $5 '('('"_logical_id" '787) '('"_id" '"d6094545-4a5c036d-8a0a44b5-c00c1d63") '('"_wide_channels" (StructType '('"Key" $3) '('"Value" $4))))) (let $6 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($17) (block '( (let $18 (lambda '($19) (Member $19 '"Key") (Member $19 '"Value"))) (return (FromFlow (ExpandMap (ToFlow $17) $18))) ))) $5)) (let $7 '('1)) (let $8 (DqCnHashShuffle (TDqOutput $6 '0) $7 '1 '"HashV1")) (let $9 (StructType '('"t1.Key" $3) '('"t1.Value" $4) '('"t2.Key" $3) '('"t2.Value" $4))) (let $10 '('('"_logical_id" '685) '('"_id" '"ed3fd7c4-7b913fdc-774c258d-af30bddb") '('"_wide_channels" $9))) (let $11 (DqPhyStage '($8) (lambda '($20) (block '( (let $21 '('0 '0 '1 '1)) (let $22 '('0 '2 '1 '3)) (let $23 (GraceSelfJoinCore (ToFlow $20) 'Full $7 $7 $21 $22 '('"t1.Value") '('"t2.Value") '())) (return (FromFlow (WideSort $23 '('('1 (Bool 'true)))))) ))) $10)) (let $12 (DqCnMerge (TDqOutput $11 '0) '('('1 '"Asc")))) (let $13 (DqPhyStage '($12) (lambda '($24) (FromFlow (NarrowMap (ToFlow $24) (lambda '($25 $26 $27 $28) (AsStruct '('"t1.Key" $25) '('"t1.Value" $26) '('"t2.Key" $27) '('"t2.Value" $28)))))) '('('"_logical_id" '697) '('"_id" '"b4200f58-a5ffc102-60ef5c95-f972214b")))) (let $14 '($6 $11 $13)) (let $15 '('"t1.Key" '"t1.Value" '"t2.Key" '"t2.Value")) (let $16 (DqCnResult (TDqOutput $13 '0) $15)) (return (KqpPhysicalQuery '((KqpPhysicalTx $14 '($16) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType $9) '0 '0)) '('('"type" '"query")))) ) >> KqpLimits::TooBigKey+useSink [GOOD] >> KqpLimits::TooBigKey-useSink >> TDynamicNameserverTest::ListNodesCacheWhenNoChanges-EnableNodeBrokerDeltaProtocol-false ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissSameDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] Test command err: 2025-06-24T20:41:11.231304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:11.231389Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... waiting for cache miss (done) 2025-06-24T20:41:12.155779Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:12.155853Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR cookie 1 ... waiting for cache miss (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 |90.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kafka_proxy/ut/ydb-core-kafka_proxy-ut |90.5%| [LD] {RESULT} $(B)/ydb/core/kafka_proxy/ut/ydb-core-kafka_proxy-ut |90.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kafka_proxy/ut/ydb-core-kafka_proxy-ut >> TFlatTableExecutor_IndexLoading::CalculateReadSize_FlatIndex [GOOD] >> TFlatTableExecutor_IndexLoading::CalculateReadSize_BTreeIndex >> TLocalTests::TestAlterTenant >> TNodeBrokerTest::NodesMigrationManyNodesInterrupted >> BackupRestore::TestAllPrimitiveTypes-TIMESTAMP64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UTF8 >> KqpKv::ReadRows_SpecificReturnValue [GOOD] >> KqpKv::ReadRows_TimeoutCancelsReads >> TFlatTableExecutor_IndexLoading::CalculateReadSize_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_FlatIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Query [GOOD] Test command err: 2025-06-24T20:34:43.895766Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617075526832405:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:43.918596Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:34:44.212538Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00454e/r3tmp/tmp7wLvg4/pdisk_1.dat 2025-06-24T20:34:44.542193Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:44.542309Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:44.597951Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:44.615312Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617075526832378:2079] 1750797283837160 != 1750797283837163 2025-06-24T20:34:44.631699Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31157, node 1 2025-06-24T20:34:44.831927Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00454e/r3tmp/yandextF7Rw2.tmp 2025-06-24T20:34:44.831950Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00454e/r3tmp/yandextF7Rw2.tmp 2025-06-24T20:34:44.832105Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00454e/r3tmp/yandextF7Rw2.tmp 2025-06-24T20:34:44.832252Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:44.925935Z INFO: TTestServer started on Port 7351 GrpcPort 31157 2025-06-24T20:34:44.928274Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7351 PQClient connected to localhost:31157 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:45.434636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:34:45.498291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:34:45.683499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-24T20:34:45.703249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-24T20:34:48.275087Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617097001669644:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:48.275336Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:48.275921Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617097001669671:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:48.284130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:48.313982Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617097001669673:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T20:34:48.604756Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617097001669737:2448] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:48.674868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:48.769351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:48.822586Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617097001669745:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:34:48.823442Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YzBmMTU0YWMtODM5OWU3MjgtZjZkODNhOGYtNTRlMjNmNTA=, ActorId: [1:7519617097001669641:2298], ActorState: ExecuteState, TraceId: 01jyhtd0t35z5ztadq05vjwcjd, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:34:48.825690Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:34:48.901011Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617075526832405:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:48.901112Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:48.944236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519617101296637324:2628] === CheckClustersList. Ok 2025-06-24T20:34:54.448187Z :WriteToTopic_Demo_1_Table INFO: TTopicSdkTestSetup started 2025-06-24T20:34:54.475962Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T20:34:54.562495Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:34:54.562799Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3094: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-24T20:34:54.569220Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72075186224037892] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:34:54.569479Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72075186224037892] doesn't have tx info 2025-06-24T20:34:54.569498Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:34:54.569519Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-24T20:34:54.569540Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:34:54.569570Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:34:54.569604Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72075186224037892] doesn't have tx writes info 2025-06-24T20:34:54.570988Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037892] server connecte ... 1 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) committing to position 12 prev 0 end 12 by cookie 2 2025-06-24T20:41:01.515261Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic_A' requestId: 2025-06-24T20:41:01.515320Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037894] got client message batch for topic 'topic_A' partition 0 2025-06-24T20:41:01.515473Z node 19 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer offset is set to 12 (startOffset 12) session test-consumer_19_1_14849854558454574899_v1 2025-06-24T20:41:01.515655Z node 19 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-24T20:41:01.523295Z node 19 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer readTimeStamp for offset 12 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T20:41:01.523407Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T20:41:01.523464Z node 19 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:41:01.523521Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 2 2025-06-24T20:41:01.523650Z node 19 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 2 } 2025-06-24T20:41:01.523693Z node 19 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 12 endOffset 12 with cookie 2 2025-06-24T20:41:01.523743Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 replying for commits: assignId# 1, from# 2, to# 2, offset# 12 2025-06-24T20:41:01.527172Z :DEBUG: [/Root] [/Root] [4367ea0f-b4547564-c18e7d08-65754cbf] [] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 12 } } 2025-06-24T20:41:02.150287Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:11:12 2025-06-24T20:41:02.150368Z :INFO: [/Root] [/Root] [4367ea0f-b4547564-c18e7d08-65754cbf] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1003 BytesRead: 15000000 MessagesRead: 12 BytesReadCompressed: 15000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T20:41:02.165615Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 1 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 checking auth because of timeout 2025-06-24T20:41:02.165703Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 auth for : test-consumer 2025-06-24T20:41:02.166574Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 Handle describe topics response 2025-06-24T20:41:02.166710Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 1 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 auth is DEAD 2025-06-24T20:41:02.166791Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 1 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 auth ok: topics# 1, initDone# 1 2025-06-24T20:41:02.199288Z node 19 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 checking auth because of timeout 
2025-06-24T20:41:02.199402Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 auth for : test-consumer 2025-06-24T20:41:02.200415Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 Handle describe topics response 2025-06-24T20:41:02.200548Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 auth is DEAD 2025-06-24T20:41:02.200645Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 auth ok: topics# 1, initDone# 1 2025-06-24T20:41:02.627066Z node 19 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:41:03.207178Z :INFO: [/Root] [/Root] [4367ea0f-b4547564-c18e7d08-65754cbf] Closing read session. Close timeout: 0.000000s 2025-06-24T20:41:03.207261Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:11:12 2025-06-24T20:41:03.207337Z :INFO: [/Root] [/Root] [4367ea0f-b4547564-c18e7d08-65754cbf] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2060 BytesRead: 15000000 MessagesRead: 12 BytesReadCompressed: 15000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T20:41:03.207474Z :NOTICE: [/Root] [/Root] [4367ea0f-b4547564-c18e7d08-65754cbf] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-24T20:41:03.207539Z :DEBUG: [/Root] [/Root] [4367ea0f-b4547564-c18e7d08-65754cbf] [] Abort session to cluster 2025-06-24T20:41:03.208099Z :DEBUG: [/Root] 0x000051E000295D90 TDirectReadSessionManager ServerSessionId=test-consumer_19_1_14849854558454574899_v1 Close 2025-06-24T20:41:03.208531Z :DEBUG: [/Root] 0x000051E000295D90 TDirectReadSessionManager ServerSessionId=test-consumer_19_1_14849854558454574899_v1 Close 2025-06-24T20:41:03.208677Z :NOTICE: [/Root] [/Root] [4367ea0f-b4547564-c18e7d08-65754cbf] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T20:41:03.211256Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 grpc read done: success# 0, data# { } 2025-06-24T20:41:03.211296Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 grpc read failed 2025-06-24T20:41:03.211339Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 grpc closed 2025-06-24T20:41:03.211379Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 is DEAD 2025-06-24T20:41:03.212314Z node 19 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [19:7519618697597044726:2522]: session cookie 2 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 grpc read done: success# 0, data# { } 2025-06-24T20:41:03.212338Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [19:7519618697597044726:2522]: session cookie 2 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1grpc read failed 2025-06-24T20:41:03.212374Z node 19 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:349: Direct read proxy [19:7519618697597044726:2522]: session cookie 2 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 Close session with reason: reads done signal, closing everything 2025-06-24T20:41:03.212387Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:367: session cookie 2 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 closed 2025-06-24T20:41:03.212728Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [19:7519618697597044726:2522]: session cookie 2 consumer test-consumer session test-consumer_19_1_14849854558454574899_v1 proxy is DEAD 2025-06-24T20:41:03.214840Z node 19 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [19:7519618697597044717:2516] disconnected; active server actors: 1 2025-06-24T20:41:03.214872Z node 19 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [19:7519618697597044717:2516] client test-consumer disconnected session test-consumer_19_1_14849854558454574899_v1 2025-06-24T20:41:03.214999Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037894] Destroy direct read session test-consumer_19_1_14849854558454574899_v1 2025-06-24T20:41:03.215038Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [19:7519618697597044720:2519] destroyed 2025-06-24T20:41:03.215094Z node 19 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_19_1_14849854558454574899_v1 2025-06-24T20:41:03.221188Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|d372b989-f93f2bfc-8f15d777-890a6425_0] PartitionId [0] Generation [2] Write session: close. 
Timeout 0.000000s 2025-06-24T20:41:03.221258Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|d372b989-f93f2bfc-8f15d777-890a6425_0] PartitionId [0] Generation [2] Write session will now close 2025-06-24T20:41:03.221314Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|d372b989-f93f2bfc-8f15d777-890a6425_0] PartitionId [0] Generation [2] Write session: aborting 2025-06-24T20:41:03.221829Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|d372b989-f93f2bfc-8f15d777-890a6425_0] PartitionId [0] Generation [2] Write session: gracefully shut down, all writes complete 2025-06-24T20:41:03.221882Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|d372b989-f93f2bfc-8f15d777-890a6425_0] PartitionId [0] Generation [2] Write session: destroy 2025-06-24T20:41:03.228896Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|d372b989-f93f2bfc-8f15d777-890a6425_0 grpc read done: success: 0 data: 2025-06-24T20:41:03.228941Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|d372b989-f93f2bfc-8f15d777-890a6425_0 grpc read failed 2025-06-24T20:41:03.228983Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|d372b989-f93f2bfc-8f15d777-890a6425_0 grpc closed 2025-06-24T20:41:03.229003Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|d372b989-f93f2bfc-8f15d777-890a6425_0 is DEAD 2025-06-24T20:41:03.229949Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:41:03.231369Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [19:7519618689007110068:2496] destroyed 2025-06-24T20:41:03.231444Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. >> TDynamicNameserverTest::ListNodesCacheWhenNoChanges-EnableNodeBrokerDeltaProtocol-false [GOOD] >> TDynamicNameserverTest::CacheMissSimpleDeadline-EnableNodeBrokerDeltaProtocol-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> THiveTest::TestCreateSubHiveCreateManyTablets 2025-06-24 20:41:05,527 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 20:41:05,644 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. 
Process tree before termination: pid rss ref pdirt 125036 47.6M 46.2M 24.2M test_tool run_ut @/home/runner/.ya/build/build_root/2btm/00420f/ydb/core/mind/hive/ut/test-results/unittest/testing_out_stuff/chunk2/testing_out_stuff/test_tool.args 126310 785M 779M 771M └─ ydb-core-mind-hive-ut --trace-path-append /home/runner/.ya/build/build_root/2btm/00420f/ydb/core/mind/hive/ut/test-results/unittest/testing_out_stuff/chunk2/ytest.report Test command err: 2025-06-24T20:31:21.010570Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-24T20:31:21.062279Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:21.062580Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-24T20:31:21.065142Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:31:21.065562Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-24T20:31:21.066828Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-24T20:31:21.066909Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-24T20:31:21.068035Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:31:2076] ControllerId# 72057594037932033 2025-06-24T20:31:21.068100Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-24T20:31:21.068248Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-24T20:31:21.068383Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-24T20:31:21.086330Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-24T20:31:21.086397Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-24T20:31:21.090976Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:38:2081] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:21.095808Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:39:2082] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:21.096097Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:40:2083] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:21.096292Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:41:2084] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:21.096455Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: 
Group# 0 Actor# [1:30:2075] Create Queue# [1:42:2085] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:21.096597Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:43:2086] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:21.096726Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:30:2075] Create Queue# [1:44:2087] targetNodeId# 1 Marker# DSP01 2025-06-24T20:31:21.096762Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-24T20:31:21.096874Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:31:2076] 2025-06-24T20:31:21.096936Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:31:2076] 2025-06-24T20:31:21.096999Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-24T20:31:21.097133Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-24T20:31:21.098429Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-24T20:31:21.098607Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:21.107472Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [1:21:2063] 2025-06-24T20:31:21.107536Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [1:21:2063] 2025-06-24T20:31:21.107772Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:31:2076] 2025-06-24T20:31:21.107856Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:21.107912Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-24T20:31:21.108116Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:21.143871Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:21.143945Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-24T20:31:21.149348Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-24T20:31:21.149655Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:31:21.151871Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-24T20:31:21.151997Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-24T20:31:21.152089Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-24T20:31:21.152506Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:31:2076] 
2025-06-24T20:31:21.152570Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-24T20:31:21.152614Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-24T20:31:21.152721Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:21.158089Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-24T20:31:21.158167Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-24T20:31:21.158200Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-24T20:31:21.158346Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:342} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\363\365\\\016\336\205\240m2\241c\3010\003\261\342\227\n\267}" } 2025-06-24T20:31:21.158488Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:21.158735Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936129 Cookie: 0 ProxyOptions: SigNone} 2025-06-24T20:31:21.159068Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-24T20:31:21.159367Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 0} 2025-06-24T20:31:21.159440Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-24T20:31:21.159494Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 2025-06-24T20:31:21.159579Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 2} 2025-06-24T20:31:21.159628Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:21.159851Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72057594037932033 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72057594037932033 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[1:24343667:0] : 2}, 
{[1:2199047599219:0] : 8}, {[1:1099535971443:0] : 5}}}} 2025-06-24T20:31:21.169991Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72057594037932033 followers: 0 2025-06-24T20:31:21.170362Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72057594037932033] forward result error, check reconnect [1:31:2076] 2025-06-24T20:31:21.170421Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:562: TClient[72057594037932033] schedule retry [1:31:2076] 2025-06-24T20:31:21.170491Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639248 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-24T20:31:21.173717Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 1} 2025-06-24T20:31:21.173810Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [1:54:2093] 2025-06-24T20:31:21.173843Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [1:54:2093] 2025-06-24T20:31:21.175130Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:21.175257Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StInit ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-24T20:31:21.175335Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:31:21.178161Z node 1 :BS_NODE DEBUG: {NWDC ... 
large>} Item# {VMultiPutItem ID# [72075186224038247:1:2:0:0:71:1] FullDataSize# 71 DataSize# 71 Data# } Item# {VMultiPutItem ID# [72075186224038248:1:1:0:0:42:1] FullDataSize# 42 DataSize# 42 Data# } Item# {VMultiPutItem ID# [72075186224038249:1:1:0:0:42:1] FullDataSize# 42 DataSize# 42 Data# } Item# {VMultiPutItem ID# [72075186224038250:1:1:0:0:42:1] FullDataSize# 42 DataSize# 42 Data# } Item# {VMultiPutItem ID# [72075186224038251:1:1:0:0:42:1] FullDataSize# 42 DataSize# 42 Data# } Item# {VMultiPutItem ID# [72075186224038251:1:2:0:0:71:1] FullDataSize# 71 DataSize# 71 Data# } Item# {VMultiPutItem ID# [72075186224038252:1:1:0:0:42:1] FullDataSize# 42 DataSize# 42 Data# } Item# {VMultiPutItem ID# [72075186224038252:1:2:0:0:71:1] FullDataSize# 71 DataSize# 71 Data# } VDiskId# [80000000:1:0:0:0] HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog}} cookie# 0 2025-06-24T20:41:05.395568Z node 6 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:172: [ac102821b7cf00d0] bootstrap ActorId# [6:23031:18153] Group# 2147483649 TabletId# 72075186224038260 Channel# 1 RecordGeneration# 1 PerGenerationCounter# 1 Deadline# 586524-01-19T08:01:49.551615Z CollectGeneration# 1 CollectStep# 0 Collect# true Hard# false RestartCounter# 0 Marker# DSPC03 2025-06-24T20:41:05.395651Z node 6 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [6:443:2381] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[72075186224038260:1:1:1] collect=[1:0] cookie# 0 2025-06-24T20:41:05.395710Z node 6 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:172: [ada9fc30f17413f8] bootstrap ActorId# [6:23032:18154] Group# 2147483649 TabletId# 72075186224038065 Channel# 1 RecordGeneration# 1 PerGenerationCounter# 1 Deadline# 586524-01-19T08:01:49.551615Z CollectGeneration# 1 CollectStep# 0 Collect# true Hard# false RestartCounter# 0 Marker# DSPC03 2025-06-24T20:41:05.395757Z node 6 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [6:443:2381] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[72075186224038065:1:1:1] collect=[1:0] cookie# 0 2025-06-24T20:41:05.398701Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5600, NKikimr::NHive::TTxUpdateTabletStatus} queued, type NKikimr::NHive::TTxUpdateTabletStatus 2025-06-24T20:41:05.398797Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5600, NKikimr::NHive::TTxUpdateTabletStatus} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:41:05.399117Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5600, NKikimr::NHive::TTxUpdateTabletStatus} hope 1 -> done Change{3373, redo 162b alter 0b annex 0, ~{ 1 } -{ }, 0 gb} 2025-06-24T20:41:05.495381Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5600, NKikimr::NHive::TTxUpdateTabletStatus} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:41:05.496060Z node 6 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:172: [2b9cb1b18843294f] bootstrap ActorId# [6:23033:18155] Group# 2147483649 TabletId# 72075186224038258 Channel# 1 RecordGeneration# 1 PerGenerationCounter# 1 Deadline# 586524-01-19T08:01:49.551615Z CollectGeneration# 1 CollectStep# 0 Collect# true Hard# false RestartCounter# 0 Marker# DSPC03 2025-06-24T20:41:05.496153Z node 6 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [6:443:2381] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[72075186224038258:1:1:1] collect=[1:0] cookie# 0 2025-06-24T20:41:05.496563Z 
node 6 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224038102 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:41:05.498049Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5601, NKikimr::NHive::TTxUpdateTabletStatus} queued, type NKikimr::NHive::TTxUpdateTabletStatus 2025-06-24T20:41:05.498107Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5601, NKikimr::NHive::TTxUpdateTabletStatus} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:41:05.498392Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5601, NKikimr::NHive::TTxUpdateTabletStatus} hope 1 -> done Change{3374, redo 162b alter 0b annex 0, ~{ 1 } -{ }, 0 gb} 2025-06-24T20:41:05.498446Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5601, NKikimr::NHive::TTxUpdateTabletStatus} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:41:05.498968Z node 6 :BS_PROXY_COLLECT DEBUG: dsproxy_collect.cpp:44: [03e9ae5536ebf447] received TEvVCollectGarbageResult# {EvVCollectGarbageResult Status# OK TabletId# 72075186224037932 RecordGeneration# 1 Channel# 1 VDisk# [80000001:1:0:0:0]} Marker# DSPC01 2025-06-24T20:41:05.499035Z node 6 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:112: [03e9ae5536ebf447] Result# TEvCollectGarbageResult {TabletId# 72075186224037932 RecordGeneration# 1 PerGenerationCounter# 1 Channel# 1 Status# OK} Marker# DSPC02 2025-06-24T20:41:05.501580Z node 6 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:172: [e669e1af586f7126] bootstrap ActorId# [6:23038:18157] Group# 2147483649 TabletId# 72075186224038257 Channel# 1 RecordGeneration# 1 PerGenerationCounter# 1 Deadline# 586524-01-19T08:01:49.551615Z CollectGeneration# 1 CollectStep# 0 Collect# true Hard# false RestartCounter# 0 Marker# DSPC03 2025-06-24T20:41:05.501683Z node 6 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [6:443:2381] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[72075186224038257:1:1:1] collect=[1:0] cookie# 0 2025-06-24T20:41:05.502489Z node 6 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224038074 Cookie: 1} 2025-06-24T20:41:05.502577Z node 6 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224038102 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:41:05.512169Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5602, NKikimr::NHive::TTxUpdateTabletStatus} queued, type NKikimr::NHive::TTxUpdateTabletStatus 2025-06-24T20:41:05.512229Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5602, NKikimr::NHive::TTxUpdateTabletStatus} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:41:05.512491Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5602, NKikimr::NHive::TTxUpdateTabletStatus} hope 1 -> done Change{3375, redo 162b alter 0b annex 0, ~{ 1 } -{ }, 0 gb} 2025-06-24T20:41:05.512551Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5602, NKikimr::NHive::TTxUpdateTabletStatus} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:41:05.513072Z node 6 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:172: [cca2041b79d5529c] bootstrap ActorId# [6:23040:18159] Group# 2147483649 TabletId# 72075186224038255 Channel# 1 RecordGeneration# 1 PerGenerationCounter# 1 Deadline# 
586524-01-19T08:01:49.551615Z CollectGeneration# 1 CollectStep# 0 Collect# true Hard# false RestartCounter# 0 Marker# DSPC03 2025-06-24T20:41:05.513155Z node 6 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [6:443:2381] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[72075186224038255:1:1:1] collect=[1:0] cookie# 0 2025-06-24T20:41:05.514170Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5603, NKikimr::NHive::TTxUpdateTabletStatus} queued, type NKikimr::NHive::TTxUpdateTabletStatus 2025-06-24T20:41:05.514211Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5603, NKikimr::NHive::TTxUpdateTabletStatus} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:41:05.514416Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5603, NKikimr::NHive::TTxUpdateTabletStatus} hope 1 -> done Change{3376, redo 162b alter 0b annex 0, ~{ 1 } -{ }, 0 gb} 2025-06-24T20:41:05.514526Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5603, NKikimr::NHive::TTxUpdateTabletStatus} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:41:05.514990Z node 6 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:172: [db1fe8f5f9132a09] bootstrap ActorId# [6:23042:18161] Group# 2147483649 TabletId# 72075186224038066 Channel# 1 RecordGeneration# 1 PerGenerationCounter# 1 Deadline# 586524-01-19T08:01:49.551615Z CollectGeneration# 1 CollectStep# 0 Collect# true Hard# false RestartCounter# 0 Marker# DSPC03 2025-06-24T20:41:05.515063Z node 6 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [6:443:2381] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[72075186224038066:1:1:1] collect=[1:0] cookie# 0 2025-06-24T20:41:05.515341Z node 6 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224038074 Cookie: 1} 2025-06-24T20:41:05.515403Z node 6 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224038074 Cookie: 1} 2025-06-24T20:41:05.515464Z node 6 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224038102 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-24T20:41:05.516070Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5604, NKikimr::NHive::TTxUpdateTabletStatus} queued, type NKikimr::NHive::TTxUpdateTabletStatus 2025-06-24T20:41:05.516116Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5604, NKikimr::NHive::TTxUpdateTabletStatus} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T20:41:05.516310Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5604, NKikimr::NHive::TTxUpdateTabletStatus} hope 1 -> done Change{3377, redo 162b alter 0b annex 0, ~{ 1 } -{ }, 0 gb} 2025-06-24T20:41:05.516352Z node 6 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:62} Tx{5604, NKikimr::NHive::TTxUpdateTabletStatus} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T20:41:05.516545Z node 6 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72075186224038102 LockedInitializationPath Marker# TSYS32 2025-06-24T20:41:05.516801Z node 6 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224038113 Cookie: 0 ProxyOptions: SigAsync} Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 
384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/00420f/ydb/core/mind/hive/ut/test-results/unittest/testing_out_stuff/chunk2/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/00420f/ydb/core/mind/hive/ut/test-results/unittest/testing_out_stuff/chunk2/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) >> KqpScanSpilling::SpillingInRuntimeNodes-EnabledSpilling [GOOD] >> TLocalTests::TestAlterTenant [GOOD] >> TNodeBrokerTest::ExtendLeasePipelining >> TLocalTests::TestAddTenantWhileResolving ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanLogs::WideCombine+EnabledLogs [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/2btm/003731/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk2 Trying to start YDB, gRPC: 23444, MsgBus: 16972 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003731/r3tmp/tmpEJI2Ub/pdisk_1.dat TServer::EnableGrpc on GrpcPort 23444, node 1 TClient is connected to server localhost:16972 TClient is connected to server localhost:16972 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... 
( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (DataType 'Uint64)) (let $4 '('('"_logical_id" '505) '('"_id" '"ecc94e8f-f67405f6-ce6aa372-8504d9ea") '('"_wide_channels" (StructType '('"Value" (OptionalType (DataType 'String))) '('_yql_agg_0 $3))))) (let $5 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($12) (block '( (let $13 (lambda '($15) (Member $15 '"Key") (Member $15 '"Value"))) (let $14 (lambda '($25 $26) $25 $26)) (return (FromFlow (WideCombiner (ExpandMap (ToFlow $12) $13) '-1073741824 (lambda '($16 $17) $17) (lambda '($18 $19 $20) (AggrCountInit $19)) (lambda '($21 $22 $23 $24) (AggrCountUpdate $22 $24)) $14))) ))) $4)) (let $6 (DqCnHashShuffle (TDqOutput $5 '0) '('0) '0 '"HashV1")) (let $7 (DqPhyStage '($6) (lambda '($27) (block '( (let $28 (WideCombiner (ToFlow $27) '"" (lambda '($29 $30) $29) (lambda '($31 $32 $33) $33) (lambda '($34 $35 $36 $37) (AggrAdd $36 $37)) (lambda '($38 $39) $39))) (return (FromFlow (NarrowMap $28 (lambda '($40) (AsStruct '('"column0" $40)))))) ))) '('('"_logical_id" '1265) '('"_id" '"ef291aa9-38c987a0-49e8d73-64474b")))) (let $8 (DqCnUnionAll (TDqOutput $7 '0))) (let $9 (DqPhyStage '($8) (lambda '($41) $41) '('('"_logical_id" '1533) '('"_id" '"df573684-a89178da-9c61a70b-9991a4c8")))) (let $10 '($5 $7 $9)) (let $11 (DqCnResult (TDqOutput $9 '0) '('"column0"))) (return (KqpPhysicalQuery '((KqpPhysicalTx $10 '($11) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType (StructType '('"column0" $3))) '0 '0)) '('('"type" '"query")))) ) >> TNodeBrokerTest::Test1000NodesSubscribers >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips2+EvWrite >> TNodeBrokerTest::RegistrationPipeliningNodeName [GOOD] >> TDynamicNameserverTest::CacheMissSimpleDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] >> AutoConfig::GetASPoolsWith4AndMoreCPUs [GOOD] >> TPersQueueTest::DisableWrongSettings [GOOD] >> TPersQueueTest::DisableDeduplication >> TLocalTests::TestAddTenantWhileResolving [GOOD] >> KqpTx::RollbackByIdle >> KqpSqlIn::CantRewrite >> KqpSnapshotIsolation::TReadOnlyOltp [GOOD] >> KqpSnapshotIsolation::TReadOnlyOltpNoSink [GOOD] |90.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetASPoolsWith4AndMoreCPUs [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::RegistrationPipeliningNodeName [GOOD] Test command err: 2025-06-24T20:41:12.744033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:12.744113Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) 2025-06-24T20:41:13.085566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 101 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000001 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T20:41:13.137830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 102 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000002 FAKE_COORDINATOR: Erasing txId 102 ... waiting for commit ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 1 ... waiting for commit (done) ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissSimpleDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] Test command err: 2025-06-24T20:41:13.619665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:13.619761Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-24T20:41:14.290720Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:14.290790Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR cookie 1 ... waiting for cache miss (done) ... 
blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 >> TNodeBrokerTest::TestListNodesEpochDeltas [GOOD] >> TNodeBrokerTest::NodesSubscriberDisconnect [GOOD] |90.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TLocalTests::TestAddTenantWhileResolving [GOOD] |90.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotIsolation::TReadOnlyOltpNoSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::SpillingInRuntimeNodes-EnabledSpilling [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/2btm/0036fb/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk8 Trying to start YDB, gRPC: 64178, MsgBus: 17631 2025-06-24T20:34:55.026816Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617120440562714:2071];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036fb/r3tmp/tmpBQP0Vv/pdisk_1.dat 2025-06-24T20:34:55.349707Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:34:55.582137Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:55.582292Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:55.585525Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:55.597343Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:55.599070Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617120440562681:2079] 1750797294982314 != 1750797294982317 TServer::EnableGrpc on GrpcPort 64178, node 1 2025-06-24T20:34:55.899945Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:55.899983Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:55.899992Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:55.900162Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:56.040638Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17631 TClient is connected to server localhost:17631 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:56.650847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:56.702297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:56.849796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:57.065232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:57.143622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:59.108664Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617141915400788:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:59.108791Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:59.543140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:59.591812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:59.634439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:59.704442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:59.781138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:59.858020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:59.945151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:59.996009Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617120440562714:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:59.996219Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:00.056659Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519617146210368757:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:00.056785Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:00.057039Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617146210368762:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:00.062369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:00.074707Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617146210368764:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:35:00.159022Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617146210368815:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:10.553588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:35:10.553654Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (OptionalType (DataType 'Uint64))) (let $4 (OptionalType (DataType 'String))) (let $5 '('('"_logical_id" '787) '('"_id" '"7eb12656-514be5ab-731f444c-1f1d2e5e") '('"_wide_channels" (StructType '('"Key" $3) '('"Value" $4))))) (let $6 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($17) (block '( (let $18 (lambda '($19) (Member $19 '"Key") (Member $19 '"Value"))) (return (FromFlow (ExpandMap (ToFlow $17) $18))) ))) $5)) (let $7 '('1)) (let $8 (DqCnHashShuffle (TDqOutput $6 '0) $7 '1 '"HashV1")) (let $9 (StructType '('"t1.Key" $3) '('"t1.Value" $4) '('"t2.Key" $3) '('"t2.Value" $4))) (let $10 '('('"_logical_id" '685) '('"_id" '"6197a870-408456a6-c1fe8141-5fa322c9") '('"_wide_channels" $9))) (let $11 (DqPhyStage '($8) (lambda '($20) (block '( (let $21 '('0 '0 '1 '1)) (let $22 '('0 '2 '1 '3)) (let $23 (GraceSelfJoinCore (ToFlow $20) 'Full $7 $7 $21 $22 '('"t1.Value") '('"t2.Value") '())) (return (FromFlow (WideSort $23 '('('1 (Bool 'true)))))) ))) $10)) (let $12 (DqCnMerge (TDqOutput $11 '0) '('('1 '"Asc")))) (let $13 (DqPhyStage '($12) (lambda '($24) (FromFlow (NarrowMap (ToFlow $24) (lambda '($25 $26 $27 $28) (AsStruct '('"t1.Key" $25) '('"t1.Value" $26) '('"t2.Key" $27) '('"t2.Value" $28)))))) '('('"_logical_id" '697) '('"_id" '"2bce927e-e64db51f-2e2ea037-d768e449")))) (let $14 '($6 $11 $13)) (let $15 '('"t1.Key" '"t1.Value" '"t2.Key" '"t2.Value")) (let $16 (DqCnResult (TDqOutput $13 '0) $15)) (return (KqpPhysicalQuery '((KqpPhysicalTx $14 '($16) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType $9) '0 '0)) '('('"type" '"query")))) ) >> TNodeBrokerTest::NodesMigrationRemoveActive >> TNodeBrokerTest::ShiftIdRangeRemoveNew >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-72 [GOOD] >> TNodeBrokerTest::ShiftIdRangeRemoveReusedID [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesSubscriberDisconnect [GOOD] Test command err: 2025-06-24T20:41:13.439811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:13.439882Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> DataShardReadIterator::ShouldReadRangePrefix1 [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::TestListNodesEpochDeltas [GOOD] Test command err: 2025-06-24T20:41:12.061810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:12.061964Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> TConsoleConfigSubscriptionTests::TestConfigNotificationRetries [GOOD] >> TConsoleConfigSubscriptionTests::TestConfigSubscriptionsCleanup >> TNodeBrokerTest::NodesMigrationRemoveExpired ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ShiftIdRangeRemoveReusedID [GOOD] Test command err: 2025-06-24T20:41:12.429072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:12.429158Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-24T20:41:14.855565Z node 1 :NODE_BROKER ERROR: node_broker.cpp:1090: [DB] Removing node with wrong ID 1026 not in range (1023, 1025] >> TNodeBrokerTest::NodesMigration1000Nodes [GOOD] |90.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> TDynamicNameserverTest::TestCacheUsage >> TNodeBrokerTest::TestRandomActions |90.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut |90.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut |90.5%| [LD] {RESULT} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-71 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-72 >> TNodeBrokerTest::FixedNodeId >> TNodeBrokerTest::NodesMigrationNewActiveNode ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigration1000Nodes [GOOD] Test command err: 2025-06-24T20:41:13.017698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:13.017784Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> TNodeBrokerTest::ExtendLeasePipelining [GOOD] >> TPersQueueTest::TClusterTrackerTest [GOOD] >> TPersQueueTest::TestReadPartitionByGroupId >> KqpSinkMvcc::OltpNamedStatementNoSink |90.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable |90.5%| [LD] {RESULT} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable |90.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable >> TNodeBrokerTest::NodesMigrationRemoveActive [GOOD] >> TTenantPoolTests::TestStateStatic >> KqpScanLogs::GraceJoin+EnabledLogs [GOOD] >> KqpScanLogs::GraceJoin-EnabledLogs ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ExtendLeasePipelining [GOOD] Test command err: 2025-06-24T20:41:15.233061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:15.233136Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 1 ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR >> TNodeBrokerTest::SingleDomainModeBannedIds |90.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load |90.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load |90.5%| [LD] {RESULT} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationRemoveActive [GOOD] Test command err: 2025-06-24T20:41:16.885461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:16.885539Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> TNodeBrokerTest::ShiftIdRangeRemoveNew [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixLeftBorder-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixRightBorder+EvWrite >> TDynamicNameserverTest::TestCacheUsage [GOOD] >> TDynamicNameserverTest::ListNodesCacheWhenNoChanges-EnableNodeBrokerDeltaProtocol-true >> KqpLocks::EmptyRange [GOOD] >> KqpLocks::EmptyRangeAlreadyBroken >> TNodeBrokerTest::NodesMigrationManyNodesInterrupted [GOOD] >> TTenantPoolTests::TestStateStatic [GOOD] |90.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/config/ut/ydb-services-config-ut |90.5%| [LD] {RESULT} $(B)/ydb/services/config/ut/ydb-services-config-ut |90.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/config/ut/ydb-services-config-ut >> TLocalTests::TestRemoveTenantWhileResolving |90.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/etcd_proxy/service/ut/ydb-apps-etcd_proxy-service-ut |90.5%| [LD] {RESULT} $(B)/ydb/apps/etcd_proxy/service/ut/ydb-apps-etcd_proxy-service-ut |90.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/etcd_proxy/service/ut/ydb-apps-etcd_proxy-service-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Query [GOOD] Test command err: 2025-06-24T20:34:42.729847Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617069407145582:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:42.729912Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:34:43.112892Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004567/r3tmp/tmpFkWWEa/pdisk_1.dat 2025-06-24T20:34:43.431312Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:43.440651Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:43.440744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:43.443876Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617069407145561:2079] 1750797282725985 != 1750797282725988 2025-06-24T20:34:43.444475Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5714, node 1 2025-06-24T20:34:43.778902Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:43.841832Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/004567/r3tmp/yandexQjy1w0.tmp 2025-06-24T20:34:43.841859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/004567/r3tmp/yandexQjy1w0.tmp 2025-06-24T20:34:43.842055Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: 
/home/runner/.ya/build/build_root/2btm/004567/r3tmp/yandexQjy1w0.tmp 2025-06-24T20:34:43.842174Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:43.994344Z INFO: TTestServer started on Port 24638 GrpcPort 5714 TClient is connected to server localhost:24638 PQClient connected to localhost:5714 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:44.512546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... 2025-06-24T20:34:44.551420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:34:44.717292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-24T20:34:47.731406Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617069407145582:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:47.731484Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:47.822095Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617090881982827:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:47.823903Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:47.824865Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617090881982855:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:47.828928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:47.858242Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617090881982857:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T20:34:47.944061Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617090881982923:2448] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:48.312758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:48.382775Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617090881982931:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:34:48.383346Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MzFhOTI0MjgtOTVmYmNiMTYtODM1NzM2YTgtNWRkNTBiYg==, ActorId: [1:7519617090881982825:2298], ActorState: ExecuteState, TraceId: 01jyhtd0ca2nvxjkadw4gdgcs1, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:34:48.385633Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:34:48.402397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:48.522555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519617095176950507:2626] === CheckClustersList. Ok 2025-06-24T20:34:54.127010Z :WriteToTopic_Demo_20_RestartNo_Table INFO: TTopicSdkTestSetup started 2025-06-24T20:34:54.169518Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T20:34:54.200260Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7519617120946754469:2706] connected; active server actors: 1 2025-06-24T20:34:54.201006Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-24T20:34:54.202659Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-24T20:34:54.202815Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-24T20:34:54.204555Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-24T20:34:54.210499Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:34:54.215249Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3094: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-24T20:34:54.215576Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:34:54.215882Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72075186224037892] doesn't have tx inf ... 412065943869697_v1 grpc read done: success# 1, data# { commit_offset_request { commit_offsets { partition_session_id: 1 offsets { start: 1 end: 2 } } } } 2025-06-24T20:41:10.607780Z node 19 :PQ_READ_PROXY DEBUG: partition_actor.cpp:192: session cookie 1 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) committing to position 2 prev 1 end 2 by cookie 3 2025-06-24T20:41:10.608363Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic_A' requestId: 2025-06-24T20:41:10.608408Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037894] got client message batch for topic 'topic_A' partition 0 2025-06-24T20:41:10.608615Z node 19 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer offset is set to 2 (startOffset 2) session test-consumer_19_1_15022412065943869697_v1 2025-06-24T20:41:10.608866Z node 19 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-24T20:41:10.611250Z :DEBUG: [/Root] [/Root] [4c47b404-c30b1a6b-53083e45-718dec09] [] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 2 } } 2025-06-24T20:41:10.610211Z node 19 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer readTimeStamp for offset 2 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T20:41:10.610286Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T20:41:10.610338Z node 19 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:41:10.610385Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 3 2025-06-24T20:41:10.610495Z node 19 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 3 } 2025-06-24T20:41:10.610536Z node 19 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 2 endOffset 2 with cookie 3 2025-06-24T20:41:10.610585Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 replying for commits: assignId# 1, from# 3, to# 3, offset# 2 2025-06-24T20:41:11.220097Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:1:2 2025-06-24T20:41:11.220481Z :INFO: [/Root] [/Root] [4c47b404-c30b1a6b-53083e45-718dec09] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1003 BytesRead: 14000000 MessagesRead: 2 BytesReadCompressed: 14000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T20:41:11.242951Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 1 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 checking auth because of timeout 2025-06-24T20:41:11.243048Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 auth for : test-consumer 2025-06-24T20:41:11.243978Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 Handle describe topics response 2025-06-24T20:41:11.244120Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 1 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 auth is DEAD 2025-06-24T20:41:11.244218Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 1 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 auth ok: topics# 1, initDone# 1 2025-06-24T20:41:11.315759Z node 19 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 checking auth because of timeout 
2025-06-24T20:41:11.315836Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 auth for : test-consumer 2025-06-24T20:41:11.316970Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 Handle describe topics response 2025-06-24T20:41:11.317087Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 auth is DEAD 2025-06-24T20:41:11.317161Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 auth ok: topics# 1, initDone# 1 2025-06-24T20:41:12.168304Z node 19 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:41:12.278954Z :INFO: [/Root] [/Root] [4c47b404-c30b1a6b-53083e45-718dec09] Closing read session. Close timeout: 0.000000s 2025-06-24T20:41:12.279034Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:1:2 2025-06-24T20:41:12.279103Z :INFO: [/Root] [/Root] [4c47b404-c30b1a6b-53083e45-718dec09] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2062 BytesRead: 14000000 MessagesRead: 2 BytesReadCompressed: 14000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T20:41:12.280092Z :NOTICE: [/Root] [/Root] [4c47b404-c30b1a6b-53083e45-718dec09] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-24T20:41:12.280173Z :DEBUG: [/Root] [/Root] [4c47b404-c30b1a6b-53083e45-718dec09] [] Abort session to cluster 2025-06-24T20:41:12.280787Z :DEBUG: [/Root] 0x000051E0002DF590 TDirectReadSessionManager ServerSessionId=test-consumer_19_1_15022412065943869697_v1 Close 2025-06-24T20:41:12.281260Z :DEBUG: [/Root] 0x000051E0002DF590 TDirectReadSessionManager ServerSessionId=test-consumer_19_1_15022412065943869697_v1 Close 2025-06-24T20:41:12.281409Z :NOTICE: [/Root] [/Root] [4c47b404-c30b1a6b-53083e45-718dec09] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T20:41:12.283736Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|4ffefabe-68a036cd-113bf47f-f38b429a_0] PartitionId [0] Generation [2] Write session: close. Timeout 0.000000s 2025-06-24T20:41:12.283794Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|4ffefabe-68a036cd-113bf47f-f38b429a_0] PartitionId [0] Generation [2] Write session will now close 2025-06-24T20:41:12.283856Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|4ffefabe-68a036cd-113bf47f-f38b429a_0] PartitionId [0] Generation [2] Write session: aborting 2025-06-24T20:41:12.283604Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 grpc read done: success# 0, data# { } 2025-06-24T20:41:12.283654Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 grpc read failed 2025-06-24T20:41:12.283692Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 grpc closed 2025-06-24T20:41:12.283744Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 is DEAD 2025-06-24T20:41:12.285089Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|4ffefabe-68a036cd-113bf47f-f38b429a_0] PartitionId [0] Generation [2] Write session: gracefully shut down, all writes complete 2025-06-24T20:41:12.285153Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|4ffefabe-68a036cd-113bf47f-f38b429a_0] PartitionId [0] Generation [2] Write session: destroy 2025-06-24T20:41:12.284805Z node 19 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [19:7519618736664895210:2514]: session cookie 2 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 grpc read done: success# 0, data# { } 2025-06-24T20:41:12.284839Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [19:7519618736664895210:2514]: session cookie 2 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1grpc read failed 2025-06-24T20:41:12.284872Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [19:7519618736664895210:2514]: session cookie 2 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 grpc closed 2025-06-24T20:41:12.284907Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [19:7519618736664895210:2514]: session cookie 2 consumer test-consumer session test-consumer_19_1_15022412065943869697_v1 proxy is DEAD 2025-06-24T20:41:12.287647Z node 19 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [19:7519618736664895187:2505] disconnected; active server actors: 1 2025-06-24T20:41:12.287692Z node 19 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [19:7519618736664895187:2505] client test-consumer disconnected session test-consumer_19_1_15022412065943869697_v1 2025-06-24T20:41:12.287809Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037894] Destroy direct read session test-consumer_19_1_15022412065943869697_v1 2025-06-24T20:41:12.287862Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [19:7519618736664895190:2508] destroyed 2025-06-24T20:41:12.287918Z node 
19 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_19_1_15022412065943869697_v1 2025-06-24T20:41:12.303726Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|4ffefabe-68a036cd-113bf47f-f38b429a_0 grpc read done: success: 0 data: 2025-06-24T20:41:12.303760Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|4ffefabe-68a036cd-113bf47f-f38b429a_0 grpc read failed 2025-06-24T20:41:12.303794Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|4ffefabe-68a036cd-113bf47f-f38b429a_0 grpc closed 2025-06-24T20:41:12.303809Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|4ffefabe-68a036cd-113bf47f-f38b429a_0 is DEAD 2025-06-24T20:41:12.306652Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:41:12.307019Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [19:7519618728074960564:2492] destroyed 2025-06-24T20:41:12.307076Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. >> TDynamicNameserverTest::ListNodesCacheWhenNoChanges-EnableNodeBrokerDeltaProtocol-true [GOOD] >> TNodeBrokerTest::FixedNodeId [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ShiftIdRangeRemoveNew [GOOD] Test command err: 2025-06-24T20:41:17.020783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:17.020863Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-24T20:41:18.563807Z node 1 :NODE_BROKER ERROR: node_broker.cpp:1090: [DB] Removing node with wrong ID 1026 not in range (1023, 1025] |90.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TTenantPoolTests::TestStateStatic [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationManyNodesInterrupted [GOOD] Test command err: 2025-06-24T20:41:14.337008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:14.337074Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for first batch is committed ... blocking NKikimr::TEvTablet::TEvCommitResult from TABLET_ACTOR to FLAT_EXECUTOR cookie 2 ... blocking NKikimr::TEvTablet::TEvCommitResult from TABLET_ACTOR to FLAT_EXECUTOR cookie 1 ... 
waiting for first batch is committed (done) >> TNodeBrokerTest::NodesMigrationRemoveExpired [GOOD] >> TNodeBrokerTest::TestListNodes >> TLocalTests::TestRemoveTenantWhileResolving [GOOD] >> TNodeBrokerTest::BasicFunctionality >> BackupRestore::RestoreViewQueryText [GOOD] >> BackupRestore::RestoreViewReferenceTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::ListNodesCacheWhenNoChanges-EnableNodeBrokerDeltaProtocol-true [GOOD] Test command err: 2025-06-24T20:41:18.763987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:18.764094Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-24T20:41:20.381538Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:20.381608Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::FixedNodeId [GOOD] Test command err: 2025-06-24T20:41:19.048046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:19.048114Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> TNodeBrokerTest::SingleDomainModeBannedIds [GOOD] >> TNodeBrokerTest::NodesMigrationNewActiveNode [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationRemoveExpired [GOOD] Test command err: 2025-06-24T20:41:17.708765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:17.708862Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> TNodeBrokerTest::MinDynamicNodeIdShifted >> KqpRanges::IsNullPartial [GOOD] >> KqpRanges::LiteralOr >> TNodeBrokerTest::NodesMigrationSetLocation ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::SingleDomainModeBannedIds [GOOD] Test command err: 2025-06-24T20:41:20.216568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:20.216641Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) 2025-06-24T20:41:20.392789Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host3:1001: ERROR_TEMP: No free node IDs 2025-06-24T20:41:20.430172Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host4:1001: ERROR_TEMP: No free node IDs 2025-06-24T20:41:20.446678Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Node ID is banned 2025-06-24T20:41:21.422285Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host4:1001: ERROR_TEMP: No free node IDs 2025-06-24T20:41:21.476961Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host4:1001: ERROR_TEMP: No free node IDs ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationNewActiveNode [GOOD] Test command err: 2025-06-24T20:41:19.809879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:19.809959Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> TTenantPoolTests::TestSensorsConfigForStaticSlot |90.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats >> TNodeBrokerTest::NodesMigrationNewExpiredNode |90.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats |90.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats >> BackupRestore::TestAllPrimitiveTypes-UTF8 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-YSON >> TNodeBrokerTest::NodesMigrationReuseRemovedID >> KqpTx::RollbackByIdle [GOOD] >> KqpTx::RollbackInvalidated >> TSlotIndexesPoolTest::Ranges [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips2+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips2-EvWrite >> TNodeBrokerTest::NodesMigrationExpiredChanged |90.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TSlotIndexesPoolTest::Ranges [GOOD] >> TNodeBrokerTest::NodesMigrationReuseIDThenExtendLease >> TxUsage::WriteToTopic_Demo_46_Table [GOOD] >> TDynamicNameserverTest::CacheMissDifferentDeadline-EnableNodeBrokerDeltaProtocol-true |90.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut |90.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut |90.6%| [LD] {RESULT} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut >> TTenantPoolTests::TestSensorsConfigForStaticSlot [GOOD] |90.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |90.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |90.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs >> TNodeBrokerTest::ExtendLeaseBumpVersion >> DataShardReadIterator::ShouldReadRangePrefix2 [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix3 >> TxUsage::WriteToTopic_Demo_46_Query >> TNodeBrokerTest::NodesMigrationSetLocation [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TTenantPoolTests::TestSensorsConfigForStaticSlot [GOOD] Test command err: 
2025-06-24T20:41:23.636547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:23.636627Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:23.686311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) >> TDynamicNameserverTest::CacheMissDifferentDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] >> TDynamicNameserverTest::CacheMissDifferentDeadlineInverseOrder-EnableNodeBrokerDeltaProtocol-false >> KqpLimits::TooBigKey-useSink [GOOD] >> KqpLimits::TooBigColumn-useSink >> TNodeBrokerTest::BasicFunctionality [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationSetLocation [GOOD] Test command err: 2025-06-24T20:41:23.722487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:23.722556Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> KqpSqlIn::CantRewrite [GOOD] >> KqpSqlIn::KeySuffix >> TNodeBrokerTest::MinDynamicNodeIdShifted [GOOD] >> TDynamicNameserverTest::CacheMissDifferentDeadlineInverseOrder-EnableNodeBrokerDeltaProtocol-false [GOOD] >> TNodeBrokerTest::NodesMigrationNewExpiredNode [GOOD] >> TPersQueueTest::TestWriteStat [GOOD] >> TPersQueueTest::TestWriteSessionsConflicts ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::BasicFunctionality [GOOD] Test command err: 2025-06-24T20:41:22.133195Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:22.133270Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-24T20:41:23.881653Z node 2 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host1:1001: WRONG_REQUEST: Another location is registered for host1:1001 2025-06-24T20:41:23.898471Z node 2 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host4:1001: ERROR_TEMP: No free node IDs 2025-06-24T20:41:23.899018Z node 2 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Node has expired 2025-06-24T20:41:23.899547Z node 2 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Node has expired >> TxUsage::Write_Random_Sized_Messages_In_Wide_Transactions_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::MinDynamicNodeIdShifted [GOOD] Test command err: 2025-06-24T20:41:23.692728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:23.692821Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> TNodeBrokerTest::NodesMigrationReuseIDThenExtendLease [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissDifferentDeadlineInverseOrder-EnableNodeBrokerDeltaProtocol-false [GOOD] Test command err: 2025-06-24T20:41:25.489467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:25.489549Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR cookie 1 ... waiting for cache miss (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 2025-06-24T20:41:26.554698Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:26.554763Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... waiting for cache miss (done) >> KqpRanges::NullInKey ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationNewExpiredNode [GOOD] Test command err: 2025-06-24T20:41:24.840181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:24.840252Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataNestedDeep-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataWithFilter-default.txt] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-72 [FAIL] >> TNodeBrokerTest::NodesMigrationReuseRemovedID [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationReuseIDThenExtendLease [GOOD] Test command err: 2025-06-24T20:41:25.336578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:25.336657Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> TNodeBrokerTest::TestListNodes [GOOD] >> TNodeBrokerTest::NodesMigrationExpiredChanged [GOOD] >> AutoConfig::GetServicePoolsWith4AndMoreCPUs [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixRightBorder+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixRightBorder-EvWrite >> TNodeBrokerTest::ExtendLeaseBumpVersion [GOOD] >> TNodeBrokerTest::EpochCacheUpdate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationReuseRemovedID [GOOD] Test command err: 2025-06-24T20:41:24.729173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:24.729247Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::TestListNodes [GOOD] Test command err: 2025-06-24T20:41:22.406341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:22.406413Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) |90.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetServicePoolsWith4AndMoreCPUs [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationExpiredChanged [GOOD] Test command err: 2025-06-24T20:41:25.368832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:25.368902Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> KqpSinkMvcc::OltpNamedStatementNoSink [GOOD] >> KqpSinkMvcc::OltpNamedStatement >> KqpLocks::EmptyRangeAlreadyBroken [GOOD] >> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_FlatIndex [GOOD] >> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_BTreeIndex >> TNodeBrokerTest::ExtendLeaseSetLocationInOneRegistration >> TDynamicNameserverTest::CacheMissPipeDisconnect-EnableNodeBrokerDeltaProtocol-false >> TNodeBrokerTest::NoEffectBeforeCommit |90.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |90.6%| [LD] {RESULT} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |90.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut >> TNodeBrokerTest::NodeNameReuseRestartWithHostChanges >> KqpTx::RollbackInvalidated [GOOD] |90.6%| [TA] $(B)/ydb/core/mind/hive/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpKv::ReadRows_TimeoutCancelsReads [GOOD] >> KqpKv::ReadRows_PgValue ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpLocks::EmptyRangeAlreadyBroken [GOOD] Test command err: Trying to start YDB, gRPC: 1814, MsgBus: 13202 2025-06-24T20:41:05.022619Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618713525302385:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:05.022773Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003157/r3tmp/tmp5CGGTB/pdisk_1.dat 2025-06-24T20:41:05.468592Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:05.468981Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618709230335049:2079] 1750797665007845 != 1750797665007848 2025-06-24T20:41:05.479950Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:05.480037Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:05.498108Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1814, node 1 2025-06-24T20:41:05.571393Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:41:05.571418Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:41:05.571425Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:41:05.571555Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13202 2025-06-24T20:41:06.031936Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13202 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:41:06.258368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:41:06.281715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:06.470248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:41:06.666823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:06.756004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:08.468039Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618726410205868:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:08.468176Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:08.804058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:08.840239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:08.874449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:08.916194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:08.958665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:09.000238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:09.082479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:09.203018Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618730705173830:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:09.203117Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:09.203379Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618730705173835:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:09.209142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:41:09.253083Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618730705173837:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:41:09.318542Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618730705173888:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:41:10.023393Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618713525302385:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:10.032692Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 28110, MsgBus: 18385 2025-06-24T20:41:12.159456Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519618746156911680:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:12.159519Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath ... locks are not valid, code: 2001 Trying to start YDB, gRPC: 9056, MsgBus: 16219 2025-06-24T20:41:20.376740Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519618778013395492:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:20.376796Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003157/r3tmp/tmp3kN6Jk/pdisk_1.dat 2025-06-24T20:41:20.697348Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:20.697442Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:20.701621Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:20.724324Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9056, node 3 2025-06-24T20:41:20.915674Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:41:20.915710Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:41:20.915720Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:41:20.915877Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16219 2025-06-24T20:41:21.383296Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16219 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:41:21.861303Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:41:21.875937Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:41:21.897782Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:22.018662Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:22.259730Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:22.426487Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:41:25.397957Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519618778013395492:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:25.398035Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:41:25.815357Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519618799488233558:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:25.815480Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:25.930865Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:26.011798Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:26.056259Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:26.155942Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:26.194146Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:26.227270Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:26.311322Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:26.457620Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519618803783201526:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:26.457739Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:26.458282Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519618803783201531:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:26.463605Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:41:26.480153Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519618803783201533:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:41:26.557299Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519618803783201584:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:41:28.964716Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=MjI4OGY2NjgtY2E5NmQ3OGItY2Y5NzRjOGItMzY2YTVlOWU=, ActorId: [3:7519618808078169171:2476], ActorState: ExecuteState, TraceId: 01jyhts7sq70y0j6w4yytpbt2x, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken
: Error: Transaction locks invalidated. Table: `/Root/Test`, code: 2001
: Error: tx has deferred effects, but locks are broken >> TDynamicNameserverTest::CacheMissPipeDisconnect-EnableNodeBrokerDeltaProtocol-false [GOOD] >> TDynamicNameserverTest::CacheMissNoDeadline-EnableNodeBrokerDeltaProtocol-true >> TNodeBrokerTest::NodesMigrationExtendLeaseThenRemove >> TNodeBrokerTest::NodesMigrationExtendLeaseThenExpire >> TNodeBrokerTest::ListNodesEpochDeltasPersistance >> BackupRestore::RestoreViewReferenceTable [FAIL] >> BackupRestore::RestoreViewToDifferentDatabase >> TxUsage::Write_Only_Big_Messages_In_Wide_Transactions_Table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::RollbackInvalidated [GOOD] Test command err: Trying to start YDB, gRPC: 8094, MsgBus: 1500 2025-06-24T20:41:15.311847Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618758961700134:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:15.320471Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003142/r3tmp/tmpb3NVkE/pdisk_1.dat 2025-06-24T20:41:15.806233Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618758961700035:2079] 1750797675286435 != 1750797675286438 2025-06-24T20:41:15.817894Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:15.834511Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:15.834645Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:15.837134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8094, node 1 2025-06-24T20:41:16.091073Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:41:16.091108Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:41:16.091120Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:41:16.091289Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:41:16.343695Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1500 TClient is connected to server localhost:1500 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:41:17.222135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:41:17.249107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:41:17.267167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:17.513456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:41:17.729072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:17.829795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:41:20.315330Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618758961700134:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:20.315434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:41:20.507527Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618780436538148:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:20.507645Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:20.981042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:21.032060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:21.085296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:21.130769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:21.172107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:21.231538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:21.289790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:21.415211Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618784731506113:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:21.415341Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:21.415579Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618784731506118:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:21.422189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:41:21.449742Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618784731506120:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:41:21.541218Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618784731506171:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:41:22.898014Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YWViZTQyMTEtZTY1MGMwZWQtMmU1YmI3NTYtNGMxY2NiMDQ=, ActorId: [1:7519618789026473747:2479], ActorState: ReadyState, TraceId: 01jyhts25z5jzbfr2vc18qwr0q, Create QueryResponse for error on request, msg: Trying to start YD ... initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003142/r3tmp/tmp54xuOp/pdisk_1.dat 2025-06-24T20:41:24.140093Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:24.144553Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:24.145019Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:24.173107Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27300, node 2 2025-06-24T20:41:24.315776Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:41:24.315802Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:41:24.315811Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:41:24.315946Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21700 TClient is connected to server localhost:21700 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T20:41:24.943301Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:41:25.032533Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:41:25.058125Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:25.146163Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:25.346621Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:25.417978Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:27.736510Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618807793979130:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:27.736599Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:27.805130Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:27.855546Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:27.925064Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:27.988742Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:28.047453Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:28.138562Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:28.250392Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:28.379679Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618812088947088:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:28.379790Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:28.380282Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618812088947093:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:28.385205Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:41:28.445573Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519618812088947095:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:41:28.534495Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519618812088947146:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:41:28.930107Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519618790614108353:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:28.930237Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:41:30.323681Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519618820678882038:2486], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: KiReadTable!
:2:13: Error: Cannot find table 'db.[/Root/BadTable]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:41:30.326031Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NjIzYjIyNTAtNTNiODJjNTktZTE3MDhjYjktYmZiYjJhYTY=, ActorId: [2:7519618820678882016:2476], ActorState: ExecuteState, TraceId: 01jyhts9c77kapq2shpbswv0t6, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 01jyhts9bk1mt9e00p4et6fxtk 2025-06-24T20:41:30.345236Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NjIzYjIyNTAtNTNiODJjNTktZTE3MDhjYjktYmZiYjJhYTY=, ActorId: [2:7519618820678882016:2476], ActorState: ReadyState, TraceId: 01jyhts9f8c60dq5c2r164sq62, Create QueryResponse for error on request, msg: >> TDynamicNameserverTest::CacheMissNoDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] >> TNodeBrokerTest::EpochCacheUpdate [GOOD] |90.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |90.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut >> BackupRestore::TestAllPrimitiveTypes-YSON [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UUID ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissNoDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] Test command err: 2025-06-24T20:41:31.801482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:31.801553Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... waiting for cache miss (done) 2025-06-24T20:41:32.552054Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:32.552135Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR cookie 1 ... waiting for cache miss (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 ... unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE ... unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE ... 
unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR |90.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> TNodeBrokerTest::NoEffectBeforeCommit [GOOD] |90.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication >> KqpRanges::LiteralOr [GOOD] >> KqpRanges::LiteralOrCompisite |90.6%| [LD] {RESULT} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |90.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication |90.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::EpochCacheUpdate [GOOD] Test command err: 2025-06-24T20:41:26.633558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:26.633647Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-24T20:41:30.377722Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:30.377808Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> TFlatTableExecutor_ResourceProfile::TestExecutorStaticMemoryLimits [GOOD] >> TNodeBrokerTest::ExtendLeaseSetLocationInOneRegistration [GOOD] >> TPersQueueTest::InflightLimit [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix3 [GOOD] >> TNodeBrokerTest::NodesMigrationExtendLeaseThenExpire [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxDataLimitExceeded [GOOD] >> TNodeBrokerTest::NodesMigrationExtendLeaseThenRemove [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix4 >> TFlatTableExecutor_ResourceProfile::TestExecutorTxDataGC [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxPartialDataHold >> TFlatTableExecutor_ResourceProfile::TestExecutorTxPartialDataHold [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxHoldAndUse [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxHoldOnRelease [GOOD] >> TFlatTableExecutor_ResourceProfile::TestUpdateConfig [GOOD] >> TFlatTableExecutor_SliceOverlapScan::TestSliceOverlapScan ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NoEffectBeforeCommit [GOOD] Test command err: 2025-06-24T20:41:32.376375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:32.376450Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for commit ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 1 ... waiting for commit (done) ... 
unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ExtendLeaseSetLocationInOneRegistration [GOOD] Test command err: 2025-06-24T20:41:32.180570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:32.180654Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> TNodeBrokerTest::NodeNameExpiration >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips2-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeLeftBorder+EvWrite >> TNodeBrokerTest::NodeNameReuseRestartWithHostChanges [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationExtendLeaseThenRemove [GOOD] Test command err: 2025-06-24T20:41:33.152508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:33.152610Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationExtendLeaseThenExpire [GOOD] Test command err: 2025-06-24T20:41:33.049612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:33.049676Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> TNodeBrokerTest::SyncNodes >> TNodeBrokerTest::ListNodesEpochDeltasPersistance [GOOD] >> TNodeBrokerTest::NodesMigration2000Nodes >> TNetClassifierTest::TestInitFromRemoteSource >> TFlatTableExecutor_SliceOverlapScan::TestSliceOverlapScan [GOOD] >> TNodeBrokerTest::SeveralNodesSubscribersPerPipe >> KqpRanges::NullInKey [GOOD] >> KqpRanges::NullInKeySuffix >> TFlatTableExecutor_SnapshotWithCommits::SnapshotWithCommits [GOOD] >> TFlatTableExecutor_StickyPages::TestNonSticky_FlatIndex >> TFlatTableExecutor_StickyPages::TestNonSticky_FlatIndex [GOOD] >> TNodeBrokerTest::NodesMigrationExtendLease >> TFlatTableExecutor_StickyPages::TestNonSticky_BTreeIndex |90.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest |90.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest |90.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest |90.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ListNodesEpochDeltasPersistance [GOOD] Test command err: 2025-06-24T20:41:33.454937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:33.455037Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) |90.6%| [TA] {RESULT} $(B)/ydb/core/mind/hive/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodeNameReuseRestartWithHostChanges [GOOD] Test command err: 2025-06-24T20:41:32.339687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:32.339773Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-24T20:41:32.683090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 101 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000001 FAKE_COORDINATOR: Erasing txId 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::InflightLimit [GOOD] Test command err: 2025-06-24T20:35:19.763570Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617230715964780:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:19.763647Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:35:19.858947Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519617228822287939:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:19.859526Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00498a/r3tmp/tmpPUsAFm/pdisk_1.dat 2025-06-24T20:35:20.203555Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:35:20.203920Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:35:20.555667Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:20.555763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:20.558846Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:20.558925Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:20.567753Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:20.568018Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:35:20.569306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: 
Connecting -> Connected 2025-06-24T20:35:20.570940Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9348, node 1 2025-06-24T20:35:20.749193Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00498a/r3tmp/yandextxiMnV.tmp 2025-06-24T20:35:20.749223Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00498a/r3tmp/yandextxiMnV.tmp 2025-06-24T20:35:20.749380Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00498a/r3tmp/yandextxiMnV.tmp 2025-06-24T20:35:20.749512Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:35:20.805847Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:20.867529Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:20.946765Z INFO: TTestServer started on Port 62869 GrpcPort 9348 TClient is connected to server localhost:62869 PQClient connected to localhost:9348 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:21.331596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:35:21.429803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T20:35:24.011950Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617247895835012:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:24.012078Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617252190802324:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:24.012167Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:24.016536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:24.087382Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617252190802330:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T20:35:24.431324Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617252190802419:2764] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:24.482962Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519617250297124743:2279], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:35:24.484798Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NzkwYmUyNzAtMTU0Mjc0NzctOGNlZjBlYmMtZmM2YzMzZDU=, ActorId: [2:7519617250297124695:2272], ActorState: ExecuteState, TraceId: 01jyhte3s49c9zgvfsy2e00rah, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:35:24.493496Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:35:24.491316Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617252190802432:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:35:24.493262Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YTYyMjM0MTQtNWI2MzI3OTEtNjExNjY2MWQtM2EyMWFkZjM=, ActorId: [1:7519617247895835007:2296], ActorState: ExecuteState, TraceId: 01jyhte3pp4s8fkjw3q20te4fa, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:35:24.493586Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:35:24.495240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:24.600398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:24.763716Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617230715964780:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:24.763803Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:24.802370Z node 1 :FLAT_TX_SCH ... " SourceId: "\000source" SeqNo: 3 WriteTimestampMS: 1750797677726 CreateTimestampMS: 1750797677721 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } Result { Offset: 2 Data: "... 20570 bytes ..." SourceId: "\000source" SeqNo: 4 WriteTimestampMS: 1750797677809 CreateTimestampMS: 1750797677806 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } Result { Offset: 3 Data: "... 20570 bytes ..." 
SourceId: "\000source" SeqNo: 5 WriteTimestampMS: 1750797677826 CreateTimestampMS: 1750797677825 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 4 SizeLag: 88 RealReadOffset: 3 WaitQuotaTimeMs: 3045 EndOffset: 4 StartOffset: 0 } Cookie: 0 } 2025-06-24T20:41:24.975630Z node 29 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 2 consumer session _29_2_1438297942336449122_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset 4 2025-06-24T20:41:24.975703Z node 29 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 2 consumer session _29_2_1438297942336449122_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 4 ReadOffset 4 ReadGuid 1e293ccf-4c48e1e-d38d57b0-68f22925 has messages 1 2025-06-24T20:41:24.975905Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 2 consumer session _29_2_1438297942336449122_v1 read done: guid# 1e293ccf-4c48e1e-d38d57b0-68f22925, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 82616 2025-06-24T20:41:24.975948Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 2 consumer session _29_2_1438297942336449122_v1 response to read: guid# 1e293ccf-4c48e1e-d38d57b0-68f22925 2025-06-24T20:41:24.976341Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 2 consumer session _29_2_1438297942336449122_v1 Process answer. Aval parts: 0 Bytes readed: 82616 Offset: 0 from session 1 Offset: 1 from session 1 Offset: 2 from session 1 Offset: 3 from session 1 2025-06-24T20:41:24.986409Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer session _29_2_1438297942336449122_v1 grpc read done: success# 0, data# { } 2025-06-24T20:41:24.986448Z node 29 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer session _29_2_1438297942336449122_v1 grpc read failed 2025-06-24T20:41:24.986481Z node 29 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer session _29_2_1438297942336449122_v1 grpc closed 2025-06-24T20:41:24.986515Z node 29 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer session _29_2_1438297942336449122_v1 is DEAD 2025-06-24T20:41:24.988217Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session _29_2_1438297942336449122_v1 2025-06-24T20:41:24.988296Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [29:7519618786043122403:2540] destroyed 2025-06-24T20:41:24.988366Z node 30 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _29_2_1438297942336449122_v1 2025-06-24T20:41:25.954536Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=82536, count=4, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:41:28.979868Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 5 Topic 'rt3.dc1--topic1' partition 0 user $without_consumer offset 0 count 4 size 99043 endOffset 4 max time lag 0ms effective offset 0 2025-06-24T20:41:28.981224Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 5 added 4 blobs, size 82536 count 4 last offset 3, current partition end offset: 4 2025-06-24T20:41:28.981285Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 5. Send blob request. 2025-06-24T20:41:28.981394Z node 30 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 1 parts_count 0 source 1 size 20634 accessed 3 times before, last time 2025-06-24T20:41:24.000000Z 2025-06-24T20:41:28.981417Z node 30 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 1 partno 0 count 1 parts_count 0 source 1 size 20634 accessed 2 times before, last time 2025-06-24T20:41:24.000000Z 2025-06-24T20:41:28.981439Z node 30 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 2 partno 0 count 1 parts_count 0 source 1 size 20634 accessed 2 times before, last time 2025-06-24T20:41:24.000000Z 2025-06-24T20:41:28.981459Z node 30 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 3 partno 0 count 1 parts_count 0 source 1 size 20634 accessed 2 times before, last time 2025-06-24T20:41:24.000000Z 2025-06-24T20:41:28.981521Z node 30 :PERSQUEUE DEBUG: read.h:121: Reading cookie 5. All 4 blobs are from cache. 2025-06-24T20:41:28.981602Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 4 blobs 2025-06-24T20:41:28.982034Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 20612 from pos 0 cbcount 1 2025-06-24T20:41:28.982171Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 1 totakecount 1 count 1 size 20612 from pos 0 cbcount 1 2025-06-24T20:41:28.982266Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 2 totakecount 1 count 1 size 20612 from pos 0 cbcount 1 2025-06-24T20:41:28.982351Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 3 totakecount 1 count 1 size 20612 from pos 0 cbcount 1 2025-06-24T20:41:28.982550Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--topic1' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T20:41:28.983519Z node 30 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-24T20:41:28.983562Z node 30 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 0 offset 1 partno 0 count 1 parts 0 suffix '63' 2025-06-24T20:41:28.983591Z node 30 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 0 offset 2 partno 0 count 1 parts 0 suffix '63' 2025-06-24T20:41:28.983617Z node 30 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037892' partition 0 offset 3 partno 0 count 1 parts 0 suffix '63' 2025-06-24T20:41:28.989346Z node 29 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 3 consumer session _29_3_6842001148891216050_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { CmdReadResult { MaxOffset: 4 Result { Offset: 0 Data: "... 20570 bytes ..." SourceId: "\000source" SeqNo: 2 WriteTimestampMS: 1750797677611 CreateTimestampMS: 1750797677610 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } Result { Offset: 1 Data: "... 20570 bytes ..." SourceId: "\000source" SeqNo: 3 WriteTimestampMS: 1750797677726 CreateTimestampMS: 1750797677721 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } Result { Offset: 2 Data: "... 20570 bytes ..." SourceId: "\000source" SeqNo: 4 WriteTimestampMS: 1750797677809 CreateTimestampMS: 1750797677806 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } Result { Offset: 3 Data: "... 20570 bytes ..." SourceId: "\000source" SeqNo: 5 WriteTimestampMS: 1750797677826 CreateTimestampMS: 1750797677825 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 4 SizeLag: 88 RealReadOffset: 3 WaitQuotaTimeMs: 7024 EndOffset: 4 StartOffset: 0 } Cookie: 0 } 2025-06-24T20:41:28.989705Z node 29 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 3 consumer session _29_3_6842001148891216050_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset 4 2025-06-24T20:41:28.989778Z node 29 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 3 consumer session _29_3_6842001148891216050_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 4 ReadOffset 4 ReadGuid bed26b71-28c5656d-d5da458-1f8c016c has messages 1 2025-06-24T20:41:28.990024Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 3 consumer session _29_3_6842001148891216050_v1 read done: guid# bed26b71-28c5656d-d5da458-1f8c016c, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 82616 2025-06-24T20:41:28.990070Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 3 consumer session _29_3_6842001148891216050_v1 response to read: guid# bed26b71-28c5656d-d5da458-1f8c016c Bytes readed: 82616 Offset: 0 from session 1 Offset: 1 from session 1 Offset: 2 from session 1 Offset: 3 from session 1 2025-06-24T20:41:28.990478Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 3 consumer session _29_3_6842001148891216050_v1 Process answer. 
Aval parts: 0 2025-06-24T20:41:28.996054Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 3 consumer session _29_3_6842001148891216050_v1 grpc read done: success# 0, data# { } 2025-06-24T20:41:28.996086Z node 29 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 3 consumer session _29_3_6842001148891216050_v1 grpc read failed 2025-06-24T20:41:28.996151Z node 29 :PQ_READ_PROXY INFO: read_session_actor.cpp:1645: session cookie 3 consumer session _29_3_6842001148891216050_v1 closed 2025-06-24T20:41:28.996238Z node 29 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 3 consumer session _29_3_6842001148891216050_v1 is DEAD 2025-06-24T20:41:28.999739Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session _29_3_6842001148891216050_v1 2025-06-24T20:41:28.999823Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [29:7519618786043122405:2542] destroyed 2025-06-24T20:41:28.999893Z node 30 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _29_3_6842001148891216050_v1 2025-06-24T20:41:29.638236Z node 27 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=82536, count=4, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:41:33.252605Z node 27 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [27:7519618835575028282:2725] TxId: 281474976715741. Ctx: { TraceId: 01jyhtsbkd389egj14pp39c49t, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=27&id=ZThiZmZlNjUtMjgwZmZjOGQtM2ZhNmI1NjktMzRhZTkzNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 28 2025-06-24T20:41:33.253716Z node 27 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [27:7519618835575028286:2725], TxId: 281474976715741, task: 3. Ctx: { TraceId : 01jyhtsbkd389egj14pp39c49t. SessionId : ydb://session/3?node_id=27&id=ZThiZmZlNjUtMjgwZmZjOGQtM2ZhNmI1NjktMzRhZTkzNw==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [27:7519618835575028282:2725], status: UNAVAILABLE, reason: {
: Error: Terminate execution } >> TFlatTableExecutor_StickyPages::TestNonSticky_BTreeIndex [GOOD] >> TFlatTableExecutor_StickyPages::TestSticky >> TFlatTableExecutor_StickyPages::TestSticky [GOOD] >> TFlatTableExecutor_StickyPages::TestNonStickyGroup_FlatIndex >> TNodeBrokerTest::NodesMigrationReuseExpiredID >> TFlatTableExecutor_StickyPages::TestNonStickyGroup_FlatIndex [GOOD] >> TFlatTableExecutor_StickyPages::TestNonStickyGroup_BTreeIndex >> TFlatTableExecutor_StickyPages::TestNonStickyGroup_BTreeIndex [GOOD] >> TFlatTableExecutor_StickyPages::TestStickyMain >> TFlatTableExecutor_StickyPages::TestStickyMain [GOOD] >> TFlatTableExecutor_StickyPages::TestStickyAlt_FlatIndex >> TPersQueueTest::DisableDeduplication [GOOD] >> TFlatTableExecutor_StickyPages::TestStickyAlt_FlatIndex [GOOD] >> TFlatTableExecutor_StickyPages::TestStickyAlt_BTreeIndex >> TFlatTableExecutor_StickyPages::TestStickyAlt_BTreeIndex [GOOD] >> TFlatTableExecutor_StickyPages::TestStickyAll >> TFlatTableExecutor_StickyPages::TestStickyAll [GOOD] >> TFlatTableExecutor_StickyPages::TestAlterAddFamilySticky |90.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |90.6%| [LD] {RESULT} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |90.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut >> TNodeBrokerTest::ShiftIdRangeRemoveActive >> TFlatTableExecutor_StickyPages::TestAlterAddFamilySticky [GOOD] >> TFlatTableExecutor_StickyPages::TestAlterAddFamilyPartiallySticky >> TFlatTableExecutor_StickyPages::TestAlterAddFamilyPartiallySticky [GOOD] >> TFlatTableExecutor_VersionedLargeBlobs::TestMultiVersionCompactionLargeBlobs [GOOD] >> TFlatTableExecutor_VersionedRows::TestVersionedRows >> TNodeBrokerTest::SyncNodes [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Table [GOOD] >> TFlatTableExecutor_VersionedRows::TestVersionedRows [GOOD] >> TFlatTableExecutor_VersionedRows::TestVersionedRowsSmallBlobs >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixRightBorder-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinue+EvWrite >> KqpSqlIn::KeySuffix [GOOD] >> KqpSqlIn::KeySuffix_NotPointPrefix ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::SyncNodes [GOOD] Test command err: 2025-06-24T20:41:38.251408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:38.251481Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> TNodeBrokerTest::NodesMigration999Nodes >> KqpSinkMvcc::OltpNamedStatement [GOOD] >> TNodeBrokerTest::DoNotReuseDynnodeIdsBelowMinDynamicNodeId >> TNodeBrokerTest::UpdateNodesLogEmptyEpoch >> KqpLimits::TooBigColumn-useSink [GOOD] >> TNodeBrokerTest::NodesV2BackMigration |90.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign |90.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign >> TNodeBrokerTest::NodeNameExpiration [GOOD] |90.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign |90.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> TNodeBrokerTest::NodesMigration1001Nodes >> TSlotIndexesPoolTest::Expansion [GOOD] >> TNodeBrokerTest::NodesMigrationExtendLease [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodeNameExpiration [GOOD] Test command err: 2025-06-24T20:41:37.655503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:37.655574Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-24T20:41:38.011889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 101 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000001 FAKE_COORDINATOR: Erasing txId 101 >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Query >> TNodeBrokerTest::SeveralNodesSubscribersPerPipe [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkMvcc::OltpNamedStatement [GOOD] Test command err: Trying to start YDB, gRPC: 16558, MsgBus: 16543 2025-06-24T20:41:18.876844Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618772242970012:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:18.877270Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003120/r3tmp/tmp28BpXn/pdisk_1.dat 2025-06-24T20:41:19.369792Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:19.375249Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618772242969823:2079] 1750797678845798 != 1750797678845801 2025-06-24T20:41:19.404985Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:19.405123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:19.426711Z node 1 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16558, node 1 2025-06-24T20:41:19.533816Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:41:19.533845Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:41:19.533861Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:41:19.534956Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16543 2025-06-24T20:41:19.864142Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16543 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:41:20.299244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:41:20.315807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:41:22.521036Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618789422839631:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:22.521125Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:22.521368Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618789422839663:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:22.526409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:41:22.545721Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618789422839665:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:41:22.644366Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618789422839716:2336] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:41:23.229434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:23.458545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:24.434325Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618772242970012:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:24.434561Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:41:24.855259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 21872, MsgBus: 62567 2025-06-24T20:41:30.559726Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519618824145931779:2212];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003120/r3tmp/tmp8HgCwV/pdisk_1.dat 2025-06-24T20:41:30.671065Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:41:30.729802Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:30.730886Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519618824145931577:2079] 1750797690431753 != 1750797690431756 2025-06-24T20:41:30.742857Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:30.743236Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:30.745073Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21872, node 2 2025-06-24T20:41:30.821621Z node 2 
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:41:30.821645Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:41:30.821654Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:41:30.821786Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62567 TClient is connected to server localhost:62567 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:41:31.540492Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:41:31.544611Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:41:31.552521Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:41:34.240304Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618841325801380:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:34.240415Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:34.240686Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618841325801407:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:34.244809Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:41:34.259970Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519618841325801409:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T20:41:34.331952Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519618841325801460:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:41:34.390114Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:34.471007Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:35.628471Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519618824145931779:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:35.645983Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:41:36.210585Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TSlotIndexesPoolTest::Expansion [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationExtendLease [GOOD] Test command err: 2025-06-24T20:41:40.125512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:40.125586Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> TxUsage::WriteToTopic_Demo_46_Query [GOOD] >> TNodeBrokerTest::NodesMigrationReuseExpiredID [GOOD] >> TDynamicNameserverTest::CacheMissPipeDisconnect-EnableNodeBrokerDeltaProtocol-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-72 [FAIL] Test command err: Starting YDB, grpc: 7324, msgbus: 8653 2025-06-24T20:37:20.019120Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617748611585963:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:20.019557Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00366d/r3tmp/tmpIEh42g/pdisk_1.dat 2025-06-24T20:37:20.476055Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:20.527063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:20.529080Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:20.533586Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7324, node 1 2025-06-24T20:37:20.674842Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:37:20.674870Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:37:20.674876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:37:20.674988Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8653 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T20:37:20.950601Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519617748611586053:2117] Handle TEvNavigate describe path dc-1 2025-06-24T20:37:20.976610Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519617748611586572:2446] HANDLE EvNavigateScheme dc-1 2025-06-24T20:37:20.978291Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519617748611586572:2446] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T20:37:21.030863Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:37:21.085368Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519617748611586572:2446] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-24T20:37:21.096657Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519617748611586572:2446] Handle TEvDescribeSchemeResult Forward to# [1:7519617748611586568:2442] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 
18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T20:37:21.141595Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519617748611586053:2117] Handle TEvProposeTransaction 2025-06-24T20:37:21.141623Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519617748611586053:2117] TxId# 281474976710657 ProcessProposeTransaction 2025-06-24T20:37:21.141736Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519617748611586053:2117] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7519617752906553891:2455] 2025-06-24T20:37:21.265380Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519617752906553891:2455] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T20:37:21.265466Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519617752906553891:2455] txid# 281474976710657 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T20:37:21.265484Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519617752906553891:2455] txid# 281474976710657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T20:37:21.265562Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519617752906553891:2455] txid# 281474976710657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:37:21.265856Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519617752906553891:2455] txid# 281474976710657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:37:21.266039Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519617752906553891:2455] HANDLE EvNavigateKeySetResult, txid# 281474976710657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-24T20:37:21.266088Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519617752906553891:2455] txid# 281474976710657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710657 TabletId# 72057594046644480} 2025-06-24T20:37:21.266219Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519617752906553891:2455] txid# 281474976710657 HANDLE EvClientConnected 2025-06-24T20:37:21.266824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:37:21.271266Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519617752906553891:2455] txid# 281474976710657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710657} 2025-06-24T20:37:21.271330Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519617752906553891:2455] txid# 281474976710657 SEND to# [1:7519617752906553890:2454] Source {TEvProposeTransactionStatus txid# 281474976710657 Status# 53} 2025-06-24T20:37:21.293198Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519617748611586053:2117] Handle TEvProposeTransaction 2025-06-24T20:37:21.293221Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519617748611586053:2117] TxId# 281474976710658 ProcessProposeTransaction 2025-06-24T20:37:21.293257Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519617748611586053:2117] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519617752906553932:2489] 2025-06-24T20:37:21.295853Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519617752906553932:2489] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T20:37:21.295905Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519617752906553932:2489] txid# 281474976710658 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T20:37:21.295921Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519617752906553932:2489] txid# 281474976710658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T20:37:21.295985Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519617752906553932:2489] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:37:21.296249Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519617752906553932:2489] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:37:21.296330Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519617752906553932:2489] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:37:21.296367Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519617752906553932:2489] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-24T20:37:21.296520Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519617752906553932:2489] txid# 281474976710658 HANDLE EvClientConnected 2025-06-24T20:37:21.296986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 28147497671065 ... 
nsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T20:41:20.417193Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519618772185173361:2104] Handle TEvProposeTransaction 2025-06-24T20:41:20.417227Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519618772185173361:2104] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T20:41:20.417349Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519618772185173361:2104] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [59:7519618780775108671:2449] 2025-06-24T20:41:20.420512Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519618780775108671:2449] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T20:41:20.420588Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519618780775108671:2449] txid# 281474976715657 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T20:41:20.420617Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519618780775108671:2449] txid# 281474976715657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T20:41:20.420682Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519618780775108671:2449] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:41:20.421062Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519618780775108671:2449] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:41:20.421187Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519618780775108671:2449] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 
PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-24T20:41:20.421253Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519618780775108671:2449] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T20:41:20.421423Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519618780775108671:2449] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T20:41:20.422157Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:41:20.428135Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519618780775108671:2449] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T20:41:20.428210Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519618780775108671:2449] txid# 281474976715657 SEND to# [59:7519618780775108670:2448] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} waiting... 2025-06-24T20:41:20.450719Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519618772185173361:2104] Handle TEvProposeTransaction 2025-06-24T20:41:20.450751Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519618772185173361:2104] TxId# 281474976715658 ProcessProposeTransaction 2025-06-24T20:41:20.450797Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519618772185173361:2104] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [59:7519618780775108709:2483] 2025-06-24T20:41:20.453729Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519618780775108709:2483] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T20:41:20.453804Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519618780775108709:2483] txid# 281474976715658 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T20:41:20.453831Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519618780775108709:2483] txid# 281474976715658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T20:41:20.453897Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519618780775108709:2483] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:41:20.454312Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519618780775108709:2483] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:41:20.454428Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519618780775108709:2483] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 
72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:41:20.454490Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519618780775108709:2483] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-24T20:41:20.454654Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519618780775108709:2483] txid# 281474976715658 HANDLE EvClientConnected 2025-06-24T20:41:20.455821Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:41:20.459407Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519618780775108709:2483] txid# 281474976715658 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715658} 2025-06-24T20:41:20.459479Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519618780775108709:2483] txid# 281474976715658 SEND to# [59:7519618780775108708:2482] Source {TEvProposeTransactionStatus txid# 281474976715658 Status# 48} 2025-06-24T20:41:23.831496Z node 59 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[59:7519618772185173351:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:23.831598Z node 59 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:41:26.810609Z node 59 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1482: TraceId: "01jyhtrzvv2dp0dajv15az1fns", Request deadline has expired for 1.315424s seconds assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:23521
TBackTrace::Capture()+28 (0x19709CCC)
NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x19BC7AE0)
NKikimr::NTxProxyUT::CreateLocalUser(NKikimr::NTxProxyUT::TTestEnv const&, TBasicString> const&, TBasicString> const&, TBasicString> const&)+2057 (0x192E02D9)
NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase)+1859 (0x192F66E3)
std::__y1::__bind_return, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase>, std::__y1::tuple, __is_valid_bind_return, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase>, std::__y1::tuple>::value>::type std::__y1::__bind const&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase const&>::operator()[abi:fe200000](NUnitTest::TTestContext&)+588 (0x1933557C)
std::__y1::__function::__func, void ()>::operator()()+280 (0x193255C8)
TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x19BFECE6)
NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x19BCE669)
NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TCurrentTest::Execute()+1204 (0x19324474)
NUnitTest::TTestFactory::Execute()+2438 (0x19BCFF36)
NUnitTest::RunMain(int, char**)+5213 (0x19BF925D)
??+0 (0x7F0FD5D2ED90)
__libc_start_main+128 (0x7F0FD5D2EE40)
_start+41 (0x16BA9029)
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-72 [GOOD] Test command err: Starting YDB, grpc: 10302, msgbus: 7649 2025-06-24T20:37:13.480837Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617719469465855:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:13.481115Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003687/r3tmp/tmpoE9Cn1/pdisk_1.dat 2025-06-24T20:37:14.254316Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:14.316175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:14.316279Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:14.353809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:37:14.479417Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 10302, node 1 2025-06-24T20:37:14.696017Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:37:14.696046Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:37:14.696060Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:37:14.696229Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad
distributable configuration TClient is connected to server localhost:7649 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T20:37:15.097944Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519617719469465884:2117] Handle TEvNavigate describe path dc-1 2025-06-24T20:37:15.166865Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519617728059400997:2439] HANDLE EvNavigateScheme dc-1 2025-06-24T20:37:15.167312Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519617728059400997:2439] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T20:37:15.241494Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519617728059400997:2439] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-24T20:37:15.262781Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519617728059400997:2439] Handle TEvDescribeSchemeResult Forward to# [1:7519617728059400995:2437] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 
72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T20:37:15.303471Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519617719469465884:2117] Handle TEvProposeTransaction 2025-06-24T20:37:15.303504Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519617719469465884:2117] TxId# 281474976710657 ProcessProposeTransaction 2025-06-24T20:37:15.303625Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519617719469465884:2117] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7519617728059401004:2445] 2025-06-24T20:37:15.525332Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519617728059401004:2445] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T20:37:15.525437Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519617728059401004:2445] txid# 281474976710657 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T20:37:15.525487Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519617728059401004:2445] txid# 281474976710657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T20:37:15.525564Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519617728059401004:2445] txid# 281474976710657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:37:15.525905Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519617728059401004:2445] txid# 281474976710657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:37:15.526113Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519617728059401004:2445] HANDLE EvNavigateKeySetResult, txid# 281474976710657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-24T20:37:15.526192Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519617728059401004:2445] txid# 281474976710657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710657 TabletId# 72057594046644480} 2025-06-24T20:37:15.526377Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519617728059401004:2445] txid# 281474976710657 HANDLE EvClientConnected 2025-06-24T20:37:15.527821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:37:15.536983Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519617728059401004:2445] txid# 281474976710657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710657} 2025-06-24T20:37:15.537056Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519617728059401004:2445] txid# 281474976710657 SEND to# [1:7519617728059401003:2444] Source 
{TEvProposeTransactionStatus txid# 281474976710657 Status# 53} waiting... 2025-06-24T20:37:15.572300Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519617719469465884:2117] Handle TEvProposeTransaction 2025-06-24T20:37:15.572330Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519617719469465884:2117] TxId# 281474976710658 ProcessProposeTransaction 2025-06-24T20:37:15.572377Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519617719469465884:2117] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519617728059401046:2482] 2025-06-24T20:37:15.574850Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519617728059401046:2482] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T20:37:15.574912Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519617728059401046:2482] txid# 281474976710658 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T20:37:15.574930Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519617728059401046:2482] txid# 281474976710658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T20:37:15.574990Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519617728059401046:2482] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:37:15.575323Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519617728059401046:2482] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:37:15.575414Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519617728059401046:2482] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:37:15.575452Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519617728059401046:2482] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-24T20:37:15.575592Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519617728059401046:2482] txid# 281474976710658 HANDLE EvClientConnected 2025-06-24T20:37:15.576200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710 ... 
Request# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:41:13.969977Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519618748849598878:2595] txid# 281474976710661 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710661 TabletId# 72057594046644480} 2025-06-24T20:41:13.970129Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519618748849598878:2595] txid# 281474976710661 HANDLE EvClientConnected 2025-06-24T20:41:13.980198Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519618748849598878:2595] txid# 281474976710661 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976710661} 2025-06-24T20:41:13.980339Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519618748849598878:2595] txid# 281474976710661 SEND to# [59:7519618748849598877:2295] Source {TEvProposeTransactionStatus txid# 281474976710661 Status# 48} 2025-06-24T20:41:14.104448Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519618727374761441:2113] Handle TEvProposeTransaction 2025-06-24T20:41:14.104482Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519618727374761441:2113] TxId# 281474976710662 ProcessProposeTransaction 2025-06-24T20:41:14.104532Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519618727374761441:2113] Cookie# 0 userReqId# "" txid# 281474976710662 SEND to# [59:7519618753144566194:2609] 2025-06-24T20:41:14.107322Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519618753144566194:2609] txid# 281474976710662 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\022\010\001\022\016\032\014ordinaryuser\n\032\010\000\022\026\010\001\020\200\200\002\032\014ordinaryuser \000" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:55676" 2025-06-24T20:41:14.107412Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519618753144566194:2609] txid# 281474976710662 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T20:41:14.107441Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519618753144566194:2609] txid# 281474976710662 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T20:41:14.107504Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519618753144566194:2609] txid# 281474976710662 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:41:14.107934Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519618753144566194:2609] txid# 281474976710662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:41:14.108053Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519618753144566194:2609] HANDLE EvNavigateKeySetResult, txid# 281474976710662 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:41:14.108112Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519618753144566194:2609] txid# 281474976710662 SEND 
to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710662 TabletId# 72057594046644480} 2025-06-24T20:41:14.108278Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519618753144566194:2609] txid# 281474976710662 HANDLE EvClientConnected 2025-06-24T20:41:14.108871Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:41:14.115048Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519618753144566194:2609] txid# 281474976710662 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976710662} 2025-06-24T20:41:14.115116Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519618753144566194:2609] txid# 281474976710662 SEND to# [59:7519618753144566193:2308] Source {TEvProposeTransactionStatus txid# 281474976710662 Status# 48} 2025-06-24T20:41:14.202439Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519618727374761441:2113] Handle TEvProposeTransaction 2025-06-24T20:41:14.202480Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519618727374761441:2113] TxId# 281474976710663 ProcessProposeTransaction 2025-06-24T20:41:14.202527Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519618727374761441:2113] Cookie# 0 userReqId# "" txid# 281474976710663 SEND to# [59:7519618753144566225:2626] 2025-06-24T20:41:14.205717Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519618753144566225:2626] txid# 281474976710663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "targetuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:55700" 2025-06-24T20:41:14.205797Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519618753144566225:2626] txid# 281474976710663 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T20:41:14.205826Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519618753144566225:2626] txid# 281474976710663 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T20:41:14.205881Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519618753144566225:2626] txid# 281474976710663 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:41:14.206307Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519618753144566225:2626] txid# 281474976710663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:41:14.206431Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519618753144566225:2626] HANDLE EvNavigateKeySetResult, txid# 281474976710663 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:41:14.206493Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519618753144566225:2626] txid# 281474976710663 SEND to# 
72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710663 TabletId# 72057594046644480} 2025-06-24T20:41:14.206646Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519618753144566225:2626] txid# 281474976710663 HANDLE EvClientConnected 2025-06-24T20:41:14.226725Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519618753144566225:2626] txid# 281474976710663 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976710663} 2025-06-24T20:41:14.227385Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519618753144566225:2626] txid# 281474976710663 SEND to# [59:7519618753144566224:2310] Source {TEvProposeTransactionStatus txid# 281474976710663 Status# 48} 2025-06-24T20:41:14.347654Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519618727374761441:2113] Handle TEvProposeTransaction 2025-06-24T20:41:14.347689Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519618727374761441:2113] TxId# 281474976710664 ProcessProposeTransaction 2025-06-24T20:41:14.347748Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519618727374761441:2113] Cookie# 0 userReqId# "" txid# 281474976710664 SEND to# [59:7519618753144566253:2638] 2025-06-24T20:41:14.350862Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519618753144566253:2638] txid# 281474976710664 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveUser { User: "targetuser" MissingOk: false } } } } UserToken: "\n\014ordinaryuser\022\030\022\026\n\024all-users@well-known\032\334\003eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDg0MDg3NCwiaWF0IjoxNzUwNzk3Njc0LCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.b68GvFBiEpeowNgM1HRsQCZH6XCqynvGy_5kcyWtIZKoZhdNEbsFRoUoPpYo2x5vYtpy2_WogYzcMHFJiwvzfTTMQHxd8ly5LsR3vvns8BgerKz2Rdnj46-xckPLU6Au9wLOXQfMDqFMfeN5PdvqPZIig6yJtgsiP-h4f6BBywbVedh7mgWr8jiZ7G5IJV4z2kkqOJUZ6DSE3HCeN_KD3JqBBgIRj75-EhJurDov1k8xWT8uf6Bcih_e8CdsM-jldEADr6iCoo1C9O9P724Ft9cQpiFlSpPGFjyzATmHbGv6kIk9RhdTAEDSQg-aTdHoDS-V752uCx1T9jd6f_X-3w\"\005Login*\210\001eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDg0MDg3NCwiaWF0IjoxNzUwNzk3Njc0LCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.**" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:55732" 2025-06-24T20:41:14.350942Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519618753144566253:2638] txid# 281474976710664 Bootstrap, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T20:41:14.350973Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519618753144566253:2638] txid# 281474976710664 Bootstrap, UserSID: ordinaryuser IsClusterAdministrator: 0 2025-06-24T20:41:14.351134Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1434: Actor# [59:7519618753144566253:2638] txid# 281474976710664 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-24T20:41:14.352766Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1469: Actor# [59:7519618753144566253:2638] txid# 281474976710664 HandleResolveDatabase, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-24T20:41:14.352829Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519618753144566253:2638] txid# 281474976710664 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:41:14.353329Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519618753144566253:2638] txid# 
281474976710664 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:41:14.353375Z node 59 :TX_PROXY ERROR: schemereq.cpp:1103: Actor# [59:7519618753144566253:2638] txid# 281474976710664, Access denied for ordinaryuser, attempt to manage user 2025-06-24T20:41:14.353510Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519618753144566253:2638] txid# 281474976710664, issues: { message: "Access denied for ordinaryuser" issue_code: 200000 severity: 1 } 2025-06-24T20:41:14.353549Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519618753144566253:2638] txid# 281474976710664 SEND to# [59:7519618753144566252:2322] Source {TEvProposeTransactionStatus Status# 5} 2025-06-24T20:41:14.353855Z node 59 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=59&id=ZjBhMzMwOS05NmM3MjNlMi0xYTlmODdhYS1hOGZlMjA4ZA==, ActorId: [59:7519618753144566243:2322], ActorState: ExecuteState, TraceId: 01jyhtrstpcv5vdm9r5ek1pkv8, Create QueryResponse for error on request, msg: 2025-06-24T20:41:14.354224Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [59:7519618727374761441:2113] Handle TEvExecuteKqpTransaction 2025-06-24T20:41:14.354246Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [59:7519618727374761441:2113] TxId# 281474976710665 ProcessProposeKqpTransaction >> TNetClassifierTest::TestInitFromRemoteSource [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::SeveralNodesSubscribersPerPipe [GOOD] Test command err: 2025-06-24T20:41:40.248863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:40.248943Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for updates are sent ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to TEST_ACTOR_RUNTIME cookie 0 ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to TEST_ACTOR_RUNTIME cookie 0 ... waiting for updates are sent (done) ... unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to TEST_ACTOR_RUNTIME ... 
unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to TEST_ACTOR_RUNTIME ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-24 [GOOD] Test command err: Starting YDB, grpc: 11784, msgbus: 30570 2025-06-24T20:37:15.956178Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617725512944834:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:15.956245Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00367a/r3tmp/tmpdTNzB1/pdisk_1.dat 2025-06-24T20:37:16.576404Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:16.589768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:16.589824Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:16.597639Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11784, node 1 2025-06-24T20:37:16.743853Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:37:16.743881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:37:16.743892Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:37:16.744005Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30570 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T20:37:16.983955Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519617729807912321:2117] Handle TEvNavigate describe path dc-1 2025-06-24T20:37:16.994747Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:37:17.010535Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519617734102880133:2442] HANDLE EvNavigateScheme dc-1 2025-06-24T20:37:17.010966Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519617734102880133:2442] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T20:37:17.073686Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519617734102880133:2442] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-24T20:37:17.082804Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519617734102880133:2442] Handle TEvDescribeSchemeResult Forward to# [1:7519617729807912836:2441] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 
18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T20:37:17.101318Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519617729807912321:2117] Handle TEvProposeTransaction 2025-06-24T20:37:17.101357Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519617729807912321:2117] TxId# 281474976710657 ProcessProposeTransaction 2025-06-24T20:37:17.101507Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519617729807912321:2117] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7519617734102880139:2447] 2025-06-24T20:37:17.217658Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519617734102880139:2447] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T20:37:17.217739Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519617734102880139:2447] txid# 281474976710657 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 0 2025-06-24T20:37:17.217759Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519617734102880139:2447] txid# 281474976710657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T20:37:17.217811Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519617734102880139:2447] txid# 281474976710657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:37:17.218131Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519617734102880139:2447] txid# 281474976710657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:37:17.218469Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519617734102880139:2447] HANDLE EvNavigateKeySetResult, txid# 281474976710657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-24T20:37:17.218536Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519617734102880139:2447] txid# 281474976710657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710657 TabletId# 72057594046644480} 2025-06-24T20:37:17.218687Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519617734102880139:2447] txid# 281474976710657 HANDLE EvClientConnected 2025-06-24T20:37:17.219412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:37:17.226106Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519617734102880139:2447] txid# 281474976710657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710657} 2025-06-24T20:37:17.226163Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519617734102880139:2447] txid# 
281474976710657 SEND to# [1:7519617734102880138:2446] Source {TEvProposeTransactionStatus txid# 281474976710657 Status# 53} waiting... 2025-06-24T20:37:17.264307Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519617729807912321:2117] Handle TEvProposeTransaction 2025-06-24T20:37:17.264329Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519617729807912321:2117] TxId# 281474976710658 ProcessProposeTransaction 2025-06-24T20:37:17.264356Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519617729807912321:2117] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519617734102880178:2482] 2025-06-24T20:37:17.266898Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519617734102880178:2482] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T20:37:17.266947Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519617734102880178:2482] txid# 281474976710658 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 0 2025-06-24T20:37:17.266960Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519617734102880178:2482] txid# 281474976710658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T20:37:17.267021Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519617734102880178:2482] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:37:17.267312Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519617734102880178:2482] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:37:17.267402Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519617734102880178:2482] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:37:17.267445Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519617734102880178:2482] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-24T20:37:17.267564Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519617734102880178:2482] txid# 281474976710658 HANDLE EvClientConnected 2025-06-24T20:37:17.267961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 2814749767 ... 
T20:41:00.096698Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519618692221144781:2567] txid# 281474976710660 Status StatusAlreadyExists HANDLE {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976710660 Reason# Check failed: path: '/dc-1/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)} 2025-06-24T20:41:00.096909Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519618692221144781:2567] txid# 281474976710660, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:41:00.096952Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519618692221144781:2567] txid# 281474976710660 SEND to# [59:7519618687926177408:2301] Source {TEvProposeTransactionStatus txid# 281474976710660 Status# 48} 2025-06-24T20:41:00.119409Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519618670746307373:2110] Handle TEvProposeTransaction 2025-06-24T20:41:00.119443Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519618670746307373:2110] TxId# 281474976710661 ProcessProposeTransaction 2025-06-24T20:41:00.119493Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519618670746307373:2110] Cookie# 0 userReqId# "" txid# 281474976710661 SEND to# [59:7519618692221144805:2579] 2025-06-24T20:41:00.122355Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519618692221144805:2579] txid# 281474976710661 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "ordinaryuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:52896" 2025-06-24T20:41:00.122423Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519618692221144805:2579] txid# 281474976710661 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T20:41:00.122450Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519618692221144805:2579] txid# 281474976710661 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T20:41:00.122500Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519618692221144805:2579] txid# 281474976710661 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:41:00.122877Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519618692221144805:2579] txid# 281474976710661 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:41:00.122986Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519618692221144805:2579] HANDLE EvNavigateKeySetResult, txid# 281474976710661 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:41:00.123046Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519618692221144805:2579] txid# 281474976710661 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 
281474976710661 TabletId# 72057594046644480} 2025-06-24T20:41:00.123670Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519618692221144805:2579] txid# 281474976710661 HANDLE EvClientConnected 2025-06-24T20:41:00.131330Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519618692221144805:2579] txid# 281474976710661 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976710661} 2025-06-24T20:41:00.131387Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519618692221144805:2579] txid# 281474976710661 SEND to# [59:7519618692221144804:2294] Source {TEvProposeTransactionStatus txid# 281474976710661 Status# 48} 2025-06-24T20:41:00.258015Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519618670746307373:2110] Handle TEvProposeTransaction 2025-06-24T20:41:00.258045Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519618670746307373:2110] TxId# 281474976710662 ProcessProposeTransaction 2025-06-24T20:41:00.258089Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519618670746307373:2110] Cookie# 0 userReqId# "" txid# 281474976710662 SEND to# [59:7519618692221144832:2596] 2025-06-24T20:41:00.260830Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519618692221144832:2596] txid# 281474976710662 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\022\010\001\022\016\032\014ordinaryuser\n\032\010\000\022\026\010\001\020\200\200\002\032\014ordinaryuser \000" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:52912" 2025-06-24T20:41:00.260911Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519618692221144832:2596] txid# 281474976710662 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T20:41:00.260932Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519618692221144832:2596] txid# 281474976710662 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T20:41:00.260988Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519618692221144832:2596] txid# 281474976710662 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:41:00.261323Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519618692221144832:2596] txid# 281474976710662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:41:00.261416Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519618692221144832:2596] HANDLE EvNavigateKeySetResult, txid# 281474976710662 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:41:00.261466Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519618692221144832:2596] txid# 281474976710662 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710662 TabletId# 72057594046644480} 2025-06-24T20:41:00.261633Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519618692221144832:2596] txid# 281474976710662 HANDLE EvClientConnected 2025-06-24T20:41:00.262138Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpModifyACL, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T20:41:00.267681Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519618692221144832:2596] txid# 281474976710662 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976710662} 2025-06-24T20:41:00.267736Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519618692221144832:2596] txid# 281474976710662 SEND to# [59:7519618692221144831:2308] Source {TEvProposeTransactionStatus txid# 281474976710662 Status# 48} 2025-06-24T20:41:00.356011Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519618670746307373:2110] Handle TEvProposeTransaction 2025-06-24T20:41:00.356074Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519618670746307373:2110] TxId# 281474976710663 ProcessProposeTransaction 2025-06-24T20:41:00.356154Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519618670746307373:2110] Cookie# 0 userReqId# "" txid# 281474976710663 SEND to# [59:7519618692221144870:2616] 2025-06-24T20:41:00.367824Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519618692221144870:2616] txid# 281474976710663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "targetuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014ordinaryuser\022\030\022\026\n\024all-users@well-known\032\334\003eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDg0MDg2MCwiaWF0IjoxNzUwNzk3NjYwLCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.NxYZzVZHRb0UX7_kqk3TB6JnZe30jBLuqHvuVEC9bF5ROgh6yyuk8-zF4Ec1drT9YhLOQfEZjNg0b9uW-Jze9nK7kQwTfG0FCiG8EHfj2zZINcIX7DBr3c1n4m9SKYPfsVyE9dj7q3ydZbmi-yeWD8HxFik0jhT-mn1RaN5Taoa0Vmq1Yx61a_iaRfOq9LxHn6rz14gzG0dtvDI_yfixvbZOOUwAhmI1T0HDdq1aoGmpTYtqoyOGsyFzAbR3XCSWB-oYA14GPceOb-OBmD3VTovyO8IDPOQLt4cVWXfo3Wc2pmFa9Ha7WeUkoXXXnCF-OoqDvu4nETVdQuMi-X_Rjg\"\005Login*\210\001eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDg0MDg2MCwiaWF0IjoxNzUwNzk3NjYwLCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.**" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:52936" 2025-06-24T20:41:00.367941Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519618692221144870:2616] txid# 281474976710663 Bootstrap, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T20:41:00.367973Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519618692221144870:2616] txid# 281474976710663 Bootstrap, UserSID: ordinaryuser IsClusterAdministrator: 0 2025-06-24T20:41:00.368198Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1434: Actor# [59:7519618692221144870:2616] txid# 281474976710663 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-24T20:41:00.368255Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1469: Actor# [59:7519618692221144870:2616] txid# 281474976710663 HandleResolveDatabase, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-24T20:41:00.368316Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519618692221144870:2616] txid# 281474976710663 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:41:00.368679Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519618692221144870:2616] 
txid# 281474976710663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:41:00.368714Z node 59 :TX_PROXY ERROR: schemereq.cpp:1103: Actor# [59:7519618692221144870:2616] txid# 281474976710663, Access denied for ordinaryuser, attempt to manage user 2025-06-24T20:41:00.368847Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519618692221144870:2616] txid# 281474976710663, issues: { message: "Access denied for ordinaryuser" issue_code: 200000 severity: 1 } 2025-06-24T20:41:00.368887Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519618692221144870:2616] txid# 281474976710663 SEND to# [59:7519618692221144869:2314] Source {TEvProposeTransactionStatus Status# 5} 2025-06-24T20:41:00.371545Z node 59 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=59&id=ZTg5ODQyZDgtYjQyMWQ1YzMtMmQ0ZGU4ODgtYTAxYzM5MTY=, ActorId: [59:7519618692221144853:2314], ActorState: ExecuteState, TraceId: 01jyhtrc5h979dm6h4ya4c9y91, Create QueryResponse for error on request, msg: 2025-06-24T20:41:00.372036Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [59:7519618670746307373:2110] Handle TEvExecuteKqpTransaction 2025-06-24T20:41:00.372103Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [59:7519618670746307373:2110] TxId# 281474976710664 ProcessProposeKqpTransaction ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-72 [GOOD] Test command err: Starting YDB, grpc: 13461, msgbus: 6475 2025-06-24T20:37:14.478180Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617721599667806:2238];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:37:14.478520Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036c3/r3tmp/tmpRAwiFZ/pdisk_1.dat 2025-06-24T20:37:15.382496Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:37:15.399487Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:37:15.399580Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:37:15.471239Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:37:15.479194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13461, node 1 2025-06-24T20:37:15.659754Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:37:15.659780Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:37:15.659787Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:37:15.659906Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6475 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T20:37:16.007934Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519617721599667815:2117] Handle TEvNavigate describe path dc-1 2025-06-24T20:37:16.041956Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519617730189602935:2449] HANDLE EvNavigateScheme dc-1 2025-06-24T20:37:16.042482Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519617730189602935:2449] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T20:37:16.084362Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519617730189602935:2449] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-24T20:37:16.097149Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519617730189602935:2449] Handle TEvDescribeSchemeResult Forward to# [1:7519617730189602934:2448] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T20:37:16.123371Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519617721599667815:2117] Handle TEvProposeTransaction 2025-06-24T20:37:16.123403Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519617721599667815:2117] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T20:37:16.123542Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519617721599667815:2117] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:7519617730189602945:2455] 2025-06-24T20:37:16.338017Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519617730189602945:2455] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T20:37:16.338112Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519617730189602945:2455] txid# 281474976715657 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T20:37:16.338149Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519617730189602945:2455] txid# 281474976715657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T20:37:16.338216Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519617730189602945:2455] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:37:16.338553Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519617730189602945:2455] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:37:16.338734Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519617730189602945:2455] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-24T20:37:16.338817Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519617730189602945:2455] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T20:37:16.339000Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519617730189602945:2455] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T20:37:16.339836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:37:16.349209Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519617730189602945:2455] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T20:37:16.349312Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519617730189602945:2455] txid# 281474976715657 SEND to# [1:7519617730189602943:2453] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} waiting... 
2025-06-24T20:37:16.385751Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519617721599667815:2117] Handle TEvProposeTransaction 2025-06-24T20:37:16.385778Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519617721599667815:2117] TxId# 281474976715658 ProcessProposeTransaction 2025-06-24T20:37:16.385809Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519617721599667815:2117] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7519617730189602987:2493] 2025-06-24T20:37:16.391914Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519617730189602987:2493] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T20:37:16.391993Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519617730189602987:2493] txid# 281474976715658 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T20:37:16.392008Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519617730189602987:2493] txid# 281474976715658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T20:37:16.392081Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519617730189602987:2493] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:37:16.392382Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519617730189602987:2493] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:37:16.392481Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519617730189602987:2493] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:37:16.392523Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519617730189602987:2493] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-24T20:37:16.392682Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519617730189602987:2493] txid# 281474976715658 HANDLE EvClientConnected 2025-06-24T20:37:16.393178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715 ... 20882580420:2303], DatabaseId: /dc-1, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-24T20:41:06.455898Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519618695112775774:2114] Handle TEvProposeTransaction 2025-06-24T20:41:06.455945Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519618695112775774:2114] TxId# 281474976715661 ProcessProposeTransaction 2025-06-24T20:41:06.456015Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519618695112775774:2114] Cookie# 0 userReqId# "" txid# 281474976715661 SEND to# [59:7519618720882580494:2589] 2025-06-24T20:41:06.461467Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519618720882580494:2589] txid# 281474976715661 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/dc-1" 2025-06-24T20:41:06.461534Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519618720882580494:2589] txid# 281474976715661 Bootstrap, UserSID: metadata@system CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T20:41:06.461557Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519618720882580494:2589] txid# 281474976715661 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 0 2025-06-24T20:41:06.461783Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1434: Actor# [59:7519618720882580494:2589] txid# 281474976715661 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-24T20:41:06.461823Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1469: Actor# [59:7519618720882580494:2589] txid# 281474976715661 HandleResolveDatabase, UserSID: metadata@system CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-24T20:41:06.462408Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [59:7519618720882580494:2589] txid# 281474976715661 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T20:41:06.462507Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519618720882580494:2589] txid# 281474976715661 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:41:06.462787Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519618720882580494:2589] txid# 281474976715661 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:41:06.462954Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519618720882580494:2589] HANDLE EvNavigateKeySetResult, txid# 281474976715661 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 
72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:41:06.463022Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519618720882580494:2589] txid# 281474976715661 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715661 TabletId# 72057594046644480} 2025-06-24T20:41:06.463223Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519618720882580494:2589] txid# 281474976715661 HANDLE EvClientConnected 2025-06-24T20:41:06.466697Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519618720882580494:2589] txid# 281474976715661 Status StatusAlreadyExists HANDLE {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715661 Reason# Check failed: path: '/dc-1/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)} 2025-06-24T20:41:06.466869Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519618720882580494:2589] txid# 281474976715661, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:41:06.466913Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519618720882580494:2589] txid# 281474976715661 SEND to# [59:7519618720882580420:2303] Source {TEvProposeTransactionStatus txid# 281474976715661 Status# 48} 2025-06-24T20:41:06.492040Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519618695112775774:2114] Handle TEvProposeTransaction 2025-06-24T20:41:06.492081Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519618695112775774:2114] TxId# 281474976715662 ProcessProposeTransaction 2025-06-24T20:41:06.492132Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519618695112775774:2114] Cookie# 0 userReqId# "" txid# 281474976715662 SEND to# [59:7519618720882580518:2601] 2025-06-24T20:41:06.494945Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519618720882580518:2601] txid# 281474976715662 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "targetuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:56482" 2025-06-24T20:41:06.495036Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519618720882580518:2601] txid# 281474976715662 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T20:41:06.495062Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519618720882580518:2601] txid# 281474976715662 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T20:41:06.495115Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519618720882580518:2601] txid# 281474976715662 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:41:06.495539Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519618720882580518:2601] txid# 281474976715662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:41:06.495672Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519618720882580518:2601] 
HANDLE EvNavigateKeySetResult, txid# 281474976715662 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:41:06.495738Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519618720882580518:2601] txid# 281474976715662 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715662 TabletId# 72057594046644480} 2025-06-24T20:41:06.495902Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519618720882580518:2601] txid# 281474976715662 HANDLE EvClientConnected 2025-06-24T20:41:06.506452Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519618720882580518:2601] txid# 281474976715662 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715662} 2025-06-24T20:41:06.506521Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519618720882580518:2601] txid# 281474976715662 SEND to# [59:7519618720882580517:2296] Source {TEvProposeTransactionStatus txid# 281474976715662 Status# 48} 2025-06-24T20:41:06.599295Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519618695112775774:2114] Handle TEvProposeTransaction 2025-06-24T20:41:06.599335Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519618695112775774:2114] TxId# 281474976715663 ProcessProposeTransaction 2025-06-24T20:41:06.599392Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519618695112775774:2114] Cookie# 0 userReqId# "" txid# 281474976715663 SEND to# [59:7519618720882580551:2615] 2025-06-24T20:41:06.602500Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519618720882580551:2615] txid# 281474976715663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveUser { User: "targetuser" MissingOk: false } } } } UserToken: "\n\024ordinaryuser@builtin\022\030\022\026\n\024all-users@well-known\032\024ordinaryuser@builtin\"\007Builtin*\027ordi****ltin (32520BBF)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:56492" 2025-06-24T20:41:06.602582Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519618720882580551:2615] txid# 281474976715663 Bootstrap, UserSID: ordinaryuser@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T20:41:06.602608Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519618720882580551:2615] txid# 281474976715663 Bootstrap, UserSID: ordinaryuser@builtin IsClusterAdministrator: 0 2025-06-24T20:41:06.602822Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1434: Actor# [59:7519618720882580551:2615] txid# 281474976715663 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-24T20:41:06.602863Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1469: Actor# [59:7519618720882580551:2615] txid# 281474976715663 HandleResolveDatabase, UserSID: ordinaryuser@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-24T20:41:06.602914Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519618720882580551:2615] txid# 281474976715663 TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:41:06.606996Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519618720882580551:2615] txid# 281474976715663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 
2025-06-24T20:41:06.607045Z node 59 :TX_PROXY ERROR: schemereq.cpp:1103: Actor# [59:7519618720882580551:2615] txid# 281474976715663, Access denied for ordinaryuser@builtin, attempt to manage user 2025-06-24T20:41:06.607216Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519618720882580551:2615] txid# 281474976715663, issues: { message: "Access denied for ordinaryuser@builtin" issue_code: 200000 severity: 1 } 2025-06-24T20:41:06.607257Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519618720882580551:2615] txid# 281474976715663 SEND to# [59:7519618720882580550:2313] Source {TEvProposeTransactionStatus Status# 5} 2025-06-24T20:41:06.608537Z node 59 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=59&id=NjJlM2RkOGYtMzIzYzA4NTEtNGY1YWUyZDctNmEyNzRkZTk=, ActorId: [59:7519618720882580536:2313], ActorState: ExecuteState, TraceId: 01jyhtrj825akzj7wfvsdj13v2, Create QueryResponse for error on request, msg: 2025-06-24T20:41:06.611505Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [59:7519618695112775774:2114] Handle TEvExecuteKqpTransaction 2025-06-24T20:41:06.611548Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [59:7519618695112775774:2114] TxId# 281474976715664 ProcessProposeKqpTransaction >> TNodeBrokerTest::ShiftIdRangeRemoveActive [GOOD] >> TxUsage::WriteToTopic_Demo_47_Table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationReuseExpiredID [GOOD] Test command err: 2025-06-24T20:41:40.210465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:40.210534Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> TNodeBrokerTest::ShiftIdRangeRemoveExpired >> TNodeBrokerTest::UpdateNodesLogEmptyEpoch [GOOD] >> TDynamicNameserverTest::CacheMissPipeDisconnect-EnableNodeBrokerDeltaProtocol-true [GOOD] >> TDynamicNameserverTest::CacheMissSameDeadline-EnableNodeBrokerDeltaProtocol-false >> TNodeBrokerTest::NodesV2BackMigrationShiftIdRange ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> TNetClassifierTest::TestInitFromRemoteSource [GOOD] Test command err: 2025-06-24T20:41:38.695652Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618857282950406:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:38.695881Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004456/r3tmp/tmpB7FAay/pdisk_1.dat 2025-06-24T20:41:39.695943Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:41:39.967074Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:39.992041Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:39.992151Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:40.012827Z node 1 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#26,[::1]:11449) connection closed with error: Connection refused 2025-06-24T20:41:40.015892Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:40.024104Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:41:40.025946Z node 1 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-24T20:41:40.311915Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.112268s 2025-06-24T20:41:40.312002Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.112377s 2025-06-24T20:41:40.321076Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:41:40.321103Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:41:40.321110Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:41:40.321239Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> TNodeBrokerTest::NodesMigrationReuseID >> TNodeBrokerTest::NodesV2BackMigrationManyNodesInterrupted ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ShiftIdRangeRemoveActive [GOOD] Test command err: 2025-06-24T20:41:41.293089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: 
Cannot subscribe to console configs 2025-06-24T20:41:41.293156Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-24T20:41:42.794856Z node 1 :NODE_BROKER ERROR: node_broker.cpp:1090: [DB] Removing node with wrong ID 1026 not in range (1023, 1025] >> TSlotIndexesPoolTest::Init [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix4 [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix5 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::UpdateNodesLogEmptyEpoch [GOOD] Test command err: 2025-06-24T20:41:42.476961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:42.477042Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> TNodeBrokerTest::NodesMigration2000Nodes [GOOD] >> TNodeBrokerTest::NodesAlreadyMigrated |90.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap |90.7%| [LD] {RESULT} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap |90.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap >> TDynamicNameserverTest::CacheMissSameDeadline-EnableNodeBrokerDeltaProtocol-false [GOOD] >> AutoConfig::GetServicePoolsWith1CPU [GOOD] >> TDynamicNameserverTest::BasicFunctionality-EnableNodeBrokerDeltaProtocol-false |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TSlotIndexesPoolTest::Init [GOOD] >> TNodeBrokerTest::NodesV2BackMigration [GOOD] >> TNodeBrokerTest::NodesMigrationExpireActive >> KqpSqlIn::KeySuffix_OnlyTail |90.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetServicePoolsWith1CPU [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigration2000Nodes [GOOD] Test command err: 2025-06-24T20:41:38.522144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:38.522222Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissSameDeadline-EnableNodeBrokerDeltaProtocol-false [GOOD] Test command err: 2025-06-24T20:41:44.818186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:44.818256Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR cookie 1 ... waiting for cache miss (done) 2025-06-24T20:41:45.831455Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:45.831529Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... waiting for cache miss (done) >> TNodeBrokerTest::NodesMigration999Nodes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesV2BackMigration [GOOD] Test command err: 2025-06-24T20:41:42.446515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:42.446584Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 2 ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::TooBigColumn-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 4378, MsgBus: 26833 2025-06-24T20:35:44.526541Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617336962368394:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:44.526601Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0033c3/r3tmp/tmpR322KR/pdisk_1.dat 2025-06-24T20:35:45.041713Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:45.055496Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617336962368377:2079] 1750797344525925 != 1750797344525928 2025-06-24T20:35:45.084823Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:45.084936Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:45.089783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4378, node 1 2025-06-24T20:35:45.310941Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:35:45.310990Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:35:45.311001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:35:45.311173Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:35:45.565413Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26833 TClient is connected to server localhost:26833 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:46.233782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:35:46.264982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:35:46.275058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:46.433053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:46.673812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:46.794443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:35:49.388327Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617358437206500:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:49.388469Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:49.530590Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617336962368394:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:49.530672Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:35:49.643730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:49.760247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:49.847897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:49.932560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:50.072557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:50.152997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:50.229888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:50.335392Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519617362732174457:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:50.335476Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:50.335703Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617362732174462:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:50.344833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:50.376667Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617362732174464:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:35:50.467083Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617362732174517:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:52.682446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... 8897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:26.740675Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21366, node 4 2025-06-24T20:41:26.915856Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:41:26.915890Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:41:26.915900Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:41:26.916092Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25214 2025-06-24T20:41:27.516794Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25214 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:41:28.027364Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:41:28.039431Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:41:28.058824Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:28.236534Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:28.580491Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:28.706056Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:31.494729Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519618804609216798:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:31.494827Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:41:34.482848Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519618838968956729:2373], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:34.483188Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:34.526194Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:34.579830Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:34.638208Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:34.693485Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:34.751657Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:34.966179Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:35.065662Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:35.225839Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519618843263924693:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:35.225998Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:35.226533Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519618843263924698:2441], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:35.232973Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:41:35.267740Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519618843263924700:2442], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:41:35.345419Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519618843263924751:3433] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:41:39.757107Z node 4 :TX_DATASHARD ERROR: check_data_tx_unit.cpp:186: Transaction write column value of 20971522 bytes is larger than the allowed threshold 2025-06-24T20:41:39.757260Z node 4 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976710672 at tablet 72075186224037911 status: EXEC_ERROR errors: BAD_ARGUMENT (Transaction write column value of 20971522 bytes is larger than the allowed threshold) | 2025-06-24T20:41:39.759240Z node 4 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:864: ActorId: [4:7519618860443794247:2481] TxId: 281474976710672. Ctx: { TraceId: 01jyhtshs7egc4sgv34v039m1m, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ZDY1NGY5YTctZmFmMWJhMDYtYWZlMjk5OS01ZTQ4ZGNhZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. EXEC_ERROR: [BAD_ARGUMENT] Transaction write column value of 20971522 bytes is larger than the allowed threshold; 2025-06-24T20:41:39.759677Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=ZDY1NGY5YTctZmFmMWJhMDYtYWZlMjk5OS01ZTQ4ZGNhZA==, ActorId: [4:7519618856148826891:2481], ActorState: ExecuteState, TraceId: 01jyhtshs7egc4sgv34v039m1m, Create QueryResponse for error on request, msg:
: Error: Error executing transaction (ExecError): Execution failed
: Error: [BAD_ARGUMENT] Transaction write column value of 20971522 bytes is larger than the allowed threshold >> TNodeBrokerTest::DoNotReuseDynnodeIdsBelowMinDynamicNodeId [GOOD] >> TNodeBrokerTest::ConfigPipelining |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> TNodeBrokerTest::NodesMigration1001Nodes [GOOD] >> KqpRanges::LiteralOrCompisite [GOOD] >> KqpRanges::LiteralOrCompisiteCollision >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeLeftBorder+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeLeftBorder-EvWrite >> TSlotIndexesPoolTest::Basic [GOOD] >> BackupRestore::RestoreViewToDifferentDatabase [FAIL] >> BackupRestore::RestoreViewDependentOnAnotherView >> TNodeBrokerTest::NodesMigrationReuseID [GOOD] >> DataShardReadIterator::ShouldReadRangeCellVec >> TNetClassifierTest::TestInitFromFile ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigration999Nodes [GOOD] Test command err: 2025-06-24T20:41:42.344063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:42.344158Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) |90.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers |90.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers |90.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers >> TNodeBrokerTest::ShiftIdRangeRemoveExpired [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigration1001Nodes [GOOD] Test command err: 2025-06-24T20:41:42.958719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:42.958793Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TSlotIndexesPoolTest::Basic [GOOD] >> KqpRanges::NullInKeySuffix [GOOD] >> KqpRanges::NullInPredicate >> DataShardReadIterator::ShouldStopWhenNodeDisconnected >> DataShardReadIterator::ShouldReadKeyCellVec ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationReuseID [GOOD] Test command err: 2025-06-24T20:41:46.789842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:46.789913Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> DataShardReadIteratorSysTables::ShouldRead >> TNodeBrokerTest::NodesMigrationExpireActive [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ShiftIdRangeRemoveExpired [GOOD] Test command err: 2025-06-24T20:41:46.111944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:46.112040Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) 2025-06-24T20:41:48.045364Z node 1 :NODE_BROKER ERROR: node_broker.cpp:1090: [DB] Removing node with wrong ID 1026 not in range (1023, 1025] |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest >> TNodeBrokerTest::NodesV2BackMigrationShiftIdRange [GOOD] >> TDynamicNameserverTest::BasicFunctionality-EnableNodeBrokerDeltaProtocol-false [GOOD] >> GracefulShutdown::TTxGracefulShutdown >> TNodeBrokerTest::ConfigPipelining [GOOD] >> DataShardReadIteratorConsistency::LocalSnapshotReadWithPlanQueueRace >> TPersQueueTest::DefaultMeteringMode [GOOD] >> KqpKv::ReadRows_PgValue [GOOD] >> KqpKv::ReadRows_PgKey >> DataShardReadIterator::ShouldRangeReadReverseLeftInclusive ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationExpireActive [GOOD] Test command err: 2025-06-24T20:41:48.148409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:48.148502Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesV2BackMigrationShiftIdRange [GOOD] Test command err: 2025-06-24T20:41:46.514835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:46.514915Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-24T20:41:49.009890Z node 1 :NODE_BROKER ERROR: node_broker.cpp:1150: [DB] Removing node with wrong ID 1025 not in range (1023, 1024] ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 2 ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR >> DataShardReadIterator::ShouldHandleReadAck >> TNodeBrokerTest::NodesAlreadyMigrated [GOOD] >> LocalPartitionReader::FeedSlowly >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataWithFilter-default.txt] [FAIL] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ConfigPipelining [GOOD] Test command err: 2025-06-24T20:41:42.382172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:42.382241Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-24T20:41:48.905743Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:48.905814Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-24T20:41:49.306139Z node 9 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host1:1001: ERROR_TEMP: No free node IDs ... waiting for commit ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 1 ... waiting for commit (done) ... 
unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest >> LocalPartitionReader::FeedSlowly [GOOD] >> test_sql_streaming.py::test[suites-ReadTopicWithSchema-default.txt] >> LocalPartitionReader::Simple >> TPersQueueTest::TestReadPartitionByGroupId [GOOD] >> TPersQueueTest::SrcIdCompatibility >> LocalPartitionReader::Simple [GOOD] |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinue+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinue-EvWrite |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest >> LocalPartitionReader::FeedSlowly [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesAlreadyMigrated [GOOD] Test command err: 2025-06-24T20:41:47.369403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:47.369485Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 2 ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 2 ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 2 ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 2 ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 2 ... 
unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest >> LocalPartitionReader::Simple [GOOD] >> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_FlatIndex |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest >> GracefulShutdown::TTxGracefulShutdown [GOOD] >> AutoConfig::GetASPoolsith1CPU [GOOD] |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::DefaultMeteringMode [GOOD] Test command err: 2025-06-24T20:35:19.860088Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617229481985429:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:19.860198Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:35:19.935872Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519617229543136198:2215];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00496c/r3tmp/tmp74B1WE/pdisk_1.dat 2025-06-24T20:35:20.220882Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:35:20.217472Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:35:20.279570Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:35:20.517303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:20.517417Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:20.517981Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:20.518052Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:20.522370Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:20.542919Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:35:20.543075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:20.544525Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21908, node 1 2025-06-24T20:35:20.688806Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00496c/r3tmp/yandexN595NE.tmp 2025-06-24T20:35:20.688841Z 
node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00496c/r3tmp/yandexN595NE.tmp 2025-06-24T20:35:20.689055Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00496c/r3tmp/yandexN595NE.tmp 2025-06-24T20:35:20.689242Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:35:20.792727Z INFO: TTestServer started on Port 10990 GrpcPort 21908 2025-06-24T20:35:20.851311Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:20.937603Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10990 PQClient connected to localhost:21908 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:21.235478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:35:21.296917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T20:35:23.911649Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617246661855482:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:23.911654Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617246661855470:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:23.911944Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:23.915417Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617246723005581:2276], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:23.915481Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519617246723005570:2273], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:23.915700Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:35:23.919757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:35:23.955306Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519617246723005585:2168] txid# 281474976720657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T20:35:23.970002Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617246661855494:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T20:35:23.971481Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519617246723005584:2277], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T20:35:24.049868Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617250956822883:2748] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:24.070065Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519617251017972907:2174] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:35:24.281034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:35:24.286924Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617250956822893:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:35:24.289009Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519617251017972914:2281], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:35:24.287414Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YmE0OTc2ZGItMjMwMWQ3OTUtZGNjZDdiZTYtOTgyZDUwY2M=, ActorId: [1:7519617246661855451:2296], ActorState: ExecuteState, TraceId: 01jyhte3kx6qy8wxak4jrx65qg, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:35:24.289644Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 ... InBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "ttt" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/ttt" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } } BootstrapConfig { } SourceActor { RawX1: 7519618815902605163 RawX2: 124554053792 } Partitions { Partition { PartitionId: 0 } } 2025-06-24T20:41:47.466568Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:41:47.481387Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:41:47.481456Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037892] Try execute txs with state CALCULATED 2025-06-24T20:41:47.481494Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037892] TxId 281474976720672, State CALCULATED 2025-06-24T20:41:47.481547Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72075186224037892] TxId 281474976720672 State CALCULATED FrontTxId 281474976720672 2025-06-24T20:41:47.481586Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976720672, NewState WAIT_RS 2025-06-24T20:41:47.481636Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72075186224037892] TxId 281474976720672 moved from CALCULATED to WAIT_RS 2025-06-24T20:41:47.481732Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3988: [PQ: 72075186224037892] Send TEvTxProcessing::TEvReadSet to 0 receivers. Wait TEvTxProcessing::TEvReadSet from 0 senders. 
2025-06-24T20:41:47.481790Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4482: [PQ: 72075186224037892] HaveParticipantsDecision 1 2025-06-24T20:41:47.481890Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976720672, NewState EXECUTING 2025-06-24T20:41:47.481937Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72075186224037892] TxId 281474976720672 moved from WAIT_RS to EXECUTING 2025-06-24T20:41:47.481967Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4512: [PQ: 72075186224037892] Received 0, Expected 1 2025-06-24T20:41:47.482129Z node 29 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1750797707491, TxId 281474976720672 2025-06-24T20:41:47.482695Z node 29 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T20:41:47.488070Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T20:41:47.488172Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T20:41:47.488335Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3553: [PQ: 72075186224037892] Handle TEvPQ::TEvTxCommitDone Step 1750797707491, TxId 281474976720672, Partition 0 2025-06-24T20:41:47.488381Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037892] Try execute txs with state EXECUTING 2025-06-24T20:41:47.488420Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037892] TxId 281474976720672, State EXECUTING 2025-06-24T20:41:47.488465Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72075186224037892] TxId 281474976720672 State EXECUTING FrontTxId 281474976720672 2025-06-24T20:41:47.488494Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4512: [PQ: 72075186224037892] Received 1, Expected 1 2025-06-24T20:41:47.488545Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4185: [PQ: 72075186224037892] TxId: 281474976720672 send TEvPersQueue::TEvProposeTransactionResult(COMPLETE) 2025-06-24T20:41:47.488588Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4516: [PQ: 72075186224037892] complete TxId 281474976720672 2025-06-24T20:41:47.489049Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72075186224037892] Apply new config PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "ttt" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/ttt" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } 2025-06-24T20:41:47.489139Z node 29 
:PERSQUEUE NOTICE: pq_impl.cpp:1121: [PQ: 72075186224037892] metering mode METERING_MODE_REQUEST_UNITS 2025-06-24T20:41:47.489301Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4534: [PQ: 72075186224037892] delete partitions for TxId 281474976720672 2025-06-24T20:41:47.489335Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976720672, NewState EXECUTED 2025-06-24T20:41:47.489388Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72075186224037892] TxId 281474976720672 moved from EXECUTING to EXECUTED 2025-06-24T20:41:47.489434Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72075186224037892] write key for TxId 281474976720672 2025-06-24T20:41:47.489945Z node 29 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 281474976720672] save tx TxId: 281474976720672 State: EXECUTED MinStep: 1750797707001 MaxStep: 18446744073709551615 Step: 1750797707491 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "ttt" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/ttt" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } } BootstrapConfig { } SourceActor { RawX1: 7519618815902605163 RawX2: 124554053792 } Partitions { Partition { PartitionId: 0 } } 2025-06-24T20:41:47.490321Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:41:47.506899Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:41:47.506966Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037892] Try execute txs with state EXECUTED 2025-06-24T20:41:47.507001Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037892] TxId 281474976720672, State EXECUTED 2025-06-24T20:41:47.507051Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72075186224037892] TxId 281474976720672 State EXECUTED FrontTxId 281474976720672 2025-06-24T20:41:47.507088Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4007: [PQ: 72075186224037892] TPersQueue::SendEvReadSetAckToSenders 2025-06-24T20:41:47.507122Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976720672, NewState WAIT_RS_ACKS 2025-06-24T20:41:47.507179Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72075186224037892] TxId 281474976720672 moved from EXECUTED to WAIT_RS_ACKS 2025-06-24T20:41:47.507260Z node 29 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976720672] PredicateAcks: 0/0 2025-06-24T20:41:47.507277Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4560: [PQ: 
72075186224037892] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-24T20:41:47.507313Z node 29 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976720672] PredicateAcks: 0/0 2025-06-24T20:41:47.507353Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4621: [PQ: 72075186224037892] add an TxId 281474976720672 to the list for deletion 2025-06-24T20:41:47.507418Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976720672, NewState DELETING 2025-06-24T20:41:47.507471Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3852: [PQ: 72075186224037892] delete key for TxId 281474976720672 2025-06-24T20:41:47.507603Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:41:47.517571Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:41:47.517632Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-06-24T20:41:47.517678Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037892] TxId 281474976720672, State DELETING 2025-06-24T20:41:47.517726Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4571: [PQ: 72075186224037892] delete TxId 281474976720672 2025-06-24T20:41:47.539508Z node 29 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-24T20:41:47.539663Z node 29 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1186: Describe topic actor for path /Root/PQ/ttt 2025-06-24T20:41:49.551657Z node 29 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [29:7519618906096920223:2436] TxId: 281474976720673. Ctx: { TraceId: 01jyhtstyp1mz3f4668g2xmptm, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=29&id=MzViNjUwYzQtYmI1ZTQwNTktYTlkYWMzNGQtZmRmNjMwYzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 30 2025-06-24T20:41:49.552770Z node 29 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [29:7519618906096920230:2436], TxId: 281474976720673, task: 3. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=29&id=MzViNjUwYzQtYmI1ZTQwNTktYTlkYWMzNGQtZmRmNjMwYzk=. TraceId : 01jyhtstyp1mz3f4668g2xmptm. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [29:7519618906096920223:2436], status: UNAVAILABLE, reason: {
: Error: Terminate execution } >> TNetClassifierTest::TestInitFromFile [GOOD] |90.7%| [TA] $(B)/ydb/core/backup/impl/ut_local_partition_reader/test-results/unittest/{meta.json ... results_accumulator.log} >> TPersQueueTest::TestWriteSessionsConflicts [GOOD] >> TPersQueueTest::TestReadRuleServiceTypePassword |90.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |90.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |90.7%| [TA] {RESULT} $(B)/ydb/core/backup/impl/ut_local_partition_reader/test-results/unittest/{meta.json ... results_accumulator.log} |90.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |90.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetASPoolsith1CPU [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> GracefulShutdown::TTxGracefulShutdown [GOOD] Test command err: 2025-06-24T20:41:47.696482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:47.696610Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-24T20:41:48.119703Z node 1 :NODE_BROKER ERROR: node_broker.cpp:797: [Dirty] Configured lease duration (10.000000s) is too small. Using min. value: 300.000000s 2025-06-24T20:41:48.140184Z node 1 :NODE_BROKER ERROR: node_broker.cpp:797: [Committed] Configured lease duration (10.000000s) is too small. Using min. value: 300.000000s 2025-06-24T20:41:51.660851Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:51.660921Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> TFlatTableExecutor_IndexLoading::Scan_FlatIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_BTreeIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> TNetClassifierTest::TestInitFromFile [GOOD] Test command err: 2025-06-24T20:41:49.943162Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618904920604895:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:49.946217Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004447/r3tmp/tmp4Ot1zp/pdisk_1.dat 2025-06-24T20:41:50.351397Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618904920604697:2079] 1750797709874224 != 1750797709874227 2025-06-24T20:41:50.386876Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:50.394631Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:50.398640Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:50.406250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:41:50.510417Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/004447/r3tmp/yandexTGTY6Q.tmp 2025-06-24T20:41:50.510442Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/004447/r3tmp/yandexTGTY6Q.tmp 2025-06-24T20:41:50.510762Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/004447/r3tmp/yandexTGTY6Q.tmp 2025-06-24T20:41:50.510899Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:41:50.925626Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TNetClassifierTest::TestInitFromBadlyFormattedFile >> KqpSqlIn::KeySuffix_NotPointPrefix [GOOD] >> KqpSqlIn::ComplexKey |90.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut |90.8%| [LD] {RESULT} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut |90.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut >> TNodeBrokerTest::NodesV2BackMigrationManyNodesInterrupted [GOOD] >> DataShardReadIterator::ShouldReadRangeCellVec [GOOD] >> DataShardReadIterator::ShouldReadRangeArrow >> TFlatTableExecutor_IndexLoading::Scan_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_History_FlatIndex >> DataShardReadIterator::ShouldReadRangePrefix5 [GOOD] >> DataShardReadIterator::ShouldReceiveErrorAfterSplit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesV2BackMigrationManyNodesInterrupted [GOOD] Test command err: 2025-06-24T20:41:46.671477Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:46.671558Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for first batch is committed ... blocking NKikimr::TEvTablet::TEvCommitResult from TABLET_ACTOR to FLAT_EXECUTOR cookie 2 ... blocking NKikimr::TEvTablet::TEvCommitResult from TABLET_ACTOR to FLAT_EXECUTOR cookie 1 ... waiting for first batch is committed (done) >> DataShardReadIterator::ShouldReadKeyCellVec [GOOD] >> DataShardReadIterator::ShouldReadKeyArrow >> DataShardReadIteratorSysTables::ShouldRead [GOOD] >> DataShardReadIteratorSysTables::ShouldNotReadUserTableUsingLocalTid >> DataShardReadIteratorConsistency::LocalSnapshotReadWithPlanQueueRace [GOOD] >> DataShardReadIteratorConsistency::LocalSnapshotReadHasRequiredDependencies >> DataShardReadIterator::ShouldRangeReadReverseLeftInclusive [GOOD] >> DataShardReadIterator::ShouldRangeReadReverseLeftNonInclusive >> DataShardReadIterator::ShouldHandleReadAck [GOOD] >> DataShardReadIterator::ShouldHandleOutOfOrderReadAck >> BasicUsage::WriteSessionSwitchDatabases [GOOD] >> KqpKv::ReadRows_PgKey [GOOD] >> KqpKv::ReadRows_Nulls |90.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |90.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |90.8%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut >> TFlatTableExecutor_VersionedRows::TestVersionedRowsSmallBlobs [GOOD] >> KqpSqlIn::KeySuffix_OnlyTail [GOOD] >> KqpSqlIn::KeyTypeMissmatch_Int >> TFlatTableExecutor_VersionedRows::TestVersionedRowsLargeBlobs >> TNetClassifierTest::TestInitFromBadlyFormattedFile [GOOD] >> BackupRestore::RestoreViewDependentOnAnotherView [FAIL] >> BackupRestore::RestoreKesusResources >> KqpRanges::NullInPredicate [GOOD] >> KqpRanges::NullInPredicateRow >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeLeftBorder-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeRightBorder+EvWrite >> TFlatTableExecutor_IndexLoading::Scan_History_FlatIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_History_BTreeIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> TNetClassifierTest::TestInitFromBadlyFormattedFile [GOOD] Test command err: 2025-06-24T20:41:55.618127Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618928725081241:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:55.637399Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004443/r3tmp/tmpbnprWu/pdisk_1.dat 2025-06-24T20:41:56.353573Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:56.371440Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618928725081215:2079] 1750797715578047 != 1750797715578050 2025-06-24T20:41:56.378987Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> 
Disconnected 2025-06-24T20:41:56.379083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:56.388520Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:41:56.439061Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/004443/r3tmp/yandextUoWYy.tmp 2025-06-24T20:41:56.439084Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/004443/r3tmp/yandextUoWYy.tmp 2025-06-24T20:41:56.439231Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:344: invalid NetData format 2025-06-24T20:41:56.439275Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: /home/runner/.ya/build/build_root/2btm/004443/r3tmp/yandextUoWYy.tmp 2025-06-24T20:41:56.439419Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> KqpRanges::LiteralOrCompisiteCollision [GOOD] >> KqpRanges::Like ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::WriteSessionSwitchDatabases [GOOD] Test command err: 2025-06-24T20:39:21.775313Z :WriteSessionNoAvailableDatabase INFO: Random seed for debugging is 1750797561775275 2025-06-24T20:39:22.740111Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618274713180815:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:22.740166Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047b3/r3tmp/tmpLpyef5/pdisk_1.dat 2025-06-24T20:39:23.367968Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:39:23.417253Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:39:23.520879Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:39:23.799324Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:39:23.883349Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:39:23.903330Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:39:23.968824Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:39:23.968962Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:39:23.979520Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:39:24.005215Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, 
(0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:39:24.005291Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:39:24.030513Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:39:24.030640Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:39:24.035936Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10280, node 1 2025-06-24T20:39:24.397825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0047b3/r3tmp/yandexiurBba.tmp 2025-06-24T20:39:24.397852Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0047b3/r3tmp/yandexiurBba.tmp 2025-06-24T20:39:24.397995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0047b3/r3tmp/yandexiurBba.tmp 2025-06-24T20:39:24.398100Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:39:24.594972Z INFO: TTestServer started on Port 21735 GrpcPort 10280 TClient is connected to server localhost:21735 PQClient connected to localhost:10280 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:39:25.569941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 
2025-06-24T20:39:27.744766Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618274713180815:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:27.744835Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:39:29.644472Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618304777952816:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:29.644571Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:29.647199Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618304777952828:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:39:29.652185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:39:29.887634Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618304777952830:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720661 completed, doublechecking } 2025-06-24T20:39:29.964394Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618304777952933:2704] txid# 281474976720662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:39:30.377439Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519618304777952943:2314], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:39:30.372201Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519618301928256543:2277], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:39:30.374350Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=YjRmM2M1NjMtNDgxMjczM2YtN2Q0MGJiYzgtOTZjNGE3ZWQ=, ActorId: [2:7519618301928256518:2271], ActorState: ExecuteState, TraceId: 01jyhtnks302z3g0wq3pzc78gs, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:39:30.379697Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OGM4YjQ5NTUtNDc2NzRkMGMtNTkyNDc5NTMtZGM3ODUzNWM=, ActorId: [1:7519618304777952813:2300], ActorState: ExecuteState, TraceId: 01jyhtnkjmcnmp0tjkkv495ehw, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:39:30.381871Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:39:30.382097Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:39:30.382974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:39:30.611532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:39:30.846918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ ... 
on.cpp:873: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 TotalPartitions: 1 SourceIdMaxCounts: 6000000 } 2025-06-24T20:41:54.006963Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][rt3.dc1--test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 4 DataSize: 0 UsedReserveSize: 0 2025-06-24T20:41:54.007086Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][rt3.dc1--test-topic] ProcessPendingStats. PendingUpdates size 1 2025-06-24T20:41:54.491985Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=320, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:41:56.347285Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4d833611-5f9a162-e991adb1-51abcf10_0] MessageGroupId [src_id] Write 1 messages with Id from 1 to 1 >>> Got event: ReadyToAcceptEvent >>> Ready to answer: ok 2025-06-24T20:41:56.348635Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4d833611-5f9a162-e991adb1-51abcf10_0] MessageGroupId [src_id] Write session: try to update token 2025-06-24T20:41:56.348685Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4d833611-5f9a162-e991adb1-51abcf10_0] MessageGroupId [src_id] Send 1 message(s) (0 left), first sequence number is 3 2025-06-24T20:41:56.351482Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: src_id|4d833611-5f9a162-e991adb1-51abcf10_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-24T20:41:56.351761Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-06-24T20:41:56.353040Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T20:41:56.353086Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T20:41:56.353182Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 1 2025-06-24T20:41:56.354263Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::IEventHandle 2025-06-24T20:41:56.355303Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T20:41:56.355356Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T20:41:56.355412Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72075186224037892] got client message topic: rt3.dc1--test-topic partition: 0 SourceId: '\0src_id' SeqNo: 3 partNo : 0 messageNo: 1 size 98 offset: -1 2025-06-24T20:41:56.355614Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 part blob processing sourceId '\0src_id' seqNo 3 partNo 0 2025-06-24T20:41:56.356543Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 part 
blob complete sourceId '\0src_id' seqNo 3 partNo 0 FormedBlobsCount 0 NewHead: Offset 2 PartNo 0 PackedSize 172 count 1 nextOffset 3 batches 1 2025-06-24T20:41:56.357050Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Add new write blob: topic 'rt3.dc1--test-topic' partition 0 compactOffset 2,1 HeadOffset 2 endOffset 2 curOffset 3 d0000000000_00000000000000000002_00000_0000000001_00000? size 160 WTime 1750797716355 2025-06-24T20:41:56.357206Z node 4 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T20:41:56.357276Z node 4 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 2 partNo 0 count 1 size 160 2025-06-24T20:41:56.366882Z node 4 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 2 count 1 size 160 actorID [4:7519618411804676113:2360] 2025-06-24T20:41:56.367005Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 105 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T20:41:56.367053Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T20:41:56.367096Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0src_id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-06-24T20:41:56.367328Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=480, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:41:56.367387Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1374: [PQ: 72075186224037892] Topic 'rt3.dc1--test-topic' counters. CacheSize 480 CachedBlobs 3 2025-06-24T20:41:56.367421Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-24T20:41:56.367797Z node 4 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037892' partition 0 offset 2 partno 0 count 1 parts 0 suffix '63' size 160 2025-06-24T20:41:56.369250Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4d833611-5f9a162-e991adb1-51abcf10_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 0 2025-06-24T20:41:56.369437Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4d833611-5f9a162-e991adb1-51abcf10_0] MessageGroupId [src_id] Write session got write response: acks { seq_no: 3 written { offset: 2 } } write_statistics { persisting_time { nanos: 11000000 } min_queue_wait_time { } max_queue_wait_time { } partition_quota_wait_time { } topic_quota_wait_time { } } 2025-06-24T20:41:56.369482Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4d833611-5f9a162-e991adb1-51abcf10_0] MessageGroupId [src_id] OnAck: seqNo=1, txId=? 
2025-06-24T20:41:56.369513Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4d833611-5f9a162-e991adb1-51abcf10_0] MessageGroupId [src_id] Write session: acknoledged message 1 2025-06-24T20:41:56.367978Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::IEventHandle 2025-06-24T20:41:56.450514Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4d833611-5f9a162-e991adb1-51abcf10_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 1, Msg: Cancelled on the server side, Details: , InternalError: 0 2025-06-24T20:41:56.450632Z :ERROR: [/Root] TraceId [] SessionId [src_id|4d833611-5f9a162-e991adb1-51abcf10_0] MessageGroupId [src_id] Got error. Status: CLIENT_CANCELLED, Description:
: Error: GRpc error: (1): Cancelled on the server side 2025-06-24T20:41:56.450670Z :ERROR: [/Root] TraceId [] SessionId [src_id|4d833611-5f9a162-e991adb1-51abcf10_0] MessageGroupId [src_id] Write session will not restart after a fatal error 2025-06-24T20:41:56.450712Z :INFO: [/Root] TraceId [] SessionId [src_id|4d833611-5f9a162-e991adb1-51abcf10_0] MessageGroupId [src_id] Write session will now close 2025-06-24T20:41:56.450794Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4d833611-5f9a162-e991adb1-51abcf10_0] MessageGroupId [src_id] Write session: aborting 2025-06-24T20:41:56.448636Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: src_id|4d833611-5f9a162-e991adb1-51abcf10_0 grpc read done: success: 0 data: 2025-06-24T20:41:56.448670Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: src_id|4d833611-5f9a162-e991adb1-51abcf10_0 grpc read failed 2025-06-24T20:41:56.448700Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: src_id|4d833611-5f9a162-e991adb1-51abcf10_0 grpc closed 2025-06-24T20:41:56.448717Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: src_id|4d833611-5f9a162-e991adb1-51abcf10_0 is DEAD 2025-06-24T20:41:56.449205Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:41:56.452243Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [3:7519618847327152655:3168] destroyed 2025-06-24T20:41:56.452309Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-24T20:41:56.724330Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4d833611-5f9a162-e991adb1-51abcf10_0] MessageGroupId [src_id] Write session: destroy 2025-06-24T20:41:57.383767Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710889, task: 1, CA Id [3:7519618937521466732:3318]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-06-24T20:41:57.416969Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710889, task: 1, CA Id [3:7519618937521466732:3318]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:41:57.471237Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710889, task: 1, CA Id [3:7519618937521466732:3318]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:41:57.545434Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710889, task: 1, CA Id [3:7519618937521466732:3318]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:41:57.793006Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710889, task: 1, CA Id [3:7519618937521466732:3318]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:41:57.938277Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710889, task: 1, CA Id [3:7519618937521466732:3318]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T20:41:57.938393Z node 3 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976710890. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-24T20:41:57.946238Z node 3 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [3:7519618937521466766:3312] TxId: 281474976710890. 
Ctx: { TraceId: 01jyhtt3psbsvcnncjgjnd41qq, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=M2Q4YmE1YTktMTc4YTYyMTUtY2Y5NzgyYzAtYzNiNDBjZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-24T20:41:57.946646Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=M2Q4YmE1YTktMTc4YTYyMTUtY2Y5NzgyYzAtYzNiNDBjZQ==, ActorId: [3:7519618937521466712:3312], ActorState: ExecuteState, TraceId: 01jyhtt3psbsvcnncjgjnd41qq, Create QueryResponse for error on request, msg: 2025-06-24T20:41:57.968028Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jyhtt4886fpqks6ykqtcrsha" } } YdbStatus: UNAVAILABLE ConsumedRu: 368 } >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinue-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips+EvWrite >> DataShardReadIterator::ShouldReadRangeArrow [GOOD] >> DataShardReadIterator::ShouldReadNoColumnsKeysRequestCellVec >> DataShardReadIteratorSysTables::ShouldNotReadUserTableUsingLocalTid [GOOD] >> DataShardReadIteratorSysTables::ShouldForbidSchemaVersion >> TFlatTableExecutor_IndexLoading::Scan_History_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_Groups_FlatIndex >> TxUsage::WriteToTopic_Demo_47_Table [GOOD] >> DataShardReadIterator::ShouldReadKeyArrow [GOOD] >> DataShardReadIterator::ShouldReadKeyOnlyValueColumn >> DataShardReadIterator::ShouldRangeReadReverseLeftNonInclusive [GOOD] >> DataShardReadIterator::ShouldNotReadAfterCancel >> DataShardReadIterator::ShouldHandleOutOfOrderReadAck [GOOD] >> DataShardReadIterator::ShouldHandleReadAckWhenExhaustedRangeRead >> overlapping_portions.py::TestOverlappingPortions::test >> DataShardReadIteratorConsistency::LocalSnapshotReadHasRequiredDependencies [GOOD] >> DataShardReadIteratorConsistency::LocalSnapshotReadNoUnnecessaryDependencies >> DataShardReadIterator::ShouldStopWhenNodeDisconnected [GOOD] >> DataShardReadIterator::TryCommitLocksPrepared-Volatile-BreakLocks >> TFlatTableExecutor_IndexLoading::Scan_Groups_FlatIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_Groups_BTreeIndex >> KqpKv::ReadRows_Nulls [GOOD] >> DataShardReadIterator::ShouldReceiveErrorAfterSplit [GOOD] >> DataShardReadIterator::ShouldReceiveErrorAfterSplitWhenExhausted |90.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view |90.8%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view |90.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view >> TxUsage::WriteToTopic_Demo_47_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpKv::ReadRows_Nulls [GOOD] Test command err: Trying to start YDB, gRPC: 14701, MsgBus: 26453 2025-06-24T20:40:51.101460Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618657006507836:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:51.101829Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003234/r3tmp/tmpK3ufFh/pdisk_1.dat 2025-06-24T20:40:52.099136Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:40:52.108501Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:52.108632Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:52.193948Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:40:52.230283Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:52.230613Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618657006507629:2079] 1750797651064466 != 1750797651064469 2025-06-24T20:40:52.296281Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 14701, node 1 2025-06-24T20:40:53.026691Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:40:53.026722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:40:53.026735Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:40:53.026904Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26453 TClient is connected to server localhost:26453 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:40:54.456757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:40:55.122001Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618674186377458:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:55.122177Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:56.103510Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618657006507836:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:56.103609Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:40:56.683075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) IsSuccess(): 1 GetStatus(): SUCCESS Trying to start YDB, gRPC: 29954, MsgBus: 61232 2025-06-24T20:40:58.215306Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519618686096718942:2234];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003234/r3tmp/tmpWbljDq/pdisk_1.dat 2025-06-24T20:40:58.269474Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:40:58.358947Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:58.363449Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519618686096718733:2079] 1750797658129733 != 1750797658129736 2025-06-24T20:40:58.372348Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:58.372429Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:58.374503Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29954, node 2 2025-06-24T20:40:58.560660Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:40:58.560688Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:40:58.560695Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:40:58.560808Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61232 2025-06-24T20:40:59.171352Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:61232 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:40:59.384699Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:40:59.407412Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:41:02.981791Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519618703276588556:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:02.981902Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:03.003105Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) IsSuccess(): 1 GetStatus(): SUCCESS [] IsSuccess(): 1 GetStatus(): SUCCESS 2025-06-24T20:41:03.159511Z node 2 :RPC_REQUEST ERROR: rpc_read_rows.cpp:777: TReadRowsRPC ReplyWithError: no keys are found in request's proto 2025-06-24T20:41:03.166340Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519618686096718942:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:03.166413Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 5363, MsgBus: 25295 2025-06-24T20:41:04.055901Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519618709650115533:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:04.056871Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003234/r3tmp/tmpcQGLww/pdisk_1.dat 2025-06-24T20:41:04.213824Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:04.213913Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T2 ... 
6:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:49.072936Z node 6 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 6, TabletId: 72075186224037936 not found 2025-06-24T20:41:49.361784Z node 6 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 6, TabletId: 72075186224037937 not found 2025-06-24T20:41:49.471593Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715758:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:49.714219Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715760:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:49.715709Z node 6 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 6, TabletId: 72075186224037938 not found 2025-06-24T20:41:49.953232Z node 6 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 6, TabletId: 72075186224037939 not found Trying to start YDB, gRPC: 13731, MsgBus: 12265 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003234/r3tmp/tmpuwTcyN/pdisk_1.dat 2025-06-24T20:41:51.677659Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:51.825288Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:51.825410Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:51.835005Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:51.835398Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519618912095606517:2079] 1750797711420553 != 1750797711420556 2025-06-24T20:41:51.852674Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13731, node 7 2025-06-24T20:41:52.139925Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:41:52.139963Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:41:52.139973Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:41:52.140117Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:41:52.475371Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for 
task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12265 TClient is connected to server localhost:12265 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:41:53.240951Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:41:53.265695Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:41:57.266601Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:57.498567Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037888 not found 2025-06-24T20:41:57.514768Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:57.708106Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037889 not found 2025-06-24T20:41:57.715066Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:57.911757Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037890 not found Trying to start YDB, gRPC: 8021, MsgBus: 17160 test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/2btm/003234/r3tmp/tmpulDaJS/pdisk_1.dat 2025-06-24T20:41:59.493912Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:59.623598Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7519618947715415861:2079] 1750797719209866 != 1750797719209869 2025-06-24T20:41:59.646889Z node 8 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:59.648565Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:59.648675Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:59.654730Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8021, node 8 2025-06-24T20:41:59.715879Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:41:59.715909Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:41:59.715920Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:41:59.716091Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17160 2025-06-24T20:42:00.287678Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17160 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:42:00.912067Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:42:00.931499Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:42:05.200521Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519618973485220282:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:42:05.200657Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:42:05.239929Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) IsSuccess(): 1 GetStatus(): SUCCESS >> KqpSqlIn::ComplexKey [GOOD] >> KqpSqlIn::Dict >> DataShardReadIteratorSysTables::ShouldForbidSchemaVersion [GOOD] >> DataShardReadIteratorSysTables::ShouldNotAllowArrow >> DataShardReadIterator::ShouldReadNoColumnsKeysRequestCellVec [GOOD] >> DataShardReadIterator::ShouldReadNoColumnsKeysRequestArrow >> TFlatTableExecutor_IndexLoading::Scan_Groups_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_Groups_BTreeIndex_Empty >> TFlatTableExecutor_IndexLoading::Scan_Groups_BTreeIndex_Empty [GOOD] >> TFlatTableExecutor_KeepEraseMarkers::TestKeepEraseMarkers [GOOD] >> TFlatTableExecutor_LongTx::MemTableLongTx >> TFlatTableExecutor_LongTx::MemTableLongTx [GOOD] >> TFlatTableExecutor_LongTx::CompactUncommittedLongTx [GOOD] >> TFlatTableExecutor_LongTx::CompactCommittedLongTx >> TFlatTableExecutor_LongTx::CompactCommittedLongTx [GOOD] >> TFlatTableExecutor_LongTx::CompactedLongTxRestart >> TFlatTableExecutor_LongTx::CompactedLongTxRestart [GOOD] >> TFlatTableExecutor_LongTx::CompactMultipleChanges [GOOD] >> TFlatTableExecutor_LongTx::LongTxBorrow >> TFlatTableExecutor_LongTx::LongTxBorrow [GOOD] >> TFlatTableExecutor_LongTx::MemTableLongTxRead >> TFlatTableExecutor_LongTx::MemTableLongTxRead [GOOD] >> TFlatTableExecutor_LongTx::CompactedTxIdReuse [GOOD] >> TFlatTableExecutor_LongTx::MergeSkewedCommitted >> TFlatTableExecutor_LongTx::MergeSkewedCommitted [GOOD] >> TFlatTableExecutor_LongTxAndBlobs::SmallValues >> TFlatTableExecutor_LongTxAndBlobs::SmallValues [GOOD] >> TFlatTableExecutor_LongTxAndBlobs::OuterBlobValues >> TFlatTableExecutor_LongTxAndBlobs::OuterBlobValues [GOOD] >> TFlatTableExecutor_LongTxAndBlobs::ExternalBlobValues >> KqpRanges::NullInPredicateRow [GOOD] >> KqpRanges::NoFullScanAtScanQuery >> KqpSqlIn::KeyTypeMissmatch_Int [GOOD] >> KqpSqlIn::KeyTypeMissmatch_Str >> TFlatTableExecutor_LongTxAndBlobs::ExternalBlobValues [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestEnqueueCancel >> TFlatTableExecutor_LowPriorityTxs::TestEnqueueCancel [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestLowPriority [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestLowPriorityCancel >> TFlatTableExecutor_LowPriorityTxs::TestLowPriorityCancel [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestLowPriorityAllocatingCancel [GOOD] >> TFlatTableExecutor_MoveTableData::TestMoveSnapshot |90.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |90.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |90.8%| [LD] {RESULT} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestore::TestAllPrimitiveTypes-UUID Test command err: 2025-06-24T20:39:58.625137Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618427904620274:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:58.625274Z node 1 :METADATA_PROVIDER ERROR: 
log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmp0641RA/pdisk_1.dat 2025-06-24T20:39:59.599846Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:39:59.753420Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:39:59.803636Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:39:59.913055Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:39:59.913172Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:39:59.940772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24706, node 1 2025-06-24T20:40:00.437001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:40:00.437024Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:40:00.437031Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:40:00.437191Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22335 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:40:01.233973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
Backup "/Root" to "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpiI2tsr/"Create temporary directory "/Root/~backup_20250624T204001" in databaseProcess "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpiI2tsr/dir"Create directory "/Root/~backup_20250624T204001/dir" in databaseWrite ACL into "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpiI2tsr/dir/permissions.pb"Remove directory "/Root/~backup_20250624T204001/dir"2025-06-24T20:40:02.081593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:67) Remove temporary directory "/Root/~backup_20250624T204001" in database2025-06-24T20:40:02.195538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:67) Backup completed successfully2025-06-24T20:40:02.320023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:67) Restore "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpiI2tsr/" to "/Root"Resolved db base path: "/Root"List of entries in the backup: [{"type":"Directory","path":"/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpiI2tsr/"},{"type":"Directory","path":"/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpiI2tsr/dir"}]Process "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpiI2tsr/dir"Restore empty directory "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpiI2tsr/dir" to "/Root/dir"Restore ACL "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpiI2tsr/dir" to "/Root/dir"Read ACL from "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpiI2tsr/dir/permissions.pb"2025-06-24T20:40:02.705643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Restore completed successfully 2025-06-24T20:40:06.363123Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519618463381927896:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:06.363179Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpd5k1nc/pdisk_1.dat 2025-06-24T20:40:06.623452Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 
TServer::EnableGrpc on GrpcPort 1266, node 4 2025-06-24T20:40:06.665576Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 2025-06-24T20:40:06.715009Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:06.715095Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:06.717444Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:40:06.723780Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:40:06.723806Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:40:06.723814Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:40:06.723950Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16993 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:40:06.980691Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:40:07.403714Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:40:10.396902Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519618480561798103:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:10.397008Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:10.869865Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:11.112792Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519618484856765581:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:11.112912Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:11.365144Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519618463381927896:2074];send_ ... EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:41:24.698393Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:41:24.951301Z node 28 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:41:28.869723Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[28:7519618794047238227:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:28.870486Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:41:28.922392Z node 28 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [28:7519618815522075678:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:28.922521Z node 28 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:28.923063Z node 28 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [28:7519618815522075690:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:28.929900Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:41:28.994050Z node 28 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [28:7519618815522075692:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T20:41:29.080673Z node 28 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [28:7519618819817043072:2682] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:41:29.140530Z node 28 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [28:7519618819817043108:2695] txid# 281474976710660, issues: { message: "Column Key has wrong key type Yson" severity: 1 } 2025-06-24T20:41:29.143655Z node 28 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=28&id=ZDJlMjlkZmUtOTBmYjA3MjEtYzdiYWI2NTktNzhhZWI5NDA=, ActorId: [28:7519618815522075649:2294], ActorState: ExecuteState, TraceId: 01jyhts82q6f79ak5aew2b166k, Create QueryResponse for error on request, msg: 2025-06-24T20:41:29.144636Z node 28 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhts82q6f79ak5aew2b166k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=ZDJlMjlkZmUtOTBmYjA3MjEtYzdiYWI2NTktNzhhZWI5NDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:41:29.193458Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:29.527628Z node 28 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. Ctx: { TraceId: 01jyhts8h82ganbgm66bbt145r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=ZDJlMjlkZmUtOTBmYjA3MjEtYzdiYWI2NTktNzhhZWI5NDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:41:29.828360Z node 28 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710664. Ctx: { TraceId: 01jyhts8pv0bhqjpykhjz845ze, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=ZDJlMjlkZmUtOTBmYjA3MjEtYzdiYWI2NTktNzhhZWI5NDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root Backup "/Root" to "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpzT6PNw/"Create temporary directory "/Root/~backup_20250624T204129" in databaseProcess "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpzT6PNw/YsonTable"Copy tables: { src: "/Root/YsonTable", dst: "/Root/~backup_20250624T204129/YsonTable" }Describe table "/Root/YsonTable"Describe table "/Root/~backup_20250624T204129/YsonTable"Backup table "/Root/~backup_20250624T204129/YsonTable" to "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpzT6PNw/YsonTable"Write scheme into "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpzT6PNw/YsonTable/scheme.pb"Write ACL into "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpzT6PNw/YsonTable/permissions.pb"Read table "/Root/~backup_20250624T204129/YsonTable"Write data into "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpzT6PNw/YsonTable/data_00.csv"Drop table "/Root/~backup_20250624T204129/YsonTable"Remove temporary directory "/Root/~backup_20250624T204129" in database2025-06-24T20:41:31.172295Z node 28 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 28, TabletId: 72075186224037889 not found 2025-06-24T20:41:31.249633Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:67) Backup completed successfullyRestore "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpzT6PNw/" to "/Root"2025-06-24T20:41:31.400606Z node 28 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 28, TabletId: 72075186224037888 not found Resolved db base path: "/Root"List of entries in the backup: [{"type":"Directory","path":"/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpzT6PNw/"},{"type":"Table","path":"/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpzT6PNw/YsonTable"}]Process "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpzT6PNw/YsonTable"Read scheme from "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpzT6PNw/YsonTable/scheme.pb"Restore table "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpzT6PNw/YsonTable" to "/Root/YsonTable"2025-06-24T20:41:31.488807Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Created "/Root/YsonTable"Read data from "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpzT6PNw/YsonTable/data_00.csv"2025-06-24T20:41:31.689931Z node 28 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710673. Ctx: { TraceId: 01jyhtsap4cacg4yx89mx8bs74, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=ZWZiNzRlMTMtMjFmZTZiZjAtZTZhNTRmNjMtOGIxZmZmZjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root Restore ACL "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpzT6PNw/YsonTable" to "/Root/YsonTable"Read ACL from "/home/runner/.ya/build/build_root/2btm/0045e7/r3tmp/tmpzT6PNw/YsonTable/permissions.pb"2025-06-24T20:41:31.764675Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Restore completed successfully2025-06-24T20:41:31.944345Z node 28 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jyhtsawcaerqzqxy7f81q1wr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=ZDJlMjlkZmUtOTBmYjA3MjEtYzdiYWI2NTktNzhhZWI5NDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Uncaught exception: (TSystemError) (Error 35: Resource deadlock avoided) util/system/thread.cpp:199: can not join thread ??+0 (0x16D3D3F3) std::terminate()+57 (0x197F2C19) ??+0 (0x16D3D13E) ??+0 (0x1AC24E15) NYdbGrpc::Dev::TGRpcClientLow::Stop(bool)+179 (0x1FB8A493) NYdb::Dev::TGRpcConnectionsImpl::~TGRpcConnectionsImpl()+105 (0x1FAF5339) NYdb::Dev::TGRpcConnectionsImpl::~TGRpcConnectionsImpl()+14 (0x1FAF5B9E) NYdb::Dev::NQuery::TQueryClient::TImpl::~TImpl()+918 (0x20996826) NYdb::Dev::NQuery::TQueryClient::TImpl::~TImpl()+14 (0x2099692E) NYdb::Dev::NQuery::TExecuteQueryIterator::TReaderImpl::~TReaderImpl()+388 (0x20AA1274) std::__y1::__function::__func)::'lambda'(NYdbGrpc::Dev::TGrpcStatus&&), std::__y1::allocator)::'lambda'(NYdbGrpc::Dev::TGrpcStatus&&)>, void (NYdbGrpc::Dev::TGrpcStatus&&)>::destroy()+201 (0x20A7F829) NYdbGrpc::Dev::TStreamRequestReadProcessor::OnFinished(bool)+5177 (0x20A90A99) NYdbGrpc::Dev::TQueueClientFixedEvent>::Execute(bool)+112 (0x20A95090) ??+0 (0x1FB947FB) ??+0 (0x1AC248FF) ??+0 (0x1AC24E4D) ??+0 (0x19AB9585) ??+0 (0x19764BD9) ??+0 (0x7F6D4D7DDAC3) ??+0 (0x7F6D4D86F850) >> TFlatTableExecutor_MoveTableData::TestMoveSnapshot [GOOD] >> TFlatTableExecutor_MoveTableData::TestMoveSnapshotFollower >> TFlatTableExecutor_MoveTableData::TestMoveSnapshotFollower [GOOD] >> TFlatTableExecutor_PostponedScan::TestPostponedScan >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Query [FAIL] >> TFlatTableExecutor_PostponedScan::TestPostponedScan [GOOD] >> TFlatTableExecutor_PostponedScan::TestCancelFinishedScan >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeRightBorder+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeRightBorder-EvWrite >> TFlatTableExecutor_PostponedScan::TestCancelFinishedScan [GOOD] >> TFlatTableExecutor_PostponedScan::TestCancelRunningPostponedScan >> TFlatTableExecutor_PostponedScan::TestCancelRunningPostponedScan [GOOD] >> TFlatTableExecutor_PostponedScan::TestPostponedScanSnapshotMVCC >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips-EvWrite >> DataShardReadIterator::ShouldHandleReadAckWhenExhaustedRangeRead [GOOD] >> DataShardReadIterator::ShouldHandleReadAckWhenExhaustedRangeReadReverse >> TFlatTableExecutor_PostponedScan::TestPostponedScanSnapshotMVCC [GOOD] >> TFlatTableExecutor_Reboot::TestSchemeGcAfterReassign >> 
TFlatTableExecutor_Reboot::TestSchemeGcAfterReassign [GOOD] >> TFlatTableExecutor_RejectProbability::MaxedOutRejectProbability >> DataShardReadIterator::ShouldReadKeyOnlyValueColumn [GOOD] >> DataShardReadIterator::ShouldReadKeyValueColumnAndSomeKeyColumn >> DataShardReadIterator::ShouldNotReadAfterCancel [GOOD] >> DataShardReadIterator::ShouldLimitReadRangeChunk1Limit100 >> BackupRestore::RestoreKesusResources [GOOD] >> BackupRestore::RestoreReplicationWithoutSecret >> TFlatTableExecutor_RejectProbability::MaxedOutRejectProbability [GOOD] >> TFlatTableExecutor_RejectProbability::SomeRejectProbability |90.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut |90.8%| [LD] {RESULT} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut |90.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut >> TFlatTableExecutor_RejectProbability::SomeRejectProbability [GOOD] >> TFlatTableExecutor_RejectProbability::ZeroRejectProbability >> TFlatTableExecutor_RejectProbability::ZeroRejectProbability [GOOD] >> TFlatTableExecutor_RejectProbability::ZeroRejectProbabilityMultipleTables >> test_sql_streaming.py::test[suites-ReadTopicWithSchema-default.txt] [FAIL] >> DataShardReadIteratorConsistency::LocalSnapshotReadNoUnnecessaryDependencies [GOOD] >> DataShardReadIteratorConsistency::LocalSnapshotReadWithConcurrentWrites >> YdbSdkSessions::TestActiveSessionCountAfterBadSession >> DataShardReadIterator::TryCommitLocksPrepared-Volatile-BreakLocks [GOOD] >> DataShardReadIterator::TryCommitLocksPrepared+Volatile-BreakLocks >> TFlatTableExecutor_RejectProbability::ZeroRejectProbabilityMultipleTables [GOOD] >> TFlatTableExecutor_Reschedule::TestExecuteReschedule [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorSetResourceProfile [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestTxData >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestTxData [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorReuseStaticMemory >> TFlatTableExecutor_ResourceProfile::TestExecutorReuseStaticMemory [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestPages >> KqpRanges::Like [GOOD] >> YdbSdkSessionsPool::WaitQueue/1 >> YdbSdkSessionsPool1Session::FailTest/0 >> YdbSdkSessionsPool::PeriodicTask/0 >> YdbSdkSessionsPool::StressTestSync/1 >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestPages [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorPageLimitExceeded [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestMemory >> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryServiceStreamCall [SKIPPED] >> YdbSdkSessionsPool1Session::GetSession/0 >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestMemory [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestMemoryFollower >> YdbSdkSessionsPool1Session::FailTest/0 [GOOD] >> YdbSdkSessionsPool1Session::CustomPlan/0 >> YdbSdkSessionsPool1Session::RunSmallPlan/0 >> YdbSdkSessionsPool1Session::GetSession/0 [GOOD] >> YdbSdkSessionsPool::StressTestSync/0 >> DataShardReadIteratorSysTables::ShouldNotAllowArrow [GOOD] >> ReadIteratorExternalBlobs::ExtBlobs >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestMemoryFollower [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorMemoryLimitExceeded >> TFlatTableExecutor_ResourceProfile::TestExecutorMemoryLimitExceeded [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorPreserveTxData [GOOD] >> YdbSdkSessionsPool1Session::RunSmallPlan/0 [GOOD] >> YdbSdkSessions::TestActiveSessionCountAfterBadSession [GOOD] >> 
YdbSdkSessions::SessionsServerLimitWithSessionPool [SKIPPED] >> YdbSdkSessionsPool::StressTestAsync/0 >> YdbSdkSessions::MultiThreadSync >> TPersQueueTest::SrcIdCompatibility [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpRanges::Like [GOOD] Test command err: Trying to start YDB, gRPC: 19477, MsgBus: 2624 2025-06-24T20:40:51.096683Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618656076564594:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:51.096938Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003237/r3tmp/tmpxNLdm4/pdisk_1.dat 2025-06-24T20:40:52.095300Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:40:52.130827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:52.130944Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:52.230242Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:52.230809Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618656076564388:2079] 1750797651064697 != 1750797651064700 2025-06-24T20:40:52.253464Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:40:52.297444Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 19477, node 1 2025-06-24T20:40:53.050617Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:40:53.050635Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:40:53.050647Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:40:53.050789Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2624 TClient is connected to server localhost:2624 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:40:54.454526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:40:54.523817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:54.946628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:55.261970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:55.407699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:40:55.826201Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618673256435225:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:55.826421Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:56.099776Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618656076564594:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:56.099853Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:40:56.683471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:56.725683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:56.786485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:56.834242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:56.876846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:56.942868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:56.991507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:57.147610Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519618681846370480:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:57.147712Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:57.148079Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618681846370485:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:57.165973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:40:57.184124Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618681846370487:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:40:57.296568Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618681846370539:3432] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:41:00.688871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB ... 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519618955138001524:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:42:01.983647Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003237/r3tmp/tmpyG6aH2/pdisk_1.dat 2025-06-24T20:42:02.237693Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:42:02.237820Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:42:02.242580Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:42:02.246405Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:42:02.248615Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519618955138001428:2079] 1750797721907073 != 1750797721907076 TServer::EnableGrpc on GrpcPort 63831, node 7 2025-06-24T20:42:02.505915Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:42:02.505950Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:42:02.505962Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:42:02.506153Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2177 2025-06-24T20:42:02.981149Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2177 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:42:03.455703Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:42:03.468366Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:42:03.494474Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:42:03.607052Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:42:04.009545Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:42:04.346945Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:42:06.927578Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519618955138001524:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:42:06.938699Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:42:10.419197Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519618993792708768:2375], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:42:10.419375Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:42:10.463804Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:10.536034Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:10.614817Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:10.681058Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:10.757390Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:10.879388Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:10.944745Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:11.064217Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519618998087676727:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:42:11.064354Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:42:11.065596Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519618998087676732:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:42:11.072416Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:42:11.098637Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519618998087676734:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:42:11.198734Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519618998087676785:3443] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:42:13.291380Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> YdbSdkSessions::MultiThreadSync [GOOD] >> YdbSdkSessions::SessionsServerLimit [SKIPPED] |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/streaming_optimize/py3test >> test_sql_streaming.py::test[suites-ReadTopicWithSchema-default.txt] [FAIL] >> YdbSdkSessionsPool::WaitQueue/0 >> DataShardReadIterator::ShouldReceiveErrorAfterSplitWhenExhausted [GOOD] >> YdbSdkSessionsPool::WaitQueue/1 [GOOD] >> TTxDataShardMiniKQL::WriteAndReadMany [GOOD] >> TFlatTableExecutor_VersionedRows::TestVersionedRowsLargeBlobs [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2NoRestart [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2 >> DataShardReadIterator::ShouldReadNoColumnsKeysRequestArrow [GOOD] >> DataShardReadIterator::ShouldReadNoColumnsRangeRequestCellVec >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2 [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2ToSchema1 [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2ToSchema1ToSchema2 >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2ToSchema1ToSchema2 [GOOD] >> TGenCompaction::OverloadFactorDuringForceCompaction >> YdbSdkSessionsPool::WaitQueue/0 [GOOD] >> KqpSqlIn::KeyTypeMissmatch_Str [GOOD] >> KqpSqlIn::SecondaryIndex_PgKey >> TGenCompaction::OverloadFactorDuringForceCompaction [GOOD] >> TGenCompaction::ForcedCompactionNoGenerations [GOOD] >> TGenCompaction::ForcedCompactionWithGenerations [GOOD] >> TGenCompaction::ForcedCompactionWithFinalParts [GOOD] >> TGenCompaction::ForcedCompactionByDeletedRows [GOOD] >> TGenCompaction::ForcedCompactionByUnreachableMvccData [GOOD] >> TGenCompaction::ForcedCompactionByUnreachableMvccDataRestart [GOOD] >> TGenCompaction::ForcedCompactionByUnreachableMvccDataBorrowed [GOOD] >> TIterator::Basics >> TIterator::Basics [GOOD] >> TIterator::External >> TIterator::External [GOOD] >> TIterator::Single ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldReceiveErrorAfterSplitWhenExhausted [GOOD] Test command err: 2025-06-24T20:40:10.119582Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:40:10.119979Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:40:10.120142Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ce6/r3tmp/tmpCwCauT/pdisk_1.dat 2025-06-24T20:40:10.496077Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:40:10.500262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:40:10.556664Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:10.562379Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797606157980 != 1750797606157984 2025-06-24T20:40:10.613698Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:10.613890Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:10.628764Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:40:10.724242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:10.782032Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:40:10.787275Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:40:10.787938Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T20:40:10.788205Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:40:10.876889Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:40:10.877656Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:40:10.877820Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:40:10.879655Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T20:40:10.879747Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:40:10.879806Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:40:10.880233Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:40:10.880405Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:40:10.880496Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T20:40:10.891296Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:40:10.923641Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:40:10.923865Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:40:10.923974Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T20:40:10.924009Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:40:10.924047Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:40:10.924084Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:40:10.924371Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:40:10.924422Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:40:10.924724Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:40:10.924822Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:40:10.924880Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:40:10.925187Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:40:10.925236Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:40:10.925289Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:40:10.925323Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:40:10.925364Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:40:10.925422Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:40:10.925588Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:40:10.925627Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:40:10.925669Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T20:40:10.926102Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T20:40:10.926147Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:40:10.926274Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:40:10.926490Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T20:40:10.926556Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:40:10.926664Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:40:10.926723Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20:40:10.926759Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T20:40:10.926793Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T20:40:10.926824Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:40:10.927081Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T20:40:10.927111Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T20:40:10.927162Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T20:40:10.927194Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:40:10.927265Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T20:40:10.927296Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T20:40:10.927356Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T20:40:10.927400Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T20:40:10.927433Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T20:40:10.928902Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T20:40:10.928955Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:40:10.939801Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:40:10.939879Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:40:10.939914Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:40:10.939961Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... .cpp:1250: Change sender created: at tablet: 72075186224037891, actorId: [15:1107:2864] 2025-06-24T20:42:17.577815Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037891 2025-06-24T20:42:17.577917Z node 15 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037891 2025-06-24T20:42:17.577993Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-24T20:42:17.578161Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [15:1032:2812], Recipient [15:1032:2812]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:42:17.578200Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:42:17.578482Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553157, Sender [15:1032:2812], Recipient [15:626:2530]: NKikimrTxDataShard.TEvSplitTransferSnapshotAck TabletId: 72075186224037891 OperationCookie: 281474976715665 2025-06-24T20:42:17.578569Z node 15 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037888 Received snapshot Ack from dst 72075186224037891 for split OpId 281474976715665 2025-06-24T20:42:17.579515Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [15:1102:2859], Recipient [15:626:2530]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186224037891 ClientId: [15:1102:2859] ServerId: [15:1103:2860] } 2025-06-24T20:42:17.579574Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T20:42:17.579752Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037891 2025-06-24T20:42:17.579791Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037891 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:42:17.579826Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037891 2025-06-24T20:42:17.579859Z node 15 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037891 has no attached operations 
2025-06-24T20:42:17.579890Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037891 2025-06-24T20:42:17.579920Z node 15 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-06-24T20:42:17.579956Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037891 2025-06-24T20:42:17.580528Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [15:24:2071], Recipient [15:1032:2812]: {TEvRegisterTabletResult TabletId# 72075186224037891 Entry# 3000} 2025-06-24T20:42:17.580586Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-24T20:42:17.580668Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037891 time 3000 2025-06-24T20:42:17.580743Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-24T20:42:17.580841Z node 15 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:304: 72075186224037892 ack snapshot OpId 281474976715665 2025-06-24T20:42:17.580961Z node 15 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037892 2025-06-24T20:42:17.581056Z node 15 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037892 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T20:42:17.581139Z node 15 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037892 2025-06-24T20:42:17.581194Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037892, actorId: [15:1109:2866] 2025-06-24T20:42:17.581222Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037892 2025-06-24T20:42:17.581262Z node 15 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037892 2025-06-24T20:42:17.581290Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-24T20:42:17.581622Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877764, Sender [15:1103:2860], Recipient [15:1032:2812]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:42:17.581678Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3169: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:42:17.581779Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037891, clientId# [15:1102:2859], serverId# [15:1103:2860], sessionId# [0:0:0] 2025-06-24T20:42:17.581875Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [15:1037:2814], Recipient [15:1037:2814]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:42:17.581908Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:42:17.582082Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553157, Sender [15:1037:2814], Recipient [15:626:2530]: NKikimrTxDataShard.TEvSplitTransferSnapshotAck TabletId: 72075186224037892 OperationCookie: 281474976715665 2025-06-24T20:42:17.582135Z node 
15 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037888 Received snapshot Ack from dst 72075186224037892 for split OpId 281474976715665 2025-06-24T20:42:17.582622Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [15:1101:2858], Recipient [15:626:2530]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186224037892 ClientId: [15:1101:2858] ServerId: [15:1104:2861] } 2025-06-24T20:42:17.582662Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T20:42:17.583002Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [15:24:2071], Recipient [15:1037:2814]: {TEvRegisterTabletResult TabletId# 72075186224037892 Entry# 3000} 2025-06-24T20:42:17.583038Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-24T20:42:17.583078Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037892 time 3000 2025-06-24T20:42:17.583114Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-24T20:42:17.583439Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-06-24T20:42:17.583480Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:42:17.583517Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037892 2025-06-24T20:42:17.583550Z node 15 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037892 has no attached operations 2025-06-24T20:42:17.583581Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037892 2025-06-24T20:42:17.583624Z node 15 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037892 TxInFly 0 2025-06-24T20:42:17.583669Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-24T20:42:17.583835Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877764, Sender [15:1104:2861], Recipient [15:1037:2814]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:42:17.583870Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3169: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T20:42:17.583908Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037892, clientId# [15:1101:2858], serverId# [15:1104:2861], sessionId# [0:0:0] 2025-06-24T20:42:17.584153Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [15:24:2071], Recipient [15:1032:2812]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 3000 ReadStep# 3000 } 2025-06-24T20:42:17.584204Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-24T20:42:17.584301Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037891 coordinator 72057594046316545 last step 0 next step 3000 2025-06-24T20:42:17.584415Z node 15 
:TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037891: waitStep# 3000 readStep# 3000 observedStep# 3000 2025-06-24T20:42:17.584540Z node 15 :TX_DATASHARD TRACE: datashard.cpp:2846: CheckMediatorStateRestored at 72075186224037891 promoting UnprotectedReadEdge to v3000/18446744073709551615 2025-06-24T20:42:17.584918Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [15:24:2071], Recipient [15:1037:2814]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 3000 ReadStep# 3000 } 2025-06-24T20:42:17.584956Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-24T20:42:17.584989Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037892 coordinator 72057594046316545 last step 0 next step 3000 2025-06-24T20:42:17.585033Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037892: waitStep# 3000 readStep# 3000 observedStep# 3000 2025-06-24T20:42:17.585079Z node 15 :TX_DATASHARD TRACE: datashard.cpp:2846: CheckMediatorStateRestored at 72075186224037892 promoting UnprotectedReadEdge to v3000/18446744073709551615 2025-06-24T20:42:17.607311Z node 15 :TX_DATASHARD DEBUG: datashard_split_src.cpp:485: 72075186224037888 ack split to schemeshard 281474976715665 2025-06-24T20:42:17.612013Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553158, Sender [15:372:2366], Recipient [15:632:2533] 2025-06-24T20:42:17.612121Z node 15 :TX_DATASHARD DEBUG: datashard_split_src.cpp:565: Got TEvSplitPartitioningChanged: opId: 281474976715665, at datashard: 72075186224037888, state: SplitSrcWaitForPartitioningChanged 2025-06-24T20:42:17.622335Z node 15 :TX_DATASHARD DEBUG: datashard_split_src.cpp:532: 72075186224037888 ack split partitioning changed to schemeshard 281474976715665 2025-06-24T20:42:17.622489Z node 15 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037888 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T20:42:17.622673Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [15:617:2524], Recipient [15:626:2530]: NKikimr::TEvTablet::TEvFollowerGcApplied >> DataShardReadIterator::ShouldHandleReadAckWhenExhaustedRangeReadReverse [GOOD] >> DataShardReadIterator::ShouldForbidDuplicatedReadId ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryServiceStreamCall [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:248: Test is failing right now |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::GetSession/0 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::SrcIdCompatibility [GOOD] Test command err: === Start server === Server->StartServer(false); 2025-06-24T20:35:19.865670Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617228631558450:2073];send_to=[0:7307199536658146131:7762515]; 
2025-06-24T20:35:19.866609Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:35:19.961498Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519617228620541497:2156];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:19.975708Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004988/r3tmp/tmpfYaeGv/pdisk_1.dat 2025-06-24T20:35:20.150097Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:35:20.165899Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:35:20.493011Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:20.518251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:20.518326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:20.519360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:20.519424Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:20.529761Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:35:20.529911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:20.532103Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22163, node 1 2025-06-24T20:35:20.663650Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/004988/r3tmp/yandexjyBx2L.tmp 2025-06-24T20:35:20.663680Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/004988/r3tmp/yandexjyBx2L.tmp 2025-06-24T20:35:20.663859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/004988/r3tmp/yandexjyBx2L.tmp 2025-06-24T20:35:20.664005Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:35:20.734151Z INFO: TTestServer started on Port 18721 GrpcPort 22163 2025-06-24T20:35:20.905579Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18721 2025-06-24T20:35:20.974974Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; PQClient connected to localhost:22163 === TenantModeEnabled() = 0 === Init PQ - start server on port 22163 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:21.269050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T20:35:21.269240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:21.269470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T20:35:21.269498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976710657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T20:35:21.270036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:35:21.270143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:21.272638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T20:35:21.272818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T20:35:21.273013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:21.273064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T20:35:21.273081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: 
TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-24T20:35:21.273095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 2 -> 3 waiting... 2025-06-24T20:35:21.275270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:21.275317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T20:35:21.275337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 3 -> 128 2025-06-24T20:35:21.277222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:35:21.277249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-24T20:35:21.277280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:35:21.277789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:21.277808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:21.277832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-24T20:35:21.277867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-24T20:35:21.282451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:35:21.284410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-24T20:35:21.284579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-24T20:35:21.287934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750797321329, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T20:35:21.288089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750797321329 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 
2025-06-24T20:35:21.288153Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-24T20:35:21.288471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 128 -> 240 2025-06-24T20:35:21.288510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-24T20:35:21.288683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-24T20:35:21.288810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057 ... mpl.cpp:2910: [PQ: 72075186224037910] server disconnected, pipe [29:7519619012792122621:2704] destroyed 2025-06-24T20:42:15.093032Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037910, Partition: 7, State: StateIdle] TPartition::DropOwner. 2025-06-24T20:42:15.129216Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 38, State: StateIdle] no data for compaction 2025-06-24T20:42:15.171858Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037893, Partition: 40, State: StateIdle] no data for compaction 2025-06-24T20:42:15.207713Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 8, State: StateIdle] no data for compaction 2025-06-24T20:42:15.207981Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037907, Partition: 15, State: StateIdle] no data for compaction 2025-06-24T20:42:15.227634Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037895, Partition: 71, State: StateIdle] no data for compaction 2025-06-24T20:42:15.240842Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037909, Partition: 1, State: StateIdle] no data for compaction 2025-06-24T20:42:15.298259Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037898, Partition: 83, State: StateIdle] no data for compaction 2025-06-24T20:42:15.298501Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037893, Partition: 98, State: StateIdle] no data for compaction 2025-06-24T20:42:15.338779Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037898, Partition: 77, State: StateIdle] no data for compaction 2025-06-24T20:42:15.360024Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037893, Partition: 4, State: StateIdle] no data for compaction 2025-06-24T20:42:15.374388Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037899, Partition: 12, State: StateIdle] no data for compaction 2025-06-24T20:42:15.374914Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037893, Partition: 74, State: StateIdle] no data for compaction 2025-06-24T20:42:15.382459Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037908, Partition: 20, State: StateIdle] no data for compaction 2025-06-24T20:42:15.389645Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037898, Partition: 91, State: StateIdle] no data for compaction 2025-06-24T20:42:15.393210Z 
node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037905, Partition: 49, State: StateIdle] no data for compaction 2025-06-24T20:42:15.402523Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037898, Partition: 9, State: StateIdle] no data for compaction 2025-06-24T20:42:15.440642Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037899, Partition: 41, State: StateIdle] no data for compaction 2025-06-24T20:42:15.447537Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037901, Partition: 82, State: StateIdle] no data for compaction 2025-06-24T20:42:15.455551Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037901, Partition: 44, State: StateIdle] no data for compaction 2025-06-24T20:42:15.463081Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 36, State: StateIdle] no data for compaction 2025-06-24T20:42:15.463205Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037910, Partition: 7, State: StateIdle] need more data for compaction. cumulativeSize=177, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:42:15.472430Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037911, Partition: 5, State: StateIdle] need more data for compaction. cumulativeSize=352, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:42:15.472682Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037902, Partition: 86, State: StateIdle] no data for compaction 2025-06-24T20:42:15.473974Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037907, Partition: 23, State: StateIdle] no data for compaction 2025-06-24T20:42:15.474796Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 76, State: StateIdle] no data for compaction 2025-06-24T20:42:15.475481Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037903, Partition: 65, State: StateIdle] no data for compaction 2025-06-24T20:42:15.475643Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037898, Partition: 27, State: StateIdle] no data for compaction 2025-06-24T20:42:15.480983Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037907, Partition: 93, State: StateIdle] no data for compaction 2025-06-24T20:42:15.481182Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037895, Partition: 90, State: StateIdle] no data for compaction 2025-06-24T20:42:15.484097Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037896, Partition: 35, State: StateIdle] no data for compaction 2025-06-24T20:42:15.484802Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037901, Partition: 51, State: StateIdle] no data for compaction 2025-06-24T20:42:15.486660Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037909, Partition: 92, State: StateIdle] no data for compaction 2025-06-24T20:42:15.486845Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037895, Partition: 54, State: StateIdle] no data for compaction 2025-06-24T20:42:15.487026Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037911, Partition: 95, State: StateIdle] no data for compaction 2025-06-24T20:42:15.487192Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037902, Partition: 60, 
State: StateIdle] no data for compaction 2025-06-24T20:42:15.488641Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037895, Partition: 11, State: StateIdle] no data for compaction 2025-06-24T20:42:15.488816Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037911, Partition: 56, State: StateIdle] no data for compaction 2025-06-24T20:42:15.489651Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 13, State: StateIdle] no data for compaction 2025-06-24T20:42:15.491359Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037905, Partition: 84, State: StateIdle] no data for compaction 2025-06-24T20:42:15.491638Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037899, Partition: 64, State: StateIdle] no data for compaction 2025-06-24T20:42:15.492246Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037908, Partition: 24, State: StateIdle] no data for compaction 2025-06-24T20:42:15.492420Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037905, Partition: 50, State: StateIdle] no data for compaction 2025-06-24T20:42:15.492555Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037908, Partition: 22, State: StateIdle] no data for compaction 2025-06-24T20:42:15.502506Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037903, Partition: 43, State: StateIdle] no data for compaction 2025-06-24T20:42:15.502760Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037905, Partition: 14, State: StateIdle] no data for compaction 2025-06-24T20:42:15.502935Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037893, Partition: 26, State: StateIdle] no data for compaction 2025-06-24T20:42:15.504556Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037900, Partition: 97, State: StateIdle] no data for compaction 2025-06-24T20:42:15.506059Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037904, Partition: 81, State: StateIdle] no data for compaction 2025-06-24T20:42:15.505837Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037903, Partition: 63, State: StateIdle] no data for compaction 2025-06-24T20:42:15.506010Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037910, Partition: 67, State: StateIdle] no data for compaction 2025-06-24T20:42:15.506194Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037901, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T20:42:15.506369Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037901, Partition: 58, State: StateIdle] no data for compaction 2025-06-24T20:42:15.507058Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037902, Partition: 87, State: StateIdle] no data for compaction 2025-06-24T20:42:15.507838Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037908, Partition: 45, State: StateIdle] no data for compaction 2025-06-24T20:42:15.508027Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037905, Partition: 25, State: StateIdle] no data for compaction 2025-06-24T20:42:15.515632Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037896, Partition: 42, State: StateIdle] no data for compaction 2025-06-24T20:42:15.516907Z node 30 :PERSQUEUE DEBUG: 
partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 34, State: StateIdle] no data for compaction 2025-06-24T20:42:15.517113Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037911, Partition: 88, State: StateIdle] no data for compaction 2025-06-24T20:42:15.517695Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 16, State: StateIdle] no data for compaction 2025-06-24T20:42:15.545666Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037910, Partition: 28, State: StateIdle] no data for compaction 2025-06-24T20:42:15.545881Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037899, Partition: 39, State: StateIdle] no data for compaction 2025-06-24T20:42:15.546041Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037896, Partition: 10, State: StateIdle] no data for compaction 2025-06-24T20:42:15.546222Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037908, Partition: 46, State: StateIdle] no data for compaction 2025-06-24T20:42:15.547240Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037910, Partition: 6, State: StateIdle] need more data for compaction. cumulativeSize=354, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:42:15.547442Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037896, Partition: 55, State: StateIdle] no data for compaction 2025-06-24T20:42:15.547653Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037903, Partition: 61, State: StateIdle] no data for compaction 2025-06-24T20:42:15.547829Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037910, Partition: 3, State: StateIdle] no data for compaction 2025-06-24T20:42:15.547995Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037899, Partition: 89, State: StateIdle] no data for compaction 2025-06-24T20:42:15.548406Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037903, Partition: 79, State: StateIdle] no data for compaction 2025-06-24T20:42:15.569450Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037896, Partition: 62, State: StateIdle] no data for compaction ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> TFlatTableExecutor_ResourceProfile::TestExecutorPreserveTxData [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:41:11.220633Z 00000.127 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.138 II| FAKE_ENV: Starting storage for BS group 0 00000.138 II| FAKE_ENV: Starting storage for BS group 1 00000.138 II| FAKE_ENV: Starting storage for BS group 2 00000.138 II| FAKE_ENV: Starting storage for BS group 3 00000.512 C1| TABLET_EXECUTOR: Tablet 1 unhandled exception std::runtime_error: test ??+0 (0x11A91301) __cxa_throw+221 (0x11A9112D) NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Exceptions::TTxExecuteThrowException::Execute(NKikimr::NTabletFlatExecutor::TTransactionContext&, NActors::TActorContext const&)+62 (0x10D7BD6E) NKikimr::NTabletFlatExecutor::TExecutor::ExecuteTransaction(NKikimr::NTabletFlatExecutor::TSeat*)+3349 (0x17DAAAB5) NKikimr::NTabletFlatExecutor::TExecutor::StateWork(TAutoPtr&)+504 (0x17D6EE88) NActors::IActor::Receive(TAutoPtr&)+237 (0x13291CBD) 00000.515 II| FAKE_ENV: Model starts hard 
shutdown on level 7 of 8, left 2 actors 00000.515 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.517 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.517 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {62b, 2} 00000.517 II| FAKE_ENV: DS.1 gone, left {35b, 1}, put {35b, 1} 00000.517 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.517 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.517 II| FAKE_ENV: All BS storage groups are stopped 00000.517 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.517 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 1 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:41:11.744105Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.015 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.015 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.015 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.015 II| FAKE_ENV: DS.0 gone, left {111b, 2}, put {131b, 3} 00000.015 II| FAKE_ENV: DS.1 gone, left {42b, 2}, put {42b, 2} 00000.015 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.015 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.015 II| FAKE_ENV: All BS storage groups are stopped 00000.015 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.015 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:41:11.766256Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.007 II| FAKE_ENV: Starting storage for BS group 0 00000.007 II| FAKE_ENV: Starting storage for BS group 1 00000.007 II| FAKE_ENV: Starting storage for BS group 2 00000.007 II| FAKE_ENV: Starting storage for BS group 3 00000.034 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 4 actors 00000.035 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.035 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.035 II| FAKE_ENV: DS.0 gone, left {561b, 14}, put {623b, 16} 00000.035 II| FAKE_ENV: DS.1 gone, left {693b, 8}, put {693b, 8} 00000.035 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.035 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.036 II| FAKE_ENV: All BS storage groups are stopped 00000.036 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.036 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:41:11.807564Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.009 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.021 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 4 actors 00000.023 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.023 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.023 II| FAKE_ENV: DS.0 gone, left {141b, 4}, put 
{669b, 13} 00000.023 II| FAKE_ENV: DS.1 gone, left {868b, 8}, put {987b, 10} 00000.023 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.023 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.023 II| FAKE_ENV: All BS storage groups are stopped 00000.023 II| FAKE_ENV: Model stopped, hosted 5 actors, spent 0.000s 00000.023 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:41:11.839648Z 00000.009 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.010 II| FAKE_ENV: Starting storage for BS group 0 00000.010 II| FAKE_ENV: Starting storage for BS group 1 00000.010 II| FAKE_ENV: Starting storage for BS group 2 00000.010 II| FAKE_ENV: Starting storage for BS group 3 00000.012 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.012 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1 ... initializing schema 00000.013 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} queued, type NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema 00000.013 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.013 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} hope 1 -> done Change{2, redo 0b alter 209b annex 0, ~{ } -{ }, 0 gb} 00000.013 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} release 4194304b of static, Memory{0 dyn 0} 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2 ... inserting rows 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} queued, type NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} hope 1 -> done Change{2, redo 512b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} release 4194304b of static, Memory{0 dyn 0} 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 ... starting follower ... waiting for follower attach ... blocking NKikimr::TEvTablet::TEvNewFollowerAttached from TABLET_ACTOR to NKikimr::NTabletFlatExecutor::TTestFlatTablet cookie 0 ... waiting for follower attach (done) ... 
spamming QueueScan transactions 00000.016 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.016 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.024 II| TABLET_EXECUTOR: Leader{1:2:5} starting Scan{2 on 101, TEmptyScan{}} 00000.024 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.024 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:5} commited cookie 8 for step 4 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.029 II| TABLET_EXECUTOR: Leader{1:2:6} starting Scan{4 on 101, TEmptyScan{}} 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:6} commited cookie 8 for step 5 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.030 II| TABLET_EXECUTOR: Leader{1:2:7} starting Scan{6 on 101, TEmptyScan{}} 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:7} commited cookie 8 for step 6 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.031 II| TABLET_EXECUTOR: Leader{1:2:8} starting Scan{8 on 101, TEmptyScan{}} 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 
0b annex 0, ~{ } -{ }, 0 gb} 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:8} commited cookie 8 for step 7 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.032 II| TABLET_EXECUTOR: Leader{1:2:9} starting Scan{10 on 101, TEmptyScan{}} 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{7, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.032 DD| TABLET_ ... ange{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.047 DD| TABLET_EXECUTOR: Leader{1:2:4} found attached Res{10 20480b} 00000.047 DD| TABLET_EXECUTOR: release 10240b of static tx data due to attached res 10, Memory{0 dyn 20480} 00000.047 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{24, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 524267520b requested for data (524288000b in total) 00000.047 EE| TABLET_EXECUTOR: Leader{1:2:4} Tx{24, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} mem 524288000b terminated, limit 314572800b is exceeded 00000.047 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{24, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{10 20480b}, Memory{0 dyn 0} 00000.047 DD| RESOURCE_BROKER: Update cookie for task Tx{23, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (10 by [56:30:2062]) 00000.047 DD| RESOURCE_BROKER: Finish task Tx{23, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (10 by [56:30:2062]) (release resources {0, 20480}) 00000.048 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.001311 to 0.000000 (remove task Tx{23, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (10 by [56:30:2062])) 00000.048 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.048 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.048 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.048 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 19456b requested for data (20480b in total) 00000.048 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, 
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 0} 00000.048 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00000.048 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{11 20480b} type small_transaction 00000.049 DD| RESOURCE_BROKER: Submitted new unknown task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) priority=5 resources={0, 20480} 00000.049 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.049 DD| RESOURCE_BROKER: Allocate resources {0, 20480} for task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) from queue queue_default 00000.049 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.049 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 0.001192 (insert task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])) 00000.049 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{11 20480b}, Memory{0 dyn 20480} 00000.049 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.049 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} update resource task 11 releasing 0b, Memory{0 dyn 20480} 00000.049 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} captured Res{11 20480b} 00000.049 DD| RESOURCE_BROKER: Update task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) (priority=5 type=small_transaction resources={0, 20480} resubmit=0) 00000.049 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.049 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 0.001192 (insert task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])) 00000.049 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.050 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, 
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 20480} 00000.050 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.050 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 19456b requested for data (20480b in total) 00000.050 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 20480} 00000.050 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00000.050 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{12 20480b} type small_transaction 00000.050 DD| RESOURCE_BROKER: Submitted new unknown task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) priority=5 resources={0, 20480} 00000.050 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.050 DD| RESOURCE_BROKER: Allocate resources {0, 20480} for task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) from queue queue_default 00000.050 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.050 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.001192 to 0.002384 (insert task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])) 00000.050 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{12 20480b}, Memory{0 dyn 40960} 00000.050 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.050 DD| TABLET_EXECUTOR: Leader{1:2:4} found attached Res{11 20480b} 00000.050 DD| TABLET_EXECUTOR: Leader{1:2:4} moving tx data from attached Res{11 20480b} to Res{12 ...} 00000.050 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 524267520b requested for data (524288000b in total) 00000.050 EE| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} mem 524288000b terminated, limit 314572800b is exceeded 00000.050 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{12 40960b}, Memory{0 dyn 0} 
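
The RESOURCE_BROKER / TABLET_EXECUTOR exchange above illustrates the per-transaction memory protocol exercised by the TTxRequestMemory test: the transaction releases its static grant, requests a small_transaction resource from the broker, and is terminated rather than retried once its total demand (524288000b) exceeds the hard limit (314572800b). Below is a minimal sketch of that limit check, under simplifying assumptions: the numbers come from the log, but the function name and signature are invented for this example and do not reflect the real executor interface.

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    constexpr std::uint64_t TxMemoryLimit = 314572800; // the limit reported in the log (300 MiB)

    // Returns true when the enlarged request can be granted, false when the transaction
    // is terminated because its total memory demand crosses the hard limit.
    bool HandleMemoryRequest(std::uint64_t alreadyHeld, std::uint64_t extraRequested) {
        const std::uint64_t total = alreadyHeld + extraRequested;
        if (total > TxMemoryLimit) {
            std::printf("mem %" PRIu64 "b terminated, limit %" PRIu64 "b is exceeded\n",
                        total, TxMemoryLimit);
            return false;
        }
        std::printf("request Res{%" PRIu64 "b} type small_transaction\n", total);
        return true;
    }

    int main() {
        HandleMemoryRequest(20480, 0);            // granted, like Res{11 20480b}
        HandleMemoryRequest(20480, 524267520);    // 524288000b in total -> terminated
    }
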
00000.050 DD| RESOURCE_BROKER: Update task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) (priority=5 type=small_transaction resources={0, 40960} resubmit=0) 00000.050 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.051 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.001192 to 0.003576 (insert task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])) 00000.051 DD| RESOURCE_BROKER: Finish task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) (release resources {0, 20480}) 00000.051 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.003576 to 0.002384 (remove task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])) 00000.051 DD| RESOURCE_BROKER: Finish task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) (release resources {0, 40960}) 00000.051 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.002384 to 0.000000 (remove task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])) 00000.051 II| TABLET_EXECUTOR: Leader{1:2:4} suiciding, Waste{2:0, 317b +(0, 0b), 3 trc, -0b acc} 00000.053 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.053 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.053 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.053 II| FAKE_ENV: DS.0 gone, left {180b, 3}, put {200b, 4} 00000.053 II| FAKE_ENV: DS.1 gone, left {352b, 3}, put {352b, 3} 00000.053 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.053 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.053 II| FAKE_ENV: All BS storage groups are stopped 00000.053 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.054 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 45 Left 401}, stopped >> DataShardReadIterator::ShouldLimitReadRangeChunk1Limit100 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit98 >> DataShardReadIterator::ShouldReadKeyValueColumnAndSomeKeyColumn [GOOD] >> DataShardReadIterator::ShouldReadMultipleKeys |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::FailTest/0 [GOOD] >> YdbSdkSessions::TestSessionPool |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::RunSmallPlan/0 [GOOD] >> TIterator::Single [GOOD] >> TIterator::SingleReverse ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::SessionsServerLimitWithSessionPool [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:588: Enable after accepting a pull request with merging configs ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minikql/unittest >> 
TTxDataShardMiniKQL::WriteAndReadMany [GOOD] Test command err: 2025-06-24T20:38:06.425095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:38:06.425167Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:06.447041Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:38:06.472341Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:38:06.472951Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T20:38:06.473247Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:38:06.527137Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:38:06.546082Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:38:06.546321Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:38:06.548324Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T20:38:06.548418Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T20:38:06.548490Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T20:38:06.548879Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:38:06.548969Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:38:06.549038Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T20:38:06.680154Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:38:06.756763Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T20:38:06.757046Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:38:06.757183Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T20:38:06.757257Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T20:38:06.757313Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T20:38:06.757364Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T20:38:06.757557Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:38:06.757610Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:38:06.757947Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: 
TTxCheckInReadSets::Execute at 9437184 2025-06-24T20:38:06.758052Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T20:38:06.758235Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T20:38:06.758286Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:38:06.758342Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T20:38:06.758388Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T20:38:06.758428Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T20:38:06.758476Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T20:38:06.758529Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T20:38:06.758642Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:06.758707Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:06.758772Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T20:38:06.762763Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T20:38:06.762863Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:38:06.762981Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T20:38:06.763512Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T20:38:06.763581Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T20:38:06.763650Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T20:38:06.763722Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T20:38:06.763776Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T20:38:06.763815Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T20:38:06.763871Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T20:38:06.764250Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T20:38:06.764293Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T20:38:06.764332Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T20:38:06.764389Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T20:38:06.764442Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T20:38:06.764489Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T20:38:06.764544Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T20:38:06.764601Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T20:38:06.764629Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T20:38:06.782609Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T20:38:06.782720Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T20:38:06.782770Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T20:38:06.782816Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T20:38:06.782924Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T20:38:06.784171Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:06.784239Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:38:06.784295Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T20:38:06.784463Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T20:38:06.784506Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T20:38:06.784694Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T20:38:06.784746Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T20:38:06.784791Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T20:38:06.784847Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 
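
The TX_DATASHARD trace above walks a scheme transaction through the datashard's execution-unit pipeline: each unit (CheckSchemeTx, StoreSchemeTx, FinishPropose, WaitForPlan, PlanQueue, ...) reports a status such as Executed, DelayComplete, Restart, or "not ready", and the pipeline either advances to the next unit or parks the operation until an external event arrives (for WaitForPlan, the coordinator's plan step). The following is a rough sketch of that dispatch loop under those assumptions; the types, statuses, and unit list are illustrative only and do not match the real datashard_pipeline.cpp API.

    #include <cstdio>
    #include <functional>
    #include <string>
    #include <vector>

    enum class EStatus { Executed, DelayComplete, Restart, NotReady };

    struct TUnit {
        std::string Name;
        std::function<EStatus()> Execute;
    };

    // Walk an operation through the ordered unit list, mirroring the
    // "Trying to execute ... on unit X" / "Advance execution plan ..." trace lines.
    void RunPipeline(const std::vector<TUnit>& units) {
        for (size_t i = 0; i < units.size(); ++i) {
            EStatus st = units[i].Execute();
            std::printf("unit %s -> status %d\n", units[i].Name.c_str(), (int)st);
            if (st == EStatus::Restart || st == EStatus::NotReady) {
                // In the real executor the operation is re-queued and retried later,
                // e.g. WaitForPlan resumes when the coordinator's plan step arrives.
                return;
            }
            // Executed / DelayComplete: advance to the next unit.
        }
    }

    int main() {
        RunPipeline({
            {"CheckSchemeTx", [] { return EStatus::Executed; }},
            {"StoreSchemeTx", [] { return EStatus::DelayComplete; }},
            {"FinishPropose", [] { return EStatus::DelayComplete; }},
            {"WaitForPlan",   [] { return EStatus::NotReady; }},
        });
    }
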
2025-06-24T20:38:06.792383Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T20:38:06.792493Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T20:38:06.792840Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:38:06.792904Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:38:06.792974Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T20:38:06.793022Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T20:38:06.793059Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T20:38:06.793123Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T20:38:06.793170Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 02?\022\002\205\000\034MyReads MyWrites\205\004\205\002?\022\002\206\202\024Reply\024Write?\030\205\002\206\203\010\002 AllReads\030MyKeys\014Run4ShardsForRead4ShardsToWrite\005?\024)\211\026?\022\203\005\004\200\205\006\203\004\203\004\203\004\006\n\016\213\004\203\004\207\203\001H\213\002\203\004\203\004\203\010\203\010\203\004\206\203\014\203\014,SelectRange\000\003?* h\020\000\000\000\000\000\000\016\000\000\000\000\000\000\000?\014\005?2\003?,D\003?.F\003?0p\007\013?:\003?4\000\'?8\003\013?>\003?<\003j\030\001\003?@\000\003?B\000\003?D\007\240%&\003?F\000\006\004?J\003\203\014\000\003\203\014\000\003\003?L\000\377\007\002\000\005?\032\005?\026?x\000\005?\030\003\005? 
\005?\034?x\000\006 2025-06-24T20:42:08.027980Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:42:08.028061Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T20:42:08.028818Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit CheckDataTx 2025-06-24T20:42:08.072914Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Executed 2025-06-24T20:42:08.073025Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit CheckDataTx 2025-06-24T20:42:08.073067Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:11] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-24T20:42:08.073106Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit BuildAndWaitDependencies 2025-06-24T20:42:08.073174Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-24T20:42:08.073267Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:11] at 9437184 2025-06-24T20:42:08.073313Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Executed 2025-06-24T20:42:08.073342Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-24T20:42:08.073369Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:11] at 9437184 to execution unit ExecuteDataTx 2025-06-24T20:42:08.073399Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-24T20:42:08.079051Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-24T20:42:08.080572Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-24T20:42:08.080664Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-24T20:42:08.137732Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T20:42:08.137817Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-24T20:42:08.138685Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-24T20:42:08.149628Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-24T20:42:08.149856Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-24T20:42:08.149924Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-24T20:42:08.472906Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T20:42:08.472990Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-24T20:42:08.478507Z node 3 :TX_DATASHARD 
DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-24T20:42:08.538267Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:11] at 9437184 exceeded memory limit 4194304 and requests 33554432 more for the next try 2025-06-24T20:42:08.538662Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-24T20:42:08.538722Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-24T20:42:08.539549Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T20:42:08.539608Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-24T20:42:08.540408Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-24T20:42:08.958471Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-24T20:42:08.960450Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-24T20:42:08.960541Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-24T20:42:09.361969Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T20:42:09.362059Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-24T20:42:09.362909Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-24T20:42:09.839440Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:11] at 9437184 exceeded memory limit 37748736 and requests 301989888 more for the next try 2025-06-24T20:42:09.841791Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-24T20:42:09.841895Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-24T20:42:10.107785Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T20:42:10.107883Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-24T20:42:10.108707Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-24T20:42:10.113830Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-24T20:42:10.114051Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-24T20:42:10.114110Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-24T20:42:10.154474Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T20:42:10.154560Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-24T20:42:10.155449Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-24T20:42:10.157440Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-24T20:42:10.157641Z node 3 
:TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-24T20:42:10.157702Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-24T20:42:10.257925Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T20:42:10.258006Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-24T20:42:10.258890Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-24T20:42:10.276614Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-24T20:42:10.276864Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-24T20:42:10.276923Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-24T20:42:10.857968Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T20:42:10.858056Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-24T20:42:10.858940Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-24T20:42:12.657277Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:11] at tablet 9437184 with status COMPLETE 2025-06-24T20:42:12.657414Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:11] at 9437184: {NSelectRow: 0, NSelectRange: 1, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 129871, SelectRangeBytes: 40000268, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-24T20:42:12.657500Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Executed 2025-06-24T20:42:12.657545Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit ExecuteDataTx 2025-06-24T20:42:12.657588Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:11] at 9437184 to execution unit FinishPropose 2025-06-24T20:42:12.657628Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit FinishPropose 2025-06-24T20:42:12.657681Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 11 at tablet 9437184 send to client, exec latency: 62 ms, propose latency: 62 ms, status: COMPLETE 2025-06-24T20:42:12.657802Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is DelayComplete 2025-06-24T20:42:12.657836Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit FinishPropose 2025-06-24T20:42:12.657868Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:11] at 9437184 to execution unit CompletedOperations 2025-06-24T20:42:12.657902Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit CompletedOperations 2025-06-24T20:42:12.657954Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Executed 2025-06-24T20:42:12.657980Z node 3 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit CompletedOperations 2025-06-24T20:42:12.658011Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:11] at 9437184 has finished 2025-06-24T20:42:12.699231Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T20:42:12.699318Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:11] at 9437184 on unit FinishPropose 2025-06-24T20:42:12.699387Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::SessionsServerLimit [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:548: Enable after accepting a pull request with merging configs |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::WaitQueue/1 [GOOD] >> DataShardReadIterator::TryCommitLocksPrepared+Volatile-BreakLocks [GOOD] >> DataShardReadIterator::TryCommitLocksPrepared-Volatile+BreakLocks >> DataShardReadIteratorConsistency::LocalSnapshotReadWithConcurrentWrites [GOOD] >> DataShardReadIteratorConsistency::Bug_7674_IteratorDuplicateRows >> KqpSqlIn::Dict [GOOD] >> KqpSqlIn::Delete >> TIterator::SingleReverse [GOOD] >> TIterator::Mixed >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeRightBorder-EvWrite [GOOD] |90.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut |90.8%| [LD] {RESULT} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut |90.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut >> KqpNewEngine::DeleteWithInputMultiConsumptionLimit+UseSink >> YdbSdkSessions::TestSessionPool [GOOD] |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::WaitQueue/0 [GOOD] >> BackupRestore::RestoreReplicationWithoutSecret [FAIL] >> BackupRestore::RestoreExternalDataSourceWithoutSecret >> DataShardReadIterator::ShouldReadNoColumnsRangeRequestCellVec [GOOD] >> DataShardReadIterator::ShouldReadNoColumnsRangeRequestArrow ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips-EvWrite [GOOD] Test command err: 2025-06-24T20:40:09.732119Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:40:09.732583Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:40:09.732816Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002cc4/r3tmp/tmpo8OnWK/pdisk_1.dat 2025-06-24T20:40:10.227257Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:40:10.235297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:40:10.306363Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:10.333108Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797605741387 != 1750797605741391 2025-06-24T20:40:10.384584Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:10.384767Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:10.396991Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:40:10.522198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:10.675529Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:40:10.676875Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:40:10.677399Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T20:40:10.677669Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:40:10.745342Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:40:10.746212Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:40:10.746377Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:40:10.748418Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T20:40:10.748535Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:40:10.748602Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:40:10.749042Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:40:10.749214Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:40:10.749312Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T20:40:10.763862Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:40:10.798654Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:40:10.798893Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:40:10.799026Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T20:40:10.799075Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:40:10.799116Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:40:10.799209Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:40:10.799531Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:40:10.799582Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:40:10.799946Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:40:10.800056Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:40:10.800120Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:40:10.800190Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:40:10.800264Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:40:10.800306Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:40:10.800342Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:40:10.800391Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:40:10.800460Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:40:10.800617Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:40:10.800664Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:40:10.800719Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T20:40:10.801203Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T20:40:10.801252Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:40:10.801367Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:40:10.801633Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T20:40:10.801721Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:40:10.801842Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:40:10.801901Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20:40:10.801945Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T20:40:10.801999Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T20:40:10.802038Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:40:10.802389Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T20:40:10.802437Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T20:40:10.802478Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T20:40:10.802522Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:40:10.802574Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T20:40:10.802623Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T20:40:10.802662Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T20:40:10.802697Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T20:40:10.802742Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T20:40:10.804378Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T20:40:10.804442Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:40:10.815205Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:40:10.815295Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:40:10.815334Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:40:10.815396Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 3500:281474976715666 keys extracted: 0 2025-06-24T20:42:22.933357Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-24T20:42:22.933386Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit LoadTxDetails 2025-06-24T20:42:22.933416Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-24T20:42:22.933447Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-24T20:42:22.933492Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [3500:281474976715666] is the new logically complete end at 72075186224037889 2025-06-24T20:42:22.933547Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [3500:281474976715666] is the new logically incomplete end at 72075186224037889 2025-06-24T20:42:22.933592Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [3500:281474976715666] at 72075186224037889 2025-06-24T20:42:22.933645Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-24T20:42:22.933677Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-24T20:42:22.933705Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit CreateVolatileSnapshot 2025-06-24T20:42:22.933734Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit CreateVolatileSnapshot 2025-06-24T20:42:22.933853Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is ExecutedNoMoreRestarts 2025-06-24T20:42:22.933884Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit 
CreateVolatileSnapshot 2025-06-24T20:42:22.933929Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit DropVolatileSnapshot 2025-06-24T20:42:22.933976Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit DropVolatileSnapshot 2025-06-24T20:42:22.934004Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-24T20:42:22.934030Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit DropVolatileSnapshot 2025-06-24T20:42:22.934055Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit CompleteOperation 2025-06-24T20:42:22.934085Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit CompleteOperation 2025-06-24T20:42:22.934243Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is DelayComplete 2025-06-24T20:42:22.934277Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit CompleteOperation 2025-06-24T20:42:22.934330Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit CompletedOperations 2025-06-24T20:42:22.934375Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit CompletedOperations 2025-06-24T20:42:22.934414Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-24T20:42:22.934441Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit CompletedOperations 2025-06-24T20:42:22.934473Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:281474976715666] at 72075186224037889 has finished 2025-06-24T20:42:22.934517Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:42:22.934562Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-06-24T20:42:22.934615Z node 15 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-24T20:42:22.934659Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-24T20:42:22.948523Z node 15 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3500} 2025-06-24T20:42:22.948689Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:42:22.948775Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715666] at 72075186224037888 on unit CompleteOperation 2025-06-24T20:42:22.948891Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715666] from 72075186224037888 at tablet 72075186224037888 send 
result to client [15:1031:2808], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T20:42:22.948992Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:42:22.949135Z node 15 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 3500} 2025-06-24T20:42:22.949167Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T20:42:22.949188Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715666] at 72075186224037889 on unit CompleteOperation 2025-06-24T20:42:22.949217Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715666] from 72075186224037889 at tablet 72075186224037889 send result to client [15:1031:2808], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T20:42:22.949240Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T20:42:22.951417Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [15:553:2479], Recipient [15:626:2530]: NKikimrTxDataShard.TEvRead ReadId: 10 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715666 } LockTxId: 1011121314 ResultFormat: FORMAT_ARROW KeysSize: 1 2025-06-24T20:42:22.951728Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-24T20:42:22.951879Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckRead 2025-06-24T20:42:22.952057Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T20:42:22.952150Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckRead 2025-06-24T20:42:22.952231Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T20:42:22.952305Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T20:42:22.952366Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-24T20:42:22.952441Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T20:42:22.952479Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T20:42:22.952513Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T20:42:22.952547Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteRead 2025-06-24T20:42:22.952763Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 10 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715666 } LockTxId: 
1011121314 ResultFormat: FORMAT_ARROW } 2025-06-24T20:42:22.953329Z node 15 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 1011121314, counter# 18446744073709551615 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T20:42:22.953442Z node 15 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3500/281474976715666 2025-06-24T20:42:22.953539Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[15:553:2479], 10} after executionsCount# 1 2025-06-24T20:42:22.953649Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[15:553:2479], 10} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T20:42:22.953991Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[15:553:2479], 10} finished in read 2025-06-24T20:42:22.954120Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T20:42:22.954154Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T20:42:22.954184Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T20:42:22.954218Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-06-24T20:42:22.954291Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T20:42:22.954324Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T20:42:22.954378Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-24T20:42:22.954953Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T20:42:22.955252Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeRightBorder-EvWrite [GOOD] Test command err: 2025-06-24T20:40:08.379254Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:40:08.379672Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:40:08.379878Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002cf4/r3tmp/tmpyw1gTm/pdisk_1.dat 2025-06-24T20:40:08.932948Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:40:08.937960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:40:09.055836Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:40:09.066277Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797604867375 != 1750797604867379 2025-06-24T20:40:09.120519Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:40:09.120724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:40:09.136828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:40:09.226992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:40:09.271732Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:40:09.273138Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:40:09.273743Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T20:40:09.274092Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:40:09.324033Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:40:09.324939Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:40:09.325096Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:40:09.327451Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T20:40:09.327593Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:40:09.327710Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:40:09.328715Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:40:09.328959Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:40:09.329085Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T20:40:09.340203Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:40:09.454481Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:40:09.454872Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:40:09.455132Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T20:40:09.455200Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:40:09.455245Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:40:09.455290Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:40:09.455633Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:40:09.455692Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:40:09.456111Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:40:09.456233Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:40:09.456301Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:40:09.456370Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:40:09.456432Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:40:09.456471Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:40:09.456508Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:40:09.456543Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:40:09.456593Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:40:09.456721Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:40:09.456789Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:40:09.456858Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T20:40:09.463320Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T20:40:09.463419Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:40:09.463628Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:40:09.463952Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T20:40:09.464043Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:40:09.464195Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:40:09.464265Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20:40:09.464329Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T20:40:09.464377Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T20:40:09.464413Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:40:09.464737Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T20:40:09.464783Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T20:40:09.464817Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T20:40:09.464859Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:40:09.464939Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T20:40:09.464978Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T20:40:09.465014Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T20:40:09.465053Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T20:40:09.465101Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T20:40:09.466896Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T20:40:09.466970Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:40:09.479929Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:40:09.480034Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:40:09.480076Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:40:09.480142Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... hard_pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 3500:281474976715666 keys extracted: 0 2025-06-24T20:42:23.116259Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-24T20:42:23.116287Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit LoadTxDetails 2025-06-24T20:42:23.116329Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-24T20:42:23.116358Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-24T20:42:23.116393Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [3500:281474976715666] is the new logically complete end at 72075186224037889 2025-06-24T20:42:23.116440Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [3500:281474976715666] is the new logically incomplete end at 72075186224037889 2025-06-24T20:42:23.116490Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [3500:281474976715666] at 72075186224037889 2025-06-24T20:42:23.116544Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-24T20:42:23.116567Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-24T20:42:23.116592Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit CreateVolatileSnapshot 2025-06-24T20:42:23.116615Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit CreateVolatileSnapshot 2025-06-24T20:42:23.116713Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is ExecutedNoMoreRestarts 2025-06-24T20:42:23.116736Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on 
unit CreateVolatileSnapshot 2025-06-24T20:42:23.116772Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit DropVolatileSnapshot 2025-06-24T20:42:23.116813Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit DropVolatileSnapshot 2025-06-24T20:42:23.116837Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-24T20:42:23.116857Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit DropVolatileSnapshot 2025-06-24T20:42:23.116879Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit CompleteOperation 2025-06-24T20:42:23.116902Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit CompleteOperation 2025-06-24T20:42:23.117023Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is DelayComplete 2025-06-24T20:42:23.117048Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit CompleteOperation 2025-06-24T20:42:23.117083Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit CompletedOperations 2025-06-24T20:42:23.117122Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit CompletedOperations 2025-06-24T20:42:23.117151Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-24T20:42:23.117172Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit CompletedOperations 2025-06-24T20:42:23.117194Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:281474976715666] at 72075186224037889 has finished 2025-06-24T20:42:23.117232Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:42:23.117268Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-06-24T20:42:23.117306Z node 15 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-24T20:42:23.117345Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-24T20:42:23.128793Z node 15 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3500} 2025-06-24T20:42:23.128983Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:42:23.129083Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715666] at 72075186224037888 on unit CompleteOperation 2025-06-24T20:42:23.129217Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715666] from 72075186224037888 at tablet 72075186224037888 
send result to client [15:1031:2808], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T20:42:23.129338Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:42:23.129554Z node 15 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 3500} 2025-06-24T20:42:23.129606Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T20:42:23.129638Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715666] at 72075186224037889 on unit CompleteOperation 2025-06-24T20:42:23.129685Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715666] from 72075186224037889 at tablet 72075186224037889 send result to client [15:1031:2808], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T20:42:23.129728Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T20:42:23.134228Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [15:553:2479], Recipient [15:626:2530]: NKikimrTxDataShard.TEvRead ReadId: 3 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715666 } LockTxId: 1011121314 ResultFormat: FORMAT_ARROW KeysSize: 1 2025-06-24T20:42:23.134517Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-24T20:42:23.134666Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckRead 2025-06-24T20:42:23.134852Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T20:42:23.134945Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckRead 2025-06-24T20:42:23.135039Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T20:42:23.135122Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T20:42:23.135203Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-24T20:42:23.135283Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T20:42:23.135322Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T20:42:23.135353Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T20:42:23.135386Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteRead 2025-06-24T20:42:23.135582Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 3 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715666 } LockTxId: 
1011121314 ResultFormat: FORMAT_ARROW } 2025-06-24T20:42:23.136168Z node 15 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 1011121314, counter# 18446744073709551615 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T20:42:23.136283Z node 15 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3500/281474976715666 2025-06-24T20:42:23.136396Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[15:553:2479], 3} after executionsCount# 1 2025-06-24T20:42:23.136497Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[15:553:2479], 3} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T20:42:23.136806Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[15:553:2479], 3} finished in read 2025-06-24T20:42:23.136949Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T20:42:23.136984Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T20:42:23.137020Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T20:42:23.137054Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-06-24T20:42:23.137118Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T20:42:23.137144Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T20:42:23.137193Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-24T20:42:23.137275Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T20:42:23.137534Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> YdbSdkSessions::TestMultipleSessions >> AutoConfig::GetASPoolsWith3CPUs [GOOD] >> YdbSdkSessionsPool::StressTestSync/1 [FAIL] >> YdbSdkSessions::TestMultipleSessions [GOOD] >> YdbSdkSessions::TestActiveSessionCountAfterTransportError |90.8%| [TA] $(B)/ydb/core/mind/address_classification/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> YdbSdkSessionsPool1Session::CustomPlan/0 [GOOD] >> TxUsage::WriteToTopic_Demo_47_Query [GOOD] >> YdbSdkSessionsPool::StressTestSync/0 [GOOD] |90.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetASPoolsWith3CPUs [GOOD] >> DataShardReadIterator::ShouldReadMultipleKeys [GOOD] >> DataShardReadIterator::ShouldReadMultipleKeysOneByOne >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit98 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit99 >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0_UNIQUE_SYNC-pk_types20-all_types20-index20-Uint32-UNIQUE-SYNC] >> DataShardReadIterator::ShouldForbidDuplicatedReadId [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit1000 |90.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats |90.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats |90.8%| [TA] {RESULT} $(B)/ydb/core/mind/address_classification/ut/test-results/unittest/{meta.json ... results_accumulator.log} |90.9%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats |90.9%| [TA] $(B)/ydb/core/driver_lib/run/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0__SYNC-pk_types6-all_types6-index6-Timestamp--SYNC] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0__ASYNC-pk_types13-all_types13-index13-DyNumber--ASYNC] |90.9%| [TA] {RESULT} $(B)/ydb/core/driver_lib/run/ut/test-results/unittest/{meta.json ... results_accumulator.log} |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestSessionPool [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1__SYNC-pk_types21-all_types21-index21-Uint32--SYNC] >> YdbSdkSessions::CloseSessionWithSessionPoolExplicitDriverStopOnly >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1__ASYNC-pk_types22-all_types22-index22-Uint32--ASYNC] >> TxUsage::WriteToTopic_Demo_48_Table >> DataShardReadIteratorConsistency::Bug_7674_IteratorDuplicateRows [GOOD] >> DataShardReadIteratorConsistency::LeaseConfirmationNotOutOfOrder >> TBtreeIndexBuilder::NoNodes >> TBtreeIndexBuilder::NoNodes [GOOD] >> TBtreeIndexBuilder::OneNode [GOOD] >> TBtreeIndexBuilder::FewNodes [GOOD] >> TBtreeIndexBuilder::SplitBySize [GOOD] >> TBtreeIndexNode::TIsNullBitmap [GOOD] >> TBtreeIndexNode::CompareTo [GOOD] >> TBtreeIndexNode::Basics [GOOD] >> TBtreeIndexNode::Group [GOOD] >> TBtreeIndexNode::History [GOOD] >> TBtreeIndexNode::OneKey [GOOD] >> TBtreeIndexNode::Reusable [GOOD] >> TBtreeIndexNode::CutKeys [GOOD] >> TBtreeIndexTPart::Conf [GOOD] >> TBtreeIndexTPart::NoNodes [GOOD] >> TBtreeIndexTPart::OneNode [GOOD] >> TBtreeIndexTPart::FewNodes [GOOD] >> TBtreeIndexTPart::Erases [GOOD] >> TBtreeIndexTPart::Groups >> TBtreeIndexTPart::Groups [GOOD] >> TBtreeIndexTPart::History >> TBtreeIndexTPart::History [GOOD] >> TBtreeIndexTPart::External >> DataShardReadIterator::TryCommitLocksPrepared-Volatile+BreakLocks [GOOD] >> DataShardReadIterator::TryCommitLocksPrepared+Volatile+BreakLocks >> TBtreeIndexTPart::External [GOOD] >> TChargeBTreeIndex::NoNodes >> TChargeBTreeIndex::NoNodes [GOOD] >> TChargeBTreeIndex::NoNodes_Groups >> test_ttl.py::TestTTL::test_ttl[table_Date_0__ASYNC-pk_types31-all_types31-index31-Date--ASYNC] >> Viewer::JsonStorageListingV2PDiskIdFilter [GOOD] >> 
test_ttl.py::TestTTL::test_ttl[table_DyNumber_1__SYNC-pk_types15-all_types15-index15-DyNumber--SYNC] |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::CustomPlan/0 [GOOD] |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::StressTestSync/0 [GOOD] >> TIterator::Mixed [GOOD] >> TIterator::MixedReverse >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0__ASYNC-pk_types1-all_types1-index1-Datetime--ASYNC] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::StressTestSync/1 [FAIL] Test command err: ydb/public/sdk/cpp/tests/integration/sessions_pool/main.cpp:269: Expected equality of these values: Client->GetCurrentPoolSize() Which is: 0 activeSessionsLimit Which is: 10 >> DataShardReadIterator::ShouldReadNoColumnsRangeRequestArrow [GOOD] >> DataShardReadIterator::ShouldReadNonExistingKey >> TChargeBTreeIndex::NoNodes_Groups [GOOD] >> TChargeBTreeIndex::NoNodes_History >> KqpNewEngine::DeleteWithInputMultiConsumptionLimit+UseSink [GOOD] >> KqpNewEngine::DeleteWithInputMultiConsumptionLimit-UseSink >> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryService [SKIPPED] >> TChargeBTreeIndex::NoNodes_History [GOOD] >> TChargeBTreeIndex::NoNodes_Groups_History >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1__SYNC-pk_types9-all_types9-index9-Timestamp--SYNC] >> YdbSdkSessions::CloseSessionAfterDriverDtorWithoutSessionPool >> KqpSqlIn::SecondaryIndex_PgKey [GOOD] >> KqpSqlIn::SecondaryIndex_SimpleKey >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0__SYNC-pk_types12-all_types12-index12-DyNumber--SYNC] >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncTableClient >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit99 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit100 >> DataShardReadIterator::ShouldReadMultipleKeysOneByOne [GOOD] >> DataShardReadIterator::ShouldReadKeyPrefix1 >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit1000 [GOOD] >> DataShardReadIterator::ShouldFailUknownColumns >> TPersQueueTest::TestReadRuleServiceTypePassword [GOOD] >> TPersQueueTest::TestReadPartitionStatus >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsTableClient >> YdbSdkSessionsPool::PeriodicTask/0 [GOOD] >> YdbSdkSessionsPool::PeriodicTask/1 >> TChargeBTreeIndex::NoNodes_Groups_History [GOOD] >> TChargeBTreeIndex::OneNode >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1_UNIQUE_SYNC-pk_types11-all_types11-index11-Timestamp-UNIQUE-SYNC] >> TChargeBTreeIndex::OneNode [GOOD] >> TChargeBTreeIndex::OneNode_Groups ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryService [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:200: Test is failing right now >> KqpSqlIn::Delete [GOOD] >> KqpSqlIn::InWithCast ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::DisableDeduplication [GOOD] Test command err: === Server->StartServer(false); 2025-06-24T20:35:37.154426Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617304777953619:2178];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:37.154775Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004950/r3tmp/tmpB2037S/pdisk_1.dat 2025-06-24T20:35:37.465902Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:35:37.707554Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:37.709874Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617304777953479:2079] 1750797337112694 != 1750797337112697 2025-06-24T20:35:37.718306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:37.718420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:37.724777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21819, node 1 2025-06-24T20:35:37.954091Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/004950/r3tmp/yandexijNByN.tmp 2025-06-24T20:35:37.954115Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/004950/r3tmp/yandexijNByN.tmp 2025-06-24T20:35:37.954475Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/004950/r3tmp/yandexijNByN.tmp 2025-06-24T20:35:37.954584Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:35:38.011512Z INFO: TTestServer started on Port 63941 GrpcPort 21819 2025-06-24T20:35:38.164141Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63941 PQClient connected to localhost:21819 === TenantModeEnabled() = 0 === Init PQ - start server on port 21819 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:35:38.466990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T20:35:38.467221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:38.467424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T20:35:38.467466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976710657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T20:35:38.467808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:35:38.467854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:38.468461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T20:35:38.471351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T20:35:38.471559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:38.471640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T20:35:38.471657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-24T20:35:38.471716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 2 -> 3 2025-06-24T20:35:38.472580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:38.472625Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T20:35:38.472640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 3 -> 128 2025-06-24T20:35:38.472998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 
281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:38.473012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:38.473027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-24T20:35:38.473059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 waiting... 2025-06-24T20:35:38.477600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:35:38.477923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:35:38.477938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-24T20:35:38.477965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:35:38.478191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-24T20:35:38.478285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-24T20:35:38.479968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750797338528, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T20:35:38.480088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750797338528 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T20:35:38.480115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-24T20:35:38.480357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 128 -> 240 2025-06-24T20:35:38.480383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-24T20:35:38.480534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-24T20:35:38.480617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no 
IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-24T20:35:38.481308Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-24T20:35:38.481320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710657, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-24T20:35:38.481475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-24T20:35:38.481505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519617304777953968:2241], at schemeshard: 72057594046644480, txId: 281474976710657, path id: 1 2025-06-24T20:35:38.481545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T20:35 ... )>, grpc_core::DebugLocation const&) /-S/contrib/libs/grpc/src/core/lib/gprpp/work_serializer.cc:108:5 #38 0x1b2d9c28 in grpc_core::WorkSerializer::Run(std::__y1::function, grpc_core::DebugLocation const&) /-S/contrib/libs/grpc/src/core/lib/gprpp/work_serializer.cc:237:10 #39 0x1b2cf85c in grpc_core::PollingResolver::OnRequestComplete(grpc_core::Resolver::Result) /-S/contrib/libs/grpc/src/core/ext/filters/client_channel/resolver/polling_resolver.cc:148:21 #40 0x1b2551e4 in grpc_core::(anonymous namespace)::AresClientChannelDNSResolver::AresRequestWrapper::OnHostnameResolved(void*, y_absl::lts_y_20240722::Status) /-S/contrib/libs/grpc/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc:340:22 #41 0x1affb76e in exec_ctx_run /-S/contrib/libs/grpc/src/core/lib/iomgr/exec_ctx.cc:45:3 #42 0x1affb76e in grpc_core::ExecCtx::Flush() /-S/contrib/libs/grpc/src/core/lib/iomgr/exec_ctx.cc:72:9 #43 0x1b04fed7 in ~ExecCtx /-S/contrib/libs/grpc/src/core/lib/iomgr/exec_ctx.h:117:5 #44 0x1b04fed7 in grpc_call_start_batch /-S/contrib/libs/grpc/src/core/lib/surface/call.cc:3543:3 #45 0x1c225efa in grpc::internal::CallOpSet, grpc::internal::CallNoOp<3>, grpc::internal::CallNoOp<4>, grpc::internal::CallNoOp<5>, grpc::internal::CallNoOp<6>>::ContinueFillOpsAfterInterception() /-S/contrib/libs/grpc/include/grpcpp/impl/call_op_set.h:974:9 #46 0x1c239f4c in PerformOps /-S/contrib/libs/grpc/include/grpcpp/impl/call.h:67:17 #47 0x1c239f4c in grpc::ClientReaderWriter::ClientReaderWriter(grpc::ChannelInterface*, grpc::internal::RpcMethod const&, grpc::ClientContext*) /-S/contrib/libs/grpc/include/grpcpp/support/sync_stream.h:565:13 #48 0x1c2121aa in Create /-S/contrib/libs/grpc/include/grpcpp/support/sync_stream.h:439:16 #49 0x1c2121aa in Ydb::Topic::V1::TopicService::Stub::StreamReadRaw(grpc::ClientContext*) /-B/ydb/public/api/grpc/ydb_topic_v1.grpc.pb.cc:78:10 #50 0x195cd20a in StreamRead /-B/ydb/public/api/grpc/ydb_topic_v1.grpc.pb.h:395:148 #51 0x195cd20a in NKikimr::NPersQueueTests::NTestSuiteTPersQueueTest::TDirectReadTestSetup::InitControlSession(TBasicString> const&, unsigned long) /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:836:35 #52 0x1960bd6d in NKikimr::NPersQueueTests::NTestSuiteTPersQueueTest::TTestCaseDirectReadCleanCache::Execute_(NUnitTest::TTestContext&) 
/-S/ydb/services/persqueue_v1/persqueue_ut.cpp:1559:15 #53 0x198c1037 in operator() /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:134:1 #54 0x198c1037 in __invoke<(lambda at /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:134:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:25 #55 0x198c1037 in __call<(lambda at /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:134:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224:5 #56 0x198c1037 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169:12 #57 0x198c1037 in std::__y1::__function::__func, void ()>::operator()() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314:10 #58 0x1a360295 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12 #59 0x1a360295 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10 #60 0x1a360295 in TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/utmain.cpp:525:20 #61 0x1a32fc38 in NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/registar.cpp:373:18 #62 0x198bffe3 in NKikimr::NPersQueueTests::NTestSuiteTPersQueueTest::TCurrentTest::Execute() /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:134:1 #63 0x1a331505 in NUnitTest::TTestFactory::Execute() /-S/library/cpp/testing/unittest/registar.cpp:494:19 #64 0x1a35a80c in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #65 0x7f54c0df1d8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) Indirect leak of 16 byte(s) in 1 object(s) allocated from: #0 0x19ba31bd in operator new(unsigned long) /-S/contrib/libs/clang18-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x1b3d2a59 in make_unique /-S/contrib/libs/cxxsupp/libcxx/include/__memory/unique_ptr.h:642:26 #2 0x1b3d2a59 in grpc_core::ClientChannel::CreateLbPolicyLocked(grpc_core::ChannelArgs const&) /-S/contrib/libs/grpc/src/core/ext/filters/client_channel/client_channel.cc:1421:7 #3 0x1b3d0932 in grpc_core::ClientChannel::CreateOrUpdateLbPolicyLocked(grpc_core::RefCountedPtr, std::__y1::optional>> const&, grpc_core::Resolver::Result) /-S/contrib/libs/grpc/src/core/ext/filters/client_channel/client_channel.cc:1405:18 #4 0x1b3cdc29 in grpc_core::ClientChannel::OnResolverResultChangedLocked(grpc_core::Resolver::Result) /-S/contrib/libs/grpc/src/core/ext/filters/client_channel/client_channel.cc:1330:30 #5 0x1b40057e in grpc_core::ClientChannel::ResolverResultHandler::ReportResult(grpc_core::Resolver::Result) /-S/contrib/libs/grpc/src/core/ext/filters/client_channel/client_channel.cc:470:13 #6 0x1b2d0473 in grpc_core::PollingResolver::OnRequestCompleteLocked(grpc_core::Resolver::Result) /-S/contrib/libs/grpc/src/core/ext/filters/client_channel/resolver/polling_resolver.cc:183:22 #7 0x1b2d2a35 in operator() /-S/contrib/libs/grpc/src/core/ext/filters/client_channel/resolver/polling_resolver.cc:149:34 #8 0x1b2d2a35 in __invoke<(lambda at /-S/contrib/libs/grpc/src/core/ext/filters/client_channel/resolver/polling_resolver.cc:149:7) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:25 #9 0x1b2d2a35 in __call<(lambda at /-S/contrib/libs/grpc/src/core/ext/filters/client_channel/resolver/polling_resolver.cc:149:7) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224:5 #10 0x1b2d2a35 in operator() 
/-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169:12 #11 0x1b2d2a35 in std::__y1::__function::__func, void ()>::operator()() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314:10 #12 0x1b2d8891 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12 #13 0x1b2d8891 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10 #14 0x1b2d8891 in grpc_core::WorkSerializer::WorkSerializerImpl::Run(std::__y1::function, grpc_core::DebugLocation const&) /-S/contrib/libs/grpc/src/core/lib/gprpp/work_serializer.cc:108:5 #15 0x1b2d9c28 in grpc_core::WorkSerializer::Run(std::__y1::function, grpc_core::DebugLocation const&) /-S/contrib/libs/grpc/src/core/lib/gprpp/work_serializer.cc:237:10 #16 0x1b2cf85c in grpc_core::PollingResolver::OnRequestComplete(grpc_core::Resolver::Result) /-S/contrib/libs/grpc/src/core/ext/filters/client_channel/resolver/polling_resolver.cc:148:21 #17 0x1b2551e4 in grpc_core::(anonymous namespace)::AresClientChannelDNSResolver::AresRequestWrapper::OnHostnameResolved(void*, y_absl::lts_y_20240722::Status) /-S/contrib/libs/grpc/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc:340:22 #18 0x1affb76e in exec_ctx_run /-S/contrib/libs/grpc/src/core/lib/iomgr/exec_ctx.cc:45:3 #19 0x1affb76e in grpc_core::ExecCtx::Flush() /-S/contrib/libs/grpc/src/core/lib/iomgr/exec_ctx.cc:72:9 #20 0x1b04fed7 in ~ExecCtx /-S/contrib/libs/grpc/src/core/lib/iomgr/exec_ctx.h:117:5 #21 0x1b04fed7 in grpc_call_start_batch /-S/contrib/libs/grpc/src/core/lib/surface/call.cc:3543:3 #22 0x1c225efa in grpc::internal::CallOpSet, grpc::internal::CallNoOp<3>, grpc::internal::CallNoOp<4>, grpc::internal::CallNoOp<5>, grpc::internal::CallNoOp<6>>::ContinueFillOpsAfterInterception() /-S/contrib/libs/grpc/include/grpcpp/impl/call_op_set.h:974:9 #23 0x1c239f4c in PerformOps /-S/contrib/libs/grpc/include/grpcpp/impl/call.h:67:17 #24 0x1c239f4c in grpc::ClientReaderWriter::ClientReaderWriter(grpc::ChannelInterface*, grpc::internal::RpcMethod const&, grpc::ClientContext*) /-S/contrib/libs/grpc/include/grpcpp/support/sync_stream.h:565:13 #25 0x1c2121aa in Create /-S/contrib/libs/grpc/include/grpcpp/support/sync_stream.h:439:16 #26 0x1c2121aa in Ydb::Topic::V1::TopicService::Stub::StreamReadRaw(grpc::ClientContext*) /-B/ydb/public/api/grpc/ydb_topic_v1.grpc.pb.cc:78:10 #27 0x195cd20a in StreamRead /-B/ydb/public/api/grpc/ydb_topic_v1.grpc.pb.h:395:148 #28 0x195cd20a in NKikimr::NPersQueueTests::NTestSuiteTPersQueueTest::TDirectReadTestSetup::InitControlSession(TBasicString> const&, unsigned long) /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:836:35 #29 0x19611936 in NKikimr::NPersQueueTests::NTestSuiteTPersQueueTest::TTestCaseDirectReadRestartPQRB::Execute_(NUnitTest::TTestContext&) /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:1594:15 #30 0x198c1037 in operator() /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:134:1 #31 0x198c1037 in __invoke<(lambda at /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:134:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:25 #32 0x198c1037 in __call<(lambda at /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:134:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224:5 #33 0x198c1037 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169:12 #34 0x198c1037 in std::__y1::__function::__func, void ()>::operator()() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314:10 #35 0x1a360295 in operator() 
/-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12 #36 0x1a360295 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10 #37 0x1a360295 in TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/utmain.cpp:525:20 #38 0x1a32fc38 in NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/registar.cpp:373:18 #39 0x198bffe3 in NKikimr::NPersQueueTests::NTestSuiteTPersQueueTest::TCurrentTest::Execute() /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:134:1 #40 0x1a331505 in NUnitTest::TTestFactory::Execute() /-S/library/cpp/testing/unittest/registar.cpp:494:19 #41 0x1a35a80c in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #42 0x7f54c0df1d8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) SUMMARY: AddressSanitizer: 7900200 byte(s) leaked in 2223 allocation(s). >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0_UNIQUE_SYNC-pk_types14-all_types14-index14-DyNumber-UNIQUE-SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Date_0__SYNC-pk_types30-all_types30-index30-Date--SYNC] |90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/graph/ut/ydb-core-graph-ut |90.9%| [LD] {RESULT} $(B)/ydb/core/graph/ut/ydb-core-graph-ut |90.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/graph/ut/ydb-core-graph-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::JsonStorageListingV2PDiskIdFilter [GOOD] Test command err: 2025-06-24T20:33:54.099531Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:2806:2397], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:54.101321Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:54.101795Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:54.102076Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:1445:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:54.103863Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:2765:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:54.103989Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:54.105224Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:54.106056Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:2762:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:54.106154Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:54.107129Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:2800:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:54.107406Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:54.107859Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:54.108476Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:54.108687Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:2809:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:54.108858Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:54.110218Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:54.110277Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:54.111000Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:54.113056Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:2813:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:54.114401Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:54.115579Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:54.115956Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:1448:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:54.116692Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:54.117255Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:2816:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:54.117346Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:33:54.118400Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:54.118742Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:54.575756Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:33:54.749868Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:33:54.772979Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:434} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-24T20:33:55.360612Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 24223, node 1 TClient is connected to server localhost:15551 2025-06-24T20:33:55.713531Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:33:55.713606Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:33:55.713648Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:33:55.713972Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:35:42.652841Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:2783:2394], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:35:42.654033Z node 16 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [16:1837:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:35:42.656193Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:35:42.656705Z node 18 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [18:1843:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:35:42.657318Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:35:42.657477Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:35:42.658622Z node 15 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [15:1834:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:35:42.658736Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:35:42.658939Z node 17 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [17:1840:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:35:42.659027Z node 18 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:35:42.660309Z node 18 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:35:42.660877Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:35:42.660972Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:35:42.661811Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:35:42.661901Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:35:42.665143Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [Wo ... ath status: LookupError; 2025-06-24T20:38:33.160814Z node 24 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:38:33.160897Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:38:33.161109Z node 27 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [27:2220:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:38:33.161796Z node 22 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [22:2205:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:38:33.161918Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:38:33.163008Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:38:33.163097Z node 27 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:38:33.163941Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:38:33.164067Z node 27 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:38:33.164644Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:38:34.412164Z node 19 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:38:34.836735Z node 19 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:38:34.891974Z node 19 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:434} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-24T20:38:36.399802Z node 19 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 18044, node 19 TClient is connected to server localhost:13538 2025-06-24T20:38:37.665355Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:38:37.665479Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:38:37.665569Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:38:37.666621Z node 19 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:41:45.805706Z node 28 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [28:2805:2397], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:41:45.808292Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:45.811384Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:41:45.812484Z node 29 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [29:2800:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:41:45.814403Z node 30 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [30:2809:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:41:45.814850Z node 35 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [35:1835:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:41:45.815112Z node 36 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [36:1838:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:41:45.816789Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:45.817895Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:41:45.817988Z node 30 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:45.818262Z node 31 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [31:2813:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:41:45.818519Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:45.818636Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:45.819454Z node 30 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:41:45.819951Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:41:45.820628Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:45.820939Z node 32 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [32:2816:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:41:45.821140Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:41:45.822722Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:41:45.823445Z node 32 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:45.824219Z node 32 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:41:45.827618Z node 34 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [34:1832:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:41:45.829268Z node 33 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [33:1829:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:41:45.829424Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:45.830756Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:41:45.831602Z node 33 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:45.832844Z node 33 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:41:46.618315Z node 28 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:47.136938Z node 28 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:41:47.244899Z node 28 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:434} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-24T20:41:49.952090Z node 28 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 18706, node 28 TClient is connected to server localhost:18483 2025-06-24T20:41:51.529345Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:41:51.529482Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:41:51.529587Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:41:51.532621Z node 28 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> TChargeBTreeIndex::OneNode_Groups [GOOD] >> TChargeBTreeIndex::OneNode_History >> DataShardReadIteratorConsistency::LeaseConfirmationNotOutOfOrder [GOOD] >> DataShardReadIteratorConsistency::BrokenWriteLockBeforeIteration |90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence |90.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence |90.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence >> DataShardReadIterator::TryCommitLocksPrepared+Volatile+BreakLocks [GOOD] >> DataShardReadIterator::TryWriteManyRows+Commit >> YdbSdkSessions::TestActiveSessionCountAfterTransportError [GOOD] >> DataShardReadIterator::ShouldReadNonExistingKey [GOOD] >> DataShardReadIterator::ShouldReadNotExistingRange >> TIterator::MixedReverse [GOOD] >> TIterator::Serial >> TChargeBTreeIndex::OneNode_History [GOOD] >> TChargeBTreeIndex::OneNode_Groups_History >> TDataShardLocksTest::MvccTestWriteBreaksLocks >> TDataShardLocksTest::MvccTestWriteBreaksLocks [GOOD] >> 
TDataShardLocksTest::Points_ManyTx >> YdbSdkSessions::CloseSessionAfterDriverDtorWithoutSessionPool [GOOD] >> YdbSdkSessions::CloseSessionWithSessionPoolExplicit >> TDataShardLocksTest::Points_ManyTx [GOOD] >> TDataShardLocksTest::Points_ManyTx_BreakAll >> TDataShardLocksTest::Points_ManyTx_BreakAll [GOOD] >> TDataShardLocksTest::Points_ManyTx_BreakHalf_RemoveHalf >> KqpNewEngine::DeleteWithInputMultiConsumptionLimit-UseSink [GOOD] >> KqpNewEngine::DependentSelect >> TDataShardLocksTest::Points_ManyTx_BreakHalf_RemoveHalf [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0_UNIQUE_SYNC-pk_types8-all_types8-index8-Timestamp-UNIQUE-SYNC] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit100 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit101 |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_locks/unittest >> TDataShardLocksTest::Points_ManyTx_BreakHalf_RemoveHalf [GOOD] |90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut |90.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut |90.9%| [LD] {RESULT} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut >> DataShardReadIterator::ShouldFailUknownColumns [GOOD] >> DataShardReadIterator::ShouldFailWrongSchema >> TIterator::Serial [GOOD] >> TIterator::SerialReverse |90.9%| [TA] $(B)/ydb/core/tx/datashard/ut_locks/test-results/unittest/{meta.json ... results_accumulator.log} >> DataShardReadIterator::ShouldReadKeyPrefix1 [GOOD] >> DataShardReadIterator::ShouldReadKeyPrefix2 |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestActiveSessionCountAfterTransportError [GOOD] >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncTableClient [GOOD] >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncQueryClient >> TChargeBTreeIndex::OneNode_Groups_History [GOOD] >> TChargeBTreeIndex::FewNodes >> TChargeBTreeIndex::FewNodes [GOOD] >> TChargeBTreeIndex::FewNodes_Groups >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsTableClient [GOOD] >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsQueryClient [SKIPPED] |90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init |90.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init |90.9%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_locks/test-results/unittest/{meta.json ... 
results_accumulator.log} |90.9%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init >> TIterator::SerialReverse [GOOD] >> TIterator::GetKey >> TIterator::GetKey [GOOD] >> TIterator::GetKeyWithEraseCache [GOOD] >> TIterator::GetKeyWithVersionSkips [GOOD] >> TLegacy::IndexIter >> TLegacy::IndexIter [GOOD] >> TLegacy::ScreenedIndexIter [GOOD] >> TLegacy::StatsIter >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1_UNIQUE_SYNC-pk_types23-all_types23-index23-Uint32-UNIQUE-SYNC] >> KqpSqlIn::SecondaryIndex_SimpleKey [GOOD] >> KqpSqlIn::SecondaryIndex_ComplexKey_In_And_In >> KqpSqlIn::InWithCast [GOOD] >> TLegacy::StatsIter [GOOD] >> TPageHandleTest::Uninitialized [GOOD] >> TPageHandleTest::NormalUse >> TPageHandleTest::NormalUse [GOOD] >> TPageHandleTest::HandleRef [GOOD] >> TPageHandleTest::PinnedRef [GOOD] >> TPageHandleTest::PinnedRefPure [GOOD] >> TPart::BasicColumnGroups [GOOD] >> DataShardReadIterator::ShouldReadNotExistingRange [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk1_100 >> TChargeBTreeIndex::FewNodes_Groups [GOOD] >> TChargeBTreeIndex::FewNodes_History ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpSqlIn::InWithCast [GOOD] Test command err: Trying to start YDB, gRPC: 28865, MsgBus: 8854 2025-06-24T20:41:15.338695Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618758574175303:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:15.338989Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00322a/r3tmp/tmpso0AKx/pdisk_1.dat 2025-06-24T20:41:15.877523Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:15.879311Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618758574175284:2079] 1750797675337041 != 1750797675337044 2025-06-24T20:41:15.920213Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:15.920390Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 28865, node 1 2025-06-24T20:41:15.929396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:41:16.007976Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:41:16.008003Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:41:16.008010Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:41:16.008148Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8854 2025-06-24T20:41:16.393544Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8854 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:41:16.879464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:41:16.898659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:41:16.913842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:17.087981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:41:17.287737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:17.420910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:19.147424Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618775754046124:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:19.147555Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:19.559032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:19.641586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:19.719816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:19.765846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:19.821589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:19.878978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:19.927388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:20.051303Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618780049014090:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:20.051380Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:20.051552Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618780049014095:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:20.055593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:41:20.080265Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618780049014097:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:41:20.172535Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618780049014150:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:41:20.342163Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618758574175303:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:20.342263Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:41:21.811178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... pp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:32.534579Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:32.641905Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 25312, MsgBus: 10311 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00322a/r3tmp/tmpop96sF/pdisk_1.dat 2025-06-24T20:42:38.213293Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:42:38.294226Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:42:38.294345Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:42:38.297023Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:42:38.297404Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:42:38.303317Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519619114513013729:2079] 1750797758062460 != 1750797758062463 TServer::EnableGrpc on GrpcPort 25312, node 
7 2025-06-24T20:42:38.415886Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:42:38.415917Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:42:38.415932Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:42:38.416109Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10311 2025-06-24T20:42:39.081352Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10311 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:42:39.407534Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:42:39.431020Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:42:39.548731Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:42:39.876687Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:42:40.053270Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:42:43.426516Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519619135987851857:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:42:43.426681Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:42:43.560095Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:43.660630Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:43.714632Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:43.815638Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:43.897815Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:44.030311Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:44.144592Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:44.336225Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519619140282819826:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:42:44.336367Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:42:44.336896Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519619140282819831:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:42:44.345513Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:42:44.393980Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519619140282819833:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:42:44.451398Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519619140282819886:3433] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> DataShardReadIteratorConsistency::BrokenWriteLockBeforeIteration [GOOD] >> DataShardReadIteratorConsistency::BrokenWriteLockDuringIteration >> KqpNewEngine::DependentSelect [GOOD] >> KqpNewEngine::DqSourceCount >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1_UNIQUE_SYNC-pk_types5-all_types5-index5-Datetime-UNIQUE-SYNC] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit101 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit198 >> TChargeBTreeIndex::FewNodes_History [GOOD] >> TChargeBTreeIndex::FewNodes_Sticky |90.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> TPart::BasicColumnGroups [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:41:34.794912Z 00000.011 DD| RESOURCE_BROKER: TResourceBrokerActor bootstrap 00000.014 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.015 II| FAKE_ENV: Starting storage for BS group 0 00000.015 II| FAKE_ENV: Starting storage for BS group 1 00000.015 II| FAKE_ENV: Starting storage for BS group 2 00000.015 II| FAKE_ENV: Starting storage for BS group 3 00000.021 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.022 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.022 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1 00000.023 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} queued, type NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema 00000.023 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.024 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} hope 1 -> done Change{2, redo 0b alter 302b annex 0, ~{ } -{ }, 0 gb} 00000.024 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} release 4194304b of static, Memory{0 dyn 0} 00000.024 DD| TABLET_EXECUTOR: TGenCompactionStrategy CheckGeneration for 1 generation 1, state Free, final id 0, final level 0 00000.024 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2 00000.025 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile 00000.025 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.025 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} hope 1 -> done Change{2, redo 0b alter 15b annex 0, ~{ } -{ }, 0 gb} 00000.025 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, 
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} release 4194304b of static, Memory{0 dyn 0} 00000.025 DD| TABLET_EXECUTOR: TGenCompactionStrategy CheckGeneration for 1 generation 1, state Free, final id 0, final level 0 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 104856577b requested for data (104857601b in total) 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 0} 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{1 104857601b} type large_transaction 00000.027 DD| RESOURCE_BROKER: Submitted new unknown task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062]) priority=5 resources={0, 104857601} 00000.027 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.027 DD| RESOURCE_BROKER: Allocate resources {0, 104857601} for task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062]) from queue queue_default 00000.027 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.027 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 12.207031 (insert task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])) 00000.027 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{1 104857601b}, Memory{0 dyn 104857601} 00000.027 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.027 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{1 
104857601b}, Memory{0 dyn 0} 00000.027 DD| RESOURCE_BROKER: Finish task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062]) (release resources {0, 104857601}) 00000.027 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 12.207031 to 0.000000 (remove task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])) 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 104856577b requested for data (104857601b in total) 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 104857601b of static mem, Memory{104857601 dyn 0} 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 104857601b of static, Memory{0 dyn 0} 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 209714177b requested for data (209715201b in total) 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 0} 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{2 209715201b} type large_transaction 00000.029 DD| RESOURCE_BROKER: Submitted new unknown task Tx{5, 
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062]) priority=5 resources={0, 209715201} 00000.029 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.029 DD| RESOURCE_BROKER: Allocate resources {0, 209715201} for task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062]) from queue queue_default 00000.029 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.029 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 23.193359 (insert task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])) 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{2 209715201b}, Memory{0 dyn 209715201} 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{2 209715201b}, Memory{0 dyn 0} 00000.029 DD| RESOURCE_BROKER: Finish task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062]) (release resources {0, 209715201}) 00000.029 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 23.193359 to 0.000000 (remove task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])) 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.030 DD| TABLET_EXE ... 
76:97:0], [1:2:54:1:24576:97:0], [1:2:55:1:24576:97:0], [1:2:56:1:24576:97:0], [1:2:57:1:24576:97:0], [1:2:58:1:24576:97:0], [1:2:59:1:24576:97:0], [1:2:60:1:24576:97:0], [1:2:61:1:24576:97:0], [1:2:62:1:24576:97:0], [1:2:63:1:24576:97:0], [1:2:64:1:24576:97:0], [1:2:65:1:24576:97:0], [1:2:66:1:24576:97:0], [1:2:67:1:24576:97:0], [1:2:68:1:24576:97:0], [1:2:69:1:24576:97:0], [1:2:70:1:24576:97:0], [1:2:71:1:24576:97:0], [1:2:72:1:24576:97:0], [1:2:73:1:24576:101:0], [1:2:74:1:24576:102:0], [1:2:75:1:24576:101:0], [1:2:76:1:24576:102:0], [1:2:77:1:24576:104:0], [1:2:78:1:24576:104:0], [1:2:79:1:24576:104:0], [1:2:80:1:24576:104:0], [1:2:81:1:24576:103:0], [1:2:82:1:24576:101:0], [1:2:83:1:24576:104:0], [1:2:84:1:24576:104:0], [1:2:85:1:24576:104:0], [1:2:86:1:24576:104:0], [1:2:87:1:24576:104:0], [1:2:88:1:24576:104:0], [1:2:89:1:24576:104:0], [1:2:90:1:24576:101:0], [1:2:91:1:24576:104:0], [1:2:92:1:24576:104:0], [1:2:93:1:24576:98:0], [1:2:94:1:24576:104:0], [1:2:95:1:24576:104:0], [1:2:96:1:24576:104:0], [1:2:97:1:24576:104:0], [1:2:98:1:24576:104:0], [1:2:99:1:24576:104:0], [1:2:100:1:24576:104:0], [1:2:101:1:24576:97:0], [1:2:102:1:24576:100:0], [1:2:103:1:24576:104:0], [1:2:104:1:24576:104:0], [1:2:105:1:24576:104:0], [1:2:106:1:24576:104:0], [1:2:107:1:24576:104:0], [1:2:108:1:24576:104:0], [1:2:109:1:24576:104:0], [1:2:110:1:24576:104:0], [1:2:111:1:24576:104:0], [1:2:112:1:24576:104:0], [1:2:113:1:24576:104:0], [1:2:114:1:24576:104:0], [1:2:115:1:24576:104:0], [1:2:116:1:24576:104:0], [1:2:117:1:24576:104:0], [1:2:118:1:24576:104:0], [1:2:119:1:24576:104:0], [1:2:120:1:24576:104:0], [1:2:121:1:24576:104:0], [1:2:122:1:24576:104:0], [1:2:123:1:24576:104:0], [1:2:124:1:24576:104:0], [1:2:125:1:24576:104:0], [1:2:126:1:24576:104:0], [1:2:127:1:24576:104:0], [1:2:128:1:24576:104:0], [1:2:129:1:24576:104:0], [1:2:130:1:24576:104:0], [1:2:131:1:24576:104:0], [1:2:132:1:24576:104:0], [1:2:133:1:24576:104:0], [1:2:134:1:24576:104:0], [1:2:135:1:24576:104:0], [1:2:136:1:24576:104:0], [1:2:137:1:24576:104:0], [1:2:138:1:24576:104:0], [1:2:139:1:24576:104:0], [1:2:140:1:24576:104:0], [1:2:141:1:24576:104:0], [1:2:142:1:24576:104:0], [1:2:145:1:24576:60:0], [1:2:146:1:24576:60:0] } 00000.143 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:143:1:12288:758:0] 00000.143 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:143:1:12288:758:0] owner [20:212:2237] 00000.143 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:143:1:12288:758:0] owner [20:212:2237] cookie 4 class Online from cache [ ] already requested [ ] to request [ 22 23 24 25 ] 00000.144 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:143:1:12288:758:0] status OK pages [ 22 23 24 25 ] 00000.144 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:143:1:12288:758:0] owner [20:212:2237] class Online pages [ 22 23 24 25 ] cookie 4 00000.144 II| TABLET_EXECUTOR: Leader{1:3:0} activating executor 00000.145 II| TABLET_EXECUTOR: LSnap{1:3, on 3:1, 1880b, wait} done, Waste{2:0, 141856b +(140, 14018b), 146 trc} 00000.146 DD| TABLET_SAUSAGECACHE: Attach page collection [1:2:143:1:12288:758:0] owner [20:212:2237] 00000.146 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:143:1:12288:758:0] owner [20:212:2237] cookie 2 class AsyncLoad from cache [ 22 23 24 25 ] already requested [ ] to request [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 ] 00000.146 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:143:1:12288:758:0] async queue pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 ] 00000.146 DD| 
TABLET_EXECUTOR: Leader{1:3:2} commited cookie 2 for step 1 00000.147 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:143:1:12288:758:0] owner [20:212:2237] pages [ 22 23 24 25 ] 00000.147 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:143:1:12288:758:0] status OK pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 ] 00000.153 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:143:1:12288:758:0] owner [20:212:2237] class AsyncLoad pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 ] cookie 2 00000.154 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{26 pages [1:2:143:1:12288:758:0] ok OK}, category 2 00000.154 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:143:1:12288:758:0] owner [20:212:2237] pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 ] 00000.155 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan 00000.155 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.155 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} hope 1 -> done Change{145, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.156 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} release 4194304b of static, Memory{0 dyn 0} 00000.156 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.156 II| TABLET_EXECUTOR: Leader{1:3:2} suiciding, Waste{2:0, 141856b +(0, 0b), 1 trc, -14018b acc} 00000.156 DD| TABLET_SAUSAGECACHE: Unregister owner [20:212:2237] 00000.157 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:143:1:12288:758:0] owner [20:212:2237] 00000.157 DD| TABLET_SAUSAGECACHE: Remove owner [20:212:2237] 00000.157 DD| TABLET_SAUSAGECACHE: Drop expired page collection [1:2:143:1:12288:758:0] 00000.157 NN| TABLET_SAUSAGECACHE: Poison cache serviced 3 reqs hit {6 1077b} miss {50 281387b} 00000.157 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.161 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {14354b, 149} 00000.161 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.161 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.161 II| FAKE_ENV: DS.1 gone, left {143736b, 8}, put {157893b, 150} 00000.161 II| FAKE_ENV: All BS storage groups are stopped 00000.161 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.162 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 795}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:41:40.014188Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.038 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.039 NN| TABLET_SAUSAGECACHE: Poison cache serviced 3 reqs hit {3 512b} miss {0 0b} 00000.039 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.039 II| FAKE_ENV: DS.0 gone, left {1356b, 12}, put {1376b, 13} 00000.039 II| FAKE_ENV: DS.1 gone, left {6814b, 23}, 
put {6814b, 23} 00000.040 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.040 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.040 II| FAKE_ENV: All BS storage groups are stopped 00000.040 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.040 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:41:40.062815Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.406 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.406 NN| TABLET_SAUSAGECACHE: Poison cache serviced 10 reqs hit {860 5551893b} miss {0 0b} 00000.415 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.415 II| FAKE_ENV: DS.0 gone, left {1201b, 13}, put {1221b, 14} 00000.415 II| FAKE_ENV: DS.1 gone, left {6751256b, 17}, put {6751256b, 17} 00000.418 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.418 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.418 II| FAKE_ENV: All BS storage groups are stopped 00000.418 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.418 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:41:40.494153Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.007 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00019.155 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00019.155 NN| TABLET_SAUSAGECACHE: Poison cache serviced 4109 reqs hit {2091 2366986b} miss {6144 6340608b} 00019.164 II| FAKE_ENV: Shut order, stopping 4 BS groups 00019.164 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00019.164 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00019.164 II| FAKE_ENV: DS.0 gone, left {1761b, 14}, put {1781b, 15} 00019.165 II| FAKE_ENV: DS.1 gone, left {6927727b, 27}, put {6927727b, 27} 00019.178 II| FAKE_ENV: All BS storage groups are stopped 00019.178 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00019.178 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:41:59.686886Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00019.175 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00019.176 NN| TABLET_SAUSAGECACHE: Poison cache serviced 4106 reqs hit {43 253450b} miss {4096 4227072b} 00019.182 II| FAKE_ENV: Shut order, stopping 4 BS groups 00019.182 II| FAKE_ENV: DS.0 gone, left {44744b, 2}, put {164747b, 16} 00019.182 II| FAKE_ENV: DS.1 gone, left {2764621b, 2068}, put {2764621b, 2068} 00019.190 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00019.190 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 
00019.190 II| FAKE_ENV: All BS storage groups are stopped 00019.190 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00019.190 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:42:18.906364Z 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:42:18.939582Z 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:42:18.987993Z 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:42:19.045518Z 00000.013 II| FAKE_ENV: Starting storage for BS group 0 00000.013 II| FAKE_ENV: Starting storage for BS group 1 00000.013 II| FAKE_ENV: Starting storage for BS group 2 00000.013 II| FAKE_ENV: Starting storage for BS group 3 |90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export |90.9%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export |90.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export >> KqpRanges::NoFullScanAtScanQuery [GOOD] >> KqpRanges::NoFullScanAtDNFPredicate >> TChargeBTreeIndex::FewNodes_Sticky [GOOD] >> TChargeBTreeIndex::FewNodes_Groups_History >> DataShardReadIterator::ShouldFailWrongSchema [GOOD] >> DataShardReadIterator::ShouldFailReadNextAfterSchemeChange ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsQueryClient [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:539: Enable after interactive tx support >> DataShardReadIterator::ShouldReadKeyPrefix2 [GOOD] >> DataShardReadIterator::ShouldReadKeyPrefix3 |90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |90.9%| [LD] {RESULT} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |90.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |90.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |90.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest |90.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncQueryClient [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1__ASYNC-pk_types10-all_types10-index10-Timestamp--ASYNC] |90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut |90.9%| [LD] {RESULT} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut |90.9%| 
[LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut >> DataShardReadIterator::TryWriteManyRows+Commit [GOOD] >> DataShardReadIterator::TryWriteManyRows-Commit >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0__ASYNC-pk_types19-all_types19-index19-Uint32--ASYNC] >> DataShardReadIterator::ShouldReadRangeChunk1_100 [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk1 |90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |90.9%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |90.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction >> YdbSdkSessionsPool::PeriodicTask/1 [GOOD] >> BackupRestore::RestoreExternalDataSourceWithoutSecret [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobal >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0__SYNC-pk_types0-all_types0-index0-Datetime--SYNC] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1_UNIQUE_SYNC-pk_types17-all_types17-index17-DyNumber-UNIQUE-SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0__SYNC-pk_types18-all_types18-index18-Uint32--SYNC] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit198 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit900 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::JsonStorageListingV1PDiskIdFilter 2025-06-24 20:42:49,348 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 20:42:49,806 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 153753 47.4M 47.1M 24.0M test_tool run_ut @/home/runner/.ya/build/build_root/2btm/00491f/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/chunk4/testing_out_stuff/test_tool.args 155503 3.6G 3.5G 3.5G └─ ydb-core-viewer-ut --trace-path-append /home/runner/.ya/build/build_root/2btm/00491f/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/chunk4/ytest.report.trace Test command err: 2025-06-24T20:33:08.503895Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:297:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:33:08.504405Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:33:08.504466Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:33:08.884889Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 20912, node 1 TClient is connected to server localhost:15602 2025-06-24T20:34:15.171710Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:2780:2394], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:15.173515Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:15.174825Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:34:15.175783Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:2783:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:15.176322Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:15.176566Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:1446:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:15.177084Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:2774:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:15.177215Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:34:15.177396Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:1443:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:15.178884Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:15.180012Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:15.180203Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:15.180271Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:34:15.181463Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:34:15.181745Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:34:15.183681Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:2793:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:15.184895Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:2787:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:15.185074Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:15.185715Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:34:15.185821Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:2796:2338], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:15.186238Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:15.186710Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:34:15.186809Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:2790:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:34:15.186872Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:15.187561Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:34:15.188304Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:34:15.188473Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:34:15.792038Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:16.013645Z node 2 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:34:16.043820Z node 2 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:434} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-24T20:34:16.753251Z node 2 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 26407, node 2 TClient is connected to server localhost:65220 2025-06-24T20:34:17.395268Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:34:17.395356Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:34:17.395419Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:34:17.395835Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:36:19.475576Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:2785:2394], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:36:19.477721Z node 17 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [17:1839:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:36:19.479140Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:36:19.479690Z node 19 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [19:1845:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:36:19.480374Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:36:19.480546Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:36:19.481877Z node 16 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [16:1836:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:36:19.481983Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:36:19.482125Z node 18 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [18:1842:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:36:19.482210Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:36:19.483503Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:36:19.484136Z node 16 ... or_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:38:59.648772Z node 21 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [21:2797:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:38:59.648922Z node 27 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:38:59.650159Z node 21 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:38:59.651133Z node 21 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:39:00.383898Z node 20 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:39:00.762660Z node 20 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:39:00.811948Z node 20 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:434} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-24T20:39:01.827405Z node 20 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 24112, node 20 TClient is connected to server localhost:20641 2025-06-24T20:39:02.653103Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:39:02.653255Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:39:02.653351Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:39:02.654203Z node 20 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:42:04.464161Z node 29 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [29:2803:2394], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:42:04.466243Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:42:04.468473Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:42:04.468683Z node 31 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [31:2380:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:42:04.469803Z node 30 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [30:2793:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:42:04.471205Z node 30 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:42:04.471367Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:42:04.473328Z node 32 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [32:2414:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:42:04.473947Z node 30 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:42:04.474067Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:42:04.476030Z node 32 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:42:04.476962Z node 32 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:42:04.478301Z node 33 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [33:2417:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:42:04.478580Z node 35 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [35:2423:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:42:04.478833Z node 36 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [36:2426:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:42:04.481413Z node 33 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:42:04.481575Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:42:04.481657Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:42:04.481991Z node 33 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:42:04.482245Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:42:04.483697Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:42:04.485796Z node 34 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [34:2420:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:42:04.486096Z node 37 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [37:2429:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:42:04.487734Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:42:04.487848Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:42:04.488610Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:42:04.488688Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-24T20:42:05.394190Z node 29 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:42:05.745873Z node 29 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T20:42:05.830361Z node 29 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:434} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-24T20:42:08.564280Z node 29 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 7532, node 29 TClient is connected to server localhost:16779 2025-06-24T20:42:10.388165Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:42:10.388330Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:42:10.388454Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:42:10.390525Z node 29 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/00491f/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/chunk4/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', 
'@/home/runner/.ya/build/build_root/2btm/00491f/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/chunk4/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Query [FAIL] Test command err: 2025-06-24T20:34:42.793988Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617069904857271:2189];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:42.794286Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:34:43.293875Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00458c/r3tmp/tmpoztWpf/pdisk_1.dat 2025-06-24T20:34:43.555496Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:43.555598Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:43.566837Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617069904857109:2079] 1750797282759759 != 1750797282759762 2025-06-24T20:34:43.579971Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:43.581092Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28324, node 1 2025-06-24T20:34:43.694989Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00458c/r3tmp/yandexYcJbV1.tmp 2025-06-24T20:34:43.695020Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00458c/r3tmp/yandexYcJbV1.tmp 2025-06-24T20:34:43.695526Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00458c/r3tmp/yandexYcJbV1.tmp 2025-06-24T20:34:43.695696Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:43.795951Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:43.804351Z INFO: TTestServer started on Port 19026 GrpcPort 28324 TClient is connected to server localhost:19026 PQClient connected to localhost:28324 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:44.267579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:44.285059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T20:34:44.334428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:34:44.555231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-24T20:34:47.128470Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617091379694388:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:47.128626Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:47.128821Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617091379694401:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:47.143600Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617091379694432:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:47.143708Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:47.148049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:47.187782Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617091379694403:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T20:34:47.516686Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617091379694459:2445] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:47.554574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:47.680786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:47.701885Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617091379694468:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:34:47.708266Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MmIzYTI1OTctYjYzZmQ4OTgtMzc1NjljOGYtYzQzZjFiOTc=, ActorId: [1:7519617091379694386:2298], ActorState: ExecuteState, TraceId: 01jyhtcznw41gcpynm3te06sf5, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:34:47.710412Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:34:47.782360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617069904857271:2189];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:47.782447Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:47.814706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519617095674662059:2623] === CheckClustersList. Ok 2025-06-24T20:34:53.501575Z :Sinks_Oltp_WriteToTopic_1_Table INFO: TTopicSdkTestSetup started 2025-06-24T20:34:53.531325Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T20:34:53.551235Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7519617117149498738:2712] connected; active server actors: 1 2025-06-24T20:34:53.551936Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-24T20:34:53.556020Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-24T20:34:53.556171Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-t ... 
ge_group_id|57fe7522-fe0c1fe5-19802e3e-652319e9_0 grpc read done: success: 0 data: 2025-06-24T20:42:12.403197Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 7 sessionId: test-message_group_id|57fe7522-fe0c1fe5-19802e3e-652319e9_0 grpc read failed 2025-06-24T20:42:12.403240Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 7 sessionId: test-message_group_id|57fe7522-fe0c1fe5-19802e3e-652319e9_0 grpc closed 2025-06-24T20:42:12.403259Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 7 sessionId: test-message_group_id|57fe7522-fe0c1fe5-19802e3e-652319e9_0 is DEAD 2025-06-24T20:42:12.404402Z node 20 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:42:12.404442Z node 20 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:42:12.405082Z node 20 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|636337f9-744052f3-55fbf625-69264893_0 grpc read done: success: 0 data: 2025-06-24T20:42:12.405100Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|636337f9-744052f3-55fbf625-69264893_0 grpc read failed 2025-06-24T20:42:12.405132Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|636337f9-744052f3-55fbf625-69264893_0 grpc closed 2025-06-24T20:42:12.405150Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|636337f9-744052f3-55fbf625-69264893_0 is DEAD 2025-06-24T20:42:12.405681Z node 20 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:42:12.405716Z node 20 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:42:12.405906Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037896] server disconnected, pipe [20:7519618999014224583:3402] destroyed 2025-06-24T20:42:12.405935Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037896] server disconnected, pipe [20:7519618999014224586:3402] destroyed 2025-06-24T20:42:12.407350Z node 20 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037939;self_id=[20:7519618947474611597:2521];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037939;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:42:12.407505Z node 20 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037993;self_id=[20:7519618973244418264:2954];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037993;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:42:12.407607Z node 20 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037943;self_id=[20:7519618951769579442:2570];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037943;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:42:12.407691Z node 20 :TX_COLUMNSHARD_WRITE WARN: 
log.cpp:784: tablet_id=72075186224037929;self_id=[20:7519618951769578948:2535];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037929;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:42:12.415506Z node 20 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224038002;self_id=[20:7519618973244418440:2984];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224038002;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:42:12.415661Z node 20 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224038003;self_id=[20:7519618973244418259:2949];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224038003;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:42:12.415889Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:5095: [PQ: 72075186224037896] Handle TEvLongTxService::TEvLockStatus LockId: 281474976710692 LockNode: 20 Status: STATUS_NOT_FOUND 2025-06-24T20:42:12.415921Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:5110: [PQ: 72075186224037896] TxWriteInfo: WriteId {20, 281474976710692}, TxId (empty maybe), Status STATUS_SUBSCRIBED 2025-06-24T20:42:12.415939Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:5124: [PQ: 72075186224037896] delete partitions for WriteId {20, 281474976710692} 2025-06-24T20:42:12.415967Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:5219: [PQ: 72075186224037896] send TEvPQ::TEvDeletePartition to partition {0, {20, 281474976710692}, 100000} 2025-06-24T20:42:12.416054Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [20:7519618999014224537:3392] destroyed 2025-06-24T20:42:12.416085Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [20:7519618999014224540:3392] destroyed 2025-06-24T20:42:12.416127Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:5095: [PQ: 72075186224037894] Handle TEvLongTxService::TEvLockStatus LockId: 281474976710692 LockNode: 20 Status: STATUS_NOT_FOUND 2025-06-24T20:42:12.416144Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:5110: [PQ: 72075186224037894] TxWriteInfo: WriteId {20, 281474976710692}, TxId (empty maybe), Status STATUS_SUBSCRIBED 2025-06-24T20:42:12.416155Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:5124: [PQ: 72075186224037894] delete partitions for WriteId {20, 281474976710692} 2025-06-24T20:42:12.416173Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:5219: [PQ: 72075186224037894] send TEvPQ::TEvDeletePartition to partition {0, {20, 281474976710692}, 100000} 2025-06-24T20:42:12.416229Z node 20 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037896, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-24T20:42:12.416295Z node 20 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037896, Partition: {0, {20, 281474976710692}, 100000}, State: StateIdle] TPartition::DropOwner. 
2025-06-24T20:42:12.416352Z node 20 :PERSQUEUE DEBUG: partition.cpp:3863: [PQ: 72075186224037896, Partition: {0, {20, 281474976710692}, 100000}, State: StateIdle] Handle TEvPQ::TEvDeletePartition 2025-06-24T20:42:12.416721Z node 20 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037938;self_id=[20:7519618947474611454:2516];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037938;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:42:12.416817Z node 20 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224038007;self_id=[20:7519618973244418265:2955];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224038007;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T20:42:12.416869Z node 20 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-24T20:42:12.416912Z node 20 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: {0, {20, 281474976710692}, 100000}, State: StateIdle] TPartition::DropOwner. 2025-06-24T20:42:12.416948Z node 20 :PERSQUEUE DEBUG: partition.cpp:3863: [PQ: 72075186224037894, Partition: {0, {20, 281474976710692}, 100000}, State: StateIdle] Handle TEvPQ::TEvDeletePartition 2025-06-24T20:42:12.417132Z node 20 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T20:42:12.417152Z node 20 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from D0000100000(+) to D0000100001(-) 2025-06-24T20:42:12.417175Z node 20 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T20:42:12.417186Z node 20 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from D0000100000(+) to D0000100001(-) 2025-06-24T20:42:12.420010Z node 20 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 100000 offset 0 count 1 actorID [20:7519618943179643703:2459] 2025-06-24T20:42:12.420080Z node 20 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 100000 offset 0 count 1 actorID [20:7519618943179643833:2480] 2025-06-24T20:42:12.420095Z node 20 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 100000 offset 1 count 9 actorID [20:7519618943179643833:2480] 2025-06-24T20:42:12.420352Z node 20 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: {0, {20, 281474976710692}, 100000}, State: StateIdle] need more data for compaction. cumulativeSize=293, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:42:12.420408Z node 20 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: {0, {20, 281474976710692}, 100000}, State: StateIdle] need more data for compaction. cumulativeSize=293, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:42:12.421004Z node 20 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037896, Partition: {0, {20, 281474976710692}, 100000}, State: StateIdle] need more data for compaction. cumulativeSize=1076, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:42:12.421050Z node 20 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037896, Partition: {0, {20, 281474976710692}, 100000}, State: StateIdle] need more data for compaction. 
cumulativeSize=1076, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:42:12.421554Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:5157: [PQ: 72075186224037894] Handle TEvPQ::TEvDeletePartitionDone {0, {20, 281474976710692}, 100000} 2025-06-24T20:42:12.421636Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:3608: [PQ: 72075186224037894] send TEvUnsubscribeLock for WriteId {20, 281474976710692} 2025-06-24T20:42:12.421709Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037894] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:42:12.422345Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:5157: [PQ: 72075186224037896] Handle TEvPQ::TEvDeletePartitionDone {0, {20, 281474976710692}, 100000} 2025-06-24T20:42:12.422411Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:3608: [PQ: 72075186224037896] send TEvUnsubscribeLock for WriteId {20, 281474976710692} 2025-06-24T20:42:12.422458Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037896] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:42:12.422894Z node 20 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037894' partition 100000 offset 0 partno 0 count 1 parts 0 suffix '63' size 293 2025-06-24T20:42:12.422947Z node 20 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037896' partition 100000 offset 0 partno 0 count 1 parts 0 suffix '63' size 158 2025-06-24T20:42:12.422978Z node 20 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037896' partition 100000 offset 1 partno 0 count 9 parts 0 suffix '63' size 918 2025-06-24T20:42:12.425190Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037894] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:42:12.425292Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037896] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncQueryClient [GOOD] |90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/query_replay/ydb_query_replay |90.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/query_replay/ydb_query_replay |90.9%| [LD] {RESULT} $(B)/ydb/tools/query_replay/ydb_query_replay |90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots >> TChargeBTreeIndex::FewNodes_Groups_History [GOOD] >> TChargeBTreeIndex::FewNodes_Groups_History_Sticky >> DataShardReadIteratorConsistency::BrokenWriteLockDuringIteration [GOOD] >> DataShardReadIteratorConsistency::WriteLockThenUncommittedReadUpgradeRetryAndRestart >> TPersQueueTest::TestReadPartitionStatus [GOOD] >> TPersQueueTest::TxCounters |90.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |90.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |90.9%| [TA] $(B)/ydb/core/viewer/ut/test-results/unittest/{meta.json ... results_accumulator.log} |90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut |90.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut |91.0%| [TA] {RESULT} $(B)/ydb/core/viewer/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |91.0%| [LD] {RESULT} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut >> KqpNewEngine::DqSourceCount [GOOD] >> KqpNewEngine::DqSource >> DataShardReadIterator::ShouldFailReadNextAfterSchemeChange [GOOD] >> DataShardReadIterator::ShouldFailReadNextAfterSchemeChangeExhausted >> DataShardReadIterator::ShouldReadKeyPrefix3 [GOOD] >> DataShardReadIterator::ShouldReadHeadFromFollower >> TxUsage::Transactions_Conflict_On_SeqNo_Table [GOOD] >> KqpNamedExpressions::NamedExpressionRandomInsert+UseSink >> YdbSdkSessions::CloseSessionWithSessionPoolExplicitDriverStopOnly [GOOD] >> YdbSdkSessions::CloseSessionWithSessionPoolFromDtors |91.0%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::PeriodicTask/1 [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0_UNIQUE_SYNC-pk_types26-all_types26-index26-Uint64-UNIQUE-SYNC] |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub |91.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots |91.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1__ASYNC-pk_types16-all_types16-index16-DyNumber--ASYNC] >> KqpSqlIn::SecondaryIndex_ComplexKey_In_And_In [GOOD] >> KqpSqlIn::PhasesCount >> TGroupMapperTest::MakeDisksUnusable [GOOD] |91.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::MakeDisksUnusable [GOOD] >> KqpRanges::NoFullScanAtDNFPredicate [GOOD] >> KqpRanges::MergeRanges >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit900 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit900 |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_failure_injection/ydb-core-tx-schemeshard-ut_failure_injection |91.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_failure_injection/ydb-core-tx-schemeshard-ut_failure_injection |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_failure_injection/ydb-core-tx-schemeshard-ut_failure_injection >> DataShardReadIteratorConsistency::WriteLockThenUncommittedReadUpgradeRetryAndRestart [GOOD] >> DataShardReadIteratorConsistency::WriteLockThenUncommittedReadUpgradeRestartWithStateMigrationRetryAndRestartWithoutStateMigration >> DataShardReadIterator::ShouldReadRangeChunk1 [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk2 |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/tool |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/tool |91.0%| [LD] {RESULT} $(B)/ydb/tests/stability/tool/tool >> DataShardReadIterator::ShouldFailReadNextAfterSchemeChangeExhausted [GOOD] >> DataShardReadIterator::NoErrorOnFinalACK |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris |91.0%| [LD] 
{BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris |91.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris >> JsonChangeRecord::Heartbeat [GOOD] |91.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest >> JsonChangeRecord::Heartbeat [GOOD] >> TChargeBTreeIndex::FewNodes_Groups_History_Sticky [GOOD] >> TClockProCache::Lifecycle [GOOD] >> TClockProCache::EvictNext [GOOD] >> TClockProCache::Erase [GOOD] >> TClockProCache::Random >> TClockProCache::Random [GOOD] >> NFwd_TFlatIndexCache::Skip_Wait [GOOD] >> NFwd_TFlatIndexCache::Trace [GOOD] >> NFwd_TFlatIndexCache::Slices [GOOD] >> NFwd_TLoadedPagesCircularBuffer::Basics [GOOD] >> NOther::Blocks [GOOD] >> NPage::Encoded [GOOD] >> NPage::ABI_002 >> KqpNewEngine::DqSource [GOOD] >> KqpNewEngine::DqSourceLiteralRange >> TxUsage::WriteToTopic_Demo_48_Table [GOOD] >> YdbSdkSessions::CloseSessionWithSessionPoolExplicit [GOOD] >> NPage::ABI_002 [GOOD] >> NPage::GroupIdEncoding [GOOD] >> NPageCollection::Align [GOOD] >> NPageCollection::Meta >> NPageCollection::Meta [GOOD] >> NPageCollection::PagesToBlobsConverter [GOOD] >> NPageCollection::Grow [GOOD] >> NPageCollection::Groups [GOOD] >> NPageCollection::Chop [GOOD] >> NPageCollection::CookieAllocator [GOOD] >> NProto::LargeGlobId [GOOD] >> Redo::ABI_008 >> Redo::ABI_008 [GOOD] >> Self::Literals [GOOD] >> ReadIteratorExternalBlobs::ExtBlobs [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithSpecificKeys >> TxUsage::WriteToTopic_Demo_48_Query |91.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest >> DataShardReadIterator::ShouldReadHeadFromFollower [GOOD] >> DataShardReadIterator::ShouldReadFromHead >> DataShardReadIterator::TryWriteManyRows-Commit [GOOD] >> DataShardReadIteratorBatchMode::RangeFull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> Self::Literals [GOOD] Test command err: + BTreeIndex{PageId: 0 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385, 13 rev 1, 683b} | PageId: 10000 RowCount: 100 DataSize: 1000 GroupDataSize: 2000 ErasedRowCount: 30 | > {0, a, false, 0} | PageId: 10001 RowCount: 201 DataSize: 2001 GroupDataSize: 4001 ErasedRowCount: 61 | > {1, b, true, 10} | PageId: 10002 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93 | > {2, c, false, 20} | PageId: 10003 RowCount: 406 DataSize: 4006 GroupDataSize: 8006 ErasedRowCount: 126 | > {3, d, true, 30} | PageId: 10004 RowCount: 510 DataSize: 5010 GroupDataSize: 10010 ErasedRowCount: 160 | > {4, e, false, 40} | PageId: 10005 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195 | > {5, f, true, 50} | PageId: 10006 RowCount: 721 DataSize: 7021 GroupDataSize: 14021 ErasedRowCount: 231 | > {6, g, false, 60} | PageId: 10007 RowCount: 828 DataSize: 8028 GroupDataSize: 16028 ErasedRowCount: 268 | > {7, h, true, 70} | PageId: 10008 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306 | > {8, i, false, 80} | PageId: 10009 RowCount: 1045 DataSize: 10045 GroupDataSize: 20045 ErasedRowCount: 345 | > {9, j, true, 90} | PageId: 10010 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385 + BTreeIndex{PageId: 9 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840, 13 rev 1, 116b} | + BTreeIndex{PageId: 5 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 
ErasedRowCount: 306, 13 rev 1, 179b} | | + BTreeIndex{PageId: 0 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93, 13 rev 1, 179b} | | | PageId: 10000 RowCount: 100 DataSize: 1000 GroupDataSize: 2000 ErasedRowCount: 30 | | | > {0, a, false, 0} | | | PageId: 10001 RowCount: 201 DataSize: 2001 GroupDataSize: 4001 ErasedRowCount: 61 | | | > {1, b, true, 10} | | | PageId: 10002 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93 | | > {2, c, false, 20} | | + BTreeIndex{PageId: 1 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195, 13 rev 1, 179b} | | | PageId: 10003 RowCount: 406 DataSize: 4006 GroupDataSize: 8006 ErasedRowCount: 126 | | | > {3, d, true, 30} | | | PageId: 10004 RowCount: 510 DataSize: 5010 GroupDataSize: 10010 ErasedRowCount: 160 | | | > {4, e, false, 40} | | | PageId: 10005 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195 | | > {5, f, true, 50} | | + BTreeIndex{PageId: 2 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306, 13 rev 1, 179b} | | | PageId: 10006 RowCount: 721 DataSize: 7021 GroupDataSize: 14021 ErasedRowCount: 231 | | | > {6, g, false, 60} | | | PageId: 10007 RowCount: 828 DataSize: 8028 GroupDataSize: 16028 ErasedRowCount: 268 | | | > {7, h, true, 70} | | | PageId: 10008 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306 | > {8, i, false, 80} | + BTreeIndex{PageId: 8 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840, 13 rev 1, 242b} | | + BTreeIndex{PageId: 3 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426, 13 rev 1, 179b} | | | PageId: 10009 RowCount: 1045 DataSize: 10045 GroupDataSize: 20045 ErasedRowCount: 345 | | | > {9, j, true, 90} | | | PageId: 10010 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385 | | | > {10, k, false, 100} | | | PageId: 10011 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426 | | > {11, l, true, 110} | | + BTreeIndex{PageId: 4 RowCount: 1605 DataSize: 15105 GroupDataSize: 30105 ErasedRowCount: 555, 13 rev 1, 179b} | | | PageId: 10012 RowCount: 1378 DataSize: 13078 GroupDataSize: 26078 ErasedRowCount: 468 | | | > {12, m, false, 120} | | | PageId: 10013 RowCount: 1491 DataSize: 14091 GroupDataSize: 28091 ErasedRowCount: 511 | | | > {13, n, true, 130} | | | PageId: 10014 RowCount: 1605 DataSize: 15105 GroupDataSize: 30105 ErasedRowCount: 555 | | > {14, o, false, 140} | | + BTreeIndex{PageId: 6 RowCount: 1953 DataSize: 18153 GroupDataSize: 36153 ErasedRowCount: 693, 13 rev 1, 179b} | | | PageId: 10015 RowCount: 1720 DataSize: 16120 GroupDataSize: 32120 ErasedRowCount: 600 | | | > {15, p, true, 150} | | | PageId: 10016 RowCount: 1836 DataSize: 17136 GroupDataSize: 34136 ErasedRowCount: 646 | | | > {16, q, false, 160} | | | PageId: 10017 RowCount: 1953 DataSize: 18153 GroupDataSize: 36153 ErasedRowCount: 693 | | > {17, r, true, 170} | | + BTreeIndex{PageId: 7 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840, 13 rev 1, 179b} | | | PageId: 10018 RowCount: 2071 DataSize: 19171 GroupDataSize: 38171 ErasedRowCount: 741 | | | > {18, s, false, 180} | | | PageId: 10019 RowCount: 2190 DataSize: 20190 GroupDataSize: 40190 ErasedRowCount: 790 | | | > {19, t, true, 190} | | | PageId: 10020 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840 + BTreeIndex{PageId: 15 RowCount: 15150 DataSize: 106050 GroupDataSize: 207050 ErasedRowCount: 8080, 13 rev 1, 174b} | + BTreeIndex{PageId: 12 RowCount: 9078 DataSize: 70278 
GroupDataSize: 138278 ErasedRowCount: 4318, 13 rev 1, 690b} | | + BTreeIndex{PageId: 0 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426, 13 rev 1, 702b} | | | PageId: 10000 RowCount: 100 DataSize: 1000 GroupDataSize: 2000 ErasedRowCount: 30 | | | > {0, x, NULL, NULL} | | | PageId: 10001 RowCount: 201 DataSize: 2001 GroupDataSize: 4001 ErasedRowCount: 61 | | | > {1, xx, NULL, NULL} | | | PageId: 10002 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93 | | | > {2, xxx, NULL, NULL} | | | PageId: 10003 RowCount: 406 DataSize: 4006 GroupDataSize: 8006 ErasedRowCount: 126 | | | > {3, xxxx, NULL, NULL} | | | PageId: 10004 RowCount: 510 DataSize: 5010 GroupDataSize: 10010 ErasedRowCount: 160 | | | > {4, xxxxx, NULL, NULL} | | | PageId: 10005 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195 | | | > {5, xxxxxx, NULL, NULL} | | | PageId: 10006 RowCount: 721 DataSize: 7021 GroupDataSize: 14021 ErasedRowCount: 231 | | | > {6, xxxxxxx, NULL, NULL} | | | PageId: 10007 RowCount: 828 DataSize: 8028 GroupDataSize: 16028 ErasedRowCount: 268 | | | > {7, xxxxxxxx, NULL, NULL} | | | PageId: 10008 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306 | | | > {8, xxxxxxxxx, NULL, NULL} | | | PageId: 10009 RowCount: 1045 DataSize: 10045 GroupDataSize: 20045 ErasedRowCount: 345 | | | > {9, xxxxxxxxxx, NULL, NULL} | | | PageId: 10010 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385 | | | > {10, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10011 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426 | | > {11, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 1 RowCount: 2431 DataSize: 22231 GroupDataSize: 44231 ErasedRowCount: 891, 13 rev 1, 683b} | | | PageId: 10012 RowCount: 1378 DataSize: 13078 GroupDataSize: 26078 ErasedRowCount: 468 | | | > {12, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10013 RowCount: 1491 DataSize: 14091 GroupDataSize: 28091 ErasedRowCount: 511 | | | > {13, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10014 RowCount: 1605 DataSize: 15105 GroupDataSize: 30105 ErasedRowCount: 555 | | | > {14, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10015 RowCount: 1720 DataSize: 16120 GroupDataSize: 32120 ErasedRowCount: 600 | | | > {15, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10016 RowCount: 1836 DataSize: 17136 GroupDataSize: 34136 ErasedRowCount: 646 | | | > {16, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10017 RowCount: 1953 DataSize: 18153 GroupDataSize: 36153 ErasedRowCount: 693 | | | > {17, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10018 RowCount: 2071 DataSize: 19171 GroupDataSize: 38171 ErasedRowCount: 741 | | | > {18, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10019 RowCount: 2190 DataSize: 20190 GroupDataSize: 40190 ErasedRowCount: 790 | | | > {19, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10020 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840 | | | > {20, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10021 RowCount: 2431 DataSize: 22231 GroupDataSize: 44231 ErasedRowCount: 891 | | > {21, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 2 RowCount: 3565 DataSize: 31465 GroupDataSize: 62465 ErasedRowCount: 1395, 13 rev 1, 689b} | | | PageId: 10022 RowCount: 2553 DataSize: 23253 GroupDataSize: 46253 ErasedRowCount: 943 | | | > {22, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10023 RowCount: 2676 DataSize: 24276 GroupDataSize: 48276 ErasedRowCount: 996 | | | > {23, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10024 RowCount: 2800 DataSize: 25300 GroupDataSize: 50300 ErasedRowCount: 1050 | | | > {24, 
xxxxxxxxxx.., NULL, NULL} | | | PageId: 10025 RowCount: 2925 DataSize: 26325 GroupDataSize: 52325 ErasedRowCount: 1105 | | | > {25, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10026 RowCount: 3051 DataSize: 27351 GroupDataSize: 54351 ErasedRowCount: 1161 | | | > {26, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10027 RowCount: 3178 DataSize: 28378 GroupDataSize: 56378 ErasedRowCount: 1218 | | | > {27, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10028 RowCount: 3306 DataSize: 29406 GroupDataSize: 58406 ErasedRowCount: 1276 | | | > {28, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10029 RowCount: 3435 DataSize: 30435 GroupDataSize: 60435 ErasedRowCount: 1335 | | | > {29, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10030 RowCount: 3565 DataSize: 31465 GroupDataSize: 62465 ErasedRowCount: 1395 | | > {30, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 3 RowCount: 4641 DataSize: 39741 GroupDataSize: 78741 ErasedRowCount: 1911, 13 rev 1, 669b} | | | PageId: 10031 RowCount: 3696 DataSize: 32496 GroupDataSize: 64496 ErasedRowCount: 1456 | | | > {31, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10032 RowCount: 3828 DataSize: 33528 GroupDataSize: 66528 ErasedRowCount: 1518 | | | > {32, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10033 RowCount: 3961 DataSize: 34561 GroupDataSize: 68561 ErasedRowCount: 1581 | | | > {33, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10034 RowCount: 4095 DataSize: 35595 GroupDataSize: 70595 ErasedRowCount: 1645 | | | > {34, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10035 RowCount: 4230 DataSize: 36630 GroupDataSize: 72630 ErasedRowCount: 1710 | | | > {35, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10036 RowCount: 4366 DataSize: 37666 GroupDataSize: 74666 ErasedRowCount: 1776 | | | > {36, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10037 RowCount: 4503 DataSize: 38703 GroupDataSize: 76703 ErasedRowCount: 1843 | | | > {37, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10038 RowCount: 4641 DataSize: 39741 GroupDataSize: 78741 ErasedRowCount: 1911 | | > {38, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 4 RowCount: 5781 DataSize: 48081 GroupDataSize: 95081 ErasedRowCount: 2491, 13 rev 1, 725b} | | | PageId: 10039 RowCount: 4780 DataSize: 40780 GroupDataSize: 80780 ErasedRowCount: 1980 | | | > {39, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10040 RowCount: 4920 DataSize: 41820 GroupDataSize: 82820 ErasedRowCount: 2050 | | | > {40, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10041 RowCount: 5061 DataSize: 42861 GroupDataSize: 84861 ErasedRowCount: 2121 | | | > {41, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10042 RowCount: 5203 DataSize: 43903 GroupDataSize: 86903 ErasedRowCount: 2193 | | | > {42, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10043 RowCount: 5346 DataSize: 44946 GroupDataSize: 88946 ErasedRowCount: 2266 | | | > {43, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10044 RowCount: 5490 DataSize: 45990 GroupDataSize: 90990 ErasedRowCount: 2340 | | | > {44, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10045 RowCount: 5635 DataSize: 47035 GroupDataSize: 93035 ErasedRowCount: 2415 | | | > {45, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10046 RowCount: 5781 DataSize: 48081 GroupDataSize: 95081 ErasedRowCount: 2491 | | > {46, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 5 RowCount: 6831 DataSize: 55431 GroupDataSize: 109431 ErasedRowCount: 3051, 13 ... 
7 RowCount: 34 DataSize: 1202 GroupDataSize: 7632 ErasedRowCount: 0 | | | > {4, 10} | | | PageId: 70 RowCount: 36 DataSize: 1284 GroupDataSize: 8163 ErasedRowCount: 0 | | | > {5, 3} | | | PageId: 82 RowCount: 38 DataSize: 1350 GroupDataSize: 8602 ErasedRowCount: 0 | | | > {5, 6} | | | PageId: 87 RowCount: 40 DataSize: 1416 GroupDataSize: 9358 ErasedRowCount: 0 + Rows{0} Label{04 rev 1, 66b}, [0, +2)row | ERowOp 1: {0, 1} | ERowOp 1: {0, 3} + Rows{2} Label{24 rev 1, 66b}, [2, +2)row | ERowOp 1: {0, 4} | ERowOp 1: {0, 6} + Rows{4} Label{44 rev 1, 82b}, [4, +2)row | ERowOp 1: {0, 7} | ERowOp 1: {0, 8} + Rows{8} Label{84 rev 1, 66b}, [6, +2)row | ERowOp 1: {0, 10} | ERowOp 1: {1, 1} + Rows{11} Label{114 rev 1, 66b}, [8, +2)row | ERowOp 1: {1, 3} | ERowOp 1: {1, 4} + Rows{14} Label{144 rev 1, 82b}, [10, +2)row | ERowOp 1: {1, 6} | ERowOp 1: {1, 7} + Rows{20} Label{204 rev 1, 66b}, [12, +2)row | ERowOp 1: {1, 8} | ERowOp 1: {1, 10} + Rows{23} Label{234 rev 1, 66b}, [14, +2)row | ERowOp 1: {2, 1} | ERowOp 1: {2, 3} + Rows{26} Label{264 rev 1, 82b}, [16, +2)row | ERowOp 1: {2, 4} | ERowOp 1: {2, 6} + Rows{36} Label{364 rev 1, 66b}, [18, +2)row | ERowOp 1: {2, 7} | ERowOp 1: {2, 8} + Rows{39} Label{394 rev 1, 66b}, [20, +2)row | ERowOp 1: {2, 10} | ERowOp 1: {3, 1} + Rows{42} Label{424 rev 1, 82b}, [22, +2)row | ERowOp 1: {3, 3} | ERowOp 1: {3, 4} + Rows{48} Label{484 rev 1, 66b}, [24, +2)row | ERowOp 1: {3, 6} | ERowOp 1: {3, 7} + Rows{53} Label{534 rev 1, 66b}, [26, +2)row | ERowOp 1: {3, 8} | ERowOp 1: {3, 10} + Rows{58} Label{584 rev 1, 82b}, [28, +2)row | ERowOp 1: {4, 1} | ERowOp 1: {4, 3} + Rows{64} Label{644 rev 1, 66b}, [30, +2)row | ERowOp 1: {4, 4} | ERowOp 1: {4, 6} + Rows{67} Label{674 rev 1, 66b}, [32, +2)row | ERowOp 1: {4, 7} | ERowOp 1: {4, 8} + Rows{70} Label{704 rev 1, 82b}, [34, +2)row | ERowOp 1: {4, 10} | ERowOp 1: {5, 1} + Rows{82} Label{824 rev 1, 66b}, [36, +2)row | ERowOp 1: {5, 3} | ERowOp 1: {5, 4} + Rows{87} Label{874 rev 1, 66b}, [38, +2)row | ERowOp 1: {5, 6} | ERowOp 1: {5, 7} Slices{ [0, 39] } 0.29189 Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | 
ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + 
Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row 
| ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} >> TNodeBrokerTest::Test999NodesSubscribers [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobal [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalAsync >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit900 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit1001 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::Test999NodesSubscribers [GOOD] Test command err: 2025-06-24T20:40:08.208113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:40:08.208210Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) |91.0%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::CloseSessionWithSessionPoolExplicit [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk2 [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk3 >> DataShardReadIterator::NoErrorOnFinalACK [GOOD] >> DataShardReadIterator::ShouldCancelMvccSnapshotFromFuture |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots |91.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots >> DataShardReadIteratorConsistency::WriteLockThenUncommittedReadUpgradeRestartWithStateMigrationRetryAndRestartWithoutStateMigration [GOOD] >> DataShardReadIteratorFastCancel::ShouldProcessFastCancel >> YdbSdkSessionsPool::StressTestAsync/0 [GOOD] >> YdbSdkSessionsPool::StressTestAsync/1 >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesEmpty [GOOD] >> KqpSqlIn::PhasesCount [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0__ASYNC-pk_types7-all_types7-index7-Timestamp--ASYNC] |91.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesEmpty [GOOD] >> KqpRanges::MergeRanges [GOOD] |91.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |91.0%| [LD] {RESULT} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test >> test_ttl.py::TestTTL::test_ttl[table_Date_1__SYNC-pk_types33-all_types33-index33-Date--SYNC] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpSqlIn::PhasesCount [GOOD] Test command err: Trying to start YDB, gRPC: 9379, MsgBus: 13040 2025-06-24T20:41:47.185996Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618897669839207:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:47.191698Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/2btm/003213/r3tmp/tmpDG2QMl/pdisk_1.dat 2025-06-24T20:41:48.102029Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618897669839023:2079] 1750797707134309 != 1750797707134312 2025-06-24T20:41:48.119590Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:48.159181Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:41:48.188125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:48.188244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:48.202850Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:41:48.204753Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 9379, node 1 2025-06-24T20:41:48.539597Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:41:48.539623Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:41:48.539631Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:41:48.539739Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13040 TClient is connected to server localhost:13040 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:41:50.187140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:41:50.255563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:41:50.265400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:50.477772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:50.714260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:50.809380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:52.163315Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618897669839207:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:52.163391Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:41:53.178740Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618923439644443:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:53.178862Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:53.644445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:53.683134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:53.729639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:53.828436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:53.900043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:53.998594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:54.080106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:54.268444Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618927734612409:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:54.268537Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:54.268956Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618927734612414:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:54.280198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:41:54.299115Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618927734612416:2441], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:41:54.404768Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618927734612467:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:41:55.752540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operat ... ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:04.028353Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:43:04.028394Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded Trying to start YDB, gRPC: 11483, MsgBus: 16198 2025-06-24T20:43:07.644702Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519619240718083220:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:43:07.644893Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003213/r3tmp/tmpAqv5ZL/pdisk_1.dat 2025-06-24T20:43:07.860133Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:43:07.860294Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:43:07.860975Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:43:07.861787Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519619240718083188:2079] 1750797787641008 != 1750797787641011 2025-06-24T20:43:07.880640Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11483, node 7 2025-06-24T20:43:08.015968Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:43:08.016004Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:43:08.016018Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:43:08.016216Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16198 2025-06-24T20:43:08.657136Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16198 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:43:09.335291Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:43:09.369187Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:43:09.571563Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:43:10.040613Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:43:10.238784Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:43:12.647283Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519619240718083220:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:43:12.647398Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:43:14.826523Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519619270782855913:2373], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:14.826633Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:14.913479Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:15.014835Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:15.077350Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:15.169242Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:15.353327Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:15.897090Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:16.074562Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:16.434480Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519619279372791190:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:16.434585Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:16.435396Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519619279372791195:2442], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:16.461405Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:43:16.525225Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519619279372791197:2443], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:43:16.583434Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519619279372791248:3441] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpRanges::MergeRanges [GOOD] Test command err: Trying to start YDB, gRPC: 11319, MsgBus: 16303 2025-06-24T20:41:28.344864Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618816088876515:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:28.345351Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003223/r3tmp/tmp9pMJU9/pdisk_1.dat 2025-06-24T20:41:29.272909Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:29.273038Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:29.275193Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519618816088876294:2079] 1750797688205963 != 1750797688205966 2025-06-24T20:41:29.323315Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:41:29.327377Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:29.328634Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11319, node 1 2025-06-24T20:41:29.667747Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:41:29.667769Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:41:29.667777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:41:29.667892Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16303 TClient is connected to server localhost:16303 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:41:30.899126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:41:30.942233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:41:30.960068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:31.162006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:31.387601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:31.508474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:41:33.279729Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618837563714415:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:33.279843Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:33.305189Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618816088876515:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:41:33.305314Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:41:33.612179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:33.663105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:33.755345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:33.800456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:33.838225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:33.884196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:33.923048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:34.035937Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519618841858682375:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:34.036040Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:34.039494Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618841858682380:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:41:34.045466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:41:34.065128Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519618841858682382:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:41:34.137720Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519618841858682433:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:41:35.706229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... e_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519619248324153929:2086];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:43:09.169074Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003223/r3tmp/tmpPvZxqx/pdisk_1.dat 2025-06-24T20:43:09.372145Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:43:09.395331Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:43:09.395444Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:43:09.400989Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15657, node 10 2025-06-24T20:43:09.520051Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:43:09.520081Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:43:09.520095Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:43:09.520262Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3063 2025-06-24T20:43:10.187595Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3063 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:43:10.716090Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:43:10.739772Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:43:10.761253Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:43:10.912940Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:43:11.241078Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:11.440954Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:43:14.171606Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519619248324153929:2086];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:43:14.171730Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:43:15.326382Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519619274093959294:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:15.327348Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:15.434733Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:15.563542Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:15.663798Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:15.776272Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:15.848591Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:16.074276Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:16.287656Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:16.525748Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519619278388927268:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:16.525901Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:16.526452Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519619278388927273:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:16.697579Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:43:16.732519Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519619278388927275:2441], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:43:16.796817Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519619278388927327:3448] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:43:20.409484Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:21.824752Z node 10 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750797801830, txId: 281474976710674] shutting down >> TPersQueueTest::TxCounters [GOOD] |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test |91.0%| [LD] {RESULT} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/stress_tool/ydb_stress_tool |91.0%| [LD] {RESULT} $(B)/ydb/tools/stress_tool/ydb_stress_tool |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/stress_tool/ydb_stress_tool |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat |91.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat |91.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 >> DataShardReadIterator::ShouldReadFromHead [GOOD] >> DataShardReadIterator::ShouldReadFromHeadWithConflict+UseSink |91.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore/ydb-core-tx-schemeshard-ut_incremental_restore |91.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore/ydb-core-tx-schemeshard-ut_incremental_restore |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore/ydb-core-tx-schemeshard-ut_incremental_restore >> DataShardReadIteratorBatchMode::RangeFull [GOOD] >> DataShardReadIteratorBatchMode::RangeFromInclusive |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut |91.0%| [LD] {RESULT} 
$(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit1001 [GOOD] >> DataShardReadIterator::ShouldReadFromFollower >> KqpNewEngine::DqSourceLiteralRange [GOOD] >> KqpNewEngine::DqSourceLimit ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::TxCounters [GOOD] Test command err: === Server->StartServer(false); 2025-06-24T20:35:36.703919Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617301142506848:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:35:36.703996Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:35:37.348179Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:35:37.366799Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004958/r3tmp/tmph26HIT/pdisk_1.dat 2025-06-24T20:35:37.544826Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:35:37.698553Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:37.715283Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:35:37.860206Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:35:38.036542Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:38.040085Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617301142506651:2080] 1750797336630858 != 1750797336630861 2025-06-24T20:35:38.115741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:38.115831Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:38.125368Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:35:38.125434Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:35:38.138606Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:35:38.148867Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:35:38.150685Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23100, node 1 2025-06-24T20:35:38.616353Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use 
file: /home/runner/.ya/build/build_root/2btm/004958/r3tmp/yandexSJbp6j.tmp 2025-06-24T20:35:38.616386Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/004958/r3tmp/yandexSJbp6j.tmp 2025-06-24T20:35:38.616544Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/004958/r3tmp/yandexSJbp6j.tmp 2025-06-24T20:35:38.616662Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:35:38.789305Z INFO: TTestServer started on Port 15187 GrpcPort 23100 TClient is connected to server localhost:15187 PQClient connected to localhost:23100 === TenantModeEnabled() = 0 === Init PQ - start server on port 23100 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:35:39.413939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T20:35:39.414132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:39.414330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T20:35:39.414355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976715657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T20:35:39.414589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T20:35:39.414670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:39.424094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976715657, response: Status: 
StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T20:35:39.424342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T20:35:39.424511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:39.424551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T20:35:39.424571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-06-24T20:35:39.424591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 2 -> 3 2025-06-24T20:35:39.440546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:39.440672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T20:35:39.440703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 3 -> 128 waiting... 2025-06-24T20:35:39.453154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:39.453196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T20:35:39.453216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T20:35:39.453256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-06-24T20:35:39.458823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T20:35:39.460698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:35:39.460742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 0/1, is published: true 2025-06-24T20:35:39.460774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:35:39.468074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976715657:4294967295 from tablet: 72057594046644480 to 
tablet: 72057594046316545 cookie: 0:281474976715657 msg type: 269090816 2025-06-24T20:35:39.468241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976715657, partId: 4294967295, tablet: 72057594046316545 2025-06-24T20:35:39.476916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750797339515, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T20:35:39.477132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750797339515 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T20:35:39.477176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T20:35:39.477528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 128 -> 240 2025-06-24T20:35:39.477580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T20:35:39.477810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 720575940466 ... connected no session 2025-06-24T20:43:24.539524Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: 123|7f08a6cf-2e07bfdc-7718cda3-88132523_0 grpc read done: success: 0 data: 2025-06-24T20:43:24.539548Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 2 sessionId: 123|7f08a6cf-2e07bfdc-7718cda3-88132523_0 grpc read failed 2025-06-24T20:43:24.540483Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:818: session v1 closed cookie: 2 sessionId: 123|7f08a6cf-2e07bfdc-7718cda3-88132523_0 2025-06-24T20:43:24.540515Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: 123|7f08a6cf-2e07bfdc-7718cda3-88132523_0 is DEAD 2025-06-24T20:43:24.541327Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:24.546404Z node 32 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-24T20:43:24.546441Z node 32 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 3 2025-06-24T20:43:24.552348Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: grpc read done: success: 1 data: init_request { path: "topic" producer_id: "123" partition_with_generation { generation: 1 } } 2025-06-24T20:43:24.552647Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 3 path: "topic" producer_id: "123" partition_with_generation { generation: 1 } from ipv6:[::1]:37188 2025-06-24T20:43:24.552685Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=3 sessionId= userAgent="topic server" ip=ipv6:[::1]:37188 proto=topic topic=topic durationSec=0 2025-06-24T20:43:24.552702Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-24T20:43:24.552752Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:475: session to 
partition: 0, generation: 1 2025-06-24T20:43:24.556733Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 3 sessionId: describe result for acl check 2025-06-24T20:43:24.557094Z node 32 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-24T20:43:24.557131Z node 32 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-24T20:43:24.557161Z node 32 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-24T20:43:24.557188Z node 32 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [32:7519619313077437019:2463] (SourceId=123, PreferedPartition=0) ReplyResult: Partition=0, SeqNo=0 2025-06-24T20:43:24.557217Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 3 sessionId: partition: 0 expectedGeneration: 1 2025-06-24T20:43:24.564607Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 32, Generation: 1 2025-06-24T20:43:24.565146Z node 32 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 123|4a37150a-db20c1f7-70d0a4d7-b0060870_0 generated for partition 0 topic 'topic' owner 123 2025-06-24T20:43:24.571721Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 3 partition: 0 MaxSeqNo: 0 sessionId: 123|4a37150a-db20c1f7-70d0a4d7-b0060870_0 2025-06-24T20:43:24.588897Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: 123|4a37150a-db20c1f7-70d0a4d7-b0060870_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-24T20:43:24.591224Z node 32 :PQ_WRITE_PROXY INFO: writer.cpp:263: TPartitionWriter 72075186224037892 (partition=0) Start of a request to KQP for a WriteId. 
SessionId: ydb://session/3?node_id=32&id=MTliZGZmMmEtMzM5NDRlYWUtYjc4YzZhMS1mYzVkYzkyNw== TxId: 01jyhtwr9v60ef0kejtp8nmyx7 2025-06-24T20:43:24.631694Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: 123|4a37150a-db20c1f7-70d0a4d7-b0060870_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-24T20:43:24.632775Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: 123|4a37150a-db20c1f7-70d0a4d7-b0060870_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-24T20:43:24.633262Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: 123|4a37150a-db20c1f7-70d0a4d7-b0060870_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-24T20:43:24.655459Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 32, Generation: 1 2025-06-24T20:43:24.656426Z node 32 :PQ_WRITE_PROXY INFO: writer.cpp:283: TPartitionWriter 72075186224037892 (partition=0) End of the request to KQP for the WriteId. SessionId: ydb://session/3?node_id=32&id=MTliZGZmMmEtMzM5NDRlYWUtYjc4YzZhMS1mYzVkYzkyNw== TxId: 01jyhtwr9v60ef0kejtp8nmyx7 2025-06-24T20:43:24.656492Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:301: SessionId: ydb://session/3?node_id=32&id=MTliZGZmMmEtMzM5NDRlYWUtYjc4YzZhMS1mYzVkYzkyNw== TxId: 01jyhtwr9v60ef0kejtp8nmyx7 WriteId: {32, 281474976710672} 2025-06-24T20:43:24.728696Z node 32 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037892, Partition: {0, {32, 281474976710672}, 100000}, State: StateInit] bootstrapping {0, {32, 281474976710672}, 100000} [32:7519619313077437040:2469] 2025-06-24T20:43:24.769132Z node 32 :PERSQUEUE INFO: partition_init.cpp:895: [topic:{0, {32, 281474976710672}, 100000}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T20:43:24.769237Z node 32 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037892, Partition: {0, {32, 281474976710672}, 100000}, State: StateInit] init complete for topic 'topic' partition {0, {32, 281474976710672}, 100000} generation 1 [32:7519619313077437040:2469] 2025-06-24T20:43:24.770030Z node 32 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 123|c781e1a8-1b835166-2c518dd1-a062a52e_0 generated for partition {0, {32, 281474976710672}, 100000} topic 'topic' owner 123 2025-06-24T20:43:24.770348Z node 32 :PQ_WRITE_PROXY INFO: writer.cpp:422: TPartitionWriter 72075186224037892 (partition=0) Start of a request to KQP to save PartitionId. SessionId: ydb://session/3?node_id=32&id=MTliZGZmMmEtMzM5NDRlYWUtYjc4YzZhMS1mYzVkYzkyNw== TxId: 01jyhtwr9v60ef0kejtp8nmyx7 2025-06-24T20:43:24.772755Z node 32 :PQ_WRITE_PROXY INFO: writer.cpp:431: TPartitionWriter 72075186224037892 (partition=0) End of a request to KQP to save PartitionId. 
SessionId: ydb://session/3?node_id=32&id=MTliZGZmMmEtMzM5NDRlYWUtYjc4YzZhMS1mYzVkYzkyNw== TxId: 01jyhtwr9v60ef0kejtp8nmyx7 2025-06-24T20:43:24.817278Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-24T20:43:24.817396Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-24T20:43:24.817438Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-24T20:43:24.817473Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-24T20:43:24.883459Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-24T20:43:24.907801Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-24T20:43:24.907871Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-24T20:43:24.907947Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-24T20:43:24.995530Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: 123|4a37150a-db20c1f7-70d0a4d7-b0060870_0 grpc read done: success: 0 data: 2025-06-24T20:43:24.995574Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 3 sessionId: 123|4a37150a-db20c1f7-70d0a4d7-b0060870_0 grpc read failed 2025-06-24T20:43:24.995642Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 3 sessionId: 123|4a37150a-db20c1f7-70d0a4d7-b0060870_0 grpc closed 2025-06-24T20:43:24.995682Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 3 sessionId: 123|4a37150a-db20c1f7-70d0a4d7-b0060870_0 is DEAD 2025-06-24T20:43:24.997382Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:24.997441Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison Counters: ================================
name=api.grpc.topic.stream_write.bytes: 20796
name=api.grpc.topic.stream_write.messages: 4
name=api.grpc.topic.stream_write.uncommitted_bytes: 20796
name=api.grpc.topic.stream_write.uncommitted_messages: 4
name=topic.compaction.lag_milliseconds_max: 0
name=topic.compaction.unprocessed_bytes_max: 0
name=topic.compaction.unprocessed_count_max: 0
name=topic.write.bytes: 20796
name=topic.write.discarded_bytes: 0
name=topic.write.discarded_messages: 0
name=topic.write.messages: 4
name=topic.write.uncommitted_bytes: 20796
name=topic.write.uncommitted_messages: 4
name=topic.write.uncompressed_bytes: 16
name=topic.write.lag_milliseconds:
    bin=100: 0
    bin=1000: 0
    bin=10000: 0
    bin=180000: 0
    bin=200: 0
    bin=2000: 3
    bin=30000: 0
    bin=500: 0
    bin=5000: 1
    bin=60000: 0
    bin=999999: 0
name=topic.write.message_size_bytes:
    bin=1024: 1
    bin=10240: 2
    bin=102400: 0
    bin=1048576: 0
    bin=10485760: 0
    bin=20480: 1
    bin=204800: 0
    bin=2097152: 0
    bin=5120: 0
    bin=51200: 0
    bin=524288: 0
    bin=5242880: 0
    bin=67108864: 0
    bin=99999999: 0
name=topic.write.partition_throttled_milliseconds:
    bin=0: 4
    bin=1: 0
    bin=10: 0
    bin=100: 0
    bin=1000: 0
    bin=10000: 0
    bin=20: 0
    bin=2500: 0
    bin=5: 0
    bin=50: 0
    bin=500: 0
    bin=5000: 0
    bin=999999: 0
2025-06-24T20:43:25.127468Z node 32 :PERSQUEUE WARN: pq_impl.cpp:4230: [PQ: 72075186224037892] Unknown transaction 281474976710673 >> DataShardReadIterator::ShouldReadRangeChunk3 [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk5 >> DataShardReadIterator::ShouldCancelMvccSnapshotFromFuture [GOOD] >> DataShardReadIterator::ShouldCommitLocksWhenReadWriteInOneTransaction >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1__ASYNC-pk_types28-all_types28-index28-Uint64--ASYNC] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0__SYNC-pk_types24-all_types24-index24-Uint64--SYNC] >> DataShardReadIteratorFastCancel::ShouldProcessFastCancel [GOOD] >> DataShardReadIteratorLatency::ReadSplitLatency >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalAsync [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalUnique [GOOD] >> BackupRestore::PrefixedVectorIndex |91.0%| [TA] $(B)/ydb/services/persqueue_v1/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_ttl.py::TestTTL::test_ttl[table_Date_1__ASYNC-pk_types34-all_types34-index34-Date--ASYNC] >> KqpNamedExpressions::NamedExpressionRandomInsert+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomInsert-UseSink >> test_ttl.py::TestTTL::test_ttl[table_Date_1_UNIQUE_SYNC-pk_types35-all_types35-index35-Date-UNIQUE-SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0_UNIQUE_SYNC-pk_types2-all_types2-index2-Datetime-UNIQUE-SYNC] |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots >> DataShardReadIterator::ShouldReadFromHeadWithConflict+UseSink [GOOD] >> DataShardReadIterator::ShouldReadFromHeadWithConflict-UseSink >> TConsoleConfigSubscriptionTests::TestConfigSubscriptionsCleanup [GOOD] >> TConsoleConfigTests::TestAddConfigItem |91.0%| [TA] {RESULT} $(B)/ydb/services/persqueue_v1/ut/test-results/unittest/{meta.json ... results_accumulator.log} |91.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots >> TNodeBrokerTest::Test1001NodesSubscribers [GOOD] >> TNodeBrokerTest::TestRandomActions [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::Test1001NodesSubscribers [GOOD] Test command err: 2025-06-24T20:40:30.773593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:40:30.773683Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> DataShardReadIteratorBatchMode::RangeFromInclusive [GOOD] >> DataShardReadIteratorBatchMode::RangeFromNonInclusive >> KqpNewEngine::DqSourceLimit [GOOD] >> TConsoleConfigTests::TestAddConfigItem [GOOD] >> TConsoleConfigTests::TestAutoKind >> DataShardReadIterator::ShouldReadRangeChunk5 [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk7 >> DataShardReadIterator::ShouldReadFromFollower [GOOD] >> DataShardReadIterator::ShouldNotReadFutureMvccFromFollower ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::DqSourceLimit [GOOD] Test command err: Trying to start YDB, gRPC: 27028, MsgBus: 5313 2025-06-24T20:42:24.492009Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519619054610879565:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:42:24.492269Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00320d/r3tmp/tmpCItA4g/pdisk_1.dat 2025-06-24T20:42:25.031511Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519619054610879345:2079] 1750797744460595 != 1750797744460598 2025-06-24T20:42:25.039591Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27028, node 1 2025-06-24T20:42:25.051174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:42:25.051297Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:42:25.055427Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:42:25.231730Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:42:25.231764Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:42:25.231792Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:42:25.231916Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:42:25.489114Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5313 TClient is connected to server localhost:5313 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:42:26.171434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:42:26.212140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:42:26.436596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:42:26.781961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:42:26.906746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:42:29.137994Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519619076085717468:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:42:29.138155Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:42:29.486157Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519619054610879565:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:42:29.486213Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:42:29.525502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:29.572481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:29.617241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:29.660168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:29.730512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:29.808453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:29.855917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:29.920632Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519619076085718128:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:42:29.920715Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:42:29.920864Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519619076085718133:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:42:29.925433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:42:29.943453Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519619076085718135:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:42:30.020275Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519619080380685482:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 7573, MsgBus: 64997 2025-06-24T20:42:33.087478Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519619094712249765:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:42:33.087529Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # ... OR: schemereq.cpp:553: Actor# [6:7519619310802423732:3452] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 1799, MsgBus: 20906 2025-06-24T20:43:30.596834Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519619338001347249:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:43:30.596899Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00320d/r3tmp/tmpFyjgjH/pdisk_1.dat 2025-06-24T20:43:31.209644Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:43:31.209772Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:43:31.227332Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519619338001347224:2079] 1750797810581953 != 1750797810581956 2025-06-24T20:43:31.272716Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:43:31.285583Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1799, node 7 2025-06-24T20:43:31.531920Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:43:31.531949Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:43:31.531963Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:43:31.532139Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:43:31.624276Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20906 TClient is connected to server localhost:20906 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:43:32.556096Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:43:32.567791Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:43:32.580326Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:43:32.751020Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:43:33.032327Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:43:33.152831Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:43:35.597347Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519619338001347249:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:43:35.597453Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:43:37.341125Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519619368066119937:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:37.341301Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:37.460000Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:37.515210Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:37.560744Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:37.601767Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:37.647038Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:37.731610Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:37.804786Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:37.986334Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519619368066120618:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:37.986446Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:37.986622Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519619368066120623:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:37.992826Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:43:38.027992Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519619368066120625:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:43:38.114851Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519619372361087973:3439] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::TestRandomActions [GOOD] Test command err: 2025-06-24T20:41:18.842781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:18.842862Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-24T20:41:19.148058Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:41:19.148726Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:41:20.036542Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-24T20:41:20.037247Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-24T20:41:20.037534Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-24T20:41:20.076907Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-24T20:41:20.077531Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-24T20:41:20.077819Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-24T20:41:20.210562Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1027: WRONG_REQUEST: Unknown node 2025-06-24T20:41:20.960389Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1027: WRONG_REQUEST: Unknown node 2025-06-24T20:41:21.359294Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-24T20:41:21.373736Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-24T20:41:21.403488Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1031: WRONG_REQUEST: Unknown node 2025-06-24T20:41:21.920243Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-24T20:41:21.941240Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-24T20:41:21.957822Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-24T20:41:22.893433Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-24T20:41:22.916715Z node 1 
:NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-24T20:41:22.938816Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-24T20:41:23.337170Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1027: WRONG_REQUEST: Unknown node 2025-06-24T20:41:23.337677Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1027: WRONG_REQUEST: Unknown node 2025-06-24T20:41:23.380685Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-24T20:41:23.381139Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-24T20:41:23.382504Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-24T20:41:23.808172Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-24T20:41:23.853137Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-24T20:41:23.988591Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-24T20:41:24.531854Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:41:24.532377Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:41:24.532835Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:41:24.564573Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:41:25.041431Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:41:25.041789Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:41:25.059495Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-24T20:41:25.060367Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-24T20:41:25.645192Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-24T20:41:25.749063Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-24T20:41:25.749635Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-24T20:41:25.750115Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-24T20:41:25.889874Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1031: WRONG_REQUEST: Unknown node 2025-06-24T20:41:25.891311Z node 1 :NODE_BROKER ERROR: 
node_broker__extend_lease.cpp:31: Cannot extend lease for node #1031: WRONG_REQUEST: Unknown node 2025-06-24T20:41:25.891923Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1031: WRONG_REQUEST: Unknown node 2025-06-24T20:41:25.892363Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1031: WRONG_REQUEST: Unknown node 2025-06-24T20:41:25.973791Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1032: WRONG_REQUEST: Unknown node 2025-06-24T20:41:26.003236Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1032: WRONG_REQUEST: Unknown node 2025-06-24T20:41:26.004784Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1032: WRONG_REQUEST: Unknown node 2025-06-24T20:41:26.005272Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1032: WRONG_REQUEST: Unknown node 2025-06-24T20:41:26.006212Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1032: WRONG_REQUEST: Unknown node 2025-06-24T20:41:26.031938Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1033: WRONG_REQUEST: Unknown node 2025-06-24T20:41:26.051887Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-24T20:41:26.053958Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host9:8: ERROR_TEMP: No free node IDs 2025-06-24T20:41:26.958399Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-24T20:41:26.974283Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host3:2: ERROR_TEMP: No free node IDs 2025-06-24T20:41:27.135421Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-24T20:41:27.140032Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host5:4: ERROR_TEMP: No free node IDs 2025-06-24T20:41:27.142510Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-24T20:41:27.144230Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host9:8: ERROR_TEMP: No free node IDs 2025-06-24T20:41:27.169080Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-24T20:41:27.216419Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host9:8: ERROR_TEMP: No free node IDs 2025-06-24T20:41:27.217211Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-24T20:41:27.218660Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host7:6: ERROR_TEMP: No free node IDs 2025-06-24T20:41:27.225649Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-24T20:41:27.889006Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Node has expired 2025-06-24T20:41:27.894894Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host9:8: ERROR_TEMP: 
No free node IDs 2025-06-24T20:41:27.917711Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Node has expired 2025-06-24T20:41:27.918542Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host9:8: ERROR_TEMP: No free node IDs 2025-06-24T20:41:27.938250Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Node has expired 2025-06-24T20:41:27.939189Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host3:2: ERROR_TEMP: No free node IDs 2025-06-24T20:41:27.956327Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Node has expired 2025-06-24T20:41:27.958151Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Node has expired 2025-06-24T20:41:27.961146Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host11:10: ERROR_TEMP: No free node IDs 2025-06-24T20:41:27.963394Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Node has expired 2025-06-24T20:41:27.964053Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Node has expired 2025-06-24T20:41:27.965501Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Node has expired 2025-06-24T20:41:27.984970Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host7:6: ERROR_TEMP: No free node IDs 2025-06-24T20:41:27.985820Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Node has expired 2025-06-24T20:41:27.987026Z node 1 :NODE_BROKER ERROR: node_broker__extend_ ... 
_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:43:25.900802Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-24T20:43:25.904036Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-24T20:43:25.953498Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-24T20:43:25.981230Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-24T20:43:26.669994Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-24T20:43:26.672652Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-24T20:43:27.330567Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-24T20:43:27.681582Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:43:27.730304Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-24T20:43:27.733743Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-24T20:43:27.777180Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-24T20:43:27.780192Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-24T20:43:27.782763Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-24T20:43:27.785284Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-24T20:43:27.809758Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-24T20:43:27.812339Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-24T20:43:27.814751Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-24T20:43:29.784939Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1027: WRONG_REQUEST: Unknown node 2025-06-24T20:43:30.621650Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-24T20:43:30.648576Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-24T20:43:31.142921Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:43:31.172326Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:43:32.093798Z node 1 :NODE_BROKER ERROR: 
node_broker__extend_lease.cpp:31: Cannot extend lease for node #1032: WRONG_REQUEST: Unknown node 2025-06-24T20:43:32.118598Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1032: WRONG_REQUEST: Unknown node 2025-06-24T20:43:32.126272Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1032: WRONG_REQUEST: Unknown node 2025-06-24T20:43:32.162635Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1033: WRONG_REQUEST: Unknown node 2025-06-24T20:43:32.648768Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-24T20:43:33.194017Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-24T20:43:33.197633Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-24T20:43:33.801514Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-24T20:43:33.810054Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-24T20:43:34.323488Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:43:34.326342Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:43:34.329529Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:43:34.332182Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:43:34.434662Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:43:34.871652Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:43:34.874516Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-24T20:43:35.688837Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-24T20:43:35.702276Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-24T20:43:35.710252Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-24T20:43:35.786544Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-24T20:43:35.815103Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1027: WRONG_REQUEST: Unknown node 2025-06-24T20:43:35.822951Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1027: WRONG_REQUEST: Unknown node 2025-06-24T20:43:36.336849Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-24T20:43:36.362827Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: 
Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-24T20:43:36.368751Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-24T20:43:36.491226Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1032: WRONG_REQUEST: Unknown node 2025-06-24T20:43:36.574346Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host2:1: ERROR_TEMP: No free node IDs 2025-06-24T20:43:36.654550Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host5:4: ERROR_TEMP: No free node IDs 2025-06-24T20:43:36.682324Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-24T20:43:36.701428Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-24T20:43:36.718976Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host11:10: ERROR_TEMP: No free node IDs 2025-06-24T20:43:36.726548Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-24T20:43:36.729170Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-24T20:43:36.733115Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host2:1: ERROR_TEMP: No free node IDs 2025-06-24T20:43:36.854229Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host5:4: ERROR_TEMP: No free node IDs 2025-06-24T20:43:36.934209Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-24T20:43:36.937806Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host7:6: ERROR_TEMP: No free node IDs 2025-06-24T20:43:36.939251Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host12:11: ERROR_TEMP: No free node IDs 2025-06-24T20:43:36.940374Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host11:10: ERROR_TEMP: No free node IDs 2025-06-24T20:43:36.952575Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-24T20:43:37.669742Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-24T20:43:37.672689Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host11:10: ERROR_TEMP: No free node IDs 2025-06-24T20:43:38.187093Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Node has expired 2025-06-24T20:43:38.193919Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Node has expired 2025-06-24T20:43:38.200721Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host15:14: ERROR_TEMP: No free node IDs 2025-06-24T20:43:38.218413Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host8:7: ERROR_TEMP: No free node IDs 2025-06-24T20:43:38.222145Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Node has expired 
2025-06-24T20:43:38.331839Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host7:6: ERROR_TEMP: No free node IDs 2025-06-24T20:43:38.334343Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host9:8: ERROR_TEMP: No free node IDs 2025-06-24T20:43:38.336646Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host15:14: ERROR_TEMP: No free node IDs 2025-06-24T20:43:38.341660Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Node has expired 2025-06-24T20:43:38.343960Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Node has expired 2025-06-24T20:43:38.424244Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host12:11: ERROR_TEMP: No free node IDs 2025-06-24T20:43:39.414632Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-24T20:43:39.492063Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-24T20:43:39.589653Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1032: WRONG_REQUEST: Unknown node >> TConsoleConfigTests::TestAutoKind [GOOD] >> TConsoleConfigTests::TestAllowedScopes |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/query_replay_yt/query_replay_yt |91.1%| [LD] {RESULT} $(B)/ydb/tools/query_replay_yt/query_replay_yt |91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/query_replay_yt/query_replay_yt >> DataShardReadIterator::ShouldCommitLocksWhenReadWriteInOneTransaction [GOOD] >> DataShardReadIterator::ShouldCommitLocksWhenReadWriteInSeparateTransactions >> DataShardReadIteratorLatency::ReadSplitLatency [GOOD] >> DataShardReadIteratorPageFaults::CancelPageFaultedReadThenDropTable |91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots |91.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots |91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots >> TConsoleConfigTests::TestAllowedScopes [GOOD] >> TConsoleConfigTests::TestAffectedConfigs |91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots |91.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots |91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots |91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |91.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration >> test_ttl.py::TestTTL::test_ttl[table_Date_0_UNIQUE_SYNC-pk_types32-all_types32-index32-Date-UNIQUE-SYNC] >> YdbSdkSessions::CloseSessionWithSessionPoolFromDtors [GOOD] >> TConsoleConfigTests::TestAffectedConfigs [GOOD] |91.1%| [LD] {default-linux-x86_64, 
release, asan} $(B)/ydb/core/tx/schemeshard/ut_sysview_reboots/ydb-core-tx-schemeshard-ut_sysview_reboots |91.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sysview_reboots/ydb-core-tx-schemeshard-ut_sysview_reboots |91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sysview_reboots/ydb-core-tx-schemeshard-ut_sysview_reboots >> DataShardReadIterator::ShouldReadFromHeadWithConflict-UseSink [GOOD] >> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict+UseSink >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1__ASYNC-pk_types4-all_types4-index4-Datetime--ASYNC] >> BackupRestore::PrefixedVectorIndex [GOOD] >> BackupRestore::RestoreReplicationThatDoesNotUseSecret ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::Transactions_Conflict_On_SeqNo_Table [GOOD] Test command err: 2025-06-24T20:34:44.234489Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617077158897328:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:44.234743Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:34:44.701595Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004550/r3tmp/tmpMSBUY0/pdisk_1.dat 2025-06-24T20:34:44.950041Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:44.950152Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:44.953656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:45.003976Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:45.007515Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617077158897147:2079] 1750797284201762 != 1750797284201765 TServer::EnableGrpc on GrpcPort 61546, node 1 2025-06-24T20:34:45.230808Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:45.231361Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/004550/r3tmp/yandexDUaa6E.tmp 2025-06-24T20:34:45.231386Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/004550/r3tmp/yandexDUaa6E.tmp 2025-06-24T20:34:45.231540Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/004550/r3tmp/yandexDUaa6E.tmp 2025-06-24T20:34:45.231655Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:45.314980Z INFO: TTestServer started on Port 15523 GrpcPort 61546 TClient is connected to server localhost:15523 PQClient connected to localhost:61546 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:45.695842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:45.711929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T20:34:45.730451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:34:45.926288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-24T20:34:45.936728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-24T20:34:48.612512Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617094338767124:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:48.612662Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:48.613158Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617094338767136:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:48.619068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:48.639084Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617094338767138:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T20:34:48.704092Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617094338767203:2444] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:49.141819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:49.145231Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617094338767211:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:34:49.147652Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=Mjc4N2FiZTQtYmJjNjEyYTktNmU4ZjQ1NmQtMmNjODVjODM=, ActorId: [1:7519617094338767122:2298], ActorState: ExecuteState, TraceId: 01jyhtd1515g26vpdgd3grc155, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:34:49.150002Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:34:49.177505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:49.223584Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617077158897328:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:49.223688Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:49.299906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519617098633734783:2621] === CheckClustersList. Ok 2025-06-24T20:34:55.978409Z :TwoSessionOneConsumer_Table INFO: TTopicSdkTestSetup started 2025-06-24T20:34:56.015677Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T20:34:56.050084Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:34:56.051038Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3094: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-24T20:34:56.051276Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72075186224037892] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:34:56.051535Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72075186224037892] doesn't have tx info 2025-06-24T20:34:56.051552Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T20:34:56.051569Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-24T20:34:56.051589Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-24T20:34:56.051620Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T20:34:56.051661Z node 1 :PERS ... e28d80-e7cfdb7e-6dfceed6_0 grpc read failed 2025-06-24T20:43:04.187887Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 17 sessionId: test-message_group_id_3|b059b99b-a7e28d80-e7cfdb7e-6dfceed6_0 grpc closed 2025-06-24T20:43:04.187922Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 17 sessionId: test-message_group_id_3|b059b99b-a7e28d80-e7cfdb7e-6dfceed6_0 is DEAD 2025-06-24T20:43:04.189065Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037904 (partition=3) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.189126Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037904 (partition=3) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.189164Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037904 (partition=3) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.189194Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037904 (partition=3) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.189232Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037904 (partition=3) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.189615Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [19:7519619057574586270:4104] destroyed 2025-06-24T20:43:04.189656Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [19:7519619057574586826:4104] destroyed 2025-06-24T20:43:04.189683Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [19:7519619057574586661:4104] destroyed 2025-06-24T20:43:04.189711Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [19:7519619057574586782:4104] destroyed 2025-06-24T20:43:04.189738Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037894] server disconnected, pipe [19:7519619057574586733:4104] destroyed 2025-06-24T20:43:04.189770Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037898] server disconnected, pipe [19:7519619066164523982:7330] destroyed 2025-06-24T20:43:04.189798Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037898] server disconnected, pipe [19:7519619066164523957:7330] destroyed 2025-06-24T20:43:04.189827Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037898] server disconnected, pipe [19:7519619066164523731:7330] destroyed 2025-06-24T20:43:04.189855Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037898] server disconnected, pipe [19:7519619066164524033:7330] destroyed 
2025-06-24T20:43:04.189884Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037898] server disconnected, pipe [19:7519619066164523862:7330] destroyed 2025-06-24T20:43:04.190817Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1070 sessionId: test-message_group_id_2|395aab45-ef014c25-753cb651-5cc82125_0 grpc read done: success: 0 data: 2025-06-24T20:43:04.190838Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1070 sessionId: test-message_group_id_2|395aab45-ef014c25-753cb651-5cc82125_0 grpc read failed 2025-06-24T20:43:04.190873Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1070 sessionId: test-message_group_id_2|395aab45-ef014c25-753cb651-5cc82125_0 grpc closed 2025-06-24T20:43:04.190891Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1070 sessionId: test-message_group_id_2|395aab45-ef014c25-753cb651-5cc82125_0 is DEAD 2025-06-24T20:43:04.196615Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037906 (partition=2) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.196679Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037906 (partition=2) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.196721Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037906 (partition=2) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.196757Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037906 (partition=2) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.196799Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037906 (partition=2) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.197122Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 5, State: StateIdle] TPartition::DropOwner. 2025-06-24T20:43:04.197233Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::DropOwner. 
2025-06-24T20:43:04.197395Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037904] server disconnected, pipe [19:7519618804171434596:2813] destroyed 2025-06-24T20:43:04.197444Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037904] server disconnected, pipe [19:7519618636667662946:2813] destroyed 2025-06-24T20:43:04.197479Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037904] server disconnected, pipe [19:7519618808466402567:2813] destroyed 2025-06-24T20:43:04.197506Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037904] server disconnected, pipe [19:7519618804171434165:2813] destroyed 2025-06-24T20:43:04.197534Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037904] server disconnected, pipe [19:7519618804171434905:2813] destroyed 2025-06-24T20:43:04.198848Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037906] server disconnected, pipe [19:7519618894365774579:5532] destroyed 2025-06-24T20:43:04.198885Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037906] server disconnected, pipe [19:7519618894365774532:5532] destroyed 2025-06-24T20:43:04.198915Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037906] server disconnected, pipe [19:7519618894365774304:5532] destroyed 2025-06-24T20:43:04.198944Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037906] server disconnected, pipe [19:7519618894365774648:5532] destroyed 2025-06-24T20:43:04.198972Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037906] server disconnected, pipe [19:7519618894365774478:5532] destroyed 2025-06-24T20:43:04.207334Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037906, Partition: 2, State: StateIdle] TPartition::DropOwner. 2025-06-24T20:43:04.207664Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 12 sessionId: test-message_group_id_1|5b5f2f38-adc976bf-ab5787c1-a637ccb4_0 grpc read done: success: 0 data: 2025-06-24T20:43:04.207688Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 12 sessionId: test-message_group_id_1|5b5f2f38-adc976bf-ab5787c1-a637ccb4_0 grpc read failed 2025-06-24T20:43:04.207728Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 12 sessionId: test-message_group_id_1|5b5f2f38-adc976bf-ab5787c1-a637ccb4_0 grpc closed 2025-06-24T20:43:04.207749Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 12 sessionId: test-message_group_id_1|5b5f2f38-adc976bf-ab5787c1-a637ccb4_0 is DEAD 2025-06-24T20:43:04.208829Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037908 (partition=1) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.208887Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037908 (partition=1) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.208927Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037908 (partition=1) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.208957Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037908 (partition=1) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.208995Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037908 (partition=1) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.209190Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 6 sessionId: 
test-message_group_id_0|450da7c2-19afc9b3-7b2a806c-5376df8e_0 grpc read done: success: 0 data: 2025-06-24T20:43:04.209208Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 6 sessionId: test-message_group_id_0|450da7c2-19afc9b3-7b2a806c-5376df8e_0 grpc read failed 2025-06-24T20:43:04.209244Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 6 sessionId: test-message_group_id_0|450da7c2-19afc9b3-7b2a806c-5376df8e_0 grpc closed 2025-06-24T20:43:04.209262Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 6 sessionId: test-message_group_id_0|450da7c2-19afc9b3-7b2a806c-5376df8e_0 is DEAD 2025-06-24T20:43:04.210046Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037897 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.210092Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037897 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.210137Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037897 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.210171Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037897 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.210210Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037897 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:43:04.211068Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037904, Partition: 3, State: StateIdle] TPartition::DropOwner. 2025-06-24T20:43:04.219312Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037908] server disconnected, pipe [19:7519618804171434898:2791] destroyed 2025-06-24T20:43:04.219407Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037908] server disconnected, pipe [19:7519618804171434163:2791] destroyed 2025-06-24T20:43:04.219439Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037908] server disconnected, pipe [19:7519618804171434551:2791] destroyed 2025-06-24T20:43:04.219474Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037908] server disconnected, pipe [19:7519618636667662858:2791] destroyed 2025-06-24T20:43:04.219504Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037908] server disconnected, pipe [19:7519618808466402555:2791] destroyed 2025-06-24T20:43:04.219888Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037897] server disconnected, pipe [19:7519618808466402552:2785] destroyed 2025-06-24T20:43:04.219923Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037897] server disconnected, pipe [19:7519618804171434159:2785] destroyed 2025-06-24T20:43:04.219953Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037897] server disconnected, pipe [19:7519618804171434900:2785] destroyed 2025-06-24T20:43:04.219984Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037897] server disconnected, pipe [19:7519618804171434530:2785] destroyed 2025-06-24T20:43:04.220012Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037897] server disconnected, pipe [19:7519618636667662839:2785] destroyed 2025-06-24T20:43:04.220265Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037908, Partition: 1, State: StateIdle] TPartition::DropOwner. 
2025-06-24T20:43:04.220331Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037897, Partition: 0, State: StateIdle] TPartition::DropOwner. >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0__ASYNC-pk_types25-all_types25-index25-Uint64--ASYNC] >> DataShardReadIteratorBatchMode::RangeFromNonInclusive [GOOD] >> DataShardReadIteratorBatchMode::RangeToInclusive ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> TConsoleConfigTests::TestAffectedConfigs [GOOD] Test command err: 2025-06-24T20:35:42.981494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:42.981567Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:43.042638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:44.776293Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:44.776361Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:44.844551Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:46.214058Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:46.214150Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:46.264563Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:47.660345Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:47.660428Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:47.729971Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:48.994098Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:48.994202Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:49.075595Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 
72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:51.109105Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:51.109190Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:51.172729Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:52.681296Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:52.681371Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:52.728706Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:54.066611Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:54.066701Z node 8 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:54.123788Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:56.736006Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:56.736088Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:56.780281Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:35:58.903245Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:35:58.903350Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:35:58.956758Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:36:01.389989Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:36:01.390082Z node 11 :IMPORT 
WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:01.531786Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:36:03.966497Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:36:03.966592Z node 12 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:04.022354Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:36:06.878033Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:36:06.878113Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:06.977029Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:36:09.704841Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:36:09.704936Z node 14 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:09.779575Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:36:12.660171Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:36:12.660275Z node 15 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:12.712913Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:36:15.909565Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:36:15.909658Z node 16 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:15.988941Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: 
(GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:36:18.804380Z node 17 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:36:18.804487Z node 17 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:18.860680Z node 17 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:36:19.524573Z node 17 :CMS_CONFIGS ERROR: console_configs_provider.cpp:1235: Unexpected config sender died for subscription id=1 2025-06-24T20:36:20.463527Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:36:20.463660Z node 18 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:20.512374Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:36:21.575616Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:36:21.575730Z node 18 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:21.739554Z node 18 :CMS_CONFIGS ERROR: console_configs_provider.cpp:1201: Couldn't deliver config notification for subscription id=1 tabletid=8651011 serviceid=[0:0:0] nodeid=1 host=host1 tenant=tenant1 nodetype=type1 kinds=2 lastprovidedconfig= 2025-06-24T20:36:22.743706Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:36:22.743824Z node 19 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:22.797007Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:36:23.935975Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:36:23.936100Z node 19 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:24.038094Z node 19 :CMS_CONFIGS ERROR: console_configs_provider.cpp:1201: Couldn't deliver config notification for subscription id=1 tabletid=0 serviceid=[19:8246204620103118691:7960687] nodeid=1 host=host1 tenant=tenant1 nodetype=type1 kinds=2 lastprovidedconfig= 2025-06-24T20:36:25.571666Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:36:25.571774Z node 20 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:25.733199Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part 
proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:36:30.566064Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:36:30.566157Z node 21 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:30.620933Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:36:35.084496Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:36:35.084604Z node 22 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:35.171811Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:36:37.076019Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:36:37.076137Z node 23 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:37.155771Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:36:38.694292Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:36:38.694403Z node 24 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:36:38.749143Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:36:46.365377Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:36:46.365515Z node 24 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:14.961479Z node 24 :CMS_CONFIGS ERROR: console_configs_provider.cpp:1201: Couldn't deliver config notification for subscription id=1 tabletid=0 serviceid=[100:28538277257700723:0] nodeid=100 host=host100 tenant=tenant-100 nodetype=type100 kinds=2 lastprovidedconfig= 2025-06-24T20:41:19.123269Z node 25 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:19.127252Z node 25 :IMPORT 
WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:19.501723Z node 25 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:41:27.319569Z node 25 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T20:41:27.319685Z node 25 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:43:40.684001Z node 26 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:43:40.684163Z node 26 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:43:40.778449Z node 26 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:43:44.127684Z node 27 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:43:44.127789Z node 27 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:43:44.196107Z node 27 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:43:47.202089Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:43:47.202207Z node 28 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:43:47.310950Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:43:50.162582Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:43:50.162723Z node 29 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:43:50.231544Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) >> DataShardReadIterator::ShouldNotReadFutureMvccFromFollower [GOOD] >> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc+UseSink >> DataShardReadIterator::ShouldReadRangeChunk7 [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk100 |91.1%| [LD] 
{default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots
|91.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots
|91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots
>> DataShardReadIteratorPageFaults::CancelPageFaultedReadThenDropTable [GOOD]
>> DataShardReadIteratorPageFaults::LocksNotLostOnPageFault
|91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark
|91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark
|91.1%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark
|91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots
|91.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots
|91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots
>> TxUsage::WriteToTopic_Demo_48_Query [GOOD]
>> TxUsage::WriteToTopic_Demo_50_Query
|91.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::CloseSessionWithSessionPoolFromDtors [GOOD]
|91.1%| [TA] $(B)/ydb/public/sdk/cpp/tests/integration/sessions/test-results/gtest/{meta.json ... results_accumulator.log}
>> DataShardReadIterator::ShouldCommitLocksWhenReadWriteInSeparateTransactions [GOOD]
>> DataShardReadIterator::HandlePersistentSnapshotGoneInContinue [GOOD]
>> DataShardReadIterator::HandleMvccGoneInContinue [GOOD]
>> test_ttl.py::TestTTL::test_ttl[table_Uint64_1__SYNC-pk_types27-all_types27-index27-Uint64--SYNC]
>> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict+UseSink [GOOD]
>> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict-UseSink
|91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots
|91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots
|91.1%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/integration/sessions/test-results/gtest/{meta.json ... results_accumulator.log}
|91.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots
|91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots
|91.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots
|91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::HandleMvccGoneInContinue [GOOD]
Test command err:
2025-06-24T20:41:55.623835Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:41:55.624258Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:55.624458Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002982/r3tmp/tmpbG86E4/pdisk_1.dat 2025-06-24T20:41:56.019405Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:41:56.026557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:41:56.129803Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:56.137774Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797711962484 != 1750797711962488 2025-06-24T20:41:56.184209Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:56.184349Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:56.195921Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:41:56.313741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:56.373012Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:41:56.374268Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:41:56.374829Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T20:41:56.375097Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:41:56.427890Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:41:56.428796Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:41:56.428962Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:41:56.430683Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T20:41:56.430774Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:41:56.430834Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:41:56.431319Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:41:56.431515Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:41:56.431599Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T20:41:56.446137Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:41:56.491964Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:41:56.492184Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:41:56.492326Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T20:41:56.492361Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:41:56.492401Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:41:56.492438Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:41:56.492730Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:41:56.492778Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:41:56.493103Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:41:56.493240Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:41:56.493327Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:41:56.493376Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:41:56.493415Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:41:56.493453Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:41:56.493487Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:41:56.493520Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:41:56.493561Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:41:56.493680Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:41:56.493713Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:41:56.493756Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T20:41:56.494199Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T20:41:56.494266Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:41:56.494446Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:41:56.494731Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T20:41:56.494805Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:41:56.494900Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:41:56.494957Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20:41:56.494993Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T20:41:56.495030Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T20:41:56.495075Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:41:56.495409Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T20:41:56.495447Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T20:41:56.495488Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T20:41:56.495516Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:41:56.495592Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T20:41:56.495622Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T20:41:56.495658Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T20:41:56.495688Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T20:41:56.495735Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T20:41:56.497239Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T20:41:56.497289Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:41:56.510088Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:41:56.510172Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:41:56.510227Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:41:56.510289Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... cution unit LoadTxDetails 2025-06-24T20:44:03.281805Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715665] at 72075186224037889 on unit LoadTxDetails 2025-06-24T20:44:03.282003Z node 14 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 3500:281474976715665 keys extracted: 0 2025-06-24T20:44:03.282071Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715665] at 72075186224037889 is Executed 2025-06-24T20:44:03.282110Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715665] at 72075186224037889 executing on unit LoadTxDetails 2025-06-24T20:44:03.282150Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715665] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-24T20:44:03.282185Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715665] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-24T20:44:03.282242Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [3500:281474976715665] is the new logically complete end at 72075186224037889 2025-06-24T20:44:03.282301Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [3500:281474976715665] is the new logically incomplete end at 72075186224037889 2025-06-24T20:44:03.282358Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [3500:281474976715665] at 72075186224037889 2025-06-24T20:44:03.282420Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715665] at 72075186224037889 is Executed 2025-06-24T20:44:03.282455Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715665] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-24T20:44:03.282487Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715665] at 72075186224037889 to execution unit CreateVolatileSnapshot 2025-06-24T20:44:03.282521Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715665] at 72075186224037889 on unit CreateVolatileSnapshot 2025-06-24T20:44:03.282683Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: 
Execution status for [3500:281474976715665] at 72075186224037889 is ExecutedNoMoreRestarts 2025-06-24T20:44:03.282722Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715665] at 72075186224037889 executing on unit CreateVolatileSnapshot 2025-06-24T20:44:03.282770Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715665] at 72075186224037889 to execution unit DropVolatileSnapshot 2025-06-24T20:44:03.282818Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715665] at 72075186224037889 on unit DropVolatileSnapshot 2025-06-24T20:44:03.282851Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715665] at 72075186224037889 is Executed 2025-06-24T20:44:03.282883Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715665] at 72075186224037889 executing on unit DropVolatileSnapshot 2025-06-24T20:44:03.282913Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715665] at 72075186224037889 to execution unit CompleteOperation 2025-06-24T20:44:03.282945Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715665] at 72075186224037889 on unit CompleteOperation 2025-06-24T20:44:03.283168Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715665] at 72075186224037889 is DelayComplete 2025-06-24T20:44:03.283225Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715665] at 72075186224037889 executing on unit CompleteOperation 2025-06-24T20:44:03.283284Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715665] at 72075186224037889 to execution unit CompletedOperations 2025-06-24T20:44:03.283334Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715665] at 72075186224037889 on unit CompletedOperations 2025-06-24T20:44:03.283389Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715665] at 72075186224037889 is Executed 2025-06-24T20:44:03.283421Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715665] at 72075186224037889 executing on unit CompletedOperations 2025-06-24T20:44:03.283457Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:281474976715665] at 72075186224037889 has finished 2025-06-24T20:44:03.283518Z node 14 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:44:03.283577Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-06-24T20:44:03.283627Z node 14 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-24T20:44:03.283681Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-24T20:44:03.295335Z node 14 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3500} 2025-06-24T20:44:03.295565Z node 14 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:44:03.295694Z node 14 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715665] at 72075186224037888 on unit CompleteOperation 2025-06-24T20:44:03.295863Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715665] from 72075186224037888 at tablet 72075186224037888 send result to client [14:1019:2804], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T20:44:03.295976Z node 14 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:44:03.296553Z node 14 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 3500} 2025-06-24T20:44:03.296628Z node 14 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T20:44:03.296668Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715665] at 72075186224037889 on unit CompleteOperation 2025-06-24T20:44:03.296730Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715665] from 72075186224037889 at tablet 72075186224037889 send result to client [14:1019:2804], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T20:44:03.296787Z node 14 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T20:44:03.299235Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [14:552:2478], Recipient [14:625:2529]: NKikimrTxDataShard.TEvRead ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715665 } ResultFormat: FORMAT_ARROW KeysSize: 1 2025-06-24T20:44:03.299522Z node 14 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-24T20:44:03.299678Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit CheckRead 2025-06-24T20:44:03.299891Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-24T20:44:03.299989Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit CheckRead 2025-06-24T20:44:03.300087Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T20:44:03.300174Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T20:44:03.300243Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:8] at 72075186224037888 2025-06-24T20:44:03.300333Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-24T20:44:03.300379Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T20:44:03.300413Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T20:44:03.300449Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit ExecuteRead 2025-06-24T20:44:03.300678Z node 14 :TX_DATASHARD TRACE: 
datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715665 } ResultFormat: FORMAT_ARROW } 2025-06-24T20:44:03.301239Z node 14 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3500/281474976715665 2025-06-24T20:44:03.301355Z node 14 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[14:552:2478], 1} after executionsCount# 1 2025-06-24T20:44:03.301468Z node 14 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[14:552:2478], 1} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T20:44:03.301782Z node 14 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[14:552:2478], 1} finished in read 2025-06-24T20:44:03.301924Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-24T20:44:03.301967Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T20:44:03.302003Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T20:44:03.302040Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit CompletedOperations 2025-06-24T20:44:03.302109Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-24T20:44:03.302143Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T20:44:03.302195Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:8] at 72075186224037888 has finished 2025-06-24T20:44:03.302287Z node 14 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T20:44:03.302551Z node 14 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 |91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/etcd_proxy/etcd_proxy >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1__SYNC-pk_types3-all_types3-index3-Datetime--SYNC] >> TxUsage::Transactions_Conflict_On_SeqNo_Query [GOOD] |91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/etcd_proxy/etcd_proxy |91.1%| [LD] {RESULT} $(B)/ydb/apps/etcd_proxy/etcd_proxy >> DataShardReadIteratorBatchMode::RangeToInclusive [GOOD] >> DataShardReadIteratorBatchMode::RangeToNonInclusive >> TxUsage::The_Transaction_Starts_On_One_Version_And_Ends_On_The_Other >> test_ttl.py::TestTTL::test_ttl[table_Date_0__ASYNC-pk_types31-all_types31-index31-Date--ASYNC] [GOOD] >> unstable_connection.py::TestUnstableConnection::test >> BackupRestore::RestoreReplicationThatDoesNotUseSecret [FAIL] >> BackupRestore::ReplicasAreNotBackedUp >> DataShardReadIteratorPageFaults::LocksNotLostOnPageFault [GOOD] >> DataShardReadIteratorState::ShouldCalculateQuota [GOOD] >> 
test_ttl.py::TestTTL::test_ttl[table_DyNumber_0__ASYNC-pk_types13-all_types13-index13-DyNumber--ASYNC] [GOOD]
>> test_ttl.py::TestTTL::test_ttl[table_Datetime_0__ASYNC-pk_types1-all_types1-index1-Datetime--ASYNC] [GOOD]
>> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc+UseSink [GOOD]
>> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc-UseSink
>> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0__SYNC-pk_types6-all_types6-index6-Timestamp--SYNC] [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIteratorState::ShouldCalculateQuota [GOOD]
Test command err:
2025-06-24T20:41:55.003324Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:41:55.003745Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:55.003935Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0029b9/r3tmp/tmpcMwgGk/pdisk_1.dat 2025-06-24T20:41:55.419424Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:41:55.422436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:41:55.470325Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:55.483642Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797711293653 != 1750797711293657 2025-06-24T20:41:55.533750Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:55.533892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:55.550304Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:41:55.652047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:55.708642Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:41:55.709832Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:41:55.710427Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T20:41:55.710708Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:41:55.779748Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:41:55.780583Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:41:55.780732Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:41:55.782617Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T20:41:55.782727Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:41:55.782800Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:41:55.783227Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:41:55.783401Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:41:55.783480Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T20:41:55.783964Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:41:55.877911Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:41:55.878134Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:41:55.878246Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T20:41:55.878281Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:41:55.878336Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:41:55.878438Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:41:55.878683Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:41:55.878736Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:41:55.879079Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:41:55.879227Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:41:55.879308Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:41:55.879379Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:41:55.879417Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:41:55.879454Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:41:55.879488Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:41:55.879518Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:41:55.879574Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:41:55.879699Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:41:55.879739Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:41:55.879786Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T20:41:55.880172Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T20:41:55.880224Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:41:55.880342Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:41:55.880536Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T20:41:55.880605Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:41:55.880713Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:41:55.880780Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20:41:55.880823Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T20:41:55.880862Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T20:41:55.880911Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:41:55.881243Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T20:41:55.881285Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T20:41:55.881323Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T20:41:55.881370Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:41:55.881416Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T20:41:55.881446Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T20:41:55.881485Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T20:41:55.881529Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T20:41:55.881559Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T20:41:55.882358Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:41:55.882407Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:41:55.882441Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:41:55.882506Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-06-24T20:41:55.882565Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T20:41:55.892666Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T20:41:55.892734Z ... 5-06-24T20:44:13.088608Z node 14 :TX_DATASHARD INFO: datashard_write_operation.cpp:707: Write transaction 5 at 72075186224037888 has an error: Operation is aborting because locks are not valid 2025-06-24T20:44:13.088773Z node 14 :TX_DATASHARD TRACE: datashard_kqp.cpp:787: KqpEraseLock LockId: 281474976715663 DataShard: 72075186224037888 Generation: 2 Counter: 5 SchemeShard: 72057594046644480 PathId: 2 2025-06-24T20:44:13.088921Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-24T20:44:13.088958Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit ExecuteWrite 2025-06-24T20:44:13.088990Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-24T20:44:13.089028Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T20:44:13.089220Z node 14 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. 
txid 5 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-24T20:44:13.089299Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is DelayComplete 2025-06-24T20:44:13.089353Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-24T20:44:13.089436Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T20:44:13.089498Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CompletedOperations 2025-06-24T20:44:13.089575Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-24T20:44:13.089604Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T20:44:13.089646Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:5] at 72075186224037888 has finished 2025-06-24T20:44:13.089775Z node 14 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-24T20:44:13.089856Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:5] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T20:44:13.089944Z node 14 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 5 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_LOCKS_BROKEN 2025-06-24T20:44:13.090167Z node 14 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 5 at tablet 72075186224037888 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-24T20:44:13.090313Z node 14 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:44:13.090798Z node 14 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [14:958:2709], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [14:905:2709]Got LOCKS BROKEN for table `/Root/table-1`. ShardID=72075186224037888, Sink=[14:958:2709].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-24T20:44:13.091067Z node 14 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [14:951:2709], SessionActorId: [14:905:2709], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[14:905:2709]. isRollback=0 2025-06-24T20:44:13.091874Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=14&id=Y2IxYjA0MjAtODk0N2JkZC0yOTdkYWZkNS1lOTNmNmJmZg==, ActorId: [14:905:2709], ActorState: ExecuteState, TraceId: 01jyhty88e2q48tf2yz098gnfd, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [14:952:2709] from: [14:951:2709] 2025-06-24T20:44:13.092161Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 278003712, Sender [14:951:2709], Recipient [14:829:2657]: NKikimrDataEvents.TEvWrite TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715663 DataShard: 72075186224037888 Generation: 2 Counter: 5 SchemeShard: 72057594046644480 PathId: 2 } Op: Rollback } 2025-06-24T20:44:13.092208Z node 14 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-24T20:44:13.092454Z node 14 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [14:952:2709] TxId: 281474976715665. Ctx: { TraceId: 01jyhty88e2q48tf2yz098gnfd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=Y2IxYjA0MjAtODk0N2JkZC0yOTdkYWZkNS1lOTNmNmJmZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-24T20:44:13.092870Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435074, Sender [14:829:2657], Recipient [14:829:2657]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-24T20:44:13.092922Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-24T20:44:13.093009Z node 14 :TX_DATASHARD TRACE: datashard__write.cpp:28: TTxWrite:: execute at tablet# 72075186224037888 2025-06-24T20:44:13.093168Z node 14 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: Parsing write transaction for 0 at 72075186224037888, record: TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715663 DataShard: 72075186224037888 Generation: 2 Counter: 5 SchemeShard: 72057594046644480 PathId: 2 } Op: Rollback } 2025-06-24T20:44:13.093260Z node 14 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715663, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-24T20:44:13.093349Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CheckWrite 2025-06-24T20:44:13.093398Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-24T20:44:13.093435Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CheckWrite 2025-06-24T20:44:13.093469Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T20:44:13.093502Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T20:44:13.093550Z node 14 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v3500/18446744073709551615 ImmediateWriteEdge# v3501/0 ImmediateWriteEdgeReplied# v3501/0 2025-06-24T20:44:13.093611Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037888 2025-06-24T20:44:13.093650Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-24T20:44:13.093681Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T20:44:13.093713Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit ExecuteWrite 2025-06-24T20:44:13.093742Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit ExecuteWrite 2025-06-24T20:44:13.093781Z node 14 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:6] at 72075186224037888 2025-06-24T20:44:13.093928Z node 14 :TX_DATASHARD TRACE: datashard_kqp.cpp:787: KqpEraseLock LockId: 281474976715663 DataShard: 72075186224037888 Generation: 2 Counter: 5 SchemeShard: 72057594046644480 PathId: 2 2025-06-24T20:44:13.094019Z node 14 :TX_DATASHARD DEBUG: execute_write_unit.cpp:420: Skip empty write operation for [0:6] at 72075186224037888 2025-06-24T20:44:13.094114Z node 14 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 
0 2025-06-24T20:44:13.094245Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20:44:13.094298Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit ExecuteWrite 2025-06-24T20:44:13.094379Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-24T20:44:13.094460Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T20:44:13.094505Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is DelayComplete 2025-06-24T20:44:13.094538Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-24T20:44:13.094572Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T20:44:13.094604Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CompletedOperations 2025-06-24T20:44:13.094657Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-24T20:44:13.094687Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T20:44:13.094721Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished 2025-06-24T20:44:13.094793Z node 14 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-24T20:44:13.094832Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T20:44:13.094880Z node 14 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 6 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2025-06-24T20:44:13.094981Z node 14 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:44:13.095335Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=14&id=Y2IxYjA0MjAtODk0N2JkZC0yOTdkYWZkNS1lOTNmNmJmZg==, ActorId: [14:905:2709], ActorState: ExecuteState, TraceId: 01jyhty88e2q48tf2yz098gnfd, Create QueryResponse for error on request, msg: 2025-06-24T20:44:13.096906Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [14:61:2108], Recipient [14:829:2657]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715663 LockNode: 14 Status: STATUS_NOT_FOUND >> DataShardReadIterator::ShouldReadRangeChunk100 [GOOD] |91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/pgwire/pgwire |91.1%| [LD] {RESULT} $(B)/ydb/apps/pgwire/pgwire |91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/pgwire/pgwire >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1__ASYNC-pk_types22-all_types22-index22-Uint32--ASYNC] [GOOD] |91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results |91.1%| [LD] {RESULT} 
$(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results |91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results >> KqpNamedExpressions::NamedExpressionRandomInsert-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomDataQuery+UseSink >> TxUsage::WriteToTopic_Demo_50_Query [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1__SYNC-pk_types9-all_types9-index9-Timestamp--SYNC] [GOOD] |91.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldReadRangeChunk100 [GOOD] Test command err: 2025-06-24T20:41:53.101157Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:41:53.101614Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:53.101821Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002c04/r3tmp/tmpHl19Rg/pdisk_1.dat 2025-06-24T20:41:53.542180Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:41:53.556233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:41:53.608668Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:53.614571Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797709755553 != 1750797709755557 2025-06-24T20:41:53.666450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:53.666660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:53.679215Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:41:53.798548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:53.852071Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:41:53.853320Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:41:53.853849Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T20:41:53.854156Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:41:53.905075Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:41:53.905964Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:41:53.906131Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:41:53.908188Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T20:41:53.908298Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:41:53.908360Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:41:53.908793Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:41:53.908966Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:41:53.909067Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T20:41:53.923788Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:41:53.963711Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:41:53.963958Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:41:53.964088Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T20:41:53.964143Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:41:53.964186Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:41:53.964245Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:41:53.964574Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:41:53.964627Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:41:53.965025Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:41:53.965124Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:41:53.965179Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:41:53.965240Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:41:53.965284Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:41:53.965340Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:41:53.965375Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:41:53.965407Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:41:53.965450Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:41:53.965564Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:41:53.965606Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:41:53.965650Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T20:41:53.966091Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T20:41:53.966151Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:41:53.966291Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:41:53.966528Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T20:41:53.966596Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:41:53.966715Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:41:53.966765Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20:41:53.966807Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T20:41:53.966845Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T20:41:53.966882Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:41:53.967185Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T20:41:53.967229Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T20:41:53.967269Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T20:41:53.967332Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:41:53.967397Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T20:41:53.967432Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T20:41:53.967466Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T20:41:53.967498Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T20:41:53.967528Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T20:41:53.969146Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T20:41:53.969217Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:41:53.983801Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:41:53.983882Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:41:53.983923Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:41:53.983968Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:553:2479], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542804, quota bytes left# 18446744073708987711, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-24T20:44:15.842870Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:937:2737], Recipient [15:937:2737]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-24T20:44:15.842944Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:553:2479], 1}, firstUnprocessedQuery# 0 2025-06-24T20:44:15.842996Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:553:2479], 1}, FirstUnprocessedQuery# 0 2025-06-24T20:44:15.843420Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:553:2479], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542705, quota bytes left# 18446744073708981375, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-24T20:44:15.843597Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:937:2737], Recipient [15:937:2737]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-24T20:44:15.843658Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:553:2479], 1}, firstUnprocessedQuery# 0 2025-06-24T20:44:15.843704Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:553:2479], 1}, FirstUnprocessedQuery# 0 2025-06-24T20:44:15.844097Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:553:2479], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542606, quota bytes left# 18446744073708975039, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-24T20:44:15.844267Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:937:2737], Recipient [15:937:2737]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-24T20:44:15.844320Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 
ReadContinue for iterator# {[15:553:2479], 1}, firstUnprocessedQuery# 0 2025-06-24T20:44:15.844364Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:553:2479], 1}, FirstUnprocessedQuery# 0 2025-06-24T20:44:15.844744Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:553:2479], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542507, quota bytes left# 18446744073708968703, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-24T20:44:15.844939Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:937:2737], Recipient [15:937:2737]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-24T20:44:15.844994Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:553:2479], 1}, firstUnprocessedQuery# 0 2025-06-24T20:44:15.845045Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:553:2479], 1}, FirstUnprocessedQuery# 0 2025-06-24T20:44:15.845450Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:553:2479], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542408, quota bytes left# 18446744073708962367, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-24T20:44:15.845589Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:937:2737], Recipient [15:937:2737]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-24T20:44:15.845641Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:553:2479], 1}, firstUnprocessedQuery# 0 2025-06-24T20:44:15.845686Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:553:2479], 1}, FirstUnprocessedQuery# 0 2025-06-24T20:44:15.846120Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:553:2479], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542309, quota bytes left# 18446744073708956031, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-24T20:44:15.846289Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:937:2737], Recipient [15:937:2737]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-24T20:44:15.846340Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:553:2479], 1}, firstUnprocessedQuery# 0 2025-06-24T20:44:15.846384Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:553:2479], 1}, FirstUnprocessedQuery# 0 2025-06-24T20:44:15.846768Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:553:2479], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542210, quota bytes left# 18446744073708949695, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-24T20:44:15.846982Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:937:2737], Recipient [15:937:2737]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-24T20:44:15.847039Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 
ReadContinue for iterator# {[15:553:2479], 1}, firstUnprocessedQuery# 0 2025-06-24T20:44:15.847088Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:553:2479], 1}, FirstUnprocessedQuery# 0 2025-06-24T20:44:15.847598Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:553:2479], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542111, quota bytes left# 18446744073708943359, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-24T20:44:15.847793Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:937:2737], Recipient [15:937:2737]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-24T20:44:15.847847Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:553:2479], 1}, firstUnprocessedQuery# 0 2025-06-24T20:44:15.847893Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:553:2479], 1}, FirstUnprocessedQuery# 0 2025-06-24T20:44:15.848267Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:553:2479], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542012, quota bytes left# 18446744073708937023, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-24T20:44:15.848413Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:937:2737], Recipient [15:937:2737]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-24T20:44:15.848470Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:553:2479], 1}, firstUnprocessedQuery# 0 2025-06-24T20:44:15.848513Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:553:2479], 1}, FirstUnprocessedQuery# 0 2025-06-24T20:44:15.848875Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:553:2479], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709541913, quota bytes left# 18446744073708930687, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-24T20:44:15.849051Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:937:2737], Recipient [15:937:2737]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-24T20:44:15.849106Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:553:2479], 1}, firstUnprocessedQuery# 0 2025-06-24T20:44:15.849145Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:553:2479], 1}, FirstUnprocessedQuery# 0 2025-06-24T20:44:15.849573Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:553:2479], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709541814, quota bytes left# 18446744073708924351, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-24T20:44:15.849722Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:937:2737], Recipient [15:937:2737]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-24T20:44:15.849776Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 
ReadContinue for iterator# {[15:553:2479], 1}, firstUnprocessedQuery# 0 2025-06-24T20:44:15.849822Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:553:2479], 1}, FirstUnprocessedQuery# 0 2025-06-24T20:44:15.850189Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:553:2479], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709541715, quota bytes left# 18446744073708918015, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-24T20:44:15.850346Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:937:2737], Recipient [15:937:2737]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-24T20:44:15.850400Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:553:2479], 1}, firstUnprocessedQuery# 0 2025-06-24T20:44:15.850440Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:553:2479], 1}, FirstUnprocessedQuery# 0 2025-06-24T20:44:15.850821Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:553:2479], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709541616, quota bytes left# 18446744073708911679, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-24T20:44:15.850972Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:937:2737], Recipient [15:937:2737]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-24T20:44:15.851022Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:553:2479], 1}, firstUnprocessedQuery# 0 2025-06-24T20:44:15.851067Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:553:2479], 1}, FirstUnprocessedQuery# 0 2025-06-24T20:44:15.854077Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:553:2479], 1} sends rowCount# 1, bytes# 64, quota rows left# 18446744073709541615, quota bytes left# 18446744073708911615, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T20:44:15.854212Z node 15 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:3103: 72075186224037890 read iterator# {[15:553:2479], 1} finished in ReadContinue >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1_UNIQUE_SYNC-pk_types29-all_types29-index29-Uint64-UNIQUE-SYNC] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1__SYNC-pk_types15-all_types15-index15-DyNumber--SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_50_Query [GOOD] Test command err: 2025-06-24T20:34:45.086146Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617081243112676:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:45.086927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00453f/r3tmp/tmpzmKqJK/pdisk_1.dat 2025-06-24T20:34:45.544355Z node 1 :PQ_READ_PROXY DEBUG: 
caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:34:45.855548Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:45.855635Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:45.858748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:45.905421Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:45.912681Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617076948145179:2079] 1750797285016180 != 1750797285016183 TServer::EnableGrpc on GrpcPort 24685, node 1 2025-06-24T20:34:46.081089Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:46.207776Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00453f/r3tmp/yandexPyxK7Y.tmp 2025-06-24T20:34:46.207803Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00453f/r3tmp/yandexPyxK7Y.tmp 2025-06-24T20:34:46.208006Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00453f/r3tmp/yandexPyxK7Y.tmp 2025-06-24T20:34:46.208120Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:46.307352Z INFO: TTestServer started on Port 62810 GrpcPort 24685 TClient is connected to server localhost:62810 PQClient connected to localhost:24685 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:46.919488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:46.951370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T20:34:47.096475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:34:47.308042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-24T20:34:47.331118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-24T20:34:49.904216Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617098422982447:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:49.904385Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:49.906323Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617098422982468:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:49.912984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:49.947702Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617098422982470:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T20:34:50.079261Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617081243112676:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:50.079368Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:50.340788Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617102717949830:2444] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:50.411092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:50.501444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:50.572809Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617102717949840:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:34:50.573207Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=M2IyNzBjMmUtZGU4NjRiYzctMzZkMDA3YmQtZTJlOWMyYzE=, ActorId: [1:7519617098422982429:2297], ActorState: ExecuteState, TraceId: 01jyhtd2c114t5zvhmmsph8tdm, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:34:50.582846Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:34:50.643321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519617102717950126:2625] === CheckClustersList. Ok 2025-06-24T20:34:56.455594Z :WriteToTopic_Demo_4_Table INFO: TTopicSdkTestSetup started 2025-06-24T20:34:56.494098Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T20:34:56.520443Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7519617128487754089:2706] connected; active server actors: 1 2025-06-24T20:34:56.520705Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-24T20:34:56.521597Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-24T20:34:56.521716Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-24T20:34:56.522848Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-24T20:34:56.539705Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:34:56.539955Z node 1 :PERSQUEUE ... 
nd: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "test-topic" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/test-topic" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } ReadRuleGenerations: 0 AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } Consumers { Name: "test-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } } BootstrapConfig { } SourceActor { RawX1: 7519619477981368610 RawX2: 81604380770 } Partitions { Partition { PartitionId: 0 } } 2025-06-24T20:44:18.916777Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:44:18.922477Z node 19 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-24T20:44:18.923081Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:3094: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-24T20:44:18.923819Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:44:18.923850Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037892] Try execute txs with state EXECUTED 2025-06-24T20:44:18.923875Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037892] TxId 281474976710672, State EXECUTED 2025-06-24T20:44:18.923902Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72075186224037892] TxId 281474976710672 State EXECUTED FrontTxId 281474976710672 2025-06-24T20:44:18.923927Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4007: [PQ: 72075186224037892] TPersQueue::SendEvReadSetAckToSenders 2025-06-24T20:44:18.923952Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976710672, NewState WAIT_RS_ACKS 2025-06-24T20:44:18.923980Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72075186224037892] TxId 281474976710672 moved from EXECUTED to WAIT_RS_ACKS 2025-06-24T20:44:18.924012Z node 19 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976710672] PredicateAcks: 0/0 2025-06-24T20:44:18.924022Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4560: [PQ: 72075186224037892] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-24T20:44:18.924039Z node 19 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976710672] PredicateAcks: 0/0 2025-06-24T20:44:18.924066Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4621: [PQ: 72075186224037892] add an TxId 281474976710672 to the list for deletion 2025-06-24T20:44:18.924100Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976710672, NewState DELETING 2025-06-24T20:44:18.924129Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:3852: [PQ: 72075186224037892] delete key for TxId 281474976710672 
2025-06-24T20:44:18.924207Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T20:44:18.930630Z :DEBUG: [/Root] MessageGroupId [src] SessionId [] Write session: try to update token 2025-06-24T20:44:18.930218Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T20:44:18.930263Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-06-24T20:44:18.930288Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037892] TxId 281474976710672, State DELETING 2025-06-24T20:44:18.930313Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4571: [PQ: 72075186224037892] delete TxId 281474976710672 2025-06-24T20:44:18.931413Z :INFO: [/Root] MessageGroupId [src] SessionId [] Write session: Do CDS request 2025-06-24T20:44:18.931453Z :INFO: [/Root] MessageGroupId [src] SessionId [] Start write session. Will connect to endpoint: localhost:28775 2025-06-24T20:44:18.971643Z :DEBUG: [/Root] MessageGroupId [src] SessionId [] Write session: send init request: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-24T20:44:18.973254Z node 19 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-24T20:44:18.973302Z node 19 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1 2025-06-24T20:44:18.973925Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: grpc read done: success: 1 data: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-24T20:44:18.974066Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1 topic: "test-topic" message_group_id: "src" from ipv6:[::1]:50606 2025-06-24T20:44:18.974089Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:50606 proto=v1 topic=test-topic durationSec=0 2025-06-24T20:44:18.974101Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-24T20:44:18.984519Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-24T20:44:18.984757Z node 19 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-24T20:44:18.984775Z node 19 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-24T20:44:18.984791Z node 19 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE 
`//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-24T20:44:18.984846Z node 19 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [19:7519619542405879204:2437] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-24T20:44:18.984876Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-24T20:44:18.985899Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 19, Generation: 1 2025-06-24T20:44:18.985965Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037892] server connected, pipe [19:7519619542405879207:2437], now have 1 active actors on pipe 2025-06-24T20:44:18.986077Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'test-topic' requestId: 2025-06-24T20:44:18.986110Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'test-topic' partition 0 2025-06-24T20:44:18.986220Z node 19 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|abdbe7e6-ab0da8c5-b5f406fa-c50612f7_0 generated for partition 0 topic 'test-topic' owner src 2025-06-24T20:44:18.986365Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-24T20:44:18.986432Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T20:44:18.986616Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'test-topic' requestId: 2025-06-24T20:44:18.986637Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'test-topic' partition 0 2025-06-24T20:44:18.986732Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T20:44:18.986826Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|abdbe7e6-ab0da8c5-b5f406fa-c50612f7_0 2025-06-24T20:44:18.988656Z :INFO: [/Root] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750797858988 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T20:44:18.988779Z :INFO: [/Root] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|abdbe7e6-ab0da8c5-b5f406fa-c50612f7_0" topic: "test-topic" 2025-06-24T20:44:18.991367Z :INFO: [/Root] MessageGroupId [src] SessionId [src|abdbe7e6-ab0da8c5-b5f406fa-c50612f7_0] Write session: close. 
Timeout = 0 ms 2025-06-24T20:44:18.991428Z :INFO: [/Root] MessageGroupId [src] SessionId [src|abdbe7e6-ab0da8c5-b5f406fa-c50612f7_0] Write session will now close 2025-06-24T20:44:18.991487Z :DEBUG: [/Root] MessageGroupId [src] SessionId [src|abdbe7e6-ab0da8c5-b5f406fa-c50612f7_0] Write session: aborting 2025-06-24T20:44:18.992046Z :INFO: [/Root] MessageGroupId [src] SessionId [src|abdbe7e6-ab0da8c5-b5f406fa-c50612f7_0] Write session: gracefully shut down, all writes complete 2025-06-24T20:44:18.992097Z :DEBUG: [/Root] MessageGroupId [src] SessionId [src|abdbe7e6-ab0da8c5-b5f406fa-c50612f7_0] Write session: destroy 2025-06-24T20:44:18.996142Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|abdbe7e6-ab0da8c5-b5f406fa-c50612f7_0 grpc read done: success: 0 data: 2025-06-24T20:44:18.996175Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|abdbe7e6-ab0da8c5-b5f406fa-c50612f7_0 grpc read failed 2025-06-24T20:44:18.996215Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|abdbe7e6-ab0da8c5-b5f406fa-c50612f7_0 grpc closed 2025-06-24T20:44:18.996233Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|abdbe7e6-ab0da8c5-b5f406fa-c50612f7_0 is DEAD 2025-06-24T20:44:18.997107Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T20:44:18.999088Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [19:7519619542405879207:2437] destroyed 2025-06-24T20:44:18.999181Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
2025-06-24T20:44:19.019357Z :WriteToTopic_Demo_50_Query INFO: Topic created >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0__SYNC-pk_types12-all_types12-index12-DyNumber--SYNC] [GOOD] |91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots |91.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots |91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots >> DataShardReadIteratorBatchMode::RangeToNonInclusive [GOOD] >> DataShardReadIteratorBatchMode::MultipleRanges >> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict-UseSink [GOOD] |91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |91.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Date_0__ASYNC-pk_types31-all_types31-index31-Date--ASYNC] [GOOD] |91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |91.1%| [LD] {RESULT} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |91.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0__ASYNC-pk_types1-all_types1-index1-Datetime--ASYNC] [GOOD] >> TxUsage::The_Transaction_Starts_On_One_Version_And_Ends_On_The_Other [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict-UseSink [GOOD] Test command err: 2025-06-24T20:41:53.558073Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:41:53.558509Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:53.558731Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002bfd/r3tmp/tmpYsJxqK/pdisk_1.dat 2025-06-24T20:41:54.075964Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:41:54.087751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:41:54.179062Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:54.201729Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797710357475 != 1750797710357479 2025-06-24T20:41:54.261974Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:54.262155Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:54.274750Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:41:54.378430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:54.443944Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:41:54.445326Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:41:54.445907Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T20:41:54.446213Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:41:54.499037Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:41:54.500024Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:41:54.500227Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:41:54.502271Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T20:41:54.502367Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:41:54.502474Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:41:54.502904Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:41:54.503068Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:41:54.503390Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T20:41:54.515962Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:41:54.572703Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:41:54.572942Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:41:54.573079Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T20:41:54.573133Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:41:54.573181Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:41:54.573227Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:41:54.573554Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:41:54.573609Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:41:54.573961Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:41:54.574068Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:41:54.574136Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:41:54.574198Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:41:54.574249Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:41:54.574296Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:41:54.574341Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:41:54.574377Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:41:54.574420Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:41:54.574544Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:41:54.574593Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:41:54.574648Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T20:41:54.575075Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T20:41:54.575119Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:41:54.575386Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:41:54.575642Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T20:41:54.575725Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:41:54.575837Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:41:54.575911Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20:41:54.575964Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T20:41:54.576002Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T20:41:54.576037Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:41:54.576333Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T20:41:54.576369Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T20:41:54.576410Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T20:41:54.576464Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:41:54.576529Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T20:41:54.576561Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T20:41:54.576598Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T20:41:54.576632Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T20:41:54.576664Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T20:41:54.578140Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T20:41:54.578216Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:41:54.589173Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:41:54.589277Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:41:54.589317Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:41:54.589376Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-24T20:44:25.544788Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v3001/18446744073709551615 2025-06-24T20:44:25.544924Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CheckRead 2025-06-24T20:44:25.545150Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-24T20:44:25.545233Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CheckRead 2025-06-24T20:44:25.545325Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T20:44:25.545408Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T20:44:25.545503Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037888 2025-06-24T20:44:25.545590Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-24T20:44:25.545628Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T20:44:25.545661Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T20:44:25.545702Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit ExecuteRead 2025-06-24T20:44:25.545898Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 ResultFormat: FORMAT_ARROW MaxRowsInResult: 2 } 2025-06-24T20:44:25.546487Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Continue 2025-06-24T20:44:25.546553Z node 15 :TX_DATASHARD TRACE: 
datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Continue at tablet# 72075186224037888 2025-06-24T20:44:25.546702Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-24T20:44:25.591939Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [15:1006:2800], Recipient [15:626:2530]: {TEvReadSet step# 3001 txid# 281474976715667 TabletSource# 72075186224037891 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037891 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-24T20:44:25.592141Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T20:44:25.592267Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037888 source 72075186224037891 dest 72075186224037888 producer 72075186224037891 txId 281474976715667 2025-06-24T20:44:25.592539Z node 15 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037888 got read set: {TEvReadSet step# 3001 txid# 281474976715667 TabletSource# 72075186224037891 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037891 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-24T20:44:25.592882Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:705: Complete [3001 : 281474976715667] from 72075186224037888 at tablet 72075186224037888 send result to client [15:1096:2850], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T20:44:25.593035Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-24T20:44:25.593148Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:4] at 72075186224037888 for ExecuteRead 2025-06-24T20:44:25.593728Z node 15 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:1365: ActorId: [15:1096:2850] TxId: 281474976715667. Ctx: { TraceId: 01jyhtym0w4mv4d52te0wvtrav, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=NTM4MjFlYTUtM2UyM2M0ZTMtMzU4OWNhZjYtNDViNTEzZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got propose result, shard: 72075186224037888, status: COMPLETE, error: 2025-06-24T20:44:25.594062Z node 15 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [15:1096:2850] TxId: 281474976715667. Ctx: { TraceId: 01jyhtym0w4mv4d52te0wvtrav, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=NTM4MjFlYTUtM2UyM2M0ZTMtMzU4OWNhZjYtNDViNTEzZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T20:44:25.594222Z node 15 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [15:1096:2850] TxId: 281474976715667. Ctx: { TraceId: 01jyhtym0w4mv4d52te0wvtrav, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=NTM4MjFlYTUtM2UyM2M0ZTMtMzU4OWNhZjYtNDViNTEzZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-24T20:44:25.594483Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [15:626:2530], Recipient [15:626:2530]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:44:25.594607Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:44:25.594998Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:44:25.596637Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:44:25.596758Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T20:44:25.596848Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:282: Return cached ready operation [0:4] at 72075186224037888 2025-06-24T20:44:25.596935Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit ExecuteRead 2025-06-24T20:44:25.597170Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 2, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 ResultFormat: FORMAT_ARROW MaxRowsInResult: 2 } 2025-06-24T20:44:25.599388Z node 15 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3001/18446744073709551615 2025-06-24T20:44:25.599530Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[15:553:2479], 1} after executionsCount# 2 2025-06-24T20:44:25.599665Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[15:553:2479], 1} sends rowCount# 2, bytes# 32, quota rows left# 18446744073709551613, quota bytes left# 18446744073709551583, hasUnreadQueries# 1, total queries# 6, firstUnprocessed# 0 2025-06-24T20:44:25.600099Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-24T20:44:25.600208Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T20:44:25.600302Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T20:44:25.600390Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CompletedOperations 2025-06-24T20:44:25.600461Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-24T20:44:25.600492Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T20:44:25.600534Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037888 has finished 2025-06-24T20:44:25.600625Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:44:25.600713Z 
node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-24T20:44:25.600821Z node 15 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:44:25.600919Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:44:25.601733Z node 15 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 5, sender: [15:553:2479], selfId: [15:57:2104], source: [15:1074:2850] 2025-06-24T20:44:25.601981Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:626:2530], Recipient [15:626:2530]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-24T20:44:25.602125Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037888 ReadContinue for iterator# {[15:553:2479], 1}, firstUnprocessedQuery# 2 2025-06-24T20:44:25.602463Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037888 ReadContinue: iterator# {[15:553:2479], 1}, FirstUnprocessedQuery# 2 2025-06-24T20:44:25.602737Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037888 readContinue iterator# {[15:553:2479], 1} sends rowCount# 2, bytes# 32, quota rows left# 18446744073709551611, quota bytes left# 18446744073709551551, hasUnreadQueries# 1, total queries# 6, firstUnprocessed# 2 2025-06-24T20:44:25.615397Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:626:2530], Recipient [15:626:2530]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-24T20:44:25.615571Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037888 ReadContinue for iterator# {[15:553:2479], 1}, firstUnprocessedQuery# 4 2025-06-24T20:44:25.615793Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037888 ReadContinue: iterator# {[15:553:2479], 1}, FirstUnprocessedQuery# 4 2025-06-24T20:44:25.615972Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037888 readContinue iterator# {[15:553:2479], 1} sends rowCount# 2, bytes# 32, quota rows left# 18446744073709551609, quota bytes left# 18446744073709551519, hasUnreadQueries# 0, total queries# 6, firstUnprocessed# 4 2025-06-24T20:44:25.616191Z node 15 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:3103: 72075186224037888 read iterator# {[15:553:2479], 1} finished in ReadContinue 2025-06-24T20:44:25.617680Z node 15 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=15&id=NTM4MjFlYTUtM2UyM2M0ZTMtMzU4OWNhZjYtNDViNTEzZTU=, workerId: [15:1074:2850], local sessions count: 0 2025-06-24T20:44:25.618048Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [15:61:2108], Recipient [15:1006:2800]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715666 LockNode: 15 Status: STATUS_NOT_FOUND >> BackupRestore::ReplicasAreNotBackedUp [FAIL] >> BackupRestore::SkipEmptyDirsOnRestore >> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc-UseSink [GOOD] >> TxUsage::TestRetentionOnLongTxAndBigMessages >> ttl_delete_s3.py::TestDeleteTtl::test_ttl_delete |91.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots |91.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots 
|91.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0__ASYNC-pk_types13-all_types13-index13-DyNumber--ASYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0_UNIQUE_SYNC-pk_types20-all_types20-index20-Uint32-UNIQUE-SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc-UseSink [GOOD] Test command err: 2025-06-24T20:41:55.050854Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:41:55.055399Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:55.055674Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002949/r3tmp/tmpBPRpKf/pdisk_1.dat 2025-06-24T20:41:55.507821Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:41:55.510998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:41:55.599894Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:55.606159Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797711554455 != 1750797711554459 2025-06-24T20:41:55.659545Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:55.659724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:55.671837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:41:55.763428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:55.813322Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:41:55.814807Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:41:55.815267Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T20:41:55.815522Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:41:55.866998Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:41:55.867752Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:41:55.867888Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:41:55.870144Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T20:41:55.870239Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:41:55.870311Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:41:55.870738Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:41:55.870939Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:41:55.871463Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T20:41:55.883688Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:41:55.951596Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:41:55.951827Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:41:55.951949Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T20:41:55.951987Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:41:55.952027Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:41:55.952077Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:41:55.952365Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:41:55.952424Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:41:55.952775Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:41:55.952881Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:41:55.952951Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:41:55.953017Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:41:55.953059Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:41:55.953094Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:41:55.953127Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:41:55.953162Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:41:55.953207Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:41:55.953344Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:41:55.953389Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:41:55.953450Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T20:41:55.953875Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T20:41:55.953921Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:41:55.954056Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:41:55.954264Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T20:41:55.954330Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:41:55.954426Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:41:55.954514Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20:41:55.954553Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T20:41:55.954616Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T20:41:55.954653Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:41:55.954939Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T20:41:55.954985Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T20:41:55.955024Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T20:41:55.955064Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:41:55.955126Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T20:41:55.955186Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T20:41:55.955223Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T20:41:55.955257Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T20:41:55.955299Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T20:41:55.956862Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T20:41:55.956922Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:41:55.967693Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:41:55.967791Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:41:55.967831Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:41:55.967877Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... 24T20:44:30.608781Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 2, request: { ReadId: 3 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3001 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW } 2025-06-24T20:44:30.609210Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3001/18446744073709551615 2025-06-24T20:44:30.609254Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[16:552:2478], 3} after executionsCount# 2 2025-06-24T20:44:30.609298Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[16:552:2478], 3} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T20:44:30.609500Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[16:552:2478], 3} finished in read 2025-06-24T20:44:30.609583Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-24T20:44:30.609615Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T20:44:30.609644Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T20:44:30.609674Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit CompletedOperations 2025-06-24T20:44:30.609719Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-24T20:44:30.609742Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T20:44:30.609765Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:8] at 72075186224037888 has finished 2025-06-24T20:44:30.609792Z node 16 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 
(dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:44:30.609834Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-24T20:44:30.609913Z node 16 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:44:30.609990Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:44:30.612110Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [16:552:2478], Recipient [16:625:2529]: NKikimrTxDataShard.TEvRead ReadId: 4 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3001 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW KeysSize: 1 2025-06-24T20:44:30.612411Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-24T20:44:30.612578Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:9] at 72075186224037888 on unit CheckRead 2025-06-24T20:44:30.612921Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:9] at 72075186224037888 is Executed 2025-06-24T20:44:30.613036Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:9] at 72075186224037888 executing on unit CheckRead 2025-06-24T20:44:30.613128Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:9] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T20:44:30.613230Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:9] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T20:44:30.613312Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:9] at 72075186224037888 2025-06-24T20:44:30.613405Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:9] at 72075186224037888 is Executed 2025-06-24T20:44:30.613444Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:9] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T20:44:30.613477Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:9] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T20:44:30.613512Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:9] at 72075186224037888 on unit ExecuteRead 2025-06-24T20:44:30.613757Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 4 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3001 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW } 2025-06-24T20:44:30.614281Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3001/18446744073709551615 2025-06-24T20:44:30.614388Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[16:552:2478], 4} after executionsCount# 1 2025-06-24T20:44:30.614508Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[16:552:2478], 4} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, 
hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T20:44:30.614856Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[16:552:2478], 4} finished in read 2025-06-24T20:44:30.614976Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:9] at 72075186224037888 is Executed 2025-06-24T20:44:30.615006Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:9] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T20:44:30.615032Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:9] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T20:44:30.615061Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:9] at 72075186224037888 on unit CompletedOperations 2025-06-24T20:44:30.615114Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:9] at 72075186224037888 is Executed 2025-06-24T20:44:30.615137Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:9] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T20:44:30.615200Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:9] at 72075186224037888 has finished 2025-06-24T20:44:30.615288Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T20:44:30.616774Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [16:552:2478], Recipient [16:625:2529]: NKikimrTxDataShard.TEvRead ReadId: 5 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3000 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW KeysSize: 1 2025-06-24T20:44:30.617042Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-24T20:44:30.617220Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:10] at 72075186224037888 on unit CheckRead 2025-06-24T20:44:30.617761Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:10] at 72075186224037888 is Executed 2025-06-24T20:44:30.617884Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:10] at 72075186224037888 executing on unit CheckRead 2025-06-24T20:44:30.617976Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:10] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T20:44:30.618057Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:10] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T20:44:30.618129Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:10] at 72075186224037888 2025-06-24T20:44:30.618203Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:10] at 72075186224037888 is Executed 2025-06-24T20:44:30.618240Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:10] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T20:44:30.618267Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:10] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T20:44:30.618290Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: 
Trying to execute [0:10] at 72075186224037888 on unit ExecuteRead 2025-06-24T20:44:30.618482Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 5 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3000 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW } 2025-06-24T20:44:30.618969Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3000/18446744073709551615 2025-06-24T20:44:30.619076Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[16:552:2478], 5} after executionsCount# 1 2025-06-24T20:44:30.619212Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[16:552:2478], 5} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T20:44:30.619559Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[16:552:2478], 5} finished in read 2025-06-24T20:44:30.619703Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:10] at 72075186224037888 is Executed 2025-06-24T20:44:30.619742Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:10] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T20:44:30.619780Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:10] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T20:44:30.619820Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:10] at 72075186224037888 on unit CompletedOperations 2025-06-24T20:44:30.619886Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:10] at 72075186224037888 is Executed 2025-06-24T20:44:30.619917Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:10] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T20:44:30.619965Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:10] at 72075186224037888 has finished 2025-06-24T20:44:30.620057Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 >> ttl_delete_s3.py::TestDeleteS3Ttl::test_data_unchanged_after_ttl_change |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0__SYNC-pk_types6-all_types6-index6-Timestamp--SYNC] [GOOD] |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1__ASYNC-pk_types22-all_types22-index22-Uint32--ASYNC] [GOOD] |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1__SYNC-pk_types21-all_types21-index21-Uint32--SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0__ASYNC-pk_types19-all_types19-index19-Uint32--ASYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0_UNIQUE_SYNC-pk_types14-all_types14-index14-DyNumber-UNIQUE-SYNC] [GOOD] |91.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap |91.2%| [LD] {RESULT} 
$(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap |91.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap >> tier_delete.py::TestTierDelete::test_delete_s3_ttl >> data_migration_when_alter_ttl.py::TestDataMigrationWhenAlterTtl::test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0__SYNC-pk_types18-all_types18-index18-Uint32--SYNC] [GOOD] >> data_correctness.py::TestDataCorrectness::test >> test_ttl.py::TestTTL::test_ttl[table_Date_0__SYNC-pk_types30-all_types30-index30-Date--SYNC] [GOOD] |91.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage |91.2%| [LD] {RESULT} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage |91.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage |91.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |91.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |91.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0_UNIQUE_SYNC-pk_types8-all_types8-index8-Timestamp-UNIQUE-SYNC] [GOOD] |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0__SYNC-pk_types12-all_types12-index12-DyNumber--SYNC] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_column_after_table_creation_with_data_and_success |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1__SYNC-pk_types15-all_types15-index15-DyNumber--SYNC] [GOOD] |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1__SYNC-pk_types9-all_types9-index9-Timestamp--SYNC] [GOOD] >> DataShardReadIteratorBatchMode::MultipleRanges [GOOD] >> DataShardReadIteratorBatchMode::SelectingColumns |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1__ASYNC-pk_types16-all_types16-index16-DyNumber--ASYNC] [GOOD] |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1__SYNC-pk_types21-all_types21-index21-Uint32--SYNC] [GOOD] |91.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots |91.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots |91.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots >> ttl_unavailable_s3.py::TestUnavailableS3::test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_not_single_key_column_failure >> BackupRestore::SkipEmptyDirsOnRestore [GOOD] |91.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |91.2%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut 
|91.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0__ASYNC-pk_types19-all_types19-index19-Uint32--ASYNC] [GOOD] |91.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots |91.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots |91.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots |91.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |91.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |91.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0_UNIQUE_SYNC-pk_types14-all_types14-index14-DyNumber-UNIQUE-SYNC] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_and_remove_column_many_times_success >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_path_with_long_name_failed |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0_UNIQUE_SYNC-pk_types20-all_types20-index20-Uint32-UNIQUE-SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1__ASYNC-pk_types10-all_types10-index10-Timestamp--ASYNC] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_partition_config_options |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0__SYNC-pk_types18-all_types18-index18-Uint32--SYNC] [GOOD] >> test_copy_ops.py::TestSchemeShardCopyOps::test_given_table_when_create_copy_of_it_then_ok >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_and_drop_table_many_times_in_range |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1__ASYNC-pk_types16-all_types16-index16-DyNumber--ASYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1_UNIQUE_SYNC-pk_types11-all_types11-index11-Timestamp-UNIQUE-SYNC] [GOOD] |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test |91.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan |91.2%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan |91.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0_UNIQUE_SYNC-pk_types8-all_types8-index8-Timestamp-UNIQUE-SYNC] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_decreasing_number_of_generations_it_is_raise_error >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0_UNIQUE_SYNC-pk_types26-all_types26-index26-Uint64-UNIQUE-SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0__SYNC-pk_types0-all_types0-index0-Datetime--SYNC] [GOOD] >> 
TNodeBrokerTest::Test1000NodesSubscribers [GOOD] |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Date_0__SYNC-pk_types30-all_types30-index30-Date--SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::Test1000NodesSubscribers [GOOD] Test command err: 2025-06-24T20:41:15.288428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:41:15.288507Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_primary_key_and_other_scheme_then_ok >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_compaction_policy_options >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_column_after_table_creation_with_data_and_success [GOOD] >> DataShardReadIteratorBatchMode::SelectingColumns [GOOD] >> DataShardReadIteratorBatchMode::ShouldHandleReadAck |91.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut |91.3%| [LD] {RESULT} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut |91.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_cant_add_existing_column >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_remove_directory_that_does_not_exist_failure >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_delete_path_with_folder_then_get_error_response >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_path_with_long_name_failed [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_table_and_path_with_name_clash_unsuccessful >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_table_and_path_with_name_clash_unsuccessful [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0__ASYNC-pk_types7-all_types7-index7-Timestamp--ASYNC] [GOOD] |91.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots |91.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots |91.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_not_single_key_column_failure [GOOD] >> KqpNamedExpressions::NamedExpressionRandomDataQuery+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomDataQuery-UseSink >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_create_path_second_time_then_it_is_ok ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::Write_Only_Big_Messages_In_Wide_Transactions_Table 2025-06-24 20:44:40,871 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 20:44:43,305 WARNING 
devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 183160 48.9M 48.3M 25.4M test_tool run_ut @/home/runner/.ya/build/build_root/2btm/00455b/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/chunk9/testi 183629 6.0G 5.9G 5.9G └─ src-client-topic-ut-with_direct_read_ut --trace-path-append /home/runner/.ya/build/build_root/2btm/00455b/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test Test command err: 2025-06-24T20:34:43.461127Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617076532268695:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:43.519046Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:34:43.985064Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00455b/r3tmp/tmpbJfniS/pdisk_1.dat 2025-06-24T20:34:44.219475Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519617076532268484:2079] 1750797283360650 != 1750797283360653 2025-06-24T20:34:44.271684Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:44.273129Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:44.273209Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:44.278459Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19512, node 1 2025-06-24T20:34:44.452044Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:34:44.494753Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00455b/r3tmp/yandexZJ7ELC.tmp 2025-06-24T20:34:44.494775Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00455b/r3tmp/yandexZJ7ELC.tmp 2025-06-24T20:34:44.495042Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00455b/r3tmp/yandexZJ7ELC.tmp 2025-06-24T20:34:44.495162Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:44.592880Z INFO: TTestServer started on Port 5421 GrpcPort 19512 TClient is connected to server localhost:5421 PQClient connected to localhost:19512 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:45.202240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:34:45.258833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T20:34:45.264916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T20:34:48.464365Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617076532268695:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:48.475338Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:48.501546Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617098007105758:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:48.501758Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:48.502116Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617098007105770:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:48.506676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:48.539713Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617098007105772:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T20:34:48.767316Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617098007105838:2447] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:48.791891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:48.871929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:48.896001Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617098007105846:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:34:48.899040Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZThjZDdlZDgtODNlM2MxYTQtNzBmOWNjNTktNDAxMDZjOTA=, ActorId: [1:7519617098007105755:2298], ActorState: ExecuteState, TraceId: 01jyhtd10v9jktv3d9t6nhwavg, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:34:48.902003Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:34:48.973501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519617102302073421:2625] === CheckClustersList. Ok 2025-06-24T20:34:54.457625Z :WriteToTopic_Invalid_Session_Table INFO: TTopicSdkTestSetup started 2025-06-24T20:34:54.486748Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T20:34:54.504254Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7519617123776910086:2704] connected; active server actors: 1 2025-06-24T20:34:54.504509Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-24T20:34:54.505437Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-24T20:34:54.505554Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-24T20:34:54.506847Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-24T20:34:54.513473Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:34:54.514374Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3094: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-24T20:34:54.514603Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72075186224037892] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:34:54.514817Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72075186224037892] doesn't have tx in ... ssionId [] PartitionId [0] Generation [0] Getting partition location, partition 0 2025-06-24T20:44:43.219318Z node 18 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:166: new Describe partition request 2025-06-24T20:44:43.219459Z node 18 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1209: TDescribePartitionActor for request path: "/Root/topic_A" include_location: true 2025-06-24T20:44:43.219514Z node 18 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1219: TDescribePartitionActor[18:7519619651872850081:9258]: Bootstrap 2025-06-24T20:44:43.234001Z node 18 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [18:7519619651872850081:9258]: Request location 2025-06-24T20:44:43.239795Z node 18 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037914][topic_A] pipe [18:7519619651872850083:9259] connected; active server actors: 1 2025-06-24T20:44:43.240006Z node 18 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037914][topic_A] addPartitionToResponse tabletId 72075186224037897, partitionId 0, NodeId 18, Generation 1 2025-06-24T20:44:43.240048Z node 18 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [18:7519619651872850081:9258]: Got location 2025-06-24T20:44:43.241329Z node 18 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037914][topic_A] pipe [18:7519619651872850083:9259] disconnected; active server actors: 1 2025-06-24T20:44:43.241363Z node 18 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037914][topic_A] pipe [18:7519619651872850083:9259] disconnected no session 2025-06-24T20:44:43.247369Z :INFO: [/Root] TraceId [] SessionId [] PartitionId [0] Generation [0] Got PartitionLocation response. Status SUCCESS, proto: partition { active: true partition_location { node_id: 18 generation: 1 } } 2025-06-24T20:44:43.247442Z :TRACE: [/Root] TRACE_EVENT DescribePartitionResponse partition_id=0 active=1 pl_node_id=18 pl_generation=1 2025-06-24T20:44:43.247483Z :DEBUG: [/Root] TraceId [] SessionId [] PartitionId [0] Generation [0] GetPreferredEndpoint: partitionId 0, partitionNodeId 18 exists in the endpoint pool. 2025-06-24T20:44:43.247516Z :TRACE: [/Root] TRACE_EVENT PreferredPartitionLocation Endpoint= NodeId=18 Generation=1 2025-06-24T20:44:43.247555Z :INFO: [/Root] TraceId [] SessionId [] PartitionId [0] Generation [1] Start write session. 
Will connect to nodeId: 18 2025-06-24T20:44:43.251338Z node 18 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-24T20:44:43.251382Z node 18 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 643 2025-06-24T20:44:43.251594Z :DEBUG: [/Root] TraceId [] SessionId [] PartitionId [0] Generation [1] Write session: direct write to partition: 0, generation 1 2025-06-24T20:44:43.251736Z :DEBUG: [/Root] TraceId [] SessionId [] PartitionId [0] Generation [1] Write session: send init request: init_request { path: "topic_A" producer_id: "test-message_group_id_32_0" partition_with_generation { generation: 1 } } 2025-06-24T20:44:43.251777Z :TRACE: [/Root] TRACE_EVENT InitRequest pwg_partition_id=0 pwg_generation=1 2025-06-24T20:44:43.252122Z :DEBUG: [/Root] TraceId [] SessionId [] PartitionId [0] Generation [1] Write session: OnWriteDone gRpcStatusCode: 0 2025-06-24T20:44:43.252520Z node 18 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 643 sessionId: grpc read done: success: 1 data: init_request { path: "topic_A" producer_id: "test-message_group_id_32_0" partition_with_generation { generation: 1 } } 2025-06-24T20:44:43.252686Z node 18 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 643 path: "topic_A" producer_id: "test-message_group_id_32_0" partition_with_generation { generation: 1 } from ipv6:[::1]:52008 2025-06-24T20:44:43.252710Z node 18 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=643 sessionId= userAgent="topic server" ip=ipv6:[::1]:52008 proto=topic topic=topic_A durationSec=0 2025-06-24T20:44:43.252722Z node 18 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-24T20:44:43.252749Z node 18 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:475: session to partition: 0, generation: 1 2025-06-24T20:44:43.254378Z node 18 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 643 sessionId: describe result for acl check 2025-06-24T20:44:43.254662Z node 18 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-24T20:44:43.254676Z node 18 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-24T20:44:43.254691Z node 18 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-24T20:44:43.254719Z node 18 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: 
TPartitionChooser [18:7519619651872850088:9261] (SourceId=test-message_group_id_32_0, PreferedPartition=0) ReplyResult: Partition=0, SeqNo=0 2025-06-24T20:44:43.254747Z node 18 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 643 sessionId: partition: 0 expectedGeneration: 1 2025-06-24T20:44:43.259612Z node 18 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037897 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037897, NodeId 18, Generation: 1 2025-06-24T20:44:43.259698Z node 18 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037897] server connected, pipe [18:7519619651872850091:9261], now have 1 active actors on pipe 2025-06-24T20:44:43.259802Z node 18 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic_A' requestId: 2025-06-24T20:44:43.259834Z node 18 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037897] got client message batch for topic 'topic_A' partition 0 2025-06-24T20:44:43.259938Z node 18 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie test-message_group_id_32_0|118d2aec-706a3d42-bb8115b0-d0270f1f_0 generated for partition 0 topic 'topic_A' owner test-message_group_id_32_0 2025-06-24T20:44:43.260062Z node 18 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037897, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-24T20:44:43.260223Z node 18 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T20:44:43.260429Z node 18 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic_A' requestId: 2025-06-24T20:44:43.260462Z node 18 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037897] got client message batch for topic 'topic_A' partition 0 2025-06-24T20:44:43.260533Z node 18 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T20:44:43.260657Z node 18 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 643 partition: 0 MaxSeqNo: 0 sessionId: test-message_group_id_32_0|118d2aec-706a3d42-bb8115b0-d0270f1f_0 2025-06-24T20:44:43.261457Z :DEBUG: [/Root] TraceId [] SessionId [] PartitionId [0] Generation [1] Write session: OnReadDone gRpcStatusCode: 0 2025-06-24T20:44:43.261522Z :INFO: [/Root] TraceId [] SessionId [] PartitionId [0] Generation [1] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750797883261 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T20:44:43.261612Z :INFO: [/Root] TraceId [] SessionId [] PartitionId [0] Generation [1] Write session established. 
Init response: session_id: "test-message_group_id_32_0|118d2aec-706a3d42-bb8115b0-d0270f1f_0" 2025-06-24T20:44:43.261652Z :TRACE: [/Root] TRACE_EVENT InitResponse partition_id=0 session_id=test-message_group_id_32_0|118d2aec-706a3d42-bb8115b0-d0270f1f_0 2025-06-24T20:44:43.262038Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id_32_0|118d2aec-706a3d42-bb8115b0-d0270f1f_0] PartitionId [0] Generation [1] OnWrite: seqNo=1, txId={ydb://session/3?node_id=18&id=NzlmZGY3YzAtMzcwODk0ODAtMWFmNTg4ZDEtMjBjYjJhOGY=, 01jyhtz5t48ghgkbydmw2fzf79}, WriteCount=1, AckCount=0 2025-06-24T20:44:43.269451Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id_32_0|118d2aec-706a3d42-bb8115b0-d0270f1f_0] PartitionId [0] Generation [1] Write 1 messages with Id from 1 to 1 2025-06-24T20:44:43.273702Z node 18 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037907, Partition: {18, {18, 281474976710781}, 100027}, State: StateIdle] need more data for compaction. cumulativeSize=6501235, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:44:43.273998Z node 18 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037900, Partition: {7, {18, 281474976710777}, 100026}, State: StateIdle] need more data for compaction. cumulativeSize=6501222, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T20:44:43.276060Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id_32_0|118d2aec-706a3d42-bb8115b0-d0270f1f_0] PartitionId [0] Generation [1] Write session: try to update token 2025-06-24T20:44:43.276100Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id_32_0|118d2aec-706a3d42-bb8115b0-d0270f1f_0] PartitionId [0] Generation [1] Send 1 message(s) (0 left), first sequence number is 1 Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/00455b/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/chunk9/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/00455b/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/chunk9/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestore::SkipEmptyDirsOnRestore [GOOD] Test command err: 2025-06-24T20:39:57.992432Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519618423590926750:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:39:57.992496Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045fc/r3tmp/tmpZ7OzUu/pdisk_1.dat 2025-06-24T20:39:58.965157Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:39:58.998725Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:39:58.998828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:39:58.999307Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:39:59.008546Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:39:59.083063Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 15998, node 1 2025-06-24T20:39:59.357165Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:39:59.357189Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:39:59.357198Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:39:59.357802Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26090 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:39:59.771524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T20:39:59.999770Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519618427885894366:2209]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:39:59.999819Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:39:59.999883Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:7519618427885894366:2209], Recipient [1:7519618427885894366:2209]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:40:00.001042Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:40:01.003397Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519618427885894366:2209]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:40:01.003440Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:40:01.003499Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:7519618427885894366:2209], Recipient [1:7519618427885894366:2209]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:40:01.003515Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:40:02.006483Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519618427885894366:2209]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:40:02.006536Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:40:02.006588Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:7519618427885894366:2209], Recipient [1:7519618427885894366:2209]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:40:02.006602Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:40:02.926343Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618445065764239:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:02.926719Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519618445065764230:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:02.926752Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519618427885894236:2142] Handle TEvProposeTransaction 2025-06-24T20:40:02.926783Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:40:02.926793Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519618427885894236:2142] TxId# 281474976715658 ProcessProposeTransaction 2025-06-24T20:40:02.926836Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519618427885894236:2142] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7519618445065764260:2637] 2025-06-24T20:40:02.992667Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519618423590926750:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:40:02.992742Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:40:03.007032Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519618427885894366:2209]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:40:03.007069Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T20:40:03.007162Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:7519618427885894366:2209], Recipient [1:7519618427885894366:2209]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:40:03.007178Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T20:40:03.011041Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519618445065764260:2637] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-24T20:40:03.011131Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519618445065764260:2637] txid# 281474976715658 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T20:40:03.011169Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519618445065764260:2637] txid# 281474976715658 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-24T20:40:03.012634Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519618445065764260:2637] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T20:40:03.012740Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519618445065764260:2637] txid# 281474976715658 
TEvNavigateKeySet requested from SchemeCache 2025-06-24T20:40:03.012959Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519618445065764260:2637] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T20:40:03.013092Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519618445065764260:2637] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T20:40:03.013137Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519618445065764260:2637] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-24T20:40:03.013272Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519618445065764260:2637] txid# 281474976715658 HANDLE EvClientConnected 2025-06-24T20:40:03.013345Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# ... ) 2025-06-24T20:43:58.772000Z node 52 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64567 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:44:00.975472Z node 52 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:44:01.977040Z node 52 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[52:7519619450529703053:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:44:01.977177Z node 52 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; GRpc shutdown warning: left infly: 1, spent: 3.492837 sec 2025-06-24T20:44:11.360755Z node 52 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1482: TraceId: "01jyhtxwt398c9j5awhx5he6wx", Request deadline has expired for 5.156559s seconds (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. 
TCreateSessionResult::GetSession 2025-06-24T20:44:15.379806Z node 55 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[55:7519619532976550235:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:44:15.379926Z node 55 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045fc/r3tmp/tmpwty1LC/pdisk_1.dat 2025-06-24T20:44:16.437390Z node 55 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:44:16.651740Z node 55 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:44:16.721235Z node 55 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:44:16.784304Z node 55 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(55, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:44:16.784486Z node 55 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(55, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:44:17.019908Z node 55 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(55, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5745, node 55 2025-06-24T20:44:17.544708Z node 55 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:44:17.544746Z node 55 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:44:17.544769Z node 55 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:44:17.545071Z node 55 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23550 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T20:44:19.716331Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:44:20.365835Z node 55 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[55:7519619532976550235:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:44:20.365987Z node 55 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:44:27.846319Z node 55 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1482: TraceId: "01jyhtyf3ga80fqwjerzq7z1hn", Request deadline has expired for 2.910109s seconds (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession 2025-06-24T20:44:32.940944Z node 58 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[58:7519619605041621538:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:44:32.941053Z node 58 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045fc/r3tmp/tmp3y6rVL/pdisk_1.dat 2025-06-24T20:44:34.032223Z node 58 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T20:44:34.032315Z node 58 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:44:34.077020Z node 58 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:44:34.163738Z node 58 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(58, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:44:34.163914Z node 58 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(58, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:44:34.200211Z node 58 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(58, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27784, node 58 2025-06-24T20:44:34.888852Z node 58 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:44:34.888892Z node 58 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:44:34.888916Z node 58 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:44:34.889235Z node 58 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27164 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:44:37.086415Z node 58 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... Restore "/home/runner/.ya/build/build_root/2btm/0045fc/r3tmp/tmpC5nvz0/" to "/Root"Resolved db base path: "/Root"List of entries in the backup: [{"type":"Directory","path":"/home/runner/.ya/build/build_root/2btm/0045fc/r3tmp/tmpC5nvz0/"},{"type":"Directory","path":"/home/runner/.ya/build/build_root/2btm/0045fc/r3tmp/tmpC5nvz0/with_one_dir"},{"type":"Directory","path":"/home/runner/.ya/build/build_root/2btm/0045fc/r3tmp/tmpC5nvz0/with_one_file"}]Process "/home/runner/.ya/build/build_root/2btm/0045fc/r3tmp/tmpC5nvz0/with_one_dir"Restore empty directory "/home/runner/.ya/build/build_root/2btm/0045fc/r3tmp/tmpC5nvz0/with_one_dir" to "/Root/with_one_dir"Process "/home/runner/.ya/build/build_root/2btm/0045fc/r3tmp/tmpC5nvz0/with_one_file"Restore empty directory "/home/runner/.ya/build/build_root/2btm/0045fc/r3tmp/tmpC5nvz0/with_one_file" to "/Root/with_one_file"Restore completed successfully2025-06-24T20:44:37.943440Z node 58 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[58:7519619605041621538:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:44:37.943553Z node 58 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1__ASYNC-pk_types10-all_types10-index10-Timestamp--ASYNC] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_after_create_table_it_is_success |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1_UNIQUE_SYNC-pk_types11-all_types11-index11-Timestamp-UNIQUE-SYNC] [GOOD] |91.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots |91.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots |91.3%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_and_drop_table_many_times_in_range [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_many_directories_success >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_many_directories_success [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanLogs::GraceJoin-EnabledLogs 2025-06-24 20:44:48,726 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 20:44:48,983 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 186142 48.7M 48.1M 25.2M test_tool run_ut @/home/runner/.ya/build/build_root/2btm/003760/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk1/testing_out_stuff/test_tool.args 186469 1.7G 1.6G 1.5G └─ ydb-core-kqp-ut-runtime --trace-path-append /home/runner/.ya/build/build_root/2btm/003760/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk1/ytest.re Test command err: cwd: /home/runner/.ya/build/build_root/2btm/003760/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk1 Trying to start YDB, gRPC: 22996, MsgBus: 64762 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003760/r3tmp/tmp5KuZfW/pdisk_1.dat TServer::EnableGrpc on GrpcPort 22996, node 1 TClient is connected to server localhost:64762 TClient is connected to server localhost:64762 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... 
( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (OptionalType (DataType 'Uint64))) (let $4 (OptionalType (DataType 'String))) (let $5 '('('"_logical_id" '778) '('"_id" '"779740b9-481f95ad-fa3c8dd3-2ebcf484") '('"_wide_channels" (StructType '('"Key" $3) '('"Value" $4))))) (let $6 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($17) (block '( (let $18 (lambda '($19) (Member $19 '"Key") (Member $19 '"Value"))) (return (FromFlow (ExpandMap (ToFlow $17) $18))) ))) $5)) (let $7 '('1)) (let $8 (DqCnHashShuffle (TDqOutput $6 '0) $7 '1 '"HashV1")) (let $9 (StructType '('"t1.Key" $3) '('"t1.Value" $4) '('"t2.Key" $3) '('"t2.Value" $4))) (let $10 '('('"_logical_id" '676) '('"_id" '"c7cee117-d4d1f16c-8cef5a01-f13f7867") '('"_wide_channels" $9))) (let $11 (DqPhyStage '($8) (lambda '($20) (block '( (let $21 '('0 '0 '1 '1)) (let $22 '('0 '2 '1 '3)) (let $23 (GraceSelfJoinCore (ToFlow $20) 'Full $7 $7 $21 $22 '('"t1.Value") '('"t2.Value") '())) (return (FromFlow (WideSort $23 '('('1 (Bool 'true)))))) ))) $10)) (let $12 (DqCnMerge (TDqOutput $11 '0) '('('1 '"Asc")))) (let $13 (DqPhyStage '($12) (lambda '($24) (FromFlow (NarrowMap (ToFlow $24) (lambda '($25 $26 $27 $28) (AsStruct '('"t1.Key" $25) '('"t1.Value" $26) '('"t2.Key" $27) '('"t2.Value" $28)))))) '('('"_logical_id" '688) '('"_id" '"c29b02f3-f285f3f3-72bb0c0e-6dcb76dc")))) (let $14 '($6 $11 $13)) (let $15 '('"t1.Key" '"t1.Value" '"t2.Key" '"t2.Value")) (let $16 (DqCnResult (TDqOutput $13 '0) $15)) (return (KqpPhysicalQuery '((KqpPhysicalTx $14 '($16) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType $9) '0 '0)) '('('"type" '"query")))) ) cwd: /home/runner/.ya/build/build_root/2btm/003760/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk1 Trying to start YDB, gRPC: 3664, MsgBus: 20657 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003760/r3tmp/tmpmHlFqv/pdisk_1.dat TServer::EnableGrpc on GrpcPort 3664, node 2 TClient is connected to server localhost:20657 TClient is connected to server localhost:20657 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... 
Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/003760/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk1/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/003760/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk1/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) |91.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots |91.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots |91.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_partition_config_options [GOOD] |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0_UNIQUE_SYNC-pk_types26-all_types26-index26-Uint64-UNIQUE-SYNC] [GOOD] |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0__SYNC-pk_types0-all_types0-index0-Datetime--SYNC] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_decreasing_number_of_generations_it_is_raise_error [GOOD] |91.3%| [TA] $(B)/ydb/core/kqp/ut/runtime/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_compaction_policy_options [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_create_and_remove_directory_success >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_primary_key_and_other_scheme_then_ok [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Date_1__SYNC-pk_types33-all_types33-index33-Date--SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1_UNIQUE_SYNC-pk_types17-all_types17-index17-DyNumber-UNIQUE-SYNC] [GOOD] >> test_copy_ops.py::TestSchemeShardCopyOps::test_when_copy_table_partition_config >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_other_keys_then_ok >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_scheme_then_ok >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_directory_from_leaf_success >> DataShardReadIteratorBatchMode::ShouldHandleReadAck [GOOD] |91.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 |91.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_remove_directory_that_does_not_exist_failure [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_create_path_second_time_then_it_is_ok [GOOD] |91.3%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_and_remove_column_many_times_success [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_delete_path_with_folder_then_get_error_response [GOOD] |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test |91.3%| [TA] $(B)/ydb/core/mind/ut/test-results/unittest/{meta.json ... results_accumulator.log} |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test |91.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0__ASYNC-pk_types7-all_types7-index7-Timestamp--ASYNC] [GOOD] |91.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut |91.3%| [LD] {RESULT} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut |91.3%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/runtime/test-results/unittest/{meta.json ... 
results_accumulator.log} |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> ReadIteratorExternalBlobs::ExtBlobsWithSpecificKeys [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheBeginning ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIteratorBatchMode::ShouldHandleReadAck [GOOD] Test command err: 2025-06-24T20:41:58.679287Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:41:58.680030Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:41:58.680149Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:58.680208Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T20:41:58.680700Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:58.680743Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0029db/r3tmp/tmp560uG4/pdisk_1.dat 2025-06-24T20:41:59.541389Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:59.716981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:41:59.840113Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:59.840282Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:59.851419Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:59.851538Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:59.873136Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T20:41:59.873722Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:41:59.874168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:42:00.192330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:42:00.327936Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [2:1176:2339], Recipient [2:1201:2351]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:42:00.338329Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [2:1176:2339], Recipient [2:1201:2351]: NKikimr::TEvTablet::TEvRestored 
2025-06-24T20:42:00.338947Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:1201:2351] 2025-06-24T20:42:00.339677Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:42:00.403479Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [2:1176:2339], Recipient [2:1201:2351]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:42:00.417648Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:42:00.418144Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:42:00.422703Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T20:42:00.422831Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:42:00.422893Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:42:00.423377Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:42:00.428033Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:42:00.428182Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [2:1225:2351] in generation 1 2025-06-24T20:42:00.446250Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:42:00.535565Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:42:00.535833Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:42:00.536004Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:1228:2368] 2025-06-24T20:42:00.536109Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:42:00.536155Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:42:00.536206Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:42:00.536530Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:1201:2351], Recipient [2:1201:2351]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:42:00.536595Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:42:00.537026Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:42:00.537180Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:42:00.537316Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:42:00.537367Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:42:00.537416Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:42:00.537483Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:42:00.537534Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:42:00.537573Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:42:00.537637Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:42:00.612113Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [2:1232:2369], Recipient [2:1201:2351]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:42:00.612223Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:42:00.612289Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:1184:2732], serverId# [2:1232:2369], sessionId# [0:0:0] 2025-06-24T20:42:00.612656Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:759:2426], Recipient [2:1232:2369] 2025-06-24T20:42:00.612705Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:42:00.612855Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:42:00.613163Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T20:42:00.613237Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:42:00.613371Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:42:00.613453Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20:42:00.613507Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T20:42:00.613546Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T20:42:00.613599Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:42:00.613920Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T20:42:00.613964Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T20:42:00.614002Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T20:42:00.614055Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 
2025-06-24T20:42:00.614110Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T20:42:00.614157Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T20:42:00.614195Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T20:42:00.614243Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T20:42:00.614273Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-24T20:42:00.616838Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, ... 72075186224037889 to execution unit CreateVolatileSnapshot 2025-06-24T20:45:18.085546Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3000:281474976715664] at 72075186224037889 on unit CreateVolatileSnapshot 2025-06-24T20:45:18.085675Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3000:281474976715664] at 72075186224037889 is ExecutedNoMoreRestarts 2025-06-24T20:45:18.085709Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3000:281474976715664] at 72075186224037889 executing on unit CreateVolatileSnapshot 2025-06-24T20:45:18.085756Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3000:281474976715664] at 72075186224037889 to execution unit DropVolatileSnapshot 2025-06-24T20:45:18.085803Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3000:281474976715664] at 72075186224037889 on unit DropVolatileSnapshot 2025-06-24T20:45:18.085830Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3000:281474976715664] at 72075186224037889 is Executed 2025-06-24T20:45:18.085857Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3000:281474976715664] at 72075186224037889 executing on unit DropVolatileSnapshot 2025-06-24T20:45:18.085887Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3000:281474976715664] at 72075186224037889 to execution unit CompleteOperation 2025-06-24T20:45:18.085919Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3000:281474976715664] at 72075186224037889 on unit CompleteOperation 2025-06-24T20:45:18.086127Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3000:281474976715664] at 72075186224037889 is DelayComplete 2025-06-24T20:45:18.086181Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3000:281474976715664] at 72075186224037889 executing on unit CompleteOperation 2025-06-24T20:45:18.086234Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3000:281474976715664] at 72075186224037889 to execution unit CompletedOperations 2025-06-24T20:45:18.086288Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3000:281474976715664] at 72075186224037889 on unit CompletedOperations 2025-06-24T20:45:18.086334Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3000:281474976715664] at 72075186224037889 is Executed 2025-06-24T20:45:18.086972Z node 16 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1910: Advance execution plan for [3000:281474976715664] at 72075186224037889 executing on unit CompletedOperations 2025-06-24T20:45:18.087019Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3000:281474976715664] at 72075186224037889 has finished 2025-06-24T20:45:18.087071Z node 16 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:45:18.087117Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-06-24T20:45:18.087188Z node 16 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-24T20:45:18.087245Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-24T20:45:18.109247Z node 16 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3000} 2025-06-24T20:45:18.109470Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:45:18.109572Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3000:281474976715664] at 72075186224037888 on unit CompleteOperation 2025-06-24T20:45:18.109712Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3000 : 281474976715664] from 72075186224037888 at tablet 72075186224037888 send result to client [16:993:2780], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T20:45:18.109871Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:45:18.110432Z node 16 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 3000} 2025-06-24T20:45:18.110493Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T20:45:18.110525Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3000:281474976715664] at 72075186224037889 on unit CompleteOperation 2025-06-24T20:45:18.110606Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3000 : 281474976715664] from 72075186224037889 at tablet 72075186224037889 send result to client [16:993:2780], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T20:45:18.110656Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T20:45:18.112895Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [16:552:2478], Recipient [16:625:2529]: NKikimrTxDataShard.TEvRead ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3000 TxId: 281474976715664 } ResultFormat: FORMAT_ARROW MaxRows: 1 Hints: 1 RangesSize: 1 2025-06-24T20:45:18.113171Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-24T20:45:18.113305Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CheckRead 2025-06-24T20:45:18.113532Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-24T20:45:18.113619Z 
node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CheckRead 2025-06-24T20:45:18.113704Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T20:45:18.113781Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T20:45:18.113843Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037888 2025-06-24T20:45:18.113930Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-24T20:45:18.113963Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T20:45:18.113991Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T20:45:18.114024Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit ExecuteRead 2025-06-24T20:45:18.114241Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3000 TxId: 281474976715664 } ResultFormat: FORMAT_ARROW MaxRows: 1 Hints: 1 } 2025-06-24T20:45:18.114338Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3000/281474976715664 2025-06-24T20:45:18.114692Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-24T20:45:18.114726Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T20:45:18.114754Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T20:45:18.114783Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CompletedOperations 2025-06-24T20:45:18.114844Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-24T20:45:18.114868Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T20:45:18.114917Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037888 has finished 2025-06-24T20:45:18.114999Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T20:45:18.117225Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-24T20:45:18.118563Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553236, Sender [16:1009:2794], Recipient [16:625:2529]: NKikimr::TEvDataShard::TEvReadScanStarted 2025-06-24T20:45:18.119927Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553218, Sender [16:552:2478], Recipient 
[16:625:2529]: NKikimrTxDataShard.TEvReadAck ReadId: 1 SeqNo: 1 MaxRows: 2 MaxBytes: 10000 2025-06-24T20:45:18.120099Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1193: 72075186224037888 forwarding NKikimr::TEvDataShard::TEvReadAck to scan actor [16:1009:2794] 2025-06-24T20:45:18.122602Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553218, Sender [16:552:2478], Recipient [16:625:2529]: NKikimrTxDataShard.TEvReadAck ReadId: 1 SeqNo: 2 MaxRows: 100 MaxBytes: 10000 2025-06-24T20:45:18.122758Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1193: 72075186224037888 forwarding NKikimr::TEvDataShard::TEvReadAck to scan actor [16:1009:2794] 2025-06-24T20:45:18.123796Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553237, Sender [16:1009:2794], Recipient [16:625:2529]: NKikimr::TEvDataShard::TEvReadScanFinished 2025-06-24T20:45:18.124069Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [16:625:2529], Recipient [16:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:45:18.124132Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:45:18.124283Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:45:18.124386Z node 16 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:45:18.124483Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-24T20:45:18.124580Z node 16 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:45:18.124673Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:45:18.124788Z node 16 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:45:18.124920Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 |91.3%| [TA] {RESULT} $(B)/ydb/core/mind/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Date_1__SYNC-pk_types33-all_types33-index33-Date--SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1__ASYNC-pk_types28-all_types28-index28-Uint64--ASYNC] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_cant_add_existing_column [GOOD] |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_column_after_table_creation_with_data_and_success [GOOD] |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_decreasing_number_of_generations_it_is_raise_error [GOOD] |91.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme |91.3%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme |91.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme >> test_ttl.py::TestTTL::test_ttl[table_Date_1__ASYNC-pk_types34-all_types34-index34-Date--ASYNC] [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_create_and_remove_directory_success [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_single_key_column_failure >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_directory_from_leaf_success [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_table_that_doesnt_exist_failure [GOOD] |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_compaction_policy_options [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0__SYNC-pk_types24-all_types24-index24-Uint64--SYNC] [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_other_keys_then_ok [GOOD] |91.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |91.3%| [LD] {RESULT} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |91.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_partition_config_options [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_after_create_table_it_is_success [GOOD] |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1_UNIQUE_SYNC-pk_types17-all_types17-index17-DyNumber-UNIQUE-SYNC] [GOOD] |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Date-pk_types18-all_types18-index18-Date--] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] >> 
test_copy_table.py::TestCopyTable::test_copy_table[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_not_single_key_column_failure [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_scheme_then_ok [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] >> test_copy_ops.py::TestSchemeShardCopyOps::test_when_copy_table_partition_config [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1__ASYNC-pk_types4-all_types4-index4-Datetime--ASYNC] [GOOD] |91.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge |91.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge |91.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge |91.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots |91.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots |91.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0_UNIQUE_SYNC-pk_types2-all_types2-index2-Datetime-UNIQUE-SYNC] [GOOD] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_many_directories_success [GOOD] |91.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |91.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |91.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_table_and_path_with_name_clash_unsuccessful [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0__ASYNC-pk_types25-all_types25-index25-Uint64--ASYNC] [GOOD] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1__ASYNC-pk_types28-all_types28-index28-Uint64--ASYNC] [GOOD] >> test_copy_ops.py::TestSchemeShardCopyOps::test_given_table_when_create_copy_of_it_then_ok [GOOD] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Date_1__ASYNC-pk_types34-all_types34-index34-Date--ASYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_all_types-pk_types12-all_types12-index12---] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> 
TxUsage::TestRetentionOnLongTxAndBigMessages 2025-06-24 20:44:40,127 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 20:44:41,119 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 183025 47.9M 45.3M 24.6M test_tool run_ut @/home/runner/.ya/build/build_root/2btm/00456c/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/chunk2/testi 183438 1.4G 1.4G 1.3G └─ src-client-topic-ut-with_direct_read_ut --trace-path-append /home/runner/.ya/build/build_root/2btm/00456c/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test Test command err: 2025-06-24T20:34:42.868695Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519617071136487423:2203];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:42.869076Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:34:43.379356Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00456c/r3tmp/tmp2wve6i/pdisk_1.dat 2025-06-24T20:34:43.778721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:34:43.782827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:34:43.828387Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:34:43.838304Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:34:43.867377Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 4001, node 1 2025-06-24T20:34:44.124126Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00456c/r3tmp/yandexBhMTsX.tmp 2025-06-24T20:34:44.124152Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00456c/r3tmp/yandexBhMTsX.tmp 2025-06-24T20:34:44.124316Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00456c/r3tmp/yandexBhMTsX.tmp 2025-06-24T20:34:44.124432Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:34:44.251036Z INFO: TTestServer started on Port 2203 GrpcPort 4001 TClient is connected to server localhost:2203 PQClient connected to localhost:4001 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:34:44.798489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:34:44.831792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T20:34:44.859290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:34:45.037380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-24T20:34:47.427041Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617092611324525:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:47.427180Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:47.427314Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519617092611324538:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:34:47.432263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:34:47.466229Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519617092611324540:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T20:34:47.538710Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519617092611324604:2445] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:34:48.093441Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519617071136487423:2203];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:34:48.093758Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:34:48.129851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:48.134936Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519617092611324612:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:34:48.137269Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NTlmY2Y4ODktMzkwMjBhZDItNTIwMDQwOTAtZTU1NDg4NTM=, ActorId: [1:7519617092611324523:2298], ActorState: ExecuteState, TraceId: 01jyhtczywem2shmz66afbb37m, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:34:48.139879Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:34:48.184100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:34:48.295529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519617096906292196:2623] === CheckClustersList. Ok 2025-06-24T20:34:53.928855Z :Sinks_Oltp_WriteToTopic_2_Table INFO: TTopicSdkTestSetup started 2025-06-24T20:34:53.987263Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T20:34:54.047224Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7519617118381128855:2702] connected; active server actors: 1 2025-06-24T20:34:54.047509Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-24T20:34:54.048473Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-24T20:34:54.048595Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-24T20:34:54.049714Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-24T20:34:54.064900Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T20:34:54.065584Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3094: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-24T20:34:54.065898Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T20:34:54.066080Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72075186224037892] doesn't have tx info 2025-06-24T20:34:54 ... .cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[19:7519619605967699807:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:44:33.086283Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T20:44:33.086354Z node 19 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T20:44:33.460063Z node 19 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:44:33.463486Z node 19 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [19:7519619605967699584:2079] 1750797872862425 != 1750797872862428 2025-06-24T20:44:33.493760Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:44:33.493863Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:44:33.497535Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24253, node 19 2025-06-24T20:44:33.585157Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00456c/r3tmp/yandexhOus6v.tmp 2025-06-24T20:44:33.585201Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00456c/r3tmp/yandexhOus6v.tmp 2025-06-24T20:44:33.585397Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00456c/r3tmp/yandexhOus6v.tmp 2025-06-24T20:44:33.585635Z node 19 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:44:33.676910Z INFO: TTestServer started on Port 7596 GrpcPort 24253 2025-06-24T20:44:33.860360Z node 19 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7596 PQClient connected to localhost:24253 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:44:34.098372Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T20:44:34.135375Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T20:44:34.151514Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T20:44:34.331911Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-24T20:44:34.356474Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-24T20:44:37.927298Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[19:7519619605967699807:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:44:37.927410Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:44:39.592199Z node 19 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1482: TraceId: "01jyhtyx5xewexca4xxh8whwc1", Request deadline has expired for 0.228024s seconds 2025-06-24T20:44:39.595007Z node 19 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [19:7519619636032471453:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:44:39.595103Z node 19 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:44:39.595777Z node 19 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [19:7519619636032471465:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:44:39.604034Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:44:39.620857Z node 19 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [19:7519619636032471468:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T20:44:39.658876Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:44:39.705722Z node 19 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [19:7519619636032471606:2487] txid# 281474976710664, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:44:39.712800Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:44:39.785188Z node 19 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [19:7519619636032471633:2322], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T20:44:39.787512Z node 19 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=19&id=Y2U3ZjliNTQtYmQ2NTg4ZWMtMzkxMTg5ZmQtZDc1M2UwMWM=, ActorId: [19:7519619636032471451:2302], ActorState: ExecuteState, TraceId: 01jyhtz2986rp8ssz55fbn0v77, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T20:44:39.788081Z node 19 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T20:44:39.965003Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/00456c/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/chunk2/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/00456c/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/chunk2/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0__SYNC-pk_types24-all_types24-index24-Uint64--SYNC] [GOOD] >> 
test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_single_key_column_failure [GOOD] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_remove_directory_that_does_not_exist_failure [GOOD] |91.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots |91.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots |91.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_create_path_second_time_then_it_is_ok [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Date_0_UNIQUE_SYNC-pk_types32-all_types32-index32-Date-UNIQUE-SYNC] [GOOD] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_delete_path_with_folder_then_get_error_response [GOOD] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_and_remove_column_many_times_success [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_all_types-pk_types7-all_types7-index7---] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0__ASYNC-pk_types25-all_types25-index25-Uint64--ASYNC] [GOOD] |91.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots |91.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots |91.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots |91.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol |91.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol |91.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_after_create_table_it_is_success [GOOD] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> KqpNamedExpressions::NamedExpressionRandomDataQuery-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomInsertDataQuery+UseSink >> 
test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Date-pk_types13-all_types13-index13-Date--] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1__ASYNC-pk_types4-all_types4-index4-Datetime--ASYNC] [GOOD] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_primary_key_and_other_scheme_then_ok [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_create_and_remove_directory_success [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1__SYNC-pk_types3-all_types3-index3-Datetime--SYNC] [GOOD] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Date_0_UNIQUE_SYNC-pk_types32-all_types32-index32-Date-UNIQUE-SYNC] [GOOD] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_cant_add_existing_column [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0_UNIQUE_SYNC-pk_types2-all_types2-index2-Datetime-UNIQUE-SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1__SYNC-pk_types27-all_types27-index27-Uint64--SYNC] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_table_that_doesnt_exist_failure [GOOD] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_other_keys_then_ok [GOOD] |91.4%| 
[TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_scheme_then_ok [GOOD] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_single_key_column_failure [GOOD] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1__SYNC-pk_types27-all_types27-index27-Uint64--SYNC] [GOOD] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_copy_ops.py::TestSchemeShardCopyOps::test_when_copy_table_partition_config [GOOD] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1__SYNC-pk_types3-all_types3-index3-Datetime--SYNC] [GOOD] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1_UNIQUE_SYNC-pk_types29-all_types29-index29-Uint64-UNIQUE-SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] >> YdbSdkSessionsPool::StressTestAsync/1 [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp64-pk_types38-all_types38-index38---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval64-pk_types39-all_types39-index39---] |91.4%| [TA] $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Date-pk_types18-all_types18-index18-Date--] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval-pk_types35-all_types35-index35---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp-pk_types34-all_types34-index34---] >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheBeginning [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheEnd |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_copy_ops.py::TestSchemeShardCopyOps::test_given_table_when_create_copy_of_it_then_ok [GOOD] >> overlapping_portions.py::TestOverlappingPortions::test [GOOD] |91.4%| [TA] $(B)/ydb/tests/functional/scheme_shard/test-results/py3test/{meta.json ... results_accumulator.log} |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::StressTestAsync/1 [GOOD] |91.5%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint32-pk_types23-all_types23-index23---] |91.5%| [TA] {RESULT} $(B)/ydb/tests/functional/scheme_shard/test-results/py3test/{meta.json ... results_accumulator.log} |91.5%| [TA] $(B)/ydb/public/sdk/cpp/tests/integration/sessions_pool/test-results/gtest/{meta.json ... results_accumulator.log} |91.5%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/integration/sessions_pool/test-results/gtest/{meta.json ... results_accumulator.log} >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1_UNIQUE_SYNC-pk_types29-all_types29-index29-Uint64-UNIQUE-SYNC] [GOOD] >> KqpNamedExpressions::NamedExpressionRandomInsertDataQuery+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomInsertDataQuery-UseSink >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint8-pk_types24-all_types24-index24---] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal150-pk_types25-all_types25-index25---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/oom/py3test >> overlapping_portions.py::TestOverlappingPortions::test [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> unstable_connection.py::TestUnstableConnection::test [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_String-pk_types29-all_types29-index29---] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> 
test_split_merge.py::TestSplitMerge::test_merge_split[table_Utf8-pk_types30-all_types30-index30---] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int8-pk_types21-all_types21-index21---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal229-pk_types26-all_types26-index26---] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] >> KqpNamedExpressions::NamedExpressionRandomInsertDataQuery-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomSelect+UseSink >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> unstable_connection.py::TestUnstableConnection::test [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheEnd [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheMiddle >> test_split_merge.py::TestSplitMerge::test_merge_split[table_all_types-pk_types12-all_types12-index12---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] |91.5%| [TA] $(B)/ydb/tests/olap/oom/test-results/py3test/{meta.json ... results_accumulator.log} >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> KqpNamedExpressions::NamedExpressionRandomSelect+UseSink [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] >> KqpNamedExpressions::NamedExpressionRandomSelect-UseSink |91.5%| [TA] {RESULT} $(B)/ydb/tests/olap/oom/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] >> KqpNamedExpressions::NamedExpressionRandomSelect-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNamedExpressions::NamedExpressionRandomSelect-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 9285, MsgBus: 13960 2025-06-24T20:43:04.625825Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519619226140400459:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:43:04.626206Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003204/r3tmp/tmppQyZvH/pdisk_1.dat 2025-06-24T20:43:05.464378Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:43:05.468358Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519619226140400260:2079] 1750797784546873 != 1750797784546876 2025-06-24T20:43:05.506680Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:43:05.506846Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:43:05.594515Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:43:05.607684Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 9285, node 1 2025-06-24T20:43:05.863779Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:43:05.863834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:43:05.863852Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:43:05.863961Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13960 TClient is connected to server localhost:13960 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:43:07.301615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:43:07.330671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:43:07.352978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:43:07.599434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:43:07.794874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:43:07.903004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:43:09.611565Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519619226140400459:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:43:09.611665Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:43:10.069288Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519619251910205673:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:10.069430Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:10.526244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:10.605835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:10.675824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:10.734240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:10.823084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:10.910367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:10.990259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:43:11.098426Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519619256205173639:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:11.098522Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:11.098770Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519619256205173644:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:43:11.103093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:43:11.127596Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519619256205173646:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:43:11.212219Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519619256205173697:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } UPSERT INTO [[["db5360b0-552c-482c-8666-f9aada9acebb"]];[["ef0397f6-8e09-4eae-9195-cf6f696cda53"]]] [[["db5360b0-552c-482c-8666-f9aada9acebb"]];[["ef0397f6-8e09-4eae-9195-cf6f696cda53"]]] [["8a9d7c14-446a-4aa7-af9e-6e0930254fb3"];["c831055f-0fcf-4576-8668-2acd05d1b6cd"]] Trying to start YDB, gRPC: 16167, MsgBus: 24187 2025-06-24T20:43 ... re/tx/schemeshard/schemeshard__operation_create_table.cpp:667) [[2u]] Trying to start YDB, gRPC: 25967, MsgBus: 4619 2025-06-24T20:47:33.196048Z node 20 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[20:7519620380831237883:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:47:33.196176Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003204/r3tmp/tmpf0AGNp/pdisk_1.dat 2025-06-24T20:47:33.457577Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:47:33.457717Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:47:33.469729Z node 20 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:47:33.502434Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25967, node 20 2025-06-24T20:47:33.708156Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:47:33.708191Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:47:33.708206Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:47:33.708458Z node 20 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T20:47:34.210977Z node 20 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4619 TClient is connected to server localhost:4619 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:47:34.997182Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:47:35.013454Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:47:35.022747Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:47:35.156883Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:47:35.498920Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:47:35.662747Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:47:38.197771Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[20:7519620380831237883:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:47:38.197865Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:47:39.892370Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7519620406601043273:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:47:39.892552Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:47:39.954766Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:47:40.016795Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:47:40.096101Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:47:40.152370Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:47:40.216275Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:47:40.293470Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:47:40.407131Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:47:40.645495Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7519620410896011254:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:47:40.645663Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:47:40.646334Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7519620410896011259:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:47:40.655092Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:47:40.697852Z node 20 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [20:7519620410896011261:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:47:40.766932Z node 20 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [20:7519620410896011312:3439] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:47:43.012448Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) [[2u]] >> tier_delete.py::TestTierDelete::test_delete_s3_ttl [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime64-pk_types37-all_types37-index37---] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] >> data_correctness.py::TestDataCorrectness::test [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date-pk_types32-all_types32-index32---] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> tier_delete.py::TestTierDelete::test_delete_s3_ttl [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> 
test_copy_table.py::TestCopyTable::test_copy_table[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int32-pk_types20-all_types20-index20---] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1_UNIQUE_SYNC-pk_types5-all_types5-index5-Datetime-UNIQUE-SYNC] [FAIL] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_UUID-pk_types31-all_types31-index31---] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1_UNIQUE_SYNC-pk_types23-all_types23-index23-Uint32-UNIQUE-SYNC] [FAIL] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime-pk_types33-all_types33-index33---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] >> ttl_unavailable_s3.py::TestUnavailableS3::test [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheMiddle [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithFirstRowPreloaded |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1_UNIQUE_SYNC-pk_types5-all_types5-index5-Datetime-UNIQUE-SYNC] [FAIL] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_DyNumber-pk_types28-all_types28-index28---] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1_UNIQUE_SYNC-pk_types23-all_types23-index23-Uint32-UNIQUE-SYNC] [FAIL] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> ttl_unavailable_s3.py::TestUnavailableS3::test [GOOD] Test command err: !!! simulating S3 hang up -- sending SIGSTOP !!! 
simulating S3 recovery -- sending SIGCONT >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal3510-pk_types27-all_types27-index27---] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> data_correctness.py::TestDataCorrectness::test [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint64-pk_types22-all_types22-index22---] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] >> data_migration_when_alter_ttl.py::TestDataMigrationWhenAlterTtl::test [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] >> test_quoting.py::TestSqsQuotingWithKesus::test_creates_quoter >> test_copy_table.py::TestCopyTable::test_copy_table[table_all_types-pk_types12-all_types12-index12---] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] >> ttl_delete_s3.py::TestDeleteS3Ttl::test_data_unchanged_after_ttl_change [GOOD] >> ttl_delete_s3.py::TestDeleteS3Ttl::test_delete_s3_tiering >> test_retry.py::TestRetry::test_fail_first[kikimr0] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> data_migration_when_alter_ttl.py::TestDataMigrationWhenAlterTtl::test [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Date_1_UNIQUE_SYNC-pk_types35-all_types35-index35-Date-UNIQUE-SYNC] [FAIL] >> test_quoting.py::TestSqsQuotingWithKesus::test_creates_quoter [GOOD] >> test_cp_ic.py::TestCpIc::test_discovery >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-fifo] >> ReadIteratorExternalBlobs::ExtBlobsWithFirstRowPreloaded [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithFirstRowPreloadedWithReboot >> test_cp_ic.py::TestCpIc::test_discovery [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_creates_quoter [GOOD] 
|91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Date_1_UNIQUE_SYNC-pk_types35-all_types35-index35-Date-UNIQUE-SYNC] [FAIL] |91.5%| [TA] $(B)/ydb/tests/datashard/ttl/test-results/py3test/{meta.json ... results_accumulator.log} |91.5%| [TA] {RESULT} $(B)/ydb/tests/datashard/ttl/test-results/py3test/{meta.json ... results_accumulator.log} |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-fifo] [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/multi_plane/py3test >> test_cp_ic.py::TestCpIc::test_discovery [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_retry_high_rate.py::TestRetry::test_high_rate[kikimr0] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_not_create_kesus >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] >> test_dispatch.py::TestMapping::test_mapping >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-fifo] [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_not_create_kesus [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-std] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_all_types-pk_types7-all_types7-index7---] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v0] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_not_create_kesus [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-std] [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithFirstRowPreloadedWithReboot [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsMultipleColumns >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v0] [GOOD] >> 
test_copy_table.py::TestCopyTable::test_copy_table[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] |91.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> KqpMergeCn::TopSortBy_PK_Uint64_Limit3 |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-std] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> TGroupMapperTest::SimplestMirror3dc >> TGroupMapperTest::SimplestMirror3dc [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::SimplestMirror3dc [GOOD] |91.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest >> test_dispatch.py::TestMapping::test_mapping [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v1] >> KqpMergeCn::TopSortBy_PK_Uint64_Limit3 [GOOD] >> KqpMergeCn::TopSortBy_Int32_Limit3 >> DataCleanup::CleanupDataNoTables >> DataCleanup::CleanupDataNoTables [GOOD] >> DataCleanup::CleanupDataNoTablesWithRestart [GOOD] >> DataCleanup::CleanupDataLog >> DataCleanup::CleanupDataLog [GOOD] >> DataCleanup::CleanupData [GOOD] >> DataCleanup::CleanupDataMultipleFamilies >> DataCleanup::CleanupDataMultipleFamilies [GOOD] >> DataCleanup::CleanupDataMultipleTables >> DataCleanup::CleanupDataMultipleTables [GOOD] >> DataCleanup::CleanupDataWithFollowers [GOOD] >> DataCleanup::CleanupDataMultipleTimes >> DataCleanup::CleanupDataMultipleTimes [GOOD] >> DataCleanup::CleanupDataEmptyTable [GOOD] >> DataCleanup::CleanupDataWithRestarts >> DataCleanup::CleanupDataWithRestarts [GOOD] >> DataCleanup::CleanupDataRetryWithNotGreaterGenerations [GOOD] >> DataCleanup::CleanupDataWithTabletGCErrors >> DataCleanup::CleanupDataWithTabletGCErrors [GOOD] >> DataCleanup::CleanupDataWithSysTabletGCErrors >> DataCleanup::CleanupDataWithSysTabletGCErrors [GOOD] >> DBase::WideKey >> DBase::WideKey [GOOD] >> DBase::VersionPureMem >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-fifo] >> DBase::VersionPureMem [GOOD] >> DBase::VersionPureParts |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> DBase::VersionPureParts [GOOD] >> DBase::VersionCompactedMem |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v0] [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, 
release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] >> DBase::VersionCompactedMem [GOOD] >> DBase::VersionCompactedParts |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_all_types-pk_types7-all_types7-index7---] [GOOD] |91.6%| [TA] $(B)/ydb/tests/datashard/async_replication/test-results/py3test/{meta.json ... results_accumulator.log} >> DBase::VersionCompactedParts [GOOD] >> Memtable::Basics [GOOD] >> Memtable::BasicsReverse [GOOD] >> Memtable::Markers [GOOD] >> Memtable::Overlap [GOOD] >> Memtable::Wreck |91.6%| [TA] {RESULT} $(B)/ydb/tests/datashard/async_replication/test-results/py3test/{meta.json ... results_accumulator.log} >> Memtable::Wreck [GOOD] >> Memtable::Erased >> Memtable::Erased [GOOD] >> NFwd_TBlobs::MemTableTest [GOOD] >> NFwd_TBlobs::Lower [GOOD] >> NFwd_TBlobs::Sieve [GOOD] >> NFwd_TBlobs::SieveFiltered [GOOD] >> NFwd_TBlobs::Basics [GOOD] >> NFwd_TBlobs::Simple [GOOD] >> NFwd_TBlobs::Shuffle [GOOD] >> NFwd_TBlobs::Grow [GOOD] >> NFwd_TBlobs::Trace [GOOD] >> NFwd_TBlobs::Filtered [GOOD] >> NFwd_TBTreeIndexCache::Basics [GOOD] >> NFwd_TBTreeIndexCache::IndexPagesLocator [GOOD] >> NFwd_TBTreeIndexCache::GetTwice [GOOD] >> NFwd_TBTreeIndexCache::ForwardTwice [GOOD] >> NFwd_TBTreeIndexCache::Forward_OnlyUsed [GOOD] >> NFwd_TBTreeIndexCache::Skip_Done [GOOD] >> NFwd_TBTreeIndexCache::Skip_Done_None [GOOD] >> NFwd_TBTreeIndexCache::Skip_Keep [GOOD] >> NFwd_TBTreeIndexCache::Skip_Wait [GOOD] >> NFwd_TBTreeIndexCache::Trace_BTree [GOOD] >> NFwd_TBTreeIndexCache::Trace_Data [GOOD] >> NFwd_TBTreeIndexCache::End [GOOD] >> NFwd_TBTreeIndexCache::Slices [GOOD] >> NFwd_TBTreeIndexCache::ManyApplies >> NFwd_TBTreeIndexCache::ManyApplies [GOOD] >> NFwd_TFlatIndexCache::Basics [GOOD] >> NFwd_TFlatIndexCache::IndexPagesLocator [GOOD] >> NFwd_TFlatIndexCache::GetTwice [GOOD] >> NFwd_TFlatIndexCache::ForwardTwice [GOOD] >> NFwd_TFlatIndexCache::Skip_Done [GOOD] >> NFwd_TFlatIndexCache::Skip_Done_None [GOOD] >> NFwd_TFlatIndexCache::Skip_Keep [GOOD] >> NFwd_TFlatIndexCache::End [GOOD] >> KqpMergeCn::TopSortBy_Int32_Limit3 [GOOD] >> KqpMergeCn::TopSortBy_Float_Limit4 >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-fifo] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> NFwd_TFlatIndexCache::End [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:50:25.968343Z 00000.103 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.117 II| FAKE_ENV: Starting storage for BS group 0 00000.118 II| FAKE_ENV: Starting storage for BS group 1 00000.118 II| FAKE_ENV: Starting storage for BS group 2 00000.118 II| FAKE_ENV: Starting storage for BS group 3 00000.149 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.150 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.152 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.152 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {146b, 4} 00000.152 II| FAKE_ENV: DS.1 gone, left {105b, 3}, put {105b, 3} 00000.152 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.152 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.152 II| FAKE_ENV: All BS storage 
groups are stopped 00000.152 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.152 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:50:26.128344Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.020 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.021 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.021 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.021 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {292b, 8} 00000.021 II| FAKE_ENV: DS.1 gone, left {210b, 6}, put {210b, 6} 00000.021 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.021 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.021 II| FAKE_ENV: All BS storage groups are stopped 00000.021 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.021 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:50:26.154979Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.106 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.107 NN| TABLET_SAUSAGECACHE: Poison cache serviced 1 reqs hit {1 76b} miss {0 0b} 00000.107 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.107 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1181b, 13} 00000.107 II| FAKE_ENV: DS.1 gone, left {909b, 3}, put {1913b, 12} 00000.107 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {132b, 2} 00000.107 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {116b, 2} 00000.108 II| FAKE_ENV: All BS storage groups are stopped 00000.108 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.108 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:50:26.268215Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.020 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.021 NN| TABLET_SAUSAGECACHE: Poison cache serviced 1 reqs hit {1 102443b} miss {0 0b} 00000.021 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.021 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {753b, 11} 00000.021 II| FAKE_ENV: DS.1 gone, left {541b, 3}, put {103970b, 10} 00000.021 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.021 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.021 II| FAKE_ENV: All BS storage groups are stopped 00000.021 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.021 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 
2025-06-24T20:50:26.309765Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.014 II| FAKE_ENV: Starting storage for BS group 0 00000.015 II| FAKE_ENV: Starting storage for BS group 1 00000.015 II| FAKE_ENV: Starting storage for BS group 2 00000.015 II| FAKE_ENV: Starting storage for BS group 3 ... blocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A cookie 0 00000.080 II| TABLET_SAUSAGECACHE: Wakeup 1 ... unblocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A 00000.081 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.082 NN| TABLET_SAUSAGECACHE: Poison cache serviced 11 reqs hit {18 513007b} miss {0 0b} 00000.082 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.082 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {2095b, 23} 00000.082 II| FAKE_ENV: DS.1 gone, left {774b, 4}, put {210604b, 21} 00000.082 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {205178b, 4} 00000.082 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {102690b, 4} 00000.082 II| FAKE_ENV: All BS storage groups are stopped 00000.082 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 15.00s 00000.082 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 16}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:50:26.397466Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00000.060 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.061 NN| TABLET_SAUSAGECACHE: Poison cache serviced 3 reqs hit {3 307329b} miss {0 0b} 00000.061 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.061 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1828b, 23} 00000.061 II| FAKE_ENV: DS.1 gone, left {1247b, 3}, put {311467b, 22} 00000.061 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.061 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.061 II| FAKE_ENV: All BS storage groups are stopped 00000.061 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.061 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:50:26.464513Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.056 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 5 actors 00000.057 NN| TABLET_SAUSAGECACHE: Poison cache serviced 4 reqs hit {8 307836b} miss {0 0b} 00000.057 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.057 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.057 II| FAKE_ENV: DS.0 gone, left {57b, 2}, put {1436b, 31} 00000.057 II| FAKE_ENV: DS.1 gone, left {629b, 3}, put {310476b, 16} 00000.057 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.057 II| FAKE_ENV: All BS storage groups are stopped 00000.057 II| FAKE_ENV: Model stopped, hosted 5 actors, spent 0.000s 00000.058 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, 
stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:50:26.527855Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.051 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.052 NN| TABLET_SAUSAGECACHE: Poison cache serviced 2 reqs hit {2 194646b} miss {0 0b} 00000.052 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.052 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1571b, 23} 00000.052 II| FAKE_ENV: DS.1 gone, left {529b, 3}, put {197610b, 21} 00000.052 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.052 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.053 II| FAKE_ENV: All BS storage groups are stopped 00000.053 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.053 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:50:26.588746Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.014 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.015 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.015 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.015 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {326b, 7} 00000.015 II| FAKE_ENV: DS.1 gone, left {418b, 4}, put {453b, 5} 00000.015 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.016 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.016 II| FAKE_ENV: All BS storage groups are stopped 00000.016 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.016 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:50:26.610197Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 ... blocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A cookie 0 00000.077 II| TABLET_SAUSAGECACHE: Wakeup 1 ... 
unblocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A 00000.078 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.079 NN| TABLET_SAUSAGECACHE: Poison cache serviced 6 reqs hit {8 410030b} miss {0 0b} 00000.079 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.079 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.079 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1492b, 23} 00000.079 II| FAKE_ENV: DS.1 gone, left {504b, 4}, put {310786b, 20} 00000.079 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.079 II| FAKE_ENV: All BS storage groups are stopped 00000.079 II| FAKE_ENV: Model stopped, hosted 5 actors, spent 15.00s 00000.080 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 16}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T20:50:26.696643Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for ... 3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b 
{20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 
1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} 
| ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date32-pk_types36-all_types36-index36---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> KqpMergeCn::TopSortBy_Float_Limit4 [GOOD] >> KqpMergeCn::TopSortBy_String_Limit3 |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_dispatch.py::TestMapping::test_idle >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-fifo] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval64-pk_types39-all_types39-index39---] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v1] [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-fifo] [GOOD] >> KqpMergeCn::TopSortBy_String_Limit3 [GOOD] >> KqpMergeCn::TopSortBy_Utf8_Limit2 
|91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp64-pk_types38-all_types38-index38---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int64-pk_types19-all_types19-index19---] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp-pk_types34-all_types34-index34---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval64-pk_types39-all_types39-index39---] [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> KqpMergeCn::TopSortBy_Utf8_Limit2 [GOOD] >> KqpMergeCn::TopSortBy_Timestamp_Limit2 |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] |91.6%| [TA] $(B)/ydb/tests/datashard/copy_table/test-results/py3test/{meta.json ... results_accumulator.log} |91.6%| [TA] {RESULT} $(B)/ydb/tests/datashard/copy_table/test-results/py3test/{meta.json ... 
results_accumulator.log} |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp64-pk_types38-all_types38-index38---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval-pk_types35-all_types35-index35---] [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint32-pk_types23-all_types23-index23---] [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp-pk_types34-all_types34-index34---] [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v1] [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v1] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-fifo] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v1] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-fifo] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> KqpMergeCn::TopSortBy_Timestamp_Limit2 [GOOD] >> KqpMergeCn::TopSortBy_Interval_Limit3 |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v0] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint8-pk_types24-all_types24-index24---] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-std] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-std] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-fifo] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> 
test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval-pk_types35-all_types35-index35---] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint32-pk_types23-all_types23-index23---] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> KqpMergeCn::TopSortBy_Interval_Limit3 [GOOD] >> KqpMergeCn::TopSortBy_Decimal_Limit5 >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v1] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint8-pk_types24-all_types24-index24---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int8-pk_types21-all_types21-index21---] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-fifo] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v1] [GOOD] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-std] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-std] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v0] >> test_select.py::TestDML::test_select[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-std] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> 
test_select.py::TestDML::test_select[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] >> test_select.py::TestDML::test_select[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> test_select.py::TestDML::test_select[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v0] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int8-pk_types21-all_types21-index21---] [GOOD] >> KqpMergeCn::TopSortBy_Decimal_Limit5 [GOOD] >> test_select.py::TestDML::test_select[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpMergeCn::TopSortBy_Decimal_Limit5 [GOOD] Test command err: Trying to start YDB, gRPC: 25363, MsgBus: 29220 2025-06-24T20:50:19.102234Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519621093677964023:2190];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:50:19.102637Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031fc/r3tmp/tmp7hC4ak/pdisk_1.dat 2025-06-24T20:50:19.420911Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25363, node 1 2025-06-24T20:50:19.494673Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:50:19.495235Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:50:19.508637Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:50:19.539728Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:50:19.539749Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:50:19.539760Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:50:19.539864Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29220 TClient is connected to server localhost:29220 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T20:50:20.103369Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:50:20.234778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:50:20.248918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T20:50:20.264111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:50:20.453928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:50:20.666692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:50:20.760309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:50:22.272761Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519621106562867376:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:50:22.272887Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:50:22.606596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:50:22.641060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:50:22.672890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:50:22.719634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:50:22.774696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:50:22.854438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:50:22.923537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:50:23.049880Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519621110857835337:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:50:23.049959Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:50:23.050224Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519621110857835342:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:50:23.054456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:50:23.084231Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519621110857835344:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T20:50:23.175741Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519621110857835397:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:50:24.103544Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519621093677964023:2190];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:50:24.103617Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:50:24.251888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:50:25.227691Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discard ... 61814Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031fc/r3tmp/tmpN3Bqdi/pdisk_1.dat 2025-06-24T20:51:20.416783Z node 8 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:51:20.417986Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7519621356051804801:2079] 1750798280160859 != 1750798280160862 2025-06-24T20:51:20.443118Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:51:20.443476Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:51:20.446109Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29261, node 8 2025-06-24T20:51:20.551179Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T20:51:20.551212Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T20:51:20.551228Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T20:51:20.551437Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2843 2025-06-24T20:51:21.211368Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2843 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T20:51:21.754709Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T20:51:21.769738Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T20:51:21.781299Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:51:21.882458Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:51:22.139910Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T20:51:22.267995Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T20:51:25.162867Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[8:7519621356051804819:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T20:51:25.162970Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T20:51:26.152809Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519621381821610224:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:51:26.152957Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:51:26.266385Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:51:26.347432Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:51:26.401630Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:51:26.449574Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:51:26.505733Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:51:26.573601Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:51:26.640842Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:51:26.755600Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519621381821610891:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:51:26.755734Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:51:26.755792Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519621381821610896:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T20:51:26.763283Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T20:51:26.782099Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [8:7519621381821610898:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T20:51:26.847714Z node 8 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [8:7519621381821610950:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T20:51:28.683102Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:51:30.205248Z node 8 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750798290220, txId: 281474976710674] shutting down >> test_select.py::TestDML::test_select[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-std] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_dispatch.py::TestMapping::test_idle [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v1] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal229-pk_types26-all_types26-index26---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_String-pk_types29-all_types29-index29---] [GOOD] >> test_select.py::TestDML::test_select[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal150-pk_types25-all_types25-index25---] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-std] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v1] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Utf8-pk_types30-all_types30-index30---] [GOOD] >> test_select.py::TestDML::test_select[table_all_types-pk_types12-all_types12-index12---] >> test_select.py::TestDML::test_select[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v0] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/datashard/select/py3test |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-fifo] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-std] [GOOD] >> test_select.py::TestDML::test_select[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/multi_plane/py3test >> test_dispatch.py::TestMapping::test_idle [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v0] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal150-pk_types25-all_types25-index25---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] >> test_select.py::TestDML::test_as_table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> ReadIteratorExternalBlobs::ExtBlobsMultipleColumns 2025-06-24 20:51:48,266 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 20:51:48,514 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 283706 53.7M 53.6M 30.2M test_tool run_ut @/home/runner/.ya/build/build_root/2btm/00299d/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/testing_out_stuff/chunk9/testing_out_stuff/test_to 283999 1.7G 1.6G 1.5G └─ ydb-core-tx-datashard-ut_read_iterator --trace-path-append /home/runner/.ya/build/build_root/2btm/00299d/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/tes Test command err: 2025-06-24T20:41:54.579316Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T20:41:54.579776Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T20:41:54.580020Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00299d/r3tmp/tmpnU0iQ4/pdisk_1.dat 2025-06-24T20:41:55.023710Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T20:41:55.028051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T20:41:55.098194Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:41:55.109260Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750797710917724 != 1750797710917728 2025-06-24T20:41:55.165257Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T20:41:55.165438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T20:41:55.180473Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T20:41:55.281083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T20:41:55.365092Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:41:55.366471Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:41:55.367067Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T20:41:55.367647Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:41:55.416534Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:41:55.417464Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:41:55.417655Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T20:41:55.419692Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T20:41:55.419797Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T20:41:55.419869Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T20:41:55.420313Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:41:55.420479Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:41:55.420588Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T20:41:55.435953Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:41:55.471590Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T20:41:55.471810Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:41:55.471946Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T20:41:55.471985Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T20:41:55.472030Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T20:41:55.472084Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T20:41:55.472389Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:41:55.472441Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:41:55.472779Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T20:41:55.472889Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T20:41:55.472953Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T20:41:55.473020Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:41:55.473083Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T20:41:55.473123Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T20:41:55.473164Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T20:41:55.473197Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T20:41:55.473264Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T20:41:55.473421Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:41:55.473462Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:41:55.473519Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T20:41:55.473971Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T20:41:55.474023Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:41:55.474155Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T20:41:55.474411Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T20:41:55.474503Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T20:41:55.474634Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T20:41:55.474694Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T20:41:55.474738Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T20:41:55.474780Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T20:41:55.474818Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:41:55.475123Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T20:41:55.475191Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T20:41:55.475256Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T20:41:55.475296Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:41:55.475394Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T20:41:55.475444Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T20:41:55.475490Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T20:41:55.475525Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T20:41:55.475556Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T20:41:55.477164Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T20:41:55.477226Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T20:41:55.500049Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T20:41:55.500142Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T20:41:55.500180Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T20:41:55.500270Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL
LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL\");", parameters: 0b 2025-06-24T20:51:02.248660Z node 12 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhvaetwcbfzyhz5xr64p2ez, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=12&id=NTIwNTYyZDctODQyZThjNTUtZmViNWQxOWItYmE4MTExNTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:51:11.987411Z node 12 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhvar2c36b4v0y8dvvatfat, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=12&id=ZTk1ODkzM2UtYzc4NjY4ODgtNmRmNGM3MWMtNzVlNTVjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T20:51:22.000666Z node 12 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyhvb1jtdp4jrckdcaaqd8qd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=12&id=ZGYwM2EzYjEtZWE2YjdkYjAtMjhlZjAwNjgtOTJkYjRiYWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:51:31.642237Z node 12 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyhvbbbz9pn8y8c0nd13q8pe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=12&id=ODRjNWU0YzUtNGMwZDcwNDUtYTNkYmYzNC1kZGMyNjBiOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T20:51:41.697810Z node 12 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. Ctx: { TraceId: 01jyhvbmw022bnn5816e2bmc5j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=12&id=NWE4ZTc1MzMtMTgyODA1ZDYtOGNjYzUxMDYtNjc4ODAxZDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/00299d/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/testing_out_stuff/chunk9/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/00299d/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/testing_out_stuff/chunk9/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal229-pk_types26-all_types26-index26---] [GOOD] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |91.8%| [TA] $(B)/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/{meta.json ... results_accumulator.log} |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |91.8%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/{meta.json ... 
results_accumulator.log} |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_Date-pk_types18-all_types18-index18-Date--] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-std] [GOOD] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_String-pk_types29-all_types29-index29---] [GOOD] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Utf8-pk_types30-all_types30-index30---] [GOOD] >> test_select.py::TestDML::test_select[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] >> test_select.py::TestDML::test_as_table [GOOD] >> test_select.py::TestDML::test_select[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] >> test_select.py::TestDML::test_select[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] >> test_select.py::TestDML::test_select[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v0] [GOOD] |91.8%| [TA] $(B)/ydb/tests/functional/sqs/with_quotas/test-results/py3test/{meta.json ... results_accumulator.log} |91.8%| [TA] {RESULT} $(B)/ydb/tests/functional/sqs/with_quotas/test-results/py3test/{meta.json ... 
results_accumulator.log} |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-std] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-fifo] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v1] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v1] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_as_table [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v0] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-fifo] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime64-pk_types37-all_types37-index37---] [GOOD] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v1] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date-pk_types32-all_types32-index32---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_UUID-pk_types31-all_types31-index31---] [GOOD] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime64-pk_types37-all_types37-index37---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int32-pk_types20-all_types20-index20---] [GOOD] >> test_retry.py::TestRetry::test_fail_first[kikimr0] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-fifo] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime-pk_types33-all_types33-index33---] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v0] >> 
test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_auth_header >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_auth_header [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-std] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-std] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date-pk_types32-all_types32-index32---] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v0] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v0] [GOOD] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int32-pk_types20-all_types20-index20---] [GOOD] >> test_retry.py::TestRetry::test_low_rate[kikimr0] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_clouds >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v1] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_purge_queue_counters >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-std] [GOOD] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_UUID-pk_types31-all_types31-index31---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_DyNumber-pk_types28-all_types28-index28---] [GOOD] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime-pk_types33-all_types33-index33---] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v1] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-fifo] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-std] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v0] [GOOD] >> 
test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-std] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal3510-pk_types27-all_types27-index27---] [GOOD] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_auth_header [GOOD] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_DyNumber-pk_types28-all_types28-index28---] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v1] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-std] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-std] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-std] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint64-pk_types22-all_types22-index22---] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_clouds [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v0] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-std] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal3510-pk_types27-all_types27-index27---] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v0] [GOOD] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-fifo] [GOOD] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_purge_queue_counters [GOOD] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-std] [GOOD] >> 
test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-fifo] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-fifo] [GOOD] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-std] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-fifo] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint64-pk_types22-all_types22-index22---] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v1] [GOOD] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_sqs_action_counters >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v1] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v0] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v0] [GOOD] >> test_yandex_audit.py::TestCloudEvents::test_create_update_delete_one_queue >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-std] [GOOD] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-std] [GOOD] |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-fifo] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_purge_queue_counters [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-fifo] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-fifo] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-std] 
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v1] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v1] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-fifo] [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v0] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-std] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_sqs_action_counters [GOOD] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_reading_from_empty_queue |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-std] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-fifo] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v1] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-std] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-std] >> 
test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v1] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-std] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-std] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-std] [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-fifo] [GOOD] >> test_yandex_audit.py::TestCloudEvents::test_create_update_delete_one_queue [GOOD] >> test_retry_high_rate.py::TestRetry::test_high_rate[kikimr0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v1] [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_sqs_action_counters [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v0] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v1] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-fifo] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_ymq_send_read_delete |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-fifo] [GOOD] >> test_insert.py::TestInsert::test[read_data_during_bulk_upsert] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_reading_from_empty_queue [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-std] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_sending_duplicates >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_sending_duplicates [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-fifo] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> 
test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-std] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/multi_plane/py3test >> test_retry_high_rate.py::TestRetry::test_high_rate[kikimr0] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-std] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v0] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> ttl_delete_s3.py::TestDeleteS3Ttl::test_delete_s3_tiering 2025-06-24 20:54:22,207 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 20:54:22,778 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 358513 752M 754M 667M ydb-tests-olap-ttl_tiering --basetemp /home/runner/.ya/build/build_root/2btm/004937/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctest-modu 360322 2.9G 2.9G 2.3G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/2btm/004937/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk3 362607 540M 536M 508M └─ moto_server s3 --port 22755 Test command err: File "library/python/pytest/main.py", line 101, in main rc = pytest.main( File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main return wrap_session(config, _main) File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session session.exitstatus = doit(config, session) or 0 File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main config.hook.pytest_runtestloop(session=session) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", 
line 115, in pytest_runtest_protocol runtestprotocol(item, nextitem=nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 134, in runtestprotocol reports.append(call_and_report(item, "call", log)) File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report call = call_runtest_hook(item, when, **kwds) File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook return CallInfo.from_call( File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call result: Optional[TResult] = func() File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in lambda: ihook(item=item, **kwds), when=when, reraise=reraise File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 170, in pytest_runtest_call item.runtest() File "contrib/python/pytest/py3/_pytest/python.py", line 1844, in runtest self.ihook.pytest_pyfunc_call(pyfuncitem=self) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "library/python/pytest/plugins/ya.py", line 563, in pytest_pyfunc_call pyfuncitem.retval = testfunction(**testargs) File "ydb/tests/olap/ttl_tiering/ttl_delete_s3.py", line 255, in test_delete_s3_tiering self.ydb_client.query(""" File "ydb/tests/olap/common/ydb_client.py", line 24, in query return self.session_pool.execute_with_retries(statement) File "contrib/python/ydb/py3/ydb/query/pool.py", line 204, in execute_with_retries return retry_operation_sync(wrapped_callee, retry_settings) File "contrib/python/ydb/py3/ydb/retries.py", line 133, in retry_operation_sync for next_opt in opt_generator: File "contrib/python/ydb/py3/ydb/retries.py", line 94, in retry_operation_impl result = YdbRetryOperationFinalResult(callee(*args, **kwargs)) File "contrib/python/ydb/py3/ydb/query/pool.py", line 202, in wrapped_callee return [result_set for result_set in it] File "contrib/python/ydb/py3/ydb/_utilities.py", line 173, in __next__ return self._next() File "contrib/python/ydb/py3/ydb/_utilities.py", line 164, in _next res = self.wrapper(next(self.it)) File "contrib/python/grpcio/py3/grpc/_channel.py", line 475, in __next__ return self._next() File "contrib/python/grpcio/py3/grpc/_channel.py", line 872, in _next _common.wait(self._state.condition.wait, _response_ready) File "contrib/python/grpcio/py3/grpc/_common.py", line 150, in wait _wait_once(wait_fn, MAXIMUM_WAIT_TIMEOUT, spin_cb) File "contrib/python/grpcio/py3/grpc/_common.py", line 112, in _wait_once wait_fn(timeout=timeout) File "contrib/tools/python3/Lib/threading.py", line 359, in wait gotit = waiter.acquire(True, timeout) File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown traceback.print_stack(file=sys.stderr) Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait 
wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: ...d_root/2btm/004937/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/004937/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk3/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/004937', '--source-root', '/home/runner/.ya/build/build_root/2btm/004937/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/004937/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk3/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/ttl_tiering', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '3', '--partition-mode', 'SEQUENTIAL', '--dep-root', 'ydb/tests/olap/ttl_tiering', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("...d_root/2btm/004937/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/004937/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk3/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/004937', '--source-root', '/home/runner/.ya/build/build_root/2btm/004937/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/004937/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk3/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/ttl_tiering', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '3', '--partition-mode', 'SEQUENTIAL', '--dep-root', 'ydb/tests/olap/ttl_tiering', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', 
'--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout",), {}) >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-std] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-fifo] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-fifo] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v1] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> ttl_delete_s3.py::TestDeleteTtl::test_ttl_delete 2025-06-24 20:54:22,711 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 20:54:23,424 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 358590 629M 628M 543M ydb-tests-olap-ttl_tiering --basetemp /home/runner/.ya/build/build_root/2btm/004933/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctest-modu 360104 3.8G 3.8G 3.2G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/2btm/004933/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4 361272 388M 387M 355M └─ moto_server s3 --port 17914 Test command err: File "library/python/pytest/main.py", line 101, in main rc = pytest.main( File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main return wrap_session(config, _main) File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session session.exitstatus = doit(config, session) or 0 File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main config.hook.pytest_runtestloop(session=session) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol runtestprotocol(item, 
nextitem=nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 134, in runtestprotocol reports.append(call_and_report(item, "call", log)) File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report call = call_runtest_hook(item, when, **kwds) File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook return CallInfo.from_call( File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call result: Optional[TResult] = func() File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in lambda: ihook(item=item, **kwds), when=when, reraise=reraise File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 170, in pytest_runtest_call item.runtest() File "contrib/python/pytest/py3/_pytest/python.py", line 1844, in runtest self.ihook.pytest_pyfunc_call(pyfuncitem=self) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "library/python/pytest/plugins/ya.py", line 563, in pytest_pyfunc_call pyfuncitem.retval = testfunction(**testargs) File "ydb/tests/olap/ttl_tiering/ttl_delete_s3.py", line 367, in test_ttl_delete self.ydb_client.query(""" File "ydb/tests/olap/common/ydb_client.py", line 24, in query return self.session_pool.execute_with_retries(statement) File "contrib/python/ydb/py3/ydb/query/pool.py", line 204, in execute_with_retries return retry_operation_sync(wrapped_callee, retry_settings) File "contrib/python/ydb/py3/ydb/retries.py", line 133, in retry_operation_sync for next_opt in opt_generator: File "contrib/python/ydb/py3/ydb/retries.py", line 94, in retry_operation_impl result = YdbRetryOperationFinalResult(callee(*args, **kwargs)) File "contrib/python/ydb/py3/ydb/query/pool.py", line 202, in wrapped_callee return [result_set for result_set in it] File "contrib/python/ydb/py3/ydb/_utilities.py", line 173, in __next__ return self._next() File "contrib/python/ydb/py3/ydb/_utilities.py", line 164, in _next res = self.wrapper(next(self.it)) File "contrib/python/grpcio/py3/grpc/_channel.py", line 475, in __next__ return self._next() File "contrib/python/grpcio/py3/grpc/_channel.py", line 872, in _next _common.wait(self._state.condition.wait, _response_ready) File "contrib/python/grpcio/py3/grpc/_common.py", line 150, in wait _wait_once(wait_fn, MAXIMUM_WAIT_TIMEOUT, spin_cb) File "contrib/python/grpcio/py3/grpc/_common.py", line 112, in _wait_once wait_fn(timeout=timeout) File "contrib/tools/python3/Lib/threading.py", line 359, in wait gotit = waiter.acquire(True, timeout) File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown traceback.print_stack(file=sys.stderr) Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File 
"library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: ...d_root/2btm/004933/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/004933/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/004933', '--source-root', '/home/runner/.ya/build/build_root/2btm/004933/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/004933/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/ttl_tiering', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '4', '--partition-mode', 'SEQUENTIAL', '--dep-root', 'ydb/tests/olap/ttl_tiering', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("...d_root/2btm/004933/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/004933/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/004933', '--source-root', '/home/runner/.ya/build/build_root/2btm/004933/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/004933/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/ttl_tiering', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '4', '--partition-mode', 'SEQUENTIAL', '--dep-root', 'ydb/tests/olap/ttl_tiering', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 
'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout",), {}) |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-fifo] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-std] [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v0] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v0] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-std] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint64-pk_types3-all_types3-index3] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v0] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int8-pk_types2-all_types2-index2] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_select.py::TestDML::test_select[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-std] [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v0] [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-std] [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_sending_duplicates [GOOD] >> test_retry.py::TestRetry::test_low_rate[kikimr0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v0] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int32-pk_types1-all_types1-index1] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_ymq_send_read_delete [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v0] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint32-pk_types4-all_types4-index4] >> test_alter_compression.py::TestAlterCompression::test[alter_compression] |91.9%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-std] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int64-pk_types0-all_types0-index0] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Utf8-pk_types7-all_types7-index7] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] >> test_select.py::TestDML::test_select[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v0] >> test_select.py::TestDML::test_select[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-fifo] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v0] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v1] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-std] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-std] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_String-pk_types6-all_types6-index6] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-fifo] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v1] [FAIL] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v1] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/multi_plane/py3test >> test_retry.py::TestRetry::test_low_rate[kikimr0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_ymq_expiring_counters |91.9%| [TA] $(B)/ydb/tests/fq/multi_plane/test-results/py3test/{meta.json ... 
results_accumulator.log} |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] |91.9%| [TA] {RESULT} $(B)/ydb/tests/fq/multi_plane/test-results/py3test/{meta.json ... results_accumulator.log} >> test_select.py::TestDML::test_select[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] |91.9%| [TA] $(B)/ydb/tests/olap/ttl_tiering/test-results/py3test/{meta.json ... results_accumulator.log} >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v1-after_crutch_batch] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_ymq_send_read_delete [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v0] [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_read_update_write_load.py::TestReadUpdateWriteLoad::test[read_update_write_load] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_select.py::TestDML::test_select[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_ymq_expiring_counters [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v1] [GOOD] >> test_select.py::TestDML::test_select[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v1-fifo] >> test_partitioning.py::TestPartitionong::test_uniform_partitiona[table_ttl_Date-pk_types1-all_types1-index1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_receive_with_very_big_visibility_timeout[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_receive_with_very_big_visibility_timeout[tables_format_v0] [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-fifo] [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_receive_with_very_big_visibility_timeout[tables_format_v1] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-std] >> 
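The two ydb/tests/olap/ttl_tiering timeout reports above show both chunks killed by the 600-second wrapper while blocked inside ydb_client.query() -> ydb.QuerySessionPool.execute_with_retries(), waiting on a gRPC result stream. Below is a minimal sketch of that query path, assuming a locally reachable YDB node; it is not the suite's actual ydb/tests/olap/common/ydb_client.py, and the endpoint and database values are placeholders rather than values from this log.

    # Minimal sketch of the call chain the tracebacks above are blocked in:
    # ydb_client.query() -> QuerySessionPool.execute_with_retries(), which
    # retries the statement and materializes all result sets over gRPC.
    import ydb

    ENDPOINT = "grpc://localhost:2136"   # assumption: local single-node ydbd, not from the log
    DATABASE = "/Root"                   # assumption: default database path

    def run_statement(statement: str):
        driver = ydb.Driver(endpoint=ENDPOINT, database=DATABASE)
        try:
            driver.wait(timeout=15)              # fail fast if the endpoint is unreachable
            pool = ydb.QuerySessionPool(driver)
            # execute_with_retries() returns only after every result set has been
            # consumed; this is the call the 600-second test wrapper timed out on.
            return pool.execute_with_retries(statement)
        finally:
            driver.stop()

Because execute_with_retries() blocks until the statement finishes, a query over a large tiered table can hold the test past the wrapper timeout, which is consistent with the process trees above still showing ydbd and moto_server alive at termination.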
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_receive_with_very_big_visibility_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_works[tables_format_v0] |91.9%| [TA] {RESULT} $(B)/ydb/tests/olap/ttl_tiering/test-results/py3test/{meta.json ... results_accumulator.log} >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-fifo] [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-std] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-std] [GOOD] >> test_select.py::TestDML::test_select[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date32-pk_types36-all_types36-index36---] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v0] [GOOD] Test command err: run test with cloud_id=CLOUD_FOR_folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e folder_id=folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e iam_token=usr_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e cloud_account=acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e 2025-06-24T20:54:06.328163Z: {"paths":"[/Root/TenantSQS/CLOUD_FOR_folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e]","tx_id":"281474976720692","database":"/Root/TenantSQS","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"CREATE DIRECTORY","component":"schemeshard"} ======================================== 2025-06-24T20:54:06.599395Z: {"paths":"[/Root/TenantSQS/CLOUD_FOR_folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e/000000000000000100hf]","tx_id":"281474976720698","database":"/Root/TenantSQS","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"CREATE DIRECTORY","component":"schemeshard"} ======================================== 2025-06-24T20:54:06.662564Z: 
{"paths":"[/Root/TenantSQS/CLOUD_FOR_folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e/000000000000000100hf/v2]","tx_id":"281474976720699","database":"/Root/TenantSQS","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"CREATE DIRECTORY","component":"schemeshard"} ======================================== 2025-06-24T20:54:06.795170Z: {"request_id":"3dcbbd0c-a22c99bc-91783734-ae4a0edf","cloud_id":"CLOUD_FOR_folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e","status":"SUCCESS","account":"CLOUD_FOR_folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e","subject":"fake_user_sid@as","operation":"create_queue","component":"ymq","folder_id":"folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e"} ======================================== 2025-06-24T20:54:07.571346Z: {"request_id":"3dcbbd0c-a22c99bc-91783734-ae4a0edf","permission":"ymq.queues.create","id":"9910344353512807793$CreateMessageQueue$1750798446512","idempotency_id":"{none}","cloud_id":"CLOUD_FOR_folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e","masked_token":"CLOU****cb8e (F198DC6B)","auth_type":"{none}","created_at":"1750798446512","status":"SUCCESS","subject":"fake_user_sid@as","queue":"000000000000000100hf","labels":"{}","operation":"CreateMessageQueue","folder_id":"folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e","component":"ymq"} ======================================== 2025-06-24T20:54:07.638668Z: {"request_id":"3dcbbd0c-a22c99bc-91783734-ae4a0edf","permission":"ymq.queues.create","id":"9910344353512807793$CreateMessageQueue$1750798446512","idempotency_id":"{none}","cloud_id":"CLOUD_FOR_folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e","masked_token":"CLOU****cb8e (F198DC6B)","auth_type":"{none}","created_at":"1750798446512","status":"SUCCESS","subject":"fake_user_sid@as","queue":"000000000000000100hf","labels":"{}","operation":"CreateMessageQueue","folder_id":"folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e","component":"ymq"} ======================================== 2025-06-24T20:54:09.773691Z: {"request_id":"52a7842d-69dd51a9-5bb0105d-68294c92","permission":"ymq.queues.setAttributes","id":"8791268568117092294$UpdateMessageQueue$1750798447994","idempotency_id":"{none}","cloud_id":"CLOUD_FOR_folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e","masked_token":"CLOU****cb8e (F198DC6B)","auth_type":"{none}","created_at":"1750798447994","status":"SUCCESS","subject":"fake_user_sid@as","queue":"000000000000000100hf","labels":"{}","operation":"UpdateMessageQueue","folder_id":"folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e","component":"ymq"} ======================================== 2025-06-24T20:54:09.773726Z: {"request_id":"8c033330-28a12de7-fd7230db-19f02922","permission":"ymq.queues.setAttributes","id":"13145790857496983581$UpdateMessageQueue$1750798449081","idempotency_id":"{none}","cloud_id":"CLOUD_FOR_folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e","masked_token":"CLOU****cb8e (F198DC6B)","auth_type":"{none}","created_at":"1750798449081","status":"SUCCESS","subject":"fake_user_sid@as","queue":"000000000000000100hf","labels":"{}","operation":"UpdateMessageQueue","folder_id":"folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e","component":"ymq"} ======================================== 2025-06-24T20:54:10.300215Z: 
{"paths":"[/Root/TenantSQS/CLOUD_FOR_folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e/000000000000000100hf/v2]","tx_id":"281474976720722","database":"/Root/TenantSQS","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"DROP DIRECTORY","component":"schemeshard"} ======================================== 2025-06-24T20:54:10.346461Z: {"paths":"[/Root/TenantSQS/CLOUD_FOR_folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e/000000000000000100hf]","tx_id":"281474976720723","database":"/Root/TenantSQS","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"DROP DIRECTORY","component":"schemeshard"} ======================================== 2025-06-24T20:54:10.376063Z: {"request_id":"51588c6a-ed0f842e-c8b1f21e-b6cc972d","cloud_id":"CLOUD_FOR_folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e","status":"SUCCESS","account":"CLOUD_FOR_folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e","subject":"fake_user_sid@as","queue":"000000000000000100hf","resource_id":"000000000000000100hf","operation":"delete_queue","component":"ymq","folder_id":"folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e"} ======================================== 2025-06-24T20:54:11.822203Z: {"request_id":"51588c6a-ed0f842e-c8b1f21e-b6cc972d","permission":"ymq.queues.delete","id":"11814536273812558288$DeleteMessageQueue$1750798450128","idempotency_id":"{none}","cloud_id":"CLOUD_FOR_folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e","masked_token":"CLOU****cb8e (F198DC6B)","auth_type":"{none}","created_at":"1750798450128","status":"SUCCESS","subject":"fake_user_sid@as","queue":"000000000000000100hf","labels":"{}","operation":"DeleteMessageQueue","folder_id":"folder_acc_5e257eb5-513d-11f0-a738-d00d1cfdcb8e","component":"ymq"} ======================================== ======================================== >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-std] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v0] >> test_simple.py::TestSimple::test[alter_table] >> test_partitioning.py::TestPartitionong::test_uniform_partitiona[table_ttl_Date-pk_types0-all_types0-index0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-std] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v0] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v1] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v0] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date32-pk_types36-all_types36-index36---] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> test_alter_tiering.py::TestAlterTiering::test[many_tables] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v1] [GOOD] >> test_select.py::TestDML::test_select[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v1] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-std] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_ymq_expiring_counters [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v0] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-fifo] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v1] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v1] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int64-pk_types19-all_types19-index19---] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-std] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> 
test_select.py::TestDML::test_select[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v1] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v1] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-fifo] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v1] >> test_simple.py::TestSimple::test[alter_table] [GOOD] >> test_simple.py::TestSimple::test[alter_tablestore] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_works[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_change_visibility] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_to_nonexistent_queue[tables_format_v0] >> test_select.py::TestDML::test_select[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v1-std] [GOOD] >> 
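The long run of test_generic_messaging / test_fifo_messaging cases above drives YDB's SQS-compatible HTTP API: create queue, send, receive, change visibility, delete, plus FIFO group semantics. A minimal boto3 sketch of the send/receive/delete round trip those cases cover is shown below; the endpoint URL and credentials are placeholders for a local setup, not values taken from this log, and this is not the suite's own helper code.

    # Illustrative send/receive/delete round trip against an SQS-compatible endpoint.
    import boto3

    sqs = boto3.client(
        "sqs",
        endpoint_url="http://localhost:8771",   # assumption: locally exposed SQS-compatible port
        region_name="ru-central1",
        aws_access_key_id="unused",
        aws_secret_access_key="unused",
    )

    queue_url = sqs.create_queue(QueueName="example-queue")["QueueUrl"]
    sqs.send_message(QueueUrl=queue_url, MessageBody="hello")

    received = sqs.receive_message(QueueUrl=queue_url, WaitTimeSeconds=1, MaxNumberOfMessages=1)
    for message in received.get("Messages", []):
        # Deleting by receipt handle is the step the *_deduplicates_receipt_handle cases probe.
        sqs.delete_message(QueueUrl=queue_url, ReceiptHandle=message["ReceiptHandle"])

The FIFO variants additionally pass MessageGroupId and MessageDeduplicationId to send_message, which is roughly what the validates_group_id and validates_deduplication_id cases above exercise.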
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v0] >> test_scheme_load.py::TestSchemeLoad::test[create_and_drop_tables] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v0] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_to_nonexistent_queue[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_to_nonexistent_queue[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_to_nonexistent_queue[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v0] >> test_insert.py::TestInsert::test[read_data_during_bulk_upsert] [GOOD] >> test_insert.py::TestInsert::test_multi[read_data_during_bulk_upsert] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v0-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_change_visibility] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint8-pk_types5-all_types5-index5] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v0-fifo] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v0] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v0] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int64-pk_types19-all_types19-index19---] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_works[tables_format_v0] |92.0%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] >> test_simple.py::TestSimple::test[alter_tablestore] [GOOD] >> test_simple.py::TestSimple::test[table] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-fifo] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v1-after_crutch_batch] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v1-standard_mode] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-std] >> test_select.py::TestDML::test_select[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_simple.py::TestSimple::test[table] [GOOD] >> test_simple.py::TestSimple::test[tablestores] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v0-fifo] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v1-standard_mode] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v0] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v1-fifo] |92.0%| [TM] 
{asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_body[tables_format_v0] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithPath::test_micro_batch_read[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_group_id[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v1] >> test_simple.py::TestSimple::test[tablestores] [GOOD] >> test_simple.py::TestSimple::test_multi[alter_table] [GOOD] >> test_simple.py::TestSimple::test_multi[alter_tablestore] [GOOD] >> test_simple.py::TestSimple::test_multi[table] [GOOD] >> test_simple.py::TestSimple::test_multi[tablestores] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_group_id[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_group_id[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-std] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_group_id[tables_format_v1] [GOOD] >> test_select.py::TestDML::test_select[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v0] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v0] [GOOD] >> 
test_select.py::TestDML::test_select[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-std] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_change_visibility] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_all_types-pk_types12-all_types12-index12---] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_change_visibility] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint64-pk_types3-all_types3-index3] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-std] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v0] >> 
test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int8-pk_types2-all_types2-index2] [GOOD] >> test_select.py::TestDML::test_select[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_receive_with_very_big_visibility_timeout[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_delete_message_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_receive_with_very_big_visibility_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_receive_with_very_big_visibility_timeout[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithPath::test_micro_batch_read[tables_format_v0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-fifo] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithPath::test_micro_batch_read[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_receive_with_very_big_visibility_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-fifo] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithPath::test_micro_batch_read[tables_format_v1] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-std] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v1] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int32-pk_types1-all_types1-index1] [GOOD] >> 
test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v0] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_all_types-pk_types12-all_types12-index12---] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int64-pk_types0-all_types0-index0] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint32-pk_types4-all_types4-index4] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-std] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint64-pk_types3-all_types3-index3] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v1] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Utf8-pk_types7-all_types7-index7] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_works[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_body[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_body[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_body[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_expires_on_wait_timeout[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-std] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int8-pk_types2-all_types2-index2] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_change_visibility] [GOOD] >> 
test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_delete_message] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v1] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v1-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_works[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_delete_message] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_timeout_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-fifo] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test >> test_simple.py::TestSimple::test_multi[tablestores] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-fifo] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-std] [GOOD] >> test_scheme_load.py::TestSchemeLoad::test[create_and_drop_tables] [GOOD] >> test_scheme_load.py::TestSchemeLoad::test_multi[create_and_drop_tables] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_queue_attributes[tables_format_v0] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Utf8-pk_types7-all_types7-index7] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_String-pk_types6-all_types6-index6] [GOOD] >> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v1] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-std] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int32-pk_types1-all_types1-index1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v1-fifo] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_change_visibility] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_invalid_queue_url[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithTenant::test_micro_batch_read[tables_format_v0] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint32-pk_types4-all_types4-index4] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-std] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_change_visibility] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_expires_on_wait_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_expires_on_wait_timeout[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-fifo] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v1-fifo] [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> 
test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int64-pk_types0-all_types0-index0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_change_visibility] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_delete_message] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v1-after_crutch_batch] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_String-pk_types6-all_types6-index6] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v0-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_delete_message] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-after_crutch_batch] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes_batch[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_timeout_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v1] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_timeout_works[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_can_read_from_different_groups[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_to_nonexistent_queue[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_queue_attributes[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v1-std] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-fifo] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_queue_attributes[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v1-std] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_to_nonexistent_queue[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_to_nonexistent_queue[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_queue_attributes[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_read_dont_stall[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_to_nonexistent_queue[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v0] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v1] >> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v1] [GOOD] >> test_select.py::TestDML::test_select[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_expires_on_wait_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] [GOOD] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v0-std] >> test_partitioning.py::TestPartitionong::test_uniform_partitiona[table_ttl_Date-pk_types1-all_types1-index1] 
[GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_delete_message_works[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_delete_message_works[tables_format_v1] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] [GOOD] >> test_select.py::TestDML::test_select[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attribute_value[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_invalid_queue_url[tables_format_v0] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test >> test_scheme_load.py::TestSchemeLoad::test_multi[create_and_drop_tables] [GOOD] >> test_select.py::TestDML::test_select[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes_batch[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attribute_value[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attribute_value[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attribute_value[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v0] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-fifo] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-std] [GOOD] |92.1%| [TA] $(B)/ydb/tests/datashard/split_merge/test-results/py3test/{meta.json ... results_accumulator.log} >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v0-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes_batch[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes_batch[tables_format_v1] |92.1%| [TA] {RESULT} $(B)/ydb/tests/datashard/split_merge/test-results/py3test/{meta.json ... results_accumulator.log} >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_empty_queue_url[tables_format_v0] >> test_partitioning.py::TestPartitionong::test_uniform_partitiona[table_ttl_Date-pk_types0-all_types0-index0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_timeout_works[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_invalid_queue_url[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_invalid_queue_url[tables_format_v1] >> test_select.py::TestDML::test_select[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_delete_message_works[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_fifo_read_delete_single_message >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_partial_delete_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_invalid_queue_url[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_list_queues_of_nonexistent_user[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-fifo] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithTenant::test_micro_batch_read[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_list_queues_of_nonexistent_user[tables_format_v0] [GOOD] >> 
test_fifo_messaging.py::TestSqsFifoMicroBatchesWithTenant::test_micro_batch_read[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_list_queues_of_nonexistent_user[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_list_queues_of_nonexistent_user[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes_batch[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithTenant::test_micro_batch_read[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_read_dont_stall[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_read_dont_stall[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_fifo_read_delete_single_message [GOOD] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_only_single_read_infly_from_fifo >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_can_read_from_different_groups[tables_format_v0] [GOOD] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_can_read_from_different_groups[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-std] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v0-long_polling-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v1-fifo] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-after_crutch_batch] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_only_single_read_infly_from_fifo [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_invalid_queue_url[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_invalid_queue_url[tables_format_v1] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] [GOOD] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_uniform_partitiona[table_ttl_Date-pk_types1-all_types1-index1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_invalid_queue_url[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_list_queues_of_nonexistent_user[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_list_queues_of_nonexistent_user[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_list_queues_of_nonexistent_user[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_can_read_from_different_groups[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_list_queues_of_nonexistent_user[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v1-fifo] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-std] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes_batch[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes_batch[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_partial_delete_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_partial_delete_works[tables_format_v1] |92.1%| [TA] $(B)/ydb/tests/functional/sqs/cloud/test-results/py3test/{meta.json ... results_accumulator.log} |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v1] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-fifo] [GOOD] |92.1%| [TA] {RESULT} $(B)/ydb/tests/functional/sqs/cloud/test-results/py3test/{meta.json ... 
results_accumulator.log} |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_uniform_partitiona[table_ttl_Date-pk_types0-all_types0-index0] [GOOD] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-std] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v1-after_crutch_batch] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v1-standard_mode] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes_batch[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v1-by_deduplication_id] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v1-std] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v0] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v1-standard_mode] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v0] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_read_dont_stall[tables_format_v1] [GOOD] >> 
test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-after_crutch_batch] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-std] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v1] [GOOD] >> test_disk.py::TestSafeDiskBreak::test_erase_method >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v0] [GOOD] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint8-pk_types5-all_types5-index5] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v0-fifo] >> test_select.py::TestDML::test_select[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_partial_delete_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_message_visibility_with_very_big_timeout[tables_format_v0] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v0-long_polling-fifo] [GOOD] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v0-long_polling-std] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-fifo] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_empty_queue_url[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_empty_queue_url[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v1] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v0-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_and_read_to_different_groups[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_empty_queue_url[tables_format_v1] [GOOD] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_message_visibility_with_very_big_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_message_visibility_with_very_big_timeout[tables_format_v1] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_insert.py::TestInsert::test_multi[read_data_during_bulk_upsert] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-fifo] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v1-fifo] [GOOD] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v0-fifo] [GOOD] |92.1%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/tools/nemesis/ut/py3test |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v0-std] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v0-long_polling-std] [GOOD] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-fifo] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_message_visibility_with_very_big_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-std] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v1-std] [GOOD] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_and_read_to_different_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_and_read_to_different_groups[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v0-std] [GOOD] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-std] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v1] [GOOD] >> 
test_tablet.py::TestMassiveKills::test_tablets_are_ok_after_many_kills >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_attribute_name[tables_format_v0] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v0] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v0] [GOOD] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-fifo] [GOOD] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-std] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_attribute_name[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_attribute_name[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_and_read_to_different_groups[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v1] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Date-pk_types13-all_types13-index13-Date--] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_attribute_name[tables_format_v1] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v0] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-std] [GOOD] >> 
test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] >> test_read_update_write_load.py::TestReadUpdateWriteLoad::test[read_update_write_load] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v0] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] >> test_read_update_write_load.py::TestReadUpdateWriteLoad::test_multi[read_update_write_load] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v1] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v1] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint8-pk_types5-all_types5-index5] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v0] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v1] |92.2%| [TA] $(B)/ydb/tests/datashard/partitioning/test-results/py3test/{meta.json ... results_accumulator.log} >> test_s3.py::TestYdbS3TTL::test_s3[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] |92.2%| [TA] {RESULT} $(B)/ydb/tests/datashard/partitioning/test-results/py3test/{meta.json ... 
results_accumulator.log} |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v1-by_deduplication_id] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v1-content_based] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v1] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-fifo] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_message_visibility_with_very_big_timeout[tables_format_v0] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v1] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-fifo] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Date-pk_types13-all_types13-index13-Date--] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v1-content_based] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_delete_message_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-fifo] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-std] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_message_visibility_with_very_big_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_message_visibility_with_very_big_timeout[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-after_crutch_batch] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-std] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_message_visibility_with_very_big_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-fifo] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_attribute_name[tables_format_v0] >> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] [GOOD] |92.2%| [TM] {asan, 
default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-fifo] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v1] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_all_types-pk_types7-all_types7-index7---] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_attribute_name[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_attribute_name[tables_format_v1] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] [GOOD] >> test_disk.py::TestSafeDiskBreak::test_erase_method [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_attribute_name[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_delete_message_works[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_delete_message_works[tables_format_v1] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test >> test_insert.py::TestInsert::test_multi[read_data_during_bulk_upsert] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v1] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-by_deduplication_id] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v1] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v1] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-by_deduplication_id] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-std] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-std] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-std] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_disk.py::TestSafeDiskBreak::test_erase_method [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_all_types-pk_types7-all_types7-index7---] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-by_deduplication_id] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-content_based] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_delete_message_works[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_fifo_read_delete_single_message >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-by_deduplication_id] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-content_based] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test >> test_read_update_write_load.py::TestReadUpdateWriteLoad::test_multi[read_update_write_load] [GOOD] Test command err: Was written: 0.0 MiB, Speed: 0.0 
MiB/s Step 1. only write Write: 10% 4129 30% 4129 50% 4129 90% 4129 99% 4129 ms Write: 10% 3307 30% 3307 50% 3307 90% 3307 99% 3307 ms Write: 10% 17496 30% 17496 50% 17496 90% 17496 99% 17496 ms Write: 10% 16373 30% 16373 50% 16373 90% 16373 99% 16373 ms Write: 10% 15928 30% 15928 50% 15928 90% 15928 99% 15928 ms Write: 10% 16373 30% 16373 50% 16373 90% 16373 99% 16373 ms Write: 10% 14180 30% 14180 50% 14180 90% 14180 99% 14180 ms Write: 10% 13353 30% 13353 50% 13353 90% 13353 99% 13353 ms Write: 10% 12720 30% 12720 50% 12720 90% 12720 99% 12720 ms Write: 10% 13471 30% 13471 50% 13471 90% 13471 99% 13471 ms Write: 10% 17022 30% 17022 50% 17022 90% 17022 99% 17022 ms Write: 10% 16945 30% 16945 50% 16945 90% 16945 99% 16945 ms Write: 10% 11725 30% 11725 50% 11725 90% 11725 99% 11725 ms Write: 10% 16601 30% 16601 50% 16601 90% 16601 99% 16601 ms Write: 10% 15685 30% 15685 50% 15685 90% 15685 99% 15685 ms Write: 10% 14691 30% 14691 50% 14691 90% 14691 99% 14691 ms Write: 10% 13605 30% 13605 50% 13605 90% 13605 99% 13605 ms Write: 10% 14811 30% 14811 50% 14811 90% 14811 99% 14811 ms Write: 10% 11649 30% 11649 50% 11649 90% 11649 99% 11649 ms Write: 10% 8023 30% 8023 50% 8023 90% 8023 99% 8023 ms Write: 10% 14576 30% 14576 50% 14576 90% 14576 99% 14576 ms Write: 10% 14029 30% 14029 50% 14029 90% 14029 99% 14029 ms Write: 10% 17486 30% 17486 50% 17486 90% 17486 99% 17486 ms Write: 10% 14032 30% 14032 50% 14032 90% 14032 99% 14032 ms Write: 10% 13924 30% 13924 50% 13924 90% 13924 99% 13924 ms Write: 10% 12325 30% 12325 50% 12325 90% 12325 99% 12325 ms Write: 10% 12975 30% 12975 50% 12975 90% 12975 99% 12975 ms Write: 10% 13075 30% 13075 50% 13075 90% 13075 99% 13075 ms Write: 10% 11055 30% 11055 50% 11055 90% 11055 99% 11055 ms Write: 10% 8475 30% 8475 50% 8475 90% 8475 99% 8475 ms Write: 10% 5148 30% 5148 50% 5148 90% 5148 99% 5148 ms Write: 10% 7544 30% 7544 50% 7544 90% 7544 99% 7544 ms Write: 10% 6143 30% 6143 50% 6143 90% 6143 99% 6143 ms Write: 10% 7271 30% 7271 50% 7271 90% 7271 99% 7271 ms Write: 10% 7925 30% 7925 50% 7925 90% 7925 99% 7925 ms Write: 10% 14584 30% 14584 50% 14584 90% 14584 99% 14584 ms Write: 10% 7788 30% 7788 50% 7788 90% 7788 99% 7788 ms Write: 10% 4112 30% 4112 50% 4112 90% 4112 99% 4112 ms Write: 10% 2665 30% 2665 50% 2665 90% 2665 99% 2665 ms Write: 10% 6822 30% 6822 50% 6822 90% 6822 99% 6822 ms Write: 10% 9684 30% 9684 50% 9684 90% 9684 99% 9684 ms Write: 10% 2871 30% 2871 50% 2871 90% 2871 99% 2871 ms Write: 10% 10441 30% 10441 50% 10441 90% 10441 99% 10441 ms Write: 10% 2514 30% 2514 50% 2514 90% 2514 99% 2514 ms Write: 10% 11022 30% 11022 50% 11022 90% 11022 99% 11022 ms Write: 10% 8637 30% 8637 50% 8637 90% 8637 99% 8637 ms Write: 10% 2986 30% 2986 50% 2986 90% 2986 99% 2986 ms Write: 10% 4097 30% 4097 50% 4097 90% 4097 99% 4097 ms Write: 10% 3651 30% 3651 50% 3651 90% 3651 99% 3651 ms Write: 10% 5685 30% 5685 50% 5685 90% 5685 99% 5685 ms Write: 10% 4001 30% 4001 50% 4001 90% 4001 99% 4001 ms Write: 10% 2600 30% 2600 50% 2600 90% 2600 99% 2600 ms Write: 10% 2649 30% 2649 50% 2649 90% 2649 99% 2649 ms Write: 10% 3887 30% 3887 50% 3887 90% 3887 99% 3887 ms Write: 10% 5230 30% 5230 50% 5230 90% 5230 99% 5230 ms Write: 10% 2396 30% 2396 50% 2396 90% 2396 99% 2396 ms Write: 10% 4264 30% 4264 50% 4264 90% 4264 99% 4264 ms Write: 10% 2420 30% 2420 50% 2420 90% 2420 99% 2420 ms Write: 10% 10256 30% 10256 50% 10256 90% 10256 99% 10256 ms Write: 10% 3301 30% 3301 50% 3301 90% 3301 99% 3301 ms Write: 10% 2532 30% 2532 50% 2532 90% 2532 99% 2532 ms Write: 10% 2819 30% 2819 
50% 2819 90% 2819 99% 2819 ms Write: 10% 4663 30% 4663 50% 4663 90% 4663 99% 4663 ms Write: 10% 5243 30% 5243 50% 5243 90% 5243 99% 5243 ms Step 2. read write Write: 10% 3024 30% 3024 50% 3024 90% 3024 99% 3024 ms Write: 10% 4365 30% 4365 50% 4365 90% 4365 99% 4365 ms Write: 10% 8004 30% 8004 50% 8004 90% 8004 99% 8004 ms Write: 10% 10477 30% 10477 50% 10477 90% 10477 99% 10477 ms Write: 10% 13116 30% 13116 50% 13116 90% 13116 99% 13116 ms Write: 10% 12501 30% 12501 50% 12501 90% 12501 99% 12501 ms Write: 10% 12654 30% 12654 50% 12654 90% 12654 99% 12654 ms Write: 10% 11658 30% 11658 50% 11658 90% 11658 99% 11658 ms Write: 10% 14532 30% 14532 50% 14532 90% 14532 99% 14532 ms Write: 10% 16415 30% 16415 50% 16415 90% 16415 99% 16415 ms Write: 10% 18427 30% 18427 50% 18427 90% 18427 99% 18427 ms Write: 10% 17523 30% 17523 50% 17523 90% 17523 99% 17523 ms Write: 10% 15719 30% 15719 50% 15719 90% 15719 99% 15719 ms Write: 10% 15125 30% 15125 50% 15125 90% 15125 99% 15125 ms Write: 10% 14027 30% 14027 50% 14027 90% 14027 99% 14027 ms Write: 10% 13986 30% 13986 50% 13986 90% 13986 99% 13986 ms Write: 10% 12815 30% 12815 50% 12815 90% 12815 99% 12815 ms Write: 10% 16180 30% 16180 50% 16180 90% 16180 99% 16180 ms Write: 10% 12839 30% 12839 50% 12839 90% 12839 99% 12839 ms Write: 10% 12423 30% 12423 50% 12423 90% 12423 99% 12423 ms Write: 10% 16133 30% 16133 50% 16133 90% 16133 99% 16133 ms Write: 10% 14212 30% 14212 50% 14212 90% 14212 99% 14212 ms Write: 10% 12508 30% 12508 50% 12508 90% 12508 99% 12508 ms Write: 10% 12247 30% 12247 50% 12247 90% 12247 99% 12247 ms Write: 10% 15295 30% 15295 50% 15295 90% 15295 99% 15295 ms Write: 10% 9831 30% 9831 50% 9831 90% 9831 99% 9831 ms Write: 10% 9758 30% 9758 50% 9758 90% 9758 99% 9758 ms Write: 10% 13617 30% 13617 50% 13617 90% 13617 99% 13617 ms Write: 10% 12775 30% 12775 50% 12775 90% 12775 99% 12775 ms Write: 10% 12934 30% 12934 50% 12934 90% 12934 99% 12934 ms Write: 10% 15270 30% 15270 50% 15270 90% 15270 99% 15270 ms Write: 10% 5951 30% 5951 50% 5951 90% 5951 99% 5951 ms Write: 10% 4592 30% 4592 50% 4592 90% 4592 99% 4592 ms Write: 10% 15393 30% 15393 50% 15393 90% 15393 99% 15393 ms Write: 10% 5851 30% 5851 50% 5851 90% 5851 99% 5851 ms Write: 10% 3803 30% 3803 50% 3803 90% 3803 99% 3803 ms Write: 10% 14643 30% 14643 50% 14643 90% 14643 99% 14643 ms Write: 10% 6348 30% 6348 50% 6348 90% 6348 99% 6348 ms Write: 10% 13003 30% 13003 50% 13003 90% 13003 99% 13003 ms Write: 10% 17572 30% 17572 50% 17572 90% 17572 99% 17572 ms Write: 10% 10461 30% 10461 50% 10461 90% 10461 99% 10461 ms Write: 10% 7442 30% 7442 50% 7442 90% 7442 99% 7442 ms Write: 10% 5137 30% 5137 50% 5137 90% 5137 99% 5137 ms Write: 10% 9975 30% 9975 50% 9975 90% 9975 99% 9975 ms Write: 10% 13450 30% 13450 50% 13450 90% 13450 99% 13450 ms Write: 10% 3145 30% 3145 50% 3145 90% 3145 99% 3145 ms Write: 10% 1702 30% 1702 50% 1702 90% 1702 99% 1702 ms Write: 10% 5099 30% 5099 50% 5099 90% 5099 99% 5099 ms Write: 10% 4688 30% 4688 50% 4688 90% 4688 99% 4688 ms Write: 10% 3261 30% 3261 50% 3261 90% 3261 99% 3261 ms Write: 10% 2131 30% 2131 50% 2131 90% 2131 99% 2131 ms Write: 10% 2943 30% 2943 50% 2943 90% 2943 99% 2943 ms Write: 10% 2594 30% 2594 50% 2594 90% 2594 99% 2594 ms Write: 10% 2996 30% 2996 50% 2996 90% 2996 99% 2996 ms Write: 10% 8966 30% 8966 50% 8966 90% 8966 99% 8966 ms Write: 10% 1978 30% 1978 50% 1978 90% 1978 99% 1978 ms Write: 10% 2552 30% 2552 50% 2552 90% 2552 99% 2552 ms Write: 10% 2052 30% 2052 50% 2052 90% 2052 99% 2052 ms Write: 10% 2832 30% 2832 50% 2832 90% 2832 
99% 2832 ms Write: 10% 1944 30% 1944 50% 1944 90% 1944 99% 1944 ms Write: 10% 2461 30% 2461 50% 2461 90% 2461 99% 2461 ms Write: 10% 1994 30% 1994 50% 1994 90% 1994 99% 1994 ms Write: 10% 5174 30% 5174 50% 5174 90% 5174 99% 5174 ms Write: 10% 2610 30% 2610 50% 2610 90% 2610 99% 2610 ms Read: 10% 7025 30% 10997 50% 14968 90% 22912 99% 24699 ms Step 3. write modify Write: 10% 1802 30% 1802 50% 1802 90% 1802 99% 1802 ms Write: 10% 2016 30% 2016 50% 2016 90% 2016 99% 2016 ms Write: 10% 1936 30% 1936 50% 1936 90% 1936 99% 1936 ms Write: 10% 2456 30% 2456 50% 2456 90% 2456 99% 2456 ms Write: 10% 5053 30% 5053 50% 5053 90% 5053 99% 5053 ms Was written: 16.69921875 MiB, Speed: 0.2783203125 MiB/s Write: 10% 6330 30% 6330 50% 6330 90% 6330 99% 6330 ms Write: 10% 5246 30% 5246 50% 5246 90% 5246 99% 5246 ms Write: 10% 12535 30% 12535 50% 12535 90% 12535 99% 12535 ms Write: 10% 13657 30% 13657 50% 13657 90% 13657 99% 13657 ms Write: 10% 13463 30% 13463 50% 13463 90% 13463 99% 13463 ms Write: 10% 16483 30% 16483 50% 16483 90% 16483 99% 16483 ms Write: 10% 16390 30% 16390 50% 16390 90% 16390 99% 16390 ms Write: 10% 16617 30% 16617 50% 16617 90% 16617 99% 16617 ms Write: 10% 15676 30% 15676 50% 15676 90% 15676 99% 15676 ms Write: 10% 13770 30% 13770 50% 13770 90% 13770 99% 13770 ms Write: 10% 14114 30% 14114 50% 14114 90% 14114 99% 14114 ms Write: 10% 15770 30% 15770 50% 15770 90% 15770 99% 15770 ms Write: 10% 15662 30% 15662 50% 15662 90% 15662 99% 15662 ms Write: 10% 15507 30% 15507 50% 15507 90% 15507 99% 15507 ms Write: 10% 13357 30% 13357 50% 13357 90% 13357 99% 13357 ms Write: 10% 15131 30% 15131 50% 15131 90% 15131 99% 15131 ms Write: 10% 13079 30% 13079 50% 13079 90% 13079 99% 13079 ms Write: 10% 15536 30% 15536 50% 15536 90% 15536 99% 15536 ms Write: 10% 15774 30% 15774 50% 15774 90% 15774 99% 15774 ms Write: 10% 14080 30% 14080 50% 14080 90% 14080 99% 14080 ms Write: 10% 13802 30% 13802 50% 13802 90% 13802 99% 13802 ms Write: 10% 7767 30% 7767 50% 7767 90% 7767 99% 7767 ms Write: 10% 10267 30% 10267 50% 10267 90% 10267 99% 10267 ms Write: 10% 8706 30% 8706 50% 8706 90% 8706 99% 8706 ms Write: 10% 13652 30% 13652 50% 13652 90% 13652 99% 13652 ms Write: 10% 2720 30% 2720 50% 2720 90% 2720 99% 2720 ms Write: 10% 13174 30% 13174 50% 13174 90% 13174 99% 13174 ms Write: 10% 13728 30% 13728 50% 13728 90% 13728 99% 13728 ms Write: 10% 13380 30% 13380 50% 13380 90% 13380 99% 13380 ms Write: 10% 14456 30% 14456 50% 14456 90% 14456 99% 14456 ms Write: 10% 12701 30% 12701 50% 12701 90% 12701 99% 12701 ms Write: 10% 15214 30% 15214 50% 15214 90% 15214 99% 15214 ms Write: 10% 7319 30% 7319 50% 7319 90% 7319 99% 7319 ms Write: 10% 3905 30% 3905 50% 3905 90% 3905 99% 3905 ms Write: 10% 8496 30% 8496 50% 8496 90% 8496 99% 8496 ms Write: 10% 8290 30% 8290 50% 8290 90% 8290 99% 8290 ms Write: 10% 11879 30% 11879 50% 11879 90% 11879 99% 11879 ms Write: 10% 2575 30% 2575 50% 2575 90% 2575 99% 2575 ms Write: 10% 5185 30% 5185 50% 5185 90% 5185 99% 5185 ms Write: 10% 4851 30% 4851 50% 4851 90% 4851 99% 4851 ms Write: 10% 6159 30% 6159 50% 6159 90% 6159 99% 6159 ms Write: 10% 3443 30% 3443 50% 3443 90% 3443 99% 3443 ms Write: 10% 2428 30% 2428 50% 2428 90% 2428 99% 2428 ms Write: 10% 5788 30% 5788 50% 5788 90% 5788 99% 5788 ms Write: 10% 3013 30% 3013 50% 3013 90% 3013 99% 3013 ms Write: 10% 7965 30% 7965 50% 7965 90% 7965 99% 7965 ms Write: 10% 7900 30% 7900 50% 7900 90% 7900 99% 7900 ms Write: 10% 2832 30% 2832 50% 2832 90% 2832 99% 2832 ms Write: 10% 7607 30% 7607 50% 7607 90% 7607 99% 7607 ms Write: 10% 7665 30% 
7665 50% 7665 90% 7665 99% 7665 ms Write: 10% 5111 30% 5111 50% 5111 90% 5111 99% 5111 ms Write: 10% 5592 30% 5592 50% 5592 90% 5592 99% 5592 ms Write: 10% 3063 30% 3063 50% 3063 90% 3063 99% 3063 ms Write: 10% 4081 30% 4081 50% 4081 90% 4081 99% 4081 ms Write: 10% 9893 30% 9893 50% 9893 90% 9893 99% 9893 ms Write: 10% 4537 30% 4537 50% 4537 90% 4537 99% 4537 ms Write: 10% 6025 30% 6025 50% 6025 90% 6025 99% 6025 ms Write: 10% 6948 30% 6948 50% 6948 90% 6948 99% 6948 ms Write: 10% 5031 30% 5031 50% 5031 90% 5031 99% 5031 ms Update: 10% 1977 30% 1977 50% 1977 90% 1977 99% 1977 ms Step 4. read modify write Write: 10% 8043 30% 8043 50% 8043 90% 8043 99% 8043 ms Write: 10% 10023 30% 10023 50% 10023 90% 10023 99% 10023 ms Write: 10% 10921 30% 10921 50% 10921 90% 10921 99% 10921 ms Write: 10% 12515 30% 12515 50% 12515 90% 12515 99% 12515 ms Write: 10% 15559 30% 15559 50% 15559 90% 15559 99% 15559 ms Write: 10% 17356 30% 17356 50% 17356 90% 17356 99% 17356 ms Write: 10% 17272 30% 17272 50% 17272 90% 17272 99% 17272 ms Write: 10% 17722 30% 17722 50% 17722 90% 17722 99% 17722 ms Write: 10% 17678 30% 17678 50% 17678 90% 17678 99% 17678 ms Write: 10% 18566 30% 18566 50% 18566 90% 18566 99% 18566 ms Write: 10% 19102 30% 19102 50% 19102 90% 19102 99% 19102 ms Write: 10% 17982 30% 17982 50% 17982 90% 17982 99% 17982 ms Write: 10% 17785 30% 17785 50% 17785 90% 17785 99% 17785 ms Write: 10% 17037 30% 17037 50% 17037 90% 17037 99% 17037 ms Write: 10% 16445 30% 16445 50% 16445 90% 16445 99% 16445 ms Write: 10% 15941 30% 15941 50% 15941 90% 15941 99% 15941 ms Write: 10% 14034 30% 14034 50% 14034 90% 14034 99% 14034 ms Write: 10% 10037 30% 10037 50% 10037 90% 10037 99% 10037 ms Write: 10% 14747 30% 14747 50% 14747 90% 14747 99% 14747 ms Write: 10% 14975 30% 14975 50% 14975 90% 14975 99% 14975 ms Write: 10% 8749 30% 8749 50% 8749 90% 8749 99% 8749 ms Write: 10% 15440 30% 15440 50% 15440 90% 15440 99% 15440 ms Write: 10% 14087 30% 14087 50% 14087 90% 14087 99% 14087 ms Write: 10% 16881 30% 16881 50% 16881 90% 16881 99% 16881 ms Write: 10% 12715 30% 12715 50% 12715 90% 12715 99% 12715 ms Write: 10% 2881 30% 2881 50% 2881 90% 2881 99% 2881 ms Write: 10% 13199 30% 13199 50% 13199 90% 13199 99% 13199 ms Write: 10% 10588 30% 10588 50% 10588 90% 10588 99% 10588 ms Write: 10% 12251 30% 12251 50% 12251 90% 12251 99% 12251 ms Write: 10% 14917 30% 14917 50% 14917 90% 14917 99% 14917 ms Write: 10% 7797 30% 7797 50% 7797 90% 7797 99% 7797 ms Write: 10% 12747 30% 12747 50% 12747 90% 12747 99% 12747 ms Write: 10% 12354 30% 12354 50% 12354 90% 12354 99% 12354 ms Write: 10% 11344 30% 11344 50% 11344 90% 11344 99% 11344 ms Write: 10% 15129 30% 15129 50% 15129 90% 15129 99% 15129 ms Write: 10% 5979 30% 5979 50% 5979 90% 5979 99% 5979 ms Write: 10% 4746 30% 4746 50% 4746 90% 4746 99% 4746 ms Write: 10% 10056 30% 10056 50% 10056 90% 10056 99% 10056 ms Write: 10% 10230 30% 10230 50% 10230 90% 10230 99% 10230 ms Write: 10% 10356 30% 10356 50% 10356 90% 10356 99% 10356 ms Write: 10% 4278 30% 4278 50% 4278 90% 4278 99% 4278 ms Write: 10% 4637 30% 4637 50% 4637 90% 4637 99% 4637 ms Write: 10% 3810 30% 3810 50% 3810 90% 3810 99% 3810 ms Write: 10% 7725 30% 7725 50% 7725 90% 7725 99% 7725 ms Write: 10% 3191 30% 3191 50% 3191 90% 3191 99% 3191 ms Write: 10% 5674 30% 5674 50% 5674 90% 5674 99% 5674 ms Write: 10% 5873 30% 5873 50% 5873 90% 5873 99% 5873 ms Write: 10% 10091 30% 10091 50% 10091 90% 10091 99% 10091 ms Write: 10% 8907 30% 8907 50% 8907 90% 8907 99% 8907 ms Write: 10% 10074 30% 10074 50% 10074 90% 10074 99% 10074 ms Write: 10% 
5302 30% 5302 50% 5302 90% 5302 99% 5302 ms Write: 10% 4172 30% 4172 50% 4172 90% 4172 99% 4172 ms Write: 10% 6557 30% 6557 50% 6557 90% 6557 99% 6557 ms Write: 10% 4907 30% 4907 50% 4907 90% 4907 99% 4907 ms Write: 10% 3840 30% 3840 50% 3840 90% 3840 99% 3840 ms Write: 10% 3864 30% 3864 50% 3864 90% 3864 99% 3864 ms Write: 10% 3950 30% 3950 50% 3950 90% 3950 99% 3950 ms Write: 10% 5880 30% 5880 50% 5880 90% 5880 99% 5880 ms Write: 10% 6446 30% 6446 50% 6446 90% 6446 99% 6446 ms Write: 10% 5613 30% 5613 50% 5613 90% 5613 99% 5613 ms Write: 10% 6048 30% 6048 50% 6048 90% 6048 99% 6048 ms Write: 10% 5289 30% 5289 50% 5289 90% 5289 99% 5289 ms Write: 10% 4988 30% 4988 50% 4988 90% 4988 99% 4988 ms Write: 10% 8265 30% 8265 50% 8265 90% 8265 99% 8265 ms Read: 10% 3345 30% 3548 50% 3751 90% 20268 99% 23984 ms Update: 10% 2051 30% 2051 50% 2051 90% 2051 99% 2051 ms >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_fifo_read_delete_single_message [GOOD] >> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_only_single_read_infly_from_fifo |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-content_based] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-by_deduplication_id] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-content_based] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_only_single_read_infly_from_fifo [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-by_deduplication_id] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-content_based] >> test_alter_tiering.py::TestAlterTiering::test[many_tables] [FAIL] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] [GOOD] >> test_alter_tiering.py::TestAlterTiering::test_multi[many_tables] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-std] [GOOD] >> 
test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-content_based] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v1] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-std] [GOOD] >> test_auditlog.py::test_cloud_ids_are_logged[attrs0] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] [GOOD] >> TCheckpointStorageTest::ShouldRegisterCoordinator >> TCheckpointStorageTest::ShouldCreateCheckpoint >> TStorageServiceTest::ShouldNotCreateCheckpointAfterGenerationChanged >> TStateStorageTest::ShouldDeleteNoCheckpoints >> TStorageServiceTest::ShouldNotRegisterPrevGeneration >> TStorageServiceTest::ShouldRegister >> TCheckpointStorageTest::ShouldUpdateCheckpointStatusForCheckpointsWithTheSameGenAndNo >> TStateStorageTest::ShouldSaveGetOldSmallState |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TCheckpointStorageTest::ShouldRegisterCoordinator [GOOD] >> TCheckpointStorageTest::ShouldGetCoordinators |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v0] [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TCheckpointStorageTest::ShouldCreateCheckpoint [GOOD] >> TCheckpointStorageTest::ShouldCreateGetCheckpoints >> TStateStorageTest::ShouldIssueErrorOnWrongGetStateParams >> test_select.py::TestDML::test_select[table_all_types-pk_types12-all_types12-index12---] [GOOD] >> TStorageServiceTest::ShouldRegister [GOOD] >> TStorageServiceTest::ShouldRegisterNextGeneration >> TStorageServiceTest::ShouldNotRegisterPrevGeneration [GOOD] >> TStorageServiceTest::ShouldNotCreateCheckpointWhenUnregistered >> TStateStorageTest::ShouldDeleteNoCheckpoints [GOOD] >> TStateStorageTest::ShouldDeleteNoCheckpoints2 >> TStateStorageTest::ShouldIssueErrorOnWrongGetStateParams [GOOD] >> TStateStorageTest::ShouldIssueErrorOnNonExistentState >> TStateStorageTest::ShouldSaveGetOldSmallState [GOOD] >> TStateStorageTest::ShouldSaveGetOldBigState >> TCheckpointStorageTest::ShouldGetCoordinators [GOOD] >> TCheckpointStorageTest::ShouldMarkCheckpointsGc |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TCheckpointStorageTest::ShouldUpdateCheckpointStatusForCheckpointsWithTheSameGenAndNo [GOOD] >> TGcTest::ShouldRemovePreviousCheckpoints >> TStorageServiceTest::ShouldNotCreateCheckpointAfterGenerationChanged [GOOD] >> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutCreation >> TStateStorageTest::ShouldSaveGetOldSmallState2Tasks >> TStateStorageTest::ShouldSaveGetOldBigState [GOOD] >> 
TStateStorageTest::ShouldSaveGetIncrementSmallState >> TStateStorageTest::ShouldDeleteNoCheckpoints2 [GOOD] >> TStateStorageTest::ShouldDeleteCheckpoints >> TStorageServiceTest::ShouldRegisterNextGeneration [GOOD] >> TStorageServiceTest::ShouldPendingAndCompleteCheckpoint |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-content_based] [GOOD] >> TStorageServiceTest::ShouldNotCreateCheckpointWhenUnregistered [GOOD] >> TStorageServiceTest::ShouldNotCreateCheckpointTwice |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_only_single_read_infly_from_fifo [GOOD] |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TStateStorageTest::ShouldIssueErrorOnNonExistentState [GOOD] >> TStateStorageTest::ShouldLoadLastSnapshot >> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutCreation [GOOD] >> TStorageServiceTest::ShouldNotAbortCheckpointWithoutCreation >> TStateStorageTest::ShouldSaveGetIncrementSmallState [GOOD] >> TStateStorageTest::ShouldSaveGetIncrementBigState >> TCheckpointStorageTest::ShouldCreateGetCheckpoints [GOOD] >> TCheckpointStorageTest::ShouldGetCheckpointsEmpty >> TCheckpointStorageTest::ShouldMarkCheckpointsGc [GOOD] >> TCheckpointStorageTest::ShouldNotDeleteUnmarkedCheckpoints |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-std] [GOOD] >> TStateStorageTest::ShouldDeleteCheckpoints [GOOD] >> TStateStorageTest::ShouldDeleteGraph >> TStateStorageTest::ShouldSaveGetIncrementBigState [GOOD] >> TStateStorageTest::ShouldNotGetNonExistendState |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-content_based] [GOOD] >> TStateStorageTest::ShouldLoadLastSnapshot [GOOD] >> TStateStorageTest::ShouldNotGetNonExistendSnaphotState >> TStorageServiceTest::ShouldNotCreateCheckpointTwice [GOOD] >> TStorageServiceTest::ShouldNotPendingCheckpointWithoutCreation >> TStateStorageTest::ShouldSaveGetOldSmallState2Tasks [GOOD] >> TStorageServiceTest::ShouldCreateCheckpoint |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TStorageServiceTest::ShouldNotAbortCheckpointWithoutCreation [GOOD] >> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutPending >> TStateStorageTest::ShouldNotGetNonExistendState [GOOD] >> TCheckpointStorageTest::ShouldGetCheckpointsEmpty [GOOD] >> TCheckpointStorageTest::ShouldDeleteGraph >> test_auditlog.py::test_single_dml_query_logged[replace] >> TStateStorageTest::ShouldDeleteGraph [GOOD] >> TStateStorageTest::ShouldGetMultipleStates >> TStorageServiceTest::ShouldPendingAndCompleteCheckpoint [GOOD] >> TStorageServiceTest::ShouldSaveState >> TStateStorageTest::ShouldNotGetNonExistendSnaphotState [GOOD] >> TStateStorageTest::ShouldLoadIncrementSnapshot |92.3%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[update] >> TStorageServiceTest::ShouldNotPendingCheckpointWithoutCreation [GOOD] >> TStorageServiceTest::ShouldNotPendingCheckpointGenerationChanged |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TStorageServiceTest::ShouldCreateCheckpoint [GOOD] >> TStorageServiceTest::ShouldGetCheckpoints |92.3%| [TA] $(B)/ydb/tests/functional/sqs/messaging/test-results/py3test/{meta.json ... results_accumulator.log} >> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutPending [GOOD] >> TStorageServiceTest::ShouldNotCompleteCheckpointGenerationChanged |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldNotGetNonExistendState [GOOD] |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.3%| [TA] {RESULT} $(B)/ydb/tests/functional/sqs/messaging/test-results/py3test/{meta.json ... results_accumulator.log} |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TStateStorageTest::ShouldLoadIncrementSnapshot [GOOD] >> TCheckpointStorageTest::ShouldDeleteGraph [GOOD] >> TCheckpointStorageTest::ShouldDeleteMarkedCheckpoints >> TStorageServiceTest::ShouldSaveState [GOOD] >> TStorageServiceTest::ShouldUseGc >> TCheckpointStorageTest::ShouldNotDeleteUnmarkedCheckpoints [GOOD] >> TCheckpointStorageTest::ShouldRetryOnExistingGraphDescId >> TGcTest::ShouldRemovePreviousCheckpoints [GOOD] >> TGcTest::ShouldIgnoreIncrementCheckpoint >> TStorageServiceTest::ShouldNotPendingCheckpointGenerationChanged [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test >> test_alter_tiering.py::TestAlterTiering::test_multi[many_tables] [GOOD] Test command err: Thread 0 failed |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_all_types-pk_types12-all_types12-index12---] [GOOD] >> TStateStorageTest::ShouldGetMultipleStates [GOOD] >> TStorageServiceTest::ShouldGetCheckpoints [GOOD] >> TStorageServiceTest::ShouldAbortCheckpoint >> test_auditlog.py::test_dml_requests_arent_logged_when_sid_is_expected >> test_auditlog.py::test_cloud_ids_are_logged[attrs0] [GOOD] >> TStorageServiceTest::ShouldNotCompleteCheckpointGenerationChanged [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] |92.3%| [TA] $(B)/ydb/tests/datashard/select/test-results/py3test/{meta.json ... results_accumulator.log} >> TCheckpointStorageTest::ShouldRetryOnExistingGraphDescId [GOOD] |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.3%| [TA] {RESULT} $(B)/ydb/tests/datashard/select/test-results/py3test/{meta.json ... 
results_accumulator.log} |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldLoadIncrementSnapshot [GOOD] |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldNotPendingCheckpointGenerationChanged [GOOD] Test command err: 2025-06-24T20:58:40.131237Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7519623235937216245:2048] with connection to localhost:26642:local 2025-06-24T20:58:40.155285Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:41.527464Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-24T20:58:41.527498Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:41.540473Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.16] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:41.739435Z node 1 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:197: [graph_graphich.16] Failed to register graph:
: Warning: Table: local/TStorageServiceTestShouldNotRegisterPrevGeneration/coordinators_sync, pk: graph_graphich, current generation: 17, expected/new generation: 16, operation: RegisterCheck, code: 400130 2025-06-24T20:58:41.739468Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.16] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:43.458442Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7519623253992306321:2048] with connection to localhost:26642:local 2025-06-24T20:58:43.458585Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-24T20:58:44.684536Z node 2 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:249: [graph_graphich.17] [17:1] Failed to create checkpoint:
: Warning: Table: local/TStorageServiceTestShouldNotCreateCheckpointWhenUnregistered/coordinators_sync, pk: graph_graphich, current generation: 0, expected/new generation: 17, operation: Check, code: 400130 2025-06-24T20:58:44.684587Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-24T20:58:46.729236Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7519623267351079538:2048] with connection to localhost:26642:local 2025-06-24T20:58:46.729347Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:47.218638Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-24T20:58:47.218680Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:47.219616Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-24T20:58:48.765121Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-24T20:58:48.765179Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-24T20:58:48.775744Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-24T20:58:49.322093Z node 3 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:249: [graph_graphich.17] [17:1] Failed to create checkpoint:
: Error: Constraint violated. Table: `local/TStorageServiceTestShouldNotCreateCheckpointTwice/checkpoints_metadata`., code: 2012
: Error: Conflict with existing key., code: 2012 2025-06-24T20:58:49.322123Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-24T20:58:51.679991Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7519623288840662497:2048] with connection to localhost:26642:local 2025-06-24T20:58:51.680074Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:52.202384Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-24T20:58:52.202411Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:52.211284Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-24T20:58:52.651899Z node 4 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:274: [graph_graphich.17] [17:1] Failed to set 'PendingCommit' status:
: Warning: Failed to select checkpoint '17:1', code: 400080 2025-06-24T20:58:52.651928Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-24T20:58:54.705642Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [5:7519623303472756504:2048] with connection to localhost:26642:local 2025-06-24T20:58:54.705739Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:55.267523Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-24T20:58:55.267553Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:55.268016Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-24T20:58:56.978426Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-24T20:58:56.978463Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-24T20:58:56.979065Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:57.247418Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered 2025-06-24T20:58:57.247446Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:57.251545Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-24T20:58:57.492919Z node 5 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:274: [graph_graphich.17] [17:1] Failed to set 'PendingCommit' status:
: Warning: Table: local/TStorageServiceTestShouldNotPendingCheckpointGenerationChanged/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: Check, code: 400130 2025-06-24T20:58:57.492957Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse >> JsonChangeRecord::DataChange [GOOD] |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest >> JsonChangeRecord::DataChange [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] |92.3%| [TA] $(B)/ydb/core/tx/replication/service/ut_json_change_record/test-results/unittest/{meta.json ... results_accumulator.log} |92.3%| [TA] {RESULT} $(B)/ydb/core/tx/replication/service/ut_json_change_record/test-results/unittest/{meta.json ... results_accumulator.log} |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldGetMultipleStates [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldNotCompleteCheckpointGenerationChanged [GOOD] Test command err: 2025-06-24T20:58:40.443494Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7519623238012828960:2048] with connection to localhost:11048:local 2025-06-24T20:58:40.443583Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:41.757102Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-24T20:58:41.757139Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:41.767427Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-24T20:58:43.081853Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-24T20:58:43.081900Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-24T20:58:43.087259Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:43.234254Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered 2025-06-24T20:58:43.234280Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:43.234628Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-24T20:58:43.672172Z node 1 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:249: [graph_graphich.17] [17:2] Failed to create checkpoint:
: Warning: Table: local/TStorageServiceTestShouldNotCreateCheckpointAfterGenerationChanged/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: Check, code: 400130 2025-06-24T20:58:43.672212Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-24T20:58:45.800847Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7519623263601582861:2048] with connection to localhost:11048:local 2025-06-24T20:58:45.800974Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:46.306238Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-24T20:58:46.306270Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:46.307962Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest 2025-06-24T20:58:46.774620Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:302: [graph_graphich.17] [17:1] Failed to set 'Completed' status:
: Warning: Failed to select checkpoint '17:1', code: 400080 2025-06-24T20:58:46.774660Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse 2025-06-24T20:58:49.259621Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7519623276204648374:2048] with connection to localhost:11048:local 2025-06-24T20:58:49.263360Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:49.707386Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-24T20:58:49.707425Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:49.708671Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:319: [graph_graphich.17] [17:1] Got TEvAbortCheckpointRequest 2025-06-24T20:58:50.083963Z node 3 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:331: [graph_graphich.17] [17:1] Failed to abort checkpoint:
: Warning: Failed to select checkpoint '17:1', code: 400080 2025-06-24T20:58:50.083999Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:335: [graph_graphich.17] [17:1] Send TEvAbortCheckpointResponse 2025-06-24T20:58:52.235949Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7519623288300028025:2048] with connection to localhost:11048:local 2025-06-24T20:58:52.236068Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:52.708499Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-24T20:58:52.708530Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:52.715628Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-24T20:58:54.116615Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-24T20:58:54.116651Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-24T20:58:54.124222Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest 2025-06-24T20:58:54.555905Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:302: [graph_graphich.17] [17:1] Failed to set 'Completed' status:
: Warning: Selected checkpoint '17:1' with status Pending, while expected PendingCommit, code: 400080 2025-06-24T20:58:54.555938Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse 2025-06-24T20:58:57.029422Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [5:7519623308912877071:2048] with connection to localhost:11048:local 2025-06-24T20:58:57.029549Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:57.555410Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-24T20:58:57.555443Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:57.560157Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-24T20:58:59.506929Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-24T20:58:59.506965Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-24T20:58:59.508827Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-24T20:59:00.349816Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-06-24T20:59:00.349862Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-24T20:59:00.355923Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest 2025-06-24T20:59:00.683678Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered 2025-06-24T20:59:00.683715Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse 2025-06-24T20:59:00.689954Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest 2025-06-24T20:59:00.803723Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:302: [graph_graphich.17] [17:1] Failed to set 'Completed' status:
: Warning: Table: local/TStorageServiceTestShouldNotPendingCheckpointGenerationChanged/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: Check, code: 400130 2025-06-24T20:59:00.803769Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse >> TCheckpointStorageTest::ShouldDeleteMarkedCheckpoints [GOOD] >> TGroupMapperTest::NonUniformCluster2 |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TCheckpointStorageTest::ShouldRetryOnExistingGraphDescId [GOOD] |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] >> TStorageServiceTest::ShouldUseGc [GOOD] >> TStorageServiceTest::ShouldAbortCheckpoint [GOOD] >> TStorageServiceTest::ShouldGetState |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TGcTest::ShouldIgnoreIncrementCheckpoint [GOOD] >> TStateStorageTest::ShouldCountStates |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_create_and_remove_tenant |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TCheckpointStorageTest::ShouldDeleteMarkedCheckpoints [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TGroupMapperTest::NonUniformCluster2 [GOOD] >> TStateStorageTest::ShouldCountStates [GOOD] >> TStateStorageTest::ShouldCountStatesNonExistentCheckpoint |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldUseGc [GOOD] Test command err: 2025-06-24T20:58:40.303015Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7519623235694780617:2048] with connection to localhost:32167:local 2025-06-24T20:58:40.303116Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:41.474036Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-24T20:58:41.474064Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:43.588646Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7519623257109397954:2048] with connection to localhost:32167:local 
2025-06-24T20:58:43.588756Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:44.172052Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-24T20:58:44.172085Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:44.175619Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:44.472598Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered 2025-06-24T20:58:44.472625Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:44.472923Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:44.612430Z node 2 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:197: [graph_graphich.17] Failed to register graph:
: Warning: Table: local/TStorageServiceTestShouldRegisterNextGeneration/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: RegisterCheck, code: 400130 2025-06-24T20:58:44.612465Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:46.970994Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7519623269443608141:2048] with connection to localhost:32167:local 2025-06-24T20:58:46.971094Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:47.419449Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-24T20:58:47.419484Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:47.420022Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-24T20:58:49.092423Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-24T20:58:49.092462Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-24T20:58:49.099842Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-24T20:58:50.079510Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-06-24T20:58:50.079545Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-24T20:58:50.080028Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-24T20:58:50.554708Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-06-24T20:58:50.554750Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-24T20:58:50.556445Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:2] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-24T20:58:51.051435Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:2] Status updated to 'PendingCommit' 2025-06-24T20:58:51.051469Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:2] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-24T20:58:51.051983Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:2] Got TEvCompleteCheckpointRequest 2025-06-24T20:58:51.497025Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:2] Status updated to 'Completed' 2025-06-24T20:58:51.497063Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:2] Send TEvCompleteCheckpointResponse 2025-06-24T20:58:51.503358Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:58:52.031976Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 
2025-06-24T20:58:54.173458Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7519623298888113275:2048] with connection to localhost:32167:local 2025-06-24T20:58:54.173537Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:54.764652Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-24T20:58:54.764695Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:54.765280Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-24T20:58:56.579433Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-24T20:58:56.579468Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-24T20:58:56.580151Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:365: [graph_graphich] [17:1] Got TEvSaveTaskState: task 1317 2025-06-24T20:58:56.868381Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:389: [graph_graphich] [17:1] TEvSaveTaskState Apply: task: 1317 2025-06-24T20:58:56.875281Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:404: [graph_graphich] [17:1] Send TEvSaveTaskStateResult: task: 1317 2025-06-24T20:58:58.830924Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [5:7519623318288702500:2048] with connection to localhost:32167:local 2025-06-24T20:58:58.830955Z node 5 :STREAMS_STORAGE_SERVICE INFO: gc.cpp:83: Successfully bootstrapped storage GC [5:7519623322583669906:2131] 2025-06-24T20:58:58.831005Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:59.422222Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-24T20:58:59.422257Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:59.428020Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-24T20:59:01.336669Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-24T20:59:01.336707Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-24T20:59:01.339630Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-24T20:59:02.079353Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-06-24T20:59:02.079387Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-24T20:59:02.080161Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest 2025-06-24T20:59:02.543898Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:1] Status updated to 'Completed' 2025-06-24T20:59:02.543939Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:307: 
[graph_graphich.17] [17:1] Send TEvNewCheckpointSucceeded 2025-06-24T20:59:02.543963Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse 2025-06-24T20:59:02.544149Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 17:1 for graph 'graph_graphich' 2025-06-24T20:59:02.544810Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-24T20:59:02.811107Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-06-24T20:59:02.811161Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-24T20:59:02.816082Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:2] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-24T20:59:02.913079Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:2] Status updated to 'PendingCommit' 2025-06-24T20:59:02.913116Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:2] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-24T20:59:02.919295Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:2] Got TEvCompleteCheckpointRequest 2025-06-24T20:59:03.021134Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:2] Status updated to 'Completed' 2025-06-24T20:59:03.021177Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:307: [graph_graphich.17] [17:2] Send TEvNewCheckpointSucceeded 2025-06-24T20:59:03.021202Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:2] Send TEvCompleteCheckpointResponse 2025-06-24T20:59:03.022051Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 17:2 for graph 'graph_graphich' 2025-06-24T20:59:03.025355Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:3] Got TEvCreateCheckpointRequest 2025-06-24T20:59:03.189322Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:3] Checkpoint created 2025-06-24T20:59:03.189361Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:3] Send TEvCreateCheckpointResponse 2025-06-24T20:59:03.192402Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:3] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-24T20:59:03.281056Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:3] Status updated to 'PendingCommit' 2025-06-24T20:59:03.281086Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:3] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-24T20:59:03.281565Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:3] Got TEvCompleteCheckpointRequest 2025-06-24T20:59:03.376786Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:3] Status updated to 'Completed' 2025-06-24T20:59:03.376821Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:307: [graph_graphich.17] [17:3] Send TEvNewCheckpointSucceeded 2025-06-24T20:59:03.376855Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:3] Send TEvCompleteCheckpointResponse 2025-06-24T20:59:03.377129Z node 5 
:STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 17:3 for graph 'graph_graphich' 2025-06-24T20:59:03.378003Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:03.755172Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:03.809329Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph_graphich' up to 17:1 2025-06-24T20:59:03.820538Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph_graphich' up to 17:3 2025-06-24T20:59:03.825506Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph_graphich' up to 17:2 2025-06-24T20:59:03.858123Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:03.890106Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:03.994532Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:04.061356Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:04.162312Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:04.189953Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:04.293289Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:04.339572Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:04.444432Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:04.480956Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:04.583795Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:04.620276Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:04.723725Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:04.777822Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:04.883718Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:04.912412Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:05.016651Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:05.045024Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:05.146208Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got 
TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:05.176948Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:05.283408Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:05.329057Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:05.435258Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:05.468728Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:05.571507Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:05.609441Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:05.720168Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:05.759130Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:05.860224Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:05.892532Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:05.997692Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:06.025220Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:06.127783Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:06.150059Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:06.255849Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:06.291724Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:06.395650Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:06.427720Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:06.535422Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:06.564020Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:06.671370Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:06.708459Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:06.815279Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:06.844033Z node 5 
:STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:06.945176Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:06.979982Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:07.087252Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:07.160565Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:07.263505Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:07.371282Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:07.483524Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:07.617394Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::NonUniformCluster2 [GOOD] |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TStorageServiceTest::ShouldGetState [GOOD] |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> TStateStorageTest::ShouldCountStatesNonExistentCheckpoint [GOOD] >> test_auditlog.py::test_single_dml_query_logged[replace] [GOOD] |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[update] [GOOD] |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_cloud_ids_are_logged[attrs0] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/2btm/00391b/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk8/testing_out_stuff/test_auditlog.py.test_cloud_ids_are_logged.attrs0/audit.txt 2025-06-24T20:59:00.836400Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","cloud_id":"cloud-id-A","end_time":"2025-06-24T20:59:00.836352Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update 
`/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-24T20:59:00.472311Z","subject":"root@builtin","detailed_status":"SUCCESS","resource_id":"database-id-C","operation":"ExecuteDataQueryRequest","folder_id":"folder-id-B","component":"grpc-proxy"} |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> DataShardTxOrder::RandomPoints_DelayRS_Reboot_Dirty >> test_auditlog.py::test_dynconfig |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldGetState [GOOD] Test command err: 2025-06-24T20:58:52.070799Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7519623287336761976:2048] with connection to localhost:26369:local 2025-06-24T20:58:52.070916Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:52.584748Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-24T20:58:52.584780Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:52.599239Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-24T20:58:54.230911Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-24T20:58:54.230958Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-24T20:58:56.532619Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7519623312054970776:2048] with connection to localhost:26369:local 2025-06-24T20:58:56.532728Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:58:57.047689Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-24T20:58:57.047714Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:58:57.048851Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-24T20:58:58.661925Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-24T20:58:58.661971Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-24T20:58:58.662389Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-24T20:58:59.150149Z node 2 
:STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-06-24T20:58:59.150185Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-24T20:58:59.155311Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:3] Got TEvCreateCheckpointRequest 2025-06-24T20:58:59.746564Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:3] Checkpoint created 2025-06-24T20:58:59.746633Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:3] Send TEvCreateCheckpointResponse 2025-06-24T20:58:59.751890Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:00.236248Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:02.435688Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7519623332499269699:2048] with connection to localhost:26369:local 2025-06-24T20:59:02.435799Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:59:02.985171Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-24T20:59:02.985204Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:59:02.985833Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-24T20:59:04.290523Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-24T20:59:04.290562Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-24T20:59:04.291168Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-24T20:59:05.058415Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-06-24T20:59:05.058456Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-24T20:59:05.062098Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-24T20:59:05.667573Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-06-24T20:59:05.667605Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-24T20:59:05.668013Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:2] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-24T20:59:06.116778Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:2] Status updated to 'PendingCommit' 2025-06-24T20:59:06.116814Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:2] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-24T20:59:06.117654Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:2] Got 
TEvCompleteCheckpointRequest 2025-06-24T20:59:06.612081Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:2] Status updated to 'Completed' 2025-06-24T20:59:06.612125Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:2] Send TEvCompleteCheckpointResponse 2025-06-24T20:59:06.615953Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:319: [graph_graphich.17] [17:1] Got TEvAbortCheckpointRequest 2025-06-24T20:59:07.102388Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:333: [graph_graphich.17] [17:1] Checkpoint aborted 2025-06-24T20:59:07.102428Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:335: [graph_graphich.17] [17:1] Send TEvAbortCheckpointResponse 2025-06-24T20:59:07.105802Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:319: [graph_graphich.17] [17:2] Got TEvAbortCheckpointRequest 2025-06-24T20:59:07.628829Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:333: [graph_graphich.17] [17:2] Checkpoint aborted 2025-06-24T20:59:07.628878Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:335: [graph_graphich.17] [17:2] Send TEvAbortCheckpointResponse 2025-06-24T20:59:07.629485Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-24T20:59:08.044815Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-24T20:59:10.636088Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7519623364727127687:2048] with connection to localhost:26369:local 2025-06-24T20:59:10.636163Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-24T20:59:11.173063Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-24T20:59:11.173095Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-24T20:59:11.183316Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-24T20:59:12.504084Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-24T20:59:12.504110Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-24T20:59:12.523434Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:365: [graph_graphich] [17:1] Got TEvSaveTaskState: task 1317 2025-06-24T20:59:12.783858Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:389: [graph_graphich] [17:1] TEvSaveTaskState Apply: task: 1317 2025-06-24T20:59:12.783928Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:404: [graph_graphich] [17:1] Send TEvSaveTaskStateResult: task: 1317 2025-06-24T20:59:12.787480Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:413: [graph_graphich] [17:1] Got TEvGetTaskState: tasks {1317} 2025-06-24T20:59:12.788137Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:532: [graph_graphich] [17:1] GetState, tasks: 1317 2025-06-24T20:59:13.646697Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:667: [graph_graphich] [17:1] ListOfStates results: 2025-06-24T20:59:13.646800Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:688: [graph_graphich] [17:1] taskId 1317 checkpoint id: 17:1, 
rows count: 1 2025-06-24T20:59:13.646836Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:920: [graph_graphich] [17:1] SkipStatesInFuture, skip 0 checkpoints 2025-06-24T20:59:13.647185Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:812: [graph_graphich] [17:1] SelectState: task_id 1317, seq_no 1, blob_seq_num 0 2025-06-24T20:59:14.343435Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:423: [graph_graphich] [17:1] DeserializeState, task id 1317, blob size 49 2025-06-24T20:59:14.343513Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:979: [graph_graphich] [17:1] ApplyIncrements 2025-06-24T20:59:14.349518Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:432: [graph_graphich] [{ Id: 1 Generation: 17 }] Send TEvGetTaskStateResult: tasks: {1317} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldCountStatesNonExistentCheckpoint [GOOD] Test command err: 2025-06-24T20:58:50.728074Z node 1 :STREAMS_STORAGE_SERVICE INFO: gc.cpp:83: Successfully bootstrapped storage GC [1:37:2084] Count graph descriptions query: --!syntax_v1 PRAGMA TablePathPrefix("local/TGcTestShouldRemovePreviousCheckpoints"); SELECT * FROM checkpoints_graphs_description; 2025-06-24T20:58:51.387894Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 11:3 for graph 'graph' 2025-06-24T20:58:52.628677Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph' up to 11:3 Count graph descriptions query: --!syntax_v1 PRAGMA TablePathPrefix("local/TGcTestShouldRemovePreviousCheckpoints"); SELECT * FROM checkpoints_graphs_description; 2025-06-24T20:59:05.569266Z node 2 :STREAMS_STORAGE_SERVICE INFO: gc.cpp:83: Successfully bootstrapped storage GC [2:37:2084] Count graph descriptions query: --!syntax_v1 PRAGMA TablePathPrefix("local/ShouldIgnoreIncrementCheckpoint"); SELECT * FROM checkpoints_graphs_description; 2025-06-24T20:59:06.298344Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 11:3 for graph 'graph' 2025-06-24T20:59:06.298427Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:96: GC skip increment checkpoint for graph 'graph' |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_good_dynconfig] |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] |92.4%| [TA] $(B)/ydb/core/fq/libs/checkpoint_storage/ut/test-results/unittest/{meta.json ... results_accumulator.log} |92.4%| [TA] {RESULT} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_arent_logged_when_sid_is_expected [GOOD] |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_tablet.py::TestMassiveKills::test_tablets_are_ok_after_many_kills [GOOD] |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] |92.5%| [TM] {asan, default-linux-x86_64, 
release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] >> test_auditlog.py::test_dml_begin_commit_logged >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_good_dynconfig] >> test_auditlog.py::test_create_and_remove_tenant [GOOD] |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_bad_dynconfig] |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[replace] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/2btm/0038f0/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk19/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.replace/audit.txt 2025-06-24T20:59:15.159028Z: 
{"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-24T20:59:15.158972Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"replace into `/Root/test_auditlog.py/test-table` (id, value) values (2, 3), (3, 3)","start_time":"2025-06-24T20:59:15.051585Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_tablet.py::TestMassiveKills::test_tablets_are_ok_after_many_kills [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_bad_dynconfig] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[update] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/2btm/0038ee/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk21/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.update/audit.txt 2025-06-24T20:59:16.142117Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-24T20:59:16.142079Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-24T20:59:15.825080Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dynconfig [GOOD] |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] |92.5%| [TA] $(B)/ydb/tests/tools/nemesis/ut/test-results/py3test/{meta.json ... results_accumulator.log} >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_good_dynconfig] |92.5%| [TA] {RESULT} $(B)/ydb/tests/tools/nemesis/ut/test-results/py3test/{meta.json ... 
results_accumulator.log} |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_bad_dynconfig] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_good_dynconfig] [GOOD] |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_arent_logged_when_sid_is_expected [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/2btm/0038cb/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk13/testing_out_stuff/test_auditlog.py.test_dml_requests_arent_logged_when_sid_is_expected/audit.txt |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[select] |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_arent_logged_when_anonymous |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, 
default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_logged_when_unauthorized |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_create_and_remove_tenant [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/2btm/0038aa/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk10/testing_out_stuff/test_auditlog.py.test_create_and_remove_tenant/audit.txt 2025-06-24T20:59:20.006549Z: {"sanitized_token":"{none}","subject":"{none}","status":"SUCCESS","component":"console","operation":"BEGIN INIT DATABASE CONFIG","remote_address":"::1","database":"/Root/users/database"} 2025-06-24T20:59:20.017791Z: {"paths":"[/Root/users/database]","tx_id":"281474976715660","database":"/Root","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"CREATE DATABASE","component":"schemeshard"} 2025-06-24T20:59:20.081971Z: {"paths":"[/Root/users/database]","tx_id":"281474976715661","database":"/Root","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"ALTER DATABASE","component":"schemeshard"} 2025-06-24T20:59:24.944475Z: {"sanitized_token":"{none}","subject":"{none}","status":"SUCCESS","component":"console","operation":"END INIT DATABASE CONFIG","remote_address":"::1","database":"/Root/users/database"} 2025-06-24T20:59:27.394288Z: {"paths":"[.metadata/workload_manager/pools/default]","tx_id":"281474976720657","new_owner":"metadata@system","acl_add":"[+(SR|DS):all-users@well-known, +(SR|DS):root@builtin]","database":"/Root/users/database","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"metadata@system","detailed_status":"StatusAccepted","operation":"CREATE RESOURCE POOL","component":"schemeshard"} 2025-06-24T20:59:27.532746Z: {"reason":"Check failed: path: '/Root/users/database/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)","paths":"[default]","tx_id":"281474976720658","new_owner":"metadata@system","acl_add":"[+(SR|DS):all-users@well-known, 
+(SR|DS):root@builtin]","database":"/Root/users/database","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"metadata@system","detailed_status":"StatusAlreadyExists","operation":"CREATE RESOURCE POOL","component":"schemeshard"} 2025-06-24T20:59:27.557883Z: {"sanitized_token":"{none}","subject":"{none}","status":"SUCCESS","component":"console","operation":"BEGIN REMOVE DATABASE","remote_address":"::1","database":"/Root/users/database"} 2025-06-24T20:59:27.568402Z: {"paths":"[/Root/users/database]","tx_id":"281474976715662","database":"/Root","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"DROP DATABASE","component":"schemeshard"} 2025-06-24T20:59:27.593588Z: {"sanitized_token":"{none}","subject":"{none}","status":"SUCCESS","component":"console","operation":"END REMOVE DATABASE","remote_address":"::1","database":"/Root/users/database"} |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_bad_dynconfig] |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_good_dynconfig] [GOOD] |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_begin_commit_logged [GOOD] >> test_auditlog.py::test_single_dml_query_logged[insert] |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[delete] |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_cloud_ids_are_logged[attrs1] |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dynconfig [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/2btm/00388b/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk16/testing_out_stuff/test_auditlog.py.test_dynconfig/audit.txt 
2025-06-24T20:59:37.193356Z: {"sanitized_token":"**** (B6C6F477)","subject":"root@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"}
|92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
>> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_bad_dynconfig] [GOOD]
|92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
>> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_good_dynconfig] [GOOD]
Test command err: AAA /home/runner/.ya/build/build_root/2btm/003884/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk1/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_bad_auth-_good_dynconfig/audit.txt
2025-06-24T20:59:38.628508Z: {"sanitized_token":"**** (C877DF61)","subject":"__bad__@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"}
|92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
>> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_bad_dynconfig] [GOOD]
|92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test
|92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test
|92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
>> test_ttl.py::TestTTLValueSinceUnixEpoch::test_case
>> test_auditlog.py::test_dml_requests_logged_when_sid_is_unexpected
>> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_good_dynconfig] [GOOD]
|92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
>> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_good_dynconfig]
>> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_bad_dynconfig] [GOOD]
>> test_ttl.py::TestTTLOnIndexedTable::test_case
|92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
>> test_auditlog.py::test_single_dml_query_logged[upsert]
|92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test
|92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
>> test_ttl.py::TestTTLAlterSettings::test_case
>> test_auditlog.py::test_dml_requests_arent_logged_when_anonymous [GOOD]
>>
test_s3.py::TestYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] >> test_auditlog.py::test_single_dml_query_logged[select] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLDefaultEnv::test_case |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast, DROP COLUMN stats-`.metadata/script_executions`] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax-`.metadata/script_executions`] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast-`.metadata/script_executions`] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test >> test_delete_all_after_inserts.py::TestDeleteAllAfterInserts::test_delete_all_rows_after_several_inserts [SKIPPED] |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[DROP TABLE {}-`.metadata/script_executions`] |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test >> test_delete_all_after_inserts.py::TestDeleteAllAfterInserts::test_delete_all_rows_after_several_inserts [SKIPPED] |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test >> DataShardOutOfOrder::TestReadTableImmediateWriteBlock ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_good_dynconfig] [GOOD] Test command err: AAA 
/home/runner/.ya/build/build_root/2btm/00380f/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk5/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_other-_good_dynconfig/audit.txt 2025-06-24T20:59:50.720202Z: {"sanitized_token":"othe****ltin (27F910A9)","subject":"other-user@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} >> test_delete_by_explicit_row_id.py::TestDeleteByExplicitRowId::test_delete_row_by_explicit_row_id ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_begin_commit_logged [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/2btm/00381b/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk11/testing_out_stuff/test_auditlog.py.test_dml_begin_commit_logged/audit.txt 2025-06-24T20:59:50.774243Z: {"tx_id":"01jyhvtw3n1typqddj1kr9etka","database":"/Root/test_auditlog.py","end_time":"2025-06-24T20:59:50.774208Z","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","status":"SUCCESS","start_time":"2025-06-24T20:59:50.773627Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"BeginTransactionRequest","component":"grpc-proxy"} 2025-06-24T20:59:50.969680Z: {"tx_id":"01jyhvtw3n1typqddj1kr9etka","database":"/Root/test_auditlog.py","end_time":"2025-06-24T20:59:50.969637Z","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","commit_tx":"0","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-24T20:59:50.785906Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-24T20:59:50.992511Z: {"tx_id":"01jyhvtw3n1typqddj1kr9etka","database":"/Root/test_auditlog.py","end_time":"2025-06-24T20:59:50.992462Z","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","status":"SUCCESS","start_time":"2025-06-24T20:59:50.986133Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"CommitTransactionRequest","component":"grpc-proxy"} >> test_auditlog.py::test_dml_requests_logged_when_unauthorized [GOOD] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_bad_dynconfig] [GOOD] |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test >> test_ttl.py::TestTTLAlterSettings::test_case [GOOD] |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_12_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 12] >> alter_compression.py::TestAllCompression::test_all_supported_compression[lz4_compression-COMPRESSION = "lz4"] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_bad_dynconfig] [GOOD] Test command err: AAA 
/home/runner/.ya/build/build_root/2btm/00380e/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk4/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_other-_bad_dynconfig/audit.txt 2025-06-24T20:59:54.000400Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"othe****ltin (27F910A9)","remote_address":"127.0.0.1","status":"ERROR","subject":"other-user@builtin","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} >> test_auditlog.py::test_single_dml_query_logged[delete] [GOOD] >> test_auditlog.py::test_single_dml_query_logged[insert] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_bad_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/2btm/003804/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk0/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_bad_auth-_bad_dynconfig/audit.txt 2025-06-24T20:59:55.337176Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"**** (C877DF61)","remote_address":"127.0.0.1","status":"ERROR","subject":"__bad__@builtin","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} >> test_auditlog.py::test_cloud_ids_are_logged[attrs1] [GOOD] |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_good_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/2btm/0037f7/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk7/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_root-_good_dynconfig/audit.txt 2025-06-24T20:59:56.499360Z: {"sanitized_token":"**** (B6C6F477)","subject":"root@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] >> DataShardOutOfOrder::TestReadTableImmediateWriteBlock [GOOD] >> DataShardOutOfOrder::TestReadTableSingleShardImmediate ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_bad_dynconfig] [GOOD] Test command err: AAA 
/home/runner/.ya/build/build_root/2btm/0037e2/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk6/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_root-_bad_dynconfig/audit.txt 2025-06-24T20:59:57.104553Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","status":"ERROR","subject":"root@builtin","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} >> test_auditlog.py::test_single_dml_query_logged[upsert] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_arent_logged_when_anonymous [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/2btm/00378d/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk12/testing_out_stuff/test_auditlog.py.test_dml_requests_arent_logged_when_anonymous/audit.txt >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[select] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/2btm/0037d9/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk20/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.select/audit.txt 2025-06-24T20:59:58.261089Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-24T20:59:58.261048Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"select id from `/Root/test_auditlog.py/test-table`","start_time":"2025-06-24T20:59:58.114133Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] >> test_auditlog.py::test_dml_requests_logged_when_sid_is_unexpected [GOOD] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_good_dynconfig] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] >> 
test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] >> TRegisterCheckTest::ShouldRegisterCheckSameGenerationAndTransact >> DataShardOutOfOrder::TestReadTableSingleShardImmediate [GOOD] >> TRegisterCheckTest::ShouldRegisterCheckNewGenerationAndTransact >> TRegisterCheckTest::ShouldRegisterCheckSameGeneration >> TFqYdbTest::ShouldStatusToIssuesProcessEmptyIssues [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration2 >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> TRegisterCheckTest::ShouldRegisterCheckNextGeneration >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestReadTableSingleShardImmediate [GOOD] Test command err: 2025-06-24T21:00:09.158572Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:00:09.159104Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:00:09.159354Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002e3a/r3tmp/tmpr2ZcaA/pdisk_1.dat 2025-06-24T21:00:09.629128Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:00:09.638039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:00:09.689895Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:00:09.695221Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750798805509569 != 1750798805509573 2025-06-24T21:00:09.742869Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:00:09.745299Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T21:00:09.745655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:00:09.745809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:00:09.759047Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:00:09.842528Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T21:00:09.842616Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T21:00:09.843626Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:600:2508] 2025-06-24T21:00:10.001903Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:600:2508] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 2 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T21:00:10.001997Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:600:2508] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:00:10.003743Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:600:2508] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:00:10.003864Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:600:2508] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:00:10.004202Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:00:10.004431Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:600:2508] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:00:10.004571Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:600:2508] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:00:10.008348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:00:10.009574Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T21:00:10.010313Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:600:2508] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T21:00:10.010386Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:600:2508] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T21:00:10.071921Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:631:2532]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:00:10.073500Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:631:2532]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:00:10.073992Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:631:2532] 2025-06-24T21:00:10.074271Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:00:10.126079Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:631:2532]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:00:10.126181Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:617:2524], Recipient [1:633:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:00:10.128842Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:617:2524], Recipient [1:633:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:00:10.129362Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:633:2534] 2025-06-24T21:00:10.129807Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:00:10.138563Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:617:2524], Recipient [1:633:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:00:10.139041Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:00:10.139685Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:00:10.141413Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:00:10.141517Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:00:10.141573Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:00:10.141934Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:00:10.142129Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:00:10.142219Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:664:2532] in generation 1 2025-06-24T21:00:10.142762Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:00:10.142846Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:00:10.144616Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-24T21:00:10.144679Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-24T21:00:10.144740Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-24T21:00:10.145082Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:00:10.145187Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:00:10.145262Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:665:2534] in generation 1 2025-06-24T21:00:10.159842Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:00:10.206733Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:00:10.206928Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:00:10.207033Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:668:2553] 2025-06-24T21:00:10.207088Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:00:10.207230Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:00:10.207267Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:00:10.207558Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:631:2532], Recipient [1:631:2532]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:00:10.207612Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event 
TEvPrivate::TEvProgressTransaction 2025-06-24T21:00:10.207718Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:00:10.207757Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-24T21:00:10.207826Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:00:10.207882Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:669:2554] 2025-06-24T21:00:10.207905Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-24T21:00:10.207926Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-24T21:00:10.207945Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:00:10.208184Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender ... ecutedNoMoreRestarts 2025-06-24T21:00:17.664999Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit ExecuteWrite 2025-06-24T21:00:17.665032Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-24T21:00:17.665059Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T21:00:17.665125Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T21:00:17.665144Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-24T21:00:17.665171Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:00:17.665195Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:00:17.665242Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-24T21:00:17.665265Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:00:17.665293Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 72075186224037888 has finished 2025-06-24T21:00:17.676397Z node 2 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-24T21:00:17.676481Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T21:00:17.676551Z node 2 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 2 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2025-06-24T21:00:17.676645Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:00:17.678620Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:315: 
actor# [2:59:2106] Handle TEvProposeTransaction 2025-06-24T21:00:17.678685Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [2:59:2106] TxId# 281474976715661 ProcessProposeTransaction 2025-06-24T21:00:17.678755Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:273: actor# [2:59:2106] Cookie# 0 userReqId# "" txid# 281474976715661 SEND to# [2:811:2642] DataReq marker# P0 2025-06-24T21:00:17.678921Z node 2 :TX_PROXY DEBUG: datareq.cpp:1330: Actor# [2:811:2642] Cookie# 0 txid# 281474976715661 HANDLE TDataReq marker# P1 2025-06-24T21:00:17.679179Z node 2 :TX_PROXY DEBUG: datareq.cpp:1467: Actor# [2:811:2642] txid# 281474976715661 HANDLE EvNavigateKeySetResult TDataReq marker# P3b ErrorCount# 0 2025-06-24T21:00:17.679405Z node 2 :TX_PROXY DEBUG: datareq.cpp:1620: Actor# [2:811:2642] txid# 281474976715661 HANDLE EvResolveKeySetResult TDataReq marker# P3 ErrorCount# 0 2025-06-24T21:00:17.679503Z node 2 :TX_PROXY DEBUG: datareq.cpp:1204: Actor# [2:811:2642] txid# 281474976715661 SEND TEvProposeTransaction to datashard 72075186224037888 with read table request affected shards 1 followers disallowed marker# P4b 2025-06-24T21:00:17.679841Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [2:811:2642], Recipient [2:624:2529]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCAN SourceDeprecated { RawX1: 811 RawX2: 8589937234 } TxBody: " \0018\001B8\n\014\010\200\202\224\204\200\200\200\200\001\020\002\022\t\010\001\022\003key\030\002\022\013\010\002\022\005value\030\002\032\016\n\006\001\000\000\000\000\200\022\000\030\001 \001 \001H\001R\022\t+\003\000\000\000\000\000\000\021R\n\000\000\002\000\000\000" TxId: 281474976715661 ExecLevel: 0 Flags: 8 2025-06-24T21:00:17.679899Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:00:17.680013Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:00:17.680227Z node 2 :TX_DATASHARD TRACE: key_validator.cpp:33: -- AddReadRange: [(Uint32 : NULL) ; ()] table: [72057594046644480:2:0] 2025-06-24T21:00:17.680327Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715661] at 72075186224037888 on unit CheckDataTx 2025-06-24T21:00:17.680383Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715661] at 72075186224037888 is Executed 2025-06-24T21:00:17.680424Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715661] at 72075186224037888 executing on unit CheckDataTx 2025-06-24T21:00:17.680465Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715661] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T21:00:17.680507Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715661] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T21:00:17.680554Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-24T21:00:17.680616Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:281474976715661] at 72075186224037888 2025-06-24T21:00:17.680657Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for 
[0:281474976715661] at 72075186224037888 is Executed 2025-06-24T21:00:17.680682Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715661] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:00:17.680708Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715661] at 72075186224037888 to execution unit MakeScanSnapshot 2025-06-24T21:00:17.680732Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715661] at 72075186224037888 on unit MakeScanSnapshot 2025-06-24T21:00:17.680759Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715661] at 72075186224037888 is Executed 2025-06-24T21:00:17.680783Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715661] at 72075186224037888 executing on unit MakeScanSnapshot 2025-06-24T21:00:17.680816Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715661] at 72075186224037888 to execution unit WaitForStreamClearance 2025-06-24T21:00:17.680839Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715661] at 72075186224037888 on unit WaitForStreamClearance 2025-06-24T21:00:17.680888Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:99: Requested stream clearance from [2:811:2642] for [0:281474976715661] at 72075186224037888 2025-06-24T21:00:17.680922Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715661] at 72075186224037888 is Continue 2025-06-24T21:00:17.680980Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:00:17.681065Z node 2 :TX_PROXY DEBUG: datareq.cpp:2504: Got clearance request, shard: 72075186224037888, txid: 281474976715661 2025-06-24T21:00:17.681137Z node 2 :TX_PROXY DEBUG: datareq.cpp:2513: Collected all clerance requests, txid: 281474976715661 2025-06-24T21:00:17.681181Z node 2 :TX_PROXY DEBUG: datareq.cpp:2968: Send stream clearance, shard: 72075186224037888, txid: 281474976715661, cleared: 1 2025-06-24T21:00:17.681301Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287942, Sender [2:811:2642], Recipient [2:624:2529]: NKikimrTx.TEvStreamClearancePending TxId: 281474976715661 2025-06-24T21:00:17.681346Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvTxProcessing::TEvStreamClearancePending 2025-06-24T21:00:17.681449Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287940, Sender [2:811:2642], Recipient [2:624:2529]: NKikimrTx.TEvStreamClearanceResponse TxId: 281474976715661 Cleared: true 2025-06-24T21:00:17.681486Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, processing event TEvTxProcessing::TEvStreamClearanceResponse 2025-06-24T21:00:17.681573Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:624:2529], Recipient [2:624:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:00:17.681605Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:00:17.681669Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:00:17.681714Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: 
GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:00:17.681770Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-24T21:00:17.681811Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715661] at 72075186224037888 on unit WaitForStreamClearance 2025-06-24T21:00:17.681860Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:156: Got stream clearance for [0:281474976715661] at 72075186224037888 2025-06-24T21:00:17.681903Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715661] at 72075186224037888 is Executed 2025-06-24T21:00:17.681949Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715661] at 72075186224037888 executing on unit WaitForStreamClearance 2025-06-24T21:00:17.681993Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715661] at 72075186224037888 to execution unit ReadTableScan 2025-06-24T21:00:17.682033Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715661] at 72075186224037888 on unit ReadTableScan 2025-06-24T21:00:17.682284Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715661] at 72075186224037888 is Continue 2025-06-24T21:00:17.682332Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:00:17.682372Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-24T21:00:17.682412Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:00:17.682448Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:00:17.682515Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:00:17.682976Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [2:817:2647], Recipient [2:624:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-24T21:00:17.683020Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor >> TRegisterCheckTest::ShouldRegisterCheckSameGeneration [GOOD] >> TRegisterCheckTest::ShouldRegisterCheckNewGenerationAndTransact [GOOD] >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration2 [GOOD] >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails >> TRegisterCheckTest::ShouldRegisterCheckSameGenerationAndTransact [GOOD] >> TFqYdbTest::ShouldStatusToIssuesProcessExceptions >> TFqYdbTest::ShouldStatusToIssuesProcessExceptions [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLAlterSettings::test_case [GOOD] >> TRegisterCheckTest::ShouldRegisterCheckNextGeneration [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] >> 
TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails [GOOD] >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails2 ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_logged_when_unauthorized [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/2btm/003728/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk15/testing_out_stuff/test_auditlog.py.test_dml_requests_logged_when_unauthorized/audit.txt 2025-06-24T21:00:06.754190Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-24T21:00:06.754146Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"insert into `/Root/test_auditlog.py/test-table` (id, value) values (100, 100), (101, 101)","start_time":"2025-06-24T21:00:06.721133Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-24T21:00:06.901854Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-24T21:00:06.901815Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"delete from `/Root/test_auditlog.py/test-table` where id = 100 or id = 101","start_time":"2025-06-24T21:00:06.868418Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-24T21:00:07.036952Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-24T21:00:07.036906Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"select id from `/Root/test_auditlog.py/test-table`","start_time":"2025-06-24T21:00:07.013982Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-24T21:00:07.174837Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-24T21:00:07.174801Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-24T21:00:07.149190Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-24T21:00:07.323015Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-24T21:00:07.322977Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"replace into `/Root/test_auditlog.py/test-table` (id, value) values (2, 3), (3, 3)","start_time":"2025-06-24T21:00:07.290553Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-24T21:00:07.459640Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-24T21:00:07.459602Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"upsert into `/Root/test_auditlog.py/test-table` (id, value) values (4, 4), (5, 5)","start_time":"2025-06-24T21:00:07.437475Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |92.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> 
TFqYdbTest::ShouldStatusToIssuesProcessEmptyIssues [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_bad_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/2btm/00370f/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk2/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_no_auth-_bad_dynconfig/audit.txt 2025-06-24T21:00:07.602999Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"{none}","remote_address":"127.0.0.1","status":"ERROR","subject":"{none}","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails2 [GOOD] >> TRegisterCheckTest::ShouldRegisterCheckNewGeneration >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration [GOOD] |92.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckNewGenerationAndTransact [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] |92.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckSameGeneration [GOOD] |92.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TFqYdbTest::ShouldStatusToIssuesProcessExceptions [GOOD] |92.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration2 [GOOD] |92.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckSameGenerationAndTransact [GOOD] |92.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckNextGeneration [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> TRegisterCheckTest::ShouldRegisterCheckNewGeneration [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[insert] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/2btm/0036d7/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk18/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.insert/audit.txt 2025-06-24T21:00:10.375528Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-24T21:00:10.375477Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"insert into `/Root/test_auditlog.py/test-table` (id, value) values (100, 100), (101, 101)","start_time":"2025-06-24T21:00:10.311182Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[delete] [GOOD] Test command err: AAA 
/home/runner/.ya/build/build_root/2btm/0036e4/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk17/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.delete/audit.txt 2025-06-24T21:00:10.421061Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-24T21:00:10.421014Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"delete from `/Root/test_auditlog.py/test-table` where id = 100 or id = 101","start_time":"2025-06-24T21:00:10.236225Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_good_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/2btm/00365f/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk3/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_no_auth-_good_dynconfig/audit.txt 2025-06-24T21:00:15.170072Z: {"sanitized_token":"{none}","subject":"{none}","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_cloud_ids_are_logged[attrs1] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/2btm/0036ad/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk9/testing_out_stuff/test_auditlog.py.test_cloud_ids_are_logged.attrs1/audit.txt 2025-06-24T21:00:11.658412Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-24T21:00:11.658371Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-24T21:00:11.424084Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","folder_id":"folder-id-B","component":"grpc-proxy"} >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] |92.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails2 [GOOD] |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] |92.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[upsert] [GOOD] Test command err: AAA 
/home/runner/.ya/build/build_root/2btm/00364d/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk22/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.upsert/audit.txt 2025-06-24T21:00:13.749980Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-24T21:00:13.749912Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"upsert into `/Root/test_auditlog.py/test-table` (id, value) values (4, 4), (5, 5)","start_time":"2025-06-24T21:00:13.644291Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_logged_when_sid_is_unexpected [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/2btm/00366e/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk14/testing_out_stuff/test_auditlog.py.test_dml_requests_logged_when_sid_is_unexpected/audit.txt 2025-06-24T21:00:13.668307Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-24T21:00:13.668256Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"insert into `/Root/test_auditlog.py/test-table` (id, value) values (100, 100), (101, 101)","start_time":"2025-06-24T21:00:13.552151Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-24T21:00:13.998020Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-24T21:00:13.997990Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"delete from `/Root/test_auditlog.py/test-table` where id = 100 or id = 101","start_time":"2025-06-24T21:00:13.787022Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-24T21:00:14.245489Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-24T21:00:14.245454Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"select id from `/Root/test_auditlog.py/test-table`","start_time":"2025-06-24T21:00:14.114224Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-24T21:00:14.525102Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-24T21:00:14.525064Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 
1","start_time":"2025-06-24T21:00:14.364577Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-24T21:00:14.732671Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-24T21:00:14.732619Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"replace into `/Root/test_auditlog.py/test-table` (id, value) values (2, 3), (3, 3)","start_time":"2025-06-24T21:00:14.637527Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-24T21:00:14.940299Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-24T21:00:14.940264Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"upsert into `/Root/test_auditlog.py/test-table` (id, value) values (4, 4), (5, 5)","start_time":"2025-06-24T21:00:14.848961Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] |92.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckNewGeneration [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] |92.8%| [TA] $(B)/ydb/core/fq/libs/ydb/ut/test-results/unittest/{meta.json ... results_accumulator.log} |92.8%| [TA] {RESULT} $(B)/ydb/core/fq/libs/ydb/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test |92.8%| [TA] $(B)/ydb/tests/functional/audit/test-results/py3test/{meta.json ... results_accumulator.log} |92.8%| [TA] {RESULT} $(B)/ydb/tests/functional/audit/test-results/py3test/{meta.json ... 
results_accumulator.log}
>> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--]
>> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC]
>> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC]
>> test_s3.py::TestYdbS3TTL::test_s3[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD]
>> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC]
>> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Date-pk_types18-all_types18-index18-Date--]
>> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--]
>> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD]
>> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC]
>> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--]
>> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_all_types-pk_types12-all_types12-index12---]
>> test_s3.py::TestYdbS3TTL::test_s3[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD]
>> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast, DROP COLUMN stats-`.metadata/script_executions`] [GOOD]
>> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax-`.metadata/script_executions`] [GOOD]
|92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD]
>> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD]
>> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast-`.metadata/script_executions`] [GOOD]
|92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD]
>> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[DROP TABLE {}-`.metadata/script_executions`] [GOOD]
>> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD]
|92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD]
>> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD]
>> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD]
>>
test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_all_types-pk_types12-all_types12-index12---] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> DataShardTxOrder::RandomPoints_DelayRS_Reboot_Dirty [GOOD] >> test_ttl.py::TestTTLValueSinceUnixEpoch::test_case [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::RandomPoints_DelayRS_Reboot_Dirty [GOOD] Test command err: 2025-06-24T20:59:20.735048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T20:59:20.735136Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T20:59:20.747876Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T20:59:20.777107Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T20:59:20.777699Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T20:59:20.777988Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T20:59:20.837186Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T20:59:20.867581Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T20:59:20.867908Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 
2025-06-24T20:59:20.873306Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T20:59:20.873403Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T20:59:20.873458Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T20:59:20.876267Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T20:59:20.876418Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T20:59:20.876517Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T20:59:21.011918Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T20:59:21.046989Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T20:59:21.049654Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T20:59:21.049847Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T20:59:21.049889Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T20:59:21.049950Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T20:59:21.050000Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T20:59:21.050187Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:59:21.050267Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:59:21.051523Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T20:59:21.051641Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T20:59:21.051832Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T20:59:21.051889Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T20:59:21.051968Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T20:59:21.052010Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T20:59:21.052047Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T20:59:21.052086Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T20:59:21.052150Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T20:59:21.052277Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:59:21.052318Z node 1 
:TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:59:21.052374Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T20:59:21.061976Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\001J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T20:59:21.062066Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T20:59:21.062152Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T20:59:21.062433Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T20:59:21.062500Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T20:59:21.062574Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T20:59:21.062705Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T20:59:21.062746Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T20:59:21.062782Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T20:59:21.062840Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T20:59:21.063507Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T20:59:21.063557Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T20:59:21.063598Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T20:59:21.063648Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T20:59:21.063705Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T20:59:21.063746Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T20:59:21.063797Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T20:59:21.063830Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T20:59:21.063864Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T20:59:21.084177Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: 
TTxProposeTransactionBase::Complete at 9437184 2025-06-24T20:59:21.084277Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T20:59:21.084338Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T20:59:21.084381Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T20:59:21.084471Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T20:59:21.089340Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T20:59:21.089408Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T20:59:21.089457Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T20:59:21.089607Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T20:59:21.089640Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T20:59:21.089805Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T20:59:21.089872Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T20:59:21.089932Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T20:59:21.089971Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T20:59:21.094820Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T20:59:21.094907Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T20:59:21.095175Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T20:59:21.095223Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T20:59:21.095285Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T20:59:21.095325Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T20:59:21.095379Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T20:59:21.095431Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T20:59:21.095473Z node 
1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 37184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:00:58.740550Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:00:58.741103Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:00:58.741162Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:22] at 9437184 on unit CompleteOperation 2025-06-24T21:00:58.741219Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 22] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:00:58.741267Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:00:58.741445Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:00:58.741485Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:23] at 9437184 on unit CompleteOperation 2025-06-24T21:00:58.741537Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 23] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:00:58.741577Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:00:58.741785Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:00:58.741825Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:24] at 9437184 on unit CompleteOperation 2025-06-24T21:00:58.741876Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 24] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:00:58.741917Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:00:58.742107Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:00:58.742145Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:25] at 9437184 on unit CompleteOperation 2025-06-24T21:00:58.742195Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 25] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:00:58.742233Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:00:58.742459Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:00:58.742500Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:26] at 9437184 on unit CompleteOperation 2025-06-24T21:00:58.742550Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 26] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:00:58.742592Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 
2025-06-24T21:00:58.742813Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:00:58.742854Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:27] at 9437184 on unit CompleteOperation 2025-06-24T21:00:58.742913Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 27] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:00:58.742955Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:00:58.743213Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:00:58.743263Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:28] at 9437184 on unit CompleteOperation 2025-06-24T21:00:58.743313Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 28] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:00:58.743352Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:00:58.743612Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:00:58.743650Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:29] at 9437184 on unit CompleteOperation 2025-06-24T21:00:58.743700Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 29] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:00:58.743768Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:00:58.744024Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:00:58.744062Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:30] at 9437184 on unit CompleteOperation 2025-06-24T21:00:58.744109Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 30] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:00:58.744149Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:00:58.744317Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:00:58.744353Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:31] at 9437184 on unit CompleteOperation 2025-06-24T21:00:58.744415Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 31] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:00:58.744453Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:00:58.744674Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:00:58.744713Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:32] at 9437184 on unit CompleteOperation 2025-06-24T21:00:58.744760Z node 32 
:TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 32] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:00:58.744815Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:00:58.745009Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:00:58.745044Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:33] at 9437184 on unit CompleteOperation 2025-06-24T21:00:58.745092Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 33] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:00:58.745133Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:00:58.745339Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:00:58.745377Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:34] at 9437184 on unit CompleteOperation 2025-06-24T21:00:58.745425Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 34] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:00:58.745462Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:00:58.745625Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:00:58.745662Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:35] at 9437184 on unit CompleteOperation 2025-06-24T21:00:58.745709Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 35] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:00:58.745746Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:00:58.745976Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:00:58.746016Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:36] at 9437184 on unit CompleteOperation 2025-06-24T21:00:58.746060Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 36] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:00:58.746099Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:00:58.746340Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:00:58.746381Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:37] at 9437184 on unit CompleteOperation 2025-06-24T21:00:58.746430Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 37] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:00:58.746468Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 
2025-06-24T21:00:58.747054Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:804:2727], Recipient [32:347:2312]: {TEvReadSet step# 1000004 txid# 5 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 2} 2025-06-24T21:00:58.747119Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:00:58.747183Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 5 2025-06-24T21:00:58.747320Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:804:2727], Recipient [32:347:2312]: {TEvReadSet step# 1000004 txid# 8 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 5} 2025-06-24T21:00:58.747361Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:00:58.747398Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 8 expect 28 30 27 28 31 31 27 28 27 27 28 30 30 31 30 31 28 30 31 31 24 31 30 10 - 31 24 22 - 0 0 - actual 28 30 27 28 31 31 27 28 27 27 28 30 30 31 30 31 28 30 31 31 24 31 30 10 - 31 24 22 - 0 0 - interm - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] >> test_ttl.py::TestTTLOnIndexedTable::test_case [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] >> KqpNewEngine::BatchUpload >> TGroupMapperTest::SanitizeGroupTest3dc >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] >> test_ttl.py::TestTTLDefaultEnv::test_case [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> 
test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC]
>> DataShardTxOrder::ImmediateBetweenOnline
>> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD]
>> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC]
|92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLValueSinceUnixEpoch::test_case [GOOD]
>> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD]
|92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD]
|92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLOnIndexedTable::test_case [GOOD]
|92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD]
|92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast, DROP COLUMN stats-`.metadata/script_executions`] [GOOD]
|92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[DROP TABLE {}-`.metadata/script_executions`] [GOOD]
|92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax-`.metadata/script_executions`] [GOOD]
|92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_all_types-pk_types12-all_types12-index12---] [GOOD]
|92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast-`.metadata/script_executions`] [GOOD]
>> DataShardTxOrder::RandomPoints_ReproducerDelayData1
>> TGroupMapperTest::MakeDisksForbidden [GOOD]
|92.9%| [TA] $(B)/ydb/tests/functional/script_execution/test-results/py3test/{meta.json ... results_accumulator.log}
|92.9%| [TA] {RESULT} $(B)/ydb/tests/functional/script_execution/test-results/py3test/{meta.json ...
results_accumulator.log} |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::MakeDisksForbidden [GOOD] >> DataShardTxOrder::ImmediateBetweenOnline_oo8 >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] >> DataShardOutOfOrder::TestShardSnapshotReadNoEarlyReply >> KqpNewEngine::BatchUpload [GOOD] >> KqpNewEngine::Aggregate >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLDefaultEnv::test_case [GOOD] |92.9%| [TA] $(B)/ydb/tests/functional/ttl/test-results/py3test/{meta.json ... results_accumulator.log} |92.9%| [TA] {RESULT} $(B)/ydb/tests/functional/ttl/test-results/py3test/{meta.json ... results_accumulator.log} >> DataShardTxOrder::ImmediateBetweenOnline [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_15_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 15] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ImmediateBetweenOnline [GOOD] Test command err: 2025-06-24T21:01:07.818447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:01:07.818510Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:01:07.820333Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:01:07.833267Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:01:07.833757Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:01:07.834034Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:01:07.882942Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:01:07.890753Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:01:07.891005Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:01:07.892656Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:01:07.892740Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:01:07.892808Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:01:07.893159Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:01:07.893253Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:01:07.893319Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:01:07.975735Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:01:08.018492Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:01:08.018690Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:01:08.018789Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:01:08.018827Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:01:08.018863Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:01:08.018910Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:08.019063Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:08.019112Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:08.019531Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:01:08.019626Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:01:08.019806Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:01:08.019863Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:01:08.019909Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:01:08.019943Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:01:08.019977Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:01:08.020007Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:01:08.020058Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:08.020156Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:08.020194Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:08.020243Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:01:08.023490Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME 
SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:01:08.023557Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:01:08.023638Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:01:08.023809Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:01:08.023857Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:01:08.023920Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:01:08.023959Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:01:08.023992Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:01:08.024026Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:01:08.024064Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:01:08.024396Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:01:08.024430Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:01:08.024463Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:01:08.024614Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:01:08.024671Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:01:08.024703Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:01:08.024745Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:01:08.024787Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:01:08.024846Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:01:08.044098Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:01:08.044223Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:01:08.044273Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:01:08.044321Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, 
status: PREPARED 2025-06-24T21:01:08.044448Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:01:08.045220Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:08.045285Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:08.045331Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:01:08.045495Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:01:08.045535Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:01:08.045701Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:01:08.045781Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:01:08.045826Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:01:08.045866Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:01:08.049925Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:01:08.050018Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:08.050337Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:08.050390Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:08.050459Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:01:08.050506Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:01:08.050568Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:01:08.050622Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:01:08.050673Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
[1000005:152] at 9437186 on unit CompletedOperations 2025-06-24T21:01:15.487681Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:152] at 9437186 is Executed 2025-06-24T21:01:15.487731Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:152] at 9437186 executing on unit CompletedOperations 2025-06-24T21:01:15.487792Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000005:152] at 9437186 has finished 2025-06-24T21:01:15.487832Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:01:15.487861Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437186 2025-06-24T21:01:15.487890Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437186 has no attached operations 2025-06-24T21:01:15.487917Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437186 2025-06-24T21:01:15.488212Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 110 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 36} 2025-06-24T21:01:15.488296Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:15.488339Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 110 2025-06-24T21:01:15.488423Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 97} 2025-06-24T21:01:15.488447Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:15.488486Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 149 2025-06-24T21:01:15.488599Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 104 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 34} 2025-06-24T21:01:15.488629Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:15.488659Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 104 2025-06-24T21:01:15.488740Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 113 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 37} 2025-06-24T21:01:15.488778Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:15.488823Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 113 2025-06-24T21:01:15.488941Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 
269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 116 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 38} 2025-06-24T21:01:15.488971Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:15.488994Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 116 2025-06-24T21:01:15.489050Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 119 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 39} 2025-06-24T21:01:15.489074Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:15.489110Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 119 2025-06-24T21:01:15.489204Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 122 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 40} 2025-06-24T21:01:15.489229Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:15.489250Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 122 2025-06-24T21:01:15.489301Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 125 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 41} 2025-06-24T21:01:15.489345Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:15.489375Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 125 2025-06-24T21:01:15.489455Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 128 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 42} 2025-06-24T21:01:15.489479Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:15.489506Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 128 2025-06-24T21:01:15.489581Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 131 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 43} 2025-06-24T21:01:15.489618Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:15.489664Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 131 2025-06-24T21:01:15.489727Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 
269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 134 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 44} 2025-06-24T21:01:15.489755Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:15.489784Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 134 2025-06-24T21:01:15.489895Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 137 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 45} 2025-06-24T21:01:15.489927Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:15.489953Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 137 2025-06-24T21:01:15.490063Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 2025-06-24T21:01:15.490092Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:15.490135Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 140 2025-06-24T21:01:15.490214Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2025-06-24T21:01:15.490239Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:15.490278Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 143 2025-06-24T21:01:15.490369Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2025-06-24T21:01:15.490396Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:15.490418Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 146 2025-06-24T21:01:15.490489Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2025-06-24T21:01:15.490546Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:15.490583Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 149 2025-06-24T21:01:15.515888Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: 
TTxProgressTransaction::Complete at 9437186 2025-06-24T21:01:15.515945Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:152] at 9437186 on unit CompleteOperation 2025-06-24T21:01:15.516065Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 152] from 9437186 at tablet 9437186 send result to client [1:102:2135], exec latency: 1 ms, propose latency: 3 ms 2025-06-24T21:01:15.516146Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-24T21:01:15.516196Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-24T21:01:15.516484Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-24T21:01:15.516523Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:15.516562Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 152 >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_4_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 4] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_compression-COMPRESSION = "zstd"] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_6_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 6] >> KqpNewEngine::PureExpr >> TBlobStorageControllerGrouperTest::when_one_server_per_rack_in_4_racks_then_can_construct_group_with_4_domains [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::when_one_server_per_rack_in_4_racks_then_can_construct_group_with_4_domains [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> DataShardTxOrder::ZigZag_oo >> TGroupMapperTest::SanitizeGroupTest3dc [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> DataShardTxOrder::ImmediateBetweenOnline_oo8 [GOOD] >> KqpNewEngine::Aggregate [GOOD] >> KqpNewEngine::AggregateTuple |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::SanitizeGroupTest3dc [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> Bloom::Conf [GOOD] >> Bloom::Hashes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ImmediateBetweenOnline_oo8 [GOOD] Test command err: 
2025-06-24T21:01:14.576870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:01:14.576939Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:01:14.580298Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:01:14.595004Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:01:14.595649Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:01:14.595963Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:01:14.647785Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:01:14.656832Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:01:14.657100Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:01:14.659212Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:01:14.659302Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:01:14.659360Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:01:14.659756Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:01:14.659862Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:01:14.659945Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:01:14.775955Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:01:14.868257Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:01:14.868494Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:01:14.868620Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:01:14.868667Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:01:14.868709Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:01:14.868766Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:14.868960Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:14.869018Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:14.869323Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:01:14.869429Z node 1 
:TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:01:14.869613Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:01:14.869685Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:01:14.869736Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:01:14.869777Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:01:14.869818Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:01:14.869856Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:01:14.869919Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:14.870028Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:14.870067Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:14.870128Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:01:14.873547Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:01:14.873638Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:01:14.873739Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:01:14.873927Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:01:14.873984Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:01:14.874052Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:01:14.874104Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:01:14.874140Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:01:14.874178Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:01:14.874228Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:01:14.874589Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is 
DelayCompleteNoMoreRestarts 2025-06-24T21:01:14.874631Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:01:14.874671Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:01:14.874730Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:01:14.874796Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:01:14.874837Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:01:14.874910Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:01:14.874950Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:01:14.874977Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:01:14.894560Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:01:14.894672Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:01:14.894723Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:01:14.894770Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:01:14.894869Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:01:14.895518Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:14.895578Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:14.895630Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:01:14.895771Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:01:14.895807Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:01:14.895967Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:01:14.896024Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:01:14.896064Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:01:14.896126Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:01:14.900691Z node 1 :TX_DATASHARD DEBUG: 
datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:01:14.900786Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:14.901113Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:14.901181Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:14.901266Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:01:14.901320Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:01:14.901359Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:01:14.901410Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:01:14.901457Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... , processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:22.341713Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 122 2025-06-24T21:01:22.341824Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 2025-06-24T21:01:22.341855Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:22.341879Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 140 2025-06-24T21:01:22.341952Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2025-06-24T21:01:22.341987Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:22.342020Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 143 2025-06-24T21:01:22.342096Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2025-06-24T21:01:22.342120Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:22.342143Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 146 2025-06-24T21:01:22.342229Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: 
{TEvReadSet step# 1000005 txid# 125 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 41} 2025-06-24T21:01:22.342259Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:22.342301Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 125 2025-06-24T21:01:22.342396Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2025-06-24T21:01:22.342445Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:22.342475Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 149 2025-06-24T21:01:22.342550Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-24T21:01:22.342596Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:22.342626Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 152 2025-06-24T21:01:22.342759Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 128 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 42} 2025-06-24T21:01:22.342790Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:22.342816Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 128 2025-06-24T21:01:22.342882Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 131 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 43} 2025-06-24T21:01:22.342908Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:22.342948Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 131 2025-06-24T21:01:22.343087Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 134 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 44} 2025-06-24T21:01:22.343122Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:22.352603Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 134 2025-06-24T21:01:22.352991Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: 
{TEvReadSet step# 1000005 txid# 137 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 45} 2025-06-24T21:01:22.353074Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:22.353119Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 137 2025-06-24T21:01:22.353213Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:22.353259Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:149] at 9437184 on unit CompleteOperation 2025-06-24T21:01:22.353342Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 149] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 2 ms, propose latency: 3 ms 2025-06-24T21:01:22.353417Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 97} 2025-06-24T21:01:22.353463Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:22.353689Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:22.353720Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:151] at 9437184 on unit CompleteOperation 2025-06-24T21:01:22.353765Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 151] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 2 ms, propose latency: 3 ms 2025-06-24T21:01:22.353814Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 151 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 98} 2025-06-24T21:01:22.353841Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:22.353951Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:22.353976Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:152] at 9437184 on unit CompleteOperation 2025-06-24T21:01:22.354028Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 152] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 2 ms, propose latency: 3 ms 2025-06-24T21:01:22.354082Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 99} 2025-06-24T21:01:22.354108Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:22.354246Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:22.354292Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:154] at 9437184 on unit CompleteOperation 2025-06-24T21:01:22.354337Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 154] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 2 ms, propose latency: 
3 ms 2025-06-24T21:01:22.354380Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2025-06-24T21:01:22.354404Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:22.354606Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 97} 2025-06-24T21:01:22.354646Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:22.354677Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 149 2025-06-24T21:01:22.354841Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 151 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 98} 2025-06-24T21:01:22.354876Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:22.354899Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 151 2025-06-24T21:01:22.355002Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 99} 2025-06-24T21:01:22.355028Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:22.355070Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 152 2025-06-24T21:01:22.355184Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2025-06-24T21:01:22.355213Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:22.355237Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 154 >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> Bloom::Hashes [GOOD] >> Bloom::Rater >> Bloom::Rater [GOOD] >> Bloom::Dipping |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> Bloom::Dipping [GOOD] >> Bloom::Basics [GOOD] >> Bloom::Stairs >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_8_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 8] >> Bloom::Stairs [GOOD] >> BuildStatsBTreeIndex::Single >> BuildStatsBTreeIndex::Single [GOOD] >> 
BuildStatsBTreeIndex::Single_Slices >> BuildStatsBTreeIndex::Single_Slices [GOOD] >> BuildStatsBTreeIndex::Single_History >> DataShardTxOrder::ZigZag >> BuildStatsBTreeIndex::Single_History [GOOD] >> BuildStatsBTreeIndex::Single_History_Slices >> KqpNewEngine::PureExpr [GOOD] >> KqpNewEngine::PureTxMixedWithDeferred >> BuildStatsBTreeIndex::Single_History_Slices [GOOD] >> BuildStatsBTreeIndex::Single_Groups >> BuildStatsBTreeIndex::Single_Groups [GOOD] >> BuildStatsBTreeIndex::Single_Groups_Slices >> BuildStatsBTreeIndex::Single_Groups_Slices [GOOD] >> BuildStatsBTreeIndex::Single_Groups_History >> BuildStatsBTreeIndex::Single_Groups_History [GOOD] >> BuildStatsBTreeIndex::Single_Groups_History_Slices >> BuildStatsBTreeIndex::Single_Groups_History_Slices [GOOD] >> BuildStatsBTreeIndex::Mixed >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_18_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 18] >> BuildStatsBTreeIndex::Mixed [GOOD] >> BuildStatsBTreeIndex::Mixed_Groups >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_20_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 20] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] >> DataShardTxOrder::RandomPoints_ReproducerDelayData1 [GOOD] >> BuildStatsBTreeIndex::Mixed_Groups [GOOD] >> BuildStatsBTreeIndex::Mixed_Groups_History >> BuildStatsBTreeIndex::Mixed_Groups_History [GOOD] >> BuildStatsFlatIndex::Single >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> BuildStatsFlatIndex::Single [GOOD] >> BuildStatsFlatIndex::Single_Slices ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::RandomPoints_ReproducerDelayData1 [GOOD] Test command err: 2025-06-24T21:01:13.416013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:01:13.416069Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:01:13.428283Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:01:13.448612Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:01:13.449156Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:01:13.449471Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:01:13.541774Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:01:13.581858Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:01:13.582097Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:01:13.583902Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:01:13.583983Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at 
tablet: 9437184 2025-06-24T21:01:13.584035Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:01:13.584464Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:01:13.584562Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:01:13.584661Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:01:13.668332Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:01:13.744048Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:01:13.744271Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:01:13.744391Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:01:13.744429Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:01:13.744470Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:01:13.744511Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:13.751532Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:13.751615Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:13.751938Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:01:13.752058Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:01:13.752215Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:01:13.752253Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:01:13.752309Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:01:13.752366Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:01:13.752401Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:01:13.752440Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:01:13.752494Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:13.752597Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:13.752631Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:13.752683Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, 
clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:01:13.768329Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nK\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\n \000Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:01:13.768412Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:01:13.768516Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:01:13.768730Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:01:13.768782Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:01:13.768851Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:01:13.768902Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:01:13.768942Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:01:13.768978Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:01:13.769050Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:01:13.769416Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:01:13.769461Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:01:13.769500Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:01:13.769550Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:01:13.769612Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:01:13.769652Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:01:13.769698Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:01:13.769733Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:01:13.769762Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:01:13.785522Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:01:13.785624Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:01:13.785665Z node 1 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:01:13.785703Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:01:13.785796Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:01:13.786377Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:13.786430Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:13.786475Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:01:13.786609Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:01:13.786640Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:01:13.786770Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:01:13.786819Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:01:13.786874Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:01:13.786938Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:01:13.790613Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:01:13.790695Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:13.790965Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:13.791012Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:13.791075Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:01:13.791114Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:01:13.791290Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:01:13.791374Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:01:13.791423Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100 ... 
DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-24T21:01:29.435168Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:29.435201Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:508] at 9437184 on unit CompleteOperation 2025-06-24T21:01:29.435261Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 508] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:01:29.435303Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 508 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 3} 2025-06-24T21:01:29.435331Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:29.435434Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-24T21:01:29.435455Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-24T21:01:29.435475Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-24T21:01:29.435496Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:29.435517Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:509] at 9437184 on unit CompleteOperation 2025-06-24T21:01:29.435559Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 509] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:01:29.435620Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 509 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 4} 2025-06-24T21:01:29.435648Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:29.435767Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-24T21:01:29.435790Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:29.435811Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:510] at 9437184 on unit CompleteOperation 2025-06-24T21:01:29.435847Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 510] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:01:29.435908Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 510 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 5} 2025-06-24T21:01:29.435932Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:29.436029Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:29.436054Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:511] at 9437184 on unit CompleteOperation 2025-06-24T21:01:29.436085Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 511] from 
9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:01:29.436135Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 511 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 6} 2025-06-24T21:01:29.436164Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:29.436278Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:29.436301Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:512] at 9437184 on unit CompleteOperation 2025-06-24T21:01:29.436335Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 512] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:01:29.436368Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 512 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 7} 2025-06-24T21:01:29.436389Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:29.436499Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:29.436540Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:516] at 9437184 on unit FinishPropose 2025-06-24T21:01:29.436618Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 516 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: COMPLETE 2025-06-24T21:01:29.436705Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:29.436879Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:29.436922Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:514] at 9437184 on unit CompleteOperation 2025-06-24T21:01:29.436973Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 514] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 2 ms, propose latency: 4 ms 2025-06-24T21:01:29.437016Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 514 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 8} 2025-06-24T21:01:29.437042Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:29.437156Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:29.437179Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:515] at 9437184 on unit CompleteOperation 2025-06-24T21:01:29.437205Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 515] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:01:29.437228Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:29.437426Z node 1 
:TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 506 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 1} 2025-06-24T21:01:29.437477Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:29.437517Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 506 2025-06-24T21:01:29.437872Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 507 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 2} 2025-06-24T21:01:29.437923Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:29.437952Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 507 2025-06-24T21:01:29.438084Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 508 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 3} 2025-06-24T21:01:29.438115Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:29.438139Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 508 2025-06-24T21:01:29.438791Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 509 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 4} 2025-06-24T21:01:29.438837Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:29.438864Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 509 2025-06-24T21:01:29.439062Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 510 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 5} 2025-06-24T21:01:29.439093Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:29.439116Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 510 2025-06-24T21:01:29.439307Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 511 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 6} 2025-06-24T21:01:29.439345Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:29.439384Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 511 2025-06-24T21:01:29.439628Z node 1 :TX_DATASHARD 
TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 512 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 7} 2025-06-24T21:01:29.439659Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:29.439695Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 512 2025-06-24T21:01:29.439850Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 514 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 8} 2025-06-24T21:01:29.439884Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:01:29.439907Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 514 expect 5 6 - 6 6 7 - - - - - - - - - - - - - - - - - - - - - - - - - - actual 5 6 - 6 6 7 - - - - - - - - - - - - - - - - - - - - - - - - - - interm 5 6 - 6 6 - - - - - - - - - - - - - - - - - - - - - - - - - - - >> BuildStatsFlatIndex::Single_Slices [GOOD] >> BuildStatsFlatIndex::Single_History >> BuildStatsFlatIndex::Single_History [GOOD] >> BuildStatsFlatIndex::Single_History_Slices >> BuildStatsFlatIndex::Single_History_Slices [GOOD] >> BuildStatsFlatIndex::Single_Groups >> BuildStatsFlatIndex::Single_Groups [GOOD] >> BuildStatsFlatIndex::Single_Groups_Slices >> KqpNewEngine::AggregateTuple [GOOD] >> KqpNewEngine::AsyncIndexUpdate >> BuildStatsFlatIndex::Single_Groups_Slices [GOOD] >> BuildStatsFlatIndex::Single_Groups_History >> BuildStatsFlatIndex::Single_Groups_History [GOOD] >> BuildStatsFlatIndex::Single_Groups_History_Slices >> BuildStatsFlatIndex::Single_Groups_History_Slices [GOOD] >> BuildStatsFlatIndex::Mixed >> BuildStatsFlatIndex::Mixed [GOOD] >> BuildStatsFlatIndex::Mixed_Groups >> BuildStatsFlatIndex::Mixed_Groups [GOOD] >> BuildStatsFlatIndex::Mixed_Groups_History >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> BuildStatsFlatIndex::Mixed_Groups_History [GOOD] >> BuildStatsFlatIndex::Serial >> BuildStatsFlatIndex::Serial [GOOD] >> BuildStatsFlatIndex::Serial_Groups >> BuildStatsFlatIndex::Serial_Groups [GOOD] >> BuildStatsFlatIndex::Serial_Groups_History >> BuildStatsFlatIndex::Serial_Groups_History [GOOD] >> BuildStatsHistogram::Single >> KqpNewEngine::PureTxMixedWithDeferred [GOOD] >> KqpNewEngine::PrunePartitionsByLiteral >> DataShardOutOfOrder::TestShardSnapshotReadNoEarlyReply [GOOD] >> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLock+EvWrite >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_2_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 2] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> 
test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> DataShardTxOrder::ZigZag [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ZigZag [GOOD] Test command err: 2025-06-24T21:01:28.398944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:01:28.399007Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:01:28.401140Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:01:28.427118Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:01:28.427658Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:01:28.427942Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:01:28.508714Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:01:28.516510Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:01:28.516742Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:01:28.518353Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:01:28.518426Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:01:28.518476Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:01:28.518855Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:01:28.518951Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:01:28.519012Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:01:28.624079Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:01:28.671854Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:01:28.672131Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:01:28.672301Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:01:28.672367Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:01:28.672425Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:01:28.672485Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:28.672700Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: 
NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:28.672786Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:28.673198Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:01:28.673319Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:01:28.673540Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:01:28.673608Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:01:28.673679Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:01:28.673739Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:01:28.673788Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:01:28.673831Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:01:28.673902Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:28.674018Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:28.674070Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:28.674139Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:01:28.677548Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:01:28.677631Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:01:28.677737Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:01:28.677947Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:01:28.678016Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:01:28.678084Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:01:28.678136Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:01:28.678185Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:01:28.678224Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:01:28.678259Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:01:28.678711Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:01:28.678756Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:01:28.678795Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:01:28.678850Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:01:28.678910Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:01:28.678951Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:01:28.679007Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:01:28.679043Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:01:28.679077Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:01:28.691597Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:01:28.691703Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:01:28.691756Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:01:28.691804Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:01:28.691894Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:01:28.692476Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:28.692539Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:28.692607Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:01:28.692747Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:01:28.692784Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:01:28.692941Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:01:28.692986Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:01:28.693028Z 
node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:01:28.693064Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:01:28.712462Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:01:28.712552Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:28.712831Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:28.712880Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:28.712944Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:01:28.712988Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:01:28.713027Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:01:28.713072Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:01:28.713122Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 7185 executing on unit CompletedOperations 2025-06-24T21:01:37.762625Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000016:45] at 9437185 has finished 2025-06-24T21:01:37.762662Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:01:37.762694Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-24T21:01:37.762729Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437185 has no attached operations 2025-06-24T21:01:37.762763Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437185 2025-06-24T21:01:37.763012Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:238:2229], Recipient [2:238:2229]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:37.763049Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:37.763100Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:01:37.763130Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:01:37.771281Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:01:37.771346Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000016:45] in PlanQueue unit at 9437184 2025-06-24T21:01:37.771484Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit 
PlanQueue 2025-06-24T21:01:37.771534Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-24T21:01:37.771565Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit PlanQueue 2025-06-24T21:01:37.771595Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit LoadTxDetails 2025-06-24T21:01:37.771624Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit LoadTxDetails 2025-06-24T21:01:37.772417Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 9437184 loaded tx from db 1000016:45 keys extracted: 2 2025-06-24T21:01:37.772467Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-24T21:01:37.772497Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit LoadTxDetails 2025-06-24T21:01:37.772522Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit FinalizeDataTxPlan 2025-06-24T21:01:37.772549Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit FinalizeDataTxPlan 2025-06-24T21:01:37.772583Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-24T21:01:37.772620Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit FinalizeDataTxPlan 2025-06-24T21:01:37.772651Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-24T21:01:37.772672Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit BuildAndWaitDependencies 2025-06-24T21:01:37.772726Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [1000016:45] is the new logically complete end at 9437184 2025-06-24T21:01:37.772753Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [1000016:45] is the new logically incomplete end at 9437184 2025-06-24T21:01:37.772778Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [1000016:45] at 9437184 2025-06-24T21:01:37.772827Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-24T21:01:37.772848Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-24T21:01:37.772866Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit BuildDataTxOutRS 2025-06-24T21:01:37.772887Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit BuildDataTxOutRS 2025-06-24T21:01:37.772938Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-24T21:01:37.772959Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit BuildDataTxOutRS 2025-06-24T21:01:37.772980Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit StoreAndSendOutRS 
2025-06-24T21:01:37.773003Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit StoreAndSendOutRS 2025-06-24T21:01:37.773027Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-24T21:01:37.773048Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit StoreAndSendOutRS 2025-06-24T21:01:37.773069Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit PrepareDataTxInRS 2025-06-24T21:01:37.773095Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit PrepareDataTxInRS 2025-06-24T21:01:37.773123Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-24T21:01:37.773143Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit PrepareDataTxInRS 2025-06-24T21:01:37.773163Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit LoadAndWaitInRS 2025-06-24T21:01:37.773200Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit LoadAndWaitInRS 2025-06-24T21:01:37.773227Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-24T21:01:37.773248Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit LoadAndWaitInRS 2025-06-24T21:01:37.773267Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit ExecuteDataTx 2025-06-24T21:01:37.773286Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit ExecuteDataTx 2025-06-24T21:01:37.773671Z node 2 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000016:45] at tablet 9437184 with status COMPLETE 2025-06-24T21:01:37.773733Z node 2 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000016:45] at 9437184: {NSelectRow: 2, NSelectRange: 0, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 2, SelectRowBytes: 16, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-24T21:01:37.773790Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-24T21:01:37.773822Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit ExecuteDataTx 2025-06-24T21:01:37.773847Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit CompleteOperation 2025-06-24T21:01:37.773873Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit CompleteOperation 2025-06-24T21:01:37.774068Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is DelayComplete 2025-06-24T21:01:37.774097Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit CompleteOperation 2025-06-24T21:01:37.774153Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 
9437184 to execution unit CompletedOperations 2025-06-24T21:01:37.774183Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit CompletedOperations 2025-06-24T21:01:37.774215Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-24T21:01:37.774237Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit CompletedOperations 2025-06-24T21:01:37.774262Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000016:45] at 9437184 has finished 2025-06-24T21:01:37.774288Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:01:37.774312Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:01:37.774339Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:01:37.774364Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:01:37.803692Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437184 step# 1000016 txid# 45} 2025-06-24T21:01:37.803779Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437184 step# 1000016} 2025-06-24T21:01:37.803850Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:37.803896Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000016:45] at 9437184 on unit CompleteOperation 2025-06-24T21:01:37.803964Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000016 : 45] from 9437184 at tablet 9437184 send result to client [2:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:01:37.804019Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:37.804699Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437185 step# 1000016 txid# 45} 2025-06-24T21:01:37.804745Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437185 step# 1000016} 2025-06-24T21:01:37.804784Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-24T21:01:37.804830Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000016:45] at 9437185 on unit CompleteOperation 2025-06-24T21:01:37.804895Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000016 : 45] from 9437185 at tablet 9437185 send result to client [2:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:01:37.804932Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> BuildStatsHistogram::Single [GOOD] >> BuildStatsHistogram::Single_Slices >> KqpNewEngine::AsyncIndexUpdate [GOOD] >> KqpNewEngine::AutoChooseIndex |92.9%| [TM] {asan, default-linux-x86_64, release} 
ydb/services/metadata/secret/ut/unittest |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLock+EvWrite [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> KqpNewEngine::PrunePartitionsByLiteral [GOOD] >> KqpNewEngine::PrunePartitionsByExpr |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_all_types-pk_types7-all_types7-index7---] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLock+EvWrite [GOOD] Test command err: 2025-06-24T21:01:18.069741Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:01:18.070268Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:01:18.070494Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ddf/r3tmp/tmpQP9Xfh/pdisk_1.dat 2025-06-24T21:01:18.526785Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:01:18.530708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:01:18.651079Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:01:18.657092Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750798874396061 != 1750798874396065 2025-06-24T21:01:18.709592Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:01:18.709740Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:01:18.724479Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:01:18.828691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:01:19.297264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:01:19.430180Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:01:19.652120Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:780:2632], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:01:19.652319Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:790:2637], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:01:19.652440Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:01:19.659527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:01:19.841650Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:794:2640], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:01:19.966037Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:850:2677] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:01:20.669542Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhvxjx1bvey0bm7n6qxk6de, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTdjZTI3YWMtOTQ1MGNiNmYtNzkyZTUwZjEtZWFlOTU4NzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:01:20.762546Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhvxkxs9vf5tgmysnm1shp8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzE0NzQxZmMtOGUxYzUzMy0yMzhkNGY5Ni04ZDlmZTJkZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... waiting for at least 2 blocked commits 2025-06-24T21:01:24.137011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:01:24.137086Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... blocked commit for tablet 72075186224037888 ... blocked commit for tablet 72075186224037888 ... blocked commit for tablet 72075186224037889 ... blocked commit for tablet 72075186224037889 2025-06-24T21:01:34.627459Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhvy17c8cg7tnberkkqg42a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWFhMzA2NTktYWI0MTc5YzMtM2YzM2VhODctODY4NTA3MzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:01:34.821662Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyhvy1hm66kmv7h841y3rsyr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjQzZDMwOTQtZjFlYzc4ZmEtMTkwOWE5ZjEtMWU1OGQ3ZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... shards are ready for read-only immediate transactions ... waiting for at least 2 blocked commits ... blocked commit for tablet 72075186224037888 ... blocked commit for tablet 72075186224037889 2025-06-24T21:01:39.603693Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:01:39.604259Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:01:39.604404Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ddf/r3tmp/tmpIzzrBy/pdisk_1.dat 2025-06-24T21:01:39.952765Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-24T21:01:39.954552Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:01:39.989010Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:01:39.990529Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1750798896058908 != 1750798896058912 2025-06-24T21:01:40.043079Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:01:40.043277Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:01:40.059007Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:01:40.152932Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:01:40.497753Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:01:40.644865Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:01:40.862325Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:779:2632], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:01:40.862430Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:789:2637], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:01:40.862522Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:01:40.868535Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:01:41.035956Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:793:2640], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:01:41.073473Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:849:2677] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:01:41.162316Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhvy7kwb8bk8s4ysfn3ngmq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTBmMjM0ZGItMmVlZDg1YjAtOGNkODQwYTktMTkzZWM3M2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:01:41.261601Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhvy7y87433v3mygcqv11wd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWQ1ODVmMS1kNGNjNDZlLTFmOTE4YzVlLTFhOTFjOTI0, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:01:42.316774Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhvy8771ar1vtsk9yqqz6b7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzBhNTQ5YWQtNjliNzQ0Yi0zZDE5MmRkNi1hZWQ5OWMwNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } } 2025-06-24T21:01:42.817083Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhvy9dpa3413jb4n2b62nnd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzkwY2FjNmQtYzI3MWMxZS01MDY0OTAwMi02NjBkODRkMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:01:43.050647Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhvy9j6f59j9vytwaed4v61, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzBhNTQ5YWQtNjliNzQ0Yi0zZDE5MmRkNi1hZWQ5OWMwNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:01:43.217549Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyhvy9rpa0gfeqk1m4q7kyhh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzBhNTQ5YWQtNjliNzQ0Yi0zZDE5MmRkNi1hZWQ5OWMwNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:01:43.302954Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NzBhNTQ5YWQtNjliNzQ0Yi0zZDE5MmRkNi1hZWQ5OWMwNA==, ActorId: [2:920:2725], ActorState: ExecuteState, TraceId: 01jyhvy9xpba9t3wdf5ced7wyv, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> BuildStatsHistogram::Single_Slices [GOOD] >> BuildStatsHistogram::Single_History |93.0%| [TA] $(B)/ydb/tests/datashard/parametrized_queries/test-results/py3test/{meta.json ... results_accumulator.log} |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TA] {RESULT} $(B)/ydb/tests/datashard/parametrized_queries/test-results/py3test/{meta.json ... 
results_accumulator.log} |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::DeactivatedQueryService >> Secret::Deactivated >> DataShardTxOrder::ZigZag_oo [GOOD] |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::Simple |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ZigZag_oo [GOOD] Test command err: 2025-06-24T21:01:23.216747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:01:23.216840Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:01:23.218814Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:01:23.232345Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:01:23.232931Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:01:23.233245Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:01:23.295520Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:01:23.313161Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:01:23.313431Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:01:23.315921Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:01:23.316018Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:01:23.316081Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 
2025-06-24T21:01:23.316612Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:01:23.316726Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:01:23.316829Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:01:23.392363Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:01:23.446995Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:01:23.447425Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:01:23.447566Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:01:23.447612Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:01:23.447661Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:01:23.447718Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:23.447924Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:23.447980Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:23.448299Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:01:23.448412Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:01:23.448596Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:01:23.448641Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:01:23.448700Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:01:23.448744Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:01:23.448788Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:01:23.448877Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:01:23.448939Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:23.449083Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:23.449130Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:23.449190Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:01:23.452819Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\004\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:01:23.452913Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:01:23.453017Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:01:23.453219Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:01:23.453279Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:01:23.453334Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:01:23.453393Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:01:23.453434Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:01:23.453473Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:01:23.453511Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:01:23.453877Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:01:23.453921Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:01:23.453962Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:01:23.454013Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:01:23.454075Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:01:23.454122Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:01:23.454170Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:01:23.454205Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:01:23.454235Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:01:23.470287Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:01:23.470378Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:01:23.470416Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:01:23.470447Z 
node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:01:23.470524Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:01:23.470973Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:23.471014Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:01:23.471049Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:01:23.471174Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:01:23.471202Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:01:23.471315Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:01:23.471349Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:01:23.471383Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:01:23.471409Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:01:23.475708Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:01:23.475809Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:01:23.476105Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:23.476154Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:23.476220Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:01:23.476265Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:01:23.476303Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:01:23.476344Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:01:23.476403Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
000016:45] at 9437185 is Executed 2025-06-24T21:01:50.931580Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit WaitForPlan 2025-06-24T21:01:50.931618Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit PlanQueue 2025-06-24T21:01:50.931767Z node 6 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 45 at step 1000016 at tablet 9437185 { Transactions { TxId: 45 AckTo { RawX1: 102 RawX2: 25769805911 } } Step: 1000016 MediatorID: 0 TabletID: 9437185 } 2025-06-24T21:01:50.931804Z node 6 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-24T21:01:50.932025Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [6:346:2311], Recipient [6:346:2311]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:50.932062Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:01:50.932106Z node 6 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437185 2025-06-24T21:01:50.932139Z node 6 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:01:50.932166Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-24T21:01:50.932198Z node 6 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000016:45] in PlanQueue unit at 9437185 2025-06-24T21:01:50.932228Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit PlanQueue 2025-06-24T21:01:50.932279Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:01:50.932306Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit PlanQueue 2025-06-24T21:01:50.932332Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit LoadTxDetails 2025-06-24T21:01:50.932360Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit LoadTxDetails 2025-06-24T21:01:50.933089Z node 6 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 9437185 loaded tx from db 1000016:45 keys extracted: 2 2025-06-24T21:01:50.933136Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:01:50.933166Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit LoadTxDetails 2025-06-24T21:01:50.933193Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit FinalizeDataTxPlan 2025-06-24T21:01:50.933222Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit FinalizeDataTxPlan 2025-06-24T21:01:50.933261Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:01:50.933286Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit FinalizeDataTxPlan 2025-06-24T21:01:50.933311Z node 6 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit BuildAndWaitDependencies 2025-06-24T21:01:50.933336Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit BuildAndWaitDependencies 2025-06-24T21:01:50.933388Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [1000016:45] is the new logically complete end at 9437185 2025-06-24T21:01:50.933418Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [1000016:45] is the new logically incomplete end at 9437185 2025-06-24T21:01:50.933448Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [1000016:45] at 9437185 2025-06-24T21:01:50.933492Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:01:50.933525Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit BuildAndWaitDependencies 2025-06-24T21:01:50.933550Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit BuildDataTxOutRS 2025-06-24T21:01:50.933583Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit BuildDataTxOutRS 2025-06-24T21:01:50.933638Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:01:50.933664Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit BuildDataTxOutRS 2025-06-24T21:01:50.933687Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit StoreAndSendOutRS 2025-06-24T21:01:50.933712Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit StoreAndSendOutRS 2025-06-24T21:01:50.933739Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:01:50.933762Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit StoreAndSendOutRS 2025-06-24T21:01:50.933788Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit PrepareDataTxInRS 2025-06-24T21:01:50.933811Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit PrepareDataTxInRS 2025-06-24T21:01:50.933841Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:01:50.933864Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit PrepareDataTxInRS 2025-06-24T21:01:50.933887Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit LoadAndWaitInRS 2025-06-24T21:01:50.933911Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit LoadAndWaitInRS 2025-06-24T21:01:50.933935Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:01:50.933958Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit LoadAndWaitInRS 2025-06-24T21:01:50.933980Z node 6 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit ExecuteDataTx 2025-06-24T21:01:50.934003Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit ExecuteDataTx 2025-06-24T21:01:50.934379Z node 6 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000016:45] at tablet 9437185 with status COMPLETE 2025-06-24T21:01:50.934443Z node 6 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000016:45] at 9437185: {NSelectRow: 2, NSelectRange: 0, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 2, SelectRowBytes: 16, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-24T21:01:50.934503Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:01:50.934534Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit ExecuteDataTx 2025-06-24T21:01:50.934571Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit CompleteOperation 2025-06-24T21:01:50.934598Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit CompleteOperation 2025-06-24T21:01:50.934815Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is DelayComplete 2025-06-24T21:01:50.934848Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit CompleteOperation 2025-06-24T21:01:50.934880Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit CompletedOperations 2025-06-24T21:01:50.934914Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit CompletedOperations 2025-06-24T21:01:50.934948Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:01:50.934977Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit CompletedOperations 2025-06-24T21:01:50.935003Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000016:45] at 9437185 has finished 2025-06-24T21:01:50.935034Z node 6 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:01:50.935063Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-24T21:01:50.935092Z node 6 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437185 has no attached operations 2025-06-24T21:01:50.935122Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437185 2025-06-24T21:01:50.951185Z node 6 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437185 step# 1000016 txid# 45} 2025-06-24T21:01:50.951282Z node 6 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437185 step# 1000016} 2025-06-24T21:01:50.951383Z node 6 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-24T21:01:50.951439Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete 
execution for [1000016:45] at 9437185 on unit CompleteOperation 2025-06-24T21:01:50.951522Z node 6 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000016 : 45] from 9437185 at tablet 9437185 send result to client [6:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:01:50.951583Z node 6 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-24T21:01:50.953858Z node 6 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437184 step# 1000016 txid# 45} 2025-06-24T21:01:50.953922Z node 6 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437184 step# 1000016} 2025-06-24T21:01:50.953971Z node 6 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:01:50.954007Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000016:45] at 9437184 on unit CompleteOperation 2025-06-24T21:01:50.954063Z node 6 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000016 : 45] from 9437184 at tablet 9437184 send result to client [6:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:01:50.954105Z node 6 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> KqpNewEngine::PrunePartitionsByExpr [GOOD] >> KqpNewEngine::PruneWritePartitions+UseSink |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::ValidationQueryService >> Secret::SimpleQueryService >> KqpNewEngine::AutoChooseIndex [GOOD] >> KqpNewEngine::AutoChooseIndexOrderByLimit |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::Validation |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> BuildStatsHistogram::Single_History [GOOD] >> BuildStatsHistogram::Single_History_Slices |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_all_types-pk_types7-all_types7-index7---] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[lz4_compression-COMPRESSION = "lz4"] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_10_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 10] |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |93.0%| [TA] 
$(B)/ydb/tests/datashard/dump_restore/test-results/py3test/{meta.json ... results_accumulator.log} |93.1%| [TA] {RESULT} $(B)/ydb/tests/datashard/dump_restore/test-results/py3test/{meta.json ... results_accumulator.log} >> KqpNewEngine::PruneWritePartitions+UseSink [GOOD] >> KqpNewEngine::PruneWritePartitions-UseSink >> BackupRestoreS3::RestoreTablePartitioningSettings >> BackupRestoreS3::TestAllPrimitiveTypes-PRIMITIVE_TYPE_ID_UNSPECIFIED [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT8 >> EncryptedBackupParamsValidationTestFeatureDisabled::EncryptionParamsSpecifiedExport >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeInvalid [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeDir [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypePersQueueGroup [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeBlockStoreVolume [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeKesus [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeExtSubDomain [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeFileStore [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeColumnStore [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeColumnTable [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeCdcStream >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTable >> BackupRestore::TestAllIndexTypes-EIndexTypeInvalid [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree >> test_s3.py::TestYdbS3TTL::test_s3[table_all_types-pk_types7-all_types7-index7---] [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> KqpNewEngine::AutoChooseIndexOrderByLimit [GOOD] >> KqpNewEngine::AutoChooseIndexOrderByLambda >> BuildStatsHistogram::Single_History_Slices [GOOD] >> BuildStatsHistogram::Ten_Mixed >> BackupPathTest::ExportWholeDatabase >> EncryptedBackupParamsValidationTestFeatureDisabled::EncryptionParamsSpecifiedExport [GOOD] >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonSourcePathSpecified >> Secret::Deactivated [GOOD] >> Secret::DeactivatedQueryService [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTable [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSubDomain [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeRtmrVolume [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSolomonVolume [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTableIndex >> BackupRestoreS3::RestoreTablePartitioningSettings [GOOD] >> BackupRestoreS3::RestoreIndexTablePartitioningSettings >> BackupRestoreS3::TestAllPrimitiveTypes-INT8 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT16 >> KqpNewEngine::PruneWritePartitions-UseSink [GOOD] >> KqpNewEngine::PruneEffectPartitions-UseSink >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree [GOOD] >> BackupRestore::TestAllPrimitiveTypes-PRIMITIVE_TYPE_ID_UNSPECIFIED [GOOD] >> BackupRestore::TestAllPrimitiveTypes-BOOL ------- [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::Deactivated [GOOD] Test command err: 2025-06-24T21:01:56.225248Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:262:2306], 
Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001f5e/r3tmp/tmpCn6IA2/pdisk_1.dat 2025-06-24T21:01:56.596720Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 22470, node 1 TClient is connected to server localhost:21885 2025-06-24T21:01:57.450934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:01:57.490549Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:01:57.495562Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:01:57.495642Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:01:57.495700Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:01:57.496357Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:01:57.496613Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750798911787604 != 1750798911787608 2025-06-24T21:01:57.565075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:01:57.565233Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:01:57.577849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:01:57.818772Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Initialization finished REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 2025-06-24T21:02:10.047530Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:642:2532], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:10.048070Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=
: Error: Execution, code: 1060
:1:50: Error: Executing CREATE OBJECT SECRET
: Error: metadata provider service is disabled ;EXPECTATION=0 FINISHED_REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::DeactivatedQueryService [GOOD] Test command err: 2025-06-24T21:01:55.583325Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:262:2306], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001f83/r3tmp/tmpi7OClA/pdisk_1.dat 2025-06-24T21:01:56.198258Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 4561, node 1 TClient is connected to server localhost:10055 2025-06-24T21:01:57.422434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:01:57.465291Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:01:57.479308Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:01:57.479389Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:01:57.479429Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:01:57.480416Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:01:57.480890Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750798911787385 != 1750798911787389 2025-06-24T21:01:57.564110Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:01:57.564301Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:01:57.577487Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:01:57.805133Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Initialization finished REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 2025-06-24T21:02:09.481272Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:649:2536], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:09.481529Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:639:2531], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:09.484626Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:09.494577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:02:09.520692Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:653:2539], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-24T21:02:09.580577Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:704:2571] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:02:10.043709Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:714:2580], status: GENERIC_ERROR, issues:
: Error: Execution, code: 1060
:1:50: Error: Executing CREATE OBJECT SECRET
: Error: metadata provider service is disabled 2025-06-24T21:02:10.046163Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NjdkMzY3MDktNDk1MGQxNWMtMzE3OWRjYzAtNjRjNzU4MWQ=, ActorId: [1:637:2529], ActorState: ExecuteState, TraceId: 01jyhvz3h08h8rjfr946t8xt30, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=
: Error: Execution, code: 1060
:1:50: Error: Executing CREATE OBJECT SECRET
: Error: metadata provider service is disabled ;EXPECTATION=0 FINISHED_REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeCdcStream [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeReplication [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeBlobDepot [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeExternalTable [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeExternalDataSource [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeResourcePool [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeBackupCollection [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT8 |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> BuildStatsHistogram::Ten_Mixed [GOOD] >> BuildStatsHistogram::Ten_Serial >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonSourcePathSpecified [GOOD] >> BackupPathTest::ExportWholeDatabase [GOOD] >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonDestPrefixSpecified >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeInvalid [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeTable >> BackupPathTest::ExportWholeDatabaseWithEncryption >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_12_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 12] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_13_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 13] >> KqpNewEngine::AutoChooseIndexOrderByLambda [GOOD] >> BackupRestore::TestAllPrimitiveTypes-BOOL [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT8 >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTableIndex [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSequence >> BackupRestoreS3::RestoreIndexTablePartitioningSettings [GOOD] >> BackupRestoreS3::RestoreIndexTableReadReplicasSettings >> BuildStatsHistogram::Ten_Serial [GOOD] >> BuildStatsHistogram::Ten_Crossed >> BackupRestoreS3::TestAllPrimitiveTypes-INT16 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT16 |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_all_types-pk_types7-all_types7-index7---] [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> TExtSubDomainTest::DeclareAndAlterPools-AlterDatabaseCreateHiveFirst-false >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonDestPrefixSpecified [GOOD] >> TExtSubDomainTest::CreateTableInsideThenStopTenantAndForceDeleteSubDomain-AlterDatabaseCreateHiveFirst-false ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::AutoChooseIndexOrderByLambda [GOOD] Test command err: Trying to start YDB, gRPC: 14561, MsgBus: 63795 2025-06-24T21:01:05.455666Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519623867988532214:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:01:05.468420Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031fa/r3tmp/tmpDf1Pm0/pdisk_1.dat 2025-06-24T21:01:06.090092Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519623867988532185:2079] 1750798865442801 != 1750798865442804 2025-06-24T21:01:06.131439Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:01:06.134578Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:01:06.134662Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:01:06.140691Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14561, node 1 2025-06-24T21:01:06.447726Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:01:06.447748Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:01:06.447757Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:01:06.447868Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:01:06.483080Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63795 TClient is connected to server localhost:63795 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:01:07.370511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:01:07.416381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:01:07.640262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:01:07.862840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:01:07.963414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:01:09.889042Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519623885168403014:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:01:09.889161Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:01:10.329114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:01:10.388073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:01:10.459521Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519623867988532214:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:01:10.459986Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:01:10.462830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:01:10.513760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:01:10.585797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:01:10.656703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:01:10.714897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:01:10.805125Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519623889463370969:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:01:10.805224Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:01:10.805590Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519623889463370974:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:01:10.809721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:01:10.832980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T21:01:10.835624Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519623889463370976:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:01:10.911826Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519623889463371028:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 23823, MsgBus: 11222 2025-06-24T21:01:14.395382Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519623907357305324:2086];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:01:14.403616Z node 2 :METADATA_PROVIDER ERROR: log.cp ... : schemereq.cpp:553: Actor# [6:7519624113976820751:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:02:03.784986Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 27211, MsgBus: 2632 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031fa/r3tmp/tmpmrbHJq/pdisk_1.dat 2025-06-24T21:02:06.801816Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:02:06.848393Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519624133135241348:2079] 1750798926673213 != 1750798926673216 2025-06-24T21:02:06.883932Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:06.891600Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:06.891712Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:06.896185Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27211, node 7 2025-06-24T21:02:06.985023Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:02:06.985049Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:02:06.985062Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:02:06.985263Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2632 2025-06-24T21:02:07.689431Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2632 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:02:07.854120Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:02:07.875339Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:02:07.996455Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:02:08.207253Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:02:08.313767Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:02:11.513815Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519624154610079471:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:11.513918Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:11.583695Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:11.627490Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:11.671529Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:11.719963Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:11.774781Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:11.866235Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:11.921481Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:12.017625Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519624158905047431:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:12.017847Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:12.018284Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519624158905047436:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:12.024169Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:02:12.047558Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519624158905047438:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:02:12.115454Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519624158905047489:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:02:14.029269Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> BackupRestoreS3::TestAllPrimitiveTypes-UINT8 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UTF8 |93.1%| [TA] $(B)/ydb/tests/datashard/s3/test-results/py3test/{meta.json ... results_accumulator.log} |93.1%| [TA] {RESULT} $(B)/ydb/tests/datashard/s3/test-results/py3test/{meta.json ... results_accumulator.log} >> EncryptedBackupParamsValidationTestFeatureDisabled::EncryptionParamsSpecifiedImport >> TExtSubDomainTest::CreateTableInsideAndLs-AlterDatabaseCreateHiveFirst-false >> TExtSubDomainTest::DeclareAndDefineWithoutNodes-AlterDatabaseCreateHiveFirst-false >> KqpNewEngine::PruneEffectPartitions-UseSink [GOOD] >> TExtSubDomainTest::DeclareAndDefineWithNodes-AlterDatabaseCreateHiveFirst-false >> BackupPathTest::ExportWholeDatabaseWithEncryption [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::PruneEffectPartitions-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 62586, MsgBus: 24167 2025-06-24T21:01:19.687284Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519623928230076203:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:01:19.687508Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031ea/r3tmp/tmpFFARzJ/pdisk_1.dat 2025-06-24T21:01:20.327359Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519623928230076022:2079] 1750798879635128 != 1750798879635131 2025-06-24T21:01:20.360003Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:01:20.366196Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:01:20.366310Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:01:20.370661Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62586, node 1 2025-06-24T21:01:20.664763Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:01:20.679687Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:01:20.679713Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:01:20.679726Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:01:20.679830Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24167 TClient is connected to server localhost:24167 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:01:21.704207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:01:21.730632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:01:21.938139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:01:22.176261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:01:22.312356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:01:24.667266Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519623928230076203:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:01:24.667337Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:01:24.772846Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519623949704914161:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:01:24.773003Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:01:25.139794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:01:25.176885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:01:25.242193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:01:25.277642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:01:25.330251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:01:25.407480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:01:25.459501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:01:25.543606Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519623953999882118:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:01:25.543707Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:01:25.543956Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519623953999882123:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:01:25.548572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:01:25.569690Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519623953999882125:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:01:25.677073Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519623953999882179:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 6911, MsgBus: 15759 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031ea/r3tmp/tmpBwPr76/pdisk_1.dat 2025-06-24T21:01:28.332163Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:01:28.413009Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Discon ... 914947253:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:02:08.044239Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7519624138209914600:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 62709, MsgBus: 11721 2025-06-24T21:02:11.439289Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519624150677210144:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:11.439424Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031ea/r3tmp/tmp5tKGa3/pdisk_1.dat 2025-06-24T21:02:11.633248Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:11.656300Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:11.656409Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:11.660469Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62709, node 7 2025-06-24T21:02:11.730905Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:02:11.730931Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:02:11.730943Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:02:11.731099Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11721 2025-06-24T21:02:12.499958Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11721 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
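The records above repeat a handful of warning classes for every test that bootstraps a cluster: KQP_WORKLOAD_SERVICE reports "Resource pool default not found or you don't have access permissions" until the default pool exists, FLAT_TX_SCHEMESHARD logs "propose itself is undo unsafe" for each ESchemeOpCreateTable/ESchemeOpCreateResourcePool suboperation, and TX_PROXY then logs the expected "path exist, request accepts it" message for /Root/.metadata/workload_manager/pools/default. When scanning a run like this, a per-component tally of log records is usually enough to separate this bootstrap noise from real failures. The sketch below is a hypothetical helper (not part of the YDB test tooling); it assumes only the record layout visible in this log, an ISO-8601 timestamp followed by "node <N>" and ":COMPONENT LEVEL:".

#include <iostream>
#include <map>
#include <regex>
#include <string>

// Reads a log like the one above on stdin and prints how many records each
// component emitted at each level. The levels listed are the ones seen in this log.
int main() {
    std::map<std::string, int> counts;
    const std::regex record(
        R"(\d{4}-\d{2}-\d{2}T[0-9:.]+Z node \d+ :([A-Z_]+) (TRACE|DEBUG|INFO|NOTICE|WARN|ERROR):)");
    std::string line;
    while (std::getline(std::cin, line)) {
        // A single physical line in this report may carry several records.
        for (auto it = std::sregex_iterator(line.begin(), line.end(), record);
             it != std::sregex_iterator(); ++it) {
            ++counts[(*it)[1].str() + " " + (*it)[2].str()];
        }
    }
    for (const auto& [key, n] : counts) {
        std::cout << n << "\t" << key << "\n";
    }
    return 0;
}

Compiled ad hoc (for example with g++ -std=c++17) and fed this report on stdin, it would print totals such as the FLAT_TX_SCHEMESHARD WARN and KQP_WORKLOAD_SERVICE WARN counts for the run, which is what a reviewer typically wants before digging into individual records.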
2025-06-24T21:02:12.633265Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:02:12.643917Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:02:12.658429Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:02:12.760858Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:02:13.013875Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:02:13.128193Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:02:16.420038Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519624150677210144:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:16.420168Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:02:16.482028Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519624172152048240:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:16.482178Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:16.574348Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:16.621970Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:16.666363Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:16.713326Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:16.757010Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:16.809118Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:16.865988Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:16.971638Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519624172152048899:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:16.971834Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:16.972401Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519624172152048904:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:16.977389Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:02:16.999170Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519624172152048906:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:02:17.089652Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519624176447016253:3433] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> BackupPathTest::ExportWithCommonSourcePath >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeTable [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypePersQueueGroup >> BuildStatsHistogram::Ten_Crossed [GOOD] >> BuildStatsHistogram::Ten_Mixed_Log >> EncryptedBackupParamsValidationTestFeatureDisabled::EncryptionParamsSpecifiedImport [GOOD] >> TExtSubDomainTest::DeclareAndLs >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonSourcePrefixSpecified >> TExtSubDomainTest::DeclareAndDefineWithoutNodes-AlterDatabaseCreateHiveFirst-false [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT16 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT32 >> BackupRestoreS3::RestoreIndexTableReadReplicasSettings [GOOD] >> BackupRestoreS3::RestoreTableSplitBoundaries >> TExtSubDomainTest::DeclareAndAlterPools-AlterDatabaseCreateHiveFirst-false [GOOD] >> TExtSubDomainTest::DeclareAndAlterPools-AlterDatabaseCreateHiveFirst-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::DeclareAndDefineWithoutNodes-AlterDatabaseCreateHiveFirst-false [GOOD] Test command err: 2025-06-24T21:02:21.121673Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624194017896941:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:21.122176Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003628/r3tmp/tmp7hLktp/pdisk_1.dat 2025-06-24T21:02:21.801327Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:21.801450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:21.808610Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:21.823866Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624194017896901:2079] 1750798941107337 != 1750798941107340 2025-06-24T21:02:21.859178Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:02:22.120565Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22862 WaitRootIsUp 'dc-1'... 
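The TExtSubDomainTest output below is dominated by SCHEME_BOARD_SUBSCRIBER records that count replica replies for a path sync: "Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0", then successes# 2, and finally "Sync is done in the ring group: ... successes# 3, failures# 0, partial# 0". The sketch below restates the completion rule those counters suggest; it is an illustration inferred from this log, not the actual YDB scheme-board code. The assumed rule: the sync finishes only once every replica in the ring group has answered, and it counts as successful (partial# 0) when more than "half" of them succeeded.

#include <cstddef>
#include <iostream>

// Field names mirror the counters printed by the subscriber records below
// ("size# 3, half# 1, successes# N, failures# M"); the logic is inferred from
// how those records progress in this log and is only an illustration.
struct TRingGroupSync {
    std::size_t Size = 3;        // replicas in ring group# 0
    std::size_t Half = 1;        // Size / 2, as printed in the log
    std::size_t Successes = 0;
    std::size_t Failures = 0;

    bool Done() const { return Successes + Failures == Size; }
    bool Partial() const { return Successes <= Half; }   // assumed meaning of partial# 1
};

int main() {
    TRingGroupSync sync;
    while (!sync.Done()) {
        ++sync.Successes;   // one TEvSyncVersionResponse from a replica
        if (!sync.Done()) {
            std::cout << "Sync is in progress: successes# " << sync.Successes
                      << ", failures# " << sync.Failures << "\n";
        } else {
            std::cout << "Sync is done in the ring group: successes# " << sync.Successes
                      << ", failures# " << sync.Failures
                      << ", partial# " << (sync.Partial() ? 1 : 0) << "\n";
        }
    }
    return 0;
}

Run as-is it reproduces the 1, 2, done-at-3 progression visible in the subscriber records that follow; whether the real subscriber short-circuits on failures is not something this log shows, so the sketch deliberately models only the successful path.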
TClient::Ls request: dc-1 2025-06-24T21:02:22.159529Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519624194017897140:2115] Handle TEvNavigate describe path dc-1 2025-06-24T21:02:22.182651Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519624198312864938:2444] HANDLE EvNavigateScheme dc-1 2025-06-24T21:02:22.182788Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624194017897165:2129], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:22.182858Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519624198312864921:2439][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519624194017897165:2129], cookie# 1 2025-06-24T21:02:22.184644Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624198312864925:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624198312864922:2439], cookie# 1 2025-06-24T21:02:22.184714Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624198312864926:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624198312864923:2439], cookie# 1 2025-06-24T21:02:22.184734Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624198312864927:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624198312864924:2439], cookie# 1 2025-06-24T21:02:22.184772Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624194017896871:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624198312864925:2439], cookie# 1 2025-06-24T21:02:22.184800Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624194017896874:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624198312864926:2439], cookie# 1 2025-06-24T21:02:22.184821Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624194017896877:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624198312864927:2439], cookie# 1 2025-06-24T21:02:22.184869Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624198312864925:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624194017896871:2049], cookie# 1 2025-06-24T21:02:22.184886Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624198312864926:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624194017896874:2052], cookie# 1 2025-06-24T21:02:22.184919Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624198312864927:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624194017896877:2055], cookie# 1 2025-06-24T21:02:22.184957Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624198312864921:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624198312864922:2439], cookie# 1 2025-06-24T21:02:22.184981Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: 
[main][1:7519624198312864921:2439][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T21:02:22.184997Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624198312864921:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624198312864923:2439], cookie# 1 2025-06-24T21:02:22.185009Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624198312864921:2439][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T21:02:22.185029Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624198312864921:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624198312864924:2439], cookie# 1 2025-06-24T21:02:22.185058Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519624198312864921:2439][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T21:02:22.185109Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519624194017897165:2129], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T21:02:22.191915Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519624194017897165:2129], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519624198312864921:2439] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:22.192057Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519624194017897165:2129], cacheItem# { Subscriber: { Subscriber: [1:7519624198312864921:2439] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-24T21:02:22.207698Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624198312864939:2445], recipient# [1:7519624198312864938:2444], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:02:22.207800Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# 
[1:7519624198312864938:2444] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:22.272019Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519624198312864938:2444] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-24T21:02:22.275482Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519624198312864938:2444] Handle TEvDescribeSchemeResult Forward to# [1:7519624198312864937:2443] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:02:22.324288Z ... 
tatus: StatusSuccess Kind: 9 TableKind: 0 Created: 1 CreateStep: 1750798942760 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 2 IsSync: true Partial: 0 } 2025-06-24T21:02:23.387813Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624202607832567:2683], recipient# [1:7519624202607832566:2682], result# { ErrorCount: 1 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: [72057594046644480:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: RedirectLookupError Kind: KindExtSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:02:23.387837Z node 1 :TX_PROXY INFO: describe.cpp:356: Actor# [1:7519624202607832566:2682] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 1 2025-06-24T21:02:23.390911Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519624194017897140:2115] Handle TEvNavigate describe path /dc-1 2025-06-24T21:02:23.407560Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519624202607832569:2685] HANDLE EvNavigateScheme /dc-1 2025-06-24T21:02:23.407666Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624194017897165:2129], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:23.407726Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519624198312864921:2439][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519624194017897165:2129], cookie# 4 2025-06-24T21:02:23.407780Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624198312864925:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624198312864922:2439], cookie# 4 2025-06-24T21:02:23.407799Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624198312864926:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624198312864923:2439], cookie# 4 2025-06-24T21:02:23.407815Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624198312864927:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624198312864924:2439], cookie# 4 2025-06-24T21:02:23.407849Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624194017896871:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624198312864925:2439], cookie# 4 2025-06-24T21:02:23.407888Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624194017896874:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# 
[1:7519624198312864926:2439], cookie# 4 2025-06-24T21:02:23.407911Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624194017896877:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624198312864927:2439], cookie# 4 2025-06-24T21:02:23.407943Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624198312864925:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519624194017896871:2049], cookie# 4 2025-06-24T21:02:23.407976Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624198312864926:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519624194017896874:2052], cookie# 4 2025-06-24T21:02:23.408002Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624198312864927:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519624194017896877:2055], cookie# 4 2025-06-24T21:02:23.408033Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624198312864921:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519624198312864922:2439], cookie# 4 2025-06-24T21:02:23.408050Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624198312864921:2439][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T21:02:23.408065Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624198312864921:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519624198312864923:2439], cookie# 4 2025-06-24T21:02:23.408081Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624198312864921:2439][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T21:02:23.408094Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624198312864921:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519624198312864924:2439], cookie# 4 2025-06-24T21:02:23.408113Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519624198312864921:2439][/dc-1] Sync is done in the ring group: cookie# 4, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T21:02:23.408200Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519624194017897165:2129], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T21:02:23.408250Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519624194017897165:2129], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519624198312864921:2439] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750798942676 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:23.408309Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519624194017897165:2129], cacheItem# { Subscriber: { Subscriber: [1:7519624198312864921:2439] DomainOwnerId: 
72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750798942676 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 4 IsSync: true Partial: 0 } 2025-06-24T21:02:23.408502Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624202607832570:2686], recipient# [1:7519624202607832569:2685], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:02:23.408533Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519624202607832569:2685] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:23.408593Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519624202607832569:2685] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1" Options { ShowPrivateTable: true } 2025-06-24T21:02:23.409323Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519624202607832569:2685] Handle TEvDescribeSchemeResult Forward to# [1:7519624202607832568:2684] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 63 Record# Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750798942676 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePools { Name: "" Kind: "storage-pool-number-1" } StoragePools { Name: "" Kind: "storage-pool-number-2" } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 
MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750798942676 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750798942760 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046... (TRUNCATED) >> BackupRestoreS3::TestAllPrimitiveTypes-UTF8 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-YSON >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSequence [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeView >> TExtSubDomainTest::CreateTableInsideAndLs-AlterDatabaseCreateHiveFirst-false [GOOD] >> TExtSubDomainTest::CreateTableInsideAndLs-AlterDatabaseCreateHiveFirst-true >> TExtSubDomainTest::DeclareAndDefineWithNodes-AlterDatabaseCreateHiveFirst-false [GOOD] >> TExtSubDomainTest::DeclareAndLs [GOOD] >> TExtSubDomainTest::DeclareAndDefineWithNodes-AlterDatabaseCreateHiveFirst-true >> TExtSubDomainTest::DeclareAndDrop >> BackupRestore::TestAllPrimitiveTypes-INT16 >> BackupPathTest::ExportWithCommonSourcePath [GOOD] >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonSourcePrefixSpecified [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypePersQueueGroup [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeSubDomain [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeRtmrVolume [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeKesus ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::DeclareAndLs [GOOD] Test command err: 2025-06-24T21:02:25.383926Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624212289918474:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:25.383995Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003605/r3tmp/tmpIiuGMI/pdisk_1.dat 2025-06-24T21:02:25.891880Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:25.894113Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624212289918455:2079] 1750798945381877 != 1750798945381880 2025-06-24T21:02:25.904948Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:25.905062Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> 
Connecting 2025-06-24T21:02:25.908564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9108 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T21:02:26.171654Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519624212289918667:2099] Handle TEvNavigate describe path dc-1 2025-06-24T21:02:26.196816Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519624216584886257:2258] HANDLE EvNavigateScheme dc-1 2025-06-24T21:02:26.196956Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624212289918697:2115], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:26.196985Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519624212289918697:2115], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-24T21:02:26.197186Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519624216584886258:2259][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T21:02:26.199402Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519624212289918425:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519624216584886262:2259] 2025-06-24T21:02:26.199465Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519624212289918425:2049] Subscribe: subscriber# [1:7519624216584886262:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T21:02:26.199529Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519624212289918431:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519624216584886264:2259] 2025-06-24T21:02:26.199552Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519624212289918431:2055] Subscribe: subscriber# [1:7519624216584886264:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T21:02:26.199604Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519624216584886262:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519624212289918425:2049] 2025-06-24T21:02:26.199646Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519624216584886264:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519624212289918431:2055] 2025-06-24T21:02:26.199689Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519624216584886258:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519624216584886259:2259] 2025-06-24T21:02:26.199724Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519624216584886258:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519624216584886261:2259] 2025-06-24T21:02:26.199774Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: 
subscriber.cpp:852: [main][1:7519624216584886258:2259][/dc-1] Set up state: owner# [1:7519624212289918697:2115], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:02:26.199929Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624216584886262:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624216584886259:2259], cookie# 1 2025-06-24T21:02:26.199946Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624216584886263:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624216584886260:2259], cookie# 1 2025-06-24T21:02:26.199958Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624216584886264:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624216584886261:2259], cookie# 1 2025-06-24T21:02:26.199979Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519624212289918425:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519624216584886262:2259] 2025-06-24T21:02:26.200019Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624212289918425:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624216584886262:2259], cookie# 1 2025-06-24T21:02:26.200070Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519624212289918431:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519624216584886264:2259] 2025-06-24T21:02:26.200086Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624212289918431:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624216584886264:2259], cookie# 1 2025-06-24T21:02:26.200243Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519624212289918428:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519624216584886263:2259] 2025-06-24T21:02:26.200285Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519624212289918428:2052] Subscribe: subscriber# [1:7519624216584886263:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T21:02:26.200315Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624212289918428:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624216584886263:2259], cookie# 1 2025-06-24T21:02:26.200350Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624216584886262:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624212289918425:2049], cookie# 1 2025-06-24T21:02:26.200363Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624216584886264:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624212289918431:2055], cookie# 1 2025-06-24T21:02:26.200409Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519624216584886263:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519624212289918428:2052] 2025-06-24T21:02:26.200446Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624216584886263:2259][/dc-1] Handle 
NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624212289918428:2052], cookie# 1 2025-06-24T21:02:26.200479Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624216584886258:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624216584886259:2259], cookie# 1 2025-06-24T21:02:26.200499Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624216584886258:2259][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T21:02:26.200517Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624216584886258:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624216584886261:2259], cookie# 1 2025-06-24T21:02:26.200535Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624216584886258:2259][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T21:02:26.200567Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519624216584886258:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519624216584886260:2259] 2025-06-24T21:02:26.200611Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519624216584886258:2259][/dc-1] Path was already updated: owner# [1:7519624212289918697:2115], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:02:26.200850Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624216584886258:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624216584886260:2259], cookie# 1 2025-06-24T21:02:26.200896Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519624216584886258:2259][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T21:02:26.200945Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519624212289918428:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519624216584886263:2259] 2025-06-24T21:02:26.251510Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519624212289918697:2115], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 
PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsI ... : [OwnerId: 72057594046644480, LocalPathId: 2] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-24T21:02:26.448083Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624216584886327:2301], recipient# [1:7519624216584886319:2299], result# { ErrorCount: 1 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: [72057594046644480:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: RedirectLookupError Kind: KindExtSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:02:26.448118Z node 1 :TX_PROXY INFO: describe.cpp:356: Actor# [1:7519624216584886319:2299] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 1 TClient::Ls response: Status: 128 StatusCode: ERROR Issues { message: "Default error" severity: 1 } SchemeStatus: 13 ErrorReason: "Could not resolve redirected path" TClient::Ls request: /dc-1 2025-06-24T21:02:26.452492Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519624212289918667:2099] Handle TEvNavigate describe path /dc-1 2025-06-24T21:02:26.470552Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519624216584886329:2303] HANDLE EvNavigateScheme /dc-1 2025-06-24T21:02:26.470659Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624212289918697:2115], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:26.470730Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519624216584886258:2259][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519624212289918697:2115], cookie# 4 2025-06-24T21:02:26.470784Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624216584886262:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624216584886259:2259], cookie# 4 2025-06-24T21:02:26.470813Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624216584886263:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624216584886260:2259], cookie# 4 2025-06-24T21:02:26.470826Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624216584886264:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624216584886261:2259], cookie# 4 2025-06-24T21:02:26.470847Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: 
[1:7519624212289918425:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624216584886262:2259], cookie# 4 2025-06-24T21:02:26.470868Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624212289918428:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624216584886263:2259], cookie# 4 2025-06-24T21:02:26.470883Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624212289918431:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624216584886264:2259], cookie# 4 2025-06-24T21:02:26.470903Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624216584886262:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519624212289918425:2049], cookie# 4 2025-06-24T21:02:26.470931Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624216584886263:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519624212289918428:2052], cookie# 4 2025-06-24T21:02:26.470946Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624216584886264:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519624212289918431:2055], cookie# 4 2025-06-24T21:02:26.470973Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624216584886258:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519624216584886259:2259], cookie# 4 2025-06-24T21:02:26.470988Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624216584886258:2259][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T21:02:26.471011Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624216584886258:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519624216584886260:2259], cookie# 4 2025-06-24T21:02:26.471023Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624216584886258:2259][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T21:02:26.471035Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624216584886258:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519624216584886261:2259], cookie# 4 2025-06-24T21:02:26.471057Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519624216584886258:2259][/dc-1] Sync is done in the ring group: cookie# 4, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T21:02:26.471125Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519624212289918697:2115], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T21:02:26.471190Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519624212289918697:2115], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519624216584886258:2259] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750798946428 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 
72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:26.471263Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519624212289918697:2115], cacheItem# { Subscriber: { Subscriber: [1:7519624216584886258:2259] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750798946428 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 4 IsSync: true Partial: 0 } 2025-06-24T21:02:26.471459Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624216584886330:2304], recipient# [1:7519624216584886329:2303], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:02:26.471488Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519624216584886329:2303] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:26.471543Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519624216584886329:2303] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1" Options { ShowPrivateTable: true } 2025-06-24T21:02:26.472146Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519624216584886329:2303] Handle TEvDescribeSchemeResult Forward to# [1:7519624216584886328:2302] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 63 Record# Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750798946428 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { 
MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750798946428 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750798946463 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } Children { Name: ".sys" PathId: 18446744073709551615 ... (TRUNCATED) >> BackupPathTest::ExportWithCommonSourcePathAndExplicitTableInside >> BuildStatsHistogram::Ten_Mixed_Log [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> BuildStatsHistogram::Ten_Serial_Log >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonDestPathSpecified >> TExtSubDomainTest::CreateTableInsideThenStopTenantAndForceDeleteSubDomain-AlterDatabaseCreateHiveFirst-false [GOOD] >> TExtSubDomainTest::CreateTableInsideThenStopTenantAndForceDeleteSubDomain-AlterDatabaseCreateHiveFirst-true >> TExtSubDomainTest::CreateTableInsideAndAlterDomainAndTable-AlterDatabaseCreateHiveFirst-false >> TExtSubDomainTest::DeclareAndDrop [GOOD] >> TExtSubDomainTest::DeclareAndAlterPools-AlterDatabaseCreateHiveFirst-true [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::DeclareAndDrop [GOOD] Test command err: 2025-06-24T21:02:29.412387Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624230281330808:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:29.412451Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0035f8/r3tmp/tmpF3CylG/pdisk_1.dat 2025-06-24T21:02:30.077208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:30.077286Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:30.082601Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:02:30.123258Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624230281330600:2079] 1750798949359340 != 1750798949359343 
2025-06-24T21:02:30.141507Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:30.363292Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14715 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T21:02:30.488058Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519624230281330820:2102] Handle TEvNavigate describe path dc-1 2025-06-24T21:02:30.511039Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519624234576298421:2263] HANDLE EvNavigateScheme dc-1 2025-06-24T21:02:30.511225Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624230281330848:2117], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:30.511307Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519624234576298404:2258][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519624230281330848:2117], cookie# 1 2025-06-24T21:02:30.513293Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624234576298408:2258][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624234576298405:2258], cookie# 1 2025-06-24T21:02:30.513350Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624234576298409:2258][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624234576298406:2258], cookie# 1 2025-06-24T21:02:30.513371Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624234576298410:2258][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624234576298407:2258], cookie# 1 2025-06-24T21:02:30.513399Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624230281330570:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624234576298408:2258], cookie# 1 2025-06-24T21:02:30.513422Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624230281330573:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624234576298409:2258], cookie# 1 2025-06-24T21:02:30.513437Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624230281330576:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624234576298410:2258], cookie# 1 2025-06-24T21:02:30.513484Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624234576298408:2258][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624230281330570:2049], cookie# 1 2025-06-24T21:02:30.513499Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624234576298409:2258][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624230281330573:2052], cookie# 1 2025-06-24T21:02:30.513512Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624234576298410:2258][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624230281330576:2055], cookie# 1 
2025-06-24T21:02:30.513554Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624234576298404:2258][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624234576298405:2258], cookie# 1 2025-06-24T21:02:30.513585Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624234576298404:2258][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T21:02:30.513613Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624234576298404:2258][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624234576298406:2258], cookie# 1 2025-06-24T21:02:30.513643Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624234576298404:2258][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T21:02:30.513662Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624234576298404:2258][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624234576298407:2258], cookie# 1 2025-06-24T21:02:30.513681Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519624234576298404:2258][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T21:02:30.513759Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519624230281330848:2117], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T21:02:30.526282Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519624230281330848:2117], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519624234576298404:2258] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:30.526420Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519624230281330848:2117], cacheItem# { Subscriber: { Subscriber: [1:7519624234576298404:2258] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-24T21:02:30.529186Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624234576298422:2264], recipient# [1:7519624234576298421:2263], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] 
ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:02:30.529242Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519624234576298421:2263] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:30.584910Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519624234576298421:2263] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-24T21:02:30.594896Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519624234576298421:2263] Handle TEvDescribeSchemeResult Forward to# [1:7519624234576298420:2262] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:02:30.621680Z ... 
IsVirtual: 0 SchemaVersion: 0 } 2025-06-24T21:02:30.816511Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519624230281330848:2117], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Strong: 1 } 2025-06-24T21:02:30.816669Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519624230281330848:2117], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Strong: 1 }, by path# { Subscriber: { Subscriber: [1:7519624234576298468:2302] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 9 TableKind: 0 Created: 1 CreateStep: 1750798950824 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# { Subscriber: { Subscriber: [1:7519624234576298468:2302] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 9 TableKind: 0 Created: 1 CreateStep: 1750798950824 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 } 2025-06-24T21:02:30.816811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 TClient::Ls request: /dc-1 2025-06-24T21:02:30.818591Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519624230281330820:2102] Handle TEvNavigate describe path /dc-1 2025-06-24T21:02:30.833901Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519624234576298496:2321] HANDLE EvNavigateScheme /dc-1 2025-06-24T21:02:30.834001Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624230281330848:2117], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:30.834086Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519624234576298404:2258][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519624230281330848:2117], cookie# 4 2025-06-24T21:02:30.834137Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624234576298408:2258][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624234576298405:2258], cookie# 4 2025-06-24T21:02:30.834205Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624234576298409:2258][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624234576298406:2258], cookie# 4 2025-06-24T21:02:30.834226Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624234576298410:2258][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624234576298407:2258], cookie# 4 2025-06-24T21:02:30.834262Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624230281330570:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624234576298408:2258], cookie# 4 2025-06-24T21:02:30.834294Z node 1 :SCHEME_BOARD_REPLICA DEBUG: 
replica.cpp:1128: [1:7519624230281330573:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624234576298409:2258], cookie# 4 2025-06-24T21:02:30.834308Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624230281330576:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624234576298410:2258], cookie# 4 2025-06-24T21:02:30.834327Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624234576298408:2258][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 7 Partial: 0 }: sender# [1:7519624230281330570:2049], cookie# 4 2025-06-24T21:02:30.834338Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624234576298409:2258][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 7 Partial: 0 }: sender# [1:7519624230281330573:2052], cookie# 4 2025-06-24T21:02:30.834348Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624234576298410:2258][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 7 Partial: 0 }: sender# [1:7519624230281330576:2055], cookie# 4 2025-06-24T21:02:30.834373Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624234576298404:2258][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 7 Partial: 0 }: sender# [1:7519624234576298405:2258], cookie# 4 2025-06-24T21:02:30.834393Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624234576298404:2258][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T21:02:30.834406Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624234576298404:2258][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 7 Partial: 0 }: sender# [1:7519624234576298406:2258], cookie# 4 2025-06-24T21:02:30.834416Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624234576298404:2258][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T21:02:30.834444Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624234576298404:2258][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 7 Partial: 0 }: sender# [1:7519624234576298407:2258], cookie# 4 2025-06-24T21:02:30.834466Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519624234576298404:2258][/dc-1] Sync is done in the ring group: cookie# 4, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T21:02:30.834518Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519624230281330848:2117], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T21:02:30.834577Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519624230281330848:2117], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519624234576298404:2258] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750798950796 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:30.834651Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: 
self# [1:7519624230281330848:2117], cacheItem# { Subscriber: { Subscriber: [1:7519624234576298404:2258] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750798950796 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 4 IsSync: true Partial: 0 } 2025-06-24T21:02:30.834813Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624234576298497:2322], recipient# [1:7519624234576298496:2321], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:02:30.834842Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519624234576298496:2321] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:30.834902Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519624234576298496:2321] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1" Options { ShowPrivateTable: true } TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750798950796 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version... 
(TRUNCATED) 2025-06-24T21:02:30.836212Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519624234576298496:2321] Handle TEvDescribeSchemeResult Forward to# [1:7519624234576298495:2320] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750798950796 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::DeclareAndAlterPools-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: 2025-06-24T21:02:19.395173Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624188690815048:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:19.395232Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003689/r3tmp/tmp8KUMua/pdisk_1.dat 2025-06-24T21:02:20.190480Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:20.190571Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:20.206669Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:20.267473Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:02:20.455359Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19738 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T21:02:20.545712Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519624188690815253:2117] Handle TEvNavigate describe path dc-1 2025-06-24T21:02:20.571974Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519624192985783055:2454] HANDLE EvNavigateScheme dc-1 2025-06-24T21:02:20.573220Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624188690815331:2150], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:20.573334Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519624192985783033:2444][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519624188690815331:2150], cookie# 1 2025-06-24T21:02:20.574791Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624192985783037:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624192985783034:2444], cookie# 1 2025-06-24T21:02:20.574825Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624192985783038:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624192985783035:2444], cookie# 1 2025-06-24T21:02:20.574841Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624192985783039:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624192985783036:2444], cookie# 1 2025-06-24T21:02:20.574873Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624188690814970:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624192985783037:2444], cookie# 1 2025-06-24T21:02:20.574910Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624188690814973:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624192985783038:2444], cookie# 1 2025-06-24T21:02:20.574952Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624188690814976:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624192985783039:2444], cookie# 1 2025-06-24T21:02:20.575003Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624192985783037:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624188690814970:2050], cookie# 1 2025-06-24T21:02:20.575050Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624192985783038:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624188690814973:2053], cookie# 1 2025-06-24T21:02:20.575066Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624192985783039:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624188690814976:2056], cookie# 1 2025-06-24T21:02:20.575124Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624192985783033:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624192985783034:2444], cookie# 1 2025-06-24T21:02:20.575168Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: 
[main][1:7519624192985783033:2444][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T21:02:20.575188Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624192985783033:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624192985783035:2444], cookie# 1 2025-06-24T21:02:20.575198Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624192985783033:2444][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T21:02:20.575210Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624192985783033:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624192985783036:2444], cookie# 1 2025-06-24T21:02:20.575230Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519624192985783033:2444][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T21:02:20.575292Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519624188690815331:2150], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T21:02:20.580766Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519624188690815331:2150], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519624192985783033:2444] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:20.580900Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519624188690815331:2150], cacheItem# { Subscriber: { Subscriber: [1:7519624192985783033:2444] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-24T21:02:20.591784Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624192985783056:2455], recipient# [1:7519624192985783055:2454], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:02:20.592029Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# 
[1:7519624192985783055:2454] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:20.639605Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519624192985783055:2454] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-24T21:02:20.643975Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519624192985783055:2454] Handle TEvDescribeSchemeResult Forward to# [1:7519624192985783054:2453] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:02:20.681922Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519624188690815253:2117] Handle TEvProposeTransaction 2025-06-24T21:02:20.681948Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:75196241886 ... 
][3:7519624238735655288:2999][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519624221555784585:2050] 2025-06-24T21:02:31.589422Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519624238735655289:2999][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519624221555784588:2053] 2025-06-24T21:02:31.589443Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519624238735655290:2999][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519624221555784591:2056] 2025-06-24T21:02:31.589472Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519624238735655278:2999][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519624238735655285:2999] 2025-06-24T21:02:31.589499Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519624238735655278:2999][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519624238735655286:2999] 2025-06-24T21:02:31.589519Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][3:7519624238735655278:2999][/dc-1/.metadata/workload_manager/running_requests] Set up state: owner# [3:7519624221555784884:2128], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:02:31.589537Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519624238735655278:2999][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519624238735655287:2999] 2025-06-24T21:02:31.589555Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][3:7519624238735655278:2999][/dc-1/.metadata/workload_manager/running_requests] Ignore empty state: owner# [3:7519624221555784884:2128], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:02:31.589578Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519624221555784585:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519624238735655282:2998] 2025-06-24T21:02:31.589595Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519624221555784585:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519624238735655288:2999] 2025-06-24T21:02:31.589608Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519624221555784588:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519624238735655283:2998] 2025-06-24T21:02:31.589621Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519624221555784588:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519624238735655289:2999] 2025-06-24T21:02:31.589635Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519624221555784591:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# 
[3:7519624238735655284:2998] 2025-06-24T21:02:31.589649Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519624221555784591:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519624238735655290:2999] 2025-06-24T21:02:31.589712Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519624221555784884:2128], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-24T21:02:31.589783Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519624221555784884:2128], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519624238735655277:2998] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:31.589876Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624221555784884:2128], cacheItem# { Subscriber: { Subscriber: [3:7519624238735655277:2998] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:31.589906Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519624221555784884:2128], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-24T21:02:31.589944Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519624221555784884:2128], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519624238735655278:2999] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:31.589984Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624221555784884:2128], cacheItem# { Subscriber: { Subscriber: [3:7519624238735655278:2999] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:31.590073Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624238735655291:3000], recipient# [3:7519624238735655276:2286], result# { ErrorCount: 2 DatabaseName: /dc-1 
DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:32.400683Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519624221555784683:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:32.400760Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:02:32.450966Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624221555784884:2128], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:32.451079Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624221555784884:2128], cacheItem# { Subscriber: { Subscriber: [3:7519624225850752853:2599] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:32.451166Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624243030622609:3006], recipient# [3:7519624243030622608:2287], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:32.587500Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624221555784884:2128], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:32.587642Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624221555784884:2128], cacheItem# { Subscriber: { Subscriber: [3:7519624238735655266:2996] DomainOwnerId: 72057594046644480 Type: 2 
SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:32.587724Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624243030622611:3007], recipient# [3:7519624243030622610:2288], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> BackupRestoreS3::TestAllPrimitiveTypes-INT32 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT32 >> BackupRestoreS3::TestAllPrimitiveTypes-YSON [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UUID >> TExtSubDomainTest::CreateTableInsideAndLs-AlterDatabaseCreateHiveFirst-true [GOOD] >> TExtSubDomainTest::DeclareAndDefineWithNodes-AlterDatabaseCreateHiveFirst-true [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeKesus [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeSolomonVolume [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeTableIndex >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonDestPathSpecified [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeView [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTransfer [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSysView [GOOD] >> CommonEncryptionRequirementsTest::CommonEncryptionRequirements >> TExtSubDomainTest::GenericCases >> BuildStatsHistogram::Ten_Serial_Log [GOOD] >> BuildStatsHistogram::Ten_Crossed_Log ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::CreateTableInsideAndLs-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: 2025-06-24T21:02:21.343531Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624195814635250:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:21.343573Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003638/r3tmp/tmprsukv5/pdisk_1.dat 2025-06-24T21:02:21.916494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:21.916573Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:21.928374Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:21.959389Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28292 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T21:02:22.277670Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519624195814635472:2141] Handle TEvNavigate describe path dc-1 2025-06-24T21:02:22.342676Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519624200109603208:2444] HANDLE EvNavigateScheme dc-1 2025-06-24T21:02:22.343012Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624195814635496:2154], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:22.343089Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624195814635496:2154], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:22.343119Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519624195814635496:2154], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-24T21:02:22.343278Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624195814635496:2154], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 72057594046644480 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:22.343439Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519624200109603211:2446][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T21:02:22.345331Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519624195814635151:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519624200109603215:2446] 2025-06-24T21:02:22.345384Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519624195814635151:2050] Subscribe: subscriber# [1:7519624200109603215:2446], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T21:02:22.345442Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519624195814635154:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519624200109603216:2446] 2025-06-24T21:02:22.345459Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519624195814635154:2053] Subscribe: subscriber# [1:7519624200109603216:2446], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T21:02:22.345478Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519624195814635157:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519624200109603217:2446] 2025-06-24T21:02:22.345492Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519624195814635157:2056] Subscribe: subscriber# [1:7519624200109603217:2446], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# 
AckNotifications: true 2025-06-24T21:02:22.345556Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519624200109603215:2446][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519624195814635151:2050] 2025-06-24T21:02:22.345586Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519624200109603216:2446][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519624195814635154:2053] 2025-06-24T21:02:22.345613Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519624200109603217:2446][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519624195814635157:2056] 2025-06-24T21:02:22.345652Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519624200109603211:2446][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519624200109603212:2446] 2025-06-24T21:02:22.345685Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519624200109603211:2446][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519624200109603213:2446] 2025-06-24T21:02:22.345736Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519624200109603211:2446][/dc-1] Set up state: owner# [1:7519624195814635496:2154], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:02:22.345850Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519624200109603211:2446][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519624200109603214:2446] 2025-06-24T21:02:22.345911Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519624200109603211:2446][/dc-1] Path was already updated: owner# [1:7519624195814635496:2154], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:02:22.345954Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624200109603215:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624200109603212:2446], cookie# 1 2025-06-24T21:02:22.345971Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624200109603216:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624200109603213:2446], cookie# 1 2025-06-24T21:02:22.345983Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624200109603217:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624200109603214:2446], cookie# 1 2025-06-24T21:02:22.346038Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: 
[1:7519624195814635151:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519624200109603215:2446] 2025-06-24T21:02:22.346069Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624195814635151:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624200109603215:2446], cookie# 1 2025-06-24T21:02:22.346089Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519624195814635154:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519624200109603216:2446] 2025-06-24T21:02:22.346100Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624195814635154:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624200109603216:2446], cookie# 1 2025-06-24T21:02:22.346112Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519624195814635157:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519624200109603217:2446] 2025-06-24T21:02:22.346124Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624195814635157:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624200109603217:2446], cookie# 1 2025-06-24T21:02:22.347221Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624200109603215:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624195814635151:2050], cookie# 1 2025-06-24T21:02:22.347252Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624200109603216:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624195814635154:2053], cookie# 1 2025-06-24T21:02:22.347265Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624200109603217:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624195814635157:2056], cookie# 1 2025-06-24T21:02:22.347292Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624200109603211:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624200109603212:2446], cookie# 1 2025-06-24T21:02:22.347320Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624200109603211:2446][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T21:02:22.347335Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624200109603211:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624200109603213:2446], cookie# 1 2025-06-24T21:02:22.347346Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624200109603211:2446][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T21:02:22.347358Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624200109603211:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624200109603214:2446], cookie# 1 2025-06-24T21:02:22.347401Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519624200109603211:2446][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T21:02:22.372447Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:02:22.427072Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519624195814635496:215 ... lid> DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:02:34.351420Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519624230781808819:2141], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 } 2025-06-24T21:02:34.351530Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519624230781808819:2141], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519624252256646410:2925] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:34.351643Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624230781808819:2141], cacheItem# { Subscriber: { Subscriber: [3:7519624252256646410:2925] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:34.351701Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519624230781808819:2141], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-24T21:02:34.351751Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519624230781808819:2141], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519624252256646418:2926] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:34.351811Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624230781808819:2141], cacheItem# { Subscriber: { Subscriber: [3:7519624252256646418:2926] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:34.351899Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624252256646432:2928], recipient# 
[3:7519624252256646409:2285], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:34.351949Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519624226486841191:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519624252256646429:2927] 2025-06-24T21:02:34.351976Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519624226486841194:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519624252256646430:2927] 2025-06-24T21:02:34.351996Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519624226486841197:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519624252256646431:2927] 2025-06-24T21:02:34.357693Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519624252256646419:2927][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519624252256646427:2927] 2025-06-24T21:02:34.357793Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][3:7519624252256646419:2927][/dc-1/.metadata/workload_manager/running_requests] Set up state: owner# [3:7519624230781808819:2141], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:02:34.357860Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519624252256646419:2927][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519624252256646428:2927] 2025-06-24T21:02:34.357889Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][3:7519624252256646419:2927][/dc-1/.metadata/workload_manager/running_requests] Ignore empty state: owner# [3:7519624230781808819:2141], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:02:34.357958Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519624230781808819:2141], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-24T21:02:34.358964Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519624230781808819:2141], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519624252256646419:2927] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:34.359089Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624230781808819:2141], cacheItem# { Subscriber: { Subscriber: [3:7519624252256646419:2927] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 
CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:34.359230Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624252256646433:2929], recipient# [3:7519624252256646417:2286], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:35.047527Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624230781808819:2141], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:35.047681Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624230781808819:2141], cacheItem# { Subscriber: { Subscriber: [3:7519624235076776855:2682] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:35.047777Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624256551613748:2935], recipient# [3:7519624256551613747:2287], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:35.088980Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624230781808819:2141], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:35.089136Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for 
TNavigate: self# [3:7519624230781808819:2141], cacheItem# { Subscriber: { Subscriber: [3:7519624235076776855:2682] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:35.089244Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624256551613750:2936], recipient# [3:7519624256551613749:2288], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> BackupPathTest::ExportWithCommonSourcePathAndExplicitTableInside [GOOD] >> EncryptedBackupParamsValidationTestFeatureDisabled::SrcPrefixAndSrcPathSpecified >> BackupRestoreS3::RestoreTableSplitBoundaries [GOOD] >> BackupRestoreS3::RestoreIndexTableSplitBoundaries ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::DeclareAndDefineWithNodes-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: 2025-06-24T21:02:21.955784Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624194559446023:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:21.955839Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00360f/r3tmp/tmp4Of5lv/pdisk_1.dat 2025-06-24T21:02:22.585908Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:22.604276Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:22.604366Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:22.609448Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16466 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T21:02:22.855534Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519624194559446208:2117] Handle TEvNavigate describe path dc-1 2025-06-24T21:02:22.902453Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519624198854413967:2430] HANDLE EvNavigateScheme dc-1 2025-06-24T21:02:22.902971Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624198854413551:2137], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:22.903054Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519624198854413551:2137], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-24T21:02:22.903284Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519624198854413968:2431][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T21:02:22.909110Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519624194559445923:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519624198854413972:2431] 2025-06-24T21:02:22.909182Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519624194559445923:2050] Subscribe: subscriber# [1:7519624198854413972:2431], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T21:02:22.909250Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519624194559445929:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519624198854413974:2431] 2025-06-24T21:02:22.909265Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519624194559445929:2056] Subscribe: subscriber# [1:7519624198854413974:2431], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T21:02:22.909309Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519624198854413972:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519624194559445923:2050] 2025-06-24T21:02:22.909342Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519624198854413974:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519624194559445929:2056] 2025-06-24T21:02:22.909384Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519624198854413968:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519624198854413969:2431] 2025-06-24T21:02:22.909439Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519624194559445926:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519624198854413973:2431] 2025-06-24T21:02:22.909464Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519624194559445926:2053] Subscribe: subscriber# [1:7519624198854413973:2431], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-24T21:02:22.915540Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: 
[main][1:7519624198854413968:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519624198854413971:2431] 2025-06-24T21:02:22.915675Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519624194559445923:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519624198854413972:2431] 2025-06-24T21:02:22.915708Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519624194559445929:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519624198854413974:2431] 2025-06-24T21:02:22.916750Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519624198854413968:2431][/dc-1] Set up state: owner# [1:7519624198854413551:2137], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:02:22.916940Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519624198854413973:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519624194559445926:2053] 2025-06-24T21:02:22.916974Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624198854413972:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624198854413969:2431], cookie# 1 2025-06-24T21:02:22.916989Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624198854413973:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624198854413970:2431], cookie# 1 2025-06-24T21:02:22.917002Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624198854413974:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624198854413971:2431], cookie# 1 2025-06-24T21:02:22.917026Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519624198854413968:2431][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519624198854413970:2431] 2025-06-24T21:02:22.917109Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519624198854413968:2431][/dc-1] Path was already updated: owner# [1:7519624198854413551:2137], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:02:22.919164Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519624194559445926:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519624198854413973:2431] 2025-06-24T21:02:22.919202Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624194559445926:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624198854413973:2431], cookie# 1 2025-06-24T21:02:22.919232Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624194559445923:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624198854413972:2431], cookie# 
1 2025-06-24T21:02:22.919247Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624194559445929:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624198854413974:2431], cookie# 1 2025-06-24T21:02:22.919276Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624198854413973:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624194559445926:2053], cookie# 1 2025-06-24T21:02:22.919289Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624198854413972:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624194559445923:2050], cookie# 1 2025-06-24T21:02:22.919318Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624198854413974:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624194559445929:2056], cookie# 1 2025-06-24T21:02:22.919360Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624198854413968:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624198854413970:2431], cookie# 1 2025-06-24T21:02:22.919385Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624198854413968:2431][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T21:02:22.919399Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624198854413968:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624198854413969:2431], cookie# 1 2025-06-24T21:02:22.919409Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624198854413968:2431][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T21:02:22.919421Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624198854413968:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624198854413971:2431], cookie# 1 2025-06-24T21:02:22.919445Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519624198854413968:2431][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T21:02:22.979316Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:02:23.034577Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519624198854413551:2137], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 
TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 Resour ... rSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519624251369971575:2782] 2025-06-24T21:02:34.480221Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][3:7519624251369971561:2782][/dc-1/.metadata/workload_manager/running_requests] Set up state: owner# [3:7519624229895134107:2129], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:02:34.480237Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519624251369971561:2782][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519624251369971576:2782] 2025-06-24T21:02:34.480256Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][3:7519624251369971561:2782][/dc-1/.metadata/workload_manager/running_requests] Ignore empty state: owner# [3:7519624229895134107:2129], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:02:34.480272Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519624229895133808:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519624251369971571:2781] 2025-06-24T21:02:34.480286Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519624229895133808:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519624251369971577:2782] 2025-06-24T21:02:34.480298Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519624229895133811:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519624251369971572:2781] 2025-06-24T21:02:34.480309Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519624229895133811:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519624251369971578:2782] 2025-06-24T21:02:34.480322Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519624229895133814:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519624251369971573:2781] 2025-06-24T21:02:34.480333Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519624229895133814:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519624251369971579:2782] 2025-06-24T21:02:34.480358Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519624229895134107:2129], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-24T21:02:34.480414Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519624229895134107:2129], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519624251369971560:2781] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:34.480464Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# 
[3:7519624229895134107:2129], cacheItem# { Subscriber: { Subscriber: [3:7519624251369971560:2781] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:34.480484Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519624229895134107:2129], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-24T21:02:34.480539Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519624229895134107:2129], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519624251369971561:2782] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:34.480592Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624229895134107:2129], cacheItem# { Subscriber: { Subscriber: [3:7519624251369971561:2782] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:34.480658Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624251369971581:2784], recipient# [3:7519624251369971559:2283], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:35.399535Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624229895134107:2129], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:35.399688Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: 
cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624229895134107:2129], cacheItem# { Subscriber: { Subscriber: [3:7519624234190102057:2587] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:35.399794Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624255664938898:2792], recipient# [3:7519624255664938897:2284], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:35.431099Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624229895134107:2129], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:35.431283Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624229895134107:2129], cacheItem# { Subscriber: { Subscriber: [3:7519624234190102057:2587] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:35.431393Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624255664938900:2793], recipient# [3:7519624255664938899:2285], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:35.499563Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624229895134107:2129], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:35.499716Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: 
cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624229895134107:2129], cacheItem# { Subscriber: { Subscriber: [3:7519624251369971558:2780] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:35.499821Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624255664938905:2794], recipient# [3:7519624255664938904:2286], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> BackupRestore::TestAllPrimitiveTypes-INT16 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT32 |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> TExtSubDomainTest::DeclareAndDefineWithoutNodes-AlterDatabaseCreateHiveFirst-true >> BackupPathTest::EmptyDirectoryIsOk >> EncryptedBackupParamsValidationTestFeatureDisabled::SrcPrefixAndSrcPathSpecified [GOOD] >> BuildStatsHistogram::Ten_Crossed_Log [GOOD] >> BuildStatsHistogram::Five_Five_Mixed >> TExtSubDomainTest::DeclareAndDefineWithoutNodes-AlterDatabaseCreateHiveFirst-true [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT32 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT64 >> EncryptedExportTest::EncryptedExportAndImport >> TExtSubDomainTest::GenericCases [GOOD] >> BsControllerConfig::ManyPDisksRestarts >> BsControllerConfig::Basic >> BsControllerConfig::OverlayMap >> BackupRestoreS3::TestAllPrimitiveTypes-UUID [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeTableIndex [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeFileStore [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeSequence ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::DeclareAndDefineWithoutNodes-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: 2025-06-24T21:02:39.805007Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624274210317005:2150];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:39.805845Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0035de/r3tmp/tmpO4b5Sr/pdisk_1.dat 2025-06-24T21:02:40.492295Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:40.517347Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-24T21:02:40.517444Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:40.532797Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:02:40.807423Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:65347 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T21:02:40.887616Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519624274210317105:2109] Handle TEvNavigate describe path dc-1 2025-06-24T21:02:40.975492Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519624278505284916:2444] HANDLE EvNavigateScheme dc-1 2025-06-24T21:02:40.975684Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624278505284499:2149], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:40.975764Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519624278505284895:2435][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519624278505284499:2149], cookie# 1 2025-06-24T21:02:40.977683Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624278505284899:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624278505284896:2435], cookie# 1 2025-06-24T21:02:40.977764Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624278505284900:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624278505284897:2435], cookie# 1 2025-06-24T21:02:40.977785Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624278505284901:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624278505284898:2435], cookie# 1 2025-06-24T21:02:40.977826Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624274210316850:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624278505284899:2435], cookie# 1 2025-06-24T21:02:40.977861Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624274210316853:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624278505284900:2435], cookie# 1 2025-06-24T21:02:40.977890Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624274210316856:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624278505284901:2435], cookie# 1 2025-06-24T21:02:40.977935Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624278505284899:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624274210316850:2049], cookie# 1 2025-06-24T21:02:40.977950Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624278505284900:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624274210316853:2052], cookie# 1 2025-06-24T21:02:40.977994Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: 
[replica][1:7519624278505284901:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624274210316856:2055], cookie# 1 2025-06-24T21:02:40.978032Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624278505284895:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624278505284896:2435], cookie# 1 2025-06-24T21:02:40.978057Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624278505284895:2435][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T21:02:40.978076Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624278505284895:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624278505284897:2435], cookie# 1 2025-06-24T21:02:40.978088Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624278505284895:2435][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T21:02:40.978104Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624278505284895:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624278505284898:2435], cookie# 1 2025-06-24T21:02:40.978131Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519624278505284895:2435][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T21:02:40.978195Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519624278505284499:2149], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T21:02:41.003535Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519624278505284499:2149], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519624278505284895:2435] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:41.003704Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519624278505284499:2149], cacheItem# { Subscriber: { Subscriber: [1:7519624278505284895:2435] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-24T21:02:41.013694Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624282800252213:2445], recipient# [1:7519624278505284916:2444], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: 
OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:02:41.013799Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519624278505284916:2444] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:41.049393Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519624278505284916:2444] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-24T21:02:41.053308Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519624278505284916:2444] Handle TEvDescribeSchemeResult Forward to# [1:7519624278505284912:2440] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 
72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:02:41.088237Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519624274210317105:2109] Handle TEvProposeTransaction 2025-06-24T21:02:41.088267Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:75196242742 ... ath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 2 IsSync: true Partial: 0 } 2025-06-24T21:02:42.009750Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624287095219835:2677], recipient# [1:7519624287095219834:2676], result# { ErrorCount: 1 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: [72057594046644480:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: RedirectLookupError Kind: KindExtSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:02:42.009776Z node 1 :TX_PROXY INFO: describe.cpp:356: Actor# [1:7519624287095219834:2676] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 1 TClient::Ls response: Status: 128 StatusCode: ERROR Issues { message: "Default error" severity: 1 } SchemeStatus: 13 ErrorReason: "Could not resolve redirected path" TClient::Ls request: /dc-1 2025-06-24T21:02:42.018079Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519624274210317105:2109] Handle TEvNavigate describe path /dc-1 2025-06-24T21:02:42.034638Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519624287095219837:2679] HANDLE EvNavigateScheme /dc-1 2025-06-24T21:02:42.034733Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624278505284499:2149], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:42.034802Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519624278505284895:2435][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519624278505284499:2149], cookie# 4 2025-06-24T21:02:42.034877Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624278505284899:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624278505284896:2435], cookie# 4 2025-06-24T21:02:42.034895Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624278505284900:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624278505284897:2435], cookie# 4 2025-06-24T21:02:42.034924Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624278505284901:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624278505284898:2435], cookie# 4 2025-06-24T21:02:42.034948Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: 
[1:7519624274210316850:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624278505284899:2435], cookie# 4 2025-06-24T21:02:42.034970Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624274210316853:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624278505284900:2435], cookie# 4 2025-06-24T21:02:42.035003Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624274210316856:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624278505284901:2435], cookie# 4 2025-06-24T21:02:42.035031Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624278505284899:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519624274210316850:2049], cookie# 4 2025-06-24T21:02:42.035043Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624278505284900:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519624274210316853:2052], cookie# 4 2025-06-24T21:02:42.035054Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624278505284901:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519624274210316856:2055], cookie# 4 2025-06-24T21:02:42.035099Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624278505284895:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519624278505284896:2435], cookie# 4 2025-06-24T21:02:42.035120Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624278505284895:2435][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T21:02:42.035136Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624278505284895:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519624278505284897:2435], cookie# 4 2025-06-24T21:02:42.035222Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624278505284895:2435][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T21:02:42.035234Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624278505284895:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519624278505284898:2435], cookie# 4 2025-06-24T21:02:42.035253Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519624278505284895:2435][/dc-1] Sync is done in the ring group: cookie# 4, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T21:02:42.035298Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519624278505284499:2149], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T21:02:42.035356Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519624278505284499:2149], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519624278505284895:2435] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750798961296 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 
72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:42.035416Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519624278505284499:2149], cacheItem# { Subscriber: { Subscriber: [1:7519624278505284895:2435] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750798961296 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 4 IsSync: true Partial: 0 } 2025-06-24T21:02:42.035594Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624287095219838:2680], recipient# [1:7519624287095219837:2679], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:02:42.035629Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519624287095219837:2679] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:42.035711Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519624287095219837:2679] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1" Options { ShowPrivateTable: true } 2025-06-24T21:02:42.036348Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519624287095219837:2679] Handle TEvDescribeSchemeResult Forward to# [1:7519624287095219836:2678] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 63 Record# Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750798961296 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePools { Name: "" Kind: "storage-pool-number-1" } StoragePools { Name: "" Kind: "storage-pool-number-2" } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 
UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750798961296 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750798961366 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046... (TRUNCATED) >> BsControllerConfig::OverlayMap [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT32 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT64 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::GenericCases [GOOD] Test command err: 2025-06-24T21:02:37.853230Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624264144582765:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:37.853288Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0035e7/r3tmp/tmpSudUlW/pdisk_1.dat 2025-06-24T21:02:38.663441Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:38.671947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:38.672067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:38.694083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:02:38.943310Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24153 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T21:02:39.003542Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519624264144582886:2117] Handle TEvNavigate describe path dc-1 2025-06-24T21:02:39.069950Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519624272734518042:2452] HANDLE EvNavigateScheme dc-1 2025-06-24T21:02:39.070105Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624268439550271:2130], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:39.070200Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519624268439550726:2444][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519624268439550271:2130], cookie# 1 2025-06-24T21:02:39.075901Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624268439550730:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624268439550727:2444], cookie# 1 2025-06-24T21:02:39.075963Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624268439550731:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624268439550728:2444], cookie# 1 2025-06-24T21:02:39.075984Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624268439550732:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624268439550729:2444], cookie# 1 2025-06-24T21:02:39.076021Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624264144582666:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624268439550730:2444], cookie# 1 2025-06-24T21:02:39.076047Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624264144582669:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624268439550731:2444], cookie# 1 2025-06-24T21:02:39.076069Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624264144582672:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624268439550732:2444], cookie# 1 2025-06-24T21:02:39.076105Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624268439550730:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624264144582666:2050], cookie# 1 2025-06-24T21:02:39.076133Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624268439550731:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624264144582669:2053], cookie# 1 2025-06-24T21:02:39.076148Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624268439550732:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624264144582672:2056], cookie# 1 2025-06-24T21:02:39.076177Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624268439550726:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624268439550727:2444], cookie# 1 2025-06-24T21:02:39.076211Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: 
[main][1:7519624268439550726:2444][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T21:02:39.076228Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624268439550726:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624268439550728:2444], cookie# 1 2025-06-24T21:02:39.076240Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624268439550726:2444][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T21:02:39.076253Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624268439550726:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624268439550729:2444], cookie# 1 2025-06-24T21:02:39.076273Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519624268439550726:2444][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T21:02:39.076341Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519624268439550271:2130], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T21:02:39.086684Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519624268439550271:2130], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519624268439550726:2444] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:39.086834Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519624268439550271:2130], cacheItem# { Subscriber: { Subscriber: [1:7519624268439550726:2444] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-24T21:02:39.089735Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624272734518043:2453], recipient# [1:7519624272734518042:2452], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:02:39.089812Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# 
[1:7519624272734518042:2452] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:39.125540Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519624272734518042:2452] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-24T21:02:39.130026Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519624272734518042:2452] Handle TEvDescribeSchemeResult Forward to# [1:7519624272734518041:2451] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:02:39.171908Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519624264144582886:2117] Handle TEvProposeTransaction 2025-06-24T21:02:39.171942Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:75196242641 ... 
: [main][1:7519624285619420752:3040][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [1:7519624285619420754:3040] 2025-06-24T21:02:42.714176Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519624285619420752:3040][/dc-1/.metadata/workload_manager/running_requests] Ignore empty state: owner# [1:7519624268439550271:2130], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:02:42.714195Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519624264144582669:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519624285619420762:3039] 2025-06-24T21:02:42.714195Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519624268439550271:2130], cacheItem# { Subscriber: { Subscriber: [1:7519624285619420752:3040] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:42.714208Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519624264144582669:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519624285619420761:3040] 2025-06-24T21:02:42.714230Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519624268439550271:2130], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-24T21:02:42.714288Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519624268439550271:2130], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [1:7519624285619420751:3039] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:42.714331Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519624268439550271:2130], cacheItem# { Subscriber: { Subscriber: [1:7519624285619420751:3039] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:42.714446Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624285619420765:3041], recipient# [1:7519624285619420750:2299], result# { ErrorCount: 2 
DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:42.859261Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624264144582765:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:42.859351Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:02:42.867560Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624268439550271:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:42.867663Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519624268439550271:2130], cacheItem# { Subscriber: { Subscriber: [1:7519624268439550734:2446] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:42.867749Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624285619420770:3042], recipient# [1:7519624285619420769:2300], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:43.713237Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624268439550271:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:43.713372Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519624268439550271:2130], cacheItem# { Subscriber: { Subscriber: [1:7519624285619420735:3037] DomainOwnerId: 
72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:43.713467Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624289914388087:3050], recipient# [1:7519624289914388086:2301], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:43.863340Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624268439550271:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:43.863500Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519624268439550271:2130], cacheItem# { Subscriber: { Subscriber: [1:7519624268439550734:2446] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:43.863613Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624289914388092:3051], recipient# [1:7519624289914388091:2302], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:43.872539Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624268439550271:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:43.872655Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519624268439550271:2130], cacheItem# { Subscriber: { Subscriber: [1:7519624268439550734:2446] 
DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:43.872733Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624289914388094:3052], recipient# [1:7519624289914388093:2303], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::OverlayMap [GOOD] >> BackupPathTest::EmptyDirectoryIsOk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestoreS3::TestAllPrimitiveTypes-UUID [GOOD] Test command err: 2025-06-24T21:02:03.812441Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624119723165123:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:03.812699Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045a1/r3tmp/tmpPMCYjY/pdisk_1.dat 2025-06-24T21:02:04.454877Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14284, node 1 2025-06-24T21:02:04.511937Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:02:04.630811Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:04.633412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:04.637241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:02:04.663583Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:02:04.663615Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:02:04.663628Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:02:04.663761Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:02:04.872812Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18898 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:02:05.202675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:02:07.816012Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624136903035235:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:07.816137Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:08.104642Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519624119723165241:2142] Handle TEvProposeTransaction 2025-06-24T21:02:08.104683Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519624119723165241:2142] TxId# 281474976710658 ProcessProposeTransaction 2025-06-24T21:02:08.104750Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519624119723165241:2142] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519624141198002557:2625] 2025-06-24T21:02:08.174219Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519624141198002557:2625] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Value" Type: "Utf8" NotNull: false } KeyColumnNames: "Key" PartitionConfig { } Temporary: false } } } UserToken: "" DatabaseName: "" 2025-06-24T21:02:08.174268Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519624141198002557:2625] txid# 281474976710658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:02:08.174662Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519624141198002557:2625] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:02:08.174743Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519624141198002557:2625] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:02:08.174929Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519624141198002557:2625] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:08.175097Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519624141198002557:2625] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:02:08.175157Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519624141198002557:2625] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-24T21:02:08.175320Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519624141198002557:2625] txid# 281474976710658 HANDLE EvClientConnected 2025-06-24T21:02:08.176789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:08.180783Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519624141198002557:2625] txid# 281474976710658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710658} 2025-06-24T21:02:08.180868Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519624141198002557:2625] txid# 281474976710658 SEND to# [1:7519624141198002556:2304] Source {TEvProposeTransactionStatus txid# 281474976710658 Status# 53} 2025-06-24T21:02:08.345545Z node 1 
:KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624141198002698:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:08.345663Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:08.425036Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519624119723165241:2142] Handle TEvProposeTransaction 2025-06-24T21:02:08.425065Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519624119723165241:2142] TxId# 281474976710659 ProcessProposeTransaction 2025-06-24T21:02:08.425101Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519624119723165241:2142] Cookie# 0 userReqId# "" txid# 281474976710659 SEND to# [1:7519624141198002710:2742] 2025-06-24T21:02:08.426974Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519624141198002710:2742] txid# 281474976710659 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateCdcStream CreateCdcStream { TableName: "table" StreamDescription { Name: "a" Mode: ECdcStreamModeUpdate Format: ECdcStreamFormatJson VirtualTimestamps: false AwsRegion: "" SchemaChanges: false } } } } UserToken: "" DatabaseName: "" PeerName: "" 2025-06-24T21:02:08.427007Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519624141198002710:2742] txid# 281474976710659 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:02:08.427071Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519624141198002710:2742] txid# 281474976710659 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:02:08.427436Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519624141198002710:2742] txid# 281474976710659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:08.427545Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519624141198002710:2742] HANDLE EvNavigateKeySetResult, txid# 281474976710659 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:02:08.427579Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519624141198002710:2742] txid# 281474976710659 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710659 TabletId# 72057594046644480} 2025-06-24T21:02:08.427762Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519624141198002710:2742] txid# 281474976710659 HANDLE EvClientConnected 2025-06-24T21:02:08.455107Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519624141198002710:2742] txid# 281474976710659 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-06-24T21:02:08.455173Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519624141198002710:2742] txid# 281474976710659 SEND to# [1:7519624141198002709:2316] Source {TEvProposeTransactionStatus txid# 281474976710659 Status# 53} 2025-06-24T21:02:08.564728Z node 1 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037888:1][1:7519624141198002883:2323] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:4:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-24T21:02:08.621562Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624141198002984:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, 
NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:08.621645Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to ... hemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:02:42.977421Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:989: TImport::TTxProgress: OnSchemeResult: id# 281474976710665, itemIdx# 0, success# 1 2025-06-24T21:02:42.977793Z node 13 :IMPORT INFO: schemeshard_import__create.cpp:629: TImport::TTxProgress: Allocate txId: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: State: CreateSchemeObject SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-24T21:02:42.996621Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:02:42.996785Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:02:42.996799Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:1218: TImport::TTxProgress: OnAllocateResult: txId# 281474976715760, id# 281474976710665 2025-06-24T21:02:42.996853Z node 13 :IMPORT INFO: schemeshard_import__create.cpp:419: TImport::TTxProgress: CreateTable propose: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: State: CreateSchemeObject SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976715760 2025-06-24T21:02:42.997011Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:02:42.999369Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715760:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:43.009061Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:02:43.009101Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:1314: TImport::TTxProgress: OnModifyResult: txId# 281474976715760, status# StatusAccepted 2025-06-24T21:02:43.009267Z node 13 :IMPORT INFO: schemeshard_import__create.cpp:643: TImport::TTxProgress: Wait for completion: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: CreateSchemeObject SubState: Subscribed WaitTxId: 281474976715760 Issue: '' } 2025-06-24T21:02:43.020514Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:02:43.125757Z node 13 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [13:7519624289807665368:2362] [0] Resolve database: name# /Root 2025-06-24T21:02:43.127354Z node 13 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [13:7519624289807665368:2362] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root 
DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:02:43.127395Z node 13 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [13:7519624289807665368:2362] [0] Send request: schemeShardId# 72057594046644480 2025-06-24T21:02:43.133427Z node 13 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [13:7519624289807665368:2362] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976710665 Status: SUCCESS Progress: PROGRESS_PREPARING ImportFromS3Settings { endpoint: "localhost:20700" scheme: HTTP bucket: "test_bucket" items { source_prefix: "UuidTable" destination_path: "/Root/UuidTable" } } StartTime { seconds: 1750798962 } } 2025-06-24T21:02:43.136322Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:02:43.136353Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:1472: TImport::TTxProgress: OnNotifyResult: txId# 281474976715760 2025-06-24T21:02:43.136460Z node 13 :IMPORT INFO: schemeshard_import__create.cpp:629: TImport::TTxProgress: Allocate txId: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-24T21:02:43.140942Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:02:43.141072Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:02:43.141088Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:1218: TImport::TTxProgress: OnAllocateResult: txId# 281474976715761, id# 281474976710665 2025-06-24T21:02:43.141148Z node 13 :IMPORT INFO: schemeshard_import__create.cpp:520: TImport::TTxProgress: Restore propose: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976715761 2025-06-24T21:02:43.141985Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:02:43.142528Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRestore, opId: 281474976715761:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_backup_restore_common.h:563) 2025-06-24T21:02:43.145910Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:02:43.145941Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:1314: 
TImport::TTxProgress: OnModifyResult: txId# 281474976715761, status# StatusAccepted 2025-06-24T21:02:43.146052Z node 13 :IMPORT INFO: schemeshard_import__create.cpp:643: TImport::TTxProgress: Wait for completion: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Subscribed WaitTxId: 281474976715761 Issue: '' } 2025-06-24T21:02:43.149479Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete REQUEST: HEAD /test_bucket/UuidTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:20700 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: C6C76797-9AB7-4C4B-9CBE-715D5494605C amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250624/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=b5ad2c26da001902192b5f629b02ffb65fb3839ecfa4919e2087977a230f5cf5 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250624T210243Z S3_MOCK::HttpServeRead: /test_bucket/UuidTable/data_00.csv / 39 REQUEST: GET /test_bucket/UuidTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:20700 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 146AE4D7-E7F9-4360-9547-3DDB92B48474 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250624/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;range;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=c9bca2c158cd609d55b6f95e987d19b07b2353bd9bbc4ef22b37a880becbde93 content-type: application/xml range: bytes=0-38 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250624T210243Z S3_MOCK::HttpServeRead: /test_bucket/UuidTable/data_00.csv / 39 2025-06-24T21:02:43.264005Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:02:43.264036Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:1472: TImport::TTxProgress: OnNotifyResult: txId# 281474976715761 2025-06-24T21:02:43.268040Z node 13 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:02:43.549071Z node 13 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [13:7519624289807665462:2366] [0] Resolve database: name# /Root 2025-06-24T21:02:43.551795Z node 13 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [13:7519624289807665462:2366] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 
72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:02:43.551842Z node 13 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [13:7519624289807665462:2366] [0] Send request: schemeShardId# 72057594046644480 2025-06-24T21:02:43.552895Z node 13 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [13:7519624289807665462:2366] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976710665 Status: SUCCESS Progress: PROGRESS_DONE ImportFromS3Settings { endpoint: "localhost:20700" scheme: HTTP bucket: "test_bucket" items { source_prefix: "UuidTable" destination_path: "/Root/UuidTable" } } StartTime { seconds: 1750798962 } EndTime { seconds: 1750798963 } } 2025-06-24T21:02:43.720473Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [13:7519624259742892442:2114] Handle TEvExecuteKqpTransaction 2025-06-24T21:02:43.720508Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [13:7519624259742892442:2114] TxId# 281474976710666 ProcessProposeKqpTransaction 2025-06-24T21:02:43.722975Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jyhw04vf30zhkykdg7mce10z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ODUyMDU4YTgtMWY1ZTkxZC0zM2ZjYzU2Yy0zM2IyNDE2Ng==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> TExtSubDomainTest::CreateTableInsideThenStopTenantAndForceDeleteSubDomain-AlterDatabaseCreateHiveFirst-true [GOOD] >> BsControllerConfig::ExtendByCreatingSeparateBox >> TExtSubDomainTest::CreateTableInsideAndAlterDomainAndTable-AlterDatabaseCreateHiveFirst-false [GOOD] >> TExtSubDomainTest::CreateTableInsideAndAlterDomainAndTable-AlterDatabaseCreateHiveFirst-true >> BackupPathTest::CommonPrefixButExplicitImportItems >> BsControllerConfig::ReassignGroupDisk >> BsControllerConfig::OverlayMapCrossReferences >> BsControllerConfig::Basic [GOOD] >> BsControllerConfig::DeleteStoragePool >> BackupRestoreS3::RestoreIndexTableSplitBoundaries [GOOD] >> BackupRestoreS3::RestoreIndexTableDecimalSplitBoundaries ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::CreateTableInsideThenStopTenantAndForceDeleteSubDomain-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: 2025-06-24T21:02:19.446678Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624189115088027:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:19.451851Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003682/r3tmp/tmp4PaSGI/pdisk_1.dat 2025-06-24T21:02:20.187427Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:20.187533Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:20.204755Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:02:20.227920Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:20.451383Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4075 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T21:02:20.545681Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519624189115088135:2117] Handle TEvNavigate describe path dc-1 2025-06-24T21:02:20.592364Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519624193410055922:2441] HANDLE EvNavigateScheme dc-1 2025-06-24T21:02:20.592481Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624189115088182:2138], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:20.592542Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519624193410055904:2435][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519624189115088182:2138], cookie# 1 2025-06-24T21:02:20.594003Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624193410055908:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624193410055905:2435], cookie# 1 2025-06-24T21:02:20.594042Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624193410055909:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624193410055906:2435], cookie# 1 2025-06-24T21:02:20.594056Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624193410055910:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624193410055907:2435], cookie# 1 2025-06-24T21:02:20.594107Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624189115087851:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624193410055908:2435], cookie# 1 2025-06-24T21:02:20.594142Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624189115087854:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624193410055909:2435], cookie# 1 2025-06-24T21:02:20.594157Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624189115087857:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624193410055910:2435], cookie# 1 2025-06-24T21:02:20.594181Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624193410055908:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624189115087851:2050], cookie# 1 2025-06-24T21:02:20.594199Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624193410055909:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624189115087854:2053], cookie# 1 2025-06-24T21:02:20.594218Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624193410055910:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624189115087857:2056], cookie# 1 2025-06-24T21:02:20.594268Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: 
subscriber.cpp:913: [main][1:7519624193410055904:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624193410055905:2435], cookie# 1 2025-06-24T21:02:20.594293Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624193410055904:2435][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T21:02:20.594329Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624193410055904:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624193410055906:2435], cookie# 1 2025-06-24T21:02:20.594346Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624193410055904:2435][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T21:02:20.594361Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624193410055904:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624193410055907:2435], cookie# 1 2025-06-24T21:02:20.594381Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519624193410055904:2435][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T21:02:20.594446Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519624189115088182:2138], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T21:02:20.600636Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519624189115088182:2138], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519624193410055904:2435] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:20.601039Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519624189115088182:2138], cacheItem# { Subscriber: { Subscriber: [1:7519624193410055904:2435] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-24T21:02:20.603533Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624193410055923:2442], recipient# [1:7519624193410055922:2441], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { 
Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:02:20.603626Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519624193410055922:2441] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:20.663869Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519624193410055922:2441] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-24T21:02:20.666764Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519624193410055922:2441] Handle TEvDescribeSchemeResult Forward to# [1:7519624193410055921:2440] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:02:20.707552Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519624189115088135:2117] Handle TEvProposeTransaction 2025-06-24T21:02:20.707603Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:751962418911 ... esNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:44.331779Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624294640412755:3551], recipient# [3:7519624294640412754:2311], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:44.940244Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624247395770444:2129], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:44.940415Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624247395770444:2129], cacheItem# { Subscriber: { Subscriber: [3:7519624247395771011:2518] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:44.940559Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624294640412763:3552], recipient# [3:7519624294640412762:2312], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:44.991583Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624247395770444:2129], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:44.991736Z node 3 
:TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624247395770444:2129], cacheItem# { Subscriber: { Subscriber: [3:7519624247395771011:2518] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:44.991844Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624294640412771:3556], recipient# [3:7519624294640412770:2313], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:45.336024Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624247395770444:2129], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:45.336186Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624247395770444:2129], cacheItem# { Subscriber: { Subscriber: [3:7519624268870608171:2999] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:45.336294Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624298935380075:3557], recipient# [3:7519624298935380074:2314], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:45.942580Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624247395770444:2129], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown 
DomainInfo }] } 2025-06-24T21:02:45.942735Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624247395770444:2129], cacheItem# { Subscriber: { Subscriber: [3:7519624247395771011:2518] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:45.942847Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624298935380085:3558], recipient# [3:7519624298935380084:2315], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:45.995805Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624247395770444:2129], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:45.995946Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624247395770444:2129], cacheItem# { Subscriber: { Subscriber: [3:7519624247395771011:2518] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:45.996048Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624298935380093:3562], recipient# [3:7519624298935380092:2316], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:46.339590Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624247395770444:2129], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] 
} 2025-06-24T21:02:46.339722Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624247395770444:2129], cacheItem# { Subscriber: { Subscriber: [3:7519624268870608171:2999] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:46.339817Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624303230347399:3563], recipient# [3:7519624303230347398:2317], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> BsControllerConfig::PDiskCreate >> BuildStatsHistogram::Five_Five_Mixed [GOOD] >> BuildStatsHistogram::Five_Five_Serial >> EncryptedExportTest::EncryptedExportAndImport [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-FLOAT >> BsControllerConfig::OverlayMapCrossReferences [GOOD] >> BsControllerConfig::ReassignGroupDisk [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT64 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT64 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::ReassignGroupDisk [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:291:2068] recipient: [1:265:2079] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:291:2068] recipient: [1:265:2079] Leader for TabletID 72057594037932033 is [1:303:2081] sender: [1:304:2068] recipient: [1:265:2079] 2025-06-24T21:02:48.478195Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-24T21:02:48.482833Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-24T21:02:48.483266Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-24T21:02:48.484081Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:02:48.537139Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-24T21:02:48.537290Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-24T21:02:48.537317Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-24T21:02:48.537580Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-24T21:02:48.547392Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-24T21:02:48.547531Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} 
Execute tx 2025-06-24T21:02:48.547713Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-24T21:02:48.547822Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T21:02:48.547929Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T21:02:48.548003Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:303:2081] sender: [1:326:2068] recipient: [1:22:2069] 2025-06-24T21:02:48.559771Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-24T21:02:48.559927Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T21:02:48.571736Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T21:02:48.571870Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T21:02:48.571942Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T21:02:48.572025Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T21:02:48.572159Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T21:02:48.572218Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T21:02:48.572272Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T21:02:48.572325Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T21:02:48.583699Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T21:02:48.583832Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T21:02:48.594676Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T21:02:48.594841Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-24T21:02:48.596181Z node 1 :BS_CONTROLLER DEBUG: 
{BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-24T21:02:48.596237Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-24T21:02:48.596446Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-24T21:02:48.596502Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-24T21:02:48.610414Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk" } } } Command { DefineBox { BoxId: 1 Name: "box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 8 PDiskFilter { Property { Type: ROT } } } } } 2025-06-24T21:02:48.611036Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1000 Path# /dev/disk 2025-06-24T21:02:48.611091Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /dev/disk 2025-06-24T21:02:48.611130Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1000 Path# /dev/disk 2025-06-24T21:02:48.611327Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1000 Path# /dev/disk 2025-06-24T21:02:48.611352Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1000 Path# /dev/disk 2025-06-24T21:02:48.611379Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1000 Path# /dev/disk 2025-06-24T21:02:48.611402Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1000 Path# /dev/disk 2025-06-24T21:02:48.611424Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1000 Path# /dev/disk 2025-06-24T21:02:48.611447Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1000 Path# /dev/disk 2025-06-24T21:02:48.611495Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1000 Path# /dev/disk 2025-06-24T21:02:48.611544Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1000 Path# /dev/disk 2025-06-24T21:02:48.611568Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 12:1000 Path# /dev/disk 2025-06-24T21:02:48.653424Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 Response# Status { Success: true } Status { Success: true } Status { Success: true } Success: true ConfigTxSeqNo: 1 
2025-06-24T21:02:48.655663Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { UpdateDriveStatus { HostKey { NodeId: 1 } Path: "/dev/disk" Status: INACTIVE } } } Response# Status { Success: true } Success: true ConfigTxSeqNo: 2 Leader for TabletID 72057594037932033 is [0:0:0] sender: [13:291:2068] recipient: [13:265:2079] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [13:291:2068] recipient: [13:265:2079] Leader for TabletID 72057594037932033 is [13:302:2081] sender: [13:304:2068] recipient: [13:265:2079] 2025-06-24T21:02:51.031421Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-24T21:02:51.032532Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-24T21:02:51.032718Z node 13 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-24T21:02:51.033925Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:02:51.034154Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-24T21:02:51.034269Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-24T21:02:51.034283Z node 13 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-24T21:02:51.034429Z node 13 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-24T21:02:51.043497Z node 13 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-24T21:02:51.043660Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-24T21:02:51.043787Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-24T21:02:51.043916Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T21:02:51.044016Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T21:02:51.044101Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [13:302:2081] sender: [13:326:2068] recipient: [13:22:2069] 2025-06-24T21:02:51.055789Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-24T21:02:51.055948Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T21:02:51.067294Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T21:02:51.067432Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T21:02:51.067519Z node 13 :BS_CONTROLLER DEBUG: 
{BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T21:02:51.067594Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T21:02:51.067797Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T21:02:51.067852Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T21:02:51.067895Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T21:02:51.067960Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T21:02:51.079731Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T21:02:51.079867Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T21:02:51.090718Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T21:02:51.090856Z node 13 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-24T21:02:51.092130Z node 13 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-24T21:02:51.092187Z node 13 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-24T21:02:51.092366Z node 13 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-24T21:02:51.092426Z node 13 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-24T21:02:51.093239Z node 13 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 2 Drive { Path: "/dev/disk" } } } Command { DefineBox { BoxId: 1 Name: "box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 2 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 8 PDiskFilter { Property { Type: ROT } } } } } 2025-06-24T21:02:51.093764Z node 13 :BS_CONTROLLER NOTICE: 
{BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 13:1000 Path# /dev/disk 2025-06-24T21:02:51.093826Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 14:1000 Path# /dev/disk 2025-06-24T21:02:51.093852Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 15:1000 Path# /dev/disk 2025-06-24T21:02:51.093899Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 16:1000 Path# /dev/disk 2025-06-24T21:02:51.093925Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 17:1000 Path# /dev/disk 2025-06-24T21:02:51.093947Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 18:1000 Path# /dev/disk 2025-06-24T21:02:51.093969Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 19:1000 Path# /dev/disk 2025-06-24T21:02:51.094003Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 20:1000 Path# /dev/disk 2025-06-24T21:02:51.094034Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 21:1000 Path# /dev/disk 2025-06-24T21:02:51.094060Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 22:1000 Path# /dev/disk 2025-06-24T21:02:51.094084Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 23:1000 Path# /dev/disk 2025-06-24T21:02:51.094105Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 24:1000 Path# /dev/disk 2025-06-24T21:02:51.125718Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 Response# Status { Success: true } Status { Success: true } Status { Success: true } Success: true ConfigTxSeqNo: 1 2025-06-24T21:02:51.127952Z node 13 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { UpdateDriveStatus { HostKey { NodeId: 1 } Path: "/dev/disk" Status: INACTIVE } } } Response# Status { ErrorDescription: "Host not found NodeId# 1 HostKey# NodeId: 1\n incorrect" FailReason: kHostNotFound FailParam { NodeId: 1 } } ErrorDescription: "Host not found NodeId# 1 HostKey# NodeId: 1\n incorrect" >> BsControllerConfig::PDiskCreate [GOOD] >> EncryptedExportTest::EncryptionAndCompression |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::OverlayMapCrossReferences [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::PDiskCreate [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:225:2066] recipient: [1:204:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:225:2066] recipient: [1:204:2077] Leader for TabletID 72057594037932033 is [1:234:2079] sender: [1:236:2066] recipient: [1:204:2077] 2025-06-24T21:02:50.351354Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-24T21:02:50.357512Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-24T21:02:50.357893Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-24T21:02:50.360704Z node 1 
:BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:02:50.361210Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-24T21:02:50.361329Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-24T21:02:50.361376Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-24T21:02:50.361598Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-24T21:02:50.371071Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-24T21:02:50.371271Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-24T21:02:50.371438Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-24T21:02:50.371570Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T21:02:50.371664Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T21:02:50.371720Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:234:2079] sender: [1:258:2066] recipient: [1:20:2067] 2025-06-24T21:02:50.383305Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-24T21:02:50.383487Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T21:02:50.394411Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T21:02:50.394546Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T21:02:50.394621Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T21:02:50.394681Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T21:02:50.394821Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T21:02:50.394880Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T21:02:50.394923Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T21:02:50.394985Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T21:02:50.407696Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T21:02:50.407847Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T21:02:50.418577Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T21:02:50.418763Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-24T21:02:50.420023Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-24T21:02:50.420078Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-24T21:02:50.420262Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-24T21:02:50.420331Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-24T21:02:50.435394Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "test box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } } } Command { QueryBaseConfig { } } } 2025-06-24T21:02:50.435962Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1000 Path# /dev/disk1 2025-06-24T21:02:50.436010Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1001 Path# /dev/disk2 2025-06-24T21:02:50.436056Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1002 Path# /dev/disk3 2025-06-24T21:02:50.436110Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /dev/disk1 2025-06-24T21:02:50.436152Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1001 Path# /dev/disk2 2025-06-24T21:02:50.436173Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1002 Path# /dev/disk3 2025-06-24T21:02:50.436195Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1000 Path# /dev/disk1 2025-06-24T21:02:50.436216Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1001 Path# /dev/disk2 2025-06-24T21:02:50.436237Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1002 Path# /dev/disk3 
2025-06-24T21:02:50.436258Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1000 Path# /dev/disk1 2025-06-24T21:02:50.436280Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1001 Path# /dev/disk2 2025-06-24T21:02:50.436316Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1002 Path# /dev/disk3 2025-06-24T21:02:50.436349Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1000 Path# /dev/disk1 2025-06-24T21:02:50.436386Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1001 Path# /dev/disk2 2025-06-24T21:02:50.436410Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1002 Path# /dev/disk3 2025-06-24T21:02:50.436433Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1000 Path# /dev/disk1 2025-06-24T21:02:50.436468Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1001 Path# /dev/disk2 2025-06-24T21:02:50.436495Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1002 Path# /dev/disk3 2025-06-24T21:02:50.436516Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1000 Path# /dev/disk1 2025-06-24T21:02:50.436553Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1001 Path# /dev/disk2 2025-06-24T21:02:50.436576Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1002 Path# /dev/disk3 2025-06-24T21:02:50.436597Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1000 Path# /dev/disk1 2025-06-24T21:02:50.436624Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1001 Path# /dev/disk2 2025-06-24T21:02:50.436655Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1002 Path# /dev/disk3 2025-06-24T21:02:50.436686Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1000 Path# /dev/disk1 2025-06-24T21:02:50.436708Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1001 Path# /dev/disk2 2025-06-24T21:02:50.436729Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1002 Path# /dev/disk3 2025-06-24T21:02:50.436766Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1000 Path# /dev/disk1 2025-06-24T21:02:50.436790Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1001 Path# /dev/disk2 2025-06-24T21:02:50.436813Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1002 Path# /dev/disk3 Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:225:2066] recipient: [11:204:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:225:2066] recipient: [11:204:2077] Leader for TabletID 72057594037932033 is [11:231:2079] sender: [11:234:2066] recipient: [11:204:2077] 2025-06-24T21:02:52.497513Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-24T21:02:52.498602Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} 
StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-24T21:02:52.498765Z node 11 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-24T21:02:52.500296Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:02:52.500605Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-24T21:02:52.500767Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-24T21:02:52.500788Z node 11 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-24T21:02:52.500962Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-24T21:02:52.510195Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-24T21:02:52.510372Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-24T21:02:52.510533Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-24T21:02:52.510661Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T21:02:52.510800Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T21:02:52.510865Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [11:231:2079] sender: [11:258:2066] recipient: [11:20:2067] 2025-06-24T21:02:52.524181Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-24T21:02:52.524353Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T21:02:52.535104Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T21:02:52.535280Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T21:02:52.535351Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T21:02:52.535435Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T21:02:52.535591Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T21:02:52.535685Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T21:02:52.535732Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} 
Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T21:02:52.535796Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T21:02:52.546489Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T21:02:52.546645Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T21:02:52.557259Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T21:02:52.557384Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-24T21:02:52.558623Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-24T21:02:52.558680Z node 11 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-24T21:02:52.558845Z node 11 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-24T21:02:52.558887Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-24T21:02:52.559608Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 2 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "test box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 2 } } } Command { QueryBaseConfig { } } } 2025-06-24T21:02:52.560048Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1000 Path# /dev/disk1 2025-06-24T21:02:52.560084Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1001 Path# /dev/disk2 2025-06-24T21:02:52.560106Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1002 Path# /dev/disk3 2025-06-24T21:02:52.560128Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 12:1000 Path# /dev/disk1 2025-06-24T21:02:52.560157Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 12:1001 Path# /dev/disk2 2025-06-24T21:02:52.560178Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 12:1002 Path# /dev/disk3 2025-06-24T21:02:52.560197Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 13:1000 Path# /dev/disk1 2025-06-24T21:02:52.560214Z node 11 :BS_CONTROLLER NOTICE: 
{BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 13:1001 Path# /dev/disk2 2025-06-24T21:02:52.560235Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 13:1002 Path# /dev/disk3 2025-06-24T21:02:52.560253Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 14:1000 Path# /dev/disk1 2025-06-24T21:02:52.560272Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 14:1001 Path# /dev/disk2 2025-06-24T21:02:52.560312Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 14:1002 Path# /dev/disk3 2025-06-24T21:02:52.560336Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 15:1000 Path# /dev/disk1 2025-06-24T21:02:52.560358Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 15:1001 Path# /dev/disk2 2025-06-24T21:02:52.560379Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 15:1002 Path# /dev/disk3 2025-06-24T21:02:52.560401Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 16:1000 Path# /dev/disk1 2025-06-24T21:02:52.560435Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 16:1001 Path# /dev/disk2 2025-06-24T21:02:52.560467Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 16:1002 Path# /dev/disk3 2025-06-24T21:02:52.560489Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 17:1000 Path# /dev/disk1 2025-06-24T21:02:52.560568Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 17:1001 Path# /dev/disk2 2025-06-24T21:02:52.560620Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 17:1002 Path# /dev/disk3 2025-06-24T21:02:52.560657Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 18:1000 Path# /dev/disk1 2025-06-24T21:02:52.560690Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 18:1001 Path# /dev/disk2 2025-06-24T21:02:52.560717Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 18:1002 Path# /dev/disk3 2025-06-24T21:02:52.560740Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 19:1000 Path# /dev/disk1 2025-06-24T21:02:52.560764Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 19:1001 Path# /dev/disk2 2025-06-24T21:02:52.560785Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 19:1002 Path# /dev/disk3 2025-06-24T21:02:52.560815Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 20:1000 Path# /dev/disk1 2025-06-24T21:02:52.560837Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 20:1001 Path# /dev/disk2 2025-06-24T21:02:52.560874Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 20:1002 Path# /dev/disk3 >> BackupPathTest::CommonPrefixButExplicitImportItems [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_10_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 10] [GOOD] >> 
alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_11_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 11] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeSequence [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeReplication >> BuildStatsHistogram::Five_Five_Serial [GOOD] >> BuildStatsHistogram::Five_Five_Crossed >> BackupPathTest::ExportDirectoryWithEncryption >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldRequestCompactionsSchemeshardRestart >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldNotCompactBorrowedAfterSplitMergeWhenDisabled >> TSchemeshardCompactionQueueTest::EnqueueEmptyShard [GOOD] >> TSchemeshardCompactionQueueTest::EnqueueSinglePartedShard [GOOD] >> TSchemeshardCompactionQueueTest::EnqueueSinglePartedShardWhenEnabled [GOOD] >> TSchemeshardCompactionQueueTest::ShouldNotEnqueueEmptyShard [GOOD] >> TSchemeshardCompactionQueueTest::RemoveLastShardFromSubQueues [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::EnqueueSinglePartedShardWhenEnabled [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::RemoveLastShardFromSubQueues [GOOD] >> BackupRestoreS3::RestoreIndexTableDecimalSplitBoundaries [GOOD] >> BackupRestoreS3::RestoreViewQueryText >> BackupRestore::TestAllPrimitiveTypes-FLOAT [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DOUBLE >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotCompactBackups >> TSchemeshardCompactionQueueTest::EnqueueBelowSearchHeightThreshold [GOOD] >> TSchemeshardCompactionQueueTest::EnqueueBelowRowDeletesThreshold [GOOD] >> TSchemeshardCompactionQueueTest::CheckOrderWhenAllQueues [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::CheckOrderWhenAllQueues [GOOD] >> TExtSubDomainTest::CreateTableInsideAndAlterDomainAndTable-AlterDatabaseCreateHiveFirst-true [GOOD] >> EncryptedExportTest::EncryptionAndCompression [GOOD] >> BuildStatsHistogram::Five_Five_Crossed [GOOD] >> BuildStatsHistogram::Single_Small_2_Levels >> BuildStatsHistogram::Single_Small_2_Levels [GOOD] >> BuildStatsHistogram::Single_Small_2_Levels_3_Buckets [GOOD] >> BuildStatsHistogram::Single_Small_1_Level >> BuildStatsHistogram::Single_Small_1_Level [GOOD] >> BuildStatsHistogram::Single_Small_0_Levels [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_2_Levels >> BackupRestoreS3::TestAllPrimitiveTypes-UINT64 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-FLOAT >> BuildStatsHistogram::Three_Mixed_Small_2_Levels [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_2_Levels_3_Buckets >> BuildStatsHistogram::Three_Mixed_Small_2_Levels_3_Buckets [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_1_Level >> BuildStatsHistogram::Three_Mixed_Small_1_Level [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_0_Levels >> BuildStatsHistogram::Three_Mixed_Small_0_Levels [GOOD] >> BuildStatsHistogram::Mixed_Groups_History >> EncryptedExportTest::EncryptionAndChecksum >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerless >> CommonEncryptionRequirementsTest::CommonEncryptionRequirements [FAIL] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::CreateTableInsideAndAlterDomainAndTable-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: 2025-06-24T21:02:33.484984Z node 1 :METADATA_PROVIDER 
WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624247814066282:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:33.486176Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0035eb/r3tmp/tmphh2joq/pdisk_1.dat 2025-06-24T21:02:34.471302Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:34.471389Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:34.483169Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:34.514894Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:02:34.558655Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10712 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T21:02:34.816029Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519624247814066477:2117] Handle TEvNavigate describe path dc-1 2025-06-24T21:02:34.845693Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519624252109034263:2442] HANDLE EvNavigateScheme dc-1 2025-06-24T21:02:34.845837Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519624247814066500:2130], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:34.845919Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519624252109034178:2382][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519624247814066500:2130], cookie# 1 2025-06-24T21:02:34.847584Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624252109034183:2382][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624252109034180:2382], cookie# 1 2025-06-24T21:02:34.847648Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624252109034184:2382][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624252109034181:2382], cookie# 1 2025-06-24T21:02:34.847669Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519624252109034185:2382][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624252109034182:2382], cookie# 1 2025-06-24T21:02:34.847706Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624247814066191:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624252109034183:2382], cookie# 1 2025-06-24T21:02:34.847731Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519624247814066194:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624252109034184:2382], cookie# 1 2025-06-24T21:02:34.847748Z node 1 :SCHEME_BOARD_REPLICA DEBUG: 
replica.cpp:1128: [1:7519624247814066197:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519624252109034185:2382], cookie# 1 2025-06-24T21:02:34.847775Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624252109034183:2382][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624247814066191:2050], cookie# 1 2025-06-24T21:02:34.847790Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624252109034184:2382][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624247814066194:2053], cookie# 1 2025-06-24T21:02:34.847803Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519624252109034185:2382][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624247814066197:2056], cookie# 1 2025-06-24T21:02:34.847841Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624252109034178:2382][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624252109034180:2382], cookie# 1 2025-06-24T21:02:34.847871Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624252109034178:2382][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T21:02:34.847889Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624252109034178:2382][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624252109034181:2382], cookie# 1 2025-06-24T21:02:34.847900Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519624252109034178:2382][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T21:02:34.847919Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519624252109034178:2382][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519624252109034182:2382], cookie# 1 2025-06-24T21:02:34.847942Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519624252109034178:2382][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-24T21:02:34.848003Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519624247814066500:2130], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-24T21:02:34.861836Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519624247814066500:2130], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519624252109034178:2382] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:02:34.861978Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519624247814066500:2130], cacheItem# { Subscriber: { Subscriber: [1:7519624252109034178:2382] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 
CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-24T21:02:34.868567Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519624252109034264:2443], recipient# [1:7519624252109034263:2442], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:02:34.868656Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519624252109034263:2442] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:34.968806Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519624252109034263:2442] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-24T21:02:34.976797Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519624252109034263:2442] Handle TEvDescribeSchemeResult Forward to# [1:7519624252109034261:2440] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 
1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:02:35.034424Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519624247814066477:2117] Handle TEvProposeTransaction 2025-06-24T21:02:35.034457Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:75196242478 ... 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:57.584819Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624349752954616:4063], recipient# [3:7519624349752954615:2308], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:58.407874Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624311098246445:2128], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:58.408054Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624311098246445:2128], cacheItem# { Subscriber: { Subscriber: [3:7519624332573084380:3196] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:58.408212Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624354047921930:4067], recipient# [3:7519624354047921929:2309], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ 
Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:58.586990Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624311098246445:2128], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:58.587129Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624311098246445:2128], cacheItem# { Subscriber: { Subscriber: [3:7519624315393214500:2667] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:58.587230Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624354047921936:4069], recipient# [3:7519624354047921935:2310], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:59.412835Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624311098246445:2128], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:59.413016Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624311098246445:2128], cacheItem# { Subscriber: { Subscriber: [3:7519624332573084380:3196] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:59.413120Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624358342889257:4081], recipient# [3:7519624358342889256:2311], result# { ErrorCount: 1 DatabaseName: 
/dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:59.591595Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624311098246445:2128], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:02:59.591753Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624311098246445:2128], cacheItem# { Subscriber: { Subscriber: [3:7519624315393214500:2667] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:02:59.591854Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624358342889262:4082], recipient# [3:7519624358342889261:2312], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:03:00.415736Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624311098246445:2128], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:03:00.415900Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624311098246445:2128], cacheItem# { Subscriber: { Subscriber: [3:7519624332573084380:3196] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:03:00.416007Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624362637856575:4086], recipient# 
[3:7519624362637856574:2313], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:03:00.595528Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519624311098246445:2128], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:03:00.595675Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519624311098246445:2128], cacheItem# { Subscriber: { Subscriber: [3:7519624315393214500:2667] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:03:00.595779Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519624362637856580:4087], recipient# [3:7519624362637856579:2314], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> BuildStatsHistogram::Mixed_Groups_History [GOOD] >> BuildStatsHistogram::Serial_Groups_History >> BackupPathTest::ExportDirectoryWithEncryption [GOOD] |93.1%| [TA] $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/test-results/unittest/{meta.json ... results_accumulator.log} |93.1%| [TA] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/test-results/unittest/{meta.json ... 
results_accumulator.log} >> BuildStatsHistogram::Serial_Groups_History [GOOD] >> BuildStatsHistogram::Benchmark >> EncryptedBackupParamsValidationTest::BadSourcePath >> BsControllerConfig::ExtendByCreatingSeparateBox [GOOD] >> BsControllerConfig::ExtendBoxAndStoragePool >> BackupPathTest::EncryptedExportWithExplicitDestinationPath >> TSchemeshardCompactionQueueTest::ShouldNotEnqueueSinglePartedShardWithMemData [GOOD] >> TSchemeshardCompactionQueueTest::ShouldPopWhenOnlyLastCompactionQueue [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::ShouldPopWhenOnlyLastCompactionQueue [GOOD] >> BuildStatsHistogram::Benchmark [GOOD] >> BuildStatsHistogram::Many_Mixed >> BsControllerConfig::DeleteStoragePool [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DOUBLE [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATE >> BackupRestoreS3::RestoreViewQueryText [GOOD] >> BackupRestoreS3::RestoreViewReferenceTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::DeleteStoragePool [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:225:2066] recipient: [1:204:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:225:2066] recipient: [1:204:2077] Leader for TabletID 72057594037932033 is [1:234:2079] sender: [1:236:2066] recipient: [1:204:2077] 2025-06-24T21:02:45.188406Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-24T21:02:45.206702Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-24T21:02:45.214425Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-24T21:02:45.218510Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:02:45.220100Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-24T21:02:45.220290Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-24T21:02:45.220345Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-24T21:02:45.220807Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-24T21:02:45.248562Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-24T21:02:45.248838Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-24T21:02:45.255398Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-24T21:02:45.255656Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T21:02:45.255786Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T21:02:45.255870Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:234:2079] sender: [1:257:2066] recipient: 
[1:20:2067] 2025-06-24T21:02:45.278538Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-24T21:02:45.278700Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T21:02:45.303707Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T21:02:45.303850Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T21:02:45.303925Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T21:02:45.303994Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T21:02:45.304096Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T21:02:45.304149Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T21:02:45.304181Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T21:02:45.304241Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T21:02:45.315018Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T21:02:45.315128Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T21:02:45.327648Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T21:02:45.327781Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-24T21:02:45.339321Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-24T21:02:45.339418Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-24T21:02:45.339710Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-24T21:02:45.339765Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-24T21:02:45.375271Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {} Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:225:2066] recipient: [11:204:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:225:2066] recipient: [11:204:2077] Leader for TabletID 72057594037932033 is [11:231:2079] sender: 
[11:234:2066] recipient: [11:204:2077] 2025-06-24T21:02:47.073503Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-24T21:02:47.074764Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-24T21:02:47.075012Z node 11 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-24T21:02:47.077140Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:02:47.077496Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-24T21:02:47.077801Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-24T21:02:47.077829Z node 11 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-24T21:02:47.078049Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-24T21:02:47.093846Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-24T21:02:47.093995Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-24T21:02:47.094117Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-24T21:02:47.094247Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T21:02:47.094396Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T21:02:47.094497Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [11:231:2079] sender: [11:257:2066] recipient: [11:20:2067] 2025-06-24T21:02:47.107775Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-24T21:02:47.107925Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T21:02:47.119178Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T21:02:47.119331Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T21:02:47.119422Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T21:02:47.119496Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T21:02:47.119597Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 
2025-06-24T21:02:47.119651Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T21:02:47.119705Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T21:02:47.119864Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T21:02:47.130591Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T21:02:47.130732Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T21:02:47.143553Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T21:02:47.143645Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-24T21:02:47.144486Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-24T21:02:47.144517Z node 11 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-24T21:02:47.144646Z node 11 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-24T21:02:47.144689Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-24T21:02:47.145076Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {} Leader for TabletID 72057594037932033 is [0:0:0] sender: [21:3066:2106] recipient: [21:2963:2117] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [21:3066:2106] recipient: [21:2963:2117] Leader for TabletID 72057594037932033 is [21:3112:2119] sender: [21:3116:2106] recipient: [21:2963:2117] 2025-06-24T21:02:49.450696Z node 21 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-24T21:02:49.452095Z node 21 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-24T21:02:49.452328Z n ... 
031Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 97:1001 Path# /dev/disk2 2025-06-24T21:02:58.715055Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 97:1002 Path# /dev/disk3 2025-06-24T21:02:58.715080Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 98:1000 Path# /dev/disk1 2025-06-24T21:02:58.715103Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 98:1001 Path# /dev/disk2 2025-06-24T21:02:58.715128Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 98:1002 Path# /dev/disk3 2025-06-24T21:02:58.716867Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 99:1000 Path# /dev/disk1 2025-06-24T21:02:58.716952Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 99:1001 Path# /dev/disk2 2025-06-24T21:02:58.716979Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 99:1002 Path# /dev/disk3 2025-06-24T21:02:58.717005Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 100:1000 Path# /dev/disk1 2025-06-24T21:02:58.717029Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 100:1001 Path# /dev/disk2 2025-06-24T21:02:58.717054Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 100:1002 Path# /dev/disk3 2025-06-24T21:02:58.717082Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 101:1000 Path# /dev/disk1 2025-06-24T21:02:58.717108Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 101:1001 Path# /dev/disk2 2025-06-24T21:02:58.717133Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 101:1002 Path# /dev/disk3 2025-06-24T21:02:58.717158Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 102:1000 Path# /dev/disk1 2025-06-24T21:02:58.717183Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 102:1001 Path# /dev/disk2 2025-06-24T21:02:58.717205Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 102:1002 Path# /dev/disk3 2025-06-24T21:02:58.717229Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 103:1000 Path# /dev/disk1 2025-06-24T21:02:58.717254Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 103:1001 Path# /dev/disk2 2025-06-24T21:02:58.717277Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 103:1002 Path# /dev/disk3 2025-06-24T21:02:58.717302Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 104:1000 Path# /dev/disk1 2025-06-24T21:02:58.717324Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 104:1001 Path# /dev/disk2 2025-06-24T21:02:58.717367Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 104:1002 Path# /dev/disk3 2025-06-24T21:02:58.717408Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 105:1000 Path# /dev/disk1 2025-06-24T21:02:58.717433Z node 71 :BS_CONTROLLER 
NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 105:1001 Path# /dev/disk2 2025-06-24T21:02:58.717458Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 105:1002 Path# /dev/disk3 2025-06-24T21:02:58.717483Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 106:1000 Path# /dev/disk1 2025-06-24T21:02:58.717509Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 106:1001 Path# /dev/disk2 2025-06-24T21:02:58.717533Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 106:1002 Path# /dev/disk3 2025-06-24T21:02:58.717556Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 107:1000 Path# /dev/disk1 2025-06-24T21:02:58.717580Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 107:1001 Path# /dev/disk2 2025-06-24T21:02:58.717605Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 107:1002 Path# /dev/disk3 2025-06-24T21:02:58.717636Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 108:1000 Path# /dev/disk1 2025-06-24T21:02:58.717662Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 108:1001 Path# /dev/disk2 2025-06-24T21:02:58.717686Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 108:1002 Path# /dev/disk3 2025-06-24T21:02:58.717709Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 109:1000 Path# /dev/disk1 2025-06-24T21:02:58.717734Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 109:1001 Path# /dev/disk2 2025-06-24T21:02:58.717758Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 109:1002 Path# /dev/disk3 2025-06-24T21:02:58.717784Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 110:1000 Path# /dev/disk1 2025-06-24T21:02:58.717817Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 110:1001 Path# /dev/disk2 2025-06-24T21:02:58.717851Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 110:1002 Path# /dev/disk3 2025-06-24T21:02:58.717877Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 111:1000 Path# /dev/disk1 2025-06-24T21:02:58.717901Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 111:1001 Path# /dev/disk2 2025-06-24T21:02:58.717925Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 111:1002 Path# /dev/disk3 2025-06-24T21:02:58.717947Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 112:1000 Path# /dev/disk1 2025-06-24T21:02:58.717971Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 112:1001 Path# /dev/disk2 2025-06-24T21:02:58.717994Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 112:1002 Path# /dev/disk3 2025-06-24T21:02:58.718018Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 113:1000 Path# /dev/disk1 2025-06-24T21:02:58.718041Z node 71 :BS_CONTROLLER NOTICE: 
{BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 113:1001 Path# /dev/disk2 2025-06-24T21:02:58.718065Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 113:1002 Path# /dev/disk3 2025-06-24T21:02:58.718091Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 114:1000 Path# /dev/disk1 2025-06-24T21:02:58.718115Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 114:1001 Path# /dev/disk2 2025-06-24T21:02:58.718139Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 114:1002 Path# /dev/disk3 2025-06-24T21:02:58.718161Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 115:1000 Path# /dev/disk1 2025-06-24T21:02:58.718186Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 115:1001 Path# /dev/disk2 2025-06-24T21:02:58.718208Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 115:1002 Path# /dev/disk3 2025-06-24T21:02:58.718239Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 116:1000 Path# /dev/disk1 2025-06-24T21:02:58.718273Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 116:1001 Path# /dev/disk2 2025-06-24T21:02:58.718301Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 116:1002 Path# /dev/disk3 2025-06-24T21:02:58.718323Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 117:1000 Path# /dev/disk1 2025-06-24T21:02:58.718346Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 117:1001 Path# /dev/disk2 2025-06-24T21:02:58.718371Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 117:1002 Path# /dev/disk3 2025-06-24T21:02:58.718394Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 118:1000 Path# /dev/disk1 2025-06-24T21:02:58.718418Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 118:1001 Path# /dev/disk2 2025-06-24T21:02:58.718442Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 118:1002 Path# /dev/disk3 2025-06-24T21:02:58.718469Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 119:1000 Path# /dev/disk1 2025-06-24T21:02:58.718493Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 119:1001 Path# /dev/disk2 2025-06-24T21:02:58.718517Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 119:1002 Path# /dev/disk3 2025-06-24T21:02:58.718540Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 120:1000 Path# /dev/disk1 2025-06-24T21:02:58.718564Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 120:1001 Path# /dev/disk2 2025-06-24T21:02:58.718589Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 120:1002 Path# /dev/disk3 2025-06-24T21:02:58.735079Z node 71 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "storage pool 1" ErasureSpecies: 
"block-4-2" VDiskKind: "Default" NumGroups: 50 PDiskFilter { Property { Type: ROT } } } } } 2025-06-24T21:02:58.841206Z node 71 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.106209s 2025-06-24T21:02:58.841321Z node 71 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.106344s 2025-06-24T21:02:58.852767Z node 71 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 71 Type# 268639257 2025-06-24T21:02:58.861732Z node 71 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineStoragePool { BoxId: 1 StoragePoolId: 2 Name: "storage pool 2" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 50 PDiskFilter { Property { Type: SSD } } } } Command { DeleteStoragePool { BoxId: 1 StoragePoolId: 2 ItemConfigGeneration: 1 } } } 2025-06-24T21:02:58.942961Z node 71 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DeleteStoragePool { BoxId: 1 StoragePoolId: 1 ItemConfigGeneration: 1 } } Command { QueryBaseConfig { } } } 2025-06-24T21:02:58.977832Z node 71 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 71 Type# 268639257 >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldCompactBorrowedBeforeSplit >> EncryptedBackupParamsValidationTest::BadSourcePath [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-FLOAT [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TIMESTAMP >> EncryptedBackupParamsValidationTest::NoDestination >> BackupPathTest::EncryptedExportWithExplicitDestinationPath [GOOD] >> EncryptedExportTest::EncryptionAndChecksum [GOOD] >> BackupPathTest::EncryptedExportWithExplicitObjectList >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_6_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 6] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_7_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 7] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_compression-COMPRESSION = "zstd"] [GOOD] >> KqpYql::UuidPrimaryKeyBulkUpsert >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldRequestCompactionsSchemeshardRestart [GOOD] >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldRequestCompactionsConfigRequest >> EncryptedExportTest::EncryptionChecksumAndCompression >> BackupRestore::TestAllPrimitiveTypes-DATE [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATETIME >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerless [GOOD] >> TSchemeshardBackgroundCompactionTest::ShouldCompactServerless >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_4_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 4] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_5_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 5] >> EncryptedBackupParamsValidationTest::NoDestination [GOOD] >> BackupRestoreS3::RestoreViewReferenceTable [GOOD] >> BackupRestoreS3::RestoreViewDependentOnAnotherView >> EncryptedBackupParamsValidationTest::NoItemDestination >> BackupRestoreS3::TestAllPrimitiveTypes-TIMESTAMP [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INTERVAL >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeReplication [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeExternalTable >> 
BackupPathTest::EncryptedExportWithExplicitObjectList [GOOD] >> KqpYql::UuidPrimaryKeyBulkUpsert [GOOD] >> alter_compression.py::TestAlterCompression::test_availability_data ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::UuidPrimaryKeyBulkUpsert [GOOD] Test command err: Trying to start YDB, gRPC: 29055, MsgBus: 7965 2025-06-24T21:03:15.440470Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624427206506200:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:03:15.440582Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003082/r3tmp/tmpBw8ubZ/pdisk_1.dat 2025-06-24T21:03:16.060089Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:03:16.063324Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624427206506176:2079] 1750798995393542 != 1750798995393545 2025-06-24T21:03:16.134488Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:03:16.134584Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:03:16.136284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29055, node 1 2025-06-24T21:03:16.408576Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:03:16.408603Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:03:16.408610Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:03:16.408722Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:03:16.476157Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7965 TClient is connected to server localhost:7965 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:03:17.675510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:03:18.888717Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624440091408706:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:18.888799Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:19.615862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:19.891695Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624444386376118:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:19.891775Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:19.892152Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624444386376123:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:19.897393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:03:19.912681Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624444386376125:2307], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:03:20.017091Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624448681343472:2403] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:03:20.480139Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624427206506200:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:03:20.480456Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> BackupRestore::TestAllPrimitiveTypes-DATETIME [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INTERVAL >> BackupPathTest::ExportCommonSourcePathImportExplicitly >> KqpYql::InsertCV+useSink >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_13_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 13] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_14_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 14] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_8_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 8] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_9_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 9] >> EncryptedExportTest::EncryptionChecksumAndCompression [GOOD] >> EncryptedBackupParamsValidationTest::NoItemDestination [GOOD] >> BackupRestoreS3::RestoreViewDependentOnAnotherView [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeInvalid [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobal >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_2_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 2] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_3_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 3] >> TSchemeshardBackgroundCompactionTest::ShouldCompactServerless [GOOD] >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerlessAfterDisable >> EncryptedExportTest::ChangefeedEncryption >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotCompactBackups [GOOD] >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotCompactBorrowed >> BackupRestoreS3::TestAllPrimitiveTypes-INTERVAL [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TZ_DATE [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TZ_DATETIME [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TZ_TIMESTAMP [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TIMESTAMP64 >> EncryptedBackupParamsValidationTest::NoCommonDestination >> BsControllerConfig::ExtendBoxAndStoragePool [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_15_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 15] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_16_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 16] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::ExtendBoxAndStoragePool [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3065:2106] recipient: 
[1:2973:2117] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3065:2106] recipient: [1:2973:2117] Leader for TabletID 72057594037932033 is [1:3113:2119] sender: [1:3115:2106] recipient: [1:2973:2117] 2025-06-24T21:02:48.617089Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-24T21:02:48.620473Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-24T21:02:48.620739Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-24T21:02:48.622377Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:02:48.622920Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-24T21:02:48.623068Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-24T21:02:48.623086Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-24T21:02:48.623356Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-24T21:02:48.631679Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-24T21:02:48.631826Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-24T21:02:48.631977Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-24T21:02:48.632220Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T21:02:48.632332Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T21:02:48.632416Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:3113:2119] sender: [1:3138:2106] recipient: [1:60:2107] 2025-06-24T21:02:48.647756Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-24T21:02:48.647921Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T21:02:48.659397Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T21:02:48.659532Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T21:02:48.659604Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T21:02:48.659700Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T21:02:48.659864Z node 1 :BS_CONTROLLER DEBUG: 
{BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T21:02:48.659927Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T21:02:48.660001Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T21:02:48.660068Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T21:02:48.670921Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T21:02:48.671065Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T21:02:48.682238Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T21:02:48.682410Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-24T21:02:48.683727Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-24T21:02:48.683789Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-24T21:02:48.683984Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-24T21:02:48.684035Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-24T21:02:48.709666Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "first box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 1 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "first storage pool" ErasureSpecies: "block-4-2" VDiskKind: 
"Default" NumGroups: 60 PDiskFilter { Property { Type: ROT } } } } Command { QueryBaseConfig { } } } 2025-06-24T21:02:48.710564Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1000 Path# /dev/disk1 2025-06-24T21:02:48.710620Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1001 Path# /dev/disk2 2025-06-24T21:02:48.710672Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1002 Path# /dev/disk3 2025-06-24T21:02:48.710702Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /dev/disk1 2025-06-24T21:02:48.710724Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1001 Path# /dev/disk2 2025-06-24T21:02:48.710759Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1002 Path# /dev/disk3 2025-06-24T21:02:48.710788Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1000 Path# /dev/disk1 2025-06-24T21:02:48.710828Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1001 Path# /dev/disk2 2025-06-24T21:02:48.710853Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1002 Path# /dev/disk3 2025-06-24T21:02:48.710876Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1000 Path# /dev/disk1 2025-06-24T21:02:48.710900Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1001 Path# /dev/disk2 2025-06-24T21:02:48.710925Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1002 Path# /dev/disk3 2025-06-24T21:02:48.710956Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1000 Path# /dev/disk1 2025-06-24T21:02:48.710993Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1001 Path# /dev/disk2 2025-06-24T21:02:48.711016Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1002 Path# /dev/disk3 2025-06-24T21:02:48.711052Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1000 Path# /dev/disk1 2025-06-24T21:02:48.711088Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1001 Path# /dev/disk2 2025-06-24T21:02:48.711120Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1002 Path# /dev/disk3 2025-06-24T21:02:48.711184Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1000 Path# /dev/disk1 2025-06-24T21:02:48.711208Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1001 Path# /dev/disk2 2025-06-24T21:02:48.711234Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1002 Path# /dev/disk3 2025-06-24T21:02:48.711258Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1000 Path# /dev/disk1 2025-06-24T21:02:48.711283Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1001 Path# /dev/disk2 2025-06-24T21:02:48.711331Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1002 Path# 
/dev/disk3 2025-06-24T21:02:48.711371Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1000 Path# /dev/disk1 2025-06-24T21:02:48.711399Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1001 Path# /dev/disk2 2025-06-24T21:02:48.711423Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1002 Path# /dev/disk3 2025-06-24T21:02:48.711468Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1000 Path# /dev/disk1 2025-06-24T21:02:48.711496Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1001 Path# /dev/disk2 2025-06-24T21:02:48.711520Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1002 Path# /dev/disk3 2025-06-24T21:02:48.711548Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1000 Path# /dev/disk1 2025-06-24T21:02:48.711576Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1001 Path# /dev/disk2 2025-06-24T21:02:48.711602Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Cr ... S_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 204:1002 Path# /dev/disk3 2025-06-24T21:03:17.479396Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 205:1000 Path# /dev/disk1 2025-06-24T21:03:17.479421Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 205:1001 Path# /dev/disk2 2025-06-24T21:03:17.479448Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 205:1002 Path# /dev/disk3 2025-06-24T21:03:17.479473Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 206:1000 Path# /dev/disk1 2025-06-24T21:03:17.479499Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 206:1001 Path# /dev/disk2 2025-06-24T21:03:17.479525Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 206:1002 Path# /dev/disk3 2025-06-24T21:03:17.479551Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 207:1000 Path# /dev/disk1 2025-06-24T21:03:17.479578Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 207:1001 Path# /dev/disk2 2025-06-24T21:03:17.479604Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 207:1002 Path# /dev/disk3 2025-06-24T21:03:17.479627Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 208:1000 Path# /dev/disk1 2025-06-24T21:03:17.479649Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 208:1001 Path# /dev/disk2 2025-06-24T21:03:17.479671Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 208:1002 Path# /dev/disk3 2025-06-24T21:03:17.479708Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 209:1000 Path# /dev/disk1 2025-06-24T21:03:17.479753Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 209:1001 Path# /dev/disk2 2025-06-24T21:03:17.479779Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create 
new pdisk PDiskId# 209:1002 Path# /dev/disk3 2025-06-24T21:03:17.479802Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 210:1000 Path# /dev/disk1 2025-06-24T21:03:17.479828Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 210:1001 Path# /dev/disk2 2025-06-24T21:03:17.479876Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 210:1002 Path# /dev/disk3 2025-06-24T21:03:17.787068Z node 161 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.320598s 2025-06-24T21:03:17.787319Z node 161 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.320868s 2025-06-24T21:03:17.800422Z node 161 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 161 Type# 268639257 2025-06-24T21:03:17.833487Z node 161 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 4 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "first box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12021 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12022 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12023 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12024 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12025 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12026 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12027 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12028 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12029 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12030 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12031 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12032 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12033 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12034 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12035 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12036 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12037 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12038 } HostConfigId: 
4 } Host { Key { Fqdn: "::1" IcPort: 12039 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12040 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12041 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12042 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12043 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12044 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12045 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12046 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12047 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12048 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12049 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12050 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12051 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12052 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12053 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12054 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12055 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12056 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12057 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12058 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12059 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12060 } HostConfigId: 4 } ItemConfigGeneration: 1 } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "first storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 180 PDiskFilter { Property { Type: ROT } } ItemConfigGeneration: 1 } } Command { QueryBaseConfig { } } } 2025-06-24T21:03:17.835976Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 211:1000 Path# /dev/disk1 2025-06-24T21:03:17.836054Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 211:1001 Path# /dev/disk2 2025-06-24T21:03:17.836083Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 211:1002 Path# /dev/disk3 2025-06-24T21:03:17.836123Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 212:1000 Path# /dev/disk1 2025-06-24T21:03:17.836166Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 212:1001 Path# /dev/disk2 2025-06-24T21:03:17.836192Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 212:1002 Path# /dev/disk3 2025-06-24T21:03:17.836219Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 213:1000 Path# /dev/disk1 2025-06-24T21:03:17.836245Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 213:1001 Path# /dev/disk2 2025-06-24T21:03:17.836271Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 213:1002 Path# /dev/disk3 2025-06-24T21:03:17.836301Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 214:1000 Path# /dev/disk1 2025-06-24T21:03:17.836343Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 214:1001 Path# /dev/disk2 2025-06-24T21:03:17.836386Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 214:1002 Path# /dev/disk3 2025-06-24T21:03:17.836418Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 215:1000 Path# /dev/disk1 2025-06-24T21:03:17.836448Z node 161 :BS_CONTROLLER 
NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 215:1001 Path# /dev/disk2 2025-06-24T21:03:17.836475Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 215:1002 Path# /dev/disk3 2025-06-24T21:03:17.836500Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 216:1000 Path# /dev/disk1 2025-06-24T21:03:17.836535Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 216:1001 Path# /dev/disk2 2025-06-24T21:03:17.836572Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 216:1002 Path# /dev/disk3 2025-06-24T21:03:17.836605Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 217:1000 Path# /dev/disk1 2025-06-24T21:03:17.836631Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 217:1001 Path# /dev/disk2 2025-06-24T21:03:17.836664Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 217:1002 Path# /dev/disk3 2025-06-24T21:03:17.836692Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 218:1000 Path# /dev/disk1 2025-06-24T21:03:17.836718Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 218:1001 Path# /dev/disk2 2025-06-24T21:03:17.836745Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 218:1002 Path# /dev/disk3 2025-06-24T21:03:17.836773Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 219:1000 Path# /dev/disk1 2025-06-24T21:03:17.836800Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 219:1001 Path# /dev/disk2 2025-06-24T21:03:17.836826Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 219:1002 Path# /dev/disk3 2025-06-24T21:03:17.836855Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 220:1000 Path# /dev/disk1 2025-06-24T21:03:17.836884Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 220:1001 Path# /dev/disk2 2025-06-24T21:03:17.836913Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 220:1002 Path# /dev/disk3 2025-06-24T21:03:17.913534Z node 161 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 161 Type# 268639257 >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeExternalTable [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeExternalDataSource >> BackupRestore::TestAllPrimitiveTypes-INTERVAL [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATE32 >> BackupPathTest::ExportCommonSourcePathImportExplicitly [GOOD] >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldRequestCompactionsConfigRequest [GOOD] >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotRequestCompactionsAfterDisable >> KqpYql::InsertCV+useSink [GOOD] >> KqpYql::InsertCV-useSink >> KqpYql::InsertCVList+useSink >> BackupPathTest::ImportFilterByPrefix >> BuildStatsHistogram::Many_Mixed [GOOD] >> BuildStatsHistogram::Many_Serial >> EncryptedBackupParamsValidationTest::NoCommonDestination [GOOD] >> EncryptedExportTest::ChangefeedEncryption [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobal [GOOD] >> 
BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalAsync >> EncryptedBackupParamsValidationTest::IncorrectKeyLengthExport >> BackupRestore::TestAllPrimitiveTypes-DATE32 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATETIME64 >> BackupRestoreS3::TestAllPrimitiveTypes-TIMESTAMP64 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INTERVAL64 >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeExternalDataSource [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeView >> KqpYql::InsertCV-useSink [GOOD] >> EncryptedExportTest::TopicEncryption >> KqpYql::InsertCVList+useSink [GOOD] >> KqpYql::InsertCVList-useSink >> BackupPathTest::ImportFilterByPrefix [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::InsertCV-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 8435, MsgBus: 29969 2025-06-24T21:03:24.639242Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624468292328015:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:03:24.639270Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00307e/r3tmp/tmpGJxe4w/pdisk_1.dat 2025-06-24T21:03:25.109446Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:03:25.111712Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624468292327978:2079] 1750799004628856 != 1750799004628859 2025-06-24T21:03:25.132081Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:03:25.132198Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 8435, node 1 2025-06-24T21:03:25.136461Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:03:25.227799Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:03:25.227825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:03:25.227832Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:03:25.227951Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29969 2025-06-24T21:03:25.663633Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29969 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:03:26.102504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:03:26.145974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:26.326175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:26.539420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:26.654851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:28.533163Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624485472198813:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:28.533277Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:28.913014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:28.971877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:29.012277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:29.045444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:29.080619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:29.128982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:29.174281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:29.254996Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624489767166770:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:29.255061Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:29.255349Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624489767166775:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:29.265921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:03:29.278928Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624489767166777:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:03:29.426133Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624489767166828:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:03:29.647384Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624468292328015:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:03:29.647508Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:03:30.985851Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-24T21:03:30.997394Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 72075186224037914 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:03:30.997620Z node 1 :TX_DATASHARD ERROR: finish_pr ... (2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:03:32.177165Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22893, node 2 2025-06-24T21:03:32.271751Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:03:32.271779Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:03:32.271786Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:03:32.271912Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6833 TClient is connected to server localhost:6833 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:03:32.894295Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:03:32.908065Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:03:32.927297Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:33.005400Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:03:33.043013Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:33.203486Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:33.302046Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:35.847331Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624511309169472:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:35.847455Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:35.907407Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:35.961708Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:36.008090Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:36.042128Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:36.078008Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:36.151311Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:36.235825Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:36.304411Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624515604137429:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:36.304550Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:36.308420Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624515604137434:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:36.313843Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:03:36.330623Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624515604137436:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:03:36.410726Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624515604137487:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:03:36.985422Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624494129298681:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:03:36.985507Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:03:38.250638Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [2:7519624524194072366:2481], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyhw1ss48cq879ep4g7nmefc. SessionId : ydb://session/3?node_id=2&id=NTQyNDMzN2YtMTNkNThjNWMtNDk3NGJiMDgtNzdkNGQ0Y2Y=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }. 2025-06-24T21:03:38.251286Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519624524194072368:2482], TxId: 281474976715673, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=NTQyNDMzN2YtMTNkNThjNWMtNDk3NGJiMDgtNzdkNGQ0Y2Y=. TraceId : 01jyhw1ss48cq879ep4g7nmefc. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519624524194072363:2469], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-24T21:03:38.251787Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NTQyNDMzN2YtMTNkNThjNWMtNDk3NGJiMDgtNzdkNGQ0Y2Y=, ActorId: [2:7519624519899105024:2469], ActorState: ExecuteState, TraceId: 01jyhw1ss48cq879ep4g7nmefc, Create QueryResponse for error on request, msg:
: Error: Execution, code: 1060
: Error: Conflict with existing key., code: 2012 >> BackupPathTest::ImportFilterByYdbObjectPath >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> KqpYql::EvaluateIf >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> EncryptedBackupParamsValidationTest::IncorrectKeyLengthExport [GOOD] >> EncryptedBackupParamsValidationTest::NoSourcePrefix >> KqpYql::InsertCVList-useSink [GOOD] >> BsControllerConfig::ManyPDisksRestarts [GOOD] >> BsControllerConfig::MergeBoxes >> EncryptedExportTest::TopicEncryption [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATETIME64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INTERVAL64 >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::InsertCVList-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 17287, MsgBus: 27740 2025-06-24T21:03:32.379206Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624500382074319:2187];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:03:32.379300Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00307b/r3tmp/tmpjsmZfu/pdisk_1.dat 2025-06-24T21:03:32.796589Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:03:32.853966Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:03:32.854072Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 17287, node 1 2025-06-24T21:03:32.866281Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:03:33.003776Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:03:33.003807Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:03:33.003813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:03:33.003941Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27740 2025-06-24T21:03:33.387359Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27740 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:03:34.038322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:03:34.064044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:03:34.082600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:34.265237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:34.468689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:03:34.559542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:36.435279Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624517561944979:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:36.435422Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:36.837579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:36.875950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:36.928212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:36.972554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:37.048857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:37.093433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:37.133215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:37.253320Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624521856912933:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:37.253402Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:37.253446Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624521856912938:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:37.257213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:03:37.267080Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624521856912940:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:03:37.370683Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624521856912993:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:03:37.374843Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624500382074319:2187];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:03:37.387293Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:03:38.644451Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-24T21:03:38.668038Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 72075186224037914 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:03:38.668224Z node 1 :TX_DATASHARD ERROR: finish_propose_write_ ... 2:7519624530879214652:2079] 1750799019716303 != 1750799019716306 2025-06-24T21:03:39.967934Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29413, node 2 2025-06-24T21:03:40.143750Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:03:40.143776Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:03:40.143783Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:03:40.143916Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27155 2025-06-24T21:03:40.749509Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27155 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:03:40.956506Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:03:40.971643Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:03:40.989816Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:41.074106Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:41.265314Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:41.360324Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:43.536172Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624548059085472:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:43.536231Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:43.602038Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:43.697565Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:43.748318Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:43.799076Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:43.874815Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:43.913588Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:43.989526Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:44.099586Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624552354053440:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:44.099699Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:44.099926Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624552354053445:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:44.104782Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:03:44.115382Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624552354053447:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:03:44.208979Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624552354053498:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:03:44.731463Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624530879214730:2113];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:03:44.731552Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:03:45.820503Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [2:7519624556649021070:2481], TxId: 281474976710673, task: 1. Ctx: { SessionId : ydb://session/3?node_id=2&id=MWM0YzU3ZjEtNzY1MTEwZDktMzcxOTIwMzYtODY1NDE0YjM=. TraceId : 01jyhw21a768mcdhsj7mrbdan2. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Duplicated keys found., code: 2012 }. 2025-06-24T21:03:45.820991Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519624556649021072:2482], TxId: 281474976710673, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jyhw21a768mcdhsj7mrbdan2. SessionId : ydb://session/3?node_id=2&id=MWM0YzU3ZjEtNzY1MTEwZDktMzcxOTIwMzYtODY1NDE0YjM=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [2:7519624556649021067:2470], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-24T21:03:45.821480Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=MWM0YzU3ZjEtNzY1MTEwZDktMzcxOTIwMzYtODY1NDE0YjM=, ActorId: [2:7519624556649021037:2470], ActorState: ExecuteState, TraceId: 01jyhw21a768mcdhsj7mrbdan2, Create QueryResponse for error on request, msg:
: Error: Execution, code: 1060
: Error: Duplicated keys found., code: 2012 >> BackupRestoreS3::TestAllPrimitiveTypes-INTERVAL64 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-STRING >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalAsync [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalUnique [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree >> EncryptedExportTest::ViewEncryption >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeView [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeResourcePool [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeTransfer [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeSysView [GOOD] >> BackupRestore::TestReplaceRestoreOption >> KqpYql::EvaluateIf [GOOD] >> KqpYql::EvaluateFor >> KqpYql::TestUuidDefaultColumn >> BackupPathTest::ImportFilterByYdbObjectPath [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_20_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 20] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_21_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 21] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_18_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 18] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_19_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 19] >> BackupPathTest::EncryptedImportWithoutCommonPrefix >> EncryptedBackupParamsValidationTest::NoSourcePrefix [GOOD] >> KqpYql::TestUuidDefaultColumn [GOOD] >> KqpYql::EvaluateFor [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INTERVAL64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-STRING >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_11_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 11] [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::TestUuidDefaultColumn [GOOD] Test command err: Trying to start YDB, gRPC: 5653, MsgBus: 3374 2025-06-24T21:03:50.041229Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624578993987723:2127];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:03:50.041882Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003075/r3tmp/tmpjZnyiz/pdisk_1.dat 2025-06-24T21:03:50.485623Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:03:50.486476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:03:50.488690Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:03:50.488698Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription 
[1:7519624574699020324:2079] 1750799030027931 != 1750799030027934 2025-06-24T21:03:50.500536Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5653, node 1 2025-06-24T21:03:50.646355Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:03:50.646383Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:03:50.646390Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:03:50.646537Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3374 2025-06-24T21:03:51.047356Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3374 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:03:51.331121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:03:53.562001Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624591878890153:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:53.562127Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:53.909479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:54.101419Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624596173857553:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:54.101534Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:54.101825Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624596173857558:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:54.106061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:03:54.117134Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624596173857560:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:03:54.173317Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624596173857611:2395] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> EncryptedExportTest::ViewEncryption [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::EvaluateFor [GOOD] Test command err: Trying to start YDB, gRPC: 5104, MsgBus: 18294 2025-06-24T21:03:43.239657Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624547464094576:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:03:43.283605Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003078/r3tmp/tmplPo0Wk/pdisk_1.dat 2025-06-24T21:03:43.809718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:03:43.809831Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:03:43.823462Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:03:43.883361Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624547464094558:2079] 1750799023225754 != 1750799023225757 2025-06-24T21:03:43.905439Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5104, node 1 2025-06-24T21:03:43.995889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:03:43.995913Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:03:43.995926Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:03:43.996049Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18294 2025-06-24T21:03:44.266535Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18294 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:03:44.694905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:03:44.723777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:44.924115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:45.105689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:45.191008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:46.781503Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624560348998071:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:46.781618Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:47.094084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:47.136942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:47.170317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:47.199069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:47.235212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:47.311241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:47.353922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:47.439968Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624564643966033:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:47.440098Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:47.440225Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624564643966038:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:47.444607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:03:47.458925Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624564643966040:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:03:47.552271Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624564643966091:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:03:48.234187Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624547464094576:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:03:48.234277Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 9941, MsgBus: 24375 2025-06-24T21:03:49.674615Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624572847884892:2147];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003078/r3tmp/tmpgrpvJf/pdisk_1.dat 2025-06-24T21:03:49.738451Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:03:49.841082Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:03:49.841160Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:03:49.850547Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:03:49.851335Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624572847884770:2079] 1750799029651133 != 1750799029651136 2025-06-24T21:03:49.863294Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9941, node 2 2025-06-24T21:03:49.929794Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:03:49.929815Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:03:49.929823Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:03:49.929935Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24375 TClient is connected to server localhost:24375 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:03:50.338785Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:03:50.368665Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:50.465427Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:50.638432Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:50.673428Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:03:50.712235Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:52.789801Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624585732788272:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:52.789949Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:52.833444Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:52.873593Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:52.915371Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:52.992241Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:53.036368Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:53.072445Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:53.145726Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:53.259562Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624590027756238:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:53.259645Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:53.259969Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624590027756243:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:03:53.264489Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:03:53.280236Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624590027756245:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:03:53.341227Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624590027756296:3412] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> EncryptedBackupParamsValidationTest::EmptyImportItem >> BuildStatsHistogram::Many_Serial [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-STRING [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-JSON >> ListObjectsInS3Export::ExportWithSchemaMapping >> KqpScripting::StreamExecuteYqlScriptWriteCancelAfterBruteForced >> KqpScripting::SelectNullType >> KqpScripting::ScanQueryInvalid |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> BackupPathTest::EncryptedImportWithoutCommonPrefix [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> BuildStatsHistogram::Many_Serial [GOOD] Test command err: Got : 24000 2106439 49449 9 9 Expected: 24000 2106439 49449 9 9 { [2455, 2599), [2798, 3624), [4540, 4713), [5654, 7161), [8509, 8794), [8936, 9973), [11888, 14280), [14337, 14882), [15507, 16365), [17368, 19451), [19536, 20135), [20790, 21503), [21589, 23243) } Got : 12816 1121048 49449 9 9 Expected: 12816 1121048 49449 9 9 Got : 24000 3547100 81694 9 9 Expected: 24000 3547100 81694 9 9 { [1012, 1475), [1682, 1985), [2727, 3553), [3599, 3992), [5397, 7244), [9181, 9807), [9993, 10178), [12209, 14029), [15089, 15342), [16198, 16984), [17238, 18436), [21087, 21876), [23701, 23794) } Got : 9582 1425282 81694 9 9 Expected: 9582 1425282 81694 9 9 Got : 24000 2460139 23760 9 9 Expected: 24000 2460139 23760 9 9 { [1296, 2520), [3888, 4320), [5040, 6840), [6912, 7272), [10872, 11160), [11520, 12096), [12096, 13824), [15192, 15624), [17064, 17856), [18216, 19296), [19800, 20160), [20736, 21096), [21096, 22104) } Got : 10440 1060767 23760 9 9 Expected: 10440 1060767 23760 9 9 Got : 24000 4054050 46562 9 9 Expected: 24000 4054050 46562 9 9 { [460, 1518), [2300, 2484), [2760, 4002), [4600, 5842), [6302, 9752), [11178, 12328), [14582, 14858), [16790, 18032), [18216, 18446), [18722, 19504), [19504, 19964), [20378, 20470), [21344, 23506) } Got : 13570 2273213 46562 9 9 Expected: 13570 2273213 46562 9 9 Got : 24000 2106459 49449 9 9 Expected: 24000 2106459 49449 9 9 Got : 24000 2460219 23555 9 9 Expected: 24000 2460219 23555 9 9 Got : 24000 4054270 46543 9 9 Expected: 24000 4054270 46543 9 9 Got : 24000 2106439 25272 38 44 Expected: 24000 2106439 25272 38 44 { [2455, 2599), [2798, 3624), [4540, 4713), [5654, 7161), [8509, 8794), [8936, 9973), [11888, 14280), [14337, 14882), [15507, 16365), [17368, 19451), [19536, 20135), [20790, 21503), [21589, 23243) } Got : 12816 1121048 25272 20 23 Expected: 12816 1121048 25272 20 23 Got : 24000 3547100 49916 64 44 Expected: 24000 3547100 49916 64 44 { [1012, 1475), [1682, 1985), [2727, 3553), [3599, 3992), [5397, 7244), [9181, 9807), [9993, 10178), [12209, 14029), [15089, 
15342), [16198, 16984), [17238, 18436), [21087, 21876), [23701, 23794) } Got : 9582 1425198 49916 26 17 Expected: 9582 1425198 49916 26 17 Got : 24000 2460139 13170 42 41 Expected: 24000 2460139 13170 42 41 { [1296, 2520), [3888, 4320), [5040, 6840), [6912, 7272), [10872, 11160), [11520, 12096), [12096, 13824), [15192, 15624), [17064, 17856), [18216, 19296), [19800, 20160), [20736, 21096), [21096, 22104) } Got : 10440 1060798 13170 18 18 Expected: 10440 1060798 13170 18 18 Got : 24000 4054050 29361 68 43 Expected: 24000 4054050 29361 68 43 { [460, 1518), [2300, 2484), [2760, 4002), [4600, 5842), [6302, 9752), [11178, 12328), [14582, 14858), [16790, 18032), [18216, 18446), [18722, 19504), [19504, 19964), [20378, 20470), [21344, 23506) } Got : 13570 2277890 29361 38 24 Expected: 13570 2277890 29361 38 24 Got : 24000 2106459 25428 38 44 Expected: 24000 2106459 25428 38 44 Got : 24000 2460219 13482 41 41 Expected: 24000 2460219 13482 41 41 Got : 24000 4054270 29970 67 43 Expected: 24000 4054270 29970 67 43 Got : 24000 2106479 25458 38 44 Expected: 24000 2106479 25458 38 44 Got : 24000 2460259 13528 42 41 Expected: 24000 2460259 13528 42 41 Got : 24000 4054290 30013 67 43 Expected: 24000 4054290 30013 67 43 1 parts: [0:0:1:0:0:0:0] 240000 rows, 10181 pages, 7 levels: (159964, 53329) (319996, 106673) (479902, 159975) (639565, 213196) (799303, 266442) Checking BTree: Touched 0% bytes, 4 pages RowCountHistogram: 10% (actual 10%) key = (80152, 26725) value = 24033 (actual 24079 - 0% error) 10% (actual 10%) key = (160300, 53441) value = 48088 (actual 48136 - 0% error) 10% (actual 10%) key = (241096, 80373) value = 72280 (actual 72327 - 0% error) 10% (actual 10%) key = (321454, 107159) value = 96428 (actual 96478 - 0% error) 10% (actual 10%) key = (402202, 134075) value = 120604 (actual 120651 - 0% error) 10% (actual 10%) key = (482362, 160795) value = 144727 (actual 144775 - 0% error) 10% (actual 10%) key = (562825, 187616) value = 168893 (actual 168936 - 0% error) 10% (actual 10%) key = (642871, 214298) value = 192974 (actual 193024 - 0% error) 5% (actual 5%) key = (683260, 227761) value = 205073 (actual 205115 - 0% error) 14% (actual 14%) DataSizeHistogram: 10% (actual 10%) key = (80152, 26725) value = 2048715 (actual 2052707 - 0% error) 10% (actual 10%) key = (160300, 53441) value = 4098370 (actual 4102393 - 0% error) 10% (actual 10%) key = (241096, 80373) value = 6145924 (actual 6149966 - 0% error) 10% (actual 10%) key = (321454, 107159) value = 8194622 (actual 8198636 - 0% error) 10% (actual 10%) key = (402202, 134075) value = 10244365 (actual 10248317 - 0% error) 10% (actual 10%) key = (482362, 160795) value = 12292389 (actual 12296360 - 0% error) 10% (actual 10%) key = (562825, 187616) value = 14344066 (actual 14348128 - 0% error) 10% (actual 10%) key = (642871, 214298) value = 16393002 (actual 16396983 - 0% error) 5% (actual 5%) key = (683260, 227761) value = 17416844 (actual 17420850 - 0% error) 14% (actual 14%) Checking Flat: Touched 100% bytes, 1 pages RowCountHistogram: 10% (actual 10%) key = (80065, 26696) value = 24008 (actual 24056 - 0% error) 10% (actual 10%) key = (160045, 53356) value = 48012 (actual 48061 - 0% error) 10% (actual 10%) key = (240238, 80087) value = 72016 (actual 72061 - 0% error) 10% (actual 10%) key = (320152, 106725) value = 96035 (actual 96085 - 0% error) 10% (actual 10%) key = (400354, 133459) value = 120047 (actual 120093 - 0% error) 10% (actual 10%) key = (480133, 160052) value = 144053 (actual 144100 - 0% error) 10% (actual 10%) key = (560080, 186701) value = 
168060 (actual 168102 - 0% error) 10% (actual 10%) key = (639892, 213305) value = 192073 (actual 192119 - 0% error) 10% (actual 10%) key = (719776, 239933) value = 216090 (actual 216137 - 0% error) 9% (actual 9%) DataSizeHistogram: 10% (actual 10%) key = (79732, 26585) value = 2038706 (actual 2042645 - 0% error) 10% (actual 10%) key = (159427, 53150) value = 4076220 (actual 4080259 - 0% error) 10% (actual 10%) key = (239872, 79965) value = 6113940 (actual 6117932 - 0% error) 10% (actual 10%) key = (319834, 106619) value = 8152983 (actual 8156951 - 0% error) 10% (actual 10%) key = (400105, 133376) value = 10190566 (actual 10194584 - 0% error) 10% (actual 10%) key = (479833, 159952) value = 12228261 (actual 12232212 - 0% error) 10% (actual 10%) key = (559774, 186599) value = 14265925 (actual 14269984 - 0% error) 10% (actual 10%) key = (639385, 213136) value = 16304923 (actual 16308915 - 0% error) 10% (actual 10%) key = (719437, 239820) value = 18342658 (actual 18346641 - 0% error) 9% (actual 9%) Checking Mixed: Touched 1% bytes, 51 pages RowCountHistogram: 10% (actual 10%) key = (80152, 26725) value = 24033 (actual 24079 - 0% error) 10% (actual 10%) key = (160300, 53441) value = 48088 (actual 48136 - 0% error) 10% (actual 10%) key = (241096, 80373) value = 72280 (actual 72327 - 0% error) 10% (actual 10%) key = (321454, 107159) value = 96428 (actual 96478 - 0% error) 10% (actual 10%) key = (402202, 134075) value = 120604 (actual 120651 - 0% error) 10% (actual 10%) key = (482362, 160795) value = 144727 (actual 144775 - 0% error) 10% (actual 10%) key = (562825, 187616) value = 168893 (actual 168936 - 0% error) 10% (actual 10%) key = (642871, 214298) value = 192974 (actual 193024 - 0% error) 10% (actual 10%) key = (723403, 241142) value = 217180 (actual 217228 - 0% error) 9% (actual 9%) DataSizeHistogram: 10% (actual 10%) key = (80152, 26725) value = 2048715 (actual 2052707 - 0% error) 10% (actual 10%) key = (160300, 53441) value = 4098370 (actual 4102393 - 0% error) 10% (actual 10%) key = (241096, 80373) value = 6145924 (actual 6149966 - 0% error) 10% (actual 10%) key = (321454, 107159) value = 8194622 (actual 8198636 - 0% error) 10% (actual 10%) key = (402202, 134075) value = 10244365 (actual 10248317 - 0% error) 10% (actual 10%) key = (482362, 160795) value = 12292389 (actual 12296360 - 0% error) 10% (actual 10%) key = (562825, 187616) value = 14344066 (actual 14348128 - 0% error) 10% (actual 10%) key = (642871, 214298) value = 16393002 (actual 16396983 - 0% error) 10% (actual 10%) key = (723403, 241142) value = 18443184 (actual 18447186 - 0% error) 9% (actual 9%) { [12965, 17271), [20685, 27602), [31405, 43682), [58051, 73731), [81074, 85635), [86559, 89297), [92588, 112654), [134937, 148111), [152568, 158136), [169526, 171272), [181381, 184364), [188301, 199001), [201179, 227534) } 1 parts: [0:0:1:0:0:0:0] 240000 rows, 10181 pages, 7 levels: (159964, 53329) (319996, 106673) (479902, 159975) (639565, 213196) (799303, 266442) Checking BTree: Touched 3% bytes, 111 pages RowCountHistogram: 6% (actual 6%) key = (80152, 26725) value = 7654 (actual 7700 - 0% error) 11% (actual 11%) key = (140245, 46756) value = 21908 (actual 21959 - 0% error) 12% (actual 12%) key = (241096, 80373) value = 37729 (actual 37776 - 0% error) 5% (actual 5%) key = (291388, 97137) value = 44561 (actual 44610 - 0% error) 14% (actual 14%) key = (361831, 120618) value = 62406 (actual 62455 - 0% error) 6% (actual 6%) key = (462178, 154067) value = 70269 (actual 70314 - 0% error) 10% (actual 10%) key = (522574, 174199) value = 
83950 (actual 83996 - 0% error) 9% (actual 9%) key = (647905, 215976) value = 96207 (actual 96256 - 0% error) 11% (actual 11%) key = (703270, 234431) value = 110645 (actual 110694 - 0% error) 12% (actual 12%) DataSizeHistogram: 6% (actual 6%) key = (80152, 26725) value = 650681 (actual 654673 - 0% error) 11% (actual 11%) key = (140245, 46756) value = 1862907 (actual 1866988 - 0% error) 12% (actual 12%) key = (241096, 80373) value = 3200081 (actual 3204123 - 0% error) 5% (actual 5%) key = (291388, 97137) value = 3780473 (actual 3784554 - 0% error) 14% (actual 14%) key = (361831, 120618) value = 5294670 (actual 5298760 - 0% error) 6% (actual 6%) key = (462178, 154067) value = 5965285 (actual 5969310 - 0% error) 10% (actual 10%) key = (522574, 174199) value = 7125413 (actual 7129406 - 0% error) 9% (actual 9%) key = (647905, 215976) value = 8166922 (actual 8170966 - 0% error) 11% (actual 11%) key = (703270, 234431) value = 9391370 (actual 9395383 - 0% error) 12% (actual 12%) { [12965, 17271), [20685, 27602), [31405, 43682), [58051, 73731), [81074, 85635), [86559, 89297), [92588, 112654), [134937, 148111), [152568, 158136), [169526, 171272), [181381, 184364), [188301, 199001), [201179, 227534) } Checking Flat: Touched 100% bytes, 1 pages RowCountHistogram: 10% (actual 10%) key = (109672, 36565) value = 12716 (actual 12760 - 0% error) 10% (actual 10%) key = (200011, 66678) value = 25439 (actual 25485 - 0% error) 10% (actual 10%) key = (242497, 80840) value = 38151 (actual 38197 - 0% error) 10% (actual 10%) key = (323278, 107767) value = 50861 (actual 50910 - 0% error) 9% (actual 9%) key = (365755, 121926) value = 63568 (actual 63614 - 0% error) 10% (actual 10%) key = (482191, 160738) value = 76283 (actual 76335 - 0% error) 10% (actual 9%) key = (610882, 203635) value = 88992 (actual 89039 - 0% error) 10% (actual 10%) key = (673702, 224575) value = 101722 (actual 101768 - 0% error) 10% (actual 10%) key = (715753, 238592) value = 114435 (actual 114484 - 0% error) 9% (actual 9%) DataSizeHistogram: 10% (actual 10%) ... 
140, NULL) (311209, NULL) (311281, NULL) (311344, NULL) (311416, NULL) [0:0:935:0:0:0:0] 100 rows, 100 pages, 4 levels: (311479, NULL) (311542, NULL) (311614, NULL) (311683, NULL) (311755, NULL) [0:0:936:0:0:0:0] 100 rows, 100 pages, 4 levels: (311821, NULL) (311890, NULL) (311956, NULL) (312034, NULL) (312100, NULL) [0:0:937:0:0:0:0] 100 rows, 100 pages, 4 levels: (312172, NULL) (312232, NULL) (312301, NULL) (312370, NULL) (312439, NULL) [0:0:938:0:0:0:0] 100 rows, 100 pages, 4 levels: (312508, NULL) (312571, NULL) (312637, NULL) (312700, NULL) (312760, NULL) [0:0:939:0:0:0:0] 100 rows, 100 pages, 4 levels: (312835, NULL) (312904, NULL) (312970, NULL) (313030, NULL) (313102, NULL) [0:0:940:0:0:0:0] 100 rows, 100 pages, 4 levels: (313174, NULL) (313240, NULL) (313300, NULL) (313366, NULL) (313429, NULL) [0:0:941:0:0:0:0] 100 rows, 100 pages, 4 levels: (313498, NULL) (313573, NULL) (313639, NULL) (313699, NULL) (313768, NULL) [0:0:942:0:0:0:0] 100 rows, 100 pages, 4 levels: (313828, NULL) (313891, NULL) (313957, NULL) (314023, NULL) (314086, NULL) [0:0:943:0:0:0:0] 100 rows, 100 pages, 4 levels: (314149, NULL) (314212, NULL) (314275, NULL) (314338, NULL) (314401, NULL) [0:0:944:0:0:0:0] 100 rows, 100 pages, 4 levels: (314464, NULL) (314530, NULL) (314590, NULL) (314656, NULL) (314719, NULL) [0:0:945:0:0:0:0] 100 rows, 100 pages, 4 levels: (314788, NULL) (314854, NULL) (314920, NULL) (314983, NULL) (315046, NULL) [0:0:946:0:0:0:0] 100 rows, 100 pages, 4 levels: (315109, NULL) (315178, NULL) (315238, NULL) (315304, NULL) (315370, NULL) [0:0:947:0:0:0:0] 100 rows, 100 pages, 4 levels: (315433, NULL) (315496, NULL) (315565, NULL) (315631, NULL) (315697, NULL) [0:0:948:0:0:0:0] 100 rows, 100 pages, 4 levels: (315766, NULL) (315826, NULL) (315889, NULL) (315952, NULL) (316024, NULL) [0:0:949:0:0:0:0] 100 rows, 100 pages, 4 levels: (316087, NULL) (316156, NULL) (316222, NULL) (316288, NULL) (316357, NULL) [0:0:950:0:0:0:0] 100 rows, 100 pages, 4 levels: (316432, NULL) (316498, NULL) (316564, NULL) (316636, NULL) (316705, NULL) [0:0:951:0:0:0:0] 100 rows, 100 pages, 4 levels: (316768, NULL) (316831, NULL) (316891, NULL) (316951, NULL) (317011, NULL) [0:0:952:0:0:0:0] 100 rows, 100 pages, 4 levels: (317080, NULL) (317143, NULL) (317218, NULL) (317287, NULL) (317356, NULL) [0:0:953:0:0:0:0] 100 rows, 100 pages, 4 levels: (317422, NULL) (317497, NULL) (317563, NULL) (317632, NULL) (317701, NULL) [0:0:954:0:0:0:0] 100 rows, 100 pages, 4 levels: (317764, NULL) (317824, NULL) (317887, NULL) (317953, NULL) (318019, NULL) [0:0:955:0:0:0:0] 100 rows, 100 pages, 4 levels: (318088, NULL) (318166, NULL) (318235, NULL) (318304, NULL) (318370, NULL) [0:0:956:0:0:0:0] 100 rows, 100 pages, 4 levels: (318442, NULL) (318511, NULL) (318574, NULL) (318640, NULL) (318703, NULL) [0:0:957:0:0:0:0] 100 rows, 100 pages, 4 levels: (318772, NULL) (318838, NULL) (318898, NULL) (318970, NULL) (319036, NULL) [0:0:958:0:0:0:0] 100 rows, 100 pages, 4 levels: (319099, NULL) (319162, NULL) (319225, NULL) (319294, NULL) (319360, NULL) [0:0:959:0:0:0:0] 100 rows, 100 pages, 4 levels: (319423, NULL) (319492, NULL) (319555, NULL) (319621, NULL) (319687, NULL) [0:0:960:0:0:0:0] 100 rows, 100 pages, 4 levels: (319753, NULL) (319828, NULL) (319900, NULL) (319963, NULL) (320035, NULL) [0:0:961:0:0:0:0] 100 rows, 100 pages, 4 levels: (320104, NULL) (320164, NULL) (320233, NULL) (320299, NULL) (320365, NULL) [0:0:962:0:0:0:0] 100 rows, 100 pages, 4 levels: (320428, NULL) (320500, NULL) (320569, NULL) (320629, NULL) (320698, NULL) 
[0:0:963:0:0:0:0] 100 rows, 100 pages, 4 levels: (320764, NULL) (320833, NULL) (320893, NULL) (320959, NULL) (321019, NULL) [0:0:964:0:0:0:0] 100 rows, 100 pages, 4 levels: (321085, NULL) (321151, NULL) (321214, NULL) (321277, NULL) (321352, NULL) [0:0:965:0:0:0:0] 100 rows, 100 pages, 4 levels: (321421, NULL) (321493, NULL) (321562, NULL) (321631, NULL) (321691, NULL) [0:0:966:0:0:0:0] 100 rows, 100 pages, 4 levels: (321757, NULL) (321823, NULL) (321886, NULL) (321949, NULL) (322009, NULL) [0:0:967:0:0:0:0] 100 rows, 100 pages, 4 levels: (322081, NULL) (322159, NULL) (322225, NULL) (322294, NULL) (322363, NULL) [0:0:968:0:0:0:0] 100 rows, 100 pages, 4 levels: (322429, NULL) (322498, NULL) (322564, NULL) (322642, NULL) (322711, NULL) [0:0:969:0:0:0:0] 100 rows, 100 pages, 4 levels: (322783, NULL) (322846, NULL) (322915, NULL) (322978, NULL) (323041, NULL) [0:0:970:0:0:0:0] 100 rows, 100 pages, 4 levels: (323104, NULL) (323164, NULL) (323230, NULL) (323305, NULL) (323368, NULL) [0:0:971:0:0:0:0] 100 rows, 100 pages, 4 levels: (323434, NULL) (323506, NULL) (323569, NULL) (323632, NULL) (323707, NULL) [0:0:972:0:0:0:0] 100 rows, 100 pages, 4 levels: (323776, NULL) (323851, NULL) (323917, NULL) (323986, NULL) (324052, NULL) [0:0:973:0:0:0:0] 100 rows, 100 pages, 4 levels: (324115, NULL) (324184, NULL) (324256, NULL) (324316, NULL) (324379, NULL) [0:0:974:0:0:0:0] 100 rows, 100 pages, 4 levels: (324442, NULL) (324502, NULL) (324568, NULL) (324631, NULL) (324703, NULL) [0:0:975:0:0:0:0] 100 rows, 100 pages, 4 levels: (324769, NULL) (324838, NULL) (324904, NULL) (324973, NULL) (325033, NULL) [0:0:976:0:0:0:0] 100 rows, 100 pages, 4 levels: (325105, NULL) (325174, NULL) (325234, NULL) (325297, NULL) (325363, NULL) [0:0:977:0:0:0:0] 100 rows, 100 pages, 4 levels: (325438, NULL) (325504, NULL) (325570, NULL) (325630, NULL) (325699, NULL) [0:0:978:0:0:0:0] 100 rows, 100 pages, 4 levels: (325771, NULL) (325834, NULL) (325900, NULL) (325966, NULL) (326032, NULL) [0:0:979:0:0:0:0] 100 rows, 100 pages, 4 levels: (326101, NULL) (326170, NULL) (326233, NULL) (326296, NULL) (326359, NULL) [0:0:980:0:0:0:0] 100 rows, 100 pages, 4 levels: (326434, NULL) (326497, NULL) (326563, NULL) (326632, NULL) (326701, NULL) [0:0:981:0:0:0:0] 100 rows, 100 pages, 4 levels: (326773, NULL) (326836, NULL) (326905, NULL) (326965, NULL) (327025, NULL) [0:0:982:0:0:0:0] 100 rows, 100 pages, 4 levels: (327097, NULL) (327169, NULL) (327232, NULL) (327301, NULL) (327364, NULL) [0:0:983:0:0:0:0] 100 rows, 100 pages, 4 levels: (327430, NULL) (327496, NULL) (327559, NULL) (327622, NULL) (327682, NULL) [0:0:984:0:0:0:0] 100 rows, 100 pages, 4 levels: (327742, NULL) (327811, NULL) (327871, NULL) (327934, NULL) (327997, NULL) [0:0:985:0:0:0:0] 100 rows, 100 pages, 4 levels: (328072, NULL) (328138, NULL) (328222, NULL) (328291, NULL) (328363, NULL) [0:0:986:0:0:0:0] 100 rows, 100 pages, 4 levels: (328432, NULL) (328501, NULL) (328573, NULL) (328648, NULL) (328717, NULL) [0:0:987:0:0:0:0] 100 rows, 100 pages, 4 levels: (328783, NULL) (328849, NULL) (328915, NULL) (328978, NULL) (329044, NULL) [0:0:988:0:0:0:0] 100 rows, 100 pages, 4 levels: (329119, NULL) (329185, NULL) (329248, NULL) (329317, NULL) (329383, NULL) [0:0:989:0:0:0:0] 100 rows, 100 pages, 4 levels: (329455, NULL) (329518, NULL) (329590, NULL) (329662, NULL) (329722, NULL) [0:0:990:0:0:0:0] 100 rows, 100 pages, 4 levels: (329782, NULL) (329854, NULL) (329917, NULL) (329983, NULL) (330049, NULL) [0:0:991:0:0:0:0] 100 rows, 100 pages, 4 levels: (330118, NULL) (330187, NULL) 
(330253, NULL) (330322, NULL) (330382, NULL) [0:0:992:0:0:0:0] 100 rows, 100 pages, 4 levels: (330454, NULL) (330520, NULL) (330595, NULL) (330673, NULL) (330739, NULL) [0:0:993:0:0:0:0] 100 rows, 100 pages, 4 levels: (330808, NULL) (330874, NULL) (330940, NULL) (331003, NULL) (331072, NULL) [0:0:994:0:0:0:0] 100 rows, 100 pages, 4 levels: (331132, NULL) (331204, NULL) (331276, NULL) (331342, NULL) (331405, NULL) [0:0:995:0:0:0:0] 100 rows, 100 pages, 4 levels: (331465, NULL) (331540, NULL) (331615, NULL) (331684, NULL) (331753, NULL) [0:0:996:0:0:0:0] 100 rows, 100 pages, 4 levels: (331816, NULL) (331891, NULL) (331960, NULL) (332026, NULL) (332086, NULL) [0:0:997:0:0:0:0] 100 rows, 100 pages, 4 levels: (332152, NULL) (332215, NULL) (332284, NULL) (332350, NULL) (332419, NULL) [0:0:998:0:0:0:0] 100 rows, 100 pages, 4 levels: (332491, NULL) (332557, NULL) (332623, NULL) (332686, NULL) (332752, NULL) [0:0:999:0:0:0:0] 100 rows, 100 pages, 4 levels: (332818, NULL) (332884, NULL) (332944, NULL) (333013, NULL) (333073, NULL) [0:0:1000:0:0:0:0] 100 rows, 100 pages, 4 levels: (333148, NULL) (333214, NULL) (333274, NULL) (333340, NULL) (333403, NULL) Checking BTree: Touched 0% bytes, 0 pages RowCountHistogram: 5% (actual 6%) key = (16984, 5669) value = 5100 (actual 6998 - -1% error) 10% (actual 9%) key = (50416, 16813) value = 15100 (actual 16798 - -1% error) 10% (actual 9%) key = (83701, 27908) value = 25100 (actual 26598 - -1% error) 10% (actual 9%) key = (116986, 39003) value = 35100 (actual 36398 - -1% error) 10% (actual 9%) key = (150319, 50114) value = 45100 (actual 46198 - -1% error) 10% (actual 9%) key = (183700, 61241) value = 55100 (actual 55998 - 0% error) 10% (actual 9%) key = (217081, 72368) value = 65100 (actual 65798 - 0% error) 10% (actual 9%) key = (250486, 83503) value = 75100 (actual 75598 - 0% error) 10% (actual 9%) key = (283771, 94598) value = 85100 (actual 85398 - 0% error) 14% (actual 14%) DataSizeHistogram: 5% (actual 6%) key = (16648, 5557) value = 524891 (actual 723287 - -1% error) 10% (actual 9%) key = (50086, 16703) value = 1569936 (actual 1747238 - -1% error) 9% (actual 9%) key = (83356, 27793) value = 2610698 (actual 2767306 - -1% error) 10% (actual 9%) key = (116647, 38890) value = 3652143 (actual 3787394 - -1% error) 9% (actual 9%) key = (149656, 49893) value = 4685435 (actual 4800597 - -1% error) 10% (actual 9%) key = (183040, 61021) value = 5728420 (actual 5822785 - 0% error) 10% (actual 9%) key = (216727, 72250) value = 6776444 (actual 6848929 - 0% error) 9% (actual 9%) key = (250144, 83389) value = 7813547 (actual 7865227 - 0% error) 9% (actual 9%) key = (283444, 94489) value = 8853697 (actual 8884838 - 0% error) 14% (actual 14%) Checking Flat: Touched 100% bytes, 1000 pages RowCountHistogram: 10% (actual 11%) key = (33379, 11134) value = 10000 (actual 11800 - -1% error) 10% (actual 9%) key = (66721, 22248) value = 20000 (actual 21600 - -1% error) 10% (actual 9%) key = (100015, 33346) value = 30000 (actual 31400 - -1% error) 10% (actual 9%) key = (133258, 44427) value = 40000 (actual 41200 - -1% error) 10% (actual 9%) key = (166621, 55548) value = 50000 (actual 51000 - -1% error) 10% (actual 9%) key = (200041, 66688) value = 60000 (actual 60800 - 0% error) 10% (actual 9%) key = (233449, 77824) value = 70000 (actual 70600 - 0% error) 10% (actual 9%) key = (266824, 88949) value = 80000 (actual 80400 - 0% error) 10% (actual 9%) key = (300073, 100032) value = 90000 (actual 90200 - 0% error) 10% (actual 9%) DataSizeHistogram: 10% (actual 11%) key = (33187, NULL) 
value = 1041247 (actual 1229534 - -1% error) 10% (actual 9%) key = (66517, NULL) value = 2082456 (actual 2249844 - -1% error) 10% (actual 9%) key = (99709, NULL) value = 3123684 (actual 3270138 - -1% error) 10% (actual 9%) key = (132925, NULL) value = 4164886 (actual 4290603 - -1% error) 10% (actual 9%) key = (166246, NULL) value = 5206111 (actual 5311117 - -1% error) 10% (actual 9%) key = (199678, NULL) value = 6247321 (actual 6331068 - 0% error) 10% (actual 9%) key = (233290, NULL) value = 7288529 (actual 7350869 - 0% error) 10% (actual 9%) key = (266701, NULL) value = 8329759 (actual 8371441 - 0% error) 10% (actual 9%) key = (300052, NULL) value = 9371030 (actual 9392083 - 0% error) 9% (actual 9%) Checking Mixed: Touched 0% bytes, 0 pages RowCountHistogram: 100% (actual 100%) DataSizeHistogram: 100% (actual 100%) >> KqpScripting::StreamExecuteYqlScriptClientTimeoutBruteForce >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-BOOL >> KqpScripting::StreamExecuteYqlScriptData |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> BackupPathTest::ExplicitDuplicatedItems >> KqpScripting::EndOfQueryCommit >> BackupRestore::TestAllPrimitiveTypes-STRING [GOOD] >> BackupRestore::TestAllPrimitiveTypes-JSON >> KqpYql::ColumnNameConflict >> EncryptedBackupParamsValidationTest::EmptyImportItem [GOOD] >> ListObjectsInS3Export::ExportWithSchemaMapping [GOOD] >> KqpScripting::SelectNullType [GOOD] >> KqpScripting::StreamDdlAndDml >> KqpScripting::ScanQueryInvalid [GOOD] >> KqpScripting::ScanQueryTruncate >> BsControllerConfig::MergeBoxes [GOOD] >> EncryptedBackupParamsValidationTest::IncorrectKeyImport >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_7_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 7] [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-JSON [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-JSON_DOCUMENT ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::MergeBoxes [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:11115:2156] recipient: [1:10875:2167] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:11115:2156] recipient: [1:10875:2167] Leader for TabletID 72057594037932033 is [1:11212:2169] sender: [1:11215:2156] recipient: [1:10875:2167] 2025-06-24T21:02:46.752765Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-24T21:02:46.758475Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-24T21:02:46.758890Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-24T21:02:46.761414Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:02:46.762184Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-24T21:02:46.762515Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-24T21:02:46.762540Z node 1 :BS_CONTROLLER DEBUG: 
{BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-24T21:02:46.763003Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-24T21:02:46.772786Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-24T21:02:46.772960Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-24T21:02:46.773132Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-24T21:02:46.773246Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T21:02:46.773339Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T21:02:46.773415Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:11212:2169] sender: [1:11238:2156] recipient: [1:110:2157] 2025-06-24T21:02:46.791755Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-24T21:02:46.791929Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T21:02:46.803174Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T21:02:46.803340Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T21:02:46.803431Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T21:02:46.803547Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T21:02:46.803684Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T21:02:46.803755Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T21:02:46.803816Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T21:02:46.803887Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T21:02:46.814742Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T21:02:46.814903Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T21:02:46.827866Z 
node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T21:02:46.828048Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-24T21:02:46.829380Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-24T21:02:46.829429Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-24T21:02:46.829626Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-24T21:02:46.829675Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-24T21:02:46.859623Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk0" } Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" } Drive { Path: "/dev/disk3" } Drive { Path: "/dev/disk4" } Drive { Path: "/dev/disk5" } Drive { Path: "/dev/disk6" } Drive { Path: "/dev/disk7" } Drive { Path: "/dev/disk8" Type: SSD } Drive { Path: "/dev/disk9" Type: SSD } Drive { Path: "/dev/disk10" Type: SSD } Drive { Path: "/dev/disk11" Type: SSD } Drive { Path: "/dev/disk12" Type: SSD } Drive { Path: "/dev/disk13" Type: SSD } Drive { Path: "/dev/disk14" Type: SSD } Drive { Path: "/dev/disk15" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "test box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12021 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12022 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12023 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12024 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12025 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12026 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12027 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12028 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12029 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12030 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12031 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12032 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12033 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12034 } HostConfigId: 1 } Host { Key { 
Fqdn: "::1" IcPort: 12035 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12036 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12037 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12038 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12039 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12040 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12041 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12042 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12043 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12044 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12045 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12046 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12047 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12048 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12049 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12050 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12051 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12052 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12053 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12054 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12055 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12056 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12057 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12058 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12059 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12060 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12061 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12062 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12063 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12064 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12065 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12066 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12067 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12068 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12069 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12070 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12071 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12072 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12073 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12074 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12075 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12076 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12077 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12078 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12079 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12080 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12081 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12082 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12083 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12084 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12085 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12086 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12087 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12088 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12089 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12090 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12091 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12092 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12093 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12094 } HostConfigId: 1 } Host { Key { Fqdn: "::1" 
IcPort: 12095 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12096 } HostConfigId: 1 } Host { Ke ... pp:340} Create new pdisk PDiskId# 276:1000 Path# /dev/disk1 2025-06-24T21:03:56.933155Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 276:1001 Path# /dev/disk2 2025-06-24T21:03:56.933183Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 276:1002 Path# /dev/disk3 2025-06-24T21:03:56.933210Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 277:1000 Path# /dev/disk1 2025-06-24T21:03:56.933237Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 277:1001 Path# /dev/disk2 2025-06-24T21:03:56.933267Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 277:1002 Path# /dev/disk3 2025-06-24T21:03:56.933295Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 278:1000 Path# /dev/disk1 2025-06-24T21:03:56.933322Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 278:1001 Path# /dev/disk2 2025-06-24T21:03:56.933352Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 278:1002 Path# /dev/disk3 2025-06-24T21:03:56.933377Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 279:1000 Path# /dev/disk1 2025-06-24T21:03:56.933414Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 279:1001 Path# /dev/disk2 2025-06-24T21:03:56.933458Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 279:1002 Path# /dev/disk3 2025-06-24T21:03:56.933487Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 280:1000 Path# /dev/disk1 2025-06-24T21:03:56.933515Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 280:1001 Path# /dev/disk2 2025-06-24T21:03:56.933545Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 280:1002 Path# /dev/disk3 2025-06-24T21:03:56.933572Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 281:1000 Path# /dev/disk1 2025-06-24T21:03:56.933600Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 281:1001 Path# /dev/disk2 2025-06-24T21:03:56.933628Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 281:1002 Path# /dev/disk3 2025-06-24T21:03:56.933655Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 282:1000 Path# /dev/disk1 2025-06-24T21:03:56.933682Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 282:1001 Path# /dev/disk2 2025-06-24T21:03:56.933707Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 282:1002 Path# /dev/disk3 2025-06-24T21:03:56.933736Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 283:1000 Path# /dev/disk1 2025-06-24T21:03:56.933762Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 283:1001 Path# /dev/disk2 2025-06-24T21:03:56.933792Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 283:1002 
Path# /dev/disk3 2025-06-24T21:03:56.933821Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 284:1000 Path# /dev/disk1 2025-06-24T21:03:56.933858Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 284:1001 Path# /dev/disk2 2025-06-24T21:03:56.933907Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 284:1002 Path# /dev/disk3 2025-06-24T21:03:56.933937Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 285:1000 Path# /dev/disk1 2025-06-24T21:03:56.933964Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 285:1001 Path# /dev/disk2 2025-06-24T21:03:56.933995Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 285:1002 Path# /dev/disk3 2025-06-24T21:03:56.934023Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 286:1000 Path# /dev/disk1 2025-06-24T21:03:56.934052Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 286:1001 Path# /dev/disk2 2025-06-24T21:03:56.934078Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 286:1002 Path# /dev/disk3 2025-06-24T21:03:56.934114Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 287:1000 Path# /dev/disk1 2025-06-24T21:03:56.934141Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 287:1001 Path# /dev/disk2 2025-06-24T21:03:56.934179Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 287:1002 Path# /dev/disk3 2025-06-24T21:03:56.934235Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 288:1000 Path# /dev/disk1 2025-06-24T21:03:56.934275Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 288:1001 Path# /dev/disk2 2025-06-24T21:03:56.934306Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 288:1002 Path# /dev/disk3 2025-06-24T21:03:56.934331Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 289:1000 Path# /dev/disk1 2025-06-24T21:03:56.934359Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 289:1001 Path# /dev/disk2 2025-06-24T21:03:56.934387Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 289:1002 Path# /dev/disk3 2025-06-24T21:03:56.934414Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 290:1000 Path# /dev/disk1 2025-06-24T21:03:56.934444Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 290:1001 Path# /dev/disk2 2025-06-24T21:03:56.934470Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 290:1002 Path# /dev/disk3 2025-06-24T21:03:56.934497Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 291:1000 Path# /dev/disk1 2025-06-24T21:03:56.934526Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 291:1001 Path# /dev/disk2 2025-06-24T21:03:56.934552Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 291:1002 
Path# /dev/disk3 2025-06-24T21:03:56.934584Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 292:1000 Path# /dev/disk1 2025-06-24T21:03:56.934613Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 292:1001 Path# /dev/disk2 2025-06-24T21:03:56.934642Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 292:1002 Path# /dev/disk3 2025-06-24T21:03:56.934673Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 293:1000 Path# /dev/disk1 2025-06-24T21:03:56.934700Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 293:1001 Path# /dev/disk2 2025-06-24T21:03:56.934728Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 293:1002 Path# /dev/disk3 2025-06-24T21:03:56.934754Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 294:1000 Path# /dev/disk1 2025-06-24T21:03:56.935201Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 294:1001 Path# /dev/disk2 2025-06-24T21:03:56.935256Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 294:1002 Path# /dev/disk3 2025-06-24T21:03:56.935285Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 295:1000 Path# /dev/disk1 2025-06-24T21:03:56.935313Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 295:1001 Path# /dev/disk2 2025-06-24T21:03:56.935347Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 295:1002 Path# /dev/disk3 2025-06-24T21:03:56.935374Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 296:1000 Path# /dev/disk1 2025-06-24T21:03:56.935406Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 296:1001 Path# /dev/disk2 2025-06-24T21:03:56.935434Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 296:1002 Path# /dev/disk3 2025-06-24T21:03:56.935462Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 297:1000 Path# /dev/disk1 2025-06-24T21:03:56.935491Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 297:1001 Path# /dev/disk2 2025-06-24T21:03:56.935519Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 297:1002 Path# /dev/disk3 2025-06-24T21:03:56.935548Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 298:1000 Path# /dev/disk1 2025-06-24T21:03:56.935578Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 298:1001 Path# /dev/disk2 2025-06-24T21:03:56.935606Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 298:1002 Path# /dev/disk3 2025-06-24T21:03:56.935635Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 299:1000 Path# /dev/disk1 2025-06-24T21:03:56.935664Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 299:1001 Path# /dev/disk2 2025-06-24T21:03:56.935695Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 299:1002 
Path# /dev/disk3 2025-06-24T21:03:56.935722Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 300:1000 Path# /dev/disk1 2025-06-24T21:03:56.935751Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 300:1001 Path# /dev/disk2 2025-06-24T21:03:56.935781Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 300:1002 Path# /dev/disk3 2025-06-24T21:03:57.160339Z node 251 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.229880s 2025-06-24T21:03:57.160574Z node 251 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.230137s 2025-06-24T21:03:57.172620Z node 251 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 251 Type# 268639257 2025-06-24T21:03:57.199391Z node 251 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { MergeBoxes { OriginBoxId: 2 OriginBoxGeneration: 1 TargetBoxId: 1 TargetBoxGeneration: 1 StoragePoolIdMap { OriginStoragePoolId: 1 TargetStoragePoolId: 2 } } } } 2025-06-24T21:03:57.224577Z node 251 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { ReadBox { BoxId: 1 } } Command { QueryBaseConfig { } } } >> ListObjectsInS3Export::ExportWithoutSchemaMapping >> BackupPathTest::ExplicitDuplicatedItems [GOOD] |93.1%| [TA] $(B)/ydb/core/mind/bscontroller/ut_bscontroller/test-results/unittest/{meta.json ... results_accumulator.log} |93.1%| [TA] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/test-results/unittest/{meta.json ... 
results_accumulator.log} >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_5_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 5] [GOOD] >> KqpScripting::StreamExecuteYqlScriptData [GOOD] >> KqpScripting::StreamExecuteYqlScriptEmptyResults >> KqpYql::UuidPrimaryKeyDisabled >> BackupPathTest::ExportUnexistingExplicitPath >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldNotCompactBorrowedAfterSplitMergeWhenDisabled [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldHandleDataShardReboot >> KqpScripting::EndOfQueryCommit [GOOD] >> KqpScripting::ExecuteYqlScriptPg >> KqpYql::ColumnNameConflict [GOOD] >> KqpYql::ColumnTypeMismatch >> KqpScripting::StreamExecuteYqlScriptClientTimeoutBruteForce [GOOD] >> KqpScripting::StreamExecuteYqlScriptClientOperationTimeoutBruteForce >> BackupRestoreS3::TestAllPrimitiveTypes-BOOL [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DOUBLE >> KqpScripting::ScanQueryTruncate [GOOD] >> BackupRestore::TestAllPrimitiveTypes-JSON [GOOD] >> BackupRestore::TestAllPrimitiveTypes-JSON_DOCUMENT >> KqpScripting::StreamDdlAndDml [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::ScanQueryTruncate [GOOD] Test command err: Trying to start YDB, gRPC: 14800, MsgBus: 9192 2025-06-24T21:03:58.675316Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624611477764871:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:03:58.675368Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003068/r3tmp/tmpTmhcrA/pdisk_1.dat 2025-06-24T21:03:59.165207Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:03:59.168434Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:03:59.168529Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:03:59.173482Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14800, node 1 2025-06-24T21:03:59.293614Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:03:59.293642Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:03:59.293649Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:03:59.293807Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9192 2025-06-24T21:03:59.691915Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9192 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:00.047137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:00.087289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:04:00.101350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:00.255792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:00.422192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:00.510405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:02.147870Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624628657635652:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:02.149224Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:02.529747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.565456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.622835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.651101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.690659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.725533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.814228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.908045Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624628657636313:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:02.908118Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:02.908283Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624628657636318:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:02.912467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:02.930580Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624628657636320:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:03.030362Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624632952603667:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:03.675607Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624611477764871:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:03.675728Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:04.425591Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519624637247571253:2482], status: PRECONDITION_FAILED, issues:
: Error: Default error
:1:746: Error: Scan query should have a single result set., code: 2029
: Error: Default error
:1:746: Error: Scan query should have a single result set., code: 2029 2025-06-24T21:04:04.425870Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NjhhNDIyMDAtOGQzZ ... :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624644336263423:2079] 1750799045451133 != 1750799045451136 TServer::EnableGrpc on GrpcPort 8564, node 2 2025-06-24T21:04:05.629965Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:05.634918Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:05.642165Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:05.695171Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:05.695198Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:05.695208Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:05.695338Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13191 TClient is connected to server localhost:13191 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:06.273674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:06.280377Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:06.292658Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:06.367624Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:06.484629Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:06.543513Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:06.644650Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:09.072228Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624661516134230:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:09.072321Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:09.135344Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:09.208639Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:09.250952Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:09.288378Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:09.333487Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:09.408875Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:09.517225Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:09.615491Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624661516134897:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:09.615635Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:09.615837Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624661516134902:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:09.619442Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:09.631491Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624661516134904:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:09.713016Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624661516134955:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:10.458506Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624644336263448:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:10.458578Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:10.899290Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [2:7519624665811102610:2054], tablet: [2:7519624648631231487:2283], scanId: 4, table: /Root/EightShard 2025-06-24T21:04:10.899335Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [2:7519624665811102612:2055], tablet: [2:7519624648631231488:2284], scanId: 2, table: /Root/EightShard 2025-06-24T21:04:10.899358Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [2:7519624665811102614:2056], tablet: [2:7519624648631231489:2285], scanId: 1, table: /Root/EightShard 2025-06-24T21:04:10.899380Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [2:7519624665811102616:2057], tablet: [2:7519624648631231471:2282], scanId: 3, table: /Root/EightShard 2025-06-24T21:04:10.902497Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799050924, txId: 281474976710672] shutting down >> EncryptedBackupParamsValidationTest::IncorrectKeyImport [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamDdlAndDml [GOOD] Test command err: Trying to start YDB, gRPC: 30785, MsgBus: 27529 2025-06-24T21:03:58.319809Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624613362869418:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:03:58.319933Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003070/r3tmp/tmp4QKP6f/pdisk_1.dat 2025-06-24T21:03:58.834736Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:03:58.834838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:03:58.847557Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:03:58.879907Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:03:58.886556Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624613362869398:2079] 1750799038316105 != 1750799038316108 TServer::EnableGrpc on GrpcPort 30785, node 
1 2025-06-24T21:03:59.047882Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:03:59.047908Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:03:59.047915Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:03:59.048116Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:03:59.339455Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27529 TClient is connected to server localhost:27529 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:03:59.735742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:03:59.749986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:03:59.763167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:59.916179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:00.107571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:00.186161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:02.010346Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624630542740223:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:02.010476Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:02.395030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.445980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.479930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.516195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.547246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.591114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.643548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.702686Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624630542740879:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:02.702764Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:02.702909Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624630542740884:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:02.707828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:02.722208Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624630542740886:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:02.807413Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624630542740937:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:03.320213Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624613362869418:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:03.320326Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:03.966055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... 1844Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003070/r3tmp/tmpQfnAeo/pdisk_1.dat 2025-06-24T21:04:05.351221Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:05.352269Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624643030490986:2079] 1750799045241403 != 1750799045241406 2025-06-24T21:04:05.367618Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:05.367722Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:05.370763Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4204, node 2 2025-06-24T21:04:05.442822Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:05.442849Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:05.442858Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:05.442971Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26699 TClient is connected to server localhost:26699 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:05.970469Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:05.979445Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:05.990844Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:06.078515Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:06.253196Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:06.305606Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:06.384543Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:08.648308Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624655915394499:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:08.648419Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:08.711109Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:08.758772Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:08.796228Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:08.831815Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:08.872727Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:08.931663Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:08.984173Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:09.097863Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624660210362455:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:09.097963Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:09.098473Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624660210362460:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:09.102613Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:09.119731Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624660210362462:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:09.214779Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624660210362513:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:10.243275Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624643030491010:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:10.243350Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:10.370811Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:10.975577Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799051008, txId: 281474976715674] shutting down >> KqpYql::UuidPrimaryKeyDisabled [GOOD] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_11_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 11] [GOOD] >> ListObjectsInS3Export::ExportWithoutSchemaMapping [GOOD] >> BackupPathTest::ExportUnexistingExplicitPath [GOOD] >> KqpScripting::StreamExecuteYqlScriptEmptyResults [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::UuidPrimaryKeyDisabled [GOOD] Test command err: Trying to start YDB, gRPC: 27815, MsgBus: 22011 2025-06-24T21:04:08.928716Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624654927298465:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:08.931030Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003037/r3tmp/tmpm4IPDi/pdisk_1.dat 2025-06-24T21:04:09.366610Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27815, node 1 2025-06-24T21:04:09.414242Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:09.414442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:09.418352Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:09.466951Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:09.466974Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: (empty maybe) 2025-06-24T21:04:09.466991Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:09.467117Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22011 TClient is connected to server localhost:22011 2025-06-24T21:04:09.928728Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:10.099705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:10.123257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:12.193574Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624672107168158:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:12.193745Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:12.506971Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624672107168179:2305] txid# 281474976710658, issues: { message: "Uuid as primary key is forbiden by configuration: key" severity: 1 } 2025-06-24T21:04:12.543335Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624672107168187:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:12.543404Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:12.558960Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624672107168194:2313] txid# 281474976710659, issues: { message: "Uuid as primary key is forbiden by configuration: key" severity: 1 } 2025-06-24T21:04:12.583288Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624672107168202:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:12.583391Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:12.603138Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624672107168209:2321] txid# 281474976710660, issues: { message: "Uuid as primary key is forbiden by configuration: val" severity: 1 } 2025-06-24T21:04:12.634579Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624672107168217:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:12.634694Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:12.648435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:12.795573Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624672107168306:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:12.795665Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } >> KqpScripting::ExecuteYqlScriptPg [GOOD] >> EncryptedBackupParamsValidationTest::EncryptionSettingsWithoutKeyImport >> KqpYql::TableConcat >> KqpYql::ColumnTypeMismatch [GOOD] >> KqpPragma::OrderedColumns >> ListObjectsInS3Export::ExportWithEncryption >> BackupRestoreS3::TestAllPrimitiveTypes-JSON_DOCUMENT [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DYNUMBER >> KqpYql::FlexibleTypes >> BackupPathTest::ExportUnexistingCommonSourcePath ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptEmptyResults [GOOD] Test command err: Trying to start YDB, gRPC: 26803, MsgBus: 4596 2025-06-24T21:04:00.943575Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624621566994095:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:00.943794Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00304d/r3tmp/tmpG9mZcG/pdisk_1.dat 2025-06-24T21:04:01.391305Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624621566994077:2079] 1750799040942866 != 1750799040942869 2025-06-24T21:04:01.403832Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:01.419832Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:01.419919Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:01.421957Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26803, node 1 2025-06-24T21:04:01.514451Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:01.514487Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:01.514525Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:01.514666Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4596 2025-06-24T21:04:01.987465Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4596 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:02.164927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:02.215810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:02.233966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:02.450948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:02.638907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:02.707597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:04.573252Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624638746864902:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:04.573352Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:04.887883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:04.922500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:04.991479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:05.032109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:05.073633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:05.107091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:05.145592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:05.206145Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624643041832861:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:05.206227Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:05.206280Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624643041832866:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:05.211832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:05.225069Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624643041832868:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:05.300228Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624643041832919:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:05.947284Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624621566994095:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:05.947394Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:07.097247Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799047088, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 61852, MsgBus: 28539 2025-06-24T21:04:08.106608Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624654283971208:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:08.106661Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00304d/r3tmp/tmpqWR1du/pdisk_1.dat 2025-06-24T21:04:08.223823Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:08.224736Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624654283971173:2079] 1750799048101302 != 1750799048101305 TServer::EnableGrpc on GrpcPort 61852, node 2 2025-06-24T21:04:08.248361Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:08.248447Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:08.252374Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:08.287689Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:08.287717Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:08.287725Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:08.287837Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28539 TClient is connected to server localhost:28539 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:08.887599Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:08.899754Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:04:08.911572Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:08.991761Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:09.116537Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:09.227132Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:04:09.292138Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:11.514036Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624667168874707:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:11.514140Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:11.580946Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:11.610053Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:11.659297Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:11.698585Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:11.739765Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:11.794027Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:11.877435Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:11.951238Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624667168875369:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:11.951451Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:11.951884Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624667168875374:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:11.956634Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:11.975055Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624667168875376:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:12.062493Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624671463842723:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:13.107496Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624654283971208:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:13.107556Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:13.442373Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799053472, txId: 281474976715672] shutting down 2025-06-24T21:04:13.587634Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799053619, txId: 281474976715674] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::ExecuteYqlScriptPg [GOOD] Test command err: Trying to start YDB, gRPC: 21287, MsgBus: 61055 2025-06-24T21:04:01.540532Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624625655893193:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:01.540597Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003044/r3tmp/tmp6DC4jD/pdisk_1.dat 2025-06-24T21:04:01.941238Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:02.001903Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:02.002002Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 21287, node 1 2025-06-24T21:04:02.003544Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:02.123937Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:02.123963Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:02.123973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:02.124149Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61055 2025-06-24T21:04:02.552276Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:61055 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:02.752056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:02.771378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:02.908478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:03.085957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:03.194911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:05.005165Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624642835763979:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:05.005291Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:05.460384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:05.494611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:05.553226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:05.590563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:05.637483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:05.716111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:05.786365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:05.916967Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624642835764651:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:05.917093Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:05.917146Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624642835764656:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:05.921873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:05.933936Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624642835764658:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:06.023892Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624647130732007:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:06.546089Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624625655893193:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:06.546162Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:07.270670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:07.904694Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799047935, txId: 281474976710674] shutting down Trying to start YDB, gRPC: 24839, MsgBus: 11780 2025-06-24T21:04:08.872469Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624653744901138:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:08.872516Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003044/r3tmp/tmpcpZwTF/pdisk_1.dat 2025-06-24T21:04:09.027665Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:09.028877Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624653744901117:2079] 1750799048871929 != 1750799048871932 2025-06-24T21:04:09.045050Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:09.045158Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:09.046649Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24839, node 2 2025-06-24T21:04:09.185384Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:09.185406Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:09.185414Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:09.185522Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11780 TClient is connected to server localhost:11780 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:09.707197Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:09.712322Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:09.722344Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:09.797828Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:09.888679Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:09.950341Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:10.043746Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:12.385915Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624670924771936:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:12.386008Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:12.442702Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:12.475486Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:12.507685Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:12.544015Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:12.575762Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:12.613546Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:12.649814Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:12.750466Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624670924772599:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:12.750565Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:12.750932Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624670924772604:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:12.755678Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:12.769437Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624670924772606:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:12.829757Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624670924772657:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:13.876819Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624653744901138:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:13.876897Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_3_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 3] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::ColumnTypeMismatch [GOOD] Test command err: Trying to start YDB, gRPC: 15162, MsgBus: 2694 2025-06-24T21:04:03.159186Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624632741927591:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:03.159256Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003040/r3tmp/tmpw0rX34/pdisk_1.dat 2025-06-24T21:04:03.597470Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624632741927569:2079] 1750799043153893 != 1750799043153896 2025-06-24T21:04:03.602229Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15162, node 1 2025-06-24T21:04:03.680830Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:03.681421Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:03.686325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:03.716805Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:03.716823Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:03.716828Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:03.716968Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2694 2025-06-24T21:04:04.185438Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2694 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:04.360776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:04.383815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:04.397400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:04.560194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:04.723572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:04.809173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:06.464068Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624645626831105:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:06.464170Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:06.792893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:06.839729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:06.882485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:06.936917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:06.974114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:07.037906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:07.090896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:07.196597Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624649921799066:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:07.196752Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:07.197362Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624649921799071:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:07.201568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:07.217277Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624649921799073:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:07.312043Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624649921799124:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:08.159916Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624632741927591:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:08.160025Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Error: Type annotation, code: 1030
:7:30: Error: At function: KiCreateTable!
:7:30: Error: Duplicate column: Value. Trying to start YDB, gRPC: 10586, MsgBus: 6850 2025-06-24T21:04:09.178610Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;s ... 169645 2025-06-24T21:04:09.326243Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:09.354568Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected TServer::EnableGrpc on GrpcPort 10586, node 2 2025-06-24T21:04:09.354667Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:09.358665Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:09.475631Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:09.475656Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:09.475662Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:09.475765Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6850 TClient is connected to server localhost:6850 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:09.938470Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:09.953419Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:10.018799Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:10.219233Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:04:10.245119Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:10.333934Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:12.637511Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624674311434830:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:12.637600Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:12.706752Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:12.742445Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:12.790942Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:12.828164Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:12.863913Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:12.917432Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:12.993771Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:13.103551Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624678606402785:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:13.103660Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:13.103753Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624678606402790:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:13.111167Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:13.125199Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624678606402792:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:13.203450Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624678606402843:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:14.173300Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624661426531320:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:14.173381Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:14.311033Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519624682901370418:2478], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:5:26: Error: At function: KiWriteTable!
:6:27: Error: Failed to convert type: Struct<'Key':Uint64,'Value':Uint64> to Struct<'Key':Uint64?,'Value':String?>
:6:27: Error: Failed to convert 'Value': Uint64 to Optional
:6:27: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-24T21:04:14.311364Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=YmNlYjYxM2MtNzM2MDg2MzQtOGM4MTcwYy1kYWY4ZGFiNw==, ActorId: [2:7519624682901370410:2473], ActorState: ExecuteState, TraceId: 01jyhw2xefc72bffvy4qjnr8hy, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id:
: Error: Type annotation, code: 1030
:5:26: Error: At function: KiWriteTable!
:6:27: Error: Failed to convert type: Struct<'Key':Uint64,'Value':Uint64> to Struct<'Key':Uint64?,'Value':String?>
:6:27: Error: Failed to convert 'Value': Uint64 to Optional
:6:27: Error: Failed to convert input columns types to scheme types, code: 2031 >> KqpScripting::StreamExecuteYqlScriptScanCancelation >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotCompactBorrowed [GOOD] >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldHandleCompactionTimeouts >> KqpYql::EvaluateExpr1 >> KqpScripting::StreamExecuteYqlScriptScan >> KqpYql::RefSelect >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldHandleDataShardReboot [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldNotCompactAfterDrop |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_7_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 7] [GOOD] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_5_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 5] [GOOD] >> KqpScripting::StreamExecuteYqlScriptClientOperationTimeoutBruteForce [GOOD] >> BackupRestore::TestAllPrimitiveTypes-JSON_DOCUMENT [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DYNUMBER >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotRequestCompactionsAfterDisable [GOOD] >> KqpYql::EvaluateExprPgNull >> KqpYql::TableConcat [GOOD] >> KqpYql::TableNameConflict >> BackupPathTest::ExportUnexistingCommonSourcePath [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DOUBLE [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATE ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotRequestCompactionsAfterDisable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:02:59.229232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:02:59.229316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:02:59.229370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:02:59.229418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:02:59.231285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:02:59.231348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:02:59.231435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:02:59.231521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:02:59.232309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:02:59.232668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:02:59.324316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:02:59.324387Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:59.343573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:02:59.349236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:02:59.349598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:02:59.362587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:02:59.367205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:02:59.369647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:02:59.370976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:02:59.384217Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:02:59.384860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:02:59.406420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:02:59.406524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:02:59.406659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:02:59.406722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:02:59.406768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:02:59.406967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:02:59.424240Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:02:59.591089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:02:59.591367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:02:59.591663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:02:59.591727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:02:59.591942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:02:59.592011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:02:59.594752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:02:59.594940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:02:59.595196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:02:59.595274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:02:59.595327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:02:59.595371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:02:59.600516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:02:59.600588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:02:59.600634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:02:59.603084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:02:59.603179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-24T21:02:59.603230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:02:59.603299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:02:59.607124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:02:59.610077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:02:59.610331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:02:59.611444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:02:59.611609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:02:59.611665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:02:59.611961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:02:59.612009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:02:59.612187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:02:59.612258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:02:59.614726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:02:59.614815Z node 1 :FLAT_TX_SCHEMESHARD ... 
ard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186233409546 2025-06-24T21:04:20.664062Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:321:2303]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-24T21:04:20.664224Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186233409546, FollowerId 0, tableId 2 2025-06-24T21:04:20.664613Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:321:2303], Recipient [3:127:2151]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409546 TableLocalId: 2 Generation: 2 Round: 10 TableStats { DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 29 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 32 Memory: 124232 Storage: 14156 } ShardState: 2 UserTablePartOwners: 72075186233409546 NodeId: 3 StartTime: 41 TableOwnerId: 72057594046678944 FollowerId: 0 2025-06-24T21:04:20.664663Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:04:20.664710Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 13940 rowCount 100 cpuUsage 0.0032 2025-06-24T21:04:20.664854Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] raw table stats: DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 29 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:04:20.664909Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-24T21:04:20.677520Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435073, Sender [0:0:0], Recipient [3:329:2309]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-06-24T21:04:20.677592Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3158: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-06-24T21:04:20.677666Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186233409547 outdated step 5000002 last cleanup 0 2025-06-24T21:04:20.677714Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186233409547 (dry run) active 0 active planned 0 immediate 0 planned 0 
2025-06-24T21:04:20.677752Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186233409547 2025-06-24T21:04:20.677816Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186233409547 has no attached operations 2025-06-24T21:04:20.677850Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186233409547 2025-06-24T21:04:20.677973Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:329:2309]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-24T21:04:20.678057Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186233409547, FollowerId 0, tableId 2 2025-06-24T21:04:20.678380Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:329:2309], Recipient [3:127:2151]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409547 TableLocalId: 2 Generation: 2 Round: 10 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 26 Memory: 119352 } ShardState: 2 UserTablePartOwners: 72075186233409547 NodeId: 3 StartTime: 43 TableOwnerId: 72057594046678944 FollowerId: 0 2025-06-24T21:04:20.678432Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:04:20.678480Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0026 2025-06-24T21:04:20.678569Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:04:20.722615Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:04:20.722700Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:04:20.722750Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-24T21:04:20.722838Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 
2 2025-06-24T21:04:20.722873Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 2 2025-06-24T21:04:20.722998Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 13940 row count 100 2025-06-24T21:04:20.723099Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 100, DataSize 13940 2025-06-24T21:04:20.723133Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409546, followerId 0 2025-06-24T21:04:20.723230Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:219: [BackgroundCompaction] [Update] Updated shard# 72057594046678944:1 with partCount# 1, rowCount# 100, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:29.000000Z at schemeshard 72057594046678944 2025-06-24T21:04:20.723298Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:477: Do not want to split tablet 72075186233409546 by size, its table already has 2 out of 2 partitions 2025-06-24T21:04:20.723354Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-24T21:04:20.723406Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:04:20.723450Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409547, followerId 0 2025-06-24T21:04:20.723509Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:219: [BackgroundCompaction] [Update] Updated shard# 72057594046678944:2 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046678944 2025-06-24T21:04:20.723564Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:477: Do not want to split tablet 72075186233409547 by size, its table already has 2 out of 2 partitions 2025-06-24T21:04:20.723635Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:04:20.734132Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:04:20.734216Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:04:20.734251Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-24T21:04:20.767895Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:1325:3244], Recipient [3:321:2303]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:04:20.767983Z node 3 :TX_DATASHARD 
TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:04:20.768036Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186233409546, clientId# [3:1324:3243], serverId# [3:1325:3244], sessionId# [0:0:0] 2025-06-24T21:04:20.768247Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553213, Sender [3:1323:3242], Recipient [3:321:2303]: NKikimrTxDataShard.TEvGetCompactTableStats PathId { OwnerId: 72057594046678944 LocalId: 2 } 2025-06-24T21:04:20.771677Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:1328:3247], Recipient [3:329:2309]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:04:20.771736Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:04:20.771777Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186233409547, clientId# [3:1327:3246], serverId# [3:1328:3247], sessionId# [0:0:0] 2025-06-24T21:04:20.771973Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553213, Sender [3:1326:3245], Recipient [3:329:2309]: NKikimrTxDataShard.TEvGetCompactTableStats PathId { OwnerId: 72057594046678944 LocalId: 2 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptClientOperationTimeoutBruteForce [GOOD] Test command err: Trying to start YDB, gRPC: 23037, MsgBus: 30184 2025-06-24T21:03:59.484492Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624618016681140:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:03:59.491225Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003062/r3tmp/tmpPeygrO/pdisk_1.dat 2025-06-24T21:04:00.017963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:00.018068Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:00.024415Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:00.060606Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:00.062806Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624618016681098:2079] 1750799039478708 != 1750799039478711 TServer::EnableGrpc on GrpcPort 23037, node 1 2025-06-24T21:04:00.138876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:00.138898Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:00.138910Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:00.139062Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30184 2025-06-24T21:04:00.545058Z node 1 :TX_CONVEYOR 
ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30184 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:00.830393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:00.851497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:00.867655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:01.025662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:04:01.209062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:01.293900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:03.141905Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624635196551925:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:03.142036Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:03.529404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:03.573530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:03.624575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:03.657297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:03.691786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:03.727839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:03.763139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:03.829913Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624635196552582:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:03.830033Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:03.830480Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624635196552587:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:03.834453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:03.846104Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624635196552589:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:03.942823Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624635196552640:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:04.484802Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624618016681140:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:04.484867Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:05.485754Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799045436, txId: 281474976710674] shutting down 2025-06-24T21:04:05.486601Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding sna ... T21:04:13.704680Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:13.754259Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:13.842189Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624676757375638:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:13.842277Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:13.842485Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624676757375643:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:13.846261Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:13.857883Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624676757375645:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:13.925639Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624676757375696:3409] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:15.248317Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624663872471516:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:15.270587Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:15.331468Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799055348, txId: 281474976715674] shutting down 2025-06-24T21:04:15.335644Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799055341, txId: 281474976715672] shutting down 2025-06-24T21:04:15.336442Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799055348, txId: 281474976715673] shutting down 2025-06-24T21:04:15.710888Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799055600, txId: 281474976715679] shutting down 2025-06-24T21:04:15.733348Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799055600, txId: 281474976715678] shutting down 2025-06-24T21:04:15.885367Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799055775, txId: 281474976715683] shutting down 2025-06-24T21:04:15.885600Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799055775, txId: 281474976715682] shutting down 2025-06-24T21:04:16.082309Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799055936, txId: 281474976715686] shutting down 2025-06-24T21:04:16.105934Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799055936, txId: 281474976715687] shutting down 2025-06-24T21:04:16.201897Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799056132, txId: 281474976715691] shutting down 2025-06-24T21:04:16.202616Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799056132, txId: 281474976715690] shutting down 2025-06-24T21:04:16.336973Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799056328, txId: 281474976715696] shutting down 2025-06-24T21:04:16.337626Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799056328, txId: 281474976715694] 
shutting down 2025-06-24T21:04:16.338730Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799056328, txId: 281474976715695] shutting down 2025-06-24T21:04:16.751009Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799056580, txId: 281474976715700] shutting down 2025-06-24T21:04:16.763789Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799056580, txId: 281474976715701] shutting down 2025-06-24T21:04:16.851410Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799056776, txId: 281474976715704] shutting down 2025-06-24T21:04:16.852267Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799056776, txId: 281474976715705] shutting down 2025-06-24T21:04:16.953547Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799056958, txId: 281474976715710] shutting down 2025-06-24T21:04:16.957248Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799056958, txId: 281474976715708] shutting down 2025-06-24T21:04:16.970131Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799056958, txId: 281474976715709] shutting down 2025-06-24T21:04:17.174044Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799057182, txId: 281474976715715] shutting down 2025-06-24T21:04:17.174761Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799057182, txId: 281474976715714] shutting down 2025-06-24T21:04:17.363256Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799057392, txId: 281474976715718] shutting down 2025-06-24T21:04:17.502210Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799057525, txId: 281474976715720] shutting down 2025-06-24T21:04:17.502495Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799057525, txId: 281474976715721] shutting down 2025-06-24T21:04:17.761744Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799057784, txId: 281474976715725] shutting down 2025-06-24T21:04:17.762538Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799057784, txId: 281474976715724] shutting down 2025-06-24T21:04:18.041753Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799058057, txId: 281474976715728] shutting down 2025-06-24T21:04:18.042758Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799058057, txId: 281474976715729] shutting down 
2025-06-24T21:04:18.394561Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799058330, txId: 281474976715732] shutting down 2025-06-24T21:04:18.444657Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799058470, txId: 281474976715735] shutting down 2025-06-24T21:04:18.448025Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799058470, txId: 281474976715734] shutting down 2025-06-24T21:04:18.692204Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799058708, txId: 281474976715738] shutting down 2025-06-24T21:04:18.866812Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799058876, txId: 281474976715740] shutting down 2025-06-24T21:04:19.015166Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799059037, txId: 281474976715742] shutting down 2025-06-24T21:04:19.162857Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799059191, txId: 281474976715745] shutting down 2025-06-24T21:04:19.163200Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799059191, txId: 281474976715744] shutting down 2025-06-24T21:04:19.427809Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799059450, txId: 281474976715748] shutting down 2025-06-24T21:04:19.552285Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799059576, txId: 281474976715750] shutting down 2025-06-24T21:04:19.797457Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799059814, txId: 281474976715752] shutting down 2025-06-24T21:04:19.799460Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799059814, txId: 281474976715753] shutting down >> KqpPragma::OrderedColumns [GOOD] >> KqpPragma::MatchRecognizeWithoutTimeOrderRecoverer >> Secret::Simple [GOOD] >> KqpYql::FlexibleTypes [GOOD] >> KqpYql::FromBytes >> KqpScripting::StreamExecuteYqlScriptOperationTmeoutBruteForce >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerlessAfterDisable [GOOD] >> ListObjectsInS3Export::ExportWithEncryption [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_9_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 9] [GOOD] >> EncryptedBackupParamsValidationTest::EncryptionSettingsWithoutKeyImport [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerlessAfterDisable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: 
[1:129:2058] recipient: [1:111:2141] 2025-06-24T21:03:04.024068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:03:04.024163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:03:04.024203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:03:04.024252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:03:04.024287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:03:04.024325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:03:04.024372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:03:04.024440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:03:04.025075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:03:04.025343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:03:04.110357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:03:04.110430Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:03:04.127047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:03:04.127466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:03:04.127645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:03:04.135203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:03:04.135453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:03:04.136191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:03:04.136512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:03:04.140493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:03:04.140779Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:03:04.142022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:03:04.142088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:03:04.142282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:03:04.142322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:03:04.142358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:03:04.142461Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:03:04.148507Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:03:04.305901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:03:04.306170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:03:04.306467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:03:04.306525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:03:04.306777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:03:04.306879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:03:04.309333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:03:04.309550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:03:04.309782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:03:04.309859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:03:04.309902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:03:04.309940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:03:04.316249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:03:04.316314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:03:04.316367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:03:04.318603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:03:04.318677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:03:04.318727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:03:04.318807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:03:04.322812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:03:04.328195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:03:04.328455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:03:04.329716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:03:04.329877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:03:04.329933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply 
TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:03:04.330331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:03:04.330404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:03:04.330580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:03:04.330659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:03:04.336505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:03:04.336586Z node 1 :FLAT_TX_SCHEMESHARD ... 1:04:22.096215Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5011: StateWork, processing event TEvPrivate::TEvRunConditionalErase 2025-06-24T21:04:22.096260Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6712: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546 2025-06-24T21:04:22.096331Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546 2025-06-24T21:04:22.096388Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546 2025-06-24T21:04:22.179068Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435073, Sender [0:0:0], Recipient [3:773:2653]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-06-24T21:04:22.179174Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3158: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-06-24T21:04:22.179301Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186233409552 outdated step 200 last cleanup 0 2025-06-24T21:04:22.179375Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186233409552 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:04:22.179414Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186233409552 2025-06-24T21:04:22.179446Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186233409552 has no attached operations 2025-06-24T21:04:22.179477Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186233409552 2025-06-24T21:04:22.179633Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435073, Sender [0:0:0], Recipient [3:775:2654]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-06-24T21:04:22.179672Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3158: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-06-24T21:04:22.179721Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186233409553 outdated step 200 last cleanup 0 2025-06-24T21:04:22.179775Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186233409553 (dry run) active 0 active planned 0 immediate 0 planned 0 
2025-06-24T21:04:22.179802Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186233409553 2025-06-24T21:04:22.179826Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186233409553 has no attached operations 2025-06-24T21:04:22.179855Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186233409553 2025-06-24T21:04:22.179987Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:773:2653]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-24T21:04:22.180106Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186233409552, FollowerId 0, tableId 2 2025-06-24T21:04:22.180188Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:775:2654]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-24T21:04:22.180287Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186233409553, FollowerId 0, tableId 2 2025-06-24T21:04:22.180643Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:773:2653], Recipient [3:903:2755]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409552 TableLocalId: 2 Generation: 2 Round: 10 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 28 Memory: 119352 } ShardState: 2 UserTablePartOwners: 72075186233409552 NodeId: 3 StartTime: 152 TableOwnerId: 72075186233409549 FollowerId: 0 2025-06-24T21:04:22.180701Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:04:22.180752Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409552 followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0028 2025-06-24T21:04:22.180875Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409552 followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:04:22.180928Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-24T21:04:22.181186Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender 
[3:775:2654], Recipient [3:903:2755]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409553 TableLocalId: 2 Generation: 2 Round: 10 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 18 Memory: 119352 } ShardState: 2 UserTablePartOwners: 72075186233409553 NodeId: 3 StartTime: 152 TableOwnerId: 72075186233409549 FollowerId: 0 2025-06-24T21:04:22.181248Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:04:22.181292Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409553 followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0018 2025-06-24T21:04:22.181383Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409553 followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:04:22.208398Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:903:2755]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:04:22.208478Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:04:22.208584Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:903:2755], Recipient [3:903:2755]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:04:22.208632Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:04:22.219175Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435096, Sender [0:0:0], Recipient [3:903:2755]: NKikimr::NSchemeShard::TEvPrivate::TEvSendBaseStatsToSA 2025-06-24T21:04:22.219283Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5136: StateWork, processing event TEvPrivate::TEvSendBaseStatsToSA 2025-06-24T21:04:22.219548Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435076, Sender [0:0:0], Recipient [3:903:2755]: NKikimr::NSchemeShard::TEvPrivate::TEvRunConditionalErase 2025-06-24T21:04:22.219585Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5011: StateWork, processing event TEvPrivate::TEvRunConditionalErase 2025-06-24T21:04:22.219618Z node 
3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6712: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409549 2025-06-24T21:04:22.219689Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409549 2025-06-24T21:04:22.219753Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409549 2025-06-24T21:04:22.219937Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269746180, Sender [3:2013:3827], Recipient [3:903:2755]: NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult 2025-06-24T21:04:22.219982Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5135: StateWork, processing event TEvTxProxySchemeCache::TEvNavigateKeySetResult 2025-06-24T21:04:22.254647Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:2016:3830], Recipient [3:773:2653]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:04:22.254749Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:04:22.254814Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186233409552, clientId# [3:2015:3829], serverId# [3:2016:3830], sessionId# [0:0:0] 2025-06-24T21:04:22.255082Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553213, Sender [3:2014:3828], Recipient [3:773:2653]: NKikimrTxDataShard.TEvGetCompactTableStats PathId { OwnerId: 72075186233409549 LocalId: 2 } 2025-06-24T21:04:22.255767Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:2019:3833], Recipient [3:775:2654]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:04:22.255803Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:04:22.255857Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186233409553, clientId# [3:2018:3832], serverId# [3:2019:3833], sessionId# [0:0:0] 2025-06-24T21:04:22.256012Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553213, Sender [3:2017:3831], Recipient [3:775:2654]: NKikimrTxDataShard.TEvGetCompactTableStats PathId { OwnerId: 72075186233409549 LocalId: 2 } ------- [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::Simple [GOOD] Test command err: 2025-06-24T21:01:55.639983Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:01:55.640431Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:01:55.640614Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001f2a/r3tmp/tmpm8p9XQ/pdisk_1.dat 2025-06-24T21:01:56.196670Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 64428, node 1 TClient is connected to server localhost:1103 2025-06-24T21:01:57.404014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:01:57.457453Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:01:57.457535Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:01:57.457582Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:01:57.457937Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:01:57.470019Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:01:57.474962Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750798912134908 != 1750798912134912 2025-06-24T21:01:57.564918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:01:57.565040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:01:57.578694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected snapshot->GetSecrets().size() incorrect: SECRETS:ACCESS: 2025-06-24T21:01:57.820216Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Initialization finished REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 2025-06-24T21:02:10.022454Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:767:2633], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:10.022634Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:10.038470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:10.270681Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:884:2711], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:10.270798Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:10.271086Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:889:2716], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:10.275642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:02:10.370639Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:891:2718], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:02:11.036885Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:985:2783] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:02:11.926141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:02:12.506041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:13.311549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:14.103502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:02:14.528143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:02:16.097227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) snapshot->GetSecrets().size() incorrect: SECRETS:ACCESS: 2025-06-24T21:02:16.547670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT secret1 (TYPE 
SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 snapshot->GetAccess().size() incorrect: SECRETS:root@builtin:secret1:100;ACCESS: REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `100`;RESULT=;EXPECTATION=1 snapshot->GetSecrets().size() incorrect: SECRETS:root@builtin:secret1:100;root@builtin:secret1_1:100;ACCESS: 2025-06-24T21:02:35.094230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:02:35.094318Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded FINISHED_REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `200`;EXPECTATION=1;WAITING=1 REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `200`;RESULT=;EXPECTATION=1 snapshot->GetSecrets().size() incorrect: SECRETS:root@builtin:secret1:100;root@builtin:secret1_1:200;ACCESS: FINISHED_REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `200`;EXPECTATION=1;WAITING=1 2025-06-24T21:02:59.616418Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715723. Ctx: { TraceId: 01jyhw0kyfafz8zz3sfdpccxns, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmVkODAzZGQtZTlkY2UyZjYtMTg3OGRiYWUtMjM3MmQyMmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=1;WAITING=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 2025-06-24T21:03:23.133110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715742:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:24.188872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715747:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:26.148736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715756:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:03:26.882081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called 
at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);RESULT=;EXPECTATION=1 snapshot->GetAccess().size() incorrect (zero expects): SECRETS:root@builtin:secret1:abcde;root@builtin:secret1_1:200;ACCESS:root@builtin:secret1:test@test1; FINISHED_REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 2025-06-24T21:03:40.009906Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715771. Ctx: { TraceId: 01jyhw1vft67na6mxbc2jsa8y8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDUzMmNhNjUtNTExMjRlYi1mMDBjZTk2My02MDczYmVmNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 REQUEST=DROP OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);RESULT=;EXPECTATION=1 snapshot->GetAccess().size() incorrect: SECRETS:root@builtin:secret1:abcde;root@builtin:secret1_1:200;ACCESS: FINISHED_REQUEST=DROP OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1` (TYPE SECRET);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1` (TYPE SECRET);RESULT=;EXPECTATION=1 snapshot->GetSecrets().size() incorrect: SECRETS:root@builtin:secret1_1:200;ACCESS: FINISHED_REQUEST=DROP OBJECT `secret1` (TYPE SECRET);EXPECTATION=1;WAITING=1 2025-06-24T21:04:19.761893Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715805. Ctx: { TraceId: 01jyhw32k499g47a15ckzfgngd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWM2YWMyZDAtNmM1NDA4ZGMtZDUxYzNkYjYtODhhYzg5NWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT * FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT * FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 >> Secret::SimpleQueryService [GOOD] >> BackupPathTest::FilterByPathFailsWhenNoSchemaMapping >> KqpPragma::Auth >> KqpYql::EvaluateExpr1 [GOOD] >> KqpYql::Discard >> KqpYql::TestUuidPrimaryKeyPrefixSearch >> ListObjectsInS3Export::ExportWithWrongEncryptionKey >> EncryptedBackupParamsValidationTest::NoSourcePrefixEncrypted >> KqpScripting::StreamExecuteYqlScriptScanCancelation [GOOD] >> KqpScripting::StreamExecuteYqlScriptScanClientOperationTimeoutBruteForce ------- [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::SimpleQueryService [GOOD] Test command err: 2025-06-24T21:01:59.477028Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:01:59.477484Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:01:59.477686Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001d96/r3tmp/tmpUyYs9o/pdisk_1.dat 2025-06-24T21:01:59.847728Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 65070, node 1 TClient is connected to server localhost:25867 2025-06-24T21:02:00.169000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:02:00.214512Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:02:00.214611Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:02:00.214662Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:02:00.215072Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:00.218266Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:02:00.218647Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750798915901090 != 1750798915901094 2025-06-24T21:02:00.269847Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:00.270026Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:00.284669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected snapshot->GetSecrets().size() incorrect: SECRETS:ACCESS: 2025-06-24T21:02:00.529113Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Initialization finished REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 2025-06-24T21:02:12.271833Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:764:2632], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:12.271999Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:774:2637], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:12.272108Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:12.278524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:02:12.302954Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:778:2640], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-24T21:02:12.363823Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:829:2672] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:02:12.629363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:13.676303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:02:14.228924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:15.151137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:15.865757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:02:16.386066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:02:17.552912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) snapshot->GetSecrets().size() incorrect: SECRETS:ACCESS: 2025-06-24T21:02:18.089327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, 
suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 snapshot->GetAccess().size() incorrect: SECRETS:root@builtin:secret1:100;ACCESS: REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `100`;RESULT=;EXPECTATION=1 snapshot->GetSecrets().size() incorrect: SECRETS:root@builtin:secret1:100;root@builtin:secret1_1:100;ACCESS: 2025-06-24T21:02:36.010877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:02:36.010951Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded FINISHED_REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `200`;EXPECTATION=1;WAITING=1 REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `200`;RESULT=;EXPECTATION=1 snapshot->GetSecrets().size() incorrect: SECRETS:root@builtin:secret1:100;root@builtin:secret1_1:200;ACCESS: FINISHED_REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `200`;EXPECTATION=1;WAITING=1 2025-06-24T21:02:59.765535Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715719. Ctx: { TraceId: 01jyhw0m9mc5j334hdd2wk0bs6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDJkMzIwYjYtM2M2OWY0M2UtZDA3MTAwMDktZTI2NWU2MWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=1;WAITING=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 2025-06-24T21:03:23.000803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715736:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:24.363882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715743:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:26.401414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715754:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:03:27.043438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715757:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);RESULT=;EXPECTATION=1 snapshot->GetAccess().size() incorrect (zero expects): SECRETS:root@builtin:secret1:abcde;root@builtin:secret1_1:200;ACCESS:root@builtin:secret1:test@test1; FINISHED_REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 2025-06-24T21:03:41.130957Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715769. Ctx: { TraceId: 01jyhw1wk22rtqex79hrvqgxqp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTIxMTMyMGItYTAzOGY3NzItZmI5OGI1YjYtODliYmNmYzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 REQUEST=DROP OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);RESULT=;EXPECTATION=1 snapshot->GetAccess().size() incorrect: SECRETS:root@builtin:secret1:abcde;root@builtin:secret1_1:200;ACCESS: FINISHED_REQUEST=DROP OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1` (TYPE SECRET);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1` (TYPE SECRET);RESULT=;EXPECTATION=1 snapshot->GetSecrets().size() incorrect: SECRETS:root@builtin:secret1_1:200;ACCESS: FINISHED_REQUEST=DROP OBJECT `secret1` (TYPE SECRET);EXPECTATION=1;WAITING=1 2025-06-24T21:04:20.888749Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715809. Ctx: { TraceId: 01jyhw33rg051k9bzvzajk08hp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmZiMjU1NC03MDM0ZTBlMS1lMDQxNjA4OC1iMjFmOTU1OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT * FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT * FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 >> KqpYql::RefSelect [GOOD] >> KqpYql::PgIntPrimaryKey >> BackupRestoreS3::TestAllPrimitiveTypes-DYNUMBER [GOOD] >> KqpScripting::StreamExecuteYqlScriptMixed >> KqpScripting::StreamExecuteYqlScriptScan [GOOD] >> KqpScripting::StreamExecuteYqlScriptScanCancelAfterBruteForce >> KqpPragma::ResetPerQuery >> KqpScripting::StreamExecuteYqlScriptScanWriteCancelAfterBruteForced ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestoreS3::TestAllPrimitiveTypes-DYNUMBER [GOOD] Test command err: 2025-06-24T21:02:03.591807Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624119662596470:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:03.597813Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045ca/r3tmp/tmpt7vT2K/pdisk_1.dat 2025-06-24T21:02:04.276462Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:04.276555Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:04.286702Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:04.292247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11550, node 1 2025-06-24T21:02:04.617651Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:02:04.669921Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:02:04.669944Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
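For reference, the secret-management trace above drives the full metadata secret lifecycle through plain YQL object DDL: the REQUEST=/FINISHED_REQUEST= markers wrap each statement, and the snapshot->GetSecrets()/GetAccess() lines are the test inspecting the resulting metadata snapshot. A minimal sketch of that sequence, using the exact statements and object names from the trace (only the grouping comments are added; the `secret1:test@test1` object appears to bind the SID test@test1 to secret1):

    -- create secrets (UPSERT acts as create-or-replace, as the 100 -> 200 update above shows)
    CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;
    UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `100`;
    UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `200`;
    -- change an existing secret's value
    ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;
    -- grant and later revoke access for another SID
    CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);
    DROP OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);
    -- drop the secret itself last
    DROP OBJECT `secret1` (TYPE SECRET);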
2025-06-24T21:02:04.669951Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:02:04.670065Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8919 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:02:05.359861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:02:05.399540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:02:07.717374Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624136842466590:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:07.717770Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519624119662596573:2142] Handle TEvProposeTransaction 2025-06-24T21:02:07.717804Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519624119662596573:2142] TxId# 281474976710658 ProcessProposeTransaction 2025-06-24T21:02:07.717844Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519624119662596573:2142] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519624136842466594:2633] 2025-06-24T21:02:07.718371Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624136842466579:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:07.718498Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:07.786014Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519624136842466594:2633] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-24T21:02:07.786100Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519624136842466594:2633] txid# 281474976710658 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:02:07.786120Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519624136842466594:2633] txid# 281474976710658 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-24T21:02:07.787916Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519624136842466594:2633] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:02:07.788008Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519624136842466594:2633] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:02:07.788280Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519624136842466594:2633] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:07.788447Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519624136842466594:2633] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:02:07.788511Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519624136842466594:2633] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-24T21:02:07.788640Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519624136842466594:2633] txid# 281474976710658 HANDLE EvClientConnected 2025-06-24T21:02:07.790098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:02:07.794531Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519624136842466594:2633] txid# 281474976710658 Status 
StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710658} 2025-06-24T21:02:07.794595Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519624136842466594:2633] txid# 281474976710658 SEND to# [1:7519624136842466593:2305] Source {TEvProposeTransactionStatus txid# 281474976710658 Status# 53} 2025-06-24T21:02:07.820640Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624136842466593:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:02:07.895185Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519624119662596573:2142] Handle TEvProposeTransaction 2025-06-24T21:02:07.895220Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519624119662596573:2142] TxId# 281474976710659 ProcessProposeTransaction 2025-06-24T21:02:07.895267Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519624119662596573:2142] Cookie# 0 userReqId# "" txid# 281474976710659 SEND to# [1:7519624136842466668:2687] 2025-06-24T21:02:07.898096Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519624136842466668:2687] txid# 281474976710659 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-24T21:02:07.898164Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519624136842466668:2687] txid# 281474976710659 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:02:07.898181Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519624136842466668:2687] txid# 281474976710659 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-24T21:02:07.898726Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519624136842466668:2687] txid# 281474976710659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:02:07.898814Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519624136842466668:2687] txid# 281474976710659 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:02:07.899060Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519624136842466668:2687] txid# 281474976710659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:07.899233Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519624136842466668:2687] HANDLE EvNavigateKeySetResult, txid# 281474976710659 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainIn ... 
s: DoExecute 2025-06-24T21:04:23.224615Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:989: TImport::TTxProgress: OnSchemeResult: id# 281474976710665, itemIdx# 0, success# 1 2025-06-24T21:04:23.225174Z node 46 :IMPORT INFO: schemeshard_import__create.cpp:629: TImport::TTxProgress: Allocate txId: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: State: CreateSchemeObject SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-24T21:04:23.243661Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:04:23.243846Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:04:23.243863Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:1218: TImport::TTxProgress: OnAllocateResult: txId# 281474976715760, id# 281474976710665 2025-06-24T21:04:23.243924Z node 46 :IMPORT INFO: schemeshard_import__create.cpp:419: TImport::TTxProgress: CreateTable propose: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: State: CreateSchemeObject SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976715760 2025-06-24T21:04:23.244087Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:04:23.246089Z node 46 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715760:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:23.249478Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:04:23.249505Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:1314: TImport::TTxProgress: OnModifyResult: txId# 281474976715760, status# StatusAccepted 2025-06-24T21:04:23.249653Z node 46 :IMPORT INFO: schemeshard_import__create.cpp:643: TImport::TTxProgress: Wait for completion: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: CreateSchemeObject SubState: Subscribed WaitTxId: 281474976715760 Issue: '' } 2025-06-24T21:04:23.260593Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:04:23.339864Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:04:23.339917Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:1472: TImport::TTxProgress: OnNotifyResult: txId# 281474976715760 2025-06-24T21:04:23.340020Z node 46 :IMPORT INFO: schemeshard_import__create.cpp:629: TImport::TTxProgress: Allocate txId: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 
9] State: Transferring SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-24T21:04:23.342735Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:04:23.342845Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:04:23.342860Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:1218: TImport::TTxProgress: OnAllocateResult: txId# 281474976715761, id# 281474976710665 2025-06-24T21:04:23.342949Z node 46 :IMPORT INFO: schemeshard_import__create.cpp:520: TImport::TTxProgress: Restore propose: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976715761 2025-06-24T21:04:23.343812Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:04:23.344345Z node 46 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRestore, opId: 281474976715761:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_backup_restore_common.h:563) 2025-06-24T21:04:23.346699Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:04:23.346724Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:1314: TImport::TTxProgress: OnModifyResult: txId# 281474976715761, status# StatusAccepted 2025-06-24T21:04:23.346806Z node 46 :IMPORT INFO: schemeshard_import__create.cpp:643: TImport::TTxProgress: Wait for completion: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Subscribed WaitTxId: 281474976715761 Issue: '' } 2025-06-24T21:04:23.348582Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:04:23.367860Z node 46 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [46:7519624720000981935:2364] [0] Resolve database: name# /Root 2025-06-24T21:04:23.370210Z node 46 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [46:7519624720000981935:2364] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:04:23.370255Z node 46 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [46:7519624720000981935:2364] [0] Send request: schemeShardId# 72057594046644480 2025-06-24T21:04:23.379303Z node 
46 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [46:7519624720000981935:2364] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976710665 Status: SUCCESS Progress: PROGRESS_TRANSFER_DATA ImportFromS3Settings { endpoint: "localhost:1900" scheme: HTTP bucket: "test_bucket" items { source_prefix: "DyNumberTable" destination_path: "/Root/DyNumberTable" } } StartTime { seconds: 1750799063 } } REQUEST: HEAD /test_bucket/DyNumberTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:1900 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 5BF71B29-8245-4A49-8E9C-8D6D3A37BD45 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250624/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=60e57d95705b12d9989d84860c0a8a6751ea35c61a4d3fd253de2d795fa8f73a content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250624T210423Z S3_MOCK::HttpServeRead: /test_bucket/DyNumberTable/data_00.csv / 7 REQUEST: GET /test_bucket/DyNumberTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:1900 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: EC8BE329-C378-4516-854A-EC2004B1F169 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250624/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;range;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=7c0473103e25d1e828842cf02235132964d4aceeb9f43ef6b76aa9de101b27c8 content-type: application/xml range: bytes=0-6 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250624T210423Z S3_MOCK::HttpServeRead: /test_bucket/DyNumberTable/data_00.csv / 7 2025-06-24T21:04:23.543808Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:04:23.543860Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:1472: TImport::TTxProgress: OnNotifyResult: txId# 281474976715761 2025-06-24T21:04:23.546939Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:04:23.797807Z node 46 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [46:7519624720000981978:2367] [0] Resolve database: name# /Root 2025-06-24T21:04:23.800171Z node 46 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [46:7519624720000981978:2367] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 
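The import trace above reaches Progress: PROGRESS_DONE for /Root/DyNumberTable after the S3 mock serves a HEAD and a ranged GET of data_00.csv (7 bytes). The KQP transaction that follows (TxId 281474976710666) is presumably the test reading the restored table back; its query text is not in the log, so the statement below is only a hypothetical read-back against the destination_path from ImportFromS3Settings, not the test's actual query:

    -- hypothetical verification query; the real query text is not shown in this log
    SELECT * FROM `/Root/DyNumberTable`;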
2025-06-24T21:04:23.800215Z node 46 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [46:7519624720000981978:2367] [0] Send request: schemeShardId# 72057594046644480 2025-06-24T21:04:23.801188Z node 46 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [46:7519624720000981978:2367] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976710665 Status: SUCCESS Progress: PROGRESS_DONE ImportFromS3Settings { endpoint: "localhost:1900" scheme: HTTP bucket: "test_bucket" items { source_prefix: "DyNumberTable" destination_path: "/Root/DyNumberTable" } } StartTime { seconds: 1750799063 } EndTime { seconds: 1750799063 } } 2025-06-24T21:04:23.972014Z node 46 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [46:7519624689936209136:2133] Handle TEvExecuteKqpTransaction 2025-06-24T21:04:23.972050Z node 46 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [46:7519624689936209136:2133] TxId# 281474976710666 ProcessProposeKqpTransaction 2025-06-24T21:04:23.973307Z node 46 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jyhw36rd8vk68m23bwbzw0xt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=46&id=Yjc3ODUxYjQtYWVhOWJiZjUtNjhlMzZhM2QtZjdmMWEwNGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> KqpYql::TableNameConflict [GOOD] >> KqpYql::FromBytes [GOOD] >> KqpYql::EvaluateExprPgNull [GOOD] >> KqpYql::EvaluateExprYsonAndType |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_3_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 3] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::TableNameConflict [GOOD] Test command err: Trying to start YDB, gRPC: 9382, MsgBus: 6909 2025-06-24T21:04:15.165941Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624685973751133:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:15.166045Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ffa/r3tmp/tmp5gZghA/pdisk_1.dat 2025-06-24T21:04:15.529336Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9382, node 1 2025-06-24T21:04:15.592324Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:15.592471Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:15.595323Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:15.648974Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:15.649004Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:15.649011Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:15.649194Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration TClient is connected to server localhost:6909 2025-06-24T21:04:16.183435Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6909 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:16.520915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:16.534357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:16.541467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:16.706323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:16.896535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:16.992266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:19.012521Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624698858654624:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:19.012654Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:19.345725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:19.393477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:19.463501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:19.538253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:19.579725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:19.657394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:19.714679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:19.800645Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624703153622588:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:19.800725Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:19.800996Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624703153622593:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:19.804844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:19.822243Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624703153622595:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:19.896860Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624703153622646:3429] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:20.169473Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624685973751133:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:20.169551Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Error: Table intent determination, code: 1040
:3:27: Error: CONCAT is not supported on Kikimr clusters. Trying to start YDB, gRPC: 19722, MsgBus: 23559 2025-06-24T21:04:21.970003Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624709305226676:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:21.975687Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ffa/r3tmp/tmpA3jqig/pdisk_1.dat 2025-06-24T21:04:22.205933Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:22.206015Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:22.207736Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:22.210980Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:22.211617Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624709305226659:2079] 1750799061968350 != 1750799061968353 TServer::EnableGrpc on GrpcPort 19722, node 2 2025-06-24T21:04:22.275767Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:22.275794Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:22.275802Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:22.275914Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23559 TClient is connected to server localhost:23559 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:22.926542Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
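The traces in this section repeatedly show the workload manager bootstrapping /Root/.metadata/workload_manager/pools/default: a `Resource pool default not found` warning, an ESchemeOpCreateResourcePool whose limit properties are all -1 (unlimited) or 0, a doublechecking retry, and a benign `path exist, request accepts it` result when a later attempt finds the pool already created. A rough YQL sketch of an equivalent pool definition is given below; the WITH option names are assumptions derived from the property keys in the CreateResourcePool proto dump, and the pool name is hypothetical, so the exact grammar may differ:

    -- sketch only: option names inferred from the CreateResourcePool properties above
    CREATE RESOURCE POOL example_pool WITH (
        CONCURRENT_QUERY_LIMIT = -1,               -- -1 means unlimited
        QUEUE_SIZE = -1,
        DATABASE_LOAD_CPU_THRESHOLD = -1,
        QUERY_CANCEL_AFTER_SECONDS = 0,
        QUERY_CPU_LIMIT_PERCENT_PER_NODE = -1,
        QUERY_MEMORY_LIMIT_PERCENT_PER_NODE = -1,
        TOTAL_CPU_LIMIT_PERCENT_PER_NODE = -1,
        RESOURCE_WEIGHT = -1
    );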
2025-06-24T21:04:22.935179Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:22.953834Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:23.027729Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:23.067517Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:23.230907Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:23.326975Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:25.560440Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624726485097465:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:25.560526Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:25.662293Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:25.699077Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:25.754749Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:25.836333Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:25.876407Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:25.981818Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.066054Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.165173Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624730780065427:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.165255Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.165553Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624730780065432:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.169634Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:26.191246Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624730780065434:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:26.282611Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624730780065485:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:26.969064Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624709305226676:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:26.969139Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Error: Type annotation, code: 1030
:12:30: Error: At function: KiCreateTable!
:12:30: Error: Table name conflict: db.[/Root/Test] is used to reference multiple tables. >> KqpScripting::LimitOnShard >> KqpPragma::MatchRecognizeWithoutTimeOrderRecoverer [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::FromBytes [GOOD] Test command err: Trying to start YDB, gRPC: 14132, MsgBus: 25393 2025-06-24T21:04:15.906674Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624685268500868:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:15.906756Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ff4/r3tmp/tmpXk6BUy/pdisk_1.dat 2025-06-24T21:04:16.451971Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624685268500845:2079] 1750799055905293 != 1750799055905296 2025-06-24T21:04:16.456588Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14132, node 1 2025-06-24T21:04:16.469853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:16.469940Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:16.472061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:16.628276Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:16.628297Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:16.628320Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:16.628433Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25393 2025-06-24T21:04:16.927964Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25393 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:04:17.371838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:17.396401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:17.403805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:17.559009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:17.746787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:17.826195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:19.535469Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624702448371676:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:19.535600Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:19.907432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:19.966483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:20.004622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:20.047715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:20.092562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:20.139188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:20.197494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:20.291181Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624706743339630:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:20.291267Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:20.291330Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624706743339635:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:20.296913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:20.312599Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624706743339637:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:20.372476Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624706743339688:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:20.908090Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624685268500868:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:20.908197Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 6733, MsgBus: 20697 2025-06-24T21:04:22.660275Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624716262084982:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:22.660343Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ff4/r3tmp/tmpjmu2ky/pdisk_1.dat 2025-06-24T21:04:22.779978Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6733, node 2 2025-06-24T21:04:22.811836Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:22.811949Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:22.814348Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:22.939704Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:22.939732Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:22.939741Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:22.939847Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20697 TClient is connected to server localhost:20697 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:04:23.460070Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:23.472585Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:23.527193Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:23.679398Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:23.693739Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:23.772553Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:26.357178Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624733441955755:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.357254Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.415831Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.461062Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.507729Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.553454Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.582807Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.660301Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.732352Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.824310Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624733441956425:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.824398Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.824693Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624733441956430:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.828276Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:26.844776Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624733441956432:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:26.942298Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624733441956483:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:27.663282Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624716262084982:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:27.663390Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpYql::Discard [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DYNUMBER [GOOD] >> KqpYql::TestUuidPrimaryKeyPrefixSearch [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_14_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 14] [GOOD] >> KqpPragma::Auth [GOOD] >> KqpPragma::MatchRecognizeWithTimeOrderRecoverer >> BackupPathTest::FilterByPathFailsWhenNoSchemaMapping [GOOD] >> KqpYql::PgIntPrimaryKey [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::TestUuidPrimaryKeyPrefixSearch [GOOD] Test command err: Trying to start YDB, gRPC: 5084, MsgBus: 15582 2025-06-24T21:04:24.676932Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624721896502424:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:24.687817Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f84/r3tmp/tmpd2mrzr/pdisk_1.dat 2025-06-24T21:04:25.227140Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5084, node 1 2025-06-24T21:04:25.246697Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:25.246798Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:25.251973Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:25.387590Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:25.387610Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:25.387619Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:25.387718Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15582 2025-06-24T21:04:25.693382Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15582 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:26.156044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:26.176396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:28.465175Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624739076372138:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:28.465294Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:28.742402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:28.894422Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624739076372243:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:28.894517Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:28.894872Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624739076372248:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:28.899411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:28.927071Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624739076372250:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:04:28.989683Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624739076372301:2396] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:29.654643Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624721896502424:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:29.654715Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::Discard [GOOD] Test command err: Trying to start YDB, gRPC: 11283, MsgBus: 5423 2025-06-24T21:04:18.055536Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624697258362799:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:18.066493Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002fe4/r3tmp/tmpcwS0pF/pdisk_1.dat 2025-06-24T21:04:18.450506Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11283, node 1 2025-06-24T21:04:18.507728Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:18.507832Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:18.513115Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:18.597222Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:18.597248Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:18.597280Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:18.597437Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5423 2025-06-24T21:04:19.074207Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5423 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:19.258944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:19.292921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:19.446243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:19.608244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:19.679012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:21.556993Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624710143266289:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:21.557123Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:21.871719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:21.920402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:21.959421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:21.992346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:22.033165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:22.110714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:22.156307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:22.243688Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624714438234246:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:22.243781Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:22.244066Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624714438234251:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:22.248637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:22.264229Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624714438234253:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:22.349048Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624714438234304:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:23.043568Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624697258362799:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:23.043619Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 26897, MsgBus: 10402 2025-06-24T21:04:24.452730Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624722236841150:2174];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002fe4/r3tmp/tmpcxBWIG/pdisk_1.dat 2025-06-24T21:04:24.507097Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:04:24.593777Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:24.599348Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624722236841001:2079] 1750799064389595 != 1750799064389598 2025-06-24T21:04:24.609030Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:24.609145Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 26897, node 2 2025-06-24T21:04:24.613527Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:24.683653Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:24.683673Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:24.683683Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:24.683804Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10402 TClient is connected to server localhost:10402 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:25.141989Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:25.149304Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:25.154618Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:25.259031Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:25.442915Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:25.452525Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 
2025-06-24T21:04:25.526449Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:27.784023Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624735121744520:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:27.784113Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:27.843425Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:27.879209Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:27.918227Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:27.956643Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:27.995997Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:28.050992Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:28.117303Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:28.226076Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624739416712475:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:28.226164Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:28.226385Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624739416712480:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:28.229289Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:28.243297Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624739416712482:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:28.301830Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624739416712533:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:29.427374Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624722236841150:2174];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:29.440775Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:29.465786Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519624743711680104:2476], status: GENERIC_ERROR, issues:
: Error: Execution, code: 1060
:2:13: Error: DISCARD not supported in YDB queries, code: 2008 2025-06-24T21:04:29.467583Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=OGM0YjBiMC0zYzJhOGEwMy1hYjJiNTBkYS0yN2NhNjk3MQ==, ActorId: [2:7519624743711680097:2472], ActorState: ExecuteState, TraceId: 01jyhw3c66dk9kh9qttejqs88p, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpPragma::MatchRecognizeWithoutTimeOrderRecoverer [GOOD] Test command err: Trying to start YDB, gRPC: 62326, MsgBus: 64101 2025-06-24T21:04:15.460656Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624685127484985:2156];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:15.460726Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ff9/r3tmp/tmpLFsuuA/pdisk_1.dat 2025-06-24T21:04:15.921196Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:15.921321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:15.929704Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:15.936097Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624685127484855:2079] 1750799055444305 != 1750799055444308 2025-06-24T21:04:15.956593Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62326, node 1 2025-06-24T21:04:16.174028Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:16.174050Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:16.174056Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:16.174167Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64101 2025-06-24T21:04:16.463536Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:64101 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:16.908284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:16.944620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:17.109625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:17.287641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:17.379616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:19.120790Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624702307355670:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:19.120896Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:19.467046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:19.498416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:19.532120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:19.572124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:19.598058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:19.647963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:19.735510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:19.808284Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624702307356333:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:19.808379Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:19.809223Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624702307356338:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:19.813455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:19.827114Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624702307356340:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:19.897270Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624702307356391:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:20.465577Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624685127484985:2156];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:20.465678Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:21.152376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:21.374221Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotMa ... 2361Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ff9/r3tmp/tmp5CHquB/pdisk_1.dat 2025-06-24T21:04:22.590504Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:22.595372Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624715618153179:2079] 1750799062408319 != 1750799062408322 2025-06-24T21:04:22.615479Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:22.615561Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:22.621698Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3382, node 2 2025-06-24T21:04:22.695738Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:22.695770Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:22.695778Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:22.695897Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25388 TClient is connected to server localhost:25388 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:23.313361Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:23.320257Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:23.331572Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:23.429358Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:23.514141Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:23.605324Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:23.727754Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:26.183901Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624732798024001:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.183979Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.249267Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.288788Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.324298Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.414415Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.468482Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.530211Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.605984Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.706902Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624732798024669:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.706990Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.707275Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624732798024674:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.712017Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:26.732085Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624732798024676:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:26.805750Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624732798024727:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:27.433793Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624715618153255:2087];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:27.433871Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:28.262431Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:29.190106Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799069166, txId: 281474976710674] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestore::TestAllPrimitiveTypes-DYNUMBER [GOOD] Test command err: 2025-06-24T21:02:04.073145Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624123215333963:2147];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmpmTnvBr/pdisk_1.dat 2025-06-24T21:02:04.342053Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:02:04.593661Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:04.593792Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:04.662363Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:04.687277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:02:04.740418Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 13607, node 1 2025-06-24T21:02:05.171485Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:02:05.190832Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:02:05.190858Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:02:05.190866Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
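The [WorkloadService] warnings repeated above follow one pattern: TPoolFetcherActor reports NOT_FOUND for the pool "default", the service proposes ESchemeOpCreateResourcePool, TPoolCreatorActor schedules a doublecheck retry, and the follow-up request then fails with "path exist, request accepts it" because the pool is already in place. Read together, these lines look like on-demand bootstrap of /Root/.metadata/workload_manager/pools/default rather than a test failure. Whether the pool actually exists can be checked against a running cluster; a minimal sketch using the YDB CLI, with the endpoint as a placeholder and the option spellings recalled from memory rather than taken from this log:

  ydb -e grpc://<endpoint>:2135 -d /Root scheme describe .metadata/workload_manager/pools/default

A SUCCESS response whose path type is EPathTypeResourcePool would match the "path exist, request accepts it" message in the TX_PROXY line above.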
2025-06-24T21:02:05.190982Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62550 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:02:05.563394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:02:07.924961Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624136100236753:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:07.925088Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:08.147325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Backup "/Root" to "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmp8tgrrI/"Create temporary directory "/Root/~backup_20250624T210208" in databaseProcess "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmp8tgrrI/table"Copy tables: { src: "/Root/table", dst: "/Root/~backup_20250624T210208/table" }Describe table "/Root/table"Describe table "/Root/~backup_20250624T210208/table"Backup table "/Root/~backup_20250624T210208/table" to "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmp8tgrrI/table"Write scheme into "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmp8tgrrI/table/scheme.pb"Write ACL into "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmp8tgrrI/table/permissions.pb"Read table "/Root/~backup_20250624T210208/table"Write data into "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmp8tgrrI/table/data_00.csv"Drop table "/Root/~backup_20250624T210208/table"2025-06-24T21:02:08.856542Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037893 not found 2025-06-24T21:02:08.859201Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037892 not found 2025-06-24T21:02:08.859225Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found Remove temporary directory "/Root/~backup_20250624T210208" in database2025-06-24T21:02:08.890526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:67) Backup completed successfully2025-06-24T21:02:08.909342Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624140395205147:2364], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:08.909417Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:08.998510Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-24T21:02:08.998533Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-24T21:02:08.998547Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found Restore "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmp8tgrrI/" to "/Root"2025-06-24T21:02:09.041295Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624123215333963:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:09.041363Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Resolved db base path: "/Root"List of entries in the backup: [{"type":"Directory","path":"/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmp8tgrrI/"},{"type":"Table","path":"/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmp8tgrrI/table"}]Process "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmp8tgrrI/table"Read scheme from "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmp8tgrrI/table/scheme.pb"Restore table "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmp8tgrrI/table" to "/Root/table"2025-06-24T21:02:09.110935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Created "/Root/table"Read data from "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmp8tgrrI/table/data_00.csv"Restore index "byValue" on "/Root/table"2025-06-24T21:02:09.213748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:09.319393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:09.413501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710760:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:09.608736Z node 1 :FLAT_TX_SCHEMESHARD 
WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710762:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:02:09.714665Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037898 not found 2025-06-24T21:02:09.714703Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037897 not found Restore ACL "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmp8tgrrI/table" to "/Root/table"Read ACL from "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmp8tgrrI/table/permissions.pb"2025-06-24T21:02:09.993816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Restore completed suc ... er/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Restore completed successfully2025-06-24T21:04:19.324391Z node 40 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jyhw325m2r3qmypfa7f70rth, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=40&id=ZjFkZDU1NmEtZjViM2ZkOGQtNDEzZGIyYmItYTU4M2RjYmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:04:21.450715Z node 43 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[43:7519624708971305172:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:21.450793Z node 43 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmpMlt04N/pdisk_1.dat 2025-06-24T21:04:21.656314Z node 43 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:21.681472Z node 43 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(43, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:21.681585Z node 43 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(43, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:21.691820Z node 43 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(43, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11332, node 43 2025-06-24T21:04:21.755250Z node 43 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:21.755272Z node 43 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:21.755283Z node 43 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:21.755468Z node 43 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23209 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:22.126179Z node 43 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:04:22.491300Z node 43 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:26.451335Z node 43 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[43:7519624708971305172:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:26.451416Z node 43 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:26.765678Z node 43 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [43:7519624730446142720:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.765788Z node 43 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [43:7519624730446142735:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.765863Z node 43 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.771059Z node 43 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:26.812892Z node 43 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [43:7519624730446142749:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:04:26.894242Z node 43 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [43:7519624730446142825:2688] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:26.930308Z node 43 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:27.176079Z node 43 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhw39y08k06vw90x23v12q0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=43&id=NTViMjY1OTEtYWJhNTg2MjUtZmY3MDJlNC1iMWQ2ZjUxNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:04:27.380529Z node 43 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jyhw3a1p2sxbx5kebbbh9j8e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=43&id=NTViMjY1OTEtYWJhNTg2MjUtZmY3MDJlNC1iMWQ2ZjUxNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Backup "/Root" to "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmpsrF4WM/"Create temporary directory "/Root/~backup_20250624T210427" in databaseProcess "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmpsrF4WM/DyNumberTable"Copy tables: { src: "/Root/DyNumberTable", dst: "/Root/~backup_20250624T210427/DyNumberTable" }Describe table "/Root/DyNumberTable"Describe table "/Root/~backup_20250624T210427/DyNumberTable"Backup table "/Root/~backup_20250624T210427/DyNumberTable" to "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmpsrF4WM/DyNumberTable"Write scheme into "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmpsrF4WM/DyNumberTable/scheme.pb"Write ACL into "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmpsrF4WM/DyNumberTable/permissions.pb"Read table "/Root/~backup_20250624T210427/DyNumberTable"Write data into "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmpsrF4WM/DyNumberTable/data_00.csv"Drop table "/Root/~backup_20250624T210427/DyNumberTable"2025-06-24T21:04:28.080102Z node 43 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 43, TabletId: 72075186224037889 not found Remove temporary directory "/Root/~backup_20250624T210427" in database2025-06-24T21:04:28.129342Z node 43 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:67) Backup completed successfullyRestore "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmpsrF4WM/" to "/Root"2025-06-24T21:04:28.275944Z node 43 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 43, TabletId: 72075186224037888 not found Resolved db base path: "/Root"List of 
entries in the backup: [{"type":"Directory","path":"/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmpsrF4WM/"},{"type":"Table","path":"/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmpsrF4WM/DyNumberTable"}]Process "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmpsrF4WM/DyNumberTable"Read scheme from "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmpsrF4WM/DyNumberTable/scheme.pb"Restore table "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmpsrF4WM/DyNumberTable" to "/Root/DyNumberTable"2025-06-24T21:04:28.353876Z node 43 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Created "/Root/DyNumberTable"Read data from "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmpsrF4WM/DyNumberTable/data_00.csv"2025-06-24T21:04:28.653001Z node 43 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710671. Ctx: { TraceId: 01jyhw3bc0bhch35zn3e0dt5np, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=43&id=NTVjODIyYzEtYzkyYzZhNTYtM2ZjZmFmMTctNTBkMzE5NDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Restore ACL "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmpsrF4WM/DyNumberTable" to "/Root/DyNumberTable"Read ACL from "/home/runner/.ya/build/build_root/2btm/004596/r3tmp/tmpsrF4WM/DyNumberTable/permissions.pb"2025-06-24T21:04:28.724405Z node 43 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Restore completed successfully2025-06-24T21:04:28.908948Z node 43 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710673. Ctx: { TraceId: 01jyhw3bjbdvv9dt4kx6a4n3j1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=43&id=NTViMjY1OTEtYWJhNTg2MjUtZmY3MDJlNC1iMWQ2ZjUxNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> KqpYql::InsertIgnore >> BackupRestore::TestReplaceRestoreOption [GOOD] >> BackupRestore::TestReplaceRestoreOptionOnNonExistingSchemeObjects >> KqpYql::BinaryJsonOffsetNormal >> KqpScripting::StreamScanQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::PgIntPrimaryKey [GOOD] Test command err: Trying to start YDB, gRPC: 23173, MsgBus: 25824 2025-06-24T21:04:18.605625Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624697754576611:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:18.606417Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002fad/r3tmp/tmpg4punp/pdisk_1.dat 2025-06-24T21:04:18.996492Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624697754576489:2079] 1750799058585228 != 1750799058585231 2025-06-24T21:04:19.040361Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:19.068416Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:19.068523Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 23173, node 1 2025-06-24T21:04:19.070214Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:19.247399Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:19.247419Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:19.247435Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:19.247551Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25824 2025-06-24T21:04:19.608406Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25824 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
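The BackupRestore test traces earlier in this block spell out the dump/restore flow step by step: copy the table into a temporary "/Root/~backup_<timestamp>/..." directory, write scheme.pb, permissions.pb and data_00.csv for it, drop the temporary copy and remove the temporary directory, then on restore re-create the table from scheme.pb, load data_00.csv and re-apply the ACL from permissions.pb. A comparable flow can be driven by hand with the YDB CLI; a minimal sketch, assuming a reachable endpoint and a database at /Root (endpoint, paths and option spellings are placeholders from memory, not taken from this log):

  ydb -e grpc://<endpoint>:2135 -d /Root tools dump -p /Root/DyNumberTable -o ./backup_dir
  ydb -e grpc://<endpoint>:2135 -d /Root tools restore -p /Root -i ./backup_dir

The file names in the trace (scheme.pb, permissions.pb, data_00.csv) match the per-table layout that ydb tools dump writes, which suggests the unit test exercises the same backup format as the CLI.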
2025-06-24T21:04:20.000420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:20.053307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:20.230378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:04:20.399840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:20.487897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:22.326044Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624714934447317:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:22.326192Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:22.746805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:22.805782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:22.846689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:22.903779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:22.949540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:23.022914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:23.087027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:23.207272Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624719229415275:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:23.207347Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:23.207822Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624719229415280:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:23.212059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:23.226458Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624719229415282:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:23.300605Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624719229415335:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:23.599257Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624697754576611:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:23.599365Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Error: Optimization, code: 1070
:4:20: Error: RefSelect mode isn't supported by provider: kikimr Trying to start YDB, gRPC: 25961, MsgBus: 6809 2025-06-24T21:04:25.578212Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624728023347849:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:25.662110Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002fad/r3tmp/tmpY2jhu5/pdisk_1.dat 2025-06-24T21:04:25.811272Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624728023347634:2079] 1750799065504956 != 1750799065504959 2025-06-24T21:04:25.858741Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:25.859728Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:25.859795Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:25.864133Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25961, node 2 2025-06-24T21:04:26.056220Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:26.056244Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:26.056249Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:26.056364Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6809 2025-06-24T21:04:26.501747Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6809 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-24T21:04:26.661214Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:04:26.665442Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:29.081237Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624745203217452:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:29.081334Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:29.096360Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:29.190393Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624745203217555:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:29.190505Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:29.190720Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624745203217560:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:29.195644Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:29.207248Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624745203217562:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:04:29.270641Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624745203217613:2390] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:30.551528Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624728023347849:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:30.551629Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpScripting::StreamExecuteYqlScriptScanCancelAfterBruteForce [GOOD] >> ListObjectsInS3Export::ExportWithWrongEncryptionKey [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATE [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME >> BackupPathTest::OnlyOneEmptyDirectory >> KqpPragma::ResetPerQuery [GOOD] >> KqpPragma::Warning >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldNotCompactAfterDrop [GOOD] >> KqpYql::EvaluateExpr2 >> KqpScripting::QueryStats >> EncryptedBackupParamsValidationTest::NoSourcePrefixEncrypted [GOOD] >> KqpYql::ScriptUdf >> KqpYql::UuidPrimaryKey ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptScanCancelAfterBruteForce [GOOD] Test command err: Trying to start YDB, gRPC: 17112, MsgBus: 28547 2025-06-24T21:04:18.523820Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624699024873002:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:18.523946Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002fac/r3tmp/tmpDANOMY/pdisk_1.dat 2025-06-24T21:04:18.942011Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:18.948818Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624699024872983:2079] 1750799058522871 != 1750799058522874 2025-06-24T21:04:18.958435Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:18.958539Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:18.964175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17112, node 1 2025-06-24T21:04:19.229564Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:19.229599Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:19.229608Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:19.229764Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration TClient is connected to server localhost:28547 2025-06-24T21:04:19.545824Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28547 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:20.007195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:20.024618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:20.039088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:20.254222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:20.445442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:20.526630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:22.469546Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624716204743807:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:22.469669Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:22.826776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:22.906592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:22.951756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:23.025323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:23.064133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:23.144162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:23.225558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:23.339403Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624720499711780:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:23.339520Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:23.341667Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624720499711785:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:23.346650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:23.359505Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624720499711787:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:23.455485Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624720499711840:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:23.528021Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624699024873002:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:23.528090Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:25.135566Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799065141, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 8491, MsgBus: 17574 2025-06-24T21:04:26.062084Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624733929538184:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:26.062135Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002fac/r3tmp/tmpquEvc4/pdisk_1.dat 2025-06-24T21:04:26.326882Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:26.330855Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624733929538162:2079] 1750799066058136 != 1750799066058139 2025-06-24T21:04:26.336812Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:26.336918Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:26.339580Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8491, node 2 2025-06-24T21:04:26.403928Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:26.403949Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:26.403956Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:26.404064Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17574 TClient is connected to server localhost:17574 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:27.032776Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:27.056715Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:27.104682Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:27.125914Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:27.300856Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:27.386871Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:29.656324Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624746814441693:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:29.656420Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:29.718481Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:29.755606Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:29.797344Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:29.857133Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:29.898291Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:29.965837Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:30.017055Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:30.147283Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624751109409655:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:30.147393Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:30.147523Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624751109409660:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:30.150485Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:30.161728Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624751109409662:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:30.255970Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624751109409713:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:31.079472Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624733929538184:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:31.079595Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:31.628765Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799071651, txId: 281474976715672] shutting down 2025-06-24T21:04:31.882698Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799071910, txId: 281474976715674] shutting down 2025-06-24T21:04:32.181065Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799072190, txId: 281474976715676] shutting down >> KqpScripting::StreamExecuteYqlScriptMixed [GOOD] >> KqpScripting::StreamExecuteYqlScriptLeadingEmptyScan >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> KqpScripting::ScriptingCreateAndAlterTableTest >> test_alter_compression.py::TestAlterCompression::test[alter_compression] [GOOD] >> KqpYql::EvaluateExprYsonAndType [GOOD] >> test_alter_compression.py::TestAlterCompression::test_multi[alter_compression] [GOOD] >> ListObjectsInS3Export::PagingParameters ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldNotCompactAfterDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:02:59.228766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:02:59.228926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:02:59.228980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:02:59.229037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:02:59.229878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type 
TxMergeTablePartition, limit 10000 2025-06-24T21:02:59.229938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:02:59.230027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:02:59.230139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:02:59.231010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:02:59.232546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:02:59.326401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:02:59.326470Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:59.351617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:02:59.356332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:02:59.356537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:02:59.370129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:02:59.370408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:02:59.371110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:02:59.371454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:02:59.383388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:02:59.384642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:02:59.403525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:02:59.403660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:02:59.403823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:02:59.403884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:02:59.403930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 
2025-06-24T21:02:59.404107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:02:59.412431Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:02:59.549292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:02:59.550540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:02:59.551804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:02:59.551883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:02:59.553228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:02:59.553331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:02:59.557535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:02:59.558476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:02:59.558718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:02:59.558865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:02:59.558929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:02:59.558982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:02:59.561328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:02:59.561396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:02:59.561441Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:02:59.563469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:02:59.563521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:02:59.563568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:02:59.563626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:02:59.569095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:02:59.571669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:02:59.571901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:02:59.574277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:02:59.574442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:02:59.574496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:02:59.575967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:02:59.576045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:02:59.576263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:02:59.576407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:02:59.579372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-24T21:02:59.579430Z node 1 :FLAT_TX_SCHEMESHARD ... Id: 72057594046678944, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 100, DataSize 13940 2025-06-24T21:04:32.834218Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409546, followerId 0 2025-06-24T21:04:32.834304Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:219: [BackgroundCompaction] [Update] Updated shard# 72057594046678944:1 with partCount# 1, rowCount# 100, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:18.000000Z at schemeshard 72057594046678944 2025-06-24T21:04:32.834386Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:477: Do not want to split tablet 72075186233409546 by size, its table already has 1 out of 1 partitions 2025-06-24T21:04:32.834476Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:04:32.847627Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:04:32.847707Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:04:32.847742Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-24T21:04:33.242221Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:04:33.242297Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:04:33.242390Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:312:2297]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-24T21:04:33.242529Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:127:2151], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:04:33.242557Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:04:33.285912Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: operation_queue_timer.h:92: Operation queue wakeup 2025-06-24T21:04:33.286082Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_compaction.cpp:31: [BackgroundCompaction] [Start] Compacting for pathId# [OwnerId: 72057594046678944, LocalPathId: 2], datashard# 72075186233409546, compactionInfo# {72057594046678944:1, SH# 1, Rows# 100, Deletes# 0, Compaction# 1970-01-01T00:00:18.000000Z}, next wakeup in# 0.000000s, rate# 1, in queue# 1 shards, waiting after compaction# 0 shards, running# 0 shards at schemeshard 72057594046678944 2025-06-24T21:04:33.286231Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: operation_queue_timer.h:84: Operation queue set wakeup after delta# 30 seconds 2025-06-24T21:04:33.286527Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553210, Sender [3:127:2151], Recipient [3:312:2297]: NKikimrTxDataShard.TEvCompactTable PathId { OwnerId: 72057594046678944 
LocalId: 2 } CompactSinglePartedShards: true 2025-06-24T21:04:33.286744Z node 3 :TX_DATASHARD INFO: datashard__compaction.cpp:141: Started background compaction# 7 of 72075186233409546 tableId# 2 localTid# 1001, requested from [3:127:2151], partsCount# 1, memtableSize# 0, memtableWaste# 0, memtableRows# 0 2025-06-24T21:04:33.288005Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186233409546, table# 1001, finished edge# 6, ts 1970-01-01T00:00:19.153000Z 2025-06-24T21:04:33.288090Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186233409546, table# 1001, finished edge# 6, front# 7 2025-06-24T21:04:33.300404Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435080, Sender [3:1265:3199], Recipient [3:312:2297]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvAsyncTableStats 2025-06-24T21:04:33.300551Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186233409546, FollowerId 0, tableId 2 2025-06-24T21:04:33.301795Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:312:2297], Recipient [3:127:2151]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409546 TableLocalId: 2 Generation: 2 Round: 6 TableStats { DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 19 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 22302 Memory: 124232 Storage: 14156 } ShardState: 2 UserTablePartOwners: 72075186233409546 NodeId: 3 StartTime: 41 TableOwnerId: 72057594046678944 FollowerId: 0 2025-06-24T21:04:33.301854Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:04:33.301921Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 13940 rowCount 100 cpuUsage 2.2302 2025-06-24T21:04:33.302055Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] raw table stats: DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 19 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:04:33.302100Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-24T21:04:33.310068Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, 
received event# 268828683, Sender [3:302:2289], Recipient [3:312:2297]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-24T21:04:33.321504Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186233409546, table# 1001, finished edge# 7, ts 1970-01-01T00:00:20.153000Z 2025-06-24T21:04:33.321580Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186233409546, table# 1001, finished edge# 7, front# 7 2025-06-24T21:04:33.321621Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:260: ReplyCompactionWaiters of tablet# 72075186233409546, table# 1001 sending TEvCompactTableResult to# [3:127:2151]pathId# [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:04:33.321904Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553211, Sender [3:312:2297], Recipient [3:127:2151]: NKikimrTxDataShard.TEvCompactTableResult TabletId: 72075186233409546 PathId { OwnerId: 72057594046678944 LocalId: 2 } Status: OK 2025-06-24T21:04:33.321937Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5027: StateWork, processing event TEvDataShard::TEvCompactTableResult 2025-06-24T21:04:33.322005Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: operation_queue_timer.h:84: Operation queue set wakeup after delta# 0 seconds 2025-06-24T21:04:33.322047Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_compaction.cpp:112: [BackgroundCompaction] [Finished] Compaction completed for pathId# [OwnerId: 72057594046678944, LocalPathId: 2], datashard# 72075186233409546, shardIdx# 72057594046678944:1 in# 4 ms, with status# 0, next wakeup in# 0.996000s, rate# 1, in queue# 1 shards, waiting after compaction# 0 shards, running# 0 shards at schemeshard 72057594046678944 2025-06-24T21:04:33.324398Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [3:302:2289], Recipient [3:312:2297]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-24T21:04:33.344155Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:189: Updated last full compaction of tablet# 72075186233409546, tableId# 2, last full compaction# 1970-01-01T00:00:20.153000Z 2025-06-24T21:04:33.400844Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:04:33.400932Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:04:33.400983Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 1 2025-06-24T21:04:33.401088Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-24T21:04:33.401160Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-06-24T21:04:33.401328Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 13940 row count 100 2025-06-24T21:04:33.401416Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, 
RowCount 100, DataSize 13940 2025-06-24T21:04:33.401455Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409546, followerId 0 2025-06-24T21:04:33.401547Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:219: [BackgroundCompaction] [Update] Updated shard# 72057594046678944:1 with partCount# 1, rowCount# 100, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:19.000000Z at schemeshard 72057594046678944 2025-06-24T21:04:33.401634Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:477: Do not want to split tablet 72075186233409546 by size, its table already has 1 out of 1 partitions 2025-06-24T21:04:33.401726Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:04:33.413529Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:04:33.413615Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:04:33.413651Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::EvaluateExprYsonAndType [GOOD] Test command err: Trying to start YDB, gRPC: 23815, MsgBus: 2712 2025-06-24T21:04:21.790454Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624709625474735:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:21.791389Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f8b/r3tmp/tmph5uhwr/pdisk_1.dat 2025-06-24T21:04:22.109268Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:22.111301Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624709625474624:2079] 1750799061768202 != 1750799061768205 TServer::EnableGrpc on GrpcPort 23815, node 1 2025-06-24T21:04:22.178619Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:22.178953Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:22.190087Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:22.298081Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:22.298101Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:22.298115Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:22.298241Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2712 2025-06-24T21:04:22.787253Z node 
1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2712 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:23.096453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:23.123789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:04:23.275809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:23.458633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:23.556002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:25.577798Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624726805345456:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:25.577909Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.048530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.104887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.171062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.214163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.263790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.317264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.361216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:26.496297Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624731100313417:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.496400Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.496943Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624731100313422:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.501995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:26.521407Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624731100313424:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:26.613892Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624731100313477:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:26.773625Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624709625474735:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:26.773705Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 28842, MsgBus: 28966 2025-06-24T21:04:28.869214Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624743155759206:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:28.869422Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f8b/r3tmp/tmpWCLQiA/pdisk_1.dat 2025-06-24T21:04:29.144967Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:29.182339Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:29.182425Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 28842, node 2 2025-06-24T21:04:29.205195Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:29.274983Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:29.275004Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:29.275012Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:29.275119Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28966 TClient is connected to server localhost:28966 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:04:29.837521Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:04:29.852723Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:04:29.859684Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:29.877500Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:04:29.958929Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:30.137818Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:30.258113Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
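The stderr above packs several YDB log records onto each physical line, and most of them are a handful of WARN templates repeated for every test that touches the default resource pool. A minimal Python triage sketch (standard library only; the file name ya_test_stderr.log is a placeholder for wherever this stderr is saved) that counts WARN/ERROR records per component:

import re
from collections import Counter

# A YDB log record header looks like:
#   2025-06-24T21:04:26.496943Z node 1 :KQP_WORKLOAD_SERVICE WARN: ...
# Assumption: this shape holds for all records of interest here.
HEADER = re.compile(
    r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z node (\d+) "
    r":([A-Z_]+) ([A-Z]+):"
)

def summarize(path: str) -> Counter:
    """Count WARN/ERROR records per (component, severity) to see what dominates."""
    counts = Counter()
    with open(path, encoding="utf-8", errors="replace") as fh:
        for line in fh:
            # Several records can share one physical line, so findall scans the whole line.
            for _node, component, severity in HEADER.findall(line):
                if severity in ("WARN", "ERROR"):
                    counts[(component, severity)] += 1
    return counts

if __name__ == "__main__":
    for (component, severity), n in summarize("ya_test_stderr.log").most_common(10):
        print(f"{n:6d}  {severity:5s}  {component}")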
2025-06-24T21:04:32.459434Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624760335629976:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:32.459524Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:32.530377Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:32.611689Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:32.701452Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:32.739787Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:32.813899Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:32.890190Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:32.950226Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:33.049488Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624764630597939:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:33.049577Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:33.049803Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624764630597944:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:33.053880Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:33.086078Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624764630597946:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:33.154356Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624764630597997:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:33.871692Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624743155759206:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:33.871807Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpYql::UpdateBadType >> KqpScripting::LimitOnShard [GOOD] >> KqpScripting::NoAstSizeLimit >> KqpScripting::ScriptValidate >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> KqpScripting::StreamExecuteYqlScriptSeveralQueries >> KqpPragma::MatchRecognizeWithTimeOrderRecoverer [GOOD] >> KqpYql::InsertIgnore [GOOD] >> KqpYql::JsonCast >> KqpYql::BinaryJsonOffsetNormal [GOOD] >> KqpYql::Closure >> KqpScripting::StreamExecuteYqlScriptOperationTmeoutBruteForce [GOOD] >> KqpScripting::StreamExecuteYqlScriptPg >> KqpScripting::StreamScanQuery [GOOD] >> KqpScripting::SyncExecuteYqlScriptSeveralQueries >> BackupPathTest::OnlyOneEmptyDirectory [GOOD] >> KqpPragma::Warning [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpPragma::MatchRecognizeWithTimeOrderRecoverer [GOOD] Test command err: Trying to start YDB, gRPC: 16934, MsgBus: 19718 2025-06-24T21:04:24.569176Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624722732940262:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:24.569462Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f88/r3tmp/tmpNa8dht/pdisk_1.dat 2025-06-24T21:04:25.033593Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:25.033651Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:25.040303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:25.088271Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16934, node 1 2025-06-24T21:04:25.093715Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624722732940060:2079] 1750799064514301 != 1750799064514304 2025-06-24T21:04:25.275714Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:25.275741Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:25.275763Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:25.275916Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:04:25.496483Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19718 TClient is connected to server localhost:19718 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:25.966384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:26.011012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:26.224236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:26.458830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:26.573024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
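The cluster of records that keeps repeating above — TPoolFetcherActor reports NOT_FOUND for PoolId default, an ESchemeOpCreateResourcePool suboperation is proposed, TX_PROXY answers "path exist, request accepts it", and TPoolCreatorActor schedules a retry — is an ensure-exists loop: each session tries to create /Root/.metadata/workload_manager/pools/default on first use and treats a concurrent creation as success. A sketch of that control flow only, with hypothetical fetch/create callables standing in for the real KQP actors:

import time

class NotFound(Exception):
    """Raised by fetch() when the pool path does not exist yet."""

class AlreadyExists(Exception):
    """Raised by create() when another session created the pool first."""

def ensure_default_pool(fetch, create, retries: int = 5, base_delay: float = 0.1):
    """Idempotent ensure-exists loop mirroring the log records above:
    NOT_FOUND on fetch triggers a create; losing the creation race
    ('path exist, request accepts it') counts as success, then we re-fetch."""
    for attempt in range(retries):
        try:
            return fetch()              # TPoolFetcherActor in the log
        except NotFound:
            try:
                create()                # ESchemeOpCreateResourcePool in the log
            except AlreadyExists:
                pass                    # another session won the race; that is fine
        time.sleep(base_delay * (attempt + 1))   # "Scheduled retry for error" in the log
    raise TimeoutError("default resource pool did not become visible")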
2025-06-24T21:04:28.348470Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624739912810892:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:28.348615Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:28.728149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:28.766508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:28.833030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:28.874050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:28.949280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:29.026308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:29.106278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:29.203535Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624744207778854:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:29.203619Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:29.203780Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624744207778859:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:29.207697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:29.262354Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624744207778861:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:29.318766Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624744207778912:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:29.568798Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624722732940262:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:29.568864Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:30.473551Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519624748502746490:2480], status: GENERIC_ERROR, issues:
: Error: Pre type annotation, code: 1020
:2:34: Error: Pragma auth not supported inside Kikimr query., code: 2016 2025-06-24T21:04:30.475452Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZGY3OGE2N2QtZGFkY2JhMjItZjQ3ZDVhNmItMzhlZDQzYzk=, ActorId: [1:7519624748502746482:2475], ActorSt ... ADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624755546089345:2170];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f88/r3tmp/tmpPJdvG6/pdisk_1.dat 2025-06-24T21:04:31.575444Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:04:31.640436Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24762, node 2 2025-06-24T21:04:31.671773Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:31.671861Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:31.685543Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:31.771857Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:31.771884Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:31.771892Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:31.772014Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12876 TClient is connected to server localhost:12876 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:32.361762Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:04:32.372356Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:32.390161Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:32.492972Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:32.574263Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:32.685848Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:32.780261Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:35.357345Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624772725960008:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:35.357437Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:35.394115Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:35.435119Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:35.475444Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:35.525999Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:35.568058Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:35.630878Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:35.719662Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:35.827665Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624772725960671:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:35.827810Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:35.830501Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624772725960676:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:35.844120Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:35.861205Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624772725960678:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:35.948990Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624772725960729:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:36.509695Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624755546089345:2170];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:36.509799Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:37.362315Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.247402Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799078280, txId: 281474976710674] shutting down >> KqpYql::ScriptUdf [GOOD] >> KqpYql::SelectNoAsciiValue >> KqpYql::EvaluateExpr2 [GOOD] >> KqpYql::EvaluateExpr3 >> KqpYql::UuidPrimaryKey [GOOD] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_9_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 9] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpPragma::Warning [GOOD] Test command err: Trying to start YDB, gRPC: 63912, MsgBus: 26864 2025-06-24T21:04:26.227865Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624731893975308:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:26.228240Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f80/r3tmp/tmpSw2JN9/pdisk_1.dat 2025-06-24T21:04:26.852739Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:26.855312Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624731893975125:2079] 1750799066181462 != 1750799066181465 2025-06-24T21:04:26.868873Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:26.869023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:26.875015Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63912, node 1 2025-06-24T21:04:27.002307Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will 
use file: (empty maybe) 2025-06-24T21:04:27.002359Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:27.002366Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:27.002500Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:04:27.207359Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26864 TClient is connected to server localhost:26864 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:27.764378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:27.784131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:27.945494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:04:28.124838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:28.206793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:29.872034Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624744778878637:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:29.872162Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:30.272249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:30.316246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:30.356161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:30.404464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:30.444570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:30.485103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:30.525031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:30.597826Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624749073846592:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:30.597938Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:30.598229Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624749073846597:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:30.603755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:30.621278Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624749073846599:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:30.708230Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624749073846650:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:31.205142Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624731893975308:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:31.205274Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:32.609932Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519624757663781546:2487], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: PersistableRepr, At function: SqlProject
:2:50: Error: At function: AssumeColumnOrderPartial, At function: Aggregate, At tuple
:2:20: Error: At tuple /lib/yql/aggregate.yqls:650:12: Erro ... 24T21:04:32.611434Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ODJhMmJiNmItNDg1NjY3ZWYtOWMyY2RhNTctOWJhY2Q3MTU=, ActorId: [1:7519624753368814218:2474], ActorState: ExecuteState, TraceId: 01jyhw3f995a3wwy8ypksn76y2, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 12690, MsgBus: 18217 2025-06-24T21:04:33.615620Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624762364973407:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:33.624125Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f80/r3tmp/tmpdkUjY0/pdisk_1.dat 2025-06-24T21:04:33.763229Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:33.767339Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624762364973350:2079] 1750799073579551 != 1750799073579554 2025-06-24T21:04:33.776166Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:33.776257Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 12690, node 2 2025-06-24T21:04:33.783785Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:33.843748Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:33.843772Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:33.843779Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:33.843902Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18217 TClient is connected to server localhost:18217 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:04:34.674311Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
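Both GENERIC_ERROR records emitted by KQP_COMPILE_ACTOR above are expected negative results for this suite: the KqpPragma tests deliberately submit queries that must fail compilation ("Pragma auth not supported inside Kikimr query.", code 2016, and the type-annotation failure under /lib/yql/aggregate.yqls). The exact query text is not part of this log; purely as an illustration of the shape of such a rejected request and the values a checker would compare against:

# Hypothetical reconstruction; the real queries live in the test sources under
# ydb/core/kqp/ut/yql and are not reproduced in this log.
REJECTED_PRAGMA_QUERY = """
    PRAGMA auth = 'some_value';   -- rejected by KQP when used inside a query
    SELECT * FROM `/Root/Test`;
"""

EXPECTED_ISSUE = "Pragma auth not supported inside Kikimr query."
EXPECTED_ISSUE_CODE = 2016        # issue code reported by KQP_COMPILE_ACTOR above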
2025-06-24T21:04:34.747924Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:34.763591Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:34.778324Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:34.883546Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:35.107710Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:35.195448Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:37.474924Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624779544844159:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:37.475022Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:37.541363Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:37.583939Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:37.618872Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:37.666827Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:37.723209Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:37.771784Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:37.817166Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:37.913914Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624779544844817:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:37.914094Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:37.914497Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624779544844822:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:37.919559Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:37.961914Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624779544844824:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:38.040693Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624783839812171:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:38.619342Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624762364973407:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:38.619426Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpScripting::NoAstSizeLimit [GOOD] >> BackupPathTest::ExportRecursiveWithoutDestinationPrefix ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::UuidPrimaryKey [GOOD] Test command err: Trying to start YDB, gRPC: 19317, MsgBus: 23612 2025-06-24T21:04:34.358547Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624764939824497:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:34.359881Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f23/r3tmp/tmp7oOkty/pdisk_1.dat 2025-06-24T21:04:34.983272Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624764939824474:2079] 1750799074350222 != 1750799074350225 2025-06-24T21:04:34.986996Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:35.000125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:35.000217Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:35.031138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19317, node 1 2025-06-24T21:04:35.291636Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:35.291664Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:35.291676Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:35.291841Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:04:35.374228Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23612 TClient is connected to server localhost:23612 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:36.064777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:36.080021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:38.089256Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624782119694301:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.089401Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.443507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.604366Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624782119694406:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.604451Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.604840Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624782119694411:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.609678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:38.621822Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624782119694413:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:04:38.683631Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624782119694464:2395] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:39.081305Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519624786414661859:2329], status: GENERIC_ERROR, issues:
:3:25: Error: Invalid value "invalid-uuid" for type Uuid 2025-06-24T21:04:39.082851Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZDQwNjVlYTItZjM2N2ZkNjMtNjNmN2E3MjAtY2M2YTAzNzY=, ActorId: [1:7519624782119694283:2289], ActorState: ExecuteState, TraceId: 01jyhw3nmtbxn99pcj9764g6c4, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:04:39.356743Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624764939824497:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:39.356835Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpScripting::QueryStats [GOOD] >> KqpScripting::Pure >> KqpScripting::StreamExecuteYqlScriptLeadingEmptyScan [GOOD] >> KqpScripting::ScriptExplainCreatedTable >> KqpYql::DdlDmlMix >> KqpScripting::ScriptingCreateAndAlterTableTest [GOOD] >> KqpScripting::SecondaryIndexes >> KqpYql::UpdateBadType [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::NoAstSizeLimit [GOOD] Test command err: Trying to start YDB, gRPC: 23282, MsgBus: 21476 2025-06-24T21:04:30.228334Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624751130979735:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:30.228387Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f70/r3tmp/tmpg3rcoq/pdisk_1.dat 2025-06-24T21:04:30.731798Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:30.741246Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:30.741328Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:30.752587Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23282, node 1 2025-06-24T21:04:30.949654Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:30.949684Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:30.949699Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:30.949808Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21476 2025-06-24T21:04:31.285944Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21476 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:31.579239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:31.607891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:31.619168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:31.749060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:31.935085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:32.025912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:34.064294Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624768310850536:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:34.064427Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:34.412833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:34.534067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:34.614345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:34.692344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:34.732772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:34.774462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:34.832736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:34.935655Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624768310851208:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:34.935734Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:34.936257Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624768310851213:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:34.940729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:34.967605Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624768310851215:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:35.045597Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624772605818564:3431] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:35.230034Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624751130979735:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:35.230129Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:36.473108Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799076488, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 8788, MsgBus: 10427 2025-06-24T21:04:37.407313Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624779055048347:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:37.407397Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f70/r3tmp/tmpIALJeD/pdisk_1.dat 2025-06-24T21:04:37.593401Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:37.608610Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:37.608685Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:37.610242Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8788, node 2 2025-06-24T21:04:37.681705Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:37.681729Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:37.681736Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:37.681841Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10427 TClient is connected to server localhost:10427 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:38.253073Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:38.259986Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:38.466685Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:40.896105Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624791939950811:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:40.896272Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:40.909197Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:40.971488Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624791939950928:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:40.971560Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:41.003939Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624796234918235:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:41.004027Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:41.004250Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624796234918240:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:41.008417Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:41.025204Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624796234918242:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:04:41.108714Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624796234918293:2395] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpYql::TableUseBeforeCreate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptLeadingEmptyScan [GOOD] Test command err: Trying to start YDB, gRPC: 64388, MsgBus: 5792 2025-06-24T21:04:25.991769Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624727957452323:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:25.992338Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f83/r3tmp/tmpbgZjO5/pdisk_1.dat 2025-06-24T21:04:26.431197Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:26.431321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:26.434267Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:26.468052Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:26.469712Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624727957452142:2079] 1750799065906914 != 1750799065906917 TServer::EnableGrpc on GrpcPort 64388, node 1 2025-06-24T21:04:26.615849Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:26.615877Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:26.615901Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:26.616090Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5792 2025-06-24T21:04:26.919629Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5792 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:27.370739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:27.392564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:27.409515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:27.567102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:27.755449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:27.842460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:29.576238Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624745137322963:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:29.576356Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:29.935734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:29.969984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:30.006973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:30.069780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:30.112571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:30.161837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:30.220037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:30.309782Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624749432290919:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:30.309906Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:30.310269Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624749432290924:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:30.314736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:30.329837Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624749432290926:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:30.412506Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624749432290977:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:30.970122Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624727957452323:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:30.970203Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:32.315465Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799072323, txId: 281474976715672] shutting down 2025-06-24T21:04:32.725230Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapsh ... 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624766002997268:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:34.927789Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f83/r3tmp/tmpjze1kp/pdisk_1.dat 2025-06-24T21:04:35.125738Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:35.127310Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624766002997246:2079] 1750799074925986 != 1750799074925989 2025-06-24T21:04:35.157032Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:35.157138Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:35.162056Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10902, node 2 2025-06-24T21:04:35.349676Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:35.349702Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:35.349710Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:35.349822Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30665 TClient is connected to server localhost:30665 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:04:35.972683Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:36.112410Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:36.123493Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:36.141578Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:36.248651Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:36.447863Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:36.549321Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:39.017112Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624787477835385:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:39.017254Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:39.090804Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:39.126541Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:39.165358Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:39.206136Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:39.283407Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:39.319706Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:39.360065Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:39.456589Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624787477836048:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:39.456682Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:39.456894Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624787477836053:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:39.461343Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:39.475026Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624787477836055:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:39.550500Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624787477836106:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:39.931259Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624766002997268:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:39.931390Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:41.074259Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799081094, txId: 281474976715672] shutting down 2025-06-24T21:04:41.520606Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799081535, txId: 281474976715674] shutting down >> KqpScripting::ScriptValidate [GOOD] >> KqpScripting::ScriptStats >> KqpScripting::ScanQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::UpdateBadType [GOOD] Test command err: Trying to start YDB, gRPC: 28485, MsgBus: 19880 2025-06-24T21:04:37.048127Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624778029887310:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:37.048188Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f1b/r3tmp/tmpkbqFOx/pdisk_1.dat 2025-06-24T21:04:37.587092Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28485, node 1 2025-06-24T21:04:37.593690Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:37.593800Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:37.599337Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:37.680006Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:37.680040Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:37.680057Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:37.680231Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19880 2025-06-24T21:04:38.075277Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19880 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:38.382222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:38.395999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:38.414237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:04:38.583461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.750019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:38.821878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:40.471962Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624790914790808:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:40.472061Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:40.764116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:40.808085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:40.850749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:40.887741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:40.921233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:41.006974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:41.069257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:41.150360Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624795209758764:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:41.150456Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:41.150713Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624795209758770:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:41.155397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:41.169119Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624795209758772:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:41.273207Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624795209758823:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:42.048438Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624778029887310:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:42.048511Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Error: Type annotation, code: 1030
:4:26: Error: At function: KiUpdateTable!
:3:20: Error: Failed to convert type: Struct<'Amount':String?> to Struct<'Amount':Uint64?>
:3:20: Error: Failed to convert 'Amount': Optional&lt;String&gt; to Optional&lt;Uint64&gt;&#10;
:3:20: Error: Row type mismatch for table: db.[/Root/Test] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_14_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 14] [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldCompactBorrowedBeforeSplit [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldCompactBorrowedAfterSplitMerge >> KqpScripting::StreamExecuteYqlScriptScanClientOperationTimeoutBruteForce [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATE32 >> KqpYql::Closure [GOOD] >> KqpYql::JsonCast [GOOD] >> KqpScripting::UnsafeTimestampCast >> KqpScripting::StreamExecuteYqlScriptSeveralQueries [GOOD] >> KqpScripting::StreamExecuteYqlScriptSeveralQueriesComplex >> KqpYql::NonStrictDml >> KqpScripting::StreamExecuteYqlScriptPg [GOOD] >> KqpScripting::SyncExecuteYqlScriptSeveralQueries [GOOD] >> KqpYql::TableRange >> KqpYql::UpdatePk ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptScanClientOperationTimeoutBruteForce [GOOD] Test command err: Trying to start YDB, gRPC: 17441, MsgBus: 27246 2025-06-24T21:04:16.867485Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624689168997700:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:16.867814Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002fe5/r3tmp/tmpbTgFbW/pdisk_1.dat 2025-06-24T21:04:17.344348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:17.344446Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:17.346630Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:17.393484Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17441, node 1 2025-06-24T21:04:17.480980Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:17.481004Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:17.481010Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:17.481150Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27246 2025-06-24T21:04:17.867305Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27246 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:18.121712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:18.135805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:18.148824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:18.311492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:04:18.485731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:18.570497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:20.441232Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624706348868334:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:20.441342Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:20.789590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:20.849930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:20.901794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:20.981057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:21.014557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:21.062165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:21.116602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:21.202993Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624710643836289:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:21.203087Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:21.203411Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624710643836294:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:21.207428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:21.218012Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624710643836296:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:21.308059Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624710643836347:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:21.859312Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624689168997700:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:21.859403Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:23.403306Z node 1 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [1:7519624714938803912:2472] 2025-06-24T21:04:23.406386Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519624714938803964:2479] TxId: 281474976715673. Ctx: { TraceId: 01jyhw35by0tm7vjzsnyjfj3jn, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGE2MWJhMzUtMmRhYTI2YzUtOGQ5ZWYyZjMtZDM2MTBjMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: ... 9:2282] 2025-06-24T21:04:41.025675Z node 2 :KQP_COMPUTE ERROR: kqp_scan_fetcher_actor.cpp:188: SelfId: [2:7519624793403355945:3165]. TKqpScanFetcherActor: broken tablet for this request 72075186224037892, retries limit exceeded (0/20) 2025-06-24T21:04:41.026032Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710701. Snapshot is not valid, tabletId: 72075186224037894, step: 1750799081017 2025-06-24T21:04:41.026097Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710701. Snapshot is not valid, tabletId: 72075186224037895, step: 1750799081017 2025-06-24T21:04:41.026144Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710701. Snapshot is not valid, tabletId: 72075186224037893, step: 1750799081017 2025-06-24T21:04:41.026195Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624793403355943:3163]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037894, actor_id: [2:7519624733273809439:2288] 2025-06-24T21:04:41.026258Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624793403355944:3164]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037895, actor_id: [2:7519624733273809502:2289] 2025-06-24T21:04:41.026308Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624793403355946:3166]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037893, actor_id: [2:7519624733273809432:2285] 2025-06-24T21:04:41.026318Z node 2 :KQP_COMPUTE ERROR: kqp_scan_fetcher_actor.cpp:188: SelfId: [2:7519624793403355946:3166]. TKqpScanFetcherActor: broken tablet for this request 72075186224037893, retries limit exceeded (0/20) 2025-06-24T21:04:41.027008Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710701. Snapshot is not valid, tabletId: 72075186224037894, step: 1750799081017 2025-06-24T21:04:41.027073Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710701. Snapshot is not valid, tabletId: 72075186224037895, step: 1750799081017 2025-06-24T21:04:41.027122Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624793403355943:3163]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037894, actor_id: [2:7519624733273809439:2288] 2025-06-24T21:04:41.027133Z node 2 :KQP_COMPUTE ERROR: kqp_scan_fetcher_actor.cpp:188: SelfId: [2:7519624793403355943:3163]. TKqpScanFetcherActor: broken tablet for this request 72075186224037894, retries limit exceeded (0/20) 2025-06-24T21:04:41.027266Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624793403355944:3164]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037895, actor_id: [2:7519624733273809502:2289] 2025-06-24T21:04:41.027276Z node 2 :KQP_COMPUTE ERROR: kqp_scan_fetcher_actor.cpp:188: SelfId: [2:7519624793403355944:3164]. TKqpScanFetcherActor: broken tablet for this request 72075186224037895, retries limit exceeded (0/20) 2025-06-24T21:04:41.224858Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519624793403355961:3167] 2025-06-24T21:04:41.225659Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799081262, txId: 281474976710703] shutting down 2025-06-24T21:04:41.476541Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519624797698323409:3176] 2025-06-24T21:04:41.720379Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519624797698323474:3189] 2025-06-24T21:04:41.984446Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519624797698323554:3204] TxId: 281474976710706. Ctx: { TraceId: 01jyhw3r7x96khrays7t815x6b, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjJlZjk0Y2UtZWU1NTEzMzUtZmM0ODQ1MDAtY2RjYWYwYWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-24T21:04:41.984661Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZjJlZjk0Y2UtZWU1NTEzMzUtZmM0ODQ1MDAtY2RjYWYwYWY=, ActorId: [2:7519624797698323520:3204], ActorState: ExecuteState, TraceId: 01jyhw3r7x96khrays7t815x6b, Create QueryResponse for error on request, msg: 2025-06-24T21:04:41.985225Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799081990, txId: 281474976710705] shutting down 2025-06-24T21:04:41.985435Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519624797698323564:3212], TxId: 281474976710706, task: 5. Ctx: { TraceId : 01jyhw3r7x96khrays7t815x6b. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=ZjJlZjk0Y2UtZWU1NTEzMzUtZmM0ODQ1MDAtY2RjYWYwYWY=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [2:7519624797698323554:3204], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T21:04:41.986064Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NmMzNzU1MmItNmEzYmI0NzctYjQwZTdjYWItZGIxYmU3NjA=, ActorId: [2:7519624797698323510:3200], ActorState: ExecuteState, TraceId: 01jyhw3r7x96khrays7t815x6b, Create QueryResponse for error on request, msg: 2025-06-24T21:04:42.227103Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799082256, txId: 281474976710708] shutting down 2025-06-24T21:04:42.229272Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519624797698323623:3217] 2025-06-24T21:04:42.491234Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519624801993291038:3235] 2025-06-24T21:04:42.492075Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799082508, txId: 281474976710710] shutting down 2025-06-24T21:04:42.751203Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519624801993291149:3254] 2025-06-24T21:04:42.760848Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519624801993291194:3260] TxId: 281474976710713. Ctx: { TraceId: 01jyhw3s042he0j8eze3ggp0kx, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTFjMDMzMmMtYjQyYjFiYjgtZWY2MDcyMzctNzc0MjUxMzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-24T21:04:42.760988Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519624801993291199:3264], TxId: 281474976710713, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=MTFjMDMzMmMtYjQyYjFiYjgtZWY2MDcyMzctNzc0MjUxMzQ=. TraceId : 01jyhw3s042he0j8eze3ggp0kx. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [2:7519624801993291194:3260], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T21:04:42.761102Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=MTFjMDMzMmMtYjQyYjFiYjgtZWY2MDcyMzctNzc0MjUxMzQ=, ActorId: [2:7519624801993291161:3260], ActorState: ExecuteState, TraceId: 01jyhw3s042he0j8eze3ggp0kx, Create QueryResponse for error on request, msg: 2025-06-24T21:04:42.761654Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799082788, txId: 281474976710712] shutting down 2025-06-24T21:04:42.761875Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519624801993291203:3268], TxId: 281474976710713, task: 5. Ctx: { CustomerSuppliedId : . TraceId : 01jyhw3s042he0j8eze3ggp0kx. SessionId : ydb://session/3?node_id=2&id=MTFjMDMzMmMtYjQyYjFiYjgtZWY2MDcyMzctNzc0MjUxMzQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [2:7519624801993291194:3260], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T21:04:42.761876Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519624801993291201:3266], TxId: 281474976710713, task: 3. Ctx: { TraceId : 01jyhw3s042he0j8eze3ggp0kx. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=MTFjMDMzMmMtYjQyYjFiYjgtZWY2MDcyMzctNzc0MjUxMzQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [2:7519624801993291194:3260], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T21:04:42.762271Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519624801993291200:3265], TxId: 281474976710713, task: 2. Ctx: { SessionId : ydb://session/3?node_id=2&id=MTFjMDMzMmMtYjQyYjFiYjgtZWY2MDcyMzctNzc0MjUxMzQ=. TraceId : 01jyhw3s042he0j8eze3ggp0kx. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519624801993291194:3260], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T21:04:42.762365Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519624801993291202:3267], TxId: 281474976710713, task: 4. Ctx: { CustomerSuppliedId : . TraceId : 01jyhw3s042he0j8eze3ggp0kx. SessionId : ydb://session/3?node_id=2&id=MTFjMDMzMmMtYjQyYjFiYjgtZWY2MDcyMzctNzc0MjUxMzQ=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519624801993291194:3260], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T21:04:42.763522Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [2:7519624801993291236:2156], tablet: [2:7519624733273809434:2287], scanId: 43, table: /Root/EightShard 2025-06-24T21:04:42.763564Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [2:7519624801993291238:2157], tablet: [2:7519624733273809433:2286], scanId: 44, table: /Root/EightShard 2025-06-24T21:04:42.763601Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [2:7519624801993291240:2158], tablet: [2:7519624733273809439:2288], scanId: 41, table: /Root/EightShard 2025-06-24T21:04:43.015186Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519624801993291230:3273] 2025-06-24T21:04:43.171378Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799083180, txId: 281474976710715] shutting down 2025-06-24T21:04:43.450753Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519624806288258679:3297] 2025-06-24T21:04:43.699761Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799083719, txId: 281474976710718] shutting down 2025-06-24T21:04:43.727295Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519624806288258733:3307] 2025-06-24T21:04:43.987323Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799084013, txId: 281474976710720] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::Closure [GOOD] Test command err: Trying to start YDB, gRPC: 19022, MsgBus: 5158 2025-06-24T21:04:32.269726Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624758674743272:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:32.269871Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f64/r3tmp/tmpXlw8kM/pdisk_1.dat 2025-06-24T21:04:32.902378Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:32.902468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:32.909663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:32.942877Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:32.944950Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624758674743256:2079] 1750799072268857 != 1750799072268860 TServer::EnableGrpc on GrpcPort 19022, node 1 2025-06-24T21:04:33.183773Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:33.183806Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:04:33.183813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:33.183933Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:04:33.299303Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5158 TClient is connected to server localhost:5158 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:33.979826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:34.027284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:34.164304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:34.415641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:34.514234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:36.273143Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624775854614104:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:36.273255Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:36.654377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:36.723055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:36.758812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:36.790476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:36.829575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:36.875476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:36.911523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:37.015056Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624780149582064:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:37.015186Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:37.015822Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624780149582069:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:37.020137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:37.033643Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624780149582071:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:37.131811Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624780149582122:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:37.271263Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624758674743272:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:37.271348Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 22597, MsgBus: 25756 2025-06-24T21:04:39.185376Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624786366754074:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:39.185475Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f64/r3tmp/tmphxYuv6/pdisk_1.dat 2025-06-24T21:04:39.348154Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:39.348242Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:39.353224Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:39.368875Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22597, node 2 2025-06-24T21:04:39.430433Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:39.430458Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:39.430465Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:39.430571Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25756 TClient is connected to server localhost:25756 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:39.944927Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:39.953929Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:39.963300Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:40.062553Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:40.216790Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:40.227012Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:40.310468Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:42.583260Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624799251657551:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:42.583361Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:42.649868Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:42.722846Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:42.766041Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:42.812301Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:42.881767Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:42.968839Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.011322Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.118187Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624803546625521:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.118270Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.118567Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624803546625526:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.121982Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:43.137892Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624803546625528:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:43.235524Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624803546625581:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:44.185991Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624786366754074:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:44.186064Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::JsonCast [GOOD] Test command err: Trying to start YDB, gRPC: 23015, MsgBus: 20509 2025-06-24T21:04:32.136535Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624759087924477:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:32.139332Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f6d/r3tmp/tmpvjLNG6/pdisk_1.dat 2025-06-24T21:04:32.682175Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:32.718545Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:32.718639Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 23015, node 1 2025-06-24T21:04:32.721827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:32.828097Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:32.828118Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:32.828125Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:32.828300Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:04:33.155436Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20509 TClient is connected to server localhost:20509 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:33.645530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:33.683499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:33.858669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:34.073851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:34.187473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:36.014746Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624776267795234:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:36.014868Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:36.384963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:36.423233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:36.492325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:36.532188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:36.605755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:36.652829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:36.731430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:36.832707Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624776267795904:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:36.832791Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:36.833025Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624776267795909:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:36.838159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:36.852313Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624776267795911:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:36.937013Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624776267795962:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:37.139273Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624759087924477:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:37.139414Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Error: Table intent determination, code: 1040
:3:35: Error: INSERT OR IGNORE is not yet supported for Kikimr. Trying to start YDB, gRPC: 62716, MsgBus: 23450 2025-06-24T21:04:39.196458Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624786202075649:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:39.196520Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f6d/r3tmp/tmp2zKwwL/pdisk_1.dat 2025-06-24T21:04:39.332132Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:39.337735Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624786202075627:2079] 1750799079194663 != 1750799079194666 2025-06-24T21:04:39.354828Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected TServer::EnableGrpc on GrpcPort 62716, node 2 2025-06-24T21:04:39.354911Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:39.356110Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:39.415699Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:39.415723Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:39.415728Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:39.415836Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23450 TClient is connected to server localhost:23450 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:39.904084Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:04:39.911629Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:39.915564Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:39.999684Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:40.165667Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:40.212171Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:40.235395Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:42.695383Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624799086979132:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:42.695458Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:42.743228Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:42.786663Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:42.866555Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:42.942887Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:42.990173Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.086783Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.177251Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.269752Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624803381947098:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.269852Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.270224Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624803381947103:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.274745Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:43.285386Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624803381947105:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:43.366004Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624803381947156:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:44.196011Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624786202075649:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:44.196109Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; [[#]] >> KqpYql::EvaluateExpr3 [GOOD] >> KqpYql::SelectNoAsciiValue [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptPg [GOOD] Test command err: Trying to start YDB, gRPC: 27520, MsgBus: 10724 2025-06-24T21:04:22.636458Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624714266696928:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:22.636492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f8a/r3tmp/tmpGLeGMK/pdisk_1.dat 2025-06-24T21:04:23.214165Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:23.214288Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:23.225336Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624714266696910:2079] 1750799062622905 != 1750799062622908 2025-06-24T21:04:23.230300Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:23.231452Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27520, node 1 2025-06-24T21:04:23.451013Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:23.451035Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:23.451043Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:23.451181Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:04:23.679401Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10724 TClient is connected to server localhost:10724 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:24.236208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:24.285629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:24.301682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:24.578114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:24.843404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:24.939981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:26.938227Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624731446567728:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:26.938347Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:27.327285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:27.365758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:27.407386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:27.446054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:27.481710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:27.533165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:27.572420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:27.636538Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624714266696928:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:27.636614Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:27.645267Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519624735741535681:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:27.645331Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:27.645555Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624735741535686:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:27.650249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:27.677891Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624735741535688:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:27.745769Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624735741535739:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:29.060233Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=N2U5MTdiMDAtM2QxNjNkNzUtOWVhYmI3NC1hNjUwMDFkMw==, ActorId: [1:7519624744331470608:2477], ActorState: ExecuteState, TraceId: 01jyhw3btd9zprja1h32mryj77, Create QueryResponse for error on request, msg: 2025-06-24T ... ger.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799078399, txId: 281474976715792] shutting down 2025-06-24T21:04:38.511537Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799078546, txId: 281474976715794] shutting down Trying to start YDB, gRPC: 23036, MsgBus: 28092 2025-06-24T21:04:39.491798Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624787640493969:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:39.491941Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f8a/r3tmp/tmpjmyXyh/pdisk_1.dat 2025-06-24T21:04:39.682249Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:39.685667Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624787640493948:2079] 1750799079483279 != 1750799079483282 TServer::EnableGrpc on GrpcPort 23036, node 2 2025-06-24T21:04:39.703859Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:39.703956Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:39.705606Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:39.760626Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:39.760650Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:39.760659Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:39.760813Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28092 TClient is connected to server localhost:28092 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:40.413684Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:40.422375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:40.437789Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:04:40.535900Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:40.536119Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:40.717886Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:40.809208Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:43.210348Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624804820364779:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.210455Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.281585Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.322784Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.361114Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.404150Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.447229Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.540324Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.601998Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.694029Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624804820365441:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.694104Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.694362Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624804820365446:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.698323Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:43.715791Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624804820365448:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:43.813591Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624804820365499:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:44.492051Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624787640493969:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:44.492127Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_16_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 16] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_17_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 17] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::SyncExecuteYqlScriptSeveralQueries [GOOD] Test command err: Trying to start YDB, gRPC: 23929, MsgBus: 10358 2025-06-24T21:04:32.638453Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624758815309504:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:32.638509Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f5b/r3tmp/tmpnPKa0W/pdisk_1.dat 2025-06-24T21:04:33.114690Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:33.116475Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624758815309481:2079] 1750799072635990 != 1750799072635993 2025-06-24T21:04:33.129393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:33.129464Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:33.132137Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23929, node 1 2025-06-24T21:04:33.239592Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:33.239611Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:33.239636Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:33.239712Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10358 2025-06-24T21:04:33.659410Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10358 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:34.182190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:34.200087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:34.209925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:34.390829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:34.634241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:34.740676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:36.509206Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624775995180309:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:36.509330Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:36.807805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:36.843381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:36.874945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:36.967004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:37.003529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:37.079790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:37.123994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:37.223469Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624780290148275:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:37.223572Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:37.223855Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624780290148280:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:37.227688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:37.239029Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624780290148282:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:37.330718Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624780290148333:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:37.638485Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624758815309504:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:37.638551Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:39.083691Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799079106, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 9320, MsgBus: 25376 2025-06-24T21:04:39.894607Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624788856056830:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:39.894723Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f5b/r3tmp/tmpxCxMBn/pdisk_1.dat 2025-06-24T21:04:40.067620Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:40.067712Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:40.070545Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:40.075102Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:40.076139Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624788856056796:2079] 1750799079890872 != 1750799079890875 TServer::EnableGrpc on GrpcPort 9320, node 2 2025-06-24T21:04:40.190588Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:40.190613Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:40.190621Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:40.190744Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25376 TClient is connected to server localhost:25376 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:40.820720Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:40.843571Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:40.864278Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:40.941065Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:40.983716Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:41.155691Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:41.219524Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:43.396442Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624806035927624:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.396549Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.461565Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.512821Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.554763Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.590565Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.624735Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.664375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.714765Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.810155Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624806035928289:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.810283Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.810540Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624806035928294:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.814511Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:43.825345Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624806035928296:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:43.912886Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624806035928347:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:44.899405Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624788856056830:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:44.974447Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> KqpScripting::Pure [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::EvaluateExpr3 [GOOD] Test command err: Trying to start YDB, gRPC: 9067, MsgBus: 9898 2025-06-24T21:04:34.182687Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624768198099692:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:34.191842Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f5a/r3tmp/tmpXHI3J1/pdisk_1.dat 2025-06-24T21:04:34.668229Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:34.668326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:34.671614Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:34.685947Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9067, node 1 2025-06-24T21:04:34.832510Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:34.832546Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:34.832555Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:34.832673Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9898 2025-06-24T21:04:35.147287Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9898 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:35.648783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:35.666396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:35.673522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:35.866649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:36.178219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:36.275232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:37.981237Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624781083003168:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:37.981368Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.347649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.384234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.428372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.505171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.538065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.599736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.667919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.744870Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624785377971125:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.744946Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.745297Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624785377971130:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.749516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:38.764530Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624785377971132:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:38.830962Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624785377971183:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:39.183105Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624768198099692:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:39.183220Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 15759, MsgBus: 25626 2025-06-24T21:04:41.078774Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624795316057832:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:41.078841Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f5a/r3tmp/tmpxAuodV/pdisk_1.dat 2025-06-24T21:04:41.208049Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:41.208142Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:41.231569Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:41.257337Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15759, node 2 2025-06-24T21:04:41.311917Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:41.311944Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:41.311952Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:41.312074Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25626 TClient is connected to server localhost:25626 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:41.762242Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:04:41.774560Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:41.838526Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:41.996062Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:42.068808Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:42.087253Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:44.516441Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624808200961298:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:44.516518Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:44.578183Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:44.626547Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:44.663751Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:44.698172Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:44.736268Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:44.814607Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:44.903813Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:45.007497Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624812495929256:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:45.007587Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:45.009252Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624812495929261:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:45.013366Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:45.026000Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624812495929263:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:45.079363Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624812495929314:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:46.081264Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624795316057832:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:46.081339Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpScripting::StreamExecuteYqlScriptScanClientTimeoutBruteForce ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::SelectNoAsciiValue [GOOD] Test command err: Trying to start YDB, gRPC: 16529, MsgBus: 7118 2025-06-24T21:04:34.298742Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624767717197647:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:34.298785Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f37/r3tmp/tmpzOQnCB/pdisk_1.dat 2025-06-24T21:04:34.906416Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:34.906539Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:34.909572Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:34.914423Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624767717197629:2079] 1750799074297649 != 1750799074297652 2025-06-24T21:04:34.923284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16529, node 1 2025-06-24T21:04:35.141215Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:35.141240Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:35.141247Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:35.141381Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:04:35.331594Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7118 TClient is connected to server localhost:7118 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:35.902186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:35.954360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:36.180174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:36.397819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:36.487419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:38.165973Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624784897068446:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.166077Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.465866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.507723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.537639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.571641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.621346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.661381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.735984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.813116Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624784897069107:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.813204Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.813325Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624784897069112:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.817618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:38.828615Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624784897069114:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:38.901731Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624784897069165:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:39.299245Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624767717197647:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:39.299361Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Error: Type annotation, code: 1030
:10:13: Error: At function: RemovePrefixMembers, At function: Unordered, At function: PersistableRepr, At function: OrderedSqlProject, At tuple, At function: SqlProjectItem, At lambda
:10:20: Error: At function: Apply
:8:28: Error: At function: ScriptUdf
:8:28: Error: Module not loaded for script type: Python3 Trying to start YDB, gRPC: 29966, MsgBus: 12005 2025-06-24T21:04:40.803062Z node 2 :METADATA_PROVIDER WARN: ... 778Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f37/r3tmp/tmpNqwnvo/pdisk_1.dat 2025-06-24T21:04:41.025323Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:41.035319Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624794232819365:2079] 1750799080741859 != 1750799080741862 2025-06-24T21:04:41.047027Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:41.047115Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:41.059294Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29966, node 2 2025-06-24T21:04:41.151371Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:41.151399Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:41.151411Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:41.151545Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12005 TClient is connected to server localhost:12005 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:41.666432Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:04:41.673875Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:41.686002Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:04:41.759235Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:41.867611Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:41.921683Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:42.010430Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:44.403516Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624811412690171:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:44.403616Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:44.442931Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:44.479571Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:44.516406Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:44.551879Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:44.626625Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:44.685859Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:44.767821Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:44.881355Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624811412690841:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:44.881448Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:44.881651Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624811412690846:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:44.887052Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:44.900099Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624811412690848:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:44.995753Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624811412690899:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:45.794623Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624794232819495:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:45.794733Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:46.197729Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:46.559488Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799086596, txId: 281474976715674] shutting down >> KqpScripting::ExecuteYqlScriptScanScalar ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::Pure [GOOD] Test command err: Trying to start YDB, gRPC: 2693, MsgBus: 13951 2025-06-24T21:04:34.271618Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624767664241971:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:34.288994Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f24/r3tmp/tmpfgXjpn/pdisk_1.dat 2025-06-24T21:04:34.774264Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624767664241944:2079] 1750799074233777 != 1750799074233780 2025-06-24T21:04:34.785932Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:34.832653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:34.832753Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:34.835823Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2693, node 1 2025-06-24T21:04:35.002476Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:35.002502Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:35.002508Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:35.002619Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13951 2025-06-24T21:04:35.301050Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13951 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:35.733972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:35.763434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:35.793986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:35.974460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:04:36.183698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:36.291570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:38.169000Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624784844112780:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.169116Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.548306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.592761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.636879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.674949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.745733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.788769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.829537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:38.903443Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624784844113438:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.903573Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.903898Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624784844113443:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.911689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:38.926078Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624784844113445:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:39.030577Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624784844113496:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:39.268020Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624767664241971:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:39.268103Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:41.538568Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799081570, txId: 281474976715674] shutting down Trying to start YDB, gRPC: 17773, MsgBus: 25720 2025-06-24T21:04:42.409948Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624802904476136:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:42.410037Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f24/r3tmp/tmpIJHMIV/pdisk_1.dat 2025-06-24T21:04:42.559822Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:42.559907Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:42.563568Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:42.567283Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624802904476105:2079] 1750799082408984 != 1750799082408987 2025-06-24T21:04:42.576226Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17773, node 2 2025-06-24T21:04:42.627693Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:42.627718Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:42.627725Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:42.627864Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25720 TClient is connected to server localhost:25720 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:04:43.140581Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:04:43.157577Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:43.286164Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:43.427165Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:43.467896Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:43.557816Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:45.828626Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624815789379616:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:45.828707Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:45.913427Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:45.972955Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:46.013706Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:46.109582Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:46.162291Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:46.220246Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:46.259033Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:46.326082Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624820084347573:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:46.326177Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:46.326606Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624820084347578:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:46.331435Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:46.347435Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624820084347580:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:46.441592Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624820084347633:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:47.415270Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624802904476136:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:47.415350Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpYql::BinaryJsonOffsetBound >> BackupPathTest::ExportRecursiveWithoutDestinationPrefix [GOOD] >> KqpYql::DdlDmlMix [GOOD] >> KqpYql::CreateUseTable >> KqpYql::TableUseBeforeCreate [GOOD] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> KqpScripting::ScanQuery [GOOD] >> KqpScripting::ScanQueryDisable >> BackupPathTest::ParallelBackupWholeDatabase >> KqpScripting::ScriptExplainCreatedTable [GOOD] >> KqpScripting::ScriptExplain >> KqpScripting::SecondaryIndexes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::TableUseBeforeCreate [GOOD] Test command err: Trying to start YDB, gRPC: 3753, MsgBus: 4905 2025-06-24T21:04:44.055441Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624808770958715:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:44.055474Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f10/r3tmp/tmpCAav6e/pdisk_1.dat 2025-06-24T21:04:44.495017Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:44.495115Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624808770958529:2079] 1750799084037598 != 1750799084037601 2025-06-24T21:04:44.522490Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:44.522598Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 3753, node 1 2025-06-24T21:04:44.524619Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:44.610690Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:44.610723Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:44.610735Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:44.610843Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4905 2025-06-24T21:04:45.034423Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4905 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:45.410538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:45.436400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:45.454013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:04:45.607507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:45.818950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:45.902667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:47.825303Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624821655862053:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:47.825452Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:48.149147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.180711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.229483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.267811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.297410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.356965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.391856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.466618Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624825950830008:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:48.466743Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:48.466772Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624825950830013:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:48.470979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:48.481754Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624825950830015:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:48.577961Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624825950830066:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:49.055618Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624808770958715:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:49.055700Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Error: Type annotation, code: 1030
:3:13: Error: At function: KiReadTable!
:3:13: Error: Cannot find table 'db.[/Root/NewTable]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 >> KqpScripting::UnsafeTimestampCast [GOOD] >> KqpScripting::SystemTables >> KqpScripting::StreamExecuteYqlScriptSeveralQueriesComplex [GOOD] >> KqpYql::TableRange [GOOD] >> KqpYql::UpdatePk [GOOD] >> KqpYql::NonStrictDml [GOOD] >> KqpYql::JsonNumberPrecision >> KqpScripting::ScriptStats [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::SecondaryIndexes [GOOD] Test command err: Trying to start YDB, gRPC: 25736, MsgBus: 29049 2025-06-24T21:04:35.347535Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624770716062905:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:35.348863Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f1e/r3tmp/tmpfITGCn/pdisk_1.dat 2025-06-24T21:04:35.770325Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624770716062808:2079] 1750799075316791 != 1750799075316794 2025-06-24T21:04:35.771575Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:35.778940Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:35.779218Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:35.785004Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25736, node 1 2025-06-24T21:04:35.980157Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:35.980182Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:35.980189Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:35.980309Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29049 2025-06-24T21:04:36.365716Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29049 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:36.708483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:36.728625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:36.741071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:36.912294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:37.081382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:37.170666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:38.802539Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624783600966320:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:38.802640Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:39.078639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:39.112802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:39.148821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:39.187061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:39.225410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:39.304396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:39.354630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:39.431625Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624787895934280:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:39.431724Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:39.431811Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624787895934285:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:39.436258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:39.452328Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624787895934287:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:39.537355Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624787895934338:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:40.339819Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624770716062905:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:40.339897Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:40.554613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... -06-24T21:04:43.485791Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:43.485889Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:43.496882Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24580, node 2 2025-06-24T21:04:43.567791Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:43.567821Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:43.567842Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:43.567954Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20323 TClient is connected to server localhost:20323 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:04:44.124941Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:44.132465Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:44.149873Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:44.229641Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:44.328555Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:44.414570Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:44.543583Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:46.873164Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624818490765310:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:46.873267Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:46.932767Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:46.973507Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:47.014695Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:47.050502Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:47.080075Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:47.125036Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:47.165892Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:47.245697Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624822785733263:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:47.245777Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:47.251087Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624822785733268:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:47.255200Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:47.285002Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624822785733270:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:47.384956Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624822785733323:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:48.239985Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624805605861834:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:48.240071Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:48.530482Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.577055Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.616366Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> alter_compression.py::TestAlterCompression::test_availability_data [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptSeveralQueriesComplex [GOOD] Test command err: Trying to start YDB, gRPC: 61293, MsgBus: 12410 2025-06-24T21:04:39.157010Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624789494971543:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:39.157126Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f14/r3tmp/tmpKqaO7W/pdisk_1.dat 2025-06-24T21:04:39.646712Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:39.647378Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624789494971520:2079] 1750799079156098 != 1750799079156101 2025-06-24T21:04:39.658970Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:39.659047Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 61293, node 1 2025-06-24T21:04:39.663951Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:39.715087Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:39.715112Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:39.715123Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:39.715237Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12410 2025-06-24T21:04:40.185806Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12410 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:40.383859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:40.401568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:40.419207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:40.586443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:40.764568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:40.854988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:42.637036Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624802379875056:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:42.637167Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:42.973089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.011758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.046311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.091310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.131863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.216678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.289736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:43.360288Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624806674843015:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.360372Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.360601Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624806674843020:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:43.365499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:43.377993Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624806674843022:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:43.445424Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624806674843073:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:44.159307Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624789494971543:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:44.159377Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 6005, MsgBus: 7682 2025-06-24T21:04:45.663488Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624814664597646:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:45.664669Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f14/r3tmp/tmpsJBTkO/pdisk_1.dat 2025-06-24T21:04:45.822892Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:45.823103Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:45.823168Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:45.825997Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624814664597597:2079] 1750799085660214 != 1750799085660217 2025-06-24T21:04:45.836095Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6005, node 2 2025-06-24T21:04:46.007799Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:46.007821Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:46.007828Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:46.007934Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7682 TClient is connected to server localhost:7682 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:46.541269Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:46.548784Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:46.562358Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:46.629025Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:46.698086Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:46.779101Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:46.859401Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:49.331046Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624831844468422:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:49.331119Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:49.389349Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:49.436935Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:49.516513Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:49.549220Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:49.604815Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:49.639617Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:49.713316Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:49.804022Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624831844469086:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:49.804135Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:49.804322Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624831844469091:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:49.808474Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:49.820752Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624831844469093:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:49.891502Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624831844469144:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:50.669675Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624814664597646:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:50.669754Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Result: [[[[101u]]];[[[102u]]];[[[103u]]];[[[104u]]];[[[105u]]]] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::UpdatePk [GOOD] Test command err: Trying to start YDB, gRPC: 2595, MsgBus: 63847 2025-06-24T21:04:46.706626Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624817977938087:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:46.706808Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f05/r3tmp/tmpEoyEXF/pdisk_1.dat 2025-06-24T21:04:47.213997Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624817977937991:2079] 1750799086671064 != 1750799086671067 2025-06-24T21:04:47.214074Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:47.248679Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:47.248775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:47.253067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2595, node 1 2025-06-24T21:04:47.351701Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:47.351733Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:47.351746Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:47.351860Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63847 2025-06-24T21:04:47.744035Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63847 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:48.075366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:48.100456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:48.119726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:48.324019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:48.517800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:48.606857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:50.197864Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624835157808805:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:50.197957Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:50.486095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.523531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.556347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.629428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.683534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.719560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.792568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.845948Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624835157809469:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:50.846049Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:50.846122Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624835157809474:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:50.849790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:50.860124Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624835157809476:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:50.935634Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624835157809527:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:51.703214Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624817977938087:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:51.703290Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Error: Type annotation, code: 1030
:3:20: Warning: At lambda, At function: AsStruct, At tuple
:4:31: Warning: At function: +
:4:31: Warning: Integral type implicit bitcast: Optional and Int32, code: 1107
:5:27: Error: At function: KiUpdateTable!
:5:27: Error: Cannot update primary key column: Group ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::TableRange [GOOD] Test command err: Trying to start YDB, gRPC: 12945, MsgBus: 5910 2025-06-24T21:04:46.616370Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624818185441725:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:46.616470Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f07/r3tmp/tmpultLh5/pdisk_1.dat 2025-06-24T21:04:47.172487Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:47.185325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:47.185423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:47.194082Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12945, node 1 2025-06-24T21:04:47.305933Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:47.305963Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:47.305968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:47.306069Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5910 2025-06-24T21:04:47.631546Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5910 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:48.010463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:04:48.048280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:48.077177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:48.226772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:04:48.422977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.509634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:50.108972Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624835365312510:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:50.109085Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:50.457196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.532168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.570704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.604289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.655910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.728715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.772774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.862505Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624835365313180:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:50.862595Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:50.862811Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624835365313185:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:50.866271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:50.877786Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624835365313187:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:50.938197Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624835365313238:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:51.616222Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624818185441725:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:51.616324Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Error: Table intent determination, code: 1040
:3:27: Error: RANGE is not supported on Kikimr clusters. ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::ScriptStats [GOOD] Test command err: Trying to start YDB, gRPC: 27904, MsgBus: 24968 2025-06-24T21:04:37.787502Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624781008663348:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:37.788861Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f17/r3tmp/tmprrzQ3i/pdisk_1.dat 2025-06-24T21:04:38.283157Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:38.295310Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624781008663309:2079] 1750799077785081 != 1750799077785084 TServer::EnableGrpc on GrpcPort 27904, node 1 2025-06-24T21:04:38.318372Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:38.318454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:38.328338Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:38.408772Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:38.408804Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:38.408817Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:38.408964Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24968 2025-06-24T21:04:38.815345Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24968 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:04:39.187392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:39.219644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:39.403407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:39.593008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:39.665911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:41.432504Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624798188534144:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:41.432755Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:41.785533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:41.830436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:41.869303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:41.905786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:41.937112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:41.987671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:42.039620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:42.125007Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624802483502098:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:42.125082Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:42.125555Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624802483502103:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:42.130553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:42.144144Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624802483502105:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:42.221759Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624802483502156:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:42.787688Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624781008663348:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:42.787772Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 25011, MsgBus: 8636 2025-06-24T21:04:44.260267Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624809279251125:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:44.260596Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPat ... loaded 2025-06-24T21:04:44.491401Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624809279251097:2079] 1750799084251170 != 1750799084251173 2025-06-24T21:04:44.496533Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:44.496593Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:44.498071Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25011, node 2 2025-06-24T21:04:44.676442Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:44.676467Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:44.676475Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:44.676590Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8636 TClient is connected to server localhost:8636 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:45.292758Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:04:45.296070Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:04:45.308442Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:45.321351Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:45.428510Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:45.611325Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:45.711447Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:47.992170Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624822164154635:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:47.992273Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:48.072980Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.120289Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.195448Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.243021Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.275846Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.310981Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.386794Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.459079Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624826459122597:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:48.459158Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:48.459179Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624826459122602:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:48.462584Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:48.474113Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624826459122604:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:48.556051Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624826459122655:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:49.262228Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624809279251125:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:49.262303Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:50.365066Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.994430Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799090999, txId: 281474976715675] shutting down 2025-06-24T21:04:51.677186Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799091650, txId: 281474976715679] shutting down 2025-06-24T21:04:52.101419Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799092091, txId: 281474976715683] shutting down |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableAggrStatNonLocalTablet >> AnalyzeColumnshard::AnalyzeTwoColumnTables >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeResolve >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeAggregate >> AnalyzeDatashard::AnalyzeOneTable >> KqpErrors::ResolveTableError |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> KqpErrors::ProposeError |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootSaInAggregate >> KqpScripting::ExecuteYqlScriptScanScalar [GOOD] >> KqpScripting::JoinIndexLookup >> KqpYql::BinaryJsonOffsetBound [GOOD] >> KqpYql::AnsiIn ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test >> test_alter_compression.py::TestAlterCompression::test_multi[alter_compression] [GOOD] 2025-06-24 21:04:49,731 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 21:04:50,124 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. 
Process tree before termination: pid rss ref pdirt 464383 1004M 1009M 908M ydb-tests-olap-scenario --basetemp /home/runner/.ya/build/build_root/2btm/003f1e/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctest-modules 466256 2.3G 2.3G 1.7G └─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/2btm/003f1e/ydb/tests/olap/scenario/test-results/py3test/testing_out_stuff/chunk0/te Test command err: File "library/python/pytest/main.py", line 101, in main rc = pytest.main( File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main return wrap_session(config, _main) File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session session.exitstatus = doit(config, session) or 0 File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main config.hook.pytest_runtestloop(session=session) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol runtestprotocol(item, nextitem=nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 135, in runtestprotocol reports.append(call_and_report(item, "teardown", log, nextitem=nextitem)) File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report call = call_runtest_hook(item, when, **kwds) File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook return CallInfo.from_call( File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call result: Optional[TResult] = func() File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in lambda: ihook(item=item, **kwds), when=when, reraise=reraise File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = 
hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 183, in pytest_runtest_teardown item.session._setupstate.teardown_exact(nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 543, in teardown_exact fin() File "contrib/python/pytest/py3/_pytest/fixtures.py", line 1042, in finish func() File "contrib/python/pytest/py3/_pytest/fixtures.py", line 926, in _teardown_yield_fixture next(it) File "contrib/python/pytest/py3/_pytest/python.py", line 847, in xunit_setup_class_fixture _call_with_optional_argument(func, self.obj) File "contrib/python/pytest/py3/_pytest/python.py", line 764, in _call_with_optional_argument func(arg) File "/home/runner/.ya/build/build_root/2btm/003f1e/environment/arcadia/ydb/tests/olap/scenario/conftest.py", line 80, in teardown_class cls._ydb_instance.stop() File "/home/runner/.ya/build/build_root/2btm/003f1e/environment/arcadia/ydb/tests/olap/scenario/conftest.py", line 62, in stop self._temp_ydb_cluster.stop() File "ydb/tests/library/harness/kikimr_runner.py", line 599, in stop thread.join() File "contrib/tools/python3/Lib/threading.py", line 1149, in join self._wait_for_tstate_lock() File "contrib/tools/python3/Lib/threading.py", line 1169, in _wait_for_tstate_lock if lock.acquire(block, timeout): File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown traceback.print_stack(file=sys.stderr) Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: ...a/build/build_root/2btm/003f1e/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/003f1e/ydb/tests/olap/scenario/test-results/py3test/testing_out_stuff/chunk0/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/003f1e', '--source-root', '/home/runner/.ya/build/build_root/2btm/003f1e/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/003f1e/ydb/tests/olap/scenario/test-results/py3test/testing_out_stuff/chunk0/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/scenario', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '0', '--partition-mode', 'SEQUENTIAL', '--dep-root', 'ydb/tests/olap/scenario', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File 
"library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("...a/build/build_root/2btm/003f1e/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/003f1e/ydb/tests/olap/scenario/test-results/py3test/testing_out_stuff/chunk0/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/003f1e', '--source-root', '/home/runner/.ya/build/build_root/2btm/003f1e/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/003f1e/ydb/tests/olap/scenario/test-results/py3test/testing_out_stuff/chunk0/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/scenario', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '0', '--partition-mode', 'SEQUENTIAL', '--dep-root', 'ydb/tests/olap/scenario', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout",), {}) >> AnalyzeColumnshard::AnalyzeRebootSaBeforeAggregate >> TraverseDatashard::TraverseTwoTablesServerless >> BackupRestoreS3::TestAllPrimitiveTypes-DATE32 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME64 >> KqpYql::CreateUseTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> EncryptedBackupParamsValidationTest::NoSourcePrefixEncrypted [GOOD] Test command err: 2025-06-24T21:02:03.794321Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624119048873911:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:03.794722Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004597/r3tmp/tmpYRelWW/pdisk_1.dat 2025-06-24T21:02:04.483982Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:04.507186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:04.507276Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:04.512920Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10293, node 1 2025-06-24T21:02:04.807467Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:02:04.951758Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, 
will use file: (empty maybe) 2025-06-24T21:02:04.951780Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:02:04.951790Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:02:04.951910Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15721 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:02:05.382663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:02:07.664572Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624136228744043:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:07.664713Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:07.948038Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519624119048874007:2115] Handle TEvProposeTransaction 2025-06-24T21:02:07.948081Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519624119048874007:2115] TxId# 281474976715658 ProcessProposeTransaction 2025-06-24T21:02:07.948138Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519624119048874007:2115] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7519624136228744081:2634] 2025-06-24T21:02:08.091336Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519624136228744081:2634] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Value" Type: "Utf8" NotNull: false } KeyColumnNames: "Key" PartitionConfig { } Temporary: false } } } UserToken: "" DatabaseName: "" 2025-06-24T21:02:08.091394Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519624136228744081:2634] txid# 281474976715658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:02:08.091821Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519624136228744081:2634] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:02:08.091925Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519624136228744081:2634] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:02:08.092110Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519624136228744081:2634] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:08.092269Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519624136228744081:2634] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:02:08.092327Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519624136228744081:2634] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-24T21:02:08.092548Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519624136228744081:2634] txid# 281474976715658 HANDLE EvClientConnected 2025-06-24T21:02:08.094123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:08.097448Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519624136228744081:2634] txid# 281474976715658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715658} 2025-06-24T21:02:08.097517Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519624136228744081:2634] txid# 281474976715658 SEND to# [1:7519624136228744080:2304] Source {TEvProposeTransactionStatus txid# 281474976715658 Status# 53} 2025-06-24T21:02:08.243198Z node 1 
:KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624140523711525:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:08.243316Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:08.243516Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624140523711530:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:08.243896Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519624119048874007:2115] Handle TEvProposeTransaction 2025-06-24T21:02:08.243924Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519624119048874007:2115] TxId# 281474976715659 ProcessProposeTransaction 2025-06-24T21:02:08.243987Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519624119048874007:2115] Cookie# 0 userReqId# "" txid# 281474976715659 SEND to# [1:7519624140523711533:2754] 2025-06-24T21:02:08.247066Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519624140523711533:2754] txid# 281474976715659 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-24T21:02:08.247126Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519624140523711533:2754] txid# 281474976715659 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:02:08.247307Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519624140523711533:2754] txid# 281474976715659 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-24T21:02:08.249930Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519624140523711533:2754] txid# 281474976715659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:02:08.250018Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519624140523711533:2754] txid# 281474976715659 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:02:08.250208Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519624140523711533:2754] txid# 281474976715659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:08.250365Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519624140523711533:2754] HANDLE EvNavigateKeySetResult, txid# 281474976715659 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:02:08.250414Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519624140523711533:2754] txid# 281474976715659 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715659 TabletId# 72057594046644480} 2025-06-24T21:02:08.250555Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519624140523711533:2754] txid# 281474976715659 HANDLE EvClientConnected 2025-06-24T21:02:08.251901Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo ... } Origin: 72075186224037890 State: 2 TxId: 281474976715761 Step: 0 Generation: 1 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-24T21:04:34.062923Z node 43 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TRestore TProposedWaitParts, opId: 281474976715761:0 HandleReply TEvSchemaChanged at tablet# 72057594046644480 message# Source { RawX1: 7519624763227314388 RawX2: 4503784310966561 } Origin: 72075186224037890 State: 2 TxId: 281474976715761 Step: 0 Generation: 1 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-24T21:04:34.062972Z node 43 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715761:0, shardIdx: 72057594046644480:3, shard: 72075186224037890, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-24T21:04:34.062987Z node 43 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 281474976715761:0, at schemeshard: 72057594046644480 2025-06-24T21:04:34.063002Z node 43 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 281474976715761:0, datashard: 72075186224037890, at schemeshard: 72057594046644480 2025-06-24T21:04:34.063033Z node 43 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715761:0 129 -> 240 2025-06-24T21:04:34.063247Z node 43 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TRestore, opId# 281474976715761:0, reason# domain is not a serverless db, domain# /Root, domainPathId# [OwnerId: 72057594046644480, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046644480, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-24T21:04:34.063535Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:04:34.068464Z node 43 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715761:0, at schemeshard: 72057594046644480 2025-06-24T21:04:34.068500Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:04:34.068518Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715761:0 2025-06-24T21:04:34.068625Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [43:7519624763227314388:2337] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715761 at schemeshard: 72057594046644480 2025-06-24T21:04:34.068827Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [43:7519624728867574571:2202], Recipient [43:7519624728867574571:2202]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T21:04:34.068864Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T21:04:34.068926Z node 43 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: 
TTxOperationProgress Execute, operationId: 281474976715761:0, at schemeshard: 72057594046644480 2025-06-24T21:04:34.068956Z node 43 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976715761:0 ProgressState 2025-06-24T21:04:34.069167Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:04:34.069197Z node 43 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715761:0 progress is 1/1 2025-06-24T21:04:34.069212Z node 43 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715761 ready parts: 1/1 2025-06-24T21:04:34.069240Z node 43 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715761:0 progress is 1/1 2025-06-24T21:04:34.069255Z node 43 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715761 ready parts: 1/1 2025-06-24T21:04:34.069275Z node 43 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715761, ready parts: 1/1, is published: true 2025-06-24T21:04:34.069332Z node 43 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [43:7519624728867574571:2202] message: TxId: 281474976715761 2025-06-24T21:04:34.069372Z node 43 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715761 ready parts: 1/1 2025-06-24T21:04:34.069399Z node 43 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715761:0 2025-06-24T21:04:34.069411Z node 43 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715761:0 2025-06-24T21:04:34.069557Z node 43 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 14] was 3 2025-06-24T21:04:34.072407Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:04:34.072521Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [43:7519624728867574571:2202] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715761 at schemeshard: 72057594046644480 2025-06-24T21:04:34.072788Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124998, Sender [43:7519624728867574571:2202], Recipient [43:7519624728867574571:2202]: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715761 2025-06-24T21:04:34.072818Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5109: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletionResult 2025-06-24T21:04:34.072844Z node 43 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976715761 2025-06-24T21:04:34.072873Z node 43 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976715761 2025-06-24T21:04:34.072931Z node 43 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:04:34.072952Z node 43 :IMPORT DEBUG: schemeshard_import__create.cpp:1472: TImport::TTxProgress: 
OnNotifyResult: txId# 281474976715761 2025-06-24T21:04:34.073092Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_xxport__tx_base.h:63: SendNotifications: : id# 281474976710662, subscribers count# 0 2025-06-24T21:04:34.076493Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [43:7519624728867574571:2202]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:04:34.076540Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:04:34.076603Z node 43 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:04:34.076694Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [43:7519624728867574571:2202], Recipient [43:7519624728867574571:2202]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:04:34.076716Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:04:34.078575Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [43:7519624767522281839:3284], Recipient [43:7519624728867574571:2202]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:04:34.078606Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:04:34.078632Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:04:34.084060Z node 43 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [43:7519624767522281851:2344] [0] Resolve database: name# /Root 2025-06-24T21:04:34.085846Z node 43 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [43:7519624767522281851:2344] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:04:34.085888Z node 43 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [43:7519624767522281851:2344] [0] Send request: schemeShardId# 72057594046644480 2025-06-24T21:04:34.086461Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [43:7519624767522281854:3297], Recipient [43:7519624728867574571:2202]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:04:34.086498Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:04:34.086513Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:04:34.086760Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 275251202, Sender 
[43:7519624767522281851:2344], Recipient [43:7519624728867574571:2202]: NKikimrImport.TEvGetImportRequest Request { Id: 281474976710662 } DatabaseName: "/Root" 2025-06-24T21:04:34.086791Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5064: StateWork, processing event TEvImport::TEvGetImportRequest 2025-06-24T21:04:34.087375Z node 43 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [43:7519624767522281851:2344] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976710662 Status: SUCCESS Progress: PROGRESS_DONE ImportFromS3Settings { endpoint: "localhost:27217" scheme: HTTP bucket: "test_bucket" source_prefix: "Prefix" destination_path: "/Root/RestorePath" encryption_settings { symmetric_key { key: "Cool random key!" } } } StartTime { seconds: 1750799073 } EndTime { seconds: 1750799074 } } 2025-06-24T21:04:34.094077Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [43:7519624767522281854:3297], Recipient [43:7519624728867574571:2202]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:04:34.094133Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:04:34.094158Z node 43 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 >> TraverseColumnShard::TraverseServerlessColumnTable |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldHandleCompactionTimeouts [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::CreateUseTable [GOOD] Test command err: Trying to start YDB, gRPC: 61022, MsgBus: 31100 2025-06-24T21:04:43.355581Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624806129019653:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:43.355695Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f12/r3tmp/tmpvyjRJD/pdisk_1.dat 2025-06-24T21:04:43.967951Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:43.984690Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:43.984782Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 61022, node 1 2025-06-24T21:04:44.019597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:44.104540Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:44.104567Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:44.104573Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:44.104705Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: 
got bad distributable configuration 2025-06-24T21:04:44.353503Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31100 TClient is connected to server localhost:31100 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:44.953258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:44.972726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:44.979374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:45.178050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:45.383293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:45.493803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:47.449390Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624823308890255:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:47.449519Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:47.978829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.029654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.069537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.148296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.181950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.227773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.280070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.356139Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624806129019653:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:48.356221Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:48.377888Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519624827603858217:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:48.377979Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:48.378187Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624827603858222:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:48.382258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:48.399532Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624827603858224:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:48.459249Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624827603858279:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
: Error: Optimization, code: 1070
:4:24: Error: Queries with mixed data and scheme operations are not supported. Use separate queries for different types of operations., code: 2009 Trying to start YDB, gRPC: 2781, MsgBus: 27266 2025-06-24T21:04:50.659992Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624836641375850:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:50.663490Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f12/r3tmp/tmpEPfiR2/pdisk_1.dat 2025-06-24T21:04:50.793496Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:50.793578Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:50.793799Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:50.810740Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2781, node 2 2025-06-24T21:04:50.872151Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:50.872178Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:50.872186Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:50.872315Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27266 TClient is connected to server localhost:27266 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:51.362373Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:04:51.373292Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:51.452448Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:51.619130Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:51.671074Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:51.696862Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:53.958804Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624849526279308:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:53.958906Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:54.028882Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:54.084992Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:54.125187Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:54.181469Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:54.213642Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:54.249638Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:54.289296Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:54.350319Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624853821247260:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:54.350404Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624853821247265:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:54.350447Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:54.354481Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:54.370212Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624853821247267:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:54.448684Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624853821247318:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:55.604752Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:55.663405Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624836641375850:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:55.663485Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:55.888588Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799095920, txId: 281474976710674] shutting down |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldHandleCompactionTimeouts [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:03:01.238176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:03:01.238263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:03:01.238303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:03:01.238343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:03:01.238381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:03:01.238429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:03:01.238490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:03:01.238579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:03:01.239317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:03:01.239628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:03:01.322073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:03:01.322136Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:03:01.334958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:03:01.339390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:03:01.339595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:03:01.347847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:03:01.348184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:03:01.348844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:03:01.349226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:03:01.352395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:03:01.352635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:03:01.353882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:03:01.353968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:03:01.354092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:03:01.354164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:03:01.354211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:03:01.354394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:03:01.364040Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:03:01.485700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:03:01.485970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:03:01.486288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:03:01.486340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:03:01.486560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:03:01.486652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:03:01.496284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:03:01.496517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:03:01.496773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:03:01.496847Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:03:01.496934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:03:01.496977Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:03:01.500048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:03:01.500115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:03:01.500170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:03:01.508262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:03:01.508327Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-24T21:03:01.508405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:03:01.508476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:03:01.511956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:03:01.521767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:03:01.521997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:03:01.523076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:03:01.523239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:03:01.523284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:03:01.523551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:03:01.523598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:03:01.523759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:03:01.523851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:03:01.529166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:03:01.529223Z node 1 :FLAT_TX_SCHEMESHARD ... 
[0:0:0], Recipient [3:312:2297]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-06-24T21:04:54.975299Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3158: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-06-24T21:04:54.975393Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186233409546 outdated step 5000002 last cleanup 0 2025-06-24T21:04:54.975463Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186233409546 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:04:54.975501Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186233409546 2025-06-24T21:04:54.975533Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186233409546 has no attached operations 2025-06-24T21:04:54.975568Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186233409546 2025-06-24T21:04:54.975715Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:312:2297]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-24T21:04:54.976001Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186233409546, FollowerId 0, tableId 2 2025-06-24T21:04:54.983288Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:312:2297], Recipient [3:127:2151]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409546 TableLocalId: 2 Generation: 2 Round: 7 TableStats { DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 80 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 155 Memory: 124232 Storage: 14156 GroupWriteThroughput { GroupID: 0 Channel: 0 Throughput: 261 } GroupWriteThroughput { GroupID: 0 Channel: 1 Throughput: 444 } GroupWriteIops { GroupID: 0 Channel: 0 Iops: 1 } } ShardState: 2 UserTablePartOwners: 72075186233409546 NodeId: 3 StartTime: 41 TableOwnerId: 72057594046678944 FollowerId: 2025-06-24T21:04:54.983362Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:04:54.983415Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 13940 rowCount 100 cpuUsage 0.0155 2025-06-24T21:04:54.983538Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] raw table stats: DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 
RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 80 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:04:54.983580Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-24T21:04:54.984801Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435080, Sender [3:1062:3004], Recipient [3:312:2297]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvAsyncTableStats 2025-06-24T21:04:55.028431Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:04:55.028513Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:04:55.028563Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 1 2025-06-24T21:04:55.028645Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-24T21:04:55.028688Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-06-24T21:04:55.028796Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 13940 row count 100 2025-06-24T21:04:55.028890Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 100, DataSize 13940 2025-06-24T21:04:55.028927Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409546, followerId 0 2025-06-24T21:04:55.029000Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:219: [BackgroundCompaction] [Update] Updated shard# 72057594046678944:1 with partCount# 1, rowCount# 100, searchHeight# 1, lastFullCompaction# 1970-01-01T00:01:20.000000Z at schemeshard 72057594046678944 2025-06-24T21:04:55.029087Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:477: Do not want to split tablet 72075186233409546 by size, its table already has 1 out of 1 partitions 2025-06-24T21:04:55.029205Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:04:55.040166Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:04:55.040233Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:04:55.040265Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-24T21:04:55.342280Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:127:2151]: 
NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:04:55.342353Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:04:55.342447Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:127:2151], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:04:55.342480Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:04:55.711271Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:04:55.711359Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:04:55.711466Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:127:2151], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:04:55.711502Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:04:56.087963Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:04:56.088041Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:04:56.088139Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:127:2151], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:04:56.088177Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:04:56.493899Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:04:56.493980Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:04:56.494086Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:127:2151], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:04:56.494124Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:04:56.879255Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:04:56.879338Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:04:56.879433Z node 3 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:127:2151], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:04:56.879466Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:04:56.915498Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:312:2297]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-24T21:04:57.271580Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:04:57.271657Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:04:57.271755Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:127:2151], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:04:57.271793Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> Secret::ValidationQueryService [GOOD] >> KqpScripting::ScriptExplain [GOOD] >> KqpScripting::ScanQueryDisable [GOOD] >> KqpYql::JsonNumberPrecision [GOOD] >> AnalyzeColumnshard::AnalyzeRebootSaBeforeAnalyzeTableResponse |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> KqpScripting::SystemTables [GOOD] >> AnalyzeColumnshard::AnalyzeRebootSaBeforeResolve ------- [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::ValidationQueryService [GOOD] Test command err: 2025-06-24T21:01:59.204617Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:01:59.205052Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:01:59.205231Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001dac/r3tmp/tmpvCv1Jl/pdisk_1.dat 2025-06-24T21:01:59.580120Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 21092, node 1 TClient is connected to server localhost:7748 2025-06-24T21:01:59.842028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:01:59.876268Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:01:59.876349Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:01:59.876392Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:01:59.876737Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:01:59.880637Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:01:59.881118Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750798915898760 != 1750798915898764 2025-06-24T21:01:59.933454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:01:59.933593Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:01:59.948348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:02:00.175733Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Initialization finished REQUEST=CREATE OBJECT secret-1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 2025-06-24T21:02:11.752720Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:704:2584], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:11.752919Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:714:2589], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:11.753026Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:11.760879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:02:11.821530Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:718:2592], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-24T21:02:11.931308Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:769:2624] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:02:11.996755Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:779:2633], status: GENERIC_ERROR, issues:
:1:20: Error: mismatched input '-' expecting '(' 2025-06-24T21:02:11.998891Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YjBjMDA4MGItM2VlZWVlYTMtOTMxY2UwOGMtNmJlNjIyMGQ=, ActorId: [1:702:2582], ActorState: ExecuteState, TraceId: 01jyhvz5rc3sjv6t2g73wsf7qp, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: REQUEST=CREATE OBJECT secret-1 (TYPE SECRET) WITH value = `100`;RESULT=
:1:20: Error: mismatched input '-' expecting '(' ;EXPECTATION=0 FINISHED_REQUEST=CREATE OBJECT secret-1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=0;WAITING=1 2025-06-24T21:02:22.566611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:23.522232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:02:24.234063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:25.174222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:26.173439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:02:26.761285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:02:27.469933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:28.525198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:02:30.856958Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YWE5NjFiNDktN2E0NDQ1ZjMtYjU1OWI1YjktNDRiMDU2NDI=, ActorId: [1:797:2643], ActorState: ExecuteState, TraceId: 01jyhvzg054esg6m8jytm5k6fe, Create QueryResponse for error on request, msg: 2025-06-24T21:02:30.858728Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715689. Ctx: { TraceId: 01jyhvzg054esg6m8jytm5k6fe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWE5NjFiNDktN2E0NDQ1ZjMtYjU1OWI1YjktNDRiMDU2NDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;RESULT=
: Error: preparation problem: secret secret1 not found for alter ;EXPECTATION=0 2025-06-24T21:02:31.562548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:02:31.562628Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded FINISHED_REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=0;WAITING=1 REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=1;WAITING=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 2025-06-24T21:03:08.053159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715712:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:09.338804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715719:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:11.167864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose ... 4046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:03:12.077796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715731:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT `secret2:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=0;WAITING=1 2025-06-24T21:03:25.578611Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YWU5ZDc3NjUtMmU3MWNhZDctMzZkMWYzNzItZDZhYmJhZmQ=, ActorId: [1:3111:4355], ActorState: ExecuteState, TraceId: 01jyhw1d2e4djk393ee3p6knda, Create QueryResponse for error on request, msg: 2025-06-24T21:03:25.580933Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715745. Ctx: { TraceId: 01jyhw1d2e4djk393ee3p6knda, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWU5ZDc3NjUtMmU3MWNhZDctMzZkMWYzNzItZDZhYmJhZmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root REQUEST=CREATE OBJECT `secret2:test@test1` (TYPE SECRET_ACCESS);RESULT=
: Error: preparation problem: used in access secret secret2 not found ;EXPECTATION=0 FINISHED_REQUEST=CREATE OBJECT `secret2:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=0;WAITING=1 REQUEST=CREATE OBJECT IF NOT EXISTS `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 2025-06-24T21:03:38.644385Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=15; 2025-06-24T21:03:38.644768Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 15 at tablet 72075186224037892 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:03:38.645074Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 15 at tablet 72075186224037892 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:03:38.645749Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [1:3486:4559], Table: `//Root/.metadata/secrets/access` ([72057594046644480:13:1]), SessionActorId: [1:3393:4559]Got CONSTRAINT VIOLATION for table `//Root/.metadata/secrets/access`. ShardID=72075186224037892, Sink=[1:3486:4559].{
: Error: Conflict with existing key., code: 2012 } 2025-06-24T21:03:38.646407Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:3479:4559], SessionActorId: [1:3393:4559], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `//Root/.metadata/secrets/access`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[1:3393:4559]. isRollback=0 2025-06-24T21:03:38.647042Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=Y2U5OWZmNDYtNzMzMWExYzQtNGNjZjRiNjAtMjI3NmUzNzk=, ActorId: [1:3393:4559], ActorState: ExecuteState, TraceId: 01jyhw1teg61v3vv38ps2w1f0y, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [1:3480:4559] from: [1:3479:4559] 2025-06-24T21:03:38.648559Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:3480:4559] TxId: 281474976715757. Ctx: { TraceId: 01jyhw1teg61v3vv38ps2w1f0y, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2U5OWZmNDYtNzMzMWExYzQtNGNjZjRiNjAtMjI3NmUzNzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `//Root/.metadata/secrets/access`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-24T21:03:38.649574Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=Y2U5OWZmNDYtNzMzMWExYzQtNGNjZjRiNjAtMjI3NmUzNzk=, ActorId: [1:3393:4559], ActorState: ExecuteState, TraceId: 01jyhw1teg61v3vv38ps2w1f0y, Create QueryResponse for error on request, msg: 2025-06-24T21:03:38.660905Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor.h:64;event=unexpected reply;error_message=operation { ready: true status: PRECONDITION_FAILED issues { message: "Constraint violated. Table: `//Root/.metadata/secrets/access`." issue_code: 2012 severity: 1 issues { message: "Conflict with existing key." issue_code: 2012 severity: 1 } } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { id: "01jyhw1t3p0ywj5aas8nhsxfmc" } } } } ;request=session_id: "ydb://session/3?node_id=1&id=Y2U5OWZmNDYtNzMzMWExYzQtNGNjZjRiNjAtMjI3NmUzNzk=" tx_control { tx_id: "01jyhw1t3p0ywj5aas8nhsxfmc" } query { yql_text: "DECLARE $objects AS List>;\nINSERT INTO `//Root/.metadata/secrets/access`\nSELECT ownerUserId,secretId,accessSID FROM AS_TABLE($objects)\n" } parameters { key: "$objects" value { type { list_type { item { struct_type { members { name: "ownerUserId" type { type_id: UTF8 } } members { name: "secretId" type { type_id: UTF8 } } members { name: "accessSID" type { type_id: UTF8 } } } } } } value { items { items { text_value: "root@builtin" } items { text_value: "secret1" } items { text_value: "test@test1" } } } } } ; REQUEST=CREATE OBJECT IF NOT EXISTS `secret1:test@test1` (TYPE SECRET_ACCESS);RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT IF NOT EXISTS `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1` (TYPE SECRET);EXPECTATION=0;WAITING=1 2025-06-24T21:03:51.608384Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YmM2MzZiZjItNGVkM2U1NDAtNmJlYTcwOTEtN2MwZWI5OTI=, ActorId: [1:3670:4757], ActorState: ExecuteState, TraceId: 01jyhw26kw32gj19g3j07cq25t, Create QueryResponse for error on request, msg: 2025-06-24T21:03:51.609718Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715769. Ctx: { TraceId: 01jyhw26kw32gj19g3j07cq25t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmM2MzZiZjItNGVkM2U1NDAtNmJlYTcwOTEtN2MwZWI5OTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=DROP OBJECT `secret1` (TYPE SECRET);RESULT=
: Error: preparation problem: secret secret1 using in access for test@test1 ;EXPECTATION=0 FINISHED_REQUEST=DROP OBJECT `secret1` (TYPE SECRET);EXPECTATION=0;WAITING=1 2025-06-24T21:04:03.105287Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:3969:4978], for# root@builtin, access# DescribeSchema 2025-06-24T21:04:03.105407Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:3969:4978], for# root@builtin, access# DescribeSchema 2025-06-24T21:04:03.107785Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:3966:4975], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/Root/.metadata/secrets/values]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:04:03.110310Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YjI0ZTYyZjAtODI4YjgyNDItNzQyMDU0ZGMtZWI5YTk0Mjg=, ActorId: [1:3962:4972], ActorState: ExecuteState, TraceId: 01jyhw2jgfc8ty9s9tpf8v5p3j, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: REQUEST=SELECT * FROM `/Root/.metadata/secrets/values`;RESULT=
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/Root/.metadata/secrets/values]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 ;EXPECTATION=0 REQUEST=SELECT * FROM `/Root/.metadata/secrets/values`;EXPECTATION=0 REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 2025-06-24T21:04:14.937775Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (51449FAE): Could not find correct token validator 2025-06-24T21:04:15.825428Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YjBhNjQ3OTUtZTgyZmRmOTctNGIzNGE5NTItZTgyNzk5ZjQ=, ActorId: [1:4205:5149], ActorState: ExecuteState, TraceId: 01jyhw2y2h60q7mcx5czk2yxpb, Create QueryResponse for error on request, msg: 2025-06-24T21:04:15.826559Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715793. Ctx: { TraceId: 01jyhw2y2h60q7mcx5czk2yxpb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjBhNjQ3OTUtZTgyZmRmOTctNGIzNGE5NTItZTgyNzk5ZjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=
: Error: cannot CREATE objects: Secret already exists: secret1 ;EXPECTATION=0 FINISHED_REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 REQUEST=UPSERT OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 2025-06-24T21:04:28.381514Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YzI1NWY4NjAtZmMyNzU2ZjMtMmIyYjY5MDQtN2M1ZDRlMjI=, ActorId: [1:4571:5416], ActorState: ExecuteState, TraceId: 01jyhw3ac33drzzjjxrrm65x11, Create QueryResponse for error on request, msg: 2025-06-24T21:04:28.382672Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715809. Ctx: { TraceId: 01jyhw3ac33drzzjjxrrm65x11, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzI1NWY4NjAtZmMyNzU2ZjMtMmIyYjY5MDQtN2M1ZDRlMjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=UPSERT OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=
: Error: cannot UPSERT objects: Secret already exists: secret1 ;EXPECTATION=0 FINISHED_REQUEST=UPSERT OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 REQUEST=CREATE OBJECT secret2 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT secret2 (TYPE SECRET) WITH value = `100`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT secret2 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 2025-06-24T21:04:55.554247Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715838. Ctx: { TraceId: 01jyhw45a05rp60zm2jhs8j0cs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTE0MjlkNDYtNzE0NzQ2ZjAtOTY4MTlhZTYtYTMwMWU3MDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::ScriptExplain [GOOD] Test command err: Trying to start YDB, gRPC: 6203, MsgBus: 18123 2025-06-24T21:04:42.615934Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624799899164448:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:42.616129Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f13/r3tmp/tmp1ig6TQ/pdisk_1.dat 2025-06-24T21:04:43.120779Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:43.122239Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624799899164429:2079] 1750799082615330 != 1750799082615333 2025-06-24T21:04:43.155533Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:43.155605Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:43.180410Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6203, node 1 2025-06-24T21:04:43.339887Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:43.339917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:43.339923Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:43.340034Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18123 2025-06-24T21:04:43.631773Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18123 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:44.102015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:44.126516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:04:44.144668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:44.302435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:44.527068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:44.613728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:46.470547Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624817079035248:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:46.470675Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:46.845424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:46.880567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:46.937323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:46.979466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:47.026612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:47.087887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:47.146990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:47.243544Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624821374003206:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:47.243632Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:47.244070Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624821374003211:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:47.248622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:47.297072Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624821374003213:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:47.372452Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624821374003264:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:47.619243Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624799899164448:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:47.619330Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:48.535274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f13/r3tmp/tmpv8mZ2U/pdisk_1.dat 2025-06-24T21:04:51.688101Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624841299064278:2079] 1750799091571831 != 1750799091571834 2025-06-24T21:04:51.697530Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4021, node 2 2025-06-24T21:04:51.711803Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:51.711910Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:51.714265Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:51.743268Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:51.743296Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:51.743304Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:51.743419Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29066 TClient is connected to server localhost:29066 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:04:52.200005Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:52.212340Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:52.269205Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:52.457460Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:52.526272Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:52.591830Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:55.093974Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624858478935107:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:55.094071Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:55.160748Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:55.207413Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:55.284702Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:55.325069Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:55.365143Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:55.399682Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:55.470268Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:55.532594Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624858478935763:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:55.532697Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:55.532955Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624858478935768:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:55.536572Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:55.547182Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624858478935770:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:55.622371Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624858478935821:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:56.631524Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624841299064308:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:56.631873Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:57.630401Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519624867068870719:2479], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:168: Error: At function: DataQueryBlocks
:1:185: Error: At function: TKiDataQueryBlock
:1:208: Error: At function: KiEffects
:1:219: Error: At function: KiWriteTable!
:1:219: Error: Cannot find table 'db.[/Root/ScriptingTest]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:04:57.631432Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MzNmMTg5OTUtZWExNGZlZjQtZjA1ZmQ4N2UtNjhkNWUzMWM=, ActorId: [2:7519624867068870717:2478], ActorState: ExecuteState, TraceId: 01jyhw47rb00c6vfc2ag689rvd, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::ScanQueryDisable [GOOD] Test command err: Trying to start YDB, gRPC: 16308, MsgBus: 12404 2025-06-24T21:04:44.424554Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624809743955099:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:44.424650Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f0f/r3tmp/tmptOkbEH/pdisk_1.dat 2025-06-24T21:04:44.906419Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:44.906794Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:44.906849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:44.911309Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624809743954904:2079] 1750799084401552 != 1750799084401555 2025-06-24T21:04:44.921125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16308, node 1 2025-06-24T21:04:45.018512Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:45.018544Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:45.018550Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:45.018688Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12404 2025-06-24T21:04:45.443689Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12404 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:45.709752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:45.740730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:45.893358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:46.106032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:46.188468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:48.022421Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624826923825722:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:48.022555Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:48.362639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.402775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.489675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.527140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.562605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.616140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.668554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:48.752927Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624826923826380:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:48.753011Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:48.753092Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624826923826385:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:48.757392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:48.770987Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624826923826387:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:48.836719Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624826923826438:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:49.432394Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624809743955099:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:49.466704Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:50.641645Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799090656, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 12026, MsgBus: 17746 2025-06-24T21:04:51.451707Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624838070907217:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:51.451777Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f0f/r3tmp/tmpP8VS3K/pdisk_1.dat 2025-06-24T21:04:51.597705Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12026, node 2 2025-06-24T21:04:51.620097Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:51.620173Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:51.622793Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:51.671835Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:51.671862Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:51.671871Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:51.672009Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17746 TClient is connected to server localhost:17746 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:52.197861Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:52.203837Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:52.216349Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:04:52.279167Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:52.455506Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:52.493794Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:52.523608Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:54.865501Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624850955810701:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:54.865616Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:54.921772Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:54.956006Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:55.029822Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:55.069053Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:55.120508Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:55.172306Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:55.244095Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:55.306225Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624855250778660:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:55.306319Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:55.306625Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624855250778665:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:55.310217Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:55.321146Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624855250778667:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:55.403614Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624855250778718:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:56.452594Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624838070907217:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:56.452685Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:57.159723Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799097173, txId: 281474976710672] shutting down >> TraverseDatashard::TraverseTwoTables |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::JsonNumberPrecision [GOOD] Test command err: Trying to start YDB, gRPC: 21407, MsgBus: 20606 2025-06-24T21:04:46.340167Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624816410527021:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:46.340682Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f09/r3tmp/tmpS6bNLs/pdisk_1.dat 2025-06-24T21:04:46.763553Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624816410526840:2079] 1750799086280517 != 1750799086280520 2025-06-24T21:04:46.775127Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:46.781671Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:46.781804Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 21407, node 1 2025-06-24T21:04:46.783881Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:46.926221Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:46.926243Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:46.926256Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:46.926365Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20606 2025-06-24T21:04:47.303329Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20606 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:47.764702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:47.795494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:47.963297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:48.130093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:48.210415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:49.961560Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624829295430371:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:49.961724Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:50.315258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.358598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.394720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.467276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.538358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.574704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.648543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:50.748707Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624833590398341:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:50.748821Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:50.749049Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624833590398346:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:50.753051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:50.769318Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624833590398348:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:50.831131Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624833590398399:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:51.304398Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624816410527021:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:51.304466Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 27882, MsgBus: 7882 2025-06-24T21:04:52.972058Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624842550298971:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:52.972121Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f09/r3tmp/tmpWMjZNo/pdisk_1.dat 2025-06-24T21:04:53.105665Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:53.122613Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:53.122701Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:53.124700Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27882, node 2 2025-06-24T21:04:53.164723Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:53.164746Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:53.164753Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:53.164875Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7882 TClient is connected to server localhost:7882 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:53.622721Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:53.639601Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:53.712365Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:53.855079Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:53.928646Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:53.998278Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:56.340378Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624859730169736:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:56.340479Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:56.393949Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:56.435051Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:56.473146Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:56.522568Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:56.556974Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:56.602782Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:56.679981Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:56.779624Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624859730170397:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:56.779774Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:56.783485Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624859730170402:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:56.791705Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:56.826723Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624859730170404:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:56.896467Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624859730170455:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:57.972133Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624842550298971:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:57.972215Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::SystemTables [GOOD] Test command err: Trying to start YDB, gRPC: 8564, MsgBus: 30597 2025-06-24T21:04:45.499254Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624815348163142:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:45.499322Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f0b/r3tmp/tmpJtrnkx/pdisk_1.dat 2025-06-24T21:04:45.929701Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:45.929807Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:45.935353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:45.940241Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:45.941563Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624815348162952:2079] 1750799085479865 != 1750799085479868 TServer::EnableGrpc on GrpcPort 8564, node 1 2025-06-24T21:04:46.174284Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:46.174314Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:46.174322Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:46.174457Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30597 2025-06-24T21:04:46.503017Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30597 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:46.909450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:46.946031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:47.139397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:47.335788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:47.428043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:49.236467Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624832528033763:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:49.236601Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:49.601333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:49.634244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:49.666545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:49.702945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:49.739363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:49.774484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:49.810279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:49.889214Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624832528034420:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:49.889306Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:49.889371Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624832528034425:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:49.893678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:49.909001Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624832528034427:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:49.968362Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624832528034478:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:50.500905Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624815348163142:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:50.501013Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:51.254305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 63432, MsgBus: 23933 2025-06-24T21:04:52.394245Z node 2 :METADATA_PROVIDER WARN: ... 394327Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f0b/r3tmp/tmpaF8l7X/pdisk_1.dat 2025-06-24T21:04:52.520977Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:52.522684Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624844341210683:2079] 1750799092392466 != 1750799092392469 TServer::EnableGrpc on GrpcPort 63432, node 2 2025-06-24T21:04:52.547604Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:52.547755Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:52.550070Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:52.580593Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:52.580616Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:52.580626Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:52.580746Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23933 TClient is connected to server localhost:23933 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:53.092000Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:53.100545Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:53.121902Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:53.199199Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:53.337228Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:53.403629Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:53.404447Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:55.756554Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624857226114199:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:55.756659Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:55.816999Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:55.857154Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:55.889268Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:55.929650Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:55.970630Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:56.007791Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:56.050592Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:56.163478Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624861521082157:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:56.163612Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:56.167488Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624861521082162:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:56.175951Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:56.189623Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624861521082164:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:56.270143Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624861521082215:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:57.397678Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624844341210702:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:57.397970Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:57.738008Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799097716, txId: 281474976715672] shutting down 2025-06-24T21:04:57.890767Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799097876, txId: 281474976715674] shutting down 2025-06-24T21:04:59.014494Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799099014, txId: 281474976715676] shutting down |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest |93.3%| [TA] $(B)/ydb/tests/olap/scenario/test-results/py3test/{meta.json ... results_accumulator.log} >> KqpScripting::StreamExecuteYqlScriptScanClientTimeoutBruteForce [GOOD] >> Secret::Validation [GOOD] >> KqpScripting::StreamExecuteYqlScriptScanOperationTmeoutBruteForce >> KqpYql::AnsiIn [GOOD] >> TraverseColumnShard::TraverseColumnTableAggrStatUnavailableNode >> AnalyzeColumnshard::AnalyzeStatus |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAlterCompression::test_availability_data [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseDatashard::TraverseOneTableServerless >> KqpScripting::JoinIndexLookup [GOOD] >> TraverseColumnShard::TraverseColumnTable >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::Validation [GOOD] Test command err: 2025-06-24T21:02:00.503344Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:02:00.503780Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:02:00.503964Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001cf5/r3tmp/tmpweTedr/pdisk_1.dat 2025-06-24T21:02:00.879913Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 28318, node 1 TClient is connected to server localhost:23862 2025-06-24T21:02:01.147946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:02:01.185288Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:02:01.185393Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:02:01.185437Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:02:01.185811Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:01.190083Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:02:01.190530Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750798917087213 != 1750798917087217 2025-06-24T21:02:01.241854Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:01.242009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:01.256591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:02:01.470620Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Initialization finished REQUEST=CREATE OBJECT secret-1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 2025-06-24T21:02:13.203251Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:707:2585], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:13.203492Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } REQUEST=CREATE OBJECT secret-1 (TYPE SECRET) WITH value = `100`;RESULT=
:1:20: Error: mismatched input '-' expecting '(' ;EXPECTATION=0 FINISHED_REQUEST=CREATE OBJECT secret-1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=0;WAITING=1 2025-06-24T21:02:23.650581Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:732:2599], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:23.650739Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:23.658400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:23.949038Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:842:2676], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:23.949128Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:23.949340Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:847:2681], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:23.954074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:02:24.133932Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:849:2683], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:02:24.467516Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:942:2747] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:02:25.101981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:02:25.593282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:26.488785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:27.339876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:02:27.957203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:02:29.178274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:29.513939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;RESULT=
: Error: Execution, code: 1060
:1:48: Error: Executing ALTER OBJECT SECRET
: Error: preparation problem: secret secret1 not found for alter ;EXPECTATION=0 2025-06-24T21:02:32.365202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:02:32.365294Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded FINISHED_REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=0;WAITING=1 REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=1;WAITING=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 2025-06-24T21:03:09.546449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715713:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:10.591398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715718:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:03:12.477715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715727:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:03:13.115884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715730:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT `secret2:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=0;WAITING=1 REQUEST=CREATE OBJECT `secret2:test@test1` (TYPE SECRET_ACCESS);RESULT=
: Error: Execution, code: 1060
:1:42: Error: Executing CREATE OBJECT SECRET_ACCESS
: Error: preparation problem: used in access secret secret2 not found ;EXPECTATION=0 FINISHED_REQUEST=CREATE OBJECT `secret2:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=0;WAITING=1 REQUEST=CREATE OBJECT IF NOT EXISTS `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 2025-06-24T21:03:39.761681Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=16; 2025-06-24T21:03:39.762051Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 16 at tablet 72075186224037892 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:03:39.762304Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 16 at tablet 72075186224037892 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:03:39.762671Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [1:3553:4602], Table: `//Root/.metadata/secrets/access` ([72057594046644480:13:1]), SessionActorId: [1:3460:4602]Got CONSTRAINT VIOLATION for table `//Root/.metadata/secrets/access`. ShardID=72075186224037892, Sink=[1:3553:4602].{
: Error: Conflict with existing key., code: 2012 } 2025-06-24T21:03:39.763292Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:3546:4602], SessionActorId: [1:3460:4602], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `//Root/.metadata/secrets/access`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[1:3460:4602]. isRollback=0 2025-06-24T21:03:39.763854Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=NWVjNTc5MzctOWM0ZTM5MTEtNzE5OWRmMDEtM2U0MjFiZDk=, ActorId: [1:3460:4602], ActorState: ExecuteState, TraceId: 01jyhw1vks01weh05m6jkdwzc7, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [1:3547:4602] from: [1:3546:4602] 2025-06-24T21:03:39.764595Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:3547:4602] TxId: 281474976715757. Ctx: { TraceId: 01jyhw1vks01weh05m6jkdwzc7, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWVjNTc5MzctOWM0ZTM5MTEtNzE5OWRmMDEtM2U0MjFiZDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `//Root/.metadata/secrets/access`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-24T21:03:39.765066Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NWVjNTc5MzctOWM0ZTM5MTEtNzE5OWRmMDEtM2U0MjFiZDk=, ActorId: [1:3460:4602], ActorState: ExecuteState, TraceId: 01jyhw1vks01weh05m6jkdwzc7, Create QueryResponse for error on request, msg: 2025-06-24T21:03:39.771331Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor.h:64;event=unexpected reply;error_message=operation { ready: true status: PRECONDITION_FAILED issues { message: "Constraint violated. Table: `//Root/.metadata/secrets/access`." issue_code: 2012 severity: 1 issues { message: "Conflict with existing key." issue_code: 2012 severity: 1 } } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { id: "01jyhw1va3cept3mq01arbbyym" } } } } ;request=session_id: "ydb://session/3?node_id=1&id=NWVjNTc5MzctOWM0ZTM5MTEtNzE5OWRmMDEtM2U0MjFiZDk=" tx_control { tx_id: "01jyhw1va3cept3mq01arbbyym" } query { yql_text: "DECLARE $objects AS List>;\nINSERT INTO `//Root/.metadata/secrets/access`\nSELECT ownerUserId,secretId,accessSID FROM AS_TABLE($objects)\n" } parameters { key: "$objects" value { type { list_type { item { struct_type { members { name: "ownerUserId" type { type_id: UTF8 } } members { name: "secretId" type { type_id: UTF8 } } members { name: "accessSID" type { type_id: UTF8 } } } } } } value { items { items { text_value: "root@builtin" } items { text_value: "secret1" } items { text_value: "test@test1" } } } } } ; REQUEST=CREATE OBJECT IF NOT EXISTS `secret1:test@test1` (TYPE SECRET_ACCESS);RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT IF NOT EXISTS `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1` (TYPE SECRET);EXPECTATION=0;WAITING=1 REQUEST=DROP OBJECT `secret1` (TYPE SECRET);RESULT=
: Error: Execution, code: 1060
:1:29: Error: Executing DROP OBJECT SECRET
: Error: preparation problem: secret secret1 using in access for test@test1 ;EXPECTATION=0 FINISHED_REQUEST=DROP OBJECT `secret1` (TYPE SECRET);EXPECTATION=0;WAITING=1 2025-06-24T21:04:04.140585Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:4046:5030], for# root@builtin, access# DescribeSchema 2025-06-24T21:04:04.140701Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:4046:5030], for# root@builtin, access# DescribeSchema 2025-06-24T21:04:04.143439Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:4038:5022], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/Root/.metadata/secrets/values]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:04:04.146221Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NWY4N2EzOTYtYjI5MzQ5YTUtMmU4NmU3NDEtMjQ2MTNiNzI=, ActorId: [1:4026:5011], ActorState: ExecuteState, TraceId: 01jyhw2kfa7d3kp5wss87816ke, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: REQUEST=SELECT * FROM `/Root/.metadata/secrets/values`;RESULT=
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/Root/.metadata/secrets/values]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 ;EXPECTATION=0 REQUEST=SELECT * FROM `/Root/.metadata/secrets/values`;EXPECTATION=0 REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 2025-06-24T21:04:15.904849Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (51449FAE): Could not find correct token validator REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=
: Error: Execution, code: 1060
:1:50: Error: Executing CREATE OBJECT SECRET
: Error: cannot CREATE objects: Secret already exists: secret1 ;EXPECTATION=0 FINISHED_REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 REQUEST=UPSERT OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 REQUEST=UPSERT OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=
: Error: Execution, code: 1060
:1:50: Error: Executing UPSERT OBJECT SECRET
: Error: cannot UPSERT objects: Secret already exists: secret1 ;EXPECTATION=0 FINISHED_REQUEST=UPSERT OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 REQUEST=CREATE OBJECT secret2 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT secret2 (TYPE SECRET) WITH value = `100`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT secret2 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 2025-06-24T21:04:58.390825Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715839. Ctx: { TraceId: 01jyhw483gczpevvm5drp65zfy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmQzN2NjY2QtNDk1ZDlmNDItYWE5ZjIxOGItOGU0ODdiNTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::AnsiIn [GOOD] Test command err: Trying to start YDB, gRPC: 6790, MsgBus: 4722 2025-06-24T21:04:50.033147Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624834647437837:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:50.037093Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ef6/r3tmp/tmpJwXNwh/pdisk_1.dat 2025-06-24T21:04:50.494851Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:50.494937Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:50.518664Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:50.534249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6790, node 1 2025-06-24T21:04:50.615435Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:50.615464Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:50.615470Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:50.615605Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4722 2025-06-24T21:04:51.032608Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4722 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:51.247978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:51.281458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:04:51.438079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:51.617687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:51.703748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:53.379062Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624847532341277:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:53.379223Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:53.765673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:53.797284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:53.835842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:53.883400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:53.921888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:53.957700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:54.007401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:54.074735Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624851827309228:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:54.074816Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:54.075223Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624851827309233:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:54.080162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:54.097139Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624851827309235:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:04:54.179921Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624851827309286:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:55.025733Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624834647437837:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:55.025811Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:55.393709Z node 1 :KQP_SESSION ERROR: kqp_session_actor.cpp:2865: SessionId: ydb://session/3?node_id=1&id=YjMxYTE0ODMtZTQzYjg2YzQtM2NjYWE2NGMtNzhlMGVhOWM=, ActorId: [1:7519624856122276852:2473], ActorState: ExecuteState, TraceId: 01jyhw45f32p6ajhfsrqy7ch0n, Internal error, message: yql/essentials/types/binary_json/read.cpp:161: StringOffset must be inside buffer 2025-06-24T21:04:55.393789Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YjMxYTE0ODMtZTQzYjg2YzQtM2NjYWE2NGMtNzhlMGVhOWM=, ActorId: [1:7519624856122276852:2473], ActorState: ExecuteState, TraceId: 01jyhw45f32p6ajhfsrqy7ch0n, Create QueryResponse for error on request, msg: yql/essentials/types/binary_json/read.cpp:161: StringOffset must be inside buffer Trying to start YDB, gRPC: 1238, MsgBus: 21287 2025-06-24T21:04:56.317573Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624860059226899:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:56.320819Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ef6/r3tmp/tmpnduDyj/pdisk_1.dat 2025-06-24T21:04:56.472256Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:56.472521Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:56.472587Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:56.473116Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624860059226876:2079] 1750799096316874 != 1750799096316877 2025-06-24T21:04:56.486032Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1238, node 2 2025-06-24T21:04:56.617309Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:56.617337Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:56.617345Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:56.617465Z node 2 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21287 TClient is connected to server localhost:21287 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:04:57.182627Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:04:57.190772Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:57.207188Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:57.285854Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:57.391867Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 
2025-06-24T21:04:57.456680Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:57.539649Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:59.797540Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624872944130389:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:59.797655Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:59.843561Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:59.884626Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:59.918547Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:59.976787Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:00.013497Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:00.051722Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:00.099839Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:00.186983Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624877239098341:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:00.187051Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:00.187656Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624877239098346:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:00.191503Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:05:00.201765Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624877239098348:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:05:00.257072Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624877239098399:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:05:01.317736Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624860059226899:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:01.317799Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |93.3%| [TA] {RESULT} $(B)/ydb/tests/olap/scenario/test-results/py3test/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::JoinIndexLookup [GOOD] Test command err: Trying to start YDB, gRPC: 29131, MsgBus: 3913 2025-06-24T21:04:49.065382Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624831572980559:2131];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:49.067059Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002efd/r3tmp/tmprsoViT/pdisk_1.dat 2025-06-24T21:04:49.390665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:49.390799Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:49.431288Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624831572980466:2079] 1750799089050000 != 1750799089050003 2025-06-24T21:04:49.444007Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:49.445709Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29131, node 1 2025-06-24T21:04:49.551686Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:49.551704Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:49.551708Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:49.551829Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3913 TClient is connected to server localhost:3913 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:04:50.068894Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:50.110978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:50.137728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:50.297759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:50.474210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:50.558798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:52.375080Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624844457884010:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:52.375254Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:52.738647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:52.808543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:52.840755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:52.867571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:52.901426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:52.941106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:52.979742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:53.075936Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624848752851967:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:53.076031Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:53.076403Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624848752851972:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:53.080813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:53.095408Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624848752851974:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:53.171874Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624848752852027:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:54.057744Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624831572980559:2131];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:54.057819Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:55.293512Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799095304, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 11524, MsgBus: 15683 2025-06-24T21:04:56.112055Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624863076940909:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:56.112132Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002efd/r3tmp/tmppDIx2k/pdisk_1.dat 2025-06-24T21:04:56.259578Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:56.259655Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:56.263691Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:56.263998Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:56.264748Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624863076940889:2079] 1750799096111631 != 1750799096111634 TServer::EnableGrpc on GrpcPort 11524, node 2 2025-06-24T21:04:56.372423Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:56.372445Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:56.372451Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:56.372572Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15683 TClient is connected to server localhost:15683 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:04:56.864246Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:04:56.872074Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:04:56.881609Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:04:56.971744Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:57.140043Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:04:57.194786Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:57.278145Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:59.664523Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624875961844388:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:59.664631Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:59.730441Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:59.772024Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:59.828105Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:59.866473Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:59.898624Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:59.944059Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:00.007972Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:00.085503Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624880256812344:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:00.085615Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:00.086160Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624880256812349:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:00.090509Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:05:00.103919Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624880256812351:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:05:00.195197Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624880256812404:3412] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:05:01.119287Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624863076940909:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:01.119391Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeSameOperationId >> AnalyzeColumnshard::AnalyzeRebootSaBeforeSave |93.3%| [TA] $(B)/ydb/services/metadata/secret/ut/test-results/unittest/{meta.json ... results_accumulator.log} |93.3%| [TA] {RESULT} $(B)/ydb/services/metadata/secret/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TraverseColumnShard::TraverseColumnTableRebootColumnshard |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeDatashard::AnalyzeTwoTables >> AnalyzeColumnshard::AnalyzeTable >> AnalyzeColumnshard::AnalyzeDeadline >> AnalyzeColumnshard::AnalyzeServerless >> KqpErrors::ResolveTableError [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_21_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 21] [GOOD] >> AnalyzeColumnshard::AnalyzeMultiOperationId >> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME64 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp_errors/unittest >> KqpErrors::ResolveTableError [GOOD] Test command err: 2025-06-24T21:05:02.017989Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:02.019802Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00262b/r3tmp/tmpuJItkV/pdisk_1.dat 2025-06-24T21:05:02.555172Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:02.837142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:03.000598Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:03.000737Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:03.004138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2025-06-24T21:05:03.004241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:03.024321Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:03.024923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:03.025344Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:03.349775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:05.235198Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:198: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Bootstrap done, become ReadyState 2025-06-24T21:05:05.236928Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:608: ActorId: [1:1468:2903] TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Executing physical tx, type: 2, stages: 1 2025-06-24T21:05:05.237992Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-06-24T21:05:05.239289Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:623: ActorId: [1:1468:2903] TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Got request, become WaitResolveState 2025-06-24T21:05:05.240706Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715658. Resolved key sets: 1 2025-06-24T21:05:05.240919Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:295: TxId: 281474976715658. Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 2 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 4 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-24T21:05:05.241818Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2035: ActorId: [1:1468:2903] TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
Stage [0,0] AST: ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (Iterator (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3))))) )))) ) 2025-06-24T21:05:05.244394Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:1512: ActorId: [1:1468:2903] TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Stage [0,0] create compute task: 1 2025-06-24T21:05:05.247995Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Database not set, use /Root 2025-06-24T21:05:05.248192Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-24T21:05:05.252901Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Collect channels updates for task: 1 at actor [1:1471:2903] 2025-06-24T21:05:05.253015Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Sending channels info to compute actor: [1:1471:2903], channels: 0 2025-06-24T21:05:05.253137Z node 1 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [1:1468:2903] TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Total tasks: 1, readonly: 0, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-24T21:05:05.253187Z node 1 :KQP_EXECUTER TRACE: kqp_data_executer.cpp:2809: ActorId: [1:1468:2903] TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Updating channels after the creation of compute actors 2025-06-24T21:05:05.253257Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
Collect channels updates for task: 1 at actor [1:1471:2903] 2025-06-24T21:05:05.253311Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Sending channels info to compute actor: [1:1471:2903], channels: 0 2025-06-24T21:05:05.253388Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [1:1468:2903] TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Waiting for: CA [1:1471:2903], 2025-06-24T21:05:05.253458Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [1:1468:2903] TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. ActorState: WaitResolveState, waiting for 1 compute actor(s) and 0 datashard(s): CA [1:1471:2903], 2025-06-24T21:05:05.253518Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2368: ActorId: [1:1468:2903] TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. ActorState: WaitResolveState, immediate tx, become ExecuteState 2025-06-24T21:05:05.288296Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:1468:2903] TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. ActorState: ExecuteState, got execution state from compute actor: [1:1471:2903], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-24T21:05:05.288446Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [1:1468:2903] TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Waiting for: CA [1:1471:2903], 2025-06-24T21:05:05.288522Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [1:1468:2903] TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [1:1471:2903], 2025-06-24T21:05:05.293461Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:1468:2903] TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
ActorState: ExecuteState, got execution state from compute actor: [1:1471:2903], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 21048 Tasks { TaskId: 1 CpuTimeUs: 11290 FinishTimeMs: 1750799105292 EgressBytes: 30 EgressRows: 3 ComputeCpuTimeUs: 1739 BuildCpuTimeUs: 9551 HostName: "ghrun-4pjfmvffsq" NodeId: 1 CreateTimeMs: 1750799105260 UpdateTimeMs: 1750799105292 } MaxMemoryUsage: 1048576 } 2025-06-24T21:05:05.293650Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Compute actor has finished execution: [1:1471:2903] 2025-06-24T21:05:05.293760Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:276: ActorId: [1:1468:2903] TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Send Commit to BufferActor=[1:1467:2903] 2025-06-24T21:05:05.293845Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [1:1468:2903] TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Resource usage for last stat interval: ComputeTime: 0.021048s ReadRows: 0 ReadBytes: 0 ru: 14 rate limiter was not found force flag: 1 2025-06-24T21:05:05.353544Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [1:1468:2903] TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. terminate execution. 2025-06-24T21:05:05.353650Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2202: ActorId: [1:1468:2903] TxId: 281474976715658. Ctx: { TraceId: 01jyhw4e9mfg4cxea1fy1cr3n1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhjMDA3YS1iMDk2NTNjYi1hZjdjZjM3MC01NGE3NzFkOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Terminate, become ZombieState 2025-06-24T21:05:05.484524Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:1487:2921], status: UNAVAILABLE, issues:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[/Root/table-1]
: Error: LookupError, code: 2005 2025-06-24T21:05:05.486958Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZTQ1ZWMxNzEtMTg2ZmM2YjgtZjRiZDBiMmYtOGViMzE1ZmQ=, ActorId: [1:1485:2919], ActorState: ExecuteState, TraceId: 01jyhw4fajddf33pq35dt2c2k1, ReplyQueryCompileError, status UNAVAILABLE remove tx with tx_id: >> KqpErrors::ProposeError [GOOD] >> KqpErrors::ProposeErrorEvWrite >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> BackupPathTest::ParallelBackupWholeDatabase [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_19_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 19] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME64 [GOOD] Test command err: 2025-06-24T21:02:03.495665Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624120039150968:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:03.495719Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045de/r3tmp/tmpQof9oq/pdisk_1.dat 2025-06-24T21:02:04.277164Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:04.277254Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:04.307665Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:04.315622Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8666, node 1 2025-06-24T21:02:04.563578Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:02:04.682389Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:02:04.682413Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:02:04.682421Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:02:04.682557Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14828 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:02:05.190836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:02:07.556912Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624137219021182:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:07.557036Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:07.948377Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519624120039151177:2134] Handle TEvProposeTransaction 2025-06-24T21:02:07.948413Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519624120039151177:2134] TxId# 281474976710658 ProcessProposeTransaction 2025-06-24T21:02:07.948467Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519624120039151177:2134] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519624137219021209:2624] 2025-06-24T21:02:08.025929Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519624137219021209:2624] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Value" Type: "Utf8" NotNull: false } KeyColumnNames: "Key" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 10 SplitByLoadSettings { Enabled: true } } } Temporary: false } } } UserToken: "" DatabaseName: "" 2025-06-24T21:02:08.025990Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519624137219021209:2624] txid# 281474976710658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:02:08.026371Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519624137219021209:2624] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:02:08.026440Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519624137219021209:2624] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:02:08.026580Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519624137219021209:2624] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:08.026706Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519624137219021209:2624] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:02:08.026750Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519624137219021209:2624] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-24T21:02:08.026912Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519624137219021209:2624] txid# 281474976710658 HANDLE EvClientConnected 2025-06-24T21:02:08.028931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:08.033238Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519624137219021209:2624] txid# 281474976710658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710658} 2025-06-24T21:02:08.033295Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519624137219021209:2624] txid# 281474976710658 SEND to# [1:7519624137219021208:2304] Source 
{TEvProposeTransactionStatus txid# 281474976710658 Status# 53} 2025-06-24T21:02:08.233840Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519624120039151177:2134] Handle TEvNavigate describe path /Root/table 2025-06-24T21:02:08.278691Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519624141513988648:2739] HANDLE EvNavigateScheme /Root/table 2025-06-24T21:02:08.278966Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519624141513988648:2739] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:08.279069Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519624141513988648:2739] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/table" Options { ShowPrivateTable: false } 2025-06-24T21:02:08.281402Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519624141513988648:2739] Handle TEvDescribeSchemeResult Forward to# [1:7519624141513988646:2310] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/table" PathDescription { Self { Name: "table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750798928165 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 
UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 10 SplitByLoadSettings { Enabled: true } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 ... .570204Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:989: TImport::TTxProgress: OnSchemeResult: id# 281474976710665, itemIdx# 0, success# 1 2025-06-24T21:05:05.570823Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:629: TImport::TTxProgress: Allocate txId: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/Datetime64Table' DstPathId: State: CreateSchemeObject SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-24T21:05:05.588241Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:05:05.588448Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:05:05.588472Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1218: TImport::TTxProgress: OnAllocateResult: txId# 281474976715760, id# 281474976710665 2025-06-24T21:05:05.588565Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:419: TImport::TTxProgress: CreateTable propose: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/Datetime64Table' DstPathId: State: CreateSchemeObject SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976715760 2025-06-24T21:05:05.588800Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:05:05.590950Z node 52 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715760:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:05.594895Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:05:05.594932Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1314: TImport::TTxProgress: OnModifyResult: txId# 281474976715760, status# StatusAccepted 2025-06-24T21:05:05.595189Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:643: TImport::TTxProgress: Wait for completion: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/Datetime64Table' DstPathId: [OwnerId: 
72057594046644480, LocalPathId: 9] State: CreateSchemeObject SubState: Subscribed WaitTxId: 281474976715760 Issue: '' } 2025-06-24T21:05:05.600365Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:05:05.667117Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:05:05.667165Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1472: TImport::TTxProgress: OnNotifyResult: txId# 281474976715760 2025-06-24T21:05:05.667292Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:629: TImport::TTxProgress: Allocate txId: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/Datetime64Table' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-24T21:05:05.668924Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:05:05.669053Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:05:05.669083Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1218: TImport::TTxProgress: OnAllocateResult: txId# 281474976715761, id# 281474976710665 2025-06-24T21:05:05.669171Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:520: TImport::TTxProgress: Restore propose: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/Datetime64Table' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976715761 2025-06-24T21:05:05.670456Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:05:05.671167Z node 52 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRestore, opId: 281474976715761:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_backup_restore_common.h:563) 2025-06-24T21:05:05.673775Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:05:05.673806Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1314: TImport::TTxProgress: OnModifyResult: txId# 281474976715761, status# StatusAccepted 2025-06-24T21:05:05.673937Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:643: TImport::TTxProgress: Wait for completion: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/Datetime64Table' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Subscribed WaitTxId: 281474976715761 Issue: '' } 2025-06-24T21:05:05.675778Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:05:05.687069Z node 52 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [52:7519624898550486800:2366] [0] Resolve database: name# /Root 2025-06-24T21:05:05.687567Z node 52 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: 
[GetImport] [52:7519624898550486800:2366] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:05:05.687611Z node 52 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [52:7519624898550486800:2366] [0] Send request: schemeShardId# 72057594046644480 2025-06-24T21:05:05.689379Z node 52 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [52:7519624898550486800:2366] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976710665 Status: SUCCESS Progress: PROGRESS_TRANSFER_DATA ImportFromS3Settings { endpoint: "localhost:25677" scheme: HTTP bucket: "test_bucket" items { source_prefix: "Datetime64Table" destination_path: "/Root/Datetime64Table" } } StartTime { seconds: 1750799105 } } REQUEST: HEAD /test_bucket/Datetime64Table/data_00.csv HTTP/1.1 HEADERS: Host: localhost:25677 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: CE06834C-8494-4488-9A25-A70A3AB8748F amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250624/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=d9b0ceb71a346e0a6a90ff7417ecdce4e6bab75c861ebca86af3aa88a6f0ef87 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250624T210505Z S3_MOCK::HttpServeRead: /test_bucket/Datetime64Table/data_00.csv / 7 REQUEST: GET /test_bucket/Datetime64Table/data_00.csv HTTP/1.1 HEADERS: Host: localhost:25677 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: A41BA383-A091-437C-B9E5-77FC6DFC4ABB amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250624/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;range;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=faddbb02f5faa26386d4d3aecdb01c06bba50d956e76ae8ad43c8d0cdf89d813 content-type: application/xml range: bytes=0-6 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250624T210505Z S3_MOCK::HttpServeRead: /test_bucket/Datetime64Table/data_00.csv / 7 2025-06-24T21:05:05.776175Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:05:05.776224Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1472: TImport::TTxProgress: OnNotifyResult: txId# 281474976715761 2025-06-24T21:05:05.779079Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 
2025-06-24T21:05:06.098059Z node 52 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [52:7519624902845454159:2369] [0] Resolve database: name# /Root 2025-06-24T21:05:06.098657Z node 52 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [52:7519624902845454159:2369] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:05:06.098723Z node 52 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [52:7519624902845454159:2369] [0] Send request: schemeShardId# 72057594046644480 2025-06-24T21:05:06.099532Z node 52 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [52:7519624902845454159:2369] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976710665 Status: SUCCESS Progress: PROGRESS_DONE ImportFromS3Settings { endpoint: "localhost:25677" scheme: HTTP bucket: "test_bucket" items { source_prefix: "Datetime64Table" destination_path: "/Root/Datetime64Table" } } StartTime { seconds: 1750799105 } EndTime { seconds: 1750799105 } } 2025-06-24T21:05:06.254952Z node 52 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [52:7519624864190746602:2133] Handle TEvExecuteKqpTransaction 2025-06-24T21:05:06.254984Z node 52 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [52:7519624864190746602:2133] TxId# 281474976710666 ProcessProposeKqpTransaction 2025-06-24T21:05:06.256216Z node 52 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jyhw4g1w0hpr2zz3t9rsb9yh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=52&id=NzVjZjJkNTktMmFmYjRjYWItMTlmMTIxYmQtNjIzMzM3Nw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeReqDistribution >> KqpScripting::StreamExecuteYqlScriptScanOperationTmeoutBruteForce [GOOD] >> BackupPathTest::ChecksumsForSchemaMappingFiles |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptScanOperationTmeoutBruteForce [GOOD] Test command err: Trying to start YDB, gRPC: 17332, MsgBus: 8566 2025-06-24T21:04:48.930457Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624826472326902:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:48.930557Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f04/r3tmp/tmpn61PIH/pdisk_1.dat 2025-06-24T21:04:49.281618Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624826472326886:2079] 1750799088929160 != 1750799088929163 2025-06-24T21:04:49.291493Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17332, node 1 2025-06-24T21:04:49.365255Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:49.365355Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:49.366397Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:04:49.427831Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:49.427859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:49.427866Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:49.428027Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8566 TClient is connected to server localhost:8566 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:04:49.959504Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:49.973270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:04:49.986211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:49.996653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:50.138129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:50.317774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:50.392206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:04:52.238161Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624843652197696:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:52.238284Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:52.555568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:52.595397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:52.668440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:52.716962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:52.748235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:52.779874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:52.860795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:52.934444Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624843652198355:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:52.934553Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:52.934959Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624843652198360:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:52.939667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:52.996819Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624843652198362:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:53.026438Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624847947165711:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:53.930094Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624826472326902:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:53.930180Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:54.227786Z node 1 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [1:7519624852242133278:2473] 2025-06-24T21:04:54.299714Z node 1 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [1:7519624852242133285:2476] 2025-06-24T21:04:54.313870Z node 1 :RPC_REQUEST W ... 6100:2685]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037892, actor_id: [2:7519624888137549906:2288] 2025-06-24T21:05:08.732846Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710674. Snapshot is not valid, tabletId: 72075186224037891, step: 1750799108723 2025-06-24T21:05:08.732904Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710674. Snapshot is not valid, tabletId: 72075186224037893, step: 1750799108723 2025-06-24T21:05:08.732954Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710674. Snapshot is not valid, tabletId: 72075186224037892, step: 1750799108723 2025-06-24T21:05:08.733004Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624913907356099:2684]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037891, actor_id: [2:7519624888137549878:2282] 2025-06-24T21:05:08.733065Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624913907356101:2686]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037893, actor_id: [2:7519624888137549879:2283] 2025-06-24T21:05:08.733117Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624913907356100:2685]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037892, actor_id: [2:7519624888137549906:2288] 2025-06-24T21:05:08.733369Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710674. Snapshot is not valid, tabletId: 72075186224037891, step: 1750799108723 2025-06-24T21:05:08.733431Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710674. Snapshot is not valid, tabletId: 72075186224037893, step: 1750799108723 2025-06-24T21:05:08.733476Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710674. Snapshot is not valid, tabletId: 72075186224037892, step: 1750799108723 2025-06-24T21:05:08.733527Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624913907356099:2684]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037891, actor_id: [2:7519624888137549878:2282] 2025-06-24T21:05:08.733674Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624913907356100:2685]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037892, actor_id: [2:7519624888137549906:2288] 2025-06-24T21:05:08.733798Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710674. Snapshot is not valid, tabletId: 72075186224037890, step: 1750799108723 2025-06-24T21:05:08.733914Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710674. Snapshot is not valid, tabletId: 72075186224037891, step: 1750799108723 2025-06-24T21:05:08.733964Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624913907356098:2683]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037890, actor_id: [2:7519624888137549877:2281] 2025-06-24T21:05:08.734019Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624913907356099:2684]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037891, actor_id: [2:7519624888137549878:2282] 2025-06-24T21:05:08.734201Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710674. Snapshot is not valid, tabletId: 72075186224037892, step: 1750799108723 2025-06-24T21:05:08.734257Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710674. Snapshot is not valid, tabletId: 72075186224037891, step: 1750799108723 2025-06-24T21:05:08.734308Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710674. Snapshot is not valid, tabletId: 72075186224037890, step: 1750799108723 2025-06-24T21:05:08.734358Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624913907356100:2685]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037892, actor_id: [2:7519624888137549906:2288] 2025-06-24T21:05:08.734380Z node 2 :KQP_COMPUTE ERROR: kqp_scan_fetcher_actor.cpp:188: SelfId: [2:7519624913907356100:2685]. TKqpScanFetcherActor: broken tablet for this request 72075186224037892, retries limit exceeded (0/20) 2025-06-24T21:05:08.734518Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624913907356099:2684]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037891, actor_id: [2:7519624888137549878:2282] 2025-06-24T21:05:08.734587Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624913907356098:2683]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037890, actor_id: [2:7519624888137549877:2281] 2025-06-24T21:05:08.734639Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624913907356101:2686]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037893, actor_id: [2:7519624888137549879:2283] 2025-06-24T21:05:08.734826Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710674. Snapshot is not valid, tabletId: 72075186224037890, step: 1750799108723 2025-06-24T21:05:08.734888Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710674. Snapshot is not valid, tabletId: 72075186224037893, step: 1750799108723 2025-06-24T21:05:08.734936Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624913907356098:2683]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037890, actor_id: [2:7519624888137549877:2281] 2025-06-24T21:05:08.734950Z node 2 :KQP_COMPUTE ERROR: kqp_scan_fetcher_actor.cpp:188: SelfId: [2:7519624913907356098:2683]. TKqpScanFetcherActor: broken tablet for this request 72075186224037890, retries limit exceeded (0/20) 2025-06-24T21:05:08.735039Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624913907356101:2686]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037893, actor_id: [2:7519624888137549879:2283] 2025-06-24T21:05:08.735283Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710674. Snapshot is not valid, tabletId: 72075186224037891, step: 1750799108723 2025-06-24T21:05:08.735361Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624913907356099:2684]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037891, actor_id: [2:7519624888137549878:2282] 2025-06-24T21:05:08.735379Z node 2 :KQP_COMPUTE ERROR: kqp_scan_fetcher_actor.cpp:188: SelfId: [2:7519624913907356099:2684]. TKqpScanFetcherActor: broken tablet for this request 72075186224037891, retries limit exceeded (0/20) 2025-06-24T21:05:08.735751Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710674. Snapshot is not valid, tabletId: 72075186224037893, step: 1750799108723 2025-06-24T21:05:08.735813Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624913907356101:2686]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037893, actor_id: [2:7519624888137549879:2283] 2025-06-24T21:05:08.736103Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710674. Snapshot is not valid, tabletId: 72075186224037893, step: 1750799108723 2025-06-24T21:05:08.736154Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624913907356101:2686]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037893, actor_id: [2:7519624888137549879:2283] 2025-06-24T21:05:08.736423Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976710674. Snapshot is not valid, tabletId: 72075186224037893, step: 1750799108723 2025-06-24T21:05:08.736483Z node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:7519624913907356101:2686]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/EightShard' scheme changed., code: 2028 , tablet id: 72075186224037893, actor_id: [2:7519624888137549879:2283] 2025-06-24T21:05:08.736497Z node 2 :KQP_COMPUTE ERROR: kqp_scan_fetcher_actor.cpp:188: SelfId: [2:7519624913907356101:2686]. TKqpScanFetcherActor: broken tablet for this request 72075186224037893, retries limit exceeded (0/20) 2025-06-24T21:05:08.890318Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519624913907356254:2703] TxId: 281474976710677. Ctx: { TraceId: 01jyhw4jnpbmbpt5h6fv811bbw, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YWQ1ODc5NTUtZjQ2Njk3ZmQtNjk1MmFhNzItYTFjNWMyNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-24T21:05:08.890503Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YWQ1ODc5NTUtZjQ2Njk3ZmQtNjk1MmFhNzItYTFjNWMyNTk=, ActorId: [2:7519624913907356228:2703], ActorState: ExecuteState, TraceId: 01jyhw4jnpbmbpt5h6fv811bbw, Create QueryResponse for error on request, msg: 2025-06-24T21:05:08.890987Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799108926, txId: 281474976710676] shutting down 2025-06-24T21:05:08.891306Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519624913907356264:2708], TxId: 281474976710677, task: 5. Ctx: { TraceId : 01jyhw4jnpbmbpt5h6fv811bbw. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=YWQ1ODc5NTUtZjQ2Njk3ZmQtNjk1MmFhNzItYTFjNWMyNTk=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519624913907356254:2703], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T21:05:09.224282Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799109255, txId: 281474976710679] shutting down 2025-06-24T21:05:09.478620Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YWIwZmNhNi1mMDU1YWJjMC1kNTczODk0Yy1lNmU2NmE2YQ==, ActorId: [2:7519624918202323788:2757], ActorState: ExecuteState, TraceId: 01jyhw4k7hb8tneyf3bgbvan71, Create QueryResponse for error on request, msg: 2025-06-24T21:05:09.587766Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799109612, txId: 281474976710681] shutting down 2025-06-24T21:05:09.930040Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799109962, txId: 281474976710683] shutting down >> TraverseDatashard::TraverseTwoTablesServerless [GOOD] >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeSave |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseDatashard::TraverseTwoTablesServerless [GOOD] Test command err: 2025-06-24T21:05:00.288383Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:00.288778Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:00.288999Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043c6/r3tmp/tmpLYX2Ls/pdisk_1.dat 2025-06-24T21:05:00.689654Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26855, node 1 2025-06-24T21:05:00.960087Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:00.960164Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:00.960240Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:00.960486Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:00.963307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:01.083283Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:01.083453Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:01.100680Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22710 2025-06-24T21:05:01.691128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:05.231848Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:05.270921Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:05.271045Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:05.331925Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:05.334476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:05.557409Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:05.595141Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.595830Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.596419Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.596588Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.596685Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.596930Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.597027Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.597108Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.597189Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.788640Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:05.788777Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:05.815780Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:06.016525Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:06.070541Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:06.070626Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:06.098552Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:06.099913Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:06.100170Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:06.100236Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:06.100309Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:06.100386Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:06.100425Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:06.100483Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:06.101574Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:06.146259Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:06.146387Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:06.154079Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:06.158929Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:06.159257Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:06.169286Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-24T21:05:06.203073Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:06.203170Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:06.203250Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-24T21:05:06.226237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:06.241354Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:06.241521Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:06.429268Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:06.634298Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:06.690734Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-24T21:05:07.229165Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:07.257787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:07.840882Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:07.992558Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7890: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-24T21:05:07.992628Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7906: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:05:07.992738Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:2496:2900], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:05:07.997773Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2504:2904] 2025-06-24T21:05:07.998101Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2504:2904], schemeshard id = 72075186224037899 2025-06-24T21:05:09.254922Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2623:3195], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:09.255094Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:09.272112Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72075186224037899, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:09.586114Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2921:3241], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:09.586410Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:09.587707Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2926:3245]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:09.587929Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:05:09.588092Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 18446744073709551615 ] 2025-06-24T21:05:09.588160Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2929:3248] 2025-06-24T21:05:09.588217Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2929:3248] 2025-06-24T21:05:09.588796Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2930:3128] 2025-06-24T21:05:09.589157Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2929:3248], server id = [2:2930:3128], tablet id = 72075186224037894, status = OK 2025-06-24T21:05:09.589308Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2930:3128], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:05:09.589388Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-24T21:05:09.589617Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:05:09.589694Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2926:3245], StatRequests.size() = 1 2025-06-24T21:05:09.606674Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2934:3252], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:09.606876Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:09.607447Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2939:3257], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:09.613408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:05:09.781552Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:05:09.781646Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:05:09.802938Z node 1 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [1:2929:3248], schemeshard count = 1 2025-06-24T21:05:10.054217Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2941:3259], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-24T21:05:10.306270Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:3064:3327] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:05:10.320989Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:3087:3343]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:10.321182Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:05:10.321249Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:3087:3343], StatRequests.size() = 1 2025-06-24T21:05:10.380522Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhw4kd5agpmbwy0e8q8zkb0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Njc5MGMxNmQtYjhkNWExMzEtM2JjY2NhZDQtYTZkYzgyMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:05:10.481761Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72075186224037899, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:10.911310Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [1:3420:3409]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:10.911546Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:05:10.911602Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 3, ReplyToActorId = [1:3420:3409], StatRequests.size() = 1 2025-06-24T21:05:10.941485Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 4 ], ReplyToActorId[ [1:3429:3418]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:10.941692Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 4 ] 2025-06-24T21:05:10.941732Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 4, ReplyToActorId = [1:3429:3418], StatRequests.size() = 1 2025-06-24T21:05:10.992727Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyhw4mqd2kqpfw09fzqe5d0b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODM5MjlkNzktYTFlNzUwNDYtNGE3YzY3YTktZjYyNmNjZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:05:11.034927Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:3471:3383]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:11.037969Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:05:11.038037Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:05:11.038404Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:05:11.038465Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-24T21:05:11.038523Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037899, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:05:11.080465Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 1 ], RowsCount[ 0 ] 2025-06-24T21:05:11.081640Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 2025-06-24T21:05:11.082044Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:3495:3395]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:11.085045Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:05:11.085111Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:05:11.085530Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:05:11.085584Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 2 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-24T21:05:11.085639Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 2 ], PathId[ [OwnerId: 72075186224037899, LocalPathId: 3] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:05:11.088278Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 2 ], RowsCount[ 0 ] 2025-06-24T21:05:11.088568Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 2 >> TraverseDatashard::TraverseTwoTables [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> BackupRestore::TestReplaceRestoreOptionOnNonExistingSchemeObjects [GOOD] >> BackupRestoreS3::PrefixedVectorIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseDatashard::TraverseTwoTables [GOOD] Test command err: 2025-06-24T21:05:03.791059Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:417:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:03.791597Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:03.791714Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043b0/r3tmp/tmp861YKo/pdisk_1.dat 2025-06-24T21:05:04.182707Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26028, node 1 2025-06-24T21:05:04.434658Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:04.434719Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:04.434751Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:04.435301Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:04.438172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:04.544005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:04.544163Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:04.561555Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17454 2025-06-24T21:05:05.136278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:08.437466Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:08.479761Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:08.479899Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:08.512686Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:08.515078Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:08.717554Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:08.741974Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.742601Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.743127Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.743302Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.743570Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.743659Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.743807Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.743901Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.744000Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.933023Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:08.933142Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:08.946382Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:09.114801Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:09.180441Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:09.180622Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:09.220226Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:09.220552Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:09.221023Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:09.221085Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:09.221145Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:09.221240Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:09.221301Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:09.221363Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:09.221934Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:09.246549Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:09.246677Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1790:2561], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:09.255279Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2570] 2025-06-24T21:05:09.265357Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1847:2588] 2025-06-24T21:05:09.265671Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1847:2588], schemeshard id = 72075186224037897 2025-06-24T21:05:09.272849Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:09.291802Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:09.291869Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:09.291939Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:09.304720Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:09.318407Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:09.318583Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:09.513452Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:09.689526Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:09.768705Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:10.428352Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:10.701448Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2142:3021], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:10.701651Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:10.720381Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:11.230257Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2447:3070], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:11.230575Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:11.231584Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2452:3074]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:11.231726Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:05:11.231823Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2454:3076] 2025-06-24T21:05:11.231876Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2454:3076] 2025-06-24T21:05:11.232355Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2455:2949] 2025-06-24T21:05:11.232582Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2454:3076], server id = [2:2455:2949], tablet id = 72075186224037894, status = OK 2025-06-24T21:05:11.232793Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2455:2949], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:05:11.232864Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-24T21:05:11.233057Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:05:11.233116Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2452:3074], StatRequests.size() = 1 2025-06-24T21:05:11.248704Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2459:3080], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:11.248812Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:11.249109Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2464:3085], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:11.286967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:05:11.432735Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:05:11.432830Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:05:11.520109Z node 1 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [1:2454:3076], schemeshard count = 1 2025-06-24T21:05:11.889522Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2466:3087], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-24T21:05:12.038791Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2577:3157] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:05:12.052599Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2600:3173]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:12.052775Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:05:12.052812Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2600:3173], StatRequests.size() = 1 2025-06-24T21:05:12.114139Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhw4n0hevk0pa67ka9s8y19, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWUzYjgzODgtOWJiNWFiZmYtMTQwMDZiMDgtYzBmY2I4NTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:05:12.208162Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:12.602665Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [1:2948:3238]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:12.602951Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:05:12.603012Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 3, ReplyToActorId = [1:2948:3238], StatRequests.size() = 1 2025-06-24T21:05:12.630452Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 4 ], ReplyToActorId[ [1:2957:3247]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:12.630637Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 4 ] 2025-06-24T21:05:12.630674Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 4, ReplyToActorId = [1:2957:3247], StatRequests.size() = 1 2025-06-24T21:05:12.676381Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhw4pc8b34ymg0ssaj0vgqp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTAyZDAwN2UtMTk5MWFiZTEtMzMyMmNmMDgtMmI2YzJiY2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:05:12.795833Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:3007:3212]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:12.798872Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:05:12.798948Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:05:12.799347Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:05:12.799405Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:05:12.799468Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:05:12.830620Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 1 ], RowsCount[ 0 ] 2025-06-24T21:05:12.831072Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 2025-06-24T21:05:12.831439Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:3031:3224]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:12.834300Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:05:12.834361Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:05:12.834720Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:05:12.834772Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 2 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:05:12.834824Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 2 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 5] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:05:12.837262Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 2 ], RowsCount[ 0 ] 2025-06-24T21:05:12.837576Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 2 >> TraverseColumnShard::TraverseColumnTableHiveDistributionZeroNodes >> TraverseDatashard::TraverseTwoTablesTwoServerlessDbs >> TraverseDatashard::TraverseOneTableServerless [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseDatashard::TraverseOneTableServerless [GOOD] Test command err: 2025-06-24T21:05:05.729764Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:05.730107Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:05.730267Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00439a/r3tmp/tmp1Vybhq/pdisk_1.dat 2025-06-24T21:05:06.146228Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13195, node 1 2025-06-24T21:05:06.378309Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:06.378393Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:06.378457Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:06.378717Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:06.384634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:06.499454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:06.499639Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:06.514640Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2977 2025-06-24T21:05:07.101337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:10.518248Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:10.556632Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:10.556755Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:10.617069Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:10.619380Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:10.840587Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:10.875973Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.876636Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.877264Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.877428Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.877532Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.877771Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.877863Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.877945Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.878033Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:11.065885Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:11.066008Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:11.079112Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:11.212004Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:11.254119Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:11.254195Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:11.282295Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:11.283516Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:11.283683Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:11.283742Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:11.283786Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:11.283824Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:11.283863Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:11.283899Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:11.284623Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:11.321066Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:11.321166Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:11.327517Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:11.330482Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:11.330763Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:11.338437Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-24T21:05:11.364317Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:11.364398Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:11.364472Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-24T21:05:11.379137Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:11.392206Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:11.392357Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:11.561665Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:11.730854Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:11.788045Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-24T21:05:12.332283Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:12.362040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:12.945357Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:13.088708Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7890: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-24T21:05:13.088775Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7906: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:05:13.088902Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:2496:2900], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:05:13.091557Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2504:2904] 2025-06-24T21:05:13.091865Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2504:2904], schemeshard id = 72075186224037899 2025-06-24T21:05:14.268137Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2621:3193], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:14.268295Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:14.288791Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72075186224037899, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:14.626265Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2925:3241], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:14.626415Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:14.675281Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2930:3245]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:14.675533Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:05:14.675744Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 18446744073709551615 ] 2025-06-24T21:05:14.675813Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2933:3248] 2025-06-24T21:05:14.675879Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2933:3248] 2025-06-24T21:05:14.676539Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2934:3134] 2025-06-24T21:05:14.676936Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2933:3248], server id = [2:2934:3134], tablet id = 72075186224037894, status = OK 2025-06-24T21:05:14.677148Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2934:3134], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:05:14.677261Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-24T21:05:14.677545Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:05:14.677643Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2930:3245], StatRequests.size() = 1 2025-06-24T21:05:14.692169Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2938:3252], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:14.692322Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:14.692612Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2943:3257], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:14.698580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:05:14.808977Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:05:14.809066Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:05:14.877683Z node 1 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [1:2933:3248], schemeshard count = 1 2025-06-24T21:05:15.148871Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2945:3259], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-24T21:05:15.350081Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:3062:3327] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:05:15.363663Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:3085:3343]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:15.363781Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:05:15.363812Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:3085:3343], StatRequests.size() = 1 2025-06-24T21:05:15.465903Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhw4rc076me03ce9p78rx6t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjRiYmIxMmYtOGM1NWJjNGYtODg5ZmEzNWQtZDUyMjhhZWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:05:15.528618Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:3127:3183]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:15.531805Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:05:15.531877Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:05:15.532323Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:05:15.532380Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-24T21:05:15.532441Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037899, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:05:15.545269Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 1 ], RowsCount[ 0 ] 2025-06-24T21:05:15.545540Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 >> TraverseColumnShard::TraverseColumnTableHiveDistributionAbsentNodes >> AnalyzeColumnshard::Analyze >> AnalyzeDatashard::DropTableNavigateError >> KqpErrors::ProposeErrorEvWrite [GOOD] >> BackupPathTest::ChecksumsForSchemaMappingFiles [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp_errors/unittest >> KqpErrors::ProposeErrorEvWrite [GOOD] Test command err: 2025-06-24T21:05:01.999575Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:02.000527Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:02.000666Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:02.000725Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:05:02.001238Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:02.001302Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00264c/r3tmp/tmpwTdrKJ/pdisk_1.dat 2025-06-24T21:05:02.563131Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:02.834877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:02.998856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:02.999017Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:03.005006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:03.005174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:03.021651Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:03.022189Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:03.022734Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:03.349787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:04.344838Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1503:2909], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:04.344987Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1513:2914], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:04.345103Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:04.355383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:05:04.552728Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:04.554241Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:04.977346Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:1517:2917], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:05:05.201144Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1657:2997] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:05:05.544781Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:96: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Begin literal execution. Operation timeout: 0.000000s, cancelAfter: (empty maybe) 2025-06-24T21:05:05.544880Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:125: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Begin literal execution, txs: 1 2025-06-24T21:05:05.544958Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-06-24T21:05:05.545017Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:135: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Stage [0,0] AST: ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (ToStream (Just (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3)))))) )))) ) 2025-06-24T21:05:05.545082Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:241: Create result channelId: 1 from task: 1 with index: 0 2025-06-24T21:05:05.554463Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:275: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Execution is complete, results: 1 2025-06-24T21:05:05.567172Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:96: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhw4eancc243z616gbgkp2d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThlYjllOGUtZTc3ZjdjZmQtMzBkNmU1N2ItMzc1MDA0YjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Begin literal execution. Operation timeout: 299.420729s, cancelAfter: (empty maybe) 2025-06-24T21:05:05.567275Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:125: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhw4eancc243z616gbgkp2d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThlYjllOGUtZTc3ZjdjZmQtMzBkNmU1N2ItMzc1MDA0YjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Begin literal execution, txs: 1 2025-06-24T21:05:05.567356Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-06-24T21:05:05.567417Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:135: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhw4eancc243z616gbgkp2d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThlYjllOGUtZTc3ZjdjZmQtMzBkNmU1N2ItMzc1MDA0YjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Stage [0,0] AST: ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (ToStream (Just (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3)))))) )))) ) 2025-06-24T21:05:05.567491Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:241: Create result channelId: 1 from task: 1 with index: 0 2025-06-24T21:05:05.568159Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:275: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhw4eancc243z616gbgkp2d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThlYjllOGUtZTc3ZjdjZmQtMzBkNmU1N2ItMzc1MDA0YjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Execution is complete, results: 1 2025-06-24T21:05:05.568426Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:198: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhw4eancc243z616gbgkp2d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThlYjllOGUtZTc3ZjdjZmQtMzBkNmU1N2ItMzc1MDA0YjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Bootstrap done, become ReadyState 2025-06-24T21:05:05.568780Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:608: ActorId: [1:1683:2907] TxId: 281474976715660. Ctx: { TraceId: 01jyhw4eancc243z616gbgkp2d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThlYjllOGUtZTc3ZjdjZmQtMzBkNmU1N2ItMzc1MDA0YjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing physical tx, type: 2, stages: 1 2025-06-24T21:05:05.568859Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-06-24T21:05:05.569042Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:623: ActorId: [1:1683:2907] TxId: 281474976715660. Ctx: { TraceId: 01jyhw4eancc243z616gbgkp2d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThlYjllOGUtZTc3ZjdjZmQtMzBkNmU1N2ItMzc1MDA0YjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got request, become WaitResolveState 2025-06-24T21:05:05.569403Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715660. Resolved key sets: 1 2025-06-24T21:05:05.569609Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:295: TxId: 281474976715660. Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 2 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 4 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-24T21:05:05.569808Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2035: ActorId: [1:1683:2907] TxId: 281474976715660. Ctx: { TraceId: 01jyhw4eancc243z616gbgkp2d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThlYjllOGUtZTc3ZjdjZmQtMzBkNmU1N2ItMzc1MDA0YjI=, CurrentExecutionId: , ... zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:05:18.400850Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715672. 
Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-24T21:05:18.401186Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [3:1944:3143] 2025-06-24T21:05:18.401267Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [3:1944:3143], channels: 0 2025-06-24T21:05:18.401334Z node 3 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [3:1941:3143] TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 1, readonly: 0, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-24T21:05:18.401385Z node 3 :KQP_EXECUTER TRACE: kqp_data_executer.cpp:2809: ActorId: [3:1941:3143] TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Updating channels after the creation of compute actors 2025-06-24T21:05:18.401435Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [3:1944:3143] 2025-06-24T21:05:18.401488Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [3:1944:3143], channels: 0 2025-06-24T21:05:18.401557Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [3:1941:3143] TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [3:1944:3143], 2025-06-24T21:05:18.401629Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [3:1941:3143] TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: WaitResolveState, waiting for 1 compute actor(s) and 0 datashard(s): CA [3:1944:3143], 2025-06-24T21:05:18.401680Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2368: ActorId: [3:1941:3143] TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, immediate tx, become ExecuteState 2025-06-24T21:05:18.402617Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1941:3143] TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [3:1944:3143], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-24T21:05:18.402691Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [3:1941:3143] TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [3:1944:3143], 2025-06-24T21:05:18.402759Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [3:1941:3143] TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [3:1944:3143], 2025-06-24T21:05:18.403622Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1941:3143] TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [3:1944:3143], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 659 Tasks { TaskId: 1 CpuTimeUs: 110 FinishTimeMs: 1750799118403 EgressBytes: 10 EgressRows: 1 ComputeCpuTimeUs: 21 BuildCpuTimeUs: 89 HostName: "ghrun-4pjfmvffsq" NodeId: 3 CreateTimeMs: 1750799118402 UpdateTimeMs: 1750799118403 } MaxMemoryUsage: 1048576 } 2025-06-24T21:05:18.403751Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [3:1944:3143] 2025-06-24T21:05:18.403823Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:276: ActorId: [3:1941:3143] TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send Commit to BufferActor=[3:1940:3143] 2025-06-24T21:05:18.403892Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [3:1941:3143] TxId: 281474976715672. 
Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000659s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-24T21:05:18.418149Z node 3 :KQP_COMPUTE WARN: kqp_write_actor.cpp:715: SelfId: [3:1947:3143], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [3:1931:3143]Got OUT_OF_SPACE for table `/Root/table-1`. ShardID=72075186224037888, Sink=[3:1947:3143]. Ignored this error. 2025-06-24T21:05:18.418281Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:1940:3143], SessionActorId: [3:1931:3143], statusCode=OVERLOADED. Issue=
: Error: Tablet 72075186224037888 is out of space. Table `/Root/table-1`., code: 2006 . sessionActorId=[3:1931:3143]. isRollback=0 2025-06-24T21:05:18.418571Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, ActorId: [3:1931:3143], ActorState: ExecuteState, TraceId: 01jyhw4vys8gj641k78w71r34j, got TEvKqpBuffer::TEvError in ExecuteState, status: OVERLOADED send to: [3:1941:3143] from: [3:1940:3143] 2025-06-24T21:05:18.418709Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:814: ActorId: [3:1941:3143] TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got EvAbortExecution, status: OVERLOADED, message: {
: Error: Tablet 72075186224037888 is out of space. Table `/Root/table-1`., code: 2006 } 2025-06-24T21:05:18.418786Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:1941:3143] TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. OVERLOADED: {
: Error: Tablet 72075186224037888 is out of space. Table `/Root/table-1`., code: 2006 } 2025-06-24T21:05:18.418868Z node 3 :KQP_EXECUTER INFO: kqp_executer_impl.h:1951: ActorId: [3:1941:3143] TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. task: 1, does not have the CA id yet or is already complete 2025-06-24T21:05:18.419026Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2062: ActorId: [3:1941:3143] TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ReplyErrorAndDie. Response: Status: OVERLOADED Issues { message: "Tablet 72075186224037888 is out of space. Table `/Root/table-1`." issue_code: 2006 severity: 1 } Result { Stats { CpuTimeUs: 659 } } , to ActorId: [3:1931:3143] 2025-06-24T21:05:18.419079Z node 3 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2877: ActorId: [3:1941:3143] TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shutdown immediately - nothing to wait 2025-06-24T21:05:18.419229Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [3:1941:3143] TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T21:05:18.419289Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2202: ActorId: [3:1941:3143] TxId: 281474976715672. Ctx: { TraceId: 01jyhw4vys8gj641k78w71r34j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Terminate, become ZombieState 2025-06-24T21:05:18.419489Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=YTc3MjBmZjYtMTQ0NjAzZDYtNGFlZTQ4MS0zMjkxNmQ3MQ==, ActorId: [3:1931:3143], ActorState: ExecuteState, TraceId: 01jyhw4vys8gj641k78w71r34j, Create QueryResponse for error on request, msg: >> KqpScripting::StreamExecuteYqlScriptWriteCancelAfterBruteForced [GOOD] >> KqpScripting::StreamOperationTimeout |93.3%| [TA] $(B)/ydb/core/tx/datashard/ut_kqp_errors/test-results/unittest/{meta.json ... results_accumulator.log} |93.3%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp_errors/test-results/unittest/{meta.json ... 
results_accumulator.log} |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> AnalyzeColumnshard::AnalyzeTable [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> TraverseColumnShard::TraverseColumnTableRebootSaTabletInAggregate |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_19_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 19] [GOOD] >> TraverseDatashard::TraverseOneTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeTable [GOOD] Test command err: 2025-06-24T21:05:09.063302Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:09.063538Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:09.063650Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004388/r3tmp/tmplSvZdH/pdisk_1.dat 2025-06-24T21:05:09.455252Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10671, node 1 2025-06-24T21:05:09.680169Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:09.680240Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:09.680290Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:09.680526Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:09.683267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:09.791320Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:09.791501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:09.805780Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62995 2025-06-24T21:05:10.338300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:13.540762Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:13.573655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:13.573786Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:13.632646Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:13.634958Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:13.823319Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:13.858082Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.858553Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.859045Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.859286Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.859455Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.859746Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.859847Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.859945Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.860005Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:14.036558Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:14.036682Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:14.052242Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:14.187440Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:14.231337Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:14.231448Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:14.263289Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:14.264549Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:14.264782Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:14.264834Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:14.264890Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:14.264948Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:14.264995Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:14.265047Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:14.265957Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:14.299446Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:14.299587Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:14.305699Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:14.308562Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:14.308821Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:14.315801Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:14.340510Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:14.340584Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:14.340658Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:14.354786Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:14.361092Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:14.361237Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:14.535984Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:14.688610Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:14.745488Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:15.238069Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:15.471586Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:15.471730Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:15.489592Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:15.606353Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:15.606660Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:15.607008Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:15.607229Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:15.607409Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:05:15.607563Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:05:15.607704Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:05:15.607854Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:05:15.608001Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:05:15.608183Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:05:15.608347Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:05:15.642520Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:05:15.642633Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:05:15.642843Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:05:15.642902Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:05:15.643168Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:05:15.643226Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:05:15.643373Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:05:15.643416Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:05:15.643498Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:05:15.643574Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:05:15.643858Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:05:15.643919Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:05:15.644205Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:05:15.644269Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:05:15.644415Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:05:15.644462Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:05:15.644592Z node 2 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:05:15.644687Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:05:15.644739Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:05:15.645690Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:05:15.645761Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:05:15.735731Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];ev=NActors::IEventHandle;tablet_id=72075186224037899;tx_id=281474976715659;this=88923097004672;method=TTxController::StartProposeOnExecute;tx_info=281474976715659:TX_KIND_SCHEMA;min=1970;max=18446744073709551615;plan=0;src=[2:1542:2409];cookie=121:2;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=4;result=not_found; 2025-06-24T21:05:15.773680Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=4;result=not_found; 2025-06-24T21:05:15.788099Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-24T21:05:16.594406Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2499:3074], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:16.594648Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:16.598212Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-24T21:05:16.700521Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-24T21:05:17.472577Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2597:3119], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:17.472775Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:17.476579Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715661:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-24T21:05:17.517082Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:05:18.255645Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2711:3159], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:18.255796Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:18.273236Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715662:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-24T21:05:18.317152Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; waiting actualization: 0/0.000015s FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootSaBeforeReqDistribution ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupPathTest::ChecksumsForSchemaMappingFiles [GOOD] Test command err: 2025-06-24T21:02:08.529494Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624141403559067:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:08.529866Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004591/r3tmp/tmpTAaLDr/pdisk_1.dat 2025-06-24T21:02:08.910521Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:08.962978Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:08.963065Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 13986, node 1 2025-06-24T21:02:09.021692Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:02:09.046720Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:02:09.046744Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:02:09.046751Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:02:09.046912Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13942 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:02:09.334139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:02:09.539208Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:02:09.930411Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519624141403559368:2201]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:02:09.930466Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:02:09.930533Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:7519624141403559368:2201], Recipient [1:7519624141403559368:2201]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:02:09.930557Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:02:10.930933Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519624141403559368:2201]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:02:10.930966Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:02:10.931011Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:7519624141403559368:2201], Recipient [1:7519624141403559368:2201]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:02:10.931032Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:02:11.667082Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624154288461907:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:11.667312Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:11.673519Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624154288461943:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:11.674744Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519624141403559257:2142] Handle TEvProposeTransaction 2025-06-24T21:02:11.674769Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519624141403559257:2142] TxId# 281474976715658 ProcessProposeTransaction 2025-06-24T21:02:11.674814Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519624141403559257:2142] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7519624154288461946:2620] 2025-06-24T21:02:11.865716Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519624154288461946:2620] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-24T21:02:11.865800Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519624154288461946:2620] txid# 281474976715658 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:02:11.865820Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519624154288461946:2620] txid# 281474976715658 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-24T21:02:11.868567Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519624154288461946:2620] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:02:11.868648Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519624154288461946:2620] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:02:11.868801Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519624154288461946:2620] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:11.868943Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519624154288461946:2620] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:02:11.868991Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519624154288461946:2620] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-24T21:02:11.869098Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519624154288461946:2620] txid# 281474976715658 HANDLE EvClientConnected 2025-06-24T21:02:11.869165Z node 1 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519624154288461971:2626], Recipient [1:7519624141403559368:2201]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:02:11.869202Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:02:11.869216Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:02:11.869264Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519624154288461946:2620], Recipient [1:7519624141403559368:2201]: {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-24T21:02:11.869278Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T21:02:11.871355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: ".metadata/workload_manager/pools/default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } TxId: 281474976715658 TabletId: 72057594046644480 Owner: "metadata@system" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:02:11.871896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-24T21:02:11.872047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: ... 
.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-24T21:05:20.742354Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 281474976715765:0, at schemeshard: 72057594046644480 2025-06-24T21:05:20.742374Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 281474976715765:0, datashard: 72075186224037894, at schemeshard: 72057594046644480 2025-06-24T21:05:20.742402Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715765:0 129 -> 240 2025-06-24T21:05:20.742563Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TRestore, opId# 281474976715765:0, reason# domain is not a serverless db, domain# /Root, domainPathId# [OwnerId: 72057594046644480, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046644480, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-24T21:05:20.742781Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:05:20.744825Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715765:0, at schemeshard: 72057594046644480 2025-06-24T21:05:20.744850Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:05:20.744868Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715765:0 2025-06-24T21:05:20.744948Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [61:7519624964705177827:2404] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715765 at schemeshard: 72057594046644480 2025-06-24T21:05:20.745094Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [61:7519624926050469715:2196], Recipient [61:7519624926050469715:2196]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T21:05:20.745126Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T21:05:20.745165Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715765:0, at schemeshard: 72057594046644480 2025-06-24T21:05:20.745189Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976715765:0 ProgressState 2025-06-24T21:05:20.745283Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:05:20.745309Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715765:0 progress is 1/1 2025-06-24T21:05:20.745325Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715765 ready parts: 1/1 2025-06-24T21:05:20.745352Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715765:0 progress is 1/1 2025-06-24T21:05:20.745366Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715765 ready parts: 1/1 2025-06-24T21:05:20.745384Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715765, ready parts: 1/1, is published: true 2025-06-24T21:05:20.745429Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [61:7519624926050469715:2196] message: TxId: 281474976715765 2025-06-24T21:05:20.745460Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715765 ready parts: 1/1 2025-06-24T21:05:20.745479Z node 61 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715765:0 2025-06-24T21:05:20.745490Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715765:0 2025-06-24T21:05:20.745585Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 20] was 3 2025-06-24T21:05:20.747368Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:05:20.747451Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [61:7519624926050469715:2196] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715765 at schemeshard: 72057594046644480 2025-06-24T21:05:20.747575Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [61:7519624964705177971:3995], Recipient [61:7519624926050469715:2196]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:05:20.747607Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:05:20.747620Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:05:20.747670Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124998, Sender [61:7519624926050469715:2196], Recipient [61:7519624926050469715:2196]: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715765 2025-06-24T21:05:20.747688Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5109: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletionResult 2025-06-24T21:05:20.747709Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976715765 2025-06-24T21:05:20.747734Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976715765 2025-06-24T21:05:20.747773Z node 61 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-24T21:05:20.747802Z node 61 :IMPORT DEBUG: schemeshard_import__create.cpp:1472: TImport::TTxProgress: OnNotifyResult: txId# 281474976715765 2025-06-24T21:05:20.747942Z node 61 :IMPORT NOTICE: schemeshard_import__create.cpp:753: TImport::TTxProgress: issues during restore, cancelling, info# { Id: 281474976710672 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/Prefix_6/Table2' DstPathId: [OwnerId: 
72057594046644480, LocalPathId: 20] State: Transferring SubState: Subscribed WaitTxId: 0 Issue: 'shard: 72057594046644480:7, error: Checksum mismatch for Prefix/Table2/data_00.csv expected# f3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, got# e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' } 2025-06-24T21:05:20.748007Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_xxport__tx_base.h:63: SendNotifications: : id# 281474976710672, subscribers count# 0 2025-06-24T21:05:20.749499Z node 61 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-24T21:05:20.784884Z node 61 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [61:7519624964705177983:2411] [0] Resolve database: name# /Root 2025-06-24T21:05:20.785316Z node 61 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [61:7519624964705177983:2411] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:05:20.785350Z node 61 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [61:7519624964705177983:2411] [0] Send request: schemeShardId# 72057594046644480 2025-06-24T21:05:20.785655Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [61:7519624964705177986:4008], Recipient [61:7519624926050469715:2196]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:05:20.785695Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:05:20.785712Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:05:20.785810Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 275251202, Sender [61:7519624964705177983:2411], Recipient [61:7519624926050469715:2196]: NKikimrImport.TEvGetImportRequest Request { Id: 281474976710672 } DatabaseName: "/Root" 2025-06-24T21:05:20.785835Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5064: StateWork, processing event TEvImport::TEvGetImportRequest 2025-06-24T21:05:20.786190Z node 61 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [61:7519624964705177983:2411] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976710672 Status: CANCELLED Issues { message: "shard: 72057594046644480:7, error: Checksum mismatch for Prefix/Table2/data_00.csv expected# f3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, got# e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" severity: 1 } Progress: PROGRESS_CANCELLED ImportFromS3Settings { endpoint: "localhost:4778" scheme: HTTP bucket: "test_bucket" source_prefix: "Prefix" destination_path: "/Root/Prefix_6" } StartTime { seconds: 1750799120 } EndTime { seconds: 1750799120 } } 2025-06-24T21:05:20.786969Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: 
StateWork, received event# 269877764, Sender [61:7519624964705177986:4008], Recipient [61:7519624926050469715:2196]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:05:20.787015Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:05:20.787030Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:05:20.856866Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [61:7519624926050469715:2196]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:05:20.856917Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:05:20.856977Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [61:7519624926050469715:2196], Recipient [61:7519624926050469715:2196]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:05:20.856998Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> BackupRestoreS3::PrefixedVectorIndex [GOOD] >> AnalyzeColumnshard::AnalyzeAnalyzeOneColumnTableSpecificColumns |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_21_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 21] [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestoreS3::PrefixedVectorIndex [GOOD] Test command err: 2025-06-24T21:02:15.765874Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624168500468208:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:15.765927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004582/r3tmp/tmpJHDbTp/pdisk_1.dat 2025-06-24T21:02:16.172258Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:02:16.230817Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:16.230939Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:16.236315Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6886, node 1 2025-06-24T21:02:16.403845Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:02:16.403884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:02:16.403899Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: 
failed to initialize from file: (empty maybe) 2025-06-24T21:02:16.404017Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12827 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:02:16.772338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:02:16.786171Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:02:16.804089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:02:19.140943Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624185680338372:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:19.141060Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:19.422769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:02:19.593988Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624185680338556:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:19.594086Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:19.594404Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624185680338561:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:19.598607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:02:19.627103Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624185680338563:2311], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:02:19.681987Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624185680338634:2795] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:02:19.903746Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhvzde9ejxsqzm7bdbvmq4b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODZiMWQ2N2YtN2FhY2M4NDgtNGM1OTRjZjMtZTk0Mjg1M2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:02:20.100183Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhvzdrs1wpq2fx3ksqsem8w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODZiMWQ2N2YtN2FhY2M4NDgtNGM1OTRjZjMtZTk0Mjg1M2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Backup "/Root" to "/home/runner/.ya/build/build_root/2btm/004582/r3tmp/tmpHvq5QT/"Create temporary directory "/Root/~backup_20250624T210220" in databaseProcess "/home/runner/.ya/build/build_root/2btm/004582/r3tmp/tmpHvq5QT/table"Copy tables: { src: "/Root/table", dst: "/Root/~backup_20250624T210220/table" }Describe table "/Root/table"Describe table "/Root/~backup_20250624T210220/table"Backup table "/Root/~backup_20250624T210220/table" to "/home/runner/.ya/build/build_root/2btm/004582/r3tmp/tmpHvq5QT/table"Write scheme into "/home/runner/.ya/build/build_root/2btm/004582/r3tmp/tmpHvq5QT/table/scheme.pb"Write ACL into "/home/runner/.ya/build/build_root/2btm/004582/r3tmp/tmpHvq5QT/table/permissions.pb"Read table "/Root/~backup_20250624T210220/table"2025-06-24T21:02:20.766384Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624168500468208:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:20.766452Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Write data into "/home/runner/.ya/build/build_root/2btm/004582/r3tmp/tmpHvq5QT/table/data_00.csv"Drop table "/Root/~backup_20250624T210220/table"2025-06-24T21:02:20.927425Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found Remove temporary directory "/Root/~backup_20250624T210220" in database2025-06-24T21:02:20.988641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:67) Backup completed successfullyRestore "/home/runner/.ya/build/build_root/2btm/004582/r3tmp/tmpHvq5QT/" to "/Root"2025-06-24T21:02:21.123198Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found Resolved db base path: "/Root"List of entries in the backup: 
[{"type":"Directory","path":"/home/runner/.ya/build/build_root/2btm/004582/r3tmp/tmpHvq5QT/"},{"type":"Table","path":"/home/runner/.ya/build/build_root/2btm/004582/r3tmp/tmpHvq5QT/table"}]Process "/home/runner/.ya/build/build_root/2btm/004582/r3tmp/tmpHvq5QT/table"Read scheme from "/home/runner/.ya/build/build_root/2btm/004582/r3tmp/tmpHvq5QT/table/scheme.pb"Restore table "/home/runner/.ya/build/build_root/2btm/004582/r3tmp/tmpHvq5QT/table" to "/Root/table"2025-06-24T21:02:21.241091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Created "/Root/table"Read data from "/home/runner/.ya/build/build_root/2btm/004582/r3tmp/tmpHvq5QT/table/data_00.csv"2025-06-24T21:02:21.436241Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jyhvzf4n43rc68z0vsa9djwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWMyNDc1NTktZWJjOGUzZTItYmQyM2I2MWItZTU0OTZmYjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Restore ACL "/home/runner/.ya/build/build_root/2btm/004582/r3tmp/tmpHvq5QT/table" to "/Root/table"Read ACL from "/home/runner/.ya/build/build_root/2btm/004582/r3tmp/tmpHvq5QT/table/permissions.pb"2025-06-24T21:02:21.488821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Restore completed successfully2025-06-24T21:02:21.668070Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jyhvzfa9988a75p9e8d3edyh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODZiMWQ2N2YtN2FhY2M4NDgtNGM1OTRjZjMtZTk0Mj ... 
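The earlier import excerpt was cancelled with `Checksum mismatch for Prefix/Table2/data_00.csv`. Notably, the reported `got` digest (`e3b0c442…7852b855`) is the SHA-256 of empty input, so the data file appears to have contained no bytes when it was hashed, while the `expected` value differs from it only in the leading nibble, which looks like a deliberately corrupted checksum in the test fixture. A minimal sketch of that verification step follows, assuming the checksum is a plain SHA-256 over the raw file contents (the actual checksum scheme used by the import code is not shown in the log):

```python
# Sketch: verify a backup data file against an expected SHA-256 digest, mirroring
# the "Checksum mismatch for Prefix/Table2/data_00.csv" error in the log above.
# Assumptions: the checksum is SHA-256 over the file bytes and the expected digest
# is a lowercase hex string; the real import code may differ.
import hashlib
from pathlib import Path

# SHA-256 of empty input -- the "got" value reported in the log.
EMPTY_SHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"


def sha256_of_file(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file through SHA-256 so large data_00.csv files fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()


def verify(path: Path, expected: str) -> None:
    got = sha256_of_file(path)
    if got != expected:
        hint = " (file was empty)" if got == EMPTY_SHA256 else ""
        raise ValueError(
            f"Checksum mismatch for {path}: expected# {expected}, got# {got}{hint}"
        )
```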
t@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "table" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Group" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "String" TypeId: 4097 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 0 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } } TableIndexes { Name: "value_idx" LocalPathId: 10 Type: EIndexTypeGlobalVectorKmeansTree State: EIndexStateReady KeyColumnNames: "Group" KeyColumnNames: "Value" SchemaVersion: 2 PathOwnerId: 72057594046644480 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 
CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 
ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } } } VectorIndexKmeansTreeDescription { Settings { settings { metric: SIMILARITY_INNER_PRODUCT vector_type: VECTOR_TYPE_FLOAT vector_dimension: 768 } clusters: 80 levels: 2 } } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 
PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 9 PathOwnerId: 72057594046644480 >> KqpScripting::StreamOperationTimeout [GOOD] >> AnalyzeColumnshard::AnalyzeRebootColumnShard |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamOperationTimeout [GOOD] Test command err: Trying to start YDB, gRPC: 11797, MsgBus: 23712 2025-06-24T21:03:58.056355Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624613951995795:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:03:58.056448Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003073/r3tmp/tmpiwHSSf/pdisk_1.dat 2025-06-24T21:03:58.569641Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624613951995772:2079] 1750799038049270 != 1750799038049273 2025-06-24T21:03:58.576709Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:03:58.585327Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:03:58.585424Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:03:58.595328Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11797, node 1 2025-06-24T21:03:58.724488Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:03:58.724509Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:03:58.724515Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:03:58.724669Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23712 2025-06-24T21:03:59.099281Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23712 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:03:59.368898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:03:59.401395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:03:59.410744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:59.590423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:03:59.800482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:03:59.895839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:01.725091Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624626836899298:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:01.725200Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:02.046570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.093475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.127215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.173360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.213070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.291514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.389735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:02.462430Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624631131867257:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:02.462476Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:02.462687Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624631131867262:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:02.466729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:02.518641Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624631131867264:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:02.612484Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624631131867315:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:03.056632Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624613951995795:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:03.056707Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:04.151848Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799044162, txId: 281474976710673] shutting down 2025-06-24T21:04:04.459476Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding sna ... -24T21:05:19.912961Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799119944, txId: 281474976711270] shutting down Trying to start YDB, gRPC: 15522, MsgBus: 32609 2025-06-24T21:05:20.940647Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519624962513808374:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:20.940739Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003073/r3tmp/tmppWWzON/pdisk_1.dat 2025-06-24T21:05:21.077977Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:21.079384Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519624962513808355:2079] 1750799120940060 != 1750799120940063 2025-06-24T21:05:21.099611Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:21.099718Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 15522, node 2 2025-06-24T21:05:21.101529Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:21.135252Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:21.135279Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:21.135291Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:21.135468Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32609 TClient is connected to server localhost:32609 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:05:21.686140Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:05:21.700343Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:05:21.776573Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:05:21.932210Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:05:21.990831Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:22.011388Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:05:24.312822Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624979693679180:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:24.312899Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:24.372565Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:24.401094Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:24.424949Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:24.452685Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:24.481969Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:24.515959Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:24.547599Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:24.629397Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624979693679837:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:24.629475Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:24.629540Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519624979693679842:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:24.632930Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:05:24.642849Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519624979693679844:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:05:24.703743Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519624979693679895:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:05:25.862809Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=N2E4ZTliOTctZDA1ZjM1MjMtZTM0OGNkZmQtNmRjZWQzMQ==, ActorId: [2:7519624983988647461:2472], ActorState: ExecuteState, TraceId: 01jyhw53adcmq5gpx9mb77cr5v, Create QueryResponse for error on request, msg: 2025-06-24T21:05:25.940710Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519624962513808374:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:25.940799Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TKqpScanData::DifferentNumberOfInputAndResultColumns >> TKqpScanData::ArrowToUnboxedValueConverter >> TComputeScheduler::QueryLimits [GOOD] >> TKqpScanData::DifferentNumberOfInputAndResultColumns [GOOD] >> TKqpScanData::ArrowToUnboxedValueConverter [GOOD] >> TKqpScanData::UnboxedValueSize >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TComputeScheduler::QueryLimits [GOOD] Test command err: 800 800 800 800 >> TKqpScanData::UnboxedValueSize [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::DifferentNumberOfInputAndResultColumns [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::ArrowToUnboxedValueConverter [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::UnboxedValueSize [GOOD] >> TKqpScanData::EmptyColumns >> TComputeScheduler::ResourceWeight [GOOD] >> TKqpScanData::EmptyColumnsAndNonEmptyArrowBatch [GOOD] >> TKqpScanData::EmptyColumns [GOOD] >> TraverseDatashard::TraverseTwoTablesTwoServerlessDbs [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TComputeScheduler::ResourceWeight [GOOD] Test command err: 510 500 1510 1500 990 1000 1000 1000 |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::EmptyColumns [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::EmptyColumnsAndNonEmptyArrowBatch [GOOD] >> TKqpScanData::FailOnUnsupportedPgType >> TKqpScanData::FailOnUnsupportedPgType [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseDatashard::TraverseTwoTablesTwoServerlessDbs [GOOD] Test command err: 2025-06-24T21:05:18.748744Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
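The test logs above repeat the same startup pattern: the workload service fails to fetch the `default` resource pool with NOT_FOUND, schedules a retry while the pool is being created, and a concurrent creation attempt is then rejected with "path exist, request accepts it". The sketch below illustrates that retry-until-created pattern generically; `fetch_pool` and `create_pool` are hypothetical placeholders, not YDB APIs.

```python
# Sketch of the startup race visible in the logs: readers retry a NOT_FOUND fetch of
# ".metadata/workload_manager/pools/default" with backoff while one writer creates it;
# a losing writer treats "path exists" as success. fetch_pool/create_pool are
# hypothetical placeholders standing in for the real scheme operations.
import time


class NotFound(Exception):
    pass


class AlreadyExists(Exception):
    pass


def ensure_default_pool(fetch_pool, create_pool, attempts: int = 5, delay: float = 0.1):
    """Return the pool, creating it if missing and tolerating concurrent creators."""
    for attempt in range(attempts):
        try:
            return fetch_pool()
        except NotFound:
            try:
                create_pool()
            except AlreadyExists:
                # Another session won the race ("path exist, request accepts it").
                pass
            time.sleep(delay * (attempt + 1))  # linear backoff before re-fetching
    return fetch_pool()  # final attempt; raises NotFound if still absent
```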
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:18.749052Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:18.749222Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004371/r3tmp/tmpzH5D1y/pdisk_1.dat 2025-06-24T21:05:19.097374Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8089, node 1 2025-06-24T21:05:19.287484Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:19.287547Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:19.287608Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:19.287862Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:19.290135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:19.400232Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:19.400379Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:19.413589Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23629 2025-06-24T21:05:19.923957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:22.758709Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:22.791391Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:22.791491Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:22.850628Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:22.852454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:23.032915Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:23.066997Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:23.067545Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:23.068081Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:23.068185Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:23.068281Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:23.068478Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:23.068576Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:23.068662Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:23.068725Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:23.258954Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:23.259060Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:23.271717Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:23.408537Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:23.454437Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:23.454528Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:23.479437Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:23.480741Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:23.480983Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:23.481049Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:23.481117Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:23.481178Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:23.481230Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:23.481301Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:23.482274Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:23.515947Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:23.516059Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:23.522819Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:23.526000Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:23.526285Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:23.534187Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-24T21:05:23.558057Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:23.558117Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:23.558168Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-24T21:05:23.570910Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:23.578274Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:23.578412Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:23.742102Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:23.893020Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:23.949416Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-24T21:05:24.434329Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:24.462080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:25.014747Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:25.150996Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7890: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-24T21:05:25.151052Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7906: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:05:25.151186Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:2496:2900], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:05:25.153313Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2504:2904] 2025-06-24T21:05:25.153592Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2504:2904], schemeshard id = 72075186224037899 2025-06-24T21:05:25.989171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:26.434738Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:26.688847Z node 2 :STATISTICS DEBUG: schemeshard_impl.c ... ode 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:2969:3094], at schemeshard: 72075186224037905, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037905 2025-06-24T21:05:26.690426Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2971:3096] 2025-06-24T21:05:26.690559Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2971:3096], schemeshard id = 72075186224037905 2025-06-24T21:05:27.767965Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3096:3357], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:27.768129Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:27.785228Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72075186224037899, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:28.253095Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3404:3404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:28.253304Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:28.254740Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:3409:3408]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:28.254973Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:05:28.255186Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 18446744073709551615 ] 2025-06-24T21:05:28.255278Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:3412:3411] 2025-06-24T21:05:28.255362Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:3412:3411] 2025-06-24T21:05:28.256147Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:3413:3335] 2025-06-24T21:05:28.256583Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:3412:3411], server id = [2:3413:3335], tablet id = 72075186224037894, status = OK 2025-06-24T21:05:28.256969Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:3413:3335], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:05:28.257073Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-24T21:05:28.257461Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:05:28.257583Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:3409:3408], StatRequests.size() = 1 2025-06-24T21:05:28.275254Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3417:3415], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:28.275532Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:28.276359Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3422:3420], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:28.285116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715664:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:05:28.574556Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:05:28.574641Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:05:28.628834Z node 1 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [1:3412:3411], schemeshard count = 1 2025-06-24T21:05:28.908796Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:3424:3422], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715664 completed, doublechecking } 2025-06-24T21:05:29.087166Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:3548:3496] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:05:29.099580Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:3571:3512]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:29.099790Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:05:29.099827Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:3571:3512], StatRequests.size() = 1 2025-06-24T21:05:29.152793Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyhw55mb316b4bdyyc35w863, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmU0MjhjZGItNGY5NzAxNC03ZWU3ODQ2Mi0xMjg2MDQ3YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:05:29.248796Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72075186224037905, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:29.670501Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [1:3929:3579]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:29.670782Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:05:29.671312Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:171: [72075186224037894] EvRequestStats, node id = 1, schemeshard count = 1, urgent = 0 2025-06-24T21:05:29.671374Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-24T21:05:29.671657Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:05:29.671738Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 3, ReplyToActorId = [1:3929:3579], StatRequests.size() = 1 2025-06-24T21:05:29.703326Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 4 ], ReplyToActorId[ [1:3938:3588]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:29.703564Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 4 ] 2025-06-24T21:05:29.703612Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 4, ReplyToActorId = [1:3938:3588], StatRequests.size() = 1 2025-06-24T21:05:29.755580Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. Ctx: { TraceId: 01jyhw571md9zxthvhvq8y5kts, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmQ3YTM1NjctNjcxOWQ3Y2EtYjBhNDYxY2ItMjU3OTAzNmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:05:29.812189Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:3982:3600]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:29.815778Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:05:29.815857Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:05:29.816253Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:05:29.816308Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-24T21:05:29.816360Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037899, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:05:29.833651Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 1 ], RowsCount[ 0 ] 2025-06-24T21:05:29.833954Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 2025-06-24T21:05:29.834455Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:4006:3612]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:29.837826Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:05:29.837905Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:05:29.838423Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:05:29.838489Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 2 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-24T21:05:29.838550Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 2 ], PathId[ [OwnerId: 72075186224037905, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:05:29.841299Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 2 ], RowsCount[ 0 ] 2025-06-24T21:05:29.841587Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 2 |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::FailOnUnsupportedPgType [GOOD] |93.4%| [TA] $(B)/ydb/core/kqp/runtime/ut/test-results/unittest/{meta.json ... results_accumulator.log} |93.4%| [TA] {RESULT} $(B)/ydb/core/kqp/runtime/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TraverseDatashard::TraverseOneTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseDatashard::TraverseOneTable [GOOD] Test command err: 2025-06-24T21:05:26.231025Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:26.231457Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:26.231698Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00433c/r3tmp/tmpTLVi7b/pdisk_1.dat 2025-06-24T21:05:26.547999Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62691, node 1 2025-06-24T21:05:26.763895Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:26.763963Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:26.764011Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:26.764238Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:26.771535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:26.879611Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:26.879772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:26.893474Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13625 2025-06-24T21:05:27.408956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:30.128953Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:30.158486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:30.158585Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:30.217774Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:30.220089Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:30.389954Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:30.424521Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:30.424993Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:30.425519Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:30.425645Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:30.425724Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:30.425923Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:30.426023Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:30.426115Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:30.426172Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:30.601025Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:30.601131Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:30.614226Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:30.776311Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:30.826987Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:30.827102Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:30.861203Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:30.863046Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:30.863349Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:30.863420Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:30.863501Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:30.863557Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:30.863615Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:30.863674Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:30.864594Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:30.902329Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:30.902446Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:30.908616Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:30.911582Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:30.911889Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:30.919304Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:30.943039Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:30.943100Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:30.943180Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:30.956020Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:30.962759Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:30.962930Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:31.126094Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:31.274508Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:31.319763Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:31.820496Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:32.095666Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:32.095782Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:32.110259Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:32.530174Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2448:3072], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:32.530296Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:32.531098Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2453:3076]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:32.531332Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:05:32.531406Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2455:3078] 2025-06-24T21:05:32.531473Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2455:3078] 2025-06-24T21:05:32.531883Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2456:2948] 2025-06-24T21:05:32.532129Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2455:3078], server id = [2:2456:2948], tablet id = 72075186224037894, status = OK 2025-06-24T21:05:32.532280Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2456:2948], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:05:32.532343Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-24T21:05:32.532550Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:05:32.532635Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2453:3076], StatRequests.size() = 1 2025-06-24T21:05:32.546828Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2460:3082], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:32.546909Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:32.547276Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2465:3087], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:32.553028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:05:32.683044Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:05:32.683125Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:05:32.770626Z node 1 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [1:2455:3078], schemeshard count = 1 2025-06-24T21:05:33.093789Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2467:3089], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-24T21:05:33.218006Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2578:3159] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:05:33.228251Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2601:3175]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:33.228398Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:05:33.228423Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2601:3175], StatRequests.size() = 1 2025-06-24T21:05:33.274562Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhw59t32s16bfym3q7ecvr4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzQwZGQ2NjQtYWEzZTM2ZjEtYmFiZTFkOWMtNjZjNDg2NmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:05:33.325650Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:2650:2994]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:33.328147Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:05:33.328208Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:05:33.328522Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:05:33.328558Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:05:33.328598Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:05:33.339915Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 1 ], RowsCount[ 0 ] 2025-06-24T21:05:33.340194Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest >> TRtmrTest::CreateWithoutTimeCastBuckets >> TRtmrTest::CreateWithoutTimeCastBuckets [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest >> TRtmrTest::CreateWithoutTimeCastBuckets [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: 
[1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:05:36.104802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:05:36.104895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:05:36.104924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:05:36.104960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:05:36.106008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:05:36.106054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:05:36.106185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:05:36.106267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:05:36.107039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:05:36.109590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:05:36.179667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:05:36.179726Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:36.194417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:05:36.194790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:05:36.195020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:05:36.202258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:05:36.202514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:05:36.204501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:05:36.205388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:05:36.211517Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:05:36.212472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:05:36.218530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:05:36.218595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:05:36.219513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:05:36.219568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:05:36.219657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:05:36.219749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:05:36.225568Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:05:36.330616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:05:36.332314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:05:36.333235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:05:36.333284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:05:36.334407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:05:36.334481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:36.336982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:05:36.337809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: 
AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:05:36.338030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:05:36.338153Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:05:36.338193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:05:36.338216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:05:36.339908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:05:36.339954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:05:36.339984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:05:36.341397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:05:36.341430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:05:36.341460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:05:36.341514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:05:36.345185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:05:36.346768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:05:36.347739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:05:36.348708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:05:36.348870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 
2025-06-24T21:05:36.348913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:05:36.350050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:05:36.350104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:05:36.350305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:05:36.350372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:05:36.352058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:05:36.352089Z node 1 :FLAT_TX_SCHEMESHARD ... d propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 100 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:05:36.398416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 100:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:100 msg type: 269090816 2025-06-24T21:05:36.398507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 100, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 100 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000002 2025-06-24T21:05:36.398752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:05:36.398837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 100 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:05:36.398874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_rtmr.cpp:130: TCreateRTMR TPropose, operationId: 100:0 HandleReply TEvOperationPlan, at schemeshard: 72057594046678944 2025-06-24T21:05:36.398940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 100:0 128 -> 240 2025-06-24T21:05:36.399060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:05:36.399164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 FAKE_COORDINATOR: Erasing txId 100 2025-06-24T21:05:36.400591Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:05:36.400617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:05:36.400729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:05:36.400828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:05:36.400870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-24T21:05:36.400909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-06-24T21:05:36.400949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-24T21:05:36.400977Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-24T21:05:36.401041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#100:0 progress is 1/1 2025-06-24T21:05:36.401067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:05:36.401093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#100:0 progress is 1/1 2025-06-24T21:05:36.401115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:05:36.401150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-24T21:05:36.401180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:05:36.401218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 100:0 2025-06-24T21:05:36.401240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 100:0 2025-06-24T21:05:36.401286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:05:36.401325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-06-24T21:05:36.401355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-24T21:05:36.401385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 2 
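Once the operation part is done, the trace above leaves two scheme-board publications pending for tx 100 (LocalPathId 1 at version 5 and LocalPathId 2 at version 2), and the TEvUpdateAck entries that follow drain that in-flight count until the publication completes. Below is a small sketch of that bookkeeping, assuming one ack per published path; the TPublicationTracker type and its methods are hypothetical and stand in for, rather than reproduce, the real TTxAckPublishToSchemeBoard logic.

#include <cstdio>
#include <map>

// Hypothetical model of the per-transaction publication bookkeeping seen in the trace:
// tx 100 publishes versions for two paths and completes once both are acknowledged.
struct TPublicationTracker {
    // txId -> number of path publications still awaiting TEvUpdateAck
    std::map<unsigned, int> InFlight;

    void StartPublication(unsigned txId, int paths) {
        InFlight[txId] = paths;
        std::printf("Publication still in progress, tx: %u, publications: %d\n", txId, paths);
    }

    // Returns true when the last ack arrives and the publication is complete.
    bool AckPublish(unsigned txId, unsigned long long pathId, unsigned version) {
        int& count = InFlight[txId];
        --count;
        std::printf("AckPublish, txId: %u, pathId: %llu, version: %u, in-flight: %d\n",
                    txId, pathId, version, count);
        if (count == 0) {
            std::printf("Publication complete, notify & remove, txId: %u\n", txId);
            InFlight.erase(txId);
            return true;
        }
        return false;
    }
};

int main() {
    TPublicationTracker tracker;
    tracker.StartPublication(100, 2); // paths 1 and 2, as in the trace
    tracker.AckPublish(100, 1, 5);    // ack for LocalPathId 1, version 5
    tracker.AckPublish(100, 2, 2);    // ack for LocalPathId 2, version 2 -> complete
}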
2025-06-24T21:05:36.402107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:05:36.402175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:05:36.402202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-24T21:05:36.402227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T21:05:36.402254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:05:36.402672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:05:36.402732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:05:36.402749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-24T21:05:36.402766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-24T21:05:36.402783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:05:36.402820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-24T21:05:36.405493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-24T21:05:36.405820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-24T21:05:36.406951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-24T21:05:36.406985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-24T21:05:36.407247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-24T21:05:36.407311Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-24T21:05:36.407335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:313:2302] TestWaitNotification: OK eventTxId 100 2025-06-24T21:05:36.408687Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/rtmr1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:05:36.408839Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/rtmr1" took 167us result status StatusSuccess 2025-06-24T21:05:36.409716Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/rtmr1" PathDescription { Self { Name: "rtmr1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeRtmrVolume CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 RTMRVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } RtmrVolumeDescription { Name: "rtmr1" PathId: 2 PartitionsCount: 0 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |93.4%| [TA] $(B)/ydb/core/tx/schemeshard/ut_rtmr/test-results/unittest/{meta.json ... results_accumulator.log} |93.4%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_rtmr/test-results/unittest/{meta.json ... 
results_accumulator.log} |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest >> TCertificateAuthUtilsTest::GenerateAndVerifyCertificates |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest >> TCertificateCheckerTest::CheckSubjectDns >> TCertificateAuthUtilsTest::GenerateAndVerifyCertificates [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest >> TCertificateAuthUtilsTest::GenerateAndVerifyCertificates [GOOD] >> TCertificateCheckerTest::CheckSubjectDns [GOOD] >> TCertificateAuthUtilsTest::ClientCertAuthorizationParamsMatch [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest >> TCertificateCheckerTest::CheckSubjectDns [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest >> TCertificateAuthUtilsTest::ClientCertAuthorizationParamsMatch [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest |93.4%| [TA] $(B)/ydb/core/security/certificate_check/ut/test-results/unittest/{meta.json ... results_accumulator.log} |93.4%| [TA] {RESULT} $(B)/ydb/core/security/certificate_check/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpScripting::StreamExecuteYqlScriptScanWriteCancelAfterBruteForced [GOOD] >> KqpScripting::StreamExecuteYqlScriptScanScalar >> DstCreator::SameOwner >> DstCreator::WithSyncIndex >> DstCreator::ColumnsSizeMismatch >> DstCreator::WithSyncIndexAndIntermediateDir >> DstCreator::ReplicationModeMismatch >> DstCreator::ExistingDst >> DstCreator::NonExistentSrc >> DstCreator::NonExistentSrc [GOOD] >> DstCreator::KeyColumnsSizeMismatch >> DstCreator::ColumnsSizeMismatch [GOOD] >> DstCreator::ColumnTypeMismatch >> DstCreator::WithSyncIndexAndIntermediateDir [GOOD] >> DstCreator::ReplicationModeMismatch [GOOD] >> DstCreator::ReplicationConsistencyLevelMismatch >> DstCreator::ExistingDst [GOOD] >> DstCreator::EmptyReplicationConfig >> DstCreator::SameOwner [GOOD] >> DstCreator::SamePartitionCount >> DstCreator::WithSyncIndex [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::WithSyncIndexAndIntermediateDir [GOOD] Test command err: 2025-06-24T21:05:42.556916Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625060642829230:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:42.557035Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003098/r3tmp/tmpaXkeSy/pdisk_1.dat 2025-06-24T21:05:42.846415Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625060642829196:2079] 1750799142552863 != 1750799142552866 2025-06-24T21:05:42.857221Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 
2025-06-24T21:05:42.915803Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:42.915918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:42.918378Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24970 TServer::EnableGrpc on GrpcPort 8423, node 1 2025-06-24T21:05:43.147786Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:43.147808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:43.147816Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:43.147934Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24970 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:05:43.563892Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:05:43.607877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:05:43.638256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750799143905 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799143667 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750799143905 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: ".sys" PathId: 18446... (TRUNCATED) 2025-06-24T21:05:43.968409Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:43.968543Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:43.968560Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T21:05:43.968960Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T21:05:44.908249Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: 
[DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750799143905, tx_id: 281474976715658 } } } 2025-06-24T21:05:44.909524Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-24T21:05:44.911930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:44.913553Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-24T21:05:44.913571Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 2025-06-24T21:05:44.978241Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 2025-06-24T21:05:44.979559Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dir/Replicated" PathDescription { Self { Name: "Replicated" PathId: 6 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799145018 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 
ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compacti ... : 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037905 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 8 PathOwnerId: 72057594046644480 } 2025-06-24T21:05:45.033204Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 2] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 8] TClient::Ls request: /Root/Dir/Replicated/index_by_value TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "index_by_value" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799145018 ParentPathId: 6 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 
PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 8 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799145018 ParentPathId: 7 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { ... (TRUNCATED) TClient::Ls request: /Root/Dir/Replicated/index_by_value/indexImplTable TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 8 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799145018 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } ... (TRUNCATED) Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 8 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799145018 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 
419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037905 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } Path: "/Root/Dir/Replicated/index_by_value/indexImplTable" ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::WithSyncIndex [GOOD] Test command err: 2025-06-24T21:05:42.556944Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625057740179413:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:42.557033Z node 1 
:METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030b0/r3tmp/tmpKPdwoE/pdisk_1.dat 2025-06-24T21:05:42.846053Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625057740179380:2079] 1750799142552855 != 1750799142552858 2025-06-24T21:05:42.867757Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:42.946372Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:42.946522Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:42.948419Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25386 TServer::EnableGrpc on GrpcPort 26461, node 1 2025-06-24T21:05:43.147756Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:43.147793Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:43.147816Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:43.147942Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25386 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:05:43.565206Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:05:43.618504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:05:43.638692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750799143919 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799143674 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750799143919 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: ".sys" PathId: 18446... 
(TRUNCATED) 2025-06-24T21:05:43.998610Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:43.998835Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:43.998857Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T21:05:43.999468Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T21:05:45.114974Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750799143919, tx_id: 281474976710658 } } } 2025-06-24T21:05:45.115420Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-24T21:05:45.117311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:45.118628Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-06-24T21:05:45.118660Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-06-24T21:05:45.182633Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-06-24T21:05:45.183755Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Replicated" PathDescription { Self { Name: "Replicated" PathId: 5 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 
281474976710659 CreateStep: 1750799145221 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_ ... 
0 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037905 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 7 PathOwnerId: 72057594046644480 } TClient::Ls request: /Root/Replicated/index_by_value 2025-06-24T21:05:45.246855Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 2] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 7] TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "index_by_value" PathId: 6 SchemeshardId: 72057594046644480 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750799145221 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750799145221 ParentPathId: 6 PathState: 
EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { ... (TRUNCATED) TClient::Ls request: /Root/Replicated/index_by_value/indexImplTable TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750799145221 ParentPathId: 6 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } ... (TRUNCATED) Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750799145221 ParentPathId: 6 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 
ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037905 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } Path: "/Root/Replicated/index_by_value/indexImplTable" >> KqpScripting::StreamExecuteYqlScriptScanScalar [GOOD] >> DstCreator::GlobalConsistency >> DstCreator::EmptyReplicationConfig [GOOD] >> DstCreator::KeyColumnsSizeMismatch [GOOD] >> DstCreator::ColumnTypeMismatch [GOOD] >> DstCreator::ReplicationConsistencyLevelMismatch [GOOD] >> DstCreator::SamePartitionCount [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptScanScalar [GOOD] Test command err: Trying to start YDB, gRPC: 27069, MsgBus: 9475 2025-06-24T21:04:27.710753Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624736555213692:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:27.710797Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f72/r3tmp/tmpsFJNlu/pdisk_1.dat 2025-06-24T21:04:28.165207Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519624736555213673:2079] 1750799067709968 != 1750799067709971 2025-06-24T21:04:28.204843Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:04:28.208568Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:28.208660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:28.212508Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27069, node 1 2025-06-24T21:04:28.311741Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:28.311766Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:28.311773Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:28.311888Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9475 2025-06-24T21:04:28.749085Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9475 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:04:29.076598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:04:29.092173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:04:29.108897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:29.278424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:29.460229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:29.542734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:04:31.578555Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624753735084488:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:31.578699Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:31.872567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:31.922372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:31.957151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:32.002140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:32.038568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:32.080075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:32.156297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:04:32.276704Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624758030052444:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:32.276793Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:32.277221Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624758030052449:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:04:32.281339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:04:32.294755Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519624758030052451:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:04:32.387395Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519624758030052502:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:04:32.711005Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519624736555213692:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:04:32.711114Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:04:33.901352Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799073891, txId: 281474976710673] shutting down 2025-06-24T21:04:34.239752Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapsh ... SnapshotManager: discarding snapshot; our snapshot: [step: 1750799140398, txId: 281474976711267] shutting down 2025-06-24T21:05:40.632115Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799140664, txId: 281474976711270] shutting down Trying to start YDB, gRPC: 8202, MsgBus: 15255 2025-06-24T21:05:41.440691Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625052794762702:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:41.440768Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f72/r3tmp/tmpFM9ojJ/pdisk_1.dat 2025-06-24T21:05:41.540440Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:41.541060Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625052794762682:2079] 1750799141440198 != 1750799141440201 TServer::EnableGrpc on GrpcPort 8202, node 2 2025-06-24T21:05:41.577113Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:41.577195Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:41.578326Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:41.587567Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:41.587587Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:41.587591Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:41.587717Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15255 TClient is connected to server 
localhost:15255 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:05:42.036740Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:05:42.052570Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:05:42.135559Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:05:42.275408Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:05:42.333853Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:05:42.502346Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:44.568026Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625065679666221:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:44.568113Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:44.613940Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:44.644347Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:44.677368Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:44.708024Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:44.738886Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:44.770653Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:44.802864Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:44.879874Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625065679666880:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:44.879961Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625065679666885:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:44.879965Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:44.883551Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:05:44.893386Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625065679666887:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:05:44.965243Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625065679666938:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:05:46.440889Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625052794762702:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:46.440962Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:05:46.998537Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799147020, txId: 281474976715672] shutting down >> DstCreator::WithIntermediateDir ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::ColumnTypeMismatch [GOOD] Test command err: 2025-06-24T21:05:42.556911Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625057895284651:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:42.557011Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030a4/r3tmp/tmpYEN3xR/pdisk_1.dat 2025-06-24T21:05:42.843580Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:42.849808Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625057895284620:2079] 1750799142552918 != 1750799142552921 2025-06-24T21:05:42.960746Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:42.960882Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:42.963221Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7088 TServer::EnableGrpc on GrpcPort 5591, node 1 2025-06-24T21:05:43.147851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:43.147880Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:43.147887Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:43.148004Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7088 WaitRootIsUp 'Root'... 
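The repeated "Resource pool default not found" warnings followed by ESchemeOpCreateResourcePool, the "Transaction ... completed, doublechecking" retry and the TX_PROXY "path exist, request accepts it" message above trace an idempotent create-if-missing bootstrap of the default workload-manager pool. The standalone sketch below only illustrates that observable pattern; it is not the actual KQP workload service code, and every name in it (EStatus, EnsureDefaultPool, the lambdas) is hypothetical.

#include <functional>
#include <iostream>

// Hypothetical status codes standing in for the scheme operation results seen
// in the log (NOT_FOUND, StatusAlreadyExists / "path exist, request accepts it").
enum class EStatus { Success, NotFound, AlreadyExists, Error };

// Create-if-missing bootstrap: look the pool up, create it when absent, treat a
// concurrent "already exists" outcome as success, then re-describe
// ("doublechecking") before reporting the final state.
EStatus EnsureDefaultPool(const std::function<EStatus()>& describePool,
                          const std::function<EStatus()>& createPool) {
    if (describePool() == EStatus::Success) {
        return EStatus::Success;
    }
    const EStatus created = createPool();
    if (created == EStatus::Success || created == EStatus::AlreadyExists) {
        return describePool();  // doublecheck after creation
    }
    return created;
}

int main() {
    // Simulates the sequence in the log: the first fetch fails, creation races
    // with another actor and reports AlreadyExists, the doublecheck then succeeds.
    int describeCalls = 0;
    auto describe = [&]() {
        return ++describeCalls == 1 ? EStatus::NotFound : EStatus::Success;
    };
    auto create = [] { return EStatus::AlreadyExists; };
    const bool ok = EnsureDefaultPool(describe, create) == EStatus::Success;
    std::cout << (ok ? "pool ready" : "pool bootstrap failed") << "\n";
    return 0;
}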
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:05:43.564254Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:05:43.622384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:05:43.646023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:05:43.751119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799143688 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750799143821 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799143688 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750799143821 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-06-24T21:05:43.815723Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:43.816002Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:43.816026Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T21:05:43.816633Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T21:05:45.088019Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750799143751, tx_id: 281474976710658 } } } 2025-06-24T21:05:45.088395Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-24T21:05:45.090026Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976710660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-06-24T21:05:45.091711Z node 1 :REPLICATION_CONTROLLER TRACE: 
dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750799143821 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "extra" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCom ... UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:05:46.095314Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:05:46.102611Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:05:46.168321Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799146145 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799146236 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799146145 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799146236 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-06-24T21:05:46.230541Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:46.230701Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:46.230713Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T21:05:46.231173Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T21:05:46.566183Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:47.723752Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750799146208, tx_id: 281474976715658 } } } 2025-06-24T21:05:47.724007Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-24T21:05:47.725114Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-06-24T21:05:47.726070Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799146236 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: 
"Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { 
TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-24T21:05:47.726207Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Column type mismatch: name: value, expected: Utf8, got: Uint32 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::EmptyReplicationConfig [GOOD] Test command err: 2025-06-24T21:05:42.557060Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625058044512954:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:42.557200Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030b9/r3tmp/tmpisMGsU/pdisk_1.dat 2025-06-24T21:05:42.845051Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:42.846603Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625058044512921:2079] 1750799142552886 != 1750799142552889 2025-06-24T21:05:42.917476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:42.917546Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:42.919685Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8308 TServer::EnableGrpc on GrpcPort 10275, node 1 2025-06-24T21:05:43.147802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:43.147834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:43.147845Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:43.147955Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8308 WaitRootIsUp 'Root'... 
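The DstCreator failures in this output (ColumnTypeMismatch above, together with the KeyColumnsSizeMismatch, ReplicationConsistencyLevelMismatch and EmptyReplicationConfig cases listed with it) all reduce to one check: when the destination table already exists, dst_creator.cpp compares its described scheme against the source table and fails with StatusSchemeError on any divergence, e.g. "Column type mismatch: name: value, expected: Utf8, got: Uint32". The self-contained C++ sketch below reproduces only that column comparison; it is not the actual NKikimr implementation, and TColumn/CheckColumns are hypothetical names.

#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct TColumn {
    std::string Name;
    std::string Type;  // e.g. "Uint32", "Utf8"
};

// Returns an error description when the existing destination scheme does not
// match the source scheme, or std::nullopt when the schemes are compatible.
std::optional<std::string> CheckColumns(const std::vector<TColumn>& src,
                                        const std::vector<TColumn>& dst) {
    if (src.size() != dst.size()) {
        return "Column count mismatch: expected " + std::to_string(src.size()) +
               ", got " + std::to_string(dst.size());
    }
    for (size_t i = 0; i < src.size(); ++i) {
        if (src[i].Name != dst[i].Name) {
            return "Column name mismatch: expected " + src[i].Name +
                   ", got " + dst[i].Name;
        }
        if (src[i].Type != dst[i].Type) {
            return "Column type mismatch: name: " + src[i].Name +
                   ", expected: " + src[i].Type + ", got: " + dst[i].Type;
        }
    }
    return std::nullopt;
}

int main() {
    // Reproduces the ColumnTypeMismatch case from the log: the source declares
    // "value" as Utf8 while the pre-created destination declares it as Uint32.
    std::vector<TColumn> src{{"key", "Uint32"}, {"value", "Utf8"}};
    std::vector<TColumn> dst{{"key", "Uint32"}, {"value", "Uint32"}};
    if (auto err = CheckColumns(src, dst)) {
        std::cout << "Error: status# StatusSchemeError, reason# " << *err << "\n";
    }
    return 0;
}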
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:05:43.563754Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:05:43.606991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:05:43.638476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:05:43.738559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799143667 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799143800 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799143667 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799143800 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-06-24T21:05:43.803056Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:43.803321Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:43.803370Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T21:05:43.803965Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T21:05:44.959455Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750799143751, tx_id: 281474976715658 } } } 2025-06-24T21:05:44.959753Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-24T21:05:44.962010Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-06-24T21:05:44.963772Z node 1 :REPLICATION_CONTROLLER TRACE: 
dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799143800 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBroke ... builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:05:46.018914Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:05:46.026870Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:05:46.060258Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799146068 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799146131 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799146068 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799146131 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-06-24T21:05:46.160066Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:46.160289Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:46.160321Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T21:05:46.160757Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T21:05:46.531823Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:47.698078Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750799146096, tx_id: 281474976715658 } } } 2025-06-24T21:05:47.698355Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-24T21:05:47.699515Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-06-24T21:05:47.700155Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799146131 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: 
"Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 
UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-24T21:05:47.700241Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Empty replication config ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::KeyColumnsSizeMismatch [GOOD] Test command err: 2025-06-24T21:05:42.556909Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625060018261472:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:42.557036Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00308d/r3tmp/tmpE4ePSp/pdisk_1.dat 2025-06-24T21:05:42.863824Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625060018261439:2079] 1750799142552825 != 1750799142552828 2025-06-24T21:05:42.869715Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:42.965235Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:42.965360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:42.967022Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30746 TServer::EnableGrpc on GrpcPort 22443, node 1 2025-06-24T21:05:43.147913Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:43.147957Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:43.147965Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:43.148079Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30746 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:05:43.563578Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:05:43.637960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799143695 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799143695 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version... 
(TRUNCATED) 2025-06-24T21:05:43.684088Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:43.684213Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:43.684226Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T21:05:43.684755Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T21:05:44.923135Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { status: SCHEME_ERROR, issues: } } 2025-06-24T21:05:44.923220Z node 1 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Cannot describe table: status: SCHEME_ERROR, issue: 2025-06-24T21:05:45.505849Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625069906536407:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:45.505923Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00308d/r3tmp/tmpim69Jj/pdisk_1.dat 2025-06-24T21:05:45.601272Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:45.602389Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625069906536387:2079] 1750799145505374 != 1750799145505377 2025-06-24T21:05:45.651858Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:45.651917Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:45.653581Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6954 TServer::EnableGrpc on GrpcPort 10387, 
node 2 2025-06-24T21:05:45.811534Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:45.811565Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:45.811577Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:45.811721Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6954 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:05:46.059603Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:05:46.066789Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:05:46.161319Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799146110 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799146229 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799146110 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799146229 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-06-24T21:05:46.221199Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:46.221347Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:46.221361Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T21:05:46.221812Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T21:05:46.513084Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:47.743935Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750799146173, tx_id: 281474976715658 } } } 2025-06-24T21:05:47.744305Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-24T21:05:47.745622Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-06-24T21:05:47.747100Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799146229 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnNames: "value" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } 
ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-24T21:05:47.747316Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Key columns size mismatch: expected: 1, got: 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::ReplicationConsistencyLevelMismatch [GOOD] Test command err: 2025-06-24T21:05:42.556935Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625056898125052:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:42.557709Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030ac/r3tmp/tmpaSuMza/pdisk_1.dat 2025-06-24T21:05:42.876076Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table 
profiles were not loaded 2025-06-24T21:05:42.877847Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625056898125024:2079] 1750799142552805 != 1750799142552808 2025-06-24T21:05:42.966056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:42.966164Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:42.967937Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27671 TServer::EnableGrpc on GrpcPort 11670, node 1 2025-06-24T21:05:43.147881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:43.147913Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:43.147921Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:43.148047Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27671 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:05:43.564023Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:05:43.618964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:05:43.647499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:05:43.761746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799143688 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750799143828 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799143688 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750799143828 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-06-24T21:05:43.828535Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:43.828620Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:43.828645Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T21:05:43.829234Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T21:05:44.988300Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750799143751, tx_id: 281474976710658 } } } 2025-06-24T21:05:44.988712Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-24T21:05:44.990052Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976710660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-06-24T21:05:44.991552Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750799143828 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } 
KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBro ... ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:05:46.037652Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:05:46.044074Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:05:46.107280Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799146089 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799146173 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799146089 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799146173 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-06-24T21:05:46.160003Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:46.160143Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:46.160155Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T21:05:46.160469Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T21:05:46.557402Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:47.932936Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750799146145, tx_id: 281474976715658 } } } 2025-06-24T21:05:47.933220Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-24T21:05:47.934659Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-06-24T21:05:47.935734Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799146173 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" 
ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_GLOBAL } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-24T21:05:47.935930Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Replication consistency level mismatch: expected: CONSISTENCY_LEVEL_ROW, got: 1 |93.5%| [TA] $(B)/ydb/core/kqp/ut/yql/test-results/unittest/{meta.json ... results_accumulator.log} |93.5%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/yql/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::SamePartitionCount [GOOD] Test command err: 2025-06-24T21:05:42.557031Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625058941496705:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:42.557150Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030a3/r3tmp/tmpoDjpYJ/pdisk_1.dat 2025-06-24T21:05:42.877483Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625058941496672:2079] 1750799142552873 != 1750799142552876 2025-06-24T21:05:42.885916Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:42.977435Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:42.977534Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:42.979276Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28806 TServer::EnableGrpc on GrpcPort 20301, node 1 2025-06-24T21:05:43.147826Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:43.147848Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:43.147855Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:43.148075Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28806 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:05:43.563542Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:05:43.607237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:05:43.634544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:05:43.638462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750799143751 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799143674 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "user@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750799143751 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... 
(TRUNCATED) 2025-06-24T21:05:43.802481Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:43.802719Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:43.802749Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T21:05:43.803319Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T21:05:45.064566Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750799143751, tx_id: 281474976710659 } } } 2025-06-24T21:05:45.064898Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-24T21:05:45.066603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:45.067393Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710660} 2025-06-24T21:05:45.067423Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710660 2025-06-24T21:05:45.089022Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710660 2025-06-24T21:05:45.089048Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] TClient::Ls request: /Root/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: 
EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1750799145130 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "user@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) 2025-06-24T21:05:45.763344Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625070300458821:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:45.763523Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030a3/r3tmp/tmpsbvIma/pdisk_1.dat 2025-06-24T21:05:45.844203Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:45.894322Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:45.894383Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:45.895788Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28228 TServer::EnableGrpc on GrpcPort 6942, node 2 2025-06-24T21:05:46.044956Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:46.044977Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:46.044986Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:46.045093Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28228 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:05:46.292486Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:05:46.299194Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750799146383 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799146341 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750799146383 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... 
(TRUNCATED) 2025-06-24T21:05:46.375897Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:46.376029Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:46.376042Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T21:05:46.376407Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T21:05:46.770686Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:48.068312Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750799146383, tx_id: 281474976715658 } } } 2025-06-24T21:05:48.069640Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-24T21:05:48.071458Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:48.072311Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-24T21:05:48.072332Z node 2 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 2025-06-24T21:05:48.095395Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 TClient::Ls request: /Root/Table 2025-06-24T21:05:48.095423Z node 2 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] 
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750799146383 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799148140 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) >> DstCreator::GlobalConsistency [GOOD] >> DstCreator::KeyColumnNameMismatch >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldCompactBorrowedAfterSplitMerge [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldHandleBorrowCompactionTimeouts |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::NlohmannJsonToProtoMap [GOOD] >> DstCreator::WithIntermediateDir [GOOD] >> DstCreator::WithAsyncIndex >> JsonProtoConversion::NlohmannJsonToProtoArray [GOOD] >> JsonProtoConversion::JsonToProtoMap [GOOD] >> JsonProtoConversion::ProtoMapToJson_ReceiveMessageResult [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::JsonToProtoMap [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::NlohmannJsonToProtoMap [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::NlohmannJsonToProtoArray [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::ProtoMapToJson_ReceiveMessageResult [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::JsonToProtoSingleValue [GOOD] >> JsonProtoConversion::ProtoMapToJson [GOOD] >> DstCreator::KeyColumnNameMismatch [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::JsonToProtoSingleValue [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::ProtoMapToJson [GOOD] >> 
test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> JsonProtoConversion::JsonToProtoArray [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::KeyColumnNameMismatch [GOOD] Test command err: 2025-06-24T21:05:48.067052Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625085999599561:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:48.067136Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00307d/r3tmp/tmpBpWxq9/pdisk_1.dat 2025-06-24T21:05:48.319620Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:48.325969Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625085999599542:2079] 1750799148066356 != 1750799148066359 2025-06-24T21:05:48.440037Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:48.440163Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:48.442002Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5681 TServer::EnableGrpc on GrpcPort 18151, node 1 2025-06-24T21:05:48.494731Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:48.494752Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:48.494759Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:48.494868Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5681 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:05:48.765950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:05:48.777981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750799148882 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799148819 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750799148882 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... 
(TRUNCATED) 2025-06-24T21:05:48.908239Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:48.908388Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:48.908424Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T21:05:48.908937Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T21:05:49.077165Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:50.326691Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750799148882, tx_id: 281474976715658 } } } 2025-06-24T21:05:50.327075Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-24T21:05:50.328523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:50.329256Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-24T21:05:50.329275Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 TClient::Ls request: 2025-06-24T21:05:50.349764Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 /Root/Replicated 2025-06-24T21:05:50.349788Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] 
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799150394 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) 2025-06-24T21:05:50.829803Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625094748910073:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:50.829878Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00307d/r3tmp/tmpIpOWg5/pdisk_1.dat 2025-06-24T21:05:50.931590Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625094748910054:2079] 1750799150829273 != 1750799150829276 2025-06-24T21:05:50.943999Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:50.945209Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:50.945269Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:50.947791Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected ... AttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:05:51.331325Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:05:51.337380Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:05:51.364404Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799151381 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750799151430 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799151381 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750799151430 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-06-24T21:05:51.416532Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:51.416769Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:51.416797Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T21:05:51.417229Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T21:05:51.836634Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:53.062801Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750799151402, tx_id: 281474976710658 } } } 2025-06-24T21:05:53.063069Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-24T21:05:53.064559Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976710660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-06-24T21:05:53.065653Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750799151430 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: 
"Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { 
TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-24T21:05:53.065957Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Key column name mismatch: position: 0, expected: key, got: value |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::JsonToProtoArray [GOOD] >> DstCreator::WithAsyncIndex [GOOD] |93.5%| [TA] $(B)/ydb/core/http_proxy/ut/test-results/unittest/{meta.json ... results_accumulator.log} |93.5%| [TA] {RESULT} $(B)/ydb/core/http_proxy/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_17_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 17] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::WithAsyncIndex [GOOD] Test command err: 2025-06-24T21:05:49.154167Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625089062692501:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:49.154282Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003071/r3tmp/tmpRqkZkA/pdisk_1.dat 2025-06-24T21:05:49.403678Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625089062692479:2079] 1750799149153371 != 1750799149153374 2025-06-24T21:05:49.416516Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:49.514522Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:49.514639Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:49.516322Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17089 TServer::EnableGrpc on GrpcPort 13039, node 1 2025-06-24T21:05:49.600251Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:49.600279Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:49.600286Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:49.600406Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration TClient is connected to server localhost:17089 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:05:49.918289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:05:49.933969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750799150037 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799149974 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750799150037 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... (TRUNCATED) 2025-06-24T21:05:50.057946Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:50.058116Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:50.058129Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T21:05:50.058578Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T21:05:50.162007Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:51.423698Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750799150037, tx_id: 281474976710658 } } } 2025-06-24T21:05:51.424050Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-24T21:05:51.425556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:1, at schemeshard: 72057594046644480, first GetDB called at: 
(GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:51.426295Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-06-24T21:05:51.426316Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-06-24T21:05:51.447130Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-06-24T21:05:51.447172Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 4] TClient::Ls request: /Root/Dir/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750799151486 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) 2025-06-24T21:05:51.920829Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625096506085743:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:05:51.920927Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003071/r3tmp/tmpoJEVT9/pdisk_1.dat 2025-06-24T21:05:52.010522Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:52.016902Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625096506085724:2079] 1750799151920264 != 1750799151920267 2025-06-24T21:05:52.049902Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:52.049965Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:52.051335Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3010 TServer::EnableGrpc on GrpcPort 28698, node 2 2025-06-24T21:05:52.196038Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:52.196061Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:52.196066Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T21:05:52.196146Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3010 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:05:52.430678Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:05:52.437942Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750799152669 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799152480 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750799152669 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: ".sys" PathId: 18446... (TRUNCATED) 2025-06-24T21:05:52.727934Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:52.728109Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:05:52.728128Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T21:05:52.728469Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T21:05:52.927315Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:54.185128Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750799152669, tx_id: 281474976715658 } } } 2025-06-24T21:05:54.185396Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-24T21:05:54.186734Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: 
(GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:54.187469Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-24T21:05:54.187486Z node 2 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 2025-06-24T21:05:54.212087Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 2025-06-24T21:05:54.212114Z node 2 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 5] TClient::Ls request: /Root/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 5 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750799154251 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key... (TRUNCATED) >> ListObjectsInS3Export::PagingParameters [GOOD] |93.5%| [TA] $(B)/ydb/core/tx/replication/controller/ut_dst_creator/test-results/unittest/{meta.json ... results_accumulator.log} |93.5%| [TA] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/test-results/unittest/{meta.json ... 
results_accumulator.log} >> DataShardReadTableSnapshots::ReadTableDropColumnLatePropose >> DataShardReadTableSnapshots::ReadTableDropColumn >> DataShardReadTableSnapshots::ReadTableSplitNewTxIdResolveResultReorder >> DataShardReadTableSnapshots::ReadTableSnapshot |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::DropByDisconnect >> TBoardSubscriberTest::ManySubscribersManyPublisher >> ListObjectsInS3Export::ParametersValidation >> TBoardSubscriberTest::ManySubscribersManyPublisher [GOOD] >> TBoardSubscriberTest::DropByDisconnect [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::ManySubscribersManyPublisher [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::DropByDisconnect [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::ReconnectReplica >> TBoardSubscriberTest::SimpleSubscriber |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::ReconnectReplica [GOOD] >> TBoardSubscriberTest::SimpleSubscriber [GOOD] >> TBoardSubscriberTest::NotAvailableByShutdown |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::ReconnectReplica [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::SimpleSubscriber [GOOD] >> TBoardSubscriberTest::NotAvailableByShutdown [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::NotAvailableByShutdown [GOOD] >> DataShardReadTableSnapshots::ReadTableSnapshot [GOOD] >> DataShardReadTableSnapshots::ReadTableSplitAfter >> DataShardReadTableSnapshots::ReadTableDropColumnLatePropose [GOOD] >> DataShardReadTableSnapshots::ReadTableMaxRows >> DataShardReadTableSnapshots::ReadTableDropColumn [GOOD] >> DataShardReadTableSnapshots::CorruptedDyNumber |93.5%| [TA] $(B)/ydb/core/base/ut_board_subscriber/test-results/unittest/{meta.json ... results_accumulator.log} |93.5%| [TA] {RESULT} $(B)/ydb/core/base/ut_board_subscriber/test-results/unittest/{meta.json ... 
results_accumulator.log} >> DataShardReadTableSnapshots::ReadTableSplitNewTxIdResolveResultReorder [GOOD] >> DataShardReadTableSnapshots::ReadTableUUID >> ListObjectsInS3Export::ParametersValidation [GOOD] >> TCmsTenatsTest::TestTenantLimit >> TDowntimeTest::AddDowntime [GOOD] >> TDowntimeTest::HasUpcomingDowntime [GOOD] >> TDowntimeTest::CleanupOldSegments [GOOD] >> TMaintenanceApiTest::ManyActionGroupsWithSingleAction >> TCmsTest::VDisksEvictionShouldFailWhileSentinelIsDisabled |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TDowntimeTest::CleanupOldSegments [GOOD] >> TCmsTest::RequestRestartServicesOk >> DataShardReadTableSnapshots::CorruptedDyNumber [GOOD] >> DataShardReadTableSnapshots::ReadTableSplitAfter [GOOD] >> TMaintenanceApiTest::ManyActionGroupsWithSingleAction [GOOD] >> TMaintenanceApiTest::CreateTime >> TCmsTest::VDisksEvictionShouldFailWhileSentinelIsDisabled [GOOD] >> TCmsTest::VDisksEvictionShouldFailOnUnsupportedAction >> DataShardReadTableSnapshots::ReadTableUUID [GOOD] >> DataShardReadTableSnapshots::ReadTableMaxRows [GOOD] >> TCmsTenatsTest::TestTenantLimit [GOOD] >> TCmsTenatsTest::TestScheduledPermissionWithNonePolicy ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_table/unittest >> DataShardReadTableSnapshots::CorruptedDyNumber [GOOD] Test command err: 2025-06-24T21:05:58.741765Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:58.742076Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:58.742208Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002e4e/r3tmp/tmpGMKhJL/pdisk_1.dat 2025-06-24T21:05:59.065025Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:05:59.075037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:59.114329Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:59.121919Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799156705527 != 1750799156705531 2025-06-24T21:05:59.165017Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:05:59.165912Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T21:05:59.166131Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:59.166246Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:59.178353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:59.260307Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T21:05:59.260380Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T21:05:59.261528Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:600:2508] 2025-06-24T21:05:59.390989Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:600:2508] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T21:05:59.391094Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:600:2508] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:05:59.391639Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:600:2508] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:05:59.391710Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:600:2508] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:05:59.391935Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:05:59.392072Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:600:2508] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:05:59.392179Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:600:2508] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:05:59.393618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:59.394061Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T21:05:59.394556Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:600:2508] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T21:05:59.394605Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:600:2508] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T21:05:59.425034Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:05:59.426389Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:05:59.426944Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:05:59.427305Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:05:59.472844Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:05:59.473710Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:05:59.473863Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:05:59.475744Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:05:59.475831Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:05:59.475899Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:05:59.476346Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-24T21:05:59.476495Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:05:59.476582Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:05:59.487274Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:05:59.512252Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:05:59.512469Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:05:59.512595Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:05:59.512631Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:05:59.512664Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:05:59.512694Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:05:59.512876Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:05:59.512918Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:05:59.513215Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:05:59.513306Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:05:59.513362Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:05:59.513412Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:05:59.513472Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:05:59.513527Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:05:59.513556Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:05:59.513585Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:05:59.513618Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:05:59.513718Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:05:59.513751Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:05:59.513812Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], 
sessionId# [0:0:0] 2025-06-24T21:05:59.514173Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:05:59.514208Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:05:59.514298Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:05:59.514527Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:05:59.514583Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:05:59.514658Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:05:59.514700Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21: ... ng event TEvTxProcessing::TEvStreamClearancePending 2025-06-24T21:06:04.414679Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287940, Sender [2:699:2581], Recipient [2:624:2529]: NKikimrTx.TEvStreamClearanceResponse TxId: 281474976715659 Cleared: true 2025-06-24T21:06:04.414709Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, processing event TEvTxProcessing::TEvStreamClearanceResponse 2025-06-24T21:06:04.414875Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:624:2529], Recipient [2:624:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:06:04.414903Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:06:04.414946Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:06:04.414975Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:06:04.415013Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715659] at 72075186224037888 for WaitForStreamClearance 2025-06-24T21:06:04.415047Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit WaitForStreamClearance 2025-06-24T21:06:04.415113Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:156: Got stream clearance for [0:281474976715659] at 72075186224037888 2025-06-24T21:06:04.415174Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is Executed 2025-06-24T21:06:04.415204Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715659] at 72075186224037888 executing on unit WaitForStreamClearance 2025-06-24T21:06:04.415230Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715659] at 72075186224037888 to execution unit ReadTableScan 2025-06-24T21:06:04.415255Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit ReadTableScan 2025-06-24T21:06:04.415423Z node 2 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is Continue 2025-06-24T21:06:04.415457Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:06:04.415488Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-24T21:06:04.415512Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:06:04.415536Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:06:04.415589Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:06:04.415962Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [2:729:2598], Recipient [2:624:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-24T21:06:04.415996Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-24T21:06:04.416091Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:729:2598], Recipient [2:699:2581]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715659 ShardId: 72075186224037888 2025-06-24T21:06:04.416131Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:699:2581] TxId# 281474976715658] Received TEvStreamQuotaRequest from ShardId# 72075186224037888 2025-06-24T21:06:04.416413Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287941, Sender [2:698:2581], Recipient [2:699:2581]: NKikimrTx.TEvStreamQuotaResponse TxId: 281474976715658 MessageSizeLimit: 1 ReservedMessages: 1 2025-06-24T21:06:04.416461Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:699:2581] TxId# 281474976715658] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-24T21:06:04.416494Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:699:2581] TxId# 281474976715658] Reserving quota 1 messages for ShardId# 72075186224037888 2025-06-24T21:06:04.416567Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715659, MessageQuota: 1 2025-06-24T21:06:04.416682Z node 2 :TX_DATASHARD ERROR: read_table_scan.cpp:681: Got scan fatal error: Invalid DyNumber binary representation 2025-06-24T21:06:04.416727Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715659, MessageQuota: 1 2025-06-24T21:06:04.416896Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:06:04.416934Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715659, at: 72075186224037888 2025-06-24T21:06:04.417038Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287429, Sender [2:729:2598], Recipient [2:699:2581]: NKikimrTx.TEvStreamQuotaRelease TxId: 281474976715659 ShardId: 72075186224037888 2025-06-24T21:06:04.417068Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2120: [ReadTable [2:699:2581] TxId# 281474976715658] Received TEvStreamQuotaRelease from ShardId# 72075186224037888 2025-06-24T21:06:04.417095Z node 2 :TX_PROXY DEBUG: 
read_table_impl.cpp:2188: [ReadTable [2:699:2581] TxId# 281474976715658] Released quota 1 reserved messages from ShardId# 72075186224037888 2025-06-24T21:06:04.417186Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:624:2529], Recipient [2:624:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:06:04.417221Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:06:04.417269Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:06:04.417298Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:06:04.417325Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715659] at 72075186224037888 for ReadTableScan 2025-06-24T21:06:04.417353Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit ReadTableScan 2025-06-24T21:06:04.417402Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715659] at 72075186224037888 error: Invalid DyNumber binary representation, IsFatalError: 1 2025-06-24T21:06:04.417454Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is Executed 2025-06-24T21:06:04.417498Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715659] at 72075186224037888 executing on unit ReadTableScan 2025-06-24T21:06:04.417524Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715659] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:06:04.417548Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit FinishPropose 2025-06-24T21:06:04.417570Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is DelayComplete 2025-06-24T21:06:04.417588Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715659] at 72075186224037888 executing on unit FinishPropose 2025-06-24T21:06:04.417610Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715659] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:06:04.417651Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:06:04.417676Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is Executed 2025-06-24T21:06:04.417689Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715659] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:06:04.417723Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715659] at 72075186224037888 has finished 2025-06-24T21:06:04.417749Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:06:04.417782Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 
72075186224037888 2025-06-24T21:06:04.417810Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:06:04.417833Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:06:04.417872Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:06:04.417897Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715659] at 72075186224037888 on unit FinishPropose 2025-06-24T21:06:04.417928Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715659 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: EXEC_ERROR 2025-06-24T21:06:04.417958Z node 2 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715659 at tablet 72075186224037888 status: EXEC_ERROR errors: PROGRAM_ERROR (Invalid DyNumber binary representation) | 2025-06-24T21:06:04.418014Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:06:04.418218Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:624:2529], Recipient [2:699:2581]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037888 Status: EXEC_ERROR Error { Kind: PROGRAM_ERROR Reason: "Invalid DyNumber binary representation" } TxId: 281474976715659 Step: 0 OrderId: 281474976715659 ExecLatency: 0 ProposeLatency: 0 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186224037888 CpuTimeUsec: 445 } } CommitVersion { Step: 0 TxId: 281474976715659 } 2025-06-24T21:06:04.418246Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1921: [ReadTable [2:699:2581] TxId# 281474976715658] Received TEvProposeTransactionResult Status# EXEC_ERROR ShardId# 72075186224037888 2025-06-24T21:06:04.418290Z node 2 :TX_PROXY ERROR: read_table_impl.cpp:2919: [ReadTable [2:699:2581] TxId# 281474976715658] RESPONSE Status# ExecError shard: 72075186224037888 table: /Root/Table 2025-06-24T21:06:04.418508Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:699:2581], Recipient [2:624:2529]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1500 TxId: 281474976715658 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_table/unittest >> DataShardReadTableSnapshots::ReadTableUUID [GOOD] Test command err: 2025-06-24T21:05:58.741723Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:58.742052Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:58.742178Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002e47/r3tmp/tmpcuKLoE/pdisk_1.dat 2025-06-24T21:05:59.065063Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:05:59.072692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:59.113287Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:59.119531Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799156705506 != 1750799156705510 2025-06-24T21:05:59.163756Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:05:59.165666Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T21:05:59.165945Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:59.166062Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:59.178412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:59.260263Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T21:05:59.260333Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T21:05:59.261504Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:600:2508] 2025-06-24T21:05:59.370085Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:600:2508] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T21:05:59.370166Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:600:2508] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:05:59.370589Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:600:2508] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:05:59.370650Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:600:2508] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:05:59.370902Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:05:59.371021Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:600:2508] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:05:59.371123Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:600:2508] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:05:59.372480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:59.372897Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T21:05:59.373419Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:600:2508] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T21:05:59.373474Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:600:2508] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T21:05:59.403453Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:05:59.404386Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:05:59.404702Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:05:59.404896Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:05:59.435171Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:05:59.435721Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:05:59.435841Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:05:59.437072Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:05:59.437122Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:05:59.437171Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:05:59.437433Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-24T21:05:59.437537Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:05:59.437588Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:05:59.448096Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:05:59.469960Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:05:59.471313Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:05:59.471462Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:05:59.471501Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:05:59.471532Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:05:59.471566Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:05:59.471772Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:05:59.471818Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:05:59.472977Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:05:59.473053Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:05:59.473100Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:05:59.473134Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:05:59.473225Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:05:59.473270Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:05:59.473303Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:05:59.473335Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:05:59.473383Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:05:59.473475Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:05:59.473502Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:05:59.473544Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], 
sessionId# [0:0:0] 2025-06-24T21:05:59.474672Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:05:59.474719Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:05:59.474803Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:05:59.475122Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:05:59.475196Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:05:59.475272Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:05:59.475380Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21: ... 86224037888 to execution unit ReadTableScan 2025-06-24T21:06:05.163507Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037888 on unit ReadTableScan 2025-06-24T21:06:05.163634Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037888 is Continue 2025-06-24T21:06:05.163661Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:06:05.163681Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-24T21:06:05.163699Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:06:05.163715Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:06:05.163749Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:06:05.164049Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [2:825:2654], Recipient [2:624:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-24T21:06:05.164073Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-24T21:06:05.164120Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:814:2644] TxId# 281474976715661] Received TEvStreamQuotaRequest from ShardId# 72075186224037888 2025-06-24T21:06:05.164353Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:814:2644] TxId# 281474976715661] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-24T21:06:05.164390Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:814:2644] TxId# 281474976715661] Reserving quota 1 messages for ShardId# 72075186224037888 2025-06-24T21:06:05.164478Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715662, MessageQuota: 1 2025-06-24T21:06:05.164569Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, 
TxId: 281474976715662, Size: 38, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T21:06:05.164633Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:814:2644] TxId# 281474976715661] Received stream data from ShardId# 72075186224037888 2025-06-24T21:06:05.164687Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715662, PendingAcks: 0 2025-06-24T21:06:05.164727Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:814:2644] TxId# 281474976715661] Received TEvStreamQuotaRequest from ShardId# 72075186224037888 2025-06-24T21:06:05.164947Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:814:2644] TxId# 281474976715661] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-24T21:06:05.164964Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:814:2644] TxId# 281474976715661] Reserving quota 1 messages for ShardId# 72075186224037888 2025-06-24T21:06:05.164989Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715662, MessageQuota: 1 2025-06-24T21:06:05.165033Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715662, Size: 38, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T21:06:05.165077Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:814:2644] TxId# 281474976715661] Received stream data from ShardId# 72075186224037888 2025-06-24T21:06:05.165109Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715662, PendingAcks: 0 2025-06-24T21:06:05.165132Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:814:2644] TxId# 281474976715661] Received TEvStreamQuotaRequest from ShardId# 72075186224037888 2025-06-24T21:06:05.165280Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:814:2644] TxId# 281474976715661] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-24T21:06:05.165296Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:814:2644] TxId# 281474976715661] Reserving quota 1 messages for ShardId# 72075186224037888 2025-06-24T21:06:05.165363Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715662, MessageQuota: 1 2025-06-24T21:06:05.165409Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715662, Size: 38, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T21:06:05.165445Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:814:2644] TxId# 281474976715661] Received stream data from ShardId# 72075186224037888 2025-06-24T21:06:05.165476Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715662, PendingAcks: 0 2025-06-24T21:06:05.165518Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:814:2644] TxId# 281474976715661] Received TEvStreamQuotaRequest from ShardId# 72075186224037888 2025-06-24T21:06:05.165680Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:814:2644] TxId# 281474976715661] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-24T21:06:05.165696Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:814:2644] TxId# 281474976715661] Reserving 
quota 1 messages for ShardId# 72075186224037888 2025-06-24T21:06:05.165730Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715662, MessageQuota: 1 2025-06-24T21:06:05.165767Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715662, MessageQuota: 1 2025-06-24T21:06:05.165890Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:06:05.165934Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715662, at: 72075186224037888 2025-06-24T21:06:05.166054Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2120: [ReadTable [2:814:2644] TxId# 281474976715661] Received TEvStreamQuotaRelease from ShardId# 72075186224037888 2025-06-24T21:06:05.166102Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2188: [ReadTable [2:814:2644] TxId# 281474976715661] Released quota 1 reserved messages from ShardId# 72075186224037888 2025-06-24T21:06:05.166201Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:624:2529], Recipient [2:624:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:06:05.166249Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:06:05.166352Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:06:05.166405Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:06:05.166453Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715662] at 72075186224037888 for ReadTableScan 2025-06-24T21:06:05.166492Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037888 on unit ReadTableScan 2025-06-24T21:06:05.166535Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715662] at 72075186224037888 error: , IsFatalError: 0 2025-06-24T21:06:05.166580Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037888 is Executed 2025-06-24T21:06:05.166623Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715662] at 72075186224037888 executing on unit ReadTableScan 2025-06-24T21:06:05.166664Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715662] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:06:05.166728Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037888 on unit FinishPropose 2025-06-24T21:06:05.166774Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037888 is DelayComplete 2025-06-24T21:06:05.166810Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715662] at 72075186224037888 executing on unit FinishPropose 2025-06-24T21:06:05.166860Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715662] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:06:05.166896Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037888 on 
unit CompletedOperations 2025-06-24T21:06:05.166948Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037888 is Executed 2025-06-24T21:06:05.166966Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715662] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:06:05.166985Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715662] at 72075186224037888 has finished 2025-06-24T21:06:05.167015Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:06:05.167041Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-24T21:06:05.167064Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:06:05.167088Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:06:05.167129Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:06:05.167169Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715662] at 72075186224037888 on unit FinishPropose 2025-06-24T21:06:05.167202Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715662 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-24T21:06:05.167253Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:06:05.167383Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1850: [ReadTable [2:814:2644] TxId# 281474976715661] Received stream complete from ShardId# 72075186224037888 2025-06-24T21:06:05.167429Z node 2 :TX_PROXY INFO: read_table_impl.cpp:2933: [ReadTable [2:814:2644] TxId# 281474976715661] RESPONSE Status# ExecComplete prepare time: 0.012565s execute time: 0.101846s total time: 0.114411s 2025-06-24T21:06:05.167644Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:814:2644], Recipient [2:624:2529]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715661 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_table/unittest >> DataShardReadTableSnapshots::ReadTableSplitAfter [GOOD] Test command err: 2025-06-24T21:05:58.741812Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:58.742192Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:58.742363Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002e59/r3tmp/tmp0UgaSv/pdisk_1.dat 2025-06-24T21:05:59.065029Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:05:59.072918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:59.115830Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:59.119538Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799156705533 != 1750799156705537 2025-06-24T21:05:59.163772Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:05:59.165575Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T21:05:59.165803Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:59.165900Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:59.178354Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:59.260303Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T21:05:59.260367Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T21:05:59.261488Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:600:2508] 2025-06-24T21:05:59.339870Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:600:2508] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T21:05:59.339966Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:600:2508] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:05:59.340507Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:600:2508] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:05:59.340581Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:600:2508] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:05:59.340820Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:05:59.340933Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:600:2508] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:05:59.341028Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:600:2508] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:05:59.343336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:59.344390Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T21:05:59.344977Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:600:2508] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T21:05:59.345066Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:600:2508] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T21:05:59.383346Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:05:59.384553Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:05:59.385841Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:05:59.386012Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:05:59.422712Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:05:59.423386Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:05:59.423484Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:05:59.424849Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:05:59.424900Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:05:59.424938Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:05:59.426878Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-24T21:05:59.427072Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:05:59.427196Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:05:59.437956Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:05:59.478784Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:05:59.478934Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:05:59.479018Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:05:59.479046Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:05:59.479069Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:05:59.479108Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:05:59.479262Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:05:59.479294Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:05:59.479501Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:05:59.479568Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:05:59.479620Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:05:59.479643Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:05:59.479680Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:05:59.479718Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:05:59.479741Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:05:59.479764Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:05:59.479794Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:05:59.479863Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:05:59.479886Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:05:59.479911Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], 
sessionId# [0:0:0] 2025-06-24T21:05:59.480173Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:05:59.480201Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:05:59.480270Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:05:59.480499Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:05:59.480565Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:05:59.480657Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:05:59.480714Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21: ... 2644]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037890 Status: RESPONSE_DATA TxId: 281474976715662 TxResult: "\n\016\n\003key\022\007\252\006\004\n\002\010\002\n\020\n\005value\022\007\252\006\004\n\002\010\002\030\001\022\016b\005\035\003\000\000\000b\005\035!\000\000\000" RowOffsets: 36 ApiVersion: 1 DataSeqNo: 1 DataLastKey: "\001\000\004\000\000\000\003\000\000\000" 2025-06-24T21:06:05.075128Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:814:2644] TxId# 281474976715661] Received stream data from ShardId# 72075186224037890 2025-06-24T21:06:05.075164Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1711: [ReadTable [2:814:2644] TxId# 281474976715661] Sending TEvStreamDataAck to [2:951:2750] ShardId# 72075186224037890 2025-06-24T21:06:05.075224Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:951:2750], Recipient [2:814:2644]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715662 ShardId: 72075186224037890 2025-06-24T21:06:05.075242Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:814:2644] TxId# 281474976715661] Received TEvStreamQuotaRequest from ShardId# 72075186224037890 2025-06-24T21:06:05.075275Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715662, PendingAcks: 0 2025-06-24T21:06:05.075537Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287941, Sender [2:813:2644], Recipient [2:814:2644]: NKikimrTx.TEvStreamQuotaResponse TxId: 281474976715661 MessageSizeLimit: 1 ReservedMessages: 1 2025-06-24T21:06:05.075572Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:814:2644] TxId# 281474976715661] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-24T21:06:05.075594Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:814:2644] TxId# 281474976715661] Reserving quota 1 messages for ShardId# 72075186224037890 2025-06-24T21:06:05.075626Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715662, MessageQuota: 1 2025-06-24T21:06:05.075692Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037890, TxId: 
281474976715662, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T21:06:05.075792Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:951:2750], Recipient [2:814:2644]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037890 Status: RESPONSE_DATA TxId: 281474976715662 TxResult: "\n\016\n\003key\022\007\252\006\004\n\002\010\002\n\020\n\005value\022\007\252\006\004\n\002\010\002\030\001\022\016b\005\035\004\000\000\000b\005\035,\000\000\000" RowOffsets: 36 ApiVersion: 1 DataSeqNo: 2 DataLastKey: "\001\000\004\000\000\000\004\000\000\000" 2025-06-24T21:06:05.075812Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:814:2644] TxId# 281474976715661] Received stream data from ShardId# 72075186224037890 2025-06-24T21:06:05.075830Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1711: [ReadTable [2:814:2644] TxId# 281474976715661] Sending TEvStreamDataAck to [2:951:2750] ShardId# 72075186224037890 2025-06-24T21:06:05.075868Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715662, PendingAcks: 0 2025-06-24T21:06:05.075915Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:951:2750], Recipient [2:814:2644]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715662 ShardId: 72075186224037890 2025-06-24T21:06:05.075934Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:814:2644] TxId# 281474976715661] Received TEvStreamQuotaRequest from ShardId# 72075186224037890 2025-06-24T21:06:05.076139Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287941, Sender [2:813:2644], Recipient [2:814:2644]: NKikimrTx.TEvStreamQuotaResponse TxId: 281474976715661 MessageSizeLimit: 1 ReservedMessages: 1 2025-06-24T21:06:05.076161Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:814:2644] TxId# 281474976715661] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-24T21:06:05.076181Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:814:2644] TxId# 281474976715661] Reserving quota 1 messages for ShardId# 72075186224037890 2025-06-24T21:06:05.076209Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715662, MessageQuota: 1 2025-06-24T21:06:05.076248Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037890, TxId: 281474976715662, MessageQuota: 1 2025-06-24T21:06:05.076371Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287429, Sender [2:951:2750], Recipient [2:814:2644]: NKikimrTx.TEvStreamQuotaRelease TxId: 281474976715662 ShardId: 72075186224037890 2025-06-24T21:06:05.076392Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2120: [ReadTable [2:814:2644] TxId# 281474976715661] Received TEvStreamQuotaRelease from ShardId# 72075186224037890 2025-06-24T21:06:05.076412Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2188: [ReadTable [2:814:2644] TxId# 281474976715661] Released quota 1 reserved messages from ShardId# 72075186224037890 2025-06-24T21:06:05.076446Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037890 2025-06-24T21:06:05.076468Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715662, at: 72075186224037890 2025-06-24T21:06:05.076582Z node 2 :TX_DATASHARD TRACE: 
datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:854:2675], Recipient [2:854:2675]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:06:05.076607Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:06:05.076637Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-24T21:06:05.076660Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:06:05.076684Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715662] at 72075186224037890 for ReadTableScan 2025-06-24T21:06:05.076702Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037890 on unit ReadTableScan 2025-06-24T21:06:05.076724Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715662] at 72075186224037890 error: , IsFatalError: 0 2025-06-24T21:06:05.076752Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037890 is Executed 2025-06-24T21:06:05.076776Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715662] at 72075186224037890 executing on unit ReadTableScan 2025-06-24T21:06:05.076804Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715662] at 72075186224037890 to execution unit FinishPropose 2025-06-24T21:06:05.076826Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037890 on unit FinishPropose 2025-06-24T21:06:05.076850Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037890 is DelayComplete 2025-06-24T21:06:05.076872Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715662] at 72075186224037890 executing on unit FinishPropose 2025-06-24T21:06:05.076892Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715662] at 72075186224037890 to execution unit CompletedOperations 2025-06-24T21:06:05.076919Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037890 on unit CompletedOperations 2025-06-24T21:06:05.076963Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037890 is Executed 2025-06-24T21:06:05.076983Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715662] at 72075186224037890 executing on unit CompletedOperations 2025-06-24T21:06:05.077001Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715662] at 72075186224037890 has finished 2025-06-24T21:06:05.077020Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:06:05.077040Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037890 2025-06-24T21:06:05.077068Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2025-06-24T21:06:05.077092Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-24T21:06:05.077139Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-24T21:06:05.077171Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715662] at 72075186224037890 on unit FinishPropose 2025-06-24T21:06:05.077200Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715662 at tablet 72075186224037890 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-24T21:06:05.077253Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:06:05.077473Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:854:2675], Recipient [2:814:2644]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037890 Status: COMPLETE TxId: 281474976715662 Step: 0 OrderId: 281474976715662 ExecLatency: 0 ProposeLatency: 0 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186224037890 CpuTimeUsec: 299 } } CommitVersion { Step: 0 TxId: 281474976715662 } 2025-06-24T21:06:05.077497Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1850: [ReadTable [2:814:2644] TxId# 281474976715661] Received stream complete from ShardId# 72075186224037890 2025-06-24T21:06:05.077540Z node 2 :TX_PROXY INFO: read_table_impl.cpp:2933: [ReadTable [2:814:2644] TxId# 281474976715661] RESPONSE Status# ExecComplete prepare time: 0.013626s execute time: 0.266080s total time: 0.279706s 2025-06-24T21:06:05.077985Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:814:2644], Recipient [2:624:2529]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715661 2025-06-24T21:06:05.078154Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:814:2644], Recipient [2:850:2673]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715661 2025-06-24T21:06:05.078344Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:814:2644], Recipient [2:854:2675]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715661 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_table/unittest >> DataShardReadTableSnapshots::ReadTableMaxRows [GOOD] Test command err: 2025-06-24T21:05:58.741684Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:58.742015Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:58.742133Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002df1/r3tmp/tmpv8LF1U/pdisk_1.dat 2025-06-24T21:05:59.065025Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:05:59.075426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:59.113067Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:59.119583Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799156705576 != 1750799156705580 2025-06-24T21:05:59.163784Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:05:59.165648Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T21:05:59.165896Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:59.166022Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:59.178380Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:59.260325Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T21:05:59.260389Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T21:05:59.261508Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:600:2508] 2025-06-24T21:05:59.380295Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:600:2508] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T21:05:59.380401Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:600:2508] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:05:59.380912Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:600:2508] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:05:59.380979Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:600:2508] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:05:59.381218Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:05:59.381386Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:600:2508] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:05:59.381514Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:600:2508] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:05:59.382999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:59.383465Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T21:05:59.384002Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:600:2508] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T21:05:59.384082Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:600:2508] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T21:05:59.410313Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:05:59.411294Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:05:59.411680Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:05:59.411936Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:05:59.443518Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:05:59.444134Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:05:59.444227Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:05:59.445658Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:05:59.445718Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:05:59.445779Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:05:59.446093Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-24T21:05:59.446214Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:05:59.446282Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:05:59.456926Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:05:59.489180Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:05:59.489385Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:05:59.489500Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:05:59.489530Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:05:59.489561Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:05:59.489590Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:05:59.489772Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:05:59.489811Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:05:59.490092Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:05:59.490163Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:05:59.490208Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:05:59.490237Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:05:59.490288Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:05:59.490335Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:05:59.490368Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:05:59.490395Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:05:59.490431Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:05:59.490514Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:05:59.490544Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:05:59.490586Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], 
sessionId# [0:0:0] 2025-06-24T21:05:59.490929Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:05:59.490965Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:05:59.491055Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:05:59.491266Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:05:59.491316Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:05:59.491388Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:05:59.491434Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21: ... 224037890 2025-06-24T21:06:05.163582Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-24T21:06:05.163937Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [2:950:2750], Recipient [2:839:2662]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-24T21:06:05.163986Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-24T21:06:05.164053Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:950:2750], Recipient [2:924:2726]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715663 ShardId: 72075186224037890 2025-06-24T21:06:05.164074Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:924:2726] TxId# 281474976715662] Received TEvStreamQuotaRequest from ShardId# 72075186224037890 2025-06-24T21:06:05.164099Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:924:2726] TxId# 281474976715662] Reserving quota 1 messages for ShardId# 72075186224037890 ... 
observed row limit of 2 rows at [2:950:2750] 2025-06-24T21:06:05.164154Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715663, MessageQuota: 1 2025-06-24T21:06:05.164396Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037890, TxId: 281474976715663, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T21:06:05.164512Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:950:2750], Recipient [2:924:2726]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037890 Status: RESPONSE_DATA TxId: 281474976715663 TxResult: "\n\016\n\003key\022\007\252\006\004\n\002\010\002\n\020\n\005value\022\007\252\006\004\n\002\010\002\030\001\022\016b\005\035\004\000\000\000b\005\035,\000\000\000" RowOffsets: 36 ApiVersion: 1 DataSeqNo: 1 DataLastKey: "\001\000\004\000\000\000\004\000\000\000" 2025-06-24T21:06:05.164536Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:924:2726] TxId# 281474976715662] Received stream data from ShardId# 72075186224037890 2025-06-24T21:06:05.164554Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1711: [ReadTable [2:924:2726] TxId# 281474976715662] Sending TEvStreamDataAck to [2:950:2750] ShardId# 72075186224037890 2025-06-24T21:06:05.164627Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:950:2750], Recipient [2:924:2726]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715663 ShardId: 72075186224037890 2025-06-24T21:06:05.164649Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:924:2726] TxId# 281474976715662] Received TEvStreamQuotaRequest from ShardId# 72075186224037890 2025-06-24T21:06:05.164674Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715663, PendingAcks: 0 2025-06-24T21:06:05.164895Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287941, Sender [2:923:2726], Recipient [2:924:2726]: NKikimrTx.TEvStreamQuotaResponse TxId: 281474976715662 MessageSizeLimit: 1 ReservedMessages: 1 2025-06-24T21:06:05.164928Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:924:2726] TxId# 281474976715662] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-24T21:06:05.164955Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:924:2726] TxId# 281474976715662] Reserving quota 1 messages for ShardId# 72075186224037890 ... 
observed row limit of 1 rows at [2:950:2750] 2025-06-24T21:06:05.164992Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715663, MessageQuota: 1 2025-06-24T21:06:05.165044Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037890, TxId: 281474976715663, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T21:06:05.165174Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:950:2750], Recipient [2:924:2726]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037890 Status: RESPONSE_DATA TxId: 281474976715663 TxResult: "\n\016\n\003key\022\007\252\006\004\n\002\010\002\n\020\n\005value\022\007\252\006\004\n\002\010\002\030\001\022\016b\005\035\005\000\000\000b\005\0357\000\000\000" RowOffsets: 36 ApiVersion: 1 DataSeqNo: 2 DataLastKey: "\001\000\004\000\000\000\005\000\000\000" 2025-06-24T21:06:05.165203Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:924:2726] TxId# 281474976715662] Received stream data from ShardId# 72075186224037890 2025-06-24T21:06:05.165228Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1711: [ReadTable [2:924:2726] TxId# 281474976715662] Sending TEvStreamDataAck to [2:950:2750] ShardId# 72075186224037890 2025-06-24T21:06:05.165293Z node 2 :TX_PROXY INFO: read_table_impl.cpp:2933: [ReadTable [2:924:2726] TxId# 281474976715662] RESPONSE Status# ExecComplete prepare time: 0.014987s execute time: 0.165106s total time: 0.180093s 2025-06-24T21:06:05.165555Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715663, PendingAcks: 0 2025-06-24T21:06:05.165597Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037890, TxId: 281474976715663, MessageQuota: 0 2025-06-24T21:06:05.165768Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037890 2025-06-24T21:06:05.165801Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715663, at: 72075186224037890 2025-06-24T21:06:05.166012Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:924:2726], Recipient [2:835:2660]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715662 2025-06-24T21:06:05.166270Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:839:2662], Recipient [2:839:2662]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:06:05.166301Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:06:05.166341Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-24T21:06:05.166371Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:06:05.166402Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715663] at 72075186224037890 for ReadTableScan 2025-06-24T21:06:05.166423Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715663] at 72075186224037890 on unit ReadTableScan 2025-06-24T21:06:05.166443Z node 2 :TX_DATASHARD TRACE: 
read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715663] at 72075186224037890 error: , IsFatalError: 0 2025-06-24T21:06:05.166475Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715663] at 72075186224037890 is Executed 2025-06-24T21:06:05.166505Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715663] at 72075186224037890 executing on unit ReadTableScan 2025-06-24T21:06:05.166532Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715663] at 72075186224037890 to execution unit FinishPropose 2025-06-24T21:06:05.166554Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715663] at 72075186224037890 on unit FinishPropose 2025-06-24T21:06:05.166578Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715663] at 72075186224037890 is DelayComplete 2025-06-24T21:06:05.166598Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715663] at 72075186224037890 executing on unit FinishPropose 2025-06-24T21:06:05.166616Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715663] at 72075186224037890 to execution unit CompletedOperations 2025-06-24T21:06:05.166637Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715663] at 72075186224037890 on unit CompletedOperations 2025-06-24T21:06:05.166663Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715663] at 72075186224037890 is Executed 2025-06-24T21:06:05.166679Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715663] at 72075186224037890 executing on unit CompletedOperations 2025-06-24T21:06:05.166694Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715663] at 72075186224037890 has finished 2025-06-24T21:06:05.166712Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:06:05.166728Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037890 2025-06-24T21:06:05.166749Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2025-06-24T21:06:05.166770Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-24T21:06:05.166803Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-24T21:06:05.166824Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715663] at 72075186224037890 on unit FinishPropose 2025-06-24T21:06:05.166847Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715663 at tablet 72075186224037890 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-24T21:06:05.166892Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:06:05.167047Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549569, Sender [2:924:2726], Recipient [2:839:2662]: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715663 
2025-06-24T21:06:05.167078Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3174: StateWork, processing event TEvDataShard::TEvCancelTransactionProposal 2025-06-24T21:06:05.167112Z node 2 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:73: Got TEvDataShard::TEvCancelTransactionProposal 72075186224037890 txId 281474976715663 2025-06-24T21:06:05.167189Z node 2 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:44: Start TTxCancelTransactionProposal at tablet 72075186224037890 txId 281474976715663 2025-06-24T21:06:05.167316Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287431, Sender [2:924:2726], Recipient [2:839:2662]: NKikimrTx.TEvInterruptTransaction TxId: 281474976715663 2025-06-24T21:06:05.167341Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvTxProcessing::TEvInterruptTransaction 2025-06-24T21:06:05.167418Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:924:2726], Recipient [2:839:2662]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715662 >> TCmsTest::RequestRestartServicesOk [GOOD] >> TCmsTest::RequestRestartServicesPartial |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] |93.5%| [TA] $(B)/ydb/core/tx/datashard/ut_read_table/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> ListObjectsInS3Export::ParametersValidation [GOOD] Test command err: 2025-06-24T21:02:03.753090Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519624116750305752:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:02:03.753151Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004594/r3tmp/tmpMazJ1U/pdisk_1.dat 2025-06-24T21:02:04.392181Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:02:04.392276Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:02:04.410341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:02:04.455561Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17613, node 1 2025-06-24T21:02:04.663694Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:02:04.663725Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:02:04.663731Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:02:04.663866Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:02:04.759286Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for 
task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17648 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:02:05.136520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:02:05.393952Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519624121045273385:2204]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:02:05.394020Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:02:05.394115Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:7519624121045273385:2204], Recipient [1:7519624121045273385:2204]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:02:05.399230Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:02:06.399708Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519624121045273385:2204]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:02:06.399753Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:02:06.399814Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:7519624121045273385:2204], Recipient [1:7519624121045273385:2204]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:02:06.399829Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:02:07.400750Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519624121045273385:2204]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:02:07.400782Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, 
processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:02:07.400821Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:7519624121045273385:2204], Recipient [1:7519624121045273385:2204]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:02:07.400834Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:02:07.407734Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624133930175951:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:07.408345Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:07.408549Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519624133930175963:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:02:07.408891Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519624116750305971:2142] Handle TEvProposeTransaction 2025-06-24T21:02:07.408920Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519624116750305971:2142] TxId# 281474976710658 ProcessProposeTransaction 2025-06-24T21:02:07.408961Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519624116750305971:2142] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519624133930175966:2629] 2025-06-24T21:02:07.476923Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519624133930175966:2629] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-24T21:02:07.477008Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519624133930175966:2629] txid# 281474976710658 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:02:07.477029Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519624133930175966:2629] txid# 281474976710658 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-24T21:02:07.478740Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519624133930175966:2629] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:02:07.478832Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519624133930175966:2629] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:02:07.479112Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519624133930175966:2629] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:02:07.479282Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519624133930175966:2629] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:02:07.479340Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519624133930175966:2629] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-24T21:02:07.479492Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519624133930175966:2629] txid# 281474976710658 HANDLE EvClientConnected 2025-06-24T21:02:07.479565Z node 1 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519624133930175991:2635], Recipient [1:7519624121045273385:2204]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:02:07.479587Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:02:07.479598Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:02:07.479636Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519624133930175966:2629], Recipient [1:7519624121045273385:2204]: {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-24T21:02:07.479652Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T21:02:07.482331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: ".metadata/workload_manager/pools/default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { k ... , Sender [58:7519625143208941243:2348], Recipient [58:7519625121734102835:2199]: NKikimrExport.TEvGetExportRequest Request { Id: 281474976715664 } DatabaseName: "/Root" 2025-06-24T21:06:02.056102Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvExport::TEvGetExportRequest 2025-06-24T21:06:02.056599Z node 58 :TX_PROXY DEBUG: rpc_get_operation.cpp:209: [GetExport] [58:7519625143208941243:2348] [0] Handle TEvExport::TEvGetExportResponse: record# Entry { Id: 281474976715664 Status: SUCCESS Progress: PROGRESS_DONE ExportToS3Settings { endpoint: "localhost:17409" scheme: HTTP bucket: "test_bucket" items { source_path: "dir1/Table1" destination_prefix: "Prefix/dir1/Table1" } items { source_path: "Table0" destination_prefix: "Prefix/Table0" } items { source_path: "dir1/dir2/Table2" destination_prefix: "Prefix/dir1/dir2/Table2" } source_path: "/Root" destination_prefix: "Prefix//" } ItemsProgress { parts_total: 1 parts_completed: 1 start_time { seconds: 1750799161 } end_time { seconds: 1750799161 } } ItemsProgress { parts_total: 1 parts_completed: 1 start_time { seconds: 1750799161 } end_time { seconds: 1750799161 } } ItemsProgress { parts_total: 1 parts_completed: 1 start_time { seconds: 1750799161 } end_time { seconds: 1750799162 } } StartTime { seconds: 1750799161 } EndTime { seconds: 1750799162 } } 2025-06-24T21:06:02.057270Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [58:7519625143208941246:3604], Recipient [58:7519625121734102835:2199]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:06:02.057301Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:06:02.057318Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:06:02.061964Z node 58 :TX_PROXY DEBUG: 
rpc_list_objects_in_s3_export.cpp:57: [ListObjectsInS3Export] [58:7519625143208941247:2349] Resolve database: name# /Root 2025-06-24T21:06:02.062217Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:73: [ListObjectsInS3Export] [58:7519625143208941247:2349] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:06:02.062234Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:134: [ListObjectsInS3Export] [58:7519625143208941247:2349] Send request: schemeShardId# 72057594046644480 2025-06-24T21:06:02.062522Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [58:7519625143208941250:3606], Recipient [58:7519625121734102835:2199]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:06:02.062569Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:06:02.062589Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:06:02.062772Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 275251210, Sender [58:7519625143208941247:2349], Recipient [58:7519625121734102835:2199]: NKikimrImport.TEvListObjectsInS3ExportRequest OperationParams { } Settings { endpoint: "localhost:17409" scheme: HTTP bucket: "test_bucket" access_key: "test_key" secret_key: "test_secret" } PageSize: 0 PageToken: "" 2025-06-24T21:06:02.062805Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5068: StateWork, processing event TEvImport::TEvListObjectsInS3ExportRequest 2025-06-24T21:06:02.062951Z node 58 :IMPORT INFO: schemeshard_import_getters.cpp:1338: Reply: self# [58:7519625143208941251:3607], status# 400010, error# Empty S3 prefix specified 2025-06-24T21:06:02.063064Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:156: [ListObjectsInS3Export] [58:7519625143208941247:2349] Handle TListObjectsInS3ExportRPC::TEvListObjectsInS3ExportResponse: record# Status: BAD_REQUEST Issues { message: "Empty S3 prefix specified" } 2025-06-24T21:06:02.063523Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [58:7519625143208941250:3606], Recipient [58:7519625121734102835:2199]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:06:02.063552Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:06:02.063573Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:06:02.067686Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:57: [ListObjectsInS3Export] [58:7519625143208941252:2350] Resolve database: name# /Root 2025-06-24T21:06:02.067925Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:73: 
[ListObjectsInS3Export] [58:7519625143208941252:2350] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:06:02.067942Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:134: [ListObjectsInS3Export] [58:7519625143208941252:2350] Send request: schemeShardId# 72057594046644480 2025-06-24T21:06:02.068185Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [58:7519625143208941255:3609], Recipient [58:7519625121734102835:2199]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:06:02.068213Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:06:02.068235Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:06:02.068348Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 275251210, Sender [58:7519625143208941252:2350], Recipient [58:7519625121734102835:2199]: NKikimrImport.TEvListObjectsInS3ExportRequest OperationParams { } Settings { endpoint: "localhost:17409" scheme: HTTP bucket: "test_bucket" access_key: "test_key" secret_key: "test_secret" prefix: "Prefix" } PageSize: -42 PageToken: "" 2025-06-24T21:06:02.068376Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5068: StateWork, processing event TEvImport::TEvListObjectsInS3ExportRequest 2025-06-24T21:06:02.068460Z node 58 :IMPORT INFO: schemeshard_import_getters.cpp:1338: Reply: self# [58:7519625143208941256:3610], status# 400010, error# Page size should be greater than or equal to 0 2025-06-24T21:06:02.068534Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:156: [ListObjectsInS3Export] [58:7519625143208941252:2350] Handle TListObjectsInS3ExportRPC::TEvListObjectsInS3ExportResponse: record# Status: BAD_REQUEST Issues { message: "Page size should be greater than or equal to 0" } 2025-06-24T21:06:02.068905Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [58:7519625143208941255:3609], Recipient [58:7519625121734102835:2199]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:06:02.068931Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:06:02.068947Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:06:02.077502Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:57: [ListObjectsInS3Export] [58:7519625143208941257:2351] Resolve database: name# /Root 2025-06-24T21:06:02.077806Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:73: [ListObjectsInS3Export] [58:7519625143208941257:2351] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root 
DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:06:02.077827Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:134: [ListObjectsInS3Export] [58:7519625143208941257:2351] Send request: schemeShardId# 72057594046644480 2025-06-24T21:06:02.078050Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [58:7519625143208941260:3612], Recipient [58:7519625121734102835:2199]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:06:02.078085Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:06:02.078101Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:06:02.078212Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 275251210, Sender [58:7519625143208941257:2351], Recipient [58:7519625121734102835:2199]: NKikimrImport.TEvListObjectsInS3ExportRequest OperationParams { } Settings { endpoint: "localhost:17409" scheme: HTTP bucket: "test_bucket" access_key: "test_key" secret_key: "test_secret" prefix: "Prefix" } PageSize: 42 PageToken: "incorrect page token" 2025-06-24T21:06:02.078230Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5068: StateWork, processing event TEvImport::TEvListObjectsInS3ExportRequest 2025-06-24T21:06:02.078354Z node 58 :IMPORT INFO: schemeshard_import_getters.cpp:1338: Reply: self# [58:7519625143208941261:3613], status# 400010, error# Failed to parse page token 2025-06-24T21:06:02.078475Z node 58 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:156: [ListObjectsInS3Export] [58:7519625143208941257:2351] Handle TListObjectsInS3ExportRPC::TEvListObjectsInS3ExportResponse: record# Status: BAD_REQUEST Issues { message: "Failed to parse page token" } 2025-06-24T21:06:02.078878Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [58:7519625143208941260:3612], Recipient [58:7519625121734102835:2199]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:06:02.078910Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:06:02.078925Z node 58 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 |93.5%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_read_table/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TCmsTest::TestTwoOrMoreDisksFromGroupAtTheSameRequestBlock42 >> TCmsTest::VDisksEvictionShouldFailOnUnsupportedAction [GOOD] >> TCmsTest::VDisksEvictionShouldFailOnMultipleActions >> TCmsTest::ManageRequestsWrong >> TCmsTest::ManagePermissions >> TMaintenanceApiTest::CreateTime [GOOD] >> TMaintenanceApiTest::LastRefreshTime >> TMaintenanceApiTest::SingleCompositeActionGroup >> TCmsTenatsTest::TestScheduledPermissionWithNonePolicy [GOOD] >> TCmsTenatsTest::TestTenantLimitForceRestartMode >> TCmsTenatsTest::TestTenantRatioLimit >> TCmsTenatsTest::TestClusterLimit |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_17_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 17] [GOOD] >> TCmsTest::RequestRestartServicesPartial [GOOD] >> TCmsTest::RequestRestartServicesMultipleNodes |93.5%| [TA] $(B)/ydb/tests/olap/column_family/compression/test-results/py3test/{meta.json ... results_accumulator.log} >> TCmsTest::ActionIssuePartialPermissions >> TCmsTest::TestTwoOrMoreDisksFromGroupAtTheSameRequestBlock42 [GOOD] >> TCmsTest::TestTwoOrMoreDisksFromGroupAtTheSameRequestMirror3dc >> TCmsTest::VDisksEvictionShouldFailOnMultipleActions [GOOD] |93.5%| [TA] {RESULT} $(B)/ydb/tests/olap/column_family/compression/test-results/py3test/{meta.json ... results_accumulator.log} >> TMaintenanceApiTest::LastRefreshTime [GOOD] >> TCmsTest::ManageRequestsWrong [GOOD] >> TCmsTest::ManageRequestsDry |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::VDisksEvictionShouldFailOnMultipleActions [GOOD] >> TCmsTest::ManagePermissions [GOOD] >> TCmsTest::ManagePermissionWrongRequest |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TMaintenanceApiTest::LastRefreshTime [GOOD] >> TCmsTenatsTest::TestTenantLimitForceRestartMode [GOOD] >> TCmsTenatsTest::TestTenantLimitForceRestartModeScheduled |93.5%| [TA] $(B)/ydb/services/ydb/backup_ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TCmsTenatsTest::TestClusterLimit [GOOD] >> TCmsTenatsTest::TestTenantRatioLimit [GOOD] >> TCmsTenatsTest::RequestShutdownHost >> TCmsTenatsTest::TestTenantRatioLimitForceRestartMode >> TCmsTest::RequestRestartServicesMultipleNodes [GOOD] >> TCmsTest::RequestRestartServicesNoUser >> TCmsTest::ScheduledEmergencyDuringRollingRestart >> TCmsTenatsTest::TestNoneTenantPolicy >> TCmsTest::ActionIssuePartialPermissions [GOOD] >> TCmsTest::ActionWithZeroDuration >> TCmsTest::TestTwoOrMoreDisksFromGroupAtTheSameRequestMirror3dc [GOOD] >> TCmsTest::VDisksEviction >> TMaintenanceApiTest::SingleCompositeActionGroup [GOOD] >> TCmsTest::ManageRequestsDry [GOOD] >> TCmsTest::ManagePermissionWrongRequest [GOOD] >> TMaintenanceApiTest::SimplifiedMirror3DC >> TCmsTest::Notifications >> TCmsTest::ManageRequests |93.6%| [TA] {RESULT} $(B)/ydb/services/ydb/backup_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TCmsTenatsTest::TestTenantLimitForceRestartModeScheduled [GOOD] >> TCmsTest::RequestRestartServicesNoUser [GOOD] >> TCmsTenatsTest::TestTenantRatioLimitForceRestartMode [GOOD] >> TCmsTenatsTest::TestTenantRatioLimitForceRestartModeScheduled >> TMaintenanceApiTest::SimplifiedMirror3DC [GOOD] >> TMaintenanceApiTest::RequestReplaceDevicePDisk |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTenatsTest::TestTenantLimitForceRestartModeScheduled [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::RequestRestartServicesNoUser [GOOD] >> TCmsTest::ScheduledEmergencyDuringRollingRestart [GOOD] >> TCmsTest::ScheduledWalleRequestDuringRollingRestart >> TCmsTest::ActionWithZeroDuration [GOOD] >> TCmsTest::CheckUnreplicatedDiskPreventsRestart >> TCmsTenatsTest::TestNoneTenantPolicy [GOOD] >> TCmsTenatsTest::TestDefaultTenantPolicyWithSingleTenantHost >> TCmsTest::VDisksEviction [GOOD] >> TCmsTest::Notifications [GOOD] >> TCmsTest::Mirror3dcPermissions >> TCmsTest::ManageRequests [GOOD] >> TCmsTest::EnableCMSRequestPrioritiesFeatureFlag >> TCmsTest::RequestReplaceDevices >> TCmsTenatsTest::RequestShutdownHost [GOOD] >> TCmsTenatsTest::RequestShutdownHostWithTenantPolicy >> TCmsTest::RequestRestartServicesReject ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::VDisksEviction [GOOD] Test command err: 2025-06-24T21:06:12.311823Z node 18 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-24T21:06:12.311910Z node 18 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-24T21:06:12.312041Z node 18 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-24T21:06:12.313393Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120030000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120030000 } Devices { Name: "pdisk-18-18" State: UP Timestamp: 120030000 } Timestamp: 120030000 NodeId: 18 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120030000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120030000 } Devices { Name: "pdisk-19-19" State: UP Timestamp: 120030000 } Timestamp: 120030000 NodeId: 19 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120030000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120030000 } Devices { Name: 
"pdisk-20-20" State: UP Timestamp: 120030000 } Timestamp: 120030000 NodeId: 20 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120030000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120030000 } Devices { Name: "pdisk-21-21" State: UP Timestamp: 120030000 } Timestamp: 120030000 NodeId: 21 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120030000 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120030000 } Devices { Name: "pdisk-22-22" State: UP Timestamp: 120030000 } Timestamp: 120030000 NodeId: 22 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120030000 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120030000 } Devices { Name: "pdisk-23-23" State: UP Timestamp: 120030000 } Timestamp: 120030000 NodeId: 23 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120030000 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120030000 } Devices { Name: "pdisk-24-24" State: UP Timestamp: 120030000 } Timestamp: 120030000 NodeId: 24 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120030000 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120030000 } Devices { Name: "pdisk-25-25" State: UP Timestamp: 120030000 } Timestamp: 120030000 NodeId: 25 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120030000 } } 2025-06-24T21:06:12.314736Z node 18 :CMS DEBUG: sentinel.cpp:486: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120030000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120030000 
} Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120030000 } Devices { Name: "pdisk-18-18" State: UP Timestamp: 120030000 } Timestamp: 120030000 NodeId: 18 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120030000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120030000 } Devices { Name: "pdisk-19-19" State: UP Timestamp: 120030000 } Timestamp: 120030000 NodeId: 19 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120030000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120030000 } Devices { Name: "pdisk-20-20" State: UP Timestamp: 120030000 } Timestamp: 120030000 NodeId: 20 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120030000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120030000 } Devices { Name: "pdisk-21-21" State: UP Timestamp: 120030000 } Timestamp: 120030000 NodeId: 21 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120030000 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120030000 } Devices { Name: "pdisk-22-22" State: UP Timestamp: 120030000 } Timestamp: 120030000 NodeId: 22 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120030000 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120030000 } Devices { Name: "pdisk-23-23" State: UP Timestamp: 120030000 } Timestamp: 120030000 NodeId: 23 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120030000 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120030000 } Devices { Name: "pdisk-24-24" State: UP 
Timestamp: 120030000 } Timestamp: 120030000 NodeId: 24 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120030000 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120030000 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120030000 } Devices { Name: "pdisk-25-25" State: UP Timestamp: 120030000 } Timestamp: 120030000 NodeId: 25 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120030000 } 2025-06-24T21:06:12.314966Z node 18 :CMS DEBUG: sentinel.cpp:944: [Sentinel] [Main] Config was updated in 120.003000s 2025-06-24T21:06:12.315022Z node 18 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-24T21:06:12.315191Z node 18 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: true 2025-06-24T21:06:12.315266Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 2025-06-24T21:06:12.315326Z node 18 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: VDisks eviction from host 18 has not yet been completed) 2025-06-24T21:06:12.315471Z node 18 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-24T21:06:12.315649Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 18 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-24T21:06:12.315717Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Add host marker: host# 18, marker# MARKER_DISK_FAULTY 2025-06-24T21:06:12.315948Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 18, wbId# [18:8388350642965737326:1634689637] 2025-06-24T21:06:12.315991Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 19, wbId# [19:8388350642965737326:1634689637] 2025-06-24T21:06:12.316014Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 20, wbId# [20:8388350642965737326:1 ... 
"storage" State: UP Version: "-1" Timestamp: 120543048 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120543048 } Devices { Name: "pdisk-18-18" State: UP Timestamp: 120543048 } Timestamp: 120543048 NodeId: 18 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120543048 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120543048 } Devices { Name: "pdisk-19-19" State: UP Timestamp: 120543048 } Timestamp: 120543048 NodeId: 19 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120543048 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120543048 } Devices { Name: "pdisk-20-20" State: UP Timestamp: 120543048 } Timestamp: 120543048 NodeId: 20 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120543048 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120543048 } Devices { Name: "pdisk-21-21" State: UP Timestamp: 120543048 } Timestamp: 120543048 NodeId: 21 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120543048 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120543048 } Devices { Name: "pdisk-22-22" State: UP Timestamp: 120543048 } Timestamp: 120543048 NodeId: 22 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120543048 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120543048 } Devices { Name: "pdisk-23-23" State: UP Timestamp: 120543048 } Timestamp: 120543048 NodeId: 23 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120543048 } Devices { Name: "vdisk-0-1-0-6-0" State: UP 
Timestamp: 120543048 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120543048 } Devices { Name: "pdisk-24-24" State: UP Timestamp: 120543048 } Timestamp: 120543048 NodeId: 24 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120543048 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120543048 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120543048 } Devices { Name: "pdisk-25-25" State: UP Timestamp: 120543048 } Timestamp: 120543048 NodeId: 25 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120543048 } 2025-06-24T21:06:12.697536Z node 18 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: true 2025-06-24T21:06:12.697597Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 2025-06-24T21:06:12.697640Z node 18 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: VDisks eviction from host 18 has not yet been completed) 2025-06-24T21:06:12.697752Z node 18 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-24T21:06:12.697904Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-3, owner# user, order# 3, priority# 0, body# User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 18 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-24T21:06:12.697944Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Add host marker: host# 18, marker# MARKER_DISK_FAULTY 2025-06-24T21:06:12.698124Z node 18 :CMS DEBUG: sentinel.cpp:944: [Sentinel] [Main] Config was updated in 0.100000s 2025-06-24T21:06:12.698171Z node 18 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-24T21:06:12.698259Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 18, wbId# [18:8388350642965737326:1634689637] 2025-06-24T21:06:12.698299Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 19, wbId# [19:8388350642965737326:1634689637] 2025-06-24T21:06:12.698323Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 20, wbId# [20:8388350642965737326:1634689637] 2025-06-24T21:06:12.698346Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 21, wbId# [21:8388350642965737326:1634689637] 2025-06-24T21:06:12.698378Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 22, wbId# [22:8388350642965737326:1634689637] 2025-06-24T21:06:12.698400Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 23, wbId# [23:8388350642965737326:1634689637] 
2025-06-24T21:06:12.698422Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 24, wbId# [24:8388350642965737326:1634689637] 2025-06-24T21:06:12.698478Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 25, wbId# [25:8388350642965737326:1634689637] 2025-06-24T21:06:12.698620Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 18, response# PDiskStateInfo { PDiskId: 18 CreateTime: 120444560 ChangeTime: 120444560 Path: "/18/pdisk-18.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120543 2025-06-24T21:06:12.699342Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 19, response# PDiskStateInfo { PDiskId: 19 CreateTime: 120444560 ChangeTime: 120444560 Path: "/19/pdisk-19.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120543 2025-06-24T21:06:12.699411Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 20, response# PDiskStateInfo { PDiskId: 20 CreateTime: 120444560 ChangeTime: 120444560 Path: "/20/pdisk-20.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120543 2025-06-24T21:06:12.699476Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 21, response# PDiskStateInfo { PDiskId: 21 CreateTime: 120444560 ChangeTime: 120444560 Path: "/21/pdisk-21.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120543 2025-06-24T21:06:12.699529Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 22, response# PDiskStateInfo { PDiskId: 22 CreateTime: 120444560 ChangeTime: 120444560 Path: "/22/pdisk-22.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120543 2025-06-24T21:06:12.699567Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 23, response# PDiskStateInfo { PDiskId: 23 CreateTime: 120444560 ChangeTime: 120444560 Path: "/23/pdisk-23.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120543 2025-06-24T21:06:12.699607Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 24, response# PDiskStateInfo { PDiskId: 24 CreateTime: 120444560 ChangeTime: 120444560 Path: "/24/pdisk-24.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120543 2025-06-24T21:06:12.699649Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 25, response# PDiskStateInfo { PDiskId: 25 CreateTime: 120444560 ChangeTime: 120444560 Path: "/25/pdisk-25.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120543 2025-06-24T21:06:12.699685Z node 18 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-24T21:06:12.711810Z node 18 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-24T21:06:12.712037Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { 
User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: true }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "VDisks eviction from host 18 has not yet been completed" } RequestId: "user-r-3" Deadline: 0 } 2025-06-24T21:06:12.712547Z node 18 :CMS INFO: cms.cpp:1410: User user removes request user-r-3 2025-06-24T21:06:12.712589Z node 18 :CMS DEBUG: cms.cpp:1433: Resulting status: OK 2025-06-24T21:06:12.712659Z node 18 :CMS DEBUG: cms_tx_remove_request.cpp:21: TTxRemoveRequest Execute 2025-06-24T21:06:12.712712Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reset host markers: host# 18 2025-06-24T21:06:12.712823Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-3, reason# explicit remove 2025-06-24T21:06:12.724740Z node 18 :CMS DEBUG: cms_tx_remove_request.cpp:45: TTxRemoveRequest Complete 2025-06-24T21:06:12.724891Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManageRequestRequest { User: "user" Command: REJECT RequestId: "user-r-3" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManageRequestResponse { Status { Code: OK } } >> TCmsTenatsTest::TestTenantRatioLimitForceRestartModeScheduled [GOOD] >> TCmsTest::ActionIssue >> TCmsTest::ScheduledWalleRequestDuringRollingRestart [GOOD] >> TCmsTest::SamePriorityRequest >> TCmsTest::CheckUnreplicatedDiskPreventsRestart [GOOD] >> TCmsTest::AllVDisksEvictionInRack >> TCmsTest::RequestReplaceBrokenDevices >> TCmsTenatsTest::TestDefaultTenantPolicyWithSingleTenantHost [GOOD] >> TCmsTenatsTest::TestLimitsWithDownNode >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> TCmsTest::EnableCMSRequestPrioritiesFeatureFlag [GOOD] >> TCmsTest::RequestRestartServicesReject [GOOD] >> TCmsTest::RequestRestartServicesRejectSecond >> TMaintenanceApiTest::RequestReplaceDevicePDisk [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::EnableCMSRequestPrioritiesFeatureFlag [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TMaintenanceApiTest::RequestReplaceDevicePDisk [GOOD] >> TCmsTest::RequestReplaceDevices [GOOD] >> TCmsTest::RequestReplaceDevicePDisk >> TCmsTest::SamePriorityRequest [GOOD] >> TCmsTest::SamePriorityRequest2 >> TCmsTest::WalleRebootDownNode >> TCmsTest::RequestReplaceBrokenDevices [GOOD] >> TCmsTest::PermissionDuration >> TCmsTenatsTest::TestLimitsWithDownNode [GOOD] >> TCmsTenatsTest::TestScheduledPermissionWithDefaultPolicy >> TClusterInfoTest::DeviceId >> TCmsTenatsTest::RequestShutdownHostWithTenantPolicy [GOOD] >> TCmsTenatsTest::TestClusterLimitForceRestartMode >> TClusterInfoTest::DeviceId [GOOD] >> TClusterInfoTest::FillInfo [GOOD] >> TCmsTenatsTest::CollectInfo >> TCmsTest::RequestRestartServicesRejectSecond [GOOD] >> TCmsTest::RequestRestartServicesWrongHost >> TCmsTest::Mirror3dcPermissions [GOOD] >> TCmsTest::RequestReplaceDevicePDisk [GOOD] >> TCmsTest::RequestReplaceDevicePDiskByPath >> TCmsTest::ActionIssue [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::Mirror3dcPermissions [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::ActionIssue [GOOD] >> TCmsTest::SamePriorityRequest2 [GOOD] >> 
TSchemeshardBorrowedCompactionTest::SchemeshardShouldHandleBorrowCompactionTimeouts [GOOD] >> TCmsTest::PermissionDuration [GOOD] >> TCmsTest::RacyStartCollecting |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::SamePriorityRequest2 [GOOD] >> TCmsTest::WalleRebootDownNode [GOOD] >> TCmsTest::WalleCleanupTest >> TCmsTenatsTest::CollectInfo [GOOD] >> TCmsTenatsTest::RequestRestartServices >> TCmsTest::TestKeepAvailableModeDisconnects >> TCmsTenatsTest::TestScheduledPermissionWithDefaultPolicy [GOOD] >> TCmsTest::RequestRestartServicesWrongHost [GOOD] >> TCmsTest::RestartNodeInDownState >> TCmsTenatsTest::TestClusterLimitForceRestartMode [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldHandleBorrowCompactionTimeouts [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:03:09.601559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:03:09.601648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:03:09.601682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:03:09.601736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:03:09.601785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:03:09.601828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:03:09.601903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:03:09.601990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:03:09.602679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:03:09.603033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:03:09.684433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:03:09.684500Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:03:09.700753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 
2025-06-24T21:03:09.701169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:03:09.701389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:03:09.712232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:03:09.712503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:03:09.713150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:03:09.713502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:03:09.716952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:03:09.717177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:03:09.718338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:03:09.718401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:03:09.718640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:03:09.718705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:03:09.718755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:03:09.718872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:03:09.726077Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:03:09.857772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:03:09.858036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:03:09.858318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:03:09.858376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 
72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:03:09.858632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:03:09.858723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:03:09.860948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:03:09.861184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:03:09.861377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:03:09.861444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:03:09.861482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:03:09.861515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:03:09.863556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:03:09.863607Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:03:09.863648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:03:09.865613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:03:09.865671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:03:09.865719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:03:09.865772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:03:09.869367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:03:09.871954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 
72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:03:09.872181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:03:09.873191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:03:09.873325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:03:09.873368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:03:09.873652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:03:09.873719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:03:09.873916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:03:09.874008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:03:09.876438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:03:09.876487Z node 1 :FLAT_TX_SCHEMESHARD ... 
shard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 13940 rowCount 100 cpuUsage 0.0321 2025-06-24T21:06:19.534306Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] raw table stats: DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: true Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:06:19.534342Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-24T21:06:19.544717Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:06:19.544785Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:06:19.544850Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:127:2151], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:06:19.544877Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:06:19.587084Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:06:19.587178Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:06:19.587224Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 1 2025-06-24T21:06:19.587299Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-24T21:06:19.587331Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-06-24T21:06:19.587454Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 13940 row count 100 2025-06-24T21:06:19.587531Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 100, DataSize 13940 2025-06-24T21:06:19.587562Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409546, followerId 0 2025-06-24T21:06:19.587647Z 
node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:477: Do not want to split tablet 72075186233409546 by size, its table already has 1 out of 1 partitions 2025-06-24T21:06:19.587732Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:06:19.598205Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:06:19.598279Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:06:19.598309Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-24T21:06:19.639573Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:720:2684]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-24T21:06:19.639828Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186233409547, FollowerId 0, tableId 3 2025-06-24T21:06:19.640183Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:720:2684], Recipient [3:127:2151]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409547 TableLocalId: 3 Generation: 2 Round: 12 TableStats { DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 51 Memory: 124232 } ShardState: 2 UserTablePartOwners: 72075186233409547 UserTablePartOwners: 72075186233409546 NodeId: 3 StartTime: 214 TableOwnerId: 72057594046678944 FollowerId: 0 2025-06-24T21:06:19.640226Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:06:19.640276Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 3] state 'Ready' dataSize 13940 rowCount 100 cpuUsage 0.0051 2025-06-24T21:06:19.640376Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 3] raw table stats: DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:06:19.640418Z node 3 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-24T21:06:19.681626Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: operation_queue_timer.h:92: Operation queue wakeup 2025-06-24T21:06:19.681719Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__borrowed_compaction.cpp:65: Borrowed compaction timeout for pathId# [OwnerId: 72057594046678944, LocalPathId: 3], datashard# 72075186233409547, next wakeup# 0.000000s, in queue# 0 shards, running# 0 shards at schemeshard 72057594046678944 2025-06-24T21:06:19.681777Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__borrowed_compaction.cpp:28: RunBorrowedCompaction for pathId# [OwnerId: 72057594046678944, LocalPathId: 3], datashard# 72075186233409547, next wakeup# 0.000000s, rate# 0, in queue# 1 shards, running# 0 shards at schemeshard 72057594046678944 2025-06-24T21:06:19.681860Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: operation_queue_timer.h:84: Operation queue set wakeup after delta# 3 seconds 2025-06-24T21:06:19.681886Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__borrowed_compaction.cpp:100: Borrowed compaction enqueued shard# 72057594046678944:2 at schemeshard 72057594046678944 2025-06-24T21:06:19.682010Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:06:19.682056Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:06:19.682084Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 1 2025-06-24T21:06:19.682162Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-24T21:06:19.682200Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-06-24T21:06:19.682307Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 3 shard idx 72057594046678944:2 data size 13940 row count 100 2025-06-24T21:06:19.682382Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], pathId map=CopyTable, is column=0, is olap=0, RowCount 100, DataSize 13940, with borrowed parts 2025-06-24T21:06:19.682412Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409547, followerId 0 2025-06-24T21:06:19.682497Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:472: Want to split tablet 72075186233409547 by size split by size (shardCount: 1, maxShardCount: 2, shardSize: 13940, maxShardSize: 1) 2025-06-24T21:06:19.682545Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:505: Postpone split tablet 72075186233409547 because it has borrow parts, enqueue compact them first 2025-06-24T21:06:19.682575Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__borrowed_compaction.cpp:100: Borrowed compaction enqueued shard# 72057594046678944:2 at schemeshard 72057594046678944 2025-06-24T21:06:19.682645Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 
2025-06-24T21:06:19.693044Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:06:19.693105Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:06:19.693153Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-24T21:06:19.933198Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:06:19.933295Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:06:19.933393Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:127:2151], Recipient [3:127:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:06:19.933448Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> TCmsTest::StateStorageNodesFromOneRing >> TCmsTest::RequestReplaceDevicePDiskByPath [GOOD] >> TCmsTest::RequestReplaceDeviceTwiceWithNoVDisks |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTenatsTest::TestScheduledPermissionWithDefaultPolicy [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTenatsTest::TestClusterLimitForceRestartMode [GOOD] |93.6%| [TA] $(B)/ydb/core/tx/schemeshard/ut_compaction/test-results/unittest/{meta.json ... results_accumulator.log} >> TCmsTest::TestOutdatedState |93.6%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_compaction/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TCmsTest::TestKeepAvailableMode >> TCmsTest::RacyStartCollecting [GOOD] >> TCmsTest::PriorityRange >> TCmsTest::CollectInfo >> TCmsTest::StateRequest >> TCmsTest::StateStorageNodesFromOneRing [GOOD] >> TCmsTest::StateStorageAvailabilityMode >> TCmsTest::WalleCleanupTest [GOOD] >> TCmsTest::WalleRequestDuringRollingRestart >> TCmsTest::AllVDisksEvictionInRack [GOOD] >> TCmsTest::RestartNodeInDownState [GOOD] >> TCmsTest::TestKeepAvailableModeDisconnects [GOOD] >> TCmsTest::TestKeepAvailableModeScheduled ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::AllVDisksEvictionInRack [GOOD] Test command err: 2025-06-24T21:06:16.041355Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-24T21:06:16.041469Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-24T21:06:16.041583Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-24T21:06:16.042877Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-25-25" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 25 InterconnectPort: 12001 Location { Rack: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-26-26" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 26 InterconnectPort: 12002 Location { Rack: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-27-27" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 27 InterconnectPort: 12003 Location { Rack: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-28-28" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 28 InterconnectPort: 12004 Location { Rack: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120029000 } 
Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-29-29" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 29 InterconnectPort: 12005 Location { Rack: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-30-30" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 30 InterconnectPort: 12006 Location { Rack: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-31-31" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 31 InterconnectPort: 12007 Location { Rack: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-32-32" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 32 InterconnectPort: 12008 Location { Rack: "4" } StartTimeSeconds: 0 } Timestamp: 120029000 } } 2025-06-24T21:06:16.043513Z node 25 :CMS DEBUG: sentinel.cpp:486: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-25-25" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 25 InterconnectPort: 12001 Location { Rack: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-26-26" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 26 InterconnectPort: 12002 Location { Rack: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120029000 } Devices 
{ Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-27-27" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 27 InterconnectPort: 12003 Location { Rack: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-28-28" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 28 InterconnectPort: 12004 Location { Rack: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-29-29" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 29 InterconnectPort: 12005 Location { Rack: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-30-30" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 30 InterconnectPort: 12006 Location { Rack: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-31-31" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 31 InterconnectPort: 12007 Location { Rack: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-32-32" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 32 InterconnectPort: 12008 Location { Rack: "4" } StartTimeSeconds: 0 } Timestamp: 120029000 } 2025-06-24T21:06:16.043682Z node 25 :CMS DEBUG: sentinel.cpp:944: [Sentinel] [Main] Config was updated in 120.003000s 2025-06-24T21:06:16.043721Z node 25 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-24T21:06:16.043845Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: true 2025-06-24T21:06:16.043894Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 
2025-06-24T21:06:16.043937Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: VDisks eviction from host 25 has not yet been completed) 2025-06-24T21:06:16.044045Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-24T21:06:16.044199Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 25 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-24T21:06:16.044239Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Add host marker: host# 25, marker# MARKER_DISK_FAULTY 2025-06-24T21:06:16.044419Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 25, wbId# [25:8388350642965737326:1634689637] 2025-06-24T21:06:16.044454Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 26, wbId# [26:8388350642965737326:1634689637] 2025-06-24T21:06:16.044478Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 27, wbId# [27:8388350642965737326:1634689637] 2025-06-24T21:06:16.044500Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 28, wbId# [28:8388350642965737326:1634689637] 2025-06-24T21:06:16.044542Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 29, wbId# [29:8388350642965737326:1634689637] 2025-06-24T21:06:16.044566Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 30, wbId# [30:8388350642965737326:1634689637] 2025-06-24T21:06:16.044585Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] R ... 
dle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 27, response# PDiskStateInfo { PDiskId: 27 CreateTime: 0 ChangeTime: 0 Path: "/27/pdisk-27.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180029 2025-06-24T21:06:21.129931Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 30, response# PDiskStateInfo { PDiskId: 30 CreateTime: 0 ChangeTime: 0 Path: "/30/pdisk-30.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180029 2025-06-24T21:06:21.129967Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 31, response# PDiskStateInfo { PDiskId: 31 CreateTime: 0 ChangeTime: 0 Path: "/31/pdisk-31.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180029 2025-06-24T21:06:21.130042Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 32, response# PDiskStateInfo { PDiskId: 32 CreateTime: 0 ChangeTime: 0 Path: "/32/pdisk-32.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180029 2025-06-24T21:06:21.130084Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 28, response# PDiskStateInfo { PDiskId: 28 CreateTime: 0 ChangeTime: 0 Path: "/28/pdisk-28.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180029 2025-06-24T21:06:21.130119Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 29, response# PDiskStateInfo { PDiskId: 29 CreateTime: 0 ChangeTime: 0 Path: "/29/pdisk-29.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180029 2025-06-24T21:06:21.130172Z node 25 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-24T21:06:21.130390Z node 25 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 26:26, status# ACTIVE, required status# FAULTY, reason# Forced status, dry run# 0 2025-06-24T21:06:21.130449Z node 25 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 25:25, status# ACTIVE, required status# FAULTY, reason# Forced status, dry run# 0 2025-06-24T21:06:21.130482Z node 25 :CMS DEBUG: sentinel.cpp:1076: [Sentinel] [Main] Change pdisk status: requestsSize# 2 2025-06-24T21:06:21.130624Z node 25 :CMS DEBUG: cms_tx_log_and_send.cpp:19: TTxLogAndSend Execute 2025-06-24T21:06:21.130852Z node 25 :CMS DEBUG: cms_tx_log_and_send.cpp:19: TTxLogAndSend Execute 2025-06-24T21:06:21.130980Z node 25 :CMS DEBUG: sentinel.cpp:1202: [Sentinel] [Main] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true } Status { Success: true } Success: true, cookie# 1 2025-06-24T21:06:21.131049Z node 25 :CMS NOTICE: sentinel.cpp:1226: [Sentinel] [Main] PDisk status has been changed: pdiskId# 25:25 2025-06-24T21:06:21.131095Z node 25 :CMS NOTICE: sentinel.cpp:1226: [Sentinel] [Main] PDisk status has been changed: pdiskId# 26:26 2025-06-24T21:06:21.143644Z node 25 :CMS DEBUG: cms_tx_log_and_send.cpp:27: TTxLogAndSend Complete 2025-06-24T21:06:21.143737Z node 25 :CMS DEBUG: cms_tx_log_and_send.cpp:27: TTxLogAndSend Complete 2025-06-24T21:06:21.157859Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes 
Execute 2025-06-24T21:06:21.157957Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-24T21:06:21.158005Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:03:00Z 2025-06-24T21:06:21.158597Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 25 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-24T21:06:21.158687Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 25 has not yet been completed" } 2025-06-24T21:06:21.158741Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 25, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-24T21:06:21.158794Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 0; State: Ok 2025-06-24T21:06:21.158819Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 1; State: Ok 2025-06-24T21:06:21.158832Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 2; State: Ok 2025-06-24T21:06:21.158850Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-24T21:06:21.158997Z node 25 :CMS DEBUG: cms.cpp:1042: Accepting permission: id# user-p-1, requestId# user-r-1, owner# user 2025-06-24T21:06:21.159070Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (25) (permission user-p-1 until 1970-01-01T00:13:00Z) 2025-06-24T21:06:21.159160Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-24T21:06:21.159326Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-1, validity# 1970-01-01T00:13:00.129000Z, action# Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 2025-06-24T21:06:21.159421Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-24T21:06:21.171361Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-24T21:06:21.171614Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 } Deadline: 780129000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 25 InterconnectPort: 12001 } } } } 2025-06-24T21:06:21.171699Z node 25 :CMS DEBUG: cms.cpp:1070: Schedule cleanup at 1970-01-01T00:33:00.129000Z 2025-06-24T21:06:21.185970Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (25) (permission user-p-1 until 1970-01-01T00:13:00Z) 2025-06-24T21:06:21.186235Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-24T21:06:21.186291Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-24T21:06:21.186335Z node 25 :CMS DEBUG: 
cluster_info.cpp:968: Timestamp: 1970-01-01T00:03:00Z 2025-06-24T21:06:21.186906Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 26 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-24T21:06:21.186982Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 26 has not yet been completed" } 2025-06-24T21:06:21.187029Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-24T21:06:21.187063Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-24T21:06:21.187187Z node 25 :CMS DEBUG: cms.cpp:1042: Accepting permission: id# user-p-2, requestId# user-r-2, owner# user 2025-06-24T21:06:21.187266Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (26) (permission user-p-2 until 1970-01-01T00:13:00Z) 2025-06-24T21:06:21.187322Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-24T21:06:21.187438Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-2, validity# 1970-01-01T00:13:00.230512Z, action# Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 600000000 2025-06-24T21:06:21.187512Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-2, owner# user, order# 2, priority# 0, body# User: "user" PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-24T21:06:21.199740Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-24T21:06:21.200046Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-2" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-2" Permissions { Id: "user-p-2" Action { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 600000000 } Deadline: 780230512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 26 InterconnectPort: 12002 } } } } 2025-06-24T21:06:21.200640Z node 25 :CMS INFO: cms.cpp:1332: User user is done with permissions user-p-1 2025-06-24T21:06:21.200697Z node 25 :CMS DEBUG: cms.cpp:1355: Resulting status: OK 2025-06-24T21:06:21.200765Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-24T21:06:21.200874Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reset host markers: host# 25 2025-06-24T21:06:21.200977Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-1, reason# permission user-p-1 was removed 2025-06-24T21:06:21.201027Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-1, reason# explicit remove 2025-06-24T21:06:21.213284Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-24T21:06:21.213435Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# 
NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-1" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-24T21:06:21.213800Z node 25 :CMS INFO: cms.cpp:1332: User user is done with permissions user-p-2 2025-06-24T21:06:21.213840Z node 25 :CMS DEBUG: cms.cpp:1355: Resulting status: OK 2025-06-24T21:06:21.213878Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-24T21:06:21.213938Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reset host markers: host# 26 2025-06-24T21:06:21.214011Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-2, reason# permission user-p-2 was removed 2025-06-24T21:06:21.214043Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-2, reason# explicit remove 2025-06-24T21:06:21.225680Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-24T21:06:21.225849Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-2" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::RestartNodeInDownState [GOOD] >> TCmsTest::RequestReplaceDeviceTwiceWithNoVDisks [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::RequestReplaceDeviceTwiceWithNoVDisks [GOOD] >> TCmsTest::TestOutdatedState [GOOD] >> TCmsTest::TestSetResetMarkers >> TCmsTest::PriorityRange [GOOD] >> TDowntimeTest::SetIgnoredDowntimeGap [GOOD] >> TMaintenanceApiTest::CompositeActionGroupSameStorageGroup >> TCmsTest::CollectInfo [GOOD] >> TCmsTest::DynamicConfig >> TCmsTenatsTest::RequestRestartServices [GOOD] >> TCmsTest::TestKeepAvailableMode [GOOD] >> TCmsTest::TestForceRestartModeDisconnects >> TCmsTest::StateRequest [GOOD] >> TCmsTest::StateRequestNode ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::PriorityRange [GOOD] Test command err: 2025-06-24T21:06:20.833009Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 17:17 2025-06-24T21:06:20.833065Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 18:18 2025-06-24T21:06:20.833082Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 19:19 2025-06-24T21:06:20.833096Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 20:20 2025-06-24T21:06:20.833126Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 21:21 2025-06-24T21:06:20.833145Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 22:22 2025-06-24T21:06:20.833157Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 23:23 2025-06-24T21:06:20.833170Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 24:24 2025-06-24T21:06:20.848068Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 17:17 
2025-06-24T21:06:20.848118Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 18:18 2025-06-24T21:06:20.848146Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 19:19 2025-06-24T21:06:20.848163Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 20:20 2025-06-24T21:06:20.848177Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 21:21 2025-06-24T21:06:20.848194Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 22:22 2025-06-24T21:06:20.848214Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 23:23 2025-06-24T21:06:20.848236Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 24:24 2025-06-24T21:06:20.960868Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 17:17 2025-06-24T21:06:20.960952Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 18:18 2025-06-24T21:06:20.960991Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 19:19 2025-06-24T21:06:20.961011Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 20:20 2025-06-24T21:06:20.961032Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 21:21 2025-06-24T21:06:20.961052Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 22:22 2025-06-24T21:06:20.961071Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 23:23 2025-06-24T21:06:20.961090Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 24:24 >> TCmsTest::DynamicConfig [GOOD] >> TCmsTest::DisabledEvictVDisks |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTenatsTest::RequestRestartServices [GOOD] >> TCmsTest::StateStorageAvailabilityMode [GOOD] >> TCmsTest::StateStorageRollingRestart >> TCmsTest::WalleRequestDuringRollingRestart [GOOD] >> TCmsTest::TestKeepAvailableModeScheduled [GOOD] >> TCmsTest::TestKeepAvailableModeScheduledDisconnects >> TCmsTest::WalleTasks |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::WalleRequestDuringRollingRestart [GOOD] >> TCmsTest::TestSetResetMarkers [GOOD] >> TCmsTest::TestProcessingQueue >> TMaintenanceApiTest::CompositeActionGroupSameStorageGroup [GOOD] >> TMaintenanceApiTest::ActionReason >> TCmsTest::TestForceRestartModeDisconnects [GOOD] >> TCmsTest::TestForceRestartModeScheduled >> TCmsTest::StateRequestNode [GOOD] >> TCmsTest::StateRequestUnknownNode >> TCmsTest::TestKeepAvailableModeScheduledDisconnects [GOOD] >> TCmsTest::TestLoadLog >> VectorIndexBuildTest::TTxReply_DoExecute_Throws >> VectorIndexBuildTest::RecreatedColumns >> IndexBuildTest::BaseCase >> IndexBuildTest::RejectsCreate >> VectorIndexBuildTest::CreateAndDrop >> TCmsTest::TestProcessingQueue [GOOD] >> TCmsTest::TestLogOperationsRollback >> TCmsTest::TestLoadLog [GOOD] >> TMaintenanceApiTest::ActionReason [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::TestLoadLog [GOOD] >> TCmsTest::StateRequestUnknownNode [GOOD] >> TCmsTest::StateRequestUnknownMultipleNodes >> TCmsTest::TestForceRestartModeScheduled [GOOD] >> TCmsTest::TestForceRestartModeScheduledDisconnects |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TMaintenanceApiTest::ActionReason [GOOD] >> 
VectorIndexBuildTest::TTxReply_DoExecute_Throws [GOOD] >> VectorIndexBuildTest::TTxProgress_Throws >> IndexBuildTest::ShadowDataNotAllowedByDefault >> VectorIndexBuildTest::SimpleDuplicates >> TCmsTest::StateStorageRollingRestart [GOOD] >> TCmsTest::StateStorageLockedNodes >> TCmsTest::StateRequestUnknownMultipleNodes [GOOD] >> TCmsTest::TestForceRestartModeScheduledDisconnects [GOOD] >> IndexBuildTest::ShadowDataNotAllowedByDefault [GOOD] >> IndexBuildTest::ShadowDataEdgeCases |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::StateRequestUnknownMultipleNodes [GOOD] >> VectorIndexBuildTest::TTxProgress_Throws [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::TestForceRestartModeScheduledDisconnects [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::TTxProgress_Throws [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:28.172969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:28.173042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:28.173069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:28.173105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:28.173949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:28.173989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:28.174042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:28.174101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:28.174733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:28.175879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:28.242229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:28.242273Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:28.252840Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:28.253092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:28.253202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:28.258685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:28.260034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:28.261637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:28.262805Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:28.269498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:28.270657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:28.276739Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:28.276820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:28.277123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:28.277178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:28.277239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:28.277367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.282321Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:28.375371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:28.375534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.375704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:28.375735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 
type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:28.375868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:28.375924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:28.378282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:28.378404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:28.378517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.378565Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:28.378595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:28.378627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:28.379970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.380007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:28.380034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:28.381123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.381162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.381201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:28.381236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:28.388905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:28.390387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly 
msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:28.390533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:28.391105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:28.391211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:28.391242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:28.391443Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:28.391483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:28.391621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:28.391673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:28.393196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:28.393226Z node 1 :FLAT_TX_SCHEMESHARD ... 
it to activation from: 2025-06-24T21:06:31.098214Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186233409549 Initiating switch from PreOffline to Offline state 2025-06-24T21:06:31.100432Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186233409549 Reporting state Offline to schemeshard 72057594046678944 2025-06-24T21:06:31.100577Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [2:625:2571], Recipient [2:635:2579]: NKikimr::TEvTablet::TEvFollowerGcApplied Leader for TabletID 72057594046678944 is [2:1084:2999] sender: [2:1146:2058] recipient: [2:15:2062] 2025-06-24T21:06:31.100994Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [2:1145:3049], Recipient [2:635:2579]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046678944 Status: OK ServerId: [2:1147:3050] Leader: 1 Dead: 0 Generation: 3 VersionInfo: } 2025-06-24T21:06:31.101030Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-24T21:06:31.101140Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 635 RawX2: 8589937171 } TabletId: 72075186233409549 State: 4 2025-06-24T21:06:31.101199Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409549, state: Offline, at schemeshard: 72057594046678944 2025-06-24T21:06:31.102426Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:06:31.102597Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552133, Sender [2:1084:2999], Recipient [2:635:2579]: NKikimrTxDataShard.TEvStateChangedResult TabletId: 72057594046678944 State: 4 2025-06-24T21:06:31.102630Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-24T21:06:31.102658Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186233409549 state Offline 2025-06-24T21:06:31.102770Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [2:1145:3049], Recipient [2:635:2579]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046678944 ClientId: [2:1145:3049] ServerId: [2:1147:3050] } 2025-06-24T21:06:31.102794Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T21:06:31.102943Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-24T21:06:31.103179Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [2:625:2571], Recipient [2:635:2579]: NKikimr::TEvTablet::TEvTabletDead 2025-06-24T21:06:31.103337Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186233409549 2025-06-24T21:06:31.103419Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186233409549 Forgetting tablet 72075186233409549 2025-06-24T21:06:31.104326Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 
TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-24T21:06:31.104540Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 1 2025-06-24T21:06:31.104937Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:06:31.104973Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 6], at schemeshard: 72057594046678944 2025-06-24T21:06:31.105054Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-24T21:06:31.106372Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-24T21:06:31.106430Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-24T21:06:31.106522Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:06:31.118098Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186233409550 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:06:31.118228Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186233409550 Initiating switch from PreOffline to Offline state 2025-06-24T21:06:31.120117Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186233409550 Reporting state Offline to schemeshard 72057594046678944 2025-06-24T21:06:31.120224Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [2:742:2675], Recipient [2:752:2683]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-24T21:06:31.120383Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [2:1162:3065], Recipient [2:752:2683]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046678944 Status: OK ServerId: [2:1163:3066] Leader: 1 Dead: 0 Generation: 3 VersionInfo: } 2025-06-24T21:06:31.120408Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-24T21:06:31.120510Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 752 RawX2: 8589937275 } TabletId: 72075186233409550 State: 4 2025-06-24T21:06:31.120553Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409550, state: Offline, at schemeshard: 72057594046678944 2025-06-24T21:06:31.121632Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:5 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:06:31.121730Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552133, Sender [2:1084:2999], Recipient [2:752:2683]: 
NKikimrTxDataShard.TEvStateChangedResult TabletId: 72057594046678944 State: 4 2025-06-24T21:06:31.121750Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-24T21:06:31.121768Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186233409550 state Offline 2025-06-24T21:06:31.121862Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [2:1162:3065], Recipient [2:752:2683]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046678944 ClientId: [2:1162:3065] ServerId: [2:1163:3066] } 2025-06-24T21:06:31.121879Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T21:06:31.121983Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 5 TxId_Deprecated: 5 TabletID: 72075186233409550 Forgetting tablet 72075186233409550 2025-06-24T21:06:31.122094Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-06-24T21:06:31.122238Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 1 2025-06-24T21:06:31.122461Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [2:742:2675], Recipient [2:752:2683]: NKikimr::TEvTablet::TEvTabletDead 2025-06-24T21:06:31.122594Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186233409550 2025-06-24T21:06:31.122657Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186233409550 2025-06-24T21:06:31.123486Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:06:31.123514Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-24T21:06:31.123550Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:06:31.125304Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-24T21:06:31.125342Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-24T21:06:31.125460Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:06:31.187298Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-06-24T21:06:31.187539Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 102 Issues { message: "Unhandled exception 
ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp:1138: Unreachable" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:4 Status: INVALID UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 0 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } State: STATE_UNSPECIFIED Settings { source_path: "/MyRoot/vectors" index { name: "index1" index_columns: "embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 Issues { message: "Unhandled exception ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp:1138: Unreachable" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:4 Status: INVALID UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 0 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } State: STATE_UNSPECIFIED Settings { source_path: "/MyRoot/vectors" index { name: "index1" index_columns: "embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } } >> IndexBuildTest::ShadowDataEdgeCases [GOOD] >> IndexBuildTest::WithFollowers >> IndexBuildTest::Lock >> TCmsTest::StateStorageLockedNodes [GOOD] >> IndexBuildTest::WithFollowers [GOOD] >> TCmsTest::WalleTasks [GOOD] >> TCmsTest::WalleTasksWithNodeLimit |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::StateStorageLockedNodes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::WithFollowers [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:30.513212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:30.513278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:30.513302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:30.513327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:30.513358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:30.513388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:30.513420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:30.513486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:30.513992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:30.514218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:30.566735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:30.566779Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:30.577321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:30.577603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:30.577726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:30.583199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:30.583380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:30.583844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:30.584071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:30.586463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:30.586693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:30.587707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:30.587752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:30.587922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:30.587954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:30.587984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:30.588058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:30.593324Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:30.687579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:30.687758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:30.687968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:30.688005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: 
TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:30.688195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:30.688273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:30.689818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:30.689966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:30.690104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:30.690149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:30.690180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:30.690209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:30.691611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:30.691654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:30.691678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:30.692987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:30.693034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:30.693084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:30.693127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:30.695794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:30.697257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg 
operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:30.697400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:30.698164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:30.698275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:30.698312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:30.698527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:30.698585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:30.698732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:30.698789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:30.700312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:30.700343Z node 1 :FLAT_TX_SCHEMESHARD ... 
oard.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-24T21:06:32.969932Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:06:32.970583Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:06:32.970625Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:06:32.970641Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:06:32.970656Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8 2025-06-24T21:06:32.970673Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:06:32.971277Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:06:32.971326Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:06:32.971353Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:06:32.971839Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:2, at schemeshard: 72057594046678944 2025-06-24T21:06:32.971874Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 104:2 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:32.972077Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-24T21:06:32.972173Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:2 progress is 3/3 2025-06-24T21:06:32.972194Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-24T21:06:32.972233Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:2 progress is 3/3 2025-06-24T21:06:32.972254Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-24T21:06:32.972274Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation 
IsReadyToNotify, TxId: 104, ready parts: 3/3, is published: false 2025-06-24T21:06:32.972293Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-24T21:06:32.972328Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:0 2025-06-24T21:06:32.972352Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:0 2025-06-24T21:06:32.972416Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:06:32.972445Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:1 2025-06-24T21:06:32.972459Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:1 2025-06-24T21:06:32.972476Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:06:32.972490Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:2 2025-06-24T21:06:32.972501Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:2 2025-06-24T21:06:32.972522Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:06:32.972540Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 104, publications: 1, subscribers: 1 2025-06-24T21:06:32.972586Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-24T21:06:32.973239Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:06:32.973288Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:06:32.973309Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:06:32.973342Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-24T21:06:32.973372Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-24T21:06:32.973441Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 1 2025-06-24T21:06:32.973506Z node 3 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [3:338:2315] 2025-06-24T21:06:32.974263Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:06:32.974975Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:06:32.975028Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:06:32.975049Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:06:32.976025Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:06:32.976085Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T21:06:32.976112Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [3:706:2661] TestWaitNotification: OK eventTxId 104 2025-06-24T21:06:32.976493Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/WithFollowers" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:32.976666Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/WithFollowers" took 185us result status StatusSuccess 2025-06-24T21:06:32.976942Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/WithFollowers" PathDescription { Self { Name: "WithFollowers" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 TableSchemaVersion: 4 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "WithFollowers" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: "valueFloat" Type: "Float" TypeId: 33 Id: 4 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 4 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 
RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TCmsTest::TestLogOperationsRollback [GOOD] >> IndexBuildTest::RejectsCreate [GOOD] >> IndexBuildTest::RejectsDropIndex >> VectorIndexBuildTest::ServerLessDB-smallScanBuffer-false >> IndexBuildTest::Lock [GOOD] >> IndexBuildTest::MergeIndexTableShardsOnlyWhenReady ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::TestLogOperationsRollback [GOOD] Test command err: 2025-06-24T21:06:22.228093Z node 1 :CMS ERROR: info_collector.cpp:281: [InfoCollector] Couldn't get base config 2025-06-24T21:06:22.346855Z node 1 :CMS ERROR: info_collector.cpp:281: [InfoCollector] Couldn't get base config 2025-06-24T21:06:22.361114Z node 1 :CMS ERROR: info_collector.cpp:281: [InfoCollector] Couldn't get base config 2025-06-24T21:06:22.479511Z node 1 :CMS ERROR: info_collector.cpp:281: [InfoCollector] Couldn't get base config 2025-06-24T21:06:26.499033Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 17:17 2025-06-24T21:06:26.499088Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 18:18 2025-06-24T21:06:26.499122Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 19:19 2025-06-24T21:06:26.499157Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 20:20 2025-06-24T21:06:26.499173Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 21:21 2025-06-24T21:06:26.499189Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 22:22 2025-06-24T21:06:26.499204Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 23:23 2025-06-24T21:06:26.499219Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 24:24 >> BasicUsage::FallbackToSingleDb >> BasicUsage::WriteSessionWriteInHandlers >> IndexBuildTest::RejectsDropIndex [GOOD] >> IndexBuildTest::RejectsCancel >> BasicUsage::WriteSessionCloseWaitsForWrites >> BasicUsage::WaitEventBlocksBeforeDiscovery >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> IndexBuildTest::BaseCase [GOOD] >> 
IndexBuildTest::CancellationNoTable >> VectorIndexBuildTest::SimpleDuplicates [GOOD] >> VectorIndexBuildTest::TTxInit_Throws >> IndexBuildTest::CancellationNoTable [GOOD] >> IndexBuildTest::CancelBuild >> TCmsTest::WalleTasksWithNodeLimit [GOOD] >> TCmsTest::WalleTasksDifferentPriorities >> VectorIndexBuildTest::ServerLessDB-smallScanBuffer-false [GOOD] >> VectorIndexBuildTest::ServerLessDB-smallScanBuffer-true >> VectorIndexBuildTest::TTxInit_Throws [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> TCmsTest::DisabledEvictVDisks [GOOD] >> TCmsTest::EmergencyDuringRollingRestart >> TCmsTest::WalleTasksDifferentPriorities [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::TTxInit_Throws [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:30.727282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:30.727350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:30.727390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:30.727424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:30.727456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:30.727490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:30.727539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:30.727603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:30.728167Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:30.728405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:30.785806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:30.785856Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:30.798007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 
2025-06-24T21:06:30.798276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:30.798384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:30.804962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:30.805146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:30.805717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:30.806018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:30.808535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:30.808714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:30.809652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:30.809699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:30.809881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:30.809917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:30.809948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:30.810027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:30.814840Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:30.919045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:30.919230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:30.919433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:30.919486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 
72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:30.919674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:30.919734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:30.921455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:30.921625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:30.921768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:30.921821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:30.921848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:30.921873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:30.923576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:30.923633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:30.923675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:30.925395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:30.925440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:30.925497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:30.925577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:30.929280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:30.931194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 
72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:30.931367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:30.932068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:30.932173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:30.932207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:30.932450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:30.932488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:30.932622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:30.932690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:30.934368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:30.934405Z node 1 :FLAT_TX_SCHEMESHARD ... 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:40.995737Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186233409552 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:06:40.995887Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186233409552 Initiating switch from PreOffline to Offline state 2025-06-24T21:06:40.998281Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186233409552 Reporting state Offline to schemeshard 72057594046678944 2025-06-24T21:06:40.998444Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [2:1224:3131], Recipient [2:1233:3138]: NKikimr::TEvTablet::TEvFollowerGcApplied Leader for TabletID 72057594046678944 is [2:2756:4633] sender: [2:2815:2058] recipient: [2:15:2062] 2025-06-24T21:06:40.998839Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [2:2814:4680], Recipient [2:1233:3138]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046678944 Status: OK ServerId: [2:2816:4681] Leader: 1 Dead: 0 Generation: 3 VersionInfo: } 2025-06-24T21:06:40.998869Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-24T21:06:40.998967Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 1233 RawX2: 8589937730 } TabletId: 72075186233409552 State: 4 2025-06-24T21:06:40.999036Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409552, state: Offline, at schemeshard: 72057594046678944 2025-06-24T21:06:41.000216Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:7 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:06:41.000343Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552133, Sender [2:2756:4633], Recipient [2:1233:3138]: NKikimrTxDataShard.TEvStateChangedResult TabletId: 72057594046678944 State: 4 2025-06-24T21:06:41.000370Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-24T21:06:41.000402Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186233409552 state Offline 2025-06-24T21:06:41.000546Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [2:2814:4680], Recipient [2:1233:3138]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046678944 ClientId: [2:2814:4680] ServerId: [2:2816:4681] } 2025-06-24T21:06:41.000570Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T21:06:41.000759Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 7 TxId_Deprecated: 7 TabletID: 72075186233409552 2025-06-24T21:06:41.000988Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [2:1224:3131], Recipient [2:1233:3138]: NKikimr::TEvTablet::TEvTabletDead 2025-06-24T21:06:41.001178Z node 2 :TX_DATASHARD 
INFO: datashard.cpp:257: OnTabletDead: 72075186233409552 2025-06-24T21:06:41.001273Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186233409552 2025-06-24T21:06:41.002438Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 7 ShardOwnerId: 72057594046678944 ShardLocalIdx: 7, at schemeshard: 72057594046678944 2025-06-24T21:06:41.002681Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 1 Forgetting tablet 72075186233409552 2025-06-24T21:06:41.003002Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:06:41.003041Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 9], at schemeshard: 72057594046678944 2025-06-24T21:06:41.003099Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-24T21:06:41.004824Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-24T21:06:41.004881Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186233409552 2025-06-24T21:06:41.005209Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:06:41.016588Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186233409553 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:06:41.016689Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186233409553 Initiating switch from PreOffline to Offline state 2025-06-24T21:06:41.018758Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186233409553 Reporting state Offline to schemeshard 72057594046678944 2025-06-24T21:06:41.018871Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [2:1638:3533], Recipient [2:1646:3539]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-24T21:06:41.019054Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [2:2833:4698], Recipient [2:1646:3539]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046678944 Status: OK ServerId: [2:2834:4699] Leader: 1 Dead: 0 Generation: 3 VersionInfo: } 2025-06-24T21:06:41.019076Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-24T21:06:41.019174Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 1646 RawX2: 8589938131 } TabletId: 72075186233409553 State: 4 2025-06-24T21:06:41.019219Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409553, state: Offline, at 
schemeshard: 72057594046678944 2025-06-24T21:06:41.020328Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:8 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:06:41.020435Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552133, Sender [2:2756:4633], Recipient [2:1646:3539]: NKikimrTxDataShard.TEvStateChangedResult TabletId: 72057594046678944 State: 4 2025-06-24T21:06:41.020459Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-24T21:06:41.020485Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186233409553 state Offline 2025-06-24T21:06:41.020598Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [2:2833:4698], Recipient [2:1646:3539]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046678944 ClientId: [2:2833:4698] ServerId: [2:2834:4699] } 2025-06-24T21:06:41.020618Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T21:06:41.020771Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 8 TxId_Deprecated: 8 TabletID: 72075186233409553 2025-06-24T21:06:41.020953Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [2:1638:3533], Recipient [2:1646:3539]: NKikimr::TEvTablet::TEvTabletDead 2025-06-24T21:06:41.021100Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186233409553 2025-06-24T21:06:41.021182Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186233409553 2025-06-24T21:06:41.022077Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 8 ShardOwnerId: 72057594046678944 ShardLocalIdx: 8, at schemeshard: 72057594046678944 2025-06-24T21:06:41.022253Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 10] was 1 Forgetting tablet 72075186233409553 2025-06-24T21:06:41.022686Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:06:41.022715Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 10], at schemeshard: 72057594046678944 2025-06-24T21:06:41.022757Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:06:41.023966Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:8 2025-06-24T21:06:41.024006Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:8 tabletId 72075186233409553 2025-06-24T21:06:41.024116Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:06:41.044833Z node 2 :BUILD_INDEX DEBUG: 
schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-06-24T21:06:41.045088Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 102 Issues { message: "Init IndexBuild unhandled exception ydb/core/tx/schemeshard/schemeshard_info_types.h:3659: Condition violated: `creationConfig.ParseFromString(row.template GetValue())\'" severity: 1 } State: STATE_DONE Settings { source_path: "/MyRoot/vectors" index { name: "by_embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 100 } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 Issues { message: "Init IndexBuild unhandled exception ydb/core/tx/schemeshard/schemeshard_info_types.h:3659: Condition violated: `creationConfig.ParseFromString(row.template GetValue())\'" severity: 1 } State: STATE_DONE Settings { source_path: "/MyRoot/vectors" index { name: "by_embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 100 } >> IndexBuildTest::CancelBuild [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::WalleTasksDifferentPriorities [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::CancelBuild [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:28.172962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:28.173042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:28.173085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:28.173115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:28.173946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:28.173985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:28.174029Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:28.174087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 
600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:28.174706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:28.175945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:28.239944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:28.239983Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:28.251722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:28.252038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:28.252212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:28.259405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:28.260062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:28.261697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:28.262845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:28.269778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:28.270720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:28.276813Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:28.276880Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:28.277133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:28.277181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:28.277234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:28.277331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.282882Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:28.375353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: 
"pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:28.375598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.375856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:28.375913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:28.376156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:28.376263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:28.378590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:28.378754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:28.378922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.379016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:28.379056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:28.379086Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:28.380847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.380900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:28.380934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:28.382483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.382540Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.382586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-24T21:06:28.382637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:28.386150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:28.387861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:28.388027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:28.388879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:28.388991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:28.389035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:28.389321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:28.389387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:28.389569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:28.389650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:28.391420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:28.391461Z node 1 :FLAT_TX_SCHEMESHARD ... 
done id#281474976710760:0 progress is 1/1 2025-06-24T21:06:42.085064Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-24T21:06:42.085089Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-06-24T21:06:42.085127Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:127:2151] message: TxId: 281474976710760 2025-06-24T21:06:42.085156Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-24T21:06:42.085177Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710760:0 2025-06-24T21:06:42.085196Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710760:0 2025-06-24T21:06:42.085235Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 13 FAKE_COORDINATOR: Erasing txId 281474976710760 2025-06-24T21:06:42.086418Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-06-24T21:06:42.086462Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976710760 2025-06-24T21:06:42.086504Z node 3 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 102, txId# 281474976710760 2025-06-24T21:06:42.086556Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: index1, IndexColumn: index, State: Cancellation_Unlocking, IsBroken: 0, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [3:1174:3024], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, txId# 281474976710760 2025-06-24T21:06:42.087610Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancellation_Unlocking 2025-06-24T21:06:42.087687Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancellation_Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, 
IndexName: index1, IndexColumn: index, State: Cancellation_Unlocking, IsBroken: 0, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [3:1174:3024], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T21:06:42.087731Z node 3 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Cancellation_Unlocking to Cancelled 2025-06-24T21:06:42.089076Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancelled 2025-06-24T21:06:42.089128Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancelled TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: index1, IndexColumn: index, State: Cancelled, IsBroken: 0, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [3:1174:3024], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T21:06:42.089154Z node 3 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:334: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-06-24T21:06:42.089258Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:06:42.089291Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [3:1270:3109] TestWaitNotification: OK eventTxId 102 2025-06-24T21:06:42.091093Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-06-24T21:06:42.091305Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings { source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings 
{ source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } EndTime { } } 2025-06-24T21:06:42.092473Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:42.092660Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 198us result status StatusSuccess 2025-06-24T21:06:42.092982Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 10 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 11 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:42.094348Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme 
DoExecute, record: Path: "/MyRoot/Table/index1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:06:42.094510Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/index1" took 177us result status StatusPathDoesNotExist 2025-06-24T21:06:42.094620Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/index1\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeTableIndex, state: EPathStateNotExist), drop stepId: 5000005, drop txId: 281474976710759" Path: "/MyRoot/Table/index1" PathId: 3 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> BasicUsage::SelectDatabaseByHash [GOOD] >> BasicUsage::SelectDatabase [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::SelectDatabase [GOOD] >> TCmsTest::EmergencyDuringRollingRestart [GOOD] >> BasicUsage::RetryDiscoveryWithCancel ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::EmergencyDuringRollingRestart [GOOD] Test command err: 2025-06-24T21:06:25.380945Z node 10 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvGetConfigRequest { }, response# NKikimr::NCms::TEvCms::TEvGetConfigResponse { Status { Code: OK } Config { DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: true UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } } } 2025-06-24T21:06:25.381284Z node 10 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-24T21:06:25.419572Z node 10 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-24T21:06:25.419712Z node 10 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-24T21:06:25.420978Z node 10 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-7-0" State: UP 
Timestamp: 120026512 } Devices { Name: "pdisk-17-17" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 17 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-10-10" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 10 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-11-11" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 11 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-12-12" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 12 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-13-13" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 13 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-14-14" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 14 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-15-15" State: UP Timestamp: 120026512 } Timestamp: 120026512 
NodeId: 15 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-16-16" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 16 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Timestamp: 120026512 } } 2025-06-24T21:06:25.421483Z node 10 :CMS DEBUG: sentinel.cpp:486: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-17-17" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 17 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-10-10" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 10 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-11-11" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 11 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-12-12" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 12 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120026512 
} Devices { Name: "pdisk-13-13" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 13 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-14-14" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 14 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-15-15" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 15 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-16-16" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 16 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Timestamp: 120026512 } 2025-06-24T21:06:25.421664Z node 10 :CMS DEBUG: sentinel.cpp:944: [Sentinel] [Main] Config was updated in 120.002512s 2025-06-24T21:06:25.421708Z node 10 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-24T21:06:25.421779Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 10, wbId# [10:8388350642965737326:1634689637] 2025-06-24T21:06:25.421818Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 11, wbId# [11:8388350642965737326:1634689637] 2025-06-24T21:06:25.421852Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 12, wbId# [12:8388350642965737326:1634689637] 2025-06-24T21:06:25.421873Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 13, wbId# [13:8388350642965737326:1634689637] 2025-06-24T21:06:25.421892Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 14, wbId# [14:8388350642965737326:1634689637] 2025-06-24T21:06:25.421909Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 15, wbId# ... 
de 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 17, wbId# [17:8388350642965737326:1634689637] 2025-06-24T21:06:35.411441Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 10, response# PDiskStateInfo { PDiskId: 10 CreateTime: 0 ChangeTime: 0 Path: "/10/pdisk-10.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240026 2025-06-24T21:06:35.411880Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 16, response# PDiskStateInfo { PDiskId: 16 CreateTime: 0 ChangeTime: 0 Path: "/16/pdisk-16.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240026 2025-06-24T21:06:35.412027Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 17, response# PDiskStateInfo { PDiskId: 17 CreateTime: 0 ChangeTime: 0 Path: "/17/pdisk-17.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240026 2025-06-24T21:06:35.412067Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 11, response# PDiskStateInfo { PDiskId: 11 CreateTime: 0 ChangeTime: 0 Path: "/11/pdisk-11.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240026 2025-06-24T21:06:35.412100Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 12, response# PDiskStateInfo { PDiskId: 12 CreateTime: 0 ChangeTime: 0 Path: "/12/pdisk-12.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240026 2025-06-24T21:06:35.412133Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 13, response# PDiskStateInfo { PDiskId: 13 CreateTime: 0 ChangeTime: 0 Path: "/13/pdisk-13.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240026 2025-06-24T21:06:35.412176Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 14, response# PDiskStateInfo { PDiskId: 14 CreateTime: 0 ChangeTime: 0 Path: "/14/pdisk-14.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240026 2025-06-24T21:06:35.412218Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 15, response# PDiskStateInfo { PDiskId: 15 CreateTime: 0 ChangeTime: 0 Path: "/15/pdisk-15.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240026 2025-06-24T21:06:35.412254Z node 10 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-24T21:06:35.412432Z node 10 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 10:10, status# FAULTY, required status# ACTIVE, reason# PrevState# Normal State# Normal StateCounter# 3 StateLimit# 1, dry run# 0 2025-06-24T21:06:35.412480Z node 10 :CMS DEBUG: sentinel.cpp:1076: [Sentinel] [Main] Change pdisk status: requestsSize# 1 2025-06-24T21:06:35.412586Z node 10 :CMS DEBUG: cms_tx_log_and_send.cpp:19: TTxLogAndSend Execute 2025-06-24T21:06:35.412779Z node 10 :CMS DEBUG: sentinel.cpp:1202: [Sentinel] [Main] Handle 
TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true } Success: true, cookie# 2 2025-06-24T21:06:35.412826Z node 10 :CMS NOTICE: sentinel.cpp:1226: [Sentinel] [Main] PDisk status has been changed: pdiskId# 10:10 2025-06-24T21:06:35.424990Z node 10 :CMS DEBUG: cms_tx_log_and_send.cpp:27: TTxLogAndSend Complete 2025-06-24T21:06:35.451711Z node 10 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-24T21:06:35.451790Z node 10 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-24T21:06:35.451840Z node 10 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:04:00Z 2025-06-24T21:06:35.452495Z node 10 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "10" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 10 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-24T21:06:35.452560Z node 10 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "10" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 10 has not yet been completed" } 2025-06-24T21:06:35.452596Z node 10 :CMS DEBUG: cms.cpp:398: Result: ERROR (reason: Evict vdisks is disabled in Sentinel (self heal)) 2025-06-24T21:06:35.452676Z node 10 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-24T21:06:35.452788Z node 10 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-24T21:06:35.464904Z node 10 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-24T21:06:35.465140Z node 10 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ERROR Reason: "Evict vdisks is disabled in Sentinel (self heal)" } RequestId: "user-r-1" } 2025-06-24T21:06:35.465819Z node 10 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-24T21:06:35.477836Z node 10 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-24T21:06:35.478099Z node 10 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: true UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 1 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } 2025-06-24T21:06:40.353263Z node 10 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-24T21:06:40.353333Z node 10 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-24T21:06:40.353424Z node 10 :CMS DEBUG: cms.cpp:1153: Running 
CleanupWalleTasks 2025-06-24T21:06:40.353673Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 10, wbId# [10:8388350642965737326:1634689637] 2025-06-24T21:06:40.353728Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 11, wbId# [11:8388350642965737326:1634689637] 2025-06-24T21:06:40.353747Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 12, wbId# [12:8388350642965737326:1634689637] 2025-06-24T21:06:40.353764Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 13, wbId# [13:8388350642965737326:1634689637] 2025-06-24T21:06:40.353788Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 14, wbId# [14:8388350642965737326:1634689637] 2025-06-24T21:06:40.353812Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 15, wbId# [15:8388350642965737326:1634689637] 2025-06-24T21:06:40.353856Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 16, wbId# [16:8388350642965737326:1634689637] 2025-06-24T21:06:40.353892Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 17, wbId# [17:8388350642965737326:1634689637] 2025-06-24T21:06:40.354211Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 10, response# PDiskStateInfo { PDiskId: 10 CreateTime: 0 ChangeTime: 0 Path: "/10/pdisk-10.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300026 2025-06-24T21:06:40.354531Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 11, response# PDiskStateInfo { PDiskId: 11 CreateTime: 0 ChangeTime: 0 Path: "/11/pdisk-11.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300026 2025-06-24T21:06:40.354666Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 12, response# PDiskStateInfo { PDiskId: 12 CreateTime: 0 ChangeTime: 0 Path: "/12/pdisk-12.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300026 2025-06-24T21:06:40.354744Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 13, response# PDiskStateInfo { PDiskId: 13 CreateTime: 0 ChangeTime: 0 Path: "/13/pdisk-13.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300026 2025-06-24T21:06:40.354800Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 15, response# PDiskStateInfo { PDiskId: 15 CreateTime: 0 ChangeTime: 0 Path: "/15/pdisk-15.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300026 2025-06-24T21:06:40.354853Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 16, response# PDiskStateInfo { PDiskId: 16 CreateTime: 0 ChangeTime: 0 Path: "/16/pdisk-16.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300026 2025-06-24T21:06:40.354890Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 17, response# 
PDiskStateInfo { PDiskId: 17 CreateTime: 0 ChangeTime: 0 Path: "/17/pdisk-17.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300026 2025-06-24T21:06:40.354924Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 14, response# PDiskStateInfo { PDiskId: 14 CreateTime: 0 ChangeTime: 0 Path: "/14/pdisk-14.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300026 2025-06-24T21:06:40.354964Z node 10 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-24T21:06:40.355213Z node 10 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 10:10, status# ACTIVE, required status# FAULTY, reason# Forced status, dry run# 0 2025-06-24T21:06:40.355286Z node 10 :CMS DEBUG: sentinel.cpp:1076: [Sentinel] [Main] Change pdisk status: requestsSize# 1 2025-06-24T21:06:40.355479Z node 10 :CMS DEBUG: cms_tx_log_and_send.cpp:19: TTxLogAndSend Execute 2025-06-24T21:06:40.355745Z node 10 :CMS DEBUG: sentinel.cpp:1202: [Sentinel] [Main] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true } Success: true, cookie# 3 2025-06-24T21:06:40.355801Z node 10 :CMS NOTICE: sentinel.cpp:1226: [Sentinel] [Main] PDisk status has been changed: pdiskId# 10:10 |93.6%| [TA] $(B)/ydb/core/cms/ut/test-results/unittest/{meta.json ... results_accumulator.log} |93.6%| [TA] {RESULT} $(B)/ydb/core/cms/ut/test-results/unittest/{meta.json ... results_accumulator.log} |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> TSchemeShardMoveTest::MoveTableForBackup >> TSchemeShardMoveTest::Chain >> TSchemeShardMoveTest::Boot >> TSchemeShardMoveTest::Reject >> BasicUsage::WriteSessionWriteInHandlers [GOOD] >> BasicUsage::WriteSessionCloseWaitsForWrites [GOOD] >> BasicUsage::WriteSessionCloseIgnoresWrites >> BasicUsage::FallbackToSingleDb [GOOD] >> BasicUsage::FallbackToSingleDbAfterBadRequest >> TSchemeShardMoveTest::Boot [GOOD] >> TSchemeShardMoveTest::AsyncIndexWithSyncInFly ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::WriteSessionWriteInHandlers [GOOD] Test command err: 2025-06-24T21:06:36.340821Z :WriteSessionWriteInHandlers INFO: Random seed for debugging is 1750799196340803 2025-06-24T21:06:36.666966Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625289257198992:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:06:36.667092Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:06:36.701267Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625292968748807:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:06:36.701571Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:06:36.817118Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: 
Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004793/r3tmp/tmpSBTLn7/pdisk_1.dat 2025-06-24T21:06:36.831617Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:06:37.012115Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:37.017013Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:06:37.017141Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:06:37.025531Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:06:37.026189Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:06:37.036204Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:06:37.036330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:06:37.039243Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24450, node 1 2025-06-24T21:06:37.156637Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/004793/r3tmp/yandexgygWnQ.tmp 2025-06-24T21:06:37.156667Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/004793/r3tmp/yandexgygWnQ.tmp 2025-06-24T21:06:37.157690Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/004793/r3tmp/yandexgygWnQ.tmp 2025-06-24T21:06:37.157816Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:06:37.348482Z INFO: TTestServer started on Port 17487 GrpcPort 24450 TClient is connected to server localhost:17487 PQClient connected to localhost:24450 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:06:37.616188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... 2025-06-24T21:06:37.674673Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:06:37.707448Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:06:39.058752Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625302142101864:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:39.058823Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625302142101852:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:39.058932Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:39.059600Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625305853650981:2271], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:39.059600Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625305853650970:2268], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:39.059654Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:39.062364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:06:39.064911Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625302142101903:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:39.064986Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:39.066548Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625305853650985:2124] txid# 281474976720657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T21:06:39.076401Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625302142101867:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-24T21:06:39.076464Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625305853650984:2272], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-24T21:06:39.158858Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625305853651012:2130] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:06:39.165870Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625302142101961:2684] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:06:39.315627Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519625302142101980:2307], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:06:39.315679Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519625305853651027:2276], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:06:39.315966Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NWUyMzNjY2UtOWUzNDA2OWEtOTNiNmYyNDUtOTRlNTNmMDI=, ActorId: [2:7519625305853650968:2267], ActorState: ExecuteState, TraceId: 01jyhw7atj0bwf9aewt2y3fqf8, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:06:39.319773Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:06: ... user user send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-24T21:06:46.442272Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=177, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:06:46.442337Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-24T21:06:46.442439Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-24T21:06:46.442867Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 Topic 'rt3.dc1--test-topic' partition 0 user user offset 0 count 1 size 1024000 endOffset 1 max time lag 0ms effective offset 0 2025-06-24T21:06:46.443024Z :DEBUG: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 0 2025-06-24T21:06:46.443055Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 177 count 1 last offset 0, current partition end offset: 1 2025-06-24T21:06:46.443078Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 2025-06-24T21:06:46.443114Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 1 parts_count 0 source 1 size 177 accessed 0 times before, last time 2025-06-24T21:06:46.000000Z 2025-06-24T21:06:46.443140Z node 1 :PERSQUEUE DEBUG: read.h:121: Reading cookie 2. All 1 blobs are from cache. 2025-06-24T21:06:46.443212Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-24T21:06:46.443224Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037892' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-24T21:06:46.443353Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 157 from pos 0 cbcount 1 2025-06-24T21:06:46.443407Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:964: Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp done, result 1750799206429 queuesize 0 startOffset 0 2025-06-24T21:06:46.443490Z :DEBUG: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] Write session got write response: acks { seq_no: 1 written { } } write_statistics { persisting_time { nanos: 2000000 } min_queue_wait_time { nanos: 9000000 } max_queue_wait_time { nanos: 9000000 } partition_quota_wait_time { } topic_quota_wait_time { } } 2025-06-24T21:06:46.443521Z :DEBUG: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] OnAck: seqNo=1, txId=? 2025-06-24T21:06:46.443547Z :DEBUG: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] Write session: acknoledged message 1 === Inside AcksHandler 2025-06-24T21:06:46.443688Z :DEBUG: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] Write 1 messages with Id from 2 to 2 === Inside ReadyToAcceptHandler === AcksHandler has written a message, closing the session 2025-06-24T21:06:46.444071Z :DEBUG: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] Write session: try to update token 2025-06-24T21:06:46.444109Z :DEBUG: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] Send 1 message(s) (0 left), first sequence number is 2 2025-06-24T21:06:46.444612Z node 1 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-24T21:06:46.444783Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-06-24T21:06:46.444864Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T21:06:46.444885Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T21:06:46.444937Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 2 requestId: cookie: 2 2025-06-24T21:06:46.444960Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-24T21:06:46.445005Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T21:06:46.445018Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T21:06:46.445046Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72075186224037892] got client message topic: rt3.dc1--test-topic partition: 0 SourceId: '\0src_id' SeqNo: 2 partNo : 0 messageNo: 3 size 107 offset: -1 2025-06-24T21:06:46.445157Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 part 
blob processing sourceId '\0src_id' seqNo 2 partNo 0 2025-06-24T21:06:46.445883Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 part blob complete sourceId '\0src_id' seqNo 2 partNo 0 FormedBlobsCount 0 NewHead: Offset 1 PartNo 0 PackedSize 181 count 1 nextOffset 2 batches 1 2025-06-24T21:06:46.446187Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Add new write blob: topic 'rt3.dc1--test-topic' partition 0 compactOffset 1,1 HeadOffset 1 endOffset 1 curOffset 2 d0000000000_00000000000000000001_00000_0000000001_00000? size 169 WTime 1750799206446 2025-06-24T21:06:46.446283Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T21:06:46.446334Z node 1 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 1 partNo 0 count 1 size 169 2025-06-24T21:06:46.448040Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 1 count 1 size 169 actorID [1:7519625327911906765:2443] 2025-06-24T21:06:46.448106Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 114 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:06:46.448117Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037892' partition 0 offset 1 partno 0 count 1 parts 0 suffix '63' size 169 2025-06-24T21:06:46.448135Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:06:46.448156Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0src_id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 2, partNo: 0, Offset: 1 is stored on disk 2025-06-24T21:06:46.448256Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=346, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:06:46.448285Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 3 requestId: cookie: 2 2025-06-24T21:06:46.448323Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-24T21:06:46.448722Z :DEBUG: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 0 2025-06-24T21:06:46.448862Z :DEBUG: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] Write session got write response: acks { seq_no: 2 written { offset: 1 } } write_statistics { persisting_time { nanos: 2000000 } min_queue_wait_time { nanos: 1000000 } max_queue_wait_time { nanos: 1000000 } partition_quota_wait_time { } topic_quota_wait_time { } } 2025-06-24T21:06:46.448887Z :DEBUG: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] OnAck: seqNo=2, txId=? 
2025-06-24T21:06:46.448907Z :DEBUG: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] Write session: acknoledged message 2 === Inside AcksHandler === Inside SessionClosedHandler 2025-06-24T21:06:46.449117Z :DEBUG: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] Write 1 messages with Id from 3 to 3 === SessionClosedHandler has 'written' a message 2025-06-24T21:06:46.449225Z :INFO: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] Write session: close. Timeout 0.000000s 2025-06-24T21:06:46.449268Z :INFO: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] Write session will now close 2025-06-24T21:06:46.449307Z :DEBUG: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] Write session: aborting 2025-06-24T21:06:46.449961Z :WARNING: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] Write session: could not confirm all writes in time or session aborted, perform hard shutdown 2025-06-24T21:06:46.450038Z :DEBUG: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 1, Msg: CANCELLED, Details: , InternalError: 0 2025-06-24T21:06:46.450069Z :DEBUG: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] Write session is aborting and will not restart 2025-06-24T21:06:46.450158Z :DEBUG: [/Root] TraceId [] SessionId [src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0] MessageGroupId [src_id] Write session: destroy 2025-06-24T21:06:46.450232Z node 1 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0 grpc read done: success: 0 data: 2025-06-24T21:06:46.450260Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 2 sessionId: src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0 grpc read failed 2025-06-24T21:06:46.450289Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0 grpc closed 2025-06-24T21:06:46.450304Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: src_id|74e9f4ad-5743570c-3a214cbe-d920e060_0 is DEAD 2025-06-24T21:06:46.451353Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T21:06:46.451534Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [1:7519625332206874368:2482] destroyed 2025-06-24T21:06:46.451569Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
>> TSchemeShardMoveTest::MoveTableForBackup [GOOD] >> TSchemeShardMoveTest::MoveTableWithSequence >> TSchemeShardMoveTest::Chain [GOOD] >> TSchemeShardMoveTest::Index >> TSchemeShardMoveTest::Reject [GOOD] >> TSchemeShardMoveTest::OneTable >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> TSchemeShardMoveTest::AsyncIndexWithSyncInFly [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> TSchemeShardMoveTest::MoveTableWithSequence [GOOD] >> TSchemeShardMoveTest::Index [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::AsyncIndexWithSyncInFly [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:47.748169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:47.748260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:47.748298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:47.748350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:47.749022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:47.749075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:47.749150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:47.749241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:47.749999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:47.751417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:47.832675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:47.832749Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:47.847496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:47.847870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:47.848039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:47.854747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:47.854893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:47.855386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:47.855605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:47.858225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:47.858400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:47.859465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:47.859521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:47.859718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:47.859767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:47.859824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:47.859911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.866255Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:47.978152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:47.978332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.978481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:47.978508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: 
TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:47.978672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:47.978716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:47.980568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:47.980711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:47.980849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.980890Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:47.980917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:47.980940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:47.982306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.982346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:47.982387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:47.983473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.983506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.983535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:47.983573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:47.985912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:47.987389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg 
operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:47.987546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:47.988154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:47.988257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:47.988295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:47.988533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:47.988568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:47.988699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:47.988761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:47.990160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:47.990203Z node 1 :FLAT_TX_SCHEMESHARD ... 
9 } Origin: 72075186233409546 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-24T21:06:49.042644Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 103:2, shardIdx: 72057594046678944:2, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:49.042683Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 103:2, at schemeshard: 72057594046678944 2025-06-24T21:06:49.042725Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 103:2, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:06:49.042766Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 103:2 129 -> 240 2025-06-24T21:06:49.043569Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 328 RawX2: 8589936901 } Origin: 72075186233409547 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-24T21:06:49.043607Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 103, tablet: 72075186233409547, partId: 0 2025-06-24T21:06:49.043707Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 103:0, at schemeshard: 72057594046678944, message: Source { RawX1: 328 RawX2: 8589936901 } Origin: 72075186233409547 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-24T21:06:49.043748Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 103:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:06:49.043813Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 103:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 328 RawX2: 8589936901 } Origin: 72075186233409547 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-24T21:06:49.043856Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 103:0, shardIdx: 72057594046678944:1, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:49.043884Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:06:49.043913Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 103:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:06:49.043943Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 103:0 129 -> 240 2025-06-24T21:06:49.046990Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 103:2, at schemeshard: 72057594046678944 2025-06-24T21:06:49.047193Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 103:0, at schemeshard: 
72057594046678944 2025-06-24T21:06:49.050028Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 103:2, at schemeshard: 72057594046678944 2025-06-24T21:06:49.050196Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:2, at schemeshard: 72057594046678944 2025-06-24T21:06:49.050238Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_move_table.cpp:564: TMoveTable TDone, operationId: 103:2 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:49.050291Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_move_table.cpp:574: TMoveTable TDone, operationId: 103:2 ProgressState, SourcePathId: [OwnerId: 72057594046678944, LocalPathId: 4], TargetPathId: [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-24T21:06:49.050395Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:2 progress is 2/3 2025-06-24T21:06:49.050430Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 2/3 2025-06-24T21:06:49.050467Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:2 progress is 2/3 2025-06-24T21:06:49.050501Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 2/3 2025-06-24T21:06:49.050538Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 2/3, is published: true 2025-06-24T21:06:49.051017Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:06:49.051268Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:06:49.051306Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_move_table.cpp:564: TMoveTable TDone, operationId: 103:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:49.051340Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_move_table.cpp:574: TMoveTable TDone, operationId: 103:0 ProgressState, SourcePathId: [OwnerId: 72057594046678944, LocalPathId: 2], TargetPathId: [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-24T21:06:49.051413Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 3/3 2025-06-24T21:06:49.051446Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 3/3 2025-06-24T21:06:49.051478Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 3/3 2025-06-24T21:06:49.051503Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 3/3 2025-06-24T21:06:49.051530Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 3/3, is published: true 2025-06-24T21:06:49.051563Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 3/3 2025-06-24T21:06:49.051607Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: 
Operation and all the parts is done, operation id: 103:0 2025-06-24T21:06:49.051642Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T21:06:49.051755Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-24T21:06:49.051793Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:06:49.051828Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:1 2025-06-24T21:06:49.051848Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:1 2025-06-24T21:06:49.051874Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-24T21:06:49.051896Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:06:49.051916Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:2 2025-06-24T21:06:49.051941Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:2 2025-06-24T21:06:49.051993Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-24T21:06:49.052018Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-24T21:06:49.052382Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:06:49.052424Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-24T21:06:49.052487Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T21:06:49.052522Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:06:49.052553Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:06:49.052583Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:06:49.052629Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:06:49.058913Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: 
TTxCleanDroppedPaths Complete, done PersistRemovePath for 3 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:06:49.059338Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:392: actor# [2:272:2261] Handle TEvGetProxyServicesRequest TestWaitNotification wait txId: 103 2025-06-24T21:06:49.105880Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T21:06:49.105941Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T21:06:49.106392Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T21:06:49.106474Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T21:06:49.106508Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [2:683:2565] TestWaitNotification: OK eventTxId 103 >> TSchemeShardMoveTest::Replace ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::MoveTableWithSequence [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:47.748171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:47.748259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:47.748298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:47.748339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:47.749035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:47.749092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:47.749154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:47.749240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:47.750013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:47.751410Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:47.831378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:47.831441Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:47.845402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:47.845782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:47.845967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:47.852542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:47.852683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:47.853138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:47.853339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:47.855611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:47.855813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:47.856778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:47.856820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:47.856957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:47.856987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:47.857025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:47.857086Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.861792Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:47.957562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:47.958430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: 
//MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.959219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:47.959260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:47.960150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:47.960208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:47.962409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:47.963134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:47.963317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.963429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:47.963459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:47.963483Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:47.965150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.965208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:47.965300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:47.966772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.966804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.966833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:47.966865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:47.971098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose 
send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:47.972867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:47.973100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:47.974012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:47.974161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:47.974210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:47.975339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:47.975409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:47.975623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:47.975705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:47.977672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:47.977727Z node 1 :FLAT_TX_SCHEMESHARD ... 
DoNotify send TEvNotifyTxCompletionResult to actorId: [2:377:2344] message: TxId: 102 2025-06-24T21:06:49.305876Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 2/2 2025-06-24T21:06:49.305906Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:06:49.305926Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:06:49.306018Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-24T21:06:49.306045Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:06:49.306068Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:1 2025-06-24T21:06:49.306081Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:1 2025-06-24T21:06:49.306120Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-24T21:06:49.306141Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T21:06:49.306398Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:06:49.306429Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:06:49.306477Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:06:49.306506Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:06:49.306544Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T21:06:49.308782Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:06:49.308838Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:470:2424] 2025-06-24T21:06:49.309352Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 2025-06-24T21:06:49.313248Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/myseq" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: 
false }, at schemeshard: 72057594046678944 2025-06-24T21:06:49.313407Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/myseq" took 174us result status StatusPathDoesNotExist 2025-06-24T21:06:49.313528Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/myseq\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Table/myseq" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:06:49.313795Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:49.313898Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 107us result status StatusPathDoesNotExist 2025-06-24T21:06:49.314023Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Table" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:06:49.314387Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:49.314612Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove" took 228us result status StatusSuccess 2025-06-24T21:06:49.315032Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove" PathDescription { Self { Name: "TableMove" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 
0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: true } Table { Name: "TableMove" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 DefaultFromSequence: "myseq" NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 IsBackup: false Sequences { Name: "myseq" PathId { OwnerId: 72057594046678944 LocalId: 5 } Version: 2 SequenceShard: 72075186233409546 MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 Cache: 1 Increment: 1 Cycle: false DataType: "Int64" } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:49.315607Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove/myseq" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:06:49.315862Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove/myseq" took 277us result status StatusSuccess 2025-06-24T21:06:49.316042Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove/myseq" PathDescription { Self { Name: "myseq" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeSequence CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SequenceVersion: 2 } ChildrenExist: false } 
DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SequenceDescription { Name: "myseq" PathId { OwnerId: 72057594046678944 LocalId: 5 } Version: 2 SequenceShard: 72075186233409546 MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 Cache: 1 Increment: 1 Cycle: false DataType: "Int64" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::Index [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:47.748170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:47.748263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:47.748323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:47.748364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:47.749020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:47.749066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:47.749153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:47.749254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:47.750013Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:47.751416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:47.818046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:47.818095Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:47.831656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:47.831964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:47.832109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:47.839940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:47.840211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:47.841810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:47.842120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:47.847070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:47.848110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:47.855834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:47.855896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:47.856780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:47.856830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:47.856903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:47.856987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.862550Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:47.978358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-24T21:06:47.978545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.978681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:47.978709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:47.978898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:47.978947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:47.980770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:47.980911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:47.981028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.981078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:47.981104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:47.981126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:47.982411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.982450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:47.982497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:47.983687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.983727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.983781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:47.983829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:47.991827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:47.993287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:47.993479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:47.994135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:47.994233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:47.994270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:47.994445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:47.994479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:47.994622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:47.994701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:47.996160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:47.996193Z node 1 :FLAT_TX_SCHEMESHARD ... 
57594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:49.621323Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove/Sync" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:06:49.621457Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove/Sync" took 148us result status StatusSuccess 2025-06-24T21:06:49.622017Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove/Sync" PathDescription { Self { Name: "Sync" PathId: 10 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 11 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 10 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 
ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "Sync" LocalPathId: 10 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value0" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value0" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 10 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:49.622501Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove/Async" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:06:49.622638Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove/Async" took 145us result status StatusSuccess 2025-06-24T21:06:49.623083Z node 
2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove/Async" PathDescription { Self { Name: "Async" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 9 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 8 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "Async" LocalPathId: 8 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "value1" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value1" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { 
Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardMoveTest::OneTable [GOOD] >> VectorIndexBuildTest::RecreatedColumns [GOOD] >> VectorIndexBuildTest::PrefixedDuplicates ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::OneTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:47.748169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:47.748260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:47.748298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:47.748335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:47.749006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:47.749047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:47.749135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:47.749290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:47.750057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:47.751405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:47.833778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:47.833830Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:47.848600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:47.848938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:47.849085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:47.856084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:47.856259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:47.856832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:47.857071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:47.859810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:47.859961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:47.860924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:47.860978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:47.861180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:47.861226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:47.861271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:47.861343Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.867390Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:47.990028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" 
StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:47.990194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.990358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:47.990386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:47.990546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:47.990604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:47.992164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:47.992321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:47.992435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.992486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:47.992512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:47.992536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:47.993810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.993853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:47.993891Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:47.994954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.994988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:47.995024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState 
leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:47.995070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:47.997489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:47.998627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:47.998755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:47.999350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:47.999440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:47.999477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:47.999662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:47.999697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:47.999821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:47.999866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:48.001120Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:48.001154Z node 1 :FLAT_TX_SCHEMESHARD ... 
-24T21:06:50.110560Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 108, path id: 4 2025-06-24T21:06:50.110961Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-24T21:06:50.111013Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 108:0 ProgressState at tablet: 72057594046678944 2025-06-24T21:06:50.111091Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-24T21:06:50.111133Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 108:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:06:50.111204Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 108:0 129 -> 240 2025-06-24T21:06:50.111953Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 23 PathOwnerId: 72057594046678944, cookie: 108 2025-06-24T21:06:50.112049Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 23 PathOwnerId: 72057594046678944, cookie: 108 2025-06-24T21:06:50.112085Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 108 2025-06-24T21:06:50.112121Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 108, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 23 2025-06-24T21:06:50.112166Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:06:50.112789Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 108 2025-06-24T21:06:50.112861Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 108 2025-06-24T21:06:50.112889Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 108 2025-06-24T21:06:50.112915Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 108, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-24T21:06:50.112942Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-24T21:06:50.113010Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 108, ready parts: 0/1, is published: true 2025-06-24T21:06:50.115722Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-24T21:06:50.115781Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 108:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:50.116078Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:06:50.116191Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#108:0 progress is 1/1 2025-06-24T21:06:50.116231Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-24T21:06:50.116269Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#108:0 progress is 1/1 2025-06-24T21:06:50.116301Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-24T21:06:50.116336Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 108, ready parts: 1/1, is published: true 2025-06-24T21:06:50.116398Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:340:2317] message: TxId: 108 2025-06-24T21:06:50.116447Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-24T21:06:50.116485Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 108:0 2025-06-24T21:06:50.116515Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 108:0 2025-06-24T21:06:50.116593Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-24T21:06:50.117509Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 108 2025-06-24T21:06:50.117750Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 108 2025-06-24T21:06:50.118882Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-24T21:06:50.118931Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [2:840:2795] TestWaitNotification: OK eventTxId 108 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted 2025-06-24T21:06:50.120645Z node 2 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-24T21:06:50.120723Z node 2 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 Deleted tabletId 72075186233409547 2025-06-24T21:06:50.139651Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: 
Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 8589936886 } TabletId: 72075186233409546 State: 4 2025-06-24T21:06:50.139747Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409546, state: Offline, at schemeshard: 72057594046678944 2025-06-24T21:06:50.141385Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:06:50.141732Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-24T21:06:50.141907Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:50.142113Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-24T21:06:50.142307Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:06:50.142351Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-24T21:06:50.142399Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 Forgetting tablet 72075186233409546 2025-06-24T21:06:50.146225Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:06:50.146283Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T21:06:50.146568Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 Deleted tabletId 72075186233409546 2025-06-24T21:06:50.147061Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:50.147210Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 175us result status StatusSuccess 2025-06-24T21:06:50.147493Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 23 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 23 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 21 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardMoveTest::MoveMigratedTable >> VectorIndexBuildTest::ServerLessDB-smallScanBuffer-true [GOOD] >> VectorIndexBuildTest::Shard_Build_Error >> TSchemeShardMoveTest::ResetCachedPath >> TSchemeShardMoveTest::TwoTables >> TSchemeShardMoveTest::Replace [GOOD] >> VectorIndexBuildTest::CreateAndDrop [GOOD] >> VectorIndexBuildTest::CommonDB >> TSchemeShardMoveTest::MoveMigratedTable [GOOD] >> TSchemeShardMoveTest::MoveOldTableWithIndex >> TSchemeShardMoveTest::MoveIndexSameDst ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::Replace [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:50.266087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:50.266153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:50.266178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:50.266206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:50.266237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:50.266256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:50.266311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:50.266360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:50.266913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:50.267165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:50.323022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:50.323067Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:50.333748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:50.334038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:50.334175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:50.340084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:50.340260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:50.340723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:50.340978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:50.343692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:50.343877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:50.344712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:50.344758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:50.344928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:50.344971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:50.345005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:50.345066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:50.350601Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary 
subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:50.443534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:50.443729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:50.443882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:50.443913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:50.444064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:50.444159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:50.446038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:50.446190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:50.446328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:50.446373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:50.446401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:50.446424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:50.448001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:50.448044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:50.448080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:50.449268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-24T21:06:50.449307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:50.449342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:50.449378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:50.451831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:50.453149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:50.453282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:50.453884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:50.453984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:50.454021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:50.454217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:50.454264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:50.454389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:50.454434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:50.455857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:50.455893Z node 1 :FLAT_TX_SCHEMESHARD ... 
PathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 12] was 3 2025-06-24T21:06:51.695510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 2 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:06:51.695539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 16], at schemeshard: 72057594046678944 2025-06-24T21:06:51.695594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 15] was 1 2025-06-24T21:06:51.695621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 15], at schemeshard: 72057594046678944 2025-06-24T21:06:51.695653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 12] was 2 2025-06-24T21:06:51.695672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 14], at schemeshard: 72057594046678944 2025-06-24T21:06:51.695690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 13] was 1 2025-06-24T21:06:51.695705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 13], at schemeshard: 72057594046678944 2025-06-24T21:06:51.695722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 12] was 1 2025-06-24T21:06:51.695736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 12], at schemeshard: 72057594046678944 2025-06-24T21:06:51.695766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:06:51.696952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T21:06:51.697006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409546 2025-06-24T21:06:51.698681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:06:51.698726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T21:06:51.698821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:06:51.698874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409548 2025-06-24T21:06:51.699055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 5 
paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 105 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted wait until 72075186233409548 is deleted 2025-06-24T21:06:51.699522Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-24T21:06:51.699594Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 2025-06-24T21:06:51.699630Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409548 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 Deleted tabletId 72075186233409548 2025-06-24T21:06:51.699971Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Src" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:51.700106Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Src" took 139us result status StatusPathDoesNotExist 2025-06-24T21:06:51.700259Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Src\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Src" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:06:51.700868Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Dst" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:51.701061Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Dst" took 218us result status StatusSuccess 2025-06-24T21:06:51.701343Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Dst" PathDescription { Self { Name: "Dst" PathId: 22 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 105 CreateStep: 5000006 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: 
"Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "Async" LocalPathId: 23 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "value1" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableIndexes { Name: "Sync" LocalPathId: 25 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value0" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 22 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:51.701957Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:51.702086Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 116us result status StatusSuccess 2025-06-24T21:06:51.702358Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 28 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 28 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 26 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 22 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 105 CreateStep: 5000006 
ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardMoveTest::ResetCachedPath [GOOD] >> TSchemeShardMoveTest::TwoTables [GOOD] >> VectorIndexBuildTest::Shard_Build_Error [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::ResetCachedPath [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:51.576062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:51.576130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:51.576159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:51.576188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:51.576219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:51.576247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:51.576298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:51.576348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:51.576892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:51.577117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:51.640745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:51.640793Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:51.651397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:51.651690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:51.651830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:51.657342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:51.657493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:51.657971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:51.658200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:51.660295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:51.660443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:51.661254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:51.661296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:51.661445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:51.661475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:51.661505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:51.661578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:51.666183Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:51.765025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" 
StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:51.765237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:51.765406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:51.765437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:51.765611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:51.765702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:51.767837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:51.767994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:51.768135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:51.768178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:51.768208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:51.768231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:51.769712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:51.769778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:51.769812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:51.771014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:51.771049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:51.771099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState 
leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:51.771136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:51.773492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:51.774754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:51.774898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:51.775552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:51.775668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:51.775705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:51.775907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:51.775940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:51.776063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:51.776112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:51.777439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:51.777474Z node 1 :FLAT_TX_SCHEMESHARD ... 
ropose operationId# 105:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:52.410502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 105 ready parts: 1/1 2025-06-24T21:06:52.410620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } AffectedSet { TabletId: 72075186233409549 Flags: 2 } ExecLevel: 0 TxId: 105 MinStep: 1 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:52.412067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 105:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:105 msg type: 269090816 2025-06-24T21:06:52.412242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 105, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 105 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 105 at step: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72075186233409549 for txId: 105 at step: 5000004 2025-06-24T21:06:52.412610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:52.412751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 105 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:52.412823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_table.cpp:359: TAlterTable TPropose operationId# 105:0 HandleReply TEvOperationPlan, operationId: 105:0, stepId: 5000004, at schemeshard: 72057594046678944 2025-06-24T21:06:52.413138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 105:0 128 -> 129 2025-06-24T21:06:52.413267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000004 2025-06-24T21:06:52.416714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:52.416747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:06:52.416931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:52.416962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 105, path id: 3 2025-06-24T21:06:52.417025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 
72057594046678944 2025-06-24T21:06:52.417064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 105:0 ProgressState at tablet: 72057594046678944 2025-06-24T21:06:52.417739Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T21:06:52.417831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T21:06:52.417863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-24T21:06:52.417900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 8 2025-06-24T21:06:52.417933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:06:52.418003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 105, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 105 2025-06-24T21:06:52.420398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 105 Step: 5000004 OrderId: 105 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 963 } } CommitVersion { Step: 5000004 TxId: 105 } 2025-06-24T21:06:52.420427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 105, tablet: 72075186233409549, partId: 0 2025-06-24T21:06:52.420515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 105:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 105 Step: 5000004 OrderId: 105 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 963 } } CommitVersion { Step: 5000004 TxId: 105 } 2025-06-24T21:06:52.420605Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 105 Step: 5000004 OrderId: 105 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 963 } } CommitVersion { Step: 5000004 TxId: 105 } 2025-06-24T21:06:52.421254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 679 RawX2: 4294969911 } Origin: 72075186233409549 State: 2 TxId: 105 Step: 0 Generation: 2 2025-06-24T21:06:52.421288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 105, tablet: 72075186233409549, partId: 0 2025-06-24T21:06:52.421416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 105:0, at schemeshard: 72057594046678944, message: Source { RawX1: 679 RawX2: 4294969911 } Origin: 72075186233409549 State: 2 TxId: 105 Step: 0 Generation: 2 2025-06-24T21:06:52.421458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 105:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:06:52.421524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 105:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 679 RawX2: 4294969911 } Origin: 72075186233409549 State: 2 TxId: 105 Step: 0 Generation: 2 2025-06-24T21:06:52.421590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 105:0, shardIdx: 72057594046678944:4, shard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:52.421618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-24T21:06:52.421642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 105:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-06-24T21:06:52.421681Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 105:0 129 -> 240 2025-06-24T21:06:52.422247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T21:06:52.423696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-24T21:06:52.424295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-24T21:06:52.424412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-24T21:06:52.424446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 105:0 ProgressState 2025-06-24T21:06:52.424519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#105:0 progress is 1/1 2025-06-24T21:06:52.424542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-24T21:06:52.424576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#105:0 progress is 1/1 2025-06-24T21:06:52.424601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-24T21:06:52.424624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 105, ready parts: 1/1, is published: true 
2025-06-24T21:06:52.424669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:339:2316] message: TxId: 105 2025-06-24T21:06:52.424701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-24T21:06:52.424725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 105:0 2025-06-24T21:06:52.424748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 105:0 2025-06-24T21:06:52.424834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:06:52.426098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-24T21:06:52.426152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [1:846:2763] TestWaitNotification: OK eventTxId 105 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::TwoTables [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:51.792574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:51.792633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:51.792658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:51.792684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:51.792716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:51.792736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:51.792776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:51.792830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:51.793376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:51.793615Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:51.847225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:51.847268Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:51.858438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:51.858738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:51.858881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:51.864790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:51.864976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:51.865523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:51.865789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:51.868109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:51.868254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:51.869104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:51.869159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:51.869341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:51.869375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:51.869410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:51.869478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:51.874734Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:51.967100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:51.967342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: 
//MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:51.967530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:51.967570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:51.967746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:51.967841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:51.969765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:51.969923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:51.970054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:51.970103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:51.970135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:51.970165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:51.971616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:51.971673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:51.971714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:51.972884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:51.972920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:51.972961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:51.973003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:51.981227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose 
send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:51.983035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:51.983245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:51.983952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:51.984074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:51.984114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:51.984333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:51.984381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:51.984513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:51.984566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:51.986536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:51.986594Z node 1 :FLAT_TX_SCHEMESHARD ... 
72165Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:52.372316Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table1" took 152us result status StatusPathDoesNotExist 2025-06-24T21:06:52.372432Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Table1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:06:52.372714Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:52.372851Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove1" took 142us result status StatusSuccess 2025-06-24T21:06:52.373147Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove1" PathDescription { Self { Name: "TableMove1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "TableMove1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 
Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:52.373640Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:52.373767Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table2" took 104us result status StatusPathDoesNotExist 2025-06-24T21:06:52.373856Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table2\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Table2" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:06:52.374205Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:52.374327Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove2" took 127us result status StatusSuccess 2025-06-24T21:06:52.374569Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove2" PathDescription { Self { Name: "TableMove2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } 
ChildrenExist: false } Table { Name: "TableMove2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:52.375060Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:52.375197Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 101us result status StatusSuccess 2025-06-24T21:06:52.375495Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 15 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 15 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 13 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "TableMove1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "TableMove2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 
ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardMoveTest::MoveOldTableWithIndex [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::Shard_Build_Error [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:34.196573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:34.196636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:34.196666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:34.196692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:34.196728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:34.196778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:34.196820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:34.196875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-06-24T21:06:34.197463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:34.197763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:34.252550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:34.252596Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:34.263668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:34.263976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:34.264126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:34.270084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:34.270246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:34.270742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:34.270987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:34.273200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:34.273376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:34.274313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:34.274371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:34.274540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:34.274582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:34.274616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:34.274705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:34.279962Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:34.370635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: 
"pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:34.370830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:34.371074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:34.371118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:34.371329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:34.371405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:34.373215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:34.373384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:34.373551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:34.373601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:34.373636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:34.373682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:34.375206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:34.375259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:34.375289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:34.376444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:34.376477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:34.376517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:34.376566Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:34.379294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:34.380709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:34.380849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:34.381612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:34.381726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:34.381760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:34.381989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:34.382042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:34.382179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:34.382240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:34.383770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:34.383805Z node 1 :FLAT_TX_SCHEMESHARD ... 
shard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-24T21:06:52.522250Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186233409547 state Offline 2025-06-24T21:06:52.522404Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [3:1153:3058], Recipient [3:459:2423]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046678944 ClientId: [3:1153:3058] ServerId: [3:1155:3060] } 2025-06-24T21:06:52.522426Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T21:06:52.522495Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:06:52.522550Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552133, Sender [3:1068:2985], Recipient [3:464:2426]: NKikimrTxDataShard.TEvStateChangedResult TabletId: 72057594046678944 State: 4 2025-06-24T21:06:52.522576Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-24T21:06:52.522603Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186233409548 state Offline 2025-06-24T21:06:52.522948Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [3:1154:3059], Recipient [3:464:2426]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046678944 ClientId: [3:1154:3059] ServerId: [3:1156:3061] } 2025-06-24T21:06:52.522988Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T21:06:52.523207Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:5 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:06:52.523399Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552133, Sender [3:1068:2985], Recipient [3:752:2683]: NKikimrTxDataShard.TEvStateChangedResult TabletId: 72057594046678944 State: 4 2025-06-24T21:06:52.523433Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-24T21:06:52.523464Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186233409550 state Offline 2025-06-24T21:06:52.523645Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [3:1158:3063], Recipient [3:752:2683]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046678944 ClientId: [3:1158:3063] ServerId: [3:1160:3065] } 2025-06-24T21:06:52.523683Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T21:06:52.523792Z node 3 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409547 2025-06-24T21:06:52.523892Z node 3 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409548 2025-06-24T21:06:52.523968Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 
ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-24T21:06:52.524161Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-24T21:06:52.524450Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T21:06:52.524546Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 Forgetting tablet 72075186233409547 2025-06-24T21:06:52.524685Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [3:444:2412], Recipient [3:459:2423]: NKikimr::TEvTablet::TEvTabletDead 2025-06-24T21:06:52.524857Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186233409547 2025-06-24T21:06:52.524934Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186233409547 2025-06-24T21:06:52.525870Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [3:446:2413], Recipient [3:464:2426]: NKikimr::TEvTablet::TEvTabletDead 2025-06-24T21:06:52.526084Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186233409548 2025-06-24T21:06:52.526142Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186233409548 Forgetting tablet 72075186233409548 2025-06-24T21:06:52.527514Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 2 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:06:52.527545Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-24T21:06:52.527596Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:06:52.527622Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-24T21:06:52.527644Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:06:52.527804Z node 3 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 5 TxId_Deprecated: 5 TabletID: 72075186233409550 2025-06-24T21:06:52.528107Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [3:743:2676], Recipient [3:752:2683]: NKikimr::TEvTablet::TEvTabletDead 2025-06-24T21:06:52.528238Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186233409550 2025-06-24T21:06:52.528287Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186233409550 2025-06-24T21:06:52.529165Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 
72057594046678944 2025-06-24T21:06:52.529309Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 1 Forgetting tablet 72075186233409550 2025-06-24T21:06:52.531043Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T21:06:52.531077Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409547 2025-06-24T21:06:52.531835Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:06:52.531861Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409548 2025-06-24T21:06:52.531935Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-24T21:06:52.532089Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:06:52.532137Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-24T21:06:52.532201Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T21:06:52.532239Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:06:52.532262Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:06:52.533048Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-24T21:06:52.533101Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-24T21:06:52.534243Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:06:52.585888Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-06-24T21:06:52.586185Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 102 Issues { message: "One of the shards report BUILD_ERROR
: Error: Shard or requested range is empty\n
: Error: Datashard test fail\n at Filling stage, process has to be canceled, shardId: 72075186233409549, shardIdx: 72057594046678944:4" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:4 Status: BUILD_ERROR UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n
: Error: Datashard test fail\n SeqNoRound: 0 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } State: STATE_REJECTED Settings { source_path: "/MyRoot/vectors" index { name: "index1" index_columns: "embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 0 } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 Issues { message: "One of the shards report BUILD_ERROR
: Error: Shard or requested range is empty\n
: Error: Datashard test fail\n at Filling stage, process has to be canceled, shardId: 72075186233409549, shardIdx: 72057594046678944:4" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:4 Status: BUILD_ERROR UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n
: Error: Datashard test fail\n SeqNoRound: 0 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } State: STATE_REJECTED Settings { source_path: "/MyRoot/vectors" index { name: "index1" index_columns: "embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 0 } >> TSchemeShardMoveTest::MoveIndexSameDst [GOOD] >> TSchemeShardMoveTest::MoveIntoBuildingIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::MoveOldTableWithIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:51.272660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:51.272736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:51.272763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:51.272790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:51.272827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:51.272858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:51.272892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:51.272964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:51.273494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:51.273732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:51.327195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:51.327244Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:51.337619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:51.337915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:51.338048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as 
Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:51.343043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:51.343263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:51.343878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:51.344179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:51.346813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:51.346973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:51.347944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:51.347987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:51.348164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:51.348197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:51.348243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:51.348324Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:51.353580Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:51.437344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:51.437529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:51.437709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:51.437741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:51.437949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 
2025-06-24T21:06:51.438009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:51.439884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:51.440054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:51.440189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:51.440224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:51.440252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:51.440274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:51.441753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:51.441806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:51.441854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:51.443116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:51.443162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:51.443190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:51.443239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:51.445830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:51.447107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:51.447327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 
72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:51.447961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:51.448059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:51.448097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:51.448319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:51.448367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:51.448513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:51.448587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:51.450158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:51.450201Z node 1 :FLAT_TX_SCHEMESHARD ... 
ESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 102:2 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:06:52.919702Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 102:2 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 325 RawX2: 8589936899 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:06:52.919763Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:2, shardIdx: 72057594046678944:2, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:52.919801Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-24T21:06:52.919835Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:2, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:06:52.919870Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:2 129 -> 240 2025-06-24T21:06:52.920785Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 328 RawX2: 8589936901 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:06:52.920819Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-24T21:06:52.920892Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 328 RawX2: 8589936901 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:06:52.920922Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:06:52.920972Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 328 RawX2: 8589936901 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:06:52.921009Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:52.921034Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:06:52.921057Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:06:52.921085Z 
node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T21:06:52.922299Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-24T21:06:52.922695Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:06:52.924233Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-24T21:06:52.924529Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:06:52.924680Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-24T21:06:52.924716Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_move_table.cpp:564: TMoveTable TDone, operationId: 102:2 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:52.924764Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_move_table.cpp:574: TMoveTable TDone, operationId: 102:2 ProgressState, SourcePathId: [OwnerId: 72057594046678944, LocalPathId: 4], TargetPathId: [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-24T21:06:52.924870Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:2 progress is 2/3 2025-06-24T21:06:52.924916Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 2/3 2025-06-24T21:06:52.924948Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:2 progress is 2/3 2025-06-24T21:06:52.924976Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 2/3 2025-06-24T21:06:52.925006Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 2/3, is published: true 2025-06-24T21:06:52.925240Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:06:52.925271Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_move_table.cpp:564: TMoveTable TDone, operationId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:52.925305Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_move_table.cpp:574: TMoveTable TDone, operationId: 102:0 ProgressState, SourcePathId: [OwnerId: 72057594046678944, LocalPathId: 2], TargetPathId: [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-24T21:06:52.925352Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 3/3 2025-06-24T21:06:52.925370Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 3/3 2025-06-24T21:06:52.925394Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 3/3 2025-06-24T21:06:52.925412Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 
3/3 2025-06-24T21:06:52.925431Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 3/3, is published: true 2025-06-24T21:06:52.925486Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:384:2350] message: TxId: 102 2025-06-24T21:06:52.925529Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 3/3 2025-06-24T21:06:52.925587Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:06:52.925616Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:06:52.925718Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-24T21:06:52.925755Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:06:52.925791Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:1 2025-06-24T21:06:52.925815Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:1 2025-06-24T21:06:52.925847Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-24T21:06:52.925866Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:06:52.925883Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:2 2025-06-24T21:06:52.925898Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:2 2025-06-24T21:06:52.925931Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-24T21:06:52.925947Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-24T21:06:52.926243Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:06:52.926280Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-24T21:06:52.926365Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T21:06:52.926402Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:06:52.926427Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: 
DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:06:52.926447Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:06:52.926470Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:06:52.928785Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:06:52.928834Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:481:2440] 2025-06-24T21:06:52.928918Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 3 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 >> ConvertMiniKQLValueToYdbValueTest::Void [GOOD] >> ConvertMiniKQLValueToYdbValueTest::Struct [GOOD] >> ConvertMiniKQLValueToYdbValueTest::Tuple [GOOD] >> ConvertMiniKQLValueToYdbValueTest::Variant [GOOD] >> ConvertTableDescription::StorageSettings >> ConvertYdbValueToMiniKQLValueTest::SimpleBool [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleBoolTypeMissmatch [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleDecimal [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleDecimalTypeMissmatch [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalString [GOOD] >> ConvertYdbValueToMiniKQLValueTest::PgValue [GOOD] >> ConvertTableDescription::StorageSettings [GOOD] >> ConvertTableDescription::ColumnFamilies [GOOD] >> ConvertYdbPermissionNameToACLAttrs::SimpleConvertGood [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::TTzDateTime [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::TTzTimeStamp [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::UuidType [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::VariantTuple [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::VariantStruct [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Void [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Tuple [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertYdbValueToMiniKQLValueTest::PgValue [GOOD] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertYdbPermissionNameToACLAttrs::SimpleConvertGood [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleBool [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalString [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalEmpty [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalOptionalEmpty [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalOptionalEmpty2 [GOOD] >> ConvertMiniKQLValueToYdbValueTest::List [GOOD] >> ConvertMiniKQLValueToYdbValueTest::Dict [GOOD] >> TSchemeShardMoveTest::MoveIntoBuildingIndex [GOOD] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLTypeToYdbTypeTest::Tuple [GOOD] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLValueToYdbValueTest::Dict [GOOD] >> CellsFromTupleTest::CellsFromTupleSuccess [GOOD] >> CellsFromTupleTest::CellsFromTupleSuccessPg >> CellsFromTupleTest::CellsFromTupleSuccessPg [GOOD] >> CellsFromTupleTest::CellsFromTupleFails >> CellsFromTupleTest::CellsFromTupleFails [GOOD] >> 
CellsFromTupleTest::CellsFromTupleFailsPg [GOOD] >> CompressionTests::Zstd [GOOD] >> CompressionTests::Unsupported [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::DecimalType [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleInt32 [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleInt64 [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleTzDate [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleTzDateTime [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleTzTimeStamp [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleDecimal [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleUuid [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::MoveIntoBuildingIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:52.661881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:52.661936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:52.661975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:52.662001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:52.662032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:52.662059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:52.662106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:52.662158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:52.662719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:52.662943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:52.719469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:52.719516Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:52.732875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:52.733289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:52.733489Z node 
1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:52.740684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:52.740896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:52.741538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:52.741828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:52.744590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:52.744764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:52.745778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:52.745836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:52.746022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:52.746076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:52.746121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:52.746221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:52.751278Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:52.858230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:52.858482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:52.858643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:52.858671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:52.858866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, 
propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:52.858952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:52.860796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:52.860957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:52.861089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:52.861127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:52.861153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:52.861175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:52.862673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:52.862714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:52.862755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:52.863978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:52.864018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:52.864053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:52.864087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:52.866502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:52.867942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:52.868078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: 
TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:52.868690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:52.868796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:52.868831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:52.869040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:52.869080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:52.869203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:52.869273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:52.870855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:52.870907Z node 1 :FLAT_TX_SCHEMESHARD ... 
saction is registered, txId: 281474976710760, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 281474976710760 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710760 at step: 5000006 2025-06-24T21:06:54.308773Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000006, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:54.308827Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710760 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 8589936750 } } Step: 5000006 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:54.308861Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710760:0 HandleReply TEvOperationPlan: step# 5000006 2025-06-24T21:06:54.308890Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710760:0 128 -> 240 2025-06-24T21:06:54.310039Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710760:0, at schemeshard: 72057594046678944 2025-06-24T21:06:54.310071Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 281474976710760:0 ProgressState 2025-06-24T21:06:54.310121Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-24T21:06:54.310152Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-24T21:06:54.310176Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-24T21:06:54.310194Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-24T21:06:54.310214Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-06-24T21:06:54.310247Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:127:2151] message: TxId: 281474976710760 2025-06-24T21:06:54.310270Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-24T21:06:54.310289Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710760:0 2025-06-24T21:06:54.310305Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710760:0 2025-06-24T21:06:54.310340Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 FAKE_COORDINATOR: Erasing txId 281474976710760 2025-06-24T21:06:54.311367Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-06-24T21:06:54.311415Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:6828: Message: TxId: 281474976710760 2025-06-24T21:06:54.311451Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 102, txId# 281474976710760 2025-06-24T21:06:54.311512Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: Sync, IndexColumn: value0, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:455:2414], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, txId# 281474976710760 2025-06-24T21:06:54.312409Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking 2025-06-24T21:06:54.312458Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: Sync, IndexColumn: value0, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:455:2414], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T21:06:54.312485Z node 2 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-24T21:06:54.313385Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done 2025-06-24T21:06:54.313430Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: Sync, IndexColumn: value0, State: Done, IsBroken: 0, 
IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:455:2414], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T21:06:54.313456Z node 2 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:334: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-06-24T21:06:54.313517Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:06:54.313546Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:636:2583] TestWaitNotification: OK eventTxId 102 2025-06-24T21:06:54.314021Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:54.314190Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 193us result status StatusSuccess 2025-06-24T21:06:54.314471Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "SomeIndex" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value1" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableIndexes { Name: "Sync" LocalPathId: 5 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value0" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 
PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLTypeToYdbTypeTest::DecimalType [GOOD] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLValueToYdbValueTest::SimpleUuid [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleInt32 [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleTzDate [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleTzDateTime [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleTzTimeStamp [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleInt32TypeMissmatch [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleUuid [GOOD] |93.7%| [TA] $(B)/ydb/core/tx/schemeshard/ut_move/test-results/unittest/{meta.json ... results_accumulator.log} |93.7%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_move/test-results/unittest/{meta.json ... 
results_accumulator.log} >> ConvertMiniKQLTypeToYdbTypeTest::SimpleType [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::TTzDate [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Optional [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::List [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Struct [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Dict [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::PgType [GOOD] >> ConvertYdbPermissionNameToACLAttrs::TestEqualGranularAndDeprecatedAcl [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalEmpty [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalOptionalEmpty [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalOptionalEmpty2 [GOOD] >> ConvertYdbValueToMiniKQLValueTest::List [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Dict [GOOD] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertYdbValueToMiniKQLValueTest::SimpleUuid [GOOD] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLTypeToYdbTypeTest::PgType [GOOD] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertYdbValueToMiniKQLValueTest::Dict [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Void [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleUuidTypeMissmatch [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Struct [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Tuple [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Variant [GOOD] >> ConvertYdbValueToMiniKQLValueTest::VariantIndexUnderflow [GOOD] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertYdbValueToMiniKQLValueTest::VariantIndexUnderflow [GOOD] |93.7%| [TA] $(B)/ydb/core/ydb_convert/ut/test-results/unittest/{meta.json ... results_accumulator.log} |93.7%| [TA] {RESULT} $(B)/ydb/core/ydb_convert/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TExternalDataSourceTest::ReplaceExternalDataStoreShouldFailIfEntityOfAnotherTypeWithSameNameExists >> TExternalDataSourceTest::SchemeErrors >> TExternalDataSourceTest::CreateExternalDataSource >> TExternalDataSourceTest::ParallelCreateSameExternalDataSource >> TExternalDataSourceTest::CreateExternalDataSourceWithProperties >> TExternalDataSourceTest::DropTableTwice >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExists >> TExternalDataSourceTest::ReadOnlyMode >> IndexBuildTest::RejectsCancel [GOOD] >> BasicUsage::FallbackToSingleDbAfterBadRequest [GOOD] >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExists [GOOD] >> TExternalDataSourceTest::SchemeErrors [GOOD] >> TExternalDataSourceTest::CreateExternalDataSource [GOOD] >> TExternalDataSourceTest::CreateExternalDataSourceShouldFailIfSuchEntityAlreadyExists >> TExternalDataSourceTest::ParallelCreateSameExternalDataSource [GOOD] >> TExternalDataSourceTest::PreventDeletionOfDependentDataSources >> TExternalDataSourceTest::CreateExternalDataSourceWithProperties [GOOD] >> TExternalDataSourceTest::DropExternalDataSource >> TExternalDataSourceTest::DropTableTwice [GOOD] >> TExternalDataSourceTest::ParallelCreateExternalDataSource >> TExternalDataSourceTest::ReplaceExternalDataStoreShouldFailIfEntityOfAnotherTypeWithSameNameExists [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::RejectsCancel [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:28.172975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:28.173051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:28.173092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:28.173120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:28.173928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:28.173963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:28.174009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:28.174072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:28.174670Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:28.175870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:28.238950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:28.239002Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:28.251729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:28.252057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:28.252185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:28.259217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:28.260046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:28.261657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:28.262828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:28.269745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:28.270683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:28.276740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:28.276811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:28.277040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:28.277079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:28.277152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:28.277231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.282435Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:28.375709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-24T21:06:28.375886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.376088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:28.376131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:28.376310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:28.376371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:28.378246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:28.378425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:28.378570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.378619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:28.378650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:28.378677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:28.380255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.380302Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:28.380332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:28.381527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.381559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.381592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:28.381637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:28.384502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:28.385839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:28.386020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:28.387497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:28.387613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:28.387663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:28.388744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:28.388785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:28.388947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:28.389014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:28.390565Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:28.390599Z node 1 :FLAT_TX_SCHEMESHARD ... max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { seconds: 30 } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 Issues { message: "TShardStatus { ShardIdx: 72057594046678944:2 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:3 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:4 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:5 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:6 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:7 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:8 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:9 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:10 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } State: STATE_DONE Settings { source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { seconds: 30 } } 2025-06-24T21:06:58.706081Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:58.706316Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 277us result status StatusSuccess 2025-06-24T21:06:58.706670Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "index1" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 10 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 11 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 
10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:58.708408Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/index1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:06:58.708657Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/index1" took 268us result status StatusSuccess 2025-06-24T21:06:58.709269Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/index1" PathDescription { Self { Name: "index1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 11 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "index1" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "index" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 
InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TExternalDataSourceTest::ReadOnlyMode [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::SchemeErrors [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-24T21:06:59.136245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:59.136318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:59.136353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:59.136385Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:59.136417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:59.136442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:59.136501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:59.136559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:59.137148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:59.138546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:59.201799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:06:59.201855Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:59.202629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:59.214315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:59.214546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:59.214665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:59.219440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:59.219633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:59.222197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.223106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:59.227492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.228258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:59.233655Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:59.233747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.233838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 
2025-06-24T21:06:59.233888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:59.233943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:59.234169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.240743Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:06:59.333614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:59.333787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.333946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:59.333977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:59.334143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:59.334190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:59.335983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.336133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:59.336301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.336363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:59.336402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:59.336437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 
2025-06-24T21:06:59.337929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.337985Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:59.338015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:59.339167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.339207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.339239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.339277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:59.341735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:59.342979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:59.343168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:59.343885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.343989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.344042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.344224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:59.344266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.344413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:59.344479Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06: ... 69Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 126:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" } 2025-06-24T21:06:59.408338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:232: [72057594046678944] TCreateExternalDataSource Propose: opId# 126:0, path# /MyRoot/DirA/MyExternalDataSource 2025-06-24T21:06:59.408474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 126:1, propose status:StatusSchemeError, reason: Authorization method isn't specified, at schemeshard: 72057594046678944 2025-06-24T21:06:59.410407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 126, response: Status: StatusSchemeError Reason: "Authorization method isn\'t specified" TxId: 126 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.410640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 126, database: /MyRoot, subject: , status: StatusSchemeError, reason: Authorization method isn't specified, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/DirA/MyExternalDataSource TestModificationResult got TxId: 126, wait until txId: 126 TestModificationResults wait txId: 127 2025-06-24T21:06:59.413074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" Auth { None { } } } } TxId: 127 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:59.413326Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 127:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: 
"/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" Auth { None { } } } 2025-06-24T21:06:59.413389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:232: [72057594046678944] TCreateExternalDataSource Propose: opId# 127:0, path# /MyRoot/DirA/MyExternalDataSource 2025-06-24T21:06:59.413497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 127:1, propose status:StatusSchemeError, reason: Maximum length of location must be less or equal equal to 1000 but got 1001, at schemeshard: 72057594046678944 2025-06-24T21:06:59.415030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 127, response: Status: StatusSchemeError Reason: "Maximum length of location must be less or equal equal to 1000 but got 1001" TxId: 127 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.415226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 127, database: /MyRoot, subject: , status: StatusSchemeError, reason: Maximum length of location must be less or equal equal to 1000 but got 1001, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/DirA/MyExternalDataSource TestModificationResult got TxId: 127, wait until txId: 127 TestModificationResults wait txId: 128 2025-06-24T21:06:59.417283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Installation: 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" Auth { None { } } } } TxId: 128 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:59.417521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 128:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Installation: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" Auth { None { } } } 2025-06-24T21:06:59.417595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:232: [72057594046678944] TCreateExternalDataSource Propose: opId# 128:0, path# /MyRoot/DirA/MyExternalDataSource 2025-06-24T21:06:59.417737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 128:1, propose status:StatusSchemeError, reason: Maximum length of installation must be less or equal equal to 1000 but got 1001, at schemeshard: 72057594046678944 2025-06-24T21:06:59.419755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 128, response: Status: StatusSchemeError Reason: "Maximum length of installation must be less or equal equal to 1000 but got 1001" TxId: 128 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.420020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 128, database: /MyRoot, 
subject: , status: StatusSchemeError, reason: Maximum length of installation must be less or equal equal to 1000 but got 1001, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/DirA/MyExternalDataSource TestModificationResult got TxId: 128, wait until txId: 128 TestModificationResults wait txId: 129 2025-06-24T21:06:59.422288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } } } TxId: 129 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:59.422462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 129:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } } 2025-06-24T21:06:59.422588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:232: [72057594046678944] TCreateExternalDataSource Propose: opId# 129:0, path# /MyRoot/DirA/ 2025-06-24T21:06:59.422708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 129:1, propose status:StatusSchemeError, reason: Check failed: path: '/MyRoot/DirA/', error: path part shouldn't be empty, at schemeshard: 72057594046678944 2025-06-24T21:06:59.424450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 129, response: Status: StatusSchemeError Reason: "Check failed: path: \'/MyRoot/DirA/\', error: path part shouldn\'t be empty" TxId: 129 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.424639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 129, database: /MyRoot, subject: , status: StatusSchemeError, reason: Check failed: path: '/MyRoot/DirA/', error: path part shouldn't be empty, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/DirA/ TestModificationResult got TxId: 129, wait until txId: 129 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-24T21:06:59.136224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:59.136287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:59.136316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, 
StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:59.136346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:59.136376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:59.136397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:59.136491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:59.136545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:59.137065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:59.138549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:59.199886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:06:59.199931Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:59.200510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:59.213949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:59.214179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:59.214292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:59.219361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:59.219548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:59.222221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.223105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:59.227409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.228213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:59.233527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:59.233593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.233676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:59.233711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:59.233768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:59.233898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.238635Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:06:59.326265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:59.326418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.326559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:59.326587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:59.327453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:59.327513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:59.329847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.330628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:59.330812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.330863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:59.330898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:59.330925Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:59.332262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.332316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:59.332346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:59.333643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.333694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.333750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.333808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:59.341948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:59.343425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:59.343592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:59.344157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.344249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.344287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.344463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:59.344503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.344639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path 
for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:59.344696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06: ... HEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.401290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_external_data_source.cpp:35: [72057594046678944] TAlterExternalDataSource TPropose, operationId: 102:0HandleReply TEvOperationPlan: step# 5000003 2025-06-24T21:06:59.401402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 128 -> 240 2025-06-24T21:06:59.401553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:59.401638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:06:59.402473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:06:59.402754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:06:59.403762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:59.403790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:59.403864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:06:59.403912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:06:59.403974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.404006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-24T21:06:59.404028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-24T21:06:59.404041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 102, path id: 2 FAKE_COORDINATOR: 
Erasing txId 102 2025-06-24T21:06:59.404210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.404233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:06:59.404308Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:06:59.404333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:06:59.404359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:06:59.404378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:06:59.404400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-24T21:06:59.404427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:06:59.404465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:06:59.404484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:06:59.404539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:06:59.404572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-24T21:06:59.404593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-24T21:06:59.404616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-24T21:06:59.405215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:06:59.405266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:06:59.405289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:06:59.405315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-24T21:06:59.405360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:06:59.405794Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:06:59.405843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:06:59.405870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:06:59.405888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:06:59.405914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:06:59.405967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-24T21:06:59.407651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:06:59.408529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T21:06:59.408728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:06:59.408780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:06:59.409149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:06:59.409222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:06:59.409255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:335:2324] TestWaitNotification: OK eventTxId 102 2025-06-24T21:06:59.409776Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:59.409964Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 207us result status StatusSuccess 2025-06-24T21:06:59.410302Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyExternalDataSource" PathDescription { Self { Name: "MyExternalDataSource" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true 
CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 2 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_new_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TExternalDataSourceTest::CreateExternalDataSourceShouldFailIfSuchEntityAlreadyExists [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ReplaceExternalDataStoreShouldFailIfEntityOfAnotherTypeWithSameNameExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:59.136224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:59.136296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:59.136324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:59.136353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:59.136381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:59.136423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:59.136470Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:59.136527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:59.137074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:59.138570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:59.210323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:59.210380Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:59.221705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:59.221990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:59.222125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:59.228265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:59.228434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:59.228939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.229203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:59.231408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.231542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:59.233549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:59.233612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.233794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:59.233829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:59.233869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:59.233945Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.239054Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: 
[72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:59.344926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:59.345119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.345284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:59.345320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:59.345516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:59.345644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:59.347458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.347614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:59.347738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.347781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:59.347813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:59.347853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:59.349134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.349173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:59.349202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:59.350322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-06-24T21:06:59.350356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.350386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.350433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:59.352999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:59.354402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:59.354558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:59.355192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.355285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.355323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.355544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:59.355614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.355753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:59.355815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:59.357235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:59.357268Z node 1 :FLAT_TX_SCHEMESHARD ... 
xecute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.388865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:06:59.388936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:06:59.388962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:06:59.388993Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:06:59.389015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:06:59.389047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-24T21:06:59.389089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:06:59.389118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:06:59.389140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:06:59.389188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:06:59.389219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-24T21:06:59.389251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 4 2025-06-24T21:06:59.389282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-24T21:06:59.389860Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:06:59.389946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:06:59.389974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:06:59.390022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-06-24T21:06:59.390052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:06:59.390534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 
2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:06:59.390590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:06:59.390609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:06:59.390629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-24T21:06:59.390650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:06:59.390702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T21:06:59.393083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:06:59.393729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:06:59.393950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:06:59.393983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T21:06:59.394465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:06:59.394611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:06:59.394656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:301:2290] TestWaitNotification: OK eventTxId 101 2025-06-24T21:06:59.395114Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/UniqueName" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:59.395300Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/UniqueName" took 194us result status StatusSuccess 2025-06-24T21:06:59.395614Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/UniqueName" PathDescription { Self { Name: "UniqueName" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 
ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ViewDescription { Name: "UniqueName" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 QueryText: "Some query" CapturedContext { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 102 2025-06-24T21:06:59.398180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "UniqueName" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:59.398402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 102:0, feature flag EnableReplaceIfExistsForExternalEntities 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "UniqueName" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } 2025-06-24T21:06:59.398470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_external_data_source.cpp:212: [72057594046678944] TAlterExternalDataSource Propose: opId# 102:0, path# /MyRoot/UniqueName 2025-06-24T21:06:59.398579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 102:1, propose status:StatusNameConflict, reason: Check failed: path: '/MyRoot/UniqueName', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalDataSource, at schemeshard: 72057594046678944 2025-06-24T21:06:59.400216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 102, response: Status: StatusNameConflict Reason: "Check failed: path: \'/MyRoot/UniqueName\', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalDataSource" TxId: 102 SchemeshardId: 72057594046678944 PathId: 2 PathCreateTxId: 101, at schemeshard: 72057594046678944 2025-06-24T21:06:59.400403Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusNameConflict, reason: Check failed: path: '/MyRoot/UniqueName', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalDataSource, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/UniqueName TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T21:06:59.400627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:06:59.400656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:06:59.400967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:06:59.401034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:06:59.401061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:309:2298] TestWaitNotification: OK eventTxId 102 >> TExternalDataSourceTest::PreventDeletionOfDependentDataSources [GOOD] >> TExternalDataSourceTest::DropExternalDataSource [GOOD] >> TExternalDataSourceTest::ParallelCreateExternalDataSource [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::FallbackToSingleDbAfterBadRequest [GOOD] Test command err: 2025-06-24T21:06:36.328875Z :FallbackToSingleDb INFO: Random seed for debugging is 1750799196328848 2025-06-24T21:06:36.667012Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625292486539307:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:06:36.667099Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:06:36.700826Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625291287822890:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:06:36.700907Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004789/r3tmp/tmpBN9Yve/pdisk_1.dat 2025-06-24T21:06:36.831883Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:06:36.833748Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:06:37.003915Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625292486539264:2080] 1750799196664015 != 1750799196664018 2025-06-24T21:06:37.016782Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-24T21:06:37.016892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:06:37.022289Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:06:37.022981Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:37.023897Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:06:37.057795Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:06:37.057891Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 12841, node 1 2025-06-24T21:06:37.067067Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-24T21:06:37.067101Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-24T21:06:37.071317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:06:37.156914Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/004789/r3tmp/yandexocSfDl.tmp 2025-06-24T21:06:37.156948Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/004789/r3tmp/yandexocSfDl.tmp 2025-06-24T21:06:37.157739Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/004789/r3tmp/yandexocSfDl.tmp 2025-06-24T21:06:37.157868Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:06:37.348396Z INFO: TTestServer started on Port 26519 GrpcPort 12841 TClient is connected to server localhost:26519 PQClient connected to localhost:12841 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:06:37.599764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 
2025-06-24T21:06:37.674302Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:06:37.708699Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:06:38.997597Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625299877757762:2271], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:38.997655Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625299877757740:2268], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:38.997707Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:39.002384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:06:39.017605Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625299877757769:2272], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-24T21:06:39.097648Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625304172725093:2130] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:06:39.336471Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519625305371442239:2302], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:06:39.338278Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YjdkMGE3MjYtODQxZmFlYjEtNWQ1NTJhNWEtMzgzOGI3OTU=, ActorId: [1:7519625305371442213:2295], ActorState: ExecuteState, TraceId: 01jyhw7atc8276jky85bpdn871, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:06:39.339851Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:06:39.343166Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519625304172725108:2276], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:06:39.343350Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NjQ3ODRkZDktZTkzYjQ0Yi05M2ZjNDBhZS1jZDU5MmExOA==, ActorId: [2:7519625299877757738:2267], ActorState: ExecuteState, TraceId: 01jyhw7ark8wxthatt70yp9ra1, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:06:39.343670Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:06:39.386702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:06:39.503585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:06:39.631763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/sche ... 
025-06-24T21:06:56.935667Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [3:7519625376558201670:2450] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-24T21:06:57.042541Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [3:7519625376558201670:2450] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-24T21:06:57.042971Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519625380853169002:2450] connected; active server actors: 1 2025-06-24T21:06:57.043015Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [3:7519625376558201670:2450] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-24T21:06:57.043027Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [3:7519625376558201670:2450] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-24T21:06:57.043434Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519625380853169002:2450] disconnected; active server actors: 1 2025-06-24T21:06:57.043469Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519625380853169002:2450] disconnected no session 2025-06-24T21:06:57.130540Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7519625376558201670:2450] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-24T21:06:57.130566Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7519625376558201670:2450] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-24T21:06:57.130578Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7519625376558201670:2450] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-24T21:06:57.130598Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-24T21:06:57.131183Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037892] server connected, pipe [3:7519625380853169030:2450], now have 1 active actors on pipe 2025-06-24T21:06:57.131244Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 4, Generation: 1 2025-06-24T21:06:57.131418Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T21:06:57.131450Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T21:06:57.131538Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|676da777-7449894a-b3af6dbf-f41e66e6_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-24T21:06:57.131638Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 2025-06-24T21:06:57.131712Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T21:06:57.132077Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T21:06:57.132103Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T21:06:57.132161Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T21:06:57.132343Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|676da777-7449894a-b3af6dbf-f41e66e6_0 2025-06-24T21:06:57.132935Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750799217132 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:06:57.133022Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|676da777-7449894a-b3af6dbf-f41e66e6_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-24T21:06:57.133157Z :INFO: [] MessageGroupId [src] SessionId [src|676da777-7449894a-b3af6dbf-f41e66e6_0] Write session: close. Timeout = 0 ms 2025-06-24T21:06:57.133203Z :INFO: [] MessageGroupId [src] SessionId [src|676da777-7449894a-b3af6dbf-f41e66e6_0] Write session will now close 2025-06-24T21:06:57.133236Z :DEBUG: [] MessageGroupId [src] SessionId [src|676da777-7449894a-b3af6dbf-f41e66e6_0] Write session: aborting 2025-06-24T21:06:57.133547Z :INFO: [] MessageGroupId [src] SessionId [src|676da777-7449894a-b3af6dbf-f41e66e6_0] Write session: gracefully shut down, all writes complete 2025-06-24T21:06:57.133588Z :DEBUG: [] MessageGroupId [src] SessionId [src|676da777-7449894a-b3af6dbf-f41e66e6_0] Write session: destroy PORTS 12033 19047 2025-06-24T21:06:57.134323Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|676da777-7449894a-b3af6dbf-f41e66e6_0 grpc read done: success: 0 data: 2025-06-24T21:06:57.134339Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|676da777-7449894a-b3af6dbf-f41e66e6_0 grpc read failed 2025-06-24T21:06:57.134354Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|676da777-7449894a-b3af6dbf-f41e66e6_0 grpc closed 2025-06-24T21:06:57.134365Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|676da777-7449894a-b3af6dbf-f41e66e6_0 is DEAD 2025-06-24T21:06:57.135058Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T21:06:57.135375Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [3:7519625380853169030:2450] destroyed 2025-06-24T21:06:57.135382Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
Session was created >>> Ready to answer: ok 2025-06-24T21:06:58.142725Z :INFO: [/Root] OnFederationDiscovery fall back to single mode, database=/Root 2025-06-24T21:06:58.142862Z :INFO: [/Root] [] [841e3f6-a84d822f-81fb1c5f-242ce9d5] Open read subsessions to databases: { name: , endpoint: localhost:19047, path: /Root } 2025-06-24T21:06:58.143050Z :INFO: [/Root] [/Root] [9340d18f-af838276-998ba90b-5729c066] Starting read session 2025-06-24T21:06:58.143084Z :DEBUG: [/Root] [/Root] [9340d18f-af838276-998ba90b-5729c066] Starting single session 2025-06-24T21:06:58.143537Z :DEBUG: [/Root] [/Root] [9340d18f-af838276-998ba90b-5729c066] [] In Reconnect, ReadSizeBudget = 524288, ReadSizeServerDelta = 0 2025-06-24T21:06:58.143709Z :DEBUG: [/Root] [/Root] [9340d18f-af838276-998ba90b-5729c066] [] New values: ReadSizeBudget = 524288, ReadSizeServerDelta = 0 2025-06-24T21:06:58.143784Z :DEBUG: [/Root] [/Root] [9340d18f-af838276-998ba90b-5729c066] [] Reconnecting session to cluster in 0.000000s 2025-06-24T21:06:58.143969Z :ERROR: [/Root] [/Root] [9340d18f-af838276-998ba90b-5729c066] [] Got error. Status: CLIENT_CALL_UNIMPLEMENTED. Description:
: Error: GRpc error: (12):
: Error: Grpc error response on endpoint localhost:19047
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:19047. 2025-06-24T21:06:58.144018Z :DEBUG: [/Root] [/Root] [9340d18f-af838276-998ba90b-5729c066] [] In Reconnect, ReadSizeBudget = 524288, ReadSizeServerDelta = 0 2025-06-24T21:06:58.144045Z :DEBUG: [/Root] [/Root] [9340d18f-af838276-998ba90b-5729c066] [] New values: ReadSizeBudget = 524288, ReadSizeServerDelta = 0 2025-06-24T21:06:58.144160Z :INFO: [/Root] [/Root] [9340d18f-af838276-998ba90b-5729c066] [] Closing session to cluster: SessionClosed { Status: CLIENT_CALL_UNIMPLEMENTED Issues: "
: Error: Failed to establish connection to server "localhost:19047" ( cluster ). Attempts done: 1
: Error: GRpc error: (12):
: Error: Grpc error response on endpoint localhost:19047
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:19047. " } 2025-06-24T21:06:58.144423Z :NOTICE: [/Root] [/Root] [9340d18f-af838276-998ba90b-5729c066] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:06:58.144457Z :DEBUG: [/Root] [/Root] [9340d18f-af838276-998ba90b-5729c066] [] Abort session to cluster Got new read session event: SessionClosed { Status: CLIENT_CALL_UNIMPLEMENTED Issues: "
: Error: Failed to establish connection to server "localhost:19047" ( cluster ). Attempts done: 1
: Error: GRpc error: (12):
: Error: Grpc error response on endpoint localhost:19047
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:19047. " } 2025-06-24T21:06:58.144546Z :INFO: [/Root] [/Root] [9340d18f-af838276-998ba90b-5729c066] Closing read session. Close timeout: 0.010000s 2025-06-24T21:06:58.144578Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-24T21:06:58.144603Z :INFO: [/Root] [/Root] [9340d18f-af838276-998ba90b-5729c066] Counters: { Errors: 1 CurrentSessionLifetimeMs: 1 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:06:58.144627Z :INFO: [/Root] [/Root] [9340d18f-af838276-998ba90b-5729c066] Closing read session. Close timeout: 0.000000s 2025-06-24T21:06:58.144658Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-24T21:06:58.144688Z :INFO: [/Root] [/Root] [9340d18f-af838276-998ba90b-5729c066] Counters: { Errors: 1 CurrentSessionLifetimeMs: 1 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:06:58.144714Z :INFO: [/Root] [/Root] [9340d18f-af838276-998ba90b-5729c066] Closing read session. Close timeout: 0.000000s 2025-06-24T21:06:58.144752Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-24T21:06:58.144776Z :INFO: [/Root] [/Root] [9340d18f-af838276-998ba90b-5729c066] Counters: { Errors: 1 CurrentSessionLifetimeMs: 1 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:06:58.144826Z :NOTICE: [/Root] [/Root] [9340d18f-af838276-998ba90b-5729c066] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:06:58.665916Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976720688, task: 1, CA Id [3:7519625385148136417:2477]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-06-24T21:06:58.697278Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976720688, task: 1, CA Id [3:7519625385148136417:2477]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ReadOnlyMode [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-24T21:06:59.136244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:59.136316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:59.136349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:59.136379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:59.136410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:59.136436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:59.136511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:59.136563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:59.137140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:59.138525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:59.199922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:06:59.199965Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:59.200514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:59.213947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:59.214143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:59.214256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:59.219545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:59.219738Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:59.222203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.223105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:59.227684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.228236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:59.233525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:59.233588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.233676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:59.233739Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:59.233792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:59.233958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.239292Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:06:59.341551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:59.341733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.341884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:59.341920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:59.342077Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:59.342125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:59.343821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.343985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:59.344144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.344192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:59.344229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:59.344263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:59.345567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.345658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:59.345705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:59.346964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.346999Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.347043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.347079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:59.349620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:59.350856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 
2025-06-24T21:06:59.351038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:59.351703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.351787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.351827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.352003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:59.352043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.352166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:59.352228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06: ... 
28:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:59.709099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 128 ready parts: 1/1 2025-06-24T21:06:59.709228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 128 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:59.710198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 128 2025-06-24T21:06:59.710297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 128 2025-06-24T21:06:59.710340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 128 2025-06-24T21:06:59.710375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 128, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-24T21:06:59.710423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-24T21:06:59.711101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 4 Version: 2 PathOwnerId: 72057594046678944, cookie: 128 2025-06-24T21:06:59.711219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 4 Version: 2 PathOwnerId: 72057594046678944, cookie: 128 2025-06-24T21:06:59.711251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 128 2025-06-24T21:06:59.711279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 128, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 2 2025-06-24T21:06:59.711308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-24T21:06:59.711373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 128, ready parts: 0/1, is published: true 2025-06-24T21:06:59.713239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 128:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:128 msg type: 269090816 2025-06-24T21:06:59.713418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 128, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 128 at step: 
5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 128 at step: 5000004 2025-06-24T21:06:59.715140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 128 2025-06-24T21:06:59.715261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 128 2025-06-24T21:06:59.715564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.715683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 128 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.715746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:33: MkDir::TPropose operationId# 128:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000004, at schemeshard: 72057594046678944 2025-06-24T21:06:59.715892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 128:0 128 -> 240 2025-06-24T21:06:59.716055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T21:06:59.716113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 FAKE_COORDINATOR: Erasing txId 128 2025-06-24T21:06:59.717902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:59.717939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 128, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:59.718091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 128, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-24T21:06:59.718176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.718214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:456:2412], at schemeshard: 72057594046678944, txId: 128, path id: 1 2025-06-24T21:06:59.718261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:456:2412], at schemeshard: 72057594046678944, txId: 128, path id: 4 2025-06-24T21:06:59.718543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 128:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.718600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 128:0 ProgressState 2025-06-24T21:06:59.718708Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:907: Part operation is done id#128:0 progress is 1/1 2025-06-24T21:06:59.718742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 128 ready parts: 1/1 2025-06-24T21:06:59.718783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#128:0 progress is 1/1 2025-06-24T21:06:59.718817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 128 ready parts: 1/1 2025-06-24T21:06:59.718852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 128, ready parts: 1/1, is published: false 2025-06-24T21:06:59.718890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 128 ready parts: 1/1 2025-06-24T21:06:59.718923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 128:0 2025-06-24T21:06:59.718962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 128:0 2025-06-24T21:06:59.719025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-24T21:06:59.719067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 128, publications: 2, subscribers: 0 2025-06-24T21:06:59.719100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 128, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-24T21:06:59.719175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 128, [OwnerId: 72057594046678944, LocalPathId: 4], 3 2025-06-24T21:06:59.719870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 128 2025-06-24T21:06:59.719946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 128 2025-06-24T21:06:59.719984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 128 2025-06-24T21:06:59.720023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 128, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-24T21:06:59.720105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-24T21:06:59.720944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 128 2025-06-24T21:06:59.721015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 4 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 128 2025-06-24T21:06:59.721046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 128 2025-06-24T21:06:59.721078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 128, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3 2025-06-24T21:06:59.721107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-24T21:06:59.721186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 128, subscribers: 0 2025-06-24T21:06:59.723611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 128 2025-06-24T21:06:59.724856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 128 TestModificationResult got TxId: 128, wait until txId: 128 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::CreateExternalDataSourceShouldFailIfSuchEntityAlreadyExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-24T21:06:59.136259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:59.136339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:59.136371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:59.136404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:59.136436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:59.136480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:59.136545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:59.136610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 
604800.000000s, IsManualStartup# false 2025-06-24T21:06:59.137217Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:59.138539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:59.199959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:06:59.200007Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:59.200637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:59.214172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:59.214411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:59.214535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:59.219563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:59.219757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:59.222186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.223120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:59.227482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.228239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:59.233615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:59.233671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.233733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:59.233784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:59.233830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:59.234005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.239118Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:06:59.323755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: 
TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:59.325083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.326096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:59.326153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:59.327471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:59.327554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:59.330012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.330511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:59.330716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.330819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:59.330854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:59.330883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:59.332405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.332452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:59.332491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:59.333688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.333723Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-24T21:06:59.333770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.333806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:59.337303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:59.338596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:59.339642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:59.340395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.340505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.340545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.341634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:59.341682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.341828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:59.341904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06: ... 
2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:06:59.990895Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T21:06:59.990927Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:06:59.991310Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:06:59.991357Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:06:59.991376Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:06:59.991398Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-24T21:06:59.991450Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:06:59.991506Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T21:06:59.993420Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:06:59.993493Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:06:59.993671Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:06:59.993705Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T21:06:59.993958Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:06:59.994016Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:06:59.994040Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:307:2296] TestWaitNotification: OK eventTxId 101 2025-06-24T21:06:59.994356Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false 
ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:59.994472Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 152us result status StatusSuccess 2025-06-24T21:06:59.994711Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyExternalDataSource" PathDescription { Self { Name: "MyExternalDataSource" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 102 2025-06-24T21:06:59.996801Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_new_bucket" Auth { None { } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:59.996982Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 102:0, feature flag EnableReplaceIfExistsForExternalEntities 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_new_bucket" Auth { None { } } } 2025-06-24T21:06:59.997033Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:232: [72057594046678944] TCreateExternalDataSource Propose: opId# 102:0, path# /MyRoot/MyExternalDataSource 
2025-06-24T21:06:59.997176Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 102:1, propose status:StatusAlreadyExists, reason: Check failed: path: '/MyRoot/MyExternalDataSource', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeExternalDataSource, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-06-24T21:06:59.998723Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 102, response: Status: StatusAlreadyExists Reason: "Check failed: path: \'/MyRoot/MyExternalDataSource\', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeExternalDataSource, state: EPathStateNoChanges)" TxId: 102 SchemeshardId: 72057594046678944 PathId: 2 PathCreateTxId: 101, at schemeshard: 72057594046678944 2025-06-24T21:06:59.998893Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusAlreadyExists, reason: Check failed: path: '/MyRoot/MyExternalDataSource', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeExternalDataSource, state: EPathStateNoChanges), operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/MyExternalDataSource TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T21:06:59.999069Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:06:59.999096Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:06:59.999394Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:06:59.999454Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:06:59.999481Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:315:2304] TestWaitNotification: OK eventTxId 102 2025-06-24T21:06:59.999789Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:59.999901Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 144us result status StatusSuccess 2025-06-24T21:07:00.000108Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyExternalDataSource" PathDescription { Self { Name: "MyExternalDataSource" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } 
DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::PreventDeletionOfDependentDataSources [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-24T21:06:59.136279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:59.136360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:59.136405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:59.136445Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:59.136480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:59.136528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:59.136595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:59.136657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-06-24T21:06:59.137307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:59.138578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:59.199876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:06:59.199924Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:59.200450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:59.213885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:59.214084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:59.214182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:59.219317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:59.219494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:59.222205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.223121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:59.227404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.228221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:59.233497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:59.233548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.233612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:59.233645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:59.233682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:59.233849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.238652Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:06:59.345758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: 
TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:59.345898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.346082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:59.346125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:59.346300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:59.346354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:59.347975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.348096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:59.348235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.348282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:59.348322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:59.348347Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:59.349607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.349667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:59.349706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:59.350837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.350866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-24T21:06:59.350898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.350934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:59.353232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:59.354433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:59.354571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:59.355222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.355297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.355337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.355516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:59.355552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.355674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:59.355752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06: ... 
DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T21:06:59.994217Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:06:59.994266Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:06:59.994286Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:06:59.994306Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-24T21:06:59.994324Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:06:59.994903Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:06:59.994953Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:06:59.994975Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:06:59.994998Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-24T21:06:59.995017Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:06:59.995057Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T21:06:59.996030Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:06:59.997079Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:06:59.997135Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:06:59.997270Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:06:59.997300Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T21:06:59.997517Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:06:59.997565Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:06:59.997610Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:337:2326] TestWaitNotification: OK eventTxId 101 2025-06-24T21:06:59.997937Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:59.998060Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 137us result status StatusSuccess 2025-06-24T21:06:59.998313Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 103 2025-06-24T21:07:00.000268Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpDropExternalDataSource Drop { Name: "ExternalDataSource" } } TxId: 103 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-24T21:07:00.000370Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_drop_external_data_source.cpp:116: [72057594046678944] TDropExternalDataSource Propose: opId# 103:0, path# /MyRoot/ExternalDataSource 2025-06-24T21:07:00.000445Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 103:1, propose status:StatusSchemeError, reason: Other entities depend on this data source, please remove them at the beginning: /MyRoot/ExternalTable, at schemeshard: 72057594046678944 2025-06-24T21:07:00.001964Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 103, response: Status: StatusSchemeError Reason: "Other entities depend on this data source, please remove them at the beginning: /MyRoot/ExternalTable" TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:00.002219Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusSchemeError, reason: Other entities depend on this data source, please remove them at the beginning: /MyRoot/ExternalTable, operation: DROP EXTERNAL DATA SOURCE, path: /MyRoot/ExternalDataSource TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-24T21:07:00.002508Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T21:07:00.002549Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T21:07:00.002989Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T21:07:00.003101Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T21:07:00.003173Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [2:345:2334] TestWaitNotification: OK eventTxId 103 2025-06-24T21:07:00.003691Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:07:00.003858Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 189us result status StatusSuccess 2025-06-24T21:07:00.004192Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalDataSource" PathDescription { Self { Name: "ExternalDataSource" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 
PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "ExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { References { Path: "/MyRoot/ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } } } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::DropExternalDataSource [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-24T21:06:59.136226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:59.136292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:59.136322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:59.136349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:59.136379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:59.136413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:59.136458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:59.136512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-24T21:06:59.137057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:59.138510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:59.199886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:06:59.199926Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:59.200471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:59.213933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:59.214118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:59.214206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:59.219294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:59.219514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:59.222213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.223130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:59.227416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.228198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:59.233499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:59.233550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.233615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:59.233651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:59.233694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:59.233843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.238617Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:06:59.323723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: 
Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:59.325040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.326111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:59.326177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:59.327453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:59.327517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:59.329846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.330536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:59.330816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.330894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:59.330947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:59.330984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:59.332362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.332409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:59.332436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:59.333608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.333639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.333672Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.333722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:59.337445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:59.338636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:59.339613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:59.340355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.340439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.340481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.341559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:59.341611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.341782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:59.341866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06: ... 
n RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 2025-06-24T21:06:59.987160Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.987231Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 8589936751 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.987262Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_external_data_source.cpp:40: [72057594046678944] TDropExternalDataSource TPropose opId# 102:0 HandleReply TEvOperationPlan: step# 5000003 2025-06-24T21:06:59.987330Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:06:59.987407Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 128 -> 240 2025-06-24T21:06:59.987591Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:59.987652Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:06:59.988383Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:06:59.989348Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 FAKE_COORDINATOR: Erasing txId 102 2025-06-24T21:06:59.990142Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:59.990167Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:59.990230Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:06:59.990321Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.990348Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:210:2210], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-24T21:06:59.990375Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:210:2210], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-24T21:06:59.990505Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.990528Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:06:59.990599Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:06:59.990620Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:06:59.990653Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:06:59.990683Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:06:59.990718Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-24T21:06:59.990750Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:06:59.990773Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:06:59.990797Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:06:59.990890Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:06:59.990940Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-24T21:06:59.990980Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-24T21:06:59.991014Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-24T21:06:59.991445Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:06:59.991547Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:06:59.991585Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:06:59.991623Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-24T21:06:59.991684Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:06:59.991899Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:06:59.991929Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:06:59.991970Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:06:59.992154Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:06:59.992196Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:06:59.992213Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:06:59.992238Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-24T21:06:59.992259Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:59.992309Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-24T21:06:59.994565Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:06:59.994626Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:06:59.994677Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T21:06:59.994800Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:06:59.994825Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:06:59.995059Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:06:59.995119Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:06:59.995160Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:335:2324] TestWaitNotification: OK eventTxId 102 2025-06-24T21:06:59.995426Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:59.995542Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 128us result status StatusPathDoesNotExist 2025-06-24T21:06:59.995647Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/MyExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ParallelCreateExternalDataSource [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-24T21:06:59.136232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:59.136296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:59.136334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:59.136364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:59.136391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:59.136428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:59.136474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:59.136521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-06-24T21:06:59.137070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:59.138530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:59.200393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:06:59.200429Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:59.200917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:59.213788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:59.213992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:59.214164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:59.219167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:59.219448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:59.222207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.223118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:59.227354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.228226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:59.233479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:59.233528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:59.233595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:59.233628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:59.233710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:59.233832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.238532Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:06:59.336521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: 
TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:59.336684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.336835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:59.336868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:59.337023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:59.337075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:59.338838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.338970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:59.339122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.339181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:59.339228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:59.339253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:59.340519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.340560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:59.340595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:59.341736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:59.341768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-24T21:06:59.341799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.341847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:59.348816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:59.350384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:59.350540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:59.351179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:59.351261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.351303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.351462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:59.351495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:59.351617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:59.351678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06: ... 
hId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource1" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.979230Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/MyExternalDataSource2" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:59.979350Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/MyExternalDataSource2" took 137us result status StatusSuccess 2025-06-24T21:06:59.979545Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/MyExternalDataSource2" PathDescription { Self { Name: "MyExternalDataSource2" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 126 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 
MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource2" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.980012Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:59.980124Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA" took 116us result status StatusSuccess 2025-06-24T21:06:59.980444Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA" PathDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 124 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 } ChildrenExist: true } Children { Name: "MyExternalDataSource1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 125 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "MyExternalDataSource2" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 126 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.980782Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/MyExternalDataSource1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 
2025-06-24T21:06:59.980886Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/MyExternalDataSource1" took 120us result status StatusSuccess 2025-06-24T21:06:59.981065Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/MyExternalDataSource1" PathDescription { Self { Name: "MyExternalDataSource1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 125 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource1" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:59.981427Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/MyExternalDataSource2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:06:59.981552Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/MyExternalDataSource2" took 117us result status StatusSuccess 2025-06-24T21:06:59.981746Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/MyExternalDataSource2" PathDescription { Self { Name: "MyExternalDataSource2" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 126 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } 
DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource2" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> BasicUsage::WriteSessionCloseIgnoresWrites [GOOD] >> TExternalDataSourceTest::RemovingReferencesFromDataSources >> IndexBuildTest::MergeIndexTableShardsOnlyWhenReady [GOOD] >> IndexBuildTest::IndexPartitioningIsPersisted >> TExternalDataSourceTest::RemovingReferencesFromDataSources [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::WriteSessionCloseIgnoresWrites [GOOD] Test command err: 2025-06-24T21:06:36.347747Z :WriteSessionCloseWaitsForWrites INFO: Random seed for debugging is 1750799196347729 2025-06-24T21:06:36.666694Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625290179558858:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:06:36.666796Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:06:36.700885Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625291571055508:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:06:36.701784Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00479c/r3tmp/tmppx7ZeS/pdisk_1.dat 2025-06-24T21:06:36.825783Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:06:36.832456Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:06:37.007643Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:37.019119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:06:37.019202Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) 
VolatileState: Disconnected -> Connecting 2025-06-24T21:06:37.024916Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:06:37.025690Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:06:37.049560Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:06:37.049651Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:06:37.052096Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7361, node 1 2025-06-24T21:06:37.156598Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00479c/r3tmp/yandexWB6cf1.tmp 2025-06-24T21:06:37.156646Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00479c/r3tmp/yandexWB6cf1.tmp 2025-06-24T21:06:37.157739Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00479c/r3tmp/yandexWB6cf1.tmp 2025-06-24T21:06:37.157878Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:06:37.348349Z INFO: TTestServer started on Port 25876 GrpcPort 7361 TClient is connected to server localhost:25876 PQClient connected to localhost:7361 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:06:37.605231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 
2025-06-24T21:06:37.673429Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:06:37.706171Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:06:38.990862Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625298769494435:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:38.990965Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625298769494425:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:38.990899Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625300160990377:2271], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:38.990947Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625300160990369:2268], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:38.991066Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:38.991105Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:38.994198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:06:39.003994Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625300160990388:2124] txid# 281474976720657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T21:06:39.008815Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625300160990387:2272], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-24T21:06:39.008815Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625298769494440:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-24T21:06:39.072379Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625304455957711:2130] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:06:39.092620Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625303064461839:2688] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:06:39.315868Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519625303064461849:2307], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:06:39.315868Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519625304455957726:2276], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:06:39.316174Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=YmFjYWE0ZWEtMjg1NDY2YTMtYWI0NjVhODEtODU3NGZiODA=, ActorId: [2:7519625300160990356:2267], ActorState: ExecuteState, TraceId: 01jyhw7ardc9fd221fnp8r8h35, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:06:39.319104Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:06:39.321550Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=M2RkZmJkYS0xNzJmNjhhZS0xMjJiNTM3MS00ODUxYWFmYQ==, ActorId: [1:7519625298769494423:2295], ActorState: ExecuteState, TraceId: 01jyhw7ard2e2xhz743ebeng7e, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:06:39.321834Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { ... pp:1531: proxy answer CallPersQueueGRPC response: Status: 1 ErrorCode: OK MetaResponse { CmdGetTopicMetadataResult { TopicInfo { Topic: "rt3.dc1--test-topic" NumPartitions: 1 Config { PartitionConfig { LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 SourceIdMaxCounts: 6000000 } Version: 1 LocalDC: true Codecs { Ids: 0 Ids: 1 Ids: 2 Codecs: "raw" Codecs: "gzip" Codecs: "lzop" } TopicPath: "/Root/PQ/rt3.dc1--test-topic" YdbDatabasePath: "/Root" Consumers { Name: "user" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } Version: 0 Important: false } } ErrorCode: OK } } } === Topic created, have version: 1 2025-06-24T21:06:56.892796Z :DEBUG: [] MessageGroupId [src] SessionId [] Write session: try to update token 2025-06-24T21:06:56.893483Z :INFO: [] MessageGroupId [src] SessionId [] Write session: Do CDS request 2025-06-24T21:06:56.893530Z :INFO: [] MessageGroupId [src] SessionId [] Start write session. 
Will connect to endpoint: localhost:62887 2025-06-24T21:06:56.901833Z :DEBUG: [] MessageGroupId [src] SessionId [] Write session: send init request: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-24T21:06:56.902248Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-24T21:06:56.902279Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1 2025-06-24T21:06:56.902552Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: grpc read done: success: 1 data: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-24T21:06:56.902655Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1 topic: "test-topic" message_group_id: "src" from ipv6:[::1]:53334 2025-06-24T21:06:56.902674Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:53334 proto=v1 topic=test-topic durationSec=0 2025-06-24T21:06:56.902684Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-24T21:06:56.903725Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-24T21:06:56.903797Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-24T21:06:56.903809Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-24T21:06:56.903814Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-24T21:06:56.903827Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [3:7519625378272434367:2445] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-06-24T21:06:56.905588Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [3:7519625378272434367:2445] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-24T21:06:57.032358Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [3:7519625378272434367:2445] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-24T21:06:57.032518Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519625382567401699:2445] connected; active server actors: 1 2025-06-24T21:06:57.032560Z node 3 
:PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [3:7519625378272434367:2445] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-24T21:06:57.032572Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [3:7519625378272434367:2445] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-24T21:06:57.032711Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519625382567401699:2445] disconnected; active server actors: 1 2025-06-24T21:06:57.032725Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519625382567401699:2445] disconnected no session 2025-06-24T21:06:57.102620Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7519625378272434367:2445] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-24T21:06:57.102657Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7519625378272434367:2445] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-24T21:06:57.102674Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7519625378272434367:2445] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-24T21:06:57.102697Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-24T21:06:57.103333Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037892] server connected, pipe [3:7519625382567401725:2445], now have 1 active actors on pipe 2025-06-24T21:06:57.103406Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 4, Generation: 1 2025-06-24T21:06:57.103572Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T21:06:57.103594Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T21:06:57.103664Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|abbf4e65-c5a79336-117185dc-98285abb_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-24T21:06:57.103741Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 2025-06-24T21:06:57.103807Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T21:06:57.104081Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T21:06:57.104095Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T21:06:57.104145Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T21:06:57.104311Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|abbf4e65-c5a79336-117185dc-98285abb_0 2025-06-24T21:06:57.104946Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750799217104 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:06:57.105038Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|abbf4e65-c5a79336-117185dc-98285abb_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-24T21:06:57.105212Z :INFO: [] MessageGroupId [src] SessionId [src|abbf4e65-c5a79336-117185dc-98285abb_0] Write session: close. Timeout = 0 ms 2025-06-24T21:06:57.105265Z :INFO: [] MessageGroupId [src] SessionId [src|abbf4e65-c5a79336-117185dc-98285abb_0] Write session will now close 2025-06-24T21:06:57.105310Z :DEBUG: [] MessageGroupId [src] SessionId [src|abbf4e65-c5a79336-117185dc-98285abb_0] Write session: aborting 2025-06-24T21:06:57.105665Z :INFO: [] MessageGroupId [src] SessionId [src|abbf4e65-c5a79336-117185dc-98285abb_0] Write session: gracefully shut down, all writes complete 2025-06-24T21:06:57.105694Z :DEBUG: [] MessageGroupId [src] SessionId [src|abbf4e65-c5a79336-117185dc-98285abb_0] Write session: destroy 2025-06-24T21:06:57.106520Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|abbf4e65-c5a79336-117185dc-98285abb_0 grpc read done: success: 0 data: 2025-06-24T21:06:57.106542Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|abbf4e65-c5a79336-117185dc-98285abb_0 grpc read failed 2025-06-24T21:06:57.106582Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|abbf4e65-c5a79336-117185dc-98285abb_0 grpc closed 2025-06-24T21:06:57.106605Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|abbf4e65-c5a79336-117185dc-98285abb_0 is DEAD 2025-06-24T21:06:57.107787Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T21:06:57.108080Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [3:7519625382567401725:2445] destroyed 2025-06-24T21:06:57.108107Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. Session was created >>> Ready to answer: ok 2025-06-24T21:06:57.165165Z :ERROR: [/Root] OnFederationDiscovery: Got error. Status: UNAVAILABLE. 
Description: 2025-06-24T21:07:00.518621Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976720690, task: 1, CA Id [3:7519625395452303786:2485]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-06-24T21:07:00.550477Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976720690, task: 1, CA Id [3:7519625395452303786:2485]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T21:07:00.602725Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976720690, task: 1, CA Id [3:7519625395452303786:2485]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T21:07:00.665151Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976720690, task: 1, CA Id [3:7519625395452303786:2485]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExistsShouldFailIfFeatureFlagIsNotSet ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::RemovingReferencesFromDataSources [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-24T21:07:01.475606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:07:01.475672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:01.475701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:07:01.475733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:07:01.475763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:07:01.475784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:07:01.475839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:01.475901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:07:01.476455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:07:01.476692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:07:01.539767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new 
config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:07:01.539825Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:01.540539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:07:01.557075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:07:01.557429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:07:01.557626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:07:01.563792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:07:01.564061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:07:01.564679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:01.564888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:07:01.567687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:01.567831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:07:01.568653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:01.568703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:01.568793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:07:01.568836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:01.568886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:07:01.569043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:07:01.574588Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:07:01.693774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:07:01.693970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: 
TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:01.694120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:07:01.694161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:07:01.694324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:07:01.694375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:01.696491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:01.696627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:07:01.696798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:01.696846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:07:01.696884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:07:01.696912Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:07:01.698333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:01.698393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:07:01.698422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:07:01.699644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:01.699675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:01.699710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:01.699748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:07:01.702062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:07:01.703383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:07:01.703552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:07:01.704235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:01.704319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:01.704370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:01.704543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:07:01.704572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:01.704704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:07:01.704784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:07: ... 
ation RegisterRelationByTabletId, TxId: 104, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 104 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 104 at step: 5000005 2025-06-24T21:07:01.790080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:01.790153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:01.790186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_external_data_source.cpp:40: [72057594046678944] TDropExternalDataSource TPropose opId# 104:0 HandleReply TEvOperationPlan: step# 5000005 2025-06-24T21:07:01.790251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:07:01.790314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 104:0 128 -> 240 2025-06-24T21:07:01.790431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:07:01.790478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:07:01.790905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:07:01.792061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 FAKE_COORDINATOR: Erasing txId 104 2025-06-24T21:07:01.792741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:01.792766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:01.792853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:07:01.792963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:01.792985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 104, path id: 1 2025-06-24T21:07:01.793012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 104, path id: 2 2025-06-24T21:07:01.793260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:07:01.793286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-24T21:07:01.793352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:07:01.793373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:07:01.793398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:07:01.793423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:07:01.793456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-24T21:07:01.793480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:07:01.793503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:0 2025-06-24T21:07:01.793523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:0 2025-06-24T21:07:01.793580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:07:01.793630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-06-24T21:07:01.793654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-24T21:07:01.793677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-24T21:07:01.793845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:07:01.793919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:07:01.793952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:07:01.793991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-24T21:07:01.794026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:07:01.794316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:07:01.794347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:07:01.794403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:07:01.794552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:07:01.794624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:07:01.794647Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:07:01.794672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-24T21:07:01.794693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:07:01.794732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-24T21:07:01.797117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:07:01.797172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:07:01.797210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-24T21:07:01.797464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-24T21:07:01.797497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-24T21:07:01.797833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-24T21:07:01.797893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T21:07:01.797918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:390:2379] TestWaitNotification: OK eventTxId 104 2025-06-24T21:07:01.798281Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:07:01.798399Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 128us result status StatusPathDoesNotExist 2025-06-24T21:07:01.798519Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> AnalyzeDatashard::DropTableNavigateError [GOOD] >> IndexBuildTest::IndexPartitioningIsPersisted [GOOD] >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExistsShouldFailIfFeatureFlagIsNotSet [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::IndexPartitioningIsPersisted [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:32.959135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:32.959242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:32.959277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:32.959310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:32.959344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:32.959377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:32.959418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:32.959465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: 
[RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:32.960000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:32.960224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:33.021251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:33.021303Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:33.032823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:33.033117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:33.033238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:33.039033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:33.039240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:33.039783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:33.040135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:33.042362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:33.042543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:33.043438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:33.043491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:33.043664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:33.043703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:33.043730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:33.043804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:33.048336Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:33.133028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: 
Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:33.133196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:33.133381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:33.133414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:33.133592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:33.133659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:33.135409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:33.135557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:33.135725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:33.135771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:33.135799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:33.135822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:33.137063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:33.137098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:33.137133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:33.138248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:33.138279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:33.138312Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:33.138352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:33.140654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:33.141966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:33.142104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:33.142733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:33.142833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:33.142865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:33.143061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:33.143096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:33.143236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:33.143290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:33.144677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:33.144710Z node 1 :FLAT_TX_SCHEMESHARD ... 
ntSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:02.854887Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:07:02.855110Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index" took 237us result status StatusSuccess 2025-06-24T21:07:02.855825Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Index" PathDescription { Self { Name: "Index" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "Index" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { 
CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:02.856383Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:07:02.856696Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable" took 324us result status StatusSuccess 2025-06-24T21:07:02.857532Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Index/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 
TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } SplitBoundary { KeyPrefix { Tuple { Optional { Text: "alice" } } Tuple { } } } SplitBoundary { KeyPrefix { Tuple { Optional { Text: "bob" } } Tuple { } } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\002\000\005\000\000\000alice\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409547 } TablePartitions { EndOfRangeKeyPrefix: "\002\000\003\000\000\000bob\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 
3 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeDatashard::DropTableNavigateError [GOOD] Test command err: 2025-06-24T21:05:22.069164Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:22.069398Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:22.069504Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00435d/r3tmp/tmp9Y3X2C/pdisk_1.dat 2025-06-24T21:05:22.431378Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64786, node 1 2025-06-24T21:05:22.623487Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:22.623556Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:22.623616Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:22.623832Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:22.626356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:22.732214Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:22.732399Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:22.745469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1865 2025-06-24T21:05:23.270333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:25.905050Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:25.934829Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:25.934965Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:25.994493Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:25.996711Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:26.167428Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:26.202645Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:26.203277Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:26.203798Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:26.203964Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:26.204074Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:26.204291Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:26.204382Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:26.204466Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:26.204579Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:26.380133Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:26.380251Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:26.393254Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:26.571890Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:26.614282Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:26.614408Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:26.648737Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:26.650237Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:26.650499Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:26.650571Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:26.650625Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:26.650688Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:26.650745Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:26.650804Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:26.651894Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:26.688623Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:26.688755Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:26.695343Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:26.698578Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:26.698858Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:26.707126Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:26.736327Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:26.736403Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:26.736511Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:26.753634Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:26.761875Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:26.762040Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:26.943355Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:27.088962Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:27.134875Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:27.635420Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:27.939055Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:05:27.939232Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:05:27.958919Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667)
2025-06-24T21:05:28.383771Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2448:3072], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:05:28.383911Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:28.384947Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2453:3076]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:28.385147Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:05:28.385250Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2455:3078] 2025-06-24T21:05:28.385353Z nod ... 24T21:06:38.321960Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:06:40.536778Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:06:42.356093Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:06:42.356423Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:06:44.736885Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:06:46.615381Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:06:46.615724Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:06:49.121337Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:06:50.856151Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:06:50.856462Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:06:53.380488Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:06:55.024937Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:06:55.025241Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:06:57.479889Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:06:58.415274Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-24T21:06:58.415354Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7957: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-24T21:06:58.415385Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7988: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-24T21:06:58.415417Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-24T21:06:59.477907Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:06:59.478252Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:06:59.520375Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8072: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037897 2025-06-24T21:06:59.520436Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 198.000000s, at 
schemeshard: 72075186224037897 2025-06-24T21:06:59.520643Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 25 2025-06-24T21:06:59.533644Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-24T21:07:00.332173Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:00.332260Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:07:00.332297Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-24T21:07:00.332342Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-24T21:07:00.332382Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:00.332728Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:00.336911Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:07:00.340667Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6626:4684], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:07:00.340778Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6637:4689], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:07:00.341197Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:07:00.350116Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-24T21:07:00.391495Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:6640:4692], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-24T21:07:00.530119Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:6733:4738] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:00.587682Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:6762:4753]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:00.587977Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:07:00.588118Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:6764:4755] 2025-06-24T21:07:00.588208Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:6764:4755] 2025-06-24T21:07:00.588667Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:6765:4756] 2025-06-24T21:07:00.588797Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:6764:4755], server id = [2:6765:4756], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:00.588926Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:6765:4756], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:07:00.589008Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:07:00.589119Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:00.589203Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:6762:4753], StatRequests.size() = 1 2025-06-24T21:07:00.795649Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=N2FjMjI3MWUtYTQyNjY3NWItYzhiZDA1MmQtODQyOTE3ZDY=, TxId: 2025-06-24T21:07:00.795721Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=N2FjMjI3MWUtYTQyNjY3NWItYzhiZDA1MmQtODQyOTE3ZDY=, TxId: 2025-06-24T21:07:00.796620Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:00.810041Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:00.810117Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:07:00.842750Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:07:00.842826Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:07:00.895791Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:6764:4755], schemeshard count = 1 2025-06-24T21:07:01.606305Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:01.606409Z node 2 :STATISTICS ERROR: aggregator_impl.cpp:805: [72075186224037894] IsColumnTable. traversal path [OwnerId: 72075186224037897, LocalPathId: 4] is not known to schemeshard 2025-06-24T21:07:01.606693Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:01.608775Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:07:01.616154Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=N2MzMTEyYjItOGY1ZDJlMjgtN2E0NDExZWYtYzkyNTc5Mw==, TxId: 2025-06-24T21:07:01.616205Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=N2MzMTEyYjItOGY1ZDJlMjgtN2E0NDExZWYtYzkyNTc5Mw==, TxId: 2025-06-24T21:07:01.616526Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:01.629543Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:01.629626Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:2758:3221] 2025-06-24T21:07:01.630038Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:6840:4803]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:01.631985Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:01.632030Z node 2 :STATISTICS ERROR: service_impl.cpp:796: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] Navigate failed 2025-06-24T21:07:01.632063Z node 2 :STATISTICS DEBUG: service_impl.cpp:1304: ReplyFailed(), request id = 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExistsShouldFailIfFeatureFlagIsNotSet [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-24T21:07:02.864648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:07:02.864711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:02.864743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:07:02.864782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:07:02.864824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:07:02.864844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:07:02.864896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:02.864937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:07:02.865451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:07:02.865692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:07:02.927510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:07:02.927558Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:02.928052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: 
ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:07:02.938064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:07:02.938254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:07:02.938393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:07:02.942433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:07:02.942603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:07:02.943053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:02.943210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:07:02.945093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:02.945265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:07:02.946054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:02.946093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:02.946141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:07:02.946183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:02.946227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:07:02.946367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:07:02.950575Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:07:03.043047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:07:03.043247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:03.043397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, 
LocalPathId: 1] was 0 2025-06-24T21:07:03.043439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:07:03.043630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:07:03.043686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:03.045441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:03.045610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:07:03.045792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:03.045831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:07:03.045890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:07:03.045913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:07:03.047220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:03.047258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:07:03.047298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:07:03.048451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:03.048482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:03.048509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:03.048547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:07:03.055478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 
72057594046316545 2025-06-24T21:07:03.056876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:07:03.057047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:07:03.057794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:03.057876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:03.057914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:03.058092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:07:03.058145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:03.058282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:07:03.058349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:07:03.059798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:03.059837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:03.059951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:03.059975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-24T21:07:03.060165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:03.060193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T21:07:03.060259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:07:03.060281Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:07:03.060316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:07:03.060363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:07:03.060390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T21:07:03.060419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:07:03.060464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T21:07:03.060484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T21:07:03.060523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:07:03.060545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T21:07:03.060590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T21:07:03.062096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:07:03.062178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:07:03.062223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T21:07:03.062251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T21:07:03.062277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:07:03.062351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T21:07:03.064221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T21:07:03.064595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-24T21:07:03.065430Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:272:2261] Bootstrap 2025-06-24T21:07:03.080439Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:272:2261] Become 
StateWork (SchemeCache [1:277:2266]) 2025-06-24T21:07:03.082360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:07:03.082620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 101:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } 2025-06-24T21:07:03.082688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 101:0, explain: Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, at schemeshard: 72057594046678944 2025-06-24T21:07:03.082740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 101:1, propose status:StatusPreconditionFailed, reason: Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, at schemeshard: 72057594046678944 2025-06-24T21:07:03.083329Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:272:2261] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:07:03.085643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusPreconditionFailed Reason: "Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:03.085847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/MyExternalDataSource 2025-06-24T21:07:03.086248Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:07:03.086401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:07:03.086436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T21:07:03.086675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:07:03.086736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:07:03.086757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: 
tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:287:2276] TestWaitNotification: OK eventTxId 101 2025-06-24T21:07:03.087112Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:07:03.087250Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 143us result status StatusPathDoesNotExist 2025-06-24T21:07:03.087386Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/MyExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |93.7%| [TA] $(B)/ydb/core/tx/schemeshard/ut_external_data_source/test-results/unittest/{meta.json ... results_accumulator.log} |93.7%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/test-results/unittest/{meta.json ... 
results_accumulator.log} >> DataShardSnapshots::LockedWriteBulkUpsertConflict+UseSink >> DataShardSnapshots::LockedWriteReuseAfterCommit+UseSink >> DataShardSnapshots::VolatileSnapshotSplit >> DataShardSnapshots::MvccSnapshotTailCleanup >> DataShardSnapshots::MvccSnapshotAndSplit >> DataShardSnapshots::UncommittedChangesRenameTable+UseSink >> TExportToS3Tests::CancelUponTransferringSingleShardTableShouldSucceed >> TExportToS3Tests::ShouldCheckQuotasExportsLimited >> TExportToS3Tests::CancelUponCreatingExportDirShouldSucceed >> TExportToS3Tests::CheckItemProgress >> TExportToS3Tests::ExportPartitioningSettings >> TExportToS3Tests::ShouldSucceedOnConcurrentTxs >> TExportToS3Tests::ShouldSucceedOnSingleShardTable >> TExportToS3Tests::CancelUponCreatingExportDirShouldSucceed [GOOD] >> TExportToS3Tests::CancelUponCopyingTablesShouldSucceed >> TExportToS3Tests::ExportPartitioningSettings [GOOD] >> TExportToS3Tests::ShouldSucceedOnConcurrentTxs [GOOD] >> TExportToS3Tests::ShouldSucceedOnSingleShardTable [GOOD] >> TExportToS3Tests::ShouldSucceedOnConcurrentExport >> TExportToS3Tests::ShouldSucceedOnMultiShardTable >> TExportToS3Tests::ShouldCheckQuotasExportsLimited [GOOD] >> TExportToS3Tests::ExportIndexTablePartitioningSettings >> TExportToS3Tests::ShouldCheckQuotasChildrenLimited >> TExportToS3Tests::CheckItemProgress [GOOD] >> TExportToS3Tests::ExportIndexTablePartitioningSettings [GOOD] >> TExportToS3Tests::CompletedExportEndTime >> TExportToS3Tests::EnableChecksumsPersistance >> TExportToS3Tests::ShouldSucceedOnMultiShardTable [GOOD] >> TExportToS3Tests::ShouldCheckQuotasChildrenLimited [GOOD] >> TExportToS3Tests::ShouldSucceedOnConcurrentExport [GOOD] >> TExportToS3Tests::ShouldSucceedOnManyTables >> TExportToS3Tests::SchemaMappingEncryption >> TExportToS3Tests::ShouldSucceedOnConcurrentImport >> TExportToS3Tests::EnableChecksumsPersistance [GOOD] >> TExportToS3Tests::CompletedExportEndTime [GOOD] >> TExportToS3Tests::EncryptedExport >> TExportToS3Tests::Checksums >> TExportToS3Tests::ShouldSucceedOnManyTables [GOOD] >> DataShardSnapshots::LockedWriteBulkUpsertConflict+UseSink [GOOD] >> DataShardSnapshots::LockedWriteBulkUpsertConflict-UseSink >> DataShardSnapshots::LockedWriteReuseAfterCommit+UseSink [GOOD] >> DataShardSnapshots::LockedWriteReuseAfterCommit-UseSink >> TExportToS3Tests::SchemaMappingEncryption [GOOD] >> TExportToS3Tests::ShouldSucceedOnConcurrentImport [GOOD] >> TExportToS3Tests::TablePermissions >> TExportToS3Tests::SchemaMappingEncryptionIncorrectKey >> TExportToS3Tests::ShouldRetryAtFinalStage >> DataShardSnapshots::VolatileSnapshotSplit [GOOD] >> DataShardSnapshots::VolatileSnapshotMerge >> TExportToS3Tests::Checksums [GOOD] >> DataShardSnapshots::UncommittedChangesRenameTable+UseSink [GOOD] >> DataShardSnapshots::ShardRestartWholeShardLockBasic >> TExportToS3Tests::ChecksumsWithCompression >> TExportToS3Tests::TablePermissions [GOOD] >> TExportToS3Tests::SchemaMappingEncryptionIncorrectKey [GOOD] >> TExportToS3Tests::EncryptedExport [GOOD] >> DataShardSnapshots::MvccSnapshotAndSplit [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWrites+UseSink >> TExportToS3Tests::ChecksumsWithCompression [GOOD] >> TExportToS3Tests::CancelUponTransferringSingleShardTableShouldSucceed [GOOD] >> TExportToS3Tests::Changefeeds >> TExportToS3Tests::CancelUponTransferringMultiShardTableShouldSucceed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::TablePermissions [GOOD] Test command err: Leader for TabletID 72057594046678944 
is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:07:06.641789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:07:06.641890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:06.641921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:07:06.641947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:07:06.642574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:07:06.642622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:07:06.642681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:06.642791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:07:06.643341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:07:06.644116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:07:06.709383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:07:06.709424Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:06.720032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:07:06.720301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:07:06.720416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:07:06.725818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:07:06.725944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:07:06.726606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.726906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 
72057594046678944 2025-06-24T21:07:06.731009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:06.732053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:07:06.737949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:06.738005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:06.738168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:07:06.738205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:06.738246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:07:06.738312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.743536Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:07:06.835108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:07:06.836149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.837208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:07:06.837262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:07:06.838456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:07:06.838533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:06.841146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.841849Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:07:06.842006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.842117Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:07:06.842149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:07:06.842182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:07:06.843757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.843812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:07:06.843842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:07:06.845254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.845291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.845335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.845370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:07:06.848788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:07:06.850294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:07:06.851413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:07:06.852133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.852237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 
72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:06.852271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.853581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:07:06.853659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.853834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:07:06.853898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:07:06.855427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:06.855456Z node 1 :FLAT_TX_SCHEMESHARD ... 000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710759 at step: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72075186233409547 for txId: 281474976710759 at step: 5000005 2025-06-24T21:07:10.584707Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:10.584810Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710759 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 17179871341 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:10.584857Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 281474976710759:0 HandleReply TEvOperationPlan, stepId: 5000005, at schemeshard: 72057594046678944 2025-06-24T21:07:10.584965Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710759:0 128 -> 129 2025-06-24T21:07:10.585099Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000005 2025-06-24T21:07:10.618080Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:10.618116Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710759, path id: [OwnerId: 72057594046678944, LocalPathId: 4] REQUEST: PUT /metadata.json HTTP/1.1 2025-06-24T21:07:10.618295Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 HEADERS: Host: localhost:25650 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA 
amz-sdk-invocation-id: 70028A37-3A3D-41B8-8836-113327FF7D5D amz-sdk-request: attempt=1 content-length: 106 content-md5: MiY7vpEE4i/Xg+IZdddDVg== 2025-06-24T21:07:10.618331Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:208:2208], at schemeshard: 72057594046678944, txId: 281474976710759, path id: 4 content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /metadata.json / / 106 2025-06-24T21:07:10.618615Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-24T21:07:10.618654Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 281474976710759:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:07:10.620240Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759 2025-06-24T21:07:10.620322Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759 2025-06-24T21:07:10.620346Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710759 2025-06-24T21:07:10.620370Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710759, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3 2025-06-24T21:07:10.620400Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-24T21:07:10.620466Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 281474976710759 REQUEST: PUT /permissions.pb HTTP/1.1 HEADERS: Host: localhost:25650 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: B3B4C480-0802-498E-AFDC-9B6B5C64B185 amz-sdk-request: attempt=1 content-length: 137 content-md5: WeIr3D5bqIjvqMGEjx2JrA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /permissions.pb / / 137 2025-06-24T21:07:10.622455Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710759 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:25650 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: C6050DF1-E310-45C5-A27E-DBEFC1DEC35D amz-sdk-request: attempt=1 content-length: 355 content-md5: 4DhJNWgTpoG3PVvZ0uCHUA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /scheme.pb / / 355 REQUEST: PUT /data_00.csv 
HTTP/1.1 HEADERS: Host: localhost:25650 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 93E8294B-3EC7-4580-ADFE-E4E41D4784F7 amz-sdk-request: attempt=1 content-length: 0 content-md5: 1B2M2Y8AsgTpgAmY7PhCfg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv / / 0 2025-06-24T21:07:10.636673Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 457 RawX2: 17179871608 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-24T21:07:10.636720Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 281474976710759, tablet: 72075186233409547, partId: 0 2025-06-24T21:07:10.636821Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944, message: Source { RawX1: 457 RawX2: 17179871608 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-24T21:07:10.636915Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 281474976710759:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 457 RawX2: 17179871608 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-24T21:07:10.636981Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710759:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:10.637026Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-24T21:07:10.637065Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 281474976710759:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:07:10.637107Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710759:0 129 -> 240 2025-06-24T21:07:10.637243Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 281474976710759:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:10.638486Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-24T21:07:10.638585Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 
281474976710759:0, at schemeshard: 72057594046678944 2025-06-24T21:07:10.638613Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 281474976710759:0 ProgressState 2025-06-24T21:07:10.638706Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-24T21:07:10.638729Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-24T21:07:10.638756Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-24T21:07:10.638776Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-24T21:07:10.638799Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 1/1, is published: true 2025-06-24T21:07:10.638837Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [4:127:2151] message: TxId: 281474976710759 2025-06-24T21:07:10.638868Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-24T21:07:10.638893Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710759:0 2025-06-24T21:07:10.638913Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710759:0 2025-06-24T21:07:10.639004Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:07:10.640340Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710759 2025-06-24T21:07:10.640383Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976710759 2025-06-24T21:07:10.641487Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T21:07:10.641544Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [4:484:2443] TestWaitNotification: OK eventTxId 103 >> TExportToS3Tests::CancelUponCopyingTablesShouldSucceed [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::SchemaMappingEncryptionIncorrectKey [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:07:06.641821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:07:06.641922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 
15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:06.641956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:07:06.641995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:07:06.642574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:07:06.642604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:07:06.642665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:06.642738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:07:06.643376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:07:06.644142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:07:06.705954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:07:06.706005Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:06.717134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:07:06.717477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:07:06.717660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:07:06.724354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:07:06.724489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:07:06.726619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.726932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:07:06.732101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:06.732305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:07:06.737949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:06.738010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:06.738176Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:07:06.738214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:06.738253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:07:06.738319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.743435Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:07:06.848252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:07:06.848510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.848803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:07:06.848860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:07:06.849096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:07:06.849201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:06.850972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.851130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:07:06.851294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.851345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:07:06.851397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 
2025-06-24T21:07:06.851447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:07:06.853057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.853113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:07:06.853239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:07:06.854969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.855017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.855075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.855162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:07:06.858946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:07:06.860858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:07:06.861051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:07:06.862038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.862185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:06.862231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.862457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:07:06.862495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.862632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: 
IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:07:06.862687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:07:06.864254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:06.864292Z node 1 :FLAT_TX_SCHEMESHARD ... hemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710758, at schemeshard: 72057594046678944 2025-06-24T21:07:10.692899Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976710758:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710758 msg type: 269090816 2025-06-24T21:07:10.693014Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976710758, partId: 4294967295, tablet: 72057594046316545 2025-06-24T21:07:10.693110Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 FAKE_COORDINATOR: Add transaction: 281474976710758 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710758 at step: 5000005 2025-06-24T21:07:10.693528Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:10.693659Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710758 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 17179871341 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:10.693702Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:129: TRmDir HandleReply TEvOperationPlan, opId: 281474976710758:0, step: 5000005, at schemeshard: 72057594046678944 2025-06-24T21:07:10.693783Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:180: RmDir is done, opId: 281474976710758:0, at schemeshard: 72057594046678944 2025-06-24T21:07:10.693835Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710758:0 progress is 1/1 2025-06-24T21:07:10.693866Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710758 ready parts: 1/1 2025-06-24T21:07:10.693914Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710758:0 progress is 1/1 2025-06-24T21:07:10.693946Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710758 ready parts: 1/1 2025-06-24T21:07:10.693993Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T21:07:10.694049Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: 
IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-24T21:07:10.694075Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710758, ready parts: 1/1, is published: false 2025-06-24T21:07:10.694112Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710758 ready parts: 1/1 2025-06-24T21:07:10.694145Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710758:0 2025-06-24T21:07:10.694173Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710758:0 2025-06-24T21:07:10.694219Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-24T21:07:10.694251Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 281474976710758, publications: 2, subscribers: 1 2025-06-24T21:07:10.694285Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 281474976710758, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-24T21:07:10.694314Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 281474976710758, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-24T21:07:10.695168Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2025-06-24T21:07:10.696062Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:10.696090Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710758, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:10.696199Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710758, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-24T21:07:10.696305Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:10.696328Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:208:2208], at schemeshard: 72057594046678944, txId: 281474976710758, path id: 1 2025-06-24T21:07:10.696354Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:208:2208], at schemeshard: 72057594046678944, txId: 281474976710758, path id: 4 FAKE_COORDINATOR: Erasing txId 281474976710758 2025-06-24T21:07:10.696865Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-06-24T21:07:10.696918Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: 
TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-06-24T21:07:10.696942Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710758 2025-06-24T21:07:10.696983Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710758, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-24T21:07:10.697020Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-24T21:07:10.697448Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-06-24T21:07:10.697496Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-06-24T21:07:10.697515Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710758 2025-06-24T21:07:10.697536Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710758, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-24T21:07:10.697556Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-24T21:07:10.697610Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710758, subscribers: 1 2025-06-24T21:07:10.697659Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:127:2151] 2025-06-24T21:07:10.697987Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:07:10.698046Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-24T21:07:10.698134Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T21:07:10.699830Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2025-06-24T21:07:10.700894Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 
72057594046678944, cookie: 281474976710758 2025-06-24T21:07:10.700973Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710758 2025-06-24T21:07:10.701029Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976710758 2025-06-24T21:07:10.701075Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-24T21:07:10.701112Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710758 2025-06-24T21:07:10.701146Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710758, id# 103, itemIdx# 4294967295 2025-06-24T21:07:10.701382Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:07:10.702245Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete TestWaitNotification wait txId: 103 2025-06-24T21:07:10.702403Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T21:07:10.702438Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T21:07:10.702733Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T21:07:10.702795Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T21:07:10.702825Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [4:555:2513] TestWaitNotification: OK eventTxId 103 >> TExportToS3Tests::AuditCompletedExport ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::EncryptedExport [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:07:06.641839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:07:06.641949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:06.642013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:07:06.642054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:07:06.642589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:07:06.642623Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:07:06.642701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:06.642781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:07:06.643577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:07:06.644212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:07:06.725181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:07:06.725236Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:06.741179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:07:06.741552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:07:06.741748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:07:06.749482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:07:06.749673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:07:06.750361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.750654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:07:06.753668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:06.753885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:07:06.755069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:06.755129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:06.755382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:07:06.755430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:06.755485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:07:06.755578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: 
TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.762180Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:07:06.898037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:07:06.898240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.898419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:07:06.898464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:07:06.898649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:07:06.898753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:06.900760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.900915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:07:06.901054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.901102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:07:06.901141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:07:06.901182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:07:06.902643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.902693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:07:06.902722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 
2025-06-24T21:07:06.903900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.903935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.903977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.904029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:07:06.906477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:07:06.907671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:07:06.907825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:07:06.908484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.908583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:06.908627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.908831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:07:06.908872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.909010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:07:06.909072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:07:06.910562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:06.910601Z node 1 :FLAT_TX_SCHEMESHARD ... 
1:07:10.744580Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-24T21:07:10.744601Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 11 2025-06-24T21:07:10.744633Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-24T21:07:10.744692Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-06-24T21:07:10.746008Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-24T21:07:10.746082Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710763, at schemeshard: 72057594046678944 2025-06-24T21:07:10.746112Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-06-24T21:07:10.746150Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710763, at schemeshard: 72057594046678944 2025-06-24T21:07:10.747399Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976710763:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710763 msg type: 269090816 2025-06-24T21:07:10.747496Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976710763, partId: 4294967295, tablet: 72057594046316545 2025-06-24T21:07:10.747641Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 FAKE_COORDINATOR: Add transaction: 281474976710763 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710763 at step: 5000010 2025-06-24T21:07:10.747773Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-24T21:07:10.747849Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000010, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:10.747908Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710763 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 17179871341 } } Step: 5000010 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:10.747940Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:129: TRmDir HandleReply TEvOperationPlan, opId: 281474976710763:0, step: 5000010, at schemeshard: 72057594046678944 2025-06-24T21:07:10.748016Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:180: RmDir is done, opId: 281474976710763:0, at schemeshard: 72057594046678944 
2025-06-24T21:07:10.748059Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710763:0 progress is 1/1 2025-06-24T21:07:10.748088Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-24T21:07:10.748123Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710763:0 progress is 1/1 2025-06-24T21:07:10.748149Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-24T21:07:10.748192Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T21:07:10.748236Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:07:10.748263Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 1/1, is published: false 2025-06-24T21:07:10.748296Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-24T21:07:10.748342Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710763:0 2025-06-24T21:07:10.748370Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710763:0 2025-06-24T21:07:10.748420Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-24T21:07:10.748450Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 281474976710763, publications: 2, subscribers: 1 2025-06-24T21:07:10.748481Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 281474976710763, [OwnerId: 72057594046678944, LocalPathId: 1], 13 2025-06-24T21:07:10.748506Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 281474976710763, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-24T21:07:10.749672Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:10.749701Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710763, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:10.749796Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710763, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-24T21:07:10.749891Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:10.749958Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:208:2208], at 
schemeshard: 72057594046678944, txId: 281474976710763, path id: 1 2025-06-24T21:07:10.749991Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:208:2208], at schemeshard: 72057594046678944, txId: 281474976710763, path id: 4 FAKE_COORDINATOR: Erasing txId 281474976710763 2025-06-24T21:07:10.750500Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-24T21:07:10.750558Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-24T21:07:10.750584Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-24T21:07:10.750619Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-06-24T21:07:10.750652Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-24T21:07:10.751302Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-24T21:07:10.751352Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-24T21:07:10.751384Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-24T21:07:10.751410Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-24T21:07:10.751430Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:07:10.751484Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710763, subscribers: 1 2025-06-24T21:07:10.751520Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:127:2151] 2025-06-24T21:07:10.753453Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-24T21:07:10.753703Z node 4 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-24T21:07:10.753752Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710763 2025-06-24T21:07:10.753786Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976710763 2025-06-24T21:07:10.753821Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-24T21:07:10.753843Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710763 2025-06-24T21:07:10.753865Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710763, id# 103, itemIdx# 4294967295 2025-06-24T21:07:10.755011Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-24T21:07:10.755087Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T21:07:10.755123Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [4:1121:2994] TestWaitNotification: OK eventTxId 103 >> DataShardSnapshots::MvccSnapshotTailCleanup [GOOD] >> DataShardSnapshots::MvccSnapshotReadWithLongPlanQueue >> VectorIndexBuildTest::CommonDB [GOOD] >> VectorIndexBuildTest::DescriptionIsPersisted-prefixed-false >> TExportToS3Tests::Changefeeds [GOOD] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> TExportToS3Tests::AuditCompletedExport [GOOD] >> DataShardSnapshots::LockedWriteReuseAfterCommit-UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitSuccess+UseSink >> TExportToS3Tests::AuditCancelledExport >> TExportToS3Tests::RebootDuringCompletion >> DataShardSnapshots::LockedWriteBulkUpsertConflict-UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitAborted+UseSink >> TExportToS3Tests::UidAsIdempotencyKey >> TExportToS3Tests::ShouldOmitNonStrictStorageSettings >> DataShardSnapshots::ShardRestartWholeShardLockBasic [GOOD] >> DataShardSnapshots::ShardRestartLockUnrelatedUpsert ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::Changefeeds [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:07:06.641828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:07:06.641942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:06.641988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: 
StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:07:06.642028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:07:06.642580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:07:06.642612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:07:06.642696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:06.642768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:07:06.643457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:07:06.644145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:07:06.724610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:07:06.724663Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:06.739635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:07:06.739997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:07:06.740168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:07:06.747210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:07:06.747382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:07:06.747914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.748170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:07:06.750681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:06.750866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:07:06.751978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:06.752028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:06.752196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:07:06.752240Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:06.752297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:07:06.752384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.758401Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:07:06.901168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:07:06.901330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.901477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:07:06.901510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:07:06.901668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:07:06.901754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:06.903268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.903392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:07:06.903497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.903535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:07:06.903588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:07:06.903618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:07:06.904767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.904803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:07:06.904827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:07:06.905927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.905955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.905991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.906024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:07:06.908297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:07:06.909532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:07:06.909660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:07:06.910218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.910301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:06.910336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.910535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:07:06.910571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.910688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:07:06.910732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:07:06.911931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:06.911961Z node 1 :FLAT_TX_SCHEMESHARD ... 21:07:13.050989Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-24T21:07:13.051015Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 9], version: 7 2025-06-24T21:07:13.051036Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 3 2025-06-24T21:07:13.051090Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-06-24T21:07:13.053147Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-24T21:07:13.053641Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-24T21:07:13.053687Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-06-24T21:07:13.053756Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-24T21:07:13.054263Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976710761:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710761 msg type: 269090816 2025-06-24T21:07:13.054362Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976710761, partId: 4294967295, tablet: 72057594046316545 2025-06-24T21:07:13.054606Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 FAKE_COORDINATOR: Add transaction: 281474976710761 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710761 at step: 5000010 2025-06-24T21:07:13.054866Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000010, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:13.054949Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710761 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 21474838638 } } Step: 5000010 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:13.055007Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:129: TRmDir HandleReply TEvOperationPlan, opId: 
281474976710761:0, step: 5000010, at schemeshard: 72057594046678944 2025-06-24T21:07:13.055118Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:180: RmDir is done, opId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-24T21:07:13.055217Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-24T21:07:13.055262Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-24T21:07:13.055321Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-24T21:07:13.055370Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-24T21:07:13.055453Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:07:13.055536Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 2 2025-06-24T21:07:13.055576Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: false 2025-06-24T21:07:13.055618Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-24T21:07:13.055654Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710761:0 2025-06-24T21:07:13.055699Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710761:0 2025-06-24T21:07:13.055775Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 3 2025-06-24T21:07:13.055811Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 281474976710761, publications: 2, subscribers: 1 2025-06-24T21:07:13.055862Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 1], 12 2025-06-24T21:07:13.055896Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 9], 18446744073709551615 2025-06-24T21:07:13.056897Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-24T21:07:13.058006Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:13.058041Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:13.058173Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 
72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 9] 2025-06-24T21:07:13.058265Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:13.058301Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:209:2209], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 1 2025-06-24T21:07:13.058329Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:209:2209], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 9 FAKE_COORDINATOR: Erasing txId 281474976710761 2025-06-24T21:07:13.058883Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 12 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-24T21:07:13.058970Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 12 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-24T21:07:13.059005Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-24T21:07:13.059064Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 12 2025-06-24T21:07:13.059117Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T21:07:13.059681Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-24T21:07:13.059759Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-24T21:07:13.059790Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-24T21:07:13.059812Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 9], version: 18446744073709551615 2025-06-24T21:07:13.059856Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 2 2025-06-24T21:07:13.059913Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710761, subscribers: 1 2025-06-24T21:07:13.059968Z node 5 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [5:128:2152] 2025-06-24T21:07:13.062026Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-24T21:07:13.062362Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-24T21:07:13.062430Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-24T21:07:13.062472Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976710761 2025-06-24T21:07:13.062523Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-24T21:07:13.062546Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-24T21:07:13.062567Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 105, itemIdx# 4294967295 2025-06-24T21:07:13.063694Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-24T21:07:13.063762Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-24T21:07:13.063812Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [5:1393:3180] TestWaitNotification: OK eventTxId 105 >> TExportToS3Tests::AuditCancelledExport [GOOD] >> DataShardSnapshots::VolatileSnapshotMerge [GOOD] >> DataShardSnapshots::VolatileSnapshotAndLocalMKQLUpdate >> TExportToS3Tests::AutoDropping >> TExportToS3Tests::RebootDuringCompletion [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWrites+UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWrites-UseSink >> TExportToS3Tests::UidAsIdempotencyKey [GOOD] >> TExportToS3Tests::RebootDuringAbortion >> TExportToS3Tests::UserSID >> TExportToS3Tests::ShouldOmitNonStrictStorageSettings [GOOD] >> TExportToS3Tests::ShouldPreserveIncrBackupFlag >> TExportToS3Tests::UserSID [GOOD] >> TExportToS3Tests::Topics >> TExportToS3Tests::AutoDropping [GOOD] >> TExportToS3Tests::RebootDuringAbortion [GOOD] >> TExportToS3Tests::DropSourceTableBeforeTransferring >> TExportToS3Tests::ShouldPreserveIncrBackupFlag [GOOD] >> TExportToS3Tests::ExportStartTime >> TExportToS3Tests::ShouldExcludeBackupTableFromStats >> TExportToS3Tests::Topics [GOOD] >> TExportToS3Tests::CancelUponTransferringMultiShardTableShouldSucceed [GOOD] >> TExportToS3Tests::CancelUponTransferringSingleTableShouldSucceed [GOOD] >> TExportToS3Tests::CancelUponTransferringManyTablesShouldSucceed >> TExportToS3Tests::TopicsWithPermissions ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::AutoDropping [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:07:06.641792Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:07:06.641878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:06.641907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:07:06.641946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:07:06.642542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:07:06.642569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:07:06.642652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:06.642713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:07:06.643301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:07:06.644131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:07:06.705424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:07:06.705468Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:06.717092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:07:06.717330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:07:06.717513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:07:06.723710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:07:06.723900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:07:06.726615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.726831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:07:06.731621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:06.732051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: 
[RootDataErasureManager] Stop 2025-06-24T21:07:06.737918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:06.737969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:06.738157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:07:06.738192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:06.738251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:07:06.738312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.743318Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:07:06.847966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:07:06.848131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.848338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:07:06.848375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:07:06.848551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:07:06.848609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:06.850528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.850693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:07:06.850816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-06-24T21:07:06.850855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:07:06.850904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:07:06.850935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:07:06.852280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.852366Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:07:06.852409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:07:06.853661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.853695Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.853736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.853776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:07:06.861366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:07:06.862964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:07:06.863096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:07:06.863805Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.863910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:06.863967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.864170Z node 
1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:07:06.864208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.864350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:07:06.864421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:07:06.865906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:06.865954Z node 1 :FLAT_TX_SCHEMESHARD ... d__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-24T21:07:15.646826Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-24T21:07:15.646856Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976710761 2025-06-24T21:07:15.646882Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-24T21:07:15.646914Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-24T21:07:15.646946Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 102, itemIdx# 4294967295 2025-06-24T21:07:15.648422Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-24T21:07:15.648485Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:07:15.648564Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [5:476:2435] TestWaitNotification: OK eventTxId 102 2025-06-24T21:07:15.649422Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:07:15.649631Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 239us result status StatusSuccess 2025-06-24T21:07:15.650085Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 11 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 11 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 9 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 
72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 desc: 1 2025-06-24T21:07:15.650498Z node 5 :EXPORT DEBUG: schemeshard_export__forget.cpp:79: TExport::TTxForget, dropping export tables, info: { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Done WaitTxId: 281474976710761 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 } 2025-06-24T21:07:15.652240Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-24T21:07:15.652292Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:739: TExport::TTxProgress: Resume: id# 102 2025-06-24T21:07:15.652365Z node 5 :EXPORT INFO: schemeshard_export__create.cpp:537: TExport::TTxProgress: Allocate txId: info# { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Dropping WaitTxId: 0 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 } 2025-06-24T21:07:15.652422Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-24T21:07:15.652533Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 102, at schemeshard: 72057594046678944 2025-06-24T21:07:15.652587Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-24T21:07:15.652638Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:859: TExport::TTxProgress: OnAllocateResult: txId# 281474976710762, id# 102 2025-06-24T21:07:15.652713Z node 5 :EXPORT INFO: schemeshard_export__create.cpp:529: TExport::TTxProgress: Drop propose: info# { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Dropping WaitTxId: 0 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 }, txId# 281474976710762 2025-06-24T21:07:15.652823Z node 5 :EXPORT DEBUG: 
schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-24T21:07:15.655241Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpRmDir Drop { Name: "export-102" } Internal: true } TxId: 281474976710762 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:07:15.655350Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_rmdir.cpp:29: TRmDir Propose, path: /MyRoot/export-102, pathId: 0, opId: 281474976710762:0, at schemeshard: 72057594046678944 2025-06-24T21:07:15.655477Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710762:1, propose status:StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/export-102', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-24T21:07:15.657501Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976710762, response: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/export-102\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761" TxId: 281474976710762 SchemeshardId: 72057594046678944 PathId: 3 PathDropTxId: 281474976710761, at schemeshard: 72057594046678944 2025-06-24T21:07:15.657824Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710762, database: /MyRoot, subject: , status: StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/export-102', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761, operation: DROP DIRECTORY, path: /MyRoot/export-102 2025-06-24T21:07:15.657979Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6777: Handle: TEvModifySchemeTransactionResult: txId# 281474976710762, status# StatusPathDoesNotExist 2025-06-24T21:07:15.658084Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6779: Message: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/export-102\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761" TxId: 281474976710762 SchemeshardId: 72057594046678944 PathId: 3 PathDropTxId: 281474976710761 2025-06-24T21:07:15.658148Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-24T21:07:15.658197Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:920: TExport::TTxProgress: OnModifyResult: txId# 281474976710762, status# StatusPathDoesNotExist 2025-06-24T21:07:15.658283Z node 5 :EXPORT TRACE: schemeshard_export__create.cpp:921: Message: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/export-102\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761" TxId: 281474976710762 SchemeshardId: 72057594046678944 PathId: 3 PathDropTxId: 281474976710761 2025-06-24T21:07:15.658407Z node 5 :EXPORT INFO: schemeshard_export__create.cpp:1102: 
TExport::TTxProgress: Wait for completion: info# { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Dropping WaitTxId: 281474976710761 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 }, itemIdx# 4294967295, txId# 281474976710761 2025-06-24T21:07:15.660071Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-24T21:07:15.660217Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-24T21:07:15.660315Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-24T21:07:15.660371Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976710761 2025-06-24T21:07:15.660415Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-24T21:07:15.660457Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-24T21:07:15.660496Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 102, itemIdx# 4294967295 2025-06-24T21:07:15.662147Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete TestWaitNotification wait txId: 102 2025-06-24T21:07:15.662377Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:07:15.662430Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:07:15.662891Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:07:15.662990Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:07:15.663028Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [5:696:2650] TestWaitNotification: OK eventTxId 102 >> TExportToS3Tests::ExportStartTime [GOOD] >> TExportToS3Tests::DropSourceTableBeforeTransferring [GOOD] >> TExportToS3Tests::TopicsWithPermissions [GOOD] >> TExportToS3Tests::SchemaMapping >> test_sql_streaming.py::test[suites-GroupByHop-default.txt] >> TExportToS3Tests::DropCopiesBeforeTransferring1 >> TExportToS3Tests::DropCopiesBeforeTransferring1 [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitSuccess+UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitSuccess-UseSink >> DataShardSnapshots::ShardRestartLockUnrelatedUpsert [GOOD] >> DataShardSnapshots::ShardRestartLockBrokenByConflict >> TExportToS3Tests::DropCopiesBeforeTransferring2 >> TExportToS3Tests::SchemaMapping [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::TopicsWithPermissions [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 
2025-06-24T21:07:14.210961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:07:14.211043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:14.211071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:07:14.211099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:07:14.211139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:07:14.211182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:07:14.211232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:14.211279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:07:14.211812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:07:14.212046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:07:14.266391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:07:14.266433Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:14.278872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:07:14.279166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:07:14.279290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:07:14.284378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:07:14.284509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:07:14.284917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:14.285090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:07:14.287428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:14.287585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:07:14.288414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:14.288465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:14.288635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:07:14.288669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:14.288697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:07:14.288760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:07:14.293505Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:07:14.385177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:07:14.385362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:14.385517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:07:14.385549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:07:14.385714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:07:14.385816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:14.387753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:14.387893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:07:14.388024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:14.388067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:07:14.388100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:07:14.388129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:07:14.389502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:14.389544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:07:14.389570Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:07:14.390791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:14.390825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:14.390881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:14.390920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:07:14.393351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:07:14.394880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:07:14.395067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:07:14.396177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:14.396318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:14.396369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-24T21:07:14.396648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:07:14.396703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:14.396873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:07:14.396929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:07:14.398337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:14.398368Z node 1 :FLAT_TX_SCHEMESHARD ... blish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710757, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:07:16.932847Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:16.932875Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:208:2208], at schemeshard: 72057594046678944, txId: 281474976710757, path id: 1 2025-06-24T21:07:16.932905Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:208:2208], at schemeshard: 72057594046678944, txId: 281474976710757, path id: 3 2025-06-24T21:07:16.932947Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710757:0, at schemeshard: 72057594046678944 2025-06-24T21:07:16.932976Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 281474976710757:0 ProgressState 2025-06-24T21:07:16.933032Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710757:0 progress is 1/1 2025-06-24T21:07:16.933053Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710757 ready parts: 1/1 2025-06-24T21:07:16.933078Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710757:0 progress is 1/1 2025-06-24T21:07:16.933098Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710757 ready parts: 1/1 2025-06-24T21:07:16.933120Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710757, ready parts: 1/1, is published: false 2025-06-24T21:07:16.933146Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710757 ready parts: 1/1 2025-06-24T21:07:16.933167Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710757:0 2025-06-24T21:07:16.933185Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710757:0 
2025-06-24T21:07:16.933226Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:07:16.933248Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 281474976710757, publications: 2, subscribers: 1 2025-06-24T21:07:16.933269Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 281474976710757, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-24T21:07:16.933289Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 281474976710757, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-24T21:07:16.934331Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 281474976710757 2025-06-24T21:07:16.934413Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 281474976710757 2025-06-24T21:07:16.934445Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710757 2025-06-24T21:07:16.934477Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710757, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-24T21:07:16.934513Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T21:07:16.934927Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710757 2025-06-24T21:07:16.934999Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710757 2025-06-24T21:07:16.935021Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710757 2025-06-24T21:07:16.935045Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710757, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-24T21:07:16.935066Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T21:07:16.935113Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710757, subscribers: 1 2025-06-24T21:07:16.935158Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:127:2151] 2025-06-24T21:07:16.937770Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710757 2025-06-24T21:07:16.937889Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710757 2025-06-24T21:07:16.937948Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710757 2025-06-24T21:07:16.937983Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976710757 2025-06-24T21:07:16.942882Z node 4 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:16.944101Z node 4 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 2 took 129us result status StatusSuccess 2025-06-24T21:07:16.944436Z node 4 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic" PathDescription { Self { Name: "Topic" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409548 } PersQueueGroup { Name: "Topic" PathId: 2 TotalGroupCount: 2 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot" } Partitions { PartitionId: 0 TabletId: 72075186233409547 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409548 NextPartitionId: 2 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 2 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 102 2025-06-24T21:07:16.960072Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- 
TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:07:16.960116Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:07:16.960382Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:62: NotifyTxCompletion export in-flight, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:07:16.960412Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 102, at schemeshard: 72057594046678944 REQUEST: PUT /create_topic.pb HTTP/1.1 HEADERS: Host: localhost:12078 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: A7833437-D430-4F83-9FCC-32705FC696C6 amz-sdk-request: attempt=1 content-length: 468 content-md5: eolrX6cGdcMGCBM8sb+6PQ== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /create_topic.pb / / 468 REQUEST: PUT /permissions.pb HTTP/1.1 HEADERS: Host: localhost:12078 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: ABFCDD10-ED11-4139-9F3B-E3A9579A97FA amz-sdk-request: attempt=1 content-length: 43 content-md5: JIqMFsQjXF0c+sG0y+coog== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /permissions.pb / / 43 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:12078 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 5B0E6A21-78C6-47B0-AEF5-BAB2B66B999F amz-sdk-request: attempt=1 content-length: 64 content-md5: axcCOQtFAWkgKK80Zy2JrQ== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 64 2025-06-24T21:07:16.977946Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:07:16.978002Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:623:2550] TestWaitNotification: OK eventTxId 102 >> DataShardSnapshots::VolatileSnapshotAndLocalMKQLUpdate [GOOD] >> DataShardSnapshots::VolatileSnapshotReadTable >> DataShardSnapshots::LockedWriteDistributedCommitAborted+UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitAborted-UseSink >> test_sql_streaming.py::test[suites-GroupByHopTimeExtractorUnusedColumns-default.txt] >> TExportToS3Tests::DropCopiesBeforeTransferring2 [GOOD] >> TExportToS3Tests::CorruptedDyNumber >> DataShardSnapshots::MvccSnapshotLockedWrites-UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesRestart+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::SchemaMapping [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:07:13.931476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: 
BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:07:13.931552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:13.931581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:07:13.931604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:07:13.931652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:07:13.931673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:07:13.931722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:13.931779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:07:13.932308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:07:13.932512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:07:13.985589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:07:13.985650Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:13.996384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:07:13.996686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:07:13.996824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:07:14.002822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:07:14.002952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:07:14.003426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:14.003654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:07:14.006000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:14.006152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:07:14.007223Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:14.007293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:14.007472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:07:14.007504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:14.007534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:07:14.007618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:07:14.012576Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:07:14.111468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:07:14.111668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:14.111888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:07:14.111923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:07:14.112075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:07:14.112159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:14.114122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:14.114268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:07:14.114404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:14.114448Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:07:14.114483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:07:14.114517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:07:14.116101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:14.116143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:07:14.116169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:07:14.117720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:14.117753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:14.117794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:14.117832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:07:14.120429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:07:14.121840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:07:14.121971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:07:14.122692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:14.122785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:14.122824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:14.123022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change 
state for txid 1:0 128 -> 240 2025-06-24T21:07:14.123060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:14.123199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:07:14.123252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:07:14.124835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:14.124880Z node 1 :FLAT_TX_SCHEMESHARD ... 21:07:18.065757Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-24T21:07:18.065811Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 11 2025-06-24T21:07:18.065852Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-24T21:07:18.065981Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-06-24T21:07:18.068384Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-24T21:07:18.068608Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710763, at schemeshard: 72057594046678944 2025-06-24T21:07:18.068659Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-06-24T21:07:18.068741Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710763, at schemeshard: 72057594046678944 2025-06-24T21:07:18.070334Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976710763:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710763 msg type: 269090816 2025-06-24T21:07:18.070497Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976710763, partId: 4294967295, tablet: 72057594046316545 2025-06-24T21:07:18.070965Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 FAKE_COORDINATOR: Add transaction: 281474976710763 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710763 at step: 5000010 2025-06-24T21:07:18.071688Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000010, transactions count in step: 1, at schemeshard: 
72057594046678944 2025-06-24T21:07:18.071845Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710763 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 17179871341 } } Step: 5000010 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:18.071944Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:129: TRmDir HandleReply TEvOperationPlan, opId: 281474976710763:0, step: 5000010, at schemeshard: 72057594046678944 2025-06-24T21:07:18.072116Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:180: RmDir is done, opId: 281474976710763:0, at schemeshard: 72057594046678944 2025-06-24T21:07:18.072205Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710763:0 progress is 1/1 2025-06-24T21:07:18.072269Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-24T21:07:18.072343Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710763:0 progress is 1/1 2025-06-24T21:07:18.072389Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-24T21:07:18.072500Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T21:07:18.072613Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:07:18.072676Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 1/1, is published: false 2025-06-24T21:07:18.072756Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-24T21:07:18.072815Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710763:0 2025-06-24T21:07:18.072871Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710763:0 2025-06-24T21:07:18.072987Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-24T21:07:18.073051Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 281474976710763, publications: 2, subscribers: 1 2025-06-24T21:07:18.073137Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 281474976710763, [OwnerId: 72057594046678944, LocalPathId: 1], 13 2025-06-24T21:07:18.073212Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 281474976710763, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-24T21:07:18.074206Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-24T21:07:18.076130Z node 4 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:18.076211Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710763, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:18.076468Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710763, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-24T21:07:18.076635Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:18.076681Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:208:2208], at schemeshard: 72057594046678944, txId: 281474976710763, path id: 1 2025-06-24T21:07:18.076769Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:208:2208], at schemeshard: 72057594046678944, txId: 281474976710763, path id: 4 FAKE_COORDINATOR: Erasing txId 281474976710763 2025-06-24T21:07:18.077826Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-24T21:07:18.077979Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-24T21:07:18.078039Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-24T21:07:18.078116Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-06-24T21:07:18.078192Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-24T21:07:18.079908Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-24T21:07:18.080086Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-24T21:07:18.080133Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-24T21:07:18.080193Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 
72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-24T21:07:18.080249Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:07:18.080411Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710763, subscribers: 1 2025-06-24T21:07:18.080515Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:127:2151] 2025-06-24T21:07:18.083901Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-24T21:07:18.084325Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-24T21:07:18.084436Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710763 2025-06-24T21:07:18.084507Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976710763 2025-06-24T21:07:18.084586Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-24T21:07:18.084639Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710763 2025-06-24T21:07:18.084695Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710763, id# 103, itemIdx# 4294967295 2025-06-24T21:07:18.086729Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-24T21:07:18.086836Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T21:07:18.086893Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [4:857:2784] TestWaitNotification: OK eventTxId 103 >> TExportToS3Tests::CorruptedDyNumber [GOOD] >> TExportToS3Tests::DisableAutoDropping >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> AnalyzeDatashard::AnalyzeOneTable [GOOD] >> TExportToS3Tests::DisableAutoDropping [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeDatashard::AnalyzeOneTable [GOOD] Test command err: 2025-06-24T21:04:58.376627Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:04:58.377012Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:04:58.377274Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043d5/r3tmp/tmpfY0hzx/pdisk_1.dat 2025-06-24T21:04:58.794872Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12462, node 1 2025-06-24T21:04:59.383838Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:59.383928Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:59.383973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:59.384416Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:04:59.397575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:04:59.547357Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:59.547508Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:59.562710Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31558 2025-06-24T21:05:00.167431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:03.682963Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:03.721925Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:03.722048Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:03.752213Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:03.754475Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:03.997789Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:04.024256Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.024966Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.025567Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.025711Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.025944Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.026030Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.026118Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.026200Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.026277Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.221332Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:04.221476Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:04.235386Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:04.379882Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:04.432100Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:04.432222Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:04.470649Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:04.470947Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:04.471224Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:04.471292Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:04.471359Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:04.471414Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:04.471490Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:04.471552Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:04.472491Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:04.504722Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:04.504878Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1790:2561], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:04.512871Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1799:2569] 2025-06-24T21:05:04.520843Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1837:2586] 2025-06-24T21:05:04.521472Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:04.522052Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1837:2586], schemeshard id = 72075186224037897 2025-06-24T21:05:04.545289Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:04.545339Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:04.545392Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:04.558979Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:04.576850Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:04.577056Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:04.765512Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:04.989806Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:05.089026Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:05.719189Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:06.112260Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2139:3019], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:06.112522Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:06.222139Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:06.743775Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2436:3066], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:06.743942Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:06.745222Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2441:3070]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:06.745435Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:05:06.745511Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2443:3072] 2025-06-24T21:05:06.746954Z no ... unt = 1 2025-06-24T21:07:08.693264Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:07:12.342558Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:07:13.620639Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-24T21:07:13.620719Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7957: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-24T21:07:13.620763Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7988: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-24T21:07:13.620796Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-24T21:07:14.894233Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:07:14.894502Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:07:14.947439Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8072: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037897 2025-06-24T21:07:14.947506Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 188.000000s, at schemeshard: 72075186224037897 2025-06-24T21:07:14.947747Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 49 2025-06-24T21:07:14.960328Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-24T21:07:16.244957Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:16.245058Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:07:16.245105Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-24T21:07:16.245167Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-24T21:07:16.245212Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:16.245616Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:07:16.274388Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:07:16.278075Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6463:4604], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:16.278188Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6473:4609], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:16.278340Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:16.290398Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:16.332142Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:6477:4612], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-24T21:07:16.515460Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:6575:4660] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:16.554770Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:6604:4675]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:16.554913Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:07:16.554974Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:6606:4677] 2025-06-24T21:07:16.555018Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:6606:4677] 2025-06-24T21:07:16.555260Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:6607:4678] 2025-06-24T21:07:16.555341Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:6607:4678], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:07:16.555376Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:07:16.555477Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:6606:4677], server id = [2:6607:4678], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:16.555560Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:16.555615Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:6604:4675], StatRequests.size() = 1 2025-06-24T21:07:16.648406Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YWE5MjczZTMtNzMwZWVkNWYtOWFjNjFhY2YtNzE2MmY2MGQ=, TxId: 2025-06-24T21:07:16.648472Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YWE5MjczZTMtNzMwZWVkNWYtOWFjNjFhY2YtNzE2MmY2MGQ=, TxId: 2025-06-24T21:07:16.648844Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:16.661747Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:16.661811Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:07:16.703807Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:07:16.703891Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:07:16.787277Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:6606:4677], schemeshard count = 1 2025-06-24T21:07:17.777515Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:17.777602Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-24T21:07:17.777640Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:645: [72075186224037894] ScheduleNextAnalyze. Skip analyze for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:18.997177Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:07:19.018284Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:19.018389Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-24T21:07:19.018446Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:19.018734Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:19.020685Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:07:19.028940Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NDU2YmJiNjUtN2MyNmEyMzYtY2E5OTc2NWMtYzRiZmVjMDI=, TxId: 2025-06-24T21:07:19.028999Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NDU2YmJiNjUtN2MyNmEyMzYtY2E5OTc2NWMtYzRiZmVjMDI=, TxId: 2025-06-24T21:07:19.029319Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:19.042354Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:19.042411Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:2640:3183] 2025-06-24T21:07:19.042870Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:6714:4741]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:19.045211Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:19.045275Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:07:19.048620Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:19.048667Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 2 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:07:19.048707Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:07:19.050536Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 1 ], RowsCount[ 0 ] 2025-06-24T21:07:19.050797Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 2 >> test_sql_streaming.py::test[suites-ReadTwoTopics-default.txt] >> TExportToS3Tests::CancelUponTransferringManyTablesShouldSucceed [GOOD] >> AnalyzeDatashard::AnalyzeTwoTables [GOOD] >> TExportToS3Tests::CancelledExportEndTime >> DataShardSnapshots::ShardRestartLockBrokenByConflict [GOOD] >> DataShardSnapshots::ShardRestartWholeShardLockBrokenByUpsert ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::DisableAutoDropping [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:07:16.060915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:07:16.061009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:16.061040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:07:16.061064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:07:16.061108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:07:16.061129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:07:16.061179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, 
WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:16.061229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:07:16.061749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:07:16.061970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:07:16.118073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:07:16.118117Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:16.129327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:07:16.129560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:07:16.129704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:07:16.135108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:07:16.135243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:07:16.135659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:16.135839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:07:16.137930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:16.138089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:07:16.138887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:16.138940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:16.139132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:07:16.139181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:16.139211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:07:16.139270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:07:16.144392Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] 
recipient: [1:15:2062] 2025-06-24T21:07:16.226643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:07:16.226840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:16.227001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:07:16.227032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:07:16.227211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:07:16.227301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:16.229084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:16.229221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:07:16.229382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:16.229428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:07:16.229471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:07:16.229507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:07:16.230953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:16.230991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:07:16.231048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:07:16.232471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:16.232505Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:16.232567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:16.232632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:07:16.235915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:07:16.237297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:07:16.237421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:07:16.238079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:16.238175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:16.238210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:16.238401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:07:16.238434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:16.238558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:07:16.238604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:07:16.240141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:16.240175Z node 1 :FLAT_TX_SCHEMESHARD ... 
T21:07:20.559520Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-24T21:07:20.559539Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 7 2025-06-24T21:07:20.559562Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:07:20.559604Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-06-24T21:07:20.560902Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-24T21:07:20.560998Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-24T21:07:20.561038Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-06-24T21:07:20.561083Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-24T21:07:20.561478Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976710761:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710761 msg type: 269090816 2025-06-24T21:07:20.561568Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976710761, partId: 4294967295, tablet: 72057594046316545 2025-06-24T21:07:20.561779Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 FAKE_COORDINATOR: Add transaction: 281474976710761 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710761 at step: 5000007 2025-06-24T21:07:20.562181Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000007, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:20.562244Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710761 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 21474838638 } } Step: 5000007 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:20.562276Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:129: TRmDir HandleReply TEvOperationPlan, opId: 281474976710761:0, step: 5000007, at schemeshard: 72057594046678944 2025-06-24T21:07:20.562358Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:180: RmDir is done, opId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-24T21:07:20.562406Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-24T21:07:20.562436Z node 5 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-24T21:07:20.562468Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-24T21:07:20.562494Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-24T21:07:20.562535Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:07:20.562583Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:07:20.562618Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: false 2025-06-24T21:07:20.562664Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-24T21:07:20.562704Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710761:0 2025-06-24T21:07:20.562739Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710761:0 2025-06-24T21:07:20.562788Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:07:20.562817Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 281474976710761, publications: 2, subscribers: 1 2025-06-24T21:07:20.562848Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-24T21:07:20.562895Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-24T21:07:20.563339Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-24T21:07:20.564220Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:20.564248Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:20.564332Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:07:20.564395Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:20.564417Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:209:2209], at 
schemeshard: 72057594046678944, txId: 281474976710761, path id: 1 2025-06-24T21:07:20.564459Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:209:2209], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 3 FAKE_COORDINATOR: Erasing txId 281474976710761 2025-06-24T21:07:20.564918Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-24T21:07:20.564969Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-24T21:07:20.564996Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-24T21:07:20.565029Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-24T21:07:20.565064Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T21:07:20.565705Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-24T21:07:20.565786Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-24T21:07:20.565816Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-24T21:07:20.565843Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-24T21:07:20.565874Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:07:20.565939Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710761, subscribers: 1 2025-06-24T21:07:20.565980Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [5:128:2152] 2025-06-24T21:07:20.567547Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-24T21:07:20.567712Z node 5 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-24T21:07:20.567755Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-24T21:07:20.567788Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976710761 2025-06-24T21:07:20.567819Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-24T21:07:20.567839Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-24T21:07:20.567858Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 102, itemIdx# 4294967295 2025-06-24T21:07:20.568865Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-24T21:07:20.568924Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:07:20.568956Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [5:620:2574] TestWaitNotification: OK eventTxId 102 >> DataShardSnapshots::MvccSnapshotReadWithLongPlanQueue [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesWithoutConflicts-UseSink >> DataShardSnapshots::LockedWriteDistributedCommitSuccess-UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitFreeze+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeDatashard::AnalyzeTwoTables [GOOD] Test command err: 2025-06-24T21:05:09.111483Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:09.111862Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:09.112027Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00438a/r3tmp/tmpmEA01J/pdisk_1.dat 2025-06-24T21:05:09.495696Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23419, node 1 2025-06-24T21:05:09.702585Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:09.702653Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:09.702703Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:09.702908Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:09.705437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:09.811264Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:09.811426Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:09.824929Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20120 2025-06-24T21:05:10.351291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:13.515285Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:13.551254Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:13.551369Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:13.610628Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:13.612645Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:13.784191Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:13.819059Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.819732Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.820365Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.820560Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.820695Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.820963Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.821094Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.821246Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.821329Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.996731Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:13.996839Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:14.010032Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:14.170839Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:14.214311Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:14.214404Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:14.246774Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:14.248512Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:14.248790Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:14.248868Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:14.248927Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:14.248985Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:14.249035Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:14.249096Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:14.250040Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:14.286570Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:14.286690Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:14.292906Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:14.295961Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:14.296264Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:14.303746Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:14.330624Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:14.330697Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:14.330772Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:14.343770Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:14.355469Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:14.355600Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:14.538128Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:14.688147Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:14.733365Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:15.232476Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:15.502578Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:15.502754Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:15.521399Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:15.989530Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2448:3072], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:15.989677Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:15.990849Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2453:3076]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:05:15.991049Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:05:15.991132Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2455:3078] 2025-06-24T21:05:15.991225Z no ... istics, node id = 2 2025-06-24T21:07:13.693384Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:7095:4969], StatRequests.size() = 1 2025-06-24T21:07:13.798329Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OTVmNGNhN2EtZWIxZDI5MDgtZThjN2M0YmMtZGRlYzMyODk=, TxId: 2025-06-24T21:07:13.798394Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OTVmNGNhN2EtZWIxZDI5MDgtZThjN2M0YmMtZGRlYzMyODk=, TxId: 2025-06-24T21:07:13.798817Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:13.812158Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-24T21:07:13.812214Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:07:13.844140Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:07:13.844256Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:07:13.919189Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7097:4971], schemeshard count = 1 2025-06-24T21:07:14.934533Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:14.934636Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-24T21:07:14.934689Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:645: [72075186224037894] ScheduleNextAnalyze. Skip analyze for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:16.017260Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:07:16.038768Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:16.038900Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-24T21:07:16.038931Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:16.039224Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:07:16.041416Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:07:16.049924Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZThjMjNkMjctYTY0ZWU4OTUtZWRmNzc4NDAtNmJiMTFhOA==, TxId: 2025-06-24T21:07:16.049973Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZThjMjNkMjctYTY0ZWU4OTUtZWRmNzc4NDAtNmJiMTFhOA==, TxId: 2025-06-24T21:07:16.050352Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:16.063535Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:16.063612Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:47: [72075186224037894] TTxFinishTraversal::Complete. Don't send TEvAnalyzeResponse. There are pending operations, OperationId operationId , ActorId=[1:3013:3264] 2025-06-24T21:07:17.215816Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:17.215894Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 5] is data table. 2025-06-24T21:07:17.215946Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:645: [72075186224037894] ScheduleNextAnalyze. Skip analyze for datashard table [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-24T21:07:18.221507Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-24T21:07:18.221632Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:18.221898Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:07:18.243007Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:18.243061Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-24T21:07:18.243085Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:18.243302Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:07:18.245039Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:07:18.251068Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MjQyYzU4MzYtNDgwY2Q4YzYtMzU2YTFlMjQtZGRmZjMyNDY=, TxId: 2025-06-24T21:07:18.251108Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MjQyYzU4MzYtNDgwY2Q4YzYtMzU2YTFlMjQtZGRmZjMyNDY=, TxId: 2025-06-24T21:07:18.251411Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:18.264056Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:18.264095Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:07:19.345103Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:19.345181Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:07:19.345222Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:07:20.390854Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:20.390946Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 5] is data table. 2025-06-24T21:07:20.390968Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-24T21:07:20.391234Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:20.393048Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:07:20.399543Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OWM2MDM4ODItOGZiMDFmMGItOThhMTdlMmMtYWU4OTBhYzQ=, TxId: 2025-06-24T21:07:20.399588Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OWM2MDM4ODItOGZiMDFmMGItOThhMTdlMmMtYWU4OTBhYzQ=, TxId: 2025-06-24T21:07:20.399894Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:20.412397Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-24T21:07:20.412453Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3013:3264] 2025-06-24T21:07:20.412828Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7391:5131]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:20.415025Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:20.415068Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:07:20.418020Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:20.418063Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 2 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:07:20.418105Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:07:20.420089Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 1 ], RowsCount[ 0 ] 2025-06-24T21:07:20.420267Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 2 2025-06-24T21:07:20.420575Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:7421:5143]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:20.422531Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:20.422570Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:07:20.422791Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:20.422821Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:07:20.422857Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 2 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 5] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:07:20.424483Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 2 ], RowsCount[ 0 ] 2025-06-24T21:07:20.424655Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >> BasicUsage::WaitEventBlocksBeforeDiscovery [GOOD] >> BasicUsage::SimpleHandlers >> TExportToS3Tests::CancelledExportEndTime [GOOD] >> DataShardSnapshots::VolatileSnapshotReadTable [GOOD] >> DataShardSnapshots::VolatileSnapshotRefreshDiscard >> DataShardSnapshots::LockedWriteDistributedCommitAborted-UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitCrossConflict+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::CancelledExportEndTime [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] 
recipient: [1:111:2141] 2025-06-24T21:07:06.641797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:07:06.641895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:06.641935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:07:06.641990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:07:06.642593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:07:06.642642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:07:06.642713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:06.642775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:07:06.643566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:07:06.644198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:07:06.729222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:07:06.729262Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:06.739976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:07:06.740207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:07:06.740334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:07:06.746185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:07:06.746314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:07:06.746823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.747036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:07:06.749219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:06.749404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:07:06.750268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:06.750314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:06.750457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:07:06.750486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:06.750538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:07:06.750610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.755315Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:07:06.845898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:07:06.846096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.846285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:07:06.846322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:07:06.846497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:07:06.846555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:06.848595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.848788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:07:06.848941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.849014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:07:06.849051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:07:06.849075Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:07:06.850569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.850609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:07:06.850635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:07:06.852038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.852068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.852109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.852138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:07:06.855095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:07:06.856501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:07:06.856633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:07:06.857330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.857431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:06.857462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-24T21:07:06.857685Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:07:06.857722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.857853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:07:06.857907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:07:06.859448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:06.859501Z node 1 :FLAT_TX_SCHEMESHARD ... 94046678944 TestWaitNotification wait txId: 102 2025-06-24T21:07:22.651336Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:07:22.651380Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:07:22.653698Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/export-102" OperationType: ESchemeOpBackup Backup { TableName: "0" NumberOfRetries: 0 S3Settings { Endpoint: "localhost:4224" Scheme: HTTP Bucket: "" ObjectKeyPattern: "" AccessKey: "" SecretKey: "" StorageClass: STORAGE_CLASS_UNSPECIFIED UseVirtualAddressing: true } Table { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 
PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } NeedToBill: true SnapshotStep: 0 SnapshotTxId: 0 EnableChecksums: false EnablePermissions: false } Internal: true } TxId: 281474976710759 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:07:22.654136Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_backup_restore_common.h:586: TBackup Propose, path: /MyRoot/export-102/0, opId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-24T21:07:22.654232Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-24T21:07:22.654264Z node 4 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976710759:0 type: TxBackup target path: [OwnerId: 72057594046678944, LocalPathId: 4] source path: 2025-06-24T21:07:22.654541Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710759:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:07:22.654588Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpBackup, opId: 281474976710759:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_backup_restore_common.h:563) 2025-06-24T21:07:22.655513Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:62: NotifyTxCompletion export in-flight, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:07:22.655553Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:07:22.656778Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976710759, response: Status: StatusAccepted TxId: 281474976710759 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:22.657029Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710759, database: /MyRoot, subject: , status: StatusAccepted, operation: BACKUP TABLE, path: /MyRoot/export-102/0 2025-06-24T21:07:22.657249Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6777: Handle: TEvModifySchemeTransactionResult: txId# 281474976710759, status# StatusAccepted 2025-06-24T21:07:22.657298Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6779: Message: Status: StatusAccepted TxId: 281474976710759 SchemeshardId: 72057594046678944 2025-06-24T21:07:22.657503Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-24T21:07:22.657551Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976710759:0 ProgressState, operation type: TxBackup, at tablet# 72057594046678944 2025-06-24T21:07:22.657592Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 281474976710759:0 ProgressState no shards to create, do next state 2025-06-24T21:07:22.657621Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710759:0 2 -> 3 2025-06-24T21:07:22.659498Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:75: TTxOperationProposeCancelTx Execute, at schemeshard: 72057594046678944, message: TargetTxId: 281474976710759 TxId: 102 2025-06-24T21:07:22.659553Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_cancel_tx.cpp:37: Execute cancel tx: opId# 102:0, target opId# 281474976710759:0 2025-06-24T21:07:22.660044Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-24T21:07:22.660088Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_backup_restore_common.h:58: TBackup TConfigurePart ProgressState, opId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-24T21:07:22.660247Z node 4 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_backup.cpp:41: Propose backup to datashard 72075186233409547 txid 281474976710759:0 at schemeshard 72057594046678944 2025-06-24T21:07:22.662286Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:92: TTxOperationProposeCancelTx Complete, at schemeshard: 72057594046678944 2025-06-24T21:07:22.662457Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-24T21:07:22.662482Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_backup_restore_common.h:58: TBackup TConfigurePart ProgressState, opId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-24T21:07:22.662564Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_backup.cpp:41: Propose backup to datashard 72075186233409547 txid 281474976710759:0 at schemeshard 72057594046678944 2025-06-24T21:07:22.662899Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6864: Handle: TEvCancelTxResult: Cookie: 102, at schemeshard: 72057594046678944 2025-06-24T21:07:22.662979Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6866: Message: Status: StatusAccepted Result: "Cancelled at SchemeShard" TargetTxId: 281474976710759 TxId: 102 2025-06-24T21:07:22.663437Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976710759:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 269549568 2025-06-24T21:07:22.663549Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976710759, partId: 0, tablet: 72075186233409547 2025-06-24T21:07:22.665601Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976710759:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 269549568 2025-06-24T21:07:22.666076Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:07:22.666122Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:564:2520] TestWaitNotification: OK eventTxId 102 >> DataShardSnapshots::MvccSnapshotLockedWritesRestart+UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesRestart-UseSink >> KqpUniqueIndex::ReplaceFkPartialColumnSet >> KqpUniqueIndex::UpdateFkAlreadyExist >> KqpUniqueIndex::InsertFkPkOverlap >> KqpIndexes::InnerJoinSecondaryIndexLookupAndRightTablePredicateNonIndexColumn >> KqpIndexes::NullInIndexTable >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeAggregate [GOOD] >> DataShardSnapshots::ShardRestartWholeShardLockBrokenByUpsert [GOOD] >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedBeforeRead+UseSink >> DataShardSnapshots::MvccSnapshotLockedWritesWithoutConflicts-UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotReadLockedWrites+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeAggregate [GOOD] Test command err: 2025-06-24T21:04:58.279062Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:417:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:04:58.279614Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:04:58.279707Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043d1/r3tmp/tmpE0aEF1/pdisk_1.dat 2025-06-24T21:04:58.736154Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17975, node 1 2025-06-24T21:04:59.386617Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:59.386672Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:59.386707Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:59.387309Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:04:59.401849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:04:59.527779Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:59.527938Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:59.546179Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17950 2025-06-24T21:05:00.137233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:03.501353Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:03.548626Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:03.548761Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:03.602627Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:03.605542Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:03.876621Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:03.916926Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:03.917585Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:03.918133Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:03.918288Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:03.918544Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:03.918637Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:03.918735Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:03.918819Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:03.918896Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.119908Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:04.120035Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:04.136915Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:04.300395Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:04.352567Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:04.352671Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:04.388161Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:04.388892Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:04.389138Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:04.389220Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:04.389272Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:04.389326Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:04.389379Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:04.389435Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:04.389956Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:04.419436Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:04.419545Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1790:2560], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:04.426949Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:04.437184Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1838:2585] 2025-06-24T21:05:04.438583Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1838:2585], schemeshard id = 72075186224037897 2025-06-24T21:05:04.444025Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:04.465553Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:04.465596Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:04.465651Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:04.479703Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:04.488437Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:04.488583Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:04.717456Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:04.896312Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:04.972768Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:05.534303Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:06.112270Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2143:3020], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:06.112504Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:06.225981Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:06.435345Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2239:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:06.435599Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2239:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:06.435886Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2239:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:06.436018Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2239:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:06.436153Z node 2 :TX_COLUMNSHARD WARN: ... , at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:07:24.437312Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8287:6112], server id = [2:8288:6113], tablet id = 72075186224037894 2025-06-24T21:07:24.437380Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8405:6173] 2025-06-24T21:07:24.437430Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8405:6173] 2025-06-24T21:07:24.469286Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:07:24.469374Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:07:24.469797Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:07:24.470378Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:07:24.470616Z node 2 :STATISTICS DEBUG: tx_init.cpp:55: [72075186224037894] Loaded database: /Root/Database 2025-06-24T21:07:24.470652Z node 2 :STATISTICS DEBUG: tx_init.cpp:59: [72075186224037894] Loaded traversal start key 2025-06-24T21:07:24.470682Z node 2 :STATISTICS DEBUG: tx_init.cpp:64: [72075186224037894] Loaded traversal table owner id: 72075186224037897 2025-06-24T21:07:24.470706Z node 2 :STATISTICS DEBUG: tx_init.cpp:69: [72075186224037894] Loaded traversal table local path id: 4 2025-06-24T21:07:24.470734Z node 2 :STATISTICS DEBUG: tx_init.cpp:74: [72075186224037894] Loaded traversal start time: 1750799244404339 2025-06-24T21:07:24.470759Z node 2 :STATISTICS DEBUG: tx_init.cpp:79: [72075186224037894] Loaded traversal IsColumnTable: 1 2025-06-24T21:07:24.470789Z node 2 :STATISTICS DEBUG: tx_init.cpp:84: [72075186224037894] Loaded global traversal round: 2 2025-06-24T21:07:24.470870Z node 2 :STATISTICS DEBUG: 
tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 1 2025-06-24T21:07:24.470927Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:07:24.470993Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 2 2025-06-24T21:07:24.471035Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:07:24.471101Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:07:24.471191Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:07:24.471317Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:24.472194Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:07:24.472671Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:24.472735Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:24.472829Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:07:24.474030Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:24.474092Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:24.475569Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-24T21:07:24.539293Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:24.539555Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-24T21:07:24.540265Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8452:6204], server id = [2:8456:6208], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:24.540573Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8452:6204], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:24.540809Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8453:6205], server id = [2:8457:6209], tablet id = 72075186224037900, status = OK 2025-06-24T21:07:24.540844Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8453:6205], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:24.541710Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8454:6206], server id = [2:8458:6210], tablet id = 72075186224037901, status = OK 2025-06-24T21:07:24.541758Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8454:6206], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:24.541881Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8455:6207], server id = [2:8459:6211], tablet id = 72075186224037902, status = OK 2025-06-24T21:07:24.541910Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8455:6207], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:24.546978Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:24.547604Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8452:6204], server id = [2:8456:6208], tablet id = 72075186224037899 2025-06-24T21:07:24.547649Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:24.547978Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-24T21:07:24.548218Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8453:6205], server id = [2:8457:6209], tablet id = 72075186224037900 2025-06-24T21:07:24.548237Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:24.548664Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-24T21:07:24.548884Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8454:6206], server id = [2:8458:6210], tablet id = 72075186224037901 2025-06-24T21:07:24.548903Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:24.549125Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-24T21:07:24.549161Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:24.549335Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 
2025-06-24T21:07:24.549491Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:24.549693Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:24.551846Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8455:6207], server id = [2:8459:6211], tablet id = 72075186224037902 2025-06-24T21:07:24.551889Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:24.552400Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:24.585549Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8488:6236]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:24.585788Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:24.585831Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8488:6236], StatRequests.size() = 1 2025-06-24T21:07:24.725352Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YzBjZGNlMTYtMWQwM2RjMWYtNjc2M2IzNmQtYjJkYWZlNTU=, TxId: 2025-06-24T21:07:24.725421Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YzBjZGNlMTYtMWQwM2RjMWYtNjc2M2IzNmQtYjJkYWZlNTU=, TxId: 2025-06-24T21:07:24.725963Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:24.749308Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8503:6242] 2025-06-24T21:07:24.749578Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:8503:6242], schemeshard id = 72075186224037897 2025-06-24T21:07:24.749696Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8405:6173], server id = [2:8504:6243], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:24.749806Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8504:6243] 2025-06-24T21:07:24.749869Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8504:6243], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-24T21:07:24.762771Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:24.762846Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:07:24.827497Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8507:6246]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:24.827867Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:24.827925Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:07:24.830544Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:24.830594Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:07:24.830633Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:07:24.834380Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> DataShardSnapshots::LockedWriteDistributedCommitFreeze+UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitFreeze-UseSink >> DataShardSnapshots::VolatileSnapshotRefreshDiscard [GOOD] >> DataShardSnapshots::VolatileSnapshotTimeout >> KqpMultishardIndex::SortedRangeReadDesc >> DataShardSnapshots::MvccSnapshotLockedWritesRestart-UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesWithoutConflicts+UseSink >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> KqpUniqueIndex::InsertFkPkOverlap [GOOD] >> KqpUniqueIndex::InsertNullInComplexFk >> DataShardSnapshots::LockedWriteDistributedCommitCrossConflict+UseSink [GOOD] >> DataShardSnapshots::LockedWriteCleanupOnSplit+UseSink >> KqpUniqueIndex::ReplaceFkPartialColumnSet [GOOD] >> KqpUniqueIndex::ReplaceFkDuplicate >> AnalyzeColumnshard::AnalyzeRebootSaBeforeResolve [GOOD] >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedBeforeRead+UseSink [GOOD] >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedBeforeRead-UseSink >> KqpIndexes::NullInIndexTable [GOOD] >> KqpIndexes::MultipleSecondaryIndexWithSameComulns+UseSink >> DataShardSnapshots::MvccSnapshotReadLockedWrites+UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotReadLockedWrites-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootSaBeforeResolve [GOOD] Test command err: 2025-06-24T21:05:03.430037Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:03.430248Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:03.430381Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043b1/r3tmp/tmpSEWu5E/pdisk_1.dat 2025-06-24T21:05:03.828285Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21011, node 1 2025-06-24T21:05:04.059993Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:04.060051Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:04.060110Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:04.060352Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:04.063002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:04.175781Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:04.175903Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:04.196599Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24177 2025-06-24T21:05:04.800758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:08.279396Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:08.318529Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:08.318667Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:08.379448Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:08.382098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:08.607053Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:08.642233Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.642805Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.643403Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.643547Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.643630Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.643848Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.643943Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.644043Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.644129Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.821258Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:08.821385Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:08.834773Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:08.990760Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:09.050770Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:09.050883Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:09.089107Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:09.090705Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:09.090962Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:09.091020Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:09.091087Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:09.091179Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:09.091245Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:09.091329Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:09.092449Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:09.128946Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:09.129110Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:09.136649Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:09.140489Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:09.140794Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:09.155936Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:09.183825Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:09.183918Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:09.183997Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:09.202418Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:09.210753Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:09.210928Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:09.400164Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:09.556748Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:09.618627Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:10.115348Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:10.343507Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:10.343647Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:10.362056Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:10.453033Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:10.453313Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:10.453605Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:10.453739Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:10.453854Z node 2 :TX_COLUMNSHARD WARN: ... rsalOperations: table count# 1 2025-06-24T21:07:26.902494Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 1 2025-06-24T21:07:26.902547Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:07:26.902649Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:26.903313Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:07:26.903872Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:26.903950Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:26.904113Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing ... blocking NKikimr::TEvTxProxySchemeCache::TEvResolveKeySetResult from to STATISTICS_AGGREGATOR cookie 0 2025-06-24T21:07:26.967704Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7490:5464] 2025-06-24T21:07:26.967955Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:7490:5464], schemeshard id = 72075186224037897 2025-06-24T21:07:26.968049Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7444:5435], server id = [2:7491:5465], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:26.968119Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7491:5465] 2025-06-24T21:07:26.968164Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7491:5465], node id = 2, have schemeshards count = 1, need schemeshards count = 0 ... 
unblocking NKikimr::TEvTxProxySchemeCache::TEvResolveKeySetResult from to ... unblocking NKikimr::TEvTxProxySchemeCache::TEvResolveKeySetResult from to STATISTICS_AGGREGATOR 2025-06-24T21:07:27.053080Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:27.053163Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:27.054082Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7495:5468] 2025-06-24T21:07:27.054281Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:07:27.054855Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. ReplyToActorId [1:3044:3297] , Record { OperationId: "operationId" Tables { PathId { OwnerId: 72075186224037897 LocalId: 4 } } Types: TYPE_COUNT_MIN_SKETCH } 2025-06-24T21:07:27.054892Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:38: [72075186224037894] TTxAnalyze::Execute. Update existing force traversal. OperationId operationId , ReplyToActorId [1:3044:3297] 2025-06-24T21:07:27.067721Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:27.067825Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:97: [72075186224037894] TTxAnalyze::Complete 2025-06-24T21:07:27.067955Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-24T21:07:27.068354Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7498:5471], server id = [2:7499:5472], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:27.068432Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7498:5471], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:27.070932Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:27.071022Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:27.071175Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:27.071330Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:27.071520Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:07:27.073851Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7498:5471], server id = [2:7499:5472], tablet id = 72075186224037899 2025-06-24T21:07:27.073888Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:27.074362Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:27.102154Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7519:5491]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:27.102294Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:27.102327Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7519:5491], StatRequests.size() = 1 2025-06-24T21:07:27.206217Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MjcxODBmMC03MTRjZmE4Yy0zZTAwZGQ4Zi1iZjIxNDZjYw==, TxId: 2025-06-24T21:07:27.206274Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MjcxODBmMC03MTRjZmE4Yy0zZTAwZGQ4Zi1iZjIxNDZjYw==, TxId: 2025-06-24T21:07:27.206680Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:27.219843Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:27.219892Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:07:27.711456Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-24T21:07:27.711520Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T21:07:28.336856Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:28.336933Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:07:28.336988Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:07:29.469135Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:29.469303Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-24T21:07:29.469354Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:29.469988Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:29.482945Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:29.483293Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:29.483355Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:29.483684Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:07:29.496702Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:29.496901Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-24T21:07:29.497351Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7613:5541], server id = [2:7614:5542], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:29.497448Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7613:5541], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:29.498655Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:29.498758Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:29.498982Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:29.499183Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:29.499425Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:07:29.501396Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7613:5541], server id = [2:7614:5542], tablet id = 72075186224037899 2025-06-24T21:07:29.501424Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:29.501785Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:29.523432Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MzdiOTkzNTgtNjQ0ZmE3YzctMmU1ZjE1YTItMzZmMzRkZGU=, TxId: 2025-06-24T21:07:29.523504Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MzdiOTkzNTgtNjQ0ZmE3YzctMmU1ZjE1YTItMzZmMzRkZGU=, TxId: 2025-06-24T21:07:29.524044Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:29.537401Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:29.537459Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3044:3297] >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeResolve [GOOD] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> KqpPrefixedVectorIndexes::PrefixedVectorIndexOrderByCosineDistanceWithCover+Nullable >> KqpUniqueIndex::UpdateFkAlreadyExist [GOOD] >> KqpUniqueIndex::UpdateFkPkOverlap >> DataShardSnapshots::LockedWriteDistributedCommitFreeze-UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitCrossConflict-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeResolve [GOOD] Test command err: 2025-06-24T21:04:58.197788Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:417:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:04:58.198333Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:04:58.198427Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e2/r3tmp/tmpaDwInS/pdisk_1.dat 2025-06-24T21:04:58.712630Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23711, node 1 2025-06-24T21:04:59.384968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:59.385030Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:59.385069Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:59.385744Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:04:59.391522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:04:59.516018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:59.516209Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:59.536899Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9652 2025-06-24T21:05:00.150217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:03.540896Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:03.583860Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:03.583991Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:03.614215Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:03.616692Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:03.864614Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:03.890372Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:03.893887Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:03.894472Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:03.894612Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:03.894836Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:03.894921Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:03.895003Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:03.895086Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:03.895188Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.088437Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:04.088569Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:04.101741Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:04.256448Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:04.313468Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:04.313596Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:04.356262Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:04.356599Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:04.356838Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:04.356912Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:04.356974Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:04.357067Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:04.357133Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:04.357187Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:04.357754Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:04.386334Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:04.386447Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1790:2561], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:04.394424Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2570] 2025-06-24T21:05:04.411259Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1847:2588] 2025-06-24T21:05:04.411551Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1847:2588], schemeshard id = 72075186224037897 2025-06-24T21:05:04.419755Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:04.438602Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:04.438662Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:04.438742Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:04.465079Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:04.475716Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:04.475872Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:04.701740Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:04.880901Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:04.962832Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:05.650710Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:06.112257Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2142:3021], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:06.112515Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:06.226459Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:06.442982Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2242:2804];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:06.443342Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2242:2804];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:06.443716Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2242:2804];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:06.443878Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2242:2804];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:06.444031Z node 2 :TX_COLUMNSHARD WARN: ... ics from node: 2, Round: 2, current Round: 0 2025-06-24T21:07:30.697275Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8391:6185], server id = [2:8395:6189], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:30.697588Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8391:6185], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:30.697878Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8392:6186], server id = [2:8396:6190], tablet id = 72075186224037900, status = OK 2025-06-24T21:07:30.697923Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8392:6186], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:30.698042Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8393:6187], server id = [2:8397:6191], tablet id = 72075186224037901, status = OK 2025-06-24T21:07:30.698072Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8393:6187], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:30.699679Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8394:6188], server id = [2:8398:6192], tablet id = 72075186224037902, status = OK 2025-06-24T21:07:30.699733Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8394:6188], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:30.703061Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:30.703606Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8391:6185], server id = [2:8395:6189], tablet id = 72075186224037899 2025-06-24T21:07:30.703657Z node 2 :STATISTICS DEBUG: 
service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:30.704222Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-24T21:07:30.704585Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8392:6186], server id = [2:8396:6190], tablet id = 72075186224037900 2025-06-24T21:07:30.704609Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:30.705563Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-24T21:07:30.705968Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8393:6187], server id = [2:8397:6191], tablet id = 72075186224037901 2025-06-24T21:07:30.705993Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:30.706091Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-24T21:07:30.706124Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:30.706282Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:30.706432Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:30.706719Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:30.709114Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8394:6188], server id = [2:8398:6192], tablet id = 72075186224037902 2025-06-24T21:07:30.709150Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:30.709754Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:30.741497Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8427:6217]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:30.741752Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:30.741795Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8427:6217], StatRequests.size() = 1 ... blocking NKikimr::TEvTxProxySchemeCache::TEvResolveKeySetResult from to KQP_TABLE_RESOLVER cookie 0 ... 
waiting for 3rd TEvResolveKeySetResult (done) 2025-06-24T21:07:30.844450Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8274:6125], server id = [2:8275:6126], tablet id = 72075186224037894 2025-06-24T21:07:30.844568Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8433:6220] 2025-06-24T21:07:30.844625Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8433:6220] 2025-06-24T21:07:30.844922Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:8434:6221], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:07:30.888328Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:07:30.888422Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:07:30.888853Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:07:30.889540Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:07:30.889842Z node 2 :STATISTICS DEBUG: tx_init.cpp:55: [72075186224037894] Loaded database: /Root/Database 2025-06-24T21:07:30.889907Z node 2 :STATISTICS DEBUG: tx_init.cpp:59: [72075186224037894] Loaded traversal start key 2025-06-24T21:07:30.889952Z node 2 :STATISTICS DEBUG: tx_init.cpp:64: [72075186224037894] Loaded traversal table owner id: 72075186224037897 2025-06-24T21:07:30.889983Z node 2 :STATISTICS DEBUG: tx_init.cpp:69: [72075186224037894] Loaded traversal table local path id: 4 2025-06-24T21:07:30.890020Z node 2 :STATISTICS DEBUG: tx_init.cpp:74: [72075186224037894] Loaded traversal start time: 1750799250662933 2025-06-24T21:07:30.890048Z node 2 :STATISTICS DEBUG: tx_init.cpp:79: [72075186224037894] Loaded traversal IsColumnTable: 1 2025-06-24T21:07:30.890077Z node 2 :STATISTICS DEBUG: tx_init.cpp:84: [72075186224037894] Loaded global traversal round: 2 2025-06-24T21:07:30.890154Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 1 2025-06-24T21:07:30.890208Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:07:30.890312Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 2 2025-06-24T21:07:30.890365Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:07:30.890409Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:07:30.890458Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:07:30.890585Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:30.891497Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:07:30.891883Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:30.891938Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:30.892055Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing ... 
blocking NKikimr::TEvTxProxySchemeCache::TEvResolveKeySetResult from to STATISTICS_AGGREGATOR cookie 0 2025-06-24T21:07:30.976780Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8480:6250] 2025-06-24T21:07:30.976892Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8433:6220], server id = [2:8480:6250], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:30.977015Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8480:6250], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-24T21:07:30.977144Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8481:6251] 2025-06-24T21:07:30.977205Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:8481:6251], schemeshard id = 72075186224037897 ... unblocking NKikimr::TEvTxProxySchemeCache::TEvResolveKeySetResult from to KQP_TABLE_RESOLVER ... unblocking NKikimr::TEvTxProxySchemeCache::TEvResolveKeySetResult from to STATISTICS_AGGREGATOR ... waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse 2025-06-24T21:07:31.052308Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:31.052408Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:31.055507Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:07:31.073139Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MTJlYmE0MWYtYjc4NjMyMmUtYjdjZWVmMTctOTRjNWJjOWU=, TxId: 2025-06-24T21:07:31.073215Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MTJlYmE0MWYtYjc4NjMyMmUtYjdjZWVmMTctOTRjNWJjOWU=, TxId: ... 
waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-24T21:07:31.073772Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8489:6255]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:31.074028Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:31.074068Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:07:31.076876Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:31.076935Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:07:31.076980Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:07:31.080631Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> KqpMultishardIndex::SortedRangeReadDesc [GOOD] >> KqpMultishardIndex::WriteIntoRenamingSyncIndex >> VectorIndexBuildTest::DescriptionIsPersisted-prefixed-false [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::DescriptionIsPersisted-prefixed-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:28.172984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:28.173082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:28.173125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:28.173164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:28.173960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:28.174037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:28.174101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:28.174173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:28.174948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:28.175946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:28.238942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:28.238995Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:28.251739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:28.252056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:28.252171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:28.259382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:28.260060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:28.261667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:28.262806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:28.269707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:28.270682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:28.276736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:28.276802Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:28.277072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:06:28.277119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:28.277164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:28.277264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.282586Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:28.387735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" 
StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:28.387930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.388180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:28.388234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:28.388439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:28.388512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:28.390537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:28.390696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:28.390819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.390866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:28.390896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:28.390920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:06:28.392337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.392377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:28.392407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:28.393601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.393638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.393685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState 
leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:28.393734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:28.396349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:28.397649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:28.397803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:28.398525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:28.398630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:28.398690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:28.398882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:28.398915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:28.399045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:28.399095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:28.400475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:28.400508Z node 1 :FLAT_TX_SCHEMESHARD ... 
ld , records: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:32.943582Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T21:07:32.943689Z node 3 :BUILD_INDEX DEBUG: schemeshard_info_types.h:3696: AddShardStatus id# 102 shard 72057594046678944:11 2025-06-24T21:07:32.943750Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T21:07:32.943788Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T21:07:32.943830Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 4, at schemeshard: 72057594046678944 2025-06-24T21:07:32.948524Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:07:32.948917Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done 2025-06-24T21:07:32.948977Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: by_embedding, IndexColumn: embedding, DataColumns: covered, State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [0:0:0], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976720769, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976720770, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T21:07:32.949003Z node 3 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:334: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 0 2025-06-24T21:07:32.951780Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:32.951837Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:32.952028Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:07:32.952063Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:32.952087Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:07:32.952453Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 
0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [3:3209:5002] sender: [3:3265:2058] recipient: [3:15:2062] 2025-06-24T21:07:32.985160Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/by_embedding" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:07:32.985388Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/by_embedding" took 254us result status StatusSuccess 2025-06-24T21:07:32.986130Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/vectors/by_embedding" PathDescription { Self { Name: "by_embedding" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplLevelTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Children { Name: "indexImplPostingTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "by_embedding" LocalPathId: 3 Type: EIndexTypeGlobalVectorKmeansTree State: EIndexStateReady KeyColumnNames: "embedding" SchemaVersion: 2 PathOwnerId: 
72057594046678944 DataColumnNames: "covered" DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 
ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } clusters: 4 levels: 5 } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpIndexes::CheckUpsertNonEquatableType+NotNull >> KqpIndexes::UniqIndexComplexPkComplexFkOverlap >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> BasicUsage::SimpleHandlers [GOOD] >> DataShardSnapshots::LockedWriteCleanupOnSplit+UseSink [GOOD] >> DataShardSnapshots::LockedWriteCleanupOnSplit-UseSink >> DataShardSnapshots::MvccSnapshotLockedWritesWithoutConflicts+UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesWithConflicts+UseSink >> KqpUniqueIndex::InsertNullInComplexFk [GOOD] >> KqpUniqueIndex::InsertNullInComplexFkDuplicate >> AnalyzeColumnshard::AnalyzeStatus [GOOD] >> KqpUniqueIndex::UpdateFkSameValue >> DataShardSnapshots::MvccSnapshotReadLockedWrites-UseSink [GOOD] >> DataShardSnapshots::ReadIteratorLocalSnapshotThenRestart >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedBeforeRead-UseSink [GOOD] >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedBeforeRead+UseSink >> TraverseColumnShard::TraverseColumnTableRebootColumnshard [GOOD] >> AnalyzeColumnshard::AnalyzeRebootSaBeforeAnalyzeTableResponse [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::SimpleHandlers [GOOD] Test command err: 2025-06-24T21:06:36.408440Z :WaitEventBlocksBeforeDiscovery INFO: Random seed for debugging is 1750799196408403 2025-06-24T21:06:36.666947Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625290565340164:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:06:36.667126Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:06:36.701217Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625291070643623:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:06:36.701285Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:06:36.812269Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:06:36.815950Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00478f/r3tmp/tmpPTbIiF/pdisk_1.dat 2025-06-24T21:06:36.998273Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:37.020745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:06:37.020823Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:06:37.026914Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:06:37.027872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:06:37.045852Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:06:37.045920Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:06:37.049447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30104, node 1 2025-06-24T21:06:37.156710Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00478f/r3tmp/yandexfaDmrY.tmp 2025-06-24T21:06:37.156765Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00478f/r3tmp/yandexfaDmrY.tmp 2025-06-24T21:06:37.158069Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00478f/r3tmp/yandexfaDmrY.tmp 2025-06-24T21:06:37.158183Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:06:37.348323Z INFO: TTestServer started on Port 7075 GrpcPort 30104 TClient is connected to server localhost:7075 PQClient connected to localhost:30104 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:06:37.598465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-24T21:06:37.674734Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:06:37.708015Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:06:38.940945Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625299155275730:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:38.941030Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:38.941229Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625299155275742:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:38.949751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:06:38.956840Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625299155275781:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:38.956914Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:38.966272Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625299155275744:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-24T21:06:39.036492Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625303450243123:2677] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:06:39.428613Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519625299660578512:2273], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:06:39.428845Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZjIzOGYwMTctYTYwNTI2ZTUtYjcwOTdiOWMtMjUxMDZjYWQ=, ActorId: [2:7519625299660578486:2267], ActorState: ExecuteState, TraceId: 01jyhw7aqb7hk44aywnnn29a9w, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:06:39.429691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:06:39.429809Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519625303450243142:2307], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:06:39.429977Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YzBkNWMxNTItZGNjYTg5NzEtYjBhZTFjMDYtYjNkMmY1NDM=, ActorId: [1:7519625299155275712:2295], ActorState: ExecuteState, TraceId: 01jyhw7apfemedw99zxwnkb5g1, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:06:39.430455Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:06:39.430452Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:06:39.558969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:06:39.678732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation ... 
2025-06-24T21:07:33.615242Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_3_1_15960709156192115028_v1 grpc closed 2025-06-24T21:07:33.615270Z :INFO: [/Root] MessageGroupId [src_id] SessionId [src_id|146644a3-89abd411-2dbbe4e9-cd369716_0] Write session: gracefully shut down, all writes complete 2025-06-24T21:07:33.615288Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_3_1_15960709156192115028_v1 is DEAD 2025-06-24T21:07:33.615298Z :DEBUG: [/Root] MessageGroupId [src_id] SessionId [src_id|146644a3-89abd411-2dbbe4e9-cd369716_0] Write session: destroy 2025-06-24T21:07:33.615573Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session shared/user_3_1_15960709156192115028_v1 2025-06-24T21:07:33.615636Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [3:7519625536784882859:2482] destroyed 2025-06-24T21:07:33.615697Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_3_1_15960709156192115028_v1 2025-06-24T21:07:33.616246Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519625536784882842:2466] disconnected; active server actors: 1 2025-06-24T21:07:33.616313Z :INFO: [/Root] [/Root] [9fbb20fb-8aa2c632-672ebb6b-b52beb9c] Closing read session. Close timeout: 0.000000s 2025-06-24T21:07:33.616278Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519625536784882842:2466] client user disconnected session shared/user_3_1_15960709156192115028_v1 2025-06-24T21:07:33.616362Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:299:0 2025-06-24T21:07:33.616359Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037893][rt3.dc1--test-topic] consumer user rebalancing was scheduled 2025-06-24T21:07:33.616406Z :INFO: [/Root] [/Root] [9fbb20fb-8aa2c632-672ebb6b-b52beb9c] Counters: { Errors: 0 CurrentSessionLifetimeMs: 459 BytesRead: 4936800 MessagesRead: 300 BytesReadCompressed: 4936800 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:07:33.616430Z :INFO: [/Root] [/Root] [810f7962-ab9ab2cc-13dd1d11-bb026f32] Closing read session. Close timeout: 0.000000s 2025-06-24T21:07:33.616420Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1255: [72075186224037893][rt3.dc1--test-topic] consumer user balancing. Sessions=2, Families=1, UnradableFamilies=1 [1 (0), ], RequireBalancing=0 [] 2025-06-24T21:07:33.616449Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-24T21:07:33.616466Z :INFO: [/Root] [/Root] [810f7962-ab9ab2cc-13dd1d11-bb026f32] Counters: { Errors: 0 CurrentSessionLifetimeMs: 455 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:07:33.616480Z :INFO: [/Root] [/Root] [acde9702-2823671f-cbc673e8-a98147e4] Closing read session. 
Close timeout: 0.000000s 2025-06-24T21:07:33.616496Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-24T21:07:33.616478Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1302: [72075186224037893][rt3.dc1--test-topic] consumer user balancing family=1 (Status=Free, Partitions=[0]) for ReadingSession "shared/user_3_3_10801818117768023356_v1" (Sender=[3:7519625536784882834:2470], Pipe=[3:7519625536784882846:2470], Partitions=[], ActiveFamilyCount=0) 2025-06-24T21:07:33.616510Z :INFO: [/Root] [/Root] [acde9702-2823671f-cbc673e8-a98147e4] Counters: { Errors: 0 CurrentSessionLifetimeMs: 454 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:07:33.616532Z :INFO: [/Root] [/Root] [acde9702-2823671f-cbc673e8-a98147e4] Closing read session. Close timeout: 0.000000s 2025-06-24T21:07:33.616558Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-24T21:07:33.616542Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:545: [72075186224037893][rt3.dc1--test-topic] consumer user family 1 status Active partitions [0] session "shared/user_3_3_10801818117768023356_v1" sender [3:7519625536784882834:2470] lock partition 0 for ReadingSession "shared/user_3_3_10801818117768023356_v1" (Sender=[3:7519625536784882834:2470], Pipe=[3:7519625536784882846:2470], Partitions=[], ActiveFamilyCount=1) generation 1 step 2 2025-06-24T21:07:33.616591Z :INFO: [/Root] [/Root] [acde9702-2823671f-cbc673e8-a98147e4] Counters: { Errors: 0 CurrentSessionLifetimeMs: 454 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:07:33.616596Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037893][rt3.dc1--test-topic] consumer user start rebalancing. familyCount=1, sessionCount=2, desiredFamilyCount=0, allowPlusOne=1 2025-06-24T21:07:33.616624Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037893][rt3.dc1--test-topic] consumer user balancing duration: 0.000183s 2025-06-24T21:07:33.616668Z :NOTICE: [/Root] [/Root] [acde9702-2823671f-cbc673e8-a98147e4] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:07:33.617101Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_3_2_13012868333411927821_v1 grpc read done: success# 0, data# { } 2025-06-24T21:07:33.617119Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer shared/user session shared/user_3_2_13012868333411927821_v1 grpc read failed 2025-06-24T21:07:33.617152Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:1645: session cookie 2 consumer shared/user session shared/user_3_2_13012868333411927821_v1 closed 2025-06-24T21:07:33.617203Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer shared/user session shared/user_3_2_13012868333411927821_v1 is DEAD 2025-06-24T21:07:33.617215Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:1315: session cookie 3 consumer shared/user session shared/user_3_3_10801818117768023356_v1 assign: record# { Partition: 0 TabletId: 72075186224037892 Topic: "rt3.dc1--test-topic" Generation: 1 Step: 2 Session: "shared/user_3_3_10801818117768023356_v1" ClientId: "user" PipeClient { RawX1: 7519625536784882846 RawX2: 4503612512274854 } Path: "/Root/PQ/rt3.dc1--test-topic" } 2025-06-24T21:07:33.617241Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 3 consumer shared/user session shared/user_3_3_10801818117768023356_v1 grpc read done: success# 0, data# { } 2025-06-24T21:07:33.617247Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 3 consumer shared/user session shared/user_3_3_10801818117768023356_v1 grpc read failed 2025-06-24T21:07:33.617258Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 3 consumer shared/user session shared/user_3_3_10801818117768023356_v1 grpc closed 2025-06-24T21:07:33.617270Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 3 consumer shared/user session shared/user_3_3_10801818117768023356_v1 is DEAD 2025-06-24T21:07:33.617433Z :INFO: [/Root] [/Root] [810f7962-ab9ab2cc-13dd1d11-bb026f32] Closing read session. Close timeout: 0.000000s 2025-06-24T21:07:33.617458Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-24T21:07:33.617476Z :INFO: [/Root] [/Root] [810f7962-ab9ab2cc-13dd1d11-bb026f32] Counters: { Errors: 0 CurrentSessionLifetimeMs: 456 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:07:33.617510Z :NOTICE: [/Root] [/Root] [810f7962-ab9ab2cc-13dd1d11-bb026f32] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:07:33.617515Z node 3 :PQ_READ_PROXY INFO: partition_actor.cpp:1132: session cookie 3 consumer shared/user session shared/user_3_3_10801818117768023356_v1 INITING TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) 2025-06-24T21:07:33.617565Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: src_id|146644a3-89abd411-2dbbe4e9-cd369716_0 grpc read done: success: 0 data: 2025-06-24T21:07:33.617578Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 2 sessionId: src_id|146644a3-89abd411-2dbbe4e9-cd369716_0 grpc read failed 2025-06-24T21:07:33.617607Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: src_id|146644a3-89abd411-2dbbe4e9-cd369716_0 grpc closed 2025-06-24T21:07:33.617629Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: src_id|146644a3-89abd411-2dbbe4e9-cd369716_0 is DEAD 2025-06-24T21:07:33.617722Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519625536784882847:2469] disconnected; active server actors: 1 2025-06-24T21:07:33.617747Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519625536784882847:2469] client user disconnected session shared/user_3_2_13012868333411927821_v1 2025-06-24T21:07:33.617772Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037893][rt3.dc1--test-topic] consumer user rebalancing was scheduled 2025-06-24T21:07:33.617799Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519625536784882846:2470] disconnected; active server actors: 1 2025-06-24T21:07:33.617813Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519625536784882846:2470] client user disconnected session shared/user_3_3_10801818117768023356_v1 2025-06-24T21:07:33.617893Z :INFO: [/Root] [/Root] [9fbb20fb-8aa2c632-672ebb6b-b52beb9c] Closing read session. Close timeout: 0.000000s 2025-06-24T21:07:33.617924Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:299:0 2025-06-24T21:07:33.617949Z :INFO: [/Root] [/Root] [9fbb20fb-8aa2c632-672ebb6b-b52beb9c] Counters: { Errors: 0 CurrentSessionLifetimeMs: 460 BytesRead: 4936800 MessagesRead: 300 BytesReadCompressed: 4936800 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:07:33.617994Z :NOTICE: [/Root] [/Root] [9fbb20fb-8aa2c632-672ebb6b-b52beb9c] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:07:33.618062Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T21:07:33.618315Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [3:7519625536784882898:2481] destroyed 2025-06-24T21:07:33.618361Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. >> KqpIndexes::InnerJoinSecondaryIndexLookupAndRightTablePredicateNonIndexColumn [GOOD] >> KqpIndexes::IndexOr ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeStatus [GOOD] Test command err: 2025-06-24T21:05:05.992641Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:417:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:05.993148Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:05.993302Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00439e/r3tmp/tmp3m5WLn/pdisk_1.dat 2025-06-24T21:05:06.392805Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29252, node 1 2025-06-24T21:05:06.667568Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:06.667623Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:06.667655Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:06.668198Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:06.670927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:06.781883Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:06.782039Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:06.797479Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7282 2025-06-24T21:05:07.367118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:10.571323Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:10.605270Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:10.605395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:10.654973Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:10.657105Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:10.869745Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:10.894577Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.895213Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.895888Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.896032Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.896295Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.896389Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.896502Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.896637Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.896731Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:11.091474Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:11.091603Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:11.104873Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:11.263925Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:11.306004Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:11.306122Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:11.331199Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:11.332558Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:11.332799Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:11.332872Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:11.332938Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:11.332999Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:11.333054Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:11.333131Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:11.333561Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:11.373857Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:11.373970Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1787:2560], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:11.381187Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1800:2569] 2025-06-24T21:05:11.390988Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:11.391842Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1835:2585] 2025-06-24T21:05:11.392510Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1835:2585], schemeshard id = 72075186224037897 2025-06-24T21:05:11.414663Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:11.414737Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:11.414818Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:11.435940Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:11.451074Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:11.451269Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:11.622341Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:11.802617Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:11.892770Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:12.570825Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:12.871874Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2143:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:12.872082Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:12.890732Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:13.010751Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2213:2787];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:13.011029Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2213:2787];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:13.011354Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2213:2787];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:13.011492Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2213:2787];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:13.011628Z node 2 :TX_COLUMNSHARD WARN: ... :7203:5306], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-24T21:07:31.717098Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7296:5352] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:31.770707Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:7325:5367]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:31.770863Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:07:31.770918Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:7327:5369] 2025-06-24T21:07:31.770987Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:7327:5369] 2025-06-24T21:07:31.771242Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7328:5370] 2025-06-24T21:07:31.771346Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7328:5370], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:07:31.771404Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:07:31.771535Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7327:5369], server id = [2:7328:5370], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:31.771572Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:31.771650Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:7325:5367], StatRequests.size() = 1 2025-06-24T21:07:31.897447Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=Y2YxNWY3MDItMjgxOTZlZjQtZjE3MjFhZWEtZmQyNjk4MDM=, TxId: 2025-06-24T21:07:31.897509Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=Y2YxNWY3MDItMjgxOTZlZjQtZjE3MjFhZWEtZmQyNjk4MDM=, TxId: 2025-06-24T21:07:31.897877Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:31.910994Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:31.911057Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:07:31.954463Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:07:31.954540Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:07:32.018914Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7327:5369], schemeshard count = 1 2025-06-24T21:07:33.068067Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:33.068155Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:07:33.070424Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:33.086151Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:33.086691Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:33.086754Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 4], AnalyzedShards 1 2025-06-24T21:07:33.099924Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:33.110769Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. ... blocking NKikimr::NStat::TEvStatistics::TEvAnalyzeTableResponse from TX_COLUMNSHARD_ACTOR to STATISTICS_AGGREGATOR cookie 0 ... waiting for TEvAnalyzeTableResponse (done) 2025-06-24T21:07:33.112305Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7402:5412] 2025-06-24T21:07:33.112724Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:478: [72075186224037894] Send TEvStatistics::TEvAnalyzeStatusResponse. Status STATUS_ENQUEUED 2025-06-24T21:07:33.113469Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7404:5413]
---- StatisticsAggregator ----
Database: /Root/Database
BaseStatistics: 1
SchemeShards: 1
    72075186224037897
Nodes: 1
    2
RequestedSchemeShards: 1
    72075186224037897
FastCounter: 3
FastCheckInFlight: 0
FastSchemeShards: 0
FastNodes: 0
PropagationInFlight: 0
PropagationSchemeShards: 0
PropagationNodes: 0
LastSSIndex: 0
PendingRequests: 0
ProcessUrgentInFlight: 0
Columns: 2
DatashardRanges: 0
CountMinSketches: 0
ScheduleTraversalsByTime: 2
  oldest table: [OwnerId: 72075186224037897, LocalPathId: 4], update time: 1970-01-01T00:00:00Z
ScheduleTraversalsBySchemeShard: 1
    72075186224037897
    [OwnerId: 72075186224037897, LocalPathId: 4], [OwnerId: 72075186224037897, LocalPathId: 3]
ForceTraversals: 1
    1970-01-01T00:00:06Z
NavigateType: Analyze
NavigateAnalyzeOperationId: 
NavigatePathId: 
ForceTraversalOperationId: 
TraversalStartTime: 1970-01-01T00:00:00Z
TraversalPathId: 
TraversalIsColumnTable: 0
TraversalStartKey: 
GlobalTraversalRound: 1
TraversalRound: 0
HiveRequestRound: 0
... unblocking NKikimr::NStat::TEvStatistics::TEvAnalyzeTableResponse from TX_COLUMNSHARD_ACTOR to STATISTICS_AGGREGATOR 2025-06-24T21:07:33.114311Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-24T21:07:33.114398Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-24T21:07:33.127670Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-24T21:07:34.384803Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:34.384928Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:07:34.384970Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:34.385411Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:34.398327Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:34.398700Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:34.398782Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:34.399594Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:07:34.412455Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:34.412693Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-24T21:07:34.413158Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7453:5441], server id = [2:7454:5442], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:34.413282Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7453:5441], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:34.416386Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:34.416520Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:34.416767Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:34.416921Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:34.417123Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7453:5441], server id = [2:7454:5442], tablet id = 72075186224037899 2025-06-24T21:07:34.417154Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:34.417317Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:07:34.420120Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:34.450301Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7474:5461]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:34.450480Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:34.450518Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7474:5461], StatRequests.size() = 1 2025-06-24T21:07:34.541242Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZjE1MDY3MGEtZmM5NzFkYWYtMThkNWUxNTAtNzNjMGM1OTM=, TxId: 2025-06-24T21:07:34.541308Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZjE1MDY3MGEtZmM5NzFkYWYtMThkNWUxNTAtNzNjMGM1OTM=, TxId: 2025-06-24T21:07:34.541864Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:34.555365Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:34.555428Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:912:2713] 2025-06-24T21:07:34.556920Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7491:5469] 2025-06-24T21:07:34.557323Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:478: [72075186224037894] Send TEvStatistics::TEvAnalyzeStatusResponse. Status STATUS_NO_OPERATION ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootSaBeforeAnalyzeTableResponse [GOOD] Test command err: 2025-06-24T21:05:02.702172Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:02.702523Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:02.702713Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043b5/r3tmp/tmpZ3dJOX/pdisk_1.dat 2025-06-24T21:05:03.146840Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9631, node 1 2025-06-24T21:05:03.450044Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:03.450115Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:03.450178Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:03.450412Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:03.453097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:03.565764Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:03.565952Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:03.579984Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15378 2025-06-24T21:05:04.150732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:07.497021Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:07.536660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:07.536796Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:07.597892Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:07.600170Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:07.814686Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:07.852843Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:07.853514Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:07.854037Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:07.854180Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:07.854264Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:07.854487Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:07.854570Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:07.854649Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:07.854721Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:08.022329Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:08.022457Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:08.035334Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:08.204481Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:08.253026Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:08.253131Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:08.290285Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:08.291754Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:08.292053Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:08.292139Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:08.292197Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:08.292265Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:08.292335Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:08.292393Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:08.294163Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:08.333079Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:08.333232Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1790:2560], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:08.348118Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2568] 2025-06-24T21:05:08.363659Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1852:2588] 2025-06-24T21:05:08.364476Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1852:2588], schemeshard id = 72075186224037897 2025-06-24T21:05:08.370255Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:08.392857Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:08.392935Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:08.393037Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:08.405579Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:08.414311Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:08.414467Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:08.601160Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:08.751238Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:08.820998Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:09.306971Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:09.606033Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2141:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:09.606177Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:09.626799Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:09.759813Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2213:2786];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:09.760112Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2213:2786];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:09.760452Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2213:2786];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:09.760620Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2213:2786];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:09.760760Z node 2 :TX_COLUMNSHARD WARN: ... :802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:07:29.696256Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:29.697104Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:29.710284Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:29.710708Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:29.710791Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:29.711799Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-24T21:07:29.724645Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:29.724827Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-24T21:07:29.725194Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7487:5462], server id = [2:7488:5463], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:29.725283Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7487:5462], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:29.727717Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:29.727821Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:29.727977Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:29.728124Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:29.728321Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:29.730562Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7487:5462], server id = [2:7488:5463], tablet id = 72075186224037899 2025-06-24T21:07:29.730596Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:29.731009Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:29.759901Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7508:5482]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:29.760070Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:29.760112Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7508:5482], StatRequests.size() = 1 2025-06-24T21:07:29.848117Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MmYzNjk4N2ItMzA1YTEzZjktYjYzZWU2NDQtZWY3ZmUyYzE=, TxId: 2025-06-24T21:07:29.848184Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MmYzNjk4N2ItMzA1YTEzZjktYjYzZWU2NDQtZWY3ZmUyYzE=, TxId: 2025-06-24T21:07:29.848645Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:29.861568Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:29.861626Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. 
No ActorId to send reply. 2025-06-24T21:07:30.403370Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-24T21:07:30.403444Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T21:07:31.135035Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:31.135129Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:07:31.135744Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:31.148983Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:31.149361Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:31.149413Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 4], AnalyzedShards 1 2025-06-24T21:07:31.173752Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:32.422260Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:32.422333Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:07:32.422359Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-24T21:07:32.422554Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. 2025-06-24T21:07:32.422941Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-24T21:07:32.423017Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-24T21:07:32.436054Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-24T21:07:33.652458Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:33.652534Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:07:33.652576Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:07:34.870371Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:07:34.870563Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:34.881332Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:34.881450Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-24T21:07:34.881480Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:34.882017Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:34.895078Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:34.895426Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:34.895485Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:34.895806Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:07:34.919935Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:34.920155Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-24T21:07:34.920686Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7676:5572], server id = [2:7677:5573], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:34.920797Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7676:5572], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:34.922204Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:34.922303Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:34.922468Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:34.922673Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:34.923014Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:07:34.925395Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7676:5572], server id = [2:7677:5573], tablet id = 72075186224037899 2025-06-24T21:07:34.925439Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:34.925935Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:34.947513Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MTg2ODE1MTktYTEzZDY1ZmItNTVlYjkyNGQtZTljNWU1MTk=, TxId: 2025-06-24T21:07:34.947569Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MTg2ODE1MTktYTEzZDY1ZmItNTVlYjkyNGQtZTljNWU1MTk=, TxId: 2025-06-24T21:07:34.947925Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:34.961593Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:34.961669Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3040:3297] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableRebootColumnshard [GOOD] Test command err: 2025-06-24T21:05:08.115017Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:08.115395Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:08.115592Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00438c/r3tmp/tmpf2cljb/pdisk_1.dat 2025-06-24T21:05:08.506329Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9486, node 1 2025-06-24T21:05:08.722443Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:08.722506Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:08.722572Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:08.722815Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:08.725490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:08.837479Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:08.837639Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:08.851137Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11754 2025-06-24T21:05:09.422436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:12.633183Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:12.671632Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:12.671767Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:12.731674Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:12.733886Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:12.909815Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:12.945549Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:12.946267Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:12.946802Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:12.946985Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:12.947282Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:12.947404Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:12.947514Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:12.947640Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:12.947735Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.126735Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:13.126870Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:13.140197Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:13.303220Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:13.353614Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:13.353728Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:13.388645Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:13.390310Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:13.390608Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:13.390671Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:13.390741Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:13.390808Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:13.390867Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:13.390928Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:13.392078Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:13.433003Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:13.433123Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:13.439871Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:13.442841Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:13.443074Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:13.448995Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:13.475490Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:13.475566Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:13.475656Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:13.491318Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:13.499414Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:13.499560Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:13.682259Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:13.822259Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:13.868035Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:14.427402Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:14.700927Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:14.701081Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:14.724530Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:14.922309Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:14.922826Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:14.923223Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:14.923416Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:14.923582Z node 2 :TX_COLUMNSHARD WARN: ... DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:8217:6113]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:32.242418Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:07:32.242477Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8219:6115] 2025-06-24T21:07:32.242534Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8219:6115] 2025-06-24T21:07:32.242779Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8220:6116] 2025-06-24T21:07:32.242876Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8219:6115], server id = [2:8220:6116], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:32.242945Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8220:6116], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:07:32.243003Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:07:32.243093Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:32.243196Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:8217:6113], StatRequests.size() = 1 2025-06-24T21:07:32.350385Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NDUyNTVhMGItODBmNDQ0ZDEtZGUxMzdiNTUtMzM0OTc4N2I=, TxId: 2025-06-24T21:07:32.350499Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NDUyNTVhMGItODBmNDQ0ZDEtZGUxMzdiNTUtMzM0OTc4N2I=, TxId: 
2025-06-24T21:07:32.351068Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:32.364249Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:32.364306Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:07:32.462154Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:07:32.462244Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:07:32.526577Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8219:6115], schemeshard count = 1 2025-06-24T21:07:34.310188Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:34.310261Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:07:34.310299Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:07:34.310342Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:34.313520Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:34.330019Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:34.330617Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:34.330707Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:34.331664Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-24T21:07:34.344852Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:34.345057Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-24T21:07:34.345784Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8334:6175], server id = [2:8338:6179], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:34.346246Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8334:6175], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:34.347758Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8335:6176], server id = [2:8339:6180], tablet id = 72075186224037900, status = OK 2025-06-24T21:07:34.347832Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8335:6176], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:34.348415Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8336:6177], server id = [2:8340:6181], tablet id = 72075186224037901, status = OK 2025-06-24T21:07:34.348472Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8336:6177], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:34.349343Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8337:6178], server id = [2:8341:6182], tablet id = 72075186224037902, status = OK 2025-06-24T21:07:34.349403Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8337:6178], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:34.354196Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:34.354811Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8334:6175], server id = [2:8338:6179], tablet id = 72075186224037899 2025-06-24T21:07:34.354846Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:34.355085Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-24T21:07:34.355520Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8335:6176], server id = [2:8339:6180], tablet id = 72075186224037900 2025-06-24T21:07:34.355544Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:34.355962Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-24T21:07:34.356231Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8336:6177], server id = [2:8340:6181], tablet id = 72075186224037901 2025-06-24T21:07:34.356251Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:34.356574Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-24T21:07:34.356620Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:34.356761Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 
2025-06-24T21:07:34.356906Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:34.357202Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:34.359207Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8337:6178], server id = [2:8341:6182], tablet id = 72075186224037902 2025-06-24T21:07:34.359232Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:34.359663Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:34.386642Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8370:6207]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:34.386880Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:34.386913Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8370:6207], StatRequests.size() = 1 2025-06-24T21:07:34.507650Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=M2FmYjJkOTEtYjZiYmE0NmQtZGExOTgwZmUtYzU0ZTY4YmI=, TxId: 2025-06-24T21:07:34.507715Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=M2FmYjJkOTEtYjZiYmE0NmQtZGExOTgwZmUtYzU0ZTY4YmI=, TxId: ... waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-24T21:07:34.508352Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:34.509582Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];ev=NActors::IEventHandle;fline=columnshard_impl.cpp:865;event=tablet_die; 2025-06-24T21:07:34.551881Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:34.551938Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:07:34.607165Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:8392:6218];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=18; 2025-06-24T21:07:34.837055Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8504:6313]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:34.837355Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:34.837409Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:07:34.839796Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:34.839853Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:07:34.839899Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:07:34.843120Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> KqpUniqueIndex::ReplaceFkDuplicate [GOOD] |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> KqpIndexes::SelectFromAsyncIndexedTable >> AnalyzeColumnshard::AnalyzeRebootSaBeforeAggregate [GOOD] >> KqpVectorIndexes::CoveredVectorIndexWithFollowers+StaleRO >> AnalyzeColumnshard::AnalyzeRebootSaInAggregate [GOOD] >> KqpIndexes::InnerJoinWithNonIndexWherePredicate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::ReplaceFkDuplicate [GOOD] Test command err: Trying to start YDB, gRPC: 3473, MsgBus: 6226 2025-06-24T21:07:24.316109Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625499073078640:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:24.316171Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004633/r3tmp/tmpBVzO0k/pdisk_1.dat 2025-06-24T21:07:24.586905Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:24.588224Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625499073078617:2079] 1750799244313093 != 1750799244313096 TServer::EnableGrpc on GrpcPort 3473, node 1 2025-06-24T21:07:24.682363Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:24.682479Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:24.683983Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Connecting -> Connected 2025-06-24T21:07:24.726409Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:24.726444Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:24.726478Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:24.726624Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6226 TClient is connected to server localhost:6226 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:25.303293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:25.322941Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:25.338388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:25.494132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:25.616034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:25.664486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:26.640468Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625507663014865:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:26.640563Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.034931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.058707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.079857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.100741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.125134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.152479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.215956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.300789Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625511957982827:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.300848Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.300895Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625511957982832:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.304057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:27.311718Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625511957982834:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:27.409806Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625511957982887:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:28.197911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:29.316477Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625499073078640:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:29.316529Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 30480, MsgBus: 15962 2025-06-24T21:07:30.044877Z node 2 :METADATA_PROVIDE ... r.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:30.164560Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:07:30.172366Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:30.172428Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:30.174101Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15962 TClient is connected to server localhost:15962 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:30.462674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:07:30.476829Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:30.545843Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:30.655536Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:30.701005Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:31.050666Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:32.316576Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625530391361349:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:32.316654Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:32.355662Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:32.377616Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:32.399131Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:32.425270Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:32.446860Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:32.471577Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:32.496722Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:32.573472Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625530391362005:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:32.573545Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625530391362010:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:32.573558Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:32.576496Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:32.584955Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625530391362012:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:32.665975Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625530391362063:3412] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:33.369238Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:34.695138Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhw90ke1hbk378myzn1mqdz, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmVjZGUyN2YtN2RjMWVmMDYtMzYyYzI4MDgtOWRmNjdkYzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-24T21:07:34.703465Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YmVjZGUyN2YtN2RjMWVmMDYtMzYyYzI4MDgtOWRmNjdkYzQ=, ActorId: [2:7519625534686330391:2529], ActorState: ExecuteState, TraceId: 01jyhw90ke1hbk378myzn1mqdz, Create QueryResponse for error on request, msg: 2025-06-24T21:07:35.045250Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625521801425126:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:35.045297Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:07:35.269500Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhw915n0w0xjg95rvv8jcmw, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmVjZGUyN2YtN2RjMWVmMDYtMzYyYzI4MDgtOWRmNjdkYzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-24T21:07:35.269756Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YmVjZGUyN2YtN2RjMWVmMDYtMzYyYzI4MDgtOWRmNjdkYzQ=, ActorId: [2:7519625534686330391:2529], ActorState: ExecuteState, TraceId: 01jyhw915n0w0xjg95rvv8jcmw, Create QueryResponse for error on request, msg: >> KqpVectorIndexes::VectorIndexIsNotUpdatable >> AnalyzeColumnshard::AnalyzeSameOperationId [GOOD] >> AnalyzeColumnshard::AnalyzeDeadline [GOOD] >> KqpUniqueIndex::UpdateOnHidenChanges+DataColumn ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootSaBeforeAggregate [GOOD] Test command err: 2025-06-24T21:05:00.052918Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:417:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:00.053437Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:00.053543Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043c5/r3tmp/tmpNY5EGb/pdisk_1.dat 2025-06-24T21:05:00.490139Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24445, node 1 2025-06-24T21:05:00.787282Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:00.787354Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:00.787392Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:00.788031Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:00.792019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:00.897988Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:00.898142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:00.915092Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3118 2025-06-24T21:05:01.574349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:05.067186Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:05.111692Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:05.111828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:05.166041Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:05.168515Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:05.401928Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:05.427034Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.427755Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.428467Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.428648Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.428912Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.429008Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.429093Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.429195Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.429295Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.624094Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:05.624222Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:05.637666Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:05.813533Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:05.869901Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:05.870016Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:05.910296Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:05.912299Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:05.912577Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:05.912661Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:05.912732Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:05.912809Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:05.912867Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:05.912932Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:05.914199Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:05.984013Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:05.984132Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2557], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:05.998324Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2568] 2025-06-24T21:05:06.005594Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1825:2579] 2025-06-24T21:05:06.007212Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1825:2579], schemeshard id = 72075186224037897 2025-06-24T21:05:06.018431Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:06.042537Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:06.042615Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:06.042706Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:06.059214Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:06.067723Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:06.067902Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:06.268106Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:06.438245Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:06.579621Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:07.275837Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:07.553312Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2141:3021], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:07.553488Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:07.572314Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:07.698313Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2211:2786];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:07.698575Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2211:2786];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:07.698909Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2211:2786];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:07.699082Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2211:2786];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:07.699269Z node 2 :TX_COLUMNSHARD WARN: ... 37894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:07:33.794957Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 2 2025-06-24T21:07:33.795024Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 1 2025-06-24T21:07:33.795084Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 1 2025-06-24T21:07:33.795129Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:07:33.795257Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:33.795852Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:07:33.796348Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:33.796401Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:33.796488Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:07:33.797390Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:33.797446Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:33.798724Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-24T21:07:33.860394Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:33.860581Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-24T21:07:33.861092Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7492:5469], server id = [2:7493:5470], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:33.861192Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7492:5469], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:33.864324Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:33.864427Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:33.864557Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:33.864702Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:33.864899Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:33.867249Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7492:5469], server id = [2:7493:5470], tablet id = 72075186224037899 2025-06-24T21:07:33.867287Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:33.867721Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:33.897138Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7513:5489]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:33.897300Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:33.897354Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7513:5489], StatRequests.size() = 1 2025-06-24T21:07:33.998738Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ODQ0MWNkN2ItZmUzN2M0MS0yOWRkNTI0NC0yZmQ2MjAyOQ==, TxId: 2025-06-24T21:07:33.998822Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ODQ0MWNkN2ItZmUzN2M0MS0yOWRkNTI0NC0yZmQ2MjAyOQ==, TxId: 2025-06-24T21:07:33.999547Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:34.011812Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7528:5495] 2025-06-24T21:07:34.012009Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:7528:5495], schemeshard id = 72075186224037897 
2025-06-24T21:07:34.012126Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7445:5438], server id = [2:7529:5496], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:34.012162Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7529:5496] 2025-06-24T21:07:34.012267Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7529:5496], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-24T21:07:34.024916Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:34.024969Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:07:34.123640Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7540:5499] 2025-06-24T21:07:34.124560Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. ReplyToActorId [1:3039:3296] , Record { OperationId: "operationId" Tables { PathId { OwnerId: 72075186224037897 LocalId: 4 } } Types: TYPE_COUNT_MIN_SKETCH } 2025-06-24T21:07:34.124630Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:38: [72075186224037894] TTxAnalyze::Execute. Update existing force traversal. OperationId operationId , ReplyToActorId [1:3039:3296] 2025-06-24T21:07:34.124700Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:97: [72075186224037894] TTxAnalyze::Complete 2025-06-24T21:07:34.629969Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 3 is different from the current 0 2025-06-24T21:07:34.630051Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T21:07:35.415674Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:35.415763Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:07:35.415817Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:07:36.706683Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:36.706817Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:07:36.706862Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:36.707472Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:36.720245Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:36.720555Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:36.720613Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:36.720956Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-24T21:07:36.733909Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:36.734138Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 4, current Round: 0 2025-06-24T21:07:36.734660Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7612:5542], server id = [2:7613:5543], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:36.734745Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7612:5542], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:36.735961Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:36.736071Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:36.736223Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:36.736368Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:36.736717Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:36.739248Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7612:5542], server id = [2:7613:5543], tablet id = 72075186224037899 2025-06-24T21:07:36.739315Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:36.739889Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:36.773075Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NTgwNGMxNWEtMzFlYjIzNzItNzgyOWExOGYtMTM4N2E4ZTY=, TxId: 2025-06-24T21:07:36.773184Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NTgwNGMxNWEtMzFlYjIzNzItNzgyOWExOGYtMTM4N2E4ZTY=, TxId: 2025-06-24T21:07:36.773840Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:36.807095Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:36.807204Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3039:3296] >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeSave [GOOD] >> KqpIndexes::CheckUpsertNonEquatableType+NotNull [GOOD] >> KqpIndexes::CheckUpsertNonEquatableType-NotNull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootSaInAggregate [GOOD] Test command err: 2025-06-24T21:04:59.383678Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:04:59.384081Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:04:59.384269Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043cb/r3tmp/tmpuQcNa4/pdisk_1.dat 2025-06-24T21:04:59.785352Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28672, node 1 2025-06-24T21:05:00.110202Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:00.110300Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:00.110358Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:00.110621Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:00.113694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:00.238139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:00.238331Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:00.254239Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27848 2025-06-24T21:05:00.883032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:04.462930Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:04.513059Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:04.513244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:04.581855Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:04.592244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:04.815332Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:04.850803Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.851573Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.852109Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.852242Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.852484Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.852572Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.852654Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.852735Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.852813Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.028635Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:05.028747Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:05.042003Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:05.194379Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:05.242805Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:05.242927Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:05.277152Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:05.278381Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:05.278660Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:05.278731Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:05.278783Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:05.278859Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:05.278913Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:05.278967Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:05.280224Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:05.321042Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:05.321163Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:05.327415Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:05.330586Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:05.330854Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:05.339588Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:05.367774Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:05.367842Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:05.367917Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:05.383588Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:05.392140Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:05.392312Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:05.586861Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:05.816023Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:05.865452Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:06.388409Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:06.658171Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:06.658314Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:06.683185Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:07.049639Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2278:2825];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:07.049913Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2278:2825];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:07.050258Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2278:2825];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:07.050448Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2278:2825];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:07.050595Z node 2 :TX_COLUMNSHARD WARN: ... 5186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:07:36.035630Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:07:37.022254Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:37.022420Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:07:37.022489Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:37.023221Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:37.037202Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:37.037806Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:37.037899Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:37.038482Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-24T21:07:37.052220Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:37.052380Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 4, current Round: 0 2025-06-24T21:07:37.053248Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10535:7835], server id = [2:10540:7840], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:37.053366Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10535:7835], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:37.054663Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10536:7836], server id = [2:10541:7841], tablet id = 72075186224037900, status = OK 2025-06-24T21:07:37.054733Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10536:7836], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:37.055127Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10537:7837], server id = [2:10542:7842], tablet id = 72075186224037901, status = OK 2025-06-24T21:07:37.055211Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10537:7837], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:37.055670Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10538:7838], server id = [2:10543:7843], tablet id = 72075186224037902, status = OK 2025-06-24T21:07:37.055723Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10538:7838], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:37.056254Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10539:7839], server id = [2:10545:7845], tablet id = 72075186224037903, status = OK 2025-06-24T21:07:37.056308Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10539:7839], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:37.057062Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:37.058389Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10535:7835], server id = [2:10540:7840], tablet id = 72075186224037899 2025-06-24T21:07:37.058432Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:37.058892Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-24T21:07:37.060006Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10536:7836], server id = [2:10541:7841], tablet id = 72075186224037900 2025-06-24T21:07:37.060037Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:37.060096Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-24T21:07:37.060385Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10548:7848], server id = [2:10551:7851], tablet id = 72075186224037904, status = OK 2025-06-24T21:07:37.060449Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: 
TEvStatisticsRequest send, client id = [2:10548:7848], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:37.061280Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-24T21:07:37.061611Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037903 2025-06-24T21:07:37.062162Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10537:7837], server id = [2:10542:7842], tablet id = 72075186224037901 2025-06-24T21:07:37.062190Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:37.062437Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10552:7852], server id = [2:10554:7854], tablet id = 72075186224037905, status = OK 2025-06-24T21:07:37.062495Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10552:7852], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:37.062657Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10538:7838], server id = [2:10543:7843], tablet id = 72075186224037902 2025-06-24T21:07:37.062683Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:37.063301Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10553:7853], server id = [2:10555:7855], tablet id = 72075186224037906, status = OK 2025-06-24T21:07:37.063363Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10553:7853], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:37.064162Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10539:7839], server id = [2:10545:7845], tablet id = 72075186224037903 2025-06-24T21:07:37.064192Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:37.064310Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037904 2025-06-24T21:07:37.064925Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10557:7857], server id = [2:10559:7859], tablet id = 72075186224037907, status = OK 2025-06-24T21:07:37.064981Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10557:7857], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:37.065165Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10558:7858], server id = [2:10560:7860], tablet id = 72075186224037908, status = OK 2025-06-24T21:07:37.065211Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10558:7858], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:37.065278Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10548:7848], server id = [2:10551:7851], tablet id = 72075186224037904 2025-06-24T21:07:37.065303Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:37.066414Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037905 2025-06-24T21:07:37.066833Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037906 2025-06-24T21:07:37.067034Z node 2 
:STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10552:7852], server id = [2:10554:7854], tablet id = 72075186224037905 2025-06-24T21:07:37.067059Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:37.067412Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037907 2025-06-24T21:07:37.067633Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10553:7853], server id = [2:10555:7855], tablet id = 72075186224037906 2025-06-24T21:07:37.067663Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:37.067747Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037908 2025-06-24T21:07:37.067787Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:37.067997Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:37.068184Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:37.068451Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:37.071356Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10557:7857], server id = [2:10559:7859], tablet id = 72075186224037907 2025-06-24T21:07:37.071391Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:37.071819Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10558:7858], server id = [2:10560:7860], tablet id = 72075186224037908 2025-06-24T21:07:37.071849Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:37.072165Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:37.094955Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZDY2Y2NmNGQtYzg3YTM3NS1mOGEyYTZjNC03YmM0M2M3Yg==, TxId: 2025-06-24T21:07:37.095033Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZDY2Y2NmNGQtYzg3YTM3NS1mOGEyYTZjNC03YmM0M2M3Yg==, TxId: 2025-06-24T21:07:37.095674Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:37.110070Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:37.110161Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:5788:3832] >> DataShardSnapshots::LockedWriteDistributedCommitCrossConflict-UseSink [GOOD] >> DataShardSnapshots::LockedWriteWithAsyncIndex-WithRestart-UseSink >> TraverseColumnShard::TraverseColumnTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeSameOperationId [GOOD] Test command err: 2025-06-24T21:05:08.191446Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:08.191828Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:08.192001Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00438b/r3tmp/tmpgLWCnW/pdisk_1.dat 2025-06-24T21:05:08.574604Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17451, node 1 2025-06-24T21:05:08.778513Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:08.778570Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:08.778618Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:08.778829Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:08.784603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:08.900608Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:08.900768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:08.914577Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1643 2025-06-24T21:05:09.469171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:12.740962Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:12.779361Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:12.779483Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:12.839415Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:12.841718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:13.026482Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:13.062059Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.062692Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.063295Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.063456Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.063601Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.063843Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.063944Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.064072Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.064156Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.239423Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:13.239555Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:13.252791Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:13.410295Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:13.466452Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:13.466578Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:13.500855Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:13.502550Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:13.502792Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:13.502867Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:13.502931Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:13.502996Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:13.503068Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:13.503123Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:13.503825Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:13.541126Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:13.541272Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:13.547705Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:13.550911Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:13.551228Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:13.559211Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:13.583846Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:13.583933Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:13.584031Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:13.599310Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:13.607713Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:13.607868Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:13.793109Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:13.953705Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:13.999475Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:14.561874Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:14.827394Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:14.827509Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:14.846088Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:14.952988Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:14.953247Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:14.953520Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:14.953673Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:14.953771Z node 2 :TX_COLUMNSHARD WARN: ... d: [2:7198:5304], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-24T21:07:29.326678Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7291:5350] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:29.379124Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:7320:5365]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:29.379345Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:07:29.379405Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:7322:5367] 2025-06-24T21:07:29.379462Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:7322:5367] 2025-06-24T21:07:29.379674Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7323:5368] 2025-06-24T21:07:29.379761Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7323:5368], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:07:29.379811Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:07:29.379914Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7322:5367], server id = [2:7323:5368], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:29.380002Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:29.380071Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:7320:5365], StatRequests.size() = 1 2025-06-24T21:07:29.491793Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=N2M3OWMyYjktYTA5YzA1YzEtNjVhYmI5YWEtMTM3N2Q3ZWI=, TxId: 2025-06-24T21:07:29.491855Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=N2M3OWMyYjktYTA5YzA1YzEtNjVhYmI5YWEtMTM3N2Q3ZWI=, TxId: 2025-06-24T21:07:29.492233Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:29.505716Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:29.505789Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:07:29.549319Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:07:29.549391Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:07:29.602634Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7322:5367], schemeshard count = 1 2025-06-24T21:07:30.562875Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:30.562957Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:07:30.565326Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:30.580436Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:30.580822Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:30.580868Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 4], AnalyzedShards 1 2025-06-24T21:07:30.593584Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:30.604401Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. ... blocking NKikimr::NStat::TEvStatistics::TEvAnalyzeTableResponse from TX_COLUMNSHARD_ACTOR to STATISTICS_AGGREGATOR cookie 0 ... waiting for TEvAnalyzeTableResponse (done) ... unblocking NKikimr::NStat::TEvStatistics::TEvAnalyzeTableResponse from TX_COLUMNSHARD_ACTOR to STATISTICS_AGGREGATOR 2025-06-24T21:07:30.605400Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-24T21:07:30.605524Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-24T21:07:30.606041Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. ReplyToActorId [1:3044:3297] , Record { OperationId: "operationId" Tables { PathId { OwnerId: 72075186224037897 LocalId: 4 } } Types: TYPE_COUNT_MIN_SKETCH } 2025-06-24T21:07:30.606113Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:38: [72075186224037894] TTxAnalyze::Execute. Update existing force traversal. OperationId operationId , ReplyToActorId [1:3044:3297] 2025-06-24T21:07:30.629949Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-24T21:07:30.630032Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:97: [72075186224037894] TTxAnalyze::Complete 2025-06-24T21:07:31.798359Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:31.798515Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-24T21:07:31.798577Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:31.799118Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:31.812191Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:31.812523Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:31.812589Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:31.813338Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:07:31.826153Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:31.826331Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-24T21:07:31.826723Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7445:5437], server id = [2:7446:5438], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:31.826817Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7445:5437], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:31.829826Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:31.829921Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:31.830216Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:31.830398Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:31.830616Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7445:5437], server id = [2:7446:5438], tablet id = 72075186224037899 2025-06-24T21:07:31.830661Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:31.830852Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:07:31.833676Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:31.864697Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7466:5457]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:31.864849Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:31.864907Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7466:5457], StatRequests.size() = 1 2025-06-24T21:07:31.951047Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NGY2ZTVlNDQtMzJkYjI2ZDktZGU2YzFhYTMtOWIyNjhkMTQ=, TxId: 2025-06-24T21:07:31.951113Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NGY2ZTVlNDQtMzJkYjI2ZDktZGU2YzFhYTMtOWIyNjhkMTQ=, TxId: 2025-06-24T21:07:31.951548Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:31.964656Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:31.964721Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3044:3297] 2025-06-24T21:07:32.493136Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-24T21:07:32.493235Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T21:07:34.281816Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:07:34.281988Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:34.303596Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:36.510504Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:36.510609Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-24T21:07:37.602969Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:07:37.624827Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-24T21:07:37.624920Z node 2 :STATISTICS DEBUG: service_impl.cpp:1021: Skip TEvStatisticsRequestTimeout >> KqpMultishardIndex::SecondaryIndexSelectNull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeDeadline [GOOD] Test command err: 2025-06-24T21:05:09.352956Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:09.353203Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:09.353334Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004383/r3tmp/tmp1gCgHo/pdisk_1.dat 2025-06-24T21:05:09.726604Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63419, node 1 2025-06-24T21:05:09.929349Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:09.929406Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:09.929460Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:09.929713Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:09.932248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:10.042201Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:10.042363Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:10.060740Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20523 2025-06-24T21:05:10.568637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:13.648815Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:13.682447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:13.682558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:13.741977Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:13.744012Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:13.919242Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:13.954367Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.955003Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.955606Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.955787Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.955904Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.956139Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.956254Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.956383Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:13.956471Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:14.132562Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:14.132667Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:14.145759Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:14.289468Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:14.332264Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:14.332338Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:14.359906Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:14.361267Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:14.361528Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:14.361596Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:14.361657Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:14.361726Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:14.361787Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:14.361846Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:14.362843Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:14.400540Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:14.400643Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:14.407191Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:14.410717Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:14.411035Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:14.419743Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:14.446922Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:14.447018Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:14.447128Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:14.466813Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:14.479823Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:14.479990Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:14.660481Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:14.810405Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:14.856208Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:15.355633Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:15.641035Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:15.641198Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:15.661716Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:15.773633Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:15.773927Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:15.774208Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:15.774339Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:15.774453Z node 2 :TX_COLUMNSHARD WARN: ... .698345Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7988: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-24T21:07:28.698374Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-24T21:07:30.214471Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8072: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037897 2025-06-24T21:07:30.214559Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 222.000000s, at schemeshard: 72075186224037897 2025-06-24T21:07:30.214796Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 51 2025-06-24T21:07:30.227555Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-24T21:07:31.436075Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:31.436163Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:07:31.436200Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-24T21:07:31.436243Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-24T21:07:31.436298Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:31.436688Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:07:31.439946Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:07:31.443817Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7185:5297], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:31.443917Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7195:5302], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:31.444001Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:31.456934Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:31.504690Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7199:5305], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-24T21:07:31.716794Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7295:5351] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:31.774470Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:7324:5366]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:31.774644Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:07:31.774701Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:7326:5368] 2025-06-24T21:07:31.774755Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:7326:5368] 2025-06-24T21:07:31.774993Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7327:5369] 2025-06-24T21:07:31.775076Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7327:5369], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:07:31.775128Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:07:31.775237Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7326:5368], server id = [2:7327:5369], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:31.775287Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:31.775361Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:7324:5366], StatRequests.size() = 1 2025-06-24T21:07:31.877291Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NGU0NTdiNjktNmE1YWJmM2QtYWM0MzY4NGUtYzk0NzBkN2M=, TxId: 2025-06-24T21:07:31.877366Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NGU0NTdiNjktNmE1YWJmM2QtYWM0MzY4NGUtYzk0NzBkN2M=, TxId: 2025-06-24T21:07:31.877962Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:31.891234Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:31.891311Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:07:31.933861Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:07:31.933937Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:07:32.019450Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7326:5368], schemeshard count = 1 2025-06-24T21:07:33.015117Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:33.015219Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:07:33.017215Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:33.032017Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:33.032455Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:33.032502Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 4], AnalyzedShards 1 2025-06-24T21:07:33.045148Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:33.056003Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. ... blocking NKikimr::NStat::TEvStatistics::TEvAnalyzeTableResponse from TX_COLUMNSHARD_ACTOR to STATISTICS_AGGREGATOR cookie 0 ... waiting for TEvAnalyzeTableResponse (done) 2025-06-24T21:07:33.489875Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:33.489959Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:07:33.489997Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-24T21:07:33.490044Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-24T21:07:33.490113Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:33.493573Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:33.507623Z node 2 :STATISTICS ERROR: tx_analyze_deadline.cpp:28: [72075186224037894] Delete long analyze operation, OperationId=operationId 2025-06-24T21:07:33.787235Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:07:33.787382Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:35.628049Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-24T21:07:35.628138Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7957: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-24T21:07:35.628192Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7988: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-24T21:07:35.628231Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-24T21:07:37.762237Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8072: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037897 2025-06-24T21:07:37.762305Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 234.000000s, at schemeshard: 72075186224037897 2025-06-24T21:07:37.762427Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 51 2025-06-24T21:07:37.897817Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:37.898087Z node 2 :STATISTICS DEBUG: tx_analyze_deadline.cpp:46: [72075186224037894] TTxAnalyzeDeadline::Complete. Send TEvAnalyzeResponse for deleted operation, OperationId=operationId, ActorId=[1:3046:3298] 2025-06-24T21:07:37.898158Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-24T21:07:37.898527Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:37.898588Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:37.899281Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 >> KqpIndexes::MultipleSecondaryIndexWithSameComulns+UseSink [GOOD] >> KqpIndexes::MultipleSecondaryIndexWithSameComulns-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeSave [GOOD] Test command err: 2025-06-24T21:05:16.287814Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:16.288083Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:16.288205Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004378/r3tmp/tmpo8hIuY/pdisk_1.dat 2025-06-24T21:05:16.600475Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24241, node 1 2025-06-24T21:05:16.805849Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:16.805910Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:16.805978Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:16.806275Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:16.809181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:16.919911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:16.920045Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:16.933414Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25101 2025-06-24T21:05:17.477340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:20.310659Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:20.342389Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:20.342492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:20.401446Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:20.403398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:20.575926Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:20.611085Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:20.611612Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:20.612095Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:20.612226Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:20.612306Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:20.612502Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:20.612583Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:20.612692Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:20.612754Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:20.786029Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:20.786147Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:20.799723Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:20.933468Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:20.976708Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:20.976788Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:21.009892Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:21.011292Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:21.011535Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:21.011599Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:21.011650Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:21.011715Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:21.011792Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:21.011849Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:21.012820Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:21.048497Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:21.048635Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:21.054734Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:21.057690Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:21.057936Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:21.065243Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:21.091069Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:21.091135Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:21.091237Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:21.105655Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:21.112993Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:21.113160Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:21.295849Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:21.448139Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:21.504203Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:21.995510Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:22.361798Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:22.361966Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:22.380696Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:22.552662Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:22.552884Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:22.553145Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:22.553271Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:22.553380Z node 2 :TX_COLUMNSHARD WARN: ... d = [2:8230:6120], server id = [2:8231:6121], tablet id = 72075186224037894 2025-06-24T21:07:37.527045Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8365:6196] 2025-06-24T21:07:37.527106Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8365:6196] 2025-06-24T21:07:37.527289Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:8366:6197], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:07:37.589797Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:07:37.589924Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:07:37.590533Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:07:37.591536Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:07:37.591906Z node 2 :STATISTICS DEBUG: tx_init.cpp:55: [72075186224037894] Loaded database: /Root/Database 2025-06-24T21:07:37.591964Z node 2 :STATISTICS DEBUG: tx_init.cpp:59: [72075186224037894] Loaded traversal start key 2025-06-24T21:07:37.592028Z node 2 :STATISTICS DEBUG: tx_init.cpp:64: [72075186224037894] Loaded traversal table owner id: 72075186224037897 2025-06-24T21:07:37.592078Z node 2 :STATISTICS DEBUG: tx_init.cpp:69: [72075186224037894] Loaded traversal table local path id: 4 2025-06-24T21:07:37.592117Z node 2 :STATISTICS DEBUG: tx_init.cpp:74: [72075186224037894] Loaded traversal start time: 1750799257476097 2025-06-24T21:07:37.592163Z node 2 :STATISTICS DEBUG: tx_init.cpp:79: [72075186224037894] Loaded traversal IsColumnTable: 1 2025-06-24T21:07:37.592205Z node 2 :STATISTICS DEBUG: tx_init.cpp:84: [72075186224037894] Loaded global traversal round: 2 2025-06-24T21:07:37.592309Z node 2 :STATISTICS DEBUG: 
tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 1 2025-06-24T21:07:37.592378Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:07:37.592480Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 2 2025-06-24T21:07:37.592564Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:07:37.592644Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:07:37.592723Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:07:37.592897Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:37.593949Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:07:37.594826Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:37.594915Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:37.595086Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:07:37.596615Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:37.596715Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:37.598648Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-24T21:07:37.673271Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:37.673440Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-24T21:07:37.674248Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8415:6228], server id = [2:8419:6232], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:37.674382Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8415:6228], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:37.674681Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8416:6229], server id = [2:8420:6233], tablet id = 72075186224037900, status = OK 2025-06-24T21:07:37.674733Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8416:6229], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:37.676347Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8417:6230], server id = [2:8421:6234], tablet id = 72075186224037901, status = OK 2025-06-24T21:07:37.676435Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8417:6230], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:37.676606Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8418:6231], server id = [2:8422:6235], tablet id = 72075186224037902, status = OK 2025-06-24T21:07:37.676652Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8418:6231], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:37.678314Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:37.678773Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-24T21:07:37.678899Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8415:6228], server id = [2:8419:6232], tablet id = 72075186224037899 2025-06-24T21:07:37.678937Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:37.679301Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-24T21:07:37.679471Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8416:6229], server id = [2:8420:6233], tablet id = 72075186224037900 2025-06-24T21:07:37.679495Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:37.679634Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-24T21:07:37.679683Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:37.679897Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:37.680128Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:37.680389Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:07:37.683390Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8418:6231], server id = [2:8422:6235], tablet id = 72075186224037902 2025-06-24T21:07:37.683427Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:37.683863Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8417:6230], server id = [2:8421:6234], tablet id = 72075186224037901 2025-06-24T21:07:37.683890Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:37.684185Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:37.725210Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8443:6256]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:37.725575Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:37.725634Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8443:6256], StatRequests.size() = 1 2025-06-24T21:07:37.828910Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8454:6259] 2025-06-24T21:07:37.829021Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8365:6196], server id = [2:8454:6259], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:37.829123Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8454:6259], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-24T21:07:37.829235Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8455:6260] 2025-06-24T21:07:37.829285Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:8455:6260], schemeshard id = 72075186224037897 2025-06-24T21:07:37.844493Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=M2FkMDI2YTUtZjQ0NmQwYTQtY2U3MTk0NzktNjEzMzgy, TxId: 2025-06-24T21:07:37.844588Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=M2FkMDI2YTUtZjQ0NmQwYTQtY2U3MTk0NzktNjEzMzgy, TxId: 2025-06-24T21:07:37.845207Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:37.859021Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:37.859106Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:07:37.903053Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8462:6266]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:37.903543Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:37.903616Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:07:37.907250Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:37.907336Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:07:37.907406Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:07:37.912682Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> KqpIndexMetadata::HandleNotReadyIndex >> DataShardSnapshots::LockedWriteCleanupOnSplit-UseSink [GOOD] >> DataShardSnapshots::LockedWriteCleanupOnCopyTable+UseSink >> KqpUniqueIndex::InsertNullInComplexFkDuplicate [GOOD] >> KqpUniqueIndex::InsertFkAlreadyExist ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTable [GOOD] Test command err: 2025-06-24T21:05:06.461641Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:06.461981Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:06.462135Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004399/r3tmp/tmpbrHiOv/pdisk_1.dat 2025-06-24T21:05:06.856289Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5751, node 1 2025-06-24T21:05:07.089987Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:07.090060Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:07.090116Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:07.090350Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:07.097641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:07.226571Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:07.226738Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:07.240785Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6688 2025-06-24T21:05:07.828582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:11.204786Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:11.246376Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:11.246508Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:11.307197Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:11.309272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:11.506737Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:11.542176Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:11.542873Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:11.543542Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:11.543754Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:11.543877Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:11.544115Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:11.544217Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:11.544332Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:11.544440Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:11.732739Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:11.732856Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:11.746165Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:11.897283Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:11.949109Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:11.949214Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:11.983818Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:11.985353Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:11.985593Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:11.985666Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:11.985775Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:11.985836Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:11.985890Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:11.985944Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:11.987051Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:12.024397Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:12.024564Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:12.031183Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:12.034450Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:12.034739Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:12.042063Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:12.069727Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:12.069798Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:12.069874Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:12.084948Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:12.093174Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:12.093347Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:12.281991Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:12.430383Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:12.476172Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:13.023240Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:13.293298Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:13.293456Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:13.317777Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:13.507078Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:13.507378Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:13.507692Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:13.507847Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:13.507984Z node 2 :TX_COLUMNSHARD WARN: l ... er/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:36.114474Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:8106:6050], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-24T21:07:36.351341Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:8180:6095] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:36.418323Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:8202:6109]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:36.418678Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:07:36.418783Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8204:6111] 2025-06-24T21:07:36.418863Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8204:6111] 2025-06-24T21:07:36.419341Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8205:6112] 2025-06-24T21:07:36.419510Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8204:6111], server id = [2:8205:6112], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:36.419646Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8205:6112], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:07:36.419734Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:07:36.419910Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:36.420023Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:8202:6109], StatRequests.size() = 1 2025-06-24T21:07:36.561209Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NWQ1YmE2MDgtMWFlOTYwZjctY2I1NDVkOGQtMjJlMjhhY2U=, TxId: 2025-06-24T21:07:36.561275Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NWQ1YmE2MDgtMWFlOTYwZjctY2I1NDVkOGQtMjJlMjhhY2U=, TxId: 2025-06-24T21:07:36.561686Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:36.575767Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:36.575832Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:07:36.663903Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:07:36.663991Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:07:36.750007Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8204:6111], schemeshard count = 1 2025-06-24T21:07:38.449763Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:38.449837Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:07:38.449883Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:07:38.449956Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:38.454555Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:38.470995Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:38.471675Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:38.471755Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:38.472733Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:07:38.486728Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:38.486975Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-24T21:07:38.487885Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8319:6171], server id = [2:8323:6175], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:38.488306Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8319:6171], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:38.488685Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8320:6172], server id = [2:8324:6176], tablet id = 72075186224037900, status = OK 2025-06-24T21:07:38.488751Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8320:6172], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:38.488900Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8321:6173], server id = [2:8325:6177], tablet id = 72075186224037901, status = OK 2025-06-24T21:07:38.488951Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8321:6173], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:38.491287Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8322:6174], server id = [2:8326:6178], tablet id = 72075186224037902, status = OK 2025-06-24T21:07:38.491361Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8322:6174], path = { OwnerId: 72075186224037897 LocalId: 4 } 
2025-06-24T21:07:38.496873Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:38.497877Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8319:6171], server id = [2:8323:6175], tablet id = 72075186224037899 2025-06-24T21:07:38.497954Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:38.498570Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-24T21:07:38.499070Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8320:6172], server id = [2:8324:6176], tablet id = 72075186224037900 2025-06-24T21:07:38.499107Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:38.499878Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-24T21:07:38.500265Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8321:6173], server id = [2:8325:6177], tablet id = 72075186224037901 2025-06-24T21:07:38.500294Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:38.500385Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-24T21:07:38.500431Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:38.500643Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:38.500854Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:38.501244Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:07:38.503620Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8322:6174], server id = [2:8326:6178], tablet id = 72075186224037902 2025-06-24T21:07:38.503656Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:38.504297Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:38.538252Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8355:6203]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:38.538469Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:38.538531Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8355:6203], StatRequests.size() = 1 2025-06-24T21:07:38.684630Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YTM5NzUyOGEtZGYwYzE0OTMtZjRhNTRiNy02YzU0YmNmMA==, TxId: 2025-06-24T21:07:38.684686Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YTM5NzUyOGEtZGYwYzE0OTMtZjRhNTRiNy02YzU0YmNmMA==, TxId: ... waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-24T21:07:38.685146Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8368:6209]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:38.685364Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:38.685752Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:38.685794Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:07:38.688715Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:38.688781Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:07:38.688826Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:07:38.692480Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> DataShardSnapshots::MvccSnapshotLockedWritesWithConflicts+UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesWithConflicts-UseSink >> DataShardSnapshots::ReadIteratorLocalSnapshotThenRestart [GOOD] >> DataShardSnapshots::ReadIteratorLocalSnapshotThenWrite >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedBeforeRead+UseSink [GOOD] >> 
DataShardSnapshots::ShardRestartLockBrokenByUncommittedBeforeRead-UseSink >> KqpIndexes::DoUpsertWithoutIndexUpdate-UniqIndex-UseSink >> KqpUniqueIndex::UpsertImplicitNullInComplexFk >> KqpUniqueIndex::UpdateFkPkOverlap [GOOD] >> AnalyzeColumnshard::AnalyzeRebootSaBeforeSave [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::InsertNullInComplexFkDuplicate [GOOD] Test command err: Trying to start YDB, gRPC: 6200, MsgBus: 1970 2025-06-24T21:07:24.316180Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625497272516232:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:24.316267Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00463d/r3tmp/tmp8nEBJW/pdisk_1.dat 2025-06-24T21:07:24.594515Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:24.594907Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625497272516209:2079] 1750799244313104 != 1750799244313107 TServer::EnableGrpc on GrpcPort 6200, node 1 2025-06-24T21:07:24.677436Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:24.677512Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:24.679169Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:24.726392Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:24.726416Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:24.726425Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:24.726608Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1970 TClient is connected to server localhost:1970 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:07:25.325373Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:25.330489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:25.346145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:25.462702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:25.601072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:25.660222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:26.592592Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625505862452441:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:26.592681Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.035024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.059082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.081868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.103606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.166988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.190321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.212486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.287730Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625510157420397:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.287817Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.287870Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625510157420402:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.291187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:27.300318Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625510157420404:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:27.385345Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625510157420457:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:28.197472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:29.316128Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625497272516232:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:29.316187Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 4929, MsgBus: 17836 2025-06-24T21:07:29.953439Z node 2 :METADATA_PROVIDER ... , LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:33.294197Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
Trying to start YDB, gRPC: 10900, MsgBus: 11911 2025-06-24T21:07:35.424632Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519625544830695401:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:35.424733Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00463d/r3tmp/tmptSjagu/pdisk_1.dat 2025-06-24T21:07:35.513782Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:35.514529Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519625544830695381:2079] 1750799255424005 != 1750799255424008 TServer::EnableGrpc on GrpcPort 10900, node 3 2025-06-24T21:07:35.552852Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:35.552921Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:35.554303Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:35.562977Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:35.563000Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:35.563009Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:35.563136Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11911 TClient is connected to server localhost:11911 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:36.026243Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:07:36.041320Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:36.092775Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:36.232437Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:36.308447Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:36.433019Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:38.016194Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625557715598909:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:38.016294Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:38.058327Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:38.087187Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:38.114354Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:38.142513Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:38.170936Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:38.201081Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:38.228070Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:38.278186Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625557715599571:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:38.278279Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:38.278359Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625557715599576:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:38.281803Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:38.290440Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519625557715599578:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:38.363750Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519625557715599629:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:39.218991Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> KqpPrefixedVectorIndexes::OrderByCosineLevel2-Nullable-UseSimilarity >> DataShardSnapshots::VolatileSnapshotTimeout [GOOD] >> DataShardSnapshots::VolatileSnapshotTimeoutRefresh >> KqpVectorIndexes::OrderByCosineLevel1-Nullable-UseSimilarity |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::UpdateFkPkOverlap [GOOD] Test command err: Trying to start YDB, gRPC: 32577, MsgBus: 6419 2025-06-24T21:07:24.316018Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625496513502746:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:24.316103Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004634/r3tmp/tmpjxKOQe/pdisk_1.dat 2025-06-24T21:07:24.584474Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625496513502723:2079] 1750799244313110 != 1750799244313113 2025-06-24T21:07:24.593464Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 32577, node 1 2025-06-24T21:07:24.654023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:24.654135Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:24.656613Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:24.726131Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:24.726154Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:24.726163Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:24.726393Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6419 TClient is connected to server localhost:6419 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:07:25.326549Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:25.344499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:25.376250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:25.488484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:25.611643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:25.659401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:26.595996Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625505103438958:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:26.596091Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.034852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.060178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.084291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.106574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.125819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.149456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.171726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.254797Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625509398406913:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.254874Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.254878Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625509398406918:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.259734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:27.267364Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625509398406920:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:27.343291Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625509398406973:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:28.207836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:29.316500Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625496513502746:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:29.316550Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:07:29.639367Z node 1 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0: ... teState, TraceId: 01jyhw8wwt19dgh63xrazn1pw0, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 19058, MsgBus: 20937 2025-06-24T21:07:32.900379Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625529993039173:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:32.900425Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004634/r3tmp/tmpeFjsOZ/pdisk_1.dat 2025-06-24T21:07:32.982292Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625529993039154:2079] 1750799252899799 != 1750799252899802 2025-06-24T21:07:32.983980Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19058, node 2 2025-06-24T21:07:33.017372Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:33.017397Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:33.017424Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:33.017546Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:07:33.030433Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:33.030517Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:33.031987Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20937 TClient is connected to server localhost:20937 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:33.393101Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:33.403357Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:33.448967Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:33.566355Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:33.633911Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:33.905498Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:35.487525Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625542877942669:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:35.487635Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:35.535321Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:35.561795Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:35.589217Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:35.618036Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:35.647083Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:35.676495Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:35.707511Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:35.761210Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625542877943325:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:35.761368Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:35.761582Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625542877943330:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:35.764844Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:35.773447Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625542877943332:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:35.849748Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625542877943383:3412] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:36.628782Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:37.900674Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625529993039173:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:37.900751Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> VectorIndexBuildTest::PrefixedDuplicates [GOOD] >> VectorIndexBuildTest::DescriptionIsPersisted-prefixed-true >> KqpUniqueIndex::UpdateFkSameValue [GOOD] >> KqpUniqueIndex::UpdateImplicitNullInComplexFk2 >> KqpIndexes::SelectFromAsyncIndexedTable [GOOD] >> KqpIndexes::SelectFromIndexesAndFreeSpaceLogicDoesntTimeout >> test_sql_streaming.py::test[suites-GroupByHopTimeExtractorUnusedColumns-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHopWithDataWatermarks-default.txt] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootSaBeforeSave [GOOD] Test command err: 2025-06-24T21:05:07.934104Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:07.934466Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:07.934639Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004396/r3tmp/tmpbGrWX0/pdisk_1.dat 2025-06-24T21:05:08.313207Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27854, node 1 2025-06-24T21:05:08.539395Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:08.539454Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:08.539531Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:08.539774Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:08.542697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:08.655852Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:08.656005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:08.669738Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11552 2025-06-24T21:05:09.224495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:12.204445Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:12.246697Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:12.246825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:12.309159Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:12.311764Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:12.506534Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:12.542134Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:12.542730Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:12.543368Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:12.543531Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:12.543642Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:12.543892Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:12.544008Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:12.544120Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:12.544204Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:12.723420Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:12.723539Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:12.737143Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:12.904689Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:12.952995Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:12.953102Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:12.989895Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:12.991379Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:12.991654Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:12.991733Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:12.991800Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:12.991867Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:12.991936Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:12.991997Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:12.993072Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:13.030285Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:13.030390Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:13.037668Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:13.041625Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:13.041915Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:13.050044Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:13.079073Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:13.079165Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:13.079243Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:13.094470Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:13.102816Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:13.102982Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:13.282650Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:13.443035Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:13.489107Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:13.993070Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:14.279449Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:14.279629Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:14.301213Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:14.419785Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:14.420083Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:14.420427Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:14.420608Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:14.420760Z node 2 :TX_COLUMNSHARD WARN: ... ICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 1 2025-06-24T21:07:37.747404Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 1 2025-06-24T21:07:37.747469Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:07:37.747633Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:37.748676Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:07:37.749194Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:07:37.749659Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:37.749757Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:37.750944Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:37.751018Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:37.752559Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-24T21:07:37.816796Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:37.817037Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-24T21:07:37.817679Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7500:5474], server id = [2:7501:5475], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:37.817839Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7500:5474], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:37.819249Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:37.819373Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:37.819640Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:37.819835Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:37.820144Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:37.823444Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7500:5474], server id = [2:7501:5475], tablet id = 72075186224037899 2025-06-24T21:07:37.823501Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:37.824228Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:37.866255Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7519:5493]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:37.866502Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:37.866562Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7519:5493], StatRequests.size() = 1 2025-06-24T21:07:37.994993Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MTFkMzQzNjEtZjc2NjFiMWQtOTc1ZWNjMWMtYmVjMjBkNjg=, TxId: 2025-06-24T21:07:37.995079Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MTFkMzQzNjEtZjc2NjFiMWQtOTc1ZWNjMWMtYmVjMjBkNjg=, TxId: 2025-06-24T21:07:37.995741Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:38.008667Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7534:5499] 2025-06-24T21:07:38.008924Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:7534:5499], schemeshard id = 72075186224037897 
2025-06-24T21:07:38.009037Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7453:5443], server id = [2:7535:5500], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:38.009140Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7535:5500] 2025-06-24T21:07:38.009266Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7535:5500], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-24T21:07:38.023051Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:38.023185Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:07:38.121354Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7539:5503] 2025-06-24T21:07:38.122189Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. ReplyToActorId [1:3045:3298] , Record { OperationId: "operationId" Tables { PathId { OwnerId: 72075186224037897 LocalId: 4 } } Types: TYPE_COUNT_MIN_SKETCH } 2025-06-24T21:07:38.122257Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:38: [72075186224037894] TTxAnalyze::Execute. Update existing force traversal. OperationId operationId , ReplyToActorId [1:3045:3298] 2025-06-24T21:07:38.122334Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:97: [72075186224037894] TTxAnalyze::Complete 2025-06-24T21:07:38.637941Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-24T21:07:38.638053Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T21:07:38.649003Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 3 is different from the current 0 2025-06-24T21:07:38.649095Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T21:07:39.416970Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:39.417080Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:07:39.417128Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:07:40.719059Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:40.719263Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-24T21:07:40.719330Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:40.720132Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:40.733970Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:40.734405Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:40.734486Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:40.734921Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:07:40.748952Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:40.749174Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 4, current Round: 0 2025-06-24T21:07:40.749721Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7618:5546], server id = [2:7619:5547], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:40.749856Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7618:5546], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:40.751204Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:40.751305Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:40.751506Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:40.751682Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:40.751962Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:07:40.754319Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7618:5546], server id = [2:7619:5547], tablet id = 72075186224037899 2025-06-24T21:07:40.754360Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:40.754788Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:40.775451Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MWQyODdmNWEtNWEyN2JmYzQtMTgwNDQyYjctNjFiYTI4NTc=, TxId: 2025-06-24T21:07:40.775536Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MWQyODdmNWEtNWEyN2JmYzQtMTgwNDQyYjctNjFiYTI4NTc=, TxId: 2025-06-24T21:07:40.776111Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:40.789284Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:40.789354Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3045:3298] >> test_sql_streaming.py::test[suites-ReadTwoTopics-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadWriteSameTopic-default.txt] >> KqpIndexes::IndexOr [GOOD] >> KqpIndexes::IndexTopSortPushDown >> test_sql_streaming.py::test[suites-GroupByHop-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHopByStringKey-default.txt] >> TraverseColumnShard::TraverseColumnTableAggrStatNonLocalTablet [GOOD] >> AnalyzeColumnshard::Analyze [GOOD] >> KqpIndexes::UpdateDeletePlan+UseSink >> TExportToS3Tests::ShouldExcludeBackupTableFromStats [GOOD] >> TraverseColumnShard::TraverseColumnTableAggrStatUnavailableNode [GOOD] >> TExportToS3Tests::ShouldRestartOnScanErrors >> KqpVectorIndexes::VectorIndexIsNotUpdatable [GOOD] >> KqpVectorIndexes::SimpleVectorIndexOrderByCosineDistanceWithCover-Nullable >> KqpUniqueIndex::ReplaceFkAlreadyExist >> KqpIndexes::CheckUpsertNonEquatableType-NotNull [GOOD] >> KqpIndexes::CreateTableWithExplicitAsyncIndexSQL >> KqpIndexes::SelectConcurentTX >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeReqDistribution [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableAggrStatNonLocalTablet [GOOD] Test command err: 2025-06-24T21:04:58.337120Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:417:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:04:58.337607Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:04:58.337698Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043db/r3tmp/tmpifLTgL/pdisk_1.dat 2025-06-24T21:04:58.749024Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62664, node 1 2025-06-24T21:04:59.382421Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:59.382477Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:59.382506Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:59.382876Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:04:59.394303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:04:59.513799Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:59.513998Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:59.535961Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27797 2025-06-24T21:05:00.157035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:03.699974Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:03.749438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:03.749609Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:03.802137Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:03.804979Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:04.038523Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:04.063641Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.064338Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.064959Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.065119Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.065939Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.066045Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.066131Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.066211Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.066297Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.273512Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:04.273607Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:04.287225Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:04.467417Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:04.517026Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:04.517152Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:04.553778Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:04.555685Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:04.555956Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:04.556017Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:04.556088Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:04.556150Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:04.556213Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:04.556273Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:04.557513Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:04.600075Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:04.600214Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2557], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:04.609628Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2568] 2025-06-24T21:05:04.615045Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1825:2579] 2025-06-24T21:05:04.616132Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1825:2579], schemeshard id = 72075186224037897 2025-06-24T21:05:04.624678Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:04.647428Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:04.647503Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:04.647575Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:04.662917Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:04.671244Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:04.671417Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:04.879867Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:05.052837Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:05.172557Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:05.825799Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:06.112271Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2141:3021], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:06.112502Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:06.225973Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:06.428674Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2229:2797];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:06.428971Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2229:2797];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:06.429329Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2229:2797];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:06.429491Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2229:2797];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:06.429636Z node 2 :TX_COLUMNSHARD WARN: ... 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:35.848723Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:07:35.903545Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:07:35.903630Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:07:35.988299Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8263:6119], schemeshard count = 1 2025-06-24T21:07:38.290547Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:38.290622Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:07:38.290665Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:07:38.290710Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:38.294578Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:38.311249Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:38.311698Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:38.311761Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:38.312420Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-24T21:07:38.325942Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:38.326178Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-24T21:07:38.326888Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8380:6179], server id = [2:8384:6183], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:38.327300Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8380:6179], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:38.328734Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8381:6180], server id = [2:8385:6184], tablet id = 72075186224037900, status = OK 2025-06-24T21:07:38.328810Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8381:6180], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:38.329410Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8382:6181], server id = [2:8386:6185], tablet id = 72075186224037901, status = OK 2025-06-24T21:07:38.329472Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8382:6181], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:38.330230Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8383:6182], server id = [2:8387:6186], tablet id = 72075186224037902, status = OK 2025-06-24T21:07:38.330295Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8383:6182], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:38.335237Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:38.336328Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8380:6179], server id = [2:8384:6183], tablet id = 72075186224037899 2025-06-24T21:07:38.336394Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:38.337106Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-24T21:07:38.337534Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8381:6180], server id = [2:8385:6184], tablet id = 72075186224037900 2025-06-24T21:07:38.337559Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:38.338071Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-24T21:07:38.338443Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8383:6182], server id = [2:8387:6186], tablet id = 72075186224037902 2025-06-24T21:07:38.338466Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:38.338521Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-24T21:07:38.338557Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:38.338753Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 
2025-06-24T21:07:38.339117Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8382:6181], server id = [2:8386:6185], tablet id = 72075186224037901 2025-06-24T21:07:38.339139Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:38.363000Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:38.363308Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-24T21:07:38.916497Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 3 2025-06-24T21:07:38.916591Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T21:07:41.925256Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:07:41.925524Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:42.143670Z node 2 :STATISTICS INFO: service_impl.cpp:416: Node 3 is unavailable 2025-06-24T21:07:42.143767Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:42.143972Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 3 is different from the current 0 2025-06-24T21:07:42.144006Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T21:07:42.144085Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:42.144155Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:42.144618Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:07:42.158419Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:42.158623Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 4, current Round: 0 2025-06-24T21:07:42.159292Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8514:6252], server id = [2:8515:6253], tablet id = 72075186224037900, status = OK 2025-06-24T21:07:42.159386Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8514:6252], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:42.160588Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-24T21:07:42.160677Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:42.160893Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:42.161058Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:42.161389Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:07:42.163654Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8514:6252], server id = [2:8515:6253], tablet id = 72075186224037900 2025-06-24T21:07:42.163691Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:42.164579Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:42.202648Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8533:6271]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:42.202899Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:42.202952Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8533:6271], StatRequests.size() = 1 2025-06-24T21:07:42.327099Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZDEwYWNlZWQtNzBkNjhhOGUtMTkzNjNmZmItYzRhZGM5YzI=, TxId: 2025-06-24T21:07:42.327191Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZDEwYWNlZWQtNzBkNjhhOGUtMTkzNjNmZmItYzRhZGM5YzI=, TxId: ... waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-24T21:07:42.327856Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8547:6277]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:42.328166Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:42.328222Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:07:42.328420Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:42.332013Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:42.332084Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:07:42.332179Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:07:42.337176Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 probe = 4 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::Analyze [GOOD] Test command err: 2025-06-24T21:05:21.647256Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:21.647582Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:21.647720Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004366/r3tmp/tmpg7Yen0/pdisk_1.dat 2025-06-24T21:05:21.985483Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22908, node 1 2025-06-24T21:05:22.182317Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:22.182385Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:22.182463Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:22.182707Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:22.185472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:22.294242Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:22.294423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:22.307817Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62323 2025-06-24T21:05:22.805321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:25.636025Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:25.668716Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:25.668833Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:25.728048Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:25.730301Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:25.915798Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:25.950703Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:25.951326Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:25.951919Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:25.952090Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:25.952209Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:25.952453Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:25.952553Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:25.952678Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:25.952766Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:26.127072Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:26.127197Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:26.140695Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:26.287420Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:26.327484Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:26.327593Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:26.354889Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:26.356059Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:26.356258Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:26.356312Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:26.356360Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:26.356420Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:26.356467Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:26.356519Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:26.357297Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:26.395706Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:26.395828Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:26.402061Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:26.404899Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:26.405124Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:26.412041Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:26.437002Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:26.437072Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:26.437167Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:26.451255Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:26.458221Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:26.458373Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:26.620671Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:26.774819Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:26.821153Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:27.322340Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:27.566698Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:27.566819Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:27.581950Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:27.673337Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:27.673561Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:27.673816Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:27.673910Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:27.673998Z node 2 :TX_COLUMNSHARD WARN: ... pl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:39.444829Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:39.448930Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:07:39.452580Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7181:5295], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:39.452668Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7191:5300], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:39.452737Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:39.462657Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:39.511781Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7195:5303], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-24T21:07:39.746577Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7291:5349] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:39.816300Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:7320:5364]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:39.816592Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:07:39.816690Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:7322:5366] 2025-06-24T21:07:39.816770Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:7322:5366] 2025-06-24T21:07:39.817102Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7323:5367] 2025-06-24T21:07:39.817249Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7323:5367], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:07:39.817322Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:07:39.817497Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7322:5366], server id = [2:7323:5367], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:39.817577Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:39.817685Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:7320:5364], StatRequests.size() = 1 2025-06-24T21:07:39.948378Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MzU0ZmY1NmMtYWY5NzQ4Y2ItYWVlMmQ4M2UtZjljNzBjODk=, TxId: 2025-06-24T21:07:39.948443Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MzU0ZmY1NmMtYWY5NzQ4Y2ItYWVlMmQ4M2UtZjljNzBjODk=, TxId: 2025-06-24T21:07:39.949146Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:39.963014Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:39.963089Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:07:40.016473Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:07:40.016572Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:07:40.080594Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7322:5366], schemeshard count = 1 2025-06-24T21:07:40.986431Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:40.986527Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:07:40.988900Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:41.004537Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:41.005044Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:41.005120Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 4], AnalyzedShards 1 2025-06-24T21:07:41.018596Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:41.029505Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. 2025-06-24T21:07:41.030508Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-24T21:07:41.030621Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-24T21:07:41.043863Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-24T21:07:42.202726Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:42.202851Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:07:42.202905Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:42.203422Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:42.217247Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:42.217641Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:42.217752Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:42.218661Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-24T21:07:42.232275Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:42.232581Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-24T21:07:42.233185Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7441:5436], server id = [2:7442:5437], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:42.233324Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7441:5436], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:42.236859Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:42.236982Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:42.237228Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:42.237412Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:42.237780Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:42.240195Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7441:5436], server id = [2:7442:5437], tablet id = 72075186224037899 2025-06-24T21:07:42.240250Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:42.240891Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:42.280408Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7462:5456]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:42.280687Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:42.280740Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7462:5456], StatRequests.size() = 1 2025-06-24T21:07:42.410766Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NzVhYmE1NzktYWU4YTQ0NTAtYjQ4ODViOTItMzE3ODIwNDc=, TxId: 2025-06-24T21:07:42.410837Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NzVhYmE1NzktYWU4YTQ0NTAtYjQ4ODViOTItMzE3ODIwNDc=, TxId: 2025-06-24T21:07:42.411564Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:42.426151Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:42.426233Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3044:3297] >> KqpIndexes::UpsertWithoutExtraNullDelete+UseSink >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> KqpIndexes::UniqIndexComplexPkComplexFkOverlap [GOOD] >> KqpIndexes::UniqAndNoUniqSecondaryIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableAggrStatUnavailableNode [GOOD] Test command err: 2025-06-24T21:05:05.557962Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:417:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:05.558340Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:05.558448Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043a4/r3tmp/tmpEnXUfl/pdisk_1.dat 2025-06-24T21:05:05.979393Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4098, node 1 2025-06-24T21:05:06.224185Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:06.224243Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:06.224270Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:06.224743Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:06.227074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:06.322553Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:06.322669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:06.337735Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15086 2025-06-24T21:05:06.907849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:10.040394Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:10.083633Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:10.083759Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:10.112690Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:10.115071Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:10.321158Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:10.345300Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.345886Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.346388Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.346557Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.346827Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.346926Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.347047Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.347174Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.347290Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:10.556027Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:10.556176Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:10.568854Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:10.713203Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:10.772267Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:10.772368Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:10.809031Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:10.809353Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:10.809592Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:10.809661Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:10.809741Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:10.809818Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:10.809873Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:10.809927Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:10.810328Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:10.835743Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:10.835870Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1790:2561], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:10.845641Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2570] 2025-06-24T21:05:10.855121Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1847:2588] 2025-06-24T21:05:10.855426Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1847:2588], schemeshard id = 72075186224037897 2025-06-24T21:05:10.863222Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:10.881970Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:10.882027Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:10.882093Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:10.895591Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:10.908234Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:10.908357Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:11.110627Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:11.264775Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:11.343653Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:12.016120Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:12.247004Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:12.247243Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:12.264607Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:12.444615Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2242:2805];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:12.444788Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2242:2805];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:12.445012Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2242:2805];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:12.445102Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2242:2805];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:12.445184Z node 2 :TX_COLUMNSHARD WARN: ... ount = 1 2025-06-24T21:07:40.540038Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:07:40.540226Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8267:6125], server id = [2:8268:6126], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:40.540377Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:40.540508Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:8265:6123], StatRequests.size() = 1 2025-06-24T21:07:40.680728Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZWI1YzU4NGItZWNiNzAzNjQtNDhkZDY0YzUtZTg1YmU1MWI=, TxId: 2025-06-24T21:07:40.680814Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZWI1YzU4NGItZWNiNzAzNjQtNDhkZDY0YzUtZTg1YmU1MWI=, TxId: 2025-06-24T21:07:40.681285Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:40.695245Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:40.695310Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:07:40.739827Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:07:40.739919Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:07:40.826273Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8267:6125], schemeshard count = 1 2025-06-24T21:07:43.154335Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:43.154396Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:07:43.154432Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:07:43.154483Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:43.157826Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:43.174012Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:43.174505Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:43.174585Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:43.175461Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:07:43.192515Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:43.192776Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-24T21:07:43.193540Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8385:6185], server id = [2:8389:6189], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:43.193965Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8385:6185], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:43.195366Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8386:6186], server id = [2:8390:6190], tablet id = 72075186224037900, status = OK 2025-06-24T21:07:43.195442Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8386:6186], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:43.196096Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8387:6187], server id = [2:8391:6191], tablet id = 72075186224037901, status = OK 2025-06-24T21:07:43.196166Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8387:6187], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:43.197067Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8388:6188], server id = [2:8392:6192], tablet id = 72075186224037902, status = OK 2025-06-24T21:07:43.197125Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8388:6188], path = { OwnerId: 72075186224037897 LocalId: 4 } 
2025-06-24T21:07:43.202770Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:43.203541Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8385:6185], server id = [2:8389:6189], tablet id = 72075186224037899 2025-06-24T21:07:43.203601Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:43.204174Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-24T21:07:43.204762Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8386:6186], server id = [2:8390:6190], tablet id = 72075186224037900 2025-06-24T21:07:43.204795Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:43.205031Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-24T21:07:43.205372Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8387:6187], server id = [2:8391:6191], tablet id = 72075186224037901 2025-06-24T21:07:43.205397Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:43.205528Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-24T21:07:43.205571Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:43.205777Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:43.205888Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:43.206125Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8388:6188], server id = [2:8392:6192], tablet id = 72075186224037902 2025-06-24T21:07:43.206164Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:43.206524Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-24T21:07:43.230882Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:43.231106Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-24T21:07:43.231648Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8407:6203], server id = [2:8408:6204], tablet id = 72075186224037900, status = OK 2025-06-24T21:07:43.231735Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8407:6203], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:43.233022Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-24T21:07:43.233113Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:43.233285Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:43.233442Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:43.233842Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:43.236105Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8407:6203], server id = [2:8408:6204], tablet id = 72075186224037900 2025-06-24T21:07:43.236161Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:43.236723Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:43.273472Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8426:6222]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:43.273729Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:43.273797Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8426:6222], StatRequests.size() = 1 2025-06-24T21:07:43.431679Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ODBkMmFkY2ItMmZhMmU4OGQtZDVmMDJiNjktNWZjNGY4ZWY=, TxId: 2025-06-24T21:07:43.431753Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ODBkMmFkY2ItMmZhMmU4OGQtZDVmMDJiNjktNWZjNGY4ZWY=, TxId: ... 
waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-24T21:07:43.432391Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8440:6228]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:43.432639Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:43.433549Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:43.433612Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:07:43.436668Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:43.436739Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:07:43.436825Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:07:43.441627Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 probe = 4 >> TExportToS3Tests::ShouldRestartOnScanErrors [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeReqDistribution [GOOD] Test command err: 2025-06-24T21:05:12.874615Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:12.874919Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:12.875075Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00437c/r3tmp/tmpk0jetT/pdisk_1.dat 2025-06-24T21:05:13.264047Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7362, node 1 2025-06-24T21:05:13.509441Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:13.509503Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:13.509572Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:13.509822Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:13.512685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:13.621517Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:13.621691Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:13.635261Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25524 2025-06-24T21:05:14.186769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:17.274630Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:17.316478Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:17.316628Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:17.376803Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:17.378882Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:17.567072Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:17.601920Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:17.602599Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:17.603179Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:17.603370Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:17.603522Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:17.603822Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:17.603973Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:17.604111Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:17.604238Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:17.777883Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:17.777989Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:17.811072Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:17.924380Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:17.962746Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:17.962844Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:17.995239Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:17.996613Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:17.996841Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:17.996896Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:17.996973Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:17.997043Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:17.997111Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:17.997174Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:17.998203Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:18.031840Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:18.031935Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:18.038542Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:18.041596Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:18.041906Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:18.049936Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:18.074399Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:18.074446Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:18.074501Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:18.088976Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:18.097185Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:18.097362Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:18.275769Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:18.416515Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:18.461769Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:18.959605Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:19.147359Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:19.147507Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:19.168444Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:19.338302Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:19.338493Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:19.338728Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:19.338818Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:19.338891Z node 2 :TX_COLUMNSHARD WARN: ... 25-06-24T21:07:43.257838Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:8326:6174], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:07:43.258006Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8213:6116], server id = [2:8214:6117], tablet id = 72075186224037894 2025-06-24T21:07:43.258083Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8327:6175] 2025-06-24T21:07:43.258120Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8327:6175] 2025-06-24T21:07:43.299999Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:07:43.300122Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:07:43.300666Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:07:43.301399Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:07:43.301699Z node 2 :STATISTICS DEBUG: tx_init.cpp:55: [72075186224037894] Loaded database: /Root/Database 2025-06-24T21:07:43.301760Z node 2 :STATISTICS DEBUG: tx_init.cpp:59: [72075186224037894] Loaded traversal start key 2025-06-24T21:07:43.301801Z node 2 :STATISTICS DEBUG: tx_init.cpp:64: [72075186224037894] Loaded traversal table owner id: 72075186224037897 2025-06-24T21:07:43.301838Z node 2 :STATISTICS DEBUG: tx_init.cpp:69: [72075186224037894] Loaded traversal table local path id: 4 2025-06-24T21:07:43.301879Z node 2 :STATISTICS DEBUG: tx_init.cpp:74: [72075186224037894] Loaded traversal start time: 1750799263233460 2025-06-24T21:07:43.301925Z node 2 :STATISTICS DEBUG: tx_init.cpp:79: [72075186224037894] Loaded traversal IsColumnTable: 1 2025-06-24T21:07:43.302017Z node 2 :STATISTICS DEBUG: 
tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 1 2025-06-24T21:07:43.302095Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:07:43.302185Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 2 2025-06-24T21:07:43.302246Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:07:43.302308Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:07:43.302368Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:07:43.302528Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:43.303349Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:07:43.304064Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:43.304138Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:43.304246Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:07:43.305541Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:43.305604Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:43.308286Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-24T21:07:43.387513Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:43.387698Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-24T21:07:43.388674Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8374:6206], server id = [2:8378:6210], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:43.389093Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8374:6206], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:43.390460Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8375:6207], server id = [2:8379:6211], tablet id = 72075186224037900, status = OK 2025-06-24T21:07:43.390551Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8375:6207], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:43.391850Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8376:6208], server id = [2:8380:6212], tablet id = 72075186224037901, status = OK 2025-06-24T21:07:43.391949Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8376:6208], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:43.392140Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8377:6209], server id = [2:8381:6213], tablet id = 72075186224037902, status = OK 2025-06-24T21:07:43.392186Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8377:6209], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:43.397972Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:43.398732Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8374:6206], server id = [2:8378:6210], tablet id = 72075186224037899 2025-06-24T21:07:43.398789Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:43.399975Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-24T21:07:43.400335Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8375:6207], server id = [2:8379:6211], tablet id = 72075186224037900 2025-06-24T21:07:43.400371Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:43.400919Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-24T21:07:43.401189Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8376:6208], server id = [2:8380:6212], tablet id = 72075186224037901 2025-06-24T21:07:43.401217Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:43.401524Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-24T21:07:43.401609Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:43.401889Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 
2025-06-24T21:07:43.402098Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:43.402384Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:43.406411Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8377:6209], server id = [2:8381:6213], tablet id = 72075186224037902 2025-06-24T21:07:43.406454Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:43.407268Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:43.452350Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8410:6238]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:43.452631Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:43.452694Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8410:6238], StatRequests.size() = 1 2025-06-24T21:07:43.605504Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=Y2I4NDc3OTYtMmMzZTUxMTgtMWUyMTFiMi05YmNlMjA2MA==, TxId: 2025-06-24T21:07:43.605602Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=Y2I4NDc3OTYtMmMzZTUxMTgtMWUyMTFiMi05YmNlMjA2MA==, TxId: 2025-06-24T21:07:43.606303Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:43.619549Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8427:6244] 2025-06-24T21:07:43.619842Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:8427:6244], schemeshard id = 72075186224037897 2025-06-24T21:07:43.619946Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8327:6175], server id = [2:8428:6245], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:43.620054Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8428:6245] 2025-06-24T21:07:43.620135Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8428:6245], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-24T21:07:43.644731Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:43.644825Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:07:43.701883Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8431:6248]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:43.702244Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:43.702292Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:07:43.704828Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:43.704887Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:07:43.704946Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:07:43.709206Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> KqpIndexes::InnerJoinWithNonIndexWherePredicate [GOOD] >> KqpIndexes::JoinWithNonPKColumnsInPredicate+UseStreamJoin >> AnalyzeColumnshard::AnalyzeTwoColumnTables [GOOD] >> KqpMultishardIndex::DataColumnUpsertMixedSemantic >> KqpUniqueIndex::UpdateOnHidenChanges+DataColumn [GOOD] >> KqpUniqueIndex::UpdateOnHidenChanges-DataColumn >> TExportToS3Tests::ShouldRetryAtFinalStage [GOOD] >> KqpIndexes::DoUpsertWithoutIndexUpdate+UniqIndex-UseSink >> KqpMultishardIndex::SecondaryIndexSelectNull [GOOD] >> KqpMultishardIndex::SecondaryIndexSelect >> KqpIndexes::UpsertMultipleUniqIndexes >> DataShardSnapshots::LockedWriteCleanupOnCopyTable+UseSink [GOOD] >> DataShardSnapshots::LockedWriteCleanupOnCopyTable-UseSink >> KqpUniqueIndex::InsertFkAlreadyExist [GOOD] >> KqpUniqueIndex::InsertFkPartialColumnSet >> DataShardSnapshots::ReadIteratorLocalSnapshotThenWrite [GOOD] >> DataShardSnapshots::RepeatableReadAfterSplitRace ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeTwoColumnTables [GOOD] Test command err: 2025-06-24T21:04:58.197747Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:04:58.198147Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:04:58.198349Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e0/r3tmp/tmpRjLGoq/pdisk_1.dat 2025-06-24T21:04:58.702121Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15980, node 1 2025-06-24T21:04:59.389541Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:04:59.389612Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:04:59.389665Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:04:59.389914Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:04:59.398564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:04:59.552052Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:04:59.552220Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:04:59.566006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30369 2025-06-24T21:05:00.244806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:03.702118Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:03.741404Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:03.741499Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:03.805048Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:03.807314Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:04.018773Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:04.056697Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.057327Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.057867Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.058010Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.058156Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.058401Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.058496Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.058571Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.058646Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:04.255577Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:04.255703Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:04.276723Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:04.425467Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:04.470968Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:04.471037Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:04.503865Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:04.505327Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:04.505561Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:04.505625Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:04.505679Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:04.505753Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:04.505810Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:04.505863Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:04.506857Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:04.543239Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:04.543348Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:04.549336Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:04.552261Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:04.552510Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:04.562052Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:04.588696Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:04.588763Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:04.588856Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:04.606384Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:04.624362Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:04.624549Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:04.819673Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:05.071188Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:05.148508Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:05.633339Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:06.113915Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:06.114073Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:06.224583Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:06.338356Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:06.338687Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:06.338975Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:06.339103Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:06.339305Z node 2 :TX_COLUMNSHARD WARN: ... 6-24T21:07:39.902751Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:39.916886Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:39.916941Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:47: [72075186224037894] TTxFinishTraversal::Complete. Don't send TEvAnalyzeResponse. There are pending operations, OperationId operationId , ActorId=[1:3814:3568] 2025-06-24T21:07:40.510484Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-24T21:07:40.510559Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T21:07:41.276146Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:41.276234Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 5] is column table. 2025-06-24T21:07:41.279821Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:41.296376Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:41.296813Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:41.296863Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 5], AnalyzedShards 1 2025-06-24T21:07:41.321859Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:41.332823Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. 
2025-06-24T21:07:41.333915Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-24T21:07:41.333992Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-24T21:07:41.357723Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-24T21:07:42.662495Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:07:42.662704Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:42.684509Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:42.684605Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 5] is column table. 2025-06-24T21:07:42.684645Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-24T21:07:42.685295Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:42.698772Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:42.699166Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:42.699243Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:42.699695Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:07:42.724152Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:42.724382Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-24T21:07:42.724883Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8219:5911], server id = [2:8220:5912], tablet id = 72075186224037900, status = OK 2025-06-24T21:07:42.724986Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8219:5911], path = { OwnerId: 72075186224037897 LocalId: 5 } 2025-06-24T21:07:42.727885Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-24T21:07:42.727982Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:42.728233Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:42.728439Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:42.728766Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:07:42.730822Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8219:5911], server id = [2:8220:5912], tablet id = 72075186224037900 2025-06-24T21:07:42.730864Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:42.731499Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:42.752962Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ODVhYmY4NmItNzNiNDdiYjctYjUxZTZjMDItODBkOWUxMGQ=, TxId: 2025-06-24T21:07:42.753044Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ODVhYmY4NmItNzNiNDdiYjctYjUxZTZjMDItODBkOWUxMGQ=, TxId: 2025-06-24T21:07:42.753583Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:42.778812Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-24T21:07:42.778877Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:07:43.426835Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 3 is different from the current 0 2025-06-24T21:07:43.426914Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T21:07:44.093600Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:44.093673Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:07:44.093721Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:07:45.430814Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:45.430947Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 5] is column table. 2025-06-24T21:07:45.430981Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-24T21:07:45.431604Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:45.448113Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:45.448579Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:45.448679Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:45.449168Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-24T21:07:45.464557Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:45.464778Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 4, current Round: 0 2025-06-24T21:07:45.465383Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8318:5967], server id = [2:8319:5968], tablet id = 72075186224037900, status = OK 2025-06-24T21:07:45.465502Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8318:5967], path = { OwnerId: 72075186224037897 LocalId: 5 } 2025-06-24T21:07:45.469409Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-24T21:07:45.469529Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:45.469814Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:45.470020Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:45.473397Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:45.476108Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8318:5967], server id = [2:8319:5968], tablet id = 72075186224037900 2025-06-24T21:07:45.476159Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:45.477011Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:45.505488Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YjUxMTYyYzItNTFlOGQxOWYtYTQxYTNkZWUtYWJkM2U4ZWI=, TxId: 2025-06-24T21:07:45.505564Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YjUxMTYyYzItNTFlOGQxOWYtYTQxYTNkZWUtYWJkM2U4ZWI=, TxId: 2025-06-24T21:07:45.506166Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:45.521894Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-24T21:07:45.521980Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3814:3568] >> KqpPrefixedVectorIndexes::OrderByCosineLevel1+Nullable-UseSimilarity ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::ShouldRestartOnScanErrors [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:07:14.377340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:07:14.377433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:14.377467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:07:14.377495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:07:14.377543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:07:14.377567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:07:14.377655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:14.377724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:07:14.378297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:07:14.378557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:07:14.435504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:07:14.435553Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:14.446740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:07:14.447056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:07:14.447238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:07:14.453463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:07:14.453616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:07:14.454319Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:14.454591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:07:14.457232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:14.457409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:07:14.458388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:14.458445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:14.458649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:07:14.458689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:14.458729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:07:14.458806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:07:14.464311Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:07:14.552728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:07:14.552944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:14.553107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:07:14.553146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:07:14.553320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:07:14.553416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:14.555302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:14.555436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:07:14.555596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:14.555649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:07:14.555694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:07:14.555724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:07:14.557184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:14.557276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:07:14.557318Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:07:14.558502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:14.558532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:14.558587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:14.558631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:07:14.566431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:07:14.568085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:07:14.568231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:07:14.568885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:14.569007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:14.569051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:14.569265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:07:14.569309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:14.569437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:07:14.569523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:07:14.571172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:14.571209Z node 1 :FLAT_TX_SCHEMESHARD ... id 281474976710759:0 128 -> 129 2025-06-24T21:07:45.648229Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:17821 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 547995E7-C31F-4FB0-8D31-8B63A2B581C3 amz-sdk-request: attempt=1 content-length: 106 content-md5: heRlZdXBqq/26pCrTLfM5g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /metadata.json / / 106 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000005 2025-06-24T21:07:45.689785Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:45.689839Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710759, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-24T21:07:45.690090Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:45.690123Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:208:2208], at schemeshard: 72057594046678944, txId: 281474976710759, path id: 4 2025-06-24T21:07:45.690713Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 
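The PUT requests that the export test issues against the S3 mock (metadata.json, scheme.pb, data_00.csv) each carry a content-md5 header. For reference, that header value is the base64 encoding of the raw MD5 digest of the request body; a minimal Python sketch follows. The sample payload is a placeholder (the log does not show the uploaded file contents), so its digest will not match the values recorded in these requests.

import base64
import hashlib

def content_md5(body: bytes) -> str:
    # Content-MD5 for an HTTP/S3 PUT is the base64 encoding of the
    # binary (16-byte) MD5 digest of the request body.
    return base64.b64encode(hashlib.md5(body).digest()).decode("ascii")

# Placeholder 11-byte payload, mirroring the content-length of data_00.csv
# in the log; the real CSV body is not captured there, so this digest is
# illustrative only.
print(content_md5(b"a,1\nb,2\nc,3"))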
2025-06-24T21:07:45.690765Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 281474976710759:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:07:45.692077Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759 2025-06-24T21:07:45.692175Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759 2025-06-24T21:07:45.692206Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710759 2025-06-24T21:07:45.692237Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710759, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3 2025-06-24T21:07:45.692279Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-24T21:07:45.692374Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 0/1, is published: true REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:17821 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 9165FFC7-8267-4166-A4A7-5C6CF09B4A80 amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /scheme.pb / / 357 FAKE_COORDINATOR: Erasing txId 281474976710759 2025-06-24T21:07:45.696087Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710759 TestWaitNotification wait txId: 102 2025-06-24T21:07:45.696324Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:07:45.696380Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:07:45.705695Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:62: NotifyTxCompletion export in-flight, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:07:45.705812Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 102, at schemeshard: 72057594046678944 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:17821 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: B7F435A2-D00C-4D25-9860-95F9077F8BA9 amz-sdk-request: attempt=1 content-length: 106 content-md5: heRlZdXBqq/26pCrTLfM5g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /metadata.json / / 106 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:17821 
Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 3385A5CA-4367-486D-A65A-A8FB4687BAF5 amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /scheme.pb / / 357 REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:17821 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 6FB2001E-C789-4D02-A32E-F62403FDB41A amz-sdk-request: attempt=1 content-length: 11 content-md5: bj4KQf2rit2DOGLxvSlUww== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv / / 11 2025-06-24T21:07:46.275995Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 448 RawX2: 17179871599 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:07:46.276108Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 281474976710759, tablet: 72075186233409547, partId: 0 2025-06-24T21:07:46.276327Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944, message: Source { RawX1: 448 RawX2: 17179871599 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:07:46.276504Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 281474976710759:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 448 RawX2: 17179871599 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:07:46.276627Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710759:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:46.276689Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-24T21:07:46.276750Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 281474976710759:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:07:46.276816Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710759:0 129 -> 240 2025-06-24T21:07:46.277022Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 281474976710759:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: 
[OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:46.279989Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-24T21:07:46.280497Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-24T21:07:46.280576Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 281474976710759:0 ProgressState 2025-06-24T21:07:46.280783Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-24T21:07:46.280833Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-24T21:07:46.280911Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-24T21:07:46.280961Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-24T21:07:46.281020Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 1/1, is published: true 2025-06-24T21:07:46.281120Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [4:127:2151] message: TxId: 281474976710759 2025-06-24T21:07:46.281210Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-24T21:07:46.281295Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710759:0 2025-06-24T21:07:46.281343Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710759:0 2025-06-24T21:07:46.281502Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:07:46.284784Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710759 2025-06-24T21:07:46.284903Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976710759 2025-06-24T21:07:46.287664Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:07:46.287741Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:590:2544] TestWaitNotification: OK eventTxId 102 >> AnalyzeColumnshard::AnalyzeAnalyzeOneColumnTableSpecificColumns [GOOD] >> DataShardSnapshots::LockedWriteWithAsyncIndex-WithRestart-UseSink [GOOD] >> DataShardSnapshots::LockedWriteWithAsyncIndex+WithRestart-UseSink >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedBeforeRead-UseSink [GOOD] >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedAfterRead+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::ShouldRetryAtFinalStage [GOOD] Test command err: Leader for 
TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:07:06.641806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:07:06.641895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:06.641949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:07:06.641989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:07:06.642573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:07:06.642603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:07:06.642671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:07:06.642741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:07:06.643342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:07:06.644148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:07:06.706771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:07:06.706824Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:06.717669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:07:06.717924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:07:06.718080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:07:06.723802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:07:06.723919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:07:06.726612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.726830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, 
LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:07:06.731085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:06.732033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:07:06.737928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:06.737979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:07:06.738181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:07:06.738225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:06.738272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:07:06.738339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.743445Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:07:06.857553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:07:06.857764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.857963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:07:06.858002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:07:06.858160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:07:06.858247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:06.860015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 
2025-06-24T21:07:06.860160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:07:06.860280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.860324Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:07:06.860390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:07:06.860421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:07:06.861713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.861803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:07:06.861840Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:07:06.863168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.863212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:07:06.863279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.863339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:07:06.866898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:07:06.868752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:07:06.868972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:07:06.869947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:06.870080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } 
Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:07:06.870142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.870409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:07:06.870464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:07:06.870670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:07:06.870749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:07:06.872624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:07:06.872671Z node 1 :FLAT_TX_SCHEMESHARD ... wnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0018 2025-06-24T21:07:39.677339Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-24T21:07:39.677544Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 70 row count 2 2025-06-24T21:07:39.677641Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Table, is column=0, is olap=0, RowCount 2, DataSize 70 2025-06-24T21:07:39.677770Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-24T21:07:39.677828Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-24T21:07:39.677871Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=0, is column=0, is olap=0, RowCount 0, DataSize 0, with borrowed parts 2025-06-24T21:07:39.677907Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-24T21:07:39.688361Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-24T21:07:43.175820Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 70 rowCount 2 cpuUsage 0.0018 2025-06-24T21:07:43.199793Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0016 2025-06-24T21:07:43.241779Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-24T21:07:43.242020Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 70 row count 2 2025-06-24T21:07:43.242112Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Table, is column=0, is olap=0, RowCount 2, DataSize 70 2025-06-24T21:07:43.242243Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-24T21:07:43.242302Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-24T21:07:43.242348Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=0, is column=0, is olap=0, RowCount 0, DataSize 0, with borrowed parts 2025-06-24T21:07:43.242385Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-24T21:07:43.252799Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-24T21:07:46.737667Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:797: [Export] [s3] Bootstrap: self# [4:579:2535], attempt# 1 2025-06-24T21:07:46.758947Z node 4 :DATASHARD_BACKUP DEBUG: export_scan.cpp:118: [Export] [scanner] Handle TEvExportScan::TEvReset: self# [4:578:2534] 2025-06-24T21:07:46.766751Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:441: [Export] [s3] Handle TEvExportScan::TEvReady: self# [4:579:2535], sender# [4:578:2534] 2025-06-24T21:07:46.766845Z node 4 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [4:578:2534] 2025-06-24T21:07:46.767010Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [4:579:2535], sender# [4:578:2534], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 0 Checksum: } 2025-06-24T21:07:46.767302Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:526: [Export] [s3] Handle TEvDataShard::TEvS3Upload: self# [4:579:2535], upload# { Id: 1 Status: Complete Error: (empty maybe) Parts: [6e3e0a41fdab8add833862f1bd2954c3,1d8dd09e584ce6a47582a31b591900e2,d41d8cd98f00b204e9800998ecf8427e] } REQUEST: POST /data_00.csv?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:4229 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 7C709FF3-28EE-4953-A714-570C3E1DFC39 amz-sdk-request: attempt=1 content-length: 459 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 
x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /data_00.csv / uploadId=1 2025-06-24T21:07:46.772308Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:623: [Export] [s3] Handle TEvExternalStorage::TEvCompleteMultipartUploadResponse: self# [4:579:2535], result# 2025-06-24T21:07:46.772589Z node 4 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [4:578:2534], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-24T21:07:46.788460Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 452 RawX2: 17179871603 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:07:46.788547Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 281474976710759, tablet: 72075186233409547, partId: 0 2025-06-24T21:07:46.788737Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944, message: Source { RawX1: 452 RawX2: 17179871603 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:07:46.788909Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 281474976710759:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 452 RawX2: 17179871603 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:07:46.789001Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710759:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:07:46.789049Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-24T21:07:46.789088Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 281474976710759:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:07:46.789147Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710759:0 129 -> 240 2025-06-24T21:07:46.789321Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 281474976710759:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:46.792603Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-24T21:07:46.793108Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-24T21:07:46.793175Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 281474976710759:0 ProgressState 2025-06-24T21:07:46.793327Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-24T21:07:46.793376Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-24T21:07:46.793431Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-24T21:07:46.793489Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-24T21:07:46.793538Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 1/1, is published: true 2025-06-24T21:07:46.793633Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [4:127:2151] message: TxId: 281474976710759 2025-06-24T21:07:46.793704Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-24T21:07:46.793750Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710759:0 2025-06-24T21:07:46.793805Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710759:0 2025-06-24T21:07:46.793944Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:07:46.796376Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710759 2025-06-24T21:07:46.796483Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976710759 2025-06-24T21:07:46.798753Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:07:46.798818Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:600:2551] TestWaitNotification: OK eventTxId 102 >> KqpIndexes::DoUpsertWithoutIndexUpdate-UniqIndex-UseSink [GOOD] >> KqpIndexes::DoUpsertWithoutIndexUpdate-UniqIndex+UseSink >> DataShardSnapshots::MvccSnapshotLockedWritesWithConflicts-UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesWithReadConflicts |93.8%| [TA] $(B)/ydb/core/tx/schemeshard/ut_export/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpIndexMetadata::HandleNotReadyIndex [GOOD] >> KqpIndexMetadata::TestNoReadFromMainTableBeforeJoin |93.8%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_export/test-results/unittest/{meta.json ... 
results_accumulator.log}
>> TraverseColumnShard::TraverseColumnTableHiveDistributionAbsentNodes [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeAnalyzeOneColumnTableSpecificColumns [GOOD]
Test command err: 2025-06-24T21:05:27.636642Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:27.636848Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:27.636988Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004322/r3tmp/tmpXubYib/pdisk_1.dat 2025-06-24T21:05:27.968754Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12812, node 1 2025-06-24T21:05:28.168489Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:28.168544Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:28.168580Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:28.168761Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:28.171254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:28.281870Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:28.282020Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:28.295341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5260 2025-06-24T21:05:28.818342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:31.358511Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:31.389936Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:31.390047Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:31.449985Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:31.452517Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:31.636056Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:31.671125Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:31.671700Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:31.672240Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:31.672436Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:31.672566Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:31.672869Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:31.672986Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:31.673111Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:31.673205Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:31.846709Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:31.846822Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:31.860513Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:32.018520Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:32.066063Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:32.066188Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:32.098639Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:32.099935Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:32.100177Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:32.100239Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:32.100297Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:32.100343Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:32.100389Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:32.100434Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:32.101444Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:32.134855Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:32.134968Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:32.140162Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:32.142733Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:32.142927Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:32.149592Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:32.173343Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:32.173414Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:32.173482Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:32.186744Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:32.199234Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:32.199408Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:32.367200Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:32.540851Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:32.586112Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:33.083197Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:33.344129Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:33.344265Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:33.361276Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:33.452896Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:33.453204Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:33.453515Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:33.453629Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:33.453714Z node 2 :TX_COLUMNSHARD WARN: ... :626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:43.324770Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:07:43.328193Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:43.345221Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:43.345827Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:43.345931Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 4], AnalyzedShards 1 2025-06-24T21:07:43.360229Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:44.570967Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:44.571055Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:07:44.571094Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-24T21:07:44.571139Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-24T21:07:44.571195Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:44.571811Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:44.585362Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. 2025-06-24T21:07:44.585474Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:44.585931Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:44.586023Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:44.586888Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-24T21:07:44.587001Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-24T21:07:44.587464Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:07:44.600777Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-24T21:07:44.600885Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:44.601064Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-24T21:07:44.601606Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7445:5437], server id = [2:7446:5438], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:44.601719Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7445:5437], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:44.604957Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:44.605066Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:44.605295Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:44.605499Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:44.605667Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7445:5437], server id = [2:7446:5438], tablet id = 72075186224037899 2025-06-24T21:07:44.605708Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:44.605930Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:07:44.608522Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:44.645135Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7466:5457]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:44.645389Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:44.645432Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7466:5457], StatRequests.size() = 1 2025-06-24T21:07:44.776458Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=Y2RiNjg2YTgtMmQ1OGMyZGQtYjM0NzVlZWUtYWM5Y2RiZjQ=, TxId: 2025-06-24T21:07:44.776527Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=Y2RiNjg2YTgtMmQ1OGMyZGQtYjM0NzVlZWUtYWM5Y2RiZjQ=, TxId: 2025-06-24T21:07:44.776913Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:44.790449Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:44.790496Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:07:45.339201Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-24T21:07:45.339300Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T21:07:46.032457Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:46.032537Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:07:46.032593Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:07:47.195247Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:07:47.195505Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:47.217278Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:47.217441Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
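The RunDataQuery entry above is a parameterized YQL UPSERT into the aggregator's internal `.metadata/_statistics` table; in the captured output the element types of $column_tags and $data are stripped from the DECLARE lines. Below is a minimal Python sketch (not the aggregator's C++ code) that lays the same query out readably and shows how such parameters could be bound with the classic YDB Python SDK session API. List<Uint32> and List<String> are assumptions, and the table itself is written by the statistics aggregator, so this is purely illustrative of the query shape.

import ydb

# The query as it appears in the log, laid out for readability. The element
# types of $column_tags and $data are assumptions (they are not visible in
# the captured log line).
STATS_UPSERT = """
DECLARE $owner_id AS Uint64;
DECLARE $local_path_id AS Uint64;
DECLARE $stat_type AS Uint32;
DECLARE $column_tags AS List<Uint32>;
DECLARE $data AS List<String>;

UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data)
VALUES
    ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]),
    ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]);
"""

def upsert_column_stats(pool, owner_id, local_path_id, stat_type, column_tags, data):
    # pool is a ydb.SessionPool; the prepared query runs in a single
    # serializable transaction, writing one row per analyzed column.
    def callee(session):
        prepared = session.prepare(STATS_UPSERT)
        session.transaction(ydb.SerializableReadWrite()).execute(
            prepared,
            {
                "$owner_id": owner_id,
                "$local_path_id": local_path_id,
                "$stat_type": stat_type,
                "$column_tags": column_tags,
                "$data": data,
            },
            commit_tx=True,
        )

    return pool.retry_operation_sync(callee)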
2025-06-24T21:07:47.217485Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:47.218037Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:47.231947Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:47.232281Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:47.232344Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:47.232762Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:07:47.257830Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:47.258031Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-24T21:07:47.258582Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7560:5507], server id = [2:7561:5508], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:47.258690Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7560:5507], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:47.259965Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:47.260059Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:47.260331Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:47.260538Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:47.260889Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:07:47.263532Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7560:5507], server id = [2:7561:5508], tablet id = 72075186224037899 2025-06-24T21:07:47.263572Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:47.264289Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:47.286668Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OWQ1NjJhNTMtYjdjMjE2MmEtNzc0ZjUwN2UtZDI4MDZiMjY=, TxId: 2025-06-24T21:07:47.286748Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OWQ1NjJhNTMtYjdjMjE2MmEtNzc0ZjUwN2UtZDI4MDZiMjY=, TxId: 2025-06-24T21:07:47.287372Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:47.302811Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:47.302898Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3046:3298] >> KqpIndexes::SecondaryIndexUpsert2Update >> KqpIndexes::ForbidViewModification >> KqpIndexes::UpdateDeletePlan+UseSink [GOOD] >> KqpIndexes::UpdateDeletePlan-UseSink >> KqpPrefixedVectorIndexes::OrderByCosineLevel1-Nullable-UseSimilarity ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableHiveDistributionAbsentNodes [GOOD] Test command err: 2025-06-24T21:05:21.414425Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:21.414780Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:21.414937Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00436d/r3tmp/tmpDx2bHL/pdisk_1.dat 2025-06-24T21:05:21.754367Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29417, node 1 2025-06-24T21:05:21.963036Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:21.963123Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:21.963195Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:21.963460Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:21.966183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:22.070485Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:22.070646Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:22.083917Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29989 2025-06-24T21:05:22.631166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:25.385487Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:25.425559Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:25.425692Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:25.486175Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:25.488532Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:25.678678Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:25.713960Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:25.714581Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:25.715117Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:25.715309Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:25.715433Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:25.715664Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:25.715781Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:25.715909Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:25.716000Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:25.890345Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:25.890471Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:25.903713Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:26.073674Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:26.116973Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:26.117054Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:26.141970Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:26.143187Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:26.143402Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:26.143448Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:26.143489Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:26.143578Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:26.143628Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:26.143669Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:26.144449Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:26.177097Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:26.177187Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:26.182855Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:26.185497Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:26.185692Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:26.191848Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:26.214565Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:26.214620Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:26.214683Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:26.226581Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:26.233048Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:26.233217Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:26.410501Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:26.553840Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:26.598954Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:27.095002Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:27.391805Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:27.391961Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:27.414864Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:27.572479Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:27.572707Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:27.572933Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:27.573013Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:27.573102Z node 2 :TX_COLUMNSHARD WARN: ... er/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:45.977374Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:8124:6059], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-24T21:07:46.196073Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:8196:6103] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:46.274284Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:8218:6117]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:46.274551Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:07:46.274648Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8220:6119] 2025-06-24T21:07:46.274727Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8220:6119] 2025-06-24T21:07:46.275257Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8220:6119], server id = [2:8221:6120], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:46.275349Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8221:6120] 2025-06-24T21:07:46.275475Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8221:6120], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:07:46.275576Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:07:46.275787Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:46.275907Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:8218:6117], StatRequests.size() = 1 2025-06-24T21:07:46.444493Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MTUyZTYxOTItNWRlZmMwMGEtZWVjNThmNWUtNjZhZTc3Zg==, TxId: 2025-06-24T21:07:46.444560Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MTUyZTYxOTItNWRlZmMwMGEtZWVjNThmNWUtNjZhZTc3Zg==, TxId: 2025-06-24T21:07:46.444987Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:46.470991Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:46.471071Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:07:46.563946Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:07:46.564060Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:07:46.629735Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8220:6119], schemeshard count = 1 2025-06-24T21:07:48.544074Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:48.544142Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:07:48.544180Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:07:48.544233Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:48.548309Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:48.567272Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:48.567889Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:48.567989Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:48.568959Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:07:48.583780Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:48.584106Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-24T21:07:48.585045Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8335:6179], server id = [2:8339:6183], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:48.585481Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8335:6179], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:48.585838Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8336:6180], server id = [2:8340:6184], tablet id = 72075186224037900, status = OK 2025-06-24T21:07:48.585917Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8336:6180], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:48.594652Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8337:6181], server id = [2:8341:6185], tablet id = 72075186224037901, status = OK 2025-06-24T21:07:48.594790Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8337:6181], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:48.595169Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8338:6182], server id = [2:8342:6186], tablet id = 72075186224037902, status = OK 2025-06-24T21:07:48.595231Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8338:6182], path = { OwnerId: 72075186224037897 LocalId: 4 } 
2025-06-24T21:07:48.603366Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:48.604015Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8335:6179], server id = [2:8339:6183], tablet id = 72075186224037899 2025-06-24T21:07:48.604074Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:48.604344Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-24T21:07:48.604921Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8336:6180], server id = [2:8340:6184], tablet id = 72075186224037900 2025-06-24T21:07:48.604957Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:48.605730Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-24T21:07:48.606375Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8338:6182], server id = [2:8342:6186], tablet id = 72075186224037902 2025-06-24T21:07:48.606411Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:48.606473Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-24T21:07:48.606520Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:48.606730Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:48.606960Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:48.607370Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:07:48.609694Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8337:6181], server id = [2:8341:6185], tablet id = 72075186224037901 2025-06-24T21:07:48.609738Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:48.610447Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:48.650277Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8371:6211]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:48.650534Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:48.650578Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8371:6211], StatRequests.size() = 1 2025-06-24T21:07:48.798679Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MTI5OTIwZDktNjlkM2YyZTItNDYyOGEwZDktZmNiMjY0YmU=, TxId: 2025-06-24T21:07:48.798753Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MTI5OTIwZDktNjlkM2YyZTItNDYyOGEwZDktZmNiMjY0YmU=, TxId: ... waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-24T21:07:48.799267Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8384:6217]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:48.799643Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:48.799997Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:48.800042Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:07:48.802940Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:48.803021Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:07:48.803079Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:07:48.807240Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> TraverseColumnShard::TraverseColumnTableRebootSaTabletInAggregate [GOOD] >> TraverseColumnShard::TraverseColumnTableHiveDistributionZeroNodes [GOOD] >> KqpVectorIndexes::OrderByCosineLevel1+Nullable+UseSimilarity >> KqpIndexes::CreateTableWithExplicitAsyncIndexSQL [GOOD] >> KqpIndexes::SelectConcurentTX [GOOD] >> KqpIndexes::SelectConcurentTX2 >> 
KqpIndexes::SelectFromIndexesAndFreeSpaceLogicDoesntTimeout [GOOD] >> KqpIndexes::Uint8Index >> KqpUniqueIndex::UpdateImplicitNullInComplexFk2 [GOOD] >> KqpUniqueIndex::UpsertImplicitNullInComplexFk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableRebootSaTabletInAggregate [GOOD] Test command err: 2025-06-24T21:05:25.152369Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:25.152668Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:25.152803Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00433f/r3tmp/tmpKJs5VB/pdisk_1.dat 2025-06-24T21:05:25.502062Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14630, node 1 2025-06-24T21:05:25.703262Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:25.703314Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:25.703353Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:25.703521Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:25.707967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:25.810192Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:25.810332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:25.823621Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30137 2025-06-24T21:05:26.341760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:28.912156Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:28.942062Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:28.942179Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:29.001001Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:29.002679Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:29.174856Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:29.209922Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:29.210560Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:29.211232Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:29.211443Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:29.211568Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:29.211813Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:29.211940Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:29.212054Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:29.212137Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:29.387369Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:29.387489Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:29.400285Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:29.530570Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:29.567253Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:29.567352Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:29.593491Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:29.594621Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:29.594829Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:29.594881Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:29.594919Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:29.594960Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:29.595011Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:29.595077Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:29.595717Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:29.626541Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:29.626655Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:29.631539Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:29.634203Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:29.634366Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:29.639981Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:29.660857Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:29.660903Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:29.660955Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:29.671720Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:29.677463Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:29.677567Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:29.824347Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:29.956575Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:30.001801Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:30.495256Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:30.738077Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:30.738189Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:30.753327Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:30.891404Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:30.891713Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:30.892065Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:30.892194Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:30.892265Z node 2 :TX_COLUMNSHARD WARN: ... tDestroyed 2025-06-24T21:07:49.826615Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:07:49.826702Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:07:49.827078Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:07:49.827794Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:07:49.828051Z node 2 :STATISTICS DEBUG: tx_init.cpp:55: [72075186224037894] Loaded database: /Root/Database 2025-06-24T21:07:49.828087Z node 2 :STATISTICS DEBUG: tx_init.cpp:59: [72075186224037894] Loaded traversal start key 2025-06-24T21:07:49.828114Z node 2 :STATISTICS DEBUG: tx_init.cpp:64: [72075186224037894] Loaded traversal table owner id: 72075186224037897 2025-06-24T21:07:49.828139Z node 2 :STATISTICS DEBUG: tx_init.cpp:69: [72075186224037894] Loaded traversal table local path id: 4 2025-06-24T21:07:49.828165Z node 2 :STATISTICS DEBUG: tx_init.cpp:74: [72075186224037894] Loaded traversal start time: 1750799269712328 2025-06-24T21:07:49.828192Z node 2 :STATISTICS DEBUG: tx_init.cpp:79: [72075186224037894] Loaded traversal IsColumnTable: 1 2025-06-24T21:07:49.828218Z node 2 :STATISTICS DEBUG: tx_init.cpp:84: [72075186224037894] Loaded global traversal round: 2 2025-06-24T21:07:49.828269Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 1 2025-06-24T21:07:49.828306Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:07:49.828382Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 2 2025-06-24T21:07:49.828431Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:07:49.828480Z node 2 :STATISTICS DEBUG: 
tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:07:49.828547Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:07:49.828672Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:49.829335Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:07:49.829820Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:49.829891Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:49.830296Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:07:49.831396Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:49.831472Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:49.833316Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:07:49.922816Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:49.922977Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 2 2025-06-24T21:07:49.923620Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8323:6174], server id = [2:8327:6178], tablet id = 72075186224037900 2025-06-24T21:07:49.923672Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:49.924315Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8389:6219], server id = [2:8393:6223], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:49.924445Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8389:6219], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:49.925656Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8390:6220], server id = [2:8394:6224], tablet id = 72075186224037900, status = OK 2025-06-24T21:07:49.925732Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8390:6220], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:49.926069Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8391:6221], server id = [2:8396:6226], tablet id = 72075186224037901, status = OK 2025-06-24T21:07:49.926125Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8391:6221], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:49.926467Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8392:6222], server id = [2:8395:6225], tablet id = 72075186224037902, status = OK 2025-06-24T21:07:49.926541Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8392:6222], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:49.927442Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 
2025-06-24T21:07:49.928833Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8389:6219], server id = [2:8393:6223], tablet id = 72075186224037899 2025-06-24T21:07:49.928884Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:49.929618Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-24T21:07:49.930003Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8392:6222], server id = [2:8395:6225], tablet id = 72075186224037902 2025-06-24T21:07:49.930051Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:49.930183Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-24T21:07:49.930835Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8391:6221], server id = [2:8396:6226], tablet id = 72075186224037901 2025-06-24T21:07:49.930863Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:49.933952Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-24T21:07:49.934016Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:49.934279Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:49.934585Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:49.934839Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:07:49.937922Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8390:6220], server id = [2:8394:6224], tablet id = 72075186224037900 2025-06-24T21:07:49.937965Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:49.938686Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:49.979319Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8419:6248]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:49.979545Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:49.979584Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8419:6248], StatRequests.size() = 1 2025-06-24T21:07:50.089276Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8430:6251] 2025-06-24T21:07:50.089432Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8337:6187], server id = [2:8430:6251], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:50.089604Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8430:6251], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-24T21:07:50.089847Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8431:6252] 2025-06-24T21:07:50.089948Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:8431:6252], schemeshard id = 72075186224037897 2025-06-24T21:07:50.105803Z node 2 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:212: [72075186224037891] TEvIntervalQuerySummary, time mismath: node id# 2, interval end# 1970-01-01T00:02:03.000000Z, event interval end# 2025-06-24T21:07:48.000000Z 2025-06-24T21:07:50.106504Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MmE1YWZmNzYtMzNjMGU0MTYtMjVmYjI4MGQtYTU1ZWQ5OTA=, TxId: 2025-06-24T21:07:50.106566Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MmE1YWZmNzYtMzNjMGU0MTYtMjVmYjI4MGQtYTU1ZWQ5OTA=, TxId: 2025-06-24T21:07:50.107123Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:50.152098Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:50.152175Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
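Once TTxFinishTraversal completes, the aggregated statistics for the traversed path are persisted and can be served to TStatService readers (the LoadStatistics requests that follow). For orientation only, an illustrative read of the persisted rows — the table and column names come from the logged upsert, the filter values from this traversal's path, and the query itself is an assumption rather than anything issued in this log — could be:

    -- Illustrative sketch only: inspect statistics rows for the traversed path,
    -- run against the /Root/Database tenant where `.metadata/_statistics` lives.
    SELECT owner_id, local_path_id, stat_type, column_tag
    FROM `.metadata/_statistics`
    WHERE owner_id = CAST(72075186224037897 AS Uint64)
      AND local_path_id = CAST(4 AS Uint64);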
2025-06-24T21:07:50.206367Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8438:6258]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:50.206739Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:50.206798Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:07:50.209881Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:50.209961Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:07:50.210015Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:07:50.215382Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> KqpVectorIndexes::BuildIndexTimesAndUser >> KqpUniqueIndex::ReplaceFkAlreadyExist [GOOD] >> KqpUniqueIndex::InsertNullInPk >> KqpIndexes::UpsertWithoutExtraNullDelete+UseSink [GOOD] >> KqpIndexes::UpsertWithNullKeysSimple >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel3 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableHiveDistributionZeroNodes [GOOD] Test command err: 2025-06-24T21:05:18.272714Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:18.273006Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:18.273109Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004374/r3tmp/tmpIsYrtR/pdisk_1.dat 2025-06-24T21:05:18.621569Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9479, node 1 2025-06-24T21:05:18.834028Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:18.834094Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:18.834149Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:18.834403Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:18.837345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:18.943706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:18.943830Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:18.956947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5925 2025-06-24T21:05:19.473405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:22.172207Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:22.205499Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:22.205598Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:22.265344Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:22.267526Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:22.440774Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:22.475911Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:22.476487Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:22.477117Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:22.477283Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:22.477402Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:22.477647Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:22.477786Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:22.477904Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:22.477992Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:22.651899Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:22.651993Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:22.664875Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:22.814341Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:22.858798Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:22.858894Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:22.887876Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:22.889314Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:22.889501Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:22.889553Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:22.889618Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:22.889672Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:22.889717Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:22.889762Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:22.890364Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:22.921938Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:22.922020Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:22.927905Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:22.930623Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:22.930818Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:22.937587Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:22.959915Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:22.959977Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:22.960037Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:22.971734Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:22.978165Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:22.978286Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:23.134128Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:23.285821Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:23.341696Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:23.836060Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:24.115102Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:24.115274Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:24.135319Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:24.300763Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:24.300995Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:24.301239Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:24.301346Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2802];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:24.301474Z node 2 :TX_COLUMNSHARD WARN: l ... 1:07:46.667558Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:8213:6118]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:46.667933Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:07:46.668045Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8215:6120] 2025-06-24T21:07:46.668159Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8215:6120] 2025-06-24T21:07:46.668807Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8215:6120], server id = [2:8216:6121], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:46.668908Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8216:6121] 2025-06-24T21:07:46.669080Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8216:6121], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:07:46.669171Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:07:46.669382Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:46.669522Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:8213:6118], StatRequests.size() = 1 2025-06-24T21:07:46.839527Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZGU2NjRkZDktZmJkZmJkNjAtNDQ5MjlkNTYtMjM0NzhmY2I=, TxId: 2025-06-24T21:07:46.839614Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: 
ydb://session/3?node_id=2&id=ZGU2NjRkZDktZmJkZmJkNjAtNDQ5MjlkNTYtMjM0NzhmY2I=, TxId: 2025-06-24T21:07:46.840229Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:46.855083Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:46.855180Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:07:46.944500Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:07:46.944607Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:07:47.039880Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8215:6120], schemeshard count = 1 2025-06-24T21:07:48.967397Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:48.967460Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:07:48.967496Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:07:48.967538Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:48.971203Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:48.989136Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:48.989728Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:48.989851Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:48.990952Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 2 2025-06-24T21:07:48.991031Z node 2 :STATISTICS WARN: tx_response_tablet_distribution.cpp:65: [72075186224037894] TTxResponseTabletDistribution::Execute. Some tablets are probably in Hive boot queue 2025-06-24T21:07:48.991098Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:50.217542Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-24T21:07:50.232117Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:50.232363Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-24T21:07:50.233096Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8367:6196], server id = [2:8371:6200], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:50.233469Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8367:6196], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:50.233855Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8368:6197], server id = [2:8372:6201], tablet id = 72075186224037900, status = OK 2025-06-24T21:07:50.233917Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8368:6197], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:50.235050Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8369:6198], server id = [2:8373:6202], tablet id = 72075186224037901, status = OK 2025-06-24T21:07:50.235107Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8369:6198], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:50.236384Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8370:6199], server id = [2:8374:6203], tablet id = 72075186224037902, status = OK 2025-06-24T21:07:50.236450Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8370:6199], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:50.241762Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:50.242727Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8367:6196], server id = [2:8371:6200], tablet id = 72075186224037899 2025-06-24T21:07:50.242783Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:50.243185Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-24T21:07:50.243654Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8368:6197], server id = [2:8372:6201], tablet id = 72075186224037900 2025-06-24T21:07:50.243686Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:50.244134Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-24T21:07:50.244535Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8369:6198], server id = [2:8373:6202], tablet id = 72075186224037901 2025-06-24T21:07:50.244565Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:50.245021Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-24T21:07:50.245070Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:50.245250Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 
2025-06-24T21:07:50.245458Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:50.245848Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:50.247905Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8370:6199], server id = [2:8374:6203], tablet id = 72075186224037902 2025-06-24T21:07:50.247942Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:50.248725Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:50.313936Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8403:6228]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:50.314196Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:50.314260Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8403:6228], StatRequests.size() = 1 2025-06-24T21:07:50.438240Z node 2 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:212: [72075186224037891] TEvIntervalQuerySummary, time mismath: node id# 2, interval end# 1970-01-01T00:02:04.000000Z, event interval end# 2025-06-24T21:07:48.000000Z 2025-06-24T21:07:50.438906Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YmY1OGEzZDgtZjM2YjY4NTEtMmU4M2Q4ZjQtY2UzNmI1ZWQ=, TxId: 2025-06-24T21:07:50.438955Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YmY1OGEzZDgtZjM2YjY4NTEtMmU4M2Q4ZjQtY2UzNmI1ZWQ=, TxId: ... 
waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-24T21:07:50.439509Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8416:6234]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:50.439923Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:50.439978Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:07:50.440171Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:50.443713Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:50.443825Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:07:50.443879Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:07:50.448753Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::CreateTableWithExplicitAsyncIndexSQL [GOOD] Test command err: Trying to start YDB, gRPC: 32106, MsgBus: 25721 2025-06-24T21:07:34.308815Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625541841354250:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:34.308897Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004629/r3tmp/tmpyquN8f/pdisk_1.dat 2025-06-24T21:07:34.545247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:34.545352Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:34.547131Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:34.569282Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:34.569599Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625541841354231:2079] 1750799254308176 != 1750799254308179 TServer::EnableGrpc on GrpcPort 32106, node 1 2025-06-24T21:07:34.606616Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:34.606644Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:34.606652Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:34.606833Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to 
server localhost:25721 TClient is connected to server localhost:25721 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:35.008218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:35.027309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:35.142904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:35.274485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:35.315396Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:35.350649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:36.598592Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625550431290467:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:36.598694Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:36.868200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:36.890090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:36.913868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:36.935749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:36.962182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:36.991923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:37.021281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:37.104042Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625554726258422:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:37.104133Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:37.104185Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625554726258427:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:37.122356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:37.131380Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625554726258429:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:37.203661Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625554726258480:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:38.083037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:38.515103Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T21:07:38.527294Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill Trying to start YDB, gRPC: 61798, MsgBus: 17563 2025-06-24T21:07:39.265262Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625559567903082:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:3 ... vered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004629/r3tmp/tmpI7yI94/pdisk_1.dat 2025-06-24T21:07:44.984825Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:44.986020Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519625583173143709:2079] 1750799264890649 != 1750799264890652 2025-06-24T21:07:45.037530Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:45.037629Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:45.039096Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62080, node 3 2025-06-24T21:07:45.091229Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:45.091250Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:45.091256Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:45.091348Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11584 TClient is connected to server localhost:11584 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:07:45.593974Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:45.606586Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:45.679041Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:45.850574Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:45.908904Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:45.921708Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:47.986800Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625596058047213:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:47.986881Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:48.037306Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.064359Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.099962Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.136062Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.166655Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.208935Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.251660Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.318908Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625600353015163:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:48.318991Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:48.319221Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625600353015168:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:48.324012Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:48.335263Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519625600353015170:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:48.406242Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519625600353015221:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:49.631782Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:49.755584Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519625604647983062:3739] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/TestTable\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:49.891351Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519625583173143728:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:49.891861Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:07:49.926857Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T21:07:50.966307Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill >> KqpIndexes::IndexTopSortPushDown [GOOD] >> KqpMultishardIndex::DataColumnUpsertMixedSemantic [GOOD] >> KqpMultishardIndex::DataColumnWrite+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::UpdateImplicitNullInComplexFk2 [GOOD] Test command err: Trying to start YDB, gRPC: 20336, MsgBus: 14594 2025-06-24T21:07:35.759698Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625545669803206:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:35.759819Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004626/r3tmp/tmpehQhrN/pdisk_1.dat 2025-06-24T21:07:36.008758Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:36.008977Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625545669803188:2079] 1750799255758861 != 1750799255758864 TServer::EnableGrpc on GrpcPort 20336, node 1 2025-06-24T21:07:36.068341Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:36.068363Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:36.068369Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: 
(empty maybe) 2025-06-24T21:07:36.068563Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:07:36.098421Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:36.098528Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:36.100298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14594 TClient is connected to server localhost:14594 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:36.572263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:36.600727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:36.686542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:36.768947Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:36.805675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:36.879537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:38.122402Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625558554706713:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:38.122509Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:38.400173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:38.423397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:38.449793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:38.475688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:38.501460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:38.529783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:38.558181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:38.633485Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625558554707369:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:38.633546Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:38.633624Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625558554707374:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:38.637053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:38.646366Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625558554707376:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:38.711097Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625558554707427:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:39.540655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:40.760100Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625545669803206:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:40.760183Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 8820, MsgBus: 5566 2025-06-24T21:07:43.289691Z node 2 :METADATA_PROV ... : HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:43.468305Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:43.468329Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:43.468337Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:43.468417Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5566 TClient is connected to server localhost:5566 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:43.901414Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:07:43.917412Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:43.969759Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:44.110475Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:44.178231Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:44.312414Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:46.627193Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625589332428250:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:46.627279Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:46.690703Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:46.732552Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:46.761193Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:46.791020Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:46.826610Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:46.864127Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:46.914671Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:47.005918Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625593627396209:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:47.006025Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:47.006218Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625593627396214:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:47.011031Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:47.021861Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625593627396216:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:47.101784Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625593627396267:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:48.175386Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:48.339310Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625576447524748:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:48.341492Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:07:50.475451Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhw9fns3etyqhchqn2qhde6, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDQ2YjBmNTUtMmFiMjkwMmEtMTg0NjNjMmUtNTI4ZTlmOTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-24T21:07:50.487126Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZDQ2YjBmNTUtMmFiMjkwMmEtMTg0NjNjMmUtNTI4ZTlmOTE=, ActorId: [2:7519625597922364594:2531], ActorState: ExecuteState, TraceId: 01jyhw9fns3etyqhchqn2qhde6, Create QueryResponse for error on request, msg: 2025-06-24T21:07:51.800732Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhw9gp4a08t05cdeg59rs1f, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDQ2YjBmNTUtMmFiMjkwMmEtMTg0NjNjMmUtNTI4ZTlmOTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 
2025-06-24T21:07:51.800980Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZDQ2YjBmNTUtMmFiMjkwMmEtMTg0NjNjMmUtNTI4ZTlmOTE=, ActorId: [2:7519625597922364594:2531], ActorState: ExecuteState, TraceId: 01jyhw9gp4a08t05cdeg59rs1f, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::IndexTopSortPushDown [GOOD] Test command err: Trying to start YDB, gRPC: 22276, MsgBus: 12811 2025-06-24T21:07:24.316031Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625497036418140:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:24.316100Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00463c/r3tmp/tmp2z1Xsw/pdisk_1.dat 2025-06-24T21:07:24.632441Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:24.632808Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625497036418117:2079] 1750799244313118 != 1750799244313121 TServer::EnableGrpc on GrpcPort 22276, node 1 2025-06-24T21:07:24.703593Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:24.703681Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:24.705415Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:24.726199Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:24.726225Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:24.726231Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:24.726410Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12811 TClient is connected to server localhost:12811 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:07:25.303291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:25.323211Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:25.338571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:25.468315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:25.584391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:25.656028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:26.847285Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625505626354351:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:26.847384Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.061726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.083926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.106810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.131127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.153233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.218263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.244831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.327253Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625509921322312:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.327322Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.327470Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625509921322318:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.330422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:27.337992Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625509921322320:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:27.413482Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625509921322371:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:28.222285Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625514216289955:3602], Recipient [1:7519625497036418475:2160]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:28.222319Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:28.222329Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:28.222361Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519625514216289951:3599], Recipient [1:7519625497036418475:2160]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-06-24T21:07:28.222371Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvS ... 43.507274Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519625580177198202:2079] 1750799263384022 != 1750799263384025 TServer::EnableGrpc on GrpcPort 3401, node 3 2025-06-24T21:07:43.520590Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:43.520700Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:43.524620Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:43.551790Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:43.551812Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:43.551821Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:43.551953Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21109 TClient is connected to server localhost:21109 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:44.002861Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:44.017207Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:44.090722Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:44.263391Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:44.329206Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:44.407388Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:46.968354Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625593062101719:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:46.968450Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:47.039664Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:47.076449Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:47.111429Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:47.150984Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:47.183222Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:47.254928Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:47.288701Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:47.357073Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625597357069671:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:47.357163Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:47.358319Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625597357069676:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:47.362523Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:47.380194Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519625597357069678:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:47.470292Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519625597357069729:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:48.385640Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519625580177198226:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:48.385706Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:07:48.702299Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.744851Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.853875Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpIndexes::MultipleSecondaryIndex+UseSink >> KqpMultishardIndex::DataColumnWriteNull >> KqpIndexes::MultipleSecondaryIndexWithSameComulns-UseSink [GOOD] >> KqpUniqueIndex::UpdateOnHidenChanges-DataColumn [GOOD] >> KqpVectorIndexes::OrderByCosineLevel2+Nullable-UseSimilarity >> KqpIndexes::DoUpsertWithoutIndexUpdate+UniqIndex-UseSink [GOOD] >> KqpIndexes::DoUpsertWithoutIndexUpdate+UniqIndex+UseSink >> KqpUniqueIndex::InsertFkPartialColumnSet [GOOD] >> KqpUniqueIndex::InsertFkDuplicate >> KqpIndexes::DoUpsertWithoutIndexUpdate-UniqIndex+UseSink [GOOD] >> KqpIndexes::DuplicateUpsertInterleave >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedAfterRead+UseSink [GOOD] >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedAfterRead-UseSink >> DataShardSnapshots::LockedWriteCleanupOnCopyTable-UseSink [GOOD] >> DataShardSnapshots::DelayedWriteReadableAfterSplit >> KqpUniqueIndex::UpsertExplicitNullInComplexFk ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::MultipleSecondaryIndexWithSameComulns-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 30327, MsgBus: 21244 2025-06-24T21:07:25.411133Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625501381652153:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:25.411313Z node 1 :METADATA_PROVIDER ERROR: 
log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004630/r3tmp/tmprFrDbe/pdisk_1.dat 2025-06-24T21:07:25.662667Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:25.663428Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625501381652126:2079] 1750799245409808 != 1750799245409811 TServer::EnableGrpc on GrpcPort 30327, node 1 2025-06-24T21:07:25.732834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:25.732851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:25.732856Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:25.732976Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:07:25.761861Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:25.761962Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:25.763925Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21244 TClient is connected to server localhost:21244 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:26.133792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:26.157626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:26.266780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:26.391965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:26.424900Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:26.464457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:27.558740Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625509971588372:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.558860Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.798602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.819888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.841884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.863204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.884166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.906341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.927981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:27.967361Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625509971589023:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.967445Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.967545Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625509971589028:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:27.970909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:27.977830Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625509971589030:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:28.077110Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625514266556377:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:28.740547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:28.768604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:28.798241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part propo ... 24037921 NodeId: 3 StartTime: 1750799264340 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:07:54.381862Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:07:54.381892Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037921 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] state 'Ready' dataSize 1240 rowCount 7 cpuUsage 0 2025-06-24T21:07:54.381976Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037921 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] raw table stats: DataSize: 1240 RowCount: 7 IndexSize: 0 InMemSize: 1240 LastAccessTime: 1750799266010 LastUpdateTime: 1750799266010 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 7 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:07:54.382006Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.099996s, queue# 1 2025-06-24T21:07:54.382445Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [3:7519625626856575181:4399], Recipient [3:7519625566727029169:2152]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:54.382462Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:54.382472Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:54.382633Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:7519625583906900214:2421], Recipient 
[3:7519625566727029169:2152]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037919 TableLocalId: 12 Generation: 1 Round: 0 TableStats { DataSize: 1032 RowCount: 5 IndexSize: 0 InMemSize: 1032 LastAccessTime: 1750799266008 LastUpdateTime: 1750799266008 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 5 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037919 NodeId: 3 StartTime: 1750799264340 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:07:54.382646Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:07:54.382666Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037919 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] state 'Ready' dataSize 1032 rowCount 5 cpuUsage 0 2025-06-24T21:07:54.382743Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037919 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] raw table stats: DataSize: 1032 RowCount: 5 IndexSize: 0 InMemSize: 1032 LastAccessTime: 1750799266008 LastUpdateTime: 1750799266008 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 5 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:07:54.390223Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [3:7519625626856575188:4403], Recipient [3:7519625566727029169:2152]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:54.390278Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:54.390302Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:54.390574Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:7519625583906900203:2419], Recipient [3:7519625566727029169:2152]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037920 TableLocalId: 12 Generation: 1 Round: 0 TableStats { DataSize: 928 RowCount: 4 IndexSize: 0 InMemSize: 928 LastAccessTime: 1750799266009 LastUpdateTime: 1750799266009 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 4 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037920 NodeId: 3 StartTime: 1750799264340 TableOwnerId: 72057594046644480 FollowerId: 0 
2025-06-24T21:07:54.390592Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:07:54.390623Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037920 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] state 'Ready' dataSize 928 rowCount 4 cpuUsage 0 2025-06-24T21:07:54.390725Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037920 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] raw table stats: DataSize: 928 RowCount: 4 IndexSize: 0 InMemSize: 928 LastAccessTime: 1750799266009 LastUpdateTime: 1750799266009 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 4 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:07:54.482190Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519625566727029169:2152]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:07:54.482239Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:07:54.482261Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 3 2025-06-24T21:07:54.482331Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 3 2025-06-24T21:07:54.482365Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 3 2025-06-24T21:07:54.482442Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 12 shard idx 72057594046644480:34 data size 1240 row count 7 2025-06-24T21:07:54.482501Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037921 maps to shardIdx: 72057594046644480:34 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], pathId map=TuplePrimaryDescending, is column=0, is olap=0, RowCount 7, DataSize 1240 2025-06-24T21:07:54.482524Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037921, followerId 0 2025-06-24T21:07:54.482583Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:34 with partCount# 0, rowCount# 7, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:07:54.482656Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037921 2025-06-24T21:07:54.482689Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 12 shard idx 72057594046644480:32 data size 1032 row count 5 2025-06-24T21:07:54.482720Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main 
stats from datashardId(TabletID)=72075186224037919 maps to shardIdx: 72057594046644480:32 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], pathId map=TuplePrimaryDescending, is column=0, is olap=0, RowCount 5, DataSize 1032 2025-06-24T21:07:54.482728Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037919, followerId 0 2025-06-24T21:07:54.482762Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:32 with partCount# 0, rowCount# 5, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:07:54.482777Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037919 2025-06-24T21:07:54.482795Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 12 shard idx 72057594046644480:33 data size 928 row count 4 2025-06-24T21:07:54.482821Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037920 maps to shardIdx: 72057594046644480:33 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], pathId map=TuplePrimaryDescending, is column=0, is olap=0, RowCount 4, DataSize 928 2025-06-24T21:07:54.482843Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037920, followerId 0 2025-06-24T21:07:54.482885Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:33 with partCount# 0, rowCount# 4, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:07:54.482908Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037920 2025-06-24T21:07:54.482975Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:07:54.483130Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519625566727029169:2152]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:07:54.483170Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:07:54.483191Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::UpdateOnHidenChanges-DataColumn [GOOD] Test command err: Trying to start YDB, gRPC: 15084, MsgBus: 21772 2025-06-24T21:07:39.102274Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625559734952931:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:39.102387Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00461a/r3tmp/tmpdEuXtO/pdisk_1.dat 2025-06-24T21:07:39.368648Z 
node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:39.368886Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625559734952913:2079] 1750799259101577 != 1750799259101580 TServer::EnableGrpc on GrpcPort 15084, node 1 2025-06-24T21:07:39.448893Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:39.448932Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:39.448944Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:39.449135Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:07:39.479128Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:39.479267Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:39.480900Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21772 TClient is connected to server localhost:21772 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:39.939320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:39.958405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:40.089313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:40.109238Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:40.240757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:40.316146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:41.807873Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625568324889156:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:41.807966Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:42.089015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:42.116407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:42.145056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:42.174184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:42.202265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:42.270947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:42.339384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:42.387004Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625572619857117:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:42.387092Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:42.387374Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625572619857122:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:42.390300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:42.399238Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625572619857124:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:42.457820Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625572619857175:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:43.414308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:44.102398Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625559734952931:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:44.102473Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:07:45.131122Z node 1 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: ... T21:07:46.986034Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:46.987291Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625590238068549:2079] 1750799266867356 != 1750799266867359 TServer::EnableGrpc on GrpcPort 29420, node 2 2025-06-24T21:07:47.020674Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:47.020765Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:47.026804Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:47.046946Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:47.046967Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:47.046972Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:47.047072Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22970 TClient is connected to server localhost:22970 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:47.386753Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:47.408718Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:07:47.491232Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:47.649992Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:07:47.711865Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:47.877812Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:50.078863Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625607417939369:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:50.078963Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:50.126207Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.156235Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.185183Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.214642Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.246480Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.322769Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.356029Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.414307Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625607417940029:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:50.414391Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:50.414570Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625607417940034:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:50.418177Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:50.429612Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625607417940036:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:50.528348Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625607417940087:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:51.630791Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:51.869478Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625590238068567:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:51.869555Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:07:53.571961Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhw9jr4f75s4fjntzxg496w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzliMzQ0MjEtMTRkY2ZhMWUtNDdhNjNhMmMtMjcxYzNlYTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-24T21:07:53.572257Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YzliMzQ0MjEtMTRkY2ZhMWUtNDdhNjNhMmMtMjcxYzNlYTc=, ActorId: [2:7519625611712908402:2530], ActorState: ExecuteState, TraceId: 01jyhw9jr4f75s4fjntzxg496w, Create QueryResponse for error on request, msg: >> KqpIndexes::UniqAndNoUniqSecondaryIndex [GOOD] >> KqpIndexes::UniqAndNoUniqSecondaryIndexWithCover >> KqpIndexMetadata::TestNoReadFromMainTableBeforeJoin [GOOD] >> KqpIndexMetadata::HandleWriteOnlyIndex >> KqpIndexes::SecondaryIndexWithPrimaryKeySameComulns+UseSink >> KqpIndexes::ForbidViewModification [GOOD] >> KqpIndexes::ForbidDirectIndexTableCreation >> KqpIndexes::UpdateDeletePlan-UseSink [GOOD] >> KqpIndexes::UpdateIndexSubsetPk >> TraverseColumnShard::TraverseServerlessColumnTable [GOOD] >> KqpIndexes::SecondaryIndexUpsert2Update [GOOD] >> KqpIndexes::SecondaryIndexUsingInJoin+UseStreamJoin >> DataShardSnapshots::MvccSnapshotLockedWritesWithReadConflicts [GOOD] >> DataShardSnapshots::LockedWritesLimitedPerKey+UseSink >> KqpMultishardIndex::SecondaryIndexSelect [GOOD] >> KqpMultishardIndex::SortByPk >> KqpIndexes::UpsertMultipleUniqIndexes [GOOD] >> KqpIndexes::UpsertNoIndexColumns >> KqpIndexes::SecondaryIndexUpsert1DeleteUpdate >> KqpVectorIndexes::OrderByCosineLevel2-Nullable-UseSimilarity >> DataShardSnapshots::LockedWriteWithAsyncIndex+WithRestart-UseSink [GOOD] >> DataShardSnapshots::LockedWriteWithAsyncIndex-WithRestart+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseServerlessColumnTable [GOOD] Test command err: 2025-06-24T21:05:00.635789Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:00.636171Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:00.636370Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043c1/r3tmp/tmpauDEMb/pdisk_1.dat 2025-06-24T21:05:01.023661Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15565, node 1 2025-06-24T21:05:01.386834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:01.386900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:01.386946Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:01.387570Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:01.394728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:01.515204Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:01.515408Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:01.529418Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10212 2025-06-24T21:05:02.168413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:05.589470Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:05.634721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:05.634851Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:05.697185Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:05.699673Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:05.932490Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:05.968393Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.968884Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.969325Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.969419Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.969471Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.969634Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.969707Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.969760Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:05.969807Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:06.156307Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:06.156425Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:06.176727Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:06.318369Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:06.367951Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:06.368050Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:06.399247Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:06.400619Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:06.400814Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:06.400867Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:06.400925Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:06.400982Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:06.401038Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:06.401100Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:06.402271Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:06.440904Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:06.441030Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:06.448371Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:06.451957Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:06.452220Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:06.460578Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-24T21:05:06.488603Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:06.488679Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:06.488766Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-24T21:05:06.503926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:06.510958Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:06.511075Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:06.709555Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:06.889180Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:06.935403Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-24T21:05:07.452265Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:07.482945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:08.077175Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:08.230249Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7890: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-24T21:05:08.230319Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7906: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:05:08.230421Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:2496:2900], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:05:08.233319Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2504:2904] 2025-06-24T21:05:08.233649Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2504:2904], schemeshard id = 72075186224037899 2025-06-24T21:05:09.347388Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2621:3193], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:09.347550Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06 ... pts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:52.765004Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:9323:6874]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:52.765408Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:07:52.765537Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:9325:6876] 2025-06-24T21:07:52.765621Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:9325:6876] 2025-06-24T21:07:52.766244Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:9326:6877] 2025-06-24T21:07:52.766425Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:9326:6877], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:07:52.766515Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:07:52.766759Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9325:6876], server id = [2:9326:6877], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:52.766836Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:52.766949Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:9323:6874], StatRequests.size() = 1 2025-06-24T21:07:52.926351Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZDk1YWE4MjMtNmMzYjNiMDctMjU2OTczNDEtZjY5Yzk2ODI=, TxId: 2025-06-24T21:07:52.926449Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZDk1YWE4MjMtNmMzYjNiMDctMjU2OTczNDEtZjY5Yzk2ODI=, TxId: 2025-06-24T21:07:52.927675Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:52.942210Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:52.942287Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:07:52.996616Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:07:52.996700Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:07:53.067927Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:9325:6876], schemeshard count = 1 2025-06-24T21:07:53.397149Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8072: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037899 2025-06-24T21:07:53.397226Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 234.000000s, at schemeshard: 72075186224037899 2025-06-24T21:07:53.397467Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037899, stats size# 28 2025-06-24T21:07:53.412535Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-24T21:07:55.778923Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:07:55.778987Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:07:55.779025Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is column table. 2025-06-24T21:07:55.779065Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-24T21:07:55.782393Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:55.801193Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:55.801813Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:55.801919Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:55.802967Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-24T21:07:55.817217Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:55.817500Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-24T21:07:55.818665Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9446:6948], server id = [2:9450:6952], tablet id = 72075186224037905, status = OK 2025-06-24T21:07:55.819086Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:9446:6948], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-24T21:07:55.820172Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9447:6949], server id = [2:9451:6953], tablet id = 72075186224037906, status = OK 2025-06-24T21:07:55.820255Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:9447:6949], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-24T21:07:55.820416Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9448:6950], server id = [2:9452:6954], tablet id = 72075186224037907, status = OK 2025-06-24T21:07:55.820469Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:9448:6950], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-24T21:07:55.821621Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9449:6951], server id = [2:9453:6955], tablet id = 72075186224037908, status = OK 2025-06-24T21:07:55.821680Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:9449:6951], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-24T21:07:55.827932Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037905 2025-06-24T21:07:55.828401Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:9446:6948], server id = [2:9450:6952], tablet id = 72075186224037905 2025-06-24T21:07:55.828447Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:55.830244Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037906 2025-06-24T21:07:55.830499Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037907 2025-06-24T21:07:55.830944Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:9447:6949], server id = [2:9451:6953], tablet id = 72075186224037906 2025-06-24T21:07:55.830976Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:55.831042Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:9448:6950], server id = [2:9452:6954], tablet id = 72075186224037907 2025-06-24T21:07:55.831064Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:55.831195Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037908 2025-06-24T21:07:55.831244Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:55.831477Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 
2025-06-24T21:07:55.831683Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:55.831863Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:9449:6951], server id = [2:9453:6955], tablet id = 72075186224037908 2025-06-24T21:07:55.831888Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:55.832054Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Shared 2025-06-24T21:07:55.834872Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:55.873883Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:9482:6980]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:55.874258Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:55.874308Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:9482:6980], StatRequests.size() = 1 2025-06-24T21:07:55.995916Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OGRhMGZmYTEtMzM5ZWE2MWYtNWVjM2NmM2YtMTRjZGMxYzE=, TxId: 2025-06-24T21:07:55.995992Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OGRhMGZmYTEtMzM5ZWE2MWYtNWVjM2NmM2YtMTRjZGMxYzE=, TxId: ... 
waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-24T21:07:55.996555Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:9495:6986]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:55.996920Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:55.997434Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:55.997519Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:07:56.001457Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:07:56.001542Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-24T21:07:56.001606Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037899, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:07:56.006682Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> test_sql_streaming.py::test[suites-ReadWriteSameTopic-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadWriteTopic-default.txt] >> test_sql_streaming.py::test[suites-GroupByHopWithDataWatermarks-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHoppingWithDataWatermarks-default.txt] >> test_sql_streaming.py::test[suites-GroupByHopByStringKey-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHopExprKey-default.txt] >> KqpVectorIndexes::BuildIndexTimesAndUser [GOOD] >> KqpIndexes::Uint8Index [GOOD] >> KqpIndexes::SelectConcurentTX2 [GOOD] >> KqpIndexes::SecondaryIndexWithPrimaryKeySameComulns-UseSink >> VectorIndexBuildTest::DescriptionIsPersisted-prefixed-true [GOOD] >> KqpIndexes::UpsertWithNullKeysSimple [GOOD] >> KqpIndexes::UpsertWithNullKeysComplex >> KqpIndexes::JoinWithNonPKColumnsInPredicate+UseStreamJoin [GOOD] >> KqpIndexes::WriteWithParamsFieldOrder >> KqpIndexes::JoinWithNonPKColumnsInPredicate-UseStreamJoin >> DataShardSnapshots::VolatileSnapshotTimeoutRefresh [GOOD] >> DataShardSnapshots::VolatileSnapshotCleanupOnReboot ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::DescriptionIsPersisted-prefixed-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:06:28.173028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:06:28.173122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:28.173163Z node 
1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:06:28.173200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:06:28.173983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:06:28.174046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:06:28.174115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:06:28.174194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:06:28.174973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:06:28.175969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:06:28.260842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:06:28.260895Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:28.274163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:06:28.274449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:06:28.274566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:06:28.280281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:06:28.280434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:06:28.280916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:28.281167Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:06:28.283746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:28.283976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:06:28.285125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:28.285200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:06:28.285455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 
2025-06-24T21:06:28.285548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:06:28.285606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:06:28.285729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.290702Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:06:28.372643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:06:28.373685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.374603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:06:28.374646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:06:28.375794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:06:28.375873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:06:28.378180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:28.378320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:06:28.378461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.378554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:06:28.378586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:06:28.378610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 
2025-06-24T21:06:28.380030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.380068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:06:28.380099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:06:28.381266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.381300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:06:28.381330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:28.381371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:06:28.384471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:06:28.385737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:06:28.385874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:06:28.387422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:06:28.387517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:06:28.387553Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:28.388713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:06:28.388753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:06:28.388937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:06:28.389010Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:06:28.390602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:06:28.390640Z node 1 :FLAT_TX_SCHEMESHARD ... mplete, at schemeshard: 72057594046678944 2025-06-24T21:07:59.178149Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:07:59.178193Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:07:59.178228Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:07:59.181539Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [3:2656:4397] sender: [3:2718:2058] recipient: [3:15:2062] 2025-06-24T21:07:59.230041Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/by_embedding" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:07:59.230448Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/by_embedding" took 445us result status StatusSuccess 2025-06-24T21:07:59.231982Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/vectors/by_embedding" PathDescription { Self { Name: "by_embedding" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplLevelTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Children { Name: "indexImplPostingTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } 
ChildrenExist: false } Children { Name: "indexImplPrefixTable" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 10 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "by_embedding" LocalPathId: 3 Type: EIndexTypeGlobalVectorKmeansTree State: EIndexStateReady KeyColumnNames: "prefix" KeyColumnNames: "embedding" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataColumnNames: "covered" DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 
83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 
ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } clusters: 4 levels: 5 } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::Uint8Index [GOOD] Test command err: Trying to start YDB, gRPC: 21041, MsgBus: 11368 2025-06-24T21:07:37.746965Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625553733841860:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:37.747095Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004623/r3tmp/tmp4fa0vU/pdisk_1.dat 2025-06-24T21:07:37.981137Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:37.981351Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625553733841841:2079] 1750799257745965 != 1750799257745968 TServer::EnableGrpc on GrpcPort 21041, node 1 2025-06-24T21:07:38.051293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:38.051327Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:38.051336Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:38.051494Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:07:38.096014Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:38.096269Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:38.098024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11368 TClient is connected to server localhost:11368 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:38.523177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:38.547022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:38.664158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:38.759021Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:38.808214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:38.884246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:40.634508Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625566618745378:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:40.634594Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:40.931541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:40.959535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:40.985989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.010007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.036871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.105890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.173816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.252569Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625570913713343:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:41.252674Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:41.252708Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625570913713348:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:41.256185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:41.265354Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625570913713350:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:41.347428Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625570913713401:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:42.304176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:42.551326Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:2020: ActorId: [1:7519625575208681150:2472] TxId: 281474976715673. Ctx: { TraceId: 01jyhw98tn1ffn3q36yns4d6e2, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTRjYjgzY2MtY2VjOWU1M2YtMWU2MWZmYjktOTJmZmQ4NDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Read operation can be performed on async index table: [72057594046644480:19:1] only with StaleRO isolation level 2025-06-24T21:07:42.561845Z node 1 :KQP_S ... 1 != 1750799272660584 2025-06-24T21:07:52.835352Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:52.835448Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:52.837506Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24357, node 3 2025-06-24T21:07:52.881889Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:52.881914Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:52.881922Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:52.882040Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12168 TClient is connected to server localhost:12168 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:07:53.485052Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:53.501019Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:53.576937Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:53.690740Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:53.749890Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:53.824712Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:56.436863Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625635543471140:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:56.436972Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:56.504825Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.538321Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.574490Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.643462Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.698164Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.775381Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.848615Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.956159Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625635543471811:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:56.956268Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:56.956582Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625635543471816:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:56.961716Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:56.983196Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519625635543471818:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:57.063705Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519625639838439165:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:57.663265Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519625618363600352:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:57.663343Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:07:58.293392Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.494891Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.535864Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:07:58.593201Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::BuildIndexTimesAndUser [GOOD] Test command err: Trying to start YDB, gRPC: 17264, MsgBus: 15639 2025-06-24T21:07:41.914076Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625571294268514:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:41.914176Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004607/r3tmp/tmpPLoA8X/pdisk_1.dat 2025-06-24T21:07:42.208201Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:42.208460Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625571294268496:2079] 1750799261913239 != 1750799261913242 TServer::EnableGrpc on GrpcPort 17264, node 1 2025-06-24T21:07:42.275546Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-24T21:07:42.275591Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:42.275629Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:42.275796Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:07:42.301686Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:42.301845Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:42.303939Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15639 TClient is connected to server localhost:15639 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:42.841687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:42.865691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:42.922329Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:42.997163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:43.127096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:43.182234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:45.037522Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625588474139326:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:45.037637Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:45.361908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.390709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.420618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.457093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.495935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.543561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.612610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.673045Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625588474139988:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:45.673100Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625588474139993:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:45.673146Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:45.676750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:45.686481Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625588474139995:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:45.741406Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625588474140046:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:46.734719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:46.915479Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625571294268514:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:46.915547Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:07:52.037452Z node 1 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: ... cription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:53.677839Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:53.694835Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:07:53.780063Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:53.959085Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:53.982651Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:54.032937Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:56.264468Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625634203430805:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:56.264577Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:56.341188Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.415630Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.452650Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.487046Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.516551Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.560797Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.599585Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.667592Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625634203431465:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:56.667693Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:56.668109Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625634203431470:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:56.671908Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:56.684500Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625634203431472:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:56.747603Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625634203431523:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:57.966304Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625617023560011:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:57.966376Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:07:58.030699Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.303685Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.356871Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.446517Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710760:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.498171Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710764:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:07:58.561250Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037929 not found 2025-06-24T21:07:58.561287Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037927 not found 2025-06-24T21:07:58.564274Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037928 not found >> 
KqpUniqueIndex::InsertNullInPk [GOOD] >> KqpUniqueIndex::InsertNullInFk >> AnalyzeColumnshard::AnalyzeRebootSaBeforeReqDistribution [GOOD] >> AnalyzeColumnshard::AnalyzeServerless [GOOD] >> KqpIndexes::MultipleSecondaryIndex+UseSink [GOOD] >> KqpIndexes::MultipleSecondaryIndex-UseSink >> KqpIndexes::DuplicateUpsertInterleave [GOOD] >> KqpIndexes::ForbidDirectIndexTableCreation [GOOD] >> KqpIndexes::IndexFilterPushDown >> KqpIndexes::ExplainCollectFullDiagnostics ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootSaBeforeReqDistribution [GOOD] Test command err: 2025-06-24T21:05:26.858475Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:26.858689Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:26.858809Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004329/r3tmp/tmpIhW88L/pdisk_1.dat 2025-06-24T21:05:27.140255Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26737, node 1 2025-06-24T21:05:27.338646Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:27.338706Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:27.338746Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:27.338948Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:27.346901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:27.463888Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:27.464062Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:27.477676Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19671 2025-06-24T21:05:28.015414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:31.004234Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:31.043007Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:31.043134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:31.101974Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:31.104193Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:31.279431Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:31.315050Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:31.315728Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:31.316395Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:31.316570Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:31.316690Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:31.316956Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:31.317060Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:31.317196Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:31.317300Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:31.494213Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:31.494316Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:31.507040Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:31.637561Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:31.677601Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:31.677689Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:31.703010Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:31.704206Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:31.704404Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:31.704452Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:31.704501Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:31.704546Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:31.704596Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:31.704643Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:31.705463Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:31.737109Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:31.737211Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:31.742559Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:31.745172Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:31.745393Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:31.751556Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:31.774735Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:31.774809Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:31.774883Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:31.791335Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:31.798394Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:31.798533Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:31.967281Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:32.121213Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:32.199056Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:32.663935Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:32.968197Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:32.968298Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:32.982745Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:33.075419Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:33.075605Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:33.075833Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:33.075916Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:33.075988Z node 2 :TX_COLUMNSHARD WARN: ... 37894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:07:56.918452Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 2 2025-06-24T21:07:56.918550Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 1 2025-06-24T21:07:56.918671Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 1 2025-06-24T21:07:56.918751Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:07:56.918949Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:56.920116Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:07:56.920960Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:56.921045Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:56.921204Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:07:56.922875Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:56.922960Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:56.924783Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-24T21:07:56.995898Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:07:56.996123Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-24T21:07:56.996759Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7492:5468], server id = [2:7493:5469], tablet id = 72075186224037899, status = OK 2025-06-24T21:07:56.996883Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7492:5468], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:07:57.000648Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:07:57.000777Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:07:57.001033Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:07:57.001249Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:07:57.001553Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:07:57.004808Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7492:5468], server id = [2:7493:5469], tablet id = 72075186224037899 2025-06-24T21:07:57.004866Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:07:57.005465Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:07:57.044766Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7513:5488]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:57.044986Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:07:57.045037Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7513:5488], StatRequests.size() = 1 2025-06-24T21:07:57.166539Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NzczYjkwMmUtN2Y5ZDUxN2QtNGI2NTAzZjgtMTU3ODMwMjY=, TxId: 2025-06-24T21:07:57.166638Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NzczYjkwMmUtN2Y5ZDUxN2QtNGI2NTAzZjgtMTU3ODMwMjY=, TxId: 2025-06-24T21:07:57.167423Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:57.181165Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7528:5494] 2025-06-24T21:07:57.181457Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:7528:5494], schemeshard id = 72075186224037897 
2025-06-24T21:07:57.181585Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7445:5437], server id = [2:7529:5495], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:57.181631Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7529:5495] 2025-06-24T21:07:57.181730Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7529:5495], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-24T21:07:57.196853Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:07:57.196926Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:07:57.299164Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7533:5498] 2025-06-24T21:07:57.299959Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. ReplyToActorId [1:3044:3297] , Record { OperationId: "operationId" Tables { PathId { OwnerId: 72075186224037897 LocalId: 4 } } Types: TYPE_COUNT_MIN_SKETCH } 2025-06-24T21:07:57.300023Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:38: [72075186224037894] TTxAnalyze::Execute. Update existing force traversal. OperationId operationId , ReplyToActorId [1:3044:3297] 2025-06-24T21:07:57.300121Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:97: [72075186224037894] TTxAnalyze::Complete 2025-06-24T21:07:57.877719Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-24T21:07:57.877841Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T21:07:58.701790Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:58.701903Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:07:58.701954Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:08:00.079236Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:08:00.079396Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:08:00.079448Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:08:00.080145Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:08:00.095423Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:08:00.095854Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:08:00.095941Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:08:00.096422Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-24T21:08:00.113027Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:08:00.113226Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-24T21:08:00.113867Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7612:5541], server id = [2:7613:5542], tablet id = 72075186224037899, status = OK 2025-06-24T21:08:00.113989Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7612:5541], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:08:00.115677Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:08:00.115791Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:08:00.116146Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7612:5541], server id = [2:7613:5542], tablet id = 72075186224037899 2025-06-24T21:08:00.116187Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:08:00.116277Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:08:00.116461Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:08:00.116770Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:08:00.120679Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:08:00.146920Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NzRjYzI0Y2ItNzFjMWJjMTctZGE5YWNlZmYtY2M1MDZlNWE=, TxId: 2025-06-24T21:08:00.147000Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NzRjYzI0Y2ItNzFjMWJjMTctZGE5YWNlZmYtY2M1MDZlNWE=, TxId: 2025-06-24T21:08:00.147636Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:08:00.173002Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:08:00.173088Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3044:3297] |93.8%| [TA] $(B)/ydb/core/tx/schemeshard/ut_index_build/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpMultishardIndex::DataColumnWriteNull [GOOD] >> KqpMultishardIndex::DataColumnWrite-UseSink >> KqpPrefixedVectorIndexes::PrefixedVectorIndexOrderByCosineDistanceWithCover+Nullable [GOOD] >> KqpPrefixedVectorIndexes::PrefixedVectorIndexOrderByCosineDistanceWithCover-Nullable >> KqpIndexes::DoUpsertWithoutIndexUpdate+UniqIndex+UseSink [GOOD] >> KqpIndexes::DirectAccessToIndexImplTable |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> KqpVectorIndexes::OrderByCosineLevel1+Nullable-UseSimilarity >> KqpUniqueIndex::InsertFkDuplicate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeServerless [GOOD] Test command err: 2025-06-24T21:05:09.502637Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:09.502845Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:09.502942Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004382/r3tmp/tmpwHu82d/pdisk_1.dat 2025-06-24T21:05:09.840272Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17134, node 1 2025-06-24T21:05:10.069265Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:10.069343Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:10.069400Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:10.069652Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:10.072461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:10.186095Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:10.186248Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:10.199823Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29348 2025-06-24T21:05:10.755782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:14.088069Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:14.123065Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:14.123192Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:14.183458Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:14.185902Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:14.362477Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:14.397572Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:14.398278Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:14.398812Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:14.398977Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:14.399082Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:14.399402Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:14.399528Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:14.399642Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:14.399737Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:14.584680Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:14.584782Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:14.598050Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:14.750404Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:14.800835Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:14.800963Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:14.836071Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:14.837736Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:14.838005Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:14.838083Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:14.838146Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:14.838203Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:14.838272Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:14.838336Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:14.839584Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:14.877692Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:14.877841Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:14.885056Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:14.888644Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:14.888992Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:14.897412Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-24T21:05:14.925660Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:14.925735Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:14.925808Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-24T21:05:14.942251Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:14.950420Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:14.950575Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:15.136171Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:15.311411Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:15.357724Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-24T21:05:15.902653Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:15.935222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:16.505310Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:16.666026Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7890: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-24T21:05:16.666151Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7906: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:05:16.666250Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:2495:2901], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:05:16.667703Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2496:2902] 2025-06-24T21:05:16.668532Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2496:2902], schemeshard id = 72075186224037899 2025-06-24T21:05:17.733040Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2617:3190], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:17.733225Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06 ... 6-24T21:07:56.901854Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:8243:6071], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:56.901976Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:8253:6076], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:56.902119Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Shared, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:56.918375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:57.046502Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:8257:6079], DatabaseId: /Root/Shared, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-24T21:07:57.330585Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:8355:6127] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Shared/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:57.415988Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:8384:6142]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:07:57.416276Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:07:57.416383Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8386:6144] 2025-06-24T21:07:57.416471Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8386:6144] 2025-06-24T21:07:57.417020Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8386:6144], server id = [2:8387:6145], tablet id = 72075186224037894, status = OK 2025-06-24T21:07:57.417092Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8387:6145] 2025-06-24T21:07:57.417209Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8387:6145], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:07:57.417292Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:07:57.417469Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:07:57.417585Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:8384:6142], StatRequests.size() = 1 2025-06-24T21:07:57.586319Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MjJmZWJmYmMtNGZlNTc4ZTMtM2ZlMGU2MzAtM2FhMjcwYTY=, TxId: 2025-06-24T21:07:57.586425Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MjJmZWJmYmMtNGZlNTc4ZTMtM2ZlMGU2MzAtM2FhMjcwYTY=, TxId: 2025-06-24T21:07:57.587529Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:07:57.606107Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:07:57.606208Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
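(The RunDataQuery entries in these statistics-aggregator logs all execute the same statement; reformatted for readability it reads roughly as the YQL below. The element types of the two List parameters are cut from the captured output, so List<Uint32> and List<String> are assumptions here, not values taken from the log.)
-- Sketch of the upsert quoted in the RunDataQuery log entries; List element
-- types are assumed, since they are not visible in the captured output.
DECLARE $owner_id AS Uint64;
DECLARE $local_path_id AS Uint64;
DECLARE $stat_type AS Uint32;
DECLARE $column_tags AS List<Uint32>;  -- assumed element type
DECLARE $data AS List<String>;         -- assumed element type

UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data)
VALUES
    ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]),
    ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]);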
2025-06-24T21:07:57.651400Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:07:57.651527Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:07:57.746530Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8386:6144], schemeshard count = 1 2025-06-24T21:07:58.088048Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8072: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037899 2025-06-24T21:07:58.088121Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 200.000000s, at schemeshard: 72075186224037899 2025-06-24T21:07:58.088348Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037899, stats size# 28 2025-06-24T21:07:58.108888Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-24T21:07:59.133263Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:07:59.133366Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is column table. 2025-06-24T21:07:59.137253Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:07:59.154287Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:07:59.154843Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:07:59.154901Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037899, LocalPathId: 2], AnalyzedShards 1 2025-06-24T21:07:59.170065Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:07:59.181316Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. 2025-06-24T21:07:59.182615Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-24T21:07:59.182724Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-24T21:07:59.196775Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-24T21:08:00.709929Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:08:00.710060Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is column table. 
2025-06-24T21:08:00.710124Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-24T21:08:00.710863Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:08:00.725020Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:08:00.725386Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:08:00.725450Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:08:00.726511Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:08:00.744801Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:08:00.745058Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-24T21:08:00.745761Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8513:6226], server id = [2:8514:6227], tablet id = 72075186224037905, status = OK 2025-06-24T21:08:00.745892Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8513:6226], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-24T21:08:00.749968Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037905 2025-06-24T21:08:00.750083Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:08:00.750291Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:08:00.750491Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:08:00.750903Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-24T21:08:00.753180Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8513:6226], server id = [2:8514:6227], tablet id = 72075186224037905 2025-06-24T21:08:00.753232Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:08:00.753950Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:08:00.792897Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8534:6246]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:08:00.793114Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:08:00.793160Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8534:6246], StatRequests.size() = 1 2025-06-24T21:08:00.922356Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NmQ4ODA4YTAtODBmYjE1Y2ItMjZmYjc2NjctZjhhNDVjNDg=, TxId: 2025-06-24T21:08:00.922418Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NmQ4ODA4YTAtODBmYjE1Y2ItMjZmYjc2NjctZjhhNDVjNDg=, TxId: 2025-06-24T21:08:00.922904Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:08:00.941068Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-24T21:08:00.941142Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3544:3463] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::DuplicateUpsertInterleave [GOOD] Test command err: Trying to start YDB, gRPC: 64447, MsgBus: 27829 2025-06-24T21:07:41.789531Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625570628816119:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:41.789678Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00460b/r3tmp/tmpLacoYU/pdisk_1.dat 2025-06-24T21:07:42.130936Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:42.131360Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625570628816099:2079] 1750799261788570 != 1750799261788573 TServer::EnableGrpc on GrpcPort 64447, node 1 2025-06-24T21:07:42.183752Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:42.183784Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:42.183793Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:42.183933Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:07:42.185882Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:42.185995Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:42.187665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27829 TClient is connected to server localhost:27829 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:07:42.697890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:42.717935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:42.797198Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:42.848475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:42.991603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:43.053199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:44.703852Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625583513719643:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:44.704013Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:45.043616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.076698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.108317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.142705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.182661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.255738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.289987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.385530Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625587808687607:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:45.385611Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:45.385991Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625587808687612:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:45.390816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:45.404072Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625587808687614:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:45.508840Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625587808687665:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:46.627407Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625592103655195:3599], Recipient [1:7519625574923783747:2160]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:46.627464Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:46.627478Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:46.627511Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519625592103655191:3596], Recipient [1:7519625574923783747:2160]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-06-24T21:07:46.627530Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvS ... 74976715672 Step: 0 Generation: 1 2025-06-24T21:08:00.938230Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 281474976715672:2 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 2025-06-24T21:08:00.938264Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 281474976715672:2 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 7519625652976519491 RawX2: 4503612512274861 } Origin: 72075186224037923 State: 2 TxId: 281474976715672 Step: 0 Generation: 1 2025-06-24T21:08:00.938281Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715672:2, shardIdx: 72057594046644480:36, shard: 72075186224037923, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-24T21:08:00.938288Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-24T21:08:00.938294Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 281474976715672:2, datashard: 72075186224037923, at schemeshard: 72057594046644480 2025-06-24T21:08:00.938302Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715672:2 129 -> 240 2025-06-24T21:08:00.938345Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:00.938530Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-24T21:08:00.938537Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 
2025-06-24T21:08:00.938546Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715672:0 2025-06-24T21:08:00.938575Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625652976519490:2476] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-24T21:08:00.938651Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-24T21:08:00.938659Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:00.938666Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715672:2 2025-06-24T21:08:00.938693Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625652976519491:2477] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-24T21:08:00.938759Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [3:7519625631501680670:2145], Recipient [3:7519625631501680670:2145]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T21:08:00.938775Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T21:08:00.938807Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-24T21:08:00.938826Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976715672:0 ProgressState 2025-06-24T21:08:00.938894Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:00.938907Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715672:0 progress is 2/3 2025-06-24T21:08:00.938918Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-24T21:08:00.938932Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715672:0 progress is 2/3 2025-06-24T21:08:00.938940Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-24T21:08:00.938949Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 2/3, is published: true 2025-06-24T21:08:00.939054Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [3:7519625631501680670:2145], Recipient [3:7519625631501680670:2145]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T21:08:00.939063Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T21:08:00.939078Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715672:2, at schemeshard: 
72057594046644480 2025-06-24T21:08:00.939086Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976715672:2 ProgressState 2025-06-24T21:08:00.939114Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:00.939120Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715672:2 progress is 3/3 2025-06-24T21:08:00.939125Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-24T21:08:00.939135Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715672:2 progress is 3/3 2025-06-24T21:08:00.939140Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-24T21:08:00.939164Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 3/3, is published: true 2025-06-24T21:08:00.939189Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519625652976519458:2473] message: TxId: 281474976715672 2025-06-24T21:08:00.939201Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-24T21:08:00.939219Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715672:0 2025-06-24T21:08:00.939227Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715672:0 2025-06-24T21:08:00.939328Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-06-24T21:08:00.939344Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715672:1 2025-06-24T21:08:00.939351Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715672:1 2025-06-24T21:08:00.939366Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-06-24T21:08:00.939373Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715672:2 2025-06-24T21:08:00.939379Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715672:2 2025-06-24T21:08:00.939407Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-24T21:08:00.939617Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:00.939687Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:00.939729Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send 
to actor: [3:7519625652976519458:2473] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-24T21:08:00.940535Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625652976519474:3604], Recipient [3:7519625631501680670:2145]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:00.940555Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:00.940565Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:00.940942Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625652976519564:3668], Recipient [3:7519625631501680670:2145]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:00.940956Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:00.940964Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:00.940992Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625652976519565:3669], Recipient [3:7519625631501680670:2145]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:00.941003Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:00.941009Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:01.809712Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625631501680670:2145]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:01.809765Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:01.809841Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625631501680670:2145], Recipient [3:7519625631501680670:2145]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:01.809861Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedAfterRead-UseSink [GOOD] >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedAfterRead+UseSink >> DataShardSnapshots::RepeatableReadAfterSplitRace [GOOD] >> DataShardSnapshots::PostMergeNotCompactedTooEarly |93.8%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpIndexes::UpdateIndexSubsetPk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::InsertFkDuplicate [GOOD] Test command err: Trying to start YDB, gRPC: 10757, MsgBus: 24143 2025-06-24T21:07:41.181318Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625571930052475:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:41.181409Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00460e/r3tmp/tmpt3OSNk/pdisk_1.dat 2025-06-24T21:07:41.439349Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625571930052456:2079] 1750799261180406 != 1750799261180409 2025-06-24T21:07:41.445564Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10757, node 1 2025-06-24T21:07:41.501219Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:41.501256Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:41.501267Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:41.501430Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:07:41.535622Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:41.535718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:41.537787Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24143 TClient is connected to server localhost:24143 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:07:41.983534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:42.009814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:42.132250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:42.251037Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:42.296132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:42.363295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:44.031678Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625584814955995:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:44.031792Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:44.352195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:44.379860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:44.403279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:44.429345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:44.457311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:44.524082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:44.590358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:44.644576Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625584814956658:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:44.644677Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:44.645002Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625584814956663:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:44.650064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:44.660322Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625584814956665:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:44.750872Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625584814956716:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:45.761200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:46.182074Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625571930052475:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:46.182156Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:07:47.259550Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1: ... distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:55.745907Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:55.745918Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:55.746054Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24107 TClient is connected to server localhost:24107 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-24T21:07:56.284620Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:56.291975Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:07:56.298889Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:07:56.396641Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.559872Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:56.572943Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:07:56.646520Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.048500Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625647801056361:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:59.048599Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:59.113142Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.145416Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.182767Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.214700Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.245591Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.312049Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.386297Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.470904Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625647801057025:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:59.471027Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:59.471513Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625647801057030:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:59.474980Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:59.485697Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519625647801057032:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:59.544647Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519625647801057083:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:00.529761Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519625630621185560:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:00.529868Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:00.640319Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:02.350342Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [3:7519625660685960227:2575], TxId: 281474976715677, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=3&id=OGVjZWVjODgtYWRmMzU3YjktMWZjNDY0OGYtN2RiNDY2ZDE=. TraceId : 01jyhw9vpr30vjs6pevpgw6nxj. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Duplicated keys found., code: 2012 }. 2025-06-24T21:08:02.351344Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7519625660685960228:2576], TxId: 281474976715677, task: 2. Ctx: { SessionId : ydb://session/3?node_id=3&id=OGVjZWVjODgtYWRmMzU3YjktMWZjNDY0OGYtN2RiNDY2ZDE=. CustomerSuppliedId : . TraceId : 01jyhw9vpr30vjs6pevpgw6nxj. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [3:7519625660685960224:2531], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-24T21:08:02.351740Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=OGVjZWVjODgtYWRmMzU3YjktMWZjNDY0OGYtN2RiNDY2ZDE=, ActorId: [3:7519625652096025409:2531], ActorState: ExecuteState, TraceId: 01jyhw9vpr30vjs6pevpgw6nxj, Create QueryResponse for error on request, msg: |93.8%| [TA] $(B)/ydb/tests/datashard/secondary_index/test-results/py3test/{meta.json ... results_accumulator.log} >> KqpUniqueIndex::UpsertExplicitNullInComplexFk [GOOD] >> KqpUniqueIndex::UpdateOnNullInComplexFk |93.8%| [TA] {RESULT} $(B)/ydb/tests/datashard/secondary_index/test-results/py3test/{meta.json ... results_accumulator.log} >> KqpIndexes::SecondaryIndexUsingInJoin+UseStreamJoin [GOOD] >> KqpIndexes::SecondaryIndexUsingInJoin-UseStreamJoin >> DataShardSnapshots::LockedWritesLimitedPerKey+UseSink [GOOD] >> DataShardSnapshots::LockedWritesLimitedPerKey-UseSink >> KqpMultishardIndex::SortByPk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::UpdateIndexSubsetPk [GOOD] Test command err: Trying to start YDB, gRPC: 9075, MsgBus: 1914 2025-06-24T21:07:43.971918Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625576691440342:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:43.972092Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045f9/r3tmp/tmpQ0CCQL/pdisk_1.dat 2025-06-24T21:07:44.273735Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:44.276150Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625576691440319:2079] 1750799263969818 != 1750799263969821 TServer::EnableGrpc on GrpcPort 9075, node 1 2025-06-24T21:07:44.357660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:44.357832Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:44.359511Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:44.375850Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:44.375875Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:44.375890Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:44.376023Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1914 TClient is connected to server localhost:1914 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:44.918423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:44.946952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:45.000816Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:45.086843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:45.239161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:45.309380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:47.175836Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625593871311135:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:47.175962Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:47.439273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:47.473630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:47.510301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:47.545882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:47.575940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:47.614194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:47.666196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:47.715108Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625593871311794:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:47.715237Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:47.715255Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625593871311799:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:47.718413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:47.728372Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625593871311801:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:47.791695Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625593871311852:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:48.919749Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625598166279437:3600], Recipient [1:7519625580986407949:2148]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:48.919790Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:48.919806Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:48.919871Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519625598166279433:3597], Recipient [1:7519625580986407949:2148]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-06-24T21:07:48.919887Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvScheme ... emeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:02.518155Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-24T21:08:02.518166Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:02.518225Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-24T21:08:02.518233Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:02.518283Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-24T21:08:02.518293Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:02.518304Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715672:2 2025-06-24T21:08:02.518358Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625661636981476:2482] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-24T21:08:02.518437Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [3:7519625640162142644:2144], Recipient [3:7519625640162142644:2144]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T21:08:02.518457Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T21:08:02.518493Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 
281474976715672:2, at schemeshard: 72057594046644480 2025-06-24T21:08:02.518511Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976715672:2 ProgressState 2025-06-24T21:08:02.518602Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:02.518618Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715672:2 progress is 2/3 2025-06-24T21:08:02.518630Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-24T21:08:02.518645Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715672:2 progress is 2/3 2025-06-24T21:08:02.518658Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-24T21:08:02.518674Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 2/3, is published: true 2025-06-24T21:08:02.518975Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-24T21:08:02.518985Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:02.518995Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715672:0 2025-06-24T21:08:02.519036Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625661636981464:2481] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-24T21:08:02.519115Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [3:7519625640162142644:2144], Recipient [3:7519625640162142644:2144]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T21:08:02.519130Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T21:08:02.519177Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-24T21:08:02.519192Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976715672:0 ProgressState 2025-06-24T21:08:02.519249Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:02.519260Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715672:0 progress is 3/3 2025-06-24T21:08:02.519269Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-24T21:08:02.519288Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715672:0 progress is 3/3 2025-06-24T21:08:02.519296Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 
281474976715672 ready parts: 3/3 2025-06-24T21:08:02.519309Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 3/3, is published: true 2025-06-24T21:08:02.519351Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519625661636981442:2479] message: TxId: 281474976715672 2025-06-24T21:08:02.519371Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-24T21:08:02.519394Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715672:0 2025-06-24T21:08:02.519403Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715672:0 2025-06-24T21:08:02.519517Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-06-24T21:08:02.519532Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715672:1 2025-06-24T21:08:02.519539Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715672:1 2025-06-24T21:08:02.519556Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-06-24T21:08:02.519563Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715672:2 2025-06-24T21:08:02.519569Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715672:2 2025-06-24T21:08:02.519599Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-24T21:08:02.519938Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:02.520104Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:02.520160Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625661636981442:2479] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-24T21:08:02.521534Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625661636981453:3606], Recipient [3:7519625640162142644:2144]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:02.521570Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:02.521582Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:02.521938Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625661636981542:3669], Recipient [3:7519625640162142644:2144]: NKikimr::TEvTabletPipe::TEvServerDisconnected 
2025-06-24T21:08:02.521963Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:02.521973Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:02.522011Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625661636981541:3668], Recipient [3:7519625640162142644:2144]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:02.522020Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:02.522027Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:03.095324Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625640162142644:2144]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:03.095380Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:03.095438Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625640162142644:2144], Recipient [3:7519625640162142644:2144]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:03.095457Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:04.096766Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625640162142644:2144]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:04.096809Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:04.096869Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625640162142644:2144], Recipient [3:7519625640162142644:2144]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:04.096897Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> KqpIndexes::UpsertNoIndexColumns [GOOD] >> KqpIndexes::UpdateOnReadColumns >> KqpIndexes::WriteWithParamsFieldOrder [GOOD] >> KqpIndexes::UpsertWithoutExtraNullDelete-UseSink >> KqpIndexes::SecondaryIndexUpsert1DeleteUpdate [GOOD] >> KqpIndexes::SecondaryIndexUpdateOnUsingIndex >> KqpIndexMetadata::HandleWriteOnlyIndex [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpMultishardIndex::SortByPk [GOOD] Test command err: Trying to start YDB, gRPC: 7381, MsgBus: 28380 2025-06-24T21:07:40.220092Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625565239632501:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:40.220186Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004619/r3tmp/tmp1VXjv4/pdisk_1.dat 2025-06-24T21:07:40.476648Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625565239632482:2079] 1750799260219101 != 1750799260219104 2025-06-24T21:07:40.476805Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:40.501395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:40.501502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:40.505820Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7381, node 1 2025-06-24T21:07:40.564159Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:40.564189Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:40.564200Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:40.564348Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28380 TClient is connected to server localhost:28380 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:41.070969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:41.093539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:41.214320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:41.228158Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:41.338947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:41.398561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:42.921125Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625573829568726:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:42.921264Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:43.204035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:43.234022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:43.262452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:43.288595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:43.322872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:43.399125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:43.468254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:43.530293Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625578124536686:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:43.530394Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:43.530522Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625578124536691:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:43.534490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:43.544595Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625578124536693:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:43.616311Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625578124536744:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:44.577931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:45.223229Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625565239632501:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:45.276152Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 9150, MsgBus: 4179 2025-06-24T21:07:47.877728Z node 2 :METADATA_PROVID ... on=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 15091, MsgBus: 28978 2025-06-24T21:07:57.791007Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519625637196486578:2168];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004619/r3tmp/tmppqsMjk/pdisk_1.dat 2025-06-24T21:07:57.857327Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:07:57.904696Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:57.906199Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519625637196486420:2079] 1750799277759268 != 1750799277759271 2025-06-24T21:07:57.935915Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:57.936004Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 15091, node 3 2025-06-24T21:07:57.938802Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:57.987890Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:57.987914Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:57.987923Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:57.988073Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28978 TClient is connected to server localhost:28978 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:58.524045Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:58.537630Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:58.629289Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:58.810182Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:58.825748Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:58.903980Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:01.303349Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625654376357219:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:01.303464Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:01.387187Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:01.422153Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:01.470789Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:01.516346Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:01.557044Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:01.631477Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:01.676021Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:01.749251Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625654376357879:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:01.749363Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:01.750296Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625654376357884:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:01.754793Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:01.773492Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519625654376357886:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:08:01.876041Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519625654376357937:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:02.795370Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519625637196486578:2168];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:02.795451Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:03.146696Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> DataShardSnapshots::LockedWriteWithAsyncIndex-WithRestart+UseSink [GOOD] >> DataShardSnapshots::LockedWriteWithAsyncIndex+WithRestart+UseSink >> DataShardSnapshots::VolatileSnapshotCleanupOnReboot [GOOD] >> DataShardSnapshots::VolatileSnapshotCleanupOnFinish >> KqpIndexes::SecondaryIndexWithPrimaryKeySameComulns+UseSink [GOOD] >> KqpIndexes::SecondaryIndexUsingInJoin2+UseStreamJoin >> KqpIndexes::UniqAndNoUniqSecondaryIndexWithCover [GOOD] >> KqpVectorIndexes::CoveredVectorIndexWithFollowers+StaleRO [GOOD] >> KqpVectorIndexes::CoveredVectorIndexWithFollowers-StaleRO |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest >> KqpIndexes::UpsertWithNullKeysComplex [GOOD] |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest >> StreamCreator::Basic ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexMetadata::HandleWriteOnlyIndex [GOOD] Test command err: Trying to start YDB, gRPC: 6198, MsgBus: 22240 2025-06-24T21:07:40.801537Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625566501854830:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:40.801605Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004618/r3tmp/tmpG2DInx/pdisk_1.dat 2025-06-24T21:07:41.068533Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:41.068815Z node 1 :CONFIGS_DISPATCHER ERROR: 
configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625566501854807:2079] 1750799260800015 != 1750799260800018 TServer::EnableGrpc on GrpcPort 6198, node 1 2025-06-24T21:07:41.126890Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:41.126915Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:41.126928Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:41.127164Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:07:41.136728Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:41.136853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:41.138511Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22240 TClient is connected to server localhost:22240 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:41.649277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:41.680550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:41.762954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:41.865834Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:41.905782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:41.969074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:43.668569Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625579386758340:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:43.668688Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:44.036622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:44.064772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:44.092660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:44.122670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:44.155784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:44.190211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:44.223862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:44.276903Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625583681726295:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:44.276970Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625583681726300:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:44.276980Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:44.280599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:44.294760Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625583681726302:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:44.395778Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625583681726353:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:45.378347Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625587976693923:3598], Recipient [1:7519625566501855122:2139]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:45.378388Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:45.378402Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:45.378425Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519625587976693919:3595], Recipient [1:7519625566501855122:2139]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-06-24T21:07:45.378438Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSch ... et# 72057594046644480 2025-06-24T21:08:02.462285Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:02.462335Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625659057335478:2473] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-24T21:08:02.466803Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625659057335489:3607], Recipient [3:7519625633287529408:2147]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:02.466842Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:02.466854Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:02.467876Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625659057335584:3673], Recipient [3:7519625633287529408:2147]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:02.467905Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:02.467915Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:02.467943Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625659057335585:3674], Recipient [3:7519625633287529408:2147]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:02.467954Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:02.467959Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:02.775878Z 
node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625633287529408:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:02.775934Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:02.775990Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625633287529408:2147], Recipient [3:7519625633287529408:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:02.776010Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:03.777091Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625633287529408:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:03.777140Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:03.777194Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625633287529408:2147], Recipient [3:7519625633287529408:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:03.777214Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:04.777533Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625633287529408:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:04.777586Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:04.777646Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625633287529408:2147], Recipient [3:7519625633287529408:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:04.777666Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:05.777721Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625633287529408:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:05.777768Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:05.777841Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625633287529408:2147], Recipient [3:7519625633287529408:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:05.777862Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:06.778093Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], 
Recipient [3:7519625633287529408:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:06.778136Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:06.778195Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625633287529408:2147], Recipient [3:7519625633287529408:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:06.778216Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:07.455871Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [3:7519625680532172197:3716], Recipient [3:7519625633287529408:2147]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:07.455924Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:07.455938Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:08:07.456268Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:7519625637582496993:2274], Recipient [3:7519625633287529408:2147]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037888 TableLocalId: 2 Generation: 1 Round: 0 TableStats { DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1750799282336 LastUpdateTime: 1750799282336 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037888 NodeId: 3 StartTime: 1750799277411 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:08:07.456293Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:08:07.456345Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 2] state 'Ready' dataSize 800 rowCount 3 cpuUsage 0 2025-06-24T21:08:07.456466Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 2] raw table stats: DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1750799282336 LastUpdateTime: 1750799282336 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:08:07.456506Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay 
TTxStoreTableStats on# 0.099991s, queue# 1 2025-06-24T21:08:07.456650Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [3:7519625680532172199:3717], Recipient [3:7519625633287529408:2147]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:07.456664Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:07.456675Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:08:07.456831Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:7519625637582496994:2275], Recipient [3:7519625633287529408:2147]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037889 TableLocalId: 2 Generation: 1 Round: 0 TableStats { DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1750799282338 LastUpdateTime: 1750799282338 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037889 NodeId: 3 StartTime: 1750799277411 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:08:07.456843Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:08:07.456863Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037889 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 2] state 'Ready' dataSize 800 rowCount 3 cpuUsage 0 2025-06-24T21:08:07.456942Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037889 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 2] raw table stats: DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1750799282338 LastUpdateTime: 1750799282338 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 >> KqpIndexes::ExplainCollectFullDiagnostics [GOOD] >> KqpIndexes::DuplicateUpsertInterleaveParams+UseSink >> KqpUniqueIndex::InsertNullInFk [GOOD] >> KqpIndexes::MultipleSecondaryIndex-UseSink [GOOD] >> KqpIndexes::MultipleModifications ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::UniqAndNoUniqSecondaryIndexWithCover [GOOD] Test command err: Trying to start YDB, gRPC: 17711, MsgBus: 2875 2025-06-24T21:07:34.697038Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625541561059434:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:34.697123Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot 
detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004628/r3tmp/tmpSXi68j/pdisk_1.dat 2025-06-24T21:07:34.915866Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625541561059416:2079] 1750799254696440 != 1750799254696443 2025-06-24T21:07:34.920856Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17711, node 1 2025-06-24T21:07:34.972818Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:34.972846Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:34.972853Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:34.972995Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:07:35.037916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:35.038115Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:35.040062Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2875 TClient is connected to server localhost:2875 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:35.397205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:35.434108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:35.559515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:35.678161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:35.702803Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:35.735561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:36.939208Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625550150995660:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:36.939327Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:37.174998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:37.197458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:37.220085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:37.241099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:37.263163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:37.291339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:37.322309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:37.369489Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625554445963616:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:37.369555Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:37.369620Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625554445963621:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:37.372602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:37.381239Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625554445963623:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:37.444153Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625554445963674:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:38.250706Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625558740931245:3596], Recipient [1:7519625541561059741:2146]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:38.250743Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:38.250769Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:38.250814Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519625558740931241:3593], Recipient [1:7519625541561059741:2146]: {TEvModifySchemeTransaction txid# 281474976715672 TabletId# 72057594046644480} 2025-06-24T21:07:38.250831Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSche ... __table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037904, followerId 0 2025-06-24T21:08:08.013433Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:17 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:08.013470Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037904 2025-06-24T21:08:08.013494Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:21 data size 0 row count 0 2025-06-24T21:08:08.013521Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037908 maps to shardIdx: 72057594046644480:21 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:08:08.013529Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037908, followerId 0 2025-06-24T21:08:08.013556Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:21 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:08.013567Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037908 2025-06-24T21:08:08.013583Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:18 data size 0 row count 0 2025-06-24T21:08:08.013607Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037905 maps to shardIdx: 72057594046644480:18 followerId=0, pathId: 
[OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:08:08.013616Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037905, followerId 0 2025-06-24T21:08:08.013640Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:18 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:08.013653Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037905 2025-06-24T21:08:08.013670Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:15 data size 0 row count 0 2025-06-24T21:08:08.013715Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037902 maps to shardIdx: 72057594046644480:15 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:08:08.013726Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037902, followerId 0 2025-06-24T21:08:08.013751Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:15 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:08.013766Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037902 2025-06-24T21:08:08.013786Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:19 data size 0 row count 0 2025-06-24T21:08:08.013810Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037906 maps to shardIdx: 72057594046644480:19 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:08:08.013817Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037906, followerId 0 2025-06-24T21:08:08.013852Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:19 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:08.013862Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037906 2025-06-24T21:08:08.013878Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:20 data size 0 row count 0 2025-06-24T21:08:08.013900Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037907 maps to shardIdx: 72057594046644480:20 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId 
map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:08:08.013907Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037907, followerId 0 2025-06-24T21:08:08.013929Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:20 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:08.013947Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037907 2025-06-24T21:08:08.013961Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:16 data size 0 row count 0 2025-06-24T21:08:08.013984Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037903 maps to shardIdx: 72057594046644480:16 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:08:08.013992Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037903, followerId 0 2025-06-24T21:08:08.014011Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:16 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:08.014020Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037903 2025-06-24T21:08:08.014035Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:22 data size 0 row count 0 2025-06-24T21:08:08.014058Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037909 maps to shardIdx: 72057594046644480:22 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:08:08.014065Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037909, followerId 0 2025-06-24T21:08:08.014087Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:22 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:08.014097Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037909 2025-06-24T21:08:08.014128Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:23 data size 0 row count 0 2025-06-24T21:08:08.014147Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037910 maps to shardIdx: 72057594046644480:23 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, 
DataSize 0 2025-06-24T21:08:08.014154Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037910, followerId 0 2025-06-24T21:08:08.014177Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:23 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:08.014186Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037910 2025-06-24T21:08:08.014203Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:14 data size 0 row count 0 2025-06-24T21:08:08.014222Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037901 maps to shardIdx: 72057594046644480:14 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:08:08.014231Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037901, followerId 0 2025-06-24T21:08:08.014252Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:14 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:08.014262Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037901 2025-06-24T21:08:08.014309Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:08.014419Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519625634633419497:2152]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:08:08.014436Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:08:08.014449Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-24T21:08:08.288593Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T21:08:08.308918Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::UpsertWithNullKeysComplex [GOOD] Test command err: Trying to start YDB, gRPC: 30187, MsgBus: 15131 2025-06-24T21:07:45.247866Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625586169004411:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:45.247942Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045f0/r3tmp/tmpa43TEF/pdisk_1.dat 2025-06-24T21:07:45.580833Z node 1 
:CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625586169004392:2079] 1750799265246976 != 1750799265246979 2025-06-24T21:07:45.581033Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:45.594879Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:45.595002Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:45.601277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30187, node 1 2025-06-24T21:07:45.678148Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:45.678181Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:45.678205Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:45.678391Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15131 TClient is connected to server localhost:15131 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:46.207241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:46.238322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:46.260750Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:46.383052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:46.527391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:46.596705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:48.457005Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625599053907916:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:48.457094Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:48.771369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.801565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.833932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.874278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.916698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.956896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:49.028575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:49.112945Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625603348875873:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:49.113018Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:49.113182Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625603348875878:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:49.117121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:49.128375Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625603348875880:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:49.216805Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625603348875931:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:50.248164Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625586169004411:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:50.248219Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:07:50.283661Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625607643843502:3595], Recipient [1:7519625586169004715:2144]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:50.283709Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:50.283739Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: ... dy parts: 1/1 2025-06-24T21:08:07.397958Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-06-24T21:08:07.397991Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519625653712572698:2161] message: TxId: 281474976710760 2025-06-24T21:08:07.398008Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-24T21:08:07.398023Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710760:0 2025-06-24T21:08:07.398031Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710760:0 2025-06-24T21:08:07.398061Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 5 2025-06-24T21:08:07.398547Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:07.398645Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625653712572698:2161] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710760 at schemeshard: 72057594046644480 2025-06-24T21:08:07.398826Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124998, Sender [3:7519625653712572698:2161], Recipient [3:7519625653712572698:2161]: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710760 2025-06-24T21:08:07.398858Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5109: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletionResult 2025-06-24T21:08:07.398871Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-06-24T21:08:07.398892Z node 3 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976710760 2025-06-24T21:08:07.398945Z node 3 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 281474976715678, txId# 281474976710760 2025-06-24T21:08:07.399035Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 281474976715678, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobal, IndexName: IndexName2, IndexColumn: IndexColumn2, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [3:7519625683777346350:2522], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 1750799287384, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 2, upload bytes: 85, read rows: 2, read bytes: 85 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, txId# 281474976710760 2025-06-24T21:08:07.399095Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:07.399400Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:07.399480Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715678 Unlocking 2025-06-24T21:08:07.399545Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715678 Unlocking TBuildInfo{ IndexBuildId: 281474976715678, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobal, IndexName: IndexName2, IndexColumn: IndexColumn2, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [3:7519625683777346350:2522], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 1750799287384, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 2, upload bytes: 85, read rows: 2, read bytes: 85 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T21:08:07.399562Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:07.399576Z node 3 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: 
Change state from Unlocking to Done 2025-06-24T21:08:07.399845Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:07.399952Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715678 Done 2025-06-24T21:08:07.400025Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715678 Done TBuildInfo{ IndexBuildId: 281474976715678, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobal, IndexName: IndexName2, IndexColumn: IndexColumn2, State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [3:7519625683777346350:2522], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 1750799287384, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 2, upload bytes: 85, read rows: 2, read bytes: 85 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T21:08:07.400037Z node 3 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:334: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 281474976715678, subscribers count# 0 2025-06-24T21:08:07.400047Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:07.400077Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:07.409113Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [3:7519625683777346532:3885], Recipient [3:7519625653712572698:2161]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:07.409156Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:07.409179Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:08:07.409372Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 274792450, Sender [3:7519625683777346529:2533], Recipient [3:7519625653712572698:2161]: NKikimrIndexBuilder.TEvGetRequest DatabaseName: "/Root" IndexBuildId: 281474976715678 2025-06-24T21:08:07.409392Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5086: StateWork, processing event TEvIndexBuilder::TEvGetRequest 2025-06-24T21:08:07.409469Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/Root" IndexBuildId: 281474976715678 2025-06-24T21:08:07.409646Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 281474976715678 State: STATE_DONE Settings { source_path: "/Root/TestTable" index 
{ name: "IndexName2" index_columns: "IndexColumn2" global_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 StartTime { seconds: 1750799287 } EndTime { seconds: 1750799287 } } 2025-06-24T21:08:07.409667Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:07.409702Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:07.409791Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625683777346529:2533] msg type: 274792451 msg: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 281474976715678 State: STATE_DONE Settings { source_path: "/Root/TestTable" index { name: "IndexName2" index_columns: "IndexColumn2" global_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 StartTime { seconds: 1750799287 } EndTime { seconds: 1750799287 } } at schemeshard: 72057594046644480 2025-06-24T21:08:07.410495Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625683777346532:3885], Recipient [3:7519625653712572698:2161]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:07.410518Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:07.410527Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:08.360693Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625653712572698:2161]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:08.360747Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:08.360812Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625653712572698:2161], Recipient [3:7519625653712572698:2161]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:08.360832Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:08.698116Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T21:08:08.715871Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest >> KqpIndexes::IndexFilterPushDown [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::InsertNullInFk [GOOD] Test command err: Trying to start YDB, gRPC: 21809, MsgBus: 27755 2025-06-24T21:07:44.956043Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625582168070821:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:44.956537Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045f5/r3tmp/tmp1XXk3e/pdisk_1.dat 2025-06-24T21:07:45.355612Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:45.356259Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625582168070606:2079] 1750799264937900 != 1750799264937903 2025-06-24T21:07:45.357072Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:45.357186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 21809, node 1 2025-06-24T21:07:45.359190Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:45.411714Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:45.411738Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:45.411744Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:45.411868Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27755 TClient is connected to server localhost:27755 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:45.947730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:45.955316Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:07:45.970243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:46.101784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:46.240930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:46.322402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:48.102005Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625599347941425:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:48.102132Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:48.393865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.424868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.453348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.482502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.514568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.587485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.635859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.721129Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625599347942087:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:48.721217Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:48.721526Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625599347942092:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:48.725959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:48.738248Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625599347942094:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:48.807575Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625599347942145:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:49.773663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:49.957247Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625582168070821:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:49.957319Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:07:51.495658Z node 1 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: ... orState: ExecuteState, TraceId: 01jyhw9sqeatxav7t0bgs865ch, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 17672, MsgBus: 10782 2025-06-24T21:08:01.188307Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519625655098578654:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:01.188405Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045f5/r3tmp/tmpgFUqW5/pdisk_1.dat 2025-06-24T21:08:01.341488Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:01.341573Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:01.344152Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:01.357394Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17672, node 3 2025-06-24T21:08:01.427876Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:01.427900Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:01.427912Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:01.428087Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10782 TClient is connected to server localhost:10782 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:02.105300Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:02.110876Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:02.124050Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:02.197710Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:02.208569Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:02.425559Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:02.519744Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:05.475662Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625672278449419:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:05.475771Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:05.533053Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:05.615332Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:05.656845Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:05.695252Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:05.764837Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:05.798845Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:05.845482Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:05.932367Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625672278450082:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:05.932514Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:05.932915Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625672278450087:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:05.937982Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:05.949090Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519625672278450089:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:06.023729Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519625676573417436:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:06.188488Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519625655098578654:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:06.188559Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:07.226381Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedAfterRead+UseSink [GOOD] >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedAfterRead-UseSink |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> SplitPathTests::WithDatabaseShouldFail >> OperationMapping::IndexBuildRejected >> OperationMapping::IndexBuildSuccess [GOOD] >> OperationMapping::IndexBuildCanceled [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::IndexFilterPushDown [GOOD] Test command err: Trying to start YDB, gRPC: 9282, MsgBus: 14581 2025-06-24T21:07:50.307093Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625609251084550:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:50.307178Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045d6/r3tmp/tmpzsrbbu/pdisk_1.dat 2025-06-24T21:07:50.686866Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:50.687294Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625609251084522:2079] 1750799270305488 != 1750799270305491 TServer::EnableGrpc on GrpcPort 9282, node 1 2025-06-24T21:07:50.707848Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:50.707946Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:50.709697Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:50.763730Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:50.763798Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:50.763809Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:50.763963Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14581 TClient is connected to server localhost:14581 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:07:51.325249Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:51.362098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:51.390424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:07:51.407082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:51.543813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:51.703658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:07:51.775277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:53.461976Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625622135988053:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:53.462092Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:53.767940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:53.802174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:53.869064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:53.912106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:53.946048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:53.981859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:54.016727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:54.074040Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625626430956005:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:54.074086Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:54.074112Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625626430956010:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:54.077636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:54.088932Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625626430956012:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:54.166566Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625626430956063:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:55.215747Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625630725923634:3592], Recipient [1:7519625609251084868:2153]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:55.215780Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:55.215797Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:55.215829Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519625630725923630:3589], Recipient [1:7519625609251084868:2153]: {TEvModifySche ... 25-06-24T21:08:03.002668Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:03.002741Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:03.004852Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18027, node 3 2025-06-24T21:08:03.072942Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:03.072972Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:03.072981Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:03.073114Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4705 TClient is connected to server localhost:4705 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:08:03.704658Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:03.713137Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:08:03.733687Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:03.833049Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:03.847683Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:04.028588Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:04.132311Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:06.851085Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625678950316261:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:06.851232Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:06.914725Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:06.952495Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:06.991283Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:07.042892Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:07.076222Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:07.148017Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:07.191622Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:07.282348Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625683245284223:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:07.282464Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:07.282780Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625683245284228:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:07.287358Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:07.302400Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519625683245284230:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:08:07.354117Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519625683245284281:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:07.847981Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519625661770445583:2155];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:07.848068Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:08.614424Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:08.667877Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:08.713762Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> SplitPathTests::WithDatabaseShouldSuccess [GOOD] >> SplitPathTests::WithDatabaseShouldFail [GOOD] >> OperationMapping::IndexBuildRejected [GOOD] >> KqpMultishardIndex::WriteIntoRenamingSyncIndex [GOOD] >> KqpMultishardIndex::WriteIntoRenamingAsyncIndex |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> SplitPathTests::WithDatabaseShouldSuccess [GOOD] |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> OperationMapping::IndexBuildRejected [GOOD] |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> SplitPathTests::WithDatabaseShouldFail [GOOD] |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> OperationMapping::IndexBuildCanceled [GOOD] |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> OperationMapping::IndexBuildSuccess [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineLevel2-Nullable-UseSimilarity [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineLevel2+Nullable-UseSimilarity >> KqpIndexes::DirectAccessToIndexImplTable [GOOD] >> KqpVectorIndexes::OrderByCosineLevel1-Nullable-UseSimilarity [GOOD] >> KqpVectorIndexes::OrderByCosineLevel2+Nullable+UseSimilarity |93.9%| [TA] $(B)/ydb/core/grpc_services/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |93.9%| [TA] {RESULT} $(B)/ydb/core/grpc_services/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpIndexes::SecondaryIndexWithPrimaryKeySameComulns-UseSink [GOOD] >> StreamCreator::Basic [GOOD] >> DataShardSnapshots::DelayedWriteReadableAfterSplit [GOOD] >> DataShardSnapshots::DelayedWriteReplyAfterSplit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::DirectAccessToIndexImplTable [GOOD] Test command err: Trying to start YDB, gRPC: 18283, MsgBus: 10508 2025-06-24T21:07:47.096919Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625597675531215:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:47.096965Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045eb/r3tmp/tmpyNmQSv/pdisk_1.dat 2025-06-24T21:07:47.417596Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625597675531194:2079] 1750799267092466 != 1750799267092469 2025-06-24T21:07:47.482035Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18283, node 1 2025-06-24T21:07:47.532097Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:47.532393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:47.537033Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:47.560012Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:47.560047Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:47.560056Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:47.560214Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10508 TClient is connected to server localhost:10508 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:07:48.110733Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:48.122757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:07:48.152136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.293962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:48.437339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:48.498920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:50.137976Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625610560434746:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:50.138100Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:50.463345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.502784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.575724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.628061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.657092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.691268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.763289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.817878Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625610560435412:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:50.817966Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:50.818054Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625610560435417:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:50.821631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:50.831071Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625610560435419:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:50.886008Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625610560435470:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:52.072346Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625619150370297:3602], Recipient [1:7519625597675531577:2182]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:52.072394Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:52.072407Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:52.072452Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519625619150370293:3599], Recipient [1:7519625597675531577:2182]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-06-24T21:07:52.072470Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvS ... ist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:08:11.181564Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=M2ExMGJiOTMtZDZjNmU2ODAtN2M4Yjc4OTgtOTRiNGFkNTY=, ActorId: [3:7519625700954053474:2573], ActorState: ExecuteState, TraceId: 01jyhwa4qheevxsqjy5c6hzg6s, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:08:11.206403Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519625700954053490:3972], for# user@builtin, access# DescribeSchema 2025-06-24T21:08:11.206436Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519625700954053490:3972], for# user@builtin, access# DescribeSchema 2025-06-24T21:08:11.207964Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519625700954053487:2579], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:17: Error: At function: KiReadTable!
:2:17: Error: Cannot find table 'db.[/Root/SecondaryKeys/Index/indexImplTable]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:08:11.210185Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=M2ExMGJiOTMtZDZjNmU2ODAtN2M4Yjc4OTgtOTRiNGFkNTY=, ActorId: [3:7519625700954053474:2573], ActorState: ExecuteState, TraceId: 01jyhwa4sr7ab1tmzvtx6ng3em, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:08:11.230840Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519625700954053497:3975], for# user@builtin, access# DescribeSchema 2025-06-24T21:08:11.230898Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519625700954053497:3975], for# user@builtin, access# DescribeSchema 2025-06-24T21:08:11.232351Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519625700954053494:2582], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:29: Error: At function: KiWriteTable!
:2:29: Error: Cannot find table 'db.[/Root/SecondaryKeys]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:08:11.232568Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=M2ExMGJiOTMtZDZjNmU2ODAtN2M4Yjc4OTgtOTRiNGFkNTY=, ActorId: [3:7519625700954053474:2573], ActorState: ExecuteState, TraceId: 01jyhwa4tj756da25877kgcrcr, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:08:11.254619Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519625700954053504:3978], for# user@builtin, access# DescribeSchema 2025-06-24T21:08:11.254650Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519625700954053504:3978], for# user@builtin, access# DescribeSchema 2025-06-24T21:08:11.255800Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519625700954053501:2585], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:29: Error: At function: KiWriteTable!
:2:29: Error: Cannot find table 'db.[/Root/SecondaryKeys/Index/indexImplTable]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:08:11.257344Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=M2ExMGJiOTMtZDZjNmU2ODAtN2M4Yjc4OTgtOTRiNGFkNTY=, ActorId: [3:7519625700954053474:2573], ActorState: ExecuteState, TraceId: 01jyhwa4va81jgyn2065xa59m4, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:08:11.279496Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519625700954053511:3981], for# user@builtin, access# DescribeSchema 2025-06-24T21:08:11.279526Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519625700954053511:3981], for# user@builtin, access# DescribeSchema 2025-06-24T21:08:11.280747Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519625700954053508:2588], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:92: Error: At function: KiWriteTable!
:2:92: Error: Cannot find table 'db.[/Root/SecondaryKeys/Index/indexImplTable]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:08:11.280946Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=M2ExMGJiOTMtZDZjNmU2ODAtN2M4Yjc4OTgtOTRiNGFkNTY=, ActorId: [3:7519625700954053474:2573], ActorState: ExecuteState, TraceId: 01jyhwa4w12cp73bvge3fzkgh0, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:08:11.315075Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519625700954053518:3984], for# user@builtin, access# DescribeSchema 2025-06-24T21:08:11.315112Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519625700954053518:3984], for# user@builtin, access# DescribeSchema 2025-06-24T21:08:11.316334Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519625700954053515:2591], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:83: Error: At function: KiDeleteTable!
:2:83: Error: Cannot find table 'db.[/Root/SecondaryKeys/Index/indexImplTable]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:08:11.316537Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=M2ExMGJiOTMtZDZjNmU2ODAtN2M4Yjc4OTgtOTRiNGFkNTY=, ActorId: [3:7519625700954053474:2573], ActorState: ExecuteState, TraceId: 01jyhwa4wr2y3jvyettsxtkzpp, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:08:11.406193Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:08:12.217819Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519625705249020950:2633], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
:2:29: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017
: Error: Execution, code: 1060
:2:29: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017 2025-06-24T21:08:12.219352Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=M2ExMGJiOTMtZDZjNmU2ODAtN2M4Yjc4OTgtOTRiNGFkNTY=, ActorId: [3:7519625700954053474:2573], ActorState: ExecuteState, TraceId: 01jyhwa5rpcz7q1v5bd2zkefvs, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-24T21:08:12.260870Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519625705249020962:2638], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
:2:92: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017
: Error: Execution, code: 1060
:2:92: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017 2025-06-24T21:08:12.261155Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=M2ExMGJiOTMtZDZjNmU2ODAtN2M4Yjc4OTgtOTRiNGFkNTY=, ActorId: [3:7519625700954053474:2573], ActorState: ExecuteState, TraceId: 01jyhwa5t2893pkn89mwx0502s, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-24T21:08:12.295720Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519625705249020972:2642], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
:2:83: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017
: Error: Execution, code: 1060
:2:83: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017 2025-06-24T21:08:12.295923Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=M2ExMGJiOTMtZDZjNmU2ODAtN2M4Yjc4OTgtOTRiNGFkNTY=, ActorId: [3:7519625700954053474:2573], ActorState: ExecuteState, TraceId: 01jyhwa5vc8p1140ywbhfbhv8s, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-24T21:08:13.231732Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519625709543988403:2683], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
:2:29: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017
: Error: Execution, code: 1060
:2:29: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017 2025-06-24T21:08:13.231950Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=M2ExMGJiOTMtZDZjNmU2ODAtN2M4Yjc4OTgtOTRiNGFkNTY=, ActorId: [3:7519625700954053474:2573], ActorState: ExecuteState, TraceId: 01jyhwa6rcdd2rpa9t3nk6scrc, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-24T21:08:13.277835Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519625709543988415:2688], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
:2:92: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017
: Error: Execution, code: 1060
:2:92: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017 2025-06-24T21:08:13.278130Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=M2ExMGJiOTMtZDZjNmU2ODAtN2M4Yjc4OTgtOTRiNGFkNTY=, ActorId: [3:7519625700954053474:2573], ActorState: ExecuteState, TraceId: 01jyhwa6sy9h49whx1yzvjyhv4, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-24T21:08:13.317315Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519625709543988425:2692], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
:2:83: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017
: Error: Execution, code: 1060
:2:83: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017 2025-06-24T21:08:13.317558Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=M2ExMGJiOTMtZDZjNmU2ODAtN2M4Yjc4OTgtOTRiNGFkNTY=, ActorId: [3:7519625700954053474:2573], ActorState: ExecuteState, TraceId: 01jyhwa6v764jnhbf8sxbyg5qy, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: >> KqpIndexes::SecondaryIndexUpdateOnUsingIndex [GOOD] >> KqpIndexes::SecondaryIndexSelectUsingScripting ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest >> StreamCreator::Basic [GOOD] Test command err: 2025-06-24T21:08:10.096546Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625694079041856:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:10.096846Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0027ea/r3tmp/tmpnNZ0fQ/pdisk_1.dat 2025-06-24T21:08:10.553478Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:10.555719Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625694079041728:2079] 1750799290078858 != 1750799290078861 2025-06-24T21:08:10.572874Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:10.572989Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:10.604719Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14895 TServer::EnableGrpc on GrpcPort 18668, node 1 2025-06-24T21:08:10.971876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:10.971906Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:10.971914Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:10.972040Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:11.103433Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14895 WaitRootIsUp 'Root'... 
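Context for the two recurring failure classes above: the code 2003 issues (status SCHEME_ERROR) are produced when the compiler cannot resolve a table path for the caller — here `user@builtin` is denied DescribeSchema, as the TX_PROXY_SCHEME_CACHE warnings show — while the code 2017 issues (status BAD_REQUEST) are produced for any DML that addresses a secondary-index implementation table such as `/Root/SecondaryKeys/Index/indexImplTable` directly; index implementation tables are maintained by the engine, and writes are only accepted through the base table. Below is a minimal sketch of a statement that is rejected with code 2017 next to the supported write path, assuming the ydb Python SDK, a local endpoint, and the usual Key/Fk/Value test schema (all assumptions for illustration; only the table paths come from the log).

# Sketch only: reproduces the two outcomes reported by the compiler above.
# The endpoint, database, column names and the choice of the ydb Python SDK
# are assumptions; the table paths are the ones from the log.
import ydb

driver = ydb.Driver(ydb.DriverConfig("grpc://localhost:2136", "/Root"))
driver.wait(timeout=5)
pool = ydb.SessionPool(driver)

# Direct DML against the index implementation table is rejected with code 2017.
REJECTED = 'UPSERT INTO `/Root/SecondaryKeys/Index/indexImplTable` (Fk, Key) VALUES (1, 1);'

# Writes go through the base table; the engine updates the index table itself.
SUPPORTED = 'UPSERT INTO `/Root/SecondaryKeys` (Key, Fk, Value) VALUES (1, 1, "one");'

def run(session, text):
    # Execute one YQL statement in its own serializable transaction.
    session.transaction(ydb.SerializableReadWrite()).execute(text, commit_tx=True)

pool.retry_operation_sync(run, None, SUPPORTED)      # accepted
try:
    pool.retry_operation_sync(run, None, REJECTED)   # raises: writing to index impl tables is not allowed
except ydb.Error as err:
    print("rejected as expected:", err)

pool.stop()
driver.stop()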
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:11.597072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:08:11.629288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750799291745 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799291661 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750799291745 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... 
(TRUNCATED) 2025-06-24T21:08:11.832764Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:08:11.832898Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:08:11.832910Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-24T21:08:11.833557Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-24T21:08:13.327730Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750799291745, tx_id: 281474976710658 } } } 2025-06-24T21:08:13.334807Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-24T21:08:13.336751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:13.337630Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-06-24T21:08:13.337658Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-06-24T21:08:13.367835Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-06-24T21:08:13.367881Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] 2025-06-24T21:08:13.368479Z node 1 :REPLICATION_CONTROLLER TRACE: stream_creator.cpp:57: [StreamCreator][rid 1][tid 1] Handle NKikimr::NReplication::NController::TEvPrivate::TEvAllowCreateStream 
2025-06-24T21:08:13.472423Z node 1 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037888:1][1:7519625706963944520:2306] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:5:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-24T21:08:13.523127Z node 1 :REPLICATION_CONTROLLER TRACE: stream_creator.cpp:85: [StreamCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvAlterTableResponse { Result: { status: SUCCESS, issues: } } 2025-06-24T21:08:13.523171Z node 1 :REPLICATION_CONTROLLER INFO: stream_creator.cpp:100: [StreamCreator][rid 1][tid 1] Success: issues# 2025-06-24T21:08:13.552441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:08:13.569998Z node 1 :REPLICATION_CONTROLLER TRACE: stream_creator.cpp:137: [StreamCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvAlterTopicResponse { Result: { status: SUCCESS, issues: } } 2025-06-24T21:08:13.570059Z node 1 :REPLICATION_CONTROLLER INFO: stream_creator.cpp:155: [StreamCreator][rid 1][tid 1] Success: issues# TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750799291745 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyC... 
(TRUNCATED) >> DataShardSnapshots::LockedWritesLimitedPerKey-UseSink [GOOD] >> DataShardSnapshots::LockedWriteWithPendingVolatileCommit+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SecondaryIndexWithPrimaryKeySameComulns-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 23374, MsgBus: 6302 2025-06-24T21:07:45.047042Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625589078692698:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:45.047270Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045f2/r3tmp/tmpJ9GDqP/pdisk_1.dat 2025-06-24T21:07:45.382576Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:45.387272Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625589078692680:2079] 1750799265046172 != 1750799265046175 2025-06-24T21:07:45.396089Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:45.396220Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:45.398373Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23374, node 1 2025-06-24T21:07:45.528527Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:45.528552Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:45.528559Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:45.528732Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6302 TClient is connected to server localhost:6302 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:07:46.062057Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:07:46.106366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:46.140362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:46.306113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:46.456074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:07:46.531597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.135880Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625601963596211:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:48.135974Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:48.421965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.456900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.482268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.517493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.548135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.619983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.649335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.698449Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625601963596868:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:48.698548Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:48.698632Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625601963596873:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:48.702535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:48.712235Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625601963596875:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:48.813902Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625601963596926:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:49.807215Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625606258564497:3598], Recipient [1:7519625589078693005:2147]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:49.807260Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:49.807275Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:49.807317Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519625606258564493:3595], Recipient [1:7519625589078693005:2147]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-06-24T21:07:49.807337Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSche ... TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:08:11.224642Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:7519625657119194540:2322], Recipient [3:7519625648529258988:2142]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037907 TableLocalId: 5 Generation: 1 Round: 0 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037907 NodeId: 3 StartTime: 1750799281025 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:08:11.224659Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:08:11.224675Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037907 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-06-24T21:08:11.224755Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037907 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 
InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:08:11.323039Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519625648529258988:2142]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:08:11.323082Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:08:11.323102Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 5 2025-06-24T21:08:11.323166Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 5 2025-06-24T21:08:11.323181Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 5 2025-06-24T21:08:11.323242Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:18 data size 0 row count 0 2025-06-24T21:08:11.323321Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037905 maps to shardIdx: 72057594046644480:18 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:08:11.323333Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037905, followerId 0 2025-06-24T21:08:11.323385Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:18 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:11.323425Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037905 2025-06-24T21:08:11.323453Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:19 data size 0 row count 0 2025-06-24T21:08:11.323487Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037906 maps to shardIdx: 72057594046644480:19 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:08:11.323495Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037906, followerId 0 2025-06-24T21:08:11.323523Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:19 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:11.323534Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037906 2025-06-24T21:08:11.323552Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for 
pathId 5 shard idx 72057594046644480:17 data size 0 row count 0 2025-06-24T21:08:11.323580Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037904 maps to shardIdx: 72057594046644480:17 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:08:11.323590Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037904, followerId 0 2025-06-24T21:08:11.323617Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:17 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:11.323629Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037904 2025-06-24T21:08:11.323647Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:15 data size 0 row count 0 2025-06-24T21:08:11.323803Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037902 maps to shardIdx: 72057594046644480:15 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:08:11.323815Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037902, followerId 0 2025-06-24T21:08:11.323846Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:15 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:11.323863Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037902 2025-06-24T21:08:11.323883Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:20 data size 0 row count 0 2025-06-24T21:08:11.323912Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037907 maps to shardIdx: 72057594046644480:20 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:08:11.323932Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037907, followerId 0 2025-06-24T21:08:11.323957Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:20 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:11.323976Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037907 2025-06-24T21:08:11.324022Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 
2025-06-24T21:08:11.324151Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519625648529258988:2142]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:08:11.324169Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:08:11.324183Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-24T21:08:11.948876Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625648529258988:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:11.948919Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:11.948988Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625648529258988:2142], Recipient [3:7519625648529258988:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:11.949015Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:12.013644Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T21:08:12.539192Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T21:08:12.559035Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T21:08:12.908308Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T21:08:12.949381Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625648529258988:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:12.949429Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:12.949491Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625648529258988:2142], Recipient [3:7519625648529258988:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:12.949512Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> KqpIndexes::SecondaryIndexUsingInJoin-UseStreamJoin [GOOD] |93.9%| [TA] $(B)/ydb/core/tx/replication/controller/ut_stream_creator/test-results/unittest/{meta.json ... results_accumulator.log} |93.9%| [TA] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpIndexes::UpsertWithoutExtraNullDelete-UseSink [GOOD] >> KqpMultishardIndex::CheckPushTopSort |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest >> test_sql_streaming.py::test[suites-ReadWriteTopic-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadWriteTopicWithSchema-default.txt] >> test_sql_streaming.py::test[suites-GroupByHoppingWithDataWatermarks-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadTopic-default.txt] >> KqpIndexes::DuplicateUpsertInterleaveParams+UseSink [GOOD] >> KqpIndexes::DuplicateUpsertInterleaveParams-UseSink >> StatisticsSaveLoad::Delete >> StatisticsSaveLoad::Simple >> StatisticsSaveLoad::ForbidAccess >> DataShardSnapshots::VolatileSnapshotCleanupOnFinish [GOOD] >> DataShardSnapshots::VolatileSnapshotRenameTimeout >> BasicUsage::RetryDiscoveryWithCancel [GOOD] >> BasicUsage::RecreateObserver ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SecondaryIndexUsingInJoin-UseStreamJoin [GOOD] Test command err: Trying to start YDB, gRPC: 14573, MsgBus: 1473 2025-06-24T21:07:50.221194Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625609504252764:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:50.221270Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045db/r3tmp/tmpeMli1U/pdisk_1.dat 2025-06-24T21:07:50.569478Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14573, node 1 2025-06-24T21:07:50.623301Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:50.623398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:50.625082Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:50.632571Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:50.632595Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:50.632613Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:50.632722Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1473 TClient is connected to server localhost:1473 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:51.190576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:51.205285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:07:51.218645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:51.230068Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:51.381805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:51.560545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:51.631297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:53.343618Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625622389156269:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:53.343725Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:53.669703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:53.700991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:53.736918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:53.763861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:53.792178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:53.861815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:53.892786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:53.961268Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625622389156930:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:53.961343Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:53.961720Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625622389156935:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:53.965801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:53.978002Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625622389156937:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:54.036655Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625626684124284:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:55.068435Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625630979091853:3594], Recipient [1:7519625609504253048:2145]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:55.068467Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:55.068482Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:55.068530Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519625630979091849:3591], Recipient [1:7519625609504253048:2145]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-06-24T21:07:55.068549Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMo ... deEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:11.612177Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-24T21:08:11.612184Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:11.612192Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715673:0 2025-06-24T21:08:11.612226Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625697100413169:2484] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715673 at schemeshard: 72057594046644480 2025-06-24T21:08:11.612292Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715673:2, at schemeshard: 72057594046644480 2025-06-24T21:08:11.612304Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:11.612310Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715673:2 2025-06-24T21:08:11.612333Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625697100413174:2485] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715673 at schemeshard: 72057594046644480 2025-06-24T21:08:11.612413Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [3:7519625675625574235:2148], Recipient [3:7519625675625574235:2148]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T21:08:11.612442Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T21:08:11.612478Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress 
Execute, operationId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-24T21:08:11.612504Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976715673:0 ProgressState 2025-06-24T21:08:11.612573Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:11.612590Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715673:0 progress is 2/3 2025-06-24T21:08:11.612601Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715673 ready parts: 2/3 2025-06-24T21:08:11.612614Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715673:0 progress is 2/3 2025-06-24T21:08:11.612627Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715673 ready parts: 2/3 2025-06-24T21:08:11.612658Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715673, ready parts: 2/3, is published: true 2025-06-24T21:08:11.612805Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [3:7519625675625574235:2148], Recipient [3:7519625675625574235:2148]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T21:08:11.612819Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T21:08:11.612836Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715673:2, at schemeshard: 72057594046644480 2025-06-24T21:08:11.612847Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976715673:2 ProgressState 2025-06-24T21:08:11.612883Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:11.612890Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715673:2 progress is 3/3 2025-06-24T21:08:11.612895Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715673 ready parts: 3/3 2025-06-24T21:08:11.612906Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715673:2 progress is 3/3 2025-06-24T21:08:11.612911Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715673 ready parts: 3/3 2025-06-24T21:08:11.612919Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715673, ready parts: 3/3, is published: true 2025-06-24T21:08:11.612949Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519625697100413143:2482] message: TxId: 281474976715673 2025-06-24T21:08:11.612961Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715673 ready parts: 3/3 2025-06-24T21:08:11.612977Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation 
id: 281474976715673:0 2025-06-24T21:08:11.612984Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715673:0 2025-06-24T21:08:11.613056Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 20] was 4 2025-06-24T21:08:11.613075Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715673:1 2025-06-24T21:08:11.613081Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715673:1 2025-06-24T21:08:11.613094Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 21] was 3 2025-06-24T21:08:11.613100Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715673:2 2025-06-24T21:08:11.613105Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715673:2 2025-06-24T21:08:11.613144Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 22] was 3 2025-06-24T21:08:11.613354Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:11.613435Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625697100413240:3747], Recipient [3:7519625675625574235:2148]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:11.613450Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:11.613458Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:11.613468Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:11.613500Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625697100413143:2482] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715673 at schemeshard: 72057594046644480 2025-06-24T21:08:11.613596Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625697100413239:3746], Recipient [3:7519625675625574235:2148]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:11.613613Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:11.613620Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:11.614133Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625697100413151:3684], Recipient [3:7519625675625574235:2148]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:11.614159Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:11.614169Z node 3 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:12.425757Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625675625574235:2148]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:12.425800Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:12.425854Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625675625574235:2148], Recipient [3:7519625675625574235:2148]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:12.425889Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:13.426291Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625675625574235:2148]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:13.426340Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:13.426393Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625675625574235:2148], Recipient [3:7519625675625574235:2148]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:13.426410Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:14.426635Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625675625574235:2148]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:14.426672Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:14.426727Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625675625574235:2148], Recipient [3:7519625675625574235:2148]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:14.426743Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> KqpIndexes::SecondaryIndexUsingInJoin2+UseStreamJoin [GOOD] >> KqpIndexes::SecondaryIndexUsingInJoin2-UseStreamJoin >> KqpPrefixedVectorIndexes::OrderByCosineLevel1+Nullable-UseSimilarity [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineLevel1+Nullable+UseSimilarity >> test_sql_streaming.py::test[suites-GroupByHopExprKey-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHopListKey-default.txt] >> KqpPrefixedVectorIndexes::OrderByCosineLevel1-Nullable-UseSimilarity [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineLevel1-Nullable+UseSimilarity >> KqpIndexes::MultipleModifications [GOOD] >> KqpIndexes::JoinWithNonPKColumnsInPredicate-UseStreamJoin [GOOD] >> DataShardSnapshots::LockedWriteWithAsyncIndex+WithRestart+UseSink [GOOD] >> DataShardSnapshots::LockedWriteWithAsyncIndexAndVolatileCommit+UseSink >> 
TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenDrop >> TSchemeShardSubDomainTest::SimultaneousDeclare >> TSchemeShardSubDomainTest::SchemeDatabaseQuotaRejects >> TSchemeShardSubDomainTest::DeclareAndDelete >> TSchemeShardSubDomainTest::CreateWithNoEqualName >> TSchemeShardSubDomainTest::DeleteAdd ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::MultipleModifications [GOOD] Test command err: Trying to start YDB, gRPC: 6935, MsgBus: 4907 2025-06-24T21:07:54.898994Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625624101382523:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:54.899108Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045b6/r3tmp/tmpU3vWEE/pdisk_1.dat 2025-06-24T21:07:55.189693Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:55.189844Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:55.193604Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:55.238336Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625624101382504:2079] 1750799274897376 != 1750799274897379 2025-06-24T21:07:55.251066Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6935, node 1 2025-06-24T21:07:55.299802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:55.299825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:55.299839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:55.299973Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4907 TClient is connected to server localhost:4907 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:07:55.860890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:55.890882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:55.910555Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:07:56.031102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.167052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:07:56.244417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.104229Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625641281253313:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:58.104335Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:58.405047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.434794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.467204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.499373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.569635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.608224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.693372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.793935Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625641281253979:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:58.794069Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:58.795387Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625641281253984:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:58.801579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:58.816513Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625641281253986:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:58.890796Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625641281254037:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:59.848268Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625645576221607:3593], Recipient [1:7519625628396350115:2141]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:59.848305Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:59.848316Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:59.848346Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519625645576221603:3590], Recipient [1:7519625628396350115:2141]: {TEvModifySchemeTransaction txid# 281474976715672 TabletId# 72057594046644480} 2025-06-24T21:07:59.848375Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvScheme ... Change state for txid 281474976715672:0 129 -> 240 2025-06-24T21:08:16.382094Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:16.382227Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715672 2025-06-24T21:08:16.382241Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:16.382286Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715672 2025-06-24T21:08:16.382298Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:16.382352Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-24T21:08:16.382366Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:16.382425Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-24T21:08:16.382438Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:16.382487Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-24T21:08:16.382502Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:16.382514Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send 
for 281474976715672:2 2025-06-24T21:08:16.382564Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625720836476292:2477] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-24T21:08:16.382631Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-24T21:08:16.382642Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:16.382651Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715672:0 2025-06-24T21:08:16.382681Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625720836476288:2476] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-24T21:08:16.382744Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [3:7519625695066670168:2148], Recipient [3:7519625695066670168:2148]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T21:08:16.382762Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T21:08:16.382799Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-24T21:08:16.382821Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976715672:2 ProgressState 2025-06-24T21:08:16.382900Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:16.382926Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715672:2 progress is 2/3 2025-06-24T21:08:16.382946Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-24T21:08:16.382964Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715672:2 progress is 2/3 2025-06-24T21:08:16.382995Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-24T21:08:16.383010Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 2/3, is published: true 2025-06-24T21:08:16.383209Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [3:7519625695066670168:2148], Recipient [3:7519625695066670168:2148]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T21:08:16.383234Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T21:08:16.383263Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-24T21:08:16.383279Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: 
[72057594046644480] TDone opId# 281474976715672:0 ProgressState 2025-06-24T21:08:16.383336Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:16.383356Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715672:0 progress is 3/3 2025-06-24T21:08:16.383366Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-24T21:08:16.383383Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715672:0 progress is 3/3 2025-06-24T21:08:16.383392Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-24T21:08:16.383403Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 3/3, is published: true 2025-06-24T21:08:16.383442Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519625720836476261:2474] message: TxId: 281474976715672 2025-06-24T21:08:16.383464Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-24T21:08:16.383484Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715672:0 2025-06-24T21:08:16.383494Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715672:0 2025-06-24T21:08:16.383599Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-06-24T21:08:16.383615Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715672:1 2025-06-24T21:08:16.383621Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715672:1 2025-06-24T21:08:16.383638Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-06-24T21:08:16.383646Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715672:2 2025-06-24T21:08:16.383653Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715672:2 2025-06-24T21:08:16.383686Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-24T21:08:16.383965Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:16.384042Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625720836476357:3660], Recipient [3:7519625695066670168:2148]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:16.384059Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event 
TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:16.384069Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:16.384143Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:16.384239Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625720836476261:2474] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-24T21:08:16.384435Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625720836476358:3661], Recipient [3:7519625695066670168:2148]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:16.384459Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:16.384469Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:16.384849Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625720836476269:3598], Recipient [3:7519625695066670168:2148]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:16.384879Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:16.384888Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:17.074191Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625695066670168:2148]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:17.074230Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:17.074274Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625695066670168:2148], Recipient [3:7519625695066670168:2148]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:17.074293Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-EnableSeparateQuotas >> TSchemeShardSubDomainTest::CopyRejects >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenMkDir >> TSchemeShardSubDomainTest::ConcurrentCreateSubDomainAndDescribe >> TSchemeShardSubDomainTest::RedefineErrors >> TSchemeShardSubDomainTest::RmDir >> TSchemeShardSubDomainTest::SchemeLimitsCreatePq ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::JoinWithNonPKColumnsInPredicate-UseStreamJoin [GOOD] Test command err: Trying to start YDB, gRPC: 24690, MsgBus: 22103 2025-06-24T21:07:38.457802Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625557671079909:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:38.457937Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00461d/r3tmp/tmpLC5PvI/pdisk_1.dat 2025-06-24T21:07:38.764367Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:38.764681Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625557671079890:2079] 1750799258456823 != 1750799258456826 TServer::EnableGrpc on GrpcPort 24690, node 1 2025-06-24T21:07:38.804235Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:38.804263Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:38.804270Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:38.804420Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:07:38.835287Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:38.835414Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:38.837024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22103 TClient is connected to server localhost:22103 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:39.293828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:39.321688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:39.442746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:39.550474Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:39.586992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:39.661502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:41.013513Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625570555983430:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:41.013646Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:41.295664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.321937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.348532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.375176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.401512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.473505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.510773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.563754Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625570555984090:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:41.563841Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:41.563917Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625570555984095:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:41.567451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:41.577163Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625570555984097:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:41.668516Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625570555984148:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:42.667976Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625574850951752:3606], Recipient [1:7519625557671080207:2142]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:42.668010Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:42.668025Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:42.668057Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519625574850951748:3603], Recipient [1:7519625557671080207:2142]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-06-24T21:07:42.668079Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvS ... s: 0 RangeReads: 1 PartCount: 0 RangeReadRows: 4 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:08:17.514016Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.099995s, queue# 1 2025-06-24T21:08:17.514174Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:7519625683412873640:2536], Recipient [3:7519625653348099470:2145]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037934 TableLocalId: 42 Generation: 1 Round: 0 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750799297431 LastUpdateTime: 1750799293217 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 4 RowDeletes: 0 RowReads: 0 RangeReads: 1 PartCount: 0 RangeReadRows: 1 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037934 NodeId: 3 StartTime: 1750799287490 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:08:17.514188Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:08:17.514204Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037934 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 42] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-06-24T21:08:17.514300Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037934 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 42] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750799297431 LastUpdateTime: 1750799293217 
ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 4 RowDeletes: 0 RowReads: 0 RangeReads: 1 PartCount: 0 RangeReadRows: 1 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:08:17.615405Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519625653348099470:2145]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:08:17.615466Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:08:17.615488Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 2 2025-06-24T21:08:17.615536Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 2 2025-06-24T21:08:17.615554Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 2 2025-06-24T21:08:17.615616Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 40 shard idx 72057594046644480:48 data size 0 row count 0 2025-06-24T21:08:17.615680Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037935 maps to shardIdx: 72057594046644480:48 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 40], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:08:17.615696Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037935, followerId 0 2025-06-24T21:08:17.615776Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:48 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:17.615838Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037935 2025-06-24T21:08:17.615876Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 42 shard idx 72057594046644480:49 data size 0 row count 0 2025-06-24T21:08:17.615915Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037934 maps to shardIdx: 72057594046644480:49 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 42], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:08:17.615932Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037934, followerId 0 2025-06-24T21:08:17.615968Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:49 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:17.615992Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037934 
2025-06-24T21:08:17.616039Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:17.616145Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519625653348099470:2145]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:08:17.616171Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:08:17.616188Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-24T21:08:17.616548Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [3:7519625726362548047:4718], Recipient [3:7519625653348099470:2145]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:17.616575Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:17.616590Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:08:17.617051Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:7519625683412873793:2548], Recipient [3:7519625653348099470:2145]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037937 TableLocalId: 43 Generation: 1 Round: 0 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750799293223 LastUpdateTime: 1750799293223 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 3 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 1 LocksWholeShard: 0 LocksBroken: 1 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037937 NodeId: 3 StartTime: 1750799287591 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:08:17.617069Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:08:17.617099Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037937 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 43] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-06-24T21:08:17.617191Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037937 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 43] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750799293223 LastUpdateTime: 1750799293223 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 3 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 1 LocksWholeShard: 0 LocksBroken: 1 2025-06-24T21:08:17.617213Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay 
TTxStoreTableStats on# 0.099996s, queue# 1 2025-06-24T21:08:17.623853Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [3:7519625726362548049:4719], Recipient [3:7519625653348099470:2145]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:17.623901Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:17.623917Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:08:17.624306Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:7519625683412873802:2549], Recipient [3:7519625653348099470:2145]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037938 TableLocalId: 45 Generation: 1 Round: 0 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750799297420 LastUpdateTime: 1750799293220 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 4 PartCount: 0 RangeReadRows: 3 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037938 NodeId: 3 StartTime: 1750799287592 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:08:17.624326Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:08:17.624360Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037938 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 45] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-06-24T21:08:17.624458Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037938 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 45] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750799297420 LastUpdateTime: 1750799293220 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 4 PartCount: 0 RangeReadRows: 3 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedAfterRead-UseSink [GOOD] >> TSchemeShardSubDomainTest::SimultaneousDeclare [GOOD] >> TSchemeShardSubDomainTest::DeclareAndDelete [GOOD] >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenMkDir [GOOD] >> TSchemeShardSubDomainTest::RedefineErrors [GOOD] >> TSchemeShardSubDomainTest::RmDir [GOOD] >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenDrop [GOOD] >> TSchemeShardSubDomainTest::DeleteAdd [GOOD] >> TSchemeShardSubDomainTest::ConcurrentCreateSubDomainAndDescribe [GOOD] >> TSchemeShardSubDomainTest::ColumnSchemeLimitsRejects >> TSchemeShardSubDomainTest::CreateWithNoEqualName [GOOD] >> TSchemeShardSubDomainTest::SchemeLimitsRejects >> TSchemeShardSubDomainTest::CopyRejects [GOOD] >> 
TSchemeShardSubDomainTest::ConsistentCopyRejects ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenMkDir [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:20.380747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:20.380855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.380916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:20.380955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:20.381771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:20.381810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:20.381884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.381972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:20.383745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:20.399392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:20.482180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:20.482248Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:20.496270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:20.500965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:20.501317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:20.510095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:20.511076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:20.513659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS 
hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.514700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:20.520768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.521776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:20.536068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.536150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.536293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:20.536343Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:20.536416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:20.536566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.543134Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:08:20.673146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:20.673385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.673655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:20.673706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:20.674488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:20.674583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:20.677676Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.678612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:20.678846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.678992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:20.679064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:20.679107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:20.681336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.681411Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:20.681470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:20.683423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.683474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.683523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.683593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:20.688338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:20.690634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:20.690844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:20.693808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 
2025-06-24T21:08:20.693984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:20.694033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.696055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:20.696130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.696364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:20.696462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:20.698728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.698775Z node 1 :FLAT_TX_SCHEMESHARD ... e target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:08:20.797742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-24T21:08:20.797782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-24T21:08:20.797810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-24T21:08:20.798871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:20.798979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:20.799023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:20.799074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-24T21:08:20.799126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:08:20.801105Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:20.801219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:20.801289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:20.801345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-24T21:08:20.801388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T21:08:20.801468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T21:08:20.804141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:20.805086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:08:20.805537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:08:20.805625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T21:08:20.806129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:08:20.806232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:08:20.806280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:340:2329] TestWaitNotification: OK eventTxId 101 2025-06-24T21:08:20.806813Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:20.807032Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 229us result status StatusSuccess 2025-06-24T21:08:20.807665Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:20.809281Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:20.809501Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 244us result status StatusSuccess 2025-06-24T21:08:20.810004Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "MyDir" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } 
Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:20.810613Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/MyDir" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:20.810877Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/MyDir" took 218us result status StatusSuccess 2025-06-24T21:08:20.811297Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/MyDir" PathDescription { Self { Name: "MyDir" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousDeclare [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:20.387368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# 
no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:20.387451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.387494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:20.387524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:20.387553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:20.387574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:20.387622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.387683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:20.388282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:20.394254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:20.475650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:20.475722Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:20.503498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:20.503911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:20.504153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:20.510970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:20.511134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:20.513652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.514648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:20.526693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.526934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:20.536273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: 
TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.536357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.536620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:20.536669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:20.536719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:20.536801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.543530Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:20.687804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:20.688040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.688308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:20.688368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:20.688614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:20.688684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:20.690964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.691174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:20.691367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.691437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: 
TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:20.691478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:20.691511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:20.693531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.693599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:20.693644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:20.695707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.695762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.695812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.695867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:20.699719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:20.701775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:20.701995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:20.703431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.703568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:20.703619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.703973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:20.704036Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.704213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:20.704316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:20.706683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.706737Z node 1 :FLAT_TX_SCHEMESHARD ... kSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:20.764451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 100:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:100 msg type: 269090816 2025-06-24T21:08:20.764580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 100, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 100 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000002 2025-06-24T21:08:20.764941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.765078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 100 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:20.765120Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 100:0, at tablet# 72057594046678944 2025-06-24T21:08:20.765358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 100:0 128 -> 240 2025-06-24T21:08:20.765412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 100:0, at tablet# 72057594046678944 2025-06-24T21:08:20.765569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:20.765625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:20.765672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 100 2025-06-24T21:08:20.767802Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.767847Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:20.768000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:20.768112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.768167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-24T21:08:20.768231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-06-24T21:08:20.768596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.768640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-24T21:08:20.768747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#100:0 progress is 1/1 2025-06-24T21:08:20.768781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:20.768831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#100:0 progress is 1/1 2025-06-24T21:08:20.768866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:20.768925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-24T21:08:20.768973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:20.769017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 100:0 2025-06-24T21:08:20.769065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 100:0 2025-06-24T21:08:20.769131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:08:20.769171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 100, publications: 2, subscribers: 1 2025-06-24T21:08:20.769214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-24T21:08:20.769242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-24T21:08:20.769889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 
72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:20.770006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:20.770062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-24T21:08:20.770102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T21:08:20.770142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:20.771025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:20.771117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:20.771203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-24T21:08:20.771236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:08:20.771265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:20.771343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 1 2025-06-24T21:08:20.771392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:277:2266] 2025-06-24T21:08:20.774981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-24T21:08:20.775380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-24T21:08:20.775462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-24T21:08:20.775490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:278:2267] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 100 2025-06-24T21:08:20.776011Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:20.776224Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 215us result status StatusSuccess 2025-06-24T21:08:20.778730Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeclareAndDelete [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:20.380934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:20.381049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.381123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:20.381170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default 
configuration 2025-06-24T21:08:20.383717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:20.383776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:20.383865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.383956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:20.384735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:20.399690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:20.489849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:20.489930Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:20.507935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:20.508344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:20.508555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:20.518061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:20.518239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:20.518792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.519063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:20.522111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.522327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:20.532607Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.532710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.533031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:20.533098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 
2025-06-24T21:08:20.533178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:20.533278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.540958Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:20.710958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:20.711208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.711511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:20.711588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:20.711935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:20.712030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:20.714574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.714782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:20.715019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.715093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:20.715137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:20.715200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:20.717300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.717374Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:20.717427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:20.719465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.719529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.719586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.719652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:20.723798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:20.726020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:20.726227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:20.727341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.727488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:20.727542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.727911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:20.727979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.728156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:20.728243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing 
txId 1 2025-06-24T21:08:20.730481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.730535Z node 1 :FLAT_TX_SCHEMESHARD ... LAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5298: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 2] name: USER_0 type: EPathTypeSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 101 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:20.812594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5314: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:20.812730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 128 -> 130 2025-06-24T21:08:20.812876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:20.812964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:20.814200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:20.815408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T21:08:20.816876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.816920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:20.817082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:20.817268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.817316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-24T21:08:20.817376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-24T21:08:20.817675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.817726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:417: [72057594046678944] TDeleteParts opId# 101:0 ProgressState 2025-06-24T21:08:20.817805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:20.817842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 
2025-06-24T21:08:20.817912Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:20.817946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:20.818012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-24T21:08:20.818056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:20.818107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:08:20.818152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:08:20.818230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:08:20.818272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-24T21:08:20.818325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-24T21:08:20.818363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-24T21:08:20.819067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:20.819191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:20.819241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:20.819294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-24T21:08:20.819344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:20.820017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:20.820105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:20.820134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: 
Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:20.820166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-24T21:08:20.820198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:20.820308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T21:08:20.820850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:20.820936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:20.821045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:08:20.821890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:20.821966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:20.822062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:20.825313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:20.825600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:20.825772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:20.826842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:08:20.827114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:08:20.827184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T21:08:20.827638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:08:20.827784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 
2025-06-24T21:08:20.827829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:344:2333] TestWaitNotification: OK eventTxId 101 2025-06-24T21:08:20.828352Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:20.828541Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 204us result status StatusPathDoesNotExist 2025-06-24T21:08:20.828725Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:20.381247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:20.381365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.381408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:20.381444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:20.382967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:20.383049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:20.383201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.383302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:20.384088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:20.394285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:20.485617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:20.485677Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:20.504287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:20.504668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:20.504878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:20.513156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:20.513338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:20.514046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.514618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:20.524085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.524276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:20.532823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.532909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.533170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:20.533226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:20.533290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:20.533380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.540156Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:20.668418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:20.670840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.672946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:20.673039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:20.674503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:20.674619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:20.678020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.678611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:20.678844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.678915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:20.678985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:20.679032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:20.681553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.681642Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:20.681694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:20.683894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.683949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-24T21:08:20.684010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.684080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:20.688559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:20.690632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:20.690797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:20.693801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.693983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:20.694044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.696119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:20.696199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.696419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:20.696542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:20.699493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.699547Z node 1 :FLAT_TX_SCHEMESHARD ... 
T21:08:20.918290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 2 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T21:08:20.918615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.918689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:417: [72057594046678944] TDeleteParts opId# 101:0 ProgressState 2025-06-24T21:08:20.918761Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:20.918796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:20.918835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:20.918883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:20.918926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-24T21:08:20.918969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:20.919004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:08:20.919036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:08:20.919106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:08:20.919162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-24T21:08:20.919199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-24T21:08:20.919248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-24T21:08:20.919947Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:20.920042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:20.920074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:20.920115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 
2025-06-24T21:08:20.920155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:20.920815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:20.920903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:20.920935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:20.920961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-24T21:08:20.920990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:20.921076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T21:08:20.921314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:20.921371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:20.921565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:08:20.922571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:20.922640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:20.922721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:20.925263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:20.925618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:20.927254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:20.927391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:08:20.927673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:08:20.927718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T21:08:20.928146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:08:20.928267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:08:20.928327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:345:2334] TestWaitNotification: OK eventTxId 101 2025-06-24T21:08:20.928855Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:20.929131Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 232us result status StatusPathDoesNotExist 2025-06-24T21:08:20.929373Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:08:20.929981Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:20.930188Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 207us result status StatusSuccess 2025-06-24T21:08:20.930680Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 
1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenForceDrop >> TSchemeShardSubDomainTest::SchemeDatabaseQuotaRejects [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::RedefineErrors [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:20.381261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:20.381368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.381419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:20.381473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:20.383072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:20.383136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:20.383267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.383363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, 
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:20.384187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:20.394285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:20.483475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:20.483530Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:20.496853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:20.501729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:20.501936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:20.509698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:20.511075Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:20.513641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.514660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:20.522333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.522521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:20.532530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.532612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.532735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:20.532798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:20.532895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:20.533061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.543817Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:08:20.675475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: 
"pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:20.675634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.675830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:20.675868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:20.676038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:20.676092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:20.682319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.682470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:20.682613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.682667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:20.682695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:20.682722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:20.689191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.689262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:20.689344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:20.691349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.691405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.691464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 
1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.691533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:20.695102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:20.697167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:20.697376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:20.698416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.698559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:20.698616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.698940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:20.698994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.699195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:20.699279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:20.701407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.701455Z node 1 :FLAT_TX_SCHEMESHARD ... 
o create, do next state 2025-06-24T21:08:20.991303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 108:0 2 -> 3 2025-06-24T21:08:20.993300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.993383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 108:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:20.993422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 108:0 3 -> 128 2025-06-24T21:08:20.996746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.996808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.996873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 108:0, at tablet# 72057594046678944 2025-06-24T21:08:20.996934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 108 ready parts: 1/1 2025-06-24T21:08:20.997071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 108 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:20.998768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 108:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:108 msg type: 269090816 2025-06-24T21:08:20.998896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 108, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 108 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 108 at step: 5000007 2025-06-24T21:08:20.999180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000007, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.999326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 108 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000007 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:20.999371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 108:0, at tablet# 72057594046678944 2025-06-24T21:08:20.999646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 108:0 128 -> 240 2025-06-24T21:08:20.999731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 108:0, at tablet# 72057594046678944 2025-06-24T21:08:20.999916Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:08:20.999997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 108 2025-06-24T21:08:21.001945Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:21.001998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 108, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:21.002214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:21.002288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 108, path id: 2 2025-06-24T21:08:21.002570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-24T21:08:21.002612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 108:0 ProgressState 2025-06-24T21:08:21.002719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#108:0 progress is 1/1 2025-06-24T21:08:21.002753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-24T21:08:21.002791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#108:0 progress is 1/1 2025-06-24T21:08:21.002828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-24T21:08:21.002866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 108, ready parts: 1/1, is published: false 2025-06-24T21:08:21.002964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-24T21:08:21.003018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 108:0 2025-06-24T21:08:21.003058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 108:0 2025-06-24T21:08:21.003128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-24T21:08:21.003185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 108, publications: 1, subscribers: 0 2025-06-24T21:08:21.003223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 108, [OwnerId: 72057594046678944, LocalPathId: 2], 8 2025-06-24T21:08:21.003849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 108 2025-06-24T21:08:21.003976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 108 2025-06-24T21:08:21.004030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 108 2025-06-24T21:08:21.004092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 108, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8 2025-06-24T21:08:21.004136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:08:21.004209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 108, subscribers: 0 2025-06-24T21:08:21.006894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 108 TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 2025-06-24T21:08:21.007209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 108: send EvNotifyTxCompletion 2025-06-24T21:08:21.007266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 108 2025-06-24T21:08:21.007763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 108, at schemeshard: 72057594046678944 2025-06-24T21:08:21.007849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-24T21:08:21.007889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [1:601:2553] TestWaitNotification: OK eventTxId 108 2025-06-24T21:08:21.008452Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:21.008645Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 213us result status StatusSuccess 2025-06-24T21:08:21.009066Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 6 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 
72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 6 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } StoragePools { Name: "pool-hdd-1" Kind: "hdd-1" } StoragePools { Name: "pool-hdd-2" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::RmDir [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:20.381532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:20.381663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.381733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:20.381775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:20.381834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:20.381870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:20.381973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.382071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:20.382949Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:20.399365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:20.499583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:20.499641Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:20.521510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:20.521960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:20.522185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:20.532032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:20.532330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:20.533089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.533417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:20.536911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.537148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:20.538600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.538674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.538940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:20.538994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:20.539056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:20.539176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.546549Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:20.718407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-24T21:08:20.718665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.718990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:20.719056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:20.719396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:20.719492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:20.724269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.724443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:20.724600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.724653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:20.724702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:20.724740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:20.726869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.726948Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:20.726994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:20.728912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.728969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.729027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.729087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:20.732923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:20.737139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:20.737360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:20.738436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.738619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:20.738675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.739038Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:20.739116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.739317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:20.739419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:20.741930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.741981Z node 1 :FLAT_TX_SCHEMESHARD ... 
lPathId: 1] was 1 2025-06-24T21:08:20.997362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-06-24T21:08:20.997420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 100 2025-06-24T21:08:20.999339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.999375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:20.999518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:20.999612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.999678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-24T21:08:20.999725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-06-24T21:08:20.999972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-24T21:08:21.000014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-24T21:08:21.000138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#100:0 progress is 1/1 2025-06-24T21:08:21.000196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:21.000250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#100:0 progress is 1/1 2025-06-24T21:08:21.000282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:21.000334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-24T21:08:21.000376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:21.000418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 100:0 2025-06-24T21:08:21.000453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 100:0 2025-06-24T21:08:21.000718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 
2025-06-24T21:08:21.000766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 100, publications: 2, subscribers: 1 2025-06-24T21:08:21.000799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-24T21:08:21.000828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-24T21:08:21.001430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:21.001504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:21.001536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-24T21:08:21.001564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T21:08:21.001607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:21.003192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:21.003272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:21.003294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-24T21:08:21.003314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:08:21.003336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-06-24T21:08:21.003402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 1 2025-06-24T21:08:21.003442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:578:2486] 2025-06-24T21:08:21.005606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 
2025-06-24T21:08:21.006512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-24T21:08:21.006600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-24T21:08:21.006630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:579:2487] TestWaitNotification: OK eventTxId 100 2025-06-24T21:08:21.007078Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:21.007302Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 217us result status StatusSuccess 2025-06-24T21:08:21.007718Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 Coordinators: 72075186233409547 Coordinators: 72075186233409548 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409549 Mediators: 72075186233409550 Mediators: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-24T21:08:21.010038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpRmDir Drop { Name: "USER_0" } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:21.010172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_rmdir.cpp:29: TRmDir 
Propose, path: /MyRoot/USER_0, pathId: 0, opId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:08:21.010284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 101:1, propose status:StatusPathIsNotDirectory, reason: Check failed: path: '/MyRoot/USER_0', error: path is not a directory (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeSubDomain, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-06-24T21:08:21.013618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusPathIsNotDirectory Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path is not a directory (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeSubDomain, state: EPathStateNoChanges)" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:21.013869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusPathIsNotDirectory, reason: Check failed: path: '/MyRoot/USER_0', error: path is not a directory (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeSubDomain, state: EPathStateNoChanges), operation: DROP DIRECTORY, path: /MyRoot/USER_0 TestModificationResult got TxId: 101, wait until txId: 101
>> TSchemeShardSubDomainTest::SchemeLimitsCreatePq [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateWithNoEqualName [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:20.389147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:20.389245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.389291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:20.389325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:20.389366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:20.389394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:20.389471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.389577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 
600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:20.390394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:20.392830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:20.512050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:20.512140Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:20.526197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:20.530707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:20.530930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:20.542821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:20.543093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:20.543845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.544178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:20.546932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.547110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:20.548248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.548312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.548421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:20.548468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:20.548517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:20.548659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.556464Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:08:20.712782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: 
"pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:20.712975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.713249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:20.713301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:20.713525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:20.713595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:20.716922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.717123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:20.717311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.717381Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:20.717422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:20.717454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:20.722383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.722454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:20.722497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:20.724433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.724482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.724523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-24T21:08:20.724587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:20.728235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:20.730036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:20.730211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:20.731242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.731363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:20.731414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.731749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:20.731805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.731971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:20.732085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:20.734220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.734286Z node 1 :FLAT_TX_SCHEMESHARD ... 
perationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: "USER_3" } } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:21.251216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/USER_3, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-24T21:08:21.251398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 108:1, propose status:StatusAlreadyExists, reason: Check failed: path: '/MyRoot/USER_3', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeSubDomain, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-06-24T21:08:21.253982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 108, response: Status: StatusAlreadyExists Reason: "Check failed: path: \'/MyRoot/USER_3\', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeSubDomain, state: EPathStateNoChanges)" TxId: 108 SchemeshardId: 72057594046678944 PathId: 5 PathCreateTxId: 106, at schemeshard: 72057594046678944 2025-06-24T21:08:21.254228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot, subject: , status: StatusAlreadyExists, reason: Check failed: path: '/MyRoot/USER_3', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeSubDomain, state: EPathStateNoChanges), operation: CREATE DIRECTORY, path: /MyRoot/USER_3 TestModificationResult got TxId: 108, wait until txId: 108 2025-06-24T21:08:21.254852Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:21.255079Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 233us result status StatusSuccess 2025-06-24T21:08:21.255631Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 
} } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:21.256261Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:21.256507Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1" took 214us result status StatusSuccess 2025-06-24T21:08:21.256968Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1" PathDescription { Self { Name: "USER_1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000005 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "USER_1" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: 
"!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:21.258127Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:21.258284Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_2" took 162us result status StatusSuccess 2025-06-24T21:08:21.258650Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_2" PathDescription { Self { Name: "USER_2" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 104 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:21.259375Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:21.259587Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_3" took 211us result status StatusSuccess 2025-06-24T21:08:21.259969Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_3" PathDescription { Self { Name: "USER_3" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 106 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409549 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409550 } DomainKey { SchemeShard: 72057594046678944 PathId: 5 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 5 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeleteAdd [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:20.393585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:20.393698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.393743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:20.393786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:20.393854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:20.393905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:20.393985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.394074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-24T21:08:20.394957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:20.395392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:20.490351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:20.490412Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:20.510081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:20.510507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:20.510808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:20.518899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:20.519102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:20.520014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.520322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:20.523283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.523475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:20.532685Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.532770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.533038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:20.533107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:20.533170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:20.533281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.540022Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:20.713830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:20.714089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.714348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:20.714409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:20.714655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:20.714757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:20.717145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.717341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:20.717545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.717608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:20.717651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:20.717687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:20.719745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.719817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:20.719887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:20.722089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.722145Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.722196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.722261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:20.726526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:20.728641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:20.728835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:20.729828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.729987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:20.730035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.730379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:20.730443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.730659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:20.730739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:20.732903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.732955Z node 1 :FLAT_TX_SCHEMESHARD ... 
3] 2025-06-24T21:08:21.219165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:21.219202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-24T21:08:21.219239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 3 2025-06-24T21:08:21.219527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:08:21.219571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:08:21.219667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:08:21.219698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:08:21.219735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:08:21.219772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:08:21.219814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-24T21:08:21.219857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:08:21.219922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:08:21.219956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:08:21.220175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 9 2025-06-24T21:08:21.220221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-24T21:08:21.220252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-24T21:08:21.220285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-24T21:08:21.220910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:08:21.220994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:08:21.221027Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:08:21.221065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-24T21:08:21.221110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:21.221744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:08:21.221835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:08:21.221875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:08:21.221928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-24T21:08:21.221962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 8 2025-06-24T21:08:21.222024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-24T21:08:21.226943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:08:21.228596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T21:08:21.228882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:08:21.228933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:08:21.229602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:08:21.229726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:08:21.229763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:982:2802] TestWaitNotification: OK eventTxId 102 2025-06-24T21:08:21.230399Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at 
schemeshard: 72057594046678944 2025-06-24T21:08:21.230643Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 268us result status StatusSuccess 2025-06-24T21:08:21.231125Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409552 Coordinators: 72075186233409553 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 Mediators: 72075186233409556 Mediators: 72075186233409557 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:21.231729Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:21.231934Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 180us result status StatusSuccess 2025-06-24T21:08:21.232388Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 102 
CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpMultishardIndex::DataColumnWrite-UseSink [GOOD] >> KqpMultishardIndex::DuplicateUpsert >> KqpIndexes::SecondaryIndexSelectUsingScripting [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeDatabaseQuotaRejects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:20.380921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:20.381027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.381069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:20.381109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:20.381853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:20.381911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:20.381993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.382075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, 
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:20.382849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:20.399310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:20.546574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:20.546627Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:20.563634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:20.564009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:20.564194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:20.574336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:20.574558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:20.575285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.575595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:20.578971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.579210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:20.580405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.580472Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.580725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:20.580778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:20.580831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:20.580926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.588128Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:20.748955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { 
Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:20.749184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.749473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:20.749526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:20.749779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:20.749854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:20.752030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.752211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:20.752357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.752400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:20.752429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:20.752453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:20.753800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.753852Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:20.753883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:20.755208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.755244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.755281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: 
NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.755324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:20.764645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:20.766806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:20.767022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:20.768005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.768154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:20.768199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.768529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:20.768593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.768749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:20.768826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:20.771650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.771699Z node 1 :FLAT_TX_SCHEMESHARD ... 
6678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:21.542041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:21.542064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:21.543968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-24T21:08:21.544527Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 5 TxId_Deprecated: 5 TabletID: 72075186233409550 Forgetting tablet 72075186233409550 2025-06-24T21:08:21.546791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-06-24T21:08:21.547195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:08:21.548450Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-24T21:08:21.548759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:21.549001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:08:21.552252Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 Forgetting tablet 72075186233409546 2025-06-24T21:08:21.553370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-24T21:08:21.553632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:08:21.554262Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409548 2025-06-24T21:08:21.555384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T21:08:21.555646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:08:21.556595Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 
TabletID: 72075186233409549 Forgetting tablet 72075186233409547 2025-06-24T21:08:21.557399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-24T21:08:21.557594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 Forgetting tablet 72075186233409549 2025-06-24T21:08:21.558838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:21.558892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:08:21.558989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:21.559273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-24T21:08:21.559607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-24T21:08:21.560237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:21.560302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:21.560415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:08:21.562425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-24T21:08:21.562489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-24T21:08:21.562609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:08:21.562629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T21:08:21.562702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T21:08:21.562721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-24T21:08:21.567212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:08:21.567300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T21:08:21.567418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-24T21:08:21.567472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-24T21:08:21.567662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:21.567788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:21.567882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:21.567922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:21.568008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:21.569885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-24T21:08:21.570268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-24T21:08:21.570311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-24T21:08:21.570946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-24T21:08:21.571053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-24T21:08:21.571088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:912:2810] TestWaitNotification: OK eventTxId 106 2025-06-24T21:08:21.571869Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:21.572105Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 244us result status StatusSuccess 2025-06-24T21:08:21.572577Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedAfterRead-UseSink [GOOD] Test command err: 2025-06-24T21:07:07.402073Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:07:07.402404Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:07:07.402525Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047c0/r3tmp/tmpNHm2h1/pdisk_1.dat 2025-06-24T21:07:07.766387Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:07:07.774605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:07.815665Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:07.820287Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799225356325 != 1750799225356329 2025-06-24T21:07:07.902100Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=N2NjNzkyYjAtYWZmZjcwMDktNDQ4MGRhY2ItMmUwZjM1MGU=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id N2NjNzkyYjAtYWZmZjcwMDktNDQ4MGRhY2ItMmUwZjM1MGU= 2025-06-24T21:07:07.903000Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=N2NjNzkyYjAtYWZmZjcwMDktNDQ4MGRhY2ItMmUwZjM1MGU=, ActorId: [1:578:2500], ActorState: unknown state, session actor bootstrapped 2025-06-24T21:07:07.906672Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=N2NjNzkyYjAtYWZmZjcwMDktNDQ4MGRhY2ItMmUwZjM1MGU=, ActorId: [1:578:2500], ActorState: ReadyState, TraceId: 01jyhw87023zc0qc6q51x2t0hq, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: CREATE TABLE `/Root/table1` (key int, value int, PRIMARY KEY (key)); rpcActor: [0:0:0] database: databaseId: /Root pool id: default 2025-06-24T21:07:08.195864Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:582:2503], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:08.196031Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:08.290562Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:08.290696Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:08.293289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:08.310531Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:08.334713Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:645:2537], Recipient [1:650:2540]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:07:08.335596Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:645:2537], Recipient [1:650:2540]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:07:08.335927Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:650:2540] 2025-06-24T21:07:08.336097Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:07:08.367699Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:645:2537], Recipient [1:650:2540]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:07:08.368303Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:07:08.368407Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:07:08.369779Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:07:08.369854Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:07:08.369894Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:07:08.370186Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:07:08.370301Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:07:08.370371Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:664:2540] in generation 1 2025-06-24T21:07:08.370722Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:07:08.404599Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:07:08.404789Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:07:08.404885Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:666:2549] 2025-06-24T21:07:08.404914Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:07:08.404943Z node 1 
:TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:07:08.404975Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:07:08.405179Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:650:2540], Recipient [1:650:2540]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:07:08.405219Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:07:08.405364Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:07:08.405436Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:07:08.405513Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:07:08.405544Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:07:08.405590Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:07:08.405684Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:07:08.405716Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:07:08.405742Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:07:08.405778Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:07:08.437965Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:669:2551], Recipient [1:650:2540]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:08.438023Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:08.438074Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:641:2535], serverId# [1:669:2551], sessionId# [0:0:0] 2025-06-24T21:07:08.438195Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:669:2551] 2025-06-24T21:07:08.438236Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:07:08.438416Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:07:08.438696Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:07:08.438786Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:07:08.438905Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:07:08.438949Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: 
Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:07:08.438997Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T21:07:08.439039Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T21:07:08.439070Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:07:08.439330Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T21:07:08.439361Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T21:07:08.439388Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:07:08.439415Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:07:08.439447Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T21:07:08.439472Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] ... d_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:08:19.846457Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435074, Sender [13:863:2691], Recipient [13:863:2691]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-24T21:08:19.846494Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-24T21:08:19.846583Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:08:19.846775Z node 13 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-24T21:08:19.846864Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715666] at 72075186224037888 on unit CheckDataTx 2025-06-24T21:08:19.846914Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715666] at 72075186224037888 is Executed 2025-06-24T21:08:19.846950Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715666] at 72075186224037888 executing on unit CheckDataTx 2025-06-24T21:08:19.846982Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715666] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T21:08:19.847017Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715666] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T21:08:19.847060Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v400/18446744073709551615 ImmediateWriteEdge# v500/18446744073709551615 
ImmediateWriteEdgeReplied# v500/18446744073709551615 2025-06-24T21:08:19.847115Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:281474976715666] at 72075186224037888 2025-06-24T21:08:19.847170Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715666] at 72075186224037888 is Executed 2025-06-24T21:08:19.847200Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715666] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:08:19.847228Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715666] at 72075186224037888 to execution unit ExecuteKqpDataTx 2025-06-24T21:08:19.847255Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715666] at 72075186224037888 on unit ExecuteKqpDataTx 2025-06-24T21:08:19.847329Z node 13 :TX_DATASHARD TRACE: execute_kqp_data_tx_unit.cpp:236: Operation [0:281474976715666] (execute_kqp_data_tx) at 72075186224037888 set memory limit 4193448 2025-06-24T21:08:19.847469Z node 13 :TX_DATASHARD TRACE: datashard_kqp.cpp:787: KqpEraseLock LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: false 2025-06-24T21:08:19.847583Z node 13 :TX_DATASHARD TRACE: execute_kqp_data_tx_unit.cpp:482: add locks to result: 0 2025-06-24T21:08:19.847693Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715666] at 72075186224037888 is Executed 2025-06-24T21:08:19.847726Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715666] at 72075186224037888 executing on unit ExecuteKqpDataTx 2025-06-24T21:08:19.847756Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715666] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:08:19.847783Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715666] at 72075186224037888 on unit FinishPropose 2025-06-24T21:08:19.847847Z node 13 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715666 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-24T21:08:19.847965Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715666] at 72075186224037888 is DelayComplete 2025-06-24T21:08:19.847998Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715666] at 72075186224037888 executing on unit FinishPropose 2025-06-24T21:08:19.848033Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715666] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:08:19.848065Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715666] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:08:19.848113Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715666] at 72075186224037888 is Executed 2025-06-24T21:08:19.848145Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715666] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:08:19.848175Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715666] at 72075186224037888 has finished 
2025-06-24T21:08:19.848238Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:08:19.848274Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715666] at 72075186224037888 on unit FinishPropose 2025-06-24T21:08:19.848318Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:08:19.849915Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [13:61:2108], Recipient [13:863:2691]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715661 LockNode: 13 Status: STATUS_NOT_FOUND 2025-06-24T21:08:20.100913Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyhwad8g729pahqjyq4jt36v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTBiYTdmNzMtMjBmNTIzMDMtNmRhZDgzMGYtOTdiNjcwMzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:08:20.103506Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [13:929:2735], Recipient [13:863:2691]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2025-06-24T21:08:20.103824Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-24T21:08:20.103924Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v400/18446744073709551615 ImmediateWriteEdge# v500/18446744073709551615 ImmediateWriteEdgeReplied# v500/18446744073709551615 2025-06-24T21:08:20.104012Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v500/18446744073709551615 2025-06-24T21:08:20.104134Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CheckRead 2025-06-24T21:08:20.104299Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-24T21:08:20.104380Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CheckRead 2025-06-24T21:08:20.104453Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T21:08:20.104518Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T21:08:20.104585Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037888 2025-06-24T21:08:20.104650Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-24T21:08:20.104690Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:08:20.104721Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit ExecuteRead 
2025-06-24T21:08:20.104751Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit ExecuteRead 2025-06-24T21:08:20.104942Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2025-06-24T21:08:20.105310Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[13:929:2735], 0} after executionsCount# 1 2025-06-24T21:08:20.105411Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[13:929:2735], 0} sends rowCount# 2, bytes# 64, quota rows left# 999, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T21:08:20.105545Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[13:929:2735], 0} finished in read 2025-06-24T21:08:20.105651Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-24T21:08:20.105684Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T21:08:20.105716Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:08:20.105746Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:08:20.105803Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-24T21:08:20.105841Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:08:20.105914Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037888 has finished 2025-06-24T21:08:20.105993Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T21:08:20.106169Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-24T21:08:20.107384Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [13:929:2735], Recipient [13:863:2691]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T21:08:20.107484Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 11 } }, { items { uint32_value: 2 } items { uint32_value: 22 } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeLimitsCreatePq [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:20.380926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: 
BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:20.381026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.381068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:20.381105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:20.381798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:20.381840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:20.381934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.382022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:20.382833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:20.399244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:20.486818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:20.486877Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:20.503296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:20.503571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:20.503691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:20.511126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:20.511325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:20.513612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.514639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:20.520834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.521771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:20.532655Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.532752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.533055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:20.533116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:20.533177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:20.533271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.543885Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:20.696072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:20.696275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.696538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:20.696586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:20.696865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:20.696940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:20.700429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.700597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:20.700777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.700837Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:20.700876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:20.700910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:20.708139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.708238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:20.708289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:20.710341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.710397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.710453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.710510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:20.714186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:20.717182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:20.717373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:20.718551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.718701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:20.718747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.719005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change 
state for txid 1:0 128 -> 240 2025-06-24T21:08:20.719045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.719209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:20.719282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:20.721567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.721617Z node 1 :FLAT_TX_SCHEMESHARD ... esult> execute, operationId: 104:0, at schemeshard: 72057594046678944, message: TabletId: 72075186233409551 TxId: 104 Status: OK 2025-06-24T21:08:21.547831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:643: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionAttachResult triggers early, at schemeshard: 72057594046678944 message# TabletId: 72075186233409551 TxId: 104 Status: OK 2025-06-24T21:08:21.547876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:648: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionAttachResult CollectPQConfigChanged: false 2025-06-24T21:08:21.547916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:754: NPQState::TPropose operationId# 104:0 can't persist state: ShardsInProgress is not empty, remain: 2 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-24T21:08:21.554299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:08:21.554464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 FAKE_COORDINATOR: Erasing txId 104 2025-06-24T21:08:21.651226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409550, partId: 0 2025-06-24T21:08:21.651410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409550 Status: COMPLETE TxId: 104 Step: 5000003 2025-06-24T21:08:21.651508Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:624: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409550 Status: COMPLETE TxId: 104 Step: 5000003 2025-06-24T21:08:21.651583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:265: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:5, shard: 72075186233409550, left await: 1, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:21.651634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:629: NPQState::TPropose 
operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: false 2025-06-24T21:08:21.651706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:754: NPQState::TPropose operationId# 104:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-06-24T21:08:21.652648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409551, partId: 0 2025-06-24T21:08:21.652795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409551 Status: COMPLETE TxId: 104 Step: 5000003 2025-06-24T21:08:21.652852Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:624: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409551 Status: COMPLETE TxId: 104 Step: 5000003 2025-06-24T21:08:21.652901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:265: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:6, shard: 72075186233409551, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:21.652932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:629: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-06-24T21:08:21.653119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 104:0 128 -> 240 2025-06-24T21:08:21.653319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:21.653415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-24T21:08:21.658361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:08:21.658540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:08:21.658823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:21.658874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:21.659105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:08:21.659344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:21.659384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:338:2312], at schemeshard: 72057594046678944, txId: 104, path id: 1 
2025-06-24T21:08:21.659428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:338:2312], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-24T21:08:21.659914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:08:21.659969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-24T21:08:21.660154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:08:21.660208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:08:21.660250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:08:21.660284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:08:21.660322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-24T21:08:21.660372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:08:21.660417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:0 2025-06-24T21:08:21.660454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:0 2025-06-24T21:08:21.660625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 6 2025-06-24T21:08:21.660673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-06-24T21:08:21.660713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-24T21:08:21.660750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-24T21:08:21.661803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:08:21.661916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:08:21.661950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:08:21.661991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-24T21:08:21.662048Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T21:08:21.663166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:08:21.663257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:08:21.663558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:08:21.663604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-24T21:08:21.663655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-24T21:08:21.663730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-24T21:08:21.675441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:08:21.675960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenForceDrop [GOOD] >> TSchemeShardSubDomainTest::ConsistentCopyRejects [GOOD] >> TSchemeShardSubDomainTest::ColumnSchemeLimitsRejects [GOOD] >> TStoragePoolsQuotasTest::DifferentQuotasInteraction >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-false ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenForceDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:22.255509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:22.255621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:22.255667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:22.255703Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:22.255750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:22.255784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:22.255839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:22.255970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:22.256669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:22.257007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:22.339132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:22.339334Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:22.360047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:22.360567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:22.360785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:22.369529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:22.369739Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:22.370428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:22.370711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:22.373684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:22.373926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:22.374982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:22.375038Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:22.375308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:22.375353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, 
schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:22.375394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:22.375475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:22.382171Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:22.519830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:22.520185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:22.520482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:22.520521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:22.520768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:22.520831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:22.524036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:22.524258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:22.524472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:22.524551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:22.524611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:22.524649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:22.527215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-24T21:08:22.527311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:22.527357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:22.529214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:22.529262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:22.529316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:22.529384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:22.541362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:22.543660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:22.543883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:22.544873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:22.545008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:22.545061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:22.545439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:22.545502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:22.545686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:22.545766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 
1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:22.548119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:22.548164Z node 1 :FLAT_TX_SCHEMESHARD ... T21:08:22.644162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 2 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T21:08:22.644452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:08:22.644505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:417: [72057594046678944] TDeleteParts opId# 101:0 ProgressState 2025-06-24T21:08:22.644606Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:22.644643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:22.644679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:22.644709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:22.644751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-24T21:08:22.644792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:22.644824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:08:22.644856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:08:22.644963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:08:22.645018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-24T21:08:22.645048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-24T21:08:22.645080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-24T21:08:22.645729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:22.645820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:22.645852Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication 
in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:22.645893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-24T21:08:22.645964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:22.646640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:22.646724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:22.646758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:22.646790Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-24T21:08:22.646828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:22.646908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T21:08:22.647163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:22.647210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:22.647314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:08:22.648340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:22.648393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:22.648463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:22.651057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:22.651411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 
2025-06-24T21:08:22.652836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:22.652933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:08:22.653146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:08:22.653194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T21:08:22.653633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:08:22.653727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:08:22.653759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:345:2334] TestWaitNotification: OK eventTxId 101 2025-06-24T21:08:22.654304Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:22.654511Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 230us result status StatusPathDoesNotExist 2025-06-24T21:08:22.654702Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:08:22.655214Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:22.655378Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 192us result status StatusSuccess 2025-06-24T21:08:22.655831Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 
72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::LS >> TSchemeShardSubDomainTest::SimultaneousCreateTenantDirTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SecondaryIndexSelectUsingScripting [GOOD] Test command err: Trying to start YDB, gRPC: 19513, MsgBus: 18746 2025-06-24T21:07:58.553607Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625643630549644:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:58.564019Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045ac/r3tmp/tmp97r4kp/pdisk_1.dat 2025-06-24T21:07:58.998198Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:59.013788Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:59.013883Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:59.017342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19513, node 1 2025-06-24T21:07:59.102414Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:59.102440Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:59.102451Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T21:07:59.102573Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18746 TClient is connected to server localhost:18746 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:07:59.556694Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:59.684277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:59.706754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:07:59.723974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:59.882328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:00.028366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:00.100180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:01.813501Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625656515453136:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:01.813598Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:02.160262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:02.195006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:02.271572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:02.309285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:02.346760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:02.429764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:02.512310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:02.604191Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625660810421098:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:02.604286Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:02.604895Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625660810421103:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:02.610198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:02.622176Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625660810421105:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:02.725278Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625660810421156:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:03.551264Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625643630549644:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:03.551332Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:03.961016Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625665105388729:3601], Recipient [1:7519625643630549931:2149]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:03.961045Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:03.961054Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 7205759404664 ... anged, operationId: 281474976715672:2, shardIdx: 72057594046644480:36, shard: 72075186224037923, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-24T21:08:21.041150Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-24T21:08:21.041163Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 281474976715672:2, datashard: 72075186224037923, at schemeshard: 72057594046644480 2025-06-24T21:08:21.041180Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715672:2 129 -> 240 2025-06-24T21:08:21.041259Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:21.041423Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-24T21:08:21.041444Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:21.041537Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-24T21:08:21.041550Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:21.041843Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-24T21:08:21.041860Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:21.041870Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: 
Activate send for 281474976715672:0 2025-06-24T21:08:21.041941Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625743657954714:2480] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-24T21:08:21.042022Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-24T21:08:21.042045Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:21.042053Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715672:2 2025-06-24T21:08:21.042080Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625743657954716:2481] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-24T21:08:21.042143Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [3:7519625717888148574:2149], Recipient [3:7519625717888148574:2149]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T21:08:21.042160Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T21:08:21.042195Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-24T21:08:21.042213Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976715672:0 ProgressState 2025-06-24T21:08:21.042283Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:21.042308Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715672:0 progress is 2/3 2025-06-24T21:08:21.042321Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-24T21:08:21.042335Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715672:0 progress is 2/3 2025-06-24T21:08:21.042344Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-24T21:08:21.042357Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 2/3, is published: true 2025-06-24T21:08:21.042535Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [3:7519625717888148574:2149], Recipient [3:7519625717888148574:2149]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T21:08:21.042555Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T21:08:21.042578Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-24T21:08:21.042591Z node 3 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976715672:2 ProgressState 2025-06-24T21:08:21.042634Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:21.042644Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715672:2 progress is 3/3 2025-06-24T21:08:21.042651Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-24T21:08:21.042664Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715672:2 progress is 3/3 2025-06-24T21:08:21.042671Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-24T21:08:21.042685Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 3/3, is published: true 2025-06-24T21:08:21.042721Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519625739362987389:2478] message: TxId: 281474976715672 2025-06-24T21:08:21.042739Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-24T21:08:21.042762Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715672:0 2025-06-24T21:08:21.042771Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715672:0 2025-06-24T21:08:21.042890Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-06-24T21:08:21.042917Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715672:1 2025-06-24T21:08:21.042924Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715672:1 2025-06-24T21:08:21.042938Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-06-24T21:08:21.042945Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715672:2 2025-06-24T21:08:21.042951Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715672:2 2025-06-24T21:08:21.042980Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-24T21:08:21.043347Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:21.043567Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:21.043634Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625739362987389:2478] msg type: 271124998 msg: 
NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-24T21:08:21.043795Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625743657954786:3676], Recipient [3:7519625717888148574:2149]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:21.043817Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:21.043828Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:21.043848Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625743657954787:3677], Recipient [3:7519625717888148574:2149]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:21.043855Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:21.043860Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:21.043974Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625739362987400:3612], Recipient [3:7519625717888148574:2149]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:21.043994Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:21.044002Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:21.594738Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625717888148574:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:21.594792Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:21.594846Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625717888148574:2149], Recipient [3:7519625717888148574:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:21.594867Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> TSchemeShardSubDomainTest::SimultaneousDefineAndCreateTable >> TSchemeShardSubDomainTest::RestartAtInFly ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::ConsistentCopyRejects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:20.386884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:20.386982Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.387028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:20.387063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:20.387110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:20.387163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:20.387256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.387338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:20.388124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:20.394307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:20.485102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:20.485173Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:20.498195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:20.501733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:20.501991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:20.509212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:20.511081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:20.513659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.514654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:20.520644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.521738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:20.532824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.532926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: 
TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.533111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:20.533172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:20.533233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:20.533393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.543053Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:08:20.696044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:20.696258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.696564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:20.696615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:20.696850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:20.696925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:20.699343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.699558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:20.699786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.699865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:20.699902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:20.699935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:20.701848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.701929Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:20.701970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:20.703626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.703682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.703729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.703788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:20.721733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:20.723770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:20.723968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:20.725080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.725208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:20.725274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.725583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:20.725636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-24T21:08:20.725809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:20.725927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:20.728160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.728223Z node 1 :FLAT_TX_SCHEMESHARD ... r { TxId: 106 Name: CopyTableBarrier }, at tablet# 72057594046678944 2025-06-24T21:08:22.844171Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 106:0 240 -> 240 2025-06-24T21:08:22.849331Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-24T21:08:22.849409Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 106:0 ProgressState 2025-06-24T21:08:22.849535Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#106:0 progress is 1/1 2025-06-24T21:08:22.849584Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T21:08:22.849630Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#106:0 progress is 1/1 2025-06-24T21:08:22.849669Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T21:08:22.849715Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: true 2025-06-24T21:08:22.849804Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:642:2562] message: TxId: 106 2025-06-24T21:08:22.849869Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T21:08:22.849939Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 106:0 2025-06-24T21:08:22.849978Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 106:0 2025-06-24T21:08:22.850163Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-24T21:08:22.850224Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:08:22.852531Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-24T21:08:22.852592Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [2:819:2714] TestWaitNotification: OK eventTxId 106 2025-06-24T21:08:22.853407Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme 
DoExecute, record: Path: "/MyRoot/USER_0/table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:22.853652Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table" took 295us result status StatusSuccess 2025-06-24T21:08:22.854139Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table" PathDescription { Self { Name: "table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:22.854902Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dst" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:22.855200Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path 
"/MyRoot/USER_0/dst" took 257us result status StatusSuccess 2025-06-24T21:08:22.855678Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dst" PathDescription { Self { Name: "dst" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 106 CreateStep: 200 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "dst" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 6 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:22.856445Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:22.856636Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 213us result status StatusSuccess 2025-06-24T21:08:22.857142Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true 
CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "dst" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 106 CreateStep: 200 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCopying Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-true >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomain ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::ColumnSchemeLimitsRejects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:20.381144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:20.381213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.381245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:20.381274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 
2025-06-24T21:08:20.382102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:20.382137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:20.382212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.382278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:20.382875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:20.394283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:20.538778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:20.538856Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:20.553605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:20.558200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:20.558411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:20.566918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:20.567184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:20.567904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.568253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:20.570897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.571087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:20.572303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.572368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.572481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:20.572532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:20.572588Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:20.572745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.580164Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:08:20.740738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:20.740921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.741192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:20.741245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:20.741479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:20.741548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:20.743931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.744130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:20.744382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.744482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:20.744524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:20.744559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:20.746577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.746634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: 
NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:20.746677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:20.748487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.748541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.748580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.748641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:20.752477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:20.754353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:20.754617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:20.755652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.755777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:20.755821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.756176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:20.756249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.756436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:20.756511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:20.760057Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.760107Z node 1 :FLAT_TX_SCHEMESHARD ... : schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 108 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 8589936750 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:22.842058Z node 2 :FLAT_TX_SCHEMESHARD INFO: alter_store.cpp:199: TAlterOlapStore TPropose operationId# 108:0 HandleReply TEvOperationPlan at tablet: 72057594046678944, stepId: 5000004 2025-06-24T21:08:22.842230Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 108:0 128 -> 129 2025-06-24T21:08:22.842447Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:22.842508Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-24T21:08:22.843092Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186233409549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=108;fline=tx_controller.cpp:215;event=finished_tx;tx_id=108; FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000004 2025-06-24T21:08:22.844993Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:22.845047Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 108, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:22.845249Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 108, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-24T21:08:22.845381Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:22.845426Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:340:2314], at schemeshard: 72057594046678944, txId: 108, path id: 1 2025-06-24T21:08:22.845526Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:340:2314], at schemeshard: 72057594046678944, txId: 108, path id: 5 2025-06-24T21:08:22.845989Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-24T21:08:22.846040Z node 2 :FLAT_TX_SCHEMESHARD INFO: alter_store.cpp:305: TAlterOlapStore TProposedWaitParts operationId# 108:0 ProgressState at tablet: 72057594046678944 2025-06-24T21:08:22.846107Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: alter_store.cpp:332: TAlterOlapStore TProposedWaitParts operationId# 108:0 ProgressState wait for NotifyTxCompletionResult tabletId: 72075186233409549 2025-06-24T21:08:22.846980Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 8 PathOwnerId: 
72057594046678944, cookie: 108 2025-06-24T21:08:22.847088Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 108 2025-06-24T21:08:22.847125Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 108 2025-06-24T21:08:22.847187Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 108, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-24T21:08:22.847253Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T21:08:22.848147Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 5 Version: 5 PathOwnerId: 72057594046678944, cookie: 108 2025-06-24T21:08:22.848225Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 5 Version: 5 PathOwnerId: 72057594046678944, cookie: 108 2025-06-24T21:08:22.848252Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 108 2025-06-24T21:08:22.848299Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 108, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 5 2025-06-24T21:08:22.848330Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-24T21:08:22.848398Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 108, ready parts: 0/1, is published: true 2025-06-24T21:08:22.850320Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 108:0 from tablet: 72057594046678944 to tablet: 72075186233409549 cookie: 72057594046678944:4 msg type: 275382275 2025-06-24T21:08:22.851861Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 108 2025-06-24T21:08:22.852817Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 108 2025-06-24T21:08:22.867603Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6226: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409549 TxId: 108 2025-06-24T21:08:22.867683Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 108, tablet: 72075186233409549, partId: 0 2025-06-24T21:08:22.867850Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 108:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409549 TxId: 108 2025-06-24T21:08:22.867912Z node 2 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 108:0 129 -> 240 FAKE_COORDINATOR: Erasing txId 108 2025-06-24T21:08:22.870468Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-24T21:08:22.870688Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-24T21:08:22.870739Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 108:0 ProgressState 2025-06-24T21:08:22.870862Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#108:0 progress is 1/1 2025-06-24T21:08:22.870902Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-24T21:08:22.870946Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#108:0 progress is 1/1 2025-06-24T21:08:22.870979Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-24T21:08:22.871020Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 108, ready parts: 1/1, is published: true 2025-06-24T21:08:22.871098Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:500:2446] message: TxId: 108 2025-06-24T21:08:22.871272Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-24T21:08:22.871327Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 108:0 2025-06-24T21:08:22.871364Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 108:0 2025-06-24T21:08:22.871569Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-24T21:08:22.874226Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-24T21:08:22.874289Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [2:895:2802] TestWaitNotification: OK eventTxId 108 TestModificationResults wait txId: 109 2025-06-24T21:08:22.877501Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterColumnStore AlterColumnStore { Name: "OlapStore1" AlterSchemaPresets { Name: "default" AlterSchema { AddColumns { Name: "comment2" Type: "Utf8" } } } } } TxId: 109 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:22.877689Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: alter_store.cpp:465: TAlterOlapStore Propose, path: /MyRoot/OlapStore1, opId: 109:0, at schemeshard: 72057594046678944 2025-06-24T21:08:22.878053Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 109:1, propose status:StatusSchemeError, reason: Too many columns. new: 4. 
Limit: 3, at schemeshard: 72057594046678944 2025-06-24T21:08:22.880247Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 109, response: Status: StatusSchemeError Reason: "Too many columns. new: 4. Limit: 3" TxId: 109 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:22.880444Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 109, database: /MyRoot, subject: , status: StatusSchemeError, reason: Too many columns. new: 4. Limit: 3, operation: ALTER COLUMN STORE, path: /MyRoot/OlapStore1 TestModificationResult got TxId: 109, wait until txId: 109 TestWaitNotification wait txId: 109 2025-06-24T21:08:22.880907Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 109: send EvNotifyTxCompletion 2025-06-24T21:08:22.880940Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 109 2025-06-24T21:08:22.881439Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 109, at schemeshard: 72057594046678944 2025-06-24T21:08:22.881514Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 109: got EvNotifyTxCompletionResult 2025-06-24T21:08:22.881544Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 109: satisfy waiter [2:930:2837] TestWaitNotification: OK eventTxId 109 >> KqpMultishardIndex::CheckPushTopSort [GOOD] >> KqpIndexes::DuplicateUpsertInterleaveParams-UseSink [GOOD] >> TSchemeShardSubDomainTest::CreateDropSolomon >> TSchemeShardSubDomainTest::CreateAndWait >> TSchemeShardSubDomainTest::CreateSubDomainsInSeparateDir >> KqpMultishardIndex::DataColumnWrite+UseSink [FAIL] >> KqpMultishardIndex::DataColumnSelect >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-false [GOOD] >> TSchemeShardSubDomainTest::LS [GOOD] >> KqpVectorIndexes::SimpleVectorIndexOrderByCosineDistanceWithCover-Nullable [GOOD] >> TSchemeShardSubDomainTest::RestartAtInFly [GOOD] >> TSchemeShardSubDomainTest::SimultaneousDefineAndCreateTable [GOOD] >> TSchemeShardSubDomainTest::SimultaneousCreateTenantDirTable [GOOD] >> DataShardSnapshots::LockedWriteWithPendingVolatileCommit+UseSink [GOOD] >> DataShardSnapshots::LockedWriteWithPendingVolatileCommit-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:24.043548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:24.043647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:24.043682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: 
StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:24.043715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:24.043752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:24.043780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:24.043840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:24.043920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:24.044631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:24.044942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:24.125890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:24.125963Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:24.142742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:24.143223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:24.143413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:24.151794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:24.151997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:24.152607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.152910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:24.155869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.156068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:24.157384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:24.157456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.157705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:24.157755Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:24.157799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:24.157883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.164853Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:24.293755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:24.294063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.294354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:24.294401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:24.294694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:24.294763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:24.297376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.297571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:24.297764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.297817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:24.297877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:24.297927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:24.300129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.300210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:24.300260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:24.302510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.302566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.302610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.302681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:24.312808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:24.314902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:24.315089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:24.316203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.316340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:24.316387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.316683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:24.316738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.316916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:24.316994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:24.319030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:24.319072Z node 1 :FLAT_TX_SCHEMESHARD ... ons { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:24.366540Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-06-24T21:08:24.366800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 128 -> 240 2025-06-24T21:08:24.366854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-06-24T21:08:24.367016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:24.367081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:24.367129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T21:08:24.369035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:24.369086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:24.369237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:24.369325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.369355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-24T21:08:24.369425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-24T21:08:24.369499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.369534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:08:24.369624Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:24.369660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:24.369698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:24.369726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:24.369759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-24T21:08:24.369796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:24.369851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:08:24.369887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:08:24.369971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:08:24.370020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-24T21:08:24.370057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-24T21:08:24.370083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-24T21:08:24.371261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:24.371350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:24.371391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:24.371439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T21:08:24.371487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:24.372081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:24.372171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:24.372208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:24.372263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:08:24.372318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:24.372381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T21:08:24.375585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:24.376073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-24T21:08:24.379223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "SomeDatabase" TimeCastBucketsPerMediator: 2 DatabaseQuotas { storage_quotas { unit_kind: "nonexistent_storage_kind" data_size_hard_quota: 1 } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:24.379401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: /MyRoot/SomeDatabase, opId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.379571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain /MyRoot/SomeDatabase has the specified kinds. Existing storage kinds are: pool-kind-1, pool-kind-2, at schemeshard: 72057594046678944 2025-06-24T21:08:24.381751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain /MyRoot/SomeDatabase has the specified kinds. Existing storage kinds are: pool-kind-1, pool-kind-2" TxId: 102 SchemeshardId: 72057594046678944 PathId: 2, at schemeshard: 72057594046678944 2025-06-24T21:08:24.381995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain /MyRoot/SomeDatabase has the specified kinds. 
Existing storage kinds are: pool-kind-1, pool-kind-2, operation: ALTER DATABASE, path: /MyRoot/SomeDatabase TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 2025-06-24T21:08:24.382244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:08:24.382301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-24T21:08:24.382382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:08:24.382402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:08:24.382955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:08:24.383030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:08:24.383078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:317:2306] 2025-06-24T21:08:24.383252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:08:24.383334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:08:24.383356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:317:2306] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomain [GOOD] >> TSchemeShardSubDomainTest::SimultaneousCreateForceDrop >> TSchemeShardSubDomainTest::CreateForceDropSolomon ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::LS [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:24.139617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:24.139725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:24.139763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:24.139840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:24.139883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, 
limit 10000 2025-06-24T21:08:24.139908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:24.139972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:24.140064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:24.140824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:24.141182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:24.227033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:24.227093Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:24.243389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:24.243821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:24.244019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:24.251638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:24.251843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:24.252476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.252798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:24.255688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.255900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:24.257024Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:24.257084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.257330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:24.257377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:24.257419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:24.257500Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.264338Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:24.416642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:24.416844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.417110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:24.417159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:24.417446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:24.417519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:24.422665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.422865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:24.423053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.423108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:24.423170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:24.423207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:24.428743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.428829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:24.428886Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:24.431025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.431077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.431166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.431237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:24.434919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:24.437135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:24.437318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:24.438307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.438470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:24.438520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.438821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:24.438873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.439064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:24.439137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:24.441356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-24T21:08:24.441405Z node 1 :FLAT_TX_SCHEMESHARD ... chemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:24.552363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.552397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-24T21:08:24.552460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-06-24T21:08:24.552912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.552952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-24T21:08:24.553030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#100:0 progress is 1/1 2025-06-24T21:08:24.553112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:24.553139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#100:0 progress is 1/1 2025-06-24T21:08:24.553166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:24.553193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-24T21:08:24.553219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:24.553249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 100:0 2025-06-24T21:08:24.553274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 100:0 2025-06-24T21:08:24.553465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-24T21:08:24.553501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-06-24T21:08:24.553528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-24T21:08:24.553563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-24T21:08:24.554122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:24.554188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:24.554212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-24T21:08:24.554242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T21:08:24.554271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:24.554696Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:24.554755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:24.554774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-24T21:08:24.554792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:08:24.554809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:08:24.554847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-24T21:08:24.558423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-24T21:08:24.558660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-24T21:08:24.558889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-24T21:08:24.558920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-24T21:08:24.559236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-24T21:08:24.559335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-24T21:08:24.559359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:459:2411] TestWaitNotification: OK eventTxId 100 2025-06-24T21:08:24.559766Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:24.559977Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 192us result status StatusSuccess 2025-06-24T21:08:24.560396Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:24.560944Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:24.561110Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 168us result status StatusSuccess 2025-06-24T21:08:24.561498Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 
PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpMultishardIndex::CheckPushTopSort [GOOD] Test command err: Trying to start YDB, gRPC: 15150, MsgBus: 4714 2025-06-24T21:08:00.799488Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625653587289913:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:00.812888Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045a8/r3tmp/tmpIeayOm/pdisk_1.dat 2025-06-24T21:08:01.290250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:01.290344Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:01.304444Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:01.332799Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:01.335388Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625653587289810:2079] 1750799280775506 != 1750799280775509 TServer::EnableGrpc on GrpcPort 15150, node 1 2025-06-24T21:08:01.432520Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:01.432560Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:01.432570Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:01.432706Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4714 
2025-06-24T21:08:01.821998Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4714 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:02.122734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:02.143800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:02.154028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:02.335249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:02.528758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:02.624462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:04.591443Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625670767160621:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:04.591579Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:04.950915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:04.985060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:05.027955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:05.062131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:05.092780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:05.169380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:05.209859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:05.320432Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625675062128586:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:05.320538Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:05.320871Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625675062128591:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:05.324807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:05.336145Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625675062128593:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:05.415000Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625675062128644:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:05.797042Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625653587289913:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:05.797123Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:06.464973Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625679357096217:3600], Recipient [1:7519625657882257428:2147]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:06.465061Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: ... chemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime Trying to start YDB, gRPC: 4197, MsgBus: 10119 2025-06-24T21:08:16.072780Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519625722047954369:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:16.072832Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045a8/r3tmp/tmp6h10hd/pdisk_1.dat 2025-06-24T21:08:16.206647Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:16.207710Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519625722047954347:2079] 1750799296071875 != 1750799296071878 2025-06-24T21:08:16.223979Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:16.224074Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:16.231721Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4197, node 3 2025-06-24T21:08:16.292438Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:16.292467Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:16.292477Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:16.292608Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10119 TClient is connected to server localhost:10119 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:08:16.833960Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:16.848770Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:16.927790Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:17.097096Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:17.115481Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:17.203528Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:20.084622Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625739227825184:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:20.084805Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:20.148437Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:20.230013Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:20.268084Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:20.299756Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:20.335801Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:20.379457Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:20.420089Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:20.493646Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625739227825843:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:20.493770Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:20.494107Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625739227825848:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:20.498919Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:20.535904Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519625739227825850:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:08:20.613279Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519625739227825901:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:21.126832Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519625722047954369:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:21.127374Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:21.792243Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> TSchemeShardSubDomainTest::CreateAndWait [GOOD] >> KqpVectorIndexes::OrderByCosineLevel1+Nullable+UseSimilarity [GOOD] >> KqpVectorIndexes::OrderByCosineDistanceNotNullableLevel3 >> TSchemeShardSubDomainTest::SimultaneousDeclareAndCreateTable >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTableForceDrop ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::RestartAtInFly [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:24.257386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:24.257511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:24.257572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:24.257615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:24.257666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:24.257697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:24.257767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:24.257858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:24.259814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:24.260235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:24.354418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:24.354481Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:24.372177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:24.372646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:24.372863Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:24.381850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:24.382111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:24.382878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.383212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:24.389053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.389290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:24.390620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:24.390692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.390987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:24.391051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:24.391106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:24.391243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.397442Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:24.534863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:24.535064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.535337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:24.535399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:24.535712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:24.535787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:24.538029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.538233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:24.538441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.538494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:24.538544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:24.538580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:24.540725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.540797Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:24.540839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:24.542599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.542649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-24T21:08:24.542701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.542761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:24.546695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:24.552126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:24.552367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:24.553401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.553540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:24.553591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.553929Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:24.553996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.554173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:24.554261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:24.556508Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:24.556555Z node 1 :FLAT_TX_SCHEMESHARD ... 
Data, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.748375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-24T21:08:24.748732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.748844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-24T21:08:24.749088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.749211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.749343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 3, at schemeshard: 72057594046678944 2025-06-24T21:08:24.749400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:08:24.749441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:24.749462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:08:24.749616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.749707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.749902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 9, at schemeshard: 72057594046678944 2025-06-24T21:08:24.750319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.750470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.750901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.751004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.751303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.751410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.751537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.751736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 
72057594046678944 2025-06-24T21:08:24.751841Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.752031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.752321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.752401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.752517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.752591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.752651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.757526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:24.760539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:24.760598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.760660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:24.760704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:24.760747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:24.760962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 100 2025-06-24T21:08:24.824252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-24T21:08:24.824391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 Leader for TabletID 72057594046678944 is [1:464:2412] sender: [1:525:2058] recipient: [1:15:2062] 2025-06-24T21:08:24.825180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-24T21:08:24.825295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-24T21:08:24.825341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:523:2457] TestWaitNotification: OK eventTxId 100 2025-06-24T21:08:24.825861Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false 
ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:24.826093Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 255us result status StatusSuccess 2025-06-24T21:08:24.826645Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:24.827213Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:24.827397Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 216us result status StatusSuccess 2025-06-24T21:08:24.827819Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true 
CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::DuplicateUpsertInterleaveParams-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 17835, MsgBus: 11601 2025-06-24T21:08:03.229417Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625664600157823:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:03.246720Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045a6/r3tmp/tmpoR6AZ3/pdisk_1.dat 2025-06-24T21:08:03.712462Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:03.712796Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625664600157602:2079] 1750799283145632 != 1750799283145635 2025-06-24T21:08:03.744646Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:03.744757Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:03.748837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17835, node 1 2025-06-24T21:08:03.884533Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:03.884570Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:03.884584Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:03.884721Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:04.215349Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11601 TClient is connected to server localhost:11601 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:04.658774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:04.683849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:04.711537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:04.828488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:05.013003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:05.082009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:06.972463Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625677485061118:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:06.972592Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:07.348761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:07.379413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:07.412764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:07.442063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:07.471988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:07.512073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:07.583745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:07.643411Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625681780029078:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:07.643480Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:07.643506Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625681780029083:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:07.650349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:07.661056Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625681780029085:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:07.724951Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625681780029136:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:08.236471Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625664600157823:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:08.236540Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:08.775117Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625686074996709:3601], Recipient [1:7519625664600157933:2148]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:08.775174Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:50 ... X1: 7519625747531588229 RawX2: 4503612512274851 } Origin: 72075186224037922 State: 2 TxId: 281474976710672 Step: 0 Generation: 1 2025-06-24T21:08:22.356538Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710672:2, shardIdx: 72057594046644480:36, shard: 72075186224037922, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-24T21:08:22.356548Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 281474976710672:2, at schemeshard: 72057594046644480 2025-06-24T21:08:22.356559Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 281474976710672:2, datashard: 72075186224037922, at schemeshard: 72057594046644480 2025-06-24T21:08:22.356570Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710672:2 129 -> 240 2025-06-24T21:08:22.356653Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:22.356860Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-24T21:08:22.356887Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:22.356900Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976710672:0 2025-06-24T21:08:22.356960Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625747531588238:2468] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976710672 at schemeshard: 72057594046644480 2025-06-24T21:08:22.357063Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976710672:2, at schemeshard: 72057594046644480 2025-06-24T21:08:22.357080Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects 
ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:22.357152Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [3:7519625721761782153:2145], Recipient [3:7519625721761782153:2145]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T21:08:22.357176Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T21:08:22.357212Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-24T21:08:22.357236Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976710672:0 ProgressState 2025-06-24T21:08:22.357319Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:22.357340Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710672:0 progress is 2/3 2025-06-24T21:08:22.357353Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 2/3 2025-06-24T21:08:22.357367Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710672:0 progress is 2/3 2025-06-24T21:08:22.357381Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 2/3 2025-06-24T21:08:22.357395Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710672, ready parts: 2/3, is published: true 2025-06-24T21:08:22.357573Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976710672:2, at schemeshard: 72057594046644480 2025-06-24T21:08:22.357587Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:22.357597Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976710672:2 2025-06-24T21:08:22.357640Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625747531588229:2467] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976710672 at schemeshard: 72057594046644480 2025-06-24T21:08:22.357716Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [3:7519625721761782153:2145], Recipient [3:7519625721761782153:2145]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T21:08:22.357737Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T21:08:22.357764Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710672:2, at schemeshard: 72057594046644480 2025-06-24T21:08:22.357779Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976710672:2 ProgressState 2025-06-24T21:08:22.357835Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:22.357852Z 
node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710672:2 progress is 3/3 2025-06-24T21:08:22.357862Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 3/3 2025-06-24T21:08:22.357877Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710672:2 progress is 3/3 2025-06-24T21:08:22.357885Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 3/3 2025-06-24T21:08:22.357896Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710672, ready parts: 3/3, is published: true 2025-06-24T21:08:22.357950Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519625747531588204:2465] message: TxId: 281474976710672 2025-06-24T21:08:22.357977Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 3/3 2025-06-24T21:08:22.358004Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710672:0 2025-06-24T21:08:22.358022Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710672:0 2025-06-24T21:08:22.358136Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-06-24T21:08:22.358163Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710672:1 2025-06-24T21:08:22.358175Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710672:1 2025-06-24T21:08:22.358189Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-06-24T21:08:22.358198Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710672:2 2025-06-24T21:08:22.358203Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710672:2 2025-06-24T21:08:22.358235Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-24T21:08:22.358527Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:22.358609Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625747531588304:3669], Recipient [3:7519625721761782153:2145]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:22.358635Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:22.358655Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:22.358673Z node 3 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:22.358730Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625747531588204:2465] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710672 at schemeshard: 72057594046644480 2025-06-24T21:08:22.359346Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625747531588307:3671], Recipient [3:7519625721761782153:2145]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:22.359367Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:22.359382Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:22.359411Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625747531588212:3602], Recipient [3:7519625721761782153:2145]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:22.359420Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:22.359424Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:23.016198Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625721761782153:2145]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:23.016241Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:23.016289Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625721761782153:2145], Recipient [3:7519625721761782153:2145]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:23.016308Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:23.623563Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateTenantDirTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:24.109056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:24.109150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:24.109182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:24.109213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:24.109255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:24.109276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:24.109323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:24.109384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:24.110009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:24.110284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:24.189511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:24.189567Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:24.205649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:24.206064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:24.206322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:24.214088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:24.214300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:24.214950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.215282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:24.218205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.218410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:24.219589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:24.219650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.219880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:24.219942Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:24.219988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:24.220074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.227323Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:24.381418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:24.381644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.381924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:24.381975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:24.382273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:24.382352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:24.385075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.385295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:24.385503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.385569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:24.385607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:24.385642Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:24.387832Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.387911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:24.387990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:24.389975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.390028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.390075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.390165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:24.393822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:24.396119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:24.396307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:24.397326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.397471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:24.397547Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.397844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:24.397904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.398111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:24.398187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:24.400274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:24.400318Z node 1 :FLAT_TX_SCHEMESHARD ... 2057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.875635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 Forgetting tablet 72075186233409546 2025-06-24T21:08:24.877010Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409548 2025-06-24T21:08:24.878422Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-24T21:08:24.878847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-24T21:08:24.879031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 Forgetting tablet 72075186233409551 2025-06-24T21:08:24.879970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-06-24T21:08:24.880134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409547 Forgetting tablet 72075186233409549 2025-06-24T21:08:24.881055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T21:08:24.881173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:08:24.881566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-24T21:08:24.881679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:24.882637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:08:24.883531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:08:24.883670Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:24.883714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:24.883853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:08:24.885180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-24T21:08:24.885221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-24T21:08:24.885278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-24T21:08:24.885312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186233409552 2025-06-24T21:08:24.885383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:24.885460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:24.885491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:24.885568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:24.885784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:08:24.885813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T21:08:24.888556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T21:08:24.888609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-24T21:08:24.888693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-24T21:08:24.888724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-24T21:08:24.888831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:08:24.888853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T21:08:24.888933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-24T21:08:24.888967Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-24T21:08:24.889073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:24.891916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-24T21:08:24.892095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T21:08:24.892125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T21:08:24.892460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T21:08:24.892545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T21:08:24.892581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:777:2667] TestWaitNotification: OK eventTxId 103 2025-06-24T21:08:24.893010Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:24.893202Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 162us result status StatusPathDoesNotExist 2025-06-24T21:08:24.893490Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:08:24.893899Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:24.894118Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 189us result status StatusSuccess 2025-06-24T21:08:24.894552Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: 
TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousDefineAndCreateTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:24.202691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:24.202789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:24.202847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:24.202884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:24.202940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:24.202972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:24.203037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:24.203116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:24.203896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:24.204240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:24.288103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:24.288164Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:24.305667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:24.306154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:24.306351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:24.317124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:24.317317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:24.318073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.318351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:24.325112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.325335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:24.326528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:24.326616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.326853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:24.326910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:24.326962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:24.327047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.333637Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary 
subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:24.477153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:24.477354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.477627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:24.477676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:24.477971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:24.478058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:24.480060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.480252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:24.480411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.480465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:24.480500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:24.480531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:24.482299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.482365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:24.482402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:24.484003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-24T21:08:24.484050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.484092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.484153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:24.493971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:24.495854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:24.496009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:24.496980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.497111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:24.497166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.497455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:24.497504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.497690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:24.497778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:24.501317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:24.501360Z node 1 :FLAT_TX_SCHEMESHARD ... 
at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:08:24.849062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-24T21:08:24.849106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:08:24.849169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-24T21:08:24.849429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 507 RawX2: 4294969755 } Origin: 72075186233409549 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:08:24.849467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409549, partId: 0 2025-06-24T21:08:24.849573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 507 RawX2: 4294969755 } Origin: 72075186233409549 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:08:24.849631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:08:24.849761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 507 RawX2: 4294969755 } Origin: 72075186233409549 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:08:24.849846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:4, shard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.849903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.849969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-06-24T21:08:24.850028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T21:08:24.854437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.854571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:08:24.854647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:08:24.854712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: 
TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.854973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.855018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:08:24.855121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:08:24.855189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:08:24.855243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:08:24.855285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:08:24.855325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T21:08:24.855394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:312:2301] message: TxId: 102 2025-06-24T21:08:24.855455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:08:24.855500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:08:24.855529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:08:24.855644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:08:24.857265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:08:24.857305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:533:2477] TestWaitNotification: OK eventTxId 102 2025-06-24T21:08:24.857819Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:24.858016Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 240us result status StatusSuccess 2025-06-24T21:08:24.858593Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 2 SecurityStateVersion: 0 } } 
Children { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 130 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 10 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:24.859234Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:24.859513Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 253us result status StatusSuccess 2025-06-24T21:08:24.860016Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table_0" PathDescription { Self { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 130 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 
WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 10 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::CreateSubDomainsInSeparateDir [GOOD] >> TSchemeShardSubDomainTest::SchemeLimitsRejects [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomain [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:24.360692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:24.360775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:24.360811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:24.360845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:24.360907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:24.360930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:24.360993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:24.361081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:24.361783Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:24.362143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:24.442935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:24.443001Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:24.458540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:24.458951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:24.459173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:24.467034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:24.467317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:24.467946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.468249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:24.471446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.471670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:24.472830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:24.472914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.473158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:24.473212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:24.473255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:24.473365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.480584Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:24.625581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-24T21:08:24.625745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.625974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:24.626009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:24.626176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:24.626247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:24.631430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.631590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:24.631737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.631793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:24.631842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:24.631877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:24.634512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.634580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:24.634625Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:24.636939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.636992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.637039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.637098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:24.640347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:24.642455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:24.642615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:24.643539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.643644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:24.643678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.643900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:24.643936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.644067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:24.644119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:24.646323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:24.646386Z node 1 :FLAT_TX_SCHEMESHARD ... 
6: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:490:2445] TestWaitNotification: OK eventTxId 103 2025-06-24T21:08:25.164760Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:25.165147Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 362us result status StatusSuccess 2025-06-24T21:08:25.165729Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "dir_0" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:25.166498Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:25.166820Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 247us 
result status StatusSuccess 2025-06-24T21:08:25.167326Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table_0" PathDescription { Self { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:25.167939Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:25.168139Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir_0" took 203us result status StatusSuccess 2025-06-24T21:08:25.168509Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dir_0" PathDescription { Self { Name: "dir_0" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 
102 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "table_1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 4 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:25.169130Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir_0/table_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:25.169338Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir_0/table_1" took 229us result status StatusSuccess 2025-06-24T21:08:25.169739Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dir_0/table_1" PathDescription { Self { Name: "table_1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_1" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 
0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpIndexes::SecondaryIndexUsingInJoin2-UseStreamJoin [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::SimpleVectorIndexOrderByCosineDistanceWithCover-Nullable [GOOD] Test command err: Trying to start YDB, gRPC: 17699, MsgBus: 26392 2025-06-24T21:07:38.645042Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625558687123061:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:38.645156Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004621/r3tmp/tmpOIGPXb/pdisk_1.dat 2025-06-24T21:07:38.980275Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:38.980675Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625558687123043:2079] 1750799258644308 != 1750799258644311 TServer::EnableGrpc on GrpcPort 17699, node 1 2025-06-24T21:07:39.008213Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:39.008327Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:39.010030Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:39.013121Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:39.013140Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:39.013147Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:39.013273Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:26392 TClient is connected to server localhost:26392 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:39.455206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:39.479392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:39.611182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:39.722785Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:39.761428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:39.823675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:41.570527Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625571572026565:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:41.570644Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:41.894550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.917922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.941692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.970296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.998051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:42.065068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:42.134054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:42.187869Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625575866994527:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:42.187946Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:42.188153Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625575866994532:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:42.191737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:42.200301Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625575866994534:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:42.276279Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625575866994585:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:43.333348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:43.553698Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__create.cpp:23: TIndexBuilder::TXTYPE_CREATE_INDEX_BUILD: DoExecute TxId: 281474976710674 DatabaseName: "/Root" Settings { source_path: "/Root/TestTable" index { name: "index1" index_columns: "emb" global_vector_kmeans_tree_index { vector_settings { settings { metric: SIMILARITY_COSINE vector_type: VECTOR_TYPE_UINT8 vector_dimension: 2 } clusters: 2 levels: 2 } } } } UserSID: "" 2025-06-24T21:07:43.554623Z node 1 :BUILD_INDEX NOTICE: schemeshard_b ... 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:20.980979Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037902 2025-06-24T21:08:20.981030Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:20.981129Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519625584731077364:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:08:20.981149Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:08:20.981157Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-24T21:08:21.929498Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7519625584731077364:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:21.929540Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:21.929589Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:7519625584731077364:2153], Recipient [2:7519625584731077364:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:21.929605Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:22.930718Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7519625584731077364:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:22.930766Z node 2 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:22.930824Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:7519625584731077364:2153], Recipient [2:7519625584731077364:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:22.930846Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:23.212391Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [2:7519625601910948398:2421], Recipient [2:7519625584731077364:2153]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037921 TableLocalId: 12 Generation: 1 Round: 2 TableStats { DataSize: 1240 RowCount: 7 IndexSize: 0 InMemSize: 1240 LastAccessTime: 1750799269364 LastUpdateTime: 1750799269364 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 7 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 132 Memory: 137784 } ShardState: 2 UserTablePartOwners: 72075186224037921 NodeId: 2 StartTime: 1750799268192 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:08:23.212436Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:08:23.212469Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037921 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] state 'Ready' dataSize 1240 rowCount 7 cpuUsage 0.0132 2025-06-24T21:08:23.212580Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037921 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] raw table stats: DataSize: 1240 RowCount: 7 IndexSize: 0 InMemSize: 1240 LastAccessTime: 1750799269364 LastUpdateTime: 1750799269364 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 7 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:08:23.212601Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.099995s, queue# 1 2025-06-24T21:08:23.212776Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [2:7519625601910948394:2419], Recipient [2:7519625584731077364:2153]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037920 TableLocalId: 12 Generation: 1 Round: 2 TableStats { DataSize: 928 RowCount: 4 IndexSize: 0 InMemSize: 928 LastAccessTime: 1750799269359 LastUpdateTime: 1750799269359 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 4 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 
SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 104 Memory: 137784 } ShardState: 2 UserTablePartOwners: 72075186224037920 NodeId: 2 StartTime: 1750799268192 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:08:23.212792Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:08:23.212812Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037920 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] state 'Ready' dataSize 928 rowCount 4 cpuUsage 0.0104 2025-06-24T21:08:23.212895Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037920 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] raw table stats: DataSize: 928 RowCount: 4 IndexSize: 0 InMemSize: 928 LastAccessTime: 1750799269359 LastUpdateTime: 1750799269359 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 4 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:08:23.312581Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519625584731077364:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:08:23.312632Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:08:23.312656Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 2 2025-06-24T21:08:23.312699Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 2 2025-06-24T21:08:23.312716Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 2 2025-06-24T21:08:23.312763Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 12 shard idx 72057594046644480:34 data size 1240 row count 7 2025-06-24T21:08:23.312821Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037921 maps to shardIdx: 72057594046644480:34 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], pathId map=TuplePrimaryDescending, is column=0, is olap=0, RowCount 7, DataSize 1240 2025-06-24T21:08:23.312836Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037921, followerId 0 2025-06-24T21:08:23.312893Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:34 with partCount# 0, rowCount# 7, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:23.312949Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 
72075186224037921 2025-06-24T21:08:23.312991Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 12 shard idx 72057594046644480:33 data size 928 row count 4 2025-06-24T21:08:23.313055Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037920 maps to shardIdx: 72057594046644480:33 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], pathId map=TuplePrimaryDescending, is column=0, is olap=0, RowCount 4, DataSize 928 2025-06-24T21:08:23.313070Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037920, followerId 0 2025-06-24T21:08:23.313107Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:33 with partCount# 0, rowCount# 4, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:08:23.313126Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037920 2025-06-24T21:08:23.313175Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:23.313778Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519625584731077364:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:08:23.313802Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:08:23.313811Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-24T21:08:23.931358Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7519625584731077364:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:23.931403Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:23.931468Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:7519625584731077364:2153], Recipient [2:7519625584731077364:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:23.931488Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateAndWait [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:25.003255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, 
MaxRate# 1 2025-06-24T21:08:25.003366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:25.003409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:25.003446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:25.003493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:25.003523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:25.003584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:25.003671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:25.004443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:25.004786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:25.087688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:25.087762Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:25.111745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:25.112213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:25.112426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:25.121457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:25.121706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:25.122448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:25.122756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:25.126023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:25.126242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:25.127479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:25.127561Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:25.127845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:25.127922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:25.127970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:25.128060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:25.135540Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:25.250561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:25.250755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:25.250986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:25.251026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:25.251313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:25.251381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:25.253473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:25.253648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:25.253801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:25.253855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:25.253890Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:25.253936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:25.256141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:25.256206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:25.256246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:25.258379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:25.258441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:25.258491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:25.258563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:25.262604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:25.265075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:25.265287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:25.266440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:25.266601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:25.266658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:25.267009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:25.267078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, 
operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:25.267297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:25.267387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:25.270915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:25.270969Z node 1 :FLAT_TX_SCHEMESHARD ... operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:08:25.358422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:08:25.358488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:25.358528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:25.358561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:25.358596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:25.358640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-24T21:08:25.358671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:25.358710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:08:25.358736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:08:25.358788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:08:25.358819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-24T21:08:25.358843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-24T21:08:25.358868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-24T21:08:25.359567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:25.359648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:25.359690Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:25.359721Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-24T21:08:25.359752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:25.360617Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:25.360694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:25.360722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:25.360762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-24T21:08:25.360799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:08:25.360856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T21:08:25.364679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:25.365034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 100, wait until txId: 101 TestModificationResults wait txId: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 100 2025-06-24T21:08:25.365300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-24T21:08:25.365350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-06-24T21:08:25.365444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:08:25.365484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T21:08:25.365955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-24T21:08:25.366055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:08:25.366098Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-24T21:08:25.366134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:335:2324] 2025-06-24T21:08:25.366315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:08:25.366339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:335:2324] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-06-24T21:08:25.366830Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:25.367032Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/USER_0" took 222us result status StatusSuccess 2025-06-24T21:08:25.367556Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/USER_0" PathDescription { Self { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "/dc-1/users/tenant-1:hdd" Kind: "hdd" } StoragePools { Name: "/dc-1/users/tenant-1:hdd-1" Kind: "hdd-1" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:25.368070Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:25.368259Z 
node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir" took 203us result status StatusSuccess 2025-06-24T21:08:25.368625Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir" PathDescription { Self { Name: "dir" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::SimultaneousDeclareAndCreateTable [GOOD] >> TSchemeShardSubDomainTest::CreateDropSolomon [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainsInSeparateDir [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:25.149562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:25.149660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:25.149699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:25.149734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing 
config: using default configuration 2025-06-24T21:08:25.149773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:25.149802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:25.149871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:25.150006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:25.150748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:25.151098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:25.238407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:25.238462Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:25.255278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:25.255667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:25.255848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:25.264318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:25.264524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:25.265293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:25.265572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:25.269082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:25.269292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:25.270538Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:25.270609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:25.270882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:25.270939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 
2025-06-24T21:08:25.270987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:25.271082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:25.279465Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:25.429383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:25.429554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:25.429759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:25.429799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:25.430019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:25.430074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:25.431857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:25.432015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:25.432164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:25.432215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:25.432259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:25.432283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:25.433783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:25.433842Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:25.433875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:25.435433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:25.435482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:25.435562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:25.435636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:25.438894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:25.440879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:25.441049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:25.442032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:25.442174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:25.442235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:25.442578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:25.442637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:25.442821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:25.442903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing 
txId 1 2025-06-24T21:08:25.444916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:25.444955Z node 1 :FLAT_TX_SCHEMESHARD ... _board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:08:25.762468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:08:25.762496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3 2025-06-24T21:08:25.762521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 8 2025-06-24T21:08:25.762582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-24T21:08:25.765419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:08:25.766680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 100 2025-06-24T21:08:25.766903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-24T21:08:25.766943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-06-24T21:08:25.767042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:08:25.767063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-24T21:08:25.767106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:08:25.767126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:08:25.767625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-24T21:08:25.767768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-24T21:08:25.767809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:936:2768] 2025-06-24T21:08:25.768013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:08:25.768097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got 
EvNotifyTxCompletionResult 2025-06-24T21:08:25.768136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:936:2768] 2025-06-24T21:08:25.768229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:08:25.768282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:08:25.768301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:936:2768] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 2025-06-24T21:08:25.768757Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomains/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:25.769004Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomains/USER_0" took 262us result status StatusSuccess 2025-06-24T21:08:25.769491Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomains/USER_0" PathDescription { Self { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 Coordinators: 72075186233409547 Coordinators: 72075186233409548 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409549 Mediators: 72075186233409550 Mediators: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:25.770014Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/SubDomains/USER_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:25.770224Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomains/USER_1" took 202us result status StatusSuccess 2025-06-24T21:08:25.770610Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomains/USER_1" PathDescription { Self { Name: "USER_1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409552 Coordinators: 72075186233409553 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 Mediators: 72075186233409556 Mediators: 72075186233409557 } DomainKey { SchemeShard: 72057594046678944 PathId: 4 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 4 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:25.771070Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomains" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:25.771271Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomains" took 164us result status StatusSuccess 2025-06-24T21:08:25.771655Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomains" PathDescription { Self { Name: "SubDomains" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 6 } ChildrenExist: true } Children { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } Children { Name: "USER_1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::SimultaneousCreateForceDrop [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeLimitsRejects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:22.054893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:22.055015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:22.055061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:22.055099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:22.055171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:22.055198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:22.055267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, 
InflightLimit# 10 2025-06-24T21:08:22.055358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:22.056171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:22.056550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:22.141610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:22.141688Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:22.160802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:22.161227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:22.161412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:22.172771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:22.173030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:22.173729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:22.174073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:22.177624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:22.177871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:22.179095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:22.179185Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:22.179446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:22.179499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:22.179550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:22.179635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:22.187224Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 
2025-06-24T21:08:22.325449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:22.325700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:22.326042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:22.326105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:22.326382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:22.326456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:22.329129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:22.329362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:22.329593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:22.329657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:22.329701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:22.329740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:22.332149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:22.332235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:22.332282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:22.334366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:22.334419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:22.334471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:22.334548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:22.338674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:22.341088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:22.341312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:22.342379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:22.342563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:22.342635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:22.342959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:22.343025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:22.343274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:22.343394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:22.345943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:22.346001Z node 1 :FLAT_TX_SCHEMESHARD ... 
p:46: Free shard 72057594046678944:14 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:25.598628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:25.598655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:16 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:25.599232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 139 2025-06-24T21:08:25.600767Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 Forgetting tablet 72075186233409546 2025-06-24T21:08:25.601882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:25.602172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:08:25.602634Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 15 TxId_Deprecated: 15 TabletID: 72075186233409556 2025-06-24T21:08:25.603093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 15 ShardOwnerId: 72057594046678944 ShardLocalIdx: 15, at schemeshard: 72057594046678944 2025-06-24T21:08:25.603311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 16] was 3 2025-06-24T21:08:25.603531Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 14 TxId_Deprecated: 14 TabletID: 72075186233409555 2025-06-24T21:08:25.605284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 14 ShardOwnerId: 72057594046678944 ShardLocalIdx: 14, at schemeshard: 72057594046678944 2025-06-24T21:08:25.605473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 16] was 2 2025-06-24T21:08:25.606082Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409556 2025-06-24T21:08:25.608250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T21:08:25.608466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409555 2025-06-24T21:08:25.608929Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 
ShardLocalIdx: 16 TxId_Deprecated: 16 TabletID: 72075186233409557 Forgetting tablet 72075186233409547 Forgetting tablet 72075186233409557 2025-06-24T21:08:25.611673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 16 ShardOwnerId: 72057594046678944 ShardLocalIdx: 16, at schemeshard: 72057594046678944 2025-06-24T21:08:25.611895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 16] was 1 2025-06-24T21:08:25.612269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 139 2025-06-24T21:08:25.612474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 139 2025-06-24T21:08:25.612575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:25.612620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 16], at schemeshard: 72057594046678944 2025-06-24T21:08:25.612694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:25.612920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:08:25.612988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T21:08:25.613143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:25.613196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:25.613315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:08:25.620432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:15 2025-06-24T21:08:25.620491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:15 tabletId 72075186233409556 2025-06-24T21:08:25.620633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:14 2025-06-24T21:08:25.620660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:14 tabletId 72075186233409555 2025-06-24T21:08:25.623233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:08:25.623288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T21:08:25.623372Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:16 2025-06-24T21:08:25.623437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:16 tabletId 72075186233409557 2025-06-24T21:08:25.623532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:25.623743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:25.623847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:25.623898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:25.623978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:25.625742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 139, wait until txId: 139 TestWaitNotification wait txId: 139 2025-06-24T21:08:25.626720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 139: send EvNotifyTxCompletion 2025-06-24T21:08:25.626781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 139 2025-06-24T21:08:25.627867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 139, at schemeshard: 72057594046678944 2025-06-24T21:08:25.627973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 139: got EvNotifyTxCompletionResult 2025-06-24T21:08:25.628018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 139: satisfy waiter [1:2271:4040] TestWaitNotification: OK eventTxId 139 2025-06-24T21:08:25.629803Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:25.630010Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 240us result status StatusSuccess 2025-06-24T21:08:25.630468Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { 
GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 5 ShardsInside: 0 ShardsLimit: 6 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 20 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 4 MaxPaths: 5 MaxChildrenInDir: 4 MaxAclBytesSize: 25 MaxTableColumns: 3 MaxTableColumnNameLength: 10 MaxTableKeyColumns: 1 MaxTableIndices: 20 MaxShards: 6 MaxShardsInPath: 4 MaxConsistentCopyTargets: 1 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"$%&\'()*+,-.:;<=>?@[]^_`{|}~" MaxPQPartitions: 20 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> DataShardSnapshots::PostMergeNotCompactedTooEarly [GOOD] >> DataShardSnapshots::PipelineAndMediatorRestoreRace >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTableForceDrop [GOOD] >> TSchemeShardSubDomainTest::SimultaneousCreateTableForceDrop >> TSchemeShardSubDomainTest::DeleteAndRestart ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousDeclareAndCreateTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:26.096868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:26.096962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:26.096998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:26.097053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:26.097117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:26.097151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:26.097216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-24T21:08:26.097296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:26.098037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:26.098381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:26.181425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:26.181476Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:26.197258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:26.197628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:26.197773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:26.205622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:26.205879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:26.206574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:26.206868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:26.209776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:26.209951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:26.211080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:26.211164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:26.211394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:26.211453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:26.211512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:26.211593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.218270Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 
2025-06-24T21:08:26.307927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:26.308100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.308314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:26.308348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:26.308530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:26.308584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:26.310349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:26.310510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:26.310647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.310681Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:26.310713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:26.310737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:26.312384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.312452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:26.312482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:26.313781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.313822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.313873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:26.313943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:26.316348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:26.317681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:26.317804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:26.318631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:26.318729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:26.318770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:26.318974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:26.319021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:26.319174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:26.319282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:26.320839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:26.320876Z node 1 :FLAT_TX_SCHEMESHARD ... 
SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:26.362283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:26.362424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:26.362480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-24T21:08:26.362544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-06-24T21:08:26.362707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.362745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-24T21:08:26.362826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#100:0 progress is 1/1 2025-06-24T21:08:26.362858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:26.362897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#100:0 progress is 1/1 2025-06-24T21:08:26.362924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:26.362952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-24T21:08:26.362978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:26.363009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 100:0 2025-06-24T21:08:26.363039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 100:0 2025-06-24T21:08:26.363092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:08:26.363163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-06-24T21:08:26.363195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-24T21:08:26.363228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-24T21:08:26.363930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:26.364017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:26.364067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-24T21:08:26.364103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T21:08:26.364139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:26.364738Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:26.364797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:26.364828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-24T21:08:26.364848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:08:26.364869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:26.364933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-24T21:08:26.367467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-24T21:08:26.367911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestModificationResults wait txId: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 100 2025-06-24T21:08:26.368225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-24T21:08:26.368270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-06-24T21:08:26.368337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:08:26.368359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- 
TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T21:08:26.368835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-24T21:08:26.368970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-24T21:08:26.369010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:315:2304] 2025-06-24T21:08:26.369252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:08:26.369290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:08:26.369310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:315:2304] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-06-24T21:08:26.369604Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:26.369792Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 153us result status StatusSuccess 2025-06-24T21:08:26.370115Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:26.370429Z node 1 :SCHEMESHARD_DESCRIBE 
DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:26.370564Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 134us result status StatusPathDoesNotExist 2025-06-24T21:08:26.370653Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/table_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/USER_0\' (id: [OwnerId: 72057594046678944, LocalPathId: 2])" Path: "/MyRoot/USER_0/table_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/USER_0" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateDropSolomon [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:24.879784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:24.879888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:24.879930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:24.879986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:24.880037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:24.880070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:24.880136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:24.880238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-24T21:08:24.881023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:24.881405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:24.956316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:24.956376Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:24.970293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:24.970741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:24.970945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:24.978422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:24.978608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:24.979310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.979579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:24.982203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.982414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:24.983861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:24.983912Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.984071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:24.984117Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:24.984148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:24.984213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.989800Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:25.117304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:25.117534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:25.117789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:25.117841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:25.118116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:25.118180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:25.120599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:25.120824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:25.121033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:25.121086Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:25.121123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:25.121155Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:25.123175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:25.123243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:25.123283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:25.124955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:25.125001Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:25.125044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:25.125106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:25.128675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:25.130806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:25.131070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:25.132188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:25.132330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:25.132401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:25.132700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:25.132774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:25.132957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:25.133048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:25.135502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:25.135549Z node 1 :FLAT_TX_SCHEMESHARD ... 
de 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:08:26.255631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:0 2025-06-24T21:08:26.255663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:0 2025-06-24T21:08:26.255831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:08:26.255877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-06-24T21:08:26.255916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-24T21:08:26.255951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-24T21:08:26.257217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:08:26.257311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:08:26.257361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:08:26.257404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-24T21:08:26.257449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:26.258229Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:08:26.258376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:08:26.258419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:08:26.258447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-24T21:08:26.258478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, 
LocalPathId: 2] was 4 2025-06-24T21:08:26.258549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-24T21:08:26.261464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:26.261519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:26.262087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:08:26.263225Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-24T21:08:26.263470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:26.263793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:08:26.264120Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-24T21:08:26.264393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T21:08:26.264592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186233409546 2025-06-24T21:08:26.266227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:26.266316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:26.266449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 Forgetting tablet 72075186233409547 2025-06-24T21:08:26.290962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:26.291048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:26.291161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:26.293083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at 
schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:08:26.293686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:08:26.293754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T21:08:26.296024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:08:26.296096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T21:08:26.296265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:26.296412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-24T21:08:26.296763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-24T21:08:26.296816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-24T21:08:26.297376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-24T21:08:26.297479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T21:08:26.297521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:2102:3705] TestWaitNotification: OK eventTxId 104 2025-06-24T21:08:26.306421Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/Solomon" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:26.306593Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/Solomon" took 195us result status StatusPathDoesNotExist 2025-06-24T21:08:26.306721Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/Solomon\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/Solomon" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:08:26.307343Z node 1 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:26.307562Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 195us result status StatusPathDoesNotExist 2025-06-24T21:08:26.307705Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateForceDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:25.901370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:25.901476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:25.901518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:25.901553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:25.901599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:25.901629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:25.901708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:25.901805Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-24T21:08:25.902634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:25.903014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:25.990644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:25.990710Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:26.007294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:26.007712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:26.007902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:26.017144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:26.017393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:26.018082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:26.018420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:26.021785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:26.022047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:26.023326Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:26.023416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:26.023683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:26.023740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:26.023786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:26.023883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.032212Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:26.156917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:26.157122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.157376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:26.157434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:26.157680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:26.157765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:26.160339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:26.160579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:26.160805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.160893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:26.160943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:26.160981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:26.163731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.163828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:26.163878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:26.166153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.166222Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.166281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:26.166349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:26.170364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:26.172744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:26.172971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:26.174105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:26.174276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:26.174335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:26.174699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:26.174766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:26.174983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:26.175074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:26.177730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:26.177785Z node 1 :FLAT_TX_SCHEMESHARD ... 
Id: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-24T21:08:26.368826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:08:26.370158Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 TabletID: 72075186233409551 Forgetting tablet 72075186233409548 2025-06-24T21:08:26.370741Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-24T21:08:26.371553Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-24T21:08:26.372051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-06-24T21:08:26.372238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409551 2025-06-24T21:08:26.373491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T21:08:26.373682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:08:26.374273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-24T21:08:26.374445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186233409547 Forgetting tablet 72075186233409549 2025-06-24T21:08:26.376377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:26.376435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:26.376608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:08:26.376970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:26.377035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:26.377108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount 
reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:26.378184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-24T21:08:26.378233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-24T21:08:26.378436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5819: Failed to connect, to tablet: 72075186233409550, at schemeshard: 72057594046678944 2025-06-24T21:08:26.379036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:08:26.379072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T21:08:26.381229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T21:08:26.381265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-24T21:08:26.381327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-24T21:08:26.381349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-24T21:08:26.381484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5819: Failed to connect, to tablet: 72075186233409551, at schemeshard: 72057594046678944 2025-06-24T21:08:26.381561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:08:26.381586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T21:08:26.383651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-24T21:08:26.383708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-24T21:08:26.383905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:26.384003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 100 2025-06-24T21:08:26.384264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-24T21:08:26.384304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-06-24T21:08:26.384401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:08:26.384422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, 
SendToSchemeshard, txId 101 2025-06-24T21:08:26.384804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-24T21:08:26.384956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-24T21:08:26.384995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:626:2528] 2025-06-24T21:08:26.385144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:08:26.385264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:08:26.385288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:626:2528] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-06-24T21:08:26.385717Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:26.385954Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 210us result status StatusPathDoesNotExist 2025-06-24T21:08:26.386165Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:08:26.386550Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:26.386742Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 184us result status StatusSuccess 2025-06-24T21:08:26.387204Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::Delete >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain >> TSchemeShardSubDomainTest::TableDiskSpaceQuotas >> TSchemeShardSubDomainTest::CreateForceDropSolomon [GOOD] >> KqpUniqueIndex::UpdateOnNullInComplexFk [GOOD] >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SecondaryIndexUsingInJoin2-UseStreamJoin [GOOD] Test command err: Trying to start YDB, gRPC: 1813, MsgBus: 11573 2025-06-24T21:07:56.773515Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625634864018368:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:56.773574Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045ae/r3tmp/tmp2hmpjF/pdisk_1.dat 2025-06-24T21:07:57.220805Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:57.220896Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:57.225218Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:57.249283Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625634864018346:2079] 1750799276771104 != 1750799276771107 2025-06-24T21:07:57.250004Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1813, node 1 2025-06-24T21:07:57.380946Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:57.380975Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty 
maybe) 2025-06-24T21:07:57.380984Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:57.381132Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11573 2025-06-24T21:07:57.815290Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11573 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:58.063933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:58.079324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:07:58.089644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:07:58.255454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.407371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:58.474529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:00.145545Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625652043889175:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:00.145700Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:00.444635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:00.480965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:00.519894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:00.551393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:00.581457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:00.641687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:00.678463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:00.730337Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625652043889832:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:00.730401Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:00.730651Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625652043889837:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:00.734350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:00.745090Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625652043889839:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:00.816070Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625652043889892:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:01.775238Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625634864018368:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:01.775320Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:01.874987Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625656338857463:3600], Recipient [1:7519625639158986006:2170]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:01.875027Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048 ... deEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:23.213200Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976710673:2, at schemeshard: 72057594046644480 2025-06-24T21:08:23.213211Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:23.213222Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976710673:2 2025-06-24T21:08:23.213278Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625748499300592:2484] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976710673 at schemeshard: 72057594046644480 2025-06-24T21:08:23.213343Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976710673:0, at schemeshard: 72057594046644480 2025-06-24T21:08:23.213350Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:23.213356Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976710673:0 2025-06-24T21:08:23.213380Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625748499300605:2485] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976710673 at schemeshard: 72057594046644480 2025-06-24T21:08:23.213442Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [3:7519625722729494373:2146], Recipient [3:7519625722729494373:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T21:08:23.213458Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T21:08:23.213490Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710673:2, at schemeshard: 72057594046644480 2025-06-24T21:08:23.213511Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 
281474976710673:2 ProgressState 2025-06-24T21:08:23.213584Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:23.213601Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710673:2 progress is 2/3 2025-06-24T21:08:23.213614Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710673 ready parts: 2/3 2025-06-24T21:08:23.213626Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710673:2 progress is 2/3 2025-06-24T21:08:23.213636Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710673 ready parts: 2/3 2025-06-24T21:08:23.213650Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710673, ready parts: 2/3, is published: true 2025-06-24T21:08:23.213817Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [3:7519625722729494373:2146], Recipient [3:7519625722729494373:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T21:08:23.213830Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T21:08:23.213854Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710673:0, at schemeshard: 72057594046644480 2025-06-24T21:08:23.213867Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976710673:0 ProgressState 2025-06-24T21:08:23.213925Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:23.213935Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710673:0 progress is 3/3 2025-06-24T21:08:23.213942Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710673 ready parts: 3/3 2025-06-24T21:08:23.213956Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710673:0 progress is 3/3 2025-06-24T21:08:23.213964Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710673 ready parts: 3/3 2025-06-24T21:08:23.213972Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710673, ready parts: 3/3, is published: true 2025-06-24T21:08:23.214009Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519625748499300569:2482] message: TxId: 281474976710673 2025-06-24T21:08:23.214029Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710673 ready parts: 3/3 2025-06-24T21:08:23.214050Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710673:0 2025-06-24T21:08:23.214060Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710673:0 2025-06-24T21:08:23.214167Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 20] was 4 2025-06-24T21:08:23.214185Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710673:1 2025-06-24T21:08:23.214192Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710673:1 2025-06-24T21:08:23.214206Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 21] was 3 2025-06-24T21:08:23.214213Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710673:2 2025-06-24T21:08:23.214218Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710673:2 2025-06-24T21:08:23.214246Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 22] was 3 2025-06-24T21:08:23.214522Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:23.214645Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:23.214690Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [3:7519625748499300569:2482] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710673 at schemeshard: 72057594046644480 2025-06-24T21:08:23.214827Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625748499300665:3746], Recipient [3:7519625722729494373:2146]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:23.214844Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:23.214854Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:23.215011Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625748499300669:3749], Recipient [3:7519625722729494373:2146]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:23.215021Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:23.215027Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:23.215309Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [3:7519625748499300577:3683], Recipient [3:7519625722729494373:2146]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:23.215328Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:23.215337Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:23.436384Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 
271125000, Sender [0:0:0], Recipient [3:7519625722729494373:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:23.436421Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:23.436468Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625722729494373:2146], Recipient [3:7519625722729494373:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:23.436496Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:24.436851Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625722729494373:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:24.436893Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:24.436943Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625722729494373:2146], Recipient [3:7519625722729494373:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:24.436961Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:25.437262Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625722729494373:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:25.437302Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:25.437348Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625722729494373:2146], Recipient [3:7519625722729494373:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:25.437366Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTableForceDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:26.149477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:26.149586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:26.149633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:26.149670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:26.149725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:26.150209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:26.150298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:26.150391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:26.151277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:26.151671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:26.241224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:26.241294Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:26.257972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:26.258392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:26.258602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:26.266337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:26.266590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:26.267326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:26.267632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:26.270581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:26.270789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:26.271994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:26.272062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:26.272316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:26.272367Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:26.272423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:26.272522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.280320Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:26.427626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:26.427827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.428140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:26.428188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:26.428407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:26.428473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:26.430784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:26.430991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:26.431224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.431299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:26.431345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:26.431382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:26.433572Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.433659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:26.433707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:26.435826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.435877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.435926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:26.435993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:26.439120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:26.441041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:26.441182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:26.441988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:26.442100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:26.442143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:26.442409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:26.442454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:26.442598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:26.442670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:26.444779Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:26.444821Z node 1 :FLAT_TX_SCHEMESHARD ... BUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:26.688598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:08:26.689049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-24T21:08:26.689105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-24T21:08:26.689525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5819: Failed to connect, to tablet: 72075186233409550, at schemeshard: 72057594046678944 2025-06-24T21:08:26.694180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-24T21:08:26.694345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:26.694463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:26.694513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:26.694607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:26.694818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:08:26.694853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T21:08:26.695444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T21:08:26.695487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-24T21:08:26.695696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-24T21:08:26.695724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-24T21:08:26.696158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:08:26.696248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 
72057594046678944:2 tabletId 72075186233409547 2025-06-24T21:08:26.697219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5819: Failed to connect, to tablet: 72075186233409551, at schemeshard: 72057594046678944 2025-06-24T21:08:26.697951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-24T21:08:26.698013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-24T21:08:26.698240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:26.700243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 100 2025-06-24T21:08:26.700523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-24T21:08:26.700574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-06-24T21:08:26.700665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:08:26.700687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-24T21:08:26.700784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:08:26.700821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:08:26.701343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-24T21:08:26.701503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-24T21:08:26.701551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:644:2545] 2025-06-24T21:08:26.701800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:08:26.701963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:08:26.702015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:08:26.702042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:644:2545] 2025-06-24T21:08:26.702141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:08:26.702173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy 
waiter [1:644:2545] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 2025-06-24T21:08:26.702732Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:26.702943Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 260us result status StatusPathDoesNotExist 2025-06-24T21:08:26.703211Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:08:26.703811Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:26.704029Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 218us result status StatusPathDoesNotExist 2025-06-24T21:08:26.704191Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/table_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/table_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:08:26.704763Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:26.704988Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 235us result status StatusSuccess 2025-06-24T21:08:26.705477Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: 
TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::DiskSpaceUsage >> TSchemeShardSubDomainTest::CreateWithoutTimeCastBuckets >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomainWithStoragePools >> TStoragePoolsQuotasTest::DifferentQuotasInteraction [GOOD] >> TSchemeShardSubDomainTest::SimultaneousDeclareAndDefine >> TSchemeShardSubDomainTest::TopicDiskSpaceQuotas ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateForceDropSolomon [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:25.908440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:25.908524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:25.908553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:25.908588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:25.908626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:25.908652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:25.908704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:25.908780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:25.909440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:25.909760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:25.977499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:25.977551Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:25.990918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:25.991232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:25.991417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:25.999000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:25.999234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:25.999875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:26.000212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:26.003276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:26.003462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:26.004376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:26.004425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:26.004618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:26.004668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:26.004703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:26.004773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.010306Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:26.156704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:26.156971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.157244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:26.157299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:26.157581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:26.157657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:26.164354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:26.164583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:26.164784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.164841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:26.164881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:26.164916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:26.168057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.168129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 
ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:26.168173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:26.170362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.170425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:26.170474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:26.170548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:26.174650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:26.177531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:26.177720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:26.178810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:26.178959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:26.179012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:26.179378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:26.179448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:26.179645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:26.179743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:26.184417Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:26.184477Z node 1 :FLAT_TX_SCHEMESHARD ... blet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:38 tabletId 72075186233409583 2025-06-24T21:08:27.207077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:8 2025-06-24T21:08:27.207100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:8 tabletId 72075186233409553 2025-06-24T21:08:27.211553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:12 2025-06-24T21:08:27.211613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:12 tabletId 72075186233409557 2025-06-24T21:08:27.211718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-24T21:08:27.211741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186233409552 2025-06-24T21:08:27.211806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:16 2025-06-24T21:08:27.211830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:16 tabletId 72075186233409561 2025-06-24T21:08:27.211978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:25 2025-06-24T21:08:27.212006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:25 tabletId 72075186233409570 2025-06-24T21:08:27.213062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:20 2025-06-24T21:08:27.213104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:20 tabletId 72075186233409565 2025-06-24T21:08:27.213182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:29 2025-06-24T21:08:27.213227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:29 tabletId 72075186233409574 2025-06-24T21:08:27.213430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:33 2025-06-24T21:08:27.213459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:33 tabletId 72075186233409578 2025-06-24T21:08:27.215098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:08:27.215176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T21:08:27.217484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:37 2025-06-24T21:08:27.217526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:37 tabletId 72075186233409582 
2025-06-24T21:08:27.218081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:42 2025-06-24T21:08:27.218114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:42 tabletId 72075186233409587 2025-06-24T21:08:27.219111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-24T21:08:27.219172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-24T21:08:27.219312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:11 2025-06-24T21:08:27.219354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:11 tabletId 72075186233409556 2025-06-24T21:08:27.219466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:15 2025-06-24T21:08:27.219492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:15 tabletId 72075186233409560 2025-06-24T21:08:27.219550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:19 2025-06-24T21:08:27.219586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:19 tabletId 72075186233409564 2025-06-24T21:08:27.219699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:24 2025-06-24T21:08:27.219724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:24 tabletId 72075186233409569 2025-06-24T21:08:27.219800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:23 2025-06-24T21:08:27.219822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:23 tabletId 72075186233409568 2025-06-24T21:08:27.220837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:28 2025-06-24T21:08:27.220875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:28 tabletId 72075186233409573 2025-06-24T21:08:27.220954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:32 2025-06-24T21:08:27.220977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:32 tabletId 72075186233409577 2025-06-24T21:08:27.221047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:08:27.221074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T21:08:27.221194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:36 2025-06-24T21:08:27.221240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 
72057594046678944:36 tabletId 72075186233409581 2025-06-24T21:08:27.221375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:27.221532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:27.221604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:27.221653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:27.221764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:27.224804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-24T21:08:27.225030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T21:08:27.225071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T21:08:27.225467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T21:08:27.225550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T21:08:27.225582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:2062:3664] TestWaitNotification: OK eventTxId 103 2025-06-24T21:08:27.226080Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/Solomon" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:27.226295Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/Solomon" took 209us result status StatusPathDoesNotExist 2025-06-24T21:08:27.226482Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/Solomon\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/Solomon" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: 
"root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:08:27.227093Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:27.227308Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 196us result status StatusPathDoesNotExist 2025-06-24T21:08:27.227463Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::Delete [GOOD] >> TSchemeShardSubDomainTest::SchemeLimitsRejectsWithIndexedTables >> TSchemeShardSubDomainTest::SimultaneousCreateTableForceDrop [GOOD] >> TSchemeShardSubDomainTest::DeleteAndRestart [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DifferentQuotasInteraction [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:24.052803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:24.052904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:24.052947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:24.052990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:24.053037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:24.053067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:24.053138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: 
BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:24.053232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:24.054049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:24.054436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:24.144473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:24.144533Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:24.160939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:24.161341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:24.161537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:24.169556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:24.169772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:24.170472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.170806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:24.174193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.174410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:24.175697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:24.175765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.176032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:24.176088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:24.176141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:24.176232Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.183521Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for 
TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:24.329953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:24.330185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.330469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:24.330523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:24.330767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:24.330839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:24.339111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.339325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:24.339522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.339582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:24.339632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:24.339673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:24.343467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.343552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:24.343607Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:24.345525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.345570Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.345620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.345685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:24.357298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:24.359108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:24.359272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:24.360196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.360324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:24.360368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.360652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:24.360698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.360846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:24.360920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:24.362626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:24.362669Z node 1 :FLAT_TX_SCHEMESHARD D ... 
4046678944 2025-06-24T21:08:27.635110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:08:27.635346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:27.635394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:27.635606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:08:27.635793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:27.635849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-24T21:08:27.635896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-24T21:08:27.636240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:08:27.636292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 103:0 ProgressState at tablet: 72057594046678944 2025-06-24T21:08:27.636376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:08:27.636417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 103:0, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-06-24T21:08:27.636456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 103:0 129 -> 240 2025-06-24T21:08:27.637244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:08:27.637328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:08:27.637357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-24T21:08:27.637396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8 2025-06-24T21:08:27.637448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 
2025-06-24T21:08:27.638472Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:08:27.638563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:08:27.638600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-24T21:08:27.638631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-24T21:08:27.638658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:08:27.638726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-24T21:08:27.642041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:08:27.642101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 103:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:27.642446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:08:27.642601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T21:08:27.642645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T21:08:27.642685Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T21:08:27.642716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T21:08:27.642746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-24T21:08:27.642812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:410:2376] message: TxId: 103 2025-06-24T21:08:27.642852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T21:08:27.642898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:0 2025-06-24T21:08:27.642935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T21:08:27.643037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, 
LocalPathId: 3] was 2 2025-06-24T21:08:27.643495Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:27.643523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:27.644336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:08:27.646002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:08:27.647107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:27.647167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 0, path id: 2 2025-06-24T21:08:27.647557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T21:08:27.647592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:732:2665] 2025-06-24T21:08:27.647904Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 9 PathOwnerId: 72057594046678944, cookie: 0 TestWaitNotification: OK eventTxId 103 2025-06-24T21:08:27.649078Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:27.649329Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDatabase" took 218us result status StatusSuccess 2025-06-24T21:08:27.649731Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "SomeDatabase" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "fast" Kind: "fast_kind" } StoragePools { Name: "large" Kind: "large_kind" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } 
PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "large_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "fast_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 2800 data_size_soft_quota: 2200 storage_quotas { unit_kind: "fast_kind" data_size_hard_quota: 600 data_size_soft_quota: 500 } storage_quotas { unit_kind: "large_kind" data_size_hard_quota: 2200 data_size_soft_quota: 1700 } } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-true [GOOD] >> TSchemeShardSubDomainTest::CreateWithoutTimeCastBuckets [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::UpdateOnNullInComplexFk [GOOD] Test command err: Trying to start YDB, gRPC: 12732, MsgBus: 23092 2025-06-24T21:07:56.289544Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625635515214439:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:56.289590Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045b0/r3tmp/tmpBokzo9/pdisk_1.dat 2025-06-24T21:07:56.715701Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12732, node 1 2025-06-24T21:07:56.722683Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:56.722833Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:56.724470Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:56.845911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:56.845931Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:56.845939Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:56.846100Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23092 2025-06-24T21:07:57.311875Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 
TClient is connected to server localhost:23092 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:57.499696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:57.535764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:07:57.673558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:57.849575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:07:57.924233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.735709Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625648400117941:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:59.735826Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:00.044143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:00.081158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:00.115011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:00.150134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:00.182383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:00.255370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:00.298101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:00.368725Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625652695085902:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:00.368823Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:00.369044Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625652695085907:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:00.372949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:00.386596Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625652695085909:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:08:00.473064Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625652695085960:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:01.291253Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625635515214439:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:01.291332Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:01.670823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... Trying to start YDB, gRPC: 23445, MsgBus: 23085 2025-06-24T21:08:05.971388Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625672625664907:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:05.971434Z nod ... 5-06-24T21:08:07.063701Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:09.323101Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625689805535682:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:09.323220Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:09.391186Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:09.466812Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:09.547534Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:09.619708Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:09.664811Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:09.750543Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:09.831444Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:09.907833Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625689805536353:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:09.907946Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:09.908260Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625689805536358:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:09.912359Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:09.925148Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625689805536360:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:08:10.020915Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625694100503709:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:10.973986Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625672625664907:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:10.974073Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:11.114712Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:16.835094Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhwa99z84gpbxy4yc5aaxzy, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NmZiNTYwNzUtNzI1ODdmOTUtMmU2OTY0ZTAtNGRiNTJiM2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-24T21:08:16.845938Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NmZiNTYwNzUtNzI1ODdmOTUtMmU2OTY0ZTAtNGRiNTJiM2M=, ActorId: [2:7519625698395472061:2532], ActorState: ExecuteState, TraceId: 01jyhwa99z84gpbxy4yc5aaxzy, Create QueryResponse for error on request, msg: 2025-06-24T21:08:21.067257Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:08:21.067301Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:21.532607Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037928 not found 2025-06-24T21:08:21.532658Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037939 not found 2025-06-24T21:08:21.532674Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037940 not found 2025-06-24T21:08:21.532692Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037929 not found 2025-06-24T21:08:21.564279Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037932 not found 2025-06-24T21:08:21.567840Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037926 not found 2025-06-24T21:08:21.574296Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037931 not found 2025-06-24T21:08:21.576555Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle 
TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037937 not found 2025-06-24T21:08:21.576592Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037938 not found 2025-06-24T21:08:21.579809Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037934 not found 2025-06-24T21:08:21.579871Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037933 not found 2025-06-24T21:08:21.581250Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037935 not found 2025-06-24T21:08:24.549897Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhwaggv4jxjwadf7atex1fs, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NmZiNTYwNzUtNzI1ODdmOTUtMmU2OTY0ZTAtNGRiNTJiM2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-24T21:08:24.550208Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NmZiNTYwNzUtNzI1ODdmOTUtMmU2OTY0ZTAtNGRiNTJiM2M=, ActorId: [2:7519625698395472061:2532], ActorState: ExecuteState, TraceId: 01jyhwaggv4jxjwadf7atex1fs, Create QueryResponse for error on request, msg: 2025-06-24T21:08:25.610973Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhwahxb5t10s707qhmhegzq, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NmZiNTYwNzUtNzI1ODdmOTUtMmU2OTY0ZTAtNGRiNTJiM2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-24T21:08:25.611396Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NmZiNTYwNzUtNzI1ODdmOTUtMmU2OTY0ZTAtNGRiNTJiM2M=, ActorId: [2:7519625698395472061:2532], ActorState: ExecuteState, TraceId: 01jyhwahxb5t10s707qhmhegzq, Create QueryResponse for error on request, msg: 2025-06-24T21:08:26.773803Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhwam0763jbdxxq957r407y, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NmZiNTYwNzUtNzI1ODdmOTUtMmU2OTY0ZTAtNGRiNTJiM2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 
2025-06-24T21:08:26.774092Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NmZiNTYwNzUtNzI1ODdmOTUtMmU2OTY0ZTAtNGRiNTJiM2M=, ActorId: [2:7519625698395472061:2532], ActorState: ExecuteState, TraceId: 01jyhwam0763jbdxxq957r407y, Create QueryResponse for error on request, msg: >> TSchemeShardSubDomainTest::CreateWithoutPlanResolution >> TSchemeShardSubDomainTest::DeclareDefineAndDelete >> TSchemeShardSubDomainTest::SimultaneousDefine >> TSchemeShardSubDomainTest::SimultaneousDeclareAndDefine [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::Delete [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:27.810899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:27.811015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:27.811056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:27.811095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:27.811177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:27.811211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:27.811281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:27.811369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:27.812161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:27.812573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:27.902379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:27.902447Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:27.933576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:27.934004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:27.934197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:27.947830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:27.948031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:27.948689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:27.948992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:27.954673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:27.954933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:27.956198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:27.956267Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:27.956531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:27.956592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:27.956645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:27.956747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:27.963540Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:28.084923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:28.085141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.085425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:28.085488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:28.085755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, 
reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:28.085848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:28.088403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.088623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:28.088837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.088891Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:28.088935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:28.088974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:28.091079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.091176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:28.091225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:28.093108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.093170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.093218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.093280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:28.110644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:28.116100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:28.116304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation 
RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:28.117366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.117694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:28.117754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.118118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:28.118180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.118374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:28.118452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:28.121504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:28.121550Z node 1 :FLAT_TX_SCHEMESHARD ... 
SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:08:28.284444Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409548 2025-06-24T21:08:28.285000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-24T21:08:28.285174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409547 2025-06-24T21:08:28.286002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T21:08:28.286129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:28.286891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:28.286935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:28.287041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:08:28.287785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:28.287897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:28.287944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:28.288002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:28.288521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:08:28.288569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T21:08:28.290560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T21:08:28.290599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-24T21:08:28.290667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 
2025-06-24T21:08:28.290715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T21:08:28.290882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:28.291006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:08:28.291266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:08:28.291309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T21:08:28.291709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:08:28.291795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:08:28.291842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:499:2451] TestWaitNotification: OK eventTxId 101 2025-06-24T21:08:28.292339Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:28.292517Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 194us result status StatusPathDoesNotExist 2025-06-24T21:08:28.292700Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:08:28.293282Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:28.293483Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 175us result status StatusSuccess 2025-06-24T21:08:28.293905Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted wait until 72075186233409548 is deleted 2025-06-24T21:08:28.294392Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-24T21:08:28.294497Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 2025-06-24T21:08:28.294550Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409548 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 Deleted tabletId 72075186233409548 2025-06-24T21:08:28.294967Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:28.295127Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 171us result status StatusSuccess 2025-06-24T21:08:28.295555Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeleteAndRestart [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:27.642520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:27.642607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:27.642659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:27.642700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:27.642740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:27.642766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:27.642830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:27.642907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-24T21:08:27.643648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:27.643980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:27.731514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:27.731578Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:27.750540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:27.750990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:27.751203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:27.759456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:27.759658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:27.760313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:27.760625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:27.763699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:27.763907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:27.765129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:27.765194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:27.765452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:27.765506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:27.765555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:27.765668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:27.772945Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:27.916335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:27.916620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:27.916952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:27.917020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:27.917509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:27.917592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:27.920254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:27.920463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:27.920697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:27.920764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:27.920811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:27.920845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:27.923239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:27.923323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:27.923368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:27.925709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:27.925762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:27.925812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:27.925878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:27.929726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:27.932010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:27.932210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:27.933271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:27.933428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:27.933485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:27.933814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:27.933886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:27.934097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:27.934201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:27.936583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:27.936644Z node 1 :FLAT_TX_SCHEMESHARD ... 
ySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:28.345622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:28.345683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:28.345717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:28.345752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:28.345801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:28.345858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:28.345957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:28.346736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:28.347064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:28.361861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:28.363098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:28.363281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:28.363533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:28.363593Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:28.363970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:28.364625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.364725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.364814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.365256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.365345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-24T21:08:28.365701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 
2025-06-24T21:08:28.365798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.365920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.366050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.366166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.366415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.366701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.366805Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.367242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.367333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.367550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.367677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.367784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.367968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.368060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.368195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.368460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.368532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.368641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.368698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.368761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.372680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:28.375507Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:28.375576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.376144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:28.376200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:28.376265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:28.377362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:625:2539] sender: [1:684:2058] recipient: [1:15:2062] 2025-06-24T21:08:28.409420Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:28.409619Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 202us result status StatusPathDoesNotExist 2025-06-24T21:08:28.409800Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:08:28.410399Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:28.410625Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 189us result status StatusSuccess 2025-06-24T21:08:28.411012Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateTableForceDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:27.547746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:27.547849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:27.547888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:27.547922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:27.547962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:27.548008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:27.548101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:27.548184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-06-24T21:08:27.548893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:27.549245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:27.621522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:27.621579Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:27.635415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:27.635790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:27.635944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:27.643795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:27.643969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:27.644478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:27.644749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:27.647344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:27.647585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:27.648674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:27.648737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:27.648948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:27.648992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:27.649034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:27.649125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:27.657468Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:27.778219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: 
"pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:27.778407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:27.778610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:27.778657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:27.778835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:27.778888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:27.781175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:27.781304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:27.781476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:27.781516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:27.781554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:27.781599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:27.783422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:27.783495Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:27.783532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:27.788085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:27.788148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:27.788201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:27.788260Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:27.791908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:27.794161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:27.794351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:27.795359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:27.795488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:27.795540Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:27.795902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:27.795963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:27.796133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:27.796217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:27.799739Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:27.799815Z node 1 :FLAT_TX_SCHEMESHARD ... 
T_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-24T21:08:28.306943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:28.307536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:08:28.307965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:28.308020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:28.308162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:08:28.311839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-24T21:08:28.311918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-24T21:08:28.312029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-24T21:08:28.312076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186233409552 2025-06-24T21:08:28.312195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5819: Failed to connect, to tablet: 72075186233409552, at schemeshard: 72057594046678944 2025-06-24T21:08:28.312296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:28.312420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:08:28.312451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T21:08:28.313162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:28.313230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:28.313322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:28.316072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T21:08:28.316120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 
2025-06-24T21:08:28.316202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-24T21:08:28.316226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-24T21:08:28.316286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:08:28.316323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T21:08:28.316416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-24T21:08:28.316472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-24T21:08:28.316610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:28.318107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 2025-06-24T21:08:28.318397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:08:28.318450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-24T21:08:28.318557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:08:28.318578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:08:28.319033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:08:28.319199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:08:28.319248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:730:2616] 2025-06-24T21:08:28.319496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:08:28.319572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:08:28.319597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:730:2616] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 2025-06-24T21:08:28.320071Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at 
schemeshard: 72057594046678944 2025-06-24T21:08:28.320315Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 224us result status StatusPathDoesNotExist 2025-06-24T21:08:28.320493Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:08:28.321130Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:28.321312Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 193us result status StatusPathDoesNotExist 2025-06-24T21:08:28.321487Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/table_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/table_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:08:28.321947Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:28.322115Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 191us result status StatusSuccess 2025-06-24T21:08:28.322597Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:28.086504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:28.086635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:28.086685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:28.086730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:28.086786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:28.086823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:28.086916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:28.087036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-06-24T21:08:28.088051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:28.088487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:28.182303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:28.182370Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:28.203724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:28.204194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:28.204394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:28.219302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:28.219563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:28.220307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.220689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:28.224290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.224549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:28.225853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:28.225946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.226221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:28.226281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:28.226331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:28.226431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.234166Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:28.395894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: 
"pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:28.396092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.396317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:28.396365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:28.396634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:28.396719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:28.398880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.399043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:28.399224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.399271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:28.399303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:28.399338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:28.401113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.401196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:28.401232Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:28.402730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.402765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.402807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.402855Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:28.405893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:28.407535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:28.407694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:28.408493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.408615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:28.408659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.408946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:28.408994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.409124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:28.409178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:28.410969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:28.411013Z node 1 :FLAT_TX_SCHEMESHARD ... 
meBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:28.450922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T21:08:28.452221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:28.452258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:28.452431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:28.452502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.452537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-24T21:08:28.452569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-24T21:08:28.452897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.452932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:08:28.453015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:28.453042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:28.453074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:28.453158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:28.453195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-24T21:08:28.453228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:28.453257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:08:28.453299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:08:28.453367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:08:28.453405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-24T21:08:28.453438Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-24T21:08:28.453460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-24T21:08:28.453871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:28.453993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:28.454032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:28.454086Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T21:08:28.454146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:28.454687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:28.454752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:28.454774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:28.454796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:08:28.454816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:28.454865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T21:08:28.457693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:28.457996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-24T21:08:28.460450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: 
ESchemeOpAlterExtSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "SomeDatabase" TimeCastBucketsPerMediator: 2 ExternalSchemeShard: true DatabaseQuotas { storage_quotas { unit_kind: "nonexistent_storage_kind" data_size_hard_quota: 1 } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:28.460601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1079: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 102:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "SomeDatabase" TimeCastBucketsPerMediator: 2 ExternalSchemeShard: true DatabaseQuotas { storage_quotas { unit_kind: "nonexistent_storage_kind" data_size_hard_quota: 1 } } } 2025-06-24T21:08:28.460634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1085: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 102:0, path /MyRoot/SomeDatabase 2025-06-24T21:08:28.460745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 102:0, explain: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain SomeDatabase has the specified kinds. Existing storage kinds are: , at schemeshard: 72057594046678944 2025-06-24T21:08:28.460783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain SomeDatabase has the specified kinds. Existing storage kinds are: , at schemeshard: 72057594046678944 2025-06-24T21:08:28.462690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain SomeDatabase has the specified kinds. Existing storage kinds are: " TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:28.462959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain SomeDatabase has the specified kinds. 
Existing storage kinds are: , operation: ALTER DATABASE, path: /MyRoot/SomeDatabase TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 2025-06-24T21:08:28.463282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:08:28.463323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-24T21:08:28.463387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:08:28.463403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:08:28.463809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:08:28.463883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:08:28.463914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:08:28.463946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:311:2300] 2025-06-24T21:08:28.464064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:08:28.464082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:311:2300] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 >> TSchemeShardSubDomainTest::DeclareAndForbidTableInside >> TSchemeShardSubDomainTest::SetSchemeLimits >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomainWithStoragePools [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateWithoutTimeCastBuckets [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:28.207540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:28.207634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:28.207673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:28.207714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:28.207758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type 
TxMergeTablePartition, limit 10000 2025-06-24T21:08:28.207789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:28.207967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:28.208052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:28.208823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:28.209173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:28.281628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:28.281680Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:28.300826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:28.301131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:28.301306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:28.314489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:28.314673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:28.315290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.315571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:28.318227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.318413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:28.319615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:28.319680Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.319920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:28.319965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:28.320005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 
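[Editor's note] The trace above shows an ALTER DATABASE (AlterExtSubDomain) being rejected with StatusInvalidParameter because the request sets a storage quota for "nonexistent_storage_kind" while the subdomain has no storage pool of that kind ("Existing storage kinds are: " is empty). The sketch below is only an illustrative reconstruction of that validation rule as observed in the log; the type and function names are hypothetical and are not YDB's real API.

```cpp
// Minimal sketch of the quota-kind check the trace demonstrates:
// every quota's unit_kind must match the Kind of some storage pool
// already attached to the subdomain, otherwise the request is rejected.
#include <iostream>
#include <optional>
#include <set>
#include <sstream>
#include <string>
#include <vector>

struct TStorageQuota {
    std::string UnitKind;                     // e.g. "nonexistent_storage_kind"
    unsigned long long DataSizeHardQuota = 0; // e.g. 1, as in the trace
};

struct TStoragePool {
    std::string Name;
    std::string Kind;                         // e.g. "pool-kind-1"
};

// Returns an error text in the spirit of the log message, or nullopt on success.
std::optional<std::string> ValidateStorageQuotas(
        const std::vector<TStorageQuota>& quotas,
        const std::vector<TStoragePool>& existingPools) {
    std::set<std::string> existingKinds;
    for (const auto& pool : existingPools) {
        existingKinds.insert(pool.Kind);
    }

    std::vector<std::string> unknownKinds;
    for (const auto& quota : quotas) {
        if (!existingKinds.count(quota.UnitKind)) {
            unknownKinds.push_back(quota.UnitKind);
        }
    }

    if (unknownKinds.empty()) {
        return std::nullopt; // every quota kind is backed by a storage pool
    }

    std::ostringstream err;
    err << "cannot set storage quotas of the following kinds: ";
    for (size_t i = 0; i < unknownKinds.size(); ++i) {
        err << (i ? ", " : "") << unknownKinds[i];
    }
    err << ", because no storage pool in the subdomain has the specified kinds";
    return err.str();
}

int main() {
    // The subdomain in the trace has no storage pools at all, so a quota for
    // "nonexistent_storage_kind" must be rejected, mirroring the audit record.
    auto error = ValidateStorageQuotas({{"nonexistent_storage_kind", 1}},
                                       /* existingPools = */ {});
    std::cout << (error ? *error : "OK") << "\n";
    return 0;
}
```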
2025-06-24T21:08:28.320112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.326370Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:28.458871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:28.459042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.459270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:28.459313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:28.459571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:28.459657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:28.461982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.462174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:28.462369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.462421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:28.462486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:28.462519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:28.464461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.464525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:28.464563Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:28.466107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.466143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.466178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.466242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:28.468894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:28.470693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:28.470850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:28.471758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.471856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:28.471899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.472210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:28.472254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.472392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:28.472447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:28.474388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
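[Editor's note] The "Change state for txid 1:0" lines in this trace walk a single suboperation through the states 2 -> 3 -> 128 -> 240, with TCreateParts, TConfigureParts, TPropose (waiting for TEvOperationPlan from the coordinator) and finally TDone handling the respective states; the TDone and publication steps appear immediately below. The enum and helper here are hypothetical names used only to summarize that observed sequence, not YDB's real types.

```cpp
// Illustrative state walk matching the transitions logged for txid 1:0.
#include <cstdint>
#include <iostream>

enum class ESubDomainTxState : std::uint16_t {
    CreateParts    = 2,    // TCreateParts ProgressState ("no shards to create")
    ConfigureParts = 3,    // NSubDomainState::TConfigureParts ProgressState
    Propose        = 128,  // NSubDomainState::TPropose, handles TEvOperationPlan
    Done           = 240,  // TDone ProgressState, "Part operation is done"
};

// Advances to the next state in the order observed in the trace.
ESubDomainTxState Next(ESubDomainTxState s) {
    switch (s) {
        case ESubDomainTxState::CreateParts:    return ESubDomainTxState::ConfigureParts;
        case ESubDomainTxState::ConfigureParts: return ESubDomainTxState::Propose;
        case ESubDomainTxState::Propose:        return ESubDomainTxState::Done;
        case ESubDomainTxState::Done:           return ESubDomainTxState::Done;
    }
    return ESubDomainTxState::Done; // unreachable; silences compiler warnings
}

int main() {
    // Prints 2, 3, 128, 240 -- the same sequence the schemeshard logs above.
    for (auto s = ESubDomainTxState::CreateParts; ; s = Next(s)) {
        std::cout << static_cast<int>(s) << "\n";
        if (s == ESubDomainTxState::Done) {
            break;
        }
    }
    return 0;
}
```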
2025-06-24T21:08:28.474431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:28.474584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.474619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-24T21:08:28.474884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.474918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T21:08:28.474991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:08:28.475013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:08:28.475048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:08:28.475078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:08:28.475131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T21:08:28.475198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:08:28.475248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T21:08:28.475277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T21:08:28.475349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:28.475394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T21:08:28.475431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T21:08:28.477138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:08:28.477257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:08:28.477294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T21:08:28.477330Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T21:08:28.477395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:28.477500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T21:08:28.480842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T21:08:28.481358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 100 2025-06-24T21:08:28.482008Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:271:2260] Bootstrap 2025-06-24T21:08:28.501853Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:271:2260] Become StateWork (SchemeCache [1:276:2265]) 2025-06-24T21:08:28.504479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "USER_0" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 100 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:28.504740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /MyRoot/USER_0, opId: 100:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.504837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 100:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: TimeCastBucketsPerMediator is 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.505661Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:08:28.508737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 100, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: TimeCastBucketsPerMediator is 0" TxId: 100 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:28.509003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 100, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: TimeCastBucketsPerMediator is 0, operation: CREATE DATABASE, path: /MyRoot/USER_0 2025-06-24T21:08:28.509494Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-24T21:08:28.509730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-24T21:08:28.509785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-24T21:08:28.510206Z 
node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-24T21:08:28.510294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-24T21:08:28.510329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:286:2275] TestWaitNotification: OK eventTxId 100 2025-06-24T21:08:28.510782Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:28.510960Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 187us result status StatusPathDoesNotExist 2025-06-24T21:08:28.511129Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-true [GOOD] >> KqpMultishardIndex::DuplicateUpsert [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousDeclareAndDefine [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:28.545454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:28.545536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:28.545585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:28.545620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:28.545656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:28.545675Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:28.545734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:28.545803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:28.546486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:28.546799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:28.629777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:28.629844Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:28.647058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:28.647471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:28.647753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:28.656992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:28.657234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:28.658017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.658352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:28.661575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.661750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:28.662841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:28.662892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.663093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:28.663136Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:28.663200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:28.663276Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.670429Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:28.820046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:28.820272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.820547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:28.820610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:28.820935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:28.821016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:28.823642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.823847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:28.824069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.824122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:28.824168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:28.824205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:28.826438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.826520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:28.826569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for 
txid 1:0 3 -> 128 2025-06-24T21:08:28.828539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.828596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.828651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.828715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:28.832896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:28.835130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:28.835352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:28.836356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.836515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:28.836583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.836906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:28.836965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.837161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:28.837243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:28.839502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:28.839575Z node 1 :FLAT_TX_SCHEMESHARD ... 
ode 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 100:0, at tablet# 72057594046678944 2025-06-24T21:08:28.893330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 100:0 128 -> 240 2025-06-24T21:08:28.893419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 100:0, at tablet# 72057594046678944 2025-06-24T21:08:28.893547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:28.893612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:28.893663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 100 2025-06-24T21:08:28.895495Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:28.895538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:28.895709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:28.895811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.895851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-24T21:08:28.895912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-06-24T21:08:28.896131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.896184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-24T21:08:28.896301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#100:0 progress is 1/1 2025-06-24T21:08:28.896344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:28.896391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#100:0 progress is 1/1 2025-06-24T21:08:28.896423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:28.896463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 
100, ready parts: 1/1, is published: false 2025-06-24T21:08:28.896511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:28.896548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 100:0 2025-06-24T21:08:28.896583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 100:0 2025-06-24T21:08:28.896687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:08:28.896734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-06-24T21:08:28.896769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-24T21:08:28.896802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-24T21:08:28.897673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:28.897773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:28.897811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-24T21:08:28.897856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T21:08:28.897900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:28.898782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:28.898869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:28.898903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-24T21:08:28.898934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:08:28.898965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, 
LocalPathId: 2] was 2 2025-06-24T21:08:28.899037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-24T21:08:28.903544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-24T21:08:28.904003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestModificationResults wait txId: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 100 2025-06-24T21:08:28.904312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-24T21:08:28.904361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-06-24T21:08:28.904450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:08:28.904474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T21:08:28.904954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-24T21:08:28.905062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-24T21:08:28.905101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:315:2304] 2025-06-24T21:08:28.905380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:08:28.905455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:08:28.905484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:315:2304] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-06-24T21:08:28.905922Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:28.906167Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 279us result status StatusSuccess 2025-06-24T21:08:28.906711Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { 
GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain-EnableSeparateQuotas >> TSchemeShardSubDomainTest::CreateWithoutPlanResolution [GOOD] >> TSchemeShardSubDomainTest::SimultaneousDefine [GOOD] >> TSchemeShardSubDomainTest::DeclareDefineAndDelete [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomainWithStoragePools [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:28.446613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:28.446705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:28.446748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:28.446786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:28.446830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:28.446864Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:28.446932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:28.447023Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:28.447830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:28.448177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:28.531411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:28.531468Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:28.547312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:28.547704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:28.547898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:28.555813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:28.556016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:28.556600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.556884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:28.559786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.560012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:28.561188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:28.561256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.561506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:28.561554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:28.561617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:28.561711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.568684Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:28.727723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:28.727961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.728262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:28.728317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:28.728587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:28.728663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:28.731934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.732154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:28.732378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.732435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:28.732470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:28.732533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:28.734980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.735070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:28.735117Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:28.737321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.737375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-24T21:08:28.737428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.737511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:28.741346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:28.743713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:28.743922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:28.744951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.745094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:28.745165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.745517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:28.745580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.745770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:28.745872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:28.748408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:28.748460Z node 1 :FLAT_TX_SCHEMESHARD ... 
06-24T21:08:29.263769Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:29.264031Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 286us result status StatusSuccess 2025-06-24T21:08:29.264630Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "dir_0" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "name_USER_0_kind_hdd-1" Kind: "hdd-1" } StoragePools { Name: "name_USER_0_kind_hdd-2" Kind: "hdd-2" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:29.265247Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:29.265520Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 233us 
result status StatusSuccess 2025-06-24T21:08:29.265986Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table_0" PathDescription { Self { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:29.266548Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:29.266716Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir_0" took 169us result status StatusSuccess 2025-06-24T21:08:29.267109Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dir_0" PathDescription { Self { Name: "dir_0" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 
102 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "table_1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 4 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:29.267691Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir_0/table_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:29.267904Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir_0/table_1" took 224us result status StatusSuccess 2025-06-24T21:08:29.268298Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dir_0/table_1" PathDescription { Self { Name: "table_1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_1" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 
0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::SchemeLimitsRejectsWithIndexedTables [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:24.345717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:24.345791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:24.345819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:24.345845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:24.345876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:24.345897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:24.345960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:24.346027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, 
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:24.346593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:24.346848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:24.430104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:24.430146Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:24.446607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:24.446936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:24.447116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:24.454541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:24.454751Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:24.455371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.455682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:24.458644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.458838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:24.459817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:24.459883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:24.460115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:24.460155Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:24.460190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:24.460265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.466241Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:24.587828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } 
StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:24.588039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.588268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:24.588314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:24.588582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:24.588668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:24.592058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.592260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:24.592464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.592542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:24.592578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:24.592612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:24.595280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.595360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:24.595396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:24.597693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.597759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:24.597805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-24T21:08:24.597868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:24.601550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:24.603724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:24.603930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:24.604799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:24.604919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:24.604975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.605264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:24.605311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:24.605517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:24.605616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:24.607609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:24.607665Z node 1 :FLAT_TX_SCHEMESHARD D ... 
ply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-24T21:08:29.335667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-24T21:08:29.337259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-24T21:08:29.337317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-24T21:08:29.337527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 2] 2025-06-24T21:08:29.337714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-24T21:08:29.337764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:450:2400], at schemeshard: 72075186233409546, txId: 104, path id: 1 2025-06-24T21:08:29.337828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:450:2400], at schemeshard: 72075186233409546, txId: 104, path id: 2 2025-06-24T21:08:29.338188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-24T21:08:29.338232Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 104:0 ProgressState at tablet: 72075186233409546 2025-06-24T21:08:29.338307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-24T21:08:29.338335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 104:0, datashard: 72075186233409549, at schemeshard: 72075186233409546 2025-06-24T21:08:29.338372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 104:0 129 -> 240 2025-06-24T21:08:29.339359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409546, cookie: 104 2025-06-24T21:08:29.339441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409546, cookie: 104 2025-06-24T21:08:29.339471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-06-24T21:08:29.339507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 9 2025-06-24T21:08:29.339545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for 
pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 5 2025-06-24T21:08:29.340596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-06-24T21:08:29.340650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-06-24T21:08:29.340668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-06-24T21:08:29.340687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 18446744073709551615 2025-06-24T21:08:29.340717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 4 2025-06-24T21:08:29.340768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-06-24T21:08:29.343737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-24T21:08:29.343793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 104:0 ProgressState, at schemeshard: 72075186233409546 2025-06-24T21:08:29.344123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-06-24T21:08:29.344311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:08:29.344351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:08:29.344388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:08:29.344430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:08:29.344455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-06-24T21:08:29.344516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:555:2492] message: TxId: 104 2025-06-24T21:08:29.344562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:08:29.344621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:0 2025-06-24T21:08:29.344654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:0 2025-06-24T21:08:29.344752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove 
txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 2 2025-06-24T21:08:29.345463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-24T21:08:29.345494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 0, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-24T21:08:29.346094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-06-24T21:08:29.346452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-06-24T21:08:29.347606Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-24T21:08:29.347655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:450:2400], at schemeshard: 72075186233409546, txId: 0, path id: 1 2025-06-24T21:08:29.347764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T21:08:29.347793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:748:2662] 2025-06-24T21:08:29.348338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 10 PathOwnerId: 72075186233409546, cookie: 0 TestWaitNotification: OK eventTxId 104 2025-06-24T21:08:29.349203Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409546 2025-06-24T21:08:29.349414Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409546 describe path "/MyRoot/SomeDatabase" took 182us result status StatusSuccess 2025-06-24T21:08:29.349803Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "MyRoot/SomeDatabase" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 10 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 10 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 2 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "quoted_storage_pool" Kind: "quoted_storage_pool_kind" } StoragePools { Name: 
"unquoted_storage_pool" Kind: "unquoted_storage_pool_kind" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "unquoted_storage_pool_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "quoted_storage_pool_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { storage_quotas { unit_kind: "quoted_storage_pool_kind" data_size_hard_quota: 1 } } SecurityState { Audience: "/MyRoot/SomeDatabase" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 >> TSchemeShardSubDomainTest::Restart >> TSchemeShardSubDomainTest::DeclareAndForbidTableInside [GOOD] >> StatisticsSaveLoad::Simple [GOOD] >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTable >> TSchemeShardSubDomainTest::SetSchemeLimits [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousDefine [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:29.535178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:29.535276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:29.535325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:29.535369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:29.535409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:29.535438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:29.535495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:29.535574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, 
InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:29.536322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:29.536647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:29.618491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:29.618548Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:29.634594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:29.635072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:29.635264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:29.643354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:29.643564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:29.644215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.644555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:29.647812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:29.648030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:29.649189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:29.649253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:29.649517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:29.649565Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:29.649607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:29.649692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.657038Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:29.775891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:29.776074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.776308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:29.776361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:29.776609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:29.776680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:29.779008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.779199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:29.779369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.779418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:29.779454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:29.779487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:29.781498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.781563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:29.781616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:29.783366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.783411Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.783452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:29.783511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:29.787301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:29.789130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:29.789312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:29.790217Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.790334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:29.790382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:29.790715Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:29.790771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:29.790947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:29.791017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:29.792901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:29.792946Z node 1 :FLAT_TX_SCHEMESHARD ... 
:636: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Status: SUCCESS OnTabletId: 72075186233409548 2025-06-24T21:08:29.922900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:84: NSubDomainState::TConfigureParts operationId# 101:0 HandleReply TEvConfigureStatus operationId:101:0 at schemeshard:72057594046678944 2025-06-24T21:08:29.922937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:120: NSubDomainState::TConfigureParts operationId# 101:0 Got OK TEvConfigureStatus from tablet# 72075186233409548 shardIdx# 72057594046678944:3 at schemeshard# 72057594046678944 2025-06-24T21:08:29.922980Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 3 -> 128 2025-06-24T21:08:29.925195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.925320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.925356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.925391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 101:0, at tablet# 72057594046678944 2025-06-24T21:08:29.925435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 101 ready parts: 1/1 2025-06-24T21:08:29.925564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 101 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:29.927190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 101:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2025-06-24T21:08:29.927312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000003 2025-06-24T21:08:29.927594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.927683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:29.927740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-06-24T21:08:29.927969Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2559: Change state for txid 101:0 128 -> 240 2025-06-24T21:08:29.928016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-06-24T21:08:29.928170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:08:29.928246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T21:08:29.930119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:29.930184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:29.930339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:29.930384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-24T21:08:29.930694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.930736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:08:29.930832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:29.930863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:29.930896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:29.930924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:29.930964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-24T21:08:29.930997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:29.931047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:08:29.931082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:08:29.931290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-24T21:08:29.931332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 101, publications: 1, subscribers: 1 2025-06-24T21:08:29.931362Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 4 2025-06-24T21:08:29.931866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:29.931956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:29.931993Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:29.932054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-24T21:08:29.932093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:08:29.932169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 1 2025-06-24T21:08:29.932230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:312:2301] 2025-06-24T21:08:29.935027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:29.935111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:08:29.935167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:319:2308] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-06-24T21:08:29.935709Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:29.935952Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 256us result status StatusSuccess 2025-06-24T21:08:29.936376Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 2 SecurityStateVersion: 0 } } DomainDescription { 
SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateWithoutPlanResolution [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:29.476544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:29.476645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:29.476684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:29.476725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:29.476771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:29.476799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:29.476867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:29.476965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:29.477785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: 
ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:29.478195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:29.567216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:29.567277Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:29.583067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:29.583396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:29.583553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:29.590327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:29.590545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:29.591021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.591369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:29.594532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:29.594771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:29.596056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:29.596141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:29.596407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:29.596465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:29.596516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:29.596614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.604100Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:29.756170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:29.756398Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.756642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:29.756690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:29.756893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:29.756949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:29.759416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.759558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:29.759765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.759806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:29.759853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:29.759890Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:29.765423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.765517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:29.765583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:29.767625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.767671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.767711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:29.767773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 
2025-06-24T21:08:29.771684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:29.773878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:29.774024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:29.774818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.774965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:29.775015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:29.775312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:29.775355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:29.775521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:29.775622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:29.778240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:29.778294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:29.778497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:29.778545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-24T21:08:29.778892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-24T21:08:29.778935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T21:08:29.779033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:08:29.779066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:08:29.779108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:08:29.779164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:08:29.779217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T21:08:29.779263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:08:29.779315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T21:08:29.779347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T21:08:29.779428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:29.779475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T21:08:29.779519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T21:08:29.781547Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:08:29.781692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:08:29.781733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T21:08:29.781774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T21:08:29.781838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:29.781970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T21:08:29.788019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T21:08:29.788571Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 100 2025-06-24T21:08:29.789305Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:271:2260] Bootstrap 2025-06-24T21:08:29.812654Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:271:2260] Become StateWork (SchemeCache [1:276:2265]) 2025-06-24T21:08:29.815792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateSubDomain SubDomain { Coordinators: 1 Mediators: 1 Name: "USER_0" TimeCastBucketsPerMediator: 2 StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 100 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:29.816090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /MyRoot/USER_0, opId: 100:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.816195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 100:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: plan resolution is 0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.817132Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:08:29.820900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 100, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: plan resolution is 0" TxId: 100 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:29.821215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 100, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: plan resolution is 0, operation: CREATE DATABASE, path: /MyRoot/USER_0 2025-06-24T21:08:29.821797Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-24T21:08:29.822090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-24T21:08:29.822134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-24T21:08:29.822560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-24T21:08:29.822677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-24T21:08:29.822716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:286:2275] TestWaitNotification: OK eventTxId 100 2025-06-24T21:08:29.823262Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 
2025-06-24T21:08:29.823471Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 225us result status StatusPathDoesNotExist 2025-06-24T21:08:29.823684Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeclareDefineAndDelete [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:29.506374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:29.506483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:29.506536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:29.506567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:29.506611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:29.506645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:29.506707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:29.506779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:29.507548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:29.507897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:29.590009Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:29.590067Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:29.606540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:29.606985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:29.607184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:29.615840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:29.616067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:29.616817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.617399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:29.620775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:29.621028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:29.622246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:29.622317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:29.622605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:29.622660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:29.622703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:29.622799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.630377Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:29.773490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:29.773696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.774002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:29.774067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:29.774326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:29.774395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:29.776779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.776970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:29.777147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.777200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:29.777244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:29.777277Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:29.779323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.779395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:29.779471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:29.781309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.781356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.781407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:29.781468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:29.785259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 
2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:29.787331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:29.787537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:29.788566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.788701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:29.788767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:29.789076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:29.789132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:29.789313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:29.789400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:29.791576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:29.791623Z node 1 :FLAT_TX_SCHEMESHARD ... 
y parts: 1/1 2025-06-24T21:08:29.951785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-24T21:08:29.951830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:08:29.951862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:08:29.951892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:08:29.952061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-24T21:08:29.952169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-24T21:08:29.952204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-24T21:08:29.952236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-24T21:08:29.952861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:08:29.952954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:08:29.952989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:08:29.953024Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-24T21:08:29.953075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:29.953691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:08:29.953774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:08:29.953805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:08:29.953854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, 
LocalPathId: 2], version: 18446744073709551615 2025-06-24T21:08:29.953885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:08:29.953956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-24T21:08:29.956080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:29.956127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:29.956166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:29.957523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:08:29.957662Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-24T21:08:29.957872Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-24T21:08:29.958592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.958885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409546 2025-06-24T21:08:29.959563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-24T21:08:29.959734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409548 2025-06-24T21:08:29.960719Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-24T21:08:29.961220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 Forgetting tablet 72075186233409547 2025-06-24T21:08:29.961789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T21:08:29.961967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:29.962502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:29.962561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:29.962726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:08:29.963238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:29.963289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:29.963358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:29.965206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:08:29.965256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T21:08:29.966350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T21:08:29.966385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-24T21:08:29.966450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:08:29.966486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T21:08:29.968219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:29.968324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T21:08:29.968546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:08:29.968583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:08:29.969071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:08:29.969173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:08:29.969208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:523:2475] TestWaitNotification: OK eventTxId 102 
2025-06-24T21:08:29.969733Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:29.969950Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 218us result status StatusPathDoesNotExist 2025-06-24T21:08:29.970144Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeLimitsRejectsWithIndexedTables [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:28.961702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:28.961812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:28.961852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:28.961890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:28.961951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:28.961982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:28.962040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:28.962129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 
604800.000000s, IsManualStartup# false 2025-06-24T21:08:28.962932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:28.963336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:29.049623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:29.049693Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:29.067045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:29.067559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:29.067770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:29.076552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:29.076793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:29.077549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.077888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:29.081310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:29.081550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:29.082777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:29.082855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:29.083169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:29.083234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:29.083286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:29.083394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.091215Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:29.237021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" 
Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:29.237268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.237561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:29.237617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:29.237881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:29.237976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:29.240650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.240849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:29.241077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.241143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:29.241187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:29.241224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:29.243627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.243712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:29.243763Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:29.245962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.246023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.246075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:29.246145Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:29.250144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:29.252432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:29.252609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:29.253567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.253738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:29.253796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:29.254065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:29.254117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:29.254305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:29.254398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:29.256950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:29.257010Z node 1 :FLAT_TX_SCHEMESHARD ... 
shard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 607 RawX2: 4294969840 } Origin: 72075186233409548 State: 2 TxId: 107 Step: 0 Generation: 2 2025-06-24T21:08:29.925744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 107, tablet: 72075186233409548, partId: 0 2025-06-24T21:08:29.925844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 107:0, at schemeshard: 72057594046678944, message: Source { RawX1: 607 RawX2: 4294969840 } Origin: 72075186233409548 State: 2 TxId: 107 Step: 0 Generation: 2 2025-06-24T21:08:29.925885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 107:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:08:29.925977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 107:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 607 RawX2: 4294969840 } Origin: 72075186233409548 State: 2 TxId: 107 Step: 0 Generation: 2 2025-06-24T21:08:29.926043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 107:0, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.926088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.926126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 107:0, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-06-24T21:08:29.926163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 107:0 129 -> 240 2025-06-24T21:08:29.928787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-24T21:08:29.928913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 107:2, at schemeshard: 72057594046678944 2025-06-24T21:08:29.931481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-24T21:08:29.931625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-24T21:08:29.931729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 107:2, at schemeshard: 72057594046678944 2025-06-24T21:08:29.931839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 107:2, at schemeshard: 72057594046678944 2025-06-24T21:08:29.931877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 107:2 ProgressState 2025-06-24T21:08:29.931978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation 
is done id#107:2 progress is 2/3 2025-06-24T21:08:29.932007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 107 ready parts: 2/3 2025-06-24T21:08:29.932036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#107:2 progress is 2/3 2025-06-24T21:08:29.932060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 107 ready parts: 2/3 2025-06-24T21:08:29.932096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 107, ready parts: 2/3, is published: true 2025-06-24T21:08:29.932384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-24T21:08:29.932478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.932583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.932767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.932808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 107:0 ProgressState 2025-06-24T21:08:29.932862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#107:0 progress is 3/3 2025-06-24T21:08:29.932882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 107 ready parts: 3/3 2025-06-24T21:08:29.932911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#107:0 progress is 3/3 2025-06-24T21:08:29.932934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 107 ready parts: 3/3 2025-06-24T21:08:29.932964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 107, ready parts: 3/3, is published: true 2025-06-24T21:08:29.933038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:484:2430] message: TxId: 107 2025-06-24T21:08:29.933092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 107 ready parts: 3/3 2025-06-24T21:08:29.933141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 107:0 2025-06-24T21:08:29.933173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 107:0 2025-06-24T21:08:29.933300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-24T21:08:29.933359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 107:1 2025-06-24T21:08:29.933380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 107:1 2025-06-24T21:08:29.933420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove 
txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-24T21:08:29.933439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 107:2 2025-06-24T21:08:29.933466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 107:2 2025-06-24T21:08:29.933513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-24T21:08:29.935880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-24T21:08:29.935915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:540:2486] TestWaitNotification: OK eventTxId 107 TestModificationResults wait txId: 108 2025-06-24T21:08:29.939082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_0" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "Table7" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value0" Type: "Utf8" } Columns { Name: "Value1" Type: "Utf8" } Columns { Name: "Value2" Type: "Utf8" } Columns { Name: "Value3" Type: "Utf8" } Columns { Name: "Value4" Type: "Utf8" } KeyColumnNames: "RowId" } IndexDescription { Name: "UserDefinedIndexByValue0" KeyColumnNames: "Value0" } IndexDescription { Name: "UserDefinedIndexByValue1" KeyColumnNames: "Value1" } IndexDescription { Name: "UserDefinedIndexByValue2" KeyColumnNames: "Value2" } IndexDescription { Name: "UserDefinedIndexByValue3" KeyColumnNames: "Value3" } IndexDescription { Name: "UserDefinedIndexByValue4" KeyColumnNames: "Value4" } } } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:29.939560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /MyRoot/USER_0/Table7 domain path id: [OwnerId: 72057594046678944, LocalPathId: 2] domain path: /MyRoot/USER_0 shardsToCreate: 6 GetShardsInside: 4 MaxShards: 7 2025-06-24T21:08:29.939655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 108:0, explain: indexes count has reached maximum value in the table, children limit for dir in domain: 4, intention to create new children: 5, at schemeshard: 72057594046678944 2025-06-24T21:08:29.939693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 108:1, propose status:StatusResourceExhausted, reason: indexes count has reached maximum value in the table, children limit for dir in domain: 4, intention to create new children: 5, at schemeshard: 72057594046678944 2025-06-24T21:08:29.941666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 108, response: Status: StatusResourceExhausted Reason: "indexes count has reached maximum value in the table, children limit for dir in domain: 4, intention to create new children: 5" TxId: 108 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:29.941870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot/USER_0, subject: , status: StatusResourceExhausted, reason: indexes count has 
reached maximum value in the table, children limit for dir in domain: 4, intention to create new children: 5, operation: CREATE TABLE WITH INDEXES, path: /MyRoot/USER_0/Table7 TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 2025-06-24T21:08:29.942240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 108: send EvNotifyTxCompletion 2025-06-24T21:08:29.942274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 108 2025-06-24T21:08:29.942602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 108, at schemeshard: 72057594046678944 2025-06-24T21:08:29.942703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-24T21:08:29.942740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [1:736:2650] TestWaitNotification: OK eventTxId 108 >> StatisticsSaveLoad::Delete [GOOD] >> TSchemeShardSubDomainTest::Redefine >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTablets >> TSchemeShardSubDomainTest::SimultaneousCreateForceDropTwice ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpMultishardIndex::DuplicateUpsert [GOOD] Test command err: Trying to start YDB, gRPC: 11412, MsgBus: 11703 2025-06-24T21:07:55.172144Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625630318835440:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:55.172207Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045b4/r3tmp/tmpTb68fO/pdisk_1.dat 2025-06-24T21:07:55.452597Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625630318835411:2079] 1750799275169084 != 1750799275169087 2025-06-24T21:07:55.488116Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11412, node 1 2025-06-24T21:07:55.563679Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:55.563816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:55.565194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:55.579748Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:55.579782Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:55.579790Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:55.579922Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11703 TClient is connected to server localhost:11703 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:07:56.181102Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:56.192153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:56.217746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:07:56.233476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:07:56.416166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.579127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:07:56.655275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.450822Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625643203738930:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:58.450953Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:58.762204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.797153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.833662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.872290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:58.925174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.011244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.086002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.183767Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625647498706892:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:59.183853Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:59.184110Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625647498706897:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:59.189798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:59.198184Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625647498706899:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:59.342941Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625647498706952:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:00.172037Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625630318835440:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:00.172122Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:00.357457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... 594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037934 not found Trying to start YDB, gRPC: 9338, MsgBus: 22267 2025-06-24T21:08:22.307545Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519625747537934955:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:22.368257Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045b4/r3tmp/tmp19EdV6/pdisk_1.dat 2025-06-24T21:08:22.468372Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:22.477983Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519625747537934858:2079] 1750799302294124 != 1750799302294127 2025-06-24T21:08:22.487090Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:22.487211Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:22.489014Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9338, node 3 2025-06-24T21:08:22.551703Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:22.551736Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:22.551748Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:22.551902Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22267 TClient is connected to server localhost:22267 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:23.109802Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:23.130304Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:23.202656Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:23.354331Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:23.407199Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:23.495939Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:26.091541Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625764717805681:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:26.091641Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:26.156490Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:26.192784Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:26.228125Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:26.300849Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:26.330975Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:26.400973Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:26.438438Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:26.502618Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625764717806344:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:26.502730Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625764717806349:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:26.502732Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:26.507007Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:26.519683Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519625764717806351:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:08:26.607190Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519625764717806402:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:27.298869Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519625747537934955:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:27.298953Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:27.761715Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeclareAndForbidTableInside [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:29.780934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:29.781036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:29.781101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:29.781143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:29.781189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:29.781219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:29.781291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:29.781379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:29.782298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:29.782698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:29.870071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:29.870128Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:29.885957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:29.886376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:29.886582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:29.894869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:29.895130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:29.895917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.896256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:29.899564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:29.899800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:29.901051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:29.901123Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:29.901425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:29.901503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:29.901555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:29.901653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.909177Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:30.053540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-06-24T21:08:30.053801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.054156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:30.054238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:30.054574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:30.054664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:30.057678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:30.057907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:30.058187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.058256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:30.058303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:30.058347Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:30.061081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.061175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:30.061223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:30.063640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.063703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.063761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:30.063828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , 
TxId: 1 ready parts: 1/1 2025-06-24T21:08:30.067914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:30.070467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:30.070681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:30.071852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:30.072016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:30.072095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:30.072473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:30.072539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:30.072733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:30.072826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:30.075557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:30.075617Z node 1 :FLAT_TX_SCHEMESHARD ... 
: 72057594046678944, cookie: 101 2025-06-24T21:08:30.156369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:30.156447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:30.156501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-24T21:08:30.156542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:08:30.156621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-06-24T21:08:30.158993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 101:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2025-06-24T21:08:30.159163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000003 2025-06-24T21:08:30.160149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:30.160272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:30.160325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:33: MkDir::TPropose operationId# 101:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000003, at schemeshard: 72057594046678944 2025-06-24T21:08:30.160479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 128 -> 240 2025-06-24T21:08:30.160656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:30.160729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T21:08:30.162061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:30.163602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T21:08:30.165340Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:30.165393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:30.165560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:08:30.165670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:30.165708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-24T21:08:30.165748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 3 2025-06-24T21:08:30.166137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.166213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:08:30.166331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:30.166376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:30.166427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:30.166465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:30.166523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-24T21:08:30.166572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:30.166613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:08:30.166652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:08:30.166752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:08:30.166810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-24T21:08:30.166850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-24T21:08:30.166888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 3], 3 
2025-06-24T21:08:30.167779Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:30.167877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:30.167918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:30.167970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-24T21:08:30.168045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:08:30.168885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:30.168995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:30.169039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:30.169073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-24T21:08:30.169107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T21:08:30.169183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T21:08:30.173128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:30.173242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-24T21:08:30.177185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_0/dir" OperationType: ESchemeOpCreateTable CreateTable { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value" Type: "Utf8" } KeyColumnNames: "RowId" } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:30.177577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /MyRoot/USER_0/dir/table_0, opId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.177679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /MyRoot/USER_0/dir/table_0, opId: 102:0, schema: Name: "table_0" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value" Type: "Utf8" } KeyColumnNames: "RowId", at schemeshard: 72057594046678944 2025-06-24T21:08:30.177860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 102:1, propose status:StatusNameConflict, reason: Inclusive subDomain do not support shared transactions, at schemeshard: 72057594046678944 2025-06-24T21:08:30.180940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 102, response: Status: StatusNameConflict Reason: "Inclusive subDomain do not support shared transactions" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:30.181280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot/USER_0, subject: , status: StatusNameConflict, reason: Inclusive subDomain do not support shared transactions, operation: CREATE TABLE, path: /MyRoot/USER_0/dir/table_0 TestModificationResult got TxId: 102, wait until txId: 102 >> TSchemeShardSubDomainTest::SchemeQuotas >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-false ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SetSchemeLimits [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:29.852720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:29.852808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:29.852845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:29.852872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:29.852908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:29.852930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:29.852978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:29.853038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, 
Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:29.853673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:29.853966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:29.921313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:29.921355Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:29.934676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:29.935028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:29.935230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:29.942000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:29.942170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:29.942740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.942963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:29.945653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:29.945921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:29.946903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:29.947010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:29.947335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:29.947394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:29.947459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:29.947560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.954766Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:30.078406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:30.078623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.078889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:30.078955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:30.079244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:30.079324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:30.081807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:30.081998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:30.082181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.082233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:30.082276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:30.082312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:30.084476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.084569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:30.084629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:30.086610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.086670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.086720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:30.086794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:30.096659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:30.098882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:30.099089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:30.099902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:30.100023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:30.100105Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:30.100424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:30.100482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:30.100665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:30.100756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:30.103045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:30.103102Z node 1 :FLAT_TX_SCHEMESHARD ... 
xPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:30.350668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:30.350703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:338:2312], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-24T21:08:30.350741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:338:2312], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-06-24T21:08:30.351014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.351047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-24T21:08:30.351132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#100:0 progress is 1/1 2025-06-24T21:08:30.351177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:30.351220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#100:0 progress is 1/1 2025-06-24T21:08:30.351249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:30.351296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-24T21:08:30.351349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:30.351399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 100:0 2025-06-24T21:08:30.351430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 100:0 2025-06-24T21:08:30.351595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:08:30.351631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-06-24T21:08:30.351658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-24T21:08:30.351688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-24T21:08:30.352188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:30.352257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 
1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:30.352299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-24T21:08:30.352340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T21:08:30.352382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:30.352912Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:30.352968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:30.352995Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-24T21:08:30.353015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:08:30.353047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:08:30.353112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-24T21:08:30.356119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-24T21:08:30.356245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-24T21:08:30.356484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-24T21:08:30.356530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-24T21:08:30.356994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-24T21:08:30.357089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-24T21:08:30.357167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:485:2431] TestWaitNotification: OK eventTxId 100 2025-06-24T21:08:30.357914Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { 
ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:30.358142Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 263us result status StatusSuccess 2025-06-24T21:08:30.358614Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 3 ShardsInside: 2 ShardsLimit: 3 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 300 DatabaseQuotas { data_stream_shards_quota: 3 } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 3 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 3 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"$%&\'()*+,-.:;<=>?@[]^_`{|}~" MaxPQPartitions: 300 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:30.359134Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:30.359354Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 224us result status StatusSuccess 2025-06-24T21:08:30.359821Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain 
CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 3 ShardsInside: 0 ShardsLimit: 3 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 300 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 3 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 3 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"$%&\'()*+,-.:;<=>?@[]^_`{|}~" MaxPQPartitions: 300 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel3 [GOOD] >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix-Nullable-Covered >> TSchemeShardSubDomainTest::CreateDropNbs ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::Simple [GOOD] Test command err: 2025-06-24T21:08:20.747192Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:08:20.747628Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:08:20.747805Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003fec/r3tmp/tmpeP4Dim/pdisk_1.dat 2025-06-24T21:08:21.256444Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21007, node 1 2025-06-24T21:08:21.797840Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:21.797916Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:21.797954Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:21.798205Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:21.806889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:21.940567Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:21.940740Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:21.956898Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29835 2025-06-24T21:08:22.534758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:08:25.859107Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:08:25.898797Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:25.898959Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:25.965415Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:08:25.969816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:26.189726Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:26.225357Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.225904Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.226352Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.226494Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.226564Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.226747Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.226807Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.226877Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.226932Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.407435Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:26.407568Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:26.420968Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:26.559833Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:26.608199Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:08:26.608343Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:08:26.658091Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:08:26.659708Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:08:26.659970Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:08:26.660040Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:08:26.660099Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:08:26.660155Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:08:26.660253Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:08:26.660311Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:08:26.661434Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:08:26.702369Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:08:26.702491Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:08:26.709108Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:08:26.715858Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:08:26.716968Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:08:26.725709Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:08:26.757773Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:08:26.757835Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:08:26.757932Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:08:26.778369Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:26.786951Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:08:26.787170Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:08:27.004451Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:08:27.188536Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:08:27.251977Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:08:27.758362Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:27.760950Z node 1 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:08:27.761367Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:08:27.786865Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:08:27.798405Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2165:3036], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:27.798569Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2182:3041], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:27.798662Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:27.808256Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:27.865690Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2185:3044], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:08:28.118662Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2272:3073] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:28.521788Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2294:3085]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:08:28.522105Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:08:28.522216Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2296:3087] 2025-06-24T21:08:28.523011Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2296:3087] 2025-06-24T21:08:28.523630Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2297:2796] 2025-06-24T21:08:28.523944Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2296:3087], server id = [2:2297:2796], tablet id = 72075186224037894, status = OK 2025-06-24T21:08:28.524155Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2297:2796], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:08:28.524230Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-24T21:08:28.524444Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:08:28.524563Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2294:3085], StatRequests.size() = 1 2025-06-24T21:08:28.911572Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=ODA2YjA1YTgtNGQ3NWJlYTgtNTAzY2Q3NjItZGZjOTM0YmE=, TxId: 2025-06-24T21:08:28.911661Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=ODA2YjA1YTgtNGQ3NWJlYTgtNTAzY2Q3NjItZGZjOTM0YmE=, TxId: 2025-06-24T21:08:28.912987Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:08:28.916194Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tag AS Uint32; SELECT data FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id AND stat_type = $stat_type AND column_tag = $column_tag; 2025-06-24T21:08:29.035283Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2330:3108]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:08:29.035532Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:08:29.035581Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2330:3108], StatRequests.size() = 1 2025-06-24T21:08:29.202494Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=OGUwYTA4N2YtMzljYTU1YTAtODczMDRlNC0yN2UyNTBmMQ==, TxId: 01jyhwapbffpmf9hgmnde02z24 2025-06-24T21:08:29.203536Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=OGUwYTA4N2YtMzljYTU1YTAtODczMDRlNC0yN2UyNTBmMQ==, TxId: 01jyhwapbffpmf9hgmnde02z24 2025-06-24T21:08:29.206064Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:08:29.208882Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tag AS Uint32; SELECT data FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id AND stat_type = $stat_type AND column_tag = $column_tag; 2025-06-24T21:08:29.229400Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=NTlhYjMwMi01OGZlNjkzYS00NmM4ODM2Yy1hNThmNWU0Yw==, TxId: 01jyhwapcsdjt8ezzgpfwarjhr 2025-06-24T21:08:29.229532Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=NTlhYjMwMi01OGZlNjkzYS00NmM4ODM2Yy1hNThmNWU0Yw==, TxId: 01jyhwapcsdjt8ezzgpfwarjhr >> TSchemeShardSubDomainTest::Restart [GOOD] >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain [GOOD] >> TSchemeShardSubDomainTest::SimultaneousCreateDelete >> TSchemeShardSubDomainTest::CreateSubDomainWithoutSomeTablets >> TSchemeShardSubDomainTest::SimultaneousCreateForceDropTwice [GOOD] >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTable [GOOD] >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTablets [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::Delete [GOOD] Test command err: 2025-06-24T21:08:20.742122Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:08:20.742580Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:08:20.742777Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003f55/r3tmp/tmp9xVy9c/pdisk_1.dat 2025-06-24T21:08:21.251668Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29564, node 1 2025-06-24T21:08:21.797757Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:21.797842Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:21.797891Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:21.798527Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:21.806320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:21.928601Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:21.928761Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:21.949118Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10154 2025-06-24T21:08:22.552325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:08:25.922082Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:08:25.958227Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:25.958342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:26.019126Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:08:26.021552Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:26.239883Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:26.274813Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.275424Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.275973Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.276111Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.276336Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.276427Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.276539Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.276623Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.276700Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.459527Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:26.459702Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:26.473137Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:26.660641Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:26.704669Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:08:26.704760Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:08:26.754291Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:08:26.754547Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:08:26.754785Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:08:26.754858Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:08:26.754917Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:08:26.754983Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:08:26.755033Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:08:26.755090Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:08:26.755725Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:08:26.781029Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:08:26.781132Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1786:2561], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:08:26.788239Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1795:2568] 2025-06-24T21:08:26.800133Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1833:2587] 2025-06-24T21:08:26.800319Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1833:2587], schemeshard id = 72075186224037897 2025-06-24T21:08:26.801970Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:08:26.825881Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:08:26.825975Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:08:26.826127Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:08:26.842850Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:26.850601Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:08:26.850765Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:08:27.056968Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:08:27.265110Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:08:27.311680Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:08:27.798781Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:27.801651Z node 1 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:08:27.802254Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:08:27.836105Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:08:27.840639Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2156:3029], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:27.840760Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2172:3034], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:27.841127Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:27.848176Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:27.898653Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2176:3037], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:08:28.187225Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2263:3066] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:28.521675Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2285:3078]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:08:28.522005Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:08:28.522126Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2287:3080] 2025-06-24T21:08:28.523006Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2287:3080] 2025-06-24T21:08:28.523716Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2288:2794] 2025-06-24T21:08:28.524021Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2287:3080], server id = [2:2288:2794], tablet id = 72075186224037894, status = OK 2025-06-24T21:08:28.524195Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2288:2794], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:08:28.524260Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-24T21:08:28.524521Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:08:28.524617Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2285:3078], StatRequests.size() = 1 2025-06-24T21:08:28.911616Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=OTQxMmM1MDQtOTJiZGI4MTMtODcwYjYyZGQtYWEwYTdlZmE=, TxId: 2025-06-24T21:08:28.911703Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=OTQxMmM1MDQtOTJiZGI4MTMtODcwYjYyZGQtYWEwYTdlZmE=, TxId: 2025-06-24T21:08:28.912980Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:08:28.916157Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:08:28.994685Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2321:3101]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:08:28.994880Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:08:28.994928Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2321:3101], StatRequests.size() = 1 2025-06-24T21:08:29.218608Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=NTM0MThkNjAtNGM2ZTZhMzYtZWE2NTg4YzMtNDM0OTQyOGI=, TxId: 2025-06-24T21:08:29.218696Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=NTM0MThkNjAtNGM2ZTZhMzYtZWE2NTg4YzMtNDM0OTQyOGI=, TxId: 2025-06-24T21:08:29.220310Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:08:29.223286Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tag AS Uint32; SELECT data FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id AND stat_type = $stat_type AND column_tag = $column_tag; 2025-06-24T21:08:29.279695Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [1:2353:3116]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:08:29.279921Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:08:29.279988Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 3, ReplyToActorId = [1:2353:3116], StatRequests.size() = 1 2025-06-24T21:08:29.404841Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=MmJlM2FmMDctODFjNTMzYjEtMzFiODFkMmYtZmI0MmZmMzc=, TxId: 01jyhwapj753g2ajx4026vpzww 2025-06-24T21:08:29.405013Z node 1 :STATISTICS WARN: query_actor.cpp:372: [TQueryBase] Finish with BAD_REQUEST, Issues: {
: Error: No data }, SessionId: ydb://session/3?node_id=1&id=MmJlM2FmMDctODFjNTMzYjEtMzFiODFkMmYtZmI0MmZmMzc=, TxId: 01jyhwapj753g2ajx4026vpzww >> DataShardSnapshots::LockedWriteWithAsyncIndexAndVolatileCommit+UseSink [GOOD] >> DataShardSnapshots::LockedWriteWithAsyncIndexAndVolatileCommit-UseSink >> TSchemeShardSubDomainTest::Redefine [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::Restart [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:30.846482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:30.846584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:30.846621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:30.846666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:30.846728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:30.846755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:30.846820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:30.846899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:30.847642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:30.848029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:30.929693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:30.929748Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:30.945428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:30.945837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:30.946056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:30.954530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: 
TTxUpgradeSchema.Complete 2025-06-24T21:08:30.954748Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:30.955412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:30.955718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:30.958673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:30.958923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:30.960096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:30.960164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:30.960412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:30.960477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:30.960532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:30.960626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.967617Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:31.110835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:31.111040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.111324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:31.111370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:31.111616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:31.111695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, 
suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:31.115255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.115494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:31.115680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.115739Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:31.115780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:31.115817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:31.117858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.117959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:31.118006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:31.120008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.120078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.120130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:31.120198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:31.124522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:31.126817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:31.127071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send 
Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:31.128162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.128316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:31.128371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:31.128698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:31.128760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:31.128970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:31.129127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:31.136422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:31.136476Z node 1 :FLAT_TX_SCHEMESHARD ... 
shard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:31.349910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 2, at schemeshard: 72057594046678944 2025-06-24T21:08:31.350013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: USER_0, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:31.350094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.350182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.350428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-24T21:08:31.350790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.350880Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-24T21:08:31.351198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.351328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.351453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 3, at schemeshard: 72057594046678944 2025-06-24T21:08:31.351498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:08:31.351544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:31.351571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:08:31.351676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.351759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.351975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 9, at schemeshard: 72057594046678944 2025-06-24T21:08:31.352362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.352492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.352939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at 
schemeshard: 72057594046678944 2025-06-24T21:08:31.353022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.353267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.353360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.353486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.353701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.353807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.353996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.354273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.354382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.354508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.354572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.354632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.360038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:31.363177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:31.363273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:31.363393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:31.363449Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:31.363500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:31.363833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:470:2418] sender: [1:529:2058] recipient: [1:15:2062] 2025-06-24T21:08:31.427577Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: 
false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:31.427827Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 267us result status StatusSuccess 2025-06-24T21:08:31.428274Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:31.428869Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:31.429036Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 168us result status StatusSuccess 2025-06-24T21:08:31.429469Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 
5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> StatisticsSaveLoad::ForbidAccess [GOOD] >> DataShardSnapshots::DelayedWriteReplyAfterSplit [GOOD] >> TSchemeShardSubDomainTest::Create ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:27.968953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:27.969054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:27.969099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:27.969135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:27.969180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:27.969212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:27.969278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:27.969367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, 
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:27.970203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:27.970547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:28.051804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:28.051853Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:28.068400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:28.068837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:28.069042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:28.078588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:28.078808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:28.079594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.079921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:28.083374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.083612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:28.084888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:28.084967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.085226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:28.085288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:28.085342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:28.085451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.093776Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:28.260778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { 
Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:28.261000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.261263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:28.261310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:28.261541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:28.261606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:28.264183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.264361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:28.264568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.264621Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:28.264678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:28.264725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:28.266783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.266866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:28.266920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:28.269527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.269578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.269629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: 
NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.269701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:28.273192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:28.275377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:28.275601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:28.276622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.276750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:28.276801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.277112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:28.277169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.277326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:28.277400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:28.279746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:28.279792Z node 1 :FLAT_TX_SCHEMESHARD D ... 
tionId: 104:0, at schemeshard: 72075186233409546 2025-06-24T21:08:31.583012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-24T21:08:31.583317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-24T21:08:31.583392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-24T21:08:31.583586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 2] 2025-06-24T21:08:31.583753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-24T21:08:31.583796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:447:2397], at schemeshard: 72075186233409546, txId: 104, path id: 1 2025-06-24T21:08:31.583843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:447:2397], at schemeshard: 72075186233409546, txId: 104, path id: 2 2025-06-24T21:08:31.584433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-24T21:08:31.584488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 104:0 ProgressState at tablet: 72075186233409546 2025-06-24T21:08:31.584580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-24T21:08:31.584638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 104:0, datashard: 72075186233409549, at schemeshard: 72075186233409546 2025-06-24T21:08:31.584687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 104:0 129 -> 240 2025-06-24T21:08:31.585422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409546, cookie: 104 2025-06-24T21:08:31.585530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409546, cookie: 104 2025-06-24T21:08:31.585572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-06-24T21:08:31.585611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 9 2025-06-24T21:08:31.585654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 
72075186233409546, LocalPathId: 1] was 5 2025-06-24T21:08:31.586529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-06-24T21:08:31.586601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-06-24T21:08:31.586637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-06-24T21:08:31.586671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 18446744073709551615 2025-06-24T21:08:31.586703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 4 2025-06-24T21:08:31.586768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-06-24T21:08:31.589652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-24T21:08:31.589719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 104:0 ProgressState, at schemeshard: 72075186233409546 2025-06-24T21:08:31.590100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-06-24T21:08:31.590285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:08:31.590322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:08:31.590359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:08:31.590391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:08:31.590428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-06-24T21:08:31.590498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:554:2491] message: TxId: 104 2025-06-24T21:08:31.590545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:08:31.590578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:0 2025-06-24T21:08:31.590608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:0 2025-06-24T21:08:31.590699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for 
pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 2 2025-06-24T21:08:31.591522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-24T21:08:31.591562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 0, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-24T21:08:31.593644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-06-24T21:08:31.594355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-06-24T21:08:31.594976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-24T21:08:31.595028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:447:2397], at schemeshard: 72075186233409546, txId: 0, path id: 1 2025-06-24T21:08:31.595134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T21:08:31.595217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:816:2731] 2025-06-24T21:08:31.596059Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 10 PathOwnerId: 72075186233409546, cookie: 0 TestWaitNotification: OK eventTxId 104 2025-06-24T21:08:31.597711Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409546 2025-06-24T21:08:31.598014Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409546 describe path "/MyRoot/SomeDatabase" took 248us result status StatusSuccess 2025-06-24T21:08:31.598500Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "MyRoot/SomeDatabase" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 10 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 10 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 2 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "fast" Kind: "fast_kind" } StoragePools { Name: "large" Kind: "large_kind" } PathsInside: 0 PathsLimit: 10000 
ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "large_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "fast_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 2800 data_size_soft_quota: 2200 storage_quotas { unit_kind: "fast_kind" data_size_hard_quota: 600 data_size_soft_quota: 500 } storage_quotas { unit_kind: "large_kind" data_size_hard_quota: 2200 data_size_soft_quota: 1700 } } SecurityState { Audience: "/MyRoot/SomeDatabase" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTablets [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:31.419013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:31.419079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:31.419108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:31.419134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:31.419187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:31.419207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:31.419246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:31.419308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:31.419978Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:31.420256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:31.495491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:31.495546Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:31.511705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:31.512137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:31.512325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:31.520349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:31.520565Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:31.521120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.521402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:31.524293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:31.524461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:31.525421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:31.525485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:31.525730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:31.525799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:31.525848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:31.525982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.533338Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:31.684791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 
72057594046678944 2025-06-24T21:08:31.684954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.685138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:31.685175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:31.685333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:31.685378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:31.688095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.688268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:31.688448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.688545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:31.688587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:31.688631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:31.690507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.690564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:31.690600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:31.692102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.692144Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.692177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:31.692226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation 
IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:31.701234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:31.703228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:31.703419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:31.704446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.704593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:31.704646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:31.704935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:31.704994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:31.705176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:31.705254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:31.707518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:31.707562Z node 1 :FLAT_TX_SCHEMESHARD ... 
G: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:31.754641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:31.754667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-24T21:08:31.754708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-06-24T21:08:31.754757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.754791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-24T21:08:31.754870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#100:0 progress is 1/1 2025-06-24T21:08:31.754895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:31.754929Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#100:0 progress is 1/1 2025-06-24T21:08:31.754954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:31.754979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-24T21:08:31.755009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-24T21:08:31.755041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 100:0 2025-06-24T21:08:31.755086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 100:0 2025-06-24T21:08:31.755179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:08:31.755221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-06-24T21:08:31.755244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-24T21:08:31.755264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-24T21:08:31.756279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:31.756379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: 
Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:31.756408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-24T21:08:31.756441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T21:08:31.756496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:31.757076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:31.757144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-24T21:08:31.757184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-24T21:08:31.757219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:08:31.757256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:31.757320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-24T21:08:31.760827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-24T21:08:31.761241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-24T21:08:31.761492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-24T21:08:31.761541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-24T21:08:31.761893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-24T21:08:31.761991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-24T21:08:31.762028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:313:2302] TestWaitNotification: OK eventTxId 100 2025-06-24T21:08:31.762543Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, 
record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:31.762743Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 230us result status StatusSuccess 2025-06-24T21:08:31.763168Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:31.763532Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:31.763692Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 117us result status StatusSuccess 2025-06-24T21:08:31.764092Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 
5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateForceDropTwice [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:31.422870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:31.422963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:31.422998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:31.423033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:31.423074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:31.423101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:31.423203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:31.423278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-24T21:08:31.423989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:31.424339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:31.509035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:31.509095Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:31.524557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:31.524913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:31.525099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:31.532347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:31.532559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:31.533352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.533628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:31.536342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:31.536547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:31.537717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:31.537777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:31.538015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:31.538079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:31.538124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:31.538207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.544156Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:31.665111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:31.665321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.665624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:31.665671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:31.665933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:31.666023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:31.668530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.668723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:31.668943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.669001Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:31.669039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:31.669077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:31.671350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.671428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:31.671471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:31.673809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.673893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.674000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:31.674071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:31.677801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:31.680310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:31.680489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:31.681435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.681597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:31.681655Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:31.682036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:31.682115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:31.682394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:31.682492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:31.684765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:31.684803Z node 1 :FLAT_TX_SCHEMESHARD ... 
LAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:5 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:31.755407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:31.755445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:31.755468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:6 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:31.755498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:31.755528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:31.756780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:08:31.757690Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 5 TxId_Deprecated: 5 2025-06-24T21:08:31.757899Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 2025-06-24T21:08:31.758032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-06-24T21:08:31.758365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-24T21:08:31.758729Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 2025-06-24T21:08:31.758927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.759099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-24T21:08:31.759378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-24T21:08:31.759543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:08:31.759945Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 2025-06-24T21:08:31.760100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 
2025-06-24T21:08:31.760236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:08:31.760480Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 2025-06-24T21:08:31.760617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T21:08:31.760757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:08:31.760914Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 2025-06-24T21:08:31.761080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:08:31.761206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:08:31.761236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:279:2268] 2025-06-24T21:08:31.761435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-24T21:08:31.761565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:31.761783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:31.761842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:31.761998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:08:31.762305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:31.762346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:31.762418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:31.765358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-24T21:08:31.765524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:08:31.765679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T21:08:31.766057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-24T21:08:31.768078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:08:31.768167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-24T21:08:31.768423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:31.768519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 2025-06-24T21:08:31.769174Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:31.769419Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 231us result status StatusPathDoesNotExist 2025-06-24T21:08:31.769614Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:08:31.770116Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:31.770282Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 177us result status StatusSuccess 2025-06-24T21:08:31.770823Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 
ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> DataShardSnapshots::DelayedWriteReadableAfterSplitAndReboot >> KqpVectorIndexes::OrderByCosineDistanceNotNullableLevel3 [GOOD] >> test_sql_streaming.py::test[suites-ReadWriteTopicWithSchema-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-WriteTwoTopics-default.txt] >> TSchemeShardSubDomainTest::CreateDropNbs [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::Redefine [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:31.372601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:31.372711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:31.372767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:31.372821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:31.372873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:31.372906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:31.372980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:31.373075Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:31.373733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:31.374045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:31.450545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:31.450609Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:31.469633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:31.470104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:31.470308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:31.478870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:31.479136Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:31.479889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.480169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:31.483199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:31.483432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:31.484651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:31.484725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:31.485016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:31.485079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:31.485134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:31.485233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.492363Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:31.642306Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:31.642544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.642835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:31.642897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:31.643179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:31.643275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:31.645836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.646043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:31.646256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.646340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:31.646386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:31.646426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:31.648696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.648773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:31.648820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:31.651175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.651230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-06-24T21:08:31.651286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:31.651348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:31.662199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:31.664732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:31.664944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:31.666016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.666168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:31.666216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:31.666556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:31.666626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:31.666802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:31.666887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:31.669247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:31.669304Z node 1 :FLAT_TX_SCHEMESHARD ... 
SHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:31.885174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:08:31.885242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:08:31.885263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:08:31.885283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-24T21:08:31.885303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:08:31.885353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-24T21:08:31.886663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:31.886703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:31.886728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:31.888298Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-24T21:08:31.889057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:08:31.889367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.889612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409546 2025-06-24T21:08:31.890362Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-24T21:08:31.890790Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-24T21:08:31.891489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK 
Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-24T21:08:31.891669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409548 2025-06-24T21:08:31.892340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T21:08:31.892479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186233409547 2025-06-24T21:08:31.892890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:31.892926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:31.893068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:08:31.893288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:08:31.893381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:31.893419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:31.893482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:31.895065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:08:31.895124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T21:08:31.895222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T21:08:31.895240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-24T21:08:31.896725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:08:31.896784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T21:08:31.896846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:31.896989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-24T21:08:31.897269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-24T21:08:31.897322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-24T21:08:31.897791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-24T21:08:31.897868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T21:08:31.897902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:579:2531] TestWaitNotification: OK eventTxId 104 2025-06-24T21:08:31.898419Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:31.898566Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 179us result status StatusPathDoesNotExist 2025-06-24T21:08:31.898719Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:08:31.899127Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:31.899290Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 154us result status StatusSuccess 2025-06-24T21:08:31.899603Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 
1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:31.014117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:31.014221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:31.014268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:31.014309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:31.014371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:31.014424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:31.014500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:31.014589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-24T21:08:31.015433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:31.015835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:31.104264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:31.104331Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:31.120521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:31.120917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:31.121114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:31.128755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:31.128964Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:31.129670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.130012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:31.132902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:31.133144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:31.134377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:31.134448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:31.134666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:31.134707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:31.134754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:31.134832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.140596Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:31.288073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:31.288431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.288736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:31.288791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:31.289030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:31.289104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:31.291557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.291761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:31.291964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.292027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:31.292068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:31.292104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:31.294343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.294413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:31.294459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:31.296301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.296352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.296398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:31.296475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:31.300452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:31.302947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:31.303139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:31.304174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.304332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:31.304403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:31.304739Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:31.304795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:31.304971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:31.305053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:31.307274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:31.307329Z node 1 :FLAT_TX_SCHEMESHARD ... 
8: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 101 Step: 140 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72075186233409546 DomainCoordinators: 72075186233409547 DomainCoordinators: 72075186233409548 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 1647 } } CommitVersion { Step: 140 TxId: 101 } 2025-06-24T21:08:31.764618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 622 RawX2: 4294969825 } Origin: 72075186233409552 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:08:31.764688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409552, partId: 0 2025-06-24T21:08:31.764867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 622 RawX2: 4294969825 } Origin: 72075186233409552 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:08:31.764934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:08:31.765080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 622 RawX2: 4294969825 } Origin: 72075186233409552 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:08:31.765151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:7, shard: 72075186233409552, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.765193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.765236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 101:0, datashard: 72075186233409552, at schemeshard: 72057594046678944 2025-06-24T21:08:31.765286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 129 -> 240 2025-06-24T21:08:31.768888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:31.769010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:31.769741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.770141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.770460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: 
TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.770511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:08:31.770626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:31.770665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:31.770783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:31.770826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:31.770863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-24T21:08:31.770942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:277:2266] message: TxId: 101 2025-06-24T21:08:31.771027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:31.771081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:08:31.771124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:08:31.771296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:08:31.773291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:08:31.773338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:278:2267] TestWaitNotification: OK eventTxId 101 2025-06-24T21:08:31.773891Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:31.774194Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 285us result status StatusSuccess 2025-06-24T21:08:31.774765Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 140 ParentPathId: 2 PathState: 
EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 10 Coordinators: 72075186233409546 Coordinators: 72075186233409547 Coordinators: 72075186233409548 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409549 Mediators: 72075186233409550 Mediators: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:31.775537Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:31.775761Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 226us result status StatusSuccess 2025-06-24T21:08:31.776255Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table_0" PathDescription { Self { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 140 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { 
SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 10 Coordinators: 72075186233409546 Coordinators: 72075186233409547 Coordinators: 72075186233409548 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409549 Mediators: 72075186233409550 Mediators: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::CreateSubDomainWithoutSomeTablets [GOOD] >> test_sql_streaming.py::test[suites-ReadTopic-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadTopicGroupWriteToSolomon-default.txt] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateDropNbs [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:31.907997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:31.908111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:31.908156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:31.908212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:31.908258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:31.908291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:31.908397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:31.908476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, 
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:31.909253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:31.909638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:31.998262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:31.998323Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:32.015318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:32.015719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:32.015915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:32.024210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:32.024420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:32.025100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:32.025410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:32.028659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:32.028882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:32.030142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:32.030215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:32.030456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:32.030520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:32.030577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:32.030675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.038020Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:32.151609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: 
"pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:32.151809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.152081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:32.152131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:32.152423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:32.152496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:32.154977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:32.155241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:32.155423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.155476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:32.155512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:32.155542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:32.157607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.157679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:32.157722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:32.159547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.159594Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.159651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 
1:0, at tablet# 72057594046678944 2025-06-24T21:08:32.159719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:32.163466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:32.165311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:32.165533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:32.166637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:32.166773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:32.166833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:32.167174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:32.167240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:32.167427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:32.167507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:32.169620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:32.169664Z node 1 :FLAT_TX_SCHEMESHARD ... 
schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:08:32.410514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-24T21:08:32.412179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:32.412229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:32.412256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:32.412298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:32.413077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:08:32.413874Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-24T21:08:32.414123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:32.414455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:08:32.414722Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 Forgetting tablet 72075186233409546 2025-06-24T21:08:32.416392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-24T21:08:32.416626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:08:32.417131Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-24T21:08:32.417404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T21:08:32.417585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409548 2025-06-24T21:08:32.418599Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-24T21:08:32.418788Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-24T21:08:32.418943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 Forgetting tablet 72075186233409547 2025-06-24T21:08:32.419232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:32.419276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:08:32.419392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186233409549 2025-06-24T21:08:32.420417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:32.420488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:32.420605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:08:32.421647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:08:32.422221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:08:32.422698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:08:32.422750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T21:08:32.422845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T21:08:32.422864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-24T21:08:32.424660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:08:32.424690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T21:08:32.424742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-24T21:08:32.424772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-24T21:08:32.424918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done 
PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:32.425003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:32.425063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:32.425110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:32.425188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:32.426639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T21:08:32.426886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:08:32.426917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:08:32.427251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:08:32.427343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:08:32.427372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:538:2492] TestWaitNotification: OK eventTxId 102 2025-06-24T21:08:32.439819Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/BSVolume" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:32.440085Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/BSVolume" took 214us result status StatusPathDoesNotExist 2025-06-24T21:08:32.440265Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/BSVolume\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/BSVolume" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 
2025-06-24T21:08:32.440775Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:32.440917Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 149us result status StatusPathDoesNotExist 2025-06-24T21:08:32.441002Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::SimultaneousCreateDelete [GOOD] >> TSchemeShardSubDomainTest::ForceDropTwice >> TSchemeShardSubDomainTest::Create [GOOD] >> TSchemeShardSubDomainTest::CreateAlterNbsChannels ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::ForbidAccess [GOOD] Test command err: 2025-06-24T21:08:20.746395Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:417:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:08:20.746924Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:08:20.747023Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003fd5/r3tmp/tmpsXcIQl/pdisk_1.dat 2025-06-24T21:08:21.259101Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5371, node 1 2025-06-24T21:08:21.798030Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:21.798133Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:21.798185Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:21.798788Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:21.809982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:21.920540Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:21.920704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:21.939660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31356 2025-06-24T21:08:22.534747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:08:25.754505Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:08:25.821307Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:25.821459Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:25.853357Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:08:25.855856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:26.112956Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:26.138856Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.141474Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.142144Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.142284Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.142519Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.142602Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.142680Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.142760Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.142835Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:08:26.346853Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:26.346988Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:26.359981Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:26.529639Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:26.601574Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:08:26.601717Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:08:26.654641Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:08:26.654939Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:08:26.655200Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:08:26.655270Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:08:26.655362Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:08:26.655424Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:08:26.655481Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:08:26.655536Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:08:26.656093Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:08:26.683203Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:08:26.683329Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1790:2561], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:08:26.694122Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2570] 2025-06-24T21:08:26.715911Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1847:2588] 2025-06-24T21:08:26.716960Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1847:2588], schemeshard id = 72075186224037897 2025-06-24T21:08:26.726674Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:08:26.747127Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:08:26.747210Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:08:26.747273Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:08:26.763896Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:26.771911Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:08:26.772059Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:08:26.990846Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:08:27.176337Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:08:27.270061Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:08:27.946798Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:28.518089Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:28.518299Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:28.609860Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:29.104984Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2446:3072], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:29.105143Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:29.106268Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2451:3076]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:08:29.106426Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:08:29.106490Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2453:3078] 2025-06-24T21:08:29.106550Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2453:3078] 2025-06-24T21:08:29.107052Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2454:2944] 2025-06-24T21:08:29.107374Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2453:3078], server id = [2:2454:2944], tablet id = 72075186224037894, status = OK 2025-06-24T21:08:29.107488Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2454:2944], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:08:29.107537Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-24T21:08:29.107705Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:08:29.107787Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2451:3076], StatRequests.size() = 1 2025-06-24T21:08:29.125477Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2458:3082], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:29.125608Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:29.126030Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2463:3087], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:29.132205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:29.307802Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:08:29.307914Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:08:29.406296Z node 1 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [1:2453:3078], schemeshard count = 1 2025-06-24T21:08:29.820974Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2465:3089], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-24T21:08:29.993740Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2580:3159] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:30.007373Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2603:3175]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:08:30.007575Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:08:30.007617Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2603:3175], StatRequests.size() = 1 2025-06-24T21:08:30.070587Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwap89174kxbkbhryz0x0s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2MyM2Y5MjAtM2RiMTdkMDEtMzkwYTYtMTM3NDkwODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:08:30.310430Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:2683:3205], for# user@builtin, access# DescribeSchema 2025-06-24T21:08:30.310504Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:2683:3205], for# user@builtin, access# DescribeSchema 2025-06-24T21:08:30.322552Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:2673:3201], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:17: Error: At function: KiReadTable!
:2:17: Error: Cannot find table 'db.[/Root/Database/.metadata/_statistics]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:08:30.324911Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZDA3YWEyYTgtYWZlNjQ3MzctZTU5MjQyMi01NGY1ZmY5Mg==, ActorId: [1:2663:3193], ActorState: ExecuteState, TraceId: 01jyhwaqdt9zsccwzvz3ccdskw, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: >> BasicUsage::RecreateObserver [GOOD] >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-EnableSeparateQuotas [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutSomeTablets [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:32.472217Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:32.472320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:32.472359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:32.472396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:32.472435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:32.472463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:32.472524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:32.472601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:32.473305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:32.473620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:32.554106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:32.554165Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:32.569841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:32.570327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:32.570513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:32.578115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:32.578349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:32.578954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:32.579295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:32.582297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:32.582504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:32.583661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:32.583725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:32.583981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:32.584033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:32.584094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:32.584183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.590942Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:32.722048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:32.722245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.722488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:32.722533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:32.722775Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:32.722860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:32.725207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:32.725387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:32.725590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.725647Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:32.725682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:32.725734Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:32.728030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.728111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:32.728154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:32.730060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.730114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.730182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:32.730258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:32.733779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:32.735997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 
2025-06-24T21:08:32.736197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:32.737165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:32.737301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:32.737350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:32.737669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:32.737724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:32.737913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:32.738001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:32.740236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:32.740278Z node 1 :FLAT_TX_SCHEMESHARD ... 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T21:08:32.743677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:32.743775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T21:08:32.748674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T21:08:32.749239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 100 2025-06-24T21:08:32.750003Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:271:2260] Bootstrap 2025-06-24T21:08:32.772405Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:271:2260] Become StateWork (SchemeCache [1:276:2265]) 2025-06-24T21:08:32.775396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Name: "USER_1" TimeCastBucketsPerMediator: 2 StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 100 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:32.775724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /MyRoot/USER_1, opId: 100:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.775853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 100:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: cant create subdomain with coordinators, but no mediators, at schemeshard: 72057594046678944 2025-06-24T21:08:32.776703Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:08:32.779794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 100, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: cant create subdomain with coordinators, but no mediators" TxId: 100 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:32.780080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 100, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: cant create subdomain with coordinators, but no mediators, operation: CREATE DATABASE, path: /MyRoot/USER_1 2025-06-24T21:08:32.780638Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 100, wait until txId: 100 TestModificationResults wait txId: 101 2025-06-24T21:08:32.783722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: 
ESchemeOpCreateSubDomain SubDomain { PlanResolution: 50 Mediators: 1 Name: "USER_2" TimeCastBucketsPerMediator: 2 StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:32.783970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /MyRoot/USER_2, opId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.784063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 101:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: cant create subdomain with mediators, but no coordinators, at schemeshard: 72057594046678944 2025-06-24T21:08:32.786443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: cant create subdomain with mediators, but no coordinators" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:32.786679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: cant create subdomain with mediators, but no coordinators, operation: CREATE DATABASE, path: /MyRoot/USER_2 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 100 2025-06-24T21:08:32.787007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-24T21:08:32.787053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-06-24T21:08:32.787194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:08:32.787230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T21:08:32.787731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-24T21:08:32.787909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:08:32.787958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-24T21:08:32.788007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:290:2279] 2025-06-24T21:08:32.788200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:08:32.788230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:290:2279] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-06-24T21:08:32.788699Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false 
ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:32.788895Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1" took 215us result status StatusPathDoesNotExist 2025-06-24T21:08:32.789119Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:08:32.789640Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:32.789851Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_2" took 173us result status StatusPathDoesNotExist 2025-06-24T21:08:32.790025Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_2\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_2" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:08:32.790571Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:32.790743Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 171us result status StatusSuccess 2025-06-24T21:08:32.791233Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateDelete [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:32.405163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:32.405256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:32.405294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:32.405331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:32.405374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:32.405403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:32.405472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:32.405558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-24T21:08:32.406336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:32.406708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:32.489019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:32.489084Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:32.505598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:32.506044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:32.506223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:32.514354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:32.514589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:32.515290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:32.515618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:32.518618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:32.518841Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:32.519988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:32.520057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:32.520300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:32.520363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:32.520407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:32.520502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.527274Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:32.662961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:32.663227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.663487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:32.663540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:32.663756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:32.663817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:32.666271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:32.666446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:32.666644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.666716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:32.666758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:32.666791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:32.668828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.668900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:32.668960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:32.670811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.670863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.670920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:32.670983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:32.674654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:32.676744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:32.676947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:32.677884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:32.678037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:32.678082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:32.678394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:32.678448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:32.678653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:32.678730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:32.681054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:32.681105Z node 1 :FLAT_TX_SCHEMESHARD ... 
8944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2025-06-24T21:08:32.911036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 2025-06-24T21:08:32.911371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:32.911531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:32.911583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-06-24T21:08:32.911912Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 128 -> 240 2025-06-24T21:08:32.911975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-06-24T21:08:32.912210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:32.912484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-06-24T21:08:32.912531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T21:08:32.914487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:32.914536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:32.914722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:32.914815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:32.914870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-24T21:08:32.914916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-24T21:08:32.915258Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.915312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:08:32.915441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:32.915479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:32.915514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:08:32.915558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:32.915594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-24T21:08:32.915643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:08:32.915685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:08:32.915712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:08:32.915932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 2025-06-24T21:08:32.915976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 101, publications: 2, subscribers: 1 2025-06-24T21:08:32.916022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-24T21:08:32.916055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-24T21:08:32.916721Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:32.916823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:32.916873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:32.916948Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T21:08:32.917010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:08:32.918011Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at 
schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:32.918100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:08:32.918131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:08:32.918174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:08:32.918222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-06-24T21:08:32.918302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 1 2025-06-24T21:08:32.918358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:571:2478] 2025-06-24T21:08:32.921975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:32.922320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:08:32.922435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:08:32.922466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:572:2479] TestWaitNotification: OK eventTxId 101 2025-06-24T21:08:32.922940Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:32.923230Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 206us result status StatusSuccess 2025-06-24T21:08:32.923821Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 
CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::SchemeQuotas [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::OrderByCosineDistanceNotNullableLevel3 [GOOD] Test command err: Trying to start YDB, gRPC: 18186, MsgBus: 11958 2025-06-24T21:07:51.947298Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625612462479338:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:51.947495Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045cc/r3tmp/tmpnNTgMF/pdisk_1.dat 2025-06-24T21:07:52.338333Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:52.338711Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625612462479317:2079] 1750799271937282 != 1750799271937285 TServer::EnableGrpc on GrpcPort 18186, node 1 2025-06-24T21:07:52.356248Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:52.356356Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:52.358365Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:52.411833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:52.411858Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:52.411871Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:52.412008Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11958 
TClient is connected to server localhost:11958 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:07:52.967415Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:53.007624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:53.023507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:07:53.039954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:53.210733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:53.369152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:53.437393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:55.326836Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625629642350145:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:55.326970Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:55.677689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:55.715892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:55.750463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:55.817344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:55.853723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:55.888004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:55.929610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.049384Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625633937318105:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:56.049480Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:56.049708Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625633937318110:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:56.053581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:56.064627Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625633937318112:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:56.146481Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625633937318163:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:56.939263Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625612462479338:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:56.939336Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:07:57.240314Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625638232285734:3596], Recipient [1:7519625616757446939:2144]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:57.240354Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:50 ... peration IsReadyToDone TxId: 281474976710772 ready parts: 1/1 2025-06-24T21:08:31.802497Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710772, ready parts: 1/1, is published: true 2025-06-24T21:08:31.802532Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:7519625756882814290:2167] message: TxId: 281474976710772 2025-06-24T21:08:31.802559Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710772 ready parts: 1/1 2025-06-24T21:08:31.802574Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710772:0 2025-06-24T21:08:31.802583Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710772:0 2025-06-24T21:08:31.802622Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 6 2025-06-24T21:08:31.803247Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:31.803314Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [2:7519625756882814290:2167] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710772 at schemeshard: 72057594046644480 2025-06-24T21:08:31.803440Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124998, Sender [2:7519625756882814290:2167], Recipient [2:7519625756882814290:2167]: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710772 2025-06-24T21:08:31.803465Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5109: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletionResult 2025-06-24T21:08:31.803486Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710772 2025-06-24T21:08:31.803509Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976710772 2025-06-24T21:08:31.803604Z node 2 :BUILD_INDEX INFO: 
schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 281474976715674, txId# 281474976710772 2025-06-24T21:08:31.803663Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 281474976715674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index, IndexColumn: emb, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7519625782652620528:2496], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710768, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710772, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 43, upload bytes: 787, read rows: 40, read bytes: 600 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, txId# 281474976710772 2025-06-24T21:08:31.803702Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:31.803960Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:31.804056Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715674 Unlocking 2025-06-24T21:08:31.804125Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715674 Unlocking TBuildInfo{ IndexBuildId: 281474976715674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index, IndexColumn: emb, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7519625782652620528:2496], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710768, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710772, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 43, upload bytes: 787, read rows: 40, read bytes: 600 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T21:08:31.804152Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:31.804177Z node 2 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-24T21:08:31.804514Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: 
TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:31.804580Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715674 Done 2025-06-24T21:08:31.804635Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715674 Done TBuildInfo{ IndexBuildId: 281474976715674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index, IndexColumn: emb, State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7519625782652620528:2496], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710768, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710772, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 43, upload bytes: 787, read rows: 40, read bytes: 600 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T21:08:31.804656Z node 2 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:334: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 281474976715674, subscribers count# 1 2025-06-24T21:08:31.804673Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:31.804703Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:31.804757Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [2:7519625782652620528:2496] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715674 at schemeshard: 72057594046644480 2025-06-24T21:08:31.805023Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 274792450, Sender [2:7519625782652620528:2496], Recipient [2:7519625756882814290:2167]: NKikimrIndexBuilder.TEvGetRequest DatabaseName: "/Root" IndexBuildId: 281474976715674 2025-06-24T21:08:31.805053Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5086: StateWork, processing event TEvIndexBuilder::TEvGetRequest 2025-06-24T21:08:31.805116Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/Root" IndexBuildId: 281474976715674 2025-06-24T21:08:31.805351Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 281474976715674 State: STATE_DONE Settings { source_path: "/Root/TestTable" index { name: "index" index_columns: "emb" global_vector_kmeans_tree_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 StartTime { seconds: 1750799311 } EndTime { seconds: 1750799311 } UserSID: "" } 2025-06-24T21:08:31.805371Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:08:31.805411Z node 2 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:08:31.805560Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [2:7519625782652620528:2496] msg type: 274792451 msg: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 281474976715674 State: STATE_DONE Settings { source_path: "/Root/TestTable" index { name: "index" index_columns: "emb" global_vector_kmeans_tree_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 StartTime { seconds: 1750799311 } EndTime { seconds: 1750799311 } UserSID: "" } at schemeshard: 72057594046644480 2025-06-24T21:08:31.805902Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [2:7519625782652620531:3726], Recipient [2:7519625756882814290:2167]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:31.805934Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:08:31.805954Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:08:31.837425Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [2:7519625782652621536:4493], Recipient [2:7519625756882814290:2167]: NKikimrSchemeOp.TDescribePath Path: "/Root/TestTable" Options { ShowPrivateTable: false } 2025-06-24T21:08:31.837471Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T21:08:31.898275Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7519625756882814290:2167]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:31.898323Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:08:31.898369Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:7519625756882814290:2167], Recipient [2:7519625756882814290:2167]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:08:31.898388Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime |94.1%| [TA] $(B)/ydb/core/statistics/database/ut/test-results/unittest/{meta.json ... results_accumulator.log} |94.1%| [TA] {RESULT} $(B)/ydb/core/statistics/database/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_sql_streaming.py::test[suites-GroupByHopListKey-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHopNoKey-default.txt] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-EnableSeparateQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:20.383577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:20.383666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.383709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:20.383746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:20.383790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:20.383820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:20.383893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:20.383972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:20.384799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:20.395338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:20.480842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:20.480895Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:20.503736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:20.508446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:20.508691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:20.516920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:20.517212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState 
with owners number: 0 2025-06-24T21:08:20.518045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.518377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:20.521235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.521751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:20.534519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.534608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:20.534736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:20.534785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:20.534835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:20.534980Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.542565Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:08:20.700997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:20.701187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.701442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:20.701485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:20.701748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:20.701833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:20.704059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.704256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:20.704486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.704550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:20.704584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:20.704636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:20.707304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.707371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:20.707422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:20.709067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.709115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:20.709164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.709239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:20.720658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:20.722437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:20.722610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:20.723372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:20.723484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:20.723521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.723808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:20.723848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:20.723996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:20.724076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:20.726371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:20.726441Z node 1 :FLAT_TX_SCHEMESHARD D ... 944 2025-06-24T21:08:33.421513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:08:33.423350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:33.423399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:33.423553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:08:33.423755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:33.423793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-24T21:08:33.423829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-24T21:08:33.424057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:08:33.424100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 103:0 ProgressState at tablet: 
72057594046678944 2025-06-24T21:08:33.424184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:08:33.424219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 103:0, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-06-24T21:08:33.424254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 103:0 129 -> 240 2025-06-24T21:08:33.425152Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 10 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:08:33.425237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 10 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:08:33.425270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-24T21:08:33.425308Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 10 2025-06-24T21:08:33.425349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:08:33.426295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:08:33.426355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:08:33.426374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-24T21:08:33.426396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-24T21:08:33.426420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:08:33.426471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-24T21:08:33.428401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:08:33.428454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 103:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:33.428730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:08:33.428888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T21:08:33.428917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T21:08:33.428949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T21:08:33.428976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T21:08:33.429003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-24T21:08:33.429057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:411:2376] message: TxId: 103 2025-06-24T21:08:33.429088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T21:08:33.429142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:0 2025-06-24T21:08:33.429168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T21:08:33.429239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:08:33.430069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:33.430103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:33.430803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:08:33.431814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:08:33.433228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:33.433274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 0, path id: 2 2025-06-24T21:08:33.433495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T21:08:33.433534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:1341:3265] 2025-06-24T21:08:33.434046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 11 PathOwnerId: 72057594046678944, cookie: 0 TestWaitNotification: OK 
eventTxId 103 2025-06-24T21:08:33.437221Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:33.437436Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDatabase" took 240us result status StatusSuccess 2025-06-24T21:08:33.437833Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "SomeDatabase" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 11 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 11 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SubDomainStateVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "fast" Kind: "fast_kind" } StoragePools { Name: "large" Kind: "large_kind" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "large_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "fast_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 2800 data_size_soft_quota: 2200 storage_quotas { unit_kind: "fast_kind" data_size_hard_quota: 600 data_size_soft_quota: 500 } storage_quotas { unit_kind: "large_kind" data_size_hard_quota: 2200 data_size_soft_quota: 1700 } } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:31.478344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:31.478455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:31.478494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:31.478526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:31.478565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:31.478593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:31.478665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:31.478751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:31.479465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:31.479843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:31.554444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:31.554492Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:31.568273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:31.568658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:31.568802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:31.577976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:31.578196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:31.578847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.579255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:31.582574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:31.582798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 
2025-06-24T21:08:31.584088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:31.584163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:31.584420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:31.584473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:31.584518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:31.584619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.592515Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:31.716525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:31.716687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.716906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:31.716956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:31.717143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:31.717213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:31.719133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.719341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:31.719542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-24T21:08:31.719596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:31.719637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:31.719666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:31.721398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.721449Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:31.721488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:31.723098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.723172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.723214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:31.723273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:31.727094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:31.728931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:31.729137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:31.730092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.730230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:31.730286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:31.730579Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:31.730639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:31.730803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:31.730896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:31.732937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:31.732970Z node 1 :FLAT_TX_SCHEMESHARD ... 137:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:33.650988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 11 2025-06-24T21:08:33.651055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 10] was 3 2025-06-24T21:08:33.653781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 137, response: Status: StatusAccepted TxId: 137 SchemeshardId: 72057594046678944 PathId: 10, at schemeshard: 72057594046678944 2025-06-24T21:08:33.654153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 137, database: /MyRoot/USER_0, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /MyRoot/USER_0/Table11 2025-06-24T21:08:33.654473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:33.654519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 137, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:33.654782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 137, path id: [OwnerId: 72057594046678944, LocalPathId: 10] 2025-06-24T21:08:33.654923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:33.654979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:1028:2888], at schemeshard: 72057594046678944, txId: 137, path id: 2 2025-06-24T21:08:33.655044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:1028:2888], at schemeshard: 72057594046678944, txId: 137, path id: 10 2025-06-24T21:08:33.655192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 137:0, at schemeshard: 72057594046678944 
2025-06-24T21:08:33.655264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 137:0 ProgressState, operation type: TxCreateTable, at tablet# 72057594046678944 2025-06-24T21:08:33.655514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:358: TCreateParts opId# 137:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046678944 OwnerIdx: 10 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 2 } ObjectId: 10 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 2 } 2025-06-24T21:08:33.656782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 2 Version: 18 PathOwnerId: 72057594046678944, cookie: 137 2025-06-24T21:08:33.656898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 2 Version: 18 PathOwnerId: 72057594046678944, cookie: 137 2025-06-24T21:08:33.656946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 137 2025-06-24T21:08:33.656989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 137, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18 2025-06-24T21:08:33.657038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 12 2025-06-24T21:08:33.659026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 10 Version: 1 PathOwnerId: 72057594046678944, cookie: 137 2025-06-24T21:08:33.659129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 10 Version: 1 PathOwnerId: 72057594046678944, cookie: 137 2025-06-24T21:08:33.659188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 137 2025-06-24T21:08:33.659224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 137, pathId: [OwnerId: 72057594046678944, LocalPathId: 10], version: 1 2025-06-24T21:08:33.659270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 10] was 4 2025-06-24T21:08:33.659360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 137, ready parts: 0/1, is published: true 2025-06-24T21:08:33.662073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 137:0 from tablet: 72057594046678944 to tablet: 72057594037968897 cookie: 72057594046678944:10 msg type: 268697601 2025-06-24T21:08:33.662243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 137, partId: 0, tablet: 72057594037968897 2025-06-24T21:08:33.662296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1810: TOperation RegisterRelationByShardIdx, TxId: 137, shardIdx: 72057594046678944:10, partId: 0 2025-06-24T21:08:33.662742Z node 1 :HIVE INFO: tablet_helpers.cpp:1181: [72057594037968897] TEvCreateTablet, msg: Owner: 72057594046678944 OwnerIdx: 10 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 2 } ObjectId: 10 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 2 } 2025-06-24T21:08:33.662997Z node 1 :HIVE INFO: tablet_helpers.cpp:1245: [72057594037968897] TEvCreateTablet, Owner 72057594046678944, OwnerIdx 10, type DataShard, boot OK, tablet id 72075186233409555 2025-06-24T21:08:33.663125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5947: Handle TEvCreateTabletReply at schemeshard: 72057594046678944 message: Status: OK Owner: 72057594046678944 OwnerIdx: 10 TabletID: 72075186233409555 Origin: 72057594037968897 2025-06-24T21:08:33.663192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation FindRelatedPartByShardIdx, TxId: 137, shardIdx: 72057594046678944:10, partId: 0 2025-06-24T21:08:33.663325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 137:0, at schemeshard: 72057594046678944, message: Status: OK Owner: 72057594046678944 OwnerIdx: 10 TabletID: 72075186233409555 Origin: 72057594037968897 2025-06-24T21:08:33.663380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:176: TCreateParts opId# 137:0 HandleReply TEvCreateTabletReply, at tabletId: 72057594046678944 2025-06-24T21:08:33.663468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:179: TCreateParts opId# 137:0 HandleReply TEvCreateTabletReply, message: Status: OK Owner: 72057594046678944 OwnerIdx: 10 TabletID: 72075186233409555 Origin: 72057594037968897 2025-06-24T21:08:33.663581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 137:0 2 -> 3 2025-06-24T21:08:33.664547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 137 2025-06-24T21:08:33.666759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 137 2025-06-24T21:08:33.670124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 137:0, at schemeshard: 72057594046678944 2025-06-24T21:08:33.670875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 137:0, at schemeshard: 72057594046678944 2025-06-24T21:08:33.670934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_table.cpp:200: TCreateTable TConfigureParts operationId# 137:0 ProgressState at tabletId# 72057594046678944 2025-06-24T21:08:33.671039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:220: TCreateTable TConfigureParts operationId# 137:0 ProgressState Propose modify scheme on datashard datashardId: 72075186233409555 seqNo: 4:5 2025-06-24T21:08:33.671509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_create_table.cpp:236: TCreateTable TConfigureParts operationId# 137:0 ProgressState Propose modify scheme on datashard datashardId: 72075186233409555 message: TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 976 RawX2: 4294970143 } TxBody: "\n\236\004\n\007Table11\020\n\032\r\n\003key\030\002 \001(\000@\000\032\020\n\005Value\030\200$ \002(\000@\000(\001:\262\003\022\253\003\010\200\200\200\002\020\254\002\030\364\003 \200\200\200\010(\0000\200\200\200 8\200\200\200\010@\2008H\000RX\010\000\020\000\030\010 \010(\200\200\200@0\377\377\377\377\0178\001B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen1P\nX\200\200\001`nh\000p\000Rb\010\001\020\200\200\200\024\030\005 \020(\200\200\200\200\0020\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen2P\nX\200\200\001`nh\200\200\200\004p\200\200\200\004Rc\010\002\020\200\200\200\310\001\030\005 \020(\200\200\200\200@0\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen3P\nX\200\200\001`nh\200\200\200(p\200\200\200(X\001`\005j$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionr\017compaction_gen0z\017compaction_gen0\202\001\004scan\210\001\200\200\200\010\220\001\364\003\230\0012\270\001\2008\300\001\006R\002\020\001J\026/MyRoot/USER_0/Table11\242\001\006\001\000\000\000\000\200\252\001\000\260\001\001\270\001\000\210\002\001\222\002\013\t\240\207\205\000\000\000\000\001\020\n:\004\010\004\020\005" TxId: 137 ExecLevel: 0 Flags: 0 SchemeShardId: 72057594046678944 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } SubDomainPathId: 2 2025-06-24T21:08:33.674939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 137:0 from tablet: 72057594046678944 to tablet: 72075186233409555 cookie: 72057594046678944:10 msg type: 269549568 2025-06-24T21:08:33.675122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 137, partId: 0, tablet: 72075186233409555 TestModificationResult got TxId: 137, wait until txId: 137 >> DataShardSnapshots::LockedWriteWithPendingVolatileCommit-UseSink [GOOD] >> TSchemeShardSubDomainTest::CreateAlterNbsChannels [GOOD] >> TSchemeShardSubDomainTest::ForceDropTwice [GOOD] >> DataShardSnapshots::VolatileSnapshotRenameTimeout [GOOD] >> DataShardSnapshots::UncommittedWriteRestartDuringCommit ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::RecreateObserver [GOOD] Test command err: 2025-06-24T21:06:44.540469Z :RetryDiscoveryWithCancel INFO: Random seed for debugging is 1750799204540443 2025-06-24T21:06:44.756808Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625327073549545:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:06:44.757178Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:06:44.772776Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625324711792628:2073];send_to=[0:7307199536658146131:7762515]; 
2025-06-24T21:06:44.772857Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:06:44.883737Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00477f/r3tmp/tmpsaL7zb/pdisk_1.dat 2025-06-24T21:06:44.900832Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:06:45.020042Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:06:45.038384Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:06:45.038488Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:06:45.047219Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:06:45.047921Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6743, node 1 2025-06-24T21:06:45.095005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:06:45.095105Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:06:45.098050Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:06:45.103576Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00477f/r3tmp/yandex3ZCf33.tmp 2025-06-24T21:06:45.103619Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00477f/r3tmp/yandex3ZCf33.tmp 2025-06-24T21:06:45.103822Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00477f/r3tmp/yandex3ZCf33.tmp 2025-06-24T21:06:45.104025Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:06:45.146391Z INFO: TTestServer started on Port 1477 GrpcPort 6743 TClient is connected to server localhost:1477 PQClient connected to localhost:6743 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:06:45.376318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-24T21:06:45.763703Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:06:45.776500Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:06:46.907564Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625333301727500:2271], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:46.907564Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625333301727491:2268], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:46.907644Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:06:46.911406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:06:46.924219Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625333301727505:2272], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-24T21:06:47.103292Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625337596694829:2130] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:06:47.124189Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519625335663485159:2302], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:06:47.124403Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MTAxYTY3ZDYtZDdhNDY2NTctNjkyNDI0YTYtNDY2M2E4NzY=, ActorId: [1:7519625335663485084:2295], ActorState: ExecuteState, TraceId: 01jyhw7jfwadyx3as3b6wh1gtk, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:06:47.125175Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519625337596694844:2276], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:06:47.125373Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZjQwNThmYjYtOTQxYWU3YTMtZDdhNzUzYS0xNTRlMDBjNQ==, ActorId: [2:7519625333301727489:2267], ActorState: ExecuteState, TraceId: 01jyhw7jft7rrmkaekgy6fg48x, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:06:47.125828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:06:47.126207Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:06:47.126208Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:06:47.220547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:06:47.337565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:6743", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, false, 1000); 2025-06-24T21:06:47.502093Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhw7jz860jwmrgg5tx6dkj0, Database: , DatabaseId: /Root, ... 
8:32.185333Z node 3 :PQ_READ_PROXY INFO: partition_actor.cpp:683: session cookie 1 consumer shared/user session shared/user_3_1_14999328041966599322_v1 INIT DONE TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 0 readOffset 0 committedOffset 0 2025-06-24T21:08:32.185416Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1413: session cookie 1 consumer shared/user session shared/user_3_1_14999328041966599322_v1 sending to client partition status >>> Got event: StartPartitionSession { Partition session id: 1 Topic: "test-topic" Partition: 0 Database name: dc1 Database path: /Root Database id: account-dc1 CommittedOffset: 0 EndOffset: 0 } 2025-06-24T21:08:32.186561Z :INFO: [/Root] [/Root] [390f0d81-b5fbb562-c2863c5c-83e5541c] Closing read session. Close timeout: 0.000000s 2025-06-24T21:08:32.186662Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:0:0 2025-06-24T21:08:32.186713Z :INFO: [/Root] [/Root] [390f0d81-b5fbb562-c2863c5c-83e5541c] Counters: { Errors: 0 CurrentSessionLifetimeMs: 39 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:08:32.186818Z :NOTICE: [/Root] [/Root] [390f0d81-b5fbb562-c2863c5c-83e5541c] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-24T21:08:32.186870Z :DEBUG: [/Root] [/Root] [390f0d81-b5fbb562-c2863c5c-83e5541c] [] Abort session to cluster 2025-06-24T21:08:32.187705Z :INFO: [/Root] [/Root] [98b1df81-65665aa5-404fe604-cff34cd7] Closing read session. Close timeout: 0.000000s 2025-06-24T21:08:32.187742Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-24T21:08:32.187764Z :INFO: [/Root] [/Root] [98b1df81-65665aa5-404fe604-cff34cd7] Counters: { Errors: 0 CurrentSessionLifetimeMs: 36 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:08:32.187809Z :NOTICE: [/Root] [/Root] [98b1df81-65665aa5-404fe604-cff34cd7] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-24T21:08:32.187835Z :DEBUG: [/Root] [/Root] [98b1df81-65665aa5-404fe604-cff34cd7] [] Abort session to cluster 2025-06-24T21:08:32.188301Z :INFO: [/Root] [/Root] [d98e2166-b585530a-73aca64a-65216348] Closing read session. Close timeout: 0.000000s 2025-06-24T21:08:32.188338Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-24T21:08:32.188371Z :INFO: [/Root] [/Root] [d98e2166-b585530a-73aca64a-65216348] Counters: { Errors: 0 CurrentSessionLifetimeMs: 34 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:08:32.188416Z :NOTICE: [/Root] [/Root] [d98e2166-b585530a-73aca64a-65216348] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-24T21:08:32.188440Z :DEBUG: [/Root] [/Root] [d98e2166-b585530a-73aca64a-65216348] [] Abort session to cluster 2025-06-24T21:08:32.188841Z :INFO: [/Root] [/Root] [d98e2166-b585530a-73aca64a-65216348] Closing read session. Close timeout: 0.000000s 2025-06-24T21:08:32.188871Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-24T21:08:32.188902Z :INFO: [/Root] [/Root] [d98e2166-b585530a-73aca64a-65216348] Counters: { Errors: 0 CurrentSessionLifetimeMs: 35 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:08:32.188975Z :NOTICE: [/Root] [/Root] [d98e2166-b585530a-73aca64a-65216348] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:08:32.189719Z :INFO: [/Root] [/Root] [98b1df81-65665aa5-404fe604-cff34cd7] Closing read session. Close timeout: 0.000000s 2025-06-24T21:08:32.189752Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-24T21:08:32.189779Z :INFO: [/Root] [/Root] [98b1df81-65665aa5-404fe604-cff34cd7] Counters: { Errors: 0 CurrentSessionLifetimeMs: 38 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:08:32.189827Z :NOTICE: [/Root] [/Root] [98b1df81-65665aa5-404fe604-cff34cd7] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:08:32.190284Z :INFO: [/Root] [/Root] [390f0d81-b5fbb562-c2863c5c-83e5541c] Closing read session. Close timeout: 0.000000s 2025-06-24T21:08:32.190322Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:0:0 2025-06-24T21:08:32.190351Z :INFO: [/Root] [/Root] [390f0d81-b5fbb562-c2863c5c-83e5541c] Counters: { Errors: 0 CurrentSessionLifetimeMs: 43 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:08:32.190405Z :NOTICE: [/Root] [/Root] [390f0d81-b5fbb562-c2863c5c-83e5541c] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:08:32.191277Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_3_1_14999328041966599322_v1 grpc read done: success# 0, data# { } 2025-06-24T21:08:32.191324Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_3_1_14999328041966599322_v1 grpc read failed 2025-06-24T21:08:32.191371Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_3_1_14999328041966599322_v1 grpc closed 2025-06-24T21:08:32.191422Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_3_1_14999328041966599322_v1 is DEAD 2025-06-24T21:08:32.196946Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_3_2_3506132747927617206_v1 grpc read done: success# 0, data# { } 2025-06-24T21:08:32.196976Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer shared/user session shared/user_3_2_3506132747927617206_v1 grpc read failed 2025-06-24T21:08:32.196997Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer shared/user session shared/user_3_2_3506132747927617206_v1 grpc closed 2025-06-24T21:08:32.197016Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer shared/user session shared/user_3_2_3506132747927617206_v1 is DEAD 2025-06-24T21:08:32.197720Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 3 consumer shared/user session shared/user_3_3_9572405226079203024_v1 grpc read done: success# 0, data# { } 2025-06-24T21:08:32.197744Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 3 consumer shared/user session shared/user_3_3_9572405226079203024_v1 grpc read failed 2025-06-24T21:08:32.197776Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 3 consumer shared/user session shared/user_3_3_9572405226079203024_v1 grpc closed 2025-06-24T21:08:32.197791Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 3 consumer shared/user session shared/user_3_3_9572405226079203024_v1 is DEAD 2025-06-24T21:08:32.199401Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session shared/user_3_1_14999328041966599322_v1 2025-06-24T21:08:32.197919Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519625789157303125:2520] disconnected; active server actors: 1 2025-06-24T21:08:32.199455Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [3:7519625789157303129:2534] destroyed 2025-06-24T21:08:32.199514Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_3_1_14999328041966599322_v1 2025-06-24T21:08:32.197975Z node 4 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519625789157303125:2520] client user disconnected session shared/user_3_1_14999328041966599322_v1 2025-06-24T21:08:32.198067Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037893][rt3.dc1--test-topic] consumer user rebalancing was scheduled 2025-06-24T21:08:32.198151Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1255: 
[72075186224037893][rt3.dc1--test-topic] consumer user balancing. Sessions=2, Families=1, UnradableFamilies=1 [1 (0), ], RequireBalancing=0 [] 2025-06-24T21:08:32.198218Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1302: [72075186224037893][rt3.dc1--test-topic] consumer user balancing family=1 (Status=Free, Partitions=[0]) for ReadingSession "shared/user_3_3_9572405226079203024_v1" (Sender=[3:7519625789157303109:2524], Pipe=[3:7519625789157303124:2524], Partitions=[], ActiveFamilyCount=0) 2025-06-24T21:08:32.198272Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:545: [72075186224037893][rt3.dc1--test-topic] consumer user family 1 status Active partitions [0] session "shared/user_3_3_9572405226079203024_v1" sender [3:7519625789157303109:2524] lock partition 0 for ReadingSession "shared/user_3_3_9572405226079203024_v1" (Sender=[3:7519625789157303109:2524], Pipe=[3:7519625789157303124:2524], Partitions=[], ActiveFamilyCount=1) generation 1 step 3 2025-06-24T21:08:32.198319Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037893][rt3.dc1--test-topic] consumer user start rebalancing. familyCount=1, sessionCount=2, desiredFamilyCount=0, allowPlusOne=1 2025-06-24T21:08:32.198346Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037893][rt3.dc1--test-topic] consumer user balancing duration: 0.000176s 2025-06-24T21:08:32.199913Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519625789157303123:2523] disconnected; active server actors: 1 2025-06-24T21:08:32.199947Z node 4 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519625789157303123:2523] client user disconnected session shared/user_3_2_3506132747927617206_v1 2025-06-24T21:08:32.199980Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037893][rt3.dc1--test-topic] consumer user rebalancing was scheduled 2025-06-24T21:08:32.200015Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519625789157303124:2524] disconnected; active server actors: 1 2025-06-24T21:08:32.200030Z node 4 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519625789157303124:2524] client user disconnected session shared/user_3_3_9572405226079203024_v1 2025-06-24T21:08:32.611453Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:08:32.611482Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateAlterNbsChannels [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:32.896303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, 
MaxRate# 1 2025-06-24T21:08:32.896466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:32.896504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:32.896541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:32.896582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:32.896609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:32.896692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:32.896766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:32.897625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:32.898041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:32.979104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:32.979189Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:32.995594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:32.996018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:32.996210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:33.005554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:33.005779Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:33.006531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:33.006898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:33.010284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:33.010548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:33.011788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:33.011873Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:33.012119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:33.012170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:33.012214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:33.012305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:33.019494Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:33.134940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:33.135212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:33.135485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:33.135532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:33.135763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:33.135830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:33.138426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:33.138600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:33.138788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:33.138843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:33.138884Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:33.138915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:33.141014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:33.141093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:33.141135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:33.143200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:33.143256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:33.143301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:33.143363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:33.147077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:33.149648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:33.149814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:33.150894Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:33.151024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:33.151065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:33.151536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:33.151593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, 
operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:33.151750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:33.151825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:33.153926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:33.153973Z node 1 :FLAT_TX_SCHEMESHARD ... schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:08:34.284270Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-24T21:08:34.286114Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:34.286158Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:34.286180Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:34.286201Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:08:34.287438Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-24T21:08:34.287630Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-24T21:08:34.288041Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:34.288389Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409546 2025-06-24T21:08:34.289193Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T21:08:34.290099Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-24T21:08:34.290272Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-24T21:08:34.292366Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, 
LocalPathId: 3] was 2 Forgetting tablet 72075186233409548 2025-06-24T21:08:34.293008Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T21:08:34.293403Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-24T21:08:34.293515Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T21:08:34.293669Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409547 2025-06-24T21:08:34.294929Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-24T21:08:34.295086Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 Forgetting tablet 72075186233409549 2025-06-24T21:08:34.295492Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T21:08:34.295617Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:34.295658Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:08:34.295731Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:08:34.296077Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:34.296124Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:34.296243Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:08:34.298387Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:08:34.298438Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T21:08:34.298532Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T21:08:34.298553Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-24T21:08:34.301112Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:08:34.301167Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T21:08:34.301308Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-24T21:08:34.301355Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-24T21:08:34.301424Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:34.301592Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:34.301642Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:34.301745Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:34.302115Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:34.303679Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-24T21:08:34.303927Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-24T21:08:34.303964Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-24T21:08:34.304344Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-24T21:08:34.304427Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-24T21:08:34.304465Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:655:2604] TestWaitNotification: OK eventTxId 105 2025-06-24T21:08:34.305018Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/BSVolume" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:34.305250Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/BSVolume" took 286us result status StatusPathDoesNotExist 2025-06-24T21:08:34.305477Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: 
Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/BSVolume\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/BSVolume" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:08:34.306125Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:34.306301Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 194us result status StatusPathDoesNotExist 2025-06-24T21:08:34.306449Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::ForceDropTwice [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:33.853098Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:33.853179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:33.853227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:33.853260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:33.853300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing 
config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:33.853327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:33.853428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:33.853510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:33.854210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:33.854572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:33.936189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:33.936244Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:33.952066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:33.952438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:33.952616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:33.960554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:33.960773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:33.961398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:33.961699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:33.964903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:33.965117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:33.966409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:33.966495Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:33.966730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:33.966780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:33.966820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: 
TTxServerlessStorageBilling.Complete 2025-06-24T21:08:33.966912Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:33.973542Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:34.113807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:34.114001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:34.114192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:34.114243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:34.114445Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:34.114508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:34.116380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:34.116531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:34.116679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:34.116754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:34.116783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:34.116810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:34.118535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:34.118607Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 
2025-06-24T21:08:34.118644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:34.120421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:34.120483Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:34.120533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:34.120598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:34.124366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:34.126602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:34.126792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:34.127829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:34.127967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:34.128022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:34.128372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:34.128427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:34.128604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:34.128679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:34.131160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard 
DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:34.131204Z node 1 :FLAT_TX_SCHEMESHARD ... node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 Forgetting tablet 72075186233409546 2025-06-24T21:08:34.403882Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 TabletID: 72075186233409551 2025-06-24T21:08:34.404383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-24T21:08:34.404570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 Forgetting tablet 72075186233409548 2025-06-24T21:08:34.405606Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-24T21:08:34.406039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-06-24T21:08:34.406207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:08:34.407053Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-24T21:08:34.407595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T21:08:34.407749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409551 2025-06-24T21:08:34.408629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-24T21:08:34.408795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186233409547 2025-06-24T21:08:34.410286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:34.410343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:34.410463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 Forgetting tablet 72075186233409549 2025-06-24T21:08:34.411356Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:34.411422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:34.411499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:34.412475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-24T21:08:34.412525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-24T21:08:34.412699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:08:34.412726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T21:08:34.416087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T21:08:34.416137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-24T21:08:34.416252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-24T21:08:34.416276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-24T21:08:34.416518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:08:34.416562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T21:08:34.417054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-24T21:08:34.417095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-24T21:08:34.417396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:34.417651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 102 2025-06-24T21:08:34.417968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:08:34.418030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 TestWaitNotification wait txId: 103 2025-06-24T21:08:34.418115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 
2025-06-24T21:08:34.418135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T21:08:34.418544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:08:34.418687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:08:34.418723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:666:2567] 2025-06-24T21:08:34.418828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T21:08:34.418933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T21:08:34.418955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:666:2567] TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 2025-06-24T21:08:34.419437Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:34.419669Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 229us result status StatusPathDoesNotExist 2025-06-24T21:08:34.419830Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:08:34.420224Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:34.420376Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 158us result status StatusSuccess 2025-06-24T21:08:34.420736Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" 
PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpVectorIndexes::OrderByCosineLevel2+Nullable-UseSimilarity [GOOD] >> KqpVectorIndexes::OrderByCosineLevel2-Nullable+UseSimilarity |94.1%| [TA] $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/test-results/unittest/{meta.json ... results_accumulator.log} |94.1%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpMultishardIndex::DataColumnSelect [GOOD] >> KqpVectorIndexes::OrderByCosineLevel2-Nullable-UseSimilarity [GOOD] >> KqpVectorIndexes::SimpleVectorIndexOrderByCosineDistanceWithCover+Nullable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::LockedWriteWithPendingVolatileCommit-UseSink [GOOD] Test command err: 2025-06-24T21:07:07.516122Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:07:07.516387Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:07:07.516505Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046c4/r3tmp/tmpvRUXw9/pdisk_1.dat 2025-06-24T21:07:07.766376Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:07:07.774177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:07.815467Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:07.819110Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799225356381 != 1750799225356385 2025-06-24T21:07:07.862458Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:07:07.863495Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T21:07:07.863749Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:07.863836Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:07.876008Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:07.955326Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T21:07:07.955394Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T21:07:07.956297Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:600:2508] 2025-06-24T21:07:08.072262Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:600:2508] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T21:07:08.072385Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:600:2508] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:07:08.072994Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:600:2508] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:07:08.073087Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:600:2508] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:07:08.073458Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:07:08.073687Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:600:2508] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:07:08.073865Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:600:2508] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:07:08.075612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:08.076029Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T21:07:08.076490Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:600:2508] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T21:07:08.076538Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:600:2508] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T21:07:08.105570Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:07:08.106477Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:07:08.106835Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:07:08.107014Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:07:08.136225Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:07:08.136740Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:07:08.136820Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:07:08.138065Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:07:08.138133Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:07:08.138185Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:07:08.138444Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-24T21:07:08.138545Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:07:08.138608Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:07:08.138930Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:07:08.161355Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:07:08.161550Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:07:08.161683Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:07:08.161712Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:07:08.161736Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:07:08.161771Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:07:08.161934Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:07:08.161971Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:07:08.162244Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:07:08.162326Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:07:08.162380Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:07:08.162409Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:07:08.162455Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:07:08.162482Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:07:08.162520Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:07:08.162545Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:07:08.162579Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:07:08.162662Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:08.162690Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:08.162730Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], 
sessionId# [0:0:0] 2025-06-24T21:07:08.163083Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:07:08.163115Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:07:08.163215Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:07:08.163402Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:07:08.163468Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:07:08.163535Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:07:08.163578Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21: ... 88 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-24T21:08:33.558465Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-24T21:08:33.558514Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [13:712:2590], Recipient [13:624:2529]: {TEvReadSet step# 3031 txid# 281474976715663 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-24T21:08:33.558534Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T21:08:33.558555Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037888 source 72075186224037889 dest 72075186224037888 producer 72075186224037889 txId 281474976715663 2025-06-24T21:08:33.558586Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037888 got read set: {TEvReadSet step# 3031 txid# 281474976715663 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-24T21:08:33.558743Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:705: Complete [3031 : 281474976715663] from 72075186224037888 at tablet 72075186224037888 send result to client [13:929:2727], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:08:33.559002Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [13:624:2529], Recipient [13:712:2590]: {TEvReadSet step# 3031 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-24T21:08:33.559023Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T21:08:33.559045Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037888 dest 72075186224037889 producer 72075186224037888 txId 281474976715663 2025-06-24T21:08:33.559078Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 3031 txid# 281474976715663 TabletSource# 
72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-24T21:08:33.559164Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:705: Complete [3031 : 281474976715663] from 72075186224037889 at tablet 72075186224037889 send result to client [13:929:2727], exec latency: 0 ms, propose latency: 0 ms TEvProposeTransactionResult: TxKind: TX_KIND_DATA Origin: 72075186224037888 Status: COMPLETE TxId: 281474976715663 TxResult: "" ExecLatency: 0 ProposeLatency: 0 TxStats { PerShardStats { ShardId: 72075186224037888 CpuTimeUsec: 2093 } } ComputeActorStats { Tasks { Tables { TablePath: "/Root/table-1" WriteRows: 1 WriteBytes: 8 } } } CommitVersion { Step: 3031 TxId: 281474976715663 } TEvProposeTransactionResult: TxKind: TX_KIND_DATA Origin: 72075186224037889 Status: COMPLETE TxId: 281474976715663 TxResult: "" ExecLatency: 0 ProposeLatency: 0 TxStats { PerShardStats { ShardId: 72075186224037889 CpuTimeUsec: 1051 } } ComputeActorStats { Tasks { Tables { TablePath: "/Root/table-2" WriteRows: 1 WriteBytes: 8 } } } CommitVersion { Step: 3031 TxId: 281474976715663 } 2025-06-24T21:08:33.560140Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:08:33.560511Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:08:33.562365Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:08:33.569923Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2025-06-24T21:08:33.570137Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [13:624:2529], Recipient [13:712:2590]: {TEvReadSet step# 3031 txid# 281474976715663 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2025-06-24T21:08:33.570210Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:08:33.570270Z node 13 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 281474976715663 2025-06-24T21:08:33.570395Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-24T21:08:33.599355Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [13:712:2590], Recipient [13:624:2529]: {TEvReadSet step# 3031 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletConsumer# 72075186224037889 Flags# 0 Seqno# 1} 2025-06-24T21:08:33.599429Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:08:33.599472Z node 13 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 281474976715663 2025-06-24T21:08:33.827542Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [13:59:2106] Handle TEvExecuteKqpTransaction 2025-06-24T21:08:33.827654Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [13:59:2106] TxId# 281474976715667 ProcessProposeKqpTransaction 2025-06-24T21:08:33.829081Z node 13 :KQP_EXECUTER ERROR: 
kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyhwatn28cpdb64cfam5q11p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGNhMWI5M2MtOGQ0YzliZTItZWYxN2I2ZDUtZWI4NTE1Nzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TEvRead: ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 2025-06-24T21:08:33.833270Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [13:1039:2833], Recipient [13:624:2529]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2025-06-24T21:08:33.833565Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-24T21:08:33.833671Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v3031/281474976715663 IncompleteEdge# v{min} UnprotectedReadEdge# v4000/18446744073709551615 ImmediateWriteEdge# v4001/0 ImmediateWriteEdgeReplied# v4001/0 2025-06-24T21:08:33.833759Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v4001/18446744073709551615 2025-06-24T21:08:33.833886Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckRead 2025-06-24T21:08:33.834087Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T21:08:33.834174Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckRead 2025-06-24T21:08:33.834254Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T21:08:33.834341Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T21:08:33.834413Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-24T21:08:33.834475Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T21:08:33.834509Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:08:33.834536Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T21:08:33.834563Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteRead 2025-06-24T21:08:33.834753Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2025-06-24T21:08:33.835245Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 
Complete read# {[13:1039:2833], 0} after executionsCount# 1 2025-06-24T21:08:33.835357Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[13:1039:2833], 0} sends rowCount# 2, bytes# 96, quota rows left# 999, quota bytes left# 5242784, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T21:08:33.835483Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[13:1039:2833], 0} finished in read 2025-06-24T21:08:33.835602Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T21:08:33.835640Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T21:08:33.835672Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:08:33.835702Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:08:33.835758Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T21:08:33.835783Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:08:33.835818Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-24T21:08:33.835912Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T21:08:33.836115Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-24T21:08:33.837352Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [13:1039:2833], Recipient [13:624:2529]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T21:08:33.837446Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 1 } items { uint32_value: 11 } }, { items { uint32_value: 2 } items { uint32_value: 2 } items { uint32_value: 22 } } >> KqpVectorIndexes::OrderByCosineLevel1+Nullable-UseSimilarity [GOOD] >> KqpVectorIndexes::OrderByCosineLevel1-Nullable+UseSimilarity >> TestYmqHttpProxy::TestSendMessage >> TestKinesisHttpProxy::DifferentContentTypes >> DataShardSnapshots::PipelineAndMediatorRestoreRace [GOOD] >> DataShardSnapshots::ShardRestartLockBasic >> TestKinesisHttpProxy::CreateStreamInIncorrectDb >> TestKinesisHttpProxy::UnauthorizedGetShardIteratorRequest >> TestKinesisHttpProxy::MissingAction >> TestKinesisHttpProxy::TestPing >> TestYmqHttpProxy::TestCreateQueue >> TestYmqHttpProxy::TestGetQueueUrl >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-false [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is 
[1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:31.748404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:31.748496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:31.748537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:31.748577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:31.748625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:31.748647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:31.748703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:31.748774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:31.749410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:31.749682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:31.833852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:31.833928Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:31.847109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:31.851531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:31.851705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:31.861368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:31.861639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:31.862393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:31.862745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:31.865752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:31.865914Z node 
1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:31.866782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:31.866833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:31.866946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:31.866995Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:31.867058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:31.867216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:31.873688Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:08:32.011017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:32.011216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.011464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:32.011514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:32.011719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:32.011778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:32.013983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:32.014169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:32.014353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.014422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:32.014457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:32.014489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:32.016492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.016598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:32.016647Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:32.018643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.018701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:32.018750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:32.018819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:32.022922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:32.025113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:32.025327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:32.026451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:32.026595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:32.026648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply 
TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:32.026925Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:32.026974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:32.027194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:32.027277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:32.029514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:32.029565Z node 1 :FLAT_TX_SCHEMESHARD D ... ransactionResult> complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:08:36.568364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:08:36.568642Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:36.568703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:36.568896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:08:36.569058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:36.569126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-24T21:08:36.569174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-24T21:08:36.569551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:08:36.569607Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 103:0 ProgressState at tablet: 72057594046678944 2025-06-24T21:08:36.569697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:08:36.569735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 103:0, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-06-24T21:08:36.569783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state 
for txid 103:0 129 -> 240 2025-06-24T21:08:36.570600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:08:36.570765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:08:36.570827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-24T21:08:36.570869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8 2025-06-24T21:08:36.570899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:08:36.571708Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:08:36.571785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:08:36.571814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-24T21:08:36.571852Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-24T21:08:36.571882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:08:36.571946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-24T21:08:36.573888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:08:36.573938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 103:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:36.574280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:08:36.574421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T21:08:36.574446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T21:08:36.574481Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T21:08:36.574521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T21:08:36.574547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-24T21:08:36.574595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:411:2376] message: TxId: 103 2025-06-24T21:08:36.574633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T21:08:36.574667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:0 2025-06-24T21:08:36.574695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T21:08:36.574788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:08:36.575415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:36.575452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:36.576327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:08:36.577479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:08:36.578121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:36.578158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 0, path id: 2 2025-06-24T21:08:36.578208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T21:08:36.578235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:663:2594] 2025-06-24T21:08:36.578809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 9 PathOwnerId: 72057594046678944, cookie: 0 TestWaitNotification: OK eventTxId 103 2025-06-24T21:08:36.579524Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:36.579691Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path 
"/MyRoot/SomeDatabase" took 179us result status StatusSuccess 2025-06-24T21:08:36.580060Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "SomeDatabase" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "unquoted_storage_pool" Kind: "unquoted_storage_pool_kind" } StoragePools { Name: "quoted_storage_pool" Kind: "quoted_storage_pool_kind" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "unquoted_storage_pool_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "quoted_storage_pool_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { storage_quotas { unit_kind: "quoted_storage_pool_kind" data_size_hard_quota: 1 } } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpMultishardIndex::WriteIntoRenamingAsyncIndex [GOOD] >> KqpTx::RollbackTx >> KqpTx::RollbackManyTx >> KqpSnapshotRead::TestReadOnly+withSink >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite1 >> KqpSinkMvcc::SnapshotExpiration >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite1-withSink >> KqpTx::LocksAbortOnCommit >> KqpSinkMvcc::ReadOnlyTxCommitsOnConcurrentWrite >> KqpSnapshotRead::ReadOnlyTxCommitsOnConcurrentWrite-withSink >> KqpSnapshotRead::ReadOnlyTxWithIndexCommitsOnConcurrentWrite-withSink >> KqpSinkTx::ExplicitTcl >> KqpSinkTx::LocksAbortOnCommit >> KqpSinkMvcc::OlapReadOnlyTxCommitsOnConcurrentWrite >> KqpSinkTx::OlapDeferredEffects >> KqpSnapshotIsolation::TSimpleOltp [GOOD] >> KqpSnapshotIsolation::TSimpleOlap [GOOD] >> KqpSnapshotIsolation::TSimpleOltpNoSink [GOOD] >> KqpSnapshotRead::ReadOnlyTxCommitsOnConcurrentWrite+withSink |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotIsolation::TSimpleOlap [GOOD] >> TSchemeShardSubDomainTest::TopicDiskSpaceQuotas [GOOD] >> KqpSinkLocks::TInvalidate ------- [TM] 
{asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpMultishardIndex::WriteIntoRenamingAsyncIndex [GOOD] Test command err: Trying to start YDB, gRPC: 14004, MsgBus: 29667 2025-06-24T21:07:28.485843Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625512356418646:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:28.485953Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00462c/r3tmp/tmp0Tg2LW/pdisk_1.dat 2025-06-24T21:07:28.702158Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:28.708263Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625512356418627:2079] 1750799248485081 != 1750799248485084 TServer::EnableGrpc on GrpcPort 14004, node 1 2025-06-24T21:07:28.769903Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:28.769922Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:28.769928Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:28.770061Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:07:28.818492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:28.818617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:28.819975Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29667 TClient is connected to server localhost:29667 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:29.196174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:07:29.210653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:29.332856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:29.438028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:29.494381Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:29.513296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:30.585469Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625520946354862:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:30.585577Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:30.819932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:30.842300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:30.868418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:30.892096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:30.913390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:30.941106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:30.964735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:31.041277Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625525241322819:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:31.041353Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:31.041455Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625525241322824:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:31.044800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:31.054674Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625525241322826:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:31.112140Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625525241322877:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:31.741413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... Trying to start YDB, gRPC: 6189, MsgBus: 1636 2025-06-24T21:07:33.248239Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625534026584943:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:33.248310Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetP ... athId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:08:14.329785Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:14.349532Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:14.430610Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:14.603692Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:14.612227Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:14.693308Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:17.492159Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625722650564980:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:17.492279Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:17.561936Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:17.604593Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:17.683699Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:17.719669Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:17.794000Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:17.867469Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:17.945167Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:18.037581Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625726945532946:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:18.037690Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:18.038014Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625726945532951:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:18.042239Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:18.054813Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519625726945532953:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:08:18.126614Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519625726945533004:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:18.588988Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519625705470694187:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:18.589035Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:19.356051Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:28.703883Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:08:28.703911Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:29.669409Z node 3 :CHANGE_EXCHANGE ERROR: change_sender_table_base.cpp:211: [TableChangeSenderShard][72075186224037924:1][72075186224037933][3:7519625735535469153:2582] Apply status: status# 2, reason# 7 2025-06-24T21:08:29.677608Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037934 not found 2025-06-24T21:08:29.697803Z node 3 :CHANGE_EXCHANGE ERROR: change_sender_table_base.cpp:88: [TableChangeSenderShard][72075186224037924:1][72075186224037933][3:7519625774190183368:2582] Handshake status: status# 2, reason# 7 2025-06-24T21:08:29.705707Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037930 not found 2025-06-24T21:08:29.705750Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037931 not found 2025-06-24T21:08:29.911399Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037938 not found 2025-06-24T21:08:29.911445Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037939 not found 2025-06-24T21:08:29.912711Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037937 not found 2025-06-24T21:08:34.689324Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037935 not found 2025-06-24T21:08:34.690171Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037928 not found 2025-06-24T21:08:34.690208Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037936 not found 2025-06-24T21:08:34.690229Z 
node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037929 not found 2025-06-24T21:08:34.708554Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037926 not found 2025-06-24T21:08:34.711191Z node 3 :CHANGE_EXCHANGE ERROR: change_sender_table_base.cpp:88: [TableChangeSenderShard][72075186224037923:1][72075186224037925][3:7519625795665026049:2581] Handshake status: status# 2, reason# 7 2025-06-24T21:08:36.124863Z node 3 :CHANGE_EXCHANGE ERROR: change_sender_table_base.cpp:88: [TableChangeSenderShard][72075186224037922:1][72075186224037925][3:7519625804254962631:2594] Handshake status: status# 2, reason# 7 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpMultishardIndex::DataColumnSelect [GOOD] Test command err: Trying to start YDB, gRPC: 25420, MsgBus: 5494 2025-06-24T21:07:46.906230Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625591980996857:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:46.906283Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045ee/r3tmp/tmplyYTD5/pdisk_1.dat 2025-06-24T21:07:47.204720Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:47.204906Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625591980996834:2079] 1750799266904411 != 1750799266904414 TServer::EnableGrpc on GrpcPort 25420, node 1 2025-06-24T21:07:47.304036Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:47.304195Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:47.307080Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:47.334901Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:47.334931Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:47.335330Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:47.335509Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5494 TClient is connected to server localhost:5494 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:47.862639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:47.873691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:07:47.884563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:47.932483Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:48.013546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:07:48.175986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:48.240659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:49.987689Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625604865900355:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:49.987817Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:50.280150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.345820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.376968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.411618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.456875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.494538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.565279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:50.624264Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625609160868315:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:50.624361Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:50.624599Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625609160868320:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:50.628286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:50.640122Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625609160868322:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:50.712449Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625609160868373:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:51.757859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:51.913300Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625591980996857:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:51.915493Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=c ... 00546740:2079] 1750799304847909 != 1750799304847912 2025-06-24T21:08:25.046596Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28083, node 3 2025-06-24T21:08:25.119914Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:25.119946Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:25.119956Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:25.120131Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15474 TClient is connected to server localhost:15474 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:25.662307Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:08:25.680966Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:25.763736Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:25.890467Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:25.943865Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:26.030486Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:28.870172Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625770880417547:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:28.870291Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:28.942538Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:28.979096Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:29.012673Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:29.043742Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:29.074631Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:29.108772Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:29.144758Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:29.206484Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625775175385500:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:29.206564Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625775175385505:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:29.206583Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:29.210403Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:29.221447Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519625775175385507:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:29.298189Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519625775175385558:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:29.856787Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519625753700546924:2193];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:29.856871Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:30.550557Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:30.590880Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:30.633029Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:32.602764Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
>> KqpPrefixedVectorIndexes::PrefixedVectorIndexOrderByCosineDistanceWithCover-Nullable [GOOD] >> KqpUniqueIndex::InsertComplexFkPkOverlapDuplicate >> TSchemeShardSubDomainTest::DiskSpaceUsage [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::TopicDiskSpaceQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:28.780568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:28.780667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:28.780707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:28.780739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:28.780798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:28.780835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:28.780896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:28.780975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:28.781747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:28.782176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:28.862634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:28.862686Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:28.878488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:28.878841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:28.879024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:28.885705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:28.885903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear 
TempDirsState with owners number: 0 2025-06-24T21:08:28.886550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.886847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:28.890085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.890284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:28.891495Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:28.891567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.891845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:28.891896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:28.891940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:28.892018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.898502Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:29.016980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:29.017182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.017442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:29.017486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:29.017764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:29.017831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:29.020019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.020199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:29.020360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.020411Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:29.020452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:29.020489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:29.022429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.022501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:29.022543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:29.024508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.024554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:29.024612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:29.024678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:29.033416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:29.035523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:29.035707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:29.036508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:29.036622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:29.036656Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:29.036886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:29.036925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:29.037068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:29.037136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:29.039574Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:29.039621Z node 1 :FLAT_TX_SCHEMESHARD D ... 046678944, LocalPathId: 2] 2025-06-24T21:08:38.517418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:08:38.517571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:38.517610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-24T21:08:38.517651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-24T21:08:38.517679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-24T21:08:38.518152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:08:38.518199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-24T21:08:38.518305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T21:08:38.518358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T21:08:38.518406Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T21:08:38.518441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T21:08:38.518492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-24T21:08:38.518535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T21:08:38.518579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:0 2025-06-24T21:08:38.518611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T21:08:38.518757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:08:38.518804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 103, publications: 2, subscribers: 0 2025-06-24T21:08:38.518841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 2], 9 2025-06-24T21:08:38.518875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-24T21:08:38.520072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:08:38.520171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:08:38.520214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103 2025-06-24T21:08:38.520276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-24T21:08:38.520325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T21:08:38.521003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:08:38.521047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:08:38.521099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:08:38.521393Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] 
TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 2025-06-24T21:08:38.522256Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 2025-06-24T21:08:38.522376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 9 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:08:38.522436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 9 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:08:38.522460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-06-24T21:08:38.522485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 9 2025-06-24T21:08:38.522511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:08:38.522566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-24T21:08:38.522881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-24T21:08:38.523286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-24T21:08:38.526949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:08:38.529802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:08:38.529996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:08:38.530091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T21:08:38.530165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-24T21:08:38.530807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T21:08:38.530871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T21:08:38.531451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: 
NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T21:08:38.531556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T21:08:38.531599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:815:2723] TestWaitNotification: OK eventTxId 103 2025-06-24T21:08:39.035188Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:39.035468Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1" took 339us result status StatusSuccess 2025-06-24T21:08:39.035996Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1" PathDescription { Self { Name: "USER_1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "name_USER_0_kind_hdd-1" Kind: "hdd-1" } StoragePools { Name: "name_USER_0_kind_hdd-2" Kind: "hdd-2" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 1 } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpSinkTx::SnapshotRO ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DiskSpaceUsage [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: 
[1:111:2141] 2025-06-24T21:08:28.127823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:28.127939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:28.127979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:28.128015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:28.128059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:28.128121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:28.128196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:28.128283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:28.129024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:28.129400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:28.220391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:28.220449Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:28.242273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:28.242767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:28.242979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:28.256355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:28.256637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:28.257326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.257636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:28.264323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.264512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:28.265436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:28.265485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.265687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:28.265724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:28.265757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:28.265834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.273245Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:28.414551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:28.414768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.415064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:28.415130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:28.415432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:28.415509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:28.417870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.418127Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:28.418327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.418383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:28.418429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:28.418467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:28.420543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.420622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:28.420668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:28.422653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.422708Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.422762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.422855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:28.426993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:28.429103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:28.429318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:28.430348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.430485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:28.430551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-24T21:08:28.430937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:28.431002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.431222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:28.431306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:28.433634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:28.433685Z node 1 :FLAT_TX_SCHEMESHARD D ... ablePartition, limit 10000 2025-06-24T21:08:40.123488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:40.123563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:40.123657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:40.124400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:40.124810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:40.138788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:40.140280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:40.140458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:40.140596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:40.140627Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:40.140793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:40.141521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 3, at schemeshard: 72057594046678944 2025-06-24T21:08:40.141602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: Table1, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:08:40.141658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: 
Table2, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:08:40.141725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.141801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.142242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 2, at schemeshard: 72057594046678944 2025-06-24T21:08:40.142363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-24T21:08:40.142411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-06-24T21:08:40.142552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-24T21:08:40.142877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 4, at schemeshard: 72057594046678944 2025-06-24T21:08:40.143014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.143117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 3, at schemeshard: 72057594046678944 2025-06-24T21:08:40.143193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:08:40.143220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T21:08:40.143239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:08:40.143356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 3, at schemeshard: 72057594046678944 2025-06-24T21:08:40.143536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.143827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 9, at schemeshard: 72057594046678944 2025-06-24T21:08:40.144152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.144266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.144626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.144715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.144917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit 
for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.144996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.145115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.145290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.145367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.145517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.145739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.145808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.145928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.145997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.146057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T21:08:40.151209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:40.154595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:40.154671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:40.155785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:40.155851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:40.155903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:40.157487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:757:2670] sender: [1:813:2058] recipient: [1:15:2062] 2025-06-24T21:08:40.189840Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:40.190130Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 270us result status StatusSuccess 
2025-06-24T21:08:40.190647Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Table2" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 1752 DataSize: 1752 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::TableDiskSpaceQuotas [GOOD] >> KqpSnapshotIsolation::TConflictWriteOltp [GOOD] >> KqpSnapshotIsolation::TConflictWriteOlap [GOOD] >> KqpSinkTx::OlapSnapshotROInteractive1 |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotIsolation::TConflictWriteOlap [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::TableDiskSpaceQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:28.053399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 
2025-06-24T21:08:28.053495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:28.053531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:28.053560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:28.053599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:28.053630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:28.053690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:28.053765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:28.054602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:28.054953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:28.133186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:28.133245Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:28.146055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:28.146343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:28.146487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:28.153424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:28.153589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:28.154066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.154300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:28.156703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.156922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:28.158125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:28.158195Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:28.158464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:28.158511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:28.158554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:28.158646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.165428Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:28.306315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:28.306523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.306761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:28.306818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:28.307055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:28.307122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:28.309324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.309504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:28.309693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.309744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:28.309777Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:28.309807Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:28.311765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.311836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:28.311884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:28.313740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.313786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:28.313823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.313956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:28.317215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:28.318799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:28.318963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:28.319867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:28.320014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:28.320057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.320341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:28.320391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, 
operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:28.320558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:28.320638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:28.322660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:28.322701Z node 1 :FLAT_TX_SCHEMESHARD D ... lt> complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-24T21:08:41.272853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-24T21:08:41.274192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:41.274240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 107, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:41.274400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 107, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-24T21:08:41.274525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:41.274590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 107, path id: 2 2025-06-24T21:08:41.274637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 107, path id: 4 2025-06-24T21:08:41.274708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-24T21:08:41.274760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 107:0 ProgressState at tablet: 72057594046678944 2025-06-24T21:08:41.274870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-24T21:08:41.274909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 107:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-06-24T21:08:41.274957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 107:0 129 -> 240 2025-06-24T21:08:41.276062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 14 PathOwnerId: 72057594046678944, cookie: 107 2025-06-24T21:08:41.276194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 14 PathOwnerId: 72057594046678944, cookie: 107 2025-06-24T21:08:41.276241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 107 2025-06-24T21:08:41.276276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 107, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 14 2025-06-24T21:08:41.276330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:08:41.277177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 107 2025-06-24T21:08:41.277272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 107 2025-06-24T21:08:41.277304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 107 2025-06-24T21:08:41.277347Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 107, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-24T21:08:41.277378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-24T21:08:41.277438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 107, ready parts: 0/1, is published: true 2025-06-24T21:08:41.280157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-24T21:08:41.280227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 107:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:41.280546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:08:41.280688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#107:0 progress is 1/1 2025-06-24T21:08:41.280724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-24T21:08:41.280762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#107:0 progress is 1/1 2025-06-24T21:08:41.280809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-24T21:08:41.280848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 107, ready parts: 
1/1, is published: true 2025-06-24T21:08:41.280881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-24T21:08:41.280936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 107:0 2025-06-24T21:08:41.280968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 107:0 2025-06-24T21:08:41.281051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-24T21:08:41.281486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:41.281524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:08:41.282148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-24T21:08:41.283410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-24T21:08:41.284181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:41.284220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 0, path id: 2 2025-06-24T21:08:41.284699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 15 PathOwnerId: 72057594046678944, cookie: 0 TestWaitNotification wait txId: 107 2025-06-24T21:08:41.285081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 107: send EvNotifyTxCompletion 2025-06-24T21:08:41.285123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 107 2025-06-24T21:08:41.285519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 107, at schemeshard: 72057594046678944 2025-06-24T21:08:41.285593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-24T21:08:41.285621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:989:2914] TestWaitNotification: OK eventTxId 107 2025-06-24T21:08:41.286103Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:08:41.286280Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 167us result status StatusSuccess 2025-06-24T21:08:41.286663Z 
node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 15 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 15 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 9 SubDomainVersion: 1 SubDomainStateVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "name_USER_0_kind_hdd-1" Kind: "hdd-1" } StoragePools { Name: "name_USER_0_kind_hdd-2" Kind: "hdd-2" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 1 } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpTx::InteractiveTx >> KqpLocksTricky::TestNoWrite >> TestKinesisHttpProxy::CreateStreamInIncorrectDb [GOOD] >> TestKinesisHttpProxy::TestPing [GOOD] >> TestKinesisHttpProxy::UnauthorizedGetShardIteratorRequest [GOOD] >> TestKinesisHttpProxy::MissingAction [GOOD] >> TestYmqHttpProxy::TestGetQueueUrl [GOOD] >> TestYmqHttpProxy::TestCreateQueue [GOOD] >> TestYmqHttpProxy::TestSendMessage [GOOD] >> TestKinesisHttpProxy::DifferentContentTypes [GOOD] >> DataShardSnapshots::ShardRestartLockBasic [GOOD] >> TestKinesisHttpProxy::TestRequestBadJson >> DataShardSnapshots::ShardRestartAfterDropTable >> TestKinesisHttpProxy::CreateStreamWithInvalidName >> TestKinesisHttpProxy::PutRecordsWithLongExplicitHashKey >> TestYmqHttpProxy::TestGetQueueUrlOfNotExistingQueue >> TestYmqHttpProxy::TestReceiveMessage >> KqpSinkMvcc::OltpMultiSinksNoSinks >> TestYmqHttpProxy::TestCreateQueueWithBadQueueName >> TestKinesisHttpProxy::TestRequestWithWrongRegion >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain-EnableSeparateQuotas [GOOD] >> KqpSnapshotRead::TestSnapshotExpiration-withSink >> KqpVectorIndexes::CoveredVectorIndexWithFollowers-StaleRO [GOOD] >> TestKinesisHttpProxy::GoodRequestPutRecords >> DataShardSnapshots::UncommittedWriteRestartDuringCommit [GOOD] >> DataShardSnapshots::UncommittedWriteRestartDuringCommitThenBulkErase ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> 
TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain-EnableSeparateQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:08:30.555817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:08:30.555918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:30.555962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:08:30.556004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:08:30.556069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:08:30.556116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:08:30.556180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:08:30.556254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:08:30.557080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:08:30.557450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:08:30.649000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:30.649090Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:30.675826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:08:30.676295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:08:30.676562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:08:30.685883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:08:30.686143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:08:30.686927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:30.687316Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:08:30.691012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:30.691262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:08:30.692561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:30.692652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:08:30.692903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:08:30.692956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:08:30.693024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:08:30.693156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.701038Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:08:30.844485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:08:30.844740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.845036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:08:30.845092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:08:30.845379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:08:30.845476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:30.847932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, 
response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:30.848168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:08:30.848375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.848429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:08:30.848479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:08:30.848522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:08:30.850831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.850906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:08:30.850957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:08:30.854276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.854337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:08:30.854378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:30.854445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:08:30.858481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:08:30.860986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:08:30.861160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:08:30.862062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:08:30.862193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: 
TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:08:30.862232Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:30.862520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:08:30.862582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:08:30.862728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:08:30.862831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:08:30.865012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:08:30.865076Z node 1 :FLAT_TX_SCHEMESHARD D ... Id: 104:0, at schemeshard: 72075186233409546 2025-06-24T21:08:43.740809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-24T21:08:43.741204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-24T21:08:43.741258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-24T21:08:43.741466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 2] 2025-06-24T21:08:43.741670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-24T21:08:43.741718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:447:2397], at schemeshard: 72075186233409546, txId: 104, path id: 1 2025-06-24T21:08:43.741766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:447:2397], at schemeshard: 72075186233409546, txId: 104, path id: 2 2025-06-24T21:08:43.742489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-24T21:08:43.742552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 104:0 ProgressState at tablet: 72075186233409546 2025-06-24T21:08:43.742651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 104:0, at schemeshard: 
72075186233409546 2025-06-24T21:08:43.742693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 104:0, datashard: 72075186233409549, at schemeshard: 72075186233409546 2025-06-24T21:08:43.742737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 104:0 129 -> 240 2025-06-24T21:08:43.743646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72075186233409546, cookie: 104 2025-06-24T21:08:43.743758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72075186233409546, cookie: 104 2025-06-24T21:08:43.743799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-06-24T21:08:43.743841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 11 2025-06-24T21:08:43.743885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 5 2025-06-24T21:08:43.745510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-06-24T21:08:43.745602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-06-24T21:08:43.745632Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-06-24T21:08:43.745662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 18446744073709551615 2025-06-24T21:08:43.745694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 4 2025-06-24T21:08:43.745768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-06-24T21:08:43.749549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-24T21:08:43.749616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 104:0 ProgressState, at schemeshard: 72075186233409546 2025-06-24T21:08:43.749972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-06-24T21:08:43.750190Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:08:43.750230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:08:43.750270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:08:43.750306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:08:43.750345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-06-24T21:08:43.750419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:554:2491] message: TxId: 104 2025-06-24T21:08:43.750475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:08:43.750513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:0 2025-06-24T21:08:43.750546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:0 2025-06-24T21:08:43.750647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 2 2025-06-24T21:08:43.751733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-24T21:08:43.751774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 0, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-24T21:08:43.752712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-06-24T21:08:43.753945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-06-24T21:08:43.755371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-24T21:08:43.755423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:447:2397], at schemeshard: 72075186233409546, txId: 0, path id: 1 2025-06-24T21:08:43.755922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T21:08:43.755971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:1423:3329] 2025-06-24T21:08:43.756441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 12 PathOwnerId: 72075186233409546, cookie: 0 TestWaitNotification: OK eventTxId 104 2025-06-24T21:08:43.760688Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { 
ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409546 2025-06-24T21:08:43.760942Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409546 describe path "/MyRoot/SomeDatabase" took 298us result status StatusSuccess 2025-06-24T21:08:43.761452Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "MyRoot/SomeDatabase" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 12 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 12 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 2 SubDomainStateVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "fast" Kind: "fast_kind" } StoragePools { Name: "large" Kind: "large_kind" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "large_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "fast_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 2800 data_size_soft_quota: 2200 storage_quotas { unit_kind: "fast_kind" data_size_hard_quota: 600 data_size_soft_quota: 500 } storage_quotas { unit_kind: "large_kind" data_size_hard_quota: 2200 data_size_soft_quota: 1700 } } SecurityState { Audience: "/MyRoot/SomeDatabase" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 >> KqpTx::RollbackTx [GOOD] >> KqpTx::RollbackTx2 >> KqpSnapshotRead::TestReadOnly+withSink [GOOD] >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite3-withSink >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite1-withSink [GOOD] >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite2+withSink >> KqpTx::LocksAbortOnCommit [GOOD] >> KqpTx::MixEnginesOldNew >> KqpSnapshotRead::ReadOnlyTxCommitsOnConcurrentWrite-withSink [GOOD] >> KqpSnapshotRead::ReadOnlyTxWithIndexCommitsOnConcurrentWrite+withSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::CoveredVectorIndexWithFollowers-StaleRO [GOOD] Test command err: Trying to start YDB, gRPC: 27409, MsgBus: 8776 2025-06-24T21:07:38.240601Z node 1 
:METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625559165578616:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:38.240684Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004622/r3tmp/tmpidXlrP/pdisk_1.dat 2025-06-24T21:07:38.563798Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625559165578594:2079] 1750799258239489 != 1750799258239492 2025-06-24T21:07:38.577280Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27409, node 1 2025-06-24T21:07:38.620293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:38.620323Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:38.620331Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:38.620477Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:07:38.639690Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:38.639815Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:38.641486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8776 TClient is connected to server localhost:8776 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:39.087691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:07:39.106749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:39.238426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:39.246879Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:39.392820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:39.455944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:41.222825Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625572050482116:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:41.222967Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:41.550081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.577300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.603536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.630194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.657174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.703943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.770770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:41.821042Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625572050482780:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:41.821115Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:41.821285Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625572050482785:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:41.825176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:41.835416Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625572050482787:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:41.895133Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625572050482838:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:42.788590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:42.996359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:43.036364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operati ... er localhost:3442 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:10.215405Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:10.224231Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:08:10.238568Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:10.313768Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:10.327375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:10.506479Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:10.592740Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:13.005898Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625708299280277:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:13.006015Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:13.069888Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:13.102084Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:13.176445Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:13.217577Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:13.252346Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:13.297353Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:13.370790Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:13.433677Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625708299280942:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:13.433763Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:13.433988Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625708299280947:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:13.438068Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:13.449203Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625708299280949:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:08:13.534327Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625708299281000:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:14.307256Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625691119409495:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:14.307343Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:14.847227Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:15.190134Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:15.240727Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:08:15.330939Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715675:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:08:15.404483Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715676:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:08:24.416545Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:08:24.416577Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded >> KqpSnapshotRead::ReadOnlyTxCommitsOnConcurrentWrite+withSink [GOOD] |94.1%| [TA] $(B)/ydb/core/tx/schemeshard/ut_subdomain/test-results/unittest/{meta.json ... 
results_accumulator.log} >> DataShardSnapshots::LockedWriteWithAsyncIndexAndVolatileCommit-UseSink [GOOD] >> KqpSinkTx::ExplicitTcl [GOOD] >> KqpSinkTx::Interactive |94.1%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_subdomain/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpSinkTx::LocksAbortOnCommit [GOOD] >> KqpSinkTx::InvalidateOnError >> KqpSinkMvcc::ReadOnlyTxCommitsOnConcurrentWrite [GOOD] >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite1 >> KqpSnapshotIsolation::TConflictWriteOltpNoSink [GOOD] >> KqpSnapshotIsolation::TReadOnlyOlap [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotRead::ReadOnlyTxCommitsOnConcurrentWrite+withSink [GOOD] Test command err: Trying to start YDB, gRPC: 26708, MsgBus: 26831 2025-06-24T21:08:38.740306Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625815671100167:2213];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:38.740419Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003280/r3tmp/tmpYwIwnn/pdisk_1.dat 2025-06-24T21:08:39.208217Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:39.208342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:39.211727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26708, node 1 2025-06-24T21:08:39.257566Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:39.279312Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625815671099973:2079] 1750799318723753 != 1750799318723756 2025-06-24T21:08:39.345576Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:39.345606Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:39.345613Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:39.345734Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:39.743760Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26831 TClient is connected to server localhost:26831 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:40.219484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:40.241956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:08:40.256459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:40.410518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:40.555253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:40.643412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:42.206112Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625832850970796:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.206254Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.575766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.637663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.682211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.715987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.753901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.833061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.872284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.976750Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625832850971464:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.976835Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.977081Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625832850971469:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.981845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:42.995065Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625832850971471:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:08:43.050683Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625837145938818:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:43.743272Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625815671100167:2213];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:43.756752Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpSinkLocks::TInvalidate [GOOD] >> KqpSinkLocks::TInvalidateOlap |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotIsolation::TReadOnlyOlap [GOOD] >> test_sql_streaming.py::test[suites-WriteTwoTopics-default.txt] [FAIL] >> KqpSinkLocks::EmptyRangeOlap ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::LockedWriteWithAsyncIndexAndVolatileCommit-UseSink [GOOD] Test command err: 2025-06-24T21:07:07.402058Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:07:07.402399Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:07:07.402534Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046c7/r3tmp/tmpAT2wjN/pdisk_1.dat 2025-06-24T21:07:07.766327Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:07:07.777216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:07.808503Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:07.816920Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799225356328 != 1750799225356332 2025-06-24T21:07:07.861631Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:07:07.863515Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T21:07:07.863730Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:07.863809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:07.876015Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:07.955326Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T21:07:07.955384Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T21:07:07.956296Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:600:2508] 2025-06-24T21:07:08.039237Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:600:2508] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T21:07:08.039338Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:600:2508] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:07:08.041563Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:600:2508] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:07:08.041690Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:600:2508] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:07:08.042080Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:07:08.042313Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:600:2508] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:07:08.042496Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:600:2508] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:07:08.047115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:08.048278Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T21:07:08.048955Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:600:2508] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T21:07:08.049021Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:600:2508] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T21:07:08.090035Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:07:08.091187Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:07:08.091701Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:07:08.091968Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:07:08.130372Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:07:08.130901Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:07:08.130983Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:07:08.132196Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:07:08.132258Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:07:08.132319Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:07:08.132580Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-24T21:07:08.132672Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:07:08.132731Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:07:08.133015Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:07:08.164304Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:07:08.164441Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:07:08.164528Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:07:08.164558Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:07:08.164584Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:07:08.164608Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:07:08.164780Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:07:08.164813Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:07:08.165014Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:07:08.165069Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:07:08.165102Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:07:08.165128Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:07:08.165169Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:07:08.165197Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:07:08.165221Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:07:08.165242Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:07:08.165271Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:07:08.165359Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:08.165388Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:08.165418Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], 
sessionId# [0:0:0] 2025-06-24T21:07:08.165686Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:07:08.165716Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:07:08.165784Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:07:08.165923Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:07:08.165961Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:07:08.166018Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:07:08.166075Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21: ... r.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jyhwb4tv4s7534yc4p24wjbm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTEzZjNmM2ItZGY1MDYyYzktNmVhNWJiZjItMTk5MTU5ZmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TEvRead: ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 4 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false 2025-06-24T21:08:44.704895Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [13:1599:3307], Recipient [13:749:2616]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 4 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false RangesSize: 3 2025-06-24T21:08:44.705236Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037889, FollowerId 0 2025-06-24T21:08:44.705358Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037889 CompleteEdge# v8006/281474976715670 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-24T21:08:44.705463Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037889 changed HEAD read to non-repeatable v9000/18446744073709551615 2025-06-24T21:08:44.705620Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit CheckRead 2025-06-24T21:08:44.705891Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-24T21:08:44.706025Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit CheckRead 2025-06-24T21:08:44.706127Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-24T21:08:44.706203Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-24T21:08:44.706295Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 
72075186224037889 2025-06-24T21:08:44.706386Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-24T21:08:44.706425Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-24T21:08:44.706458Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit ExecuteRead 2025-06-24T21:08:44.706490Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit ExecuteRead 2025-06-24T21:08:44.706705Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037889 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 4 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false } 2025-06-24T21:08:44.707306Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037889 Complete read# {[13:1599:3307], 0} after executionsCount# 1 2025-06-24T21:08:44.707468Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037889 read iterator# {[13:1599:3307], 0} sends rowCount# 2, bytes# 64, quota rows left# 32765, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 3, firstUnprocessed# 0 2025-06-24T21:08:44.707636Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037889 read iterator# {[13:1599:3307], 0} finished in read 2025-06-24T21:08:44.707788Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-24T21:08:44.707830Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit ExecuteRead 2025-06-24T21:08:44.707862Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit CompletedOperations 2025-06-24T21:08:44.707901Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit CompletedOperations 2025-06-24T21:08:44.707967Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-24T21:08:44.707998Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit CompletedOperations 2025-06-24T21:08:44.708043Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037889 has finished 2025-06-24T21:08:44.708141Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2025-06-24T21:08:44.708399Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2025-06-24T21:08:44.710030Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [13:1599:3307], Recipient [13:749:2616]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T21:08:44.710142Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037889 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 11 } }, { items { uint32_value: 2 } items { uint32_value: 21 } } 2025-06-24T21:08:45.549285Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# 
[13:59:2106] Handle TEvExecuteKqpTransaction 2025-06-24T21:08:45.549430Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [13:59:2106] TxId# 281474976715672 ProcessProposeKqpTransaction 2025-06-24T21:08:45.551299Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jyhwb5hs08gna9evrpk8r2m2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTRiNmQ3NzYtMTBiY2VjZDktNzRhNDkxNTMtMjAyNWExMWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TEvRead: ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 7 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false 2025-06-24T21:08:45.554659Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [13:1630:3332], Recipient [13:1021:2834]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 7 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false RangesSize: 3 2025-06-24T21:08:45.555170Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037891, FollowerId 0 2025-06-24T21:08:45.555291Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037891 CompleteEdge# v8006/281474976715670 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-24T21:08:45.555391Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037891 changed HEAD read to non-repeatable v9000/18446744073709551615 2025-06-24T21:08:45.555536Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037891 on unit CheckRead 2025-06-24T21:08:45.555785Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037891 is Executed 2025-06-24T21:08:45.555884Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037891 executing on unit CheckRead 2025-06-24T21:08:45.555978Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037891 to execution unit BuildAndWaitDependencies 2025-06-24T21:08:45.556061Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037891 on unit BuildAndWaitDependencies 2025-06-24T21:08:45.556138Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037891 2025-06-24T21:08:45.556219Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037891 is Executed 2025-06-24T21:08:45.556260Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037891 executing on unit BuildAndWaitDependencies 2025-06-24T21:08:45.556292Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037891 to execution unit ExecuteRead 2025-06-24T21:08:45.556322Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037891 on unit ExecuteRead 2025-06-24T21:08:45.556529Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037891 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 7 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 
5242880 Reverse: false } 2025-06-24T21:08:45.557078Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037891 Complete read# {[13:1630:3332], 0} after executionsCount# 1 2025-06-24T21:08:45.557202Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037891 read iterator# {[13:1630:3332], 0} sends rowCount# 2, bytes# 64, quota rows left# 32765, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 3, firstUnprocessed# 0 2025-06-24T21:08:45.557344Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037891 read iterator# {[13:1630:3332], 0} finished in read 2025-06-24T21:08:45.557457Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037891 is Executed 2025-06-24T21:08:45.557487Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037891 executing on unit ExecuteRead 2025-06-24T21:08:45.557512Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037891 to execution unit CompletedOperations 2025-06-24T21:08:45.557542Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037891 on unit CompletedOperations 2025-06-24T21:08:45.557593Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037891 is Executed 2025-06-24T21:08:45.557618Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037891 executing on unit CompletedOperations 2025-06-24T21:08:45.557651Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037891 has finished 2025-06-24T21:08:45.557721Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037891 2025-06-24T21:08:45.557905Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037891 2025-06-24T21:08:45.559037Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [13:1630:3332], Recipient [13:1021:2834]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T21:08:45.559112Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037891 ReadCancel: { ReadId: 0 } { items { uint32_value: 10 } items { uint32_value: 110 } }, { items { uint32_value: 20 } items { uint32_value: 210 } } >> KqpPrefixedVectorIndexes::OrderByCosineLevel1-Nullable+UseSimilarity [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineLevel2+Nullable+UseSimilarity >> KqpSnapshotRead::ReadOnlyTxWithIndexCommitsOnConcurrentWrite-withSink [GOOD] >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite1+withSink >> KqpSinkTx::SnapshotRO [GOOD] >> KqpSinkTx::SnapshotROInteractive1 >> AnalyzeColumnshard::AnalyzeMultiOperationId [GOOD] >> KqpTx::InteractiveTx [GOOD] >> KqpTx::InvalidateOnError |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/streaming_optimize/py3test >> test_sql_streaming.py::test[suites-WriteTwoTopics-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadTopicGroupWriteToSolomon-default.txt] [FAIL] >> KqpPrefixedVectorIndexes::OrderByCosineLevel1+Nullable+UseSimilarity [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel4 >> KqpSinkLocks::UncommittedRead >> KqpTx::CommitRequired >> TestKinesisHttpProxy::TestRequestBadJson [GOOD] >> 
TestKinesisHttpProxy::CreateStreamWithInvalidName [GOOD] >> TestYmqHttpProxy::TestGetQueueUrlOfNotExistingQueue [GOOD] >> KqpTx::SnapshotRO >> test_sql_streaming.py::test[suites-GroupByHopNoKey-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHopPercentile-default.txt] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeMultiOperationId [GOOD] Test command err: 2025-06-24T21:05:11.034346Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:417:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:11.034755Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:11.034850Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004381/r3tmp/tmpxFtApS/pdisk_1.dat 2025-06-24T21:05:11.424343Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15908, node 1 2025-06-24T21:05:11.660884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:11.660945Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:11.660979Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:11.661573Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:11.664203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:11.765712Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:11.765867Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:11.781410Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30405 2025-06-24T21:05:12.347920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:15.469647Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:15.512173Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:15.512310Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:15.542622Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:15.545181Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:15.760868Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:15.785199Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:15.785962Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:15.786561Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:15.786736Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:15.787011Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:15.787102Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:15.787290Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:15.787404Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:15.787523Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:15.977472Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:15.977626Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:15.990919Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:16.157191Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:16.208113Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:16.208250Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:16.241185Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:16.241446Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:16.241651Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:16.241712Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:16.241763Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:16.241814Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:16.241886Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:16.241936Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:16.242368Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:16.266569Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:16.266706Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1790:2561], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:16.275518Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2570] 2025-06-24T21:05:16.285107Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1847:2588] 2025-06-24T21:05:16.285404Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1847:2588], schemeshard id = 72075186224037897 2025-06-24T21:05:16.292729Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:16.308604Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:16.308673Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:16.308747Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:16.319938Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:16.331034Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:16.331205Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:16.518303Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:16.669879Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:16.790049Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:17.400587Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:17.645099Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2142:3021], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:17.645271Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:17.663036Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:17.770346Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2212:2787];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:17.770554Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2212:2787];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:17.770822Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2212:2787];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:17.770956Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2212:2787];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:17.771084Z node 2 :TX_COLUMNSHARD WARN: ... p:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId9 2025-06-24T21:08:38.609776Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:08:39.922753Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:08:39.922992Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:08:39.945023Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:08:41.317568Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:08:41.317653Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId8 2025-06-24T21:08:41.317691Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId9 2025-06-24T21:08:41.317733Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:08:42.556803Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:08:42.556958Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-24T21:08:42.557001Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:08:42.557726Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:08:42.576670Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:08:42.577189Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:08:42.577275Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:08:42.577746Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:08:42.595288Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:08:42.595530Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 11, current Round: 0 2025-06-24T21:08:42.596113Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9489:6514], server id = [2:9490:6515], tablet id = 72075186224037899, status = OK 2025-06-24T21:08:42.596214Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:9489:6514], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:08:42.597558Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:08:42.597660Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:08:42.597838Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:08:42.598043Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:08:42.598431Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:08:42.604302Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:9489:6514], server id = [2:9490:6515], tablet id = 72075186224037899 2025-06-24T21:08:42.604364Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:08:42.605797Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:08:42.646362Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NWExNGI5Ny0zNGQyMDNiYi1hNDc2OTA3Ny05NTJlYTc3Yw==, TxId: 2025-06-24T21:08:42.646451Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NWExNGI5Ny0zNGQyMDNiYi1hNDc2OTA3Ny05NTJlYTc3Yw==, TxId: 2025-06-24T21:08:42.647083Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:08:42.701305Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:08:42.701388Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId8, ActorId=[1:3038:3295] 2025-06-24T21:08:43.207957Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 11 is different from the current 0 2025-06-24T21:08:43.208043Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T21:08:43.937730Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:08:43.959941Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:08:43.960025Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId9 2025-06-24T21:08:43.960059Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:08:43.960230Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 10 is different from the current 0 2025-06-24T21:08:43.960266Z node 2 :STATISTICS DEBUG: service_impl.cpp:1021: Skip TEvStatisticsRequestTimeout 2025-06-24T21:08:45.312271Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:08:46.587745Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:08:46.587992Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:08:46.621137Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:08:46.621212Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. 
OperationId=operationId9 2025-06-24T21:08:46.621244Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:08:47.899978Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:08:47.900129Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:08:47.900168Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:08:47.900779Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:08:47.914976Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:08:47.915463Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:08:47.915544Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:08:47.915951Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:08:47.941977Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:08:47.942241Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 12, current Round: 0 2025-06-24T21:08:47.942802Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9654:6603], server id = [2:9655:6604], tablet id = 72075186224037899, status = OK 2025-06-24T21:08:47.942905Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:9654:6603], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:08:47.944218Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:08:47.944291Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:08:47.944529Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:9654:6603], server id = [2:9655:6604], tablet id = 72075186224037899 2025-06-24T21:08:47.944561Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:08:47.944646Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:08:47.944772Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:08:47.945110Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:08:47.948110Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:08:47.974821Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YTA4OTI4ZGQtZDQ3ZjE5MTktZTc5NmMxNWQtNGM5NmE1ZjU=, TxId: 2025-06-24T21:08:47.974883Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YTA4OTI4ZGQtZDQ3ZjE5MTktZTc5NmMxNWQtNGM5NmE1ZjU=, TxId: 2025-06-24T21:08:47.975273Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:08:48.004778Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:08:48.004850Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId9, ActorId=[1:3038:3295] >> TestKinesisHttpProxy::TestRequestWithWrongRegion [GOOD] >> TestKinesisHttpProxy::PutRecordsWithLongExplicitHashKey [GOOD] >> KqpTx::RollbackManyTx [GOOD] >> KqpTx::RollbackRoTx >> TestKinesisHttpProxy::TestConsumersEmptyNames |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/streaming_optimize/py3test >> test_sql_streaming.py::test[suites-ReadTopicGroupWriteToSolomon-default.txt] [FAIL] >> TestKinesisHttpProxy::CreateStreamWithDifferentRetentions >> TestYmqHttpProxy::TestGetQueueUrlWithIAM >> TestYmqHttpProxy::TestCreateQueueWithBadQueueName [GOOD] >> KqpUniqueIndex::InsertComplexFkPkOverlapDuplicate [GOOD] >> TestKinesisHttpProxy::GoodRequestPutRecords [GOOD] >> TestKinesisHttpProxy::TestRequestWithIAM >> TestKinesisHttpProxy::PutRecordsWithIncorrectHashKey >> KqpSinkTx::OlapLocksAbortOnCommit >> TestYmqHttpProxy::TestReceiveMessage [GOOD] >> KqpTx::RollbackTx2 [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithEmptyName >> DataShardSnapshots::DelayedWriteReadableAfterSplitAndReboot [GOOD] >> DataShardSnapshots::BrokenLockChangesDontLeak >> TestKinesisHttpProxy::DoubleCreateStream >> KqpSinkMvcc::OltpMultiSinksNoSinks [GOOD] >> KqpSinkMvcc::OltpMultiSinks >> TestYmqHttpProxy::TestReceiveMessageWithAttributes >> KqpTx::MixEnginesOldNew [GOOD] >> KqpLocks::Invalidate >> DataShardSnapshots::ShardRestartAfterDropTable [GOOD] >> DataShardSnapshots::ShardRestartAfterDropTableAndAbort ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::InsertComplexFkPkOverlapDuplicate [GOOD] Test command err: Trying to start YDB, gRPC: 2552, MsgBus: 7732 2025-06-24T21:07:32.948048Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625529441969950:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:32.948202Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00462b/r3tmp/tmp1Gqa0P/pdisk_1.dat 2025-06-24T21:07:33.182933Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:33.183286Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625529441969932:2079] 1750799252947311 != 1750799252947314 TServer::EnableGrpc on GrpcPort 2552, node 1 2025-06-24T21:07:33.242801Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:33.242830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:33.242843Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:33.242981Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:07:33.302786Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:33.302900Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:33.304338Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7732 TClient is connected to server localhost:7732 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:33.683189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:33.706547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:33.822281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:33.944063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:33.990376Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:34.011927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:35.115202Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625542326873472:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:35.115339Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:35.329552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:35.354734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:35.379924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:35.405972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:35.432721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:35.500134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:35.529942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:35.609203Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625542326874134:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:35.609273Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625542326874139:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:35.609274Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:35.612247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:35.621387Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625542326874141:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:35.674927Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625542326874192:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:36.500595Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625546621841763:3593], Recipient [1:7519625533736937542:2138]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:36.500635Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:36.500648Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:36.500681Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519625546621841759:3590], Recipient [1:7519625533736937542:2138]: {TEvModifySchemeTransaction txid# 281474976715672 TabletId# 72057594046644480} 2025-06-24T21:07:36.500704Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvScheme ... ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 Trying to start YDB, gRPC: 30637, MsgBus: 25658 2025-06-24T21:08:40.393600Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519625824441114595:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:40.393756Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00462b/r3tmp/tmpEHyA8F/pdisk_1.dat 2025-06-24T21:08:40.624338Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:40.624444Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:40.626982Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:40.635329Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519625824441114571:2079] 1750799320391330 != 1750799320391333 2025-06-24T21:08:40.648107Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30637, node 3 2025-06-24T21:08:40.694208Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:40.694235Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:40.694244Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:40.694405Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25658 TClient is connected to server localhost:25658 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:41.321945Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:41.333082Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:41.402384Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:41.439083Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:41.643765Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:41.731389Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:44.841808Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625841620985399:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:44.841915Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:44.909317Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:44.990015Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:45.058671Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:45.114287Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:45.178439Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:45.248002Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:45.298435Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:45.395286Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519625824441114595:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:45.395488Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:45.445652Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[3:7519625845915953354:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:45.445778Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:45.448266Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625845915953359:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:45.456421Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:45.474511Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519625845915953361:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:08:45.538698Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519625845915953415:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:47.056913Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> KqpPrefixedVectorIndexes::OrderByCosineLevel2+Nullable-UseSimilarity [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineLevel2-Nullable+UseSimilarity ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::RollbackTx2 [GOOD] Test command err: Trying to start YDB, gRPC: 14297, MsgBus: 12852 2025-06-24T21:08:38.732738Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625816892734061:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:38.732796Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003299/r3tmp/tmpDGEZUA/pdisk_1.dat 2025-06-24T21:08:39.160057Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625816892733872:2079] 1750799318701476 != 1750799318701479 2025-06-24T21:08:39.179800Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:39.208576Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:39.208682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 14297, node 1 2025-06-24T21:08:39.210372Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:39.343453Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:39.343482Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:39.343490Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:39.343611Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:39.719465Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12852 TClient is connected to server localhost:12852 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:40.246464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:40.273046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:40.406394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:40.566928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:40.654275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:42.042106Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625834072604697:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.042201Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.428120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.463309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.504817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.541074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.573088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.643809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.723733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.822678Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625834072605366:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.822740Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.822795Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625834072605371:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.826435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:42.840353Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625834072605373:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:42.920280Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625834072605426:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:43.720309Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625816892734061:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:43.720399Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:44.729807Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NzM3YzY0OGUtYjE1NTYzNmItMmFiZTc0YzQtMTQwMTMwZmI=, ActorId: [1:7519625842662540289:2474], ActorState: ReadyState, TraceId: 01jyhwb5hb5ske9azhqr9s90sq, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 16291, MsgBus: 12774 2025-06-24T21:08:45.637137Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625845597753569:2160];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003299/r3tmp/tmpV26iwf/pdisk_1.dat 2025-06-24T21:08:45.740051Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:08:45.790660Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:45.792634Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625845597753422:2079] 1750799325609891 != 1750799325609894 2025-06-24T21:08:45.804975Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:45.805057Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:45.806505Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16291, node 2 2025-06-24T21:08:45.915528Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:45.915549Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:45.915557Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:45.915672Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12774 TClient is connected to server localhost:12774 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:46.469475Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:46.484417Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:46.564012Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:46.657756Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:46.716343Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:46.792911Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:49.036784Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625862777624255:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:49.036875Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:49.077033Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.106656Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.134909Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.162273Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.194706Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.235361Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.277154Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.332927Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625862777624907:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:49.333026Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:49.333358Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625862777624912:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:49.336668Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:49.347703Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625862777624914:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:08:49.414453Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625862777624965:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:50.635238Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625845597753569:2160];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:50.635314Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:50.917427Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=OTdmMzYzMjQtYTBiMGM5N2EtYjU4NTMyOC02ZDA0ZDg2MQ==, ActorId: [2:7519625867072592529:2472], ActorState: ReadyState, TraceId: 01jyhwbbk4a3ct07q8v8gkfgc4, Create QueryResponse for error on request, msg: >> KqpLocksTricky::TestNoLocksIssue-withSink >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite3-withSink [GOOD] >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite1 [GOOD] >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite2 >> KqpSinkTx::OlapDeferredEffects [GOOD] >> KqpSinkTx::OlapExplicitTcl ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::MixEnginesOldNew [GOOD] Test command err: Trying to start YDB, gRPC: 18779, MsgBus: 9769 2025-06-24T21:08:38.706002Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625816493467441:2162];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:38.706156Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00323c/r3tmp/tmpJoQ82k/pdisk_1.dat 2025-06-24T21:08:39.190557Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:39.200252Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625816493467300:2079] 1750799318690385 != 1750799318690388 2025-06-24T21:08:39.216943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:39.217050Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 18779, node 1 2025-06-24T21:08:39.221067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:39.341866Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:39.341889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:39.341897Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:39.342020Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration 2025-06-24T21:08:39.707297Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9769 TClient is connected to server localhost:9769 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:40.152590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:40.190028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:40.195300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:40.356470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:40.510424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:40.585838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:42.068223Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625833673338129:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.068372Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.425433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.502977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.539875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.574527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.612252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.659782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.701509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.770238Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625833673338789:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.770387Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.770964Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625833673338794:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.775011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:42.786911Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625833673338796:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:42.870811Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625833673338847:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:43.707285Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625816493467441:2162];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:43.707374Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:45.155573Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZmI5NGFiMjktNmQxNGRmOGQtOGUxZWIyZmQtYzRkZDQ4ZDI=, ActorId: [1:7519625842263273672:2465], ActorState: ExecuteState, TraceId: 01jyhwb5qq3v5fdqrcdscszahh, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken 2025-06-24T21:08:45.176145Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZmI5NGFiMjktNmQxNGRmOGQtOGUxZWIyZmQtYzRkZDQ4ZDI=, ActorId: [1:7519625842263273672:2465], ActorState: ReadyState, TraceId: 01jyhwb5zqc2r45kq6s0agtck2, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 24797, MsgBus: 15500 2025-06-24T21:08:46.102260Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625849114367916:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:46.102341Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00323c/r3tmp/tmpGgVbAk/pdisk_1.dat 2025-06-24T21:08:46.228749Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:46.244535Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:46.244613Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:46.246869Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24797, node 2 2025-06-24T21:08:46.285595Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:46.285618Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:46.285626Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:46.285749Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15500 TClient is connected to server localhost:15500 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:46.804613Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:46.815476Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:08:46.830999Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:46.917186Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:47.081890Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:47.130163Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:47.152514Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:49.610312Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625861999271382:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:49.610411Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:49.671743Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.713795Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.753377Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.797120Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.835657Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.887930Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.938108Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.031638Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625866294239340:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:50.031751Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:50.032040Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625866294239345:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:50.036268Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:50.053880Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625866294239347:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:08:50.122333Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625866294239398:3412] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:51.103100Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625849114367916:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:51.103210Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> DataShardSnapshots::UncommittedWriteRestartDuringCommitThenBulkErase [GOOD] >> DataShardSnapshots::UncommittedChangesRenameTable-UseSink >> KqpSinkTx::InvalidateOnError [GOOD] >> KqpSinkTx::Interactive [GOOD] >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite1 [GOOD] >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite2+withSink [GOOD] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite3-withSink [GOOD] Test command err: Trying to start YDB, gRPC: 3032, MsgBus: 20082 2025-06-24T21:08:38.697909Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625815639286488:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:38.697965Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00328c/r3tmp/tmpSLyfgi/pdisk_1.dat 2025-06-24T21:08:39.145647Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:39.145793Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:39.160782Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:39.168150Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625815639286465:2079] 1750799318696793 != 1750799318696796 2025-06-24T21:08:39.180898Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3032, node 1 2025-06-24T21:08:39.347298Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:39.347330Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:39.347343Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:39.347465Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:39.722597Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20082 TClient is connected to server localhost:20082 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:40.177566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:40.221662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:40.361597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:40.511642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:40.572765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:42.119916Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625832819157285:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.120044Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.463774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.506129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.542116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.572217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.610536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.685739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.737112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.807779Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625832819157945:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.807908Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.808341Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625832819157950:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.811625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:42.822572Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625832819157952:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:42.920620Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625832819158003:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:43.703283Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625815639286488:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:43.703359Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 12187, MsgBus: 26135 2025-06-24T21:08:46.091730Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625849395089010:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:46.115628Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00328c/r3tmp/tmpY7GNy5/pdisk_1.dat 2025-06-24T21:08:46.425198Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:46.425285Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:46.435261Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625849395088981:2079] 1750799326049282 != 1750799326049285 2025-06-24T21:08:46.460836Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:46.461251Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12187, node 2 2025-06-24T21:08:46.638356Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:46.638395Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:46.638404Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:46.638556Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26135 2025-06-24T21:08:47.119317Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26135 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:47.261136Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:47.271044Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:47.286326Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:47.363047Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:47.552007Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:47.630926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:49.839767Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625862279992510:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:49.839868Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:49.905984Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.942552Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.978653Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.021831Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.091539Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.168939Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.234294Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.327542Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625866574960475:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:50.327627Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:50.327794Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625866574960480:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:50.331371Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:50.343304Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625866574960482:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:50.433332Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625866574960533:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:51.095771Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625849395089010:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:51.095841Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:52.783073Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=Mzg3YmNiMTctMThjMjVlNWUtNWM2NTljNGQtZjFmMGVlNDg=, ActorId: [2:7519625870869928060:2464], ActorState: ExecuteState, TraceId: 01jyhwbd9xf6asqa1qyrmx7wfj, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken >> KqpLocksTricky::TestNoWrite [GOOD] >> KqpSinkLocks::DifferentKeyUpdate >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite1+withSink [GOOD] >> KqpSinkTx::OlapInvalidateOnError >> KqpTx::InvalidateOnError [GOOD] >> KqpSinkMvcc::OlapReadOnlyTxCommitsOnConcurrentWrite [GOOD] >> KqpSinkMvcc::OlapNamedStatement ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite2+withSink [GOOD] Test command err: Trying to start YDB, gRPC: 28075, MsgBus: 4021 2025-06-24T21:08:38.705444Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625813566407093:2130];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:38.705494Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003267/r3tmp/tmpuUsenQ/pdisk_1.dat 2025-06-24T21:08:39.186633Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625813566407001:2079] 1750799318697790 != 1750799318697793 2025-06-24T21:08:39.213928Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:39.215070Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:39.217646Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:39.223931Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28075, node 1 2025-06-24T21:08:39.347668Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:39.347697Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:39.347703Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T21:08:39.347831Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:39.739454Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4021 TClient is connected to server localhost:4021 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:40.158814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:40.182122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:40.195462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:40.357735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:40.503391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:40.586835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:42.021025Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625830746277823:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.021172Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.425378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.460184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.494013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.524164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.573768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.626460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.665702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.768787Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625830746278485:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.768875Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.769369Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625830746278490:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.773955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:42.792074Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625830746278492:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:42.891090Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625830746278545:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:43.709430Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625813566407093:2130];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:43.709510Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:44.940765Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YzRiM2JlNTAtNjliMWE1YWItODFiMGQ1NzAtOTg1YTlmN2M=, ActorId: [1:7519625839336213371:2465], ActorState: ExecuteState, TraceId: 01jyhwb5ms28aze7m2yvmyk74z, Create QueryResponse for error on request, msg: Trying to star ... aded 2025-06-24T21:08:46.209269Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26024, node 2 2025-06-24T21:08:46.333124Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:46.333153Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:46.333160Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:46.333273Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18631 TClient is connected to server localhost:18631 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:46.828562Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:08:46.835725Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:08:46.850917Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:46.936502Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:47.007477Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:47.150139Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:47.237778Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:49.436304Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625863343752793:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:49.436376Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:49.494577Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.584458Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.621968Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.677250Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.725466Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.797338Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.871496Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.971427Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625863343753458:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:49.971515Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:49.971751Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625863343753463:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:49.975092Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:49.995596Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625863343753465:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:08:50.090542Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625867638720812:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:50.961439Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625846163881994:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:50.961507Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:54.040415Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519625884818590375:2473], SessionActorId: [2:7519625871933688381:2473], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/EightShard`, code: 2001 . sessionActorId=[2:7519625871933688381:2473]. isRollback=0 2025-06-24T21:08:54.040666Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=ZjZjYjQzZmItYzJiODZiZjEtYjlmNGEwMTUtN2VhYjdhYzQ=, ActorId: [2:7519625871933688381:2473], ActorState: ExecuteState, TraceId: 01jyhwbec59edhcyhxg6wg3ctb, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7519625884818590376:2473] from: [2:7519625884818590375:2473] 2025-06-24T21:08:54.040737Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519625884818590376:2473] TxId: 281474976715674. Ctx: { TraceId: 01jyhwbec59edhcyhxg6wg3ctb, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjZjYjQzZmItYzJiODZiZjEtYjlmNGEwMTUtN2VhYjdhYzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/EightShard`, code: 2001 } 2025-06-24T21:08:54.040908Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZjZjYjQzZmItYzJiODZiZjEtYjlmNGEwMTUtN2VhYjdhYzQ=, ActorId: [2:7519625871933688381:2473], ActorState: ExecuteState, TraceId: 01jyhwbec59edhcyhxg6wg3ctb, Create QueryResponse for error on request, msg: >> DataShardOutOfOrder::TestOutOfOrderLockLost ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkTx::InvalidateOnError [GOOD] Test command err: Trying to start YDB, gRPC: 9614, MsgBus: 30069 2025-06-24T21:08:38.697310Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625816559383392:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:38.697361Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00325b/r3tmp/tmpjavQH8/pdisk_1.dat 2025-06-24T21:08:39.130208Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:39.131242Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625816559383365:2079] 1750799318690151 != 1750799318690154 2025-06-24T21:08:39.180829Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:39.180940Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:39.182287Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9614, node 1 2025-06-24T21:08:39.351511Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:39.351541Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:39.351569Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:39.351728Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:39.714390Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30069 TClient is connected to server localhost:30069 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:40.182261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:40.196163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:41.914933Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625829444285883:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:41.914937Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625829444285905:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:41.915030Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:41.918689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:41.929041Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625829444285910:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:08:41.999187Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625829444285961:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:42.437969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.559459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:43.562694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:43.763006Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625816559383392:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:44.328152Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:45.794144Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519625846624163459:2929], SessionActorId: [1:7519625842329195862:2929], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/KV`, code: 2001 . sessionActorId=[1:7519625842329195862:2929]. isRollback=0 2025-06-24T21:08:45.799793Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=ZjNlNmJiMDYtYTY1ODZlMDEtZDJiMTI3OS00Zjg2YWRlMQ==, ActorId: [1:7519625842329195862:2929], ActorState: ExecuteState, TraceId: 01jyhwb6hs1mddz0x4aj95b4ea, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [1:7519625846624163460:2929] from: [1:7519625846624163459:2929] 2025-06-24T21:08:45.799884Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519625846624163460:2929] TxId: 281474976710667. Ctx: { TraceId: 01jyhwb6hs1mddz0x4aj95b4ea, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjNlNmJiMDYtYTY1ODZlMDEtZDJiMTI3OS00Zjg2YWRlMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/KV`, code: 2001 } 2025-06-24T21:08:45.800079Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZjNlNmJiMDYtYTY1ODZlMDEtZDJiMTI3OS00Zjg2YWRlMQ==, ActorId: [1:7519625842329195862:2929], ActorState: ExecuteState, TraceId: 01jyhwb6hs1mddz0x4aj95b4ea, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 16647, MsgBus: 1327 2025-06-24T21:08:46.964616Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625850507434348:2148];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00325b/r3tmp/tmpBMqPST/pdisk_1.dat 2025-06-24T21:08:47.075672Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:08:47.170427Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:47.171641Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625850507434227:2079] 1750799326943762 != 1750799326943765 2025-06-24T21:08:47.197707Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:47.197795Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:47.202285Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16647, node 2 2025-06-24T21:08:47.312880Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:47.312906Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:47.312915Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:47.313056Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1327 TClient is connected to server localhost:1327 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:08:47.865856Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:47.961449Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:50.281114Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625867687304020:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:50.281196Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:50.281379Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625867687304055:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:50.285529Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:50.297642Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625867687304057:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:08:50.371237Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625867687304108:2330] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:50.427279Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.487206Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:51.661664Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:52.524797Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625850507434348:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:52.646366Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Error: Constraint violated. Table: `/Root/KV`., code: 2012 2025-06-24T21:08:53.223446Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-24T21:08:53.223767Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 72075186224037889 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:08:53.223948Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 3 at tablet 72075186224037889 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:08:53.224195Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [2:7519625880572214060:2929], Table: `/Root/KV` ([72057594046644480:7:1]), SessionActorId: [2:7519625880572214035:2929]Got CONSTRAINT VIOLATION for table `/Root/KV`. ShardID=72075186224037889, Sink=[2:7519625880572214060:2929].{
: Error: Conflict with existing key., code: 2012 } 2025-06-24T21:08:53.224291Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519625880572214053:2929], SessionActorId: [2:7519625880572214035:2929], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/KV`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[2:7519625880572214035:2929]. isRollback=0 2025-06-24T21:08:53.224528Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=M2YzMjJhMjUtNzVmY2FhM2QtM2QxYjAzZjEtNjAyYjE1OQ==, ActorId: [2:7519625880572214035:2929], ActorState: ExecuteState, TraceId: 01jyhwbdrmfqea4c834vy75k3k, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [2:7519625880572214054:2929] from: [2:7519625880572214053:2929] 2025-06-24T21:08:53.224604Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519625880572214054:2929] TxId: 281474976710664. Ctx: { TraceId: 01jyhwbdrmfqea4c834vy75k3k, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2YzMjJhMjUtNzVmY2FhM2QtM2QxYjAzZjEtNjAyYjE1OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/KV`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-24T21:08:53.224782Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=M2YzMjJhMjUtNzVmY2FhM2QtM2QxYjAzZjEtNjAyYjE1OQ==, ActorId: [2:7519625880572214035:2929], ActorState: ExecuteState, TraceId: 01jyhwbdrmfqea4c834vy75k3k, Create QueryResponse for error on request, msg:
: Error: Conflict with existing key., code: 2012 2025-06-24T21:08:53.298634Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=M2YzMjJhMjUtNzVmY2FhM2QtM2QxYjAzZjEtNjAyYjE1OQ==, ActorId: [2:7519625880572214035:2929], ActorState: ExecuteState, TraceId: 01jyhwbdvkcm7xbyhvrx5cjej9, Create QueryResponse for error on request, msg:
: Error: Transaction not found: 01jyhwbdr99yg22jtx4746g3jv, code: 2015 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkTx::Interactive [GOOD] Test command err: Trying to start YDB, gRPC: 7900, MsgBus: 2184 2025-06-24T21:08:38.711734Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625815067217291:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:38.712195Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003301/r3tmp/tmpHfA4l2/pdisk_1.dat 2025-06-24T21:08:39.175894Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:39.175997Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:39.178843Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:39.199250Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625815067217185:2079] 1750799318691748 != 1750799318691751 2025-06-24T21:08:39.204777Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7900, node 1 2025-06-24T21:08:39.345546Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:39.345567Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:39.345574Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:39.345678Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:39.719270Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2184 TClient is connected to server localhost:2184 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:08:40.230065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:41.971905Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625827952119718:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:41.971916Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625827952119729:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:41.971991Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:41.975276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:41.985866Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625827952119732:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:08:42.073295Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625832247087079:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:42.438391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.594040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:43.615248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:43.699554Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625815067217291:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:43.699663Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:45.418591Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=OTkyY2E4ZjYtNjEwNDg5NzYtOWJkMGRjMjMtOWVjZDc1OTM=, ActorId: [1:7519625840837029573:2930], ActorState: ReadyState, TraceId: 01jyhwb670bfzam5ea2yhmdvbz, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 61645, MsgBus: 17834 2025-06-24T21:08:46.709524Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625847981902718:2242];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003301/r3tmp/tmpT8GA6O/pdisk_1.dat 2025-06-24T21:08:46.762247Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:08:46.852911Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:46.854717Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:46.854787Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:46.855442Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie 
mismatch for subscription [2:7519625847981902488:2079] 1750799326664999 != 1750799326665002 TServer::EnableGrpc on GrpcPort 61645, node 2 2025-06-24T21:08:46.859866Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:46.895683Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:46.895707Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:46.895715Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:46.895825Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17834 TClient is connected to server localhost:17834 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:47.352512Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:47.358640Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:08:47.699299Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:50.172139Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625865161772291:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:50.172244Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:50.172566Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625865161772316:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:50.176627Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:50.190456Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625865161772318:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:08:50.277424Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625865161772370:2331] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:50.341135Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.388178Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:51.510948Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:52.342249Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625847981902718:2242];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:52.472525Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpSnapshotRead::ReadOnlyTxWithIndexCommitsOnConcurrentWrite+withSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite1 [GOOD] Test command err: Trying to start YDB, gRPC: 29911, MsgBus: 6019 2025-06-24T21:08:38.707225Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625815612191164:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:38.707275Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00323f/r3tmp/tmp4pxGIJ/pdisk_1.dat 2025-06-24T21:08:39.159572Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625815612191104:2079] 1750799318690190 != 1750799318690193 2025-06-24T21:08:39.172943Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:39.174441Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:39.174639Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> 
Connecting 2025-06-24T21:08:39.179860Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29911, node 1 2025-06-24T21:08:39.344198Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:39.344220Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:39.344224Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:39.344336Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:39.718302Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6019 TClient is connected to server localhost:6019 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:40.150624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:40.168184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:41.806310Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625828497093635:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:41.806521Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:41.807001Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625828497093647:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:41.813799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:41.829343Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625828497093649:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:08:41.922202Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625828497093700:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:42.439370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.581341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:43.687214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:43.708213Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625815612191164:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:43.708297Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 8732, MsgBus: 28809 2025-06-24T21:08:47.065200Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625855053486050:2179];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:47.105791Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00323f/r3tmp/tmpPhrsPd/pdisk_1.dat 2025-06-24T21:08:47.234893Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:47.234983Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:47.237598Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:47.250126Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8732, node 2 2025-06-24T21:08:47.323737Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:47.323762Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: 
will try to initialize from file: (empty maybe) 2025-06-24T21:08:47.323770Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:47.323901Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28809 TClient is connected to server localhost:28809 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:47.905415Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:48.064879Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:50.594774Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625867938388401:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:50.594789Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625867938388412:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:50.594849Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:50.598933Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:50.611909Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625867938388415:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:08:50.694090Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625867938388466:2330] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:50.745153Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.848713Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:51.951524Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:52.072352Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625855053486050:2179];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:52.078108Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:53.755687Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976715666; 2025-06-24T21:08:53.761946Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [2:7519625880823298525:2930], Table: `/Root/KV` ([72057594046644480:7:1]), SessionActorId: [2:7519625880823298459:2930]Got LOCKS BROKEN for table `/Root/KV`. ShardID=72075186224037889, Sink=[2:7519625880823298525:2930].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-24T21:08:53.762533Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519625880823298518:2930], SessionActorId: [2:7519625880823298459:2930], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/KV`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[2:7519625880823298459:2930]. isRollback=0 2025-06-24T21:08:53.762788Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=NzNjYWZiOC1kM2U4NDk1NC05MjRmZGIyLWU5OWZjZDY1, ActorId: [2:7519625880823298459:2930], ActorState: ExecuteState, TraceId: 01jyhwbe9eenan2fca8jr5c837, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7519625880823298519:2930] from: [2:7519625880823298518:2930] 2025-06-24T21:08:53.762874Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519625880823298519:2930] TxId: 281474976715666. Ctx: { TraceId: 01jyhwbe9eenan2fca8jr5c837, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzNjYWZiOC1kM2U4NDk1NC05MjRmZGIyLWU5OWZjZDY1, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/KV`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-24T21:08:53.763066Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NzNjYWZiOC1kM2U4NDk1NC05MjRmZGIyLWU5OWZjZDY1, ActorId: [2:7519625880823298459:2930], ActorState: ExecuteState, TraceId: 01jyhwbe9eenan2fca8jr5c837, Create QueryResponse for error on request, msg: >> KqpTx::CommitRequired [GOOD] >> KqpTx::CommitPrepared >> DataShardOutOfOrder::TestSnapshotReadPriority >> KqpExtractPredicateLookup::OverflowLookup >> KqpTx::RollbackRoTx [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::InvalidateOnError [GOOD] Test command err: Trying to start YDB, gRPC: 23040, MsgBus: 2722 2025-06-24T21:08:42.640259Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625829905098475:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:42.646074Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003209/r3tmp/tmpc1wUBa/pdisk_1.dat 2025-06-24T21:08:43.074427Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:43.074544Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:43.079601Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:43.127004Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:43.127856Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625829905098448:2079] 1750799322626371 != 1750799322626374 TServer::EnableGrpc on GrpcPort 23040, node 1 2025-06-24T21:08:43.203668Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:43.203693Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:43.203700Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:43.203823Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2722 TClient is connected to server localhost:2722 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:08:43.683067Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:43.872055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:43.886930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:43.914127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:44.152247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:44.452886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:44.563346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:46.365211Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625847084969279:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:46.365344Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:46.749494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:46.820848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:46.856016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:46.896131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:46.932933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:46.980103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:47.068628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:47.149357Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625851379937232:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:47.149445Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:47.149934Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625851379937237:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:47.154330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:47.169310Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625851379937239:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:47.255265Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625851379937290:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:47.642543Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625829905098475:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:47.642690Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 17309, MsgBus: 8017 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003209/r3tmp/tmpWmgSBE/pdisk_1.dat 2025-06-24T21:08:49.679372Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrat ... athStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:50.310440Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:50.315605Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:08:50.328761Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:50.387585Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:50.541274Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:50.550482Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:50.645556Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:52.986814Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625872987477495:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:52.986902Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:53.054424Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:53.096717Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:53.171112Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:53.217955Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:53.264280Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:53.345171Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:53.433590Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:53.575506Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625877282445453:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:53.575646Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:53.579886Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625877282445458:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:53.584192Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:53.600166Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625877282445460:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:08:53.674541Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625877282445511:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:54.940053Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-24T21:08:54.952931Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 72075186224037911 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:08:54.953150Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 3 at tablet 72075186224037911 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:08:54.953696Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [2:7519625881577413105:2473], Table: `/Root/KeyValue` ([72057594046644480:6:1]), SessionActorId: [2:7519625881577413079:2473]Got CONSTRAINT VIOLATION for table `/Root/KeyValue`. ShardID=72075186224037911, Sink=[2:7519625881577413105:2473].{
: Error: Conflict with existing key., code: 2012 } 2025-06-24T21:08:54.954335Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519625881577413098:2473], SessionActorId: [2:7519625881577413079:2473], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/KeyValue`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[2:7519625881577413079:2473]. isRollback=0 2025-06-24T21:08:54.954599Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=NzBiMTM5Ny03ZTZhMjkwYy1lMWRjNTJmMC02ZmMwYjNlOQ==, ActorId: [2:7519625881577413079:2473], ActorState: ExecuteState, TraceId: 01jyhwbfedc94fs9dfzx90ghc8, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [2:7519625881577413099:2473] from: [2:7519625881577413098:2473] 2025-06-24T21:08:54.954689Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519625881577413099:2473] TxId: 281474976715672. Ctx: { TraceId: 01jyhwbfedc94fs9dfzx90ghc8, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzBiMTM5Ny03ZTZhMjkwYy1lMWRjNTJmMC02ZmMwYjNlOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/KeyValue`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-24T21:08:54.954880Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NzBiMTM5Ny03ZTZhMjkwYy1lMWRjNTJmMC02ZmMwYjNlOQ==, ActorId: [2:7519625881577413079:2473], ActorState: ExecuteState, TraceId: 01jyhwbfedc94fs9dfzx90ghc8, Create QueryResponse for error on request, msg: 2025-06-24T21:08:55.060865Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NzBiMTM5Ny03ZTZhMjkwYy1lMWRjNTJmMC02ZmMwYjNlOQ==, ActorId: [2:7519625881577413079:2473], ActorState: ExecuteState, TraceId: 01jyhwbfj015pdd30tv9t90d2c, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite1+withSink [GOOD] Test command err: Trying to start YDB, gRPC: 15661, MsgBus: 32444 2025-06-24T21:08:38.702150Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625813409543677:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:38.702890Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003239/r3tmp/tmp9OmtuY/pdisk_1.dat 2025-06-24T21:08:39.125443Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:39.135244Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625813409543579:2079] 1750799318693673 != 1750799318693676 TServer::EnableGrpc on GrpcPort 15661, node 1 2025-06-24T21:08:39.193678Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:39.193797Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:39.194800Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:39.343769Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:39.343794Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:39.343806Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:39.343947Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:39.707326Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:32444 TClient is connected to server localhost:32444 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:40.132958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:40.173783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:40.362553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:40.510416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:40.603029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:41.976874Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625826294447124:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:41.976988Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.424534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.465956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.495884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.567165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.615438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.685733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.734040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.832217Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625830589415087:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.832327Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.832901Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625830589415092:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.837721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:42.848533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T21:08:42.850346Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625830589415094:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:42.906992Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625830589415145:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:43.703476Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625813409543677:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:43.703536Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:44.342118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... Client is connected to server localhost:31001 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:50.073438Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:50.080082Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:08:50.094195Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:50.163742Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:50.238001Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:50.361731Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:50.446411Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:52.720436Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625873005391200:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:52.720511Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:52.806623Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:52.885852Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:52.924047Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:52.960282Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:52.993231Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:53.081995Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:53.166580Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:53.271780Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625877300359164:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:53.271875Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:53.272138Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625877300359169:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:53.277443Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:53.294828Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625877300359171:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:08:53.374186Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625877300359224:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:54.183392Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625860120487699:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:54.183460Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:55.002815Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976715674; 2025-06-24T21:08:55.014837Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [2:7519625885890294157:2473], Table: `/Root/TwoShard` ([72057594046644480:2:1]), SessionActorId: [2:7519625881595326791:2473]Got LOCKS BROKEN for table `/Root/TwoShard`. ShardID=72075186224037888, Sink=[2:7519625885890294157:2473].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-24T21:08:55.015628Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519625881595326854:2473], SessionActorId: [2:7519625881595326791:2473], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/TwoShard`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[2:7519625881595326791:2473]. isRollback=0 2025-06-24T21:08:55.015906Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=Njc1ZGY5MGEtMzNiNDY4ZjMtNGQ0ODE5ZjgtYWMzNzA5YjI=, ActorId: [2:7519625881595326791:2473], ActorState: ExecuteState, TraceId: 01jyhwbfgq2ncx89tr0fjterw9, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7519625881595326855:2473] from: [2:7519625881595326854:2473] 2025-06-24T21:08:55.015998Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519625881595326855:2473] TxId: 281474976715674. Ctx: { TraceId: 01jyhwbfgq2ncx89tr0fjterw9, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Njc1ZGY5MGEtMzNiNDY4ZjMtNGQ0ODE5ZjgtYWMzNzA5YjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/TwoShard`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-24T21:08:55.016191Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=Njc1ZGY5MGEtMzNiNDY4ZjMtNGQ0ODE5ZjgtYWMzNzA5YjI=, ActorId: [2:7519625881595326791:2473], ActorState: ExecuteState, TraceId: 01jyhwbfgq2ncx89tr0fjterw9, Create QueryResponse for error on request, msg: >> KqpSinkTx::SnapshotROInteractive1 [GOOD] >> KqpSinkTx::OlapSnapshotROInteractive1 [GOOD] >> KqpSinkTx::OlapSnapshotROInteractive2 >> TestYmqHttpProxy::TestGetQueueUrlWithIAM [GOOD] >> TestKinesisHttpProxy::TestConsumersEmptyNames [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotRead::ReadOnlyTxWithIndexCommitsOnConcurrentWrite+withSink [GOOD] Test command err: Trying to start YDB, gRPC: 23800, MsgBus: 20599 2025-06-24T21:08:38.712357Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625814629955144:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:38.712567Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00334c/r3tmp/tmpTEqrSz/pdisk_1.dat 2025-06-24T21:08:39.208820Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:39.211015Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625814629955029:2079] 1750799318690206 != 1750799318690209 2025-06-24T21:08:39.225335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:39.225453Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:39.231796Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23800, node 1 2025-06-24T21:08:39.359228Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:39.359254Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:39.359262Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:39.359371Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:39.729137Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20599 TClient is connected to server localhost:20599 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:40.250000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:40.272022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:40.454128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:40.621866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:40.718289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:42.163782Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625831809825836:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.163876Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.508666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.539605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.572081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.614183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.664849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.740345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.782548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.839472Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625831809826499:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.839572Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.839620Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625831809826504:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.843964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:42.854748Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625831809826506:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:42.919059Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625831809826557:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:43.711345Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625814629955144:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:43.725189Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 22943, MsgBus: 14238 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00334c/r3tmp/tmpgOD6jO/pdisk_1.dat 2025-06-24T21:08:46.427758Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:08:46.529521Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:46.530497Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625848276287406:2079] 1750799326217709 != 1750799326217712 2025-06-24T21:08:46.541884Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:46.541965Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:46.545674Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22943, node 2 2025-06-24T21:08:46.615639Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:46.615664Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:46.615672Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:46.615786Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14238 TClient is connected to server localhost:14238 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:08:47.219286Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:08:47.228659Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:47.242715Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:47.356617Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:47.531518Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:47.609766Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:49.888175Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625861161190917:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:49.888255Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:49.944956Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.978876Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.013568Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.050125Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.085379Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.144769Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.200804Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.287875Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625865456158879:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:50.287980Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:50.288336Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625865456158884:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:50.292502Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:50.305880Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625865456158886:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:50.367900Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625865456158937:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:51.522084Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:51.574920Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:51.667459Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpSinkLocks::UncommittedRead [GOOD] >> KqpSinkLocks::VisibleUncommittedRows |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TestKinesisHttpProxy::CreateStreamWithDifferentRetentions [GOOD] >> KqpTx::SnapshotRO [GOOD] >> KqpTx::SnapshotROInteractive1 >> DataShardTxOrder::RandomPoints_DelayRS_Reboot >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites+EvWrite >> TestYmqHttpProxy::TestGetQueueAttributes >> TestKinesisHttpProxy::TestRequestWithIAM [GOOD] >> TGroupMapperTest::NonUniformCluster >> TestKinesisHttpProxy::PutRecordsWithIncorrectHashKey [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithEmptyName [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::RollbackRoTx [GOOD] Test command err: Trying to start YDB, gRPC: 18477, MsgBus: 19587 2025-06-24T21:08:38.720989Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625816648851065:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:38.721064Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0032f5/r3tmp/tmpHanTha/pdisk_1.dat 2025-06-24T21:08:39.161837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:39.161921Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:39.165752Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 
2025-06-24T21:08:39.199813Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18477, node 1 2025-06-24T21:08:39.342088Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:39.342122Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:39.342130Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:39.342269Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19587 2025-06-24T21:08:39.728095Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19587 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:40.136996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:40.159376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:40.185796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:40.346002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:40.510590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:40.602995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:41.976246Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625829533754541:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:41.976326Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.425604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.461354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.494596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.523955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.568580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.609485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.654093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.732263Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625833828722493:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.732359Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.732890Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625833828722498:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.735867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:42.744403Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625833828722500:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:42.815205Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625833828722553:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:43.723311Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625816648851065:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:43.723380Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 22145, MsgBus: 24559 2025-06-24T21:08:51.025550Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625871528842843:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:51.035685Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0032f5/r3tmp/tmpVnNcZT/pdisk_1.dat 2025-06-24T21:08:51.297667Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:51.312792Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625871528842791:2079] 1750799331020633 != 1750799331020636 2025-06-24T21:08:51.314046Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:51.314132Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:51.316119Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22145, node 2 2025-06-24T21:08:51.431687Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:51.431711Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:51.431719Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:51.431840Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24559 TClient is connected to server localhost:24559 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:52.001790Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:52.007654Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:52.015118Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:52.093700Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:52.101028Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:52.285788Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:52.386291Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:54.468198Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625884413746313:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:54.468299Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:54.525616Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.589297Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.626555Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.701108Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.777037Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.826205Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.873115Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.941194Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625884413746975:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:54.941271Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:54.941429Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625884413746980:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:54.945134Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:54.956608Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625884413746982:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:55.015167Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625888708714329:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:56.030063Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625871528842843:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:56.030732Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:56.238184Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=OWI3NWY5MGUtODRjYTZiNTctZWEzMTEzMGUtZGM4ZDNhNWQ=, ActorId: [2:7519625893003681892:2472], ActorState: ReadyState, TraceId: 01jyhwbgs27cm3jvg2vem1wsh9, Create QueryResponse for error on request, msg: >> TestKinesisHttpProxy::TestListStreamConsumers ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkTx::SnapshotROInteractive1 [GOOD] Test command err: Trying to start YDB, gRPC: 13864, MsgBus: 17787 2025-06-24T21:08:41.248647Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625825634037114:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:41.248722Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003229/r3tmp/tmpRU6txY/pdisk_1.dat 2025-06-24T21:08:41.541684Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:41.543336Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625825634037089:2079] 1750799321247444 != 1750799321247447 TServer::EnableGrpc on GrpcPort 13864, node 1 2025-06-24T21:08:41.626272Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:41.626306Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:41.626314Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:41.626436Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:41.632194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:41.632362Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:41.634341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17787 TClient is connected to server localhost:17787 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:42.129243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:42.264303Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:44.111922Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625838518939616:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:44.112039Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:44.112329Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625838518939628:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:44.116642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:44.140475Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625838518939630:2293], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:08:44.215322Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625838518939681:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:44.624101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:44.798623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:46.115831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:46.308609Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625825634037114:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:46.335611Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:48.345283Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NWFiNTVlYmQtMWE4ZWU0MS01NWY5ZmI4YS0yNjdjZmNiOQ==, ActorId: [1:7519625851403849547:2930], ActorState: ExecuteState, TraceId: 01jyhwb90k3e8eembdrwya6h2v, Create QueryResponse for error on request, msg:
:3:29: Error: Operation 'Upsert' can't be performed in read only transaction, code: 2008 Trying to start YDB, gRPC: 10127, MsgBus: 28869 2025-06-24T21:08:49.349654Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625860112836746:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:49.349750Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003229/r3tmp/tmpjnso74/pdisk_1.dat 2025-06-24T21:08:49.495120Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:49.496701Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625860112836725:2079] 1750799329342697 != 1750799329342700 2025-06-24T21:08:49.513597Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:49.513684Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:49.516816Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10127, node 2 2025-06-24T21:08:49.583686Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:49.583712Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:49.583721Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:49.583850Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28869 TClient is connected to server localhost:28869 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:50.101324Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:08:50.109100Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:50.367279Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:52.928785Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625872997739229:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:52.928855Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:52.938138Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625872997739257:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:52.942772Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:52.957173Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625872997739259:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:08:53.054266Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625877292706606:2331] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:53.154983Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:53.208294Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.333340Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.349852Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625860112836746:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:54.349947Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TestKinesisHttpProxy::DoubleCreateStream [GOOD] >> TestKinesisHttpProxy::CreateDeleteStream >> TestKinesisHttpProxy::TestRequestNoAuthorization >> TestKinesisHttpProxy::ListShards >> DataShardTxOrder::ImmediateBetweenOnline_Init >> TestYmqHttpProxy::TestCreateQueueWithAllAttributes >> DataShardTxOrder::DelayData >> TestKinesisHttpProxy::GoodRequestGetRecords >> TestYmqHttpProxy::TestReceiveMessageWithAttributes [GOOD] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> KqpLocks::Invalidate [GOOD] >> KqpLocks::InvalidateOnCommit >> KqpSinkMvcc::OltpMultiSinks [GOOD] >> KqpSinkMvcc::SnapshotExpiration [GOOD] >> KqpSinkTx::DeferredEffects >> TestYmqHttpProxy::TestReceiveMessageWithAttemptId >> DataShardSnapshots::BrokenLockChangesDontLeak [GOOD] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> KqpNewEngine::SimpleUpsertSelect ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkMvcc::OltpMultiSinks [GOOD] Test command err: Trying to start YDB, gRPC: 17361, MsgBus: 14221 2025-06-24T21:08:44.486321Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625838474326880:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:44.491856Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031fe/r3tmp/tmpXEzGU9/pdisk_1.dat 2025-06-24T21:08:45.099684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:45.099781Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:45.108273Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:45.115139Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:45.116534Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625838474326781:2079] 1750799324421980 != 1750799324421983 TServer::EnableGrpc on GrpcPort 17361, node 1 2025-06-24T21:08:45.399813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:45.399834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:45.399841Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:45.399965Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:45.491258Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14221 TClient is connected to server localhost:14221 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:46.225701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:08:46.246718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:48.296344Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625855654196613:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:48.296560Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:48.297119Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625855654196625:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:48.302037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:48.316348Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625855654196627:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:08:48.419043Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625855654196678:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:48.705832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:48.848957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.525921Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625838474326880:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:49.529934Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:49.767917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 25649, MsgBus: 14133 2025-06-24T21:08:52.486726Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625875574504071:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:52.487284Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031fe/r3tmp/tmpyAoT4H/pdisk_1.dat 2025-06-24T21:08:52.593894Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:52.599291Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625875574504015:2079] 1750799332438306 != 1750799332438309 TServer::EnableGrpc on GrpcPort 25649, node 2 2025-06-24T21:08:52.644368Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:52.644457Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:52.647033Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:52.783629Z node 2 
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:52.783653Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:52.783663Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:52.783771Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14133 TClient is connected to server localhost:14133 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:53.328735Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:53.340454Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:08:53.494958Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:55.934157Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625888459406546:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:55.934226Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625888459406528:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:55.934306Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:55.940429Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:55.950515Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625888459406550:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:08:56.048402Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625892754373897:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:56.102935Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:56.146237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:57.221024Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:58.076841Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625875574504071:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:58.185864Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> DataShardTxOrder::RandomDotRanges_DelayRS >> KqpVectorIndexes::OrderByCosineLevel2+Nullable+UseSimilarity [GOOD] >> KqpIndexes::UpdateOnReadColumns [GOOD] >> DataShardSnapshots::ShardRestartAfterDropTableAndAbort [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::BrokenLockChangesDontLeak [GOOD] Test command err: 2025-06-24T21:07:07.402079Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:07:07.402395Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:07:07.402516Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047bd/r3tmp/tmp6DNtBY/pdisk_1.dat 2025-06-24T21:07:07.766086Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:07:07.774471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:07.808816Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:07.814872Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799225356341 != 1750799225356345 2025-06-24T21:07:07.861574Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:07:07.863502Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T21:07:07.863723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:07.863806Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:07.876147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:07.955329Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T21:07:07.955393Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T21:07:07.956266Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:600:2508] 2025-06-24T21:07:08.055071Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:600:2508] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T21:07:08.055138Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:600:2508] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:07:08.055599Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:600:2508] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:07:08.055662Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:600:2508] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:07:08.055940Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:07:08.056107Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:600:2508] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:07:08.056214Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:600:2508] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:07:08.057385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:08.057762Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T21:07:08.058155Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:600:2508] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T21:07:08.058196Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:600:2508] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T21:07:08.085900Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:07:08.086898Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:07:08.087349Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:07:08.087593Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:07:08.121758Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:07:08.122324Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:07:08.122416Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:07:08.124362Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:07:08.124427Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:07:08.124489Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:07:08.126082Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-24T21:07:08.126227Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:07:08.126307Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:07:08.126691Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:07:08.165861Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:07:08.166003Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:07:08.166092Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:07:08.166116Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:07:08.166139Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:07:08.166163Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:07:08.166308Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:07:08.166343Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:07:08.166587Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:07:08.166653Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:07:08.166688Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:07:08.166721Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:07:08.166760Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:07:08.166783Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:07:08.166807Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:07:08.166828Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:07:08.166855Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:07:08.166944Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:08.166968Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:08.167002Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], 
sessionId# [0:0:0] 2025-06-24T21:07:08.167324Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:07:08.167360Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:07:08.167439Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:07:08.167591Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:07:08.167638Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:07:08.167700Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:07:08.167738Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21: ... X_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-24T21:08:59.629938Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T21:08:59.630186Z node 16 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 5 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because it cannot acquire locks" issue_code: 2001 severity: 1 } 2025-06-24T21:08:59.630269Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is DelayComplete 2025-06-24T21:08:59.630317Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-24T21:08:59.630395Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:08:59.630480Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:08:59.630565Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-24T21:08:59.630598Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:08:59.630648Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:5] at 72075186224037888 has finished 2025-06-24T21:08:59.630748Z node 16 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-24T21:08:59.630824Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:5] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T21:08:59.630934Z node 16 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 5 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_LOCKS_BROKEN 2025-06-24T21:08:59.631410Z node 16 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 5 at tablet 72075186224037888 Status: 
STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because it cannot acquire locks" issue_code: 2001 severity: 1 } 2025-06-24T21:08:59.631551Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:08:59.631963Z node 16 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [16:865:2640], Table: `/Root/table` ([72057594046644480:2:1]), SessionActorId: [16:800:2640]Got LOCKS BROKEN for table `/Root/table`. ShardID=72075186224037888, Sink=[16:865:2640].{
: Error: Operation is aborting because it cannot acquire locks, code: 2001 } 2025-06-24T21:08:59.632229Z node 16 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [16:858:2640], SessionActorId: [16:800:2640], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/table`., code: 2001
: Error: Operation is aborting because it cannot acquire locks, code: 2001 . sessionActorId=[16:800:2640]. isRollback=0 2025-06-24T21:08:59.632859Z node 16 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=16&id=NjRjYTAzYjQtYjFhMDQxMTQtOGFiOGQ4YzktNGJhNTNlYmY=, ActorId: [16:800:2640], ActorState: ExecuteState, TraceId: 01jyhwbks75xbnspn7es6ksaqq, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [16:859:2640] from: [16:858:2640] 2025-06-24T21:08:59.633114Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 278003712, Sender [16:858:2640], Recipient [16:650:2540]: NKikimrDataEvents.TEvWrite TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 } Op: Rollback } 2025-06-24T21:08:59.633153Z node 16 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-24T21:08:59.633373Z node 16 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [16:859:2640] TxId: 281474976715663. Ctx: { TraceId: 01jyhwbks75xbnspn7es6ksaqq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=16&id=NjRjYTAzYjQtYjFhMDQxMTQtOGFiOGQ4YzktNGJhNTNlYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/table`., code: 2001 subissue: {
: Error: Operation is aborting because it cannot acquire locks, code: 2001 } } 2025-06-24T21:08:59.633691Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435074, Sender [16:650:2540], Recipient [16:650:2540]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-24T21:08:59.633735Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-24T21:08:59.633828Z node 16 :TX_DATASHARD TRACE: datashard__write.cpp:28: TTxWrite:: execute at tablet# 72075186224037888 2025-06-24T21:08:59.633980Z node 16 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: Parsing write transaction for 0 at 72075186224037888, record: TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 } Op: Rollback } 2025-06-24T21:08:59.634142Z node 16 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-24T21:08:59.634256Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CheckWrite 2025-06-24T21:08:59.634318Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-24T21:08:59.634353Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CheckWrite 2025-06-24T21:08:59.634387Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T21:08:59.634418Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T21:08:59.634457Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v400/0 IncompleteEdge# v{min} UnprotectedReadEdge# v400/18446744073709551615 ImmediateWriteEdge# v401/0 ImmediateWriteEdgeReplied# v401/0 2025-06-24T21:08:59.634517Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037888 2025-06-24T21:08:59.634551Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-24T21:08:59.634582Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:08:59.634609Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit ExecuteWrite 2025-06-24T21:08:59.634636Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit ExecuteWrite 2025-06-24T21:08:59.634669Z node 16 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:6] at 72075186224037888 2025-06-24T21:08:59.634799Z node 16 :TX_DATASHARD TRACE: datashard_kqp.cpp:787: KqpEraseLock LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 2025-06-24T21:08:59.634896Z node 16 :TX_DATASHARD DEBUG: execute_write_unit.cpp:420: Skip empty write operation for [0:6] at 72075186224037888 2025-06-24T21:08:59.634997Z node 16 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 
0 2025-06-24T21:08:59.635093Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:08:59.635138Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit ExecuteWrite 2025-06-24T21:08:59.635409Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-24T21:08:59.635497Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T21:08:59.635540Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is DelayComplete 2025-06-24T21:08:59.635572Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-24T21:08:59.635602Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:08:59.635635Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:08:59.635682Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-24T21:08:59.635708Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:08:59.635741Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished 2025-06-24T21:08:59.635808Z node 16 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-24T21:08:59.635844Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T21:08:59.635883Z node 16 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 6 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2025-06-24T21:08:59.635968Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:08:59.636231Z node 16 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=16&id=NjRjYTAzYjQtYjFhMDQxMTQtOGFiOGQ4YzktNGJhNTNlYmY=, ActorId: [16:800:2640], ActorState: ExecuteState, TraceId: 01jyhwbks75xbnspn7es6ksaqq, Create QueryResponse for error on request, msg: 2025-06-24T21:08:59.637827Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [16:61:2108], Recipient [16:650:2540]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715661 LockNode: 16 Status: STATUS_NOT_FOUND 2025-06-24T21:08:59.702283Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [16:872:2690], Recipient [16:650:2540]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:59.702447Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:59.702555Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [16:871:2689], serverId# [16:872:2690], sessionId# [0:0:0] 
2025-06-24T21:08:59.702854Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553224, Sender [16:552:2478], Recipient [16:650:2540]: NKikimr::TEvDataShard::TEvGetOpenTxs >> KqpSnapshotRead::TestSnapshotExpiration-withSink [GOOD] >> KqpTx::BeginTransactionBadMode >> DataShardOutOfOrder::TestSnapshotReadAfterStuckRW >> KqpTx::CommitPrepared [GOOD] >> DataShardSnapshots::UncommittedChangesRenameTable-UseSink [GOOD] >> KqpSinkLocks::DifferentKeyUpdate [GOOD] >> KqpSinkLocks::DifferentKeyUpdateOlap ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::OrderByCosineLevel2+Nullable+UseSimilarity [GOOD] Test command err: Trying to start YDB, gRPC: 23256, MsgBus: 8403 2025-06-24T21:07:43.069863Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625577308237485:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:43.070044Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045fd/r3tmp/tmpMQ0olc/pdisk_1.dat 2025-06-24T21:07:43.336879Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625577308237460:2079] 1750799263068670 != 1750799263068673 2025-06-24T21:07:43.363472Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23256, node 1 2025-06-24T21:07:43.439463Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:43.439605Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:43.443472Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:43.459871Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:43.459895Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:43.459902Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:43.460038Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8403 TClient is connected to server localhost:8403 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:43.972520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:43.996548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:44.081479Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:44.124362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:44.271256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:07:44.363383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.929870Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625585898173686:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:45.930004Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:46.273695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:46.301597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:46.332321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:46.364914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:46.396480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:46.467971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:46.540175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:46.596724Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625590193141649:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:46.596797Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:46.596811Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625590193141654:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:46.602676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:46.617767Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625590193141656:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:46.722785Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625590193141707:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:47.712199Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625594488109277:3600], Recipient [1:7519625577308237835:2177]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:47.712236Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:47.712249Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:47.712283Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519625594488109273:3597], Recipient [1:7519625577308237835:2177]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-06-24T21:07:47.712329Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSche ... RACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519625710817874943:2154]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:00.619428Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:00.619445Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 9 2025-06-24T21:09:00.619507Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 9 2025-06-24T21:09:00.619528Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 9 2025-06-24T21:09:00.619595Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:17 data size 0 row count 0 2025-06-24T21:09:00.619664Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037904 maps to shardIdx: 72057594046644480:17 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:00.619681Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037904, followerId 0 2025-06-24T21:09:00.619748Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:17 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:00.619792Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037904 2025-06-24T21:09:00.619830Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:23 data size 0 row count 0 
2025-06-24T21:09:00.619868Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037910 maps to shardIdx: 72057594046644480:23 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:00.619878Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037910, followerId 0 2025-06-24T21:09:00.619909Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:23 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:00.619922Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037910 2025-06-24T21:09:00.619941Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:20 data size 0 row count 0 2025-06-24T21:09:00.619977Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037907 maps to shardIdx: 72057594046644480:20 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:00.619988Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037907, followerId 0 2025-06-24T21:09:00.620018Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:20 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:00.620032Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037907 2025-06-24T21:09:00.620053Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:14 data size 0 row count 0 2025-06-24T21:09:00.620090Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037901 maps to shardIdx: 72057594046644480:14 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:00.620103Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037901, followerId 0 2025-06-24T21:09:00.620133Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:14 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:00.620146Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037901 2025-06-24T21:09:00.620167Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:18 data size 0 row count 0 2025-06-24T21:09:00.620197Z node 2 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037905 maps to shardIdx: 72057594046644480:18 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:00.620207Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037905, followerId 0 2025-06-24T21:09:00.620237Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:18 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:00.620249Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037905 2025-06-24T21:09:00.620267Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:21 data size 0 row count 0 2025-06-24T21:09:00.620300Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037908 maps to shardIdx: 72057594046644480:21 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:00.620310Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037908, followerId 0 2025-06-24T21:09:00.620340Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:21 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:00.620354Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037908 2025-06-24T21:09:00.620373Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:22 data size 0 row count 0 2025-06-24T21:09:00.620403Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037909 maps to shardIdx: 72057594046644480:22 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:00.620411Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037909, followerId 0 2025-06-24T21:09:00.620440Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:22 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:00.620453Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037909 2025-06-24T21:09:00.620470Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:19 data size 0 row count 0 2025-06-24T21:09:00.620501Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: 
TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037906 maps to shardIdx: 72057594046644480:19 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:00.620510Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037906, followerId 0 2025-06-24T21:09:00.620538Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:19 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:00.620551Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037906 2025-06-24T21:09:00.620569Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:15 data size 0 row count 0 2025-06-24T21:09:00.620598Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037902 maps to shardIdx: 72057594046644480:15 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:00.620606Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037902, followerId 0 2025-06-24T21:09:00.620634Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:15 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:00.620644Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037902 2025-06-24T21:09:00.620705Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:09:00.620861Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519625710817874943:2154]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:00.620881Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:00.620892Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 >> TGroupMapperTest::MakeDisksNonoperational [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::UpdateOnReadColumns [GOOD] Test command err: Trying to start YDB, gRPC: 10108, MsgBus: 5935 2025-06-24T21:07:48.029634Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625598587776959:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:48.029709Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/2btm/0045e5/r3tmp/tmpvgRP1V/pdisk_1.dat 2025-06-24T21:07:48.331307Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:48.331719Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625598587776936:2079] 1750799268028274 != 1750799268028277 TServer::EnableGrpc on GrpcPort 10108, node 1 2025-06-24T21:07:48.418736Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:48.418817Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:48.420124Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:48.447905Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:48.447940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:48.447952Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:48.448092Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5935 TClient is connected to server localhost:5935 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:49.039719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:49.041967Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:07:49.080803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:49.227629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:49.370339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:49.429006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:51.075612Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625611472680461:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:51.075719Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:51.457447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:51.495664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:51.538386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:51.571943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:51.610142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:51.655353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:51.697016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:51.792728Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625611472681123:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:51.792768Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625611472681128:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:51.792790Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:51.797376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:51.809909Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625611472681130:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:51.890397Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625611472681181:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:52.967921Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625615767648753:3597], Recipient [1:7519625598587777255:2141]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:52.967962Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:52.967976Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:52.968037Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519625615767648749:3594], Recipient [1:7519625598587777255:2141]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-06-24T21:07:52.968059Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSche ... -06-24T21:08:49.850974Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:49.851095Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:49.858436Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28766, node 7 2025-06-24T21:08:49.919924Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:49.919958Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:49.919967Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:49.920146Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16674 2025-06-24T21:08:50.636119Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16674 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:50.837436Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:50.847166Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:50.870167Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:50.963457Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:51.271021Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:51.404416Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:54.617875Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519625862992649538:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:54.617967Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:54.989816Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519625884467487636:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:54.989940Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:55.075802Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:55.129712Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:55.212463Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:55.265443Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:55.322461Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:55.382576Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:55.438646Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:55.536642Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519625888762455594:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:55.536765Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:55.536841Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519625888762455599:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:55.542025Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:55.556348Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519625888762455601:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:55.631411Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519625888762455654:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:57.263870Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:57.326358Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:57.415082Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> DataShardOutOfOrder::TestOutOfOrderLockLost [GOOD] >> DataShardOutOfOrder::TestOutOfOrderNoBarrierRestartImmediateLongTail ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::ShardRestartAfterDropTableAndAbort [GOOD] Test command err: 2025-06-24T21:07:07.437048Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:07:07.437407Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:07:07.437536Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046ea/r3tmp/tmppFG2sC/pdisk_1.dat 2025-06-24T21:07:07.766104Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:07:07.774323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:07.814610Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:07.822936Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799225356398 != 1750799225356402 2025-06-24T21:07:07.866434Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:07:07.867408Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T21:07:07.867639Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:07.867736Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:07.878950Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:07.955521Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T21:07:07.955592Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T21:07:07.956329Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:600:2508] 2025-06-24T21:07:08.052147Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:600:2508] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T21:07:08.052242Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:600:2508] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:07:08.052736Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:600:2508] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:07:08.052801Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:600:2508] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:07:08.053035Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:07:08.053184Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:600:2508] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:07:08.053304Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:600:2508] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:07:08.054705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:08.055188Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T21:07:08.055704Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:600:2508] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T21:07:08.055754Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:600:2508] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T21:07:08.092970Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:07:08.093882Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:07:08.094262Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:07:08.094461Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:07:08.126335Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:07:08.126980Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:07:08.127116Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:07:08.128929Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:07:08.128997Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:07:08.129057Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:07:08.129327Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-24T21:07:08.129436Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:07:08.129501Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:07:08.129891Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:07:08.153310Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:07:08.154475Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:07:08.154654Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:07:08.154699Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:07:08.154738Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:07:08.154778Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:07:08.155024Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:07:08.155077Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:07:08.156064Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:07:08.156143Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:07:08.156190Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:07:08.156218Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:07:08.156884Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:07:08.156913Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:07:08.156964Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:07:08.156992Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:07:08.157027Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:07:08.157132Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:08.157173Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:08.157208Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], 
sessionId# [0:0:0] 2025-06-24T21:07:08.158502Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:07:08.158541Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:07:08.158629Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:07:08.158934Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:07:08.159008Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:07:08.159079Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:07:08.159203Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21: ... ionActorId=[14:802:2643]. isRollback=0 2025-06-24T21:09:00.755941Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=14&id=ZTQzMzBkMTctNjVjNDdjMWItMWIyMzQ4OGUtZjk1MDMzNWI=, ActorId: [14:802:2643], ActorState: ExecuteState, TraceId: 01jyhwbn294mwrms432a7gj9h0, got TEvKqpBuffer::TEvError in ExecuteState, status: UNAVAILABLE send to: [14:956:2643] from: [14:823:2643] 2025-06-24T21:09:00.756266Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 278003712, Sender [14:823:2643], Recipient [14:650:2540]: NKikimrDataEvents.TEvWrite TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true } Op: Rollback } 2025-06-24T21:09:00.756310Z node 14 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-24T21:09:00.756423Z node 14 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_WRONG_SHARD_STATE;details=Rejecting data TxId 0 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state);tx_id=0; 2025-06-24T21:09:00.756480Z node 14 :TX_DATASHARD NOTICE: datashard.cpp:3137: Rejecting data TxId 0 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state) 2025-06-24T21:09:00.756699Z node 14 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [14:956:2643] TxId: 281474976715665. Ctx: { TraceId: 01jyhwbn294mwrms432a7gj9h0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=ZTQzMzBkMTctNjVjNDdjMWItMWIyMzQ4OGUtZjk1MDMzNWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: {
: Error: Wrong shard state. Table `/Root/table`., code: 2005 subissue: {
: Error: Rejecting data TxId 0 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state), code: 2029 } } 2025-06-24T21:09:00.757169Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=14&id=ZTQzMzBkMTctNjVjNDdjMWItMWIyMzQ4OGUtZjk1MDMzNWI=, ActorId: [14:802:2643], ActorState: ExecuteState, TraceId: 01jyhwbn294mwrms432a7gj9h0, Create QueryResponse for error on request, msg: ... blocking NKikimr::NLongTxService::TEvLongTxService::TEvLockStatus from LONG_TX_SERVICE to TX_DATASHARD_ACTOR cookie 0 2025-06-24T21:09:00.759678Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 65543, Sender [14:552:2478], Recipient [14:650:2540]: NActors::TEvents::TEvPoison 2025-06-24T21:09:00.760273Z node 14 :TX_DATASHARD INFO: datashard.cpp:190: OnDetach: 72075186224037888 2025-06-24T21:09:00.760383Z node 14 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-06-24T21:09:00.787933Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [14:960:2772], Recipient [14:962:2773]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:00.797011Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [14:960:2772], Recipient [14:962:2773]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:00.797211Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828684, Sender [14:960:2772], Recipient [14:962:2773]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:00.801904Z node 14 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [14:962:2773] 2025-06-24T21:09:00.802424Z node 14 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:00.810393Z node 14 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:00.812534Z node 14 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:00.816354Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:09:00.816518Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:09:00.816652Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:09:00.817368Z node 14 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:00.817782Z node 14 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:00.817881Z node 14 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:00.817984Z node 14 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state PreOffline tabletId 72075186224037888 2025-06-24T21:09:00.818212Z node 14 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037888 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 1 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:09:00.818296Z node 14 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast PreOffline tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:09:00.818497Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change 
sender created: at tablet: 72075186224037888, actorId: [14:976:2780] 2025-06-24T21:09:00.818581Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:09:00.818651Z node 14 :TX_DATASHARD INFO: datashard.cpp:1283: Cannot activate change sender: at tablet: 72075186224037888, state: PreOffline, queue size: 0 2025-06-24T21:09:00.818725Z node 14 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:09:00.819168Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [14:61:2108], Recipient [14:962:2773]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715661 LockNode: 14 Status: STATUS_NOT_FOUND 2025-06-24T21:09:00.819737Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [14:962:2773], Recipient [14:962:2773]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:00.819803Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:00.820513Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435075, Sender [14:962:2773], Recipient [14:962:2773]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressResendReadSet 2025-06-24T21:09:00.820564Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3160: StateWork, processing event TEvPrivate::TEvProgressResendReadSet 2025-06-24T21:09:00.821083Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [14:24:2071], Recipient [14:962:2773]: {TEvRegisterTabletResult TabletId# 72075186224037888 Entry# 600} 2025-06-24T21:09:00.821143Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-24T21:09:00.821225Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 600 2025-06-24T21:09:00.821305Z node 14 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:09:00.822829Z node 14 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:09:00.822924Z node 14 :TX_DATASHARD INFO: datashard__progress_tx.cpp:21: Progress tx at non-ready tablet 72075186224037888 state 5 2025-06-24T21:09:00.823046Z node 14 :TX_DATASHARD DEBUG: datashard__progress_resend_rs.cpp:14: Start TTxProgressResendRS at tablet 72075186224037888 2025-06-24T21:09:00.823129Z node 14 :TX_DATASHARD INFO: datashard.cpp:4101: Resend RS at 72075186224037888 from 72075186224037888 to 72075186224037889 txId 281474976715663 2025-06-24T21:09:00.823245Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 1 at 72075186224037888 from 72075186224037888 to 72075186224037889 txId 281474976715663 2025-06-24T21:09:00.823682Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [14:962:2773], Recipient [14:864:2687]: {TEvReadSet step# 500 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 138 Seqno# 1 Flags# 0} 2025-06-24T21:09:00.823738Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T21:09:00.823819Z node 14 :TX_DATASHARD 
DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037888 dest 72075186224037889 producer 72075186224037888 txId 281474976715663 2025-06-24T21:09:00.824007Z node 14 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 500 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 138 Seqno# 1 Flags# 0} 2025-06-24T21:09:00.824121Z node 14 :TX_DATASHARD NOTICE: datashard_pipeline.cpp:734: Outdated readset for 500:281474976715663 at 72075186224037889 2025-06-24T21:09:00.824212Z node 14 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-24T21:09:00.824319Z node 14 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Ack at 72075186224037889 {TEvReadSet step# 500 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 138 Seqno# 1 Flags# 0} 2025-06-24T21:09:00.824641Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [14:24:2071], Recipient [14:962:2773]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 400 NextReadStep# 600 ReadStep# 600 } 2025-06-24T21:09:00.824698Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-24T21:09:00.824774Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 400 next step 600 2025-06-24T21:09:00.824965Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [14:864:2687], Recipient [14:962:2773]: {TEvReadSet step# 500 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletConsumer# 72075186224037889 Flags# 0 Seqno# 1} 2025-06-24T21:09:00.825013Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:00.825098Z node 14 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 281474976715663 2025-06-24T21:09:00.825220Z node 14 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037888 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:09:00.825537Z node 14 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:09:01.043805Z node 14 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::MakeDisksNonoperational [GOOD] >> KqpSinkLocks::TInvalidateOlap [GOOD] >> KqpSinkLocks::OlapVisibleUncommittedRowsUpdate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::CommitPrepared [GOOD] Test command err: Trying to start YDB, gRPC: 27817, MsgBus: 2280 2025-06-24T21:08:50.267608Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625867866745522:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:50.285194Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031d2/r3tmp/tmpT2MTsc/pdisk_1.dat 2025-06-24T21:08:50.723420Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27817, node 1 2025-06-24T21:08:50.751777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:50.751905Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:50.757606Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:50.845114Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:50.845140Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:50.845158Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:50.845279Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2280 2025-06-24T21:08:51.295269Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2280 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:51.590843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:08:51.628244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:51.794375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:51.972311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:52.074996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:53.884012Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625880751648984:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:53.884493Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:54.244461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.277344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.355081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.386389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.415108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.448746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.524568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.643190Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625885046616943:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:54.643269Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:54.643462Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625885046616948:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:54.647640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:54.662713Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625885046616950:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:54.733754Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625885046617001:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:55.271300Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625867866745522:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:55.271370Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 18799, MsgBus: 11066 2025-06-24T21:08:56.728903Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625892567538271:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:56.728979Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031d2/r3tmp/tmpP1H5ja/pdisk_1.dat 2025-06-24T21:08:56.846440Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:56.857376Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625892567538255:2079] 1750799336728495 != 1750799336728498 2025-06-24T21:08:56.883166Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:56.883249Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:56.886248Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18799, node 2 2025-06-24T21:08:56.951668Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:56.951694Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:56.951702Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:56.951819Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11066 TClient is connected to server localhost:11066 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:57.405909Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:57.411879Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:08:57.420532Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:57.503751Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:57.665514Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:57.744445Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:57.753807Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:09:00.052530Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625909747409059:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:00.052646Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:00.116221Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:00.161820Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:00.195532Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:00.232556Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:00.311563Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:00.396755Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:00.449388Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:00.547393Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625909747409727:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:00.547481Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:00.547818Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625909747409732:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:00.556514Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:00.571401Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625909747409734:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:09:00.654016Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625909747409785:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:01.732859Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625892567538271:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:01.733369Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> DataShardTxOrder::ImmediateBetweenOnline_Init_oo8 >> KqpTx::SnapshotROInteractive1 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::UncommittedChangesRenameTable-UseSink [GOOD] Test command err: 2025-06-24T21:07:07.529769Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:07:07.530033Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:07:07.530147Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0046cb/r3tmp/tmp4nRArT/pdisk_1.dat 2025-06-24T21:07:07.768085Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:07:07.774472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:07:07.809171Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:07.814871Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799225356412 != 1750799225356416 2025-06-24T21:07:07.861779Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:07:07.863541Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T21:07:07.863840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:07.863978Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:07.876234Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:07.955326Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T21:07:07.955397Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T21:07:07.956323Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:600:2508] 2025-06-24T21:07:08.047803Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:600:2508] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T21:07:08.047910Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:600:2508] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:07:08.048370Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:600:2508] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:07:08.048436Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:600:2508] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:07:08.048657Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:07:08.048806Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:600:2508] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:07:08.048913Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:600:2508] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:07:08.050223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:08.050647Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T21:07:08.051092Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:600:2508] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T21:07:08.051140Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:600:2508] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T21:07:08.098344Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:07:08.099603Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:07:08.099995Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:07:08.100190Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:07:08.134441Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:07:08.135022Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:07:08.135114Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:07:08.136505Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:07:08.136590Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:07:08.136643Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:07:08.136933Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-24T21:07:08.137030Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:07:08.137111Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:07:08.147787Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:07:08.170966Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:07:08.171156Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:07:08.171251Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:07:08.171280Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:07:08.171308Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:07:08.171335Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:07:08.171495Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:07:08.171530Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:07:08.171786Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:07:08.171864Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:07:08.171916Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:07:08.171944Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:07:08.171986Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:07:08.172025Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:07:08.172052Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:07:08.172079Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:07:08.172117Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:07:08.172224Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:08.172256Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:08.172297Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], 
sessionId# [0:0:0] 2025-06-24T21:07:08.172610Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:07:08.172653Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:07:08.172752Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:07:08.172970Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:07:08.173027Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:07:08.173092Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:07:08.173127Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21: ... eId: 01jyhwbpdb9weww8567vwsnn8v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZTA2MDMyNDctMjEzZDNhMzEtOWI4ZmQyZjMtM2RiNjJmNzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. State: WaitResolveState, Executing KQP transaction on shard: 72075186224037888, tasks: [], lockTxId: (empty maybe), locks: Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true } Op: Rollback, immediate: 1 2025-06-24T21:09:02.114922Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:1832: ActorId: [13:942:2643] TxId: 281474976715665. Ctx: { TraceId: 01jyhwbpdb9weww8567vwsnn8v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZTA2MDMyNDctMjEzZDNhMzEtOWI4ZmQyZjMtM2RiNjJmNzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ExecuteDatashardTransaction traceId.verbosity: 0 2025-06-24T21:09:02.114992Z node 13 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [13:942:2643] TxId: 281474976715665. Ctx: { TraceId: 01jyhwbpdb9weww8567vwsnn8v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZTA2MDMyNDctMjEzZDNhMzEtOWI4ZmQyZjMtM2RiNjJmNzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 1, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-24T21:09:02.115058Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:135: ActorId: [13:942:2643] TxId: 281474976715665. Ctx: { TraceId: 01jyhwbpdb9weww8567vwsnn8v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZTA2MDMyNDctMjEzZDNhMzEtOWI4ZmQyZjMtM2RiNjJmNzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, datashard 72075186224037888 not finished yet: Executing 2025-06-24T21:09:02.115113Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [13:942:2643] TxId: 281474976715665. Ctx: { TraceId: 01jyhwbpdb9weww8567vwsnn8v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZTA2MDMyNDctMjEzZDNhMzEtOWI4ZmQyZjMtM2RiNjJmNzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: WaitResolveState, waiting for 0 compute actor(s) and 1 datashard(s): DS 72075186224037888 (Executing), 2025-06-24T21:09:02.115187Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2368: ActorId: [13:942:2643] TxId: 281474976715665. Ctx: { TraceId: 01jyhwbpdb9weww8567vwsnn8v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZTA2MDMyNDctMjEzZDNhMzEtOWI4ZmQyZjMtM2RiNjJmNzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, immediate tx, become ExecuteState 2025-06-24T21:09:02.115477Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [13:942:2643], Recipient [13:911:2727]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 942 RawX2: 55834577491 } TxBody: " \0018\001j3\010\001\032\'\n#\t\215\023\000\000\000\000\001\000\021\000\000\001\000\000\020\000\001\030\001 \000)\000\001\205\000\000\000\000\0010\0028\001 \003\"\006\020\0020\000@\n\220\001\000" TxId: 281474976715665 ExecLevel: 0 Flags: 8 2025-06-24T21:09:02.115525Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:02.115667Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435074, Sender [13:911:2727], Recipient [13:911:2727]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-24T21:09:02.115708Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-24T21:09:02.115785Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:09:02.115974Z node 13 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-24T21:09:02.116059Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit CheckDataTx 2025-06-24T21:09:02.116110Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-24T21:09:02.116147Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit CheckDataTx 2025-06-24T21:09:02.116181Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T21:09:02.116215Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T21:09:02.116263Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v500/281474976715663 IncompleteEdge# v{min} UnprotectedReadEdge# v400/18446744073709551615 ImmediateWriteEdge# v400/18446744073709551615 ImmediateWriteEdgeReplied# v1000/18446744073709551615 2025-06-24T21:09:02.116314Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:281474976715665] at 72075186224037888 2025-06-24T21:09:02.116351Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-24T21:09:02.116384Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for 
[0:281474976715665] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:09:02.116441Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit ExecuteKqpDataTx 2025-06-24T21:09:02.116484Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit ExecuteKqpDataTx 2025-06-24T21:09:02.116570Z node 13 :TX_DATASHARD TRACE: execute_kqp_data_tx_unit.cpp:236: Operation [0:281474976715665] (execute_kqp_data_tx) at 72075186224037888 set memory limit 4193448 2025-06-24T21:09:02.116731Z node 13 :TX_DATASHARD TRACE: datashard_kqp.cpp:787: KqpEraseLock LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true 2025-06-24T21:09:02.116853Z node 13 :TX_DATASHARD TRACE: execute_kqp_data_tx_unit.cpp:482: add locks to result: 0 2025-06-24T21:09:02.116951Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-24T21:09:02.117010Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit ExecuteKqpDataTx 2025-06-24T21:09:02.117058Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:09:02.117095Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:02.117166Z node 13 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715665 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-24T21:09:02.117294Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is DelayComplete 2025-06-24T21:09:02.117335Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit FinishPropose 2025-06-24T21:09:02.117370Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:09:02.117404Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:09:02.117459Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-24T21:09:02.117490Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:09:02.117527Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715665] at 72075186224037888 has finished 2025-06-24T21:09:02.117596Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:09:02.117635Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715665] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:02.117677Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 
2025-06-24T21:09:02.117843Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:1365: ActorId: [13:942:2643] TxId: 281474976715665. Ctx: { TraceId: 01jyhwbpdb9weww8567vwsnn8v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZTA2MDMyNDctMjEzZDNhMzEtOWI4ZmQyZjMtM2RiNjJmNzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got propose result, shard: 72075186224037888, status: COMPLETE, error: 2025-06-24T21:09:02.118054Z node 13 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [13:942:2643] TxId: 281474976715665. Ctx: { TraceId: 01jyhwbpdb9weww8567vwsnn8v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZTA2MDMyNDctMjEzZDNhMzEtOWI4ZmQyZjMtM2RiNjJmNzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T21:09:02.118193Z node 13 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [13:942:2643] TxId: 281474976715665. Ctx: { TraceId: 01jyhwbpdb9weww8567vwsnn8v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZTA2MDMyNDctMjEzZDNhMzEtOWI4ZmQyZjMtM2RiNjJmNzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-24T21:09:02.118353Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=13&id=ZTA2MDMyNDctMjEzZDNhMzEtOWI4ZmQyZjMtM2RiNjJmNzg=, ActorId: [13:798:2643], ActorState: CleanupState, TraceId: 01jyhwbpdb9weww8567vwsnn8v, EndCleanup, isFinal: 0 2025-06-24T21:09:02.118579Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=13&id=ZTA2MDMyNDctMjEzZDNhMzEtOWI4ZmQyZjMtM2RiNjJmNzg=, ActorId: [13:798:2643], ActorState: CleanupState, TraceId: 01jyhwbpdb9weww8567vwsnn8v, Sent query response back to proxy, proxyRequestId: 8, proxyId: [13:57:2104] 2025-06-24T21:09:02.386878Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [13:951:2753], Recipient [13:911:2727]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:02.387039Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:02.387187Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [13:950:2752], serverId# [13:951:2753], sessionId# [0:0:0] 2025-06-24T21:09:02.387508Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553224, Sender [13:552:2478], Recipient [13:911:2727]: NKikimr::TEvDataShard::TEvGetOpenTxs >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites+EvWrite [GOOD] >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites-EvWrite >> DataShardOutOfOrder::TestLateKqpScanAfterColumnDrop+UseSink >> TestYmqHttpProxy::TestGetQueueAttributes [GOOD] >> KqpSort::ReverseFirstKeyOptimized >> TestKinesisHttpProxy::TestRequestNoAuthorization [GOOD] >> KqpSinkLocks::VisibleUncommittedRows [GOOD] >> KqpSinkLocks::VisibleUncommittedRowsUpdate >> TestKinesisHttpProxy::TestListStreamConsumers [GOOD] >> TestKinesisHttpProxy::CreateDeleteStream [GOOD] |94.2%| [TA] $(B)/ydb/core/tx/datashard/ut_snapshot/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpNewEngine::SimpleUpsertSelect [GOOD] >> KqpNewEngine::StaleRO+EnableFollowers >> TGroupMapperTest::MapperSequentialCalls ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::SnapshotROInteractive1 [GOOD] Test command err: Trying to start YDB, gRPC: 1333, MsgBus: 16331 2025-06-24T21:08:50.659668Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625868071383832:2179];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:50.660189Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031d1/r3tmp/tmpcWpWs3/pdisk_1.dat 2025-06-24T21:08:51.043290Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:51.043403Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:51.045330Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:51.047296Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625868071383691:2079] 1750799330630063 != 1750799330630066 2025-06-24T21:08:51.061949Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1333, node 1 2025-06-24T21:08:51.211133Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:51.211170Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:51.211176Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:51.211274Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16331 TClient is connected to server localhost:16331 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:08:51.704876Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:08:51.888230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:51.904490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:51.921892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:52.123041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:52.330108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:52.415702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:54.114883Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625885251254505:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:54.114962Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:54.478686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.559470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.608183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.642344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.677233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.727804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.769627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.851422Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625885251255169:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:54.851496Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:54.851642Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625885251255174:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:54.855476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:54.901506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T21:08:54.902343Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625885251255176:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:55.003400Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625885251255227:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:55.638192Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625868071383832:2179];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:55.638281Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:56.830271Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=N2E2MTBiZWYtYTY5ZWUzZDQtODE3NDIxNmItZWQyOGI2Mw==, ActorId: [1:7519625893841190091:2474], ActorState: ExecuteState, TraceId: 01jyhwbh9vcz9xh94yeatnjx26, Create QueryResponse for error on request, msg:
:3:25: Error: Operation 'Upsert' can't be performed in read only transaction, code: 2008 Trying to start YDB, gRPC: 64728, MsgBus: 9102 2025-06-24T21:08:58.125312Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625900150589166:2085];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:58.126833Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031d1/r3tmp/tmpxRmrWp/pdisk_1.dat 2025-06-24T21:08:58.288342Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:58.304406Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:58.304486Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 64728, node 2 2025-06-24T21:08:58.309748Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:58.346940Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:58.346964Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:58.346973Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:58.347092Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9102 TClient is connected to server localhost:9102 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:58.885820Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:08:58.892081Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:08:58.902202Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:58.999312Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:59.144595Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:59.181554Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:59.262648Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:01.810633Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625913035492622:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:01.810733Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:01.892131Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:01.925082Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:01.968297Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:01.997468Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:02.075599Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:02.129639Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:02.204835Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:02.286250Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625917330460580:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:02.286360Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:02.286417Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625917330460585:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:02.290942Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:02.303483Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625917330460587:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:09:02.363186Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625917330460638:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:03.127236Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625900150589166:2085];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:03.127329Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TestKinesisHttpProxy::ListShards [GOOD] >> TPart::State [GOOD] >> TPart::Trivials [GOOD] >> TPart::Basics [GOOD] >> TPart::CellDefaults [GOOD] >> TPart::Matter [GOOD] >> TPart::External [GOOD] >> TPart::Outer [GOOD] >> TPart::MassCheck >> DataShardOutOfOrder::UncommittedReadSetAck |94.2%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_snapshot/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpSinkLocks::EmptyRangeOlap [GOOD] >> KqpSinkLocks::InsertWithBulkUpsert+UseBulkUpsert >> TestYmqHttpProxy::TestDeleteQueue ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkLocks::OlapVisibleUncommittedRowsUpdate [GOOD] Test command err: Trying to start YDB, gRPC: 24979, MsgBus: 26985 2025-06-24T21:08:39.545853Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625820769545524:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:39.545910Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00322e/r3tmp/tmpmmKbzD/pdisk_1.dat 2025-06-24T21:08:40.054110Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24979, node 1 2025-06-24T21:08:40.093887Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:40.094034Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:40.096187Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:40.219685Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:40.219714Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:40.219721Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:40.219838Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26985 2025-06-24T21:08:40.545318Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server 
localhost:26985 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:40.872379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:40.898187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:08:42.776874Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625833654447828:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.776959Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625833654447836:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.777027Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.781706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:42.794433Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625833654447865:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:08:42.849859Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625833654447916:2333] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:43.219695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:43.344969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:44.459688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:44.554200Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625820769545524:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:44.554273Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:46.523824Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519625850834325220:2930], SessionActorId: [1:7519625850834325174:2930], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`, code: 2001 . sessionActorId=[1:7519625850834325174:2930]. isRollback=0 2025-06-24T21:08:46.524066Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=N2JmOWJjYjItZmQ5NWY1NzAtNzAxNjU2MmEtYjdkYjA3N2E=, ActorId: [1:7519625850834325174:2930], ActorState: ExecuteState, TraceId: 01jyhwb773931r7mxy3qxcp2gx, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [1:7519625850834325221:2930] from: [1:7519625850834325220:2930] 2025-06-24T21:08:46.524135Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519625850834325221:2930] TxId: 281474976715665. Ctx: { TraceId: 01jyhwb773931r7mxy3qxcp2gx, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2JmOWJjYjItZmQ5NWY1NzAtNzAxNjU2MmEtYjdkYjA3N2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`, code: 2001 } 2025-06-24T21:08:46.524350Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=N2JmOWJjYjItZmQ5NWY1NzAtNzAxNjU2MmEtYjdkYjA3N2E=, ActorId: [1:7519625850834325174:2930], ActorState: ExecuteState, TraceId: 01jyhwb773931r7mxy3qxcp2gx, Create QueryResponse for error on request, msg:
: Error: Transaction locks invalidated. Table: `/Root/Test`, code: 2001 Trying to start YDB, gRPC: 3061, MsgBus: 21977 2025-06-24T21:08:47.997469Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625853895421269:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:47.997534Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00322e/r3tmp/tmp961yfE/pdisk_1.dat 2025-06-24T21:08:48.122270Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625853895421073:2079] 1750799327956599 != 1750799327956602 2025-06-24T21:08:48.133881Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:48.139953Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:48.140039Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:48.142859Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3061, node 2 2025-06-24T21:08:48.189756Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:48.189777Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:48.189785Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:48.189900Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21977 TClient is connected to server localhost:21977 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 184 ... 
Kikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=64;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037913;receive=72075186224037993; 2025-06-24T21:09:00.662719Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625871075291101:2306];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=66;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037892; 2025-06-24T21:09:00.662791Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625871075291101:2306];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=67;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037892; 2025-06-24T21:09:00.662846Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625871075291101:2306];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=68;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037892; 2025-06-24T21:09:00.662902Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625871075291101:2306];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=69;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037892; 2025-06-24T21:09:00.662955Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625871075291101:2306];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=70;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037892; 2025-06-24T21:09:00.663004Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625871075291101:2306];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=71;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037892; 2025-06-24T21:09:00.663055Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625871075291101:2306];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=72;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037892; 2025-06-24T21:09:00.664037Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:09:01.860768Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:01.862167Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Complete;commit_tx_id=281474976715667;commit_lock_id=281474976715666;fline=manager.cpp:94;broken_lock_id=281474976715665; 2025-06-24T21:09:01.968125Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: 
tablet_id=72075186224037897;self_id=[2:7519625871075291097:2304];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=tablet lock have another internal generation counter: 18446744073709551615 != 0;tx_id=281474976715669; 2025-06-24T21:09:01.969355Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2751: SelfId: [2:7519625914024973830:3905], SessionActorId: [2:7519625909730006010:3905], Got LOCKS BROKEN for table. ShardID=72075186224037897, Sink=[2:7519625914024973830:3905].{
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 } 2025-06-24T21:09:01.969506Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519625914024973830:3905], SessionActorId: [2:7519625909730006010:3905], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 . sessionActorId=[2:7519625909730006010:3905]. isRollback=0 2025-06-24T21:09:01.969863Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=Y2Q4YWZmZjUtZWI2YTI4MDktNDViMWQ3YzctYzExOTViYjA=, ActorId: [2:7519625909730006010:3905], ActorState: ExecuteState, TraceId: 01jyhwbp9m9x0d7eazdebpm825, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7519625914024974050:3905] from: [2:7519625914024973830:3905] 2025-06-24T21:09:01.969942Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519625914024974050:3905] TxId: 281474976715669. Ctx: { TraceId: 01jyhwbp9m9x0d7eazdebpm825, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Y2Q4YWZmZjUtZWI2YTI4MDktNDViMWQ3YzctYzExOTViYjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001 subissue: {
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 } } 2025-06-24T21:09:01.970111Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=Y2Q4YWZmZjUtZWI2YTI4MDktNDViMWQ3YzctYzExOTViYjA=, ActorId: [2:7519625909730006010:3905], ActorState: ExecuteState, TraceId: 01jyhwbp9m9x0d7eazdebpm825, Create QueryResponse for error on request, msg: 2025-06-24T21:09:01.971857Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037888 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-24T21:09:01.971969Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[2:7519625871075291111:2312];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037888;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:01.972316Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037889 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-24T21:09:01.972382Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[2:7519625871075291110:2311];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037889;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:01.972545Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037895 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-24T21:09:01.972592Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[2:7519625871075291098:2305];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037895;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:01.972705Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037896 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-24T21:09:01.972747Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625871075291101:2306];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037896;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:01.972805Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037890 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-24T21:09:01.972846Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[2:7519625871075291104:2309];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037890;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:01.972991Z node 2 :TX_COLUMNSHARD 
WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037891 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-24T21:09:01.973037Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[2:7519625871075291127:2313];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037891;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:01.973314Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037892 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-24T21:09:01.973385Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[2:7519625871075291103:2308];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037892;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:01.973533Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037893 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-24T21:09:01.973577Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[2:7519625871075291102:2307];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037893;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:01.973702Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037894 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-24T21:09:01.973748Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[2:7519625871075291109:2310];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037894;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:01.973861Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[2:7519625871075291097:2304];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0;
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 >> TPart::MassCheck [GOOD] >> TPart::WreckPart >> DataShardTxOrder::ImmediateBetweenOnline_Init [GOOD] >> TestKinesisHttpProxy::TestListStreamConsumersWithMaxResults >> TestKinesisHttpProxy::TestUnauthorizedPutRecords >> KqpLocksTricky::TestNoLocksIssue-withSink [GOOD] >> KqpLocksTricky::TestNoLocksIssueInteractiveTx+withSink >> DataShardTxOrder::ImmediateBetweenOnline_oo8_dirty >> TestYmqHttpProxy::TestCreateQueueWithAllAttributes [GOOD] >> TestKinesisHttpProxy::CreateDeleteStreamWithConsumer >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesTrivial [GOOD] >> TestKinesisHttpProxy::ListShardsEmptyFields >> KqpLocks::InvalidateOnCommit [GOOD] >> KqpLocks::MixedTxFail+useSink >> DataShardOutOfOrder::TestSnapshotReadPriority [GOOD] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesTrivial [GOOD] >> TestYmqHttpProxy::BillingRecordsForJsonApi >> TestYmqHttpProxy::TestReceiveMessageWithAttemptId [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ImmediateBetweenOnline_Init [GOOD] Test command err: 2025-06-24T21:08:59.774878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:59.774962Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:59.784005Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:08:59.804433Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:08:59.804974Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:08:59.805255Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:08:59.865177Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:08:59.873627Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:08:59.873879Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:08:59.875434Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:08:59.875505Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:08:59.875548Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:08:59.875892Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:08:59.875994Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:08:59.876054Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:08:59.941628Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:08:59.982868Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme 
tabletId 9437184 2025-06-24T21:08:59.983062Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:08:59.983216Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:08:59.983260Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:08:59.983301Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:08:59.983341Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:08:59.983485Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:08:59.983534Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:08:59.983794Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:08:59.983895Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:08:59.984045Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:08:59.984088Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:08:59.984138Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:08:59.984172Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:08:59.984221Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:08:59.984276Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:08:59.984321Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:08:59.984415Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:59.984449Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:59.984499Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:08:59.987556Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:08:59.987628Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:08:59.987714Z node 
1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:08:59.987879Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:08:59.987926Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:08:59.987981Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:08:59.988025Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:08:59.988058Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:08:59.988090Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:08:59.988144Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:08:59.988448Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:08:59.988485Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:08:59.988519Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:08:59.988573Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:08:59.988639Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:08:59.988671Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:08:59.988708Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:08:59.988744Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:08:59.988785Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:00.008881Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:00.008973Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:00.009013Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:00.009049Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:00.009128Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:09:00.009646Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:00.009693Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, 
processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:00.009731Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:00.009857Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:00.009884Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:00.010024Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:00.010092Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:09:00.010127Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:09:00.010172Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:00.013695Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:00.013763Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:00.013990Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:00.014050Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:00.014128Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:00.014176Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:00.014217Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:00.014264Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:09:00.014307Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
[1000005:152] at 9437186 on unit CompletedOperations 2025-06-24T21:09:05.975507Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:152] at 9437186 is Executed 2025-06-24T21:09:05.975527Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:152] at 9437186 executing on unit CompletedOperations 2025-06-24T21:09:05.975546Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000005:152] at 9437186 has finished 2025-06-24T21:09:05.975566Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:05.975595Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437186 2025-06-24T21:09:05.975618Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437186 has no attached operations 2025-06-24T21:09:05.975636Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437186 2025-06-24T21:09:05.975885Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2025-06-24T21:09:05.975919Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:05.975942Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 154 2025-06-24T21:09:05.976010Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 104 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 34} 2025-06-24T21:09:05.976031Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:05.976055Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 104 2025-06-24T21:09:05.976131Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 110 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 36} 2025-06-24T21:09:05.976148Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:05.976172Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 110 2025-06-24T21:09:05.976229Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 113 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 37} 2025-06-24T21:09:05.976246Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:05.976260Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 113 2025-06-24T21:09:05.976299Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 
269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 116 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 38} 2025-06-24T21:09:05.976320Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:05.976343Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 116 2025-06-24T21:09:05.976408Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 119 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 39} 2025-06-24T21:09:05.976428Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:05.976451Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 119 2025-06-24T21:09:05.976500Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 122 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 40} 2025-06-24T21:09:05.976528Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:05.976559Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 122 2025-06-24T21:09:05.976634Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 125 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 41} 2025-06-24T21:09:05.976662Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:05.976686Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 125 2025-06-24T21:09:05.976742Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 128 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 42} 2025-06-24T21:09:05.976769Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:05.976790Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 128 2025-06-24T21:09:05.976869Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 131 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 43} 2025-06-24T21:09:05.976894Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:05.976935Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 131 2025-06-24T21:09:05.977056Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 
269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 134 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 44} 2025-06-24T21:09:05.977085Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:05.977121Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 134 2025-06-24T21:09:05.977240Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 137 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 45} 2025-06-24T21:09:05.977274Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:05.977297Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 137 2025-06-24T21:09:05.977367Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 2025-06-24T21:09:05.977390Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:05.977427Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 140 2025-06-24T21:09:05.977523Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2025-06-24T21:09:05.977548Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:05.977571Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 143 2025-06-24T21:09:05.977637Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2025-06-24T21:09:05.977661Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:05.977710Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 146 2025-06-24T21:09:05.977793Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2025-06-24T21:09:05.977831Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:05.977863Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 149 2025-06-24T21:09:05.996389Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: 
TTxProgressTransaction::Complete at 9437186 2025-06-24T21:09:05.996478Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:152] at 9437186 on unit CompleteOperation 2025-06-24T21:09:05.996556Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 152] from 9437186 at tablet 9437186 send result to client [1:102:2135], exec latency: 1 ms, propose latency: 3 ms 2025-06-24T21:09:05.996654Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-24T21:09:05.996714Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-24T21:09:05.997035Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-24T21:09:05.997085Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:05.997125Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 152
>> TestKinesisHttpProxy::GoodRequestGetRecords [GOOD]
>> TPart::WreckPart [GOOD]
>> TPart::PageFailEnv
>> DataShardScan::ScanFollowedByUpdate
>> KqpSinkTx::DeferredEffects [GOOD]
>> DataShardOutOfOrder::UncommittedReads
>> TestYmqHttpProxy::TestListQueues
>> TestKinesisHttpProxy::GoodRequestGetRecordsCbor
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestSnapshotReadPriority [GOOD]
Test command err: 2025-06-24T21:09:00.476546Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:00.476957Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:00.477140Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002db9/r3tmp/tmp1EzaFl/pdisk_1.dat 2025-06-24T21:09:00.842521Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:09:00.852923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:00.875450Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:183: tablet# 72057594046316545 txid# 1 HANDLE EvProposeTransaction marker# C0 2025-06-24T21:09:00.875538Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:29: tablet# 72057594046316545 txid# 1 step# 500 Status# 16 SEND to# [1:371:2365] Proxy marker# C1 2025-06-24T21:09:00.907820Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:00.913434Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799337112626 != 1750799337112630 2025-06-24T21:09:00.962183Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:00.962322Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:00.974106Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:01.055876Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 1 has been planned 2025-06-24T21:09:01.055990Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 1 for mediator 72057594046382081 tablet 72057594046644480 2025-06-24T21:09:01.056243Z node 1 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 1000 in 0.500000s at 0.950000s 2025-06-24T21:09:01.056628Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 500, txid# 1 marker# C2 2025-06-24T21:09:01.056699Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 1 stepId# 500 Status# 17 SEND EvProposeTransactionStatus to# [1:371:2365] Proxy 2025-06-24T21:09:01.058896Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2025-06-24T21:09:01.058998Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:6] persistent tx 1 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2025-06-24T21:09:01.059061Z node 1 
:TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:6] persistent tx 1 for mediator 72057594046382081 acknowledged 2025-06-24T21:09:01.059111Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:6] persistent tx 1 acknowledged 2025-06-24T21:09:01.065003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:01.113942Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:615:2523], Recipient [1:624:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:01.115385Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:615:2523], Recipient [1:624:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:01.115912Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:624:2529] 2025-06-24T21:09:01.116183Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:01.179763Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:615:2523], Recipient [1:624:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:01.180584Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:01.180750Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:01.182737Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:09:01.182826Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:09:01.182900Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:09:01.183354Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:01.183530Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:01.183652Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:640:2529] in generation 1 2025-06-24T21:09:01.184182Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:01.210912Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:09:01.211139Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:01.211323Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:642:2539] 2025-06-24T21:09:01.211374Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:09:01.211433Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:09:01.211482Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:09:01.211757Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:624:2529], Recipient [1:624:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:01.211815Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:01.212222Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:09:01.212335Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:09:01.212411Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:09:01.212452Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:01.212509Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:09:01.212550Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:09:01.212619Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:09:01.212670Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:09:01.212732Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:09:01.212878Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:629:2531], Recipient [1:624:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:01.212919Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:01.212985Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:620:2526], serverId# [1:629:2531], sessionId# [0:0:0] 2025-06-24T21:09:01.213482Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:629:2531] 2025-06-24T21:09:01.213537Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:01.213649Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:09:01.213870Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:09:01.213953Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:09:01.214078Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:09:01.214137Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:09:01.214179Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: 
Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T21:09:01.214221Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T21:09:01.214263Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:01.214645Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:01.214694Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T21:09:01.214734Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:09:01.214776Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:01.214843Z node 1 :TX_DATASHARD TRACE: datashard_ ... MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC RangesSize: 1 2025-06-24T21:09:06.582577Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-24T21:09:06.582658Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit CheckRead 2025-06-24T21:09:06.582756Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-24T21:09:06.582809Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit CheckRead 2025-06-24T21:09:06.582851Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T21:09:06.582888Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T21:09:06.582940Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 72075186224037888 2025-06-24T21:09:06.582984Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-24T21:09:06.583011Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:09:06.583036Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T21:09:06.583062Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit ExecuteRead 2025-06-24T21:09:06.583204Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 3500 TxId: 18446744073709551615 } LockTxId: 281474976715682 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-06-24T21:09:06.583491Z node 1 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 
Acquired lock# 281474976715682, counter# 18446744073709551612 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T21:09:06.583547Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3500/18446744073709551615 2025-06-24T21:09:06.583597Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[1:1376:3028], 0} after executionsCount# 1 2025-06-24T21:09:06.583655Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[1:1376:3028], 0} sends rowCount# 5, bytes# 160, quota rows left# 996, quota bytes left# 5242720, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T21:09:06.583731Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[1:1376:3028], 0} finished in read 2025-06-24T21:09:06.583796Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-24T21:09:06.583829Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T21:09:06.583861Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:09:06.583892Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:09:06.583941Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-24T21:09:06.583965Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:09:06.583996Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 72075186224037888 has finished 2025-06-24T21:09:06.584038Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T21:09:06.584141Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-24T21:09:06.585199Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [1:1376:3028], Recipient [1:1317:2993]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T21:09:06.585266Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 3 } items { uint32_value: 3 } }, { items { uint32_value: 5 } items { uint32_value: 5 } }, { items { uint32_value: 7 } items { uint32_value: 7 } }, { items { uint32_value: 9 } items { uint32_value: 9 } } 2025-06-24T21:09:06.756783Z node 1 :TX_COORDINATOR DEBUG: coordinator__acquire_read_step.cpp:97: tablet# 72057594046316545 HANDLE TEvAcquireReadStep 2025-06-24T21:09:06.756891Z node 1 :TX_COORDINATOR TRACE: coordinator_impl.cpp:293: Coordinator# 72057594046316545 scheduling step 4500 in 0.499900s at 4.450000s 2025-06-24T21:09:06.758249Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715685. 
Ctx: { TraceId: 01jyhwbtwyc2xcs2x40s635rh2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjJlNDY2MjMtNzU0ZTI0YmUtNzQ2YzY4OGItMzdiZDU0NWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:06.760121Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [1:1400:3045], Recipient [1:1317:2993]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 4000 TxId: 18446744073709551615 } LockTxId: 281474976715685 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC RangesSize: 1 2025-06-24T21:09:06.760385Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-24T21:09:06.760471Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CheckRead 2025-06-24T21:09:06.760571Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-24T21:09:06.760620Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CheckRead 2025-06-24T21:09:06.760665Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T21:09:06.760724Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T21:09:06.760826Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 72075186224037888 2025-06-24T21:09:06.760869Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-24T21:09:06.760900Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:09:06.760924Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T21:09:06.760946Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit ExecuteRead 2025-06-24T21:09:06.761073Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 4000 TxId: 18446744073709551615 } LockTxId: 281474976715685 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-06-24T21:09:06.761375Z node 1 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715685, counter# 0 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T21:09:06.761427Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v4000/18446744073709551615 2025-06-24T21:09:06.761474Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[1:1400:3045], 0} after executionsCount# 1 2025-06-24T21:09:06.761524Z node 1 :TX_DATASHARD TRACE: 
datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[1:1400:3045], 0} sends rowCount# 6, bytes# 192, quota rows left# 995, quota bytes left# 5242688, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T21:09:06.761626Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[1:1400:3045], 0} finished in read 2025-06-24T21:09:06.761712Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-24T21:09:06.761755Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T21:09:06.761783Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:09:06.761808Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:09:06.761855Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-24T21:09:06.761879Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:09:06.761905Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:3] at 72075186224037888 has finished 2025-06-24T21:09:06.761941Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T21:09:06.762041Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-24T21:09:06.762356Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [1:61:2108], Recipient [1:1317:2993]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715685 LockNode: 1 Status: STATUS_SUBSCRIBED 2025-06-24T21:09:06.763250Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [1:1400:3045], Recipient [1:1317:2993]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T21:09:06.763318Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 3 } items { uint32_value: 3 } }, { items { uint32_value: 5 } items { uint32_value: 5 } }, { items { uint32_value: 7 } items { uint32_value: 7 } }, { items { uint32_value: 9 } items { uint32_value: 9 } }, { items { uint32_value: 11 } items { uint32_value: 11 } }
>> test_sql_streaming.py::test[suites-GroupByHopPercentile-default.txt] [FAIL]
>> KqpTx::BeginTransactionBadMode [GOOD]
>> TPart::PageFailEnv [GOOD]
>> TPart::ForwardEnv
>> KqpNotNullColumns::CreateTableWithDisabledNotNullDataColumns
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkTx::DeferredEffects [GOOD]
Test command err: Trying to start YDB, gRPC: 25478, MsgBus: 18479 2025-06-24T21:08:38.711207Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625813352395625:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:38.711272Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00325e/r3tmp/tmpELr1JQ/pdisk_1.dat 2025-06-24T21:08:39.158360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:39.158490Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:39.167499Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:39.236826Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:39.238095Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625813352395602:2079] 1750799318709167 != 1750799318709170 TServer::EnableGrpc on GrpcPort 25478, node 1 2025-06-24T21:08:39.343057Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:39.343083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:39.343093Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:39.343228Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:39.756507Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18479 TClient is connected to server localhost:18479 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:40.145171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:41.811982Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625826237298142:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:41.812002Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625826237298128:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:41.812128Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:41.815844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:41.826741Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625826237298144:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:08:41.921816Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625826237298195:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:42.439278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:42.542912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:43.601122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:43.715382Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625813352395625:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:43.716389Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:54.107256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:08:54.107289Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:58.985240Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [1:7519625899251751331:3228], TxId: 281474976710678, task: 1. Ctx: { TraceId : 01jyhwbkezdp6js46fga431kja. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=Nzc1NTM5YzItOWE4OTRlZTItMjk0YjQ5OGUtNmY1NTg5MDc=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Source[0] fatal error: {
: Error: Read request aborted subissue: {
: Error: Table id 7 has no snapshot at v1750799325044/18446744073709551615 shard 72075186224037889 with lowWatermark v1750799325436/18446744073709551615 (node# 1 state# Ready) } } 2025-06-24T21:08:58.985761Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519625899251751331:3228], TxId: 281474976710678, task: 1. Ctx: { TraceId : 01jyhwbkezdp6js46fga431kja. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=Nzc1NTM5YzItOWE4OTRlZTItMjk0YjQ5OGUtNmY1NTg5MDc=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: ABORTED DEFAULT_ERROR: {
: Error: Read request aborted subissue: {
: Error: Table id 7 has no snapshot at v1750799325044/18446744073709551615 shard 72075186224037889 with lowWatermark v1750799325436/18446744073709551615 (node# 1 state# Ready) } }. 2025-06-24T21:08:58.986396Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7519625899251751332:3229], TxId: 281474976710678, task: 2. Ctx: { TraceId : 01jyhwbkezdp6js46fga431kja. SessionId : ydb://session/3?node_id=1&id=Nzc1NTM5YzItOWE4OTRlZTItMjk0YjQ5OGUtNmY1NTg5MDc=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [1:7519625899251751327:2930], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T21:08:58.986835Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=Nzc1NTM5YzItOWE4OTRlZTItMjk0YjQ5OGUtNmY1NTg5MDc=, ActorId: [1:7519625843417175462:2930], ActorState: ExecuteState, TraceId: 01jyhwbkezdp6js46fga431kja, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 18235, MsgBus: 16857 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00325e/r3tmp/tmpsSPvn9/pdisk_1.dat 2025-06-24T21:09:00.513836Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:00.573721Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:00.585876Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625909066472286:2079] 1750799340225932 != 1750799340225935 2025-06-24T21:09:00.586439Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:00.586551Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:00.589878Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18235, node 2 2025-06-24T21:09:00.695710Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:00.695735Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:00.695743Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:00.695880Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16857 2025-06-24T21:09:01.255951Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16857 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:09:01.349249Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:01.357490Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:09:03.967481Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625921951374819:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:03.967551Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625921951374810:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:03.967614Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:03.971948Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:03.983938Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625921951374824:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:09:04.063901Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625926246342171:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:04.114642Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:04.186816Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:05.319198Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667)
>> DataShardOutOfOrder::TestSnapshotReadAfterStuckRW [GOOD]
>> TPart::ForwardEnv [GOOD]
>> TPart::WreckPartColumnGroups
|94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/streaming_optimize/py3test >> test_sql_streaming.py::test[suites-GroupByHopPercentile-default.txt] [FAIL]
>> KqpSinkTx::OlapInvalidateOnError [GOOD]
>> KqpSinkTx::OlapInteractive
>> TGroupMapperTest::InterlacedRacksWithoutInterlacedNodes [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::BeginTransactionBadMode [GOOD]
Test command err: Trying to start YDB, gRPC: 7155, MsgBus: 8031 2025-06-24T21:08:44.511649Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625841953339358:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:44.524786Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031f2/r3tmp/tmph4sqMA/pdisk_1.dat 2025-06-24T21:08:45.050302Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625841953339319:2079] 1750799324486874 != 1750799324486877 2025-06-24T21:08:45.057946Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:45.064235Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:45.064333Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:45.115558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897
Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7155, node 1 2025-06-24T21:08:45.165178Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:45.165206Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:45.165221Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:45.165326Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8031 2025-06-24T21:08:45.521988Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8031 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:45.878276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:45.906603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:45.913490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:46.048568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:46.226922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:46.293200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:47.875600Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625854838242856:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:47.875699Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:48.158419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:48.187992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:48.217729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:48.248446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:48.276818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:48.352105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:48.382870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:48.468681Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625859133210817:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:48.468773Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:48.469034Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625859133210822:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:48.472856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:48.492455Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625859133210824:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:48.561530Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625859133210877:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:49.517960Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625841953339358:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:49.518039Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:09:00.045322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:09:00.045350Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:01.793229Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [1:7519625914967786374 ... execution } 2025-06-24T21:09:01.794611Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZDlkNmZiODYtZDY1OGJiYTMtZDgzYWRkMzktZmE1OGQ2ZmQ=, ActorId: [1:7519625863428178402:2464], ActorState: ExecuteState, TraceId: 01jyhwbp0d7bkbvpk1y7pys55j, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 2185, MsgBus: 17474 2025-06-24T21:09:02.681372Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625918843157458:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:02.681439Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031f2/r3tmp/tmprCk0wq/pdisk_1.dat 2025-06-24T21:09:02.844980Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:02.845691Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625918843157440:2079] 1750799342680769 != 1750799342680772 2025-06-24T21:09:02.857842Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:02.857923Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:02.859345Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2185, node 2 2025-06-24T21:09:02.939824Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:02.939861Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:02.939880Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:02.940076Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17474 TClient is connected 
to server localhost:17474 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:09:03.508230Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:03.523055Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:03.599271Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:03.718749Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:03.765638Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:03.846363Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:06.141145Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625936023028258:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:06.141235Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:06.213783Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:06.254614Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:06.297318Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:06.335695Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:06.374065Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:06.453604Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:06.508541Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:06.647670Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625936023028918:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:06.647758Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:06.648111Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625936023028923:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:06.653198Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:06.669645Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-24T21:09:06.670280Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625936023028925:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:09:06.740862Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625936023028978:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:07.683295Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625918843157458:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:07.683377Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::InterlacedRacksWithoutInterlacedNodes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestSnapshotReadAfterStuckRW [GOOD] Test command err: 2025-06-24T21:09:05.840209Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:05.840583Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:05.840743Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002d9c/r3tmp/tmpi4fejT/pdisk_1.dat 2025-06-24T21:09:06.207382Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:09:06.210940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:06.263068Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:06.269308Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799342830825 != 1750799342830829 2025-06-24T21:09:06.315293Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:06.315460Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:06.327575Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:06.408578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:06.449438Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:06.450656Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:06.451227Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:09:06.451489Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:06.504134Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:06.504887Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:06.505039Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:06.506834Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T21:09:06.506919Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:09:06.506970Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:09:06.507483Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:06.507635Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:06.507710Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:09:06.519205Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:06.560684Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:09:06.560865Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:06.560972Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:09:06.561060Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:09:06.561114Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:09:06.561154Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:09:06.561370Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:06.561420Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:06.561799Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:09:06.561896Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:09:06.561953Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:09:06.561993Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:06.562049Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:09:06.562110Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:09:06.562144Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:09:06.562194Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:09:06.562248Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:09:06.562370Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:06.562422Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:06.562467Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:09:06.562883Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:09:06.562928Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:06.563020Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:09:06.563255Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:09:06.563308Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:09:06.563407Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:09:06.563474Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:09:06.563528Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T21:09:06.563583Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T21:09:06.563615Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:06.563888Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:06.563925Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T21:09:06.563962Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:09:06.563991Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:06.564048Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T21:09:06.564075Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T21:09:06.564114Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T21:09:06.564156Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T21:09:06.564181Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T21:09:06.565676Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T21:09:06.565730Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:09:06.576415Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:09:06.576486Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:06.576523Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:06.576572Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... d_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 281474976715664 ... performing the first select 2025-06-24T21:09:09.344909Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhwbx5eamk8ya3rhzxa6j4r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTZkZmU0ZGYtOTFkMjcyZGYtODAzOTE5M2YtZmYxNTgwZTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:09.348777Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [1:1035:2805], Recipient [1:625:2529]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 4000 TxId: 18446744073709551615 } LockTxId: 281474976715665 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC KeysSize: 1 2025-06-24T21:09:09.349427Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-24T21:09:09.349535Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CheckRead 2025-06-24T21:09:09.349698Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-24T21:09:09.349744Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CheckRead 2025-06-24T21:09:09.349784Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T21:09:09.349847Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T21:09:09.349905Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:5] at 72075186224037888 2025-06-24T21:09:09.349947Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-24T21:09:09.349975Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] 
at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:09:09.350000Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T21:09:09.350032Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit ExecuteRead 2025-06-24T21:09:09.350197Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 4000 TxId: 18446744073709551615 } LockTxId: 281474976715665 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-06-24T21:09:09.350462Z node 1 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715665, counter# 1 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T21:09:09.350517Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v4000/18446744073709551615 2025-06-24T21:09:09.350573Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[1:1035:2805], 0} after executionsCount# 1 2025-06-24T21:09:09.350635Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[1:1035:2805], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T21:09:09.350718Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[1:1035:2805], 0} finished in read 2025-06-24T21:09:09.350801Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-24T21:09:09.350845Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T21:09:09.350873Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:09:09.350909Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:09:09.350961Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-24T21:09:09.350983Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:09:09.351011Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:5] at 72075186224037888 has finished 2025-06-24T21:09:09.351059Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T21:09:09.351189Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-24T21:09:09.351547Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [1:61:2108], Recipient [1:625:2529]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715665 LockNode: 1 Status: STATUS_SUBSCRIBED 2025-06-24T21:09:09.351695Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3132: StateWork, received event# 269553215, Sender [1:1037:2806], Recipient [1:711:2589]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 3 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 4000 TxId: 18446744073709551615 } LockTxId: 281474976715665 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC KeysSize: 1 2025-06-24T21:09:09.351812Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037889, FollowerId 0 2025-06-24T21:09:09.351877Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037889 on unit CheckRead 2025-06-24T21:09:09.351945Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037889 is Executed 2025-06-24T21:09:09.351973Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037889 executing on unit CheckRead 2025-06-24T21:09:09.351998Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-24T21:09:09.352036Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-24T21:09:09.352077Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:5] at 72075186224037889 2025-06-24T21:09:09.352109Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037889 is Executed 2025-06-24T21:09:09.352132Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-24T21:09:09.352160Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037889 to execution unit ExecuteRead 2025-06-24T21:09:09.352183Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037889 on unit ExecuteRead 2025-06-24T21:09:09.352290Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037889 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 3 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 4000 TxId: 18446744073709551615 } LockTxId: 281474976715665 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-06-24T21:09:09.352512Z node 1 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037889 Acquired lock# 281474976715665, counter# 1 for [OwnerId: 72057594046644480, LocalPathId: 3] 2025-06-24T21:09:09.352563Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037889 promoting UnprotectedReadEdge to v4000/18446744073709551615 2025-06-24T21:09:09.352599Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037889 Complete read# {[1:1037:2806], 0} after executionsCount# 1 2025-06-24T21:09:09.352641Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037889 read iterator# {[1:1037:2806], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T21:09:09.352696Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037889 read 
iterator# {[1:1037:2806], 0} finished in read 2025-06-24T21:09:09.352752Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037889 is Executed 2025-06-24T21:09:09.352790Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037889 executing on unit ExecuteRead 2025-06-24T21:09:09.352816Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037889 to execution unit CompletedOperations 2025-06-24T21:09:09.352841Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037889 on unit CompletedOperations 2025-06-24T21:09:09.352880Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037889 is Executed 2025-06-24T21:09:09.352900Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037889 executing on unit CompletedOperations 2025-06-24T21:09:09.352939Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:5] at 72075186224037889 has finished 2025-06-24T21:09:09.352980Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2025-06-24T21:09:09.353051Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2025-06-24T21:09:09.353301Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [1:61:2108], Recipient [1:711:2589]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715665 LockNode: 1 Status: STATUS_SUBSCRIBED 2025-06-24T21:09:09.353986Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [1:1035:2805], Recipient [1:625:2529]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T21:09:09.354048Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-24T21:09:09.355796Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [1:1037:2806], Recipient [1:711:2589]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T21:09:09.355847Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037889 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } } >> KqpSinkTx::OlapLocksAbortOnCommit [GOOD] >> KqpSinkTx::OlapSnapshotRO >> KqpSinkTx::OlapExplicitTcl [GOOD] >> DataShardTxOrder::ImmediateBetweenOnline_Init_oo8 [GOOD] >> DataShardScan::ScanFollowedByUpdate [GOOD] |94.2%| [TA] $(B)/ydb/tests/fq/streaming_optimize/test-results/py3test/{meta.json ... results_accumulator.log} >> DataShardOutOfOrder::TestOutOfOrderNoBarrierRestartImmediateLongTail [GOOD] >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix-Nullable-Covered [GOOD] >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix-Nullable+Covered >> DataShardTxOrder::RandomPoints_DelayRS |94.2%| [TA] {RESULT} $(B)/ydb/tests/fq/streaming_optimize/test-results/py3test/{meta.json ... 
results_accumulator.log} >> TPart::WreckPartColumnGroups [GOOD] >> TPart::PageFailEnvColumnGroups >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites-EvWrite [GOOD] >> TGroupMapperTest::Block42_1disk >> DataShardOutOfOrder::TestLateKqpScanAfterColumnDrop+UseSink [GOOD] >> DataShardOutOfOrder::TestLateKqpScanAfterColumnDrop-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardScan::ScanFollowedByUpdate [GOOD] Test command err: 2025-06-24T21:09:08.597618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:09:08.597691Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:08.599512Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:08.612546Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:08.613088Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:09:08.613386Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:08.663520Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:08.671634Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:08.671912Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:08.673735Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:09:08.673818Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:09:08.673884Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:09:08.674332Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:08.674455Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:08.674534Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:09:08.759743Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:08.801824Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:09:08.802048Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:08.802199Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:09:08.802246Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:08.802287Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:09:08.802336Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:08.802508Z node 1 :TX_DATASHARD 
TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:08.802568Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:08.802855Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:08.802955Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:08.803114Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:08.803188Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:08.803266Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:08.803311Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:08.803350Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:08.803387Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:08.803435Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:08.803550Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:08.803586Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:08.803643Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:09:08.812692Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nK\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\n \000Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:09:08.812788Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:08.812883Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:08.813089Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:09:08.813150Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:09:08.813203Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:09:08.813248Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:08.813292Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance 
execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:09:08.813330Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:09:08.813365Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:08.813698Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:08.813736Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:09:08.813774Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:08.813824Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:08.813880Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:09:08.813951Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:08.813998Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:09:08.814051Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:08.814082Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:08.828523Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:08.828631Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:08.828679Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:08.828729Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:08.828822Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:09:08.829436Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:08.829503Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:08.829554Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:08.829705Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:08.829744Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:08.829890Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:08.829940Z node 1 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:09:08.830013Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:09:08.830062Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:08.842112Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:08.842216Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:08.842463Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:08.842522Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:08.842590Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:08.842641Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:08.842683Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:08.842764Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:09:08.842826Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100 ... 
on::Execute at 9437184 2025-06-24T21:09:10.974542Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:09:10.974577Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [1000006:36] at 9437184 for ReadTableScan 2025-06-24T21:09:10.974621Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437184 on unit ReadTableScan 2025-06-24T21:09:10.974661Z node 1 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [1000006:36] at 9437184 error: , IsFatalError: 0 2025-06-24T21:09:10.974710Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437184 is Executed 2025-06-24T21:09:10.974756Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437184 executing on unit ReadTableScan 2025-06-24T21:09:10.974787Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000006:36] at 9437184 to execution unit CompleteOperation 2025-06-24T21:09:10.974814Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437184 on unit CompleteOperation 2025-06-24T21:09:10.975003Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437184 is DelayComplete 2025-06-24T21:09:10.975038Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437184 executing on unit CompleteOperation 2025-06-24T21:09:10.975074Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000006:36] at 9437184 to execution unit CompletedOperations 2025-06-24T21:09:10.975103Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437184 on unit CompletedOperations 2025-06-24T21:09:10.975133Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437184 is Executed 2025-06-24T21:09:10.975206Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437184 executing on unit CompletedOperations 2025-06-24T21:09:10.975233Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000006:36] at 9437184 has finished 2025-06-24T21:09:10.975291Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:10.975338Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:10.975378Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:10.975410Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:10.975639Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:347:2312], Recipient [1:347:2312]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:10.975675Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:10.975721Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437185 2025-06-24T21:09:10.975749Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 active 1 active planned 1 
immediate 0 planned 1 2025-06-24T21:09:10.975819Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [1000006:36] at 9437185 for ReadTableScan 2025-06-24T21:09:10.975846Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437185 on unit ReadTableScan 2025-06-24T21:09:10.975876Z node 1 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [1000006:36] at 9437185 error: , IsFatalError: 0 2025-06-24T21:09:10.975913Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437185 is Executed 2025-06-24T21:09:10.975960Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437185 executing on unit ReadTableScan 2025-06-24T21:09:10.975993Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000006:36] at 9437185 to execution unit CompleteOperation 2025-06-24T21:09:10.976018Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437185 on unit CompleteOperation 2025-06-24T21:09:10.976204Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437185 is DelayComplete 2025-06-24T21:09:10.976244Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437185 executing on unit CompleteOperation 2025-06-24T21:09:10.976268Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000006:36] at 9437185 to execution unit CompletedOperations 2025-06-24T21:09:10.976291Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437185 on unit CompletedOperations 2025-06-24T21:09:10.976319Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437185 is Executed 2025-06-24T21:09:10.976339Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437185 executing on unit CompletedOperations 2025-06-24T21:09:10.976364Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000006:36] at 9437185 has finished 2025-06-24T21:09:10.976389Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:10.976415Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-24T21:09:10.976447Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437185 has no attached operations 2025-06-24T21:09:10.976483Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437185 2025-06-24T21:09:10.976620Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:459:2398], Recipient [1:459:2398]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:10.976651Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:10.976689Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437186 2025-06-24T21:09:10.976713Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:09:10.976739Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [1000006:36] at 9437186 for 
ReadTableScan 2025-06-24T21:09:10.976774Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437186 on unit ReadTableScan 2025-06-24T21:09:10.976802Z node 1 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [1000006:36] at 9437186 error: , IsFatalError: 0 2025-06-24T21:09:10.976830Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437186 is Executed 2025-06-24T21:09:10.976854Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437186 executing on unit ReadTableScan 2025-06-24T21:09:10.976886Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000006:36] at 9437186 to execution unit CompleteOperation 2025-06-24T21:09:10.976913Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437186 on unit CompleteOperation 2025-06-24T21:09:10.977051Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437186 is DelayComplete 2025-06-24T21:09:10.977101Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437186 executing on unit CompleteOperation 2025-06-24T21:09:10.977125Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000006:36] at 9437186 to execution unit CompletedOperations 2025-06-24T21:09:10.977147Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437186 on unit CompletedOperations 2025-06-24T21:09:10.977174Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437186 is Executed 2025-06-24T21:09:10.977194Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437186 executing on unit CompletedOperations 2025-06-24T21:09:10.977217Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000006:36] at 9437186 has finished 2025-06-24T21:09:10.977240Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:10.977260Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437186 2025-06-24T21:09:10.977291Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437186 has no attached operations 2025-06-24T21:09:10.977319Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437186 2025-06-24T21:09:10.992873Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-24T21:09:10.992949Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-24T21:09:10.992986Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000006:36] at 9437185 on unit CompleteOperation 2025-06-24T21:09:10.993053Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000006 : 36] from 9437185 at tablet 9437185 send result to client [1:102:2135], exec latency: 4 ms, propose latency: 5 ms 2025-06-24T21:09:10.993107Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-24T21:09:10.994987Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-24T21:09:10.995075Z node 1 :TX_DATASHARD DEBUG: 
datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-24T21:09:10.995113Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000006:36] at 9437186 on unit CompleteOperation 2025-06-24T21:09:10.995197Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000006 : 36] from 9437186 at tablet 9437186 send result to client [1:102:2135], exec latency: 4 ms, propose latency: 6 ms 2025-06-24T21:09:10.995244Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-24T21:09:10.995386Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:10.995412Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:10.995434Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000006:36] at 9437184 on unit CompleteOperation 2025-06-24T21:09:10.995469Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000006 : 36] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 4 ms, propose latency: 6 ms 2025-06-24T21:09:10.995494Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ImmediateBetweenOnline_Init_oo8 [GOOD] Test command err: 2025-06-24T21:09:04.780304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:09:04.780354Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:04.783600Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:04.795336Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:04.795871Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:09:04.796209Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:04.841055Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:04.848909Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:04.849131Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:04.850728Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:09:04.850794Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:09:04.850847Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:09:04.851232Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:04.851316Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:04.851377Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in 
generation 2 2025-06-24T21:09:04.910777Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:04.949729Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:09:04.949915Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:04.950028Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:09:04.950066Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:04.950140Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:09:04.950203Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:04.950359Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:04.950409Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:04.950700Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:04.950796Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:04.950974Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:04.951019Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:04.951064Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:04.951097Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:04.952028Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:04.952094Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:04.952158Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:04.952299Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:04.952344Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:04.952400Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:09:04.955706Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 
ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:09:04.955781Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:04.955865Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:04.956033Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:09:04.956079Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:09:04.956129Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:09:04.956174Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:04.956210Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:09:04.956242Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:09:04.956293Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:04.956597Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:04.956636Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:09:04.956669Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:04.956716Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:04.956770Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:09:04.956813Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:04.956853Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:09:04.956884Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:04.956911Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:04.969345Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:04.969430Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:04.969469Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:04.969511Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:04.969597Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:09:04.970142Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:04.970201Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:04.970246Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:04.970381Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 2 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:04.970417Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:04.970553Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [2:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:04.970589Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [2:1] at 9437184 is Executed 2025-06-24T21:09:04.970626Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:09:04.970691Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:04.974711Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 2 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 2 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:04.974782Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:04.975009Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:04.975053Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:04.975103Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:04.975162Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:04.975197Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:04.975248Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2:1] in PlanQueue unit at 9437184 2025-06-24T21:09:04.975292Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2:1] at 9437184 on unit PlanQueue 2025-06-24T21:09:04. ... 
propose latency: 2 ms 2025-06-24T21:09:10.941621Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-24T21:09:10.941746Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-24T21:09:10.941776Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:146] at 9437186 on unit CompleteOperation 2025-06-24T21:09:10.941812Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 146] from 9437186 at tablet 9437186 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:09:10.941844Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-24T21:09:10.941941Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-24T21:09:10.941976Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:149] at 9437186 on unit CompleteOperation 2025-06-24T21:09:10.942024Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 149] from 9437186 at tablet 9437186 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:09:10.942053Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-24T21:09:10.942170Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-24T21:09:10.942198Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:152] at 9437186 on unit CompleteOperation 2025-06-24T21:09:10.942246Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 152] from 9437186 at tablet 9437186 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:09:10.942289Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-24T21:09:10.942716Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 6 txid# 113 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 37} 2025-06-24T21:09:10.942783Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:10.942827Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 113 2025-06-24T21:09:10.942910Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 6 txid# 151 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 98} 2025-06-24T21:09:10.942937Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:10.942987Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 151 2025-06-24T21:09:10.943080Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 6 txid# 116 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 38} 2025-06-24T21:09:10.943106Z node 1 :TX_DATASHARD 
TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:10.943132Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 116 2025-06-24T21:09:10.943208Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 6 txid# 152 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 99} 2025-06-24T21:09:10.943234Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:10.943280Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 152 2025-06-24T21:09:10.943450Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 6 txid# 119 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 39} 2025-06-24T21:09:10.943482Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:10.943510Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 119 2025-06-24T21:09:10.943567Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 6 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2025-06-24T21:09:10.943593Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:10.943623Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 154 2025-06-24T21:09:10.943835Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 6 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 2025-06-24T21:09:10.943881Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:10.943910Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 140 2025-06-24T21:09:10.944037Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 6 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2025-06-24T21:09:10.944068Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:10.944111Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 143 2025-06-24T21:09:10.944220Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 6 txid# 122 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 40} 2025-06-24T21:09:10.944263Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, 
processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:10.944291Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 122 2025-06-24T21:09:10.944378Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 6 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2025-06-24T21:09:10.944405Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:10.944459Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 146 2025-06-24T21:09:10.944569Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 6 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2025-06-24T21:09:10.944601Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:10.944634Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 149 2025-06-24T21:09:10.944741Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 6 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-24T21:09:10.944773Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:10.944817Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 152 2025-06-24T21:09:10.944916Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 6 txid# 125 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 41} 2025-06-24T21:09:10.944946Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:10.944971Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 125 2025-06-24T21:09:10.945041Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 6 txid# 128 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 42} 2025-06-24T21:09:10.945067Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:10.945092Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 128 2025-06-24T21:09:10.945167Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 6 txid# 131 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 43} 2025-06-24T21:09:10.945194Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event 
TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:10.945219Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 131 2025-06-24T21:09:10.945270Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 6 txid# 134 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 44} 2025-06-24T21:09:10.945321Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:10.945406Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 134 2025-06-24T21:09:10.945521Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 6 txid# 137 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 45} 2025-06-24T21:09:10.945552Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:10.945577Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 137 >> DataShardTxOrder::RandomPoints_ReproducerDelayRS1 >> KqpSort::ReverseFirstKeyOptimized [GOOD] >> KqpSort::ReverseLimitOptimized ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestOutOfOrderNoBarrierRestartImmediateLongTail [GOOD] Test command err: 2025-06-24T21:08:59.332308Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:08:59.332675Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:08:59.332865Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002dba/r3tmp/tmp34Ov1d/pdisk_1.dat 2025-06-24T21:08:59.695542Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:08:59.702609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:08:59.746910Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:59.757052Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799336296664 != 1750799336296668 2025-06-24T21:08:59.809949Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:59.810133Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:59.828279Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:59.932099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:59.993939Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:00.001228Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:00.001922Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:09:00.002285Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:00.076941Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:00.077871Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:00.078120Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:00.080101Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T21:09:00.080207Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:09:00.080274Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:09:00.080694Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:00.080882Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:00.081015Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:09:00.092224Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:00.135941Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:09:00.136173Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:00.136325Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:09:00.136370Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:09:00.136409Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:09:00.136455Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:09:00.136715Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:00.136772Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:00.137187Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:09:00.137316Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:09:00.137417Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:09:00.137461Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:00.137519Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:09:00.137570Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:09:00.137615Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:09:00.137652Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:09:00.137704Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:09:00.137860Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:00.137906Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:00.137952Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:09:00.138437Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:09:00.138488Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:00.139433Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:09:00.139689Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:09:00.139764Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:09:00.139890Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:09:00.139949Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:09:00.139996Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T21:09:00.140038Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T21:09:00.140074Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:00.140394Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:00.140438Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T21:09:00.140476Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:09:00.140515Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:00.140576Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T21:09:00.140611Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T21:09:00.140651Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T21:09:00.140693Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T21:09:00.140720Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T21:09:00.142357Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T21:09:00.142416Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:09:00.154106Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:09:00.154203Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:00.154263Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:00.154344Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... ds on nodes: node 2: [72075186224037888] 2025-06-24T21:09:10.919375Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jyhwbz15a6ymh8ybc4rnf5y4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWI1YjA1NzgtYzNiYjUwYmEtN2Y2OTFmNTktNDlkNzAwOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:10.919461Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715670. Ctx: { TraceId: 01jyhwbz15a6ymh8ybc4rnf5y4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWI1YjA1NzgtYzNiYjUwYmEtN2Y2OTFmNTktNDlkNzAwOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 1, readonly: true, 1 scan tasks on 1 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-24T21:09:10.919783Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715670. Ctx: { TraceId: 01jyhwbz15a6ymh8ybc4rnf5y4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWI1YjA1NzgtYzNiYjUwYmEtN2Y2OTFmNTktNDlkNzAwOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [2:1182:2899] 2025-06-24T21:09:10.919868Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715670. Ctx: { TraceId: 01jyhwbz15a6ymh8ybc4rnf5y4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWI1YjA1NzgtYzNiYjUwYmEtN2Y2OTFmNTktNDlkNzAwOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [2:1182:2899], channels: 1 2025-06-24T21:09:10.919931Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:1178:2899] TxId: 281474976715670. Ctx: { TraceId: 01jyhwbz15a6ymh8ybc4rnf5y4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWI1YjA1NzgtYzNiYjUwYmEtN2Y2OTFmNTktNDlkNzAwOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 1, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-24T21:09:10.919994Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1178:2899] TxId: 281474976715670. 
Ctx: { TraceId: 01jyhwbz15a6ymh8ybc4rnf5y4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWI1YjA1NzgtYzNiYjUwYmEtN2Y2OTFmNTktNDlkNzAwOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1182:2899], 2025-06-24T21:09:10.920053Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1178:2899] TxId: 281474976715670. Ctx: { TraceId: 01jyhwbz15a6ymh8ybc4rnf5y4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWI1YjA1NzgtYzNiYjUwYmEtN2Y2OTFmNTktNDlkNzAwOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:1182:2899], 2025-06-24T21:09:10.920099Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2368: ActorId: [2:1178:2899] TxId: 281474976715670. Ctx: { TraceId: 01jyhwbz15a6ymh8ybc4rnf5y4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWI1YjA1NzgtYzNiYjUwYmEtN2Y2OTFmNTktNDlkNzAwOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, immediate tx, become ExecuteState 2025-06-24T21:09:10.920848Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1178:2899] TxId: 281474976715670. Ctx: { TraceId: 01jyhwbz15a6ymh8ybc4rnf5y4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWI1YjA1NzgtYzNiYjUwYmEtN2Y2OTFmNTktNDlkNzAwOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1182:2899], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-24T21:09:10.920913Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1178:2899] TxId: 281474976715670. Ctx: { TraceId: 01jyhwbz15a6ymh8ybc4rnf5y4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWI1YjA1NzgtYzNiYjUwYmEtN2Y2OTFmNTktNDlkNzAwOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1182:2899], 2025-06-24T21:09:10.920966Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1178:2899] TxId: 281474976715670. Ctx: { TraceId: 01jyhwbz15a6ymh8ybc4rnf5y4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWI1YjA1NzgtYzNiYjUwYmEtN2Y2OTFmNTktNDlkNzAwOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:1182:2899], 2025-06-24T21:09:10.921429Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [2:1184:2899], Recipient [2:1118:2865]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false KeysSize: 1 2025-06-24T21:09:10.921547Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-24T21:09:10.921604Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v4048/281474976715667 IncompleteEdge# v{min} UnprotectedReadEdge# v2000/18446744073709551615 ImmediateWriteEdge# v2000/18446744073709551615 ImmediateWriteEdgeReplied# v4048/18446744073709551615 2025-06-24T21:09:10.921648Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v5000/18446744073709551615 2025-06-24T21:09:10.921710Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CheckRead 2025-06-24T21:09:10.921796Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-24T21:09:10.921836Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CheckRead 2025-06-24T21:09:10.921872Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T21:09:10.921910Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T21:09:10.921953Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 72075186224037888 2025-06-24T21:09:10.921993Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-24T21:09:10.922015Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:09:10.922035Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T21:09:10.922057Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit ExecuteRead 2025-06-24T21:09:10.922174Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false } 2025-06-24T21:09:10.922374Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[2:1184:2899], 0} after executionsCount# 1 2025-06-24T21:09:10.922441Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[2:1184:2899], 0} sends rowCount# 1, bytes# 32, quota rows left# 32766, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T21:09:10.922517Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read 
iterator# {[2:1184:2899], 0} finished in read 2025-06-24T21:09:10.922580Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-24T21:09:10.922605Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T21:09:10.922628Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:09:10.922652Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:09:10.922692Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-24T21:09:10.922712Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:09:10.922740Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:3] at 72075186224037888 has finished 2025-06-24T21:09:10.922780Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T21:09:10.923451Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [2:1184:2899], Recipient [2:1118:2865]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T21:09:10.923517Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-24T21:09:10.924151Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1178:2899] TxId: 281474976715670. Ctx: { TraceId: 01jyhwbz15a6ymh8ybc4rnf5y4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWI1YjA1NzgtYzNiYjUwYmEtN2Y2OTFmNTktNDlkNzAwOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1182:2899], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 899 Tasks { TaskId: 1 CpuTimeUs: 194 FinishTimeMs: 1750799350923 OutputRows: 1 OutputBytes: 5 Tables { TablePath: "/Root/table-1" ReadRows: 1 ReadBytes: 8 AffectedPartitions: 1 } IngressRows: 1 ResultRows: 1 ResultBytes: 5 ComputeCpuTimeUs: 80 BuildCpuTimeUs: 114 HostName: "ghrun-4pjfmvffsq" NodeId: 2 StartTimeMs: 1750799350923 CreateTimeMs: 1750799350920 UpdateTimeMs: 1750799350923 } MaxMemoryUsage: 1048576 } 2025-06-24T21:09:10.924280Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715670. Ctx: { TraceId: 01jyhwbz15a6ymh8ybc4rnf5y4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWI1YjA1NzgtYzNiYjUwYmEtN2Y2OTFmNTktNDlkNzAwOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1182:2899] 2025-06-24T21:09:10.924448Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:1178:2899] TxId: 281474976715670. Ctx: { TraceId: 01jyhwbz15a6ymh8ybc4rnf5y4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWI1YjA1NzgtYzNiYjUwYmEtN2Y2OTFmNTktNDlkNzAwOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T21:09:10.924515Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1178:2899] TxId: 281474976715670. 
Ctx: { TraceId: 01jyhwbz15a6ymh8ybc4rnf5y4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWI1YjA1NzgtYzNiYjUwYmEtN2Y2OTFmNTktNDlkNzAwOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000899s ReadRows: 1 ReadBytes: 8 ru: 1 rate limiter was not found force flag: 1 { items { uint32_value: 7 } items { uint32_value: 4 } } >> DataShardTxOrder::RandomPoints_DelayData >> DataShardTxOrder::ForceOnlineBetweenOnline_oo8 >> TestKinesisHttpProxy::TestListStreamConsumersWithMaxResults [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkTx::OlapExplicitTcl [GOOD] Test command err: Trying to start YDB, gRPC: 64481, MsgBus: 5778 2025-06-24T21:08:38.703724Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625816082113638:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:38.703770Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0032c9/r3tmp/tmpbFWDjJ/pdisk_1.dat 2025-06-24T21:08:39.132218Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625816082113618:2079] 1750799318696578 != 1750799318696581 2025-06-24T21:08:39.149803Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64481, node 1 2025-06-24T21:08:39.202056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:39.203567Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:39.206656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:39.372574Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:39.372598Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:39.372605Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:39.372721Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:39.730197Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5778 TClient is connected to server localhost:5778 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:40.211161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:40.242664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:42.054638Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625833261983444:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.054639Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625833261983452:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.054737Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.058334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:42.068021Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625833261983458:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:08:42.128143Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625833261983509:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:42.500339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:08:42.682298Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519625833261983658:2307];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:08:42.682585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519625833261983658:2307];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:08:42.682861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519625833261983658:2307];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:08:42.682982Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519625833261983658:2307];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:08:42.683097Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519625833261983658:2307];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:08:42.683487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519625833261983658:2307];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:08:42.683643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519625833261983658:2307];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:08:42.683759Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519625833261983658:2307];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:08:42.683874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519625833261983658:2307];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:08:42.684020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037894;self_id=[1:7519625833261983658:2307];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:08:42.684149Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519625833261983658:2307];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:08:42.685842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625833261983653:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:08:42.685882Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625833261983653:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:08:42.686057Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625833261983653:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:08:42.686162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625833261983653:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:08:42.686265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625833261983653:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:08:42.686383Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625833261983653:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:08:42.686516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625833261983653:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:08:42.686622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625833261983653:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:08:42.686737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625833261983653:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; ... 
d=72075186224037896;local_tx_no=40;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993,72075186224037996;receive=72075186224037897; 2025-06-24T21:09:07.474489Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=42;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037996; 2025-06-24T21:09:07.474583Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=43;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037996; 2025-06-24T21:09:07.474643Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=44;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037996; 2025-06-24T21:09:07.474706Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=45;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037996; 2025-06-24T21:09:07.474773Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=46;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037996; 2025-06-24T21:09:07.474830Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=47;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037996; 2025-06-24T21:09:07.474887Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=48;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037996; 2025-06-24T21:09:07.475038Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=50;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037981,72075186224037993;receive=72075186224037913; 2025-06-24T21:09:07.475089Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=51;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037981,72075186224037993;receive=72075186224037913; 2025-06-24T21:09:07.475675Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=52;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037981,72075186224037993;receive=72075186224037913; 2025-06-24T21:09:07.475785Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=53;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037981,72075186224037993;receive=72075186224037913; 2025-06-24T21:09:07.475874Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=54;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037981,72075186224037993;receive=72075186224037913; 2025-06-24T21:09:07.475929Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=55;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037981,72075186224037993;receive=72075186224037913; 2025-06-24T21:09:07.475981Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=56;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037981,72075186224037993;receive=72075186224037913; 2025-06-24T21:09:07.476136Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=58;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981,72075186224037993;receive=72075186224037969; 2025-06-24T21:09:07.476187Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=59;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981,72075186224037993;receive=72075186224037969; 2025-06-24T21:09:07.476241Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=60;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981,72075186224037993;receive=72075186224037969; 2025-06-24T21:09:07.476289Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=61;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981,72075186224037993;receive=72075186224037969; 2025-06-24T21:09:07.476342Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=62;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981,72075186224037993;receive=72075186224037969; 2025-06-24T21:09:07.476423Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=63;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981,72075186224037993;receive=72075186224037969; 2025-06-24T21:09:07.476520Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=64;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981,72075186224037993;receive=72075186224037969; 2025-06-24T21:09:07.476676Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=66;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037981; 2025-06-24T21:09:07.476730Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=67;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037981; 2025-06-24T21:09:07.476798Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=68;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037981; 2025-06-24T21:09:07.476909Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=69;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037981; 2025-06-24T21:09:07.476965Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=70;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037981; 2025-06-24T21:09:07.477033Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=71;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037981; 2025-06-24T21:09:07.477099Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037896;self_id=[2:7519625896909288276:2307];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037896;local_tx_no=72;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037981; 2025-06-24T21:09:07.477444Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-24T21:09:07.478954Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-24T21:09:08.825022Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037991;tx_state=TTxProgressTx::Execute;tx_current=281474976710667;tx_id=281474976710667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710667; 2025-06-24T21:09:09.221612Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:09:09.221644Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:09.412501Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=Y2FjNjRkYTAtZTZhZTZiNjUtZjQzY2U2NzUtY2I5ODU0YjQ=, ActorId: [2:7519625939858970450:3851], ActorState: ReadyState, TraceId: 01jyhwbxmrfmt4em8765ekb291, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites-EvWrite [GOOD] Test command err: 2025-06-24T21:09:01.483514Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:01.483996Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:01.484206Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002db7/r3tmp/tmpIyhuka/pdisk_1.dat 2025-06-24T21:09:01.826261Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:09:01.829512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:01.873399Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:01.877050Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799338365368 != 1750799338365372 2025-06-24T21:09:01.925617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:01.925778Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:01.937699Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:02.022626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:02.069272Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:02.070702Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:02.071320Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:09:02.071644Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:02.124806Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:02.125704Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:02.125886Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:02.127967Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T21:09:02.128069Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:09:02.128134Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:09:02.128582Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:02.128764Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:02.128883Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:09:02.139808Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:02.175633Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:09:02.175880Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:02.176030Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:09:02.176071Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:09:02.176113Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:09:02.176153Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:09:02.176394Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:02.176449Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:02.176877Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:09:02.177031Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:09:02.177200Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:09:02.177244Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:02.177298Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:09:02.177337Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:09:02.177376Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:09:02.177411Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:09:02.177464Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:09:02.177637Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:02.177679Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:02.177725Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:09:02.178241Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:09:02.178294Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:02.178400Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:09:02.178703Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:09:02.178771Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:09:02.178892Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:09:02.178947Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:09:02.178992Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T21:09:02.179034Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T21:09:02.179078Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:02.179471Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:02.179517Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T21:09:02.179559Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:09:02.179599Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:02.179664Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T21:09:02.179699Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T21:09:02.179740Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T21:09:02.179775Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T21:09:02.179805Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T21:09:02.181445Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T21:09:02.181505Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:09:02.192529Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:09:02.192622Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:02.192663Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:02.192742Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... 1750799351297 } MaxMemoryUsage: 1048576 } 2025-06-24T21:09:11.300209Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1028:2821] 2025-06-24T21:09:11.300284Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1021:2803] TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1033:2826], CA [2:1030:2823], CA [2:1031:2824], CA [2:1032:2825], CA [2:1029:2822], 2025-06-24T21:09:11.300332Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1021:2803] TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 5 compute actor(s) and 0 datashard(s): CA [2:1033:2826], CA [2:1030:2823], CA [2:1031:2824], CA [2:1032:2825], CA [2:1029:2822], 2025-06-24T21:09:11.300488Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1021:2803] TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1029:2822], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 592 DurationUs: 1000 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 304 FinishTimeMs: 1750799351297 InputRows: 1 InputBytes: 5 OutputRows: 1 OutputBytes: 5 ComputeCpuTimeUs: 247 BuildCpuTimeUs: 57 HostName: "ghrun-4pjfmvffsq" NodeId: 2 StartTimeMs: 1750799351296 CreateTimeMs: 1750799351289 UpdateTimeMs: 1750799351297 } MaxMemoryUsage: 1048576 } 2025-06-24T21:09:11.300559Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. 
Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1029:2822] 2025-06-24T21:09:11.300615Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1021:2803] TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1033:2826], CA [2:1030:2823], CA [2:1031:2824], CA [2:1032:2825], 2025-06-24T21:09:11.300660Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1021:2803] TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 4 compute actor(s) and 0 datashard(s): CA [2:1033:2826], CA [2:1030:2823], CA [2:1031:2824], CA [2:1032:2825], 2025-06-24T21:09:11.301051Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1021:2803] TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1030:2823], task: 4, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 434 DurationUs: 2000 Tasks { TaskId: 4 StageId: 3 CpuTimeUs: 203 FinishTimeMs: 1750799351299 InputRows: 1 InputBytes: 5 OutputRows: 1 OutputBytes: 5 ComputeCpuTimeUs: 165 BuildCpuTimeUs: 38 HostName: "ghrun-4pjfmvffsq" NodeId: 2 StartTimeMs: 1750799351297 CreateTimeMs: 1750799351289 UpdateTimeMs: 1750799351299 } MaxMemoryUsage: 1048576 } 2025-06-24T21:09:11.301150Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1030:2823] 2025-06-24T21:09:11.301209Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1021:2803] TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1033:2826], CA [2:1031:2824], CA [2:1032:2825], 2025-06-24T21:09:11.301256Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1021:2803] TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 3 compute actor(s) and 0 datashard(s): CA [2:1033:2826], CA [2:1031:2824], CA [2:1032:2825], 2025-06-24T21:09:11.301596Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1021:2803] TxId: 281474976715667. 
Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1031:2824], task: 5, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 699 DurationUs: 1000 Tasks { TaskId: 5 StageId: 4 CpuTimeUs: 421 FinishTimeMs: 1750799351299 InputRows: 2 InputBytes: 10 OutputRows: 2 OutputBytes: 7 ComputeCpuTimeUs: 362 BuildCpuTimeUs: 59 HostName: "ghrun-4pjfmvffsq" NodeId: 2 StartTimeMs: 1750799351298 CreateTimeMs: 1750799351289 UpdateTimeMs: 1750799351299 } MaxMemoryUsage: 1048576 } 2025-06-24T21:09:11.301675Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1031:2824] 2025-06-24T21:09:11.301721Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1021:2803] TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1033:2826], CA [2:1032:2825], 2025-06-24T21:09:11.301783Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1021:2803] TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [2:1033:2826], CA [2:1032:2825], 2025-06-24T21:09:11.302038Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1021:2803] TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1032:2825], task: 6, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 489 Tasks { TaskId: 6 StageId: 5 CpuTimeUs: 218 FinishTimeMs: 1750799351301 InputRows: 2 InputBytes: 7 OutputRows: 2 OutputBytes: 7 ComputeCpuTimeUs: 144 BuildCpuTimeUs: 74 HostName: "ghrun-4pjfmvffsq" NodeId: 2 CreateTimeMs: 1750799351289 UpdateTimeMs: 1750799351301 } MaxMemoryUsage: 1048576 } 2025-06-24T21:09:11.302129Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1032:2825] 2025-06-24T21:09:11.302179Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1021:2803] TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Waiting for: CA [2:1033:2826], 2025-06-24T21:09:11.302240Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1021:2803] TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:1033:2826], 2025-06-24T21:09:11.302545Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1021:2803] TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1033:2826], task: 7, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 447 DurationUs: 2000 Tasks { TaskId: 7 StageId: 6 CpuTimeUs: 190 FinishTimeMs: 1750799351302 InputRows: 2 InputBytes: 7 OutputRows: 2 OutputBytes: 7 ResultRows: 2 ResultBytes: 7 ComputeCpuTimeUs: 148 BuildCpuTimeUs: 42 HostName: "ghrun-4pjfmvffsq" NodeId: 2 StartTimeMs: 1750799351300 CreateTimeMs: 1750799351289 UpdateTimeMs: 1750799351302 } MaxMemoryUsage: 1048576 } 2025-06-24T21:09:11.302618Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1033:2826] 2025-06-24T21:09:11.302832Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:1021:2803] TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T21:09:11.302916Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1021:2803] TxId: 281474976715667. Ctx: { TraceId: 01jyhwbz1b1aers5dmzzedymw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI1OGZjY2MtZDhhMmU5ZjItOGZjN2FkYy03ZTMwMjk3OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.004390s ReadRows: 2 ReadBytes: 16 ru: 2 rate limiter was not found force flag: 1 { items { uint32_value: 3 } items { uint32_value: 2 } }, { items { uint32_value: 4 } items { uint32_value: 2 } } >> DataShardTxOrder::DelayData [GOOD] >> TestKinesisHttpProxy::TestUnauthorizedPutRecords [GOOD] >> DataShardTxOrder::ImmediateBetweenOnline_oo8_dirty [GOOD] >> TestKinesisHttpProxy::CreateDeleteStreamWithConsumer [GOOD] >> KqpSinkLocks::VisibleUncommittedRowsUpdate [GOOD] >> KqpVectorIndexes::OrderByCosineLevel1-Nullable+UseSimilarity [GOOD] >> TGroupMapperTest::MonteCarlo >> TestKinesisHttpProxy::TestListStreamConsumersWithToken >> KqpNewEngine::ContainerRegistryCombiner >> TestKinesisHttpProxy::ListShardsEmptyFields [GOOD] >> TestKinesisHttpProxy::TestWrongStream >> DataShardTxOrder::RandomPoints_ReproducerDelayRS1 [GOOD] >> KqpNotNullColumns::CreateTableWithDisabledNotNullDataColumns [GOOD] >> KqpSinkMvcc::OlapNamedStatement [GOOD] >> KqpNotNullColumns::InsertNotNull >> KqpSinkMvcc::OlapMultiSinks >> DataShardOutOfOrder::UncommittedReads [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ImmediateBetweenOnline_oo8_dirty [GOOD] Test command err: 2025-06-24T21:09:07.160552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:09:07.160611Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:07.163980Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:07.182385Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:07.182976Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:09:07.183335Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:07.241269Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:07.254297Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:07.254421Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:07.256445Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:09:07.256526Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:09:07.256583Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:09:07.256998Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:07.257113Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:07.257190Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:09:07.319756Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:07.358066Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 
9437184 2025-06-24T21:09:07.358322Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:07.358433Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:09:07.358482Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:07.358518Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:09:07.358555Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:07.358771Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:07.358821Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:07.359100Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:07.359349Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:07.359426Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:07.359469Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:07.359520Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:07.359583Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:07.359624Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:07.359659Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:07.359699Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:07.359798Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:07.359835Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:07.359891Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:09:07.362966Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\001J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:09:07.363046Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:07.363126Z node 1 
:TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:07.363339Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:09:07.363396Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:09:07.363467Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:09:07.363529Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:07.363584Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:09:07.363664Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:09:07.363697Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:07.363988Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:07.364026Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:09:07.364060Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:07.364108Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:07.364170Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:09:07.364207Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:07.364243Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:09:07.364283Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:07.364311Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:07.383821Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:07.383944Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:07.383990Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:07.384056Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:07.384157Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:09:07.384741Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:07.384807Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, 
processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:07.384862Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:07.385005Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:07.385041Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:07.385208Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:07.385272Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:09:07.385325Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:09:07.385389Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:07.389547Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:07.389628Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:07.389873Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:07.389919Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:07.389984Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:07.390045Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:07.390082Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:07.390144Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:09:07.390203Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
9:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 125 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 41} 2025-06-24T21:09:12.988496Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:12.988519Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 125 2025-06-24T21:09:12.988602Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2025-06-24T21:09:12.988627Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:12.988655Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 143 2025-06-24T21:09:12.988733Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 128 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 42} 2025-06-24T21:09:12.988780Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:12.988809Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 128 2025-06-24T21:09:12.988879Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2025-06-24T21:09:12.988908Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:12.988929Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 146 2025-06-24T21:09:12.989027Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2025-06-24T21:09:12.989068Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:12.989100Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 149 2025-06-24T21:09:12.989186Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-24T21:09:12.989212Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:12.989234Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 152 2025-06-24T21:09:12.989296Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], 
Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 131 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 43} 2025-06-24T21:09:12.989330Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:12.989352Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 131 2025-06-24T21:09:12.989462Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 134 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 44} 2025-06-24T21:09:12.989494Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:12.989516Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 134 2025-06-24T21:09:12.989613Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 137 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 45} 2025-06-24T21:09:12.989642Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:12.989666Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 137 2025-06-24T21:09:12.989739Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 2025-06-24T21:09:12.989770Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:12.989788Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 140 2025-06-24T21:09:12.989850Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:12.989875Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:149] at 9437184 on unit CompleteOperation 2025-06-24T21:09:12.989915Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 149] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 1 ms, propose latency: 2 ms 2025-06-24T21:09:12.989968Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 97} 2025-06-24T21:09:12.990014Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:12.990191Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-24T21:09:12.990225Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-24T21:09:12.990249Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-24T21:09:12.990271Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: 
TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:12.990303Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:151] at 9437184 on unit CompleteOperation 2025-06-24T21:09:12.990345Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 151] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 1 ms, propose latency: 2 ms 2025-06-24T21:09:12.990379Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 151 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 98} 2025-06-24T21:09:12.990396Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:12.990502Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:12.990520Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:152] at 9437184 on unit CompleteOperation 2025-06-24T21:09:12.990562Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 152] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 1 ms, propose latency: 2 ms 2025-06-24T21:09:12.990591Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 99} 2025-06-24T21:09:12.990626Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:12.990705Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:12.990719Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:154] at 9437184 on unit CompleteOperation 2025-06-24T21:09:12.990757Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 154] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 1 ms, propose latency: 2 ms 2025-06-24T21:09:12.990784Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2025-06-24T21:09:12.990803Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:12.990948Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 97} 2025-06-24T21:09:12.990972Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:12.991001Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 149 2025-06-24T21:09:12.991128Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 151 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 98} 2025-06-24T21:09:12.991171Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event 
TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:12.991208Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 151 2025-06-24T21:09:12.991268Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 99} 2025-06-24T21:09:12.991322Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:12.991346Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 152 2025-06-24T21:09:12.991407Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2025-06-24T21:09:12.991428Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:12.991448Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 154 >> TPart::PageFailEnvColumnGroups [GOOD] >> TPart::ForwardEnvColumnGroups ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::DelayData [GOOD] Test command err: 2025-06-24T21:08:59.945000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:59.945063Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:59.947840Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:08:59.968083Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:08:59.968558Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:08:59.968836Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:00.015993Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:00.024067Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:00.024326Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:00.026115Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:09:00.026200Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:09:00.026245Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:09:00.026622Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:00.026738Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:00.026812Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:09:00.084324Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:00.128895Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:09:00.129092Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:00.129190Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:09:00.129224Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:00.129256Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:09:00.129297Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:00.133380Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:00.133445Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:00.133687Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:00.133788Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:00.133938Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:00.133984Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:00.134030Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:00.134080Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:00.134112Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:00.134140Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:00.134179Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:00.134307Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:00.134342Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:00.134392Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:09:00.137406Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nK\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 
8\032\n\n\004uint\030\002 9(\":\n \000Z\006\010\002\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:09:00.137481Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:00.137569Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:00.137738Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:09:00.137780Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:09:00.137829Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:09:00.137868Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:00.137914Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:09:00.137944Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:09:00.137974Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:00.138276Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:00.138308Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:09:00.138338Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:00.138385Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:00.138437Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:09:00.138472Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:00.138508Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:09:00.138542Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:00.138564Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:00.151846Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:00.151940Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:00.151978Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:00.152019Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:00.152100Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast 
registration request in state WaitScheme 2025-06-24T21:09:00.152646Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:00.152702Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:00.152750Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:00.152909Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:00.152943Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:00.153089Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:00.153147Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:09:00.153193Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:09:00.153225Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:00.161783Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:00.161870Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:00.162155Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:00.162206Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:00.162270Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:00.162309Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:00.162357Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:00.162406Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:09:00.162453Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100 ... 
184 to execution unit CompletedOperations 2025-06-24T21:09:12.929529Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:506] at 9437184 on unit CompletedOperations 2025-06-24T21:09:12.929565Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:506] at 9437184 is Executed 2025-06-24T21:09:12.929587Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:506] at 9437184 executing on unit CompletedOperations 2025-06-24T21:09:12.929614Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000005:506] at 9437184 has finished 2025-06-24T21:09:12.929663Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:12.929699Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:12.929740Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000005:507] in PlanQueue unit at 9437184 2025-06-24T21:09:12.930225Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:236:2227], Recipient [1:236:2227]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:12.930274Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:12.930353Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:12.930390Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:12.930425Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:282: Return cached ready operation [1000005:507] at 9437184 2025-06-24T21:09:12.930465Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit PlanQueue 2025-06-24T21:09:12.930494Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-24T21:09:12.930517Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit PlanQueue 2025-06-24T21:09:12.930540Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit LoadTxDetails 2025-06-24T21:09:12.930580Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit LoadTxDetails 2025-06-24T21:09:12.931251Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 9437184 loaded tx from db 1000005:507 keys extracted: 1 2025-06-24T21:09:12.931311Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-24T21:09:12.931341Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit LoadTxDetails 2025-06-24T21:09:12.931390Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit FinalizeDataTxPlan 2025-06-24T21:09:12.931419Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit FinalizeDataTxPlan 2025-06-24T21:09:12.931455Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 
9437184 is Executed 2025-06-24T21:09:12.931479Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit FinalizeDataTxPlan 2025-06-24T21:09:12.931500Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-24T21:09:12.931521Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit BuildAndWaitDependencies 2025-06-24T21:09:12.931574Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [1000005:507] is the new logically complete end at 9437184 2025-06-24T21:09:12.931602Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [1000005:507] is the new logically incomplete end at 9437184 2025-06-24T21:09:12.931658Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [1000005:507] at 9437184 2025-06-24T21:09:12.931711Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-24T21:09:12.931734Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-24T21:09:12.931757Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit BuildDataTxOutRS 2025-06-24T21:09:12.931780Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit BuildDataTxOutRS 2025-06-24T21:09:12.931824Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-24T21:09:12.931846Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit BuildDataTxOutRS 2025-06-24T21:09:12.931868Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit StoreAndSendOutRS 2025-06-24T21:09:12.931887Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit StoreAndSendOutRS 2025-06-24T21:09:12.931910Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-24T21:09:12.931939Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit StoreAndSendOutRS 2025-06-24T21:09:12.931972Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit PrepareDataTxInRS 2025-06-24T21:09:12.932009Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit PrepareDataTxInRS 2025-06-24T21:09:12.932039Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-24T21:09:12.932059Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit PrepareDataTxInRS 2025-06-24T21:09:12.932079Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit LoadAndWaitInRS 2025-06-24T21:09:12.932099Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit LoadAndWaitInRS 2025-06-24T21:09:12.932120Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 
9437184 is Executed 2025-06-24T21:09:12.932141Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit LoadAndWaitInRS 2025-06-24T21:09:12.932161Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit ExecuteDataTx 2025-06-24T21:09:12.932181Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit ExecuteDataTx 2025-06-24T21:09:12.932469Z node 1 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000005:507] at tablet 9437184 with status COMPLETE 2025-06-24T21:09:12.932528Z node 1 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000005:507] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 11, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-24T21:09:12.932588Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:12.932621Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit ExecuteDataTx 2025-06-24T21:09:12.932678Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit CompleteOperation 2025-06-24T21:09:12.932715Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit CompleteOperation 2025-06-24T21:09:12.932919Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is DelayComplete 2025-06-24T21:09:12.932947Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit CompleteOperation 2025-06-24T21:09:12.932973Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit CompletedOperations 2025-06-24T21:09:12.932998Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit CompletedOperations 2025-06-24T21:09:12.933032Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-24T21:09:12.933066Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit CompletedOperations 2025-06-24T21:09:12.933097Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000005:507] at 9437184 has finished 2025-06-24T21:09:12.933124Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:12.933152Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:12.933192Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:12.933228Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:12.949633Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437184 step# 1000005 txid# 506 txid# 507} 2025-06-24T21:09:12.949720Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: 
Sending '{TEvPlanStepAccepted TabletId# 9437184 step# 1000005} 2025-06-24T21:09:12.949789Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:12.949838Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:506] at 9437184 on unit CompleteOperation 2025-06-24T21:09:12.949905Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 506] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:09:12.949965Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:12.950188Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:12.950230Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:507] at 9437184 on unit CompleteOperation 2025-06-24T21:09:12.950270Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 507] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:09:12.950312Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 >> TestKinesisHttpProxy::CreateDeleteStreamWithConsumerWithFlag >> KqpSinkLocks::InsertWithBulkUpsert+UseBulkUpsert [GOOD] >> KqpSinkLocks::InsertWithBulkUpsert-UseBulkUpsert >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite2 [GOOD] >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite3 >> TPart::ForwardEnvColumnGroups [GOOD] >> TPart::Versions [GOOD] >> TPart::ManyVersions >> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLock-EvWrite >> TPart::ManyVersions [GOOD] >> TPart::ManyDeltas [GOOD] >> TPart::CutKeys_Lz4 [GOOD] >> TPart::CutKeys_Seek [GOOD] >> TPart::CutKeys_SeekPages >> TestKinesisHttpProxy::ListShardsExclusiveStartShardId >> TPart::CutKeys_SeekPages [GOOD] >> TPart::CutKeys_SeekSlices [GOOD] >> TPart::CutKeys_CutString [GOOD] >> TPart::CutKeys_CutUtf8String [GOOD] >> TPartBtreeIndexIteration::NoNodes >> TGroupMapperTest::ReassignGroupTest3dc ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::RandomPoints_ReproducerDelayRS1 [GOOD] Test command err: 2025-06-24T21:09:12.602834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:09:12.602894Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:12.604771Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:12.617459Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:12.617862Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:09:12.618099Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:12.661460Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:12.676825Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: 
TxInitSchema.Complete 2025-06-24T21:09:12.677043Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:12.678527Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:09:12.678621Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:09:12.678682Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:09:12.679031Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:12.679139Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:12.679226Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:09:12.750486Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:12.791474Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:09:12.791665Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:12.791777Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:09:12.791817Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:12.791870Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:09:12.791915Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:12.792084Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:12.792135Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:12.792410Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:12.792495Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:12.792660Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:12.792707Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:12.792785Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:12.792829Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:12.792866Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:12.792907Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:12.792961Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:12.793067Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 
269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:12.793102Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:12.793372Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:09:12.796829Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:09:12.796900Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:12.796975Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:12.797179Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:09:12.797239Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:09:12.797298Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:09:12.797345Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:12.797382Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:09:12.797419Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:09:12.797467Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:12.797805Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:12.797851Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:09:12.797887Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:12.797942Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:12.798001Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:09:12.798041Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:12.798089Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:09:12.798146Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:12.798173Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to 
execute on unit WaitForPlan 2025-06-24T21:09:12.810537Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:12.810616Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:12.810661Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:12.810706Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:12.810800Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:09:12.811394Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:12.811455Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:12.811501Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:12.811646Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:12.811687Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:12.811819Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:12.811867Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:09:12.811917Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:09:12.811967Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:12.820966Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:12.821063Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:12.821289Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:12.821338Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:12.821401Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:12.821445Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:12.821481Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:12.821547Z node 1 :TX_DATASHARD DEBUG: 
datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:09:12.821595Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 09:13.906223Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:13.906352Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-24T21:09:13.906374Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-24T21:09:13.906392Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-24T21:09:13.906412Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:13.906455Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:7] at 9437184 on unit CompleteOperation 2025-06-24T21:09:13.906488Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 7] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 1 ms, propose latency: 3 ms 2025-06-24T21:09:13.906522Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000004 txid# 7 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 4} 2025-06-24T21:09:13.906563Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:13.906669Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:13.906691Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:8] at 9437184 on unit CompleteOperation 2025-06-24T21:09:13.906736Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 8] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 1 ms, propose latency: 3 ms 2025-06-24T21:09:13.906782Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000004 txid# 8 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 5} 2025-06-24T21:09:13.906807Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:13.906967Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:13.907003Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:9] at 9437184 on unit CompleteOperation 2025-06-24T21:09:13.907046Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 9] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 1 ms, propose latency: 3 ms 2025-06-24T21:09:13.907083Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000004 txid# 9 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 6} 2025-06-24T21:09:13.907114Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:13.907296Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:13.907332Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:14] at 9437184 on unit 
FinishPropose 2025-06-24T21:09:13.907379Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 14 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: COMPLETE 2025-06-24T21:09:13.907461Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:13.907611Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:13.907638Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:10] at 9437184 on unit CompleteOperation 2025-06-24T21:09:13.907674Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 10] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 1 ms, propose latency: 3 ms 2025-06-24T21:09:13.907720Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000004 txid# 10 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 7} 2025-06-24T21:09:13.907744Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:13.907909Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:13.907944Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:12] at 9437184 on unit CompleteOperation 2025-06-24T21:09:13.907986Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 12] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 1 ms, propose latency: 3 ms 2025-06-24T21:09:13.908050Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000004 txid# 12 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 8} 2025-06-24T21:09:13.908075Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:13.908224Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:13.908271Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:13] at 9437184 on unit CompleteOperation 2025-06-24T21:09:13.908310Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 13] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:09:13.908337Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:13.908438Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-24T21:09:13.908464Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:13.908488Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:5] at 9437184 on unit CompleteOperation 2025-06-24T21:09:13.908531Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 5] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 1 ms, propose latency: 3 ms 2025-06-24T21:09:13.908577Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000004 txid# 5 TabletSource# 9437185 TabletDest# 9437184 
SetTabletConsumer# 9437184 Flags# 0 Seqno# 2} 2025-06-24T21:09:13.908602Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:13.908837Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000004 txid# 4 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 1} 2025-06-24T21:09:13.908895Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:13.908942Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 4 2025-06-24T21:09:13.909040Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000004 txid# 6 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 3} 2025-06-24T21:09:13.909068Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:13.909100Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 6 2025-06-24T21:09:13.909173Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000004 txid# 7 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 4} 2025-06-24T21:09:13.909208Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:13.909233Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 7 2025-06-24T21:09:13.909289Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000004 txid# 8 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 5} 2025-06-24T21:09:13.909330Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:13.909357Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 8 2025-06-24T21:09:13.909421Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000004 txid# 9 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 6} 2025-06-24T21:09:13.909444Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:13.909470Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 9 2025-06-24T21:09:13.909559Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000004 txid# 10 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 7} 2025-06-24T21:09:13.909587Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 
2025-06-24T21:09:13.909609Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 10 2025-06-24T21:09:13.909652Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000004 txid# 12 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 8} 2025-06-24T21:09:13.909676Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:13.909702Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 12 2025-06-24T21:09:13.909755Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000004 txid# 5 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 2} 2025-06-24T21:09:13.909788Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:13.909819Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 5 expect 7 2 5 4 - 3 - - - - - - - - - - - - - - - - - - - - - - - - - - actual 7 2 5 4 - 3 - - - - - - - - - - - - - - - - - - - - - - - - - - interm - 2 5 4 - 3 - - - - - - - - - - - - - - - - - - - - - - - - - - ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkLocks::VisibleUncommittedRowsUpdate [GOOD] Test command err: Trying to start YDB, gRPC: 62355, MsgBus: 23889 2025-06-24T21:08:49.911885Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625864083739932:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:49.911943Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031d4/r3tmp/tmp2IGFkn/pdisk_1.dat 2025-06-24T21:08:50.416893Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:50.419286Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625864083739916:2079] 1750799329906959 != 1750799329906962 2025-06-24T21:08:50.448102Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:50.448248Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 62355, node 1 2025-06-24T21:08:50.450176Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:50.559026Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:50.559049Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:50.559055Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:50.559673Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23889 2025-06-24T21:08:50.943454Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23889 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:51.162600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:53.441326Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625881263609728:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:53.441476Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625881263609745:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:53.441562Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:53.447637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:53.461557Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625881263609757:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:08:53.541927Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625881263609808:2335] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:53.933383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.082715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:54.933699Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625864083739932:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:54.937502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:55.214344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 27097, MsgBus: 62360 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031d4/r3tmp/tmp4zSvfA/pdisk_1.dat 2025-06-24T21:08:57.957716Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625898539665000:2089];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:58.045816Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:08:58.113774Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:58.113863Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:58.115220Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:58.131846Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27097, node 2 2025-06-24T21:08:58.259752Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:58.259773Z node 2 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:58.259781Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:58.259909Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62360 TClient is connected to server localhost:62360 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:58.830016Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:58.978733Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:01.694266Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625915719534707:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:01.694390Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:01.695160Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625915719534742:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:01.699989Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:01.717143Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625915719534744:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:09:01.778119Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519625915719534795:2331] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:01.844856Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:01.897613Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:02.990051Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519625898539665000:2089];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:02.992949Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:09:03.032406Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 2922, MsgBus: 9369 2025-06-24T21:09:05.833089Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519625932746126785:2154];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:05.899986Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031d4/r3tmp/tmpNX4kxo/pdisk_1.dat 2025-06-24T21:09:06.039305Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519625932746126656:2079] 1750799345795550 != 1750799345795553 2025-06-24T21:09:06.049518Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:06.051272Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:06.051358Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:06.054873Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2922, node 3 2025-06-24T21:09:06.115747Z node 3 
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:06.115772Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:06.115781Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:06.115929Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9369 TClient is connected to server localhost:9369 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:06.713788Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:06.721398Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:09:06.835524Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:09.534536Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625949925996458:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:09.534604Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625949925996450:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:09.534797Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:09.538487Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:09.556716Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519625949925996487:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:09:09.618634Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519625949925996538:2331] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:09.675306Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:09.747814Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:10.877063Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519625932746126785:2154];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:10.880613Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:09:10.923821Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TxOrderInternals::OperationOrder [GOOD] >> TSharedPageCache::ThreeLeveledLRU >> KqpSinkTx::OlapSnapshotROInteractive2 [GOOD] >> TGroupMapperTest::MapperSequentialCalls [GOOD] >> TestKinesisHttpProxy::GoodRequestGetRecordsCbor [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::OrderByCosineLevel1-Nullable+UseSimilarity [GOOD] Test command err: Trying to start YDB, gRPC: 18733, MsgBus: 1609 2025-06-24T21:08:04.230582Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625668469884398:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:04.244619Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045a4/r3tmp/tmpDVxIIF/pdisk_1.dat 2025-06-24T21:08:04.689157Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:04.691965Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625668469884301:2079] 1750799284205794 != 1750799284205797 2025-06-24T21:08:04.705527Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2025-06-24T21:08:04.705656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:04.740139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18733, node 1 2025-06-24T21:08:04.804912Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:04.804932Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:04.804938Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:04.805040Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1609 2025-06-24T21:08:05.243760Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1609 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:05.403304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:05.434139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:08:05.578044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:05.739986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:05.809511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:07.600324Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625681354787829:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:07.600445Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:07.933419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:07.970912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:08.003660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:08.040176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:08.078302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:08.148564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:08.184464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:08.269395Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625685649755793:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:08.269472Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:08.269636Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625685649755798:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:08.273417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:08.284170Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625685649755800:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:08:08.366761Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625685649755851:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:09.224339Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625668469884398:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:09.224429Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:09.478825Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625689944723422:3600], Recipient [1:7519625668469884619:2143]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:09.478872Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:09.478886Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72 ... E: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037923, followerId 0 2025-06-24T21:09:11.537060Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:36 with partCount# 0, rowCount# 2, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:11.537081Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037923 2025-06-24T21:09:11.537103Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 17 shard idx 72057594046644480:37 data size 896 row count 4 2025-06-24T21:09:11.537137Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037924 maps to shardIdx: 72057594046644480:37 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 17], pathId map=TestTable, is column=0, is olap=0, RowCount 4, DataSize 896 2025-06-24T21:09:11.537155Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037924, followerId 0 2025-06-24T21:09:11.537205Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:37 with partCount# 0, rowCount# 4, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:11.537224Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037924 2025-06-24T21:09:11.537283Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:09:11.537422Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519625806116578442:2147]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 
2025-06-24T21:09:11.537447Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:11.537464Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-24T21:09:11.727839Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [2:7519625827591417468:2499], Recipient [2:7519625806116578442:2147]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037926 TableLocalId: 19 Generation: 1 Round: 2 TableStats { DataSize: 90 RowCount: 2 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750799351061 LastUpdateTime: 1750799321778 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 2 RowDeletes: 0 RowReads: 0 RangeReads: 17 PartCount: 1 RangeReadRows: 34 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 90 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 1072 Memory: 119587 Storage: 207 } ShardState: 2 UserTablePartOwners: 72075186224037926 NodeId: 2 StartTime: 1750799321697 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:09:11.727884Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:09:11.727923Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037926 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 19] state 'Ready' dataSize 90 rowCount 2 cpuUsage 0.1072 2025-06-24T21:09:11.728039Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037926 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 19] raw table stats: DataSize: 90 RowCount: 2 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750799351061 LastUpdateTime: 1750799321778 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 2 RowDeletes: 0 RowReads: 0 RangeReads: 17 PartCount: 1 RangeReadRows: 34 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 90 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:09:11.728065Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.099994s, queue# 1 2025-06-24T21:09:11.728297Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [2:7519625827591417467:2498], Recipient [2:7519625806116578442:2147]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037925 TableLocalId: 20 Generation: 1 Round: 2 TableStats { DataSize: 250 RowCount: 10 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750799351064 LastUpdateTime: 1750799321786 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 10 RowDeletes: 0 RowReads: 0 RangeReads: 29 PartCount: 1 RangeReadRows: 155 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 250 IndexSize: 0 } 
ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 1113 Memory: 119581 Storage: 358 } ShardState: 2 UserTablePartOwners: 72075186224037925 NodeId: 2 StartTime: 1750799321696 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:09:11.728318Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:09:11.728342Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037925 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 20] state 'Ready' dataSize 250 rowCount 10 cpuUsage 0.1113 2025-06-24T21:09:11.728450Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037925 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 20] raw table stats: DataSize: 250 RowCount: 10 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750799351064 LastUpdateTime: 1750799321786 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 10 RowDeletes: 0 RowReads: 0 RangeReads: 29 PartCount: 1 RangeReadRows: 155 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 250 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:09:11.827765Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519625806116578442:2147]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:11.827813Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:11.827847Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 2 2025-06-24T21:09:11.827900Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 2 2025-06-24T21:09:11.827921Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000140s, queue# 2 2025-06-24T21:09:11.827985Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 19 shard idx 72057594046644480:38 data size 90 row count 2 2025-06-24T21:09:11.828053Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037926 maps to shardIdx: 72057594046644480:38 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 19], pathId map=indexImplLevelTable, is column=0, is olap=0, RowCount 2, DataSize 90 2025-06-24T21:09:11.828067Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037926, followerId 0 2025-06-24T21:09:11.828134Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:38 with partCount# 1, rowCount# 2, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:11.828191Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 
72075186224037926 2025-06-24T21:09:11.828230Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 20 shard idx 72057594046644480:39 data size 250 row count 10 2025-06-24T21:09:11.828271Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037925 maps to shardIdx: 72057594046644480:39 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 20], pathId map=indexImplPostingTable, is column=0, is olap=0, RowCount 10, DataSize 250 2025-06-24T21:09:11.828281Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037925, followerId 0 2025-06-24T21:09:11.828317Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:39 with partCount# 1, rowCount# 10, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:11.828339Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037925 2025-06-24T21:09:11.828388Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:09:11.828538Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519625806116578442:2147]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:11.828565Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:11.828576Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-24T21:09:12.361262Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7519625806116578442:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:12.361308Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:12.361372Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:7519625806116578442:2147], Recipient [2:7519625806116578442:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:12.361401Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::UncommittedReads [GOOD] Test command err: 2025-06-24T21:09:11.509436Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:11.509894Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:11.510088Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002d3e/r3tmp/tmp8iEdL1/pdisk_1.dat 2025-06-24T21:09:11.879875Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:09:11.884186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:11.929355Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:11.939866Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799348550566 != 1750799348550570 2025-06-24T21:09:11.991294Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:11.991467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:12.003452Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:12.094438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:12.134169Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:12.135355Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:12.135839Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:09:12.136095Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:12.186482Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:12.187262Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:12.187432Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:12.189703Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T21:09:12.189809Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:09:12.189861Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:09:12.190242Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:12.190397Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:12.190506Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:09:12.191169Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:12.227164Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:09:12.227320Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:12.227401Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:09:12.227479Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:09:12.227548Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:09:12.227589Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:09:12.227809Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:12.227847Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:12.228127Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:09:12.228197Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:09:12.228236Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:09:12.228262Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:12.228293Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:09:12.228321Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:09:12.228348Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:09:12.228370Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:09:12.228420Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:09:12.228512Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:12.228546Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:12.228585Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:09:12.228912Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:09:12.228946Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:12.229017Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:09:12.229218Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:09:12.229275Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:09:12.229377Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:09:12.229460Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:09:12.229509Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T21:09:12.229548Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T21:09:12.229585Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:12.229891Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:12.229957Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T21:09:12.229992Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:09:12.230025Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:12.230082Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T21:09:12.230127Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T21:09:12.230173Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T21:09:12.230221Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T21:09:12.230251Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T21:09:12.231177Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:09:12.231228Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:12.231261Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:12.231320Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-06-24T21:09:12.231398Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:09:12.234011Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T21:09:12.234066Z ... arsing write transaction for 0 at 72075186224037888, record: Operations { Type: OPERATION_UPSERT TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } ColumnIds: 1 ColumnIds: 2 PayloadIndex: 0 PayloadFormat: FORMAT_CELLVEC } TxMode: MODE_IMMEDIATE 2025-06-24T21:09:13.655047Z node 1 :TX_DATASHARD TRACE: datashard_write_operation.cpp:213: Table /Root/table-1, shard: 72075186224037888, write point (Uint32 : 4) 2025-06-24T21:09:13.655087Z node 1 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint32 : 4) table: [72057594046644480:2:1] 2025-06-24T21:09:13.655193Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CheckWrite 2025-06-24T21:09:13.655241Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-24T21:09:13.655268Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CheckWrite 2025-06-24T21:09:13.655293Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T21:09:13.655317Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T21:09:13.655351Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v2000/18446744073709551615 ImmediateWriteEdge# v2500/18446744073709551615 ImmediateWriteEdgeReplied# v2500/18446744073709551615 2025-06-24T21:09:13.655402Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037888 2025-06-24T21:09:13.655440Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-24T21:09:13.655459Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:09:13.655473Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit ExecuteWrite 2025-06-24T21:09:13.655497Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 
72075186224037888 on unit ExecuteWrite 2025-06-24T21:09:13.655526Z node 1 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:6] at 72075186224037888 2025-06-24T21:09:13.655569Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v2000/18446744073709551615 ImmediateWriteEdge# v2500/18446744073709551615 ImmediateWriteEdgeReplied# v2500/18446744073709551615 2025-06-24T21:09:13.655661Z node 1 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:6] at 72075186224037888, row count=1 2025-06-24T21:09:13.655703Z node 1 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-24T21:09:13.655762Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:09:13.655780Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit ExecuteWrite 2025-06-24T21:09:13.655803Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-24T21:09:13.655825Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T21:09:13.655850Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is DelayComplete 2025-06-24T21:09:13.655871Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-24T21:09:13.655905Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:09:13.655938Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:09:13.655970Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-24T21:09:13.655984Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:09:13.656000Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished ... blocked commit for tablet 72075186224037888 2025-06-24T21:09:13.758099Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhwc1sr3tbv7w4h5fgpknhq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTFlMjU3NjUtYTA5NTAwZWMtYzQzZmM2YTMtN2E1NDU1YTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:09:13.759455Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [1:928:2722], Recipient [1:625:2529]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2025-06-24T21:09:13.759620Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-24T21:09:13.759694Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v2000/18446744073709551615 ImmediateWriteEdge# v2500/18446744073709551615 ImmediateWriteEdgeReplied# v2500/18446744073709551615 2025-06-24T21:09:13.759723Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v2500/18446744073709551615 2025-06-24T21:09:13.759768Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckRead 2025-06-24T21:09:13.759828Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T21:09:13.759858Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckRead 2025-06-24T21:09:13.759881Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T21:09:13.759920Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T21:09:13.759955Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-24T21:09:13.759992Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T21:09:13.760018Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:09:13.760041Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T21:09:13.760053Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteRead 2025-06-24T21:09:13.760117Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2025-06-24T21:09:13.760367Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is DelayComplete 2025-06-24T21:09:13.760390Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T21:09:13.760416Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:09:13.760436Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit 
CompletedOperations 2025-06-24T21:09:13.760468Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T21:09:13.760481Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:09:13.760525Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-24T21:09:13.760559Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T21:09:13.844447Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [1:24:2071] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 1 LatestStep: 3000 2025-06-24T21:09:13.844592Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:625: Actor# [1:24:2071] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 3000} 2025-06-24T21:09:13.981641Z node 1 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-24T21:09:13.981730Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T21:09:13.981816Z node 1 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 6 at tablet 72075186224037888 send to client, propose latency: 1000 ms, status: STATUS_COMPLETED 2025-06-24T21:09:13.981927Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:09:13.982051Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-24T21:09:13.982102Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:7] at 72075186224037888 on unit ExecuteRead 2025-06-24T21:09:13.982171Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[1:928:2722], 0} after executionsCount# 1 2025-06-24T21:09:13.982227Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[1:928:2722], 0} sends rowCount# 4, bytes# 128, quota rows left# 997, quota bytes left# 5242752, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T21:09:13.982376Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[1:928:2722], 0} finished in read 2025-06-24T21:09:13.985120Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [1:928:2722], Recipient [1:625:2529]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T21:09:13.985224Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 3 } }, { items { uint32_value: 4 } items { uint32_value: 4 } } >> KqpLocks::MixedTxFail+useSink [GOOD] >> TPartBtreeIndexIteration::NoNodes [GOOD] >> TPartBtreeIndexIteration::NoNodes_Groups |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> TxOrderInternals::OperationOrder [GOOD] |94.2%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::MapperSequentialCalls [GOOD] >> TestKinesisHttpProxy::GoodRequestGetRecordsLongStreamName >> DataShardOutOfOrder::TestPlannedTimeoutSplit >> DataShardTxOrder::ReadWriteReorder >> TSharedPageCache::ThreeLeveledLRU [GOOD] >> TSharedPageCache::S3FIFO |94.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |94.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> TestYmqHttpProxy::TestListQueues [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpLocks::MixedTxFail+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 23054, MsgBus: 13525 2025-06-24T21:08:53.063513Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625879807336808:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:53.063574Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031b5/r3tmp/tmpFARD3v/pdisk_1.dat 2025-06-24T21:08:53.566528Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:53.566784Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:53.566905Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:53.571269Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625879807336785:2079] 1750799333060768 != 1750799333060771 2025-06-24T21:08:53.583435Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23054, node 1 2025-06-24T21:08:53.654083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:53.654111Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:53.654122Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:53.654237Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13525 2025-06-24T21:08:54.108354Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13525 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:54.373758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:54.387864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:54.409849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:54.578560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:54.747423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:54.841051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:56.567894Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625892692240326:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:56.567992Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:56.885307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:56.918801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:56.949488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:57.014955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:57.039619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:57.079109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:57.148615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:57.240101Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625896987208291:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:57.240184Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625896987208296:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:57.240219Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:57.244221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:57.254566Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625896987208298:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:57.354260Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625896987208349:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:58.063599Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625879807336808:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:58.063657Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:58.972093Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519625901282175967:2473], SessionActorId: [1:7519625901282175914:2473], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`, code: 2001 . sessionActorId=[1:7519625901282175914:2473]. isRollback=0 2025- ... d=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:09:13.706079Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037915;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:09:13.706622Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:09:13.709692Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:09:13.710185Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:09:13.711114Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:09:13.711828Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:09:13.713803Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:09:13.714372Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037917;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:09:13.716828Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:09:13.717302Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:09:13.718655Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037917;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:09:13.719069Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037919;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:09:13.721658Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:09:13.722184Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037943;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:09:13.723124Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037919;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:09:13.723556Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037945;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:09:13.726788Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037943;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:09:13.727287Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:09:13.727396Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037945;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:09:13.727756Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:09:13.731614Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:09:13.731906Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:09:13.732280Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037939;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:09:13.732590Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037933;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:09:13.736022Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037939;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:09:13.736406Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037933;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:09:13.962168Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:09:13.962701Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:09:13.963283Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:09:13.963825Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;self_id=[3:7519625957125296770:2345];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037907;local_tx_no=13;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037899,72075186224037947;receive=72075186224037903; 2025-06-24T21:09:13.963917Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;self_id=[3:7519625957125296770:2345];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037907;local_tx_no=14;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037899,72075186224037947;receive=72075186224037903; 2025-06-24T21:09:13.964093Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;self_id=[3:7519625957125296770:2345];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037907;local_tx_no=16;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037947;receive=72075186224037899; 2025-06-24T21:09:13.964157Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;self_id=[3:7519625957125296770:2345];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037907;local_tx_no=17;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037947;receive=72075186224037899; 2025-06-24T21:09:13.964874Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:09:14.296870Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976715668; 2025-06-24T21:09:14.298547Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2751: SelfId: [3:7519625970010200903:2790], SessionActorId: [3:7519625965715233552:2790], Got LOCKS BROKEN for table. ShardID=72075186224037888, Sink=[3:7519625970010200903:2790].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-24T21:09:14.298660Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:7519625970010200903:2790], SessionActorId: [3:7519625965715233552:2790], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/DataShard`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[3:7519625965715233552:2790]. isRollback=0 2025-06-24T21:09:14.298828Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=Yzk0ZGQ2MzUtNTQxNWQ5NjEtZTY1YzBiNmMtOTNiMmZkZjU=, ActorId: [3:7519625965715233552:2790], ActorState: ExecuteState, TraceId: 01jyhwc2aqa9g1hga2vawza05e, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [3:7519625970010200929:2790] from: [3:7519625970010200903:2790] 2025-06-24T21:09:14.298892Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:7519625970010200929:2790] TxId: 281474976715668. Ctx: { TraceId: 01jyhwc2aqa9g1hga2vawza05e, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Yzk0ZGQ2MzUtNTQxNWQ5NjEtZTY1YzBiNmMtOTNiMmZkZjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/DataShard`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-24T21:09:14.298976Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037936;self_id=[3:7519625957125296747:2340];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037936;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:14.299040Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=Yzk0ZGQ2MzUtNTQxNWQ5NjEtZTY1YzBiNmMtOTNiMmZkZjU=, ActorId: [3:7519625965715233552:2790], ActorState: ExecuteState, TraceId: 01jyhwc2aqa9g1hga2vawza05e, Create QueryResponse for error on request, msg: 2025-06-24T21:09:14.300795Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037936;tx_state=TTxProgressTx::Execute;tx_current=281474976715668;tx_id=281474976715668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715668; 2025-06-24T21:09:14.301984Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037936;tx_state=TTxProgressTx::Complete;fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=lock invalidated;tx_id=281474976715668; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkTx::OlapSnapshotROInteractive2 [GOOD] Test command err: Trying to start YDB, gRPC: 23973, MsgBus: 22473 2025-06-24T21:08:42.179371Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625830674464087:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:42.179437Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003210/r3tmp/tmpkI0k3j/pdisk_1.dat 2025-06-24T21:08:42.535237Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625830674464067:2079] 1750799322174702 != 1750799322174705 2025-06-24T21:08:42.563794Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23973, node 1 2025-06-24T21:08:42.641125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:42.641221Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:42.643935Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:42.729285Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:42.729305Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:42.729312Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:42.729418Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22473 TClient is connected to server localhost:22473 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:08:43.212424Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:43.318322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:43.336271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:45.337238Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625843559366590:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:45.337309Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625843559366596:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:45.337369Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:45.346494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:45.360585Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625843559366612:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:08:45.428768Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625843559366663:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:45.773955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:08:45.972670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625843559366843:2308];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:08:45.972849Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625843559366843:2308];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:08:45.973087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625843559366843:2308];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:08:45.973193Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625843559366843:2308];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:08:45.973282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625843559366843:2308];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:08:45.973413Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625843559366843:2308];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:08:45.973513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625843559366843:2308];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:08:45.973646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625843559366843:2308];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:08:45.973748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625843559366843:2308];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:08:45.973857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7519625843559366843:2308];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:08:45.973956Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625843559366843:2308];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:08:45.975998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519625843559366837:2304];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:08:45.976111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519625843559366837:2304];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:08:45.976299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519625843559366837:2304];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:08:45.976388Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519625843559366837:2304];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:08:45.976508Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519625843559366837:2304];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:08:45.976619Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519625843559366837:2304];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:08:45.976723Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519625843559366837:2304];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:08:45.976818Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519625843559366837:2304];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:08:45.976902Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519625843559366837:2304];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunk ... 
TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=23;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037896,72075186224037897,72075186224037913,72075186224037970,72075186224037993;receive=72075186224037981; 2025-06-24T21:09:11.580108Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=24;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037896,72075186224037897,72075186224037913,72075186224037970,72075186224037993;receive=72075186224037981; 2025-06-24T21:09:11.580253Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=26;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037896,72075186224037913,72075186224037970,72075186224037993;receive=72075186224037981; 2025-06-24T21:09:11.580309Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=27;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037896,72075186224037913,72075186224037970,72075186224037993;receive=72075186224037981; 2025-06-24T21:09:11.580369Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=28;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037896,72075186224037913,72075186224037970,72075186224037993;receive=72075186224037897; 2025-06-24T21:09:11.580429Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=29;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037896,72075186224037913,72075186224037970,72075186224037993;receive=72075186224037897; 2025-06-24T21:09:11.580496Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=30;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037896,72075186224037913,72075186224037970,72075186224037993;receive=72075186224037897; 2025-06-24T21:09:11.580600Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=31;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037896,72075186224037913,72075186224037970,72075186224037993;receive=72075186224037897; 2025-06-24T21:09:11.580644Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037970;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-24T21:09:11.580777Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=33;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037896,72075186224037913,72075186224037970;receive=72075186224037993; 2025-06-24T21:09:11.580839Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=34;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037896,72075186224037913,72075186224037970;receive=72075186224037993; 2025-06-24T21:09:11.581027Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=36;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896,72075186224037913,72075186224037970;receive=72075186224037993; 2025-06-24T21:09:11.581095Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=37;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896,72075186224037913,72075186224037970;receive=72075186224037892; 2025-06-24T21:09:11.581157Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=38;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896,72075186224037913,72075186224037970;receive=72075186224037892; 2025-06-24T21:09:11.581222Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=39;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896,72075186224037913,72075186224037970;receive=72075186224037993; 2025-06-24T21:09:11.581287Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=40;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896,72075186224037913,72075186224037970;receive=72075186224037892; 2025-06-24T21:09:11.581353Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=41;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896,72075186224037913,72075186224037970;receive=72075186224037892; 2025-06-24T21:09:11.581417Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 
2025-06-24T21:09:11.581623Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=43;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896,72075186224037970;receive=72075186224037913; 2025-06-24T21:09:11.581685Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=44;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896,72075186224037970;receive=72075186224037913; 2025-06-24T21:09:11.581748Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=45;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896,72075186224037970;receive=72075186224037913; 2025-06-24T21:09:11.581817Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=46;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896,72075186224037970;receive=72075186224037913; 2025-06-24T21:09:11.581957Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=48;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896;receive=72075186224037970; 2025-06-24T21:09:11.582034Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=49;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896;receive=72075186224037970; 2025-06-24T21:09:11.582101Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=50;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896;receive=72075186224037970; 2025-06-24T21:09:11.582178Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=51;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896;receive=72075186224037970; 2025-06-24T21:09:11.582223Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037970;local_tx_no=15;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976710664; 2025-06-24T21:09:11.582236Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519625917074446772:2387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=52;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896;receive=72075186224037969; 2025-06-24T21:09:11.582748Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037996;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-24T21:09:11.583213Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037993;local_tx_no=15;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976710664; 2025-06-24T21:09:11.583672Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037892;local_tx_no=15;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976710664; 2025-06-24T21:09:11.583870Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037913;local_tx_no=15;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976710664; 2025-06-24T21:09:11.584335Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037896;local_tx_no=15;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976710664; 2025-06-24T21:09:12.699272Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:09:12.699303Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:13.074211Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;tx_state=TTxProgressTx::Execute;tx_current=281474976710667;tx_id=281474976710667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710667; |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TPartBtreeIndexIteration::NoNodes_Groups [GOOD] >> TPartBtreeIndexIteration::NoNodes_History >> TestYmqHttpProxy::TestPurgeQueue >> TSharedPageCache::S3FIFO [GOOD] >> TSharedPageCache::ReplacementPolicySwitch >> KqpAgg::AggWithLookup >> TSharedPageCache::ReplacementPolicySwitch [GOOD] >> TSharedPageCache::MiddleCache_FlatIndex >> TMultiversionObjectMap::MonteCarlo >> DataShardTxOrder::ReadWriteReorder [GOOD] >> KqpSort::ReverseLimitOptimized [GOOD] >> KqpSort::ReverseEightShardOptimized >> DataShardOutOfOrder::TestLateKqpScanAfterColumnDrop-UseSink [GOOD] >> TSharedPageCache::MiddleCache_FlatIndex [GOOD] >> TSharedPageCache::ZeroCache_BTreeIndex >> KqpSinkLocks::DifferentKeyUpdateOlap [GOOD] >> TGroupMapperTest::Mirror3dc3Nodes [GOOD] >> KqpNotNullColumns::InsertNotNull [GOOD] >> DataShardOutOfOrder::TestImmediateQueueThenSplit+UseSink >> KqpNotNullColumns::InsertFromSelect >> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed+EvWrite ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ReadWriteReorder [GOOD] Test command err: 2025-06-24T21:09:16.764119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:09:16.764219Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:16.766506Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:16.782935Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:16.783419Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:09:16.783687Z node 1 
:TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:16.823800Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:16.833359Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:16.833613Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:16.835068Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:09:16.835120Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:09:16.835187Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:09:16.835487Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:16.835555Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:16.835603Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:09:16.888695Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:16.937354Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:09:16.937559Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:16.937658Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:09:16.937704Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:16.937755Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:09:16.937810Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:16.937986Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:16.938039Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:16.938338Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:16.938457Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:16.938642Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:16.938690Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:16.938742Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:16.938786Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:16.938825Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 
2025-06-24T21:09:16.938859Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:16.938902Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:16.939026Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:16.939066Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:16.939120Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:09:16.942715Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\n\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:09:16.942799Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:16.942890Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:16.943069Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:09:16.943122Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:09:16.943205Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:09:16.943258Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:16.943297Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:09:16.943334Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:09:16.943368Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:16.943735Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:16.943772Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:09:16.943809Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:16.943863Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:16.943919Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:09:16.943969Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:16.944018Z node 1 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:09:16.944055Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:16.944085Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:16.956487Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:16.956574Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:16.956622Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:16.956673Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:16.956756Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:09:16.957294Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:16.957349Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:16.957405Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:16.957538Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:16.957592Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:16.957728Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:16.957777Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:09:16.957820Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:09:16.957858Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:16.961956Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:16.962024Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:16.962253Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:16.962324Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:16.962387Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute 
at 9437184 2025-06-24T21:09:16.962430Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:16.962467Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:16.962511Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:09:16.962565Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000001: ... 7185 executing on unit CompletedOperations 2025-06-24T21:09:17.832220Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000005:12] at 9437185 has finished 2025-06-24T21:09:17.832263Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:17.832296Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-24T21:09:17.832333Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437185 has no attached operations 2025-06-24T21:09:17.832368Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437185 2025-06-24T21:09:17.832535Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:236:2227], Recipient [1:236:2227]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:17.832564Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:17.832605Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:17.832629Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:17.832650Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:17.832675Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000005:12] in PlanQueue unit at 9437184 2025-06-24T21:09:17.832694Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit PlanQueue 2025-06-24T21:09:17.832715Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-24T21:09:17.832752Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit PlanQueue 2025-06-24T21:09:17.832771Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit LoadTxDetails 2025-06-24T21:09:17.832788Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit LoadTxDetails 2025-06-24T21:09:17.833320Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 9437184 loaded tx from db 1000005:12 keys extracted: 3 2025-06-24T21:09:17.833351Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-24T21:09:17.833369Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit LoadTxDetails 2025-06-24T21:09:17.833385Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] 
at 9437184 to execution unit FinalizeDataTxPlan 2025-06-24T21:09:17.833415Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit FinalizeDataTxPlan 2025-06-24T21:09:17.833441Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-24T21:09:17.833464Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit FinalizeDataTxPlan 2025-06-24T21:09:17.833501Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-24T21:09:17.833518Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit BuildAndWaitDependencies 2025-06-24T21:09:17.833565Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [1000005:12] is the new logically complete end at 9437184 2025-06-24T21:09:17.833587Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [1000005:12] is the new logically incomplete end at 9437184 2025-06-24T21:09:17.833616Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [1000005:12] at 9437184 2025-06-24T21:09:17.833642Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-24T21:09:17.833658Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-24T21:09:17.833671Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit BuildDataTxOutRS 2025-06-24T21:09:17.833704Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit BuildDataTxOutRS 2025-06-24T21:09:17.833745Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-24T21:09:17.833760Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit BuildDataTxOutRS 2025-06-24T21:09:17.833773Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit StoreAndSendOutRS 2025-06-24T21:09:17.833787Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit StoreAndSendOutRS 2025-06-24T21:09:17.833800Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-24T21:09:17.833814Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit StoreAndSendOutRS 2025-06-24T21:09:17.833852Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit PrepareDataTxInRS 2025-06-24T21:09:17.833885Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit PrepareDataTxInRS 2025-06-24T21:09:17.833927Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-24T21:09:17.833954Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit PrepareDataTxInRS 2025-06-24T21:09:17.833991Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to 
execution unit LoadAndWaitInRS 2025-06-24T21:09:17.834029Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit LoadAndWaitInRS 2025-06-24T21:09:17.834059Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-24T21:09:17.834084Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit LoadAndWaitInRS 2025-06-24T21:09:17.834114Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit ExecuteDataTx 2025-06-24T21:09:17.834152Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit ExecuteDataTx 2025-06-24T21:09:17.834686Z node 1 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000005:12] at tablet 9437184 with status COMPLETE 2025-06-24T21:09:17.834732Z node 1 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000005:12] at 9437184: {NSelectRow: 3, NSelectRange: 0, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 3, SelectRowBytes: 24, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-24T21:09:17.834817Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-24T21:09:17.834848Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit ExecuteDataTx 2025-06-24T21:09:17.834876Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit CompleteOperation 2025-06-24T21:09:17.834897Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit CompleteOperation 2025-06-24T21:09:17.835033Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is DelayComplete 2025-06-24T21:09:17.835051Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit CompleteOperation 2025-06-24T21:09:17.835072Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit CompletedOperations 2025-06-24T21:09:17.835105Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit CompletedOperations 2025-06-24T21:09:17.835139Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-24T21:09:17.835176Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit CompletedOperations 2025-06-24T21:09:17.835196Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000005:12] at 9437184 has finished 2025-06-24T21:09:17.835217Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:17.835237Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:17.835270Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:17.835303Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready 
operations at 9437184 2025-06-24T21:09:17.852364Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437185 step# 1000005 txid# 12} 2025-06-24T21:09:17.852454Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437185 step# 1000005} 2025-06-24T21:09:17.852530Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-24T21:09:17.852576Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:12] at 9437185 on unit CompleteOperation 2025-06-24T21:09:17.852647Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 12] from 9437185 at tablet 9437185 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:09:17.852702Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-24T21:09:17.855571Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437184 step# 1000005 txid# 12} 2025-06-24T21:09:17.855636Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437184 step# 1000005} 2025-06-24T21:09:17.855696Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:17.855732Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:12] at 9437184 on unit CompleteOperation 2025-06-24T21:09:17.855801Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 12] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:09:17.855861Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 >> DataShardOutOfOrder::UncommittedReadSetAck [GOOD] >> TPartBtreeIndexIteration::NoNodes_History [GOOD] >> TPartBtreeIndexIteration::OneNode >> DataShardOutOfOrder::TestShardRestartPlannedCommitShouldSucceed+EvWrite >> TSharedPageCache::ZeroCache_BTreeIndex [GOOD] >> TSharedPageCache::ZeroCache_FlatIndex |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::Mirror3dc3Nodes [GOOD] >> DataShardTxOrder::ZigZag_oo8_dirty >> TSharedPageCache::ZeroCache_FlatIndex [GOOD] >> TSharedPageCache_Actor::Request_Basics >> TSharedPageCache_Actor::Request_Basics [GOOD] >> TSharedPageCache_Actor::Request_Failed >> TSharedPageCache_Actor::Request_Failed [GOOD] >> TSharedPageCache_Actor::Request_Queue ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestLateKqpScanAfterColumnDrop-UseSink [GOOD] Test command err: 2025-06-24T21:09:08.513698Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:08.514064Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:08.514329Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002d8b/r3tmp/tmp3p6qlc/pdisk_1.dat 2025-06-24T21:09:08.872085Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:09:08.874510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:08.912959Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:08.922314Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:782: Updated table service config: ComputeActorsCount: 10000 ChannelBufferSize: 8388608 MkqlLightProgramMemoryLimit: 1048576 MkqlHeavyProgramMemoryLimit: 31457280 QueryMemoryLimit: 32212254720 PublishStatisticsIntervalSec: 2 MaxTotalChannelBuffersSize: 2147483648 MinChannelBufferSize: 2048 2025-06-24T21:09:08.922395Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-24T21:09:08.922434Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 7 2025-06-24T21:09:08.922529Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:382: Updated table service config. 
2025-06-24T21:09:08.922811Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799345350658 != 1750799345350662 2025-06-24T21:09:08.971264Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:09:08.972505Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T21:09:08.972714Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:08.972830Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:08.984456Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:09.068965Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T21:09:09.069007Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T21:09:09.069133Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:601:2509] 2025-06-24T21:09:09.192894Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:601:2509] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value1" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value2" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T21:09:09.193031Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:601:2509] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:09:09.193637Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:601:2509] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:09:09.193733Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:601:2509] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:09:09.194131Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:601:2509] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:09:09.194313Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:601:2509] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:09:09.194452Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:601:2509] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:09:09.194779Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:601:2509] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T21:09:09.196552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed 
ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:09.197695Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:601:2509] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T21:09:09.197773Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:601:2509] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T21:09:09.229999Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:617:2524], Recipient [1:625:2530]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:09.231183Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:617:2524], Recipient [1:625:2530]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:09.231621Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2530] 2025-06-24T21:09:09.231915Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:09.281366Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:617:2524], Recipient [1:625:2530]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:09.282030Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:09.282165Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:09.283835Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:09:09.283919Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:09:09.283978Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:09:09.284310Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:09.284446Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:09.284523Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:642:2530] in generation 1 2025-06-24T21:09:09.295439Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:09.336173Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:09:09.336357Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:09.336467Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:644:2540] 2025-06-24T21:09:09.336505Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:09:09.336578Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:09:09.336617Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: 
[CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:09:09.336845Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2530], Recipient [1:625:2530]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:09.336896Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:09.337223Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:09:09.337313Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:09:09.337367Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:09:09.337421Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:09.337466Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:09:09.337502Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:09:09.337546Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:09:09.337576Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:09:09.337625Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:09:09.338047Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:633:2534], Recipient [1:625:2530]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:09.338089Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:09.338149Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:624:2529], serverId# [1:633:2534], sessionId# [0:0:0] 2025-06-24T21:09:09.338219Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:633:2534] 2025-06-24T21:09:09.338258Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:09.338377Z node 1 :TX_DATAS ... node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:841:2671]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/table-1' scheme changed., code: 2028 , tablet id: 72075186224037888, actor_id: [2:626:2530] 2025-06-24T21:09:17.806516Z node 2 :KQP_COMPUTE DEBUG: kqp_scan_fetcher_actor.cpp:674: SelfId: [2:841:2671]. Enqueue for resolve 72075186224037888 2025-06-24T21:09:17.806573Z node 2 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:383;event=scanner_finished;tablet_id=72075186224037888;stop_shard=1; 2025-06-24T21:09:17.806645Z node 2 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:96;event=stop_scanner;actor_id=NO_VALUE_OPTIONAL;message=;final_flag=1; 2025-06-24T21:09:17.806741Z node 2 :KQP_COMPUTE DEBUG: kqp_scan_fetcher_actor.cpp:666: SelfId: [2:841:2671]. Sending TEvResolveKeySet update for table '/Root/table-1', range: [(Uint32 : NULL) ; ()), attempt #1 2025-06-24T21:09:17.806938Z node 2 :KQP_COMPUTE DEBUG: kqp_scan_fetcher_actor.cpp:253: SelfId: [2:841:2671]. Received TEvResolveKeySetResult update for table '/Root/table-1' 2025-06-24T21:09:17.806978Z node 2 :KQP_COMPUTE ERROR: kqp_scan_fetcher_actor.cpp:257: SelfId: [2:841:2671]. Resolve request failed for table '/Root/table-1', ErrorCount# 1 2025-06-24T21:09:17.807068Z node 2 :KQP_COMPUTE DEBUG: log.h:466: kqp_scan_compute_actor.cpp:168 :TEvTerminateFromFetcher: [2:841:2671]/[2:839:2669] 2025-06-24T21:09:17.807177Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [2:839:2669], TxId: 281474976715662, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=ZDE5ZWE1NzgtZWQ4NGYxYmItMWIyZGVmMmItMTM3ZTkzY2I=. TraceId : 01jyhwc4rjcsry5xnfpe6g7ak4. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. InternalError: SCHEME_ERROR KIKIMR_SCHEME_MISMATCH: {
: Error: Table '/Root/table-1' scheme changed., code: 2028 }. 2025-06-24T21:09:17.807340Z node 2 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715662, task: 1. pass away 2025-06-24T21:09:17.807435Z node 2 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715662;task_id=1;success=0;message={
: Error: COMPUTE_STATE_FAILURE }; 2025-06-24T21:09:17.809616Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715662, taskId: 1. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-24T21:09:17.809772Z node 2 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_fetcher_actor.cpp:106;event=TEvTerminateFromCompute;sender=[2:839:2669];info={
: Error: COMPUTE_STATE_FAILURE }; 2025-06-24T21:09:17.809847Z node 2 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:313;event=abort_all_scanners;error_message=Send abort execution from compute actor, message: {
: Error: COMPUTE_STATE_FAILURE }; 2025-06-24T21:09:17.810056Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:893: Schedule publish at 1970-01-01T00:00:04.000000Z, after 1.550000s 2025-06-24T21:09:17.810329Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:834:2643] TxId: 281474976715662. Ctx: { TraceId: 01jyhwc4rjcsry5xnfpe6g7ak4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDE5ZWE1NzgtZWQ4NGYxYmItMWIyZGVmMmItMTM3ZTkzY2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:839:2669], task: 1, state: COMPUTE_STATE_FAILURE, stats: { CpuTimeUs: 166063 Tasks { TaskId: 1 CpuTimeUs: 164596 Tables { TablePath: "/Root/table-1" } ComputeCpuTimeUs: 11 BuildCpuTimeUs: 164585 Sources { IngressName: "CS" Ingress { } } HostName: "ghrun-4pjfmvffsq" NodeId: 2 CreateTimeMs: 1750799357189 CurrentWaitInputTimeUs: 127688 UpdateTimeMs: 1750799357807 } MaxMemoryUsage: 1048576 } 2025-06-24T21:09:17.810444Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715662. Ctx: { TraceId: 01jyhwc4rjcsry5xnfpe6g7ak4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDE5ZWE1NzgtZWQ4NGYxYmItMWIyZGVmMmItMTM3ZTkzY2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:839:2669] 2025-06-24T21:09:17.810533Z node 2 :KQP_EXECUTER INFO: kqp_executer_impl.h:1951: ActorId: [2:834:2643] TxId: 281474976715662. Ctx: { TraceId: 01jyhwc4rjcsry5xnfpe6g7ak4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDE5ZWE1NzgtZWQ4NGYxYmItMWIyZGVmMmItMTM3ZTkzY2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. task: 1, does not have the CA id yet or is already complete 2025-06-24T21:09:17.810608Z node 2 :KQP_EXECUTER INFO: kqp_executer_impl.h:1946: ActorId: [2:834:2643] TxId: 281474976715662. Ctx: { TraceId: 01jyhwc4rjcsry5xnfpe6g7ak4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDE5ZWE1NzgtZWQ4NGYxYmItMWIyZGVmMmItMTM3ZTkzY2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. aborting compute actor execution, message: {
: Error: Terminate execution }, compute actor: [2:840:2670], task: 2 2025-06-24T21:09:17.810735Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:834:2643] TxId: 281474976715662. Ctx: { TraceId: 01jyhwc4rjcsry5xnfpe6g7ak4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDE5ZWE1NzgtZWQ4NGYxYmItMWIyZGVmMmItMTM3ZTkzY2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T21:09:17.810828Z node 2 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [2:840:2670], TxId: 281474976715662, task: 2. Ctx: { TraceId : 01jyhwc4rjcsry5xnfpe6g7ak4. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=ZDE5ZWE1NzgtZWQ4NGYxYmItMWIyZGVmMmItMTM3ZTkzY2I=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646735 2025-06-24T21:09:17.810904Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:840:2670], TxId: 281474976715662, task: 2. Ctx: { TraceId : 01jyhwc4rjcsry5xnfpe6g7ak4. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=ZDE5ZWE1NzgtZWQ4NGYxYmItMWIyZGVmMmItMTM3ZTkzY2I=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Handle abort execution event from: [2:834:2643], status: SCHEME_ERROR, reason: {
: Error: Terminate execution } 2025-06-24T21:09:17.811008Z node 2 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715662, task: 2. pass away 2025-06-24T21:09:17.811096Z node 2 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715662;task_id=2;success=0;message={
: Error: COMPUTE_STATE_FAILURE }; 2025-06-24T21:09:17.813944Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715662, taskId: 2. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-24T21:09:17.814224Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZDE5ZWE1NzgtZWQ4NGYxYmItMWIyZGVmMmItMTM3ZTkzY2I=, ActorId: [2:808:2643], ActorState: ExecuteState, TraceId: 01jyhwc4rjcsry5xnfpe6g7ak4, Create QueryResponse for error on request, msg: 2025-06-24T21:09:17.814652Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [2:59:2106] Handle TEvExecuteKqpTransaction 2025-06-24T21:09:17.814706Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [2:59:2106] TxId# 281474976715664 ProcessProposeKqpTransaction 2025-06-24T21:09:17.815201Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 2000, txId: 281474976715661] shutting down 2025-06-24T21:09:17.815281Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [2:59:2106] Handle TEvProposeTransaction 2025-06-24T21:09:17.815327Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [2:59:2106] TxId# 0 ProcessProposeTransaction 2025-06-24T21:09:17.815435Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:289: actor# [2:59:2106] Cookie# 0 userReqId# "" txid# 0 reqId# [2:876:2702] SnapshotReq marker# P0 2025-06-24T21:09:17.815906Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715664. Resolved key sets: 0 2025-06-24T21:09:17.816041Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhwc4rjcsry5xnfpe6g7ak4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDE5ZWE1NzgtZWQ4NGYxYmItMWIyZGVmMmItMTM3ZTkzY2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:17.816101Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715664. Ctx: { TraceId: 01jyhwc4rjcsry5xnfpe6g7ak4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDE5ZWE1NzgtZWQ4NGYxYmItMWIyZGVmMmItMTM3ZTkzY2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-24T21:09:17.816155Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:875:2643] TxId: 281474976715664. Ctx: { TraceId: 01jyhwc4rjcsry5xnfpe6g7ak4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDE5ZWE1NzgtZWQ4NGYxYmItMWIyZGVmMmItMTM3ZTkzY2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-24T21:09:17.816262Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:875:2643] TxId: 281474976715664. Ctx: { TraceId: 01jyhwc4rjcsry5xnfpe6g7ak4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDE5ZWE1NzgtZWQ4NGYxYmItMWIyZGVmMmItMTM3ZTkzY2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T21:09:17.816325Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:875:2643] TxId: 281474976715664. Ctx: { TraceId: 01jyhwc4rjcsry5xnfpe6g7ak4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDE5ZWE1NzgtZWQ4NGYxYmItMWIyZGVmMmItMTM3ZTkzY2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-24T21:09:17.816390Z node 2 :TX_PROXY DEBUG: resolvereq.cpp:152: Actor# [2:879:2702] txid# 0 HANDLE EvNavigateKeySetResult TResolveTablesActor marker# P1 ErrorCount# 0 2025-06-24T21:09:17.816703Z node 2 :TX_PROXY DEBUG: resolvereq.cpp:272: Actor# [2:879:2702] txid# 0 HANDLE EvResolveKeySetResult TResolveTablesActor marker# P2 ErrorCount# 0 2025-06-24T21:09:17.816806Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 3, sender: [2:553:2479], selfId: [2:57:2104], source: [2:808:2643] 2025-06-24T21:09:17.816958Z node 2 :TX_PROXY DEBUG: snapshotreq.cpp:1453: Actor# [2:876:2702] SEND TEvDiscardVolatileSnapshotRequest to datashard 72075186224037888 marker# P3 2025-06-24T21:09:17.817749Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=ZDE5ZWE1NzgtZWQ4NGYxYmItMWIyZGVmMmItMTM3ZTkzY2I=, workerId: [2:808:2643], local sessions count: 0 2025-06-24T21:09:17.817879Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:876:2702], Recipient [2:626:2530]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715661 Response { QueryIssues { message: "Table \'/Root/table-1\' scheme changed." issue_code: 2028 severity: 1 } QueryIssues { message: "Query invalidated on scheme/internal error during Scan execution" issue_code: 2019 severity: 1 } TxMeta { } } YdbStatus: ABORTED ConsumedRu: 363 >> TSharedPageCache_Actor::Request_Queue [GOOD] >> TSharedPageCache_Actor::Request_Queue_Failed >> TSharedPageCache_Actor::Request_Queue_Failed [GOOD] >> TSharedPageCache_Actor::Request_Queue_Fast >> KqpNewEngine::ContainerRegistryCombiner [GOOD] >> KqpNewEngine::DeferredEffects >> TPartBtreeIndexIteration::OneNode [GOOD] >> TPartBtreeIndexIteration::OneNode_Groups >> KqpLocksTricky::TestNoLocksIssueInteractiveTx+withSink [GOOD] >> KqpLocksTricky::TestNoLocksIssueInteractiveTx-withSink >> TestYmqHttpProxy::BillingRecordsForJsonApi [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkLocks::DifferentKeyUpdateOlap [GOOD] Test command err: Trying to start YDB, gRPC: 14018, MsgBus: 29880 2025-06-24T21:08:46.373709Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:08:46.374093Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:08:46.374286Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003207/r3tmp/tmpfmavFb/pdisk_1.dat 2025-06-24T21:08:46.755809Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 14018, node 1 2025-06-24T21:08:46.987913Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:47.006645Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:47.006719Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:47.006759Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:47.007034Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:47.007312Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799323294781 != 1750799323294785 2025-06-24T21:08:47.057893Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:47.058090Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:47.071871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29880 TClient is connected to server localhost:29880 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:08:47.430674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:47.519641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:47.684092Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:47.963165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:48.364199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:48.694124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:49.535000Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1685:3280], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:49.535305Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:49.568931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:49.763777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.057352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.355448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.620251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:50.973854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:51.294395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:51.721878Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2360:3778], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:51.721995Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:51.722544Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2365:3783], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:51.728772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:51.889297Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2367:3785], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:08:51.957180Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2422:3821] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:53.266845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:53.463109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ES ... nt=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037993,72075186224037996;receive=72075186224037981; 2025-06-24T21:09:15.870330Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=42;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037993;receive=72075186224037996; 2025-06-24T21:09:15.870386Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=43;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037993;receive=72075186224037996; 2025-06-24T21:09:15.870415Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:09:15.870445Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=44;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037993;receive=72075186224037996; 2025-06-24T21:09:15.870498Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=45;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037993;receive=72075186224037996; 2025-06-24T21:09:15.870554Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=46;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037993;receive=72075186224037996; 2025-06-24T21:09:15.870614Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=47;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037993;receive=72075186224037996; 2025-06-24T21:09:15.870667Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=48;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037993;receive=72075186224037996; 2025-06-24T21:09:15.870916Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=50;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037969,72075186224037993;receive=72075186224037892; 2025-06-24T21:09:15.870982Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=51;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037969,72075186224037993;receive=72075186224037892; 2025-06-24T21:09:15.871040Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=52;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037969,72075186224037993;receive=72075186224037892; 2025-06-24T21:09:15.871100Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=53;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037969,72075186224037993;receive=72075186224037892; 2025-06-24T21:09:15.871173Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=54;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037969,72075186224037993;receive=72075186224037892; 2025-06-24T21:09:15.871237Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=55;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037969,72075186224037993;receive=72075186224037892; 2025-06-24T21:09:15.871299Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=56;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037969,72075186224037993;receive=72075186224037892; 2025-06-24T21:09:15.872094Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037969;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:09:15.872203Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:09:15.872645Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=59;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037969; 2025-06-24T21:09:15.872719Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=60;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037897; 2025-06-24T21:09:15.872768Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=61;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037969; 2025-06-24T21:09:15.872818Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=62;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037969; 2025-06-24T21:09:15.872898Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:09:15.872947Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=63;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037897; 2025-06-24T21:09:15.873023Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=64;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037969; 2025-06-24T21:09:15.873068Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=65;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037897; 2025-06-24T21:09:15.873113Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=66;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037969; 2025-06-24T21:09:15.873159Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=67;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037897; 2025-06-24T21:09:15.873209Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=68;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037969; 2025-06-24T21:09:15.873258Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=69;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037897; 2025-06-24T21:09:15.873323Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=70;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037969; 2025-06-24T21:09:15.873383Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=71;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037897; 2025-06-24T21:09:15.873441Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[3:7519625943881375955:2449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=72;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037897; 2025-06-24T21:09:15.874229Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:09:16.637672Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; >> TSharedPageCache_Actor::Request_Queue_Fast [GOOD] >> TSharedPageCache_Actor::Request_Sequential >> TSharedPageCache_Actor::Request_Sequential [GOOD] >> TSharedPageCache_Actor::Request_Cached >> TSharedPageCache_Actor::Request_Cached [GOOD] >> TSharedPageCache_Actor::Request_Different_Collections >> TSharedPageCache_Actor::Request_Different_Collections [GOOD] >> TSharedPageCache_Actor::Request_Different_Pages ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::UncommittedReadSetAck [GOOD] Test command err: 2025-06-24T21:09:12.305740Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:12.306683Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:12.306843Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:12.306913Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:09:12.307453Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:12.307517Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002d68/r3tmp/tmpe576Ku/pdisk_1.dat 2025-06-24T21:09:12.725727Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:12.881111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:13.005760Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:13.005921Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:13.013853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:13.013982Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:13.028935Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:09:13.029490Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:13.029967Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:13.313561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:13.413212Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [2:1176:2339], Recipient [2:1201:2351]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:13.418668Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [2:1176:2339], Recipient [2:1201:2351]: NKikimr::TEvTablet::TEvRestored 
2025-06-24T21:09:13.419349Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:1201:2351] 2025-06-24T21:09:13.419684Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:13.467607Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [2:1176:2339], Recipient [2:1201:2351]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:13.476342Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:13.476748Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:13.478456Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:09:13.478556Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:09:13.478618Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:09:13.478968Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:13.479764Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:13.479881Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [2:1225:2351] in generation 1 2025-06-24T21:09:13.484917Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:13.524970Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:09:13.525203Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:13.525324Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:1229:2368] 2025-06-24T21:09:13.525366Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:09:13.525416Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:09:13.525469Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:09:13.525751Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:1201:2351], Recipient [2:1201:2351]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:13.525815Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:13.526193Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:09:13.526323Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:09:13.526420Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:09:13.526465Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:13.526564Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:09:13.526615Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:09:13.526660Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:09:13.526696Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:09:13.526745Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:09:13.584383Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [2:1233:2369], Recipient [2:1201:2351]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:13.584468Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:13.584542Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:1184:2732], serverId# [2:1233:2369], sessionId# [0:0:0] 2025-06-24T21:09:13.584879Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:759:2426], Recipient [2:1233:2369] 2025-06-24T21:09:13.584951Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:13.585075Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:09:13.585366Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:09:13.585478Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:09:13.585593Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:09:13.585647Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:09:13.585707Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T21:09:13.585754Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T21:09:13.585796Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:13.586115Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:13.586207Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T21:09:13.586252Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:09:13.586289Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 
2025-06-24T21:09:13.586364Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T21:09:13.586403Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T21:09:13.586448Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T21:09:13.586481Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T21:09:13.586540Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:13.590754Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152 ... 224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-24T21:09:17.677404Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T21:09:17.677446Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037888 dest 72075186224037890 producer 72075186224037888 txId 281474976715667 2025-06-24T21:09:17.677528Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 2512 txid# 281474976715667 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-24T21:09:17.678032Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:09:17.678522Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [2:1978:2465], Recipient [2:2106:2495]: {TEvReadSet step# 2662 txid# 281474976715669 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-24T21:09:17.678565Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T21:09:17.678598Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037888 dest 72075186224037890 producer 72075186224037888 txId 281474976715669 2025-06-24T21:09:17.678674Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 2662 txid# 281474976715669 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-24T21:09:17.679009Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [2:1978:2465], Recipient [2:2106:2495]: {TEvReadSet step# 2662 txid# 281474976715669 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-24T21:09:17.679045Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T21:09:17.679075Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037888 dest 72075186224037890 producer 72075186224037888 txId 281474976715669 
2025-06-24T21:09:17.679126Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 2662 txid# 281474976715669 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-24T21:09:17.679606Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [1:2038:3232], Recipient [2:2238:2538] 2025-06-24T21:09:17.679651Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T21:09:17.679697Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037889 dest 72075186224037890 producer 72075186224037889 txId 281474976715668 2025-06-24T21:09:17.679751Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 2513 txid# 281474976715668 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-24T21:09:17.680162Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [1:2038:3232], Recipient [2:2238:2538] 2025-06-24T21:09:17.680198Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T21:09:17.680236Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037889 dest 72075186224037890 producer 72075186224037889 txId 281474976715669 2025-06-24T21:09:17.680457Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:09:17.680729Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [1:2038:3232], Recipient [2:2238:2538] 2025-06-24T21:09:17.680769Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T21:09:17.680809Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037889 dest 72075186224037890 producer 72075186224037889 txId 281474976715669 2025-06-24T21:09:17.681826Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 2662 txid# 281474976715669 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-24T21:09:17.682086Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 2662 txid# 281474976715669 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-24T21:09:17.682248Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:755: Complete volatile write [2662 : 281474976715669] from 72075186224037890 at tablet 72075186224037890 send result to client [1:2190:3297] 2025-06-24T21:09:17.682873Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:09:17.686469Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-24T21:09:17.687240Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender 
[2:2106:2495], Recipient [2:1978:2465]: {TEvReadSet step# 2512 txid# 281474976715667 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletConsumer# 72075186224037890 Flags# 0 Seqno# 1} 2025-06-24T21:09:17.687293Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:17.687338Z node 2 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037890 consumer 72075186224037890 txId 281474976715667 2025-06-24T21:09:17.691838Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-24T21:09:17.691934Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Reply at 72075186224037890 {TEvReadSet step# 2662 txid# 281474976715669 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-24T21:09:17.691999Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-24T21:09:17.692182Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [2:2106:2495], Recipient [2:1978:2465]: {TEvReadSet step# 2662 txid# 281474976715669 TabletSource# 72075186224037890 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037890 ReadSet.Size()# 0 Seqno# 0 Flags# 3} 2025-06-24T21:09:17.692229Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T21:09:17.692266Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037888 source 72075186224037890 dest 72075186224037888 producer 72075186224037890 txId 281474976715669 2025-06-24T21:09:17.692364Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037888 got read set: {TEvReadSet step# 2662 txid# 281474976715669 TabletSource# 72075186224037890 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037890 ReadSet.Size()# 0 Seqno# 0 Flags# 3} 2025-06-24T21:09:17.692430Z node 2 :TX_DATASHARD NOTICE: datashard_pipeline.cpp:734: Outdated readset for 2662:281474976715669 at 72075186224037888 2025-06-24T21:09:17.692493Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2025-06-24T21:09:17.692591Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-24T21:09:17.693010Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [2:2106:2495], Recipient [1:2038:3232] 2025-06-24T21:09:17.693060Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:17.693110Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037890 consumer 72075186224037890 txId 281474976715668 2025-06-24T21:09:17.693950Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-24T21:09:17.694013Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Reply at 72075186224037890 {TEvReadSet step# 2662 txid# 281474976715669 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-24T21:09:17.694186Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: 
TTxReadSet::Complete at 72075186224037890 2025-06-24T21:09:17.694326Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [2:2106:2495], Recipient [2:1978:2465]: {TEvReadSet step# 2662 txid# 281474976715669 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletConsumer# 72075186224037890 Flags# 0 Seqno# 3} 2025-06-24T21:09:17.694365Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:17.694401Z node 2 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037890 consumer 72075186224037890 txId 281474976715669 2025-06-24T21:09:17.694667Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [2:2106:2495], Recipient [1:2038:3232] 2025-06-24T21:09:17.694720Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T21:09:17.694774Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037890 dest 72075186224037889 producer 72075186224037890 txId 281474976715669 2025-06-24T21:09:17.694848Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 2662 txid# 281474976715669 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 0 Seqno# 0 Flags# 3} 2025-06-24T21:09:17.694890Z node 1 :TX_DATASHARD NOTICE: datashard_pipeline.cpp:734: Outdated readset for 2662:281474976715669 at 72075186224037889 2025-06-24T21:09:17.694948Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-24T21:09:17.695060Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [2:2106:2495], Recipient [1:2038:3232] 2025-06-24T21:09:17.695093Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:17.695133Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037890 consumer 72075186224037890 txId 281474976715669 >> TSharedPageCache_Actor::Request_Different_Pages [GOOD] >> TSharedPageCache_Actor::Request_Different_Pages_Reversed >> TSharedPageCache_Actor::Request_Different_Pages_Reversed [GOOD] >> TSharedPageCache_Actor::Request_Subset >> DataShardTxOrder::ForceOnlineBetweenOnline_oo8 [GOOD] >> TSharedPageCache_Actor::Request_Subset [GOOD] >> TSharedPageCache_Actor::Request_Subset_Shuffled >> KqpSqlIn::SimpleKey >> TSharedPageCache_Actor::Request_Subset_Shuffled [GOOD] >> TSharedPageCache_Actor::Request_Superset >> TestKinesisHttpProxy::TestWrongStream [GOOD] >> TestKinesisHttpProxy::TestListStreamConsumersWithToken [GOOD] >> TGroupMapperTest::NonUniformClusterDifferentSlotsPerDisk [GOOD] >> TestYmqHttpProxy::TestChangeMessageVisibility >> TSharedPageCache_Actor::Request_Superset [GOOD] >> TSharedPageCache_Actor::Request_Superset_Reversed >> TSharedPageCache_Actor::Request_Superset_Reversed [GOOD] >> TSharedPageCache_Actor::Request_Crossing >> TSharedPageCache_Actor::Request_Crossing [GOOD] >> TSharedPageCache_Actor::Request_Crossing_Reversed >> TestKinesisHttpProxy::CreateDeleteStreamWithConsumerWithFlag [GOOD] >> TSharedPageCache_Actor::Request_Crossing_Reversed [GOOD] >> 
TSharedPageCache_Actor::Request_Crossing_Shuffled |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::NonUniformClusterDifferentSlotsPerDisk [GOOD] >> TSharedPageCache_Actor::Request_Crossing_Shuffled [GOOD] >> TSharedPageCache_Actor::Attach_Basics ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ForceOnlineBetweenOnline_oo8 [GOOD] Test command err: 2025-06-24T21:09:13.445327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:09:13.445394Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:13.447069Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:13.459694Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:13.460170Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:09:13.460419Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:13.508228Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:13.516285Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:13.516538Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:13.518172Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:09:13.518265Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:09:13.518324Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:09:13.518703Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:13.518796Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:13.518858Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:09:13.584865Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:13.617172Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:09:13.617389Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:13.617491Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:09:13.617525Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:13.617570Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:09:13.617609Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:13.617755Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:13.617801Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:13.618076Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:13.618189Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:13.618357Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:13.618396Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:13.618443Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:13.618477Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:13.618514Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:13.618559Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:13.618597Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:13.618686Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:13.618719Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:13.618772Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:09:13.621887Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:09:13.621959Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:13.622037Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:13.622226Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:09:13.622271Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:09:13.622338Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:09:13.622376Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:13.622416Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan 
for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:09:13.622458Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:09:13.622510Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:13.622798Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:13.622832Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:09:13.622863Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:13.622909Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:13.622962Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:09:13.622999Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:13.623040Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:09:13.623084Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:13.623110Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:13.635472Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:13.635588Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:13.635629Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:13.635666Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:13.635754Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:09:13.636336Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:13.636393Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:13.636437Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:13.636566Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:13.636596Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:13.636762Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:13.636809Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:09:13.636851Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:09:13.636908Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:13.640703Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:13.640767Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:13.640989Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:13.641033Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:13.641105Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:13.641148Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:13.641178Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:13.641252Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:09:13.641291Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
exec latency: 1 ms, propose latency: 2 ms 2025-06-24T21:09:20.091761Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2025-06-24T21:09:20.091782Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-24T21:09:20.091872Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-24T21:09:20.091894Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:152] at 9437186 on unit CompleteOperation 2025-06-24T21:09:20.091922Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 152] from 9437186 at tablet 9437186 send result to client [1:102:2135], exec latency: 1 ms, propose latency: 2 ms 2025-06-24T21:09:20.091957Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-24T21:09:20.091978Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-24T21:09:20.092825Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 137 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 45} 2025-06-24T21:09:20.092879Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:20.092928Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 137 2025-06-24T21:09:20.093052Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 2025-06-24T21:09:20.093084Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:20.093107Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 140 2025-06-24T21:09:20.093397Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2025-06-24T21:09:20.093428Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:20.093470Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 143 2025-06-24T21:09:20.093654Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2025-06-24T21:09:20.093699Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:20.093725Z node 1 :TX_DATASHARD DEBUG: 
datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 146 2025-06-24T21:09:20.093824Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2025-06-24T21:09:20.093856Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:20.093905Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 149 2025-06-24T21:09:20.094002Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-24T21:09:20.094031Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:20.094053Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 152 2025-06-24T21:09:20.094119Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:20.094170Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:149] at 9437184 on unit CompleteOperation 2025-06-24T21:09:20.094217Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 149] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 1 ms, propose latency: 3 ms 2025-06-24T21:09:20.094265Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 97} 2025-06-24T21:09:20.094302Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:20.094456Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:20.094491Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:150] at 9437184 on unit CompleteOperation 2025-06-24T21:09:20.094544Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 150] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:09:20.094575Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:20.094670Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:20.094692Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:151] at 9437184 on unit CompleteOperation 2025-06-24T21:09:20.094740Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 151] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 1 ms, propose latency: 3 ms 2025-06-24T21:09:20.094787Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 151 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 98} 
2025-06-24T21:09:20.094819Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:20.094920Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:20.094943Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:152] at 9437184 on unit CompleteOperation 2025-06-24T21:09:20.094974Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 152] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 1 ms, propose latency: 3 ms 2025-06-24T21:09:20.095029Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 99} 2025-06-24T21:09:20.095053Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:20.095202Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:20.095229Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:153] at 9437184 on unit CompleteOperation 2025-06-24T21:09:20.095257Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 153] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:09:20.095291Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:20.095377Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:20.095398Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:154] at 9437184 on unit CompleteOperation 2025-06-24T21:09:20.095426Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 154] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 1 ms, propose latency: 3 ms 2025-06-24T21:09:20.095476Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2025-06-24T21:09:20.095513Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:20.095693Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 97} 2025-06-24T21:09:20.095727Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:20.095766Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 149 2025-06-24T21:09:20.095870Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 151 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 98} 2025-06-24T21:09:20.095897Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 
2025-06-24T21:09:20.095921Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 151
2025-06-24T21:09:20.096000Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 99}
2025-06-24T21:09:20.096026Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck
2025-06-24T21:09:20.096049Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 152
2025-06-24T21:09:20.096117Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100}
2025-06-24T21:09:20.096144Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck
2025-06-24T21:09:20.096167Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 154
>> TSharedPageCache_Actor::Attach_Basics [GOOD]
>> TSharedPageCache_Actor::Attach_Request
>> TestKinesisHttpProxy::TestWrongStream2
>> DataShardTxOrder::ForceOnlineBetweenOnline
>> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLock-EvWrite [GOOD]
>> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLockOutOfOrder
>> TSharedPageCache_Actor::Attach_Request [GOOD]
>> TSharedPageCache_Actor::Detach_Basics
>> TestKinesisHttpProxy::TestCounters
>> TSharedPageCache_Actor::Detach_Basics [GOOD]
>> TSharedPageCache_Actor::Detach_Cached
>> TestKinesisHttpProxy::ListShardsExclusiveStartShardId [GOOD]
>> KqpExtractPredicateLookup::OverflowLookup [GOOD]
>> KqpExtractPredicateLookup::SimpleRange
>> TSharedPageCache_Actor::Detach_Cached [GOOD]
>> TSharedPageCache_Actor::Detach_Expired
>> DataShardOutOfOrder::TestUnprotectedReadsThenWriteVisibility
>> TSharedPageCache_Actor::Detach_Expired [GOOD]
>> TSharedPageCache_Actor::Detach_InFly
>> TSharedPageCache_Actor::Detach_InFly [GOOD]
>> TSharedPageCache_Actor::Detach_Queued
>> TestKinesisHttpProxy::BadRequestUnknownMethod
>> TGroupMapperTest::NonUniformClusterMirror3dcWithUnusableDomain
>> TSharedPageCache_Actor::Detach_Queued [GOOD]
>> TSharedPageCache_Actor::Unregister_Basics
>> TPartBtreeIndexIteration::OneNode_Groups [GOOD]
>> TPartBtreeIndexIteration::OneNode_History
>> TSharedPageCache_Actor::Unregister_Basics [GOOD]
>> TSharedPageCache_Actor::Unregister_Cached
>> TSharedPageCache_Actor::Unregister_Cached [GOOD]
>> TSharedPageCache_Actor::Unregister_Expired
>> KqpNewEngine::StaleRO+EnableFollowers [GOOD]
>> KqpNewEngine::StaleRO-EnableFollowers
>> TSharedPageCache_Actor::Unregister_Expired [GOOD]
>> TSharedPageCache_Actor::Unregister_InFly
>> TSharedPageCache_Actor::Unregister_InFly [GOOD]
>> TSharedPageCache_Actor::Unregister_Queued
>> TestKinesisHttpProxy::ListShardsTimestamp
>> DataShardOutOfOrder::TestPlannedTimeoutSplit [GOOD]
>> DataShardOutOfOrder::TestPlannedHalfOverloadedSplit-UseSink
>> TGroupMapperTest::NonUniformClusterMirror3dcWithUnusableDomain [GOOD]
>> TSharedPageCache_Actor::Unregister_Queued [GOOD]
>> TSharedPageCache_Actor::Unregister_Queued_Pending
>> TPartSlice::TrivialMerge [GOOD]
>> TPartSlice::SupersetByRowId [GOOD]
>> TPartSlice::Subtract [GOOD]
>> TPartSlice::UnsplitBorrow [GOOD]
>> TPartSliceLoader::RestoreMissingSlice
>> TSharedPageCache_Actor::Unregister_Queued_Pending [GOOD]
>> TSwitchableCache::Touch [GOOD]
>> TSwitchableCache::Erase [GOOD]
>> TSwitchableCache::EvictNext [GOOD]
>> TSwitchableCache::UpdateLimit [GOOD]
>> TSwitchableCache::Switch_Touch_RotatePages_All [GOOD]
>> TSwitchableCache::Switch_Touch_RotatePages_Parts [GOOD]
>> TSwitchableCache::Switch_RotatePages_Force [GOOD]
>> TSwitchableCache::Switch_RotatePages_Evicts [GOOD]
>> TSwitchableCache::Switch_Touch [GOOD]
>> TSwitchableCache::Switch_Erase [GOOD]
>> TSwitchableCache::Switch_EvictNext [GOOD]
>> TSwitchableCache::Switch_UpdateLimit [GOOD]
>> TVersions::WreckHead
>> KqpNotNullColumns::InsertFromSelect [GOOD]
>> KqpNotNullColumns::FailedMultiEffects
|94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::NonUniformClusterMirror3dcWithUnusableDomain [GOOD]
>> KqpSinkLocks::InsertWithBulkUpsert-UseBulkUpsert [GOOD]
>> TPartSliceLoader::RestoreMissingSlice [GOOD]
>> TPartSliceLoader::RestoreOneSlice
>> DataShardOutOfOrder::TestOutOfOrderRestartLocksReorderedWithoutBarrier
>> TPartSliceLoader::RestoreOneSlice [GOOD]
>> TPartSliceLoader::RestoreMissingSliceFullScreen [GOOD]
>> TPartSliceLoader::RestoreFromScreenIndexKeys [GOOD]
>> TPartSliceLoader::RestoreFromScreenDataKeys [GOOD]
>> TRowVersionRangesTest::SimpleInserts [GOOD]
>> TRowVersionRangesTest::MergeFailLeft [GOOD]
>> TRowVersionRangesTest::MergeFailRight [GOOD]
>> TRowVersionRangesTest::MergeFailOuter [GOOD]
>> TRowVersionRangesTest::MergeFailInner [GOOD]
>> TRowVersionRangesTest::MergeExtendLeft [GOOD]
>> TRowVersionRangesTest::MergeExtendLeftInner [GOOD]
>> TRowVersionRangesTest::MergeExtendLeftComplete [GOOD]
>> TRowVersionRangesTest::MergeExtendRight [GOOD]
>> TRowVersionRangesTest::MergeExtendRightInner [GOOD]
>> TRowVersionRangesTest::MergeExtendRightComplete [GOOD]
>> TRowVersionRangesTest::MergeExtendBoth [GOOD]
>> TRowVersionRangesTest::MergeHoleExact [GOOD]
>> TRowVersionRangesTest::MergeHoleInner [GOOD]
>> TRowVersionRangesTest::MergeHoleOuter [GOOD]
>> TRowVersionRangesTest::MergeAllOuter [GOOD]
>> TRowVersionRangesTest::MergeAllInner [GOOD]
>> TRowVersionRangesTest::MergeAllEdges [GOOD]
>> TRowVersionRangesTest::ContainsEmpty [GOOD]
>> TRowVersionRangesTest::ContainsNonEmpty [GOOD]
>> TRowVersionRangesTest::ContainsInvalid [GOOD]
>> TRowVersionRangesTest::AdjustDown [GOOD]
>> TRowVersionRangesTest::AdjustDownSnapshot [GOOD]
>> TRowVersionRangesTest::SteppedCookieAllocatorOrder [GOOD]
>> TRowVersionRangesTest::SteppedCookieAllocatorLowerBound [GOOD]
>> TS3FIFOCache::Touch [GOOD]
>> TS3FIFOCache::Touch_MainQueue [GOOD]
>> TS3FIFOCache::EvictNext [GOOD]
>> TS3FIFOCache::UpdateLimit [GOOD]
>> TS3FIFOCache::Erase [GOOD]
>> TS3FIFOCache::Random
>> KqpVectorIndexes::SimpleVectorIndexOrderByCosineDistanceWithCover+Nullable [GOOD]
>> TestYmqHttpProxy::TestDeleteQueue [GOOD]
>> TS3FIFOCache::Random [GOOD]
>> TS3FIFOGhostQueue::Basics [GOOD]
>> TScheme::Shapshot [GOOD]
>> TScheme::Delta [GOOD]
>> TScheme::Policy [GOOD]
>> TScreen::Cuts [GOOD]
>> TScreen::Join [GOOD]
>> TScreen::Sequential
>> TestKinesisHttpProxy::GoodRequestGetRecordsLongStreamName [GOOD]
>> TPartBtreeIndexIteration::OneNode_History [GOOD]
>> TPartBtreeIndexIteration::OneNode_Slices
>> KqpVectorIndexes::OrderByCosineLevel2-Nullable+UseSimilarity [GOOD]
>> TestYmqHttpProxy::TestListDeadLetterSourceQueues >>
TScreen::Sequential [GOOD] >> TScreen::Random >> TestKinesisHttpProxy::ErroneousRequestGetRecords >> TGroupMapperTest::NonUniformCluster [GOOD] >> DataShardOutOfOrder::TestReadTableWriteConflict >> DataShardOutOfOrder::TestImmediateQueueThenSplit+UseSink [GOOD] >> DataShardOutOfOrder::TestImmediateQueueThenSplit-UseSink >> KqpSort::ReverseEightShardOptimized [GOOD] >> KqpSort::PassLimit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::SimpleVectorIndexOrderByCosineDistanceWithCover+Nullable [GOOD] Test command err: Trying to start YDB, gRPC: 19296, MsgBus: 15267 2025-06-24T21:07:58.661843Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625641087462120:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:58.662326Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045aa/r3tmp/tmpL2KvbX/pdisk_1.dat 2025-06-24T21:07:59.048496Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19296, node 1 2025-06-24T21:07:59.070449Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:59.070787Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:59.082653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:59.150263Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:59.150288Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:59.150294Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:59.150402Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15267 TClient is connected to server localhost:15267 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:07:59.635127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:59.655288Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:59.655783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:07:59.668359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:07:59.812022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.974375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:00.035454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:01.892897Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625653972365515:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:01.893035Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:02.346543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:02.395933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:02.432059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:02.502752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:02.540840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:02.596991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:02.646404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:02.782446Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625658267333477:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:02.782543Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:02.782737Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625658267333482:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:02.786497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:02.796981Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625658267333484:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:08:02.866225Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625658267333535:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:03.653708Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625641087462120:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:03.653770Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:03.924522Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625662562301109:3601], Recipient [1:7519625641087462355:2168]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:03.924581Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:03.924597Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 7205759404664 ... xCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:09:22.084579Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7519625800412738469:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:22.084634Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:22.084700Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:7519625800412738469:2147], Recipient [2:7519625800412738469:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:22.084729Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:22.115750Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519625800412738469:2147]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:22.115799Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:22.115812Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 8 2025-06-24T21:09:22.115872Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 8 2025-06-24T21:09:22.115893Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.001359s, queue# 8 2025-06-24T21:09:22.115960Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:14 data size 0 row count 0 
2025-06-24T21:09:22.116026Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037901 maps to shardIdx: 72057594046644480:14 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:22.116040Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037901, followerId 0 2025-06-24T21:09:22.116104Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:14 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:22.116152Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037901 2025-06-24T21:09:22.116186Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:22 data size 0 row count 0 2025-06-24T21:09:22.116218Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037909 maps to shardIdx: 72057594046644480:22 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:22.116227Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037909, followerId 0 2025-06-24T21:09:22.116255Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:22 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:22.116269Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037909 2025-06-24T21:09:22.116287Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:23 data size 0 row count 0 2025-06-24T21:09:22.116316Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037910 maps to shardIdx: 72057594046644480:23 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:22.116325Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037910, followerId 0 2025-06-24T21:09:22.116352Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:23 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:22.116364Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037910 2025-06-24T21:09:22.116381Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:17 data size 0 row count 0 2025-06-24T21:09:22.116411Z node 2 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037904 maps to shardIdx: 72057594046644480:17 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:22.116420Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037904, followerId 0 2025-06-24T21:09:22.116445Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:17 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:22.116456Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037904 2025-06-24T21:09:22.116472Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:20 data size 0 row count 0 2025-06-24T21:09:22.116500Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037907 maps to shardIdx: 72057594046644480:20 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:22.116508Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037907, followerId 0 2025-06-24T21:09:22.116533Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:20 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:22.116546Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037907 2025-06-24T21:09:22.116562Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:21 data size 0 row count 0 2025-06-24T21:09:22.116590Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037908 maps to shardIdx: 72057594046644480:21 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:22.116599Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037908, followerId 0 2025-06-24T21:09:22.116624Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:21 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:22.116636Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037908 2025-06-24T21:09:22.116652Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:15 data size 0 row count 0 2025-06-24T21:09:22.116677Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: 
TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037902 maps to shardIdx: 72057594046644480:15 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:22.116685Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037902, followerId 0 2025-06-24T21:09:22.116711Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:15 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:22.116723Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037902 2025-06-24T21:09:22.116739Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:19 data size 0 row count 0 2025-06-24T21:09:22.116766Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037906 maps to shardIdx: 72057594046644480:19 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:22.116773Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037906, followerId 0 2025-06-24T21:09:22.116799Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:19 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:22.116811Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037906 2025-06-24T21:09:22.116867Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:09:22.118088Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519625800412738469:2147]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:22.118114Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:22.118125Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkLocks::InsertWithBulkUpsert-UseBulkUpsert [GOOD] Test command err: Trying to start YDB, gRPC: 16214, MsgBus: 14643 2025-06-24T21:08:48.408828Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625857840745893:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:48.408874Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031ec/r3tmp/tmpUSQ7ak/pdisk_1.dat 
2025-06-24T21:08:48.771223Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:48.775611Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625857840745871:2079] 1750799328401908 != 1750799328401911 TServer::EnableGrpc on GrpcPort 16214, node 1 2025-06-24T21:08:48.828500Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:48.829400Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:48.844450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:48.863810Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:48.863834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:48.863844Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:48.864004Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14643 TClient is connected to server localhost:14643 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:49.446343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:49.474503Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:51.562791Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625870725648383:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:51.562873Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:51.562948Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625870725648410:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:51.571890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:51.587939Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625870725648414:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:08:51.648687Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625870725648465:2333] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:51.960674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:08:52.169821Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625875020615933:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:08:52.169997Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519625875020615944:2308];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:08:52.170053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625875020615933:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:08:52.170095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519625875020615944:2308];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:08:52.170343Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625875020615933:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:08:52.170401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519625875020615944:2308];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:08:52.170466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625875020615933:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:08:52.170512Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519625875020615944:2308];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:08:52.170607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625875020615933:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:08:52.170630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[1:7519625875020615944:2308];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:08:52.170975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625875020615933:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:08:52.171101Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625875020615933:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:08:52.171230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625875020615933:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:08:52.171359Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625875020615933:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:08:52.171489Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625875020615933:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:08:52.171597Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625875020615933:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:08:52.173096Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519625875020615944:2308];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:08:52.173286Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519625875020615944:2308];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:08:52.173391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519625875020615944:2308];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:08:52.173483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519625875020615944:2308];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:08:52.173576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519625875020615944:2308];tablet_id=72075186224037891;process=TTxInitSchema:: ... main>: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[2:7519625960762353411:2929]. 
isRollback=0 2025-06-24T21:09:13.048169Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=NGNiZDEwMjItYjBiNjdlNzgtYjJlY2E4MzktZTJjYjRiNTQ=, ActorId: [2:7519625960762353411:2929], ActorState: ExecuteState, TraceId: 01jyhwc13t49y65erqgj0800rs, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7519625965057320750:2929] from: [2:7519625965057320749:2929] 2025-06-24T21:09:13.048370Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519625965057320750:2929] TxId: 281474976715665. Ctx: { TraceId: 01jyhwc13t49y65erqgj0800rs, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NGNiZDEwMjItYjBiNjdlNzgtYjJlY2E4MzktZTJjYjRiNTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-24T21:09:13.048646Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NGNiZDEwMjItYjBiNjdlNzgtYjJlY2E4MzktZTJjYjRiNTQ=, ActorId: [2:7519625960762353411:2929], ActorState: ExecuteState, TraceId: 01jyhwc13t49y65erqgj0800rs, Create QueryResponse for error on request, msg: 2025-06-24T21:09:13.051655Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_ABORTED;details=Distributed transaction aborted due to commit failure;tx_id=281474976715665; 2025-06-24T21:09:13.051892Z node 2 :TX_DATASHARD ERROR: datashard.cpp:751: Complete volatile write [1750799353093 : 281474976715665] from 72075186224037889 at tablet 72075186224037889, error: Status: STATUS_ABORTED Issues: { message: "Distributed transaction aborted due to commit failure" issue_code: 2011 severity: 1 } Trying to start YDB, gRPC: 3381, MsgBus: 1070 2025-06-24T21:09:14.561400Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519625969556023653:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:14.561449Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031ec/r3tmp/tmpTCpvIG/pdisk_1.dat 2025-06-24T21:09:14.757819Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:14.757927Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:14.761749Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:14.778546Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3381, node 3 2025-06-24T21:09:14.855777Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:14.855805Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:14.855814Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:14.855972Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1070 TClient is connected to server localhost:1070 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:15.473231Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:15.587677Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:18.413027Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625986735893436:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:18.413095Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519625986735893424:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:18.413253Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:18.417611Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:18.429092Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519625986735893447:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:09:18.531058Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519625986735893498:2332] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:18.596042Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:18.645260Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:19.599870Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519625969556023653:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:19.625532Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:09:19.881588Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:21.866822Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976710666; 2025-06-24T21:09:21.867838Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2751: SelfId: [3:7519625999620803538:2931], SessionActorId: [3:7519625999620803479:2931], Got LOCKS BROKEN for table. ShardID=72075186224037888, Sink=[3:7519625999620803538:2931].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-24T21:09:21.867943Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:7519625999620803538:2931], SessionActorId: [3:7519625999620803479:2931], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[3:7519625999620803479:2931]. isRollback=0 2025-06-24T21:09:21.868127Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=ZjMwZTM3YjYtNWZhMmVmMzMtNjQzNDE2NDQtMjM2MmQ3MQ==, ActorId: [3:7519625999620803479:2931], ActorState: ExecuteState, TraceId: 01jyhwc9qd10hknhpkfzqnk4g6, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [3:7519625999620803539:2931] from: [3:7519625999620803538:2931] 2025-06-24T21:09:21.868207Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:7519625999620803539:2931] TxId: 281474976710666. Ctx: { TraceId: 01jyhwc9qd10hknhpkfzqnk4g6, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjMwZTM3YjYtNWZhMmVmMzMtNjQzNDE2NDQtMjM2MmQ3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-24T21:09:21.868391Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=ZjMwZTM3YjYtNWZhMmVmMzMtNjQzNDE2NDQtMjM2MmQ3MQ==, ActorId: [3:7519625999620803479:2931], ActorState: ExecuteState, TraceId: 01jyhwc9qd10hknhpkfzqnk4g6, Create QueryResponse for error on request, msg: 2025-06-24T21:09:21.870967Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_ABORTED;details=Distributed transaction aborted due to commit failure;tx_id=281474976710666; 2025-06-24T21:09:21.871082Z node 3 :TX_DATASHARD ERROR: datashard.cpp:751: Complete volatile write [1750799361913 : 281474976710666] from 72075186224037889 at tablet 72075186224037889, error: Status: STATUS_ABORTED Issues: { message: "Distributed transaction aborted due to commit failure" issue_code: 2011 severity: 1 } |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::NonUniformCluster [GOOD] >> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed+EvWrite [GOOD] >> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed-EvWrite >> KqpAgg::AggWithLookup [GOOD] >> KqpAgg::AggWithSelfLookup >> DataShardOutOfOrder::TestShardRestartPlannedCommitShouldSucceed+EvWrite [GOOD] >> DataShardOutOfOrder::TestShardRestartPlannedCommitShouldSucceed-EvWrite ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::OrderByCosineLevel2-Nullable+UseSimilarity [GOOD] Test command err: Trying to start YDB, gRPC: 1915, MsgBus: 27350 2025-06-24T21:07:55.326948Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625628799908734:2162];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:55.326985Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045b2/r3tmp/tmp4p3qZs/pdisk_1.dat 2025-06-24T21:07:55.662885Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:55.662998Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:55.693944Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:55.702399Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1915, node 1 2025-06-24T21:07:55.773635Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:55.773670Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:55.773678Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:55.773772Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27350 TClient is connected to server localhost:27350 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:07:56.332588Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:56.415301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:56.436007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:56.610822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:56.793963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:56.884361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:58.657269Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625641684812131:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:58.657405Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:58.963905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.002109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.069600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.114967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.146940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.192581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.223669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:59.316971Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625645979780090:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:59.317083Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:59.317593Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625645979780095:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:59.321163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:59.335392Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625645979780097:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:59.396994Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625645979780150:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:00.324252Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625628799908734:2162];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:00.324309Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:08:00.478139Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625650274747721:3598], Recipient [1:7519625628799908943:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:00.478176Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:00.478186Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:08:00.478225Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519625650274747717:3595], Recipient [1:75196256 ... ejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 43 Memory: 119352 } ShardState: 2 UserTablePartOwners: 72075186224037909 NodeId: 2 StartTime: 1750799316784 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:09:21.949169Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:09:21.949219Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037909 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0043 2025-06-24T21:09:21.949356Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037909 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:09:21.949383Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.099995s, queue# 1 2025-06-24T21:09:21.949573Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, 
Sender [2:7519625805009434990:2323], Recipient [2:7519625800714466722:2146]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037905 TableLocalId: 5 Generation: 1 Round: 3 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 44 Memory: 119352 } ShardState: 2 UserTablePartOwners: 72075186224037905 NodeId: 2 StartTime: 1750799316784 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:09:21.949589Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:09:21.949621Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037905 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0044 2025-06-24T21:09:21.949702Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037905 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:09:21.962133Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [2:7519625805009434986:2322], Recipient [2:7519625800714466722:2146]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037901 TableLocalId: 5 Generation: 1 Round: 3 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 61 Memory: 119352 } ShardState: 2 UserTablePartOwners: 72075186224037901 NodeId: 2 StartTime: 1750799316784 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:09:21.962191Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:09:21.962228Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037901 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0061 2025-06-24T21:09:21.962324Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 
72057594046644480 from shard 72075186224037901 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:09:22.048670Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519625800714466722:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:22.048723Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:22.048739Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 3 2025-06-24T21:09:22.048821Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 3 2025-06-24T21:09:22.048842Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000539s, queue# 3 2025-06-24T21:09:22.048920Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:22 data size 0 row count 0 2025-06-24T21:09:22.048984Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037909 maps to shardIdx: 72057594046644480:22 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:22.048999Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037909, followerId 0 2025-06-24T21:09:22.049071Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:22 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:22.049120Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037909 2025-06-24T21:09:22.049154Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:18 data size 0 row count 0 2025-06-24T21:09:22.049187Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037905 maps to shardIdx: 72057594046644480:18 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:22.049197Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037905, followerId 0 2025-06-24T21:09:22.049233Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:18 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 
1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:22.049245Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037905 2025-06-24T21:09:22.049263Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:14 data size 0 row count 0 2025-06-24T21:09:22.049295Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037901 maps to shardIdx: 72057594046644480:14 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:22.049306Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037901, followerId 0 2025-06-24T21:09:22.049338Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:14 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:22.049350Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037901 2025-06-24T21:09:22.049411Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:09:22.050864Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519625800714466722:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:22.050909Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:22.050922Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-24T21:09:22.863390Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7519625800714466722:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:22.863446Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:22.863503Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [2:7519625800714466722:2146], Recipient [2:7519625800714466722:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:22.863524Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> TScreen::Random [GOOD] >> TScreen::Shrink [GOOD] >> TScreen::Cook [GOOD] >> TSharedPageCache::Limits >> TestYmqHttpProxy::TestPurgeQueue [GOOD] >> KqpNewEngine::DeferredEffects [GOOD] >> KqpNewEngine::Delete+UseSink >> TSharedPageCache::Limits [GOOD] >> TSharedPageCache::Limits_Config >> DataShardTxOrder::ZigZag_oo8_dirty [GOOD] >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesHuge >> TestYmqHttpProxy::TestSendMessageBatch >> KqpNamedExpressions::NamedExpressionChanged+UseSink >> TSharedPageCache::Limits_Config [GOOD] >> TSharedPageCache::ClockPro ------- [TM] {asan, 
default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ZigZag_oo8_dirty [GOOD] Test command err: 2025-06-24T21:09:19.598180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:09:19.598248Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:19.604535Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:19.619075Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:19.619696Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:09:19.620006Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:19.672413Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:19.682236Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:19.682515Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:19.684489Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:09:19.684708Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:09:19.684776Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:09:19.685146Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:19.685258Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:19.685342Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:09:19.759377Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:19.792390Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:09:19.792640Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:19.792759Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:09:19.792799Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:19.792842Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:09:19.792898Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:19.793084Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:19.793140Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:19.793448Z node 1 
:TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:19.793558Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:19.793737Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:19.793793Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:19.793856Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:19.793898Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:19.793935Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:19.793970Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:19.794016Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:19.794140Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:19.794201Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:19.794269Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:09:19.797537Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\001J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:09:19.797619Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:19.797706Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:19.797923Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:09:19.797975Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:09:19.798032Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:09:19.798099Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:19.798143Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:09:19.798198Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:09:19.798234Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 
2025-06-24T21:09:19.798676Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:19.798720Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:09:19.798758Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:19.798830Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:19.798894Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:09:19.798937Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:19.798982Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:09:19.799022Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:19.799048Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:19.811975Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:19.812070Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:19.812116Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:19.812164Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:19.812275Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:09:19.812895Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:19.812964Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:19.813012Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:19.813151Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:19.813185Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:19.813327Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:19.813386Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:09:19.813435Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:09:19.813473Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add 
[1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:19.817529Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:19.817635Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:19.817897Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:19.817944Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:19.818001Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:19.818045Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:19.818100Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:19.818164Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:09:19.818211Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 1000016:45] at 9437185 is Executed 2025-06-24T21:09:26.258861Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit WaitForPlan 2025-06-24T21:09:26.258907Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit PlanQueue 2025-06-24T21:09:26.259055Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 45 at step 1000016 at tablet 9437185 { Transactions { TxId: 45 AckTo { RawX1: 102 RawX2: 8589936727 } } Step: 1000016 MediatorID: 0 TabletID: 9437185 } 2025-06-24T21:09:26.259113Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-24T21:09:26.259376Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:345:2311], Recipient [2:345:2311]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:26.259413Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:26.259459Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437185 2025-06-24T21:09:26.259495Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:26.259524Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-24T21:09:26.259560Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000016:45] in PlanQueue unit at 9437185 2025-06-24T21:09:26.259591Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit PlanQueue 2025-06-24T21:09:26.259624Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:09:26.259661Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit PlanQueue 2025-06-24T21:09:26.259709Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit LoadTxDetails 2025-06-24T21:09:26.259738Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit LoadTxDetails 2025-06-24T21:09:26.260506Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 9437185 loaded tx from db 1000016:45 keys extracted: 2 2025-06-24T21:09:26.260554Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:09:26.260582Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit LoadTxDetails 2025-06-24T21:09:26.260608Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit FinalizeDataTxPlan 2025-06-24T21:09:26.260635Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit FinalizeDataTxPlan 2025-06-24T21:09:26.260688Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:09:26.260714Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit FinalizeDataTxPlan 2025-06-24T21:09:26.260736Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit BuildAndWaitDependencies 2025-06-24T21:09:26.260759Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit BuildAndWaitDependencies 2025-06-24T21:09:26.260834Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [1000016:45] is the new logically complete end at 9437185 2025-06-24T21:09:26.260868Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [1000016:45] is the new logically incomplete end at 9437185 2025-06-24T21:09:26.260899Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [1000016:45] at 9437185 2025-06-24T21:09:26.260936Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:09:26.260959Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit BuildAndWaitDependencies 2025-06-24T21:09:26.260983Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit BuildDataTxOutRS 2025-06-24T21:09:26.261006Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit BuildDataTxOutRS 2025-06-24T21:09:26.261069Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:09:26.261100Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit BuildDataTxOutRS 2025-06-24T21:09:26.261124Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit StoreAndSendOutRS 2025-06-24T21:09:26.261151Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit StoreAndSendOutRS 2025-06-24T21:09:26.261181Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:09:26.261207Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit StoreAndSendOutRS 2025-06-24T21:09:26.261245Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit PrepareDataTxInRS 2025-06-24T21:09:26.261270Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit PrepareDataTxInRS 2025-06-24T21:09:26.261299Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:09:26.261336Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit PrepareDataTxInRS 2025-06-24T21:09:26.261365Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit LoadAndWaitInRS 2025-06-24T21:09:26.261398Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit LoadAndWaitInRS 2025-06-24T21:09:26.261430Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:09:26.261455Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit LoadAndWaitInRS 2025-06-24T21:09:26.261478Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit ExecuteDataTx 2025-06-24T21:09:26.261502Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit ExecuteDataTx 2025-06-24T21:09:26.261922Z node 2 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000016:45] at tablet 9437185 with status COMPLETE 2025-06-24T21:09:26.261981Z node 2 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000016:45] at 9437185: {NSelectRow: 2, NSelectRange: 0, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 2, SelectRowBytes: 16, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-24T21:09:26.262046Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:09:26.262088Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit ExecuteDataTx 2025-06-24T21:09:26.262117Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit CompleteOperation 2025-06-24T21:09:26.262154Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit CompleteOperation 2025-06-24T21:09:26.262405Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is DelayComplete 2025-06-24T21:09:26.262440Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit CompleteOperation 2025-06-24T21:09:26.262472Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit CompletedOperations 2025-06-24T21:09:26.262502Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit CompletedOperations 
2025-06-24T21:09:26.262537Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-24T21:09:26.262562Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit CompletedOperations 2025-06-24T21:09:26.262588Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000016:45] at 9437185 has finished 2025-06-24T21:09:26.262620Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:26.262652Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-24T21:09:26.262682Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437185 has no attached operations 2025-06-24T21:09:26.262733Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437185 2025-06-24T21:09:26.276857Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437185 step# 1000016 txid# 45} 2025-06-24T21:09:26.276982Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437185 step# 1000016} 2025-06-24T21:09:26.277060Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-24T21:09:26.277115Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000016:45] at 9437185 on unit CompleteOperation 2025-06-24T21:09:26.277182Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000016 : 45] from 9437185 at tablet 9437185 send result to client [2:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:09:26.277236Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-24T21:09:26.277826Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437184 step# 1000016 txid# 45} 2025-06-24T21:09:26.277875Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437184 step# 1000016} 2025-06-24T21:09:26.277926Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:26.277955Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000016:45] at 9437184 on unit CompleteOperation 2025-06-24T21:09:26.278007Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000016 : 45] from 9437184 at tablet 9437184 send result to client [2:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:09:26.278049Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 >> TPartBtreeIndexIteration::OneNode_Slices [GOOD] >> TPartBtreeIndexIteration::OneNode_Groups_Slices >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesHuge [GOOD] >> KqpNotNullColumns::FailedMultiEffects [GOOD] >> KqpNotNullColumns::CreateIndexedTableWithDisabledNotNullDataColumns |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesHuge [GOOD] >> TestKinesisHttpProxy::TestWrongStream2 [GOOD] >> TSharedPageCache::ClockPro [GOOD] >> TSharedPageCache::BigCache_BTreeIndex >> KqpSinkTx::OlapSnapshotRO [GOOD] >> 
DataShardOutOfOrder::TestSnapshotReadAfterBrokenLockOutOfOrder [GOOD] >> TestKinesisHttpProxy::TestCounters [GOOD] >> TGroupMapperTest::Mirror3dc >> TSharedPageCache::BigCache_BTreeIndex [GOOD] >> TSharedPageCache::BigCache_FlatIndex >> TestKinesisHttpProxy::BadRequestUnknownMethod [GOOD] >> DataShardOutOfOrder::TestPlannedHalfOverloadedSplit-UseSink [GOOD] >> TestKinesisHttpProxy::TestWrongRequest >> DataShardTxOrder::ForceOnlineBetweenOnline [GOOD] >> TestYmqHttpProxy::TestChangeMessageVisibility [GOOD] >> TSharedPageCache::BigCache_FlatIndex [GOOD] >> TSharedPageCache::MiddleCache_BTreeIndex >> TPartBtreeIndexIteration::OneNode_Groups_Slices [GOOD] >> TPartBtreeIndexIteration::OneNode_History_Slices >> HttpProxyInsideYdb::TestIfEnvVariableSet [GOOD] >> TGroupMapperTest::Mirror3dc [GOOD] >> TestKinesisHttpProxy::TestEmptyHttpBody >> TSharedPageCache::MiddleCache_BTreeIndex [GOOD] >> TestKinesisHttpProxy::ListShardsTimestamp [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLockOutOfOrder [GOOD] Test command err: 2025-06-24T21:09:17.777151Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:17.777561Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:17.777793Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002d1a/r3tmp/tmpGgAl4p/pdisk_1.dat 2025-06-24T21:09:18.093196Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:09:18.096497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:18.129725Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:18.139043Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799354763389 != 1750799354763393 2025-06-24T21:09:18.184558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:18.184672Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:18.196210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:18.277559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:18.614502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:18.744191Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:18.939001Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:780:2632], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:18.939139Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:790:2637], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:18.939253Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:18.945064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:19.108059Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:794:2640], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:09:19.178374Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:850:2677] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:19.566163Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwc6yra1c5gs5ypkbyk189, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWRkYzZhMmEtNWQ5MGMyNTgtN2E2NDBkNjMtNzI4MTU0NWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:19.668640Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwc7k6dr4ebx47vy29y7j6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTY0MzA4OWEtYmQ4OGQ1N2MtZGU3YjFmNGQtZmRmZDU1MjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:20.343757Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhwc7wk7r4vscyck4hjb0ky, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTFmZmM1NTktMjU3YzczOWUtNWYyY2NkOTQtOTA0YjMxNmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } } 2025-06-24T21:09:20.711541Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhwc8kp9af48ef1p48avk5a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDkyZTgyYjUtM2MyY2MwNmQtZTgyZjQ1OTAtYzBiMmQwNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:20.841359Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhwc8q110ryjenxvgqtd5qr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTFmZmM1NTktMjU3YzczOWUtNWYyY2NkOTQtOTA0YjMxNmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:20.955609Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyhwc8ts88jeddj8med39vwg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTFmZmM1NTktMjU3YzczOWUtNWYyY2NkOTQtOTA0YjMxNmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:21.041016Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NTFmZmM1NTktMjU3YzczOWUtNWYyY2NkOTQtOTA0YjMxNmM=, ActorId: [1:913:2727], ActorState: ExecuteState, TraceId: 01jyhwc8xzber9j7skmr824m3n, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken 2025-06-24T21:09:21.053137Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyhwc8xzber9j7skmr824m3n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTFmZmM1NTktMjU3YzczOWUtNWYyY2NkOTQtOTA0YjMxNmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:24.829584Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:24.830022Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:24.830192Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002d1a/r3tmp/tmp7LYvgA/pdisk_1.dat 2025-06-24T21:09:25.112462Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-24T21:09:25.114333Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:25.145802Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:25.148279Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1750799361638533 != 1750799361638537 2025-06-24T21:09:25.195053Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:25.195222Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:25.206901Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:25.295912Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:25.594556Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:25.717402Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:25.911014Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:779:2632], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:25.911130Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:789:2637], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:25.911230Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:25.916597Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:26.077755Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:793:2640], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:09:26.114450Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:849:2677] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:26.182334Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwcdrn4srmb07csd6227zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTNlNGQyMmEtMzdkMTNkZGUtNmY0OWJjNDYtN2VmMTU2ZDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:26.267798Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwce1s0pe8bmyd7nc8f5b5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NGY2YmJiMGEtOTEzNzE2MWYtMjljNjAyY2QtN2JjNDBmZGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... performing the first select 2025-06-24T21:09:26.873599Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhwcea4fk2v454tyvta0v2t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NmVmYzNmY2YtOTNjMWEzZjYtZGU0Y2NkOGUtYWUxMjdlOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } } 2025-06-24T21:09:27.289321Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhwcere973arzvvdyfnpyrj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMxMGZjOWMtZTZhOTM4NDAtMjNmZjVlMWYtNzE3NWMwODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } } ... waiting for commit read sets 2025-06-24T21:09:27.388082Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhwcf45fzpmtr27gw819kr3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTMxMGZjOWMtZTZhOTM4NDAtMjNmZjVlMWYtNzE3NWMwODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... captured readset ... captured readset ... performing an upsert 2025-06-24T21:09:27.766437Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyhwcfgnayg7yqq0v0mzv90n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjliMmVjYzQtM2YzYmYzOTctZGQ1MmYxM2ItN2NjNzg1ZjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... performing the second select 2025-06-24T21:09:27.884616Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyhwcfk69qt1x4fzbf879e6w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NmVmYzNmY2YtOTNjMWEzZjYtZGU0Y2NkOGUtYWUxMjdlOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... performing the third select 2025-06-24T21:09:28.019990Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. 
Ctx: { TraceId: 01jyhwcfphd6cprt1xba1feksv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NmVmYzNmY2YtOTNjMWEzZjYtZGU0Y2NkOGUtYWUxMjdlOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... performing the last upsert and commit 2025-06-24T21:09:28.087992Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NmVmYzNmY2YtOTNjMWEzZjYtZGU0Y2NkOGUtYWUxMjdlOWI=, ActorId: [2:920:2725], ActorState: ExecuteState, TraceId: 01jyhwcftr9dd3p9bb9y66mh9k, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken >> KqpSinkTx::OlapInteractive [GOOD] >> TestYmqHttpProxy::TestChangeMessageVisibilityBatch |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::Mirror3dc [GOOD] >> KqpSqlIn::SimpleKey [GOOD] >> KqpSqlIn::SelectNotAllElements >> TTxDataShardMiniKQL::MemoryUsageImmediateSmallTx >> TTxDataShardMiniKQL::CrossShard_5_AllToAll >> TTxDataShardMiniKQL::CrossShard_1_Cycle ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestPlannedHalfOverloadedSplit-UseSink [GOOD] Test command err: 2025-06-24T21:09:19.151170Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:19.151534Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:19.151692Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002d14/r3tmp/tmpQGZ7bj/pdisk_1.dat 2025-06-24T21:09:19.476909Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:09:19.479672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:19.520529Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:19.525545Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799356423301 != 1750799356423305 2025-06-24T21:09:19.574788Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:19.574954Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:19.586595Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:19.671827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:19.719229Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:19.720441Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:19.720929Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:09:19.721212Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:19.771113Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:19.771965Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:19.772169Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:19.774110Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T21:09:19.774230Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:09:19.774304Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:09:19.774722Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:19.774899Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:19.775018Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:09:19.787799Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:19.821888Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:09:19.822109Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:19.822291Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:09:19.822413Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:09:19.822461Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:09:19.822521Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:09:19.822753Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:19.822807Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:19.823187Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:09:19.823290Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:09:19.823359Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:09:19.823403Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:19.823456Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:09:19.823494Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:09:19.823532Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:09:19.823599Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:09:19.823651Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:09:19.823799Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:19.823848Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:19.823903Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:09:19.824367Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:09:19.824418Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:19.824526Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:09:19.824750Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:09:19.824813Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:09:19.824942Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:09:19.825009Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:09:19.825067Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T21:09:19.825112Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T21:09:19.825150Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:19.825451Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:19.825491Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T21:09:19.825531Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:09:19.825568Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:19.825627Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T21:09:19.825668Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T21:09:19.825717Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T21:09:19.825752Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T21:09:19.825794Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T21:09:19.827403Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T21:09:19.827468Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:09:19.838330Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:09:19.838423Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:19.838467Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:19.838531Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... HARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [2:24:2071], Recipient [2:974:2770]: {TEvRegisterTabletResult TabletId# 72075186224037892 Entry# 2000} 2025-06-24T21:09:28.027846Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-24T21:09:28.027885Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037892 time 2000 2025-06-24T21:09:28.027925Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-24T21:09:28.028126Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-06-24T21:09:28.028176Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:28.028216Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037892 2025-06-24T21:09:28.028277Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037892 has no attached operations 2025-06-24T21:09:28.028320Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037892 2025-06-24T21:09:28.028360Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037892 TxInFly 0 2025-06-24T21:09:28.028415Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-24T21:09:28.028508Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877764, Sender [2:1128:2881], Recipient [2:974:2770]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:09:28.028543Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3169: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:09:28.028594Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037892, clientId# [2:1126:2879], serverId# [2:1128:2881], sessionId# [0:0:0] 2025-06-24T21:09:28.028943Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [2:24:2071], Recipient [2:974:2770]: 
NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 2000 ReadStep# 2000 } 2025-06-24T21:09:28.028981Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-24T21:09:28.029022Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037892 coordinator 72057594046316545 last step 0 next step 2000 2025-06-24T21:09:28.029087Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037892: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-06-24T21:09:28.029163Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2846: CheckMediatorStateRestored at 72075186224037892 promoting UnprotectedReadEdge to v2000/18446744073709551615 2025-06-24T21:09:28.040177Z node 2 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:304: 72075186224037893 ack snapshot OpId 281474976715665 2025-06-24T21:09:28.040304Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037893 2025-06-24T21:09:28.040404Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037893 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:09:28.040479Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037893 2025-06-24T21:09:28.040530Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037893, actorId: [2:1135:2888] 2025-06-24T21:09:28.040557Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037893 2025-06-24T21:09:28.040590Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037893 2025-06-24T21:09:28.040639Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-06-24T21:09:28.040843Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:978:2772], Recipient [2:978:2772]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:28.040884Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:28.041048Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553157, Sender [2:978:2772], Recipient [2:710:2589]: NKikimrTxDataShard.TEvSplitTransferSnapshotAck TabletId: 72075186224037893 OperationCookie: 281474976715665 2025-06-24T21:09:28.041096Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037889 Received snapshot Ack from dst 72075186224037893 for split OpId 281474976715665 2025-06-24T21:09:28.041457Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [2:1127:2880], Recipient [2:710:2589]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186224037893 ClientId: [2:1127:2880] ServerId: [2:1129:2882] } 2025-06-24T21:09:28.041496Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T21:09:28.041926Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [2:24:2071], Recipient [2:978:2772]: {TEvRegisterTabletResult TabletId# 72075186224037893 Entry# 2000} 2025-06-24T21:09:28.041968Z node 
2 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-24T21:09:28.041996Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037893 time 2000 2025-06-24T21:09:28.042027Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-06-24T21:09:28.042159Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893 2025-06-24T21:09:28.042215Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:28.042266Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037893 2025-06-24T21:09:28.042298Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037893 has no attached operations 2025-06-24T21:09:28.042324Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037893 2025-06-24T21:09:28.042349Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037893 TxInFly 0 2025-06-24T21:09:28.042393Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-06-24T21:09:28.042477Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877764, Sender [2:1129:2882], Recipient [2:978:2772]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:09:28.042510Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3169: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:09:28.042543Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037893, clientId# [2:1127:2880], serverId# [2:1129:2882], sessionId# [0:0:0] 2025-06-24T21:09:28.042816Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [2:24:2071], Recipient [2:978:2772]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 2000 ReadStep# 2000 } 2025-06-24T21:09:28.042849Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-24T21:09:28.042879Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037893 coordinator 72057594046316545 last step 0 next step 2000 2025-06-24T21:09:28.042920Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037893: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-06-24T21:09:28.042988Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2846: CheckMediatorStateRestored at 72075186224037893 promoting UnprotectedReadEdge to v2000/18446744073709551615 2025-06-24T21:09:28.053758Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:485: 72075186224037889 ack split to schemeshard 281474976715665 2025-06-24T21:09:28.057028Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553158, Sender [2:371:2365], Recipient [2:715:2591] 2025-06-24T21:09:28.057113Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:565: Got TEvSplitPartitioningChanged: opId: 281474976715665, at datashard: 72075186224037889, state: SplitSrcWaitForPartitioningChanged 2025-06-24T21:09:28.059112Z 
node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:532: 72075186224037889 ack split partitioning changed to schemeshard 281474976715665 2025-06-24T21:09:28.059207Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:09:28.059980Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [2:702:2584], Recipient [2:710:2589]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-24T21:09:28.580472Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [2:923:2631], Recipient [2:624:2529]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 923 RawX2: 8589937223 } TxBody: " \0008\000`\200\200\200\005j\324\006\010\001\022\225\006\010\001\022\024\n\022\t\233\003\000\000\000\000\000\000\021G\n\000\000\002\000\000\000\032\256\002\010\240\215\006\022\207\002\037\002\022KqpEffect\005\205\006\213\000\205\002\206\205\004\207\203\004?\004\014key\024valueh%kqp%tx_result_binding_0_1\204\214\002\030Inputs(Parameters\034Program\013?\000)\251\000?\n\014Arg\000\002)\211\002?\016\204\214\002(KqpEffects\000)\211\010?\032\213\010\203\010\203\010\203\005@\203\010\204?\006\210\203\004\203\004\203\0144KqpUpsertRows\000\013?&\003?\036\177\000\001\205\000\000\000\000\001\003? \004\003?\"\000\003?$\002\017)\211\002?(?\010 Iterator\000)\211\004?\010?\n\203\004\030Member\000?\026\003?@\000\002\004\000\006\010\002?.\003\203\004\004\003\203\004\002\003\003?0\000\r\010\000\n\001/\032\0369\000\000\000\000\000\000\000@i\000\000\000\000\000\000\360?q\000\000\000\000\ 2025-06-24T21:09:28.580551Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:28.580657Z node 2 :TX_DATASHARD NOTICE: datashard.cpp:3097: Rejecting data TxId 281474976715663 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state) 2025-06-24T21:09:28.581077Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715664, at schemeshard: 72057594046644480 2025-06-24T21:09:28.581552Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715665, at schemeshard: 72057594046644480 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkTx::OlapSnapshotRO [GOOD] Test command err: Trying to start YDB, gRPC: 20054, MsgBus: 10756 2025-06-24T21:08:51.856441Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625871045514248:2244];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:51.857528Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031b6/r3tmp/tmp3mnEhP/pdisk_1.dat 2025-06-24T21:08:52.371427Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:52.371538Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> 
Connecting 2025-06-24T21:08:52.396083Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:52.399416Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625871045514023:2079] 1750799331814471 != 1750799331814474 2025-06-24T21:08:52.406848Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20054, node 1 2025-06-24T21:08:52.473524Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:52.473607Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:52.473615Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:52.473740Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10756 2025-06-24T21:08:52.831452Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10756 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:53.202702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:53.220941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:55.178350Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625888225383862:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:55.178353Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625888225383837:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:55.178482Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:55.185079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:55.196275Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625888225383866:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:08:55.267429Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625888225383917:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:55.544189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:08:55.718759Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625888225384091:2304];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:08:55.719023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625888225384091:2304];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:08:55.719427Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625888225384091:2304];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:08:55.719571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625888225384091:2304];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:08:55.719701Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625888225384091:2304];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:08:55.719824Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625888225384091:2304];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:08:55.719941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625888225384091:2304];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:08:55.720038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625888225384091:2304];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:08:55.720182Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625888225384091:2304];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:08:55.720338Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037896;self_id=[1:7519625888225384091:2304];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:08:55.720477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519625888225384091:2304];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:08:55.721445Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519625888225384095:2308];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:08:55.721484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519625888225384095:2308];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:08:55.721646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519625888225384095:2308];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:08:55.721780Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519625888225384095:2308];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:08:55.721885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519625888225384095:2308];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:08:55.721996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519625888225384095:2308];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:08:55.722140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519625888225384095:2308];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:08:55.722260Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519625888225384095:2308];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:08:55.722373Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519625888225384095:2308];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunk ... 
=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-24T21:09:24.532277Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037970;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-24T21:09:24.532706Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-24T21:09:24.533529Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=42;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970,72075186224037996;receive=72075186224037896; 2025-06-24T21:09:24.533597Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=43;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970,72075186224037996;receive=72075186224037896; 2025-06-24T21:09:24.533652Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=44;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970,72075186224037996;receive=72075186224037896; 2025-06-24T21:09:24.533708Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=45;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970,72075186224037996;receive=72075186224037896; 2025-06-24T21:09:24.533764Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=46;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970,72075186224037996;receive=72075186224037896; 2025-06-24T21:09:24.533904Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=48;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970;receive=72075186224037896; 2025-06-24T21:09:24.533961Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=49;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970;receive=72075186224037896; 2025-06-24T21:09:24.534018Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=50;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970;receive=72075186224037996; 2025-06-24T21:09:24.534340Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=51;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970;receive=72075186224037996; 2025-06-24T21:09:24.534397Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=52;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970;receive=72075186224037996; 2025-06-24T21:09:24.534449Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=53;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970;receive=72075186224037996; 2025-06-24T21:09:24.534504Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=54;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970;receive=72075186224037996; 2025-06-24T21:09:24.534560Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=55;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970;receive=72075186224037996; 2025-06-24T21:09:24.534613Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=56;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970;receive=72075186224037996; 2025-06-24T21:09:24.537633Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=58;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037970;receive=72075186224037897; 2025-06-24T21:09:24.537710Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=59;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037970;receive=72075186224037897; 2025-06-24T21:09:24.537764Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=60;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037970;receive=72075186224037897; 2025-06-24T21:09:24.537892Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=61;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037970;receive=72075186224037897; 2025-06-24T21:09:24.537961Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=62;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037970;receive=72075186224037897; 2025-06-24T21:09:24.538016Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=63;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037970;receive=72075186224037897; 2025-06-24T21:09:24.538071Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=64;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037970;receive=72075186224037897; 2025-06-24T21:09:24.538235Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=66;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037970; 2025-06-24T21:09:24.538290Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=67;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037970; 2025-06-24T21:09:24.538373Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=68;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037970; 2025-06-24T21:09:24.538467Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=69;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037970; 2025-06-24T21:09:24.538533Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=70;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037970; 2025-06-24T21:09:24.538586Z node 2 :TX_COLUMNSHARD_TX WARN: 
log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=71;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037970; 2025-06-24T21:09:24.538640Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625978401649282:2437];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=72;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037970; 2025-06-24T21:09:24.539808Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-24T21:09:26.375818Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:09:26.375844Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:26.703712Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YTU3NWE0YjQtZjIwNjkxYTgtNzk1ZjRkZTMtYjg4ZGU4ZjQ=, ActorId: [2:7519626012761395490:3838], ActorState: ExecuteState, TraceId: 01jyhwcefa1ze87gx9hb0h6fev, Create QueryResponse for error on request, msg:
:3:29: Error: Operation 'Upsert' can't be performed in read only transaction, code: 2008 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ForceOnlineBetweenOnline [GOOD] Test command err: 2025-06-24T21:09:22.078243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:09:22.078299Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:22.083840Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:22.108386Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:22.108923Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:09:22.109199Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:22.158907Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:22.168583Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:22.168819Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:22.171192Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:09:22.171285Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:09:22.171363Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:09:22.171775Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:22.171906Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:22.171981Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:09:22.236382Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:22.284506Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:09:22.284769Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:22.284889Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:09:22.284933Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:22.284983Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:09:22.285034Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:22.285202Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:22.285260Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:22.285539Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:22.285646Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:22.285817Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:22.285871Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:22.285925Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:22.285964Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:22.286001Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:22.286043Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:22.286094Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:22.286214Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:22.286256Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:22.286310Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:09:22.289698Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:09:22.289778Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:22.289873Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:22.290058Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:09:22.290110Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:09:22.290197Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:09:22.290246Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:22.290286Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:09:22.290342Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:09:22.290397Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:22.290707Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:22.290747Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:09:22.290787Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:22.290849Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:22.290914Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:09:22.290957Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:22.291010Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:09:22.291046Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:22.291091Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:22.308283Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:22.308379Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:22.308429Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:22.308475Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:22.308578Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:09:22.309212Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:22.309269Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:22.309321Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:22.309470Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:22.309509Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:22.309667Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:22.309732Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:09:22.309785Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on 
unit WaitForPlan 2025-06-24T21:09:22.309835Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:22.315822Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:22.315927Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:22.316224Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:22.316277Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:22.316346Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:22.316411Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:22.316460Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:22.316517Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:09:22.316564Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... : datashard.cpp:801: Complete [1000005 : 151] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:09:28.793898Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:28.793994Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:28.794020Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:152] at 9437184 on unit StoreAndSendOutRS 2025-06-24T21:09:28.794051Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 50 at 9437184 from 9437184 to 9437186 txId 152 2025-06-24T21:09:28.794117Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:28.794142Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:152] at 9437184 on unit CompleteOperation 2025-06-24T21:09:28.794208Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 152] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:09:28.794282Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:28.794403Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:28.794429Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:153] at 9437184 on unit CompleteOperation 2025-06-24T21:09:28.794483Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 153] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:09:28.794533Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: 
[CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:28.794625Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:28.794650Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:154] at 9437184 on unit CompleteOperation 2025-06-24T21:09:28.794699Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 154] from 9437184 at tablet 9437184 send result to client [1:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:09:28.794729Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:28.794983Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 151 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 98} 2025-06-24T21:09:28.795024Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:28.795056Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 151 2025-06-24T21:09:28.795246Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 97} 2025-06-24T21:09:28.795323Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:28.795362Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 149 2025-06-24T21:09:28.795520Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 99} 2025-06-24T21:09:28.795550Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:28.795575Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 152 2025-06-24T21:09:28.795649Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [1:236:2227], Recipient [1:459:2398]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletProducer# 9437184 ReadSet.Size()# 7 Seqno# 50 Flags# 0} 2025-06-24T21:09:28.795685Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T21:09:28.795720Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 9437186 source 9437184 dest 9437186 producer 9437184 txId 152 2025-06-24T21:09:28.795788Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 9437186 got read set: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletProducer# 9437184 ReadSet.Size()# 7 Seqno# 50 Flags# 0} 2025-06-24T21:09:28.795833Z node 1 :TX_DATASHARD TRACE: operation.cpp:67: Filled readset for [1000005:152] from=9437184 to=9437186origin=9437184 2025-06-24T21:09:28.795897Z node 1 :TX_DATASHARD DEBUG: 
datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2025-06-24T21:09:28.796052Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:236:2227], Recipient [1:347:2312]: {TEvReadSet step# 1000005 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2025-06-24T21:09:28.796086Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:28.796118Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 154 2025-06-24T21:09:28.796191Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:459:2398], Recipient [1:459:2398]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:28.796221Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:28.796268Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437186 2025-06-24T21:09:28.796304Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:09:28.796359Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [1000005:152] at 9437186 for LoadAndWaitInRS 2025-06-24T21:09:28.796394Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:152] at 9437186 on unit LoadAndWaitInRS 2025-06-24T21:09:28.796446Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:152] at 9437186 is Executed 2025-06-24T21:09:28.796478Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:152] at 9437186 executing on unit LoadAndWaitInRS 2025-06-24T21:09:28.796510Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:152] at 9437186 to execution unit ExecuteDataTx 2025-06-24T21:09:28.796548Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:152] at 9437186 on unit ExecuteDataTx 2025-06-24T21:09:28.797120Z node 1 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000005:152] at tablet 9437186 with status COMPLETE 2025-06-24T21:09:28.797181Z node 1 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000005:152] at 9437186: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 5, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-24T21:09:28.797238Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:152] at 9437186 is ExecutedNoMoreRestarts 2025-06-24T21:09:28.797273Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:152] at 9437186 executing on unit ExecuteDataTx 2025-06-24T21:09:28.797303Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:152] at 9437186 to execution unit CompleteOperation 2025-06-24T21:09:28.797334Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:152] at 9437186 on unit CompleteOperation 2025-06-24T21:09:28.797558Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:152] at 9437186 is DelayComplete 
2025-06-24T21:09:28.797595Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:152] at 9437186 executing on unit CompleteOperation 2025-06-24T21:09:28.797644Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:152] at 9437186 to execution unit CompletedOperations 2025-06-24T21:09:28.797734Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:152] at 9437186 on unit CompletedOperations 2025-06-24T21:09:28.797784Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:152] at 9437186 is Executed 2025-06-24T21:09:28.797811Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:152] at 9437186 executing on unit CompletedOperations 2025-06-24T21:09:28.797837Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000005:152] at 9437186 has finished 2025-06-24T21:09:28.797875Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:28.797903Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437186 2025-06-24T21:09:28.797932Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437186 has no attached operations 2025-06-24T21:09:28.797959Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437186 2025-06-24T21:09:28.812994Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-24T21:09:28.813053Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:152] at 9437186 on unit CompleteOperation 2025-06-24T21:09:28.813114Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 152] from 9437186 at tablet 9437186 send result to client [1:102:2135], exec latency: 2 ms, propose latency: 4 ms 2025-06-24T21:09:28.813199Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-24T21:09:28.813254Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-24T21:09:28.813591Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:459:2398], Recipient [1:236:2227]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-24T21:09:28.813636Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:28.813674Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 152 >> DataShardOutOfOrder::TestImmediateQueueThenSplit-UseSink [GOOD] >> TestKinesisHttpProxy::ListShardsToken >> DataShardOutOfOrder::TestOutOfOrderRestartLocksReorderedWithoutBarrier [GOOD] >> DataShardOutOfOrder::TestPlannedHalfOverloadedSplit+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> HttpProxyInsideYdb::TestIfEnvVariableSet [GOOD] Test command err: 2025-06-24T21:08:36.547776Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625804593877492:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:36.547822Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004483/r3tmp/tmpva6vbg/pdisk_1.dat 2025-06-24T21:08:36.971960Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625804593877470:2079] 1750799316543079 != 1750799316543082 2025-06-24T21:08:36.990489Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:36.998593Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:36.998708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:37.001481Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3530, node 1 2025-06-24T21:08:37.213832Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:37.213867Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:37.213888Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:37.214035Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:37.561094Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:32084 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:37.766842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:08:37.795893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:32084 2025-06-24T21:08:38.029476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T21:08:38.036176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:08:38.039241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-24T21:08:38.053774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-24T21:08:38.061662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:38.232464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:38.289036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.333496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710665, at schemeshard: 72057594046644480 2025-06-24T21:08:38.345193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:38.381263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.462128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.500338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.537213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.574833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.607094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:39.711271Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625817478780736:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.711419Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.711749Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625817478780748:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.715644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:39.726996Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625817478780750:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-24T21:08:39.814535Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625817478780801:2864] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:40.591371Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jyhwb0mw5gvy301eb5spdzv7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Yzc0MTEyNGItNTc5NTQyY2UtOGE4YzM3NTItNmY1N2Q3MjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default} ... 6-24T21:09:27.910046Z node 7 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-24T21:09:27.910092Z node 7 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Minikql data response: {"settings": [], "truncated": false} 2025-06-24T21:09:27.910196Z node 7 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] execution duration: 3ms 2025-06-24T21:09:27.910451Z node 7 :SQS TRACE: user_settings_reader.cpp:89: Handle user settings: { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-24T21:09:27.911863Z node 7 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] HandleResponse { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { 
Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-24T21:09:27.911901Z node 7 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Attempt 1 execution duration: 10ms 2025-06-24T21:09:27.912403Z node 7 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-24T21:09:27.912450Z node 7 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Minikql data response: {"queues": [], "truncated": false} 2025-06-24T21:09:27.912586Z 
node 7 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] execution duration: 11ms 2025-06-24T21:09:27.913368Z node 7 :SQS TRACE: queues_list_reader.cpp:82: Handle queues list: { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-24T21:09:28.144428Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [7:7519626027132007540:2401]: Pool not found 2025-06-24T21:09:28.144677Z node 7 :SQS DEBUG: monitoring.cpp:60: [monitoring] Report deletion queue data lag: 0.000000s, count: 0 2025-06-24T21:09:28.352781Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [7:7519626027132007528:2397]: Pool not found 2025-06-24T21:09:28.352985Z node 7 :SQS DEBUG: cleanup_queue_data.cpp:100: [cleanup removed queues] getting queues... 2025-06-24T21:09:28.356567Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626031426974996:2421], DatabaseId: /Root/SQS, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:28.356570Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:602: [WorkloadService] [TDatabaseFetcherActor] ActorId: [7:7519626031426974997:2422], Database: /Root/SQS, Failed to fetch database info, UNSUPPORTED, issues: {
: Error: Invalid database path /Root/SQS, please check the correctness of the path } 2025-06-24T21:09:28.356712Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/SQS, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:28.650082Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [7:7519626031426974994:2420]: Pool not found 2025-06-24T21:09:28.650550Z node 7 :SQS DEBUG: cleanup_queue_data.cpp:138: [cleanup removed queues] there are no queues to delete 2025-06-24T21:09:28.851322Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:49546) incoming connection opened 2025-06-24T21:09:28.851418Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:49546) -> (POST /Root, 3 bytes) 2025-06-24T21:09:28.851572Z node 7 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [1848:1901:6050:0:48:1901:6050:0] request [UnknownMethodName] url [/Root] database [/Root] requestId: 7cc3f45-325a054e-c101f4ad-e2dea859 2025-06-24T21:09:28.851812Z node 7 :HTTP_PROXY INFO: http_req.cpp:1211: http request [UnknownMethodName] requestId [7cc3f45-325a054e-c101f4ad-e2dea859] reply with status: UNSUPPORTED message: Missing method name UnknownMethodName 2025-06-24T21:09:28.852016Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:49546) <- (400 InvalidAction, 76 bytes) 2025-06-24T21:09:28.852096Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:289: (#37,[::1]:49546) Request: POST /Root HTTP/1.1 Host: example.amazonaws.com X-Amz-Target: kinesisApi.UnknownMethodName X-Amz-Date: 20150830T123600Z Authorization: Content-Type: application/json Connection: Close Transfer-Encoding: chunked { } 2025-06-24T21:09:28.852143Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:49546) Response: HTTP/1.1 400 InvalidAction Connection: close x-amzn-requestid: 7cc3f45-325a054e-c101f4ad-e2dea859 x-amz-crc32: 139748724 Content-Type: application/x-amz-json-1.1 Content-Length: 76 2025-06-24T21:09:28.852249Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:49546) connection closed Http output full {"__type":"InvalidAction","message":"Missing method name UnknownMethodName"} 400 {"__type":"InvalidAction","message":"Missing method name UnknownMethodName"} >> DataShardOutOfOrder::TestShardRestartPlannedCommitShouldSucceed-EvWrite [GOOD] >> DataShardOutOfOrder::TestReadTableWriteConflict [GOOD] >> DataShardOutOfOrder::TestSecondaryClearanceAfterShardRestartRace >> TestYmqHttpProxy::TestListDeadLetterSourceQueues [GOOD] >> TestKinesisHttpProxy::ErroneousRequestGetRecords [GOOD] >> TPartBtreeIndexIteration::OneNode_History_Slices [GOOD] >> TPartBtreeIndexIteration::OneNode_Groups_History_Slices >> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed-EvWrite [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> TSharedPageCache::MiddleCache_BTreeIndex [GOOD] Test command err: SmallQueue: MainQueue: {11 0f 1b}, {14 1f 1b}, {15 2f 1b}, {18 0f 1b}, {19 0f 1b}, {23 0f 1b}, {27 0f 1b} GhostQueue: 9, 12, 13, 16, 17, 20, 21, 24, 25, 28 0.29096 00000.000 II| FAKE_ENV: Born at 2025-06-24T21:09:25.700627Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00000.012 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.012 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 
0 trc} 00000.013 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1 00000.013 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} hope 1 -> done Change{2, redo 0b alter 209b annex 0, ~{ } -{ }, 0 gb} 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} release 4194304b of static, Memory{0 dyn 0} 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2 00000.015 NN| TABLET_SAUSAGECACHE: Update config MemoryLimit: 8388608 ReplacementPolicy: ThreeLeveledLRU 00000.015 NN| TABLET_SAUSAGECACHE: Switch replacement policy from S3FIFO to ThreeLeveledLRU 00000.015 NN| TABLET_SAUSAGECACHE: Switch replacement policy done from S3FIFO to ThreeLeveledLRU 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.016 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{2, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.016 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.016 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 00000.017 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.017 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.017 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{3, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.017 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.017 DD| TABLET_EXECUTOR: Leader{1:2:5} commited cookie 1 for step 4 00000.018 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.018 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.018 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{4, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.018 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.019 DD| TABLET_EXECUTOR: Leader{1:2:6} commited cookie 1 for step 5 00000.019 DD| TABLET_EXECUTOR: Leader{1:2:6} 
Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.019 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.020 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{5, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.020 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.020 DD| TABLET_EXECUTOR: Leader{1:2:7} commited cookie 1 for step 6 00000.020 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.020 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.021 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{6, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.021 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.021 DD| TABLET_EXECUTOR: Leader{1:2:8} commited cookie 1 for step 7 00000.022 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.022 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.022 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{7, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.022 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.022 DD| TABLET_EXECUTOR: Leader{1:2:9} commited cookie 1 for step 8 00000.023 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.023 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.023 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{8, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.023 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.023 DD| TABLET_EXECUTOR: Leader{1:2:10} commited cookie 1 for step 9 00000.024 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.024 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.024 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{9, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.024 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.025 DD| TABLET_EXECUTOR: Leader{1:2:11} commited cookie 1 for step 10 00000.025 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.025 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.025 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{10, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.025 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:12} commited cookie 1 for step 11 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.027 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{11, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.027 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.027 DD| TABLET_EXECUTOR: Leader{1:2:13} commited cookie 1 for step 12 00000.027 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.027 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{12, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:14} commited cookie 1 for step 13 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{13, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:15} commited cookie 1 for step 14 
00000.030 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{14, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxW ... TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.396 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [4 4] 00000.396 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{96, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.396 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 4 ] 00000.396 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 96 ] owner [6:580:2605] 00000.396 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 4 ] 00000.396 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 4 ] cookie 1 00000.396 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.396 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{96, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.396 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{96, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.396 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 4 117 111 ] 00000.397 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.397 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.397 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.397 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.397 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.397 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [3 4] 00000.397 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.397 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to 
request [ 3 ] 00000.397 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 95 ] owner [6:580:2605] 00000.397 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 3 ] 00000.397 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 3 ] cookie 1 00000.397 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.398 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.398 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.398 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 3 117 111 ] 00000.398 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.398 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.398 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.398 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.398 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.398 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [2 4] 00000.398 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.398 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 2 ] 00000.399 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 93 ] owner [6:580:2605] 00000.399 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 2 ] 00000.399 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 2 ] cookie 1 00000.399 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.399 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.399 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.399 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 2 117 111 ] 00000.400 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.400 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.400 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.400 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.400 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.400 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [1 4] 00000.400 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.400 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 1 ] 00000.400 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 92 ] owner [6:580:2605] 00000.400 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 1 ] 00000.400 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 1 ] cookie 1 00000.400 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.400 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.400 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.400 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 1 117 111 ] 00000.401 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.401 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.401 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.401 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.401 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.401 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [0 4] 00000.401 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.401 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 0 ] 00000.401 DD| TABLET_SAUSAGECACHE: Drop 
page collection [1:2:103:1:12288:2976:0] pages [ 91 ] owner [6:580:2605] 00000.401 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 0 ] 00000.401 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 0 ] cookie 1 00000.401 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.402 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.402 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.402 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 0 117 111 ] Counters: Active:8313958/8388608, Passive:0, MemLimit:-1 00000.402 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.403 II| TABLET_EXECUTOR: Leader{1:3:2} suiciding, Waste{2:0, 10255801b +(0, 0b), 1 trc, -48685b acc} 00000.405 DD| TABLET_SAUSAGECACHE: Unregister owner [6:580:2605] 00000.405 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] 00000.405 DD| TABLET_SAUSAGECACHE: Remove owner [6:580:2605] 00000.406 NN| TABLET_SAUSAGECACHE: Poison cache serviced 138 reqs hit {0 0b} miss {139 12197190b} 00000.406 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.406 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {10191b, 107} 00000.406 II| FAKE_ENV: DS.1 gone, left {10257096b, 5}, put {10305919b, 107} 00000.419 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.419 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.419 II| FAKE_ENV: All BS storage groups are stopped 00000.419 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.420 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 2741}, stopped ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkTx::OlapInteractive [GOOD] Test command err: Trying to start YDB, gRPC: 21006, MsgBus: 4366 2025-06-24T21:08:56.051545Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625892937176760:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:56.051618Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031a9/r3tmp/tmpRiVefH/pdisk_1.dat 2025-06-24T21:08:56.434185Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:56.436145Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625892937176741:2079] 1750799336049692 != 1750799336049695 TServer::EnableGrpc on GrpcPort 21006, node 1 2025-06-24T21:08:56.494642Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:56.494744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:56.495944Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 
2025-06-24T21:08:56.547644Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:56.547667Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:56.547673Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:56.547771Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4366 TClient is connected to server localhost:4366 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:08:57.061590Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:57.198127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:57.218564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:08:59.199572Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625905822079264:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:59.199677Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:59.199801Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625905822079276:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:59.203044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:59.215701Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625905822079286:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:08:59.287577Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625905822079337:2335] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:59.656641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:08:59.854661Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519625905822079509:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:08:59.854891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519625905822079509:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:08:59.855474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519625905822079509:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:08:59.855659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519625905822079509:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:08:59.855811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519625905822079509:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:08:59.855934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519625905822079509:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:08:59.856094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519625905822079509:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:08:59.856215Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519625905822079509:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:08:59.856342Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519625905822079509:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:08:59.856488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037895;self_id=[1:7519625905822079509:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:08:59.856653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519625905822079509:2307];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:08:59.860741Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625905822079506:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:08:59.860850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625905822079506:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:08:59.861043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625905822079506:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:08:59.861153Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625905822079506:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:08:59.862017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625905822079506:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:08:59.862207Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625905822079506:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:08:59.862325Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625905822079506:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:08:59.862449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625905822079506:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:08:59.862756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625905822079506:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; ... 
9];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.005349s; 2025-06-24T21:09:26.639893Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037997;self_id=[2:7519625967616375714:2394];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.001566s; 2025-06-24T21:09:26.640252Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037967;self_id=[2:7519625971911343301:2433];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.001332s; 2025-06-24T21:09:26.641104Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037994;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.641140Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037979;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.641371Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037973;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.641384Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.642038Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037983;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.642083Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037957;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.642235Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037997;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.642313Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037992;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.642927Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037967;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.647046Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037963;self_id=[2:7519625971911343161:2419];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.001113s; 2025-06-24T21:09:26.647417Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037989;self_id=[2:7519625971911343077:2405];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.000281s; 2025-06-24T21:09:26.647446Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037945;self_id=[2:7519625971911343303:2434];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.002471s; 2025-06-24T21:09:26.647723Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037977;self_id=[2:7519625971911343149:2414];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.002373s; 2025-06-24T21:09:26.648377Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037961;self_id=[2:7519625971911343214:2427];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.000015s; 2025-06-24T21:09:26.648703Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;self_id=[2:7519625971911343783:2514];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.000197s; 2025-06-24T21:09:26.648816Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;self_id=[2:7519625971911343309:2436];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.000898s; 2025-06-24T21:09:26.649141Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037982;self_id=[2:7519625971911343079:2407];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.000945s; 2025-06-24T21:09:26.649354Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037965;self_id=[2:7519625971911343299:2431];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.001026s; 2025-06-24T21:09:26.649585Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037995;self_id=[2:7519625967616375717:2397];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.001945s; 2025-06-24T21:09:26.649804Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037986;self_id=[2:7519625967616375778:2402];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.002038s; 2025-06-24T21:09:26.651269Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037989;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.651293Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037963;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.651467Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037945;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.651517Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037977;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.651858Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037941;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.652070Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037986;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.652226Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.652260Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037965;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.652451Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037961;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.652578Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037982;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.652666Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037995;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.656627Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037937;self_id=[2:7519625971911343395:2457];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.002153s; 2025-06-24T21:09:26.656884Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037943;self_id=[2:7519625971911343396:2458];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.002263s; 2025-06-24T21:09:26.657473Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7519625971911343153:2418];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.003813s; 2025-06-24T21:09:26.660003Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037978;self_id=[2:7519625971911343145:2411];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.002358s; 2025-06-24T21:09:26.661184Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037937;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.662663Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037943;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.662912Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:26.663120Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037978;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:27.646311Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037975;self_id=[2:7519625971911343144:2410];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.999907s; 2025-06-24T21:09:27.646763Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037991;self_id=[2:7519625967616375724:2400];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.999942s; 2025-06-24T21:09:27.647516Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037976;self_id=[2:7519625971911343172:2421];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=3.000570s; 2025-06-24T21:09:27.649641Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037975;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:27.651203Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037976;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:09:27.651472Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037991;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestImmediateQueueThenSplit-UseSink [GOOD] Test command err: 2025-06-24T21:09:21.853755Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:21.854189Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:21.854399Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002cf5/r3tmp/tmpIYy9TO/pdisk_1.dat 2025-06-24T21:09:22.189451Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:09:22.196640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:22.240306Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:22.248980Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799358759995 != 1750799358759999 2025-06-24T21:09:22.300554Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:22.300709Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:22.312631Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:22.400209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:22.449767Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:22.450938Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:22.452158Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:09:22.452510Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:22.505033Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:22.505783Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:22.505925Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:22.507739Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T21:09:22.507818Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:09:22.507884Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:09:22.508266Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:22.508415Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:22.508514Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:09:22.519252Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:22.545595Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:09:22.545783Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:22.545900Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:09:22.545939Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:09:22.545977Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:09:22.546010Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:09:22.546225Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:22.546269Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:22.546602Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:09:22.546712Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:09:22.546793Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:09:22.546832Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:22.546884Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:09:22.546936Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:09:22.546969Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:09:22.547000Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:09:22.547045Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:09:22.547192Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:22.547232Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:22.547275Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:09:22.547661Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:09:22.547709Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:22.547796Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:09:22.548073Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:09:22.548149Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:09:22.548246Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:09:22.548295Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:09:22.548333Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T21:09:22.548371Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T21:09:22.548404Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:22.548715Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:22.548756Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T21:09:22.548810Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:09:22.548846Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:22.548899Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T21:09:22.548930Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T21:09:22.548959Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T21:09:22.548991Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T21:09:22.549016Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T21:09:22.550409Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T21:09:22.550460Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:09:22.561179Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:09:22.561245Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:22.561281Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:22.561344Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... ot, SessionId: ydb://session/3?node_id=2&id=YzgxNzYzZjAtYWJmNmU4NzctZDNmODk3NTEtYjRhMmYwNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T21:09:29.894481Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1092:2673] TxId: 281474976715676. Ctx: { TraceId: 01jyhwcgrf2p0a6e4pgf3q5w7b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzgxNzYzZjAtYWJmNmU4NzctZDNmODk3NTEtYjRhMmYwNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-24T21:09:29.895741Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715678. Resolved key sets: 0 2025-06-24T21:09:29.895919Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715677. Resolved key sets: 0 2025-06-24T21:09:29.896146Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715678. Ctx: { TraceId: 01jyhwcgrf6pr0hgbdpwbp2m1r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTMzMTA3MTMtNzg4ZDQ2OWMtODUxOGQzMWUtZTg1NzBhNGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:29.896188Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715678. Ctx: { TraceId: 01jyhwcgrf6pr0hgbdpwbp2m1r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTMzMTA3MTMtNzg4ZDQ2OWMtODUxOGQzMWUtZTg1NzBhNGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-24T21:09:29.896232Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:1097:2669] TxId: 281474976715678. Ctx: { TraceId: 01jyhwcgrf6pr0hgbdpwbp2m1r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTMzMTA3MTMtNzg4ZDQ2OWMtODUxOGQzMWUtZTg1NzBhNGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-24T21:09:29.896294Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:1097:2669] TxId: 281474976715678. 
Ctx: { TraceId: 01jyhwcgrf6pr0hgbdpwbp2m1r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTMzMTA3MTMtNzg4ZDQ2OWMtODUxOGQzMWUtZTg1NzBhNGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T21:09:29.896360Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1097:2669] TxId: 281474976715678. Ctx: { TraceId: 01jyhwcgrf6pr0hgbdpwbp2m1r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTMzMTA3MTMtNzg4ZDQ2OWMtODUxOGQzMWUtZTg1NzBhNGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-24T21:09:29.896464Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715677. Ctx: { TraceId: 01jyhwcgrm6nzkqwkjcczphbf4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjBmMzNkMi1mMGQ4NDhkOS1iNDY2M2YyMi0xNjhmNWFhNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:29.896502Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715677. Ctx: { TraceId: 01jyhwcgrm6nzkqwkjcczphbf4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjBmMzNkMi1mMGQ4NDhkOS1iNDY2M2YyMi0xNjhmNWFhNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-24T21:09:29.896543Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:1093:2679] TxId: 281474976715677. Ctx: { TraceId: 01jyhwcgrm6nzkqwkjcczphbf4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjBmMzNkMi1mMGQ4NDhkOS1iNDY2M2YyMi0xNjhmNWFhNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-24T21:09:29.896601Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:1093:2679] TxId: 281474976715677. Ctx: { TraceId: 01jyhwcgrm6nzkqwkjcczphbf4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjBmMzNkMi1mMGQ4NDhkOS1iNDY2M2YyMi0xNjhmNWFhNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T21:09:29.896639Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1093:2679] TxId: 281474976715677. Ctx: { TraceId: 01jyhwcgrm6nzkqwkjcczphbf4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjBmMzNkMi1mMGQ4NDhkOS1iNDY2M2YyMi0xNjhmNWFhNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-24T21:09:29.897348Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715679. Resolved key sets: 0 2025-06-24T21:09:29.897465Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715680. Resolved key sets: 0 2025-06-24T21:09:29.897669Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715679. Ctx: { TraceId: 01jyhwcgrf80fvh8tb5rqar7jr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NGMyNWZhOTktYzkyYjY2NDktZmJkMTkzYzUtNjQ0NjhlMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:09:29.897706Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715679. Ctx: { TraceId: 01jyhwcgrf80fvh8tb5rqar7jr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NGMyNWZhOTktYzkyYjY2NDktZmJkMTkzYzUtNjQ0NjhlMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-24T21:09:29.897748Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:1103:2671] TxId: 281474976715679. Ctx: { TraceId: 01jyhwcgrf80fvh8tb5rqar7jr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NGMyNWZhOTktYzkyYjY2NDktZmJkMTkzYzUtNjQ0NjhlMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-24T21:09:29.897807Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:1103:2671] TxId: 281474976715679. Ctx: { TraceId: 01jyhwcgrf80fvh8tb5rqar7jr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NGMyNWZhOTktYzkyYjY2NDktZmJkMTkzYzUtNjQ0NjhlMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T21:09:29.897856Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1103:2671] TxId: 281474976715679. Ctx: { TraceId: 01jyhwcgrf80fvh8tb5rqar7jr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NGMyNWZhOTktYzkyYjY2NDktZmJkMTkzYzUtNjQ0NjhlMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-24T21:09:29.898314Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715680. Ctx: { TraceId: 01jyhwcgrm7wdgp14fkahvgrfp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGZiNWY2NDMtOGIyOTAzZGItYTUwYTJiMzEtNDkwZDgyZDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:29.898362Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715680. Ctx: { TraceId: 01jyhwcgrm7wdgp14fkahvgrfp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGZiNWY2NDMtOGIyOTAzZGItYTUwYTJiMzEtNDkwZDgyZDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-24T21:09:29.898401Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:1107:2681] TxId: 281474976715680. Ctx: { TraceId: 01jyhwcgrm7wdgp14fkahvgrfp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGZiNWY2NDMtOGIyOTAzZGItYTUwYTJiMzEtNDkwZDgyZDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-24T21:09:29.898454Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:1107:2681] TxId: 281474976715680. Ctx: { TraceId: 01jyhwcgrm7wdgp14fkahvgrfp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGZiNWY2NDMtOGIyOTAzZGItYTUwYTJiMzEtNDkwZDgyZDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 
2025-06-24T21:09:29.898513Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1107:2681] TxId: 281474976715680. Ctx: { TraceId: 01jyhwcgrm7wdgp14fkahvgrfp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGZiNWY2NDMtOGIyOTAzZGItYTUwYTJiMzEtNDkwZDgyZDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-24T21:09:29.898560Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715681. Resolved key sets: 0 2025-06-24T21:09:29.898852Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715681. Ctx: { TraceId: 01jyhwcgrn77y0aca3nqt872aw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTcxMWYwOTEtNzA0YWExOGYtMWM2YWMzNGUtZGY4OTBmM2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:29.898889Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715681. Ctx: { TraceId: 01jyhwcgrn77y0aca3nqt872aw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTcxMWYwOTEtNzA0YWExOGYtMWM2YWMzNGUtZGY4OTBmM2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-24T21:09:29.898926Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:1110:2686] TxId: 281474976715681. Ctx: { TraceId: 01jyhwcgrn77y0aca3nqt872aw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTcxMWYwOTEtNzA0YWExOGYtMWM2YWMzNGUtZGY4OTBmM2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-24T21:09:29.898979Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:1110:2686] TxId: 281474976715681. Ctx: { TraceId: 01jyhwcgrn77y0aca3nqt872aw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTcxMWYwOTEtNzA0YWExOGYtMWM2YWMzNGUtZGY4OTBmM2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T21:09:29.899019Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1110:2686] TxId: 281474976715681. Ctx: { TraceId: 01jyhwcgrn77y0aca3nqt872aw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTcxMWYwOTEtNzA0YWExOGYtMWM2YWMzNGUtZGY4OTBmM2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 >> TestYmqHttpProxy::TestListQueueTags >> TTxDataShardMiniKQL::Write >> TTxDataShardMiniKQL::MemoryUsageImmediateSmallTx [GOOD] >> TTxDataShardMiniKQL::MemoryUsageImmediateMediumTx >> TBlobStorageControllerGrouperTest::when_one_server_per_rack_in_4_racks_then_can_construct_group_with_4_domains_and_one_small_node [GOOD] >> TClockProCache::Touch [GOOD] >> TClockProCache::UpdateLimit [GOOD] >> TestKinesisHttpProxy::GoodRequestCreateStream >> TCompaction::OneMemtable [GOOD] >> TCompaction::ManyParts >> TTxDataShardMiniKQL::ReadConstant >> KqpNewEngine::JoinWithParams ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestShardRestartPlannedCommitShouldSucceed-EvWrite [GOOD] Test command err: 2025-06-24T21:09:21.903576Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:21.903952Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:21.904103Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002cd8/r3tmp/tmpnqf7e3/pdisk_1.dat 2025-06-24T21:09:22.299925Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:09:22.307918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:22.366473Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:22.371668Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799359112606 != 1750799359112610 2025-06-24T21:09:22.424472Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:22.424600Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:22.440361Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:22.527185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:22.926781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:23.056041Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ===== UPSERT initial rows 2025-06-24T21:09:23.256784Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:780:2632], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:23.256908Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:790:2637], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:23.256995Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:23.262505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:23.422298Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:794:2640], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:09:23.497684Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:850:2677] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:23.820228Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwcb5pbsj729b0as3r92qv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTFjMzZiMTktNjM5NDA4NWItNWZlYWNmMDYtNTk4NzdhYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:23.916791Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwcbrj9x3k6ekdm0g0g27w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmZjMWRkODctMzk1NWUyMzMtNTQ2ZjI5ZDMtNmZiOWM4MTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ===== Begin SELECT 2025-06-24T21:09:24.291929Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhwcbvdbvpc0k5wfdd90rpy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjI0N2E3MmItOGJkZGYxNDctNDhiMjBiMWMtN2VjOTMxMTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 1 } } ===== UPSERT and commit ... waiting for commit read sets 2025-06-24T21:09:24.405925Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhwcc747ncgxr0tnec4hp3m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjI0N2E3MmItOGJkZGYxNDctNDhiMjBiMWMtN2VjOTMxMTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... captured readset ... captured readset ===== restarting tablet 2025-06-24T21:09:24.583388Z node 1 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1103: SelfId: [1:977:2718], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [1:914:2718]TEvDeliveryProblem was received from tablet: 72075186224037888 ===== Waiting for commit response ===== Last SELECT 2025-06-24T21:09:24.897507Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhwccka5begpag53e7q547e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Yjk1YWNkMDctYTEyODY3OWUtM2E2YTRhZGUtMmE1YzkyZGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 3 } items { uint32_value: 2 } } 2025-06-24T21:09:28.291124Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:28.291559Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:28.291676Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002cd8/r3tmp/tmp0Ze1Qu/pdisk_1.dat 2025-06-24T21:09:28.547803Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-24T21:09:28.549098Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:28.575178Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:28.576357Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1750799365360112 != 1750799365360116 2025-06-24T21:09:28.622237Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:28.622373Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:28.633896Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:28.718477Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:28.988005Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:29.102221Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ===== UPSERT initial rows 2025-06-24T21:09:29.287313Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:779:2632], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:29.287423Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:789:2637], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:29.287535Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:29.293584Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:29.463679Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:793:2640], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:09:29.498859Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:849:2677] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:29.576337Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwch255xv9q1db69tqdj4k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmJjZTAwOWEtMzFiNjZkYTktOTA1YTJmOTAtNWI3YWIxNGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:29.662768Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwchbw8z407ps0q936rxzm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTI5NTk4ODAtOWI3NzAwOGEtNWYzMWY0NDYtYmRiOWY0NWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ===== Begin SELECT 2025-06-24T21:09:30.001463Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhwchejfrftk9w4htfz1nfv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDI3NGQ0YTgtZDJhZmU1YjEtNTU2ODM2MjktNWYzMjUwMTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 1 } } ===== UPSERT and commit ... waiting for commit read sets 2025-06-24T21:09:30.119943Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhwchs07vt4f5mxayhhbn97, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDI3NGQ0YTgtZDJhZmU1YjEtNTU2ODM2MjktNWYzMjUwMTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... captured readset ... captured readset ===== restarting tablet ===== Waiting for commit response ===== Last SELECT 2025-06-24T21:09:30.574296Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj5wa4rrqg0hbqxwftqj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmJiYThhZjUtOTg2ZmFmNzEtNDU1NjFkNjUtOGRkM2UxODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root { items { uint32_value: 3 } items { uint32_value: 2 } } >> KqpSort::PassLimit [GOOD] >> KqpSort::Offset >> KqpNotNullColumns::CreateIndexedTableWithDisabledNotNullDataColumns [GOOD] >> KqpNotNullColumns::Describe >> TGroupMapperTest::SimplestErasureNone [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::when_one_server_per_rack_in_4_racks_then_can_construct_group_with_4_domains_and_one_small_node [GOOD] >> TTxDataShardMiniKQL::Write [GOOD] >> TTxDataShardMiniKQL::TableStats >> TTxDataShardMiniKQL::MemoryUsageImmediateMediumTx [GOOD] >> TTxDataShardMiniKQL::MemoryUsageMultiShard >> TCompaction::ManyParts [GOOD] >> TCompaction::BootAbort ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed-EvWrite [GOOD] Test command err: 2025-06-24T21:09:21.731405Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:21.731807Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:21.732071Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ceb/r3tmp/tmpI0wKeX/pdisk_1.dat 2025-06-24T21:09:22.089387Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:09:22.092775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:22.140896Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:22.149108Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799358784201 != 1750799358784205 2025-06-24T21:09:22.197387Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:22.197528Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:22.209098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:22.298113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:22.345210Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:22.346375Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:22.346847Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:09:22.347092Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:22.395909Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:22.396689Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:22.396826Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:22.398685Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T21:09:22.398761Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:09:22.398825Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:09:22.399246Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:22.399403Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:22.399490Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:09:22.410298Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:22.445145Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:09:22.445320Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:22.445430Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:09:22.445474Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:09:22.445516Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:09:22.445550Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:09:22.445742Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:22.445800Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:22.446188Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:09:22.446300Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:09:22.446375Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:09:22.446414Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:22.446457Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:09:22.446496Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:09:22.446535Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:09:22.446568Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:09:22.446616Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:09:22.446751Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:22.446791Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:22.446833Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:09:22.447261Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:09:22.447318Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:22.447415Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:09:22.447684Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:09:22.447743Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:09:22.447853Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:09:22.447927Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:09:22.447967Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T21:09:22.448005Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T21:09:22.448041Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:22.448333Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:22.448370Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T21:09:22.448407Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:09:22.448442Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:22.448502Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T21:09:22.448529Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T21:09:22.448563Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T21:09:22.448591Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T21:09:22.448619Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T21:09:22.450083Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T21:09:22.450135Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:09:22.460913Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:09:22.460988Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:22.461027Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:22.461086Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... s: 1750799370748 CreateTimeMs: 1750799370741 UpdateTimeMs: 1750799370749 } MaxMemoryUsage: 1048576 } 2025-06-24T21:09:30.750807Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:990:2787] 2025-06-24T21:09:30.750847Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:983:2769] TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:992:2789], CA [2:993:2790], CA [2:994:2791], CA [2:991:2788], CA [2:995:2792], 2025-06-24T21:09:30.750872Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:983:2769] TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 5 compute actor(s) and 0 datashard(s): CA [2:992:2789], CA [2:993:2790], CA [2:994:2791], CA [2:991:2788], CA [2:995:2792], 2025-06-24T21:09:30.751402Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:983:2769] TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:991:2788], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 626 DurationUs: 1000 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 391 FinishTimeMs: 1750799370749 InputRows: 1 InputBytes: 5 OutputRows: 1 OutputBytes: 5 ComputeCpuTimeUs: 346 BuildCpuTimeUs: 45 HostName: "ghrun-4pjfmvffsq" NodeId: 2 StartTimeMs: 1750799370748 CreateTimeMs: 1750799370741 UpdateTimeMs: 1750799370749 } MaxMemoryUsage: 1048576 } 2025-06-24T21:09:30.751493Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715665. 
Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:991:2788] 2025-06-24T21:09:30.751554Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:983:2769] TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:992:2789], CA [2:993:2790], CA [2:994:2791], CA [2:995:2792], 2025-06-24T21:09:30.751602Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:983:2769] TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 4 compute actor(s) and 0 datashard(s): CA [2:992:2789], CA [2:993:2790], CA [2:994:2791], CA [2:995:2792], 2025-06-24T21:09:30.752138Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:983:2769] TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:992:2789], task: 4, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 417 DurationUs: 2000 Tasks { TaskId: 4 StageId: 3 CpuTimeUs: 223 FinishTimeMs: 1750799370750 InputRows: 1 InputBytes: 5 OutputRows: 1 OutputBytes: 5 ComputeCpuTimeUs: 197 BuildCpuTimeUs: 26 HostName: "ghrun-4pjfmvffsq" NodeId: 2 StartTimeMs: 1750799370748 CreateTimeMs: 1750799370741 UpdateTimeMs: 1750799370750 } MaxMemoryUsage: 1048576 } 2025-06-24T21:09:30.752215Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:992:2789] 2025-06-24T21:09:30.752273Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:983:2769] TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:993:2790], CA [2:994:2791], CA [2:995:2792], 2025-06-24T21:09:30.752318Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:983:2769] TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 3 compute actor(s) and 0 datashard(s): CA [2:993:2790], CA [2:994:2791], CA [2:995:2792], 2025-06-24T21:09:30.752542Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:983:2769] TxId: 281474976715665. 
Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:993:2790], task: 5, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 594 DurationUs: 2000 Tasks { TaskId: 5 StageId: 4 CpuTimeUs: 311 FinishTimeMs: 1750799370751 InputRows: 2 InputBytes: 10 OutputRows: 2 OutputBytes: 7 ComputeCpuTimeUs: 247 BuildCpuTimeUs: 64 HostName: "ghrun-4pjfmvffsq" NodeId: 2 StartTimeMs: 1750799370749 CreateTimeMs: 1750799370741 UpdateTimeMs: 1750799370751 } MaxMemoryUsage: 1048576 } 2025-06-24T21:09:30.752599Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:993:2790] 2025-06-24T21:09:30.752627Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:983:2769] TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:994:2791], CA [2:995:2792], 2025-06-24T21:09:30.752651Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:983:2769] TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [2:994:2791], CA [2:995:2792], 2025-06-24T21:09:30.752820Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:983:2769] TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:994:2791], task: 6, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 435 Tasks { TaskId: 6 StageId: 5 CpuTimeUs: 197 FinishTimeMs: 1750799370752 InputRows: 2 InputBytes: 7 OutputRows: 2 OutputBytes: 7 ComputeCpuTimeUs: 128 BuildCpuTimeUs: 69 HostName: "ghrun-4pjfmvffsq" NodeId: 2 CreateTimeMs: 1750799370742 UpdateTimeMs: 1750799370752 } MaxMemoryUsage: 1048576 } 2025-06-24T21:09:30.752858Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:994:2791] 2025-06-24T21:09:30.752888Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:983:2769] TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Waiting for: CA [2:995:2792], 2025-06-24T21:09:30.752917Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:983:2769] TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:995:2792], 2025-06-24T21:09:30.753127Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:983:2769] TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:995:2792], task: 7, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 500 DurationUs: 1000 Tasks { TaskId: 7 StageId: 6 CpuTimeUs: 188 FinishTimeMs: 1750799370752 InputRows: 2 InputBytes: 7 OutputRows: 2 OutputBytes: 7 ResultRows: 2 ResultBytes: 7 ComputeCpuTimeUs: 127 BuildCpuTimeUs: 61 HostName: "ghrun-4pjfmvffsq" NodeId: 2 StartTimeMs: 1750799370751 CreateTimeMs: 1750799370742 UpdateTimeMs: 1750799370752 } MaxMemoryUsage: 1048576 } 2025-06-24T21:09:30.753172Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:995:2792] 2025-06-24T21:09:30.753368Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:983:2769] TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T21:09:30.753471Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:983:2769] TxId: 281474976715665. Ctx: { TraceId: 01jyhwcj3k92jxp2fy6n6zwpqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzIzNTI3Zi01NmVhNzZkYi1jNjQyZGUxNi03N2FiOWJkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.004070s ReadRows: 2 ReadBytes: 16 ru: 2 rate limiter was not found force flag: 1 { items { uint32_value: 3 } items { uint32_value: 2 } }, { items { uint32_value: 4 } items { uint32_value: 2 } } >> KqpNewEngine::Delete+UseSink [GOOD] >> KqpNewEngine::Delete-UseSink >> KqpSinkMvcc::OlapMultiSinks [GOOD] >> DataShardOutOfOrder::TestUnprotectedReadsThenWriteVisibility [GOOD] >> TTxDataShardMiniKQL::ReadConstant [GOOD] >> TTxDataShardMiniKQL::ReadAfterWrite |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::SimplestErasureNone [GOOD] >> TTxDataShardMiniKQL::ReadSpecialColumns >> TCompaction::BootAbort [GOOD] >> TCompaction::Defaults [GOOD] >> TCompaction::Merges [GOOD] >> TCompactionMulti::ManyParts >> TAsyncIndexTests::MergeMainWithReboots[PipeResets] >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite3 [GOOD] >> KqpNamedExpressions::NamedExpressionChanged+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionChanged-UseSink >> KqpAgg::AggWithSelfLookup [GOOD] >> KqpAgg::AggWithSelfLookup2 >> TestYmqHttpProxy::TestSendMessageBatch [GOOD] >> TTxDataShardMiniKQL::ReadAfterWrite [GOOD] >> TTxDataShardMiniKQL::ReadNonExisting >> KqpLocksTricky::TestNoLocksIssueInteractiveTx-withSink [GOOD] >> TTxDataShardMiniKQL::TableStats [GOOD] >> TTxDataShardMiniKQL::TableStatsHistograms >> TCompactionMulti::ManyParts [GOOD] >> TCompactionMulti::MainPageCollectionEdge >> KqpPrefixedVectorIndexes::OrderByCosineLevel2+Nullable+UseSimilarity [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestUnprotectedReadsThenWriteVisibility [GOOD] Test command err: 2025-06-24T21:09:28.085638Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:28.086216Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:28.086302Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:28.086340Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:09:28.086659Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:28.086693Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002c77/r3tmp/tmpzb3enC/pdisk_1.dat 2025-06-24T21:09:28.451875Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:28.594699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:28.712960Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:28.713104Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:28.719727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:28.719846Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:28.737768Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:09:28.738315Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:28.738800Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:29.017755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:29.101247Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [2:1176:2339], Recipient [2:1201:2351]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:29.105881Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [2:1176:2339], Recipient [2:1201:2351]: NKikimr::TEvTablet::TEvRestored 
2025-06-24T21:09:29.106421Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:1201:2351] 2025-06-24T21:09:29.106622Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:29.140926Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [2:1176:2339], Recipient [2:1201:2351]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:29.144861Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:29.145201Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:29.146616Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:09:29.146680Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:09:29.146721Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:09:29.147013Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:29.147588Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:29.147659Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [2:1225:2351] in generation 1 2025-06-24T21:09:29.150557Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:29.173138Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:09:29.173299Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:29.173389Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:1229:2368] 2025-06-24T21:09:29.173428Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:09:29.173471Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:09:29.173514Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:09:29.173745Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:1201:2351], Recipient [2:1201:2351]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:29.173798Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:29.174130Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:09:29.174233Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:09:29.174283Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:09:29.174318Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:29.174370Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:09:29.174413Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:09:29.174449Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:09:29.174486Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:09:29.174533Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:09:29.229660Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [2:1233:2369], Recipient [2:1201:2351]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:29.229736Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:29.229830Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:1184:2732], serverId# [2:1233:2369], sessionId# [0:0:0] 2025-06-24T21:09:29.230330Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:759:2426], Recipient [2:1233:2369] 2025-06-24T21:09:29.230385Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:29.230546Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:09:29.230816Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:09:29.230873Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:09:29.230982Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:09:29.231043Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:09:29.231100Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T21:09:29.231177Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T21:09:29.231218Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:29.231545Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:29.231606Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T21:09:29.231652Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:09:29.231692Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 
2025-06-24T21:09:29.231758Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T21:09:29.231789Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T21:09:29.231835Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T21:09:29.231868Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T21:09:29.231911Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:29.236435Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152 ... } } 2025-06-24T21:09:31.836300Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhwckem4y3t0k4d9bt3jr0m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MThlMDJmYzYtNjQyM2Q5Y2MtYzkwNjIyOTctOWQ0NGRjOTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:31.838519Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [2:1640:2410], Recipient [2:1201:2351]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2000 TxId: 18446744073709551615 } LockTxId: 281474976715662 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC RangesSize: 1 2025-06-24T21:09:31.838676Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-24T21:09:31.838776Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckRead 2025-06-24T21:09:31.838844Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T21:09:31.838876Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckRead 2025-06-24T21:09:31.838917Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T21:09:31.838952Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T21:09:31.839004Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-24T21:09:31.839044Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T21:09:31.839065Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:09:31.839083Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T21:09:31.839099Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteRead 2025-06-24T21:09:31.839221Z node 2 
:TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2000 TxId: 18446744073709551615 } LockTxId: 281474976715662 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-06-24T21:09:31.839429Z node 2 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715662, counter# 18446744073709551612 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T21:09:31.839470Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v2000/18446744073709551615 2025-06-24T21:09:31.839511Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[2:1640:2410], 0} after executionsCount# 1 2025-06-24T21:09:31.839552Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[2:1640:2410], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T21:09:31.839608Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[2:1640:2410], 0} finished in read 2025-06-24T21:09:31.839654Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T21:09:31.839675Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T21:09:31.839696Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:09:31.839715Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:09:31.839754Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-24T21:09:31.839770Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:09:31.839794Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-24T21:09:31.839821Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T21:09:31.839896Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-24T21:09:31.840632Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [2:1640:2410], Recipient [2:1201:2351]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T21:09:31.840683Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 1 } } 2025-06-24T21:09:32.003670Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. 
Ctx: { TraceId: 01jyhwckjd19r2sbdp3a8gdnq5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2ZlMzgzNmQtYzMxNTNjYjYtOGMyMDViYS1hZjQwOGNhMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:32.006308Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [2:1664:2411], Recipient [2:1201:2351]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2500 TxId: 18446744073709551615 } LockTxId: 281474976715666 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC RangesSize: 1 2025-06-24T21:09:32.006569Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-24T21:09:32.006670Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit CheckRead 2025-06-24T21:09:32.006763Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-24T21:09:32.006808Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit CheckRead 2025-06-24T21:09:32.006848Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T21:09:32.006885Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T21:09:32.006938Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:8] at 72075186224037888 2025-06-24T21:09:32.006985Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-24T21:09:32.007012Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:09:32.007039Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T21:09:32.007063Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit ExecuteRead 2025-06-24T21:09:32.008086Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2500 TxId: 18446744073709551615 } LockTxId: 281474976715666 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-06-24T21:09:32.008414Z node 2 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715666, counter# 1 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T21:09:32.008475Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v2500/18446744073709551615 2025-06-24T21:09:32.009269Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[2:1664:2411], 0} after executionsCount# 1 2025-06-24T21:09:32.009363Z node 2 :TX_DATASHARD TRACE: 
datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[2:1664:2411], 0} sends rowCount# 2, bytes# 64, quota rows left# 999, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T21:09:32.009475Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[2:1664:2411], 0} finished in read 2025-06-24T21:09:32.009583Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-24T21:09:32.009620Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T21:09:32.009649Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:09:32.009681Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:09:32.009731Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-24T21:09:32.009753Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:09:32.009779Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:8] at 72075186224037888 has finished 2025-06-24T21:09:32.009820Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T21:09:32.009925Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-24T21:09:32.011406Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [2:1664:2411], Recipient [2:1201:2351]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T21:09:32.011472Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-24T21:09:32.012519Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [2:243:2131], Recipient [2:1201:2351]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715666 LockNode: 1 Status: STATUS_SUBSCRIBED { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } } >> TTxDataShardMiniKQL::ReadSpecialColumns [GOOD] >> TTxDataShardMiniKQL::SelectRange >> KqpReturning::ReturningWorksIndexedUpsert+QueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkMvcc::OlapMultiSinks [GOOD] Test command err: Trying to start YDB, gRPC: 13017, MsgBus: 28072 2025-06-24T21:08:38.709451Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625812870912979:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:38.710142Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003249/r3tmp/tmprUvE58/pdisk_1.dat 2025-06-24T21:08:39.276218Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-24T21:08:39.276323Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:39.283807Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:39.291703Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:39.291821Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625812870912961:2079] 1750799318708298 != 1750799318708301 TServer::EnableGrpc on GrpcPort 13017, node 1 2025-06-24T21:08:39.411750Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:39.411773Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:39.411780Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:39.411910Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:39.726875Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28072 TClient is connected to server localhost:28072 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:40.239990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:42.082407Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625830050782796:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.082419Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625830050782790:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.082542Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:42.087114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:42.104519Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625830050782804:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:08:42.179721Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625830050782855:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:42.477469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:08:42.644039Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625830050782995:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:08:42.644237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625830050782995:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:08:42.644516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625830050782995:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:08:42.644655Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625830050782995:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:08:42.644763Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625830050782995:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:08:42.644879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625830050782995:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:08:42.644899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625830050782993:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:08:42.644939Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625830050782993:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:08:42.645010Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625830050782995:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:08:42.645106Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7519625830050782993:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:08:42.645115Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625830050782995:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:08:42.645192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625830050782995:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:08:42.645199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625830050782993:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:08:42.645354Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625830050782995:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:08:42.645488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625830050782995:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:08:42.645522Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625830050782993:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:08:42.645645Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625830050782993:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:08:42.645741Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625830050782993:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:08:42.645847Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625830050782993:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:08:42.645949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625830050782993:2304];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:08:42.646139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519625830050782993:2304];tablet_id=72075186224037888;process=TTxInitSchema:: ... 
37892,72075186224037897,72075186224037970,72075186224037993,72075186224037996;receive=72075186224037969; 2025-06-24T21:09:29.719057Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=40;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037970,72075186224037993,72075186224037996;receive=72075186224037969; 2025-06-24T21:09:29.719233Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=42;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037970,72075186224037993,72075186224037996;receive=72075186224037892; 2025-06-24T21:09:29.719310Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=43;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037970,72075186224037993,72075186224037996;receive=72075186224037892; 2025-06-24T21:09:29.719381Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=44;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037970,72075186224037993,72075186224037996;receive=72075186224037892; 2025-06-24T21:09:29.719444Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=45;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037970,72075186224037993,72075186224037996;receive=72075186224037892; 2025-06-24T21:09:29.719502Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:09:29.719530Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=46;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037970,72075186224037993,72075186224037996;receive=72075186224037892; 2025-06-24T21:09:29.719589Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=47;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037970,72075186224037993,72075186224037996;receive=72075186224037892; 2025-06-24T21:09:29.719656Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=48;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037970,72075186224037993,72075186224037996;receive=72075186224037892; 2025-06-24T21:09:29.719817Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=50;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037993,72075186224037996;receive=72075186224037897; 2025-06-24T21:09:29.719896Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=51;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037993,72075186224037996;receive=72075186224037897; 2025-06-24T21:09:29.719976Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=52;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037993,72075186224037996;receive=72075186224037897; 2025-06-24T21:09:29.720045Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=53;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037993,72075186224037996;receive=72075186224037897; 2025-06-24T21:09:29.720125Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=54;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037993,72075186224037996;receive=72075186224037897; 2025-06-24T21:09:29.720207Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=55;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037993,72075186224037996;receive=72075186224037897; 2025-06-24T21:09:29.720282Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=56;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037993,72075186224037996;receive=72075186224037897; 2025-06-24T21:09:29.720558Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=58;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037996;receive=72075186224037993; 2025-06-24T21:09:29.720610Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037970;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:09:29.720627Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=59;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037996;receive=72075186224037993; 2025-06-24T21:09:29.720687Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=60;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037996;receive=72075186224037993; 2025-06-24T21:09:29.720767Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=61;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037996;receive=72075186224037993; 2025-06-24T21:09:29.720829Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=62;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037996;receive=72075186224037993; 2025-06-24T21:09:29.720883Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=63;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037996;receive=72075186224037993; 2025-06-24T21:09:29.720943Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=64;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037996;receive=72075186224037993; 2025-06-24T21:09:29.721248Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=66;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970;receive=72075186224037996; 2025-06-24T21:09:29.721311Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=67;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970;receive=72075186224037996; 2025-06-24T21:09:29.721374Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=68;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970;receive=72075186224037996; 2025-06-24T21:09:29.721436Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=69;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970;receive=72075186224037996; 2025-06-24T21:09:29.721492Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=70;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970;receive=72075186224037996; 2025-06-24T21:09:29.721566Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=71;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970;receive=72075186224037996; 2025-06-24T21:09:29.721650Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625993537708611:2430];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=72;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970;receive=72075186224037996; 2025-06-24T21:09:29.722852Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:09:29.945613Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; >> TTxDataShardMiniKQL::ReadNonExisting [GOOD] >> TTxDataShardMiniKQL::MemoryUsageMultiShard [GOOD] >> TPartBtreeIndexIteration::OneNode_Groups_History_Slices [GOOD] >> TPartBtreeIndexIteration::FewNodes >> TTxDataShardMiniKQL::SelectRange [GOOD] >> TTxDataShardMiniKQL::SelectRangeWithNotFullKey ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite3 [GOOD] Test command err: Trying to start YDB, gRPC: 28740, MsgBus: 26529 2025-06-24T21:08:38.705752Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625814874370405:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:38.712131Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00328a/r3tmp/tmpXG9foQ/pdisk_1.dat 2025-06-24T21:08:39.147879Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:39.148263Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625814874370384:2079] 1750799318702366 != 1750799318702369 2025-06-24T21:08:39.171234Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:39.171363Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:39.172957Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28740, node 1 2025-06-24T21:08:39.346080Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:39.346113Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:39.346121Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:39.346289Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:39.736528Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26529 TClient is connected to server localhost:26529 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:40.133113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:40.163673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:08:41.844206Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625827759272926:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:41.844232Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625827759272915:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:41.844365Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:41.848226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:41.857778Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625827759272929:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:08:41.940865Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625827759272980:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:42.445198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:08:42.645931Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625832054240451:2304];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:08:42.646177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625832054240451:2304];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:08:42.646438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625832054240451:2304];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:08:42.646568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625832054240451:2304];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:08:42.646696Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625832054240451:2304];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:08:42.646816Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625832054240451:2304];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:08:42.646918Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625832054240451:2304];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:08:42.647025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625832054240451:2304];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:08:42.647175Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625832054240451:2304];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:08:42.647304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519625832054240451:2304];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:08:42.647399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519625832054240451:2304];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:08:42.652879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625832054240456:2309];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:08:42.652992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625832054240456:2309];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:08:42.653162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625832054240456:2309];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:08:42.653278Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625832054240456:2309];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:08:42.653389Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625832054240456:2309];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:08:42.653498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625832054240456:2309];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:08:42.653610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625832054240456:2309];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:08:42.653724Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625832054240456:2309];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:08:42.653820Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519625832054240456:2309];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunk ... 
tablet_id=72075186224037932;self_id=[3:7519625994334376086:2473];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037932;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.661298Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037965;self_id=[3:7519625994334375793:2444];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037965;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.661365Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037966;self_id=[3:7519625994334375632:2426];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037966;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.661432Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[3:7519625994334375510:2403];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.661498Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037984;self_id=[3:7519625994334375506:2401];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037984;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.661561Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037985;self_id=[3:7519625994334375529:2407];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037985;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.661627Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037986;self_id=[3:7519625994334375513:2404];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037986;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.661695Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037971;self_id=[3:7519625994334375658:2428];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037971;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.661759Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037972;self_id=[3:7519625994334375492:2391];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037972;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.661817Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037973;self_id=[3:7519625994334375507:2402];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037973;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.661891Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: 
tablet_id=72075186224037974;self_id=[3:7519625994334375579:2409];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037974;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.661972Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037975;self_id=[3:7519625994334375493:2392];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037975;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.662034Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037976;self_id=[3:7519625994334375498:2396];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037976;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.662115Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037961;self_id=[3:7519625994334375662:2430];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037961;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.662176Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037962;self_id=[3:7519625994334375402:2384];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037962;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.662253Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037963;self_id=[3:7519625994334375856:2458];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037963;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.662320Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037948;self_id=[3:7519625994334375902:2461];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037948;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.662382Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519625994334375497:2395];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037981;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.662449Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037982;self_id=[3:7519625994334375505:2400];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037982;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.662510Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037995;self_id=[3:7519625994334375441:2389];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037995;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.662571Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: 
tablet_id=72075186224037996;self_id=[3:7519625994334375438:2386];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037996;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.662638Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037997;self_id=[3:7519625994334375437:2385];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037997;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.662705Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037979;self_id=[3:7519625994334375494:2393];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037979;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.662771Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037987;self_id=[3:7519625994334375495:2394];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037987;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.662838Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037988;self_id=[3:7519625994334375516:2405];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037988;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.662911Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037989;self_id=[3:7519625994334375580:2410];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037989;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.662981Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037990;self_id=[3:7519625994334375500:2397];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037990;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.663050Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037991;self_id=[3:7519625994334375400:2383];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037991;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.663119Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037992;self_id=[3:7519625994334375439:2387];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037992;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.663210Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037977;self_id=[3:7519625994334375628:2423];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037977;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.663278Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: 
tablet_id=72075186224037978;self_id=[3:7519625994334375504:2399];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037978;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.663343Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037964;self_id=[3:7519625994334375780:2439];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037964;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.663405Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037980;self_id=[3:7519625994334375501:2398];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037980;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.663489Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037993;self_id=[3:7519625994334375440:2388];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037993;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:09:31.663568Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037994;self_id=[3:7519625994334375443:2390];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037994;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::ReadNonExisting [GOOD] Test command err: 2025-06-24T21:09:32.540395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:09:32.540466Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:32.544099Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:32.555072Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:32.555644Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:09:32.555945Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:32.599044Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:32.607354Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:32.607582Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:32.609462Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:09:32.609549Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:09:32.609604Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:09:32.610009Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-24T21:09:32.610119Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:32.610189Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:09:32.692444Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:32.734683Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:09:32.734937Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:32.735050Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:09:32.735089Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:32.735131Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:09:32.735185Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:32.735376Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:32.735429Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:32.735703Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:32.735799Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:32.735956Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:32.736007Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:32.736057Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:32.736095Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:32.736132Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:32.736168Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:32.736207Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:32.736311Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:32.736371Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:32.736441Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:09:32.739893Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient 
[1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:09:32.739968Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:32.740071Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:32.740251Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:09:32.740302Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:09:32.740362Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:09:32.740414Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:32.740454Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:09:32.740488Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:09:32.740521Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:32.740870Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:32.740907Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:09:32.740941Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:32.740990Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:32.741037Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:09:32.741073Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:32.741119Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:09:32.741152Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:32.741182Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:32.753517Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:32.753621Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:32.753664Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:32.753710Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 
at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:32.753802Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:09:32.754355Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:32.754417Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:32.754464Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:32.754609Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:32.754647Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:32.754783Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:32.754829Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:09:32.754869Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:09:32.754908Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:32.758858Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:32.758929Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:32.759178Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:32.759236Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:32.759295Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:32.759337Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:32.759374Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:32.759414Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:09:32.759462Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
oordinators count is 1 buckets per mediator 2 2025-06-24T21:09:34.492490Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [3:286:2268] 2025-06-24T21:09:34.492542Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:34.492602Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 9437184 2025-06-24T21:09:34.492656Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:34.492844Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:711: TxInitSchemaDefaults.Execute 2025-06-24T21:09:34.492963Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:723: TxInitSchemaDefaults.Complete 2025-06-24T21:09:34.493286Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [3:238:2229], Recipient [3:238:2229]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:34.493349Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:34.493673Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:34.493783Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:34.493953Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5603: Got TEvDataShard::TEvSchemaChanged for unknown txId 1 message# Source { RawX1: 238 RawX2: 12884904117 } Origin: 9437184 State: 2 TxId: 1 Step: 0 Generation: 3 2025-06-24T21:09:34.494082Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [3:24:2071], Recipient [3:238:2229]: {TEvRegisterTabletResult TabletId# 9437184 Entry# 0} 2025-06-24T21:09:34.494129Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-24T21:09:34.494196Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 9437184 time 0 2025-06-24T21:09:34.494263Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:34.494373Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [3:24:2071], Recipient [3:238:2229]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 0 ReadStep# 0 } 2025-06-24T21:09:34.494413Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-24T21:09:34.494462Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 9437184 coordinator 72057594046316545 last step 0 next step 0 2025-06-24T21:09:34.494544Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:34.494593Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:34.494639Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:34.494689Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 
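The TX_DATASHARD TRACE records in this test's output follow a fixed pattern: an operation such as [0:1] is tried on an execution unit ("Trying to execute [0:1] at 9437184 on unit CheckSchemeTx"), gets a status ("Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts"), and is then advanced to the next unit. When scanning a long run like this, it can help to collapse the trace into one chain per operation. The following is a minimal, hypothetical Python sketch (not part of ya or the YDB tooling; the regexes are assumptions based only on the message formats visible in this excerpt):

import re
import sys
from collections import defaultdict

# Two message shapes seen in the TX_DATASHARD TRACE output above; assumed
# stable only for this excerpt. Groups 1-3 belong to "Trying to execute",
# groups 4-6 to "Execution status".
EVENT_RE = re.compile(
    r"Trying to execute \[(\S+)\] at (\d+) on unit (\w+)"
    r"|Execution status for \[(\S+)\] at (\d+) is (\w+)"
)

def unit_timeline(lines):
    """Return {(operation, tablet): [(unit, status), ...]} built from TRACE lines."""
    pending = {}                  # (op, tablet) -> unit currently being tried
    timeline = defaultdict(list)  # (op, tablet) -> ordered (unit, status) pairs
    for line in lines:
        # Several records may share one physical line in this dump, so scan all matches.
        for m in EVENT_RE.finditer(line):
            if m.group(1):  # "Trying to execute ..." record
                pending[(m.group(1), m.group(2))] = m.group(3)
            else:           # "Execution status ..." record
                key = (m.group(4), m.group(5))
                timeline[key].append((pending.pop(key, "?"), m.group(6)))
    return timeline

if __name__ == "__main__":
    for (op, tablet), steps in unit_timeline(sys.stdin).items():
        chain = " -> ".join(f"{unit}:{status}" for unit, status in steps)
        print(f"[{op}] @ {tablet}: {chain}")

Fed this test's "Test command err" text (for example python3 unit_timeline.py < test_err.log, with hypothetical file names), it would print chains like [0:1] @ 9437184: CheckSchemeTx:ExecutedNoMoreRestarts -> StoreSchemeTx:DelayCompleteNoMoreRestarts -> FinishPropose:DelayComplete, matching the scheme-transaction path traced earlier in this block.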
2025-06-24T21:09:34.494729Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:34.494784Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:34.494848Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:34.494964Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [3:284:2266], Recipient [3:238:2229]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 4200 Status: OK ServerId: [3:288:2270] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-24T21:09:34.495002Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-24T21:09:34.495089Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552132, Sender [3:126:2150], Recipient [3:238:2229]: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 1 2025-06-24T21:09:34.495130Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvSchemaChangedResult 2025-06-24T21:09:34.495206Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 1 datashard 9437184 state Ready 2025-06-24T21:09:34.495280Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 9437184 Got TEvSchemaChangedResult from SS at 9437184 2025-06-24T21:09:34.510649Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [3:284:2266], Recipient [3:238:2229]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 4200 ClientId: [3:284:2266] ServerId: [3:288:2270] } 2025-06-24T21:09:34.510721Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T21:09:34.555920Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269551617, Sender [3:102:2135], Recipient [3:238:2229]: NKikimrTxDataShard.TEvGetShardState Source { RawX1: 102 RawX2: 12884904023 } 2025-06-24T21:09:34.556000Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvGetShardState 2025-06-24T21:09:34.556364Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:294:2274], Recipient [3:238:2229]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:34.556408Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:34.556463Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [3:292:2273], serverId# [3:294:2274], sessionId# [0:0:0] 2025-06-24T21:09:34.556694Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [3:102:2135], Recipient [3:238:2229]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 102 RawX2: 12884904023 } TxBody: "\032\365\001\037\004\0021\nvalue\005\205\n\205\002\207\205\002\207\203\001H\006\002\205\004\205\002?\006\002\205\000\034MyReads MyWrites\205\004\205\002?\006\002\206\202\024Reply\024Write?\014\205\002\206\203\010\002 AllReads\030MyKeys\014Run4ShardsForRead4ShardsToWrite\005?\010)\211\n?\006\203\005\004\200\205\002\203\004\006\213\002\203\004\203\004$SelectRow\000\003?\036 h\020\000\000\000\000\000\000\r\000\000\000\000\000\000\000?\004\005?\"\003? 
p\001\013?&\003?$T\001\003?(\000\037\002\000\005?\016\005?\n?8\000\005?\014\003\005?\024\005?\020?8\000\006\000?\022\003?>\005?\032\006\000?\030\001\037/ \0018\001" TxId: 2 ExecLevel: 0 Flags: 0 2025-06-24T21:09:34.556734Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:34.556836Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:34.557629Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CheckDataTx 2025-06-24T21:09:34.557732Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-24T21:09:34.557775Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CheckDataTx 2025-06-24T21:09:34.557812Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-24T21:09:34.557850Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit BuildAndWaitDependencies 2025-06-24T21:09:34.557890Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-24T21:09:34.557952Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 9437184 2025-06-24T21:09:34.557992Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-24T21:09:34.558015Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-24T21:09:34.558038Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit ExecuteDataTx 2025-06-24T21:09:34.558093Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-24T21:09:34.558525Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:2] at tablet 9437184 with status COMPLETE 2025-06-24T21:09:34.558601Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:2] at 9437184: {NSelectRow: 1, NSelectRange: 0, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-24T21:09:34.558688Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-24T21:09:34.558716Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit ExecuteDataTx 2025-06-24T21:09:34.558743Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:34.558769Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit FinishPropose 2025-06-24T21:09:34.558811Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 2 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-24T21:09:34.558883Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: 
Execution status for [0:2] at 9437184 is DelayComplete 2025-06-24T21:09:34.558937Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:34.558979Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit CompletedOperations 2025-06-24T21:09:34.559015Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CompletedOperations 2025-06-24T21:09:34.559058Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-24T21:09:34.559082Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CompletedOperations 2025-06-24T21:09:34.559108Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 9437184 has finished 2025-06-24T21:09:34.559193Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:34.559233Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 9437184 on unit FinishPropose 2025-06-24T21:09:34.559278Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 >> TKeyValueTracingTest::ReadSmall ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpLocksTricky::TestNoLocksIssueInteractiveTx-withSink [GOOD] Test command err: Trying to start YDB, gRPC: 28669, MsgBus: 63564 2025-06-24T21:08:56.306739Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:08:56.307049Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:08:56.307223Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031b1/r3tmp/tmp4XuFmv/pdisk_1.dat 2025-06-24T21:08:56.624975Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 28669, node 1 2025-06-24T21:08:56.793904Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:56.801414Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:56.801483Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:56.801516Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:56.801760Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:56.802030Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799333247589 != 1750799333247593 2025-06-24T21:08:56.855332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:56.855494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:56.867525Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63564 TClient is connected to server localhost:63564 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:08:57.197993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:57.308522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:57.471474Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:08:57.728054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:58.179442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:58.538654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:59.448995Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1685:3280], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:59.449364Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:59.489062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:59.715935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:00.029624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:00.360893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:00.639786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:00.997150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:01.308130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:01.692610Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2360:3778], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:01.692744Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:01.693029Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2365:3783], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:01.702582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:01.856619Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2367:3785], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:09:01.932837Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2422:3821] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:03.304952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:03.567108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ES ... sor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031b1/r3tmp/tmpQk9jv5/pdisk_1.dat 2025-06-24T21:09:24.214826Z node 3 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 3 Type# 268639257 TServer::EnableGrpc on GrpcPort 19171, node 3 2025-06-24T21:09:24.356372Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:24.357664Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:24.357735Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:24.357813Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:24.358330Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:09:24.358676Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:32:2079] 1750799360023651 != 1750799360023655 2025-06-24T21:09:24.407527Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:24.407721Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:24.420051Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21733 TClient is connected to server localhost:21733 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:24.869224Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:24.931005Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:25.069725Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:25.273827Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:25.718342Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:26.054205Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:26.620488Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:1680:3275], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:26.620762Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:26.649421Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:26.883785Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:27.137775Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:27.434654Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:27.707879Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:28.062121Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:28.350633Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:28.743034Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:2355:3775], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:28.743180Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:28.743533Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:2360:3780], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:28.750424Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:28.893841Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:2362:3782], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:09:28.962401Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:2420:3821] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:30.429098Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:30.637570Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:30.994585Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TKeyValueTracingTest::WriteHuge ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::MemoryUsageMultiShard [GOOD] Test command err: 2025-06-24T21:09:30.960531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:09:30.960585Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:30.967595Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:30.980857Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:30.981374Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:09:30.982301Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:31.038937Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:31.056740Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:31.057035Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:31.059985Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:09:31.060081Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:09:31.060209Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:09:31.062124Z 
node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:31.062276Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:31.062366Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:09:31.145932Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:31.193707Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:09:31.193955Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:31.194070Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:09:31.194127Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:31.194169Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:09:31.194225Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:31.194391Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:31.194438Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:31.194742Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:31.194847Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:31.195027Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:31.195075Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:31.195162Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:31.195205Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:31.195247Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:31.195282Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:31.195333Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:31.195474Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:31.195525Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:31.195580Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:09:31.198670Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received 
event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:09:31.198764Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:31.198862Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:31.199065Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:09:31.199117Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:09:31.199206Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:09:31.199261Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:31.199310Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:09:31.199350Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:09:31.199384Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:31.199691Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:31.199728Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:09:31.199764Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:31.199814Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:31.199856Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:09:31.199889Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:31.199932Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:09:31.199969Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:31.200001Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:31.213182Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:31.213269Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:31.213312Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:31.213359Z node 1 :TX_DATASHARD TRACE: 
finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:31.213449Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:09:31.213956Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:31.214008Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:31.214048Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:31.214219Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 2 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:31.214257Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:31.214380Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [2:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:31.214425Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [2:1] at 9437184 is Executed 2025-06-24T21:09:31.214488Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:09:31.214538Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:31.223774Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 2 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 2 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:31.223867Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:31.224145Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:31.224193Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:31.224255Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:31.224312Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:31.224355Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:31.224395Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2:1] in PlanQueue unit at 9437184 2025-06-24T21:09:31.224430Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2:1] at 9437184 on unit PlanQueue 2025-06-24T21:09:31. ... 
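The trace above walks scheme transaction txId 1 through the datashard's execution units (CheckSchemeTx, StoreSchemeTx, FinishPropose, WaitForPlan, then PlanQueue once TEvPlanStep assigns step 2), with each unit reporting a status such as ExecutedNoMoreRestarts, DelayComplete, or "not ready to execute". The following is only a minimal Python sketch of that kind of unit pipeline: the unit names are taken from the log, but the control flow and statuses are a simplified illustration, not YDB's implementation.

```python
# Minimal illustration of an execution-unit pipeline like the one traced above.
# Unit names mirror the log; the logic is a simplified sketch, not YDB code.
from enum import Enum, auto

class Status(Enum):
    EXECUTED = auto()          # unit finished, advance to the next one
    DELAY_COMPLETE = auto()    # side effects deferred to the Complete() phase
    NOT_READY = auto()         # operation must wait (e.g. for a plan step)

PIPELINE = ["CheckSchemeTx", "StoreSchemeTx", "FinishPropose", "WaitForPlan", "PlanQueue"]

def run_pipeline(op, planned=False):
    """Advance an operation through the units until it finishes or blocks."""
    for unit in PIPELINE[op.get("unit_idx", 0):]:
        if unit == "WaitForPlan" and not planned:
            print(f"[txId {op['txid']}] {unit}: not ready to execute")
            op["unit_idx"] = PIPELINE.index(unit)
            return Status.NOT_READY
        status = Status.DELAY_COMPLETE if unit in ("StoreSchemeTx", "FinishPropose") else Status.EXECUTED
        print(f"[txId {op['txid']}] {unit}: {status.name}")
    op["unit_idx"] = len(PIPELINE)
    return Status.EXECUTED

op = {"txid": 1}
run_pipeline(op)                 # stops at WaitForPlan, like txId 1 above
run_pipeline(op, planned=True)   # resumes once a plan step arrives
```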
-24T21:09:34.415048Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:34.415225Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437184:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} hope 5 -> done Change{16, redo 636b alter 0b annex 0, ~{ 1001, 1, 3, 4, 12, 7, 8, 5 } -{ }, 0 gb} 2025-06-24T21:09:34.415316Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437184:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} release Res{3 96990534b}, Memory{0 dyn 0} 2025-06-24T21:09:34.415545Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437184 (3 by [3:260:2229]) (release resources {0, 96990534}) 2025-06-24T21:09:34.415614Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_transaction from 18.818640 to 1.881864 (remove task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437184 (3 by [3:260:2229])) 2025-06-24T21:09:34.415769Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437185 2025-06-24T21:09:34.415811Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437185 on unit ExecuteDataTx 2025-06-24T21:09:34.417103Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 5 at 9437185 restored its data 2025-06-24T21:09:34.418033Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [6:5] at 9437185 exceeded memory limit 10776726 and requests 86213808 more for the next try 2025-06-24T21:09:34.418186Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 5 released its data 2025-06-24T21:09:34.418244Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437185 is Restart 2025-06-24T21:09:34.418275Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:09:34.418304Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437185 2025-06-24T21:09:34.418364Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:52: TPlanQueueUnit at 9437185 out-of-order limits exceeded 2025-06-24T21:09:34.418409Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437185 2025-06-24T21:09:34.418486Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} hope 4 -> retry Change{16, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T21:09:34.418532Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} touch new 0b, 0b lo load (0b in total), 86213808b requested for data (96990534b in total) 2025-06-24T21:09:34.418580Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} release tx data 2025-06-24T21:09:34.418620Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:9} released on update Res{3 10776726b}, Memory{0 dyn 0} 2025-06-24T21:09:34.418664Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} update Res{3 96990534b} type transaction 2025-06-24T21:09:34.418832Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:441: Update task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 
9437185 (3 by [3:371:2314]) (priority=5 type=transaction resources={0, 96990534} resubmit=1) 2025-06-24T21:09:34.418884Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437185 (3 by [3:371:2314]) to queue queue_transaction 2025-06-24T21:09:34.418948Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 96990534} for task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437185 (3 by [3:371:2314]) from queue queue_transaction 2025-06-24T21:09:34.418997Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437185 (3 by [3:371:2314]) to queue queue_transaction 2025-06-24T21:09:34.419049Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_transaction from 0.000000 to 15.243099 (insert task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437185 (3 by [3:371:2314])) 2025-06-24T21:09:34.419139Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} acquired dyn mem Res{3 96990534b}, Memory{0 dyn 96990534} 2025-06-24T21:09:34.419220Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437185 2025-06-24T21:09:34.419259Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437185 on unit ExecuteDataTx 2025-06-24T21:09:34.420095Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 5 at 9437185 restored its data 2025-06-24T21:09:34.653475Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [6:5] at tablet 9437185 with status COMPLETE 2025-06-24T21:09:34.653543Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [6:5] at 9437185: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 2, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 22, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-24T21:09:34.653595Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437185 is ExecutedNoMoreRestarts 2025-06-24T21:09:34.653621Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437185 executing on unit ExecuteDataTx 2025-06-24T21:09:34.653643Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [6:5] at 9437185 to execution unit CompleteOperation 2025-06-24T21:09:34.653664Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437185 on unit CompleteOperation 2025-06-24T21:09:34.653819Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437185 is DelayComplete 2025-06-24T21:09:34.653837Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437185 executing on unit CompleteOperation 2025-06-24T21:09:34.653871Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [6:5] at 9437185 to execution unit CompletedOperations 2025-06-24T21:09:34.653892Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437185 on unit CompletedOperations 2025-06-24T21:09:34.653913Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437185 is 
Executed 2025-06-24T21:09:34.653926Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437185 executing on unit CompletedOperations 2025-06-24T21:09:34.653943Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [6:5] at 9437185 has finished 2025-06-24T21:09:34.653963Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:34.653978Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-24T21:09:34.653995Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437185 has no attached operations 2025-06-24T21:09:34.654017Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437185 2025-06-24T21:09:34.654095Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} hope 5 -> done Change{16, redo 636b alter 0b annex 0, ~{ 1001, 1, 3, 4, 12, 7, 8, 5 } -{ }, 0 gb} 2025-06-24T21:09:34.654134Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} release Res{3 96990534b}, Memory{0 dyn 0} 2025-06-24T21:09:34.654275Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437185 (3 by [3:371:2314]) (release resources {0, 96990534}) 2025-06-24T21:09:34.654312Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_transaction from 15.243099 to 0.000000 (remove task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437185 (3 by [3:371:2314])) 2025-06-24T21:09:34.666830Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:10} commited cookie 1 for step 9 2025-06-24T21:09:34.666907Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-24T21:09:34.666950Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:5] at 9437185 on unit CompleteOperation 2025-06-24T21:09:34.667014Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 5] from 9437185 at tablet 9437185 send result to client [3:102:2135], exec latency: 1 ms, propose latency: 3 ms 2025-06-24T21:09:34.667120Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437185 {TEvReadSet step# 6 txid# 5 TabletSource# 9437186 TabletDest# 9437185 SetTabletConsumer# 9437185 Flags# 0 Seqno# 2} 2025-06-24T21:09:34.667187Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-24T21:09:34.667453Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437184:3:10} commited cookie 1 for step 9 2025-06-24T21:09:34.667516Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:34.667548Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:5] at 9437184 on unit CompleteOperation 2025-06-24T21:09:34.667590Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 5] from 9437184 at tablet 9437184 send result to client [3:102:2135], exec latency: 1 ms, propose latency: 3 ms 2025-06-24T21:09:34.667637Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 6 txid# 5 TabletSource# 9437186 TabletDest# 9437184 
SetTabletConsumer# 9437184 Flags# 0 Seqno# 1} 2025-06-24T21:09:34.667663Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:34.667890Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [3:349:2314], Recipient [3:458:2397]: {TEvReadSet step# 6 txid# 5 TabletSource# 9437186 TabletDest# 9437185 SetTabletConsumer# 9437185 Flags# 0 Seqno# 2} 2025-06-24T21:09:34.667945Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:34.668014Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437186 source 9437186 dest 9437185 consumer 9437185 txId 5 2025-06-24T21:09:34.668114Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [3:238:2229], Recipient [3:458:2397]: {TEvReadSet step# 6 txid# 5 TabletSource# 9437186 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 1} 2025-06-24T21:09:34.668140Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:09:34.668162Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437186 source 9437186 dest 9437184 consumer 9437184 txId 5 >> TKeyValueTracingTest::WriteSmall >> TestKinesisHttpProxy::TestWrongRequest [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpPrefixedVectorIndexes::OrderByCosineLevel2+Nullable+UseSimilarity [GOOD] Test command err: Trying to start YDB, gRPC: 17413, MsgBus: 18840 2025-06-24T21:07:50.639693Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625606644658710:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:50.639763Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045d1/r3tmp/tmpXtmmTI/pdisk_1.dat 2025-06-24T21:07:50.901174Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625606644658690:2079] 1750799270638053 != 1750799270638056 2025-06-24T21:07:50.935565Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17413, node 1 2025-06-24T21:07:51.022942Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:51.022969Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:51.022981Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:51.023137Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:07:51.041286Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:51.041390Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:51.043401Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 
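The earlier excerpt for tablet 9437185 shows ExecuteDataTx hitting its memory limit (10776726 bytes), releasing the transaction data, and being resubmitted to the resource broker with a larger grant (96990534 bytes) before completing on the next attempt. The sketch below imitates that retry-with-a-larger-grant pattern in plain Python; the byte counts come from the log, while the function names and loop structure are assumptions made for illustration only.

```python
# Illustrative retry loop: on "exceeded memory limit", release the data and
# resubmit with grant = current grant + requested extra, as the trace shows.

def execute_data_tx(grant_bytes, required_bytes):
    """Pretend executor: asks for more memory when the grant is too small."""
    if grant_bytes < required_bytes:
        return "RESTART", required_bytes - grant_bytes  # "requests N more for the next try"
    return "COMPLETE", 0

def run_with_memory_retries(required_bytes, initial_grant, max_attempts=5):
    grant = initial_grant
    for attempt in range(1, max_attempts + 1):
        status, extra = execute_data_tx(grant, required_bytes)
        if status == "COMPLETE":
            print(f"attempt {attempt}: COMPLETE with grant {grant}b")
            return grant
        # Release the tx data and resubmit with a larger grant, like
        # Res{3 10776726b} -> Res{3 96990534b} in the trace above.
        print(f"attempt {attempt}: exceeded memory limit {grant} and requests {extra} more")
        grant += extra
    raise RuntimeError("transaction did not fit into memory after retries")

run_with_memory_retries(required_bytes=96_990_534, initial_grant=10_776_726)
```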
TClient is connected to server localhost:18840 TClient is connected to server localhost:18840 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:51.572151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:51.588804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:07:51.600565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:51.659800Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:51.764147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:51.927231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:52.010361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
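Most of the setup noise above is the same schemeshard warning repeated once per suboperation the test issues (ESchemeOpAlterSubDomain, then a series of ESchemeOpCreateTable). A small, assumption-laden parsing pass like the one below can condense such a run into a per-operation-type count; the regular expression only targets the "suboperation type: ..., opId: ..." pattern visible in this log and is not part of any YDB tooling.

```python
# Condense the repeated schemeshard warnings above into a per-type summary.
# The regex matches the "suboperation type: <type>, opId: <id>" pattern in this log.
import re
from collections import Counter

SUBOP_RE = re.compile(r"suboperation type: (\w+), opId: (\d+:\d+)")

def summarize_scheme_ops(log_text: str) -> Counter:
    """Count how many times each ESchemeOp* suboperation type appears."""
    return Counter(op_type for op_type, _ in SUBOP_RE.findall(log_text))

sample = (
    "suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0 ... "
    "suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0 ... "
    "suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0 ..."
)
print(summarize_scheme_ops(sample))
# Counter({'ESchemeOpCreateTable': 2, 'ESchemeOpAlterSubDomain': 1})
```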
2025-06-24T21:07:54.003675Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625623824529514:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:54.003822Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:54.302770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:54.336264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:54.365872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:54.398203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:54.429015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:54.471859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:54.542545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:54.632094Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625623824530177:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:54.632193Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:54.632370Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625623824530182:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:54.636155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:54.656120Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625623824530184:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:54.710728Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625623824530237:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:55.639649Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625606644658710:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:55.639723Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:07:55.664361Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625628119497806:3594], Recipient [1:7519625606644659044:2161]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:55.664410Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:50 ... onseTime 2025-06-24T21:09:31.168263Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:7519625893753957547:2503], Recipient [3:7519625859394216592:2149]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037926 TableLocalId: 21 Generation: 1 Round: 2 TableStats { DataSize: 107 RowCount: 3 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750799369641 LastUpdateTime: 1750799336398 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 11 PartCount: 1 RangeReadRows: 11 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 107 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 390 Memory: 119579 Storage: 228 } ShardState: 2 UserTablePartOwners: 72075186224037926 NodeId: 3 StartTime: 1750799336138 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:09:31.168315Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:09:31.168355Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037926 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 21] state 'Ready' dataSize 107 rowCount 3 cpuUsage 0.039 2025-06-24T21:09:31.168459Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037926 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 21] raw table stats: DataSize: 107 RowCount: 3 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750799369641 LastUpdateTime: 1750799336398 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 11 PartCount: 1 RangeReadRows: 11 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 107 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 
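The periodic-stats entries above carry a flat `TableStats { key: value ... }` block per datashard, which the schemeshard consults when deciding about splits and background compaction ("Do not want to split tablet ..."). The sketch below pulls a few numeric fields out of such a block and applies a purely illustrative size threshold; only the field names come from the log, while the threshold and function names are assumptions for the example.

```python
# Pull numeric fields out of a "TableStats { ... }" block like the ones above
# and apply an illustrative split heuristic. Field names come from the log;
# the 2 GiB threshold is an assumption for the example only.
import re

FIELD_RE = re.compile(r"(\w+): (\d+)")

def parse_table_stats(stats_block: str) -> dict:
    """Return the integer-valued fields of a flat TableStats block."""
    return {name: int(value) for name, value in FIELD_RE.findall(stats_block)}

def wants_split(stats: dict, size_threshold_bytes: int = 2 * 1024**3) -> bool:
    """Toy heuristic: consider a split only when the shard holds a lot of data."""
    return stats.get("DataSize", 0) >= size_threshold_bytes

sample = ("TableStats { DataSize: 107 RowCount: 3 IndexSize: 0 InMemSize: 0 "
          "PartCount: 1 SearchHeight: 1 }")
stats = parse_table_stats(sample)
print(stats["DataSize"], stats["RowCount"], wants_split(stats))
# 107 3 False  -> consistent with "Do not want to split tablet" above
```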
2025-06-24T21:09:31.168486Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.099995s, queue# 1 2025-06-24T21:09:31.268762Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519625859394216592:2149]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:31.268808Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:31.268819Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-24T21:09:31.268863Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-24T21:09:31.268881Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-06-24T21:09:31.268934Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 21 shard idx 72057594046644480:40 data size 107 row count 3 2025-06-24T21:09:31.268999Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037926 maps to shardIdx: 72057594046644480:40 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 21], pathId map=indexImplPrefixTable, is column=0, is olap=0, RowCount 3, DataSize 107 2025-06-24T21:09:31.269010Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037926, followerId 0 2025-06-24T21:09:31.269067Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:40 with partCount# 1, rowCount# 3, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:31.269112Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037926 2025-06-24T21:09:31.269166Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:09:31.270065Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519625859394216592:2149]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:31.270081Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:31.270089Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-24T21:09:31.780041Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625859394216592:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:31.780104Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:31.780157Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625859394216592:2149], Recipient [3:7519625859394216592:2149]: 
NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:31.780180Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:32.783373Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625859394216592:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:32.783431Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:32.783492Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625859394216592:2149], Recipient [3:7519625859394216592:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:32.783516Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:33.198185Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:7519625880869054483:2378], Recipient [3:7519625859394216592:2149]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037911 TableLocalId: 6 Generation: 1 Round: 3 TableStats { DataSize: 656 RowCount: 2 IndexSize: 0 InMemSize: 656 LastAccessTime: 1750799335360 LastUpdateTime: 1750799335360 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 2 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 146 Memory: 137784 } ShardState: 2 UserTablePartOwners: 72075186224037911 NodeId: 3 StartTime: 1750799333164 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:09:33.198252Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:09:33.198298Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037911 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 6] state 'Ready' dataSize 656 rowCount 2 cpuUsage 0.0146 2025-06-24T21:09:33.198420Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037911 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 6] raw table stats: DataSize: 656 RowCount: 2 IndexSize: 0 InMemSize: 656 LastAccessTime: 1750799335360 LastUpdateTime: 1750799335360 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 2 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:09:33.198450Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.099994s, queue# 1 2025-06-24T21:09:33.298630Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 
2146435092, Sender [0:0:0], Recipient [3:7519625859394216592:2149]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:33.298688Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:33.298703Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-24T21:09:33.298763Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-24T21:09:33.298786Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-06-24T21:09:33.298853Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 6 shard idx 72057594046644480:24 data size 656 row count 2 2025-06-24T21:09:33.298920Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037911 maps to shardIdx: 72057594046644480:24 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 6], pathId map=KeyValue, is column=0, is olap=0, RowCount 2, DataSize 656 2025-06-24T21:09:33.298936Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037911, followerId 0 2025-06-24T21:09:33.298994Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:24 with partCount# 0, rowCount# 2, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:33.299063Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037911 2025-06-24T21:09:33.299166Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:09:33.299287Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519625859394216592:2149]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:33.299312Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:33.299321Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 >> TGroupMapperTest::CheckNotToBreakFailModel [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestSendMessageBatch [GOOD] Test command err: 2025-06-24T21:08:36.547529Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625808163959080:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:36.548241Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004471/r3tmp/tmpDNu4FU/pdisk_1.dat 2025-06-24T21:08:37.050509Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625808163959052:2079] 
1750799316543597 != 1750799316543600 2025-06-24T21:08:37.059130Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:37.068629Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:37.068861Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 27634, node 1 2025-06-24T21:08:37.071245Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:37.211390Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:37.211412Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:37.211420Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:37.211527Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:37.558049Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12564 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:37.767359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:37.785599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:12564 2025-06-24T21:08:37.994144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
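Because `ya make` interleaves per-test status markers (`>> Suite::Test [GOOD]`) and `------- ... Test command err:` blocks with the component traces, a small extraction pass helps when skimming results. The sketch below collects test names and verdicts from such markers; it is a best-effort parser for the patterns visible in this log, not part of the `ya` tooling.

```python
# Best-effort extraction of test verdicts from interleaved "ya make" output,
# matching the ">> Suite::Test [STATUS]" markers visible in this log.
import re

MARKER_RE = re.compile(r">> ([\w+:.-]+) \[(\w+)\]")

def collect_verdicts(log_text: str) -> dict:
    """Map 'Suite::Test' -> last reported verdict (GOOD, FAIL, TIMEOUT, ...)."""
    return {name: verdict for name, verdict in MARKER_RE.findall(log_text)}

sample = (">> TKeyValueTracingTest::WriteSmall "
          ">> TestKinesisHttpProxy::TestWrongRequest [GOOD] "
          ">> TestYmqHttpProxy::TestSendMessageBatch [GOOD]")
for name, verdict in collect_verdicts(sample).items():
    print(f"{verdict:8} {name}")
# GOOD     TestKinesisHttpProxy::TestWrongRequest
# GOOD     TestYmqHttpProxy::TestSendMessageBatch
```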
2025-06-24T21:08:38.004179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:08:38.006289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-24T21:08:38.025511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.227330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:38.267418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2025-06-24T21:08:38.272243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:38.325847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710665, at schemeshard: 72057594046644480 2025-06-24T21:08:38.332296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.371650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:38.411627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.488095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.521429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.564612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.594910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:39.816989Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625821048862319:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.817130Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.827371Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625821048862331:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.834731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:39.847849Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625821048862333:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-24T21:08:39.937917Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625821048862384:2865] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:40.591668Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jyhwb0r42abqhr3zva8taqp2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGU4ODExMmEtODY3MzdkNy03NDBkYzI2Yy1hMzBiM2ZmMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default ... r { Status: 400 Message: "No MessageGroupId parameter." ErrorCode: "MissingParameter" } Id: "Id-2" } } RequestId: "b18ec19d-c6225805-d3f7d32f-d5e3af71" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: true } 2025-06-24T21:09:33.843719Z node 7 :SQS DEBUG: queue_leader.cpp:384: Request SendMessageBatch working duration: 112ms 2025-06-24T21:09:33.843825Z node 7 :SQS TRACE: service.cpp:1483: Dec local leader ref for actor [7:7519626049533481171:3507]. Found: 1 2025-06-24T21:09:33.843881Z node 7 :SQS TRACE: proxy_service.cpp:194: HandleSqsResponse SendMessageBatch { RequestId: "b18ec19d-c6225805-d3f7d32f-d5e3af71" Entries { MD5OfMessageAttributes: "3d778967e1fa431d626ffb890c486385" MD5OfMessageBody: "94a29778a1f1f41bf68142847b2e6106" MessageId: "fd5ff90-7bb497de-6eb3c524-2fa4ae00" SequenceNumber: 1 Id: "Id-0" } Entries { MD5OfMessageBody: "3bf7e6d806a0b8062135ae945eca30bf" MessageId: "3064a75a-1006abe4-b92f0b05-e9009c16" SequenceNumber: 2 Id: "Id-1" } Entries { Error { Status: 400 Message: "No MessageGroupId parameter." ErrorCode: "MissingParameter" } Id: "Id-2" } } RequestId: "b18ec19d-c6225805-d3f7d32f-d5e3af71" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: true 2025-06-24T21:09:33.844021Z node 7 :SQS TRACE: proxy_service.cpp:208: Sending answer to proxy actor [7:7519626049533481167:2460]: SendMessageBatch { RequestId: "b18ec19d-c6225805-d3f7d32f-d5e3af71" Entries { MD5OfMessageAttributes: "3d778967e1fa431d626ffb890c486385" MD5OfMessageBody: "94a29778a1f1f41bf68142847b2e6106" MessageId: "fd5ff90-7bb497de-6eb3c524-2fa4ae00" SequenceNumber: 1 Id: "Id-0" } Entries { MD5OfMessageBody: "3bf7e6d806a0b8062135ae945eca30bf" MessageId: "3064a75a-1006abe4-b92f0b05-e9009c16" SequenceNumber: 2 Id: "Id-1" } Entries { Error { Status: 400 Message: "No MessageGroupId parameter." ErrorCode: "MissingParameter" } Id: "Id-2" } } RequestId: "b18ec19d-c6225805-d3f7d32f-d5e3af71" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: true 2025-06-24T21:09:33.844293Z node 7 :SQS TRACE: proxy_actor.cpp:178: Request [b18ec19d-c6225805-d3f7d32f-d5e3af71] HandleResponse: { SendMessageBatch { RequestId: "b18ec19d-c6225805-d3f7d32f-d5e3af71" Entries { MD5OfMessageAttributes: "3d778967e1fa431d626ffb890c486385" MD5OfMessageBody: "94a29778a1f1f41bf68142847b2e6106" MessageId: "fd5ff90-7bb497de-6eb3c524-2fa4ae00" SequenceNumber: 1 Id: "Id-0" } Entries { MD5OfMessageBody: "3bf7e6d806a0b8062135ae945eca30bf" MessageId: "3064a75a-1006abe4-b92f0b05-e9009c16" SequenceNumber: 2 Id: "Id-1" } Entries { Error { Status: 400 Message: "No MessageGroupId parameter." 
ErrorCode: "MissingParameter" } Id: "Id-2" } } RequestId: "b18ec19d-c6225805-d3f7d32f-d5e3af71" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: true }, status: OK 2025-06-24T21:09:33.844468Z node 7 :SQS DEBUG: proxy_actor.cpp:147: Request [b18ec19d-c6225805-d3f7d32f-d5e3af71] Sending reply from proxy actor: { SendMessageBatch { RequestId: "b18ec19d-c6225805-d3f7d32f-d5e3af71" Entries { MD5OfMessageAttributes: "3d778967e1fa431d626ffb890c486385" MD5OfMessageBody: "94a29778a1f1f41bf68142847b2e6106" MessageId: "fd5ff90-7bb497de-6eb3c524-2fa4ae00" SequenceNumber: 1 Id: "Id-0" } Entries { MD5OfMessageBody: "3bf7e6d806a0b8062135ae945eca30bf" MessageId: "3064a75a-1006abe4-b92f0b05-e9009c16" SequenceNumber: 2 Id: "Id-1" } Entries { Error { Status: 400 Message: "No MessageGroupId parameter." ErrorCode: "MissingParameter" } Id: "Id-2" } } RequestId: "b18ec19d-c6225805-d3f7d32f-d5e3af71" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: true } 2025-06-24T21:09:33.844896Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:379: http request [SendMessageBatch] requestId [b18ec19d-c6225805-d3f7d32f-d5e3af71] Got succesfult GRPC response. 2025-06-24T21:09:33.845188Z node 7 :HTTP_PROXY INFO: http_req.cpp:1207: http request [SendMessageBatch] requestId [b18ec19d-c6225805-d3f7d32f-d5e3af71] reply ok 2025-06-24T21:09:33.845363Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:1267: http request [SendMessageBatch] requestId [b18ec19d-c6225805-d3f7d32f-d5e3af71] Send metering event. HttpStatusCode: 200 IsFifo: 1 FolderId: folder4 RequestSizeInBytes: 1063 ResponseSizeInBytes: 643 SourceAddress: 385f:1201:6050:0:205f:1201:6050:0 ResourceId: 000000000000000101v0 Action: SendMessageBatch 2025-06-24T21:09:33.845598Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:52526) <- (200 , 464 bytes) 2025-06-24T21:09:33.845742Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:52526) connection closed Http output full {"Successful":[{"SequenceNumber":"1","Id":"Id-0","MD5OfMessageBody":"94a29778a1f1f41bf68142847b2e6106","MD5OfMessageAttributes":"3d778967e1fa431d626ffb890c486385","MessageId":"fd5ff90-7bb497de-6eb3c524-2fa4ae00"},{"SequenceNumber":"2","Id":"Id-1","MD5OfMessageBody":"3bf7e6d806a0b8062135ae945eca30bf","MessageId":"3064a75a-1006abe4-b92f0b05-e9009c16"}],"Failed":[{"Message":"No MessageGroupId parameter.","Id":"Id-2","Code":"MissingParameter","SenderFault":true}]} 2025-06-24T21:09:33.845984Z node 7 :SQS TRACE: executor.cpp:256: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Compile program response: { Status: 48 MiniKQLCompileResults { CompiledProgram: "\037\016\nFlags\010Name\010Args\016Payload\022Parameter\014Offset\032SentTimestamp\006\002\206\202\t\211\004\202\203\005@\206\205\004\207\203\010\207\203\010\026\032$SetResult\000\003?\002\020messages\t\211\004?\016\205\004?\016\203\014\020List$Truncated\203\004\030Member\000\t\211\026?\026\203\005\004\200\205\004\203\004\203\004\026\032\213\010\203\010\203\010\203\010\203\010\213\010?$?&\203\010\203\010\203\004\203\010\203\010\203\004\206\203\014\203\014,SelectRange\000\003?\034 \000\001\205\000\000\000\000\001\032\000\000\000\000\000\000\000?\014\005?\"\003?\036\010\003? 
\006\003\013?,\t\351\000?$\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?R\003?T(QUEUE_ID_NUMBER_HASH\003\022\000\t\351\000?&\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?h\003?j\036QUEUE_ID_NUMBER\003\022\000\t\351\000?(\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?~\003?\200\022TIME_FROM\003\022\000\003?*\000\010\013?2?`?v\003?.\177\377\377\377\377\377\377\377\377\003?0\177\377\377\377\377\377\377\377\377\014\003?4\000\003?6\002\003?8\000\003?:\000\006\010?>\003\203\014\000\003\203\014\000\003\203\014\000\003\203\014\000\017\003?@\000\377\007\003?\030\000\002\001\000/" } } 2025-06-24T21:09:33.846026Z node 7 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] compilation duration: 2ms 2025-06-24T21:09:33.846103Z node 7 :SQS DEBUG: queue_leader.cpp:464: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) has been prepared 2025-06-24T21:09:33.846118Z node 7 :SQS DEBUG: queue_leader.cpp:514: Request [] Executing compiled query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) 2025-06-24T21:09:33.846233Z node 7 :SQS DEBUG: executor.cpp:83: Request [] Starting executor actor for query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID). Mode: COMPILE_AND_EXEC 2025-06-24T21:09:33.846312Z node 7 :SQS TRACE: executor.cpp:154: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Serializing params: {"QUEUE_ID_NUMBER": 2, "QUEUE_ID_NUMBER_HASH": 17472595041006102391, "SHARD": 0, "QUEUE_ID_NUMBER_AND_SHARD_HASH": 12311263855443095412, "TIME_FROM": 0} 2025-06-24T21:09:33.846682Z node 7 :SQS TRACE: executor.cpp:203: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Execute program: { Transaction { MiniKQLTransaction { Mode: COMPILE_AND_EXEC Program { Bin: "\037\016\nFlags\010Name\010Args\016Payload\022Parameter\014Offset\032SentTimestamp\006\002\206\202\t\211\004\202\203\005@\206\205\004\207\203\010\207\203\010\026\032$SetResult\000\003?\002\020messages\t\211\004?\016\205\004?\016\203\014\020List$Truncated\203\004\030Member\000\t\211\026?\026\203\005\004\200\205\004\203\004\203\004\026\032\213\010\203\010\203\010\203\010\203\010\213\010?$?&\203\010\203\010\203\004\203\010\203\010\203\004\206\203\014\203\014,SelectRange\000\003?\034 \000\001\205\000\000\000\000\001\032\000\000\000\000\000\000\000?\014\005?\"\003?\036\010\003? 
\006\003\013?,\t\351\000?$\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?R\003?T(QUEUE_ID_NUMBER_HASH\003\022\000\t\351\000?&\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?h\003?j\036QUEUE_ID_NUMBER\003\022\000\t\351\000?(\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?~\003?\200\022TIME_FROM\003\022\000\003?*\000\010\013?2?`?v\003?.\177\377\377\377\377\377\377\377\377\003?0\177\377\377\377\377\377\377\377\377\014\003?4\000\003?6\002\003?8\000\003?:\000\006\010?>\003\203\014\000\003\203\014\000\003\203\014\000\003\203\014\000\017\003?@\000\377\007\003?\030\000\002\001\000/" } Params { Bin: "\037\000\005\205\n\203\010\203\010\203\010\203\004\203\010> TKeyValueTracingTest::ReadHuge >> DataShardOutOfOrder::TestSecondaryClearanceAfterShardRestartRace [GOOD] >> TTxDataShardMiniKQL::SelectRangeWithNotFullKey [GOOD] >> DataShardOutOfOrder::TestPlannedHalfOverloadedSplit+UseSink [GOOD] >> TPartBtreeIndexIteration::FewNodes [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::CheckNotToBreakFailModel [GOOD] >> TestKinesisHttpProxy::TestEmptyHttpBody [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineLevel2-Nullable+UseSimilarity [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::SelectRangeWithNotFullKey [GOOD] Test command err: 2025-06-24T21:09:33.703876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:09:33.703941Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:33.705850Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:33.719350Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:33.719881Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:09:33.720186Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:33.768677Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:33.777630Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:33.777867Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:33.779592Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:09:33.779663Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:09:33.779719Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:09:33.780069Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:33.780167Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:33.780236Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 
2025-06-24T21:09:33.843947Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:33.881397Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:09:33.881615Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:33.881734Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:09:33.881770Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:33.881807Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:09:33.881859Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:33.882002Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:33.882057Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:33.882343Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:33.882447Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:33.882587Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:33.882624Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:33.882682Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:33.882721Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:33.882750Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:33.882781Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:33.882817Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:33.882941Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:33.882992Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:33.883047Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:09:33.886559Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nx\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\016\n\010__tablet\030\004 9\032\023\n\r__updateEpoch\030\004 :\032\020\n\n__updateNo\030\004 
;(\"J\014/Root/table1\222\002\013\th\020\000\000\000\000\000\000\020\r" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:09:33.886649Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:33.886763Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:33.886942Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:09:33.886996Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:09:33.887068Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:09:33.887124Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:33.887179Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:09:33.887222Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:09:33.887283Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:33.887715Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:33.887765Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:09:33.887801Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:33.887844Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:33.887898Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:09:33.887930Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:33.887971Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:09:33.888002Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:33.888032Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:33.900605Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:33.900711Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:33.900770Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:33.900810Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:33.900892Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request 
in state WaitScheme 2025-06-24T21:09:33.901542Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:33.901606Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:33.901656Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:33.901919Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:33.901959Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:33.902119Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:33.902161Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:09:33.902197Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:09:33.902251Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:33.911118Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:33.911230Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:33.911547Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:33.911597Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:33.911660Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:33.911708Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:33.911745Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:33.911785Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:09:33 ... 004\206\203\014\203\014,SelectRange\000\003?* h\020\000\000\000\000\000\000\016\000\000\000\000\000\000\000?\014\005?2\003?,D\003?.F\003?0p\007\013?:\003?4e\005\'?8\003\013?>\003?\000\003?@\000\003?B\000\006\004?F\003\203\014\000\003\203\014\000\003\003?H\000\377\007\002\000\005?\032\005?\026?r\000\005?\030\003\005? 
\005?\034?r\000\006\000?\036\003?x\005?&\006\ 2025-06-24T21:09:35.940287Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:35.940377Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:35.941107Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit CheckDataTx 2025-06-24T21:09:35.941205Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is Executed 2025-06-24T21:09:35.941241Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit CheckDataTx 2025-06-24T21:09:35.941277Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-24T21:09:35.941312Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit BuildAndWaitDependencies 2025-06-24T21:09:35.941356Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-24T21:09:35.941402Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:8] at 9437184 2025-06-24T21:09:35.941440Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is Executed 2025-06-24T21:09:35.941462Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-24T21:09:35.941483Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 9437184 to execution unit ExecuteDataTx 2025-06-24T21:09:35.941505Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit ExecuteDataTx 2025-06-24T21:09:35.942074Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:8] at tablet 9437184 with status COMPLETE 2025-06-24T21:09:35.942165Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:8] at 9437184: {NSelectRow: 0, NSelectRange: 1, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 2, SelectRangeBytes: 31, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-24T21:09:35.942238Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is Executed 2025-06-24T21:09:35.942269Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit ExecuteDataTx 2025-06-24T21:09:35.942294Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:35.942317Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit FinishPropose 2025-06-24T21:09:35.942354Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 8 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-24T21:09:35.942422Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is DelayComplete 2025-06-24T21:09:35.942460Z node 3 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:35.942495Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 9437184 to execution unit CompletedOperations 2025-06-24T21:09:35.942551Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit CompletedOperations 2025-06-24T21:09:35.942597Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is Executed 2025-06-24T21:09:35.942619Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit CompletedOperations 2025-06-24T21:09:35.942643Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:8] at 9437184 has finished 2025-06-24T21:09:35.942700Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:35.942735Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:8] at 9437184 on unit FinishPropose 2025-06-24T21:09:35.942770Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 >> TCompactionMulti::MainPageCollectionEdge [GOOD] >> TCompactionMulti::MainPageCollectionEdgeMany >> KqpNotNullColumns::Describe [GOOD] >> KqpNotNullColumns::CreateTableWithNotNullColumns >> TestYmqHttpProxy::TestChangeMessageVisibilityBatch [GOOD] |94.3%| [TA] $(B)/ydb/core/kqp/ut/tx/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestSecondaryClearanceAfterShardRestartRace [GOOD] Test command err: 2025-06-24T21:09:27.374453Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:27.374807Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:27.374976Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002c09/r3tmp/tmpwJAA9X/pdisk_1.dat 2025-06-24T21:09:27.689303Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:09:27.692987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:27.738161Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:27.744133Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799364530262 != 1750799364530266 2025-06-24T21:09:27.789217Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:09:27.790441Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T21:09:27.790723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:27.790851Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:27.802768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:27.885466Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T21:09:27.885534Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T21:09:27.885706Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:600:2508] 2025-06-24T21:09:28.047850Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:600:2508] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 2 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T21:09:28.047967Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:600:2508] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:09:28.048677Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:600:2508] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:09:28.048803Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:600:2508] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:09:28.049215Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:09:28.049431Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:600:2508] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:09:28.049612Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:600:2508] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:09:28.051717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:28.052346Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T21:09:28.053128Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:600:2508] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T21:09:28.053224Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:600:2508] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T21:09:28.089860Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:631:2532]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:28.091735Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:631:2532]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:28.092322Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:631:2532] 2025-06-24T21:09:28.092620Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:28.141754Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:631:2532]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:28.141876Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:617:2524], Recipient [1:633:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:28.143635Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:617:2524], Recipient [1:633:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:28.144088Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:633:2534] 2025-06-24T21:09:28.144324Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:28.153162Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:617:2524], Recipient [1:633:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:28.153708Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:28.153870Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:28.155841Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:09:28.155946Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:09:28.156012Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:09:28.156450Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:28.156668Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:28.156779Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:664:2532] in generation 1 2025-06-24T21:09:28.157333Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:28.157426Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:28.159014Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-24T21:09:28.159084Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-24T21:09:28.159127Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-24T21:09:28.159569Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:28.159689Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:28.159770Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:665:2534] in generation 1 2025-06-24T21:09:28.170802Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:28.209003Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:09:28.209247Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:28.209386Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:668:2553] 2025-06-24T21:09:28.209446Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:09:28.209515Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:09:28.209565Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:09:28.209966Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:631:2532], Recipient [1:631:2532]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:28.210048Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event 
TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:28.210221Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:28.210269Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-24T21:09:28.210363Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:28.210437Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:669:2554] 2025-06-24T21:09:28.210468Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-24T21:09:28.210495Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-24T21:09:28.210524Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:09:28.210856Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender ... 3at2nhvm11fpq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWQyOTg0MmMtMmNkN2VjOTMtYzdhNDFlNWMtYTNmZjE4YmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:922:2710], 2025-06-24T21:09:35.671582Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2368: ActorId: [2:919:2710] TxId: 281474976715662. Ctx: { TraceId: 01jyhwcq7xfpt3at2nhvm11fpq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWQyOTg0MmMtMmNkN2VjOTMtYzdhNDFlNWMtYTNmZjE4YmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, immediate tx, become ExecuteState 2025-06-24T21:09:35.672437Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:919:2710] TxId: 281474976715662. Ctx: { TraceId: 01jyhwcq7xfpt3at2nhvm11fpq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWQyOTg0MmMtMmNkN2VjOTMtYzdhNDFlNWMtYTNmZjE4YmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:922:2710], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-24T21:09:35.672494Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:919:2710] TxId: 281474976715662. Ctx: { TraceId: 01jyhwcq7xfpt3at2nhvm11fpq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWQyOTg0MmMtMmNkN2VjOTMtYzdhNDFlNWMtYTNmZjE4YmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:922:2710], 2025-06-24T21:09:35.672540Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:919:2710] TxId: 281474976715662. Ctx: { TraceId: 01jyhwcq7xfpt3at2nhvm11fpq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWQyOTg0MmMtMmNkN2VjOTMtYzdhNDFlNWMtYTNmZjE4YmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:922:2710], 2025-06-24T21:09:35.673147Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:919:2710] TxId: 281474976715662. 
Ctx: { TraceId: 01jyhwcq7xfpt3at2nhvm11fpq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWQyOTg0MmMtMmNkN2VjOTMtYzdhNDFlNWMtYTNmZjE4YmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:922:2710], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 720 Tasks { TaskId: 1 CpuTimeUs: 296 FinishTimeMs: 1750799375672 EgressBytes: 10 EgressRows: 1 ComputeCpuTimeUs: 17 BuildCpuTimeUs: 279 HostName: "ghrun-4pjfmvffsq" NodeId: 2 CreateTimeMs: 1750799375671 UpdateTimeMs: 1750799375672 } MaxMemoryUsage: 1048576 } 2025-06-24T21:09:35.673242Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715662. Ctx: { TraceId: 01jyhwcq7xfpt3at2nhvm11fpq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWQyOTg0MmMtMmNkN2VjOTMtYzdhNDFlNWMtYTNmZjE4YmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:922:2710] 2025-06-24T21:09:35.673316Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:276: ActorId: [2:919:2710] TxId: 281474976715662. Ctx: { TraceId: 01jyhwcq7xfpt3at2nhvm11fpq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWQyOTg0MmMtMmNkN2VjOTMtYzdhNDFlNWMtYTNmZjE4YmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send Commit to BufferActor=[2:918:2710] 2025-06-24T21:09:35.673378Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:919:2710] TxId: 281474976715662. Ctx: { TraceId: 01jyhwcq7xfpt3at2nhvm11fpq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWQyOTg0MmMtMmNkN2VjOTMtYzdhNDFlNWMtYTNmZjE4YmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000720s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-24T21:09:35.673855Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [2:927:2726], Recipient [2:877:2693]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:35.673925Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:35.673990Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:926:2725], serverId# [2:927:2726], sessionId# [0:0:0] 2025-06-24T21:09:35.674212Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 278003712, Sender [2:925:2710], Recipient [2:877:2693]: NKikimrDataEvents.TEvWrite Operations { Type: OPERATION_UPSERT TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } ColumnIds: 1 ColumnIds: 2 PayloadIndex: 0 PayloadFormat: FORMAT_CELLVEC } TxMode: MODE_IMMEDIATE 2025-06-24T21:09:35.674270Z node 2 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-24T21:09:35.674405Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435074, Sender [2:877:2693], Recipient [2:877:2693]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-24T21:09:35.674441Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-24T21:09:35.674528Z node 2 :TX_DATASHARD TRACE: datashard__write.cpp:28: TTxWrite:: execute at tablet# 72075186224037888 2025-06-24T21:09:35.674663Z node 2 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: 
Parsing write transaction for 0 at 72075186224037888, record: Operations { Type: OPERATION_UPSERT TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } ColumnIds: 1 ColumnIds: 2 PayloadIndex: 0 PayloadFormat: FORMAT_CELLVEC } TxMode: MODE_IMMEDIATE 2025-06-24T21:09:35.674747Z node 2 :TX_DATASHARD TRACE: datashard_write_operation.cpp:213: Table /Root/table-1, shard: 72075186224037888, write point (Uint32 : 4) 2025-06-24T21:09:35.674795Z node 2 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint32 : 4) table: [72057594046644480:2:1] 2025-06-24T21:09:35.674886Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit CheckWrite 2025-06-24T21:09:35.674942Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-24T21:09:35.674978Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit CheckWrite 2025-06-24T21:09:35.675022Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T21:09:35.675056Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T21:09:35.675100Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2000/281474976715661 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-24T21:09:35.675184Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 72075186224037888 2025-06-24T21:09:35.675226Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-24T21:09:35.675259Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:09:35.675286Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit ExecuteWrite 2025-06-24T21:09:35.675312Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit ExecuteWrite 2025-06-24T21:09:35.675350Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-24T21:09:35.675406Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2000/281474976715661 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-24T21:09:35.675547Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=1 2025-06-24T21:09:35.675604Z node 2 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-24T21:09:35.675687Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:09:35.675721Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit ExecuteWrite 2025-06-24T21:09:35.675762Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 
to execution unit FinishProposeWrite 2025-06-24T21:09:35.675805Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T21:09:35.675881Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:35.675921Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-24T21:09:35.675965Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:09:35.676014Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:09:35.676066Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-24T21:09:35.676095Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:09:35.676127Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 72075186224037888 has finished 2025-06-24T21:09:35.687110Z node 2 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-24T21:09:35.687235Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T21:09:35.687306Z node 2 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 2 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2025-06-24T21:09:35.687424Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:09:35.688077Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:919:2710] TxId: 281474976715662. Ctx: { TraceId: 01jyhwcq7xfpt3at2nhvm11fpq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWQyOTg0MmMtMmNkN2VjOTMtYzdhNDFlNWMtYTNmZjE4YmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T21:09:35.688162Z node 2 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2202: ActorId: [2:919:2710] TxId: 281474976715662. Ctx: { TraceId: 01jyhwcq7xfpt3at2nhvm11fpq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWQyOTg0MmMtMmNkN2VjOTMtYzdhNDFlNWMtYTNmZjE4YmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Terminate, become ZombieState ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestPlannedHalfOverloadedSplit+UseSink [GOOD] Test command err: 2025-06-24T21:09:26.382886Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:26.383246Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:26.383427Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002c29/r3tmp/tmpeo9zUa/pdisk_1.dat 2025-06-24T21:09:26.698031Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:09:26.701430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:26.734342Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:26.738755Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799363462511 != 1750799363462515 2025-06-24T21:09:26.784779Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:26.784916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:26.796547Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:26.880913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:26.919861Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:26.920797Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:26.921143Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:09:26.921320Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:26.961909Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:26.962634Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:26.962795Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:26.964691Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T21:09:26.964793Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:09:26.964875Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:09:26.965314Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:26.965508Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:26.965628Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:09:26.976477Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:27.008497Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:09:27.008679Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:27.008790Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:09:27.008827Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:09:27.008858Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:09:27.008907Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:09:27.009080Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:27.009122Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:27.009402Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:09:27.009483Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:09:27.009542Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:09:27.009574Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:27.009614Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:09:27.009648Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:09:27.009675Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:09:27.009702Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:09:27.009734Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:09:27.009835Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:27.009870Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:27.009912Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:09:27.010256Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:09:27.010297Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:27.010369Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:09:27.010560Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:09:27.010616Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:09:27.010679Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:09:27.010720Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:09:27.010750Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T21:09:27.010781Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T21:09:27.010807Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:27.011035Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:27.011061Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T21:09:27.011087Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:09:27.011116Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:27.011255Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T21:09:27.011292Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T21:09:27.011333Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T21:09:27.011368Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T21:09:27.011397Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T21:09:27.012580Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T21:09:27.012630Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:09:27.023460Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:09:27.023547Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:09:27.023599Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:09:27.023685Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... entDestroyed { TabletId: 72075186224037892 ClientId: [2:1138:2876] ServerId: [2:1140:2878] } 2025-06-24T21:09:35.242371Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T21:09:35.242454Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [2:24:2071], Recipient [2:988:2768]: {TEvRegisterTabletResult TabletId# 72075186224037892 Entry# 2000} 2025-06-24T21:09:35.242485Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-24T21:09:35.242521Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037892 time 2000 2025-06-24T21:09:35.242558Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-24T21:09:35.242853Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-06-24T21:09:35.242911Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:35.242959Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037892 2025-06-24T21:09:35.243004Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037892 has no attached operations 2025-06-24T21:09:35.243044Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037892 2025-06-24T21:09:35.243087Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037892 TxInFly 0 2025-06-24T21:09:35.243161Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-24T21:09:35.243345Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877764, Sender [2:1140:2878], Recipient [2:988:2768]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:09:35.243384Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3169: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:09:35.243444Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 
72075186224037892, clientId# [2:1138:2876], serverId# [2:1140:2878], sessionId# [0:0:0] 2025-06-24T21:09:35.243748Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [2:24:2071], Recipient [2:988:2768]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 2000 ReadStep# 2000 } 2025-06-24T21:09:35.243790Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-24T21:09:35.243834Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037892 coordinator 72057594046316545 last step 0 next step 2000 2025-06-24T21:09:35.243884Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037892: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-06-24T21:09:35.243945Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2846: CheckMediatorStateRestored at 72075186224037892 promoting UnprotectedReadEdge to v2000/18446744073709551615 2025-06-24T21:09:35.254923Z node 2 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:304: 72075186224037893 ack snapshot OpId 281474976715665 2025-06-24T21:09:35.255019Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037893 2025-06-24T21:09:35.255100Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037893 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:09:35.255179Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037893 2025-06-24T21:09:35.255222Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037893, actorId: [2:1147:2885] 2025-06-24T21:09:35.255241Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037893 2025-06-24T21:09:35.255267Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037893 2025-06-24T21:09:35.255286Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-06-24T21:09:35.255410Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553157, Sender [2:994:2770], Recipient [2:710:2589]: NKikimrTxDataShard.TEvSplitTransferSnapshotAck TabletId: 72075186224037893 OperationCookie: 281474976715665 2025-06-24T21:09:35.255449Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037889 Received snapshot Ack from dst 72075186224037893 for split OpId 281474976715665 2025-06-24T21:09:35.255666Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:994:2770], Recipient [2:994:2770]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:35.255694Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:35.255915Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [2:1139:2877], Recipient [2:710:2589]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186224037893 ClientId: [2:1139:2877] ServerId: [2:1141:2879] } 2025-06-24T21:09:35.255940Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 
2025-06-24T21:09:35.256036Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893 2025-06-24T21:09:35.256061Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:35.256083Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037893 2025-06-24T21:09:35.256104Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037893 has no attached operations 2025-06-24T21:09:35.256124Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037893 2025-06-24T21:09:35.256143Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037893 TxInFly 0 2025-06-24T21:09:35.256173Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-06-24T21:09:35.256427Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [2:24:2071], Recipient [2:994:2770]: {TEvRegisterTabletResult TabletId# 72075186224037893 Entry# 2000} 2025-06-24T21:09:35.256463Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-24T21:09:35.256494Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037893 time 2000 2025-06-24T21:09:35.256525Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-06-24T21:09:35.256660Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877764, Sender [2:1141:2879], Recipient [2:994:2770]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:09:35.256689Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3169: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:09:35.256725Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037893, clientId# [2:1139:2877], serverId# [2:1141:2879], sessionId# [0:0:0] 2025-06-24T21:09:35.257051Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [2:24:2071], Recipient [2:994:2770]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 2000 ReadStep# 2000 } 2025-06-24T21:09:35.257088Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-24T21:09:35.257119Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037893 coordinator 72057594046316545 last step 0 next step 2000 2025-06-24T21:09:35.257157Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037893: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-06-24T21:09:35.257197Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2846: CheckMediatorStateRestored at 72075186224037893 promoting UnprotectedReadEdge to v2000/18446744073709551615 2025-06-24T21:09:35.267934Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:485: 72075186224037889 ack split to schemeshard 281474976715665 2025-06-24T21:09:35.271130Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553158, Sender [2:371:2365], Recipient 
[2:715:2591] 2025-06-24T21:09:35.271247Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:565: Got TEvSplitPartitioningChanged: opId: 281474976715665, at datashard: 72075186224037889, state: SplitSrcWaitForPartitioningChanged 2025-06-24T21:09:35.273749Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:532: 72075186224037889 ack split partitioning changed to schemeshard 281474976715665 2025-06-24T21:09:35.273837Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:09:35.273949Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [2:702:2584], Recipient [2:710:2589]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-24T21:09:35.781018Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 278003712, Sender [2:939:2631], Recipient [2:624:2529]: NKikimrDataEvents.TEvWrite Operations { Type: OPERATION_UPSERT TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } ColumnIds: 1 ColumnIds: 2 PayloadIndex: 0 PayloadFormat: FORMAT_CELLVEC } TxId: 281474976715663 TxMode: MODE_VOLATILE_PREPARE Locks { SendingShards: 72075186224037888 SendingShards: 72075186224037889 ReceivingShards: 72075186224037888 ReceivingShards: 72075186224037889 Op: Commit } 2025-06-24T21:09:35.781108Z node 2 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-24T21:09:35.781248Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_WRONG_SHARD_STATE;details=Rejecting data TxId 281474976715663 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state);tx_id=281474976715663; 2025-06-24T21:09:35.781326Z node 2 :TX_DATASHARD NOTICE: datashard.cpp:3137: Rejecting data TxId 281474976715663 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state) 2025-06-24T21:09:35.781881Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715664, at schemeshard: 72057594046644480 2025-06-24T21:09:35.782518Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715665, at schemeshard: 72057594046644480 >> TestKinesisHttpProxy::ListShardsToken [GOOD] |94.3%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/tx/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TCompactionMulti::MainPageCollectionEdgeMany [GOOD] >> TCompactionMulti::MainPageCollectionOverflow [GOOD] >> TCompactionMulti::MainPageCollectionOverflowSmallRefs [GOOD] >> TCompactionMulti::MainPageCollectionOverflowLargeRefs >> TCompactionMulti::MainPageCollectionOverflowLargeRefs [GOOD] >> TExecutorDb::RandomOps >> KqpNewEngine::JoinWithParams [GOOD] >> KqpNewEngine::JoinPure >> TKeyValueTracingTest::ReadSmall [FAIL] >> TKeyValueTracingTest::WriteHuge [FAIL] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut_trace/unittest >> TKeyValueTracingTest::WriteSmall [FAIL] >> KqpSqlIn::SelectNotAllElements [GOOD] >> KqpSqlIn::SecondaryIndex_SimpleKey_In_And ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestKinesisHttpProxy::TestWrongRequest [GOOD] Test command err: 2025-06-24T21:08:36.577691Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625806288003640:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:36.578071Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00448a/r3tmp/tmpRRNmuR/pdisk_1.dat 2025-06-24T21:08:36.997683Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:37.003400Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625806288003528:2079] 1750799316542223 != 1750799316542226 2025-06-24T21:08:37.010853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:37.011005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 11482, node 1 2025-06-24T21:08:37.012900Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:37.212049Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:37.212076Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:37.212088Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:37.212241Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:37.578606Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20991 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:37.725710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:20991 waiting... 2025-06-24T21:08:37.982305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2025-06-24T21:08:37.988273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:08:37.999678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-24T21:08:38.012172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-24T21:08:38.023684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.171436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:08:38.254426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:38.298350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.333869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:38.413727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:38.448745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.487261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.522732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.552734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:39.711947Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625819172906798:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.712102Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.713240Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625819172906810:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.718703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:39.754514Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625819172906812:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-24T21:08:39.853267Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625819172906863:2866] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:40.591771Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jyhwb0mx48jtyvz21z10t7dm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGU5ZTYxMTUtNzVhZTQ5ZmItYTUxYWY2ZWEtMmU5MDlmNTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:08:40.676521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb ... l { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-24T21:09:34.914610Z node 8 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Minikql data response: {"settings": [], "truncated": false} 2025-06-24T21:09:34.914711Z node 8 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] execution duration: 22ms 2025-06-24T21:09:34.915010Z node 8 :SQS TRACE: user_settings_reader.cpp:89: Handle user settings: { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-24T21:09:34.916358Z node 8 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] HandleResponse { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { 
Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-24T21:09:34.916381Z node 8 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Attempt 1 execution duration: 21ms 2025-06-24T21:09:34.916827Z node 8 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-24T21:09:34.916860Z node 8 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Minikql data response: {"queues": [], "truncated": false} 2025-06-24T21:09:34.916980Z node 8 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] execution duration: 
22ms 2025-06-24T21:09:34.917498Z node 8 :SQS TRACE: queues_list_reader.cpp:82: Handle queues list: { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-24T21:09:35.104131Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7519626054396655988:2396]: Pool not found 2025-06-24T21:09:35.104355Z node 8 :SQS DEBUG: monitoring.cpp:60: [monitoring] Report deletion queue data lag: 0.000000s, count: 0 2025-06-24T21:09:35.470936Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7519626054396656031:2402]: Pool not found 2025-06-24T21:09:35.471209Z node 8 :SQS DEBUG: cleanup_queue_data.cpp:100: [cleanup removed queues] getting queues... 2025-06-24T21:09:35.474680Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519626058691623443:2421], DatabaseId: /Root/SQS, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:35.474681Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:602: [WorkloadService] [TDatabaseFetcherActor] ActorId: [8:7519626058691623444:2422], Database: /Root/SQS, Failed to fetch database info, UNSUPPORTED, issues: {
: Error: Invalid database path /Root/SQS, please check the correctness of the path } 2025-06-24T21:09:35.474841Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/SQS, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:35.849450Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7519626058691623441:2420]: Pool not found 2025-06-24T21:09:35.849895Z node 8 :SQS DEBUG: cleanup_queue_data.cpp:138: [cleanup removed queues] there are no queues to delete Http output full {"__type":"InvalidArgumentException","message":"ydb/core/http_proxy/json_proto_conversion.h:400: Unexpected json key: WrongStreamName"} 400 {"__type":"InvalidArgumentException","message":"ydb/core/http_proxy/json_proto_conversion.h:400: Unexpected json key: WrongStreamName"} 2025-06-24T21:09:35.868252Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:57832) incoming connection opened 2025-06-24T21:09:35.868345Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:57832) -> (POST /, 87 bytes) 2025-06-24T21:09:35.868546Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [b87e:8b00:6050:0:a07e:8b00:6050:0] request [CreateStream] url [/] database [] requestId: f7eaa23a-70abbefe-8bddaa7e-212069bf 2025-06-24T21:09:35.869147Z node 8 :HTTP_PROXY WARN: http_req.cpp:948: http request [CreateStream] requestId [f7eaa23a-70abbefe-8bddaa7e-212069bf] got new request with incorrect json from [b87e:8b00:6050:0:a07e:8b00:6050:0] database '' 2025-06-24T21:09:35.869364Z node 8 :HTTP_PROXY INFO: http_req.cpp:1211: http request [CreateStream] requestId [f7eaa23a-70abbefe-8bddaa7e-212069bf] reply with status: BAD_REQUEST message: ydb/core/http_proxy/json_proto_conversion.h:400: Unexpected json key: WrongStreamName 2025-06-24T21:09:35.869684Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:57832) <- (400 InvalidArgumentException, 135 bytes) 2025-06-24T21:09:35.869743Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:289: (#37,[::1]:57832) Request: POST / HTTP/1.1 Host: example.amazonaws.com X-Amz-Target: kinesisApi.CreateStream X-Amz-Date: 20150830T123600Z Authorization: Content-Type: application/json Connection: Close Transfer-Encoding: chunked { "ShardCount":5, "StreamName":"testtopic", "WrongStreamName":"WrongStreamName" } 2025-06-24T21:09:35.869783Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:57832) Response: HTTP/1.1 400 InvalidArgumentException Connection: close x-amzn-requestid: f7eaa23a-70abbefe-8bddaa7e-212069bf x-amz-crc32: 3053902336 Content-Type: application/x-amz-json-1.1 Content-Length: 135 2025-06-24T21:09:35.869901Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:57832) connection closed >> TKeyValueTracingTest::ReadHuge [FAIL] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpPrefixedVectorIndexes::OrderByCosineLevel2-Nullable+UseSimilarity [GOOD] Test command err: Trying to start YDB, gRPC: 19701, MsgBus: 17378 2025-06-24T21:07:42.291928Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625574829078100:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:42.291998Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004600/r3tmp/tmpJDGT96/pdisk_1.dat 2025-06-24T21:07:42.618584Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: 
Notification cookie mismatch for subscription [1:7519625574829078078:2079] 1750799262290452 != 1750799262290455 2025-06-24T21:07:42.629079Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19701, node 1 2025-06-24T21:07:42.652005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:42.652121Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:42.653749Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:42.667766Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:42.667802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:42.667831Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:42.667962Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17378 TClient is connected to server localhost:17378 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:43.187630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:43.220018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:07:43.228583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:43.304242Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:07:43.418311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:43.567382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:43.641392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:45.267298Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625587713981611:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:45.267422Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:45.602490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.631777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.657621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.686285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.750564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.779881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.850855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:45.907548Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625587713982275:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:45.907635Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:45.907706Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625587713982280:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:45.911211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:45.922136Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625587713982282:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:46.000430Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625587713982333:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:47.035234Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625596303917198:3594], Recipient [1:7519625574829078453:2175]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:47.035279Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:47.035293Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:47.035335Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519625596303917194:3591], Recipient [1:7519625574829078453:2175]: {TEvModifySc ... pload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:34.920885Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037908, followerId 0 2025-06-24T21:09:34.920916Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:21 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:34.920930Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037908 2025-06-24T21:09:34.920949Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:22 data size 0 row count 0 2025-06-24T21:09:34.920981Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037909 maps to shardIdx: 72057594046644480:22 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:34.920991Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037909, followerId 0 2025-06-24T21:09:34.921021Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:22 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:34.921034Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037909 2025-06-24T21:09:34.921053Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:19 data size 0 row count 0 2025-06-24T21:09:34.921085Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037906 maps to shardIdx: 72057594046644480:19 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is 
column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:34.921096Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037906, followerId 0 2025-06-24T21:09:34.921123Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:19 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:34.921136Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037906 2025-06-24T21:09:34.921155Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:18 data size 0 row count 0 2025-06-24T21:09:34.921187Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037905 maps to shardIdx: 72057594046644480:18 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:34.921198Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037905, followerId 0 2025-06-24T21:09:34.921227Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:18 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:34.921239Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037905 2025-06-24T21:09:34.921260Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:17 data size 0 row count 0 2025-06-24T21:09:34.921294Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037904 maps to shardIdx: 72057594046644480:17 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:34.921304Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037904, followerId 0 2025-06-24T21:09:34.921332Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:17 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:34.921343Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037904 2025-06-24T21:09:34.921418Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:09:34.921617Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519625877798661274:2148]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:34.921647Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 
2025-06-24T21:09:34.921660Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-24T21:09:35.386771Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:7519625907863434704:2481], Recipient [3:7519625877798661274:2148]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037923 TableLocalId: 17 Generation: 1 Round: 2 TableStats { DataSize: 1232 RowCount: 6 IndexSize: 0 InMemSize: 1232 LastAccessTime: 1750799373742 LastUpdateTime: 1750799340934 ImmediateTxCompleted: 0 PlannedTxCompleted: 3 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 6 RowDeletes: 0 RowReads: 22 RangeReads: 12 PartCount: 0 RangeReadRows: 72 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 620 Memory: 142392 } ShardState: 2 UserTablePartOwners: 72075186224037923 NodeId: 3 StartTime: 1750799340350 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:09:35.386817Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:09:35.386852Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037923 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 17] state 'Ready' dataSize 1232 rowCount 6 cpuUsage 0.062 2025-06-24T21:09:35.386946Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037923 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 17] raw table stats: DataSize: 1232 RowCount: 6 IndexSize: 0 InMemSize: 1232 LastAccessTime: 1750799373742 LastUpdateTime: 1750799340934 ImmediateTxCompleted: 0 PlannedTxCompleted: 3 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 6 RowDeletes: 0 RowReads: 22 RangeReads: 12 PartCount: 0 RangeReadRows: 72 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:09:35.386965Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.099997s, queue# 1 2025-06-24T21:09:35.393317Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625877798661274:2148]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:35.393358Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:35.393426Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625877798661274:2148], Recipient [3:7519625877798661274:2148]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:35.393446Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:35.487051Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient 
[3:7519625877798661274:2148]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:35.487109Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:35.487122Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-24T21:09:35.487197Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-24T21:09:35.487218Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-06-24T21:09:35.487287Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 17 shard idx 72057594046644480:36 data size 1232 row count 6 2025-06-24T21:09:35.487335Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037923 maps to shardIdx: 72057594046644480:36 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 17], pathId map=TestTable, is column=0, is olap=0, RowCount 6, DataSize 1232 2025-06-24T21:09:35.487349Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037923, followerId 0 2025-06-24T21:09:35.487394Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:36 with partCount# 0, rowCount# 6, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:35.487432Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037923 2025-06-24T21:09:35.487494Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:09:35.487593Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519625877798661274:2148]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:35.487607Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:35.487614Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 >> KqpNewEngine::Delete-UseSink [GOOD] >> KqpNewEngine::DecimalColumn ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestKinesisHttpProxy::TestEmptyHttpBody [GOOD] Test command err: 2025-06-24T21:08:36.549591Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625808151995999:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:36.549814Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004496/r3tmp/tmprgZCm2/pdisk_1.dat 2025-06-24T21:08:36.997112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:36.997223Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:37.000288Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:37.028401Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625808151995894:2079] 1750799316542605 != 1750799316542608 2025-06-24T21:08:37.036466Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16181, node 1 2025-06-24T21:08:37.213307Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:37.213346Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:37.213355Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:37.213456Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:37.554484Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2711 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:37.760514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:2711 2025-06-24T21:08:38.025650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-24T21:08:38.030737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:08:38.032238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-24T21:08:38.053499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.225543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T21:08:38.304953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:38.365530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:38.410744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.443164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:38.474463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.508240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.545097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.634085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:39.880931Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625821036899164:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.881054Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.881311Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625821036899176:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.885692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:39.900190Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625821036899178:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-24T21:08:40.002603Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625821036899229:2865] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:40.595285Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jyhwb0t03bh1ey4fhfg2h03t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDY0MGY2OTktNjJlNmIyZWItZjljMzYxMGMtMzY1NjJlZDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:08:40.686495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:40.751181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part p ... r { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-24T21:09:35.803980Z node 8 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Attempt 1 execution duration: 17ms 2025-06-24T21:09:35.804526Z node 8 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: 
Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-24T21:09:35.804588Z node 8 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Minikql data response: {"queues": [], "truncated": false} 2025-06-24T21:09:35.804726Z node 8 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] execution duration: 19ms 2025-06-24T21:09:35.804798Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/SQS, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:35.805406Z node 8 :SQS TRACE: queues_list_reader.cpp:82: Handle queues list: { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-24T21:09:35.805769Z node 8 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] HandleResponse { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-24T21:09:35.805789Z node 8 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Attempt 1 execution duration: 23ms 2025-06-24T21:09:35.805967Z node 8 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { 
Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-24T21:09:35.806011Z node 8 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Minikql data response: {"settings": [], "truncated": false} 2025-06-24T21:09:35.806078Z node 8 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] execution duration: 23ms 2025-06-24T21:09:35.806379Z node 8 :SQS TRACE: user_settings_reader.cpp:89: Handle user settings: { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-24T21:09:35.964474Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7519626059272946517:2404]: Pool not found 2025-06-24T21:09:35.964720Z node 8 :SQS DEBUG: monitoring.cpp:60: [monitoring] Report deletion queue data lag: 0.000000s, count: 0 2025-06-24T21:09:36.268993Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7519626059272946512:2401]: Pool not found 2025-06-24T21:09:36.269172Z node 8 :SQS DEBUG: cleanup_queue_data.cpp:100: [cleanup removed queues] getting queues... 2025-06-24T21:09:36.272576Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519626063567913933:2421], DatabaseId: /Root/SQS, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:36.272593Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:602: [WorkloadService] [TDatabaseFetcherActor] ActorId: [8:7519626063567913934:2422], Database: /Root/SQS, Failed to fetch database info, UNSUPPORTED, issues: {
: Error: Invalid database path /Root/SQS, please check the correctness of the path } 2025-06-24T21:09:36.272661Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/SQS, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:36.636103Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7519626063567913931:2420]: Pool not found 2025-06-24T21:09:36.636542Z node 8 :SQS DEBUG: cleanup_queue_data.cpp:138: [cleanup removed queues] there are no queues to delete 2025-06-24T21:09:36.762618Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:47652) incoming connection opened 2025-06-24T21:09:36.762714Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:47652) -> (POST /Root, 4 bytes) 2025-06-24T21:09:36.762923Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [b80b:f500:6050:0:a00b:f500:6050:0] request [CreateStream] url [/Root] database [/Root] requestId: 32d24b6a-e151a1a9-8ccfdb0d-c45f2cea 2025-06-24T21:09:36.763676Z node 8 :HTTP_PROXY INFO: http_req.cpp:1211: http request [CreateStream] requestId [32d24b6a-e151a1a9-8ccfdb0d-c45f2cea] reply with status: BAD_REQUEST message: ydb/core/http_proxy/json_proto_conversion.h:395: Top level of json value is not a map 2025-06-24T21:09:36.763833Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:47652) <- (400 MissingParameter, 127 bytes) 2025-06-24T21:09:36.763902Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:289: (#37,[::1]:47652) Request: POST /Root HTTP/1.1 Host: example.amazonaws.com X-Amz-Target: kinesisApi.CreateStream X-Amz-Date: 20150830T123600Z Authorization: Content-Type: application/json Connection: Close Transfer-Encoding: chunked null 2025-06-24T21:09:36.763949Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:47652) Response: HTTP/1.1 400 MissingParameter Connection: close x-amzn-requestid: 32d24b6a-e151a1a9-8ccfdb0d-c45f2cea x-amz-crc32: 851558042 Content-Type: application/x-amz-json-1.1 Content-Length: 127 2025-06-24T21:09:36.764080Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:47652) connection closed Http output full {"__type":"MissingParameter","message":"ydb/core/http_proxy/json_proto_conversion.h:395: Top level of json value is not a map"} >> TestKinesisHttpProxy::GoodRequestCreateStream [GOOD] >> TestYmqHttpProxy::TestListQueueTags [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups [GOOD] >> TPartBtreeIndexIteration::FewNodes_History ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestChangeMessageVisibilityBatch [GOOD] Test command err: 2025-06-24T21:08:36.547533Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625806698581105:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:36.547593Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004479/r3tmp/tmpokKsRg/pdisk_1.dat 2025-06-24T21:08:37.004089Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:37.004214Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:37.005774Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:37.007238Z node 1 :CONFIGS_DISPATCHER 
ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625806698581072:2079] 1750799316542177 != 1750799316542180 TServer::EnableGrpc on GrpcPort 18586, node 1 2025-06-24T21:08:37.016715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:37.211333Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:37.211358Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:37.211365Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:37.211525Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:37.566963Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7069 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:37.766675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:37.781900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:7069 2025-06-24T21:08:38.032271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-24T21:08:38.037533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:08:38.039430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-24T21:08:38.056007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-24T21:08:38.064175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.204713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:38.244177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2025-06-24T21:08:38.249939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:38.323846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710665, at schemeshard: 72057594046644480 2025-06-24T21:08:38.328277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.366642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:38.399124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:38.436612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:38.471066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.542441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:38.577858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:39.855691Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625819583484339:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.855711Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625819583484347:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.855810Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.860697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:39.873202Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625819583484353:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-24T21:08:39.939723Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625819583484404:2865] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:40.591385Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jyhwb0sd868d0qb9g8z ... . Mode: COMPILE_AND_EXEC 2025-06-24T21:09:37.086685Z node 7 :SQS TRACE: executor.cpp:154: Request [85589c15-1ed1c7c2-728bdeeb-5648e03e] Query(idx=CHANGE_VISIBILITY_ID) Queue [cloud4/000000000000000101v0] Serializing params: {"QUEUE_ID_NUMBER": 2, "QUEUE_ID_NUMBER_HASH": 17472595041006102391, "SHARD": 2, "QUEUE_ID_NUMBER_AND_SHARD_HASH": 18011340738530590538, "NOW": 1750799377085, "GROUPS_READ_ATTEMPT_IDS_PERIOD": 300000, "KEYS": [{"LockTimestamp": 1750799376896, "Offset": 1, "NewVisibilityDeadline": 1750799378085}, {"LockTimestamp": 1750799376930, "Offset": 2, "NewVisibilityDeadline": 1750799379085}]} 2025-06-24T21:09:37.087210Z node 7 :SQS TRACE: executor.cpp:203: Request [85589c15-1ed1c7c2-728bdeeb-5648e03e] Query(idx=CHANGE_VISIBILITY_ID) Queue [cloud4/000000000000000101v0] Execute program: { Transaction { MiniKQLTransaction { Mode: COMPILE_AND_EXEC Program { Bin: "O\034\014Exists*NewVisibilityDeadline\014Offset\006Arg\014Member\nFlags\010Name\010Args\016Payload\022Parameter\006And\032LockTimestamp$VisibilityDeadline\014Invoke\t\211\004\206\202?\000\206\202\030Extend\000\006\002?\000\t\211\004\202\203\005@\206\205\n\203\014\207\203\010\203\014\203\010?\020(ChangeConddCurrentVisibilityDeadline\002\006\n$SetResult\000\003?\006\014result\t\211\006?\024\206\205\006?\020?\020?\020.\006\n?\032?\0220MapParameter\000\t\351\000?\034\005\205\004\206\205\004\203\010\203\005@\026\032\203\005@\036\"\006\000?&\003?(\010KEYS\003&\000\t\251\000?\032\016\000\005?\022\t\211\004?\010\207\203\014?\010 Coalesce\000\t\211\004?<\207\203\014\207\203\014*\000\t\211\006?B\203\005@\203\010?\0146\000\003?J\026LessOrEqual\t\351\000?L\005\205\004\206\205\004\203\010\203\005@\026\032\203\005@\036\"\006\000?X\003?Z\006NOW\003&\000\t\211\004?\014\207\205\004\207\203\010?\014.2\203\004\022\000\t\211\n?n\203\005\004\200\205\004\203\004\203\004.2\213\010\203\010\203\010\203\004?\020\203\004$SelectRow\000\003?t \000\001\205\000\000\000\000\001\030\000\000\000\000\000\000\000?l\005?z\003?v\020\003?x\026\003\013?\202\t\351\000?|\005\205\004\206\205\004\203\010\203\005@\026\032\203\005@\036\"\006\000?\226\003?\230> KqpNewEngine::StaleRO-EnableFollowers [GOOD] >> KqpNewEngine::StaleRO_Immediate |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> KqpSort::Offset [GOOD] >> KqpSort::OffsetPk ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestKinesisHttpProxy::ListShardsToken [GOOD] Test command err: 2025-06-24T21:08:36.550961Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625807316952622:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:36.551187Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004484/r3tmp/tmpzNgUmn/pdisk_1.dat 2025-06-24T21:08:36.963478Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625807316952521:2079] 1750799316542231 != 1750799316542234 2025-06-24T21:08:36.964453Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:36.997077Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:36.997199Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:37.000256Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7562, node 1 2025-06-24T21:08:37.211466Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:37.211498Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:37.211506Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:37.211616Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:37.554677Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63186 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:37.743521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:63186 2025-06-24T21:08:37.990405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-24T21:08:37.997284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:08:37.999199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-24T21:08:38.027801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-24T21:08:38.041692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.186516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:38.226328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:38.285157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.325916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:08:38.362328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:38.404421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.437375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.470782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.502125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:39.751890Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625820201855785:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.752024Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.752500Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625820201855797:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.757303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:39.777997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710673, at schemeshard: 72057594046644480 2025-06-24T21:08:39.778674Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625820201855799:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-24T21:08:39.862397Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625820201855850:2864] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:40.591668Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jyhwb0p4839dvvr0xtpsmx6n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTZiMzNjMjItZTlhYjdjYTgtYmMwMzkyNS1jMzEwYzhiZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:08:40.670209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, su ... 1: [PQ: 72075186224037909] add an TxId 281474976715690 to the list for deletion 2025-06-24T21:09:37.564734Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037909] TxId 281474976715690, NewState DELETING 2025-06-24T21:09:37.564746Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3852: [PQ: 72075186224037909] delete key for TxId 281474976715690 2025-06-24T21:09:37.564768Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037909] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T21:09:37.565966Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037909] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:09:37.565994Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037909] Try execute txs with state DELETING 2025-06-24T21:09:37.566008Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037909] TxId 281474976715690, State DELETING 2025-06-24T21:09:37.566021Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4571: [PQ: 72075186224037909] delete TxId 281474976715690 2025-06-24T21:09:37.566418Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3094: [PQ: 72075186224037909] Registered with mediator time cast 2025-06-24T21:09:37.566491Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3094: [PQ: 72075186224037908] Registered with mediator time cast 2025-06-24T21:09:37.566532Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3094: [PQ: 72075186224037907] Registered with mediator time cast 2025-06-24T21:09:37.566566Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3094: [PQ: 72075186224037911] Registered with mediator time cast 2025-06-24T21:09:37.566599Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3094: [PQ: 72075186224037910] Registered with mediator time cast 2025-06-24T21:09:37.566766Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [CreateStream] requestId [46d8f97e-99f87523-8488a36f-a44ac762] reply ok 2025-06-24T21:09:37.566866Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:47322) <- (200 , 2 bytes) 2025-06-24T21:09:37.566964Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:47322) connection closed Http output full {} 200 {} 2025-06-24T21:09:37.567712Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:47336) incoming connection opened 2025-06-24T21:09:37.567788Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:47336) -> (POST /Root, 157 bytes) 2025-06-24T21:09:37.567873Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [b878:2200:6050:0:a078:2200:6050:0] request [ListShards] url [/Root] database [/Root] requestId: 6f60564a-7381214b-a1507579-6ce8dba3 
2025-06-24T21:09:37.568171Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [ListShards] requestId [6f60564a-7381214b-a1507579-6ce8dba3] got new request from [b878:2200:6050:0:a078:2200:6050:0] database '/Root' stream 'teststream' 2025-06-24T21:09:37.568508Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [ListShards] requestId [6f60564a-7381214b-a1507579-6ce8dba3] [auth] Authorized successfully 2025-06-24T21:09:37.568536Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [ListShards] requestId [6f60564a-7381214b-a1507579-6ce8dba3] sending grpc request to '' database: '/Root' iam token size: 0 E0000 00:00:1750799377.568584 723619 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-24T21:09:37.570348Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037907] server connected, pipe [8:7519626069146457372:2482], now have 1 active actors on pipe 2025-06-24T21:09:37.570362Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037911] server connected, pipe [8:7519626069146457373:2483], now have 1 active actors on pipe 2025-06-24T21:09:37.570768Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037907] server disconnected, pipe [8:7519626069146457372:2482] destroyed 2025-06-24T21:09:37.570787Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037911] server disconnected, pipe [8:7519626069146457373:2483] destroyed 2025-06-24T21:09:37.570988Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ListShards] requestId [6f60564a-7381214b-a1507579-6ce8dba3] reply ok 2025-06-24T21:09:37.571186Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:47336) <- (200 , 449 bytes) 2025-06-24T21:09:37.571288Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:47336) connection closed Http output full {"NextToken":"CKLJmZ76MhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 200 {"NextToken":"CKLJmZ76MhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 2025-06-24T21:09:37.572446Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:47352) incoming connection opened 2025-06-24T21:09:37.572532Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:47352) -> (POST /Root, 157 bytes) 2025-06-24T21:09:37.572654Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [1854:2000:6050:0:54:2000:6050:0] request [ListShards] url [/Root] database [/Root] requestId: db01a7d5-62b8c00a-72204f27-932d4206 2025-06-24T21:09:37.573077Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [ListShards] requestId [db01a7d5-62b8c00a-72204f27-932d4206] got new request from [1854:2000:6050:0:54:2000:6050:0] database '/Root' stream 'teststream' 
2025-06-24T21:09:37.573462Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [ListShards] requestId [db01a7d5-62b8c00a-72204f27-932d4206] [auth] Authorized successfully 2025-06-24T21:09:37.573531Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [ListShards] requestId [db01a7d5-62b8c00a-72204f27-932d4206] sending grpc request to '' database: '/Root' iam token size: 0 E0000 00:00:1750799377.573601 723619 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-24T21:09:37.574400Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037911] server connected, pipe [8:7519626069146457385:2488], now have 1 active actors on pipe 2025-06-24T21:09:37.574423Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037907] server connected, pipe [8:7519626069146457384:2487], now have 1 active actors on pipe 2025-06-24T21:09:37.574829Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037907] server disconnected, pipe [8:7519626069146457384:2487] destroyed 2025-06-24T21:09:37.574861Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037911] server disconnected, pipe [8:7519626069146457385:2488] destroyed 2025-06-24T21:09:37.575033Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ListShards] requestId [db01a7d5-62b8c00a-72204f27-932d4206] reply ok 2025-06-24T21:09:37.575198Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:47352) <- (200 , 449 bytes) 2025-06-24T21:09:37.575299Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:47352) connection closed Http output full {"NextToken":"CKbJmZ76MhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 200 {"NextToken":"CKbJmZ76MhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 2025-06-24T21:09:37.576462Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:47354) incoming connection opened 2025-06-24T21:09:37.576539Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:47354) -> (POST /Root, 157 bytes) 2025-06-24T21:09:37.576657Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [3859:1f00:6050:0:2059:1f00:6050:0] request [ListShards] url [/Root] database [/Root] requestId: fab2801b-f0b42f0e-bbc84068-18d37bad 2025-06-24T21:09:37.577049Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [ListShards] requestId [fab2801b-f0b42f0e-bbc84068-18d37bad] got new request from [3859:1f00:6050:0:2059:1f00:6050:0] database '/Root' stream 'teststream' 2025-06-24T21:09:37.577411Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [ListShards] requestId [fab2801b-f0b42f0e-bbc84068-18d37bad] [auth] Authorized successfully 2025-06-24T21:09:37.577481Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: 
http request [ListShards] requestId [fab2801b-f0b42f0e-bbc84068-18d37bad] sending grpc request to '' database: '/Root' iam token size: 0 E0000 00:00:1750799377.577554 723619 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-24T21:09:37.578454Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037911] server connected, pipe [8:7519626069146457397:2493], now have 1 active actors on pipe 2025-06-24T21:09:37.578455Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037907] server connected, pipe [8:7519626069146457396:2492], now have 1 active actors on pipe 2025-06-24T21:09:37.578864Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037907] server disconnected, pipe [8:7519626069146457396:2492] destroyed 2025-06-24T21:09:37.578890Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037911] server disconnected, pipe [8:7519626069146457397:2493] destroyed 2025-06-24T21:09:37.579098Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ListShards] requestId [fab2801b-f0b42f0e-bbc84068-18d37bad] reply ok 2025-06-24T21:09:37.579267Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:47354) <- (200 , 449 bytes) 2025-06-24T21:09:37.579369Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:47354) connection closed Http output full {"NextToken":"CKrJmZ76MhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 200 {"NextToken":"CKrJmZ76MhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} >> KqpNamedExpressions::NamedExpressionChanged-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged+UseSink >> TMultiversionObjectMap::MonteCarlo [GOOD] >> TVersions::WreckHead [GOOD] >> TVersions::WreckHeadReverse >> DataStreams::TestStreamStorageRetention >> DataStreams::TestUpdateStorage >> DataStreams::TestControlPlaneAndMeteringData >> DataStreams::TestNonChargeableUser >> DataStreams::TestPutRecordsOfAnauthorizedUser >> DataStreams::TestReservedResourcesMetering >> DataStreams::TestUpdateStream >> DataStreams::TestGetShardIterator >> TPartBtreeIndexIteration::FewNodes_History [GOOD] >> TPartBtreeIndexIteration::FewNodes_Sticky >> DataStreams::TestDeleteStream |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TMultiversionObjectMap::MonteCarlo [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestListQueueTags [GOOD] Test command err: 2025-06-24T21:08:36.554717Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625807802354350:2136];send_to=[0:7307199536658146131:7762515]; 
2025-06-24T21:08:36.555125Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00448d/r3tmp/tmpt5p2oX/pdisk_1.dat 2025-06-24T21:08:36.971655Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625807802354242:2079] 1750799316542180 != 1750799316542183 2025-06-24T21:08:36.989860Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:36.998098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:36.998199Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:37.000270Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1554, node 1 2025-06-24T21:08:37.211505Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:37.211536Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:37.211545Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:37.211681Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:37.554877Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14188 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:37.731952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:08:37.755470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:14188 2025-06-24T21:08:37.982315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T21:08:37.989384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:08:38.000293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-24T21:08:38.011259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-24T21:08:38.021267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.183561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:38.263474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2025-06-24T21:08:38.269273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:08:38.307240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710665, at schemeshard: 72057594046644480 2025-06-24T21:08:38.311540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.382003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.418651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.452434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.487331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.522507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:38.564816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:39.709672Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625820687257515:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.710175Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625820687257510:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.710280Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.715939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:39.730485Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625820687257524:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-24T21:08:39.812526Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625820687257575:2865] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:40.591944Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jyhwb0mman2ys4g7ty ... 203\005@\203\001H?\322\030Invoke\000\003?\326\014Equals\003?\330\000\t\211\004?\322\207\203\001H?\322 Coalesce\000\t\211\004?\342\207\205\004\207\203\001H?\342\026\032\203\004\030Member\000\t\211\n?\354\203\005\004\200\205\004\203\004\203\004\026\032\213\004\203\001H\203\001H\203\004\036\000\003?\362 \000\001\205\000\000\000\000\001\003\000\000\000\000\000\000\000?\352\005?\370\003?\364\004\003?\366 \003\013?\376\t\351\000?\372\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?%\002\003?)\002\022USER_NAME\003\022\000\003?\374(000000000000000301v0\002\003?\001\002\000\037\003?\356\002\002\003?\322\004{}\002\003\003?\302\004{}?a\002\002\002\001\000/" } Params { Bin: "\037\000\005\205\010\203\001H\203\010\203\010\203\001H\020NAME> TestKinesisHttpProxy::GoodRequestCreateStream [GOOD] Test command err: 2025-06-24T21:08:36.550661Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625806594614222:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:36.551363Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004491/r3tmp/tmpKUdyuA/pdisk_1.dat 2025-06-24T21:08:36.971467Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625806594614121:2079] 1750799316542185 != 1750799316542188 2025-06-24T21:08:37.011201Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18786, node 1 2025-06-24T21:08:37.015420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:37.015542Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:37.018081Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:08:37.211430Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:37.211465Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:37.211499Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:37.211629Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:08:37.559325Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4071 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:37.765635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:4071 2025-06-24T21:08:38.033163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T21:08:38.040423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:08:38.042556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-24T21:08:38.054721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-24T21:08:38.060290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.192583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:08:38.250980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:08:38.293468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710665, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:08:38.298314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.343383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:08:38.374569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.410804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.443041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:38.476747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:08:38.542867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:39.709698Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625819479517389:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.709800Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.710008Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625819479517401:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:08:39.716830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:08:39.730091Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625819479517403:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-24T21:08:39.822688Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625819479517454:2865] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:08:40.591404Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jyhwb0msbj1tyqqadfvm9q2k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTQ5NzkzZS1kNjFjYzY3Ni01M2NmZmQ1OS03MDExMmVhZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:08:40.683866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, sub ... impl.cpp:2885: [PQ: 72075186224037909] server connected, pipe [8:7519626074467022111:2484], now have 1 active actors on pipe 2025-06-24T21:09:38.997022Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037911] server connected, pipe [8:7519626074467022113:2486], now have 1 active actors on pipe 2025-06-24T21:09:38.997065Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037910] server connected, pipe [8:7519626074467022112:2485], now have 1 active actors on pipe 2025-06-24T21:09:38.997981Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037907] server disconnected, pipe [8:7519626074467022109:2482] destroyed 2025-06-24T21:09:38.998013Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037909] server disconnected, pipe [8:7519626074467022111:2484] destroyed 2025-06-24T21:09:38.998036Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037910] server disconnected, pipe [8:7519626074467022112:2485] destroyed 2025-06-24T21:09:38.998060Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037908] server disconnected, pipe [8:7519626074467022110:2483] destroyed 2025-06-24T21:09:38.998065Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037911] server disconnected, pipe [8:7519626074467022113:2486] destroyed 2025-06-24T21:09:38.999084Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [DescribeStream] requestId [bd6a19ae-4242d287-db3d73e5-890d6515] reply ok 2025-06-24T21:09:38.999382Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:34988) <- (200 , 1672 bytes) 2025-06-24T21:09:38.999510Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:34988) connection closed Http output full 
{"StreamDescription":{"RetentionPeriodHours":24,"WriteQuotaKbPerSec":1024,"StreamModeDetails":{"StreamMode":"ON_DEMAND"},"StreamArn":"testtopic","PartitioningSettings":{"MinActivePartitions":5,"AutoPartitioningSettings":{"Strategy":"AUTO_PARTITIONING_STRATEGY_DISABLED","PartitionWriteSpeed":{"StabilizationWindow":{"Nanos":0,"Seconds":300},"DownUtilizationPercent":30,"UpUtilizationPercent":90}},"MaxActivePartitions":5},"Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}},{"ShardId":"shard-000002","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"204169420152563078078024764459060926872","StartingHashKey":"136112946768375385385349842972707284582"}},{"ShardId":"shard-000003","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"272225893536750770770699685945414569163","StartingHashKey":"204169420152563078078024764459060926873"}},{"ShardId":"shard-000004","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"340282366920938463463374607431768211455","StartingHashKey":"272225893536750770770699685945414569164"}}],"KeyId":"","Owner":"Service1_id@as","StreamStatus":"ACTIVE","HasMoreShards":false,"EncryptionType":"ENCRYPTION_UNDEFINED","StreamCreationTimestamp":1750799379,"StorageLimitMb":0,"StreamName":"testtopic"}} 200 {"StreamDescription":{"RetentionPeriodHours":24,"WriteQuotaKbPerSec":1024,"StreamModeDetails":{"StreamMode":"ON_DEMAND"},"StreamArn":"testtopic","PartitioningSettings":{"MinActivePartitions":5,"AutoPartitioningSettings":{"Strategy":"AUTO_PARTITIONING_STRATEGY_DISABLED","PartitionWriteSpeed":{"StabilizationWindow":{"Nanos":0,"Seconds":300},"DownUtilizationPercent":30,"UpUtilizationPercent":90}},"MaxActivePartitions":5},"Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}},{"ShardId":"shard-000002","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"204169420152563078078024764459060926872","StartingHashKey":"136112946768375385385349842972707284582"}},{"ShardId":"shard-000003","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"272225893536750770770699685945414569163","StartingHashKey":"204169420152563078078024764459060926873"}},{"ShardId":"shard-000004","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"340282366920938463463374607431768211455","StartingHashKey":"272225893536750770770699685945414569164"}}],"KeyId":"","Owner":"Service1_id@as","StreamStatus":"ACTIVE","HasMoreShards":false,"EncryptionType":"ENCRYPTION_UNDEFINED","StreamCreationTimestamp":1750799379,"StorageLimitMb":0,"StreamName":"testtopic"}} 2025-06-24T21:09:39.000940Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:34998) incoming connection opened 2025-06-24T21:09:39.001018Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: 
(#37,[::1]:34998) -> (POST /Root, 30 bytes) 2025-06-24T21:09:39.001166Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [78b9:1500:6050:0:60b9:1500:6050:0] request [DescribeStreamSummary] url [/Root] database [/Root] requestId: e314a9c2-79991363-95f47be-45f45d94 2025-06-24T21:09:39.001607Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [DescribeStreamSummary] requestId [e314a9c2-79991363-95f47be-45f45d94] got new request from [78b9:1500:6050:0:60b9:1500:6050:0] database '/Root' stream 'testtopic' 2025-06-24T21:09:39.002156Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [DescribeStreamSummary] requestId [e314a9c2-79991363-95f47be-45f45d94] [auth] Authorized successfully 2025-06-24T21:09:39.002273Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [DescribeStreamSummary] requestId [e314a9c2-79991363-95f47be-45f45d94] sending grpc request to '' database: '/Root' iam token size: 0 2025-06-24T21:09:39.003398Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [DescribeStreamSummary] requestId [e314a9c2-79991363-95f47be-45f45d94] reply ok 2025-06-24T21:09:39.003590Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:34998) <- (200 , 239 bytes) 2025-06-24T21:09:39.003716Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:34998) connection closed Http output full {"StreamDescriptionSummary":{"RetentionPeriodHours":24,"OpenShardCount":5,"StreamArn":"testtopic","ConsumerCount":0,"KeyId":"","StreamStatus":"ACTIVE","EncryptionType":"NONE","StreamCreationTimestamp":1750799.379,"StreamName":"testtopic"}} 200 {"StreamDescriptionSummary":{"RetentionPeriodHours":24,"OpenShardCount":5,"StreamArn":"testtopic","ConsumerCount":0,"KeyId":"","StreamStatus":"ACTIVE","EncryptionType":"NONE","StreamCreationTimestamp":1750799.379,"StreamName":"testtopic"}} 2025-06-24T21:09:39.004380Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:35012) incoming connection opened 2025-06-24T21:09:39.004459Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:35012) -> (POST /Root, 30 bytes) 2025-06-24T21:09:39.004583Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [b8cf:301:6050:0:a0cf:301:6050:0] request [DescribeStream] url [/Root] database [/Root] requestId: 7687d6f3-3a6c4a8d-b565b263-6f4abe77 2025-06-24T21:09:39.004882Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [DescribeStream] requestId [7687d6f3-3a6c4a8d-b565b263-6f4abe77] got new request from [b8cf:301:6050:0:a0cf:301:6050:0] database '/Root' stream 'testtopic' 2025-06-24T21:09:39.005240Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [DescribeStream] requestId [7687d6f3-3a6c4a8d-b565b263-6f4abe77] [auth] Authorized successfully 2025-06-24T21:09:39.005305Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [DescribeStream] requestId [7687d6f3-3a6c4a8d-b565b263-6f4abe77] sending grpc request to '' database: '/Root' iam token size: 0 2025-06-24T21:09:39.006198Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037911] server connected, pipe [8:7519626078761989436:2498], now have 1 active actors on pipe 2025-06-24T21:09:39.006210Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037909] server connected, pipe [8:7519626078761989434:2496], now have 1 active actors on pipe 2025-06-24T21:09:39.006263Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037910] server connected, pipe [8:7519626078761989435:2497], now have 1 active actors on pipe 2025-06-24T21:09:39.006279Z node 8 
:PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037907] server connected, pipe [8:7519626078761989432:2494], now have 1 active actors on pipe 2025-06-24T21:09:39.006390Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037908] server connected, pipe [8:7519626078761989433:2495], now have 1 active actors on pipe 2025-06-24T21:09:39.007025Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037907] server disconnected, pipe [8:7519626078761989432:2494] destroyed 2025-06-24T21:09:39.007042Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037908] server disconnected, pipe [8:7519626078761989433:2495] destroyed 2025-06-24T21:09:39.007065Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037910] server disconnected, pipe [8:7519626078761989435:2497] destroyed 2025-06-24T21:09:39.007074Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037909] server disconnected, pipe [8:7519626078761989434:2496] destroyed 2025-06-24T21:09:39.007086Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037911] server disconnected, pipe [8:7519626078761989436:2498] destroyed 2025-06-24T21:09:39.007670Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [DescribeStream] requestId [7687d6f3-3a6c4a8d-b565b263-6f4abe77] reply ok 2025-06-24T21:09:39.007880Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:35012) <- (200 , 1672 bytes) 2025-06-24T21:09:39.008008Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:35012) connection closed Http output full {"StreamDescription":{"RetentionPeriodHours":24,"WriteQuotaKbPerSec":1024,"StreamModeDetails":{"StreamMode":"ON_DEMAND"},"StreamArn":"testtopic","PartitioningSettings":{"MinActivePartitions":5,"AutoPartitioningSettings":{"Strategy":"AUTO_PARTITIONING_STRATEGY_DISABLED","PartitionWriteSpeed":{"StabilizationWindow":{"Nanos":0,"Seconds":300},"DownUtilizationPercent":30,"UpUtilizationPercent":90}},"MaxActivePartitions":5},"Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}},{"ShardId":"shard-000002","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"204169420152563078078024764459060926872","StartingHashKey":"136112946768375385385349842972707284582"}},{"ShardId":"shard-000003","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"272225893536750770770699685945414569163","StartingHashKey":"204169420152563078078024764459060926873"}},{"ShardId":"shard-000004","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"340282366920938463463374607431768211455","StartingHashKey":"272225893536750770770699685945414569164"}}],"KeyId":"","Owner":"Service1_id@as","StreamStatus":"ACTIVE","HasMoreShards":false,"EncryptionType":"ENCRYPTION_UNDEFINED","StreamCreationTimestamp":1750799379,"StorageLimitMb":0,"StreamName":"testtopic"}} >> KqpAgg::AggWithSelfLookup2 [GOOD] >> KqpAgg::AggWithHop >> ExternalBlobsMultipleChannels::WithNewColumnFamilyAndCompaction >> KqpNotNullColumns::CreateTableWithNotNullColumns [GOOD] >> TPartBtreeIndexIteration::FewNodes_Sticky [GOOD] >> TPartBtreeIndexIteration::FewNodes_Slices >> TKeyValueTest::TestWriteReadRangeLimitThenLimitWorks >> 
TKeyValueTest::TestBasicWriteRead >> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOk >> TKeyValueTest::TestWriteReadWithRestartsThenResponseOk >> KeyValueReadStorage::ReadOk [GOOD] >> KeyValueReadStorage::ReadNotWholeBlobOk >> KeyValueReadStorage::ReadError [GOOD] >> KeyValueReadStorage::ReadErrorWithWrongGroupId [GOOD] >> KeyValueReadStorage::ReadErrorWithUncorrectCookie [GOOD] >> TKeyValueTest::TestWriteTrimWithRestartsThenResponseOk |94.3%| [TA] $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KeyValueReadStorage::ReadNotWholeBlobOk [GOOD] >> KeyValueReadStorage::ReadOneItemError [GOOD] |94.3%| [TA] {RESULT} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> KeyValueReadStorage::ReadErrorWithUncorrectCookie [GOOD] Test command err: 2025-06-24T21:09:42.325822Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# ERROR ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-24T21:09:42.325965Z 1 00h00m00.000000s :KEYVALUE ERROR: {KV316@keyvalue_storage_read_request.cpp:270} Unexpected EvGetResult. KeyValue# 1 Status# ERROR Deadline# 18446744073709551 Now# 0 SentAt# 1970-01-01T00:00:00.000000Z GotAt# 1750799382320 ErrorReason# 2025-06-24T21:09:42.337102Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 2 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-24T21:09:42.337203Z 1 00h00m00.000000s :KEYVALUE ERROR: {KV318@keyvalue_storage_read_request.cpp:240} Received EvGetResult from an unexpected storage group. KeyValue# 1 GroupId# 2 ExpecetedGroupId# 3 Status# OK Deadline# 18446744073709551 Now# 0 SentAt# 1970-01-01T00:00:00.000000Z GotAt# 1750799382336 ErrorReason# 2025-06-24T21:09:42.344048Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-24T21:09:42.344121Z 1 00h00m00.000000s :KEYVALUE ERROR: {KV319@keyvalue_storage_read_request.cpp:222} Received EvGetResult with an unexpected cookie. 
KeyValue# 1 Cookie# 1000 SentGets# 1 GroupId# 3 Status# OK Deadline# 18446744073709551 Now# 0 GotAt# 1750799382343 ErrorReason# ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> KeyValueReadStorage::ReadOneItemError [GOOD] Test command err: 2025-06-24T21:09:42.353427Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-24T21:09:42.356123Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 2025-06-24T21:09:42.364132Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-24T21:09:42.364203Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 2025-06-24T21:09:42.370820Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-24T21:09:42.370937Z 1 00h00m00.000000s :KEYVALUE ERROR: {KV317@keyvalue_storage_read_request.cpp:310} Unexpected EvGetResult. KeyValue# 1 Status# OK Id# [1:2:3:2:0:1:0] ResponseStatus# ERROR Deadline# 586524-01-19T08:01:49.551615Z Now# 1970-01-01T00:00:00.000000Z SentAt# 1970-01-01T00:00:00.000000Z GotAt# 2025-06-24T21:09:42.370616Z ErrorReason# >> TKeyValueTest::TestWriteReadRangeDataLimitThenLimitWorks >> TKeyValueTest::TestCleanUpDataOnEmptyTablet >> KqpReturning::ReturningWorksIndexedUpsert+QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedUpsert-QueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::CreateTableWithNotNullColumns [GOOD] Test command err: Trying to start YDB, gRPC: 18922, MsgBus: 8303 2025-06-24T21:09:09.800490Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625949251497409:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:09.800625Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031ce/r3tmp/tmp3u8Ofj/pdisk_1.dat 2025-06-24T21:09:10.285043Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18922, node 1 2025-06-24T21:09:10.298076Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:10.298209Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:10.301270Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:10.335764Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:10.335796Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:10.335820Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:10.335930Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8303 TClient is connected to server localhost:8303 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:09:10.851235Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:10.924045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:13.061735Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625966431367195:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:13.061855Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:13.379960Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625966431367216:2304] txid# 281474976710658, issues: { message: "It is not allowed to create not null data column: Value" severity: 1 } 2025-06-24T21:09:13.404844Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625966431367224:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:13.404950Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:13.424020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 5372, MsgBus: 15760 2025-06-24T21:09:14.275929Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625969367811495:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:14.275995Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031ce/r3tmp/tmpSoCyv6/pdisk_1.dat 2025-06-24T21:09:14.471321Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625969367811477:2079] 1750799354275447 != 1750799354275450 2025-06-24T21:09:14.480141Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:14.485253Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:14.485357Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:14.487641Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5372, node 2 2025-06-24T21:09:14.597435Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:14.597464Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:14.597472Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:14.597600Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15760 TClient is connected to server localhost:15760 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
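Earlier in this test's output the first CREATE TABLE is rejected by the TX_PROXY with "It is not allowed to create not null data column: Value", i.e. this build refuses NOT NULL on an ordinary data column while the test's other tables are created successfully. A rough reproduction with the ydb Python SDK is sketched below; the endpoint, table names and the exact acceptance rules (which depend on server version and feature flags) are assumptions for illustration, not taken from the test source.

```python
# Sketch only: illustrate the NOT NULL restriction reported in the log above.
# Assumed: local endpoint/database, and that this build rejects NOT NULL data columns.
import ydb

driver = ydb.Driver(endpoint="grpc://localhost:2136", database="/Root")
driver.wait(timeout=5)
pool = ydb.SessionPool(driver)

def create_tables(session):
    # NOT NULL on the primary key column: expected to be accepted.
    session.execute_scheme("""
        CREATE TABLE pk_not_null (
            Key Uint64 NOT NULL,
            Value String,
            PRIMARY KEY (Key)
        )
    """)
    # NOT NULL on a data column: expected to fail here with
    # "It is not allowed to create not null data column: Value".
    session.execute_scheme("""
        CREATE TABLE data_not_null (
            Key Uint64,
            Value String NOT NULL,
            PRIMARY KEY (Key)
        )
    """)

try:
    pool.retry_operation_sync(create_tables)
except ydb.Error as e:
    print("rejected:", e)
```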
2025-06-24T21:09:15.186240Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:15.199967Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:09:15.288532Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:17.607398Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625982252713999:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:17.607528Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:17.617844Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:17.696939Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625982252714102:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:17.697045Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:17.697387Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625982252714107:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:17.701906Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:17.713129Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519625982252714109:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: T ... s" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:28.983030Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:29.237489Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:31.808157Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519626042175658227:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:31.808257Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:31.822550Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7519626042175658248:2301] txid# 281474976715658, issues: { message: "It is not allowed to create not null data column: Value" severity: 1 } 2025-06-24T21:09:31.839480Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519626042175658256:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:31.839593Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:31.870048Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 13121, MsgBus: 62714 2025-06-24T21:09:32.673832Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519626045287864137:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:32.673893Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031ce/r3tmp/tmpRhjEsb/pdisk_1.dat 2025-06-24T21:09:32.785405Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519626045287864118:2079] 1750799372673399 != 1750799372673402 2025-06-24T21:09:32.819760Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13121, node 6 2025-06-24T21:09:32.851705Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:32.851812Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:32.854161Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:32.891107Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:32.891135Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:32.891162Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:32.891320Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62714 TClient is connected to server localhost:62714 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:09:33.498502Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:33.504994Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:09:33.682056Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:36.274771Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519626062467733938:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:36.274921Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:36.289225Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 28594, MsgBus: 61545 2025-06-24T21:09:37.151186Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626069788076519:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:37.151261Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031ce/r3tmp/tmpR6c4xm/pdisk_1.dat 2025-06-24T21:09:37.274127Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:37.275685Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519626069788076500:2079] 1750799377150752 != 1750799377150755 2025-06-24T21:09:37.300394Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected TServer::EnableGrpc on GrpcPort 28594, node 7 2025-06-24T21:09:37.300486Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:37.301927Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:37.344158Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:37.344193Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:37.344206Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:37.344378Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61545 TClient is connected to server localhost:61545 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:09:37.919002Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:38.159824Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:40.866591Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TKeyValueTest::TestIncorrectRequestThenResponseError >> KqpNewEngine::JoinPure [GOOD] >> KqpNewEngine::JoinPureUncomparableKeys >> TKeyValueTest::TestIncorrectRequestThenResponseError [GOOD] >> TKeyValueTest::TestIncrementalKeySet >> TKeyValueTest::TestConcatWorks >> KeyValueReadStorage::ReadRangeOk1Key [GOOD] >> KeyValueReadStorage::ReadRangeOk [GOOD] >> KeyValueReadStorage::ReadRangeNoData [GOOD] >> TKeyValueTest::TestInlineEmptyWriteReadDeleteWithRestartsThenResponseOk >> DataStreams::TestGetShardIterator [GOOD] >> DataStreams::TestGetRecordsWithoutPermission >> DataStreams::TestDeleteStream [GOOD] >> DataStreams::TestDeleteStreamWithEnforceFlag >> DataStreams::TestStreamStorageRetention [GOOD] >> DataStreams::TestStreamPagination ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> KeyValueReadStorage::ReadRangeNoData [GOOD] Test command err: 2025-06-24T21:09:44.463191Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-24T21:09:44.465490Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 2025-06-24T21:09:44.470929Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 2 ErrorReason# ReadRequestCookie# 0 2025-06-24T21:09:44.470984Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 2025-06-24T21:09:44.475973Z 1 00h00m00.000000s :KEYVALUE INFO: {KV320@keyvalue_storage_read_request.cpp:122} Inline read request KeyValue# 1 Status# OK 2025-06-24T21:09:44.476016Z 1 00h00m00.000000s :KEYVALUE DEBUG: {KV322@keyvalue_storage_read_request.cpp:134} Expected OK or UNKNOWN and given OK readCount# 0 2025-06-24T21:09:44.476051Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 >> DataStreams::TestUpdateStorage [GOOD] >> DataStreams::TestStreamTimeRetention >> DataStreams::TestNonChargeableUser [GOOD] >> DataStreams::TestPutEmptyMessage >> DataStreams::TestPutRecordsOfAnauthorizedUser [GOOD] >> DataStreams::TestPutRecordsWithRead >> DataStreams::TestUpdateStream [GOOD] >> DataStreams::Test_AutoPartitioning_Describe >> TPartBtreeIndexIteration::FewNodes_Slices [GOOD] >> 
TPartBtreeIndexIteration::FewNodes_Groups_Slices >> KqpNewEngine::StaleRO_Immediate [GOOD] >> KqpNewEngine::StaleRO_IndexFollowers+EnableFollowers >> KqpNewEngine::DecimalColumn [GOOD] >> KqpNewEngine::DecimalColumn35 >> DataStreams::TestControlPlaneAndMeteringData [GOOD] >> DataStreams::ChangeBetweenRetentionModes >> TKeyValueTest::TestIncrementalKeySet [GOOD] >> TKeyValueTest::TestGetStatusWorksNewApi ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut_trace/unittest >> TKeyValueTracingTest::WriteSmall [FAIL] Test command err: assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:103, void TestOneWrite(TString, TVector &&): (env.WilsonUploader->Traces.size() == 1) failed: (2 != 1) TBackTrace::Capture()+28 (0x108004FC) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x10CBE310) TestOneWrite(TBasicString>, TVector>, std::__y1::allocator>>>&&)+4253 (0x104499BD) NTestSuiteTKeyValueTracingTest::TTestCaseWriteSmall::Execute_(NUnitTest::TTestContext&)+216 (0x104554F8) std::__y1::__function::__func, void ()>::operator()()+280 (0x104691F8) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x10CEC436) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x10CC4E99) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()+1204 (0x104680A4) NUnitTest::TTestFactory::Execute()+2438 (0x10CC6766) NUnitTest::RunMain(int, char**)+5213 (0x10CE69AD) ??+0 (0x7F391E46BD90) __libc_start_main+128 (0x7F391E46BE40) _start+41 (0xDD1A029) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut_trace/unittest >> TKeyValueTracingTest::WriteHuge [FAIL] Test command err: assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:103, void TestOneWrite(TString, TVector &&): (env.WilsonUploader->Traces.size() == 1) failed: (2 != 1) TBackTrace::Capture()+28 (0x108004FC) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x10CBE310) TestOneWrite(TBasicString>, TVector>, std::__y1::allocator>>>&&)+4253 (0x104499BD) NTestSuiteTKeyValueTracingTest::TTestCaseWriteHuge::Execute_(NUnitTest::TTestContext&)+216 (0x10455808) std::__y1::__function::__func, void ()>::operator()()+280 (0x104691F8) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x10CEC436) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x10CC4E99) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()+1204 (0x104680A4) NUnitTest::TTestFactory::Execute()+2438 (0x10CC6766) NUnitTest::RunMain(int, char**)+5213 (0x10CE69AD) ??+0 (0x7F5DECA7DD90) __libc_start_main+128 (0x7F5DECA7DE40) _start+41 (0xDD1A029) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut_trace/unittest >> TKeyValueTracingTest::ReadHuge [FAIL] Test command err: equal assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:124, void TestOneRead(TString, TString): env.WilsonUploader->Traces.size() == 1 TBackTrace::Capture()+28 (0x108004FC) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x10CBE310) TestOneRead(TBasicString>, TBasicString>)+4828 (0x1044F38C) NTestSuiteTKeyValueTracingTest::TTestCaseReadHuge::Execute_(NUnitTest::TTestContext&)+318 (0x10455F6E) std::__y1::__function::__func, void ()>::operator()()+280 (0x104691F8) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x10CEC436) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> 
const&, char const*, bool)+505 (0x10CC4E99) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()+1204 (0x104680A4) NUnitTest::TTestFactory::Execute()+2438 (0x10CC6766) NUnitTest::RunMain(int, char**)+5213 (0x10CE69AD) ??+0 (0x7F7483A09D90) __libc_start_main+128 (0x7F7483A09E40) _start+41 (0xDD1A029) >> TKeyValueTest::TestInlineWriteReadWithRestartsThenResponseOkNewApi ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut_trace/unittest >> TKeyValueTracingTest::ReadSmall [FAIL] Test command err: equal assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:124, void TestOneRead(TString, TString): env.WilsonUploader->Traces.size() == 1 TBackTrace::Capture()+28 (0x108004FC) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x10CBE310) TestOneRead(TBasicString>, TBasicString>)+4828 (0x1044F38C) NTestSuiteTKeyValueTracingTest::TTestCaseReadSmall::Execute_(NUnitTest::TTestContext&)+318 (0x10455B7E) std::__y1::__function::__func, void ()>::operator()()+280 (0x104691F8) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x10CEC436) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x10CC4E99) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()+1204 (0x104680A4) NUnitTest::TTestFactory::Execute()+2438 (0x10CC6766) NUnitTest::RunMain(int, char**)+5213 (0x10CE69AD) ??+0 (0x7F01DE1FDD90) __libc_start_main+128 (0x7F01DE1FDE40) _start+41 (0xDD1A029) >> KqpNamedExpressions::NamedExpressionRandomChanged+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged-UseSink |94.4%| [TA] $(B)/ydb/core/keyvalue/ut_trace/test-results/unittest/{meta.json ... results_accumulator.log} |94.4%| [TA] {RESULT} $(B)/ydb/core/keyvalue/ut_trace/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpSqlIn::SecondaryIndex_SimpleKey_In_And [GOOD] >> KqpSqlIn::SecondaryIndex_SimpleKey_In_And_In >> KqpSort::OffsetPk [GOOD] >> KqpSort::OffsetTopSort >> TKeyValueTest::TestWriteDeleteThenReadRemaining >> TKeyValueTest::TestInlineWriteReadWithRestartsThenResponseOk >> TKeyValueTest::TestCopyRangeWorks >> TPartBtreeIndexIteration::FewNodes_Groups_Slices [GOOD] >> TPartBtreeIndexIteration::FewNodes_History_Slices >> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOk >> KqpAgg::AggWithHop [GOOD] >> KqpAgg::GroupByLimit >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel4 [GOOD] >> KqpNewEngine::JoinPureUncomparableKeys [GOOD] >> KqpNewEngine::JoinWithPrecompute >> DataStreams::TestGetRecordsWithoutPermission [GOOD] >> DataStreams::TestGetRecordsWithCount [GOOD] >> DataStreams::TestInvalidRetentionCombinations >> DataStreams::TestDeleteStreamWithEnforceFlag [GOOD] >> DataStreams::TestDeleteStreamWithEnforceFlagFalse >> DataStreams::TestPutEmptyMessage [GOOD] >> DataStreams::TestListStreamConsumers >> DataStreams::TestPutRecordsWithRead [GOOD] >> DataStreams::TestPutRecordsCornerCases >> TPartBtreeIndexIteration::FewNodes_History_Slices [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices >> DataStreams::TestReservedResourcesMetering [GOOD] >> DataStreams::TestReservedStorageMetering >> TTxDataShardMiniKQL::CrossShard_1_Cycle [GOOD] >> TTxDataShardMiniKQL::CrossShard_2_SwapAndCopy ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel4 [GOOD] Test command err: Trying to start YDB, gRPC: 16077, MsgBus: 15985 2025-06-24T21:07:48.348027Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625601767947600:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:48.348158Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045e1/r3tmp/tmpdmVwZA/pdisk_1.dat 2025-06-24T21:07:48.725018Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:07:48.725787Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625601767947584:2079] 1750799268347146 != 1750799268347149 2025-06-24T21:07:48.738538Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:48.738661Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:48.740772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16077, node 1 2025-06-24T21:07:48.817345Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:48.817371Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:48.817379Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:48.817521Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration TClient is connected to server localhost:15985 TClient is connected to server localhost:15985 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:07:49.358967Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:49.378810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:49.409216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:49.568164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:07:49.717896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:49.777402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:51.421943Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625614652851131:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:51.422050Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:51.750290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:51.819245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:51.846238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:51.888113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:51.913460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:51.947247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:52.021083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:52.081333Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625618947819088:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:52.081419Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:52.081539Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625618947819093:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:52.085484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:52.097207Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625618947819095:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:07:52.164983Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625618947819146:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:53.298295Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625623242786717:3598], Recipient [1:7519625601767947912:2146]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:53.298343Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:53.298358Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:53.298402Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519625623242786713:3595], Recipient [1:7519625601767947912:2146]: {TEvModifySchemeTransaction txid# 281474976715672 TabletId# 72057594046644480} 2025-06-24T21:07:53.298420Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvS ... .cpp:62: BuildStatsForCollector: datashardId 72075186224037922, followerId 0 2025-06-24T21:09:46.954075Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:35 with partCount# 0, rowCount# 12, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:46.954087Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037922 2025-06-24T21:09:46.954104Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 17 shard idx 72057594046644480:37 data size 1952 row count 12 2025-06-24T21:09:46.954136Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037924 maps to shardIdx: 72057594046644480:37 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 17], pathId map=TestTable, is column=0, is olap=0, RowCount 12, DataSize 1952 2025-06-24T21:09:46.954149Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037924, followerId 0 2025-06-24T21:09:46.954179Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:37 with partCount# 0, rowCount# 12, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:46.954192Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037924 2025-06-24T21:09:46.954243Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:09:46.955089Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519625860468190324:2163]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:46.955121Z node 3 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:46.955133Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-24T21:09:47.146173Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625860468190324:2163]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:47.146222Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:47.146294Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625860468190324:2163], Recipient [3:7519625860468190324:2163]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:47.146314Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:47.368886Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:7519625894827931260:2503], Recipient [3:7519625860468190324:2163]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037927 TableLocalId: 20 Generation: 1 Round: 4 TableStats { DataSize: 710 RowCount: 30 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750799383452 LastUpdateTime: 1750799338233 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 30 RowDeletes: 0 RowReads: 0 RangeReads: 22 PartCount: 1 RangeReadRows: 33 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 710 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 184 Memory: 119581 Storage: 818 } ShardState: 2 UserTablePartOwners: 72075186224037927 NodeId: 3 StartTime: 1750799337338 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:09:47.368941Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:09:47.368983Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037927 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 20] state 'Ready' dataSize 710 rowCount 30 cpuUsage 0.0184 2025-06-24T21:09:47.369101Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037927 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 20] raw table stats: DataSize: 710 RowCount: 30 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750799383452 LastUpdateTime: 1750799338233 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 30 RowDeletes: 0 RowReads: 0 RangeReads: 22 PartCount: 1 RangeReadRows: 33 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 710 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:09:47.369133Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will 
delay TTxStoreTableStats on# 0.099996s, queue# 1 2025-06-24T21:09:47.429230Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:7519625894827931263:2504], Recipient [3:7519625860468190324:2163]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037926 TableLocalId: 19 Generation: 1 Round: 4 TableStats { DataSize: 3239 RowCount: 65 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750799383451 LastUpdateTime: 1750799338226 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 65 RowDeletes: 0 RowReads: 0 RangeReads: 77 PartCount: 1 RangeReadRows: 143 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 3239 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 516 Memory: 119590 Storage: 3356 } ShardState: 2 UserTablePartOwners: 72075186224037926 NodeId: 3 StartTime: 1750799337339 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:09:47.429279Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:09:47.429319Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037926 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 19] state 'Ready' dataSize 3239 rowCount 65 cpuUsage 0.0516 2025-06-24T21:09:47.429439Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037926 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 19] raw table stats: DataSize: 3239 RowCount: 65 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750799383451 LastUpdateTime: 1750799338226 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 65 RowDeletes: 0 RowReads: 0 RangeReads: 77 PartCount: 1 RangeReadRows: 143 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 3239 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:09:47.469235Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519625860468190324:2163]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:47.469286Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:47.469302Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 2 2025-06-24T21:09:47.469363Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 2 2025-06-24T21:09:47.469387Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 2 2025-06-24T21:09:47.469461Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 20 shard idx 72057594046644480:39 data size 710 row count 30 2025-06-24T21:09:47.469541Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from 
datashardId(TabletID)=72075186224037927 maps to shardIdx: 72057594046644480:39 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 20], pathId map=indexImplPostingTable, is column=0, is olap=0, RowCount 30, DataSize 710 2025-06-24T21:09:47.469569Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037927, followerId 0 2025-06-24T21:09:47.469643Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:39 with partCount# 1, rowCount# 30, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:47.469708Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037927 2025-06-24T21:09:47.469759Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 19 shard idx 72057594046644480:38 data size 3239 row count 65 2025-06-24T21:09:47.469806Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037926 maps to shardIdx: 72057594046644480:38 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 19], pathId map=indexImplLevelTable, is column=0, is olap=0, RowCount 65, DataSize 3239 2025-06-24T21:09:47.469828Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037926, followerId 0 2025-06-24T21:09:47.469869Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:38 with partCount# 1, rowCount# 65, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:47.469901Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037926 2025-06-24T21:09:47.469964Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:09:47.470098Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519625860468190324:2163]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:47.470124Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:47.470135Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 >> DataStreams::Test_AutoPartitioning_Describe [GOOD] >> DataStreams::Test_Crreate_AutoPartitioning_Disabled >> DataStreams::ChangeBetweenRetentionModes [GOOD] >> DataStreams::TestCreateExistingStream >> DataStreams::TestStreamPagination [GOOD] >> DataStreams::TestShardPagination >> KqpReturning::ReturningWorksIndexedUpsert-QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedReplace+QueryService >> TKeyValueTest::TestObtainLockNewApi >> TGroupMapperTest::ReassignGroupTest3dc [GOOD] >> TKeyValueTest::TestGetStatusWorksNewApi [GOOD] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::ReassignGroupTest3dc [GOOD] >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix-Nullable+Covered [GOOD] >> TVersions::WreckHeadReverse [GOOD] >> 
TVersions::Wreck2 >> DataStreams::TestDeleteStreamWithEnforceFlagFalse [GOOD] >> DataStreams::TestGetRecords1MBMessagesOneByOneBySeqNo ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestGetStatusWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:78:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:82:2057] recipient: [4:80:2111] Leader for TabletID 72057594037927937 is [4:83:2112] sender: [4:84:2057] recipient: [4:80:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:83:2112] Leader for TabletID 72057594037927937 is [4:83:2112] sender: [4:169:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvKeyValue::TEvGetStorageChannelStatus ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:78:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:81:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:80:2111] Leader for TabletID 72057594037927937 is [5:83:2112] sender: [5:84:2057] recipient: [5:80:2111] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! 
new actor is[5:83:2112] Leader for TabletID 72057594037927937 is [5:83:2112] sender: [5:169:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:79:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:81:2111] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:83:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:84:2112] sender: [6:85:2057] recipient: [6:81:2111] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! new actor is[6:84:2112] Leader for TabletID 72057594037927937 is [6:84:2112] sender: [6:170:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] >> DataStreams::TestInvalidRetentionCombinations [GOOD] >> ExternalBlobsMultipleChannels::WithNewColumnFamilyAndCompaction [GOOD] >> TKeyValueTest::TestBasicWriteRead [GOOD] >> TKeyValueTest::TestBasicWriteReadOverrun >> DataStreams::TestStreamTimeRetention [GOOD] >> DataStreams::TestUnsupported >> KqpNewEngine::DecimalColumn35 [GOOD] >> KqpNewEngine::DeleteByKey >> TGroupMapperTest::Block42_1disk [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged2+UseSink >> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices_Sticky |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::Block42_1disk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix-Nullable+Covered [GOOD] Test command err: Trying to start YDB, gRPC: 5067, MsgBus: 4284 2025-06-24T21:07:53.324290Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625622428657414:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:53.324339Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045b8/r3tmp/tmpeIvlvQ/pdisk_1.dat 2025-06-24T21:07:53.660504Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5067, node 1 2025-06-24T21:07:53.741009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:07:53.743087Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:07:53.751868Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:07:53.787756Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:07:53.787784Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:07:53.787793Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:07:53.787941Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4284 TClient is connected to server localhost:4284 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:07:54.324637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:07:54.339439Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:07:54.343333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:54.491935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:07:54.630283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:54.689066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:07:56.488193Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625635313560913:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:56.488296Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:56.804975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.845222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.915292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:56.962409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:57.003176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:57.074576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:57.112608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:07:57.203810Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625639608528871:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:57.203913Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:57.204376Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625639608528876:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:07:57.208764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:07:57.222468Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625639608528878:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:07:57.301705Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625639608528929:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:07:58.326380Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625622428657414:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:07:58.326486Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:07:58.440982Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519625643903496501:3599], Recipient [1:7519625622428657708:2147]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:58.441022Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:07:58.441038Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:07:58.441076Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519625643903496497:3596], Recipient [1:75196256224 ... e# 2 2025-06-24T21:09:48.271871Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 2 2025-06-24T21:09:48.271887Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 2 2025-06-24T21:09:48.271936Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:15 data size 0 row count 0 2025-06-24T21:09:48.271981Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037902 maps to shardIdx: 72057594046644480:15 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:48.271992Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037902, followerId 0 2025-06-24T21:09:48.272032Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:15 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:48.272066Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037902 2025-06-24T21:09:48.272088Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:23 data size 0 row count 0 2025-06-24T21:09:48.272109Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037910 maps to shardIdx: 72057594046644480:23 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], 
pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:09:48.272117Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037910, followerId 0 2025-06-24T21:09:48.272142Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:23 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:48.272150Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037910 2025-06-24T21:09:48.272185Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:09:48.272262Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519625957036353760:2139]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:48.272278Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:48.272294Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-24T21:09:48.595742Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269553162, Sender [3:7519625987101127433:2500], Recipient [3:7519625957036353760:2139]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037925 TableLocalId: 20 Generation: 1 Round: 2 TableStats { DataSize: 1820 RowCount: 30 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750799386646 LastUpdateTime: 1750799358938 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 30 RowDeletes: 0 RowReads: 0 RangeReads: 20 PartCount: 1 RangeReadRows: 70 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 1820 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 438 Memory: 119610 Storage: 1987 } ShardState: 2 UserTablePartOwners: 72075186224037925 NodeId: 3 StartTime: 1750799358513 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:09:48.595795Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4988: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-24T21:09:48.595831Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037925 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 20] state 'Ready' dataSize 1820 rowCount 30 cpuUsage 0.0438 2025-06-24T21:09:48.595967Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037925 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 20] raw table stats: DataSize: 1820 RowCount: 30 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750799386646 LastUpdateTime: 1750799358938 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 30 RowDeletes: 0 RowReads: 0 RangeReads: 20 PartCount: 1 RangeReadRows: 70 SearchHeight: 1 LastFullCompactionTs: 
0 HasLoanedParts: false Channels { Channel: 1 DataSize: 1820 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:09:48.595997Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.099996s, queue# 1 2025-06-24T21:09:48.696248Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519625957036353760:2139]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:48.696314Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:48.696329Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-24T21:09:48.696386Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-24T21:09:48.696417Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-06-24T21:09:48.696477Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 20 shard idx 72057594046644480:39 data size 1820 row count 30 2025-06-24T21:09:48.696546Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037925 maps to shardIdx: 72057594046644480:39 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 20], pathId map=indexImplPostingTable, is column=0, is olap=0, RowCount 30, DataSize 1820 2025-06-24T21:09:48.696569Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037925, followerId 0 2025-06-24T21:09:48.696631Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:39 with partCount# 1, rowCount# 30, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:09:48.696688Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037925 2025-06-24T21:09:48.696762Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:09:48.696861Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519625957036353760:2139]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:48.696878Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:09:48.696899Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-24T21:09:48.844248Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625957036353760:2139]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:48.844297Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:48.844357Z node 3 :FLAT_TX_SCHEMESHARD 
TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625957036353760:2139], Recipient [3:7519625957036353760:2139]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:48.844377Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:49.844684Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625957036353760:2139]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:49.844735Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:49.844793Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625957036353760:2139], Recipient [3:7519625957036353760:2139]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:49.844811Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:50.845192Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625957036353760:2139]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:50.845243Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:50.845301Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625957036353760:2139], Recipient [3:7519625957036353760:2139]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:50.845320Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:51.845599Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519625957036353760:2139]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:51.845662Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:09:51.845731Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [3:7519625957036353760:2139], Recipient [3:7519625957036353760:2139]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:09:51.845750Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> TKeyValueTest::TestWrite200KDeleteThenResponseError >> DataStreams::TestListStreamConsumers [GOOD] >> DataStreams::TestListShards1Shard ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestInvalidRetentionCombinations [GOOD] Test command err: 2025-06-24T21:09:40.893685Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626080786360346:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:40.895370Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002faa/r3tmp/tmpxnPYen/pdisk_1.dat 2025-06-24T21:09:41.342893Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:41.342986Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:41.360373Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:41.384610Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11855, node 1 2025-06-24T21:09:41.556960Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:41.556980Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:41.556986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:41.557090Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:09:41.903204Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26532 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:42.082305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:09:42.197839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:26532 2025-06-24T21:09:42.399536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T21:09:42.407769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-24T21:09:44.978616Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626096355007818:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:44.978716Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002faa/r3tmp/tmpdSeJYj/pdisk_1.dat 2025-06-24T21:09:45.080337Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:45.099159Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:45.099248Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:45.104578Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22965, node 4 2025-06-24T21:09:45.147583Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:45.147609Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:45.147618Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:45.147732Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30109 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:45.436478Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:45.502862Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:30109 2025-06-24T21:09:45.722008Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-24T21:09:45.916385Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:09:45.949747Z node 4 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [4:7519626100649976298:2827], for# user2@builtin, access# DescribeSchema 2025-06-24T21:09:45.964246Z node 4 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [4:7519626100649976301:2828], for# user2@builtin, access# DescribeSchema 2025-06-24T21:09:45.975557Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:09:45.991690Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:49.518561Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626120710454823:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:49.518643Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002faa/r3tmp/tmpdntRuA/pdisk_1.dat 2025-06-24T21:09:49.619679Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:49.635526Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:49.635593Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:49.641612Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21576, node 7 2025-06-24T21:09:49.686685Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:49.686708Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:49.686715Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:49.686848Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26214 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:49.944917Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:50.001999Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:26214 2025-06-24T21:09:50.200432Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting...
: Error: retention hours and storage megabytes must fit one of: { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, provided values: hours 168, storage 10, code: 500080
: Error: retention hours and storage megabytes must fit one of: { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, provided values: hours 144, storage 0, code: 500080
: Error: write_speed per second in partition must have values from set {131072,524288,1048576}, got 130048, code: 500080
: Error: write_speed per second in partition must have values from set {131072,524288,1048576}, got 1049600, code: 500080 2025-06-24T21:09:50.525342Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> ExternalBlobsMultipleChannels::WithNewColumnFamilyAndCompaction [GOOD] Test command err: 2025-06-24T21:09:44.666244Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:09:44.666555Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:09:44.666694Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00459b/r3tmp/tmpqsnUyV/pdisk_1.dat 2025-06-24T21:09:45.117604Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:09:45.131277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:45.196941Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:45.216505Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799381899687 != 1750799381899691 2025-06-24T21:09:45.271623Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:45.271783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:45.285068Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:45.395506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:45.883558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 100:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:09:46.013796Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:46.203468Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:769:2627], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:46.203675Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:780:2632], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:46.203784Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:46.214228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:46.369124Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:783:2635], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:09:46.457503Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:839:2672] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:47.408558Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwd1jk6fmsry6v6va8sbky, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTY0YjA0YjgtZjJhZjE2MzUtNWRkMmQ1NjYtOWIxODQ5Y2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:47.650856Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwd2yd6361ym8cbvsjga61, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTMyZmY2MmQtYThhNGRhNzctNjcyNmJiZGEtYWE0NTY2Mzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:47.697360Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwd307ea6qf343wkzvrc4g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2ZlMTliZjMtMjdkYzFiYjgtMWU4NGRlMjQtMTNiYTY1NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:47.748792Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhwd31p2fnkfh8hsaqh6rbg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWJhNzQ3NWItMTg2Yzg5NTAtZGQ4NTk3MzAtYjRiNGU2NDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:47.800663Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhwd339e1vvf6vbcphnpjwq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTU4YTkwYTktYWE4NDUxYzYtZjg1ZjA0N2ItNmYwMWNjODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:47.851358Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhwd34y4br5sp6d99m43h7n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTlkZmU0NDgtY2I3Yjg1NjUtZmFjMDhhZmMtZDEzYjExYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:47.903674Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyhwd36g0a8yqf9e9aegdwh1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmE5ZjE0MDAtZDU1YjkwYTQtMThhYmIyZDEtZGJmNmExYWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:47.949762Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyhwd3849e0y90cmx83453nf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODllNjUzMTUtY2M5MTE2MzUtYzlmNmZjOWMtNGI1MzQzZTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:47.997595Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. 
Ctx: { TraceId: 01jyhwd39jan7jzek117an6geh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTRmMjcxOTgtYWQ5MzZkYzgtZGU1ZjQxYTEtMThmNDM2Yw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:48.045895Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jyhwd3b22mbr6930s59zskwp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjQ4ZmRjZC1lOTAxYWIxZi0xZWE0YzNlZi02ZTUyMmQyYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:48.090663Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jyhwd3ck0z22h9v2hj1nr12d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTc2YmI2ZGUtM2ZjMzczOTYtZTU3OTVkMzYtNTJmNjViOTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:48.139233Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jyhwd3dzf9w1t05ezj5gsqxe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDcxN2I0NTgtOTViZDcyZGEtNzU2MTNhNDgtYjcyMTAxMDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:48.198423Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jyhwd3fgbh2nh3y28rqg3wqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjgxYzg3NTYtNWVjNjVmMDctOTU0ZjQzMTEtYTZlNjczNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:48.251545Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jyhwd3hc2g744g8rmqnmt4tm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTAxMTBkOTYtYmM4YzY1NWEtMzk0MjA2Ni00NmMwOGMxYQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:48.292952Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715674. Ctx: { TraceId: 01jyhwd3k0fenpf7t433a33p33, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWI0YjY0ZjItZWQzMGQ4ZDgtZjQyYWY5NTMtM2U1NDM5NjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:48.394625Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jyhwd3nxfya43bk6kzf0rt21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTk2NWQ4NDUtMzJlNjAyNzUtYmMzZDFjYTYtYmQxMDhmZTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:48.453252Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715676. Ctx: { TraceId: 01jyhwd3qgagvxvxx19g9dm7sc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDQzMGJjNmItOWE5ZmNjNi0xMjI4NGExYS05ZTE1YmI3Yw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:48.500795Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715677. Ctx: { TraceId: 01jyhwd3saexmp7a94and02svr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzZlYTQ1MWMtNDNkNWYxN2YtYzc2NGQ1YWMtZWRjMzFiMjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:48.543810Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715678. 
Ctx: { TraceId: 01jyhwd3tsdwywfapcx1hhgbs6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjgyYTYzZjUtZmRkMTY5MWQtM2VlNWRjMzEtYjhhMDg5MjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:48.586854Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715679. Ctx: { TraceId: 01jyhwd3w44g1wtcr ... : kqp_planner.cpp:120: TxId: 281474976715727. Ctx: { TraceId: 01jyhwd65h6zsqc72h233khr62, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjAwOTYxMTctOGU4NzgxNy04MDQxODNlYy04M2IyN2M1Yw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:51.003963Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715728. Ctx: { TraceId: 01jyhwd677f1xa32xhkcmjfbxm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmUyZDY3ZTgtYzMwZGQwYzgtODgwM2Y5ZWQtYjY2MDZkYzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:51.064144Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715729. Ctx: { TraceId: 01jyhwd6924n42y5ren80at8k5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWIwMGVhMWUtMWU0NTczMjQtNDM1ZTk0MTgtMjc2YjFiODU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:51.156830Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715730. Ctx: { TraceId: 01jyhwd6azesnyzgepbdffrq6w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzNkOGQ1MjgtNzMwMDA1NzctNGEwNmViZDEtMjQyM2MwNTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:51.204124Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715731. Ctx: { TraceId: 01jyhwd6dv7vsexh6rc56d13pr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTc4YmFlMzEtZDI0NzFmOWMtODhhMjBlYWItZDRmOGFlZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:51.250327Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715732. Ctx: { TraceId: 01jyhwd6f99t0g2q5qz6v382kh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjUwOTBiNzAtMmFkNzMzMWYtYzIwNGU4MmQtM2QxMWQ4NWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:51.302935Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715733. Ctx: { TraceId: 01jyhwd6grarae9wkk66s6epr6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjRkZWZjNDMtNWY1NTE1NGYtODYyYTVhMzYtOWM5NzE5NWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:51.358651Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715734. Ctx: { TraceId: 01jyhwd6jd1w6j7vav7ebqyzj5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2Q2ODZhZTktODEyNWEyOTktMzk2ZDA3NTktZTdiMjRjNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:51.417784Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715735. Ctx: { TraceId: 01jyhwd6m576hyn2yn2k346zfa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjgyMTQxMjgtOGYxYjcxYzItZTg3ZTZiOWQtOWMyZWQ4NzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:09:51.475477Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715736. Ctx: { TraceId: 01jyhwd6p0d6s03mhyb9pjw3jj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWMwOGQ1MDctZDkwNzc5Y2MtYjhjZjE3LTg1YjQyNWZl, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:51.529762Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715737. Ctx: { TraceId: 01jyhwd6qte7k7he7d5r24f6rc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODUzMGI2ZmYtNDQ5MzlmMWYtNjVjOTFlYjEtYjdhODBjMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:51.579616Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715738. Ctx: { TraceId: 01jyhwd6sgbmty6cs1bh78216b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDliNmQ2OTMtZGNlNDQ0Y2UtMTZkNjA3NWMtN2ZlMzRmYzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:51.628114Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715739. Ctx: { TraceId: 01jyhwd6v067any341e09t3mxk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGI4Y2EyNmUtYjNjYTE4OC1mN2Q3ZmY5OS0xNmIyYmVjYQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:51.701860Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715740. Ctx: { TraceId: 01jyhwd6whe1fetxxgkhqty15e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWYxZTdiMGQtNTIzMGRhZDYtOWQwYTgwZDMtZjM4NzIwMDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:51.748334Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715741. Ctx: { TraceId: 01jyhwd6yxb1k66zqkx2tz9sax, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTQ5MzFjZTItZDc0Yjc0YWUtOGYxNDc2ODgtN2RjZGQxYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:51.789826Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715742. Ctx: { TraceId: 01jyhwd709bprrk9awhz1dgsym, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGJiZDZkYWEtNjZmY2UzYWEtODc1YjY5ZTgtMjgwZjkyYjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:51.836656Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715743. Ctx: { TraceId: 01jyhwd71jadd9vcbw7d3q8102, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODQ5ZmRhMTUtNDBjZDhhMjItMjA5MjI0MjUtMjZiMGI3OWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:51.880085Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715744. Ctx: { TraceId: 01jyhwd7316sxcnmae56cr7rek, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTNlN2Q5ZWQtNjRmZDM5Y2QtNzliOTViMjYtZTczOWM0ZWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:51.937899Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715745. Ctx: { TraceId: 01jyhwd74dc4jchw4mrsc5d6v3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODMzZWRhOTEtYzBlOWUyYTMtMjk4MDFmNDQtZThiYjFiOTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:09:51.997521Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715746. Ctx: { TraceId: 01jyhwd7688eyhaac58yqnmxjd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjkzYTlkNDMtZDM1NGEyZTYtZjAzNDY4NGYtNTZmMDFhZTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:52.049982Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715747. Ctx: { TraceId: 01jyhwd7836qshsrkcgq60r2n9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTRjYzUxMGEtMWU2NTIyZGMtMzQ0MDNjZTUtYjkzMzY5MmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:52.103899Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715748. Ctx: { TraceId: 01jyhwd79s5f5yn3efa0g8j8g1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjU0MjA5MTQtZjRlODRjMTAtNjQ5NmEyODktNmMwZjM5Mzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:52.164171Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715749. Ctx: { TraceId: 01jyhwd7bearpxdbextawnst05, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmFkZDM3ZjAtMjI0NzUzNmItYWRjNGVlNjctYjk3ZTk5NmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:52.211004Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715750. Ctx: { TraceId: 01jyhwd7daa0b7xehqj4ymvcgn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGZkMTg4YjQtM2NiNjg0NmMtM2FjNzEzNDMtYTI0MGI0OWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:52.268375Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715751. Ctx: { TraceId: 01jyhwd7eqfpsmyc10eayh7j2q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTVkZjNlNWYtM2ZlNGY1YTYtZGVlMTA0My03NTE3Zjcy, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:52.323070Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715752. Ctx: { TraceId: 01jyhwd7ghakc84rgdxpmn33gb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjgzNzM5N2MtOTU1ZjU3ODMtZmNjODE5ZmYtM2Q4Y2Q2ZGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:52.368869Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715753. Ctx: { TraceId: 01jyhwd7j97dx75ndyknjemeb8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjZjMzM5NmQtNDhkNmE5NzgtZTc4ZmNhMjUtOWU1YTMzYzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:52.416048Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715754. Ctx: { TraceId: 01jyhwd7kpae0w4q9n1mm30g05, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjA5YWE2MjMtODdhZDQ1NzAtNDU5MzhlY2MtMTY0YzZkZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:52.467518Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715755. Ctx: { TraceId: 01jyhwd7n5astz6b2b82p00725, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzE4M2IzNmMtYTExZTcxZDMtODY5M2FmYTUtNTRhODE2MmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:09:52.516932Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715756. Ctx: { TraceId: 01jyhwd7psfty4xagq3anz2y5m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWZkZjFiNzQtZmU4NTQyZjQtNjViNzBlYzgtNDg5NGQyNDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:52.561560Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715757. Ctx: { TraceId: 01jyhwd7r987kaxhrgq3xg9vt9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGNhYjVmOTYtMWYzN2ZiYjgtMTYwNWJjMjYtZTM2YzUyMDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:52.612394Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715758. Ctx: { TraceId: 01jyhwd7sq818066tz2tq2xmgr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjY4MWRhMzQtZWQyYmMyMGUtNjNiYzA2LTFlM2M4NGQx, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:52.658715Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715759. Ctx: { TraceId: 01jyhwd7v985mt1j2gpbyv91xz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzYzYTYxYzgtNDRhZjgyZTEtMzc5MmMwMjQtZWQzMWYzOWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:09:53.127627Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715760. Ctx: { TraceId: 01jyhwd814cdb3n8fcepzgkdq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGFlZmNiMzQtZWI5OWM5Ny1lNzI5NmJlNC01N2EwYjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> KqpSort::OffsetTopSort [GOOD] >> TKeyValueTest::TestWriteDeleteThenReadRemaining [GOOD] >> TKeyValueTest::TestWriteAndRenameWithCreationUnixTime >> DataStreams::Test_Crreate_AutoPartitioning_Disabled [GOOD] >> TKeyValueTest::TestWriteReadPatchRead >> TKeyValueTest::TestWriteAndRenameWithCreationUnixTime [GOOD] >> DataStreams::TestCreateExistingStream [GOOD] >> DataStreams::ListStreamsValidation >> TKeyValueTest::TestWriteReadPatchRead [GOOD] >> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOkWithNewApi >> DataStreams::TestShardPagination [GOOD] >> TKeyValueTest::TestWriteTrimWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOkNewApi ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteAndRenameWithCreationUnixTime [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:451:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:453:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:455:2057] recipient: [2:454:2378] Leader for TabletID 72057594037927937 is [2:456:2379] sender: [2:457:2057] recipient: [2:454:2378] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! new actor is[2:456:2379] Leader for TabletID 72057594037927937 is [2:456:2379] sender: [2:542:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:451:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:454:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:455:2057] recipient: [3:453:2378] Leader for TabletID 72057594037927937 is [3:456:2379] sender: [3:457:2057] recipient: [3:453:2378] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! new actor is[3:456:2379] Leader for TabletID 72057594037927937 is [3:456:2379] sender: [3:542:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:452:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:455:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:456:2057] recipient: [4:454:2378] Leader for TabletID 72057594037927937 is [4:457:2379] sender: [4:458:2057] recipient: [4:454:2378] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! 
new actor is[4:457:2379] Leader for TabletID 72057594037927937 is [4:457:2379] sender: [4:543:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::Test_Crreate_AutoPartitioning_Disabled [GOOD] Test command err: 2025-06-24T21:09:40.860679Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626080507644031:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:40.860787Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003033/r3tmp/tmphEEeiO/pdisk_1.dat 2025-06-24T21:09:41.352130Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:41.352256Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:41.362653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:41.395359Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1593, node 1 2025-06-24T21:09:41.563551Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:41.563574Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:41.563581Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:41.563698Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:09:41.886884Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13182 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:09:42.079746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:42.176004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:13182 2025-06-24T21:09:42.354398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T21:09:42.767488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:43.025403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:45.571645Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626102814299994:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:45.571740Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003033/r3tmp/tmpWdXfXa/pdisk_1.dat 2025-06-24T21:09:45.771996Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:45.791718Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:45.791844Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:45.802580Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11896, node 4 2025-06-24T21:09:45.927737Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:45.927766Z node 4 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:45.927792Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:45.927978Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31737 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:46.245378Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:46.315442Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:31737 2025-06-24T21:09:46.542107Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-24T21:09:46.585998Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "0" shard_id: "shard-000002" } records { sequence_number: "0" shard_id: "shard-000001" } records { sequence_number: "1" shard_id: "shard-000001" } records { sequence_number: "1" shard_id: "shard-000002" } records { sequence_number: "2" shard_id: "shard-000001" } records { sequence_number: "3" shard_id: "shard-000001" } records { sequence_number: "4" shard_id: "shard-000001" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000002" } records { sequence_number: "3" shard_id: "shard-000002" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000002" } records { sequence_number: "5" shard_id: "shard-000002" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000002" } records { sequence_number: "5" shard_id: "shard-000001" } records { sequence_number: "6" shard_id: "shard-000001" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000002" } records { sequence_number: "8" shard_id: "shard-000002" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000002" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "10" shard_id: "shard-000002" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000002" } records { sequence_number: "7" shard_id: "shard-000001" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } ALTER_SCHEME: { Name: "test-topic" Split { Partition: 1 SplitBoundary: "a" } } 2025-06-24T21:09:47.797177Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 107:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:48.995882Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:49.083605Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:49.181413Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB 
first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:49.384621Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:50.827723Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626125366713703:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:50.827937Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003033/r3tmp/tmpuJO03B/pdisk_1.dat 2025-06-24T21:09:51.007429Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:51.040899Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:51.041013Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:51.049045Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26421, node 7 2025-06-24T21:09:51.128199Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:51.128233Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:51.128247Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:51.128429Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5406 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:09:51.442026Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:51.507817Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:5406 2025-06-24T21:09:51.749153Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T21:09:51.841097Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TKeyValueTest::TestRewriteThenLastValue >> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEvents >> TKeyValueTest::TestRenameWorks ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpSort::OffsetTopSort [GOOD] Test command err: Trying to start YDB, gRPC: 16812, MsgBus: 9314 2025-06-24T21:09:05.758233Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625931696420195:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:05.758308Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031d0/r3tmp/tmpeXVXnf/pdisk_1.dat 2025-06-24T21:09:06.139364Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625931696420079:2079] 1750799345722949 != 1750799345722952 2025-06-24T21:09:06.144279Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:06.144940Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:06.145056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:06.150969Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16812, node 1 2025-06-24T21:09:06.294960Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:06.295023Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:06.295039Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: 
failed to initialize from file: (empty maybe) 2025-06-24T21:09:06.295181Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9314 2025-06-24T21:09:06.770026Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9314 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:07.017349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:07.047485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:09:07.069728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:07.242271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:07.450495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:09:07.539321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:09.360048Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625948876290892:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:09.360172Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:09.707252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:09.749182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:09.785884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:09.816612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:09.858660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:09.895839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:09.935362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:10.022692Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625953171258852:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:10.022833Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:10.022922Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625953171258857:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:10.026868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:10.043830Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625953171258859:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:09:10.135397Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625953171258910:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:10.734916Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625931696420195:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:10.735004Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 13423, MsgBus: 28582 2025-06-24T21:09:12.441393Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625960892499765:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:12.443609Z node 2 :METADATA_PROVIDER ERROR: log.cpp:7 ... 644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:44.722494Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519626075105655598:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:44.722568Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 25508, MsgBus: 23051 2025-06-24T21:09:47.476281Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626111738061901:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:47.476386Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031d0/r3tmp/tmptK9JPi/pdisk_1.dat 2025-06-24T21:09:47.621811Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:47.623692Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519626111738061882:2079] 1750799387474908 != 1750799387474911 2025-06-24T21:09:47.641469Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:47.641586Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:47.643126Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25508, node 7 2025-06-24T21:09:47.698267Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:47.698302Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:47.698313Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty 
maybe) 2025-06-24T21:09:47.698462Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23051 TClient is connected to server localhost:23051 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:48.339124Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:48.356703Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:48.443404Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:48.563756Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:48.628710Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:48.706838Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:09:51.637771Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626128917932719:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:51.637930Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:51.707980Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:51.746816Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:51.791783Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:51.882083Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:51.917199Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:51.957583Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:52.028500Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:52.096796Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626133212900676:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:52.096889Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:52.096954Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626133212900681:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:52.101507Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:52.112409Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519626133212900683:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:09:52.168510Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519626133212900734:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:52.476228Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519626111738061901:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:52.476330Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpSqlIn::SecondaryIndex_SimpleKey_In_And_In [GOOD] >> KqpSqlIn::SecondaryIndex_TupleParameter >> TKeyValueTest::TestWriteReadWithRestartsThenResponseOkNewApi |94.4%| [TA] $(B)/ydb/core/kqp/ut/indexes/test-results/unittest/{meta.json ... results_accumulator.log} >> TTxDataShardMiniKQL::CrossShard_5_AllToAll [GOOD] >> TTxDataShardMiniKQL::CrossShard_6_Local >> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEvents [GOOD] >> TKeyValueTest::TestWriteLongKey >> DataStreams::TestPutRecordsCornerCases [GOOD] >> DataStreams::TestPutRecords ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestShardPagination [GOOD] Test command err: 2025-06-24T21:09:40.844866Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626082166593737:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:40.845209Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002fb1/r3tmp/tmp6H9EjQ/pdisk_1.dat 2025-06-24T21:09:41.379727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:41.379860Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:41.392064Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:41.410631Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15455, node 1 2025-06-24T21:09:41.560297Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:41.560319Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:41.560326Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:41.560447Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:09:41.863668Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18423 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:42.074855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:42.177598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:18423 2025-06-24T21:09:42.400716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting...
: Error: retention hours and storage megabytes must fit one of: { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, provided values: hours 168, storage 40960, code: 500080 2025-06-24T21:09:42.811336Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626090756530440:3441] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/stream_TestStreamStorageRetention\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypePersQueueGroup, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:45.236226Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626100775227178:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:45.236348Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002fb1/r3tmp/tmp84qfwM/pdisk_1.dat 2025-06-24T21:09:45.401378Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:45.420280Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:45.420358Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:45.423876Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18922, node 4 2025-06-24T21:09:45.503963Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:45.503988Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:45.503998Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:45.504119Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2055 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:09:45.782351Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:45.878756Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:2055 2025-06-24T21:09:46.080910Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T21:09:46.257554Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:51.276402Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626128336113512:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:51.276466Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002fb1/r3tmp/tmpAbWc3I/pdisk_1.dat 2025-06-24T21:09:51.453698Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:51.483930Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:51.484059Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:51.493190Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9619, node 7 2025-06-24T21:09:51.551070Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:51.551101Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:51.551111Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:51.551294Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27310 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:51.859330Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:51.917044Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:27310 2025-06-24T21:09:52.137571Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T21:09:52.285903Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |94.4%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/indexes/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpNewEngine::JoinWithPrecompute [GOOD] >> KqpNewEngine::JoinProjectMulti >> TExecutorDb::RandomOps [GOOD] >> TExecutorDb::FullScan >> TKeyValueCollectorTest::TestKeyValueCollectorEmpty >> TKeyValueTest::TestRewriteThenLastValueNewApi >> TKeyValueTest::TestInlineWriteReadDeleteWithRestartsThenResponseOkNewApi >> TKeyValueCollectorTest::TestKeyValueCollectorEmpty [GOOD] >> TKeyValueCollectorTest::TestKeyValueCollectorMany >> DataStreams::TestUnsupported [GOOD] >> TKeyValueCollectorTest::TestKeyValueCollectorMany [GOOD] >> KeyValueReadStorage::ReadWithTwoPartsOk [GOOD] >> TKeyValueTest::TestInlineWriteReadDeleteWithRestartsThenResponseOk >> TTxDataShardMiniKQL::TableStatsHistograms [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> KeyValueReadStorage::ReadWithTwoPartsOk [GOOD] Test command err: 2025-06-24T21:09:57.637763Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 2 ErrorReason# ReadRequestCookie# 0 2025-06-24T21:09:57.640528Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestUnsupported [GOOD] Test command err: 2025-06-24T21:09:40.887035Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626079820775396:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:40.887101Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002fe9/r3tmp/tmpONHPp6/pdisk_1.dat 2025-06-24T21:09:41.363026Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:41.366444Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:41.366544Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:41.374933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7485, node 1 2025-06-24T21:09:41.556981Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:41.557001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:41.557007Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:41.557099Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:09:41.922951Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24126 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:42.067992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:42.175854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:24126 2025-06-24T21:09:42.379020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-24T21:09:42.728707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:45.497961Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626103668751548:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:45.498168Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002fe9/r3tmp/tmpMs64EL/pdisk_1.dat 2025-06-24T21:09:45.706086Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12743, node 4 2025-06-24T21:09:45.826705Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:45.827975Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:45.837173Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:45.837220Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:45.837230Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:45.837409Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:09:45.851065Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8012 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:46.174098Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:09:46.236383Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:8012 2025-06-24T21:09:46.448636Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T21:09:46.529855Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:46.629876Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:46.695928Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } records { sequence_number: "10" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000000" } records { sequence_number: "12" shard_id: "shard-000000" } records { sequence_number: "13" shard_id: "shard-000000" } records { sequence_number: "14" shard_id: "shard-000000" } records { sequence_number: "15" shard_id: "shard-000000" } records { sequence_number: "16" shard_id: "shard-000000" } records { sequence_number: "17" shard_id: "shard-000000" } records { sequence_number: "18" shard_id: "shard-000000" } records { sequence_number: "19" shard_id: "shard-000000" } records { sequence_number: "20" shard_id: "shard-000000" } records { sequence_number: "21" shard_id: "shard-000000" } records { sequence_number: "22" shard_id: "shard-000000" } records { sequence_number: "23" shard_id: "shard-000000" } records { sequence_number: "24" shard_id: "shard-000000" } records { sequence_number: "25" shard_id: "shard-000000" } records { sequence_number: "26" shard_id: "shard-000000" } records { sequence_number: "27" shard_id: 
"shard-000000" } records { sequence_number: "28" shard_id: "shard-000000" } records { sequence_number: "29" shard_id: "shard-000000" } encryption_type: NONE records { sequence_number: "30" shard_id: "shard-000000" } records { sequence_number: "31" shard_id: "shard-000000 ... r: "81" shard_id: "shard-000000" } records { sequence_number: "82" shard_id: "shard-000000" } records { sequence_number: "83" shard_id: "shard-000000" } records { sequence_number: "84" shard_id: "shard-000000" } records { sequence_number: "85" shard_id: "shard-000000" } records { sequence_number: "86" shard_id: "shard-000000" } records { sequence_number: "87" shard_id: "shard-000000" } records { sequence_number: "88" shard_id: "shard-000000" } records { sequence_number: "89" shard_id: "shard-000000" } encryption_type: NONE records { sequence_number: "90" shard_id: "shard-000000" } records { sequence_number: "91" shard_id: "shard-000000" } records { sequence_number: "92" shard_id: "shard-000000" } records { sequence_number: "93" shard_id: "shard-000000" } records { sequence_number: "94" shard_id: "shard-000000" } records { sequence_number: "95" shard_id: "shard-000000" } records { sequence_number: "96" shard_id: "shard-000000" } records { sequence_number: "97" shard_id: "shard-000000" } records { sequence_number: "98" shard_id: "shard-000000" } records { sequence_number: "99" shard_id: "shard-000000" } records { sequence_number: "100" shard_id: "shard-000000" } records { sequence_number: "101" shard_id: "shard-000000" } records { sequence_number: "102" shard_id: "shard-000000" } records { sequence_number: "103" shard_id: "shard-000000" } records { sequence_number: "104" shard_id: "shard-000000" } records { sequence_number: "105" shard_id: "shard-000000" } records { sequence_number: "106" shard_id: "shard-000000" } records { sequence_number: "107" shard_id: "shard-000000" } records { sequence_number: "108" shard_id: "shard-000000" } records { sequence_number: "109" shard_id: "shard-000000" } records { sequence_number: "110" shard_id: "shard-000000" } records { sequence_number: "111" shard_id: "shard-000000" } records { sequence_number: "112" shard_id: "shard-000000" } records { sequence_number: "113" shard_id: "shard-000000" } records { sequence_number: "114" shard_id: "shard-000000" } records { sequence_number: "115" shard_id: "shard-000000" } records { sequence_number: "116" shard_id: "shard-000000" } records { sequence_number: "117" shard_id: "shard-000000" } records { sequence_number: "118" shard_id: "shard-000000" } records { sequence_number: "119" shard_id: "shard-000000" } 2025-06-24T21:09:50.497893Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519626103668751548:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:50.497990Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; encryption_type: NONE records { sequence_number: "120" shard_id: "shard-000000" } records { sequence_number: "121" shard_id: "shard-000000" } records { sequence_number: "122" shard_id: "shard-000000" } records { sequence_number: "123" shard_id: "shard-000000" } records { sequence_number: "124" shard_id: "shard-000000" } records { sequence_number: "125" shard_id: "shard-000000" } records { sequence_number: "126" shard_id: "shard-000000" } records { sequence_number: "127" shard_id: "shard-000000" } records { sequence_number: "128" 
shard_id: "shard-000000" } records { sequence_number: "129" shard_id: "shard-000000" } records { sequence_number: "130" shard_id: "shard-000000" } records { sequence_number: "131" shard_id: "shard-000000" } records { sequence_number: "132" shard_id: "shard-000000" } records { sequence_number: "133" shard_id: "shard-000000" } records { sequence_number: "134" shard_id: "shard-000000" } records { sequence_number: "135" shard_id: "shard-000000" } records { sequence_number: "136" shard_id: "shard-000000" } records { sequence_number: "137" shard_id: "shard-000000" } records { sequence_number: "138" shard_id: "shard-000000" } records { sequence_number: "139" shard_id: "shard-000000" } records { sequence_number: "140" shard_id: "shard-000000" } records { sequence_number: "141" shard_id: "shard-000000" } records { sequence_number: "142" shard_id: "shard-000000" } records { sequence_number: "143" shard_id: "shard-000000" } records { sequence_number: "144" shard_id: "shard-000000" } records { sequence_number: "145" shard_id: "shard-000000" } records { sequence_number: "146" shard_id: "shard-000000" } records { sequence_number: "147" shard_id: "shard-000000" } records { sequence_number: "148" shard_id: "shard-000000" } records { sequence_number: "149" shard_id: "shard-000000" } Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1750799386591-2","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1750799386,"finish":1750799386},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750799386}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1750799386667-3","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1750799386,"finish":1750799386},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750799386}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1750799386730-4","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750799386,"finish":1750799387},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750799387}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1750799387764-5","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750799387,"finish":1750799388},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750799388}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1750799388779-6","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750799388,"finish":1750799389},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750799389}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1750799389792-7","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750799389,"finish":1750799390},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750799390}' 2025-06-24T21:09:54.053850Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626142649379894:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:54.053942Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002fe9/r3tmp/tmpXXIUgC/pdisk_1.dat 2025-06-24T21:09:54.186563Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:54.216027Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:54.216117Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:54.219233Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27575, node 7 2025-06-24T21:09:54.253634Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:54.253658Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:54.253665Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:54.253808Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9790 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:54.478964Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:54.523356Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:9790 2025-06-24T21:09:54.729261Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
>> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices_Sticky [GOOD] >> TPartGroupBtreeIndexIter::NoNodes [GOOD] >> TPartGroupBtreeIndexIter::OneNode [GOOD] >> TPartGroupBtreeIndexIter::FewNodes >> DataStreams::TestReservedStorageMetering [GOOD] >> DataStreams::TestReservedConsumersMetering >> TPartGroupBtreeIndexIter::FewNodes [GOOD] >> TPartMulti::Basics [GOOD] >> TPartMulti::BasicsReverse [GOOD] >> TPartSlice::SimpleMerge [GOOD] >> TPartSlice::ComplexMerge [GOOD] >> TPartSlice::LongTailMerge [GOOD] >> TPartSlice::CutSingle [GOOD] >> TPartSlice::CutMulti [GOOD] >> TPartSlice::LookupBasics [GOOD] >> TPartSlice::LookupFull [GOOD] >> TPartSlice::EqualByRowId [GOOD] >> TPartSlice::ParallelCompactions [GOOD] >> DataStreams::TestListShards1Shard [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::TableStatsHistograms [GOOD] Test command err: 2025-06-24T21:09:32.076635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:09:32.076708Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:32.078786Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:32.089572Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:32.090082Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:09:32.090384Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:32.126571Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:32.133830Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:32.133996Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:32.135365Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:09:32.135434Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:09:32.135472Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:09:32.135790Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:32.135872Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:32.135926Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:09:32.201477Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:32.245294Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:09:32.245546Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:32.245672Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:09:32.245715Z node 1 
:TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:32.245755Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:09:32.245815Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:32.246004Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:32.246056Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:32.246371Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:32.246484Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:32.246646Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:32.246692Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:32.246751Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:32.246797Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:32.246837Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:32.246872Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:32.246930Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:32.247029Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:32.247076Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:32.247134Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:09:32.250307Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:09:32.250393Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:32.250511Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:32.250700Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:09:32.250750Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at 
tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:09:32.250813Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:09:32.250862Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:32.250901Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:09:32.250937Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:09:32.250982Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:32.251347Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:32.251395Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:09:32.251431Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:32.251483Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:32.251536Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:09:32.251580Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:32.251626Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:09:32.251659Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:32.251685Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:32.263893Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:32.263987Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:32.264052Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:32.264096Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:32.264173Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:09:32.264729Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:32.264785Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:32.264882Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:32.265025Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender 
[1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:32.265064Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:32.265166Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:32.265202Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:09:32.265248Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:09:32.265277Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:32.269346Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:32.269424Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:32.269658Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:32.269699Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:32.269758Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:32.269799Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:32.269837Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:32.269886Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:09:32.269942Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
pp:1862: Execution status for [0:1002] at 9437184 is Executed 2025-06-24T21:09:57.586641Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1002] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-24T21:09:57.586663Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1002] at 9437184 to execution unit ExecuteDataTx 2025-06-24T21:09:57.586685Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1002] at 9437184 on unit ExecuteDataTx 2025-06-24T21:09:57.586720Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-24T21:09:57.587067Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:1002] at tablet 9437184 with status COMPLETE 2025-06-24T21:09:57.587120Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:1002] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 109, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-24T21:09:57.587189Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1002] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:57.587219Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1002] at 9437184 executing on unit ExecuteDataTx 2025-06-24T21:09:57.587246Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1002] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:57.587271Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1002] at 9437184 on unit FinishPropose 2025-06-24T21:09:57.587302Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1002] at 9437184 is DelayComplete 2025-06-24T21:09:57.587326Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1002] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:57.587351Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1002] at 9437184 to execution unit CompletedOperations 2025-06-24T21:09:57.587375Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1002] at 9437184 on unit CompletedOperations 2025-06-24T21:09:57.587417Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1002] at 9437184 is Executed 2025-06-24T21:09:57.587437Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1002] at 9437184 executing on unit CompletedOperations 2025-06-24T21:09:57.587461Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:1002] at 9437184 has finished 2025-06-24T21:09:57.604255Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:57.604322Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1002] at 9437184 on unit FinishPropose 2025-06-24T21:09:57.604360Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1002 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: COMPLETE 2025-06-24T21:09:57.604440Z node 3 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 .2025-06-24T21:09:57.609899Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269551617, Sender [3:102:2135], Recipient [3:238:2229]: NKikimrTxDataShard.TEvGetShardState Source { RawX1: 102 RawX2: 12884904023 } 2025-06-24T21:09:57.609964Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvGetShardState 2025-06-24T21:09:57.610944Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:4551:6465], Recipient [3:238:2229]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:57.610993Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:57.611039Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [3:4550:6464], serverId# [3:4551:6465], sessionId# [0:0:0] 2025-06-24T21:09:57.611756Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [3:102:2135], Recipient [3:238:2229]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 102 RawX2: 12884904023 } TxBody: "\032\265\002\037\000\005\205\n\205\000\205\004?\000\205\002\202\0041\034MyReads MyWrites\205\004?\000\206\202\024Reply\024Write?\000?\000 AllReads\030MyKeys\014Run4ShardsForRead4ShardsToWrite\005?\000\005?\004?\014\005?\002)\211\006\202\203\005\004\213\002\203\004\205\002\203\001H\01056$UpdateRow\000\003?\016 h\020\000\000\000\000\000\000\r\000\000\000\000\000\000\000\013?\022\003?\020\235\017\001\005?\026\003?\024\322ImInShard111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111\001\007\002\000\003\005?\010?\014\006\002?\006?$\000\003?\014?\014\037/ \0018\000" TxId: 1003 ExecLevel: 0 Flags: 0 2025-06-24T21:09:57.611801Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:57.611893Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:57.612456Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1003] at 9437184 on unit CheckDataTx 2025-06-24T21:09:57.612517Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1003] at 9437184 is Executed 2025-06-24T21:09:57.612549Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1003] at 9437184 executing on unit CheckDataTx 2025-06-24T21:09:57.612581Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1003] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-24T21:09:57.612610Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1003] at 9437184 on unit BuildAndWaitDependencies 2025-06-24T21:09:57.612648Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-24T21:09:57.612701Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:1003] at 9437184 2025-06-24T21:09:57.612731Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1003] at 9437184 is Executed 2025-06-24T21:09:57.612751Z node 3 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1003] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-24T21:09:57.612773Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1003] at 9437184 to execution unit ExecuteDataTx 2025-06-24T21:09:57.612794Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1003] at 9437184 on unit ExecuteDataTx 2025-06-24T21:09:57.612832Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-24T21:09:57.613160Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:1003] at tablet 9437184 with status COMPLETE 2025-06-24T21:09:57.613237Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:1003] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 109, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-24T21:09:57.613298Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1003] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:57.613325Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1003] at 9437184 executing on unit ExecuteDataTx 2025-06-24T21:09:57.613355Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1003] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:57.613384Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1003] at 9437184 on unit FinishPropose 2025-06-24T21:09:57.613415Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1003] at 9437184 is DelayComplete 2025-06-24T21:09:57.613440Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1003] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:57.613465Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1003] at 9437184 to execution unit CompletedOperations 2025-06-24T21:09:57.613492Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1003] at 9437184 on unit CompletedOperations 2025-06-24T21:09:57.613535Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1003] at 9437184 is Executed 2025-06-24T21:09:57.613556Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1003] at 9437184 executing on unit CompletedOperations 2025-06-24T21:09:57.613582Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:1003] at 9437184 has finished 2025-06-24T21:09:57.647940Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 9437184, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-24T21:09:57.647997Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 9437184, table# 1001, finished edge# 0, front# 0 2025-06-24T21:09:57.650821Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:57.650885Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1003] at 9437184 on unit FinishPropose 
2025-06-24T21:09:57.650926Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1003 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 3 ms, status: COMPLETE 2025-06-24T21:09:57.651012Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:57.653817Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [3:235:2228], Recipient [3:238:2229]: NKikimr::TEvTablet::TEvFollowerGcApplied .2025-06-24T21:09:57.657017Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:4565:6478], Recipient [3:238:2229]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:57.657084Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:57.657136Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [3:4564:6477], serverId# [3:4565:6478], sessionId# [0:0:0] 2025-06-24T21:09:57.657861Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553160, Sender [3:4563:6476], Recipient [3:238:2229]: NKikimrTxDataShard.TEvGetTableStats TableId: 13 { InMemSize: 0 LastAccessTime: 1718 LastUpdateTime: 1718 } >> KqpReturning::ReturningWorksIndexedReplace+QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedReplace-QueryService >> DataStreams::ListStreamsValidation [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestListShards1Shard [GOOD] Test command err: 2025-06-24T21:09:40.853714Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626079789260693:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:40.854977Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002fe7/r3tmp/tmp7QbDum/pdisk_1.dat 2025-06-24T21:09:41.349984Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:41.350120Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:41.359366Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:41.394885Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65224, node 1 2025-06-24T21:09:41.558605Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:41.558633Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:41.558641Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:41.558785Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:09:41.924073Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is 
connected to server localhost:16562 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:42.114378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:42.263662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:16562 2025-06-24T21:09:42.439828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-24T21:09:42.456036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-24T21:09:42.839250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } records { sequence_number: "10" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000000" } records { sequence_number: "12" shard_id: "shard-000000" } records { sequence_number: "13" shard_id: "shard-000000" } records { sequence_number: "14" shard_id: "shard-000000" } records { sequence_number: "15" shard_id: "shard-000000" } records { sequence_number: "16" shard_id: "shard-000000" } records { sequence_number: "17" shard_id: "shard-000000" } records { sequence_number: "18" shard_id: "shard-000000" } records { sequence_number: "19" shard_id: "shard-000000" } records { sequence_number: "20" shard_id: "shard-000000" } records { sequence_number: "21" shard_id: "shard-000000" } records { sequence_number: "22" shard_id: "shard-000000" } records { sequence_number: "23" shard_id: "shard-000000" } records { sequence_number: "24" shard_id: "shard-000000" } records { sequence_number: "25" shard_id: "shard-000000" } records { sequence_number: "26" shard_id: "shard-000000" } records { sequence_number: "27" shard_id: "shard-000000" } records { sequence_number: "28" shard_id: "shard-000000" } records { sequence_number: "29" shard_id: "shard-000000" } 2025-06-24T21:09:42.933189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:43.022841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:422) 2025-06-24T21:09:43.037966Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-24T21:09:43.037996Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 
not found 2025-06-24T21:09:43.038008Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1750799382677-1","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1750799382,"finish":1750799382},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799382}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"used_storage-root-72075186224037888-1750799382677-2","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1750799382,"finish":1750799382},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750799382}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037890-1750799382975-3","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":1,"unit":"second","start":1750799382,"finish":1750799383},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224037890","source_wt":1750799383}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"used_storage-root-72075186224037890-1750799382975-4","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750799382,"finish":1750799383},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037890","source_wt":1750799383}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1750799382971-5","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":1,"unit":"second","start":1750799382,"finish":1750799383},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799383}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"used_storage-root-72075186224037888-1750799382971-6","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750799382,"finish":1750799383},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750799383}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1750799382677-1","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1750799382,"finish":1750799382},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224 ... ired fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750799390.946581 731485 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-24T21:09:50.962361Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:50.994485Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:51.022610Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) E0000 00:00:1750799391.079424 731485 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750799391.079603 731485 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-24T21:09:51.088315Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) E0000 00:00:1750799391.149416 731485 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750799391.149552 731485 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-24T21:09:51.157840Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) E0000 00:00:1750799391.224837 731485 message_lite.cc:131] Can't parse message of type 
"NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750799391.224947 731485 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750799391.239054 731485 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750799391.239179 731485 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-24T21:09:51.280717Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:422) 2025-06-24T21:09:51.298182Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037892 not found 2025-06-24T21:09:51.298219Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037889 not found 2025-06-24T21:09:51.298232Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037891 not found 2025-06-24T21:09:51.298266Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037893 not found 2025-06-24T21:09:51.298283Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037890 not found 2025-06-24T21:09:51.298317Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037888 not found 2025-06-24T21:09:51.308769Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,5) wasn't found 2025-06-24T21:09:51.308838Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2025-06-24T21:09:51.308869Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found 2025-06-24T21:09:51.308915Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,6) wasn't found 2025-06-24T21:09:51.308953Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2025-06-24T21:09:51.308990Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found E0000 00:00:1750799391.317168 731485 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750799391.317304 731485 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, 
StreamArn 2025-06-24T21:09:54.721653Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626142972611391:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:54.721746Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002fe7/r3tmp/tmpanocoh/pdisk_1.dat 2025-06-24T21:09:54.850718Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:54.866734Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:54.866829Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:54.873444Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6046, node 10 2025-06-24T21:09:54.926761Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:54.926783Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:54.926793Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:54.926929Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6070 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:55.169441Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:09:55.241369Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:6070 2025-06-24T21:09:55.447619Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... E0000 00:00:1750799395.614637 733443 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750799395.622646 733443 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750799395.629858 733443 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750799395.636379 733443 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750799395.643025 733443 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-24T21:09:55.731742Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> TPartSlice::ParallelCompactions [GOOD] Test command err: ======= CUT ======= Part{[1:2:3:0:0:0:0] eph 0, 346b 12r} data 755b + FlatIndex{4} Label{3 rev 3, 172b} 5 rec | Page Row Bytes (Uint32, String) | 0 0 86b {1, aaa} | 1 3 88b {1, b} | 2 6 86b {2, NULL} | 3 9 86b {2, ccx} | 3 11 86b {2, cxz} + BTreeIndex{PageId: 5 RowCount: 12 DataSize: 346 ErasedRowCount: 0} Label{13 rev 1, 208b} | PageId: 0 RowCount: 3 DataSize: 86 ErasedRowCount: 0 | > {1, b} | PageId: 1 RowCount: 6 DataSize: 174 ErasedRowCount: 0 | > {2, NULL} | PageId: 2 RowCount: 9 DataSize: 260 ErasedRowCount: 0 | > {2, ccx} | PageId: 3 RowCount: 12 DataSize: 346 ErasedRowCount: 0 ======= FULL ======= Part{[1:2:3:0:0:0:0] eph 0, 346b 12r} data 777b + FlatIndex{4} Label{3 rev 3, 179b} 5 rec | Page Row Bytes (Uint32, String) | 0 0 86b {1, aaa} | 1 3 88b {1, baaaa} | 2 6 86b {2, aaa} | 3 9 86b {2, ccx} | 3 11 86b {2, cxz} + BTreeIndex{PageId: 5 RowCount: 12 DataSize: 346 ErasedRowCount: 0} Label{13 rev 1, 223b} | PageId: 0 RowCount: 3 DataSize: 86 ErasedRowCount: 0 | > {1, baaaa} | PageId: 1 RowCount: 6 DataSize: 174 ErasedRowCount: 0 | > {2, aaa} | PageId: 2 RowCount: 9 DataSize: 260 ErasedRowCount: 0 | > {2, ccx} | PageId: 3 RowCount: 12 DataSize: 346 ErasedRowCount: 0 ======= CUT ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1347b + FlatIndex{10} Label{3 rev 3, 362b} 11 rec | Page Row Bytes 
(Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, ab} | 2 2 42b {1, ac} | 3 3 42b {1, b} | 4 4 42b {1, bb} | 5 5 42b {2, NULL} | 6 6 42b {2, ab} | 7 7 42b {2, ac} | 8 8 42b {2, b} | 9 9 42b {2, bb} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 536b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, ab} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, ac} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, b} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bb} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, NULL} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, ab} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, ac} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, b} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bb} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 ======= FULL ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1381b + FlatIndex{10} Label{3 rev 3, 375b} 11 rec | Page Row Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, aba} | 2 2 42b {1, aca} | 3 3 42b {1, baa} | 4 4 42b {1, bba} | 5 5 42b {2, aaa} | 6 6 42b {2, aba} | 7 7 42b {2, aca} | 8 8 42b {2, baa} | 9 9 42b {2, bba} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 557b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, aba} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, aca} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, baa} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bba} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, aaa} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, aba} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, aca} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, baa} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bba} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 ======= SLICES ======= { [0, 1), [1, 3), [3, 4), [4, 6), [6, 8), [8, 9), [9, 9] } ======= CUT ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1347b + FlatIndex{10} Label{3 rev 3, 362b} 11 rec | Page Row Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, ab} | 2 2 42b {1, ac} | 3 3 42b {1, b} | 4 4 42b {1, bb} | 5 5 42b {2, NULL} | 6 6 42b {2, ab} | 7 7 42b {2, ac} | 8 8 42b {2, b} | 9 9 42b {2, bb} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 536b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, ab} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, ac} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, b} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bb} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, NULL} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, ab} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, ac} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, b} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bb} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 ======= FULL ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1381b + FlatIndex{10} Label{3 rev 3, 375b} 11 rec | Page Row Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, aba} | 2 2 42b {1, aca} | 3 3 42b {1, baa} | 4 4 42b {1, bba} | 5 5 42b {2, aaa} | 6 
6 42b {2, aba} | 7 7 42b {2, aca} | 8 8 42b {2, baa} | 9 9 42b {2, bba} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 557b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, aba} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, aca} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, baa} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bba} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, aaa} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, aba} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, aca} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, baa} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bba} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 81b 2r} data 316b + FlatIndex{2} Label{3 rev 3, 107b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 41b {ccccccd} | 1 1 41b {ccccccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 81 ErasedRowCount: 0} Label{13 rev 1, 109b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccccd} | PageId: 1 RowCount: 2 DataSize: 81 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 83b 2r} data 320b + FlatIndex{2} Label{3 rev 3, 109b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 43b {ccccccd} | 1 1 43b {ccccccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 83 ErasedRowCount: 0} Label{13 rev 1, 109b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccccd} | PageId: 1 RowCount: 2 DataSize: 83 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 80b 2r} data 312b + FlatIndex{2} Label{3 rev 3, 105b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 40b {cccccd} | 1 1 40b {cccccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 80 ErasedRowCount: 0} Label{13 rev 1, 108b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccccd} | PageId: 1 RowCount: 2 DataSize: 80 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 82b 2r} data 316b + FlatIndex{2} Label{3 rev 3, 107b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 42b {cccccd} | 1 1 42b {cccccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 82 ErasedRowCount: 0} Label{13 rev 1, 108b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccccd} | PageId: 1 RowCount: 2 DataSize: 82 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 79b 2r} data 308b + FlatIndex{2} Label{3 rev 3, 103b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 39b {ccccd} | 1 1 39b {ccccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 79 ErasedRowCount: 0} Label{13 rev 1, 107b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccd} | PageId: 1 RowCount: 2 DataSize: 79 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 81b 2r} data 312b + FlatIndex{2} Label{3 rev 3, 105b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 41b {ccccd} | 1 1 41b {ccccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 81 ErasedRowCount: 0} Label{13 rev 1, 107b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccd} | PageId: 1 RowCount: 2 DataSize: 81 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 78b 2r} data 304b + FlatIndex{2} Label{3 rev 3, 101b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 38b {cccd} | 1 1 38b {cccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 78 ErasedRowCount: 0} Label{13 rev 1, 106b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccd} | PageId: 1 RowCount: 2 DataSize: 78 ErasedRowCount: 0 
Part{[1:2:3:0:0:0:0] eph 0, 80b 2r} data 308b + FlatIndex{2} Label{3 rev 3, 103b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 40b {cccd} | 1 1 40b {cccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 80 ErasedRowCount: 0} Label{13 rev 1, 106b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccd} | PageId: 1 RowCount: 2 DataSize: 80 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 75b 2r} data 292b + FlatIndex{2} Label{3 rev 3, 95b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 35b {d} | 1 1 35b {d} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 75 ErasedRowCount: 0} Label{13 rev 1, 103b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {d} | PageId: 1 RowCount: 2 DataSize: 75 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 77b 2r} data 296b + FlatIndex{2} Label{3 rev 3, 97b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 37b {d} | 1 1 37b {ddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 77 ErasedRowCount: 0} Label{13 rev 1, 103b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {d} | PageId: 1 RowCount: 2 DataSize: 77 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 69b 2r} data 280b + FlatIndex{2} Label{3 rev 3, 89b} 3 rec | Page Row Bytes (String) | 0 0 34b {} | 1 1 35b {d} | 1 1 35b {d} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 69 ErasedRowCount: 0} Label{13 rev 1, 103b} | PageId: 0 RowCount: 1 DataSize: 34 ErasedRowCount: 0 | > {d} | PageId: 1 RowCount: 2 DataSize: 69 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 71b 2r} data 284b + FlatIndex{2} Label{3 rev 3, 91b} 3 rec | Page Row Bytes (String) | 0 0 34b {} | 1 1 37b {d} | 1 1 37b {ddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 71 ErasedRowCount: 0} Label{13 rev 1, 10 ... owOp 1: {0, 8} {Set 2 Uint32 : 5}, {Set 3 Uint64 : 5}, {Set 4 String : xxxxxxxxxx_5} + Rows{3} Label{34 rev 1, 120b}, [6, +2)row | ERowOp 1: {0, 10} {Set 2 Uint32 : 6}, {Set 3 Uint64 : 6}, {Set 4 String : xxxxxxxxxx_6} | ERowOp 1: {1, 1} {Set 2 Uint32 : 7}, {Set 3 Uint64 : 7}, {Set 4 String : xxxxxxxxxx_7} + Rows{4} Label{44 rev 1, 120b}, [8, +2)row | ERowOp 1: {1, 3} {Set 2 Uint32 : 8}, {Set 3 Uint64 : 8}, {Set 4 String : xxxxxxxxxx_8} | ERowOp 1: {1, 4} {Set 2 Uint32 : 9}, {Set 3 Uint64 : 9}, {Set 4 String : xxxxxxxxxx_9} + Rows{5} Label{54 rev 1, 122b}, [10, +2)row | ERowOp 1: {1, 6} {Set 2 Uint32 : 10}, {Set 3 Uint64 : 10}, {Set 4 String : xxxxxxxxxx_10} | ERowOp 1: {1, 7} {Set 2 Uint32 : 11}, {Set 3 Uint64 : 11}, {Set 4 String : xxxxxxxxxx_11} + Rows{6} Label{64 rev 1, 122b}, [12, +2)row | ERowOp 1: {1, 8} {Set 2 Uint32 : 12}, {Set 3 Uint64 : 12}, {Set 4 String : xxxxxxxxxx_12} | ERowOp 1: {1, 10} {Set 2 Uint32 : 13}, {Set 3 Uint64 : 13}, {Set 4 String : xxxxxxxxxx_13} + Rows{7} Label{74 rev 1, 122b}, [14, +2)row | ERowOp 1: {2, 1} {Set 2 Uint32 : 14}, {Set 3 Uint64 : 14}, {Set 4 String : xxxxxxxxxx_14} | ERowOp 1: {2, 3} {Set 2 Uint32 : 15}, {Set 3 Uint64 : 15}, {Set 4 String : xxxxxxxxxx_15} + Rows{8} Label{84 rev 1, 122b}, [16, +2)row | ERowOp 1: {2, 4} {Set 2 Uint32 : 16}, {Set 3 Uint64 : 16}, {Set 4 String : xxxxxxxxxx_16} | ERowOp 1: {2, 6} {Set 2 Uint32 : 17}, {Set 3 Uint64 : 17}, {Set 4 String : xxxxxxxxxx_17} + Rows{9} Label{94 rev 1, 122b}, [18, +2)row | ERowOp 1: {2, 7} {Set 2 Uint32 : 18}, {Set 3 Uint64 : 18}, {Set 4 String : xxxxxxxxxx_18} | ERowOp 1: {2, 8} {Set 2 Uint32 : 19}, {Set 3 Uint64 : 19}, {Set 4 String : xxxxxxxxxx_19} + Rows{10} Label{104 rev 1, 122b}, [20, +2)row | ERowOp 1: {2, 10} {Set 2 Uint32 : 20}, {Set 3 Uint64 : 20}, {Set 4 String : xxxxxxxxxx_20} | ERowOp 1: {3, 
1} {Set 2 Uint32 : 21}, {Set 3 Uint64 : 21}, {Set 4 String : xxxxxxxxxx_21} + Rows{11} Label{114 rev 1, 122b}, [22, +2)row | ERowOp 1: {3, 3} {Set 2 Uint32 : 22}, {Set 3 Uint64 : 22}, {Set 4 String : xxxxxxxxxx_22} | ERowOp 1: {3, 4} {Set 2 Uint32 : 23}, {Set 3 Uint64 : 23}, {Set 4 String : xxxxxxxxxx_23} + Rows{12} Label{124 rev 1, 122b}, [24, +2)row | ERowOp 1: {3, 6} {Set 2 Uint32 : 24}, {Set 3 Uint64 : 24}, {Set 4 String : xxxxxxxxxx_24} | ERowOp 1: {3, 7} {Set 2 Uint32 : 25}, {Set 3 Uint64 : 25}, {Set 4 String : xxxxxxxxxx_25} + Rows{13} Label{134 rev 1, 122b}, [26, +2)row | ERowOp 1: {3, 8} {Set 2 Uint32 : 26}, {Set 3 Uint64 : 26}, {Set 4 String : xxxxxxxxxx_26} | ERowOp 1: {3, 10} {Set 2 Uint32 : 27}, {Set 3 Uint64 : 27}, {Set 4 String : xxxxxxxxxx_27} + Rows{14} Label{144 rev 1, 122b}, [28, +2)row | ERowOp 1: {4, 1} {Set 2 Uint32 : 28}, {Set 3 Uint64 : 28}, {Set 4 String : xxxxxxxxxx_28} | ERowOp 1: {4, 3} {Set 2 Uint32 : 29}, {Set 3 Uint64 : 29}, {Set 4 String : xxxxxxxxxx_29} + Rows{15} Label{154 rev 1, 122b}, [30, +2)row | ERowOp 1: {4, 4} {Set 2 Uint32 : 30}, {Set 3 Uint64 : 30}, {Set 4 String : xxxxxxxxxx_30} | ERowOp 1: {4, 6} {Set 2 Uint32 : 31}, {Set 3 Uint64 : 31}, {Set 4 String : xxxxxxxxxx_31} + Rows{16} Label{164 rev 1, 122b}, [32, +2)row | ERowOp 1: {4, 7} {Set 2 Uint32 : 32}, {Set 3 Uint64 : 32}, {Set 4 String : xxxxxxxxxx_32} | ERowOp 1: {4, 8} {Set 2 Uint32 : 33}, {Set 3 Uint64 : 33}, {Set 4 String : xxxxxxxxxx_33} + Rows{17} Label{174 rev 1, 122b}, [34, +2)row | ERowOp 1: {4, 10} {Set 2 Uint32 : 34}, {Set 3 Uint64 : 34}, {Set 4 String : xxxxxxxxxx_34} | ERowOp 1: {5, 1} {Set 2 Uint32 : 35}, {Set 3 Uint64 : 35}, {Set 4 String : xxxxxxxxxx_35} + Rows{18} Label{184 rev 1, 122b}, [36, +2)row | ERowOp 1: {5, 3} {Set 2 Uint32 : 36}, {Set 3 Uint64 : 36}, {Set 4 String : xxxxxxxxxx_36} | ERowOp 1: {5, 4} {Set 2 Uint32 : 37}, {Set 3 Uint64 : 37}, {Set 4 String : xxxxxxxxxx_37} + Rows{19} Label{194 rev 1, 122b}, [38, +2)row | ERowOp 1: {5, 6} {Set 2 Uint32 : 38}, {Set 3 Uint64 : 38}, {Set 4 String : xxxxxxxxxx_38} | ERowOp 1: {5, 7} {Set 2 Uint32 : 39}, {Set 3 Uint64 : 39}, {Set 4 String : xxxxxxxxxx_39} Slices{ [0, 39] } Part{[1:2:3:0:0:0:0] eph 0, 2430b 40r} data 4441b + FlatIndex{26} Label{3 rev 3, 558b} 21 rec | Page Row Bytes (Uint32, Uint32) | 0 0 120b {0, 1} | 1 2 120b {0, 4} | 2 4 120b {0, 7} | 3 6 120b {0, 10} | 4 8 120b {1, 3} | 5 10 122b {1, 6} | 7 12 122b {1, 8} | 8 14 122b {2, NULL} | 9 16 122b {2, 4} | 11 18 122b {2, 7} | 12 20 122b {2, 10} | 13 22 122b {3, 3} | 15 24 122b {3, 6} | 16 26 122b {3, 8} | 17 28 122b {4, NULL} | 19 30 122b {4, 4} | 20 32 122b {4, 7} | 21 34 122b {4, 10} | 24 36 122b {5, 3} | 25 38 122b {5, 6} | 25 39 122b {5, 7} + BTreeIndex{PageId: 29 RowCount: 40 DataSize: 2430 ErasedRowCount: 0} Label{13 rev 1, 102b} | + BTreeIndex{PageId: 23 RowCount: 18 DataSize: 1088 ErasedRowCount: 0} Label{13 rev 1, 151b} | | + BTreeIndex{PageId: 6 RowCount: 6 DataSize: 360 ErasedRowCount: 0} Label{13 rev 1, 151b} | | | PageId: 0 RowCount: 2 DataSize: 120 ErasedRowCount: 0 | | | > {0, 4} | | | PageId: 1 RowCount: 4 DataSize: 240 ErasedRowCount: 0 | | | > {0, 7} | | | PageId: 2 RowCount: 6 DataSize: 360 ErasedRowCount: 0 | | > {0, 10} | | + BTreeIndex{PageId: 10 RowCount: 12 DataSize: 722 ErasedRowCount: 0} Label{13 rev 1, 151b} | | | PageId: 3 RowCount: 8 DataSize: 480 ErasedRowCount: 0 | | | > {1, 3} | | | PageId: 4 RowCount: 10 DataSize: 600 ErasedRowCount: 0 | | | > {1, 6} | | | PageId: 5 RowCount: 12 DataSize: 722 ErasedRowCount: 0 | | > {1, 8} | | + 
BTreeIndex{PageId: 14 RowCount: 18 DataSize: 1088 ErasedRowCount: 0} Label{13 rev 1, 147b} | | | PageId: 7 RowCount: 14 DataSize: 844 ErasedRowCount: 0 | | | > {2, NULL} | | | PageId: 8 RowCount: 16 DataSize: 966 ErasedRowCount: 0 | | | > {2, 4} | | | PageId: 9 RowCount: 18 DataSize: 1088 ErasedRowCount: 0 | > {2, 7} | + BTreeIndex{PageId: 28 RowCount: 40 DataSize: 2430 ErasedRowCount: 0} Label{13 rev 1, 151b} | | + BTreeIndex{PageId: 18 RowCount: 24 DataSize: 1454 ErasedRowCount: 0} Label{13 rev 1, 151b} | | | PageId: 11 RowCount: 20 DataSize: 1210 ErasedRowCount: 0 | | | > {2, 10} | | | PageId: 12 RowCount: 22 DataSize: 1332 ErasedRowCount: 0 | | | > {3, 3} | | | PageId: 13 RowCount: 24 DataSize: 1454 ErasedRowCount: 0 | | > {3, 6} | | + BTreeIndex{PageId: 22 RowCount: 30 DataSize: 1820 ErasedRowCount: 0} Label{13 rev 1, 147b} | | | PageId: 15 RowCount: 26 DataSize: 1576 ErasedRowCount: 0 | | | > {3, 8} | | | PageId: 16 RowCount: 28 DataSize: 1698 ErasedRowCount: 0 | | | > {4, NULL} | | | PageId: 17 RowCount: 30 DataSize: 1820 ErasedRowCount: 0 | | > {4, 4} | | + BTreeIndex{PageId: 27 RowCount: 40 DataSize: 2430 ErasedRowCount: 0} Label{13 rev 1, 249b} | | | PageId: 19 RowCount: 32 DataSize: 1942 ErasedRowCount: 0 | | | > {4, 7} | | | PageId: 20 RowCount: 34 DataSize: 2064 ErasedRowCount: 0 | | | > {4, 10} | | | PageId: 21 RowCount: 36 DataSize: 2186 ErasedRowCount: 0 | | | > {5, 3} | | | PageId: 24 RowCount: 38 DataSize: 2308 ErasedRowCount: 0 | | | > {5, 6} | | | PageId: 25 RowCount: 40 DataSize: 2430 ErasedRowCount: 0 + Rows{0} Label{04 rev 1, 120b}, [0, +2)row | ERowOp 1: {0, 1} {Set 2 Uint32 : 0}, {Set 3 Uint64 : 0}, {Set 4 String : xxxxxxxxxx_0} | ERowOp 1: {0, 3} {Set 2 Uint32 : 1}, {Set 3 Uint64 : 1}, {Set 4 String : xxxxxxxxxx_1} + Rows{1} Label{14 rev 1, 120b}, [2, +2)row | ERowOp 1: {0, 4} {Set 2 Uint32 : 2}, {Set 3 Uint64 : 2}, {Set 4 String : xxxxxxxxxx_2} | ERowOp 1: {0, 6} {Set 2 Uint32 : 3}, {Set 3 Uint64 : 3}, {Set 4 String : xxxxxxxxxx_3} + Rows{2} Label{24 rev 1, 120b}, [4, +2)row | ERowOp 1: {0, 7} {Set 2 Uint32 : 4}, {Set 3 Uint64 : 4}, {Set 4 String : xxxxxxxxxx_4} | ERowOp 1: {0, 8} {Set 2 Uint32 : 5}, {Set 3 Uint64 : 5}, {Set 4 String : xxxxxxxxxx_5} + Rows{3} Label{34 rev 1, 120b}, [6, +2)row | ERowOp 1: {0, 10} {Set 2 Uint32 : 6}, {Set 3 Uint64 : 6}, {Set 4 String : xxxxxxxxxx_6} | ERowOp 1: {1, 1} {Set 2 Uint32 : 7}, {Set 3 Uint64 : 7}, {Set 4 String : xxxxxxxxxx_7} + Rows{4} Label{44 rev 1, 120b}, [8, +2)row | ERowOp 1: {1, 3} {Set 2 Uint32 : 8}, {Set 3 Uint64 : 8}, {Set 4 String : xxxxxxxxxx_8} | ERowOp 1: {1, 4} {Set 2 Uint32 : 9}, {Set 3 Uint64 : 9}, {Set 4 String : xxxxxxxxxx_9} + Rows{5} Label{54 rev 1, 122b}, [10, +2)row | ERowOp 1: {1, 6} {Set 2 Uint32 : 10}, {Set 3 Uint64 : 10}, {Set 4 String : xxxxxxxxxx_10} | ERowOp 1: {1, 7} {Set 2 Uint32 : 11}, {Set 3 Uint64 : 11}, {Set 4 String : xxxxxxxxxx_11} + Rows{7} Label{74 rev 1, 122b}, [12, +2)row | ERowOp 1: {1, 8} {Set 2 Uint32 : 12}, {Set 3 Uint64 : 12}, {Set 4 String : xxxxxxxxxx_12} | ERowOp 1: {1, 10} {Set 2 Uint32 : 13}, {Set 3 Uint64 : 13}, {Set 4 String : xxxxxxxxxx_13} + Rows{8} Label{84 rev 1, 122b}, [14, +2)row | ERowOp 1: {2, 1} {Set 2 Uint32 : 14}, {Set 3 Uint64 : 14}, {Set 4 String : xxxxxxxxxx_14} | ERowOp 1: {2, 3} {Set 2 Uint32 : 15}, {Set 3 Uint64 : 15}, {Set 4 String : xxxxxxxxxx_15} + Rows{9} Label{94 rev 1, 122b}, [16, +2)row | ERowOp 1: {2, 4} {Set 2 Uint32 : 16}, {Set 3 Uint64 : 16}, {Set 4 String : xxxxxxxxxx_16} | ERowOp 1: {2, 6} {Set 2 Uint32 : 17}, {Set 3 Uint64 : 17}, {Set 4 
String : xxxxxxxxxx_17} + Rows{11} Label{114 rev 1, 122b}, [18, +2)row | ERowOp 1: {2, 7} {Set 2 Uint32 : 18}, {Set 3 Uint64 : 18}, {Set 4 String : xxxxxxxxxx_18} | ERowOp 1: {2, 8} {Set 2 Uint32 : 19}, {Set 3 Uint64 : 19}, {Set 4 String : xxxxxxxxxx_19} + Rows{12} Label{124 rev 1, 122b}, [20, +2)row | ERowOp 1: {2, 10} {Set 2 Uint32 : 20}, {Set 3 Uint64 : 20}, {Set 4 String : xxxxxxxxxx_20} | ERowOp 1: {3, 1} {Set 2 Uint32 : 21}, {Set 3 Uint64 : 21}, {Set 4 String : xxxxxxxxxx_21} + Rows{13} Label{134 rev 1, 122b}, [22, +2)row | ERowOp 1: {3, 3} {Set 2 Uint32 : 22}, {Set 3 Uint64 : 22}, {Set 4 String : xxxxxxxxxx_22} | ERowOp 1: {3, 4} {Set 2 Uint32 : 23}, {Set 3 Uint64 : 23}, {Set 4 String : xxxxxxxxxx_23} + Rows{15} Label{154 rev 1, 122b}, [24, +2)row | ERowOp 1: {3, 6} {Set 2 Uint32 : 24}, {Set 3 Uint64 : 24}, {Set 4 String : xxxxxxxxxx_24} | ERowOp 1: {3, 7} {Set 2 Uint32 : 25}, {Set 3 Uint64 : 25}, {Set 4 String : xxxxxxxxxx_25} + Rows{16} Label{164 rev 1, 122b}, [26, +2)row | ERowOp 1: {3, 8} {Set 2 Uint32 : 26}, {Set 3 Uint64 : 26}, {Set 4 String : xxxxxxxxxx_26} | ERowOp 1: {3, 10} {Set 2 Uint32 : 27}, {Set 3 Uint64 : 27}, {Set 4 String : xxxxxxxxxx_27} + Rows{17} Label{174 rev 1, 122b}, [28, +2)row | ERowOp 1: {4, 1} {Set 2 Uint32 : 28}, {Set 3 Uint64 : 28}, {Set 4 String : xxxxxxxxxx_28} | ERowOp 1: {4, 3} {Set 2 Uint32 : 29}, {Set 3 Uint64 : 29}, {Set 4 String : xxxxxxxxxx_29} + Rows{19} Label{194 rev 1, 122b}, [30, +2)row | ERowOp 1: {4, 4} {Set 2 Uint32 : 30}, {Set 3 Uint64 : 30}, {Set 4 String : xxxxxxxxxx_30} | ERowOp 1: {4, 6} {Set 2 Uint32 : 31}, {Set 3 Uint64 : 31}, {Set 4 String : xxxxxxxxxx_31} + Rows{20} Label{204 rev 1, 122b}, [32, +2)row | ERowOp 1: {4, 7} {Set 2 Uint32 : 32}, {Set 3 Uint64 : 32}, {Set 4 String : xxxxxxxxxx_32} | ERowOp 1: {4, 8} {Set 2 Uint32 : 33}, {Set 3 Uint64 : 33}, {Set 4 String : xxxxxxxxxx_33} + Rows{21} Label{214 rev 1, 122b}, [34, +2)row | ERowOp 1: {4, 10} {Set 2 Uint32 : 34}, {Set 3 Uint64 : 34}, {Set 4 String : xxxxxxxxxx_34} | ERowOp 1: {5, 1} {Set 2 Uint32 : 35}, {Set 3 Uint64 : 35}, {Set 4 String : xxxxxxxxxx_35} + Rows{24} Label{244 rev 1, 122b}, [36, +2)row | ERowOp 1: {5, 3} {Set 2 Uint32 : 36}, {Set 3 Uint64 : 36}, {Set 4 String : xxxxxxxxxx_36} | ERowOp 1: {5, 4} {Set 2 Uint32 : 37}, {Set 3 Uint64 : 37}, {Set 4 String : xxxxxxxxxx_37} + Rows{25} Label{254 rev 1, 122b}, [38, +2)row | ERowOp 1: {5, 6} {Set 2 Uint32 : 38}, {Set 3 Uint64 : 38}, {Set 4 String : xxxxxxxxxx_38} | ERowOp 1: {5, 7} {Set 2 Uint32 : 39}, {Set 3 Uint64 : 39}, {Set 4 String : xxxxxxxxxx_39} Slices{ [0, 39] } >> TKeyValueCollectorTest::TestKeyValueCollectorSingle >> TKeyValueCollectorTest::TestKeyValueCollectorSingle [GOOD] >> TKeyValueCollectorTest::TestKeyValueCollectorSingleWithOneError >> KqpNamedExpressions::NamedExpressionRandomChanged2+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged2-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::ListStreamsValidation [GOOD] Test command err: 2025-06-24T21:09:40.846401Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626078968849514:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:40.846488Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/2btm/002f63/r3tmp/tmpTMftNf/pdisk_1.dat 2025-06-24T21:09:41.354232Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:41.354308Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:41.369939Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:41.426882Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18956, node 1 2025-06-24T21:09:41.563638Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:41.563665Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:41.563677Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:41.563803Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:09:41.857213Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:64809 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:42.081862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:09:42.190955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:64809 2025-06-24T21:09:42.386511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T21:09:43.960297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:44.165139Z node 1 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037890:1][1:7519626096148720310:2329] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:6:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-24T21:09:44.309153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:44.488003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:422) 2025-06-24T21:09:44.509664Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037908 not found 2025-06-24T21:09:44.509695Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037893 not found 2025-06-24T21:09:44.509708Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037903 not found 2025-06-24T21:09:44.509719Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037896 not found 2025-06-24T21:09:44.509730Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037900 not found 2025-06-24T21:09:44.509740Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, 
TabletId: 72075186224037902 not found 2025-06-24T21:09:44.509751Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037905 not found 2025-06-24T21:09:44.509762Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037899 not found 2025-06-24T21:09:44.509783Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037907 not found 2025-06-24T21:09:44.513153Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037904 not found 2025-06-24T21:09:44.513185Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037906 not found 2025-06-24T21:09:44.513197Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037894 not found 2025-06-24T21:09:44.513213Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037897 not found 2025-06-24T21:09:44.515185Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037901 not found 2025-06-24T21:09:44.515212Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037895 not found 2025-06-24T21:09:44.515223Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037898 not found 2025-06-24T21:09:46.278078Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626105635561620:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:46.278468Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f63/r3tmp/tmpx88kqz/pdisk_1.dat 2025-06-24T21:09:46.449651Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:46.474389Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:46.474469Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:46.479371Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18793, node 4 2025-06-24T21:09:46.557158Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:46.557189Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:46.557200Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:46.557349Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23018 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:46.828338Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:46.888175Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:23018 2025-06-24T21:09:47.071589Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-24T21:09:47.293198Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:47.303830Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:47.400560Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:47.489777Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:50.936630Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626123198297458:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:50.936822Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f63/r3tmp/tmpiyhXDd/pdisk_1.dat 2025-06-24T21:09:51.130655Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:51.141077Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:51.141191Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:51.151384Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26355, node 7 2025-06-24T21:09:51.215079Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:51.215121Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:51.215131Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:51.215331Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22300 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:51.543610Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:51.603999Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:22300 2025-06-24T21:09:51.823689Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-24T21:09:51.946514Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:52.081592Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519626131788234125:3442] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/stream_TestCreateExistingStream\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypePersQueueGroup, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:55.675188Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626143666841369:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:55.675638Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f63/r3tmp/tmpe9S8eD/pdisk_1.dat 2025-06-24T21:09:55.886043Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:55.915252Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:55.915352Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:55.924618Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2987, node 10 2025-06-24T21:09:55.972416Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:55.972443Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:55.972484Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:55.972661Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7388 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:09:56.280328Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:56.334551Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:7388 2025-06-24T21:09:56.537632Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T21:09:56.687818Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> DataStreams::TestPutRecords [GOOD] >> TKeyValueCollectorTest::TestKeyValueCollectorSingleWithOneError [GOOD] >> TKeyValueCollectorTest::TestKeyValueCollectorMultiple >> TKeyValueCollectorTest::TestKeyValueCollectorMultiple [GOOD] >> KqpNewEngine::DeleteByKey [GOOD] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueCollectorTest::TestKeyValueCollectorMultiple [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestPutRecords [GOOD] Test command err: 2025-06-24T21:09:40.872897Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626082188287667:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:40.879371Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f7d/r3tmp/tmp9PdR6B/pdisk_1.dat 2025-06-24T21:09:41.380453Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:41.380568Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:41.384034Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:41.415449Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24433, node 1 2025-06-24T21:09:41.557763Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:41.557788Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:41.557796Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:41.557972Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:09:41.878458Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12812 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:42.122873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:42.212331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:12812 2025-06-24T21:09:42.408316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-24T21:09:45.529145Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626101761337367:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:45.529235Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f7d/r3tmp/tmpuYzFiw/pdisk_1.dat 2025-06-24T21:09:45.748660Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:45.769100Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:45.769198Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:45.777704Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64358, node 4 2025-06-24T21:09:45.899214Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:45.899243Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:45.899257Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:45.899417Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23646 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:46.219964Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:09:46.298447Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:23646 2025-06-24T21:09:46.517848Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T21:09:46.566645Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:46.728269Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:46.806340Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) encryption_type: NONE sequence_number: "0" shard_id: "shard-000000" encryption_type: NONE records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "0" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000004" } records { sequence_number: "2" shard_id: "shard-000002" } records { sequence_number: "3" shard_id: "shard-000002" } records { sequence_number: "4" shard_id: "shard-000002" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000003" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000003" } records { sequence_number: "2" shard_id: "shard-000003" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000003" } records { sequence_number: "5" shard_id: "shard-000002" } records { sequence_number: "6" shard_id: "shard-000002" } records { sequence_number: "0" shard_id: "shard-000001" } records { sequence_number: "3" shard_id: "shard-000004" } records { sequence_number: "4" shard_id: "shard-000004" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000003" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000004" } 
records { sequence_number: "7" shard_id: "shard-000002" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } 2025-06-24T21:09:46.865969Z :INFO: [/Root/] [/Root/] [5e05fe9f-4be3c5da-f5451547-4ae96d4a] Starting read session 2025-06-24T21:09:46.866054Z :DEBUG: [/Root/] [/Root/] [5e05fe9f-4be3c5da-f5451547-4ae96d4a] Starting session to cluster null (localhost:64358) 2025-06-24T21:09:46.870584Z :DEBUG: [/Root/] [/Root/] [5e05fe9f-4be3c5da-f5451547-4ae96d4a] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:09:46.870631Z :DEBUG: [/Root/] [/Root/] [5e05fe9f-4be3c5da-f5451547-4ae96d4a] [null] New ... st: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:09:55.330813Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 0 (0-1) 2025-06-24T21:09:55.330827Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 4 (0-1) 2025-06-24T21:09:55.330924Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (0-1) 2025-06-24T21:09:55.330932Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2025-06-24T21:09:55.330981Z :DEBUG: [/Root/] Take Data. Partition 0. Read: {0, 0} (0-0) 2025-06-24T21:09:55.331031Z :DEBUG: [/Root/] Take Data. Partition 0. Read: {0, 1} (1-1) 2025-06-24T21:09:55.331074Z :DEBUG: [/Root/] [/Root/] [6ebe9453-b9661a8f-224c34e4-2efb2587] [null] The application data is transferred to the client. Number of messages 2, size 0 bytes 2025-06-24T21:09:55.331108Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (3-3) 2025-06-24T21:09:55.331120Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (4-4) 2025-06-24T21:09:55.331162Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (5-5) 2025-06-24T21:09:55.331175Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (6-6) 2025-06-24T21:09:55.331208Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (7-7) 2025-06-24T21:09:55.331274Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (8-8) 2025-06-24T21:09:55.331307Z :DEBUG: [/Root/] Take Data. Partition 4. Read: {0, 0} (0-0) 2025-06-24T21:09:55.331346Z :DEBUG: [/Root/] Take Data. Partition 4. Read: {1, 0} (1-1) 2025-06-24T21:09:55.331374Z :DEBUG: [/Root/] [/Root/] [6ebe9453-b9661a8f-224c34e4-2efb2587] [null] The application data is transferred to the client. Number of messages 2, size 0 bytes 2025-06-24T21:09:55.331498Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {0, 0} (0-0) 2025-06-24T21:09:55.332434Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {1, 0} (1-1) 2025-06-24T21:09:55.335463Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {2, 0} (2-2) 2025-06-24T21:09:55.336377Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {3, 0} (3-3) 2025-06-24T21:09:55.340282Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {4, 0} (4-4) 2025-06-24T21:09:55.341239Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {5, 0} (5-5) 2025-06-24T21:09:55.342192Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {6, 0} (6-6) 2025-06-24T21:09:55.343170Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {7, 0} (7-7) 2025-06-24T21:09:55.351433Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {8, 0} (8-8) 2025-06-24T21:09:55.351508Z :DEBUG: [/Root/] [/Root/] [6ebe9453-b9661a8f-224c34e4-2efb2587] [null] The application data is transferred to the client. 
Number of messages 9, size 8388611 bytes 2025-06-24T21:09:55.354559Z :INFO: [/Root/] [/Root/] [6ebe9453-b9661a8f-224c34e4-2efb2587] Closing read session. Close timeout: 0.000000s 2025-06-24T21:09:55.354650Z :INFO: [/Root/] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): null:stream_TestPutRecordsCornerCases:2:5:0:0 null:stream_TestPutRecordsCornerCases:4:4:1:0 null:stream_TestPutRecordsCornerCases:1:3:8:0 null:stream_TestPutRecordsCornerCases:0:2:1:0 null:stream_TestPutRecordsCornerCases:3:1:3:0 2025-06-24T21:09:55.354713Z :INFO: [/Root/] [/Root/] [6ebe9453-b9661a8f-224c34e4-2efb2587] Counters: { Errors: 0 CurrentSessionLifetimeMs: 142 BytesRead: 9437699 MessagesRead: 17 BytesReadCompressed: 9437699 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:09:55.354825Z :NOTICE: [/Root/] [/Root/] [6ebe9453-b9661a8f-224c34e4-2efb2587] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-24T21:09:55.354880Z :DEBUG: [/Root/] [/Root/] [6ebe9453-b9661a8f-224c34e4-2efb2587] [null] Abort session to cluster 2025-06-24T21:09:55.355957Z :NOTICE: [/Root/] [/Root/] [6ebe9453-b9661a8f-224c34e4-2efb2587] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:09:55.356356Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer user1 session user1_7_1_16780753246983712804_v1 grpc read failed 2025-06-24T21:09:55.356396Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer user1 session user1_7_1_16780753246983712804_v1 grpc closed 2025-06-24T21:09:55.356437Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer user1 session user1_7_1_16780753246983712804_v1 is DEAD 2025-06-24T21:09:56.645420Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626151273477506:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:56.645484Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f7d/r3tmp/tmpPoyVnZ/pdisk_1.dat 2025-06-24T21:09:56.778928Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:56.803184Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:56.803299Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:56.810340Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64407, node 10 2025-06-24T21:09:56.857230Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:56.857252Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:56.857261Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:56.857425Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19562 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
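Note (editor's annotation): the DEBUG trace above (Decompression task done / Take Data / per-partition read offsets and Counters) is produced by the native SDK read session used in the test. An analogous, but not identical, consumer path through the Kinesis-compatible API is sketched below as an assumption for illustration; endpoint and credentials are placeholders.

```python
# Consumer-side sketch via the Kinesis-compatible API; an analogue of the SDK
# read session traced above, not the same code path. Endpoint/credentials are
# placeholders.
import boto3

client = boto3.client(
    "kinesis",
    endpoint_url="http://localhost:8443",
    region_name="ru-central1",
    aws_access_key_id="unused",
    aws_secret_access_key="unused",
)

stream = "stream_TestPutRecordsCornerCases"
for shard in client.list_shards(StreamName=stream)["Shards"]:
    iterator = client.get_shard_iterator(
        StreamName=stream,
        ShardId=shard["ShardId"],
        ShardIteratorType="TRIM_HORIZON",   # read each partition from the start
    )["ShardIterator"]
    batch = client.get_records(ShardIterator=iterator, Limit=100)
    for rec in batch["Records"]:
        # Mirrors the per-partition offsets and byte counts reported by the
        # read session's Counters line above.
        print(shard["ShardId"], rec["SequenceNumber"], len(rec["Data"]))
```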
2025-06-24T21:09:57.136550Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:57.248319Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:19562 2025-06-24T21:09:57.432487Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-24T21:09:57.651182Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:57.683884Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101)
: Error: Access for stream /Root/stream_TestPutRecords is denied for subject user2@builtin, code: 500018 2025-06-24T21:09:57.785503Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) PutRecordsResponse = encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "0" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000004" } records { sequence_number: "2" shard_id: "shard-000002" } records { sequence_number: "3" shard_id: "shard-000002" } records { sequence_number: "4" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000003" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000003" } records { sequence_number: "2" shard_id: "shard-000003" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000003" } records { sequence_number: "5" shard_id: "shard-000002" } records { sequence_number: "6" shard_id: "shard-000002" } records { sequence_number: "0" shard_id: "shard-000001" } records { sequence_number: "3" shard_id: "shard-000004" } records { sequence_number: "4" shard_id: "shard-000004" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000003" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000004" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000002" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000000" } PutRecord response = encryption_type: NONE sequence_number: "7" shard_id: "shard-000004" >> TTxDataShardMiniKQL::CrossShard_6_Local [GOOD] >> TTxDataShardMiniKQL::MemoryUsageImmediateHugeTx >> TExecutorDb::FullScan [GOOD] >> TExecutorDb::CoordinatorSimulation ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::DeleteByKey [GOOD] Test command err: Trying to start YDB, gRPC: 22346, MsgBus: 2865 2025-06-24T21:09:14.229732Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625971480453884:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:14.237102Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031cb/r3tmp/tmpbK8kVx/pdisk_1.dat 2025-06-24T21:09:14.592508Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:14.592942Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription 
[1:7519625971480453864:2079] 1750799354224536 != 1750799354224539 2025-06-24T21:09:14.642934Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:14.643077Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 22346, node 1 2025-06-24T21:09:14.644267Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:14.732228Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:14.732263Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:14.732270Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:14.732382Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2865 TClient is connected to server localhost:2865 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:09:15.253236Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:15.355318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:17.475791Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625984365356396:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:17.475919Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:17.733591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:17.859560Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625984365356499:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:17.859661Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:17.859827Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625984365356504:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:17.863549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:17.872995Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625984365356506:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:09:17.954885Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625984365356557:2395] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:19.226078Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625971480453884:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:19.226157Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:09:19.260077Z node 1 :RPC_REQUEST WARN: rpc_stream_execute_scan_query.cpp:410: Client lost Trying to start YDB, gRPC: 29208, MsgBus: 29155 2025-06-24T21:09:19.980921Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625989678403361:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:19.982086Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031cb/r3tmp/tmpWym4rJ/pdisk_1.dat 2025-06-24T21:09:20.114813Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:20.116178Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625989678403340:2079] 1750799359980113 != 1750799359980116 TServer::EnableGrpc on GrpcPort 29208, node 2 2025-06-24T21:09:20.133978Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:20.134068Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:20.135618Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:20.187402Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:20.187431Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:20.187439Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:20.187575Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29155 TClient is connected to server localhost:29155 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:20.687517Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:20.692586Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:09:20.703522Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:20.773594Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2 ... 
57594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:52.671992Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 22409, MsgBus: 10307 2025-06-24T21:09:53.934934Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626137522959635:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:53.935042Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031cb/r3tmp/tmpFfAM97/pdisk_1.dat 2025-06-24T21:09:54.066289Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:54.067116Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519626137522959615:2079] 1750799393934455 != 1750799393934458 2025-06-24T21:09:54.086107Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:54.086192Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:54.087783Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22409, node 7 2025-06-24T21:09:54.131263Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:54.131291Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:54.131303Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:54.131458Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10307 TClient is connected to server localhost:10307 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:09:54.685781Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:54.700367Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:54.781949Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:54.941114Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:54.984680Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:55.070505Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:57.838253Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626154702830442:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:57.838399Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:57.900974Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:57.937095Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:58.007922Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:58.036754Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:58.067052Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:58.098376Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:58.130838Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:58.186953Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626158997798401:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:58.187070Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626158997798406:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:58.187070Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:58.191250Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:58.202179Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519626158997798408:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:09:58.269874Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519626158997798459:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:58.935253Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519626137522959635:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:58.935329Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpNewEngine::JoinProjectMulti [GOOD] >> KqpNewEngine::JoinMultiConsumer >> TTxDataShardMiniKQL::MemoryUsageImmediateHugeTx [GOOD] >> TVersions::Wreck2 [GOOD] >> TVersions::Wreck2Reverse >> KqpProxy::PassErrroViaSessionActor >> ScriptExecutionsTest::RunCheckLeaseStatus >> KqpProxy::NoLocalSessionExecution >> TableCreation::ConcurrentTableCreationWithDifferentVersions >> TableCreation::ConcurrentTableCreation >> TableCreation::MultipleTablesCreation >> TTxDataShardMiniKQL::CrossShard_2_SwapAndCopy [GOOD] >> TTxDataShardMiniKQL::CrossShard_3_AllToOne >> TableCreation::SimpleTableCreation ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::MemoryUsageImmediateHugeTx [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:117:2057] recipient: [1:112:2141] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:117:2057] recipient: [1:112:2141] Leader for TabletID 9437184 is [1:133:2154] sender: [1:135:2057] recipient: [1:112:2141] 2025-06-24T21:09:30.959141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:09:30.959218Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:30.962031Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:30.978686Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:30.980487Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:09:30.981981Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:30.995998Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:31.056624Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:31.056774Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:31.060961Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:09:31.061079Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:09:31.061135Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 
2025-06-24T21:09:31.062153Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:31.062520Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:31.062598Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 Leader for TabletID 9437184 is [1:133:2154] sender: [1:211:2057] recipient: [1:14:2061] 2025-06-24T21:09:31.156191Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:31.193671Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:09:31.193938Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:31.194058Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:09:31.194107Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:31.194147Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:09:31.194183Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:31.194440Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:31.194492Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:31.194727Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:31.194827Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:31.194996Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:31.195039Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:31.195107Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:31.195161Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:31.195218Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:31.195273Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:31.195319Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:31.195421Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:31.195472Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:31.195547Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], 
sessionId# [0:0:0] 2025-06-24T21:09:31.198563Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:09:31.198630Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:31.198733Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:31.198937Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:09:31.198992Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:09:31.199057Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:09:31.199112Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:31.199175Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:09:31.199227Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:09:31.199267Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:31.199636Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:31.199677Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:09:31.199719Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:31.199776Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:31.199829Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:09:31.199865Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:31.199925Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:09:31.199963Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:31.200003Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:31.216153Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:31.216253Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:31.216298Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete 
execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:31.216358Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:31.216446Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:09:31.217005Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:31.217056Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:31.217100Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:31.217248Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 2 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:31.217285Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:31.217425Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [2:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:31.217471Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [2:1] at 9437184 is Executed 2025-06-24T21:09:31.217527Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:09:31.217592Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:31.233970Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 2 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 2 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:31.234063Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:31.234362Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:31.234412Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:31.234476Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:31.234520Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:31.234619Z node 1 :TX_DATASHARD TRACE: datashard_pipelin ... 
shard_impl.h:3132: StateWork, received event# 268830214, Sender [22:291:2272], Recipient [22:237:2228]: NKikimrTabletBase.TEvGetCounters 2025-06-24T21:10:02.155977Z node 22 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269551617, Sender [22:102:2135], Recipient [22:237:2228]: NKikimrTxDataShard.TEvGetShardState Source { RawX1: 102 RawX2: 94489282647 } 2025-06-24T21:10:02.156053Z node 22 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvGetShardState 2025-06-24T21:10:02.156388Z node 22 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [22:296:2276], Recipient [22:237:2228]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:10:02.156421Z node 22 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:10:02.156465Z node 22 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [22:295:2275], serverId# [22:296:2276], sessionId# [0:0:0] 2025-06-24T21:10:02.156645Z node 22 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [22:102:2135], Recipient [22:237:2228]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 102 RawX2: 94489282647 } TxBody: "\032\324\002\037\002\006Arg\005\205\n\205\000\205\004?\000\205\002\202\0047\034MyReads MyWrites\205\004?\000\206\202\024Reply\024Write?\000?\000 AllReads\030MyKeys\014Run4ShardsForRead4ShardsToWrite\005?\000\005?\004?\014\005?\002)\211\006\202\203\005\004\213\002\203\004\205\002\203\004\01057$UpdateRow\000\003?\016 h\020\000\000\000\000\000\000\r\000\000\000\000\000\000\000\013?\022\003?\020T\001\005?\026)\211\n?\024\206\203\004?\024? ?\024\203\004\020Fold\000)\211\002?\"\206? \034Collect\000)\211\006?(? \203\004\203\0024ListFromRange\000\003? \000\003?,\003\022z\003?.\004\007\010\000\n\003?\024\000)\251\000? \002\000\004)\251\000?\024\002\000\002)\211\006?$\203\005@? 
?\024\030Invoke\000\003?F\006Add?@?D\001\006\002\014\000\007\016\000\003\005?\010?\014\006\002?\006?R\000\003?\014?\014\037/ \0018\000" TxId: 2 ExecLevel: 0 Flags: 0 2025-06-24T21:10:02.156674Z node 22 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:10:02.156775Z node 22 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:10:02.157666Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CheckDataTx 2025-06-24T21:10:02.157731Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-24T21:10:02.157770Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CheckDataTx 2025-06-24T21:10:02.157809Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-24T21:10:02.157843Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit BuildAndWaitDependencies 2025-06-24T21:10:02.157876Z node 22 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-24T21:10:02.157935Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 9437184 2025-06-24T21:10:02.157974Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-24T21:10:02.157993Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-24T21:10:02.158012Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit ExecuteDataTx 2025-06-24T21:10:02.158031Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-24T21:10:02.158066Z node 22 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-24T21:10:02.158112Z node 22 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:124: Operation [0:2] at 9437184 requested 132374 more memory 2025-06-24T21:10:02.158155Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Restart 2025-06-24T21:10:02.158468Z node 22 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:10:02.158513Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-24T21:10:02.158554Z node 22 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-24T21:10:02.159364Z node 22 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:2] at 9437184 exceeded memory limit 132502 and requests 1060016 more for the next try 2025-06-24T21:10:02.159506Z node 22 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 2 released its data 2025-06-24T21:10:02.159549Z node 22 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Restart 2025-06-24T21:10:02.159703Z node 22 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:10:02.159727Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-24T21:10:02.160385Z node 22 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 2 at 9437184 restored its data 2025-06-24T21:10:02.160442Z node 22 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-24T21:10:02.160936Z node 22 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:2] at 9437184 exceeded memory limit 1192518 and requests 9540144 more for the next try 2025-06-24T21:10:02.161032Z node 22 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 2 released its data 2025-06-24T21:10:02.161069Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Restart 2025-06-24T21:10:02.161238Z node 22 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:10:02.161269Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-24T21:10:02.161814Z node 22 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 2 at 9437184 restored its data 2025-06-24T21:10:02.161860Z node 22 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-24T21:10:02.162354Z node 22 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:2] at 9437184 exceeded memory limit 10732662 and requests 85861296 more for the next try 2025-06-24T21:10:02.162451Z node 22 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 2 released its data 2025-06-24T21:10:02.162485Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Restart 2025-06-24T21:10:02.162644Z node 22 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:10:02.162676Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-24T21:10:02.163209Z node 22 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 2 at 9437184 restored its data 2025-06-24T21:10:02.163256Z node 22 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-24T21:10:02.382231Z node 22 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:2] at tablet 9437184 with status COMPLETE 2025-06-24T21:10:02.382321Z node 22 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:2] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 8, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-24T21:10:02.382409Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is 
ExecutedNoMoreRestarts 2025-06-24T21:10:02.382442Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit ExecuteDataTx 2025-06-24T21:10:02.382483Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit FinishPropose 2025-06-24T21:10:02.382532Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit FinishPropose 2025-06-24T21:10:02.382612Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:10:02.382639Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit FinishPropose 2025-06-24T21:10:02.382676Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit CompletedOperations 2025-06-24T21:10:02.382710Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CompletedOperations 2025-06-24T21:10:02.382747Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-24T21:10:02.382771Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CompletedOperations 2025-06-24T21:10:02.382812Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 9437184 has finished 2025-06-24T21:10:02.395083Z node 22 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:10:02.395171Z node 22 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 9437184 on unit FinishPropose 2025-06-24T21:10:02.395225Z node 22 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 2 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: COMPLETE 2025-06-24T21:10:02.395325Z node 22 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:02.396278Z node 22 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [22:301:2281], Recipient [22:237:2228]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:10:02.396347Z node 22 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:10:02.396392Z node 22 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [22:300:2280], serverId# [22:301:2281], sessionId# [0:0:0] 2025-06-24T21:10:02.396526Z node 22 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830214, Sender [22:299:2279], Recipient [22:237:2228]: NKikimrTabletBase.TEvGetCounters >> KqpProxy::InvalidSessionID >> KqpAgg::GroupByLimit [GOOD] >> KqpAgg::AggHashShuffle+UseSink >> KqpSqlIn::SecondaryIndex_TupleParameter [GOOD] >> KqpSqlIn::SecondaryIndex_TupleLiteral >> KqpProxy::PingNotExistedSession >> TKeyValueTest::TestBasicWriteReadOverrun [GOOD] >> TKeyValueTest::TestBlockedEvGetRequest >> TKeyValueTest::TestBlockedEvGetRequest [GOOD] >> KqpProxy::PassErrroViaSessionActor [GOOD] >> KqpProxy::NodeDisconnectedTest >> DataStreams::TestGetRecords1MBMessagesOneByOneBySeqNo [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestBlockedEvGetRequest [GOOD] Test command 
err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:82:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:82:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! 
!Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:83:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:85:2114] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:89:2057] recipient: [7:85:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! 
new actor is[7:88:2115] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:78:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:81:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:82:2057] recipient: [10:80:2111] Leader for TabletID 72057594037927937 is [10:83:2112] sender: [10:84:2057] recipient: [10:80:2111] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! new actor is[10:83:2112] Leader for TabletID 72057594037927937 is [10:83:2112] sender: [10:169:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:78:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:81:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:82:2057] recipient: [11:80:2111] Leader for TabletID 72057594037927937 is [11:83:2112] sender: [11:84:2057] recipient: [11:80:2111] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! 
new actor is[11:83:2112] Leader for TabletID 72057594037927937 is [11:83:2112] sender: [11:169:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:79:2057] recipient: [12:37:2084] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:82:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:83:2057] recipient: [12:81:2111] Leader for TabletID 72057594037927937 is [12:84:2112] sender: [12:85:2057] recipient: [12:81:2111] !Reboot 72057594037927937 (actor [12:58:2098]) rebooted! !Reboot 72057594037927937 (actor [12:58:2098]) tablet resolver refreshed! new actor is[12:84:2112] Leader for TabletID 72057594037927937 is [12:84:2112] sender: [12:170:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:53:2096] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:59:2057] recipient: [13:53:2096] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:76:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:82:2057] recipient: [13:37:2084] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:85:2057] recipient: [13:84:2114] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:86:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:87:2115] sender: [13:88:2057] recipient: [13:84:2114] !Reboot 72057594037927937 (actor [13:58:2098]) rebooted! !Reboot 72057594037927937 (actor [13:58:2098]) tablet resolver refreshed! new actor is[13:87:2115] Leader for TabletID 72057594037927937 is [13:87:2115] sender: [13:173:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:52:2096] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:59:2057] recipient: [14:52:2096] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:76:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:58:2098]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:82:2057] recipient: [14:37:2084] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:85:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:86:2057] recipient: [14:84:2114] Leader for TabletID 72057594037927937 is [14:87:2115] sender: [14:88:2057] recipient: [14:84:2114] !Reboot 72057594037927937 (actor [14:58:2098]) rebooted! !Reboot 72057594037927937 (actor [14:58:2098]) tablet resolver refreshed! 
new actor is[14:87:2115] Leader for TabletID 72057594037927937 is [14:87:2115] sender: [14:173:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2096] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:59:2057] recipient: [15:52:2096] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:76:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:83:2057] recipient: [15:37:2084] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:86:2057] recipient: [15:85:2114] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:87:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:88:2115] sender: [15:89:2057] recipient: [15:85:2114] !Reboot 72057594037927937 (actor [15:58:2098]) rebooted! !Reboot 72057594037927937 (actor [15:58:2098]) tablet resolver refreshed! new actor is[15:88:2115] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2096] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:59:2057] recipient: [16:53:2096] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:76:2057] recipient: [16:14:2061] 2025-06-24T21:10:05.064821Z node 17 :KEYVALUE ERROR: keyvalue_storage_read_request.cpp:254: {KV323@keyvalue_storage_read_request.cpp:254} Received BLOCKED EvGetResult. KeyValue# 72057594037927937 Status# BLOCKED Deadline# 18446744073709551 Now# 0 SentAt# 1970-01-01T00:00:00.000000Z GotAt# 0 ErrorReason# block race detected 2025-06-24T21:10:05.068280Z node 17 :TABLET_MAIN ERROR: tablet_sys.cpp:934: Tablet: 72057594037927937 HandleBlockBlobStorageResult, msg->Status: ALREADY, not discovered Marker# TSYS21 2025-06-24T21:10:05.068333Z node 17 :TABLET_MAIN ERROR: tablet_sys.cpp:1849: Tablet: 72057594037927937 Type: KeyValue, EReason: ReasonBootBSError, SuggestedGeneration: 0, KnownGeneration: 3 Marker# TSYS31 >> KqpProxy::InvalidSessionID [GOOD] >> KqpProxy::LoadedMetadataAfterCompilationTimeout ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestGetRecords1MBMessagesOneByOneBySeqNo [GOOD] Test command err: 2025-06-24T21:09:41.031035Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626084867752896:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:41.031160Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f25/r3tmp/tmph5vbn1/pdisk_1.dat 2025-06-24T21:09:41.522854Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:41.523020Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:41.531049Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:41.535014Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11002, node 1 2025-06-24T21:09:41.630397Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:41.630430Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:41.630440Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:41.630597Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21856 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:09:42.055362Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:42.083479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:42.176598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:21856 2025-06-24T21:09:42.383381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
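The ExecuteDataTx trace earlier in this log (tablet 9437184, tx 2) shows the datashard repeatedly hitting its memory limit, releasing the transaction data, and restarting with a larger grant — roughly 130 KB, then ~1.2 MB, ~10.7 MB, and finally ~96 MB — before the operation completes. Below is a minimal standalone sketch of that escalate-and-retry pattern; the type and function names are hypothetical and do not reproduce the NKikimr implementation.

```cpp
// Sketch only: escalate the memory limit and retry until the attempt completes.
// EExecStatus / TAttemptResult / ExecuteWithEscalatingLimit are invented names.
#include <cstdint>
#include <functional>
#include <iostream>

enum class EExecStatus { Complete, Restart };

struct TAttemptResult {
    EExecStatus Status;
    uint64_t ExtraMemoryRequested; // additional bytes wanted for the next try
};

// Runs `attempt` with a growing memory limit until it completes or the cap is hit.
bool ExecuteWithEscalatingLimit(
        const std::function<TAttemptResult(uint64_t limit)>& attempt,
        uint64_t initialLimit,
        uint64_t hardCap) {
    uint64_t limit = initialLimit;
    while (limit <= hardCap) {
        TAttemptResult res = attempt(limit);
        if (res.Status == EExecStatus::Complete) {
            return true;
        }
        // The trace above shows each retry asking for roughly an order of magnitude more.
        limit += res.ExtraMemoryRequested;
        std::cout << "restarting with limit " << limit << " bytes\n";
    }
    return false; // could not satisfy the request within the cap
}

int main() {
    // Toy attempt: pretend the transaction needs ~10 MiB to finish.
    const uint64_t needed = 10u << 20;
    bool ok = ExecuteWithEscalatingLimit(
        [&](uint64_t limit) -> TAttemptResult {
            if (limit >= needed) return {EExecStatus::Complete, 0};
            return {EExecStatus::Restart, (needed - limit) * 8}; // over-ask, as in the log
        },
        /*initialLimit=*/132u << 10,
        /*hardCap=*/1u << 30);
    std::cout << (ok ? "COMPLETE" : "FAILED") << "\n";
}
```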
2025-06-24T21:09:42.718447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:422) 2025-06-24T21:09:42.744389Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-24T21:09:42.744423Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-06-24T21:09:42.749459Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-24T21:09:42.749729Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-24T21:09:45.053901Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626100863260597:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:45.054001Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f25/r3tmp/tmpqoyWX4/pdisk_1.dat 2025-06-24T21:09:45.168947Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:45.192065Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:45.192150Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:45.197804Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29688, node 4 2025-06-24T21:09:45.327875Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:45.327897Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:45.327905Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:45.328041Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28242 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:45.599713Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:45.699322Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:28242 2025-06-24T21:09:45.931039Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
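The TKeyValueTest output above walks a fixed list of event types (TEvServerConnected, TEvExecuteTransaction, TEvIntermediate, TEvRead, TEvNotify), rebooting tablet 72057594037927937 the first time each event is observed and checking that a new leader actor comes up. The following is a minimal sketch of that reboot-per-event fault-injection loop using a toy in-process "tablet"; it is illustrative only and is not the YDB test harness.

```cpp
// Sketch only: for each interesting event type, rerun the scenario, inject a
// reboot when that event first appears, then verify the operation still works.
#include <cassert>
#include <iostream>
#include <string>
#include <vector>

struct TFakeTablet {
    int Generation = 1;
    void Reboot() { ++Generation; }                            // simulate a leader restart
    bool Execute(const std::string& /*op*/) { return true; }   // always succeeds in this toy
};

// Runs the scenario once, rebooting the tablet when `rebootOn` is emitted.
bool RunScenarioWithReboot(const std::string& rebootOn) {
    TFakeTablet tablet;
    const std::vector<std::string> emitted = {
        "TEvServerConnected", "TEvExecuteTransaction", "TEvIntermediate",
        "TEvRead", "TEvNotify"};
    bool rebooted = false;
    for (const auto& ev : emitted) {
        if (!rebooted && ev == rebootOn) {
            std::cout << "!Reboot on event " << ev << "\n";
            tablet.Reboot();
            rebooted = true;
        }
    }
    // After the injected reboot the operation must still complete.
    return tablet.Execute("read-after-reboot");
}

int main() {
    const std::vector<std::string> events = {
        "TEvServerConnected", "TEvExecuteTransaction", "TEvIntermediate",
        "TEvRead", "TEvNotify"};
    for (const auto& ev : events) {
        assert(RunScenarioWithReboot(ev));
    }
    std::cout << "all reboot points passed\n";
}
```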
2025-06-24T21:09:46.063485Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:46.158753Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:46.224582Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:422) 2025-06-24T21:09:46.243026Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037889 not found 2025-06-24T21:09:46.243065Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037891 not found 2025-06-24T21:09:46.243077Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037890 not found 2025-06-24T21:09:46.243093Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037888 not found 2025-06-24T21:09:46.252965Z node 4 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2025-06-24T21:09:46.253051Z node 4 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found 2025-06-24T21:09:46.253086Z node 4 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2025-06-24T21:09:46.253158Z node 4 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found 2025-06-24T21:09:49.554003Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626118969771832:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:49.554128Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f25/r3tmp/tmpTSubha/pdisk_1.dat 2025-06-24T21:09:49.652343Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:49.672807Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:49.672900Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:49.679531Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23674, node 7 2025-06-24T21:09:49.720578Z node 7 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:49.720600Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:49.720609Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:49.720743Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17691 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:49.927244Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:49.971289Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:17691 2025-06-24T21:09:50.163706Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
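Every record in this capture follows the same layout — ISO timestamp, node id, component, severity, source location, message — and consecutive records are often concatenated onto one physical line. A small sketch of how such a chunk could be split back into records is shown below; the regex is an assumption derived from the lines above, not a tool that ships with YDB.

```cpp
// Sketch only: split a concatenated chunk of this log into individual records.
#include <iostream>
#include <regex>
#include <string>

int main() {
    // Sample chunk taken from the output above (second message truncated).
    const std::string chunk =
        "2025-06-24T21:09:49.720743Z node 7 :NET_CLASSIFIER ERROR: "
        "net_classifier.cpp:228: got bad distributable configuration "
        "2025-06-24T21:09:49.927244Z node 7 :FLAT_TX_SCHEMESHARD WARN: "
        "schemeshard__operation.cpp:195: Operation part proposed ok";

    // Timestamp, node id, component, severity, then everything up to the next timestamp.
    const std::regex rec(
        R"((\d{4}-\d{2}-\d{2}T[\d:.]+Z) node (\d+) :(\w+) (\w+): (.*?)(?=\d{4}-\d{2}-\d{2}T|$))");

    for (std::sregex_iterator it(chunk.begin(), chunk.end(), rec), end; it != end; ++it) {
        const std::smatch& m = *it;
        std::cout << m[1] << " | node " << m[2] << " | " << m[3]
                  << " | " << m[4] << " | " << m[5] << "\n";
    }
}
```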
2025-06-24T21:09:50.309256Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:50.358249Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:09:50.396624Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:422) 2025-06-24T21:09:50.409073Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037889 not found 2025-06-24T21:09:50.409111Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037891 not found 2025-06-24T21:09:50.409125Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037890 not found 2025-06-24T21:09:50.409150Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037888 not found 2025-06-24T21:09:50.416375Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2025-06-24T21:09:50.416443Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found 2025-06-24T21:09:50.416491Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2025-06-24T21:09:50.416551Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found 2025-06-24T21:09:53.527360Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626137248605588:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:53.527420Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f25/r3tmp/tmpCk6ZVF/pdisk_1.dat 2025-06-24T21:09:53.629993Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:53.645399Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:53.645482Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> 
Connecting 2025-06-24T21:09:53.651089Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15533, node 10 2025-06-24T21:09:53.696488Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:53.696510Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:53.696518Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:53.696672Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64811 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:53.970240Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:54.038912Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:64811 2025-06-24T21:09:54.219902Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
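The HIVE warnings above report each node's VolatileState moving Unknown -> Disconnected -> Connecting -> Connected as the test cluster comes up. A minimal sketch of that transition order as a checkable state machine follows; the enum and transition table are illustrative assumptions, not the NKikimr definitions.

```cpp
// Sketch only: encode the node-state order seen in the HIVE log lines above.
#include <array>
#include <cassert>
#include <cstddef>
#include <iostream>

enum class ENodeState { Unknown, Disconnected, Connecting, Connected };

const char* ToString(ENodeState s) {
    switch (s) {
        case ENodeState::Unknown:      return "Unknown";
        case ENodeState::Disconnected: return "Disconnected";
        case ENodeState::Connecting:   return "Connecting";
        case ENodeState::Connected:    return "Connected";
    }
    return "?";
}

// True if the transition matches the order reported in the log.
bool IsExpectedTransition(ENodeState from, ENodeState to) {
    return (from == ENodeState::Unknown      && to == ENodeState::Disconnected) ||
           (from == ENodeState::Disconnected && to == ENodeState::Connecting)   ||
           (from == ENodeState::Connecting   && to == ENodeState::Connected);
}

int main() {
    std::array<ENodeState, 4> path = {
        ENodeState::Unknown, ENodeState::Disconnected,
        ENodeState::Connecting, ENodeState::Connected};
    for (std::size_t i = 0; i + 1 < path.size(); ++i) {
        assert(IsExpectedTransition(path[i], path[i + 1]));
        std::cout << "VolatileState: " << ToString(path[i])
                  << " -> " << ToString(path[i + 1]) << "\n";
    }
}
```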
2025-06-24T21:09:54.536047Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:58.527567Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519626137248605588:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:58.527667Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> DataStreams::TestReservedConsumersMetering [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged2-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandom+UseSink >> KqpReturning::ReturningWorksIndexedReplace-QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedOperationsWithDefault-QueryService >> TableCreation::ConcurrentTableCreation [GOOD] >> TableCreation::ConcurrentMultipleTablesCreation >> TableCreation::MultipleTablesCreation [GOOD] >> TableCreation::CreateOldTable >> KqpProxy::NoLocalSessionExecution [GOOD] >> KqpProxy::NoUserAccessToScriptExecutionsTable >> TableCreation::SimpleTableCreation [GOOD] >> TableCreation::SimpleUpdateTable ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestReservedConsumersMetering [GOOD] Test command err: 2025-06-24T21:09:40.887573Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626082779613078:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:40.891231Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003038/r3tmp/tmp5Hv36b/pdisk_1.dat 2025-06-24T21:09:41.369823Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:41.369943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:41.380232Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:41.444482Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19895, node 1 2025-06-24T21:09:41.571524Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:41.571550Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:41.571557Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:41.571712Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:09:41.917536Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13382 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:42.070744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:42.175830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:13382 2025-06-24T21:09:42.370333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-24T21:09:42.783448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000001" } records { sequence_number: "0" shard_id: "shard-000009" } records { sequence_number: "0" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000005" } records { sequence_number: "0" shard_id: "shard-000008" } records { sequence_number: "1" shard_id: "shard-000004" } records { sequence_number: "2" shard_id: "shard-000004" } records { sequence_number: "1" shard_id: "shard-000005" } records { sequence_number: "1" shard_id: "shard-000001" } records { sequence_number: "1" shard_id: "shard-000009" } records { sequence_number: "0" shard_id: "shard-000006" } records { sequence_number: "2" shard_id: "shard-000001" } records { sequence_number: "0" shard_id: "shard-000007" } records { sequence_number: "1" shard_id: "shard-000007" } records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000007" } records { sequence_number: "3" shard_id: "shard-000004" } records { sequence_number: "2" shard_id: "shard-000005" } records { sequence_number: "0" shard_id: "shard-000003" } records { sequence_number: "2" shard_id: "shard-000009" } records { sequence_number: "1" shard_id: "shard-000008" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000006" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000009" } records { sequence_number: "3" shard_id: "shard-000001" } records { sequence_number: "4" shard_id: "shard-000009" } records { sequence_number: "4" shard_id: "shard-000004" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000001" } encryption_type: NONE records { sequence_number: "5" shard_id: "shard-000001" } records { sequence_number: "5" shard_id: "shard-000009" } records { sequence_number: "5" shard_id: "shard-000004" } records { sequence_number: "3" shard_id: "shard-000005" } records { sequence_number: "2" shard_id: "shard-000008" } records { sequence_number: "6" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000004" } records { sequence_number: "4" shard_id: "shard-000005" } records { sequence_number: "6" shard_id: "shard-000001" } records { sequence_number: "6" shard_id: "shard-000009" } records { sequence_number: "2" shard_id: "shard-000006" } records { sequence_number: "7" shard_id: "shard-000001" } records { sequence_number: "3" shard_id: "shard-000007" } records { sequence_number: "4" shard_id: "shard-000007" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000007" } records { sequence_number: "8" shard_id: "shard-000004" } records { sequence_number: "5" shard_id: "shard-000005" } records { sequence_number: "1" shard_id: "shard-000003" } records { sequence_number: "7" shard_id: "shard-000009" } records { sequence_number: "3" shard_id: "shard-000008" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000006" } records { 
sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000009" } records { sequence_number: "8" shard_id: "shard-000001" } records { sequence_number: "9" shard_id: "shard-000009" } records { sequence_number: "9" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000001" } encryption_type: NONE records { sequence_number: "10" shard_id: "shard-000001" } records { sequence_number: "10" shard_id: "shard-000009" } records { sequence_number: "10" shard_id: "shard-000004" } records { sequence_number: "6" shard_id: "shard-000005" } records { sequence_number: "4" shard_id: "shard-000008" } records { sequence_number: "11" shard_id: "shard-000004" } records { sequence_number: "12" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000005" } records { sequence_number: "11" shard_id: "shard-000001" } records { sequence_number: "11" shard_id: "shard-000009" } records { sequence_number: "4" shard_id: "shard-000006" } records { sequence_number: "12" shard_id: "shard-000001" } records { sequence_number: "6" shard_id: "shard-000007" } records { sequence_number: "7" shard_id: "shard-000007" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000007" } records { sequence_number: "13" shard_id: "shard-000004" } records { sequence_number: "8" shard_id: "shard-000005" } records { sequence_number: "2" shard_id: "shard-000003" } records { sequence_number: "12" shard_id: "shard-000009" } records { sequence_number: "5" shard_id: "shard-000008" } records { sequence_number: "9" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000006" } records { sequence_number: "10" shard_id: "shard-000000" } records { sequence_number: "13" shard_id: "shard-000009" } records { sequence_number: "13" shard_id: "shard-000001" } records { sequence_number: "14" shard_id: "shard-000009" } records { sequence_number: "14" shard_id: "shard-000004" } records { sequence_number: "11" shard_id: "shard-000000" } records { sequence_number: "14" shard_id: "shard-000001" } 2025-06-24T21:09:45.888033Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626082779613078:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:45.888117Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; encryption_type: NONE records { sequence_number: "15" shard_id: "shard-000001" } records { sequence_number: "15" shard_id: "shard-000009" } records { sequence_number: "15" shard_id: "shard-000004" } records { sequence_number: "9" shard_id: "shard-000005" } records { sequence_number: "6" shard_id: "shard-000008" } records { sequence_number: "16" shard_id: "shard-000004" } records { sequence_number: "17" shard_id: "shard-000004" } records { sequence_number: "10" shard_id: "shard-000005" } records { sequence_number: "16" shard_id: "shard-000001" } records { sequence_number: "16" shard_id: "shard-000009" } records { sequence_number: "6" shard_id: "shard-000006" } records { sequence_number: "17" shard_id: "shard-000001" } records { sequence_number: "9" shard_id: "shard-000007" } records { sequence_number: "10" shard_id: "shard-000007" } records { sequence_number: "12" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: 
"shard-000007" } records { sequence_number: "18" shard_id: "shard-000004" } records { sequence_number: "11" shard_id: "shard-00 ... older_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750799399511-170","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":0,"unit":"second","start":1750799399,"finish":1750799399},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799399}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750799399511-171","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":0,"unit":"mbyte*second","start":1750799399,"finish":1750799399},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799399}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1750799399511-172","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1750799399,"finish":1750799399},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750799399}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1750799399541-173","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1750799399,"finish":1750799400},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799400}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750799399541-174","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1750799399,"finish":1750799400},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799400}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750799399541-175","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1750799399,"finish":1750799400},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799400}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1750799399541-176","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750799399,"finish":1750799400},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750799400}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1750799400563-177","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1750799400,"finish":1750799401},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799401}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750799400563-178","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1750799400,"finish":1750799401},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799401}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750799400563-179","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1750799400,"finish":1750799401},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799401}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1750799400563-180","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750799400,"finish":1750799401},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750799401}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1750799401575-181","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1750799401,"finish":1750799402},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799402}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750799401575-182","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1750799401,"finish":1750799402},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799402}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750799401575-183","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1750799401,"finish":1750799402},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799402}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1750799401575-184","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750799401,"finish":1750799402},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750799402}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1750799402587-185","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1750799402,"finish":1750799403},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799403}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750799402587-186","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1750799402,"finish":1750799403},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799403}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750799402587-187","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1750799402,"finish":1750799403},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799403}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1750799402587-188","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750799402,"finish":1750799403},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750799403}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1750799403600-189","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1750799403,"finish":1750799404},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799404}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750799403600-190","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1750799403,"finish":1750799404},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799404}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750799403600-191","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1750799403,"finish":1750799404},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750799404}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1750799403600-192","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750799403,"finish":1750799404},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750799404}' >> TKeyValueTest::TestWrite200KDeleteThenResponseError [GOOD] >> TKeyValueTest::TestWrite200KDeleteThenResponseErrorNewApi >> TableCreation::ConcurrentTableCreationWithDifferentVersions [GOOD] >> TableCreation::ConcurrentUpdateTable >> TKeyValueTest::TestWriteReadWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestWriteReadWhileWriteWorks |94.4%| [TA] $(B)/ydb/services/datastreams/ut/test-results/unittest/{meta.json ... results_accumulator.log} |94.4%| [TA] {RESULT} $(B)/ydb/services/datastreams/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TKeyValueTest::TestWriteReadRangeDataLimitThenLimitWorks [GOOD] >> TKeyValueTest::TestWriteReadRangeDataLimitThenLimitWorksNewApi >> KqpProxy::PingNotExistedSession [GOOD] >> ScriptExecutionsTest::AttemptToUpdateDeletedLease >> KqpNewEngine::StaleRO_IndexFollowers+EnableFollowers [GOOD] >> KqpNewEngine::SqlInFromCompact >> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEventsWithSlowInitialGC >> TKeyValueTest::TestWriteReadRangeLimitThenLimitWorks [GOOD] >> TKeyValueTest::TestWriteReadRangeLimitThenLimitWorksNewApi >> ScriptExecutionsTest::RunCheckLeaseStatus [GOOD] >> ScriptExecutionsTest::UpdatesLeaseAfterExpiring >> KqpProxy::CalcPeerStats [GOOD] >> KqpProxy::CreatesScriptExecutionsTable >> KqpNewEngine::JoinMultiConsumer [GOOD] >> KqpNewEngine::JoinSameKey >> TKeyValueTest::TestWriteLongKey [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteLongKey [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:80:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:81:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:81:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:78:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:81:2057] recipient: [4:80:2111] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:82:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:83:2112] sender: [4:84:2057] recipient: [4:80:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! 
!Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:83:2112] Leader for TabletID 72057594037927937 is [4:83:2112] sender: [4:169:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:79:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:83:2057] recipient: [5:81:2111] Leader for TabletID 72057594037927937 is [5:84:2112] sender: [5:85:2057] recipient: [5:81:2111] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! new actor is[5:84:2112] Leader for TabletID 72057594037927937 is [5:84:2112] sender: [5:170:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:82:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:85:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:84:2114] Leader for TabletID 72057594037927937 is [7:87:2115] sender: [7:88:2057] recipient: [7:84:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! 
new actor is[7:87:2115] Leader for TabletID 72057594037927937 is [7:87:2115] sender: [7:173:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:83:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:86:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:87:2057] recipient: [8:85:2114] Leader for TabletID 72057594037927937 is [8:88:2115] sender: [8:89:2057] recipient: [8:85:2114] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! new actor is[8:88:2115] Leader for TabletID 72057594037927937 is [8:88:2115] sender: [8:174:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TProxyActorTest::TestAttachSession >> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEventsWithSlowInitialGC [GOOD] >> KqpAgg::AggHashShuffle+UseSink [GOOD] >> KqpAgg::AggHashShuffle-UseSink >> TableCreation::CreateOldTable [GOOD] >> TableCreation::SimpleUpdateTable [GOOD] >> TableCreation::ConcurrentMultipleTablesCreation [GOOD] >> TExecutorDb::CoordinatorSimulation [GOOD] >> TExecutorDb::RandomCoordinatorSimulation |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEventsWithSlowInitialGC [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:82:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:82:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:83:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:85:2114] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:89:2057] recipient: [7:85:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! new actor is[7:88:2115] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:174:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:85:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:88:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:89:2057] recipient: [8:87:2116] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:91:2057] recipient: [8:87:2116] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! new actor is[8:90:2117] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:176:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:85:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:88:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:89:2057] recipient: [9:87:2116] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:91:2057] recipient: [9:87:2116] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! new actor is[9:90:2117] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:176:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:86:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:89:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:90:2057] recipient: [10:88:2116] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:92:2057] recipient: [10:88:2116] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! new actor is[10:91:2117] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:177:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvKeyValue::TEvCollect ! 
Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:87:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:90:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:91:2057] recipient: [11:89:2117] Leader for TabletID 72057594037927937 is [11:92:2118] sender: [11:93:2057] recipient: [11:89:2117] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! new actor is[11:92:2118] Leader for TabletID 72057594037927937 is [11:92:2118] sender: [11:112:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:58:2098]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:88:2057] recipient: [12:37:2084] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:91:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:92:2057] recipient: [12:90:2118] Leader for TabletID 72057594037927937 is [12:93:2119] sender: [12:94:2057] recipient: [12:90:2118] !Reboot 72057594037927937 (actor [12:58:2098]) rebooted! !Reboot 72057594037927937 (actor [12:58:2098]) tablet resolver refreshed! new actor is[12:93:2119] Leader for TabletID 72057594037927937 is [12:93:2119] sender: [12:113:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:53:2096] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:59:2057] recipient: [13:53:2096] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:76:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:91:2057] recipient: [13:37:2084] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:94:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:95:2057] recipient: [13:93:2121] Leader for TabletID 72057594037927937 is [13:96:2122] sender: [13:97:2057] recipient: [13:93:2121] !Reboot 72057594037927937 (actor [13:58:2098]) rebooted! !Reboot 72057594037927937 (actor [13:58:2098]) tablet resolver refreshed! new actor is[13:96:2122] Leader for TabletID 72057594037927937 is [13:96:2122] sender: [13:182:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:52:2096] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:59:2057] recipient: [14:52:2096] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:76:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:91:2057] recipient: [14:37:2084] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:94:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:95:2057] recipient: [14:93:2121] Leader for TabletID 72057594037927937 is [14:96:2122] sender: [14:97:2057] recipient: [14:93:2121] !Reboot 72057594037927937 (actor [14:58:2098]) rebooted! !Reboot 72057594037927937 (actor [14:58:2098]) tablet resolver refreshed! new actor is[14:96:2122] Leader for TabletID 72057594037927937 is [14:96:2122] sender: [14:182:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2096] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:59:2057] recipient: [15:52:2096] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:76:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:92:2057] recipient: [15:37:2084] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:95:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:96:2057] recipient: [15:94:2121] Leader for TabletID 72057594037927937 is [15:97:2122] sender: [15:98:2057] recipient: [15:94:2121] !Reboot 72057594037927937 (actor [15:58:2098]) rebooted! !Reboot 72057594037927937 (actor [15:58:2098]) tablet resolver refreshed! new actor is[15:97:2122] Leader for TabletID 72057594037927937 is [15:97:2122] sender: [15:183:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2096] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:59:2057] recipient: [16:53:2096] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:76:2057] recipient: [16:14:2061] >> TableCreation::ConcurrentUpdateTable [GOOD] >> TProxyActorTest::TestAttachSession [GOOD] >> KqpProxy::NoUserAccessToScriptExecutionsTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> TableCreation::CreateOldTable [GOOD] Test command err: 2025-06-24T21:10:03.012943Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626178783170191:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:03.013039Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047ea/r3tmp/tmp0RZ18g/pdisk_1.dat 2025-06-24T21:10:03.341189Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626174488202862:2079] 1750799403010288 != 1750799403010291 2025-06-24T21:10:03.351986Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:03.401742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:03.401905Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:03.403622Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6538 TServer::EnableGrpc on GrpcPort 20692, node 1 2025-06-24T21:10:03.652838Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:03.652878Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:03.652887Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:03.653026Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:10:03.948424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:04.020681Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:05.102161Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:05.103869Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.116127Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-24T21:10:05.116168Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 
2025-06-24T21:10:05.116189Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:05.116252Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.116383Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.116421Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.116913Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.116959Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.118008Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-24T21:10:05.118013Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-24T21:10:05.118034Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-24T21:10:05.118058Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-24T21:10:05.118074Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-06-24T21:10:05.118089Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-06-24T21:10:05.118316Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-24T21:10:05.118331Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-24T21:10:05.118346Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. 
Full table path:/dc-1/.metadata/result_sets 2025-06-24T21:10:05.127401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:05.130070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:05.131508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:05.135626Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-06-24T21:10:05.135666Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976710660 2025-06-24T21:10:05.135761Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-06-24T21:10:05.135819Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976710659 2025-06-24T21:10:05.135848Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-06-24T21:10:05.135858Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976710658 2025-06-24T21:10:05.270797Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976710660. Doublechecking... 2025-06-24T21:10:05.334468Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976710659. Doublechecking... 2025-06-24T21:10:05.338102Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976710658. Doublechecking... 2025-06-24T21:10:05.366482Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. Column diff is empty, finishing 2025-06-24T21:10:05.408960Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. Column diff is empty, finishing 2025-06-24T21:10:05.429379Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. 
Column diff is empty, finishing 2025-06-24T21:10:05.429900Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 3a9b3037-81f1378f-e050ba11-2bd60f93, Bootstrap. Database: /dc-1 2025-06-24T21:10:05.439886Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274304.111759s seconds to be completed 2025-06-24T21:10:05.442735Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=1&id=NDVmMDBkZWQtZjRiYTRlMmQtNDk2MGM5NmQtNDM1Yzc5OTA=, workerId: [1:7519626187373105626:2292], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-24T21:10:05.442940Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:05.443937Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 3a9b3037-81f1378f-e050ba11-2bd60f93, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, $execution_id, CurrentUtcTimestamp() + $lease_duration, 1, CurrentUtcTimestamp() + $execution_meta_ttl); 2025-06-24T21:10:05.445895Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:7 ... NmUtODI4MWEzYTQtY2JjN2ZkNDMtM2ZmNDIxNWQ=, workerId: [2:7519626204771453866:2292], local sessions count: 1 2025-06-24T21:10:10.012336Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TScriptProgressActor] TraceId: 74b0315-b13d17-ba660ac6-28262618, Bootstrap. Database: /dc-1 2025-06-24T21:10:10.012386Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=MTYzNjk4NTUtYzZkZGI2ZjYtZjM0YWY5NzctY2M3OTMxZjg=, CurrentExecutionId: 74b0315-b13d17-ba660ac6-28262618, CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 604800.000000s timeout: 604800.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 5, targetId: [2:7519626209066421275:2310] 2025-06-24T21:10:10.012407Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 5 timeout: 604800.000000s actor id: [2:7519626209066421279:2518] 2025-06-24T21:10:10.012447Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274299.539177s seconds to be completed 2025-06-24T21:10:10.013728Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=YWY4ODdhMTUtNjk3NTRhZTQtZDE1NzNhNmEtZmI1YzRiYg==, workerId: [2:7519626209066421286:2316], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-24T21:10:10.013824Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:10.014091Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TScriptProgressActor] TraceId: 74b0315-b13d17-ba660ac6-28262618, RunDataQuery: -- TScriptProgressActor::OnRunQuery DECLARE $execution_id AS Text; DECLARE $database AS Text; DECLARE $plan AS JsonDocument; DECLARE $execution_status AS Int32; UPSERT INTO `.metadata/script_executions` (execution_id, database, plan, execution_status) VALUES ($execution_id, $database, $plan, $execution_status); 2025-06-24T21:10:10.014311Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=YWY4ODdhMTUtNjk3NTRhZTQtZDE1NzNhNmEtZmI1YzRiYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 7, targetId: [2:7519626209066421286:2316] 2025-06-24T21:10:10.014336Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 7 timeout: 300.000000s actor id: [2:7519626209066421288:2522] 2025-06-24T21:10:10.056106Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: TraceId: "01jyhwdrvg0cvxa0436ybrg829", Request has 18444993274299.495535s seconds to be completed 2025-06-24T21:10:10.057656Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: TraceId: "01jyhwdrvg0cvxa0436ybrg829", Created new session, sessionId: ydb://session/3?node_id=2&id=ODg1YzNiYS05MTJhOTUzMy00M2JlYzE4LTJhOWI5MDFm, workerId: [2:7519626209066421302:2326], database: /dc-1, longSession: 1, local sessions count: 3 2025-06-24T21:10:10.057807Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 01jyhwdrvg0cvxa0436ybrg829 2025-06-24T21:10:10.059919Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 74b0315-b13d17-ba660ac6-28262618, Bootstrap. Database: /dc-1 2025-06-24T21:10:10.060032Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:147: Table test_table updater. Describe result: PathErrorUnknown 2025-06-24T21:10:10.060046Z node 2 :KQP_PROXY NOTICE: table_creator.cpp:167: Table test_table updater. Creating table 2025-06-24T21:10:10.060078Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:100: Table test_table updater. 
Full table path:/dc-1/.test/test_table 2025-06-24T21:10:10.060764Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 5, sender: [2:7519626204771453863:2456], selfId: [2:7519626196181518522:2144], source: [2:7519626209066421275:2310] 2025-06-24T21:10:10.060870Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274299.490758s seconds to be completed 2025-06-24T21:10:10.062635Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=NDAyYmYyZjUtYTM0MWNmNGYtOWM0YWFjMGEtY2Q2NjI4NWM=, workerId: [2:7519626209066421334:2328], database: /dc-1, longSession: 1, local sessions count: 4 2025-06-24T21:10:10.062746Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:10.062956Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 74b0315-b13d17-ba660ac6-28262618, RunDataQuery: -- TSaveScriptExecutionResultMetaQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_metas AS JsonDocument; UPDATE `.metadata/script_executions` SET result_set_metas = $result_set_metas WHERE database = $database AND execution_id = $execution_id; 2025-06-24T21:10:10.063016Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:10.063204Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=NDAyYmYyZjUtYTM0MWNmNGYtOWM0YWFjMGEtY2Q2NjI4NWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 10, targetId: [2:7519626209066421334:2328] 2025-06-24T21:10:10.063227Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 10 timeout: 300.000000s actor id: [2:7519626209066421336:2541] 2025-06-24T21:10:10.063970Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:190: Table test_table updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715665 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 10 } 2025-06-24T21:10:10.064002Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:261: Table test_table updater. 
Subscribe on create table tx: 281474976715665 2025-06-24T21:10:10.085662Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 7, sender: [2:7519626209066421287:2317], selfId: [2:7519626196181518522:2144], source: [2:7519626209066421286:2316] 2025-06-24T21:10:10.085822Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TScriptProgressActor] TraceId: 74b0315-b13d17-ba660ac6-28262618, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YWY4ODdhMTUtNjk3NTRhZTQtZDE1NzNhNmEtZmI1YzRiYg==, TxId: 2025-06-24T21:10:10.085848Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TScriptProgressActor] TraceId: 74b0315-b13d17-ba660ac6-28262618, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YWY4ODdhMTUtNjk3NTRhZTQtZDE1NzNhNmEtZmI1YzRiYg==, TxId: 2025-06-24T21:10:10.086592Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=YWY4ODdhMTUtNjk3NTRhZTQtZDE1NzNhNmEtZmI1YzRiYg==, workerId: [2:7519626209066421286:2316], local sessions count: 3 2025-06-24T21:10:10.091692Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:290: Table test_table updater. Request: create. Transaction completed: 281474976715665. Doublechecking... 2025-06-24T21:10:10.175595Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-24T21:10:10.175983Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-24T21:10:10.205937Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=ODg1YzNiYS05MTJhOTUzMy00M2JlYzE4LTJhOWI5MDFm, workerId: [2:7519626209066421302:2326], local sessions count: 2 2025-06-24T21:10:10.209367Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 10, sender: [2:7519626209066421335:2329], selfId: [2:7519626196181518522:2144], source: [2:7519626209066421334:2328] 2025-06-24T21:10:10.209496Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 74b0315-b13d17-ba660ac6-28262618, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NDAyYmYyZjUtYTM0MWNmNGYtOWM0YWFjMGEtY2Q2NjI4NWM=, TxId: 2025-06-24T21:10:10.209527Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 74b0315-b13d17-ba660ac6-28262618, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NDAyYmYyZjUtYTM0MWNmNGYtOWM0YWFjMGEtY2Q2NjI4NWM=, TxId: 2025-06-24T21:10:10.209626Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1911: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 74b0315-b13d17-ba660ac6-28262618, start saving rows range [0; 1) 2025-06-24T21:10:10.209703Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=NDAyYmYyZjUtYTM0MWNmNGYtOWM0YWFjMGEtY2Q2NjI4NWM=, workerId: [2:7519626209066421334:2328], local sessions count: 1 2025-06-24T21:10:10.209740Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 74b0315-b13d17-ba660ac6-28262618, Bootstrap. 
Database: /dc-1 2025-06-24T21:10:10.209832Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274299.341796s seconds to be completed 2025-06-24T21:10:10.211272Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=YzE1NjgzZGUtNTU3NTJkOWYtNDQ2MjA3ZS00YmQ0MTM2Yg==, workerId: [2:7519626209066421443:2343], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-24T21:10:10.211369Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:10.211627Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 74b0315-b13d17-ba660ac6-28262618, RunDataQuery: -- TSaveScriptExecutionResultQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_id AS Int32; DECLARE $expire_at AS Optional; DECLARE $items AS List>; UPSERT INTO `.metadata/result_sets` SELECT $database as database, $execution_id as execution_id, $result_set_id as result_set_id, T.row_id as row_id, $expire_at as expire_at, T.result_set as result_set, T.accumulated_size as accumulated_size FROM AS_TABLE($items) AS T; 2025-06-24T21:10:10.211905Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=YzE1NjgzZGUtNTU3NTJkOWYtNDQ2MjA3ZS00YmQ0MTM2Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 12, targetId: [2:7519626209066421443:2343] 2025-06-24T21:10:10.211927Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 12 timeout: 300.000000s actor id: [2:7519626209066421445:2606] >> TProxyActorTest::TestCreateSemaphoreInterrupted |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TProxyActorTest::TestAttachSession [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> TableCreation::SimpleUpdateTable [GOOD] Test command err: 2025-06-24T21:10:03.012937Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626178670896437:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:03.012973Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047c7/r3tmp/tmp2ftiP3/pdisk_1.dat 2025-06-24T21:10:03.377211Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626174375929108:2079] 1750799403010302 != 1750799403010305 2025-06-24T21:10:03.391265Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:03.411025Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:03.411178Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:03.412814Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7697 TServer::EnableGrpc on GrpcPort 22161, 
node 1 2025-06-24T21:10:03.652866Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:03.652901Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:03.652909Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:03.653076Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:10:03.964751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:04.020711Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:05.321129Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:05.322409Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.333083Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-24T21:10:05.333152Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 
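A note on the very large "Request has ... seconds to be completed" values in the KQP_PROXY entries above (for example 18444993274304.111759s at 21:10:05.439886Z): the figure appears to be an effectively infinite deadline of 2^64 - 1 microseconds minus the current wall-clock time, i.e. the request carries no real timeout. A quick check against that entry:

    (2^64 - 1) microseconds = 18446744073709.551615 s
    18446744073709.551615 - 1750799405.439886 (unix time of 2025-06-24T21:10:05.439886Z) ~= 18444993274304.1117

which matches the logged value to within a few tens of microseconds.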
2025-06-24T21:10:05.333177Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:05.333230Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.333393Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.333453Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.333523Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.333582Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.334391Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-24T21:10:05.334415Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-24T21:10:05.334450Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-06-24T21:10:05.334565Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-24T21:10:05.334583Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-24T21:10:05.334602Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-06-24T21:10:05.334665Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-24T21:10:05.334672Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-24T21:10:05.334685Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. 
Full table path:/dc-1/.metadata/script_executions 2025-06-24T21:10:05.337462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:05.339317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:05.340376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:05.344906Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-06-24T21:10:05.344907Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-06-24T21:10:05.344948Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976715658 2025-06-24T21:10:05.344966Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976715660 2025-06-24T21:10:05.345035Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-06-24T21:10:05.345049Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976715659 2025-06-24T21:10:05.433027Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976715658. Doublechecking... 2025-06-24T21:10:05.457139Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976715660. Doublechecking... 2025-06-24T21:10:05.463108Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976715659. Doublechecking... 2025-06-24T21:10:05.508241Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. Column diff is empty, finishing 2025-06-24T21:10:05.528727Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. Column diff is empty, finishing 2025-06-24T21:10:05.532242Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. 
Column diff is empty, finishing 2025-06-24T21:10:05.532698Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 58dc3afc-dc415efd-84877003-f80857e0, Bootstrap. Database: /dc-1 2025-06-24T21:10:05.545726Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274304.005917s seconds to be completed 2025-06-24T21:10:05.548676Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=1&id=NzFkODIzN2YtZWFmNTk4N2EtZGM2ZWJjOWEtMTZiZjIwMzY=, workerId: [1:7519626187260831865:2292], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-24T21:10:05.548851Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:05.549803Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 58dc3afc-dc415efd-84877003-f80857e0, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, $execution_id, CurrentUtcTimestamp() + $lease_duration, 1, CurrentUtcTimestamp() + $execution_meta_ttl); 2025-06-24T21:10:05.550436Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:7 ... ase: /dc-1, longSession: 1, local sessions count: 2 2025-06-24T21:10:10.084974Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:10.085304Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TScriptProgressActor] TraceId: 77c165a4-40a62a07-c6e38bb3-c8f988a4, RunDataQuery: -- TScriptProgressActor::OnRunQuery DECLARE $execution_id AS Text; DECLARE $database AS Text; DECLARE $plan AS JsonDocument; DECLARE $execution_status AS Int32; UPSERT INTO `.metadata/script_executions` (execution_id, database, plan, execution_status) VALUES ($execution_id, $database, $plan, $execution_status); 2025-06-24T21:10:10.085570Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=NGEyZTkyZTktZWRmZDRiNDQtM2ZlOGE3M2MtYzM3NmNhNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 7, targetId: [2:7519626209003291921:2316] 2025-06-24T21:10:10.085601Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 7 timeout: 300.000000s actor id: [2:7519626209003291923:2522] 2025-06-24T21:10:10.135312Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: TraceId: "01jyhwdrxvefgdmtc4qts52hcc", Request has 18444993274299.416336s seconds to be completed 2025-06-24T21:10:10.137408Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: TraceId: "01jyhwdrxvefgdmtc4qts52hcc", Created new session, sessionId: ydb://session/3?node_id=2&id=ZDMzMzA4YTYtMmMwNTY3YjEtOGJlYTY1ZjctNDQ4ZTYwYzE=, workerId: [2:7519626209003291940:2326], database: /dc-1, longSession: 1, local sessions count: 3 2025-06-24T21:10:10.137572Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 01jyhwdrxvefgdmtc4qts52hcc 2025-06-24T21:10:10.139398Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 77c165a4-40a62a07-c6e38bb3-c8f988a4, Bootstrap. Database: /dc-1 2025-06-24T21:10:10.140475Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274299.411155s seconds to be completed 2025-06-24T21:10:10.141165Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:147: Table test_table updater. Describe result: PathErrorUnknown 2025-06-24T21:10:10.141182Z node 2 :KQP_PROXY NOTICE: table_creator.cpp:167: Table test_table updater. Creating table 2025-06-24T21:10:10.141214Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:100: Table test_table updater. Full table path:/dc-1/.test/test_table 2025-06-24T21:10:10.142503Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=Y2IzM2QxMGYtZTFiZWI2YzctNDk1YzIyMTAtZGI3MDgyYzI=, workerId: [2:7519626209003291968:2328], database: /dc-1, longSession: 1, local sessions count: 4 2025-06-24T21:10:10.142632Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:10.142950Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 77c165a4-40a62a07-c6e38bb3-c8f988a4, RunDataQuery: -- TSaveScriptExecutionResultMetaQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_metas AS JsonDocument; UPDATE `.metadata/script_executions` SET result_set_metas = $result_set_metas WHERE database = $database AND execution_id = $execution_id; 2025-06-24T21:10:10.143097Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 5, sender: [2:7519626204708324497:2455], selfId: [2:7519626196118389181:2162], source: [2:7519626209003291909:2310] 2025-06-24T21:10:10.143311Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=Y2IzM2QxMGYtZTFiZWI2YzctNDk1YzIyMTAtZGI3MDgyYzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 10, targetId: [2:7519626209003291968:2328] 2025-06-24T21:10:10.143345Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 10 timeout: 300.000000s actor id: [2:7519626209003291971:2541] 2025-06-24T21:10:10.143832Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:10.144796Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:190: Table test_table updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715665 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 10 } 2025-06-24T21:10:10.144842Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:261: Table test_table updater. Subscribe on create table tx: 281474976715665 2025-06-24T21:10:10.168906Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 7, sender: [2:7519626209003291922:2317], selfId: [2:7519626196118389181:2162], source: [2:7519626209003291921:2316] 2025-06-24T21:10:10.169066Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TScriptProgressActor] TraceId: 77c165a4-40a62a07-c6e38bb3-c8f988a4, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NGEyZTkyZTktZWRmZDRiNDQtM2ZlOGE3M2MtYzM3NmNhNjE=, TxId: 2025-06-24T21:10:10.169096Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TScriptProgressActor] TraceId: 77c165a4-40a62a07-c6e38bb3-c8f988a4, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NGEyZTkyZTktZWRmZDRiNDQtM2ZlOGE3M2MtYzM3NmNhNjE=, TxId: 2025-06-24T21:10:10.169920Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=NGEyZTkyZTktZWRmZDRiNDQtM2ZlOGE3M2MtYzM3NmNhNjE=, workerId: [2:7519626209003291921:2316], local sessions count: 3 2025-06-24T21:10:10.175592Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:290: Table test_table updater. Request: create. Transaction completed: 281474976715665. Doublechecking... 2025-06-24T21:10:10.243672Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-24T21:10:10.244064Z node 2 :KQP_PROXY NOTICE: table_creator.cpp:365: Table test_table updater. Adding columns. New columns: col4, col5. Existing columns: col1, col2, col3 2025-06-24T21:10:10.244107Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:100: Table test_table updater. Full table path:/dc-1/.test/test_table 2025-06-24T21:10:10.245494Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:10:10.246316Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:190: Table test_table updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715667 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 } 2025-06-24T21:10:10.246348Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:261: Table test_table updater. 
Subscribe on create table tx: 281474976715667 2025-06-24T21:10:10.255210Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:290: Table test_table updater. Request: alter. Transaction completed: 281474976715667. Doublechecking... 2025-06-24T21:10:10.306732Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-24T21:10:10.335664Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 10, sender: [2:7519626209003291970:2329], selfId: [2:7519626196118389181:2162], source: [2:7519626209003291968:2328] 2025-06-24T21:10:10.335818Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 77c165a4-40a62a07-c6e38bb3-c8f988a4, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=Y2IzM2QxMGYtZTFiZWI2YzctNDk1YzIyMTAtZGI3MDgyYzI=, TxId: 2025-06-24T21:10:10.335848Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 77c165a4-40a62a07-c6e38bb3-c8f988a4, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=Y2IzM2QxMGYtZTFiZWI2YzctNDk1YzIyMTAtZGI3MDgyYzI=, TxId: 2025-06-24T21:10:10.335968Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1911: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 77c165a4-40a62a07-c6e38bb3-c8f988a4, start saving rows range [0; 1) 2025-06-24T21:10:10.336055Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=Y2IzM2QxMGYtZTFiZWI2YzctNDk1YzIyMTAtZGI3MDgyYzI=, workerId: [2:7519626209003291968:2328], local sessions count: 2 2025-06-24T21:10:10.336059Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 77c165a4-40a62a07-c6e38bb3-c8f988a4, Bootstrap. 
Database: /dc-1 2025-06-24T21:10:10.336186Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274299.215444s seconds to be completed 2025-06-24T21:10:10.338273Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=YWNjOGFkOTAtZDA3ZWYwNGUtYjkzMzJjYTUtY2JjYTAzOGM=, workerId: [2:7519626209003292105:2345], database: /dc-1, longSession: 1, local sessions count: 3 2025-06-24T21:10:10.338462Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:10.338648Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=ZDMzMzA4YTYtMmMwNTY3YjEtOGJlYTY1ZjctNDQ4ZTYwYzE=, workerId: [2:7519626209003291940:2326], local sessions count: 2 2025-06-24T21:10:10.338759Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 77c165a4-40a62a07-c6e38bb3-c8f988a4, RunDataQuery: -- TSaveScriptExecutionResultQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_id AS Int32; DECLARE $expire_at AS Optional; DECLARE $items AS List>; UPSERT INTO `.metadata/result_sets` SELECT $database as database, $execution_id as execution_id, $result_set_id as result_set_id, T.row_id as row_id, $expire_at as expire_at, T.result_set as result_set, T.accumulated_size as accumulated_size FROM AS_TABLE($items) AS T; 2025-06-24T21:10:10.339100Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=YWNjOGFkOTAtZDA3ZWYwNGUtYjkzMzJjYTUtY2JjYTAzOGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 12, targetId: [2:7519626209003292105:2345] 2025-06-24T21:10:10.339171Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 12 timeout: 300.000000s actor id: [2:7519626209003292108:2631] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> TableCreation::ConcurrentMultipleTablesCreation [GOOD] Test command err: 2025-06-24T21:10:03.012982Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626178908424859:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:03.013017Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047f4/r3tmp/tmpNZPTWr/pdisk_1.dat 2025-06-24T21:10:03.331633Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626174613457530:2079] 1750799403010284 != 1750799403010287 2025-06-24T21:10:03.363613Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:03.404809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:03.404916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:03.406947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21354 TServer::EnableGrpc on GrpcPort 65048, node 1 2025-06-24T21:10:03.652775Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:03.652804Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:03.652813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:03.652973Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
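In the SimpleUpdateTable output above, the test_table updater at /dc-1/.test/test_table first reports "Column diff is empty, finishing", then another pass reports "Adding columns. New columns: col4, col5. Existing columns: col1, col2, col3" and proposes an ESchemeOpAlterTable instead of a create. As an illustration, that sequence corresponds to YQL DDL along these lines; the column types and the primary key below are assumptions, not taken from the log:

    -- Sketch under assumed types; the log only names the columns, not their types or key.
    CREATE TABLE `/dc-1/.test/test_table` (
        col1 Utf8,
        col2 Utf8,
        col3 Utf8,
        PRIMARY KEY (col1)
    );

    -- A later updater describes the existing table, sees col4/col5 missing, and alters it.
    ALTER TABLE `/dc-1/.test/test_table`
        ADD COLUMN col4 Utf8,
        ADD COLUMN col5 Utf8;

Once the alter transaction completes, the updater describes the table again ("Doublechecking...") and finishes only when the column diff comes back empty.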
2025-06-24T21:10:03.962276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:04.020951Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:05.186048Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:05.187441Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.197450Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-24T21:10:05.197481Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-24T21:10:05.197495Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:05.197551Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.197673Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.197710Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.198007Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.198034Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.198943Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-24T21:10:05.198943Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-24T21:10:05.198949Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-24T21:10:05.198956Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-24T21:10:05.198985Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-06-24T21:10:05.198991Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-06-24T21:10:05.199063Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-24T21:10:05.199066Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-24T21:10:05.199073Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. 
Full table path:/dc-1/.metadata/result_sets 2025-06-24T21:10:05.201756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:05.202940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:05.205016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:05.214053Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-06-24T21:10:05.214073Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-06-24T21:10:05.214116Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976710659 2025-06-24T21:10:05.214116Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976710660 2025-06-24T21:10:05.214195Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-06-24T21:10:05.214232Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976710658 2025-06-24T21:10:05.320838Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976710659. Doublechecking... 2025-06-24T21:10:05.342395Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976710658. Doublechecking... 2025-06-24T21:10:05.344541Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976710660. Doublechecking... 2025-06-24T21:10:05.392229Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-06-24T21:10:05.402438Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. Column diff is empty, finishing 2025-06-24T21:10:05.418269Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. 
Column diff is empty, finishing 2025-06-24T21:10:05.419882Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 552c32bd-a208332c-4b44153b-63418558, Bootstrap. Database: /dc-1 2025-06-24T21:10:05.428577Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274304.123067s seconds to be completed 2025-06-24T21:10:05.431993Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=1&id=NzQyZjhlY2EtZDZhZTgwN2YtNWJjZTYzNzYtNTdkYmIxNjE=, workerId: [1:7519626187498360288:2292], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-24T21:10:05.432148Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:05.434505Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 552c32bd-a208332c-4b44153b-63418558, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, $execution_id, CurrentUtcTimestamp() + $lease_duration, 1, CurrentUtcTimestamp() + $execution_meta_ttl); 2025-06-24T21:10:05.445855Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp: ... -687c516a-c55a6703, RunDataQuery: -- TSaveScriptExecutionResultQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_id AS Int32; DECLARE $expire_at AS Optional; DECLARE $items AS List>; UPSERT INTO `.metadata/result_sets` SELECT $database as database, $execution_id as execution_id, $result_set_id as result_set_id, T.row_id as row_id, $expire_at as expire_at, T.result_set as result_set, T.accumulated_size as accumulated_size FROM AS_TABLE($items) AS T; 2025-06-24T21:10:09.973181Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=YTlmZjYwLTRhZmQ3NjQ2LTEwZDdlZWEtNjIzZWU3NDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 12, targetId: [2:7519626204090885917:2344] 2025-06-24T21:10:09.973204Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 12 timeout: 300.000000s actor id: [2:7519626204090885919:3044] 2025-06-24T21:10:09.979759Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-24T21:10:09.981214Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-24T21:10:09.982773Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. 
Column diff is empty, finishing 2025-06-24T21:10:09.983180Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-24T21:10:09.983216Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-24T21:10:09.985308Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-24T21:10:09.996154Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:09.998110Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:10.004310Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:10.005257Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:10.006781Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:10.008864Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:10.010390Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:10.012908Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:10.017054Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:10.018543Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:10.023213Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:10.027876Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:10.029293Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:10.030288Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:10.033910Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:10.035413Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:10.036961Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:10.038561Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:10.041594Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-24T21:10:10.043674Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. 
Column diff is empty, finishing 2025-06-24T21:10:10.079797Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 12, sender: [2:7519626204090885918:2345], selfId: [2:7519626195500949808:2151], source: [2:7519626204090885917:2344] 2025-06-24T21:10:10.080065Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 3bb7c97c-e1ae1471-687c516a-c55a6703, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YTlmZjYwLTRhZmQ3NjQ2LTEwZDdlZWEtNjIzZWU3NDY=, TxId: 2025-06-24T21:10:10.080108Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 3bb7c97c-e1ae1471-687c516a-c55a6703, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YTlmZjYwLTRhZmQ3NjQ2LTEwZDdlZWEtNjIzZWU3NDY=, TxId: 2025-06-24T21:10:10.080233Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1943: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 3bb7c97c-e1ae1471-687c516a-c55a6703, result part successfully saved 2025-06-24T21:10:10.080252Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1950: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 3bb7c97c-e1ae1471-687c516a-c55a6703, reply SUCCESS, issues: 2025-06-24T21:10:10.080338Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=YTlmZjYwLTRhZmQ3NjQ2LTEwZDdlZWEtNjIzZWU3NDY=, workerId: [2:7519626204090885917:2344], local sessions count: 2 2025-06-24T21:10:10.080620Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 3bb7c97c-e1ae1471-687c516a-c55a6703, Bootstrap. Database: /dc-1 2025-06-24T21:10:10.080774Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274299.470862s seconds to be completed 2025-06-24T21:10:10.082643Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=YzhjNWE3YjAtNWJlMzU0ZTItNGE4N2VlNjktOWM5ZGNkZmQ=, workerId: [2:7519626208385853271:2353], database: /dc-1, longSession: 1, local sessions count: 3 2025-06-24T21:10:10.082769Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:10.083004Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 3bb7c97c-e1ae1471-687c516a-c55a6703, RunDataQuery: -- TSaveScriptExecutionResultMetaQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_metas AS JsonDocument; UPDATE `.metadata/script_executions` SET result_set_metas = $result_set_metas WHERE database = $database AND execution_id = $execution_id; 2025-06-24T21:10:10.083297Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=YzhjNWE3YjAtNWJlMzU0ZTItNGE4N2VlNjktOWM5ZGNkZmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 14, targetId: [2:7519626208385853271:2353] 2025-06-24T21:10:10.083327Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 14 timeout: 300.000000s actor id: [2:7519626208385853273:3085] 2025-06-24T21:10:10.102997Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 14, sender: [2:7519626208385853272:2354], selfId: [2:7519626195500949808:2151], source: [2:7519626208385853271:2353] 2025-06-24T21:10:10.103267Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 3bb7c97c-e1ae1471-687c516a-c55a6703, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YzhjNWE3YjAtNWJlMzU0ZTItNGE4N2VlNjktOWM5ZGNkZmQ=, TxId: 2025-06-24T21:10:10.103300Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 3bb7c97c-e1ae1471-687c516a-c55a6703, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YzhjNWE3YjAtNWJlMzU0ZTItNGE4N2VlNjktOWM5ZGNkZmQ=, TxId: 2025-06-24T21:10:10.103591Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 3bb7c97c-e1ae1471-687c516a-c55a6703, Bootstrap. Database: /dc-1 2025-06-24T21:10:10.103715Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=YzhjNWE3YjAtNWJlMzU0ZTItNGE4N2VlNjktOWM5ZGNkZmQ=, workerId: [2:7519626208385853271:2353], local sessions count: 2 2025-06-24T21:10:10.103751Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274299.447874s seconds to be completed 2025-06-24T21:10:10.105476Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=MWFjMDhjM2EtYWIyNmU4NWYtODE4YWNkMjctODFhMGQwODM=, workerId: [2:7519626208385853298:2363], database: /dc-1, longSession: 1, local sessions count: 3 2025-06-24T21:10:10.105623Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:10.105846Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 3bb7c97c-e1ae1471-687c516a-c55a6703, RunDataQuery: -- TSaveScriptFinalStatusActor::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT operation_status, finalization_status, meta, customer_supplied_id, user_token, script_sinks, script_secret_names FROM `.metadata/script_executions` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); SELECT lease_generation FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-24T21:10:10.105898Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=ZjJhYTJhMDgtZGFjZmJlMWYtNjAyNTFkZDMtMmFhNGFkZGE=, workerId: [2:7519626204090885286:2326], local sessions count: 2 2025-06-24T21:10:10.106090Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=MWFjMDhjM2EtYWIyNmU4NWYtODE4YWNkMjctODFhMGQwODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 16, targetId: [2:7519626208385853298:2363] 2025-06-24T21:10:10.106131Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 16 timeout: 300.000000s actor id: [2:7519626208385853301:3092] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> TableCreation::ConcurrentUpdateTable [GOOD] Test command err: 2025-06-24T21:10:03.013059Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626181722969427:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:03.013120Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047d2/r3tmp/tmpg509gI/pdisk_1.dat 2025-06-24T21:10:03.336035Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:03.339742Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626177428002098:2079] 1750799403010286 != 1750799403010289 2025-06-24T21:10:03.435400Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:03.435590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:03.437287Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19632 TServer::EnableGrpc on GrpcPort 1688, node 1 2025-06-24T21:10:03.652922Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:03.652963Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:03.652994Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:03.653141Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:10:03.964957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:04.021572Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:05.275561Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:05.277250Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.287368Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-24T21:10:05.287419Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-24T21:10:05.287452Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:05.287512Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.287759Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.287856Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.287913Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.287949Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.289004Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-24T21:10:05.289026Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-24T21:10:05.289061Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-06-24T21:10:05.289232Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-24T21:10:05.289245Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-24T21:10:05.289265Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-06-24T21:10:05.289456Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-24T21:10:05.289472Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-24T21:10:05.289494Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. 
Full table path:/dc-1/.metadata/result_sets 2025-06-24T21:10:05.293261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:05.294878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:05.296905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:05.302233Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-06-24T21:10:05.302250Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-06-24T21:10:05.302291Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976710660 2025-06-24T21:10:05.302294Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976710659 2025-06-24T21:10:05.302365Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-06-24T21:10:05.302384Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976710658 2025-06-24T21:10:05.418456Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976710660. Doublechecking... 2025-06-24T21:10:05.441027Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976710659. Doublechecking... 2025-06-24T21:10:05.480590Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976710658. Doublechecking... 2025-06-24T21:10:05.509191Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. Column diff is empty, finishing 2025-06-24T21:10:05.516788Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-06-24T21:10:05.552739Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. 
Column diff is empty, finishing 2025-06-24T21:10:05.553196Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 805c2d62-fbc225ab-1a32e6b0-f9898a, Bootstrap. Database: /dc-1 2025-06-24T21:10:05.571392Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274303.980262s seconds to be completed 2025-06-24T21:10:05.573888Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=1&id=OGMzZDY2OWUtOWUyZGNkNDgtMmM5Mzk5NWItOTU5YTlhNjM=, workerId: [1:7519626190312904859:2292], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-24T21:10:05.574058Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:05.574990Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 805c2d62-fbc225ab-1a32e6b0-f9898a, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, $execution_id, CurrentUtcTimestamp() + $lease_duration, 1, CurrentUtcTimestamp() + $execution_meta_ttl); 2025-06-24T21:10:05.575502Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: ... UG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 10, sender: [2:7519626211867071161:2329], selfId: [2:7519626198982168347:2151], source: [2:7519626211867071158:2328] 2025-06-24T21:10:10.548519Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 1b4ead15-1ea1f739-dd8b1d0b-fe4e4c0c, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YzBhOTg5MzgtZDY3OTc5NTEtNDQwZjlhYjctZGE2NjhlYzA=, TxId: 2025-06-24T21:10:10.548545Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 1b4ead15-1ea1f739-dd8b1d0b-fe4e4c0c, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YzBhOTg5MzgtZDY3OTc5NTEtNDQwZjlhYjctZGE2NjhlYzA=, TxId: 2025-06-24T21:10:10.548649Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1911: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 1b4ead15-1ea1f739-dd8b1d0b-fe4e4c0c, start saving rows range [0; 1) 2025-06-24T21:10:10.548709Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 1b4ead15-1ea1f739-dd8b1d0b-fe4e4c0c, Bootstrap. 
Database: /dc-1 2025-06-24T21:10:10.548723Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=YzBhOTg5MzgtZDY3OTc5NTEtNDQwZjlhYjctZGE2NjhlYzA=, workerId: [2:7519626211867071158:2328], local sessions count: 2 2025-06-24T21:10:10.548794Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274299.002833s seconds to be completed 2025-06-24T21:10:10.550774Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=NTUwYTA4NzYtZGE2NDlhZmYtZTcwN2I5NWQtMzc4MGJhZjY=, workerId: [2:7519626211867071350:2342], database: /dc-1, longSession: 1, local sessions count: 3 2025-06-24T21:10:10.550936Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:10.551164Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 1b4ead15-1ea1f739-dd8b1d0b-fe4e4c0c, RunDataQuery: -- TSaveScriptExecutionResultQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_id AS Int32; DECLARE $expire_at AS Optional; DECLARE $items AS List>; UPSERT INTO `.metadata/result_sets` SELECT $database as database, $execution_id as execution_id, $result_set_id as result_set_id, T.row_id as row_id, $expire_at as expire_at, T.result_set as result_set, T.accumulated_size as accumulated_size FROM AS_TABLE($items) AS T; 2025-06-24T21:10:10.551405Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=NTUwYTA4NzYtZGE2NDlhZmYtZTcwN2I5NWQtMzc4MGJhZjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 12, targetId: [2:7519626211867071350:2342] 2025-06-24T21:10:10.551434Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 12 timeout: 300.000000s actor id: [2:7519626211867071352:2687] 2025-06-24T21:10:10.568111Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-24T21:10:10.570599Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-24T21:10:10.571064Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-24T21:10:10.576283Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-24T21:10:10.595308Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-24T21:10:10.597731Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-24T21:10:10.600858Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-24T21:10:10.601770Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-24T21:10:10.608513Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-24T21:10:10.609971Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. 
Column diff is empty, finishing 2025-06-24T21:10:10.640609Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=Y2I5ZTBkNTItN2Q2Yzk4ODctZDM1YzcyNGYtZTMyZGNlMjg=, workerId: [2:7519626211867071118:2326], local sessions count: 2 2025-06-24T21:10:10.648712Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 12, sender: [2:7519626211867071351:2343], selfId: [2:7519626198982168347:2151], source: [2:7519626211867071350:2342] 2025-06-24T21:10:10.648970Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 1b4ead15-1ea1f739-dd8b1d0b-fe4e4c0c, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NTUwYTA4NzYtZGE2NDlhZmYtZTcwN2I5NWQtMzc4MGJhZjY=, TxId: 2025-06-24T21:10:10.649000Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 1b4ead15-1ea1f739-dd8b1d0b-fe4e4c0c, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NTUwYTA4NzYtZGE2NDlhZmYtZTcwN2I5NWQtMzc4MGJhZjY=, TxId: 2025-06-24T21:10:10.649070Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1943: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 1b4ead15-1ea1f739-dd8b1d0b-fe4e4c0c, result part successfully saved 2025-06-24T21:10:10.649096Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1950: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 1b4ead15-1ea1f739-dd8b1d0b-fe4e4c0c, reply SUCCESS, issues: 2025-06-24T21:10:10.649184Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=NTUwYTA4NzYtZGE2NDlhZmYtZTcwN2I5NWQtMzc4MGJhZjY=, workerId: [2:7519626211867071350:2342], local sessions count: 1 2025-06-24T21:10:10.649382Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 1b4ead15-1ea1f739-dd8b1d0b-fe4e4c0c, Bootstrap. Database: /dc-1 2025-06-24T21:10:10.649500Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274298.902129s seconds to be completed 2025-06-24T21:10:10.650966Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=ZjU3YjMxODUtYjBhY2Y5MDQtNGVhYzM3ZC1mZDJlYmMzYQ==, workerId: [2:7519626211867071392:2351], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-24T21:10:10.651084Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:10.651338Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 1b4ead15-1ea1f739-dd8b1d0b-fe4e4c0c, RunDataQuery: -- TSaveScriptExecutionResultMetaQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_metas AS JsonDocument; UPDATE `.metadata/script_executions` SET result_set_metas = $result_set_metas WHERE database = $database AND execution_id = $execution_id; 2025-06-24T21:10:10.651588Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZjU3YjMxODUtYjBhY2Y5MDQtNGVhYzM3ZC1mZDJlYmMzYQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 14, targetId: [2:7519626211867071392:2351] 2025-06-24T21:10:10.651627Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 14 timeout: 300.000000s actor id: [2:7519626211867071394:2711] 2025-06-24T21:10:10.657286Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 14, sender: [2:7519626211867071393:2352], selfId: [2:7519626198982168347:2151], source: [2:7519626211867071392:2351] 2025-06-24T21:10:10.657420Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 1b4ead15-1ea1f739-dd8b1d0b-fe4e4c0c, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZjU3YjMxODUtYjBhY2Y5MDQtNGVhYzM3ZC1mZDJlYmMzYQ==, TxId: 2025-06-24T21:10:10.657450Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 1b4ead15-1ea1f739-dd8b1d0b-fe4e4c0c, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZjU3YjMxODUtYjBhY2Y5MDQtNGVhYzM3ZC1mZDJlYmMzYQ==, TxId: 2025-06-24T21:10:10.657613Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=ZjU3YjMxODUtYjBhY2Y5MDQtNGVhYzM3ZC1mZDJlYmMzYQ==, workerId: [2:7519626211867071392:2351], local sessions count: 1 2025-06-24T21:10:10.657681Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 1b4ead15-1ea1f739-dd8b1d0b-fe4e4c0c, Bootstrap. Database: /dc-1 2025-06-24T21:10:10.657755Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274298.893869s seconds to be completed 2025-06-24T21:10:10.659237Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=ZDUxNDU2ZDUtMWExMTlhNmEtZTkxYjE4MzktNzgzMTJmNDk=, workerId: [2:7519626211867071416:2360], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-24T21:10:10.659338Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:10.659514Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 1b4ead15-1ea1f739-dd8b1d0b-fe4e4c0c, RunDataQuery: -- TSaveScriptFinalStatusActor::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT operation_status, finalization_status, meta, customer_supplied_id, user_token, script_sinks, script_secret_names FROM `.metadata/script_executions` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); SELECT lease_generation FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-24T21:10:10.659806Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZDUxNDU2ZDUtMWExMTlhNmEtZTkxYjE4MzktNzgzMTJmNDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 16, targetId: [2:7519626211867071416:2360] 2025-06-24T21:10:10.659836Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 16 timeout: 300.000000s actor id: [2:7519626211867071418:2716] >> TProxyActorTest::TestCreateSemaphore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> KqpProxy::NoUserAccessToScriptExecutionsTable [GOOD] Test command err: 2025-06-24T21:10:03.069494Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626179902536343:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:03.069676Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:10:03.093362Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626177923074806:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:03.093425Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00480a/r3tmp/tmpRxQFGa/pdisk_1.dat 2025-06-24T21:10:03.463727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:03.463857Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:03.465682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:03.465754Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:03.481726Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:03.485272Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:10:03.485708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:03.486095Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10210 2025-06-24T21:10:04.075461Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:04.097370Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:05.414511Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:05.416059Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.425020Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-24T21:10:05.425080Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 
2025-06-24T21:10:05.425096Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:05.425144Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.425238Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.425280Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.425618Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.425645Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.475346Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:05.476458Z node 2 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.487496Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=MWY4YTA4OGUtZGFiZGVhNDYtYmQ5NGU1NGYtNGQ2MmVjMzk=, workerId: [2:7519626186513009677:2267], database: , longSession: 1, local sessions count: 1 2025-06-24T21:10:05.487539Z node 2 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.487671Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:05.487720Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-24T21:10:05.487743Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-24T21:10:05.487756Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:05.487788Z node 2 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.487843Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.487936Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.487971Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.487989Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.488003Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.488440Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=2&id=MWY4YTA4OGUtZGFiZGVhNDYtYmQ5NGU1NGYtNGQ2MmVjMzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 600.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 2, targetId: [2:8678280833929343339:121] 2025-06-24T21:10:05.488495Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 2 timeout: 600.000000s actor id: [1:7519626188492471693:2465] 2025-06-24T21:10:05.488780Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=2&id=MWY4YTA4OGUtZGFiZGVhNDYtYmQ5NGU1NGYtNGQ2MmVjMzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 600.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [2:7519626186513009677:2267] 2025-06-24T21:10:05.488822Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 3 timeout: 600.000000s actor id: [2:7519626186513009678:2119] 2025-06-24T21:10:05.489877Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626186513009679:2268], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:05.489872Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626188492471692:2272], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:05.489981Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:05.489981Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:06.051513Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: TraceId: "01jyhwdmdgahhd7z7g9j9nhg66", Created new session, sessionId: ydb://session/3?node_id=2&id=MmFhOGQ3NzktNTA4OWRiZjQtZmM5ODZmODktMTVhZDY2MjA=, workerId: [2:7519626190807976986:2270], database: , longSession: 0, local sessions count: 2 2025-06-24T21:10:06.051757Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: 01jyhwdmdgahhd7z7g9j9nhg66, Database: , DatabaseId: , SessionId: ydb://session/3?node_id=2&id=MmFhOGQ3NzktNTA4OWRiZjQtZmM5ODZmODktMTVhZDY2MjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 4, targetId: [2:7519626190807976986:2270] 2025-06-24T21:10:06.051791Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 4 timeout: 300.000000s actor id: [2:7519626190807976988:2122] 2025-06-24T21:10:06.052052Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626190807976987:2271], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:06.052112Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:06.052189Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626190807976993:2274], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:06.057748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:06.087774Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626190807976995:2275], DatabaseId: /dc-1, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-24T21:10:06.261165Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626190807977025:2133] txid# 281474976720658, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:06.436687Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: TraceId: "01jyhwdmdgahhd7z7g9j9nhg66", Forwarded response to sender actor, requestId: 4, sender: [2:7519626190807976985:2269], selfId: [2:7519626177923074901:2162], source: [2:7519626190807976986:2270] 2025-06-24T21:10:06.437000Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Sessi ... ice] [TPoolFetcherActor] ActorId: [3:7519626211680352810:2322], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:10.175165Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:10.188783Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519626211680352812:2323], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-24T21:10:10.265421Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519626211680352876:3009] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:10.436061Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 5, sender: [3:7519626211680352803:2318], selfId: [3:7519626198795449615:2172], source: [3:7519626211680352802:2317] 2025-06-24T21:10:10.436414Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 4328d904-17a8bb37-137c7a82-c3891911, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=3&id=NjdlODNjOTctMTAwYWQzYWMtY2Y2YWZmNjUtMzRmZWMwN2Q=, TxId: 2025-06-24T21:10:10.436474Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 4328d904-17a8bb37-137c7a82-c3891911, Finish with SUCCESS, SessionId: ydb://session/3?node_id=3&id=NjdlODNjOTctMTAwYWQzYWMtY2Y2YWZmNjUtMzRmZWMwN2Q=, TxId: 2025-06-24T21:10:10.436494Z node 3 :KQP_PROXY DEBUG: kqp_script_executions.cpp:304: [ScriptExecutions] Create script execution operation. ExecutionId: 4328d904-17a8bb37-137c7a82-c3891911. Result: SUCCESS. Issues: 2025-06-24T21:10:10.436785Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=3&id=NjdlODNjOTctMTAwYWQzYWMtY2Y2YWZmNjUtMzRmZWMwN2Q=, workerId: [3:7519626211680352802:2317], local sessions count: 0 2025-06-24T21:10:10.438988Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=M2I5OTJlOWMtOTQ3ZjNjZGUtZmUyYjFiNi1jMGZiMTcxNw==, workerId: [3:7519626211680352947:2335], database: /Root, longSession: 1, local sessions count: 1 2025-06-24T21:10:10.439096Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:10.439288Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TScriptProgressActor] TraceId: 4328d904-17a8bb37-137c7a82-c3891911, Bootstrap. Database: /Root 2025-06-24T21:10:10.439413Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: 01jyhwdrqb9n1k7ktt3376xp7p, Database: /Root, DatabaseId: , SessionId: ydb://session/3?node_id=3&id=M2I5OTJlOWMtOTQ3ZjNjZGUtZmUyYjFiNi1jMGZiMTcxNw==, CurrentExecutionId: 4328d904-17a8bb37-137c7a82-c3891911, CustomerSuppliedId: 01jyhwdrqb9n1k7ktt3376xp7p, PoolId: }. TEvQueryRequest, set timer for: 604800.000000s timeout: 604800.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 7, targetId: [3:7519626211680352947:2335] 2025-06-24T21:10:10.439445Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 7 timeout: 604800.000000s actor id: [3:7519626211680352950:3051] 2025-06-24T21:10:10.439688Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274299.111966s seconds to be completed 2025-06-24T21:10:10.441154Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=MTA2Yzg0MGEtNGZjNDUwMGUtMWExMzVhNzQtYzZjMzVkOTk=, workerId: [3:7519626211680352958:2341], database: /Root, longSession: 1, local sessions count: 2 2025-06-24T21:10:10.441249Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:10.441562Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TScriptProgressActor] TraceId: 4328d904-17a8bb37-137c7a82-c3891911, RunDataQuery: -- TScriptProgressActor::OnRunQuery DECLARE $execution_id AS Text; DECLARE $database AS Text; DECLARE $plan AS JsonDocument; DECLARE $execution_status AS Int32; UPSERT INTO `.metadata/script_executions` (execution_id, database, plan, execution_status) VALUES ($execution_id, $database, $plan, $execution_status); 2025-06-24T21:10:10.441853Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /Root, DatabaseId: , SessionId: ydb://session/3?node_id=3&id=MTA2Yzg0MGEtNGZjNDUwMGUtMWExMzVhNzQtYzZjMzVkOTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 9, targetId: [3:7519626211680352958:2341] 2025-06-24T21:10:10.441876Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 9 timeout: 300.000000s actor id: [3:7519626211680352962:3056] 2025-06-24T21:10:10.455877Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: TraceId: "01jyhwds8p6q2ky3jb7fmxz6ye", Request has 18444993274299.095776s seconds to be completed 2025-06-24T21:10:10.457652Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: TraceId: "01jyhwds8p6q2ky3jb7fmxz6ye", Created new session, sessionId: ydb://session/3?node_id=3&id=NjBhZjk0OTgtNmI4ZGI0NGUtOTkzODE1MmQtOWMyMzQ1MGM=, workerId: [3:7519626211680352969:2347], database: /Root, longSession: 1, local sessions count: 3 2025-06-24T21:10:10.457770Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 01jyhwds8p6q2ky3jb7fmxz6ye 2025-06-24T21:10:10.504856Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: 01jyhwds90f3gs9s5rn3t2s429, Database: /Root, DatabaseId: , SessionId: ydb://session/3?node_id=3&id=NjBhZjk0OTgtNmI4ZGI0NGUtOTkzODE1MmQtOWMyMzQ1MGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 11, targetId: [3:7519626211680352969:2347] 2025-06-24T21:10:10.504897Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 11 timeout: 300.000000s actor id: [3:7519626211680352979:3063] 2025-06-24T21:10:10.507257Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 4328d904-17a8bb37-137c7a82-c3891911, Bootstrap. 
Database: /Root 2025-06-24T21:10:10.509995Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274299.041636s seconds to be completed 2025-06-24T21:10:10.512146Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=MzhkM2JhZmItMTM3YTk1YjUtNmZkZmViMjMtZmQ3MjQyMzM=, workerId: [3:7519626211680352995:2352], database: /Root, longSession: 1, local sessions count: 4 2025-06-24T21:10:10.512290Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:10.512375Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: TraceId: "01jyhwdrqb9n1k7ktt3376xp7p", Forwarded response to sender actor, requestId: 7, sender: [3:7519626211680352799:2954], selfId: [3:7519626198795449615:2172], source: [3:7519626211680352947:2335] 2025-06-24T21:10:10.513129Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 4328d904-17a8bb37-137c7a82-c3891911, RunDataQuery: -- TSaveScriptExecutionResultMetaQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_metas AS JsonDocument; UPDATE `.metadata/script_executions` SET result_set_metas = $result_set_metas WHERE database = $database AND execution_id = $execution_id; 2025-06-24T21:10:10.513532Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /Root, DatabaseId: , SessionId: ydb://session/3?node_id=3&id=MzhkM2JhZmItMTM3YTk1YjUtNmZkZmViMjMtZmQ3MjQyMzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 13, targetId: [3:7519626211680352995:2352] 2025-06-24T21:10:10.513564Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 13 timeout: 300.000000s actor id: [3:7519626211680352997:3068] 2025-06-24T21:10:10.525055Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519626211680353003:3072], for# user@builtin, access# DescribeSchema 2025-06-24T21:10:10.525094Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519626211680353003:3072], for# user@builtin, access# DescribeSchema 2025-06-24T21:10:10.535402Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519626211680352982:2350], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/Root/.metadata/script_executions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:10:10.537594Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 9, sender: [3:7519626211680352961:2343], selfId: [3:7519626198795449615:2172], source: [3:7519626211680352958:2341] 2025-06-24T21:10:10.537726Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TScriptProgressActor] TraceId: 4328d904-17a8bb37-137c7a82-c3891911, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=3&id=MTA2Yzg0MGEtNGZjNDUwMGUtMWExMzVhNzQtYzZjMzVkOTk=, TxId: 2025-06-24T21:10:10.537750Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TScriptProgressActor] TraceId: 4328d904-17a8bb37-137c7a82-c3891911, Finish with SUCCESS, SessionId: ydb://session/3?node_id=3&id=MTA2Yzg0MGEtNGZjNDUwMGUtMWExMzVhNzQtYzZjMzVkOTk=, TxId: 2025-06-24T21:10:10.537779Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=NjBhZjk0OTgtNmI4ZGI0NGUtOTkzODE1MmQtOWMyMzQ1MGM=, ActorId: [3:7519626211680352969:2347], ActorState: ExecuteState, TraceId: 01jyhwds90f3gs9s5rn3t2s429, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:10:10.537966Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: TraceId: "01jyhwds90f3gs9s5rn3t2s429", Forwarded response to sender actor, requestId: 11, sender: [3:7519626211680352976:2349], selfId: [3:7519626198795449615:2172], source: [3:7519626211680352969:2347] 2025-06-24T21:10:10.538559Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=3&id=MTA2Yzg0MGEtNGZjNDUwMGUtMWExMzVhNzQtYzZjMzVkOTk=, workerId: [3:7519626211680352958:2341], local sessions count: 3 2025-06-24T21:10:10.542698Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=3&id=NjBhZjk0OTgtNmI4ZGI0NGUtOTkzODE1MmQtOWMyMzQ1MGM=, workerId: [3:7519626211680352969:2347], local sessions count: 2 >> TVersions::Wreck2Reverse [GOOD] >> TVersions::Wreck1 >> TProxyActorTest::TestCreateSemaphoreInterrupted [GOOD] >> TProxyActorTest::TestCreateSemaphore [GOOD] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TProxyActorTest::TestCreateSemaphoreInterrupted [GOOD] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TProxyActorTest::TestCreateSemaphore [GOOD] >> KqpProxy::NodeDisconnectedTest [GOOD] >> TKeyValueTest::TestCopyRangeWorks [GOOD] >> TKeyValueTest::TestCopyRangeWorksNewApi >> KqpReturning::ReturningWorksIndexedOperationsWithDefault-QueryService [GOOD] >> KqpSort::ComplexPkExclusiveSecondOptionalPredicate |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TTxDataShardMiniKQL::CrossShard_3_AllToOne [GOOD] >> TTxDataShardMiniKQL::CrossShard_4_OneToAll >> KqpSqlIn::SecondaryIndex_TupleLiteral [GOOD] >> KqpSqlIn::SecondaryIndex_TupleSelect >> ScriptExecutionsTest::UpdatesLeaseAfterExpiring [GOOD] >> ScriptExecutionsTest::AttemptToUpdateDeletedLease [GOOD] |94.5%| [TA] $(B)/ydb/core/kesus/proxy/ut/test-results/unittest/{meta.json ... results_accumulator.log} |94.5%| [TA] {RESULT} $(B)/ydb/core/kesus/proxy/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> KqpProxy::NodeDisconnectedTest [GOOD] Test command err: 2025-06-24T21:10:03.012979Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626180887534362:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:03.013042Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047cb/r3tmp/tmpZh3tmW/pdisk_1.dat 2025-06-24T21:10:03.354791Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626176592567033:2079] 1750799403010276 != 1750799403010279 2025-06-24T21:10:03.354993Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:03.427442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:03.427659Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:03.429159Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13724 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:10:03.638159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:10:04.020865Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:05.220713Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:05.222218Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.245121Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=1&id=ZjA1N2JmMWUtNTMwZjIyYTAtOTcyMTJlMjctNjcxOWNlMzI=, workerId: [1:7519626189477469519:2269], database: , longSession: 0, local sessions count: 1 2025-06-24T21:10:05.245171Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.246324Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=1&id=ZjA1N2JmMWUtNTMwZjIyYTAtOTcyMTJlMjctNjcxOWNlMzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 0.010000s timeout: 0.010000s cancelAfter: 0.000000s. Send request to target, requestId: 2, targetId: [1:7519626189477469519:2269] 2025-06-24T21:10:05.246364Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 2 timeout: 0.010000s actor id: [0:0:0] 2025-06-24T21:10:05.246399Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-24T21:10:05.246431Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-24T21:10:05.246449Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:05.246502Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.246641Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.246687Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.246744Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.246762Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.246781Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.248548Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2246: SessionId: ydb://session/3?node_id=1&id=ZjA1N2JmMWUtNTMwZjIyYTAtOTcyMTJlMjctNjcxOWNlMzI=, ActorId: [1:7519626189477469519:2269], ActorState: ReadyState, Reply query error, msg:
: Error: SomeUniqTextForUt proxyRequestId: 2 2025-06-24T21:10:05.248886Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626189477469520:2270], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:05.248989Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 2, sender: [1:7519626180887534875:2281], selfId: [1:7519626180887534563:2237], source: [1:7519626189477469519:2269] 2025-06-24T21:10:05.249034Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:05.256265Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(2) 2025-06-24T21:10:05.256290Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1337: Invalid request info while on request timeout handle. RequestId: 2 2025-06-24T21:10:10.342775Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:463:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:10:10.343328Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:10:10.343527Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:10:10.344464Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:458:2158], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:10:10.344675Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:10:10.344768Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047cb/r3tmp/tmp5fFdgN/pdisk_1.dat 2025-06-24T21:10:10.608479Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TClient is connected to server localhost:15045 KQP PROXY1 [2:8678280833929343339:121] KQP PROXY2 [3:8678280833929343339:121] SENDER [2:1062:2645] 2025-06-24T21:10:10.882907Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=NTFiNDk3ZGQtMTk0YjA0M2ItZWE3YjI4MjYtNGUwNzVmOWU=, workerId: [3:1063:2337], database: , longSession: 1, local sessions count: 1 2025-06-24T21:10:10.883080Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: Created session ydb://session/3?node_id=3&id=NTFiNDk3ZGQtMTk0YjA0M2ItZWE3YjI4MjYtNGUwNzVmOWU= 2025-06-24T21:10:10.883628Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=3&id=NTFiNDk3ZGQtMTk0YjA0M2ItZWE3YjI4MjYtNGUwNzVmOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 0.001000s timeout: 0.001000s cancelAfter: 0.000000s. Send request to target, requestId: 2, targetId: [3:8678280833929343339:121] 2025-06-24T21:10:10.883688Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 2 timeout: 0.001000s actor id: [0:0:0] 2025-06-24T21:10:10.884213Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=3&id=NTFiNDk3ZGQtMTk0YjA0M2ItZWE3YjI4MjYtNGUwNzVmOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 0.001000s timeout: 0.001000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [3:1063:2337] 2025-06-24T21:10:10.884244Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 3 timeout: 0.001000s actor id: [0:0:0] 2025-06-24T21:10:11.121189Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:1064:2646], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:11.121302Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:11.121551Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:1066:2338], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:11.121597Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:11.143813Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(3 ... :12.591617Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(57) 2025-06-24T21:10:12.591699Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1342: Reply timeout: requestId 57 sessionId: ydb://session/3?node_id=3&id=ZTE5M2IwNDUtZDBkMjk2M2EtYTY5OWZkZDAtZjlkNjVhYzU= status: TIMEOUT round: 0 2025-06-24T21:10:12.591836Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 57, sender: [2:1062:2645], selfId: [2:210:2171], source: [2:210:2171] 2025-06-24T21:10:12.594017Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=N2FlNmQwY2ItNGI3MTc4YzctMjZkNGEwODktMjQ5Y2I3YzQ=, workerId: [3:1335:2481], database: , longSession: 1, local sessions count: 57 2025-06-24T21:10:12.594208Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: Created session ydb://session/3?node_id=3&id=N2FlNmQwY2ItNGI3MTc4YzctMjZkNGEwODktMjQ5Y2I3YzQ= 2025-06-24T21:10:12.594776Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=3&id=N2FlNmQwY2ItNGI3MTc4YzctMjZkNGEwODktMjQ5Y2I3YzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 0.001000s timeout: 0.001000s cancelAfter: 0.000000s. Send request to target, requestId: 58, targetId: [3:8678280833929343339:121] 2025-06-24T21:10:12.594851Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 58 timeout: 0.001000s actor id: [0:0:0] 2025-06-24T21:10:12.595303Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=3&id=N2FlNmQwY2ItNGI3MTc4YzctMjZkNGEwODktMjQ5Y2I3YzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 0.001000s timeout: 0.001000s cancelAfter: 0.000000s. Send request to target, requestId: 87, targetId: [3:1335:2481] 2025-06-24T21:10:12.595345Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 87 timeout: 0.001000s actor id: [0:0:0] 2025-06-24T21:10:12.596737Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:1336:2709], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:12.596921Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:12.626971Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:1338:2482], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:12.627199Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:12.637796Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(87) 2025-06-24T21:10:12.637873Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1342: Reply timeout: requestId 87 sessionId: ydb://session/3?node_id=3&id=N2FlNmQwY2ItNGI3MTc4YzctMjZkNGEwODktMjQ5Y2I3YzQ= status: TIMEOUT round: 0 2025-06-24T21:10:12.637949Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(58) 2025-06-24T21:10:12.637983Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1342: Reply timeout: requestId 58 sessionId: ydb://session/3?node_id=3&id=N2FlNmQwY2ItNGI3MTc4YzctMjZkNGEwODktMjQ5Y2I3YzQ= status: TIMEOUT round: 0 2025-06-24T21:10:12.638119Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=N2FlNmQwY2ItNGI3MTc4YzctMjZkNGEwODktMjQ5Y2I3YzQ=, ActorId: [3:1335:2481], ActorState: ExecuteState, TraceId: 01jyhwdvbk6w8re3c5812tdd1h, Create QueryResponse for error on request, msg: 2025-06-24T21:10:12.638225Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 58, sender: [2:1062:2645], selfId: [2:210:2171], source: [2:210:2171] 2025-06-24T21:10:12.640656Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 87, sender: [2:210:2171], selfId: [3:239:2127], source: [3:1335:2481] 2025-06-24T21:10:12.640871Z node 2 :KQP_PROXY ERROR: kqp_proxy_service.cpp:957: Unknown sender for proxy response, requestId: 58 2025-06-24T21:10:12.642950Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=OTZhZTQ5NWItYjUyOTRiZjAtZDBmOGU5OTMtMmRjOGY0ODM=, workerId: [3:1342:2485], database: , longSession: 1, local sessions count: 58 2025-06-24T21:10:12.643122Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:12.643549Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:902: Received ping session request, request_id: 59, sender: [2:1062:2645], trace_id: 2025-06-24T21:10:12.643671Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 59 timeout: 0.001000s actor id: [0:0:0] 2025-06-24T21:10:12.643827Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:856: Received ping session request, has local session: ydb://session/3?node_id=3&id=OTZhZTQ5NWItYjUyOTRiZjAtZDBmOGU5OTMtMmRjOGY0ODM=, rpc ctrl: [0:0:0], sameNode: 0, trace_id: 2025-06-24T21:10:12.643938Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 59, sender: [2:1062:2645], selfId: [2:210:2171], source: [3:239:2127] 2025-06-24T21:10:12.645748Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=OTM0ZGUxYjktNDYwZTUzNjctOWFkOWUyNzctNDJmYzkxODM=, workerId: [3:1343:2486], database: , longSession: 1, local sessions count: 59 2025-06-24T21:10:12.645936Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: Created session ydb://session/3?node_id=3&id=OTM0ZGUxYjktNDYwZTUzNjctOWFkOWUyNzctNDJmYzkxODM= 2025-06-24T21:10:12.646405Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=3&id=OTM0ZGUxYjktNDYwZTUzNjctOWFkOWUyNzctNDJmYzkxODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
TEvQueryRequest, set timer for: 0.001000s timeout: 0.001000s cancelAfter: 0.000000s. Send request to target, requestId: 60, targetId: [3:8678280833929343339:121] 2025-06-24T21:10:12.646464Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 60 timeout: 0.001000s actor id: [0:0:0] 2025-06-24T21:10:12.646851Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=3&id=OTM0ZGUxYjktNDYwZTUzNjctOWFkOWUyNzctNDJmYzkxODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 0.001000s timeout: 0.001000s cancelAfter: 0.000000s. Send request to target, requestId: 90, targetId: [3:1343:2486] 2025-06-24T21:10:12.646892Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 90 timeout: 0.001000s actor id: [0:0:0] 2025-06-24T21:10:12.648215Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:1344:2711], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:12.648435Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:12.673005Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:1346:2487], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:12.673243Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:12.683711Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(90) 2025-06-24T21:10:12.683770Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1342: Reply timeout: requestId 90 sessionId: ydb://session/3?node_id=3&id=OTM0ZGUxYjktNDYwZTUzNjctOWFkOWUyNzctNDJmYzkxODM= status: TIMEOUT round: 0 2025-06-24T21:10:12.683837Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(59) 2025-06-24T21:10:12.683863Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1337: Invalid request info while on request timeout handle. RequestId: 59 2025-06-24T21:10:12.683945Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=OTM0ZGUxYjktNDYwZTUzNjctOWFkOWUyNzctNDJmYzkxODM=, ActorId: [3:1343:2486], ActorState: ExecuteState, TraceId: 01jyhwdvd671jc6rq639mbt0cw, Create QueryResponse for error on request, msg: 2025-06-24T21:10:12.684043Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(60) 2025-06-24T21:10:12.684069Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1342: Reply timeout: requestId 60 sessionId: ydb://session/3?node_id=3&id=OTM0ZGUxYjktNDYwZTUzNjctOWFkOWUyNzctNDJmYzkxODM= status: TIMEOUT round: 0 2025-06-24T21:10:12.685819Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 60, sender: [2:1062:2645], selfId: [2:210:2171], source: [2:210:2171] 2025-06-24T21:10:12.686016Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 90, sender: [2:210:2171], selfId: [3:239:2127], source: [3:1343:2486] 2025-06-24T21:10:12.686207Z node 2 :KQP_PROXY ERROR: kqp_proxy_service.cpp:957: Unknown sender for proxy response, requestId: 60 2025-06-24T21:10:12.688230Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=OTUyMGQ2ZTEtZWMxODkxMDUtOTZmMDQyOTItM2YzNThmNGM=, workerId: [3:1350:2490], database: , longSession: 1, local sessions count: 60 2025-06-24T21:10:12.688407Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:12.688774Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:902: Received ping session request, request_id: 61, sender: [2:1062:2645], trace_id: 2025-06-24T21:10:12.688900Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 61 timeout: 0.001000s actor id: [0:0:0] 2025-06-24T21:10:12.710735Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(61) 2025-06-24T21:10:12.710803Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1342: Reply timeout: requestId 61 sessionId: ydb://session/3?node_id=3&id=OTUyMGQ2ZTEtZWMxODkxMDUtOTZmMDQyOTItM2YzNThmNGM= status: TIMEOUT round: 0 2025-06-24T21:10:12.710910Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 61, sender: [2:1062:2645], selfId: [2:210:2171], source: [2:210:2171] >> KqpProxy::CreatesScriptExecutionsTable [GOOD] >> KqpProxy::DatabasesCacheForServerless >> DataShardTxOrder::RandomDotRanges_DelayRS [GOOD] >> KqpNewEngine::SqlInFromCompact [GOOD] >> KqpNewEngine::SqlInAsScalar ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> ScriptExecutionsTest::AttemptToUpdateDeletedLease [GOOD] Test command err: 2025-06-24T21:10:05.131696Z node 1 
:METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626187997454602:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:05.131767Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047b6/r3tmp/tmpXp7ofy/pdisk_1.dat 2025-06-24T21:10:05.421480Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8224, node 1 2025-06-24T21:10:05.478029Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:05.478053Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:05.478064Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:05.478221Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:05.517672Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:05.517785Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:05.520552Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8233 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:05.722294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:10:06.140051Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:07.313676Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:07.314823Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /Root, empty 2025-06-24T21:10:07.325224Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:902: Received ping session request, request_id: 2, sender: [1:7519626187997455477:2276], trace_id: 01jyhwdmp53yvz6brb17e58c0e 2025-06-24T21:10:07.325463Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 2 timeout: 5.000000s actor id: [0:0:0] 2025-06-24T21:10:07.325500Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-24T21:10:07.325515Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-24T21:10:07.325530Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:07.325565Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /Root, empty 2025-06-24T21:10:07.325724Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:07.325760Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:07.325792Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:07.325813Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:07.325847Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:573: Session not found, targetId: [2:8678280833929343339:121] requestId: 2 2025-06-24T21:10:07.328421Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: TraceId: "01jyhwdmp53yvz6brb17e58c0e", Forwarded response to sender actor, requestId: 2, sender: [1:7519626187997455477:2276], selfId: [1:7519626187997454786:2240], source: [1:7519626187997454786:2240] 2025-06-24T21:10:08.462660Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626203113353456:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:08.462772Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047b6/r3tmp/tmpl9Pwr6/pdisk_1.dat 2025-06-24T21:10:08.548482Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:08.549471Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519626203113353436:2079] 1750799408462198 != 1750799408462201 2025-06-24T21:10:08.589123Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:08.589201Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: 
Disconnected -> Connecting 2025-06-24T21:10:08.590650Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25850 TServer::EnableGrpc on GrpcPort 27335, node 4 2025-06-24T21:10:08.721631Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:08.721649Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:08.721654Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:08.721749Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:10:08.772438Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:09.469724Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:10.392749Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:10.393800Z node 4 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:10.403734Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-24T21:10:10.403777Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 
2025-06-24T21:10:10.403800Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:10.403848Z node 4 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:10.403949Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:10.403992Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:10.404016Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:10.404039Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:10.404993Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-24T21:10:10.405011Z node 4 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-24T21:10:10.405044Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-06-24T21:10:10.405117Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-24T21:10:10.405120Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-24T21:10:10.405125Z node 4 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-24T21:10:10.405127Z node 4 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-24T21:10:10.405139Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-06-24T21:10:10.405142Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:100: Table scrip ... equest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 24, targetId: [4:7519626224588191209:2415] 2025-06-24T21:10:13.262639Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 24 timeout: 300.000000s actor id: [4:7519626224588191211:2633] 2025-06-24T21:10:13.267226Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 24, sender: [4:7519626224588191210:2416], selfId: [4:7519626203113353555:2157], source: [4:7519626224588191209:2415] 2025-06-24T21:10:13.267376Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 737fb459-f37232d8-f6a1e78-b00ba700, State: Get operation info, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=4&id=ODk3YjVkYzYtMTI3ZDg4OTgtZDEyZjhiMTItNjc3NjVkZDI=, TxId: 01jyhwdw0f08pmarpmec56wjyc 2025-06-24T21:10:13.267747Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 737fb459-f37232d8-f6a1e78-b00ba700, State: Get operation info, RunDataQuery: -- TSaveScriptFinalStatusActor::FinishScriptExecution DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $operation_status AS Int32; DECLARE $execution_status AS Int32; DECLARE $finalization_status AS Int32; DECLARE $issues AS JsonDocument; DECLARE $plan AS JsonDocument; DECLARE $stats AS JsonDocument; DECLARE $ast AS Optional; DECLARE $ast_compressed AS Optional; DECLARE $ast_compression_method AS Optional; DECLARE $operation_ttl AS Interval; DECLARE $customer_supplied_id AS Text; DECLARE $user_token AS Text; DECLARE $script_sinks AS Optional; DECLARE $script_secret_names AS Optional; DECLARE $applicate_script_external_effect_required AS Bool; UPDATE `.metadata/script_executions` SET operation_status = $operation_status, execution_status = $execution_status, finalization_status = IF($applicate_script_external_effect_required, $finalization_status, NULL), issues = $issues, plan = $plan, end_ts = CurrentUtcTimestamp(), stats = $stats, ast = $ast, ast_compressed = $ast_compressed, ast_compression_method = $ast_compression_method, expire_at = IF($operation_ttl > CAST(0 AS Interval), CurrentUtcTimestamp() + $operation_ttl, NULL), customer_supplied_id = IF($applicate_script_external_effect_required, $customer_supplied_id, NULL), user_token = IF($applicate_script_external_effect_required, $user_token, NULL), script_sinks = IF($applicate_script_external_effect_required, $script_sinks, NULL), script_secret_names = IF($applicate_script_external_effect_required, $script_secret_names, NULL) WHERE database = $database AND execution_id = $execution_id; DELETE FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id; 2025-06-24T21:10:13.268025Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=ODk3YjVkYzYtMTI3ZDg4OTgtZDEyZjhiMTItNjc3NjVkZDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 25, targetId: [4:7519626224588191209:2415] 2025-06-24T21:10:13.268047Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 25 timeout: 300.000000s actor id: [4:7519626224588191234:2640] 2025-06-24T21:10:13.274504Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 25, sender: [4:7519626224588191233:2422], selfId: [4:7519626203113353555:2157], source: [4:7519626224588191209:2415] 2025-06-24T21:10:13.274792Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 737fb459-f37232d8-f6a1e78-b00ba700, State: Update final status, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=4&id=ODk3YjVkYzYtMTI3ZDg4OTgtZDEyZjhiMTItNjc3NjVkZDI=, TxId: 2025-06-24T21:10:13.274841Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 737fb459-f37232d8-f6a1e78-b00ba700, State: Update final status, Finish with SUCCESS, SessionId: ydb://session/3?node_id=4&id=ODk3YjVkYzYtMTI3ZDg4OTgtZDEyZjhiMTItNjc3NjVkZDI=, TxId: 2025-06-24T21:10:13.274879Z node 4 :KQP_PROXY DEBUG: kqp_script_executions.cpp:2658: [ScriptExecutions] Finish script execution operation. ExecutionId: 737fb459-f37232d8-f6a1e78-b00ba700. UNAVAILABLE. Issues: {
: Error: Lease expired } 2025-06-24T21:10:13.274972Z node 4 :KQP_PROXY DEBUG: kqp_script_executions.cpp:633: [ScriptExecutions] [TCheckLeaseStatusActor] ExecutionId: 737fb459-f37232d8-f6a1e78-b00ba700, successfully finalized script execution operation 2025-06-24T21:10:13.274995Z node 4 :KQP_PROXY DEBUG: kqp_script_executions.cpp:838: [ScriptExecutions] [TCheckLeaseStatusActor] ExecutionId: 737fb459-f37232d8-f6a1e78-b00ba700, reply success 2025-06-24T21:10:13.275027Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=4&id=ODk3YjVkYzYtMTI3ZDg4OTgtZDEyZjhiMTItNjc3NjVkZDI=, workerId: [4:7519626224588191209:2415], local sessions count: 1 2025-06-24T21:10:13.280461Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: 01jyhwdw10db71eqkxfwg9x1wr, Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=N2FkMjg5ODktNzE5MTIzYTctM2EyZjZmYmUtNjZiNzEyZTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 26, targetId: [4:7519626215998256327:2326] 2025-06-24T21:10:13.280496Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 26 timeout: 300.000000s actor id: [4:7519626224588191260:2648] 2025-06-24T21:10:13.395514Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:13.462947Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519626203113353456:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:13.463018Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:10:13.677059Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: TraceId: "01jyhwdw10db71eqkxfwg9x1wr", Forwarded response to sender actor, requestId: 26, sender: [4:7519626224588191259:2427], selfId: [4:7519626203113353555:2157], source: [4:7519626215998256327:2326] 2025-06-24T21:10:13.678212Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TScriptLeaseUpdater] TraceId: 737fb459-f37232d8-f6a1e78-b00ba700, Bootstrap. 
Database: /dc-1 2025-06-24T21:10:13.678347Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274295.873285s seconds to be completed 2025-06-24T21:10:13.680187Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=4&id=Y2ZhZmJlNmMtYjc0YjljNjYtMjQ5N2E4ODgtOGRmZWY2Y2Y=, workerId: [4:7519626224588191309:2441], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-24T21:10:13.680322Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:13.680459Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TScriptLeaseUpdater] TraceId: 737fb459-f37232d8-f6a1e78-b00ba700, RunDataQuery: -- TScriptLeaseUpdater::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT lease_deadline FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-24T21:10:13.680687Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=Y2ZhZmJlNmMtYjc0YjljNjYtMjQ5N2E4ODgtOGRmZWY2Y2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 28, targetId: [4:7519626224588191309:2441] 2025-06-24T21:10:13.680711Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 28 timeout: 300.000000s actor id: [4:7519626224588191311:2671] 2025-06-24T21:10:13.865048Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 28, sender: [4:7519626224588191310:2442], selfId: [4:7519626203113353555:2157], source: [4:7519626224588191309:2441] 2025-06-24T21:10:13.865196Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TScriptLeaseUpdater] TraceId: 737fb459-f37232d8-f6a1e78-b00ba700, State: Get lease info, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=4&id=Y2ZhZmJlNmMtYjc0YjljNjYtMjQ5N2E4ODgtOGRmZWY2Y2Y=, TxId: 01jyhwdwk55br6k3vk1bbxa134 2025-06-24T21:10:13.865282Z node 4 :KQP_PROXY WARN: query_actor.cpp:372: [TQueryBase] [TScriptLeaseUpdater] TraceId: 737fb459-f37232d8-f6a1e78-b00ba700, State: Get lease info, Finish with BAD_REQUEST, Issues: {
: Error: No such execution }, SessionId: ydb://session/3?node_id=4&id=Y2ZhZmJlNmMtYjc0YjljNjYtMjQ5N2E4ODgtOGRmZWY2Y2Y=, TxId: 01jyhwdwk55br6k3vk1bbxa134 2025-06-24T21:10:13.865318Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:428: [TQueryBase] [TScriptLeaseUpdater] TraceId: 737fb459-f37232d8-f6a1e78-b00ba700, State: Get lease info, Rollback transaction: 01jyhwdwk55br6k3vk1bbxa134 2025-06-24T21:10:13.865468Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=Y2ZhZmJlNmMtYjc0YjljNjYtMjQ5N2E4ODgtOGRmZWY2Y2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 600.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 29, targetId: [4:7519626224588191309:2441] 2025-06-24T21:10:13.865493Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 29 timeout: 600.000000s actor id: [4:7519626224588191335:2681] 2025-06-24T21:10:13.865934Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 29, sender: [4:7519626224588191334:2449], selfId: [4:7519626203113353555:2157], source: [4:7519626224588191309:2441] 2025-06-24T21:10:13.866013Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:437: [TQueryBase] [TScriptLeaseUpdater] TraceId: 737fb459-f37232d8-f6a1e78-b00ba700, State: Get lease info, RollbackTransactionResult: SUCCESS. Issues: 2025-06-24T21:10:13.866180Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=4&id=Y2ZhZmJlNmMtYjc0YjljNjYtMjQ5N2E4ODgtOGRmZWY2Y2Y=, workerId: [4:7519626224588191309:2441], local sessions count: 1 2025-06-24T21:10:13.870853Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=4&id=N2FkMjg5ODktNzE5MTIzYTctM2EyZjZmYmUtNjZiNzEyZTc=, workerId: [4:7519626215998256327:2326], local sessions count: 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> ScriptExecutionsTest::UpdatesLeaseAfterExpiring [GOOD] Test command err: 2025-06-24T21:10:03.013004Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626179201431515:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:03.013050Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047de/r3tmp/tmpWMKh4G/pdisk_1.dat 2025-06-24T21:10:03.347475Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626174906464186:2079] 1750799403010308 != 1750799403010311 2025-06-24T21:10:03.355747Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:03.424298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:03.424430Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:03.426079Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7874 TServer::EnableGrpc on GrpcPort 19973, node 1 
2025-06-24T21:10:03.652826Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:03.652852Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:03.652859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:03.652963Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:10:03.958673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:04.022714Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:05.249995Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:05.251374Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.261060Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-24T21:10:05.261099Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 
2025-06-24T21:10:05.261118Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:05.261153Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.261267Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.261323Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.261376Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.261426Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.262962Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-24T21:10:05.262979Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-24T21:10:05.263001Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-06-24T21:10:05.263084Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-24T21:10:05.263087Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-24T21:10:05.263109Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-06-24T21:10:05.263133Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-24T21:10:05.263163Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-24T21:10:05.263173Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. 
Full table path:/dc-1/.metadata/script_execution_leases 2025-06-24T21:10:05.266571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:05.268961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:05.270450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:05.279119Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-06-24T21:10:05.279171Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-06-24T21:10:05.279194Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976715660 2025-06-24T21:10:05.279215Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976715658 2025-06-24T21:10:05.279298Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-06-24T21:10:05.279307Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976715659 2025-06-24T21:10:05.380771Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976715660. Doublechecking... 2025-06-24T21:10:05.402636Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976715659. Doublechecking... 2025-06-24T21:10:05.407834Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976715658. Doublechecking... 2025-06-24T21:10:05.445871Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-06-24T21:10:05.463729Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. Column diff is empty, finishing 2025-06-24T21:10:05.495060Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. 
Column diff is empty, finishing 2025-06-24T21:10:05.495511Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: a03b956b-919f2069-806d810f-59aaed77, Bootstrap. Database: /dc-1 2025-06-24T21:10:05.511821Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274304.039819s seconds to be completed 2025-06-24T21:10:05.514644Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=1&id=ZTU3NTliMzQtZmE1MmQzNWItZjFhM2Y4NmMtODgxNDg2OWY=, workerId: [1:7519626187791366942:2292], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-24T21:10:05.514866Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:05.515492Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: a03b956b-919f2069-806d810f-59aaed77, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, $execution_id, CurrentUtcTimestamp() + $lease_duration, 1, CurrentUtcTimestamp() + $execution_meta_ttl); 2025-06-24T21:10:05.515944Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:7 ... ze is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:12.674627Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TScriptLeaseUpdater] TraceId: 68bcc829-6723e79d-bd57ea9-d7de4479, Bootstrap. Database: /dc-1 2025-06-24T21:10:12.674756Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274296.876878s seconds to be completed 2025-06-24T21:10:12.676586Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=YjE3NjYwYWUtZjA3OWJkMWQtMzZhZjMzYjYtY2ZiY2NkNGQ=, workerId: [2:7519626218694966688:2395], database: /dc-1, longSession: 1, local sessions count: 4 2025-06-24T21:10:12.676751Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:12.677046Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TScriptLeaseUpdater] TraceId: 68bcc829-6723e79d-bd57ea9-d7de4479, RunDataQuery: -- TScriptLeaseUpdater::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT lease_deadline FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-24T21:10:12.677340Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=YjE3NjYwYWUtZjA3OWJkMWQtMzZhZjMzYjYtY2ZiY2NkNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 22, targetId: [2:7519626218694966688:2395] 2025-06-24T21:10:12.677376Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 22 timeout: 300.000000s actor id: [2:7519626218694966692:2617] 2025-06-24T21:10:12.678351Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 20, sender: [2:7519626218694966652:2385], selfId: [2:7519626201515096358:2136], source: [2:7519626214399999293:2367] 2025-06-24T21:10:12.679130Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: f8d346d6-ac6c62b2-c0c2ea25-1e178d5f, State: Update final status, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YjI3ZTAzYmEtNWQ2MWMxYzYtNzAyNzk5YzctZDUyYWE0YWE=, TxId: 2025-06-24T21:10:12.679239Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: f8d346d6-ac6c62b2-c0c2ea25-1e178d5f, State: Update final status, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YjI3ZTAzYmEtNWQ2MWMxYzYtNzAyNzk5YzctZDUyYWE0YWE=, TxId: 2025-06-24T21:10:12.679254Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:2658: [ScriptExecutions] Finish script execution operation. ExecutionId: f8d346d6-ac6c62b2-c0c2ea25-1e178d5f. SUCCESS. Issues: 2025-06-24T21:10:12.680079Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=YjI3ZTAzYmEtNWQ2MWMxYzYtNzAyNzk5YzctZDUyYWE0YWE=, workerId: [2:7519626214399999293:2367], local sessions count: 3 2025-06-24T21:10:12.680110Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=Nzc5ZGFiZTMtZjM3YjZjNGYtNmMwYTRmYTctNzhmNjQ5MTg=, workerId: [2:7519626214399999125:2310], local sessions count: 2 2025-06-24T21:10:12.902445Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 22, sender: [2:7519626218694966690:2396], selfId: [2:7519626201515096358:2136], source: [2:7519626218694966688:2395] 2025-06-24T21:10:12.902639Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TScriptLeaseUpdater] TraceId: 68bcc829-6723e79d-bd57ea9-d7de4479, State: Get lease info, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YjE3NjYwYWUtZjA3OWJkMWQtMzZhZjMzYjYtY2ZiY2NkNGQ=, TxId: 01jyhwdvn107tkx7ej227sq3yg 2025-06-24T21:10:12.902787Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TScriptLeaseUpdater] TraceId: 68bcc829-6723e79d-bd57ea9-d7de4479, State: Get lease info, RunDataQuery: -- TScriptLeaseUpdater::OnGetLeaseInfo DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $lease_duration AS Interval; UPDATE `.metadata/script_execution_leases` SET lease_deadline=(CurrentUtcTimestamp() + $lease_duration) WHERE database = $database AND execution_id = $execution_id; 2025-06-24T21:10:12.903060Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=YjE3NjYwYWUtZjA3OWJkMWQtMzZhZjMzYjYtY2ZiY2NkNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 23, targetId: [2:7519626218694966688:2395] 2025-06-24T21:10:12.903093Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 23 timeout: 300.000000s actor id: [2:7519626218694966721:2629] 2025-06-24T21:10:13.083996Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 23, sender: [2:7519626218694966720:2404], selfId: [2:7519626201515096358:2136], source: [2:7519626218694966688:2395] 2025-06-24T21:10:13.084140Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TScriptLeaseUpdater] TraceId: 68bcc829-6723e79d-bd57ea9-d7de4479, State: Update lease, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YjE3NjYwYWUtZjA3OWJkMWQtMzZhZjMzYjYtY2ZiY2NkNGQ=, TxId: 2025-06-24T21:10:13.084180Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TScriptLeaseUpdater] TraceId: 68bcc829-6723e79d-bd57ea9-d7de4479, State: Update lease, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YjE3NjYwYWUtZjA3OWJkMWQtMzZhZjMzYjYtY2ZiY2NkNGQ=, TxId: 2025-06-24T21:10:13.084389Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=YjE3NjYwYWUtZjA3OWJkMWQtMzZhZjMzYjYtY2ZiY2NkNGQ=, workerId: [2:7519626218694966688:2395], local sessions count: 1 2025-06-24T21:10:13.089388Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: 01jyhwdvv18wcg09rzeqfqg0sq, Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=NjQ1YzljYmMtODZjYWZkZDItZjVjMmVjNjAtYjc1YWIxY2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 24, targetId: [2:7519626214399999155:2326] 2025-06-24T21:10:13.089418Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 24 timeout: 300.000000s actor id: [2:7519626222989934044:2638] 2025-06-24T21:10:13.570626Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: TraceId: "01jyhwdvv18wcg09rzeqfqg0sq", Forwarded response to sender actor, requestId: 24, sender: [2:7519626222989934043:2412], selfId: [2:7519626201515096358:2136], source: [2:7519626214399999155:2326] 2025-06-24T21:10:13.571931Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:791: [ScriptExecutions] [TCheckLeaseStatusActor] ExecutionId: 68bcc829-6723e79d-bd57ea9-d7de4479, Bootstrap. Start TCheckLeaseStatusQueryActor 2025-06-24T21:10:13.572000Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: 68bcc829-6723e79d-bd57ea9-d7de4479, Bootstrap. 
Database: /dc-1 2025-06-24T21:10:13.572173Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993274295.979463s seconds to be completed 2025-06-24T21:10:13.574176Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=M2E0M2M2MzMtZTM4ODdhOTgtNmFiMDNiNzEtM2NiMTdhOTg=, workerId: [2:7519626222989934088:2425], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-24T21:10:13.574347Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:10:13.574537Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: 68bcc829-6723e79d-bd57ea9-d7de4479, RunDataQuery: -- TCheckLeaseStatusQueryActor::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT operation_status, execution_status, finalization_status, issues, run_script_actor_id FROM `.metadata/script_executions` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); SELECT lease_deadline FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-24T21:10:13.574820Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=M2E0M2M2MzMtZTM4ODdhOTgtNmFiMDNiNzEtM2NiMTdhOTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 26, targetId: [2:7519626222989934088:2425] 2025-06-24T21:10:13.574855Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 26 timeout: 300.000000s actor id: [2:7519626222989934090:2657] 2025-06-24T21:10:13.677306Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:13.836320Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626201515096279:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:13.836374Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:10:13.926277Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 26, sender: [2:7519626222989934089:2426], selfId: [2:7519626201515096358:2136], source: [2:7519626222989934088:2425] 2025-06-24T21:10:13.926479Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: 68bcc829-6723e79d-bd57ea9-d7de4479, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=M2E0M2M2MzMtZTM4ODdhOTgtNmFiMDNiNzEtM2NiMTdhOTg=, TxId: 2025-06-24T21:10:13.926602Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: 68bcc829-6723e79d-bd57ea9-d7de4479, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=M2E0M2M2MzMtZTM4ODdhOTgtNmFiMDNiNzEtM2NiMTdhOTg=, TxId: 2025-06-24T21:10:13.926679Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:838: [ScriptExecutions] [TCheckLeaseStatusActor] ExecutionId: 68bcc829-6723e79d-bd57ea9-d7de4479, reply success 2025-06-24T21:10:13.927016Z node 2 
:KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=M2E0M2M2MzMtZTM4ODdhOTgtNmFiMDNiNzEtM2NiMTdhOTg=, workerId: [2:7519626222989934088:2425], local sessions count: 1 2025-06-24T21:10:13.931982Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=NjQ1YzljYmMtODZjYWZkZDItZjVjMmVjNjAtYjc1YWIxY2Q=, workerId: [2:7519626214399999155:2326], local sessions count: 0 >> TKeyValueTest::TestRenameWorks [GOOD] >> TKeyValueTest::TestRenameToLongKey >> KqpNewEngine::JoinSameKey [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::RandomDotRanges_DelayRS [GOOD] Test command err: 2025-06-24T21:09:02.124738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:09:02.124797Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:02.128735Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:02.144338Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:02.144977Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:09:02.145334Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:02.191969Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:02.226748Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:02.227028Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:02.229008Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:09:02.229107Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:09:02.229177Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:09:02.229546Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:02.229641Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:02.229730Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:09:02.329244Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:02.380427Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:09:02.380663Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:02.380791Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:09:02.380837Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:02.380897Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot 
activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:09:02.380957Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:02.381142Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:02.381200Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:02.381574Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:02.381715Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:02.381926Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:02.381976Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:02.382030Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:02.382103Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:02.382149Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:02.382195Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:02.382250Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:02.382374Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:02.382427Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:02.382489Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:09:02.386176Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:09:02.386273Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:02.386379Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:02.386573Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:09:02.386630Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:09:02.386687Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 
2025-06-24T21:09:02.386741Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:02.386782Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:09:02.386822Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:09:02.386882Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:02.387387Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:02.387448Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:09:02.387498Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:02.387564Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:02.387643Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:09:02.387694Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:02.387740Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:09:02.387782Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:02.387814Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:02.400622Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:02.400719Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:02.400771Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:02.400821Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:02.400917Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:09:02.401540Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:02.401608Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:02.401662Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:02.401837Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:02.401884Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: 
StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:02.402038Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:02.402106Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:09:02.402150Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:09:02.402203Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:02.408437Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:02.408530Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:02.408877Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:02.408939Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:02.409007Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:02.409056Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:02.409097Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:02.409163Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:09:02.409224Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
D DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:14.755079Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:28] at 9437184 on unit CompleteOperation 2025-06-24T21:10:14.755116Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 28] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:14.755163Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:14.755278Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:14.755299Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:29] at 9437184 on unit CompleteOperation 2025-06-24T21:10:14.755334Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 29] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:14.755359Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:14.755483Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:14.755506Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:30] at 9437184 on unit CompleteOperation 2025-06-24T21:10:14.755540Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 30] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:14.755564Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:14.755746Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:14.755772Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:31] at 9437184 on unit CompleteOperation 2025-06-24T21:10:14.755806Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 31] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:14.755832Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:14.755944Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:14.755967Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:32] at 9437184 on unit CompleteOperation 2025-06-24T21:10:14.756004Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 32] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:14.756027Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:14.756132Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:14.756155Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:33] at 9437184 on unit CompleteOperation 2025-06-24T21:10:14.756190Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete 
[1000004 : 33] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:14.756218Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:14.756364Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:14.756400Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:34] at 9437184 on unit CompleteOperation 2025-06-24T21:10:14.756447Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 34] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:14.756483Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:14.756617Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:14.756641Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:35] at 9437184 on unit CompleteOperation 2025-06-24T21:10:14.756675Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 35] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:14.756703Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:14.756856Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:14.756884Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:36] at 9437184 on unit CompleteOperation 2025-06-24T21:10:14.756921Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 36] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:14.756947Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:14.757058Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:14.757082Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:37] at 9437184 on unit CompleteOperation 2025-06-24T21:10:14.757113Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 37] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:14.757135Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:14.757386Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:237:2228], Recipient [32:347:2312]: {TEvReadSet step# 1000004 txid# 36 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 32} 2025-06-24T21:10:14.757421Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:10:14.757454Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 36 2025-06-24T21:10:14.757545Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 
269287938, Sender [32:237:2228], Recipient [32:347:2312]: {TEvReadSet step# 1000004 txid# 5 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 2} 2025-06-24T21:10:14.757569Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:10:14.757592Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 5 2025-06-24T21:10:14.757668Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:237:2228], Recipient [32:347:2312]: {TEvReadSet step# 1000004 txid# 6 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 3} 2025-06-24T21:10:14.757690Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:10:14.757714Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 6 2025-06-24T21:10:14.757753Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:237:2228], Recipient [32:347:2312]: {TEvReadSet step# 1000004 txid# 7 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 4} 2025-06-24T21:10:14.757776Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:10:14.757800Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 7 2025-06-24T21:10:14.757855Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:237:2228], Recipient [32:347:2312]: {TEvReadSet step# 1000004 txid# 8 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 5} 2025-06-24T21:10:14.757877Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:10:14.757898Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 8 2025-06-24T21:10:14.757971Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:237:2228], Recipient [32:347:2312]: {TEvReadSet step# 1000004 txid# 9 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 6} 2025-06-24T21:10:14.757995Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:10:14.758018Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 9 2025-06-24T21:10:14.758108Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:237:2228], Recipient [32:347:2312]: {TEvReadSet step# 1000004 txid# 10 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 7} 2025-06-24T21:10:14.758142Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:10:14.758173Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 10 2025-06-24T21:10:14.758241Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 
269287938, Sender [32:237:2228], Recipient [32:347:2312]: {TEvReadSet step# 1000004 txid# 12 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 8} 2025-06-24T21:10:14.758268Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:10:14.758291Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 12 2025-06-24T21:10:14.758352Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:237:2228], Recipient [32:347:2312]: {TEvReadSet step# 1000004 txid# 13 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 9} 2025-06-24T21:10:14.758375Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:10:14.758413Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 13 expect 30 24 26 31 29 26 30 12 19 25 25 25 30 28 25 0 21 30 25 - 19 25 25 17 16 22 - 22 19 - 17 - actual 30 24 26 31 29 26 30 12 19 25 25 25 30 28 25 0 21 30 25 - 19 25 25 17 16 22 - 22 19 - 17 - interm 5 - 4 6 6 0 4 5 2 2 2 - 0 - 5 0 5 3 3 - 0 - - 5 - - - - 5 - - - >> DataShardTxOrder::RandomPoints_DelayRS_Reboot [GOOD] >> TKeyValueTest::TestConcatWorks [GOOD] >> TKeyValueTest::TestConcatWorksNewApi >> TKeyValueTest::TestInlineEmptyWriteReadDeleteWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestInlineCopyRangeWorks >> KqpExtractPredicateLookup::SimpleRange [GOOD] >> KqpExtractPredicateLookup::PointJoin |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldSuccess >> TSchemeShardTTLUtility::GetExpireAfter [GOOD] >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-false >> TSchemeShardTTLTests::CreateTableShouldFailOnBeforeEpochTTL >> TSchemeShardTTLTestsWithReboots::AlterTable >> TSchemeShardTTLTests::TtlTiersValidation >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongUnit-EnableTablePgTypes-true >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_UnknownColumn ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::JoinSameKey [GOOD] Test command err: Trying to start YDB, gRPC: 7058, MsgBus: 10954 2025-06-24T21:09:32.288673Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626048466675413:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:32.288951Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031a7/r3tmp/tmpIrMAJr/pdisk_1.dat 2025-06-24T21:09:32.664223Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626048466675396:2079] 1750799372287179 != 1750799372287182 2025-06-24T21:09:32.676695Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7058, node 1 2025-06-24T21:09:32.693397Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:32.695258Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:32.744213Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:32.783812Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:32.783841Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:32.783855Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:32.783994Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10954 TClient is connected to server localhost:10954 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:09:33.295550Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:33.355835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:33.379993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:09:33.508707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:33.644381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:33.708824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:35.307512Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626061351578939:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:35.307645Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:35.567685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:35.595560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:35.624495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:35.652971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:35.681429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:35.727566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:35.795122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:35.855662Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626061351579600:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:35.855756Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:35.855823Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626061351579605:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:35.859768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:35.870075Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626061351579607:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:09:35.951628Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626061351579658:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:37.289327Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626048466675413:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:37.289431Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 28308, MsgBus: 21706 2025-06-24T21:09:38.045659Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626073639585918:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:38.045830Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath ... 644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:07.621493Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519626175342659319:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:07.621574Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 18603, MsgBus: 16653 2025-06-24T21:10:09.217702Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626207297854811:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:09.217796Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031a7/r3tmp/tmpKatoL3/pdisk_1.dat 2025-06-24T21:10:09.353980Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:09.365317Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519626207297854792:2079] 1750799409217290 != 1750799409217293 2025-06-24T21:10:09.367428Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:09.367528Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:09.369041Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18603, node 7 2025-06-24T21:10:09.413966Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:09.413995Z node 7 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:09.414007Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:09.414201Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16653 TClient is connected to server localhost:16653 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:10.081121Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:10.095875Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:10.162418Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:10.229633Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:10.327935Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:10.406109Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:12.860116Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626220182758320:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:12.860225Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:12.922955Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:12.955485Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:13.004185Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:13.036576Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:13.068656Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:13.104347Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:13.176278Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:13.239426Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626224477726272:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:13.239520Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:13.239534Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626224477726277:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:13.243098Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:13.253942Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519626224477726279:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:13.337749Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519626224477726332:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:14.218097Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519626207297854811:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:14.218164Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpProxy::LoadedMetadataAfterCompilationTimeout [GOOD] >> KqpProxy::ExecuteScriptFailsWithoutFeatureFlag |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLUtility::GetExpireAfter [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::RandomPoints_DelayRS_Reboot [GOOD] Test command err: 2025-06-24T21:08:58.805693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:08:58.805763Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:58.809156Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:08:58.821657Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:08:58.822244Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:08:58.822549Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:08:58.871230Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:08:58.886103Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:08:58.886362Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:08:58.888726Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:08:58.888830Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:08:58.888888Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:08:58.889287Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:08:58.889388Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:08:58.889466Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:08:58.963893Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:08:59.001479Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme 
tabletId 9437184 2025-06-24T21:08:59.001714Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:08:59.001840Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:08:59.001882Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:08:59.001927Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:08:59.001976Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:08:59.002172Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:08:59.002251Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:08:59.002575Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:08:59.002698Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:08:59.002867Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:08:59.002915Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:08:59.002968Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:08:59.003008Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:08:59.003047Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:08:59.003090Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:08:59.004224Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:08:59.004424Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:59.004473Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:59.004532Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:08:59.007850Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:08:59.007941Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:08:59.008035Z node 
1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:08:59.008247Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:08:59.008306Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:08:59.008360Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:08:59.008404Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:08:59.008446Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:08:59.008482Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:08:59.008537Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:08:59.008863Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:08:59.008902Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:08:59.008943Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:08:59.008995Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:08:59.009068Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:08:59.009106Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:08:59.009157Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:08:59.009194Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:08:59.009219Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:08:59.023135Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:08:59.023268Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:08:59.023316Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:08:59.023360Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:08:59.023452Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:08:59.024061Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:59.024122Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, 
processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:08:59.024171Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:08:59.024324Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:08:59.024361Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:08:59.024518Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:08:59.024565Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:08:59.024603Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:08:59.024652Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:08:59.028706Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:08:59.028803Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:08:59.029112Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:08:59.029167Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:08:59.029237Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:08:59.029285Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:08:59.029326Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:08:59.029386Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:08:59.029438Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
ps: 0, InvisibleRowSkips: 0} 2025-06-24T21:10:15.974221Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000004:37] at 9437184 is Executed 2025-06-24T21:10:15.974245Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000004:37] at 9437184 executing on unit ExecuteDataTx 2025-06-24T21:10:15.974265Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000004:37] at 9437184 to execution unit CompleteOperation 2025-06-24T21:10:15.974285Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000004:37] at 9437184 on unit CompleteOperation 2025-06-24T21:10:15.974431Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000004:37] at 9437184 is DelayComplete 2025-06-24T21:10:15.974454Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000004:37] at 9437184 executing on unit CompleteOperation 2025-06-24T21:10:15.974478Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000004:37] at 9437184 to execution unit CompletedOperations 2025-06-24T21:10:15.974501Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000004:37] at 9437184 on unit CompletedOperations 2025-06-24T21:10:15.974527Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000004:37] at 9437184 is Executed 2025-06-24T21:10:15.974546Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000004:37] at 9437184 executing on unit CompletedOperations 2025-06-24T21:10:15.974566Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000004:37] at 9437184 has finished 2025-06-24T21:10:15.974596Z node 32 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:10:15.974624Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:10:15.974658Z node 32 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:10:15.974682Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:10:15.974714Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:361: Check active operation [1000004:5] at 9437184 on unit LoadAndWaitInRS 2025-06-24T21:10:15.974739Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:378: Active operation [1000004:5] at 9437184 is not ready for LoadAndWaitInRS 2025-06-24T21:10:15.986650Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:15.986702Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:36] at 9437184 on unit CompleteOperation 2025-06-24T21:10:15.986759Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 36] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 4 ms, propose latency: 5 ms 2025-06-24T21:10:15.986818Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000004 txid# 36 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 32} 2025-06-24T21:10:15.986853Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:15.987095Z node 32 :TX_DATASHARD DEBUG: 
datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:15.987118Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:37] at 9437184 on unit CompleteOperation 2025-06-24T21:10:15.987175Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 37] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:15.987211Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:15.987420Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:808:2731], Recipient [32:349:2314]: {TEvReadSet step# 1000004 txid# 36 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 32} 2025-06-24T21:10:15.987455Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:10:15.987485Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 36 resending delayed RS 2025-06-24T21:10:16.040702Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [32:349:2314], Recipient [32:808:2731]: {TEvReadSet step# 1000004 txid# 5 TabletSource# 9437185 TabletDest# 9437184 SetTabletProducer# 9437185 ReadSet.Size()# 7 Seqno# 2 Flags# 0} 2025-06-24T21:10:16.040787Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T21:10:16.040832Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 9437184 source 9437185 dest 9437184 producer 9437185 txId 5 2025-06-24T21:10:16.040923Z node 32 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 9437184 got read set: {TEvReadSet step# 1000004 txid# 5 TabletSource# 9437185 TabletDest# 9437184 SetTabletProducer# 9437185 ReadSet.Size()# 7 Seqno# 2 Flags# 0} 2025-06-24T21:10:16.040980Z node 32 :TX_DATASHARD TRACE: operation.cpp:67: Filled readset for [1000004:5] from=9437185 to=9437184origin=9437185 2025-06-24T21:10:16.041070Z node 32 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-24T21:10:16.041213Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [32:808:2731], Recipient [32:808:2731]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:10:16.041258Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:10:16.041312Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:10:16.041354Z node 32 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:10:16.041398Z node 32 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [1000004:5] at 9437184 for LoadAndWaitInRS 2025-06-24T21:10:16.041433Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000004:5] at 9437184 on unit LoadAndWaitInRS 2025-06-24T21:10:16.041472Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000004:5] at 9437184 is Executed 2025-06-24T21:10:16.041510Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan 
for [1000004:5] at 9437184 executing on unit LoadAndWaitInRS 2025-06-24T21:10:16.041544Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000004:5] at 9437184 to execution unit ExecuteDataTx 2025-06-24T21:10:16.041579Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000004:5] at 9437184 on unit ExecuteDataTx 2025-06-24T21:10:16.042428Z node 32 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000004:5] at tablet 9437184 with status COMPLETE 2025-06-24T21:10:16.042508Z node 32 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000004:5] at 9437184: {NSelectRow: 2, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 8, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 15} 2025-06-24T21:10:16.042576Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000004:5] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:10:16.042611Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000004:5] at 9437184 executing on unit ExecuteDataTx 2025-06-24T21:10:16.042648Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000004:5] at 9437184 to execution unit CompleteOperation 2025-06-24T21:10:16.042683Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000004:5] at 9437184 on unit CompleteOperation 2025-06-24T21:10:16.042932Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000004:5] at 9437184 is DelayComplete 2025-06-24T21:10:16.042973Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000004:5] at 9437184 executing on unit CompleteOperation 2025-06-24T21:10:16.043013Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000004:5] at 9437184 to execution unit CompletedOperations 2025-06-24T21:10:16.043046Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000004:5] at 9437184 on unit CompletedOperations 2025-06-24T21:10:16.043088Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000004:5] at 9437184 is Executed 2025-06-24T21:10:16.043116Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000004:5] at 9437184 executing on unit CompletedOperations 2025-06-24T21:10:16.043172Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000004:5] at 9437184 has finished 2025-06-24T21:10:16.043209Z node 32 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:10:16.043241Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:10:16.043276Z node 32 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:10:16.043309Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:10:16.055323Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:16.055374Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:5] at 9437184 on unit CompleteOperation 2025-06-24T21:10:16.055426Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 5] 
from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 49 ms, propose latency: 50 ms 2025-06-24T21:10:16.055486Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000004 txid# 5 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 2} 2025-06-24T21:10:16.055524Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:16.055772Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:808:2731], Recipient [32:349:2314]: {TEvReadSet step# 1000004 txid# 5 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 2} 2025-06-24T21:10:16.055815Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:10:16.055855Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 5 expect 31 31 30 29 31 27 30 31 31 27 31 29 25 30 30 10 - 30 27 31 22 31 23 22 27 - 27 - 7 - - - actual 31 31 30 29 31 27 30 31 31 27 31 29 25 30 30 10 - 30 27 31 22 31 23 22 27 - 27 - 7 - - - interm - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - >> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOkWithNewApi [GOOD] >> TSchemeShardColumnTableTTL::AlterColumnTable >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_UnknownColumn [GOOD] >> KqpAgg::AggHashShuffle-UseSink [GOOD] >> KqpExtractPredicateLookup::ComplexRange >> TSchemeShardTTLTests::CreateTableShouldFailOnBeforeEpochTTL [GOOD] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::CreateColumnTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOkWithNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:80:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:81:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:81:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! 
new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:78:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:81:2057] recipient: [4:80:2111] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:82:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:83:2112] sender: [4:84:2057] recipient: [4:80:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:83:2112] Leader for TabletID 72057594037927937 is [4:83:2112] sender: [4:169:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:79:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:81:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:83:2057] recipient: [5:82:2111] Leader for TabletID 72057594037927937 is [5:84:2112] sender: [5:85:2057] recipient: [5:82:2111] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! new actor is[5:84:2112] Leader for TabletID 72057594037927937 is [5:84:2112] sender: [5:170:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! 
new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:82:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:85:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:84:2114] Leader for TabletID 72057594037927937 is [7:87:2115] sender: [7:88:2057] recipient: [7:84:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! new actor is[7:87:2115] Leader for TabletID 72057594037927937 is [7:87:2115] sender: [7:173:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:83:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:86:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:87:2057] recipient: [8:85:2114] Leader for TabletID 72057594037927937 is [8:88:2115] sender: [8:89:2057] recipient: [8:85:2114] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! new actor is[8:88:2115] Leader for TabletID 72057594037927937 is [8:88:2115] sender: [8:106:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:85:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:87:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:89:2057] recipient: [9:88:2116] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:91:2057] recipient: [9:88:2116] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! 
new actor is[9:90:2117] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:176:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:85:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:88:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:89:2057] recipient: [10:87:2116] Leader for TabletID 72057594037927937 is [10:90:2117] sender: [10:91:2057] recipient: [10:87:2116] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! new actor is[10:90:2117] Leader for TabletID 72057594037927937 is [10:90:2117] sender: [10:176:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:86:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:88:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:90:2057] recipient: [11:89:2116] Leader for TabletID 72057594037927937 is [11:91:2117] sender: [11:92:2057] recipient: [11:89:2116] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! new actor is[11:91:2117] Leader for TabletID 72057594037927937 is [11:91:2117] sender: [11:177:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:58:2098]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:87:2057] recipient: [12:37:2084] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:90:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:91:2057] recipient: [12:89:2117] Leader for TabletID 72057594037927937 is [12:92:2118] sender: [12:93:2057] recipient: [12:89:2117] !Reboot 72057594037927937 (actor [12:58:2098]) rebooted! !Reboot 72057594037927937 (actor [12:58:2098]) tablet resolver refreshed! 
new actor is[12:92:2118] Leader for TabletID 72057594037927937 is [12:92:2118] sender: [12:112:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:53:2096] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:59:2057] recipient: [13:53:2096] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:76:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:58:2098]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:88:2057] recipient: [13:37:2084] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:91:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:92:2057] recipient: [13:90:2118] Leader for TabletID 72057594037927937 is [13:93:2119] sender: [13:94:2057] recipient: [13:90:2118] !Reboot 72057594037927937 (actor [13:58:2098]) rebooted! !Reboot 72057594037927937 (actor [13:58:2098]) tablet resolver refreshed! new actor is[13:93:2119] Leader for TabletID 72057594037927937 is [13:93:2119] sender: [13:113:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:52:2096] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:59:2057] recipient: [14:52:2096] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:76:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:91:2057] recipient: [14:37:2084] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:94:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:95:2057] recipient: [14:93:2121] Leader for TabletID 72057594037927937 is [14:96:2122] sender: [14:97:2057] recipient: [14:93:2121] !Reboot 72057594037927937 (actor [14:58:2098]) rebooted! !Reboot 72057594037927937 (actor [14:58:2098]) tablet resolver refreshed! new actor is[14:96:2122] Leader for TabletID 72057594037927937 is [14:96:2122] sender: [14:182:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2096] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:59:2057] recipient: [15:52:2096] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:76:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:58:2098]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:91:2057] recipient: [15:37:2084] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:94:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:95:2057] recipient: [15:93:2121] Leader for TabletID 72057594037927937 is [15:96:2122] sender: [15:97:2057] recipient: [15:93:2121] !Reboot 72057594037927937 (actor [15:58:2098]) rebooted! !Reboot 72057594037927937 (actor [15:58:2098]) tablet resolver refreshed! 
new actor is[15:96:2122] Leader for TabletID 72057594037927937 is [15:96:2122] sender: [15:182:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2096] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:59:2057] recipient: [16:53:2096] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:76:2057] recipient: [16:14:2061] >> TSchemeShardTTLTests::TtlTiersValidation [GOOD] >> TSchemeShardColumnTableTTL::AlterColumnTable_Negative ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test >> test_delete_by_explicit_row_id.py::TestDeleteByExplicitRowId::test_delete_row_by_explicit_row_id 2025-06-24 21:10:02,433 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 21:10:02,615 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 597113 690M 695M 611M ydb-tests-olap-delete --basetemp /home/runner/.ya/build/build_root/2btm/002ddc/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctest-modules - 599747 2.1G 2.1G 1.5G └─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/2btm/002ddc/ydb/tests/olap/delete/test-results/py3test/testing_out_stuff/chunk1/test Test command err: File "library/python/pytest/main.py", line 101, in main rc = pytest.main( File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main return wrap_session(config, _main) File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session session.exitstatus = doit(config, session) or 0 File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main config.hook.pytest_runtestloop(session=session) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol 
runtestprotocol(item, nextitem=nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 134, in runtestprotocol reports.append(call_and_report(item, "call", log)) File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report call = call_runtest_hook(item, when, **kwds) File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook return CallInfo.from_call( File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call result: Optional[TResult] = func() File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in lambda: ihook(item=item, **kwds), when=when, reraise=reraise File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 170, in pytest_runtest_call item.runtest() File "contrib/python/pytest/py3/_pytest/python.py", line 1844, in runtest self.ihook.pytest_pyfunc_call(pyfuncitem=self) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "library/python/pytest/plugins/ya.py", line 563, in pytest_pyfunc_call pyfuncitem.retval = testfunction(**testargs) File "ydb/tests/olap/delete/test_delete_by_explicit_row_id.py", line 118, in test_delete_row_by_explicit_row_id self._test_single_column_pk(rows_to_insert=10, rows_to_delete=10, iterations=10) File "ydb/tests/olap/delete/test_delete_by_explicit_row_id.py", line 65, in _test_single_column_pk ) File "ydb/tests/olap/common/ydb_client.py", line 24, in query return self.session_pool.execute_with_retries(statement) File "contrib/python/ydb/py3/ydb/query/pool.py", line 204, in execute_with_retries return retry_operation_sync(wrapped_callee, retry_settings) File "contrib/python/ydb/py3/ydb/retries.py", line 133, in retry_operation_sync for next_opt in opt_generator: File "contrib/python/ydb/py3/ydb/retries.py", line 94, in retry_operation_impl result = YdbRetryOperationFinalResult(callee(*args, **kwargs)) File "contrib/python/ydb/py3/ydb/query/pool.py", line 202, in wrapped_callee return [result_set for result_set in it] File "contrib/python/ydb/py3/ydb/_utilities.py", line 173, in __next__ return self._next() File "contrib/python/ydb/py3/ydb/_utilities.py", line 164, in _next res = self.wrapper(next(self.it)) File "contrib/python/grpcio/py3/grpc/_channel.py", line 475, in __next__ return self._next() File "contrib/python/grpcio/py3/grpc/_channel.py", line 872, in _next _common.wait(self._state.condition.wait, _response_ready) File "contrib/python/grpcio/py3/grpc/_common.py", line 150, in wait _wait_once(wait_fn, MAXIMUM_WAIT_TIMEOUT, spin_cb) File "contrib/python/grpcio/py3/grpc/_common.py", line 112, in _wait_once wait_fn(timeout=timeout) File "contrib/tools/python3/Lib/threading.py", line 359, in wait gotit = waiter.acquire(True, timeout) File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown 
traceback.print_stack(file=sys.stderr) Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: ...d_root/2btm/002ddc/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/002ddc/ydb/tests/olap/delete/test-results/py3test/testing_out_stuff/chunk1/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/002ddc', '--source-root', '/home/runner/.ya/build/build_root/2btm/002ddc/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/002ddc/ydb/tests/olap/delete/test-results/py3test/testing_out_stuff/chunk1/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/delete', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '1', '--partition-mode', 'SEQUENTIAL', '--split-by-tests', '--dep-root', 'ydb/tests/olap/delete', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("...d_root/2btm/002ddc/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/002ddc/ydb/tests/olap/delete/test-results/py3test/testing_out_stuff/chunk1/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/002ddc', '--source-root', '/home/runner/.ya/build/build_root/2btm/002ddc/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/002ddc/ydb/tests/olap/delete/test-results/py3test/testing_out_stuff/chunk1/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/delete', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '1', '--partition-mode', 'SEQUENTIAL', '--split-by-tests', '--dep-root', 'ydb/tests/olap/delete', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 
'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout",), {}) >> TSchemeShardTTLTests::AlterTableShouldSuccess [GOOD] >> TSchemeShardTTLTests::AlterTableShouldSucceedOnAsyncIndexedTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_UnknownColumn [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:17.406402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:17.406533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:17.406598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:17.406640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:17.407463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:17.407526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:17.407620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:17.407699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:17.408508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:17.410171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:17.496047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:17.496114Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:17.512560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:17.512994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:17.513175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:17.520119Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:17.520302Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:17.520962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.521246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:17.523991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:17.524191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:17.526354Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:17.526432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:17.526685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:17.526740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:17.526845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:17.526953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.533601Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:17.663119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:17.663365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.663525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:17.663571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:17.663769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:17.663854Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:17.666518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.666743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:17.666997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.667073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:17.667129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:17.667208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:17.669502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.669585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:17.669632Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:17.671649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.671701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.671786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.671854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:17.675789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:17.677924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:17.678198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at 
step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:17.679205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.679354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:17.679431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.679793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:17.679845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.680031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:17.680107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:17.682248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:17.682293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:17.682524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:17.682575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-24T21:10:17.682877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.682922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T21:10:17.683040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:10:17.683103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:17.683167Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:10:17.683205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:17.683242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: 
TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T21:10:17.683280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:17.683333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T21:10:17.683367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T21:10:17.683432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:17.683472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T21:10:17.683520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T21:10:17.685464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:17.685577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:17.685616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T21:10:17.685676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T21:10:17.685733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:17.685840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T21:10:17.689110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T21:10:17.689623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-24T21:10:17.696601Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:271:2260] Bootstrap 2025-06-24T21:10:17.718661Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:271:2260] Become StateWork (SchemeCache [1:276:2265]) 2025-06-24T21:10:17.721605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnTable CreateColumnTable { Name: "TTLEnabledTable" Schema { Columns { Name: "key" Type: "Uint64" NotNull: true } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" } TtlSettings { Enabled { ColumnName: "created_at" } } } } TxId: 101 TabletId: 
72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:17.722001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: create_table.cpp:593: TCreateColumnTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.725834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Incorrect ttl column - not found in scheme, at schemeshard: 72057594046678944 2025-06-24T21:10:17.726828Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:10:17.730187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Incorrect ttl column - not found in scheme" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:17.730464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Incorrect ttl column - not found in scheme, operation: CREATE COLUMN TABLE, path: /MyRoot/ 2025-06-24T21:10:17.730998Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnBeforeEpochTTL [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:17.406364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:17.406464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:17.406499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:17.406553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:17.407399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:17.407433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:17.407551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:17.407626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:17.408292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:17.410189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:17.477615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:17.477671Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:17.493493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:17.493941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:17.494189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:17.503890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:17.504200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:17.507929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.508371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:17.515717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:17.516708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:17.526402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:17.526495Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:17.526708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:17.526778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:17.526899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:17.526991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.533847Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:17.646334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" 
StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:17.646594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.646753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:17.646789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:17.646997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:17.647068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:17.649075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.649225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:17.649379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.649429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:17.649463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:17.649492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:17.651267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.651328Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:17.651371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:17.653035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.653091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.653144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState 
leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.653196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:17.656268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:17.657925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:17.658063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:17.658807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.658926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:17.658970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.659226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:17.659300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.659478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:17.659552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:17.661346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:17.661391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:17.661555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:17.661595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, 
txId: 1, path id: 1 2025-06-24T21:10:17.661863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.661900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T21:10:17.661986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:10:17.662013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:17.662066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:10:17.662095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:17.662122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T21:10:17.662151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:17.662192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T21:10:17.662216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T21:10:17.662270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:17.662298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T21:10:17.662344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T21:10:17.663788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:17.663888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:17.663925Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T21:10:17.663957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T21:10:17.664010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:17.664086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T21:10:17.667329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T21:10:17.667883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 WARNING: All log messages before y_absl::InitializeLog() is called are written to STDERR W0000 00:00:1750799417.672170 740031 text_format.cc:398] Warning parsing text-format NKikimrSchemeOp.TTableDescription: 9:35: text format contains deprecated field "ExpireAfterSeconds" TestModificationResults wait txId: 101 2025-06-24T21:10:17.674066Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:271:2260] Bootstrap 2025-06-24T21:10:17.694651Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:271:2260] Become StateWork (SchemeCache [1:276:2265]) 2025-06-24T21:10:17.697210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3153600000 Tiers { ApplyAfterSeconds: 3153600000 Delete { } } } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:17.697643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.697752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3153600000 Tiers { ApplyAfterSeconds: 3153600000 Delete { } } } }, at schemeshard: 72057594046678944 2025-06-24T21:10:17.698082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: TTL should be less than 1750799417 seconds (20263 days, 55 years). The ttl behaviour is undefined before 1970., at schemeshard: 72057594046678944 2025-06-24T21:10:17.703815Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:10:17.706252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "TTL should be less than 1750799417 seconds (20263 days, 55 years). The ttl behaviour is undefined before 1970." TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:17.706467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: TTL should be less than 1750799417 seconds (20263 days, 55 years). 
The ttl behaviour is undefined before 1970., operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-06-24T21:10:17.706883Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::TtlTiersValidation [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:17.406366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:17.406491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:17.406559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:17.406607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:17.407459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:17.407511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:17.407598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:17.407703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:17.408557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:17.410174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:17.496357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:17.496409Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:17.512596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:17.513003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:17.513174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:17.520682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: 
TTxUpgradeSchema.Complete 2025-06-24T21:10:17.520882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:17.521578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.521857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:17.524678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:17.524839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:17.526378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:17.526459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:17.526685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:17.526776Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:17.526895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:17.527000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.533552Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:17.666709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:17.666964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.667198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:17.667257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:17.667576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:17.667659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, 
suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:17.669936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.670122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:17.670305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.670362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:17.670429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:17.670468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:17.672360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.672442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:17.672503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:17.674209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.674253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.674308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.674357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:17.678287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:17.680179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:17.680344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send 
Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:17.681302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.681437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:17.681496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.681752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:17.681818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.682003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:17.682098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:17.684128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:17.684179Z node 1 :FLAT_TX_SCHEMESHARD ... 
2025-06-24T21:10:17.876062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:17.877148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:10:17.877222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:10:17.877264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:10:17.877298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:10:17.877327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:10:17.877396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-06-24T21:10:17.877659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1275 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-24T21:10:17.877694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:17.877805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1275 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-24T21:10:17.877923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1275 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-24T21:10:17.878530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:17.878588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation 
FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:17.878746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:17.878806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:10:17.878911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:17.878994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.879038Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.879089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:10:17.879171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 129 -> 240 2025-06-24T21:10:17.881551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:10:17.882934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:10:17.883056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.883165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.883433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.883476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:10:17.883573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:10:17.883629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:17.883671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:10:17.883703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:17.883743Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-24T21:10:17.883841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:339:2316] message: TxId: 101 2025-06-24T21:10:17.883916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:17.883963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:10:17.883996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:10:17.884139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:10:17.885767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:10:17.885819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:340:2317] TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-24T21:10:17.889237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterTable AlterTable { Name: "TTLEnabledTable" TTLSettings { Enabled { ColumnName: "modified_at" Tiers { ApplyAfterSeconds: 3600 Delete { } } Tiers { ApplyAfterSeconds: 7200 Delete { } } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:17.889483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:508: TAlterTable Propose, path: /MyRoot/TTLEnabledTable, pathId: , opId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.889862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Tier 0: only the last tier in TTL settings can have Delete action, at schemeshard: 72057594046678944 2025-06-24T21:10:17.892050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Tier 0: only the last tier in TTL settings can have Delete action" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:17.892317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Tier 0: only the last tier in TTL settings can have Delete action, operation: ALTER TABLE, path: /MyRoot/TTLEnabledTable TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 2025-06-24T21:10:17.895599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterTable AlterTable { Name: "TTLEnabledTable" TTLSettings { Enabled { ColumnName: "modified_at" Tiers { ApplyAfterSeconds: 3600 EvictToExternalStorage { Storage: "/Root/abc" } } Tiers { ApplyAfterSeconds: 7200 Delete { } } } } } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-06-24T21:10:17.895835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:508: TAlterTable Propose, path: /MyRoot/TTLEnabledTable, pathId: , opId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.896161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 103:1, propose status:StatusInvalidParameter, reason: Only DELETE via TTL is allowed for row-oriented tables, at schemeshard: 72057594046678944 2025-06-24T21:10:17.898030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 103, response: Status: StatusInvalidParameter Reason: "Only DELETE via TTL is allowed for row-oriented tables" TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:17.898228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Only DELETE via TTL is allowed for row-oriented tables, operation: ALTER TABLE, path: /MyRoot/TTLEnabledTable TestModificationResult got TxId: 103, wait until txId: 103 |94.5%| [TA] $(B)/ydb/tests/olap/delete/test-results/py3test/{meta.json ... results_accumulator.log} |94.5%| [TA] {RESULT} $(B)/ydb/tests/olap/delete/test-results/py3test/{meta.json ... results_accumulator.log} >> TSchemeShardTTLTests::BuildIndexShouldSucceed >> TKeyValueTest::TestInlineWriteReadDeleteWithRestartsThenResponseOkNewApi [GOOD] >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorks ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldSuccess [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:17.406373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:17.406513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:17.406572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:17.406637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:17.407463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:17.407513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:17.407610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:17.407694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: 
Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:17.408567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:17.410188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:17.500941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:17.501007Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:17.521611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:17.522084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:17.522265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:17.529937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:17.530106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:17.530612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.530829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:17.533228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:17.533410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:17.534373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:17.534447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:17.534659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:17.534710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:17.534813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:17.534916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.540620Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:17.638318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" 
OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:17.638541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.638702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:17.638737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:17.639611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:17.639689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:17.642287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.643189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:17.643382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.643498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:17.643539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:17.643567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:17.645337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.645401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:17.645437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:17.647458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.647497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.647541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.647576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:17.651524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:17.653273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:17.653437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:17.654225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.654349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:17.654428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.655667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:17.655744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.655930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:17.655995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:17.657885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:17.657927Z node 1 :FLAT_TX_SCHEMESHARD ... 
__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 104, path id: 2 2025-06-24T21:10:17.993856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.993894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 104:0 ProgressState at tablet: 72057594046678944 2025-06-24T21:10:17.994514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:10:17.994607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:10:17.994664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:10:17.994707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-24T21:10:17.994746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:10:17.994841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-06-24T21:10:17.997161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:10:18.009624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000004 OrderId: 104 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1005 } } CommitVersion { Step: 5000004 TxId: 104 } 2025-06-24T21:10:18.009688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:18.009858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000004 OrderId: 104 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1005 } } CommitVersion { Step: 5000004 TxId: 104 } 2025-06-24T21:10:18.009960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000004 OrderId: 104 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { 
PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1005 } } CommitVersion { Step: 5000004 TxId: 104 } FAKE_COORDINATOR: Erasing txId 104 2025-06-24T21:10:18.011043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-06-24T21:10:18.011099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:18.011267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-06-24T21:10:18.011329Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 104:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:10:18.011425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 104:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-06-24T21:10:18.011501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 104:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:18.011552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.011614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 104:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:10:18.011678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 104:0 129 -> 240 2025-06-24T21:10:18.014069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.014366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.014490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.014529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-24T21:10:18.014612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:10:18.014649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:10:18.014689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:10:18.014717Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:10:18.014758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-06-24T21:10:18.014817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:339:2316] message: TxId: 104 2025-06-24T21:10:18.014863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:10:18.014893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:0 2025-06-24T21:10:18.014921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:0 2025-06-24T21:10:18.015015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:10:18.016658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T21:10:18.016712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:445:2415] TestWaitNotification: OK eventTxId 104 2025-06-24T21:10:18.017166Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:18.017374Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 239us result status StatusSuccess 2025-06-24T21:10:18.017672Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 3 TTLSettings { Disabled { } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 
WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TKeyValueTest::TestCleanUpDataOnEmptyTablet [GOOD] >> TKeyValueTest::TestCleanUpDataOnEmptyTabletResetGeneration >> TSchemeShardTTLTests::BackupCopyHasNoTtlSettings >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOkNewApi [GOOD] >> TKeyValueTest::TestCleanUpDataOnEmptyTabletResetGeneration [GOOD] >> TKeyValueTest::TestCleanUpDataWithMockDisk >> TSchemeShardTTLTests::ConditionalErase >> TSchemeShardTTLTests::AlterTableShouldSucceedOnAsyncIndexedTable [GOOD] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTestsWithReboots::CreateTable |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::LegacyTtlSettingsNoTiersAlterTable >> TSchemeShardTTLTests::LegacyTtlSettingsNoTiers ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOkNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! 
new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:82:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:82:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! 
new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:83:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:85:2114] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:89:2057] recipient: [7:85:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! 
new actor is[7:88:2115] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:174:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:78:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:81:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:82:2057] recipient: [10:80:2111] Leader for TabletID 72057594037927937 is [10:83:2112] sender: [10:84:2057] recipient: [10:80:2111] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! new actor is[10:83:2112] Leader for TabletID 72057594037927937 is [10:83:2112] sender: [10:169:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:78:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:81:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:82:2057] recipient: [11:80:2111] Leader for TabletID 72057594037927937 is [11:83:2112] sender: [11:84:2057] recipient: [11:80:2111] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! 
new actor is[11:83:2112] Leader for TabletID 72057594037927937 is [11:83:2112] sender: [11:169:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:79:2057] recipient: [12:37:2084] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:81:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:83:2057] recipient: [12:82:2111] Leader for TabletID 72057594037927937 is [12:84:2112] sender: [12:85:2057] recipient: [12:82:2111] !Reboot 72057594037927937 (actor [12:58:2098]) rebooted! !Reboot 72057594037927937 (actor [12:58:2098]) tablet resolver refreshed! new actor is[12:84:2112] Leader for TabletID 72057594037927937 is [12:84:2112] sender: [12:170:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:53:2096] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:59:2057] recipient: [13:53:2096] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:76:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:82:2057] recipient: [13:37:2084] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:85:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:86:2057] recipient: [13:84:2114] Leader for TabletID 72057594037927937 is [13:87:2115] sender: [13:88:2057] recipient: [13:84:2114] !Reboot 72057594037927937 (actor [13:58:2098]) rebooted! !Reboot 72057594037927937 (actor [13:58:2098]) tablet resolver refreshed! new actor is[13:87:2115] Leader for TabletID 72057594037927937 is [13:87:2115] sender: [13:173:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:52:2096] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:59:2057] recipient: [14:52:2096] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:76:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:82:2057] recipient: [14:37:2084] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:85:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:86:2057] recipient: [14:84:2114] Leader for TabletID 72057594037927937 is [14:87:2115] sender: [14:88:2057] recipient: [14:84:2114] !Reboot 72057594037927937 (actor [14:58:2098]) rebooted! !Reboot 72057594037927937 (actor [14:58:2098]) tablet resolver refreshed! 
new actor is[14:87:2115] Leader for TabletID 72057594037927937 is [14:87:2115] sender: [14:173:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2096] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:59:2057] recipient: [15:52:2096] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:76:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:83:2057] recipient: [15:37:2084] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:86:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:87:2057] recipient: [15:85:2114] Leader for TabletID 72057594037927937 is [15:88:2115] sender: [15:89:2057] recipient: [15:85:2114] !Reboot 72057594037927937 (actor [15:58:2098]) rebooted! !Reboot 72057594037927937 (actor [15:58:2098]) tablet resolver refreshed! new actor is[15:88:2115] Leader for TabletID 72057594037927937 is [15:88:2115] sender: [15:174:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2096] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:59:2057] recipient: [16:53:2096] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:76:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:86:2057] recipient: [16:37:2084] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:88:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:90:2057] recipient: [16:89:2117] Leader for TabletID 72057594037927937 is [16:91:2118] sender: [16:92:2057] recipient: [16:89:2117] !Reboot 72057594037927937 (actor [16:58:2098]) rebooted! !Reboot 72057594037927937 (actor [16:58:2098]) tablet resolver refreshed! new actor is[16:91:2118] Leader for TabletID 72057594037927937 is [16:91:2118] sender: [16:177:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:56:2057] recipient: [17:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:56:2057] recipient: [17:52:2096] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:59:2057] recipient: [17:52:2096] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:76:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:86:2057] recipient: [17:37:2084] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:89:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:90:2057] recipient: [17:88:2117] Leader for TabletID 72057594037927937 is [17:91:2118] sender: [17:92:2057] recipient: [17:88:2117] !Reboot 72057594037927937 (actor [17:58:2098]) rebooted! !Reboot 72057594037927937 (actor [17:58:2098]) tablet resolver refreshed! 
new actor is[17:91:2118] Leader for TabletID 72057594037927937 is [17:91:2118] sender: [17:177:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:56:2057] recipient: [18:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:56:2057] recipient: [18:52:2096] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:59:2057] recipient: [18:52:2096] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:76:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:87:2057] recipient: [18:37:2084] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:90:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:91:2057] recipient: [18:89:2117] Leader for TabletID 72057594037927937 is [18:92:2118] sender: [18:93:2057] recipient: [18:89:2117] !Reboot 72057594037927937 (actor [18:58:2098]) rebooted! !Reboot 72057594037927937 (actor [18:58:2098]) tablet resolver refreshed! new actor is[18:92:2118] Leader for TabletID 72057594037927937 is [18:92:2118] sender: [18:178:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2096] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:59:2057] recipient: [19:51:2096] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:76:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:90:2057] recipient: [19:37:2084] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:92:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:94:2057] recipient: [19:93:2120] Leader for TabletID 72057594037927937 is [19:95:2121] sender: [19:96:2057] recipient: [19:93:2120] !Reboot 72057594037927937 (actor [19:58:2098]) rebooted! !Reboot 72057594037927937 (actor [19:58:2098]) tablet resolver refreshed! new actor is[19:95:2121] Leader for TabletID 72057594037927937 is [19:95:2121] sender: [19:181:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:56:2057] recipient: [20:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:56:2057] recipient: [20:52:2096] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:59:2057] recipient: [20:52:2096] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:76:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:58:2098]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:90:2057] recipient: [20:37:2084] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:93:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:94:2057] recipient: [20:92:2120] Leader for TabletID 72057594037927937 is [20:95:2121] sender: [20:96:2057] recipient: [20:92:2120] !Reboot 72057594037927937 (actor [20:58:2098]) rebooted! !Reboot 72057594037927937 (actor [20:58:2098]) tablet resolver refreshed! 
new actor is[20:95:2121] Leader for TabletID 72057594037927937 is [20:95:2121] sender: [20:181:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:56:2057] recipient: [21:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:56:2057] recipient: [21:52:2096] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:59:2057] recipient: [21:52:2096] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:76:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:91:2057] recipient: [21:37:2084] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:93:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:95:2057] recipient: [21:94:2120] Leader for TabletID 72057594037927937 is [21:96:2121] sender: [21:97:2057] recipient: [21:94:2120] !Reboot 72057594037927937 (actor [21:58:2098]) rebooted! !Reboot 72057594037927937 (actor [21:58:2098]) tablet resolver refreshed! new actor is[21:96:2121] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:56:2057] recipient: [22:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:56:2057] recipient: [22:52:2096] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:59:2057] recipient: [22:52:2096] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:76:2057] recipient: [22:14:2061] >> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-true >> TSchemeShardTTLTests::BuildIndexShouldSucceed [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldSucceedOnAsyncIndexedTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:18.752978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:18.753046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:18.753078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:18.753125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:18.753168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:18.753191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:18.753232Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:18.753294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:18.753842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:18.754080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:18.810775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:18.810832Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:18.825084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:18.825498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:18.825651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:18.833063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:18.833248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:18.833929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:18.834280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:18.837353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:18.837609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:18.838835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:18.838895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:18.839130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:18.839204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:18.839310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:18.839431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.846229Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: 
[72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:18.945992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:18.946229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.946413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:18.946482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:18.946769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:18.946834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:18.948812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:18.949015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:18.949200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.949248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:18.949298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:18.949350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:18.950961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.951036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:18.951134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:18.952765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-06-24T21:10:18.952808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.952860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:18.952903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:18.956518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:18.958304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:18.958482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:18.959389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:18.959541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:18.959612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:18.959913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:18.959968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:18.960133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:18.960220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:18.962165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:18.962211Z node 1 :FLAT_TX_SCHEMESHARD ... 
6-24T21:10:19.281132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-06-24T21:10:19.281624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:10:19.281717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:10:19.281747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:10:19.281776Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-24T21:10:19.281803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:10:19.281852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-24T21:10:19.284254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:10:19.306579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1027 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-24T21:10:19.306631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-24T21:10:19.306757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1027 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-24T21:10:19.306842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1027 } } CommitVersion { Step: 5000003 TxId: 102 } FAKE_COORDINATOR: Erasing txId 102 2025-06-24T21:10:19.307590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { 
RawX1: 328 RawX2: 4294969605 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:19.307623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-24T21:10:19.307706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 328 RawX2: 4294969605 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:19.307756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:10:19.307817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 328 RawX2: 4294969605 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:19.307861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:19.307895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:19.307925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:10:19.307956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T21:10:19.309744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:19.310237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:19.310549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:19.310585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:10:19.310660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:10:19.310684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:10:19.310726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:10:19.310747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:10:19.310772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T21:10:19.310826Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:379:2345] message: TxId: 102 2025-06-24T21:10:19.310874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:10:19.310916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:10:19.310941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:10:19.311016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:10:19.312285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:10:19.312327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:509:2432] TestWaitNotification: OK eventTxId 102 2025-06-24T21:10:19.312709Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:19.312976Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 279us result status StatusSuccess 2025-06-24T21:10:19.313348Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByExpireAt" LocalPathId: 3 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "modified_at" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 
ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTTLTests::CheckCounters >> TSchemeShardTTLTests::BackupCopyHasNoTtlSettings [GOOD] >> TExecutorDb::RandomCoordinatorSimulation [GOOD] >> TExecutorDb::MultiPage >> TKeyValueTest::TestWriteReadWithRestartsThenResponseOkNewApi [GOOD] >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOk ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::BuildIndexShouldSucceed [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:19.039228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:19.039298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:19.039343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:19.039383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:19.039430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:19.039452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:19.039505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:19.039572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:19.040272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:19.040562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:19.100143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:19.100188Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:19.112419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:19.112749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:19.112862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:19.118897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:19.119039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:19.119524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:19.119766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:19.122043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:19.122180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:19.122957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:19.123000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:19.123170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:19.123214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:19.123293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:19.123361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:19.127880Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:19.230991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" 
StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:19.231254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:19.231410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:19.231456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:19.231683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:19.231828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:19.234157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:19.234372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:19.234588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:19.234637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:19.234681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:19.234713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:19.236666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:19.236741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:19.236785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:19.238549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:19.238594Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:19.238653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState 
leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:19.238694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:19.242084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:19.243918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:19.244076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:19.245075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:19.245227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:19.245290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:19.245572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:19.245629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:19.245806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:19.245887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:19.248116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:19.248166Z node 1 :FLAT_TX_SCHEMESHARD ... 
10760, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 281474976710760 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710760 at step: 5000006 2025-06-24T21:10:19.802317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000006, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:19.802385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710760 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000006 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:19.802415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710760:0 HandleReply TEvOperationPlan: step# 5000006 2025-06-24T21:10:19.802473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710760:0 128 -> 240 2025-06-24T21:10:19.803773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710760:0, at schemeshard: 72057594046678944 2025-06-24T21:10:19.803807Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 281474976710760:0 ProgressState 2025-06-24T21:10:19.803867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-24T21:10:19.803895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-24T21:10:19.803919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-24T21:10:19.803936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-24T21:10:19.803955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-06-24T21:10:19.803987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:127:2151] message: TxId: 281474976710760 2025-06-24T21:10:19.804011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-24T21:10:19.804031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710760:0 2025-06-24T21:10:19.804065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710760:0 2025-06-24T21:10:19.804103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 FAKE_COORDINATOR: Erasing txId 281474976710760 2025-06-24T21:10:19.805178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-06-24T21:10:19.805216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 
281474976710760 2025-06-24T21:10:19.805252Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 102, txId# 281474976710760 2025-06-24T21:10:19.805319Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:388:2358], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, txId# 281474976710760 2025-06-24T21:10:19.806314Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking 2025-06-24T21:10:19.806381Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:388:2358], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T21:10:19.806439Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-24T21:10:19.807496Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done 2025-06-24T21:10:19.807547Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Done, IsBroken: 0, 
IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:388:2358], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T21:10:19.807583Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:334: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-06-24T21:10:19.807705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:10:19.807744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:475:2434] TestWaitNotification: OK eventTxId 102 2025-06-24T21:10:19.808121Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:19.808312Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 195us result status StatusSuccess 2025-06-24T21:10:19.808660Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByValue" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 
RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpProxy::ExecuteScriptFailsWithoutFeatureFlag [GOOD] >> KqpSort::ComplexPkExclusiveSecondOptionalPredicate [GOOD] >> KqpSort::ComplexPkInclusiveSecondOptionalPredicate >> TSchemeShardTTLTests::LegacyTtlSettingsNoTiersAlterTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::BackupCopyHasNoTtlSettings [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:19.504799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:19.504907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:19.504949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:19.504995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:19.505046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:19.505076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:19.505135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:19.505217Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:19.505963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:19.506282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:19.590086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:19.590142Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:19.605719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:19.606104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:19.606252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:19.613400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:19.613553Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:19.614105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:19.614358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:19.617122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:19.617274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:19.618369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:19.618435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:19.618632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:19.618684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:19.618786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:19.618873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:19.625037Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:19.755701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:19.755886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:19.756070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:19.756111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:19.756344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:19.756399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:19.758099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:19.758273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:19.758458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:19.758511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:19.758550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:19.758573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:19.760146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:19.760199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:19.760226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:19.761327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:19.761357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-24T21:10:19.761392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:19.761426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:19.768308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:19.769633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:19.769754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:19.770398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:19.770502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:19.770542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:19.770731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:19.770772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:19.770895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:19.770943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:19.772592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:19.772638Z node 1 :FLAT_TX_SCHEMESHARD ... 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 412 RawX2: 4294969675 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:20.125086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-24T21:10:20.125209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 412 RawX2: 4294969675 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:20.125265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:10:20.125363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 412 RawX2: 4294969675 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:20.125442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 1, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.125496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1055: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged CollectSchemaChanged: false 2025-06-24T21:10:20.127290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.128086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.140362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:20.140415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:20.140530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:20.140583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:10:20.140661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 
Step: 0 Generation: 2 2025-06-24T21:10:20.140708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.140743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.140795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:10:20.140841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:10:20.140881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T21:10:20.142588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.142978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.143031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:93: TCopyTable::TWaitCopyTableBarrier operationId: 102:0ProgressState, operation type TxCopyTable 2025-06-24T21:10:20.143076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1060: Set barrier, OperationId: 102:0, name: CopyTableBarrier, done: 0, blocked: 1, parts count: 1 2025-06-24T21:10:20.143118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1104: All parts have reached barrier, tx: 102, done: 0, blocked: 1 2025-06-24T21:10:20.143214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:76: TCopyTable::TWaitCopyTableBarrier operationId: 102:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 102 Name: CopyTableBarrier }, at tablet# 72057594046678944 2025-06-24T21:10:20.143253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 240 -> 240 2025-06-24T21:10:20.144928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.144970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:10:20.145066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:10:20.145104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:10:20.145142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:10:20.145189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:10:20.145241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation 
IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T21:10:20.145312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:339:2316] message: TxId: 102 2025-06-24T21:10:20.145363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:10:20.145398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:10:20.145429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:10:20.145559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:10:20.145597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:10:20.147196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:10:20.147248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:440:2399] TestWaitNotification: OK eventTxId 102 2025-06-24T21:10:20.147840Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTableCopy" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:20.148075Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTableCopy" took 251us result status StatusSuccess 2025-06-24T21:10:20.148541Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTableCopy" PathDescription { Self { Name: "TTLEnabledTableCopy" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTableCopy" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "ts" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: true IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 
WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTTLTests::LegacyTtlSettingsNoTiers [GOOD] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TExecutorDb::MultiPage [GOOD] >> TExecutorDb::EncodedPage >> TSchemeShardTTLTestsWithReboots::MoveTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::LegacyTtlSettingsNoTiersAlterTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:20.238467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:20.238583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:20.238629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:20.238678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:20.238736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:20.238774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:20.238844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:20.238918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-06-24T21:10:20.239665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:20.240000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:20.295094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:20.295136Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:20.305754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:20.306070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:20.306211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:20.312247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:20.312386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:20.312839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.313142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:20.315313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:20.315463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:20.316262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:20.316300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:20.316444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:20.316485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:20.316562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:20.316638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.321921Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:20.420086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: 
"pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:20.420349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.420506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:20.420539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:20.420769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:20.420851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:20.422656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.422820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:20.422983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.423020Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:20.423058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:20.423081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:20.424411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.424478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:20.424505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:20.425648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.425676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.425718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:20.425746Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:20.433052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:20.434645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:20.434773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:20.435556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.435665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:20.435714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:20.435919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:20.435958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:20.436112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:20.436165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:20.437690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:20.437726Z node 1 :FLAT_TX_SCHEMESHARD ... 
meBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-24T21:10:20.664805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.664851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-06-24T21:10:20.665334Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:10:20.665433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:10:20.665474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:10:20.665511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-24T21:10:20.665563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:10:20.665641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-24T21:10:20.668157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:10:20.680035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1074 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-24T21:10:20.680091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:20.680233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1074 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-24T21:10:20.680340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 
CpuTimeUsec: 1074 } } CommitVersion { Step: 5000003 TxId: 102 } FAKE_COORDINATOR: Erasing txId 102 2025-06-24T21:10:20.680989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:20.681030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:20.681134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:20.681197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:10:20.681285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:20.681363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.681401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.681444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:10:20.681489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T21:10:20.683706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.683932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.684027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.684066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:10:20.684143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:10:20.684167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:10:20.684192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:10:20.684211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:10:20.684240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T21:10:20.684296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:339:2316] message: TxId: 102 2025-06-24T21:10:20.684331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:10:20.684357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:10:20.684377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:10:20.684459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:10:20.685970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:10:20.686017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:397:2367] TestWaitNotification: OK eventTxId 102 2025-06-24T21:10:20.686543Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:20.686756Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 237us result status StatusSuccess 2025-06-24T21:10:20.687271Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 
ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::LegacyTtlSettingsNoTiers [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:20.312897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:20.312964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:20.312997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:20.313026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:20.313070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:20.313090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:20.313140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:20.313190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:20.313703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:20.313956Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:20.368867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:20.368909Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:20.379572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:20.379904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:20.380037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:20.386160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:20.386305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:20.386837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.387084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:20.389531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:20.389690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:20.390601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:20.390644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:20.390813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:20.390855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:20.390933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:20.391027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.395908Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:20.503401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:20.503664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: 
//MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.503850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:20.503905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:20.504179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:20.504249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:20.506350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.506546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:20.506727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.506774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:20.506819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:20.506850Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:20.508661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.508732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:20.508775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:20.510419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.510478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.510527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:20.510574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:20.514138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose 
send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:20.515940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:20.516104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:20.517022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.517164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:20.517221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:20.517514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:20.517561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:20.517741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:20.517815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:20.519686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:20.519734Z node 1 :FLAT_TX_SCHEMESHARD ... 
ublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T21:10:20.704978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:20.705766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:10:20.705819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:10:20.705847Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:10:20.705868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:10:20.705888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:10:20.705937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-06-24T21:10:20.706116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 964 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-24T21:10:20.706138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:20.706271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 964 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-24T21:10:20.706343Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 964 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-24T21:10:20.706842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 
2025-06-24T21:10:20.706880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:20.706965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:20.707002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:10:20.707068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:20.707115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.707172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.707210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:10:20.707250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 129 -> 240 2025-06-24T21:10:20.708995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:10:20.710066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:10:20.710139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.710204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.710390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.710420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:10:20.710523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:10:20.710557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:20.710584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:10:20.710608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:20.710637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-24T21:10:20.710704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:339:2316] message: TxId: 101 2025-06-24T21:10:20.710744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:20.710775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:10:20.710797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:10:20.710949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:10:20.712215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:10:20.712249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:340:2317] TestWaitNotification: OK eventTxId 101 2025-06-24T21:10:20.712639Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:20.712816Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 191us result status StatusSuccess 2025-06-24T21:10:20.713218Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 
ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldSuccessOnSimultaneousAddColumnAndEnableTTL ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> KqpProxy::ExecuteScriptFailsWithoutFeatureFlag [GOOD] Test command err: 2025-06-24T21:10:03.922678Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626179102720199:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:03.922906Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047c1/r3tmp/tmpfA12FX/pdisk_1.dat 2025-06-24T21:10:04.205569Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626179102720181:2079] 1750799403921768 != 1750799403921771 2025-06-24T21:10:04.211878Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:04.275021Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:04.275117Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:04.276737Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25250 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:10:04.483358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:04.932915Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:05.948597Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:05.949888Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.960775Z node 1 :KQP_PROXY WARN: kqp_proxy_service.cpp:1578: Failed to parse session id: ydb://session/1?id=ZjY5NWRlM2EtYWMyYjA5YWEtNzQ0MTVlYTMtM2Q4ZDgzOWQ=&node_id=1234&node_id=12345 2025-06-24T21:10:05.962647Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-24T21:10:05.962689Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 
2025-06-24T21:10:05.962710Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:05.962761Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:05.962932Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.962978Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.963068Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 2, sender: [1:7519626183397688023:2281], selfId: [1:7519626179102720413:2237], source: [1:7519626179102720413:2237] 2025-06-24T21:10:05.963109Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.963139Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:05.963592Z node 1 :KQP_PROXY WARN: kqp_proxy_service.cpp:1578: Failed to parse session id: unknown://session/1?id=ZjY5NWRlM2EtYWMyYjA5YWEtNzQ0MTVlYTMtM2Q4ZDgzOWQ=&node_id=1234&node_id=12345 2025-06-24T21:10:05.963672Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 3, sender: [1:7519626183397688023:2281], selfId: [1:7519626179102720413:2237], source: [1:7519626179102720413:2237] 2025-06-24T21:10:05.963715Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626187692655370:2269], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:05.963835Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:05.963982Z node 1 :KQP_PROXY WARN: kqp_proxy_service.cpp:1578: Failed to parse session id: ydb://session/1?id=ZjY5NWRlM2EtYWMyYjA5YWEtNzQ0MTVlYTMtM2Q4ZDgzOWQ=&node_id=eqweq 2025-06-24T21:10:05.964102Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 4, sender: [1:7519626183397688023:2281], selfId: [1:7519626179102720413:2237], source: [1:7519626179102720413:2237] 2025-06-24T21:10:05.964166Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626187692655379:2270], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:05.964240Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:08.801012Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:10:08.801282Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:10:08.801386Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047c1/r3tmp/tmpbcm7KZ/pdisk_1.dat 2025-06-24T21:10:09.006064Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-24T21:10:09.016124Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:09.043317Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-24T21:10:09.043412Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:09.043662Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:09.045717Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1750799406414139 != 1750799406414143 2025-06-24T21:10:09.077713Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:277:2320], request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: Root/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:10:09.079643Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [2:277:2320], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /Root/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-24T21:10:09.079770Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [2:277:2320], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /Root/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [2:569:2492] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-24T21:10:09.079913Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:277:2320], cacheItem# { Subscriber: { Subscriber: [2:569:2492] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: 
StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:10:09.080067Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [2:277:2320], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /Root/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-24T21:10:09.080129Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [2:277:2320], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /Root/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [2:570:2493] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 Ta ... Actor Captured TEvents::TSystem::Wakeup to TABLET_RESPONSIVENESS_PINGER Captured TEvents::TSystem::Wakeup to TABLET_RESPONSIVENESS_PINGER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to NKikimr::NKqp::NSchedulerOld::TSchedulerActor Captured TEvents::TSystem::Wakeup to (anonymous namespace)::TComputeSchedulerService Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to TICKET_PARSER_ACTOR Captured TEvents::TSystem::Wakeup to NKikimr::NIcNodeCache::TIcNodeCacheServiceActor Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to DS_PROXY_NODE_MON_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to EXT_COUNTERS_UPDATER_ACTOR Captured TEvents::TSystem::Wakeup to EXT_COUNTERS_UPDATER_ACTOR Captured TEvents::TSystem::Wakeup to KQP_COMPILE_COMPUTATION_PATTERN_SERVICE Captured TEvents::TSystem::Wakeup to KQP_NODE_SERVICE Captured TEvents::TSystem::Wakeup to (anonymous namespace)::TComputeSchedulerService Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to BSC_STAT_PROCESSOR Captured TEvents::TSystem::Wakeup to NKikimr::NBsController::TBlobStorageController::TSelfHealActor Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to TABLET_COUNTERS_AGGREGATOR Captured TEvents::TSystem::Wakeup to NKikimr::NKqp::NSchedulerOld::TSchedulerActor Captured 
TEvents::TSystem::Wakeup to TABLET_RESPONSIVENESS_PINGER Captured TEvents::TSystem::Wakeup to TABLET_RESPONSIVENESS_PINGER Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to NKikimr::NKqp::NSchedulerOld::TSchedulerActor Captured TEvents::TSystem::Wakeup to (anonymous namespace)::TComputeSchedulerService Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to EXT_COUNTERS_UPDATER_ACTOR Captured TEvents::TSystem::Wakeup to KQP_COMPILE_COMPUTATION_PATTERN_SERVICE Captured TEvents::TSystem::Wakeup to KQP_NODE_SERVICE Captured TEvents::TSystem::Wakeup to (anonymous namespace)::TComputeSchedulerService Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to TICKET_PARSER_ACTOR Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to DS_PROXY_NODE_MON_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to EXT_COUNTERS_UPDATER_ACTOR Captured TEvents::TSystem::Wakeup to NKikimr::NKqp::NSchedulerOld::TSchedulerActor Captured TEvents::TSystem::Wakeup to TABLET_RESPONSIVENESS_PINGER Captured TEvents::TSystem::Wakeup to TABLET_RESPONSIVENESS_PINGER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to NKikimr::NKqp::NSchedulerOld::TSchedulerActor Captured TEvents::TSystem::Wakeup to (anonymous namespace)::TComputeSchedulerService Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR 2025-06-24T21:10:16.642570Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(20) 2025-06-24T21:10:16.642668Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1342: Reply timeout: requestId 20 sessionId: ydb://session/3?node_id=2&id=MzZjM2MyY2MtNWExZDg0NWYtYjUyYzQ2YTUtNjc2OGM1ODE= status: TIMEOUT round: 0 2025-06-24T21:10:16.642835Z node 2 
:KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=MzZjM2MyY2MtNWExZDg0NWYtYjUyYzQ2YTUtNjc2OGM1ODE=, ActorId: [2:1073:2876], ActorState: ExecuteState, TraceId: 01jyhwdyh9caskzmfdjx6znfz6, Create QueryResponse for error on request, msg: 2025-06-24T21:10:16.643042Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 20, sender: [2:552:2478], selfId: [2:57:2104], source: [2:1073:2876] Send scheduled evet back 2025-06-24T21:10:16.643191Z node 2 :KQP_COMPILE_ACTOR NOTICE: kqp_compile_actor.cpp:577: Compilation timeout, self: [2:1075:2878], cluster: db, database: , text: "SELECT * FROM `/Root/Table`;", startTime: 2025-06-24T21:10:15.850247Z 2025-06-24T21:10:16.643270Z node 2 :KQP_COMPILE_ACTOR DEBUG: kqp_compile_actor.cpp:402: Send response, self: [2:1075:2878], owner: [2:270:2313], status: TIMEOUT, issues:
: Error: Query compilation timed out. , uid: 95705501-cb72667c-8e18b552-5205ca60 Send captured event back Send captured event back Send captured event back Send captured event back Send captured event back 2025-06-24T21:10:17.280638Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519626238008256298:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:17.280712Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047c1/r3tmp/tmp4bzRQP/pdisk_1.dat 2025-06-24T21:10:17.387650Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:17.387715Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:17.392415Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:17.412969Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18839, node 3 2025-06-24T21:10:17.454023Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:17.454052Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:17.454067Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:17.454211Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3884 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:17.654610Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:10:18.285784Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:19.460857Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:19.461932Z node 3 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /Root, empty 2025-06-24T21:10:19.471354Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-24T21:10:19.471400Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-24T21:10:19.471425Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:19.471479Z node 3 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /Root, empty 2025-06-24T21:10:19.471534Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:19.471570Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:19.473453Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:19.473509Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 >> TExecutorDb::EncodedPage [GOOD] >> TFlatCxxDatabaseTest::BasicSchemaTest >> TFlatCxxDatabaseTest::BasicSchemaTest [GOOD] >> TFlatCxxDatabaseTest::RenameColumnSchemaTest [GOOD] >> TFlatCxxDatabaseTest::SchemaFillerTest [GOOD] >> TFlatDatabaseDecimal::UpdateRead [GOOD] >> TFlatEraseCacheTest::BasicUsage [GOOD] >> TFlatEraseCacheTest::BasicUsageReverse [GOOD] >> TFlatEraseCacheTest::CacheEviction [GOOD] >> TFlatEraseCacheTest::StressGarbageCollection [GOOD] >> TFlatEraseCacheTest::StressGarbageCollectionWithStrings [GOOD] >> TFlatExecutorLeases::Basics >> DataShardTxOrder::RandomPoints_DelayRS [GOOD] >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongUnit-EnableTablePgTypes-false |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldSuccessOnSimultaneousAddColumnAndEnableTTL [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::RandomPoints_DelayRS [GOOD] Test command err: 2025-06-24T21:09:12.129095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:09:12.129171Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:12.131998Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:12.156396Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:12.156984Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:09:12.157248Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:12.200456Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:12.217746Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:12.218027Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:12.219815Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:09:12.219897Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:09:12.219973Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:09:12.220394Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:12.220485Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:12.220552Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:09:12.288194Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:12.329145Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:09:12.329380Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:12.329492Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:09:12.329534Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:12.329572Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:09:12.329618Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:12.329798Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:12.329857Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:12.330152Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:12.330254Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:12.330422Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:12.330464Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:12.330523Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:12.330563Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:12.330601Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:12.330633Z 
node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:12.330674Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:12.330794Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:12.330835Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:12.330895Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:09:12.334235Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:09:12.334317Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:12.334418Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:12.334615Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:09:12.334666Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:09:12.334725Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:09:12.334769Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:12.334811Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:09:12.334853Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:09:12.334887Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:12.335284Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:12.335328Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:09:12.335371Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:12.335424Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:12.335491Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:09:12.335551Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:12.335604Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:09:12.335708Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:12.335742Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:12.352180Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:12.352287Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:12.352339Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:12.352387Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:12.352485Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:09:12.353146Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:12.353206Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:12.353261Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:12.353414Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:12.353477Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:12.353620Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:12.353671Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:09:12.353712Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:09:12.353748Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:12.360487Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:12.360584Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:12.360858Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:12.360924Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:12.360985Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 
9437184 2025-06-24T21:09:12.361028Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:12.361066Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:12.361114Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:09:12.361165Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 0:21.416447Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:26] at 9437184 on unit CompleteOperation 2025-06-24T21:10:21.416494Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 26] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:21.416530Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:21.416670Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:21.416704Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:27] at 9437184 on unit CompleteOperation 2025-06-24T21:10:21.416750Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 27] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:21.416786Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:21.416947Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:21.416978Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:28] at 9437184 on unit CompleteOperation 2025-06-24T21:10:21.417022Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 28] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:21.417056Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:21.417225Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:21.417256Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:29] at 9437184 on unit CompleteOperation 2025-06-24T21:10:21.417304Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 29] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:21.417340Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:21.417559Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:21.417595Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:30] at 9437184 on unit CompleteOperation 2025-06-24T21:10:21.417640Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 30] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:21.417675Z node 32 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:21.417867Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:21.417901Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:31] at 9437184 on unit CompleteOperation 2025-06-24T21:10:21.417945Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 31] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:21.417980Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:21.418125Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:21.418160Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:32] at 9437184 on unit CompleteOperation 2025-06-24T21:10:21.418203Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 32] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:21.418238Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:21.418383Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:21.418416Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:33] at 9437184 on unit CompleteOperation 2025-06-24T21:10:21.418476Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 33] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:21.418510Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:21.418715Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:21.418752Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:34] at 9437184 on unit CompleteOperation 2025-06-24T21:10:21.418797Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 34] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:21.418832Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:21.419030Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:21.419063Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:35] at 9437184 on unit CompleteOperation 2025-06-24T21:10:21.419107Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 35] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:21.419181Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:21.419351Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:21.419385Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for 
[1000004:36] at 9437184 on unit CompleteOperation 2025-06-24T21:10:21.419432Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 36] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:21.419466Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:21.419624Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:21.419661Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:37] at 9437184 on unit CompleteOperation 2025-06-24T21:10:21.419707Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 37] from 9437184 at tablet 9437184 send result to client [32:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:10:21.419739Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:21.420054Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:237:2228], Recipient [32:347:2312]: {TEvReadSet step# 1000004 txid# 36 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 32} 2025-06-24T21:10:21.420104Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:10:21.420149Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 36 2025-06-24T21:10:21.420307Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:237:2228], Recipient [32:347:2312]: {TEvReadSet step# 1000004 txid# 5 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 2} 2025-06-24T21:10:21.420349Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:10:21.420381Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 5 2025-06-24T21:10:21.420441Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:237:2228], Recipient [32:347:2312]: {TEvReadSet step# 1000004 txid# 6 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 3} 2025-06-24T21:10:21.420474Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:10:21.420504Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 6 2025-06-24T21:10:21.420596Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:237:2228], Recipient [32:347:2312]: {TEvReadSet step# 1000004 txid# 7 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 4} 2025-06-24T21:10:21.420630Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:10:21.420663Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 7 2025-06-24T21:10:21.420754Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:237:2228], 
Recipient [32:347:2312]: {TEvReadSet step# 1000004 txid# 9 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 6} 2025-06-24T21:10:21.420791Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:10:21.420822Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 9 2025-06-24T21:10:21.420910Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:237:2228], Recipient [32:347:2312]: {TEvReadSet step# 1000004 txid# 10 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 7} 2025-06-24T21:10:21.420942Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:10:21.420977Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 10 2025-06-24T21:10:21.421067Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:237:2228], Recipient [32:347:2312]: {TEvReadSet step# 1000004 txid# 13 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 9} 2025-06-24T21:10:21.421100Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:10:21.421132Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 13 expect 31 30 24 29 27 23 28 25 19 19 29 31 30 23 29 28 3 14 19 25 28 19 19 28 25 - - - - - - - actual 31 30 24 29 27 23 28 25 19 19 29 31 30 23 29 28 3 14 19 25 28 19 19 28 25 - - - - - - - interm 2 6 4 6 5 3 1 5 0 1 3 3 6 - 0 1 3 3 0 - - 0 - 0 6 - - - - - - - |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldSucceedOnIndexedTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldSuccessOnSimultaneousAddColumnAndEnableTTL [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:21.921770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:21.921841Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:21.921869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:21.921893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:21.921933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type 
TxMergeTablePartition, limit 10000 2025-06-24T21:10:21.921954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:21.921997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:21.922044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:21.922578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:21.922795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:21.974660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:21.974700Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:21.984987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:21.985237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:21.985374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:21.990350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:21.990480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:21.990886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:21.991095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:21.992917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:21.993035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:21.993799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:21.993846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:21.993976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:21.994010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:21.994082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 
2025-06-24T21:10:21.994141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:21.998795Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:22.081453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:22.081658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:22.081794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:22.081834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:22.082038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:22.082133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:22.084066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:22.084212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:22.084350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:22.084402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:22.084436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:22.084460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:22.085862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:22.085915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:22.085960Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:22.087237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:22.087276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:22.087320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:22.087356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:22.089878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:22.091114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:22.091293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:22.091960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:22.092085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:22.092135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:22.092327Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:22.092374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:22.092492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:22.092541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:22.093774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-24T21:10:22.093819Z node 1 :FLAT_TX_SCHEMESHARD ... nd, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-24T21:10:22.303414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:22.303471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-06-24T21:10:22.303990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:10:22.304064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:10:22.304105Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:10:22.304133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-24T21:10:22.304171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:10:22.304240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-24T21:10:22.304660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1219 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-24T21:10:22.304690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:22.304771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1219 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-24T21:10:22.304831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1219 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-24T21:10:22.305214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: 
Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:22.305242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:22.305317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:22.305355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:10:22.305411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:22.305472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:22.305512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:22.305539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:10:22.305569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T21:10:22.308347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:10:22.309220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:22.309318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:22.309535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:22.309569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:10:22.309637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:10:22.309662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:10:22.309692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:10:22.309712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:10:22.309736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T21:10:22.309787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:339:2316] message: TxId: 102 2025-06-24T21:10:22.309833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:10:22.309869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:10:22.309894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:10:22.309974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:10:22.311263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:10:22.311302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:397:2367] TestWaitNotification: OK eventTxId 102 2025-06-24T21:10:22.311712Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:22.311908Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 176us result status StatusSuccess 2025-06-24T21:10:22.312254Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics 
{ CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTTLTests::CreateTableShouldFailOnUnspecifiedTTL >> TSchemeShardTTLTestsWithReboots::CopyTable >> TKeyValueTest::TestInlineWriteReadDeleteWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestInlineEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi >> TFlatExecutorLeases::Basics [GOOD] >> TFlatExecutorLeases::BasicsLeaseTimeout |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> KqpSqlIn::SecondaryIndex_TupleSelect [GOOD] >> TSchemeShardTTLTests::CreateTableShouldSucceedOnIndexedTable [GOOD] >> TSchemeShardTTLTests::CreateTableShouldFailOnUnspecifiedTTL [GOOD] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::ConditionalErase [GOOD] >> TKeyValueTest::TestWriteReadRangeDataLimitThenLimitWorksNewApi [GOOD] >> TSchemeShardColumnTableTTL::AlterColumnTable [GOOD] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnUnspecifiedTTL [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:23.338101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:23.338185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:23.338224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:23.338275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 
2025-06-24T21:10:23.338338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:23.338371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:23.338466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:23.338537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:23.339260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:23.339587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:23.404663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:23.404704Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:23.414918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:23.415250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:23.415376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:23.421131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:23.421272Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:23.421866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:23.422114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:23.424673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:23.424809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:23.425860Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:23.425915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:23.426096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:23.426159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:23.426260Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:23.426346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.432144Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:23.536532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:23.536694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.536810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:23.536838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:23.537052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:23.537099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:23.538797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:23.538927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:23.539056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.539092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:23.539125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:23.539173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:23.540443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.540491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: 
NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:23.540516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:23.541656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.541699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.541739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:23.541768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:23.544201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:23.545419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:23.545548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:23.546163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:23.546268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:23.546314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:23.546591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:23.546648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:23.546797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:23.546859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:23.548589Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:23.548635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:23.548787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:23.548817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-24T21:10:23.549032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.549067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T21:10:23.549155Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:10:23.549183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:23.549226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:10:23.549252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:23.549274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T21:10:23.549299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:23.549338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T21:10:23.549360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T21:10:23.549400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:23.549424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T21:10:23.549457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T21:10:23.550724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:23.550821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:23.550861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, 
count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T21:10:23.550889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T21:10:23.550932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:23.550999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T21:10:23.553271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T21:10:23.553729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-24T21:10:23.554824Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:271:2260] Bootstrap 2025-06-24T21:10:23.571839Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:271:2260] Become StateWork (SchemeCache [1:276:2265]) 2025-06-24T21:10:23.573731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:23.573938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.574015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { }, at schemeshard: 72057594046678944 2025-06-24T21:10:23.574285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: TTL status must be specified, at schemeshard: 72057594046678944 2025-06-24T21:10:23.574830Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:10:23.577283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "TTL status must be specified" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:23.577451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: TTL status must be specified, operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-06-24T21:10:23.577781Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 >> 
TSchemeShardColumnTableTTL::AlterColumnTable_Negative [GOOD] >> TKeyValueTest::TestWrite200KDeleteThenResponseErrorNewApi [GOOD] >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_ColumnType ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldSucceedOnIndexedTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:23.148384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:23.148465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:23.148503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:23.148564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:23.148628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:23.148655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:23.148714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:23.148779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:23.149496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:23.149812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:23.209824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:23.209876Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:23.219941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:23.220202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:23.220331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:23.225419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:23.225586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear 
TempDirsState with owners number: 0 2025-06-24T21:10:23.226160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:23.226412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:23.228520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:23.228650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:23.229527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:23.229566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:23.229731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:23.229775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:23.229869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:23.229935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.234647Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:23.334199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:23.334383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.334518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:23.334546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:23.334764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:23.334816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:23.336266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:23.336401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:23.336547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.336578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:23.336614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:23.336635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:23.337797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.337856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:23.337892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:23.339063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.339092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.339135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:23.339189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:23.341523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:23.342675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:23.342787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:23.343430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:23.343538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:23.343584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:23.343778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:23.343826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:23.343943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:23.344014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:23.345187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:23.345221Z node 1 :FLAT_TX_SCHEMESHARD ... 4744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:2 129 -> 240 2025-06-24T21:10:23.574993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 328 RawX2: 4294969605 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:23.575014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409547, partId: 0 2025-06-24T21:10:23.575077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 328 RawX2: 4294969605 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:23.575101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:10:23.575170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 328 RawX2: 4294969605 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:23.575213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:23.575230Z node 
1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.575260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 101:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:10:23.575291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 129 -> 240 2025-06-24T21:10:23.578251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:10:23.578321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:10:23.579536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:10:23.579601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:10:23.579653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-24T21:10:23.579735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.579787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-24T21:10:23.579978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.580076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-24T21:10:23.580101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:2 ProgressState 2025-06-24T21:10:23.580178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:2 progress is 2/3 2025-06-24T21:10:23.580204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-24T21:10:23.580228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:2 progress is 2/3 2025-06-24T21:10:23.580249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-24T21:10:23.580272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 2/3, is published: true 2025-06-24T21:10:23.580405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.580431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:10:23.580462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation 
is done id#101:0 progress is 3/3 2025-06-24T21:10:23.580474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-24T21:10:23.580502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 3/3 2025-06-24T21:10:23.580517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-24T21:10:23.580530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 3/3, is published: true 2025-06-24T21:10:23.580578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:379:2345] message: TxId: 101 2025-06-24T21:10:23.580608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-24T21:10:23.580637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:10:23.580659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:10:23.580770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:10:23.580808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:1 2025-06-24T21:10:23.580830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:1 2025-06-24T21:10:23.580850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:10:23.580881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:2 2025-06-24T21:10:23.580897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:2 2025-06-24T21:10:23.580925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:10:23.582511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:10:23.582549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:380:2346] TestWaitNotification: OK eventTxId 101 2025-06-24T21:10:23.582945Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:23.583139Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 186us result status StatusSuccess 2025-06-24T21:10:23.583578Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: 
"/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByExpireAt" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "modified_at" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::ConditionalErase [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:19.959421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single 
parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:19.959486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:19.959510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:19.959535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:19.959584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:19.959607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:19.959660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:19.959709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:19.960259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:19.960504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:20.015688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:20.015727Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:20.028300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:20.028697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:20.028859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:20.036072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:20.036231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:20.036839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.037121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:20.039938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:20.040093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:20.041213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: 
TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:20.041269Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:20.041473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:20.041529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:20.041644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:20.041754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.048415Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:20.148530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:20.148705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.148825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:20.148857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:20.149066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:20.149145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:20.150967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.151209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:20.151410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.151455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: 
TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:20.151490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:20.151511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:20.152939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.152990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:20.153018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:20.154228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.154261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.154305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:20.154342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:20.161450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:20.163080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:20.163261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:20.163943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.164062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:20.164106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:20.164328Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:20.164366Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:20.164531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:20.164585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:20.166388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:20.166439Z node 1 :FLAT_TX_SCHEMESHARD ... amp: 1600463040223000 ColumnUnit: UNIT_AUTO } SchemaVersion: 1 Limits { BatchMaxBytes: 512000 BatchMinKeys: 1 BatchMaxKeys: 256 }, at schemeshard: 72057594046678944 2025-06-24T21:10:23.473734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:213: Run conditional erase, tabletId: 72075186233409551, request: TableId: 7 Expiration { ColumnId: 2 WallClockTimestamp: 1600466640223000 ColumnUnit: UNIT_MICROSECONDS } SchemaVersion: 1 Limits { BatchMaxBytes: 512000 BatchMinKeys: 1 BatchMaxKeys: 256 }, at schemeshard: 72057594046678944 2025-06-24T21:10:23.474475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6740: Conditional erase accepted: tabletId: 72075186233409548, at schemeshard: 72057594046678944 2025-06-24T21:10:23.474845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6740: Conditional erase accepted: tabletId: 72075186233409549, at schemeshard: 72057594046678944 2025-06-24T21:10:23.475598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6740: Conditional erase accepted: tabletId: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:10:23.475759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6740: Conditional erase accepted: tabletId: 72075186233409550, at schemeshard: 72057594046678944 2025-06-24T21:10:23.475984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6740: Conditional erase accepted: tabletId: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:10:23.476257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6740: Conditional erase accepted: tabletId: 72075186233409551, at schemeshard: 72057594046678944 2025-06-24T21:10:23.477072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-24T21:10:23.477118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful conditional erase: tabletId: 72075186233409548, at schemeshard: 72057594046678944 2025-06-24T21:10:23.477511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-24T21:10:23.477537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful conditional erase: tabletId: 72075186233409549, at schemeshard: 72057594046678944 2025-06-24T21:10:23.482128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-24T21:10:23.482175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful 
conditional erase: tabletId: 72075186233409550, at schemeshard: 72057594046678944 2025-06-24T21:10:23.482892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-24T21:10:23.482937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful conditional erase: tabletId: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:10:23.483332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-24T21:10:23.483650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-24T21:10:23.483692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:3, run at: 2020-09-18T23:04:00.224000Z, at schemeshard: 72057594046678944 2025-06-24T21:10:23.483766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-24T21:10:23.483876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-24T21:10:23.483900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:4, run at: 2020-09-18T23:04:00.224000Z, at schemeshard: 72057594046678944 2025-06-24T21:10:23.483958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-24T21:10:23.483994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful conditional erase: tabletId: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:10:23.486152Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-24T21:10:23.486298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-24T21:10:23.486328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:5, run at: 2020-09-18T23:04:00.226000Z, at schemeshard: 72057594046678944 2025-06-24T21:10:23.486399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-24T21:10:23.486457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-24T21:10:23.486483Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-24T21:10:23.486552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-24T21:10:23.486576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:1, run at: 2020-09-18T23:04:00.226000Z, at schemeshard: 72057594046678944 2025-06-24T21:10:23.486634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: 
TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-24T21:10:23.486659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-24T21:10:23.486674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-24T21:10:23.486706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-24T21:10:23.486725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:2, run at: 2020-09-18T23:04:00.226000Z, at schemeshard: 72057594046678944 2025-06-24T21:10:23.486753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-24T21:10:23.553244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 5 2025-06-24T21:10:23.553487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0 2025-06-24T21:10:23.553582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable1, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:10:23.553685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-24T21:10:23.553739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 3 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-24T21:10:23.553769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], pathId map=TTLEnabledTable2, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:10:23.553800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-24T21:10:23.553827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046678944:4 data size 43 row count 1 2025-06-24T21:10:23.553889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409549 maps to shardIdx: 72057594046678944:4 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], pathId map=TTLEnabledTable4, is column=0, is olap=0, RowCount 1, DataSize 43 2025-06-24T21:10:23.553970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409549 2025-06-24T21:10:23.554016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:3 data size 603 row count 2 2025-06-24T21:10:23.554076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: 
TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409548 maps to shardIdx: 72057594046678944:3 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTable3, is column=0, is olap=0, RowCount 2, DataSize 603 2025-06-24T21:10:23.554135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409548 2025-06-24T21:10:23.554219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 6 shard idx 72057594046678944:5 data size 627 row count 2 2025-06-24T21:10:23.554272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409550 maps to shardIdx: 72057594046678944:5 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 6], pathId map=TTLEnabledTable5, is column=0, is olap=0, RowCount 2, DataSize 627, with borrowed parts 2025-06-24T21:10:23.554325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409550 2025-06-24T21:10:23.566970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-24T21:10:23.567033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful conditional erase: tabletId: 72075186233409551, at schemeshard: 72057594046678944 2025-06-24T21:10:23.569146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-24T21:10:23.569353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-24T21:10:23.569409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:6, run at: 2020-09-18T23:04:00.229000Z, at schemeshard: 72057594046678944 2025-06-24T21:10:23.569487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 >> TKeyValueTest::TestWriteReadRangeLimitThenLimitWorksNewApi [GOOD] >> TFlatExecutorLeases::BasicsLeaseTimeout [GOOD] >> TFlatExecutorLeases::BasicsInitialLease ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteReadRangeDataLimitThenLimitWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:82:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:81:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:81:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:83:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:85:2114] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:89:2057] recipient: [7:85:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! new actor is[7:88:2115] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:174:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:85:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:88:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:89:2057] recipient: [8:87:2116] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:91:2057] recipient: [8:87:2116] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! new actor is[8:90:2117] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:176:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:85:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:88:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:89:2057] recipient: [9:87:2116] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:91:2057] recipient: [9:87:2116] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! new actor is[9:90:2117] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:176:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:86:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:89:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:90:2057] recipient: [10:88:2116] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:92:2057] recipient: [10:88:2116] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! 
new actor is[10:91:2117] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:177:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:53:2096] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:59:2057] recipient: [13:53:2096] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:76:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:78:2057] recipient: [13:37:2084] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:81:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:82:2057] recipient: [13:80:2111] Leader for TabletID 72057594037927937 is [13:83:2112] sender: [13:84:2057] recipient: [13:80:2111] !Reboot 72057594037927937 (actor [13:58:2098]) rebooted! !Reboot 72057594037927937 (actor [13:58:2098]) tablet resolver refreshed! new actor is[13:83:2112] Leader for TabletID 72057594037927937 is [13:83:2112] sender: [13:169:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:52:2096] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:59:2057] recipient: [14:52:2096] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:76:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:78:2057] recipient: [14:37:2084] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:81:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:82:2057] recipient: [14:80:2111] Leader for TabletID 72057594037927937 is [14:83:2112] sender: [14:84:2057] recipient: [14:80:2111] !Reboot 72057594037927937 (actor [14:58:2098]) rebooted! !Reboot 72057594037927937 (actor [14:58:2098]) tablet resolver refreshed! 
new actor is[14:83:2112] Leader for TabletID 72057594037927937 is [14:83:2112] sender: [14:169:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2096] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:59:2057] recipient: [15:52:2096] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:76:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:79:2057] recipient: [15:37:2084] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:82:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:83:2057] recipient: [15:81:2111] Leader for TabletID 72057594037927937 is [15:84:2112] sender: [15:85:2057] recipient: [15:81:2111] !Reboot 72057594037927937 (actor [15:58:2098]) rebooted! !Reboot 72057594037927937 (actor [15:58:2098]) tablet resolver refreshed! new actor is[15:84:2112] Leader for TabletID 72057594037927937 is [15:84:2112] sender: [15:170:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2096] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:59:2057] recipient: [16:53:2096] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:76:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:82:2057] recipient: [16:37:2084] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:85:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:86:2057] recipient: [16:84:2114] Leader for TabletID 72057594037927937 is [16:87:2115] sender: [16:88:2057] recipient: [16:84:2114] !Reboot 72057594037927937 (actor [16:58:2098]) rebooted! !Reboot 72057594037927937 (actor [16:58:2098]) tablet resolver refreshed! new actor is[16:87:2115] Leader for TabletID 72057594037927937 is [16:87:2115] sender: [16:173:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:56:2057] recipient: [17:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:56:2057] recipient: [17:52:2096] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:59:2057] recipient: [17:52:2096] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:76:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:58:2098]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:82:2057] recipient: [17:37:2084] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:85:2057] recipient: [17:84:2114] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:86:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:87:2115] sender: [17:88:2057] recipient: [17:84:2114] !Reboot 72057594037927937 (actor [17:58:2098]) rebooted! !Reboot 72057594037927937 (actor [17:58:2098]) tablet resolver refreshed! 
new actor is[17:87:2115] Leader for TabletID 72057594037927937 is [17:87:2115] sender: [17:173:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:56:2057] recipient: [18:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:56:2057] recipient: [18:52:2096] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:59:2057] recipient: [18:52:2096] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:76:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:83:2057] recipient: [18:37:2084] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:86:2057] recipient: [18:85:2114] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:87:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:88:2115] sender: [18:89:2057] recipient: [18:85:2114] !Reboot 72057594037927937 (actor [18:58:2098]) rebooted! !Reboot 72057594037927937 (actor [18:58:2098]) tablet resolver refreshed! new actor is[18:88:2115] Leader for TabletID 72057594037927937 is [18:88:2115] sender: [18:106:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2096] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:59:2057] recipient: [19:51:2096] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:76:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:85:2057] recipient: [19:37:2084] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:87:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:89:2057] recipient: [19:88:2116] Leader for TabletID 72057594037927937 is [19:90:2117] sender: [19:91:2057] recipient: [19:88:2116] !Reboot 72057594037927937 (actor [19:58:2098]) rebooted! !Reboot 72057594037927937 (actor [19:58:2098]) tablet resolver refreshed! new actor is[19:90:2117] Leader for TabletID 72057594037927937 is [19:90:2117] sender: [19:176:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:56:2057] recipient: [20:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:56:2057] recipient: [20:52:2096] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:59:2057] recipient: [20:52:2096] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:76:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:58:2098]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:85:2057] recipient: [20:37:2084] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:88:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:89:2057] recipient: [20:87:2116] Leader for TabletID 72057594037927937 is [20:90:2117] sender: [20:91:2057] recipient: [20:87:2116] !Reboot 72057594037927937 (actor [20:58:2098]) rebooted! !Reboot 72057594037927937 (actor [20:58:2098]) tablet resolver refreshed! 
new actor is[20:90:2117] Leader for TabletID 72057594037927937 is [20:90:2117] sender: [20:176:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:56:2057] recipient: [21:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:56:2057] recipient: [21:52:2096] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:59:2057] recipient: [21:52:2096] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:76:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:86:2057] recipient: [21:37:2084] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:89:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:90:2057] recipient: [21:88:2116] Leader for TabletID 72057594037927937 is [21:91:2117] sender: [21:92:2057] recipient: [21:88:2116] !Reboot 72057594037927937 (actor [21:58:2098]) rebooted! !Reboot 72057594037927937 (actor [21:58:2098]) tablet resolver refreshed! new actor is[21:91:2117] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:56:2057] recipient: [22:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:56:2057] recipient: [22:52:2096] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:59:2057] recipient: [22:52:2096] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:76:2057] recipient: [22:14:2061] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpSqlIn::SecondaryIndex_TupleSelect [GOOD] Test command err: Trying to start YDB, gRPC: 1721, MsgBus: 29019 2025-06-24T21:09:20.870786Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625993408448215:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:20.870922Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031b0/r3tmp/tmpe4TtV1/pdisk_1.dat 2025-06-24T21:09:21.210497Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625993408448197:2079] 1750799360869953 != 1750799360869956 2025-06-24T21:09:21.223213Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1721, node 1 2025-06-24T21:09:21.311384Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:21.311530Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:21.313741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:21.348042Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:21.348076Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:21.348088Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:21.348249Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29019 TClient is connected to server localhost:29019 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:09:21.900544Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:21.984699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:22.008471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:09:22.020694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:22.202091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:22.385858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:09:22.468086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:24.193543Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626010588319046:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:24.193665Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:24.526028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:24.555560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:24.591242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:24.669558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:24.707900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:24.746783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:24.799170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:24.869321Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626010588319706:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:24.869451Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:24.869765Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626010588319711:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:24.874582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:24.895544Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626010588319713:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:09:24.970082Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626010588319766:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:25.871039Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625993408448215:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:25.871110Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:09:26.019395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... 627717Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26779, node 7 2025-06-24T21:10:14.677865Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:14.677889Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:14.677898Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:14.678033Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21539 TClient is connected to server localhost:21539 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:15.227112Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:10:15.245651Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:15.319106Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:15.468979Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:15.479367Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:15.551920Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:18.078852Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626242326319159:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:18.078977Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:18.145646Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:18.182141Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:18.216230Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:18.252624Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:18.289630Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:18.361928Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:18.434621Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:18.499960Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626242326319824:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:18.500060Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:18.500120Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626242326319829:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:18.504785Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:18.525004Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519626242326319831:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:18.584842Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519626242326319882:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:19.462076Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519626225146448352:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:19.462188Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:10:20.043749Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:20.090777Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:20.167119Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667)
: Warning: Type annotation, code: 1030
:5:17: Warning: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:6:56: Warning: At function: Filter, At lambda, At function: Coalesce
:7:29: Warning: At function: SqlIn
:7:29: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::AlterColumnTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:17.900944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:17.901031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:17.901108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:17.901150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:17.901208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:17.901241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:17.901300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:17.901370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:17.902110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:17.902489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:17.965686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:17.965748Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:17.980573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:17.980905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:17.981042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:17.986575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:17.986726Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:17.987179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.987434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:17.989684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:17.989837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:17.990698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:17.990743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:17.990893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:17.990926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:17.991005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:17.991069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.995886Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:18.117785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:18.118053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.118246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:18.118296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:18.118563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:18.118627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, 
first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:18.121155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:18.121343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:18.121557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.121620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:18.121663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:18.121710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:18.123906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.123986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:18.124023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:18.125902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.125946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.126000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:18.126049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:18.129711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:18.131756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:18.131994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:18.132910Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:18.133057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:18.133129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:18.133418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:18.133467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:18.133642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:18.133713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:18.135906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:18.135953Z node 1 :FLAT_TX_SCHEMESHARD ... rd: 72057594046678944 2025-06-24T21:10:23.584851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.584893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.585064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.585312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.585547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.586308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.586409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.586462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-24T21:10:23.586551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T21:10:23.586586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T21:10:23.586616Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 1/1 2025-06-24T21:10:23.586663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T21:10:23.586697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-24T21:10:23.586753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:2807:4007] message: TxId: 103 2025-06-24T21:10:23.586791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-24T21:10:23.586858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:0 2025-06-24T21:10:23.586887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T21:10:23.587839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 66 2025-06-24T21:10:23.590923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T21:10:23.590966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:4017:5156] TestWaitNotification: OK eventTxId 103 2025-06-24T21:10:23.591516Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:23.591728Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 235us result status StatusSuccess 2025-06-24T21:10:23.592211Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 11 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 11 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 ColumnTableVersion: 3 ColumnTableSchemaVersion: 1 ColumnTableTtlSettingsVersion: 3 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 
Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 64 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnTableDescription { Name: "TTLEnabledTable" Schema { Columns { Id: 1 Name: "key" Type: "Uint64" TypeId: 4 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 2 Name: "modified_at" Type: "Timestamp" TypeId: 50 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 3 Name: "saved_at" Type: "Datetime" TypeId: 49 NotNull: false StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 4 Name: "data" Type: "Utf8" TypeId: 4608 NotNull: false StorageId: "" DefaultValue { } ColumnFamilyId: 0 } KeyColumnNames: "modified_at" NextColumnId: 5 Version: 1 Options { SchemeNeedActualization: false } ColumnFamilies { Id: 0 Name: "default" } NextColumnFamilyId: 1 } TtlSettings { Disabled { } Version: 3 } ColumnShardCount: 64 Sharding { ColumnShards: 72075186233409546 ColumnShards: 72075186233409547 ColumnShards: 72075186233409548 ColumnShards: 72075186233409549 ColumnShards: 72075186233409550 ColumnShards: 72075186233409551 ColumnShards: 72075186233409552 ColumnShards: 72075186233409553 ColumnShards: 72075186233409554 ColumnShards: 72075186233409555 ColumnShards: 72075186233409556 ColumnShards: 72075186233409557 ColumnShards: 72075186233409558 ColumnShards: 72075186233409559 ColumnShards: 72075186233409560 ColumnShards: 72075186233409561 ColumnShards: 72075186233409562 ColumnShards: 72075186233409563 ColumnShards: 72075186233409564 ColumnShards: 72075186233409565 ColumnShards: 72075186233409566 ColumnShards: 72075186233409567 ColumnShards: 72075186233409568 ColumnShards: 72075186233409569 ColumnShards: 72075186233409570 ColumnShards: 72075186233409571 ColumnShards: 72075186233409572 ColumnShards: 72075186233409573 ColumnShards: 72075186233409574 ColumnShards: 72075186233409575 ColumnShards: 72075186233409576 ColumnShards: 72075186233409577 ColumnShards: 72075186233409578 ColumnShards: 72075186233409579 ColumnShards: 72075186233409580 ColumnShards: 72075186233409581 ColumnShards: 72075186233409582 ColumnShards: 72075186233409583 ColumnShards: 72075186233409584 ColumnShards: 72075186233409585 ColumnShards: 72075186233409586 ColumnShards: 72075186233409587 ColumnShards: 72075186233409588 ColumnShards: 72075186233409589 ColumnShards: 72075186233409590 ColumnShards: 72075186233409591 ColumnShards: 72075186233409592 ColumnShards: 72075186233409593 ColumnShards: 72075186233409594 ColumnShards: 72075186233409595 ColumnShards: 72075186233409596 ColumnShards: 72075186233409597 ColumnShards: 72075186233409598 ColumnShards: 72075186233409599 ColumnShards: 72075186233409600 ColumnShards: 72075186233409601 ColumnShards: 72075186233409602 ColumnShards: 72075186233409603 
ColumnShards: 72075186233409604 ColumnShards: 72075186233409605 ColumnShards: 72075186233409606 ColumnShards: 72075186233409607 ColumnShards: 72075186233409608 ColumnShards: 72075186233409609 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "modified_at" } } StorageConfig { DataChannelCount: 64 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 104 2025-06-24T21:10:23.594954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterColumnTable AlterColumnTable { Name: "TTLEnabledTable" AlterSchema { AlterColumns { Name: "data" DefaultValue: "10" } } } } TxId: 104 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:23.595113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: alter_table.cpp:282: TAlterColumnTable Propose, path: /MyRoot/TTLEnabledTable, opId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.599135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 104:1, propose status:StatusSchemeError, reason: schema update error: sparsed columns are disabled, at schemeshard: 72057594046678944 2025-06-24T21:10:23.601234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 104, response: Status: StatusSchemeError Reason: "schema update error: sparsed columns are disabled" TxId: 104 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:23.601439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 104, database: /MyRoot, subject: , status: StatusSchemeError, reason: schema update error: sparsed columns are disabled, operation: ALTER COLUMN TABLE, path: /MyRoot/TTLEnabledTable TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-24T21:10:23.601778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-24T21:10:23.601822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-24T21:10:23.602230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-24T21:10:23.602329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T21:10:23.602360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:4354:5492] TestWaitNotification: OK eventTxId 104 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::AlterColumnTable_Negative [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:18.689982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, 
MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:18.690080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:18.690136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:18.690173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:18.690242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:18.690270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:18.690331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:18.690398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:18.691115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:18.691496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:18.770168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:18.770220Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:18.785610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:18.786071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:18.786240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:18.794165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:18.794372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:18.795109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:18.795452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:18.798517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:18.798747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:18.799976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:18.800042Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:18.800277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:18.800330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:18.800456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:18.800552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.807205Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:18.925039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:18.925310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.925540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:18.925594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:18.925814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:18.925878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:18.928159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:18.928345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:18.928542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.928589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 
2025-06-24T21:10:18.928636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:18.928668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:18.930565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.930635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:18.930677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:18.932401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.932443Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.932497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:18.932539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:18.935844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:18.937618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:18.937787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:18.938760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:18.938880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:18.938961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:18.939258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:18.939308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose 
HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:18.939493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:18.939582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:18.941580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:18.941630Z node 1 :FLAT_TX_SCHEMESHARD ... 4T21:10:23.654886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.654931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.655007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.655061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.657012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.657099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.657145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.657190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.657226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.657304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.657359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:10:23.657451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:10:23.657475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:23.657507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:10:23.657533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:23.657581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-24T21:10:23.657643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:2806:4006] message: TxId: 101 2025-06-24T21:10:23.657690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:23.657738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:10:23.657764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:10:23.658566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 66 2025-06-24T21:10:23.660637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:10:23.660674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:2807:4007] TestWaitNotification: OK eventTxId 101 2025-06-24T21:10:23.661121Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:23.661323Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 226us result status StatusSuccess 2025-06-24T21:10:23.661818Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnTableVersion: 1 ColumnTableSchemaVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 64 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 
MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnTableDescription { Name: "TTLEnabledTable" Schema { Columns { Id: 1 Name: "key" Type: "Uint64" TypeId: 4 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 2 Name: "modified_at" Type: "Timestamp" TypeId: 50 NotNull: false StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 3 Name: "str" Type: "String" TypeId: 4097 NotNull: false StorageId: "" DefaultValue { } ColumnFamilyId: 0 } KeyColumnNames: "key" NextColumnId: 4 Version: 1 Options { SchemeNeedActualization: false } ColumnFamilies { Id: 0 Name: "default" } NextColumnFamilyId: 1 } ColumnShardCount: 64 Sharding { ColumnShards: 72075186233409546 ColumnShards: 72075186233409547 ColumnShards: 72075186233409548 ColumnShards: 72075186233409549 ColumnShards: 72075186233409550 ColumnShards: 72075186233409551 ColumnShards: 72075186233409552 ColumnShards: 72075186233409553 ColumnShards: 72075186233409554 ColumnShards: 72075186233409555 ColumnShards: 72075186233409556 ColumnShards: 72075186233409557 ColumnShards: 72075186233409558 ColumnShards: 72075186233409559 ColumnShards: 72075186233409560 ColumnShards: 72075186233409561 ColumnShards: 72075186233409562 ColumnShards: 72075186233409563 ColumnShards: 72075186233409564 ColumnShards: 72075186233409565 ColumnShards: 72075186233409566 ColumnShards: 72075186233409567 ColumnShards: 72075186233409568 ColumnShards: 72075186233409569 ColumnShards: 72075186233409570 ColumnShards: 72075186233409571 ColumnShards: 72075186233409572 ColumnShards: 72075186233409573 ColumnShards: 72075186233409574 ColumnShards: 72075186233409575 ColumnShards: 72075186233409576 ColumnShards: 72075186233409577 ColumnShards: 72075186233409578 ColumnShards: 72075186233409579 ColumnShards: 72075186233409580 ColumnShards: 72075186233409581 ColumnShards: 72075186233409582 ColumnShards: 72075186233409583 ColumnShards: 72075186233409584 ColumnShards: 72075186233409585 ColumnShards: 72075186233409586 ColumnShards: 72075186233409587 ColumnShards: 72075186233409588 ColumnShards: 72075186233409589 ColumnShards: 72075186233409590 ColumnShards: 72075186233409591 ColumnShards: 72075186233409592 ColumnShards: 72075186233409593 ColumnShards: 72075186233409594 ColumnShards: 72075186233409595 ColumnShards: 72075186233409596 ColumnShards: 72075186233409597 ColumnShards: 72075186233409598 ColumnShards: 72075186233409599 ColumnShards: 72075186233409600 ColumnShards: 72075186233409601 ColumnShards: 72075186233409602 ColumnShards: 72075186233409603 ColumnShards: 72075186233409604 ColumnShards: 72075186233409605 ColumnShards: 72075186233409606 ColumnShards: 72075186233409607 ColumnShards: 72075186233409608 ColumnShards: 72075186233409609 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "key" } } StorageConfig { DataChannelCount: 64 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 WARNING: All log messages before y_absl::InitializeLog() is called are written to STDERR W0000 00:00:1750799423.662300 740640 text_format.cc:398] Warning parsing text-format NKikimrSchemeOp.TAlterColumnTable: 6:35: text format contains deprecated field "ExpireAfterSeconds" TestModificationResults wait txId: 
102 2025-06-24T21:10:23.664464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterColumnTable AlterColumnTable { Name: "TTLEnabledTable" AlterTtlSettings { Enabled { ColumnName: "str" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:23.664656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: alter_table.cpp:282: TAlterColumnTable Propose, path: /MyRoot/TTLEnabledTable, opId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.665030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 102:1, propose status:StatusSchemeError, reason: ttl update error: Unsupported column type. in alter constructor STANDALONE_UPDATE, at schemeshard: 72057594046678944 2025-06-24T21:10:23.667299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 102, response: Status: StatusSchemeError Reason: "ttl update error: Unsupported column type. in alter constructor STANDALONE_UPDATE" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:23.667557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusSchemeError, reason: ttl update error: Unsupported column type. in alter constructor STANDALONE_UPDATE, operation: ALTER COLUMN TABLE, path: /MyRoot/TTLEnabledTable TestModificationResult got TxId: 102, wait until txId: 102 >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_ColumnType [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteReadRangeLimitThenLimitWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! 
new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:82:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:81:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:81:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! 
new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:83:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:85:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:86:2114] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:89:2057] recipient: [7:86:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! new actor is[7:88:2115] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:174:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:85:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:88:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:89:2057] recipient: [8:87:2116] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:91:2057] recipient: [8:87:2116] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! 
new actor is[8:90:2117] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:176:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:85:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:88:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:89:2057] recipient: [9:87:2116] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:91:2057] recipient: [9:87:2116] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! new actor is[9:90:2117] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:176:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:86:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:89:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:90:2057] recipient: [10:88:2116] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:92:2057] recipient: [10:88:2116] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! new actor is[10:91:2117] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:177:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:88:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:91:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:92:2057] recipient: [11:90:2118] Leader for TabletID 72057594037927937 is [11:93:2119] sender: [11:94:2057] recipient: [11:90:2118] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! 
new actor is[11:93:2119] Leader for TabletID 72057594037927937 is [11:93:2119] sender: [11:179:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... bletID 72057594037927937 is [13:58:2098] sender: [13:89:2057] recipient: [13:37:2084] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:92:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:93:2057] recipient: [13:91:2118] Leader for TabletID 72057594037927937 is [13:94:2119] sender: [13:95:2057] recipient: [13:91:2118] !Reboot 72057594037927937 (actor [13:58:2098]) rebooted! !Reboot 72057594037927937 (actor [13:58:2098]) tablet resolver refreshed! new actor is[13:94:2119] Leader for TabletID 72057594037927937 is [13:94:2119] sender: [13:180:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:52:2096] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:59:2057] recipient: [14:52:2096] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:76:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2096] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:59:2057] recipient: [15:52:2096] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:76:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2096] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:59:2057] recipient: [16:53:2096] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:76:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:78:2057] recipient: [16:37:2084] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:81:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:82:2057] recipient: [16:80:2111] Leader for TabletID 72057594037927937 is [16:83:2112] sender: [16:84:2057] recipient: [16:80:2111] !Reboot 72057594037927937 (actor [16:58:2098]) rebooted! !Reboot 72057594037927937 (actor [16:58:2098]) tablet resolver refreshed! 
new actor is[16:83:2112] Leader for TabletID 72057594037927937 is [16:83:2112] sender: [16:169:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:56:2057] recipient: [17:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:56:2057] recipient: [17:52:2096] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:59:2057] recipient: [17:52:2096] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:76:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:78:2057] recipient: [17:37:2084] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:81:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:82:2057] recipient: [17:80:2111] Leader for TabletID 72057594037927937 is [17:83:2112] sender: [17:84:2057] recipient: [17:80:2111] !Reboot 72057594037927937 (actor [17:58:2098]) rebooted! !Reboot 72057594037927937 (actor [17:58:2098]) tablet resolver refreshed! new actor is[17:83:2112] Leader for TabletID 72057594037927937 is [17:83:2112] sender: [17:169:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:56:2057] recipient: [18:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:56:2057] recipient: [18:52:2096] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:59:2057] recipient: [18:52:2096] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:76:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:79:2057] recipient: [18:37:2084] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:82:2057] recipient: [18:81:2111] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:83:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:84:2112] sender: [18:85:2057] recipient: [18:81:2111] !Reboot 72057594037927937 (actor [18:58:2098]) rebooted! !Reboot 72057594037927937 (actor [18:58:2098]) tablet resolver refreshed! new actor is[18:84:2112] Leader for TabletID 72057594037927937 is [18:84:2112] sender: [18:170:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2096] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:59:2057] recipient: [19:51:2096] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:76:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:82:2057] recipient: [19:37:2084] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:85:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:86:2057] recipient: [19:84:2114] Leader for TabletID 72057594037927937 is [19:87:2115] sender: [19:88:2057] recipient: [19:84:2114] !Reboot 72057594037927937 (actor [19:58:2098]) rebooted! !Reboot 72057594037927937 (actor [19:58:2098]) tablet resolver refreshed! 
new actor is[19:87:2115] Leader for TabletID 72057594037927937 is [19:87:2115] sender: [19:173:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:56:2057] recipient: [20:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:56:2057] recipient: [20:52:2096] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:59:2057] recipient: [20:52:2096] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:76:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:58:2098]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:82:2057] recipient: [20:37:2084] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:85:2057] recipient: [20:84:2114] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:86:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:87:2115] sender: [20:88:2057] recipient: [20:84:2114] !Reboot 72057594037927937 (actor [20:58:2098]) rebooted! !Reboot 72057594037927937 (actor [20:58:2098]) tablet resolver refreshed! new actor is[20:87:2115] Leader for TabletID 72057594037927937 is [20:87:2115] sender: [20:173:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:56:2057] recipient: [21:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:56:2057] recipient: [21:52:2096] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:59:2057] recipient: [21:52:2096] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:76:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:83:2057] recipient: [21:37:2084] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:86:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:87:2057] recipient: [21:85:2114] Leader for TabletID 72057594037927937 is [21:88:2115] sender: [21:89:2057] recipient: [21:85:2114] !Reboot 72057594037927937 (actor [21:58:2098]) rebooted! !Reboot 72057594037927937 (actor [21:58:2098]) tablet resolver refreshed! new actor is[21:88:2115] Leader for TabletID 72057594037927937 is [21:88:2115] sender: [21:106:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:56:2057] recipient: [22:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:56:2057] recipient: [22:52:2096] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:59:2057] recipient: [22:52:2096] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:76:2057] recipient: [22:14:2061] !Reboot 72057594037927937 (actor [22:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:85:2057] recipient: [22:37:2084] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:88:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:89:2057] recipient: [22:87:2116] Leader for TabletID 72057594037927937 is [22:90:2117] sender: [22:91:2057] recipient: [22:87:2116] !Reboot 72057594037927937 (actor [22:58:2098]) rebooted! !Reboot 72057594037927937 (actor [22:58:2098]) tablet resolver refreshed! 
new actor is[22:90:2117] Leader for TabletID 72057594037927937 is [22:90:2117] sender: [22:176:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:56:2057] recipient: [23:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:56:2057] recipient: [23:53:2096] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:59:2057] recipient: [23:53:2096] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:76:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:58:2098]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:85:2057] recipient: [23:37:2084] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:88:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:89:2057] recipient: [23:87:2116] Leader for TabletID 72057594037927937 is [23:90:2117] sender: [23:91:2057] recipient: [23:87:2116] !Reboot 72057594037927937 (actor [23:58:2098]) rebooted! !Reboot 72057594037927937 (actor [23:58:2098]) tablet resolver refreshed! new actor is[23:90:2117] Leader for TabletID 72057594037927937 is [23:90:2117] sender: [23:176:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:56:2057] recipient: [24:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:56:2057] recipient: [24:51:2096] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:59:2057] recipient: [24:51:2096] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:76:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:86:2057] recipient: [24:37:2084] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:88:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:90:2057] recipient: [24:89:2116] Leader for TabletID 72057594037927937 is [24:91:2117] sender: [24:92:2057] recipient: [24:89:2116] !Reboot 72057594037927937 (actor [24:58:2098]) rebooted! !Reboot 72057594037927937 (actor [24:58:2098]) tablet resolver refreshed! 
new actor is[24:91:2117]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:56:2057] recipient: [25:52:2096]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:56:2057] recipient: [25:52:2096]
Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:59:2057] recipient: [25:52:2096]
Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:76:2057] recipient: [25:14:2061]
>> TSchemeShardTTLTests::CreateTableShouldFailOnUnknownColumn
------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWrite200KDeleteThenResponseErrorNewApi [GOOD]
Test command err:
Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096]
Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096]
Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096]
Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096]
Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096]
Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096]
Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061]
2025-06-24T21:10:14.506728Z node 3 :KEYVALUE ERROR: keyvalue_state.cpp:3023: KeyValue# 72057594037927937 PrepareExecuteTransactionRequest return flase, Marker# KV73 Submsg# KeyValue# 72057594037927937 Can't delete Range, in DeleteRange, total limit of deletions per request (100000) reached, Marker# KV90
Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096]
Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096]
Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061]
2025-06-24T21:10:22.899704Z node 4 :KEYVALUE ERROR: keyvalue_state.cpp:3023: KeyValue# 72057594037927937 PrepareExecuteTransactionRequest return flase, Marker# KV73 Submsg# KeyValue# 72057594037927937 Can't delete Range, in DeleteRange, total limit of deletions per request (100000) reached, Marker# KV90
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_ColumnType [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141]
Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141]
2025-06-24T21:10:24.610522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1,
MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:24.610589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:24.610623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:24.610654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:24.610698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:24.610729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:24.610788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:24.610839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:24.611434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:24.611688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:24.667685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:24.667722Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:24.680502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:24.680822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:24.680962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:24.686790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:24.686924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:24.687529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:24.687723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:24.690023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:24.690175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:24.691193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:24.691247Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:24.691483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:24.691546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:24.691647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:24.691741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:24.697217Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:24.790459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:24.790699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:24.790858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:24.790906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:24.791273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:24.791330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:24.793329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:24.793526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:24.793758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:24.793816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 
2025-06-24T21:10:24.793868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:24.793904Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:24.795973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:24.796026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:24.796060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:24.797468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:24.797508Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:24.797553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:24.797616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:24.800136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:24.801508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:24.801634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:24.802429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:24.802553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:24.802609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:24.802848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:24.802886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose 
HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:24.803014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:24.803073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:24.804877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:24.804915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:24.805069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:24.805098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-24T21:10:24.805335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:24.805366Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T21:10:24.805448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:10:24.805484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:24.805531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:10:24.805562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:24.805588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T21:10:24.805616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:24.805652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T21:10:24.805675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T21:10:24.805725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:24.805761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T21:10:24.805799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T21:10:24.807085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle 
TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:24.807217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:24.807251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T21:10:24.807297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T21:10:24.807353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:24.807437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T21:10:24.809741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T21:10:24.810121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 WARNING: All log messages before y_absl::InitializeLog() is called are written to STDERR W0000 00:00:1750799424.810977 743489 text_format.cc:398] Warning parsing text-format NKikimrSchemeOp.TColumnTableDescription: 11:43: text format contains deprecated field "ExpireAfterSeconds" TestModificationResults wait txId: 101 2025-06-24T21:10:24.811297Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:271:2260] Bootstrap 2025-06-24T21:10:24.827716Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:271:2260] Become StateWork (SchemeCache [1:276:2265]) 2025-06-24T21:10:24.830068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnTable CreateColumnTable { Name: "TTLEnabledTable" Schema { Columns { Name: "key" Type: "Uint64" NotNull: true } Columns { Name: "modified_at" Type: "String" } KeyColumnNames: "key" } TtlSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:24.830399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: create_table.cpp:593: TCreateColumnTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:24.830760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Unsupported column type, at schemeshard: 72057594046678944 2025-06-24T21:10:24.831381Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:10:24.833748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: 
"Unsupported column type" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:24.833944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Unsupported column type, operation: CREATE COLUMN TABLE, path: /MyRoot/ 2025-06-24T21:10:24.834346Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 W0000 00:00:1750799424.834709 743489 text_format.cc:398] Warning parsing text-format NKikimrSchemeOp.TColumnTableDescription: 11:43: text format contains deprecated field "ExpireAfterSeconds" TestModificationResults wait txId: 102 2025-06-24T21:10:24.836493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnTable CreateColumnTable { Name: "TTLEnabledTable" Schema { Columns { Name: "key" Type: "Uint64" NotNull: true } Columns { Name: "modified_at" Type: "DyNumber" } KeyColumnNames: "key" } TtlSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:24.836707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: create_table.cpp:593: TCreateColumnTable Propose, path: /MyRoot/TTLEnabledTable, opId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:24.836837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 102:1, propose status:StatusSchemeError, reason: Type 'DyNumber' specified for column 'modified_at' is not supported, at schemeshard: 72057594046678944 2025-06-24T21:10:24.838349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 102, response: Status: StatusSchemeError Reason: "Type \'DyNumber\' specified for column \'modified_at\' is not supported" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:24.838487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusSchemeError, reason: Type 'DyNumber' specified for column 'modified_at' is not supported, operation: CREATE COLUMN TABLE, path: /MyRoot/ TestModificationResult got TxId: 102, wait until txId: 102 >> TSchemeShardTTLTests::RacyAlterTableAndConditionalErase >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongColumnType >> TAsyncIndexTests::MergeMainWithReboots[PipeResets] [GOOD] |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldSucceedOnIndexedTable >> TSchemeShardTTLTests::CreateTableShouldFailOnUnknownColumn [GOOD] >> TFlatExecutorLeases::BasicsInitialLease [GOOD] >> TFlatExecutorLeases::BasicsInitialLeaseTimeout |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldFailOnSimultaneousDropColumnAndEnableTTL >> TSchemeShardTTLTests::CreateTableShouldSucceedAsyncOnIndexedTable >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongColumnType [GOOD] ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnUnknownColumn [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:25.493184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:25.493247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:25.493272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:25.493308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:25.493359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:25.493388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:25.493440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:25.493494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:25.494021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:25.494239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:25.546587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:25.546629Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:25.556719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:25.556982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:25.557092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:25.562502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:25.562620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:25.563026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:25.563214Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:25.564985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:25.565094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:25.565841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:25.565893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:25.566014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:25.566061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:25.566132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:25.566191Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:25.570946Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:25.684730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:25.684959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:25.685142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:25.685184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:25.685440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:25.685512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:25.687554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, 
response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:25.687729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:25.687900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:25.687942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:25.687987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:25.688018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:25.689686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:25.689747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:25.689785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:25.691308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:25.691347Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:25.691398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:25.691460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:25.700853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:25.702565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:25.702727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:25.703620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:25.703750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: 
TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:25.703800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:25.704039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:25.704101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:25.704265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:25.704333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:25.706133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:25.706179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:25.706336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:25.706375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-24T21:10:25.706661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:25.706699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T21:10:25.706790Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:10:25.706822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:25.706890Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:10:25.706933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:25.706970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T21:10:25.707004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:25.707071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T21:10:25.707102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T21:10:25.707186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:25.707227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T21:10:25.707274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T21:10:25.709096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:25.709211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:25.709251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T21:10:25.709289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T21:10:25.709364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:25.709462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T21:10:25.712155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T21:10:25.712605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-24T21:10:25.713857Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:271:2260] Bootstrap 2025-06-24T21:10:25.734578Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:271:2260] Become StateWork (SchemeCache [1:276:2265]) 2025-06-24T21:10:25.737362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "created_at" } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:25.737673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:25.737790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" 
Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "created_at" } }, at schemeshard: 72057594046678944 2025-06-24T21:10:25.738171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Cannot enable TTL on unknown column: 'created_at', at schemeshard: 72057594046678944 2025-06-24T21:10:25.738870Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:10:25.741507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Cannot enable TTL on unknown column: \'created_at\'" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:25.741736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Cannot enable TTL on unknown column: 'created_at', operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-06-24T21:10:25.742177Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::MergeMainWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T21:09:34.350019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:09:34.350143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:09:34.350191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:09:34.350247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:09:34.350308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2025-06-24T21:09:34.350363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:09:34.350512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:09:34.350612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:09:34.351545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:09:34.353848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:09:34.460300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:09:34.460378Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:34.461140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T21:09:34.497589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:09:34.498196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:09:34.498491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:09:34.518742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:09:34.519164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:09:34.536314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:09:34.536811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:09:34.545337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:09:34.546399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:09:34.559598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:09:34.559725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:09:34.560094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:09:34.560177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, 
domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:09:34.560269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:09:34.560429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T21:09:34.570051Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:09:34.747899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:09:34.749284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:09:34.750527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:09:34.750593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:09:34.751882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:09:34.752045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:34.755389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:09:34.756353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:09:34.756648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:09:34.756776Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:09:34.756842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do 
next state 2025-06-24T21:09:34.756885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:09:34.759579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:09:34.759648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:09:34.759696Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:09:34.763390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:09:34.763459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:09:34.763503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:09:34.763567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:09:34.768637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:09:34.771386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:09:34.772488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:09:34.773916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:09:34.774067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969454 } } Step: 5000001 Media ... 
CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 
MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:25.229664Z node 30 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409549:2][72075186233409546][30:831:2673] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-24T21:10:25.229793Z node 30 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409549:2][30:784:2673] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-24T21:10:25.229949Z node 30 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409549:2][72075186233409546][30:831:2673] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750799425196920 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1750799425196920 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750799425196920 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-24T21:10:25.232800Z node 30 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409549:2][72075186233409546][30:831:2673] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 3 2025-06-24T21:10:25.232907Z node 30 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409549:2][30:784:2673] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-24T21:10:25.403379Z node 30 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:10:25.403632Z node 30 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 283us result status StatusSuccess 2025-06-24T21:10:25.404323Z node 30 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: 
"/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 
RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTTLUtility::ValidateTiers [GOOD] >> TSchemeShardTTLTests::AlterTableShouldSucceedOnIndexedTable [GOOD] >> KqpNewEngine::SqlInAsScalar [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongColumnType [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:25.925154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:25.925224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:25.925254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:25.925301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:25.925346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:25.925371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:25.925437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:25.925510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, 
InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:25.926115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:25.926429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:25.984150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:25.984199Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:25.996211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:25.996586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:25.996748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:26.003633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:26.003815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:26.004543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:26.004850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:26.007817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:26.007993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:26.009142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:26.009204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:26.009408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:26.009480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:26.009602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:26.009729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.016566Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:26.113643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:26.113847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.113995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:26.114032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:26.114247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:26.114304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:26.116224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:26.116372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:26.116514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.116558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:26.116596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:26.116620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:26.117969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.118021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:26.118055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:26.119281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.119330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.119383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:26.119428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:26.122080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:26.123490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:26.123619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:26.124283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:26.124392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:26.124437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:26.124637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:26.124687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:26.124825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:26.124880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:26.126470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:26.126520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:26.126653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:26.126690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: 
TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-24T21:10:26.126907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.126943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T21:10:26.127032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:10:26.127059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:26.127118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:10:26.127170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:26.127204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T21:10:26.127257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:26.127304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T21:10:26.127328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T21:10:26.127378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:26.127409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T21:10:26.127445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T21:10:26.128819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:26.128912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:26.128942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T21:10:26.128973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T21:10:26.129031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:26.129107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 
1, subscribers: 0 2025-06-24T21:10:26.131417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T21:10:26.131790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-24T21:10:26.132730Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:271:2260] Bootstrap 2025-06-24T21:10:26.149606Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:271:2260] Become StateWork (SchemeCache [1:276:2265]) 2025-06-24T21:10:26.151795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "String" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:26.152055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.152142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "String" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" } }, at schemeshard: 72057594046678944 2025-06-24T21:10:26.152451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Unsupported column type, at schemeshard: 72057594046678944 2025-06-24T21:10:26.153025Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:10:26.155443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Unsupported column type" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:26.155643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Unsupported column type, operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-06-24T21:10:26.156027Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLUtility::ValidateTiers [GOOD] >> TSchemeShardTTLTests::AlterTableShouldFailOnSimultaneousDropColumnAndEnableTTL [GOOD] >> TSchemeShardTTLTests::CreateTableShouldSucceedAsyncOnIndexedTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldSucceedOnIndexedTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 
72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:26.159915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:26.159982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:26.160017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:26.160053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:26.160097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:26.160123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:26.160166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:26.160228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:26.160794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:26.161040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:26.215031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:26.215074Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:26.225645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:26.225966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:26.226095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:26.231269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:26.231413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:26.231899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:26.232095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:26.233994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:26.234166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:26.234952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:26.234992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:26.235138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:26.235207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:26.235288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:26.235359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.239981Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:26.342877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:26.343087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.343248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:26.343286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:26.343518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:26.343577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:26.345421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:26.345592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: 
StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:26.345764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.345804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:26.345849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:26.345896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:26.347441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.347502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:26.347534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:26.348800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.348833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.348883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:26.348928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:26.351486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:26.352816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:26.352953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:26.353630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:26.353734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:26.353789Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:26.353984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:26.354032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:26.354173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:26.354249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:26.355707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:26.355749Z node 1 :FLAT_TX_SCHEMESHARD ... 025-06-24T21:10:26.627893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-06-24T21:10:26.628458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:10:26.628574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:10:26.628617Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:10:26.628656Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-24T21:10:26.628722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:10:26.628817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-24T21:10:26.631642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:10:26.654127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1268 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-24T21:10:26.654169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 
72075186233409547, partId: 0 2025-06-24T21:10:26.654292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1268 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-24T21:10:26.654394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1268 } } CommitVersion { Step: 5000003 TxId: 102 } FAKE_COORDINATOR: Erasing txId 102 2025-06-24T21:10:26.655056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 328 RawX2: 4294969605 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:26.655101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-24T21:10:26.655241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 328 RawX2: 4294969605 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:26.655283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:10:26.655364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 328 RawX2: 4294969605 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:26.655432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:26.655462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.655493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:10:26.655534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T21:10:26.657265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.657381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: 
TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.657574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.657604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:10:26.657677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:10:26.657704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:10:26.657730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:10:26.657754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:10:26.657787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T21:10:26.657835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:379:2345] message: TxId: 102 2025-06-24T21:10:26.657870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:10:26.657917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:10:26.657946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:10:26.658027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:10:26.659212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:10:26.659279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:462:2421] TestWaitNotification: OK eventTxId 102 2025-06-24T21:10:26.659698Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:26.659910Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 218us result status StatusSuccess 2025-06-24T21:10:26.660326Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 
TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByExpireAt" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "modified_at" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TFlatExecutorLeases::BasicsInitialLeaseTimeout [GOOD] >> TFlatExecutorLeases::BasicsInitialLeaseSleep >> KqpNamedExpressions::NamedExpressionRandom+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandom-UseSink >> TSchemeShardTTLTests::ShouldSkipDroppedColumn ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldFailOnSimultaneousDropColumnAndEnableTTL [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:26.518519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:26.518613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:26.518654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:26.518697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:26.518755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:26.518784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:26.518831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:26.518878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:26.519438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:26.519685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:26.580590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:26.580626Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:26.594388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:26.594778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:26.594954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:26.601806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:26.601960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:26.602614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:26.602899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:26.605584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:26.605749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:26.606878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:26.606930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at 
schemeshard: 72057594046678944 2025-06-24T21:10:26.607121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:26.607198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:26.607311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:26.607401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.613626Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:26.718710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:26.718989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.719224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:26.719273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:26.719532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:26.719592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:26.721577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:26.721735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:26.721922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.721973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:26.722026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts 
opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:26.722055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:26.723647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.723708Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:26.723743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:26.725074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.725110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.725153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:26.725193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:26.733529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:26.735295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:26.735430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:26.736175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:26.736292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:26.736356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:26.736581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:26.736628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:26.736769Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:26.736826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:26.738528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:26.738567Z node 1 :FLAT_TX_SCHEMESHARD ... :26.925198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-24T21:10:26.925230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-24T21:10:26.925624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.925669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 101:0 ProgressState at tablet: 72057594046678944 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T21:10:26.926509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:10:26.926592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:10:26.926619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:10:26.926659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T21:10:26.926710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:26.927547Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:10:26.927606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:10:26.927638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:10:26.927664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 
101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:10:26.927686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:10:26.927738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-06-24T21:10:26.927935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1057 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-24T21:10:26.927961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:26.928059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1057 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-24T21:10:26.928247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1057 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-24T21:10:26.928663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:26.928700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:26.928805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:26.928847Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:10:26.928911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:26.928973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, 
operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:26.929003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.929040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:10:26.929079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 129 -> 240 2025-06-24T21:10:26.930952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:10:26.932014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:10:26.932103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.932179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.932439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.932476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:10:26.932590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:10:26.932633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:26.932669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:10:26.932694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:26.932727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-24T21:10:26.932778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:339:2316] message: TxId: 101 2025-06-24T21:10:26.932826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:26.932863Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:10:26.932890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:10:26.932985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:10:26.934220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got 
EvNotifyTxCompletionResult 2025-06-24T21:10:26.934257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:340:2317] TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-24T21:10:26.936717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterTable AlterTable { Name: "TTLEnabledTable" DropColumns { Name: "modified_at" } TTLSettings { Enabled { ColumnName: "modified_at" } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:26.936902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:508: TAlterTable Propose, path: /MyRoot/TTLEnabledTable, pathId: , opId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.937221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Cannot enable TTL on dropped column: 'modified_at', at schemeshard: 72057594046678944 2025-06-24T21:10:26.938919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Cannot enable TTL on dropped column: \'modified_at\'" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:26.939262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Cannot enable TTL on dropped column: 'modified_at', operation: ALTER TABLE, path: /MyRoot/TTLEnabledTable TestModificationResult got TxId: 102, wait until txId: 102 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldSucceedAsyncOnIndexedTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:26.624728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:26.624812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:26.624865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:26.624899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:26.624937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:26.624959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:26.625011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:26.625058Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:26.625609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:26.625840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:26.680757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:26.680793Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:26.691934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:26.692264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:26.692436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:26.698329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:26.698488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:26.699049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:26.699337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:26.701794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:26.701944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:26.703056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:26.703110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:26.703309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:26.703362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:26.703472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:26.703561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.709670Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary 
subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:26.789663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:26.789834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.789958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:26.789987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:26.790180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:26.790225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:26.791895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:26.792071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:26.792215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.792252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:26.792294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:26.792322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:26.793698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.793746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:26.793778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:26.795008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-24T21:10:26.795042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.795077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:26.795109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:26.797527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:26.798701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:26.798810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:26.799442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:26.799540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:26.799578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:26.799791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:26.799830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:26.799958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:26.800047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:26.801333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:26.801369Z node 1 :FLAT_TX_SCHEMESHARD ... 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:2 129 -> 240 2025-06-24T21:10:27.013754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 328 RawX2: 4294969605 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:27.013782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409547, partId: 0 2025-06-24T21:10:27.013851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 328 RawX2: 4294969605 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:27.013877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:10:27.013915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 328 RawX2: 4294969605 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:27.013941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:27.013968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:27.013988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 101:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:10:27.014015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 129 -> 240 2025-06-24T21:10:27.017833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:10:27.017928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:10:27.021496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:10:27.021618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:10:27.021745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-24T21:10:27.021913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:27.022029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-24T21:10:27.022296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-24T21:10:27.022324Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:2 ProgressState 2025-06-24T21:10:27.022387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:2 progress is 2/3 2025-06-24T21:10:27.022432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-24T21:10:27.022475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:2 progress is 2/3 2025-06-24T21:10:27.022497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-24T21:10:27.022533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 2/3, is published: true 2025-06-24T21:10:27.022784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:27.023032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:27.023051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:10:27.023087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 3/3 2025-06-24T21:10:27.023101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-24T21:10:27.023119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 3/3 2025-06-24T21:10:27.023132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-24T21:10:27.023190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 3/3, is published: true 2025-06-24T21:10:27.023245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:379:2345] message: TxId: 101 2025-06-24T21:10:27.023280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-24T21:10:27.023311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:10:27.023333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:10:27.023425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:10:27.023455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:1 2025-06-24T21:10:27.023472Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:1 2025-06-24T21:10:27.023491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:10:27.023502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:2 2025-06-24T21:10:27.023512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:2 2025-06-24T21:10:27.023536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:10:27.025255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:10:27.025290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:380:2346] TestWaitNotification: OK eventTxId 101 2025-06-24T21:10:27.025602Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:27.025775Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 179us result status StatusSuccess 2025-06-24T21:10:27.026128Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByExpireAt" LocalPathId: 3 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "modified_at" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 
WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TKeyValueTest::TestObtainLockNewApi [GOOD] >> TKeyValueTest::TestLargeWriteAndDelete ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::SqlInAsScalar [GOOD] Test command err: Trying to start YDB, gRPC: 9999, MsgBus: 1580 2025-06-24T21:09:00.855680Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625911100647982:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:00.855978Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031e6/r3tmp/tmp6CCDIK/pdisk_1.dat 2025-06-24T21:09:01.330506Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:01.335594Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:01.342956Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:01.392634Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9999, node 1 2025-06-24T21:09:01.548114Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:01.548143Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:01.548153Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:01.548258Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1580 2025-06-24T21:09:01.848148Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1580 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:02.199040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:02.217355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:09:04.333486Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625928280517611:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:04.333642Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:04.619953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:04.758805Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625928280517714:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:04.758904Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:04.761361Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625928280517719:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:04.765731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:04.780276Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625928280517721:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:09:04.878622Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625928280517774:2397] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 1381, MsgBus: 1067 2025-06-24T21:09:06.120306Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519625935463291000:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:06.178602Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031e6/r3tmp/tmpzlJ8T2/pdisk_1.dat 2025-06-24T21:09:06.326141Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:06.326223Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:06.328266Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:06.335338Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519625935463290900:2079] 1750799346084090 != 1750799346084093 2025-06-24T21:09:06.342835Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1381, node 2 2025-06-24T21:09:06.381238Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:06.381258Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:06.381265Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:06.381367Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1067 TClient is connected to server localhost:1067 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-24T21:09:06.914993Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:09:07.123311Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:09.308174Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625948348193421:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:09.308254Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:09.338531Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:09.475557Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519625948348193776:2321], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:09.475652Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_worklo ... actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:13.585451Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519626199586886614:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:13.585535Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 61250, MsgBus: 28189 2025-06-24T21:10:15.292153Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626233019327159:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:15.292271Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031e6/r3tmp/tmpdnKKIJ/pdisk_1.dat 2025-06-24T21:10:15.392109Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:15.393438Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519626233019327139:2079] 1750799415291765 != 1750799415291768 TServer::EnableGrpc on GrpcPort 61250, node 7 2025-06-24T21:10:15.427855Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:15.427966Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:15.429747Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:15.446613Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:15.446647Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:15.446663Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:15.446791Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28189 TClient is connected to server localhost:28189 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:15.982760Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:15.995210Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:16.060854Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:16.211559Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:16.299599Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:16.311048Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:18.600355Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626245904230674:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:18.600463Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:18.654867Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:18.691044Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:18.724416Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:18.758225Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:18.794746Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:18.830860Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:18.866287Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:18.932451Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626245904231335:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:18.932564Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:18.932584Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626245904231340:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:18.936148Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:18.945791Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519626245904231342:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:19.001345Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519626245904231393:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:20.292648Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519626233019327159:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:20.292748Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TTxDataShardMiniKQL::CrossShard_4_OneToAll [GOOD] >> KqpSort::ComplexPkInclusiveSecondOptionalPredicate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::CrossShard_4_OneToAll [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:117:2057] recipient: [1:112:2141] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:117:2057] recipient: [1:112:2141] Leader for TabletID 9437184 is [1:133:2154] sender: [1:135:2057] recipient: [1:112:2141] 2025-06-24T21:09:30.960656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:09:30.960710Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:30.962236Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:30.982723Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:30.983242Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:09:30.983484Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:30.997192Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:31.056621Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:31.056768Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:31.060154Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:09:31.060259Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:09:31.060333Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:09:31.062335Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:31.062638Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:31.062726Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 Leader for TabletID 9437184 is [1:133:2154] sender: [1:211:2057] recipient: [1:14:2061] 
2025-06-24T21:09:31.138579Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:31.173200Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:09:31.174708Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:31.174892Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:09:31.174945Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:31.174983Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:09:31.175023Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:31.175315Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:31.175376Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:31.176521Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:31.176647Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:31.176822Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:31.176872Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:31.177009Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:31.177067Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:31.177109Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:31.177145Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:31.177213Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:31.177325Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:31.177380Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:31.177439Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:09:31.185885Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 
Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:09:31.185991Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:31.186107Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:31.186438Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:09:31.186496Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:09:31.186554Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:09:31.186702Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:31.186752Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:09:31.186793Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:09:31.186831Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:31.187233Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:31.187273Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:09:31.187321Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:31.187375Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:31.187439Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:09:31.187477Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:31.187525Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:09:31.187572Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:31.187607Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:31.201672Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:31.201763Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:31.201805Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:31.201863Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:31.202816Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:09:31.206844Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:31.206929Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:31.207241Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:31.207793Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 2 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:31.208145Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:31.208576Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [2:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:31.208652Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [2:1] at 9437184 is Executed 2025-06-24T21:09:31.208701Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:09:31.208753Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:31.215762Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 2 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 2 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:31.216004Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:31.216280Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:31.216457Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:31.216532Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:31.216595Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:31.216629Z node 1 :TX_DATASHARD TRACE: datashard_pipelin ... 
bletID: 9437186 } 2025-06-24T21:10:27.960662Z node 40 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-24T21:10:27.960835Z node 40 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [40:460:2399], Recipient [40:460:2399]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:10:27.960873Z node 40 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:10:27.960917Z node 40 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437186 2025-06-24T21:10:27.960949Z node 40 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:10:27.960977Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437186 2025-06-24T21:10:27.961011Z node 40 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [7:6] in PlanQueue unit at 9437186 2025-06-24T21:10:27.961039Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit PlanQueue 2025-06-24T21:10:27.961073Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-24T21:10:27.961099Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit PlanQueue 2025-06-24T21:10:27.961125Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit LoadTxDetails 2025-06-24T21:10:27.961154Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit LoadTxDetails 2025-06-24T21:10:27.961773Z node 40 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 9437186 loaded tx from db 7:6 keys extracted: 1 2025-06-24T21:10:27.961820Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-24T21:10:27.961850Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit LoadTxDetails 2025-06-24T21:10:27.961879Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit FinalizeDataTxPlan 2025-06-24T21:10:27.961907Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit FinalizeDataTxPlan 2025-06-24T21:10:27.961946Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-24T21:10:27.961973Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit FinalizeDataTxPlan 2025-06-24T21:10:27.961999Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit BuildAndWaitDependencies 2025-06-24T21:10:27.962029Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit BuildAndWaitDependencies 2025-06-24T21:10:27.962074Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [7:6] is the new logically complete end at 9437186 2025-06-24T21:10:27.962106Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [7:6] is the new logically incomplete end at 9437186 2025-06-24T21:10:27.962135Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated 
operation [7:6] at 9437186 2025-06-24T21:10:27.962172Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-24T21:10:27.962198Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit BuildAndWaitDependencies 2025-06-24T21:10:27.962231Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit BuildDataTxOutRS 2025-06-24T21:10:27.962258Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit BuildDataTxOutRS 2025-06-24T21:10:27.962309Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-24T21:10:27.962336Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit BuildDataTxOutRS 2025-06-24T21:10:27.962360Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit StoreAndSendOutRS 2025-06-24T21:10:27.962389Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit StoreAndSendOutRS 2025-06-24T21:10:27.962419Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-24T21:10:27.962446Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit StoreAndSendOutRS 2025-06-24T21:10:27.962486Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit PrepareDataTxInRS 2025-06-24T21:10:27.962512Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit PrepareDataTxInRS 2025-06-24T21:10:27.962546Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-24T21:10:27.962573Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit PrepareDataTxInRS 2025-06-24T21:10:27.962600Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit LoadAndWaitInRS 2025-06-24T21:10:27.962629Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit LoadAndWaitInRS 2025-06-24T21:10:27.962657Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-24T21:10:27.962684Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit LoadAndWaitInRS 2025-06-24T21:10:27.962710Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit ExecuteDataTx 2025-06-24T21:10:27.962739Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit ExecuteDataTx 2025-06-24T21:10:27.963043Z node 40 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [7:6] at tablet 9437186 with status COMPLETE 2025-06-24T21:10:27.963098Z node 40 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [7:6] at 9437186: {NSelectRow: 1, NSelectRange: 0, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 1, SelectRowBytes: 10, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-24T21:10:27.963167Z node 40 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-24T21:10:27.963199Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit ExecuteDataTx 2025-06-24T21:10:27.963227Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit CompleteOperation 2025-06-24T21:10:27.963254Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit CompleteOperation 2025-06-24T21:10:27.963430Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is DelayComplete 2025-06-24T21:10:27.963463Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit CompleteOperation 2025-06-24T21:10:27.963493Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit CompletedOperations 2025-06-24T21:10:27.963523Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit CompletedOperations 2025-06-24T21:10:27.963562Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-24T21:10:27.963591Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit CompletedOperations 2025-06-24T21:10:27.963619Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [7:6] at 9437186 has finished 2025-06-24T21:10:27.963649Z node 40 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:10:27.963674Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437186 2025-06-24T21:10:27.963703Z node 40 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437186 has no attached operations 2025-06-24T21:10:27.963736Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437186 2025-06-24T21:10:27.977042Z node 40 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437184 step# 7 txid# 6} 2025-06-24T21:10:27.977097Z node 40 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437184 step# 7} 2025-06-24T21:10:27.977146Z node 40 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:10:27.977177Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [7:6] at 9437184 on unit CompleteOperation 2025-06-24T21:10:27.977225Z node 40 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [7 : 6] from 9437184 at tablet 9437184 send result to client [40:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:10:27.977264Z node 40 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:10:27.977478Z node 40 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437186 step# 7 txid# 6} 2025-06-24T21:10:27.977508Z node 40 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437186 step# 7} 2025-06-24T21:10:27.977540Z node 40 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-24T21:10:27.977563Z node 40 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1933: Complete execution for [7:6] at 9437186 on unit CompleteOperation 2025-06-24T21:10:27.977599Z node 40 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [7 : 6] from 9437186 at tablet 9437186 send result to client [40:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:10:27.977624Z node 40 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-24T21:10:27.977842Z node 40 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437185 step# 7 txid# 6} 2025-06-24T21:10:27.977870Z node 40 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437185 step# 7} 2025-06-24T21:10:27.977899Z node 40 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-24T21:10:27.977921Z node 40 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [7:6] at 9437185 on unit CompleteOperation 2025-06-24T21:10:27.977958Z node 40 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [7 : 6] from 9437185 at tablet 9437185 send result to client [40:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:10:27.977984Z node 40 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 >> TWebLoginService::AuditLogLoginSuccess >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-true >> TSchemeShardLoginTest::BanUnbanUser >> TSchemeShardLoginTest::UserLogin >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-false >> TSchemeShardLoginTest::AddAccess_NonExisting-StrictAclCheck-false >> TWebLoginService::AuditLogEmptySIDsLoginSuccess >> TSchemeShardLoginTest::RemoveUser-StrictAclCheck-false >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-false ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpSort::ComplexPkInclusiveSecondOptionalPredicate [GOOD] Test command err: Trying to start YDB, gRPC: 21661, MsgBus: 21193 2025-06-24T21:09:34.616252Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626057477429823:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:34.616375Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031a6/r3tmp/tmp6hZf2R/pdisk_1.dat 2025-06-24T21:09:34.932396Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:34.932798Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626057477429801:2079] 1750799374615574 != 1750799374615577 TServer::EnableGrpc on GrpcPort 21661, node 1 2025-06-24T21:09:34.972154Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:34.972405Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:34.974623Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:34.997297Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: 
(empty maybe) 2025-06-24T21:09:34.997326Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:34.997335Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:34.997466Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21193 TClient is connected to server localhost:21193 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:09:35.542038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:35.578658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:35.642683Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:09:35.710862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:35.859770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:09:35.922069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:37.613453Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626070362333339:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:37.613619Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:37.909069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:37.934155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:37.956708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:37.981388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:38.009803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:38.077512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:38.123195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:38.172500Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626074657301301:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:38.172564Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:38.172794Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626074657301306:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:38.176048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:38.184994Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626074657301308:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:09:38.263367Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626074657301359:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:39.395415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:39.430509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:39.462061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part propo ... 644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:18.981913Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519626222324093908:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:18.982017Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 17865, MsgBus: 27987 2025-06-24T21:10:20.931025Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626254629768019:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:20.931103Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031a6/r3tmp/tmp3FZnNJ/pdisk_1.dat 2025-06-24T21:10:21.076444Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:21.077230Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519626254629768000:2079] 1750799420930426 != 1750799420930429 2025-06-24T21:10:21.094233Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:21.094318Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:21.096231Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17865, node 7 2025-06-24T21:10:21.142816Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:21.142846Z node 7 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:21.142857Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:21.143019Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27987 TClient is connected to server localhost:27987 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:21.724862Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:21.739424Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:21.794718Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:21.971530Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:22.005463Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:22.082637Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:24.490024Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626271809638825:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:24.490128Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:24.557321Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:24.590654Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:24.621795Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:24.659240Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:24.697708Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:24.738495Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:24.811673Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:24.882515Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626271809639486:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:24.882607Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626271809639491:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:24.882614Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:24.886159Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:24.895631Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519626271809639493:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:24.984839Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519626271809639546:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:25.931296Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519626254629768019:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:25.931394Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |94.6%| [TA] $(B)/ydb/core/tx/datashard/ut_minikql/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpProxy::DatabasesCacheForServerless [GOOD] |94.6%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_minikql/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-true >> TWebLoginService::AuditLogLoginSuccess [GOOD] >> TWebLoginService::AuditLogLoginBadPassword >> TSchemeShardLoginTest::BanUnbanUser [GOOD] >> TSchemeShardLoginTest::BanUserWithWaiting >> TSchemeShardLoginTest::UserLogin [GOOD] >> TSchemeShardLoginTest::UserStayLockedOutIfEnterValidPassword >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-true >> TSchemeShardLoginTest::AddAccess_NonExisting-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::AddAccess_NonExisting-StrictAclCheck-true >> TWebLoginService::AuditLogEmptySIDsLoginSuccess [GOOD] >> TWebLoginService::AuditLogLdapLoginBadPassword >> TSchemeShardLoginTest::RemoveUser-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveUser-StrictAclCheck-true >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::TestExternalLogin >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-false [GOOD] >> TFlatExecutorLeases::BasicsInitialLeaseSleep [GOOD] >> TFlatExecutorLeases::BasicsInitialLeaseSleepTimeout >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::DisableBuiltinAuthMechanism >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-false >> TSchemeShardLoginTest::TestExternalLogin [GOOD] >> TSchemeShardLoginTest::TestExternalLoginWithIncorrectLdapDomain >> TSchemeShardLoginTest::AddAccess_NonExisting-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::AddAccess_NonYdb-StrictAclCheck-false >> TWebLoginService::AuditLogLdapLoginBadPassword [GOOD] >> TWebLoginService::AuditLogLdapLoginBadUser >> TSchemeShardLoginTest::RemoveUser-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveUser_Acl-StrictAclCheck-false >> TWebLoginService::AuditLogLoginBadPassword [GOOD] >> TWebLoginService::AuditLogLdapLoginSuccess >> TSchemeShardLoginTest::UserStayLockedOutIfEnterValidPassword [GOOD] >> TWebLoginService::AuditLogAdminLoginSuccess ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-false [GOOD] Test command 
err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:17.406356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:17.406492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:17.406529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:17.406565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:17.407444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:17.407491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:17.407570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:17.407652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:17.408334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:17.410130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:17.490560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:17.490624Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:17.505240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:17.505637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:17.505791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:17.512301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:17.512465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:17.513027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.513294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 
72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:17.516035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:17.516747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:17.526356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:17.526440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:17.526681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:17.526743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:17.526859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:17.526948Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.533263Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:17.635776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:17.637083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.638234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:17.638292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:17.639616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:17.639750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:17.642304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 
2025-06-24T21:10:17.643188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:17.643392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.643446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:17.643488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:17.643519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:17.645359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.645427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:17.645477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:17.647113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.647169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.647223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.647278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:17.652035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:17.653714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:17.653860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:17.654572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.654667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } 
Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:17.654704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.655666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:17.655743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.655886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:17.655940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:17.657603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:17.657653Z node 1 :FLAT_TX_SCHEMESHARD ... thId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:30.265482Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:10:30.265569Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:10:30.265604Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:10:30.265638Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:10:30.265677Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:10:30.265759Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-06-24T21:10:30.266521Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1198 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-24T21:10:30.266570Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:30.266726Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 101:0, at schemeshard: 
72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1198 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-24T21:10:30.266846Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1198 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-24T21:10:30.268272Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 77309413622 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:30.268320Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:30.268438Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 77309413622 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:30.268496Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:10:30.268598Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 309 RawX2: 77309413622 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:30.268661Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:30.268707Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:30.268753Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:10:30.268816Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 129 -> 240 2025-06-24T21:10:30.271819Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:10:30.272095Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:10:30.272296Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: 
TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:30.273385Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:30.273689Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:30.273739Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:10:30.273863Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:10:30.273902Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:30.273950Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:10:30.273989Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:30.274033Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-24T21:10:30.274115Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [18:337:2314] message: TxId: 101 2025-06-24T21:10:30.274175Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:30.274224Z node 18 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:10:30.274263Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:10:30.274396Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:10:30.276257Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:10:30.276310Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [18:338:2315] TestWaitNotification: OK eventTxId 101 2025-06-24T21:10:30.276858Z node 18 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLTableWithDyNumberColumn_UNIT_NANOSECONDS" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:30.277096Z node 18 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLTableWithDyNumberColumn_UNIT_NANOSECONDS" took 272us result status StatusSuccess 2025-06-24T21:10:30.277727Z node 18 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLTableWithDyNumberColumn_UNIT_NANOSECONDS" PathDescription { Self { Name: "TTLTableWithDyNumberColumn_UNIT_NANOSECONDS" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable 
CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLTableWithDyNumberColumn_UNIT_NANOSECONDS" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "DyNumber" TypeId: 4866 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 ColumnUnit: UNIT_NANOSECONDS Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> KqpProxy::DatabasesCacheForServerless [GOOD] Test command err: 2025-06-24T21:10:09.324497Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626204452054984:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:09.324641Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:10:09.351794Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626205814584032:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:09.352073Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:10:09.356087Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519626204548218301:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:09.356188Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:10:09.359872Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626206071312848:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:09.359992Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:10:09.364079Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519626204817863113:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:09.364184Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047ac/r3tmp/tmphyDZHH/pdisk_1.dat 2025-06-24T21:10:09.668217Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:09.668316Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:09.668431Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:09.668486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:09.668601Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:09.668645Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:09.668718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:09.668769Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:09.683515Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-24T21:10:09.683553Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-24T21:10:09.683588Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:10:09.683608Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-24T21:10:09.684100Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:09.684547Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:09.685299Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:09.685427Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:09.689428Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:09.722223Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:10:09.740751Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:09.740871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:09.743757Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20901 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:10:10.069503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:10:10.331638Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:10.358813Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:10.364928Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:10.369509Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:10.374141Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:11.815662Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:11.817548Z node 2 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:11.828577Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-24T21:10:11.828624Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-24T21:10:11.828649Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:10:11.828705Z node 2 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:10:11.829597Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:11.829645Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:11.829692Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:11.829726Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:10:11.829989Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-24T21:10:11.830009Z node 2 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-24T21:10:11.830012Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-24T21:10:11.830019Z node 2 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-24T21:10:11.830038Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-06-24T21:10:11.830040Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-06-24T21:10:11.830095Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-24T21:10:11.830099Z node 2 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-24T21:10:11.830112Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. 
Full table path:/dc-1/.metadata/script_executions 2025-06-24T21:10:11.836101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:11.839106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:11.840862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:11.846059Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 Sche ... 17.956362Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:18.015917Z node 8 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:18.050121Z node 8 :STATISTICS WARN: tx_init.cpp:287: [72075186224037894] TTxInit::Complete. 
EnableColumnStatistics=false 2025-06-24T21:10:18.157761Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:10:18.172618Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626245444983459:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:18.172684Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/test-shared/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:10:18.191010Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:18.191120Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:18.193352Z node 6 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 7 Cookie 7 2025-06-24T21:10:18.194186Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:18.237449Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:10:18.237680Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:10:18.237762Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:10:18.237900Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:10:18.238007Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:10:18.238110Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:10:18.238218Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:10:18.238310Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:10:18.238380Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:10:18.292200Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:18.292293Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:18.295908Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:18.363826Z node 7 :STATISTICS WARN: tx_init.cpp:287: [72075186224038895] TTxInit::Complete. 
EnableColumnStatistics=false 2025-06-24T21:10:18.366134Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:18.486431Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:10:18.572103Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:18.601551Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [7:7519626245444984377:2545], Database: /Root/test-serverless, Start database fetching 2025-06-24T21:10:18.601747Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [7:7519626245444984377:2545], Database: /Root/test-serverless, Database info successfully fetched, serverless: 1 2025-06-24T21:10:18.840030Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:19.178486Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:19.782628Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519626229166385945:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:19.786582Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:10:20.442541Z node 8 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-24T21:10:20.449835Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [8:7519626252527643561:2304], Start check tables existence, number paths: 2 2025-06-24T21:10:20.449936Z node 8 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 3 2025-06-24T21:10:20.449966Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-24T21:10:20.449991Z node 8 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-24T21:10:20.451706Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [8:7519626252527643561:2304], Describe table /Root/test-dedicated/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-24T21:10:20.451775Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [8:7519626252527643561:2304], Describe table /Root/test-dedicated/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-24T21:10:20.451826Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [8:7519626252527643561:2304], Successfully finished 2025-06-24T21:10:20.451878Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: 
kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-24T21:10:21.074422Z node 7 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-24T21:10:21.074584Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-24T21:10:21.074607Z node 7 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-24T21:10:21.083186Z node 7 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 3 2025-06-24T21:10:21.083240Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [7:7519626258329886366:2330], Start check tables existence, number paths: 2 2025-06-24T21:10:21.084958Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [7:7519626258329886366:2330], Describe table /Root/test-shared/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-24T21:10:21.085022Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [7:7519626258329886366:2330], Describe table /Root/test-shared/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-24T21:10:21.085098Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [7:7519626258329886366:2330], Successfully finished 2025-06-24T21:10:21.085160Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-24T21:10:22.835723Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[8:7519626239642740951:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:22.835844Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/test-dedicated/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:10:23.173117Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519626245444983459:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:23.173224Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/test-shared/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:10:28.607533Z node 6 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 7 2025-06-24T21:10:28.607897Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:10:28.608018Z node 6 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 8 2025-06-24T21:10:28.608158Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:10:28.609963Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=6&id=ZjkyYjI4YzItNjgxZDNhOWQtZDQ4MDhjNGEtZDU5ZDc2ZDg=, ActorId: [6:7519626242051288595:2293], ActorState: ReadyState, Session closed due to explicit close event 
2025-06-24T21:10:28.610029Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=6&id=ZjkyYjI4YzItNjgxZDNhOWQtZDQ4MDhjNGEtZDU5ZDc2ZDg=, ActorId: [6:7519626242051288595:2293], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T21:10:28.610057Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=6&id=ZjkyYjI4YzItNjgxZDNhOWQtZDQ4MDhjNGEtZDU5ZDc2ZDg=, ActorId: [6:7519626242051288595:2293], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T21:10:28.610091Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=6&id=ZjkyYjI4YzItNjgxZDNhOWQtZDQ4MDhjNGEtZDU5ZDc2ZDg=, ActorId: [6:7519626242051288595:2293], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T21:10:28.610147Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=6&id=ZjkyYjI4YzItNjgxZDNhOWQtZDQ4MDhjNGEtZDU5ZDc2ZDg=, ActorId: [6:7519626242051288595:2293], ActorState: unknown state, Session actor destroyed >> TSchemeShardLoginTest::DisableBuiltinAuthMechanism [GOOD] >> TSchemeShardLoginTest::FailedLoginUserUnderNameOfGroup >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-false >> TSchemeShardLoginTest::TestExternalLoginWithIncorrectLdapDomain [GOOD] >> TSchemeShardLoginTest::ResetFailedAttemptCount >> KqpInplaceUpdate::Negative_SingleRowWithKeyCast+UseSink >> KqpImmediateEffects::Upsert >> KqpImmediateEffects::InteractiveTxWithWriteAtTheEnd >> TWebLoginService::AuditLogLdapLoginBadUser [GOOD] >> TWebLoginService::AuditLogLdapLoginBadBind >> KqpEffects::AlterAfterUpsertBeforeUpsertTransaction+UseSink >> KqpEffects::InsertAbort_Literal_Success |94.6%| [TA] $(B)/ydb/core/kqp/proxy_service/ut/test-results/unittest/{meta.json ... results_accumulator.log} |94.6%| [TA] {RESULT} $(B)/ydb/core/kqp/proxy_service/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpEffects::UpdateOn_Literal >> TWebLoginService::AuditLogAdminLoginSuccess [GOOD] >> TWebLoginService::AuditLogCreateModifyUser >> KqpImmediateEffects::InsertExistingKey-UseSink >> TSchemeShardLoginTest::AddAccess_NonYdb-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::AddAccess_NonYdb-StrictAclCheck-true >> TSchemeShardLoginTest::RemoveUser_Acl-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveUser_Acl-StrictAclCheck-true >> TWebLoginService::AuditLogLdapLoginSuccess [GOOD] >> TWebLoginService::AuditLogLogout >> TKeyValueTest::TestWriteReadWhileWriteWorks [GOOD] >> TSchemeShardLoginTest::FailedLoginUserUnderNameOfGroup [GOOD] >> TSchemeShardLoginTest::FailedLoginWithInvalidUser >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-true >> TSchemeShardLoginTest::AddAccess_NonYdb-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::AccountLockoutAndAutomaticallyUnlock >> TWebLoginService::AuditLogLdapLoginBadBind [GOOD] >> TWebLoginService::AuditLogLogout [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteReadWhileWriteWorks [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:82:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:81:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:81:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:83:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:85:2114] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:89:2057] recipient: [7:85:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! new actor is[7:88:2115] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:174:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:85:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:88:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:89:2057] recipient: [8:87:2116] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:91:2057] recipient: [8:87:2116] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! new actor is[8:90:2117] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:176:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:85:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:87:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:89:2057] recipient: [9:88:2116] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:91:2057] recipient: [9:88:2116] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! new actor is[9:90:2117] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:176:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:86:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:89:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:90:2057] recipient: [10:88:2116] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:92:2057] recipient: [10:88:2116] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! new actor is[10:91:2117] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:177:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:88:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:91:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:92:2057] recipient: [11:90:2118] Leader for TabletID 72057594037927937 is [11:93:2119] sender: [11:94:2057] recipient: [11:90:2118] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! new actor is[11:93:2119] Leader for TabletID 72057594037927937 is [11:93:2119] sender: [11:179:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... 
TabletID 72057594037927937 is [13:58:2098] sender: [13:92:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:93:2057] recipient: [13:91:2118] Leader for TabletID 72057594037927937 is [13:94:2119] sender: [13:95:2057] recipient: [13:91:2118] !Reboot 72057594037927937 (actor [13:58:2098]) rebooted! !Reboot 72057594037927937 (actor [13:58:2098]) tablet resolver refreshed! new actor is[13:94:2119] Leader for TabletID 72057594037927937 is [13:94:2119] sender: [13:180:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:52:2096] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:59:2057] recipient: [14:52:2096] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:76:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2096] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:59:2057] recipient: [15:52:2096] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:76:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2096] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:59:2057] recipient: [16:53:2096] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:76:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:78:2057] recipient: [16:37:2084] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:81:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:82:2057] recipient: [16:80:2111] Leader for TabletID 72057594037927937 is [16:83:2112] sender: [16:84:2057] recipient: [16:80:2111] !Reboot 72057594037927937 (actor [16:58:2098]) rebooted! !Reboot 72057594037927937 (actor [16:58:2098]) tablet resolver refreshed! new actor is[16:83:2112] Leader for TabletID 72057594037927937 is [16:83:2112] sender: [16:169:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:56:2057] recipient: [17:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:56:2057] recipient: [17:52:2096] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:59:2057] recipient: [17:52:2096] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:76:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:78:2057] recipient: [17:37:2084] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:81:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:82:2057] recipient: [17:80:2111] Leader for TabletID 72057594037927937 is [17:83:2112] sender: [17:84:2057] recipient: [17:80:2111] !Reboot 72057594037927937 (actor [17:58:2098]) rebooted! !Reboot 72057594037927937 (actor [17:58:2098]) tablet resolver refreshed! 
new actor is[17:83:2112] Leader for TabletID 72057594037927937 is [17:83:2112] sender: [17:169:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:56:2057] recipient: [18:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:56:2057] recipient: [18:52:2096] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:59:2057] recipient: [18:52:2096] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:76:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:79:2057] recipient: [18:37:2084] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:82:2057] recipient: [18:81:2111] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:83:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:84:2112] sender: [18:85:2057] recipient: [18:81:2111] !Reboot 72057594037927937 (actor [18:58:2098]) rebooted! !Reboot 72057594037927937 (actor [18:58:2098]) tablet resolver refreshed! new actor is[18:84:2112] Leader for TabletID 72057594037927937 is [18:84:2112] sender: [18:170:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2096] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:59:2057] recipient: [19:51:2096] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:76:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:82:2057] recipient: [19:37:2084] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:85:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:86:2057] recipient: [19:84:2114] Leader for TabletID 72057594037927937 is [19:87:2115] sender: [19:88:2057] recipient: [19:84:2114] !Reboot 72057594037927937 (actor [19:58:2098]) rebooted! !Reboot 72057594037927937 (actor [19:58:2098]) tablet resolver refreshed! new actor is[19:87:2115] Leader for TabletID 72057594037927937 is [19:87:2115] sender: [19:173:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:56:2057] recipient: [20:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:56:2057] recipient: [20:52:2096] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:59:2057] recipient: [20:52:2096] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:76:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:82:2057] recipient: [20:37:2084] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:85:2057] recipient: [20:84:2114] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:86:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:87:2115] sender: [20:88:2057] recipient: [20:84:2114] !Reboot 72057594037927937 (actor [20:58:2098]) rebooted! !Reboot 72057594037927937 (actor [20:58:2098]) tablet resolver refreshed! 
new actor is[20:87:2115] Leader for TabletID 72057594037927937 is [20:87:2115] sender: [20:173:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:56:2057] recipient: [21:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:56:2057] recipient: [21:52:2096] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:59:2057] recipient: [21:52:2096] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:76:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:83:2057] recipient: [21:37:2084] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:85:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:87:2057] recipient: [21:86:2114] Leader for TabletID 72057594037927937 is [21:88:2115] sender: [21:89:2057] recipient: [21:86:2114] !Reboot 72057594037927937 (actor [21:58:2098]) rebooted! !Reboot 72057594037927937 (actor [21:58:2098]) tablet resolver refreshed! new actor is[21:88:2115] Leader for TabletID 72057594037927937 is [21:88:2115] sender: [21:174:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:56:2057] recipient: [22:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:56:2057] recipient: [22:52:2096] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:59:2057] recipient: [22:52:2096] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:76:2057] recipient: [22:14:2061] !Reboot 72057594037927937 (actor [22:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:86:2057] recipient: [22:37:2084] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:89:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:90:2057] recipient: [22:88:2117] Leader for TabletID 72057594037927937 is [22:91:2118] sender: [22:92:2057] recipient: [22:88:2117] !Reboot 72057594037927937 (actor [22:58:2098]) rebooted! !Reboot 72057594037927937 (actor [22:58:2098]) tablet resolver refreshed! new actor is[22:91:2118] Leader for TabletID 72057594037927937 is [22:91:2118] sender: [22:177:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:56:2057] recipient: [23:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:56:2057] recipient: [23:53:2096] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:59:2057] recipient: [23:53:2096] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:76:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:86:2057] recipient: [23:37:2084] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:89:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:90:2057] recipient: [23:88:2117] Leader for TabletID 72057594037927937 is [23:91:2118] sender: [23:92:2057] recipient: [23:88:2117] !Reboot 72057594037927937 (actor [23:58:2098]) rebooted! !Reboot 72057594037927937 (actor [23:58:2098]) tablet resolver refreshed! 
new actor is[23:91:2118] Leader for TabletID 72057594037927937 is [23:91:2118] sender: [23:177:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:56:2057] recipient: [24:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:56:2057] recipient: [24:51:2096] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:59:2057] recipient: [24:51:2096] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:76:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:87:2057] recipient: [24:37:2084] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:90:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:91:2057] recipient: [24:89:2117] Leader for TabletID 72057594037927937 is [24:92:2118] sender: [24:93:2057] recipient: [24:89:2117] !Reboot 72057594037927937 (actor [24:58:2098]) rebooted! !Reboot 72057594037927937 (actor [24:58:2098]) tablet resolver refreshed! new actor is[24:92:2118] Leader for TabletID 72057594037927937 is [24:92:2118] sender: [24:178:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:56:2057] recipient: [25:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:56:2057] recipient: [25:52:2096] Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:59:2057] recipient: [25:52:2096] Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:76:2057] recipient: [25:14:2061] >> TWebLoginService::AuditLogCreateModifyUser [GOOD] >> TSchemeShardLoginTest::RemoveUser_Acl-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-true >> KqpEffects::DeletePkPrefixWithIndex >> TSchemeShardLoginTest::FailedLoginWithInvalidUser [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-true [GOOD] >> KqpInplaceUpdate::SingleRowSimple-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TWebLoginService::AuditLogLdapLoginBadBind [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:29.706839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:29.706917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:29.706951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:29.706980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:29.707960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2025-06-24T21:10:29.708017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:29.708113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:29.708248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:29.709020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:29.710374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:29.792901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:29.792964Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:29.808898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:29.809300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:29.809476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:29.817148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:29.817374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:29.818072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.818429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:29.821300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:29.821494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:29.822673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:29.822745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:29.822970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:29.823051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:29.823113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:29.823227Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.829400Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:29.929533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:29.929728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.929897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:29.929935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:29.930104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:29.930156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:29.932044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.932211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:29.932335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.932389Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:29.932443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:29.932470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:29.933895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.933949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:29.933982Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:29.935192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.935237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.935291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.935376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:29.937723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:29.939280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:29.939474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:29.940396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.940541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:29.940587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.940884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:29.940943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.941141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:29.941221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:29.943275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-24T21:10:29.943326Z node 1 :FLAT_TX_SCHEMESHARD ... /ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:31.891135Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:31.891340Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:31.891597Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:31.891666Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:31.891715Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:31.891763Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:31.893626Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:31.893691Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:31.893743Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:31.895535Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:31.895593Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:31.895651Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:31.895724Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:31.895883Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:31.897504Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:31.897695Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:31.898624Z node 4 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:31.898770Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 17179871341 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:31.898826Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:31.899114Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:31.899196Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:31.899388Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:31.899476Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:31.901298Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:31.901357Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:31.901570Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:31.901622Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:208:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:31.902030Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:31.902084Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T21:10:31.902204Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:10:31.902252Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:31.902301Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:10:31.902340Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:31.902393Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T21:10:31.902444Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone 
TxId: 1 ready parts: 1/1 2025-06-24T21:10:31.902505Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T21:10:31.902547Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T21:10:31.902624Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:31.902671Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T21:10:31.902710Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T21:10:31.903294Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:31.903409Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:31.903471Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T21:10:31.903519Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T21:10:31.903566Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:31.903665Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T21:10:31.906418Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T21:10:31.906916Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:31.908268Z node 4 :HTTP WARN: login_page.cpp:102: 127.0.0.1:0 POST /login 2025-06-24T21:10:31.908477Z node 4 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [4:271:2260] Bootstrap 2025-06-24T21:10:31.931268Z node 4 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [4:271:2260] Become StateWork (SchemeCache [4:280:2269]) 2025-06-24T21:10:31.931404Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:16053, port: 16053 2025-06-24T21:10:31.931506Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:10:31.933989Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:16053. 
Invalid credentials 2025-06-24T21:10:31.934589Z node 4 :HTTP ERROR: login_page.cpp:209: Login fail for user1@ldap: Could not login via LDAP 2025-06-24T21:10:31.935028Z node 4 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [4:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:10:31.937979Z node 4 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 AUDIT LOG buffer(2): 2025-06-24T21:10:31.891290Z: component=schemeshard, tx_id=1, remote_address={none}, subject={none}, sanitized_token={none}, database={none}, operation=ALTER DATABASE, paths=[//MyRoot], status=SUCCESS, detailed_status=StatusAccepted 2025-06-24T21:10:31.934260Z: component=grpc-login, remote_address=localhost, database=/MyRoot, operation=LOGIN, status=ERROR, detailed_status=UNAUTHORIZED, reason=Could not login via LDAP: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:16053. Invalid credentials, login_user=user1@ldap, sanitized_token={none} AUDIT LOG checked line: 2025-06-24T21:10:31.934260Z: component=grpc-login, remote_address=localhost, database=/MyRoot, operation=LOGIN, status=ERROR, detailed_status=UNAUTHORIZED, reason=Could not login via LDAP: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:16053. Invalid credentials, login_user=user1@ldap, sanitized_token={none} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TWebLoginService::AuditLogCreateModifyUser [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:29.706826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:29.706918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:29.706958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:29.706996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:29.707931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:29.707976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:29.708070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:29.708146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, 
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:29.708916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:29.710356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:29.774630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:29.774671Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:29.786170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:29.786519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:29.786660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:29.794035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:29.794223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:29.795827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.796119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:29.803599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:29.804120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:29.810008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:29.810068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:29.810755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:29.810802Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:29.810839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:29.810920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.816105Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:29.920496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { 
Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:29.920704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.920868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:29.920909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:29.921092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:29.921145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:29.922901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.923049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:29.923188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.923243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:29.923283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:29.923314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:29.924701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.924742Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:29.924771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:29.926002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.926039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.926089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: 
NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.926122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:29.928687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:29.930116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:29.930267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:29.931056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.931199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:29.931249Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.931477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:29.931524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.931667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:29.931759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:29.933215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:29.933250Z node 1 :FLAT_TX_SCHEMESHARD ... 
peration: MODIFY USER, path: /MyRoot 2025-06-24T21:10:31.980144Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:31.980175Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:31.980342Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:31.980391Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:208:2208], at schemeshard: 72057594046678944, txId: 105, path id: 1 2025-06-24T21:10:31.980768Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T21:10:31.980843Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T21:10:31.980885Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-24T21:10:31.980923Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-24T21:10:31.980965Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:31.981033Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-24T21:10:31.982435Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 AUDIT LOG buffer(6): 2025-06-24T21:10:31.896513Z: component=schemeshard, tx_id=1, remote_address={none}, subject={none}, sanitized_token={none}, database={none}, operation=ALTER DATABASE, paths=[//MyRoot], status=SUCCESS, detailed_status=StatusAccepted 2025-06-24T21:10:31.935980Z: component=schemeshard, tx_id=101, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=CREATE USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1 2025-06-24T21:10:31.952362Z: component=schemeshard, tx_id=102, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] 2025-06-24T21:10:31.961025Z: component=schemeshard, tx_id=103, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, 
login_user_change=[blocking] 2025-06-24T21:10:31.969009Z: component=schemeshard, tx_id=104, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[unblocking] 2025-06-24T21:10:31.977367Z: component=schemeshard, tx_id=105, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] AUDIT LOG checked line: 2025-06-24T21:10:31.977367Z: component=schemeshard, tx_id=105, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] 2025-06-24T21:10:31.985080Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterLogin AlterLogin { ModifyUser { User: "user1" Password: "password1" CanLogin: false } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:31.988817Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 106:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:31.988940Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#106:0 progress is 1/1 2025-06-24T21:10:31.988975Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T21:10:31.989020Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#106:0 progress is 1/1 2025-06-24T21:10:31.989061Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T21:10:31.989127Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:31.989177Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: false 2025-06-24T21:10:31.989206Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T21:10:31.989252Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 106:0 2025-06-24T21:10:31.989289Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 106, publications: 1, subscribers: 0 2025-06-24T21:10:31.989328Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 106, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-24T21:10:31.990957Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 106, response: Status: StatusSuccess TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:31.991051Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: 
/MyRoot, subject: , status: StatusSuccess, operation: MODIFY USER, path: /MyRoot 2025-06-24T21:10:31.991215Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:31.991245Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:31.991377Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:31.991410Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:208:2208], at schemeshard: 72057594046678944, txId: 106, path id: 1 2025-06-24T21:10:31.991788Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T21:10:31.991859Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T21:10:31.991894Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 106 2025-06-24T21:10:31.991921Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-24T21:10:31.991958Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:31.992025Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 106, subscribers: 0 2025-06-24T21:10:31.993240Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 AUDIT LOG buffer(7): 2025-06-24T21:10:31.896513Z: component=schemeshard, tx_id=1, remote_address={none}, subject={none}, sanitized_token={none}, database={none}, operation=ALTER DATABASE, paths=[//MyRoot], status=SUCCESS, detailed_status=StatusAccepted 2025-06-24T21:10:31.935980Z: component=schemeshard, tx_id=101, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=CREATE USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1 2025-06-24T21:10:31.952362Z: component=schemeshard, tx_id=102, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] 2025-06-24T21:10:31.961025Z: component=schemeshard, tx_id=103, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, 
login_user_level=admin, login_user=user1, login_user_change=[blocking] 2025-06-24T21:10:31.969009Z: component=schemeshard, tx_id=104, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[unblocking] 2025-06-24T21:10:31.977367Z: component=schemeshard, tx_id=105, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] 2025-06-24T21:10:31.988708Z: component=schemeshard, tx_id=106, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password, blocking] AUDIT LOG checked line: 2025-06-24T21:10:31.988708Z: component=schemeshard, tx_id=106, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password, blocking] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TWebLoginService::AuditLogLogout [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:29.706791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:29.706859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:29.706886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:29.706912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:29.707918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:29.707948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:29.708020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:29.708076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:29.708647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:29.710330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:29.772872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:29.772920Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:29.785522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:29.785820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:29.786016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:29.793119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:29.793406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:29.795825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.796141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:29.803734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:29.804143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:29.810179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:29.810250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:29.810790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:29.810841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:29.810881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:29.810940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.815624Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:29.914964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-06-24T21:10:29.915172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.915329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:29.915370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:29.915595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:29.915663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:29.917554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.917708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:29.917837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.917884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:29.917918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:29.917949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:29.919732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.919783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:29.919817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:29.921298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.921395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.921469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.921513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , 
TxId: 1 ready parts: 1/1 2025-06-24T21:10:29.924905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:29.926544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:29.926701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:29.927606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.927717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:29.927755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.928019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:29.928069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.928232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:29.928295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:29.930103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:29.930143Z node 1 :FLAT_TX_SCHEMESHARD ... 
ation is done id#101:0 progress is 1/1 2025-06-24T21:10:31.955562Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:31.955634Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:31.955701Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-24T21:10:31.955747Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:31.955795Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:10:31.955842Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 101, publications: 1, subscribers: 0 2025-06-24T21:10:31.955888Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 4 2025-06-24T21:10:31.956169Z node 4 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [4:271:2260] Bootstrap 2025-06-24T21:10:31.979993Z node 4 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [4:271:2260] Become StateWork (SchemeCache [4:277:2266]) 2025-06-24T21:10:31.981330Z node 4 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [4:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:10:31.983833Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusSuccess TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:31.983960Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSuccess, operation: CREATE USER, path: /MyRoot 2025-06-24T21:10:31.984189Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:31.984238Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:31.984425Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:31.984473Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-24T21:10:31.985358Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:10:31.985474Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:10:31.985524Z node 4 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:10:31.985571Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-06-24T21:10:31.985621Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:31.985736Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T21:10:31.986072Z node 4 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T21:10:31.987376Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 2025-06-24T21:10:31.987796Z node 4 :HTTP WARN: login_page.cpp:102: 127.0.0.1:0 POST /login 2025-06-24T21:10:31.989545Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-24T21:10:31.989596Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:46: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-06-24T21:10:32.071547Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Token: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwODQyNjMyLCJpYXQiOjE3NTA3OTk0MzIsInN1YiI6InVzZXIxIn0.PfAeoMJBHKlGywBGdClrjlEh-xQeqcbKQw5V1z5enr5SvcBKu15Xb85X9GyzZwIpA-Ug3LZJXtx-b75U-7jMTV5g2Hd033RIts8tKqfMKj8CVOmo6S1pdO0g0kHCPxDruruzT--HUj4aEneqk6hDyBeYm6u6TA5tpToahzEhPaM9wZGoAjBdOFBY4gz7rp6GUIyFQ9MjCPfPVjnq5F-DvtwBq-tTyc-OHy1jf7nWEBkIi06mf7mbLgfrduWsYszSozLB2PfFcHlFlRV2OY8qcMxd9qMy4ovRFZfxen_v02EYC9ruWKSEyCY3OgX6jTpkpCIqyjHtL4qw620D0NjGPw" SanitizedToken: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwODQyNjMyLCJpYXQiOjE3NTA3OTk0MzIsInN1YiI6InVzZXIxIn0.**" IsAdmin: true, at schemeshard: 72057594046678944 2025-06-24T21:10:32.071837Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:32.071880Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:32.072045Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:32.072083Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:208:2208], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-06-24T21:10:32.073024Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 0 2025-06-24T21:10:32.073609Z node 4 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: 
TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:32.073762Z node 4 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 174us result status StatusSuccess 2025-06-24T21:10:32.074149Z node 4 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnUVp7IK3d2ZCkTHbfTgr\n5PpQTjXMTT/GrSeaflcJKgpSemzJFVynakYSmCNFUYkL7OT4Cn9KarX0F87+l6UC\nMHt+zUqIH6uMNmaiXMyIzuMBGL50ZLKdS+ZvasIA6Q7uHgIq0OGCQC91dv/h75na\nqv/cHzMLrQKEkFJjz/F/kpWSwcKE9g/YUGkCtv+wo1zV8eTn9asTtERaL1EauX1V\nv3gk4Rg382yq6tdoq6tlTO300CwhbERwWH35O1VT4OTv/qsI5rJHni4okECFGH9W\nPN2a0+jQHnHoaJ2PvB6mEYAtG9qjYyePz2sp7V2CjfIQCAwIRXY0Oj5px1dMXPf6\ngQIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1750885832063 } Sids { Name: "user1" Type: USER } Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:32.074476Z node 4 :HTTP WARN: login_page.cpp:248: 127.0.0.1:0 POST /logout 2025-06-24T21:10:32.074540Z node 4 :HTTP ERROR: login_page.cpp:326: Logout: No ydb_session_id cookie 2025-06-24T21:10:32.075672Z node 4 :HTTP WARN: login_page.cpp:248: 127.0.0.1:0 POST /logout 2025-06-24T21:10:32.081198Z node 4 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (589A015B): Token is not in correct format 2025-06-24T21:10:32.081320Z node 4 :HTTP ERROR: login_page.cpp:326: Logout: Token is not in correct format 2025-06-24T21:10:32.081712Z node 4 :HTTP WARN: login_page.cpp:248: 127.0.0.1:0 POST /logout AUDIT LOG buffer(4): 2025-06-24T21:10:31.930916Z: 
component=schemeshard, tx_id=1, remote_address={none}, subject={none}, sanitized_token={none}, database={none}, operation=ALTER DATABASE, paths=[//MyRoot], status=SUCCESS, detailed_status=StatusAccepted 2025-06-24T21:10:31.955097Z: component=schemeshard, tx_id=101, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=CREATE USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1 2025-06-24T21:10:32.071702Z: component=grpc-login, remote_address=localhost, database=/MyRoot, operation=LOGIN, status=SUCCESS, login_user=user1, sanitized_token=eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwODQyNjMyLCJpYXQiOjE3NTA3OTk0MzIsInN1YiI6InVzZXIxIn0.**, login_user_level=admin 2025-06-24T21:10:32.082747Z: component=web-login, remote_address=127.0.0.1, subject=user1, sanitized_token=eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwODQyNjMyLCJpYXQiOjE3NTA3OTk0MzIsInN1YiI6InVzZXIxIn0.**, operation=LOGOUT, status=SUCCESS AUDIT LOG checked line: 2025-06-24T21:10:32.082747Z: component=web-login, remote_address=127.0.0.1, subject=user1, sanitized_token=eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwODQyNjMyLCJpYXQiOjE3NTA3OTk0MzIsInN1YiI6InVzZXIxIn0.**, operation=LOGOUT, status=SUCCESS >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-true [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::FailedLoginWithInvalidUser [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:29.706790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:29.706858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:29.706895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:29.706931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:29.707877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:29.707900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:29.707970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:29.708071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, 
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:29.708650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:29.710320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:29.783607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:29.783654Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:29.794994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:29.795310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:29.795438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:29.800729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:29.800856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:29.801260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.801482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:29.803428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:29.804118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:29.809914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:29.809961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:29.810750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:29.810800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:29.810836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:29.810893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.815675Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:29.910252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } 
StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:29.910425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.910571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:29.910606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:29.911090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:29.911165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:29.913448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.914226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:29.914358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.914444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:29.914481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:29.914504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:29.915880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.915916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:29.915943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:29.917123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.917168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.917229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-24T21:10:29.917262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:29.920468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:29.921723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:29.922593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:29.923348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.923439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:29.923472Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.924618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:29.924671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.924817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:29.924880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:29.926409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:29.926438Z node 1 :FLAT_TX_SCHEMESHARD ... 
T21:10:32.239743Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:32.239783Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T21:10:32.239818Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T21:10:32.240306Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:32.240402Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:32.240446Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T21:10:32.240484Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T21:10:32.240524Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:32.240607Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T21:10:32.243032Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T21:10:32.243518Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:32.243896Z node 5 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [5:272:2261] Bootstrap 2025-06-24T21:10:32.259856Z node 5 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [5:272:2261] Become StateWork (SchemeCache [5:277:2266]) 2025-06-24T21:10:32.260209Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:32.260367Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 177us result status StatusSuccess 2025-06-24T21:10:32.260682Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 
ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:32.261114Z node 5 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [5:272:2261] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:10:32.262737Z node 5 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 2025-06-24T21:10:32.263260Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-24T21:10:32.263291Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:46: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-06-24T21:10:32.312498Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Cannot find user: user1", at schemeshard: 72057594046678944 2025-06-24T21:10:32.312593Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:32.312625Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:32.312769Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:32.312799Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:209:2209], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-06-24T21:10:32.313177Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 0 2025-06-24T21:10:32.313472Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:32.313604Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 143us result status StatusSuccess 2025-06-24T21:10:32.313935Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvX+NmXhoOHwOAAhcvO2+\n7JgBJcnwl32Lzh2HM9ulY20xGHVDFGufR7IkyOnRlczMdvZ3t0WVymDIR1J1Roq0\nC4YZ+LtFPmpFr2Ed4Tm9GP/IhJuHaCVAsgBh4KFyym6P0PKeb6tRvPjpC6K1KPVF\nBAK4sp6IdfnO2V0D+WiLdwESz+qU4nLmIUdn3nvnymhzzW3UiheLKl02pN8NVvYj\n4zzZPabxktWR1ETbFbbW09GQooB6CXVhCwUbn81l2UZcHCAqbLdlwssIH2+kfKqV\nB998qEdDSZ9kgy3noHVKBIkxFUSP+Ah2BDzPG3rsj3MT44mVC2TFUSgImgf9iQr1\nqQIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1750885832310 } Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:29.706792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:29.706867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:29.706911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:29.706942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:29.707900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:29.707927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:29.707987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:29.708051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:29.708695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:29.710336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:29.785327Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:29.785386Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:29.796812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:29.797112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:29.797224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:29.802908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:29.803064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:29.803552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.803772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:29.805793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:29.805925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:29.809926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:29.809974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:29.810725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:29.810777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:29.810818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:29.810882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.815687Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:29.913227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:29.913399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.913536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:29.913588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:29.913774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:29.913824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:29.915752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.915888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:29.916000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.916045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:29.916074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:29.916099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:29.917533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.917575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:29.917611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:29.918910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.918952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.918992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.919025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:29.921518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 
2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:29.922755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:29.922935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:29.923566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.923657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:29.923698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.924589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:29.924632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.924792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:29.924859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:29.926333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:29.926379Z node 1 :FLAT_TX_SCHEMESHARD ... 
: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-24T21:10:32.331644Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:32.331714Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-24T21:10:32.333882Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T21:10:32.334164Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 TestModificationResult got TxId: 105, wait until txId: 105 2025-06-24T21:10:32.334574Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Dir1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:32.334727Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Dir1" took 177us result status StatusSuccess 2025-06-24T21:10:32.335016Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Dir1" PathDescription { Self { Name: "Dir1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 2 EffectiveACLVersion: 2 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 106 2025-06-24T21:10:32.337626Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveGroup { Group: 
"group1" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:32.337787Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5298: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 1] name: MyRoot type: EPathTypeDir state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:32.337813Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5314: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:32.337848Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5298: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 2] name: Dir1 type: EPathTypeDir state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:32.337873Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5314: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:10:32.338103Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 106:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:32.338187Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#106:0 progress is 1/1 2025-06-24T21:10:32.338216Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T21:10:32.338249Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#106:0 progress is 1/1 2025-06-24T21:10:32.338275Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T21:10:32.338317Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:32.338370Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: false 2025-06-24T21:10:32.338402Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T21:10:32.338426Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 106:0 2025-06-24T21:10:32.338457Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 106, publications: 1, subscribers: 0 2025-06-24T21:10:32.338506Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 106, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-24T21:10:32.340389Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 106, response: Status: StatusSuccess TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:32.340481Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusSuccess, operation: REMOVE GROUP, path: /MyRoot 2025-06-24T21:10:32.340650Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:32.340687Z node 5 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:32.340829Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:32.340868Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:209:2209], at schemeshard: 72057594046678944, txId: 106, path id: 1 2025-06-24T21:10:32.341275Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T21:10:32.341356Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T21:10:32.341388Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 106 2025-06-24T21:10:32.341426Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-24T21:10:32.341461Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:32.341541Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 106, subscribers: 0 2025-06-24T21:10:32.343065Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 TestModificationResult got TxId: 106, wait until txId: 106 2025-06-24T21:10:32.343596Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:32.343779Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 193us result status StatusSuccess 2025-06-24T21:10:32.344174Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: true } Children { Name: "Dir1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000002 
ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:29.706799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:29.706875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:29.706904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:29.706930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:29.707911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:29.707948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:29.708024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:29.708093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-24T21:10:29.708694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:29.710353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:29.773573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:29.773625Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:29.786275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:29.786580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:29.786698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:29.794789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:29.794952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:29.795775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.796015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:29.804015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:29.804189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:29.810236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:29.810323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:29.810783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:29.810860Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:29.810916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:29.811002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.817292Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:29.943922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:29.944085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.944221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:29.944248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:29.944417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:29.944462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:29.946129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.946248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:29.946354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.946407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:29.946434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:29.946490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:29.947811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.947850Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:29.947877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:29.949109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.949151Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.949200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.949233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:29.951564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:29.952956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:29.953094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:29.953721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.953808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:29.953836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.954045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:29.954083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.954229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:29.954283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:29.955803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:29.955838Z node 1 :FLAT_TX_SCHEMESHARD ... 
: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-24T21:10:32.816023Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:32.816099Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-24T21:10:32.820936Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T21:10:32.821501Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 TestModificationResult got TxId: 105, wait until txId: 105 2025-06-24T21:10:32.822016Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Dir1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:32.822251Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Dir1" took 253us result status StatusSuccess 2025-06-24T21:10:32.822622Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Dir1" PathDescription { Self { Name: "Dir1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 106 2025-06-24T21:10:32.825887Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveGroup { Group: 
"group1" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:32.826118Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5298: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 1] name: MyRoot type: EPathTypeDir state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:32.826162Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5314: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:32.826222Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5298: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 2] name: Dir1 type: EPathTypeDir state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:32.826256Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5314: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:10:32.826622Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 106:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:32.826727Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#106:0 progress is 1/1 2025-06-24T21:10:32.826763Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T21:10:32.826805Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#106:0 progress is 1/1 2025-06-24T21:10:32.826835Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T21:10:32.826890Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:32.826949Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: false 2025-06-24T21:10:32.826996Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T21:10:32.827055Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 106:0 2025-06-24T21:10:32.827105Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 106, publications: 1, subscribers: 0 2025-06-24T21:10:32.827163Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 106, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-24T21:10:32.829746Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 106, response: Status: StatusSuccess TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:32.829875Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusSuccess, operation: REMOVE GROUP, path: /MyRoot 2025-06-24T21:10:32.830124Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:32.830185Z node 5 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:32.830407Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:32.830466Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:209:2209], at schemeshard: 72057594046678944, txId: 106, path id: 1 2025-06-24T21:10:32.831093Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T21:10:32.831234Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-06-24T21:10:32.831290Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 106 2025-06-24T21:10:32.831343Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-24T21:10:32.831398Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:32.831523Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 106, subscribers: 0 2025-06-24T21:10:32.833740Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 TestModificationResult got TxId: 106, wait until txId: 106 2025-06-24T21:10:32.834354Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:32.834595Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 278us result status StatusSuccess 2025-06-24T21:10:32.835175Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: true } Children { Name: "Dir1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000002 
ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> KqpEffects::InsertAbort_Literal_Conflict+UseSink
>> TFlatExecutorLeases::BasicsInitialLeaseSleepTimeout [GOOD]
>> TFlatTableDatetime::TestDate [GOOD]
>> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundSnapshot
>> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundSnapshot [GOOD]
>> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundSnapshotToRegular [GOOD]
>> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundCompactionGen1
>> KqpImmediateEffects::UpsertAfterInsert
>> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundCompactionGen1 [GOOD]
>> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundCompactionToRegular
>> KqpInplaceUpdate::SingleRowSimple+UseSink
>> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundCompactionToRegular [GOOD]
>> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundCompactionGen2
>> KqpEffects::UpdateOn_Select
>> TVersions::Wreck1 [GOOD]
>> TVersions::Wreck1Reverse
>> KqpInplaceUpdate::SingleRowIf-UseSink
>> KqpInplaceUpdate::SingleRowArithm-UseSink
>> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundCompactionGen2 [GOOD]
>> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundSnapshotPriorityByTime [GOOD]
>> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundCompactionPriorityByTime
>> TSchemeShardLoginTest::BanUserWithWaiting [GOOD]
>> TSchemeShardLoginTest::ChangeAcceptablePasswordParameters
>> TKeyValueTest::TestRenameToLongKey [GOOD]
>> KqpImmediateEffects::Delete
>> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundCompactionPriorityByTime [GOOD]
>> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_Default
------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestRenameToLongKey [GOOD]
Test command err:
Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient:
[1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:82:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:81:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:81:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! 
new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:83:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:85:2114] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:89:2057] recipient: [7:85:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! 
new actor is[7:88:2115] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:174:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:84:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:87:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:88:2057] recipient: [8:86:2115] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:90:2057] recipient: [8:86:2115] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! new actor is[8:89:2116] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:109:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:85:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:88:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:89:2057] recipient: [9:87:2116] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:91:2057] recipient: [9:87:2116] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! new actor is[9:90:2117] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:110:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:88:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:92:2057] recipient: [10:91:2119] Leader for TabletID 72057594037927937 is [10:93:2120] sender: [10:94:2057] recipient: [10:91:2119] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! 
new actor is[10:93:2120] Leader for TabletID 72057594037927937 is [10:93:2120] sender: [10:179:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:88:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:91:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:92:2057] recipient: [11:90:2119] Leader for TabletID 72057594037927937 is [11:93:2120] sender: [11:94:2057] recipient: [11:90:2119] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! new actor is[11:93:2120] Leader for TabletID 72057594037927937 is [11:93:2120] sender: [11:179:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:58:209 ... is [12:58:2098] sender: [12:92:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:93:2057] recipient: [12:91:2119] Leader for TabletID 72057594037927937 is [12:94:2120] sender: [12:95:2057] recipient: [12:91:2119] !Reboot 72057594037927937 (actor [12:58:2098]) rebooted! !Reboot 72057594037927937 (actor [12:58:2098]) tablet resolver refreshed! new actor is[12:94:2120] Leader for TabletID 72057594037927937 is [12:94:2120] sender: [12:180:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:53:2096] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:59:2057] recipient: [13:53:2096] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:76:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:52:2096] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:59:2057] recipient: [14:52:2096] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:76:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2096] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:59:2057] recipient: [15:52:2096] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:76:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:78:2057] recipient: [15:37:2084] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:81:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:82:2057] recipient: [15:80:2111] Leader for TabletID 72057594037927937 is [15:83:2112] sender: [15:84:2057] recipient: [15:80:2111] !Reboot 72057594037927937 (actor [15:58:2098]) rebooted! !Reboot 72057594037927937 (actor [15:58:2098]) tablet resolver refreshed! new actor is[15:83:2112] Leader for TabletID 72057594037927937 is [15:83:2112] sender: [15:169:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2096] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:59:2057] recipient: [16:53:2096] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:76:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:58:2098]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:78:2057] recipient: [16:37:2084] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:81:2057] recipient: [16:80:2111] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:82:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:83:2112] sender: [16:84:2057] recipient: [16:80:2111] !Reboot 72057594037927937 (actor [16:58:2098]) rebooted! !Reboot 72057594037927937 (actor [16:58:2098]) tablet resolver refreshed! new actor is[16:83:2112] Leader for TabletID 72057594037927937 is [16:83:2112] sender: [16:169:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:56:2057] recipient: [17:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:56:2057] recipient: [17:52:2096] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:59:2057] recipient: [17:52:2096] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:76:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:79:2057] recipient: [17:37:2084] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:82:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:83:2057] recipient: [17:81:2111] Leader for TabletID 72057594037927937 is [17:84:2112] sender: [17:85:2057] recipient: [17:81:2111] !Reboot 72057594037927937 (actor [17:58:2098]) rebooted! !Reboot 72057594037927937 (actor [17:58:2098]) tablet resolver refreshed! new actor is[17:84:2112] Leader for TabletID 72057594037927937 is [17:84:2112] sender: [17:170:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:56:2057] recipient: [18:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:56:2057] recipient: [18:52:2096] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:59:2057] recipient: [18:52:2096] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:76:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:82:2057] recipient: [18:37:2084] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:85:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:86:2057] recipient: [18:84:2114] Leader for TabletID 72057594037927937 is [18:87:2115] sender: [18:88:2057] recipient: [18:84:2114] !Reboot 72057594037927937 (actor [18:58:2098]) rebooted! !Reboot 72057594037927937 (actor [18:58:2098]) tablet resolver refreshed! new actor is[18:87:2115] Leader for TabletID 72057594037927937 is [18:87:2115] sender: [18:173:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2096] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:59:2057] recipient: [19:51:2096] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:76:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:82:2057] recipient: [19:37:2084] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:85:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:86:2057] recipient: [19:84:2114] Leader for TabletID 72057594037927937 is [19:87:2115] sender: [19:88:2057] recipient: [19:84:2114] !Reboot 72057594037927937 (actor [19:58:2098]) rebooted! !Reboot 72057594037927937 (actor [19:58:2098]) tablet resolver refreshed! new actor is[19:87:2115] Leader for TabletID 72057594037927937 is [19:87:2115] sender: [19:173:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:56:2057] recipient: [20:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:56:2057] recipient: [20:52:2096] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:59:2057] recipient: [20:52:2096] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:76:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:83:2057] recipient: [20:37:2084] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:86:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:87:2057] recipient: [20:85:2114] Leader for TabletID 72057594037927937 is [20:88:2115] sender: [20:89:2057] recipient: [20:85:2114] !Reboot 72057594037927937 (actor [20:58:2098]) rebooted! !Reboot 72057594037927937 (actor [20:58:2098]) tablet resolver refreshed! new actor is[20:88:2115] Leader for TabletID 72057594037927937 is [20:88:2115] sender: [20:174:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:56:2057] recipient: [21:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:56:2057] recipient: [21:52:2096] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:59:2057] recipient: [21:52:2096] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:76:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:86:2057] recipient: [21:37:2084] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:88:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:90:2057] recipient: [21:89:2117] Leader for TabletID 72057594037927937 is [21:91:2118] sender: [21:92:2057] recipient: [21:89:2117] !Reboot 72057594037927937 (actor [21:58:2098]) rebooted! !Reboot 72057594037927937 (actor [21:58:2098]) tablet resolver refreshed! new actor is[21:91:2118] Leader for TabletID 72057594037927937 is [21:91:2118] sender: [21:177:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:56:2057] recipient: [22:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:56:2057] recipient: [22:52:2096] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:59:2057] recipient: [22:52:2096] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:76:2057] recipient: [22:14:2061] !Reboot 72057594037927937 (actor [22:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:86:2057] recipient: [22:37:2084] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:88:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:90:2057] recipient: [22:89:2117] Leader for TabletID 72057594037927937 is [22:91:2118] sender: [22:92:2057] recipient: [22:89:2117] !Reboot 72057594037927937 (actor [22:58:2098]) rebooted! !Reboot 72057594037927937 (actor [22:58:2098]) tablet resolver refreshed! new actor is[22:91:2118] Leader for TabletID 72057594037927937 is [22:91:2118] sender: [22:177:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:56:2057] recipient: [23:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:56:2057] recipient: [23:53:2096] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:59:2057] recipient: [23:53:2096] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:76:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:87:2057] recipient: [23:37:2084] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:90:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:91:2057] recipient: [23:89:2117] Leader for TabletID 72057594037927937 is [23:92:2118] sender: [23:93:2057] recipient: [23:89:2117] !Reboot 72057594037927937 (actor [23:58:2098]) rebooted! !Reboot 72057594037927937 (actor [23:58:2098]) tablet resolver refreshed! 
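The TKeyValueTest reboot cycles above (and continuing below) all follow the same shape: the harness waits until one specific event type is observed at the tablet (TEvKeyValue::TEvCollect, TEvCompleteGC, TEvRequest, TEvAcquireLock, TEvIntermediate, TEvExecuteTransaction, or TEvTabletPipe::TEvServerConnected), reboots the tablet at exactly that point, waits until the tablet resolver is refreshed, and then continues the same scenario against the new leader actor. The sketch below only illustrates that reboot-on-event idea in general terms; the class and method names (TFakeTabletRuntime, RebootOnEvent, Send) are hypothetical stand-ins and are not the actual YDB test harness API.

```cpp
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-ins for a test runtime; names are illustrative only.
struct TEvent { std::string Type; };

class TFakeTabletRuntime {
public:
    // Register a one-shot hook: reboot the tablet when an event of this type is observed.
    void RebootOnEvent(const std::string& eventType) { RebootTrigger = eventType; }

    // Deliver an event to the tablet; if it matches the trigger, simulate a reboot.
    void Send(const TEvent& ev) {
        std::cout << "deliver " << ev.Type << " to actor generation " << Generation << "\n";
        if (!RebootTrigger.empty() && ev.Type == RebootTrigger) {
            RebootTrigger.clear();
            ++Generation;  // a new leader actor takes over, as in "new actor is[...]"
            std::cout << "!Reboot on event " << ev.Type << " -> new actor generation "
                      << Generation << ", resolver refreshed\n";
        }
    }

    int Generation = 1;

private:
    std::string RebootTrigger;
};

int main() {
    // One reboot point per run, mirroring how each numbered trace above reboots
    // on exactly one event type before replaying the same KeyValue scenario.
    const std::vector<std::string> rebootPoints = {
        "TEvKeyValue::TEvCollect", "TEvKeyValue::TEvCompleteGC",
        "TEvKeyValue::TEvRequest", "TEvTabletPipe::TEvServerConnected"};
    for (const auto& point : rebootPoints) {
        TFakeTabletRuntime runtime;
        runtime.RebootOnEvent(point);
        for (const auto& type : {"TEvKeyValue::TEvRequest", point.c_str(),
                                 "TEvKeyValue::TEvCollect"}) {
            runtime.Send(TEvent{type});
        }
    }
}
```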
new actor is[23:92:2118] Leader for TabletID 72057594037927937 is [23:92:2118] sender: [23:178:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:56:2057] recipient: [24:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:56:2057] recipient: [24:51:2096] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:59:2057] recipient: [24:51:2096] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:76:2057] recipient: [24:14:2061] >> TSchemeShardLoginTest::ChangeAcceptablePasswordParameters [GOOD] >> TSchemeShardLoginTest::ChangeAccountLockoutParameters >> TSchemeShardLoginTest::ResetFailedAttemptCount [GOOD] >> TSchemeShardLoginTest::ResetFailedAttemptCountAfterModifyUser >> TKeyValueTest::TestInlineWriteReadWithRestartsThenResponseOkNewApi [GOOD] >> TKeyValueTest::TestInlineWriteReadWithRestartsWithNotCorrectUTF8NewApi >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_Default [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True >> KqpInplaceUpdate::Negative_SingleRowWithKeyCast+UseSink [GOOD] >> KqpInplaceUpdate::Negative_SingleRowListFromRange-UseSink >> KqpEffects::InsertAbort_Literal_Success [GOOD] >> KqpEffects::InsertAbort_Params_Conflict+UseSink >> KqpEffects::AlterAfterUpsertBeforeUpsertTransaction+UseSink [GOOD] >> KqpEffects::AlterAfterUpsertBeforeUpsertSelectTransaction+UseSink >> KqpEffects::UpdateOn_Literal [GOOD] >> KqpEffects::UpdateOn_Params >> TSchemeShardLoginTest::AccountLockoutAndAutomaticallyUnlock [GOOD] >> KqpImmediateEffects::Upsert [GOOD] >> KqpImmediateEffects::UpdateOn >> TSchemeShardLoginTest::ResetFailedAttemptCountAfterModifyUser [GOOD] >> KqpImmediateEffects::InteractiveTxWithWriteAtTheEnd [GOOD] >> KqpImmediateEffects::ManyFlushes >> KqpImmediateEffects::TxWithReadAtTheEnd-UseSink >> KqpEffects::DeletePkPrefixWithIndex [GOOD] >> KqpEffects::AlterDuringUpsertTransaction+UseSink >> KqpImmediateEffects::InsertExistingKey-UseSink [GOOD] >> KqpImmediateEffects::Interactive >> TKeyValueTest::TestCopyRangeWorksNewApi [GOOD] >> TKeyValueTest::TestCopyRangeToLongKey ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::AccountLockoutAndAutomaticallyUnlock [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:29.706871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:29.706955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:29.707000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:29.707033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:29.707961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:29.708016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:29.708100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:29.708176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:29.708910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:29.710404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:29.782824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:29.782870Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:29.793475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:29.793785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:29.793926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:29.799320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:29.799460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:29.799924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.800146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:29.803120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:29.804099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:29.809931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:29.809986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:29.810765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:29.810817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:29.810855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:29.810912Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.815741Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:29.907406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:29.908635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.909621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:29.909663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:29.910858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:29.910931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:29.913423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.914253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:29.914394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.914439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:29.914478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:29.914501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:29.916037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.916072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 
ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:29.916102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:29.917569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.917610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.917654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.917689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:29.920482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:29.921868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:29.922614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:29.923429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.923525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:29.923556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.924563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:29.924611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.924802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:29.924856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:29.926322Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:29.926356Z node 1 :FLAT_TX_SCHEMESHARD ... HARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:10:32.675719Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:10:32.675770Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:10:32.675817Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-06-24T21:10:32.675871Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:32.675994Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T21:10:32.678151Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 2025-06-24T21:10:32.678704Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-24T21:10:32.678770Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:46: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-06-24T21:10:32.730463Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-06-24T21:10:32.730586Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:32.730643Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:32.730847Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:32.730887Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:209:2209], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-06-24T21:10:32.731403Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 0 2025-06-24T21:10:32.731697Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-24T21:10:32.737446Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: 
Error: "Invalid password", at schemeshard: 72057594046678944 2025-06-24T21:10:32.737708Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-24T21:10:32.742963Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-06-24T21:10:32.743272Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-24T21:10:32.752434Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-06-24T21:10:32.752839Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-24T21:10:32.752984Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "User user1 login denied: too many failed password attempts", at schemeshard: 72057594046678944 2025-06-24T21:10:32.753456Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-24T21:10:32.753574Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "User user1 login denied: too many failed password attempts", at schemeshard: 72057594046678944 2025-06-24T21:10:32.754078Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:32.754303Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 257us result status StatusSuccess 2025-06-24T21:10:32.754883Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxyZElysTu/rbys5mZaQe\nMKLFlsjIo5NXJ05JdooVFjIVTP7in3bk76luwNyFzCnndJnBkLy97p/QCwYpyhT9\nMuGdfiT2JcDJFJC3QRAWQdCdGfzuy3rqjZ2CMWm/Tdyil2QlfgRYwbFPqKtdkk9S\nT4Fsz8sYa8wW0/WWcvjaG/nOZsHEqn4tpDRzIgddNuW2l5y1TGcMULIsiyDK9vBZ\nvr76+ayEj+gUPUdNcvXW6P2TL4r2c4q+Y8HoF9D1F0UvNIwUi6NGzo6P69QuSvuI\nWR/hm1Um14ZtwNKjzIwr5IDowmewIdqhr8omk+7pqqo4rVff9LUhWoURLkaM6vW5\nSwIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1750885832724 } Sids { Name: "user1" Type: USER } Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:36.755965Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-24T21:10:36.763858Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-06-24T21:10:36.764427Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-24T21:10:36.775435Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Token: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwODQyNjM2LCJpYXQiOjE3NTA3OTk0MzYsInN1YiI6InVzZXIxIn0.YwJ50DF1sJ8G4C6hJHSeGxSCbYJRi8mlZzbVwiUglnFqxwtfJfZvddFtj_i7lBA7rkXxwRWJCkVJQuDrld8Vj1F-tQzRV5_vETcNZIwxGGKeRGkmjonIH6Eo2Cj2666WR5W-JZdZ5AO2KgfsdWKc19np_a421tlkcQkFwPVL-o2FzROK-kFvyJhAUP44B7hw67qHJeuY2PtNA_dHQqw2u96HTpzKmsaqV-3YReQjUB5tb74z5MjBw1AFZyN3y6fVRjdoqa9HIjyndYHoO9UX3NS1hCklUzdXIcQImtIUoVe8WicPoOqFbT0nLB5EH-8wBt4uJlrjmA0YzE1nhewb3A" SanitizedToken: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwODQyNjM2LCJpYXQiOjE3NTA3OTk0MzYsInN1YiI6InVzZXIxIn0.**" IsAdmin: true, at schemeshard: 72057594046678944 2025-06-24T21:10:36.776048Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:36.776266Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 242us result status StatusSuccess 2025-06-24T21:10:36.776798Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 
PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxyZElysTu/rbys5mZaQe\nMKLFlsjIo5NXJ05JdooVFjIVTP7in3bk76luwNyFzCnndJnBkLy97p/QCwYpyhT9\nMuGdfiT2JcDJFJC3QRAWQdCdGfzuy3rqjZ2CMWm/Tdyil2QlfgRYwbFPqKtdkk9S\nT4Fsz8sYa8wW0/WWcvjaG/nOZsHEqn4tpDRzIgddNuW2l5y1TGcMULIsiyDK9vBZ\nvr76+ayEj+gUPUdNcvXW6P2TL4r2c4q+Y8HoF9D1F0UvNIwUi6NGzo6P69QuSvuI\nWR/hm1Um14ZtwNKjzIwr5IDowmewIdqhr8omk+7pqqo4rVff9LUhWoURLkaM6vW5\nSwIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1750885832724 } Sids { Name: "user1" Type: USER } Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::ResetFailedAttemptCountAfterModifyUser [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:29.706861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:29.706950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:29.706994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:29.707039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:29.707964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:29.708013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:29.708094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 
15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:29.708171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:29.708864Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:29.710370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:29.788363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:29.788417Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:29.801914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:29.802317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:29.802465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:29.808494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:29.808652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:29.809163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.809451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:29.812144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:29.812322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:29.813356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:29.813419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:29.813627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:29.813704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:29.813769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:29.813849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.819828Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] 
sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:29.942833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:29.942988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.943134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:29.943211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:29.943383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:29.943437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:29.945323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.945524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:29.945680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.945735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:29.945770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:29.945800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:29.947551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.947614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:29.947651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:29.949287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.949334Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.949392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.949440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:29.958889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:29.960749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:29.960936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:29.961866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.961981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:29.962023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.962295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:29.962359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.962569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:29.962652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:29.964650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:29.964708Z node 1 :FLAT_TX_SCHEMESHARD ... 
0:36.827252Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-24T21:10:36.827323Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:36.827465Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-24T21:10:36.829811Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:10:36.831352Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Leader for TabletID 72057594046678944 is [5:313:2298] sender: [5:406:2058] recipient: [5:105:2138] Leader for TabletID 72057594046678944 is [5:313:2298] sender: [5:409:2058] recipient: [5:15:2062] Leader for TabletID 72057594046678944 is [5:313:2298] sender: [5:410:2058] recipient: [5:408:2377] Leader for TabletID 72057594046678944 is [5:411:2378] sender: [5:412:2058] recipient: [5:408:2377] 2025-06-24T21:10:36.877573Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:36.877709Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:36.877764Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:36.877808Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:36.877855Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:36.877891Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:36.877960Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:36.878048Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:36.879066Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:36.879567Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:36.897593Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:36.899291Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:36.899466Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:36.899666Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:36.899697Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:36.899806Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:36.900501Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:36.900626Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.900707Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.901084Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.901169Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-24T21:10:36.901350Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.901437Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.901512Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.901597Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.901657Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.901780Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.902080Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.902199Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.902559Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.902681Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.902836Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.902923Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.902993Z node 5 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.903272Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.903346Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.903442Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.903645Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.903734Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.903858Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.903913Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.903953Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:36.908672Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:36.910957Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:36.911026Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:36.911621Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:36.911694Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:36.911739Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:36.913082Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [5:411:2378] sender: [5:469:2058] recipient: [5:15:2062] 2025-06-24T21:10:36.967052Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-24T21:10:36.967126Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:46: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-06-24T21:10:37.030296Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Token: 
"eyJhbGciOiJQUzI1NiIsImtpZCI6IjMifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwODQyNjM3LCJpYXQiOjE3NTA3OTk0MzcsInN1YiI6InVzZXIxIn0.dk0TBIvVnAG-7zR-mfPzLViK8fa5toyDmMfKS9PyMk_shz18jTqkQPcEKijwuL8_matJRpde8P3TMNEBYKOd5WHbWiAjbYLLdOIhTIn43-nIC0-ihxUFVYeKwrr-7MNztCsCEPEYHMBjXsKwJ691hOnR8HqGLQc0WdO-IIL9Xx8XgcEwCwgFCSNxchTuHtXVYcidmwB3lHYRnvwMopt0zC4eRGOzFObaPnDV8fQ89SpPVpo4QA5runokzkVxETcLWvcEzoNL8qVAOdvvCagLO28slnI09r1oKVM9hWC9wYfZ9dMXNCoCohlrHpgduq_9d96piAsNNse1afK_yh9V-g" SanitizedToken: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjMifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwODQyNjM3LCJpYXQiOjE3NTA3OTk0MzcsInN1YiI6InVzZXIxIn0.**" IsAdmin: true, at schemeshard: 72057594046678944 2025-06-24T21:10:37.030476Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:37.030554Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:37.030773Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:37.030828Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:462:2418], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-06-24T21:10:37.031515Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 0 >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorks [GOOD] >> KqpInplaceUpdate::SingleRowSimple-UseSink [GOOD] >> KqpInplaceUpdate::SingleRowStr+UseSink >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False >> TKeyValueTest::TestInlineWriteReadWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorksNewApi ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorks [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:82:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:82:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:83:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:85:2114] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:89:2057] recipient: [7:85:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! new actor is[7:88:2115] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:106:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:85:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:88:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:89:2057] recipient: [8:87:2116] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:91:2057] recipient: [8:87:2116] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! new actor is[8:90:2117] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:176:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:85:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:88:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:89:2057] recipient: [9:87:2116] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:91:2057] recipient: [9:87:2116] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! new actor is[9:90:2117] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:176:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:86:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:89:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:90:2057] recipient: [10:88:2116] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:92:2057] recipient: [10:88:2116] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! new actor is[10:91:2117] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:177:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:89:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:92:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:93:2057] recipient: [11:91:2119] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:95:2057] recipient: [11:91:2119] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! new actor is[11:94:2120] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:180:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 7205759403 ... TabletID 72057594037927937 is [12:58:2098] sender: [12:92:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:93:2057] recipient: [12:91:2119] Leader for TabletID 72057594037927937 is [12:94:2120] sender: [12:95:2057] recipient: [12:91:2119] !Reboot 72057594037927937 (actor [12:58:2098]) rebooted! !Reboot 72057594037927937 (actor [12:58:2098]) tablet resolver refreshed! new actor is[12:94:2120] Leader for TabletID 72057594037927937 is [12:94:2120] sender: [12:180:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:53:2096] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:59:2057] recipient: [13:53:2096] Leader for TabletID 72057594037927937 is [13:58:2098] sender: [13:76:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:52:2096] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:59:2057] recipient: [14:52:2096] Leader for TabletID 72057594037927937 is [14:58:2098] sender: [14:76:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2096] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:59:2057] recipient: [15:52:2096] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:76:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:78:2057] recipient: [15:37:2084] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:81:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:58:2098] sender: [15:82:2057] recipient: [15:80:2111] Leader for TabletID 72057594037927937 is [15:83:2112] sender: [15:84:2057] recipient: [15:80:2111] !Reboot 72057594037927937 (actor [15:58:2098]) rebooted! !Reboot 72057594037927937 (actor [15:58:2098]) tablet resolver refreshed! 
new actor is[15:83:2112] Leader for TabletID 72057594037927937 is [15:83:2112] sender: [15:169:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2096] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:59:2057] recipient: [16:53:2096] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:76:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:78:2057] recipient: [16:37:2084] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:81:2057] recipient: [16:80:2111] Leader for TabletID 72057594037927937 is [16:58:2098] sender: [16:82:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:83:2112] sender: [16:84:2057] recipient: [16:80:2111] !Reboot 72057594037927937 (actor [16:58:2098]) rebooted! !Reboot 72057594037927937 (actor [16:58:2098]) tablet resolver refreshed! new actor is[16:83:2112] Leader for TabletID 72057594037927937 is [16:83:2112] sender: [16:169:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:56:2057] recipient: [17:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:56:2057] recipient: [17:52:2096] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:59:2057] recipient: [17:52:2096] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:76:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:79:2057] recipient: [17:37:2084] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:82:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:58:2098] sender: [17:83:2057] recipient: [17:81:2111] Leader for TabletID 72057594037927937 is [17:84:2112] sender: [17:85:2057] recipient: [17:81:2111] !Reboot 72057594037927937 (actor [17:58:2098]) rebooted! !Reboot 72057594037927937 (actor [17:58:2098]) tablet resolver refreshed! new actor is[17:84:2112] Leader for TabletID 72057594037927937 is [17:84:2112] sender: [17:170:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:56:2057] recipient: [18:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:56:2057] recipient: [18:52:2096] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:59:2057] recipient: [18:52:2096] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:76:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:82:2057] recipient: [18:37:2084] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:85:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:86:2057] recipient: [18:84:2114] Leader for TabletID 72057594037927937 is [18:87:2115] sender: [18:88:2057] recipient: [18:84:2114] !Reboot 72057594037927937 (actor [18:58:2098]) rebooted! !Reboot 72057594037927937 (actor [18:58:2098]) tablet resolver refreshed! 
new actor is[18:87:2115] Leader for TabletID 72057594037927937 is [18:87:2115] sender: [18:173:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2096] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:59:2057] recipient: [19:51:2096] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:76:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:82:2057] recipient: [19:37:2084] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:85:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:86:2057] recipient: [19:84:2114] Leader for TabletID 72057594037927937 is [19:87:2115] sender: [19:88:2057] recipient: [19:84:2114] !Reboot 72057594037927937 (actor [19:58:2098]) rebooted! !Reboot 72057594037927937 (actor [19:58:2098]) tablet resolver refreshed! new actor is[19:87:2115] Leader for TabletID 72057594037927937 is [19:87:2115] sender: [19:173:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:56:2057] recipient: [20:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:56:2057] recipient: [20:52:2096] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:59:2057] recipient: [20:52:2096] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:76:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:83:2057] recipient: [20:37:2084] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:86:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:87:2057] recipient: [20:85:2114] Leader for TabletID 72057594037927937 is [20:88:2115] sender: [20:89:2057] recipient: [20:85:2114] !Reboot 72057594037927937 (actor [20:58:2098]) rebooted! !Reboot 72057594037927937 (actor [20:58:2098]) tablet resolver refreshed! new actor is[20:88:2115] Leader for TabletID 72057594037927937 is [20:88:2115] sender: [20:174:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:56:2057] recipient: [21:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:56:2057] recipient: [21:52:2096] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:59:2057] recipient: [21:52:2096] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:76:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:85:2057] recipient: [21:37:2084] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:88:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:89:2057] recipient: [21:87:2116] Leader for TabletID 72057594037927937 is [21:90:2117] sender: [21:91:2057] recipient: [21:87:2116] !Reboot 72057594037927937 (actor [21:58:2098]) rebooted! !Reboot 72057594037927937 (actor [21:58:2098]) tablet resolver refreshed! 
new actor is[21:90:2117] Leader for TabletID 72057594037927937 is [21:90:2117] sender: [21:176:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:56:2057] recipient: [22:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:56:2057] recipient: [22:52:2096] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:59:2057] recipient: [22:52:2096] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:76:2057] recipient: [22:14:2061] !Reboot 72057594037927937 (actor [22:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:85:2057] recipient: [22:37:2084] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:88:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:89:2057] recipient: [22:87:2116] Leader for TabletID 72057594037927937 is [22:90:2117] sender: [22:91:2057] recipient: [22:87:2116] !Reboot 72057594037927937 (actor [22:58:2098]) rebooted! !Reboot 72057594037927937 (actor [22:58:2098]) tablet resolver refreshed! new actor is[22:90:2117] Leader for TabletID 72057594037927937 is [22:90:2117] sender: [22:176:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:56:2057] recipient: [23:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:56:2057] recipient: [23:53:2096] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:59:2057] recipient: [23:53:2096] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:76:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:86:2057] recipient: [23:37:2084] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:89:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:90:2057] recipient: [23:88:2116] Leader for TabletID 72057594037927937 is [23:91:2117] sender: [23:92:2057] recipient: [23:88:2116] !Reboot 72057594037927937 (actor [23:58:2098]) rebooted! !Reboot 72057594037927937 (actor [23:58:2098]) tablet resolver refreshed! 
new actor is[23:91:2117] Leader for TabletID 72057594037927937 is [23:91:2117] sender: [23:177:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:56:2057] recipient: [24:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:56:2057] recipient: [24:51:2096] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:59:2057] recipient: [24:51:2096] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:76:2057] recipient: [24:14:2061] >> KqpEffects::InsertAbort_Literal_Conflict+UseSink [GOOD] >> KqpEffects::EmptyUpdate+UseSink >> KqpEffects::InsertAbort_Select_Success >> KqpImmediateEffects::UpsertAfterInsert [GOOD] >> KqpImmediateEffects::UpsertAfterInsertWithIndex >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_EnableLocalDBFlatIndex_False >> KqpImmediateEffects::ConflictingKeyW1WR2 >> KqpInplaceUpdate::SingleRowSimple+UseSink [GOOD] >> KqpInplaceUpdate::SingleRowPgNotNull-UseSink >> KqpEffects::UpdateOn_Select [GOOD] >> KqpImmediateEffects::AlreadyBrokenImmediateEffects >> KqpInplaceUpdate::SingleRowArithm-UseSink [GOOD] >> KqpInplaceUpdate::SingleRowIf+UseSink >> KqpInplaceUpdate::SingleRowIf-UseSink [GOOD] >> KqpInplaceUpdate::SingleRowPgNotNull+UseSink >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-true [GOOD] >> KqpEffects::InsertRevert_Literal_Success >> KqpImmediateEffects::Delete [GOOD] >> KqpImmediateEffects::ConflictingKeyW1WRR2 >> TSchemeShardColumnTableTTL::CreateColumnTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:20.431790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:20.431852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:20.431884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:20.431917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:20.431961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:20.431989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:20.432040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:20.432110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:20.432627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:20.432853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:20.488049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:20.488098Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:20.498748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:20.499056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:20.499201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:20.504566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:20.504690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:20.505115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.505307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:20.507370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:20.507482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:20.508400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:20.508455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:20.508631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:20.508671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:20.508752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:20.508820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.513760Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:20.601974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:20.602180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.602321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:20.602357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:20.602550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:20.602604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:20.604357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.604494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:20.604665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.604712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:20.604747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:20.604771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:20.606127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.606178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:20.606204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:20.607492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.607520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-24T21:10:20.607554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:20.607594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:20.609946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:20.611200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:20.611309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:20.611958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.612054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:20.612090Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:20.612333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:20.612384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:20.612541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:20.612618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:20.614329Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:20.614379Z node 1 :FLAT_TX_SCHEMESHARD ... 
72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:40.120689Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:10:40.120780Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:10:40.120809Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:10:40.120840Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:10:40.120872Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:10:40.120941Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-06-24T21:10:40.122238Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 972 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-24T21:10:40.122282Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:40.122402Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 972 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-24T21:10:40.122500Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 972 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-24T21:10:40.123907Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 120259086582 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:40.123950Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:40.124048Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 120259086582 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:40.124092Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:10:40.124171Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 309 RawX2: 120259086582 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:10:40.124223Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:40.124258Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.124292Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:10:40.124331Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 129 -> 240 2025-06-24T21:10:40.125363Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:10:40.126399Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:10:40.127923Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.128034Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.128219Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.128249Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:10:40.128319Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:10:40.128344Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:40.128371Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:10:40.128392Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:40.128416Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is 
published: true 2025-06-24T21:10:40.128461Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [28:337:2314] message: TxId: 101 2025-06-24T21:10:40.128501Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:40.128530Z node 28 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:10:40.128551Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:10:40.128656Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:10:40.130164Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:10:40.130204Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [28:338:2315] TestWaitNotification: OK eventTxId 101 2025-06-24T21:10:40.130596Z node 28 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLTableWithpgint8Column_UNIT_NANOSECONDS" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:40.130766Z node 28 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLTableWithpgint8Column_UNIT_NANOSECONDS" took 188us result status StatusSuccess 2025-06-24T21:10:40.131263Z node 28 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLTableWithpgint8Column_UNIT_NANOSECONDS" PathDescription { Self { Name: "TTLTableWithpgint8Column_UNIT_NANOSECONDS" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLTableWithpgint8Column_UNIT_NANOSECONDS" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "pgint8" TypeId: 12288 Id: 2 NotNull: false TypeInfo { PgTypeId: 20 } IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 ColumnUnit: UNIT_NANOSECONDS Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 
ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_EnableLocalDBFlatIndex_False [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False_EnableLocalDBFlatIndex_False >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongUnit-EnableTablePgTypes-false [GOOD] >> KqpEffects::InsertAbort_Params_Conflict+UseSink [GOOD] >> KqpEffects::InsertAbort_Params_Conflict-UseSink >> KqpInplaceUpdate::Negative_SingleRowListFromRange-UseSink [GOOD] >> KqpEffects::UpdateOn_Params [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongUnit-EnableTablePgTypes-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:22.400277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:22.400373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:22.400413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:22.400441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:22.400489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:22.400524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:22.400585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-24T21:10:22.400645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:22.401204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:22.401458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:22.465073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:22.465116Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:22.474996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:22.475285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:22.475393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:22.480600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:22.480707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:22.481124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:22.481298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:22.483322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:22.483470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:22.484240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:22.484280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:22.484481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:22.484513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:22.484588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:22.484647Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:22.489346Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 
2025-06-24T21:10:22.583379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:22.583579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:22.583702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:22.583748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:22.583900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:22.583981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:22.585529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:22.585661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:22.585782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:22.585813Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:22.585843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:22.585864Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:22.587275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:22.587321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:22.587350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:22.588449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:22.588486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:22.588534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:22.588568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:22.590934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:22.592222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:22.592343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:22.593009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:22.593119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:22.593169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:22.593352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:22.593383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:22.593503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:22.593565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:22.594832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:22.594862Z node 1 :FLAT_TX_SCHEMESHARD ... 
Root 2025-06-24T21:10:41.007748Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:41.007848Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:41.007932Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:41.008004Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:41.010016Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:41.010102Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:41.010177Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:41.012068Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:41.012137Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:41.012221Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:41.012307Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:41.012519Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:41.014033Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:41.014300Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:41.015387Z node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:41.015579Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 115964119149 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:41.015653Z node 27 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:41.015941Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:41.016009Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:41.016282Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:41.016379Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:41.018213Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:41.018285Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:41.018575Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:41.018642Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [27:209:2209], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-24T21:10:41.019066Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:41.019134Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T21:10:41.019340Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:10:41.019403Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:41.019465Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:10:41.019526Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:41.019594Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T21:10:41.019658Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:41.019724Z node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T21:10:41.019773Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T21:10:41.019874Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:41.019945Z 
node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T21:10:41.020004Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T21:10:41.020634Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:41.020778Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:41.020861Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T21:10:41.020931Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T21:10:41.021001Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:41.021131Z node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T21:10:41.023699Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T21:10:41.024276Z node 27 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-24T21:10:41.025444Z node 27 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [27:272:2261] Bootstrap 2025-06-24T21:10:41.064511Z node 27 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [27:272:2261] Become StateWork (SchemeCache [27:277:2266]) 2025-06-24T21:10:41.068018Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "DyNumber" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ColumnUnit: UNIT_AUTO } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:41.068583Z node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:41.068762Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "DyNumber" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ColumnUnit: UNIT_AUTO } }, at schemeshard: 72057594046678944 2025-06-24T21:10:41.069385Z node 27 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: To enable TTL on integral type column 'ValueSinceUnixEpochModeSettings' should be specified, at schemeshard: 72057594046678944 2025-06-24T21:10:41.071137Z node 27 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [27:272:2261] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:10:41.073763Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "To enable TTL on integral type column \'ValueSinceUnixEpochModeSettings\' should be specified" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:41.074187Z node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: To enable TTL on integral type column 'ValueSinceUnixEpochModeSettings' should be specified, operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-06-24T21:10:41.074958Z node 27 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 >> KqpEffects::AlterAfterUpsertBeforeUpsertSelectTransaction+UseSink [GOOD] >> KqpEffects::AlterAfterUpsertBeforeUpsertSelectTransaction-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::CreateColumnTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:18.381912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:18.382002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:18.382050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:18.382101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:18.382144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:18.382165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:18.382229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:18.382286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, 
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:18.382861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:18.383136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:18.449796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:18.449840Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:18.460633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:18.460899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:18.461021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:18.466898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:18.467065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:18.467530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:18.467740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:18.469995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:18.470129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:18.471006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:18.471047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:18.471221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:18.471264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:18.471348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:18.471418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.477019Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:18.559742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { 
Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:18.559917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.560043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:18.560072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:18.560239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:18.560317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:18.562020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:18.562149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:18.562298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.562333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:18.562366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:18.562387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:18.563996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.564050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:18.564083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:18.565472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.565504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:18.565545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: 
NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:18.565575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:18.573013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:18.574775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:18.574905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:18.575629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:18.575728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:18.575769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:18.576008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:18.576048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:18.576181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:18.576245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:18.577933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:18.577968Z node 1 :FLAT_TX_SCHEMESHARD ... 
shard: 72057594046678944 2025-06-24T21:10:40.286993Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.287048Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.287548Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.287686Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.288897Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.289359Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.289666Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.293299Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.293449Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.293532Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.293610Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.293690Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.293762Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.293832Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.293927Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.293995Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.294128Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.294255Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:40.294300Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:10:40.294428Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:10:40.294469Z node 
4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:40.294532Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:10:40.294572Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:40.294613Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-24T21:10:40.294694Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [4:2774:3973] message: TxId: 101 2025-06-24T21:10:40.294741Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:10:40.294819Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:10:40.294856Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:10:40.295911Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 66 2025-06-24T21:10:40.298855Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:10:40.298902Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [4:2775:3974] TestWaitNotification: OK eventTxId 101 2025-06-24T21:10:40.299515Z node 4 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:40.299837Z node 4 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 342us result status StatusSuccess 2025-06-24T21:10:40.300481Z node 4 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnTableVersion: 1 ColumnTableSchemaVersion: 1 ColumnTableTtlSettingsVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 
WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 64 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnTableDescription { Name: "TTLEnabledTable" Schema { Columns { Id: 1 Name: "key" Type: "Uint64" TypeId: 4 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 2 Name: "modified_at" Type: "Uint64" TypeId: 4 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } KeyColumnNames: "modified_at" NextColumnId: 3 Version: 1 Options { SchemeNeedActualization: false } ColumnFamilies { Id: 0 Name: "default" } NextColumnFamilyId: 1 } TtlSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 ColumnUnit: UNIT_SECONDS Tiers { ApplyAfterSeconds: 3600 Delete { } } } Version: 1 } ColumnShardCount: 64 Sharding { ColumnShards: 72075186233409546 ColumnShards: 72075186233409547 ColumnShards: 72075186233409548 ColumnShards: 72075186233409549 ColumnShards: 72075186233409550 ColumnShards: 72075186233409551 ColumnShards: 72075186233409552 ColumnShards: 72075186233409553 ColumnShards: 72075186233409554 ColumnShards: 72075186233409555 ColumnShards: 72075186233409556 ColumnShards: 72075186233409557 ColumnShards: 72075186233409558 ColumnShards: 72075186233409559 ColumnShards: 72075186233409560 ColumnShards: 72075186233409561 ColumnShards: 72075186233409562 ColumnShards: 72075186233409563 ColumnShards: 72075186233409564 ColumnShards: 72075186233409565 ColumnShards: 72075186233409566 ColumnShards: 72075186233409567 ColumnShards: 72075186233409568 ColumnShards: 72075186233409569 ColumnShards: 72075186233409570 ColumnShards: 72075186233409571 ColumnShards: 72075186233409572 ColumnShards: 72075186233409573 ColumnShards: 72075186233409574 ColumnShards: 72075186233409575 ColumnShards: 72075186233409576 ColumnShards: 72075186233409577 ColumnShards: 72075186233409578 ColumnShards: 72075186233409579 ColumnShards: 72075186233409580 ColumnShards: 72075186233409581 ColumnShards: 72075186233409582 ColumnShards: 72075186233409583 ColumnShards: 72075186233409584 ColumnShards: 72075186233409585 ColumnShards: 72075186233409586 ColumnShards: 72075186233409587 ColumnShards: 72075186233409588 ColumnShards: 72075186233409589 ColumnShards: 72075186233409590 ColumnShards: 72075186233409591 ColumnShards: 72075186233409592 ColumnShards: 72075186233409593 ColumnShards: 72075186233409594 ColumnShards: 72075186233409595 ColumnShards: 72075186233409596 ColumnShards: 72075186233409597 ColumnShards: 72075186233409598 ColumnShards: 72075186233409599 ColumnShards: 72075186233409600 ColumnShards: 72075186233409601 ColumnShards: 
72075186233409602 ColumnShards: 72075186233409603 ColumnShards: 72075186233409604 ColumnShards: 72075186233409605 ColumnShards: 72075186233409606 ColumnShards: 72075186233409607 ColumnShards: 72075186233409608 ColumnShards: 72075186233409609 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "modified_at" } } StorageConfig { DataChannelCount: 64 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpImmediateEffects::UpdateOn [GOOD] >> KqpEffects::AlterDuringUpsertTransaction+UseSink [GOOD] >> KqpEffects::AlterDuringUpsertTransaction-UseSink >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False_EnableLocalDBFlatIndex_False [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_TurnOff >> KqpEffects::EmptyUpdate+UseSink [GOOD] >> KqpEffects::EmptyUpdate-UseSink >> KqpImmediateEffects::InsertDuplicates-UseSink >> KqpImmediateEffects::ManyFlushes [GOOD] >> KqpImmediateEffects::TxWithReadAtTheEnd-UseSink [GOOD] >> KqpImmediateEffects::TxWithWriteAtTheEnd+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::Negative_SingleRowListFromRange-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 24053, MsgBus: 25558 2025-06-24T21:10:31.675498Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626300180235330:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:31.675682Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001bac/r3tmp/tmpjSnWLp/pdisk_1.dat 2025-06-24T21:10:32.029646Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626300180235301:2079] 1750799431672629 != 1750799431672632 2025-06-24T21:10:32.040664Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24053, node 1 2025-06-24T21:10:32.085878Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:32.085966Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:32.087933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:32.109551Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:32.109578Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:32.109593Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:32.109750Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25558 TClient is connected to server localhost:25558 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:10:32.699599Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:32.745467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:32.770897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:32.898130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:33.030178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:33.082443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:34.036163Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626313065138843:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.036310Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.598818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.625082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.647125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.672878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.701920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.729445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.753880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.824630Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626313065139502:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.824717Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.824893Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626313065139507:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.829383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:34.839563Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626313065139509:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:34.940040Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626313065139560:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:35.935697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 8863, MsgBus: 4046 2025-06-24T21:10:36.877390Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626321661984641:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:36.877492Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001bac/r3tmp/tmpzGZpDI/pdisk_1.dat 2025-06-24T21:10:36.969517Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:36.970315Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626321661984612:2079] 1750799436876960 != 1750799436876963 TServer::EnableGrpc on GrpcPort 8863, node 2 2025-06-24T21:10:37.002778Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:37.002862Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:37.004346Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:37.015545Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:37.015570Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:37.015579Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:37.015676Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4046 TClient is connected to server localhost:4046 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:37.404818Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:37.415716Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:10:37.478562Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.630152Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:37.703619Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:37.883326Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:39.348450Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626334546888136:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:39.348514Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:39.404723Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:39.431259Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:39.454972Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:39.479566Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:39.503299Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:39.529025Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:39.554363Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:39.630950Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626334546888795:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:39.631028Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626334546888800:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:39.631055Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:39.634257Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:39.642938Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626334546888802:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:39.712036Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626334546888853:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:40.753131Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::UpdateOn_Params [GOOD] Test command err: Trying to start YDB, gRPC: 12805, MsgBus: 20264 2025-06-24T21:10:31.675506Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626302069053796:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:31.675597Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001b97/r3tmp/tmpfyo2b8/pdisk_1.dat 2025-06-24T21:10:31.959751Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:31.959956Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626302069053768:2079] 1750799431672631 != 1750799431672634 TServer::EnableGrpc on GrpcPort 12805, node 1 2025-06-24T21:10:32.040006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:32.040139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:32.042239Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:32.109484Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:32.109516Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:32.109522Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:32.109650Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20264 TClient is connected to server localhost:20264 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:10:32.699498Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:32.714706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:32.744298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:32.868163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:32.999684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:33.072760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:34.048812Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626314953957305:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.048918Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.598699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.625894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.652401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.678947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.708218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.773175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.841358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.889723Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626314953957971:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.889794Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626314953957976:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.889794Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.893435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:34.901126Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626314953957978:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:34.974120Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626314953958029:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 5096, MsgBus: 22052 2025-06-24T21:10:37.009928Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626324027291351:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:37.009989Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001b97/r3tmp/tmpZCFzNI/pdisk_1.dat 2025-06-24T21:10:37.115675Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:37.117041Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626324027291332:2079] 1750799437009555 != 1750799437009558 TServer::EnableGrpc on GrpcPort 5096, node 2 2025-06-24T21:10:37.147210Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:37.147271Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:37.148687Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:37.166845Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:37.166877Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:37.166887Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:37.167008Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22052 TClient is connected to server localhost:22052 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:10:37.580647Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:37.593824Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:37.639604Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:37.755295Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:37.833175Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:38.016857Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:39.862836Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626332617227570:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:39.862923Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:39.915620Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:39.943055Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:39.968348Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:39.992930Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.022887Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.051127Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.081576Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.134310Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626336912195524:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.134381Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.134446Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626336912195529:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.137793Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:40.146965Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626336912195531:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:40.234955Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626336912195582:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpImmediateEffects::Interactive [GOOD] >> KqpInplaceUpdate::SingleRowStr+UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::UpdateOn [GOOD] Test command err: Trying to start YDB, gRPC: 12134, MsgBus: 20965 2025-06-24T21:10:31.675544Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626302125988461:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:31.675656Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001ba9/r3tmp/tmp9PYMLr/pdisk_1.dat 2025-06-24T21:10:31.966519Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:31.969066Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626302125988433:2079] 1750799431672610 != 1750799431672613 TServer::EnableGrpc on GrpcPort 12134, node 1 2025-06-24T21:10:32.050822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:32.050973Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:32.052462Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:32.109547Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:32.109567Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:32.109576Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:32.109672Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20965 TClient is connected to server localhost:20965 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:10:32.699470Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:32.763208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:32.785773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:32.921489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:33.049276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:33.106889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:34.139989Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626315010891978:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.140116Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.598685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.626641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.653456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.676140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.705545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.734453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.760462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.824704Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626315010892636:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.824775Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.824812Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626315010892641:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.829471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:34.839935Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626315010892643:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:34.927429Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626315010892694:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:35.936230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 19343, MsgBus: 12052 2025-06-24T21:10:37.160886Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626324894543239:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:37.160962Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001ba9/r3tmp/tmpUmE4Cx/pdisk_1.dat 2025-06-24T21:10:37.248525Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:37.248923Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626324894543220:2079] 1750799437160584 != 1750799437160587 TServer::EnableGrpc on GrpcPort 19343, node 2 2025-06-24T21:10:37.290581Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:37.290633Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:37.290643Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:37.290651Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:37.290666Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:37.290759Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:37.292355Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12052 TClient is connected to server localhost:12052 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:37.729922Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:37.745032Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:10:37.791411Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.940417Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:38.015514Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:38.167239Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:39.663290Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626333484479433:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:39.663377Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:39.740697Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:39.769434Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:39.796946Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:39.826366Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:39.854302Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:39.921951Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:39.990320Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.068984Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626337779447393:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.069066Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.069106Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626337779447398:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.071962Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:40.081226Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626337779447400:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:40.180065Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626337779447453:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:41.009385Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_TurnOff [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_Generations >> KqpImmediateEffects::InsertDuplicates+UseSink >> KqpInplaceUpdate::SingleRowStr-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ManyFlushes [GOOD] Test command err: Trying to start YDB, gRPC: 64919, MsgBus: 26669 2025-06-24T21:10:31.675646Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626300742528527:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:31.675726Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001b50/r3tmp/tmpe6zixq/pdisk_1.dat 2025-06-24T21:10:31.956733Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626300742528499:2079] 1750799431672569 != 1750799431672572 2025-06-24T21:10:31.967655Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64919, node 1 2025-06-24T21:10:32.032412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:32.032516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:32.034643Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:32.109681Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:32.109713Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:32.109722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:32.109857Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26669 TClient is connected to server localhost:26669 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:10:32.699530Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:32.756736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:32.775648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:32.921637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:33.049372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:33.104284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:34.210393Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626313627432037:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.210480Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.598994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.624560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.648895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.674175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.703576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.730530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.758364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.825050Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626313627432694:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.825103Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.825188Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626313627432699:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.829508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:34.840123Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626313627432701:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:34.932325Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626313627432752:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:35.939751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 11791, MsgBus: 19550 2025-06-24T21:10:37.270525Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626325185183390:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:37.270603Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001b50/r3tmp/tmp72z8xG/pdisk_1.dat 2025-06-24T21:10:37.386892Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:37.387620Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626325185183368:2079] 1750799437270051 != 1750799437270054 TServer::EnableGrpc on GrpcPort 11791, node 2 2025-06-24T21:10:37.409872Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:37.409951Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:37.411871Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:37.441750Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:37.441771Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:37.441780Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:37.441911Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19550 TClient is connected to server localhost:19550 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:37.901665Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:37.922076Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:37.993908Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:38.123674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:38.178486Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:38.294419Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:40.283044Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626338070086882:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.283157Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.329772Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.353945Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.382534Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.410251Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.436087Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.463986Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.530462Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.580130Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626338070087543:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.580197Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626338070087548:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.580222Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.584341Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:40.595612Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626338070087550:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:40.672919Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626338070087601:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:41.709650Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.278058Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626325185183390:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:42.278130Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::Interactive [GOOD] Test command err: Trying to start YDB, gRPC: 29834, MsgBus: 16919 2025-06-24T21:10:31.712079Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626301683574236:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:31.712169Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001b24/r3tmp/tmpNi9983/pdisk_1.dat 2025-06-24T21:10:31.977765Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:31.977976Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626301683574217:2079] 1750799431711121 != 1750799431711124 TServer::EnableGrpc on GrpcPort 29834, node 1 2025-06-24T21:10:32.058846Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:32.058939Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:32.060535Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:32.109525Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:32.109548Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:32.109556Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:32.109682Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16919 TClient is connected to server localhost:16919 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:32.711419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:32.720021Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:10:32.747975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:32.877498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:33.001024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:33.055122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:34.156354Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626314568477755:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.156449Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.598663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.620986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.647034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.667927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.694906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.760453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.786130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.834380Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626314568478413:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.834447Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.834651Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626314568478418:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.837971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:34.846602Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626314568478420:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:34.935285Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626314568478471:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:36.033022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:36.712277Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626301683574236:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:36.712355Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:10:36.740998Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:75196263231 ... 10:36.741900Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NWUzZmM4YzAtYjg5ODlhZDktNzI1OTYyY2MtYTNkZTcyZDY=, ActorId: [1:7519626323158413292:2463], ActorState: ExecuteState, TraceId: 01jyhwejh0chmsa8xvvn0rznqh, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 23254, MsgBus: 20426 2025-06-24T21:10:37.500646Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626323843078705:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:37.503847Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001b24/r3tmp/tmpOFAEGy/pdisk_1.dat 2025-06-24T21:10:37.585889Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23254, node 2 2025-06-24T21:10:37.629082Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:37.629145Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:37.630454Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:37.642977Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:37.643004Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:37.643012Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:37.643135Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20426 TClient is connected to server localhost:20426 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:38.102261Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:38.109784Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:38.154145Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:38.292294Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:38.347944Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:38.508254Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:40.373263Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626336727982194:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.373365Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.416629Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.444047Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.473975Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.539273Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.565453Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.592756Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.621368Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.672844Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626336727982853:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.672906Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626336727982858:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.672927Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.676294Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:40.691035Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626336727982860:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:40.763067Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626336727982911:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:41.610550Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.497581Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626323843078705:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:42.497672Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TKeyValueTest::TestConcatWorksNewApi [GOOD] >> TKeyValueTest::TestConcatToLongKey >> KqpImmediateEffects::ConflictingKeyW1WR2 [GOOD] >> KqpImmediateEffects::ConflictingKeyW1RWR2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::SingleRowStr+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 23793, MsgBus: 11760 2025-06-24T21:10:32.807990Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626302430170987:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:32.808140Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001aaf/r3tmp/tmpvJwNvE/pdisk_1.dat 2025-06-24T21:10:33.105671Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626302430170967:2079] 1750799432806832 != 1750799432806835 2025-06-24T21:10:33.128123Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23793, node 1 2025-06-24T21:10:33.157066Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:33.157086Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:33.157092Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:33.157170Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:33.163166Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:33.163236Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:33.164866Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> 
Connected TClient is connected to server localhost:11760 TClient is connected to server localhost:11760 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:33.577740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:33.598617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:33.717676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:33.817528Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:33.866752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:33.944542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:35.165122Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626315315074504:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:35.165250Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:35.472128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:35.502238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:35.529257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:35.555735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:35.582879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:35.611942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:35.650680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:35.704819Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626315315075161:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:35.704877Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626315315075166:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:35.704892Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:35.707685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:35.716045Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626315315075168:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:35.820209Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626315315075219:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:36.959708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 3703, MsgBus: 8041 2025-06-24T21:10:38.226346Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626330547907179:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:38.226402Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001aaf/r3tmp/tmp2zBcMf/pdisk_1.dat 2025-06-24T21:10:38.322597Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:38.328270Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626330547907159:2079] 1750799438225836 != 1750799438225839 TServer::EnableGrpc on GrpcPort 3703, node 2 2025-06-24T21:10:38.359226Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:38.359328Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:38.363807Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:38.370381Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:38.370413Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:38.370422Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:38.370560Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8041 TClient is connected to server localhost:8041 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:38.760647Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:38.775889Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:38.819804Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:38.930915Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:38.989925Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:39.233300Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:40.988471Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626339137843390:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.988565Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:41.037220Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:41.064430Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:41.090736Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:41.117817Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:41.144831Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:41.208548Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:41.245143Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:41.320553Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626343432811353:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:41.320652Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:41.320655Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626343432811358:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:41.323496Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:41.332112Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626343432811360:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:41.388914Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626343432811411:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:42.172474Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpEffects::InsertAbort_Select_Success [GOOD] >> KqpEffects::InsertAbort_Select_Duplicates-UseSink >> KqpEffects::InsertAbort_Select_Duplicates+UseSink >> KqpImmediateEffects::TxWithWriteAtTheEnd-UseSink >> KqpInplaceUpdate::SingleRowPgNotNull-UseSink [GOOD] >> KqpImmediateEffects::Insert >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongUnit-EnableTablePgTypes-true [GOOD] >> KqpInplaceUpdate::SingleRowIf+UseSink [GOOD] >> KqpInplaceUpdate::SingleRowPgNotNull+UseSink [GOOD] >> KqpImmediateEffects::ConflictingKeyW1WRR2 [GOOD] >> KqpImmediateEffects::AlreadyBrokenImmediateEffects [GOOD] >> KqpEffects::InsertRevert_Literal_Success [GOOD] >> KqpEffects::InsertAbort_Literal_Duplicates+UseSink >> KqpEffects::InsertRevert_Literal_Duplicates ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongUnit-EnableTablePgTypes-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:17.406419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:17.406535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:17.406584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:17.406664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:17.407471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:17.407529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:17.407624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:17.407707Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:17.408588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:17.410147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:17.495322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:17.495378Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:17.510604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:17.510998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:17.511181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:17.518245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:17.518436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:17.519136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.519472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:17.522221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:17.522403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:17.526312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:17.526382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:17.526637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:17.526712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:17.526845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:17.526949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.533477Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:17.667505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:17.667696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.667830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:17.667866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:17.668061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:17.668166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:17.669955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.670125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:17.670270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.670316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:17.670381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:17.670433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:17.672110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.672188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:17.672234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:17.673558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.673595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-24T21:10:17.673661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.673709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:17.677845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:17.679898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:17.680062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:17.681045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.681191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:17.681249Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.681542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:17.681614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.681794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:17.681885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:17.683769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:17.683812Z node 1 :FLAT_TX_SCHEMESHARD ... 
2025-06-24T21:10:44.853435Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:44.853536Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:44.853626Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:44.853703Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:44.856116Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:44.856207Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:44.856298Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:44.858720Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:44.858791Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:44.858898Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:44.859015Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:44.859292Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:44.861244Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:44.861561Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:44.862761Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:44.863020Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 158913792107 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:44.863130Z node 37 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:44.863567Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:44.863676Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:44.864064Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:44.864219Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:44.866774Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:44.866887Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:44.867276Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:44.867362Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [37:212:2212], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-24T21:10:44.867889Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:44.867989Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T21:10:44.868242Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:10:44.868328Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:44.868420Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:10:44.868502Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:44.868593Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T21:10:44.868686Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:10:44.868772Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T21:10:44.868836Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T21:10:44.868967Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:44.869050Z 
node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T21:10:44.869128Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T21:10:44.870113Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:44.870303Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:10:44.870402Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T21:10:44.870491Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T21:10:44.870605Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:44.870772Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T21:10:44.874223Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T21:10:44.875076Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-24T21:10:44.876628Z node 37 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [37:275:2264] Bootstrap 2025-06-24T21:10:44.924512Z node 37 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [37:275:2264] Become StateWork (SchemeCache [37:280:2269]) 2025-06-24T21:10:44.928810Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "pgint8" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ColumnUnit: UNIT_AUTO } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:44.929530Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:10:44.929756Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "pgint8" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ColumnUnit: UNIT_AUTO } }, at schemeshard: 72057594046678944 2025-06-24T21:10:44.930715Z node 37 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: To enable TTL on integral PG type column 'ValueSinceUnixEpochModeSettings' should be specified, at schemeshard: 72057594046678944 2025-06-24T21:10:44.933148Z node 37 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [37:275:2264] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:10:44.937033Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "To enable TTL on integral PG type column \'ValueSinceUnixEpochModeSettings\' should be specified" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:44.937561Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: To enable TTL on integral PG type column 'ValueSinceUnixEpochModeSettings' should be specified, operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-06-24T21:10:44.938414Z node 37 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::SingleRowPgNotNull-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 10389, MsgBus: 25622 2025-06-24T21:10:34.477256Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626311275865163:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:34.477339Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001a58/r3tmp/tmpUsaQWc/pdisk_1.dat 2025-06-24T21:10:34.746171Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:34.746425Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626311275865144:2079] 1750799434476773 != 1750799434476776 TServer::EnableGrpc on GrpcPort 10389, node 1 2025-06-24T21:10:34.800196Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:34.800218Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:34.800223Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:34.800325Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:34.807805Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:34.807932Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:34.809564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25622 TClient is connected to server localhost:25622 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:35.289184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:35.323846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:35.444223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:35.515854Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:35.576661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:35.654157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:37.011718Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626324160768670:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.011837Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.257018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.281226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.308165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.337473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.364204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.430467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.460047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.539541Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626324160769334:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.539605Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.540017Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626324160769339:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.543895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:37.553784Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626324160769341:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:37.642201Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626324160769394:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:38.536679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 12882, MsgBus: 18591 2025-06-24T21:10:39.664495Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626333854792486:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:39.664572Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001a58/r3tmp/tmpHcsZqB/pdisk_1.dat 2025-06-24T21:10:39.755440Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:39.756803Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626333854792455:2079] 1750799439663086 != 1750799439663089 TServer::EnableGrpc on GrpcPort 12882, node 2 2025-06-24T21:10:39.787455Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:39.787524Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:39.788794Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:39.812240Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:39.812263Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:39.812270Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:39.812409Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18591 TClient is connected to server localhost:18591 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:40.222357Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:40.238096Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.308949Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:10:40.431921Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.503739Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.670499Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:42.263925Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626346739695994:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.264022Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.299231Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.324732Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.353909Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.378014Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.400874Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.431661Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.499104Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.553895Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626346739696652:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.553983Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.554019Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626346739696657:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.569091Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:42.576730Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626346739696659:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:42.651853Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626346739696710:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:43.700905Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpImmediateEffects::ConflictingKeyW1RR2 >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_Generations [GOOD] >> TFlatTableExecutor_CachePressure::TestNotEnoughLocalCache [GOOD] >> TFlatTableExecutor_Cold::ColdBorrowScan >> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi [GOOD] >> TKeyValueTest::TestGetStatusWorks >> TFlatTableExecutor_Cold::ColdBorrowScan [GOOD] >> TFlatTableExecutor_ColumnGroups::TestManyRows >> KqpImmediateEffects::MultipleEffectsWithIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::SingleRowPgNotNull+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 14871, MsgBus: 3927 2025-06-24T21:10:34.794921Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626313295140355:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:34.795111Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001a3e/r3tmp/tmpLQtQfe/pdisk_1.dat 2025-06-24T21:10:35.054433Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626313295140335:2079] 1750799434794208 != 1750799434794211 2025-06-24T21:10:35.061327Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:35.066732Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:35.066853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:35.069083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14871, node 1 2025-06-24T21:10:35.130368Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:35.130391Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:35.130402Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:35.130531Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3927 TClient is connected to server localhost:3927 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:35.625484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:35.648442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:35.794566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:35.802673Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:35.942685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:36.018119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:37.293770Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626326180043858:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.293877Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.645828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.673754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.699884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.727708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.756504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.786158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.823109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.874819Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626326180044517:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.874910Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.875190Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626326180044522:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.878760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:37.888678Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626326180044524:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:37.972802Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626326180044575:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:38.988877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 11228, MsgBus: 9656 2025-06-24T21:10:40.149430Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626340686685969:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:40.149522Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001a3e/r3tmp/tmpom63yN/pdisk_1.dat 2025-06-24T21:10:40.268732Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:40.270218Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626340686685950:2079] 1750799440149090 != 1750799440149093 TServer::EnableGrpc on GrpcPort 11228, node 2 2025-06-24T21:10:40.291531Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:40.291638Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:40.293133Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:40.318660Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:40.318688Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:40.318697Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:40.318797Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9656 TClient is connected to server localhost:9656 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:40.804762Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:40.821634Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.873920Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:41.029581Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:41.102127Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:41.158634Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:43.136837Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626353571589484:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.136959Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.199124Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.265747Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.290075Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.315808Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.341891Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.370661Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.400057Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.447501Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626353571590149:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.447556Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626353571590154:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.447577Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.450666Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:43.457785Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626353571590156:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:43.513634Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626353571590207:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:44.434532Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::SingleRowIf+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 8037, MsgBus: 2040 2025-06-24T21:10:34.897467Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626313829728845:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:34.897550Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001a4b/r3tmp/tmpuTsP40/pdisk_1.dat 2025-06-24T21:10:35.188741Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:35.189333Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626313829728826:2079] 1750799434896953 != 1750799434896956 TServer::EnableGrpc on GrpcPort 8037, node 1 2025-06-24T21:10:35.233101Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:35.233129Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:35.233154Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:35.233307Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:35.241763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:35.241892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:35.243623Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2040 TClient is connected to server localhost:2040 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:35.729163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:35.742834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:35.855114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:35.968637Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:36.008790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:36.081154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:37.286017Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626326714632368:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.286153Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.626438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.654323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.679486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.742546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.767226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.795666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.826477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.910208Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626326714633029:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.910266Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.910346Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626326714633034:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.913570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:37.922878Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626326714633036:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:38.021064Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626331009600385:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:38.898460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 21924, MsgBus: 1176 2025-06-24T21:10:40.093456Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626339161215230:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:40.093497Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001a4b/r3tmp/tmpensFGv/pdisk_1.dat 2025-06-24T21:10:40.192813Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:40.194067Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626339161215211:2079] 1750799440093147 != 1750799440093150 TServer::EnableGrpc on GrpcPort 21924, node 2 2025-06-24T21:10:40.234068Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:40.234162Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:40.235678Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:40.241833Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:40.241853Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:40.241861Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:40.241977Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1176 TClient is connected to server localhost:1176 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:40.631912Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:40.638625Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.683328Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.814081Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.887053Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:41.100357Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:42.731374Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626347751151449:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.731436Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.784980Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.810926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.837878Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.865903Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.893112Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.929709Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.995980Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.073329Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626352046119406:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.073393Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.073420Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626352046119411:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.076863Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:43.087022Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626352046119413:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:43.163885Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626352046119466:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:44.115920Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpEffects::EmptyUpdate-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ConflictingKeyW1WRR2 [GOOD] Test command err: Trying to start YDB, gRPC: 19286, MsgBus: 30490 2025-06-24T21:10:35.329178Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626315250864473:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:35.329261Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001a3d/r3tmp/tmpbuJNpi/pdisk_1.dat 2025-06-24T21:10:35.622848Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626315250864447:2079] 1750799435328374 != 1750799435328377 2025-06-24T21:10:35.631180Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19286, node 1 2025-06-24T21:10:35.710254Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:35.710274Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:35.710278Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:35.710377Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:35.733632Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:35.733779Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:35.735416Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30490 TClient is connected to server localhost:30490 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:36.224151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:36.240377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:36.337823Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:36.345202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:36.477308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:36.538745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:38.007282Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626328135767986:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:38.007411Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:38.322874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:38.353932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:38.381473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:38.408837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:38.434241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:38.499774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:38.524996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:38.603382Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626328135768648:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:38.603462Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626328135768653:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:38.603492Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:38.607284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:38.617072Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626328135768655:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:38.672316Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626328135768706:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:39.410607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 27304, MsgBus: 16609 2025-06-24T21:10:40.677940Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626339899985448:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:40.678065Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /ho ... :40.813325Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:40.813344Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:40.813351Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:40.813437Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:40.817302Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:40.817380Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:40.819281Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16609 TClient is connected to server localhost:16609 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:10:41.215101Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:41.246785Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:41.289927Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:41.410221Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:41.482928Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:41.686038Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:43.224941Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626352784888940:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.225013Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.274574Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.299486Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.321840Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.348588Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.373622Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.401054Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.427611Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.518505Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626352784889599:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.518580Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.518603Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626352784889604:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.521153Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:43.528737Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626352784889606:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:43.625147Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626352784889657:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:44.445460Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:44.976810Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [2:7519626357079857490:2473], TxId: 281474976715677, task: 1. Ctx: { TraceId : 01jyhwetwr7w6tqdr6r683yk7q. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=ZDU5M2IwNmUtMmNkOGRlMTctMzdiMDM5ZGItYTJhMzFjNDA=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Source[0] fatal error: {
: Error: Read request aborted subissue: {
: Error: Read conflict with concurrent transaction (shard# 72075186224037922 node# 2 state# Ready) } } 2025-06-24T21:10:44.979603Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [2:7519626357079857490:2473], TxId: 281474976715677, task: 1. Ctx: { TraceId : 01jyhwetwr7w6tqdr6r683yk7q. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=ZDU5M2IwNmUtMmNkOGRlMTctMzdiMDM5ZGItYTJhMzFjNDA=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: ABORTED DEFAULT_ERROR: {
: Error: Read request aborted subissue: {
: Error: Read conflict with concurrent transaction (shard# 72075186224037922 node# 2 state# Ready) } }. 2025-06-24T21:10:44.980288Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZDU5M2IwNmUtMmNkOGRlMTctMzdiMDM5ZGItYTJhMzFjNDA=, ActorId: [2:7519626357079857225:2473], ActorState: ExecuteState, TraceId: 01jyhwetwr7w6tqdr6r683yk7q, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::AlreadyBrokenImmediateEffects [GOOD] Test command err: Trying to start YDB, gRPC: 63583, MsgBus: 8381 2025-06-24T21:10:34.475454Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626312715276271:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:34.475529Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001a8c/r3tmp/tmpuXCu32/pdisk_1.dat 2025-06-24T21:10:34.724049Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626312715276243:2079] 1750799434474627 != 1750799434474630 2025-06-24T21:10:34.732563Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63583, node 1 2025-06-24T21:10:34.816896Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:34.816930Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:34.816938Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:34.817064Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:34.822626Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:34.822704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:34.824306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8381 TClient is connected to server localhost:8381 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:10:35.265252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:35.292917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:35.393662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:35.492280Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:35.533250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:35.595476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:37.041519Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626325600179788:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.041593Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.378951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.404480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.429118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.455353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.483869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.517584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.584196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.640978Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626325600180450:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.641047Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.641062Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626325600180455:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.644886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:37.655464Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626325600180457:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:37.728921Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626325600180508:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 27086, MsgBus: 26662 2025-06-24T21:10:39.887866Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626335296939748:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:39.887968Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001a8c/r3tmp/tmpa8HtAo/pdisk_1.dat 2025-06-24T21:10:39.996386Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:39.997694Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626335296939729:2079] 1750799439887384 != 1750799439887387 TServer::EnableGrpc on GrpcPort 27086, node 2 2025-06-24T21:10:40.024580Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:40.024678Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:40.026381Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:40.046566Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:40.046590Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:40.046598Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:40.046716Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26662 TClient is connected to server localhost:26662 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:10:40.409133Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:40.417894Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.486827Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.637975Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.695442Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.905214Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:42.921913Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626348181843273:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.922087Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.975487Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.001169Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.027837Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.054179Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.082862Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.113090Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.142704Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.222561Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626352476811229:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.222634Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626352476811234:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.222648Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.226307Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:43.235471Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626352476811236:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:43.328751Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626352476811287:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:44.182764Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:44.837262Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=MTZiMzIwMzQtMjFkYWY2MWQtOTE1ZThjNTYtZmY1OGQ2MzU=, ActorId: [2:7519626356771779071:2500], ActorState: ExecuteState, TraceId: 01jyhwetref2bcpf8wgjktsmz2, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken 2025-06-24T21:10:44.845085Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=MTZiMzIwMzQtMjFkYWY2MWQtOTE1ZThjNTYtZmY1OGQ2MzU=, ActorId: [2:7519626356771779071:2500], ActorState: ReadyState, TraceId: 01jyhwetvc26d9yzs7hk2g3xs6, Create QueryResponse for error on request, msg: 2025-06-24T21:10:44.888026Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626335296939748:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:44.888120Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TSchemeShardTTLTests::RacyAlterTableAndConditionalErase [GOOD] >> KqpEffects::AlterAfterUpsertBeforeUpsertSelectTransaction-UseSink [GOOD] >> TSchemeShardTTLTests::CheckCounters [GOOD] >> TKeyValueTest::TestInlineWriteReadWithRestartsWithNotCorrectUTF8NewApi [GOOD] >> KqpWrite::Insert >> KqpInplaceUpdate::Negative_BatchUpdate+UseSink >> KqpEffects::AlterDuringUpsertTransaction-UseSink [GOOD] >> KqpEffects::InsertAbort_Params_Conflict-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::EmptyUpdate-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 63752, MsgBus: 22736 2025-06-24T21:10:33.925790Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626310462403973:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:33.925977Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001aa4/r3tmp/tmpA2Uqbz/pdisk_1.dat 2025-06-24T21:10:34.168175Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:34.184035Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626310462403953:2079] 1750799433925061 != 1750799433925064 TServer::EnableGrpc on GrpcPort 63752, node 1 
2025-06-24T21:10:34.233304Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:34.233326Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:34.233331Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:34.233450Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:34.252611Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:34.252732Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:34.254123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22736 TClient is connected to server localhost:22736 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:34.702754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:34.721647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:34.851924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:34.968647Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:34.998861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:35.069837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:36.497416Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626323347307496:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:36.497537Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:36.775931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:36.797283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:36.819895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:36.843237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:36.865775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:36.932355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:36.959818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.011114Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626327642275452:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.011191Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.011271Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626327642275457:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.014777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:37.023962Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626327642275459:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:37.106828Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626327642275510:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:38.055549Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-24T21:10:38.065674Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 72075186224037888 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:10:38.065848Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 3 at tablet 72075186224037888 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:10:38.066042Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [1:7519626331937243096:2472], Table: `/Root/TwoShard` ([72057594046644480:2:1]), SessionActorId: [1:7519626327642275778:247 ... 224037888 executing on unit AlterCdcStream 2025-06-24T21:10:46.258057Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1750799446298:281474976715664] at 72075186224037888 to execution unit DropCdcStream 2025-06-24T21:10:46.258071Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1750799446298:281474976715664] at 72075186224037888 on unit DropCdcStream 2025-06-24T21:10:46.258081Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1750799446298:281474976715664] at 72075186224037888 is Executed 2025-06-24T21:10:46.258090Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1750799446298:281474976715664] at 72075186224037888 executing on unit DropCdcStream 2025-06-24T21:10:46.258097Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1750799446298:281474976715664] at 72075186224037888 to execution unit CreateIncrementalRestoreSrc 2025-06-24T21:10:46.258105Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1750799446298:281474976715664] at 72075186224037888 on unit CreateIncrementalRestoreSrc 2025-06-24T21:10:46.258112Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1750799446298:281474976715664] at 72075186224037888 is Executed 2025-06-24T21:10:46.258110Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [3:7519626359263083346:2331], Recipient [3:7519626359263083401:2303]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T21:10:46.258120Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1750799446298:281474976715664] at 72075186224037888 executing on unit CreateIncrementalRestoreSrc 2025-06-24T21:10:46.258132Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1750799446298:281474976715664] at 72075186224037888 to execution unit CompleteOperation 2025-06-24T21:10:46.258143Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1750799446298:281474976715664] at 72075186224037888 on unit CompleteOperation 
2025-06-24T21:10:46.258170Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:10:46.258299Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1750799446298:281474976715664] at 72075186224037888 is DelayComplete 2025-06-24T21:10:46.258319Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1750799446298:281474976715664] at 72075186224037888 executing on unit CompleteOperation 2025-06-24T21:10:46.258330Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1750799446298:281474976715664] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:10:46.258339Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1750799446298:281474976715664] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:10:46.258356Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1750799446298:281474976715664] at 72075186224037888 is Executed 2025-06-24T21:10:46.258364Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1750799446298:281474976715664] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:10:46.258373Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1750799446298:281474976715664] at 72075186224037888 has finished 2025-06-24T21:10:46.258385Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:10:46.258393Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-24T21:10:46.258400Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:10:46.258407Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:10:46.259753Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [3:7519626359263083346:2331], Recipient [3:7519626359263083326:2297]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T21:10:46.259812Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:10:46.259871Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1750799446298} 2025-06-24T21:10:46.259915Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:10:46.259963Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:10:46.259983Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1750799446298:281474976715664] at 72075186224037888 on unit DropTable 2025-06-24T21:10:46.259996Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1750799446298:281474976715664] at 72075186224037888 on unit CompleteOperation 2025-06-24T21:10:46.260063Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750799446298 : 281474976715664] from 72075186224037888 at tablet 72075186224037888 send result 
to client [3:7519626346378181078:2146], exec latency: 1 ms, propose latency: 3 ms 2025-06-24T21:10:46.260120Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715664 state PreOffline TxInFly 0 2025-06-24T21:10:46.260202Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:10:46.260330Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [3:7519626359263083319:2316], Recipient [3:7519626359263083326:2297]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-24T21:10:46.260449Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [3:7519626363558050881:2327], Recipient [3:7519626359263083326:2297]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046644480 Status: OK ServerId: [3:7519626363558050883:2488] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-24T21:10:46.260471Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-24T21:10:46.260825Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552132, Sender [3:7519626346378181078:2146], Recipient [3:7519626359263083326:2297]: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715664 2025-06-24T21:10:46.260915Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvSchemaChangedResult 2025-06-24T21:10:46.260952Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715664 datashard 72075186224037888 state PreOffline 2025-06-24T21:10:46.260986Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:10:46.262180Z node 3 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037888 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:10:46.262301Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [3:7519626363558050881:2327], Recipient [3:7519626359263083326:2297]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046644480 ClientId: [3:7519626363558050881:2327] ServerId: [3:7519626363558050883:2488] } 2025-06-24T21:10:46.262324Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T21:10:46.262348Z node 3 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037888 Initiating switch from PreOffline to Offline state 2025-06-24T21:10:46.264122Z node 3 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037888 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T21:10:46.264236Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [3:7519626359263083319:2316], Recipient [3:7519626359263083326:2297]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-24T21:10:46.264431Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [3:7519626363558050889:2328], Recipient [3:7519626359263083326:2297]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046644480 Status: OK ServerId: [3:7519626363558050890:2494] Leader: 1 Dead: 0 Generation: 2 
VersionInfo: } 2025-06-24T21:10:46.264459Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-24T21:10:46.264764Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552133, Sender [3:7519626346378181078:2146], Recipient [3:7519626359263083326:2297]: NKikimrTxDataShard.TEvStateChangedResult TabletId: 72057594046644480 State: 4 2025-06-24T21:10:46.264790Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-24T21:10:46.264806Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037888 state Offline 2025-06-24T21:10:46.264907Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [3:7519626363558050889:2328], Recipient [3:7519626359263083326:2297]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046644480 ClientId: [3:7519626363558050889:2328] ServerId: [3:7519626363558050890:2494] } 2025-06-24T21:10:46.264930Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T21:10:46.266776Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829699, Sender [3:7519626359263083319:2316], Recipient [3:7519626359263083326:2297]: NKikimrTabletBase.TEvTabletStop TabletID: 72075186224037888 Reason: ReasonStop 2025-06-24T21:10:46.266812Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037888 reason = ReasonStop 2025-06-24T21:10:46.267031Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [3:7519626359263083319:2316], Recipient [3:7519626359263083326:2297]: NKikimr::TEvTablet::TEvTabletDead 2025-06-24T21:10:46.267267Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037888 2025-06-24T21:10:46.267358Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-06-24T21:10:46.267772Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 271843840, Sender [3:7519626346378180737:2064], Recipient [3:7519626359263083401:2303]: NKikimr::TEvPipeCache::TEvDeliveryProblem 2025-06-24T21:10:46.267797Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3167: StateWork, processing event TEvPipeCache::TEvDeliveryProblem 2025-06-24T21:10:46.267809Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3635: Client pipe to tablet 72075186224037888 from 72075186224037889 is reset 2025-06-24T21:10:46.268090Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::RacyAlterTableAndConditionalErase [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:26.039976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 
2025-06-24T21:10:26.040066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:26.040108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:26.040153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:26.040207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:26.040237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:26.040294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:26.040362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:26.041070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:26.041381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:26.115593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:26.115632Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:26.130205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:26.130583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:26.130737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:26.137661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:26.137819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:26.138382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:26.138666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:26.141386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:26.141548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:26.142643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:26.142700Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:26.142906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:26.142958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:26.143064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:26.143213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.147995Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:26.258311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:26.258501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.258642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:26.258675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:26.258874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:26.258936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:26.260840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:26.260996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:26.261159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.261195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:26.261235Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:26.261258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:26.262820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.262893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:26.262935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:26.264264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.264296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:26.264335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:26.264365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:26.266677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:26.267960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:26.268090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:26.268749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:26.268844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:26.268882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:26.269063Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:26.269105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, 
operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:26.269227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:26.269274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:26.270571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:26.270606Z node 1 :FLAT_TX_SCHEMESHARD ... FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 102 at step: 5000003 2025-06-24T21:10:47.093574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:47.093672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:47.093724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_table.cpp:359: TAlterTable TPropose operationId# 102:0 HandleReply TEvOperationPlan, operationId: 102:0, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-24T21:10:47.094103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 128 -> 129 2025-06-24T21:10:47.094243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-24T21:10:47.099096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:47.099166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:10:47.099399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:47.099446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-24T21:10:47.099731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:47.099786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-06-24T21:10:47.100361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 
2025-06-24T21:10:47.100442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:10:47.100504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:10:47.100581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-24T21:10:47.100618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:10:47.100677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 102 2025-06-24T21:10:47.102384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1034 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-24T21:10:47.102436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:47.102603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1034 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-24T21:10:47.102703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1034 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-24T21:10:47.103500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:47.103536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T21:10:47.103631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:47.103671Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:10:47.103766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-24T21:10:47.103836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:47.103876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:47.103900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:10:47.103932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T21:10:47.106477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:10:47.106669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:47.106770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:47.106862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:10:47.106900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:10:47.106973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:10:47.107000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:10:47.107032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:10:47.107053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:10:47.107078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T21:10:47.107125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:339:2316] message: TxId: 102 2025-06-24T21:10:47.107193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:10:47.107222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, 
operation id: 102:0 2025-06-24T21:10:47.107258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:10:47.107355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:10:47.108771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:10:47.108810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:615:2567] TestWaitNotification: OK eventTxId 102 2025-06-24T21:10:47.109109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-24T21:10:47.109161Z node 1 :FLAT_TX_SCHEMESHARD ERROR: schemeshard__conditional_erase.cpp:391: Unsuccessful conditional erase: tabletId: 72075186233409546, status: SCHEME_ERROR, error: Schema version mismatch: got 1, expected 2, retry after: 300.000000s, at schemeshard: 72057594046678944 2025-06-24T21:10:47.110443Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-24T21:10:47.110550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-24T21:10:47.110605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:1, run at: 1970-01-01T00:06:00.038500Z, at schemeshard: 72057594046678944 2025-06-24T21:10:47.110694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CheckCounters [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:20.712104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:20.712175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:20.712212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:20.712239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:20.712270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:20.712292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 
2025-06-24T21:10:20.712342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:20.712402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:20.712957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:20.713200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:20.770259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:20.770309Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:20.780775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:20.780986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:20.781105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:20.786087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:20.786266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:20.786695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.786834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:20.788941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:20.789065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:20.789808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:20.789858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:20.789926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:20.789956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:20.790031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:20.790211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.795048Z node 1 :HIVE 
INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:20.887285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:20.887483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.887653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:20.887686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:20.887873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:20.887925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:20.889753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.889874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:20.890004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.890036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:20.890070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:20.890093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:20.891499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.891549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:20.891578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:20.892721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress 
Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.892749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.892785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:20.892820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:20.895356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:20.896607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:20.896739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:20.897380Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.897479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:20.897520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:20.897725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:20.897763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:20.897882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:20.897941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:20.899353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:20.899387Z node 1 :FLAT_TX_SCHEMESHARD D ... 
l.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:10:46.795446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-24T21:10:46.795501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:1340:3238] 2025-06-24T21:10:46.796012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 107 Name: "SchemeShard/NumShardsByTtlLag" Ranges: "0" Ranges: "900" Ranges: "1800" Ranges: "3600" Ranges: "7200" Ranges: "14400" Ranges: "28800" Ranges: "57600" Ranges: "86400" Ranges: "inf" Values: 0 Values: 2 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 2025-06-24T21:10:46.912769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409548 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0001 2025-06-24T21:10:46.913718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409549 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0001 2025-06-24T21:10:46.914287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:3 data size 0 row count 0 2025-06-24T21:10:46.914375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409548 maps to shardIdx: 72057594046678944:3 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTableMoved, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:10:46.914476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409548 2025-06-24T21:10:46.914751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:4 data size 0 row count 0 2025-06-24T21:10:46.914799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409549 maps to shardIdx: 72057594046678944:4 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTableMoved, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:10:46.914861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409549 Name: "SchemeShard/NumShardsByTtlLag" Ranges: "0" Ranges: "900" Ranges: "1800" Ranges: "3600" Ranges: "7200" Ranges: "14400" Ranges: "28800" Ranges: "57600" Ranges: "86400" Ranges: "inf" Values: 0 Values: 0 Values: 2 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 2025-06-24T21:10:47.002518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6712: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944 2025-06-24T21:10:47.002644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: 
TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-24T21:10:47.002749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-24T21:10:47.002908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:213: Run conditional erase, tabletId: 72075186233409549, request: TableId: 4 Expiration { ColumnId: 2 WallClockTimestamp: 1750812711081199 ColumnUnit: UNIT_AUTO } SchemaVersion: 4 Limits { BatchMaxBytes: 512000 BatchMinKeys: 1 BatchMaxKeys: 256 }, at schemeshard: 72057594046678944 2025-06-24T21:10:47.003015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:213: Run conditional erase, tabletId: 72075186233409548, request: TableId: 4 Expiration { ColumnId: 2 WallClockTimestamp: 1750812711081199 ColumnUnit: UNIT_AUTO } SchemaVersion: 4 Limits { BatchMaxBytes: 512000 BatchMinKeys: 1 BatchMaxKeys: 256 }, at schemeshard: 72057594046678944 2025-06-24T21:10:47.003776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6740: Conditional erase accepted: tabletId: 72075186233409548, at schemeshard: 72057594046678944 2025-06-24T21:10:47.004135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6740: Conditional erase accepted: tabletId: 72075186233409549, at schemeshard: 72057594046678944 2025-06-24T21:10:47.004465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-24T21:10:47.004514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful conditional erase: tabletId: 72075186233409548, at schemeshard: 72057594046678944 2025-06-24T21:10:47.004952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-24T21:10:47.004986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful conditional erase: tabletId: 72075186233409549, at schemeshard: 72057594046678944 2025-06-24T21:10:47.008097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-24T21:10:47.008364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-24T21:10:47.008419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:3, run at: 2025-06-25T01:51:51.081199Z, at schemeshard: 72057594046678944 2025-06-24T21:10:47.009292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-24T21:10:47.009351Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-24T21:10:47.009414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-24T21:10:47.009451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:3, run at: 2025-06-25T01:51:51.081199Z, at schemeshard: 72057594046678944 2025-06-24T21:10:47.009493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at 
schemeshard: 72057594046678944 2025-06-24T21:10:47.030953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-24T21:10:47.084256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409548 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-06-24T21:10:47.084422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409549 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-06-24T21:10:47.084483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:3 data size 0 row count 0 2025-06-24T21:10:47.084563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409548 maps to shardIdx: 72057594046678944:3 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTableMoved, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:10:47.084662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409548 2025-06-24T21:10:47.084810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:4 data size 0 row count 0 2025-06-24T21:10:47.084852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409549 maps to shardIdx: 72057594046678944:4 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTableMoved, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:10:47.084918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409549 Name: "SchemeShard/NumShardsByTtlLag" Ranges: "0" Ranges: "900" Ranges: "1800" Ranges: "3600" Ranges: "7200" Ranges: "14400" Ranges: "28800" Ranges: "57600" Ranges: "86400" Ranges: "inf" Values: 2 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 2025-06-24T21:10:47.116322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-24T21:10:47.179849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409548 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-06-24T21:10:47.179962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409549 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-06-24T21:10:47.180017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:3 data size 0 row count 0 2025-06-24T21:10:47.180101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from 
datashardId(TabletID)=72075186233409548 maps to shardIdx: 72057594046678944:3 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTableMoved, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:10:47.180189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409548 2025-06-24T21:10:47.180355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:4 data size 0 row count 0 2025-06-24T21:10:47.180426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409549 maps to shardIdx: 72057594046678944:4 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTableMoved, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:10:47.180471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409549 Name: "SchemeShard/NumShardsByTtlLag" Ranges: "0" Ranges: "900" Ranges: "1800" Ranges: "3600" Ranges: "7200" Ranges: "14400" Ranges: "28800" Ranges: "57600" Ranges: "86400" Ranges: "inf" Values: 0 Values: 2 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestInlineWriteReadWithRestartsWithNotCorrectUTF8NewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:82:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:82:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:83:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:85:2114] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:89:2057] recipient: [7:85:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! new actor is[7:88:2115] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:174:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:86:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:89:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:90:2057] recipient: [8:88:2117] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:92:2057] recipient: [8:88:2117] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! new actor is[8:91:2118] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:177:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:86:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:89:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:90:2057] recipient: [9:88:2117] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:92:2057] recipient: [9:88:2117] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! new actor is[9:91:2118] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:177:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:87:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:91:2057] recipient: [10:89:2117] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:93:2057] recipient: [10:89:2117] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! new actor is[10:92:2118] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:178:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:90:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:93:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:94:2057] recipient: [11:92:2120] Leader for TabletID 72057594037927937 is [11:95:2121] sender: [11:96:2057] recipient: [11:92:2120] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! new actor is[11:95:2121] Leader for TabletID 72057594037927937 is [11:95:2121] sender: [11:181:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061 ... 8:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! 
Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:105:2057] recipient: [25:37:2084] Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:108:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:109:2057] recipient: [25:107:2130] Leader for TabletID 72057594037927937 is [25:110:2131] sender: [25:111:2057] recipient: [25:107:2130] !Reboot 72057594037927937 (actor [25:58:2098]) rebooted! !Reboot 72057594037927937 (actor [25:58:2098]) tablet resolver refreshed! new actor is[25:110:2131] Leader for TabletID 72057594037927937 is [25:110:2131] sender: [25:128:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:56:2057] recipient: [26:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:56:2057] recipient: [26:52:2096] Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:59:2057] recipient: [26:52:2096] Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:76:2057] recipient: [26:14:2061] !Reboot 72057594037927937 (actor [26:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:107:2057] recipient: [26:37:2084] Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:110:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:111:2057] recipient: [26:109:2132] Leader for TabletID 72057594037927937 is [26:112:2133] sender: [26:113:2057] recipient: [26:109:2132] !Reboot 72057594037927937 (actor [26:58:2098]) rebooted! !Reboot 72057594037927937 (actor [26:58:2098]) tablet resolver refreshed! new actor is[26:112:2133] Leader for TabletID 72057594037927937 is [26:112:2133] sender: [26:198:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:56:2057] recipient: [27:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:56:2057] recipient: [27:52:2096] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:59:2057] recipient: [27:52:2096] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:76:2057] recipient: [27:14:2061] !Reboot 72057594037927937 (actor [27:58:2098]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:107:2057] recipient: [27:37:2084] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:110:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:111:2057] recipient: [27:109:2132] Leader for TabletID 72057594037927937 is [27:112:2133] sender: [27:113:2057] recipient: [27:109:2132] !Reboot 72057594037927937 (actor [27:58:2098]) rebooted! !Reboot 72057594037927937 (actor [27:58:2098]) tablet resolver refreshed! new actor is[27:112:2133] Leader for TabletID 72057594037927937 is [27:112:2133] sender: [27:198:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:56:2057] recipient: [28:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:56:2057] recipient: [28:53:2096] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:59:2057] recipient: [28:53:2096] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:76:2057] recipient: [28:14:2061] !Reboot 72057594037927937 (actor [28:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! 
Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:108:2057] recipient: [28:37:2084] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:111:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:112:2057] recipient: [28:110:2132] Leader for TabletID 72057594037927937 is [28:113:2133] sender: [28:114:2057] recipient: [28:110:2132] !Reboot 72057594037927937 (actor [28:58:2098]) rebooted! !Reboot 72057594037927937 (actor [28:58:2098]) tablet resolver refreshed! new actor is[28:113:2133] Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:56:2057] recipient: [29:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:56:2057] recipient: [29:51:2096] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:59:2057] recipient: [29:51:2096] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:76:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:56:2057] recipient: [30:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:56:2057] recipient: [30:51:2096] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:59:2057] recipient: [30:51:2096] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:76:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:56:2057] recipient: [31:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:56:2057] recipient: [31:52:2096] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:59:2057] recipient: [31:52:2096] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:76:2057] recipient: [31:14:2061] !Reboot 72057594037927937 (actor [31:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:78:2057] recipient: [31:37:2084] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:81:2057] recipient: [31:80:2111] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:82:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [31:83:2112] sender: [31:84:2057] recipient: [31:80:2111] !Reboot 72057594037927937 (actor [31:58:2098]) rebooted! !Reboot 72057594037927937 (actor [31:58:2098]) tablet resolver refreshed! new actor is[31:83:2112] Leader for TabletID 72057594037927937 is [31:83:2112] sender: [31:169:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:56:2057] recipient: [32:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:56:2057] recipient: [32:52:2096] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:59:2057] recipient: [32:52:2096] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:76:2057] recipient: [32:14:2061] !Reboot 72057594037927937 (actor [32:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:78:2057] recipient: [32:37:2084] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:81:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:82:2057] recipient: [32:80:2111] Leader for TabletID 72057594037927937 is [32:83:2112] sender: [32:84:2057] recipient: [32:80:2111] !Reboot 72057594037927937 (actor [32:58:2098]) rebooted! !Reboot 72057594037927937 (actor [32:58:2098]) tablet resolver refreshed! 
new actor is[32:83:2112] Leader for TabletID 72057594037927937 is [32:83:2112] sender: [32:169:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:56:2057] recipient: [33:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:56:2057] recipient: [33:52:2096] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:59:2057] recipient: [33:52:2096] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:76:2057] recipient: [33:14:2061] !Reboot 72057594037927937 (actor [33:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:79:2057] recipient: [33:37:2084] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:81:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:83:2057] recipient: [33:82:2111] Leader for TabletID 72057594037927937 is [33:84:2112] sender: [33:85:2057] recipient: [33:82:2111] !Reboot 72057594037927937 (actor [33:58:2098]) rebooted! !Reboot 72057594037927937 (actor [33:58:2098]) tablet resolver refreshed! new actor is[33:84:2112] Leader for TabletID 72057594037927937 is [33:84:2112] sender: [33:170:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:56:2057] recipient: [34:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:56:2057] recipient: [34:52:2096] Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:59:2057] recipient: [34:52:2096] Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:76:2057] recipient: [34:14:2061] !Reboot 72057594037927937 (actor [34:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:82:2057] recipient: [34:37:2084] Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:85:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:86:2057] recipient: [34:84:2114] Leader for TabletID 72057594037927937 is [34:87:2115] sender: [34:88:2057] recipient: [34:84:2114] !Reboot 72057594037927937 (actor [34:58:2098]) rebooted! !Reboot 72057594037927937 (actor [34:58:2098]) tablet resolver refreshed! new actor is[34:87:2115] Leader for TabletID 72057594037927937 is [34:87:2115] sender: [34:173:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:56:2057] recipient: [35:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:56:2057] recipient: [35:52:2096] Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:59:2057] recipient: [35:52:2096] Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:76:2057] recipient: [35:14:2061] !Reboot 72057594037927937 (actor [35:58:2098]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:82:2057] recipient: [35:37:2084] Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:85:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:86:2057] recipient: [35:84:2114] Leader for TabletID 72057594037927937 is [35:87:2115] sender: [35:88:2057] recipient: [35:84:2114] !Reboot 72057594037927937 (actor [35:58:2098]) rebooted! !Reboot 72057594037927937 (actor [35:58:2098]) tablet resolver refreshed! 
new actor is[35:87:2115] Leader for TabletID 72057594037927937 is [35:87:2115] sender: [35:173:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:56:2057] recipient: [36:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:56:2057] recipient: [36:52:2096] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:59:2057] recipient: [36:52:2096] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:76:2057] recipient: [36:14:2061] !Reboot 72057594037927937 (actor [36:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:83:2057] recipient: [36:37:2084] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:86:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:87:2057] recipient: [36:85:2114] Leader for TabletID 72057594037927937 is [36:88:2115] sender: [36:89:2057] recipient: [36:85:2114] !Reboot 72057594037927937 (actor [36:58:2098]) rebooted! !Reboot 72057594037927937 (actor [36:58:2098]) tablet resolver refreshed! new actor is[36:88:2115] Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:56:2057] recipient: [37:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:56:2057] recipient: [37:53:2096] Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:59:2057] recipient: [37:53:2096] Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:76:2057] recipient: [37:14:2061] >> KqpImmediateEffects::ConflictingKeyRW1WR2 >> KqpImmediateEffects::TxWithWriteAtTheEnd+UseSink [GOOD] >> KqpImmediateEffects::InsertDuplicates-UseSink [GOOD] >> KqpImmediateEffects::InsertExistingKey+UseSink >> KqpWrite::InsertRevert >> KqpInplaceUpdate::Negative_SingleRowWithKeyCast-UseSink >> KqpImmediateEffects::UpsertExistingKey >> KqpImmediateEffects::InsertDuplicates+UseSink [GOOD] >> KqpImmediateEffects::InsertConflictTxAborted >> KqpImmediateEffects::UpsertAfterInsertWithIndex [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::AlterAfterUpsertBeforeUpsertSelectTransaction-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 3272, MsgBus: 2019 2025-06-24T21:10:31.675598Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626301540003538:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:31.675662Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001b05/r3tmp/tmpxpcHZe/pdisk_1.dat 2025-06-24T21:10:31.963051Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:31.963533Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626301540003510:2079] 1750799431672608 != 1750799431672611 TServer::EnableGrpc on GrpcPort 3272, node 1 2025-06-24T21:10:32.046202Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:32.046286Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:32.048053Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:32.109576Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:32.109600Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:32.109607Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:32.109758Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2019 TClient is connected to server localhost:2019 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:32.699367Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:32.710789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:32.752230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:32.856778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:32.998809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:33.073988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:34.078901Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626314424907056:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.079017Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.598646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.621113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.648139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.672416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.698685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.727624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.754133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.830594Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626314424907717:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.830696Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.830766Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626314424907722:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.834151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:34.843573Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626314424907724:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:34.927953Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626314424907775:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:35.953531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:36.244388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:10:36.327841Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:1887: SelfId: [1:7519626323014842730:2472], Se ... 6346298472920:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:42.195976Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001b05/r3tmp/tmpb6pmxS/pdisk_1.dat 2025-06-24T21:10:42.312779Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:42.312902Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519626346298472901:2079] 1750799442195668 != 1750799442195671 TServer::EnableGrpc on GrpcPort 2055, node 3 2025-06-24T21:10:42.344899Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:42.344981Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:42.346102Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:42.361910Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:42.361931Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:42.361939Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:42.362053Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16797 TClient is connected to server localhost:16797 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:42.760960Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:42.779937Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:42.854351Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:42.997355Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:43.059568Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:43.216431Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:44.988713Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626354888409118:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:44.988806Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:45.043925Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.073942Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.104108Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.133879Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.161713Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.188919Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.225565Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.307695Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626359183377076:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:45.307786Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626359183377081:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:45.307785Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:45.311285Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:45.320766Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519626359183377083:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:45.380869Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519626359183377134:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:46.519932Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:46.758624Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:10:46.902581Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NTY0YmQ3ZDktMzMyMTdiZmUtNjhjZGU2N2YtZTA4YjJhMzc=, ActorId: [3:7519626363478344659:2463], ActorState: ExecuteState, TraceId: 01jyhwewqqdhd3rgzcjymh1bcx, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::AlterDuringUpsertTransaction-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 15567, MsgBus: 16408 2025-06-24T21:10:32.648012Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626306154877648:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:32.648121Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001af1/r3tmp/tmpqMLNw5/pdisk_1.dat 2025-06-24T21:10:32.934311Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626306154877624:2079] 1750799432647185 != 1750799432647188 2025-06-24T21:10:32.944954Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15567, node 1 2025-06-24T21:10:32.989634Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:32.989686Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:32.989700Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:32.989831Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:33.031123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:33.031235Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-24T21:10:33.032856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16408 TClient is connected to server localhost:16408 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:33.430298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:33.445187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:33.543490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:33.655736Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:33.662527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:33.718920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:34.902949Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626314744813855:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.903050Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:35.154587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:35.181378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:35.207816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:35.239587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:35.267030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:35.296643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:35.364203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:35.456018Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626319039781814:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:35.456090Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:35.456147Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626319039781819:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:35.459096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:35.466718Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626319039781821:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:35.553107Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626319039781872:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:36.457161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 6255, MsgBus: 18211 2025-06-24T21:10:37.455843Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626326053457981:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:37.455932Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /hom ... for subscription [3:7519626347293551593:2079] 1750799442461207 != 1750799442461210 TServer::EnableGrpc on GrpcPort 23602, node 3 2025-06-24T21:10:42.603534Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:42.603823Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:42.605684Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:42.638850Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:42.638886Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:42.638896Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:42.639032Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2909 TClient is connected to server localhost:2909 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-24T21:10:43.137428Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:43.154179Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:43.201635Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:43.334818Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:43.415321Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:43.475450Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:45.329581Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626360178455111:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:45.329677Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:45.381919Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.411767Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.439826Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.471184Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.498480Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.563586Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.592722Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.643423Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626360178455771:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:45.643480Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:45.643533Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626360178455776:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:45.646774Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:45.656515Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519626360178455778:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:45.719809Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519626360178455829:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:46.818334Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:46.985710Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:10:47.006413Z node 3 :TX_DATASHARD ERROR: datashard_pipeline.cpp:1570: Shard 72075186224037922 cannot parse tx 281474976715674: Table '/Root/TestTable' scheme changed. 2025-06-24T21:10:47.006580Z node 3 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:864: ActorId: [3:7519626368768390782:2463] TxId: 281474976715674. Ctx: { TraceId: 01jyhwewyw5gm3jbkvh6f26ewm, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YzM4MmEyODMtOGI4ZTk3ZWEtYjUxZTMzYjItMjBmMTM5YTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ERROR: [SCHEME_CHANGED] Table '/Root/TestTable' scheme changed.; 2025-06-24T21:10:47.006846Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=YzM4MmEyODMtOGI4ZTk3ZWEtYjUxZTMzYjItMjBmMTM5YTI=, ActorId: [3:7519626364473423354:2463], ActorState: ExecuteState, TraceId: 01jyhwewyw5gm3jbkvh6f26ewm, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::InsertAbort_Params_Conflict-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 61887, MsgBus: 26171 2025-06-24T21:10:31.680266Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626299262496764:2163];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:31.680742Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001b98/r3tmp/tmpTBpDH8/pdisk_1.dat 2025-06-24T21:10:31.967378Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:31.983016Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626299262496627:2079] 1750799431672609 != 1750799431672612 TServer::EnableGrpc on GrpcPort 61887, node 1 2025-06-24T21:10:32.045827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:32.045922Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:32.047684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:32.109584Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:32.109615Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:32.109635Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:32.109749Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26171 TClient is connected to server localhost:26171 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:10:32.699598Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:32.754322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:32.776772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:32.898692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:33.022508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:33.082919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:34.129629Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626312147400150:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.129781Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.599427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.625202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.649378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.672416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.698347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.765007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.796437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:34.857966Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626312147400813:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.858050Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.858091Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626312147400818:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:34.860994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:34.869322Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626312147400820:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:34.960325Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626312147400871:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 61246, MsgBus: 17920 2025-06-24T21:10:36.911687Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626320374079823:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:36.911781Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001b98/r3tmp/tmpcr8eO1/pdisk_1.dat 2025-06-24T21:10:37.006205Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:37.009477Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626320374079806:2079] 1750799436911081 != 1750799436911084 TServer::EnableGrpc on GrpcPort 612 ... 9:2079] 1750799442065391 != 1750799442065394 2025-06-24T21:10:42.197522Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:42.197615Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:42.202579Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 65369, node 3 2025-06-24T21:10:42.237321Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:42.237345Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:42.237366Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:42.237508Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28538 TClient is connected to server localhost:28538 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:10:42.686884Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:42.697449Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:42.770612Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:42.953881Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:43.030065Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:43.148470Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:45.035824Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626358295976319:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:45.035933Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:45.087047Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.117026Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.143281Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.167026Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.196239Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.224661Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.253751Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.314367Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626358295976979:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:45.314447Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:45.314450Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626358295976984:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:45.317944Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:45.329429Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519626358295976986:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:45.387993Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519626358295977037:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:46.738151Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [3:7519626362590944596:2475], TxId: 281474976715673, task: 1. Ctx: { SessionId : ydb://session/3?node_id=3&id=NjUwOThhMGEtODUzYjlkYzgtYThkOTQ2ZDEtYjBhMGI5ZDI=. TraceId : 01jyhwewdm0721dwkey0nnyhyw. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }. 2025-06-24T21:10:46.738509Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7519626362590944598:2476], TxId: 281474976715673, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=3&id=NjUwOThhMGEtODUzYjlkYzgtYThkOTQ2ZDEtYjBhMGI5ZDI=. TraceId : 01jyhwewdm0721dwkey0nnyhyw. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [3:7519626362590944593:2463], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-24T21:10:46.738923Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NjUwOThhMGEtODUzYjlkYzgtYThkOTQ2ZDEtYjBhMGI5ZDI=, ActorId: [3:7519626362590944562:2463], ActorState: ExecuteState, TraceId: 01jyhwewdm0721dwkey0nnyhyw, Create QueryResponse for error on request, msg: 2025-06-24T21:10:47.065881Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519626345411072808:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:47.065971Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TSchemeShardTTLTests::ShouldSkipDroppedColumn [GOOD] >> TKeyValueTest::TestInlineEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::TxWithWriteAtTheEnd+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 10847, MsgBus: 25684 2025-06-24T21:10:37.380819Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626327850342533:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:37.380890Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001a18/r3tmp/tmpQaLU8U/pdisk_1.dat 2025-06-24T21:10:37.693282Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:37.695331Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626327850342511:2079] 1750799437379783 != 1750799437379786 TServer::EnableGrpc on GrpcPort 10847, node 1 2025-06-24T21:10:37.751214Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:37.751235Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:37.751248Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:37.751382Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:37.766398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:37.766494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:37.768295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25684 TClient is connected to server localhost:25684 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:38.258624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:38.293180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:38.391055Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:38.436283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:38.564355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:38.627464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.121045Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626340735246054:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.121170Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.456545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.484612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.510098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.536882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.565048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.609404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.635874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:40.717319Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626340735246715:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.717389Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.717496Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626340735246720:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:40.720898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:40.729919Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626340735246722:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:40.786980Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626340735246773:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:41.751279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.380987Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626327850342533:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:42.381053Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 27917, MsgBus: 61238 2025-06-24T21:10:43.124453Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626349621805441:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:43.124552Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001a18/r3tmp/tmposCMqn/pdisk_1.dat 2025-06-24T21:10:43.224754Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:43.226527Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626349621805421:2079] 1750799443123970 != 1750799443123973 TServer::EnableGrpc on GrpcPort 27917, node 2 2025-06-24T21:10:43.255282Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:43.255367Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:43.257057Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:43.275416Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:43.275442Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:43.275451Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:43.275581Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61238 TClient is connected to server localhost:61238 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:43.738188Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:43.748104Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:43.793452Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:43.931361Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:44.014773Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:44.140285Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:45.888532Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626358211741639:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:45.888592Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:45.942033Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.969625Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.997937Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:46.024794Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:46.052444Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:46.120812Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:46.190258Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:46.268744Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626362506709603:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:46.268829Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:46.269049Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626362506709608:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:46.272216Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:46.280924Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626362506709610:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:46.361473Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626362506709661:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:47.109471Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpImmediateEffects::ConflictingKeyW1RWR2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::UpsertAfterInsertWithIndex [GOOD] Test command err: Trying to start YDB, gRPC: 1346, MsgBus: 24735 2025-06-24T21:10:34.322859Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626314571490598:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:34.322992Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001a8a/r3tmp/tmp2BGZur/pdisk_1.dat 2025-06-24T21:10:34.551393Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626314571490578:2079] 1750799434322132 != 1750799434322135 2025-06-24T21:10:34.564909Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1346, node 1 2025-06-24T21:10:34.637925Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:34.637952Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:34.637963Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:34.638061Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:34.664606Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:34.664732Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:34.666379Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24735 TClient is connected to server localhost:24735 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:35.127397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:35.156777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:35.264706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:35.358996Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:35.391292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:35.467023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:36.868300Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626323161426818:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:36.868432Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.113748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.140071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.163039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.188685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.213049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.281342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.317261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:37.398581Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626327456394782:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.398659Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.398680Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626327456394787:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:37.401843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:37.409848Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626327456394789:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:37.482329Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626327456394840:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:38.427138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 17699, MsgBus: 13877 2025-06-24T21:10:39.404916Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626335015670093:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:39.404971Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home ... bscription [2:7519626335015670074:2079] 1750799439404538 != 1750799439404541 2025-06-24T21:10:39.523224Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17699, node 2 2025-06-24T21:10:39.534126Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:39.534208Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:39.535953Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:39.560996Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:39.561024Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:39.561030Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:39.561110Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13877 TClient is connected to server localhost:13877 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:39.917787Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:39.933680Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.003911Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.158508Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.233659Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.418922Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:42.144408Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626347900573598:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.144476Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.192559Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.234796Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.261356Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.288049Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.314977Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.346384Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.377654Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.433935Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626347900574253:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.434005Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.434010Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626347900574258:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.436970Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:42.446460Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626347900574260:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:42.521432Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626347900574313:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:43.523906Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.559721Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.593734Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:44.405436Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626335015670093:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:44.405510Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpImmediateEffects::UpsertDuplicates >> KqpInplaceUpdate::Negative_SingleRowListFromRange+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::ShouldSkipDroppedColumn [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:27.749064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:27.749124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:27.749160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:27.749192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 
2025-06-24T21:10:27.749235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:27.749258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:27.749301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:27.749355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:27.749969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:27.750301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:27.829303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:27.829354Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:27.844369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:27.844768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:27.844953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:27.852074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:27.852251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:27.852872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:27.853168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:27.856266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:27.856416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:27.857243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:27.857286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:27.857508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:27.857560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:27.857644Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:27.857721Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:27.862569Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:27.946477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:27.946666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:27.946831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:27.946872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:27.947081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:27.947130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:27.949050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:27.949193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:27.949344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:27.949384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:27.949417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:27.949439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:27.950785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:27.950851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: 
NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:27.950890Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:27.952134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:27.952166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:27.952216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:27.952247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:27.954650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:27.955871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:27.956003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:27.956677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:27.956771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:27.956811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:27.957007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:27.957040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:27.957210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:27.957269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:27.958535Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:27.958576Z node 1 :FLAT_TX_SCHEMESHARD ... 25-06-24T21:10:28.263402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:507:2466] TestWaitNotification: OK eventTxId 103 2025-06-24T21:10:33.539003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:10:33.539067Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:35.323244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0311 2025-06-24T21:10:35.334170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0541 2025-06-24T21:10:35.375241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-24T21:10:35.375405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-24T21:10:35.375476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:10:35.375562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-24T21:10:35.375601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0 2025-06-24T21:10:35.375726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:10:35.375788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-24T21:10:35.386157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-24T21:10:38.812825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0086 2025-06-24T21:10:38.823449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0145 
2025-06-24T21:10:38.864456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-24T21:10:38.864614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-24T21:10:38.864692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:10:38.864772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-24T21:10:38.864806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0 2025-06-24T21:10:38.864832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:10:38.864855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-24T21:10:38.875228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-24T21:10:42.223803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0086 2025-06-24T21:10:42.234374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0145 2025-06-24T21:10:42.285837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-24T21:10:42.286020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-24T21:10:42.286095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:10:42.286267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-24T21:10:42.286318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0 2025-06-24T21:10:42.286357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 
maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:10:42.286394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-24T21:10:42.296788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-24T21:10:45.577084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0032 2025-06-24T21:10:45.587575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0048 2025-06-24T21:10:45.628519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-24T21:10:45.628718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-24T21:10:45.628792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:10:45.628898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-24T21:10:45.628950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0 2025-06-24T21:10:45.628990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:10:45.629034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-24T21:10:45.639408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-24T21:10:49.012176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6712: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944 2025-06-24T21:10:49.012308Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-24T21:10:49.012514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-24T21:10:49.012701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:213: Run conditional erase, tabletId: 72075186233409547, request: TableId: 2 Expiration { ColumnId: 2 WallClockTimestamp: 60024000 ColumnUnit: UNIT_AUTO } 
SchemaVersion: 3 Indexes { OwnerId: 72057594046678944 PathId: 4 SchemaVersion: 1 KeyMap { IndexColumnId: 1 MainColumnId: 3 } KeyMap { IndexColumnId: 2 MainColumnId: 1 } } Limits { BatchMaxBytes: 512000 BatchMinKeys: 1 BatchMaxKeys: 256 }, at schemeshard: 72057594046678944 2025-06-24T21:10:49.013137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6740: Conditional erase accepted: tabletId: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:10:49.013816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-24T21:10:49.013869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful conditional erase: tabletId: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:10:49.018096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-24T21:10:49.018292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-24T21:10:49.018362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:1, run at: 1970-01-01T01:01:00.024000Z, at schemeshard: 72057594046678944 2025-06-24T21:10:49.018440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 >> KqpImmediateEffects::InteractiveTxWithReadAtTheEnd+UseSink >> KqpImmediateEffects::ConflictingKeyR1WRR2 >> KqpInplaceUpdate::SingleRowStr-UseSink [GOOD] >> KqpWrite::CastValues ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestInlineEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! 
new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:82:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:82:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! 
new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:83:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:85:2114] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:89:2057] recipient: [7:85:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! new actor is[7:88:2115] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:174:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:85:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:88:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:89:2057] recipient: [8:87:2116] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:91:2057] recipient: [8:87:2116] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! 
new actor is[8:90:2117] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:176:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:85:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:88:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:89:2057] recipient: [9:87:2116] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:91:2057] recipient: [9:87:2116] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! new actor is[9:90:2117] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:176:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:86:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:89:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:90:2057] recipient: [10:88:2116] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:92:2057] recipient: [10:88:2116] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! new actor is[10:91:2117] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:177:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:89:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:92:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:93:2057] recipient: [11:91:2119] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:95:2057] recipient: [11:91:2119] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! 
new actor is[11:94:2120] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:180:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... boot 72057594037927937 (actor [19:58:2098]) tablet resolver refreshed! new actor is[19:87:2115] Leader for TabletID 72057594037927937 is [19:87:2115] sender: [19:173:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:56:2057] recipient: [20:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:56:2057] recipient: [20:52:2096] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:59:2057] recipient: [20:52:2096] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:76:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:58:2098]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:82:2057] recipient: [20:37:2084] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:85:2057] recipient: [20:84:2114] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:86:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:87:2115] sender: [20:88:2057] recipient: [20:84:2114] !Reboot 72057594037927937 (actor [20:58:2098]) rebooted! !Reboot 72057594037927937 (actor [20:58:2098]) tablet resolver refreshed! new actor is[20:87:2115] Leader for TabletID 72057594037927937 is [20:87:2115] sender: [20:173:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:56:2057] recipient: [21:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:56:2057] recipient: [21:52:2096] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:59:2057] recipient: [21:52:2096] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:76:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:83:2057] recipient: [21:37:2084] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:85:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:87:2057] recipient: [21:86:2114] Leader for TabletID 72057594037927937 is [21:88:2115] sender: [21:89:2057] recipient: [21:86:2114] !Reboot 72057594037927937 (actor [21:58:2098]) rebooted! !Reboot 72057594037927937 (actor [21:58:2098]) tablet resolver refreshed! new actor is[21:88:2115] Leader for TabletID 72057594037927937 is [21:88:2115] sender: [21:106:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:56:2057] recipient: [22:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:56:2057] recipient: [22:52:2096] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:59:2057] recipient: [22:52:2096] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:76:2057] recipient: [22:14:2061] !Reboot 72057594037927937 (actor [22:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:85:2057] recipient: [22:37:2084] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:88:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:89:2057] recipient: [22:87:2116] Leader for TabletID 72057594037927937 is [22:90:2117] sender: [22:91:2057] recipient: [22:87:2116] !Reboot 72057594037927937 (actor [22:58:2098]) rebooted! !Reboot 72057594037927937 (actor [22:58:2098]) tablet resolver refreshed! new actor is[22:90:2117] Leader for TabletID 72057594037927937 is [22:90:2117] sender: [22:176:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:56:2057] recipient: [23:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:56:2057] recipient: [23:53:2096] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:59:2057] recipient: [23:53:2096] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:76:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:58:2098]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:85:2057] recipient: [23:37:2084] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:87:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:89:2057] recipient: [23:88:2116] Leader for TabletID 72057594037927937 is [23:90:2117] sender: [23:91:2057] recipient: [23:88:2116] !Reboot 72057594037927937 (actor [23:58:2098]) rebooted! !Reboot 72057594037927937 (actor [23:58:2098]) tablet resolver refreshed! new actor is[23:90:2117] Leader for TabletID 72057594037927937 is [23:90:2117] sender: [23:176:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:56:2057] recipient: [24:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:56:2057] recipient: [24:51:2096] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:59:2057] recipient: [24:51:2096] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:76:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:86:2057] recipient: [24:37:2084] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:89:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:90:2057] recipient: [24:88:2116] Leader for TabletID 72057594037927937 is [24:91:2117] sender: [24:92:2057] recipient: [24:88:2116] !Reboot 72057594037927937 (actor [24:58:2098]) rebooted! !Reboot 72057594037927937 (actor [24:58:2098]) tablet resolver refreshed! new actor is[24:91:2117] Leader for TabletID 72057594037927937 is [24:91:2117] sender: [24:109:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:56:2057] recipient: [25:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:56:2057] recipient: [25:52:2096] Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:59:2057] recipient: [25:52:2096] Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:76:2057] recipient: [25:14:2061] !Reboot 72057594037927937 (actor [25:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:88:2057] recipient: [25:37:2084] Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:91:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:92:2057] recipient: [25:90:2118] Leader for TabletID 72057594037927937 is [25:93:2119] sender: [25:94:2057] recipient: [25:90:2118] !Reboot 72057594037927937 (actor [25:58:2098]) rebooted! !Reboot 72057594037927937 (actor [25:58:2098]) tablet resolver refreshed! new actor is[25:93:2119] Leader for TabletID 72057594037927937 is [25:93:2119] sender: [25:179:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:56:2057] recipient: [26:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:56:2057] recipient: [26:52:2096] Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:59:2057] recipient: [26:52:2096] Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:76:2057] recipient: [26:14:2061] !Reboot 72057594037927937 (actor [26:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:88:2057] recipient: [26:37:2084] Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:91:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:92:2057] recipient: [26:90:2118] Leader for TabletID 72057594037927937 is [26:93:2119] sender: [26:94:2057] recipient: [26:90:2118] !Reboot 72057594037927937 (actor [26:58:2098]) rebooted! !Reboot 72057594037927937 (actor [26:58:2098]) tablet resolver refreshed! new actor is[26:93:2119] Leader for TabletID 72057594037927937 is [26:93:2119] sender: [26:179:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:56:2057] recipient: [27:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:56:2057] recipient: [27:52:2096] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:59:2057] recipient: [27:52:2096] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:76:2057] recipient: [27:14:2061] !Reboot 72057594037927937 (actor [27:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:89:2057] recipient: [27:37:2084] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:92:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:93:2057] recipient: [27:91:2118] Leader for TabletID 72057594037927937 is [27:94:2119] sender: [27:95:2057] recipient: [27:91:2118] !Reboot 72057594037927937 (actor [27:58:2098]) rebooted! !Reboot 72057594037927937 (actor [27:58:2098]) tablet resolver refreshed! new actor is[27:94:2119] Leader for TabletID 72057594037927937 is [27:94:2119] sender: [27:180:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:56:2057] recipient: [28:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:56:2057] recipient: [28:53:2096] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:59:2057] recipient: [28:53:2096] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:76:2057] recipient: [28:14:2061] !Reboot 72057594037927937 (actor [28:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:92:2057] recipient: [28:37:2084] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:95:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:96:2057] recipient: [28:94:2121] Leader for TabletID 72057594037927937 is [28:97:2122] sender: [28:98:2057] recipient: [28:94:2121] !Reboot 72057594037927937 (actor [28:58:2098]) rebooted! !Reboot 72057594037927937 (actor [28:58:2098]) tablet resolver refreshed! new actor is[28:97:2122] Leader for TabletID 72057594037927937 is [28:97:2122] sender: [28:183:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:56:2057] recipient: [29:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:56:2057] recipient: [29:51:2096] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:59:2057] recipient: [29:51:2096] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:76:2057] recipient: [29:14:2061] !Reboot 72057594037927937 (actor [29:58:2098]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:92:2057] recipient: [29:37:2084] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:94:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:96:2057] recipient: [29:95:2121] Leader for TabletID 72057594037927937 is [29:97:2122] sender: [29:98:2057] recipient: [29:95:2121] !Reboot 72057594037927937 (actor [29:58:2098]) rebooted! !Reboot 72057594037927937 (actor [29:58:2098]) tablet resolver refreshed! new actor is[29:97:2122] Leader for TabletID 72057594037927937 is [29:97:2122] sender: [29:183:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:56:2057] recipient: [30:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:56:2057] recipient: [30:51:2096] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:59:2057] recipient: [30:51:2096] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:76:2057] recipient: [30:14:2061] >> TFlatTableExecutor_ColumnGroups::TestManyRows [GOOD] >> TFlatTableExecutor_CompactionScan::TestCompactionScan >> KqpImmediateEffects::TxWithWriteAtTheEnd-UseSink [GOOD] >> KqpImmediateEffects::UnobservedUncommittedChangeConflict >> KqpEffects::InsertAbort_Select_Duplicates+UseSink [GOOD] >> KqpEffects::InsertAbort_Select_Conflict+UseSink >> KqpWrite::UpsertNullKey >> KqpImmediateEffects::ReplaceExistingKey >> TFlatTableExecutor_CompactionScan::TestCompactionScan [GOOD] >> TFlatTableExecutor_CompressedSelectRows::TestCompressedSelectRows >> KqpEffects::InsertAbort_Select_Duplicates-UseSink [GOOD] >> KqpEffects::InsertRevert_Literal_Conflict >> TFlatTableExecutor_CompressedSelectRows::TestCompressedSelectRows [GOOD] >> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionDirect ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ConflictingKeyW1RWR2 [GOOD] Test command err: Trying to start YDB, gRPC: 7097, MsgBus: 28573 2025-06-24T21:10:39.565563Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626334045020834:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:39.565656Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0019ec/r3tmp/tmpYhmxoo/pdisk_1.dat 2025-06-24T21:10:39.856655Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626334045020814:2079] 1750799439564779 != 1750799439564782 2025-06-24T21:10:39.867629Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7097, node 1 2025-06-24T21:10:39.936267Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:39.936392Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:39.938023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:39.940836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:39.940867Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:39.940875Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:39.940994Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28573 TClient is connected to server localhost:28573 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:40.450637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:40.468678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:40.571040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.573112Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:40.722176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.772260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:42.162341Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626346929924338:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.162429Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.408389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.431009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.455698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.480583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.507294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.537463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.568557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.624666Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626346929925000:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.624736Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626346929925005:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.624739Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.628244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:42.637069Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626346929925007:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:42.723332Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626346929925058:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:43.659436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 1853, MsgBus: 24595 2025-06-24T21:10:44.625214Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626354343531442:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:44.625282Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/ ... fectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:45.153221Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:45.170707Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:45.217106Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:45.361094Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:45.434398Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:45.630747Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:47.229187Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626367228434947:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:47.229288Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:47.272306Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.299456Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.328803Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.353700Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.379868Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.408564Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.437524Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.490438Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626367228435600:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:47.490535Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626367228435605:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:47.490569Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:47.493586Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:47.502389Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626367228435607:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:47.599745Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626367228435658:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:48.457137Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:48.908893Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because it cannot acquire locks;tx_id=6; 2025-06-24T21:10:48.919270Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 6 at tablet 72075186224037922 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because it cannot acquire locks" issue_code: 2001 severity: 1 } 2025-06-24T21:10:48.919453Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 6 at tablet 72075186224037922 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because it cannot acquire locks" issue_code: 2001 severity: 1 } 2025-06-24T21:10:48.919703Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [2:7519626371523403481:2473], Table: `/Root/TestImmediateEffects` ([72057594046644480:17:1]), SessionActorId: [2:7519626371523403225:2473]Got LOCKS BROKEN for table `/Root/TestImmediateEffects`. ShardID=72075186224037922, Sink=[2:7519626371523403481:2473].{
: Error: Operation is aborting because it cannot acquire locks, code: 2001 } 2025-06-24T21:10:48.920336Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519626371523403445:2473], SessionActorId: [2:7519626371523403225:2473], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/TestImmediateEffects`., code: 2001
: Error: Operation is aborting because it cannot acquire locks, code: 2001 . sessionActorId=[2:7519626371523403225:2473]. isRollback=0 2025-06-24T21:10:48.920685Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=MzI1MTk4NzgtM2YwNWRiYjMtZTFlMDY5MzgtNTE5ZGI3NTQ=, ActorId: [2:7519626371523403225:2473], ActorState: ExecuteState, TraceId: 01jyhweyqn0w27sjmw1addbphn, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7519626371523403475:2473] from: [2:7519626371523403445:2473] 2025-06-24T21:10:48.920808Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519626371523403475:2473] TxId: 281474976715676. Ctx: { TraceId: 01jyhweyqn0w27sjmw1addbphn, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzI1MTk4NzgtM2YwNWRiYjMtZTFlMDY5MzgtNTE5ZGI3NTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/TestImmediateEffects`., code: 2001 subissue: {
: Error: Operation is aborting because it cannot acquire locks, code: 2001 } } 2025-06-24T21:10:48.921080Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=MzI1MTk4NzgtM2YwNWRiYjMtZTFlMDY5MzgtNTE5ZGI3NTQ=, ActorId: [2:7519626371523403225:2473], ActorState: ExecuteState, TraceId: 01jyhweyqn0w27sjmw1addbphn, Create QueryResponse for error on request, msg: >> KqpImmediateEffects::Insert [GOOD] >> KqpImmediateEffects::ImmediateUpdateSelect >> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionDirect [GOOD] >> TFlatTableExecutorGC::TestGCVectorDeduplicaton [GOOD] >> KqpImmediateEffects::ConflictingKeyR1WR2 >> KqpEffects::InsertRevert_Literal_Duplicates [GOOD] >> KqpEffects::AlterAfterUpsertTransaction+UseSink >> KqpEffects::InsertAbort_Literal_Duplicates+UseSink [GOOD] >> KqpEffects::InsertAbort_Literal_Duplicates-UseSink >> KqpImmediateEffects::ForceImmediateEffectsExecution-UseSink >> KqpImmediateEffects::UpdateAfterUpsert >> KqpImmediateEffects::ConflictingKeyW1RR2 [GOOD] >> KqpImmediateEffects::ConflictingKeyRW1WRR2 >> KqpImmediateEffects::DeleteAfterUpsert ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> TFlatTableExecutorGC::TestGCVectorDeduplicaton [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-06-24T21:09:37.866514Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.007 II| FAKE_ENV: TNanny initiates TDummy tablet 72057594037927937 birth 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00000.012 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.012 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.156 II| TABLET_EXECUTOR: LSnap{1:2, on 2:301, 5835b, wait} done, Waste{2:0, 711970b +(0, 0b), 300 trc} 00000.182 II| TABLET_EXECUTOR: Leader{1:2:346} starting compaction 00000.183 II| TABLET_EXECUTOR: Leader{1:2:347} starting Scan{1 on 2, Compact{1.2.346, eph 1}} 00000.183 II| TABLET_EXECUTOR: Leader{1:2:347} started compaction 1 00000.183 II| TABLET_OPS_HOST: Scan{1 on 2, Compact{1.2.346, eph 1}} begin on TSubset{head 2, 1m 0p 0c} 00000.186 II| TABLET_OPS_HOST: Scan{1 on 2, Compact{1.2.346, eph 1}} end=Done, 103r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 17 of 23 ~1p 00000.187 II| OPS_COMPACT: Compact{1.2.346, eph 1} end=Done, 10 blobs 72r (max 103), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 17 +6, (131561 20446 213826)b }, ecr=1.000 00000.209 II| TABLET_EXECUTOR: Leader{1:2:348} Compact 1 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 346, product {1 parts epoch 2} done 00000.212 II| TABLET_EXECUTOR: Leader{1:2:351} starting compaction 00000.213 II| TABLET_EXECUTOR: Leader{1:2:352} starting Scan{3 on 3, Compact{1.2.351, eph 1}} 00000.213 II| TABLET_EXECUTOR: Leader{1:2:352} started compaction 3 00000.213 II| TABLET_OPS_HOST: Scan{3 on 3, Compact{1.2.351, eph 1}} begin on TSubset{head 2, 1m 0p 0c} 00000.215 II| TABLET_OPS_HOST: Scan{3 on 3, Compact{1.2.351, eph 1}} end=Done, 97r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 13 of 20 ~1p 00000.216 II| OPS_COMPACT: Compact{1.2.351, eph 1} end=Done, 9 blobs 
72r (max 97), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 13 +5, (136895 13551 161812)b }, ecr=1.000 00000.218 II| TABLET_EXECUTOR: Leader{1:2:353} Compact 3 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 351, product {1 parts epoch 2} done 00000.418 II| TABLET_EXECUTOR: LSnap{1:2, on 2:601, 8704b, wait} done, Waste{2:0, 1592529b +(147, 131869b), 300 trc} 00000.480 II| TABLET_EXECUTOR: Leader{1:2:692} starting compaction 00000.480 II| TABLET_EXECUTOR: Leader{1:2:693} starting Scan{5 on 2, Compact{1.2.692, eph 2}} 00000.480 II| TABLET_EXECUTOR: Leader{1:2:693} started compaction 5 00000.480 II| TABLET_OPS_HOST: Scan{5 on 2, Compact{1.2.692, eph 2}} begin on TSubset{head 3, 1m 0p 0c} 00000.484 II| TABLET_OPS_HOST: Scan{5 on 2, Compact{1.2.692, eph 2}} end=Done, 104r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 16 of 19 ~1p 00000.484 II| OPS_COMPACT: Compact{1.2.692, eph 2} end=Done, 14 blobs 104r (max 104), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 16 +10, (135866 6654 218876)b }, ecr=1.000 00000.508 II| TABLET_EXECUTOR: Leader{1:2:694} Compact 5 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 692, product {1 parts epoch 3} done 00000.510 II| TABLET_EXECUTOR: Leader{1:2:695} starting compaction 00000.510 II| TABLET_EXECUTOR: Leader{1:2:696} starting Scan{7 on 2, Compact{1.2.695, eph 2}} 00000.510 II| TABLET_EXECUTOR: Leader{1:2:696} started compaction 7 00000.510 II| TABLET_OPS_HOST: Scan{7 on 2, Compact{1.2.695, eph 2}} begin on TSubset{head 0, 0m 2p 0c} 00000.519 II| TABLET_OPS_HOST: Scan{7 on 2, Compact{1.2.695, eph 2}} end=Done, 152r seen, TFwd{fetch=260KiB,saved=260KiB,usage=260KiB,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=7}, trace 42 of 49 ~3p 00000.519 II| OPS_COMPACT: Compact{1.2.695, eph 2} end=Done, 4 blobs 125r (max 176), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 42 +0, (221336 27100 369931)b }, ecr=1.000 00000.521 II| TABLET_EXECUTOR: Leader{1:2:698} Compact 7 on TGenCompactionParams{2: gen 1 epoch 0, 2 parts} step 695, product {1 parts epoch 0} done 00000.583 II| TABLET_EXECUTOR: Leader{1:2:720} starting compaction 00000.583 II| TABLET_EXECUTOR: Leader{1:2:721} starting Scan{9 on 3, Compact{1.2.720, eph 2}} 00000.583 II| TABLET_EXECUTOR: Leader{1:2:721} started compaction 9 00000.583 II| TABLET_OPS_HOST: Scan{9 on 3, Compact{1.2.720, eph 2}} begin on TSubset{head 3, 1m 0p 0c} 00000.586 II| TABLET_OPS_HOST: Scan{9 on 3, Compact{1.2.720, eph 2}} end=Done, 104r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 7 of 14 ~1p 00000.586 II| OPS_COMPACT: Compact{1.2.720, eph 2} end=Done, 10 blobs 104r (max 104), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 7 +6, (166770 6829 105736)b }, ecr=1.000 00000.589 II| TABLET_EXECUTOR: Leader{1:2:722} Compact 9 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 720, product {1 parts epoch 3} done 00000.590 II| TABLET_EXECUTOR: Leader{1:2:723} starting compaction 00000.590 II| TABLET_EXECUTOR: Leader{1:2:724} starting Scan{11 on 3, Compact{1.2.723, eph 2}} 00000.590 II| TABLET_EXECUTOR: Leader{1:2:724} started compaction 11 00000.590 II| TABLET_OPS_HOST: Scan{11 on 3, Compact{1.2.723, eph 2}} begin on TSubset{head 0, 0m 2p 0c} 00000.596 II| TABLET_OPS_HOST: Scan{11 on 3, Compact{1.2.723, eph 2}} end=Done, 151r seen, 
TFwd{fetch=296KiB,saved=296KiB,usage=296KiB,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=8}, trace 25 of 31 ~3p 00000.597 II| OPS_COMPACT: Compact{1.2.723, eph 2} end=Done, 4 blobs 121r (max 176), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 25 +0, (269766 20380 210847)b }, ecr=1.000 00000.629 II| TABLET_EXECUTOR: Leader{1:2:725} Compact 11 on TGenCompactionParams{3: gen 1 epoch 0, 2 parts} step 723, product {1 parts epoch 0} done 00000.749 II| TABLET_EXECUTOR: LSnap{1:2, on 2:901, 10446b, wait} done, Waste{2:0, 2329962b +(185, 808565b), 300 trc} 00000.853 II| TABLET_EXECUTOR: Leader{1:2:1006} starting compaction 00000.853 II| TABLET_EXECUTOR: Leader{1:2:1007} starting Scan{13 on 3, Compact{1.2.1006, eph 3}} 00000.853 II| TABLET_EXECUTOR: Leader{1:2:1007} started compaction 13 00000.853 II| TABLET_OPS_HOST: Scan{13 on 3, Compact{1.2.1006, eph 3}} begin on TSubset{head 4, 1m 0p 0c} 00000.857 II| TABLET_OPS_HOST: Scan{13 on 3, Compact{1.2.1006, eph 3}} end=Done, 108r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 24 of 25 ~1p 00000.857 II| OPS_COMPACT: Compact{1.2.1006, eph 3} end=Done, 10 blobs 108r (max 108), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 24 +6, (124961 27411 275115)b }, ecr=1.000 00000.889 II| TABLET_EXECUTOR: Leader{1:2:1008} Compact 13 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 1006, product {1 parts epoch 4} done 00000.949 II| TABLET_EXECUTOR: Leader{1:2:1050} starting compaction 00000.949 II| TABLET_EXECUTOR: Leader{1:2:1051} starting Scan{15 on 2, Compact{1.2.1050, eph 3}} 00000.949 II| TABLET_EXECUTOR: Leader{1:2:1051} started compaction 15 00000.949 II| TABLET_OPS_HOST: Scan{15 on 2, Compact{1.2.1050, eph 3}} begin on TSubset{head 4, 1m 0p 0c} 00000.952 II| TABLET_OPS_HOST: Scan{15 on 2, Compact{1.2.1050, eph 3}} end=Done, 103r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 18 of 20 ~1p 00000.952 II| OPS_COMPACT: Compact{1.2.1050, eph 3} end=Done, 10 blobs 103r (max 103), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 18 +6, (142442 20601 215965)b }, ecr=1.000 00000.971 II| TABLET_EXECUTOR: Leader{1:2:1052} Compact 15 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 1050, product {1 parts epoch 4} done 00001.099 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1201, 11450b, wait} done, Waste{2:0, 3341014b +(141, 40893b), 300 trc} 00001.203 II| TABLET_EXECUTOR: Leader{1:2:1321} starting compaction 00001.203 II| TABLET_EXECUTOR: Leader{1:2:1322} starting Scan{17 on 3, Compact{1.2.1321, eph 4}} 00001.203 II| TABLET_EXECUTOR: Leader{1:2:1322} started compaction 17 00001.203 II| TABLET_OPS_HOST: Scan{17 on 3, Compact{1.2.1321, eph 4}} begin on TSubset{head 5, 1m 0p 0c} 00001.205 II| TABLET_OPS_HOST: Scan{17 on 3, Compact{1.2.1321, eph 4}} end=Done, 110r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 13 of 16 ~1p 00001.205 II| OPS_COMPACT: Compact{1.2.1321, eph 4} end=Done, 8 blobs 110r (max 110), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 13 +4, (160005 20781 158017)b }, ecr=1.000 00001.228 II| TABLET_EXECUTOR: Leader{1:2:1322} Compact 17 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 1321, product {1 parts epoch 5} done 00001.229 II| TABLET_EXECUTOR: Leader{1:2:1323} starting compaction 00001.229 II| TABLET_EXECUTOR: Leader{1:2:1324} starting 
Scan{19 on 3, Compact{1.2.1323, eph 4}} 00001.229 II| TABLET_EXECUTOR: Leader{1:2:1324} started compaction 19 00001.230 II| TABLET_OPS_HOST: Scan{19 on 3, Compact{1.2.1323, eph 4}} begin on TSubset{head 0, 0m 2p 0c} 00001.234 II| TABLET_OPS_HOST: Scan{19 on 3, Compact{1.2.1323, eph 4}} end=Done, 181r seen, TFwd{fetch=270KiB,saved=270KiB,usage=263KiB,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=6}, trace 40 of 47 ~3p 00001.234 II| OPS_COMPACT: Compact{1.2.1323, eph 4} end=Done, 4 blobs 181r (max 218), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 40 +0, (234252 34568 369813)b }, ecr=1.000 00001.236 II| TABLET_EXECUTOR: Leader{1:2:1326} Compact 19 on TGenCompactionParams{3: gen 1 epoch 0, 2 parts} step 1323, product {1 parts epoch 0} done 00001.341 II| TABLET_EXECUTOR: Leader{1:2:1433} starting compaction 00001.341 II| TABLET_EXECUTOR: Leader{1:2:1434} starting Scan{21 on 2, Compact{1.2.1433, eph 4}} 00001.342 II| TABLET_EXECUTOR: Leader{1:2:1434} started compaction 21 00001.342 II| TABLET_OPS_HOST: Scan{21 on 2, Compact{1.2.1433, eph 4}} begin on TSubset{head 5, 1m 0p 0c} 00001.344 II| TABLET_OPS_HOST: Scan{21 on 2, Compact{1.2.1433, eph 4}} end=Done, 111r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 12 of 15 ~1p 00001.344 II| OPS_COMPACT: Compact{1.2.1433, eph 4} end=Done, 9 blobs 111r (max 111), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 12 +5, (170689 26718 158084)b }, ecr=1.000 00001.426 II| TABLET_EXECUTOR: Leader{1:2:1434} Compact 21 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 1433, product {1 parts epoch 5} done 00001.428 II| TABLET_EXECUTOR: Leader{1:2:1435} starting compaction 00001.428 II| TABLET_EXECUTOR: Leader{1:2:1436} starting Scan{23 on 2, Compact{1.2.1435, eph 4}} 00001.428 II| TABLET_EXECUTOR: Leader{1:2:1436} started compaction 23 00001.428 II| TABLET_OPS_HOST: Scan{23 on 2, Compact{1.2.1435, eph 4}} begin on TSubset{head 0, 0m 2p 0c} 00001.434 II| TABLET_OPS_HOST: Scan{23 on 2, Compact{1.2.1435, eph 4}} end=Done, 171r seen, TFwd{fetch=291KiB,saved=291KiB,usage=291KiB,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=7}, trace 31 of 41 ~3p 00001.434 II| OPS_COMPACT: Compact{1.2.1435, eph 4} end=Done, 4 blobs 171r (max 214), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 31 +0, (253776 33721 281944)b }, ecr=1.000 00001.435 II| TABLET_EXECUTOR: Leader{1:2:1438} Compact 23 on TGenCompactionParams{2: gen 1 epoch 0, 2 parts} step 1435, product {1 parts epoch 0} done 00001.507 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1501, 13015b, wait} done, Waste{2:0, 38 ... 
SAUSAGECACHE: Send page collection result [1:2:250:1:12288:161:0] owner [37:418:2424] class Scan pages [ 0 ] cookie 0 00000.301 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:313:1:12288:161:0] owner [37:418:2424] 00000.301 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:313:1:12288:161:0] owner [37:418:2424] cookie 0 class Scan from cache [ 0 ] 00000.301 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:313:1:12288:161:0] owner [37:418:2424] class Scan pages [ 0 ] cookie 0 00000.312 DD| TABLET_SAUSAGECACHE: Save page collection [1:2:315:1:12288:163:0] owner [37:419:2424] compacted pages [ 2 ] 00000.312 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:315:1:12288:163:0] 00000.312 DD| TABLET_SAUSAGECACHE: Unregister owner [37:418:2424] 00000.312 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:64:1:12288:161:0] owner [37:418:2424] 00000.312 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:313:1:12288:161:0] owner [37:418:2424] 00000.312 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:250:1:12288:161:0] owner [37:418:2424] 00000.312 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:189:1:12288:161:0] owner [37:418:2424] 00000.312 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:127:1:12288:161:0] owner [37:418:2424] 00000.312 DD| TABLET_SAUSAGECACHE: Remove owner [37:418:2424] 00000.312 II| TABLET_EXECUTOR: Leader{1:2:316} Compact 63 on TGenCompactionParams{101: gen 2 epoch 0, 5 parts} step 315, product {1 parts epoch 0} done 00000.313 DD| TABLET_EXECUTOR: TGenCompactionStrategy CompactionFinished for 1: compaction 63, generation 2 00000.313 DD| TABLET_EXECUTOR: TGenCompactionStrategy CheckGeneration for 1 generation 2, state Free, final id 0, final level 2 00000.313 DD| RESOURCE_BROKER: Finish task gen2-table-101-tablet-1 (32 by [37:30:2062]) (release resources {1, 0}) 00000.313 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_compaction_gen2 from 4.687500 to 0.000000 (remove task gen2-table-101-tablet-1 (32 by [37:30:2062])) 00000.313 DD| TABLET_SAUSAGECACHE: Attach page collection [1:2:315:1:12288:163:0] owner [37:30:2062] 00000.313 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:315:1:12288:163:0] owner [37:30:2062] 00000.313 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:313:1:12288:161:0] owner [37:30:2062] 00000.313 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:313:1:12288:161:0] owner [37:30:2062] 00000.314 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:250:1:12288:161:0] owner [37:30:2062] 00000.314 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:250:1:12288:161:0] owner [37:30:2062] 00000.314 DD| TABLET_EXECUTOR: Leader{1:2:317} commited cookie 3 for step 316 00000.314 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:189:1:12288:161:0] owner [37:30:2062] 00000.314 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:189:1:12288:161:0] owner [37:30:2062] 00000.314 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:127:1:12288:161:0] owner [37:30:2062] 00000.314 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:127:1:12288:161:0] owner [37:30:2062] 00000.314 DD| TABLET_EXECUTOR: Leader{1:2:317} switch applied on followers, step 316 00000.314 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:64:1:12288:161:0] owner [37:30:2062] 00000.314 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:64:1:12288:161:0] owner [37:30:2062] 00000.314 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:315:1:12288:163:0] owner [37:30:2062] pages [ 2 ] 00000.315 DD| TABLET_SAUSAGECACHE: Add page collection 
[1:2:64:1:12288:161:0] owner [37:405:2414] 00000.315 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:64:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.315 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:64:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.315 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:127:1:12288:161:0] owner [37:405:2414] 00000.315 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:127:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.315 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:127:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.315 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:189:1:12288:161:0] owner [37:405:2414] 00000.316 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:189:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.316 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:189:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.316 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:250:1:12288:161:0] owner [37:405:2414] 00000.316 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:250:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.316 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:250:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.316 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:261:1:12288:161:0] owner [37:405:2414] 00000.316 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:261:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.316 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:261:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.316 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:273:1:12288:161:0] owner [37:405:2414] 00000.316 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:273:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.316 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:273:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.317 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:285:1:12288:161:0] owner [37:405:2414] 00000.317 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:285:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.317 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:285:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.317 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:297:1:12288:161:0] owner [37:405:2414] 00000.317 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:297:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.317 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:297:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.317 DD| TABLET_SAUSAGECACHE: Unregister owner [37:405:2414] 00000.317 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:189:1:12288:161:0] owner [37:405:2414] 00000.317 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:64:1:12288:161:0] owner [37:405:2414] 00000.317 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:250:1:12288:161:0] owner [37:405:2414] 00000.317 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:261:1:12288:161:0] owner [37:405:2414] 00000.317 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:273:1:12288:161:0] owner [37:405:2414] 00000.317 DD| 
TABLET_SAUSAGECACHE: Remove page collection [1:2:285:1:12288:161:0] owner [37:405:2414] 00000.317 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:127:1:12288:161:0] owner [37:405:2414] 00000.317 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:297:1:12288:161:0] owner [37:405:2414] 00000.317 DD| TABLET_SAUSAGECACHE: Remove owner [37:405:2414] 00000.318 DD| RESOURCE_BROKER: Finish task Scan{58 on 101}::1 (29 by [37:30:2062]) (release resources {1, 0}) 00000.318 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_scan from 11.718750 to 0.000000 (remove task Scan{58 on 101}::1 (29 by [37:30:2062])) 00000.318 II| TABLET_EXECUTOR: Leader{1:2:317} suiciding, Waste{2:0, 7661b +(30, 11928b), 16 trc, -42337b acc} 00000.319 DD| TABLET_SAUSAGECACHE: Unregister owner [37:30:2062] 00000.319 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:315:1:12288:163:0] owner [37:30:2062] 00000.319 DD| TABLET_SAUSAGECACHE: Remove owner [37:30:2062] 00000.320 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.320 NN| TABLET_SAUSAGECACHE: Poison cache serviced 38 reqs hit {38 21480b} miss {0 0b} 00000.320 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.320 II| FAKE_ENV: DS.1 gone, left {23864b, 37}, put {57254b, 346} 00000.320 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.320 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.320 II| FAKE_ENV: DS.0 gone, left {1961b, 17}, put {31646b, 317} 00000.320 II| FAKE_ENV: All BS storage groups are stopped 00000.320 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.320 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 2287}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T21:10:50.736022Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.068 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.068 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.068 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.069 II| FAKE_ENV: DS.0 gone, left {536b, 6}, put {556b, 7} 00000.069 II| FAKE_ENV: DS.1 gone, left {30495b, 8}, put {30495b, 8} 00000.069 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.069 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.069 II| FAKE_ENV: All BS storage groups are stopped 00000.069 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.069 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-24T21:10:50.812573Z 00000.009 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.009 II| FAKE_ENV: Starting storage for BS group 0 00000.010 II| FAKE_ENV: Starting storage for BS group 1 00000.010 II| FAKE_ENV: Starting storage for BS group 2 00000.010 II| FAKE_ENV: Starting storage for BS group 3 00000.179 CC| TABLET_EXECUTOR: Tablet 1 unhandled exception std::runtime_error: test ??+0 (0x11A91301) __cxa_throw+221 (0x11A9112D) NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Exceptions::TTxExecuteThrowException::Execute(NKikimr::NTabletFlatExecutor::TTransactionContext&, NActors::TActorContext const&)+62 (0x10D7BD6E) 
NKikimr::NTabletFlatExecutor::TExecutor::ExecuteTransaction(NKikimr::NTabletFlatExecutor::TSeat*)+3349 (0x17DAAAB5) NKikimr::NTabletFlatExecutor::TExecutor::DoExecute(TAutoPtr, NKikimr::NTabletFlatExecutor::TExecutor::ETxMode)+10562 (0x17DA6492) non-virtual thunk to NKikimr::NTabletFlatExecutor::TExecutor::Execute(TAutoPtr, NActors::TActorContext const&)+54 (0x17DAD726) ??+0 (0x10D7BB60) NKikimr::NFake::TDummy::Inbox(TAutoPtr&)+2810 (0x10CBD46A) NActors::IActor::Receive(TAutoPtr&)+237 (0x13291CBD) 00000.180 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.180 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.180 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.180 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {62b, 2} 00000.180 II| FAKE_ENV: DS.1 gone, left {35b, 1}, put {35b, 1} 00000.180 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.181 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.181 II| FAKE_ENV: All BS storage groups are stopped 00000.181 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.181 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 1 Error 0 Left 15}, stopped ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::InsertRevert_Literal_Duplicates [GOOD] Test command err: Trying to start YDB, gRPC: 7810, MsgBus: 6644 2025-06-24T21:10:40.640722Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626340060133938:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:40.640851Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0019e9/r3tmp/tmpXNbEbI/pdisk_1.dat 2025-06-24T21:10:40.970981Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:40.971448Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626340060133915:2079] 1750799440639553 != 1750799440639556 TServer::EnableGrpc on GrpcPort 7810, node 1 2025-06-24T21:10:41.039919Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:41.039951Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:41.039963Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:41.040090Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:41.047507Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:41.047615Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:41.049384Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6644 TClient is connected to server localhost:6644 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:41.546108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:41.562311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:41.642440Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:41.696758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:41.807357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:41.866253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:43.186531Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626352945037453:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.186710Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.449319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.477363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.504798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.531736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.556645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.623911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.664125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:43.708707Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626352945038111:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.708793Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.708853Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626352945038116:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:43.712280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:43.720335Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626352945038118:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:43.820293Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626352945038169:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 4818, MsgBus: 28786 2025-06-24T21:10:45.967632Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626358861928456:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:45.967735Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0019e9/r3tmp/tmpnFdyHU/pdisk_1.dat 2025-06-24T21:10:46.049949Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:46.051344Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626358861928435:2079] 1750799445967211 != 1750799445967214 TServer::EnableGrpc on GrpcPort 4818, node 2 2025-06-24T21:10:46.095715Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:46.095791Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:46.097193Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:46.097457Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:46.097480Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:46.097490Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:46.097589Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28786 TClient is connected to server localhost:28786 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:10:46.448785Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:46.460436Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:46.510950Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:46.660572Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:46.736486Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:46.985752Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:48.773694Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626371746831950:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:48.773790Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:48.827476Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:48.855878Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:48.891933Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:48.926660Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:48.953969Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.020259Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.054068Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.126300Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626376041799907:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.126378Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.126715Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626376041799912:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.130336Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:49.141006Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626376041799914:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:49.205225Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626376041799965:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOk [GOOD] >> TSchemeShardLoginTest::ChangeAccountLockoutParameters [GOOD] >> TSchemeShardLoginTest::CheckThatLockedOutParametersIsRestoredFromLocalDb >> KqpImmediateEffects::Replace >> TKeyValueTest::TestLargeWriteAndDelete [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOk [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! 
new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:82:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:81:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:81:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! 
new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:83:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:85:2114] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:89:2057] recipient: [7:85:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! new actor is[7:88:2115] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:106:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:85:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:88:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:89:2057] recipient: [8:87:2116] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:91:2057] recipient: [8:87:2116] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! new actor is[8:90:2117] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:176:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:85:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:87:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:89:2057] recipient: [9:88:2116] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:91:2057] recipient: [9:88:2116] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! 
new actor is[9:90:2117] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:176:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:86:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:89:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:90:2057] recipient: [10:88:2116] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:92:2057] recipient: [10:88:2116] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! new actor is[10:91:2117] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:109:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:88:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:91:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:92:2057] recipient: [11:90:2118] Leader for TabletID 72057594037927937 is [11:93:2119] sender: [11:94:2057] recipient: [11:90:2118] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! new actor is[11:93:2119] Leader for TabletID 72057594037927937 is [11:93:2119] sender: [11:179:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (a ... 94037927937 (actor [23:58:2098]) tablet resolver refreshed! new actor is[23:87:2115] Leader for TabletID 72057594037927937 is [23:87:2115] sender: [23:173:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:56:2057] recipient: [24:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:56:2057] recipient: [24:51:2096] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:59:2057] recipient: [24:51:2096] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:76:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! 
Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:83:2057] recipient: [24:37:2084] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:86:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:87:2057] recipient: [24:85:2114] Leader for TabletID 72057594037927937 is [24:88:2115] sender: [24:89:2057] recipient: [24:85:2114] !Reboot 72057594037927937 (actor [24:58:2098]) rebooted! !Reboot 72057594037927937 (actor [24:58:2098]) tablet resolver refreshed! new actor is[24:88:2115] Leader for TabletID 72057594037927937 is [24:88:2115] sender: [24:174:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:56:2057] recipient: [25:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:56:2057] recipient: [25:52:2096] Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:59:2057] recipient: [25:52:2096] Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:76:2057] recipient: [25:14:2061] !Reboot 72057594037927937 (actor [25:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:86:2057] recipient: [25:37:2084] Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:89:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:90:2057] recipient: [25:88:2117] Leader for TabletID 72057594037927937 is [25:91:2118] sender: [25:92:2057] recipient: [25:88:2117] !Reboot 72057594037927937 (actor [25:58:2098]) rebooted! !Reboot 72057594037927937 (actor [25:58:2098]) tablet resolver refreshed! new actor is[25:91:2118] Leader for TabletID 72057594037927937 is [25:91:2118] sender: [25:177:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:56:2057] recipient: [26:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:56:2057] recipient: [26:52:2096] Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:59:2057] recipient: [26:52:2096] Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:76:2057] recipient: [26:14:2061] !Reboot 72057594037927937 (actor [26:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:86:2057] recipient: [26:37:2084] Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:89:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:90:2057] recipient: [26:88:2117] Leader for TabletID 72057594037927937 is [26:91:2118] sender: [26:92:2057] recipient: [26:88:2117] !Reboot 72057594037927937 (actor [26:58:2098]) rebooted! !Reboot 72057594037927937 (actor [26:58:2098]) tablet resolver refreshed! new actor is[26:91:2118] Leader for TabletID 72057594037927937 is [26:91:2118] sender: [26:177:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:56:2057] recipient: [27:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:56:2057] recipient: [27:52:2096] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:59:2057] recipient: [27:52:2096] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:76:2057] recipient: [27:14:2061] !Reboot 72057594037927937 (actor [27:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! 
Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:87:2057] recipient: [27:37:2084] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:90:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:91:2057] recipient: [27:89:2117] Leader for TabletID 72057594037927937 is [27:92:2118] sender: [27:93:2057] recipient: [27:89:2117] !Reboot 72057594037927937 (actor [27:58:2098]) rebooted! !Reboot 72057594037927937 (actor [27:58:2098]) tablet resolver refreshed! new actor is[27:92:2118] Leader for TabletID 72057594037927937 is [27:92:2118] sender: [27:178:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:56:2057] recipient: [28:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:56:2057] recipient: [28:53:2096] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:59:2057] recipient: [28:53:2096] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:76:2057] recipient: [28:14:2061] !Reboot 72057594037927937 (actor [28:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:90:2057] recipient: [28:37:2084] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:93:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:94:2057] recipient: [28:92:2120] Leader for TabletID 72057594037927937 is [28:95:2121] sender: [28:96:2057] recipient: [28:92:2120] !Reboot 72057594037927937 (actor [28:58:2098]) rebooted! !Reboot 72057594037927937 (actor [28:58:2098]) tablet resolver refreshed! new actor is[28:95:2121] Leader for TabletID 72057594037927937 is [28:95:2121] sender: [28:181:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:56:2057] recipient: [29:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:56:2057] recipient: [29:51:2096] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:59:2057] recipient: [29:51:2096] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:76:2057] recipient: [29:14:2061] !Reboot 72057594037927937 (actor [29:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:90:2057] recipient: [29:37:2084] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:93:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:94:2057] recipient: [29:92:2120] Leader for TabletID 72057594037927937 is [29:95:2121] sender: [29:96:2057] recipient: [29:92:2120] !Reboot 72057594037927937 (actor [29:58:2098]) rebooted! !Reboot 72057594037927937 (actor [29:58:2098]) tablet resolver refreshed! new actor is[29:95:2121] Leader for TabletID 72057594037927937 is [29:95:2121] sender: [29:181:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:56:2057] recipient: [30:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:56:2057] recipient: [30:51:2096] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:59:2057] recipient: [30:51:2096] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:76:2057] recipient: [30:14:2061] !Reboot 72057594037927937 (actor [30:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! 
Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:91:2057] recipient: [30:37:2084] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:94:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:95:2057] recipient: [30:93:2120] Leader for TabletID 72057594037927937 is [30:96:2121] sender: [30:97:2057] recipient: [30:93:2120] !Reboot 72057594037927937 (actor [30:58:2098]) rebooted! !Reboot 72057594037927937 (actor [30:58:2098]) tablet resolver refreshed! new actor is[30:96:2121] Leader for TabletID 72057594037927937 is [30:96:2121] sender: [30:182:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:56:2057] recipient: [31:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:56:2057] recipient: [31:52:2096] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:59:2057] recipient: [31:52:2096] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:76:2057] recipient: [31:14:2061] !Reboot 72057594037927937 (actor [31:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:93:2057] recipient: [31:37:2084] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:96:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:97:2057] recipient: [31:95:2122] Leader for TabletID 72057594037927937 is [31:98:2123] sender: [31:99:2057] recipient: [31:95:2122] !Reboot 72057594037927937 (actor [31:58:2098]) rebooted! !Reboot 72057594037927937 (actor [31:58:2098]) tablet resolver refreshed! new actor is[31:98:2123] Leader for TabletID 72057594037927937 is [31:98:2123] sender: [31:184:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:56:2057] recipient: [32:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:56:2057] recipient: [32:52:2096] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:59:2057] recipient: [32:52:2096] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:76:2057] recipient: [32:14:2061] !Reboot 72057594037927937 (actor [32:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:93:2057] recipient: [32:37:2084] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:96:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:97:2057] recipient: [32:95:2122] Leader for TabletID 72057594037927937 is [32:98:2123] sender: [32:99:2057] recipient: [32:95:2122] !Reboot 72057594037927937 (actor [32:58:2098]) rebooted! !Reboot 72057594037927937 (actor [32:58:2098]) tablet resolver refreshed! new actor is[32:98:2123] Leader for TabletID 72057594037927937 is [32:98:2123] sender: [32:184:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:56:2057] recipient: [33:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:56:2057] recipient: [33:52:2096] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:59:2057] recipient: [33:52:2096] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:76:2057] recipient: [33:14:2061] !Reboot 72057594037927937 (actor [33:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! 
Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:94:2057] recipient: [33:37:2084] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:97:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:98:2057] recipient: [33:96:2122] Leader for TabletID 72057594037927937 is [33:99:2123] sender: [33:100:2057] recipient: [33:96:2122] !Reboot 72057594037927937 (actor [33:58:2098]) rebooted! !Reboot 72057594037927937 (actor [33:58:2098]) tablet resolver refreshed! new actor is[33:99:2123] Leader for TabletID 72057594037927937 is [33:99:2123] sender: [33:185:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:56:2057] recipient: [34:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:56:2057] recipient: [34:52:2096] Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:59:2057] recipient: [34:52:2096] Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:76:2057] recipient: [34:14:2061] >> KqpInplaceUpdate::Negative_BatchUpdate+UseSink [GOOD] >> KqpInplaceUpdate::BigRow >> KqpWrite::Insert [GOOD] >> KqpWrite::CastValuesOptional >> KqpImmediateEffects::InsertExistingKey+UseSink [GOOD] >> KqpImmediateEffects::ConflictingKeyRW1WR2 [GOOD] >> KqpImmediateEffects::ConflictingKeyRW1RWR2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestLargeWriteAndDelete [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! 
Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:82:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:82:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvGetStorageChannelStatus ! 
Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:84:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:88:2057] recipient: [7:86:2116] Leader for TabletID 72057594037927937 is [7:89:2117] sender: [7:90:2057] recipient: [7:86:2116] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! new actor is[7:89:2117] Leader for TabletID 72057594037927937 is [7:89:2117] sender: [7:175:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:84:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:87:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:88:2057] recipient: [8:86:2116] Leader for TabletID 72057594037927937 is [8:89:2117] sender: [8:90:2057] recipient: [8:86:2116] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! new actor is[8:89:2117] Leader for TabletID 72057594037927937 is [8:89:2117] sender: [8:175:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:86:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:89:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:90:2057] recipient: [9:88:2118] Leader for TabletID 72057594037927937 is [9:91:2119] sender: [9:92:2057] recipient: [9:88:2118] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! new actor is[9:91:2119] Leader for TabletID 72057594037927937 is [9:91:2119] sender: [9:177:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:86:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:88:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:90:2057] recipient: [10:89:2118] Leader for TabletID 72057594037927937 is [10:91:2119] sender: [10:92:2057] recipient: [10:89:2118] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! new actor is[10:91:2119] Leader for TabletID 72057594037927937 is [10:91:2119] sender: [10:177:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:88:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:91:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:92:2057] recipient: [11:90:2120] Leader for TabletID 72057594037927937 is [11:93:2121] sender: [11:94:2057] recipient: [11:90:2120] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! new actor is[11:93:2121] Leader for TabletID 72057594037927937 is [11:93:2121] sender: [11:179:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] !Re ... 
is [18:58:2098] sender: [18:97:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:58:2098] sender: [18:98:2057] recipient: [18:96:2124] Leader for TabletID 72057594037927937 is [18:99:2125] sender: [18:100:2057] recipient: [18:96:2124] !Reboot 72057594037927937 (actor [18:58:2098]) rebooted! !Reboot 72057594037927937 (actor [18:58:2098]) tablet resolver refreshed! new actor is[18:99:2125] Leader for TabletID 72057594037927937 is [18:99:2125] sender: [18:185:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2096] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:59:2057] recipient: [19:51:2096] Leader for TabletID 72057594037927937 is [19:58:2098] sender: [19:76:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:56:2057] recipient: [20:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:56:2057] recipient: [20:52:2096] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:59:2057] recipient: [20:52:2096] Leader for TabletID 72057594037927937 is [20:58:2098] sender: [20:76:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:56:2057] recipient: [21:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:56:2057] recipient: [21:52:2096] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:59:2057] recipient: [21:52:2096] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:76:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:78:2057] recipient: [21:37:2084] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:80:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:58:2098] sender: [21:82:2057] recipient: [21:81:2111] Leader for TabletID 72057594037927937 is [21:83:2112] sender: [21:84:2057] recipient: [21:81:2111] !Reboot 72057594037927937 (actor [21:58:2098]) rebooted! !Reboot 72057594037927937 (actor [21:58:2098]) tablet resolver refreshed! new actor is[21:83:2112] Leader for TabletID 72057594037927937 is [21:83:2112] sender: [21:169:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:56:2057] recipient: [22:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:56:2057] recipient: [22:52:2096] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:59:2057] recipient: [22:52:2096] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:76:2057] recipient: [22:14:2061] !Reboot 72057594037927937 (actor [22:58:2098]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:78:2057] recipient: [22:37:2084] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:81:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [22:58:2098] sender: [22:82:2057] recipient: [22:80:2111] Leader for TabletID 72057594037927937 is [22:83:2112] sender: [22:84:2057] recipient: [22:80:2111] !Reboot 72057594037927937 (actor [22:58:2098]) rebooted! !Reboot 72057594037927937 (actor [22:58:2098]) tablet resolver refreshed! 
new actor is[22:83:2112] Leader for TabletID 72057594037927937 is [22:83:2112] sender: [22:169:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:56:2057] recipient: [23:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:56:2057] recipient: [23:53:2096] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:59:2057] recipient: [23:53:2096] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:76:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:79:2057] recipient: [23:37:2084] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:82:2057] recipient: [23:81:2111] Leader for TabletID 72057594037927937 is [23:58:2098] sender: [23:83:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:84:2112] sender: [23:85:2057] recipient: [23:81:2111] !Reboot 72057594037927937 (actor [23:58:2098]) rebooted! !Reboot 72057594037927937 (actor [23:58:2098]) tablet resolver refreshed! new actor is[23:84:2112] Leader for TabletID 72057594037927937 is [23:84:2112] sender: [23:170:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:56:2057] recipient: [24:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:56:2057] recipient: [24:51:2096] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:59:2057] recipient: [24:51:2096] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:76:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:82:2057] recipient: [24:37:2084] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:85:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:58:2098] sender: [24:86:2057] recipient: [24:84:2114] Leader for TabletID 72057594037927937 is [24:87:2115] sender: [24:88:2057] recipient: [24:84:2114] !Reboot 72057594037927937 (actor [24:58:2098]) rebooted! !Reboot 72057594037927937 (actor [24:58:2098]) tablet resolver refreshed! new actor is[24:87:2115] Leader for TabletID 72057594037927937 is [24:87:2115] sender: [24:173:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:56:2057] recipient: [25:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:56:2057] recipient: [25:52:2096] Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:59:2057] recipient: [25:52:2096] Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:76:2057] recipient: [25:14:2061] !Reboot 72057594037927937 (actor [25:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:82:2057] recipient: [25:37:2084] Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:85:2057] recipient: [25:84:2114] Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:86:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [25:87:2115] sender: [25:88:2057] recipient: [25:84:2114] !Reboot 72057594037927937 (actor [25:58:2098]) rebooted! !Reboot 72057594037927937 (actor [25:58:2098]) tablet resolver refreshed! 
new actor is[25:87:2115] Leader for TabletID 72057594037927937 is [25:87:2115] sender: [25:173:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:56:2057] recipient: [26:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:56:2057] recipient: [26:52:2096] Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:59:2057] recipient: [26:52:2096] Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:76:2057] recipient: [26:14:2061] !Reboot 72057594037927937 (actor [26:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:83:2057] recipient: [26:37:2084] Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:86:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:87:2057] recipient: [26:85:2114] Leader for TabletID 72057594037927937 is [26:88:2115] sender: [26:89:2057] recipient: [26:85:2114] !Reboot 72057594037927937 (actor [26:58:2098]) rebooted! !Reboot 72057594037927937 (actor [26:58:2098]) tablet resolver refreshed! new actor is[26:88:2115] Leader for TabletID 72057594037927937 is [26:88:2115] sender: [26:174:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:56:2057] recipient: [27:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:56:2057] recipient: [27:52:2096] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:59:2057] recipient: [27:52:2096] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:76:2057] recipient: [27:14:2061] !Reboot 72057594037927937 (actor [27:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:86:2057] recipient: [27:37:2084] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:89:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:90:2057] recipient: [27:88:2117] Leader for TabletID 72057594037927937 is [27:91:2118] sender: [27:92:2057] recipient: [27:88:2117] !Reboot 72057594037927937 (actor [27:58:2098]) rebooted! !Reboot 72057594037927937 (actor [27:58:2098]) tablet resolver refreshed! new actor is[27:91:2118] Leader for TabletID 72057594037927937 is [27:91:2118] sender: [27:177:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:56:2057] recipient: [28:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:56:2057] recipient: [28:53:2096] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:59:2057] recipient: [28:53:2096] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:76:2057] recipient: [28:14:2061] !Reboot 72057594037927937 (actor [28:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:86:2057] recipient: [28:37:2084] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:89:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:90:2057] recipient: [28:88:2117] Leader for TabletID 72057594037927937 is [28:91:2118] sender: [28:92:2057] recipient: [28:88:2117] !Reboot 72057594037927937 (actor [28:58:2098]) rebooted! !Reboot 72057594037927937 (actor [28:58:2098]) tablet resolver refreshed! 
new actor is[28:91:2118] Leader for TabletID 72057594037927937 is [28:91:2118] sender: [28:177:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:56:2057] recipient: [29:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:56:2057] recipient: [29:51:2096] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:59:2057] recipient: [29:51:2096] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:76:2057] recipient: [29:14:2061] !Reboot 72057594037927937 (actor [29:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:87:2057] recipient: [29:37:2084] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:90:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:91:2057] recipient: [29:89:2117] Leader for TabletID 72057594037927937 is [29:92:2118] sender: [29:93:2057] recipient: [29:89:2117] !Reboot 72057594037927937 (actor [29:58:2098]) rebooted! !Reboot 72057594037927937 (actor [29:58:2098]) tablet resolver refreshed! new actor is[29:92:2118] Leader for TabletID 72057594037927937 is [29:92:2118] sender: [29:178:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:56:2057] recipient: [30:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:56:2057] recipient: [30:51:2096] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:59:2057] recipient: [30:51:2096] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:76:2057] recipient: [30:14:2061] >> KqpNamedExpressions::NamedExpressionRandom-UseSink [GOOD] >> KqpInplaceUpdate::Negative_SingleRowWithKeyCast-UseSink [GOOD] >> KqpInplaceUpdate::Negative_SingleRowWithValueCast+UseSink >> KqpImmediateEffects::InsertConflictTxAborted [GOOD] >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorksNewApi [GOOD] >> KqpWrite::InsertRevert [GOOD] >> KqpWrite::ProjectReplace+UseSink >> KqpImmediateEffects::UpsertExistingKey [GOOD] >> KqpImmediateEffects::WriteThenReadWithCommit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::InsertExistingKey+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 5985, MsgBus: 28835 2025-06-24T21:10:42.820800Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626346363349610:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:42.820873Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0019d1/r3tmp/tmploxogA/pdisk_1.dat 2025-06-24T21:10:43.108237Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:43.108548Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626346363349590:2079] 1750799442820012 != 1750799442820015 TServer::EnableGrpc on GrpcPort 5985, node 1 2025-06-24T21:10:43.154978Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:43.154998Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:10:43.155026Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:43.155190Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:43.174816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:43.174919Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:43.176906Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28835 TClient is connected to server localhost:28835 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:43.632384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:43.656633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:43.753957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:43.829311Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:43.891724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:43.969218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:45.603218Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626359248253109:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:45.603318Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:45.837648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.860632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.887605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.914922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.939009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.967255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:45.998460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:46.048176Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626363543221061:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:46.048250Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:46.048300Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626363543221066:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:46.051439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:46.060394Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626363543221068:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:46.129522Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626363543221119:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:47.086092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.686297Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519626367838188791:2494], TxId: 281474976710677, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=Mzg1YzZjMDQtZTUyMjBiNzktZDczZWYwMC0yMTk5NzM1ZQ==. TraceId : 01jyhwex7rcn4vfk30pmtfqbs0. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Duplicated keys found., code: 2012 }. 2025-06-24T21 ... eStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:48.956623Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:10:48.976301Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.027341Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.172182Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.239234Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.375717Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:51.310915Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626387858917187:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.311023Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.371419Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.401711Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.433319Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.464962Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.500154Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.530937Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.599080Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.658404Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626387858917847:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.658481Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.658770Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626387858917852:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.661935Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:51.671852Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626387858917854:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:51.740732Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626387858917905:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:52.708563Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.067620Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=4; 2025-06-24T21:10:53.067831Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 4 at tablet 72075186224037922 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:10:53.067966Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 4 at tablet 72075186224037922 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:10:53.068133Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [2:7519626396448852893:2472], Table: `/Root/TestImmediateEffects` ([72057594046644480:17:1]), SessionActorId: [2:7519626392153885470:2472]Got CONSTRAINT VIOLATION for table `/Root/TestImmediateEffects`. ShardID=72075186224037922, Sink=[2:7519626396448852893:2472].{
: Error: Conflict with existing key., code: 2012 } 2025-06-24T21:10:53.068244Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519626396448852877:2472], SessionActorId: [2:7519626392153885470:2472], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/TestImmediateEffects`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[2:7519626392153885470:2472]. isRollback=0 2025-06-24T21:10:53.068530Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=ZTFlNTIwYTAtYzYxMTdjOGItMzgyM2RkNi1lMWRmN2M0Zg==, ActorId: [2:7519626392153885470:2472], ActorState: ExecuteState, TraceId: 01jyhwf2n78stnya0dabc6d9fm, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [2:7519626396448852887:2472] from: [2:7519626396448852877:2472] 2025-06-24T21:10:53.068620Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519626396448852887:2472] TxId: 281474976715675. Ctx: { TraceId: 01jyhwf2n78stnya0dabc6d9fm, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTFlNTIwYTAtYzYxMTdjOGItMzgyM2RkNi1lMWRmN2M0Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/TestImmediateEffects`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-24T21:10:53.068818Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZTFlNTIwYTAtYzYxMTdjOGItMzgyM2RkNi1lMWRmN2M0Zg==, ActorId: [2:7519626392153885470:2472], ActorState: ExecuteState, TraceId: 01jyhwf2n78stnya0dabc6d9fm, Create QueryResponse for error on request, msg: >> KqpImmediateEffects::DeleteOnAfterInsertWithIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! 
new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:82:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:82:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! 
new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:83:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:85:2114] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:89:2057] recipient: [7:85:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! new actor is[7:88:2115] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:174:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:86:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:89:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:90:2057] recipient: [8:88:2117] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:92:2057] recipient: [8:88:2117] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! new actor is[8:91:2118] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:177:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:86:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:89:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:90:2057] recipient: [9:88:2117] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:92:2057] recipient: [9:88:2117] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! 
new actor is[9:91:2118] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:177:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:87:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:91:2057] recipient: [10:89:2117] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:93:2057] recipient: [10:89:2117] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! new actor is[10:92:2118] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:178:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:90:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:93:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:94:2057] recipient: [11:92:2120] Leader for TabletID 72057594037927937 is [11:95:2121] sender: [11:96:2057] recipient: [11:92:2120] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! new actor is[11:95:2121] Leader for TabletID 72057594037927937 is [11:95:2121] sender: [11:181:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... 057594037927937 is [25:58:2098] sender: [25:105:2057] recipient: [25:37:2084] Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:108:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [25:58:2098] sender: [25:109:2057] recipient: [25:107:2130] Leader for TabletID 72057594037927937 is [25:110:2131] sender: [25:111:2057] recipient: [25:107:2130] !Reboot 72057594037927937 (actor [25:58:2098]) rebooted! !Reboot 72057594037927937 (actor [25:58:2098]) tablet resolver refreshed! 
new actor is[25:110:2131] Leader for TabletID 72057594037927937 is [25:110:2131] sender: [25:196:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:56:2057] recipient: [26:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:56:2057] recipient: [26:52:2096] Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:59:2057] recipient: [26:52:2096] Leader for TabletID 72057594037927937 is [26:58:2098] sender: [26:76:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:56:2057] recipient: [27:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:56:2057] recipient: [27:52:2096] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:59:2057] recipient: [27:52:2096] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:76:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:56:2057] recipient: [28:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:56:2057] recipient: [28:53:2096] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:59:2057] recipient: [28:53:2096] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:76:2057] recipient: [28:14:2061] !Reboot 72057594037927937 (actor [28:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:78:2057] recipient: [28:37:2084] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:81:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:82:2057] recipient: [28:80:2111] Leader for TabletID 72057594037927937 is [28:83:2112] sender: [28:84:2057] recipient: [28:80:2111] !Reboot 72057594037927937 (actor [28:58:2098]) rebooted! !Reboot 72057594037927937 (actor [28:58:2098]) tablet resolver refreshed! new actor is[28:83:2112] Leader for TabletID 72057594037927937 is [28:83:2112] sender: [28:169:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:56:2057] recipient: [29:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:56:2057] recipient: [29:51:2096] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:59:2057] recipient: [29:51:2096] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:76:2057] recipient: [29:14:2061] !Reboot 72057594037927937 (actor [29:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:78:2057] recipient: [29:37:2084] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:81:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:82:2057] recipient: [29:80:2111] Leader for TabletID 72057594037927937 is [29:83:2112] sender: [29:84:2057] recipient: [29:80:2111] !Reboot 72057594037927937 (actor [29:58:2098]) rebooted! !Reboot 72057594037927937 (actor [29:58:2098]) tablet resolver refreshed! 
new actor is[29:83:2112] Leader for TabletID 72057594037927937 is [29:83:2112] sender: [29:169:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:56:2057] recipient: [30:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:56:2057] recipient: [30:51:2096] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:59:2057] recipient: [30:51:2096] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:76:2057] recipient: [30:14:2061] !Reboot 72057594037927937 (actor [30:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:79:2057] recipient: [30:37:2084] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:82:2057] recipient: [30:81:2111] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:83:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [30:84:2112] sender: [30:85:2057] recipient: [30:81:2111] !Reboot 72057594037927937 (actor [30:58:2098]) rebooted! !Reboot 72057594037927937 (actor [30:58:2098]) tablet resolver refreshed! new actor is[30:84:2112] Leader for TabletID 72057594037927937 is [30:84:2112] sender: [30:170:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:56:2057] recipient: [31:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:56:2057] recipient: [31:52:2096] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:59:2057] recipient: [31:52:2096] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:76:2057] recipient: [31:14:2061] !Reboot 72057594037927937 (actor [31:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:82:2057] recipient: [31:37:2084] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:85:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:86:2057] recipient: [31:84:2114] Leader for TabletID 72057594037927937 is [31:87:2115] sender: [31:88:2057] recipient: [31:84:2114] !Reboot 72057594037927937 (actor [31:58:2098]) rebooted! !Reboot 72057594037927937 (actor [31:58:2098]) tablet resolver refreshed! new actor is[31:87:2115] Leader for TabletID 72057594037927937 is [31:87:2115] sender: [31:173:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:56:2057] recipient: [32:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:56:2057] recipient: [32:52:2096] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:59:2057] recipient: [32:52:2096] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:76:2057] recipient: [32:14:2061] !Reboot 72057594037927937 (actor [32:58:2098]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:82:2057] recipient: [32:37:2084] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:85:2057] recipient: [32:84:2114] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:86:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [32:87:2115] sender: [32:88:2057] recipient: [32:84:2114] !Reboot 72057594037927937 (actor [32:58:2098]) rebooted! !Reboot 72057594037927937 (actor [32:58:2098]) tablet resolver refreshed! 
new actor is[32:87:2115] Leader for TabletID 72057594037927937 is [32:87:2115] sender: [32:173:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:56:2057] recipient: [33:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:56:2057] recipient: [33:52:2096] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:59:2057] recipient: [33:52:2096] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:76:2057] recipient: [33:14:2061] !Reboot 72057594037927937 (actor [33:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:83:2057] recipient: [33:37:2084] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:85:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:87:2057] recipient: [33:86:2114] Leader for TabletID 72057594037927937 is [33:88:2115] sender: [33:89:2057] recipient: [33:86:2114] !Reboot 72057594037927937 (actor [33:58:2098]) rebooted! !Reboot 72057594037927937 (actor [33:58:2098]) tablet resolver refreshed! new actor is[33:88:2115] Leader for TabletID 72057594037927937 is [33:88:2115] sender: [33:106:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:56:2057] recipient: [34:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:56:2057] recipient: [34:52:2096] Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:59:2057] recipient: [34:52:2096] Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:76:2057] recipient: [34:14:2061] !Reboot 72057594037927937 (actor [34:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:85:2057] recipient: [34:37:2084] Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:88:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:89:2057] recipient: [34:87:2116] Leader for TabletID 72057594037927937 is [34:90:2117] sender: [34:91:2057] recipient: [34:87:2116] !Reboot 72057594037927937 (actor [34:58:2098]) rebooted! !Reboot 72057594037927937 (actor [34:58:2098]) tablet resolver refreshed! new actor is[34:90:2117] Leader for TabletID 72057594037927937 is [34:90:2117] sender: [34:176:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:56:2057] recipient: [35:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:56:2057] recipient: [35:52:2096] Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:59:2057] recipient: [35:52:2096] Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:76:2057] recipient: [35:14:2061] !Reboot 72057594037927937 (actor [35:58:2098]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:85:2057] recipient: [35:37:2084] Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:87:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:89:2057] recipient: [35:88:2116] Leader for TabletID 72057594037927937 is [35:90:2117] sender: [35:91:2057] recipient: [35:88:2116] !Reboot 72057594037927937 (actor [35:58:2098]) rebooted! !Reboot 72057594037927937 (actor [35:58:2098]) tablet resolver refreshed! 
new actor is[35:90:2117] Leader for TabletID 72057594037927937 is [35:90:2117] sender: [35:176:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:56:2057] recipient: [36:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:56:2057] recipient: [36:52:2096] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:59:2057] recipient: [36:52:2096] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:76:2057] recipient: [36:14:2061] !Reboot 72057594037927937 (actor [36:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:86:2057] recipient: [36:37:2084] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:89:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:90:2057] recipient: [36:88:2116] Leader for TabletID 72057594037927937 is [36:91:2117] sender: [36:92:2057] recipient: [36:88:2116] !Reboot 72057594037927937 (actor [36:58:2098]) rebooted! !Reboot 72057594037927937 (actor [36:58:2098]) tablet resolver refreshed! new actor is[36:91:2117] Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:56:2057] recipient: [37:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:56:2057] recipient: [37:53:2096] Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:59:2057] recipient: [37:53:2096] Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:76:2057] recipient: [37:14:2061] >> KqpImmediateEffects::MultipleEffectsWithIndex [GOOD] >> KqpImmediateEffects::MultiShardUpsertAfterRead >> KqpImmediateEffects::UpsertDuplicates [GOOD] >> KqpImmediateEffects::UpsertConflictInteractiveTxAborted >> KqpWrite::CastValues [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::InsertConflictTxAborted [GOOD] Test command err: Trying to start YDB, gRPC: 22939, MsgBus: 61011 2025-06-24T21:10:43.774307Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626351368369387:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:43.774442Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0019ca/r3tmp/tmpfHpxkg/pdisk_1.dat 2025-06-24T21:10:44.088489Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:44.088828Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626351368369366:2079] 1750799443773511 != 1750799443773514 TServer::EnableGrpc on GrpcPort 22939, node 1 2025-06-24T21:10:44.118848Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:44.118955Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:44.121263Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:44.122891Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:44.122917Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:44.122925Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:44.123037Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61011 TClient is connected to server localhost:61011 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:44.590890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:44.617497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:44.745770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:44.858065Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:44.912258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:44.978469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:46.344968Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626364253272911:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:46.345058Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:46.615921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:46.641340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:46.667604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:46.695385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:46.723829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:46.755460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:46.790017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:46.844438Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626364253273568:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:46.844537Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:46.844566Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626364253273573:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:46.847466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:46.856241Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626364253273575:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:46.939401Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626364253273628:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:47.936007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:48.250975Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=4; 2025-06-24T21:10:48.258460Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 4 at tablet 72075186224037922 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:10:48.258639Z node 1 :TX_DATASHARD ERROR: finish_propose_ ... eStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:49.620047Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:49.628693Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.679370Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
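Note on the repeated "Resource pool default not found or you don't have access permissions" warnings and the subsequent ESchemeOpCreateResourcePool / "Check failed: path ... error: path exist, request accepts it" messages earlier in this block: this is the workload service lazily creating the default resource pool at /Root/.metadata/workload_manager/pools/default on first use, so the NOT_FOUND warnings and the "doublechecking" retry are part of that bootstrap rather than test failures. For comparison only, a minimal sketch of the kind of DDL involved, assuming the CREATE RESOURCE POOL syntax of YDB's workload manager (the pool name and option values below are illustrative assumptions, not taken from this run; option names should be checked against the docs for the YDB version in use):

    -- Illustrative only: creates a hypothetical pool named demo_pool.
    -- The "default" pool seen in the log is created automatically by the service.
    CREATE RESOURCE POOL demo_pool WITH (
        CONCURRENT_QUERY_LIMIT = 10,
        QUEUE_SIZE = 100
    );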
2025-06-24T21:10:49.869913Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.945673Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.966284Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:51.750431Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626385013502340:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.750540Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.802350Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.836978Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.868004Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.896872Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.929869Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.006442Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.047391Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.122076Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626389308470297:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:52.122138Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:52.122161Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626389308470302:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:52.126109Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:52.135977Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626389308470304:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:52.231091Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626389308470355:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:53.291287Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.576503Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=4; 2025-06-24T21:10:53.576693Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 4 at tablet 72075186224037922 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:10:53.576811Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 4 at tablet 72075186224037922 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:10:53.576933Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [2:7519626393603438136:2472], Table: `/Root/TestImmediateEffects` ([72057594046644480:17:1]), SessionActorId: [2:7519626393603437920:2472]Got CONSTRAINT VIOLATION for table `/Root/TestImmediateEffects`. ShardID=72075186224037922, Sink=[2:7519626393603438136:2472].{
: Error: Conflict with existing key., code: 2012 } 2025-06-24T21:10:53.576998Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519626393603438126:2472], SessionActorId: [2:7519626393603437920:2472], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/TestImmediateEffects`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[2:7519626393603437920:2472]. isRollback=0 2025-06-24T21:10:53.577216Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=ZWU4OTAyYWUtNzI0NTllNDMtNzE1NDJlNGYtNDE3MGY0YjA=, ActorId: [2:7519626393603437920:2472], ActorState: ExecuteState, TraceId: 01jyhwf38xfxa2xa7hs6z6ena6, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [2:7519626393603438127:2472] from: [2:7519626393603438126:2472] 2025-06-24T21:10:53.577301Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519626393603438127:2472] TxId: 281474976715674. Ctx: { TraceId: 01jyhwf38xfxa2xa7hs6z6ena6, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWU4OTAyYWUtNzI0NTllNDMtNzE1NDJlNGYtNDE3MGY0YjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/TestImmediateEffects`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-24T21:10:53.577470Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZWU4OTAyYWUtNzI0NTllNDMtNzE1NDJlNGYtNDE3MGY0YjA=, ActorId: [2:7519626393603437920:2472], ActorState: ExecuteState, TraceId: 01jyhwf38xfxa2xa7hs6z6ena6, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNamedExpressions::NamedExpressionRandom-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 22983, MsgBus: 18632 2025-06-24T21:09:27.052034Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626026990623857:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:27.052179Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031ab/r3tmp/tmptbIRGp/pdisk_1.dat 2025-06-24T21:09:27.387438Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:27.387810Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626026990623836:2079] 1750799367051448 != 1750799367051451 TServer::EnableGrpc on GrpcPort 22983, node 1 2025-06-24T21:09:27.422668Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:27.424017Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:27.428986Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:27.454765Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:27.454795Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:27.454804Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:27.454949Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18632 TClient is connected to server localhost:18632 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
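Note on the STATUS_CONSTRAINT_VIOLATION / PRECONDITION_FAILED chain earlier in this block (issue code 2012, "Conflict with existing key" on /Root/TestImmediateEffects, tx 281474976715674): this is the expected shape of an INSERT that hits an already existing primary key, which is what the KqpImmediateEffects conflict tests provoke deliberately. A minimal YQL sketch under assumed names (a table TestImmediateEffects with primary key Key; these are not the exact statements the test runs):

    -- Assumed schema, for illustration only.
    CREATE TABLE TestImmediateEffects (
        Key Uint64,
        Value String,
        PRIMARY KEY (Key)
    );

    INSERT INTO TestImmediateEffects (Key, Value) VALUES (1u, "a");
    -- A second INSERT with the same key fails with issue code 2012
    -- ("Conflict with existing key"); UPSERT would overwrite instead of failing.
    INSERT INTO TestImmediateEffects (Key, Value) VALUES (1u, "b");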
2025-06-24T21:09:27.978533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:28.006225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:28.060469Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:09:28.143412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:28.295090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:28.378501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:30.218114Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626039875527373:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:30.218264Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:30.548655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:30.582605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:30.613271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:30.648025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:30.680576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:30.715360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:30.786389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:30.843068Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626039875528037:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:30.843136Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:30.843215Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626039875528042:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:30.847976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:30.860256Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626039875528044:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:09:30.949382Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626039875528095:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:32.052033Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626026990623857:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:32.052150Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 21740, MsgBus: 24274 2025-06-24T21:09:33.819783Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626050171788409:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:33.819863Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPa ... "]];[["5864682b-fe9e-4bba-a7b4-e52d8cc08708"]]] [[["11699d86-8578-477a-80b5-555768a8dd5e"]];[["5864682b-fe9e-4bba-a7b4-e52d8cc08708"]]] [["6c91fe6c-886b-46d2-a2a0-fe92a574a579"];["eb4cf3aa-6aac-4503-a433-dff75b4f46d3"]] Trying to start YDB, gRPC: 19483, MsgBus: 6946 2025-06-24T21:10:44.317206Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519626356026284663:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:44.317327Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031ab/r3tmp/tmpGjXjDh/pdisk_1.dat 2025-06-24T21:10:44.457374Z node 12 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:44.457727Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:7519626356026284643:2079] 1750799444316481 != 1750799444316484 2025-06-24T21:10:44.477554Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:44.477686Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:44.480560Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19483, node 12 2025-06-24T21:10:44.518426Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:44.518460Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:44.518474Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:44.518702Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration TClient is connected to server localhost:6946 TClient is connected to server localhost:6946 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:45.066027Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:45.079429Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:45.161410Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:45.344690Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:45.399028Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:45.498398Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:49.035862Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519626377501122779:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.035987Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.188152Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.227004Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.268750Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.307979Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.317464Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519626356026284663:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:49.317526Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:10:49.351174Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.398124Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.444089Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.635348Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[12:7519626377501123438:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.635455Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519626377501123443:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.635470Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.641865Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:49.654975Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519626377501123445:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:49.754272Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7519626377501123496:3429] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } [[["5abab796-2073-4790-aed6-46b501959fba"]];[["aec6c38f-5166-43c8-9f12-6fec03c5eb28"]]] [[["5abab796-2073-4790-aed6-46b501959fba"]];[["aec6c38f-5166-43c8-9f12-6fec03c5eb28"]]] [["89af2eff-3e6b-46ac-bac0-ee9b0395abbe"];["c8406c4f-0bca-486a-9f16-8f208ee47c4a"]] >> KqpImmediateEffects::ConflictingKeyR1WRR2 [GOOD] >> KqpImmediateEffects::ConflictingKeyRW1RR2 >> KqpImmediateEffects::InteractiveTxWithReadAtTheEnd+UseSink [GOOD] >> KqpImmediateEffects::InteractiveTxWithReadAtTheEnd-UseSink >> KqpInplaceUpdate::Negative_SingleRowListFromRange+UseSink [GOOD] >> KqpInplaceUpdate::Negative_BatchUpdate-UseSink >> KqpEffects::InsertAbort_Select_Conflict+UseSink [GOOD] >> KqpEffects::InsertAbort_Select_Conflict-UseSink >> KqpImmediateEffects::ReplaceExistingKey [GOOD] >> KqpImmediateEffects::TxWithReadAtTheEnd+UseSink >> KqpImmediateEffects::UnobservedUncommittedChangeConflict [GOOD] >> KqpWrite::UpsertNullKey [GOOD] >> KqpWrite::ProjectReplace-UseSink >> TKeyValueTest::TestCopyRangeToLongKey [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpWrite::CastValues [GOOD] Test command err: Trying to start YDB, gRPC: 8374, MsgBus: 14642 2025-06-24T21:10:44.069437Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626357368658525:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:44.069735Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0019c3/r3tmp/tmppp94Pc/pdisk_1.dat 2025-06-24T21:10:44.331249Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626357368658503:2079] 1750799444068573 != 1750799444068576 2025-06-24T21:10:44.345396Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8374, node 1 2025-06-24T21:10:44.416945Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:44.416978Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:44.416988Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:44.417184Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:44.430434Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:44.430583Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:44.432564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server 
localhost:14642 TClient is connected to server localhost:14642 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:44.890382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:44.907838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:45.055931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:45.166754Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:45.204041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:45.257886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:46.989107Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626365958594741:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:46.989255Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:47.308008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.335088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.363190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.390916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.418605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.465604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.530829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.576849Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626370253562697:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:47.576933Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:47.576992Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626370253562702:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:47.580479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:47.588702Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626370253562704:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:47.650026Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626370253562755:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:48.973065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.069358Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626357368658525:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:49.069437Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 27148, MsgBus: 7235 2025-06-24T21:10:50.201695Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626382844729408:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:50.201822Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0019c3/r3tmp/tmpYd5m8D/pdisk_1.dat 2025-06-24T21:10:50.310861Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:50.313079Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626382844729389:2079] 1750799450201032 != 1750799450201035 TServer::EnableGrpc on GrpcPort 27148, node 2 2025-06-24T21:10:50.339592Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:50.339694Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:50.342191Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:50.372745Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:50.372770Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:50.372779Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:50.372889Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7235 TClient is connected to server localhost:7235 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:10:50.820921Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:50.832775Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:50.898826Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.051257Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.126399Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.260425Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:53.348983Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626395729632915:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.349104Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.399003Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.443296Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.470353Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.498641Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.523962Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.556267Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.625647Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.677966Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626395729633573:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.678043Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.678047Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626395729633578:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.681127Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:53.689216Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626395729633580:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:53.766111Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626395729633631:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:55.202093Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626382844729408:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:55.202173Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpImmediateEffects::ImmediateUpdateSelect [GOOD] >> KqpEffects::InsertRevert_Literal_Conflict [GOOD] |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest >> KqpEffects::AlterAfterUpsertTransaction+UseSink [GOOD] >> KqpEffects::AlterAfterUpsertTransaction-UseSink |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest >> KqpImmediateEffects::ConflictingKeyR1WR2 [GOOD] >> KqpImmediateEffects::ConflictingKeyR1RWR2 >> KqpEffects::InsertAbort_Literal_Duplicates-UseSink [GOOD] >> KqpEffects::InsertAbort_Literal_Conflict-UseSink >> KqpUserConstraint::KqpReadNull-UploadNull >> KqpUserConstraint::KqpReadNull+UploadNull |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestCopyRangeToLongKey [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! 
new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:82:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:81:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:81:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! 
new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:83:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:85:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:86:2114] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:89:2057] recipient: [7:86:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! new actor is[7:88:2115] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:174:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:86:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:89:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:90:2057] recipient: [8:88:2117] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:92:2057] recipient: [8:88:2117] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! 
new actor is[8:91:2118] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:177:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:86:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:89:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:90:2057] recipient: [9:88:2117] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:92:2057] recipient: [9:88:2117] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! new actor is[9:91:2118] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:177:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:87:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:91:2057] recipient: [10:89:2117] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:93:2057] recipient: [10:89:2117] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! new actor is[10:92:2118] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:178:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:90:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:93:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:94:2057] recipient: [11:92:2120] Leader for TabletID 72057594037927937 is [11:95:2121] sender: [11:96:2057] recipient: [11:92:2120] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! 
new actor is[11:95:2121] Leader for TabletID 72057594037927937 is [11:95:2121] sender: [11:181:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... 7 is [27:58:2098] sender: [27:91:2057] recipient: [27:37:2084] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:94:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [27:58:2098] sender: [27:95:2057] recipient: [27:93:2120] Leader for TabletID 72057594037927937 is [27:96:2121] sender: [27:97:2057] recipient: [27:93:2120] !Reboot 72057594037927937 (actor [27:58:2098]) rebooted! !Reboot 72057594037927937 (actor [27:58:2098]) tablet resolver refreshed! new actor is[27:96:2121] Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:56:2057] recipient: [28:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:56:2057] recipient: [28:53:2096] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:59:2057] recipient: [28:53:2096] Leader for TabletID 72057594037927937 is [28:58:2098] sender: [28:76:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:56:2057] recipient: [29:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:56:2057] recipient: [29:51:2096] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:59:2057] recipient: [29:51:2096] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:76:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:56:2057] recipient: [30:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:56:2057] recipient: [30:51:2096] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:59:2057] recipient: [30:51:2096] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:76:2057] recipient: [30:14:2061] !Reboot 72057594037927937 (actor [30:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:78:2057] recipient: [30:37:2084] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:80:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:82:2057] recipient: [30:81:2111] Leader for TabletID 72057594037927937 is [30:83:2112] sender: [30:84:2057] recipient: [30:81:2111] !Reboot 72057594037927937 (actor [30:58:2098]) rebooted! !Reboot 72057594037927937 (actor [30:58:2098]) tablet resolver refreshed! new actor is[30:83:2112] Leader for TabletID 72057594037927937 is [30:83:2112] sender: [30:169:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:56:2057] recipient: [31:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:56:2057] recipient: [31:52:2096] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:59:2057] recipient: [31:52:2096] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:76:2057] recipient: [31:14:2061] !Reboot 72057594037927937 (actor [31:58:2098]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! 
Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:78:2057] recipient: [31:37:2084] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:81:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:82:2057] recipient: [31:80:2111] Leader for TabletID 72057594037927937 is [31:83:2112] sender: [31:84:2057] recipient: [31:80:2111] !Reboot 72057594037927937 (actor [31:58:2098]) rebooted! !Reboot 72057594037927937 (actor [31:58:2098]) tablet resolver refreshed! new actor is[31:83:2112] Leader for TabletID 72057594037927937 is [31:83:2112] sender: [31:169:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:56:2057] recipient: [32:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:56:2057] recipient: [32:52:2096] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:59:2057] recipient: [32:52:2096] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:76:2057] recipient: [32:14:2061] !Reboot 72057594037927937 (actor [32:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:79:2057] recipient: [32:37:2084] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:82:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:83:2057] recipient: [32:81:2111] Leader for TabletID 72057594037927937 is [32:84:2112] sender: [32:85:2057] recipient: [32:81:2111] !Reboot 72057594037927937 (actor [32:58:2098]) rebooted! !Reboot 72057594037927937 (actor [32:58:2098]) tablet resolver refreshed! new actor is[32:84:2112] Leader for TabletID 72057594037927937 is [32:84:2112] sender: [32:170:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:56:2057] recipient: [33:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:56:2057] recipient: [33:52:2096] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:59:2057] recipient: [33:52:2096] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:76:2057] recipient: [33:14:2061] !Reboot 72057594037927937 (actor [33:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:82:2057] recipient: [33:37:2084] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:85:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:86:2057] recipient: [33:84:2114] Leader for TabletID 72057594037927937 is [33:87:2115] sender: [33:88:2057] recipient: [33:84:2114] !Reboot 72057594037927937 (actor [33:58:2098]) rebooted! !Reboot 72057594037927937 (actor [33:58:2098]) tablet resolver refreshed! new actor is[33:87:2115] Leader for TabletID 72057594037927937 is [33:87:2115] sender: [33:173:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:56:2057] recipient: [34:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:56:2057] recipient: [34:52:2096] Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:59:2057] recipient: [34:52:2096] Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:76:2057] recipient: [34:14:2061] !Reboot 72057594037927937 (actor [34:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:82:2057] recipient: [34:37:2084] Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:85:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:86:2057] recipient: [34:84:2114] Leader for TabletID 72057594037927937 is [34:87:2115] sender: [34:88:2057] recipient: [34:84:2114] !Reboot 72057594037927937 (actor [34:58:2098]) rebooted! !Reboot 72057594037927937 (actor [34:58:2098]) tablet resolver refreshed! new actor is[34:87:2115] Leader for TabletID 72057594037927937 is [34:87:2115] sender: [34:173:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:56:2057] recipient: [35:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:56:2057] recipient: [35:52:2096] Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:59:2057] recipient: [35:52:2096] Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:76:2057] recipient: [35:14:2061] !Reboot 72057594037927937 (actor [35:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:83:2057] recipient: [35:37:2084] Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:86:2057] recipient: [35:85:2114] Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:87:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [35:88:2115] sender: [35:89:2057] recipient: [35:85:2114] !Reboot 72057594037927937 (actor [35:58:2098]) rebooted! !Reboot 72057594037927937 (actor [35:58:2098]) tablet resolver refreshed! new actor is[35:88:2115] Leader for TabletID 72057594037927937 is [35:88:2115] sender: [35:174:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:56:2057] recipient: [36:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:56:2057] recipient: [36:52:2096] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:59:2057] recipient: [36:52:2096] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:76:2057] recipient: [36:14:2061] !Reboot 72057594037927937 (actor [36:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:86:2057] recipient: [36:37:2084] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:89:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:90:2057] recipient: [36:88:2117] Leader for TabletID 72057594037927937 is [36:91:2118] sender: [36:92:2057] recipient: [36:88:2117] !Reboot 72057594037927937 (actor [36:58:2098]) rebooted! !Reboot 72057594037927937 (actor [36:58:2098]) tablet resolver refreshed! new actor is[36:91:2118] Leader for TabletID 72057594037927937 is [36:91:2118] sender: [36:177:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:56:2057] recipient: [37:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:56:2057] recipient: [37:53:2096] Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:59:2057] recipient: [37:53:2096] Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:76:2057] recipient: [37:14:2061] !Reboot 72057594037927937 (actor [37:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:86:2057] recipient: [37:37:2084] Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:89:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:90:2057] recipient: [37:88:2117] Leader for TabletID 72057594037927937 is [37:91:2118] sender: [37:92:2057] recipient: [37:88:2117] !Reboot 72057594037927937 (actor [37:58:2098]) rebooted! !Reboot 72057594037927937 (actor [37:58:2098]) tablet resolver refreshed! new actor is[37:91:2118] Leader for TabletID 72057594037927937 is [37:91:2118] sender: [37:177:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:56:2057] recipient: [38:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:56:2057] recipient: [38:52:2096] Leader for TabletID 72057594037927937 is [38:58:2098] sender: [38:59:2057] recipient: [38:52:2096] Leader for TabletID 72057594037927937 is [38:58:2098] sender: [38:76:2057] recipient: [38:14:2061] !Reboot 72057594037927937 (actor [38:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [38:58:2098] sender: [38:87:2057] recipient: [38:37:2084] Leader for TabletID 72057594037927937 is [38:58:2098] sender: [38:89:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [38:58:2098] sender: [38:91:2057] recipient: [38:90:2117] Leader for TabletID 72057594037927937 is [38:92:2118] sender: [38:93:2057] recipient: [38:90:2117] !Reboot 72057594037927937 (actor [38:58:2098]) rebooted! !Reboot 72057594037927937 (actor [38:58:2098]) tablet resolver refreshed! new actor is[38:92:2118] Leader for TabletID 72057594037927937 is [38:92:2118] sender: [38:178:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:56:2057] recipient: [39:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:56:2057] recipient: [39:52:2096] Leader for TabletID 72057594037927937 is [39:58:2098] sender: [39:59:2057] recipient: [39:52:2096] Leader for TabletID 72057594037927937 is [39:58:2098] sender: [39:76:2057] recipient: [39:14:2061] >> KqpImmediateEffects::UpdateAfterUpsert [GOOD] >> KqpImmediateEffects::UpdateAfterInsert >> KqpImmediateEffects::ConflictingKeyRW1WRR2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::UnobservedUncommittedChangeConflict [GOOD] Test command err: Trying to start YDB, gRPC: 11663, MsgBus: 61004 2025-06-24T21:10:44.919616Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626355938963443:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:44.920065Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0019b4/r3tmp/tmpqfnrLi/pdisk_1.dat 2025-06-24T21:10:45.231890Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626355938963413:2079] 1750799444918880 != 1750799444918883 2025-06-24T21:10:45.246935Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11663, node 1 2025-06-24T21:10:45.301155Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:45.301280Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:45.302965Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:45.309096Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:45.309120Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:45.309133Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:45.309300Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61004 TClient is connected to server localhost:61004 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:45.760125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:45.774296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:45.911811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:45.925744Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:46.050425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:46.124382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:47.476448Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626368823866948:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:47.476542Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:47.748774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.772665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.797188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.821355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.846234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.875819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.944670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:48.026463Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626373118834913:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:48.026550Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:48.026591Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626373118834918:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:48.030527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:48.040918Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626373118834920:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:48.110545Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626373118834971:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:49.160814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 12453, MsgBus: 22953 2025-06-24T21:10:50.553973Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626382456281922:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:50.554105Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /ho ... TypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:51.154435Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:51.173372Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.231873Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:10:51.399027Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.462916Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.592371Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:53.683574Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626395341185415:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.683666Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.741197Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.767654Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.800572Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.832261Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.860712Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.933783Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.004325Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.066467Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626399636153374:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.066549Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.066767Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626399636153379:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.070151Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:54.079610Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626399636153381:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:54.172760Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626399636153432:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:55.292067Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.554461Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626382456281922:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:55.554552Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:10:55.800748Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976715676; 2025-06-24T21:10:55.813725Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [2:7519626403931121264:2500], Table: `/Root/TestImmediateEffects` ([72057594046644480:17:1]), SessionActorId: [2:7519626403931121204:2500]Got LOCKS BROKEN for table `/Root/TestImmediateEffects`. ShardID=72075186224037922, Sink=[2:7519626403931121264:2500].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-24T21:10:55.814437Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519626403931121257:2500], SessionActorId: [2:7519626403931121204:2500], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/TestImmediateEffects`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[2:7519626403931121204:2500]. isRollback=0 2025-06-24T21:10:55.814740Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=N2FmMDJkMjItN2E0M2FiMTktYzhkMTZkZGQtYjVmMzA4NDQ=, ActorId: [2:7519626403931121204:2500], ActorState: ExecuteState, TraceId: 01jyhwf5hm6vp8dn0qq8bmdctg, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7519626403931121258:2500] from: [2:7519626403931121257:2500] 2025-06-24T21:10:55.814816Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519626403931121258:2500] TxId: 281474976715676. Ctx: { TraceId: 01jyhwf5hm6vp8dn0qq8bmdctg, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2FmMDJkMjItN2E0M2FiMTktYzhkMTZkZGQtYjVmMzA4NDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/TestImmediateEffects`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-24T21:10:55.815013Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=N2FmMDJkMjItN2E0M2FiMTktYzhkMTZkZGQtYjVmMzA4NDQ=, ActorId: [2:7519626403931121204:2500], ActorState: ExecuteState, TraceId: 01jyhwf5hm6vp8dn0qq8bmdctg, Create QueryResponse for error on request, msg: >> TSchemeShardLoginTest::CheckThatLockedOutParametersIsRestoredFromLocalDb [GOOD] >> KqpImmediateEffects::ForceImmediateEffectsExecution-UseSink [GOOD] >> KqpImmediateEffects::ImmediateUpdate >> KqpImmediateEffects::DeleteAfterUpsert [GOOD] >> KqpImmediateEffects::DeleteAfterInsert ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ImmediateUpdateSelect [GOOD] Test command err: Trying to start YDB, gRPC: 5966, MsgBus: 20891 2025-06-24T21:10:45.229722Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626358578740432:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:45.229863Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0019af/r3tmp/tmpF4gQO1/pdisk_1.dat 2025-06-24T21:10:45.543943Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:45.544233Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626358578740405:2079] 1750799445229006 != 1750799445229009 TServer::EnableGrpc on GrpcPort 5966, node 1 2025-06-24T21:10:45.602887Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:45.602990Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:45.604903Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:45.618942Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:45.618970Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:45.618978Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:45.619182Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20891 TClient is connected to server localhost:20891 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:46.056150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:46.074003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:46.197352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:46.239402Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:46.318076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:46.393589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:47.894372Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626367168676652:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:47.894475Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:48.201081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:48.223309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:48.248459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:48.275517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:48.300444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:48.369606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:48.408003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:48.489557Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626371463644615:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:48.489634Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:48.489648Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626371463644620:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:48.493001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:48.501887Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626371463644622:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:48.565042Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626371463644673:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:49.563902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:50.232033Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626358578740432:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:50.232670Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 20292, MsgBus: 7031 2025-06-24T21:10:51.088385Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626386944457053:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:51.107648Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0019af/r3tmp/tmpIFK2hM/pdisk_1.dat 2025-06-24T21:10:51.193519Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:51.194147Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626386944456933:2079] 1750799451076392 != 1750799451076395 TServer::EnableGrpc on GrpcPort 20292, node 2 2025-06-24T21:10:51.219097Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:51.219223Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:51.231491Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:51.264355Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:51.264377Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:51.264385Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:51.264485Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7031 TClient is connected to server localhost:7031 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:51.709899Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:51.715522Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:10:51.723568Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.777357Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.933517Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:52.004872Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:52.122085Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:54.295489Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626399829360450:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.295601Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.352619Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.382413Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.410165Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.437930Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.463776Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.531751Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.583513Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.669038Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626399829361111:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.669119Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.669129Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626399829361116:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.672249Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:54.682049Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626399829361118:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:54.744628Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626399829361169:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:55.717094Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.082311Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626386944457053:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:56.082378Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::InsertRevert_Literal_Conflict [GOOD] Test command err: Trying to start YDB, gRPC: 7586, MsgBus: 2643 2025-06-24T21:10:39.382715Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626333400054017:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:39.382881Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0019f9/r3tmp/tmpzAHtVJ/pdisk_1.dat 2025-06-24T21:10:39.623298Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626333400053995:2079] 1750799439381964 != 1750799439381967 2025-06-24T21:10:39.624277Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7586, node 1 2025-06-24T21:10:39.664909Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:39.664938Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:39.664946Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:39.665055Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:39.708677Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:39.708771Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:39.710578Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2643 TClient is connected to server localhost:2643 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:40.151330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:40.175287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.304556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.401916Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:40.457758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:40.518506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:42.157328Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626346284957537:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.157470Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.444903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.469496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.495249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.522227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.547880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.613362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.678921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:42.755621Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626346284958211:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.755730Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.755825Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626346284958216:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:42.758761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:42.767872Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626346284958218:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:42.871065Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626346284958269:3428] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:43.638066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 30159, MsgBus: 28896 2025-06-24T21:10:44.750229Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626354297832145:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:44.750292Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/ru ... ActorState: ExecuteState, TraceId: 01jyhwez30by4rvtnqbykqbvjy, Create QueryResponse for error on request, msg: 2025-06-24T21:10:49.750611Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626354297832145:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:49.750687Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 30749, MsgBus: 25961 2025-06-24T21:10:50.843057Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519626379705560110:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:50.844059Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0019f9/r3tmp/tmpHlXWGX/pdisk_1.dat 2025-06-24T21:10:50.962222Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:50.979575Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:50.979677Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:50.982506Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30749, node 3 2025-06-24T21:10:51.028392Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:51.028412Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:51.028419Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T21:10:51.028525Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25961 TClient is connected to server localhost:25961 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:51.531409Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:51.536812Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:10:51.549189Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.610171Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.757627Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.839060Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:51.850312Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:54.284891Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626396885430876:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.284999Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.349992Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.383447Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.450904Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.482363Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.553787Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.586281Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.617970Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.674418Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626396885431537:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.674512Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.674710Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626396885431542:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.678469Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:54.687894Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519626396885431544:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:54.773789Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519626396885431595:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:55.842839Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519626379705560110:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:55.842894Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::CheckThatLockedOutParametersIsRestoredFromLocalDb [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:10:29.706826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:29.706918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:29.706967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:29.707008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:29.707932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:29.707975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:29.708072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:29.708155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:29.708934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:29.710359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:29.775348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:10:29.775397Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:29.787063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:29.787437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:29.787599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:29.793535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:29.793682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:29.795758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.796056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:29.803707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:29.804120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:29.810001Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:29.810053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:29.810751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:29.810803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:29.810846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:29.810918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.816049Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:29.920330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:29.920502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.920650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 
0 2025-06-24T21:10:29.920685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:29.920851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:29.920906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:29.922690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.922831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:29.922945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.922992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:29.923040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:29.923069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:29.924646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.924689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:29.924721Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:29.925943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.925980Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:29.926031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.926073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:29.928690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 
2025-06-24T21:10:29.930050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:29.930185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:29.930937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:29.931044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:10:29.931082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.931312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:10:29.931351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:29.931499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:10:29.931556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:10:29.933131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:29.933165Z node 1 :FLAT_TX_SCHEMESHARD ... 
utes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.638351Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.638856Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.638955Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-24T21:10:55.639233Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.639339Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.639439Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.639549Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.639626Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.639767Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.640103Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.640229Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.640621Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.640735Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.640931Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.641025Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.641139Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.641418Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.641510Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.641649Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.641938Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.642020Z node 5 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.642155Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.642232Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.642298Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T21:10:55.650709Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:55.653336Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:55.653441Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:55.653691Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:55.653752Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:55.653798Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:55.655350Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [5:375:2342] sender: [5:433:2058] recipient: [5:15:2062] 2025-06-24T21:10:55.708622Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-24T21:10:55.708690Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:46: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-06-24T21:10:55.752278Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "User user1 login denied: too many failed password attempts", at schemeshard: 72057594046678944 2025-06-24T21:10:55.752435Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:55.752495Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:55.752740Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:55.752791Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:425:2381], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-06-24T21:10:55.753407Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 0 2025-06-24T21:10:57.754241Z node 5 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-24T21:10:57.765977Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Token: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjMifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwODQyNjU3LCJpYXQiOjE3NTA3OTk0NTcsInN1YiI6InVzZXIxIn0.sX2IRCqaBNLcd5duyEG9G7uTupw3e3Pc2nQwFwZatuMHx0KYzMkJTpSVLHDp2cQXVXdk_cTUcA495rm15b03vq0QWWXtL3Y4WQpStJ_5ZNA6ZXsi-z2nsKsOtI1WEZC36n190jOdCGpVLc7MXExuEM3UVSmMmGradbdwvfUdCYMcq0ma3QrenebvHxdxeHfCZhLtJ3gdpfVn8_O91yFdMe3LuOK7DPUuuwQhb27aOXqae47eSdbsjIAFAWqh5PJNHUgvUciB2ez2k4VsbH_lFmbtk-rZKuQR7ZlMQec59NEej-KyIDpoQHq0ZIGyrFUgxbDoLu6QX5RB6HSeN1UcaA" SanitizedToken: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjMifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwODQyNjU3LCJpYXQiOjE3NTA3OTk0NTcsInN1YiI6InVzZXIxIn0.**" IsAdmin: true, at schemeshard: 72057594046678944 2025-06-24T21:10:57.766664Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:10:57.766959Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 346us result status StatusSuccess 2025-06-24T21:10:57.767584Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 4 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA72hJedsGvAU68ue7gtbn\nnBUN7OJ9tFzMl25KpgaimmbjymwiH+m0Osx1NRV+S2SB60dZsgU+/RfwsqF+gzja\nkVuUXaHHAuzvToh3F6N5rTgkg4MVu2TbAh7bCdUf+5sOTbNsPMHUvbtEL+t9J8ep\ny2iRjcGWcTTTmKL08Fvcc8eF4wFNFH7zFqL0JcL2lT6pWJgnLpeM0b+n5pjg5uKg\npHiO9Mw/rn57lXjOs1e6b+rdVEHuJ9lsOM23cqyIyxzl/HA0VQOvPuDSDmmbjYGs\n18pgsRNhb5QVB5pEP/C/qnYwTyuiGb0L0ePLQukY1MRggP3U2fDX6E0JzyOK2JA0\nFQIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1750885853368 } PublicKeys { KeyId: 2 KeyDataPEM: "-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnh28UVZcJAU/eqv02Cfc\nQXwPtIUlRpz5Ww7HZrs0zWjwK+tuMsEvY78fomiJpZvzCzZWtydBfQ5MO6r1mTgp\n8YLVLTgb38EqVCO2rC5+NMPMn46mpgh8wDHN4Wb3ICwQU76SNn24rsfUEij22t2j\nRhGwbdHQbikc/5isgQavsUQHmpiN9NL8lSpXkmeAlp3qCXKMMsafHBF+TTnsMfv7\nO0StsT2tbMmE6GdgpX+SYKoYGKAATJNBl6GfXvSkq0Z7lITOxT37nIYtD+2uyWYg\nbDHeAjmfEX810GI02+p/Gcn9d+1vfaMcTPNZJYvG7ZdM0+IBe98BIwUQjNQgpOUn\nIQIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1750885853552 } PublicKeys { KeyId: 3 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0gYM+SJ4UUHnYjgW3FVi\nrXrlzfABUFzU3gJha/x4xDzWmkQlkufjMhYxijXXshAcC6aYzT1QlmssrrwoD3KF\n4fYX7T88lDnxoVzg2OE0AoCbon0nUQ6TVeFJRh5Yf3Th5lNGgumy2AnY66zEpQuG\n0cSILdPTEIZDuMxuVvRx34uiuaWRmNdhKqjK4YWVGyUkYR5H+Sc2vTd8ICIXQVto\n1PCZB5+DM8hR86bhZGuMw6lMaX++8aUuKvYm+Qvv61P6FyIKwhDMO33BwcuNN+I4\nfK69zaq/YSOx0hAKvUttDklUrzugsyUsKvxCFU+BMnuzI7xHIITPpYc3/HzxWjrL\nCQIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1750885855746 } Sids { Name: "user1" Type: USER } Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpImmediateEffects::Replace [GOOD] >> KqpImmediateEffects::ReplaceDuplicates >> KqpWrite::CastValuesOptional [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ConflictingKeyRW1WRR2 [GOOD] Test command err: Trying to start YDB, gRPC: 14755, MsgBus: 2841 2025-06-24T21:10:46.216937Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626364031296581:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:46.217042Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0019a6/r3tmp/tmp0Sd02P/pdisk_1.dat 2025-06-24T21:10:46.498762Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:46.499121Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626364031296562:2079] 1750799446216196 != 1750799446216199 TServer::EnableGrpc on GrpcPort 14755, node 1 2025-06-24T21:10:46.566805Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:46.566839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:46.566847Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:46.566979Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:46.595888Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:46.596027Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:46.597725Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2841 TClient is connected to server localhost:2841 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:47.008039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:47.027475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:47.115964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:47.224914Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:47.264962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:47.340473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.238714Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626376916200110:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.238853Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.537922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.565758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.591548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.618639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.645071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.675617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.716936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.797893Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626376916200767:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.797970Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.797975Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626376916200772:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.801602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:49.811950Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626376916200774:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:49.902869Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626376916200825:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:50.824507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.218025Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626364031296581:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:51.218080Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 17119, MsgBus: 20614 2025-06-24T21:10:52.171208Z node 2 :METADATA_PROVIDER WARN: l ... node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:52.322351Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:52.322455Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20614 TClient is connected to server localhost:20614 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:52.760747Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:10:52.767171Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:10:52.780155Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:52.843997Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:52.994424Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:53.066426Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:53.195025Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:55.273932Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626404687318339:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.274026Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.332507Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.364036Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.389539Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.415023Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.442020Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.470492Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.500167Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.579164Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626404687318995:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.579218Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.579275Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626404687319000:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.582344Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:55.590587Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626404687319002:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:55.651038Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626404687319053:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:56.651346Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.171622Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626391802414855:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:57.171694Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:10:57.251376Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [2:7519626413277254192:2474], TxId: 281474976715678, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyhwf6wcdsvc31gea7yrh719. SessionId : ydb://session/3?node_id=2&id=YjlhMzUyYzMtOThjZTNhM2EtZmYwYzBkYWYtNWI0NDI3ZTQ=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Source[0] fatal error: {
: Error: Read request aborted subissue: {
: Error: Read conflict with concurrent transaction (shard# 72075186224037922 node# 2 state# Ready) } } 2025-06-24T21:10:57.251816Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [2:7519626413277254192:2474], TxId: 281474976715678, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyhwf6wcdsvc31gea7yrh719. SessionId : ydb://session/3?node_id=2&id=YjlhMzUyYzMtOThjZTNhM2EtZmYwYzBkYWYtNWI0NDI3ZTQ=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: ABORTED DEFAULT_ERROR: {
: Error: Read request aborted subissue: {
: Error: Read conflict with concurrent transaction (shard# 72075186224037922 node# 2 state# Ready) } }. 2025-06-24T21:10:57.252420Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YjlhMzUyYzMtOThjZTNhM2EtZmYwYzBkYWYtNWI0NDI3ZTQ=, ActorId: [2:7519626408982286621:2474], ActorState: ExecuteState, TraceId: 01jyhwf6wcdsvc31gea7yrh719, Create QueryResponse for error on request, msg: >> KqpInplaceUpdate::BigRow [GOOD] |94.7%| [TA] $(B)/ydb/core/tx/schemeshard/ut_login/test-results/unittest/{meta.json ... results_accumulator.log} |94.7%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_login/test-results/unittest/{meta.json ... results_accumulator.log} >> TKeyValueTest::TestGetStatusWorks [GOOD] |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest >> KqpImmediateEffects::ConflictingKeyRW1RWR2 [GOOD] >> KqpWrite::ProjectReplace+UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestGetStatusWorks [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! 
new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:82:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:82:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! 
new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:83:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:85:2114] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:89:2057] recipient: [7:85:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! new actor is[7:88:2115] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:174:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:85:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:88:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:89:2057] recipient: [8:87:2116] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:91:2057] recipient: [8:87:2116] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! new actor is[8:90:2117] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:176:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:85:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:88:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:89:2057] recipient: [9:87:2116] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:91:2057] recipient: [9:87:2116] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! 
new actor is[9:90:2117] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:176:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:86:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:89:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:90:2057] recipient: [10:88:2116] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:92:2057] recipient: [10:88:2116] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! new actor is[10:91:2117] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:177:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:88:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:91:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:92:2057] recipient: [11:90:2118] Leader for TabletID 72057594037927937 is [11:93:2119] sender: [11:94:2057] recipient: [11:90:2118] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! new actor is[11:93:2119] Leader for TabletID 72057594037927937 is [11:93:2119] sender: [11:179:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... or TabletID 72057594037927937 is [29:58:2098] sender: [29:91:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [29:58:2098] sender: [29:92:2057] recipient: [29:90:2118] Leader for TabletID 72057594037927937 is [29:93:2119] sender: [29:94:2057] recipient: [29:90:2118] !Reboot 72057594037927937 (actor [29:58:2098]) rebooted! !Reboot 72057594037927937 (actor [29:58:2098]) tablet resolver refreshed! 
new actor is[29:93:2119] Leader for TabletID 72057594037927937 is [29:93:2119] sender: [29:179:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:56:2057] recipient: [30:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:56:2057] recipient: [30:51:2096] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:59:2057] recipient: [30:51:2096] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:76:2057] recipient: [30:14:2061] !Reboot 72057594037927937 (actor [30:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:89:2057] recipient: [30:37:2084] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:91:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [30:58:2098] sender: [30:93:2057] recipient: [30:92:2118] Leader for TabletID 72057594037927937 is [30:94:2119] sender: [30:95:2057] recipient: [30:92:2118] !Reboot 72057594037927937 (actor [30:58:2098]) rebooted! !Reboot 72057594037927937 (actor [30:58:2098]) tablet resolver refreshed! new actor is[30:94:2119] Leader for TabletID 72057594037927937 is [30:94:2119] sender: [30:180:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:56:2057] recipient: [31:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:56:2057] recipient: [31:52:2096] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:59:2057] recipient: [31:52:2096] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:76:2057] recipient: [31:14:2061] !Reboot 72057594037927937 (actor [31:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:92:2057] recipient: [31:37:2084] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:95:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [31:58:2098] sender: [31:96:2057] recipient: [31:94:2121] Leader for TabletID 72057594037927937 is [31:97:2122] sender: [31:98:2057] recipient: [31:94:2121] !Reboot 72057594037927937 (actor [31:58:2098]) rebooted! !Reboot 72057594037927937 (actor [31:58:2098]) tablet resolver refreshed! new actor is[31:97:2122] Leader for TabletID 72057594037927937 is [31:97:2122] sender: [31:183:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:56:2057] recipient: [32:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:56:2057] recipient: [32:52:2096] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:59:2057] recipient: [32:52:2096] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:76:2057] recipient: [32:14:2061] !Reboot 72057594037927937 (actor [32:58:2098]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:92:2057] recipient: [32:37:2084] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:95:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [32:58:2098] sender: [32:96:2057] recipient: [32:94:2121] Leader for TabletID 72057594037927937 is [32:97:2122] sender: [32:98:2057] recipient: [32:94:2121] !Reboot 72057594037927937 (actor [32:58:2098]) rebooted! !Reboot 72057594037927937 (actor [32:58:2098]) tablet resolver refreshed! 
new actor is[32:97:2122] Leader for TabletID 72057594037927937 is [32:97:2122] sender: [32:183:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:56:2057] recipient: [33:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:56:2057] recipient: [33:52:2096] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:59:2057] recipient: [33:52:2096] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:76:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:56:2057] recipient: [34:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:56:2057] recipient: [34:52:2096] Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:59:2057] recipient: [34:52:2096] Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:76:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:56:2057] recipient: [35:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:56:2057] recipient: [35:52:2096] Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:59:2057] recipient: [35:52:2096] Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:76:2057] recipient: [35:14:2061] !Reboot 72057594037927937 (actor [35:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:78:2057] recipient: [35:37:2084] Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:80:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:82:2057] recipient: [35:81:2111] Leader for TabletID 72057594037927937 is [35:83:2112] sender: [35:84:2057] recipient: [35:81:2111] !Reboot 72057594037927937 (actor [35:58:2098]) rebooted! !Reboot 72057594037927937 (actor [35:58:2098]) tablet resolver refreshed! new actor is[35:83:2112] Leader for TabletID 72057594037927937 is [35:83:2112] sender: [35:169:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:56:2057] recipient: [36:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:56:2057] recipient: [36:52:2096] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:59:2057] recipient: [36:52:2096] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:76:2057] recipient: [36:14:2061] !Reboot 72057594037927937 (actor [36:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:78:2057] recipient: [36:37:2084] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:81:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:82:2057] recipient: [36:80:2111] Leader for TabletID 72057594037927937 is [36:83:2112] sender: [36:84:2057] recipient: [36:80:2111] !Reboot 72057594037927937 (actor [36:58:2098]) rebooted! !Reboot 72057594037927937 (actor [36:58:2098]) tablet resolver refreshed! 
new actor is[36:83:2112] Leader for TabletID 72057594037927937 is [36:83:2112] sender: [36:169:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:56:2057] recipient: [37:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:56:2057] recipient: [37:53:2096] Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:59:2057] recipient: [37:53:2096] Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:76:2057] recipient: [37:14:2061] !Reboot 72057594037927937 (actor [37:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:79:2057] recipient: [37:37:2084] Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:82:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:83:2057] recipient: [37:81:2111] Leader for TabletID 72057594037927937 is [37:84:2112] sender: [37:85:2057] recipient: [37:81:2111] !Reboot 72057594037927937 (actor [37:58:2098]) rebooted! !Reboot 72057594037927937 (actor [37:58:2098]) tablet resolver refreshed! new actor is[37:84:2112] Leader for TabletID 72057594037927937 is [37:84:2112] sender: [37:170:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:56:2057] recipient: [38:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:56:2057] recipient: [38:52:2096] Leader for TabletID 72057594037927937 is [38:58:2098] sender: [38:59:2057] recipient: [38:52:2096] Leader for TabletID 72057594037927937 is [38:58:2098] sender: [38:76:2057] recipient: [38:14:2061] !Reboot 72057594037927937 (actor [38:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [38:58:2098] sender: [38:81:2057] recipient: [38:37:2084] Leader for TabletID 72057594037927937 is [38:58:2098] sender: [38:84:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [38:58:2098] sender: [38:85:2057] recipient: [38:83:2113] Leader for TabletID 72057594037927937 is [38:86:2114] sender: [38:87:2057] recipient: [38:83:2113] !Reboot 72057594037927937 (actor [38:58:2098]) rebooted! !Reboot 72057594037927937 (actor [38:58:2098]) tablet resolver refreshed! new actor is[38:86:2114] Leader for TabletID 72057594037927937 is [38:86:2114] sender: [38:172:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:56:2057] recipient: [39:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:56:2057] recipient: [39:52:2096] Leader for TabletID 72057594037927937 is [39:58:2098] sender: [39:59:2057] recipient: [39:52:2096] Leader for TabletID 72057594037927937 is [39:58:2098] sender: [39:76:2057] recipient: [39:14:2061] !Reboot 72057594037927937 (actor [39:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [39:58:2098] sender: [39:81:2057] recipient: [39:37:2084] Leader for TabletID 72057594037927937 is [39:58:2098] sender: [39:84:2057] recipient: [39:14:2061] Leader for TabletID 72057594037927937 is [39:58:2098] sender: [39:85:2057] recipient: [39:83:2113] Leader for TabletID 72057594037927937 is [39:86:2114] sender: [39:87:2057] recipient: [39:83:2113] !Reboot 72057594037927937 (actor [39:58:2098]) rebooted! !Reboot 72057594037927937 (actor [39:58:2098]) tablet resolver refreshed! 
new actor is[39:86:2114] Leader for TabletID 72057594037927937 is [39:86:2114] sender: [39:172:2057] recipient: [39:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:56:2057] recipient: [40:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:56:2057] recipient: [40:53:2096] Leader for TabletID 72057594037927937 is [40:58:2098] sender: [40:59:2057] recipient: [40:53:2096] Leader for TabletID 72057594037927937 is [40:58:2098] sender: [40:76:2057] recipient: [40:14:2061] !Reboot 72057594037927937 (actor [40:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [40:58:2098] sender: [40:82:2057] recipient: [40:37:2084] Leader for TabletID 72057594037927937 is [40:58:2098] sender: [40:85:2057] recipient: [40:84:2113] Leader for TabletID 72057594037927937 is [40:58:2098] sender: [40:86:2057] recipient: [40:14:2061] Leader for TabletID 72057594037927937 is [40:87:2114] sender: [40:88:2057] recipient: [40:84:2113] !Reboot 72057594037927937 (actor [40:58:2098]) rebooted! !Reboot 72057594037927937 (actor [40:58:2098]) tablet resolver refreshed! new actor is[40:87:2114] Leader for TabletID 72057594037927937 is [40:87:2114] sender: [40:173:2057] recipient: [40:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:56:2057] recipient: [41:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:56:2057] recipient: [41:51:2096] Leader for TabletID 72057594037927937 is [41:58:2098] sender: [41:59:2057] recipient: [41:51:2096] Leader for TabletID 72057594037927937 is [41:58:2098] sender: [41:76:2057] recipient: [41:14:2061] >> KqpInplaceUpdate::Negative_SingleRowWithValueCast+UseSink [GOOD] >> KqpImmediateEffects::WriteThenReadWithCommit [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpWrite::CastValuesOptional [GOOD] Test command err: Trying to start YDB, gRPC: 29460, MsgBus: 23918 2025-06-24T21:10:47.722361Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626368621560611:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:47.722524Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00196d/r3tmp/tmpI9CShg/pdisk_1.dat 2025-06-24T21:10:48.045042Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626368621560589:2079] 1750799447721606 != 1750799447721609 2025-06-24T21:10:48.055292Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29460, node 1 2025-06-24T21:10:48.095562Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:48.095598Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:48.095606Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:48.095747Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:48.117008Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> 
Disconnected 2025-06-24T21:10:48.117154Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:48.119472Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23918 TClient is connected to server localhost:23918 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:48.606204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:48.633337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:48.729642Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:48.766353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:48.922156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:48.993907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:50.553136Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626381506464140:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:50.553251Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:50.931326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:50.959624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:50.986639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.019074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.049558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.120257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.187276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.253178Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626385801432101:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.253295Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.253473Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626385801432106:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.257697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:51.270821Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626385801432108:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:51.359614Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626385801432159:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:52.237396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.441704Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-24T21:10:52.453459Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 72075186224037911 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:10:52.453664Z node 1 :TX_DATASHARD ERROR: finish_propose_ ... ubissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-24T21:10:52.577573Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YTEyOTBhNmItMmFjNzQ1MTItMzU2OWQ4NmUtODBlZjc3M2U=, ActorId: [1:7519626390096399723:2472], ActorState: ExecuteState, TraceId: 01jyhwf29g2mnbxmz8nxxjvaqw, Create QueryResponse for error on request, msg:
: Error: Constraint violated. Table: `/Root/KeyValue`., code: 2012
: Error: Conflict with existing key., code: 2012 2025-06-24T21:10:52.722740Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626368621560611:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:52.722805Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 3231, MsgBus: 23135 2025-06-24T21:10:53.703797Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626393096045879:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:53.703878Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00196d/r3tmp/tmphxEQ28/pdisk_1.dat 2025-06-24T21:10:53.792965Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:53.795137Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626393096045860:2079] 1750799453703453 != 1750799453703456 TServer::EnableGrpc on GrpcPort 3231, node 2 2025-06-24T21:10:53.836176Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:53.836245Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:53.837787Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:53.852988Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:53.853009Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:53.853015Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:53.853155Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23135 TClient is connected to server localhost:23135 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:10:54.325693Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:54.335125Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:54.391171Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:54.535165Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:54.610855Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:54.732359Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:56.698969Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626405980949381:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:56.699055Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:56.755475Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.785658Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.822732Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.852757Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.881519Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.973560Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.042794Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.105046Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626410275917339:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:57.105149Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:57.105717Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626410275917344:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:57.109570Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:57.120074Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626410275917346:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:57.219808Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626410275917397:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::BigRow [GOOD] Test command err: Trying to start YDB, gRPC: 18915, MsgBus: 8118 2025-06-24T21:10:47.780896Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626367407921347:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:47.781042Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001977/r3tmp/tmp3vNsYK/pdisk_1.dat 2025-06-24T21:10:48.077908Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:48.078427Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626367407921325:2079] 1750799447780025 != 1750799447780028 TServer::EnableGrpc on GrpcPort 18915, node 1 2025-06-24T21:10:48.093926Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-24T21:10:48.094019Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-24T21:10:48.119202Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:48.119231Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:48.119243Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:48.119375Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:48.155542Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:48.155671Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:48.157298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8118 TClient is connected to server localhost:8118 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:48.562581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:10:48.590961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:48.706960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:48.837354Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:48.883202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:48.965301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:50.559841Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626380292824863:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:50.559975Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:50.886981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:50.914741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:50.940705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.013754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.052073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.082167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.111901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.173582Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626384587792817:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.173680Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.173864Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626384587792822:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.177583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:51.191106Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626384587792824:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:51.249287Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626384587792875:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:52.109517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.780882Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626367407921347:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:52.780990Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor ... se.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 29818, MsgBus: 1648 2025-06-24T21:10:53.563519Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626395589535202:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:53.563596Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001977/r3tmp/tmprdNxFe/pdisk_1.dat 2025-06-24T21:10:53.666639Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626395589535180:2079] 1750799453563134 != 1750799453563137 2025-06-24T21:10:53.675204Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29818, node 2 2025-06-24T21:10:53.721729Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:53.721816Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:53.723559Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:53.759779Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:53.759799Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:53.759806Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:53.759914Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1648 TClient is connected to server localhost:1648 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:54.183689Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:54.200430Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:54.282630Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:54.449035Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:54.523293Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:54.570495Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:56.689540Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626408474438713:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:56.691013Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:56.744763Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.773300Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.798734Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.825941Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.855997Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.889071Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.923792Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.010727Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626412769406662:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:57.010863Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:57.011205Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626412769406667:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:57.014337Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:57.027121Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626412769406669:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:57.100289Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626412769406720:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:58.047717Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:58.563922Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626395589535202:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:58.564030Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ConflictingKeyRW1RWR2 [GOOD] Test command err: Trying to start YDB, gRPC: 11664, MsgBus: 30127 2025-06-24T21:10:48.382489Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626371917763089:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:48.382637Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00194f/r3tmp/tmpJjFnkT/pdisk_1.dat 2025-06-24T21:10:48.653572Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:48.653682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:48.659235Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626371917763065:2079] 1750799448381514 != 1750799448381517 2025-06-24T21:10:48.665052Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:48.665417Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11664, node 1 2025-06-24T21:10:48.726853Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:48.726889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:48.726902Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:48.727112Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30127 TClient is connected to server localhost:30127 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:49.245078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:49.269147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.397993Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:49.443646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.582229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.660896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.261311Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626384802666592:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.261419Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.590610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.617667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.646347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.676504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.717951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.749323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.780276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.835372Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626384802667250:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.835454Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.835529Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626384802667255:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.840658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:51.853005Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626384802667257:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:51.914917Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626384802667308:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:52.863913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.382857Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626371917763089:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:53.382942Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 22235, MsgBus: 11435 2025-06-24T21:10:53.908447Z node 2 :METADATA_PROVIDER WARN ... CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:10:54.568436Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:54.594967Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:10:54.672421Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.810828Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:54.888953Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:54.915727Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:56.905483Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626408602709191:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:56.905550Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:56.960489Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.989424Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.017492Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.043810Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.113171Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.148465Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.226866Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.285943Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626412897677151:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:57.286055Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626412897677156:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:57.286094Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:57.290084Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:57.300368Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626412897677158:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:57.371664Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626412897677209:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:58.362271Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:58.905941Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because it cannot acquire locks;tx_id=7; 2025-06-24T21:10:58.914187Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626395717805710:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:58.914328Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:10:58.918844Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 7 at tablet 72075186224037922 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because it cannot acquire locks" issue_code: 2001 severity: 1 } 2025-06-24T21:10:58.919032Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 7 at tablet 72075186224037922 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because it cannot acquire locks" issue_code: 2001 severity: 1 } 2025-06-24T21:10:58.919283Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [2:7519626417192645040:2474], Table: `/Root/TestImmediateEffects` ([72057594046644480:17:1]), SessionActorId: [2:7519626417192644775:2474]Got LOCKS BROKEN for table `/Root/TestImmediateEffects`. ShardID=72075186224037922, Sink=[2:7519626417192645040:2474].{
: Error: Operation is aborting because it cannot acquire locks, code: 2001 } 2025-06-24T21:10:58.919906Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519626417192645003:2474], SessionActorId: [2:7519626417192644775:2474], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/TestImmediateEffects`., code: 2001
: Error: Operation is aborting because it cannot acquire locks, code: 2001 . sessionActorId=[2:7519626417192644775:2474]. isRollback=0 2025-06-24T21:10:58.920186Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=ZTY2MTA4MGMtZmQ3ZjA5OTUtNjE5MWUzMTYtZmY0MzUwODg=, ActorId: [2:7519626417192644775:2474], ActorState: ExecuteState, TraceId: 01jyhwf8gacazfhs29a8cnp49x, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7519626417192645034:2474] from: [2:7519626417192645003:2474] 2025-06-24T21:10:58.920278Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519626417192645034:2474] TxId: 281474976715677. Ctx: { TraceId: 01jyhwf8gacazfhs29a8cnp49x, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTY2MTA4MGMtZmQ3ZjA5OTUtNjE5MWUzMTYtZmY0MzUwODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/TestImmediateEffects`., code: 2001 subissue: {
: Error: Operation is aborting because it cannot acquire locks, code: 2001 } } 2025-06-24T21:10:58.920575Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZTY2MTA4MGMtZmQ3ZjA5OTUtNjE5MWUzMTYtZmY0MzUwODg=, ActorId: [2:7519626417192644775:2474], ActorState: ExecuteState, TraceId: 01jyhwf8gacazfhs29a8cnp49x, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpWrite::ProjectReplace+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 7819, MsgBus: 6619 2025-06-24T21:10:48.377687Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626374617861602:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:48.377791Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001963/r3tmp/tmpJauntY/pdisk_1.dat 2025-06-24T21:10:48.653745Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:48.654052Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626374617861583:2079] 1750799448377021 != 1750799448377024 TServer::EnableGrpc on GrpcPort 7819, node 1 2025-06-24T21:10:48.730087Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:48.730168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:48.731740Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:48.734476Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:48.734501Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:48.734508Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:48.734660Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6619 TClient is connected to server localhost:6619 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:10:49.266326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:49.292147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.386415Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:49.443270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.590167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.647838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.273806Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626387502765105:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.273974Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.564596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.633065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.661070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.692228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.720175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.761410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.833487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.890082Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626387502765767:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.890222Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.890300Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626387502765772:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.894250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:51.904210Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626387502765774:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:51.979877Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626387502765825:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:52.961207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.377959Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626374617861602:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:53.378035Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 16176, MsgBus: 3626 2025-06-24T21:10:54.750669Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626398203913904:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:54.750739Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001963/r3tmp/tmpCr7wQP/pdisk_1.dat 2025-06-24T21:10:54.855025Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:54.865125Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626398203913885:2079] 1750799454750293 != 1750799454750296 TServer::EnableGrpc on GrpcPort 16176, node 2 2025-06-24T21:10:54.894882Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:54.894958Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:54.896513Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:54.914469Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:54.914498Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:54.914505Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:54.914640Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3626 TClient is connected to server localhost:3626 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:55.338887Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:55.343837Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:10:55.348674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:55.419607Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:55.552363Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:55.614675Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:55.756198Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:57.818276Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626411088817417:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:57.818373Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:57.884817Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.915810Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.945104Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.977393Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:58.008504Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:58.080017Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:58.112377Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:58.168303Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626415383785371:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:58.168368Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:58.168461Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626415383785376:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:58.171859Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:58.181319Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626415383785378:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:58.244425Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626415383785429:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::WriteThenReadWithCommit [GOOD] Test command err: Trying to start YDB, gRPC: 29481, MsgBus: 7345 2025-06-24T21:10:48.753682Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626373121009417:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:48.753866Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001941/r3tmp/tmpDDP0kU/pdisk_1.dat 2025-06-24T21:10:49.127525Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:49.128068Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626373121009384:2079] 1750799448752475 != 1750799448752478 TServer::EnableGrpc on GrpcPort 29481, node 1 2025-06-24T21:10:49.164402Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:49.164519Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:49.166084Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:49.186822Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:49.186864Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:49.186878Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:49.187022Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7345 TClient is connected to server localhost:7345 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:49.664675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:49.694598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.761338Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:49.841364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.966004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:50.028771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.804265Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626386005912926:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.804403Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:52.099063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.132939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.160568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.233842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.291978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.324986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.392899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.452852Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626390300880884:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:52.452928Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:52.452988Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626390300880889:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:52.457143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:52.467983Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626390300880891:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:52.558934Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626390300880942:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:53.602702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.753678Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626373121009417:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:53.753733Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 32756, MsgBus: 30066 2025-06-24T21:10:54.930283Z node 2 :METADATA_PROVIDER WARN: l ... w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing physical tx, type: 2, stages: 1 2025-06-24T21:10:59.648546Z node 2 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 1, OutputsCount: 1 2025-06-24T21:10:59.648725Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715676. Resolved key sets: 1 2025-06-24T21:10:59.648838Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:295: TxId: 281474976715676. Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 17] Access: 1 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 3 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint64 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-24T21:10:59.648890Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2035: ActorId: [2:7519626422166957542:2472] TxId: 281474976715676. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Stage [0,0] AST: ( (return (lambda '($1) $1)) ) 2025-06-24T21:10:59.649025Z node 2 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:241: Create result channelId: 1 from task: 1 with index: 0 2025-06-24T21:10:59.649095Z node 2 :KQP_EXECUTER DEBUG: kqp_shards_resolver.cpp:76: [ShardsResolver] TxId: 281474976715676. Shard resolve complete, resolved shards: 1 2025-06-24T21:10:59.649119Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:273: ActorId: [2:7519626422166957542:2472] TxId: 281474976715676. 
Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolved, success: 1, failed: 0 2025-06-24T21:10:59.649137Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:296: ActorId: [2:7519626422166957542:2472] TxId: 281474976715676. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards on nodes: node 2: [72075186224037922] 2025-06-24T21:10:59.649159Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715676. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 1, readonly: true, 1 scan tasks on 1 nodes, localComputeTasks: 0, snapshot: {18446744073709551615, 1750799459381} 2025-06-24T21:10:59.649348Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715676. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [2:7519626422166957546:2472] 2025-06-24T21:10:59.649391Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715676. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [2:7519626422166957546:2472], channels: 1 2025-06-24T21:10:59.649410Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:7519626422166957542:2472] TxId: 281474976715676. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 1, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-24T21:10:59.649433Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:7519626422166957542:2472] TxId: 281474976715676. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:7519626422166957546:2472], 2025-06-24T21:10:59.649452Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:7519626422166957542:2472] TxId: 281474976715676. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:7519626422166957546:2472], 2025-06-24T21:10:59.649462Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2368: ActorId: [2:7519626422166957542:2472] TxId: 281474976715676. 
Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, immediate tx, become ExecuteState 2025-06-24T21:10:59.649841Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:7519626422166957542:2472] TxId: 281474976715676. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:7519626422166957546:2472], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-24T21:10:59.649877Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:7519626422166957542:2472] TxId: 281474976715676. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:7519626422166957546:2472], 2025-06-24T21:10:59.649893Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:7519626422166957542:2472] TxId: 281474976715676. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:7519626422166957546:2472], 2025-06-24T21:10:59.650974Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:7519626422166957542:2472] TxId: 281474976715676. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:7519626422166957546:2472], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 642 Tasks { TaskId: 1 CpuTimeUs: 149 FinishTimeMs: 1750799459650 OutputRows: 1 OutputBytes: 22 Tables { TablePath: "/Root/TestImmediateEffects" ReadRows: 1 ReadBytes: 22 AffectedPartitions: 1 } IngressRows: 1 ResultRows: 1 ResultBytes: 22 ComputeCpuTimeUs: 76 BuildCpuTimeUs: 73 HostName: "ghrun-4pjfmvffsq" NodeId: 2 StartTimeMs: 1750799459650 CreateTimeMs: 1750799459649 UpdateTimeMs: 1750799459650 } MaxMemoryUsage: 1048576 } 2025-06-24T21:10:59.651040Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715676. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:7519626422166957546:2472] 2025-06-24T21:10:59.651167Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:7519626422166957542:2472] TxId: 281474976715676. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 
2025-06-24T21:10:59.651194Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:7519626422166957542:2472] TxId: 281474976715676. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000642s ReadRows: 1 ReadBytes: 22 ru: 1 rate limiter was not found force flag: 1 2025-06-24T21:10:59.651527Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715677. Resolved key sets: 0 2025-06-24T21:10:59.651602Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715677. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {18446744073709551615, 1750799459381} 2025-06-24T21:10:59.651627Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:7519626422166957550:2472] TxId: 281474976715677. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 0, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-24T21:10:59.651649Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:276: ActorId: [2:7519626422166957550:2472] TxId: 281474976715677. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send Commit to BufferActor=[2:7519626422166957533:2472] 2025-06-24T21:10:59.651684Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:7519626422166957550:2472] TxId: 281474976715677. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-24T21:10:59.653312Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:7519626422166957550:2472] TxId: 281474976715677. Ctx: { TraceId: 01jyhwf96rbdcq51v6hyzf3a1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmRkMzE0MDktMWVkYWNkZmYtZGY2OTZkZDctNDRjMjMzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::Negative_SingleRowWithValueCast+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 27076, MsgBus: 6244 2025-06-24T21:10:48.637652Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626373294888753:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:48.637820Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00193b/r3tmp/tmpUHYbuJ/pdisk_1.dat 2025-06-24T21:10:48.953961Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:48.955611Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626373294888734:2079] 1750799448636843 != 1750799448636846 TServer::EnableGrpc on GrpcPort 27076, node 1 2025-06-24T21:10:49.046212Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:49.046323Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:49.048487Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:49.056798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:49.056822Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:49.056830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:49.056962Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6244 TClient is connected to server localhost:6244 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:10:49.582803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:49.595010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:10:49.608066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.657993Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:49.742922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.881347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.935217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.539521Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626386179792260:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.539654Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:51.876336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.909468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.936674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:51.965475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.042424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.072367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.099997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.153372Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626390474760214:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:52.153462Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:52.153709Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626390474760219:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:52.157089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:52.168168Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626390474760221:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:52.239457Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626390474760272:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:53.293677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.638949Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626373294888753:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:53.639020Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detec ... :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626397914810597:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:54.630772Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00193b/r3tmp/tmpOnwSnk/pdisk_1.dat 2025-06-24T21:10:54.737383Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:54.741415Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626397914810570:2079] 1750799454629743 != 1750799454629746 TServer::EnableGrpc on GrpcPort 62377, node 2 2025-06-24T21:10:54.763313Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:54.763451Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:54.765796Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:54.783713Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:54.783739Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:54.783745Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:54.783851Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10304 TClient is connected to server localhost:10304 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:55.208182Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:55.240931Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:10:55.245105Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.300139Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:55.459376Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:55.535004Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:55.690555Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:57.562677Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626410799714105:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:57.562757Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:57.604502Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.673272Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.706233Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.734153Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.765473Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.798252Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.829650Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.879275Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626410799714762:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:57.879348Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:57.879385Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626410799714768:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:57.882472Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:57.890494Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626410799714770:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:57.977553Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626410799714821:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:59.027463Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.631015Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626397914810597:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:59.631082Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpImmediateEffects::MultiShardUpsertAfterRead [GOOD] >> KqpImmediateEffects::UpsertConflictInteractiveTxAborted [GOOD] >> KqpImmediateEffects::ConflictingKeyRW1RR2 [GOOD] >> KqpInplaceUpdate::Negative_BatchUpdate-UseSink [GOOD] >> TSchemeShardTTLTestsWithReboots::AlterTable [GOOD] >> KqpImmediateEffects::TxWithReadAtTheEnd+UseSink [GOOD] >> KqpImmediateEffects::InteractiveTxWithReadAtTheEnd-UseSink [GOOD] >> KqpWrite::ProjectReplace-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::MultiShardUpsertAfterRead [GOOD] Test command err: Trying to start YDB, gRPC: 13012, MsgBus: 9057 2025-06-24T21:10:46.444636Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626365506699506:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:46.444803Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001983/r3tmp/tmplsDjMt/pdisk_1.dat 2025-06-24T21:10:46.701419Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:46.701688Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626365506699481:2079] 1750799446443510 != 1750799446443513 TServer::EnableGrpc on GrpcPort 13012, node 1 2025-06-24T21:10:46.747215Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:46.747241Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:46.747250Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:46.747376Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:46.781397Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-24T21:10:46.781537Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:46.783289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9057 TClient is connected to server localhost:9057 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:47.221328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:47.239038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:47.379348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:47.463373Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:47.492245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:47.566850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:48.868414Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626374096635722:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:48.868548Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.158108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.195446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.224855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.250559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.274121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.300668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.327598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.391555Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626378391603672:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.391633Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.391734Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626378391603677:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.394809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:49.404821Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626378391603679:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:49.478209Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626378391603732:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:50.484173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:50.514231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:50.543171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ... cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 14742, MsgBus: 12056 2025-06-24T21:10:55.796460Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626403733526480:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:55.796508Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001983/r3tmp/tmpHsgYTu/pdisk_1.dat 2025-06-24T21:10:55.907101Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:55.909834Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626403733526460:2079] 1750799455796095 != 1750799455796098 TServer::EnableGrpc on GrpcPort 14742, node 2 2025-06-24T21:10:55.942243Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:55.942590Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:55.944465Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:55.958838Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:55.958871Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:55.958882Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:55.959039Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12056 TClient is connected to server localhost:12056 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:56.469010Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:56.486111Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:56.558279Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:56.712241Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:56.798534Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:56.802636Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:58.910445Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626416618429983:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:58.910530Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:58.964746Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:58.993814Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.025480Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.059203Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.091230Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.159895Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.192241Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.251240Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626420913397941:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.251321Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626420913397946:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.251350Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.254715Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:59.264213Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626420913397948:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:59.347427Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626420913397999:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:00.261521Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.796764Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626403733526480:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:00.796826Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpEffects::AlterAfterUpsertTransaction-UseSink [GOOD] >> KqpEffects::AlterAfterUpsertBeforeUpsertTransaction-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTestsWithReboots::AlterTable [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T21:10:17.406419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:17.406508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:17.406548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:17.406609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:17.407442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2025-06-24T21:10:17.407484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:17.407560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:17.407641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:17.408412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:17.410141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:17.494130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:10:17.494191Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:17.494981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T21:10:17.512581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:17.512994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:17.513138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:17.520510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:17.520739Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:17.521349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.521550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:17.524266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:17.524433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:17.526357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:17.526447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:17.526642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:17.526688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, 
domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:17.526784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:17.526912Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T21:10:17.533312Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:17.653685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:17.653924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.654122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:17.654190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:17.654453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:17.654537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:17.656887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.657068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:17.657281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.657337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:17.657380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do 
next state 2025-06-24T21:10:17.657430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:17.659446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.659506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:17.659543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:17.661218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.661281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:17.661340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:17.661399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:17.670398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:17.672299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:17.672535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:17.673462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:17.673579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 42949694 ... 
11:01.526297Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-24T21:11:01.526400Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-24T21:11:01.526440Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-24T21:11:01.526476Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-24T21:11:01.526509Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:11:01.526578Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1003, ready parts: 0/1, is published: true 2025-06-24T21:11:01.527712Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1149 } } CommitVersion { Step: 5000004 TxId: 1003 } 2025-06-24T21:11:01.527753Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409546, partId: 0 2025-06-24T21:11:01.527876Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1149 } } CommitVersion { Step: 5000004 TxId: 1003 } 2025-06-24T21:11:01.527964Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1149 } } CommitVersion { Step: 5000004 TxId: 1003 } 2025-06-24T21:11:01.528841Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 332 RawX2: 219043334413 } Origin: 72075186233409546 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-06-24T21:11:01.528882Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409546, partId: 0 2025-06-24T21:11:01.528976Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply 
execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: Source { RawX1: 332 RawX2: 219043334413 } Origin: 72075186233409546 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-06-24T21:11:01.529022Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 1003:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:11:01.529098Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 1003:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 332 RawX2: 219043334413 } Origin: 72075186233409546 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-06-24T21:11:01.529150Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 1003:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:11:01.529190Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-24T21:11:01.529236Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 1003:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:11:01.529275Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1003:0 129 -> 240 2025-06-24T21:11:01.532091Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-24T21:11:01.533468Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-24T21:11:01.533610Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-24T21:11:01.534053Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-24T21:11:01.534108Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-24T21:11:01.534225Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1003:0 progress is 1/1 2025-06-24T21:11:01.534265Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-24T21:11:01.534312Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1003:0 progress is 1/1 2025-06-24T21:11:01.534345Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-24T21:11:01.534384Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-06-24T21:11:01.534428Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-24T21:11:01.534473Z node 51 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1003:0 2025-06-24T21:11:01.534509Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1003:0 2025-06-24T21:11:01.534661Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-24T21:11:01.537703Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-24T21:11:01.537764Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-24T21:11:01.538177Z node 51 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-24T21:11:01.538274Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-24T21:11:01.538311Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [51:453:2424] TestWaitNotification: OK eventTxId 1003 2025-06-24T21:11:01.538835Z node 51 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:11:01.539070Z node 51 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 282us result status StatusSuccess 2025-06-24T21:11:01.539693Z node 51 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 
0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::UpsertConflictInteractiveTxAborted [GOOD] Test command err: Trying to start YDB, gRPC: 19654, MsgBus: 62744 2025-06-24T21:10:49.995657Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626378946261101:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:49.995762Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00191b/r3tmp/tmp74MVTa/pdisk_1.dat 2025-06-24T21:10:50.269706Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:50.270476Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626378946261082:2079] 1750799449994997 != 1750799449995000 TServer::EnableGrpc on GrpcPort 19654, node 1 2025-06-24T21:10:50.359936Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:50.359962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:50.359969Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:50.360132Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:50.362423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:50.362546Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:50.364798Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62744 TClient is connected to server localhost:62744 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:50.883643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:50.908192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.008646Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:51.047873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.208576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:10:51.271522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.907364Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626391831164602:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:52.907462Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.252629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.285557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.351310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.382999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.408676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.435966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.466202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.554234Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626396126132566:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.554317Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.554376Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626396126132571:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.557659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:53.568002Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626396126132573:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:53.628719Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626396126132624:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:54.631766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.995725Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626378946261101:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:54.995794Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 29679, MsgBus: 25669 2025-06-24T21:10:55.832908Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626405159153751:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:55.832976Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00191b/r3tmp/tmpI2lnbR/pdisk_1.dat 2025-06-24T21:10:55.941496Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:55.942733Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626405159153720:2079] 1750799455832258 != 1750799455832261 TServer::EnableGrpc on GrpcPort 29679, node 2 2025-06-24T21:10:55.966395Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:55.966478Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:55.967635Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:55.993606Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:55.993631Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:55.993638Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:55.993758Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25669 TClient is connected to server localhost:25669 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:56.410181Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:56.418043Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:10:56.428370Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:56.502354Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:56.639103Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:56.698584Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:56.861655Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:58.857485Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626418044057248:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:58.857671Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:58.904914Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:58.937524Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:58.981527Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.009969Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.039371Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.069683Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.136281Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.217882Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626422339025210:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.217964Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.217979Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626422339025215:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.221250Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:59.230474Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626422339025217:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:59.322896Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626422339025268:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:00.319192Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.832990Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626405159153751:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:00.833052Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ConflictingKeyRW1RR2 [GOOD] Test command err: Trying to start YDB, gRPC: 3133, MsgBus: 12270 2025-06-24T21:10:50.195695Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626381367134005:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:50.195790Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0018c9/r3tmp/tmpMgzTzW/pdisk_1.dat 2025-06-24T21:10:50.511424Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:50.511794Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626381367133986:2079] 1750799450195023 != 1750799450195026 TServer::EnableGrpc on GrpcPort 3133, node 1 2025-06-24T21:10:50.586948Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:50.587060Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:50.588756Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:50.588813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:50.588833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:50.588845Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:50.589020Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12270 TClient is connected to server localhost:12270 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:51.097205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:51.118373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:10:51.134375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.203854Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:51.286668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.443037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.515305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:53.126142Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626394252037509:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.126260Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.459223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.485926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.510466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.539311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.568977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.635904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.665679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.711706Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626394252038169:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.711779Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.711979Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626394252038174:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.716304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:53.728511Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626394252038176:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:53.793374Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626394252038227:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:54.797074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.195968Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626381367134005:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:55.196042Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 24189, MsgBus: 61354 2025-06-24T21:10:56.008494Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626405410115774:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:56.008547Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0018c9/r3tmp/tmp9RxOH0/pdisk_1.dat 2025-06-24T21:10:56.100032Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626405410115751:2079] 1750799456007582 != 1750799456007585 2025-06-24T21:10:56.100413Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24189, node 2 2025-06-24T21:10:56.140391Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:56.140528Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:56.142315Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:56.142801Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:56.142830Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:56.142844Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:56.142996Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61354 TClient is connected to server localhost:61354 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:56.592517Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:56.604582Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:56.682534Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:56.866832Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:56.961323Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:57.087543Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:59.002152Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626418295019273:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.002232Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.058669Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.126706Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.192539Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.220366Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.245324Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.312077Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.379319Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.430897Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626418295019942:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.430962Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.431051Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626418295019947:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.433902Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:59.443032Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626418295019949:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:59.498456Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626418295020000:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:00.459603Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.008688Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626405410115774:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:01.008753Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpEffects::InsertAbort_Select_Conflict-UseSink [GOOD] >> TVersions::Wreck1Reverse [GOOD] >> TVersions::Wreck0 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-1 >> KqpImmediateEffects::ConflictingKeyR1RWR2 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-37 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::Negative_BatchUpdate-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 29116, MsgBus: 10891 2025-06-24T21:10:49.977351Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626377115274815:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:49.977457Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0018fc/r3tmp/tmpvrTgkb/pdisk_1.dat 2025-06-24T21:10:50.243084Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:50.243184Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626377115274791:2079] 1750799449976785 != 1750799449976788 TServer::EnableGrpc on GrpcPort 29116, node 1 2025-06-24T21:10:50.331699Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:50.331719Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:50.331726Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:50.331890Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:50.345542Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:50.345676Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-24T21:10:50.346978Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10891 TClient is connected to server localhost:10891 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:50.886663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:50.904376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:10:50.918740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:50.985587Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:51.066628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.217511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:51.294326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:53.065345Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626394295145638:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.065469Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.439564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.465809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.492196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.518226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.548369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.616998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.649165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.730074Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626394295146301:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.730150Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.730160Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626394295146306:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.733780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:53.743981Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626394295146308:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:53.834569Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626394295146359:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:54.789206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.977473Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626377115274815:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:54.977540Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 16305, MsgBus: 15363 2025-06-24T21:10:56.127381Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626406678346704:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:56.127436Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0018fc/r3tmp/tmpUEGl9B/pdisk_1.dat 2025-06-24T21:10:56.238309Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:56.239615Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626406678346679:2079] 1750799456127026 != 1750799456127029 2025-06-24T21:10:56.260616Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:56.260695Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 16305, node 2 2025-06-24T21:10:56.262309Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:56.294362Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:56.294381Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:56.294386Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:56.294495Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15363 TClient is connected to server localhost:15363 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:10:56.756743Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:56.770185Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:56.842968Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:56.993732Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:57.060249Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:57.135823Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:59.051468Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626419563250196:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.051570Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.104649Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.135920Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.167727Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.192756Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.236052Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.267004Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.297605Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.345911Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626419563250859:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.345981Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.346046Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626419563250864:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.349237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:59.358064Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626419563250866:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:59.429863Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626419563250917:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:00.416363Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.127634Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626406678346704:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:01.127710Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-31 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-1 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-2 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-25 >> KqpImmediateEffects::UpdateAfterInsert [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpWrite::ProjectReplace-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 61983, MsgBus: 25010 2025-06-24T21:10:50.836115Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626381950445992:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:50.836247Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001857/r3tmp/tmpuYHTQq/pdisk_1.dat 2025-06-24T21:10:51.200563Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:51.202378Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626381950445974:2079] 1750799450835433 != 1750799450835436 TServer::EnableGrpc on GrpcPort 61983, node 1 2025-06-24T21:10:51.265013Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:51.265090Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:51.270405Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:51.299696Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:51.299714Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:51.299725Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:51.299827Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25010 TClient is connected to server localhost:25010 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:51.824944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:51.849569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:10:51.853612Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:51.867400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:52.039582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:10:52.183906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.246958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:53.969957Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626394835349499:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.970074Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.353008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.390666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.419313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.448583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.476534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.551676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.618200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.674545Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626399130317459:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.674638Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.675037Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626399130317464:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.678839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:54.688224Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626399130317466:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:54.772095Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626399130317517:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:55.836231Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626381950445992:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:55.836355Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 2389, MsgBus: 65235 2025-06-24T21:10:57.074105Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626411043779941:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:57.074160Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001857/r3tmp/tmpP2N1HM/pdisk_1.dat 2025-06-24T21:10:57.188026Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:57.189374Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626411043779919:2079] 1750799457073295 != 1750799457073298 TServer::EnableGrpc on GrpcPort 2389, node 2 2025-06-24T21:10:57.215992Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:57.216064Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:57.217597Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:57.234343Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:57.234364Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:57.234373Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:57.234505Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65235 TClient is connected to server localhost:65235 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:57.681230Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:57.689980Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:57.748481Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:57.901354Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:57.967747Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:58.110921Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:00.027192Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626423928683422:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.027250Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.068839Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.099885Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.125364Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.150408Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.175791Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.242225Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.269023Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.321971Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626423928684081:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.322049Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626423928684086:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.322083Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.325164Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:00.333028Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626423928684088:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:00.409678Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626423928684139:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::TxWithReadAtTheEnd+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 11831, MsgBus: 4954 2025-06-24T21:10:50.767188Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626382601287269:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:50.767235Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00189b/r3tmp/tmpUFwoPO/pdisk_1.dat 2025-06-24T21:10:51.132035Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11831, node 1 2025-06-24T21:10:51.164848Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:51.164962Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:51.166141Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:51.212773Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:51.212798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:51.212819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:51.212926Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4954 TClient is connected to server localhost:4954 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:10:51.743302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:51.785568Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:51.792256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:10:51.969967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:52.106961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:52.182671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:53.728018Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626395486190765:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.728129Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.048323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.076862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.104284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.132026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.160479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.211446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.244519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.332762Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626399781158723:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.332857Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.333049Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626399781158728:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.336869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:54.346411Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626399781158730:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:54.419461Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626399781158781:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:55.302724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.767355Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626382601287269:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:55.775656Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 25430, MsgBus: 15030 2025-06-24T21:10:56.777234Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626408294769476:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:56.777351Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00189b/r3tmp/tmplyNauX/pdisk_1.dat 2025-06-24T21:10:56.874763Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626408294769456:2079] 1750799456776805 != 1750799456776808 2025-06-24T21:10:56.906733Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:56.913970Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:56.914059Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:56.918441Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25430, node 2 2025-06-24T21:10:56.975796Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:56.975820Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:56.975828Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:56.975937Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15030 TClient is connected to server localhost:15030 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:57.404755Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:10:57.423071Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.497061Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:57.678739Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:57.757976Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:57.792540Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:59.735795Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626421179672965:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.735861Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.777603Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.803811Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.828790Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.852858Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.882123Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.911113Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.940311Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.992698Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626421179673623:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.992768Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626421179673628:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.992782Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.995958Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:00.004708Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626421179673630:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:00.090384Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626425474640977:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:00.924543Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::InteractiveTxWithReadAtTheEnd-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 29323, MsgBus: 61054 2025-06-24T21:10:50.096448Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626382204630489:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:50.096719Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0018e3/r3tmp/tmp2vUCZ2/pdisk_1.dat 2025-06-24T21:10:50.418783Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626382204630467:2079] 1750799450095726 != 1750799450095729 2025-06-24T21:10:50.425228Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29323, node 1 2025-06-24T21:10:50.463808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:50.463836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:50.463851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:50.464009Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:50.493274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:50.493362Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:50.494973Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61054 TClient is connected to server localhost:61054 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:51.002551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:51.019993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:10:51.032953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.111997Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:51.190971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.349245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:51.417083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
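[annotation] Most of the output in this block is made up of two recurring, non-fatal patterns: FLAT_TX_SCHEMESHARD WARN records ("Operation part proposed ok, but propose itself is undo unsafe") emitted once per ESchemeOpAlterSubDomain/ESchemeOpCreateTable suboperation, and KQP_WORKLOAD_SERVICE NOT_FOUND warnings emitted while the default resource pool is being created lazily. When triaging a run like this, it helps to collapse that noise into per-signature counts before looking for real failures. Below is a minimal sketch of such a counter; it assumes the console output has been saved to a local file with one log record per line, and the file name, record layout, and field names are assumptions for illustration, not part of this run.

#!/usr/bin/env python3
"""Collapse repeated log warnings into per-signature counts (illustrative sketch).

Assumes one record per line in the saved capture, in the shape seen above:
"<ISO timestamp> node <N> :<COMPONENT> <LEVEL>: <file:line>: <message>".
Adjust the regex if the capture is formatted differently.
"""
import re
import sys
from collections import Counter

RECORD = re.compile(
    r"^\d{4}-\d{2}-\d{2}T\S+ node \d+ :(?P<component>\w+) (?P<level>\w+): "
    r"(?P<location>\S+:\d+): (?P<message>.*)$"
)

def signatures(path: str) -> Counter:
    counts: Counter = Counter()
    with open(path, encoding="utf-8", errors="replace") as fh:
        for line in fh:
            m = RECORD.match(line.strip())
            if not m:
                continue  # skip non-record lines such as "TClient is connected ..."
            # Key on component + level + source location so run-specific details
            # (timestamps, actor ids, opIds) collapse into one bucket.
            counts[(m["component"], m["level"], m["location"])] += 1
    return counts

if __name__ == "__main__":
    # Usage (hypothetical file name): python3 count_log_signatures.py test_output.log
    for (component, level, location), n in signatures(sys.argv[1]).most_common(20):
        print(f"{n:7d}  {level:5s} {component:25s} {location}")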
2025-06-24T21:10:53.006917Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626395089533985:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.007026Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.419605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.450354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.480722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.505822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.533284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.566864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.636741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.688033Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626395089534641:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.688097Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.688236Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626395089534646:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:53.691869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:53.702128Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626395089534648:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:53.759296Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626395089534699:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:54.715555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.096685Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626382204630489:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:55.096746Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 5800, MsgBus: 18455 2025-06-24T21:10:56.078289Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626407808279260:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:56.078389Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0018e3/r3tmp/tmpoEnHyP/pdisk_1.dat 2025-06-24T21:10:56.169857Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:56.184439Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626407808279237:2079] 1750799456077814 != 1750799456077817 TServer::EnableGrpc on GrpcPort 5800, node 2 2025-06-24T21:10:56.213093Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:56.213182Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:56.214783Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:56.242973Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:56.242997Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:56.243005Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:56.243121Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18455 TClient is connected to server localhost:18455 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:56.717171Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:56.732605Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:56.804752Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:56.957333Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:57.029899Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:57.150935Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:59.027353Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626420693182779:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.027449Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.079954Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.113280Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.140555Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.166549Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.193550Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.280622Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.311645Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.391646Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626420693183447:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.391729Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.391773Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626420693183452:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.395090Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:59.404181Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626420693183454:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:59.492187Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626420693183505:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:00.492466Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.078331Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626407808279260:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:01.078536Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpEffects::InsertAbort_Literal_Conflict-UseSink [GOOD] >> KqpImmediateEffects::ImmediateUpdate [GOOD] >> KqpImmediateEffects::DeleteAfterInsert [GOOD] >> KqpUserConstraint::KqpReadNull+UploadNull [GOOD] >> KqpUserConstraint::KqpReadNull-UploadNull [GOOD] >> KqpImmediateEffects::ReplaceDuplicates [GOOD] >> TKeyValueTest::TestConcatToLongKey [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint32 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ConflictingKeyR1RWR2 [GOOD] Test command err: Trying to start YDB, gRPC: 19460, MsgBus: 32727 2025-06-24T21:10:51.298297Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626385821147574:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:51.298443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001801/r3tmp/tmpFKMJzv/pdisk_1.dat 2025-06-24T21:10:51.610132Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:51.611756Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626385821147388:2079] 1750799451274996 != 1750799451274999 2025-06-24T21:10:51.627485Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:51.627589Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:51.632486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19460, node 1 2025-06-24T21:10:51.735705Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:51.735728Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
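[annotation] The sequence that repeats in every test above is: TPoolFetcherActor reports NOT_FOUND for the default resource pool, an ESchemeOpCreateResourcePool suboperation is proposed, TPoolCreatorActor schedules a "doublechecking" retry, and TX_PROXY then logs "path exist, request accepts it" because the pool was created concurrently in the meantime. Since the surrounding tests still finish with [GOOD], this reads as a benign create-if-missing race that resolves idempotently. The sketch below shows only the shape of that pattern in generic form; the `backend` interface, function names, and exception types are hypothetical and are not YDB's actual implementation.

"""Generic create-if-missing sketch mirroring the log sequence above:
fetch -> NOT_FOUND -> create -> on "already exists" treat it as success and
re-fetch. All names here are hypothetical, for illustration only."""
import time

class NotFound(Exception): ...
class AlreadyExists(Exception): ...

def ensure_default_pool(backend, retries: int = 3, delay_s: float = 0.1):
    for _ in range(retries):
        try:
            return backend.fetch_pool("default")   # "TPoolFetcherActor" step
        except NotFound:
            pass
        try:
            backend.create_pool("default")         # "ESchemeOpCreateResourcePool" step
        except AlreadyExists:
            # Analogue of "path exist, request accepts it": another creator won
            # the race, so the desired state is already there.
            pass
        time.sleep(delay_s)                        # "Scheduled retry ... doublechecking"
    return backend.fetch_pool("default")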
2025-06-24T21:10:51.735734Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:51.735849Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32727 TClient is connected to server localhost:32727 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:52.291273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:52.295603Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:10:52.318062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:52.457396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:52.602235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:52.663716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:54.416664Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626398706050919:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.416751Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.737513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.769826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.798616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.828464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.857952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.901059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.972151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.030854Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626403001018876:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.030911Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.031056Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626403001018881:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.034995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:55.044046Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626403001018883:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:55.107934Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626403001018934:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:56.185634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.305660Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626385821147574:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:56.305741Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 9888, MsgBus: 20651 2025-06-24T21:10:57.496747Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626412416758107:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:57.496871Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001801/r3tmp/tmpiTZHa1/pdisk_1.dat 2025-06-24T21:10:57.600555Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9888, node 2 2025-06-24T21:10:57.630254Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:57.630343Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:57.634565Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:57.656860Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:57.656892Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:57.656901Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:57.657024Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20651 TClient is connected to server localhost:20651 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:58.101019Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:58.118519Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:58.186198Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:58.333344Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:58.411673Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:58.553524Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:00.392411Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626425301661604:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.392494Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.447070Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.485496Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.512539Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.538195Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.563289Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.628384Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.656889Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.704923Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626425301662267:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.704989Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.705042Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626425301662272:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.708304Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:00.716838Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626425301662274:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:00.783460Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626425301662325:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:01.533621Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::InsertAbort_Select_Conflict-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 17717, MsgBus: 26727 2025-06-24T21:10:44.866361Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626357969838524:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:44.866500Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0019b8/r3tmp/tmpaNnKpi/pdisk_1.dat 2025-06-24T21:10:45.107574Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626357969838504:2079] 1750799444865482 != 1750799444865485 2025-06-24T21:10:45.113353Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17717, node 1 2025-06-24T21:10:45.168033Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:45.168071Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:45.168104Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:45.168241Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:45.210303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:45.210416Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:45.212139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26727 TClient is connected to server localhost:26727 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:45.656831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:45.673296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:45.786810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:45.874325Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:45.912032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:45.961417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:47.492722Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626370854742047:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:47.492871Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:47.744304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.773970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.796097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.818668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.847358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.915180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:47.945306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:48.026290Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626375149710014:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:48.026369Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:48.026430Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626375149710019:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:48.030472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:48.039810Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626375149710021:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:48.118439Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626375149710072:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:49.234244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.549427Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-24T21:10:49.559861Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 72075186224037888 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:10:49.560030Z node 1 :TX_DATASHARD ERROR: finish_propose_ ... e 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:56.563505Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:56.563528Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:56.563538Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:56.563673Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23970 TClient is connected to server localhost:23970 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:57.061570Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:10:57.083925Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:57.157570Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:10:57.299583Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.409433Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:57.415228Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:59.830355Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626420740553601:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.830478Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.874339Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.905150Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.936495Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.968251Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.000080Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.068535Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.101893Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.185554Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626425035521561:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.185614Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626425035521566:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.185634Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.189442Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:00.199611Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519626425035521568:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:00.289521Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519626425035521619:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:01.360186Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.399806Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519626407855650096:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:01.399903Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:01.810545Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [3:7519626429330489284:2492], TxId: 281474976715676, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyhwfb4f8yjacw7mjap5bwt1. SessionId : ydb://session/3?node_id=3&id=ZDU2NGY0MWItYWQyMjIwODktNTRjZTAyN2EtZTU0YjMzMGU=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }. 2025-06-24T21:11:01.810814Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7519626429330489286:2493], TxId: 281474976715676, task: 2. Ctx: { SessionId : ydb://session/3?node_id=3&id=ZDU2NGY0MWItYWQyMjIwODktNTRjZTAyN2EtZTU0YjMzMGU=. TraceId : 01jyhwfb4f8yjacw7mjap5bwt1. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [3:7519626429330489281:2463], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-24T21:11:01.811132Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=ZDU2NGY0MWItYWQyMjIwODktNTRjZTAyN2EtZTU0YjMzMGU=, ActorId: [3:7519626429330489144:2463], ActorState: ExecuteState, TraceId: 01jyhwfb4f8yjacw7mjap5bwt1, Create QueryResponse for error on request, msg: >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberSeconds >> DistributedEraseTests::ConditionalEraseRowsShouldSuccessOnShardedIndex >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64Seconds >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MilliSeconds >> EraseRowsTests::EraseRowsShouldSuccess >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt4Seconds >> DistributedEraseTests::ConditionalEraseRowsShouldEraseOnUint32 >> DistributedEraseTests::ConditionalEraseRowsShouldNotErase ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::UpdateAfterInsert [GOOD] Test command err: Trying to start YDB, gRPC: 23101, MsgBus: 28265 2025-06-24T21:10:51.942135Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626386383749309:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:51.942331Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0017ee/r3tmp/tmplBz61n/pdisk_1.dat 2025-06-24T21:10:52.325110Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23101, node 1 2025-06-24T21:10:52.379727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:52.379822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:52.381285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:52.407683Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:52.407705Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:52.407737Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:52.407849Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28265 TClient is connected to server localhost:28265 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:52.913183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:52.938975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:52.953025Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:53.062192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:53.189760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:10:53.255639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.992289Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626399268652802:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.992388Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.294934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.332173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.360765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.391385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.418697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.455188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.524709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.573854Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626403563620763:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.573947Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.573978Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626403563620768:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.577295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:55.586263Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626403563620770:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:55.683753Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626403563620821:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:56.636833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.941926Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626386383749309:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:56.941993Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 13688, MsgBus: 27754 2025-06-24T21:10:57.916328Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626409726195467:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:57.916399Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0017ee/r3tmp/tmp8A3ZiU/pdisk_1.dat 2025-06-24T21:10:58.025375Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:58.025806Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626409726195445:2079] 1750799457915539 != 1750799457915542 TServer::EnableGrpc on GrpcPort 13688, node 2 2025-06-24T21:10:58.061678Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:58.061766Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:58.062854Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:58.077506Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:58.077535Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:58.077545Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:58.077666Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27754 TClient is connected to server localhost:27754 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:58.513568Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:58.529164Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:58.604204Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:58.734563Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:58.791986Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:58.945573Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:00.688053Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626422611098972:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.688133Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.732148Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.758347Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.781095Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.804441Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.828979Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.895488Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.923109Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.999603Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626422611099639:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.999660Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626422611099644:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.999709Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.002300Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:01.011047Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626422611099646:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:01.074821Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626426906066993:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:01.908354Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> DistributedEraseTests::ConditionalEraseRowsShouldErase >> EraseRowsTests::ConditionalEraseRowsShouldNotEraseModifiedRows >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMicroSeconds >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Seconds >> KqpImmediateEffects::DeleteOnAfterInsertWithIndex [GOOD] >> KqpImmediateEffects::ForceImmediateEffectsExecution+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ImmediateUpdate [GOOD] Test command err: Trying to start YDB, gRPC: 21313, MsgBus: 23272 2025-06-24T21:10:51.915784Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626385886951533:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:51.915870Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0017ec/r3tmp/tmpd8xMtc/pdisk_1.dat 2025-06-24T21:10:52.290854Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21313, node 1 2025-06-24T21:10:52.347821Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:52.347930Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:52.349783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:52.356836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:52.356860Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:52.356878Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:52.357018Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23272 TClient is connected to server localhost:23272 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:10:52.926359Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:52.942462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:52.973463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:53.086757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:53.249143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:53.331122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:54.908958Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626398771855024:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.909061Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.293613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.325718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.351350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.381084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.410048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.436750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.504096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.588101Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626403066822985:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.588185Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.588269Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626403066822990:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.591506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:55.601055Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626403066822992:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:55.673663Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626403066823043:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:56.768694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.946414Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626385886951533:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:56.946506Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 61028, MsgBus: 7243 2025-06-24T21:10:58.112368Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626414379640040:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:58.112424Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0017ec/r3tmp/tmpoDOuf2/pdisk_1.dat 2025-06-24T21:10:58.202904Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:58.204165Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626414379640021:2079] 1750799458112104 != 1750799458112107 TServer::EnableGrpc on GrpcPort 61028, node 2 2025-06-24T21:10:58.244424Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:58.244513Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:58.246351Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:58.256442Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:58.256473Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:58.256481Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:58.256599Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7243 TClient is connected to server localhost:7243 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:58.631627Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:58.644971Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:58.715362Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:58.868365Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:58.936993Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:59.117777Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:00.890444Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626422969576244:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.890540Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.941533Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.966145Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.991610Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.017803Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.041865Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.068996Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.138698Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.180304Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626427264544201:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.180372Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.180413Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626427264544206:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.182791Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:01.189251Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626427264544208:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:01.266602Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626427264544259:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:02.020860Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest >> KqpUserConstraint::KqpReadNull-UploadNull [GOOD] Test command err: 2025-06-24T21:11:00.807842Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:00.808266Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:00.808466Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003377/r3tmp/tmpKglNl3/pdisk_1.dat 2025-06-24T21:11:01.240879Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:01.250194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:01.295257Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:01.305957Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799457852784 != 1750799457852788 2025-06-24T21:11:01.352601Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:01.352770Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:01.372077Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:01.465197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.921689Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:814:2662], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.921882Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:824:2667], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.921955Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.929810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:01.961752Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:02.081054Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:828:2670], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:11:02.147432Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:897:2708] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:03.052832Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwfbgy5a4f6nyh4yfgth5m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTdlMmMwMi1hMTU0Y2UxLWQ5NTJjMDQxLThkZDAxNjc2, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest >> KqpUserConstraint::KqpReadNull+UploadNull [GOOD] Test command err: 2025-06-24T21:11:00.818014Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:00.818373Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:00.818505Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00337b/r3tmp/tmpJ2gfIg/pdisk_1.dat 2025-06-24T21:11:01.240835Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:01.250187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:01.295591Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:01.304670Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799457852794 != 1750799457852798 2025-06-24T21:11:01.352393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:01.352529Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:01.372067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:01.465195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.921687Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:814:2662], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.921774Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:824:2667], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.921885Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.929811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:01.961757Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:02.078669Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:828:2670], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:11:02.146528Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:897:2708] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:03.052832Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwfbgy3gy8skv8pp8ctwy4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWI5ODJhMjUtN2IyZjMxNjAtMTMyMDNmYjEtNTE1YTdjY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:03.123317Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [1:928:2729], TxId: 281474976715660, task: 1. Ctx: { TraceId : 01jyhwfbgy3gy8skv8pp8ctwy4. SessionId : ydb://session/3?node_id=1&id=YWI5ODJhMjUtN2IyZjMxNjAtMTMyMDNmYjEtNTE1YTdjY2M=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Source[0] fatal error: {
: Fatal: Read from column index 1: got NULL from NOT NULL column, code: 2012 } 2025-06-24T21:11:03.125393Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:928:2729], TxId: 281474976715660, task: 1. Ctx: { TraceId : 01jyhwfbgy3gy8skv8pp8ctwy4. SessionId : ydb://session/3?node_id=1&id=YWI5ODJhMjUtN2IyZjMxNjAtMTMyMDNmYjEtNTE1YTdjY2M=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. InternalError: INTERNAL_ERROR KIKIMR_CONSTRAINT_VIOLATION: {
: Fatal: Read from column index 1: got NULL from NOT NULL column, code: 2012 }. 2025-06-24T21:11:03.130351Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:929:2730], TxId: 281474976715660, task: 2. Ctx: { TraceId : 01jyhwfbgy3gy8skv8pp8ctwy4. SessionId : ydb://session/3?node_id=1&id=YWI5ODJhMjUtN2IyZjMxNjAtMTMyMDNmYjEtNTE1YTdjY2M=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. InternalError: INTERNAL_ERROR DEFAULT_ERROR: {
: Error: Terminate execution }. 2025-06-24T21:11:03.137028Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YWI5ODJhMjUtN2IyZjMxNjAtMTMyMDNmYjEtNTE1YTdjY2M=, ActorId: [1:812:2660], ActorState: ExecuteState, TraceId: 01jyhwfbgy3gy8skv8pp8ctwy4, Create QueryResponse for error on request, msg: 2025-06-24T21:11:03.138891Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwfbgy3gy8skv8pp8ctwy4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWI5ODJhMjUtN2IyZjMxNjAtMTMyMDNmYjEtNTE1YTdjY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::InsertAbort_Literal_Conflict-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 3229, MsgBus: 26143 2025-06-24T21:10:46.087431Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626364255332766:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:46.087734Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0019ac/r3tmp/tmp3BBvFz/pdisk_1.dat 2025-06-24T21:10:46.339920Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626364255332738:2079] 1750799446086222 != 1750799446086225 2025-06-24T21:10:46.347674Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3229, node 1 2025-06-24T21:10:46.403819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:46.403853Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:46.403863Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:46.403968Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:46.445142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:46.445242Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:46.447873Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26143 TClient is connected to server localhost:26143 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:46.851913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:46.868190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:46.980779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:47.094089Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:47.097414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:47.169193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:49.122047Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626377140236255:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.122124Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.435224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.462821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.491523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.520508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.547446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.587015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.613547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:49.690779Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626377140236917:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.690872Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.691084Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626377140236922:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:49.694555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:49.703481Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626377140236924:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:49.783846Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626377140236975:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:50.773136Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-24T21:10:50.781127Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 72075186224037888 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:10:50.781280Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 3 at tablet 72075186224037888 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:10:50.781462Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [1:7519626381435204562:2472], Table: `/Root/TwoShard` ([72057594046644480:2:1]), SessionActorId: [1:7519626381435204540:2472] ... port.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23716, node 3 2025-06-24T21:10:57.657329Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:57.657431Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:57.659478Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:57.709865Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:57.709890Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:57.709898Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:57.710037Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20849 TClient is connected to server localhost:20849 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:10:58.198233Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:58.209583Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:58.280955Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:58.407576Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:58.486636Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:58.604578Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:00.545898Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626424472299622:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.545980Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.608726Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.638153Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.666580Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.695271Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.721510Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.748606Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.814733Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.902620Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626424472300288:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.902734Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626424472300293:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.902743Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:00.905708Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:00.913837Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519626424472300295:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:00.998935Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519626424472300346:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:02.156789Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [3:7519626433062235201:2476], TxId: 281474976715673, task: 1. Ctx: { TraceId : 01jyhwfbhkdqdtce0xxnbe0fhh. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=3&id=MmQwOWI4MWYtMjZmNmQ0YmEtMzM0YzlkYzEtYjhjZGFiZg==. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }. 2025-06-24T21:11:02.157005Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7519626433062235203:2477], TxId: 281474976715673, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jyhwfbhkdqdtce0xxnbe0fhh. SessionId : ydb://session/3?node_id=3&id=MmQwOWI4MWYtMjZmNmQ0YmEtMzM0YzlkYzEtYjhjZGFiZg==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [3:7519626433062235198:2463], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-24T21:11:02.157235Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=MmQwOWI4MWYtMjZmNmQ0YmEtMzM0YzlkYzEtYjhjZGFiZg==, ActorId: [3:7519626428767267870:2463], ActorState: ExecuteState, TraceId: 01jyhwfbhkdqdtce0xxnbe0fhh, Create QueryResponse for error on request, msg: 2025-06-24T21:11:02.525523Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519626411587396143:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:02.525687Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::DeleteAfterInsert [GOOD] Test command err: Trying to start YDB, gRPC: 6083, MsgBus: 14081 2025-06-24T21:10:52.300235Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626388492956748:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:52.300393Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0017dd/r3tmp/tmpBhW1Tp/pdisk_1.dat 2025-06-24T21:10:52.676803Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:52.680830Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626388492956730:2079] 1750799452295407 != 1750799452295410 TServer::EnableGrpc on GrpcPort 6083, node 1 2025-06-24T21:10:52.748085Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:52.748489Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:52.754118Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:52.773046Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:52.773075Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:52.773115Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:52.773244Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14081 TClient is connected to server localhost:14081 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:53.307696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:53.314219Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:10:53.330124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:10:53.463131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:53.594684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:53.648571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:55.372954Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626401377860278:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.373056Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.654878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.682126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.708244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.735649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.808397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.852097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.881595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.933768Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626401377860939:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.933832Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.933966Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626401377860944:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.937331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:55.946662Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626401377860946:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:56.044763Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626405672828293:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:57.049832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.300689Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626388492956748:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:57.300797Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 62823, MsgBus: 5574 2025-06-24T21:10:58.334472Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626417984526311:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:58.334529Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0017dd/r3tmp/tmpTFEpOV/pdisk_1.dat 2025-06-24T21:10:58.436800Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:58.437890Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626417984526281:2079] 1750799458333830 != 1750799458333833 TServer::EnableGrpc on GrpcPort 62823, node 2 2025-06-24T21:10:58.464179Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:58.464296Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:58.466028Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:58.487715Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:58.487742Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:58.487751Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:58.487872Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5574 TClient is connected to server localhost:5574 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:58.892234Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:10:58.901640Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:58.952982Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:59.102278Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:59.159750Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:59.339524Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:01.151890Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626430869429786:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.151992Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.194560Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.216527Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.250498Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.273383Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.297446Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.324327Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.390709Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.432791Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626430869430450:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.432860Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626430869430455:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.432860Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.435568Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:01.462803Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626430869430457:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:01.527163Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626430869430508:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:02.263119Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestConcatToLongKey [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! 
new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:82:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:81:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:81:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! 
new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:83:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:85:2114] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:89:2057] recipient: [7:85:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! new actor is[7:88:2115] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:174:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:86:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:89:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:90:2057] recipient: [8:88:2117] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:92:2057] recipient: [8:88:2117] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! new actor is[8:91:2118] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:177:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:86:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:88:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:90:2057] recipient: [9:89:2117] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:92:2057] recipient: [9:89:2117] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! 
new actor is[9:91:2118] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:177:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:87:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:91:2057] recipient: [10:89:2117] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:93:2057] recipient: [10:89:2117] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! new actor is[10:92:2118] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:178:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:89:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:92:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:93:2057] recipient: [11:91:2119] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:95:2057] recipient: [11:91:2119] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! new actor is[11:94:2120] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:180:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... is [33:58:2098] sender: [33:94:2057] recipient: [33:37:2084] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:97:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [33:58:2098] sender: [33:98:2057] recipient: [33:96:2122] Leader for TabletID 72057594037927937 is [33:99:2123] sender: [33:100:2057] recipient: [33:96:2122] !Reboot 72057594037927937 (actor [33:58:2098]) rebooted! !Reboot 72057594037927937 (actor [33:58:2098]) tablet resolver refreshed! 
new actor is[33:99:2123] Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:56:2057] recipient: [34:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:56:2057] recipient: [34:52:2096] Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:59:2057] recipient: [34:52:2096] Leader for TabletID 72057594037927937 is [34:58:2098] sender: [34:76:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:56:2057] recipient: [35:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:56:2057] recipient: [35:52:2096] Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:59:2057] recipient: [35:52:2096] Leader for TabletID 72057594037927937 is [35:58:2098] sender: [35:76:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:56:2057] recipient: [36:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:56:2057] recipient: [36:52:2096] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:59:2057] recipient: [36:52:2096] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:76:2057] recipient: [36:14:2061] !Reboot 72057594037927937 (actor [36:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:78:2057] recipient: [36:37:2084] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:81:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [36:58:2098] sender: [36:82:2057] recipient: [36:80:2111] Leader for TabletID 72057594037927937 is [36:83:2112] sender: [36:84:2057] recipient: [36:80:2111] !Reboot 72057594037927937 (actor [36:58:2098]) rebooted! !Reboot 72057594037927937 (actor [36:58:2098]) tablet resolver refreshed! new actor is[36:83:2112] Leader for TabletID 72057594037927937 is [36:83:2112] sender: [36:169:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:56:2057] recipient: [37:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:56:2057] recipient: [37:53:2096] Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:59:2057] recipient: [37:53:2096] Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:76:2057] recipient: [37:14:2061] !Reboot 72057594037927937 (actor [37:58:2098]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:78:2057] recipient: [37:37:2084] Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:81:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [37:58:2098] sender: [37:82:2057] recipient: [37:80:2111] Leader for TabletID 72057594037927937 is [37:83:2112] sender: [37:84:2057] recipient: [37:80:2111] !Reboot 72057594037927937 (actor [37:58:2098]) rebooted! !Reboot 72057594037927937 (actor [37:58:2098]) tablet resolver refreshed! 
new actor is[37:83:2112] Leader for TabletID 72057594037927937 is [37:83:2112] sender: [37:169:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:56:2057] recipient: [38:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:56:2057] recipient: [38:52:2096] Leader for TabletID 72057594037927937 is [38:58:2098] sender: [38:59:2057] recipient: [38:52:2096] Leader for TabletID 72057594037927937 is [38:58:2098] sender: [38:76:2057] recipient: [38:14:2061] !Reboot 72057594037927937 (actor [38:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [38:58:2098] sender: [38:79:2057] recipient: [38:37:2084] Leader for TabletID 72057594037927937 is [38:58:2098] sender: [38:82:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [38:58:2098] sender: [38:83:2057] recipient: [38:81:2111] Leader for TabletID 72057594037927937 is [38:84:2112] sender: [38:85:2057] recipient: [38:81:2111] !Reboot 72057594037927937 (actor [38:58:2098]) rebooted! !Reboot 72057594037927937 (actor [38:58:2098]) tablet resolver refreshed! new actor is[38:84:2112] Leader for TabletID 72057594037927937 is [38:84:2112] sender: [38:170:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:56:2057] recipient: [39:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:56:2057] recipient: [39:52:2096] Leader for TabletID 72057594037927937 is [39:58:2098] sender: [39:59:2057] recipient: [39:52:2096] Leader for TabletID 72057594037927937 is [39:58:2098] sender: [39:76:2057] recipient: [39:14:2061] !Reboot 72057594037927937 (actor [39:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [39:58:2098] sender: [39:82:2057] recipient: [39:37:2084] Leader for TabletID 72057594037927937 is [39:58:2098] sender: [39:85:2057] recipient: [39:14:2061] Leader for TabletID 72057594037927937 is [39:58:2098] sender: [39:86:2057] recipient: [39:84:2114] Leader for TabletID 72057594037927937 is [39:87:2115] sender: [39:88:2057] recipient: [39:84:2114] !Reboot 72057594037927937 (actor [39:58:2098]) rebooted! !Reboot 72057594037927937 (actor [39:58:2098]) tablet resolver refreshed! new actor is[39:87:2115] Leader for TabletID 72057594037927937 is [39:87:2115] sender: [39:173:2057] recipient: [39:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:56:2057] recipient: [40:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:56:2057] recipient: [40:53:2096] Leader for TabletID 72057594037927937 is [40:58:2098] sender: [40:59:2057] recipient: [40:53:2096] Leader for TabletID 72057594037927937 is [40:58:2098] sender: [40:76:2057] recipient: [40:14:2061] !Reboot 72057594037927937 (actor [40:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [40:58:2098] sender: [40:82:2057] recipient: [40:37:2084] Leader for TabletID 72057594037927937 is [40:58:2098] sender: [40:85:2057] recipient: [40:84:2114] Leader for TabletID 72057594037927937 is [40:58:2098] sender: [40:86:2057] recipient: [40:14:2061] Leader for TabletID 72057594037927937 is [40:87:2115] sender: [40:88:2057] recipient: [40:84:2114] !Reboot 72057594037927937 (actor [40:58:2098]) rebooted! !Reboot 72057594037927937 (actor [40:58:2098]) tablet resolver refreshed! 
new actor is[40:87:2115] Leader for TabletID 72057594037927937 is [40:87:2115] sender: [40:173:2057] recipient: [40:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:56:2057] recipient: [41:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:56:2057] recipient: [41:51:2096] Leader for TabletID 72057594037927937 is [41:58:2098] sender: [41:59:2057] recipient: [41:51:2096] Leader for TabletID 72057594037927937 is [41:58:2098] sender: [41:76:2057] recipient: [41:14:2061] !Reboot 72057594037927937 (actor [41:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [41:58:2098] sender: [41:83:2057] recipient: [41:37:2084] Leader for TabletID 72057594037927937 is [41:58:2098] sender: [41:86:2057] recipient: [41:85:2114] Leader for TabletID 72057594037927937 is [41:58:2098] sender: [41:87:2057] recipient: [41:14:2061] Leader for TabletID 72057594037927937 is [41:88:2115] sender: [41:89:2057] recipient: [41:85:2114] !Reboot 72057594037927937 (actor [41:58:2098]) rebooted! !Reboot 72057594037927937 (actor [41:58:2098]) tablet resolver refreshed! new actor is[41:88:2115] Leader for TabletID 72057594037927937 is [41:88:2115] sender: [41:174:2057] recipient: [41:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [42:56:2057] recipient: [42:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [42:56:2057] recipient: [42:53:2096] Leader for TabletID 72057594037927937 is [42:58:2098] sender: [42:59:2057] recipient: [42:53:2096] Leader for TabletID 72057594037927937 is [42:58:2098] sender: [42:76:2057] recipient: [42:14:2061] !Reboot 72057594037927937 (actor [42:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [42:58:2098] sender: [42:86:2057] recipient: [42:37:2084] Leader for TabletID 72057594037927937 is [42:58:2098] sender: [42:89:2057] recipient: [42:14:2061] Leader for TabletID 72057594037927937 is [42:58:2098] sender: [42:90:2057] recipient: [42:88:2117] Leader for TabletID 72057594037927937 is [42:91:2118] sender: [42:92:2057] recipient: [42:88:2117] !Reboot 72057594037927937 (actor [42:58:2098]) rebooted! !Reboot 72057594037927937 (actor [42:58:2098]) tablet resolver refreshed! new actor is[42:91:2118] Leader for TabletID 72057594037927937 is [42:91:2118] sender: [42:177:2057] recipient: [42:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [43:56:2057] recipient: [43:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [43:56:2057] recipient: [43:51:2096] Leader for TabletID 72057594037927937 is [43:58:2098] sender: [43:59:2057] recipient: [43:51:2096] Leader for TabletID 72057594037927937 is [43:58:2098] sender: [43:76:2057] recipient: [43:14:2061] !Reboot 72057594037927937 (actor [43:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [43:58:2098] sender: [43:86:2057] recipient: [43:37:2084] Leader for TabletID 72057594037927937 is [43:58:2098] sender: [43:88:2057] recipient: [43:14:2061] Leader for TabletID 72057594037927937 is [43:58:2098] sender: [43:90:2057] recipient: [43:89:2117] Leader for TabletID 72057594037927937 is [43:91:2118] sender: [43:92:2057] recipient: [43:89:2117] !Reboot 72057594037927937 (actor [43:58:2098]) rebooted! !Reboot 72057594037927937 (actor [43:58:2098]) tablet resolver refreshed! 
new actor is[43:91:2118] Leader for TabletID 72057594037927937 is [43:91:2118] sender: [43:177:2057] recipient: [43:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [44:56:2057] recipient: [44:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [44:56:2057] recipient: [44:53:2096] Leader for TabletID 72057594037927937 is [44:58:2098] sender: [44:59:2057] recipient: [44:53:2096] Leader for TabletID 72057594037927937 is [44:58:2098] sender: [44:76:2057] recipient: [44:14:2061] !Reboot 72057594037927937 (actor [44:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [44:58:2098] sender: [44:87:2057] recipient: [44:37:2084] Leader for TabletID 72057594037927937 is [44:58:2098] sender: [44:90:2057] recipient: [44:14:2061] Leader for TabletID 72057594037927937 is [44:58:2098] sender: [44:91:2057] recipient: [44:89:2117] Leader for TabletID 72057594037927937 is [44:92:2118] sender: [44:93:2057] recipient: [44:89:2117] !Reboot 72057594037927937 (actor [44:58:2098]) rebooted! !Reboot 72057594037927937 (actor [44:58:2098]) tablet resolver refreshed! new actor is[44:92:2118] Leader for TabletID 72057594037927937 is [44:92:2118] sender: [44:178:2057] recipient: [44:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [45:56:2057] recipient: [45:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [45:56:2057] recipient: [45:53:2096] Leader for TabletID 72057594037927937 is [45:58:2098] sender: [45:59:2057] recipient: [45:53:2096] Leader for TabletID 72057594037927937 is [45:58:2098] sender: [45:76:2057] recipient: [45:14:2061] |94.8%| [TA] $(B)/ydb/core/kqp/ut/data/test-results/unittest/{meta.json ... results_accumulator.log} |94.8%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/data/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ReplaceDuplicates [GOOD] Test command err: Trying to start YDB, gRPC: 5848, MsgBus: 3191 2025-06-24T21:10:52.965639Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626390127682467:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:52.965736Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0017db/r3tmp/tmppXFqAK/pdisk_1.dat 2025-06-24T21:10:53.273538Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:53.285463Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626390127682441:2079] 1750799452964913 != 1750799452964916 TServer::EnableGrpc on GrpcPort 5848, node 1 2025-06-24T21:10:53.350633Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:53.350719Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:53.352897Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:53.362883Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:53.362904Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:53.362913Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:53.363101Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3191 TClient is connected to server localhost:3191 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:10:53.880268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:53.901427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:53.986086Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:54.040188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:54.171117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:54.232526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:55.930717Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626403012585977:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.930845Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:56.229358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.258458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.287468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.314350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.341264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.372823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.417310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.467502Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626407307553932:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:56.467568Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:56.467651Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626407307553937:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:56.471490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:56.480570Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626407307553939:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:56.569768Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626407307553990:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:57.580903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:57.965828Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626390127682467:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:57.965887Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 17064, MsgBus: 26071 2025-06-24T21:10:58.943925Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626416366986308:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:58.943989Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0017db/r3tmp/tmpfuJYgA/pdisk_1.dat 2025-06-24T21:10:59.042151Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626416366986289:2079] 1750799458943424 != 1750799458943427 2025-06-24T21:10:59.042241Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17064, node 2 2025-06-24T21:10:59.079512Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:59.080168Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:59.084928Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:59.104589Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:59.104611Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:59.104619Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:59.104738Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26071 TClient is connected to server localhost:26071 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:59.525308Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:59.539930Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:59.609294Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:59.740818Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:59.811065Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:59.970814Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:01.583278Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626429251889820:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.583364Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.630833Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.659925Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.688029Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.716425Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.744276Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.810295Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.840559Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:01.882545Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626429251890482:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.882604Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.882677Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626429251890487:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:01.886383Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:01.896563Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626429251890489:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:01.974866Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626429251890540:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:02.798968Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> DistributedEraseTests::DistributedEraseTxShouldFailOnVariousErrors >> TKeyValueTest::TestCleanUpDataWithMockDisk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestCleanUpDataWithMockDisk [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! 
Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:82:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:82:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvCleanUpDataRequest ! 
Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvForceTabletDataCleanup ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:82:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:85:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:84:2114] Leader for TabletID 72057594037927937 is [7:87:2115] sender: [7:88:2057] recipient: [7:84:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! new actor is[7:87:2115] Leader for TabletID 72057594037927937 is [7:87:2115] sender: [7:173:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvTablet::TEvFollowerGcApplied ! Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:87:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:89:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:91:2057] recipient: [8:90:2118] Leader for TabletID 72057594037927937 is [8:92:2119] sender: [8:93:2057] recipient: [8:90:2118] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! new actor is[8:92:2119] Leader for TabletID 72057594037927937 is [8:92:2119] sender: [8:178:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:91:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:94:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:95:2057] recipient: [9:93:2122] Leader for TabletID 72057594037927937 is [9:96:2123] sender: [9:97:2057] recipient: [9:93:2122] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! new actor is[9:96:2123] Leader for TabletID 72057594037927937 is [9:96:2123] sender: [9:182:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvKeyValue::TEvCleanUpDataRequest ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:91:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:94:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:95:2057] recipient: [10:93:2122] Leader for TabletID 72057594037927937 is [10:96:2123] sender: [10:97:2057] recipient: [10:93:2122] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! new actor is[10:96:2123] Leader for TabletID 72057594037927937 is [10:96:2123] sender: [10:182:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:93:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:96:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:97:2057] recipient: [11:95:2124] Leader for TabletID 72057594037927937 is [11:98:2125] sender: [11:99:2057] recipient: [11:95:2124] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! new actor is[11:98:2125] Leader for TabletID 72057594037927937 is [11:98:2125] sender: [11:184:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:1 ... 
37927937 is [35:101:2126] sender: [35:187:2057] recipient: [35:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:58:2057] recipient: [36:55:2098] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:58:2057] recipient: [36:55:2098] Leader for TabletID 72057594037927937 is [36:60:2100] sender: [36:61:2057] recipient: [36:55:2098] Leader for TabletID 72057594037927937 is [36:60:2100] sender: [36:78:2057] recipient: [36:17:2064] !Reboot 72057594037927937 (actor [36:60:2100]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [36:60:2100] sender: [36:97:2057] recipient: [36:40:2087] Leader for TabletID 72057594037927937 is [36:60:2100] sender: [36:100:2057] recipient: [36:17:2064] Leader for TabletID 72057594037927937 is [36:60:2100] sender: [36:101:2057] recipient: [36:99:2125] Leader for TabletID 72057594037927937 is [36:102:2126] sender: [36:103:2057] recipient: [36:99:2125] !Reboot 72057594037927937 (actor [36:60:2100]) rebooted! !Reboot 72057594037927937 (actor [36:60:2100]) tablet resolver refreshed! new actor is[36:102:2126] Leader for TabletID 72057594037927937 is [36:102:2126] sender: [36:188:2057] recipient: [36:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:58:2057] recipient: [37:55:2098] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:58:2057] recipient: [37:55:2098] Leader for TabletID 72057594037927937 is [37:60:2100] sender: [37:61:2057] recipient: [37:55:2098] Leader for TabletID 72057594037927937 is [37:60:2100] sender: [37:78:2057] recipient: [37:17:2064] !Reboot 72057594037927937 (actor [37:60:2100]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [37:60:2100] sender: [37:100:2057] recipient: [37:40:2087] Leader for TabletID 72057594037927937 is [37:60:2100] sender: [37:102:2057] recipient: [37:17:2064] Leader for TabletID 72057594037927937 is [37:60:2100] sender: [37:104:2057] recipient: [37:103:2128] Leader for TabletID 72057594037927937 is [37:105:2129] sender: [37:106:2057] recipient: [37:103:2128] !Reboot 72057594037927937 (actor [37:60:2100]) rebooted! !Reboot 72057594037927937 (actor [37:60:2100]) tablet resolver refreshed! new actor is[37:105:2129] Leader for TabletID 72057594037927937 is [37:105:2129] sender: [37:191:2057] recipient: [37:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:58:2057] recipient: [38:55:2098] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:58:2057] recipient: [38:55:2098] Leader for TabletID 72057594037927937 is [38:60:2100] sender: [38:61:2057] recipient: [38:55:2098] Leader for TabletID 72057594037927937 is [38:60:2100] sender: [38:78:2057] recipient: [38:17:2064] !Reboot 72057594037927937 (actor [38:60:2100]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [38:60:2100] sender: [38:100:2057] recipient: [38:40:2087] Leader for TabletID 72057594037927937 is [38:60:2100] sender: [38:103:2057] recipient: [38:17:2064] Leader for TabletID 72057594037927937 is [38:60:2100] sender: [38:104:2057] recipient: [38:102:2128] Leader for TabletID 72057594037927937 is [38:105:2129] sender: [38:106:2057] recipient: [38:102:2128] !Reboot 72057594037927937 (actor [38:60:2100]) rebooted! !Reboot 72057594037927937 (actor [38:60:2100]) tablet resolver refreshed! 
new actor is[38:105:2129] Leader for TabletID 72057594037927937 is [38:105:2129] sender: [38:191:2057] recipient: [38:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:58:2057] recipient: [39:55:2098] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:58:2057] recipient: [39:55:2098] Leader for TabletID 72057594037927937 is [39:60:2100] sender: [39:61:2057] recipient: [39:55:2098] Leader for TabletID 72057594037927937 is [39:60:2100] sender: [39:78:2057] recipient: [39:17:2064] !Reboot 72057594037927937 (actor [39:60:2100]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [39:60:2100] sender: [39:101:2057] recipient: [39:40:2087] Leader for TabletID 72057594037927937 is [39:60:2100] sender: [39:104:2057] recipient: [39:17:2064] Leader for TabletID 72057594037927937 is [39:60:2100] sender: [39:105:2057] recipient: [39:103:2128] Leader for TabletID 72057594037927937 is [39:106:2129] sender: [39:107:2057] recipient: [39:103:2128] !Reboot 72057594037927937 (actor [39:60:2100]) rebooted! !Reboot 72057594037927937 (actor [39:60:2100]) tablet resolver refreshed! new actor is[39:106:2129] Leader for TabletID 72057594037927937 is [39:106:2129] sender: [39:192:2057] recipient: [39:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:58:2057] recipient: [40:55:2098] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:58:2057] recipient: [40:55:2098] Leader for TabletID 72057594037927937 is [40:60:2100] sender: [40:61:2057] recipient: [40:55:2098] Leader for TabletID 72057594037927937 is [40:60:2100] sender: [40:78:2057] recipient: [40:17:2064] !Reboot 72057594037927937 (actor [40:60:2100]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [40:60:2100] sender: [40:102:2057] recipient: [40:40:2087] Leader for TabletID 72057594037927937 is [40:60:2100] sender: [40:105:2057] recipient: [40:17:2064] Leader for TabletID 72057594037927937 is [40:60:2100] sender: [40:106:2057] recipient: [40:104:2129] Leader for TabletID 72057594037927937 is [40:107:2130] sender: [40:108:2057] recipient: [40:104:2129] !Reboot 72057594037927937 (actor [40:60:2100]) rebooted! !Reboot 72057594037927937 (actor [40:60:2100]) tablet resolver refreshed! new actor is[40:107:2130] Leader for TabletID 72057594037927937 is [40:107:2130] sender: [40:127:2057] recipient: [40:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:58:2057] recipient: [41:55:2098] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:58:2057] recipient: [41:55:2098] Leader for TabletID 72057594037927937 is [41:60:2100] sender: [41:61:2057] recipient: [41:55:2098] Leader for TabletID 72057594037927937 is [41:60:2100] sender: [41:78:2057] recipient: [41:17:2064] !Reboot 72057594037927937 (actor [41:60:2100]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! Leader for TabletID 72057594037927937 is [41:60:2100] sender: [41:103:2057] recipient: [41:40:2087] Leader for TabletID 72057594037927937 is [41:60:2100] sender: [41:105:2057] recipient: [41:17:2064] Leader for TabletID 72057594037927937 is [41:60:2100] sender: [41:107:2057] recipient: [41:106:2130] Leader for TabletID 72057594037927937 is [41:108:2131] sender: [41:109:2057] recipient: [41:106:2130] !Reboot 72057594037927937 (actor [41:60:2100]) rebooted! !Reboot 72057594037927937 (actor [41:60:2100]) tablet resolver refreshed! 
new actor is[41:108:2131] Leader for TabletID 72057594037927937 is [41:108:2131] sender: [41:128:2057] recipient: [41:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [42:58:2057] recipient: [42:55:2098] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [42:58:2057] recipient: [42:55:2098] Leader for TabletID 72057594037927937 is [42:60:2100] sender: [42:61:2057] recipient: [42:55:2098] Leader for TabletID 72057594037927937 is [42:60:2100] sender: [42:78:2057] recipient: [42:17:2064] !Reboot 72057594037927937 (actor [42:60:2100]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [42:60:2100] sender: [42:106:2057] recipient: [42:40:2087] Leader for TabletID 72057594037927937 is [42:60:2100] sender: [42:109:2057] recipient: [42:17:2064] Leader for TabletID 72057594037927937 is [42:60:2100] sender: [42:110:2057] recipient: [42:108:2133] Leader for TabletID 72057594037927937 is [42:111:2134] sender: [42:112:2057] recipient: [42:108:2133] !Reboot 72057594037927937 (actor [42:60:2100]) rebooted! !Reboot 72057594037927937 (actor [42:60:2100]) tablet resolver refreshed! new actor is[42:111:2134] Leader for TabletID 72057594037927937 is [42:111:2134] sender: [42:197:2057] recipient: [42:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [43:58:2057] recipient: [43:54:2098] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [43:58:2057] recipient: [43:54:2098] Leader for TabletID 72057594037927937 is [43:60:2100] sender: [43:61:2057] recipient: [43:54:2098] Leader for TabletID 72057594037927937 is [43:60:2100] sender: [43:78:2057] recipient: [43:17:2064] !Reboot 72057594037927937 (actor [43:60:2100]) on event NKikimr::TEvKeyValue::TEvCleanUpDataRequest ! Leader for TabletID 72057594037927937 is [43:60:2100] sender: [43:106:2057] recipient: [43:40:2087] Leader for TabletID 72057594037927937 is [43:60:2100] sender: [43:109:2057] recipient: [43:17:2064] Leader for TabletID 72057594037927937 is [43:60:2100] sender: [43:110:2057] recipient: [43:108:2133] Leader for TabletID 72057594037927937 is [43:111:2134] sender: [43:112:2057] recipient: [43:108:2133] !Reboot 72057594037927937 (actor [43:60:2100]) rebooted! !Reboot 72057594037927937 (actor [43:60:2100]) tablet resolver refreshed! new actor is[43:111:2134] Leader for TabletID 72057594037927937 is [43:111:2134] sender: [43:197:2057] recipient: [43:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [44:58:2057] recipient: [44:54:2098] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [44:58:2057] recipient: [44:54:2098] Leader for TabletID 72057594037927937 is [44:60:2100] sender: [44:61:2057] recipient: [44:54:2098] Leader for TabletID 72057594037927937 is [44:60:2100] sender: [44:78:2057] recipient: [44:17:2064] !Reboot 72057594037927937 (actor [44:60:2100]) on event NKikimr::TEvKeyValue::TEvForceTabletDataCleanup ! Leader for TabletID 72057594037927937 is [44:60:2100] sender: [44:106:2057] recipient: [44:40:2087] Leader for TabletID 72057594037927937 is [44:60:2100] sender: [44:109:2057] recipient: [44:17:2064] Leader for TabletID 72057594037927937 is [44:60:2100] sender: [44:110:2057] recipient: [44:108:2133] Leader for TabletID 72057594037927937 is [44:111:2134] sender: [44:112:2057] recipient: [44:108:2133] !Reboot 72057594037927937 (actor [44:60:2100]) rebooted! !Reboot 72057594037927937 (actor [44:60:2100]) tablet resolver refreshed! 
new actor is[44:111:2134] Leader for TabletID 72057594037927937 is [44:111:2134] sender: [44:197:2057] recipient: [44:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [45:58:2057] recipient: [45:55:2098] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [45:58:2057] recipient: [45:55:2098] Leader for TabletID 72057594037927937 is [45:60:2100] sender: [45:61:2057] recipient: [45:55:2098] Leader for TabletID 72057594037927937 is [45:60:2100] sender: [45:78:2057] recipient: [45:17:2064] !Reboot 72057594037927937 (actor [45:60:2100]) on event NKikimr::TEvTablet::TEvFollowerGcApplied ! Leader for TabletID 72057594037927937 is [45:60:2100] sender: [45:111:2057] recipient: [45:40:2087] Leader for TabletID 72057594037927937 is [45:60:2100] sender: [45:114:2057] recipient: [45:17:2064] Leader for TabletID 72057594037927937 is [45:60:2100] sender: [45:115:2057] recipient: [45:113:2137] Leader for TabletID 72057594037927937 is [45:116:2138] sender: [45:117:2057] recipient: [45:113:2137] !Reboot 72057594037927937 (actor [45:60:2100]) rebooted! !Reboot 72057594037927937 (actor [45:60:2100]) tablet resolver refreshed! new actor is[45:116:2138] Leader for TabletID 72057594037927937 is [45:116:2138] sender: [45:202:2057] recipient: [45:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [46:58:2057] recipient: [46:54:2098] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [46:58:2057] recipient: [46:54:2098] Leader for TabletID 72057594037927937 is [46:60:2100] sender: [46:61:2057] recipient: [46:54:2098] Leader for TabletID 72057594037927937 is [46:60:2100] sender: [46:78:2057] recipient: [46:17:2064] >> TKeyValueTest::TestInlineCopyRangeWorks [GOOD] >> TKeyValueTest::TestInlineCopyRangeWorksNewApi >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-1 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-10 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-25 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-26 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-1 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-2 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-2 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-3 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-37 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-38 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-31 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-32 >> KqpEffects::AlterAfterUpsertBeforeUpsertTransaction-UseSink [GOOD] |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.8%| [TM] {asan, 
default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupJoin-StreamLookupJoin >> KqpCost::OlapPointLookup >> KqpCost::PointLookup >> KqpCost::OlapRangeFullScan >> KqpCost::Range |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64Seconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64NanoSeconds >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Seconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Milliseconds >> EraseRowsTests::EraseRowsShouldSuccess [GOOD] >> EraseRowsTests::EraseRowsShouldFailOnVariousErrors >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMilliSeconds >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt4Seconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Microseconds >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMicroSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDate32 >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MilliSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MicroSeconds >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint32 [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnTimestamp64 >> EraseRowsTests::ConditionalEraseRowsShouldNotEraseModifiedRows [GOOD] >> EraseRowsTests::EraseRowsFromReplicatedTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::AlterAfterUpsertBeforeUpsertTransaction-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 32399, MsgBus: 25031 2025-06-24T21:10:51.612292Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626386590523236:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:51.612516Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0017f9/r3tmp/tmpLCXQjK/pdisk_1.dat 2025-06-24T21:10:51.963486Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626386590523217:2079] 1750799451611274 != 1750799451611277 2025-06-24T21:10:51.964164Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 32399, node 1 2025-06-24T21:10:52.023193Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:52.023278Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:52.025732Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:10:52.039987Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:52.040023Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
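Note: the EraseRowsTests::ConditionalEraseRows* and DistributedEraseTests suites listed above exercise TTL-driven conditional row erasure for different column types and time units (seconds, milli-/micro-/nanoseconds, DyNumber, Date32, pg integer types). Purely as a point of reference, a minimal YQL sketch of the feature these tests cover, assuming the TTL syntax and unit keywords from the YDB documentation and using a hypothetical table and column that are not part of this test run:

    CREATE TABLE ttl_demo (            -- hypothetical table, for illustration only
        id Uint64,
        expire_at Uint64,              -- unix time in seconds, matching the Uint64Seconds cases
        PRIMARY KEY (id)
    );
    ALTER TABLE ttl_demo SET (TTL = Interval("PT0S") ON expire_at AS SECONDS);

With an interval of PT0S, rows become eligible for erasure as soon as expire_at is in the past, which is the behaviour the conditional-erase tests check for each supported column type.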
2025-06-24T21:10:52.040033Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:52.040182Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25031 TClient is connected to server localhost:25031 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:52.611135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:52.620048Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:10:52.639286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:52.780803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:52.927673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:10:52.992119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:54.509772Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626399475426768:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.509920Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:54.844321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.873196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.908328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.936906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:54.967608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.040999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.067641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:55.117717Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626403770394727:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.117789Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.117904Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626403770394732:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:55.121298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:55.131315Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626403770394734:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:10:55.191341Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626403770394785:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:10:56.272759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:56.571235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:10:56.592050Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_ ... 26434988544604:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:02.508770Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0017f9/r3tmp/tmpyz4l9S/pdisk_1.dat 2025-06-24T21:11:02.603805Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519626434988544577:2079] 1750799462508303 != 1750799462508306 2025-06-24T21:11:02.614840Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11238, node 3 2025-06-24T21:11:02.637886Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:02.637992Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:02.639413Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:02.652640Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:02.652663Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:02.652670Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:02.652764Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3464 TClient is connected to server localhost:3464 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:03.059884Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:03.069243Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:03.118490Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:03.266801Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:03.346493Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:03.537402Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:05.375707Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626447873448090:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:05.375808Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:05.412825Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:05.442438Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:05.468690Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:05.493978Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:05.523038Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:05.549365Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:05.615303Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:05.691637Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626447873448755:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:05.691719Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:05.691728Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519626447873448760:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:05.695089Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:05.704451Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519626447873448762:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:05.801214Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519626447873448813:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:06.772386Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:06.963835Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:11:07.048536Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NDE4NzljYTQtZGQyMjU4ODItMmZhNzc5NmItMjUzODFlM2U=, ActorId: [3:7519626452168416339:2463], ActorState: ExecuteState, TraceId: 01jyhwfgh58e0jf7r5tecc7v5x, Create QueryResponse for error on request, msg: >> KqpImmediateEffects::ForceImmediateEffectsExecution+UseSink [GOOD] |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapWriteRow |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> DistributedEraseTests::DistributedEraseTxShouldFailOnVariousErrors [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldNotErase [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnVariousErrors >> EraseRowsTests::ConditionalEraseRowsShouldErase >> KqpCost::ScanScriptingRangeFullScan-SourceRead >> KqpCost::OltpWriteRow+isSink >> KqpCost::IndexLookupJoin+StreamLookupJoin >> DistributedEraseTests::ConditionalEraseRowsShouldEraseOnUint32 [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSchemeTx >> DistributedEraseTests::ConditionalEraseRowsShouldErase [GOOD] >> DistributedEraseTests::ConditionalEraseRowsCheckLimits >> KqpCost::ScanScriptingRangeFullScan+SourceRead ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ForceImmediateEffectsExecution+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 4221, MsgBus: 25838 2025-06-24T21:10:55.491787Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626402508774896:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:10:55.491905Z node 1 :METADATA_PROVIDER ERROR: 
log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0017d4/r3tmp/tmpA4Wb5y/pdisk_1.dat 2025-06-24T21:10:55.790967Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:55.791317Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626402508774876:2079] 1750799455490888 != 1750799455490891 TServer::EnableGrpc on GrpcPort 4221, node 1 2025-06-24T21:10:55.849522Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:10:55.849547Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:10:55.849560Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:10:55.849669Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:10:55.853716Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:10:55.853801Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:10:55.855656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25838 TClient is connected to server localhost:25838 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:10:56.377520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:10:56.399246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
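Note: the recurring KQP_WORKLOAD_SERVICE warnings in this log ("Resource pool default not found or you don't have access permissions", followed by a "Transaction ... completed, doublechecking" retry and a TX_PROXY "path exist, request accepts it" message) appear while the default workload-manager pool under /Root/.metadata/workload_manager/pools/default is still being auto-created at test startup; the same sequence repeats below. Purely for illustration, a hedged YQL sketch of declaring a resource pool explicitly, assuming the CREATE RESOURCE POOL statement and parameter names from the YDB documentation; the pool name and limits are hypothetical and not used by these tests:

    CREATE RESOURCE POOL demo_pool WITH (   -- demo_pool is a hypothetical name
        CONCURRENT_QUERY_LIMIT = 10,        -- assumed parameter name
        QUEUE_SIZE = 100                    -- assumed parameter name
    );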
2025-06-24T21:10:56.498857Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:10:56.562466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:56.713529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:56.774761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:10:58.620622Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626415393678420:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:58.620751Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:58.983203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.008793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.050110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.082954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.110526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.138489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.208328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:10:59.256221Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626419688646374:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.256285Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.256351Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626419688646379:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:10:59.259432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:10:59.269691Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626419688646381:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:10:59.360606Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626419688646432:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:00.295035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.326439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.355269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part propose ... me/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:00.491972Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626402508774896:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:00.492055Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 7288, MsgBus: 11095 2025-06-24T21:11:04.265707Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626443979635154:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:04.265776Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0017d4/r3tmp/tmpMOvXzH/pdisk_1.dat 2025-06-24T21:11:04.367733Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:04.368194Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626443979635135:2079] 1750799464265101 != 1750799464265104 TServer::EnableGrpc on GrpcPort 7288, node 2 2025-06-24T21:11:04.403200Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:04.403296Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:04.404745Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:04.413351Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:04.413379Z node 2 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:04.413386Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:04.413502Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11095 TClient is connected to server localhost:11095 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:04.852376Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:04.864684Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:04.918967Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:05.070429Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:05.134814Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:11:05.339029Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:07.064111Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626456864538659:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:07.064202Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:07.105149Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.130237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.157164Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.180260Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.203658Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.230660Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.299919Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.349576Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626456864539318:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:07.349683Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:07.349712Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519626456864539323:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:07.352796Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:07.361426Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519626456864539325:2432], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:07.423224Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519626456864539378:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:08.405930Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-2 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-3 |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> DistributedEraseTests::ConditionalEraseRowsShouldSuccessOnShardedIndex [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldNotEraseModifiedRows >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-10 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-11 |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupAtLeast8BytesInStorage-useSink |94.8%| [TA] $(B)/ydb/core/kqp/ut/effects/test-results/unittest/{meta.json ... results_accumulator.log} |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |94.9%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/effects/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpCost::IndexLookupAndTake+useSink >> KqpCost::QuerySeviceRangeFullScan >> KqpCost::ScanQueryRangeFullScan+SourceRead >> KqpCost::ScanQueryRangeFullScan-SourceRead >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-26 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-27 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-3 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-4 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-38 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-39 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-32 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-33 >> KqpCost::IndexLookupAtLeast8BytesInStorage+useSink |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookup-useSink |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Milliseconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgTimestamp >> KqpCost::IndexLookup+useSink >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDate32 [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDatetime64 >> EraseRowsTests::EraseRowsShouldFailOnVariousErrors [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMilliSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberNanoSeconds >> EraseRowsTests::EraseRowsFromReplicatedTable [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64NanoSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Microseconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgDate >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MicroSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnTimestamp64 [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnVariousErrors [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSplit >> KqpCost::Range [GOOD] >> KqpCost::PointLookup [GOOD] >> KqpCost::AAARangeFullScan ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::EraseRowsShouldFailOnVariousErrors [GOOD] Test command err: 2025-06-24T21:11:06.841304Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:06.841589Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:06.841703Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00374e/r3tmp/tmpn6pRTv/pdisk_1.dat 2025-06-24T21:11:07.087065Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:07.090047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:07.128744Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:07.135338Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799464323121 != 1750799464323125 2025-06-24T21:11:07.179446Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:07.179536Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:07.190645Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:07.269784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.310187Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:11:07.310460Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.355548Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.355640Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.356851Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:07.356942Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:07.356988Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:07.357269Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.357368Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.357419Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:11:07.367983Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.402330Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:07.402564Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.402693Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:11:07.402743Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.402795Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:07.402839Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.403390Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:07.403524Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:07.403615Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.403661Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.403722Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:07.403771Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.403878Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:11:07.404332Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:07.404602Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:11:07.404714Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:11:07.406410Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:07.417092Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:07.417199Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:11:07.564870Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:659:2549], serverId# [1:660:2550], sessionId# [0:0:0] 2025-06-24T21:11:07.569574Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:11:07.569646Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.570383Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.570434Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:07.570495Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:07.570764Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:07.570896Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:07.571271Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.571335Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:07.573267Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:07.573683Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.575221Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:07.575271Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.576117Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:07.576178Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.576913Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.576951Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.577017Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:07.577074Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:07.577120Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:07.577211Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.581723Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:07.583661Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:07.583721Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:07.584634Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:07.616232Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:07.616361Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:702:2579], DatabaseId: /Root, PoolId: ... shard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:12.885699Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:12.885735Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:12.886071Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:12.886166Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:12.886278Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:12.886312Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:12.886352Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:12.886395Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:12.886472Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:623:2528], serverId# [2:631:2533], sessionId# [0:0:0] 2025-06-24T21:11:12.886897Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:12.887109Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:11:12.887221Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:11:12.888876Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:12.899529Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:12.899631Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:11:13.039594Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:658:2549], serverId# [2:659:2550], sessionId# [0:0:0] 2025-06-24T21:11:13.040422Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:11:13.040466Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.041091Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
2025-06-24T21:11:13.041147Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:13.041196Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:13.041457Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:13.041595Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:13.041935Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:13.042002Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:13.042417Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:13.042861Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:13.044284Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:13.044352Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.045248Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:13.045303Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:13.045830Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:13.045870Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:13.045908Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:13.045959Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:13.046002Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:13.046063Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.046784Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:13.048552Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:13.048627Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 
2025-06-24T21:11:13.049482Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:13.098513Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:693:2576], serverId# [2:694:2577], sessionId# [0:0:0] 2025-06-24T21:11:13.098720Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-24T21:11:13.120365Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-24T21:11:13.120443Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.120747Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:693:2576], serverId# [2:694:2577], sessionId# [0:0:0] 2025-06-24T21:11:13.146116Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:699:2582], serverId# [2:700:2583], sessionId# [0:0:0] 2025-06-24T21:11:13.146329Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-24T21:11:13.146538Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-24T21:11:13.146588Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.146803Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:699:2582], serverId# [2:700:2583], sessionId# [0:0:0] 2025-06-24T21:11:13.171210Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:704:2587], serverId# [2:705:2588], sessionId# [0:0:0] 2025-06-24T21:11:13.171382Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-24T21:11:13.171606Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-24T21:11:13.171653Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.171855Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:704:2587], serverId# [2:705:2588], sessionId# [0:0:0] 2025-06-24T21:11:13.216785Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:709:2592], serverId# [2:710:2593], sessionId# [0:0:0] 2025-06-24T21:11:13.216965Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-24T21:11:13.217145Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-24T21:11:13.217201Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.217405Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:709:2592], serverId# [2:710:2593], sessionId# [0:0:0] 2025-06-24T21:11:13.240624Z node 2 
:TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:714:2597], serverId# [2:715:2598], sessionId# [0:0:0] 2025-06-24T21:11:13.240824Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-24T21:11:13.241136Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-24T21:11:13.241189Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.241412Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:714:2597], serverId# [2:715:2598], sessionId# [0:0:0] 2025-06-24T21:11:13.264558Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:719:2602], serverId# [2:720:2603], sessionId# [0:0:0] 2025-06-24T21:11:13.264735Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-24T21:11:13.264946Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-24T21:11:13.264992Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.265188Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:719:2602], serverId# [2:720:2603], sessionId# [0:0:0] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::EraseRowsFromReplicatedTable [GOOD] Test command err: 2025-06-24T21:11:06.719247Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:06.719495Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:06.719607Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036ee/r3tmp/tmpXo2P9b/pdisk_1.dat 2025-06-24T21:11:07.013184Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:07.021552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:07.057256Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:07.064634Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799464323105 != 1750799464323109 2025-06-24T21:11:07.109400Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:07.109503Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:07.121350Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:07.213984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.258522Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:11:07.258807Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.301491Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.301612Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.303193Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:07.303283Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:07.303340Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:07.303640Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.303773Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.303864Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:11:07.314567Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.347111Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:07.347333Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.347439Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:11:07.347487Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.347537Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:07.347578Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.348043Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:07.348155Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:07.348230Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.348282Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.348334Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:07.348376Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.348475Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:11:07.348899Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:07.349165Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:11:07.349275Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:11:07.350959Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:07.361473Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:07.361556Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:11:07.509121Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:659:2549], serverId# [1:660:2550], sessionId# [0:0:0] 2025-06-24T21:11:07.514722Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:11:07.514806Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.515674Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.515729Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:07.515803Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:07.516091Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:07.516227Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:07.516544Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.516610Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:07.518901Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:07.519351Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.521339Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:07.521388Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.522265Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:07.522343Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.523185Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.523226Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.523307Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:07.523368Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:07.523426Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:07.523525Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.527801Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:07.529681Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:07.529758Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:07.530710Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:07.565563Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:07.565697Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:702:2579], DatabaseId: /Root, PoolId: ... TablesActor] ActorId: [2:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:12.497034Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:12.497169Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036ee/r3tmp/tmppBrbzr/pdisk_1.dat 2025-06-24T21:11:12.783884Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-24T21:11:12.785679Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:12.818303Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:12.820958Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1750799469295151 != 1750799469295155 2025-06-24T21:11:12.867454Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:12.867624Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:12.879318Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:12.961270Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.984137Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:624:2529] 2025-06-24T21:11:12.984382Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:13.033144Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:13.033284Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:13.035423Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:13.035537Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:13.035606Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:13.035958Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:13.036102Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:13.036194Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [2:640:2529] in generation 1 2025-06-24T21:11:13.046958Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:13.047034Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:13.047167Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:13.047261Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:642:2539] 2025-06-24T21:11:13.047302Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:13.047337Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:13.047374Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.047700Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:13.047806Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:13.047926Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:13.047986Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:13.048034Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:13.048078Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:13.048149Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:623:2528], serverId# [2:631:2533], sessionId# [0:0:0] 2025-06-24T21:11:13.048538Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:13.048768Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:11:13.048847Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:11:13.050516Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:13.061222Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:13.061335Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:11:13.212170Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:658:2549], serverId# [2:659:2550], sessionId# [0:0:0] 2025-06-24T21:11:13.213087Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:11:13.213145Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.213846Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:13.213887Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:13.213925Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:13.214196Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:13.214356Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:13.214690Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:13.214788Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:13.215177Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:13.215501Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:13.216515Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:13.216556Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.217206Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:13.217258Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:13.217922Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:13.217963Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:13.218007Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:13.218096Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:13.218154Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:13.218248Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.219337Z node 2 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:13.221291Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:13.221366Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:13.222223Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:13.273128Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:693:2576], serverId# [2:694:2577], sessionId# [0:0:0] 2025-06-24T21:11:13.273308Z node 2 :TX_DATASHARD NOTICE: datashard__op_rows.cpp:168: Rejecting erase request on datashard: tablet# 72075186224037888, error# Can't execute erase at replicated table 2025-06-24T21:11:13.273517Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:693:2576], serverId# [2:694:2577], sessionId# [0:0:0] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64NanoSeconds [GOOD] Test command err: 2025-06-24T21:11:06.635440Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:06.635773Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:06.635895Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036ea/r3tmp/tmp6UL76c/pdisk_1.dat 2025-06-24T21:11:07.013284Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:07.021890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:07.066705Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:07.071720Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799464323203 != 1750799464323207 2025-06-24T21:11:07.117011Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:07.117112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:07.128489Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:07.214129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.261880Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:11:07.262134Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.304077Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.304186Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.305776Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:07.305885Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:07.305950Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:07.306322Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.306455Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.306518Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:11:07.317155Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.352711Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:07.352913Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.353013Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:11:07.353061Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.353127Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:07.353180Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.353624Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:07.353721Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:07.353782Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.353817Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.353872Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:07.353912Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.353998Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:11:07.354421Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:07.354666Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:11:07.354790Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:11:07.356442Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:07.367108Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:07.367247Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:11:07.515385Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:659:2549], serverId# [1:660:2550], sessionId# [0:0:0] 2025-06-24T21:11:07.520465Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:11:07.520532Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.521279Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.521330Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:07.521418Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:07.521717Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:07.521856Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:07.522174Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.522234Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:07.524289Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:07.524723Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.526428Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:07.526477Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.527335Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:07.527405Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.528117Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.528154Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.528219Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:07.528278Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:07.528326Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:07.528423Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.532840Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:07.534700Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:07.534772Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:07.535730Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:07.565717Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:07.565825Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:702:2579], DatabaseId: /Root, PoolId: ... .995419Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:12.995463Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:12.995706Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:12.995846Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:12.996160Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:12.996250Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:12.996717Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:12.997144Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:12.998525Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:12.998576Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:12.999704Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:12.999778Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:13.000588Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:13.000637Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:13.000690Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:13.000752Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:13.000821Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:13.000932Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.002000Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:13.004005Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle 
TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:13.004078Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:13.004974Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:13.033496Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:691:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.033595Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:701:2579], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.033917Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.038529Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:13.044818Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:13.091600Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:13.204950Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:13.207527Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:705:2582], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:11:13.245114Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:775:2621] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:13.352721Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwfpc82njtd7kp8w6bxb4c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTg4ZjlkZmUtZjY1NmIwZGQtNjUzZDFjMWEtNTBlN2JjYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:13.355631Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:806:2638], serverId# [2:807:2639], sessionId# [0:0:0] 2025-06-24T21:11:13.356049Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-24T21:11:13.356240Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=4 2025-06-24T21:11:13.367398Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.396894Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:814:2645], serverId# [2:815:2646], sessionId# [0:0:0] 2025-06-24T21:11:13.397901Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-24T21:11:13.410670Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-24T21:11:13.410783Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.411079Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:11:13.411138Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-24T21:11:13.411457Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:13.411518Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:13.411576Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:13.411649Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:13.411748Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:814:2645], serverId# [2:815:2646], sessionId# [0:0:0] 2025-06-24T21:11:13.412803Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:13.413189Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:13.413454Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
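The ConditionalEraseRows cases above walk the datashard's conditional-erase path end to end: a small write (row count=4), then TTxDirectBase(48) performing the erase, a FullScan that ends with "Conditional erase complete", and a ReadTableScan that checks what is left. In production this path is normally reached through a table TTL setting rather than direct requests; purely as an illustrative sketch (table and column names here are hypothetical and the YQL is based on YDB's documented TTL syntax, not on anything in this log), the unit modes covered by these tests correspond to settings like:

    -- Uint64 column holding nanoseconds since the Unix epoch
    -- (cf. ConditionalEraseRowsShouldEraseOnUint64NanoSeconds):
    ALTER TABLE events SET (TTL = Interval("P7D") ON created_ns AS NANOSECONDS);

    -- Uint64 column holding microseconds
    -- (cf. ConditionalEraseRowsShouldEraseOnUint64MicroSeconds):
    ALTER TABLE events SET (TTL = Interval("P7D") ON created_us AS MICROSECONDS);

    -- Date/time-typed columns such as Timestamp take no unit modifier:
    ALTER TABLE events SET (TTL = Interval("P7D") ON expire_at);

With such a setting, rows whose column value is older than the interval become eligible for erasure on the shard's next conditional-erase pass, which is the behaviour the [GOOD] verdicts above assert.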
2025-06-24T21:11:13.413507Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:13.413564Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-24T21:11:13.413819Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:13.413889Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:13.414451Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-24T21:11:13.414727Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T21:11:13.414921Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-24T21:11:13.414972Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-24T21:11:13.416738Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:11:13.416799Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-24T21:11:13.417219Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:13.417259Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:13.417301Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-24T21:11:13.417432Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:13.417493Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:13.417547Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MicroSeconds [GOOD] Test command err: 2025-06-24T21:11:06.740185Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:06.740460Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:06.740577Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036cc/r3tmp/tmpj8m6xL/pdisk_1.dat 2025-06-24T21:11:07.013309Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:07.024832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:07.058758Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:07.063506Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799464323183 != 1750799464323187 2025-06-24T21:11:07.109098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:07.109188Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:07.121360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:07.213995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.258468Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:11:07.258685Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.295168Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.295305Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.297778Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:07.297872Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:07.297941Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:07.299660Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.299819Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.299888Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:11:07.310546Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.333559Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:07.333785Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.333904Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:11:07.333944Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.333993Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:07.334031Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.334484Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:07.334565Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:07.334633Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.334685Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.334746Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:07.334782Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.334857Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:11:07.335351Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:07.335669Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:11:07.335796Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:11:07.337601Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:07.348361Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:07.348484Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:11:07.497373Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:659:2549], serverId# [1:660:2550], sessionId# [0:0:0] 2025-06-24T21:11:07.503426Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:11:07.503511Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.504360Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.504484Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:07.504593Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:07.504870Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:07.505061Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:07.505402Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.505520Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:07.508218Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:07.508881Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.510503Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:07.510550Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.511631Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:07.511704Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.512522Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.512561Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.512631Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:07.512698Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:07.512750Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:07.512852Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.517686Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:07.519690Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:07.519763Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:07.520926Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:07.565558Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:07.565690Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:702:2579], DatabaseId: /Root, PoolId: ... .954192Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:12.954240Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:12.954496Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:12.954631Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:12.954990Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:12.955070Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:12.955589Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:12.956072Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:12.957642Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:12.957698Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:12.958577Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:12.958671Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:12.959888Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:12.959940Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:12.960005Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:12.960096Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:12.960150Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:12.960242Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:12.961351Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:12.963410Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle 
TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:12.963497Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:12.964429Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:12.998773Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:691:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.998886Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:701:2579], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.999301Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.004558Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:13.011865Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:13.062144Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:13.179213Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:13.183169Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:705:2582], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:11:13.225714Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:775:2621] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:13.310400Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwfpb5602cm6m49qxjn5mn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjRkMjRmZTgtY2RhOTA0YjAtMzMzNDM4YWMtYTQ2ZDEyZWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:13.313245Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:806:2638], serverId# [2:807:2639], sessionId# [0:0:0] 2025-06-24T21:11:13.313678Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-24T21:11:13.313853Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=4 2025-06-24T21:11:13.325138Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.377358Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:814:2645], serverId# [2:815:2646], sessionId# [0:0:0] 2025-06-24T21:11:13.378309Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-24T21:11:13.389575Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-24T21:11:13.389670Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.389973Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:11:13.390031Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-24T21:11:13.390296Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:13.390350Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:13.390406Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:13.390464Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:13.390564Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:814:2645], serverId# [2:815:2646], sessionId# [0:0:0] 2025-06-24T21:11:13.391630Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:13.392016Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:13.392252Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
2025-06-24T21:11:13.392304Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:13.392363Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-24T21:11:13.392607Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:13.392686Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:13.393296Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-24T21:11:13.393564Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T21:11:13.393762Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-24T21:11:13.393804Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-24T21:11:13.395242Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:11:13.395304Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-24T21:11:13.395697Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:13.395743Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:13.395785Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-24T21:11:13.395918Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:13.395976Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:13.396030Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 >> KqpCost::IndexLookupAndTake-useSink >> EraseRowsTests::ConditionalEraseRowsShouldErase [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldBreakLocks ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnTimestamp64 [GOOD] Test command err: 2025-06-24T21:11:06.646972Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:06.647271Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:06.647404Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003736/r3tmp/tmplGelnH/pdisk_1.dat 2025-06-24T21:11:07.013284Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:07.021979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:07.067658Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:07.072259Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799464323106 != 1750799464323110 2025-06-24T21:11:07.116147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:07.116268Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:07.127490Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:07.213976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.261747Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:11:07.261934Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.296535Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.296649Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.297878Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:07.297964Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:07.298020Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:07.299638Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.299753Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.299814Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:11:07.310503Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.328667Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:07.330091Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.330237Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:11:07.330282Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.330348Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:07.330393Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.331524Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:07.331605Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:07.331659Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.331694Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.331778Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:07.331812Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.331896Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:11:07.333177Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:07.333463Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:11:07.333547Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:11:07.334967Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:07.345621Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:07.345742Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:11:07.495061Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:659:2549], serverId# [1:660:2550], sessionId# [0:0:0] 2025-06-24T21:11:07.500411Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:11:07.500491Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.501371Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.501428Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:07.501694Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:07.501994Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:07.502173Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:07.502541Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.502610Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:07.506246Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:07.508213Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.509439Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:07.509479Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.510228Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:07.510301Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.510923Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.510962Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.511015Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:07.511062Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:07.511097Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:07.511270Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.515395Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:07.517155Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:07.517219Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:07.518156Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:07.565524Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:07.565647Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:702:2579], DatabaseId: /Root, PoolId: ... .225663Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:13.225709Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:13.225969Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:13.226115Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:13.226429Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:13.226493Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:13.226950Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:13.228074Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:13.229609Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:13.229664Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.230485Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:13.230565Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:13.231402Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:13.231445Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:13.231495Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:13.231562Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:13.231626Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:13.231724Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.232800Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:13.234913Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle 
TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:13.234992Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:13.235959Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:13.268769Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:691:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.268860Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:701:2579], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.269178Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.273923Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:13.280224Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:13.326052Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:13.443640Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:13.447275Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:705:2582], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:11:13.483437Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:775:2621] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:13.612794Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwfpkk2m01a7rwgepca0ps, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2MzN2I1ZTAtN2Y2YmFjZDMtMTI4Mjk0MjItZDM2MmZjNzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:13.621178Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:806:2638], serverId# [2:807:2639], sessionId# [0:0:0] 2025-06-24T21:11:13.621600Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-24T21:11:13.621788Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-06-24T21:11:13.633126Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.660032Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:814:2645], serverId# [2:815:2646], sessionId# [0:0:0] 2025-06-24T21:11:13.661056Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-24T21:11:13.672482Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-24T21:11:13.672578Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:13.672859Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:11:13.672915Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-24T21:11:13.673202Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:13.673258Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:13.673320Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:13.673390Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:13.673492Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:814:2645], serverId# [2:815:2646], sessionId# [0:0:0] 2025-06-24T21:11:13.674440Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:13.674861Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:13.675078Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
2025-06-24T21:11:13.675128Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:13.675207Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-24T21:11:13.675458Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:13.675532Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:13.676132Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-24T21:11:13.676401Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T21:11:13.676613Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-24T21:11:13.676667Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-24T21:11:13.678598Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:11:13.678654Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-24T21:11:13.679090Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:13.679135Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:13.679199Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-24T21:11:13.679330Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:13.679389Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:13.679439Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-3 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-4 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-11 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-12 >> KqpCost::IndexLookupJoin-StreamLookupJoin [GOOD] >> KqpCost::OlapPointLookup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::PointLookup [GOOD] >> KqpCost::OlapRangeFullScan [GOOD] Test command err: Trying to start YDB, gRPC: 5388, MsgBus: 10710 2025-06-24T21:11:08.274362Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626460662420177:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:08.274429Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot 
detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a2d/r3tmp/tmp5M7luF/pdisk_1.dat 2025-06-24T21:11:08.624948Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626460662420142:2079] 1750799468269411 != 1750799468269414 2025-06-24T21:11:08.629484Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5388, node 1 2025-06-24T21:11:08.688340Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:08.688426Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:08.690247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:08.825067Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:08.825105Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:08.825116Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:08.825262Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10710 2025-06-24T21:11:09.287400Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10710 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:09.535959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:09.548909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:11:09.555958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:09.730248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:09.894587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:11:09.965087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:11.370913Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626473547323676:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:11.371047Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:11.826467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:11.857674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:11.891418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:11.923057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:11.976682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.050472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.092787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.165734Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626477842291629:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.165810Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.166012Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626477842291634:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.171077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:12.185177Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626477842291636:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:11:12.284909Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626477842291687:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:13.275280Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626460662420177:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:13.275354Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::Range [GOOD] Test command err: Trying to start YDB, gRPC: 18272, MsgBus: 22288 2025-06-24T21:11:08.273206Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626460339910974:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:08.273610Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a27/r3tmp/tmpO0pkMM/pdisk_1.dat 2025-06-24T21:11:08.621980Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:08.626531Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626460339910951:2079] 1750799468268992 != 1750799468268995 TServer::EnableGrpc on GrpcPort 18272, node 1 2025-06-24T21:11:08.694482Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:08.694582Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:08.696771Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:08.826961Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:08.826990Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:08.826997Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:08.827133Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22288 2025-06-24T21:11:09.285434Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22288 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:09.510961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:11:09.548903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:09.727757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:09.859001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:09.938179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:11.224279Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626473224814484:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:11.224405Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:11.826299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:11.862435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:11.937815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.015985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.069342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.125305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.158606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.220280Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626477519782445:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.220412Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.220602Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626477519782450:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.223777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:12.232810Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626477519782452:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:11:12.329059Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626477519782505:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:13.273522Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626460339910974:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:13.273584Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSchemeTx [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnDeadShard >> KqpCost::ScanScriptingRangeFullScan-SourceRead [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-27 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-28 >> KqpCost::OltpWriteRow+isSink [GOOD] >> KqpCost::ScanScriptingRangeFullScan+SourceRead [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-4 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-5 >> KqpCost::IndexLookupJoin+StreamLookupJoin [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupJoin-StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 15917, MsgBus: 19697 2025-06-24T21:11:08.272977Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626457517912264:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:08.273123Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a1c/r3tmp/tmpW5T85j/pdisk_1.dat 2025-06-24T21:11:08.623041Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626457517912242:2079] 1750799468268877 != 1750799468268880 2025-06-24T21:11:08.626365Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15917, node 1 2025-06-24T21:11:08.681155Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:08.681283Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:08.683479Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:08.825091Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:08.825121Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:08.825137Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:08.825290Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19697 2025-06-24T21:11:09.281822Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19697 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:09.513368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:09.548757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:11:09.714926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:09.871432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:09.939812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:11:11.261398Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626470402815767:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:11.261513Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:11.827951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:11.861409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:11.929496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:11.980619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.021417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.060165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.099252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.165735Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626474697783722:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.165842Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.166029Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626474697783727:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.171106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:12.182262Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626474697783729:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:12.258941Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626474697783782:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:13.307746Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626457517912264:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:13.307797Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:13.634184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.664189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.688670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) /Root/Join1_2 1 19 /Root/Join1_1 8 136 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-39 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-40 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapPointLookup [GOOD] Test command err: Trying to start YDB, gRPC: 24191, MsgBus: 4989 2025-06-24T21:11:08.288240Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626459552707140:2162];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:08.288421Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a16/r3tmp/tmpYuh2oa/pdisk_1.dat 2025-06-24T21:11:08.626398Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626459552707004:2079] 1750799468268825 != 1750799468268828 2025-06-24T21:11:08.636501Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:08.673107Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:08.673199Z node 1 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:08.675342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24191, node 1 2025-06-24T21:11:08.828315Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:08.828340Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:08.828348Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:08.828511Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4989 2025-06-24T21:11:09.287811Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4989 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:09.533130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:09.553427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:09.696906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:11:09.846574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:09.922287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:11.488989Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626472437610526:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:11.489109Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:11.828160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:11.871019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:11.903667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:11.971762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.002389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.034393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.069832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.165959Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626476732578477:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.166036Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.166395Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626476732578482:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.171214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:12.187609Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626476732578484:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:11:12.251566Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626476732578535:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:13.284991Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626459552707140:2162];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:13.285135Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:13.642952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:11:13.789517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;self_id=[1:7 ... Chunks_V2;id=15; 2025-06-24T21:11:13.988721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:11:13.988799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:11:13.988856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:11:13.988919Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037931;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:11:13.988961Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:11:13.988991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:11:13.989440Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:11:13.989470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:11:13.990186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037928;self_id=[1:7519626481027546309:2483];ev=NActors::IEventHandle;tablet_id=72075186224037928;tx_id=281474976710672;this=88923031063328;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750799473989;max=18446744073709551615;plan=0;src=[1:7519626459552707325:2141];cookie=412:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:13.993222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;self_id=[1:7519626481027546176:2475];ev=NActors::IEventHandle;tablet_id=72075186224037926;tx_id=281474976710672;this=88923032443840;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750799473992;max=18446744073709551615;plan=0;src=[1:7519626459552707325:2141];cookie=392:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:13.993761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;self_id=[1:7519626481027546180:2478];ev=NActors::IEventHandle;tablet_id=72075186224037923;tx_id=281474976710672;this=88923030860160;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750799473993;max=18446744073709551615;plan=0;src=[1:7519626459552707325:2141];cookie=362:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:13.994318Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;self_id=[1:7519626481027546178:2476];ev=NActors::IEventHandle;tablet_id=72075186224037924;tx_id=281474976710672;this=88923032437792;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750799473994;max=18446744073709551615;plan=0;src=[1:7519626459552707325:2141];cookie=372:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:13.994524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;self_id=[1:7519626481027546189:2481];ev=NActors::IEventHandle;tablet_id=72075186224037930;tx_id=281474976710672;this=88923027097184;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750799473994;max=18446744073709551615;plan=0;src=[1:7519626459552707325:2141];cookie=432:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:13.997196Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;self_id=[1:7519626481027546188:2480];ev=NActors::IEventHandle;tablet_id=72075186224037925;tx_id=281474976710672;this=88923032433984;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750799473996;max=18446744073709551615;plan=0;src=[1:7519626459552707325:2141];cookie=382:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:13.997767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519626481027546190:2482];ev=NActors::IEventHandle;tablet_id=72075186224037931;tx_id=281474976710672;this=88923032432864;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750799473997;max=18446744073709551615;plan=0;src=[1:7519626459552707325:2141];cookie=442:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:13.999189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037927;self_id=[1:7519626481027546175:2474];ev=NActors::IEventHandle;tablet_id=72075186224037927;tx_id=281474976710672;this=88923028929056;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750799473998;max=18446744073709551615;plan=0;src=[1:7519626459552707325:2141];cookie=402:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.000332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[1:7519626481027546179:2477];ev=NActors::IEventHandle;tablet_id=72075186224037922;tx_id=281474976710672;this=88923030190848;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750799474000;max=18446744073709551615;plan=0;src=[1:7519626459552707325:2141];cookie=352:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.004155Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.004158Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.020502Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.021062Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.021081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.021609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.027358Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.027993Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.028030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.028655Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.034047Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.034577Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.034644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.035173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.040656Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.040779Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.041320Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.042142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.047131Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.048009Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.225896Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; 2025-06-24T21:11:14.225896Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; 2025-06-24T21:11:14.226502Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapRangeFullScan [GOOD] Test command err: Trying to start YDB, gRPC: 7151, MsgBus: 10091 2025-06-24T21:11:08.278093Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626459020628961:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:08.278514Z 
node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a11/r3tmp/tmp3fHJcZ/pdisk_1.dat 2025-06-24T21:11:08.621257Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:08.631081Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626459020628926:2079] 1750799468276372 != 1750799468276375 2025-06-24T21:11:08.672785Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:08.672871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 7151, node 1 2025-06-24T21:11:08.674152Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:08.824946Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:08.824974Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:08.824980Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:08.825106Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10091 2025-06-24T21:11:09.285300Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10091 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:09.545008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:09.577290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:11:09.591521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:09.721934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:09.878279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:09.959096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:11.281564Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626471905532449:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:11.281800Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:11.826255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:11.869062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:11.938982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:11.990780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.026873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.064078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.107886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.202722Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626476200500411:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.202807Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.202956Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626476200500416:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.206564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:12.216326Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626476200500418:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:11:12.317614Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626476200500473:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:13.278534Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626459020628961:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:13.278602Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:13.644351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_run ... 5-06-24T21:11:13.977986Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:11:13.978040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:11:13.978137Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:11:13.978172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:11:13.978228Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:11:13.978267Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:11:13.978291Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:11:13.978623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519626480495468109:2477];ev=NActors::IEventHandle;tablet_id=72075186224037931;tx_id=281474976710672;this=88923033749312;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750799473978;max=18446744073709551615;plan=0;src=[1:7519626459020629249:2142];cookie=442:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:13.978780Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 
2025-06-24T21:11:13.978807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:11:13.980599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;self_id=[1:7519626480495468127:2478];ev=NActors::IEventHandle;tablet_id=72075186224037925;tx_id=281474976710672;this=88923033748192;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750799473980;max=18446744073709551615;plan=0;src=[1:7519626459020629249:2142];cookie=382:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:13.981863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;self_id=[1:7519626480495468166:2482];ev=NActors::IEventHandle;tablet_id=72075186224037930;tx_id=281474976710672;this=88923033824352;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750799473981;max=18446744073709551615;plan=0;src=[1:7519626459020629249:2142];cookie=432:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:13.982681Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;self_id=[1:7519626480495468106:2474];ev=NActors::IEventHandle;tablet_id=72075186224037923;tx_id=281474976710672;this=88923033744160;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750799473982;max=18446744073709551615;plan=0;src=[1:7519626459020629249:2142];cookie=362:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:13.984486Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;self_id=[1:7519626480495468140:2479];ev=NActors::IEventHandle;tablet_id=72075186224037924;tx_id=281474976710672;this=88923033820768;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750799473984;max=18446744073709551615;plan=0;src=[1:7519626459020629249:2142];cookie=372:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:13.985118Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;self_id=[1:7519626480495468243:2483];ev=NActors::IEventHandle;tablet_id=72075186224037927;tx_id=281474976710672;this=88923032894080;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750799473984;max=18446744073709551615;plan=0;src=[1:7519626459020629249:2142];cookie=402:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:13.987586Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;self_id=[1:7519626480495468153:2480];ev=NActors::IEventHandle;tablet_id=72075186224037926;tx_id=281474976710672;this=88923032873024;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750799473987;max=18446744073709551615;plan=0;src=[1:7519626459020629249:2142];cookie=392:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:13.988239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037922;self_id=[1:7519626480495468107:2475];ev=NActors::IEventHandle;tablet_id=72075186224037922;tx_id=281474976710672;this=88923032889600;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750799473987;max=18446744073709551615;plan=0;src=[1:7519626459020629249:2142];cookie=352:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:13.991873Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:13.991873Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.006082Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.006370Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.006581Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.006865Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.011983Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.011983Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.012606Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.012606Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.017346Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.017773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.018539Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.019214Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.022461Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.022631Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.023032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.023062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:11:14.026460Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.029858Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-24T21:11:14.224439Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; 2025-06-24T21:11:14.224445Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; 2025-06-24T21:11:14.225559Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; query_phases { duration_us: 244477 table_access { name: "/Root/TestTable" reads { rows: 2 bytes: 72 } } cpu_time_us: 122675 } compilation { duration_us: 428342 cpu_time_us: 424689 } process_cpu_time_us: 354 total_duration_us: 675393 total_cpu_time_us: 547718 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-33 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-34 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanScriptingRangeFullScan-SourceRead [GOOD] Test command err: Trying to start YDB, gRPC: 15285, MsgBus: 10011 2025-06-24T21:11:10.044880Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626468262150071:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:10.049249Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049ed/r3tmp/tmpFOujA9/pdisk_1.dat 2025-06-24T21:11:10.407287Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:10.407402Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:10.414109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:10.450648Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:10.451376Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626468262150042:2079] 1750799470032613 != 1750799470032616 TServer::EnableGrpc on GrpcPort 15285, node 1 2025-06-24T21:11:10.489295Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:10.489322Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:10.489338Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:10.489504Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10011 TClient is connected to server localhost:10011 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:11.056436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:11.064681Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:11.074683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:11.207238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:11.340944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:11:11.421615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.185277Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626481147053566:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.185374Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.485240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.516852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.546829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.573726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.601035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.673888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.711927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.774929Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626481147054225:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.775013Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.775363Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626481147054230:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.779412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:13.791757Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626481147054232:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:11:13.856039Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626481147054283:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:15.045036Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626468262150071:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:15.045103Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:15.320191Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799475341, txId: 281474976710672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::OltpWriteRow+isSink [GOOD] Test command err: Trying to start YDB, gRPC: 26834, MsgBus: 8348 2025-06-24T21:11:10.062228Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626467115883311:2161];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:10.062820Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049e1/r3tmp/tmp3sefF5/pdisk_1.dat 2025-06-24T21:11:10.435869Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626467115883169:2079] 1750799470033137 != 1750799470033140 2025-06-24T21:11:10.436066Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26834, node 1 2025-06-24T21:11:10.519954Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:10.520193Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:10.522175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:10.535504Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:10.535540Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:10.535555Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:10.535741Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8348 TClient is connected to server localhost:8348 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:11:11.059900Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:11.089149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:11.112625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:11:11.125591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:11.260539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:11.422296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:11.507740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:11:13.088951Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626480000786700:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.089085Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.378071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.403795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.430068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.460195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.488815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.542172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.578456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.630039Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626480000787359:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.630102Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.630237Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626480000787364:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.633407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:13.642352Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626480000787366:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:13.742213Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626480000787417:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:14.826447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) query_phases { duration_us: 3852 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 1342 affected_shards: 1 } compilation { duration_us: 55857 cpu_time_us: 53375 } process_cpu_time_us: 536 total_duration_us: 62554 total_cpu_time_us: 55253 2025-06-24T21:11:15.040676Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626467115883311:2161];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:15.040736Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; query_phases { duration_us: 2753 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 1158 affected_shards: 1 } compilation { duration_us: 47174 cpu_time_us: 44695 } process_cpu_time_us: 497 total_duration_us: 51368 total_cpu_time_us: 46350 2025-06-24T21:11:15.168169Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=5; 2025-06-24T21:11:15.177553Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 5 at tablet 72075186224037922 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:11:15.177700Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 5 at tablet 72075186224037922 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:11:15.177893Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [1:7519626488590722437:2472], Table: `/Root/TestTable` ([72057594046644480:17:1]), SessionActorId: [1:7519626484295754981:2472]Got CONSTRAINT VIOLATION for table `/Root/TestTable`. ShardID=72075186224037922, Sink=[1:7519626488590722437:2472].{
: Error: Conflict with existing key., code: 2012 } 2025-06-24T21:11:15.178586Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519626488590722430:2472], SessionActorId: [1:7519626484295754981:2472], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/TestTable`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[1:7519626484295754981:2472]. isRollback=0 2025-06-24T21:11:15.178880Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=ZjlhNzg3NTUtNGU1ZTdjNDAtZTYzMzE0ZDMtZjE1Yjk3ODI=, ActorId: [1:7519626484295754981:2472], ActorState: ExecuteState, TraceId: 01jyhwfrcr6128wq09ncktj82p, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [1:7519626488590722431:2472] from: [1:7519626488590722430:2472] 2025-06-24T21:11:15.178991Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519626488590722431:2472] TxId: 281474976715676. Ctx: { TraceId: 01jyhwfrcr6128wq09ncktj82p, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjlhNzg3NTUtNGU1ZTdjNDAtZTYzMzE0ZDMtZjE1Yjk3ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/TestTable`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-24T21:11:15.179217Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZjlhNzg3NTUtNGU1ZTdjNDAtZTYzMzE0ZDMtZjE1Yjk3ODI=, ActorId: [1:7519626484295754981:2472], ActorState: ExecuteState, TraceId: 01jyhwfrcr6128wq09ncktj82p, Create QueryResponse for error on request, msg: query_phases { duration_us: 25248 cpu_time_us: 1189 } compilation { duration_us: 55354 cpu_time_us: 53100 } process_cpu_time_us: 505 total_duration_us: 82180 total_cpu_time_us: 54794 query_phases { duration_us: 3439 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 1508 affected_shards: 1 } compilation { duration_us: 50531 cpu_time_us: 46549 } process_cpu_time_us: 441 total_duration_us: 55594 total_cpu_time_us: 48498 query_phases { duration_us: 2617 cpu_time_us: 1281 affected_shards: 1 } compilation { duration_us: 87445 cpu_time_us: 85260 } process_cpu_time_us: 450 total_duration_us: 93279 total_cpu_time_us: 86991 query_phases { duration_us: 3537 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 1493 affected_shards: 1 } compilation { duration_us: 103130 cpu_time_us: 100790 } process_cpu_time_us: 438 total_duration_us: 108946 total_cpu_time_us: 102721 query_phases { duration_us: 3679 table_access { name: "/Root/TestTable" deletes { rows: 1 } partitions_count: 1 } cpu_time_us: 1406 affected_shards: 1 } compilation { duration_us: 45352 cpu_time_us: 43130 } process_cpu_time_us: 434 total_duration_us: 50522 total_cpu_time_us: 44970 query_phases { duration_us: 3622 table_access { name: "/Root/TestTable" deletes { rows: 1 } partitions_count: 1 } cpu_time_us: 1395 affected_shards: 1 } compilation { duration_us: 44615 cpu_time_us: 42446 } process_cpu_time_us: 460 total_duration_us: 49681 total_cpu_time_us: 44301 >> TSchemeShardTTLTestsWithReboots::MoveTable [GOOD] >> KqpCost::QuerySeviceRangeFullScan [GOOD] >> KqpCost::ScanQueryRangeFullScan+SourceRead [GOOD] >> KqpCost::ScanQueryRangeFullScan-SourceRead [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanScriptingRangeFullScan+SourceRead [GOOD] Test command err: Trying to start YDB, gRPC: 30565, MsgBus: 3456 2025-06-24T21:11:10.474812Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626466498432964:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:10.474912Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049d7/r3tmp/tmpbRgcC6/pdisk_1.dat 2025-06-24T21:11:10.787309Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626466498432945:2079] 1750799470473917 != 1750799470473920 2025-06-24T21:11:10.797743Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30565, node 1 2025-06-24T21:11:10.868558Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:10.868623Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:10.868635Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:10.869684Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:10.877783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:10.877910Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:10.880004Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3456 TClient is connected to server localhost:3456 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:11.446085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:11.473743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:11:11.486480Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:11.489247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:11.665940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:11:11.835353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:11.914018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:13.696420Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626479383336466:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.696536Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:14.000010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:14.029100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:14.059088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:14.089235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:14.119436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:14.188612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:14.219646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:14.271487Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626483678304423:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:14.271555Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:14.271616Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626483678304428:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:14.275607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:14.287817Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626483678304430:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:11:14.384157Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626483678304481:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:15.474933Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626466498432964:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:15.475009Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:15.679101Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799475705, txId: 281474976710672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupJoin+StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 14171, MsgBus: 16579 2025-06-24T21:11:10.058930Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626466959635849:2180];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:10.059069Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049db/r3tmp/tmpNUpkkb/pdisk_1.dat 2025-06-24T21:11:10.444076Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:10.444501Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626466959635696:2079] 1750799470042175 != 1750799470042178 TServer::EnableGrpc on GrpcPort 14171, node 1 2025-06-24T21:11:10.471281Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:10.471350Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:10.473530Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:10.501505Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:10.501525Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:10.501532Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:10.501635Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16579 TClient is connected to server localhost:16579 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:11.035210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:11.046378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:11:11.055335Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:11.068913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:11.210014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:11.340316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:11.402755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:11:12.976068Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626475549571931:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.976201Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.251043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.282263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.312605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.339568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.366560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.397424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.449947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.506525Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626479844539885:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.506628Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.506673Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626479844539890:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.510240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:13.520290Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626479844539892:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:13.613954Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626479844539943:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:14.873025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:14.906187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:14.943502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.062609Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626466959635849:2180];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:15.062971Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; /Root/Join1_2 1 19 /Root/Join1_1 8 136 |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupAndTake+useSink [GOOD] >> KqpCost::OlapWriteRow [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgTimestamp [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTestsWithReboots::MoveTable [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T21:10:21.739446Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:21.739508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:21.739532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:21.739575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:21.739613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:21.739633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:21.739676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:21.739726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:21.740292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:21.740590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:21.796782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:10:21.796835Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:21.797404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T21:10:21.808821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:21.809159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:21.809292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:21.814986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:21.815198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:21.815687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:21.815883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 
72057594046678944 2025-06-24T21:10:21.818199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:21.818340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:21.819235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:21.819287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:21.819428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:21.819479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:21.819613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:21.819723Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T21:10:21.825176Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:21.920741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:21.920993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:21.921183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:21.921228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:21.921480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:21.921631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 
2025-06-24T21:10:21.923726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:21.923929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:21.924176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:21.924223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:21.924272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:21.924305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:21.926151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:21.926200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:21.926238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:21.927811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:21.927851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:21.927907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:21.927951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:21.937347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:21.939314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:21.939495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:21.940513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:21.940644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 42949694 ... 3] 2025-06-24T21:11:16.953797Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:11:16.953855Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [62:208:2208], at schemeshard: 72057594046678944, txId: 1003, path id: 1 2025-06-24T21:11:16.953909Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [62:208:2208], at schemeshard: 72057594046678944, txId: 1003, path id: 3 2025-06-24T21:11:16.954480Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-24T21:11:16.954545Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 1003:0 ProgressState at tablet: 72057594046678944 2025-06-24T21:11:16.954646Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-24T21:11:16.954689Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 1003:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:11:16.954749Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1003:0 129 -> 240 2025-06-24T21:11:16.955761Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-24T21:11:16.955871Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-24T21:11:16.955912Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-24T21:11:16.955956Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-24T21:11:16.955999Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-24T21:11:16.957222Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-24T21:11:16.957323Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 
72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-24T21:11:16.957363Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-24T21:11:16.957401Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-24T21:11:16.957445Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:11:16.957533Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1003, ready parts: 0/1, is published: true 2025-06-24T21:11:16.960030Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-24T21:11:16.960096Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_move_table.cpp:564: TMoveTable TDone, operationId: 1003:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:11:16.960146Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_move_table.cpp:574: TMoveTable TDone, operationId: 1003:0 ProgressState, SourcePathId: [OwnerId: 72057594046678944, LocalPathId: 3], TargetPathId: [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-24T21:11:16.960265Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1003:0 progress is 1/1 2025-06-24T21:11:16.960304Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-24T21:11:16.960352Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1003:0 progress is 1/1 2025-06-24T21:11:16.960384Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-24T21:11:16.960418Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-06-24T21:11:16.960456Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-24T21:11:16.960490Z node 62 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1003:0 2025-06-24T21:11:16.960514Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1003:0 2025-06-24T21:11:16.960623Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:11:16.960655Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T21:11:16.961796Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:11:16.961839Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:11:16.961898Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T21:11:16.962700Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-24T21:11:16.962838Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-24T21:11:16.965393Z node 62 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-24T21:11:16.965761Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-24T21:11:16.965802Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-24T21:11:16.966193Z node 62 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-24T21:11:16.966295Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-24T21:11:16.966335Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [62:472:2443] TestWaitNotification: OK eventTxId 1003 2025-06-24T21:11:16.966878Z node 62 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTableMoved" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:11:16.967126Z node 62 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTableMoved" took 294us result status StatusSuccess 2025-06-24T21:11:16.967727Z node 62 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTableMoved" PathDescription { Self { Name: "TTLEnabledTableMoved" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "TTLEnabledTableMoved" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 
3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpCost::IndexLookupAtLeast8BytesInStorage-useSink [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDatetime64 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanQueryRangeFullScan+SourceRead [GOOD] Test command err: Trying to start YDB, gRPC: 65412, MsgBus: 11926 2025-06-24T21:11:11.675691Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626470848296224:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:11.677006Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049ce/r3tmp/tmpBP4tc7/pdisk_1.dat 2025-06-24T21:11:12.092957Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:12.095717Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626470848296205:2079] 1750799471673734 != 1750799471673737 2025-06-24T21:11:12.107044Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:12.107111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 65412, node 1 2025-06-24T21:11:12.109271Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:12.185370Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will 
use file: (empty maybe) 2025-06-24T21:11:12.185391Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:12.185399Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:12.185499Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11926 TClient is connected to server localhost:11926 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:12.681489Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:12.686466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:12.711708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:12.846813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:12.978809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:11:13.055539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:14.855957Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626483733199746:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:14.856121Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.157709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.185170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.250219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.318010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.343590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.411062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.440319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.519352Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626488028167708:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.519425Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.519444Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626488028167713:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.522634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:15.531492Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626488028167715:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:15.635351Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626488028167766:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:16.676792Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626470848296224:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:16.679228Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:16.849016Z node 1 :KQP_GATEWAY DEBUG: kqp_metadata_loader.cpp:886: Load table metadata from cache by path, request Path: /Root/Test 2025-06-24T21:11:16.983433Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_snapshot_manager.cpp:37: Start KqpSnapshotManager at [1:7519626492323135313:2464] 2025-06-24T21:11:16.983466Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_snapshot_manager.cpp:58: KqpSnapshotManager: got snapshot request from [1:7519626492323135295:2464] 2025-06-24T21:11:16.987524Z node 1 :KQP_RESOURCE_MANAGER ... g batch for read #0 2025-06-24T21:11:17.016515Z node 1 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:458: TxId: 281474976715673, task: 1, CA Id [1:7519626492323135328:2472]. effective maxinflight 1 sorted 1 2025-06-24T21:11:17.016523Z node 1 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:480: TxId: 281474976715673, task: 1, CA Id [1:7519626492323135328:2472]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, 2025-06-24T21:11:17.016532Z node 1 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976715673, task: 1, CA Id [1:7519626492323135328:2472]. returned async data processed rows 3 left freeSpace 8388548 received rows 3 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2025-06-24T21:11:17.016724Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519626492323135328:2472], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyhwft1x10f15kpp9f2t1bx3. SessionId : ydb://session/3?node_id=1&id=OTFkZDNhZDItMzQwOTdhYWEtOTQ4ZDIwYWEtMzkwN2E0YjA=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-24T21:11:17.016747Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519626492323135328:2472], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyhwft1x10f15kpp9f2t1bx3. SessionId : ydb://session/3?node_id=1&id=OTFkZDNhZDItMzQwOTdhYWEtOTQ4ZDIwYWEtMzkwN2E0YjA=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-24T21:11:17.016746Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519626492323135329:2473], TxId: 281474976715673, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=OTFkZDNhZDItMzQwOTdhYWEtOTQ4ZDIwYWEtMzkwN2E0YjA=. TraceId : 01jyhwft1x10f15kpp9f2t1bx3. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. 
CA StateFunc 271646923 2025-06-24T21:11:17.016786Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976715673, task: 1. Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-06-24T21:11:17.016794Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976715673, task: 2. Finish input channelId: 1, from: [1:7519626492323135328:2472] 2025-06-24T21:11:17.016849Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519626492323135329:2473], TxId: 281474976715673, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=OTFkZDNhZDItMzQwOTdhYWEtOTQ4ZDIwYWEtMzkwN2E0YjA=. TraceId : 01jyhwft1x10f15kpp9f2t1bx3. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-24T21:11:17.016859Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519626492323135328:2472], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyhwft1x10f15kpp9f2t1bx3. SessionId : ydb://session/3?node_id=1&id=OTFkZDNhZDItMzQwOTdhYWEtOTQ4ZDIwYWEtMzkwN2E0YjA=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646927 2025-06-24T21:11:17.016882Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519626492323135328:2472], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyhwft1x10f15kpp9f2t1bx3. SessionId : ydb://session/3?node_id=1&id=OTFkZDNhZDItMzQwOTdhYWEtOTQ4ZDIwYWEtMzkwN2E0YjA=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-24T21:11:17.016906Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715673, task: 1. Tasks execution finished 2025-06-24T21:11:17.016921Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7519626492323135328:2472], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyhwft1x10f15kpp9f2t1bx3. SessionId : ydb://session/3?node_id=1&id=OTFkZDNhZDItMzQwOTdhYWEtOTQ4ZDIwYWEtMzkwN2E0YjA=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-24T21:11:17.017013Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715673, task: 1. pass away 2025-06-24T21:11:17.017125Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715673;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T21:11:17.017168Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519626492323135329:2473], TxId: 281474976715673, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=OTFkZDNhZDItMzQwOTdhYWEtOTQ4ZDIwYWEtMzkwN2E0YjA=. TraceId : 01jyhwft1x10f15kpp9f2t1bx3. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-24T21:11:17.017278Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715673, taskId: 1. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-24T21:11:17.017364Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:7519626492323135323:2464] TxId: 281474976715673. Ctx: { TraceId: 01jyhwft1x10f15kpp9f2t1bx3, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTFkZDNhZDItMzQwOTdhYWEtOTQ4ZDIwYWEtMzkwN2E0YjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [1:7519626492323135328:2472], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 26916 Tasks { TaskId: 1 CpuTimeUs: 1101 FinishTimeMs: 1750799477016 OutputRows: 1 OutputBytes: 19 Tables { TablePath: "/Root/Test" ReadRows: 1 ReadBytes: 20 AffectedPartitions: 1 } IngressRows: 3 ComputeCpuTimeUs: 128 BuildCpuTimeUs: 973 HostName: "ghrun-4pjfmvffsq" NodeId: 1 StartTimeMs: 1750799477016 CreateTimeMs: 1750799476989 UpdateTimeMs: 1750799477016 } MaxMemoryUsage: 1048576 } 2025-06-24T21:11:17.017436Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715673. Ctx: { TraceId: 01jyhwft1x10f15kpp9f2t1bx3, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTFkZDNhZDItMzQwOTdhYWEtOTQ4ZDIwYWEtMzkwN2E0YjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [1:7519626492323135328:2472] 2025-06-24T21:11:17.017523Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [1:7519626492323135323:2464] TxId: 281474976715673. Ctx: { TraceId: 01jyhwft1x10f15kpp9f2t1bx3, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTFkZDNhZDItMzQwOTdhYWEtOTQ4ZDIwYWEtMzkwN2E0YjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [1:7519626492323135329:2473], 2025-06-24T21:11:17.017716Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:357: ActorId: [1:7519626492323135323:2464] TxId: 281474976715673. Ctx: { TraceId: 01jyhwft1x10f15kpp9f2t1bx3, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTFkZDNhZDItMzQwOTdhYWEtOTQ4ZDIwYWEtMzkwN2E0YjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send TEvStreamData to [1:7519626492323135295:2464], seqNo: 1, nRows: 1 2025-06-24T21:11:17.020013Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:423: TxId: 281474976715673, send ack to channelId: 2, seqNo: 1, enough: 0, freeSpace: 8388470, to: [1:7519626496618102627:2473] 2025-06-24T21:11:17.020068Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519626492323135329:2473], TxId: 281474976715673, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=OTFkZDNhZDItMzQwOTdhYWEtOTQ4ZDIwYWEtMzkwN2E0YjA=. TraceId : 01jyhwft1x10f15kpp9f2t1bx3. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. 
CA StateFunc 271646922 2025-06-24T21:11:17.020111Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715673, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-24T21:11:17.020118Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715673, task: 2. Tasks execution finished 2025-06-24T21:11:17.020125Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7519626492323135329:2473], TxId: 281474976715673, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=OTFkZDNhZDItMzQwOTdhYWEtOTQ4ZDIwYWEtMzkwN2E0YjA=. TraceId : 01jyhwft1x10f15kpp9f2t1bx3. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Compute state finished. All channels and sinks finished 2025-06-24T21:11:17.020192Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715673, task: 2. pass away 2025-06-24T21:11:17.020241Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715673;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T21:11:17.020387Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715673, taskId: 2. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-24T21:11:17.020397Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:7519626492323135323:2464] TxId: 281474976715673. Ctx: { TraceId: 01jyhwft1x10f15kpp9f2t1bx3, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTFkZDNhZDItMzQwOTdhYWEtOTQ4ZDIwYWEtMzkwN2E0YjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [1:7519626492323135329:2473], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 2333 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 817 FinishTimeMs: 1750799477020 InputRows: 1 InputBytes: 19 OutputRows: 1 OutputBytes: 19 ResultRows: 1 ResultBytes: 19 ComputeCpuTimeUs: 239 BuildCpuTimeUs: 578 HostName: "ghrun-4pjfmvffsq" NodeId: 1 CreateTimeMs: 1750799477013 UpdateTimeMs: 1750799477020 } MaxMemoryUsage: 1048576 } 2025-06-24T21:11:17.020456Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715673. Ctx: { TraceId: 01jyhwft1x10f15kpp9f2t1bx3, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTFkZDNhZDItMzQwOTdhYWEtOTQ4ZDIwYWEtMzkwN2E0YjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [1:7519626492323135329:2473] 2025-06-24T21:11:17.020597Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [1:7519626492323135323:2464] TxId: 281474976715673. Ctx: { TraceId: 01jyhwft1x10f15kpp9f2t1bx3, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTFkZDNhZDItMzQwOTdhYWEtOTQ4ZDIwYWEtMzkwN2E0YjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T21:11:17.020672Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [1:7519626492323135323:2464] TxId: 281474976715673. Ctx: { TraceId: 01jyhwft1x10f15kpp9f2t1bx3, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTFkZDNhZDItMzQwOTdhYWEtOTQ4ZDIwYWEtMzkwN2E0YjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.029249s ReadRows: 1 ReadBytes: 20 ru: 19 rate limiter was not found force flag: 1 2025-06-24T21:11:17.021586Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799477035, txId: 281474976715672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::QuerySeviceRangeFullScan [GOOD] Test command err: Trying to start YDB, gRPC: 16680, MsgBus: 6932 2025-06-24T21:11:11.607503Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626472342732775:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:11.607574Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049cb/r3tmp/tmpGiZLRh/pdisk_1.dat 2025-06-24T21:11:12.019762Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:12.023431Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626472342732752:2079] 1750799471606166 != 1750799471606169 TServer::EnableGrpc on GrpcPort 16680, node 1 2025-06-24T21:11:12.082188Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:12.082309Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:12.092792Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:12.160224Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:12.161234Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:12.161285Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:12.161441Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6932 TClient is connected to server localhost:6932 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:11:12.639701Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:12.766959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:11:12.795300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:12.959976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:13.114383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:11:13.175335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:14.666001Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626485227636278:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:14.666125Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.012743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.037970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.065505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.091892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.157781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.186148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.213070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.294952Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626489522604237:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.295049Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.295087Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626489522604242:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.298425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:15.308145Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626489522604244:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:11:15.408126Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626489522604295:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:16.607538Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626472342732775:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:16.607618Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanQueryRangeFullScan-SourceRead [GOOD] Test command err: Trying to start YDB, gRPC: 20265, MsgBus: 24790 2025-06-24T21:11:11.698706Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626471045705994:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:11.701152Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049ca/r3tmp/tmpvrSFKq/pdisk_1.dat 2025-06-24T21:11:12.085670Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626471045705900:2079] 1750799471678961 != 1750799471678964 2025-06-24T21:11:12.096349Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:12.099754Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:12.099856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:12.104101Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20265, node 1 2025-06-24T21:11:12.203168Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:12.203200Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:12.203218Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:12.203390Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24790 TClient is connected to server localhost:24790 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:11:12.699304Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:12.788172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:12.813486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:12.973916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:13.123179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:11:13.203623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:14.850298Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626483930609420:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:14.850424Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.170231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.199136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.235895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.265563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.303293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.370698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.437786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.522144Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626488225577384:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.522258Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.522373Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626488225577389:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.525987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:15.535014Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626488225577391:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:11:15.596415Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626488225577442:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:16.688112Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626471045705994:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:16.688182Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:16.843563Z node 1 :KQP_GATEWAY DEBUG: kqp_metadata_loader.cpp:886: Load table metadata from cache by path, request Path: /Root/Test 2025-06-24T21:11:16.986426Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_snapshot_manager.cpp:37: Start KqpSnapshotManager at [1:7519626492520544988:2464] 2025-06-24T21:11:16.986464Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_snapshot_manager.cpp:58: KqpSnapshotManager: got snapshot request from [1:7519626492520544970:2464] 2025-06-24T21:11:16.990759Z node 1 :KQP_RESOURCE_MANAGER ... er.h:200;event=send_data_to_compute;space=8388608;queue=1;compute_actor_id=[1:7519626492520545003:2472];rows=3; 2025-06-24T21:11:17.003943Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:184;event=stop_scanner; 2025-06-24T21:11:17.003956Z node 1 :KQP_COMPUTE DEBUG: log.h:466: kqp_scan_compute_actor.cpp:175 :TEvSendData: [1:7519626492520545006:2474]/[1:7519626492520545003:2472] 2025-06-24T21:11:17.003968Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:42;event=scan_ack_on_finished;actor_id=[1:7519626496815512306:2050]; 2025-06-24T21:11:17.003984Z node 1 :KQP_COMPUTE DEBUG: kqp_scan_fetcher_actor.cpp:555: SelfId: [1:7519626492520545006:2474]. EVLOGKQP:0/0/3/3 2025-06-24T21:11:17.004007Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:383;event=scanner_finished;tablet_id=72075186224037914;stop_shard=1; 2025-06-24T21:11:17.004028Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:96;event=stop_scanner;actor_id=[1:7519626496815512306:2050];message=;final_flag=1; 2025-06-24T21:11:17.004137Z node 1 :KQP_COMPUTE DEBUG: kqp_scan_fetcher_actor.cpp:599: SelfId: [1:7519626492520545006:2474]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, pending resolve shards: 0, average read rows: 3, average read bytes: 0, 2025-06-24T21:11:17.004169Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:430;event=wait_all_scanner_finished;scans=0; 2025-06-24T21:11:17.004192Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519626492520545003:2472], TxId: 281474976710673, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyhwft0zeh7jj0s1yd53rbcg. SessionId : ydb://session/3?node_id=1&id=NDIyMWUxZWYtMzNjZWYyM2YtMjJkNWJjNjMtYmNlMDI1MTI=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. 
Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-24T21:11:17.004215Z node 1 :KQP_COMPUTE DEBUG: log.h:466: kqp_scan_compute_actor.cpp:205 :TEvFetcherFinished: [1:7519626492520545006:2474] 2025-06-24T21:11:17.004229Z node 1 :KQP_COMPUTE DEBUG: kqp_scan_fetcher_actor.cpp:699: SelfId: [1:7519626492520545006:2474]. EVLOGKQP(max_in_flight:1) InFlightScans:InFlightShards:;wScans=0;wShards=0; {SHARD(72075186224037914):CHUNKS=1;D=0.000000s;PacksCount=1;RowsCount=3;BytesCount=0;MinPackSize=3;MaxPackSize=3;CAVG=0.000000s;CMIN=0.000000s;CMAX=0.000000s;}; 2025-06-24T21:11:17.004263Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976710673, task: 1. Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-06-24T21:11:17.004281Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519626492520545005:2473], TxId: 281474976710673, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jyhwft0zeh7jj0s1yd53rbcg. SessionId : ydb://session/3?node_id=1&id=NDIyMWUxZWYtMzNjZWYyM2YtMjJkNWJjNjMtYmNlMDI1MTI=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646923 2025-06-24T21:11:17.004307Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976710673, task: 2. Finish input channelId: 1, from: [1:7519626492520545003:2472] 2025-06-24T21:11:17.004337Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519626492520545005:2473], TxId: 281474976710673, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jyhwft0zeh7jj0s1yd53rbcg. SessionId : ydb://session/3?node_id=1&id=NDIyMWUxZWYtMzNjZWYyM2YtMjJkNWJjNjMtYmNlMDI1MTI=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-24T21:11:17.004408Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710673, task: 1. Tasks execution finished 2025-06-24T21:11:17.004427Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7519626492520545003:2472], TxId: 281474976710673, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyhwft0zeh7jj0s1yd53rbcg. SessionId : ydb://session/3?node_id=1&id=NDIyMWUxZWYtMzNjZWYyM2YtMjJkNWJjNjMtYmNlMDI1MTI=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-24T21:11:17.004552Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710673, task: 1. pass away 2025-06-24T21:11:17.004591Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519626492520545005:2473], TxId: 281474976710673, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jyhwft0zeh7jj0s1yd53rbcg. SessionId : ydb://session/3?node_id=1&id=NDIyMWUxZWYtMzNjZWYyM2YtMjJkNWJjNjMtYmNlMDI1MTI=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-24T21:11:17.004641Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976710673;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T21:11:17.004721Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:7519626492520544998:2464] TxId: 281474976710673. Ctx: { TraceId: 01jyhwft0zeh7jj0s1yd53rbcg, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDIyMWUxZWYtMzNjZWYyM2YtMjJkNWJjNjMtYmNlMDI1MTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [1:7519626492520545003:2472], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 3249 Tasks { TaskId: 1 CpuTimeUs: 962 FinishTimeMs: 1750799477004 OutputRows: 1 OutputBytes: 19 Tables { TablePath: "/Root/Test" ReadRows: 3 ReadBytes: 96 } ComputeCpuTimeUs: 130 BuildCpuTimeUs: 832 Sources { IngressName: "CS" Ingress { } } HostName: "ghrun-4pjfmvffsq" NodeId: 1 StartTimeMs: 1750799477004 CreateTimeMs: 1750799476993 UpdateTimeMs: 1750799477004 } MaxMemoryUsage: 1048576 } 2025-06-24T21:11:17.004772Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976710673. Ctx: { TraceId: 01jyhwft0zeh7jj0s1yd53rbcg, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDIyMWUxZWYtMzNjZWYyM2YtMjJkNWJjNjMtYmNlMDI1MTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [1:7519626492520545003:2472] 2025-06-24T21:11:17.004834Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [1:7519626492520544998:2464] TxId: 281474976710673. Ctx: { TraceId: 01jyhwft0zeh7jj0s1yd53rbcg, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDIyMWUxZWYtMzNjZWYyM2YtMjJkNWJjNjMtYmNlMDI1MTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [1:7519626492520545005:2473], 2025-06-24T21:11:17.004938Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976710673, taskId: 1. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-24T21:11:17.004984Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:357: ActorId: [1:7519626492520544998:2464] TxId: 281474976710673. Ctx: { TraceId: 01jyhwft0zeh7jj0s1yd53rbcg, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDIyMWUxZWYtMzNjZWYyM2YtMjJkNWJjNjMtYmNlMDI1MTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send TEvStreamData to [1:7519626492520544970:2464], seqNo: 1, nRows: 1 2025-06-24T21:11:17.006705Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:423: TxId: 281474976710673, send ack to channelId: 2, seqNo: 1, enough: 0, freeSpace: 8388470, to: [1:7519626492520545007:2473] 2025-06-24T21:11:17.006799Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519626492520545005:2473], TxId: 281474976710673, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jyhwft0zeh7jj0s1yd53rbcg. SessionId : ydb://session/3?node_id=1&id=NDIyMWUxZWYtMzNjZWYyM2YtMjJkNWJjNjMtYmNlMDI1MTI=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-24T21:11:17.006852Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710673, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-24T21:11:17.006870Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710673, task: 2. Tasks execution finished 2025-06-24T21:11:17.006886Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7519626492520545005:2473], TxId: 281474976710673, task: 2. 
Ctx: { CustomerSuppliedId : . TraceId : 01jyhwft0zeh7jj0s1yd53rbcg. SessionId : ydb://session/3?node_id=1&id=NDIyMWUxZWYtMzNjZWYyM2YtMjJkNWJjNjMtYmNlMDI1MTI=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Compute state finished. All channels and sinks finished 2025-06-24T21:11:17.006971Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710673, task: 2. pass away 2025-06-24T21:11:17.007097Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976710673;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T21:11:17.007101Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:7519626492520544998:2464] TxId: 281474976710673. Ctx: { TraceId: 01jyhwft0zeh7jj0s1yd53rbcg, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDIyMWUxZWYtMzNjZWYyM2YtMjJkNWJjNjMtYmNlMDI1MTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [1:7519626492520545005:2473], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 8042 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 834 FinishTimeMs: 1750799477006 InputRows: 1 InputBytes: 19 OutputRows: 1 OutputBytes: 19 ResultRows: 1 ResultBytes: 19 ComputeCpuTimeUs: 196 BuildCpuTimeUs: 638 HostName: "ghrun-4pjfmvffsq" NodeId: 1 CreateTimeMs: 1750799476994 UpdateTimeMs: 1750799477006 } MaxMemoryUsage: 1048576 } 2025-06-24T21:11:17.007155Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976710673. Ctx: { TraceId: 01jyhwft0zeh7jj0s1yd53rbcg, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDIyMWUxZWYtMzNjZWYyM2YtMjJkNWJjNjMtYmNlMDI1MTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [1:7519626492520545005:2473] 2025-06-24T21:11:17.007279Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [1:7519626492520544998:2464] TxId: 281474976710673. Ctx: { TraceId: 01jyhwft0zeh7jj0s1yd53rbcg, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDIyMWUxZWYtMzNjZWYyM2YtMjJkNWJjNjMtYmNlMDI1MTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T21:11:17.007335Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [1:7519626492520544998:2464] TxId: 281474976710673. Ctx: { TraceId: 01jyhwft0zeh7jj0s1yd53rbcg, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDIyMWUxZWYtMzNjZWYyM2YtMjJkNWJjNjMtYmNlMDI1MTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.011291s ReadRows: 3 ReadBytes: 96 ru: 7 rate limiter was not found force flag: 1 2025-06-24T21:11:17.007359Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976710673, taskId: 2. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-24T21:11:17.008108Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799477035, txId: 281474976710672] shutting down >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberNanoSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgDate [GOOD] >> KqpCost::IndexLookupAtLeast8BytesInStorage+useSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgTimestamp [GOOD] Test command err: 2025-06-24T21:11:06.784822Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:06.785053Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:06.785153Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003729/r3tmp/tmpybKEZ2/pdisk_1.dat 2025-06-24T21:11:07.023025Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:07.025089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:07.058349Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:07.063477Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799464323147 != 1750799464323151 2025-06-24T21:11:07.109111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:07.109230Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:07.121397Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:07.214113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.258723Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:11:07.258962Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.303933Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.304059Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.305725Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:07.305836Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:07.305896Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:07.306253Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.306382Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.306454Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:11:07.317109Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.347434Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:07.347620Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.347738Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:11:07.347784Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.347846Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:07.347886Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.348279Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:07.348371Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:07.348432Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.348469Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.348512Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:07.348559Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.348650Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:11:07.349035Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:07.349256Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:11:07.349337Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:11:07.350955Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:07.361465Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:07.361561Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:11:07.509136Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:659:2549], serverId# [1:660:2550], sessionId# [0:0:0] 2025-06-24T21:11:07.514688Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:11:07.514782Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.515582Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.515633Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:07.515693Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:07.515986Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:07.516122Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:07.516457Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.516526Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:07.518865Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:07.519321Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.520958Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:07.521004Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.521856Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:07.521917Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.522708Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.522753Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.522822Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:07.522896Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:07.522974Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:07.523058Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.527247Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:07.529032Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:07.529094Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:07.530229Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:07.565535Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:07.565641Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:702:2579], DatabaseId: /Root, PoolId: ... .158269Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:17.158334Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:17.158563Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:17.158693Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:17.159465Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:17.159539Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:17.160002Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:17.160442Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:17.162257Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:17.162330Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:17.162855Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:17.162950Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:17.163811Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:17.163856Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:17.163913Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:17.163977Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:372:2366], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:17.164028Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:17.164128Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:17.165249Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:17.167258Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got 
TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:17.167432Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:17.167515Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:17.204264Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:17.204365Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:703:2579], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:17.204436Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:17.209915Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:17.216339Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:17.263358Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:17.387831Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:17.390703Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:706:2582], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:11:17.425335Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:777:2622] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:17.532162Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwftejcwf13enr4pa4yrj2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NjU1N2RiYjEtY2E3ZjljMjItZGY4NDExZC0zOWViMjM5Mg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:17.537190Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:808:2639], serverId# [3:809:2640], sessionId# [0:0:0] 2025-06-24T21:11:17.537530Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-24T21:11:17.537663Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-06-24T21:11:17.548420Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:17.572463Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:816:2646], serverId# [3:817:2647], sessionId# [0:0:0] 2025-06-24T21:11:17.573561Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-24T21:11:17.584792Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-24T21:11:17.584878Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:17.585173Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:11:17.585210Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-24T21:11:17.585446Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:17.585490Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:17.585537Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:17.585592Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:17.585661Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:816:2646], serverId# [3:817:2647], sessionId# [0:0:0] 2025-06-24T21:11:17.586426Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:17.586706Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:17.586932Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
2025-06-24T21:11:17.586979Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:17.587018Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-24T21:11:17.587243Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:17.587315Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:17.587845Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-24T21:11:17.588112Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 48, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T21:11:17.588229Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-24T21:11:17.588275Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-24T21:11:17.619547Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:11:17.619615Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-24T21:11:17.619955Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:17.619980Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:17.620008Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-24T21:11:17.620108Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:17.620153Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:17.620187Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupAndTake+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 14930, MsgBus: 2041 2025-06-24T21:11:11.670600Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626472346718128:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:11.670656Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049c8/r3tmp/tmpWkSIpB/pdisk_1.dat 2025-06-24T21:11:12.011198Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:12.011568Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie 
mismatch for subscription [1:7519626472346718104:2079] 1750799471667514 != 1750799471667517 TServer::EnableGrpc on GrpcPort 14930, node 1 2025-06-24T21:11:12.093210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:12.093478Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:12.115322Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:12.146419Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:12.146446Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:12.146455Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:12.146614Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2041 TClient is connected to server localhost:2041 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:11:12.684365Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:12.704953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:12.738004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:11:12.874003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:11:13.015850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:13.096219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:14.727847Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626485231621629:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:14.727964Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.013168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.045726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.075202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.109068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.138906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.206244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.277224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.330178Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626489526589590:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.330244Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.330254Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626489526589595:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.333363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:15.342206Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626489526589597:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:11:15.423963Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626489526589648:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:16.338619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.671454Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626472346718128:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:16.671510Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; /Root/SecondaryKeys/Index/indexImplTable 2 16 /Root/SecondaryKeys 1 8 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapWriteRow [GOOD] Test command err: Trying to start YDB, gRPC: 4292, MsgBus: 19560 2025-06-24T21:11:09.805027Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626462592326547:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:09.815089Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049f2/r3tmp/tmpuHKHuV/pdisk_1.dat 2025-06-24T21:11:10.238374Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626462592326511:2079] 1750799469799145 != 1750799469799148 2025-06-24T21:11:10.253728Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4292, node 1 2025-06-24T21:11:10.268236Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:10.268765Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:10.274935Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:10.331791Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:10.331818Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:10.331834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:10.331968Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19560 TClient is connected to server localhost:19560 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:11:10.815490Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:10.870913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:10.886597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:11:10.891438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:11.017730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:11.154817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:11.227694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:11:12.992670Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626475477230040:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:12.992770Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.336371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.366485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.391121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.416842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.442825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.487066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.520092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:13.574322Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626479772197990:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.574406Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.574767Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626479772197995:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:13.578680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:13.588682Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626479772197997:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:11:13.655493Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626479772198048:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:14.630949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:11:14.795526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;self_id=[1:7519626484067165754:2482];tablet_id=72075186224037923;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:11:14.795525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=720751 ... T21:11:15.519757Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=281474976710682;tx_id=281474976710682;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710682; query_phases { duration_us: 13653 table_access { name: "/Root/TestTable" updates { rows: 2 bytes: 744 } } cpu_time_us: 5200 affected_shards: 2 } query_phases { duration_us: 10528 cpu_time_us: 351 affected_shards: 2 } compilation { duration_us: 63761 cpu_time_us: 61075 } process_cpu_time_us: 737 total_duration_us: 89875 total_cpu_time_us: 67363 2025-06-24T21:11:15.644299Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=281474976710684;tx_id=281474976710684;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710684; query_phases { duration_us: 20064 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 368 } } cpu_time_us: 2637 affected_shards: 1 } query_phases { duration_us: 6473 cpu_time_us: 179 affected_shards: 1 } compilation { duration_us: 89647 cpu_time_us: 86798 } process_cpu_time_us: 699 total_duration_us: 118372 total_cpu_time_us: 90313 2025-06-24T21:11:15.768666Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=281474976710687;tx_id=281474976710687;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710687; 2025-06-24T21:11:15.769076Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976710687;tx_id=281474976710687;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710687; query_phases { duration_us: 9657 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 368 } } cpu_time_us: 2184 affected_shards: 1 } query_phases { duration_us: 8249 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 368 } } cpu_time_us: 2132 affected_shards: 2 } query_phases { duration_us: 8743 cpu_time_us: 212 affected_shards: 2 } compilation { duration_us: 82662 cpu_time_us: 80235 } process_cpu_time_us: 868 total_duration_us: 117089 total_cpu_time_us: 85631 2025-06-24T21:11:15.880423Z node 1 :TX_COLUMNSHARD_RESTORE WARN: log.cpp:784: 
tablet_id=72075186224037930;tablet_actor_id=[1:7519626484067165763:2486];this=89129162773696;activity=1;task_id=c3cfe478-513f11f0-98eb10c2-3dc32309::4;fline=restore.cpp:28;event=merge_data_problems;write_id=4;tablet_id=72075186224037930;message=Conflict with existing key. {"sorting_columns":[{"name":"Group","value":"1"},{"name":"Name","value":"Anna"}],"fields":["Group: uint32","Name: binary"]}; 2025-06-24T21:11:15.880691Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037930;self_id=[1:7519626484067165763:2486];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteBlobsResult;tablet_id=72075186224037930;event=TEvWriteBlobsResult;fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key. {"sorting_columns":[{"name":"Group","value":"1"},{"name":"Name","value":"Anna"}],"fields":["Group: uint32","Name: binary"]};tx_id=281474976710688; 2025-06-24T21:11:15.881217Z node 1 :TX_COLUMNSHARD_SCAN WARN: actor.cpp:133: Scan [1:7519626488362133800:2706] got AbortExecution txId: 281474976710688 scanId: 1 gen: 1 tablet: 72075186224037930 code: ABORTED reason: {
: Error: task finished: Conflict with existing key. {"sorting_columns":[{"name":"Group","value":"1"},{"name":"Name","value":"Anna"}],"fields":["Group: uint32","Name: binary"]} } 2025-06-24T21:11:15.892223Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [1:7519626488362133797:2704], Table: `/Root/TestTable` ([72057594046644480:17:1]), SessionActorId: [0:0:0]Got CONSTRAINT VIOLATION for table `/Root/TestTable`. ShardID=72075186224037930, Sink=[1:7519626488362133797:2704].{
: Error: Conflict with existing key. {"sorting_columns":[{"name":"Group","value":"1"},{"name":"Name","value":"Anna"}],"fields":["Group: uint32","Name: binary"]}, code: 2012 } 2025-06-24T21:11:15.892420Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1566: SelfId: [1:7519626488362133794:2704], TxId: 281474976710688, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=OTA0OTE5ZTItYzQwZjliOTctYWM1NjIzZTMtYTU2ZDcxNWM=. TraceId : 01jyhwfs221w0akrgvs8qftrnz. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Sink[0] fatal error: {
: Error: Constraint violated. Table: `/Root/TestTable`., code: 2012 subissue: {
: Error: Conflict with existing key. {"sorting_columns":[{"name":"Group","value":"1"},{"name":"Name","value":"Anna"}],"fields":["Group: uint32","Name: binary"]}, code: 2012 } } 2025-06-24T21:11:15.892542Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519626488362133794:2704], TxId: 281474976710688, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=OTA0OTE5ZTItYzQwZjliOTctYWM1NjIzZTMtYTU2ZDcxNWM=. TraceId : 01jyhwfs221w0akrgvs8qftrnz. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Constraint violated. Table: `/Root/TestTable`., code: 2012 subissue: {
: Error: Conflict with existing key. {"sorting_columns":[{"name":"Group","value":"1"},{"name":"Name","value":"Anna"}],"fields":["Group: uint32","Name: binary"]}, code: 2012 } }. 2025-06-24T21:11:15.893034Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=OTA0OTE5ZTItYzQwZjliOTctYWM1NjIzZTMtYTU2ZDcxNWM=, ActorId: [1:7519626484067165613:2472], ActorState: ExecuteState, TraceId: 01jyhwfs221w0akrgvs8qftrnz, Create QueryResponse for error on request, msg: query_phases { duration_us: 49559 cpu_time_us: 1738 } compilation { duration_us: 62818 cpu_time_us: 60293 } process_cpu_time_us: 594 total_duration_us: 114232 total_cpu_time_us: 62625 2025-06-24T21:11:15.991135Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976710690;tx_id=281474976710690;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710690; query_phases { duration_us: 6839 cpu_time_us: 1797 affected_shards: 1 } query_phases { duration_us: 6075 cpu_time_us: 248 affected_shards: 1 } compilation { duration_us: 75520 cpu_time_us: 72902 } process_cpu_time_us: 581 total_duration_us: 92193 total_cpu_time_us: 75528 2025-06-24T21:11:16.053772Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976710692;tx_id=281474976710692;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710692; query_phases { duration_us: 7087 table_access { name: "/Root/TestTable" deletes { rows: 1 } } cpu_time_us: 1949 affected_shards: 1 } query_phases { duration_us: 5407 cpu_time_us: 297 affected_shards: 1 } compilation { duration_us: 42520 cpu_time_us: 39789 } process_cpu_time_us: 592 total_duration_us: 56851 total_cpu_time_us: 42627 2025-06-24T21:11:16.133506Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=281474976710694;tx_id=281474976710694;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710694; query_phases { duration_us: 6551 table_access { name: "/Root/TestTable" deletes { rows: 1 } } cpu_time_us: 1847 affected_shards: 1 } query_phases { duration_us: 6009 cpu_time_us: 315 affected_shards: 1 } compilation { duration_us: 56703 cpu_time_us: 54072 } process_cpu_time_us: 722 total_duration_us: 71220 total_cpu_time_us: 56956 2025-06-24T21:11:16.265856Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=281474976710697;tx_id=281474976710697;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710697; 2025-06-24T21:11:16.266013Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976710697;tx_id=281474976710697;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710697; query_phases { duration_us: 7495 table_access { name: "/Root/TestTable" deletes { rows: 1 } } cpu_time_us: 2107 affected_shards: 1 } query_phases { duration_us: 10705 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 368 } } cpu_time_us: 2269 affected_shards: 2 } query_phases { duration_us: 8960 cpu_time_us: 331 affected_shards: 2 } compilation { duration_us: 95241 cpu_time_us: 92300 } process_cpu_time_us: 833 total_duration_us: 124924 total_cpu_time_us: 97840 2025-06-24T21:11:16.853259Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-24T21:11:16.853635Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-24T21:11:16.854004Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-24T21:11:16.854283Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-24T21:11:16.854450Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-24T21:11:16.854607Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-24T21:11:16.854765Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-24T21:11:16.854853Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-24T21:11:16.854960Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-24T21:11:16.855055Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; query_phases { duration_us: 837 cpu_time_us: 837 } query_phases { duration_us: 182583 table_access { name: "/Root/TestTable" reads { rows: 2 bytes: 40 } deletes { rows: 2 } } cpu_time_us: 51572 affected_shards: 10 } query_phases { duration_us: 10729 cpu_time_us: 682 affected_shards: 10 } compilation { duration_us: 378941 cpu_time_us: 373515 } process_cpu_time_us: 1823 total_duration_us: 577969 total_cpu_time_us: 428429 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupAtLeast8BytesInStorage-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 16463, MsgBus: 25683 2025-06-24T21:11:11.629346Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626471045526393:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:11.629410Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/2btm/0049d2/r3tmp/tmp37kMEX/pdisk_1.dat 2025-06-24T21:11:11.989193Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:12.000438Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626471045526244:2079] 1750799471617490 != 1750799471617493 TServer::EnableGrpc on GrpcPort 16463, node 1 2025-06-24T21:11:12.043290Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:12.046351Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:12.050683Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:12.146138Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:12.146163Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:12.146173Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:12.146323Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25683 TClient is connected to server localhost:25683 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:11:12.627739Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:12.679887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:12.701601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:11:12.846584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:12.995598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:13.060112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:14.780043Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626483930429761:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:14.780145Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.062436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.092116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.120372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.149588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.178514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.209622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.259210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:15.311461Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626488225397714:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.311569Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.315447Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626488225397719:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.319550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:15.329810Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626488225397721:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:11:15.425830Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626488225397772:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:16.623854Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626471045526393:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:16.623914Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:16.765301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) /Root/SecondaryKeys/Index/indexImplTable 1 8 /Root/SecondaryKeys 1 8 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-4 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-5 >> KqpCost::IndexLookup+useSink [GOOD] >> KqpCost::IndexLookup-useSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDatetime64 [GOOD] Test command err: 2025-06-24T21:11:06.635436Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:06.635714Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:06.635849Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003735/r3tmp/tmpNqJZZk/pdisk_1.dat 2025-06-24T21:11:07.013302Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:07.021641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:07.061748Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:07.065756Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799464323115 != 1750799464323119 2025-06-24T21:11:07.110113Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:07.110217Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:07.121475Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:07.214011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.258433Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:11:07.258642Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.296895Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.297000Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.298330Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:07.298435Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:07.298491Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:07.299667Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.299804Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.299873Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:11:07.310571Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.335596Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:07.335782Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.335892Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:11:07.335925Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.335962Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:07.335996Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.336396Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:07.336487Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:07.336538Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.336567Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.336603Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:07.336631Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.336697Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:11:07.337062Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:07.337304Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:11:07.337389Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:11:07.338725Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:07.349330Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:07.349404Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:11:07.496823Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:659:2549], serverId# [1:660:2550], sessionId# [0:0:0] 2025-06-24T21:11:07.506207Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:11:07.506284Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.507191Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.507340Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:07.507395Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:07.507627Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:07.507770Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:07.508034Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.508089Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:07.509900Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:07.510246Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.511518Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:07.511554Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.512289Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:07.512341Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.512934Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.512961Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.513008Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:07.513050Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:07.513098Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:07.513161Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.522112Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:07.523680Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:07.523807Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:07.524719Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:07.571970Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:07.572134Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:702:2579], DatabaseId: /Root, PoolId: ... .414214Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:17.414260Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:17.414477Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:17.414591Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:17.415345Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:17.415415Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:17.415990Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:17.416495Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:17.418150Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:17.418214Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:17.418813Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:17.418884Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:17.419681Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:17.419726Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:17.419783Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:17.419866Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:372:2366], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:17.419930Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:17.420045Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:17.421078Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:17.423341Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got 
TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:17.423618Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:17.423697Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:17.464894Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:17.465001Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:703:2579], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:17.465070Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:17.469755Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:17.476486Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:17.524382Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:17.649937Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:17.653138Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:706:2582], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:11:17.688256Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:777:2622] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:17.771796Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwftpqd4gxrfenx870x75a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzNkNTVjZTctYTg4YzI0ZjQtNTkzNTcxYWUtODU0YzhhMDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:17.774775Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:808:2639], serverId# [3:809:2640], sessionId# [0:0:0] 2025-06-24T21:11:17.775287Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-24T21:11:17.775508Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-06-24T21:11:17.786692Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:17.818924Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:816:2646], serverId# [3:817:2647], sessionId# [0:0:0] 2025-06-24T21:11:17.820130Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-24T21:11:17.831742Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-24T21:11:17.831847Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:17.832172Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:11:17.832224Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-24T21:11:17.832574Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:17.832647Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:17.832710Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:17.832787Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:17.832896Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:816:2646], serverId# [3:817:2647], sessionId# [0:0:0] 2025-06-24T21:11:17.833980Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:17.834412Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:17.834648Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
2025-06-24T21:11:17.834697Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:17.834769Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-24T21:11:17.835040Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:17.835118Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:17.835851Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-24T21:11:17.836097Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T21:11:17.836242Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-24T21:11:17.836294Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-24T21:11:17.885685Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:11:17.885775Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-24T21:11:17.886268Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:17.886316Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:17.886359Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-24T21:11:17.886513Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:17.886584Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:17.886640Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest >> TIcNodeCache::GetNodesInfoTest |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgDate [GOOD] Test command err: 2025-06-24T21:11:06.739388Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:06.739669Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:06.739786Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036d4/r3tmp/tmpfQ4oog/pdisk_1.dat 2025-06-24T21:11:07.013213Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:07.021693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:07.063362Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:07.068475Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799464323091 != 1750799464323095 2025-06-24T21:11:07.113616Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:07.113712Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:07.125027Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:07.214157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.258430Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:11:07.258642Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.297653Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.297756Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.299219Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:07.299298Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:07.299342Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:07.299608Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.299721Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.299775Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:11:07.310442Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.329562Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:07.330083Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.330223Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:11:07.330268Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.330309Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:07.330339Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.331548Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:07.331635Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:07.331695Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.331730Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.331772Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:07.331813Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.331894Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:11:07.333226Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:07.333544Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:11:07.333648Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:11:07.335427Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:07.346046Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:07.346134Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:11:07.495027Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:659:2549], serverId# [1:660:2550], sessionId# [0:0:0] 2025-06-24T21:11:07.500667Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:11:07.500756Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.501497Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.501545Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:07.501649Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:07.501897Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:07.502023Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:07.502431Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.502511Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:07.510535Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:07.511008Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.512658Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:07.512706Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.513504Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:07.513566Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.514389Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.514431Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.514496Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:07.514570Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:07.514623Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:07.514759Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.518965Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:07.520699Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:07.520771Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:07.521651Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:07.565626Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:07.565735Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:702:2579], DatabaseId: /Root, PoolId: ... .700771Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:17.700828Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:17.701093Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:17.701256Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:17.702045Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:17.702145Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:17.702663Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:17.703195Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:17.705030Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:17.705087Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:17.705676Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:17.705754Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:17.706676Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:17.706723Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:17.706798Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:17.706873Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:372:2366], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:17.706928Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:17.707022Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:17.708270Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:17.710405Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got 
TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:17.710603Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:17.710671Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:17.749047Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:17.749159Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:703:2579], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:17.749239Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:17.755233Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:17.762401Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:17.810042Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:17.933466Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:17.935876Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:706:2582], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:11:17.970864Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:777:2622] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:18.090393Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwftzkf63c27ehzm1fxqhe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTk4NWYxNmMtYjYyN2QxNDYtYmM2YWM2MzktMmE2YjdhYmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:18.093587Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:808:2639], serverId# [3:809:2640], sessionId# [0:0:0] 2025-06-24T21:11:18.094113Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-24T21:11:18.094337Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-06-24T21:11:18.105407Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:18.136252Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:816:2646], serverId# [3:817:2647], sessionId# [0:0:0] 2025-06-24T21:11:18.137730Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-24T21:11:18.149274Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-24T21:11:18.149371Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:18.149645Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:11:18.149699Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-24T21:11:18.150020Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:18.150082Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:18.150153Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:18.150225Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:18.150348Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:816:2646], serverId# [3:817:2647], sessionId# [0:0:0] 2025-06-24T21:11:18.151442Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:18.151832Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:18.152053Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
2025-06-24T21:11:18.152102Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:18.152152Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-24T21:11:18.152424Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:18.152497Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:18.153154Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-24T21:11:18.153489Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 43, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T21:11:18.153628Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-24T21:11:18.153683Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-24T21:11:18.200704Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:11:18.200778Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-24T21:11:18.201267Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:18.201310Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:18.201355Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-24T21:11:18.201490Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:18.201563Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:18.201610Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberNanoSeconds [GOOD] Test command err: 2025-06-24T21:11:06.921874Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:06.922114Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:06.922213Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036e6/r3tmp/tmp5xhYPS/pdisk_1.dat 2025-06-24T21:11:07.173221Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:07.175629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:07.212200Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:07.216011Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799464323147 != 1750799464323151 2025-06-24T21:11:07.260858Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:07.260989Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:07.272224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:07.351498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.394872Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:11:07.395186Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.445701Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.445811Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.447692Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:07.447780Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:07.447837Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:07.448225Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.448348Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.448518Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:11:07.459140Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.486821Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:07.487014Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.487139Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:11:07.487207Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.487254Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:07.487292Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.487759Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:07.487854Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:07.487933Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.487966Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.488008Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:07.488045Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.488128Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:11:07.488553Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:07.488871Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:11:07.488961Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:11:07.490612Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:07.501243Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:07.501331Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:11:07.648200Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:659:2549], serverId# [1:660:2550], sessionId# [0:0:0] 2025-06-24T21:11:07.651770Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:11:07.651833Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.652437Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.652485Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:07.652541Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:07.652716Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:07.652812Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:07.653051Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.653103Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:07.654574Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:07.654927Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.656074Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:07.656115Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.656747Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:07.656799Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.657399Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.657430Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.657486Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:07.657537Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:07.657577Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:07.657664Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.660863Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:07.662173Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:07.662228Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:07.662874Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:07.686099Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:07.686182Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:702:2579], DatabaseId: /Root, PoolId: ... 1:17.918590Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:17.918648Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:17.918969Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:17.919129Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:17.920015Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:17.920102Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:17.920631Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:17.921120Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:17.923201Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:17.923263Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:17.923877Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:17.923967Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:17.924959Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:17.925011Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:17.925075Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:17.925154Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:372:2366], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:17.925219Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:17.925339Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:17.926693Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:17.928994Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got 
TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:17.929207Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:17.929277Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:17.971316Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:17.971456Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:703:2579], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:17.971553Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:17.978146Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:17.986049Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:18.033678Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:18.160285Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:18.163566Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:706:2582], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:11:18.197746Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:777:2622] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:18.275645Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwfv6h6batfekdksxmpmvt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OTkyMTYzLTMyOWY5MWFkLTVlOTRiM2VhLWE3MzVhMjdm, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:18.277840Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:808:2639], serverId# [3:809:2640], sessionId# [0:0:0] 2025-06-24T21:11:18.278218Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-24T21:11:18.278383Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=4 2025-06-24T21:11:18.289319Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:18.315939Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:816:2646], serverId# [3:817:2647], sessionId# [0:0:0] 2025-06-24T21:11:18.316937Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-24T21:11:18.328140Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-24T21:11:18.328221Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:18.328503Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:11:18.328548Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-24T21:11:18.328837Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:18.328886Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:18.328931Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:18.328989Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:18.329062Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:816:2646], serverId# [3:817:2647], sessionId# [0:0:0] 2025-06-24T21:11:18.329941Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:18.330270Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:18.330441Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:18.330485Z 
node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:18.330544Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-24T21:11:18.330774Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:18.330837Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:18.331401Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-24T21:11:18.331673Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 37, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T21:11:18.331797Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-24T21:11:18.331842Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-24T21:11:18.366266Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:11:18.366339Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-24T21:11:18.366788Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:18.366827Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:18.366867Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-24T21:11:18.366983Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:18.367039Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:18.367079Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupAtLeast8BytesInStorage+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 28654, MsgBus: 7721 2025-06-24T21:11:12.784619Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626475215201343:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:12.784660Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049c2/r3tmp/tmpAwwdJs/pdisk_1.dat 2025-06-24T21:11:13.084886Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for 
subscription [1:7519626475215201315:2079] 1750799472782913 != 1750799472782916 2025-06-24T21:11:13.122378Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28654, node 1 2025-06-24T21:11:13.185189Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:13.185316Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:13.187247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:13.192946Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:13.192972Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:13.192985Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:13.193137Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7721 TClient is connected to server localhost:7721 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:13.732132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:13.760304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:11:13.817933Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:13.895958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:14.039458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:14.120341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:15.825127Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626488100104852:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:15.825263Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:16.091489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.118133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.146444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.173620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.197727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.270651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.338833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.422762Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626492395072818:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:16.422853Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:16.422933Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626492395072823:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:16.427223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:16.438236Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626492395072825:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:11:16.502501Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626492395072876:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:17.520948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:17.819096Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626475215201343:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:17.819236Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; /Root/SecondaryKeys/Index/indexImplTable 1 8 /Root/SecondaryKeys 1 8 >> TPQCDTest::TestUnavailableWithoutClustersList >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-12 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-13 >> TPQCDTest::TestUnavailableWithoutNetClassifier |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestDiscoverClusters >> TPQCDTest::TestRelatedServicesAreRunning >> TPQCDTest::TestUnavailableWithoutBoth >> KqpCost::AAARangeFullScan [GOOD] >> TPQCDTest::TestPrioritizeLocalDatacenter >> EraseRowsTests::ConditionalEraseRowsShouldBreakLocks [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-28 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-29 >> DistributedEraseTests::ConditionalEraseRowsShouldNotEraseModifiedRows [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldNotFailOnMissingRows >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSplit [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-40 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-41 >> TSchemeShardTTLTestsWithReboots::CreateTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookup-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 11215, MsgBus: 2455 2025-06-24T21:11:13.405314Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626481052579036:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:13.405455Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049be/r3tmp/tmpEUzeCv/pdisk_1.dat 2025-06-24T21:11:13.740079Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626481052579017:2079] 1750799473404285 != 1750799473404288 
2025-06-24T21:11:13.746377Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11215, node 1 2025-06-24T21:11:13.782752Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:13.782840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:13.786340Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:13.800458Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:13.800486Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:13.800496Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:13.800648Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2455 TClient is connected to server localhost:2455 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:14.325134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:14.349831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:11:14.417049Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:14.520924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:14.657626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:14.735751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:16.447663Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626493937482544:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:16.447793Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:16.701213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.734854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.767096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.794856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.826337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.857896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.892137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.938637Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626493937483204:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:16.938720Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:16.938841Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626493937483209:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:16.942420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:16.952367Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626493937483211:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:17.054397Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626498232450558:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:18.026746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.405652Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626481052579036:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:18.405784Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; /Root/SecondaryKeys/Index/indexImplTable 1 8 /Root/SecondaryKeys 1 8 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookup+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 9407, MsgBus: 29375 2025-06-24T21:11:13.543168Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626481739050755:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:13.543266Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049ba/r3tmp/tmpF48QiK/pdisk_1.dat 2025-06-24T21:11:13.872257Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626481739050732:2079] 1750799473541314 != 1750799473541317 2025-06-24T21:11:13.881953Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9407, node 1 2025-06-24T21:11:13.941599Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:13.941705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:13.944181Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:13.960995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:13.961019Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:13.961028Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:13.961167Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29375 TClient is connected to server localhost:29375 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:14.511917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:14.536413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:14.589778Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:14.670867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:14.813444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:14.889341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:16.476655Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626494623954257:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:16.476788Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:16.732724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.760595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.789805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.819376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.848013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.879717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.910402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:16.966884Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626494623954914:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:16.966963Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:16.967104Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626494623954919:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:16.970694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:16.980533Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626494623954921:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:11:17.045210Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626498918922268:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:18.076003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.543443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626481739050755:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:18.543515Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; /Root/SecondaryKeys/Index/indexImplTable 1 8 /Root/SecondaryKeys 1 8 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-5 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-6 >> KqpCost::IndexLookupAndTake-useSink [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-34 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-35 >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnDeadShard [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::AAARangeFullScan [GOOD] Test command err: Trying to start YDB, gRPC: 23283, MsgBus: 63989 2025-06-24T21:11:14.890226Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626483778486862:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:14.890548Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049b9/r3tmp/tmpXH4fuE/pdisk_1.dat 2025-06-24T21:11:15.208632Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:15.208989Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626483778486844:2079] 1750799474889035 != 1750799474889038 TServer::EnableGrpc on GrpcPort 23283, node 1 2025-06-24T21:11:15.291896Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:15.291916Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:15.291944Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:15.292078Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:15.303645Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Unknown -> Disconnected 2025-06-24T21:11:15.303786Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:15.304984Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63989 TClient is connected to server localhost:63989 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:15.822769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:15.836148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:11:15.841448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:15.898174Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:15.980382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:16.101440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:11:16.165961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:17.736938Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626496663390390:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:17.737080Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:17.997481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.023916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.050977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.077636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.104123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.170855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.206276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.248929Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626500958358346:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:18.248986Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:18.249037Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626500958358351:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:18.251988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:18.260064Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626500958358353:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:11:18.345493Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626500958358404:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } {"Plan":{"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Test"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Group (-∞, +∞)","Name (-∞, +∞)"],"Reverse":false,"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/Test","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"Test","ReadColumns":["Amount","Comment","Group","Name"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"Limit","Limit":"1"},{"Inputs":[{"ExternalPlanNodeId":1}],"E-Rows":"0","Predicate":"item.Amount \u003C 5000","Name":"Filter","E-Size":"0","E-Cost":"0"}],"Node Type":"Limit-Filter","Stats":{"UseLlvm":"undefined","Output":[{"Pop":{" ... r"}],"Operators":[{"A-Rows":1,"A-SelfCpu":0.944,"A-Cpu":0.944,"A-Size":19,"Name":"Limit","Limit":"1"}],"Node Type":"Limit"}],"Operators":[{"A-Rows":1,"A-SelfCpu":0.554,"A-Cpu":1.498,"A-Size":19,"Name":"Limit","Limit":"1"}],"Node Type":"Limit"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","PlanNodeType":"Query"}} query_phases { duration_us: 5100 table_access { name: "/Root/Test" reads { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 4340 affected_shards: 1 } compilation { duration_us: 162523 cpu_time_us: 160137 } process_cpu_time_us: 317 query_plan: "{\"Plan\":{\"Plans\":[{\"PlanNodeId\":5,\"Plans\":[{\"PlanNodeId\":4,\"Plans\":[{\"PlanNodeId\":3,\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"Tables\":[\"Test\"],\"PlanNodeId\":1,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"0\",\"ReadRanges\":[\"Group (-\342\210\236, +\342\210\236)\",\"Name (-\342\210\236, +\342\210\236)\"],\"Reverse\":false,\"Name\":\"TableFullScan\",\"Inputs\":[],\"Path\":\"\\/Root\\/Test\",\"ReadRangesPointPrefixLen\":\"0\",\"E-Rows\":\"0\",\"Table\":\"Test\",\"ReadColumns\":[\"Amount\",\"Comment\",\"Group\",\"Name\"],\"E-Cost\":\"0\"}],\"Node Type\":\"TableFullScan\"}],\"Operators\":[{\"Inputs\":[{\"InternalOperatorId\":1}],\"Name\":\"Limit\",\"Limit\":\"1\"},{\"Inputs\":[{\"ExternalPlanNodeId\":1}],\"E-Rows\":\"0\",\"Predicate\":\"item.Amount \\u003C 5000\",\"Name\":\"Filter\",\"E-Size\":\"0\",\"E-Cost\":\"0\"}],\"Node 
Type\":\"Limit-Filter\",\"Stats\":{\"UseLlvm\":\"undefined\",\"Output\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"LastMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Bytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19,\"History\":[2,19]}},\"Name\":\"4\",\"Push\":{\"WaitTimeUs\":{\"Count\":1,\"Sum\":1466,\"Max\":1466,\"Min\":1466,\"History\":[2,1466]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"LastMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1}}}],\"MaxMemoryUsage\":{\"Count\":1,\"Sum\":1048576,\"Max\":1048576,\"Min\":1048576,\"History\":[2,1048576]},\"Tasks\":1,\"OutputRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FinishedTasks\":1,\"IngressRows\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"PhysicalStageId\":0,\"StageDurationUs\":0,\"Table\":[{\"Path\":\"\\/Root\\/Test\",\"ReadRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ReadBytes\":{\"Count\":1,\"Sum\":20,\"Max\":20,\"Min\":20}}],\"BaseTimeMs\":1750799479333,\"OutputBytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19},\"CpuTimeUs\":{\"Count\":1,\"Sum\":944,\"Max\":944,\"Min\":944,\"History\":[2,944]},\"Ingress\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"LastMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Bytes\":{\"Count\":1,\"Sum\":192,\"Max\":192,\"Min\":192,\"History\":[2,192]}},\"External\":{},\"Name\":\"KqpReadRangesSource\",\"Ingress\":{},\"Push\":{\"LastMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Bytes\":{\"Count\":1,\"Sum\":192,\"Max\":192,\"Min\":192,\"History\":[2,192]},\"WaitTimeUs\":{\"Count\":1,\"Sum\":1504,\"Max\":1504,\"Min\":1504,\"History\":[2,1504]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1}}}],\"UpdateTimeMs\":1}}],\"Node Type\":\"Merge\",\"SortColumns\":[\"Group (Asc)\"],\"PlanNodeType\":\"Connection\"}],\"Operators\":[{\"Inputs\":[{\"ExternalPlanNodeId\":3}],\"Name\":\"Limit\",\"Limit\":\"1\"}],\"Node 
Type\":\"Limit\",\"Stats\":{\"UseLlvm\":\"undefined\",\"OutputRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"PhysicalStageId\":1,\"FinishedTasks\":1,\"InputBytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19},\"DurationUs\":{\"Count\":1,\"Sum\":1000,\"Max\":1000,\"Min\":1000},\"MaxMemoryUsage\":{\"Count\":1,\"Sum\":1048576,\"Max\":1048576,\"Min\":1048576,\"History\":[2,1048576]},\"BaseTimeMs\":1750799479333,\"Output\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"LastMessageMs\":{\"Count\":1,\"Sum\":2,\"Max\":2,\"Min\":2},\"FirstMessageMs\":{\"Count\":1,\"Sum\":2,\"Max\":2,\"Min\":2},\"Bytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19,\"History\":[2,19]}},\"Name\":\"RESULT\",\"Push\":{\"WaitTimeUs\":{\"Count\":1,\"Sum\":1163,\"Max\":1163,\"Min\":1163,\"History\":[2,1163]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"LastMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1}}}],\"CpuTimeUs\":{\"Count\":1,\"Sum\":554,\"Max\":554,\"Min\":554,\"History\":[2,554]},\"StageDurationUs\":1000,\"ResultRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResultBytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19},\"OutputBytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19},\"Input\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"LastMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Bytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19,\"History\":[2,19]}},\"Name\":\"2\",\"Push\":{\"LastMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FirstMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Bytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19,\"History\":[2,19]},\"WaitTimeUs\":{\"Count\":1,\"Sum\":1085,\"Max\":1085,\"Min\":1085,\"History\":[2,1085]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1}}}],\"UpdateTimeMs\":2,\"InputRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Tasks\":1}}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"Stats\":{\"Compilation\":{\"FromCache\":false,\"DurationUs\":162523,\"CpuTimeUs\":160137},\"ProcessCpuTimeUs\":317,\"TotalDurationUs\":173108,\"ResourcePoolId\":\"default\",\"QueuedTimeUs\":293},\"PlanNodeType\":\"Query\"},\"meta\":{\"version\":\"0.2\",\"type\":\"query\"},\"SimplifiedPlan\":{\"PlanNodeId\":0,\"Plans\":[{\"PlanNodeId\":1,\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"PlanNodeId\":4,\"Plans\":[{\"PlanNodeId\":5,\"Plans\":[{\"PlanNodeId\":6,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"0\",\"ReadRanges\":[\"Group (-\342\210\236, +\342\210\236)\",\"Name (-\342\210\236, +\342\210\236)\"],\"Reverse\":false,\"Name\":\"TableFullScan\",\"Path\":\"\\/Root\\/Test\",\"ReadRangesPointPrefixLen\":\"0\",\"E-Rows\":\"0\",\"Table\":\"Test\",\"ReadColumns\":[\"Amount\",\"Comment\",\"Group\",\"Name\"],\"E-Cost\":\"0\"}],\"Node Type\":\"TableFullScan\"}],\"Operators\":[{\"E-Rows\":\"0\",\"Predicate\":\"item.Amount \\u003C 
5000\",\"Name\":\"Filter\",\"E-Size\":\"0\",\"E-Cost\":\"0\"}],\"Node Type\":\"Filter\"}],\"Operators\":[{\"A-Rows\":1,\"A-SelfCpu\":0.944,\"A-Cpu\":0.944,\"A-Size\":19,\"Name\":\"Limit\",\"Limit\":\"1\"}],\"Node Type\":\"Limit\"}],\"Operators\":[{\"A-Rows\":1,\"A-SelfCpu\":0.554,\"A-Cpu\":1.498,\"A-Size\":19,\"Name\":\"Limit\",\"Limit\":\"1\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"PlanNodeType\":\"Query\"}}" query_ast: "(\n(let $1 (KqpTable \'\"/Root/Test\" \'\"72057594046644480:9\" \'\"\" \'1))\n(let $2 \'(\'\"Amount\" \'\"Comment\" \'\"Group\" \'\"Name\"))\n(let $3 (KqpRowsSourceSettings $1 $2 \'(\'(\'\"Sorted\")) (Void) \'()))\n(let $4 (Uint64 \'1))\n(let $5 (OptionalType (DataType \'String)))\n(let $6 (StructType \'(\'\"Amount\" (OptionalType (DataType \'Uint64))) \'(\'\"Comment\" $5) \'(\'\"Group\" (OptionalType (DataType \'Uint32))) \'(\'\"Name\" $5)))\n(let $7 \'(\'(\'\"_logical_id\" \'559) \'(\'\"_id\" \'\"2d34bc1e-76265979-aabacd63-a2787914\") \'(\'\"_wide_channels\" $6)))\n(let $8 (DqPhyStage \'((DqSource (DataSource \'\"KqpReadRangesSource\") $3)) (lambda \'($12) (block \'(\n (let $13 (lambda \'($16) (block \'(\n (let $17 (Member $16 \'\"Amount\"))\n (return $17 (Member $16 \'\"Comment\") (Member $16 \'\"Group\") (Member $16 \'\"Name\") (Coalesce (< $17 (Uint64 \'\"5000\")) (Bool \'false)))\n ))))\n (let $14 (WideFilter (ExpandMap (ToFlow $12) $13) (lambda \'($18 $19 $20 $21 $22) $22) $4))\n (let $15 (lambda \'($23 $24 $25 $26 $27) $23 $24 $25 $26))\n (return (FromFlow (WideMap $14 $15)))\n))) $7))\n(let $9 (DqCnMerge (TDqOutput $8 \'0) \'(\'(\'\"2\" \'\"Asc\"))))\n(let $10 (DqPhyStage \'($9) (lambda \'($28) (FromFlow (NarrowMap (Take (ToFlow $28) $4) (lambda \'($29 $30 $31 $32) (AsStruct \'(\'\"Amount\" $29) \'(\'\"Comment\" $30) \'(\'\"Group\" $31) \'(\'\"Name\" $32)))))) \'(\'(\'\"_logical_id\" \'572) \'(\'\"_id\" \'\"21336d91-f0bfcd75-bbf0343-80290615\"))))\n(let $11 (DqCnResult (TDqOutput $10 \'0) \'()))\n(return (KqpPhysicalQuery \'((KqpPhysicalTx \'($8 $10) \'($11) \'() \'(\'(\'\"type\" \'\"data\")))) \'((KqpTxResultBinding (ListType $6) \'0 \'0)) \'(\'(\'\"type\" \'\"data_query\"))))\n)\n" total_duration_us: 173108 total_cpu_time_us: 164794 query_meta: 
"{\"query_database\":\"/Root\",\"query_parameter_types\":{},\"table_metadata\":[\"{\\\"DoesExist\\\":true,\\\"Cluster\\\":\\\"db\\\",\\\"Name\\\":\\\"/Root/Test\\\",\\\"SysView\\\":\\\"\\\",\\\"PathId\\\":{\\\"OwnerId\\\":72057594046644480,\\\"TableId\\\":9},\\\"SchemaVersion\\\":1,\\\"Kind\\\":1,\\\"Columns\\\":[{\\\"Name\\\":\\\"Amount\\\",\\\"Id\\\":3,\\\"Type\\\":\\\"Uint64\\\",\\\"TypeId\\\":4,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"Comment\\\",\\\"Id\\\":4,\\\"Type\\\":\\\"String\\\",\\\"TypeId\\\":4097,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"Group\\\",\\\"Id\\\":1,\\\"Type\\\":\\\"Uint32\\\",\\\"TypeId\\\":2,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"Name\\\",\\\"Id\\\":2,\\\"Type\\\":\\\"String\\\",\\\"TypeId\\\":4097,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}}],\\\"KeyColunmNames\\\":[\\\"Group\\\",\\\"Name\\\"],\\\"RecordsCount\\\":0,\\\"DataSize\\\":0,\\\"StatsLoaded\\\":false}\"],\"table_meta_serialization_type\":2,\"created_at\":\"1750799479\",\"query_type\":\"QUERY_TYPE_SQL_DML\",\"query_syntax\":\"1\",\"query_cluster\":\"db\",\"query_id\":\"aad68ae0-33595d34-43fef9bc-f362129\",\"version\":\"1.0\"}" ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTestsWithReboots::CreateTable [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T21:10:19.956414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:19.956505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: 
BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:19.956548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:19.956604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:19.956651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:19.956679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:19.956736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:19.956808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:19.957569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:19.957904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:20.032226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:10:20.032274Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:20.032814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T21:10:20.043170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:20.043482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:20.043588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:20.049546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:20.049706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:20.050164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.050331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:20.052448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:20.052578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:20.053393Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:20.053452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:20.053603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:20.053639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:20.053680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:20.053766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T21:10:20.060637Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:20.175841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:20.176046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.176220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:20.176266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:20.176478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:20.176592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:20.178572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.178784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: 
txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:20.179027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.179077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:20.179122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:20.179175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:20.181485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.181545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:20.181584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:20.183441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.183487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:20.183561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:20.183611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:20.186999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:20.188971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:20.189123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:20.190035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:20.190154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 42949694 ... 
ion: 3 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-24T21:11:20.055476Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-24T21:11:20.055518Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1002 2025-06-24T21:11:20.055561Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-24T21:11:20.055606Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:11:20.055701Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1002, ready parts: 0/1, is published: true 2025-06-24T21:11:20.058063Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1002 Step: 5000003 OrderId: 1002 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1271 } } CommitVersion { Step: 5000003 TxId: 1002 } 2025-06-24T21:11:20.058120Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 1002, tablet: 72075186233409546, partId: 0 2025-06-24T21:11:20.058293Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 1002:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1002 Step: 5000003 OrderId: 1002 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1271 } } CommitVersion { Step: 5000003 TxId: 1002 } 2025-06-24T21:11:20.058419Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1002 Step: 5000003 OrderId: 1002 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1271 } } CommitVersion { Step: 5000003 TxId: 1002 } 2025-06-24T21:11:20.060728Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 334 RawX2: 309237647631 } Origin: 72075186233409546 State: 2 TxId: 1002 Step: 0 Generation: 2 2025-06-24T21:11:20.060786Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 1002, tablet: 72075186233409546, partId: 0 2025-06-24T21:11:20.060930Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 1002:0, at schemeshard: 72057594046678944, message: Source { RawX1: 334 RawX2: 309237647631 } Origin: 72075186233409546 State: 2 TxId: 1002 Step: 0 Generation: 2 
2025-06-24T21:11:20.060994Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 1002:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:11:20.061095Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 1002:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 334 RawX2: 309237647631 } Origin: 72075186233409546 State: 2 TxId: 1002 Step: 0 Generation: 2 2025-06-24T21:11:20.061164Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 1002:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:11:20.061209Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-24T21:11:20.061253Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 1002:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:11:20.061303Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1002:0 129 -> 240 2025-06-24T21:11:20.062861Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-24T21:11:20.062971Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-24T21:11:20.065360Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-24T21:11:20.065554Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-24T21:11:20.066036Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-24T21:11:20.066097Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1002:0 ProgressState 2025-06-24T21:11:20.066219Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1002:0 progress is 1/1 2025-06-24T21:11:20.066258Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-24T21:11:20.066308Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1002:0 progress is 1/1 2025-06-24T21:11:20.066347Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-24T21:11:20.066391Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1002, ready parts: 1/1, is published: true 2025-06-24T21:11:20.066441Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-24T21:11:20.066490Z node 72 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1002:0 2025-06-24T21:11:20.066531Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1002:0 2025-06-24T21:11:20.066676Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 TestModificationResult got TxId: 1002, wait until txId: 1002 TestWaitNotification wait txId: 1002 2025-06-24T21:11:20.070711Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1002: send EvNotifyTxCompletion 2025-06-24T21:11:20.070779Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1002 2025-06-24T21:11:20.071104Z node 72 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1002, at schemeshard: 72057594046678944 2025-06-24T21:11:20.071211Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1002: got EvNotifyTxCompletionResult 2025-06-24T21:11:20.071242Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1002: satisfy waiter [72:411:2382] TestWaitNotification: OK eventTxId 1002 2025-06-24T21:11:20.071638Z node 72 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:11:20.071822Z node 72 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 219us result status StatusSuccess 2025-06-24T21:11:20.072323Z node 72 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 
Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldBreakLocks [GOOD] Test command err: 2025-06-24T21:11:08.160911Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:08.161317Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:08.161535Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036be/r3tmp/tmpTsx5Q4/pdisk_1.dat 2025-06-24T21:11:08.465141Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:08.468592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:08.521350Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:08.530801Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799465934470 != 1750799465934474 2025-06-24T21:11:08.578021Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:08.578153Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:08.589800Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:08.679098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:08.734414Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:647:2545] 2025-06-24T21:11:08.734746Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:08.784455Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:08.784771Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:08.786749Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:08.786858Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:08.786928Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:08.787369Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:08.787676Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:08.787761Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:676:2545] in generation 1 2025-06-24T21:11:08.789624Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:650:2547] 2025-06-24T21:11:08.789876Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:08.800857Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:08.801331Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:652:2549] 2025-06-24T21:11:08.801541Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:08.810047Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:08.811670Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-24T21:11:08.811756Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-24T21:11:08.811808Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-24T21:11:08.812145Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:08.812498Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:08.812560Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:697:2547] in generation 1 2025-06-24T21:11:08.813176Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:08.813264Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:08.814732Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-24T21:11:08.814797Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-24T21:11:08.814853Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-24T21:11:08.815236Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:08.815358Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:08.815436Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:698:2549] in generation 1 2025-06-24T21:11:08.826595Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:08.884711Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:08.884997Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:08.885153Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:702:2576] 2025-06-24T21:11:08.885214Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:08.885257Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:08.885298Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:08.885478Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:08.885542Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-24T21:11:08.885635Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:08.885700Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:703:2577] 2025-06-24T21:11:08.885756Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-24T21:11:08.885784Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-24T21:11:08.885811Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:11:08.886247Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:08.886290Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-24T21:11:08.886359Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:08.886437Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:704:2578] 2025-06-24T21:11:08.886471Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-24T21:11:08.886496Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-24T21:11:08.886521Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:11:08.886884Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:08.887049Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:08.887113Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-24T21:11:08.887204Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-24T21:11:08.887349Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:08.887410Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:08.887477Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:08.887536Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:08.887590Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:11:08.887619Z node 1 :TX_DATASHARD DEBUG: 
datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:08.887646Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-24T21:11:08.887690Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:11:08.887843Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:636:2540], serverId# [1:664:2554], sessionId# [0:0:0] 2025-06-24T21:11:08.887906Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:637:2541], serverId# [1:670:2559], sessionId# [0:0:0] 2025-06-24T21:11:08.887936Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037890 2025-06-24T21:11:08.888005Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037890 2025-06-24T21:11:08.888203Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cp ... mplete at 72075186224037888 2025-06-24T21:11:18.928225Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:18.928274Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:18.928319Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:18.928375Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:372:2366], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:18.928414Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:18.928489Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:18.929607Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:18.931429Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:18.931552Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:18.931604Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:18.962939Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:18.963022Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:703:2579], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:18.963090Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:18.967076Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:18.972905Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:19.019099Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:19.141813Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:19.144357Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:706:2582], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:11:19.179228Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:777:2622] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:19.262316Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwfw5h1tqxz2t7bnmejf21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Y2E2Y2ZiNjMtYzUyOWQ4MWYtNWM2ZTQ4MTUtODZhYWQ0M2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:19.265136Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:808:2639], serverId# [3:809:2640], sessionId# [0:0:0] 2025-06-24T21:11:19.265563Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-24T21:11:19.265733Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=3 2025-06-24T21:11:19.276735Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:19.544441Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwfwgqddpfnw9dpn3h386b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NkNGYyYzAtZjYzODc3YjItZDQ4MzI3MTAtM2YzYjI3ZDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:19.547590Z node 3 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715661, counter# 0 for [OwnerId: 72057594046644480, LocalPathId: 2] { items { uint64_value: 0 } } 2025-06-24T21:11:19.573968Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:848:2671], serverId# [3:849:2672], sessionId# [0:0:0] 2025-06-24T21:11:19.574954Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-24T21:11:19.586384Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-24T21:11:19.586474Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:19.586579Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2560: Waiting for PlanStep# 1501 from mediator time cast 2025-06-24T21:11:19.587392Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3780: Notified by mediator time cast with PlanStep# 1501 at tablet 72075186224037888 2025-06-24T21:11:19.587477Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:19.587782Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:11:19.587837Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037888 2025-06-24T21:11:19.587931Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:848:2671], serverId# [3:849:2672], sessionId# [0:0:0] 
2025-06-24T21:11:19.588094Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:19.588146Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:19.588199Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:19.588275Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:19.658440Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwfws58rfsx6h14hf8cv50, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NkNGYyYzAtZjYzODc3YjItZDQ4MzI3MTAtM2YzYjI3ZDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:19.661554Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:6] at 72075186224037888 2025-06-24T21:11:19.661763Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=6; 2025-06-24T21:11:19.673150Z node 3 :TX_DATASHARD INFO: datashard_write_operation.cpp:707: Write transaction 6 at 72075186224037888 has an error: Operation is aborting because locks are not valid 2025-06-24T21:11:19.673445Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 6 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-24T21:11:19.673683Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 6 at tablet 72075186224037888 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-24T21:11:19.673780Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:19.674206Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [3:870:2645], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [3:815:2645]Got LOCKS BROKEN for table `/Root/table-1`. ShardID=72075186224037888, Sink=[3:870:2645].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-24T21:11:19.674919Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:863:2645], SessionActorId: [3:815:2645], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[3:815:2645]. isRollback=0 2025-06-24T21:11:19.675471Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=N2NkNGYyYzAtZjYzODc3YjItZDQ4MzI3MTAtM2YzYjI3ZDc=, ActorId: [3:815:2645], ActorState: ExecuteState, TraceId: 01jyhwfws58rfsx6h14hf8cv50, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [3:864:2645] from: [3:863:2645] 2025-06-24T21:11:19.675803Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:7] at 72075186224037888 2025-06-24T21:11:19.675866Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:420: Skip empty write operation for [0:7] at 72075186224037888 2025-06-24T21:11:19.676092Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:19.676265Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:864:2645] TxId: 281474976715662. Ctx: { TraceId: 01jyhwfws58rfsx6h14hf8cv50, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NkNGYyYzAtZjYzODc3YjItZDQ4MzI3MTAtM2YzYjI3ZDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-24T21:11:19.676620Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=N2NkNGYyYzAtZjYzODc3YjItZDQ4MzI3MTAtM2YzYjI3ZDc=, ActorId: [3:815:2645], ActorState: ExecuteState, TraceId: 01jyhwfws58rfsx6h14hf8cv50, Create QueryResponse for error on request, msg: |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestAllocateAllByPieces >> TTxLocatorTest::TestImposibleSize >> TTxLocatorTest::TestSignificantRequestWhenRunReserveTx >> TTxLocatorTest::TestWithReboot >> TTxLocatorTest::TestAllocateAll ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSplit [GOOD] Test command err: 2025-06-24T21:11:06.668841Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:06.669145Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:06.669264Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003710/r3tmp/tmpd8u9dA/pdisk_1.dat 2025-06-24T21:11:07.013355Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:07.021571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:07.057333Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:07.065746Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799464323104 != 1750799464323108 2025-06-24T21:11:07.110165Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:07.110250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:07.121415Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:07.214273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.267867Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:647:2545] 2025-06-24T21:11:07.268165Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.308408Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.308649Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.310428Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:07.310505Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:07.310584Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:07.311029Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.311294Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.311367Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:676:2545] in generation 1 2025-06-24T21:11:07.312971Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:650:2547] 2025-06-24T21:11:07.313196Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.322205Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.322483Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:652:2549] 2025-06-24T21:11:07.322618Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.328783Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.329910Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-24T21:11:07.329983Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-24T21:11:07.330024Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-24T21:11:07.330241Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.330489Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.330525Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:697:2547] in generation 1 2025-06-24T21:11:07.330984Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.331044Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.332055Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-24T21:11:07.332104Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-24T21:11:07.332141Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-24T21:11:07.332392Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.332457Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.332512Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:698:2549] in generation 1 2025-06-24T21:11:07.343434Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.387932Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:07.388210Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.388366Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:702:2576] 2025-06-24T21:11:07.388425Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.388466Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:07.388505Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.388623Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.388666Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-24T21:11:07.388760Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.388828Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:703:2577] 2025-06-24T21:11:07.388913Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-24T21:11:07.388945Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-24T21:11:07.388972Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:11:07.389401Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.389441Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-24T21:11:07.389505Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.389583Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:704:2578] 2025-06-24T21:11:07.389612Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-24T21:11:07.389638Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-24T21:11:07.389663Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:11:07.390027Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:07.390208Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:07.390262Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-24T21:11:07.390326Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-24T21:11:07.390466Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.390507Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.390567Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:07.390621Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.390693Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:11:07.390734Z node 1 :TX_DATASHARD DEBUG: 
datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.390765Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-24T21:11:07.390809Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:11:07.390954Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:636:2540], serverId# [1:664:2554], sessionId# [0:0:0] 2025-06-24T21:11:07.391014Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:637:2541], serverId# [1:670:2559], sessionId# [0:0:0] 2025-06-24T21:11:07.391046Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037890 2025-06-24T21:11:07.391102Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037890 2025-06-24T21:11:07.391279Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cp ... node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037889, table# 7, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-24T21:11:19.903696Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037889, table# 7, finished edge# 0, front# 0 2025-06-24T21:11:19.905470Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037889, table# 8, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-24T21:11:19.905514Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037889, table# 8, finished edge# 0, front# 0 2025-06-24T21:11:19.906571Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037889, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-24T21:11:19.906620Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037889, table# 1001, finished edge# 0, front# 0 2025-06-24T21:11:19.907172Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:256: 72075186224037889 snapshot complete for split OpId 281474976715663 2025-06-24T21:11:19.907432Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 3 snapshot size is 12 total snapshot size is 12 for split OpId 281474976715663 2025-06-24T21:11:19.907538Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 4 snapshot size is 12 total snapshot size is 24 for split OpId 281474976715663 2025-06-24T21:11:19.907584Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 7 snapshot size is 12 total snapshot size is 36 for split OpId 281474976715663 2025-06-24T21:11:19.907619Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 8 snapshot size is 12 total snapshot size is 48 for split OpId 281474976715663 2025-06-24T21:11:19.907925Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 1001 snapshot size is 146 total snapshot size is 194 for split OpId 281474976715663 2025-06-24T21:11:19.908175Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 
BorrowSnapshot: table 3 snapshot size is 12 total snapshot size is 206 for split OpId 281474976715663 2025-06-24T21:11:19.908215Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 4 snapshot size is 12 total snapshot size is 218 for split OpId 281474976715663 2025-06-24T21:11:19.908250Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 7 snapshot size is 12 total snapshot size is 230 for split OpId 281474976715663 2025-06-24T21:11:19.908283Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 8 snapshot size is 12 total snapshot size is 242 for split OpId 281474976715663 2025-06-24T21:11:19.908449Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 1001 snapshot size is 155 total snapshot size is 397 for split OpId 281474976715663 2025-06-24T21:11:19.909207Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:424: 72075186224037889 Sending snapshots from src for split OpId 281474976715663 2025-06-24T21:11:19.909429Z node 3 :TX_DATASHARD DEBUG: datashard_impl.h:2342: Sending snapshot for split opId 281474976715663 from datashard 72075186224037889 to datashard 72075186224037892 size 221 2025-06-24T21:11:19.909558Z node 3 :TX_DATASHARD DEBUG: datashard_impl.h:2342: Sending snapshot for split opId 281474976715663 from datashard 72075186224037889 to datashard 72075186224037891 size 215 2025-06-24T21:11:19.909932Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037891, clientId# [3:1152:2863], serverId# [3:1153:2864], sessionId# [0:0:0] 2025-06-24T21:11:19.909984Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037892, clientId# [3:1151:2862], serverId# [3:1154:2865], sessionId# [0:0:0] 2025-06-24T21:11:19.910230Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:175: 72075186224037891 Received snapshot for split/merge TxId 281474976715663 from tabeltId 72075186224037889 2025-06-24T21:11:19.911099Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:175: 72075186224037892 Received snapshot for split/merge TxId 281474976715663 from tabeltId 72075186224037889 2025-06-24T21:11:19.913181Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:304: 72075186224037891 ack snapshot OpId 281474976715663 2025-06-24T21:11:19.913361Z node 3 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037891 2025-06-24T21:11:19.913496Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037891 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:19.913608Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-24T21:11:19.913684Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037891, actorId: [3:1157:2868] 2025-06-24T21:11:19.913733Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037891 2025-06-24T21:11:19.913791Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037891 2025-06-24T21:11:19.913839Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-24T21:11:19.914060Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037889 
Received snapshot Ack from dst 72075186224037891 for split OpId 281474976715663 2025-06-24T21:11:19.915092Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037891 time 2000 2025-06-24T21:11:19.915172Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-24T21:11:19.915248Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037891 2025-06-24T21:11:19.915285Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037891 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:19.915322Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-06-24T21:11:19.915361Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037891 2025-06-24T21:11:19.915606Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037891, clientId# [3:1152:2863], serverId# [3:1153:2864], sessionId# [0:0:0] 2025-06-24T21:11:19.915686Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:304: 72075186224037892 ack snapshot OpId 281474976715663 2025-06-24T21:11:19.915790Z node 3 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037892 2025-06-24T21:11:19.915873Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037892 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:19.915945Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037892 2025-06-24T21:11:19.915998Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037892, actorId: [3:1159:2870] 2025-06-24T21:11:19.916026Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037892 2025-06-24T21:11:19.916084Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037892 2025-06-24T21:11:19.916119Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-24T21:11:19.916365Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037889 Received snapshot Ack from dst 72075186224037892 for split OpId 281474976715663 2025-06-24T21:11:19.917024Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037892 time 2000 2025-06-24T21:11:19.917068Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-24T21:11:19.917152Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037891 coordinator 72057594046316545 last step 1500 next step 2000 2025-06-24T21:11:19.917211Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037891: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-06-24T21:11:19.917322Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-06-24T21:11:19.917360Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 0 active planned 0 immediate 0 planned 0 
2025-06-24T21:11:19.917414Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037892 TxInFly 0 2025-06-24T21:11:19.917462Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-24T21:11:19.917740Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037892, clientId# [3:1151:2862], serverId# [3:1154:2865], sessionId# [0:0:0] 2025-06-24T21:11:19.918065Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037892 coordinator 72057594046316545 last step 1500 next step 2000 2025-06-24T21:11:19.918134Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037892: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-06-24T21:11:19.939829Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:485: 72075186224037889 ack split to schemeshard 281474976715663 2025-06-24T21:11:19.944560Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:565: Got TEvSplitPartitioningChanged: opId: 281474976715663, at datashard: 72075186224037889, state: SplitSrcWaitForPartitioningChanged 2025-06-24T21:11:19.946696Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037889 2025-06-24T21:11:19.946786Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037889 2025-06-24T21:11:19.947126Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:11:19.947208Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:21: Progress tx at non-ready tablet 72075186224037889 state 5 2025-06-24T21:11:19.947474Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [3:1043:2783], serverId# [3:1044:2784], sessionId# [0:0:0] 2025-06-24T21:11:19.947706Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:532: 72075186224037889 ack split partitioning changed to schemeshard 281474976715663 2025-06-24T21:11:19.947785Z node 3 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:11:19.947842Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 >> TTxLocatorTest::TestAllocateAll [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupAndTake-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 7862, MsgBus: 5408 2025-06-24T21:11:15.348834Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626487764150868:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:15.348944Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049b2/r3tmp/tmpws8qjT/pdisk_1.dat 2025-06-24T21:11:15.673546Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:15.673843Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch 
for subscription [1:7519626487764150850:2079] 1750799475347803 != 1750799475347806 TServer::EnableGrpc on GrpcPort 7862, node 1 2025-06-24T21:11:15.726935Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:15.727108Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:15.727538Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:15.727556Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:15.727563Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:15.727702Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:15.729264Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5408 TClient is connected to server localhost:5408 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:16.245587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:16.266265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:11:16.356572Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:16.386571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:16.516952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:16.580566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:17.899428Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626496354087094:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:17.899523Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:18.122683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.146262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.167960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.191351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.217005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.245289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.312815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.375699Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626500649055056:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:18.375795Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:18.376041Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626500649055061:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:18.379288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:18.387209Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626500649055063:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:11:18.445415Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626500649055114:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:19.369182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) /Root/SecondaryKeys/Index/indexImplTable 2 16 /Root/SecondaryKeys 1 8 >> TTxLocatorTest::TestAllocateAllByPieces [GOOD] >> TTxLocatorTest::TestImposibleSize [GOOD] >> TTxLocatorTest::TestSignificantRequestWhenRunReserveTx [GOOD] >> DistributedEraseTests::ConditionalEraseRowsCheckLimits [GOOD] >> DistributedEraseTests::ConditionalEraseRowsAsyncIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestAllocateAllByPieces [GOOD] Test command err: 2025-06-24T21:11:21.695893Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-24T21:11:21.700153Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-24T21:11:21.702303Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-24T21:11:21.718675Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.722516Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-24T21:11:21.737343Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.737547Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.737658Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-24T21:11:21.737830Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.737965Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.738282Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-24T21:11:21.738397Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! 
Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-24T21:11:21.740147Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:72:2106] requested range size#8796093022207 2025-06-24T21:11:21.742702Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.742801Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.742902Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 8796093022207 2025-06-24T21:11:21.742946Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:72:2106] TEvAllocateResult from# 0 to# 8796093022207 expected SUCCESS 2025-06-24T21:11:21.746897Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:77:2110] requested range size#8796093022207 2025-06-24T21:11:21.747520Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.747607Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.747708Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8796093022207 Reserved to# 17592186044414 2025-06-24T21:11:21.747761Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:77:2110] TEvAllocateResult from# 8796093022207 to# 17592186044414 expected SUCCESS 2025-06-24T21:11:21.748167Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:81:2114] requested range size#8796093022207 2025-06-24T21:11:21.748716Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.748836Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.748932Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 17592186044414 Reserved to# 26388279066621 2025-06-24T21:11:21.748980Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:81:2114] TEvAllocateResult from# 17592186044414 to# 26388279066621 expected SUCCESS 2025-06-24T21:11:21.749372Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:85:2118] requested range size#8796093022207 2025-06-24T21:11:21.749878Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.750009Z node 1 :TABLET_MAIN DEBUG: 
tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.750096Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 26388279066621 Reserved to# 35184372088828 2025-06-24T21:11:21.750170Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:85:2118] TEvAllocateResult from# 26388279066621 to# 35184372088828 expected SUCCESS 2025-06-24T21:11:21.750715Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:89:2122] requested range size#8796093022207 2025-06-24T21:11:21.751083Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.751189Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.751438Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 35184372088828 Reserved to# 43980465111035 2025-06-24T21:11:21.751699Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:89:2122] TEvAllocateResult from# 35184372088828 to# 43980465111035 expected SUCCESS 2025-06-24T21:11:21.752191Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:93:2126] requested range size#8796093022207 2025-06-24T21:11:21.752532Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.752598Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.752681Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 43980465111035 Reserved to# 52776558133242 2025-06-24T21:11:21.752720Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:93:2126] TEvAllocateResult from# 43980465111035 to# 52776558133242 expected SUCCESS 2025-06-24T21:11:21.753179Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:97:2130] requested range size#8796093022207 2025-06-24T21:11:21.753545Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.753598Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.753710Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 52776558133242 Reserved to# 61572651155449 2025-06-24T21:11:21.753756Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:97:2130] 
TEvAllocateResult from# 52776558133242 to# 61572651155449 expected SUCCESS 2025-06-24T21:11:21.754227Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:101:2134] requested range size#8796093022207 2025-06-24T21:11:21.754583Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.754648Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.754731Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 61572651155449 Reserved to# 70368744177656 2025-06-24T21:11:21.754791Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:101:2134] TEvAllocateResult from# 61572651155449 to# 70368744177656 expected SUCCESS 2025-06-24T21:11:21.755310Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:105:2138] requested range size#8796093022207 2025-06-24T21:11:21.755625Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.755714Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.755809Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 70368744177656 Reserved to# 79164837199863 2025-06-24T21:11:21.755859Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:105:2138] TEvAllocateResult from# 70368744177656 to# 79164837199863 expected SUCCESS 2025-06-24T21:11:21.756340Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:109:2142] requested range size#8796093022207 2025-06-24T21:11:21.756867Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.756942Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.757023Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Suc ... 
node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:157:2190] TEvAllocateResult from# 184717953466347 to# 193514046488554 expected SUCCESS 2025-06-24T21:11:21.773529Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:161:2194] requested range size#8796093022207 2025-06-24T21:11:21.773854Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:25:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.774019Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:25:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.774131Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 193514046488554 Reserved to# 202310139510761 2025-06-24T21:11:21.774171Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:161:2194] TEvAllocateResult from# 193514046488554 to# 202310139510761 expected SUCCESS 2025-06-24T21:11:21.774880Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:165:2198] requested range size#8796093022207 2025-06-24T21:11:21.775232Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:26:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.775306Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:26:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.775395Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 202310139510761 Reserved to# 211106232532968 2025-06-24T21:11:21.775458Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:165:2198] TEvAllocateResult from# 202310139510761 to# 211106232532968 expected SUCCESS 2025-06-24T21:11:21.776139Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:169:2202] requested range size#8796093022207 2025-06-24T21:11:21.776485Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:27:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.776541Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:27:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.776627Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 211106232532968 Reserved to# 219902325555175 2025-06-24T21:11:21.776664Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:169:2202] TEvAllocateResult from# 211106232532968 to# 219902325555175 expected SUCCESS 2025-06-24T21:11:21.777398Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:173:2206] requested range size#8796093022207 2025-06-24T21:11:21.777766Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# 
[72057594046447617:2:28:1:24576:75:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.777824Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:28:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.778005Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 219902325555175 Reserved to# 228698418577382 2025-06-24T21:11:21.778044Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:173:2206] TEvAllocateResult from# 219902325555175 to# 228698418577382 expected SUCCESS 2025-06-24T21:11:21.778807Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:177:2210] requested range size#8796093022207 2025-06-24T21:11:21.779230Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:29:1:24576:73:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.779294Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:29:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.779394Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 228698418577382 Reserved to# 237494511599589 2025-06-24T21:11:21.779435Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:177:2210] TEvAllocateResult from# 228698418577382 to# 237494511599589 expected SUCCESS 2025-06-24T21:11:21.780260Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:181:2214] requested range size#8796093022207 2025-06-24T21:11:21.780640Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:30:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.780700Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:30:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.780799Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 237494511599589 Reserved to# 246290604621796 2025-06-24T21:11:21.780877Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:181:2214] TEvAllocateResult from# 237494511599589 to# 246290604621796 expected SUCCESS 2025-06-24T21:11:21.781631Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:185:2218] requested range size#8796093022207 2025-06-24T21:11:21.781947Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:31:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.782038Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:31:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.782123Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 
246290604621796 Reserved to# 255086697644003 2025-06-24T21:11:21.782201Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:185:2218] TEvAllocateResult from# 246290604621796 to# 255086697644003 expected SUCCESS 2025-06-24T21:11:21.783025Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:189:2222] requested range size#8796093022207 2025-06-24T21:11:21.783620Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:32:1:24576:75:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.783672Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:32:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.783760Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 255086697644003 Reserved to# 263882790666210 2025-06-24T21:11:21.783794Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:189:2222] TEvAllocateResult from# 255086697644003 to# 263882790666210 expected SUCCESS 2025-06-24T21:11:21.784466Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:193:2226] requested range size#8796093022207 2025-06-24T21:11:21.784786Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:33:1:24576:77:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.784879Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:33:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.784962Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 263882790666210 Reserved to# 272678883688417 2025-06-24T21:11:21.784993Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:193:2226] TEvAllocateResult from# 263882790666210 to# 272678883688417 expected SUCCESS 2025-06-24T21:11:21.785560Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:197:2230] requested range size#8796093022207 2025-06-24T21:11:21.785826Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:34:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.785904Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:34:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.785973Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 272678883688417 Reserved to# 281474976710624 2025-06-24T21:11:21.786009Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:197:2230] TEvAllocateResult from# 272678883688417 to# 281474976710624 expected SUCCESS 2025-06-24T21:11:21.786634Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:201:2234] requested range size#31 2025-06-24T21:11:21.786893Z node 1 :TABLET_MAIN 
DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:35:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.786944Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:35:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.786999Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 281474976710624 Reserved to# 281474976710655 2025-06-24T21:11:21.787028Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:201:2234] TEvAllocateResult from# 281474976710624 to# 281474976710655 expected SUCCESS 2025-06-24T21:11:21.787651Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:205:2238] requested range size#1 2025-06-24T21:11:21.787737Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 281474976710655 Reserved to# 0 2025-06-24T21:11:21.787777Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:205:2238] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestImposibleSize [GOOD] Test command err: 2025-06-24T21:11:21.699643Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-24T21:11:21.700214Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-24T21:11:21.702303Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-24T21:11:21.718569Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.722480Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-24T21:11:21.736309Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.736446Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.736522Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-24T21:11:21.736634Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.736738Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.736802Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-24T21:11:21.736882Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-24T21:11:21.740032Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:72:2106] requested range size#281474976710656 2025-06-24T21:11:21.741253Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 0 Reserved to# 0 2025-06-24T21:11:21.743626Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:72:2106] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE 2025-06-24T21:11:21.743977Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:76:2109] requested range size#123456 2025-06-24T21:11:21.744355Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.744394Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.744461Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 123456 2025-06-24T21:11:21.744487Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:76:2109] TEvAllocateResult from# 0 to# 123456 expected SUCCESS 2025-06-24T21:11:21.744780Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:80:2113] requested range size#281474976587200 2025-06-24T21:11:21.744876Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved 
from# 123456 Reserved to# 0 2025-06-24T21:11:21.744902Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:80:2113] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE 2025-06-24T21:11:21.745197Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:83:2116] requested range size#246912 2025-06-24T21:11:21.745440Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.745486Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.745551Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 123456 Reserved to# 370368 2025-06-24T21:11:21.745581Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:83:2116] TEvAllocateResult from# 123456 to# 370368 expected SUCCESS 2025-06-24T21:11:21.745886Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:87:2120] requested range size#281474976340288 2025-06-24T21:11:21.745970Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 370368 Reserved to# 0 2025-06-24T21:11:21.745995Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:87:2120] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE |94.9%| [TA] $(B)/ydb/core/kqp/ut/cost/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestAllocateAll [GOOD] Test command err: 2025-06-24T21:11:21.698650Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-24T21:11:21.700182Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-24T21:11:21.702301Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-24T21:11:21.718454Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.722485Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-24T21:11:21.735711Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.735840Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.735925Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-24T21:11:21.736036Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.736162Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.736238Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-24T21:11:21.736306Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-24T21:11:21.740041Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:72:2106] requested range size#281474976710655 2025-06-24T21:11:21.742622Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.742672Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.742740Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 281474976710655 2025-06-24T21:11:21.742782Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:72:2106] TEvAllocateResult from# 0 to# 281474976710655 expected SUCCESS 2025-06-24T21:11:21.745280Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:77:2110] requested range size#1 2025-06-24T21:11:21.745407Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 281474976710655 Reserved to# 0 2025-06-24T21:11:21.745448Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:77:2110] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnDeadShard [GOOD] Test command err: 2025-06-24T21:11:06.762371Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] 
ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:06.762704Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:06.762822Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036fe/r3tmp/tmprP4Brn/pdisk_1.dat 2025-06-24T21:11:07.015968Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:07.022117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:07.064979Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:07.073663Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799464323090 != 1750799464323094 2025-06-24T21:11:07.117657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:07.117739Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:07.129026Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:07.214177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.263507Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:647:2545] 2025-06-24T21:11:07.263695Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.305945Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.306152Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.307639Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:07.307705Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:07.307760Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:07.308078Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.308279Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.308327Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:676:2545] in generation 1 2025-06-24T21:11:07.309443Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:650:2547] 2025-06-24T21:11:07.309605Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.316790Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.317113Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:652:2549] 2025-06-24T21:11:07.317252Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.323070Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.324174Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-24T21:11:07.324237Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-24T21:11:07.324282Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-24T21:11:07.324524Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.324745Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.324780Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:697:2547] in generation 1 2025-06-24T21:11:07.325186Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.325242Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.326349Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-24T21:11:07.326396Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-24T21:11:07.326437Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-24T21:11:07.326633Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.326720Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.326782Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:698:2549] in generation 1 2025-06-24T21:11:07.337593Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.372959Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:07.373199Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.373326Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:702:2576] 2025-06-24T21:11:07.373378Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.373417Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:07.373455Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.373554Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.373591Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-24T21:11:07.373674Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.373730Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:703:2577] 2025-06-24T21:11:07.373807Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-24T21:11:07.373835Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-24T21:11:07.373860Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:11:07.374228Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.374265Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-24T21:11:07.374323Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.374390Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:704:2578] 2025-06-24T21:11:07.374425Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-24T21:11:07.374449Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-24T21:11:07.374471Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:11:07.374825Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:07.374989Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:07.375041Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-24T21:11:07.375088Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-24T21:11:07.375230Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.375270Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.375327Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:07.375376Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.375428Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:11:07.375473Z node 1 :TX_DATASHARD DEBUG: 
datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.375503Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-24T21:11:07.375545Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:11:07.375687Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:636:2540], serverId# [1:664:2554], sessionId# [0:0:0] 2025-06-24T21:11:07.375741Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:637:2541], serverId# [1:670:2559], sessionId# [0:0:0] 2025-06-24T21:11:07.375772Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037890 2025-06-24T21:11:07.375832Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037890 2025-06-24T21:11:07.375987Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cp ... d__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-24T21:11:20.390943Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-24T21:11:20.391008Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037890 2025-06-24T21:11:20.391103Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:784: [DistEraser] [3:1049:2788] HandlePropose TEvDataShard::TEvProposeTransactionResult: txId# 281474976715662, shard# 72075186224037889, status# 1 2025-06-24T21:11:20.391256Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037890 consumer 72075186224037890 txId 281474976715661 2025-06-24T21:11:20.391328Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037890 consumer 72075186224037890 txId 281474976715661 2025-06-24T21:11:20.391375Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:784: [DistEraser] [3:1049:2788] HandlePropose TEvDataShard::TEvProposeTransactionResult: txId# 281474976715662, shard# 72075186224037890, status# 1 2025-06-24T21:11:20.391426Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2025-06-24T21:11:20.391459Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:20.391522Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 281474976715661 2025-06-24T21:11:20.391560Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037890 source 72075186224037890 dest 72075186224037888 consumer 72075186224037888 txId 281474976715661 2025-06-24T21:11:20.391602Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:784: [DistEraser] [3:1049:2788] HandlePropose TEvDataShard::TEvProposeTransactionResult: txId# 281474976715662, shard# 72075186224037888, status# 1 2025-06-24T21:11:20.391635Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:901: [DistEraser] [3:1049:2788] Register plan: txId# 281474976715662, 
minStep# 1545, maxStep# 31545 2025-06-24T21:11:20.426640Z node 3 :TX_DATASHARD INFO: datashard.cpp:190: OnDetach: 72075186224037888 2025-06-24T21:11:20.427558Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-06-24T21:11:20.429903Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3635: Client pipe to tablet 72075186224037888 from 72075186224037889 is reset 2025-06-24T21:11:20.429960Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3635: Client pipe to tablet 72075186224037888 from 72075186224037890 is reset 2025-06-24T21:11:20.430124Z node 3 :TX_DATASHARD ERROR: datashard_distributed_erase.cpp:167: [DistEraser] [3:1049:2788] Reply: txId# 281474976715662, status# SHARD_UNKNOWN, error# Tx state unknown: reason# lost pipe while waiting for reply (plan), txId# 281474976715662, shard# 72075186224037888 2025-06-24T21:11:20.430816Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037889 2025-06-24T21:11:20.430869Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037889 2025-06-24T21:11:20.431223Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:11:20.431279Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:20.431320Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 1 2025-06-24T21:11:20.431376Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:11:20.431463Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [3:1043:2783], serverId# [3:1044:2784], sessionId# [0:0:0] 2025-06-24T21:11:20.452503Z node 3 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [3:1060:2798] 2025-06-24T21:11:20.452759Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:20.456808Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:20.457782Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:20.460040Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:20.460124Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:20.460188Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:20.460607Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:20.461008Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:20.461084Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [3:1075:2798] in generation 2 2025-06-24T21:11:20.472884Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:20.473012Z node 3 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037888 2025-06-24T21:11:20.473132Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators 
count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:20.473422Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [3:1078:2806] 2025-06-24T21:11:20.473470Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:20.473521Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:20.473564Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:20.473802Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:711: TxInitSchemaDefaults.Execute 2025-06-24T21:11:20.473981Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:723: TxInitSchemaDefaults.Complete 2025-06-24T21:11:20.475061Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:20.475183Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:20.475343Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 1544 2025-06-24T21:11:20.475395Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:20.475565Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:20.475664Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:20.475709Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:20.475752Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 1 2025-06-24T21:11:20.475802Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:20.475903Z node 3 :TX_DATASHARD DEBUG: datashard__progress_resend_rs.cpp:14: Start TTxProgressResendRS at tablet 72075186224037888 2025-06-24T21:11:20.475950Z node 3 :TX_DATASHARD INFO: datashard.cpp:4101: Resend RS at 72075186224037888 from 72075186224037888 to 72075186224037889 txId 281474976715661 2025-06-24T21:11:20.475996Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 1 at 72075186224037888 from 72075186224037888 to 72075186224037889 txId 281474976715661 2025-06-24T21:11:20.476171Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 1500 next step 1544 2025-06-24T21:11:20.476270Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037888 dest 72075186224037889 producer 72075186224037888 txId 281474976715661 2025-06-24T21:11:20.476354Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 1544 txid# 281474976715661 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-24T21:11:20.476413Z node 3 :TX_DATASHARD NOTICE: 
datashard_pipeline.cpp:734: Outdated readset for 1544:281474976715661 at 72075186224037889 2025-06-24T21:11:20.476463Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-24T21:11:20.476543Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Ack at 72075186224037889 {TEvReadSet step# 1544 txid# 281474976715661 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-24T21:11:20.476617Z node 3 :TX_DATASHARD DEBUG: datashard__progress_resend_rs.cpp:14: Start TTxProgressResendRS at tablet 72075186224037888 2025-06-24T21:11:20.476645Z node 3 :TX_DATASHARD INFO: datashard.cpp:4101: Resend RS at 72075186224037888 from 72075186224037888 to 72075186224037890 txId 281474976715661 2025-06-24T21:11:20.476674Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 2 at 72075186224037888 from 72075186224037888 to 72075186224037890 txId 281474976715661 2025-06-24T21:11:20.476870Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 281474976715661 2025-06-24T21:11:20.476939Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037888 dest 72075186224037890 producer 72075186224037888 txId 281474976715661 2025-06-24T21:11:20.476990Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 1544 txid# 281474976715661 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 2 Flags# 0} 2025-06-24T21:11:20.477025Z node 3 :TX_DATASHARD NOTICE: datashard_pipeline.cpp:734: Outdated readset for 1544:281474976715661 at 72075186224037890 2025-06-24T21:11:20.477057Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-24T21:11:20.477091Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Ack at 72075186224037890 {TEvReadSet step# 1544 txid# 281474976715661 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 2 Flags# 0} 2025-06-24T21:11:20.477136Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037890 consumer 72075186224037890 txId 281474976715661 >> TTxLocatorTest::TestWithReboot [GOOD] |95.0%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/cost/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestSignificantRequestWhenRunReserveTx [GOOD] Test command err: 2025-06-24T21:11:21.696867Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-24T21:11:21.700174Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-24T21:11:21.702282Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-24T21:11:21.718539Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.722485Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-24T21:11:21.737472Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.737633Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.737737Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-24T21:11:21.737907Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.738051Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.738161Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-24T21:11:21.738268Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-24T21:11:21.740873Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:80:2114] requested range size#100000 2025-06-24T21:11:21.742645Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:82:2116] requested range size#100000 2025-06-24T21:11:21.744431Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:84:2118] requested range size#100000 2025-06-24T21:11:21.744893Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:86:2120] requested range size#100000 2025-06-24T21:11:21.745356Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:88:2122] requested range size#100000 2025-06-24T21:11:21.745829Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.746005Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:90:2124] requested range size#100000 2025-06-24T21:11:21.746245Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.746410Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:72:2106] requested range size#100000 2025-06-24T21:11:21.746608Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 
0} 2025-06-24T21:11:21.746711Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.746932Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.747015Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:74:2108] requested range size#100000 2025-06-24T21:11:21.747244Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.747430Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.747539Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:76:2110] requested range size#100000 2025-06-24T21:11:21.747721Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.747916Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:78:2112] requested range size#100000 2025-06-24T21:11:21.748131Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.748290Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 100000 2025-06-24T21:11:21.748339Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:80:2114] TEvAllocateResult from# 0 to# 100000 2025-06-24T21:11:21.748461Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.748537Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 100000 Reserved to# 200000 2025-06-24T21:11:21.748580Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:82:2116] TEvAllocateResult from# 100000 to# 200000 2025-06-24T21:11:21.748724Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.748827Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.748944Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 200000 Reserved to# 300000 2025-06-24T21:11:21.748984Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:84:2118] TEvAllocateResult from# 200000 to# 300000 2025-06-24T21:11:21.749127Z node 1 
:TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.749237Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 300000 Reserved to# 400000 2025-06-24T21:11:21.749267Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:86:2120] TEvAllocateResult from# 300000 to# 400000 2025-06-24T21:11:21.749333Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 400000 Reserved to# 500000 2025-06-24T21:11:21.749380Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:88:2122] TEvAllocateResult from# 400000 to# 500000 2025-06-24T21:11:21.749488Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.749554Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.749599Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.749647Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 500000 Reserved to# 600000 2025-06-24T21:11:21.749672Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:90:2124] TEvAllocateResult from# 500000 to# 600000 2025-06-24T21:11:21.749819Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.749872Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 600000 Reserved to# 700000 2025-06-24T21:11:21.749897Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:72:2106] TEvAllocateResult from# 600000 to# 700000 2025-06-24T21:11:21.750031Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.750102Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 700000 Reserved to# 800000 2025-06-24T21:11:21.750126Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:74:2108] TEvAllocateResult from# 700000 to# 800000 2025-06-24T21:11:21.750253Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.750295Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 800000 Reserved to# 900000 2025-06-24T21:11:21.750320Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 
72057594046447617 Send to Sender# [1:76:2110] TEvAllocateResult from# 800000 to# 900000 2025-06-24T21:11:21.750468Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.750570Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 900000 Reserved to# 1000000 2025-06-24T21:11:21.750626Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:78:2112] TEvAllocateResult from# 900000 to# 1000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS 2025-06-24T21:11:21.755682Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 720575 ... Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.829163Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8600000 Reserved to# 8700000 2025-06-24T21:11:21.829189Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:405:2438] TEvAllocateResult from# 8600000 to# 8700000 2025-06-24T21:11:21.829309Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8700000 Reserved to# 8800000 2025-06-24T21:11:21.829342Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:407:2440] TEvAllocateResult from# 8700000 to# 8800000 2025-06-24T21:11:21.829479Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:92:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.829526Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8800000 Reserved to# 8900000 2025-06-24T21:11:21.829550Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:409:2442] TEvAllocateResult from# 8800000 to# 8900000 2025-06-24T21:11:21.829654Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:92:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.829769Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8900000 Reserved to# 9000000 2025-06-24T21:11:21.829807Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:411:2444] TEvAllocateResult from# 8900000 to# 9000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS 2025-06-24T21:11:21.834282Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:439:2472] requested range size#100000 2025-06-24T21:11:21.834691Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:441:2474] requested range size#100000 2025-06-24T21:11:21.835226Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# 
[1:443:2476] requested range size#100000 2025-06-24T21:11:21.835541Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:93:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.835730Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:445:2478] requested range size#100000 2025-06-24T21:11:21.835880Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:93:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.836080Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:94:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.836248Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:447:2480] requested range size#100000 2025-06-24T21:11:21.836470Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:94:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.836793Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:449:2482] requested range size#100000 2025-06-24T21:11:21.837134Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:95:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.837295Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:433:2466] requested range size#100000 2025-06-24T21:11:21.837443Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:95:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.837610Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:96:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.837725Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:451:2484] requested range size#100000 2025-06-24T21:11:21.837937Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:96:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.838127Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:97:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.838370Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:97:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.838511Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:435:2468] requested range size#100000 2025-06-24T21:11:21.838670Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:98:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.838837Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE 
TEvAllocate Sender# [1:437:2470] requested range size#100000 2025-06-24T21:11:21.838997Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:98:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.839192Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9000000 Reserved to# 9100000 2025-06-24T21:11:21.839246Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:439:2472] TEvAllocateResult from# 9000000 to# 9100000 2025-06-24T21:11:21.839350Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:99:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.839486Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9100000 Reserved to# 9200000 2025-06-24T21:11:21.839516Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:441:2474] TEvAllocateResult from# 9100000 to# 9200000 2025-06-24T21:11:21.839585Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:99:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.839742Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9200000 Reserved to# 9300000 2025-06-24T21:11:21.839784Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:443:2476] TEvAllocateResult from# 9200000 to# 9300000 2025-06-24T21:11:21.839844Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:100:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.840028Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9300000 Reserved to# 9400000 2025-06-24T21:11:21.840066Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:445:2478] TEvAllocateResult from# 9300000 to# 9400000 2025-06-24T21:11:21.840127Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:100:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.840291Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9400000 Reserved to# 9500000 2025-06-24T21:11:21.840319Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:447:2480] TEvAllocateResult from# 9400000 to# 9500000 2025-06-24T21:11:21.840431Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9500000 Reserved to# 9600000 2025-06-24T21:11:21.840454Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:449:2482] TEvAllocateResult from# 9500000 to# 9600000 2025-06-24T21:11:21.840508Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:101:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 
2025-06-24T21:11:21.840626Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9600000 Reserved to# 9700000 2025-06-24T21:11:21.840669Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:433:2466] TEvAllocateResult from# 9600000 to# 9700000 2025-06-24T21:11:21.840733Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:101:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.840778Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:102:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.840909Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9700000 Reserved to# 9800000 2025-06-24T21:11:21.840939Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:451:2484] TEvAllocateResult from# 9700000 to# 9800000 2025-06-24T21:11:21.840993Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:102:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.841109Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9800000 Reserved to# 9900000 2025-06-24T21:11:21.841144Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:435:2468] TEvAllocateResult from# 9800000 to# 9900000 2025-06-24T21:11:21.841248Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9900000 Reserved to# 10000000 2025-06-24T21:11:21.841290Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:437:2470] TEvAllocateResult from# 9900000 to# 10000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestWithReboot [GOOD] Test command err: 2025-06-24T21:11:21.698420Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-24T21:11:21.700160Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-24T21:11:21.702272Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-24T21:11:21.718419Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.722485Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-24T21:11:21.737283Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.737473Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.737580Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-24T21:11:21.737725Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.737868Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.737960Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-24T21:11:21.738052Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-24T21:11:21.740843Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:80:2114] requested range size#100000 2025-06-24T21:11:21.742625Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:82:2116] requested range size#100000 2025-06-24T21:11:21.744382Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:84:2118] requested range size#100000 2025-06-24T21:11:21.744817Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:86:2120] requested range size#100000 2025-06-24T21:11:21.745268Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:88:2122] requested range size#100000 2025-06-24T21:11:21.745632Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.745801Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:90:2124] requested range size#100000 2025-06-24T21:11:21.746012Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.746158Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:72:2106] requested range size#100000 2025-06-24T21:11:21.746330Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 
0} 2025-06-24T21:11:21.746417Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.746597Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.746669Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:74:2108] requested range size#100000 2025-06-24T21:11:21.746866Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.747036Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.747132Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:76:2110] requested range size#100000 2025-06-24T21:11:21.747298Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.747447Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:78:2112] requested range size#100000 2025-06-24T21:11:21.747654Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.747799Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 100000 2025-06-24T21:11:21.747841Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:80:2114] TEvAllocateResult from# 0 to# 100000 2025-06-24T21:11:21.747979Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.748040Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 100000 Reserved to# 200000 2025-06-24T21:11:21.748064Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:82:2116] TEvAllocateResult from# 100000 to# 200000 2025-06-24T21:11:21.748210Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.748294Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.748386Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 200000 Reserved to# 300000 2025-06-24T21:11:21.748429Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:84:2118] TEvAllocateResult from# 200000 to# 300000 2025-06-24T21:11:21.748559Z node 1 
:TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.748641Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 300000 Reserved to# 400000 2025-06-24T21:11:21.748665Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:86:2120] TEvAllocateResult from# 300000 to# 400000 2025-06-24T21:11:21.748716Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 400000 Reserved to# 500000 2025-06-24T21:11:21.748753Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:88:2122] TEvAllocateResult from# 400000 to# 500000 2025-06-24T21:11:21.748868Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.748925Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.748966Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.749009Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 500000 Reserved to# 600000 2025-06-24T21:11:21.749030Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:90:2124] TEvAllocateResult from# 500000 to# 600000 2025-06-24T21:11:21.749159Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.749205Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 600000 Reserved to# 700000 2025-06-24T21:11:21.749227Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:72:2106] TEvAllocateResult from# 600000 to# 700000 2025-06-24T21:11:21.749312Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.749379Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 700000 Reserved to# 800000 2025-06-24T21:11:21.749404Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:74:2108] TEvAllocateResult from# 700000 to# 800000 2025-06-24T21:11:21.749525Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.749562Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 800000 Reserved to# 900000 2025-06-24T21:11:21.749587Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 
72057594046447617 Send to Sender# [1:76:2110] TEvAllocateResult from# 800000 to# 900000 2025-06-24T21:11:21.749719Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:21.749806Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 900000 Reserved to# 1000000 2025-06-24T21:11:21.749863Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:78:2112] TEvAllocateResult from# 900000 to# 1000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS 2025-06-24T21:11:21.757796Z node 1 :TABLET_MAIN NOTICE: tablet_sys.cpp:1849: Tablet: 7205759404 ... 1:8:1:24576:75:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:22.085005Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9400000 Reserved to# 9500000 2025-06-24T21:11:22.085030Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:627:2555] TEvAllocateResult from# 9400000 to# 9500000 2025-06-24T21:11:22.085126Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:8:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:22.085198Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9500000 Reserved to# 9600000 2025-06-24T21:11:22.085240Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:629:2557] TEvAllocateResult from# 9500000 to# 9600000 2025-06-24T21:11:22.085361Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:9:1:24576:78:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:22.085417Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9600000 Reserved to# 9700000 2025-06-24T21:11:22.085442Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:631:2559] TEvAllocateResult from# 9600000 to# 9700000 2025-06-24T21:11:22.085533Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:9:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:22.085648Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:10:1:24576:78:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:22.085714Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9700000 Reserved to# 9800000 2025-06-24T21:11:22.085770Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:633:2561] TEvAllocateResult from# 9700000 to# 9800000 2025-06-24T21:11:22.085869Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:10:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 
2025-06-24T21:11:22.085961Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9800000 Reserved to# 9900000 2025-06-24T21:11:22.085986Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:635:2563] TEvAllocateResult from# 9800000 to# 9900000 2025-06-24T21:11:22.086058Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:11:1:24576:72:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:22.086167Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:11:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:22.086236Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9900000 Reserved to# 10000000 2025-06-24T21:11:22.086267Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:637:2565] TEvAllocateResult from# 9900000 to# 10000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS 2025-06-24T21:11:22.090775Z node 1 :TABLET_MAIN NOTICE: tablet_sys.cpp:1849: Tablet: 72057594046447617 Type: TxAllocator, EReason: ReasonPill, SuggestedGeneration: 0, KnownGeneration: 11 Marker# TSYS31 2025-06-24T21:11:22.092192Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:811: Tablet: 72057594046447617 HandleStateStorageInfoResolve, KnownGeneration: 11 Promote Marker# TSYS16 2025-06-24T21:11:22.092898Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:421: TabletId# 72057594046447617 TTabletReqRebuildHistoryGraph::ProcessKeyEntry, LastBlobID: [72057594046447617:11:11:0:0:71:0] Snap: 11:1 for 72057594046447617 Marker# TRRH04 2025-06-24T21:11:22.092997Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:11:0:0:71:0], refs: [[72057594046447617:11:11:1:24576:72:0],] for 72057594046447617 2025-06-24T21:11:22.093182Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:1:0:0:42:0], refs: [[72057594046447617:11:1:1:28672:1483:0],] for 72057594046447617 2025-06-24T21:11:22.093226Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:2:0:0:69:0], refs: [[72057594046447617:11:2:1:24576:76:0],] for 72057594046447617 2025-06-24T21:11:22.093271Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:3:0:0:71:0], refs: [[72057594046447617:11:3:1:24576:78:0],] for 72057594046447617 2025-06-24T21:11:22.093310Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:4:0:0:71:0], refs: [[72057594046447617:11:4:1:24576:75:0],] for 72057594046447617 2025-06-24T21:11:22.093344Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id 
[72057594046447617:11:5:0:0:71:0], refs: [[72057594046447617:11:5:1:24576:78:0],] for 72057594046447617 2025-06-24T21:11:22.093432Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:6:0:0:71:0], refs: [[72057594046447617:11:6:1:24576:78:0],] for 72057594046447617 2025-06-24T21:11:22.093467Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:7:0:0:71:0], refs: [[72057594046447617:11:7:1:24576:78:0],] for 72057594046447617 2025-06-24T21:11:22.093507Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:8:0:0:71:0], refs: [[72057594046447617:11:8:1:24576:75:0],] for 72057594046447617 2025-06-24T21:11:22.093545Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:9:0:0:71:0], refs: [[72057594046447617:11:9:1:24576:78:0],] for 72057594046447617 2025-06-24T21:11:22.093612Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:10:0:0:71:0], refs: [[72057594046447617:11:10:1:24576:78:0],] for 72057594046447617 2025-06-24T21:11:22.093745Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:625: TabletId# 72057594046447617 TTabletReqRebuildHistoryGraph::BuildHistory - Process generation 11 from 1 with 11 steps Marker# TRRH09 2025-06-24T21:11:22.093781Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:1:1:28672:1483:0],] for 72057594046447617 2025-06-24T21:11:22.093825Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:2:1:24576:76:0],] for 72057594046447617 2025-06-24T21:11:22.093857Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:3:1:24576:78:0],] for 72057594046447617 2025-06-24T21:11:22.093880Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:4:1:24576:75:0],] for 72057594046447617 2025-06-24T21:11:22.093916Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:5:1:24576:78:0],] for 72057594046447617 2025-06-24T21:11:22.093967Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:6:1:24576:78:0],] for 72057594046447617, Gc+: [[72057594046447617:11:6:1:24576:78:0],] 2025-06-24T21:11:22.094001Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:7:1:24576:78:0],] for 72057594046447617, Gc+: [[72057594046447617:11:7:1:24576:78:0],] 2025-06-24T21:11:22.094029Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: 
[[72057594046447617:11:8:1:24576:75:0],] for 72057594046447617, Gc+: [[72057594046447617:11:8:1:24576:75:0],] 2025-06-24T21:11:22.094061Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:9:1:24576:78:0],] for 72057594046447617, Gc+: [[72057594046447617:11:9:1:24576:78:0],] 2025-06-24T21:11:22.094089Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:10:1:24576:78:0],] for 72057594046447617, Gc+: [[72057594046447617:11:10:1:24576:78:0],] 2025-06-24T21:11:22.094116Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:11:1:24576:72:0],] for 72057594046447617, Gc+: [[72057594046447617:11:11:1:24576:72:0],] 2025-06-24T21:11:22.094408Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. logid# [72057594046447617:12:0:0:0:0:0] Marker# TSYS01 2025-06-24T21:11:22.095798Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:12:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:22.099385Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-24T21:11:22.099607Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-24T21:11:22.100408Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 12, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-24T21:11:22.100477Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:12:1:1:28672:1639:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:22.100566Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:12:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:11:22.100651Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 12:0 Marker# TSYS28 >> KqpExtractPredicateLookup::PointJoin [GOOD] >> KqpExtractPredicateLookup::SqlInJoin >> TBalanceCoverageBuilderTest::TestSplitWithPartialMergeAll [GOOD] >> TBalanceCoverageBuilderTest::TestComplexSplitWithDuplicates [GOOD] >> TBalanceCoverageBuilderTest::TestZeroTracks [GOOD] |95.0%| [TA] $(B)/ydb/core/tx/tx_allocator/ut/test-results/unittest/{meta.json ... results_accumulator.log} |95.0%| [TA] {RESULT} $(B)/ydb/core/tx/tx_allocator/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TBalanceCoverageBuilderTest::TestSplitWithPartialMergeOne [GOOD] >> TBalanceCoverageBuilderTest::TestEmpty [GOOD] >> TBalanceCoverageBuilderTest::TestSplitWithMergeBack [GOOD] >> TBalanceCoverageBuilderTest::TestSimpleSplit [GOOD] >> TBalanceCoverageBuilderTest::TestComplexSplit [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-5 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-6 |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestSplitWithPartialMergeAll [GOOD] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestComplexSplitWithDuplicates [GOOD] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestZeroTracks [GOOD] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestSplitWithPartialMergeOne [GOOD] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestSplitWithMergeBack [GOOD] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestComplexSplit [GOOD] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestSimpleSplit [GOOD] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestEmpty [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-13 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-14 |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/balance_coverage/ut/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-6 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-7 |95.0%| [TA] $(B)/ydb/core/tx/balance_coverage/ut/test-results/unittest/{meta.json ... results_accumulator.log} |95.0%| [TA] {RESULT} $(B)/ydb/core/tx/balance_coverage/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-29 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-30 >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataSplitThenPublish >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflow >> TPQCDTest::TestUnavailableWithoutBoth [GOOD] >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataPublishThenSplit >> TTxDataShardUploadRows::TestUploadShadowRows >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-41 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-42 >> TPQCDTest::TestUnavailableWithoutClustersList [GOOD] >> TTxDataShardUploadRows::TestUploadRows >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-35 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-36 >> TPQCDTest::TestRelatedServicesAreRunning [GOOD] >> KqpExtractPredicateLookup::ComplexRange [GOOD] >> TPQCDTest::TestPrioritizeLocalDatacenter [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestUnavailableWithoutClustersList [GOOD] Test command err: 2025-06-24T21:11:20.092797Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626512227830413:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:20.092872Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0023f6/r3tmp/tmpzyAaw3/pdisk_1.dat 2025-06-24T21:11:20.454232Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:20.455323Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626512227830379:2079] 1750799480088411 != 1750799480088414 TServer::EnableGrpc on GrpcPort 20339, node 1 2025-06-24T21:11:20.509600Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:20.509766Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:20.511355Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:20.663232Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0023f6/r3tmp/yandexNwdU4s.tmp 2025-06-24T21:11:20.663271Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0023f6/r3tmp/yandexNwdU4s.tmp 2025-06-24T21:11:20.664313Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0023f6/r3tmp/yandexNwdU4s.tmp 2025-06-24T21:11:20.664456Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:21.104055Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:22.580676Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: 
[WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626520817765645:2318], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.580693Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626520817765653:2321], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.580774Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.585146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:22.600233Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626520817765659:2322], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710657 completed, doublechecking } 2025-06-24T21:11:22.719405Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626520817765720:2348] txid# 281474976710658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:23.033232Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626520817765737:2328], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:11:23.033490Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OWIzZTFjZDMtMzA3MmQ3OWItNjc2NWJiMGEtYjVlMjhiNzM=, ActorId: [1:7519626520817765643:2317], ActorState: ExecuteState, TraceId: 01jyhwfzpj0czhdxc83pm7yj31, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:11:23.057822Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestUnavailableWithoutBoth [GOOD] Test command err: 2025-06-24T21:11:20.122638Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626508698140716:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:20.122873Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002403/r3tmp/tmp4d9EyG/pdisk_1.dat 2025-06-24T21:11:20.436059Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:20.438026Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626508698140698:2079] 1750799480121550 != 1750799480121553 2025-06-24T21:11:20.503563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:20.503680Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 16167, node 1 2025-06-24T21:11:20.505411Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:20.663351Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:20.663379Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:20.663388Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:20.663526Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:21.130769Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:22.520563Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] 
[TPoolFetcherActor] ActorId: [1:7519626517288075967:2319], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.520574Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626517288075955:2316], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.520715Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.529211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:22.549492Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626517288075970:2320], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710657 completed, doublechecking } 2025-06-24T21:11:22.668117Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626517288076031:2346] txid# 281474976710658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:23.019384Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626517288076052:2328], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:11:23.019740Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MTFiZmNmMzYtNWNjYjhkNDctZDAxYTRhZTQtY2FjNTA2NTU=, ActorId: [1:7519626517288075954:2315], ActorState: ExecuteState, TraceId: 01jyhwfzmp526tcghpz54hxpcj, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:11:23.046896Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:11:24.071618Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626525878010726:2361], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:11:24.071876Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YTNhMDZmNGYtYjJhYzYzMTgtY2UzYTE0NzgtZThiYWVhNTg=, ActorId: [1:7519626525878010719:2357], ActorState: ExecuteState, TraceId: 01jyhwg14h8055j500xpe0evv8, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:11:24.072343Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestRelatedServicesAreRunning [GOOD] Test command err: 2025-06-24T21:11:20.092985Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626511393022943:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:20.093064Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0023ac/r3tmp/tmp0QZUMA/pdisk_1.dat 2025-06-24T21:11:20.448296Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:20.470736Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626511393022908:2079] 1750799480088254 != 1750799480088257 2025-06-24T21:11:20.479217Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:20.479331Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:20.482992Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18269, node 1 2025-06-24T21:11:20.663182Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0023ac/r3tmp/yandex8EvOdq.tmp 2025-06-24T21:11:20.663214Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0023ac/r3tmp/yandex8EvOdq.tmp 2025-06-24T21:11:20.664323Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0023ac/r3tmp/yandex8EvOdq.tmp 2025-06-24T21:11:20.664433Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2836 2025-06-24T21:11:21.101086Z node 1 :TX_CONVEYOR ERROR: 
log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; PQClient connected to localhost:18269 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:21.209425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-24T21:11:22.692960Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626519982958210:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.693080Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.693173Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626519982958219:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.696989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:22.706503Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626519982958224:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-24T21:11:22.905082Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626519982958290:2387] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:22.996649Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626519982958298:2301], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:11:22.998410Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NDk4MzNkYTEtMmVkMzdlMGYtYTBlYWM4NzYtZDNhNjExMjU=, ActorId: [1:7519626519982958208:2289], ActorState: ExecuteState, TraceId: 01jyhwfzt3c9v1s4gxcxdxx7c5, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:11:23.000071Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:11:23.025102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:23.123903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:23.199425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-24T21:11:23.514200Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jyhwg0be1gp0wbws65nw2dcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGU0OTY5MzUtYjJjZDQ5OGItNmMyYTBlZjctOWQ5ZDY5OGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> DistributedEraseTests::ConditionalEraseRowsShouldNotFailOnMissingRows [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestPrioritizeLocalDatacenter [GOOD] Test command err: 2025-06-24T21:11:20.252749Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626511492815317:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:20.252941Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0022ec/r3tmp/tmp0yNpBQ/pdisk_1.dat 2025-06-24T21:11:20.569699Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:20.570762Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626511492815298:2079] 1750799480251768 != 1750799480251771 TServer::EnableGrpc on GrpcPort 4556, node 1 2025-06-24T21:11:20.644792Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:20.644915Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:20.646706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:20.663289Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0022ec/r3tmp/yandexklEEBU.tmp 2025-06-24T21:11:20.663333Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0022ec/r3tmp/yandexklEEBU.tmp 2025-06-24T21:11:20.664861Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0022ec/r3tmp/yandexklEEBU.tmp 2025-06-24T21:11:20.665025Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64463 PQClient connected to localhost:4556 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:21.221022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... 2025-06-24T21:11:21.260167Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:11:22.669670Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626520082750596:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.669849Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626520082750602:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.670076Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.674080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:22.684109Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626520082750611:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-24T21:11:22.909097Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626520082750676:2385] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:23.025237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:23.026822Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626520082750684:2303], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:11:23.027165Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=Y2FhNjU0ZDMtNjUwODA2MWYtZDI3MjQ3NDQtYzRjMTBiYjc=, ActorId: [1:7519626520082750594:2291], ActorState: ExecuteState, TraceId: 01jyhwfzsbembvpaqksajfjgz2, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:11:23.029422Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:11:23.141606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:23.211197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-24T21:11:23.514224Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jyhwg0bsbq9x8bh2wbx4kfvj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTI0MWM5NTQtZDA0MWY2ZWItZjY0MjQ4N2QtNWYyOWYxYjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> YdbIndexTable::MultiShardTableOneIndex >> YdbIndexTable::OnlineBuild >> YdbIndexTable::MultiShardTableUniqAndNonUniqIndex >> YdbIndexTable::MultiShardTableOneIndexIndexOverlapDataColumn >> YdbIndexTable::MultiShardTableOneUniqIndex >> TKeyValueTest::TestRewriteThenLastValueNewApi [GOOD] >> TKeyValueTest::TestSetExecutorFastLogPolicy ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpExtractPredicateLookup::ComplexRange [GOOD] Test command err: Trying to start YDB, gRPC: 25644, MsgBus: 32176 2025-06-24T21:09:17.996348Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625983328805385:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:17.996522Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031b7/r3tmp/tmpXq4dYP/pdisk_1.dat 2025-06-24T21:09:18.355821Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25644, node 1 2025-06-24T21:09:18.379158Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:09:18.379251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:09:18.382916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:09:18.434799Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:09:18.434825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:09:18.434835Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:09:18.434975Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32176 TClient is connected to server localhost:32176 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:09:19.010725Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:09:19.052479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:09:19.075401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:09:19.091054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:19.255387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:19.399840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:09:19.482959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:21.272134Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626000508676176:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:21.272505Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:21.587519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:21.624320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:21.653913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:21.684516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:21.739274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:21.779732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:21.854980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:21.915219Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626000508676835:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:21.915295Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:21.915727Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626000508676840:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:21.919604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:21.935919Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626000508676842:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:09:22.001754Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626000508676893:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:22.998731Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625983328805385:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:22.998811Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 63737, MsgBus: 19347 2025-06-24T21:09:25.447107Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626018796335515:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:25.447342Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/run ... rd/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:13.554283Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:13.645143Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:13.893551Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:14.011296Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:11:17.053654Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519626476987429524:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:17.053752Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:18.345382Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626502757234854:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:18.345518Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:18.435935Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.482671Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.532507Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.577503Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.625820Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.734348Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.781696Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:18.970204Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626502757235522:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:18.970357Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626502757235527:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:18.970378Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:18.975520Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:18.988808Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519626502757235529:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:11:19.075192Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519626507052202881:3439] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:21.009125Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:21.057610Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:21.110224Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:21.163415Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:21.225124Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:21.281978Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:21.333052Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:21.383005Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:21.458962Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:21.508410Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> YdbYqlClient::TestYqlWrongTable >> TGRpcYdbTest::CreateTableBadRequest >> TGRpcClientLowTest::GrpcRequestProxy >> TGRpcNewCoordinationClient::SessionSemaphoreInfiniteTimeout >> YdbYqlClient::CreateTableWithPartitionAtKeys >> YdbImport::Simple >> GrpcConnectionStringParserTest::NoDatabaseFlag >> YdbScripting::MultiResults >> TGRpcYdbTest::ExecuteQueryBadRequest >> TGRpcYdbTest::DropTableBadRequest >> YdbOlapStore::ManyTables >> TGRpcYdbTest::MakeListRemoveDirectory >> YdbYqlClient::TestReadTableMultiShardWholeTable >> YdbImport::EmptyData >> YdbYqlClient::TestReadTableMultiShard >> TGRpcYdbTest::ExecuteQueryImplicitSession >> TGRpcYdbTest::RemoveNotExistedDirectory >> YdbLogStore::LogStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> DistributedEraseTests::ConditionalEraseRowsShouldNotFailOnMissingRows [GOOD] Test command err: 2025-06-24T21:11:06.753175Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:06.753571Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:06.753741Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003713/r3tmp/tmpAFIzGk/pdisk_1.dat 2025-06-24T21:11:07.066879Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:07.069256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:07.111777Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:07.119018Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799464323135 != 1750799464323139 2025-06-24T21:11:07.164291Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:07.164391Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:07.175932Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:07.257434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.310367Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:647:2545] 2025-06-24T21:11:07.310562Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.359585Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.359751Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.361050Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:07.361131Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:07.361196Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:07.361537Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.361731Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.361776Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:676:2545] in generation 1 2025-06-24T21:11:07.362903Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:650:2547] 2025-06-24T21:11:07.363051Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.370891Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.371202Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:652:2549] 2025-06-24T21:11:07.371326Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.377188Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.378226Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-24T21:11:07.378278Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-24T21:11:07.378316Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-24T21:11:07.378523Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.378850Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.378905Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:697:2547] in generation 1 2025-06-24T21:11:07.379400Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.379457Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.380364Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-24T21:11:07.380442Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-24T21:11:07.380478Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-24T21:11:07.380663Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.380721Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.380757Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:698:2549] in generation 1 2025-06-24T21:11:07.391586Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.435628Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:07.435875Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.436067Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:702:2576] 2025-06-24T21:11:07.436138Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.436187Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:07.436245Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.436356Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.436395Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-24T21:11:07.436474Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.436610Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:703:2577] 2025-06-24T21:11:07.436649Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-24T21:11:07.436673Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-24T21:11:07.436696Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:11:07.437068Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.437113Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-24T21:11:07.437177Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.437235Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:704:2578] 2025-06-24T21:11:07.437261Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-24T21:11:07.437285Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-24T21:11:07.437320Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:11:07.437696Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:07.437889Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:07.437947Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-24T21:11:07.438000Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-24T21:11:07.438119Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.438179Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.438234Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:07.438276Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.438337Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:11:07.438367Z node 1 :TX_DATASHARD DEBUG: 
datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.438396Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-24T21:11:07.438440Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:11:07.438580Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:636:2540], serverId# [1:664:2554], sessionId# [0:0:0] 2025-06-24T21:11:07.438633Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:637:2541], serverId# [1:670:2559], sessionId# [0:0:0] 2025-06-24T21:11:07.438664Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037890 2025-06-24T21:11:07.438736Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037890 2025-06-24T21:11:07.438923Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cp ... 037888 2025-06-24T21:11:25.337496Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 281474976715662 2025-06-24T21:11:25.337560Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-24T21:11:25.337609Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [2000 : 281474976715662] from 72075186224037890 at tablet 72075186224037890 send result to client [3:1049:2788], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:11:25.337659Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 72075186224037890 {TEvReadSet step# 2000 txid# 281474976715662 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletConsumer# 72075186224037890 Flags# 0 Seqno# 6} 2025-06-24T21:11:25.337692Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:11:25.337792Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037890 consumer 72075186224037890 txId 281474976715662 2025-06-24T21:11:25.337840Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:978: [DistEraser] [3:1049:2788] HandlePlan TEvDataShard::TEvProposeTransactionResult: txId# 281474976715662, shard# 72075186224037888, status# 2 2025-06-24T21:11:25.337891Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:978: [DistEraser] [3:1049:2788] HandlePlan TEvDataShard::TEvProposeTransactionResult: txId# 281474976715662, shard# 72075186224037890, status# 2 2025-06-24T21:11:25.337939Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:165: [DistEraser] [3:1049:2788] Reply: txId# 281474976715662, status# OK, error# 2025-06-24T21:11:25.338418Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037889 2025-06-24T21:11:25.338496Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037889 2025-06-24T21:11:25.338674Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [3:1043:2783], serverId# [3:1044:2784], sessionId# [0:0:0] 2025-06-24T21:11:25.338786Z 
node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:11:25.338823Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:25.338864Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-24T21:11:25.338934Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:11:25.340245Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T21:11:25.340636Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-24T21:11:25.340855Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:11:25.340902Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:25.340959Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715664] at 72075186224037889 for WaitForStreamClearance 2025-06-24T21:11:25.341238Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:25.341306Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:11:25.341892Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037889, TxId: 281474976715664, MessageQuota: 1 2025-06-24T21:11:25.342142Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037889, TxId: 281474976715664, Size: 70, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T21:11:25.342308Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037889, TxId: 281474976715664, PendingAcks: 0 2025-06-24T21:11:25.342362Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037889, TxId: 281474976715664, MessageQuota: 0 2025-06-24T21:11:25.344350Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037889 2025-06-24T21:11:25.344407Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715664, at: 72075186224037889 2025-06-24T21:11:25.344558Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:11:25.344604Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:25.344646Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715664] at 72075186224037889 for ReadTableScan 2025-06-24T21:11:25.344765Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:25.344817Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:11:25.344863Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at 
tablet# 72075186224037889 2025-06-24T21:11:25.376034Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:25.376446Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:25.376657Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:25.376709Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:25.376766Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715665] at 72075186224037888 for WaitForStreamClearance 2025-06-24T21:11:25.377028Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:25.377101Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:25.377822Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715665, MessageQuota: 1 2025-06-24T21:11:25.378074Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715665, Size: 35, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T21:11:25.378226Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715665, PendingAcks: 0 2025-06-24T21:11:25.378283Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715665, MessageQuota: 0 2025-06-24T21:11:25.379812Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:11:25.379872Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715665, at: 72075186224037888 2025-06-24T21:11:25.380319Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:25.380360Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:25.380398Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715665] at 72075186224037888 for ReadTableScan 2025-06-24T21:11:25.380533Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:25.380589Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:25.380648Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:25.411033Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037890 2025-06-24T21:11:25.411361Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037890 2025-06-24T21:11:25.411509Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-24T21:11:25.411549Z node 3 :TX_DATASHARD DEBUG: 
datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:25.411589Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715666] at 72075186224037890 for WaitForStreamClearance 2025-06-24T21:11:25.411791Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:25.411840Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-24T21:11:25.412298Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715666, MessageQuota: 1 2025-06-24T21:11:25.412502Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037890, TxId: 281474976715666, Size: 35, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T21:11:25.412642Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715666, PendingAcks: 0 2025-06-24T21:11:25.412681Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037890, TxId: 281474976715666, MessageQuota: 0 2025-06-24T21:11:25.414131Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037890 2025-06-24T21:11:25.414174Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715666, at: 72075186224037890 2025-06-24T21:11:25.414301Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-24T21:11:25.414329Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:25.414360Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715666] at 72075186224037890 for ReadTableScan 2025-06-24T21:11:25.414458Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:25.414499Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-24T21:11:25.414533Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 >> YdbTableBulkUpsert::Nulls >> YdbYqlClient::TestDecimal1 >> TPQCDTest::TestUnavailableWithoutNetClassifier [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-6 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-7 >> DistributedEraseTests::ConditionalEraseRowsAsyncIndex [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithInvalidRobouserLogin >> YdbYqlClient::TestYqlIssues >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-14 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-25 >> TTxDataShardUploadRows::TestUploadRows [GOOD] >> TTxDataShardUploadRows::TestUploadRowsDropColumnRace ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestUnavailableWithoutNetClassifier [GOOD] Test command err: 2025-06-24T21:11:20.092872Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626512035367190:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:20.092936Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00241c/r3tmp/tmpu0IUK9/pdisk_1.dat 2025-06-24T21:11:20.454907Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626512035367155:2079] 1750799480088241 != 1750799480088244 2025-06-24T21:11:20.468543Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19953, node 1 2025-06-24T21:11:20.523401Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:20.523514Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:20.525102Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:20.663117Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:20.663155Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:20.663163Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:20.663296Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27470 PQClient connected to localhost:19953 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:11:21.102431Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:21.209895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 
2025-06-24T21:11:22.580898Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626520625302469:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.580977Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626520625302458:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.581132Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.584440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:22.593225Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626520625302472:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-24T21:11:22.691434Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626520625302538:2387] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:22.968511Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626520625302546:2303], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:11:22.970884Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YTk1NThhYjYtZTI1YTJkOTYtNDk3OWQ4Mi0zZWNhN2Q3Yg==, ActorId: [1:7519626520625302456:2291], ActorState: ExecuteState, TraceId: 01jyhwfzpk0qvapn1tsrgbvfce, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:11:22.987581Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:11:23.025271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:23.138819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:23.204177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-24T21:11:23.514218Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jyhwg0be7v8ykd4a9xj6kxz1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjZjYWRjYjMtYzBhZjY4NDYtM2Y3OTYxZDYtZThmNWU2MGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:11:25.092895Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626512035367190:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:25.092968Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflow [GOOD] >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflowAndRetry >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-30 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-31 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-7 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-8 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-42 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-43 >> TGRpcNewClient::TestAuth >> TPQCDTest::TestDiscoverClusters [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> DistributedEraseTests::ConditionalEraseRowsAsyncIndex [GOOD] Test command err: 2025-06-24T21:11:06.668247Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:06.668616Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:06.668746Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003776/r3tmp/tmpXqmy5A/pdisk_1.dat 2025-06-24T21:11:07.013506Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:07.022200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:07.065732Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:07.071031Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799464323177 != 1750799464323181 2025-06-24T21:11:07.116939Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:07.117072Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:07.128663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:07.214251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:07.258548Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:647:2545] 2025-06-24T21:11:07.258766Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.302171Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.302389Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.303987Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:07.304047Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:07.304095Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:07.304441Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.304672Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.304717Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:676:2545] in generation 1 2025-06-24T21:11:07.306115Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:650:2547] 2025-06-24T21:11:07.306268Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.313932Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.314217Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:652:2549] 2025-06-24T21:11:07.314340Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:07.320368Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.321393Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-24T21:11:07.321441Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-24T21:11:07.321471Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-24T21:11:07.321704Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.321912Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.321948Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:697:2547] in generation 1 2025-06-24T21:11:07.322344Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:07.322399Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:07.323350Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-24T21:11:07.323395Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-24T21:11:07.323430Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-24T21:11:07.323656Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:07.323718Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:07.323754Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:698:2549] in generation 1 2025-06-24T21:11:07.334612Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.361736Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:07.361950Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.362052Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:702:2576] 2025-06-24T21:11:07.362088Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:07.362136Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:07.362166Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:07.362252Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.362278Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-24T21:11:07.362334Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.362379Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:703:2577] 2025-06-24T21:11:07.362403Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-24T21:11:07.362467Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-24T21:11:07.362482Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:11:07.362794Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:07.362819Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-24T21:11:07.362871Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:07.362914Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:704:2578] 2025-06-24T21:11:07.362938Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-24T21:11:07.362960Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-24T21:11:07.362989Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:11:07.363291Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:07.363420Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:07.363469Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-24T21:11:07.363515Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-24T21:11:07.363599Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:07.363630Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.363678Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:07.363717Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:07.363758Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:11:07.363782Z node 1 :TX_DATASHARD DEBUG: 
datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:07.363809Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-24T21:11:07.363841Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:11:07.363940Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:636:2540], serverId# [1:664:2554], sessionId# [0:0:0] 2025-06-24T21:11:07.363993Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:637:2541], serverId# [1:670:2559], sessionId# [0:0:0] 2025-06-24T21:11:07.364013Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037890 2025-06-24T21:11:07.364052Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037890 2025-06-24T21:11:07.364198Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cp ... datashard.cpp:3990: Send RS 2 at 72075186224037891 from 72075186224037891 to 72075186224037893 txId 281474976715666 2025-06-24T21:11:27.316431Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037891 2025-06-24T21:11:27.316513Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [2500 : 281474976715666] from 72075186224037891 at tablet 72075186224037891 send result to client [3:1368:3001], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:27.316669Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037891, records: { Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 14] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 11] SchemaVersion: 1 }, { Order: 5 PathId: [OwnerId: 72057594046644480, LocalPathId: 14] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 11] SchemaVersion: 1 }, { Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 14] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 11] SchemaVersion: 1 } 2025-06-24T21:11:27.316749Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-24T21:11:27.316928Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037893 step# 2500} 2025-06-24T21:11:27.316989Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-06-24T21:11:27.317327Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:978: [DistEraser] [3:1368:3001] HandlePlan TEvDataShard::TEvProposeTransactionResult: txId# 281474976715666, shard# 72075186224037891, status# 2 2025-06-24T21:11:27.317446Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037893 source 72075186224037891 dest 72075186224037893 producer 72075186224037891 txId 281474976715666 2025-06-24T21:11:27.317560Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037893 got read set: {TEvReadSet step# 2500 txid# 281474976715666 TabletSource# 72075186224037891 TabletDest# 72075186224037893 SetTabletProducer# 72075186224037891 ReadSet.Size()# 19 Seqno# 2 Flags# 0} 2025-06-24T21:11:27.317671Z node 3 :TX_DATASHARD DEBUG: 
datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037893 2025-06-24T21:11:27.317906Z node 3 :TX_DATASHARD INFO: datashard_change_sending.cpp:215: TTxRequestChangeRecords Execute: at tablet# 72075186224037891 2025-06-24T21:11:27.318286Z node 3 :TX_DATASHARD DEBUG: datashard_change_sending.cpp:235: Send 3 change records: to# [3:1166:2881], at tablet# 72075186224037891 2025-06-24T21:11:27.318358Z node 3 :TX_DATASHARD INFO: datashard_change_sending.cpp:260: TTxRequestChangeRecords Complete: sent# 3, forgotten# 0, left# 0, at tablet# 72075186224037891 2025-06-24T21:11:27.318445Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893 2025-06-24T21:11:27.318487Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:27.318540Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [2500:281474976715666] at 72075186224037893 for LoadAndWaitInRS 2025-06-24T21:11:27.319041Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:27.320024Z node 3 :TX_DATASHARD DEBUG: datashard_change_receiving.cpp:470: Handle TEvChangeExchange::TEvApplyRecords: origin# 72075186224037891, generation# 1, at tablet# 72075186224037892 2025-06-24T21:11:27.331547Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-06-24T21:11:27.331656Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [2500 : 281474976715666] from 72075186224037893 at tablet 72075186224037893 send result to client [3:1368:3001], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:11:27.331793Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 72075186224037893 {TEvReadSet step# 2500 txid# 281474976715666 TabletSource# 72075186224037891 TabletDest# 72075186224037893 SetTabletConsumer# 72075186224037893 Flags# 0 Seqno# 2} 2025-06-24T21:11:27.331856Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-06-24T21:11:27.331992Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:978: [DistEraser] [3:1368:3001] HandlePlan TEvDataShard::TEvProposeTransactionResult: txId# 281474976715666, shard# 72075186224037893, status# 2 2025-06-24T21:11:27.332052Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:165: [DistEraser] [3:1368:3001] Reply: txId# 281474976715666, status# OK, error# 2025-06-24T21:11:27.332224Z node 3 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 3, at tablet# 72075186224037891 2025-06-24T21:11:27.332262Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 4, at tablet: 72075186224037891 2025-06-24T21:11:27.332364Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 5, at tablet: 72075186224037891 2025-06-24T21:11:27.332400Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 6, at tablet: 72075186224037891 2025-06-24T21:11:27.332677Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037891 source 72075186224037891 dest 72075186224037893 consumer 72075186224037893 txId 281474976715666 2025-06-24T21:11:27.332896Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 
72075186224037891 2025-06-24T21:11:27.332954Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037891 2025-06-24T21:11:27.333138Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037891, clientId# [3:1363:2997], serverId# [3:1364:2998], sessionId# [0:0:0] 2025-06-24T21:11:27.333237Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037891 2025-06-24T21:11:27.333280Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037891 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:27.333326Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-06-24T21:11:27.334470Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037893 2025-06-24T21:11:27.334917Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037893 2025-06-24T21:11:27.335140Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893 2025-06-24T21:11:27.335435Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:27.335503Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715667] at 72075186224037893 for WaitForStreamClearance 2025-06-24T21:11:27.335796Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:27.335892Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-06-24T21:11:27.336700Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037893, TxId: 281474976715667, MessageQuota: 1 2025-06-24T21:11:27.336862Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037893, TxId: 281474976715667, MessageQuota: 1 2025-06-24T21:11:27.398409Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037893 2025-06-24T21:11:27.398496Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715667, at: 72075186224037893 2025-06-24T21:11:27.398744Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893 2025-06-24T21:11:27.398806Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:27.398853Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715667] at 72075186224037893 for ReadTableScan 2025-06-24T21:11:27.399013Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:27.399085Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-06-24T21:11:27.399161Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-06-24T21:11:27.400512Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: 
TTxProposeTransactionBase::Execute at 72075186224037892 2025-06-24T21:11:27.400829Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037892 2025-06-24T21:11:27.401025Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-06-24T21:11:27.401064Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:27.401106Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715668] at 72075186224037892 for WaitForStreamClearance 2025-06-24T21:11:27.401292Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:27.401352Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-24T21:11:27.401958Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037892, TxId: 281474976715668, MessageQuota: 1 2025-06-24T21:11:27.402086Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037892, TxId: 281474976715668, MessageQuota: 1 2025-06-24T21:11:27.403851Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037892 2025-06-24T21:11:27.403897Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715668, at: 72075186224037892 2025-06-24T21:11:27.404074Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-06-24T21:11:27.404114Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:27.404153Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715668] at 72075186224037892 for ReadTableScan 2025-06-24T21:11:27.404275Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:27.404328Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-24T21:11:27.404372Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-36 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-37 >> TTxDataShardUploadRows::TestUploadShadowRows [GOOD] >> TTxDataShardUploadRows::TestUploadShadowRowsShadowData |95.0%| [TA] $(B)/ydb/core/tx/datashard/ut_erase_rows/test-results/unittest/{meta.json ... results_accumulator.log} |95.0%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_erase_rows/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestDiscoverClusters [GOOD] Test command err: 2025-06-24T21:11:20.092772Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626511833098031:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:20.092864Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002371/r3tmp/tmpCmpwbg/pdisk_1.dat 2025-06-24T21:11:20.437181Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:20.443260Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626511833097998:2079] 1750799480088247 != 1750799480088250 TServer::EnableGrpc on GrpcPort 65081, node 1 2025-06-24T21:11:20.525306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:20.525428Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:20.526995Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:20.663462Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/002371/r3tmp/yandexcHvX0o.tmp 2025-06-24T21:11:20.663498Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/002371/r3tmp/yandexcHvX0o.tmp 2025-06-24T21:11:20.664306Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/002371/r3tmp/yandexcHvX0o.tmp 2025-06-24T21:11:20.664487Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8471 PQClient connected to localhost:65081 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:11:21.099603Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:21.209378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-24T21:11:22.546810Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626520423033300:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.546943Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.547123Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626520423033313:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.550927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:22.559105Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626520423033315:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-24T21:11:22.623601Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626520423033380:2387] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:22.952224Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626520423033388:2303], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:11:22.958737Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ODdlMDFjYTItYzMzNzZlYi04NmY0MmJhYS0xN2M4NzA2ZQ==, ActorId: [1:7519626520423033296:2291], ActorState: ExecuteState, TraceId: 01jyhwfznh672y6hqvhdc80ttg, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:11:22.963838Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:11:23.025337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:23.174927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:23.245137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-24T21:11:23.514222Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyhwg0czd5gxezrxd627cw08, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2U3OWI4YS1hOWNiYjkzZi03NmIyMzdlNC1hNTkzM2E5MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:24.623891Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. Ctx: { TraceId: 01jyhwg1g3amrwccvw1g1ymtjm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTRkYjUyMWItZGEwYTIxODYtMzNkZjgxYmEtZGZhZmI1Yzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:11:25.093176Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626511833098031:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:25.093243Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:25.702563Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jyhwg2m9c88e3vke5z5camn1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzYwMDAxNDAtOGQxMGJlYTctMWQzYThjOTMtODc0NTUxNzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:27.060424Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715674. Ctx: { TraceId: 01jyhwg3vzfnzgt5tjvrftpfjm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWI0OTg2MjAtZjlkMjA5MWMtZDNjYTk4NGEtYzI2MmIxNzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:28.402263Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715678. Ctx: { TraceId: 01jyhwg55q6p0wgvdjz3x36074, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTQ5YzEyOWUtOGI3Y2RiOGYtNmU1OTk4ZTgtNzdjM2YyYTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataPublishThenSplit [GOOD] >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataAlterSplitThenPublish >> YdbYqlClient::ConnectDbAclIsOffWhenTokenIsOptionalAndNull >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataSplitThenPublish [GOOD] >> TTxDataShardUploadRows::UploadRowsToReplicatedTable |95.0%| [TA] $(B)/ydb/services/persqueue_cluster_discovery/ut/test-results/unittest/{meta.json ... results_accumulator.log} |95.0%| [TA] {RESULT} $(B)/ydb/services/persqueue_cluster_discovery/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TKeyValueTest::TestInlineCopyRangeWorksNewApi [GOOD] >> TGRpcYdbTest::MakeListRemoveDirectory [GOOD] >> TGRpcYdbTest::GetOperationBadRequest >> TGRpcYdbTest::CreateTableBadRequest [GOOD] >> TGRpcYdbTest::CreateTableBadRequest2 >> TGRpcNewCoordinationClient::SessionSemaphoreInfiniteTimeout [GOOD] >> TGRpcNewCoordinationClient::SessionReconnectReattach >> TGRpcClientLowTest::GrpcRequestProxy [GOOD] >> TGRpcClientLowTest::GrpcRequestProxyWithoutToken >> TGRpcYdbTest::RemoveNotExistedDirectory [GOOD] >> TGRpcYdbTest::SdkUuid >> TGRpcYdbTest::ExecuteQueryBadRequest [GOOD] >> TGRpcYdbTest::DropTableBadRequest [GOOD] >> TGRpcYdbTest::CreateTableWithIndex >> TGRpcYdbTest::ExecuteQueryExplicitSession >> TGRpcClientLowTest::SimpleRequest >> TGRpcYdbTest::ExecuteQueryImplicitSession [GOOD] >> TGRpcYdbTest::ExecuteQueryWithUuid >> GrpcConnectionStringParserTest::NoDatabaseFlag [GOOD] >> GrpcConnectionStringParserTest::IncorrectConnectionString >> YdbImport::EmptyData [GOOD] >> YdbImport::ImportFromS3ToExistingTable >> GrpcConnectionStringParserTest::IncorrectConnectionString [GOOD] >> GrpcConnectionStringParserTest::CommonClientSettingsFromConnectionString >> YdbYqlClient::CreateTableWithPartitionAtKeys [GOOD] >> YdbYqlClient::CreateAndAltertTableWithPartitioningBySize >> YdbYqlClient::TestYqlWrongTable [GOOD] >> YdbYqlClient::TraceId ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestInlineCopyRangeWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:82:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:82:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:83:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:85:2114] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:89:2057] recipient: [7:85:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! new actor is[7:88:2115] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:174:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:85:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:88:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:89:2057] recipient: [8:87:2116] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:91:2057] recipient: [8:87:2116] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! new actor is[8:90:2117] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:176:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:85:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:88:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:89:2057] recipient: [9:87:2116] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:91:2057] recipient: [9:87:2116] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! new actor is[9:90:2117] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:176:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:86:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:89:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:90:2057] recipient: [10:88:2116] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:92:2057] recipient: [10:88:2116] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! new actor is[10:91:2117] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:177:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:88:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:91:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:92:2057] recipient: [11:90:2118] Leader for TabletID 72057594037927937 is [11:93:2119] sender: [11:94:2057] recipient: [11:90:2118] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! new actor is[11:93:2119] Leader for TabletID 72057594037927937 is [11:93:2119] sender: [11:179:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... 46:80:2111] !Reboot 72057594037927937 (actor [46:58:2098]) rebooted! !Reboot 72057594037927937 (actor [46:58:2098]) tablet resolver refreshed! 
new actor is[46:83:2112] Leader for TabletID 72057594037927937 is [46:83:2112] sender: [46:169:2057] recipient: [46:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [47:56:2057] recipient: [47:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [47:56:2057] recipient: [47:52:2096] Leader for TabletID 72057594037927937 is [47:58:2098] sender: [47:59:2057] recipient: [47:52:2096] Leader for TabletID 72057594037927937 is [47:58:2098] sender: [47:76:2057] recipient: [47:14:2061] !Reboot 72057594037927937 (actor [47:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [47:58:2098] sender: [47:79:2057] recipient: [47:37:2084] Leader for TabletID 72057594037927937 is [47:58:2098] sender: [47:82:2057] recipient: [47:81:2111] Leader for TabletID 72057594037927937 is [47:58:2098] sender: [47:83:2057] recipient: [47:14:2061] Leader for TabletID 72057594037927937 is [47:84:2112] sender: [47:85:2057] recipient: [47:81:2111] !Reboot 72057594037927937 (actor [47:58:2098]) rebooted! !Reboot 72057594037927937 (actor [47:58:2098]) tablet resolver refreshed! new actor is[47:84:2112] Leader for TabletID 72057594037927937 is [47:84:2112] sender: [47:170:2057] recipient: [47:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [48:55:2057] recipient: [48:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [48:55:2057] recipient: [48:51:2096] Leader for TabletID 72057594037927937 is [48:58:2098] sender: [48:59:2057] recipient: [48:51:2096] Leader for TabletID 72057594037927937 is [48:58:2098] sender: [48:76:2057] recipient: [48:14:2061] !Reboot 72057594037927937 (actor [48:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [48:58:2098] sender: [48:82:2057] recipient: [48:37:2084] Leader for TabletID 72057594037927937 is [48:58:2098] sender: [48:85:2057] recipient: [48:14:2061] Leader for TabletID 72057594037927937 is [48:58:2098] sender: [48:86:2057] recipient: [48:84:2114] Leader for TabletID 72057594037927937 is [48:87:2115] sender: [48:88:2057] recipient: [48:84:2114] !Reboot 72057594037927937 (actor [48:58:2098]) rebooted! !Reboot 72057594037927937 (actor [48:58:2098]) tablet resolver refreshed! new actor is[48:87:2115] Leader for TabletID 72057594037927937 is [48:87:2115] sender: [48:173:2057] recipient: [48:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [49:56:2057] recipient: [49:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [49:56:2057] recipient: [49:52:2096] Leader for TabletID 72057594037927937 is [49:58:2098] sender: [49:59:2057] recipient: [49:52:2096] Leader for TabletID 72057594037927937 is [49:58:2098] sender: [49:76:2057] recipient: [49:14:2061] !Reboot 72057594037927937 (actor [49:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [49:58:2098] sender: [49:82:2057] recipient: [49:37:2084] Leader for TabletID 72057594037927937 is [49:58:2098] sender: [49:85:2057] recipient: [49:84:2114] Leader for TabletID 72057594037927937 is [49:58:2098] sender: [49:86:2057] recipient: [49:14:2061] Leader for TabletID 72057594037927937 is [49:87:2115] sender: [49:88:2057] recipient: [49:84:2114] !Reboot 72057594037927937 (actor [49:58:2098]) rebooted! !Reboot 72057594037927937 (actor [49:58:2098]) tablet resolver refreshed! 
new actor is[49:87:2115] Leader for TabletID 72057594037927937 is [49:87:2115] sender: [49:173:2057] recipient: [49:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [50:56:2057] recipient: [50:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [50:56:2057] recipient: [50:52:2096] Leader for TabletID 72057594037927937 is [50:58:2098] sender: [50:59:2057] recipient: [50:52:2096] Leader for TabletID 72057594037927937 is [50:58:2098] sender: [50:76:2057] recipient: [50:14:2061] !Reboot 72057594037927937 (actor [50:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [50:58:2098] sender: [50:83:2057] recipient: [50:37:2084] Leader for TabletID 72057594037927937 is [50:58:2098] sender: [50:85:2057] recipient: [50:14:2061] Leader for TabletID 72057594037927937 is [50:58:2098] sender: [50:87:2057] recipient: [50:86:2114] Leader for TabletID 72057594037927937 is [50:88:2115] sender: [50:89:2057] recipient: [50:86:2114] !Reboot 72057594037927937 (actor [50:58:2098]) rebooted! !Reboot 72057594037927937 (actor [50:58:2098]) tablet resolver refreshed! new actor is[50:88:2115] Leader for TabletID 72057594037927937 is [50:88:2115] sender: [50:174:2057] recipient: [50:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [51:56:2057] recipient: [51:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [51:56:2057] recipient: [51:52:2096] Leader for TabletID 72057594037927937 is [51:58:2098] sender: [51:59:2057] recipient: [51:52:2096] Leader for TabletID 72057594037927937 is [51:58:2098] sender: [51:76:2057] recipient: [51:14:2061] !Reboot 72057594037927937 (actor [51:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [51:58:2098] sender: [51:86:2057] recipient: [51:37:2084] Leader for TabletID 72057594037927937 is [51:58:2098] sender: [51:89:2057] recipient: [51:14:2061] Leader for TabletID 72057594037927937 is [51:58:2098] sender: [51:90:2057] recipient: [51:88:2117] Leader for TabletID 72057594037927937 is [51:91:2118] sender: [51:92:2057] recipient: [51:88:2117] !Reboot 72057594037927937 (actor [51:58:2098]) rebooted! !Reboot 72057594037927937 (actor [51:58:2098]) tablet resolver refreshed! new actor is[51:91:2118] Leader for TabletID 72057594037927937 is [51:91:2118] sender: [51:177:2057] recipient: [51:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [52:56:2057] recipient: [52:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [52:56:2057] recipient: [52:53:2096] Leader for TabletID 72057594037927937 is [52:58:2098] sender: [52:59:2057] recipient: [52:53:2096] Leader for TabletID 72057594037927937 is [52:58:2098] sender: [52:76:2057] recipient: [52:14:2061] !Reboot 72057594037927937 (actor [52:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [52:58:2098] sender: [52:86:2057] recipient: [52:37:2084] Leader for TabletID 72057594037927937 is [52:58:2098] sender: [52:88:2057] recipient: [52:14:2061] Leader for TabletID 72057594037927937 is [52:58:2098] sender: [52:90:2057] recipient: [52:89:2117] Leader for TabletID 72057594037927937 is [52:91:2118] sender: [52:92:2057] recipient: [52:89:2117] !Reboot 72057594037927937 (actor [52:58:2098]) rebooted! !Reboot 72057594037927937 (actor [52:58:2098]) tablet resolver refreshed! 
new actor is[52:91:2118] Leader for TabletID 72057594037927937 is [52:91:2118] sender: [52:177:2057] recipient: [52:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [53:56:2057] recipient: [53:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [53:56:2057] recipient: [53:51:2096] Leader for TabletID 72057594037927937 is [53:58:2098] sender: [53:59:2057] recipient: [53:51:2096] Leader for TabletID 72057594037927937 is [53:58:2098] sender: [53:76:2057] recipient: [53:14:2061] !Reboot 72057594037927937 (actor [53:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [53:58:2098] sender: [53:87:2057] recipient: [53:37:2084] Leader for TabletID 72057594037927937 is [53:58:2098] sender: [53:90:2057] recipient: [53:14:2061] Leader for TabletID 72057594037927937 is [53:58:2098] sender: [53:91:2057] recipient: [53:89:2117] Leader for TabletID 72057594037927937 is [53:92:2118] sender: [53:93:2057] recipient: [53:89:2117] !Reboot 72057594037927937 (actor [53:58:2098]) rebooted! !Reboot 72057594037927937 (actor [53:58:2098]) tablet resolver refreshed! new actor is[53:92:2118] Leader for TabletID 72057594037927937 is [53:92:2118] sender: [53:178:2057] recipient: [53:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [54:56:2057] recipient: [54:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [54:56:2057] recipient: [54:53:2096] Leader for TabletID 72057594037927937 is [54:58:2098] sender: [54:59:2057] recipient: [54:53:2096] Leader for TabletID 72057594037927937 is [54:58:2098] sender: [54:76:2057] recipient: [54:14:2061] !Reboot 72057594037927937 (actor [54:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [54:58:2098] sender: [54:90:2057] recipient: [54:37:2084] Leader for TabletID 72057594037927937 is [54:58:2098] sender: [54:93:2057] recipient: [54:14:2061] Leader for TabletID 72057594037927937 is [54:58:2098] sender: [54:94:2057] recipient: [54:92:2120] Leader for TabletID 72057594037927937 is [54:95:2121] sender: [54:96:2057] recipient: [54:92:2120] !Reboot 72057594037927937 (actor [54:58:2098]) rebooted! !Reboot 72057594037927937 (actor [54:58:2098]) tablet resolver refreshed! new actor is[54:95:2121] Leader for TabletID 72057594037927937 is [54:95:2121] sender: [54:181:2057] recipient: [54:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [55:56:2057] recipient: [55:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [55:56:2057] recipient: [55:51:2096] Leader for TabletID 72057594037927937 is [55:58:2098] sender: [55:59:2057] recipient: [55:51:2096] Leader for TabletID 72057594037927937 is [55:58:2098] sender: [55:76:2057] recipient: [55:14:2061] !Reboot 72057594037927937 (actor [55:58:2098]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [55:58:2098] sender: [55:90:2057] recipient: [55:37:2084] Leader for TabletID 72057594037927937 is [55:58:2098] sender: [55:93:2057] recipient: [55:14:2061] Leader for TabletID 72057594037927937 is [55:58:2098] sender: [55:94:2057] recipient: [55:92:2120] Leader for TabletID 72057594037927937 is [55:95:2121] sender: [55:96:2057] recipient: [55:92:2120] !Reboot 72057594037927937 (actor [55:58:2098]) rebooted! !Reboot 72057594037927937 (actor [55:58:2098]) tablet resolver refreshed! 
new actor is[55:95:2121] Leader for TabletID 72057594037927937 is [55:95:2121] sender: [55:181:2057] recipient: [55:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [56:56:2057] recipient: [56:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [56:56:2057] recipient: [56:51:2096] Leader for TabletID 72057594037927937 is [56:58:2098] sender: [56:59:2057] recipient: [56:51:2096] Leader for TabletID 72057594037927937 is [56:58:2098] sender: [56:76:2057] recipient: [56:14:2061] !Reboot 72057594037927937 (actor [56:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [56:58:2098] sender: [56:91:2057] recipient: [56:37:2084] Leader for TabletID 72057594037927937 is [56:58:2098] sender: [56:94:2057] recipient: [56:14:2061] Leader for TabletID 72057594037927937 is [56:58:2098] sender: [56:95:2057] recipient: [56:93:2120] Leader for TabletID 72057594037927937 is [56:96:2121] sender: [56:97:2057] recipient: [56:93:2120] !Reboot 72057594037927937 (actor [56:58:2098]) rebooted! !Reboot 72057594037927937 (actor [56:58:2098]) tablet resolver refreshed! new actor is[56:96:2121] Leader for TabletID 72057594037927937 is [0:0:0] sender: [57:56:2057] recipient: [57:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [57:56:2057] recipient: [57:53:2096] Leader for TabletID 72057594037927937 is [57:58:2098] sender: [57:59:2057] recipient: [57:53:2096] Leader for TabletID 72057594037927937 is [57:58:2098] sender: [57:76:2057] recipient: [57:14:2061] >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientProvidesCorrectCerts >> YdbYqlClient::TestDecimal1 [GOOD] >> YdbYqlClient::TestDecimal35 >> YdbYqlClient::TestReadTableMultiShardWholeTable [GOOD] >> YdbYqlClient::TestReadTableMultiShardWholeTableUseSnapshot >> YdbImport::Simple [GOOD] >> YdbIndexTable::AlterIndexImplBySuperUser >> YdbYqlClient::TestReadTableMultiShard [GOOD] >> YdbYqlClient::TestReadTableMultiShardUseSnapshot >> TGRpcLdapAuthentication::LdapAuthWithInvalidRobouserLogin [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithInvalidRobouserPassword >> TTxDataShardUploadRows::TestUploadRowsDropColumnRace [GOOD] >> TTxDataShardUploadRows::TestUploadRowsLocks >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflowAndRetry [GOOD] >> TTxDataShardUploadRows::BulkUpsertDuringAddIndexRaceCorruption >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-7 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-8 >> YdbScripting::MultiResults [GOOD] >> YdbScripting::Params >> TVersions::Wreck0 [GOOD] >> TVersions::Wreck0Reverse >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-8 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-9 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-25 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-26 >> YdbYqlClient::TestYqlIssues [GOOD] >> YdbYqlClient::TestYqlSessionClosed >> TGRpcNewClient::TestAuth [GOOD] >> TGRpcNewClient::YqlQueryWithParams >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-31 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-32 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-43 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-44 >> YdbQueryService::TestCreateAndAttachSession >> TIcNodeCache::GetNodesInfoTest [GOOD] >> 
YdbYqlClient::ConnectDbAclIsOffWhenTokenIsOptionalAndNull [GOOD] >> YdbYqlClient::ColumnFamiliesWithStorageAndIndex >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-37 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-38 >> TTxDataShardUploadRows::TestUploadShadowRowsShadowData [GOOD] >> TTxDataShardUploadRows::UploadRowsToReplicatedTable [GOOD] >> TGRpcYdbTest::GetOperationBadRequest [GOOD] >> TGRpcYdbTest::OperationTimeout ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest >> TIcNodeCache::GetNodesInfoTest [GOOD] Test command err: 2025-06-24T21:11:20.059872Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626508611232710:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:20.059946Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:11:20.110704Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626510790918648:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:20.110918Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047d1/r3tmp/tmpG87gzz/pdisk_1.dat 2025-06-24T21:11:20.275764Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:11:20.286730Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:11:20.570298Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:20.601489Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:20.601587Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:20.613132Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:20.623556Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:20.623639Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:20.626491Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:11:20.627375Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29190, node 1 2025-06-24T21:11:20.822126Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0047d1/r3tmp/yandexj500e1.tmp 2025-06-24T21:11:20.822152Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0047d1/r3tmp/yandexj500e1.tmp 2025-06-24T21:11:20.823568Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully 
initialized from file: /home/runner/.ya/build/build_root/2btm/0047d1/r3tmp/yandexj500e1.tmp 2025-06-24T21:11:20.823733Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:21.076182Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:21.098076Z INFO: TTestServer started on Port 23825 GrpcPort 29190 2025-06-24T21:11:21.116522Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23825 PQClient connected to localhost:29190 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:21.423760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:11:21.475217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T21:11:22.807914Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626517201168334:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.808032Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.808362Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626517201168362:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.817490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:22.819037Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626517201168395:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.819119Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:22.836442Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626517201168364:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-24T21:11:22.912603Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626517201168442:2744] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:23.331551Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626517201168461:2308], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:11:23.332309Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519626519380853608:2277], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:11:23.332576Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=Y2U1Y2UzMWMtODVkZmUwNjUtY2M3MzkwM2ItNjUwNWQyYWM=, ActorId: [2:7519626519380853584:2271], ActorState: ExecuteState, TraceId: 01jyhwfzzc9aye397kgc28rs32, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:11:23.336109Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:11:23.337640Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YjRhMTY0ZDUtZWRhZjMxNDAtNDBmZDYxYzktYmE4ZDUzMTI=, ActorId: [1:7519626517201168332:2296], ActorState: ExecuteState, TraceId: 01jyhwfzxda4ra4jey4869pmg5, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:11:23.337976Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." 
end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:11:23.393519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:23.473668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:23.569174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-24T21:11:23.889815Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyhwg0qc7er41xh6y6rcqk9b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTQxNWJiMzEtY2NmMzU1ZjktMTc5MDZlNmEtNmI0YTI4OGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7519626521496136217:3087] 2025-06-24T21:11:25.059990Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626508611232710:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:25.060163Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:25.111139Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626510790918648:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:25.111225Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::UploadRowsToReplicatedTable [GOOD] Test command err: 2025-06-24T21:11:27.165955Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:27.166411Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:27.166597Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004278/r3tmp/tmptsByx4/pdisk_1.dat 2025-06-24T21:11:27.577745Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:27.581450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:27.638267Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.645851Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799484582348 != 1750799484582352 2025-06-24T21:11:27.693877Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.694056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.705929Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:27.797471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:27.848878Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:27.850108Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:11:27.850613Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:11:27.850911Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:27.928282Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:11:27.929124Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:27.929284Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:27.931644Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T21:11:27.931774Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:27.931848Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:27.932252Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:27.932423Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:27.932523Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:11:27.943860Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:27.982981Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:27.983272Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:27.983411Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:11:27.983459Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:27.983504Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:27.983564Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:27.983801Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:11:27.983858Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:11:27.984269Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:27.984393Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:27.984493Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:27.984548Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:27.984612Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:11:27.984661Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:11:27.984697Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:11:27.984742Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:27.984788Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:27.984914Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:11:27.984952Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:11:27.984995Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:11:27.985428Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:11:27.985477Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:11:27.985613Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:27.985854Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:11:27.985900Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:11:27.986022Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:11:27.986079Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:11:27.986121Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T21:11:27.986159Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T21:11:27.986198Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:11:27.986518Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T21:11:27.986562Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T21:11:27.986600Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:11:27.986648Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:11:27.986699Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T21:11:27.986739Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T21:11:27.986818Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T21:11:27.986866Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T21:11:27.986900Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T21:11:27.988530Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T21:11:27.988588Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:27.999541Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:27.999610Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:11:27.999644Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:11:27.999713Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... eartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:11:34.338385Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:34.338672Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:34.338764Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004278/r3tmp/tmpBWu8qm/pdisk_1.dat 2025-06-24T21:11:34.604523Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-24T21:11:34.606008Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:34.635137Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:34.637480Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1750799491061046 != 1750799491061050 2025-06-24T21:11:34.684972Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:34.685110Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:34.696978Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:34.792531Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:34.819807Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:624:2529] 2025-06-24T21:11:34.820070Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:34.883645Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:34.883823Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:34.886127Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:34.886243Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:34.886313Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:34.886689Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:34.886876Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:34.886970Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [2:640:2529] in generation 1 2025-06-24T21:11:34.899730Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:34.899830Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:34.899960Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:34.900051Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:642:2539] 2025-06-24T21:11:34.900098Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:34.900144Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:34.900201Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:34.900570Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:34.900673Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:34.900789Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:34.900834Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:34.900874Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:34.900921Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:34.900997Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:623:2528], serverId# [2:631:2533], sessionId# [0:0:0] 2025-06-24T21:11:34.901442Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:34.901666Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:11:34.901742Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:11:34.903628Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:34.915786Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:34.915915Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:11:35.065265Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:658:2549], serverId# [2:659:2550], sessionId# [0:0:0] 2025-06-24T21:11:35.066274Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:11:35.066358Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:35.067133Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:35.068125Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:35.068201Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:35.068511Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:35.068670Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:35.069089Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:35.069161Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:35.069683Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:35.070139Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:35.072281Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:35.072349Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:35.073327Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:35.073432Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:35.074232Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:35.074278Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:35.074333Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:35.074406Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:35.074467Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:35.074559Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:35.076011Z node 2 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:35.078015Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:35.078095Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:35.079007Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:35.136504Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:693:2576], serverId# [2:694:2577], sessionId# [0:0:0] 2025-06-24T21:11:35.136685Z node 2 :TX_DATASHARD NOTICE: datashard__op_rows.cpp:168: Rejecting bulk upsert request on datashard: tablet# 72075186224037888, error# Can't execute bulk upsert at replicated table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::TestUploadShadowRowsShadowData [GOOD] Test command err: 2025-06-24T21:11:27.030364Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:27.030822Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:27.031005Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004286/r3tmp/tmpN0PWCI/pdisk_1.dat 2025-06-24T21:11:27.396219Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:27.400261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:27.456627Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.462037Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799484582382 != 1750799484582386 2025-06-24T21:11:27.516645Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.516847Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.529363Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:27.629082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:27.704928Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:11:27.705235Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:27.752797Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:27.752942Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:27.754732Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:27.754844Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:27.754942Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:27.755379Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:27.755543Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:27.755645Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:11:27.766570Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:27.814356Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:27.814595Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:27.814810Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:11:27.814875Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:27.814917Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:27.814957Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:27.815606Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:27.815733Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:27.815819Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:27.815859Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:27.815900Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:27.815971Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:27.816097Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:11:27.816628Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:27.816901Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:11:27.817010Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:11:27.818823Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:27.829692Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:27.829825Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:11:27.997320Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:659:2549], serverId# [1:660:2550], sessionId# [0:0:0] 2025-06-24T21:11:28.001791Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:11:28.001856Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:28.002651Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:28.002695Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:11:28.002757Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:11:28.003010Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:11:28.003139Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:28.003698Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:28.003771Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:28.004170Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:28.004535Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:28.005646Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:28.005686Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:28.006380Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:28.006464Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:28.007388Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:28.007432Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:28.007491Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:28.007551Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:28.007604Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:28.007695Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:28.012364Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:28.014030Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:28.014090Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:28.014900Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:28.050685Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:28.050829Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:702:2579], DatabaseId: /Root, PoolId: ... ndencies 2025-06-24T21:11:34.905250Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2000/281474976715664 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-24T21:11:34.905292Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:281474976715665] at 72075186224037888 2025-06-24T21:11:34.905322Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-24T21:11:34.905341Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:11:34.905365Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit MakeScanSnapshot 2025-06-24T21:11:34.905392Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit MakeScanSnapshot 2025-06-24T21:11:34.905421Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-24T21:11:34.905438Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit MakeScanSnapshot 2025-06-24T21:11:34.905454Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit WaitForStreamClearance 2025-06-24T21:11:34.905470Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit WaitForStreamClearance 2025-06-24T21:11:34.905512Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:99: Requested stream clearance from [2:901:2713] for [0:281474976715665] at 72075186224037888 2025-06-24T21:11:34.905552Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Continue 2025-06-24T21:11:34.910929Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287942, Sender [2:901:2713], Recipient [2:624:2529]: NKikimrTx.TEvStreamClearancePending TxId: 281474976715665 2025-06-24T21:11:34.910995Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvTxProcessing::TEvStreamClearancePending 2025-06-24T21:11:34.911081Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287940, Sender [2:901:2713], Recipient [2:624:2529]: NKikimrTx.TEvStreamClearanceResponse TxId: 281474976715665 Cleared: true 2025-06-24T21:11:34.911115Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, processing event TEvTxProcessing::TEvStreamClearanceResponse 2025-06-24T21:11:34.911208Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:624:2529], Recipient [2:624:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:11:34.911235Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event 
TEvPrivate::TEvProgressTransaction 2025-06-24T21:11:34.911310Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:34.911354Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:34.911401Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715665] at 72075186224037888 for WaitForStreamClearance 2025-06-24T21:11:34.911433Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit WaitForStreamClearance 2025-06-24T21:11:34.911482Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:156: Got stream clearance for [0:281474976715665] at 72075186224037888 2025-06-24T21:11:34.911518Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-24T21:11:34.911561Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit WaitForStreamClearance 2025-06-24T21:11:34.911611Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit ReadTableScan 2025-06-24T21:11:34.911649Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit ReadTableScan 2025-06-24T21:11:34.911967Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Continue 2025-06-24T21:11:34.912021Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:34.912063Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-24T21:11:34.912105Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:11:34.912144Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:11:34.912761Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [2:908:2718], Recipient [2:624:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-24T21:11:34.912802Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-24T21:11:34.913028Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715665, MessageQuota: 1 2025-06-24T21:11:34.913575Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715665, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T21:11:34.913703Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:34.913751Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:34.913895Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715665, PendingAcks: 0 2025-06-24T21:11:34.913949Z node 2 
:TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715665, MessageQuota: 0 2025-06-24T21:11:34.924119Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [2:894:2706], Recipient [2:624:2529]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046644480 ClientId: [2:894:2706] ServerId: [2:896:2708] } 2025-06-24T21:11:34.924202Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T21:11:34.924280Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:11:34.924326Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715665, at: 72075186224037888 2025-06-24T21:11:34.924495Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:624:2529], Recipient [2:624:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:11:34.924529Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:11:34.924596Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:34.924634Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:34.924676Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715665] at 72075186224037888 for ReadTableScan 2025-06-24T21:11:34.924709Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit ReadTableScan 2025-06-24T21:11:34.924753Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715665] at 72075186224037888 error: , IsFatalError: 0 2025-06-24T21:11:34.924806Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-24T21:11:34.924841Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit ReadTableScan 2025-06-24T21:11:34.924871Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:11:34.924899Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit FinishPropose 2025-06-24T21:11:34.924942Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715665 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-24T21:11:34.925015Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is DelayComplete 2025-06-24T21:11:34.925048Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit FinishPropose 2025-06-24T21:11:34.925088Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:11:34.925128Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 
72075186224037888 on unit CompletedOperations 2025-06-24T21:11:34.925178Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-24T21:11:34.925201Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:11:34.925228Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715665] at 72075186224037888 has finished 2025-06-24T21:11:34.925267Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:34.925293Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-24T21:11:34.925323Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:11:34.925351Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:11:34.925428Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:34.925471Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715665] at 72075186224037888 on unit FinishPropose 2025-06-24T21:11:34.925518Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 >> TGRpcClientLowTest::GrpcRequestProxyWithoutToken [GOOD] >> TGRpcClientLowTest::GrpcRequestProxyCheckTokenWhenItIsSpecified_Ignore >> GrpcConnectionStringParserTest::CommonClientSettingsFromConnectionString [GOOD] >> LocalityOperation::LocksFromAnotherTenants+UseSink |95.0%| [TA] $(B)/ydb/services/persqueue_v1/ut/describes_ut/test-results/unittest/{meta.json ... results_accumulator.log} |95.0%| [TA] {RESULT} $(B)/ydb/services/persqueue_v1/ut/describes_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TGRpcYdbTest::CreateTableBadRequest2 [GOOD] >> TGRpcYdbTest::CreateTableBadRequest3 >> TGRpcNewCoordinationClient::SessionReconnectReattach [GOOD] >> TGRpcNewCoordinationClientAuth::OwnersAndPermissions >> TGRpcClientLowTest::SimpleRequest [GOOD] >> TGRpcClientLowTest::SimpleRequestDummyService >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataAlterSplitThenPublish [GOOD] >> TGRpcYdbTest::ExecuteQueryExplicitSession [GOOD] >> TGRpcYdbTest::ExecuteDmlQuery >> YdbImport::ImportFromS3ToExistingTable [GOOD] >> TYqlDecimalTests::SimpleUpsertSelect >> TGRpcYdbTest::SdkUuid [GOOD] >> TGRpcYdbTest::SdkUuidViaParams >> YdbYqlClient::CreateAndAltertTableWithPartitioningBySize [GOOD] >> YdbYqlClient::CreateAndAltertTableWithPartitioningByLoad >> YdbYqlClient::TraceId [GOOD] >> YdbYqlClient::Utf8DatabasePassViaHeader >> YdbYqlClient::TestDecimal35 [GOOD] >> YdbYqlClient::TestDecimalFullStack >> TGRpcYdbTest::ExecuteQueryWithUuid [GOOD] >> TGRpcYdbTest::ExecuteQueryWithParametersBadRequest >> YdbIndexTable::AlterIndexImplBySuperUser [GOOD] >> YdbIndexTable::CreateTableAddIndex >> TGRpcLdapAuthentication::LdapAuthWithInvalidRobouserPassword [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithInvalidLogin >> YdbYqlClient::TestReadTableMultiShardWholeTableUseSnapshot [GOOD] >> YdbYqlClient::TestReadTableMultiShardWithDescribe ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataAlterSplitThenPublish [GOOD] Test command err: 2025-06-24T21:11:26.854934Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:26.855393Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:26.855548Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042c3/r3tmp/tmphqeT57/pdisk_1.dat 2025-06-24T21:11:27.305868Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:27.313660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:27.370047Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.374400Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799484582365 != 1750799484582369 2025-06-24T21:11:27.423742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.423898Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.443984Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:27.541986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:27.606432Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:27.607748Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:11:27.611661Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:11:27.611966Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:27.672495Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:11:27.673230Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:27.673366Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:27.680831Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-24T21:11:27.680998Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:27.681088Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:27.684280Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:27.684491Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:27.684594Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:11:27.695375Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:27.732390Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:27.734152Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:27.734300Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:11:27.734346Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:27.734412Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:27.734450Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:27.734655Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:11:27.734705Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:11:27.735895Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:27.736006Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:27.736071Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:27.736119Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:27.736226Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:11:27.736267Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:11:27.736312Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:11:27.736346Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:27.736391Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:27.736507Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:11:27.736542Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:11:27.736583Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:11:27.738271Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:11:27.738334Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:11:27.738463Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:27.738804Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:11:27.738877Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:11:27.738967Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:11:27.739019Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:11:27.739056Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T21:11:27.739093Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T21:11:27.739128Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:11:27.739399Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T21:11:27.739431Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T21:11:27.739456Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:11:27.739491Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:11:27.739540Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T21:11:27.739572Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T21:11:27.739599Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T21:11:27.739631Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-24T21:11:27.739651Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-24T21:11:27.740747Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T21:11:27.740792Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:27.751810Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:27.751882Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:11:27.751918Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:11:27.752085Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... line.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037889 is DelayComplete 2025-06-24T21:11:36.740088Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037889 executing on unit CompleteOperation 2025-06-24T21:11:36.740129Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715668] at 72075186224037889 to execution unit CompletedOperations 2025-06-24T21:11:36.740169Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037889 on unit CompletedOperations 2025-06-24T21:11:36.740205Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037889 is Executed 2025-06-24T21:11:36.740229Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037889 executing on unit CompletedOperations 2025-06-24T21:11:36.740263Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:281474976715668] at 72075186224037889 has finished 2025-06-24T21:11:36.740319Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:36.740351Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-06-24T21:11:36.740383Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-24T21:11:36.740426Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-24T21:11:36.751515Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:11:36.751618Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:11:36.751655Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715668] at 72075186224037889 on unit CompleteOperation 2025-06-24T21:11:36.751739Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715668] from 72075186224037889 at tablet 72075186224037889 send result 
to client [2:1098:2877], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:36.751797Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:11:36.751988Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037890 step# 3500} 2025-06-24T21:11:36.752050Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-24T21:11:36.752080Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-24T21:11:36.752368Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287940, Sender [2:1098:2877], Recipient [2:923:2728]: NKikimrTx.TEvStreamClearanceResponse TxId: 281474976715668 Cleared: true 2025-06-24T21:11:36.752412Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, processing event TEvTxProcessing::TEvStreamClearanceResponse 2025-06-24T21:11:36.752537Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:923:2728], Recipient [2:923:2728]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:11:36.752565Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:11:36.752622Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-24T21:11:36.752657Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:36.752692Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [3500:281474976715668] at 72075186224037890 for WaitForStreamClearance 2025-06-24T21:11:36.752722Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit WaitForStreamClearance 2025-06-24T21:11:36.752754Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:156: Got stream clearance for [3500:281474976715668] at 72075186224037890 2025-06-24T21:11:36.752787Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is Executed 2025-06-24T21:11:36.752818Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037890 executing on unit WaitForStreamClearance 2025-06-24T21:11:36.752847Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715668] at 72075186224037890 to execution unit ReadTableScan 2025-06-24T21:11:36.752881Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit ReadTableScan 2025-06-24T21:11:36.753095Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is Continue 2025-06-24T21:11:36.753125Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:36.753155Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037890 2025-06-24T21:11:36.753182Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:52: TPlanQueueUnit at 72075186224037890 out-of-order limits 
exceeded 2025-06-24T21:11:36.753209Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-24T21:11:36.753958Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [2:1118:2894], Recipient [2:923:2728]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-24T21:11:36.754000Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-24T21:11:36.754347Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715668, MessageQuota: 1 2025-06-24T21:11:36.754953Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037890, TxId: 281474976715668, Size: 54, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-24T21:11:36.756701Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715668, PendingAcks: 0 2025-06-24T21:11:36.756764Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037890, TxId: 281474976715668, MessageQuota: 0 2025-06-24T21:11:36.816523Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037890 2025-06-24T21:11:36.816591Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715668, at: 72075186224037890 2025-06-24T21:11:36.816791Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:923:2728], Recipient [2:923:2728]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:11:36.816834Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:11:36.816903Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-24T21:11:36.816939Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:11:36.816973Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [3500:281474976715668] at 72075186224037890 for ReadTableScan 2025-06-24T21:11:36.817001Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit ReadTableScan 2025-06-24T21:11:36.817035Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [3500:281474976715668] at 72075186224037890 error: , IsFatalError: 0 2025-06-24T21:11:36.817075Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is Executed 2025-06-24T21:11:36.817119Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037890 executing on unit ReadTableScan 2025-06-24T21:11:36.817161Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715668] at 72075186224037890 to execution unit CompleteOperation 2025-06-24T21:11:36.817188Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit CompleteOperation 2025-06-24T21:11:36.817375Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is DelayComplete 
2025-06-24T21:11:36.817402Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037890 executing on unit CompleteOperation 2025-06-24T21:11:36.817428Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715668] at 72075186224037890 to execution unit CompletedOperations 2025-06-24T21:11:36.817455Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit CompletedOperations 2025-06-24T21:11:36.817485Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is Executed 2025-06-24T21:11:36.817509Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037890 executing on unit CompletedOperations 2025-06-24T21:11:36.817535Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:281474976715668] at 72075186224037890 has finished 2025-06-24T21:11:36.817563Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:36.817590Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037890 2025-06-24T21:11:36.817619Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2025-06-24T21:11:36.817647Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-24T21:11:36.829203Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-24T21:11:36.829274Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-24T21:11:36.829309Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715668] at 72075186224037890 on unit CompleteOperation 2025-06-24T21:11:36.829368Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715668] from 72075186224037890 at tablet 72075186224037890 send result to client [2:1098:2877], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:11:36.829412Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 >> TGRpcYdbTest::CreateTableWithIndex [GOOD] >> TGRpcYdbTest::CreateYqlSession >> YdbYqlClient::TestReadTableMultiShardUseSnapshot [GOOD] >> YdbYqlClient::TestReadTableMultiShardOneRow >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientWithCorrectCerts >> YdbYqlClient::TestYqlSessionClosed [GOOD] >> YdbYqlClient::TestYqlLongSessionPrepareError >> TAuthenticationWithSqlExecution::CreateAlterUserWithHash >> YdbScripting::Params [GOOD] >> YdbTableBulkUpsert::DataValidation >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvideIncorrectCerts >> TTxDataShardUploadRows::TestUploadRowsLocks [GOOD] >> TKeyValueTest::TestRewriteThenLastValue [GOOD] >> TKeyValueTest::TestRenameWorksNewApi >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-8 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-9 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-9 [GOOD] >> 
SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-15 >> YdbQueryService::TestCreateAndAttachSession [GOOD] >> YdbQueryService::TestAttachTwice >> TGRpcNewClient::YqlQueryWithParams [GOOD] >> TGRpcNewClient::YqlExplainDataQuery >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-26 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-27 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-32 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-33 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-44 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-45 >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyHosts ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::TestUploadRowsLocks [GOOD] Test command err: 2025-06-24T21:11:26.855009Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:26.855454Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:26.855664Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00426d/r3tmp/tmpmy01f0/pdisk_1.dat 2025-06-24T21:11:27.307714Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:27.318636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:27.363358Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.370904Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799484582355 != 1750799484582359 2025-06-24T21:11:27.422034Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.422219Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.438757Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:27.540719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:27.617604Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:643:2538] 2025-06-24T21:11:27.617858Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:27.685818Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:27.691879Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:27.693670Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:27.693755Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:27.693824Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:27.694191Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:27.694580Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:27.694669Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:677:2538] in generation 1 2025-06-24T21:11:27.695262Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:645:2540] 2025-06-24T21:11:27.695438Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:27.703249Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:27.703539Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:27.704890Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-24T21:11:27.704971Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-24T21:11:27.705016Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-24T21:11:27.705325Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:27.705682Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:27.705730Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:690:2540] in generation 1 2025-06-24T21:11:27.707267Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037891 actor [1:648:2542] 2025-06-24T21:11:27.707463Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:27.717274Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:27.717509Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:27.718800Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037891 2025-06-24T21:11:27.718872Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037891 2025-06-24T21:11:27.718921Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037891 2025-06-24T21:11:27.719261Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:27.719666Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:651:2544] 2025-06-24T21:11:27.719851Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:27.728355Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:27.728449Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037891 persisting started state actor id [1:707:2542] in generation 1 2025-06-24T21:11:27.729298Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:27.729405Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:27.730695Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-24T21:11:27.730756Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-24T21:11:27.730828Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-24T21:11:27.731111Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-24T21:11:27.731245Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:27.731321Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:712:2544] in generation 1 2025-06-24T21:11:27.742331Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:27.770518Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:27.770691Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:27.770806Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:717:2580] 2025-06-24T21:11:27.770841Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:27.770874Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:27.770908Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:27.771256Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:27.771303Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-24T21:11:27.771390Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:27.771462Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:718:2581] 2025-06-24T21:11:27.771486Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-24T21:11:27.771511Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-24T21:11:27.771535Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:11:27.771914Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:27.771970Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037891 2025-06-24T21:11:27.772026Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037891 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:27.772091Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037891, actorId: [1:719:2582] 2025-06-24T21:11:27.772125Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037891 2025-06-24T21:11:27.772160Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037891, state: WaitScheme 2025-06-24T21:11:27.772182Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-24T21:11:27.772430Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 
72075186224037888 2025-06-24T21:11:27.772539Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:27.772641Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:27.772691Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-24T21:11:27.772741Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:27.772785Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:720:2583] 2025-06-24T21:11:27.772808Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-24T21:11:27.772847Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-24T21:11:27.772870Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: ... ess_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:37.712816Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:11:37.713378Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:11:37.713906Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:37.715536Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:11:37.715602Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:37.716138Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:11:37.716214Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:37.717221Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:37.717274Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:37.717323Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:11:37.717395Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:372:2366], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:11:37.717479Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:11:37.717584Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:37.718909Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain 
[OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:37.721441Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:11:37.721702Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:11:37.721783Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:11:37.769835Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:37.769962Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:703:2579], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:37.770048Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:37.776644Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:37.784623Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:37.837126Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:37.977586Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:37.991305Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:706:2582], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:11:38.079173Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:777:2622] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:38.485513Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwgeh296k7ke317g2d27g7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MTFjYjk5YjctZmEwNzE0OTQtNjNlYjE5YmQtMWZkOGJkOTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:38.492291Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:808:2639], serverId# [3:809:2640], sessionId# [0:0:0] 2025-06-24T21:11:38.492768Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-24T21:11:38.492968Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=3 2025-06-24T21:11:38.507674Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:38.935882Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwgf9376q240sjz6fb62nh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTdjMjY3ZTctZTdiMjhjOWMtZTQ0Y2UzZi01NDg2Y2UyZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:38.969280Z node 3 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715661, counter# 0 for [OwnerId: 72057594046644480, LocalPathId: 2] { items { uint32_value: 300 } } 2025-06-24T21:11:38.993543Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(36) Execute: at tablet# 72075186224037888 2025-06-24T21:11:39.008126Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(36) Complete: at tablet# 72075186224037888 2025-06-24T21:11:39.008232Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:39.008316Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2560: Waiting for PlanStep# 1501 from mediator time cast 2025-06-24T21:11:39.009195Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3780: Notified by mediator time cast with PlanStep# 1501 at tablet 72075186224037888 2025-06-24T21:11:39.009289Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:39.101471Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwgfr2dv1xzmt2r4enx03g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTdjMjY3ZTctZTdiMjhjOWMtZTQ0Y2UzZi01NDg2Y2UyZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:11:39.104252Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:5] at 72075186224037888 2025-06-24T21:11:39.104438Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=5; 2025-06-24T21:11:39.112941Z node 3 :TX_DATASHARD INFO: datashard_write_operation.cpp:707: Write transaction 5 at 72075186224037888 has an error: Operation is aborting because locks are not valid 2025-06-24T21:11:39.113298Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 5 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-24T21:11:39.113556Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 5 at tablet 72075186224037888 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-24T21:11:39.113655Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:39.113975Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [3:866:2645], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [3:815:2645]Got LOCKS BROKEN for table `/Root/table-1`. ShardID=72075186224037888, Sink=[3:866:2645].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-24T21:11:39.114540Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:859:2645], SessionActorId: [3:815:2645], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[3:815:2645]. isRollback=0 2025-06-24T21:11:39.114916Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=YTdjMjY3ZTctZTdiMjhjOWMtZTQ0Y2UzZi01NDg2Y2UyZg==, ActorId: [3:815:2645], ActorState: ExecuteState, TraceId: 01jyhwgfr2dv1xzmt2r4enx03g, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [3:860:2645] from: [3:859:2645] 2025-06-24T21:11:39.115224Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:860:2645] TxId: 281474976715662. Ctx: { TraceId: 01jyhwgfr2dv1xzmt2r4enx03g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTdjMjY3ZTctZTdiMjhjOWMtZTQ0Y2UzZi01NDg2Y2UyZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-24T21:11:39.115541Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=YTdjMjY3ZTctZTdiMjhjOWMtZTQ0Y2UzZi01NDg2Y2UyZg==, ActorId: [3:815:2645], ActorState: ExecuteState, TraceId: 01jyhwgfr2dv1xzmt2r4enx03g, Create QueryResponse for error on request, msg: 2025-06-24T21:11:39.116357Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:6] at 72075186224037888 2025-06-24T21:11:39.116411Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:420: Skip empty write operation for [0:6] at 72075186224037888 2025-06-24T21:11:39.116574Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 >> TTxDataShardUploadRows::BulkUpsertDuringAddIndexRaceCorruption [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-38 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-39 >> TGRpcYdbTest::OperationTimeout [GOOD] >> TGRpcYdbTest::OperationCancelAfter >> TGRpcClientLowTest::SimpleRequestDummyService [GOOD] >> TGRpcClientLowTest::MultipleSimpleRequests >> TGRpcYdbTest::CreateTableBadRequest3 [GOOD] >> TGRpcYdbTest::CreateAlterCopyAndDropTable >> TGRpcNewCoordinationClientAuth::OwnersAndPermissions [GOOD] >> TGRpcYdbTest::AlterTableAddIndexBadRequest >> YdbYqlClient::Utf8DatabasePassViaHeader [GOOD] >> YdbYqlClient::TestYqlTypesFromPreparedQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::BulkUpsertDuringAddIndexRaceCorruption [GOOD] Test command err: 2025-06-24T21:11:26.854951Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:26.855412Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:26.855621Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004290/r3tmp/tmpfGhkok/pdisk_1.dat 2025-06-24T21:11:27.307103Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:27.315290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:27.361908Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.370879Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799484582339 != 1750799484582343 2025-06-24T21:11:27.422502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.422657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.438372Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:27.540557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:27.609170Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:636:2537] 2025-06-24T21:11:27.609420Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:27.705196Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:27.705338Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:27.707006Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:11:27.707083Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:11:27.707256Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:11:27.707620Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:27.708652Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:27.708734Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:663:2537] in generation 1 2025-06-24T21:11:27.709289Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:639:2539] 2025-06-24T21:11:27.709476Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:11:27.719033Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:11:27.719444Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:11:27.720859Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-24T21:11:27.720927Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-24T21:11:27.720967Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-24T21:11:27.721240Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:11:27.721413Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:11:27.721478Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:670:2539] in generation 1 2025-06-24T21:11:27.732376Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:27.763559Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-24T21:11:27.763782Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:27.763892Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:673:2558] 2025-06-24T21:11:27.763929Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-24T21:11:27.763963Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-24T21:11:27.764017Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:11:27.764230Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:11:27.764267Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:11:27.764332Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:11:27.764411Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:674:2559] 2025-06-24T21:11:27.764437Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:11:27.764459Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:11:27.764487Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:11:27.764965Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 
2025-06-24T21:11:27.765075Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-24T21:11:27.765141Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:11:27.765203Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:11:27.765263Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:11:27.765307Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:27.765348Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-24T21:11:27.765387Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:11:27.765505Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:11:27.765530Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:27.765548Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:11:27.765590Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:11:27.765649Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:630:2534], serverId# [1:655:2549], sessionId# [0:0:0] 2025-06-24T21:11:27.766020Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:629:2533], serverId# [1:647:2543], sessionId# [0:0:0] 2025-06-24T21:11:27.766151Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T21:11:27.766405Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-24T21:11:27.766509Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-24T21:11:27.767061Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:11:27.767760Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:11:27.767826Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:11:27.769366Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:11:27.769502Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:11:27.780119Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:11:27.780206Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:11:27.780677Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-24T21:11:27.780721Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-24T21:11:27.941484Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:692:2571], serverId# [1:695:2574], sessionId# [0:0:0] 2025-06-24T21:11:27.941757Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:693:2572], serverId# [1:696:2575], sessionId# [0:0:0] 2025-06-24T21:11:27.961564Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 10 ... t finished with status SCHEME_ERROR 2025-06-24T21:11:40.697037Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [3:59:2106] Handle TEvExecuteKqpTransaction 2025-06-24T21:11:40.697119Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [3:59:2106] TxId# 281474976715662 ProcessProposeKqpTransaction 2025-06-24T21:11:40.698116Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwgh1a8gxggadzy3nwbert, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTRhZTkyNDYtZjhjM2EzMzMtYTdlNGNlMjYtODBlM2NjNTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:11:40.707981Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [3:1063:2856], Recipient [3:626:2530]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 3 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2025-06-24T21:11:40.708238Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-24T21:11:40.708312Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v8000/0 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v8000/18446744073709551615 ImmediateWriteEdgeReplied# v8000/18446744073709551615 2025-06-24T21:11:40.708370Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v8000/18446744073709551615 2025-06-24T21:11:40.708456Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CheckRead 2025-06-24T21:11:40.708597Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-24T21:11:40.708646Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CheckRead 2025-06-24T21:11:40.708692Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T21:11:40.708733Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T21:11:40.708788Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037888 2025-06-24T21:11:40.708830Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-24T21:11:40.708856Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:11:40.708887Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T21:11:40.708930Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit ExecuteRead 2025-06-24T21:11:40.710745Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 3 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2025-06-24T21:11:40.711134Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[3:1063:2856], 0} after executionsCount# 1 2025-06-24T21:11:40.711231Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[3:1063:2856], 0} sends rowCount# 2, bytes# 64, quota rows left# 999, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T21:11:40.711359Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[3:1063:2856], 0} finished in read 
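The request dump above spells out the whole read-iterator message (NKikimrTxDataShard.TEvRead) that the KQP executer sends to the datashard. As a reference, the fragment below rebuilds that message field by field; the Set*/Add* accessors follow ordinary protobuf codegen for the printed field names and the enum namespace is assumed, so treat it as a sketch rather than code checked against the real .proto.

// Reconstruction of the dumped TEvRead request (field values copied from the log).
NKikimrTxDataShard::TEvRead record;
record.SetReadId(0);
auto* tableId = record.MutableTableId();
tableId->SetOwnerId(72057594046644480ULL);   // schemeshard that owns the table
tableId->SetTableId(2);
tableId->SetSchemaVersion(3);
record.AddColumns(1);
record.AddColumns(2);
record.SetResultFormat(NKikimrDataEvents::FORMAT_CELLVEC);  // enum location assumed
record.SetMaxRows(1001);
record.SetMaxBytes(5242880);                 // 5 MiB per-response byte quota
record.SetReverse(false);
record.SetTotalRowsLimit(1001);
// One key range is attached ("RangesSize: 1"); its bounds are not part of the dump.

The pipeline lines that follow show the read flowing through the CheckRead, BuildAndWaitDependencies, ExecuteRead and CompletedOperations units, after which the iterator replies with rowCount# 2 / bytes# 64 and the remaining quota (999 rows, 5242816 bytes).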
2025-06-24T21:11:40.711443Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-24T21:11:40.711481Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T21:11:40.711513Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:11:40.711557Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:11:40.711613Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-24T21:11:40.711666Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:11:40.711698Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished 2025-06-24T21:11:40.711740Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T21:11:40.711889Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-24T21:11:40.713286Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [3:1063:2856], Recipient [3:626:2530]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T21:11:40.713365Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 4 } } 2025-06-24T21:11:40.908829Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [3:59:2106] Handle TEvExecuteKqpTransaction 2025-06-24T21:11:40.908912Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [3:59:2106] TxId# 281474976715663 ProcessProposeKqpTransaction 2025-06-24T21:11:40.909858Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhwghdf09t6fkrm5fbyas5q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ODdkZGNhYmEtYmMxNzVlMDgtODJhMWNjZmItNzQ5N2M5OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:11:40.919479Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [3:1093:2880], Recipient [3:864:2691]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 8 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false RangesSize: 1 2025-06-24T21:11:40.919749Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037889, FollowerId 0 2025-06-24T21:11:40.919822Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037889 CompleteEdge# v6000/281474976710759 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v5000/18446744073709551615 ImmediateWriteEdgeReplied# v5000/18446744073709551615 2025-06-24T21:11:40.919881Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037889 changed HEAD read to non-repeatable v8000/18446744073709551615 2025-06-24T21:11:40.919961Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit CheckRead 2025-06-24T21:11:40.920074Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-24T21:11:40.920139Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit CheckRead 2025-06-24T21:11:40.920210Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-24T21:11:40.920270Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-24T21:11:40.920330Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037889 2025-06-24T21:11:40.920378Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-24T21:11:40.920408Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-24T21:11:40.920436Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit ExecuteRead 2025-06-24T21:11:40.920480Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit ExecuteRead 2025-06-24T21:11:40.920616Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037889 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 8 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false } 2025-06-24T21:11:40.920979Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037889 Complete read# {[3:1093:2880], 0} after executionsCount# 1 2025-06-24T21:11:40.921048Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037889 read iterator# {[3:1093:2880], 0} sends rowCount# 2, bytes# 64, quota rows left# 32765, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T21:11:40.921163Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037889 read iterator# {[3:1093:2880], 0} finished in read 2025-06-24T21:11:40.921253Z node 3 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-24T21:11:40.921285Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit ExecuteRead 2025-06-24T21:11:40.921313Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit CompletedOperations 2025-06-24T21:11:40.921341Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit CompletedOperations 2025-06-24T21:11:40.921406Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-24T21:11:40.921433Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit CompletedOperations 2025-06-24T21:11:40.921467Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037889 has finished 2025-06-24T21:11:40.921511Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2025-06-24T21:11:40.921641Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2025-06-24T21:11:40.939796Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [3:1093:2880], Recipient [3:864:2691]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T21:11:40.939893Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037889 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 4 } } >> TGRpcClientLowTest::GrpcRequestProxyCheckTokenWhenItIsSpecified_Ignore [GOOD] >> TGRpcClientLowTest::GrpcRequestProxyCheckTokenWhenItIsSpecified_Check >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientProvidesCorrectCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientProvidesEmptyClientCerts >> YdbYqlClient::ColumnFamiliesWithStorageAndIndex [GOOD] >> YdbYqlClient::ColumnFamiliesDescriptionWithStorageAndIndex >> YdbYqlClient::DiscoveryLocationOverride |95.0%| [TA] $(B)/ydb/core/tx/datashard/ut_upload_rows/test-results/unittest/{meta.json ... results_accumulator.log} |95.0%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_upload_rows/test-results/unittest/{meta.json ... 
results_accumulator.log} >> YdbIndexTable::CreateTableAddIndex [GOOD] >> YdbIndexTable::AlterTableAddIndex >> TGRpcYdbTest::SdkUuidViaParams [GOOD] >> TGRpcYdbTest::ReadTable >> YdbYqlClient::CreateAndAltertTableWithPartitioningByLoad [GOOD] >> YdbYqlClient::CreateAndAltertTableWithReadReplicasSettings >> TGRpcYdbTest::ExecuteQueryWithParametersBadRequest [GOOD] >> TGRpcYdbTest::ExecuteQueryWithParametersExplicitSession >> TGRpcYdbTest::ExecuteDmlQuery [GOOD] >> TGRpcYdbTest::ExecutePreparedQuery >> TGRpcYdbTest::CreateYqlSession [GOOD] >> TGRpcYdbTest::CreateYqlSessionExecuteQuery >> TGRpcLdapAuthentication::LdapAuthWithInvalidLogin [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithInvalidPassword >> TYqlDecimalTests::SimpleUpsertSelect [GOOD] >> TYqlDecimalTests::NegativeValues >> YdbYqlClient::TestReadTableMultiShardOneRow [GOOD] >> YdbYqlClient::TestReadTableBatchLimits >> YdbYqlClient::TestYqlLongSessionPrepareError [GOOD] >> YdbYqlClient::TestYqlLongSessionMultipleErrors >> YdbTableBulkUpsert::DataValidation [GOOD] >> YdbTableBulkUpsert::AsyncIndexShouldFail >> YdbTableBulkUpsert::ValidRetry >> YdbYqlClient::TestReadTableMultiShardWithDescribe [GOOD] >> YdbYqlClient::TestReadTableMultiShardWithDescribeAndRowLimit >> YdbYqlClient::TestDecimalFullStack [GOOD] >> YdbYqlClient::TestDescribeDirectory >> YdbQueryService::TestAttachTwice [GOOD] >> YdbQueryService::TestForbidExecuteWithoutAttach >> TGRpcNewClient::YqlExplainDataQuery [GOOD] >> TGRpcNewCoordinationClient::CheckUnauthorized >> TAuthenticationWithSqlExecution::CreateAlterUserWithHash [GOOD] >> TDatabaseQuotas::DisableWritesToDatabase >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-15 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-16 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-9 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-10 >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyHosts [GOOD] >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyBaseDn >> LocalityOperation::LocksFromAnotherTenants+UseSink [GOOD] >> LocalityOperation::LocksFromAnotherTenants-UseSink >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-27 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-28 >> TKeyValueTest::TestSetExecutorFastLogPolicy [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-45 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-46 >> TGRpcYdbTest::OperationCancelAfter [GOOD] >> TGRpcYdbTest::KeepAlive >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-33 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-34 >> TGRpcYdbTest::CreateAlterCopyAndDropTable [GOOD] >> TGRpcYdbTest::CreateDeleteYqlSession ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestSetExecutorFastLogPolicy [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is 
[0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:82:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:82:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! 
new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:83:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:85:2114] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:89:2057] recipient: [7:85:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! 
new actor is[7:88:2115] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:106:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:85:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:88:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:89:2057] recipient: [8:87:2116] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:91:2057] recipient: [8:87:2116] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! new actor is[8:90:2117] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:176:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:85:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:88:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:89:2057] recipient: [9:87:2116] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:91:2057] recipient: [9:87:2116] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! new actor is[9:90:2117] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:176:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:86:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:89:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:90:2057] recipient: [10:88:2116] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:92:2057] recipient: [10:88:2116] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! 
new actor is[10:91:2117] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:177:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:87:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:90:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:91:2057] recipient: [11:89:2117] Leader for TabletID 72057594037927937 is [11:92:2118] sender: [11:93:2057] recipient: [11:89:2117] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! new actor is[11:92:2118] Leader for TabletID 72057594037927937 is [11:92:2118] sender: [11:112:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (a ... D 72057594037927937 is [57:58:2098] sender: [57:140:2057] recipient: [57:37:2084] Leader for TabletID 72057594037927937 is [57:58:2098] sender: [57:143:2057] recipient: [57:142:2157] Leader for TabletID 72057594037927937 is [57:58:2098] sender: [57:144:2057] recipient: [57:14:2061] Leader for TabletID 72057594037927937 is [57:145:2158] sender: [57:146:2057] recipient: [57:142:2157] !Reboot 72057594037927937 (actor [57:58:2098]) rebooted! !Reboot 72057594037927937 (actor [57:58:2098]) tablet resolver refreshed! new actor is[57:145:2158] Leader for TabletID 72057594037927937 is [0:0:0] sender: [58:56:2057] recipient: [58:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [58:56:2057] recipient: [58:51:2096] Leader for TabletID 72057594037927937 is [58:58:2098] sender: [58:59:2057] recipient: [58:51:2096] Leader for TabletID 72057594037927937 is [58:58:2098] sender: [58:76:2057] recipient: [58:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [59:56:2057] recipient: [59:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [59:56:2057] recipient: [59:53:2096] Leader for TabletID 72057594037927937 is [59:58:2098] sender: [59:59:2057] recipient: [59:53:2096] Leader for TabletID 72057594037927937 is [59:58:2098] sender: [59:76:2057] recipient: [59:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [60:56:2057] recipient: [60:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [60:56:2057] recipient: [60:52:2096] Leader for TabletID 72057594037927937 is [60:58:2098] sender: [60:59:2057] recipient: [60:52:2096] Leader for TabletID 72057594037927937 is [60:58:2098] sender: [60:76:2057] recipient: [60:14:2061] !Reboot 72057594037927937 (actor [60:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
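The repeated blocks above all follow the same reboot-on-event pattern: the test waits until the KeyValue tablet emits a particular event (TEvKeyValue::TEvExecuteTransaction, TEvIntermediate, TEvRead, TEvNotify, TEvCollect, TEvRequest, or TEvTabletPipe::TEvServerConnected), reboots tablet 72057594037927937, waits for the tablet resolver to refresh, and then continues against the new leader actor. The outline below is only an illustration of that loop; every helper name is invented, and only the event types and the reboot/refresh sequence come from the log.

// Hypothetical outline of the reboot-on-event test step seen above.
void RebootOnObservedEvent(TTestContext& ctx, ui64 tabletId, ui32 eventType) {
    ctx.WaitForEvent(eventType);               // "!Reboot ... on event NKikimr::TEvKeyValue::TEvRead !"
    ctx.RebootTablet(tabletId);                // "!Reboot 72057594037927937 ... rebooted!"
    ctx.WaitTabletResolverRefreshed(tabletId); // "tablet resolver refreshed!"
    // Subsequent requests are served by the new leader actor ("new actor is[...]").
}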
Leader for TabletID 72057594037927937 is [60:58:2098] sender: [60:78:2057] recipient: [60:37:2084] Leader for TabletID 72057594037927937 is [60:58:2098] sender: [60:81:2057] recipient: [60:14:2061] Leader for TabletID 72057594037927937 is [60:58:2098] sender: [60:82:2057] recipient: [60:80:2111] Leader for TabletID 72057594037927937 is [60:83:2112] sender: [60:84:2057] recipient: [60:80:2111] !Reboot 72057594037927937 (actor [60:58:2098]) rebooted! !Reboot 72057594037927937 (actor [60:58:2098]) tablet resolver refreshed! new actor is[60:83:2112] Leader for TabletID 72057594037927937 is [60:83:2112] sender: [60:169:2057] recipient: [60:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [61:56:2057] recipient: [61:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [61:56:2057] recipient: [61:52:2096] Leader for TabletID 72057594037927937 is [61:58:2098] sender: [61:59:2057] recipient: [61:52:2096] Leader for TabletID 72057594037927937 is [61:58:2098] sender: [61:76:2057] recipient: [61:14:2061] !Reboot 72057594037927937 (actor [61:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [61:58:2098] sender: [61:78:2057] recipient: [61:37:2084] Leader for TabletID 72057594037927937 is [61:58:2098] sender: [61:81:2057] recipient: [61:14:2061] Leader for TabletID 72057594037927937 is [61:58:2098] sender: [61:82:2057] recipient: [61:80:2111] Leader for TabletID 72057594037927937 is [61:83:2112] sender: [61:84:2057] recipient: [61:80:2111] !Reboot 72057594037927937 (actor [61:58:2098]) rebooted! !Reboot 72057594037927937 (actor [61:58:2098]) tablet resolver refreshed! new actor is[61:83:2112] Leader for TabletID 72057594037927937 is [61:83:2112] sender: [61:169:2057] recipient: [61:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [62:56:2057] recipient: [62:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [62:56:2057] recipient: [62:52:2096] Leader for TabletID 72057594037927937 is [62:58:2098] sender: [62:59:2057] recipient: [62:52:2096] Leader for TabletID 72057594037927937 is [62:58:2098] sender: [62:76:2057] recipient: [62:14:2061] !Reboot 72057594037927937 (actor [62:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [62:58:2098] sender: [62:79:2057] recipient: [62:37:2084] Leader for TabletID 72057594037927937 is [62:58:2098] sender: [62:82:2057] recipient: [62:81:2111] Leader for TabletID 72057594037927937 is [62:58:2098] sender: [62:83:2057] recipient: [62:14:2061] Leader for TabletID 72057594037927937 is [62:84:2112] sender: [62:85:2057] recipient: [62:81:2111] !Reboot 72057594037927937 (actor [62:58:2098]) rebooted! !Reboot 72057594037927937 (actor [62:58:2098]) tablet resolver refreshed! new actor is[62:84:2112] Leader for TabletID 72057594037927937 is [62:84:2112] sender: [62:170:2057] recipient: [62:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [63:56:2057] recipient: [63:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [63:56:2057] recipient: [63:52:2096] Leader for TabletID 72057594037927937 is [63:58:2098] sender: [63:59:2057] recipient: [63:52:2096] Leader for TabletID 72057594037927937 is [63:58:2098] sender: [63:76:2057] recipient: [63:14:2061] !Reboot 72057594037927937 (actor [63:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [63:58:2098] sender: [63:81:2057] recipient: [63:37:2084] Leader for TabletID 72057594037927937 is [63:58:2098] sender: [63:84:2057] recipient: [63:14:2061] Leader for TabletID 72057594037927937 is [63:58:2098] sender: [63:85:2057] recipient: [63:83:2113] Leader for TabletID 72057594037927937 is [63:86:2114] sender: [63:87:2057] recipient: [63:83:2113] !Reboot 72057594037927937 (actor [63:58:2098]) rebooted! !Reboot 72057594037927937 (actor [63:58:2098]) tablet resolver refreshed! new actor is[63:86:2114] Leader for TabletID 72057594037927937 is [63:86:2114] sender: [63:172:2057] recipient: [63:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [64:56:2057] recipient: [64:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [64:56:2057] recipient: [64:52:2096] Leader for TabletID 72057594037927937 is [64:58:2098] sender: [64:59:2057] recipient: [64:52:2096] Leader for TabletID 72057594037927937 is [64:58:2098] sender: [64:76:2057] recipient: [64:14:2061] !Reboot 72057594037927937 (actor [64:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [64:58:2098] sender: [64:81:2057] recipient: [64:37:2084] Leader for TabletID 72057594037927937 is [64:58:2098] sender: [64:83:2057] recipient: [64:14:2061] Leader for TabletID 72057594037927937 is [64:58:2098] sender: [64:85:2057] recipient: [64:84:2113] Leader for TabletID 72057594037927937 is [64:86:2114] sender: [64:87:2057] recipient: [64:84:2113] !Reboot 72057594037927937 (actor [64:58:2098]) rebooted! !Reboot 72057594037927937 (actor [64:58:2098]) tablet resolver refreshed! new actor is[64:86:2114] Leader for TabletID 72057594037927937 is [64:86:2114] sender: [64:172:2057] recipient: [64:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [65:56:2057] recipient: [65:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [65:56:2057] recipient: [65:52:2096] Leader for TabletID 72057594037927937 is [65:58:2098] sender: [65:59:2057] recipient: [65:52:2096] Leader for TabletID 72057594037927937 is [65:58:2098] sender: [65:76:2057] recipient: [65:14:2061] !Reboot 72057594037927937 (actor [65:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [65:58:2098] sender: [65:82:2057] recipient: [65:37:2084] Leader for TabletID 72057594037927937 is [65:58:2098] sender: [65:85:2057] recipient: [65:14:2061] Leader for TabletID 72057594037927937 is [65:58:2098] sender: [65:86:2057] recipient: [65:84:2113] Leader for TabletID 72057594037927937 is [65:87:2114] sender: [65:88:2057] recipient: [65:84:2113] !Reboot 72057594037927937 (actor [65:58:2098]) rebooted! !Reboot 72057594037927937 (actor [65:58:2098]) tablet resolver refreshed! new actor is[65:87:2114] Leader for TabletID 72057594037927937 is [65:87:2114] sender: [65:173:2057] recipient: [65:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [66:56:2057] recipient: [66:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [66:56:2057] recipient: [66:53:2096] Leader for TabletID 72057594037927937 is [66:58:2098] sender: [66:59:2057] recipient: [66:53:2096] Leader for TabletID 72057594037927937 is [66:58:2098] sender: [66:76:2057] recipient: [66:14:2061] !Reboot 72057594037927937 (actor [66:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [66:58:2098] sender: [66:85:2057] recipient: [66:37:2084] Leader for TabletID 72057594037927937 is [66:58:2098] sender: [66:87:2057] recipient: [66:14:2061] Leader for TabletID 72057594037927937 is [66:58:2098] sender: [66:89:2057] recipient: [66:88:2116] Leader for TabletID 72057594037927937 is [66:90:2117] sender: [66:91:2057] recipient: [66:88:2116] !Reboot 72057594037927937 (actor [66:58:2098]) rebooted! !Reboot 72057594037927937 (actor [66:58:2098]) tablet resolver refreshed! new actor is[66:90:2117] Leader for TabletID 72057594037927937 is [66:90:2117] sender: [66:176:2057] recipient: [66:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [67:56:2057] recipient: [67:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [67:56:2057] recipient: [67:52:2096] Leader for TabletID 72057594037927937 is [67:58:2098] sender: [67:59:2057] recipient: [67:52:2096] Leader for TabletID 72057594037927937 is [67:58:2098] sender: [67:76:2057] recipient: [67:14:2061] !Reboot 72057594037927937 (actor [67:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [67:58:2098] sender: [67:85:2057] recipient: [67:37:2084] Leader for TabletID 72057594037927937 is [67:58:2098] sender: [67:88:2057] recipient: [67:14:2061] Leader for TabletID 72057594037927937 is [67:58:2098] sender: [67:89:2057] recipient: [67:87:2116] Leader for TabletID 72057594037927937 is [67:90:2117] sender: [67:91:2057] recipient: [67:87:2116] !Reboot 72057594037927937 (actor [67:58:2098]) rebooted! !Reboot 72057594037927937 (actor [67:58:2098]) tablet resolver refreshed! new actor is[67:90:2117] Leader for TabletID 72057594037927937 is [67:90:2117] sender: [67:176:2057] recipient: [67:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [68:56:2057] recipient: [68:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [68:56:2057] recipient: [68:52:2096] Leader for TabletID 72057594037927937 is [68:58:2098] sender: [68:59:2057] recipient: [68:52:2096] Leader for TabletID 72057594037927937 is [68:58:2098] sender: [68:76:2057] recipient: [68:14:2061] !Reboot 72057594037927937 (actor [68:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [68:58:2098] sender: [68:86:2057] recipient: [68:37:2084] Leader for TabletID 72057594037927937 is [68:58:2098] sender: [68:89:2057] recipient: [68:14:2061] Leader for TabletID 72057594037927937 is [68:58:2098] sender: [68:90:2057] recipient: [68:88:2116] Leader for TabletID 72057594037927937 is [68:91:2117] sender: [68:92:2057] recipient: [68:88:2116] !Reboot 72057594037927937 (actor [68:58:2098]) rebooted! !Reboot 72057594037927937 (actor [68:58:2098]) tablet resolver refreshed! 
new actor is[68:91:2117] Leader for TabletID 72057594037927937 is [68:91:2117] sender: [68:177:2057] recipient: [68:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [69:56:2057] recipient: [69:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [69:56:2057] recipient: [69:53:2096] Leader for TabletID 72057594037927937 is [69:58:2098] sender: [69:59:2057] recipient: [69:53:2096] Leader for TabletID 72057594037927937 is [69:58:2098] sender: [69:76:2057] recipient: [69:14:2061] >> TGRpcYdbTest::AlterTableAddIndexBadRequest [GOOD] >> TGRpcYdbTest::BeginTxRequestError >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-39 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-40 >> TGRpcClientLowTest::GrpcRequestProxyCheckTokenWhenItIsSpecified_Check [GOOD] >> TGRpcClientLowTest::ChangeAcl >> YdbYqlClient::DiscoveryLocationOverride [GOOD] >> YdbYqlClient::DeleteTableWithDeletedIndex >> YdbYqlClient::TestYqlTypesFromPreparedQuery [GOOD] >> YdbIndexTable::AlterTableAddIndex [GOOD] >> YdbLogStore::AlterLogStore >> TGRpcYdbTest::ExecuteQueryWithParametersExplicitSession [GOOD] >> TGRpcYdbTest::ExplainQuery >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientWithCorrectCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesEmptyClientCerts >> TGRpcClientLowTest::MultipleSimpleRequests [GOOD] >> TGRpcLdapAuthentication::LdapAuthServerIsUnavailable >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvideIncorrectCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientDoesNotProvideAnyCerts >> TGRpcLdapAuthentication::LdapAuthWithInvalidPassword [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithEmptyPassword >> TGRpcYdbTest::ExecutePreparedQuery [GOOD] >> TGRpcYdbTest::ExecuteQueryCache >> TGRpcYdbTest::CreateYqlSessionExecuteQuery [GOOD] >> TGRpcYdbTest::DeleteFromAfterCreate >> YdbYqlClient::CreateAndAltertTableWithReadReplicasSettings [GOOD] >> YdbYqlClient::CreateTableWithMESettings >> TGRpcYdbTest::ReadTable [GOOD] >> TGRpcYdbTest::ReadTablePg >> ClientStatsCollector::PrepareQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestYqlTypesFromPreparedQuery [GOOD] Test command err: 2025-06-24T21:11:27.078995Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626539265660930:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.083499Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449b/r3tmp/tmpPt2goy/pdisk_1.dat 2025-06-24T21:11:27.647437Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.649848Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.649949Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.660175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13895, node 1 2025-06-24T21:11:27.939556Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:27.939581Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:27.939588Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:27.939734Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:28.110754Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26063 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:28.679264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:30.668592Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626552150563798:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.668723Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:31.151982Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626556445531138:2627] txid# 281474976710658, issues: { message: "Column Key has wrong key type Json" severity: 1 } 2025-06-24T21:11:31.190533Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626556445531148:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:31.190614Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:31.220505Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626556445531155:2637] txid# 281474976710659, issues: { message: "Column Key has wrong key type Yson" severity: 1 } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449b/r3tmp/tmpFdum5o/pdisk_1.dat TServer::EnableGrpc on GrpcPort 63633, node 4 TClient is connected to server localhost:9191 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449b/r3tmp/tmpsst53f/pdisk_1.dat TServer::EnableGrpc on GrpcPort 2736, node 7 TClient is connected to server localhost:20017 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
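The two TX_PROXY errors above ("Column Key has wrong key type Json" / "... Yson") are scheme-request rejections of tables whose primary key column is typed Json or Yson. A minimal way to trigger the same rejection through the public C++ SDK is sketched below, assuming an invented table name; the header path may differ between SDK layouts, and the point is only that Json/Yson columns cannot be key columns.

// Hypothetical repro for the "wrong key type Json" rejection above.
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>  // path depends on SDK layout

using namespace NYdb;
using namespace NYdb::NTable;

TAsyncStatus CreateJsonKeyedTable(TSession session) {
    auto desc = TTableBuilder()
        .AddNullableColumn("Key", EPrimitiveType::Json)   // Json/Yson are not valid key types
        .AddNullableColumn("Value", EPrimitiveType::Utf8)
        .SetPrimaryKeyColumn("Key")
        .Build();
    return session.CreateTable("/Root/JsonKeyed", std::move(desc));
}

The proposal is rejected by the scheme request actor itself, which is why the log shows only the schemereq.cpp errors for txid 281474976710658 and 281474976710659 and no datashard activity for those transactions.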
2025-06-24T21:11:43.686117Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626607891718079:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:43.687438Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449b/r3tmp/tmpdEfZWC/pdisk_1.dat 2025-06-24T21:11:43.883652Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:43.907050Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:43.907123Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:43.912905Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24244, node 10 2025-06-24T21:11:44.107857Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:44.107883Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:44.107893Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:44.108040Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19771 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:44.506176Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:44.743294Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:47.775626Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626625071588257:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:47.775761Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:47.776362Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626625071588269:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:47.781033Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:47.808694Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519626625071588271:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:11:47.912224Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519626625071588336:2664] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> YdbYqlClient::TestYqlLongSessionMultipleErrors [GOOD] >> YdbYqlClient::ColumnFamiliesDescriptionWithStorageAndIndex [GOOD] >> YdbYqlClient::ColumnFamiliesExternalBlobsWithoutDefaultProfile >> YdbQueryService::TestForbidExecuteWithoutAttach [GOOD] >> YdbQueryService::TestCreateDropAttachSession >> YdbTableBulkUpsert::AsyncIndexShouldFail [GOOD] >> YdbTableBulkUpsert::AsyncIndexShouldSucceed >> TGRpcNewCoordinationClient::CheckUnauthorized [GOOD] >> TGRpcNewCoordinationClient::BasicMethods >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyBaseDn [GOOD] >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyBindDn >> YdbYqlClient::TestReadTableMultiShardWithDescribeAndRowLimit [GOOD] >> TYqlDecimalTests::NegativeValues [GOOD] >> TYqlDecimalTests::DecimalKey ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestYqlLongSessionMultipleErrors [GOOD] Test command err: 2025-06-24T21:11:28.943755Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626544395254656:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:28.944438Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004478/r3tmp/tmpoHw0tW/pdisk_1.dat 2025-06-24T21:11:29.348246Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:29.402510Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:29.402640Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:29.406187Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25501, node 1 2025-06-24T21:11:29.553744Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:29.553763Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:29.553773Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:29.553879Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31036 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:29.915916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:29.957089Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:32.136063Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626561575124819:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:32.136218Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:32.530997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:32.734278Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626561575125001:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:32.734365Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:32.734963Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626561575125006:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:32.738915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:32.779385Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626561575125008:2311], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:11:32.848638Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626561575125080:2797] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:32.962766Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626561575125091:2315], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:25: Error: At function: KiWriteTable!
:2:43: Error: Failed to convert type: Struct<'Key':String,'Value':String> to Struct<'Key':Uint32?,'Value':String?>
:2:43: Error: Failed to convert 'Key': String to Optional
:2:43: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-24T21:11:32.963524Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NTY4MTk4MWQtZGJmMzM0YTEtNWFlZWVjMDAtNTIyYWIyZGQ=, ActorId: [1:7519626561575124816:2295], ActorState: ExecuteState, TraceId: 01jyhwg9kx61673pndev2et0zc, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:11:34.609253Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626571384216323:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:34.609545Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004478/r3tmp/tmpXis6AV/pdisk_1.dat 2025-06-24T21:11:34.842266Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:34.865047Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:34.865130Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:34.871101Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61048, node 4 2025-06-24T21:11:35.043771Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:35.043794Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:35.043802Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:35.043928Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30191 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:35.307972Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:35.617156Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:37.664765Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626584269119104:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:37.664861Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:39.511346Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626591218074527:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:39.519008Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath ... 08397944896:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:43.165937Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:43.166479Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626608397944901:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:43.171061Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:43.219524Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519626608397944903:2311], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:11:43.312186Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519626608397944983:2787] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:43.431960Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwgkswfg0fvsphjb3z0mcb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZTNkZDJlOTItNTEyOTBiNjEtNzA2YmRiMzktZmFiZDY5OTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:43.534153Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7519626608397945028:2322], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: KiReadTable!
:2:13: Error: Cannot find table 'db.[Root/BadTable]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:11:43.536354Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=7&id=ZTNkZDJlOTItNTEyOTBiNjEtNzA2YmRiMzktZmFiZDY5OTM=, ActorId: [7:7519626604102977428:2295], ActorState: ExecuteState, TraceId: 01jyhwgm390exc9kraam6aph55, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:11:43.616152Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwgm618cdp75yvbdjz7qnn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZTNkZDJlOTItNTEyOTBiNjEtNzA2YmRiMzktZmFiZDY5OTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:43.788273Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhwgm8v5wkhhvd4wg5h2tqa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZTNkZDJlOTItNTEyOTBiNjEtNzA2YmRiMzktZmFiZDY5OTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:45.647468Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626615870537161:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:45.647515Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004478/r3tmp/tmpooU3Yr/pdisk_1.dat 2025-06-24T21:11:45.985814Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:46.031949Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:46.032034Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:46.046387Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21727, node 10 2025-06-24T21:11:46.263732Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:46.263757Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:46.263766Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:46.263922Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31738 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:11:46.670764Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:46.732306Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:49.647297Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626633050407347:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:49.647412Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:49.681775Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:49.805850Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626633050407513:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:49.805957Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:49.806179Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626633050407518:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:49.809617Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:49.841346Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519626633050407520:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:11:49.944343Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519626633050407597:2803] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:49.961457Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7519626633050407616:2316], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:25: Error: At function: KiWriteTable!
:2:25: Error: Cannot find table 'db.[Root/BadTable1]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:11:49.963491Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=10&id=YjRlMGQ0M2UtMmY4ZTFiNTktYTVmNTNlMzAtMTE0ZjdiYjk=, ActorId: [10:7519626633050407341:2295], ActorState: ExecuteState, TraceId: 01jyhwgt9dczs41achk7emy0ba, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:11:49.996014Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7519626633050407637:2322], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:25: Error: At function: KiWriteTable!
:2:25: Error: Cannot find table 'db.[Root/BadTable2]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:11:49.998130Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=10&id=YjRlMGQ0M2UtMmY4ZTFiNTktYTVmNTNlMzAtMTE0ZjdiYjk=, ActorId: [10:7519626633050407341:2295], ActorState: ExecuteState, TraceId: 01jyhwgtev61dmxbx1cnqa9q1f, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: >> YdbTableBulkUpsert::ValidRetry [GOOD] >> YdbTableBulkUpsert::Types >> TTableProfileTests::ExplicitPartitionsSimple >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-10 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-11 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-16 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-17 >> YdbYqlClient::TestReadTableBatchLimits [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestReadTableMultiShardWithDescribeAndRowLimit [GOOD] Test command err: 2025-06-24T21:11:27.147195Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626538839570127:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.147459Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004495/r3tmp/tmp2HRByU/pdisk_1.dat 2025-06-24T21:11:27.640301Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.640407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.665867Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.685892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24209, node 1 2025-06-24T21:11:27.902264Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:27.902292Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:27.902300Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:27.902404Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:28.157690Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26038 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:28.556958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:28.594072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:11:28.641842Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:592: Got grpc request# ListEndpointsRequest, traceId# 01jyhwg5kzfmgff0895jwr5qw6, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:42562, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# 9.996344s 2025-06-24T21:11:28.668411Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# CreateSessionRequest, traceId# 01jyhwg5mm8k2e0tvv9gs4td35, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:42572, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-24T21:11:30.512658Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# CreateTableRequest, traceId# 01jyhwg7ega5z52a26kmd8ysz2, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:42578, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-24T21:11:30.513525Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519626538839570311:2142] Handle TEvProposeTransaction 2025-06-24T21:11:30.513555Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519626538839570311:2142] TxId# 281474976710658 ProcessProposeTransaction 2025-06-24T21:11:30.513623Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519626538839570311:2142] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519626551724472990:2621] 2025-06-24T21:11:30.603711Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519626551724472990:2621] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "Test" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Fk" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" KeyColumnNames: "Fk" UniformPartitionsCount: 16 PartitionConfig { } Temporary: 
false } CreateIndexedTable { } } } DatabaseName: "" RequestType: "" PeerName: "ipv6:[::1]:42578" 2025-06-24T21:11:30.603765Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519626551724472990:2621] txid# 281474976710658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:11:30.604194Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519626551724472990:2621] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:11:30.604258Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519626551724472990:2621] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:11:30.604556Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519626551724472990:2621] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:30.604718Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519626551724472990:2621] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:11:30.604801Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519626551724472990:2621] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-24T21:11:30.604934Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519626551724472990:2621] txid# 281474976710658 HANDLE EvClientConnected 2025-06-24T21:11:30.607174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.617468Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519626551724472990:2621] txid# 281474976710658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710658} 2025-06-24T21:11:30.617544Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519626551724472990:2621] txid# 281474976710658 SEND to# [1:7519626551724472989:2296] Source {TEvProposeTransactionStatus txid# 281474976710658 Status# 53} 2025-06-24T21:11:30.618172Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:11:30.618271Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:11:30.618291Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:11:30.618324Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:11:30.690838Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626551724473056:2685], Recipient [1:7519626551724473192:2299]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.691884Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626551724473062:2691], Recipient [1:7519626551724473234:2312]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.692526Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626551724473054:2683], Recipient [1:7519626551724473196:2303]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.693069Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626551724473059:2688], Recipient [1:7519626551724473274:2314]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.693400Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626551724473053:2682], Recipient [1:7519626551724473195:2302]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.693744Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626551724473063:2692], Recipient [1:7519626551724473200:2307]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.693928Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626551724473060:2689], Recipient [1:7519626551724473198:2305]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.694092Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626551724473049:2678], Recipient [1:7519626551724473244:2313]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.694717Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626551724473061:2690], Recipient [1:7519626551724473203:2310]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.695030Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626551724473050:2679], Recipient [1:7519626551724473201:2308]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.695323Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626551724473048:2677], Recipient [1:7519626551724473193:2300]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.696061Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626551724473055:2684], Recipient [1:7519626551724473199:2306]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.696572Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626551724473057:2686], Recipient [1:7519626551724473197:2304]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.696750Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626551724473052:2681], Recipient [1:7519626551724473194:2301]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.697349Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626551724473058:2687], Recipient [1:7519626551 ... 
e 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037897 2025-06-24T21:11:51.070625Z node 10 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037897 2025-06-24T21:11:51.073838Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [10:7519626645131512814:2155], Recipient [10:7519626636541577219:2308]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-24T21:11:51.073870Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-24T21:11:51.073928Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:267: [10:7519626645131512795:2362] Adding quota request to queue ShardId: 0, TxId: 281474976715680 2025-06-24T21:11:51.073951Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:629: [10:7519626645131512795:2362] Assign stream quota to Shard 0, Quota 5, TxId 281474976715680 Reserved: 5 of 25, Queued: 0 2025-06-24T21:11:51.074063Z node 10 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037897, TxId: 281474976715681, MessageQuota: 5 2025-06-24T21:11:51.076241Z node 10 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037897, TxId: 281474976715681, Size: 54, Rows: 0, PendingAcks: 1, MessageQuota: 4 2025-06-24T21:11:51.076317Z node 10 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037897, TxId: 281474976715681, PendingAcks: 0 2025-06-24T21:11:51.076348Z node 10 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037897, TxId: 281474976715681, MessageQuota: 4 2025-06-24T21:11:51.076439Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:647: [10:7519626645131512795:2362] got stream part, size: 75, RU required: 128 rate limiter absent 2025-06-24T21:11:51.076786Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:563: [10:7519626645131512795:2362] Starting inactivity timer for 600.000000s with tag 3 2025-06-24T21:11:51.078699Z node 10 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037897 2025-06-24T21:11:51.078720Z node 10 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715681, at: 72075186224037897 2025-06-24T21:11:51.078827Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [10:7519626636541577219:2308], Recipient [10:7519626636541577219:2308]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:11:51.078851Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:11:51.078900Z node 10 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037897 2025-06-24T21:11:51.078934Z node 10 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037897 active 1 active planned 0 immediate 1 planned 0 2025-06-24T21:11:51.078971Z node 10 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715681] at 72075186224037897 for ReadTableScan 2025-06-24T21:11:51.078989Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715681] at 72075186224037897 on unit ReadTableScan 2025-06-24T21:11:51.079014Z node 10 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715681] at 
72075186224037897 error: , IsFatalError: 0 2025-06-24T21:11:51.079042Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715681] at 72075186224037897 is Executed 2025-06-24T21:11:51.079068Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715681] at 72075186224037897 executing on unit ReadTableScan 2025-06-24T21:11:51.079082Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715681] at 72075186224037897 to execution unit FinishPropose 2025-06-24T21:11:51.079094Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715681] at 72075186224037897 on unit FinishPropose 2025-06-24T21:11:51.079118Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715681] at 72075186224037897 is DelayComplete 2025-06-24T21:11:51.079137Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715681] at 72075186224037897 executing on unit FinishPropose 2025-06-24T21:11:51.079166Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715681] at 72075186224037897 to execution unit CompletedOperations 2025-06-24T21:11:51.079175Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715681] at 72075186224037897 on unit CompletedOperations 2025-06-24T21:11:51.079219Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715681] at 72075186224037897 is Executed 2025-06-24T21:11:51.079232Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715681] at 72075186224037897 executing on unit CompletedOperations 2025-06-24T21:11:51.079244Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715681] at 72075186224037897 has finished 2025-06-24T21:11:51.079256Z node 10 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037897 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:11:51.079267Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037897 2025-06-24T21:11:51.079276Z node 10 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037897 has no attached operations 2025-06-24T21:11:51.079287Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037897 2025-06-24T21:11:51.079332Z node 10 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037897 2025-06-24T21:11:51.079345Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715681] at 72075186224037897 on unit FinishPropose 2025-06-24T21:11:51.079375Z node 10 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715681 at tablet 72075186224037897 send to client, exec latency: 9 ms, propose latency: 9 ms, status: COMPLETE 2025-06-24T21:11:51.079420Z node 10 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037897 2025-06-24T21:11:51.080365Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549569, Sender [10:7519626645131512796:2362], Recipient [10:7519626636541577219:2308]: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715681 2025-06-24T21:11:51.080383Z node 10 :TX_DATASHARD 
TRACE: datashard_impl.h:3174: StateWork, processing event TEvDataShard::TEvCancelTransactionProposal 2025-06-24T21:11:51.080394Z node 10 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:73: Got TEvDataShard::TEvCancelTransactionProposal 72075186224037897 txId 281474976715681 2025-06-24T21:11:51.080427Z node 10 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:44: Start TTxCancelTransactionProposal at tablet 72075186224037897 txId 281474976715681 2025-06-24T21:11:51.080514Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287431, Sender [10:7519626645131512796:2362], Recipient [10:7519626636541577219:2308]: NKikimrTx.TEvInterruptTransaction TxId: 281474976715681 2025-06-24T21:11:51.080529Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvTxProcessing::TEvInterruptTransaction 2025-06-24T21:11:51.080604Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [10:7519626645131512796:2362], Recipient [10:7519626636541577219:2308]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1750799511104 TxId: 281474976715680 2025-06-24T21:11:51.083798Z node 10 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [10:7519626645131512795:2362] Finish grpc stream, status: 400000 2025-06-24T21:11:51.091067Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0001adc80] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-06-24T21:11:51.091367Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0001ba880] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-06-24T21:11:51.091546Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00002a680] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-06-24T21:11:51.091707Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000137a80] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-06-24T21:11:51.091936Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000092a80] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-06-24T21:11:51.092109Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000094e80] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-06-24T21:11:51.092295Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000c4280] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-06-24T21:11:51.092447Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000130280] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-06-24T21:11:51.092602Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0001c3880] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-06-24T21:11:51.092747Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000177080] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-06-24T21:11:51.092903Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00010a480] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-06-24T21:11:51.093072Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00015ba80] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-06-24T21:11:51.093228Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000183c80] received request Name# 
RegisterNode ok# false data# peer# current inflight# 0 2025-06-24T21:11:51.093370Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00015a880] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-06-24T21:11:51.093517Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00015b480] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-06-24T21:11:51.093674Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000093080] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-06-24T21:11:51.093824Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000080480] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 2025-06-24T21:11:51.107246Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519626623656674146:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:51.107319Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-28 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-29 >> YdbYqlClient::TestDescribeDirectory [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientProvidesEmptyClientCerts [GOOD] >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientWithCorrectCerts >> TGRpcYdbTest::CreateDeleteYqlSession [GOOD] >> TGRpcYdbTest::KeepAlive [GOOD] >> TGRpcYdbTest::BeginTxRequestError [GOOD] >> YdbYqlClient::DeleteTableWithDeletedIndex [GOOD] >> YdbYqlClient::CreateTableWithUniformPartitions >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-46 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-47 >> YdbYqlClient::TestTzTypesFullStack >> TGRpcClientLowTest::ChangeAcl [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-34 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-35 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestReadTableBatchLimits [GOOD] Test command err: 2025-06-24T21:11:27.201738Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626541219536457:2238];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.201878Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00448b/r3tmp/tmp7AEzKA/pdisk_1.dat 2025-06-24T21:11:27.698383Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.709008Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.709090Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.720782Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:27.751936Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden 
dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 29187, node 1 2025-06-24T21:11:27.901508Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:27.901529Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:27.901536Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:27.901643Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:28.218309Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2869 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:28.527030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:28.620455Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:592: Got grpc request# ListEndpointsRequest, traceId# 01jyhwg5ka7670q180j4nrf6gw, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:53876, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# 9.986717s 2025-06-24T21:11:28.669534Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# CreateSessionRequest, traceId# 01jyhwg5m4e12rh5htnf1qhxps, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:53880, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-24T21:11:30.771627Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# CreateTableRequest, traceId# 01jyhwg7pkc6qvm234hq4vhq7v, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:53894, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-24T21:11:30.772277Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519626541219536477:2142] Handle TEvProposeTransaction 2025-06-24T21:11:30.772309Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519626541219536477:2142] TxId# 281474976715658 ProcessProposeTransaction 2025-06-24T21:11:30.772360Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519626541219536477:2142] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7519626554104439155:2620] 2025-06-24T21:11:30.850325Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519626554104439155:2620] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "Test" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Fk" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" KeyColumnNames: "Fk" UniformPartitionsCount: 16 PartitionConfig { } Temporary: false } CreateIndexedTable { } } } DatabaseName: "" RequestType: "" PeerName: "ipv6:[::1]:53894" 2025-06-24T21:11:30.850372Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519626554104439155:2620] txid# 281474976715658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:11:30.850723Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519626554104439155:2620] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:11:30.850786Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519626554104439155:2620] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:11:30.850998Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519626554104439155:2620] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:30.851127Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519626554104439155:2620] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:11:30.851181Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519626554104439155:2620] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 
2025-06-24T21:11:30.851295Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519626554104439155:2620] txid# 281474976715658 HANDLE EvClientConnected 2025-06-24T21:11:30.853359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.857184Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519626554104439155:2620] txid# 281474976715658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715658} 2025-06-24T21:11:30.857252Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519626554104439155:2620] txid# 281474976715658 SEND to# [1:7519626554104439154:2296] Source {TEvProposeTransactionStatus txid# 281474976715658 Status# 53} 2025-06-24T21:11:30.858217Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:11:30.858300Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:11:30.858318Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:11:30.858361Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:11:30.917900Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626554104439211:2674], Recipient [1:7519626554104439356:2302]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.919159Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626554104439208:2671], Recipient [1:7519626554104439360:2306]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.919394Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626554104439214:2677], Recipient [1:7519626554104439364:2310]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.919836Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626554104439203:2666], Recipient [1:7519626554104439338:2300]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.920127Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626554104439202:2665], Recipient [1:7519626554104439357:2303]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.920451Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626554104439210:2673], Recipient [1:7519626554104439379:2313]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.920697Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626554104439207:2670], Recipient [1:7519626554104439337:2299]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.921017Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626554104439205:2668], Recipient [1:7519626554104439358:2304]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.921289Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626554104439212:2675], Recipient [1:7519626554104439365:2311]: NKikimr::TEvTablet::TEvBoot 
2025-06-24T21:11:30.921991Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626554104439217:2680], Recipient [1:7519626554104439371:2312]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.922278Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626554104439215:2678], Recipient [1:7519626554104439389:2314]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.922652Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626554104439213:2676], Recipient [1:7519626554104439359:2305]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.922859Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626554104439206:2669], Recipient [1:7519626554104439355:2301]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.923529Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626554104439216:2679], Recipient [1:7519626554104439362:2308]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:11:30.923704Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519626554104439204:2667], Recipient [1:7519626554104439363:2309]: NKikimr::TEvTablet::TEvBoot 2025-0 ... 678 2025-06-24T21:11:52.578145Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [10:7519626647204856365:2366], Recipient [10:7519626634319952677:2300]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1750799512532 TxId: 281474976710678 2025-06-24T21:11:52.578159Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [10:7519626647204856365:2366], Recipient [10:7519626634319952690:2306]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1750799512532 TxId: 281474976710678 2025-06-24T21:11:52.578250Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [10:7519626647204856365:2366], Recipient [10:7519626634319952680:2303]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1750799512532 TxId: 281474976710678 2025-06-24T21:11:52.578292Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [10:7519626647204856365:2366], Recipient [10:7519626634319952678:2301]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1750799512532 TxId: 281474976710678 2025-06-24T21:11:52.578363Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [10:7519626647204856365:2366], Recipient [10:7519626634319952721:2308]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1750799512532 TxId: 281474976710678 2025-06-24T21:11:52.578420Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [10:7519626647204856365:2366], Recipient [10:7519626634319952681:2304]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1750799512532 TxId: 281474976710678 2025-06-24T21:11:52.578502Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [10:7519626647204856365:2366], Recipient [10:7519626634319952691:2307]: 
NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1750799512532 TxId: 281474976710678 2025-06-24T21:11:52.578540Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [10:7519626647204856365:2366], Recipient [10:7519626634319952676:2299]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1750799512532 TxId: 281474976710678 ---- batch start ---- [[0u];[0u];["A"]] ---- batch end ---- ---- batch start ---- [[1u];[2u];["A"]] ---- batch end ---- ---- batch start ---- [[2u];[4u];["A"]] ---- batch end ---- ---- batch start ---- [[3u];[6u];["A"]] ---- batch end ---- ---- batch start ---- [[4u];[8u];["A"]] ---- batch end ---- ---- batch start ---- [[5u];[10u];["A"]] ---- batch end ---- ---- batch start ---- [[6u];[12u];["A"]] ---- batch end ---- ---- batch start ---- [[7u];[14u];["A"]] ---- batch end ---- ---- batch start ---- [[8u];[16u];["A"]] ---- batch end ---- ---- batch start ---- [[9u];[18u];["A"]] ---- batch end ---- ---- batch start ---- [[10u];[20u];["A"]] ---- batch end ---- ---- batch start ---- [[11u];[22u];["A"]] ---- batch end ---- ---- batch start ---- [[12u];[24u];["A"]] ---- batch end ---- ---- batch start ---- [[13u];[26u];["A"]] ---- batch end ---- ---- batch start ---- [[14u];[28u];["A"]] ---- batch end ---- ---- batch start ---- [[15u];[30u];["A"]] ---- batch end ---- ---- batch start ---- [[16u];[32u];["A"]] ---- batch end ---- ---- batch start ---- [[17u];[34u];["A"]] ---- batch end ---- ---- batch start ---- [[18u];[36u];["A"]] ---- batch end ---- ---- batch start ---- [[19u];[38u];["A"]] ---- batch end ---- ---- batch start ---- [[20u];[40u];["A"]] ---- batch end ---- ---- batch start ---- [[21u];[42u];["A"]] ---- batch end ---- ---- batch start ---- [[22u];[44u];["A"]] ---- batch end ---- ---- batch start ---- [[23u];[46u];["A"]] ---- batch end ---- ---- batch start ---- [[24u];[48u];["A"]] ---- batch end ---- ---- batch start ---- [[25u];[50u];["A"]] ---- batch end ---- ---- batch start ---- [[26u];[52u];["A"]] ---- batch end ---- ---- batch start ---- [[27u];[54u];["A"]] ---- batch end ---- ---- batch start ---- [[28u];[56u];["A"]] ---- batch end ---- ---- batch start ---- [[29u];[58u];["A"]] ---- batch end ---- ---- batch start ---- [[30u];[60u];["A"]] ---- batch end ---- ---- batch start ---- [[31u];[62u];["A"]] ---- batch end ---- ---- batch start ---- [[32u];[64u];["A"]] ---- batch end ---- ---- batch start ---- [[33u];[66u];["A"]] ---- batch end ---- ---- batch start ---- [[34u];[68u];["A"]] ---- batch end ---- ---- batch start ---- [[35u];[70u];["A"]] ---- batch end ---- ---- batch start ---- [[36u];[72u];["A"]] ---- batch end ---- ---- batch start ---- [[37u];[74u];["A"]] ---- batch end ---- ---- batch start ---- [[38u];[76u];["A"]] ---- batch end ---- ---- batch start ---- [[39u];[78u];["A"]] ---- batch end ---- ---- batch start ---- [[40u];[80u];["A"]] ---- batch end ---- ---- batch start ---- [[41u];[82u];["A"]] ---- batch end ---- ---- batch start ---- [[42u];[84u];["A"]] ---- batch end ---- ---- batch start ---- [[43u];[86u];["A"]] ---- batch end ---- ---- batch start ---- [[44u];[88u];["A"]] ---- batch end ---- ---- batch start ---- [[45u];[90u];["A"]] ---- batch end ---- ---- batch start ---- [[46u];[92u];["A"]] ---- batch end ---- ---- batch start ---- [[47u];[94u];["A"]] ---- batch end ---- ---- batch start ---- [[48u];[96u];["A"]] ---- batch end ---- ---- batch start ---- [[49u];[98u];["A"]] ---- batch end ---- ---- 
batch start ---- [[50u];[100u];["A"]] ---- batch end ---- ---- batch start ---- [[51u];[102u];["A"]] ---- batch end ---- ---- batch start ---- [[52u];[104u];["A"]] ---- batch end ---- ---- batch start ---- [[53u];[106u];["A"]] ---- batch end ---- ---- batch start ---- [[54u];[108u];["A"]] ---- batch end ---- ---- batch start ---- [[55u];[110u];["A"]] ---- batch end ---- ---- batch start ---- [[56u];[112u];["A"]] ---- batch end ---- ---- batch start ---- [[57u];[114u];["A"]] ---- batch end ---- ---- batch start ---- [[58u];[116u];["A"]] ---- batch end ---- ---- batch start ---- [[59u];[118u];["A"]] ---- batch end ---- ---- batch start ---- [[60u];[120u];["A"]] ---- batch end ---- ---- batch start ---- [[61u];[122u];["A"]] ---- batch end ---- ---- batch start ---- [[62u];[124u];["A"]] ---- batch end ---- ---- batch start ---- [[63u];[126u];["A"]] ---- batch end ---- ---- batch start ---- [[64u];[128u];["A"]] ---- batch end ---- ---- batch start ---- [[65u];[130u];["A"]] ---- batch end ---- ---- batch start ---- [[66u];[132u];["A"]] ---- batch end ---- ---- batch start ---- [[67u];[134u];["A"]] ---- batch end ---- ---- batch start ---- [[68u];[136u];["A"]] ---- batch end ---- ---- batch start ---- [[69u];[138u];["A"]] ---- batch end ---- ---- batch start ---- [[70u];[140u];["A"]] ---- batch end ---- ---- batch start ---- [[71u];[142u];["A"]] ---- batch end ---- ---- batch start ---- [[72u];[144u];["A"]] ---- batch end ---- ---- batch start ---- [[73u];[146u];["A"]] ---- batch end ---- ---- batch start ---- [[74u];[148u];["A"]] ---- batch end ---- ---- batch start ---- [[75u];[150u];["A"]] ---- batch end ---- ---- batch start ---- [[76u];[152u];["A"]] ---- batch end ---- ---- batch start ---- [[77u];[154u];["A"]] ---- batch end ---- ---- batch start ---- [[78u];[156u];["A"]] ---- batch end ---- ---- batch start ---- [[79u];[158u];["A"]] ---- batch end ---- ---- batch start ---- [[80u];[160u];["A"]] ---- batch end ---- ---- batch start ---- [[81u];[162u];["A"]] ---- batch end ---- ---- batch start ---- [[82u];[164u];["A"]] ---- batch end ---- ---- batch start ---- [[83u];[166u];["A"]] ---- batch end ---- ---- batch start ---- [[84u];[168u];["A"]] ---- batch end ---- ---- batch start ---- [[85u];[170u];["A"]] ---- batch end ---- ---- batch start ---- [[86u];[172u];["A"]] ---- batch end ---- ---- batch start ---- [[87u];[174u];["A"]] ---- batch end ---- ---- batch start ---- [[88u];[176u];["A"]] ---- batch end ---- ---- batch start ---- [[89u];[178u];["A"]] ---- batch end ---- ---- batch start ---- [[90u];[180u];["A"]] ---- batch end ---- ---- batch start ---- [[91u];[182u];["A"]] ---- batch end ---- ---- batch start ---- [[92u];[184u];["A"]] ---- batch end ---- ---- batch start ---- [[93u];[186u];["A"]] ---- batch end ---- ---- batch start ---- [[94u];[188u];["A"]] ---- batch end ---- ---- batch start ---- [[95u];[190u];["A"]] ---- batch end ---- ---- batch start ---- [[96u];[192u];["A"]] ---- batch end ---- ---- batch start ---- [[97u];[194u];["A"]] ---- batch end ---- ---- batch start ---- [[98u];[196u];["A"]] ---- batch end ---- ---- batch start ---- [[99u];[198u];["A"]] ---- batch end ---- 2025-06-24T21:11:52.586875Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00002ca80] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-06-24T21:11:52.587181Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000016e80] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-06-24T21:11:52.587396Z node 10 :GRPC_SERVER 
DEBUG: grpc_server.cpp:283: [0x51a0000e8280] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-06-24T21:11:52.587591Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000082280] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-06-24T21:11:52.587791Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00002dc80] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-06-24T21:11:52.587971Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00002d080] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-06-24T21:11:52.588132Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000065480] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-06-24T21:11:52.588313Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000087680] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-06-24T21:11:52.588478Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00001b680] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-06-24T21:11:52.588658Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000066c80] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-06-24T21:11:52.588833Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000066080] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-06-24T21:11:52.588996Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000d0280] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-06-24T21:11:52.589192Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00002ac80] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-06-24T21:11:52.589375Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00002d680] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-06-24T21:11:52.589571Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00002c480] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-06-24T21:11:52.589728Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000d0880] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-06-24T21:11:52.589852Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000094880] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 >> YdbLogStore::AlterLogStore [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-40 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-41 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestDescribeDirectory [GOOD] Test command err: 2025-06-24T21:11:27.812941Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626541846835845:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.813103Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00447b/r3tmp/tmpTsWx0y/pdisk_1.dat 2025-06-24T21:11:28.352295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2025-06-24T21:11:28.352393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:28.369711Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:28.404638Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29777, node 1 2025-06-24T21:11:28.702965Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:28.702995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:28.702999Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:28.704962Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:28.833765Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4411 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:29.039106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:31.112988Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626559026706009:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:31.113096Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:31.113212Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626559026706021:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:31.117692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:31.145039Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626559026706023:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:11:31.228374Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626559026706094:2673] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:33.221435Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626566026472308:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:33.221493Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00447b/r3tmp/tmpri8f2w/pdisk_1.dat 2025-06-24T21:11:33.462623Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:33.482326Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:33.482406Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:33.489563Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15757, node 4 2025-06-24T21:11:33.502078Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 2025-06-24T21:11:33.601100Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:33.601131Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:33.601139Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:33.601755Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8616 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:33.884763Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:34.267328Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:36.480586Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626578911375199:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:36.480721Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:36.481207Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626578911375211:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:36.485726Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:36.529034Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519626578911375213:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:11:36.593592Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519626578911375289:2666] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:38.411430Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626588195792875:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:38.411474Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00447b/r3tmp/tmpXnKYwW/pdisk_1.dat 2025-06-24T21:11:38.696436Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:38.721297Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:38.721393Z node 7 :HIVE WARN: ... 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:39.281472Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:39.437120Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:42.110966Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:42.312572Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626605375663205:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:42.312670Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626605375663210:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:42.312739Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:42.317521Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:42.354256Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519626605375663219:2307], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:11:42.432687Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519626605375663300:2790] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:42.603774Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwgjz58y1v9rbyh7k9wrmm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjA1NzUxODAtYjQ4ZTVmMGMtNzFiNDcyNzktNGVhNjFjZDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:42.741338Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwgk9nd2az144bd7evkqx7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjA1NzUxODAtYjQ4ZTVmMGMtNzFiNDcyNzktNGVhNjFjZDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:42.859918Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhwgkdk4gfw1zct78fgqnw6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjA1NzUxODAtYjQ4ZTVmMGMtNzFiNDcyNzktNGVhNjFjZDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:42.969273Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhwgkh437g50dhtjhb954ka, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjA1NzUxODAtYjQ4ZTVmMGMtNzFiNDcyNzktNGVhNjFjZDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:43.092372Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhwgkmnctn1nqpy74hq5jj1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjA1NzUxODAtYjQ4ZTVmMGMtNzFiNDcyNzktNGVhNjFjZDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:43.414050Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519626588195792875:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:43.414130Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:44.449222Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyhwgks37p9pwmnx049rm1vj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjA1NzUxODAtYjQ4ZTVmMGMtNzFiNDcyNzktNGVhNjFjZDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:44.463361Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyhwgks37p9pwmnx049rm1vj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjA1NzUxODAtYjQ4ZTVmMGMtNzFiNDcyNzktNGVhNjFjZDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:11:46.469672Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626622764296800:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:46.469734Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00447b/r3tmp/tmpImyNU2/pdisk_1.dat 2025-06-24T21:11:46.667818Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:46.689744Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:46.689841Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:46.697637Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:46.712868Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 28207, node 10 2025-06-24T21:11:46.858527Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:46.858550Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:46.858557Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:46.858704Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7701 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:47.394186Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:47.503291Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:50.628412Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626639944167051:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:50.628536Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:50.663961Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:51.470047Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519626622764296800:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:51.470119Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::CreateDeleteYqlSession [GOOD] Test command err: 2025-06-24T21:11:27.099130Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626539448958334:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.099358Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004492/r3tmp/tmpTExPXw/pdisk_1.dat 2025-06-24T21:11:27.752325Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.785161Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.785286Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.796507Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19709, node 1 2025-06-24T21:11:28.055836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:28.055857Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:28.055864Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:28.056011Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:28.115277Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1685 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:28.457227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:32.088366Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626560487841080:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:32.088510Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004492/r3tmp/tmppJYYEk/pdisk_1.dat 2025-06-24T21:11:32.381594Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:32.432209Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:32.432319Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 62410, node 4 2025-06-24T21:11:32.486916Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:32.527588Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:32.527609Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:32.527618Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:32.527789Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21397 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:32.885675Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:32.919801Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:11:33.014288Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519626564782809241:2591] txid# 281474976715658, issues: { message: "Unknown column \'BlaBla\' specified in key column list" severity: 1 } 2025-06-24T21:11:33.113906Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:37.650129Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626584698617194:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:37.650242Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004492/r3tmp/tmpQi4TW5/pdisk_1.dat 2025-06-24T21:11:38.025816Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:38.060202Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:38.060289Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:38.088530Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20949, node 7 2025-06-24T21:11:38.315834Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:38.315865Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:38.315874Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:38.316015Z node 7 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15981 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:11:38.661480Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:38.717939Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:43.161474Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626607804917726:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:43.161554Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004492/r3tmp/tmp0jHGyy/pdisk_1.dat 2025-06-24T21:11:43.360656Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:43.388521Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:43.388610Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:43.396229Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:43.409276Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 62251, node 10 2025-06-24T21:11:43.567816Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:43.567840Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:43.567847Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:43.567994Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25106 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:43.890088Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:43.907738Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:11:43.979875Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:44.197439Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:44.265453Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:11:44.695547Z node 10 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 10, TabletId: 72075186224037888 not found 2025-06-24T21:11:48.948433Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626629945320106:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:48.948512Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004492/r3tmp/tmpyDKgN0/pdisk_1.dat 2025-06-24T21:11:49.308505Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20480, node 13 2025-06-24T21:11:49.343818Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event 
NodeId# 13 Type# 268639257 2025-06-24T21:11:49.399731Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:49.399837Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:49.493309Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:49.510116Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:49.510146Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:49.510153Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:49.510311Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17203 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:49.904691Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:49.973283Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TGRpcLdapAuthentication::LdapAuthServerIsUnavailable [GOOD] >> TGRpcLdapAuthentication::DisableBuiltinAuthMechanism ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::KeepAlive [GOOD] Test command err: 2025-06-24T21:11:27.093799Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626541689147856:2238];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.095208Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044a0/r3tmp/tmpKDYBeU/pdisk_1.dat 2025-06-24T21:11:27.565293Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.592628Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.592724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.630299Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11843, node 1 2025-06-24T21:11:27.907895Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:27.907925Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:27.907934Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:27.908065Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:28.083721Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3446 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:28.478017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:28.701791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:67) 2025-06-24T21:11:31.746093Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626558095353308:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:31.746226Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044a0/r3tmp/tmpR6OTQY/pdisk_1.dat 2025-06-24T21:11:31.951496Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:31.970234Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:31.970368Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:31.977584Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21444, node 4 2025-06-24T21:11:32.089039Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:32.089064Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:32.089072Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:32.089233Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22844 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:32.445018Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:36.683835Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626581153718615:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:36.683937Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044a0/r3tmp/tmpFZhPTr/pdisk_1.dat 2025-06-24T21:11:36.917092Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:36.964267Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 19464, node 7 2025-06-24T21:11:36.999884Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:36.999986Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:37.010432Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:37.155952Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:37.155989Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:37.155996Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:37.156192Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16333 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:37.558219Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:37.702593Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
: Error: Operation timeout. 2025-06-24T21:11:42.688017Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626603363219678:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:42.688124Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044a0/r3tmp/tmp89nibJ/pdisk_1.dat 2025-06-24T21:11:42.905773Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:42.914075Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:42.914151Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:42.926664Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11532, node 10 2025-06-24T21:11:43.095852Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:43.095875Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:43.095884Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:43.096036Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22967 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:43.508931Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:43.702621Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
: Error: Operation cancelled. 2025-06-24T21:11:48.821355Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626630772403423:2240];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:48.821462Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044a0/r3tmp/tmpg1DJzA/pdisk_1.dat 2025-06-24T21:11:49.157239Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:49.178836Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:49.178947Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:49.183678Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18648, node 13 2025-06-24T21:11:49.360742Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:49.360760Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:49.360768Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:49.360902Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14272 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:49.759264Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:49.764880Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::BeginTxRequestError [GOOD] Test command err: 2025-06-24T21:11:27.065080Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626539231566630:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.066052Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00448f/r3tmp/tmp0bzOTV/pdisk_1.dat 2025-06-24T21:11:27.578741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.578874Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.604201Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.605708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61100, node 1 2025-06-24T21:11:27.902704Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:27.902740Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:27.902748Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:27.902870Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:28.065789Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7558 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:28.461528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:28.632271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:11:32.095234Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626560073677168:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:32.095710Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00448f/r3tmp/tmplZYkbK/pdisk_1.dat 2025-06-24T21:11:32.430721Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:32.486540Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:32.486668Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 2222, node 4 2025-06-24T21:11:32.511996Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:32.679088Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:32.679106Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:32.679111Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:32.679264Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7534 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:33.102075Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:33.133692Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:33.184776Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:11:37.655404Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626583843345926:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:37.655484Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00448f/r3tmp/tmpS6tkcC/pdisk_1.dat 2025-06-24T21:11:37.938573Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:37.954137Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:37.954243Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:37.968637Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:37.990042Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 25006, node 7 2025-06-24T21:11:38.138253Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:38.138281Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:38.138290Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:38.138420Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8983 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:38.392059Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:38.564467Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:11:38.653317Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:11:38.685060Z ... ation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:11:39.491590Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:11:39.561326Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:11:39.644735Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:11:43.370568Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626609556416208:2088];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:43.371080Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00448f/r3tmp/tmpdXwCZE/pdisk_1.dat 2025-06-24T21:11:43.749258Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:43.768193Z 
node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:43.768284Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:43.781316Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61552, node 10 2025-06-24T21:11:43.975762Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:43.975785Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:43.975795Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:43.975935Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23146 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:44.261903Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:44.366365Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:44.493906Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:49.236864Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626635122034866:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:49.236916Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00448f/r3tmp/tmpL2Ne6F/pdisk_1.dat 2025-06-24T21:11:49.634201Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:49.649931Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:49.650020Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:49.664407Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7015, node 13 2025-06-24T21:11:49.967861Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:49.967883Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:49.967894Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:49.968053Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:50.255637Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:64138 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:50.364541Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:53.222333Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626652301905092:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:53.222440Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:53.226859Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626652301905104:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:53.230740Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:53.265710Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519626652301905106:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:11:53.347579Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519626652301905182:2689] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:53.348690Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=13&id=YWNlOGMwOGEtYjgwMWNiOC01YTczZjMxLTg0MGIxZDc4, ActorId: [13:7519626652301905074:2292], ActorState: ExecuteState, TraceId: 01jyhwgxm4744757hcq8vk2v9q, ReplyQueryCompileError, status NOT_FOUND remove tx with tx_id: 2025-06-24T21:11:53.356836Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=13&id=YWNlOGMwOGEtYjgwMWNiOC01YTczZjMxLTg0MGIxZDc4, ActorId: [13:7519626652301905074:2292], ActorState: ExecuteState, TraceId: 01jyhwgxrb9ay56y1955tmz2j5, ReplyQueryCompileError, status NOT_FOUND remove tx with tx_id: 2025-06-24T21:11:53.363980Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=13&id=YWNlOGMwOGEtYjgwMWNiOC01YTczZjMxLTg0MGIxZDc4, ActorId: [13:7519626652301905074:2292], ActorState: ExecuteState, TraceId: 01jyhwgxrk6rwjswtk6g807pqn, ReplyQueryCompileError, status NOT_FOUND remove tx with tx_id: >> YdbYqlClient::SecurityTokenAuthMultiTenantSDK ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcClientLowTest::ChangeAcl [GOOD] Test command err: 2025-06-24T21:11:27.082267Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626541108608145:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.082359Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449e/r3tmp/tmpxC1W1c/pdisk_1.dat 2025-06-24T21:11:27.630262Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.630639Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.640422Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.656745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26336, node 1 2025-06-24T21:11:27.903089Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:27.903109Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:27.903115Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:27.903247Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:28.091788Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4801 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:28.501976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TestRequest(database="/Root", token="root@builtin") => {SUCCESS, 0} 2025-06-24T21:11:30.546236Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="/blabla", token="root@builtin") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-24T21:11:30.558589Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="blabla", token="root@builtin") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-24T21:11:32.032981Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626562662647807:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:32.033081Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449e/r3tmp/tmp5M9S7r/pdisk_1.dat 2025-06-24T21:11:32.288165Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16083, node 4 2025-06-24T21:11:32.397480Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:32.397583Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:32.473138Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:32.546276Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:32.546303Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:32.546311Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:32.546454Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:16501 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:32.902311Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:32.922292Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TestRequest(database="/Root", token="") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-24T21:11:33.039526Z node 4 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="/blabla", token="") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-24T21:11:33.044333Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:33.054247Z node 4 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="blabla", token="") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-24T21:11:37.311510Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626583854055479:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:37.311590Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449e/r3tmp/tmpg97Ou5/pdisk_1.dat 2025-06-24T21:11:37.597851Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:37.621566Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:37.621650Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:37.626255Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:37.643183Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 27273, node 7 2025-06-24T21:11:37.751076Z node 7 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:37.751105Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:37.751120Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:37.751301Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13072 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:38.086537Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:38.375290Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TestRequest(database="/Root", token="") => {SUCCESS, 0} TestRequest(database="/blabla", token="") => {SUCCESS, 0} TestRequest(database="blabla", token="") => {SUCCESS, 0} TestRequest(database="/Root", token="root@builtin") => {SUCCESS, 0} 2025-06-24T21:11:41.545979Z node 7 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="/blabla", token="root@builtin") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-24T21:11:41.564667Z node 7 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="blabla", token="root@builtin") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-24T21:11:41.579671Z node 7 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (717F937C): Unknown token TestRequest(database="/Root", token="invalid token") => {UNAUTHORIZED, 0} 2025-06-24T21:11:41.600939Z node 7 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="/blabla", token="invalid token") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-24T21:11:41.616769Z node 7 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="blabla", token="invalid token") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-24T21:11:43.723231Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626607730185210:2241];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449e/r3tmp/tmp7UHDAC/pdisk_1.dat 2025-06-24T21:11:43.866094Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:11:44.002047Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:44.016905Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:44.017006Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:44.028129Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7336, node 10 2025-06-24T21:11:44.223681Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:44.223705Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:44.223714Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:44.223901Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19384 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:44.436285Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:44.651282Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TestRequest(database="/Root", token="") => {SUCCESS, 0} TestRequest(database="/blabla", token="") => {SUCCESS, 0} TestRequest(database="blabla", token="") => {SUCCESS, 0} TestRequest(database="/Root", token="root@builtin") => {SUCCESS, 0} 2025-06-24T21:11:47.798193Z node 10 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="/blabla", token="root@builtin") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-24T21:11:47.824951Z node 10 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="blabla", token="root@builtin") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-24T21:11:47.851466Z node 10 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (717F937C): Unknown token TestRequest(database="/Root", token="invalid token") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-24T21:11:47.873325Z node 10 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="/blabla", token="invalid token") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-24T21:11:47.913421Z node 10 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="blabla", token="invalid token") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-24T21:11:49.783626Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626635601781723:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:49.783693Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449e/r3tmp/tmpGgj1kJ/pdisk_1.dat 2025-06-24T21:11:50.137058Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:50.167994Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, 
(0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:50.168082Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:50.177055Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25100, node 13 2025-06-24T21:11:50.303817Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:50.303839Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:50.303849Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:50.304010Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11221 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:50.656291Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:50.858947Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11221 2025-06-24T21:11:51.118474Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) >> TGRpcLdapAuthentication::LdapAuthWithEmptyPassword [GOOD] >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyBindDn [GOOD] >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyBindPassword >> TGRpcYdbTest::ExecuteQueryCache [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbLogStore::AlterLogStore [GOOD] Test command err: 2025-06-24T21:11:27.083134Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626540278432503:2181];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.083461Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004494/r3tmp/tmpJamgyO/pdisk_1.dat 2025-06-24T21:11:27.650420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.650552Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.652654Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.673616Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12892, node 1 2025-06-24T21:11:27.900075Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:27.900104Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:27.900111Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:27.900264Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:28.083461Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14802 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:28.454503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:30.460359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) SUCCESS 3 rows in 0.025825s 2025-06-24T21:11:30.648220Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626553163335459:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.648221Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626553163335451:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.648321Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.652849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:30.670916Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626553163335465:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:11:30.729498Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626553163335541:2808] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:31.676616Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhwg7jn39pev68cfaae15yp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjBkYTE3OWMtNzZlYTQ2MzUtOWIxMDc5MTktZGU4M2ViYTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 3 rows 2025-06-24T21:11:33.359682Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626566430813128:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:33.359761Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004494/r3tmp/tmpkB88Zb/pdisk_1.dat 2025-06-24T21:11:33.616072Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:33.652972Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 8166, node 4 2025-06-24T21:11:33.727865Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:33.727947Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:33.735699Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:33.735721Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:33.735727Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:33.735849Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:33.745238Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24761 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:34.036226Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:24761 2025-06-24T21:11:34.374191Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:36.630168Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:36.780476Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:508: TAlterTable Propose, path: Root/Foo/TimestampIndex/indexImplTable, pathId: , opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-06-24T21:11:36.780642Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710659:1, propose status:StatusNameConflict, reason: Check failed: path: '/Root/Foo/TimestampIndex/indexImplTable', error: path is not a common path (id: [OwnerId: 72057594046644480, LocalPathId: 4], type: EPathTypeTable, state: EPathStateNoChanges), at schemeshard: 72057594046644480 2025-06-24T21:11:36.783824Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710659, database: /Root, subject: , status: StatusNameConflict, reason: Check failed: path: '/Root/Foo/TimestampIndex/indexImplTable', error: path is not a common path (id: [OwnerId: 72057594046644480, LocalPathId: 4], type: EPathTypeTable, state: EPathStateNoChanges), operation: ALTER TABLE, path: Root/Foo/TimestampIndex/indexImplTable 2025-06-24T21:11:36.784100Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519626579315716388:2920] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/Foo/TimestampIndex/indexImplTable\', error: path is not a common path (id: [OwnerId: 72057594046644480, LocalPathId: 4], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } Error 128: Administrative access denied TClient::Ls request: 
/Root/Foo/TimestampIndex/indexImplTable TClient::Ls response: Status: ... R: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004494/r3tmp/tmp0tzwju/pdisk_1.dat 2025-06-24T21:11:39.196255Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:39.227795Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:39.227883Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:39.238696Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62832, node 7 2025-06-24T21:11:39.439789Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:39.439811Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:39.439819Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:39.439955Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21381 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:39.871011Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:39.952874Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:39.962017Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:44.327545Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626614527004581:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:44.327599Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004494/r3tmp/tmpmEwpQ1/pdisk_1.dat 2025-06-24T21:11:44.744590Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12523, node 10 2025-06-24T21:11:44.843264Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:44.843407Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:44.870333Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:44.931766Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:44.931786Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:44.931792Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:44.931905Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4684 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:45.284312Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:45.304975Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:11:45.398111Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:45.403316Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:45.607394Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:45.847545Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:11:50.460701Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626638906641833:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:50.460763Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004494/r3tmp/tmpCjY7jC/pdisk_1.dat 2025-06-24T21:11:50.799485Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:50.799585Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:50.800114Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:50.821074Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6784, node 13 2025-06-24T21:11:50.931804Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:50.931827Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:11:50.931836Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:50.931983Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22922 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:51.261286Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:51.492903Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TGRpcNewCoordinationClient::BasicMethods [GOOD] >> YdbQueryService::TestCreateDropAttachSession [GOOD] >> YdbQueryService::TestCreateAttachAndDropAttachedSession >> ClientStatsCollector::PrepareQuery [GOOD] >> ClientStatsCollector::CounterCacheMiss >> YdbYqlClient::CreateTableWithMESettings [GOOD] >> TGRpcYdbTest::DeleteFromAfterCreate [GOOD] >> TGRpcYdbTest::ExplainQuery [GOOD] >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientWithCorrectCerts_AccessDenied >> YdbTableBulkUpsert::AsyncIndexShouldSucceed [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcLdapAuthentication::LdapAuthWithEmptyPassword [GOOD] Test command err: 2025-06-24T21:11:28.618901Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626545478504452:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:28.618963Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00447a/r3tmp/tmpeJKig6/pdisk_1.dat 2025-06-24T21:11:29.106851Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:29.130022Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:29.130108Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:29.133875Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21042, node 1 2025-06-24T21:11:29.225060Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:29.225156Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:29.225172Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:29.225279Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27830 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:29.542104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:29.728255Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:33.511852Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626564654713422:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:33.511910Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00447a/r3tmp/tmp7DenWW/pdisk_1.dat 2025-06-24T21:11:33.814366Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:33.822655Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:33.822747Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:33.832413Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:33.849335Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 10289, node 4 2025-06-24T21:11:33.995851Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:33.995872Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:33.995879Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:33.995991Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6713 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:34.284288Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:34.574968Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:39.196973Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626593786992530:2178];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:39.197029Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00447a/r3tmp/tmpz1YDJ8/pdisk_1.dat 2025-06-24T21:11:39.623496Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:39.661962Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:39.662054Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:39.670295Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:39.691980Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 9073, node 7 2025-06-24T21:11:39.875092Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:39.875114Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:39.875163Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:39.875320Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:40.196438Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25917 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:40.321105Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00447a/r3tmp/tmpby5AKE/pdisk_1.dat 2025-06-24T21:11:45.544181Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:45.676596Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:45.677524Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:45.677602Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:45.698599Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27036, node 10 2025-06-24T21:11:45.944033Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:45.944057Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:45.944066Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:45.944216Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15996 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:46.343317Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:46.345209Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:51.127737Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626643188602440:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:51.127794Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00447a/r3tmp/tmpR9EzXR/pdisk_1.dat 2025-06-24T21:11:51.448972Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61775, node 13 2025-06-24T21:11:51.568008Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:51.568371Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:51.685468Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:51.763874Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:51.763904Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:51.763914Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:51.764071Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14003 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:52.163279Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:52.167334Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 
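The repeated TClient::Ls request/response pairs in these blocks are scheme describes of the database root: they report the Root directory, its owner (root@builtin), version counters, and the implicit .sys child. As a rough client-side analogue only (again not part of the log), a describe with the YDB Python SDK scheme client could look like the sketch below; endpoint and database are placeholders.

import ydb

# Sketch only: describe the database root and list its children, roughly what the
# TClient::Ls("Root") calls in the log report. Endpoint/port are placeholders.
with ydb.Driver(endpoint="grpc://localhost:2136", database="/Root") as driver:
    driver.wait(timeout=5)
    entry = driver.scheme_client.describe_path("/Root")
    print(entry.name, entry.owner)                      # e.g. "Root root@builtin"
    listing = driver.scheme_client.list_directory("/Root")
    print([child.name for child in listing.children])   # the log shows a ".sys" entry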
>> TGRpcYdbTest::ReadTablePg [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithValidCredentials >> TTableProfileTests::UseDefaultProfile >> LocalityOperation::LocksFromAnotherTenants-UseSink [GOOD] >> YdbYqlClient::SecurityTokenAuth ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::ExecuteQueryCache [GOOD] Test command err: 2025-06-24T21:11:27.088168Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626540532197763:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.096318Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004488/r3tmp/tmpZCV7rN/pdisk_1.dat 2025-06-24T21:11:27.693796Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.693885Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.705065Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.742913Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6651, node 1 2025-06-24T21:11:27.909817Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:27.909839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:27.909846Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:27.909957Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:28.109854Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3904 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:28.466219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:32.066435Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626563751385205:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:32.066529Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004488/r3tmp/tmp5hs4AR/pdisk_1.dat 2025-06-24T21:11:32.285425Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:32.309824Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:32.309922Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:32.318324Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20759, node 4 2025-06-24T21:11:32.511818Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:32.511836Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:32.511841Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:32.511941Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15875 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:32.903791Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:33.111851Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:35.741996Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626576636288098:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:35.742154Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:35.742544Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626576636288110:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:35.747590Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:35.774615Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519626576636288112:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:11:35.834035Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519626576636288190:2690] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:38.130069Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626586924154373:2088];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:38.143682Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004488/r3tmp/tmpxbOw01/pdisk_1.dat 2025-06-24T21:11:38.496930Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:38.504375Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:38.504450Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:38.520571Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61652, node 7 2025-06-24T21:11:38.680932Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:38.680951Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:38.680959Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:38.681112Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12170 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:38.956073Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, op ... 
k failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:42.536692Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwgk462r940d30a5c304kg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NmUzOTI5N2EtODc4OTA3YjctZGY1MDY4NWQtM2I4YTk4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:42.765119Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwgkd7bevw7dxvznqd7htk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NmUzOTI5N2EtODc4OTA3YjctZGY1MDY4NWQtM2I4YTk4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:44.754401Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626613161510584:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:44.754475Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004488/r3tmp/tmprjnbee/pdisk_1.dat 2025-06-24T21:11:45.130260Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:45.150255Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:45.150844Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:45.162413Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10494, node 10 2025-06-24T21:11:45.283393Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:45.283413Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:45.283419Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:45.283547Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8413 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:45.721544Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:45.760816Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:48.985867Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626630341380768:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:48.985933Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626630341380757:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:48.986240Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:48.990618Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:49.015198Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519626630341380771:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:11:49.120324Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519626634636348138:2668] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:51.331090Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626644578700816:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:51.331168Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004488/r3tmp/tmp91yMOn/pdisk_1.dat 2025-06-24T21:11:51.663101Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:51.691811Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:51.691899Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:51.698376Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14849, node 13 2025-06-24T21:11:51.862132Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:51.862158Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:51.862167Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:51.862341Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62078 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:52.293226Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:52.420944Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:55.382261Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626661758571027:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:55.382394Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:55.382962Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626661758571039:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:55.388491Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:55.429638Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519626661758571041:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:11:55.496561Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519626661758571111:2670] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> YdbTableBulkUpsert::Types [GOOD] >> YdbTableBulkUpsert::Uint8 >> YdbYqlClient::TestColumnOrder ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcNewCoordinationClient::BasicMethods [GOOD] Test command err: 2025-06-24T21:11:29.538294Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626547539825859:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:29.538355Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004475/r3tmp/tmp3ODJZz/pdisk_1.dat 2025-06-24T21:11:30.011250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:30.011345Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:30.019486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:30.034693Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1462, node 1 2025-06-24T21:11:30.185132Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:30.185152Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:30.185160Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:30.185300Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17634 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:30.544781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:30.553512Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:34.745526Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626571706089225:2153];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:34.784690Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004475/r3tmp/tmpWRWW8J/pdisk_1.dat 2025-06-24T21:11:35.002656Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:35.038321Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:35.038437Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:35.048879Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64835, node 4 2025-06-24T21:11:35.255064Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:35.255087Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:35.255100Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:35.255248Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2171 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:35.641462Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:35.727130Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:38.357491Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626588885959339:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:38.357584Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626588885959347:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:38.357655Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:38.367642Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:38.405661Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519626588885959353:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:11:38.472697Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519626588885959422:2677] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004475/r3tmp/tmpoJRmRj/pdisk_1.dat 2025-06-24T21:11:40.976257Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:41.066968Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13311, node 7 2025-06-24T21:11:41.202398Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:41.202511Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:41.269938Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:41.303780Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:41.303803Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:41.303814Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:41.303938Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19210 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:41.646956Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:41.879319Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:44.232125Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626615332681797:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:44.232208Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:44.267560Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:44.466765Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626615332681975:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:44.466932Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:44.467250Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626615332681980:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:44.472245Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:44.511134Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519626615332681982:2311], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:11:44.606220Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519626615332682058:2797] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:46.789700Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626623240243985:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:46.789782Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004475/r3tmp/tmpXP1qyH/pdisk_1.dat 2025-06-24T21:11:47.157873Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:47.198096Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:47.198182Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:47.208773Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10921, node 10 2025-06-24T21:11:47.332538Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:47.332562Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:47.332571Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:47.332713Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64705 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:47.675098Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:47.800049Z node 10 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [10:7519626627535212203:2600] txid# 281474976710658, Access denied for bad@builtin on path /Root, with access CreateTable 2025-06-24T21:11:47.800222Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519626627535212203:2600] txid# 281474976710658, issues: { message: "Access denied for bad@builtin on path /Root" issue_code: 200000 severity: 1 } 2025-06-24T21:11:47.859558Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004475/r3tmp/tmpwU1HnF/pdisk_1.dat 2025-06-24T21:11:52.329428Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:52.436105Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:52.446645Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:52.446731Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:52.457389Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:52.496073Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 TServer::EnableGrpc on GrpcPort 25308, node 13 2025-06-24T21:11:52.665973Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:52.665999Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:52.666009Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:52.666165Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18657 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:53.133024Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:53.227368Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:53.269388Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::DeleteFromAfterCreate [GOOD] Test command err: 2025-06-24T21:11:27.067740Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626541279931595:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.067808Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449c/r3tmp/tmpeRinUi/pdisk_1.dat 2025-06-24T21:11:27.719562Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.743300Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.745463Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.757943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29077, node 1 2025-06-24T21:11:27.927724Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:27.927747Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:27.927757Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:27.927868Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:28.099133Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21027 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:28.519876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:28.663427Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626545574899765:2613] txid# 281474976715658, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-24T21:11:32.108878Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626561327091532:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:32.109016Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449c/r3tmp/tmp8DN9mQ/pdisk_1.dat 2025-06-24T21:11:32.383077Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:32.418636Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 20744, node 4 2025-06-24T21:11:32.482373Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:32.482469Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:32.503966Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:32.503997Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:32.504006Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:32.504128Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:32.506788Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9105 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:32.831370Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:32.958920Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:33.121779Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:33.226354Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:35.828459Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626574211994847:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:35.828589Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:35.833631Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626574211994859:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:35.838389Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:35.874654Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519626574211994861:2316], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-24T21:11:35.946266Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519626574211994933:3034] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:36.934939Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jyhwgcmj2675erasahfhps5t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NzgzZmRhYzctYTdlZjA3NjUtZmQxYmQyZDctNDRmYjQ3NWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:36.960227Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. Ctx: { TraceId: 01jyhwgcmj2675erasahfhps5t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NzgzZmRhYzctYTdlZjA3NjUtZmQxYmQyZDctNDRmYjQ3NWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:37.109444Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519626561327091532:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:37.109536Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:37.153409Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710664. Ctx: { TraceId: 01jyhwgds11j2v7x4pb0gvcahv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NTAyNzA2ZTQtNjllYzA5NzMtZmMzMmY3NjMtMTE3NzA5YmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:39.176119Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626591697921756:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:39.176254Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.met ... rom file: (empty maybe) 2025-06-24T21:11:45.571779Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:45.571937Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:45.934612Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19689 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:46.069805Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:48.943678Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626632140850727:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:48.943774Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626632140850719:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:48.944136Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:48.948309Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:48.996824Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519626632140850733:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:11:49.080494Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519626636435818103:2665] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:49.322246Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7519626636435818135:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[Root/NotFound]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:11:49.323406Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=10&id=OTgwZjQyM2MtNzZhZmJjMWItMzA1ZGMzYzctMTZmNTI2YTU=, ActorId: [10:7519626632140850715:2293], ActorState: ExecuteState, TraceId: 01jyhwgsqj24138pwp65b7zgmz, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:11:51.234961Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626645587500401:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:51.235061Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449c/r3tmp/tmpC9E3Wh/pdisk_1.dat 2025-06-24T21:11:51.476662Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:51.509510Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:51.509601Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:51.517688Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28628, node 13 2025-06-24T21:11:51.727895Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:51.727918Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:51.727931Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:51.728073Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21847 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:52.166075Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:52.233436Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:52.290331Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:52.369178Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:55.380183Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626662767370835:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:55.380287Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:55.380538Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626662767370847:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:55.385596Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:55.431345Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519626662767370849:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-24T21:11:55.492682Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519626662767370923:2892] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:55.608259Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwgzqg7y5fk47fzxxk076w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NGU0MDQ5ZmQtYTRiZjQ5YjktZGE5Mzk3NTctMzg0ZDVjOWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:55.758749Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhwgzz79qqxg9ha02kb5yp4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NGU0MDQ5ZmQtYTRiZjQ5YjktZGE5Mzk3NTctMzg0ZDVjOWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::CreateTableWithMESettings [GOOD] Test command err: 2025-06-24T21:11:27.136978Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626538751358151:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.137151Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004489/r3tmp/tmpcpK8eh/pdisk_1.dat 2025-06-24T21:11:27.619810Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.619916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.643622Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.650593Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:27.732781Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 26485, node 1 2025-06-24T21:11:27.964940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:27.965830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:27.965842Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:27.965950Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:28.129033Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3419 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:28.573146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:30.774548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:32.659801Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626560196762227:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:32.659864Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004489/r3tmp/tmpRIzhwl/pdisk_1.dat 2025-06-24T21:11:32.879432Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:32.910401Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:32.910471Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:32.917968Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:32.933626Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 10290, node 4 2025-06-24T21:11:33.061807Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:33.061827Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:33.061832Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:33.061942Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got 
bad distributable configuration TClient is connected to server localhost:29537 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:33.325235Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:33.692917Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:36.077284Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:36.265500Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:11:36.337980Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:11:36.420814Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:11:38.258245Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626588262878542:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:38.258358Z node 7 :METADATA_PROVIDER ERROR: 
log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004489/r3tmp/tmp9N7Q11/pdisk_1.dat 2025-06-24T21:11:38.528641Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:38.586910Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 4463, node 7 2025-06-24T21:11:38.635852Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:38.635883Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:38.635895Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:38.636036Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:38.646237Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:38.646344Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:38.669159Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20535 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:39.037993Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:39.278345Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:42.373152Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:42.590514Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:11:42.717236Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:11:44.668592Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626611603126198:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:44.668640Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004489/r3tmp/tmpbxpNhx/pdisk_1.dat 2025-06-24T21:11:45.021797Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:45.026405Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:45.026505Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:45.033831Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17840, node 10 2025-06-24T21:11:45.295809Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:45.295835Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:45.295845Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:45.295996Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4742 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:45.711393Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:45.719389Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:48.853736Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:49.119735Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:11:49.133443Z node 10 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 10, TabletId: 72075186224037888 not found 2025-06-24T21:11:49.133477Z node 10 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 10, TabletId: 72075186224037888 not found 2025-06-24T21:11:51.331310Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626643091665777:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:51.331404Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004489/r3tmp/tmpukP7BD/pdisk_1.dat 2025-06-24T21:11:51.607038Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:51.664531Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:51.664642Z node 13 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:51.675486Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2386, node 13 2025-06-24T21:11:51.864019Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:51.864047Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:51.864055Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:51.864210Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64085 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:52.321929Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:52.350146Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:55.671677Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TYqlDateTimeTests::SimpleUpsertSelect ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::ExplainQuery [GOOD] Test command err: 2025-06-24T21:11:27.225495Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626540190040967:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.225729Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004486/r3tmp/tmprwEeNe/pdisk_1.dat 2025-06-24T21:11:27.766753Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.806407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.806498Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.816171Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30981, node 1 2025-06-24T21:11:27.970720Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:27.970748Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:27.970760Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:27.971089Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:28.227548Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12700 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:28.574092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004486/r3tmp/tmpG08cDQ/pdisk_1.dat 2025-06-24T21:11:32.515121Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:32.653928Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:32.674213Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:32.674330Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:32.684313Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17322, node 4 2025-06-24T21:11:32.802002Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:32.802032Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:32.802041Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:32.802176Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27805 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:33.128684Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:33.337577Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:36.265531Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626580041010767:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:36.265604Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626580041010775:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:36.265677Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:36.269833Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:36.298539Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519626580041010781:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:11:36.376298Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519626580041010852:2690] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:38.546041Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626587273375365:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:38.546094Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004486/r3tmp/tmpvlyg21/pdisk_1.dat 2025-06-24T21:11:38.841467Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:38.882475Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:38.882563Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:38.892974Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14508, node 7 2025-06-24T21:11:39.133877Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:39.133912Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:39.133921Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:39.134077Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3301 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:39.493961Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:3 ... 
kupError; 2025-06-24T21:11:44.846425Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:44.869128Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:44.869221Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:44.882341Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:44.912054Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 14833, node 10 2025-06-24T21:11:45.092304Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:45.092329Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:45.092338Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:45.092478Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17634 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:45.488029Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:45.640402Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:48.625796Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626630121331664:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:48.625925Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:48.626523Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626630121331676:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:48.631244Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:48.661812Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519626630121331678:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:11:48.754840Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519626630121331754:2680] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:50.667222Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626640690667725:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:50.667336Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004486/r3tmp/tmpEWCitY/pdisk_1.dat 2025-06-24T21:11:50.993404Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:51.051892Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 TServer::EnableGrpc on GrpcPort 13082, node 13 2025-06-24T21:11:51.083018Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:51.083123Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:51.086446Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:51.102890Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:51.102910Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:51.102930Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:51.103059Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12359 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:51.488893Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:51.591702Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:51.783284Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:55.268066Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626662165505402:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:55.268166Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:55.268447Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626662165505414:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:55.277714Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:55.320039Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519626662165505416:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:11:55.415491Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519626662165505491:2797] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:55.535847Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhwgzm0fjqfk6989hwkf7w7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTFlNDZmZWUtYzE3ZDNlM2MtY2MzOGY5MzUtNjVmZjBmNjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:55.659238Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519626640690667725:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:55.659320Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbTableBulkUpsert::AsyncIndexShouldSucceed [GOOD] Test command err: 2025-06-24T21:11:27.113314Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626541554739917:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.113459Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449d/r3tmp/tmpXjNb6E/pdisk_1.dat 2025-06-24T21:11:27.615254Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.634816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.647328Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.661419Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31626, node 1 2025-06-24T21:11:27.907852Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:27.907876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:27.907882Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:27.907994Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:28.131715Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29060 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:28.518543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:30.458136Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626554439642778:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.458261Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:31.400650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:31.538870Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626558734610265:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:31.538945Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626558734610270:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:31.538944Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:31.542741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:31.569910Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626558734610272:2313], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:11:31.647459Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626558734610347:2804] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:31.708592Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwg5mq4swsx7svx1fbpy0f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmUyNDhkOWQtNjY1YzRkZDItZjQ0YWYwZWMtMWQ2MGZkMzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:31.821160Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwg5mq4swsx7svx1fbpy0f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmY1MjgxZDEtZTQ3NGI0M2QtYmVkZTY4YzItYmIwNDM3OGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:31.870195Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhwg5mq4swsx7svx1fbpy0f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjI5NGUzMDEtNzE5MGIzZDMtYjFhOTU3NmEtYTViZjY3MWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:32.114240Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626541554739917:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:32.114289Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:32.152729Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhwg5mq4swsx7svx1fbpy0f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjE0OTY0YWQtMWE3YTE3OWMtYmU2MzE2YTMtZDYxZDMwMmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:11:33.764573Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626568210385622:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:33.764626Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449d/r3tmp/tmpCyDzcB/pdisk_1.dat 2025-06-24T21:11:34.072118Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:34.095660Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:34.095739Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:34.103243Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:34.120504Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 3373, node 4 2025-06-24T21:11:34.171786Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:34.171812Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:34.171819Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:34.171961Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13395 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:34.524646Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:34.787319Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:37.212019Z no ... 
initialize from file: (empty maybe) 2025-06-24T21:11:40.193658Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:40.193825Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9356 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:40.563719Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:40.587441Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:11:40.640031Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:43.415076Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Decimal(22,9) value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Date value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Datetime value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Timestamp value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Interval value CLIENT_INTERNAL_ERROR
: Error: GRpc error: (13): Unable to parse request
: Error: Grpc error response on endpoint localhost:2169 BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Yson value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Json value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid JSON for JsonDocument provided: TAPE_ERROR: The JSON document has an improper structure: missing or superfluous commas, braces, missing keys, etc. This is a fatal and unrecoverable error. BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid DyNumber string representation 2025-06-24T21:11:45.740611Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626617092805865:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:45.740647Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449d/r3tmp/tmp1enguO/pdisk_1.dat 2025-06-24T21:11:46.110277Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:46.129444Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:46.129531Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:46.138415Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24588, node 10 2025-06-24T21:11:46.307854Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:46.307877Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:46.307884Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:46.308015Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15962 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:46.653799Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:46.839391Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:50.024615Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667)
: Error: Bulk upsert to table '/Root/ui8' Only async-indexed tables are supported by BulkUpsert
: Error: Bulk upsert to table '/Root/ui8/Value_index/indexImplTable' unknown table 2025-06-24T21:11:52.089884Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626648159716465:2180];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449d/r3tmp/tmpMwkqJi/pdisk_1.dat 2025-06-24T21:11:52.228011Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:11:52.352841Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:52.378977Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:52.379069Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:52.387782Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23876, node 13 2025-06-24T21:11:52.555820Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:52.555841Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:52.555848Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:52.556003Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19084 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:52.776637Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:53.079280Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:56.166516Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667)
: Error: Bulk upsert to table '/Root/ui8/Value_index/indexImplTable' unknown table 2025-06-24T21:11:56.451127Z node 13 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill >> TGRpcNewCoordinationClient::SessionMethods >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientWithCorrectCerts [GOOD] >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientProvidesEmptyClientCerts ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::ReadTablePg [GOOD] Test command err: 2025-06-24T21:11:27.155639Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626539848961513:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.155704Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004482/r3tmp/tmphjmhnW/pdisk_1.dat 2025-06-24T21:11:27.834586Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.834692Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.873594Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.884316Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10854, node 1 2025-06-24T21:11:28.203847Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:28.260281Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:28.260304Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:28.260311Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:28.260456Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16528 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:28.723994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:28.870032Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626544143929745:2617] txid# 281474976715658, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-24T21:11:32.000015Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626556007210586:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:32.000061Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004482/r3tmp/tmpShBVXQ/pdisk_1.dat 2025-06-24T21:11:32.259188Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24722, node 4 2025-06-24T21:11:32.344798Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:32.344937Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:32.430159Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:32.499915Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:32.499941Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:32.499948Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:32.500092Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3504 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:32.830107Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:33.021065Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:35.609169Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626573187080762:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:35.609269Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:35.609971Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626573187080774:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:35.614816Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:35.643857Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519626573187080776:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:11:35.720597Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519626573187080849:2675] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:38.186278Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626588669094920:2073];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004482/r3tmp/tmpHIgwtt/pdisk_1.dat 2025-06-24T21:11:38.230718Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:11:38.433285Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:38.464112Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:38.464223Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:38.470272Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:38.487843Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 20652, node 7 2025-06-24T21:11:38.655483Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:38.655513Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:38.655520Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:38.655661Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26461 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Deprica ... 
.897422Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:11:52.897435Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:11:52.897483Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:11:52.911824Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# CreateSessionRequest, traceId# 01jyhwgxaf9kxwgnj9xgkcz7w0, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:56398, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-24T21:11:55.913729Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ExecuteDataQueryRequest, traceId# 01jyhwh089fj5k288v3tpmph70, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:56398, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-24T21:11:55.915877Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626659665175991:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:55.915971Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:55.916528Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626659665176003:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:55.922165Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:55.935786Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:11:55.935924Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:11:55.935933Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:11:55.935982Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:11:55.955728Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:11:55.955890Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:11:55.955909Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:11:55.956002Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:11:55.961660Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519626659665176005:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:11:56.065260Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519626663960143374:2802] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:56.262396Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhwh089fj5k288v3tpmph70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=MjJiM2E3ZTMtOGRiOTk4MjAtMjEyYzZmZGMtMmUzYWY5MDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:56.295365Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ReadTableRequest, traceId# 01jyhwh0m4e7m17w5xy9x4xda5, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:56398, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-24T21:11:56.295740Z node 13 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [13:7519626663960143433:2312] Finish grpc stream, status: 400010 2025-06-24T21:11:56.302855Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ReadTableRequest, traceId# 01jyhwh0mea81tkh1z28ph9fff, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:56398, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-24T21:11:56.339566Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:267: [13:7519626663960143434:2313] Adding quota request to queue ShardId: 0, TxId: 281474976710662 2025-06-24T21:11:56.339611Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:629: [13:7519626663960143434:2313] Assign stream quota to Shard 0, Quota 5, TxId 281474976710662 Reserved: 5 of 25, Queued: 0 2025-06-24T21:11:56.343215Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:647: [13:7519626663960143434:2313] got stream part, size: 246, RU required: 128 rate limiter absent 2025-06-24T21:11:56.343522Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:563: [13:7519626663960143434:2313] Starting inactivity timer for 600.000000s with tag 3 2025-06-24T21:11:56.346701Z node 13 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [13:7519626663960143434:2313] Finish grpc stream, status: 400000 2025-06-24T21:11:56.351323Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ReadTableRequest, traceId# 01jyhwh0nzc9ece12t0r1wee19, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:56398, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-24T21:11:56.374784Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:267: [13:7519626663960143461:2315] Adding quota request to queue ShardId: 0, TxId: 281474976710664 2025-06-24T21:11:56.374823Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:629: [13:7519626663960143461:2315] Assign stream quota to Shard 0, Quota 5, TxId 281474976710664 Reserved: 5 of 25, Queued: 0 2025-06-24T21:11:56.377031Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:647: [13:7519626663960143461:2315] got stream part, size: 84, RU required: 128 rate limiter absent 2025-06-24T21:11:56.377338Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:563: [13:7519626663960143461:2315] Starting inactivity timer for 600.000000s with tag 3 2025-06-24T21:11:56.408040Z node 13 :READ_TABLE_API 
NOTICE: rpc_read_table.cpp:531: [13:7519626663960143461:2315] Finish grpc stream, status: 400000 2025-06-24T21:11:56.413154Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ReadTableRequest, traceId# 01jyhwh0qt39ed3tx3s2hggg49, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:56398, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-24T21:11:56.479282Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:267: [13:7519626663960143481:2317] Adding quota request to queue ShardId: 0, TxId: 281474976710666 2025-06-24T21:11:56.479327Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:629: [13:7519626663960143481:2317] Assign stream quota to Shard 0, Quota 5, TxId 281474976710666 Reserved: 5 of 25, Queued: 0 2025-06-24T21:11:56.484387Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:647: [13:7519626663960143481:2317] got stream part, size: 210, RU required: 128 rate limiter absent 2025-06-24T21:11:56.484723Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:563: [13:7519626663960143481:2317] Starting inactivity timer for 600.000000s with tag 3 2025-06-24T21:11:56.532515Z node 13 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [13:7519626663960143481:2317] Finish grpc stream, status: 400000 2025-06-24T21:11:56.540101Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f5a80] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-06-24T21:11:56.540405Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00006f680] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-06-24T21:11:56.540639Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00000a280] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-06-24T21:11:56.540864Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000000080] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-06-24T21:11:56.541086Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f6680] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-06-24T21:11:56.541329Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00007da80] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-06-24T21:11:56.541529Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000bbe80] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-06-24T21:11:56.541732Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000bca80] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-06-24T21:11:56.541920Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00009e480] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-06-24T21:11:56.542073Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00007e080] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-06-24T21:11:56.542122Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000022880] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-06-24T21:11:56.542315Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00011f480] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-06-24T21:11:56.542331Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00006f080] received request Name# RegisterNode ok# false data# peer# current inflight# 0 
2025-06-24T21:11:56.542517Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00009d880] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-06-24T21:11:56.542525Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00007e680] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-06-24T21:11:56.542712Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000043e80] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-06-24T21:11:56.542717Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00009f680] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 2025-06-24T21:11:56.623844Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519626642485305626:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:56.623928Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> YdbYqlClient::ColumnFamiliesExternalBlobsWithoutDefaultProfile [GOOD] >> YdbYqlClient::CheckDefaultTableSettings3 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> LocalityOperation::LocksFromAnotherTenants-UseSink [GOOD] Test command err: 2025-06-24T21:11:27.135526Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626542753453662:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.135590Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449a/r3tmp/tmpPrCKOr/pdisk_1.dat 2025-06-24T21:11:27.708675Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.708768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.717958Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:27.724077Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64337, node 1 2025-06-24T21:11:27.903658Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:27.903693Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:27.903701Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:27.903805Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:28.153076Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9343 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:28.518588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:32.354314Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626561501071393:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:32.359265Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449a/r3tmp/tmpJIOD3K/pdisk_1.dat 2025-06-24T21:11:32.637881Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15292, node 4 2025-06-24T21:11:32.743550Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:32.743998Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:32.795012Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:32.845703Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:32.845725Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:32.845732Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:32.845858Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62496 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:33.141808Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:37.426622Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626583965297541:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:37.426714Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449a/r3tmp/tmpDgmc4d/pdisk_1.dat 2025-06-24T21:11:37.630633Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22609, node 7 2025-06-24T21:11:37.798163Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:37.798333Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:37.903855Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:37.928175Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:37.928206Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:37.928215Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:37.928379Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18622 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:38.250310Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:38.433889Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18622 2025-06-24T21:11:38.606066Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-24T21:11:38.645137Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:39.181326Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519626592038662520:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:39.314541Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:39.314912Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:39.347914Z node 7 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 9 Cookie 9 2025-06-24T21:11:39.382885Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:39.405957Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Oper ... 
E WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 12 Cookie 12 2025-06-24T21:11:50.450195Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:50.544387Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-24T21:11:50.591235Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:51.125122Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7519626645832779330:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:51.264025Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:51.264153Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:51.272791Z node 10 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 11 Cookie 11 2025-06-24T21:11:51.274117Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_tenant_1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:11:51.276261Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:51.379452Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:52.147244Z node 11 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:53.079272Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519626632077625359:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:53.079355Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:54.005202Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:54.222898Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:54.365632Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626657847431248:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:54.365695Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626657847431260:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:54.365752Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:54.378030Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710664:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:54.447013Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519626657847431262:2333], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710664 completed, doublechecking } 2025-06-24T21:11:54.520837Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519626657847431340:3402] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:54.627037Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jyhwgyqvazsx7dxk9emwqafk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OTg3Y2NhOWYtYWJmZjJlZDEtN2U4MmM5YzktYTZjMzI2NWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:54.777254Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710667. Ctx: { TraceId: 01jyhwgz1sb4vf9ccdwftkcmm7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OTg3Y2NhOWYtYWJmZjJlZDEtN2U4MmM5YzktYTZjMzI2NWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:54.937312Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710668. Ctx: { TraceId: 01jyhwgz5h263jxmgrhpgsyde7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OTg3Y2NhOWYtYWJmZjJlZDEtN2U4MmM5YzktYTZjMzI2NWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:55.358045Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519626639738101725:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:55.358120Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_tenant_0/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:56.105240Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7519626645832779330:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:56.105298Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_tenant_1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:56.197863Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710669. Ctx: { TraceId: 01jyhwgz5h263jxmgrhpgsyde7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OTg3Y2NhOWYtYWJmZjJlZDEtN2U4MmM5YzktYTZjMzI2NWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:56.201092Z node 10 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:818: ActorId: [10:7519626666437366129:2319] TxId: 281474976710669. Ctx: { TraceId: 01jyhwgz5h263jxmgrhpgsyde7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OTg3Y2NhOWYtYWJmZjJlZDEtN2U4MmM5YzktYTZjMzI2NWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Handle TEvProposeTransactionResult: unable to select coordinator. 
Tx canceled, actorId: [10:7519626666437366129:2319], previously selected coordinator: 72075186224037888, coordinator selected at propose result: 72075186224037890 2025-06-24T21:11:56.201383Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=10&id=OTg3Y2NhOWYtYWJmZjJlZDEtN2U4MmM5YzktYTZjMzI2NWQ=, ActorId: [10:7519626653552463776:2319], ActorState: ExecuteState, TraceId: 01jyhwgz5h263jxmgrhpgsyde7, Create QueryResponse for error on request, msg: 2025-06-24T21:11:56.203005Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710670. Ctx: { TraceId: 01jyhwgz5h263jxmgrhpgsyde7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OTg3Y2NhOWYtYWJmZjJlZDEtN2U4MmM5YzktYTZjMzI2NWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:56.222153Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-06-24T21:11:56.222682Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:11:56.223173Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 11 2025-06-24T21:11:56.223599Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:11:56.672675Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_tenant_1/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:11:56.688967Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:7519626667307616165:2282], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:56.755229Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:7519626667307616165:2282], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:56.927689Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:7519626667307616165:2282], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:57.201084Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:7519626667307616165:2282], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:57.605381Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:7519626667307616165:2282], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:57.673961Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_tenant_1/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; >> YdbOlapStore::LogLast50 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-17 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-18 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-11 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-12 >> TKeyValueTest::TestRenameWorksNewApi [GOOD] >> YdbYqlClient::CreateTableWithUniformPartitions [GOOD] >> YdbYqlClient::CreateTableWithUniformPartitionsAndAutoPartitioning >> YdbTableBulkUpsertOlap::UpsertCsvBug >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-29 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-30 >> TYqlDecimalTests::DecimalKey [GOOD] >> YdbTableBulkUpsertOlap::UpsertArrowBatch >> YdbYqlClient::ConnectDbAclIsStrictlyChecked >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientDoesNotProvideAnyCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesCorruptedCert >> YdbYqlClient::RetryOperationAsync >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesEmptyClientCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesServerCerts >> TTableProfileTests::ExplicitPartitionsSimple [GOOD] >> TTableProfileTests::ExplicitPartitionsUnordered >> TGRpcAuthentication::ValidCredentials ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestRenameWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:59:2057] recipient: [1:51:2096] Leader for TabletID 72057594037927937 is [1:58:2098] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:59:2057] recipient: [2:52:2096] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:78:2057] recipient: [2:37:2084] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:81:2057] recipient: [2:80:2111] Leader for TabletID 72057594037927937 is [2:58:2098] sender: [2:82:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:84:2057] recipient: [2:80:2111] !Reboot 72057594037927937 (actor [2:58:2098]) rebooted! !Reboot 72057594037927937 (actor [2:58:2098]) tablet resolver refreshed! 
new actor is[2:83:2112] Leader for TabletID 72057594037927937 is [2:83:2112] sender: [2:169:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:59:2057] recipient: [3:52:2096] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:78:2057] recipient: [3:37:2084] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:58:2098] sender: [3:82:2057] recipient: [3:80:2111] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:84:2057] recipient: [3:80:2111] !Reboot 72057594037927937 (actor [3:58:2098]) rebooted! !Reboot 72057594037927937 (actor [3:58:2098]) tablet resolver refreshed! new actor is[3:83:2112] Leader for TabletID 72057594037927937 is [3:83:2112] sender: [3:169:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:59:2057] recipient: [4:52:2096] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:79:2057] recipient: [4:37:2084] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:58:2098] sender: [4:83:2057] recipient: [4:82:2111] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:85:2057] recipient: [4:82:2111] !Reboot 72057594037927937 (actor [4:58:2098]) rebooted! !Reboot 72057594037927937 (actor [4:58:2098]) tablet resolver refreshed! new actor is[4:84:2112] Leader for TabletID 72057594037927937 is [4:84:2112] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:59:2057] recipient: [5:52:2096] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:82:2057] recipient: [5:37:2084] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:58:2098] sender: [5:86:2057] recipient: [5:84:2114] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:88:2057] recipient: [5:84:2114] !Reboot 72057594037927937 (actor [5:58:2098]) rebooted! !Reboot 72057594037927937 (actor [5:58:2098]) tablet resolver refreshed! 
new actor is[5:87:2115] Leader for TabletID 72057594037927937 is [5:87:2115] sender: [5:173:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:59:2057] recipient: [6:52:2096] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:82:2057] recipient: [6:37:2084] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:58:2098] sender: [6:86:2057] recipient: [6:84:2114] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:88:2057] recipient: [6:84:2114] !Reboot 72057594037927937 (actor [6:58:2098]) rebooted! !Reboot 72057594037927937 (actor [6:58:2098]) tablet resolver refreshed! new actor is[6:87:2115] Leader for TabletID 72057594037927937 is [6:87:2115] sender: [6:173:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:59:2057] recipient: [7:52:2096] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:83:2057] recipient: [7:37:2084] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:58:2098] sender: [7:87:2057] recipient: [7:85:2114] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:89:2057] recipient: [7:85:2114] !Reboot 72057594037927937 (actor [7:58:2098]) rebooted! !Reboot 72057594037927937 (actor [7:58:2098]) tablet resolver refreshed! new actor is[7:88:2115] Leader for TabletID 72057594037927937 is [7:88:2115] sender: [7:174:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:59:2057] recipient: [8:53:2096] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:85:2057] recipient: [8:37:2084] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:88:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:58:2098] sender: [8:89:2057] recipient: [8:87:2116] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:91:2057] recipient: [8:87:2116] !Reboot 72057594037927937 (actor [8:58:2098]) rebooted! !Reboot 72057594037927937 (actor [8:58:2098]) tablet resolver refreshed! 
new actor is[8:90:2117] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:176:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:59:2057] recipient: [9:53:2096] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2098]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:85:2057] recipient: [9:37:2084] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:88:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:58:2098] sender: [9:89:2057] recipient: [9:87:2116] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:91:2057] recipient: [9:87:2116] !Reboot 72057594037927937 (actor [9:58:2098]) rebooted! !Reboot 72057594037927937 (actor [9:58:2098]) tablet resolver refreshed! new actor is[9:90:2117] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:176:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:59:2057] recipient: [10:51:2096] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:86:2057] recipient: [10:37:2084] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:89:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:58:2098] sender: [10:90:2057] recipient: [10:88:2116] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:92:2057] recipient: [10:88:2116] !Reboot 72057594037927937 (actor [10:58:2098]) rebooted! !Reboot 72057594037927937 (actor [10:58:2098]) tablet resolver refreshed! new actor is[10:91:2117] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:177:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:59:2057] recipient: [11:53:2096] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2098]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:87:2057] recipient: [11:37:2084] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:90:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:58:2098] sender: [11:91:2057] recipient: [11:89:2117] Leader for TabletID 72057594037927937 is [11:92:2118] sender: [11:93:2057] recipient: [11:89:2117] !Reboot 72057594037927937 (actor [11:58:2098]) rebooted! !Reboot 72057594037927937 (actor [11:58:2098]) tablet resolver refreshed! 
new actor is[11:92:2118] Leader for TabletID 72057594037927937 is [11:92:2118] sender: [11:112:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:59:2057] recipient: [12:51:2096] Leader for TabletID 72057594037927937 is [12:58:2098] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:58:2 ... recipient: [60:80:2111] !Reboot 72057594037927937 (actor [60:58:2098]) rebooted! !Reboot 72057594037927937 (actor [60:58:2098]) tablet resolver refreshed! new actor is[60:83:2112] Leader for TabletID 72057594037927937 is [60:83:2112] sender: [60:169:2057] recipient: [60:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [61:56:2057] recipient: [61:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [61:56:2057] recipient: [61:52:2096] Leader for TabletID 72057594037927937 is [61:58:2098] sender: [61:59:2057] recipient: [61:52:2096] Leader for TabletID 72057594037927937 is [61:58:2098] sender: [61:76:2057] recipient: [61:14:2061] !Reboot 72057594037927937 (actor [61:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [61:58:2098] sender: [61:78:2057] recipient: [61:37:2084] Leader for TabletID 72057594037927937 is [61:58:2098] sender: [61:81:2057] recipient: [61:14:2061] Leader for TabletID 72057594037927937 is [61:58:2098] sender: [61:82:2057] recipient: [61:80:2111] Leader for TabletID 72057594037927937 is [61:83:2112] sender: [61:84:2057] recipient: [61:80:2111] !Reboot 72057594037927937 (actor [61:58:2098]) rebooted! !Reboot 72057594037927937 (actor [61:58:2098]) tablet resolver refreshed! new actor is[61:83:2112] Leader for TabletID 72057594037927937 is [61:83:2112] sender: [61:169:2057] recipient: [61:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [62:56:2057] recipient: [62:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [62:56:2057] recipient: [62:52:2096] Leader for TabletID 72057594037927937 is [62:58:2098] sender: [62:59:2057] recipient: [62:52:2096] Leader for TabletID 72057594037927937 is [62:58:2098] sender: [62:76:2057] recipient: [62:14:2061] !Reboot 72057594037927937 (actor [62:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [62:58:2098] sender: [62:79:2057] recipient: [62:37:2084] Leader for TabletID 72057594037927937 is [62:58:2098] sender: [62:82:2057] recipient: [62:81:2111] Leader for TabletID 72057594037927937 is [62:58:2098] sender: [62:83:2057] recipient: [62:14:2061] Leader for TabletID 72057594037927937 is [62:84:2112] sender: [62:85:2057] recipient: [62:81:2111] !Reboot 72057594037927937 (actor [62:58:2098]) rebooted! !Reboot 72057594037927937 (actor [62:58:2098]) tablet resolver refreshed! 
new actor is[62:84:2112] Leader for TabletID 72057594037927937 is [62:84:2112] sender: [62:170:2057] recipient: [62:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [63:56:2057] recipient: [63:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [63:56:2057] recipient: [63:52:2096] Leader for TabletID 72057594037927937 is [63:58:2098] sender: [63:59:2057] recipient: [63:52:2096] Leader for TabletID 72057594037927937 is [63:58:2098] sender: [63:76:2057] recipient: [63:14:2061] !Reboot 72057594037927937 (actor [63:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [63:58:2098] sender: [63:82:2057] recipient: [63:37:2084] Leader for TabletID 72057594037927937 is [63:58:2098] sender: [63:85:2057] recipient: [63:14:2061] Leader for TabletID 72057594037927937 is [63:58:2098] sender: [63:86:2057] recipient: [63:84:2114] Leader for TabletID 72057594037927937 is [63:87:2115] sender: [63:88:2057] recipient: [63:84:2114] !Reboot 72057594037927937 (actor [63:58:2098]) rebooted! !Reboot 72057594037927937 (actor [63:58:2098]) tablet resolver refreshed! new actor is[63:87:2115] Leader for TabletID 72057594037927937 is [63:87:2115] sender: [63:173:2057] recipient: [63:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [64:56:2057] recipient: [64:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [64:56:2057] recipient: [64:52:2096] Leader for TabletID 72057594037927937 is [64:58:2098] sender: [64:59:2057] recipient: [64:52:2096] Leader for TabletID 72057594037927937 is [64:58:2098] sender: [64:76:2057] recipient: [64:14:2061] !Reboot 72057594037927937 (actor [64:58:2098]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [64:58:2098] sender: [64:82:2057] recipient: [64:37:2084] Leader for TabletID 72057594037927937 is [64:58:2098] sender: [64:85:2057] recipient: [64:14:2061] Leader for TabletID 72057594037927937 is [64:58:2098] sender: [64:86:2057] recipient: [64:84:2114] Leader for TabletID 72057594037927937 is [64:87:2115] sender: [64:88:2057] recipient: [64:84:2114] !Reboot 72057594037927937 (actor [64:58:2098]) rebooted! !Reboot 72057594037927937 (actor [64:58:2098]) tablet resolver refreshed! new actor is[64:87:2115] Leader for TabletID 72057594037927937 is [64:87:2115] sender: [64:173:2057] recipient: [64:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [65:56:2057] recipient: [65:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [65:56:2057] recipient: [65:52:2096] Leader for TabletID 72057594037927937 is [65:58:2098] sender: [65:59:2057] recipient: [65:52:2096] Leader for TabletID 72057594037927937 is [65:58:2098] sender: [65:76:2057] recipient: [65:14:2061] !Reboot 72057594037927937 (actor [65:58:2098]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [65:58:2098] sender: [65:83:2057] recipient: [65:37:2084] Leader for TabletID 72057594037927937 is [65:58:2098] sender: [65:86:2057] recipient: [65:14:2061] Leader for TabletID 72057594037927937 is [65:58:2098] sender: [65:87:2057] recipient: [65:85:2114] Leader for TabletID 72057594037927937 is [65:88:2115] sender: [65:89:2057] recipient: [65:85:2114] !Reboot 72057594037927937 (actor [65:58:2098]) rebooted! !Reboot 72057594037927937 (actor [65:58:2098]) tablet resolver refreshed! 
new actor is[65:88:2115] Leader for TabletID 72057594037927937 is [65:88:2115] sender: [65:174:2057] recipient: [65:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [66:56:2057] recipient: [66:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [66:56:2057] recipient: [66:53:2096] Leader for TabletID 72057594037927937 is [66:58:2098] sender: [66:59:2057] recipient: [66:53:2096] Leader for TabletID 72057594037927937 is [66:58:2098] sender: [66:76:2057] recipient: [66:14:2061] !Reboot 72057594037927937 (actor [66:58:2098]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [66:58:2098] sender: [66:84:2057] recipient: [66:37:2084] Leader for TabletID 72057594037927937 is [66:58:2098] sender: [66:87:2057] recipient: [66:14:2061] Leader for TabletID 72057594037927937 is [66:58:2098] sender: [66:88:2057] recipient: [66:86:2115] Leader for TabletID 72057594037927937 is [66:89:2116] sender: [66:90:2057] recipient: [66:86:2115] !Reboot 72057594037927937 (actor [66:58:2098]) rebooted! !Reboot 72057594037927937 (actor [66:58:2098]) tablet resolver refreshed! new actor is[66:89:2116] Leader for TabletID 72057594037927937 is [66:89:2116] sender: [66:109:2057] recipient: [66:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [67:56:2057] recipient: [67:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [67:56:2057] recipient: [67:52:2096] Leader for TabletID 72057594037927937 is [67:58:2098] sender: [67:59:2057] recipient: [67:52:2096] Leader for TabletID 72057594037927937 is [67:58:2098] sender: [67:76:2057] recipient: [67:14:2061] !Reboot 72057594037927937 (actor [67:58:2098]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! Leader for TabletID 72057594037927937 is [67:58:2098] sender: [67:85:2057] recipient: [67:37:2084] Leader for TabletID 72057594037927937 is [67:58:2098] sender: [67:88:2057] recipient: [67:14:2061] Leader for TabletID 72057594037927937 is [67:58:2098] sender: [67:89:2057] recipient: [67:87:2116] Leader for TabletID 72057594037927937 is [67:90:2117] sender: [67:91:2057] recipient: [67:87:2116] !Reboot 72057594037927937 (actor [67:58:2098]) rebooted! !Reboot 72057594037927937 (actor [67:58:2098]) tablet resolver refreshed! new actor is[67:90:2117] Leader for TabletID 72057594037927937 is [67:90:2117] sender: [67:110:2057] recipient: [67:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [68:56:2057] recipient: [68:52:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [68:56:2057] recipient: [68:52:2096] Leader for TabletID 72057594037927937 is [68:58:2098] sender: [68:59:2057] recipient: [68:52:2096] Leader for TabletID 72057594037927937 is [68:58:2098] sender: [68:76:2057] recipient: [68:14:2061] !Reboot 72057594037927937 (actor [68:58:2098]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [68:58:2098] sender: [68:88:2057] recipient: [68:37:2084] Leader for TabletID 72057594037927937 is [68:58:2098] sender: [68:90:2057] recipient: [68:14:2061] Leader for TabletID 72057594037927937 is [68:58:2098] sender: [68:92:2057] recipient: [68:91:2119] Leader for TabletID 72057594037927937 is [68:93:2120] sender: [68:94:2057] recipient: [68:91:2119] !Reboot 72057594037927937 (actor [68:58:2098]) rebooted! !Reboot 72057594037927937 (actor [68:58:2098]) tablet resolver refreshed! 
new actor is[68:93:2120] Leader for TabletID 72057594037927937 is [68:93:2120] sender: [68:179:2057] recipient: [68:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [69:56:2057] recipient: [69:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [69:56:2057] recipient: [69:53:2096] Leader for TabletID 72057594037927937 is [69:58:2098] sender: [69:59:2057] recipient: [69:53:2096] Leader for TabletID 72057594037927937 is [69:58:2098] sender: [69:76:2057] recipient: [69:14:2061] !Reboot 72057594037927937 (actor [69:58:2098]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [69:58:2098] sender: [69:88:2057] recipient: [69:37:2084] Leader for TabletID 72057594037927937 is [69:58:2098] sender: [69:91:2057] recipient: [69:14:2061] Leader for TabletID 72057594037927937 is [69:58:2098] sender: [69:92:2057] recipient: [69:90:2119] Leader for TabletID 72057594037927937 is [69:93:2120] sender: [69:94:2057] recipient: [69:90:2119] !Reboot 72057594037927937 (actor [69:58:2098]) rebooted! !Reboot 72057594037927937 (actor [69:58:2098]) tablet resolver refreshed! new actor is[69:93:2120] Leader for TabletID 72057594037927937 is [69:93:2120] sender: [69:179:2057] recipient: [69:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [70:56:2057] recipient: [70:51:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [70:56:2057] recipient: [70:51:2096] Leader for TabletID 72057594037927937 is [70:58:2098] sender: [70:59:2057] recipient: [70:51:2096] Leader for TabletID 72057594037927937 is [70:58:2098] sender: [70:76:2057] recipient: [70:14:2061] !Reboot 72057594037927937 (actor [70:58:2098]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [70:58:2098] sender: [70:89:2057] recipient: [70:37:2084] Leader for TabletID 72057594037927937 is [70:58:2098] sender: [70:92:2057] recipient: [70:14:2061] Leader for TabletID 72057594037927937 is [70:58:2098] sender: [70:93:2057] recipient: [70:91:2119] Leader for TabletID 72057594037927937 is [70:94:2120] sender: [70:95:2057] recipient: [70:91:2119] !Reboot 72057594037927937 (actor [70:58:2098]) rebooted! !Reboot 72057594037927937 (actor [70:58:2098]) tablet resolver refreshed! new actor is[70:94:2120] Leader for TabletID 72057594037927937 is [0:0:0] sender: [71:56:2057] recipient: [71:53:2096] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [71:56:2057] recipient: [71:53:2096] Leader for TabletID 72057594037927937 is [71:58:2098] sender: [71:59:2057] recipient: [71:53:2096] Leader for TabletID 72057594037927937 is [71:58:2098] sender: [71:76:2057] recipient: [71:14:2061] >> YdbYqlClient::TestDoubleKey >> YdbYqlClient::TestTzTypesFullStack [GOOD] >> YdbYqlClient::TestVariant >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-47 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-48 >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyBindPassword [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-35 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-36 >> TGRpcLdapAuthentication::LdapAuthSetIncorrectDomain |95.1%| [TA] $(B)/ydb/core/keyvalue/ut/test-results/unittest/{meta.json ... results_accumulator.log} |95.1%| [TA] {RESULT} $(B)/ydb/core/keyvalue/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientWithCorrectCerts_EmptyAllowedSids >> YdbYqlClient::SecurityTokenAuthMultiTenantSDK [GOOD] >> YdbYqlClient::SecurityTokenAuthMultiTenantSDKAsync >> TGRpcLdapAuthentication::DisableBuiltinAuthMechanism [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-41 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-42 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TYqlDecimalTests::DecimalKey [GOOD] Test command err: 2025-06-24T21:11:27.080268Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626539971320685:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.080322Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00448c/r3tmp/tmpxvWFGw/pdisk_1.dat 2025-06-24T21:11:27.574039Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.574170Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.601245Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.629204Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5292, node 1 2025-06-24T21:11:27.899889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:27.899918Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:27.899932Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:27.900096Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:28.112126Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17795 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:28.449095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:30.378362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:32.299476Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626562934488127:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:32.299557Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00448c/r3tmp/tmpjB1sWr/pdisk_1.dat 2025-06-24T21:11:32.686989Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:32.717796Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:32.717886Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:32.730730Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23805, node 4 2025-06-24T21:11:32.910905Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:32.910924Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:32.910933Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:32.911087Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5827 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:33.228218Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:33.336714Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:36.040826Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:38.091681Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626588691678546:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:38.091890Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00448c/r3tmp/tmplGfv54/pdisk_1.dat 2025-06-24T21:11:38.411041Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:38.433482Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:38.433574Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:38.443131Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:38.463113Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 17496, node 7 2025-06-24T21:11:38.653292Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:38.653322Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:38.653332Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:38.653472Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23611 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:39.000640Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:39.138660Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:41.776830Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:41.916666Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626601576581557:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resour ... abaseId: /Root, SessionId: ydb://session/3?node_id=10&id=MjNjMDdhMWUtZjg2NWNlMTQtZDJiMGVjYi1mN2Q0NDRlNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:50.340293Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jyhwgtnrcpxpgdxe8nd9vdet, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=MjNjMDdhMWUtZjg2NWNlMTQtZDJiMGVjYi1mN2Q0NDRlNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:50.391835Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519626619794011991:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:50.391903Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:50.531926Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. Ctx: { TraceId: 01jyhwgttf8mch2kzncr1rngsx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=MjNjMDdhMWUtZjg2NWNlMTQtZDJiMGVjYi1mN2Q0NDRlNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:50.660745Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710664. Ctx: { TraceId: 01jyhwgv0gfpep78ztg9ay1bt6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=MjNjMDdhMWUtZjg2NWNlMTQtZDJiMGVjYi1mN2Q0NDRlNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:50.865045Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710665. Ctx: { TraceId: 01jyhwgv4cdc39yhq6csz1t3ds, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=MjNjMDdhMWUtZjg2NWNlMTQtZDJiMGVjYi1mN2Q0NDRlNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:11:52.944121Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626646262930780:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:52.945060Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00448c/r3tmp/tmp4yPn7R/pdisk_1.dat 2025-06-24T21:11:53.247300Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:53.273755Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:53.273852Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:53.279710Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24512, node 13 2025-06-24T21:11:53.475847Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:53.475872Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:53.475883Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:53.476063Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13858 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:53.904649Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:53.981094Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:57.211757Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:57.379110Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626667737768388:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:57.379226Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:57.379587Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626667737768400:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:57.384335Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:57.436436Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519626667737768402:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:11:57.526359Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519626667737768488:2787] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:57.641113Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhwh1p014jczkzv5ypfxmgm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZWU2ODg2YjktZDc2ODVlNzktY2YxMmVkNzktOWJmOTQ3ODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:57.821444Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jyhwh1yy2a3msk34n578gt9d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZWU2ODg2YjktZDc2ODVlNzktY2YxMmVkNzktOWJmOTQ3ODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:57.944398Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519626646262930780:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:57.944481Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:58.021076Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. Ctx: { TraceId: 01jyhwh24f0s95nhp2wxxgqzr1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZWU2ODg2YjktZDc2ODVlNzktY2YxMmVkNzktOWJmOTQ3ODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:58.180701Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710664. Ctx: { TraceId: 01jyhwh2ak9whtnyjp3snz5t5t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZWU2ODg2YjktZDc2ODVlNzktY2YxMmVkNzktOWJmOTQ3ODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:58.329596Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710665. Ctx: { TraceId: 01jyhwh2fc2gj9tp4q91640sby, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZWU2ODg2YjktZDc2ODVlNzktY2YxMmVkNzktOWJmOTQ3ODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:58.473250Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jyhwh2kz7wc4v2x8dg8cf2g6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZWU2ODg2YjktZDc2ODVlNzktY2YxMmVkNzktOWJmOTQ3ODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:58.607999Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710667. Ctx: { TraceId: 01jyhwh2rg2qe5468r0jj5989x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZWU2ODg2YjktZDc2ODVlNzktY2YxMmVkNzktOWJmOTQ3ODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:58.892736Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710668. 
Ctx: { TraceId: 01jyhwh2wsemytrd0ebqhe3qee, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZWU2ODg2YjktZDc2ODVlNzktY2YxMmVkNzktOWJmOTQ3ODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:59.152609Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710669. Ctx: { TraceId: 01jyhwh35x7nrz2kfy2e436n3y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZWU2ODg2YjktZDc2ODVlNzktY2YxMmVkNzktOWJmOTQ3ODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:59.533533Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710670. Ctx: { TraceId: 01jyhwh3dw4rnbntd808atgasz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZWU2ODg2YjktZDc2ODVlNzktY2YxMmVkNzktOWJmOTQ3ODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> ClientStatsCollector::CounterCacheMiss [GOOD] >> ClientStatsCollector::CounterRetryOperation >> YdbS3Internal::TestS3Listing >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientWithCorrectCerts_AccessDenied [GOOD] >> TRegisterNodeOverLegacyService::ServerWithoutCertVerification_ClientProvidesCorrectCerts >> YdbQueryService::TestCreateAttachAndDropAttachedSession [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcLdapAuthentication::DisableBuiltinAuthMechanism [GOOD] Test command err: 2025-06-24T21:11:32.345889Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626560610882966:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:32.346007Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004472/r3tmp/tmpFcFeXk/pdisk_1.dat 2025-06-24T21:11:33.068608Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:33.134175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:33.134299Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:33.138014Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8936, node 1 2025-06-24T21:11:33.387621Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:33.387660Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:33.387667Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:33.387796Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:33.393534Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21523 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:33.873086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:37.707518Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626584252283052:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:37.707579Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004472/r3tmp/tmpQJYA2J/pdisk_1.dat 2025-06-24T21:11:38.014313Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:38.053995Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 24812, node 4 2025-06-24T21:11:38.068576Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:38.068645Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:38.094825Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:38.204240Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:38.204260Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:38.204267Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:38.204401Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12628 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:38.504921Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:38.727281Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:42.741082Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626605619163824:2190];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004472/r3tmp/tmpJFBmxI/pdisk_1.dat 2025-06-24T21:11:42.887077Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:11:43.013009Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:43.033027Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:43.033140Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:43.041349Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3613, node 7 2025-06-24T21:11:43.395847Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:43.395871Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:43.395880Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:43.396043Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19415 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:43.726204Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:43.739563Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:47.691832Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519626605619163824:2190];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:47.691890Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:50.989103Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626638002164902:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:50.990597Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004472/r3tmp/tmpP6eKDV/pdisk_1.dat 2025-06-24T21:11:51.217200Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61546, node 10 2025-06-24T21:11:51.328860Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:51.328950Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:51.337008Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:51.351082Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:51.351104Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:51.351111Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:51.351296Z node 
10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5346 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:51.672979Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:51.688615Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:11:52.010645Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:56.547820Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626663559631803:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:56.547886Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004472/r3tmp/tmpITNjiH/pdisk_1.dat 2025-06-24T21:11:56.934237Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26664, node 13 2025-06-24T21:11:57.019161Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:57.019266Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:57.053909Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:57.111037Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:57.111063Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:57.111072Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:57.111237Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration 2025-06-24T21:11:57.579428Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25311 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:57.672209Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... >> TGRpcLdapAuthentication::LdapAuthWithValidCredentials [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithInvalidSearchFilter >> YdbTableBulkUpsert::Simple >> TTableProfileTests::OverwriteCompactionPolicy ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbQueryService::TestCreateAttachAndDropAttachedSession [GOOD] Test command err: 2025-06-24T21:11:35.208706Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626575964882549:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:35.208751Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00446e/r3tmp/tmp9RBk1L/pdisk_1.dat 2025-06-24T21:11:35.774255Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:35.774364Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:35.776213Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:35.797799Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:35.851705Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 7022, node 1 2025-06-24T21:11:35.899203Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:35.899223Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:11:35.899229Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:35.899344Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3803 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:11:36.285298Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:36.414639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
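
The repeated `WaitRootIsUp 'Root'` / `TClient::Ls` entries in this log are the test harness polling the scheme root over the freshly started gRPC endpoint until the database answers. For orientation only, here is a minimal client-side sketch of the same check using the public YDB C++ SDK; the endpoint value, database path, and include paths are assumptions (they depend on the SDK layout and on the port printed by `TServer::EnableGrpc`), and this snippet is not part of the test harness shown above.

```cpp
// Minimal sketch: describe the scheme root, roughly what the
// "WaitRootIsUp 'Root'" / TClient::Ls polling in the log checks.
// Endpoint, database and include paths are assumptions.
#include <ydb/public/sdk/cpp/client/ydb_driver/driver.h>
#include <ydb/public/sdk/cpp/client/ydb_scheme/scheme.h>

#include <util/stream/output.h>

int main() {
    auto config = NYdb::TDriverConfig()
        .SetEndpoint("localhost:7022")   // hypothetical; the log prints the real GrpcPort
        .SetDatabase("/Root");
    NYdb::TDriver driver(config);

    NYdb::NScheme::TSchemeClient scheme(driver);
    auto result = scheme.DescribePath("/Root").GetValueSync();

    if (result.IsSuccess()) {
        // A successful describe of "Root" is the client-side analogue of
        // "WaitRootIsUp 'Root' success." in the log.
        Cout << "Root is up, entry: " << result.GetEntry().Name << Endl;
    } else {
        Cerr << "DescribePath failed: " << result.GetIssues().ToString() << Endl;
    }

    driver.Stop(true);
    return result.IsSuccess() ? 0 : 1;
}
```
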
2025-06-24T21:11:38.944706Z node 1 :KQP_PROXY WARN: kqp_proxy_service.cpp:1578: Failed to parse session id: unknownSesson 2025-06-24T21:11:40.916001Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626594361104829:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:40.919936Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00446e/r3tmp/tmpkqtRLC/pdisk_1.dat 2025-06-24T21:11:41.367639Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:41.385016Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:41.385107Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:41.398575Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:41.449710Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 2061, node 4 2025-06-24T21:11:41.590027Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:41.590049Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:41.590056Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:41.590206Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6895 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:11:41.967247Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:41.976542Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:46.616025Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626621513486332:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:46.616086Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00446e/r3tmp/tmpqXETQI/pdisk_1.dat 2025-06-24T21:11:46.965843Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:47.024957Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 20255, node 7 2025-06-24T21:11:47.067431Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:47.071694Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:47.137530Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:47.176938Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:47.176963Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:47.176970Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:47.177135Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11042 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:47.562795Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:47.703282Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:51.986913Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626641895187152:2147];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00446e/r3tmp/tmpMqVhIP/pdisk_1.dat 2025-06-24T21:11:52.082535Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:11:52.210821Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:52.259918Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 22382, node 10 2025-06-24T21:11:52.309590Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:52.309719Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileSt ... ecting 2025-06-24T21:11:52.333420Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:52.372717Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:52.372737Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:52.372747Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:52.372900Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15586 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:52.708825Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:52.725429Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:11:52.987293Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:57.647544Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626667832971266:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:57.648087Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00446e/r3tmp/tmpclOh6w/pdisk_1.dat 2025-06-24T21:11:57.960188Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:58.007067Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:58.007199Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 19995, node 13 2025-06-24T21:11:58.015977Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 2025-06-24T21:11:58.016607Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:58.207949Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:58.207977Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:58.207986Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:58.208135Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15476 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:58.606169Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:58.681583Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:01.696362Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:12:01.697569Z node 13 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /Root, empty 2025-06-24T21:12:01.718671Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: TraceId: "01jyhwh2z2e57vx4nh24h8ksqc", Request has 18444993274187.832985s seconds to be completed 2025-06-24T21:12:01.723348Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=13&id=NjliMmI1NjMtNjU0ZWJjY2QtYjRlMDA3ZjAtMjhlYzI1M2Y=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NjliMmI1NjMtNjU0ZWJjY2QtYjRlMDA3ZjAtMjhlYzI1M2Y= 2025-06-24T21:12:01.723440Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: TraceId: "01jyhwh2z2e57vx4nh24h8ksqc", Created new session, sessionId: ydb://session/3?node_id=13&id=NjliMmI1NjMtNjU0ZWJjY2QtYjRlMDA3ZjAtMjhlYzI1M2Y=, workerId: [13:7519626685012841459:2293], database: , longSession: 1, local sessions count: 1 2025-06-24T21:12:01.726225Z node 13 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /Root, empty 2025-06-24T21:12:01.726322Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=13&id=NjliMmI1NjMtNjU0ZWJjY2QtYjRlMDA3ZjAtMjhlYzI1M2Y=, ActorId: [13:7519626685012841459:2293], ActorState: unknown state, session actor bootstrapped 2025-06-24T21:12:01.726486Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 01jyhwh2z2e57vx4nh24h8ksqc 2025-06-24T21:12:01.727543Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-24T21:12:01.727580Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 
2025-06-24T21:12:01.727602Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:12:01.727660Z node 13 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /Root, empty 2025-06-24T21:12:01.727738Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:12:01.727792Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:12:01.727860Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:12:01.729504Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:12:01.729558Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:12:01.748684Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:856: Received ping session request, has local session: ydb://session/3?node_id=13&id=NjliMmI1NjMtNjU0ZWJjY2QtYjRlMDA3ZjAtMjhlYzI1M2Y=, rpc ctrl: [13:7519626685012841483:2294], sameNode: 1, trace_id: 2025-06-24T21:12:01.748731Z node 13 :KQP_PROXY TRACE: kqp_proxy_service.cpp:878: Attach local session: [13:7519626685012841459:2293] to rpc: [13:7519626685012841483:2294] on same node 2025-06-24T21:12:01.763882Z node 13 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=13&id=NjliMmI1NjMtNjU0ZWJjY2QtYjRlMDA3ZjAtMjhlYzI1M2Y=, ActorId: [13:7519626685012841459:2293], ActorState: ReadyState, Session closed due to explicit close event 2025-06-24T21:12:01.763944Z node 13 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=13&id=NjliMmI1NjMtNjU0ZWJjY2QtYjRlMDA3ZjAtMjhlYzI1M2Y=, ActorId: [13:7519626685012841459:2293], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T21:12:01.763986Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=13&id=NjliMmI1NjMtNjU0ZWJjY2QtYjRlMDA3ZjAtMjhlYzI1M2Y=, ActorId: [13:7519626685012841459:2293], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-24T21:12:01.764036Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=13&id=NjliMmI1NjMtNjU0ZWJjY2QtYjRlMDA3ZjAtMjhlYzI1M2Y=, ActorId: [13:7519626685012841459:2293], ActorState: unknown state, Cleanup temp tables: 0 2025-06-24T21:12:01.764137Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=13&id=NjliMmI1NjMtNjU0ZWJjY2QtYjRlMDA3ZjAtMjhlYzI1M2Y=, ActorId: [13:7519626685012841459:2293], ActorState: unknown state, Session actor destroyed 2025-06-24T21:12:01.764558Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=13&id=NjliMmI1NjMtNjU0ZWJjY2QtYjRlMDA3ZjAtMjhlYzI1M2Y=, workerId: [13:7519626685012841459:2293], local sessions count: 0 2025-06-24T21:12:01.775694Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:902: Received ping session request, request_id: 3, sender: [13:7519626685012841486:2296], trace_id: 2025-06-24T21:12:01.775890Z node 13 :KQP_PROXY NOTICE: 
kqp_proxy_service.cpp:1585: Session not found: ydb://session/3?node_id=13&id=NjliMmI1NjMtNjU0ZWJjY2QtYjRlMDA3ZjAtMjhlYzI1M2Y= 2025-06-24T21:12:01.775992Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 3, sender: [13:7519626685012841486:2296], selfId: [13:7519626667832971374:2166], source: [13:7519626667832971374:2166] >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientProvidesEmptyClientCerts [GOOD] >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientDoesNotProvideCorrectCerts >> TDatabaseQuotas::DisableWritesToDatabase [GOOD] >> TGRpcAuthentication::InvalidPassword >> TGRpcNewCoordinationClient::SessionMethods [GOOD] >> TGRpcNewCoordinationClient::SessionDescribeWatchData >> TSchemeShardTTLTestsWithReboots::CopyTable [GOOD] >> YdbYqlClient::SecurityTokenAuth [GOOD] >> YdbYqlClient::RetryOperationTemplate >> YdbYqlClient::TestColumnOrder [GOOD] >> YdbYqlClient::TestDecimal >> TYqlDateTimeTests::SimpleUpsertSelect [GOOD] >> TYqlDateTimeTests::DatetimeKey >> TGRpcNewCoordinationClient::CreateDropDescribe ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTestsWithReboots::CopyTable [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T21:10:23.322205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:10:23.322282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:23.322320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:10:23.322391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:10:23.322463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:10:23.322495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:10:23.322566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, 
WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:10:23.322645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:10:23.323431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:10:23.323797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:10:23.402415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:10:23.402486Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:10:23.403040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T21:10:23.414025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:10:23.414383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:10:23.414556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:10:23.420157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:10:23.420317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:10:23.420767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:23.420924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:10:23.423073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:23.423259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:10:23.424054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:10:23.424103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:10:23.424244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:10:23.424279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:10:23.424335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:10:23.424431Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T21:10:23.429453Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:10:23.536168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:10:23.536383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.536533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:10:23.536566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:10:23.536766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:10:23.536837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:10:23.538644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:23.538792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:10:23.538969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.539005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:10:23.539039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:10:23.539067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:10:23.540628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-24T21:10:23.540666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:10:23.540693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:10:23.541911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.541943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:10:23.542019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:10:23.542052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:10:23.544569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:10:23.545812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:10:23.545919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:10:23.546598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:10:23.546681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 42949694 ... 
{ PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1469 } } CommitVersion { Step: 5000004 TxId: 1003 } 2025-06-24T21:12:04.908022Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409547, partId: 0 2025-06-24T21:12:04.908181Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 3 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1469 } } CommitVersion { Step: 5000004 TxId: 1003 } 2025-06-24T21:12:04.908291Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 3 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1469 } } CommitVersion { Step: 5000004 TxId: 1003 } 2025-06-24T21:12:04.908645Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-24T21:12:04.909182Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 437 RawX2: 416611830117 } Origin: 72075186233409547 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-06-24T21:12:04.909228Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409547, partId: 0 2025-06-24T21:12:04.909342Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: Source { RawX1: 437 RawX2: 416611830117 } Origin: 72075186233409547 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-06-24T21:12:04.909394Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 1003:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:12:04.909480Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 1003:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 437 RawX2: 416611830117 } Origin: 72075186233409547 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-06-24T21:12:04.909539Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 1003:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:04.909579Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-24T21:12:04.909619Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 1003:0, datashard: 72075186233409546, at 
schemeshard: 72057594046678944 2025-06-24T21:12:04.909661Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 1003:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:12:04.909693Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1003:0 129 -> 240 2025-06-24T21:12:04.914823Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-24T21:12:04.915524Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-24T21:12:04.915963Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-24T21:12:04.916028Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:93: TCopyTable::TWaitCopyTableBarrier operationId: 1003:0ProgressState, operation type TxCopyTable 2025-06-24T21:12:04.916081Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1060: Set barrier, OperationId: 1003:0, name: CopyTableBarrier, done: 0, blocked: 1, parts count: 1 2025-06-24T21:12:04.916124Z node 97 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1104: All parts have reached barrier, tx: 1003, done: 0, blocked: 1 2025-06-24T21:12:04.916196Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:76: TCopyTable::TWaitCopyTableBarrier operationId: 1003:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 1003 Name: CopyTableBarrier }, at tablet# 72057594046678944 2025-06-24T21:12:04.916237Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1003:0 240 -> 240 2025-06-24T21:12:04.919826Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-24T21:12:04.919889Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-24T21:12:04.919994Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1003:0 progress is 1/1 2025-06-24T21:12:04.920027Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-24T21:12:04.920069Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1003:0 progress is 1/1 2025-06-24T21:12:04.920100Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-24T21:12:04.920141Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-06-24T21:12:04.920183Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-24T21:12:04.920226Z node 97 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1003:0 2025-06-24T21:12:04.920258Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1003:0 2025-06-24T21:12:04.920412Z node 97 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:12:04.920452Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-24T21:12:04.922877Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-24T21:12:04.922929Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-24T21:12:04.923351Z node 97 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-24T21:12:04.923509Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-24T21:12:04.923550Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [97:532:2491] TestWaitNotification: OK eventTxId 1003 2025-06-24T21:12:04.924023Z node 97 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTableCopy" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:12:04.924244Z node 97 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTableCopy" took 254us result status StatusSuccess 2025-06-24T21:12:04.924841Z node 97 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTableCopy" PathDescription { Self { Name: "TTLEnabledTableCopy" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTableCopy" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 
WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> YdbYqlClient::TestVariant [GOOD] >> YdbYqlClient::TestTransactionQueryError >> TTableProfileTests::UseDefaultProfile [GOOD] >> TTableProfileTests::UseTableProfilePreset >> YdbYqlClient::ConnectDbAclIsStrictlyChecked [GOOD] >> YdbYqlClient::ConnectDbAclIsOffWhenYdbRequestsWithoutDatabase >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-18 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-19 >> YdbYqlClient::TestReadTableOneBatch >> YdbYqlClient::CreateTableWithUniformPartitionsAndAutoPartitioning [GOOD] >> YdbYqlClient::CreateTableWithPartitionAtKeysAndAutoPartitioning >> YdbYqlClient::CheckDefaultTableSettings3 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-12 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-13 >> YdbTableBulkUpsertOlap::UpsertCsvBug [GOOD] >> YdbTableBulkUpsertOlap::UpsertCSV >> YdbYqlClient::SecurityTokenAuthMultiTenantSDKAsync [GOOD] >> YdbYqlClient::SimpleColumnFamilies >> YdbYqlClient::TestDoubleKey [GOOD] >> YdbYqlClient::TestMultipleModifications >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-30 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-31 |95.1%| [TA] $(B)/ydb/core/tx/schemeshard/ut_ttl/test-results/unittest/{meta.json ... results_accumulator.log} >> TRegisterNodeOverLegacyService::ServerWithoutCertVerification_ClientProvidesCorrectCerts [GOOD] >> TGRpcLdapAuthentication::LdapAuthSetIncorrectDomain [GOOD] >> TRegisterNodeOverLegacyService::ServerWithoutCertVerification_ClientProvidesEmptyClientCerts |95.1%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_ttl/test-results/unittest/{meta.json ... 
results_accumulator.log} >> YdbTableBulkUpsertOlap::UpsertArrowBatch [GOOD] >> YdbTableBulkUpsertOlap::UpsertArrowDupField >> TGRpcAuthentication::ValidCredentials [GOOD] >> TGRpcAuthentication::NoConnectRights >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-48 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-1 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-36 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-37 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::CheckDefaultTableSettings3 [GOOD] Test command err: 2025-06-24T21:11:31.182566Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626559256382099:2153];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:31.182695Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004473/r3tmp/tmp8Rnb5X/pdisk_1.dat 2025-06-24T21:11:31.510325Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:31.544205Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 3912, node 1 2025-06-24T21:11:31.571463Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:31.571589Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:31.589012Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:31.589067Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:31.589081Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:31.589198Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:31.591690Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29844 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:31.879660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:32.182541Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004473/r3tmp/tmpT6g7Vf/pdisk_1.dat 2025-06-24T21:11:36.245971Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626580354693602:2154];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:36.385048Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:11:36.496935Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:36.526201Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:36.526330Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:36.534250Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:36.552811Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 62502, node 4 2025-06-24T21:11:36.816414Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:36.816444Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:36.816452Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:36.816614Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32125 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:37.116345Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:37.246206Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:32125 2025-06-24T21:11:37.536318Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /Root/ydb_ut_tenant, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-24T21:11:37.536876Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:11:37.536912Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-24T21:11:37.541645Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710658, database: /Root, subject: , status: StatusAccepted, operation: CREATE DATABASE, path: /Root/ydb_ut_tenant 2025-06-24T21:11:37.569550Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750799497615, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T21:11:37.572818Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710658:0 2025-06-24T21:11:37.572910Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 281474976710658, publications: 2, subscribers: 1 2025-06-24T21:11:37.576333Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976710658, subscribers: 1 2025-06-24T21:11:37.583783Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: /Root/ydb_ut_tenant, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-06-24T21:11:37.584594Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710659:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:11:37.584659Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:37.586745Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710659, database: /Root, 
subject: , status: StatusAccepted, operation: ALTER DATABASE, path: /Root/ydb_ut_tenant waiting... 2025-06-24T21:11:38.105748Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519626589089655640:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:38.215665Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ObjectStorage, PostgreSQL 2025-06-24T21:11:38.215726Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:11:38.401866Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:38.401972Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:38.412317Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:11:38.412590Z node 4 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 6 Cookie 6 2025-06-24T21:11:38.423934Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:39.116816Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:39.484744Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750799499526, transactions count in step: 1, at schemeshard: ... 057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:10284 2025-06-24T21:11:53.530195Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /Root/ydb_ut_tenant, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-24T21:11:53.530600Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:11:53.530649Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) 2025-06-24T21:11:53.536841Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710658, database: /Root, subject: , status: StatusAccepted, operation: CREATE DATABASE, path: /Root/ydb_ut_tenant waiting... 
2025-06-24T21:11:53.555275Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750799513596, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T21:11:53.557551Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710658:0 2025-06-24T21:11:53.557610Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 281474976710658, publications: 2, subscribers: 1 2025-06-24T21:11:53.558675Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976710658, subscribers: 1 2025-06-24T21:11:53.563682Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: /Root/ydb_ut_tenant, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-06-24T21:11:53.564314Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710659:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:11:53.564365Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:53.566300Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710659, database: /Root, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: /Root/ydb_ut_tenant waiting... waiting... 
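Each runtime event in these traces shares a common shape: an ISO-8601 timestamp, `node <id>`, a component tag such as `FLAT_TX_SCHEMESHARD`, `HIVE` or `NET_CLASSIFIER`, and a severity (`NOTICE`, `WARN`, `ERROR`). The Python sketch below assumes only that observed shape (it is not a tool shipped with the test suite) and summarises which components emit the most warnings and errors in a saved log.

```python
import re
import sys
from collections import Counter

# Shape observed in the traces above, e.g.
#   2025-06-24T21:11:31.510325Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: ...
EVENT_RE = re.compile(
    r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z node (?P<node>\d+) "
    r":(?P<component>[A-Z_]+) (?P<severity>[A-Z]+):"
)

def summarise(stream) -> Counter:
    """Count (component, severity) pairs for WARN/ERROR events in a log stream."""
    counts = Counter()
    for line in stream:
        for match in EVENT_RE.finditer(line):
            if match.group("severity") in ("WARN", "ERROR"):
                counts[(match.group("component"), match.group("severity"))] += 1
    return counts

if __name__ == "__main__":
    # Usage (hypothetical file name): python3 summarise.py < ya_console.log
    for (component, severity), count in summarise(sys.stdin).most_common(10):
        print(f"{component:<20} {severity:<6} {count}")
```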
2025-06-24T21:11:54.583703Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519626657461939931:2167];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:54.583819Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:11:54.619558Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:54.619668Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:54.630347Z node 10 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 12 Cookie 12 2025-06-24T21:11:54.643545Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:55.591847Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:55.642397Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750799515682, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T21:11:55.646004Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710659:0 2025-06-24T21:11:55.646192Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 281474976710659, publications: 1, subscribers: 2 2025-06-24T21:11:55.648232Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976710659, subscribers: 2 2025-06-24T21:11:56.890361Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/ydb_ut_tenant/Table-1, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-06-24T21:11:56.892020Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710660:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:11:56.892078Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:56.900309Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710660, database: /Root/ydb_ut_tenant, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /Root/ydb_ut_tenant/Table-1 2025-06-24T21:11:57.031405Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750799517060, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T21:11:57.044498Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519626647785516062:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:57.044585Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:57.048092Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710660:0 2025-06-24T21:11:57.119170Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-06-24T21:11:57.119838Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:12:00.713771Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626681192637983:2159];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:00.725120Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004473/r3tmp/tmpeI1sMA/pdisk_1.dat 2025-06-24T21:12:00.983405Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:01.029131Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 TServer::EnableGrpc on GrpcPort 22093, node 13 2025-06-24T21:12:01.062774Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:01.062857Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:01.109974Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:01.123872Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:01.123898Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:01.123907Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:01.124089Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10744 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:01.616213Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:01.714137Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:05.707281Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519626681192637983:2159];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:05.707413Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:05.861841Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TTableProfileTests::ExplicitPartitionsUnordered [GOOD] >> TTableProfileTests::ExplicitPartitionsComplex >> YdbMonitoring::SelfCheck >> TGRpcLdapAuthentication::LdapAuthWithInvalidSearchFilter [GOOD] >> TGRpcNewClient::SimpleYqlQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcLdapAuthentication::LdapAuthSetIncorrectDomain [GOOD] Test command err: 2025-06-24T21:11:41.821670Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626601661800477:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:41.821738Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004468/r3tmp/tmpi7zYFY/pdisk_1.dat 2025-06-24T21:11:42.448740Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:42.448902Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:42.461108Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:42.476565Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:42.551481Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 11916, node 1 2025-06-24T21:11:42.818000Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:42.941979Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:42.942001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:42.942008Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:42.942104Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4893 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:43.423282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:47.552272Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626625642572103:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:47.552350Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004468/r3tmp/tmpvrO6JS/pdisk_1.dat 2025-06-24T21:11:47.800273Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:47.833492Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 10749, node 4 2025-06-24T21:11:47.887353Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:47.887462Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:47.898618Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:47.908706Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:47.908728Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:47.908737Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:47.908869Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21767 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:48.193819Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:52.617274Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626648198413477:2187];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004468/r3tmp/tmpuHEPId/pdisk_1.dat 2025-06-24T21:11:52.645472Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:11:52.801239Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:52.828789Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:52.828889Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:52.834651Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10042, node 7 2025-06-24T21:11:53.022609Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:53.022636Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:53.022643Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:53.022784Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11989 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:53.312319Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:57.112014Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626669015472454:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:57.112074Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004468/r3tmp/tmprjTidX/pdisk_1.dat 2025-06-24T21:11:57.392012Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63459, node 10 2025-06-24T21:11:57.460202Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 2025-06-24T21:11:57.464808Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:57.464927Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:57.488955Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:57.605567Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:57.605597Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:57.605608Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:57.605798Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18958 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:57.992985Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:58.121410Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:02.640252Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626690522582162:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:02.647136Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004468/r3tmp/tmpIM6lQJ/pdisk_1.dat 2025-06-24T21:12:03.051126Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:03.081494Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:03.081616Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:03.092489Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24497, node 13 2025-06-24T21:12:03.104089Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 2025-06-24T21:12:03.164012Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:03.164043Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:03.164052Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:03.164208Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24203 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:12:03.671366Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:03.716861Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... >> TGRpcNewCoordinationClient::SessionDescribeWatchData [GOOD] >> TGRpcNewCoordinationClient::SessionDescribeWatchOwners >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-42 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-43 >> YdbS3Internal::TestS3Listing [GOOD] >> YdbS3Internal::TestAccessCheck >> ClientStatsCollector::CounterRetryOperation [GOOD] >> ClientStatsCollector::ExternalMetricRegistryByRawPtr >> TGRpcAuthentication::InvalidPassword [GOOD] >> TGRpcAuthentication::DisableLoginAuthentication >> YdbYqlClient::TestDecimal [GOOD] >> YdbYqlClient::TestBusySession >> YdbYqlClient::BuildInfo >> YdbYqlClient::RetryOperationTemplate [GOOD] >> YdbYqlClient::RetryOperationSync >> YdbTableBulkUpsert::Simple [GOOD] >> YdbTableBulkUpsert::SyncIndexShouldSucceed >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientDoesNotProvideCorrectCerts [GOOD] >> YdbTableBulkUpsert::Nulls [GOOD] >> YdbTableBulkUpsert::NotNulls >> TGRpcNewCoordinationClient::CreateDropDescribe [GOOD] >> TGRpcNewCoordinationClient::CreateAlter >> YdbYqlClient::ConnectDbAclIsOffWhenYdbRequestsWithoutDatabase [GOOD] >> YdbYqlClient::CopyTables >> YdbOlapStore::LogLast50ByResource >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientWithCorrectCerts_EmptyAllowedSids [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientWithCorrectCerts_AllowOnlyDefaultGroup >> TYqlDateTimeTests::DatetimeKey [GOOD] >> TYqlDateTimeTests::TimestampKey ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientDoesNotProvideCorrectCerts [GOOD] Test command err: 2025-06-24T21:11:33.424127Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626567211502260:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:33.424263Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004470/r3tmp/tmpicSJdY/pdisk_1.dat 2025-06-24T21:11:33.901507Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:33.901615Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:33.917560Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:33.918738Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:33.986350Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 19974, node 1 2025-06-24T21:11:34.111420Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is 
empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:34.111444Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:34.111451Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:34.111577Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:34.454678Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7514 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:34.642193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:34.811467Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:46644) has now valid token of root@builtin 2025-06-24T21:11:34.913386Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-24T21:11:34.913435Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:11:34.913452Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T21:11:34.913485Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-24T21:11:38.303447Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626588619023747:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:38.313167Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004470/r3tmp/tmp86od4H/pdisk_1.dat 2025-06-24T21:11:38.795404Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:38.802831Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:38.802916Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:38.808686Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:38.832327Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 15314, node 4 2025-06-24T21:11:39.013229Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:39.013253Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:39.013260Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:39.013377Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24634 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:11:39.357106Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:39.361968Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:39.560866Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:55696) has now valid token of root@builtin 2025-06-24T21:11:39.682002Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-24T21:11:39.682032Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:11:39.682042Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T21:11:39.682078Z node 4 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-24T21:11:43.823484Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626609955116428:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:43.823527Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004470/r3tmp/tmp8NYhGc/pdisk_1.dat 2025-06-24T21:11:44.220304Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:44.241762Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:44.241840Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:44.249797Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20632, node 7 2025-06-24T21:11:44.438348Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: 
(empty maybe) 2025-06-24T21:11:44.438371Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:44.438380Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:44.438512Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29480 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:44.716518Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:44.850110Z node 7 :TICKET_PARSER DEBUG: ticket_pa ... ed 2025-06-24T21:11:55.193345Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 TServer::EnableGrpc on GrpcPort 19012, node 13 2025-06-24T21:11:55.258862Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:55.258891Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:55.258900Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:55.259075Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:55.259212Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:55.259299Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:55.287789Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23437 WaitRootIsUp 'Root'... 
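The node-registration tests above print their `Register node result` as text-format protobuf, with repeated `Nodes { NodeId: ... Host: ... Port: ... }` entries. As a rough, non-authoritative sketch, the host/port pairs can be lifted out of such a dump with plain regular expressions; this leans only on the textual shape shown here and does not use any YDB proto definitions, and the shortened sample is copied from the block above.

```python
import re

# A fragment in the shape printed above (shortened); real blocks carry more
# fields per entry (Address, Location, Expire, ...).
SAMPLE = '''
Register node result Status { Code: OK } NodeId: 1024 DomainPath: "Root"
Nodes { NodeId: 1024 Host: "localhost" Port: 27305 ResolveHost: "localhost" }
Nodes { NodeId: 13 Host: "::1" Port: 12001 ResolveHost: "::1" }
'''

# Matches the leading fields of each Nodes { ... } entry; the rest of the
# entry (whatever fields follow Port) is ignored.
NODE_RE = re.compile(r'Nodes \{ NodeId: (\d+) Host: "([^"]+)" Port: (\d+)')

def list_registered_nodes(text: str) -> list[tuple[int, str, int]]:
    """Return (node_id, host, port) triples found in a Register-node dump."""
    return [(int(n), host, int(port)) for n, host, port in NODE_RE.findall(text)]

if __name__ == "__main__":
    for node_id, host, port in list_registered_nodes(SAMPLE):
        print(f"node {node_id}: {host}:{port}")
```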
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:55.633679Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... Trying to register node Register node result Status { Code: OK } NodeId: 1024 DomainPath: "Root" Expire: 1750806715132166 Nodes { NodeId: 1024 Host: "localhost" Port: 27305 ResolveHost: "localhost" Address: "localhost" Location { DataCenter: "DataCenter" Rack: "Rack" Unit: "Body" } Expire: 1750806715132166 } Nodes { NodeId: 13 Host: "::1" Port: 12001 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 49 RoomNum: 1 RackNum: 1 BodyNum: 1 DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } } Nodes { NodeId: 14 Host: "::1" Port: 12002 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 50 RoomNum: 2 RackNum: 2 BodyNum: 2 DataCenter: "2" Module: "2" Rack: "2" Unit: "2" } } Nodes { NodeId: 15 Host: "::1" Port: 12003 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 51 RoomNum: 3 RackNum: 3 BodyNum: 3 DataCenter: "3" Module: "3" Rack: "3" Unit: "3" } } 2025-06-24T21:11:55.983433Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:59.906518Z node 16 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[16:7519626679409719526:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:59.912135Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004470/r3tmp/tmpLe38Nz/pdisk_1.dat 2025-06-24T21:12:00.174212Z node 16 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:00.180106Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:00.180189Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:00.187352Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:00.205354Z node 16 
:BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 16 Type# 268639257 TServer::EnableGrpc on GrpcPort 3271, node 16 2025-06-24T21:12:00.299477Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:00.299501Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:00.299510Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:00.299661Z node 16 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29608 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:00.634424Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
Trying to register node Register node result Status { Code: OK } NodeId: 1024 DomainPath: "Root" Expire: 1750806720145604 Nodes { NodeId: 1024 Host: "localhost" Port: 14800 ResolveHost: "localhost" Address: "localhost" Location { DataCenter: "DataCenter" Rack: "Rack" Unit: "Body" } Expire: 1750806720145604 } Nodes { NodeId: 16 Host: "::1" Port: 12001 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 49 RoomNum: 1 RackNum: 1 BodyNum: 1 DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } } Nodes { NodeId: 17 Host: "::1" Port: 12002 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 50 RoomNum: 2 RackNum: 2 BodyNum: 2 DataCenter: "2" Module: "2" Rack: "2" Unit: "2" } } Nodes { NodeId: 18 Host: "::1" Port: 12003 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 51 RoomNum: 3 RackNum: 3 BodyNum: 3 DataCenter: "3" Module: "3" Rack: "3" Unit: "3" } } 2025-06-24T21:12:05.769288Z node 19 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[19:7519626704190911322:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:05.769347Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004470/r3tmp/tmpcguJaO/pdisk_1.dat 2025-06-24T21:12:06.202547Z node 19 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:06.208987Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:06.209067Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:06.216658Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23347, node 19 2025-06-24T21:12:06.360077Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:06.360106Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:06.360116Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:06.360261Z node 19 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15188 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:12:06.799923Z node 19 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:06.814501Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... Trying to register node 2025-06-24T21:12:07.157951Z node 19 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket F3B5D633A316D69820CDA5FD2EDC59CD35396B14E97125B53BE12C6284176A5F: Cannot create token from certificate. Client certificate failed verification Register node result Status { Code: ERROR Reason: "Cannot create token from certificate. 
Client certificate failed verification" } >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-19 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-20 >> YdbYqlClient::TestTransactionQueryError [GOOD] >> YdbYqlClient::TestReadWrongTable >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesCorruptedCert [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientDoesNotProvideClientCerts >> YdbYqlClient::CreateTableWithPartitionAtKeysAndAutoPartitioning [GOOD] >> TTableProfileTests::OverwriteCompactionPolicy [GOOD] >> TTableProfileTests::OverwriteExecutionPolicy >> TVersions::Wreck0Reverse [GOOD] >> TRegisterNodeOverLegacyService::ServerWithoutCertVerification_ClientProvidesEmptyClientCerts [GOOD] >> TTableProfileTests::DescribeTableWithPartitioningPolicy >> TGRpcAuthentication::NoConnectRights [GOOD] >> TGRpcAuthentication::NoDescribeRights >> YdbYqlClient::TestMultipleModifications [GOOD] >> YdbYqlClient::TestDescribeTableWithShardStats >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesServerCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesCorruptedPrivatekey >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-13 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-14 >> YdbYqlClient::TestReadTableOneBatch [GOOD] >> YdbYqlClient::TestReadTableNotNullBorder >> YdbTableBulkUpsertOlap::UpsertArrowDupField [GOOD] >> YdbTableBulkUpsertOlap::ParquetImportBug ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::CreateTableWithPartitionAtKeysAndAutoPartitioning [GOOD] Test command err: 2025-06-24T21:11:44.239613Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626615707379057:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:44.239681Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004467/r3tmp/tmpu99T9N/pdisk_1.dat 2025-06-24T21:11:44.951399Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:45.049451Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 26690, node 1 2025-06-24T21:11:45.112601Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:45.112721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:45.155469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:45.311027Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:45.311051Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:45.311058Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:45.311224Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: 
got bad distributable configuration 2025-06-24T21:11:45.358615Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21195 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:45.876596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:49.754446Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626635925036819:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:49.760725Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004467/r3tmp/tmpN3T2T3/pdisk_1.dat 2025-06-24T21:11:50.116510Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:50.133891Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:50.133963Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:50.141291Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:50.153806Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 8790, node 4 2025-06-24T21:11:50.258582Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:50.258604Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:50.258610Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:50.258723Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15529 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:50.569502Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:50.808691Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:53.349277Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:53.681997Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037889 not found 2025-06-24T21:11:53.763926Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037888 not found 2025-06-24T21:11:55.491968Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626659494928372:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:55.492025Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004467/r3tmp/tmpuvm5SX/pdisk_1.dat 2025-06-24T21:11:55.746523Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:55.808285Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:55.808375Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 19651, node 7 2025-06-24T21:11:55.848450Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:55.975793Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or 
outdated, will use file: (empty maybe) 2025-06-24T21:11:55.975823Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:55.975829Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:55.975965Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12570 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:56.251951Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:56.530833Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:59.012288Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:01.251997Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626688024451484:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:01.252213Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004467/r3tmp/tmpvfWwTL/pdisk_1.dat 2025-06-24T21:12:01.617578Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28986, node 10 2025-06-24T21:12:01.673859Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 2025-06-24T21:12:01.696088Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:01.696206Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:01.721685Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:01.809131Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:01.809158Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:01.809166Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:01.809320Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6825 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:02.249859Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:02.375301Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:05.514195Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:07.889334Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626714214519918:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:07.889407Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004467/r3tmp/tmpXlyh2s/pdisk_1.dat 2025-06-24T21:12:08.197548Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:08.197652Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:08.205713Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:08.227844Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21232, node 13 2025-06-24T21:12:08.291330Z node 13 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-24T21:12:08.308279Z node 13 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-24T21:12:08.359719Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:08.359742Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:08.359751Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:08.359917Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13416 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:08.838686Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:08.948176Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:12.314428Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TGRpcNewCoordinationClient::SessionDescribeWatchOwners [GOOD] >> TGRpcNewCoordinationClient::SessionDescribeWatchReplace >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-31 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-32 >> YdbTableBulkUpsert::Uint8 [GOOD] >> YdbTableBulkUpsert::Timeout >> TTableProfileTests::UseTableProfilePreset [GOOD] >> TTableProfileTests::UseTableProfilePresetViaSdk >> YdbMonitoring::SelfCheck [GOOD] >> YdbMonitoring::SelfCheckWithNodesDying ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> TVersions::Wreck0Reverse [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-06-24T21:09:15.119062Z 00000.010 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.011 II| FAKE_ENV: Starting storage for BS group 0 00000.011 II| FAKE_ENV: Starting storage for BS group 1 00000.011 II| FAKE_ENV: Starting storage for BS group 2 00000.011 II| FAKE_ENV: Starting storage for BS group 3 00000.017 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.017 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.018 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1 00000.018 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema 00000.019 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.019 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} hope 1 -> done Change{2, redo 0b alter 209b annex 0, ~{ } -{ }, 0 gb} 00000.019 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} release 4194304b of static, Memory{0 dyn 0} 00000.019 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2 00000.028 NN| TABLET_SAUSAGECACHE: Update config MemoryLimit: 8388608 ReplacementPolicy: ThreeLeveledLRU 00000.028 NN| TABLET_SAUSAGECACHE: Switch replacement policy from S3FIFO to ThreeLeveledLRU 00000.028 NN| TABLET_SAUSAGECACHE: Switch replacement policy done from S3FIFO to ThreeLeveledLRU 00000.030 II| TABLET_SAUSAGECACHE: Limit memory consumer with 16777216TiB 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{2, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{3, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:5} commited cookie 1 for step 4 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{4, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:6} commited cookie 1 for step 5 00000.035 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.035 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.035 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{5, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.035 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.036 DD| TABLET_EXECUTOR: Leader{1:2:7} commited cookie 1 for step 6 00000.036 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.036 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.037 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{6, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.037 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.037 DD| TABLET_EXECUTOR: Leader{1:2:8} commited cookie 1 for step 7 00000.038 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.038 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.038 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{7, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.038 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.039 DD| TABLET_EXECUTOR: Leader{1:2:9} commited cookie 1 for step 8 00000.039 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.039 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.039 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{8, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.039 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.040 DD| TABLET_EXECUTOR: Leader{1:2:10} commited cookie 1 for step 9 00000.040 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.040 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.041 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{9, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.041 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} 
release 4194304b of static, Memory{0 dyn 0} 00000.041 DD| TABLET_EXECUTOR: Leader{1:2:11} commited cookie 1 for step 10 00000.042 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.042 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.042 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{10, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.042 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.042 DD| TABLET_EXECUTOR: Leader{1:2:12} commited cookie 1 for step 11 00000.043 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.043 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.043 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{11, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.043 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.044 DD| TABLET_EXECUTOR: Leader{1:2:13} commited cookie 1 for step 12 00000.044 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.044 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.044 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{12, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.044 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.045 DD| TABLET_EXECUTOR: Leader{1:2:14} commited cookie 1 for step 13 00000.045 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.045 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.045 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{13, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.045 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.046 DD| TABLET_EXECUTOR: Leader{1:2:15} commited cookie 1 for step 14 00000.046 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.046 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.047 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{14, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.047 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.047 DD| TABLET_EXECUTOR: Leader{ ... ual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 3 ] Cookie: 4 2025-06-24T21:09:22.847565Z node 35 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1265: Bootstrap with config MemoryLimit: 456 AsyncQueueInFlyLimit: 19 ... waiting for NKikimr::NSharedCache::TEvRequest 2025-06-24T21:09:22.848194Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:1] 2025-06-24T21:09:22.848264Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:1] owner [35:5:2052] 2025-06-24T21:09:22.848397Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [35:5:2052] cookie 1 class AsyncLoad from cache [ ] already requested [ ] to request [ 1 2 3 4 5 ] 2025-06-24T21:09:22.848452Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 1 2 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 2025-06-24T21:09:22.848771Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [35:5:2052] cookie 2 class AsyncLoad from cache [ ] already requested [ ] to request [ 6 7 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest 2025-06-24T21:09:22.851273Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:2] 2025-06-24T21:09:22.851346Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:2] owner [35:5:2052] 2025-06-24T21:09:22.851450Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:2] owner [35:5:2052] cookie 3 class AsyncLoad from cache [ ] already requested [ ] to request [ 10 11 12 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest 2025-06-24T21:09:22.851609Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:1] owner [35:6:2053] 2025-06-24T21:09:22.851734Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [35:6:2053] cookie 4 class AsyncLoad from cache [ ] already requested [ 1 5 ] to request [ 9 10 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) Checking fetches#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 2 ] Cookie: 20 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 2 ] Cookie: 20 ... 
waiting for NKikimr::NSharedCache::TEvUnregister 2025-06-24T21:09:22.852045Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:803: Unregister owner [35:5:2052] 2025-06-24T21:09:22.852124Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:1] owner [35:5:2052] class AsyncLoad error RACE cookie 1 2025-06-24T21:09:22.852181Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:1] owner [35:5:2052] class AsyncLoad error RACE cookie 2 2025-06-24T21:09:22.852222Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:816: Remove page collection [1:0:256:0:0:0:1] owner [35:5:2052] 2025-06-24T21:09:22.852277Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:2] owner [35:5:2052] class AsyncLoad error RACE cookie 3 2025-06-24T21:09:22.852306Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:816: Remove page collection [1:0:256:0:0:0:2] owner [35:5:2052] 2025-06-24T21:09:22.852336Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:823: Remove owner [35:5:2052] ... waiting for NKikimr::NSharedCache::TEvUnregister (done) ... waiting for results #4 ... waiting for results #4 (done) Checking results#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 1 PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 2 PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 3 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 1 PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 2 PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 3 ... waiting for fetches #4 2025-06-24T21:09:22.852765Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 1 2 ] 2025-06-24T21:09:22.852846Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:1] class AsyncLoad cookie 1 2025-06-24T21:09:22.852926Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 5 9 ] ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 ... waiting for fetches #4 (done) Checking fetches#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 5 9 ] Cookie: 20 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 5 9 ] Cookie: 20 ... waiting for fetches #4 2025-06-24T21:09:22.853259Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 5 9 ] 2025-06-24T21:09:22.853326Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:1] class AsyncLoad cookie 2 2025-06-24T21:09:22.853387Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 10 ] 2025-06-24T21:09:22.853471Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:2] class AsyncLoad cookie 3 2025-06-24T21:09:22.853560Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1012: Drop page collection [1:0:256:0:0:0:1] pages [ 2 ] owner [35:6:2053] ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 ... 
waiting for fetches #4 (done) Checking fetches#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 10 ] Cookie: 10 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 10 ] Cookie: 10 ... waiting for results #4 2025-06-24T21:09:22.853858Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 10 ] 2025-06-24T21:09:22.853937Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:1050: Send page collection result [1:0:256:0:0:0:1] owner [35:6:2053] class AsyncLoad pages [ 1 5 9 10 ] cookie 4 ... waiting for results #4 (done) Checking results#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 5 9 10 ] Cookie: 4 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 5 9 10 ] Cookie: 4 2025-06-24T21:09:22.974034Z node 36 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1265: Bootstrap with config MemoryLimit: 456 AsyncQueueInFlyLimit: 19 ... waiting for NKikimr::NSharedCache::TEvRequest 2025-06-24T21:09:22.974720Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:1] 2025-06-24T21:09:22.974774Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:1] owner [36:5:2052] 2025-06-24T21:09:22.974876Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [36:5:2052] cookie 1 class AsyncLoad from cache [ ] already requested [ ] to request [ 1 ] 2025-06-24T21:09:22.974944Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 1 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 2025-06-24T21:09:22.975181Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:2] 2025-06-24T21:09:22.975233Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:2] owner [36:6:2053] 2025-06-24T21:09:22.975319Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:2] owner [36:6:2053] cookie 2 class AsyncLoad from cache [ ] already requested [ ] to request [ 10 11 ] 2025-06-24T21:09:22.975371Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:2] async queue pages [ 10 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest 2025-06-24T21:09:22.975559Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:2] owner [36:6:2053] cookie 3 class AsyncLoad from cache [ ] already requested [ ] to request [ 12 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for fetches #3 ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 ... waiting for fetches #3 (done) Checking fetches#3 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 10 PageCollection: [1:0:256:0:0:0:2] Pages: [ 10 ] Cookie: 10 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 10 PageCollection: [1:0:256:0:0:0:2] Pages: [ 10 ] Cookie: 10 ... 
waiting for NKikimr::NSharedCache::TEvUnregister 2025-06-24T21:09:22.975955Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:803: Unregister owner [36:6:2053] 2025-06-24T21:09:22.976009Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:2] owner [36:6:2053] class AsyncLoad error RACE cookie 2 2025-06-24T21:09:22.976058Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:2] owner [36:6:2053] class AsyncLoad error RACE cookie 3 2025-06-24T21:09:22.976084Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:816: Remove page collection [1:0:256:0:0:0:2] owner [36:6:2053] 2025-06-24T21:09:22.976132Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:823: Remove owner [36:6:2053] ... waiting for NKikimr::NSharedCache::TEvUnregister (done) ... waiting for results #3 ... waiting for results #3 (done) Checking results#3 Expected: PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 2 PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 3 Actual: PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 2 PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 3 ... waiting for results #3 2025-06-24T21:09:22.976436Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 1 ] 2025-06-24T21:09:22.976486Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:1050: Send page collection result [1:0:256:0:0:0:1] owner [36:5:2052] class AsyncLoad pages [ 1 ] cookie 1 2025-06-24T21:09:22.976549Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:2] class AsyncLoad cookie 2 2025-06-24T21:09:22.976602Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:2] class AsyncLoad cookie 3 ... 
waiting for results #3 (done) Checking results#3 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 1 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 1 Checking fetches#3 Expected: Actual: 2025-06-24T21:09:22.987279Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:2] status OK pages [ 10 ] Checking results#3 Expected: Actual: Checking fetches#3 Expected: Actual: >> TGRpcNewClient::SimpleYqlQuery [GOOD] >> TGRpcNewClient::CreateAlterUpsertDrop >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-37 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-38 >> YdbYqlClient::RetryOperationAsync [GOOD] >> YdbYqlClient::QueryLimits >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-1 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-2 >> YdbYqlClient::TestBusySession [GOOD] >> YdbYqlClient::TestConstraintViolation >> TGRpcAuthentication::DisableLoginAuthentication [GOOD] >> ReadRows::KillTabletDuringRead >> YdbS3Internal::TestAccessCheck [GOOD] >> YdbS3Internal::BadRequests >> YdbTableBulkUpsert::SyncIndexShouldSucceed [GOOD] >> YdbTableBulkUpsert::Overload >> YdbYqlClient::BuildInfo [GOOD] >> YdbYqlClient::AlterTableAddIndexAsyncOp >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-43 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-44 >> ClientStatsCollector::ExternalMetricRegistryByRawPtr [GOOD] >> ClientStatsCollector::ExternalMetricRegistryStdSharedPtr >> TTableProfileTests::ExplicitPartitionsComplex [GOOD] >> TTableProfileTests::ExplicitPartitionsWrongKeyFormat |95.1%| [TA] $(B)/ydb/core/tablet_flat/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TGRpcNewCoordinationClient::CreateAlter [GOOD] >> TGRpcNewCoordinationClient::NodeNotFound |95.1%| [TA] {RESULT} $(B)/ydb/core/tablet_flat/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> YdbTableBulkUpsert::NotNulls [GOOD] >> YdbTableBulkUpsert::Errors >> YdbIndexTable::MultiShardTableOneIndexIndexOverlapDataColumn [GOOD] >> YdbIndexTable::MultiShardTableOneIndexPkOverlap >> YdbOlapStore::ManyTables [GOOD] >> YdbOlapStore::LogPagingBetween >> YdbIndexTable::MultiShardTableOneIndex [GOOD] >> YdbIndexTable::MultiShardTableOneIndexDataColumn >> YdbYqlClient::TestReadWrongTable [GOOD] >> YdbYqlClient::TestReadTableNotNullBorder [GOOD] >> YdbYqlClient::TestReadTableNotNullBorder2 >> TYqlDateTimeTests::TimestampKey [GOOD] >> TYqlDateTimeTests::IntervalKey >> YdbYqlClient::TestDescribeTableWithShardStats [GOOD] >> YdbYqlClient::TestExplicitPartitioning >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-20 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-21 >> TGRpcAuthentication::NoDescribeRights [GOOD] >> TGRpcClientLowTest::BiStreamPing >> TGRpcNewCoordinationClient::SessionDescribeWatchReplace [GOOD] >> TGRpcNewCoordinationClient::SessionCreateUpdateDeleteSemaphore >> YdbYqlClient::CopyTables [GOOD] >> YdbYqlClient::CreateAndAltertTableWithCompactionPolicy >> YdbTableBulkUpsertOlap::ParquetImportBug [GOOD] >> YdbTableBulkUpsertOlap::ParquetImportBug_Datashard >> TTableProfileTests::DescribeTableWithPartitioningPolicy [GOOD] >> TTableProfileTests::DescribeTableOptions ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestReadWrongTable [GOOD] Test command err: 2025-06-24T21:11:55.852072Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626662362994823:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:55.852326Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004460/r3tmp/tmpgiQ1T7/pdisk_1.dat 2025-06-24T21:11:56.617585Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:56.632045Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:56.634836Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:56.644043Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:56.683332Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 30173, node 1 2025-06-24T21:11:56.787807Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:56.787831Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:56.787837Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:56.787935Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:56.862779Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server 
localhost:13691 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:57.333064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:59.608367Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626679542864847:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:59.608486Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626679542864838:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:59.608629Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:59.613248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:59.637436Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626679542864852:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:11:59.702800Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626679542864921:2682] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:02.087585Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626690726967394:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:02.087637Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004460/r3tmp/tmpBfxzz0/pdisk_1.dat 2025-06-24T21:12:02.290705Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:02.316192Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:02.316290Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:02.325148Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:02.339838Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 8476, node 4 2025-06-24T21:12:02.419774Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:02.419797Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:02.419803Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:02.419913Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9239 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:02.752710Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:03.149884Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:05.349234Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626703611870255:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:05.349326Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:05.349604Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626703611870267:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:05.353142Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:05.381368Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519626703611870269:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:12:05.459940Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519626703611870353:2667] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:07.380424Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626714109142714:2180];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004460/r3tmp/tmpFOIvgW/pdisk_1.dat 2025-06-24T21:12:07.551688Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:07.698618Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:07.721525Z node 7 :HIVE WARN: node_in ... , at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:11.847359Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626731289012997:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:11.847504Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:11.847838Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626731289013002:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:11.855980Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:11.893502Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519626731289013004:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:12:11.954951Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519626731289013093:2806] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:12.092353Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwhft2ac0te5sgrs477kpm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjJmOWFmMmUtYjIxZTc2NC01OWIwMTFkYi1lOTJkYTg3NQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:12.230691Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwhg3ac8g2htx4yxms43kn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=N2M4NDYyNWYtMTQyNjM4YzgtMWI5NGUwNzgtNDZkOTUzY2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:12.366085Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519626714109142714:2180];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:12.366163Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:12.384533Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=7&id=MjJmOWFmMmUtYjIxZTc2NC01OWIwMTFkYi1lOTJkYTg3NQ==, ActorId: [7:7519626731289012810:2295], ActorState: ExecuteState, TraceId: 01jyhwhg77bxx59ssa1fse3d88, Create QueryResponse for error on request, msg: 2025-06-24T21:12:14.485309Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626743554035941:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:14.485378Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004460/r3tmp/tmpPe16iZ/pdisk_1.dat 2025-06-24T21:12:14.867890Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:14.904957Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:14.905046Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:14.910989Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28733, node 10 2025-06-24T21:12:15.123805Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:15.123830Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:15.123838Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:15.123982Z node 10 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8303 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:12:15.526796Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:15.568067Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:15.659925Z node 10 :GRPC_SERVER INFO: grpc_request_proxy.cpp:592: Got grpc request# ListEndpointsRequest, traceId# 01jyhwhkhbcwvrwgmn9xjnk1sd, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:55588, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# 9.995261s 2025-06-24T21:12:15.685326Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# CreateSessionRequest, traceId# 01jyhwhkj57h09f0v5xfek4erp, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:55594, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-24T21:12:18.817703Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ReadTableRequest, traceId# 01jyhwhpm1bzgz872ej2n736qh, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:55596, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-24T21:12:18.819656Z node 10 :TX_PROXY ERROR: read_table_impl.cpp:567: [ReadTable [10:7519626760733906141:2297] TxId# 281474976710658] Navigate request failed for table 'Root/NoTable' 2025-06-24T21:12:18.819755Z node 10 :TX_PROXY ERROR: read_table_impl.cpp:2919: [ReadTable [10:7519626760733906141:2297] TxId# 281474976710658] RESPONSE Status# ResolveError shard: 0 table: Root/NoTable 2025-06-24T21:12:18.820288Z node 10 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [10:7519626760733906140:2297] Finish grpc stream, status: 400070
: Error: Failed to resolve table Root/NoTable, code: 200400
: Error: Got ResolveError response from TxProxy
: Error: Failed to resolve table Root/NoTable 2025-06-24T21:12:18.835330Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000b3a80] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-06-24T21:12:18.835656Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000006080] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-06-24T21:12:18.835888Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000b3480] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-06-24T21:12:18.836102Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000052880] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-06-24T21:12:18.836312Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000b4080] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-06-24T21:12:18.836523Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000b4680] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-06-24T21:12:18.836716Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000095480] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-06-24T21:12:18.836904Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000094e80] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-06-24T21:12:18.837103Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000d5080] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-06-24T21:12:18.837300Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000029a80] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-06-24T21:12:18.837492Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000052e80] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-06-24T21:12:18.837685Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000050480] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-06-24T21:12:18.837867Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000027c80] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-06-24T21:12:18.838085Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000028880] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-06-24T21:12:18.838298Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000002a80] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-06-24T21:12:18.838504Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000d5c80] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-06-24T21:12:18.838695Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00002dc80] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 >> TTableProfileTests::OverwriteExecutionPolicy [GOOD] >> TTableProfileTests::OverwritePartitioningPolicy >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-14 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-15 >> TGRpcNewClient::CreateAlterUpsertDrop [GOOD] >> TGRpcNewClient::InMemoryTables >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-32 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-33 >> YdbTableBulkUpsert::Timeout [GOOD] >> 
YdbTableBulkUpsert::ZeroRows >> YdbYqlClient::TestConstraintViolation [GOOD] >> ReadRows::KillTabletDuringRead [GOOD] >> KqpStreamLookup::ReadTableDuringSplit >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-38 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-39 >> KqpStreamLookup::ReadTableWithIndexDuringSplit >> YdbYqlClient::RetryOperationSync [GOOD] >> YdbYqlClient::RetryOperationLimitedDuration >> YdbYqlClient::AlterTableAddIndexAsyncOp [GOOD] >> YdbYqlClient::AlterTableAddIndexWithDataColumn >> YdbYqlClient::QueryLimits [GOOD] >> YdbYqlClient::QueryStats >> TGRpcNewCoordinationClient::NodeNotFound [GOOD] >> TGRpcNewCoordinationClient::MultipleSessionsSemaphores >> YdbOlapStore::LogLast50 [GOOD] >> YdbOlapStore::LogGrepNonExisting >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-2 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-3 >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientWithCorrectCerts_AllowOnlyDefaultGroup [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithIssuerVerification_ClientWithSameIssuer >> YdbS3Internal::BadRequests [GOOD] >> YdbScripting::BasicV0 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestConstraintViolation [GOOD] Test command err: 2025-06-24T21:11:59.165489Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626677964707813:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:59.165545Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004454/r3tmp/tmpXoFwdb/pdisk_1.dat 2025-06-24T21:11:59.753851Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:59.768370Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:59.768487Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:59.777248Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:59.853890Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 10294, node 1 2025-06-24T21:12:00.034423Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:00.034454Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:00.034461Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:00.034591Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:00.207201Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12433 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:00.484961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:03.165065Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626695144578006:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:03.165183Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:03.525650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:03.767522Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626695144578170:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:03.767620Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:03.768053Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626695144578175:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:03.776267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:03.809216Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626695144578177:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:12:03.876449Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626695144578247:2795] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:04.075744Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhwh7xn5jsz1jx8f0nh55pc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTQwMmJmZWYtYjk5ZTdhOGEtNDJhZWJlMTItYTExZTQ2ODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:04.196196Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626677964707813:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:04.196275Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:04.333321Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jyhwh88bat129hbz60hfzydk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTQwMmJmZWYtYjk5ZTdhOGEtNDJhZWJlMTItYTExZTQ2ODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:06.251771Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626706045154982:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:06.257003Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004454/r3tmp/tmpP2hvsx/pdisk_1.dat 2025-06-24T21:12:06.579390Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:06.583044Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:06.583125Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:06.589942Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1921, node 4 2025-06-24T21:12:06.807758Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:06.807785Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:06.807792Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:06.807916Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10282 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:07.127947Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:07.287300Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:09.733188Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626718930057887:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:09.733256Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:09.733541Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor ... nding previous query completion proxyRequestId: 9 2025-06-24T21:12:15.480412Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2246: SessionId: ydb://session/3?node_id=7&id=YzZjZTE3YjgtOGJmZjdkMTYtNTRhYjdmYzUtYjdkOTNhODE=, ActorId: [7:7519626746523375706:2295], ActorState: ExecuteState, TraceId: 01jyhwhkas3nze5y5ja4k6nh79, Reply query error, msg: Pending previous query completion proxyRequestId: 10 2025-06-24T21:12:15.491241Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2246: SessionId: ydb://session/3?node_id=7&id=YzZjZTE3YjgtOGJmZjdkMTYtNTRhYjdmYzUtYjdkOTNhODE=, ActorId: [7:7519626746523375706:2295], ActorState: ExecuteState, TraceId: 01jyhwhkas3nze5y5ja4k6nh79, Reply query error, msg: Pending previous query completion proxyRequestId: 11 2025-06-24T21:12:15.491838Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2246: SessionId: ydb://session/3?node_id=7&id=YzZjZTE3YjgtOGJmZjdkMTYtNTRhYjdmYzUtYjdkOTNhODE=, ActorId: [7:7519626746523375706:2295], ActorState: ExecuteState, TraceId: 01jyhwhkas3nze5y5ja4k6nh79, Reply query error, msg: Pending previous query completion proxyRequestId: 12 2025-06-24T21:12:15.495891Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519626746523375742:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:12:15.557457Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519626746523375846:2684] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:17.815456Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626753425199718:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:17.815530Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004454/r3tmp/tmpcB9U3R/pdisk_1.dat 2025-06-24T21:12:18.106604Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:18.154990Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 19398, node 10 2025-06-24T21:12:18.200562Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:18.200722Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:18.247429Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:18.301766Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:18.301789Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:18.301802Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:18.301930Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15339 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:18.713991Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:18.867841Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:21.636186Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626770605069914:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:21.636287Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:21.658209Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:21.831318Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626770605070074:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:21.831455Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:21.832153Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626770605070079:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:21.838785Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:21.871354Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519626770605070081:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:12:21.932460Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519626770605070151:2779] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:22.094817Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhwhsj6dp0mf3f8b4k9z0n4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=YTZjOGFiNDgtOWViOGRjZTgtMzg0N2ZjYmItZDIwYzJiMjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:22.215849Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jyhwhsvf3fvkvh8q07t0h2qr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=YTZjOGFiNDgtOWViOGRjZTgtMzg0N2ZjYmItZDIwYzJiMjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:22.227422Z node 10 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-24T21:12:22.241567Z node 10 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 72075186224037888 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:12:22.241756Z node 10 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 3 at tablet 72075186224037888 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:12:22.241997Z node 10 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [10:7519626774900037507:2294], Table: `Root/Test` ([72057594046644480:2:1]), SessionActorId: [10:7519626770605069887:2294]Got CONSTRAINT VIOLATION for table `Root/Test`. ShardID=72075186224037888, Sink=[10:7519626774900037507:2294].{
: Error: Conflict with existing key., code: 2012 } 2025-06-24T21:12:22.242500Z node 10 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [10:7519626774900037500:2294], SessionActorId: [10:7519626770605069887:2294], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `Root/Test`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[10:7519626770605069887:2294]. isRollback=0 2025-06-24T21:12:22.242766Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=10&id=YTZjOGFiNDgtOWViOGRjZTgtMzg0N2ZjYmItZDIwYzJiMjM=, ActorId: [10:7519626770605069887:2294], ActorState: ExecuteState, TraceId: 01jyhwhsvf3fvkvh8q07t0h2qr, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [10:7519626774900037501:2294] from: [10:7519626774900037500:2294] 2025-06-24T21:12:22.242867Z node 10 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [10:7519626774900037501:2294] TxId: 281474976710662. Ctx: { TraceId: 01jyhwhsvf3fvkvh8q07t0h2qr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=YTZjOGFiNDgtOWViOGRjZTgtMzg0N2ZjYmItZDIwYzJiMjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `Root/Test`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-24T21:12:22.243065Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=10&id=YTZjOGFiNDgtOWViOGRjZTgtMzg0N2ZjYmItZDIwYzJiMjM=, ActorId: [10:7519626770605069887:2294], ActorState: ExecuteState, TraceId: 01jyhwhsvf3fvkvh8q07t0h2qr, Create QueryResponse for error on request, msg: >> ClientStatsCollector::ExternalMetricRegistryStdSharedPtr [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> ReadRows::KillTabletDuringRead [GOOD] Test command err: 2025-06-24T21:11:39.766437Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626592349028044:2163];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:39.767655Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00446b/r3tmp/tmpkdA4SX/pdisk_1.dat 2025-06-24T21:11:40.489373Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:40.524300Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:40.524398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:40.538545Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22033, node 1 2025-06-24T21:11:40.775602Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:40.838684Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:40.838708Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:40.838720Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:40.838851Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3894 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:41.415955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:3894 TClient is connected to server localhost:3894 2025-06-24T21:11:42.241644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:11:44.280793Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626613823865471:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:44.280931Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:44.281250Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626613823865486:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:44.285876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:44.319585Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626613823865488:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-24T21:11:44.381381Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626613823865561:2716] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:44.762761Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626592349028044:2163];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:44.762838Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; TClient is connected to server localhost:3894 2025-06-24T21:11:44.920225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:11:50.243556Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:411:2371], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:11:50.243976Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:11:50.244098Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00446b/r3tmp/tmpJ9eG5w/pdisk_1.dat 2025-06-24T21:11:50.932169Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:51.033628Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:51.033774Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:51.054770Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:51.390639Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [4:969:2768], Recipient [4:518:2429]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:11:51.390742Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:11:51.390792Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:11:51.390976Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [4:966:2766], Recipient [4:518:2429]: {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:11:51.391044Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T21:11:51.478594Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateSubDomain SubDomain { Name: "tenant" } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:11:51.478903Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /Root/tenant, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T21:11:51.479091Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: tenant, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-24T21:11:51.479377Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 
2025-06-24T21:11:51.479438Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976715657:0 type: TxCreateSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 2] source path: 2025-06-24T21:11:51.479630Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-24T21:11:51.479810Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:11:51.479887Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) 2025-06-24T21:11:51.479974Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:11:51.480038Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594 ... oot/2btm/00446b/r3tmp/tmpidTnse/pdisk_1.dat 2025-06-24T21:12:06.099546Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:06.122299Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:06.122996Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:06.130844Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30541, node 6 2025-06-24T21:12:06.235326Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:06.235350Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:06.235360Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:06.235484Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26130 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:06.527110Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:26130 2025-06-24T21:12:06.867744Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:11.524840Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519626731401954602:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:11.528339Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00446b/r3tmp/tmpq4Z1n5/pdisk_1.dat 2025-06-24T21:12:11.875840Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:11.890085Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:11.890176Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:11.901498Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:11.917414Z node 9 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 9 Type# 268639257 TServer::EnableGrpc on GrpcPort 6106, node 9 2025-06-24T21:12:12.146440Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:12.146472Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:12.146481Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:12.146648Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11344 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:12.544962Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:12.568553Z node 9 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:21.589739Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:106:2152], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:12:21.589915Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:12:21.590097Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00446b/r3tmp/tmpiL8OGI/pdisk_1.dat 2025-06-24T21:12:21.965486Z node 12 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 12 Type# 268639257 2025-06-24T21:12:21.967542Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:22.012341Z node 12 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:22.021114Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:32:2079] 1750799537463543 != 1750799537463547 2025-06-24T21:12:22.070072Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:22.070252Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:22.082740Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:22.173514Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:22.508910Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:22.509051Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:702:2579], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:22.509605Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:22.516217Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:22.576748Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:22.726331Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:706:2582], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:12:22.762129Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:777:2622] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:22.905672Z node 12 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwht791e58z4z07mmevv2r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=12&id=NTAyNjdkZGEtZDMyNWE3MmUtYzk1NDdiNWMtNmVlMjY4Njk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Stoping tablet id: 720751862240378882025-06-24T21:12:22.982151Z node 12 :RPC_REQUEST WARN: rpc_read_rows.cpp:757: TReadRowsRPC CancelReads, shardIds# [72075186224037888, ] 2025-06-24T21:12:22.982248Z node 12 :RPC_REQUEST ERROR: rpc_read_rows.cpp:777: TReadRowsRPC ReplyWithError: Failed to connect to shard 72075186224037888 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-44 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-45 >> YdbTableBulkUpsert::Overload [GOOD] >> YdbTableBulkUpsert::RetryOperationSync >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientDoesNotProvideClientCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_AuthNotRequired ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> ClientStatsCollector::ExternalMetricRegistryStdSharedPtr [GOOD] Test command err: 2025-06-24T21:11:51.664179Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626641909822224:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:51.664235Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004464/r3tmp/tmps98KJz/pdisk_1.dat 2025-06-24T21:11:52.390059Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:52.416370Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:52.416464Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:52.447408Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24477, node 1 2025-06-24T21:11:52.755789Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:52.939828Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:52.939876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:52.939884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:52.940018Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration TClient is connected to server localhost:17583 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:53.264447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:55.484380Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626659089692400:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:55.484458Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626659089692408:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:55.484505Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:55.488271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:55.522302Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626659089692414:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:11:55.620417Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626659089692489:2686] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:56.063608Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710660. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=1&id=MzZjNDE0MDEtYzIwZTJhZjEtYzI3OWFhMDAtM2M4ZTAwYjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Database not set, use /Root 2025-06-24T21:11:57.682192Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626670012596377:2083];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004464/r3tmp/tmpQlYeZG/pdisk_1.dat 2025-06-24T21:11:57.811298Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:11:57.973381Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:57.973466Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:57.983804Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:58.002036Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15120, node 4 2025-06-24T21:11:58.091399Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:58.091422Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:58.091430Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:58.091560Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28548 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:58.335861Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:58.687553Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:00.759141Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626682897499229:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:00.759249Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:00.759685Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626682897499241:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:00.764253Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:00.818672Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519626682897499243:2300], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:12:00.884859Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519626682897499322:2669] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:03.171502Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626695382266774:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:03.172763Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004464/r3tmp/tmpd2HQwg/pdisk_1.dat 2025-06-24T21:12:03.437140Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21 ... aceId: 01jyhwhdedd2s33hrgas15rjte, Create QueryResponse for error on request, msg: 2025-06-24T21:12:11.343606Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626730667418245:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:11.343652Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004464/r3tmp/tmpiOgXKG/pdisk_1.dat 2025-06-24T21:12:11.990842Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:12.003184Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:12.003295Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:12.016213Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25079, node 10 2025-06-24T21:12:12.259825Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:12.259851Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:12.259859Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:12.260010Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:12.379249Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5147 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:12.649330Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:16.275044Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626752142255783:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:16.275392Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:16.275482Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626752142255795:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:16.280069Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:16.322896Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519626752142255797:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:12:16.347397Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519626730667418245:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:16.347501Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:16.420829Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519626752142255876:2686] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004464/r3tmp/tmpwR9jAn/pdisk_1.dat 2025-06-24T21:12:18.834024Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626761398297171:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:18.892098Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:19.090117Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:19.097547Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:19.097667Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:19.122001Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4943, node 13 2025-06-24T21:12:19.416052Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:19.416081Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:19.416095Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:19.416367Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:19.715301Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:62878 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:19.993018Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:23.401545Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626782873134704:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:23.401667Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:23.402317Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626782873134716:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:23.412179Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:23.445145Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519626782873134718:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:12:23.541211Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519626782873134787:2677] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:23.615331Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519626761398297171:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:23.615412Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> YdbYqlClient::TestReadTableNotNullBorder2 [GOOD] >> YdbYqlClient::TestReadTableSnapshot >> TGRpcClientLowTest::BiStreamPing [GOOD] >> TGRpcClientLowTest::BiStreamCancelled >> TTopicWriterTests::TestEnterMessage_OnlyDelimiters [GOOD] >> TTopicWriterTests::TestEnterMessage_SomeBinaryData [GOOD] >> YdbTableBulkUpsert::Errors [GOOD] >> YdbTableBulkUpsert::Limits >> TTopicWriterTests::TestEnterMessage_1KiB_Newline_Delimiter [GOOD] >> TTopicWriterTests::TestEnterMessage_1KiB_Newline_Delimited_With_Two_Delimiters_In_A_Row [GOOD] >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform_Invalid_Encode >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform_Invalid_Encode [GOOD] >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform [GOOD] >> TTopicWriterTests::TestEnterMessage_1KiB_No_Delimiter [GOOD] >> TTopicWriterTests::TestEnterMessage_Custom_Delimiter_Delimited [GOOD] >> TTableProfileTests::ExplicitPartitionsWrongKeyFormat [GOOD] >> TTableProfileTests::ExplicitPartitionsWrongKeyType |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_SomeBinaryData [GOOD] >> TGRpcNewCoordinationClient::SessionCreateUpdateDeleteSemaphore [GOOD] >> TYqlDateTimeTests::IntervalKey [GOOD] >> TYqlDateTimeTests::SimpleOperations >> YdbLogStore::LogStore [GOOD] >> YdbLogStore::LogStoreNegative |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_1KiB_Newline_Delimited_With_Two_Delimiters_In_A_Row [GOOD] >> TTableProfileTests::UseTableProfilePresetViaSdk [GOOD] >> TTableProfileTests::WrongTableProfile |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform [GOOD] |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_Custom_Delimiter_Delimited [GOOD] >> YdbTableBulkUpsertOlap::UpsertCSV [GOOD] >> YdbTableBulkUpsertOlap::UpsertCSV_DataShard >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-21 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-22 >> YdbYqlClient::CreateAndAltertTableWithCompactionPolicy [GOOD] >> YdbYqlClient::CreateAndAltertTableWithKeyBloomFilter >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesCorruptedPrivatekey [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesExpiredCert >> TTopicReaderTests::TestRun_ReadOneMessage >> YdbTableBulkUpsertOlap::ParquetImportBug_Datashard [GOOD] 
>> YdbTableBulkUpsertOlap::UpsertArrowBatch_DataShard ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcNewCoordinationClient::SessionCreateUpdateDeleteSemaphore [GOOD] Test command err: 2025-06-24T21:11:59.916522Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626680195809458:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:59.936685Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00444a/r3tmp/tmpAHkjtY/pdisk_1.dat 2025-06-24T21:12:00.433135Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:00.461232Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:00.461370Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:00.486022Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7708, node 1 2025-06-24T21:12:00.701880Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:00.701902Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:00.701907Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:00.701996Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:00.967457Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12822 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:01.237127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:01.420878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:12:05.853235Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626704327821665:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:05.885767Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00444a/r3tmp/tmp8HoRXA/pdisk_1.dat 2025-06-24T21:12:06.153319Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:06.185765Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:06.185846Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:06.196608Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 3822, node 4 2025-06-24T21:12:06.197589Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:06.315193Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:06.315214Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:06.315222Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:06.315353Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18044 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:06.656466Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:06.750031Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:12:06.885939Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:11.035438Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626731553671529:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:11.035523Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00444a/r3tmp/tmpa2bBeR/pdisk_1.dat 2025-06-24T21:12:11.370803Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:11.390718Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:11.390800Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:11.397434Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:11.414485Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 5477, node 7 2025-06-24T21:12:11.521316Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:11.521341Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:11.521350Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:11.521487Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16844 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:11.847960Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:11.953123Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:12:12.044784Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:16.220622Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626749159676960:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:16.220676Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00444a/r3tmp/tmpbCB4lG/pdisk_1.dat 2025-06-24T21:12:16.612553Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:16.612640Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:16.616454Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:16.627689Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6768, node 10 2025-06-24T21:12:16.651188Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 2025-06-24T21:12:16.715777Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:16.715804Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:16.715814Z node 
10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:16.715947Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4309 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:17.069671Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:17.217313Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:12:17.247324Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:21.905817Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626771038297439:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:21.905890Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00444a/r3tmp/tmpzmaMeX/pdisk_1.dat 2025-06-24T21:12:22.266157Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:22.292340Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:22.292437Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:22.308929Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:22.344427Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 TServer::EnableGrpc on GrpcPort 13370, node 13 2025-06-24T21:12:22.492101Z node 13 
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:22.492126Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:22.492136Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:22.492299Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29612 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:22.870353Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:22.970183Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:12:23.004379Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TGRpcNewClient::InMemoryTables [GOOD] >> TGRpcNewCoordinationClient::MultipleSessionsSemaphores [GOOD] >> TGRpcNewCoordinationClient::SessionAcquireAcceptedCallback >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-15 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-16 >> TTopicReaderTests::TestRun_ReadMoreMessagesThanLimit_Without_Wait_NewlineDelimited >> YdbTableBulkUpsert::ZeroRows [GOOD] >> TTopicWriterTests::TestEnterMessage_ZeroSymbol_Delimited [GOOD] >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform_NewlineDelimited [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-33 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-34 >> TTopicWriterTests::TestTopicWriterParams_No_Delimiter [GOOD] >> TTopicWriterTests::TestTopicWriterParams_InvalidDelimiter [GOOD] >> YdbYqlClient::AlterTableAddIndexWithDataColumn [GOOD] >> YdbYqlClient::CheckDefaultTableSettings1 >> TTopicWriterTests::TestEnterMessage_EmptyInput [GOOD] >> TTopicWriterTests::TestEnterMessage_No_Base64_Transform [GOOD] |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform_NewlineDelimited [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-39 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-40 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcNewClient::InMemoryTables [GOOD] Test command err: 2025-06-24T21:11:58.808624Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626672818063270:2194];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:58.813059Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00445a/r3tmp/tmpRai9FU/pdisk_1.dat 2025-06-24T21:11:59.347875Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:59.347968Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:59.361478Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:59.383600Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26377, node 1 2025-06-24T21:11:59.450951Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:59.650635Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: 
distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:59.653377Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:59.653418Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:59.653566Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:59.805149Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7808 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:00.096965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00445a/r3tmp/tmpVa7JAP/pdisk_1.dat 2025-06-24T21:12:04.994936Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:05.172867Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:05.210619Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:05.210703Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:05.221678Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3519, node 4 2025-06-24T21:12:05.371224Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:05.371247Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:05.371254Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:05.371434Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:05.703339Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19620 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:05.768735Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:10.263460Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626725317883516:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:10.263521Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00445a/r3tmp/tmpJjHC0j/pdisk_1.dat 2025-06-24T21:12:10.652120Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:10.711990Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:10.712083Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:10.720441Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 65305, node 7 2025-06-24T21:12:10.941242Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:10.941268Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:10.941276Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:10.941427Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29950 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:11.284718Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:11.383292Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:14.225582Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:14.433776Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626742497753848:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:14.433869Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626742497753860:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:14.433940Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:14.438277Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/s ... s: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:17.042981Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626753329039692:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:17.047908Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00445a/r3tmp/tmp8jWd6S/pdisk_1.dat 2025-06-24T21:12:17.292888Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:17.336925Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 23628, node 10 2025-06-24T21:12:17.407736Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:17.407903Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:17.493052Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:17.517406Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:17.517427Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:17.517436Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:17.517586Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30721 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:17.922199Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:18.103051Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:20.913673Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:21.063853Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:12:21.111869Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626770508910171:2317], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:21.111941Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:21.112283Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626770508910183:2320], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:21.116567Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:21.140745Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519626770508910185:2321], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-24T21:12:21.235620Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519626770508910252:2878] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:21.332620Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhwhrvp6a5205x8yzx8wgjs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=MmUwMjgzYWMtZDllOTFiNi1hZmQzOGMyNC0xOTk2MWExMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:21.473918Z node 10 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 10, TabletId: 72075186224037888 not found 2025-06-24T21:12:23.247209Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626780126452169:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:23.267426Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00445a/r3tmp/tmpkXPe1P/pdisk_1.dat 2025-06-24T21:12:23.596333Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:23.620131Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:23.620244Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:23.628108Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28405, node 13 2025-06-24T21:12:23.750068Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:23.750088Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:23.750098Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:23.750243Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8480 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:24.228113Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:24.309651Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:27.169947Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:27.377875Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:12:27.513583Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-3 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-10 |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestTopicWriterParams_InvalidDelimiter [GOOD] |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_No_Base64_Transform [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbTableBulkUpsert::ZeroRows [GOOD] Test command err: 2025-06-24T21:11:46.278601Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626623399557553:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:46.280791Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004466/r3tmp/tmpQbtkr8/pdisk_1.dat 2025-06-24T21:11:46.906175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:46.906296Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:46.922967Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:46.942273Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:47.006402Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 25244, node 1 2025-06-24T21:11:47.209818Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:47.209855Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:47.209869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:47.210042Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:47.263289Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18217 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:47.731979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:50.114705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) CLIENT_DEADLINE_EXCEEDED 2025-06-24T21:11:50.822649Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626640579429463:2398], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:50.822785Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:50.823054Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626640579429476:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:50.828214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:50.851644Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626640579429478:2402], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:11:50.956114Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626640579429566:4153] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:51.277278Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626623399557553:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:51.277337Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:51.617271Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhwgv932emq148bczfcert4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWE0Mzg5MDAtN2Y3YzU1MjYtNTk3YjczM2EtYmVhZjE5Mjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:11:53.721060Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626650409235076:2088];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:53.721123Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004466/r3tmp/tmp4wIxtJ/pdisk_1.dat 2025-06-24T21:11:53.938311Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:53.962795Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:53.962876Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:53.969926Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:53.981160Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 20035, node 4 2025-06-24T21:11:54.060345Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:54.060366Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:54.060376Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:54.060506Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19188 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:54.353163Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:54.759275Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:57.040990Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:58.947748Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626674949224575:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:58.957952Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004466/r3tmp/tmpcMPQxZ/pdisk_1.dat 2025-06-24T21:11:59.129935Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:59.149923Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:59.150015Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:59.156638Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcP ... essingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:59.577167Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:00.003292Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:02.164410Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:03.944039Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519626674949224575:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:03.944100Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:14.110624Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:12:14.110656Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:16.325280Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626750408631998:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:16.325334Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004466/r3tmp/tmph3I2M1/pdisk_1.dat 2025-06-24T21:12:16.772247Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:16.826047Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:16.826150Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:16.843612Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20263, node 10 2025-06-24T21:12:17.091879Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:17.091910Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:17.091919Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:17.092084Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:17.373138Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to 
server localhost:29646 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:17.603372Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:17.632065Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:12:20.645906Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 1 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 2 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 2025-06-24T21:12:21.325552Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519626750408631998:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:21.325692Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 4 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 8 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 16 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 32 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 64 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 128 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 256 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 512 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 1024 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 2048 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 4096 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 8192 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 16384 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 32768 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 65536 usec 2025-06-24T21:12:23.835320Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626782285219088:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:23.839426Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004466/r3tmp/tmpRTcRgf/pdisk_1.dat 2025-06-24T21:12:24.245417Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:24.263869Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:24.263969Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:24.274067Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16097, node 13 2025-06-24T21:12:24.433942Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:24.433969Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:24.433982Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:24.434173Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31223 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:24.817083Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:24.889169Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:27.822541Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TTableProfileTests::DescribeTableOptions [GOOD] >> YdbScripting::BasicV0 [GOOD] >> YdbScripting::BasicV1 >> TTableProfileTests::OverwritePartitioningPolicy [GOOD] >> TTableProfileTests::OverwriteStoragePolicy >> YdbYqlClient::TestReadTableSnapshot [GOOD] >> TTopicWriterTests::TestTopicWriterParams_Format_NewlineDelimited [GOOD] >> TTopicWriterTests::TestTopicWriterParams_Format_Concatenated [GOOD] >> TGRpcClientLowTest::BiStreamCancelled [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-45 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-46 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TTableProfileTests::DescribeTableOptions [GOOD] Test command err: 2025-06-24T21:11:58.505583Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626673042761219:2238];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:58.505721Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00445c/r3tmp/tmpgqQqkR/pdisk_1.dat 2025-06-24T21:11:58.968332Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:58.970463Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:58.970554Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:58.995368Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5681, node 1 2025-06-24T21:11:59.183901Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:59.183922Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:59.183931Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:59.184026Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15339 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:11:59.506387Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:59.687486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... Trying to register node Register node result Status { Code: UNAUTHORIZED Reason: "Cannot authorize node. Access denied" } 2025-06-24T21:12:03.878219Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626695031566602:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:03.878302Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00445c/r3tmp/tmprKzTYU/pdisk_1.dat 2025-06-24T21:12:04.129520Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:04.149430Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:04.149501Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:04.156693Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11200, node 4 2025-06-24T21:12:04.331300Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:04.331325Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:04.331333Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:04.331485Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26686 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:04.646488Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... Trying to register node 2025-06-24T21:12:04.923368Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Register node result Status { Code: OK } NodeId: 1024 DomainPath: "Root" Expire: 1750806724112881 Nodes { NodeId: 1024 Host: "localhost" Port: 26717 ResolveHost: "localhost" Address: "localhost" Location { DataCenter: "DataCenter" Rack: "Rack" Unit: "Body" } Expire: 1750806724112881 } Nodes { NodeId: 4 Host: "::1" Port: 12001 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 49 RoomNum: 1 RackNum: 1 BodyNum: 1 DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } } Nodes { NodeId: 5 Host: "::1" Port: 12002 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 50 RoomNum: 2 RackNum: 2 BodyNum: 2 DataCenter: "2" Module: "2" Rack: "2" Unit: "2" } } Nodes { NodeId: 6 Host: "::1" Port: 12003 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 51 RoomNum: 3 RackNum: 3 BodyNum: 3 DataCenter: "3" Module: "3" Rack: "3" Unit: "3" } } 2025-06-24T21:12:08.886070Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626715181664074:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:08.978967Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00445c/r3tmp/tmpeLbMES/pdisk_1.dat 2025-06-24T21:12:09.223674Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:09.272641Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:09.272729Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:09.278453Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27440, node 7 
2025-06-24T21:12:09.576118Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:09.576137Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:09.576146Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:09.576291Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:09.863318Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5163 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:09.936949Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... Trying to register node Register node result Status { Code: OK } NodeId: 1024 DomainPath: "Root" Expire: 1750806729212994 Nodes { NodeId: 1024 Host: "localhost" Port: 30187 ResolveHost: "localhost" Address: "localhost" Location { DataCenter: "DataCenter" Rack: "Rack" Unit: "Body" } Expire: 1750806729212994 } Nodes { NodeId: 7 Host: "::1" Port: 12001 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 49 RoomNum: 1 RackNum: 1 BodyNum: 1 DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } } Nodes { NodeId: 8 Host: "::1" Port: 12002 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 50 RoomNum: 2 RackNum: 2 BodyNum: 2 DataCenter: "2" Mo ... 8446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:15.992893Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
TClient is connected to server localhost:9196 2025-06-24T21:12:16.369282Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-24T21:12:16.389424Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:16.916334Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519626752702743648:2152];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:17.020202Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:17.020285Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:17.036025Z node 10 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 12 Cookie 12 2025-06-24T21:12:17.040875Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:17.052270Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; TClient is connected to server localhost:9196 2025-06-24T21:12:17.917327Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:19.142352Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:19.791884Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519626741933863177:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:19.791991Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; TClient is connected to server localhost:9196 TClient::Ls request: /Root/ydb_ut_tenant/table-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1750799539470 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 
3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "Data" Type: "String" TypeId: 4097 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "KeyHash" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name... (TRUNCATED) 2025-06-24T21:12:20.316540Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-06-24T21:12:20.318326Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:12:22.793657Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626777041530557:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:22.793716Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00445c/r3tmp/tmp6AQhVb/pdisk_1.dat 2025-06-24T21:12:23.052439Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:23.068202Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:23.068301Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:23.079054Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:23.094682Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 TServer::EnableGrpc on GrpcPort 22948, node 13 2025-06-24T21:12:23.284127Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:23.284153Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:23.284162Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:23.284335Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20254 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:12:23.807953Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:23.869171Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:20254 2025-06-24T21:12:24.415078Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-24T21:12:24.471632Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-24T21:12:25.505484Z node 15 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[15:7519626788804919749:2172];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:25.505791Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:25.635254Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:25.635367Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:25.652045Z node 13 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 15 Cookie 15 2025-06-24T21:12:25.653118Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20254 2025-06-24T21:12:26.232987Z node 13 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 15 2025-06-24T21:12:26.239740Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:12:26.499854Z node 15 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:26.510445Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:27.523120Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:28.527727Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestTopicWriterParams_Format_Concatenated [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestReadTableSnapshot [GOOD] Test command err: 2025-06-24T21:12:07.887483Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626714235711519:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:07.887536Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00441e/r3tmp/tmpeMEktz/pdisk_1.dat 2025-06-24T21:12:08.628678Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:08.640997Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:08.641098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:08.704066Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:08.735878Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 13686, node 1 2025-06-24T21:12:08.935321Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:09.013005Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:09.013027Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:09.013033Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:09.013131Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3023 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:09.566917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:12.291793Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626735710549040:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:12.291894Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:12.641332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:12.833415Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626735710549208:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:12.833483Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:12.833724Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626735710549213:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:12.837818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:12.864345Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626735710549215:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:12:12.887902Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626714235711519:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:12.887962Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:12.944390Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626735710549288:2800] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:13.143760Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhwhgs0fn3peybbwz24tz58, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWJkZWZkOTAtMTc3Y2YyNzktZDI3NjMzY2EtNjFiZmRhNzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:13.407598Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jyhwhh4d9e2fgcm3vjmys3sk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWJkZWZkOTAtMTc3Y2YyNzktZDI3NjMzY2EtNjFiZmRhNzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:13.434726Z node 1 :TX_PROXY ERROR: read_table_impl.cpp:2919: [ReadTable [1:7519626740005516670:2330] TxId# 281474976710663] RESPONSE Status# ResolveError shard: 0 table: Root/Test 2025-06-24T21:12:13.444192Z node 1 :TX_PROXY ERROR: read_table_impl.cpp:2919: [ReadTable [1:7519626740005516674:2331] TxId# 281474976710664] RESPONSE Status# ResolveError shard: 0 table: Root/Test 2025-06-24T21:12:15.323975Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626747749170644:2153];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00441e/r3tmp/tmpvtm8s2/pdisk_1.dat 2025-06-24T21:12:15.384650Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:15.568258Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:15.568332Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:15.573217Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:15.594670Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14715, node 4 2025-06-24T21:12:15.775743Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:15.775765Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:15.775771Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T21:12:15.775889Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21273 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:16.044561Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:16.324598Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:18.556550Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626760634073451:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21 ... :24.930177Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519626786900693978:2795] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:25.079673Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwhwfbarf8hf045q79b4gj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=YjY4YmVjNmMtMmUyNmEzODgtYmVlMDhiZTItYjdhMTdhNDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:25.268351Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwhwrz5w9dgh6bg3q1m9e4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=YjY4YmVjNmMtMmUyNmEzODgtYmVlMDhiZTItYjdhMTdhNDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:25.334061Z node 7 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T21:12:27.114398Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626797357375049:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:27.114445Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00441e/r3tmp/tmpchGOUa/pdisk_1.dat 2025-06-24T21:12:27.422412Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:27.447120Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:27.450221Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:27.456691Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64833, node 10 2025-06-24T21:12:27.687917Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:27.687949Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:27.687957Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:27.688141Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19135 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:28.121856Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:28.191379Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:28.221537Z node 10 :GRPC_SERVER INFO: grpc_request_proxy.cpp:592: Got grpc request# ListEndpointsRequest, traceId# 01jyhwhzsxftfp5w5zm4pjgk9w, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:50826, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# 9.995651s 2025-06-24T21:12:28.232544Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# CreateSessionRequest, traceId# 01jyhwhzt875xt806ebngwktjh, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:50842, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-24T21:12:30.811060Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ExecuteSchemeQueryRequest, traceId# 01jyhwj2at5049n00meavyp8ts, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:50858, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-24T21:12:30.819361Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626810242277972:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:30.819483Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:30.835274Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:30.841132Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:12:30.841253Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:12:30.841264Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:12:30.841309Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:12:30.959623Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:12:30.959628Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:12:30.959756Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:12:30.959756Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:12:31.055284Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ReadTableRequest, traceId# 01jyhwj2jf8hryxma0naghsqet, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:50860, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-24T21:12:31.074471Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:267: [10:7519626814537245428:2306] Adding quota request to queue ShardId: 0, TxId: 281474976710659 2025-06-24T21:12:31.074525Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:629: [10:7519626814537245428:2306] Assign stream quota to Shard 0, Quota 5, TxId 281474976710659 Reserved: 5 of 25, Queued: 0 2025-06-24T21:12:31.075700Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:647: [10:7519626814537245428:2306] got stream part, size: 35, RU required: 128 rate limiter absent 2025-06-24T21:12:31.076081Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:563: [10:7519626814537245428:2306] Starting inactivity timer for 600.000000s with tag 3 2025-06-24T21:12:31.076154Z node 10 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [10:7519626814537245428:2306] Finish grpc stream, status: 400000 2025-06-24T21:12:31.115689Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f8a80] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-06-24T21:12:31.115954Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000b2880] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-06-24T21:12:31.116144Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f9080] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-06-24T21:12:31.116323Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000b2280] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-06-24T21:12:31.116525Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f8480] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 
2025-06-24T21:12:31.116700Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f7e80] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-06-24T21:12:31.116899Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000a3280] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-06-24T21:12:31.117090Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000b1680] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-06-24T21:12:31.117260Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f5a80] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-06-24T21:12:31.117433Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000b3a80] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-06-24T21:12:31.117615Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000b3480] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-06-24T21:12:31.117858Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f6680] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-06-24T21:12:31.117967Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f6c80] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-06-24T21:12:31.118113Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f7280] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-06-24T21:12:31.118181Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f7880] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-06-24T21:12:31.118305Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f6080] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-06-24T21:12:31.118360Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000b2e80] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 >> YdbTableBulkUpsert::RetryOperationSync [GOOD] >> YdbTableBulkUpsert::RetryOperation ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcClientLowTest::BiStreamCancelled [GOOD] Test command err: 2025-06-24T21:12:02.035348Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626692354793607:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:02.035419Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00443f/r3tmp/tmpbiidXk/pdisk_1.dat 2025-06-24T21:12:03.051409Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:03.121390Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 22771, node 1 2025-06-24T21:12:03.204897Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:03.204992Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:03.290454Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:03.441030Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:03.441056Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:03.441064Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:03.441181Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16999 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:04.056446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:16999 TClient is connected to server localhost:16999 2025-06-24T21:12:04.758152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:12:06.793165Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626709534663754:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:06.793262Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:06.793510Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626709534663759:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:06.803301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:06.861822Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626709534663768:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-24T21:12:06.922399Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626709534663851:2712] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:07.038885Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626692354793607:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:07.038959Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; TClient is connected to server localhost:16999 TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750799524124 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "\n\016\010\001\020\200\204\002\032\004user \003" EffectiveACL: "\n\016\010\001\020\200\204\002\032\004user \003" PathVersion: 10 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 10 ACLVersion: 1 EffectiveACLVersion: 1 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: true } Children { Name: ".metadata" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 281474976715660 CreateStep: 1750799526875 ParentPathId: 1 PathState: EPathStateCreate Owner: "met... 
(TRUNCATED) 2025-06-24T21:12:09.485666Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626722703822738:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:09.485727Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00443f/r3tmp/tmp2LqtTq/pdisk_1.dat 2025-06-24T21:12:09.859842Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:09.884126Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:09.884210Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:09.892208Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:09.908266Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 14535, node 4 2025-06-24T21:12:09.967541Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:09.967568Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:09.967576Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:09.967708Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3137 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:10.345285Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:10.571277Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3137 2025-06-24T21:12:14.881942Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626743250017005:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:14.989044Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00443f/r3tmp/tmp1OaTvI/pdisk_1.dat 2025-06-24T21:12:15.290551Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:15.315958Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21 ... ER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:15.562268Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:15.562408Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26786 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:15.932891Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:15.987329Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26786 TClient is connected to server localhost:26786 2025-06-24T21:12:16.676300Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:12:18.718955Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626760429887220:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:18.719059Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626760429887255:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:18.719115Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:18.725971Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:18.774629Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519626760429887257:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-24T21:12:18.852755Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519626760429887326:2694] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } TClient is connected to server localhost:26786 TClient::Ls request: Root 2025-06-24T21:12:19.308498Z node 7 :TX_PROXY ERROR: describe.cpp:395: Access denied for user with access DescribeSchema to path Root TClient::Ls response: Status: 128 StatusCode: ERROR Issues { message: "Default error" severity: 1 } SchemeStatus: 12 ErrorReason: "Access denied" test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00443f/r3tmp/tmpd51wVU/pdisk_1.dat 2025-06-24T21:12:21.642695Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:21.771872Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:21.780263Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:21.780364Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:21.790569Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22366, node 10 2025-06-24T21:12:22.074852Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:22.074890Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:22.074900Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:22.075049Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:22.418592Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10795 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:22.525869Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:22.673936Z node 10 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket some****oken (BB86510A): Could not find correct token validator 2025-06-24T21:12:22.674081Z node 10 :GRPC_SERVER ERROR: ydb_dummy.cpp:94: Received TEvRefreshTokenResponse, Authenticated = 0 2025-06-24T21:12:27.229770Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626797920198787:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:27.229853Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00443f/r3tmp/tmpRgnVsj/pdisk_1.dat 2025-06-24T21:12:27.385126Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:27.417918Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:27.418020Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:27.424759Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26621, node 13 2025-06-24T21:12:27.535858Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:27.535894Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:27.535903Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:27.536042Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7794 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:27.936988Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:28.238224Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_AuthNotRequired [GOOD] >> YdbLogStore::LogStoreNegative [GOOD] >> YdbLogStore::Dirs >> YdbTableBulkUpsert::Limits [GOOD] >> YdbTableBulkUpsert::DecimalPK >> TGRpcNewCoordinationClient::SessionAcquireAcceptedCallback [GOOD] >> YdbTableBulkUpsertOlap::UpsertCSV_DataShard [GOOD] >> YdbTableBulkUpsertOlap::UpsertMixed >> TRegisterNodeOverDiscoveryService::ServerWithIssuerVerification_ClientWithSameIssuer [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithOutCertVerification_ClientProvidesExpiredCert >> YdbYqlClient::CreateAndAltertTableWithKeyBloomFilter [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-22 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-23 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_AuthNotRequired [GOOD] Test command err: 2025-06-24T21:11:40.512963Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626596363360073:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:40.515801Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004469/r3tmp/tmp9fMxVa/pdisk_1.dat 2025-06-24T21:11:41.114039Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:41.114139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:41.124964Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:41.148875Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4825, node 1 2025-06-24T21:11:41.511917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:41.511944Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:41.511956Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:41.512088Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:41.527806Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3089 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:42.123437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:42.257756Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket 56B7EB319E17F036E56FB3370B81098354C1672AC75420430D80EA67854DF178 (ipv6:[::1]:48830) has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-06-24T21:11:42.258727Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket 56B7EB319E17F036E56FB3370B81098354C1672AC75420430D80EA67854DF178: Cannot create token from certificate. 
Client certificate failed verification 2025-06-24T21:11:42.387481Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:48840) has now valid token of root@builtin 2025-06-24T21:11:42.539751Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-24T21:11:42.539800Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:11:42.539813Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T21:11:42.539856Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-24T21:11:46.155473Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626622884040200:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:46.155537Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004469/r3tmp/tmp0nB9S9/pdisk_1.dat 2025-06-24T21:11:46.429128Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:46.507898Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:46.507974Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 7059, node 4 2025-06-24T21:11:46.574221Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:46.710470Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:46.710486Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:46.710492Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:46.710581Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10308 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:47.034460Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:47.189887Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket 56B7EB319E17F036E56FB3370B81098354C1672AC75420430D80EA67854DF178 (ipv6:[::1]:43580) has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-06-24T21:11:47.190399Z node 4 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket 56B7EB319E17F036E56FB3370B81098354C1672AC75420430D80EA67854DF178: Cannot create token from certificate. Client certificate failed verification 2025-06-24T21:11:47.191809Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:47.304112Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:43592) has now valid token of root@builtin 2025-06-24T21:11:47.413913Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-24T21:11:47.413939Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:11:47.413953Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T21:11:47.413985Z node 4 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-24T21:11:51.152648Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626645561617834:2117];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:51.184320Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004469/r3tmp/tmpb2mMzA/pdisk_1.dat 2025-06-24T21:11:51.419486Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23594, node 7 2025-06-24T21:11:51.480511Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 2025-06-24T21:11:51.567810Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:51.567957Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:51.593873Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:51.618845Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:51.618870Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:51.618876Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: 
(empty maybe) 2025-06-24T21:11:51.619018Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23660 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ... atileState: Unknown -> Disconnected 2025-06-24T21:12:15.009389Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:15.014183Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15097, node 19 2025-06-24T21:12:15.237494Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:15.237521Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:15.237530Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:15.237689Z node 19 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:15.586347Z node 19 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22910 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:15.700978Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:15.910140Z node 19 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:39024) has now valid token of root@builtin 2025-06-24T21:12:16.017708Z node 19 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-24T21:12:16.017744Z node 19 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:16.017751Z node 19 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T21:12:16.017783Z node 19 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-24T21:12:20.732992Z node 22 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[22:7519626768331289783:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:20.734223Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004469/r3tmp/tmpMbK2tr/pdisk_1.dat 2025-06-24T21:12:21.063635Z node 22 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:21.089265Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:21.089368Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:21.097748Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27341, node 22 2025-06-24T21:12:21.269483Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:21.269507Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:21.269515Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:21.269663Z node 22 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26301 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:21.672471Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:21.751853Z node 22 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:21.857300Z node 22 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:41370) has now valid token of root@builtin 2025-06-24T21:12:21.927693Z node 22 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-24T21:12:21.927726Z node 22 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:21.927737Z node 22 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T21:12:21.927798Z node 22 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-24T21:12:27.426365Z node 25 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[25:7519626800054317445:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:27.426432Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004469/r3tmp/tmpzcQAfs/pdisk_1.dat 2025-06-24T21:12:27.673942Z node 25 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:27.718783Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:27.718888Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:27.729078Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14859, node 25 2025-06-24T21:12:27.916104Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:27.916127Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:27.916135Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:27.916294Z node 25 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24242 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:28.369641Z node 25 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:28.450628Z node 25 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:28.504285Z node 25 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket F6187DA78CF75775C44C821A973DC8BDC0A0F864468CE1F734CDA2D9FA4F8C0A (ipv6:[::1]:60426) has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-24T21:12:28.679668Z node 25 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (717F937C): Unknown token 2025-06-24T21:12:28.772806Z node 25 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket 32033D92EEDA6855BC778B3D8EC48E05FEBCFE658A34C66CACDAD8231A0DD80E (ipv6:[::1]:60454) has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-06-24T21:12:28.773411Z node 25 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket 32033D92EEDA6855BC778B3D8EC48E05FEBCFE658A34C66CACDAD8231A0DD80E: Cannot create token from certificate. 
Client certificate failed verification >> YdbYqlClient::CheckDefaultTableSettings1 [GOOD] >> YdbYqlClient::CheckDefaultTableSettings2 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcNewCoordinationClient::SessionAcquireAcceptedCallback [GOOD] Test command err: 2025-06-24T21:12:07.308468Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626713881907726:2217];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:07.308647Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004421/r3tmp/tmp6kwR7x/pdisk_1.dat 2025-06-24T21:12:08.018682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:08.018780Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:08.022928Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:08.041775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15020, node 1 2025-06-24T21:12:08.315351Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:08.443836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:08.443882Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:08.443891Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:08.443991Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23763 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:08.926270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:09.153639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:12:09.322060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:12:09.526268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropKesus, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_kesus.cpp:186) 2025-06-24T21:12:09.546826Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-24T21:12:09.562105Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found 2025-06-24T21:12:13.513703Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626740045026548:2146];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004421/r3tmp/tmp4EICTM/pdisk_1.dat 2025-06-24T21:12:13.610489Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:13.728801Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:13.765295Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 25497, node 4 2025-06-24T21:12:13.822865Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:13.822951Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:13.834821Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:13.834844Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:13.834852Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: 
(empty maybe) 2025-06-24T21:12:13.834989Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:13.850683Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29261 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:14.142338Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:14.225276Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:12:14.356947Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterKesus, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp:19) 2025-06-24T21:12:14.434461Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterKesus, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp:19) 2025-06-24T21:12:14.514350Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterKesus, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp:19) 2025-06-24T21:12:14.518869Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:18.982538Z node 7 
:METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626761271239474:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:18.982675Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004421/r3tmp/tmp98wp7f/pdisk_1.dat 2025-06-24T21:12:19.460254Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:19.488684Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:19.488775Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:19.497002Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21768, node 7 2025-06-24T21:12:19.727945Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:19.727965Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:19.727973Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:19.728133Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:20.015907Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10387 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:20.080744Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:24.607510Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626786425279618:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:24.607649Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004421/r3tmp/tmpEnRsdW/pdisk_1.dat 2025-06-24T21:12:24.870223Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23216, node 10 2025-06-24T21:12:24.956566Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:24.956717Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:25.068470Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:25.192983Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:25.193013Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:25.193022Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:25.193192Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4601 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:25.579775Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:25.635852Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:25.662471Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:12:29.764349Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626805891342165:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:29.764397Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004421/r3tmp/tmpgvivdy/pdisk_1.dat 2025-06-24T21:12:30.092728Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:30.166028Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:30.166131Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 12187, node 13 2025-06-24T21:12:30.199564Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:30.289657Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:30.289686Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:30.289696Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:30.289887Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28238 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:30.717323Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:30.735589Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:12:30.848132Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:30.869479Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) >> YdbTableBulkUpsertOlap::UpsertArrowBatch_DataShard [GOOD] >> TTableProfileTests::ExplicitPartitionsWrongKeyType [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-16 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-17 >> TYqlDateTimeTests::SimpleOperations [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::CreateAndAltertTableWithKeyBloomFilter [GOOD] Test command err: 2025-06-24T21:12:01.580406Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626686134242784:2182];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:01.580466Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004441/r3tmp/tmp0gUz0k/pdisk_1.dat 2025-06-24T21:12:02.213838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:02.213979Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:02.239294Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:02.265305Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5573, node 1 2025-06-24T21:12:02.596222Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:02.638924Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:02.638947Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:02.638960Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:02.639105Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is 
connected to server localhost:25722 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:03.067030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:03.107971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:12:03.195782Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:542: Skip check permission connect db, AllowYdbRequestsWithoutDatabase is off, there is no db provided from user, database: /Root, user: root@builtin, from ip: ipv6:[::1]:57150 Call 2025-06-24T21:12:03.223500Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:542: Skip check permission connect db, AllowYdbRequestsWithoutDatabase is off, there is no db provided from user, database: /Root, user: root@builtin, from ip: ipv6:[::1]:57152 2025-06-24T21:12:05.597942Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:542: Skip check permission connect db, AllowYdbRequestsWithoutDatabase is off, there is no db provided from user, database: /Root, user: root@builtin, from ip: ipv6:[::1]:57154 Call Call 2025-06-24T21:12:05.681391Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:578: Skip check permission connect db, user is a admin, database: /Root, user: root@builtin, from ip: ipv6:[::1]:57182 2025-06-24T21:12:05.700605Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:578: Skip check permission connect db, user is a admin, database: /Root, user: root@builtin, from ip: ipv6:[::1]:57196 2025-06-24T21:12:05.702192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:12:07.594584Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626712182054991:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:07.594620Z node 4 
:METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004441/r3tmp/tmp5MC1Ee/pdisk_1.dat 2025-06-24T21:12:07.777844Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:07.790112Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:07.790192Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:07.796794Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:07.826041Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 11667, node 4 2025-06-24T21:12:08.011872Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:08.011901Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:08.011909Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:08.012055Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11007 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:08.372451Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:08.603631Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:13.453553Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626736944492182:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:13.454405Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004441/r3tmp/tmpL4UgEa/pdisk_1.dat 2025-06-24T21:12:13.702142Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:13.751840Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:13.751946Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:13.763016Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9170, node 7 2025-06-24T21:12:13.784873Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 2025-06-24T21:12:13.919906Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:13.919930Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:13.919939Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:13.920083Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19086 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:14.413957Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operat ... 
on still in progress, tx: 281474976715688, publications: 2, subscribers: 1 2025-06-24T21:12:20.250913Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715688, subscribers: 1 2025-06-24T21:12:20.287611Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037893 not found 2025-06-24T21:12:20.292786Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-24T21:12:22.441918Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626775484953357:2159];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004441/r3tmp/tmpqUv2kF/pdisk_1.dat 2025-06-24T21:12:22.615656Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:22.767323Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:22.767416Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:22.776946Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:22.798654Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14531, node 10 2025-06-24T21:12:22.989093Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:22.989124Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:22.989133Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:22.989280Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3706 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:23.429730Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:23.441127Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3706 2025-06-24T21:12:26.391008Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) TClient is connected to server localhost:3706 TClient::Ls request: Root/Test TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Test" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750799546524 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Test" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... (TRUNCATED) 2025-06-24T21:12:26.853636Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) TClient is connected to server localhost:3706 TClient::Ls request: Root/Test TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Test" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750799546524 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Test" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... 
(TRUNCATED) 2025-06-24T21:12:29.165493Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626804899488776:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:29.165582Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004441/r3tmp/tmp7IRkfh/pdisk_1.dat 2025-06-24T21:12:29.466517Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:29.502249Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:29.502391Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:29.507973Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23212, node 13 2025-06-24T21:12:29.667222Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:29.667252Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:29.667263Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:29.667432Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1682 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:30.108151Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:30.190602Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:33.932980Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:34.167387Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519626804899488776:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:34.167526Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:34.249339Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) >> TTableProfileTests::WrongTableProfile [GOOD] >> TYqlDateTimeTests::DateKey >> TSchemeShardTopicSplitMergeTest::MargeUnorderedPartitions >> TSchemeShardTopicSplitMergeTest::SplitWithWrongBoundary >> TSchemeShardTopicSplitMergeTest::MargePartitions >> TSchemeShardTopicSplitMergeTest::SplitWithOnePartition >> TSchemeShardTopicSplitMergeTest::SplitTwoPartitions >> TSchemeShardTopicSplitMergeTest::Boot >> TSchemeShardTopicSplitMergeTest::SplitWithManyPartition >> TSchemeShardTopicSplitMergeTest::MargeInactivePartitions >> TSchemeShardTopicSplitMergeTest::SplitWithWrongPartition >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-10 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-11 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-40 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-41 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-34 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-35 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TTableProfileTests::ExplicitPartitionsWrongKeyType [GOOD] Test command err: 2025-06-24T21:11:54.291656Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626657300554842:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:54.294483Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004461/r3tmp/tmpxJBHA6/pdisk_1.dat 2025-06-24T21:11:55.005069Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:55.005175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:55.011576Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:55.014210Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:55.073351Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 5127, node 1 2025-06-24T21:11:55.278778Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:55.278798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:55.278806Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:55.278915Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:55.347464Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15600 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:55.802823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:15600 2025-06-24T21:11:56.350766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-24T21:11:56.391899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:56.901119Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519626664446663266:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:56.901162Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:11:56.969497Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:56.969617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:57.029601Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-24T21:11:57.042728Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15600 2025-06-24T21:11:57.681114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:57.908572Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15600 TClient::Ls request: /Root/ydb_ut_tenant/table-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1750799517890 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) 2025-06-24T21:11:58.315224Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-24T21:11:58.333936Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:12:01.945385Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626684558795073:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:01.945455Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004461/r3tmp/tmpS1L0II/pdisk_1.dat 2025-06-24T21:12:02.192774Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:02.247876Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:02.247956Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:02.258017Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64574, node 4 2025-06-24T21:12:02.537603Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:02.537630Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:02.537637Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:02.537801Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23291 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:12:02.967309Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:02.999076Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:03.016256Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 TClient is connected to server localhost:23291 2025-06-24T21:12:03.424795Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-24T21:12:03.464835Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 7205759404664448 ... Id: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:20.240015Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:12572 2025-06-24T21:12:20.856007Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-24T21:12:20.891116Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:21.431709Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519626773685064507:2244];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:21.563949Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:21.564081Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:21.577058Z node 10 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 12 Cookie 12 2025-06-24T21:12:21.578204Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:21.601192Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; TClient is connected to server localhost:12572 2025-06-24T21:12:22.261079Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-06-24T21:12:22.261580Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:12:22.407318Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:22.607348Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:23.609415Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:24.609205Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:25.619329Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:28.231271Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626800545444066:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:28.236813Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004461/r3tmp/tmpmkbzSk/pdisk_1.dat 2025-06-24T21:12:28.558289Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:28.617877Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:28.617995Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-24T21:12:28.624076Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5649, node 13 2025-06-24T21:12:28.888067Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:28.888094Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:28.888105Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:28.888284Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18359 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:12:29.299504Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:29.344834Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:18359 2025-06-24T21:12:29.822115Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-24T21:12:29.872454Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:30.379219Z node 15 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[15:7519626811736684579:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:30.379299Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:30.413409Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:30.413623Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:30.420365Z node 13 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 15 Cookie 15 2025-06-24T21:12:30.423211Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18359 2025-06-24T21:12:30.944288Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519626809135380031:2899] txid# 281474976710660, issues: { message: "Error at split boundary 0: Value of type Uint64 expected in tuple at position 1" severity: 1 } 2025-06-24T21:12:30.952262Z node 13 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 15 2025-06-24T21:12:30.952798Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:12:31.384902Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:31.415308Z node 15 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:32.391699Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:33.395714Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:34.399562Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbTableBulkUpsertOlap::UpsertArrowBatch_DataShard [GOOD] Test command err: 2025-06-24T21:12:01.770277Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626685170542999:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:01.770324Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004444/r3tmp/tmpRvlaOR/pdisk_1.dat 
2025-06-24T21:12:02.418733Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:02.418842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:02.491842Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:02.557257Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:02.596807Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 4519, node 1 2025-06-24T21:12:02.796240Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:02.925245Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:02.925268Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:02.925295Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:02.925434Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19287 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:03.390187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:19287 2025-06-24T21:12:03.836401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_store.cpp:451) waiting... 
2025-06-24T21:12:03.979702Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626693760478439:2281];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:12:04.040094Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626693760478439:2281];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:12:04.040284Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037889 2025-06-24T21:12:04.056889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626693760478439:2281];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:12:04.057132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626693760478439:2281];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:12:04.057393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626693760478439:2281];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:12:04.057545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626693760478439:2281];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:12:04.057657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626693760478439:2281];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:12:04.057825Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626693760478439:2281];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:12:04.057951Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626693760478439:2281];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:12:04.058091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626693760478439:2281];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:12:04.058220Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626693760478439:2281];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:12:04.058355Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626693760478439:2281];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:12:04.058451Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519626693760478439:2281];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:12:04.069583Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626693760478454:2283];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:12:04.143851Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626693760478454:2283];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:12:04.143998Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037890 2025-06-24T21:12:04.155541Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626693760478454:2283];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:12:04.155610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626693760478454:2283];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:12:04.155853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626693760478454:2283];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:12:04.155983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626693760478454:2283];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:12:04.156085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626693760478454:2283];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:12:04.156186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626693760478454:2283];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:12:04.156306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626693760478454:2283];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:12:04.156436Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626693760478454:2283];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:12:04.156589Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626693760478454:2283];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:12:04.156682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7519626693760478454:2283];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:12:04.156784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626693760478454:2283];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:12:04.167175Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519626693760478445:2282];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:12:04.227659Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519626693760478445:2282];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:12:04.227801Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037891 2025-06-24T21:12:04.233361Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:751962 ... DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1750799553881 2025-06-24T21:12:33.845081Z node 13 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:12:33.845112Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:12:33.845151Z node 13 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:12:33.845208Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750799553874 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [13:7519626808743087765:2197], exec latency: 1 ms, propose latency: 5 ms 2025-06-24T21:12:33.845248Z node 13 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-06-24T21:12:33.845303Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:33.849227Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-06-24T21:12:33.849300Z node 13 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8984;columns=10; 2025-06-24T21:12:33.882739Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [13:7519626825922957822:2738], serverId# [13:7519626825922957823:2739], sessionId# [0:0:0] 2025-06-24T21:12:33.882883Z node 13 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(36) Execute: at tablet# 72075186224037888 2025-06-24T21:12:33.892797Z node 13 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(36) Complete: at tablet# 72075186224037888 2025-06-24T21:12:33.892861Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 SUCCESS Upsert done: 0.038818s 2025-06-24T21:12:33.915127Z node 13 
:KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626825922957836:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:33.915251Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626825922957831:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:33.915620Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:33.921276Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:33.930795Z node 13 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:12:33.956059Z node 13 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:12:33.965238Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519626825922957845:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:12:34.025264Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519626830217925211:2798] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:34.155264Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:12:34.155386Z node 13 :TX_DATASHARD DEBUG: check_snapshot_tx_unit.cpp:153: Prepared Snapshot transaction txId 281474976715661 at tablet 72075186224037888 2025-06-24T21:12:34.159942Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:12:34.166798Z node 13 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715661 at step 1750799554210 at tablet 72075186224037888 { Transactions { TxId: 281474976715661 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750799554210 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:12:34.166826Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:34.166950Z node 13 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:12:34.166965Z node 13 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:12:34.166984Z node 13 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1750799554210:281474976715661] in PlanQueue unit at 72075186224037888 2025-06-24T21:12:34.167200Z node 13 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1750799554210:281474976715661 keys extracted: 0 2025-06-24T21:12:34.167500Z node 13 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:12:34.170999Z node 13 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1750799554210} 2025-06-24T21:12:34.171051Z node 13 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:12:34.171094Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750799554210 : 281474976715661] from 72075186224037888 at tablet 72075186224037888 send result to client [13:7519626830217925238:2812], exec latency: 0 ms, propose latency: 3 ms 2025-06-24T21:12:34.171118Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:34.173988Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwj5bm4bqrxpaxvpy37waq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=MzNiN2QwYzUtODIwNTI5MWMtM2U4MTc5ZGQtNjNiYjc0MGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:12:34.178695Z node 13 :TX_DATASHARD INFO: datashard__kqp_scan.cpp:214: Start scan, at: [13:7519626830217925266:2146], tablet: [13:7519626825922957717:2300], scanId: 4, table: /Root/LogsX, gen: 1, deadline: 2025-06-24T21:22:34.178394Z 2025-06-24T21:12:34.178906Z node 13 :TX_DATASHARD DEBUG: datashard__kqp_scan.cpp:109: Got ScanDataAck, at: [13:7519626830217925266:2146], scanId: 4, table: /Root/LogsX, gen: 1, tablet: [13:7519626825922957717:2300], freeSpace: 8388608;limits:(bytes=0;chunks=0); 2025-06-24T21:12:34.178936Z node 13 :TX_DATASHARD DEBUG: datashard__kqp_scan.cpp:124: Wakeup driver at: [13:7519626830217925266:2146] 2025-06-24T21:12:34.180483Z node 13 :TX_DATASHARD DEBUG: datashard__kqp_scan.cpp:311: Range 0 of 1 exhausted: try next one. table: /Root/LogsX range: [(Utf8 : NULL, Timestamp : NULL) ; ()) next range: 2025-06-24T21:12:34.180527Z node 13 :TX_DATASHARD DEBUG: datashard__kqp_scan.cpp:226: TableRanges is over, at: [13:7519626830217925266:2146], scanId: 4, table: /Root/LogsX 2025-06-24T21:12:34.180564Z node 13 :TX_DATASHARD DEBUG: datashard__kqp_scan.cpp:340: Finish scan, at: [13:7519626830217925266:2146], scanId: 4, table: /Root/LogsX, reason: Done, abortEvent: 2025-06-24T21:12:34.180599Z node 13 :TX_DATASHARD DEBUG: datashard__kqp_scan.cpp:465: Send ScanData, from: [13:7519626830217925266:2146], to: [13:7519626830217925261:2319], scanId: 4, table: /Root/LogsX, bytes: 11000, rows: 100, page faults: 0, finished: 1, pageFault: 0 2025-06-24T21:12:34.181020Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-24T21:12:34.181118Z node 13 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:12:34.181138Z node 13 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:12:34.181158Z node 13 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:12:34.181211Z node 13 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:12:34.210763Z node 13 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799554210, txId: 281474976715661] shutting down 2025-06-24T21:12:34.318672Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519626808743087482:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:34.318766Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:34.867123Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhwj5p15acc9e849sn73yn6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NmY5Mjc3MTctZjcwMzVjZDUtZWRkMTI2NmEtNTRiNDk5NjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root SUCCESS count returned 100 rows Negative (wrong format): BAD_REQUEST Negative (wrong data): SCHEME_ERROR FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8016;columns=9; 2025-06-24T21:12:34.934973Z node 13 :ARROW_HELPER ERROR: log.cpp:784: fline=arrow_helpers.cpp:142;event=cannot_parse;message=Invalid: Ran out of field metadata, likely malformed;schema_columns_count=10;schema_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; Negative (less columns): BAD_REQUEST FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8984;columns=10; 2025-06-24T21:12:34.949400Z node 13 :ARROW_HELPER ERROR: log.cpp:784: fline=arrow_helpers.cpp:142;event=cannot_parse;message=Serialization error: batch is not valid: Invalid: Offsets buffer size (bytes): 400 isn't large enough for length: 100;schema_columns_count=10;schema_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; Negative (reordered columns): BAD_REQUEST ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TYqlDateTimeTests::SimpleOperations [GOOD] Test command err: 2025-06-24T21:11:59.427510Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626678681372670:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:59.436047Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00444f/r3tmp/tmpnkBwWj/pdisk_1.dat 2025-06-24T21:12:00.142686Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:00.142796Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:00.146093Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:00.156559Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22837, node 1 2025-06-24T21:12:00.433887Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:00.480177Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:00.480198Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:00.480205Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:00.480304Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11024 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:00.847099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:03.319403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:03.555693Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626695861242995:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:03.555785Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:03.555931Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626695861243007:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:03.563492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:03.593617Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626695861243009:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:12:03.678144Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626695861243081:2798] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:04.352310Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhwh7q1cyd8ayamfm86krtj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjBjOWY1YzctMTFlZWU4N2QtYTM5NmJhN2ItNjZlZDc0OTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:04.431347Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626678681372670:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:04.431418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:04.624465Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jyhwh8j352kb84snyjsf3ydt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjBjOWY1YzctMTFlZWU4N2QtYTM5NmJhN2ItNjZlZDc0OTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:04.756112Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. Ctx: { TraceId: 01jyhwh8rv3s8y2d4k2rsh3jpy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjBjOWY1YzctMTFlZWU4N2QtYTM5NmJhN2ItNjZlZDc0OTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:04.921324Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710664. Ctx: { TraceId: 01jyhwh8wvc0t613p0hd6vpv8f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjBjOWY1YzctMTFlZWU4N2QtYTM5NmJhN2ItNjZlZDc0OTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:05.076205Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710665. Ctx: { TraceId: 01jyhwh928as7jnmgwyzwnjkcm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjBjOWY1YzctMTFlZWU4N2QtYTM5NmJhN2ItNjZlZDc0OTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:12:07.212909Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626712467051919:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:07.212984Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00444f/r3tmp/tmphQ2y7Q/pdisk_1.dat 2025-06-24T21:12:07.412962Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:07.428412Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:07.428496Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:07.433235Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7036, node 4 2025-06-24T21:12:07.658758Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:07.658785Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:07.658792Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:07.658932Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32630 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:08.119261Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:08.134146Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:12:08.251756Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:10.766576Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 720 ... ce;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:26.220144Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710664. Ctx: { TraceId: 01jyhwhxkk7jjhfy66ywjm99yt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=YjJlZWVjZmEtN2YyOGJlMTgtNDJlNDc2ZTgtZDkxY2ZmMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:28.224757Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626802436606722:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:28.224822Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00444f/r3tmp/tmp45MXgC/pdisk_1.dat 2025-06-24T21:12:28.461932Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:28.495044Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 TServer::EnableGrpc on GrpcPort 27144, node 13 2025-06-24T21:12:28.543418Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:28.543582Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:28.576640Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:28.694062Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:28.694091Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:28.694103Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:28.694287Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23276 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:29.035668Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:29.267569Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:32.536936Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:32.661167Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:32.775187Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626819616477164:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:32.775306Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:32.775620Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626819616477176:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:32.781065Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:32.823126Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519626819616477178:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-24T21:12:32.921348Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519626819616477258:2890] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:33.076659Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwj4857gm25y26c72vqen2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTFkZTYwYmEtMmFiN2M4ZTAtNTQwMDAzNjYtNjUxYTk3MmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:33.224637Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhwj4jqfz31gwqsa0g39tww, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTFkZTYwYmEtMmFiN2M4ZTAtNTQwMDAzNjYtNjUxYTk3MmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:33.227082Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519626802436606722:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:33.227181Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:33.780400Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhwj4pvcq65210mjp9cb0ya, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTFkZTYwYmEtMmFiN2M4ZTAtNTQwMDAzNjYtNjUxYTk3MmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:33.789237Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhwj4pvcq65210mjp9cb0ya, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTFkZTYwYmEtMmFiN2M4ZTAtNTQwMDAzNjYtNjUxYTk3MmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:34.312613Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyhwj58b95qp3andjc59ca5g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTFkZTYwYmEtMmFiN2M4ZTAtNTQwMDAzNjYtNjUxYTk3MmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:34.320186Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyhwj58b95qp3andjc59ca5g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTFkZTYwYmEtMmFiN2M4ZTAtNTQwMDAzNjYtNjUxYTk3MmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:34.475196Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. Ctx: { TraceId: 01jyhwj5sk97n3q5ycs64m15yd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTFkZTYwYmEtMmFiN2M4ZTAtNTQwMDAzNjYtNjUxYTk3MmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:34.620217Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. 
Ctx: { TraceId: 01jyhwj5xndhwrzdnp9w02w9x4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTFkZTYwYmEtMmFiN2M4ZTAtNTQwMDAzNjYtNjUxYTk3MmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:34.760776Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jyhwj624azyaak8ez38n83m7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTFkZTYwYmEtMmFiN2M4ZTAtNTQwMDAzNjYtNjUxYTk3MmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:34.905401Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jyhwj66g2a1abr36nswn8hgg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTFkZTYwYmEtMmFiN2M4ZTAtNTQwMDAzNjYtNjUxYTk3MmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:35.076269Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jyhwj6c845284zzq6jhz62e6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTFkZTYwYmEtMmFiN2M4ZTAtNTQwMDAzNjYtNjUxYTk3MmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:35.420604Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jyhwj6gfb486pmthn4f5k35z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTFkZTYwYmEtMmFiN2M4ZTAtNTQwMDAzNjYtNjUxYTk3MmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:35.425709Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715674. Ctx: { TraceId: 01jyhwj6gfb486pmthn4f5k35z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTFkZTYwYmEtMmFiN2M4ZTAtNTQwMDAzNjYtNjUxYTk3MmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> YdbScripting::BasicV1 [GOOD] >> YdbOlapStore::LogLast50ByResource [GOOD] >> YdbOlapStore::LogNonExistingRequest >> TSchemeShardTopicSplitMergeTest::Boot [GOOD] >> TSchemeShardTopicSplitMergeTest::CreateTopicWithManyPartition >> TSchemeShardTopicSplitMergeTest::CreateTopicWithOnePartition >> TSchemeShardTopicSplitMergeTest::SplitWithWrongBoundary [GOOD] >> TSchemeShardTopicSplitMergeTest::SplitWithWrongPartition [GOOD] >> TSchemeShardTopicSplitMergeTest::SplitTwoPartitions [GOOD] >> TSchemeShardTopicSplitMergeTest::SplitInactivePartition >> TSchemeShardTopicSplitMergeTest::MargeUnorderedPartitions [GOOD] >> TSchemeShardTopicSplitMergeTest::MargePartitions2 >> TSchemeShardTopicSplitMergeTest::SplitWithManyPartition [GOOD] >> YdbYqlClient::QueryStats [GOOD] >> YdbYqlClient::RenameTables >> TSchemeShardTopicSplitMergeTest::MargePartitions [GOOD] >> TSchemeShardTopicSplitMergeTest::MargeNotAdjacentRangePartitions >> TSchemeShardTopicSplitMergeTest::SplitWithOnePartition [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-46 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-47 >> YdbMonitoring::SelfCheckWithNodesDying [GOOD] >> YdbOlapStore::BulkUpsert ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitWithWrongBoundary [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:12:38.268064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:12:38.268141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:38.268169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:12:38.268196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:12:38.268833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:12:38.268866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:12:38.268972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:38.269033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:12:38.269852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: 
ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:12:38.271466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:12:38.371742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:12:38.371814Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:38.395845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:12:38.396332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:12:38.396522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:12:38.410295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:12:38.410504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:12:38.411314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.411648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:12:38.418188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:38.419424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:12:38.424519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:38.424618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:38.425431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:12:38.425488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:12:38.425541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:12:38.425626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.434091Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:12:38.573287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:12:38.573535Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.573748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:12:38.573795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:12:38.574071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:12:38.574206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:38.576715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.576925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:12:38.577147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.577204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:12:38.577262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:12:38.577300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:12:38.584127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.584240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:12:38.584290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:12:38.596170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.596239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.596296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.596351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 
2025-06-24T21:12:38.612928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:12:38.615765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:12:38.615994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:12:38.617092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.617267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:38.617321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.617668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:12:38.617729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.617940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:12:38.618058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:12:38.620626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:38.620686Z node 1 :FLAT_TX_SCHEMESHARD ... 
satisfy waiter [1:643:2567] TestWaitNotification: OK eventTxId 105 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\001" } TestModificationResults wait txId: 106 2025-06-24T21:12:39.116734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\001" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:12:39.116976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 106:0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.118161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 106:1, propose status:StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '01' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', at schemeshard: 72057594046678944 2025-06-24T21:12:39.121781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Split boundary less or equals FromBound of partition: \'01\' <= \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\'" TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:39.122084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '01' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-24T21:12:39.122423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-24T21:12:39.122467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-24T21:12:39.122935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-24T21:12:39.123024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-24T21:12:39.123062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:650:2574] TestWaitNotification: OK eventTxId 106 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "UUUUUUUUUUUUUUUT" } TestModificationResults wait txId: 107 2025-06-24T21:12:39.126415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "UUUUUUUUUUUUUUUT" } } } TxId: 107 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:12:39.126688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: 
/MyRoot/USER_1/Topic1, pathId: , opId: 107:0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.126906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 107:1, propose status:StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', at schemeshard: 72057594046678944 2025-06-24T21:12:39.129364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 107, response: Status: StatusInvalidParameter Reason: "Split boundary less or equals FromBound of partition: \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\' <= \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\'" TxId: 107 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:39.129658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 107, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 107, wait until txId: 107 TestWaitNotification wait txId: 107 2025-06-24T21:12:39.130024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 107: send EvNotifyTxCompletion 2025-06-24T21:12:39.130068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 107 2025-06-24T21:12:39.130514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 107, at schemeshard: 72057594046678944 2025-06-24T21:12:39.130615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-24T21:12:39.130658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:657:2581] TestWaitNotification: OK eventTxId 107 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\255" } TestModificationResults wait txId: 108 2025-06-24T21:12:39.133952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\255" } } } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:12:39.134187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 108:0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.134408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 108:1, propose status:StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AD' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), at schemeshard: 72057594046678944 2025-06-24T21:12:39.136925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 108, response: Status: StatusInvalidParameter Reason: 
"Split boundary greate or equals ToBound of partition: \'AD\' >= \'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9\' (FromBound is \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\')" TxId: 108 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:39.137193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AD' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 2025-06-24T21:12:39.137625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 108: send EvNotifyTxCompletion 2025-06-24T21:12:39.137670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 108 2025-06-24T21:12:39.138128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 108, at schemeshard: 72057594046678944 2025-06-24T21:12:39.138231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-24T21:12:39.138278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [1:664:2588] TestWaitNotification: OK eventTxId 108 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } TestModificationResults wait txId: 109 2025-06-24T21:12:39.141633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } } TxId: 109 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:12:39.141876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 109:0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.142091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 109:1, propose status:StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), at schemeshard: 72057594046678944 2025-06-24T21:12:39.145009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 109, response: Status: StatusInvalidParameter Reason: "Split boundary greate or equals ToBound of partition: \'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9\' >= \'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9\' (FromBound is \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\')" TxId: 109 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:39.145286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 109, 
database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 109, wait until txId: 109 TestWaitNotification wait txId: 109 2025-06-24T21:12:39.145634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 109: send EvNotifyTxCompletion 2025-06-24T21:12:39.145677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 109 2025-06-24T21:12:39.146169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 109, at schemeshard: 72057594046678944 2025-06-24T21:12:39.146276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 109: got EvNotifyTxCompletionResult 2025-06-24T21:12:39.146318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 109: satisfy waiter [1:671:2595] TestWaitNotification: OK eventTxId 109 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitWithWrongPartition [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:12:38.267667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:12:38.267772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:38.267814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:12:38.267850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:12:38.269373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:12:38.269426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:12:38.269527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:38.269627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:12:38.270409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:12:38.275304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:12:38.374498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:12:38.374558Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:38.399720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:12:38.400188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:12:38.400396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:12:38.413055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:12:38.413274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:12:38.414040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.414352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:12:38.417927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:38.419157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:12:38.424560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:38.424650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:38.425450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:12:38.425511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:12:38.425570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:12:38.425661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.438871Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:12:38.584554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-06-24T21:12:38.584815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.585041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:12:38.585091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:12:38.585335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:12:38.585468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:38.588721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.588962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:12:38.589221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.589285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:12:38.589327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:12:38.589364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:12:38.591693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.591777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:12:38.591830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:12:38.594100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.594167Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.594220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.594282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , 
TxId: 1 ready parts: 1/1 2025-06-24T21:12:38.598336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:12:38.600667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:12:38.600883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:12:38.601913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.602069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:38.602124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.602459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:12:38.602520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.602723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:12:38.602822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:12:38.605400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:38.605459Z node 1 :FLAT_TX_SCHEMESHARD ... 
46678944 2025-06-24T21:12:39.022534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:754: NPQState::TPropose operationId# 104:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-06-24T21:12:39.066755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409548, partId: 0 2025-06-24T21:12:39.066960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409548 Status: COMPLETE TxId: 104 Step: 150 2025-06-24T21:12:39.067059Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:624: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409548 Status: COMPLETE TxId: 104 Step: 150 2025-06-24T21:12:39.067135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:265: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.067220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:629: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-06-24T21:12:39.067392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 104:0 128 -> 240 2025-06-24T21:12:39.067612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:12:39.067700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:12:39.074380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.074870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:39.074923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:12:39.075106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:12:39.075345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:39.075411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 104, path id: 2 2025-06-24T21:12:39.075482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-24T21:12:39.076088Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.076142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-24T21:12:39.076252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:12:39.076288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:12:39.076332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:12:39.076381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:12:39.076432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-24T21:12:39.076474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:12:39.076516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:0 2025-06-24T21:12:39.076551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:0 2025-06-24T21:12:39.076717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-24T21:12:39.076764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 104, publications: 2, subscribers: 1 2025-06-24T21:12:39.076802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-24T21:12:39.076852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-24T21:12:39.077663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:12:39.077775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:12:39.077836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:12:39.077880Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-24T21:12:39.077929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:12:39.078753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at 
schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:12:39.078848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:12:39.078884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:12:39.078911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-24T21:12:39.078945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:12:39.079008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 1 2025-06-24T21:12:39.079050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:410:2376] 2025-06-24T21:12:39.083996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:12:39.085778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:12:39.085946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T21:12:39.085989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:549:2484] TestWaitNotification: OK eventTxId 104 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 7 SplitBoundary: "W" } TestModificationResults wait txId: 105 2025-06-24T21:12:39.101426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 7 SplitBoundary: "W" } } } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:12:39.101689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 105:0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.101891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 105:1, propose status:StatusInvalidParameter, reason: Splitting partition does not exists: 7, at schemeshard: 72057594046678944 2025-06-24T21:12:39.109861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 105, response: Status: StatusInvalidParameter Reason: "Splitting partition does not exists: 7" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 
72057594046678944 2025-06-24T21:12:39.110076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Splitting partition does not exists: 7, operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-24T21:12:39.110393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-24T21:12:39.110441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-24T21:12:39.110777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-24T21:12:39.110869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-24T21:12:39.110911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [1:643:2567] TestWaitNotification: OK eventTxId 105 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbScripting::BasicV1 [GOOD] Test command err: 2025-06-24T21:12:03.572610Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626693946886437:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:03.582514Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004435/r3tmp/tmpOMLDjo/pdisk_1.dat 2025-06-24T21:12:04.211699Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:04.227745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:04.227859Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:04.234314Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2985, node 1 2025-06-24T21:12:04.595283Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:04.742377Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:04.742396Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:04.742420Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:04.742545Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12833 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:05.094900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:07.448764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:08.315945Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626715421725779:2397], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:08.316045Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626715421725791:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:08.316106Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:08.320302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:08.349291Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626715421725794:2401], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:12:08.415499Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626715421725874:4075] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:08.575365Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626693946886437:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:08.575432Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:08.971045Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwhcbscsqypka0ha5pcb6n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGI0NDIxNGYtOWNmNDdlYTEtNDMwY2U2YWYtZDk3M2Q2NmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS 2025-06-24T21:12:11.179135Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626731217350166:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:11.179191Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004435/r3tmp/tmprbYP33/pdisk_1.dat 2025-06-24T21:12:11.423498Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11631, node 4 2025-06-24T21:12:11.543480Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:11.552808Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:11.647442Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:11.671829Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:11.671852Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:11.671859Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:11.671969Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26771 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:11.954786Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:12.199359Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:14.491372Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:15.176548Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626748397222272:2396], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:15.176636Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:15.179504Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626748397222284:2399], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:15.186976Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:15.225799Z node 4 :KQP_WORK ... P_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:29.330700Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:29.484225Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626805344510147:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:29.484334Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:29.484616Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626805344510152:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:29.489713Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:29.520631Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519626805344510154:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:12:29.628445Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519626805344510244:2809] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:29.715910Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhwhxn60gd3qbw86asyhhfk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=MThlYjFhZWMtYmFmOTM3OC0zNGM2OGMzNy1jOTY2YWE5NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:29.966566Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. Ctx: { TraceId: 01jyhwj1ah5tyy8jhctagvxgdj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=NmI5NmIxZTAtYTEyZmRiY2YtOTkyNDcyODctZGY5YWE5OGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:29.976026Z node 10 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799549982, txId: 281474976710662] shutting down 2025-06-24T21:12:30.083315Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519626788164639756:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:30.083425Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:32.098181Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626819032786670:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:32.098248Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004435/r3tmp/tmpDzIAE5/pdisk_1.dat 2025-06-24T21:12:32.405820Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:32.437294Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:32.437396Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:32.444010Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22742, node 13 2025-06-24T21:12:32.627618Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:32.631220Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:32.631236Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:32.631438Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3262 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:32.843591Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:33.184644Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:36.396674Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626836212656881:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:36.396783Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:36.552091Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:36.663393Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626836212657059:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:36.663489Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:36.663769Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626836212657064:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:36.668535Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:36.694112Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519626836212657066:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:12:36.798736Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519626836212657142:2791] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:36.860575Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwj4em353dkm3efh1w5mvn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZDEwNTNiYjktOGMyOWM5MGEtZTZkODA2OGMtMmEwYWVjODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:36.993890Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhwj88fbzexxn1hb45dmd1a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZTY4M2U4YTctM2IwZDczYzctYTk4YTBmYWUtYmZmMGI4NWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:37.001512Z node 13 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799557031, txId: 281474976715662] shutting down 2025-06-24T21:12:37.099127Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519626819032786670:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:37.099352Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TSchemeShardTopicSplitMergeTest::CreateTopicWithManyPartition [GOOD] >> KqpStreamLookup::ReadTableDuringSplit [GOOD] >> TSchemeShardTopicSplitMergeTest::MargeInactivePartitions [GOOD] >> TSchemeShardTopicSplitMergeTest::EnableSplitMerge ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitWithManyPartition [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:12:38.268134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:12:38.268274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:38.268333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:12:38.268381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:12:38.270287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2025-06-24T21:12:38.270371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:12:38.270493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:38.270624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:12:38.271604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:12:38.271990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:12:38.431809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:12:38.431883Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:38.466095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:12:38.466636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:12:38.466860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:12:38.507531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:12:38.507869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:12:38.508740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.509234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:12:38.513574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:38.513833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:12:38.515348Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:38.515446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:38.515751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:12:38.515817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:12:38.515882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:12:38.515988Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.535814Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:12:38.713970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:12:38.714223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.714484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:12:38.714553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:12:38.714868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:12:38.715020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:38.722910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.723132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:12:38.723372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.723440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:12:38.723488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:12:38.723526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:12:38.725495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.725566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:12:38.725615Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:12:38.727578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.727640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.727687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.727743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:12:38.732154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:12:38.734213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:12:38.734435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:12:38.735560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.735738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:38.735804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.736129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:12:38.736203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.736421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:12:38.736560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:12:38.738915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-24T21:12:38.738973Z node 1 :FLAT_TX_SCHEMESHARD ... \252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 5 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:39.306972Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:762:2058] recipient: [1:105:2138] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:765:2058] recipient: [1:15:2062] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:766:2058] recipient: [1:764:2672] Leader for TabletID 72057594046678944 is [1:767:2673] sender: [1:768:2058] recipient: [1:764:2672] 2025-06-24T21:12:39.357875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:12:39.357991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:39.358043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:12:39.358083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:12:39.358123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:12:39.358155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:12:39.358224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:39.358308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, 
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:12:39.359164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:12:39.359573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:12:39.375931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:12:39.377400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:12:39.377597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:12:39.377856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:12:39.377906Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:39.378033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:12:39.378927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 3, at schemeshard: 72057594046678944 2025-06-24T21:12:39.379035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: USER_1, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:12:39.379081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 2], parent name: USER_1, child name: Topic1, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:12:39.379221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.379298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.379560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:12:39.379873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.379965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-24T21:12:39.380224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.380350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.380500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 4, at schemeshard: 72057594046678944 2025-06-24T21:12:39.380554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 
2025-06-24T21:12:39.380587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:12:39.380619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-06-24T21:12:39.380661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T21:12:39.380764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.380860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.381125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 14, at schemeshard: 72057594046678944 2025-06-24T21:12:39.381304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:12:39.381710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.381839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.382296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.382386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.382662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.382774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.382883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.383100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.383460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.383710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.383985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.384124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.384282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.384330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at 
schemeshard: 72057594046678944 2025-06-24T21:12:39.384406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.389939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:12:39.395526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:39.395615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:39.396082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:12:39.396150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:12:39.396208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:12:39.399805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitWithOnePartition [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:12:38.268130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:12:38.268224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:38.268279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:12:38.268346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:12:38.268847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:12:38.268892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:12:38.268971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:38.269061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, 
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:12:38.269917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:12:38.271374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:12:38.374667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:12:38.374730Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:38.401676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:12:38.406067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:12:38.406266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:12:38.415121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:12:38.415436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:12:38.416136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.416440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:12:38.419050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:38.419237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:12:38.424452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:38.424528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:38.424672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:12:38.424728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:12:38.424832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:12:38.425106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.436500Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:12:38.642243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } 
StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:12:38.642468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.642681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:12:38.642743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:12:38.643028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:12:38.643215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:38.654971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.655231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:12:38.655463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.655520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:12:38.655561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:12:38.655596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:12:38.663219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.663306Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:12:38.663358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:12:38.672026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.672089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.672135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-24T21:12:38.672182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:12:38.695904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:12:38.703969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:12:38.704199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:12:38.705235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.705407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:38.705460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.705785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:12:38.705847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.706056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:12:38.706168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:12:38.715536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:38.715600Z node 1 :FLAT_TX_SCHEMESHARD ... 
plete 2025-06-24T21:12:39.296068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:12:39.296265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:12:39.296388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:12:39.296423Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:39.296546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:12:39.297436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 3, at schemeshard: 72057594046678944 2025-06-24T21:12:39.297533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: USER_1, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:12:39.297572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 2], parent name: USER_1, child name: Topic1, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:12:39.297659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.298155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.298446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:12:39.298754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.298859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-24T21:12:39.299076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.299188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.299346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 4, at schemeshard: 72057594046678944 2025-06-24T21:12:39.299431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:12:39.299468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:12:39.299488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-06-24T21:12:39.299507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: 
IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T21:12:39.299741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.299822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.300034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 14, at schemeshard: 72057594046678944 2025-06-24T21:12:39.300238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:12:39.300591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.300699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.301953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.302053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.302283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.303072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.303211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.303448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.303560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.303779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.304016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.304122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.304258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.304317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.304364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.311731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:12:39.314895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard 
DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:39.314983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:39.315597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:12:39.315656Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:12:39.315703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:12:39.316247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:752:2662] sender: [1:811:2058] recipient: [1:15:2062] 2025-06-24T21:12:39.380120Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:12:39.380432Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1/Topic1" took 307us result status StatusSuccess 2025-06-24T21:12:39.381103Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 Status: Inactive ChildPartitionIds: 1 ChildPartitionIds: 2 } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { ToBound: "\177" } Status: Active ParentPartitionIds: 0 } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\177" } Status: Active ParentPartitionIds: 0 } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { ToBound: "\177" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 
72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { FromBound: "\177" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTopicSplitMergeTest::CreateTopicWithOnePartition [GOOD] >> TSchemeShardTopicSplitMergeTest::DisableSplitMerge >> TSchemeShardTopicSplitMergeTest::MargeNotAdjacentRangePartitions [GOOD] >> TSchemeShardTopicSplitMergeTest::MargePartitions2 [GOOD] >> TSchemeShardTopicSplitMergeTest::SplitInactivePartition [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::CreateTopicWithManyPartition [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:12:38.293252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:12:38.293357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:38.293418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:12:38.293467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:12:38.293517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:12:38.293562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:12:38.293654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, 
InflightLimit# 10 2025-06-24T21:12:38.293727Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:12:38.294545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:12:38.294928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:12:38.381971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:12:38.382047Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:38.406581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:12:38.407065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:12:38.407323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:12:38.427288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:12:38.427464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:12:38.428019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.428300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:12:38.430734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:38.430935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:12:38.432141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:38.432200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:38.432376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:12:38.432424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:12:38.432466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:12:38.432535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.438314Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 
2025-06-24T21:12:38.615363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:12:38.615664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.615957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:12:38.616016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:12:38.616297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:12:38.616439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:38.622060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.622293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:12:38.622497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.622589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:12:38.622641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:12:38.622682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:12:38.636194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.636274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:12:38.636332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:12:38.638639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.638706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.638757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.638827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:12:38.643139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:12:38.645582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:12:38.645792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:12:38.646918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.647415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:38.647486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.647869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:12:38.647935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.648163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:12:38.648252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:12:38.650485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:38.650538Z node 1 :FLAT_TX_SCHEMESHARD ... 
RN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:12:39.905228Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:39.905412Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:12:39.906260Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 3, at schemeshard: 72057594046678944 2025-06-24T21:12:39.906354Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: USER_1, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:12:39.906407Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 2], parent name: USER_1, child name: Topic1, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:12:39.906492Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.906568Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.906799Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:12:39.907181Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.907290Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-24T21:12:39.907514Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.907616Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.907739Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 4, at schemeshard: 72057594046678944 2025-06-24T21:12:39.907791Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:12:39.907830Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:12:39.907854Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-06-24T21:12:39.907877Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T21:12:39.907979Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.908075Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.908346Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 14, at schemeshard: 72057594046678944 2025-06-24T21:12:39.908552Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:12:39.908922Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.909052Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.909564Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.909662Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.909921Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.910017Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.910106Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.910279Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.910353Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.910541Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.910723Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.910805Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.910938Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.910990Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.911041Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.917698Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:12:39.920062Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:39.920167Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:39.920979Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: 
TTxServerlessStorageBilling.Execute 2025-06-24T21:12:39.921052Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:12:39.921107Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:12:39.922262Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [2:645:2563] sender: [2:705:2058] recipient: [2:15:2062] 2025-06-24T21:12:39.976274Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:12:39.976604Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1/Topic1" took 373us result status StatusSuccess 2025-06-24T21:12:39.977411Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 1024 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 1 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 1024 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 
Status: Active KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::MargeNotAdjacentRangePartitions [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:12:38.274089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:12:38.274178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:38.274216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:12:38.274329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:12:38.274384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:12:38.274414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:12:38.274491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:38.274566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-24T21:12:38.275387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:12:38.275742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:12:38.365018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:12:38.365084Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:38.395768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:12:38.396278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:12:38.396538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:12:38.406364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:12:38.406556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:12:38.410282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.410552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:12:38.424270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:38.424495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:12:38.425740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:38.425810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:38.425996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:12:38.426039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:12:38.426086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:12:38.426162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.432547Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:12:38.617134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:12:38.617328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.617491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:12:38.617524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:12:38.617708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:12:38.617804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:38.620200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.620453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:12:38.620664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.620730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:12:38.620771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:12:38.620815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:12:38.624014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.624074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:12:38.624113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:12:38.626461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.626541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.626590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.626642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:12:38.630443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:12:38.632798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:12:38.632992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:12:38.633988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.634119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:38.634168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.634456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:12:38.634515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.634722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:12:38.634813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:12:38.637479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:38.637530Z node 1 :FLAT_TX_SCHEMESHARD ... 
-24T21:12:40.484201Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:754: NPQState::TPropose operationId# 104:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-06-24T21:12:40.526833Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409548, partId: 0 2025-06-24T21:12:40.527027Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409548 Status: COMPLETE TxId: 104 Step: 150 2025-06-24T21:12:40.527108Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:624: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409548 Status: COMPLETE TxId: 104 Step: 150 2025-06-24T21:12:40.527196Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:265: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:40.527256Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:629: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-06-24T21:12:40.527461Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 104:0 128 -> 240 2025-06-24T21:12:40.527658Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:12:40.527721Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:12:40.530576Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:12:40.531022Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:40.531080Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:12:40.531411Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:12:40.531649Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:40.531694Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 104, path id: 2 2025-06-24T21:12:40.531768Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-24T21:12:40.531851Z node 2 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:12:40.531912Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-24T21:12:40.532043Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:12:40.532081Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:12:40.532124Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:12:40.532160Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:12:40.532203Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-24T21:12:40.532253Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:12:40.532298Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:0 2025-06-24T21:12:40.532336Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:0 2025-06-24T21:12:40.532499Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-24T21:12:40.532561Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 104, publications: 2, subscribers: 1 2025-06-24T21:12:40.532598Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-24T21:12:40.532628Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-24T21:12:40.534793Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:12:40.534920Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:12:40.534965Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:12:40.535010Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-24T21:12:40.535057Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:12:40.535618Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 
72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:12:40.535699Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:12:40.535729Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:12:40.535775Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-24T21:12:40.535806Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:12:40.535873Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 1 2025-06-24T21:12:40.535932Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [2:414:2378] 2025-06-24T21:12:40.541237Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:12:40.541409Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:12:40.541505Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T21:12:40.541543Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [2:548:2481] TestWaitNotification: OK eventTxId 104 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Merge { Partition: 0 AdjacentPartition: 2 } TestModificationResults wait txId: 105 2025-06-24T21:12:40.552260Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Merge { Partition: 0 AdjacentPartition: 2 } } } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:12:40.552494Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 105:0, at schemeshard: 72057594046678944 2025-06-24T21:12:40.552702Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 105:1, propose status:StatusInvalidParameter, reason: You cannot merge non-contiguous partitions, at schemeshard: 72057594046678944 2025-06-24T21:12:40.559039Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 105, response: Status: StatusInvalidParameter Reason: "You cannot merge non-contiguous partitions" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 
72057594046678944 2025-06-24T21:12:40.559476Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: You cannot merge non-contiguous partitions, operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-24T21:12:40.559827Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-24T21:12:40.559874Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-24T21:12:40.560311Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-24T21:12:40.560418Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-24T21:12:40.560459Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:642:2564] TestWaitNotification: OK eventTxId 105 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::MargePartitions2 [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:12:38.268546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:12:38.268667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:38.268720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:12:38.268759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:12:38.268832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:12:38.268866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:12:38.268963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:38.269051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:12:38.269920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: 
HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:12:38.271393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:12:38.387881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:12:38.387963Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:38.410223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:12:38.410651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:12:38.410844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:12:38.432041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:12:38.432257Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:12:38.433052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.433386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:12:38.437439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:38.437666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:12:38.439016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:38.439102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:38.439394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:12:38.439453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:12:38.439506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:12:38.439579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.446290Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:12:38.637079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:12:38.637344Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.637578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:12:38.637625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:12:38.637888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:12:38.638013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:38.644897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.645149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:12:38.645347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.645407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:12:38.645470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:12:38.645520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:12:38.648147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.648217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:12:38.648264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:12:38.656314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.656382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.656437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.656511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:12:38.664470Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:12:38.666797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:12:38.667014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:12:38.668053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.668186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:38.668252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.668589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:12:38.668648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.668829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:12:38.668923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:12:38.672661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:38.672710Z node 1 :FLAT_TX_SCHEMESHARD ... 
-24T21:12:40.501040Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:624: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409548 Status: COMPLETE TxId: 105 Step: 200 2025-06-24T21:12:40.501096Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:265: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 105:0, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:40.501138Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:629: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-06-24T21:12:40.501320Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 105:0 128 -> 240 2025-06-24T21:12:40.501524Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:12:40.505163Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-24T21:12:40.505470Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:40.505524Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:12:40.505875Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:40.505934Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 105, path id: 3 2025-06-24T21:12:40.506036Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-24T21:12:40.506088Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 105:0 ProgressState 2025-06-24T21:12:40.506214Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#105:0 progress is 1/1 2025-06-24T21:12:40.506258Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-24T21:12:40.506305Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#105:0 progress is 1/1 2025-06-24T21:12:40.506345Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-24T21:12:40.506396Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 105, ready parts: 1/1, is published: false 2025-06-24T21:12:40.506447Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-24T21:12:40.506493Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 105:0 2025-06-24T21:12:40.506531Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 105:0 2025-06-24T21:12:40.506699Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-24T21:12:40.506752Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 105, publications: 1, subscribers: 1 2025-06-24T21:12:40.506791Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 105, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-24T21:12:40.509187Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T21:12:40.509323Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T21:12:40.509375Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-24T21:12:40.509432Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-24T21:12:40.509484Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:12:40.509613Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 1 2025-06-24T21:12:40.509663Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [2:414:2378] 2025-06-24T21:12:40.516436Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T21:12:40.516597Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-24T21:12:40.516648Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:679:2598] TestWaitNotification: OK eventTxId 105 2025-06-24T21:12:40.523324Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:12:40.523665Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1/Topic1" took 411us result status StatusSuccess 2025-06-24T21:12:40.524561Z node 2 :SCHEMESHARD_DESCRIBE 
DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 5 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 KeyRange { ToBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { FromBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" ToBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\375" } Status: Inactive ChildPartitionIds: 4 } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\375" ToBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } Status: Inactive ChildPartitionIds: 4 } Partitions { PartitionId: 3 TabletId: 72075186233409548 KeyRange { FromBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } Status: Active } Partitions { PartitionId: 4 TabletId: 72075186233409548 KeyRange { FromBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" ToBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } Status: Active ParentPartitionIds: 1 ParentPartitionIds: 2 } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 5 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 5 NextPartitionId: 5 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive KeyRange { FromBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" ToBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\375" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive KeyRange { FromBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\375" ToBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } } Partitions { PartitionId: 3 GroupId: 4 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { ToBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } } Partitions { PartitionId: 4 GroupId: 5 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 1 ParentPartitionIds: 2 KeyRange { FromBound: 
"?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" ToBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 5 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> YdbTableBulkUpsert::RetryOperation [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitInactivePartition [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:12:38.268350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:12:38.268422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:38.268458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:12:38.268490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:12:38.270097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:12:38.270156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:12:38.270248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:38.270299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 
600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:12:38.270917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:12:38.271333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:12:38.371572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:12:38.371634Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:38.395899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:12:38.396292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:12:38.396542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:12:38.405741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:12:38.406119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:12:38.409611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.410029Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:12:38.418794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:38.419401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:12:38.426140Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:38.426214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:38.426375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:12:38.426409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:12:38.426451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:12:38.426521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.435259Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:12:38.607018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: 
"pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:12:38.607449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.607715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:12:38.607771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:12:38.608088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:12:38.608237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:38.616404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.616662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:12:38.616868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.616921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:12:38.616962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:12:38.616994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:12:38.620037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.620114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:12:38.620155Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:12:38.622465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.622522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.622569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-24T21:12:38.622626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:12:38.626444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:12:38.628625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:12:38.628824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:12:38.629828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.629969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:38.630019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.630310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:12:38.630375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.630573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:12:38.630706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:12:38.633067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:38.633120Z node 1 :FLAT_TX_SCHEMESHARD ... 
AT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:648: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionAttachResult CollectPQConfigChanged: false 2025-06-24T21:12:40.465232Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:754: NPQState::TPropose operationId# 105:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-06-24T21:12:40.468727Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-24T21:12:40.469097Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-24T21:12:40.469156Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-24T21:12:40.469627Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 105, at schemeshard: 72057594046678944 2025-06-24T21:12:40.469681Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 105, ready parts: 0/1, is published: true 2025-06-24T21:12:40.469736Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 105, at schemeshard: 72057594046678944 2025-06-24T21:12:40.524734Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 200, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:40.524915Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 105 AckTo { RawX1: 0 RawX2: 0 } } Step: 200 MediatorID: 72075186233409547 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:40.524992Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:662: NPQState::TPropose operationId# 105:0 HandleReply TEvOperationPlan, step: 200, at tablet: 72057594046678944 2025-06-24T21:12:40.525062Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:754: NPQState::TPropose operationId# 105:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-06-24T21:12:40.587826Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 105, tablet: 72075186233409548, partId: 0 2025-06-24T21:12:40.588075Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 105:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409548 Status: COMPLETE TxId: 105 Step: 200 2025-06-24T21:12:40.588172Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:624: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409548 Status: COMPLETE TxId: 105 Step: 200 2025-06-24T21:12:40.588239Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:265: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 105:0, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-24T21:12:40.588286Z node 2 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation_common_pq.cpp:629: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-06-24T21:12:40.588499Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 105:0 128 -> 240 2025-06-24T21:12:40.588695Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:12:40.592104Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-24T21:12:40.592849Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:40.592913Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:12:40.593270Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:40.593329Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 105, path id: 3 2025-06-24T21:12:40.593796Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-24T21:12:40.593862Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 105:0 ProgressState 2025-06-24T21:12:40.593995Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#105:0 progress is 1/1 2025-06-24T21:12:40.594044Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-24T21:12:40.594094Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#105:0 progress is 1/1 2025-06-24T21:12:40.594135Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-24T21:12:40.594179Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 105, ready parts: 1/1, is published: false 2025-06-24T21:12:40.594228Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-24T21:12:40.594280Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 105:0 2025-06-24T21:12:40.594323Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 105:0 2025-06-24T21:12:40.594503Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-24T21:12:40.594565Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 105, publications: 1, subscribers: 1 2025-06-24T21:12:40.594605Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 105, [OwnerId: 
72057594046678944, LocalPathId: 3], 3 2025-06-24T21:12:40.595768Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T21:12:40.595902Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T21:12:40.595953Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-24T21:12:40.596002Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-24T21:12:40.596059Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:12:40.596164Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 1 2025-06-24T21:12:40.596213Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [2:414:2378] 2025-06-24T21:12:40.602678Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T21:12:40.602821Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-24T21:12:40.602872Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:671:2592] TestWaitNotification: OK eventTxId 105 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "W" } TestModificationResults wait txId: 106 2025-06-24T21:12:40.615536Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "W" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:12:40.615838Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 106:0, at schemeshard: 72057594046678944 2025-06-24T21:12:40.616088Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 106:1, propose status:StatusInvalidParameter, reason: Invalid partition status: 2, at schemeshard: 72057594046678944 2025-06-24T21:12:40.624435Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Invalid partition status: 2" TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:40.624754Z node 2 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Invalid partition status: 2, operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-24T21:12:40.625141Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-24T21:12:40.625201Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-24T21:12:40.625677Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-24T21:12:40.625803Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-24T21:12:40.625848Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [2:761:2669] TestWaitNotification: OK eventTxId 106 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp/unittest >> KqpStreamLookup::ReadTableDuringSplit [GOOD] Test command err: 2025-06-24T21:12:27.856016Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:12:27.856812Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:27.857129Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ee0/r3tmp/tmp3BBXIw/pdisk_1.dat 2025-06-24T21:12:28.487772Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:12:28.497792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:28.562140Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:28.572803Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799544379514 != 1750799544379518 2025-06-24T21:12:28.621881Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:28.622052Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:28.636472Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:28.777989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:29.307885Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:29.308039Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:703:2581], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:29.308133Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:29.325565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:29.392202Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:29.523415Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:708:2584], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:12:29.602410Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:778:2623] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:38.619331Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwj0vfeq8tz94yb20m7kmx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Mzk3ZjljZTctNTlhOGM2ZjMtOWNhNTE3ODEtOGFlNGVjYTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:39.454064Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwja6p3y8d7vqgktyvyjrc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2E2ZGFkZjYtODM1MDdhOWMtNzkyYWY3ZWUtYWUzMWRjODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Captured TEvDataShard::TEvRead from KQP_SOURCE_READ_ACTOR to TX_DATASHARD_ACTOR 2025-06-24T21:12:39.482021Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwja6p3y8d7vqgktyvyjrc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2E2ZGFkZjYtODM1MDdhOWMtNzkyYWY3ZWUtYWUzMWRjODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Captured TEvDataShard::TEvRead from KQP_STREAM_LOOKUP_ACTOR to TX_DATASHARD_ACTOR --- split started --- --- split finished --- Captured TEvDataShard::TEvRead from KQP_STREAM_LOOKUP_ACTOR to TX_DATASHARD_ACTOR Captured TEvDataShard::TEvRead from KQP_STREAM_LOOKUP_ACTOR to TX_DATASHARD_ACTOR >> YdbTableBulkUpsertOlap::UpsertMixed [GOOD] >> YdbYqlClient::AlterTableAddIndex >> KqpStreamLookup::ReadTableWithIndexDuringSplit [GOOD] >> YdbTableBulkUpsert::DecimalPK [GOOD] >> YdbLogStore::Dirs [GOOD] >> YdbLogStore::LogTable >> YdbYqlClient::CheckDefaultTableSettings2 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-23 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-24 >> TSchemeShardTopicSplitMergeTest::EnableSplitMerge [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbTableBulkUpsert::RetryOperation [GOOD] Test command err: 2025-06-24T21:12:04.631543Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626699007089053:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:04.631613Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00442b/r3tmp/tmpj1zxd7/pdisk_1.dat 2025-06-24T21:12:05.280099Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:05.313529Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:05.313673Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:05.325126Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:05.372000Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 26356, node 1 2025-06-24T21:12:05.676846Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:05.676869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:05.676876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:05.677015Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:05.706556Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12855 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:06.243424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:08.621834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) SUCCESS 3 rows in 0.038321s 2025-06-24T21:12:09.550229Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626720481928472:2398], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:09.550285Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626720481928484:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:09.550334Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:09.554193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:09.583728Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626720481928487:2402], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:12:09.631723Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626699007089053:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:09.631789Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:09.684232Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626720481928577:4173] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:10.425490Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwhdjbc8e961xfty1gw3gr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzAxOWNkNjEtZjBkNGFjYWYtMWQyMDMzNTQtNmI2ZGZlMzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 3 rows 2025-06-24T21:12:12.664142Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626736040022005:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:12.664212Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00442b/r3tmp/tmpUCD4zk/pdisk_1.dat 2025-06-24T21:12:12.907671Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:12.930828Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:12.930908Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:12.945102Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21804, node 4 2025-06-24T21:12:13.121412Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:13.121434Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:13.121441Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:13.121542Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17214 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:13.455094Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:13.682633Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:16.061467Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667)
: Error: Bulk upsert to table '/Root/ui8' Only async-indexed tables are supported by BulkUpsert
: Error: Bulk upsert to table '/Root/ui8/Value_index/indexImplTable' unknown table 2025-06-24T21:12:18.145054Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626760425884341:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:18.145109Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00442b/r3tmp/tmpc12PEK/pdisk_1.dat 2025-06-24T21:12:18.522664Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:18.527039Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:18.531215Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:18.540267Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 N ... 24T21:12:27.423990Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:27.424001Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:27.424187Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12530 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:27.914266Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:27.924308Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:31.360652Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Injecting ABORTED 10 times Result: ABORTED Injecting ABORTED 6 times Result: ABORTED Injecting ABORTED 5 times Result: SUCCESS Injecting ABORTED 3 times Result: SUCCESS Injecting ABORTED 0 times Result: SUCCESS Injecting OVERLOADED 10 times Result: OVERLOADED Injecting OVERLOADED 6 times Result: OVERLOADED Injecting OVERLOADED 5 times Result: SUCCESS Injecting OVERLOADED 3 times Result: SUCCESS Injecting OVERLOADED 0 times Result: SUCCESS Injecting CLIENT_RESOURCE_EXHAUSTED 10 times Result: CLIENT_RESOURCE_EXHAUSTED Injecting CLIENT_RESOURCE_EXHAUSTED 6 times Result: CLIENT_RESOURCE_EXHAUSTED Injecting CLIENT_RESOURCE_EXHAUSTED 5 times Result: SUCCESS Injecting CLIENT_RESOURCE_EXHAUSTED 3 times Result: SUCCESS Injecting CLIENT_RESOURCE_EXHAUSTED 0 times 2025-06-24T21:12:31.860393Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519626795373520265:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:31.860454Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Result: SUCCESS Injecting UNAVAILABLE 10 times Result: UNAVAILABLE Injecting UNAVAILABLE 6 times Result: UNAVAILABLE Injecting UNAVAILABLE 5 times Result: SUCCESS Injecting UNAVAILABLE 3 times Result: SUCCESS Injecting UNAVAILABLE 0 times Result: SUCCESS Injecting BAD_SESSION 10 times Result: BAD_SESSION Injecting BAD_SESSION 6 times Result: BAD_SESSION Injecting BAD_SESSION 5 times Result: SUCCESS Injecting BAD_SESSION 3 times Result: SUCCESS Injecting BAD_SESSION 0 times Result: SUCCESS Injecting SESSION_BUSY 10 times Result: SESSION_BUSY Injecting SESSION_BUSY 6 times Result: SESSION_BUSY Injecting SESSION_BUSY 5 times Result: SUCCESS Injecting SESSION_BUSY 3 times Result: SUCCESS Injecting SESSION_BUSY 0 times Result: SUCCESS Injecting NOT_FOUND 10 times Result: NOT_FOUND Injecting NOT_FOUND 6 times Result: NOT_FOUND Injecting NOT_FOUND 5 times Result: SUCCESS Injecting NOT_FOUND 3 times Result: SUCCESS Injecting NOT_FOUND 0 times Result: SUCCESS Injecting UNDETERMINED 10 times Result: UNDETERMINED Injecting UNDETERMINED 6 times Result: UNDETERMINED Injecting UNDETERMINED 5 times Result: SUCCESS Injecting UNDETERMINED 3 times Result: SUCCESS Injecting UNDETERMINED 0 times Result: SUCCESS Injecting TRANSPORT_UNAVAILABLE 10 times Result: TRANSPORT_UNAVAILABLE Injecting TRANSPORT_UNAVAILABLE 6 times Result: TRANSPORT_UNAVAILABLE Injecting TRANSPORT_UNAVAILABLE 5 times Result: SUCCESS Injecting TRANSPORT_UNAVAILABLE 3 times Result: SUCCESS Injecting TRANSPORT_UNAVAILABLE 0 times Result: SUCCESS 2025-06-24T21:12:34.684074Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626828816878282:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:34.684142Z node 13 
:METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00442b/r3tmp/tmp3GslNC/pdisk_1.dat 2025-06-24T21:12:34.923033Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:34.979715Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 TServer::EnableGrpc on GrpcPort 28522, node 13 2025-06-24T21:12:35.010831Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:35.010917Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:35.033089Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:35.056117Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:35.056149Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:35.056163Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:35.056356Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8659 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:35.480767Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:35.693059Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:38.745492Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Injecting ABORTED 10 times Result: ABORTED Injecting ABORTED 6 times Result: ABORTED Injecting ABORTED 5 times Result: SUCCESS Injecting ABORTED 3 times Result: SUCCESS Injecting ABORTED 0 times Result: SUCCESS Injecting OVERLOADED 10 times Result: OVERLOADED Injecting OVERLOADED 6 times Result: OVERLOADED Injecting OVERLOADED 5 times Result: SUCCESS Injecting OVERLOADED 3 times Result: SUCCESS Injecting OVERLOADED 0 times Result: SUCCESS Injecting CLIENT_RESOURCE_EXHAUSTED 10 times Result: CLIENT_RESOURCE_EXHAUSTED Injecting CLIENT_RESOURCE_EXHAUSTED 6 times Result: CLIENT_RESOURCE_EXHAUSTED Injecting CLIENT_RESOURCE_EXHAUSTED 5 times Result: SUCCESS Injecting CLIENT_RESOURCE_EXHAUSTED 3 times Result: SUCCESS Injecting CLIENT_RESOURCE_EXHAUSTED 0 times Result: SUCCESS Injecting UNAVAILABLE 10 times Result: UNAVAILABLE Injecting UNAVAILABLE 6 times Result: UNAVAILABLE Injecting UNAVAILABLE 5 times Result: SUCCESS Injecting UNAVAILABLE 3 times Result: SUCCESS Injecting UNAVAILABLE 0 times Result: SUCCESS Injecting BAD_SESSION 10 times Result: BAD_SESSION Injecting BAD_SESSION 6 times Result: BAD_SESSION Injecting BAD_SESSION 5 times Result: SUCCESS Injecting BAD_SESSION 3 times Result: SUCCESS Injecting BAD_SESSION 0 times Result: SUCCESS Injecting SESSION_BUSY 10 times Result: SESSION_BUSY Injecting SESSION_BUSY 6 times Result: SESSION_BUSY Injecting SESSION_BUSY 5 times Result: SUCCESS Injecting SESSION_BUSY 3 times Result: SUCCESS Injecting SESSION_BUSY 0 times Result: SUCCESS Injecting NOT_FOUND 10 times Result: NOT_FOUND Injecting NOT_FOUND 6 times Result: NOT_FOUND Injecting NOT_FOUND 5 times Result: SUCCESS Injecting NOT_FOUND 3 times Result: SUCCESS Injecting NOT_FOUND 0 times Result: SUCCESS Injecting UNDETERMINED 10 times Result: UNDETERMINED Injecting UNDETERMINED 6 times 2025-06-24T21:12:39.684589Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519626828816878282:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:39.684680Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Result: UNDETERMINED Injecting UNDETERMINED 5 times Result: SUCCESS Injecting UNDETERMINED 3 times Result: SUCCESS Injecting UNDETERMINED 0 times Result: SUCCESS Injecting TRANSPORT_UNAVAILABLE 10 times Result: TRANSPORT_UNAVAILABLE Injecting TRANSPORT_UNAVAILABLE 6 times Result: TRANSPORT_UNAVAILABLE Injecting TRANSPORT_UNAVAILABLE 5 times Result: SUCCESS Injecting TRANSPORT_UNAVAILABLE 3 times Result: SUCCESS Injecting TRANSPORT_UNAVAILABLE 0 times Result: SUCCESS >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-17 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-18 >> CommitOffset::PartitionSplit_OffsetCommit >> TopicAutoscaling::ControlPlane_BackCompatibility >> 
TopicAutoscaling::PartitionMerge_PreferedPartition_BeforeAutoscaleAwareSDK >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_BeforeAutoscaleAwareSDK >> TopicAutoscaling::Simple_BeforeAutoscaleAwareSDK >> CommitOffset::Commit_WithoutSession_TopPast >> TopicAutoscaling::PartitionSplit_PQv1 >> TSchemeShardTopicSplitMergeTest::DisableSplitMerge [GOOD] >> Balancing::Balancing_OneTopic_TopicApi >> TTableProfileTests::OverwriteStoragePolicy [GOOD] >> TTableProfileTests::OverwriteCachingPolicy ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp/unittest >> KqpStreamLookup::ReadTableWithIndexDuringSplit [GOOD] Test command err: 2025-06-24T21:12:27.859706Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:12:27.860211Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:27.860437Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ef0/r3tmp/tmpEG7CDb/pdisk_1.dat 2025-06-24T21:12:28.481049Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:12:28.496512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:28.561889Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:28.572935Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799544379322 != 1750799544379326 2025-06-24T21:12:28.624132Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:28.624320Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:28.637559Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:28.779677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:29.303004Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:742:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:29.303193Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:750:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:29.303324Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:29.327347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:29.397406Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:29.529788Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:756:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:12:29.632828Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:827:2661] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:40.922878Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwj0vmcjh06qc7h3582kjg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDQ0NTI2NTUtZmZlNGEyZDItMzc0Yjg4MGQtYzY4MmQwOGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:40.975750Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwj0vmcjh06qc7h3582kjg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDQ0NTI2NTUtZmZlNGEyZDItMzc0Yjg4MGQtYzY4MmQwOGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:41.127547Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwj0vmcjh06qc7h3582kjg", SessionId: ydb://session/3?node_id=1&id=ZDQ0NTI2NTUtZmZlNGEyZDItMzc0Yjg4MGQtYzY4MmQwOGI=, Slow query, duration: 11.788087s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "UPSERT INTO `/Root/TestTable` (key, value) VALUES (0, 00), (1, 11), (2, 22), (3, 33), (4, 44), (5, 55), (6, 66), (7, 77), (8, 88), (9, 99), (10, 1010), (11, 1111), (12, 1212), (13, 1313), (14, 1414), (15, 1515), (16, 1616), (17, 1717), (18, 1818), (19, 1919), (20, 2020), (21, 2121), (22, 2222), (23, 2323), (24, 2424), (25, 2525), (26, 2626), (27, 2727), (28, 2828), (29, 2929), (30, 3030), (31, 3131), (32, 3232), (33, 3333), (34, 3434), (35, 3535), (36, 3636), (37, 3737), (38, 3838), (39, 3939), (40, 4040), (41, 4141), (42, 4242), (43, 4343), (44, 4444), (45, 4545), (46, 4646), (47, 4747), (48, 4848), (49, 4949), (50, 5050), (51, 5151), (52, 5252), (53, 5353), (54, 5454), (55, 5555), (56, 5656), (57, 5757), (58, 5858), (59, 5959), (60, 6060), (61, 6161), (62, 6262), (63, 6363), (64, 6464), (65, 6565), (66, 6666), (67, 6767), (68, 6868), (69, 6969), (70, 7070), (71, 7171), (72, 7272), (73, 7373), (74, 7474), (75, 7575), (76, 7676), (77, 7777), (78, 7878), (79, 7979), (80, 8080), (81, 8181), (82, 8282), (83, 8383), (84, 8484), (85, 8585), (86, 8686), (87, 8787), (88, 8888), (89, 8989), (90, 9090), (91, 9191), (92, 9292), (93, 9393), (94, 9494), (95, 9595), (96, 9696), (97, 9797), (98, 9898), (99, 9999), (100, 100100), (101, 101101), (102, 102102), (103, 103103), (104, 104104), (105, 105105), (106, 106106), (107, 107107), (108, 108108), (109, 109109), (110, 110110), (111, 111111), (112, 112112), (113, 113113), (114, 114114), (115, 115115), (116, 116116), (117, 117117), (118, 118118), (119, 119119), (120, 120120), (121, 121121), (122, 122122), (123, 123123), (124, 124124), (125, 125125), (126, 126126), (127, 127127), (128, 128128), (129, 129129), (130, 130130), (131, 131131), (132, 132132), (133, 133133), (134, 134134), (135, 135135), (136, 136136), (137, 137137), (138, 138138), (139, 139139), (140, 140140), (141, 141141), (142, 142142), (143, 143143), (144, 144144), (145, 145145), (146, 146146), (147, 147147), (148, 148148), (149, 149149), (150, 150150), (151, 151151), (152, 152152), (153, 153153), (154, 154154), (155, 155155), (156, 156156), (157, 157157), 
(158, 158158), (159, 159159), (160, 160160), (161, 161161), (162, 162162), (163, 163163), (164, 164164), (165, 165165), (166, 166166), (167, 167167), (168, 168168), (169, 169169), (170, 170170), (171, 171171), (172, 172172), (173, 173173), (174, 174174), (175, 175175), (176, 176176), (177, 177177), (178, 178178), (179, 179179), (180, 180180), (181, 181181), (182, 182182), (183, 183183), (184, 184184), (185, 185185), (186, 186186), (187, 187187), (188, 188188), (189, 189189), (190, 190190), (191, 191191), (192, 192192), (193, 193193), (194, 194194), (195, 195195), (196, 196196), (197, 197197), (198, 198198), (199, 199199), (200, 200200), (201, 201201), (202, 202202), (203, 203203), (204, 204204), (205, 205205), (206, 206206), (207, 207207), (208, 208208), (209, 209209), (210, 210210), (211, 211211), (212, 212212), (213, 213213), (214, 214214), (215, 215215), (216, 216216), (217, 217217), (218, 218218), (219, 219219), (220, 220220), (221, 221221), (222, 222222), (223, 223223), (224, 224224), (225, 225225), (226, 226226), (227, 227227), (228, 228228), (229, 229229), (230, 230230), (231, 231231), (232, 232232), (233, 233233), (234, 234234), (235, 235235), (236, 236236), (237, 237237), (238, 238238), (239, 239239), (240, 240240), (241, 241241), (242, 242242), (243, 243243), (244, 244244), (245, 245245), (246, 246246), (247, 247247), (248, 248248), (249, 249249), (250, 250250), (251, 251251), (252, 252252), (253, 253253), (254, 254254), (255, 255255), (256, 256256), (257, 257257), (258, 258258), (259, 259259), (260, 260260), (261, 261261), (262, 262262), (263, 263263), (264, 264264), (265, 265265), (266, 266266), (267, 267267), (268, 268268), (269, 269269), (270, 270270), (271, 271271), (272, 272272), (273, 273273), (274, 274274), (275, 275275), (276, 276276), (277, 277277), (278, 278278), (279, 279279), (280, 280280), (281, 281281), (282, 282282), (283, 283283), (284, 284284), (285, 285285), (286, 286286), (287, 287287), (288, 288288), (289, 289289), (290, 290290), (291, 291291), (292, 292292), (293, 293293), (294, 294294), (295, 295295), (296, 296296), (297, 297297), (298, 298298), (299, 299299), (300, 300300), (301, 301301), (302, 302302), (303, 303303), (304, 304304), (305, 305305), (306, 306306), (307, 307307), (308, 308308), (309, 309309), (310, 310310), (311, 311311), (312, 312312), (313, 313313), (314, 314314), (315, 315315), (316, 316316), (317, 317317), (318, 318318), (319, 319319), (320, 320320), (321, 321321), (322, 322322), (323, 323323), (324, 324324), (325, 325325), (326, 326326), (327, 327327), (328, 328328), (329, 329329), (330, 330330), (331, 331331), (332, 332332), (333, 333333), (334, 334334), (335, 335335), (336, 336336), (337, 337337), (338, 338338), (339, 339339), (340, 340340), (341, 341341), (342, 342342), (343, 343343), (344, 344344), (345, 345345), (346, 346346), (347, 347347), (348, 348348), (349, 349349), (350, 350350), (351, 351351), (352, 352352), (353, 353353), (354, 354354), (355, 355355), (356, 356356), (357, 357357), (358, 358358), (359, 359359), (360, 360360), (361, 361361), (362, 362362), (363, 363363), (364, 364364), (365, 365365), (366, 366366), (367, 367367), (368, 368368), (369, 369369), (370, 370370), (371, 371371), (372, 372372), (373, 373373), (374, 374374), (375, 375375), (376, 376376), (377, 377377), (378, 378378), (379, 379379), (380, 380380), (381, 381381), (382, 382382), (383, 383383), (384, 384384), (385, 385385), (386, 386386), (387, 387387), (388, 388388), (389, 389389), (390, 390390), (391, 391391), (392, 392392), (393, 393393), (394, 394394), 
(395, 395395), (396, 396396), (397, 397397), (398, 398398), (399, 399399), (400, 400400), (401, 401401), (402, 402402), (403, 403403), (404, 404404), (405, 405405), (406, 406406), (407, 407407), (408, 408408), (409, 409409), (410, 410410), (411, 411411), (412, 412412), (413, 413413), (414, 414414), (415, 415415), (416, 416416), (417, 417417), (418, 418418), (419, 419419), (420, 420420), (421, 421421), (422, 422422), (423, 423423), (424, 424424), (425, 425425), (426, 426426), (427, 427427), (428, 428428), (429, 429429), (430, 430430), (431, 431431), (432, 432432), (433, 433433), (434, 434434), (435, 435435), (436, 436436), (437, 437437), (438, 438438), (439, 439439), (440, 440440), (441, 441441), (442, 442442), (443, 443443), (444, 444444), (445, 445445), (446, 446446), (447, 447447), (448, 448448), (449, 449449), (450, 450450), (451, 451451), (452, 452452), (453, 453453), (454, 454454), (455, 455455), (456, 456456), (457, 457457), (458, 458458), (459, 459459), (460, 460460), (461, 461461), (462, 462462), (463, 463463), (464, 464464), (465, 465465), (466, 466466), (467, 467467), (468, 468468), (469, 469469), (470, 470470), (471, 471471), (472, 472472), (473, 473473), (474, 474474), (475, 475475), (476, 476476), (477, 477477), (478, 478478), (479, 479479), (480, 480480), (481, 481481), (482, 482482), (483, 483483), (484, 484484), (485, 485485), (486, 486486), (487, 487487), (488, 488488), (489, 489489), (490, 490490), (491, 491491), (492, 492492), (493, 493493), (494, 494494), (495, 495495), (496, 496496), (497, 497497), (498, 498498), (499, 499499), (500, 500500), (501, 501501), (502, 502502), (503, 503503), (504, 504504), (505, 505505), (506, 506506), (507, 507507), (508, 508508), (509, 509509), (510, 510510), (511, 511511), (512, 512512), (513, 513513), (514, 514514), (515, 515515), (516, 516516), (517, 517517), (518, 518518), (519, 519519), (520, 520520), (521, 521521), (522, 522522), (523, 523523), (524, 524524), (525, 525525), (526, 526526), (527, 527527), (528, 528528), (529, 529529), (530, 530530), (531, 531531), (532, 532532), (533, 533533), (534, 534534), (535, 535535), (536, 536536), (537, 537537), (538, 538538), (539, 539539), (540, 540540), (541, 541541), (542, 542542), (543, 543543), (544, 544544), (545, 545545), (546, 546546), (547, 547547), (548, 548548), (549, 549549), (550, 550550), (551, 551551), (552, 552552), (553, 553553), (554, 554554), (555, 555555), (556, 556556), (557, 557557), (558, 558558), (559, 559559), (560, 560560), (561, 561561), (562, 562562), (563, 563563), (564, 564564), (565, 565565), (566, 566566), (567, 567567), (568, 568568), (569, 569569), (570, 570570), (571, 571571), (572, 572572), (573, 573573), (574, 574574), (575, 575575), (576, 576576), (577, 577577), (578, 578578), (579, 579579), (580, 580580), (581, 581581), (582, 582582), (583, 583583), (584, 584584), (585, 585585), (586, 586586), (587, 587587), (588, 588588), (589, 589589), (590, 590590), (591, 591591), (592, 592592), (593, 593593), (594, 594594), (595, 595595), (596, 596596), (597, 597597), (598, 598598), (599, 599599), (600, 600600), (601, 601601), (602, 602602), (603, 603603), (604, 604604), (605, 605605), (606, 606606), (607, 607607), (608, 608608), (609, 609609), (610, 610610), (611, 611611), (612, 612612), (613, 613613), (614, 614614), (615, 615615), (616, 616616), (617, 617617), (618, 618618), (619, 619619), (620, 620620), (621, 621621), (622, 622622), (623, 623623), (624, 624624), (625, 625625), (626, 626626), (627, 627627), (628, 628628), (629, 629629), (630, 630630), (631, 631631), 
(632, 632632), (633, 633633), (634, 634634), (635, 635635), (636, 636636), (637, 637637), (638, 638638), (639, 639639), (640, 640640), (641, 641641), (642, 642642), (643, 643643), (644, 644644), (645, 645645), (646, 646646), (647, 647647), (648, 648648), (649, 649649), (650, 650650), (651, 651651), (652, 652652), (653, 653653), (654, 654654), (655, 655655), (656, 656656), (657, 657657), (658, 658658), (659, 659659), (660, 660660), (661, 661661), (662, 662662), (663, 663663), (664, 664664), (665, 665665), (666, 666666), (667, 667667), (668, 668668), (669, 669669), (670, 670670), (671, 671671), (672, 672672), (673, 673673), (674, 674674), (675, 675675), (676, 676676), (677, 677677), (678, 678678), (679, 679679), (680, 680680), (681, 681681), (682, 682682), (683, 683683), (684, 684684), (685, 685685), (686, 686686), (687, 687687), (688, 688688), (689, 689689), (690, 690690), (691, 691691), (692, 692692), (693, 693693), (694, 694694), (695, 695695), (696, 696696), (697, 697697), (698, 698698), (699, 699699), (700, 700700), (701, 701701), (702, 702702), (703, 703703), (704, 704704), (705, 705705), (706, 706706), (707, 707707), (708, 708708), (709, 709709), (710, 710710), (711, 711711), (712, 712712), (713, 713713), (714, 714714), (715, 715715), (716, 716716), (717, 717717), (718, 718718), (719, 719719), (720, 720720), (721, 721721), (722, 722722), (723, 723723), (724, 724724), (725, 725725), (726, 726726), (727, 727727), (728, 728728), (729, 729729), (730, 730730), (731, 731731), (732, 732732), (733, 733733), (734, 734734), (735, 735735), (736, 736736), (737, 737737), (738, 738738), (739, 739739), (740, 740740), (741, 741741), (742, 742742), (743, 743743), (744, 744744), (745, 745745), (746, 746746), (747, 747747), (748, 748748), (749, 749749), (750, 750750), (751, 751751), (752, 752752), (753, 753753), (754, 754754), (755, 755755), (756, 756756), (757, 757757), (758, 758758), (759, 759759), (760, 760760), (761, 761761), (762, 762762), (763, 763763), (764, 764764), (765, 765765), (766, 766766), (767, 767767), (768, 768768), (769, 769769), (770, 770770), (771, 771771), (772, 772772), (773, 773773), (774, 774774), (775, 775775), (776, 776776), (777, 777777), (778, 778778), (779, 779779), (780, 780780), (781, 781781), (782, 782782), (783, 783783), (784, 784784), (785, 785785), (786, 786786), (787, 787787), (788, 788788), (789, 789789), (790, 790790), (791, 791791), (792, 792792), (793, 793793), (794, 794794), (795, 795795), (796, 796796), (797, 797797), (798, 798798), (799, 799799), (800, 800800), (801, 801801), (802, 802802), (803, 803803), (804, 804804), (805, 805805), (806, 806806), (807, 807807), (808, 808808), (809, 809809), (810, 810810), (811, 811811), (812, 812812), (813, 813813), (814, 814814), (815, 815815), (816, 816816), (817, 817817), (818, 818818), (819, 819819), (820, 820820), (821, 821821), (822, 822822), (823, 823823), (824, 824824), (825, 825825), (826, 826826), (827, 827827), (828, 828828), (829, 829829), (830, 830830), (831, 831831), (832, 832832), (833, 833833), (834, 834834), (835, 835835), (836, 836836), (837, 837837), (838, 838838), (839, 839839), (840, 840840), (841, 841841), (842, 842842), (843, 843843), (844, 844844), (845, 845845), (846, 846846), (847, 847847), (848, 848848), (849, 849849), (850, 850850), (851, 851851), (852, 852852), (853, 853853), (854, 854854), (855, 855855), (856, 856856), (857, 857857), (858, 858858), (859, 859859), (860, 860860), (861, 861861), (862, 862862), (863, 863863), (864, 864864), (865, 865865), (866, 866866), (867, 867867), (868, 868868), 
(869, 869869), (870, 870870), (871, 871871), (872, 872872), (873, 873873), (874, 874874), (875, 875875), (876, 876876), (877, 877877), (878, 878878), (879, 879879), (880, 880880), (881, 881881), (882, 882882), (883, 883883), (884, 884884), (885, 885885), (886, 886886), (887, 887887), (888, 888888), (889, 889889), (890, 890890), (891, 891891), (892, 892892), (893, 893893), (894, 894894), (895, 895895), (896, 896896), (897, 897897), (898, 898898), (899, 899899), (900, 900900), (901, 901901), (902, 902902), (903, 903903), (904, 904904), (905, 905905), (906, 906906), (907, 907907), (908, 908908), (909, 909909), (910, 910910), (911, 911911), (912, 912912), (913, 913913), (914, 914914), (915, 915915), (916, 916916), (917, 917917), (918, 918918), (919, 919919), (920, 920920), (921, 921921), (922, 922922), (923, 923923), (924, 924924), (925, 925925), (926, 926926), (927, 927927), (928, 928928), (929, 929929), (930, 930930), (931, 931931), (932, 932932), (933, 933933), (934, 934934), (935, 935935), (936, 936936), (937, 937937), (938, 938938), (939, 939939), (940, 940940), (941, 941941), (942, 942942), (943, 943943), (944, 944944), (945, 945945), (946, 946946), (947, 947947), (948, 948948), (949, 949949), (950, 950950), (951, 951951), (952, 952952), (953, 953953), (954, 954954), (955, 955955), (956, 956956), (957, 957957), (958, 958958), (959, 959959), (960, 960960), (961, 961961), (962, 962962), (963, 963963), (964, 964964), (965, 965965), (966, 966966), (967, 967967), (968, 968968), (969, 969969), (970, 970970), (971, 971971), (972, 972972), (973, 973973), (974, 974974), (975, 975975), (976, 976976), (977, 977977), (978, 978978), (979, 979979), (980, 980980), (981, 981981), (982, 982982), (983, 983983), (984, 984984), (985, 985985), (986, 986986), (987, 987987), (988, 988988), (989, 989989), (990, 990990), (991, 991991), (992, 992992), (993, 993993), (994, 994994), (995, 995995), (996, 996996), (997, 997997), (998, 998998), (999, 999999), (10000, 10000);", parameters: 0b 2025-06-24T21:12:41.365216Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwjcdd4cnxbcdv1rk3em1n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2NiZTIyZTgtYzA5MTc2YmMtYmViNjVjZTgtNzM0M2Y2Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root Captured TEvDataShard::TEvRead from KQP_SOURCE_READ_ACTOR to TX_DATASHARD_ACTOR ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbTableBulkUpsert::DecimalPK [GOOD] Test command err: 2025-06-24T21:11:27.700410Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626542807043204:2238];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.700469Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00447c/r3tmp/tmpqis3Hn/pdisk_1.dat 2025-06-24T21:11:28.209057Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:28.239165Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:28.239261Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:28.274583Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21748, node 1 2025-06-24T21:11:28.624994Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:28.625026Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:28.625037Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:28.625158Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:28.691718Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27854 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:28.994933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:31.180606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) SUCCESS 2025-06-24T21:11:31.358099Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626559986913399:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:31.358119Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626559986913408:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:31.358207Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:31.361927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:31.387734Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626559986913413:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:11:31.476650Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626559986913489:2810] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:32.265153Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhwg88wb5ykj28ww9rbapzb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmVkNjkwY2ItODRhNzcyYWMtNjgyNjRmZTUtNzlkNDYxMjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 0 rows 2025-06-24T21:11:32.700663Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626542807043204:2238];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:32.700728Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:32.748359Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jyhwg96j8x0sqr7t8xmz68ed, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmVkNjkwY2ItODRhNzcyYWMtNjgyNjRmZTUtNzlkNDYxMjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 1 rows 2025-06-24T21:11:32.834460Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-24T21:11:32.868880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) SUCCESS 2025-06-24T21:11:33.519927Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710665. Ctx: { TraceId: 01jyhwg9xg7cdnppc0ve6zjjwq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTRkYzU5YmQtNmYxYmJhMzMtNWJmY2M1NGQtYTZjNzUzMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 0 rows 2025-06-24T21:11:33.984547Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jyhwgad1baqhzm7x28m4cfyh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTRkYzU5YmQtNmYxYmJhMzMtNWJmY2M1NGQtYTZjNzUzMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root SUCCESS count returned 1 rows 2025-06-24T21:11:34.058011Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-24T21:11:34.093524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) SUCCESS 2025-06-24T21:11:34.569227Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710669. Ctx: { TraceId: 01jyhwgb1m17j8y8cmt3ve2795, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmVkYTBlN2YtOWI2ZGQzMGYtMWQwYWQyMWQtY2U5MmJlM2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 0 rows 2025-06-24T21:11:35.001179Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710670. Ctx: { TraceId: 01jyhwgbdq7ee32e4dxsm8wp5h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmVkYTBlN2YtOWI2ZGQzMGYtMWQwYWQyMWQtY2U5MmJlM2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 1 rows 2025-06-24T21:11:35.085596Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-24T21:11:35.114233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) SUCCESS 2025-06-24T21:11:35.695315Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710673. Ctx: { TraceId: 01jyhwgc0x3zz3c7pe6kzae2ek, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWYyNTIwNDctY2RhNTQyY2EtMzQzNjFhNTctYTcwODNhYTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 0 rows 2025-06-24T21:11:36.156198Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710674. Ctx: { TraceId: 01jyhwgch49smr7ymg42x9z0m6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWYyNTIwNDctY2RhNTQyY2EtMzQzNjFhNTctYTcwODNhYTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 1 rows 2025-06-24T21:11:36.240710Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-06-24T21:11:36.290849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) SUCCESS 2025-06-24T21:11:36.746197Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710677. 
Ctx: { TraceId: 01jyhwgd6f5n6fr5p4fs7ne6d1, Database: , DatabaseId: /Root, ... d
: Error: Bulk upsert to table '/Root/Logs' Type mismatch, got type Uint64 for column App, but expected Utf8
: Error: Bulk upsert to table '/Root/Logs' Type mismatch, got type Uint64 for column Message, but expected Utf8
: Error: Bulk upsert to table '/Root/Logs' Unknown column: HttpCode 2025-06-24T21:12:27.367637Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626798592760159:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:27.367704Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00447c/r3tmp/tmpovBb7C/pdisk_1.dat 2025-06-24T21:12:27.581679Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28190, node 10 2025-06-24T21:12:27.639877Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 2025-06-24T21:12:27.700500Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:27.700649Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:27.760895Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:27.787570Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:27.787613Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:27.787624Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:27.787822Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17600 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:28.149572Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:28.392948Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:31.464026Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667)
: Error: Bulk upsert to table '/Root/Limits' Row key size of 1100002 bytes is larger than the allowed threshold 1049600
: Error: Bulk upsert to table '/Root/Limits' Row key size of 1100002 bytes is larger than the allowed threshold 1049600
: Error: Bulk upsert to table '/Root/Limits' Row key size of 1100000 bytes is larger than the allowed threshold 1049600
: Error: Bulk upsert to table '/Root/Limits' Row cell size of 17000022 bytes is larger than the allowed threshold 16777216 2025-06-24T21:12:32.368649Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519626798592760159:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:32.368789Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:35.568787Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626831509564103:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:35.568876Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00447c/r3tmp/tmpcbspAn/pdisk_1.dat 2025-06-24T21:12:35.844415Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:35.870635Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:35.870761Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:35.879597Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12909, node 13 2025-06-24T21:12:35.984267Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:35.984292Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:35.984302Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:35.984458Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6162 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
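The bulk upsert failures above are produced by deliberately malformed requests: the server rejects a Uint64 value for the Utf8 columns App and Message, an unknown column HttpCode, row keys larger than 1049600 bytes (1025 KiB) and cells larger than 16777216 bytes (16 MiB). A minimal sketch of how the type-mismatch case could be reproduced with the YDB Python SDK follows; the endpoint, database path and the assumption that /Root/Logs stores App and Message as Utf8 come only from the error text above, not from the test source, so treat them as illustrative.

```python
# Hypothetical reproduction of the "Type mismatch" bulk upsert error above.
# Assumes a /Root/Logs table whose App and Message columns are Utf8, as the
# error text suggests; endpoint and database are placeholders.
from collections import namedtuple
import ydb

driver = ydb.Driver(endpoint="grpc://localhost:2136", database="/Root")
driver.wait(timeout=5)

# Declaring the columns as Uint64 while the table stores Utf8 should yield:
#   "Bulk upsert to table '/Root/Logs' Type mismatch, got type Uint64
#    for column App, but expected Utf8"
columns = (
    ydb.BulkUpsertColumns()
    .add_column("App", ydb.PrimitiveType.Uint64)      # wrong type on purpose
    .add_column("Message", ydb.PrimitiveType.Uint64)  # wrong type on purpose
)
Row = namedtuple("Row", ["App", "Message"])
rows = [Row(App=1, Message=2)]

try:
    driver.table_client.bulk_upsert("/Root/Logs", rows, columns)
except ydb.issues.Error as e:
    print("rejected as expected:", e)
```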
2025-06-24T21:12:36.327875Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:36.612036Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:39.726356Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:39.880805Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626848689434448:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:39.880910Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626848689434443:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:39.881008Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:39.886175Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:39.927312Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519626848689434457:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:12:40.004676Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519626848689434544:2789] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:40.380662Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhwjb668vphp2gczsn176jw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NDNkMDhjYjctYzI3YzAwMmMtZjY0YmU3ZWQtMjNkMjQyZDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:40.569319Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519626831509564103:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:40.569427Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TopicAutoscaling::PartitionSplit_BeforeAutoscaleAwareSDK |95.2%| [TA] $(B)/ydb/core/tx/datashard/ut_kqp/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::EnableSplitMerge [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:12:38.267666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:12:38.267812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:38.267865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:12:38.267905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:12:38.268829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:12:38.268889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:12:38.268978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:38.269067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 
604800.000000s, IsManualStartup# false 2025-06-24T21:12:38.269937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:12:38.271470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:12:38.376412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:12:38.376477Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:38.395770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:12:38.396273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:12:38.396477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:12:38.411742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:12:38.412016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:12:38.412856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.413237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:12:38.418113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:38.419199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:12:38.424536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:38.424630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:38.426164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:12:38.426242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:12:38.426316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:12:38.426404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.433960Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:12:38.556158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" 
Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:12:38.557527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.558559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:12:38.558639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:12:38.561569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:12:38.561780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:38.565224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.566084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:12:38.566346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.566536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:12:38.566591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:12:38.566652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:12:38.572371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.572481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:12:38.572528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:12:38.575137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.575257Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:38.575311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.575395Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:12:38.580527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:12:38.583657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:12:38.585589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:12:38.587026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:38.587294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:38.587365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.588897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:12:38.588977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:38.589234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:12:38.589326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:12:38.593055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:38.593119Z node 1 :FLAT_TX_SCHEMESHARD ... 
on is done id#105:0 progress is 1/1 2025-06-24T21:12:42.081099Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-24T21:12:42.081150Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 105, ready parts: 1/1, is published: false 2025-06-24T21:12:42.081199Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-24T21:12:42.081250Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 105:0 2025-06-24T21:12:42.081312Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 105:0 2025-06-24T21:12:42.081504Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-24T21:12:42.081557Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 105, publications: 1, subscribers: 0 2025-06-24T21:12:42.081598Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 105, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-24T21:12:42.086948Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T21:12:42.087104Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T21:12:42.087162Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-24T21:12:42.087237Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-24T21:12:42.087283Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:12:42.087402Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-24T21:12:42.098479Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-24T21:12:42.106433Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-24T21:12:42.106489Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-24T21:12:42.107018Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-24T21:12:42.107129Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-24T21:12:42.107186Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:757:2665] TestWaitNotification: OK eventTxId 105 2025-06-24T21:12:42.624618Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:42.624935Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 3 took 330us result status StatusSuccess 2025-06-24T21:12:42.625635Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } Status: Active } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 
2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:42.703368Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:12:42.703698Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1/Topic1" took 422us result status StatusSuccess 2025-06-24T21:12:42.704468Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } Status: Active } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active 
KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >>>>> Verify partition 0 >>>>> Verify partition 1 >>>>> Verify partition 2 |95.2%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::CheckDefaultTableSettings2 [GOOD] Test command err: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004417/r3tmp/tmp19FauM/pdisk_1.dat TServer::EnableGrpc on GrpcPort 6075, node 1 TClient is connected to server localhost:3852 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
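For reference, the partition KeyRange bounds in the DescribeScheme output above ("UUUUUUUUUUUUUUUT" and "\252...\251", printed with octal escapes) are 16-byte keys that sit at roughly 1/3 and 2/3 of the 128-bit partitioning key space, which is how Topic1's three partitions divide the range. A small standalone sketch (plain Python, no YDB dependency) decodes the two bounds; the byte values are copied from the log, while the "thirds" reading is an observation rather than a statement of schemeshard's exact split formula.

```python
# Decode the two partition boundaries shown in the DescribeScheme output and
# check where they fall inside the 128-bit (16-byte) key space.
bound1 = b"UUUUUUUUUUUUUUUT"          # 15 bytes of 0x55 followed by 0x54
bound2 = bytes([0xAA] * 15 + [0xA9])  # printed as \252...\251 in the log

full = 2 ** 128  # size of the 16-byte key space
for name, b in (("bound1", bound1), ("bound2", bound2)):
    value = int.from_bytes(b, "big")
    print(f"{name}: 0x{b.hex()} ~ {value / full:.4f} of the key space")
# Expected output: bound1 ~ 0.3333, bound2 ~ 0.6667
```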
2025-06-24T21:12:18.207494Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626759894969355:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:18.207576Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004417/r3tmp/tmpzmALLp/pdisk_1.dat 2025-06-24T21:12:18.604102Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:18.642522Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:18.642605Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:18.654839Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11870, node 4 2025-06-24T21:12:18.816603Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:18.816624Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:18.816638Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:18.816782Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15065 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:19.172597Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:19.251322Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:21.640953Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626772779872279:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:21.641050Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:21.991481Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:22.171515Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626777074839753:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:22.171657Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:22.172682Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626777074839758:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:22.183972Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:22.224979Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519626777074839760:2314], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:12:22.287197Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519626777074839835:2806] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:22.449078Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwhswq5wd82fva66h5strk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=MjJhMTAyOWMtNzE3MTQ2ZDctODkwNTFiNjAtNDM5ZTRlZWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:22.514393Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:22.660657Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:12:22.776695Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004417/r3tmp/tmpYWxkmJ/pdisk_1.dat 2025-06-24T21:12:24.708457Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:24.795853Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:24.835877Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 1336, node 7 2025-06-24T21:12:24.883661Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:24.883815Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:24.951593Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:25.002098Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:25.002129Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:25.002137Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from 
file: (empty maybe) 2025-06-24T21:12:25.002282Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17453 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: tr ... Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:28.154044Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:28.276317Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626802596338027:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:28.276395Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:28.276612Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626802596338032:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:28.280662Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:28.313111Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519626802596338034:2311], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:12:28.408570Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519626802596338109:2798] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:28.500454Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwhzvk6549f7wj3tceqnfr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NGVjZjM0Ny0zZjc4MWIzNi05MTk3YTFiYy0xYmQ0ZDkxYQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:28.567099Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:28.734836Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:12:28.920424Z node 7 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T21:12:30.935965Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626812539408296:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:30.936039Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004417/r3tmp/tmpCEMel4/pdisk_1.dat 2025-06-24T21:12:31.219244Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:31.263421Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 17095, node 10 2025-06-24T21:12:31.297988Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:31.298165Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:31.307487Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:31.357848Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:31.357875Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:31.357886Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:31.358050Z node 10 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4472 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:31.764586Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:31.781931Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:12:31.963309Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:34.740308Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:36.765821Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626838517633394:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:36.765897Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004417/r3tmp/tmpAVXczt/pdisk_1.dat 2025-06-24T21:12:36.939543Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:36.978513Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:36.978613Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:36.986319Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21562, node 13 2025-06-24T21:12:37.104731Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, 
broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:37.104756Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:37.104767Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:37.104923Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20128 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:37.448881Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:37.783323Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:40.625731Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::DisableSplitMerge [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:12:39.594940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:12:39.595037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:39.595077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:12:39.595112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:12:39.595301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:12:39.595338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:12:39.595416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:12:39.595487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:12:39.596270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:12:39.596618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:12:39.681065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:12:39.681125Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:39.697568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:12:39.698020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 
2025-06-24T21:12:39.698210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:12:39.706714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:12:39.706884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:12:39.707508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:39.707802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:12:39.710599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:39.710826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:12:39.712068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:39.712130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:12:39.712381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:12:39.712432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:12:39.712476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:12:39.712581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.724241Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:12:39.866733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:12:39.866968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.867234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:12:39.867300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:12:39.867507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:12:39.867670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:39.870091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:39.870274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:12:39.870415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.870454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:12:39.870509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:12:39.870535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:12:39.872495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.872559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:12:39.872602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:12:39.874339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.874378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:12:39.874408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:39.874444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:12:39.877323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:12:39.878977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:12:39.879250Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:12:39.880028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:12:39.880169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:39.880227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:39.880459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:12:39.880511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:12:39.880710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:12:39.880794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:12:39.882700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:12:39.882744Z node 1 :FLAT_TX_SCHEMESHARD ... 
rd__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-24T21:12:42.277604Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-24T21:12:42.277653Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:12:42.277764Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-24T21:12:42.283558Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-24T21:12:42.290935Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-24T21:12:42.291004Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-24T21:12:42.291553Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-24T21:12:42.291682Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-24T21:12:42.291727Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:753:2663] TestWaitNotification: OK eventTxId 105 2025-06-24T21:12:42.901244Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:42.901576Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 3 took 355us result status StatusSuccess 2025-06-24T21:12:42.902313Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 Status: Inactive ChildPartitionIds: 1 ChildPartitionIds: 2 } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { ToBound: "\010" } Status: Active ParentPartitionIds: 0 } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\010" } Status: Active 
ParentPartitionIds: 0 } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { ToBound: "\010" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { FromBound: "\010" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:42.994843Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:12:42.995186Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1/Topic1" took 369us result status StatusSuccess 2025-06-24T21:12:42.995899Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 
72075186233409548 Status: Inactive ChildPartitionIds: 1 ChildPartitionIds: 2 } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { ToBound: "\010" } Status: Active ParentPartitionIds: 0 } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\010" } Status: Active ParentPartitionIds: 0 } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { ToBound: "\010" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { FromBound: "\010" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } PartitionStrategy { PartitionStrategyType: DISABLED } } TestModificationResults wait txId: 106 2025-06-24T21:12:43.000378Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } PartitionStrategy { PartitionStrategyType: DISABLED } } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:12:43.000631Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 106:0, at schemeshard: 72057594046678944 2025-06-24T21:12:43.000798Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 106:1, propose status:StatusInvalidParameter, reason: Can`t disable auto partitioning., at schemeshard: 72057594046678944 2025-06-24T21:12:43.003684Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 106, response: Status: 
StatusInvalidParameter Reason: "Can`t disable auto partitioning." TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:12:43.003980Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Can`t disable auto partitioning., operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-24T21:12:43.004340Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-24T21:12:43.004390Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-24T21:12:43.005022Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-24T21:12:43.005133Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-24T21:12:43.005176Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [2:768:2677] TestWaitNotification: OK eventTxId 106 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-11 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-12 |95.2%| [TA] $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/test-results/unittest/{meta.json ... results_accumulator.log} |95.2%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/test-results/unittest/{meta.json ... results_accumulator.log} >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-41 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-42 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-35 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-36 >> TTopicReaderTests::TestRun_ReadOneMessage [GOOD] >> TTopicReaderTests::TestRun_ReadTwoMessages_With_Limit_1 >> YdbYqlClient::RenameTables [GOOD] >> TYqlDateTimeTests::DateKey [GOOD] >> TTopicReaderTests::TestRun_ReadMoreMessagesThanLimit_Without_Wait_NewlineDelimited [GOOD] >> TTopicReaderTests::TestRun_ReadMoreMessagesThanLimit_Without_Wait_NoDelimiter >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-47 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-48 >> AsyncIndexChangeCollector::UpsertSingleRow >> CdcStreamChangeCollector::UpsertManyRows >> CdcStreamChangeCollector::UpsertIntoTwoStreams >> AsyncIndexChangeCollector::UpsertToSameKey >> AsyncIndexChangeCollector::CoveredIndexUpdateCoveredColumn >> CdcStreamChangeCollector::InsertSingleRow >> AsyncIndexChangeCollector::InsertSingleRow >> AsyncIndexChangeCollector::DeleteNothing ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::RenameTables [GOOD] Test command err: 2025-06-24T21:12:01.679529Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626684972765398:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:01.679583Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004440/r3tmp/tmp8G4q7r/pdisk_1.dat 2025-06-24T21:12:02.340735Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:02.340875Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:02.360343Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:02.367000Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3277, node 1 2025-06-24T21:12:02.695502Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:02.741295Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:02.741327Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:02.741334Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:02.741429Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11884 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:03.219721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 5 2025-06-24T21:12:06.680350Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626684972765398:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:06.680417Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Previous query attempt was finished with unsuccessful status CLIENT_RESOURCE_EXHAUSTED: Sending retry attempt 2 of 5 Previous query attempt was finished with unsuccessful status UNAVAILABLE: Sending retry attempt 3 of 5 Previous query attempt was finished with unsuccessful status BAD_SESSION: Sending retry attempt 4 of 5 Previous query attempt was finished with unsuccessful status SESSION_BUSY: Sending retry attempt 5 of 5 2025-06-24T21:12:09.736755Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626719332504876:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:09.736888Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:09.738663Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626719332504888:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:09.742368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:09.797245Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626719332504890:2315], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:12:09.876428Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626719332504970:2713] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 5 Previous query attempt was finished with unsuccessful status CLIENT_RESOURCE_EXHAUSTED: Sending retry attempt 2 of 5 Previous query attempt was finished with unsuccessful status UNAVAILABLE: Sending retry attempt 3 of 5 Previous query attempt was finished with unsuccessful status BAD_SESSION: Sending retry attempt 4 of 5 Previous query attempt was finished with unsuccessful status SESSION_BUSY: Sending retry attempt 5 of 5 Previous query attempt was finished with unsuccessful status NOT_FOUND: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status NOT_FOUND: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status UNDETERMINED: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status UNDETERMINED: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status TRANSPORT_UNAVAILABLE: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status TRANSPORT_UNAVAILABLE: Sending retry attempt 1 of 1 2025-06-24T21:12:17.687517Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626754454047482:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:17.687628Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004440/r3tmp/tmpZpPU2w/pdisk_1.dat 2025-06-24T21:12:18.012023Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:18.045961Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:18.046054Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:18.059405Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6683, node 4 2025-06-24T21:12:18.335992Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:18.336018Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:18.336027Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:18.336172Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:18.697334Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13948 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:18.842160Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:22.091327Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626775928884963:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:22.091470Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:22.168578Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first c ... schemereq.cpp:1385: Actor# [13:7519626875901780344:3549] txid# 281474976715672 HANDLE EvClientConnected 2025-06-24T21:12:45.246141Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_drop_table.cpp:492: TDropTable Propose, path: Root/Table-1, pathId: 0, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-24T21:12:45.246302Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715672:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:12:45.249084Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715672, database: /Root, subject: , status: StatusAccepted, operation: DROP TABLE, path: Root/Table-1 2025-06-24T21:12:45.250014Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [13:7519626875901780344:3549] txid# 281474976715672 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715672} 2025-06-24T21:12:45.250064Z node 13 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [13:7519626875901780344:3549] txid# 281474976715672 SEND to# [13:7519626875901780343:2354] Source {TEvProposeTransactionStatus txid# 281474976715672 Status# 53} 2025-06-24T21:12:45.251076Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:12:45.251269Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:12:45.251287Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:12:45.251344Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:12:45.265169Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750799565312, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T21:12:45.271850Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1104: All parts have reached barrier, tx: 281474976715672, done: 0, blocked: 1 2025-06-24T21:12:45.277338Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:12:45.277484Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:12:45.277506Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:12:45.277564Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:12:45.278833Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715672:0 2025-06-24T21:12:45.296379Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# DropTableRequest, traceId# 01jyhwjgfgat3vz6b3htwcjd4p, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:46094, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-24T21:12:45.296686Z node 13 :HIVE WARN: 
hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 13, TabletId: 72075186224037890 not found 2025-06-24T21:12:45.297055Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-24T21:12:45.298151Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [13:7519626854426941827:2139] Handle TEvProposeTransaction 2025-06-24T21:12:45.298191Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [13:7519626854426941827:2139] TxId# 281474976715673 ProcessProposeTransaction 2025-06-24T21:12:45.298240Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [13:7519626854426941827:2139] Cookie# 0 userReqId# "" txid# 281474976715673 SEND to# [13:7519626875901780433:3632] 2025-06-24T21:12:45.300980Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [13:7519626875901780433:3632] txid# 281474976715673 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "Root" OperationType: ESchemeOpDropTable Drop { Name: "Table-2" } } } DatabaseName: "" RequestType: "" PeerName: "ipv6:[::1]:46094" 2025-06-24T21:12:45.301020Z node 13 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [13:7519626875901780433:3632] txid# 281474976715673 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:12:45.301076Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [13:7519626875901780433:3632] txid# 281474976715673 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:12:45.301455Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [13:7519626875901780433:3632] txid# 281474976715673 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:12:45.301552Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [13:7519626875901780433:3632] HANDLE EvNavigateKeySetResult, txid# 281474976715673 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:12:45.301587Z node 13 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [13:7519626875901780433:3632] txid# 281474976715673 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715673 TabletId# 72057594046644480} 2025-06-24T21:12:45.301740Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [13:7519626875901780433:3632] txid# 281474976715673 HANDLE EvClientConnected 2025-06-24T21:12:45.301939Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_drop_table.cpp:492: TDropTable Propose, path: Root/Table-2, pathId: 0, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-24T21:12:45.302078Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715673:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:12:45.304249Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715673, database: /Root, subject: , status: StatusAccepted, operation: DROP TABLE, path: Root/Table-2 2025-06-24T21:12:45.304339Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [13:7519626875901780433:3632] txid# 281474976715673 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715673} 2025-06-24T21:12:45.304377Z node 13 :TX_PROXY DEBUG: 
schemereq.cpp:556: Actor# [13:7519626875901780433:3632] txid# 281474976715673 SEND to# [13:7519626875901780428:2358] Source {TEvProposeTransactionStatus txid# 281474976715673 Status# 53} 2025-06-24T21:12:45.305900Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:12:45.306024Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:12:45.306035Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:12:45.306094Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:12:45.315448Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750799565361, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T21:12:45.322782Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1104: All parts have reached barrier, tx: 281474976715673, done: 0, blocked: 1 2025-06-24T21:12:45.329929Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:12:45.330074Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:12:45.330084Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:12:45.330161Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:12:45.332548Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715673:0 2025-06-24T21:12:45.345319Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000021c80] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-06-24T21:12:45.345614Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000014480] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-06-24T21:12:45.345828Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000152480] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-06-24T21:12:45.346054Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000084680] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-06-24T21:12:45.346251Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00016c280] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-06-24T21:12:45.346430Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00016ce80] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-06-24T21:12:45.346627Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000105680] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-06-24T21:12:45.346820Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00017be80] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-06-24T21:12:45.347011Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0001f4a80] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-06-24T21:12:45.347216Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000090c80] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-06-24T21:12:45.347430Z node 13 :GRPC_SERVER DEBUG: 
grpc_server.cpp:283: [0x51a000023a80] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-06-24T21:12:45.347628Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000154280] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-06-24T21:12:45.347824Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0001ad080] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-06-24T21:12:45.348009Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0001adc80] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-06-24T21:12:45.348187Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0001ad680] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-06-24T21:12:45.348355Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0001f5080] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-06-24T21:12:45.348520Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00017c480] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 2025-06-24T21:12:45.356057Z node 13 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 13, TabletId: 72075186224037889 not found 2025-06-24T21:12:45.358042Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TYqlDateTimeTests::DateKey [GOOD] Test command err: 2025-06-24T21:11:59.051723Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626675281514992:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:59.053201Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004457/r3tmp/tmpHxIYr7/pdisk_1.dat 2025-06-24T21:11:59.484447Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:59.487415Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:59.487556Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:59.496762Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:59.531328Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 2817, node 1 2025-06-24T21:11:59.788722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:59.788747Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:59.788771Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:59.788887Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
2025-06-24T21:12:00.059600Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4206 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:00.249943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:4206 2025-06-24T21:12:00.584161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-24T21:12:00.632661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:01.154629Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519626687387265967:2203];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:01.283638Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:01.283728Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:01.287389Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-24T21:12:01.292810Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:01.352898Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; TClient is connected to server localhost:4206 2025-06-24T21:12:02.110901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:02.159692Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4206 TClient::Ls request: /Root/table-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715660 CreateStep: 1750799522339 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) 2025-06-24T21:12:02.909957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) TClient is connected to server localhost:4206 TClient::Ls request: /Root/table-2 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-2" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715661 CreateStep: 1750799523074 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... (TRUNCATED) 2025-06-24T21:12:03.497447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) TClient is connected to server localhost:4206 TClient::Ls request: /Root/table-3 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-3" PathId: 5 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715662 CreateStep: 1750799523599 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-3" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) 2025-06-24T21:12:03.979463Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626675281514992:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:03.979541Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:04.000889Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-24T21:12:04.001432Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:12:07.555433Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626713522763039:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:07.561892Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004457/r3tmp/tmpWafFtH/pdisk_1.dat 2025-06-24T21:12:07.947902Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:07.966965Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:07.967072Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node ... Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:29.316251Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:29.349519Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30288 2025-06-24T21:12:29.870343Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... waiting... 
2025-06-24T21:12:29.899910Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:30.407852Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519626810024600335:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:30.408059Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:30.448803Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:30.448992Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:30.456247Z node 10 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 12 Cookie 12 2025-06-24T21:12:30.476279Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30288 2025-06-24T21:12:31.417968Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:31.475387Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-06-24T21:12:31.476176Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:12:35.411295Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519626810024600335:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:35.411403Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:37.902303Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626840822353672:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:37.902395Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004457/r3tmp/tmpXRwYCc/pdisk_1.dat 2025-06-24T21:12:38.161598Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:38.218382Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:38.218536Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:38.226939Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> 
Connected TServer::EnableGrpc on GrpcPort 10111, node 13 2025-06-24T21:12:38.354730Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:38.354764Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:38.354773Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:38.354928Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:38.921955Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9849 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:38.993749Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:42.902653Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519626840822353672:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:42.902752Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:43.411007Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:43.645739Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626866592158631:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:43.645881Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:43.646355Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626866592158643:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:43.653430Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:43.692093Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519626866592158645:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:12:43.761071Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519626866592158722:2802] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:44.082120Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhwjevv3qp7hkqf73vwncsk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NGY0YzRhYjktM2IwNjhiMGUtOGE4OTc2N2ItMjFjODg5OGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:44.416902Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jyhwjfb50gddpqtyg7vmdtr7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NGY0YzRhYjktM2IwNjhiMGUtOGE4OTc2N2ItMjFjODg5OGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:44.748016Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. Ctx: { TraceId: 01jyhwjfm87jag28pc74x6tam5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NGY0YzRhYjktM2IwNjhiMGUtOGE4OTc2N2ItMjFjODg5OGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:44.985808Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710664. Ctx: { TraceId: 01jyhwjfyk6cfdcpkrk2dbv0nz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NGY0YzRhYjktM2IwNjhiMGUtOGE4OTc2N2ItMjFjODg5OGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> YdbYqlClient::AlterTableAddIndex [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-24 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-49 >> YdbYqlClient::RetryOperationLimitedDuration [GOOD] >> YdbOlapStore::LogGrepNonExisting [GOOD] >> YdbOlapStore::LogGrepExisting >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-18 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-19 >> YdbOlapStore::LogPagingBetween [GOOD] >> YdbOlapStore::LogWithUnionAllAscending >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeGood >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsFromAdLdapServer >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsFromAdLdapServer >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsDisableRequestToAD >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDontExistGroupAttribute >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDontExistGroupAttribute >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-12 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-13 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::AlterTableAddIndex [GOOD] Test command err: 2025-06-24T21:12:01.286832Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626687932447147:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:01.317517Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004446/r3tmp/tmpy6nLgO/pdisk_1.dat 2025-06-24T21:12:01.935241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:01.935342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:01.941224Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:01.963569Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:02.031136Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 14468, node 1 2025-06-24T21:12:02.288661Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:02.448843Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:02.448866Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:02.448872Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:02.448968Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24215 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:02.905369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:05.462870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) SUCCESS 2025-06-24T21:12:05.756222Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626705112317354:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:05.756285Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626705112317364:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:05.756336Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:05.761049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:05.807953Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626705112317368:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:12:05.900274Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626705112317438:2810] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:06.286315Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626687932447147:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:06.286375Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:06.333577Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwh9vqfq088h8159aee6ca, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTAzZTBmY2MtNjA2OTM2NDQtMmU0YTdhZmQtMTFhNjc3OGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:06.347941Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799526357, txId: 281474976715661] shutting down 2025-06-24T21:12:06.420215Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-24T21:12:06.432967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) BAD_REQUEST 2025-06-24T21:12:06.607358Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-24T21:12:06.610641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) SUCCESS 2025-06-24T21:12:06.758045Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-24T21:12:08.492689Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626715486259099:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:08.492738Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004446/r3tmp/tmpSAZMUh/pdisk_1.dat 2025-06-24T21:12:08.891552Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:08.920663Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:08.920744Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:08.928735Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28294, node 4 2025-06-24T21:12:09.187706Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:09.187727Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:09.187735Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:09.187868Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10569 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:12:09.547282Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:09.560643Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:10569 2025-06-24T21:12:09.881283Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__op ... 
load","id":7},{"name":"ingested_at","id":8},{"name":"saved_at","id":9},{"name":"request_id","id":10},{"name":"flt","id":11},{"name":"dbl","id":12}]},"o":"0","t":"ReserveMemory"},"w":0,"id":26}}}; 2025-06-24T21:12:40.140149Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=;fline=actor.cpp:48;task=agents_waiting=1;additional_info=();; 2025-06-24T21:12:40.140328Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=; 2025-06-24T21:12:40.142588Z node 10 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[10:7519626835505683779:2298];tablet_id=72075186224037888;parent=[10:7519626835505683653:2282];fline=manager.cpp:88;event=ask_data;request=request_id=21;3={portions_count=1};; 2025-06-24T21:12:40.143086Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=;fline=actor.cpp:48;task=agents_waiting=1;additional_info=();; 2025-06-24T21:12:40.145385Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=;fline=actor.cpp:48;task=agents_waiting=1;additional_info=();; 2025-06-24T21:12:40.145548Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=; 2025-06-24T21:12:40.146951Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=; 2025-06-24T21:12:40.159896Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=;fline=actor.cpp:48;task=agents_waiting=1;additional_info=();; 2025-06-24T21:12:40.162283Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=; 2025-06-24T21:12:40.179465Z node 10 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Finished read cookie: 1 at tablet 72075186224037888 2025-06-24T21:12:40.192246Z node 10 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Finished read cookie: 1 at tablet 72075186224037889 2025-06-24T21:12:40.209376Z node 10 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799560000, txId: 18446744073709551615] shutting down 2025-06-24T21:12:40.427411Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037889;parent=[10:7519626835505683643:2279];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-24T21:12:40.427491Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037890;parent=[10:7519626835505683648:2281];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-24T21:12:40.427524Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037891;parent=[10:7519626835505683646:2280];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-24T21:12:40.427579Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;parent=[10:7519626835505683653:2282];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-24T21:12:42.365538Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626860781552514:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:42.365604Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004446/r3tmp/tmpA3Duzq/pdisk_1.dat 2025-06-24T21:12:42.587191Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8983, node 13 2025-06-24T21:12:42.721947Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:42.722059Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:42.868989Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:42.928006Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:42.928039Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:42.928051Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:42.928227Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22314 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:43.362830Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:43.379603Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:46.804346Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626877961422733:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:46.804460Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:46.854338Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.024344Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626882256390204:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.024478Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.025006Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519626882256390209:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.030217Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:47.065811Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519626882256390211:2315], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:12:47.137435Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519626882256390280:2808] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:47.253302Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhwjj5f29f1sj4vadf3eabg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZDYwYzg1ZjAtYTBjY2IyMDMtMjA0ZDM4MzItMzMwN2E1NTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:47.371406Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519626860781552514:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:47.371502Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:47.406842Z node 13 :TX_PROXY WARN: rpc_alter_table.cpp:329: [AlterTableAddIndex [13:7519626882256390337:2331] TxId# 281474976710663] Access check failed 2025-06-24T21:12:47.485531Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.612398Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:12:47.905104Z node 13 :TX_PROXY ERROR: rpc_alter_table.cpp:274: [AlterTableAddIndex [13:7519626882256390736:2347] TxId# 281474976710665] Unable to navigate: Root/WrongPath status: PathErrorUnknown 2025-06-24T21:12:48.105036Z node 13 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 13, TabletId: 72075186224037889 not found ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::RetryOperationLimitedDuration [GOOD] Test command err: 2025-06-24T21:11:59.147391Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626679048938464:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:59.147443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004452/r3tmp/tmp5IqdZv/pdisk_1.dat 2025-06-24T21:11:59.665286Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:59.665398Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:59.674545Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:59.692443Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6128, node 1 2025-06-24T21:11:59.756066Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:11:59.802458Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:59.802481Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:59.802489Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:59.802617Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:00.161905Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:64951 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:00.398756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:03.116931Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626696228808652:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:03.117034Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:03.393157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:03.722434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:12:03.898604Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626696228808847:2317], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:03.898698Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:03.898935Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626696228808852:2320], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:03.903094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:03.954348Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626696228808854:2321], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-24T21:12:04.072366Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626700523776235:2815] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:04.138120Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7519626700523776259:2825], for# test_user@builtin, access# DescribeSchema 2025-06-24T21:12:04.138155Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7519626700523776259:2825], for# test_user@builtin, access# DescribeSchema 2025-06-24T21:12:04.148031Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626679048938464:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:04.148094Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:04.153481Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626700523776250:2326], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:21: Error: At function: KiReadTable!
:2:21: Error: Cannot find table 'db.[Root/Test]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:12:04.155557Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YWJhZTQ5ZTAtMWQwNGQ2NGMtOTE2MTQyNjgtMjYyNDEzZWM=, ActorId: [1:7519626696228808843:2315], ActorState: ExecuteState, TraceId: 01jyhwh81s20fwazbm40cynad4, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:12:05.947299Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626701731905464:2196];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004452/r3tmp/tmp5D5UI6/pdisk_1.dat 2025-06-24T21:12:06.004279Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:06.143482Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25446, node 4 2025-06-24T21:12:06.165322Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 2025-06-24T21:12:06.219173Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:06.219195Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:06.219202Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:06.219328Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:06.226283Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:06.226379Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:06.241626Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28884 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:06.482398Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner ... HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:13.166300Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:13.170603Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:13.274655Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:13.274683Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:13.274692Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:13.274830Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26962 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:13.699509Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:13.811566Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 5 2025-06-24T21:12:17.751708Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519626733962297341:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:17.751770Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Previous query attempt was finished with unsuccessful status CLIENT_RESOURCE_EXHAUSTED: Sending retry attempt 2 of 5 Previous query attempt was finished with unsuccessful status UNAVAILABLE: Sending retry attempt 3 of 5 Previous query attempt was finished with unsuccessful status BAD_SESSION: Sending retry attempt 4 of 5 Previous query attempt was finished with unsuccessful status SESSION_BUSY: Sending retry attempt 5 of 5 2025-06-24T21:12:18.755528Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626759732102196:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:18.755601Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626759732102209:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:18.755660Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:18.759843Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:18.815345Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519626759732102212:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:12:18.897141Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519626759732102299:2696] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 5 Previous query attempt was finished with unsuccessful status CLIENT_RESOURCE_EXHAUSTED: Sending retry attempt 2 of 5 Previous query attempt was finished with unsuccessful status UNAVAILABLE: Sending retry attempt 3 of 5 Previous query attempt was finished with unsuccessful status BAD_SESSION: Sending retry attempt 4 of 5 Previous query attempt was finished with unsuccessful status SESSION_BUSY: Sending retry attempt 5 of 5 Previous query attempt was finished with unsuccessful status NOT_FOUND: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status NOT_FOUND: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status UNDETERMINED: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status UNDETERMINED: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status TRANSPORT_UNAVAILABLE: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status TRANSPORT_UNAVAILABLE: Sending retry attempt 1 of 1 2025-06-24T21:12:24.796224Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626784759328319:2146];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004452/r3tmp/tmpjNTFCA/pdisk_1.dat 2025-06-24T21:12:24.936158Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:25.135562Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:25.176050Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:25.176146Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:25.182758Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13690, node 10 2025-06-24T21:12:25.432047Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:25.432085Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:25.432095Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:25.432254Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13741 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:12:25.798740Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:25.832859Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:25.848725Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 3 2025-06-24T21:12:29.787066Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519626784759328319:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:29.787158Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 2 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 3 of 3 2025-06-24T21:12:40.065521Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:12:40.065558Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 2 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 3 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 3 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-42 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-61 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-36 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-37 >> 
TLdapUtilsSearchFilterCreatorTest::GetFilterWithoutLoginPlaceholders [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnames [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromIpV4List [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromIpV6List [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnamesLdapsScheme [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnamesUnknownScheme [GOOD] >> LdapAuthProviderTest::LdapServerIsUnavailable >> AsyncIndexChangeCollector::UpsertToSameKey [GOOD] >> AsyncIndexChangeCollector::UpsertWithoutIndexedValue >> AsyncIndexChangeCollector::UpsertSingleRow [GOOD] >> AsyncIndexChangeCollector::UpsertManyRows >> AsyncIndexChangeCollector::DeleteNothing [GOOD] >> AsyncIndexChangeCollector::DeleteSingleRow >> AsyncIndexChangeCollector::InsertSingleRow [GOOD] >> AsyncIndexChangeCollector::InsertManyRows >> YdbYqlClient::TestExplicitPartitioning [GOOD] >> CdcStreamChangeCollector::InsertSingleRow [GOOD] >> CdcStreamChangeCollector::InsertSingleUuidRow >> CdcStreamChangeCollector::UpsertManyRows [GOOD] >> CdcStreamChangeCollector::UpsertToSameKey |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnamesUnknownScheme [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-48 [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAdministerTenant >> CdcStreamChangeCollector::UpsertIntoTwoStreams [GOOD] >> CdcStreamChangeCollector::PageFaults >> AsyncIndexChangeCollector::CoveredIndexUpdateCoveredColumn [GOOD] >> AsyncIndexChangeCollector::CoveredIndexUpsert >> TTableProfileTests::OverwriteCachingPolicy [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsDisableRequestToAD [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithRemovedUserCredentialsBad >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDontExistGroupAttribute [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithInvalidRobotUserLoginBad >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsFromAdLdapServer [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeGood >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsFromAdLdapServer [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeGood >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDontExistGroupAttribute [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithInvalidRobotUserLoginBad >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeGood [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestExplicitPartitioning [GOOD] Test command err: 2025-06-24T21:12:02.287868Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626692566047165:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:02.287927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00443e/r3tmp/tmpWm06vw/pdisk_1.dat 2025-06-24T21:12:02.971504Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:02.998342Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:02.998454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:03.014009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21597, node 1 2025-06-24T21:12:03.295947Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:03.295973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:03.295980Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:03.296105Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:03.315392Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30386 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:03.766688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:06.545131Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626709745917345:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:06.545266Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:06.864348Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626709745917366:2619] txid# 281474976710658, issues: { message: "Column Key has wrong key type Double" severity: 1 } 2025-06-24T21:12:08.663270Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626718133307657:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:08.786768Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00443e/r3tmp/tmpzjVl2Y/pdisk_1.dat 2025-06-24T21:12:09.088946Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:09.104193Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:09.104268Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:09.122702Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64895, node 4 2025-06-24T21:12:09.405735Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:09.405758Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:09.405765Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:09.405882Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10714 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:09.696542Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:09.719839Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:12.438183Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626735313177882:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:12.438268Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:12.503307Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626735313177906:2321], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:12.504876Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:12.531887Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:12.564569Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626735313177930:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:12.566084Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:12.590851Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519626735313178006:2670] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EPathStateCreate)" severity: 1 } 2025-06-24T21:12:12.598582Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519626735313178007:2671] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EPathStateCreate)" severity: 1 } 2025-06-24T21:12:12.598758Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519626735313178042:2694] txid# 281474976710666, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EPathStateCreate)" severity: 1 } 2025-06-24T21:12:12.598811Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519626735313178010:2674] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EPathStateCreate)" severity: 1 } 2025-06-24T21:12:12.598885Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519626735313178008:2672] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EPathStateCreate)" severity: 1 } 2025-06-24T21:12:12.598911Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519626735313178009:2673] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: ... aitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:15.880751Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:16.083275Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:18.721539Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:18.831846Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626757644421383:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:18.831938Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:18.832177Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519626757644421395:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:18.839790Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:18.875903Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519626757644421397:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:12:18.963272Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519626757644421470:2844] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:19.020153Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwhpme9fdhkm5xwzzbkqjy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZWY2OGE4YmItZjQxOWMwODQtODhjMTQ5NzMtYTkzNDU1ZWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00443e/r3tmp/tmpjz3DaV/pdisk_1.dat 2025-06-24T21:12:21.290747Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519626772040395459:2171];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:21.392445Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:21.519071Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:21.571915Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:21.572004Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:21.576728Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:21.595787Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 5711, node 10 2025-06-24T21:12:21.817228Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:21.817257Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:21.817265Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:21.817404Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4642 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:22.249650Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:22.339370Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:24.758521Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:26.291446Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519626772040395459:2171];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:26.291520Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:36.507743Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:12:36.507784Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:51.504592Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626900889416511:2500], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:51.504708Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:51.504736Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519626900889416523:2503], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:51.510283Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:51.540714Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519626900889416525:2504], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:12:51.613041Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519626900889416601:3186] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:51.731316Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhwjphe0ebh6hgvq34sjev3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=YTAxYmEwNjgtN2JjZWE4ZDYtZjMzMzBhMjYtNjI2NjE1MTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:12:52.278646Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jyhwjpsg8k2mhg8ex900546n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=YTAxYmEwNjgtN2JjZWE4ZDYtZjMzMzBhMjYtNjI2NjE1MTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesExpiredCert [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TTableProfileTests::OverwriteCachingPolicy [GOOD] Test command err: 2025-06-24T21:12:05.520123Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626703891911208:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:05.520175Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004429/r3tmp/tmpIMS7bY/pdisk_1.dat 2025-06-24T21:12:06.228775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:06.228887Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:06.270474Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:06.271249Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11217, node 1 2025-06-24T21:12:06.599282Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:06.639181Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:06.639201Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:06.639208Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:06.639347Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4072 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:07.086640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:4072 2025-06-24T21:12:07.477901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-24T21:12:07.531504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:08.176952Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:08.177034Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:08.192669Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-24T21:12:08.200250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4072 2025-06-24T21:12:09.052815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:09.101772Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:10.524916Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626703891911208:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:10.525007Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; TClient is connected to server localhost:4072 TClient::Ls request: /Root/ydb_ut_tenant/table-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1750799530010 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) 2025-06-24T21:12:10.663844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) TClient is connected to server localhost:4072 TClient::Ls request: /Root/ydb_ut_tenant/table-2 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-2" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710661 CreateStep: 1750799531030 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... (TRUNCATED) 2025-06-24T21:12:11.683331Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-24T21:12:11.685969Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:12:14.808066Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626740662628656:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:14.808135Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004429/r3tmp/tmpqPTeEC/pdisk_1.dat 2025-06-24T21:12:15.206102Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:15.226400Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:15.226481Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:15.234649Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18232, node 4 2025-06-24T21:12:15.519724Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:15.519748Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:15.519756Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:15.519893Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:15.876325Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8481 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. ... 4194304 InMemStepsToSnapshot: 300 ... (TRUNCATED) 2025-06-24T21:12:40.313848Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) TClient is connected to server localhost:9439 TClient::Ls request: /Root/ydb_ut_tenant/table-5 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-5" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715664 CreateStep: 1750799560790 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-5" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) 2025-06-24T21:12:41.369183Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-06-24T21:12:41.377324Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:12:43.994202Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519626865802612876:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:43.994299Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004429/r3tmp/tmpDMpLHA/pdisk_1.dat 2025-06-24T21:12:44.363179Z node 13 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:44.389906Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:44.390044Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:44.406283Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19726, node 13 2025-06-24T21:12:44.430218Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 2025-06-24T21:12:44.508441Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:44.508479Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:44.508496Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:44.508754Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23160 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:45.075025Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:45.098861Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23160 2025-06-24T21:12:45.633043Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-24T21:12:45.668466Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:46.177475Z node 15 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[15:7519626881450158230:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:46.177552Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:46.243325Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:46.243485Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:46.249245Z node 13 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 15 Cookie 15 2025-06-24T21:12:46.252426Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23160 2025-06-24T21:12:46.753806Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.278114Z node 15 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23160 TClient::Ls request: /Root/ydb_ut_tenant/table-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 
1750799567480 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... (TRUNCATED) 2025-06-24T21:12:48.232555Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:48.999181Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519626865802612876:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:48.999298Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; TClient is connected to server localhost:23160 TClient::Ls request: /Root/ydb_ut_tenant/table-2 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-2" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710661 CreateStep: 1750799568780 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) 2025-06-24T21:12:49.505572Z node 13 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 15 2025-06-24T21:12:49.509900Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:12:51.183318Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[15:7519626881450158230:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:51.183443Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=timeout; >> TopicAutoscaling::ControlPlane_BackCompatibility [GOOD] >> TopicAutoscaling::ControlPlane_AutoscalingWithStorageSizeRetention >> LdapAuthProviderTest_LdapsScheme::LdapRefreshRemoveUserBad >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-19 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-49 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-50 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-20 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesExpiredCert [GOOD] Test command err: 2025-06-24T21:11:39.982951Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626592513952319:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:39.987264Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00446d/r3tmp/tmpVR3OBM/pdisk_1.dat 2025-06-24T21:11:40.809629Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:40.809701Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:40.817442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:40.856298Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:40.881134Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 22432, node 1 2025-06-24T21:11:41.024976Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:41.040306Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:41.040329Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:41.040336Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:41.040491Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24565 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:41.619068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:41.805238Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket BFCAA002776F3BAB0C6287CBEE697D8416A13BD0F80E8D718C15AE28D11DFFD4 (ipv6:[::1]:49192) has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-24T21:11:42.035621Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:49204) has now valid token of root@builtin 2025-06-24T21:11:42.179725Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-24T21:11:42.179761Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:11:42.179773Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T21:11:42.179815Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-24T21:11:45.958002Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626617025975900:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:45.958050Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00446d/r3tmp/tmpQGUPTN/pdisk_1.dat 2025-06-24T21:11:46.350597Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:46.393508Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:46.393959Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:46.399996Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7288, node 4 2025-06-24T21:11:46.413960Z node 4 :BS_CONTROLLER WARN: 
{BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 2025-06-24T21:11:46.523864Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:46.523884Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:46.523889Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:46.523983Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17853 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:46.860698Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:46.951548Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket BFCAA002776F3BAB0C6287CBEE697D8416A13BD0F80E8D718C15AE28D11DFFD4 (ipv6:[::1]:56694) has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-24T21:11:47.100907Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:56718) has now valid token of root@builtin 2025-06-24T21:11:47.106078Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:11:47.187578Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-24T21:11:47.187607Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:11:47.187615Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T21:11:47.187650Z node 4 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-24T21:11:50.960233Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626641125514581:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:50.960297Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00446d/r3tmp/tmpW1U2jf/pdisk_1.dat 2025-06-24T21:11:51.197398Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:51.231907Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 13268, node 7 2025-06-24T21:11:51.281948Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:51.281970Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:51.281979Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:51.282111Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:51.295708Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:51.295796Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:51.300550Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30617 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: ... 2:29.688617Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:29.688777Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:29.744441Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:29.846574Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:29.846603Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:29.846613Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:29.846792Z node 25 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4339 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:30.306656Z node 25 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:30.386970Z node 25 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:34.370707Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[25:7519626805351701113:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:34.370800Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; E0624 21:12:40.503725377 819806 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:40.528771142 819806 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:40.567562318 815829 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:40.591351707 815829 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:40.631536180 819829 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:40.652191789 815829 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:40.683551758 815828 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:40.703828629 815829 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:40.746568706 815828 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:40.766750472 819904 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:40.799585936 815513 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:40.829203930 815512 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. 
2025-06-24T21:12:42.312803Z node 28 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[28:7519626861341009688:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:42.312873Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00446d/r3tmp/tmpHR52Iu/pdisk_1.dat 2025-06-24T21:12:42.613646Z node 28 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:42.642692Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:42.642817Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:42.650217Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22124, node 28 2025-06-24T21:12:42.811009Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:42.811035Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:42.811043Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:42.811227Z node 28 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7045 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:43.326610Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:43.354381Z node 28 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:47.313919Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[28:7519626861341009688:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:47.314037Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; E0624 21:12:53.448972740 820331 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:53.473579330 820503 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:53.516875787 824157 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:53.543590470 820503 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:53.595236389 824186 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:53.616903976 820502 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:53.653040738 824187 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:53.676406662 820502 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:53.732025324 820503 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:53.761063132 824260 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:53.801615927 820331 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0624 21:12:53.822352725 824260 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. 
>> LdapAuthProviderTest::LdapServerIsUnavailable [GOOD] >> LdapAuthProviderTest::LdapRequestWithEmptyHost >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithRemovedUserCredentialsBad [GOOD] >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoGood >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithInvalidRobotUserLoginBad [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithInvalidRobotUserPasswordBad >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-13 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-14 >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeGood [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeGood [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithInvalidRobotUserLoginBad [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithInvalidRobotUserPasswordBad >> LdapAuthProviderTest_nonSecure::LdapRefreshRemoveUserBad >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-61 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-62 >> AsyncIndexChangeCollector::UpsertWithoutIndexedValue [GOOD] >> CdcStreamChangeCollector::DeleteNothing >> AsyncIndexChangeCollector::InsertManyRows [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableInsertSingleRow >> AsyncIndexChangeCollector::DeleteSingleRow [GOOD] >> AsyncIndexChangeCollector::IndexedPrimaryKeyDeleteSingleRow >> AsyncIndexChangeCollector::UpsertManyRows [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableUpdateOneIndexedColumn >> DataShardTxOrder::RandomPoints_DelayData [GOOD] >> AsyncIndexChangeCollector::CoveredIndexUpsert [GOOD] >> AsyncIndexChangeCollector::AllColumnsInPk >> KqpExtractPredicateLookup::SqlInJoin [GOOD] >> KqpKv::BulkUpsert >> CdcStreamChangeCollector::InsertSingleUuidRow [GOOD] >> CdcStreamChangeCollector::IndexAndStreamUpsert >> CdcStreamChangeCollector::UpsertToSameKey [GOOD] >> CdcStreamChangeCollector::UpsertToSameKeyWithImages >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-37 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-38 >> LdapAuthProviderTest::LdapRequestWithEmptyHost [GOOD] >> LdapAuthProviderTest::LdapRequestWithEmptyBaseDn >> TTopicReaderTests::TestRun_ReadTwoMessages_With_Limit_1 [GOOD] >> TTopicReaderTests::TestRun_Read_Less_Messages_Than_Sent >> TRegisterNodeOverDiscoveryService::ServerWithOutCertVerification_ClientProvidesExpiredCert [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientDoesNotProvideClientCerts >> TopicAutoscaling::Simple_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::Simple_AutoscaleAwareSDK ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::RandomPoints_DelayData [GOOD] Test command err: 2025-06-24T21:09:13.314654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:09:13.314715Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:09:13.316549Z node 1 
:TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:09:13.329628Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:09:13.330271Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:09:13.330586Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:09:13.378145Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:09:13.387123Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:09:13.387390Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:09:13.389338Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:09:13.389424Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:09:13.389494Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:09:13.389874Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:09:13.390007Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:09:13.390084Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:09:13.459846Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:09:13.503650Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:09:13.503874Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:09:13.503995Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:09:13.504058Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:09:13.504123Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:09:13.504178Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:13.504365Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:13.504428Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:13.504749Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:09:13.504882Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:09:13.505091Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 
2025-06-24T21:09:13.505137Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:09:13.505190Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:09:13.505235Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:09:13.505284Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:09:13.505332Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:09:13.505382Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:09:13.505494Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:13.505535Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:13.505599Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:09:13.514073Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nK\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\n \000Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:09:13.514204Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:09:13.514305Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:09:13.514507Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:09:13.514576Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:09:13.514635Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:09:13.514686Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:09:13.514730Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:09:13.514781Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:09:13.514825Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:13.515247Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:09:13.515293Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:09:13.515337Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: 
Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:09:13.515396Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:13.515471Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:09:13.515510Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:09:13.515556Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:09:13.515591Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:13.515624Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:09:13.528017Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:09:13.528090Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:09:13.528144Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:09:13.528192Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:09:13.528297Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:09:13.528853Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:222:2218], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:13.528913Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:09:13.528977Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2217], serverId# [1:222:2218], sessionId# [0:0:0] 2025-06-24T21:09:13.529104Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:09:13.529134Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:09:13.529241Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:09:13.529278Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:09:13.529315Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:09:13.529366Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:09:13.532054Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:09:13.532112Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: 
[CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:09:13.532327Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:13.532372Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:09:13.532419Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:09:13.532452Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:09:13.532487Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:09:13.532532Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:09:13.532562Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100 ... st 9437184 consumer 9437184 txId 523 2025-06-24T21:12:58.766500Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2312]: {TEvReadSet step# 1000005 txid# 524 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 18} 2025-06-24T21:12:58.766527Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:12:58.766548Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 524 2025-06-24T21:12:58.766704Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2312]: {TEvReadSet step# 1000005 txid# 525 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 19} 2025-06-24T21:12:58.766727Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:12:58.766749Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 525 2025-06-24T21:12:58.766894Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2312]: {TEvReadSet step# 1000005 txid# 526 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 20} 2025-06-24T21:12:58.766928Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:12:58.766964Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 526 2025-06-24T21:12:58.767066Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2312]: {TEvReadSet step# 1000005 txid# 527 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 21} 2025-06-24T21:12:58.767088Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:12:58.767116Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 527 2025-06-24T21:12:58.767392Z 
node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2312]: {TEvReadSet step# 1000005 txid# 528 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 22} 2025-06-24T21:12:58.767436Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:12:58.767468Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 528 2025-06-24T21:12:58.767698Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2312]: {TEvReadSet step# 1000005 txid# 529 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 23} 2025-06-24T21:12:58.767736Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:12:58.767768Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 529 2025-06-24T21:12:58.767923Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2312]: {TEvReadSet step# 1000005 txid# 512 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 7} 2025-06-24T21:12:58.767958Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:12:58.767989Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 512 2025-06-24T21:12:58.768133Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2312]: {TEvReadSet step# 1000005 txid# 530 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 24} 2025-06-24T21:12:58.768162Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:12:58.768184Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 530 2025-06-24T21:12:58.768317Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2312]: {TEvReadSet step# 1000005 txid# 531 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 25} 2025-06-24T21:12:58.768349Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:12:58.768395Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 531 2025-06-24T21:12:58.768541Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2312]: {TEvReadSet step# 1000005 txid# 532 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 26} 2025-06-24T21:12:58.768580Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:12:58.768609Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 532 
2025-06-24T21:12:58.768816Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2312]: {TEvReadSet step# 1000005 txid# 533 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 27} 2025-06-24T21:12:58.768861Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:12:58.768891Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 533 2025-06-24T21:12:58.769035Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2312]: {TEvReadSet step# 1000005 txid# 534 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 28} 2025-06-24T21:12:58.769067Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:12:58.769095Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 534 2025-06-24T21:12:58.769273Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2312]: {TEvReadSet step# 1000005 txid# 535 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 29} 2025-06-24T21:12:58.769311Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:12:58.769341Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 535 2025-06-24T21:12:58.769570Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2312]: {TEvReadSet step# 1000005 txid# 536 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 30} 2025-06-24T21:12:58.769600Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:12:58.769620Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 536 2025-06-24T21:12:58.769714Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2312]: {TEvReadSet step# 1000005 txid# 514 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 8} 2025-06-24T21:12:58.769735Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:12:58.769751Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 514 2025-06-24T21:12:58.769895Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2312]: {TEvReadSet step# 1000005 txid# 537 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 31} 2025-06-24T21:12:58.769925Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:12:58.769954Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 
consumer 9437184 txId 537 2025-06-24T21:12:58.804342Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:12:58.804402Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:538] at 9437184 on unit CompleteOperation 2025-06-24T21:12:58.804470Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 538] from 9437184 at tablet 9437184 send result to client [16:102:2135], exec latency: 1 ms, propose latency: 2 ms 2025-06-24T21:12:58.804542Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 538 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 32} 2025-06-24T21:12:58.804579Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:12:58.805553Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:12:58.805608Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:539] at 9437184 on unit CompleteOperation 2025-06-24T21:12:58.805654Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 539] from 9437184 at tablet 9437184 send result to client [16:102:2135], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:12:58.805692Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:12:58.806082Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2312]: {TEvReadSet step# 1000005 txid# 538 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 32} 2025-06-24T21:12:58.806115Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-24T21:12:58.806145Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 538 expect 20 30 23 28 29 27 22 24 25 25 31 31 29 31 29 20 31 23 30 31 31 23 29 26 26 22 26 16 10 - - - actual 20 30 23 28 29 27 22 24 25 25 31 31 29 31 29 20 31 23 30 31 31 23 29 26 26 22 26 16 10 - - - interm 20 30 23 28 29 27 22 24 25 25 15 29 29 18 29 20 27 23 30 29 29 23 29 26 26 22 26 16 10 - - - >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_PQv1 >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDontExistGroupAttribute >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithInvalidRobotUserPasswordBad [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithRemovedUserCredentialsBad >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithInvalidRobotUserPasswordBad [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithRemovedUserCredentialsBad >> 
Balancing::Balancing_OneTopic_TopicApi [GOOD] >> Balancing::Balancing_OneTopic_PQv1 >> TopicAutoscaling::PartitionSplit_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_AutoscaleAwareSDK >> TopicAutoscaling::PartitionSplit_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_PreferedPartition_BeforeAutoscaleAwareSDK >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-20 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-21 >> TTopicReaderTests::TestRun_ReadMoreMessagesThanLimit_Without_Wait_NoDelimiter [GOOD] >> TTopicReaderTests::TestRun_ReadMessages_Output_Base64 >> SchemeReqAdminAccessInTenant::ClusterAdminCanAdministerTenant [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAdministerTenant-StrictAclCheck >> AsyncIndexChangeCollector::MultiIndexedTableInsertSingleRow [GOOD] >> AsyncIndexChangeCollector::IndexedPrimaryKeyInsertSingleRow >> CdcStreamChangeCollector::DeleteNothing [GOOD] >> CdcStreamChangeCollector::DeleteSingleRow >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-50 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-51 >> AsyncIndexChangeCollector::IndexedPrimaryKeyDeleteSingleRow [GOOD] >> AsyncIndexChangeCollector::ImplicitlyUpdateCoveredColumn >> LdapAuthProviderTest::LdapRequestWithEmptyBaseDn [GOOD] >> LdapAuthProviderTest::LdapRequestWithEmptyBindDn >> AsyncIndexChangeCollector::AllColumnsInPk [GOOD] >> AsyncIndexChangeCollector::CoverIndexedColumn >> AsyncIndexChangeCollector::MultiIndexedTableUpdateOneIndexedColumn [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableReplaceSingleRow >> TSchemeShardServerLess::TestServerlessComputeResourcesModeValidation >> TSchemeShardServerLess::StorageBillingLabels >> TSchemeShardServerLess::StorageBilling >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-14 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-15 >> CdcStreamChangeCollector::UpsertToSameKeyWithImages [GOOD] >> CdcStreamChangeCollector::UpsertModifyDelete >> CdcStreamChangeCollector::IndexAndStreamUpsert [GOOD] >> CdcStreamChangeCollector::NewImage >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithCustomGroupAttributeGood >> TopicAutoscaling::ControlPlane_AutoscalingWithStorageSizeRetention [GOOD] >> TopicAutoscaling::CDC_PartitionSplit_AutosplitByLoad >> YdbOlapStore::LogNonExistingRequest [GOOD] >> YdbOlapStore::LogNonExistingUserId >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithCustomGroupAttributeGood >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-62 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-63 >> TopicAutoscaling::PartitionMerge_PreferedPartition_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionMerge_PreferedPartition_AutoscaleAwareSDK >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithRemovedUserCredentialsBad [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoGood >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithRemovedUserCredentialsBad [GOOD] >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoGood >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDontExistGroupAttribute [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserLoginBad >> 
CommitOffset::PartitionSplit_OffsetCommit [GOOD] >> CommitOffset::DistributedTxCommit >> CommitOffset::Commit_WithoutSession_TopPast [GOOD] >> CommitOffset::Commit_WithWrongSession_ToParent >> TSchemeShardServerLess::TestServerlessComputeResourcesModeValidation [GOOD] >> CdcStreamChangeCollector::PageFaults [GOOD] >> CdcStreamChangeCollector::OldImage >> LdapAuthProviderTest_LdapsScheme::LdapRefreshRemoveUserBad [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoWithError >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-38 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-39 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::TestServerlessComputeResourcesModeValidation [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:13:06.495873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:13:06.495994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:13:06.496036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:13:06.496071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:13:06.502455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:13:06.502534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:13:06.502635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:13:06.502730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:13:06.503595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:13:06.507258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:13:06.685896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:13:06.685956Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:06.707633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:13:06.708161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:13:06.708355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:13:06.720522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:13:06.720743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:13:06.721374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:06.721664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:13:06.737062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:13:06.737576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:13:06.754868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:13:06.754952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:13:06.755234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:13:06.755304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:13:06.755411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:13:06.755512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:13:06.762288Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:13:06.895166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:13:06.896830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:06.898008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:13:06.898071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:13:06.899280Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:13:06.899476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:13:06.911173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:06.912352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:13:06.912609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:06.912749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:13:06.912802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:13:06.912835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:13:06.915876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:06.915950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:13:06.916007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:13:06.918343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:06.918396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:06.918442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:06.918489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:13:06.923817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:13:06.926149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 
2025-06-24T21:13:06.927227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:13:06.928347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:06.928526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:13:06.928580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:06.929798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:13:06.929867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:06.930035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:13:06.930106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:13:06.932241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:13:06.932293Z node 1 :FLAT_TX_SCHEMESHARD ... 
4 2025-06-24T21:13:07.408721Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 104:0 128 -> 240 2025-06-24T21:13:07.408778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 104:0, at tablet# 72057594046678944 2025-06-24T21:13:07.408921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-24T21:13:07.409030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:568: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 3], Generation: 2, ActorId:[1:618:2545], EffectiveACLVersion: 0, SubdomainVersion: 2, UserAttributesVersion: 1, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 104 2025-06-24T21:13:07.412669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:13:07.412730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:13:07.412929Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:13:07.412969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-24T21:13:07.413289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:13:07.413375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:761: [72057594046678944] TSyncHive, operationId 104:0, ProgressState, NeedSyncHive: 0 2025-06-24T21:13:07.413425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 104:0 240 -> 240 2025-06-24T21:13:07.414127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:13:07.414240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:13:07.414300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:13:07.414339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 
72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-24T21:13:07.414380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 6 2025-06-24T21:13:07.414456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-06-24T21:13:07.417408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:13:07.417477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-24T21:13:07.417584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:13:07.417619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:13:07.417657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:13:07.417690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:13:07.417723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-06-24T21:13:07.417790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:13:07.417829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:0 2025-06-24T21:13:07.417861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:0 2025-06-24T21:13:07.418049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-24T21:13:07.418736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-24T21:13:07.420616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-24T21:13:07.420682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-24T21:13:07.421163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-24T21:13:07.421288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T21:13:07.421328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:773:2653] TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-06-24T21:13:07.424364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" 
OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "SharedDB" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared } } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:13:07.424524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1079: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 105:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "SharedDB" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared } 2025-06-24T21:13:07.424561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1085: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 105:0, path /MyRoot/SharedDB 2025-06-24T21:13:07.424713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 105:0, explain: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ServerlessComputeResourcesMode can be changed only for serverless, at schemeshard: 72057594046678944 2025-06-24T21:13:07.424776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 105:1, propose status:StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ServerlessComputeResourcesMode can be changed only for serverless, at schemeshard: 72057594046678944 2025-06-24T21:13:07.429131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 105, response: Status: StatusInvalidParameter Reason: "Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ServerlessComputeResourcesMode can be changed only for serverless" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:13:07.429350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ServerlessComputeResourcesMode can be changed only for serverless, operation: ALTER DATABASE, path: /MyRoot/SharedDB TestModificationResult got TxId: 105, wait until txId: 105 TestModificationResults wait txId: 106 2025-06-24T21:13:07.432565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "ServerLess0" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeUnspecified } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:13:07.432733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1079: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 106:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "ServerLess0" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeUnspecified } 2025-06-24T21:13:07.432777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1085: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 106:0, path /MyRoot/ServerLess0 2025-06-24T21:13:07.432913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 106:0, explain: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: 
can not set ServerlessComputeResourcesMode to EServerlessComputeResourcesModeUnspecified, at schemeshard: 72057594046678944 2025-06-24T21:13:07.432970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 106:1, propose status:StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: can not set ServerlessComputeResourcesMode to EServerlessComputeResourcesModeUnspecified, at schemeshard: 72057594046678944 2025-06-24T21:13:07.435402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: can not set ServerlessComputeResourcesMode to EServerlessComputeResourcesModeUnspecified" TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:13:07.435623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: can not set ServerlessComputeResourcesMode to EServerlessComputeResourcesModeUnspecified, operation: ALTER DATABASE, path: /MyRoot/ServerLess0 TestModificationResult got TxId: 106, wait until txId: 106 >> LdapAuthProviderTest::LdapRequestWithEmptyBindDn [GOOD] >> LdapAuthProviderTest::LdapRequestWithEmptyBindPassword >> AsyncIndexChangeCollector::IndexedPrimaryKeyInsertSingleRow [GOOD] >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoGood [GOOD] >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoDisableNestedGroupsGood >> TSchemeShardServerLess::TestServerlessComputeResourcesMode |95.2%| [TA] $(B)/ydb/core/tx/datashard/ut_order/test-results/unittest/{meta.json ... 
results_accumulator.log} >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithCustomGroupAttributeGood [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsUseInvalidSearchFilterBad >> LdapAuthProviderTest_nonSecure::LdapRefreshRemoveUserBad [GOOD] >> AsyncIndexChangeCollector::CoverIndexedColumn [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-21 [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableReplaceSingleRow [GOOD] >> CdcStreamChangeCollector::DeleteSingleRow [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithCustomGroupAttributeGood [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-22 >> KqpKv::BulkUpsert [GOOD] >> KqpKv::ReadRows_ExternalBlobs+UseExtBlobsPrecharge >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoWithError >> AsyncIndexChangeCollector::ImplicitlyUpdateCoveredColumn [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsUseInvalidSearchFilterBad >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserLoginBad [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserPasswordBad >> CdcStreamChangeCollector::UpsertModifyDelete [GOOD] >> CdcStreamChangeCollector::NewImage [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-51 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-52 >> TSchemeShardServerLess::TestServerlessComputeResourcesMode [GOOD] >> LdapAuthProviderTest::LdapRequestWithEmptyBindPassword [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsDisableRequestToAD ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::ImplicitlyUpdateCoveredColumn [GOOD] Test command err: 2025-06-24T21:12:50.873848Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:12:50.874305Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:50.874500Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004747/r3tmp/tmpzxFNNn/pdisk_1.dat 2025-06-24T21:12:51.369757Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:12:51.373453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:51.444703Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:51.450050Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799567837931 != 1750799567837935 2025-06-24T21:12:51.496488Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:51.496669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:51.512468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:51.603752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:51.678917Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:636:2537] 2025-06-24T21:12:51.679193Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:12:51.738093Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:12:51.738240Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:12:51.739962Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:12:51.740062Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:12:51.740115Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:12:51.740442Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:12:51.741537Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:12:51.741616Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:663:2537] in generation 1 2025-06-24T21:12:51.742285Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:639:2539] 2025-06-24T21:12:51.742518Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:12:51.751340Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:12:51.751458Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:12:51.752861Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-24T21:12:51.752934Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-24T21:12:51.752979Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-24T21:12:51.753252Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:12:51.753396Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:12:51.753449Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:670:2539] in generation 1 2025-06-24T21:12:51.764309Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:12:51.798062Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-24T21:12:51.798270Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:12:51.798392Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:673:2558] 2025-06-24T21:12:51.798433Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-24T21:12:51.798467Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-24T21:12:51.798509Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:12:51.798781Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:12:51.798819Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:12:51.798872Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:12:51.798919Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:674:2559] 2025-06-24T21:12:51.798965Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:12:51.798994Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:12:51.799018Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:51.799558Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 
2025-06-24T21:12:51.799696Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-24T21:12:51.799776Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:12:51.799825Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:12:51.799866Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:12:51.799910Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:12:51.799968Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-24T21:12:51.800005Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:12:51.800125Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:12:51.800149Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:12:51.800185Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:12:51.800216Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:12:51.800289Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:630:2534], serverId# [1:655:2549], sessionId# [0:0:0] 2025-06-24T21:12:51.800663Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:629:2533], serverId# [1:647:2543], sessionId# [0:0:0] 2025-06-24T21:12:51.800762Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T21:12:51.801035Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-24T21:12:51.801128Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-24T21:12:51.801657Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:12:51.801792Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:12:51.801855Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:12:51.803674Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:12:51.803754Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:12:51.814622Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:12:51.814754Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:12:51.815402Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-24T21:12:51.815483Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-24T21:12:51.972834Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:692:2571], serverId# [1:695:2574], sessionId# [0:0:0] 2025-06-24T21:12:51.973060Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:693:2572], serverId# [1:696:2575], sessionId# [0:0:0] 2025-06-24T21:12:51.993305Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 10 ... uckets per mediator 2 2025-06-24T21:13:09.577617Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:13:09.580873Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:13:09.580954Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:09.582487Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:13:09.582597Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:13:09.582682Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037889 time 0 2025-06-24T21:13:09.582718Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:13:09.583882Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 1000} 2025-06-24T21:13:09.583958Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:13:09.585280Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:13:09.585338Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:13:09.585410Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:13:09.585505Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:13:09.585577Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:13:09.585695Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit 
heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:09.588179Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:13:09.588255Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-24T21:13:09.588296Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037889 2025-06-24T21:13:09.588366Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037889 at tablet 72075186224037889 send result to client [4:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:13:09.588417Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:13:09.588499Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:13:09.589175Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:09.589273Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:13:09.597005Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:13:09.597264Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:13:09.597354Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:13:09.598111Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:13:09.598347Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-06-24T21:13:09.598403Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-24T21:13:09.634724Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:740:2611], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:09.634848Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:750:2616], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:09.634943Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:09.641639Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:13:09.650311Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:09.650454Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:13:09.700661Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:09.829762Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:09.829896Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:13:09.834149Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:754:2619], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:13:09.870826Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:825:2659] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:13:09.959905Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwk8803pm892gf0fdd2yhy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ZTEzNTNjOS0yNGE4ZmVkNC0xZWQ5YmQyZS1lN2FhNmY2Nw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:09.962760Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:894:2690], serverId# [4:895:2691], sessionId# [0:0:0] 2025-06-24T21:13:09.964108Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037889 2025-06-24T21:13:09.964468Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1750799589964356 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 42b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-24T21:13:09.964673Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-06-24T21:13:09.978848Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 42 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-24T21:13:09.978947Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:13:10.105522Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwk8mndt4xp7nkx03gzhxh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ZmI5YWU5OGEtMzdkYjU2NTAtODY0YWRhOTUtZDgxYjE2YzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:13:10.108021Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:3] at 72075186224037889 2025-06-24T21:13:10.108387Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1750799590108270 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-24T21:13:10.108608Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 3 Group: 1750799590108270 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 42b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-24T21:13:10.108775Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:3] at 72075186224037889, row count=1 2025-06-24T21:13:10.119980Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 3 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 42 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-24T21:13:10.120067Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:13:10.161907Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:935:2722], serverId# [4:936:2723], sessionId# [0:0:0] 2025-06-24T21:13:10.169032Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:937:2724], serverId# [4:938:2725], sessionId# [0:0:0] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::DeleteSingleRow [GOOD] Test command err: 2025-06-24T21:12:50.889898Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:12:50.890362Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:50.890543Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00476c/r3tmp/tmpyl6tN6/pdisk_1.dat 2025-06-24T21:12:51.356836Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:12:51.373345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:51.425926Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:51.434132Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799567837817 != 1750799567837821 2025-06-24T21:12:51.482272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:51.482447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:51.497866Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:51.608391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:51.669740Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:636:2537] 2025-06-24T21:12:51.670008Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:12:51.721185Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:12:51.721351Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:12:51.732856Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:12:51.732982Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:12:51.733061Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:12:51.735230Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:12:51.736508Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:12:51.736600Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:663:2537] in generation 1 2025-06-24T21:12:51.737302Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:639:2539] 2025-06-24T21:12:51.737536Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:12:51.747446Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:12:51.747578Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:12:51.749106Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-24T21:12:51.749178Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-24T21:12:51.749222Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-24T21:12:51.749505Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:12:51.749659Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:12:51.749719Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:670:2539] in generation 1 2025-06-24T21:12:51.760703Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:12:51.799323Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-24T21:12:51.799547Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:12:51.799673Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:673:2558] 2025-06-24T21:12:51.799713Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-24T21:12:51.799757Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-24T21:12:51.799801Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:12:51.800099Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:12:51.800142Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:12:51.800203Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:12:51.800259Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:674:2559] 2025-06-24T21:12:51.800311Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:12:51.800345Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:12:51.800369Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:51.800858Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 
2025-06-24T21:12:51.800982Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-24T21:12:51.801065Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:12:51.801115Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:12:51.801156Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:12:51.801211Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:12:51.801281Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-24T21:12:51.801331Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:12:51.801490Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:12:51.801524Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:12:51.801561Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:12:51.801594Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:12:51.801657Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:630:2534], serverId# [1:655:2549], sessionId# [0:0:0] 2025-06-24T21:12:51.802076Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:629:2533], serverId# [1:647:2543], sessionId# [0:0:0] 2025-06-24T21:12:51.802185Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T21:12:51.802442Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-24T21:12:51.802538Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-24T21:12:51.803179Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:12:51.803338Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:12:51.803405Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:12:51.805220Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:12:51.805304Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:12:51.816187Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:12:51.816312Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:12:51.816855Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-24T21:12:51.816916Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-24T21:12:51.971964Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:692:2571], serverId# [1:695:2574], sessionId# [0:0:0] 2025-06-24T21:12:51.972153Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:693:2572], serverId# [1:696:2575], sessionId# [0:0:0] 2025-06-24T21:12:51.975226Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 10 ... pp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:13:09.039330Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:13:09.039404Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:13:09.039483Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:13:09.039546Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:13:09.039648Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:09.040993Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:09.042980Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:13:09.043079Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:13:09.052905Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:13:09.109208Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:13:09.109398Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-24T21:13:09.109452Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-06-24T21:13:09.109487Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 
72075186224037888 2025-06-24T21:13:09.110013Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:09.135638Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:13:09.229784Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:09.352796Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 1500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:13:09.352872Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:09.353213Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:13:09.353269Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:13:09.353322Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1500:281474976715658] in PlanQueue unit at 72075186224037888 2025-06-24T21:13:09.353528Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1500:281474976715658 keys extracted: 0 2025-06-24T21:13:09.353661Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:13:09.353879Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:13:09.354671Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:13:09.408533Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-06-24T21:13:09.408627Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:13:09.408666Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:13:09.408711Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:09.408792Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:13:09.408853Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-06-24T21:13:09.408960Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:09.410984Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-06-24T21:13:09.411060Z node 4 :TX_DATASHARD DEBUG: 
datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:13:09.444206Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:837:2675], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:09.444315Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:847:2680], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:09.444393Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:09.449583Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:13:09.454519Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:09.633494Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:09.636559Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:851:2683], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:13:09.667885Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:907:2720] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:13:09.786210Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwk8226grkkmwaq8bd4pyv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NWE0OGU3NzMtYjVlNDNjOWItMTg5YTZjYzgtOTM4MjgyMTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:09.788828Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:938:2737], serverId# [4:939:2738], sessionId# [0:0:0] 2025-06-24T21:13:09.789372Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:3] at 72075186224037888 2025-06-24T21:13:09.789686Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1750799589789577 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-24T21:13:09.789913Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-06-24T21:13:09.801756Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-24T21:13:09.801839Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:09.891130Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwk8ddf2xp4bmh5z4dffa6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=Y2QzYjUxZDAtZjFiMTljY2QtYWFmZjdhMGUtMTZmMTQzZjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:13:09.893332Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:4] at 72075186224037888 2025-06-24T21:13:09.893651Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1750799589893533 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-24T21:13:09.893818Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:4] at 72075186224037888, row count=1 2025-06-24T21:13:09.904695Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-24T21:13:09.904768Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:09.906717Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:966:2756], serverId# [4:967:2757], sessionId# [0:0:0] 2025-06-24T21:13:09.913309Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:968:2758], serverId# [4:969:2759], sessionId# [0:0:0] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::CoverIndexedColumn [GOOD] Test command err: 2025-06-24T21:12:50.999524Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:12:51.000032Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:51.000231Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00475f/r3tmp/tmpOCWfpO/pdisk_1.dat 2025-06-24T21:12:51.388461Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:12:51.392675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:51.433452Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:51.438983Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799567837852 != 1750799567837856 2025-06-24T21:12:51.489723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:51.489849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:51.501250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:51.603746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:51.676380Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:636:2537] 2025-06-24T21:12:51.676597Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:12:51.735056Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:12:51.735377Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:12:51.737142Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:12:51.737243Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:12:51.737305Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:12:51.737669Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:12:51.738722Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:12:51.738808Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:663:2537] in generation 1 2025-06-24T21:12:51.739614Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:639:2539] 2025-06-24T21:12:51.739847Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:12:51.749838Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:12:51.749976Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:12:51.751482Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-24T21:12:51.751567Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-24T21:12:51.751615Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-24T21:12:51.751886Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:12:51.752037Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:12:51.752096Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:670:2539] in generation 1 2025-06-24T21:12:51.763035Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:12:51.794903Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-24T21:12:51.795132Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:12:51.795306Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:673:2558] 2025-06-24T21:12:51.795352Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-24T21:12:51.795389Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-24T21:12:51.795441Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:12:51.795703Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:12:51.795750Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:12:51.795809Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:12:51.795904Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:674:2559] 2025-06-24T21:12:51.795931Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:12:51.795954Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:12:51.795989Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:51.796560Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 
2025-06-24T21:12:51.796698Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-24T21:12:51.796795Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:12:51.796852Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:12:51.796893Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:12:51.796932Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:12:51.796991Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-24T21:12:51.797041Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:12:51.797197Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:12:51.797226Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:12:51.797278Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:12:51.797316Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:12:51.797382Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:630:2534], serverId# [1:655:2549], sessionId# [0:0:0] 2025-06-24T21:12:51.797792Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:629:2533], serverId# [1:647:2543], sessionId# [0:0:0] 2025-06-24T21:12:51.797917Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T21:12:51.798211Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-24T21:12:51.798370Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-24T21:12:51.799052Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:12:51.799199Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:12:51.799261Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:12:51.801264Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:12:51.801379Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:12:51.812362Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:12:51.812509Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:12:51.813033Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-24T21:12:51.813099Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-24T21:12:51.973106Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:692:2571], serverId# [1:695:2574], sessionId# [0:0:0] 2025-06-24T21:12:51.973305Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:693:2572], serverId# [1:696:2575], sessionId# [0:0:0] 2025-06-24T21:12:51.976689Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 10 ... BUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037889 at tablet 72075186224037889 send result to client [4:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:13:09.435476Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:13:09.435578Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:13:09.437044Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:13:09.437108Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:13:09.438372Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:13:09.438416Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:09.439136Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:13:09.439224Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:13:09.439277Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:13:09.439338Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:13:09.439402Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:13:09.439469Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:09.439561Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037890 time 0 2025-06-24T21:13:09.439590Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: 
[CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:13:09.439818Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037890 step# 1000} 2025-06-24T21:13:09.439867Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-24T21:13:09.441978Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-24T21:13:09.442021Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-24T21:13:09.442059Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037890 2025-06-24T21:13:09.442115Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037890 at tablet 72075186224037890 send result to client [4:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:13:09.442160Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037890 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:13:09.442225Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:13:09.443362Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:09.443515Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:13:09.443581Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-24T21:13:09.448476Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-06-24T21:13:09.448576Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-24T21:13:09.448895Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:13:09.449997Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:13:09.450361Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:13:09.450412Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:13:09.450523Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037890 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:13:09.452029Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037890 state Ready 2025-06-24T21:13:09.452116Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037890 Got TEvSchemaChangedResult 
from SS at 72075186224037890 2025-06-24T21:13:09.484737Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:789:2648], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:09.484855Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:799:2653], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:09.484944Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:09.490621Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:13:09.496985Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:09.497103Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:13:09.497184Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-24T21:13:09.551826Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:09.677922Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:09.678055Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:13:09.678124Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-24T21:13:09.681616Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:803:2656], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:13:09.717723Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:875:2697] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:13:09.862605Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwk83b7kpn48ddydpj77z3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NTJiYzA0NjMtNWVmMGE4ZjAtYTczNmJkMTAtMWQ5ZjMxY2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:09.865353Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:981:2741], serverId# [4:982:2742], sessionId# [0:0:0] 2025-06-24T21:13:09.865757Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037889 2025-06-24T21:13:09.866070Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1750799589865962 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 38b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-24T21:13:09.866293Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1750799589865962 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 42b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-24T21:13:09.866426Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-06-24T21:13:09.877681Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 38 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 42 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-24T21:13:09.877773Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:13:09.926252Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:988:2747], serverId# [4:989:2748], sessionId# [0:0:0] 2025-06-24T21:13:09.933918Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:990:2749], serverId# [4:991:2750], sessionId# [0:0:0] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-15 [GOOD] >> YdbIndexTable::MultiShardTableOneIndexDataColumn [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::MultiIndexedTableReplaceSingleRow [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-16 Test command err: 2025-06-24T21:12:50.873847Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: 
[WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:12:50.874293Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:50.874468Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00473c/r3tmp/tmpWHctWr/pdisk_1.dat 2025-06-24T21:12:51.364170Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:12:51.369780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:51.445097Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:51.459765Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799567838002 != 1750799567838006 2025-06-24T21:12:51.509961Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:51.510129Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:51.522101Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:51.604835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:51.664054Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:636:2537] 2025-06-24T21:12:51.664319Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:12:51.749121Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:12:51.749262Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:12:51.751039Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:12:51.751165Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:12:51.751225Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:12:51.751597Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:12:51.752727Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:12:51.752823Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:663:2537] in generation 1 2025-06-24T21:12:51.753424Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:639:2539] 2025-06-24T21:12:51.753648Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:12:51.763062Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:12:51.763209Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:12:51.764790Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-24T21:12:51.764865Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-24T21:12:51.764906Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-24T21:12:51.765225Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:12:51.765391Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:12:51.765456Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:670:2539] in generation 1 2025-06-24T21:12:51.779888Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:12:51.822398Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-24T21:12:51.822617Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:12:51.822755Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:673:2558] 2025-06-24T21:12:51.822809Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-24T21:12:51.822852Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-24T21:12:51.822902Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:12:51.823170Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:12:51.823215Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:12:51.823290Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:12:51.823346Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:674:2559] 2025-06-24T21:12:51.823397Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:12:51.823429Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:12:51.823453Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:51.824297Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 
2025-06-24T21:12:51.824449Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-24T21:12:51.824571Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:12:51.824656Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:12:51.824729Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:12:51.824799Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:12:51.824889Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-24T21:12:51.824955Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:12:51.825094Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:12:51.825119Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:12:51.825159Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:12:51.825191Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:12:51.825252Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:630:2534], serverId# [1:655:2549], sessionId# [0:0:0] 2025-06-24T21:12:51.825633Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:629:2533], serverId# [1:647:2543], sessionId# [0:0:0] 2025-06-24T21:12:51.825770Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T21:12:51.826066Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-24T21:12:51.826154Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-24T21:12:51.826776Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:12:51.826911Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:12:51.826993Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:12:51.828853Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:12:51.828945Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:12:51.839901Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:12:51.840027Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:12:51.840619Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-24T21:12:51.840697Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-24T21:12:51.993344Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:692:2571], serverId# [1:695:2574], sessionId# [0:0:0] 2025-06-24T21:12:51.993592Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:693:2572], serverId# [1:696:2575], sessionId# [0:0:0] 2025-06-24T21:12:51.998103Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 10 ... ATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037890 at tablet 72075186224037890 send result to client [4:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:13:09.544370Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037890 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:13:09.544450Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:13:09.545631Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:09.545738Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:13:09.545796Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-24T21:13:09.551729Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-06-24T21:13:09.551863Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-24T21:13:09.552195Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:13:09.553315Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:13:09.554807Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:13:09.554878Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:13:09.555041Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got 
TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037890 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:13:09.557338Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037890 state Ready 2025-06-24T21:13:09.557415Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037890 Got TEvSchemaChangedResult from SS at 72075186224037890 2025-06-24T21:13:09.598103Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:789:2648], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:09.598212Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:799:2653], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:09.598294Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:09.604815Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:13:09.612578Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:09.612708Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:13:09.612764Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-24T21:13:09.710528Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:09.832599Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:09.832723Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:13:09.832788Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-24T21:13:09.836865Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:803:2656], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:13:09.872759Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:875:2697] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:13:09.975338Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwk86wfvfxr53kpzv8by5n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ZGM4NzExNzgtM2ExZDg3ZWMtM2ZkOTk1MDktYWZlYTNlMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:09.977568Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:981:2741], serverId# [4:982:2742], sessionId# [0:0:0] 2025-06-24T21:13:09.977985Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037889 2025-06-24T21:13:09.978290Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1750799589978186 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-24T21:13:09.978465Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1750799589978186 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-24T21:13:09.978617Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-06-24T21:13:09.991949Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-24T21:13:09.992043Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:13:10.070793Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwk8kcafwveev2ch09axpb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NDA3MmVmY2ItYWNmNTQ0ZTAtOWU2YTBkOWYtZGYzODNjMGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:13:10.073079Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:3] at 72075186224037889 2025-06-24T21:13:10.073406Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 3 Group: 1750799590073290 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-24T21:13:10.073647Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 4 Group: 1750799590073290 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-24T21:13:10.073846Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 5 Group: 1750799590073290 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-24T21:13:10.073939Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 6 Group: 1750799590073290 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 24b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-24T21:13:10.074082Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:3] at 72075186224037889, row count=1 2025-06-24T21:13:10.087935Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 3 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 5 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 24 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-24T21:13:10.088037Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:13:10.128130Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:1030:2781], serverId# [4:1031:2782], sessionId# [0:0:0] 2025-06-24T21:13:10.135393Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:1032:2783], serverId# [4:1033:2784], sessionId# [0:0:0] >> YdbIndexTable::MultiShardTableOneIndexIndexOverlap >> CdcStreamChangeCollector::OldImage [GOOD] >> YdbIndexTable::MultiShardTableOneIndexPkOverlap [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-63 [GOOD] >> CdcStreamChangeCollector::SchemaChanges >> 
SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-64 >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-false >> SchemeReqAdminAccessInTenant::ClusterAdminCanAdministerTenant-StrictAclCheck [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAdministerTenant-DomainLoginOnly |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::TestServerlessComputeResourcesMode [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:13:10.837461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:13:10.837579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:13:10.837645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:13:10.837684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:13:10.837729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:13:10.837759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:13:10.837823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:13:10.837911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:13:10.838671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:13:10.839025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:13:10.925529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:13:10.925609Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:10.940169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:13:10.944763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:13:10.944962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: 
UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:13:10.953402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:13:10.953672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:13:10.954404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:10.954737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:13:10.957692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:13:10.957882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:13:10.959085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:13:10.959172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:13:10.959328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:13:10.959396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:13:10.959452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:13:10.959629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:13:10.966885Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:13:11.109571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:13:11.109840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:11.110076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:13:11.110135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:13:11.110442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 
72057594046678944 2025-06-24T21:13:11.110589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:13:11.115026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:11.115283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:13:11.115547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:11.115623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:13:11.115667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:13:11.115724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:13:11.118069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:11.118136Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:13:11.118207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:13:11.120192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:11.120247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:11.120302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:11.120374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:13:11.124365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:13:11.126878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:13:11.127131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 
4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:13:11.128287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:11.128500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:13:11.128559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:11.128876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:13:11.128937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:11.129115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:13:11.129274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:13:11.132017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:13:11.132081Z node 1 :FLAT_TX_SCHEMESHARD ... 
eshard_impl.cpp:6074: Update domain reply, message: Origin: 72075186233409546 TxId: 106, at schemeshard: 72057594046678944 2025-06-24T21:13:11.712110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 106, tablet: 72075186233409546, partId: 0 2025-06-24T21:13:11.712228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 106:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 106 2025-06-24T21:13:11.712303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:796: [72057594046678944] TSyncHive, operationId 106:0, HandleReply TEvUpdateDomainReply, from hive: 72075186233409546 2025-06-24T21:13:11.712372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 106:0 138 -> 240 2025-06-24T21:13:11.713012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-24T21:13:11.713100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:13:11.715907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-24T21:13:11.716087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-24T21:13:11.716137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 106:0 ProgressState 2025-06-24T21:13:11.716522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#106:0 progress is 1/1 2025-06-24T21:13:11.716569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T21:13:11.716614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#106:0 progress is 1/1 2025-06-24T21:13:11.716672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T21:13:11.716716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: true 2025-06-24T21:13:11.716762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-24T21:13:11.716812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 106:0 2025-06-24T21:13:11.716847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 106:0 2025-06-24T21:13:11.716959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-24T21:13:11.719472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-24T21:13:11.719529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests 
-- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-24T21:13:11.720101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-24T21:13:11.720274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-24T21:13:11.720320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:856:2736] TestWaitNotification: OK eventTxId 106 2025-06-24T21:13:11.721038Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:13:11.721264Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0" took 253us result status StatusSuccess 2025-06-24T21:13:11.721724Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ServerLess0" PathDescription { Self { Name: "ServerLess0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409551 SchemeShard: 72075186234409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SharedHive: 72075186233409546 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:13:11.722478Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186234409549 2025-06-24T21:13:11.722707Z node 1 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:50: Tablet 72075186234409549 describe path "/MyRoot/ServerLess0" took 196us result status StatusSuccess 2025-06-24T21:13:11.723130Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ServerLess0" PathDescription { Self { Name: "MyRoot/ServerLess0" PathId: 1 SchemeshardId: 72075186234409549 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409551 SchemeShard: 72075186234409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot/ServerLess0" } SharedHive: 72075186233409546 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72075186234409549, at schemeshard: 72075186234409549 2025-06-24T21:13:11.723894Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:13:11.724072Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0" took 185us result status StatusSuccess 2025-06-24T21:13:11.724462Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ServerLess0" PathDescription { Self { Name: "ServerLess0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409550 TimeCastBucketsPerMediator: 2 
Mediators: 72075186234409551 SchemeShard: 72075186234409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SharedHive: 72075186233409546 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:13:11.725064Z node 1 :HIVE INFO: tablet_helpers.cpp:1470: [72075186233409546] TEvRequestDomainInfo, 72057594046678944:3 |95.2%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_order/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::NewImage [GOOD] Test command err: 2025-06-24T21:12:50.970220Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:12:50.970706Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:50.970884Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004764/r3tmp/tmpKPSKJy/pdisk_1.dat 2025-06-24T21:12:51.359779Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:12:51.376188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:51.446437Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:51.450913Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1191: Update config MemoryLimit: 33554432 2025-06-24T21:12:51.451425Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799567838887 != 1750799567838891 2025-06-24T21:12:51.500943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:51.501080Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:51.516295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:51.607028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:51.667597Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:12:51.667908Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:12:51.733322Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:12:51.733737Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:12:51.735654Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:12:51.735743Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:12:51.735803Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:12:51.736157Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:12:51.736302Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:12:51.736416Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:12:51.747757Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:12:51.804699Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:12:51.804948Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:12:51.805098Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:12:51.805156Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:12:51.805196Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:12:51.805265Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:51.805795Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:12:51.805928Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:12:51.806066Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:12:51.806108Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:12:51.806169Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:12:51.806222Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:12:51.806316Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:12:51.806802Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:12:51.807137Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:12:51.807287Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:12:51.809102Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:12:51.821091Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:12:51.821228Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:12:51.986209Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:659:2549], serverId# [1:660:2550], 
sessionId# [0:0:0] 2025-06-24T21:12:51.992803Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:12:51.992905Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:51.993861Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:12:51.993915Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:12:51.994019Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:12:51.994243Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:12:51.994410Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:12:51.994764Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:12:51.994837Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:12:51.997510Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:12:51.998016Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:12:52.000237Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:12:52.000313Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:52.001337Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:12:52.001410Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:12:52.002290Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:12:52.002345Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:12:52.002408Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:12:52.002493Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:12:52.002557Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:12:52.002639Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:52.015254Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:12:52.017447Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:12:52.017548Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:12:52.018824Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:12:52.055386Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:12:52.055571Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-24T21:12:5 ... pp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:13:10.492905Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:13:10.492964Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:13:10.493040Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:13:10.493094Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:13:10.493184Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:10.494428Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:10.496254Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:13:10.496331Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:13:10.497296Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:13:10.542161Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:13:10.542344Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-24T21:13:10.542400Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-06-24T21:13:10.542442Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at 
tablet 72075186224037888 2025-06-24T21:13:10.543012Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:10.570775Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:13:10.666969Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:10.798822Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 1500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:13:10.798904Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:10.799496Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:13:10.799564Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:13:10.799621Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1500:281474976715658] in PlanQueue unit at 72075186224037888 2025-06-24T21:13:10.799861Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1500:281474976715658 keys extracted: 0 2025-06-24T21:13:10.800018Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:13:10.800265Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:13:10.801060Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:13:10.853472Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-06-24T21:13:10.853600Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:13:10.853656Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:13:10.853737Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:10.853820Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:13:10.853895Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-06-24T21:13:10.854007Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:10.856157Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-06-24T21:13:10.856248Z node 4 :TX_DATASHARD DEBUG: 
datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:13:10.954481Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:837:2675], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:10.954618Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:847:2680], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:10.954714Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:10.960423Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:13:10.967721Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:11.144705Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:11.147754Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:851:2683], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:13:11.174236Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:907:2720] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:13:11.249855Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwk9h81hxas83bas8e0nyd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YTFiOWY5OTItYmUwOWRlM2MtMjljN2Y1ZWEtODUwZWJiNTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:11.252904Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:938:2737], serverId# [4:939:2738], sessionId# [0:0:0] 2025-06-24T21:13:11.253349Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:3] at 72075186224037888 2025-06-24T21:13:11.253687Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1750799591253563 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 40b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-24T21:13:11.253914Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-06-24T21:13:11.265104Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 40 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-24T21:13:11.265208Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:11.359968Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwk9v5dpzyzych7rpbas4f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=MTlkNDVjYWMtMjk5NjY5MDItZmY1NTUwYTItMTAzYThiYjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:13:11.362204Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:4] at 72075186224037888 2025-06-24T21:13:11.362536Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1750799591362415 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 18b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-24T21:13:11.362692Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:4] at 72075186224037888, row count=1 2025-06-24T21:13:11.373729Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 18 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-24T21:13:11.373805Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:11.375941Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:966:2756], serverId# [4:967:2757], sessionId# [0:0:0] 2025-06-24T21:13:11.382749Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:968:2758], serverId# [4:969:2759], sessionId# [0:0:0] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::UpsertModifyDelete [GOOD] Test command err: 2025-06-24T21:12:50.873905Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:12:50.874393Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:50.874596Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00475e/r3tmp/tmpYoKCH3/pdisk_1.dat 2025-06-24T21:12:51.358334Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:12:51.369306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:51.444227Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:51.450005Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1191: Update config MemoryLimit: 33554432 2025-06-24T21:12:51.450524Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799567838569 != 1750799567838573 2025-06-24T21:12:51.500071Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:51.500228Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:51.512748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:51.603744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:51.683018Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:12:51.683365Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:12:51.748720Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:12:51.748925Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:12:51.750928Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:12:51.751032Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:12:51.751090Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:12:51.751596Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:12:51.751791Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:12:51.751899Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:12:51.762809Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:12:51.787692Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:12:51.789209Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:12:51.789406Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:12:51.789476Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:12:51.789521Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:12:51.789593Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:51.791129Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:12:51.791308Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:12:51.791384Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:12:51.791448Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:12:51.791514Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:12:51.791565Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:12:51.791673Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:12:51.793882Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:12:51.794250Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:12:51.794360Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:12:51.796334Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:12:51.807236Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:12:51.807371Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:12:51.957990Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:659:2549], serverId# [1:660:2550], 
sessionId# [0:0:0] 2025-06-24T21:12:51.968646Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:12:51.968742Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:51.969734Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:12:51.969799Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:12:51.969905Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:12:51.970145Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:12:51.970321Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:12:51.970723Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:12:51.970816Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:12:51.973928Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:12:51.975413Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:12:51.977131Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:12:51.977214Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:51.978233Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:12:51.978306Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:12:51.979118Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:12:51.979225Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:12:51.979302Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:12:51.979385Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:12:51.979441Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:12:51.979523Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:51.984017Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:12:51.986208Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:12:51.986275Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:12:51.986984Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:12:52.025244Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:12:52.025439Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-24T21:12:5 ... UG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:13:10.358168Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:13:10.358345Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-24T21:13:10.358400Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-06-24T21:13:10.358453Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-06-24T21:13:10.359025Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:10.384801Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:13:10.480215Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:10.616974Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 1500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:13:10.617059Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:10.617417Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:13:10.617477Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:13:10.617534Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1500:281474976715658] in PlanQueue unit at 72075186224037888 
2025-06-24T21:13:10.617773Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1500:281474976715658 keys extracted: 0 2025-06-24T21:13:10.617935Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:13:10.618170Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:13:10.619007Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:13:10.669674Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-06-24T21:13:10.669794Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:13:10.669841Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:13:10.669888Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:10.669972Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:13:10.670054Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-06-24T21:13:10.670160Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:10.672293Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-06-24T21:13:10.672381Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:13:10.721551Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:837:2675], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:10.721667Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:847:2680], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:10.721745Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:10.727998Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:13:10.741152Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:10.926240Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:10.929338Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:851:2683], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:13:10.959750Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:907:2720] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:13:11.088720Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwk99z9fzz8q4q2gqpnw8e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YWYxMTZjNTItNDI1YjllZGQtYTJiOWE3NDMtNTM1NGMwOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:11.090805Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:938:2737], serverId# [4:939:2738], sessionId# [0:0:0] 2025-06-24T21:13:11.091262Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:3] at 72075186224037888 2025-06-24T21:13:11.091499Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1750799591091416 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-24T21:13:11.091655Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-06-24T21:13:11.102660Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-24T21:13:11.102733Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:11.170781Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhwk9p33ft4fyezm7mjatc8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ZDVjY2MxYjYtNjE4OWJhMjctNTFmZDFhMGYtMzQ2NzA1Zjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:13:11.173227Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:4] at 72075186224037888 2025-06-24T21:13:11.173558Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1750799591173467 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 50b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-24T21:13:11.173735Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:4] at 72075186224037888, row count=1 2025-06-24T21:13:11.184763Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 50 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-24T21:13:11.184834Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:11.275815Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhwk9rm60bzhb0r2t0yrd4r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=Y2ZkMThiYzYtMjI1MGYyMDctNWMyYjA2ZDItY2Y1YTkyNDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:11.278030Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:5] at 72075186224037888 2025-06-24T21:13:11.278357Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 3 Group: 1750799591278245 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-24T21:13:11.278518Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:5] at 72075186224037888, row count=1 2025-06-24T21:13:11.289527Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 3 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-24T21:13:11.289591Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:11.291275Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:985:2767], serverId# [4:986:2768], sessionId# [0:0:0] 2025-06-24T21:13:11.297373Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:987:2769], serverId# [4:988:2770], sessionId# [0:0:0] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::IndexedPrimaryKeyInsertSingleRow [GOOD] Test command err: 2025-06-24T21:12:50.875856Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:12:50.876406Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:50.876610Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00477e/r3tmp/tmpMfldTh/pdisk_1.dat 2025-06-24T21:12:51.356866Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:12:51.369531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:51.453479Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:51.464269Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799567839437 != 1750799567839441 2025-06-24T21:12:51.513718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:51.513905Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:51.525851Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:51.623680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:51.703926Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:636:2537] 2025-06-24T21:12:51.704245Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:12:51.756862Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:12:51.757031Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:12:51.759050Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:12:51.759217Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:12:51.759302Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:12:51.759717Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:12:51.761073Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:12:51.761193Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:663:2537] in generation 1 2025-06-24T21:12:51.761972Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:639:2539] 2025-06-24T21:12:51.762210Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:12:51.772569Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:12:51.772716Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:12:51.774400Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-24T21:12:51.774483Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-24T21:12:51.774530Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-24T21:12:51.774862Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:12:51.775061Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:12:51.775130Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:670:2539] in generation 1 2025-06-24T21:12:51.786431Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:12:51.841382Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-24T21:12:51.841656Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:12:51.842522Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:673:2558] 2025-06-24T21:12:51.842598Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-24T21:12:51.842649Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-24T21:12:51.842702Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:12:51.843045Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:12:51.843097Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:12:51.843194Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:12:51.843261Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:674:2559] 2025-06-24T21:12:51.843338Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:12:51.843375Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:12:51.843403Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:51.844037Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 
2025-06-24T21:12:51.844170Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-24T21:12:51.844265Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:12:51.844319Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:12:51.844369Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:12:51.844426Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:12:51.844509Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-24T21:12:51.844559Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:12:51.844731Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:12:51.844772Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:12:51.844818Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:12:51.844853Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:12:51.844919Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:630:2534], serverId# [1:655:2549], sessionId# [0:0:0] 2025-06-24T21:12:51.845376Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:629:2533], serverId# [1:647:2543], sessionId# [0:0:0] 2025-06-24T21:12:51.845510Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T21:12:51.845867Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-24T21:12:51.845975Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-24T21:12:51.846620Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:12:51.846789Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:12:51.846858Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:12:51.848859Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:12:51.848957Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:12:51.860078Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:12:51.860239Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:12:51.860911Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-24T21:12:51.861010Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-24T21:12:52.021752Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:692:2571], serverId# [1:695:2574], sessionId# [0:0:0] 2025-06-24T21:12:52.022044Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:693:2572], serverId# [1:696:2575], sessionId# [0:0:0] 2025-06-24T21:12:52.027081Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 10 ... HARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:13:08.530796Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:13:08.530852Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037889 2025-06-24T21:13:08.531062Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:13:08.531232Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:13:08.531417Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:13:08.531482Z node 4 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 4] schema version# 1 2025-06-24T21:13:08.531995Z node 4 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:13:08.532463Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:13:08.533723Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:13:08.533791Z node 4 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037889 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:13:08.534209Z node 4 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037889 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:13:08.534597Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:13:08.537568Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got 
TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:13:08.537639Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:08.538959Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:13:08.539048Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:13:08.539111Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037889 time 0 2025-06-24T21:13:08.539161Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:13:08.539870Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 1000} 2025-06-24T21:13:08.539940Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:13:08.540941Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:13:08.540993Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:13:08.541048Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:13:08.541132Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:13:08.541187Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:13:08.541275Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:08.543008Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:13:08.543067Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-24T21:13:08.543101Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037889 2025-06-24T21:13:08.543176Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037889 at tablet 72075186224037889 send result to client [4:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:13:08.543224Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:13:08.543293Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:13:08.543837Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:08.543922Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:13:08.547885Z node 4 
:TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:13:08.548088Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:13:08.548162Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:13:08.548838Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:13:08.549013Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-06-24T21:13:08.549059Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-24T21:13:08.583092Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:740:2611], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:08.583285Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:750:2616], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:08.583384Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:08.589235Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:13:08.597262Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:08.597395Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:13:08.652139Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:08.771267Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:08.771429Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-24T21:13:08.775316Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:754:2619], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:13:08.813211Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:825:2659] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:13:08.888582Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwk7757vsybd79n9v428ec, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YTJmOWUxMzItZTA2YmIxZmQtNmY2ZTA5YWUtMjk4NDE2NTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.897845Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:894:2690], serverId# [4:895:2691], sessionId# [0:0:0] 2025-06-24T21:13:08.898198Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037889 2025-06-24T21:13:08.898473Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1750799588898364 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-24T21:13:08.898620Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-06-24T21:13:08.909648Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-24T21:13:08.909741Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:13:08.943133Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:901:2696], serverId# [4:902:2697], sessionId# [0:0:0] 2025-06-24T21:13:08.949299Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:903:2698], serverId# [4:904:2699], sessionId# [0:0:0] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::MultiShardTableOneIndexPkOverlap [GOOD] Test command err: Trying to start YDB, gRPC: 3224, MsgBus: 29193 2025-06-24T21:11:26.297666Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626537014953307:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:26.297737Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003fc4/r3tmp/tmpw1U2f9/pdisk_1.dat 2025-06-24T21:11:26.669437Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: 
Notification cookie mismatch for subscription [1:7519626537014953276:2079] 1750799486294150 != 1750799486294153 2025-06-24T21:11:26.686691Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3224, node 1 2025-06-24T21:11:26.760040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:26.760164Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:26.761863Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:26.875184Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:26.875207Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:26.875218Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:26.875872Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:27.304969Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29193 TClient is connected to server localhost:29193 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:27.873370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:27.897950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:11:28.066712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:28.247946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:28.350742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:29.748277Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626549899856799:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:29.748401Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.456685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.487248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.520129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.550335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.626076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.696476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.730261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.798577Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626554194824760:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.798641Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.798930Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626554194824765:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.802507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:30.813636Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626554194824767:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:11:30.916132Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626554194824818:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:31.297969Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626537014953307:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:31.298060Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:32.129432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:33.056643Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710673. Ctx: { T ... TBlYjMtNTBiNmJkOC1hYTI3Nzg2NC0zODM3NzdlYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.820979Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714465. Ctx: { TraceId: 01jyhwk7ec24s5tggf2rfe9h97, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmI2M2RhYjYtODE2YWJlNTctZDMxZjZiYzItNjViY2YyMzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.823110Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714466. Ctx: { TraceId: 01jyhwk7e8emrgh9gd1fahvgr8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzZiZTBlYjMtNTBiNmJkOC1hYTI3Nzg2NC0zODM3NzdlYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.838469Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714467. Ctx: { TraceId: 01jyhwk7f1a929jh4tp670752v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODE4ZDgzZjYtOTQ1ZThkNTQtZDlhY2U0ZWItMTUyNzBiNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.847872Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714469. Ctx: { TraceId: 01jyhwk7f1a929jh4tp670752v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODE4ZDgzZjYtOTQ1ZThkNTQtZDlhY2U0ZWItMTUyNzBiNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.848276Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714468. Ctx: { TraceId: 01jyhwk7f42e6wz3w5k8x9yxmv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDhlNzY0ODAtMjIwZDM3YmItMWFhOWJlYTAtMWM4ODhmMjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.852306Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714470. 
Ctx: { TraceId: 01jyhwk7f1a929jh4tp670752v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODE4ZDgzZjYtOTQ1ZThkNTQtZDlhY2U0ZWItMTUyNzBiNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.857138Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714471. Ctx: { TraceId: 01jyhwk7f42e6wz3w5k8x9yxmv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDhlNzY0ODAtMjIwZDM3YmItMWFhOWJlYTAtMWM4ODhmMjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.861421Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714472. Ctx: { TraceId: 01jyhwk7fk2vadnx91zyxts0mn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjkyYThkYjgtOWFiYTEyMmItZjdlODAxYTItYTY1Mjc0ZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.862029Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714473. Ctx: { TraceId: 01jyhwk7fn8mwmn10ny7s8cbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDVjYjI5MDItOTgxOGMzYWQtNzgwNTZmMC1mZjQ0MDRmMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.865856Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714474. Ctx: { TraceId: 01jyhwk7frc0cr893he7add0ha, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTkxZTQ5NDMtOWYxZjIxMjItY2UyZDlhZDYtYzJmZmYzNWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.871533Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714475. Ctx: { TraceId: 01jyhwk7fk2vadnx91zyxts0mn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjkyYThkYjgtOWFiYTEyMmItZjdlODAxYTItYTY1Mjc0ZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.876053Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714476. Ctx: { TraceId: 01jyhwk7fn8mwmn10ny7s8cbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDVjYjI5MDItOTgxOGMzYWQtNzgwNTZmMC1mZjQ0MDRmMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.921324Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714477. Ctx: { TraceId: 01jyhwk7h3ftjewvbf78csjhy5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzZiZTBlYjMtNTBiNmJkOC1hYTI3Nzg2NC0zODM3NzdlYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.922252Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714478. Ctx: { TraceId: 01jyhwk7h38y5x88cq8vs1ks11, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTkxZTQ5NDMtOWYxZjIxMjItY2UyZDlhZDYtYzJmZmYzNWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.930412Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714479. Ctx: { TraceId: 01jyhwk7h3frr2n6zx84cvc5rm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODE4ZDgzZjYtOTQ1ZThkNTQtZDlhY2U0ZWItMTUyNzBiNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.945890Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714480. 
Ctx: { TraceId: 01jyhwk7h7eefex9pz5msqemb4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjkyYThkYjgtOWFiYTEyMmItZjdlODAxYTItYTY1Mjc0ZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.947912Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714483. Ctx: { TraceId: 01jyhwk7h3frr2n6zx84cvc5rm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODE4ZDgzZjYtOTQ1ZThkNTQtZDlhY2U0ZWItMTUyNzBiNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.948939Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714482. Ctx: { TraceId: 01jyhwk7h38y5x88cq8vs1ks11, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTkxZTQ5NDMtOWYxZjIxMjItY2UyZDlhZDYtYzJmZmYzNWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.949053Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714481. Ctx: { TraceId: 01jyhwk7h3ftjewvbf78csjhy5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzZiZTBlYjMtNTBiNmJkOC1hYTI3Nzg2NC0zODM3NzdlYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.950886Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714484. Ctx: { TraceId: 01jyhwk7hn1aa295jzwygkz3rm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDhlNzY0ODAtMjIwZDM3YmItMWFhOWJlYTAtMWM4ODhmMjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.962498Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714485. Ctx: { TraceId: 01jyhwk7h7eefex9pz5msqemb4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjkyYThkYjgtOWFiYTEyMmItZjdlODAxYTItYTY1Mjc0ZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:08.973490Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714486. Ctx: { TraceId: 01jyhwk7hn1aa295jzwygkz3rm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDhlNzY0ODAtMjIwZDM3YmItMWFhOWJlYTAtMWM4ODhmMjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:09.005120Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714487. Ctx: { TraceId: 01jyhwk7m5cp1ktfc68yqq9f4m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTkxZTQ5NDMtOWYxZjIxMjItY2UyZDlhZDYtYzJmZmYzNWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:09.006658Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714488. Ctx: { TraceId: 01jyhwk7m71tnn2zr88bsxferz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmI2M2RhYjYtODE2YWJlNTctZDMxZjZiYzItNjViY2YyMzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:09.008303Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714489. Ctx: { TraceId: 01jyhwk7m42k0dy3ae0d8f2twq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODE4ZDgzZjYtOTQ1ZThkNTQtZDlhY2U0ZWItMTUyNzBiNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:09.009146Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714490. 
Ctx: { TraceId: 01jyhwk7m3ak9sqp8608ttfk4y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDVjYjI5MDItOTgxOGMzYWQtNzgwNTZmMC1mZjQ0MDRmMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-24T21:13:09.023733Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714491. Ctx: { TraceId: 01jyhwk7m3ak9sqp8608ttfk4y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDVjYjI5MDItOTgxOGMzYWQtNzgwNTZmMC1mZjQ0MDRmMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:09.030842Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714492. Ctx: { TraceId: 01jyhwk7m3ak9sqp8608ttfk4y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDVjYjI5MDItOTgxOGMzYWQtNzgwNTZmMC1mZjQ0MDRmMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:09.034976Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714493. Ctx: { TraceId: 01jyhwk7mf6q8ksdr35t8azw68, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjkyYThkYjgtOWFiYTEyMmItZjdlODAxYTItYTY1Mjc0ZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:09.046519Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714494. Ctx: { TraceId: 01jyhwk7mf6q8ksdr35t8azw68, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjkyYThkYjgtOWFiYTEyMmItZjdlODAxYTItYTY1Mjc0ZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-24T21:13:09.061628Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714495. Ctx: { TraceId: 01jyhwk7nd3xp9hfz75mrttsm7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzZiZTBlYjMtNTBiNmJkOC1hYTI3Nzg2NC0zODM3NzdlYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:09.068688Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714496. Ctx: { TraceId: 01jyhwk7m42k0dy3ae0d8f2twq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODE4ZDgzZjYtOTQ1ZThkNTQtZDlhY2U0ZWItMTUyNzBiNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-24T21:13:09.076895Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714497. Ctx: { TraceId: 01jyhwk7nd3xp9hfz75mrttsm7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzZiZTBlYjMtNTBiNmJkOC1hYTI3Nzg2NC0zODM3NzdlYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-true >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserPasswordBad [GOOD] >> YdbIndexTable::MultiShardTableOneUniqIndex [GOOD] >> YdbIndexTable::MultiShardTableOneUniqIndexDataColumn >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-39 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-40 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] Test command err: 2025-06-24T21:12:50.725990Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626895043178742:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:50.726526Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042d6/r3tmp/tmpFjFh1L/pdisk_1.dat 2025-06-24T21:12:51.192390Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:51.210162Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:51.210302Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 19881, node 1 2025-06-24T21:12:51.219464Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:51.384360Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:51.384390Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:51.384401Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:51.384528Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:51.726069Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:51.848676Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:51.852527Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:51.852565Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:51.854167Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:30584, port: 30584 2025-06-24T21:12:51.854267Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:51.915691Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:12:51.963582Z node 1 :LDAP_AUTH_PROVIDER DEBUG: 
ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:12:52.013583Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****Kn5g (58326073) () has now valid token of ldapuser@ldap 2025-06-24T21:12:54.258420Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626913559369427:2164];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:54.294399Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042d6/r3tmp/tmpXx6ur3/pdisk_1.dat 2025-06-24T21:12:54.517084Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:54.518508Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626913559369287:2079] 1750799574230313 != 1750799574230316 2025-06-24T21:12:54.533965Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:54.534059Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:54.536706Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11934, node 2 2025-06-24T21:12:54.698025Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:54.698055Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:54.698063Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:54.698192Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:54.939330Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:54.939671Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:54.939686Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:54.940432Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:24184, port: 24184 2025-06-24T21:12:54.940502Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:54.999684Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:12:55.051476Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:12:55.052045Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:12:55.052104Z node 2 
:LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:55.100031Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:55.147504Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:55.148639Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****rDpA (67CBD5F9) () has now valid token of ldapuser@ldap 2025-06-24T21:12:55.254772Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:58.524151Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519626929976540744:2194];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:58.524282Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042d6/r3tmp/tmpjI9lKA/pdisk_1.dat 2025-06-24T21:12:58.729997Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:58.731661Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519626929976540588:2079] 1750799578496953 != 1750799578496956 2025-06-24T21:12:58.747849Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:58.747928Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:58.756668Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6637, node 3 2025-06-24T21:12:58.819381Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:58.819413Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:58.819420Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:58.819558Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:58.989340Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:58.994236Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:58.994268Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:58.994949Z node 3 
:LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:11656, port: 11656 2025-06-24T21:12:58.995012Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:59.056126Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:12:59.103946Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****NK9Q (1DBF6F56) () has now valid token of ldapuser@ldap 2025-06-24T21:13:02.432739Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626947776170233:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:02.432792Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042d6/r3tmp/tmpFKp9Xx/pdisk_1.dat 2025-06-24T21:13:02.680404Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:02.681432Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519626947776170212:2079] 1750799582428679 != 1750799582428682 2025-06-24T21:13:02.692773Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:02.692844Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:02.694794Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14430, node 4 2025-06-24T21:13:02.732117Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:02.732148Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:02.732156Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:02.732294Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:02.883277Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:02.885838Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:02.885856Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:02.886572Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://qqq:7014 ldaps://localhost:7014 ldaps://localhost:11111, port: 7014 2025-06-24T21:13:02.886642Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:02.959627Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:03.003444Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: 
dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:13:03.007745Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:13:03.007810Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:03.051690Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:03.099413Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:03.100370Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****ikfQ (FF8FBD84) () has now valid token of ldapuser@ldap 2025-06-24T21:13:06.292428Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519626967407978123:2195];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042d6/r3tmp/tmp5n8JeX/pdisk_1.dat 2025-06-24T21:13:06.387045Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:13:06.477726Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:06.477829Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:06.482720Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:06.490192Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519626967407977948:2079] 1750799586201794 != 1750799586201797 2025-06-24T21:13:06.497463Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14347, node 5 2025-06-24T21:13:06.662622Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:06.662641Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:06.662650Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:06.662788Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:06.755348Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:06.758886Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:06.758915Z node 5 
:TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:06.759688Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:18933, port: 18933 2025-06-24T21:13:06.759753Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:06.819725Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-24T21:13:06.863870Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:13:06.864404Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:13:06.864453Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-24T21:13:06.911606Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-24T21:13:06.960921Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-24T21:13:06.968083Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****YnHw (B3EC8EDC) () has now valid token of ldapuser@ldap 2025-06-24T21:13:10.550956Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519626983055968721:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:10.576584Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042d6/r3tmp/tmp6sCzo9/pdisk_1.dat 2025-06-24T21:13:10.667161Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:10.671318Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519626983055968559:2079] 1750799590524122 != 1750799590524125 TServer::EnableGrpc on GrpcPort 27136, node 6 2025-06-24T21:13:10.688020Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:10.688106Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:10.692671Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:10.722584Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty 
maybe) 2025-06-24T21:13:10.722606Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:10.722614Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:10.722746Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:10.802018Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:10.805399Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:10.805430Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:10.806171Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:27978, port: 27978 2025-06-24T21:13:10.806269Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:10.859610Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: &(uid=ldapuser)(), attributes: memberOf 2025-06-24T21:13:10.859680Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter &(uid=ldapuser)() on server ldaps://localhost:27978. Bad search filter 2025-06-24T21:13:10.860117Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****KHMQ (078AFCA4) () has now permanent error message 'Could not login via LDAP (Could not perform search for filter &(uid=ldapuser)() on server ldaps://localhost:27978. Bad search filter)' >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientDoesNotProvideClientCerts [GOOD] >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-false [GOOD] >> TTopicReaderTests::TestRun_Read_Less_Messages_Than_Sent [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] Test command err: 2025-06-24T21:12:50.715922Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626898593370833:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:50.716001Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042dd/r3tmp/tmpWtBCkO/pdisk_1.dat 2025-06-24T21:12:51.155492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:51.155576Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:51.169534Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:51.192687Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16432, node 1 2025-06-24T21:12:51.385728Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-24T21:12:51.385765Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:51.385774Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:51.385950Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:51.739534Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:51.750890Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:51.751340Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:51.751361Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:51.752111Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:3130, port: 3130 2025-06-24T21:12:51.752911Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:51.760338Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:12:51.807659Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:12:51.856446Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****pkbw (41205D17) () has now valid token of ldapuser@ldap 2025-06-24T21:12:54.257063Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626912446320194:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:54.257105Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042dd/r3tmp/tmpSgvuXP/pdisk_1.dat 2025-06-24T21:12:54.567065Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:54.594916Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:54.595002Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:54.597546Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11046, node 2 2025-06-24T21:12:54.783831Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:54.783854Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:54.783862Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:54.783970Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
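
The LDAP_AUTH_PROVIDER traces above and below all follow the same sequence: a simple bind as the robot user, a subtree search for the login's memberOf (or groupDN) attribute, then transitive group resolution, first through the matching-rule-in-chain extensible match (OID 1.2.840.113556.1.4.1941) and, per the "Try to get nested groups - tree traversal" entries, follow-up searches over the parent groups' entryDn values. The UseInvalidSearchFilterBad cases fail at the first search because the filter `&(uid=ldapuser)()` is malformed under RFC 4515 syntax (no enclosing parentheses, empty component). The sketch below is an illustrative OpenLDAP-client rendering of that query pattern only; it is not YDB's implementation (which the trace attributes to ydb/core/security/ldap_auth_provider/ldap_auth_provider.cpp), and the host, port, bind DN and password are placeholders, not values from this run.

```cpp
// Illustrative sketch, assuming the OpenLDAP client API (libldap):
// bind -> user search -> nested-group search, mirroring the trace above.
#include <ldap.h>
#include <cstdio>
#include <cstring>

int main() {
    LDAP* ld = nullptr;
    if (ldap_initialize(&ld, "ldap://localhost:389") != LDAP_SUCCESS)  // placeholder URI
        return 1;
    int version = LDAP_VERSION3;
    ldap_set_option(ld, LDAP_OPT_PROTOCOL_VERSION, &version);

    // Simple bind as the robot (service) user, as in the "bind: bindDn: ..." entries.
    char password[] = "robot-password";  // placeholder credential
    berval cred;
    cred.bv_len = std::strlen(password);
    cred.bv_val = password;
    if (ldap_sasl_bind_s(ld, "cn=robouser,dc=search,dc=yandex,dc=net",
                         LDAP_SASL_SIMPLE, &cred, nullptr, nullptr, nullptr) != LDAP_SUCCESS)
        return 1;

    // Search 1: locate the user and read memberOf. The failing tests pass the
    // malformed filter "&(uid=ldapuser)()" here and get "Bad search filter".
    char attrMemberOf[] = "memberOf";
    char* attrs[] = {attrMemberOf, nullptr};
    LDAPMessage* res = nullptr;
    int rc = ldap_search_ext_s(ld, "dc=search,dc=yandex,dc=net", LDAP_SCOPE_SUBTREE,
                               "(uid=ldapuser)", attrs, 0,
                               nullptr, nullptr, nullptr, LDAP_NO_LIMIT, &res);
    if (rc != LDAP_SUCCESS) {
        std::fprintf(stderr, "user search failed: %s\n", ldap_err2string(rc));
        ldap_unbind_ext_s(ld, nullptr, nullptr);
        return 1;
    }
    ldap_msgfree(res);

    // Search 2: transitive membership in one query via the matching-rule-in-chain
    // extensible match (OID 1.2.840.113556.1.4.1941), as seen in the trace.
    rc = ldap_search_ext_s(ld, "dc=search,dc=yandex,dc=net", LDAP_SCOPE_SUBTREE,
                           "(member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net)",
                           nullptr, 0, nullptr, nullptr, nullptr, LDAP_NO_LIMIT, &res);
    if (rc == LDAP_SUCCESS)
        ldap_msgfree(res);
    ldap_unbind_ext_s(ld, nullptr, nullptr);
    return rc == LDAP_SUCCESS ? 0 : 1;
}
```
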
2025-06-24T21:12:54.957550Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:54.960712Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:54.960739Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:54.961364Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:7913, port: 7913 2025-06-24T21:12:54.961452Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:54.971722Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:12:55.019403Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:12:55.019927Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:12:55.020004Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:55.063492Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:55.111462Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:55.112314Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****PA8w (90BC747B) () has now valid token of ldapuser@ldap 2025-06-24T21:12:58.428885Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519626931447695997:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:58.431280Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042dd/r3tmp/tmpiKPnL4/pdisk_1.dat 2025-06-24T21:12:58.634843Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:58.634950Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:58.643256Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:58.662951Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10745, node 3 2025-06-24T21:12:58.800288Z node 3 
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:58.800310Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:58.800342Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:58.800468Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:59.034109Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:59.037499Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:59.037523Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:59.038047Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:25549, port: 25549 2025-06-24T21:12:59.038113Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:59.055751Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:12:59.099736Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****WRCg (731D5F7F) () has now valid token of ldapuser@ldap 2025-06-24T21:13:02.685879Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626948349418675:2184];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:02.749089Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042dd/r3tmp/tmpvUbLs3/pdisk_1.dat 2025-06-24T21:13:02.888658Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:02.901640Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:02.901727Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:02.903544Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519626948349418527:2079] 1750799582617320 != 1750799582617323 2025-06-24T21:13:02.905289Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25972, node 4 2025-06-24T21:13:03.097484Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:03.097514Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:03.097522Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:03.097698Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:03.259234Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for 
/Root keys 1 2025-06-24T21:13:03.261006Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:03.261035Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:03.261706Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://qqq:18852 ldap://localhost:18852 ldap://localhost:11111, port: 18852 2025-06-24T21:13:03.261783Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:03.274033Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:03.315520Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:13:03.316608Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:13:03.316654Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:03.359463Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:03.411419Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:03.412354Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****o2DQ (E5A20680) () has now valid token of ldapuser@ldap test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042dd/r3tmp/tmpj6hA8G/pdisk_1.dat 2025-06-24T21:13:06.978784Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:06.983394Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519626966073202500:2079] 1750799586698480 != 1750799586698483 2025-06-24T21:13:06.999398Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:13:07.000046Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:07.000140Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:07.003301Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11344, node 5 2025-06-24T21:13:07.159752Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken 
or outdated, will use file: (empty maybe) 2025-06-24T21:13:07.159776Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:07.159784Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:07.159905Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:07.292886Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:07.293223Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:07.293246Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:07.293992Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:62486, port: 62486 2025-06-24T21:13:07.294080Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:07.297913Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-24T21:13:07.343440Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:13:07.344520Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:13:07.344571Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-24T21:13:07.395470Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-24T21:13:07.443480Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-24T21:13:07.444431Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****-mBg (C2436FE8) () has now valid token of ldapuser@ldap 2025-06-24T21:13:10.695541Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519626985058385641:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:10.695619Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042dd/r3tmp/tmpqQBNpU/pdisk_1.dat 2025-06-24T21:13:10.845453Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:10.846763Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch 
for subscription [6:7519626985058385607:2079] 1750799590694669 != 1750799590694672 2025-06-24T21:13:10.872950Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:10.873042Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 64763, node 6 2025-06-24T21:13:10.878805Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:10.930633Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:10.930662Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:10.930671Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:10.930815Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:11.106195Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:11.109944Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:11.109975Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:11.110716Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:3155, port: 3155 2025-06-24T21:13:11.110779Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:11.120217Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: &(uid=ldapuser)(), attributes: memberOf 2025-06-24T21:13:11.120312Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter &(uid=ldapuser)() on server ldap://localhost:3155. Bad search filter 2025-06-24T21:13:11.120639Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****22qQ (A39E86DA) () has now permanent error message 'Could not login via LDAP (Could not perform search for filter &(uid=ldapuser)() on server ldap://localhost:3155. 
Bad search filter)' >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsDisableRequestToAD [GOOD] >> TSchemeShardServerLess::Fake [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-22 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-23 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:13:14.697189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:13:14.697304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:13:14.697353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:13:14.697389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:13:14.697430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:13:14.697478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:13:14.697538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:13:14.697619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:13:14.698377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:13:14.698742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:13:14.785341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:13:14.785403Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:14.802232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:13:14.802718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:13:14.802878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:13:14.811176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 
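
An aside on the repeated "Database not set, use /Root" KQP_EXECUTER entries near the top of this section: they are logged when a query context arrives with an empty Database field, so the executer falls back to the /Root default; in these suites that is expected and the queries still finish with status SUCCESS. For comparison, below is a minimal sketch of how a client would normally pin the database explicitly, assuming the YDB C++ SDK's NYdb::TDriverConfig and NYdb::NTable::TTableClient API (endpoint, database value, and header paths are assumptions; paths vary between SDK layouts) rather than the in-test harness used here.

```cpp
// Minimal sketch, not part of the test harness above: create a driver with an
// explicit database so the executer does not fall back to /Root.
#include <ydb/public/sdk/cpp/client/ydb_driver/driver.h>  // assumed in-tree path
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>

int main() {
    auto config = NYdb::TDriverConfig()
        .SetEndpoint("grpc://localhost:2136")   // placeholder endpoint
        .SetDatabase("/Root");                  // explicit database in every request context
    NYdb::TDriver driver(config);
    NYdb::NTable::TTableClient client(driver);

    auto sessionResult = client.GetSession().GetValueSync();
    if (!sessionResult.IsSuccess())
        return 1;
    auto session = sessionResult.GetSession();

    // A trivial data query; its context now carries the configured database.
    auto result = session.ExecuteDataQuery(
        "SELECT 1;",
        NYdb::NTable::TTxControl::BeginTx(
            NYdb::NTable::TTxSettings::SerializableRW()).CommitTx()
    ).GetValueSync();

    driver.Stop(true);
    return result.IsSuccess() ? 0 : 1;
}
```
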
2025-06-24T21:13:14.811455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:13:14.812114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:14.812431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:13:14.815740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:13:14.815948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:13:14.817212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:13:14.817293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:13:14.817549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:13:14.817609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:13:14.817664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:13:14.817772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:13:14.825134Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:13:14.938356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:13:14.938662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:14.938890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:13:14.938936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:13:14.939189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:13:14.939328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:13:14.945248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:14.945475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:13:14.945695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:14.945767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:13:14.945811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:13:14.945850Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:13:14.948353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:14.948433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:13:14.948498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:13:14.950722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:14.950775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:14.950832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:14.950893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:13:14.954615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:13:14.957050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:13:14.957272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 
72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:13:14.958387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:14.958533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:13:14.958585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:14.958863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:13:14.958929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:14.959087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:13:14.959193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:13:14.961667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:13:14.961713Z node 1 :FLAT_TX_SCHEMESHARD ... 
:13:15.664389Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 7 TxId_Deprecated: 7 TabletID: 72075186234409548 2025-06-24T21:13:15.664598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-06-24T21:13:15.664943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:13:15.667195Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 TabletID: 72075186234409547 2025-06-24T21:13:15.667774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 7 ShardOwnerId: 72057594046678944 ShardLocalIdx: 7, at schemeshard: 72057594046678944 2025-06-24T21:13:15.668051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 Forgetting tablet 72075186234409548 Forgetting tablet 72075186234409547 2025-06-24T21:13:15.670394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-06-24T21:13:15.670641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:13:15.672269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-24T21:13:15.672770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:13:15.672833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:13:15.672979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T21:13:15.673537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:13:15.673589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:13:15.673658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:13:15.678150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-24T21:13:15.678240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 
72057594046678944:5 tabletId 72075186234409546 2025-06-24T21:13:15.678339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-24T21:13:15.678365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186234409548 2025-06-24T21:13:15.678499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-24T21:13:15.678538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186234409547 2025-06-24T21:13:15.679034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:13:15.679178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-24T21:13:15.679534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-24T21:13:15.679581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-24T21:13:15.680178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-24T21:13:15.680287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-24T21:13:15.680324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:934:2793] TestWaitNotification: OK eventTxId 106 2025-06-24T21:13:15.681083Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0/dir/table0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:13:15.681332Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0/dir/table0" took 250us result status StatusPathDoesNotExist 2025-06-24T21:13:15.681517Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ServerLess0/dir/table0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ServerLess0/dir/table0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 
2025-06-24T21:13:15.682171Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:13:15.682373Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0" took 212us result status StatusPathDoesNotExist 2025-06-24T21:13:15.682536Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ServerLess0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ServerLess0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:13:15.683120Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:13:15.683432Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 280us result status StatusSuccess 2025-06-24T21:13:15.683951Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "SharedDB" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 
MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 wait until 72075186233409550 is deleted wait until 72075186233409551 is deleted wait until 72075186233409552 is deleted wait until 72075186233409553 is deleted 2025-06-24T21:13:15.684621Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409550 2025-06-24T21:13:15.684698Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409551 2025-06-24T21:13:15.684792Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409552 2025-06-24T21:13:15.684860Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409553 Deleted tabletId 72075186233409550 Deleted tabletId 72075186233409551 Deleted tabletId 72075186233409552 Deleted tabletId 72075186233409553 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserPasswordBad [GOOD] Test command err: 2025-06-24T21:12:50.718752Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626897436207341:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:50.718818Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042cb/r3tmp/tmpCO7mz9/pdisk_1.dat 2025-06-24T21:12:51.124081Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:51.148971Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:51.150578Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 13189, node 1 2025-06-24T21:12:51.220945Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:51.383895Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:51.383920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:51.383951Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:51.384084Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:51.712458Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:51.718946Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 
2025-06-24T21:12:51.719013Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:51.730593Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:5590, port: 5590 2025-06-24T21:12:51.730697Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:12:51.748647Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:51.791767Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:12:51.836832Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:12:51.839730Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:12:51.839789Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:51.883585Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:51.931501Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:51.933009Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****-qoQ (C9EF8ED7) () has now valid token of ldapuser@ldap 2025-06-24T21:12:51.933308Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:54.239440Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626913831873120:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:54.239477Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042cb/r3tmp/tmpSGQlgg/pdisk_1.dat 2025-06-24T21:12:54.596073Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:54.596132Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:54.598752Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:54.601730Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626913831873104:2079] 1750799574238782 != 1750799574238785 2025-06-24T21:12:54.621565Z node 2 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28454, node 2 2025-06-24T21:12:54.723867Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:54.723900Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:54.723909Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:54.724019Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:54.963235Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:54.965807Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:54.965820Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:54.966359Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:12324, port: 12324 2025-06-24T21:12:54.966410Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:12:54.974658Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:55.019885Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:12:55.071967Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****rKBA (F728AFF8) () has now valid token of ldapuser@ldap 2025-06-24T21:12:58.397586Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519626932484940510:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:58.400998Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042cb/r3tmp/tmpF3Cr0g/pdisk_1.dat 2025-06-24T21:12:58.576078Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:58.582685Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:58.582764Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:58.585615Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30400, node 3 2025-06-24T21:12:58.672520Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:58.672543Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:58.672550Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:58.672687Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:58.778564Z 
node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:58.780969Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:58.781000Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:58.781680Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://qqq:14988 ldap://localhost:14988 ldap://localhost:11111, port: 14988 2025-06-24T21:12:58.781738Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:12:58.798960Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:58.847550Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:12:58.895582Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:12:58.896415Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:12:58.896465Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:58.939569Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:58.987484Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:58.988593Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****k_2Q (C622B11F) () has now valid token of ldapuser@ldap 2025-06-24T21:13:02.695286Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626948504128806:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:02.695395Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042cb/r3tmp/tmpwXXZC0/pdisk_1.dat 2025-06-24T21:13:02.899293Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:02.900526Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519626948504128787:2079] 1750799582694785 != 1750799582694788 2025-06-24T21:13:02.910678Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:02.910759Z node 4 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:02.917789Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16265, node 4 2025-06-24T21:13:03.067888Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:03.067920Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:03.067931Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:03.068100Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:03.255305Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:03.258056Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:03.258087Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:03.258805Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:24109, port: 24109 2025-06-24T21:13:03.258893Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:13:03.269749Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:03.312172Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-24T21:13:03.360044Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****BHFA (0479C12E) () has now valid token of ldapuser@ldap 2025-06-24T21:13:07.212329Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519626971307365782:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:07.212429Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042cb/r3tmp/tmpaoq018/pdisk_1.dat 2025-06-24T21:13:07.482210Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:07.482470Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:07.484286Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:07.485443Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31670, node 5 2025-06-24T21:13:07.640026Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:07.640052Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:07.640060Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T21:13:07.640206Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:07.788192Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:07.791212Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:07.791244Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:07.792216Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:24753, port: 24753 2025-06-24T21:13:07.792298Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:13:07.826460Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=invalidRobouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:07.867686Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldap://localhost:24753. Invalid credentials 2025-06-24T21:13:07.868340Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****KUVA (D46C8ADB) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldap://localhost:24753. Invalid credentials)' 2025-06-24T21:13:11.545636Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519626988024369197:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:11.545698Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042cb/r3tmp/tmpFdMItk/pdisk_1.dat 2025-06-24T21:13:11.686569Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:11.688439Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519626988024369176:2079] 1750799591544996 != 1750799591544999 2025-06-24T21:13:11.707077Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:11.707207Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 18986, node 6 2025-06-24T21:13:11.714012Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:11.753296Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:11.753321Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:11.753330Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:11.753479Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:11.989198Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:11.993316Z node 6 :TICKET_PARSER TRACE: 
ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:11.993351Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:11.994129Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:25317, port: 25317 2025-06-24T21:13:11.994195Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:13:12.006455Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:12.051662Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:25317. Invalid credentials 2025-06-24T21:13:12.052144Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****c9cg (D542D334) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:25317. Invalid credentials)' ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientDoesNotProvideClientCerts [GOOD] Test command err: 2025-06-24T21:12:03.165308Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626696987751938:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:03.165434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00443a/r3tmp/tmpDk6Ndq/pdisk_1.dat 2025-06-24T21:12:03.831657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:03.831819Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:03.841369Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:03.858210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8032, node 1 2025-06-24T21:12:04.187765Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:04.308493Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:04.308517Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:04.308524Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:04.308632Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10303 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:04.742366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:04.848103Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket C7FC14956B2B8EEB67F5626F64D0916A35660ED7E6B9DEF2F80DD78D6595566E (ipv6:[::1]:57080) has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-24T21:12:05.051926Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:57096) has now valid token of root@builtin 2025-06-24T21:12:05.217046Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-24T21:12:05.217076Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:05.217090Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T21:12:05.217141Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-24T21:12:08.928887Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626715437450426:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:08.928933Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00443a/r3tmp/tmpneVcJj/pdisk_1.dat 2025-06-24T21:12:09.312307Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:09.347315Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:09.347407Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:09.354760Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24945, node 4 2025-06-24T21:12:09.516975Z node 4 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:09.517000Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:09.517008Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:09.517133Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65404 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:09.796569Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:09.913942Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket C7FC14956B2B8EEB67F5626F64D0916A35660ED7E6B9DEF2F80DD78D6595566E (ipv6:[::1]:41942) has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-24T21:12:09.972913Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:10.002163Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:41966) has now valid token of root@builtin 2025-06-24T21:12:10.077637Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-24T21:12:10.077663Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:10.077671Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T21:12:10.077701Z node 4 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-24T21:12:13.927943Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626736564386856:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:13.928002Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00443a/r3tmp/tmp8DB1y4/pdisk_1.dat 2025-06-24T21:12:14.283473Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:14.292716Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:14.292803Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:14.307942Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12645, node 7 2025-06-24T21:12:14.523921Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:14.523948Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:14.523956Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:14.524093Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10886 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:14.816997Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called ... NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:49.314045Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:49.314067Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:49.314209Z node 22 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31261 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:49.675050Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:50.029944Z node 22 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:54.027452Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[22:7519626892381712905:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:54.027588Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:59.923675Z node 22 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:55812) has now valid token of root@builtin 2025-06-24T21:13:00.014572Z node 22 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-24T21:13:00.014616Z node 22 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:00.014631Z node 22 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T21:13:00.014684Z node 22 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-24T21:13:02.127610Z node 25 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[25:7519626947699084594:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:02.135015Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00443a/r3tmp/tmpTWgMSQ/pdisk_1.dat 2025-06-24T21:13:02.540176Z node 25 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:02.571745Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:02.571876Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:02.581377Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15984, node 25 2025-06-24T21:13:02.856175Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:02.856203Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:02.856216Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:02.856410Z node 25 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:03.159301Z node 25 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18129 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:13:03.478055Z node 25 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:13:03.708360Z node 25 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:54242) has now valid token of root@builtin 2025-06-24T21:13:03.809480Z node 25 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-24T21:13:03.809523Z node 25 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:03.809538Z node 25 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T21:13:03.809588Z node 25 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-24T21:13:09.887515Z node 28 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[28:7519626979654144118:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:09.887600Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00443a/r3tmp/tmpYN7ofh/pdisk_1.dat 2025-06-24T21:13:10.147860Z node 28 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:10.161101Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:10.161209Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:10.169677Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3386, node 28 2025-06-24T21:13:10.181570Z node 28 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 28 Type# 268639257 2025-06-24T21:13:10.301197Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty 
maybe) 2025-06-24T21:13:10.301221Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:10.301234Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:10.301396Z node 28 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3308 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:13:10.750154Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:13:10.911938Z node 28 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:10.968230Z node 28 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:48822) has now valid token of root@builtin 2025-06-24T21:13:11.059902Z node 28 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-24T21:13:11.059940Z node 28 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:11.059962Z node 28 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-24T21:13:11.060013Z node 28 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-true [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::Fake [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsDisableRequestToAD [GOOD] Test command err: 2025-06-24T21:12:53.427247Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626908796274474:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:53.427456Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00427b/r3tmp/tmpdNRc9k/pdisk_1.dat 2025-06-24T21:12:53.747909Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:53.750800Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626908796274456:2079] 1750799573426608 != 1750799573426611 TServer::EnableGrpc on GrpcPort 25757, node 1 2025-06-24T21:12:53.827922Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:53.828027Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:53.830588Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:53.835973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:53.835997Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:53.836005Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:53.836191Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:54.015950Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:54.020055Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:54.020102Z node 1 :TICKET_PARSER TRACE: 
ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:54.021634Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://unavailablehost:24351, port: 24351 2025-06-24T21:12:54.021787Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:12:54.026656Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:184: Could not start TLS. Can't contact LDAP server 2025-06-24T21:12:54.027314Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****DK1g (E43D2B61) () has now retryable error message 'Could not login via LDAP (Could not start TLS. Can't contact LDAP server)' 2025-06-24T21:12:54.027615Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:54.027637Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:54.028425Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://unavailablehost:24351, port: 24351 2025-06-24T21:12:54.028533Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:12:54.044578Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:184: Could not start TLS. Can't contact LDAP server 2025-06-24T21:12:54.044731Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****DK1g (E43D2B61) () has now retryable error message 'Could not login via LDAP (Could not start TLS. Can't contact LDAP server)' 2025-06-24T21:12:57.153892Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626928485285379:2142];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00427b/r3tmp/tmpn4iuzr/pdisk_1.dat 2025-06-24T21:12:57.335520Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:57.422152Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626928485285273:2079] 1750799577130688 != 1750799577130691 2025-06-24T21:12:57.460452Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:57.460552Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:57.461954Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62720, node 2 2025-06-24T21:12:57.488311Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:57.490942Z node 2 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-24T21:12:57.491539Z node 2 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-24T21:12:57.579298Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:57.579333Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:57.579341Z node 2 :NET_CLASSIFIER 
WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:57.579485Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:57.899296Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:57.900490Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:57.900518Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:57.901332Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****gj6w (4EC00624) () has now permanent error message 'Could not login via LDAP (List of ldap server hosts is empty)' 2025-06-24T21:13:01.292223Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519626945772331227:2194];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:01.307708Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00427b/r3tmp/tmpX0nyuQ/pdisk_1.dat 2025-06-24T21:13:01.541933Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:01.542026Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:01.542253Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:01.545384Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519626945772331061:2079] 1750799581229693 != 1750799581229696 2025-06-24T21:13:01.560124Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32004, node 3 2025-06-24T21:13:01.656133Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:01.656160Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:01.656167Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:01.656297Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:01.727777Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:01.731134Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:01.731172Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:01.732062Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****MYwg (9511F1C9) () has now permanent error message 'Could not login via LDAP (Parameter BaseDn is empty)' 2025-06-24T21:13:04.891286Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626955489535735:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:04.891325Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00427b/r3tmp/tmpcEFrGf/pdisk_1.dat 2025-06-24T21:13:05.110774Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:05.112267Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519626955489535712:2079] 1750799584890093 != 1750799584890096 2025-06-24T21:13:05.121948Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:05.122041Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:05.124516Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1709, node 4 2025-06-24T21:13:05.207830Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:05.207855Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:05.207863Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:05.208027Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:05.376498Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:05.378696Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:05.378716Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:05.379571Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****laWA (1537E1A0) () has now permanent error message 'Could not login via LDAP (Parameter BindDn is empty)' 2025-06-24T21:13:08.841846Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519626975100311324:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:08.841891Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00427b/r3tmp/tmpgsH8Ic/pdisk_1.dat 2025-06-24T21:13:09.034690Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:09.037014Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519626975100311305:2079] 1750799588841067 != 1750799588841070 2025-06-24T21:13:09.054031Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:09.054158Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, 
(0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 18456, node 5 2025-06-24T21:13:09.055833Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:09.091893Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:09.091920Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:09.091928Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:09.092069Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:09.267331Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:09.270609Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:09.270638Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:09.271463Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****n_tA (FB2B7DA3) () has now permanent error message 'Could not login via LDAP (Parameter BindPassword is empty)' 2025-06-24T21:13:12.483479Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519626993391555171:2149];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00427b/r3tmp/tmpqQZbu6/pdisk_1.dat 2025-06-24T21:13:12.525919Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:13:12.589490Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519626993391555047:2079] 1750799592472127 != 1750799592472130 2025-06-24T21:13:12.598392Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21737, node 6 2025-06-24T21:13:12.606911Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:12.607004Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:12.609026Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:12.648967Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:12.648989Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:12.648997Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:12.649145Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:12.843430Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:12.847068Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db 
/Root, DomainLoginOnly 1 2025-06-24T21:13:12.847104Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:12.847969Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:12518, port: 12518 2025-06-24T21:13:12.848070Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:12.903810Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:12.952421Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****cm9Q (87B3EA71) () has now valid token of ldapuser@ldap ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:13:15.465240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:13:15.465359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:13:15.465419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:13:15.465457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:13:15.465521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:13:15.465553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:13:15.465610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:13:15.465694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:13:15.466456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:13:15.466817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:13:15.557420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:13:15.557485Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 
2025-06-24T21:13:15.575537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:13:15.576012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:13:15.576200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:13:15.600053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:13:15.600309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:13:15.601046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:15.601410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:13:15.604548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:13:15.604742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:13:15.605684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:13:15.605733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:13:15.605899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:13:15.605941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:13:15.605976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:13:15.606054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:13:15.612694Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:13:15.769455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:13:15.769808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:15.770097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:13:15.770160Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:13:15.770426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:13:15.770567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:13:15.774620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:15.774893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:13:15.775071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:15.775160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:13:15.775226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:13:15.775267Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:13:15.777881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:15.777953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:13:15.778024Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:13:15.780397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:15.780459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:15.780529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:15.780587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:13:15.784598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:13:15.786696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:13:15.786854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:13:15.787854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:15.788024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:13:15.788091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:15.788420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:13:15.788488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:15.788643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:13:15.788716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:13:15.790750Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:13:15.790789Z node 1 :FLAT_TX_SCHEMESHARD ... 
:13:16.621647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-06-24T21:13:16.622030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:13:16.623556Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 7 TxId_Deprecated: 7 TabletID: 72075186234409551 2025-06-24T21:13:16.624411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 7 ShardOwnerId: 72057594046678944 ShardLocalIdx: 7, at schemeshard: 72057594046678944 2025-06-24T21:13:16.624694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:13:16.625457Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 TabletID: 72075186234409550 Forgetting tablet 72075186234409551 Forgetting tablet 72075186234409550 2025-06-24T21:13:16.626826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-06-24T21:13:16.627125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:13:16.627786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-24T21:13:16.628740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:13:16.628808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:13:16.628940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T21:13:16.629705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:13:16.629766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:13:16.629840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:13:16.633430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-24T21:13:16.633500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 
72057594046678944:5 tabletId 72075186234409549 2025-06-24T21:13:16.633695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-24T21:13:16.633727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186234409551 2025-06-24T21:13:16.633789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-24T21:13:16.633845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186234409550 2025-06-24T21:13:16.635512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:13:16.635663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-24T21:13:16.636073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-24T21:13:16.636121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-24T21:13:16.636678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-24T21:13:16.636786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-24T21:13:16.636846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:948:2808] TestWaitNotification: OK eventTxId 106 2025-06-24T21:13:16.637501Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0/dir/table0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:13:16.637719Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0/dir/table0" took 260us result status StatusPathDoesNotExist 2025-06-24T21:13:16.637978Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ServerLess0/dir/table0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ServerLess0/dir/table0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 
2025-06-24T21:13:16.638580Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:13:16.638754Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0" took 184us result status StatusPathDoesNotExist 2025-06-24T21:13:16.638899Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ServerLess0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ServerLess0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:13:16.639508Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:13:16.639728Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 200us result status StatusSuccess 2025-06-24T21:13:16.640207Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "SharedDB" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 
MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 wait until 72075186234409549 is deleted wait until 72075186234409550 is deleted wait until 72075186234409551 is deleted wait until 72075186234409552 is deleted 2025-06-24T21:13:16.640918Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186234409549 2025-06-24T21:13:16.641027Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186234409550 2025-06-24T21:13:16.641076Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186234409551 2025-06-24T21:13:16.641126Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186234409552 Deleted tabletId 72075186234409549 Deleted tabletId 72075186234409550 Deleted tabletId 72075186234409551 Deleted tabletId 72075186234409552 ------- [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicReaderTests::TestRun_Read_Less_Messages_Than_Sent [GOOD] Test command err: === Starting PQ server === Server->StartServer(false); 2025-06-24T21:12:29.450857Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626806133745468:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:29.450927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:29.569178Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626804870321026:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:29.569218Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:29.874282Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:12:29.893670Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045c5/r3tmp/tmpo43jOu/pdisk_1.dat 2025-06-24T21:12:30.467489Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:30.626473Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:30.683814Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:30.699445Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:30.753103Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:30.784402Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:30.784486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:30.785957Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:30.786005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:30.793585Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:12:30.793721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:30.795925Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23292, node 1 2025-06-24T21:12:31.332058Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0045c5/r3tmp/yandex5fP6Z0.tmp 2025-06-24T21:12:31.332088Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0045c5/r3tmp/yandex5fP6Z0.tmp 2025-06-24T21:12:31.335365Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0045c5/r3tmp/yandex5fP6Z0.tmp 2025-06-24T21:12:31.335947Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:31.808848Z INFO: TTestServer started on Port 19049 GrpcPort 23292 TClient is connected to server localhost:19049 PQClient connected to localhost:23292 === TenantModeEnabled() = 0 === Init PQ - start server on port 23292 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:32.336298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:12:32.339450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:12:32.343324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T21:12:32.343367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976710657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T21:12:32.346628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:12:32.346724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:32.378734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T21:12:32.382558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T21:12:32.382780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:12:32.382811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T21:12:32.382929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-24T21:12:32.382944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 2 -> 3 waiting... 
2025-06-24T21:12:32.388344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:12:32.388394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T21:12:32.388412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 3 -> 128 2025-06-24T21:12:32.390926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:12:32.390972Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:12:32.391008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-24T21:12:32.391067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-24T21:12:32.404516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:12:32.405115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:12:32.405129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-24T21:12:32.405147Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:12:32.412996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-24T21:12:32.416723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-24T21:12:32.416951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750799552460, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T21:12:32.417079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750799552460 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T21:12:32.417101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-24T21:12:32.423752Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 128 -> 240 2025-06-24T21:12:32.423843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomai ... } } 2025-06-24T21:13:14.088656Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 got read request: guid# 44ad43b4-c54d653b-c693cff9-ffb4f591 2025-06-24T21:13:14.091691Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:1315: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 assign: record# { Partition: 0 TabletId: 72075186224037897 Topic: "rt3.dc1--topic1" Generation: 1 Step: 1 Session: "shared/cli_5_1_8711044324281813697_v1" ClientId: "cli" PipeClient { RawX1: 7519627001029620091 RawX2: 4503621102209572 } Path: "/Root/PQ/rt3.dc1--topic1" } 2025-06-24T21:13:14.091791Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:1132: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 INITING TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) 2025-06-24T21:13:14.092170Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:972: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) pipe restart attempt 0 pipe creation result: OK TabletId: 72075186224037897 Generation: 1, pipe: [5:7519627001029620093:2599] 2025-06-24T21:13:14.092288Z node 5 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: shared/cli_5_1_8711044324281813697_v1:1 with generation 1 2025-06-24T21:13:14.095755Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 0 event { CmdGetClientOffsetResult { Offset: 0 EndOffset: 3 SizeLag: 280 WriteTimestampEstimateMS: 1750799594075 ClientHasAnyCommits: false } Cookie: 18446744073709551615 } 2025-06-24T21:13:14.095800Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:683: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 INIT DONE TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 readOffset 0 committedOffset 0 2025-06-24T21:13:14.095867Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1413: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 sending to client partition status 2025-06-24T21:13:14.096696Z :INFO: [] [] [7b54259f-7c35ed13-d1b8025e-98ff3e58] [] Confirm partition stream create. Partition stream id: 1. Cluster: "-". Topic: "/topic1". Partition: 0. 
Read offset: (NULL) 2025-06-24T21:13:14.097285Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 grpc read done: success# 1, data# { start_partition_session_response { partition_session_id: 1 } } 2025-06-24T21:13:14.097418Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:533: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 got StartRead from client: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 0, commitOffset# (empty maybe) 2025-06-24T21:13:14.097471Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:1012: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 Start reading TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 readOffset 0 committedOffset 0 clientCommitOffset (empty maybe) clientReadOffset 0 2025-06-24T21:13:14.097501Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:958: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) ready for read with readOffset 0 endOffset 3 2025-06-24T21:13:14.097603Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2309: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 partition ready for read: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 0, endOffset# 3, WTime# 0, sizeLag# 280 2025-06-24T21:13:14.097620Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2320: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1TEvPartitionReady. Aval parts: 1 2025-06-24T21:13:14.097679Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2243: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 performing read request: guid# 37041c67-3c67741f-660dd4de-c4065866, from# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), count# 3, size# 336, partitionsAsked# 1, maxTimeLag# 0ms 2025-06-24T21:13:14.097744Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1384: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 READ FROM TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1)maxCount 3 maxSize 336 maxTimeLagMs 0 readTimestampMs 0 readOffset 0 EndOffset 3 ClientCommitOffset 0 committedOffset 0 Guid 37041c67-3c67741f-660dd4de-c4065866 2025-06-24T21:13:14.099515Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { CmdReadResult { MaxOffset: 3 Result { Offset: 0 Data: "... 79 bytes ..." SourceId: "\000source1" SeqNo: 1 WriteTimestampMS: 1750799593970 CreateTimestampMS: 1750799593967 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 1 Data: "... 79 bytes ..." SourceId: "\000source1" SeqNo: 2 WriteTimestampMS: 1750799593974 CreateTimestampMS: 1750799593968 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 2 Data: "... 79 bytes ..." 
SourceId: "\000source1" SeqNo: 3 WriteTimestampMS: 1750799593975 CreateTimestampMS: 1750799593968 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 2 SizeLag: 18446744073709551530 RealReadOffset: 2 WaitQuotaTimeMs: 0 EndOffset: 3 StartOffset: 0 } Cookie: 0 } 2025-06-24T21:13:14.099648Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset 3 2025-06-24T21:13:14.099681Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 ReadOffset 3 ReadGuid 37041c67-3c67741f-660dd4de-c4065866 has messages 1 2025-06-24T21:13:14.099765Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 read done: guid# 37041c67-3c67741f-660dd4de-c4065866, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 490 2025-06-24T21:13:14.099799Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 response to read: guid# 37041c67-3c67741f-660dd4de-c4065866 2025-06-24T21:13:14.099997Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 Process answer. Aval parts: 0 2025-06-24T21:13:14.100327Z :DEBUG: [] [] [7b54259f-7c35ed13-d1b8025e-98ff3e58] [] Got ReadResponse, serverBytesSize = 490, now ReadSizeBudget = 0, ReadSizeServerDelta = 52428310 2025-06-24T21:13:14.100488Z :DEBUG: [] [] [7b54259f-7c35ed13-d1b8025e-98ff3e58] [] In ContinueReadingDataImpl, ReadSizeBudget = 0, ReadSizeServerDelta = 52428310 2025-06-24T21:13:14.100706Z :DEBUG: [] Decompression task done. Partition/PartitionSessionId: 1 (0-2) 2025-06-24T21:13:14.100768Z :DEBUG: [] [] [7b54259f-7c35ed13-d1b8025e-98ff3e58] [] Returning serverBytesSize = 490 to budget 2025-06-24T21:13:14.100815Z :DEBUG: [] [] [7b54259f-7c35ed13-d1b8025e-98ff3e58] [] In ContinueReadingDataImpl, ReadSizeBudget = 490, ReadSizeServerDelta = 52428310 2025-06-24T21:13:14.101018Z :DEBUG: [] [] [7b54259f-7c35ed13-d1b8025e-98ff3e58] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 2025-06-24T21:13:14.101199Z :DEBUG: [] Take Data. Partition 0. Read: {0, 0} (0-0) 2025-06-24T21:13:14.101264Z :DEBUG: [] Take Data. Partition 0. Read: {1, 0} (1-1) 2025-06-24T21:13:14.101301Z :DEBUG: [] Take Data. Partition 0. Read: {2, 0} (2-2) 2025-06-24T21:13:14.101351Z :DEBUG: [] [] [7b54259f-7c35ed13-d1b8025e-98ff3e58] [] The application data is transferred to the client. 
Number of messages 3, size 24 bytes 2025-06-24T21:13:14.101243Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 grpc read done: success# 1, data# { read_request { bytes_size: 490 } } 2025-06-24T21:13:14.101332Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 got read request: guid# 7577f853-40d7eb47-1df86b54-188d8b 2025-06-24T21:13:14.101417Z :DEBUG: [] [] [7b54259f-7c35ed13-d1b8025e-98ff3e58] [] Returning serverBytesSize = 0 to budget 2025-06-24T21:13:14.101614Z :INFO: [] [] [7b54259f-7c35ed13-d1b8025e-98ff3e58] Closing read session. Close timeout: 0.000000s 2025-06-24T21:13:14.101665Z :INFO: [] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic1:0:1:2:0 2025-06-24T21:13:14.101717Z :INFO: [] [] [7b54259f-7c35ed13-d1b8025e-98ff3e58] Counters: { Errors: 0 CurrentSessionLifetimeMs: 25 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 24 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:13:14.101836Z :NOTICE: [] [] [7b54259f-7c35ed13-d1b8025e-98ff3e58] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-24T21:13:14.101885Z :DEBUG: [] [] [7b54259f-7c35ed13-d1b8025e-98ff3e58] [] Abort session to cluster 2025-06-24T21:13:14.102972Z :NOTICE: [] [] [7b54259f-7c35ed13-d1b8025e-98ff3e58] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:13:14.103299Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 grpc read done: success# 0, data# { } 2025-06-24T21:13:14.103327Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 grpc read failed 2025-06-24T21:13:14.103362Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:1645: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 closed 2025-06-24T21:13:14.103783Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/cli session shared/cli_5_1_8711044324281813697_v1 is DEAD 2025-06-24T21:13:14.104407Z node 5 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/cli_5_1_8711044324281813697_v1 2025-06-24T21:13:14.104577Z node 6 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037898][rt3.dc1--topic1] pipe [5:7519627001029620091:2596] disconnected; active server actors: 1 2025-06-24T21:13:14.104604Z node 6 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037898][rt3.dc1--topic1] pipe [5:7519627001029620091:2596] client cli disconnected session shared/cli_5_1_8711044324281813697_v1 >> TSchemeShardServerLess::TestServerlessComputeResourcesModeFeatureFlag >> TopicAutoscaling::CDC_PartitionSplit_AutosplitByLoad [GOOD] >> TopicAutoscaling::ControlPlane_CDC >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoGood [GOOD] >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoDisableNestedGroupsGood |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoGood [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoDisableNestedGroupsGood >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-52 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-53 >> TopicAutoscaling::Simple_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::Simple_PQv1 >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoWithError [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsFromAdLdapServer >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-16 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-17 >> TTopicReaderTests::TestRun_ReadMessages_Output_Base64 [GOOD] >> TSchemeShardServerLess::TestServerlessComputeResourcesModeFeatureFlag [GOOD] >> CdcStreamChangeCollector::SchemaChanges [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest >> 
TopicAutoscaling::PartitionSplit_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionMerge_PreferedPartition_PQv1 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-64 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-65 |95.3%| [TA] $(B)/ydb/core/kqp/executer_actor/ut/test-results/unittest/{meta.json ... results_accumulator.log} |95.3%| [TA] {RESULT} $(B)/ydb/core/kqp/executer_actor/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::TestServerlessComputeResourcesModeFeatureFlag [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:13:18.498628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:13:18.498752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:13:18.498793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:13:18.498831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:13:18.498874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:13:18.498904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:13:18.498962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:13:18.499053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:13:18.499931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:13:18.500244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:13:18.587519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:13:18.587588Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:18.601651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:13:18.606337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:13:18.606526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:13:18.615690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:13:18.615958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:13:18.616686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:18.617053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:13:18.620236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:13:18.620421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:13:18.621675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:13:18.621741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:13:18.621912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:13:18.621968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:13:18.622016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:13:18.622176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:13:18.630082Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:13:18.789722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:13:18.789998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:18.790250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:13:18.790303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:13:18.790566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, 
reason: , at schemeshard: 72057594046678944 2025-06-24T21:13:18.790706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:13:18.794336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:18.794584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:13:18.794815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:18.794906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:13:18.794957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:13:18.795019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:13:18.797292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:18.797367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:13:18.797430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:13:18.800972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:18.801044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:18.801093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:18.801149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:13:18.805477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:13:18.812804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:13:18.813041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation 
RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:13:18.814103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:18.814274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:13:18.814329Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:18.814714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:13:18.814777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:18.814949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:13:18.815039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:13:18.817555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:13:18.817603Z node 1 :FLAT_TX_SCHEMESHARD ... 
_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:13:19.215327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:13:19.215368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:13:19.215408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 104:0, at tablet# 72057594046678944 2025-06-24T21:13:19.215478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 104 ready parts: 1/1 2025-06-24T21:13:19.215619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 104 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:13:19.217753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 104:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:104 msg type: 269090816 2025-06-24T21:13:19.217868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 104, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 104 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 104 at step: 5000005 2025-06-24T21:13:19.218198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:19.218346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:13:19.218401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 104:0, at tablet# 72057594046678944 2025-06-24T21:13:19.218746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 104:0 128 -> 240 2025-06-24T21:13:19.218805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 104:0, at tablet# 72057594046678944 2025-06-24T21:13:19.218924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-24T21:13:19.219029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:568: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 3], Generation: 2, ActorId:[1:608:2534], EffectiveACLVersion: 0, SubdomainVersion: 
2, UserAttributesVersion: 1, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 104 2025-06-24T21:13:19.221196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:13:19.221233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:13:19.221392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:13:19.221446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-24T21:13:19.221767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:13:19.221822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:761: [72057594046678944] TSyncHive, operationId 104:0, ProgressState, NeedSyncHive: 0 2025-06-24T21:13:19.221869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 104:0 240 -> 240 2025-06-24T21:13:19.222653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:13:19.222751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:13:19.222794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:13:19.222829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-24T21:13:19.222873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 6 2025-06-24T21:13:19.222958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-06-24T21:13:19.225816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:13:19.225878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-24T21:13:19.225994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part 
operation is done id#104:0 progress is 1/1 2025-06-24T21:13:19.226034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:13:19.226074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:13:19.226107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:13:19.226146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-06-24T21:13:19.226206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:13:19.226245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:0 2025-06-24T21:13:19.226276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:0 2025-06-24T21:13:19.226436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-24T21:13:19.226849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-24T21:13:19.230948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-24T21:13:19.230999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-24T21:13:19.231516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-24T21:13:19.231623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T21:13:19.231666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:766:2646] TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-06-24T21:13:19.234928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "ServerLess0" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeExclusive } } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:13:19.235097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1079: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 105:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "ServerLess0" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeExclusive } 2025-06-24T21:13:19.235171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1085: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 105:0, path /MyRoot/ServerLess0 2025-06-24T21:13:19.235326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 105:0, explain: Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off, at schemeshard: 72057594046678944 2025-06-24T21:13:19.235373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 105:1, propose status:StatusPreconditionFailed, reason: Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off, at schemeshard: 72057594046678944 2025-06-24T21:13:19.238130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 105, response: Status: StatusPreconditionFailed Reason: "Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:13:19.238354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off, operation: ALTER DATABASE, path: /MyRoot/ServerLess0 TestModificationResult got TxId: 105, wait until txId: 105 >> Balancing::Balancing_OneTopic_PQv1 [GOOD] >> Balancing::Balancing_ManyTopics_TopicApi >> YdbOlapStore::LogGrepExisting [GOOD] >> YdbOlapStore::LogExistingRequest >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD] >> LdapAuthProviderTest_StartTls::LdapRefreshRemoveUserBad ------- [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicReaderTests::TestRun_ReadMessages_Output_Base64 [GOOD] Test command err: === Starting PQ server === Server->StartServer(false); 2025-06-24T21:12:30.127061Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626809710684595:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:30.127538Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:30.200507Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626809903670570:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:30.200556Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:30.551303Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:12:30.551665Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0045c3/r3tmp/tmpoP9A4Z/pdisk_1.dat 2025-06-24T21:12:31.086011Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:31.116167Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:31.116240Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:31.133679Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:31.133723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:31.145526Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:31.149423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:31.168849Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:12:31.168961Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:31.190647Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:31.239564Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 3880, node 1 2025-06-24T21:12:31.443781Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0045c3/r3tmp/yandex5GQ0jV.tmp 2025-06-24T21:12:31.443808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0045c3/r3tmp/yandex5GQ0jV.tmp 2025-06-24T21:12:31.443957Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0045c3/r3tmp/yandex5GQ0jV.tmp 2025-06-24T21:12:31.444114Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:31.803205Z INFO: TTestServer started on Port 63166 GrpcPort 3880 TClient is connected to server localhost:63166 PQClient connected to localhost:3880 === TenantModeEnabled() = 0 === Init PQ - start server on port 3880 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:32.461552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:12:32.461739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T21:12:32.461916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T21:12:32.461941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976715657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T21:12:32.462192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:12:32.462244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:32.467643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T21:12:32.467808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T21:12:32.468031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T21:12:32.468061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T21:12:32.468080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-06-24T21:12:32.468090Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 2 -> 3 waiting... 
2025-06-24T21:12:32.471018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T21:12:32.471059Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T21:12:32.471075Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 3 -> 128 2025-06-24T21:12:32.473074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T21:12:32.473109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T21:12:32.473131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T21:12:32.473161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-06-24T21:12:32.477321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:12:32.478388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:12:32.478412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 0/1, is published: true 2025-06-24T21:12:32.478427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:12:32.479927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976715657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976715657 msg type: 269090816 2025-06-24T21:12:32.480060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976715657, partId: 4294967295, tablet: 72057594046316545 2025-06-24T21:12:32.483108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750799552530, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T21:12:32.483286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750799552530 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T21:12:32.483308Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T21:12:32.483789Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 128 -> 240 2025-06-24T21:12:32.483825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T21:12:32.484001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path ... onsumer cli rebalancing was scheduled 2025-06-24T21:13:17.777855Z node 6 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1255: [72075186224037898][rt3.dc1--topic1] consumer cli balancing. Sessions=1, Families=1, UnradableFamilies=1 [1 (0), ], RequireBalancing=0 [] 2025-06-24T21:13:17.777906Z node 6 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1302: [72075186224037898][rt3.dc1--topic1] consumer cli balancing family=1 (Status=Free, Partitions=[0]) for ReadingSession "shared/cli_5_1_3842447180432625107_v1" (Sender=[5:7519627012830609499:2608], Pipe=[5:7519627012830609502:2608], Partitions=[], ActiveFamilyCount=0) 2025-06-24T21:13:17.777960Z node 6 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:545: [72075186224037898][rt3.dc1--topic1] consumer cli family 1 status Active partitions [0] session "shared/cli_5_1_3842447180432625107_v1" sender [5:7519627012830609499:2608] lock partition 0 for ReadingSession "shared/cli_5_1_3842447180432625107_v1" (Sender=[5:7519627012830609499:2608], Pipe=[5:7519627012830609502:2608], Partitions=[], ActiveFamilyCount=1) generation 1 step 1 2025-06-24T21:13:17.778013Z node 6 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037898][rt3.dc1--topic1] consumer cli start rebalancing. familyCount=1, sessionCount=1, desiredFamilyCount=1, allowPlusOne=0 2025-06-24T21:13:17.778039Z node 6 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037898][rt3.dc1--topic1] consumer cli balancing duration: 0.000164s 2025-06-24T21:13:17.779636Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:972: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) pipe restart attempt 0 pipe creation result: OK TabletId: 72075186224037897 Generation: 1, pipe: [5:7519627012830609504:2611] 2025-06-24T21:13:17.779843Z node 5 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: shared/cli_5_1_3842447180432625107_v1:1 with generation 1 2025-06-24T21:13:17.783613Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 0 event { CmdGetClientOffsetResult { Offset: 0 EndOffset: 3 SizeLag: 280 WriteTimestampEstimateMS: 1750799597756 ClientHasAnyCommits: false } Cookie: 18446744073709551615 } 2025-06-24T21:13:17.783663Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:683: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 INIT DONE TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 readOffset 0 committedOffset 0 2025-06-24T21:13:17.783720Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1413: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 sending to client partition status 2025-06-24T21:13:17.784600Z :INFO: 
[] [] [33a8ecfd-fd374563-50b9dfc6-3147a612] [] Confirm partition stream create. Partition stream id: 1. Cluster: "-". Topic: "/topic1". Partition: 0. Read offset: (NULL) 2025-06-24T21:13:17.785044Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 grpc read done: success# 1, data# { start_partition_session_response { partition_session_id: 1 } } 2025-06-24T21:13:17.785157Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:533: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 got StartRead from client: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 0, commitOffset# (empty maybe) 2025-06-24T21:13:17.785216Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:1012: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 Start reading TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 readOffset 0 committedOffset 0 clientCommitOffset (empty maybe) clientReadOffset 0 2025-06-24T21:13:17.785249Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:958: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) ready for read with readOffset 0 endOffset 3 2025-06-24T21:13:17.785307Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2309: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 partition ready for read: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 0, endOffset# 3, WTime# 0, sizeLag# 280 2025-06-24T21:13:17.785326Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2320: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1TEvPartitionReady. Aval parts: 1 2025-06-24T21:13:17.785368Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2243: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 performing read request: guid# c9c1a365-ea4e7564-61677f25-12e45807, from# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), count# 3, size# 336, partitionsAsked# 1, maxTimeLag# 0ms 2025-06-24T21:13:17.785425Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1384: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 READ FROM TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1)maxCount 3 maxSize 336 maxTimeLagMs 0 readTimestampMs 0 readOffset 0 EndOffset 3 ClientCommitOffset 0 committedOffset 0 Guid c9c1a365-ea4e7564-61677f25-12e45807 2025-06-24T21:13:17.786544Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { CmdReadResult { MaxOffset: 3 Result { Offset: 0 Data: "... 79 bytes ..." SourceId: "\000source1" SeqNo: 1 WriteTimestampMS: 1750799597649 CreateTimestampMS: 1750799597647 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 1 Data: "... 79 bytes ..." SourceId: "\000source1" SeqNo: 2 WriteTimestampMS: 1750799597653 CreateTimestampMS: 1750799597647 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 2 Data: "... 79 bytes ..." 
SourceId: "\000source1" SeqNo: 3 WriteTimestampMS: 1750799597654 CreateTimestampMS: 1750799597647 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 2 SizeLag: 18446744073709551530 RealReadOffset: 2 WaitQuotaTimeMs: 0 EndOffset: 3 StartOffset: 0 } Cookie: 0 } 2025-06-24T21:13:17.786658Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset 3 2025-06-24T21:13:17.786683Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 ReadOffset 3 ReadGuid c9c1a365-ea4e7564-61677f25-12e45807 has messages 1 2025-06-24T21:13:17.786747Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 read done: guid# c9c1a365-ea4e7564-61677f25-12e45807, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 490 2025-06-24T21:13:17.786775Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 response to read: guid# c9c1a365-ea4e7564-61677f25-12e45807 2025-06-24T21:13:17.787254Z :DEBUG: [] [] [33a8ecfd-fd374563-50b9dfc6-3147a612] [] Got ReadResponse, serverBytesSize = 490, now ReadSizeBudget = 0, ReadSizeServerDelta = 52428310 2025-06-24T21:13:17.786946Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 Process answer. Aval parts: 0 2025-06-24T21:13:17.787351Z :DEBUG: [] [] [33a8ecfd-fd374563-50b9dfc6-3147a612] [] In ContinueReadingDataImpl, ReadSizeBudget = 0, ReadSizeServerDelta = 52428310 2025-06-24T21:13:17.788072Z :DEBUG: [] Decompression task done. Partition/PartitionSessionId: 1 (0-2) 2025-06-24T21:13:17.788123Z :DEBUG: [] [] [33a8ecfd-fd374563-50b9dfc6-3147a612] [] Returning serverBytesSize = 490 to budget 2025-06-24T21:13:17.788158Z :DEBUG: [] [] [33a8ecfd-fd374563-50b9dfc6-3147a612] [] In ContinueReadingDataImpl, ReadSizeBudget = 490, ReadSizeServerDelta = 52428310 2025-06-24T21:13:17.788390Z :DEBUG: [] [] [33a8ecfd-fd374563-50b9dfc6-3147a612] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 2025-06-24T21:13:17.788536Z :DEBUG: [] Take Data. Partition 0. Read: {0, 0} (0-0) 2025-06-24T21:13:17.788578Z :DEBUG: [] Take Data. Partition 0. Read: {1, 0} (1-1) 2025-06-24T21:13:17.788609Z :DEBUG: [] Take Data. Partition 0. Read: {2, 0} (2-2) 2025-06-24T21:13:17.788665Z :DEBUG: [] [] [33a8ecfd-fd374563-50b9dfc6-3147a612] [] The application data is transferred to the client. Number of messages 3, size 24 bytes 2025-06-24T21:13:17.788712Z :DEBUG: [] [] [33a8ecfd-fd374563-50b9dfc6-3147a612] [] Returning serverBytesSize = 0 to budget 2025-06-24T21:13:17.788938Z :INFO: [] [] [33a8ecfd-fd374563-50b9dfc6-3147a612] Closing read session. 
Close timeout: 0.000000s 2025-06-24T21:13:17.788986Z :INFO: [] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic1:0:1:2:0 2025-06-24T21:13:17.789029Z :INFO: [] [] [33a8ecfd-fd374563-50b9dfc6-3147a612] Counters: { Errors: 0 CurrentSessionLifetimeMs: 33 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 24 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:13:17.789132Z :NOTICE: [] [] [33a8ecfd-fd374563-50b9dfc6-3147a612] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-24T21:13:17.789170Z :DEBUG: [] [] [33a8ecfd-fd374563-50b9dfc6-3147a612] [] Abort session to cluster 2025-06-24T21:13:17.789684Z :NOTICE: [] [] [33a8ecfd-fd374563-50b9dfc6-3147a612] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:13:17.791276Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 grpc read done: success# 1, data# { read_request { bytes_size: 490 } } 2025-06-24T21:13:17.791338Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 grpc closed 2025-06-24T21:13:17.791386Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/cli session shared/cli_5_1_3842447180432625107_v1 is DEAD 2025-06-24T21:13:17.792581Z node 5 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/cli_5_1_3842447180432625107_v1 2025-06-24T21:13:17.795547Z node 6 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037898][rt3.dc1--topic1] pipe [5:7519627012830609502:2608] disconnected; active server actors: 1 2025-06-24T21:13:17.795582Z node 6 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037898][rt3.dc1--topic1] pipe [5:7519627012830609502:2608] client cli disconnected session shared/cli_5_1_3842447180432625107_v1 >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_BeforeAutoscaleAwareSDK ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::SchemaChanges [GOOD] Test command err: 2025-06-24T21:12:50.892964Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:12:50.893377Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:12:50.893571Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004731/r3tmp/tmp6Jpqx4/pdisk_1.dat 2025-06-24T21:12:51.356935Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:12:51.371430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:12:51.441801Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:51.451574Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1191: Update config MemoryLimit: 33554432 2025-06-24T21:12:51.452090Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799567842578 != 1750799567842582 2025-06-24T21:12:51.500370Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:51.500501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:51.513931Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:51.620060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:51.686851Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:12:51.687139Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:12:51.737679Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:12:51.737829Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:12:51.739639Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:12:51.739734Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:12:51.739789Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:12:51.740136Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:12:51.740270Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:12:51.740343Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:12:51.751106Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:12:51.789975Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:12:51.790193Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:12:51.790322Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:12:51.790376Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:12:51.790413Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:12:51.790520Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:51.791034Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:12:51.791255Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:12:51.791367Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:12:51.791430Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:12:51.791540Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:12:51.791595Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:12:51.791724Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:12:51.793824Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:12:51.794189Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:12:51.794294Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:12:51.796203Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:12:51.807019Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:12:51.807127Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:12:51.965786Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:659:2549], serverId# [1:660:2550], 
sessionId# [0:0:0] 2025-06-24T21:12:51.970872Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:12:51.970957Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:51.974297Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:12:51.974383Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:12:51.974470Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:12:51.974728Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:12:51.974917Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:12:51.975349Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:12:51.975419Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:12:51.977159Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:12:51.977938Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:12:51.979635Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:12:51.979693Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:51.981138Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:12:51.981199Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:12:51.981756Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:12:51.981787Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:12:51.981832Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:12:51.981880Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:12:51.981931Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:12:51.981989Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:12:51.988114Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:12:51.990163Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:12:51.990261Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:12:51.991994Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:12:52.026080Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:12:52.026217Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-24T21:12:5 ... ransaction::Execute at 72075186224037888 2025-06-24T21:13:18.484592Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1822: Add schema snapshot: pathId# [OwnerId: 72057594046644480, LocalPathId: 2], version# 2, step# 1500, txId# 281474976715658, at tablet# 72075186224037888 2025-06-24T21:13:18.484961Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:13:18.547890Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-06-24T21:13:18.548026Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:13:18.548069Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:13:18.548125Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:18.548210Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:13:18.548284Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-06-24T21:13:18.548389Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:18.550843Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-06-24T21:13:18.550966Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:13:18.608446Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:837:2675], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:18.608590Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:847:2680], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:18.608680Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:18.615760Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:13:18.624353Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:18.800810Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:13:18.804803Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:851:2683], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:13:18.832686Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:907:2720] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:13:18.891549Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhwkh0edmd2qeqncwaa7hty, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=M2ZmMTRhYzUtZDU4OWQxZmQtYTU1Nzc5N2MtYWI2N2UzZDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:18.894226Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:938:2737], serverId# [4:939:2738], sessionId# [0:0:0] 2025-06-24T21:13:18.894685Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:3] at 72075186224037888 2025-06-24T21:13:18.894927Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1750799598894814 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 32b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-24T21:13:18.895168Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-06-24T21:13:18.906656Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 32 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-24T21:13:18.906760Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:18.961836Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:13:18.965208Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:13:18.965505Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715662 ssId 72057594046644480 seqNo 2:3 2025-06-24T21:13:18.965594Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 3 current version# 2 expected version# 3 at tablet# 72075186224037888 txId# 281474976715662 2025-06-24T21:13:18.965647Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715662 at tablet 72075186224037888 2025-06-24T21:13:18.976883Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:13:19.092629Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715662 at step 2500 at 
tablet 72075186224037888 { Transactions { TxId: 281474976715662 AckTo { RawX1: 0 RawX2: 0 } } Step: 2500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:13:19.092712Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:19.093030Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:13:19.093081Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:13:19.093124Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2500:281474976715662] in PlanQueue unit at 72075186224037888 2025-06-24T21:13:19.093422Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2500:281474976715662 keys extracted: 0 2025-06-24T21:13:19.093563Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:13:19.093749Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:13:19.093846Z node 4 :TX_DATASHARD INFO: alter_table_unit.cpp:145: Trying to ALTER TABLE at 72075186224037888 version 3 2025-06-24T21:13:19.094659Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1822: Add schema snapshot: pathId# [OwnerId: 72057594046644480, LocalPathId: 2], version# 3, step# 2500, txId# 281474976715662, at tablet# 72075186224037888 2025-06-24T21:13:19.094781Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 0 Step: 2500 TxId: 281474976715662 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcSchemaChange Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 3 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-24T21:13:19.095190Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:13:19.098997Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2500} 2025-06-24T21:13:19.099098Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:13:19.100415Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:13:19.100524Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 0 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 3 } 2025-06-24T21:13:19.100631Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [2500 : 281474976715662] from 72075186224037888 at tablet 72075186224037888 send result to client [4:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:13:19.100709Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715662 state Ready TxInFly 0 2025-06-24T21:13:19.100827Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, 
LocalPathId: 3] BodySize: 0 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 3 } 2025-06-24T21:13:19.100883Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:13:19.104144Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037888 state Ready 2025-06-24T21:13:19.104235Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:13:19.109764Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:981:2775], serverId# [4:982:2776], sessionId# [0:0:0] 2025-06-24T21:13:19.129159Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:984:2778], serverId# [4:985:2779], sessionId# [0:0:0] >> KqpKv::ReadRows_ExternalBlobs+UseExtBlobsPrecharge [GOOD] >> KqpKv::ReadRows_ExternalBlobs-UseExtBlobsPrecharge |95.3%| [TA] $(B)/ydb/public/lib/ydb_cli/topic/ut/test-results/unittest/{meta.json ... results_accumulator.log} |95.3%| [TA] {RESULT} $(B)/ydb/public/lib/ydb_cli/topic/ut/test-results/unittest/{meta.json ... results_accumulator.log} |95.3%| [TA] $(B)/ydb/core/tx/datashard/ut_change_collector/test-results/unittest/{meta.json ... results_accumulator.log} |95.3%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_collector/test-results/unittest/{meta.json ... results_accumulator.log} >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoWithError [GOOD] >> TLdapUtilsSearchFilterCreatorTest::GetDefaultFilter [GOOD] >> TLdapUtilsSearchFilterCreatorTest::GetFilterWithOneLoginPlaceholder [GOOD] >> TLdapUtilsSearchFilterCreatorTest::GetFilterWithSearchAttribute [GOOD] >> TLdapUtilsSearchFilterCreatorTest::GetFilterWithFewLoginPlaceholders [GOOD] >> Cache::Test4 [GOOD] >> Cache::Test5 >> IcebergClusterProcessor::ValidateDdlCreationForHiveWithS3 [GOOD] >> IcebergClusterProcessor::ValidateRiseErrors [GOOD] >> IssuesTextFiltering::ShouldRemoveDatabasePath [GOOD] >> SplitterBasic::EqualSplitByMaxBytesLimitPerChunk [GOOD] >> EntityId::Order >> IcebergClusterProcessor::ValidateDdlCreationForHadoopWithS3 [GOOD] >> IcebergClusterProcessor::ValidateConfigurationWithoutWarehouse [GOOD] >> EntityId::Distinct [GOOD] >> EntityId::MinId [GOOD] >> EntityId::MaxId [GOOD] >> Cache::Test1 [GOOD] >> Cache::Test2 [GOOD] >> Cache::Test3 [GOOD] >> EntityId::Order [GOOD] >> EscapingBasics::EncloseSecretShouldWork [GOOD] >> EscapingBasics::EncloseAndEscapeStringShouldWork [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorQuorumTest::TwoRingGroups >> TPopulatorQuorumTest::OneDisconnectedRingGroup >> TPopulatorQuorumTest::OneWriteOnlyRingGroup >> TPopulatorTestWithResets::UpdateAck >> TPopulatorQuorumTest::OneRingGroup >> TPopulatorTest::Boot >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-40 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-41 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> IcebergClusterProcessor::ValidateRiseErrors [GOOD] Test command err: test case: 1 test case: 2 test case: 3 test case: 4 test case: 5 test case: 6 test case: 7 test case: 8 test case: 9 |95.3%| [TM] {asan, 
default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> IcebergClusterProcessor::ValidateConfigurationWithoutWarehouse [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> SplitterBasic::EqualSplitByMaxBytesLimitPerChunk [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> Cache::Test3 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> TLdapUtilsSearchFilterCreatorTest::GetFilterWithFewLoginPlaceholders [GOOD] Test command err: 2025-06-24T21:12:58.762511Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626932467906208:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:58.770672Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00426b/r3tmp/tmpPURxoV/pdisk_1.dat 2025-06-24T21:12:59.261055Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:59.261162Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:59.317705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:59.372498Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:59.375326Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626932467906179:2079] 1750799578740800 != 1750799578740803 TServer::EnableGrpc on GrpcPort 22420, node 1 2025-06-24T21:12:59.570006Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:59.570028Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:59.570035Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:59.570179Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:59.781310Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:59.794159Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:59.802762Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:59.802795Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:59.803529Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:19042, port: 19042 2025-06-24T21:12:59.804142Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:59.836205Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 
2025-06-24T21:12:59.883878Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:12:59.884547Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:12:59.884617Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:59.929924Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:59.975415Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:59.976603Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****n_7A (2204F07F) () has now valid token of ldapuser@ldap 2025-06-24T21:13:03.767433Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626932467906208:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:03.767532Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:13:03.787181Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****n_7A (2204F07F) 2025-06-24T21:13:03.787320Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:19042, port: 19042 2025-06-24T21:13:03.787470Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:03.811384Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:03.811932Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:340: LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldap://localhost:19042 return no entries 2025-06-24T21:13:03.812195Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****n_7A (2204F07F) () has now permanent error message 'Could not login via LDAP (LDAP user ldapuser does not exist. 
LDAP search for filter uid=ldapuser on server ldap://localhost:19042 return no entries)' 2025-06-24T21:13:07.794692Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****n_7A (2204F07F) 2025-06-24T21:13:10.601411Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626982131324071:2087];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:10.601521Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00426b/r3tmp/tmpqPYUjW/pdisk_1.dat 2025-06-24T21:13:10.786815Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:10.793581Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626982131323998:2079] 1750799590591259 != 1750799590591262 2025-06-24T21:13:10.793940Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:10.794022Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:10.798370Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5984, node 2 2025-06-24T21:13:10.883793Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:10.883814Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:10.883820Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:10.883934Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:11.051262Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:11.054716Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:11.054743Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:11.055420Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:2397, port: 2397 2025-06-24T21:13:11.055508Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:11.060450Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:11.061190Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:2397. Server is busy 2025-06-24T21:13:11.061492Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****rZ8A (4DBD9C67) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:2397. 
Server is busy)' 2025-06-24T21:13:11.061768Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:11.061807Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:11.062802Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:2397, port: 2397 2025-06-24T21:13:11.062878Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:11.066619Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:11.067198Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:2397. Server is busy 2025-06-24T21:13:11.067365Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****rZ8A (4DBD9C67) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:2397. Server is busy)' 2025-06-24T21:13:11.603816Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:12.604319Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****rZ8A (4DBD9C67) 2025-06-24T21:13:12.604763Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:12.605869Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:12.606859Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:2397, port: 2397 2025-06-24T21:13:12.606939Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:12.615319Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:12.619287Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:2397. Server is busy 2025-06-24T21:13:12.619634Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****rZ8A (4DBD9C67) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:2397. 
Server is busy)' 2025-06-24T21:13:15.603470Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519626982131324071:2087];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:15.603574Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:13:15.611224Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****rZ8A (4DBD9C67) 2025-06-24T21:13:15.611636Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:15.611658Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:15.612310Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:2397, port: 2397 2025-06-24T21:13:15.612364Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:15.614850Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:15.659583Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:13:15.660119Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:13:15.660181Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:15.710788Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:15.755980Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:15.757099Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****rZ8A (4DBD9C67) () has now valid token of ldapuser@ldap 2025-06-24T21:13:20.624208Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****rZ8A (4DBD9C67) 2025-06-24T21:13:20.624316Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:2397, port: 2397 2025-06-24T21:13:20.624388Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:20.627939Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:20.675454Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: 
dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:13:20.675967Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:13:20.676019Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:20.723424Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:20.771538Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:20.772349Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****rZ8A (4DBD9C67) () has now valid token of ldapuser@ldap |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> EscapingBasics::EncloseAndEscapeStringShouldWork [GOOD] >> EscapingBasics::HideSecretsOverEncloseSecretShouldWork [GOOD] >> EscapingBasics::EscapeStringShouldWork [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> EntityId::MaxId [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsFromAdLdapServer [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsDisableRequestToAD >> SplitterBasic::EqualSplitByMaxRowsLimitPerChunk [GOOD] >> SplitterBasic::LimitExceed [GOOD] >> TPopulatorTestWithResets::UpdateAck [GOOD] >> TPopulatorTest::Boot [GOOD] >> Cache::Test5 [GOOD] >> EntityId::CheckId [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> EscapingBasics::EscapeStringShouldWork [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> SplitterBasic::LimitExceed [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAdministerTenant-DomainLoginOnly [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAdministerTenant-DomainLoginOnly-StrictAclCheck >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-23 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-24 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorTest::Boot [GOOD] Test command err: 2025-06-24T21:13:22.558364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:13:22.558428Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> EntityId::CheckId [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorTestWithResets::UpdateAck [GOOD] Test command err: 2025-06-24T21:13:22.556973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:13:22.557030Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table 
profiles were not loaded TestModificationResults wait txId: 100 2025-06-24T21:13:22.650796Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:97:2122] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:72:2110], cookie# 100, event size# 419, preserialized size# 51 2025-06-24T21:13:22.650885Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:97:2122] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 3 2025-06-24T21:13:22.652031Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T21:13:22.653639Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:97:2122] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/DirC" PathDescription { Self { Name: "DirC" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: false CreateTxId: 100 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 
MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944 }: sender# [1:72:2110], cookie# 100, event size# 309, preserialized size# 2 2025-06-24T21:13:22.653682Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:97:2122] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 100, is deletion# false, version: 2 2025-06-24T21:13:22.653792Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T21:13:22.653818Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T21:13:22.653874Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 2025-06-24T21:13:22.653913Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 100 FAKE_COORDINATOR: Add transaction: 100 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000001 FAKE_COORDINATOR: Erasing txId 100 2025-06-24T21:13:22.657185Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:97:2122] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 
MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:72:2110], cookie# 100, event size# 429, preserialized size# 56 2025-06-24T21:13:22.657233Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:97:2122] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 4 2025-06-24T21:13:22.658573Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:97:2122] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/DirC" PathDescription { Self { Name: "DirC" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944 }: sender# [1:72:2110], cookie# 100, event size# 314, preserialized size# 2 2025-06-24T21:13:22.658632Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:97:2122] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 100, is deletion# false, version: 3 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-24T21:13:22.686368Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:98:2123] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 2 }: sender# [1:12:2059] 2025-06-24T21:13:22.686431Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:98:2123] Successful handshake: replica# [1:12:2059] 2025-06-24T21:13:22.686477Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:260: [1:98:2123] Resume sync: replica# [1:12:2059], fromPathId# [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:13:22.686580Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:99:2124] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 2 }: sender# [1:15:2062] 2025-06-24T21:13:22.686606Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:99:2124] Successful handshake: replica# [1:15:2062] 2025-06-24T21:13:22.686641Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:260: [1:99:2124] Resume sync: replica# [1:15:2062], fromPathId# [OwnerId: 72057594046678944, 
LocalPathId: 2] 2025-06-24T21:13:22.686704Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:100:2125] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 2 }: sender# [1:18:2065] 2025-06-24T21:13:22.686721Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:100:2125] Successful handshake: replica# [1:18:2065] 2025-06-24T21:13:22.686734Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:260: [1:100:2125] Resume sync: replica# [1:18:2065], fromPathId# [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:13:22.686806Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:97:2122] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Replica: [1:24339059:0] }: sender# [1:98:2123] 2025-06-24T21:13:22.686890Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2123] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/DirC PathId: [OwnerId: 72057594046678944, LocalPathId: 2] PathVersion: 3 } }: sender# [1:97:2122] 20 ... BUG: populator.cpp:536: [1:97:2122] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Replica: [1:1099535966835:0] }: sender# [1:99:2124] 2025-06-24T21:13:22.687558Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:97:2122] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 1] }: sender# [1:99:2124] 2025-06-24T21:13:22.687598Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:12:2059], cookie# 0 2025-06-24T21:13:22.687675Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:99:2124] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/DirC PathId: [OwnerId: 72057594046678944, LocalPathId: 2] PathVersion: 3 } }: sender# [1:97:2122] 2025-06-24T21:13:22.687754Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:97:2122] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 2] }: sender# [1:99:2124] 2025-06-24T21:13:22.687789Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 0 2025-06-24T21:13:22.687831Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 0 2025-06-24T21:13:22.687915Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:97:2122] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Replica: [1:2199047594611:0] }: sender# [1:100:2125] 2025-06-24T21:13:22.687971Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:97:2122] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 1] }: sender# [1:100:2125] 2025-06-24T21:13:22.688031Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: 
[OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:15:2062], cookie# 0 2025-06-24T21:13:22.688097Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:100:2125] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/DirC PathId: [OwnerId: 72057594046678944, LocalPathId: 2] PathVersion: 3 } }: sender# [1:97:2122] 2025-06-24T21:13:22.688174Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:97:2122] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 2] }: sender# [1:100:2125] 2025-06-24T21:13:22.688207Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:15:2062], cookie# 0 2025-06-24T21:13:22.688246Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 0 2025-06-24T21:13:22.688310Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:97:2122] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 3] Replica: [1:24339059:0] }: sender# [1:98:2123] 2025-06-24T21:13:22.688355Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2123] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:97:2122] 2025-06-24T21:13:22.688388Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:97:2122], cookie# 0 2025-06-24T21:13:22.688428Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:18:2065], cookie# 0 2025-06-24T21:13:22.688485Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:98:2123], cookie# 0 2025-06-24T21:13:22.688534Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:97:2122] Ack for unknown update (already acked?): sender# [1:98:2123], cookie# 0 2025-06-24T21:13:22.688591Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:98:2123] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 2 }: sender# [1:12:2059] 2025-06-24T21:13:22.688637Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:98:2123], cookie# 100 2025-06-24T21:13:22.688676Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:18:2065], cookie# 0 2025-06-24T21:13:22.688720Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, 
LocalPathId: 2] Version: 3 }: sender# [1:98:2123], cookie# 0 2025-06-24T21:13:22.688742Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:97:2122] Ack for unknown update (already acked?): sender# [1:98:2123], cookie# 0 2025-06-24T21:13:22.688769Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:98:2123], cookie# 100 2025-06-24T21:13:22.688812Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:97:2122] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 3] Replica: [1:1099535966835:0] }: sender# [1:99:2124] 2025-06-24T21:13:22.688857Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:99:2124] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:97:2122] 2025-06-24T21:13:22.688903Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:99:2124], cookie# 0 2025-06-24T21:13:22.688944Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:97:2122] Ack for unknown update (already acked?): sender# [1:99:2124], cookie# 0 2025-06-24T21:13:22.688981Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:99:2124] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 2 }: sender# [1:15:2062] 2025-06-24T21:13:22.689020Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:99:2124], cookie# 100 2025-06-24T21:13:22.689070Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:97:2122] Ack update: ack to# [1:72:2110], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 3 2025-06-24T21:13:22.689117Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:97:2122] Ack update: ack to# [1:72:2110], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 4 2025-06-24T21:13:22.689676Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:97:2122] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 3] Replica: [1:2199047594611:0] }: sender# [1:100:2125] 2025-06-24T21:13:22.689860Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:99:2124], cookie# 0 2025-06-24T21:13:22.689895Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:97:2122] Ack for unknown update (already acked?): sender# [1:99:2124], cookie# 0 2025-06-24T21:13:22.689934Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:100:2125] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:97:2122] 2025-06-24T21:13:22.690052Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 
3 }: sender# [1:99:2124], cookie# 100 2025-06-24T21:13:22.690094Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:97:2122] Ack update: ack to# [1:72:2110], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 2 2025-06-24T21:13:22.690127Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:97:2122] Ack update: ack to# [1:72:2110], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 3 2025-06-24T21:13:22.690161Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:100:2125] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 2 }: sender# [1:18:2065] 2025-06-24T21:13:22.690360Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:100:2125], cookie# 0 2025-06-24T21:13:22.690394Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:97:2122] Ack for unknown update (already acked?): sender# [1:100:2125], cookie# 0 2025-06-24T21:13:22.690651Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:100:2125], cookie# 100 2025-06-24T21:13:22.690684Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:97:2122] Ack for unknown update (already acked?): sender# [1:100:2125], cookie# 100 2025-06-24T21:13:22.690836Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:100:2125], cookie# 0 2025-06-24T21:13:22.690858Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:97:2122] Ack for unknown update (already acked?): sender# [1:100:2125], cookie# 0 2025-06-24T21:13:22.691010Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:100:2125], cookie# 100 2025-06-24T21:13:22.691037Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:97:2122] Ack for unknown update (already acked?): sender# [1:100:2125], cookie# 100 TestWaitNotification: OK eventTxId 100 >> EscapingBasics::HideSecretsShouldWork [GOOD] >> IcebergClusterProcessor::ValidateConfigurationWithoutCatalog [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-53 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-54 |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> IcebergClusterProcessor::ValidateConfigurationWithoutCatalog [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-17 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-18 |95.3%| [TA] $(B)/ydb/core/fq/libs/common/ut/test-results/unittest/{meta.json ... results_accumulator.log} |95.3%| [TA] {RESULT} $(B)/ydb/core/fq/libs/common/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> THDRRQuoterResourceTreeRuntimeTest::TestWeights [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestWeightsChange [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestVerySmallSpeed [GOOD] >> TKesusTest::TestAcquireBeforeTimeoutViaRelease >> TKesusTest::TestAttachNewSessions >> TKesusTest::TestAcquireUpgrade >> TKesusTest::TestAcquireSemaphoreTimeout >> TKesusTest::TestQuoterResourceDescribe >> TKesusTest::TestAcquireWaiterDowngrade >> THDRRQuoterResourceTreeRuntimeTest::TestCreateInactiveSession [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestDeleteResourceSessions [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestDistributeResourcesBetweenConsumers [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestEffectiveProps [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestDeleteResourceWithActiveChildren [GOOD] >> TKesusTest::TestAcquireLocks >> TKesusTest::TestSessionTimeoutAfterDetach >> TKesusTest::TestRegisterProxy >> THDRRQuoterResourceTreeRuntimeTest::TestUpdateResourceSessions [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestStopConsuming [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestUpdateConsumptionState [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestUpdateConsumptionStateAfterAllResourceAllocated [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestVeryBigWeights [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestAllocateResource [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestAllocationGranularity [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestAmountIsLessThanEpsilon [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestActiveSessionDisconnectsAndThenConnectsAgain [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestActiveMultiresourceSessionDisconnectsAndThenConnectsAgain [GOOD] >> TKesusTest::TestUnregisterProxy >> LdapAuthProviderTest_StartTls::LdapFetchGroupsDisableRequestToAD [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithCustomGroupAttributeGood |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> THDRRQuoterResourceTreeRuntimeTest::TestDeleteResourceWithActiveChildren [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> THDRRQuoterResourceTreeRuntimeTest::TestVeryBigWeights [GOOD] >> TopicAutoscaling::PartitionMerge_PreferedPartition_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ControlPlane_CreateAlterDescribe >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-65 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-66 |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> THDRRQuoterResourceTreeRuntimeTest::TestActiveMultiresourceSessionDisconnectsAndThenConnectsAgain [GOOD] >> TKesusTest::TestAcquireBeforeTimeoutViaRelease [GOOD] >> TKesusTest::TestAcquireBeforeTimeoutViaModeChange >> TKesusTest::TestAcquireUpgrade [GOOD] >> TKesusTest::TestAcquireTimeout >> TKesusTest::TestAttachNewSessions [GOOD] >> TKesusTest::TestAttachMissingSession >> TKesusTest::TestUnregisterProxy [GOOD] >> TKesusTest::TestUnregisterProxyBadGeneration >> TKesusTest::TestAcquireWaiterDowngrade [GOOD] >> TKesusTest::TestAcquireWaiterUpgrade >> TKesusTest::TestRegisterProxy [GOOD] >> TKesusTest::TestRegisterProxyBadGeneration >> TKesusTest::TestQuoterResourceDescribe [GOOD] >> TKesusTest::TestQuoterResourceCreation >> TKesusTest::TestSessionDetach >> TKesusTest::TestUnregisterProxyBadGeneration [GOOD] >> TKesusTest::TestSessionTimeoutAfterUnregister >> TKesusTest::TestAcquireWaiterUpgrade [GOOD] >> 
TKesusTest::TestAcquireWaiterChangeTimeoutToZero >> TKesusTest::TestRegisterProxyBadGeneration [GOOD] >> TKesusTest::TestRegisterProxyFromDeadActor >> TKesusTest::TestAttachMissingSession [GOOD] >> TKesusTest::TestAttachOldGeneration >> TopicAutoscaling::PartitionSplit_PreferedPartition_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_PreferedPartition_AutoscaleAwareSDK >> TKesusTest::TestAcquireBeforeTimeoutViaModeChange [GOOD] >> TKesusTest::TestSessionDetach [GOOD] >> TKesusTest::TestSessionDetachFutureId >> TKesusTest::TestQuoterResourceCreation [GOOD] >> TKesusTest::TestQuoterResourceModification >> TKesusTest::TestRegisterProxyFromDeadActor [GOOD] >> TKesusTest::TestRegisterProxyLinkFailure >> TKesusTest::TestAttachOldGeneration [GOOD] >> TKesusTest::TestAttachFastPath >> TKesusTest::TestAcquireWaiterChangeTimeoutToZero [GOOD] >> TKesusTest::TestAcquireWaiterRelease >> TKesusTest::TestSessionDetachFutureId [GOOD] >> TKesusTest::TestSessionDestroy ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestAcquireBeforeTimeoutViaModeChange [GOOD] Test command err: 2025-06-24T21:13:26.420111Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.420257Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:26.444152Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:26.444385Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:26.468488Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:26.469298Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:135:2159], cookie=9765512922808088783, session=0, seqNo=0) 2025-06-24T21:13:26.469441Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:26.481487Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:135:2159], cookie=9765512922808088783, session=1) 2025-06-24T21:13:26.481837Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:136:2160], cookie=17483448740815866913, session=0, seqNo=0) 2025-06-24T21:13:26.481958Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-24T21:13:26.499904Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:136:2160], cookie=17483448740815866913, session=2) 2025-06-24T21:13:26.501191Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:135:2159], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-24T21:13:26.501368Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-24T21:13:26.501484Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-24T21:13:26.513401Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:135:2159], cookie=111) 2025-06-24T21:13:26.513722Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: 
[72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:135:2159], cookie=112, session=1, semaphore="Lock2" count=1) 2025-06-24T21:13:26.513842Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 2 "Lock2" 2025-06-24T21:13:26.513919Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #2 session 1 2025-06-24T21:13:26.527239Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:135:2159], cookie=112) 2025-06-24T21:13:26.527768Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[1:135:2159], cookie=333, name="Lock1") 2025-06-24T21:13:26.527871Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 1 "Lock1" owner link 2025-06-24T21:13:26.528121Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:136:2160], cookie=222, session=2, semaphore="Lock1" count=1) 2025-06-24T21:13:26.528227Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 3 "Lock1" 2025-06-24T21:13:26.528309Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 3 "Lock1" queue: next order #3 session 2 2025-06-24T21:13:26.528431Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:136:2160], cookie=223, session=2, semaphore="Lock2" count=18446744073709551615) 2025-06-24T21:13:26.540980Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[1:135:2159], cookie=333) 2025-06-24T21:13:26.541086Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:136:2160], cookie=222) 2025-06-24T21:13:26.541128Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:136:2160], cookie=223) 2025-06-24T21:13:26.541429Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[1:135:2159], cookie=334, name="Lock2") 2025-06-24T21:13:26.541508Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 2 "Lock2" owner link 2025-06-24T21:13:26.541569Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #4 session 2 2025-06-24T21:13:26.553556Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[1:135:2159], cookie=334) 2025-06-24T21:13:26.554065Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:164:2186], cookie=5647228328330460472, name="Lock1") 2025-06-24T21:13:26.554167Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:164:2186], cookie=5647228328330460472) 2025-06-24T21:13:26.554598Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:167:2189], cookie=1412837637908076180, name="Lock2") 2025-06-24T21:13:26.554686Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete 
(sender=[1:167:2189], cookie=1412837637908076180) 2025-06-24T21:13:26.572342Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.572478Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:26.573040Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:26.573420Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:26.591637Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:26.591802Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #4 session 2 2025-06-24T21:13:26.591871Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 3 "Lock1" queue: next order #3 session 2 2025-06-24T21:13:26.592257Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:207:2219], cookie=7542541823231561468, name="Lock1") 2025-06-24T21:13:26.592341Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:207:2219], cookie=7542541823231561468) 2025-06-24T21:13:26.592960Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:215:2226], cookie=8952451691514275620, name="Lock2") 2025-06-24T21:13:26.593044Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:215:2226], cookie=8952451691514275620) 2025-06-24T21:13:26.992086Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.992164Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:27.005689Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:27.005817Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:27.031561Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:27.032435Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:135:2159], cookie=16488858841465921664, session=0, seqNo=0) 2025-06-24T21:13:27.032590Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:27.044809Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:135:2159], cookie=16488858841465921664, session=1) 2025-06-24T21:13:27.045131Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:136:2160], cookie=15498952366496461684, session=0, seqNo=0) 2025-06-24T21:13:27.045280Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-24T21:13:27.057552Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:136:2160], cookie=15498952366496461684, session=2) 2025-06-24T21:13:27.058706Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:135:2159], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 
2025-06-24T21:13:27.058864Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-24T21:13:27.058973Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-24T21:13:27.071514Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:135:2159], cookie=111) 2025-06-24T21:13:27.071895Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:135:2159], cookie=112, session=1, semaphore="Lock2" count=1) 2025-06-24T21:13:27.072063Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 2 "Lock2" 2025-06-24T21:13:27.072163Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #2 session 1 2025-06-24T21:13:27.085051Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:135:2159], cookie=112) 2025-06-24T21:13:27.085466Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:135:2159], cookie=333, session=1, semaphore="Lock1" count=1) 2025-06-24T21:13:27.085714Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=222, session=2, semaphore="Lock1" count=1) 2025-06-24T21:13:27.085808Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #3 session 2 2025-06-24T21:13:27.085923Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=223, session=2, semaphore="Lock2" count=18446744073709551615) 2025-06-24T21:13:27.098157Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:135:2159], cookie=333) 2025-06-24T21:13:27.098242Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=222) 2025-06-24T21:13:27.098266Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=223) 2025-06-24T21:13:27.098837Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:162:2184], cookie=2236155041941571478, name="Lock1") 2025-06-24T21:13:27.098925Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:162:2184], cookie=2236155041941571478) 2025-06-24T21:13:27.099339Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:165:2187], cookie=12698623958238555505, name="Lock2") 2025-06-24T21:13:27.099390Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:165:2187], cookie=12698623958238555505) 2025-06-24T21:13:27.099803Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:168:2190], cookie=657098803106713107, name="Lock1") 2025-06-24T21:13:27.099885Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] 
TTxSemaphoreDescribe::Complete (sender=[2:168:2190], cookie=657098803106713107) 2025-06-24T21:13:27.100360Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:171:2193], cookie=9304510535948796355, name="Lock2") 2025-06-24T21:13:27.100423Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:171:2193], cookie=9304510535948796355) 2025-06-24T21:13:27.100624Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=444, session=2, semaphore="Lock2" count=1) 2025-06-24T21:13:27.100736Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #4 session 2 2025-06-24T21:13:27.112991Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=444) 2025-06-24T21:13:27.113729Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:176:2198], cookie=11293269828795931209, name="Lock2") 2025-06-24T21:13:27.113833Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:176:2198], cookie=11293269828795931209) 2025-06-24T21:13:27.114416Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:179:2201], cookie=10320235718406676472, name="Lock2") 2025-06-24T21:13:27.114508Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:179:2201], cookie=10320235718406676472) 2025-06-24T21:13:27.130543Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:27.130682Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:27.131211Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:27.131611Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:27.169580Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:27.169733Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-24T21:13:27.169782Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #3 session 2 2025-06-24T21:13:27.169811Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #2 session 1 2025-06-24T21:13:27.169840Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #4 session 2 2025-06-24T21:13:27.170157Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:219:2231], cookie=17359226111884650643, name="Lock1") 2025-06-24T21:13:27.170246Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:219:2231], cookie=17359226111884650643) 2025-06-24T21:13:27.170905Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:227:2238], 
cookie=17897298109595271997, name="Lock2") 2025-06-24T21:13:27.170975Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:227:2238], cookie=17897298109595271997) >> TKesusTest::TestAttachFastPath [GOOD] >> TKesusTest::TestAttachFastPathBlocked >> TKesusTest::TestKesusConfig >> THDRRQuoterResourceTreeRuntimeTest::TestHierarchicalQuotas [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestHangDefence [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestMoreStrongChildLimit [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestInactiveSessionDisconnectsAndThenConnectsAgain [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestInactiveMultiresourceSessionDisconnectsAndThenConnectsAgain [GOOD] >> TKesusTest::TestRegisterProxyLinkFailure [GOOD] >> TKesusTest::TestRegisterProxyLinkFailureRace >> TKesusTest::TestQuoterHDRRParametersValidation >> TKesusTest::TestAcquireWaiterRelease [GOOD] >> TKesusTest::TestAllocatesResources >> TKesusTest::TestSessionDestroy [GOOD] >> TKesusTest::TestSessionStealing |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> THDRRQuoterResourceTreeRuntimeTest::TestInactiveMultiresourceSessionDisconnectsAndThenConnectsAgain [GOOD] >> TKesusTest::TestAttachFastPathBlocked [GOOD] >> TKesusTest::TestQuoterResourceModification [GOOD] >> TKesusTest::TestQuoterResourceDeletion >> TKesusTest::TestQuoterHDRRParametersValidation [GOOD] >> TKesusTest::TestQuoterAccountResourcesOnDemand >> TKesusTest::TestSessionStealing [GOOD] >> TKesusTest::TestSessionStealingAnyKey >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-41 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-42 >> TKesusTest::TestAllocatesResources [GOOD] >> TKesusTest::TestKesusConfig [GOOD] >> TKesusTest::TestLockNotFound >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-24 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-49 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestAttachFastPathBlocked [GOOD] Test command err: 2025-06-24T21:13:26.420097Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.420263Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:26.442081Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:26.442329Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:26.467266Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:26.467832Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:135:2159], cookie=9501698499071777514, session=0, seqNo=0) 2025-06-24T21:13:26.468578Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:26.481490Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:135:2159], cookie=9501698499071777514, session=1) 2025-06-24T21:13:26.481815Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:135:2159], cookie=8166841119597574236, session=0, seqNo=0) 2025-06-24T21:13:26.481975Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: 
[72057594037927937] Created new session 2 2025-06-24T21:13:26.494478Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:135:2159], cookie=8166841119597574236, session=2) 2025-06-24T21:13:26.987166Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.987277Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:27.007355Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:27.007506Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:27.032112Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:27.032580Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:135:2159], cookie=13432922987541244857, session=1, seqNo=0) 2025-06-24T21:13:27.044803Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:135:2159], cookie=13432922987541244857, session=1) 2025-06-24T21:13:27.403424Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:27.403518Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:27.423067Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:27.423370Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:27.451499Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:27.452266Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:135:2159], cookie=15925299840038165822, session=0, seqNo=0) 2025-06-24T21:13:27.452402Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:27.469026Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:135:2159], cookie=15925299840038165822, session=1) 2025-06-24T21:13:27.857240Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:27.857346Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:27.875139Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:27.875626Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:27.899783Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:27.900159Z node 4 :KESUS_TABLET DEBUG: tx_config_set.cpp:28: [72057594037927937] TTxConfigSet::Execute (sender=[4:135:2159], cookie=12702672760398539481, path="") 2025-06-24T21:13:27.912622Z node 4 :KESUS_TABLET DEBUG: tx_config_set.cpp:94: [72057594037927937] TTxConfigSet::Complete (sender=[4:135:2159], cookie=12702672760398539481, status=SUCCESS) 2025-06-24T21:13:27.913650Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:144:2166], cookie=17069110155085038866, session=0, seqNo=0) 2025-06-24T21:13:27.913808Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created 
new session 1 2025-06-24T21:13:27.926790Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:144:2166], cookie=17069110155085038866, session=1) 2025-06-24T21:13:27.927768Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:145:2167], cookie=111, session=0, seqNo=0) 2025-06-24T21:13:27.927905Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-24T21:13:27.928085Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:262: [72057594037927937] Fast-path attach session=1 to sender=[4:145:2167], cookie=222, seqNo=0 2025-06-24T21:13:27.940377Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:145:2167], cookie=111, session=2) 2025-06-24T21:13:28.289039Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:28.289132Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:28.304758Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:28.304860Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:28.323031Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:28.323430Z node 5 :KESUS_TABLET DEBUG: tx_config_set.cpp:28: [72057594037927937] TTxConfigSet::Execute (sender=[5:133:2157], cookie=3421192920076878916, path="") 2025-06-24T21:13:28.347277Z node 5 :KESUS_TABLET DEBUG: tx_config_set.cpp:94: [72057594037927937] TTxConfigSet::Complete (sender=[5:133:2157], cookie=3421192920076878916, status=SUCCESS) 2025-06-24T21:13:28.348380Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:142:2164], cookie=16634541368694825751, session=0, seqNo=0) 2025-06-24T21:13:28.348518Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:28.361139Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:142:2164], cookie=16634541368694825751, session=1) 2025-06-24T21:13:28.361973Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:142:2164], cookie=123, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-24T21:13:28.362163Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-24T21:13:28.362277Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-24T21:13:28.362685Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:143:2165], cookie=111, session=0, seqNo=0) 2025-06-24T21:13:28.362780Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-24T21:13:28.362910Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:143:2165], cookie=222, session=1, seqNo=0) 2025-06-24T21:13:28.375265Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:142:2164], cookie=123) 2025-06-24T21:13:28.375363Z node 5 
:KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:143:2165], cookie=111, session=2) 2025-06-24T21:13:28.375422Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:143:2165], cookie=222, session=1) >> TKesusTest::TestSessionStealingAnyKey [GOOD] >> CommitOffset::Commit_WithWrongSession_ToParent [GOOD] >> CommitOffset::Commit_WithoutSession_ParentNotFinished >> TKesusTest::TestLockNotFound [GOOD] >> TKesusTest::TestDeleteSemaphore >> TKesusTest::TestQuoterResourceDeletion [GOOD] >> TKesusTest::TestQuoterSubscribeOnResource ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestAllocatesResources [GOOD] Test command err: 2025-06-24T21:13:26.420165Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.420298Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:26.449916Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:26.450131Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:26.475606Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:26.476061Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:135:2159], cookie=6910752766213561759, session=0, seqNo=0) 2025-06-24T21:13:26.476206Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:26.488034Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:135:2159], cookie=6910752766213561759, session=1) 2025-06-24T21:13:26.488303Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:135:2159], cookie=14373227736592271220, session=0, seqNo=0) 2025-06-24T21:13:26.488442Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-24T21:13:26.500758Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:135:2159], cookie=14373227736592271220, session=2) 2025-06-24T21:13:26.501076Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:135:2159], cookie=111, session=1, semaphore="Lock1" count=1) 2025-06-24T21:13:26.501213Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-24T21:13:26.501334Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-24T21:13:26.513356Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:135:2159], cookie=111) 2025-06-24T21:13:26.513698Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:135:2159], cookie=222, session=2, semaphore="Lock1" count=18446744073709551615) 2025-06-24T21:13:26.513993Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:135:2159], cookie=333, session=2, semaphore="Lock1" count=1) 2025-06-24T21:13:26.514085Z node 1 
:KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #2 session 2 2025-06-24T21:13:26.526121Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:135:2159], cookie=222) 2025-06-24T21:13:26.526227Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:135:2159], cookie=333) 2025-06-24T21:13:26.526778Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:151:2173], cookie=1496953144323261721, name="Lock1") 2025-06-24T21:13:26.526907Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:151:2173], cookie=1496953144323261721) 2025-06-24T21:13:26.955845Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.955971Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:26.973968Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:26.974124Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:27.003225Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:27.003813Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:135:2159], cookie=11760697092895695207, session=0, seqNo=0) 2025-06-24T21:13:27.003992Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:27.017208Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:135:2159], cookie=11760697092895695207, session=1) 2025-06-24T21:13:27.017540Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:135:2159], cookie=2275468086053171003, session=0, seqNo=0) 2025-06-24T21:13:27.017672Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-24T21:13:27.031108Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:135:2159], cookie=2275468086053171003, session=2) 2025-06-24T21:13:27.031551Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:135:2159], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-24T21:13:27.031734Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-24T21:13:27.031842Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-24T21:13:27.044122Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:135:2159], cookie=111) 2025-06-24T21:13:27.044538Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:135:2159], cookie=222, session=2, semaphore="Lock1" count=1) 2025-06-24T21:13:27.044901Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:135:2159], cookie=333, session=2, 
semaphore="Lock1" count=18446744073709551615) 2025-06-24T21:13:27.059846Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:135:2159], cookie=222) 2025-06-24T21:13:27.059946Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:135:2159], cookie=333) 2025-06-24T21:13:27.060538Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:151:2173], cookie=1197473326009325380, name="Lock1") 2025-06-24T21:13:27.060657Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:151:2173], cookie=1197473326009325380) 2025-06-24T21:13:27.061089Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:154:2176], cookie=5006372987795952105, name="Lock1") 2025-06-24T21:13:27.061162Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:154:2176], cookie=5006372987795952105) 2025-06-24T21:13:27.390976Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:27.391102Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:27.412338Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:27.412684Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:27.437575Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:27.438119Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:135:2159], cookie=15352658648612250678, session=0, seqNo=0) 2025-06-24T21:13:27.438300Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:27.450758Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:135:2159], cookie=15352658648612250678, session=1) 2025-06-24T21:13:27.451092Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:135:2159], cookie=16106424238613261697, session=0, seqNo=0) 2025-06-24T21:13:27.451253Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-24T21:13:27.463520Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:135:2159], cookie=16106424238613261697, session=2) 2025-06-24T21:13:27.464268Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[3:135:2159], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-24T21:13:27.464466Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-24T21:13:27.464585Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-24T21:13:27.476805Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[3:135:2159], cookie=111) 2025-06-24T21:13:27.477144Z node 3 :KESUS_TABLET DEBUG: 
tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[3:135:2159], cookie=222, session=2, semaphore="Lock1" count=1) 2025-06-24T21:13:27.477455Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[3:135:2159], cookie=333, session=2, semaphore="Lock1" count=1) 2025-06-24T21:13:27.477528Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:124: [72057594037927937] Deleting session 2 / semaphore 1 "Lock1" waiter link 2025-06-24T21:13:27.489580Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[3:135:2159], cookie=222) 2025-06-24T21:13:27.489667Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[3:135:2159], cookie=333) 2025-06-24T21:13:27.490278Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[3:154:2176], cookie=2563586906924877928, name="Lock1") 2025-06-24T21:13:27.490374Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[3:154:2176], cookie=2563586906924877928) 2025-06-24T21:13:27.490855Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[3:157:2179], cookie=8568310441689463, name="Lock1") 2025-06-24T21:13:27.490928Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[3:157:2179], cookie=8568310441689463) 2025-06-24T21:13:27.506280Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:27.506411Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:27.506934Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:27.507286Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:27.546711Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:27.546904Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-24T21:13:27.547390Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[3:197:2209], cookie=5540271370446120630, name="Lock1") 2025-06-24T21:13:27.547509Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[3:197:2209], cookie=5540271370446120630) 2025-06-24T21:13:27.548160Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[3:205:2216], cookie=2035473920941214089, name="Lock1") 2025-06-24T21:13:27.548243Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[3:205:2216], cookie=2035473920941214089) 2025-06-24T21:13:28.051208Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:28.051318Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:28.073149Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:28.073672Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: 
[72057594037927937] TTxInit::Execute 2025-06-24T21:13:28.097059Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:28.097519Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:135:2159], cookie=17612672901915046431, session=0, seqNo=0) 2025-06-24T21:13:28.097662Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:28.111344Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:135:2159], cookie=17612672901915046431, session=1) 2025-06-24T21:13:28.111649Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:135:2159], cookie=10176657207640254237, session=0, seqNo=0) 2025-06-24T21:13:28.111757Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-24T21:13:28.124012Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:135:2159], cookie=10176657207640254237, session=2) 2025-06-24T21:13:28.124326Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:135:2159], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-24T21:13:28.124472Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-24T21:13:28.124560Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-24T21:13:28.137104Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:135:2159], cookie=111) 2025-06-24T21:13:28.137499Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:135:2159], cookie=222, session=2, semaphore="Lock1" count=1) 2025-06-24T21:13:28.137905Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[4:135:2159], cookie=333, name="Lock1") 2025-06-24T21:13:28.138109Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:124: [72057594037927937] Deleting session 2 / semaphore 1 "Lock1" waiter link 2025-06-24T21:13:28.152263Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:135:2159], cookie=222) 2025-06-24T21:13:28.152369Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[4:135:2159], cookie=333) 2025-06-24T21:13:28.511140Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:28.511281Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:28.529072Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:28.529195Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:28.543743Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:28.549775Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:133:2157], cookie=3600196069286387197, path="/Root", config={ 
MaxUnitsPerSecond: 100 }) 2025-06-24T21:13:28.550045Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-24T21:13:28.572855Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:133:2157], cookie=3600196069286387197) 2025-06-24T21:13:28.573479Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:142:2164], cookie=15951724141565966939, path="/Root/Res", config={ }) 2025-06-24T21:13:28.573721Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Root/Res" 2025-06-24T21:13:28.588608Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:142:2164], cookie=15951724141565966939) 2025-06-24T21:13:28.594206Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:147:2169]. Cookie: 11702358120385464264. Data: { Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Root/Res" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 100 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:28.594315Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:147:2169], cookie=11702358120385464264) 2025-06-24T21:13:28.595724Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:193: [72057594037927937] Send TEvUpdateConsumptionStateAck to [5:147:2169]. Cookie: 4603124433403858887. Data: { } 2025-06-24T21:13:28.595809Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:198: [72057594037927937] Update quoter resources consumption state (sender=[5:147:2169], cookie=4603124433403858887) 2025-06-24T21:13:28.638149Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:147:2169]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 10 StateNotification { Status: SUCCESS } } } 2025-06-24T21:13:28.691400Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:147:2169]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 10 StateNotification { Status: SUCCESS } } } 2025-06-24T21:13:28.724167Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:147:2169]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 10 StateNotification { Status: SUCCESS } } } 2025-06-24T21:13:28.766264Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:147:2169]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 10 StateNotification { Status: SUCCESS } } } 2025-06-24T21:13:28.808097Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:147:2169]. Cookie: 0. 
Data: { ResourcesInfo { ResourceId: 2 Amount: 10 StateNotification { Status: SUCCESS } } } >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithCustomGroupAttributeGood [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsUseInvalidSearchFilterBad >> TKesusTest::TestRegisterProxyLinkFailureRace [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD] >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD] >> TKesusTest::TestReleaseLockFailure >> TKesusTest::TestQuoterSubscribeOnResource [GOOD] >> TopicAutoscaling::ControlPlane_CDC [GOOD] >> TopicAutoscaling::ControlPlane_CDC_Disable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestSessionStealingAnyKey [GOOD] Test command err: 2025-06-24T21:13:27.262333Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:27.262451Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:27.279577Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:27.279817Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:27.304784Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:27.305349Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:135:2159], cookie=2043931895996796896, session=0, seqNo=0) 2025-06-24T21:13:27.305535Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:27.320462Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:135:2159], cookie=2043931895996796896, session=1) 2025-06-24T21:13:27.322349Z node 1 :KESUS_TABLET DEBUG: tx_session_detach.cpp:28: [72057594037927937] TTxSessionDetach::Execute (sender=[1:135:2159], cookie=11876323524836084948, session=2) 2025-06-24T21:13:27.322447Z node 1 :KESUS_TABLET DEBUG: tx_session_detach.cpp:59: [72057594037927937] TTxSessionDetach::Complete (sender=[1:135:2159], cookie=11876323524836084948) 2025-06-24T21:13:27.323210Z node 1 :KESUS_TABLET DEBUG: tx_session_detach.cpp:100: [72057594037927937] Fast-path detach session=1 from sender=[1:135:2159], cookie=14929354624320495648 2025-06-24T21:13:27.324089Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:135:2159], cookie=6013626531866480806, session=1, seqNo=0) 2025-06-24T21:13:27.336254Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:135:2159], cookie=6013626531866480806, session=1) 2025-06-24T21:13:27.336644Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:135:2159], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-24T21:13:27.336818Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-24T21:13:27.336923Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-24T21:13:27.337104Z node 1 :KESUS_TABLET DEBUG: tx_session_detach.cpp:28: [72057594037927937] TTxSessionDetach::Execute (sender=[1:135:2159], 
cookie=8352281702510260293, session=1) 2025-06-24T21:13:27.347437Z node 1 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:27: [72057594037927937] TTxSessionTimeout::Execute (session=1) 2025-06-24T21:13:27.347527Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] Deleting session 1 2025-06-24T21:13:27.347572Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 1 "Lock1" owner link 2025-06-24T21:13:27.359708Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:135:2159], cookie=111) 2025-06-24T21:13:27.359808Z node 1 :KESUS_TABLET DEBUG: tx_session_detach.cpp:59: [72057594037927937] TTxSessionDetach::Complete (sender=[1:135:2159], cookie=8352281702510260293) 2025-06-24T21:13:27.359853Z node 1 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:56: [72057594037927937] TTxSessionTimeout::Complete (session=1) 2025-06-24T21:13:27.681323Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:27.681437Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:27.695305Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:27.695503Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:27.721492Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:27.721934Z node 2 :KESUS_TABLET DEBUG: tx_config_set.cpp:28: [72057594037927937] TTxConfigSet::Execute (sender=[2:135:2159], cookie=11790358891698510560, path="") 2025-06-24T21:13:27.753870Z node 2 :KESUS_TABLET DEBUG: tx_config_set.cpp:94: [72057594037927937] TTxConfigSet::Complete (sender=[2:135:2159], cookie=11790358891698510560, status=SUCCESS) 2025-06-24T21:13:27.754552Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:144:2166], cookie=111, session=0, seqNo=0) 2025-06-24T21:13:27.754665Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:27.754803Z node 2 :KESUS_TABLET DEBUG: tx_session_detach.cpp:28: [72057594037927937] TTxSessionDetach::Execute (sender=[2:144:2166], cookie=12278192298338012720, session=1) 2025-06-24T21:13:27.765173Z node 2 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:27: [72057594037927937] TTxSessionTimeout::Execute (session=1) 2025-06-24T21:13:27.765249Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] Deleting session 1 2025-06-24T21:13:27.777253Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:144:2166], cookie=111, session=1) 2025-06-24T21:13:27.777337Z node 2 :KESUS_TABLET DEBUG: tx_session_detach.cpp:59: [72057594037927937] TTxSessionDetach::Complete (sender=[2:144:2166], cookie=12278192298338012720) 2025-06-24T21:13:27.777386Z node 2 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:56: [72057594037927937] TTxSessionTimeout::Complete (session=1) 2025-06-24T21:13:28.113898Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:28.114014Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:28.135499Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:28.135858Z node 3 :KESUS_TABLET DEBUG: 
tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:28.161136Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:28.161700Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:135:2159], cookie=168446113847413486, session=0, seqNo=0) 2025-06-24T21:13:28.161928Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:28.174120Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:135:2159], cookie=168446113847413486, session=1) 2025-06-24T21:13:28.174845Z node 3 :KESUS_TABLET DEBUG: tx_session_destroy.cpp:37: [72057594037927937] TTxSessionDestroy::Execute (sender=[3:135:2159], cookie=17017507534170282929, session=1) 2025-06-24T21:13:28.174934Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] Deleting session 1 2025-06-24T21:13:28.187729Z node 3 :KESUS_TABLET DEBUG: tx_session_destroy.cpp:75: [72057594037927937] TTxSessionDestroy::Complete (sender=[3:135:2159], cookie=17017507534170282929) 2025-06-24T21:13:28.188619Z node 3 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[3:152:2174], cookie=8453486735029253088) 2025-06-24T21:13:28.188703Z node 3 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[3:152:2174], cookie=8453486735029253088) 2025-06-24T21:13:28.189231Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:155:2177], cookie=16594402674346555730, session=0, seqNo=0) 2025-06-24T21:13:28.189377Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-24T21:13:28.201962Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:155:2177], cookie=16594402674346555730, session=2) 2025-06-24T21:13:28.203097Z node 3 :KESUS_TABLET DEBUG: tx_session_destroy.cpp:37: [72057594037927937] TTxSessionDestroy::Execute (sender=[3:135:2159], cookie=1172168724681970182, session=2) 2025-06-24T21:13:28.203206Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] Deleting session 2 2025-06-24T21:13:28.215496Z node 3 :KESUS_TABLET DEBUG: tx_session_destroy.cpp:75: [72057594037927937] TTxSessionDestroy::Complete (sender=[3:135:2159], cookie=1172168724681970182) 2025-06-24T21:13:28.572857Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:28.572988Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:28.594417Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:28.595073Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:28.619872Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:28.620828Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:135:2159], cookie=12345, session=0, seqNo=0) 2025-06-24T21:13:28.621008Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:28.633497Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete 
(sender=[4:135:2159], cookie=12345, session=1) 2025-06-24T21:13:28.634461Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:142:2164], cookie=23456, session=1, seqNo=0) 2025-06-24T21:13:28.648357Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:142:2164], cookie=23456, session=1) 2025-06-24T21:13:29.020321Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:29.020448Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:29.041053Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:29.041182Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:29.057233Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:29.058264Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:133:2157], cookie=12345, session=0, seqNo=0) 2025-06-24T21:13:29.058451Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:29.085997Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:133:2157], cookie=12345, session=1) 2025-06-24T21:13:29.087084Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:139:2162], cookie=23456, session=1, seqNo=0) 2025-06-24T21:13:29.105432Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:139:2162], cookie=23456, session=1) >> TKesusTest::TestDeleteSemaphore [GOOD] >> TKesusTest::TestDescribeSemaphoreWatches ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestRegisterProxyLinkFailureRace [GOOD] Test command err: 2025-06-24T21:13:26.420122Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.420248Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:26.440004Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:26.440282Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:26.467233Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:26.960812Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.960884Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:26.977572Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:26.977828Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:27.002452Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:27.369836Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:27.369941Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:27.386128Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: 
[72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:27.386406Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:27.410936Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:27.818017Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:27.818130Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:27.836885Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:27.837380Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:27.862666Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:27.864254Z node 4 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037927937] NodeDisconnected NodeId# 5 2025-06-24T21:13:27.864752Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:318: Got TEvServerDisconnected([4:192:2160]) 2025-06-24T21:13:28.489732Z node 6 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:28.489877Z node 6 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:28.512711Z node 6 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:28.512879Z node 6 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute ... waiting for register request 2025-06-24T21:13:28.540948Z node 6 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete ... blocking NKikimr::NKesus::TEvKesus::TEvRegisterProxy from TEST_ACTOR_RUNTIME to KESUS_TABLET_ACTOR cookie 5501341004942782130 ... waiting for register request (done) ... 
unblocking NKikimr::NKesus::TEvKesus::TEvRegisterProxy from TEST_ACTOR_RUNTIME to KESUS_TABLET_ACTOR 2025-06-24T21:13:28.541694Z node 6 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037927937] NodeDisconnected NodeId# 7 2025-06-24T21:13:28.542055Z node 6 :KESUS_TABLET TRACE: quoter_runtime.cpp:318: Got TEvServerDisconnected([6:192:2160]) >> TKesusTest::TestAttachOutOfSequence >> TKesusTest::TestReleaseLockFailure [GOOD] >> TKesusTest::TestReleaseSemaphore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestQuoterSubscribeOnResource [GOOD] Test command err: 2025-06-24T21:13:26.420141Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.420302Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:26.442524Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:26.442797Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:26.470899Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:26.478851Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:135:2159], cookie=362384916887191100, path="/Root", config={ MaxUnitsPerSecond: 100500 MaxBurstSizeCoefficient: 1.5 }) 2025-06-24T21:13:26.479209Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-24T21:13:26.491688Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:135:2159], cookie=362384916887191100) 2025-06-24T21:13:26.492409Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:144:2166], cookie=9379041676677201725, path="/Root/Folder", config={ MaxUnitsPerSecond: 100500 MaxBurstSizeCoefficient: 1.5 }) 2025-06-24T21:13:26.492665Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Root/Folder" 2025-06-24T21:13:26.505192Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:144:2166], cookie=9379041676677201725) 2025-06-24T21:13:26.505800Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:149:2171], cookie=8382738702114319190, path="/Root/Q1", config={ MaxUnitsPerSecond: 10 }) 2025-06-24T21:13:26.506019Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 3 "Root/Q1" 2025-06-24T21:13:26.518195Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:149:2171], cookie=8382738702114319190) 2025-06-24T21:13:26.518837Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:154:2176], cookie=8225170084606470219, path="/Root/Folder/Q1", config={ MaxUnitsPerSecond: 10 }) 2025-06-24T21:13:26.519046Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 4 "Root/Folder/Q1" 2025-06-24T21:13:26.530922Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:154:2176], 
cookie=8225170084606470219) 2025-06-24T21:13:26.531507Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:159:2181], cookie=16550214586970426714, path="/Root/Folder/Q2", config={ MaxUnitsPerSecond: 10 }) 2025-06-24T21:13:26.531681Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 5 "Root/Folder/Q2" 2025-06-24T21:13:26.543862Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:159:2181], cookie=16550214586970426714) 2025-06-24T21:13:26.544474Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:164:2186], cookie=2389283808433390641, path="/Root/Folder/Q3", config={ MaxUnitsPerSecond: 10 }) 2025-06-24T21:13:26.544724Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 6 "Root/Folder/Q3" 2025-06-24T21:13:26.557044Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:164:2186], cookie=2389283808433390641) 2025-06-24T21:13:26.557711Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:169:2191], cookie=16799730726481805157, path="/Root2", config={ MaxUnitsPerSecond: 100500 MaxBurstSizeCoefficient: 1.5 }) 2025-06-24T21:13:26.557863Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 7 "Root2" 2025-06-24T21:13:26.570032Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:169:2191], cookie=16799730726481805157) 2025-06-24T21:13:26.570730Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:174:2196], cookie=17715210029980344959, path="/Root2/Q", config={ MaxUnitsPerSecond: 10 }) 2025-06-24T21:13:26.570970Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 8 "Root2/Q" 2025-06-24T21:13:26.583333Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:174:2196], cookie=17715210029980344959) 2025-06-24T21:13:26.584106Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:179:2201], cookie=17627508383729970694, ids=[100], paths=[], recursive=0) 2025-06-24T21:13:26.584309Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:179:2201], cookie=17627508383729970694) 2025-06-24T21:13:26.584852Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:182:2204], cookie=14443326683531524037, ids=[], paths=[Nonexistent/Path], recursive=0) 2025-06-24T21:13:26.584946Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:182:2204], cookie=14443326683531524037) 2025-06-24T21:13:26.585463Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:185:2207], cookie=3070231789074772420, ids=[], paths=[/Root, ], recursive=0) 2025-06-24T21:13:26.585558Z node 1 :KESUS_TABLET DEBUG: 
tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:185:2207], cookie=3070231789074772420) 2025-06-24T21:13:26.586053Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:188:2210], cookie=4897643888633304721, ids=[1, 1], paths=[], recursive=0) 2025-06-24T21:13:26.586114Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:188:2210], cookie=4897643888633304721) 2025-06-24T21:13:26.586646Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:191:2213], cookie=13769144475665410681, ids=[], paths=[/Root2/Q, /Root2/Q], recursive=0) 2025-06-24T21:13:26.586716Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:191:2213], cookie=13769144475665410681) 2025-06-24T21:13:26.587719Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:194:2216], cookie=2133005010427523651, ids=[], paths=[], recursive=1) 2025-06-24T21:13:26.587804Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:194:2216], cookie=2133005010427523651) 2025-06-24T21:13:26.589804Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:197:2219], cookie=1059471449333035792, ids=[], paths=[], recursive=0) 2025-06-24T21:13:26.589879Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:197:2219], cookie=1059471449333035792) 2025-06-24T21:13:26.590418Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:200:2222], cookie=6464940916509201903, ids=[3, 2], paths=[], recursive=1) 2025-06-24T21:13:26.590492Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:200:2222], cookie=6464940916509201903) 2025-06-24T21:13:26.591052Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:203:2225], cookie=13512756597504791071, ids=[3, 2], paths=[], recursive=0) 2025-06-24T21:13:26.591113Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:203:2225], cookie=13512756597504791071) 2025-06-24T21:13:26.591738Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:206:2228], cookie=6052281995077083623, ids=[], paths=[Root2/], recursive=1) 2025-06-24T21:13:26.591826Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:206:2228], cookie=6052281995077083623) 2025-06-24T21:13:26.592377Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:209:2231], cookie=3241703692418387297, ids=[], paths=[Root2/], recursive=0) 2025-06-24T21:13:26.592451Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete 
(sender=[1:209:2231], cookie=3241703692418387297) 2025-06-24T21:13:26.608820Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.608971Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:26.609445Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:26.610063Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:26.627900Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:26.628362Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:249:2261], cookie=14202778739275781774, ids=[100], paths=[], recursive=0) 2025-06-24T21:13:26.628468Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:249:2261], cookie=14202778739275781774) 2025-06-24T21:13:26.629279Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:255:2266], cookie=7519648101856340553, ids=[], paths=[Nonexistent/Path], recursive=0) 2025-06-24T21:13:26.629384Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:255:2266], cookie=7519648101856340553) 2025-06-24T21:13:26.630112Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:258:2269], cookie=7390826993538549647, ids=[], paths=[/Root, ], recursive=0) 2025-06-24T21:13:26.630222Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:258:2269], cookie=7390826993538549647) 2025-06-24T21:13:26.630946Z node 1 ... 
US_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 4 "Root/Folder/Q1" 2025-06-24T21:13:28.992053Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[4:154:2176], cookie=6932357457580506380) 2025-06-24T21:13:28.992648Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:159:2181], cookie=10732668794980685562, ids=[], paths=[], recursive=1) 2025-06-24T21:13:28.992753Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:159:2181], cookie=10732668794980685562) 2025-06-24T21:13:28.993646Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:165:2187], cookie=2748114786791271237, ids=[], paths=[], recursive=1) 2025-06-24T21:13:28.993732Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:165:2187], cookie=2748114786791271237) 2025-06-24T21:13:28.994621Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:171:2193], cookie=13986055161416195465, ids=[], paths=[], recursive=1) 2025-06-24T21:13:28.994700Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:171:2193], cookie=13986055161416195465) 2025-06-24T21:13:28.996257Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:32: [72057594037927937] TTxQuoterResourceDelete::Execute (sender=[4:174:2196], cookie=4111367135298753001, id=0, path="/Root/Folder/NonexistingRes") 2025-06-24T21:13:28.996371Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:70: [72057594037927937] TTxQuoterResourceDelete::Complete (sender=[4:174:2196], cookie=4111367135298753001) 2025-06-24T21:13:28.996911Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:177:2199], cookie=3076166844666212918, ids=[], paths=[], recursive=1) 2025-06-24T21:13:28.996989Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:177:2199], cookie=3076166844666212918) 2025-06-24T21:13:28.997471Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:32: [72057594037927937] TTxQuoterResourceDelete::Execute (sender=[4:180:2202], cookie=10049653488112840732, id=100, path="") 2025-06-24T21:13:28.997534Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:70: [72057594037927937] TTxQuoterResourceDelete::Complete (sender=[4:180:2202], cookie=10049653488112840732) 2025-06-24T21:13:28.998015Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:183:2205], cookie=1404416812948958593, ids=[], paths=[], recursive=1) 2025-06-24T21:13:28.998082Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:183:2205], cookie=1404416812948958593) 2025-06-24T21:13:28.998579Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:32: [72057594037927937] TTxQuoterResourceDelete::Execute (sender=[4:186:2208], cookie=2561904949824601666, id=3, path="") 2025-06-24T21:13:28.998640Z node 4 :KESUS_TABLET DEBUG: 
tx_quoter_resource_delete.cpp:70: [72057594037927937] TTxQuoterResourceDelete::Complete (sender=[4:186:2208], cookie=2561904949824601666) 2025-06-24T21:13:28.999137Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:189:2211], cookie=5925431789367023402, ids=[], paths=[], recursive=1) 2025-06-24T21:13:28.999228Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:189:2211], cookie=5925431789367023402) 2025-06-24T21:13:28.999890Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:32: [72057594037927937] TTxQuoterResourceDelete::Execute (sender=[4:192:2214], cookie=3417115640010429265, id=0, path="/Root/Folder/Q1") 2025-06-24T21:13:29.000044Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:61: [72057594037927937] Deleted quoter resource 4 "Root/Folder/Q1" 2025-06-24T21:13:29.015114Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:70: [72057594037927937] TTxQuoterResourceDelete::Complete (sender=[4:192:2214], cookie=3417115640010429265) 2025-06-24T21:13:29.015951Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:197:2219], cookie=329745959718297262, ids=[], paths=[], recursive=1) 2025-06-24T21:13:29.016053Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:197:2219], cookie=329745959718297262) 2025-06-24T21:13:29.030419Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:29.030515Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:29.030961Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:29.032260Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:29.072022Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:29.072393Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:237:2249], cookie=11095912460375757934, ids=[], paths=[], recursive=1) 2025-06-24T21:13:29.072482Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:237:2249], cookie=11095912460375757934) 2025-06-24T21:13:29.073182Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:32: [72057594037927937] TTxQuoterResourceDelete::Execute (sender=[4:243:2254], cookie=4443100141138264344, id=3, path="") 2025-06-24T21:13:29.073338Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:61: [72057594037927937] Deleted quoter resource 3 "Root/Folder" 2025-06-24T21:13:29.096542Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:70: [72057594037927937] TTxQuoterResourceDelete::Complete (sender=[4:243:2254], cookie=4443100141138264344) 2025-06-24T21:13:29.097263Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:248:2259], cookie=8486214655067704972, ids=[], paths=[], recursive=1) 2025-06-24T21:13:29.097375Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:248:2259], cookie=8486214655067704972) 
2025-06-24T21:13:29.109928Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:29.110016Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:29.110378Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:29.110877Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:29.148155Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:29.148528Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:288:2289], cookie=15999017193583690871, ids=[], paths=[], recursive=1) 2025-06-24T21:13:29.148613Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:288:2289], cookie=15999017193583690871) 2025-06-24T21:13:29.532568Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:29.532655Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:29.546502Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:29.546601Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:29.561157Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:29.561664Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:133:2157], cookie=3812325195389547291, path="/Q1", config={ MaxUnitsPerSecond: 10 }) 2025-06-24T21:13:29.561896Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Q1" 2025-06-24T21:13:29.587556Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:133:2157], cookie=3812325195389547291) 2025-06-24T21:13:29.588259Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:142:2164], cookie=10133910358823466861, path="/Q2", config={ MaxUnitsPerSecond: 10 }) 2025-06-24T21:13:29.588489Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Q2" 2025-06-24T21:13:29.608002Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:142:2164], cookie=10133910358823466861) 2025-06-24T21:13:29.609934Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:147:2169]. Cookie: 10125696754734420385. 
Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Q1" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 10 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:29.610025Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:147:2169], cookie=10125696754734420385) 2025-06-24T21:13:29.610900Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:147:2169]. Cookie: 2134145817526377995. Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Q1" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 10 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Q2" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 10 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } Results { Error { Status: NOT_FOUND Issues { message: "Resource \"/Q3\" doesn\'t exist." 
} } } ProtocolVersion: 1 } 2025-06-24T21:13:29.610958Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:147:2169], cookie=2134145817526377995) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD] Test command err: 2025-06-24T21:12:50.721152Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626895351497341:2189];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:50.722437Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00428e/r3tmp/tmp2bPuq9/pdisk_1.dat 2025-06-24T21:12:51.164104Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:51.164210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:51.201564Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:51.222940Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12607, node 1 2025-06-24T21:12:51.383988Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:51.384031Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:51.384040Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:51.384176Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:51.722466Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:51.727081Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:51.730844Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:51.730889Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:51.732458Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:10993, port: 10993 2025-06-24T21:12:51.732574Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:51.791703Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-24T21:12:51.839789Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****vsXQ (5FA991BA) () has now valid token of ldapuser@ldap 2025-06-24T21:12:54.211864Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626915177668997:2059];send_to=[0:7307199536658146131:7762515]; 
2025-06-24T21:12:54.211907Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00428e/r3tmp/tmpB2KRF8/pdisk_1.dat 2025-06-24T21:12:54.551213Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:54.552429Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626915177668979:2079] 1750799574210899 != 1750799574210902 2025-06-24T21:12:54.561774Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:54.561871Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:54.564176Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19831, node 2 2025-06-24T21:12:54.705883Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:54.705903Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:54.705910Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:54.706025Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:54.810749Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:54.814524Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:54.814552Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:54.815218Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:65453, port: 65453 2025-06-24T21:12:54.815271Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=invalidRobouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:54.867713Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldaps://localhost:65453. Invalid credentials 2025-06-24T21:12:54.868282Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****Tshg (8EBB1E00) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldaps://localhost:65453. 
Invalid credentials)' 2025-06-24T21:12:58.451767Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519626929912162167:2178];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00428e/r3tmp/tmpIeOpw2/pdisk_1.dat 2025-06-24T21:12:58.564110Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:58.706825Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519626929912162014:2079] 1750799578383517 != 1750799578383520 2025-06-24T21:12:58.715940Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:58.738858Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:58.740803Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:58.744715Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29788, node 3 2025-06-24T21:12:58.891755Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:58.891782Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:58.891790Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:58.891920Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:59.017073Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:59.020566Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:59.020605Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:59.021381Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:9822, port: 9822 2025-06-24T21:12:59.021448Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:59.075711Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldaps://localhost:9822. Invalid credentials 2025-06-24T21:12:59.076261Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****5MAA (1DA16187) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldaps://localhost:9822. 
Invalid credentials)' 2025-06-24T21:13:02.784728Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626949688123706:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:02.784783Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00428e/r3tmp/tmpbtUiGC/pdisk_1.dat 2025-06-24T21:13:02.988934Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:02.989028Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:03.003306Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519626949688123688:2079] 1750799582784094 != 1750799582784097 2025-06-24T21:13:03.004170Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:03.005110Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28701, node 4 2025-06-24T21:13:03.071760Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:03.071786Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:03.071794Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:03.071926Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:03.270386Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:03.271489Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:03.271524Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:03.272265Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:27266, port: 27266 2025-06-24T21:13:03.272350Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:03.323870Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser ... 
ode 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:07.383875Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:07.384014Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:07.589556Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:07.592510Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:07.592536Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:07.593310Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:4897, port: 4897 2025-06-24T21:13:07.593427Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:07.654576Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:07.703814Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:13:07.704547Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:13:07.704610Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:07.750339Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:07.799620Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:07.800996Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****jIPA (4AA7D3C3) () has now valid token of ldapuser@ldap 2025-06-24T21:13:07.911311Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:11.896325Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519626967218439347:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:11.896453Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:13:11.927636Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****jIPA (4AA7D3C3) 2025-06-24T21:13:11.927882Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: 
ldaps://localhost:4897, port: 4897 2025-06-24T21:13:11.928044Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:11.995659Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:12.043470Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:13:12.044029Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:13:12.044069Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:12.091490Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:12.139521Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:12.140768Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****jIPA (4AA7D3C3) () has now valid token of ldapuser@ldap 2025-06-24T21:13:15.934781Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****jIPA (4AA7D3C3) 2025-06-24T21:13:15.934919Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:4897, port: 4897 2025-06-24T21:13:15.935012Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:15.991621Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:16.042441Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:13:16.051378Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:13:16.051469Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:16.099459Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:16.148310Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:16.149593Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****jIPA (4AA7D3C3) () has now valid 
token of ldapuser@ldap 2025-06-24T21:13:18.553303Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519627018096346692:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:18.553370Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00428e/r3tmp/tmphrmOgD/pdisk_1.dat 2025-06-24T21:13:18.708574Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7179, node 6 2025-06-24T21:13:18.719135Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:18.719239Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:18.722209Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:18.773339Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:18.773371Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:18.773402Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:18.773561Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:18.899289Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:18.902760Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:18.902799Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:18.903584Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:18978, port: 18978 2025-06-24T21:13:18.903670Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:18.959688Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:19.008101Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****8B_w (CA6D0329) () has now valid token of ldapuser@ldap 2025-06-24T21:13:19.562017Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:22.563571Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****8B_w (CA6D0329) 2025-06-24T21:13:22.563674Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:18978, port: 18978 2025-06-24T21:13:22.563748Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:22.626764Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 
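The LDAP traces above all follow the same shape: the ticket parser binds with the service account (bindDn cn=robouser,dc=search,dc=yandex,dc=net), looks up the user entry with a uid=<login> filter requesting the groups attribute (memberOf), and, when nested groups are enabled, expands membership transitively — first with the matching-rule-in-chain filter (OID 1.2.840.113556.1.4.1941; "attributes: 1.1" means "return no attributes"), then with the "Try to get nested groups - tree traversal" loop that re-queries each discovered group DN for its own memberOf until the set stops growing. As a rough illustration only, here is a hedged Python/ldap3 sketch of that traversal; it is not the ldap_auth_provider.cpp implementation, and the URI, base DN and bind password are placeholders taken from the test output:

from ldap3 import Server, Connection, SUBTREE

BASE_DN = "dc=search,dc=yandex,dc=net"                 # assumed base DN (from the trace)
BIND_DN = "cn=robouser,dc=search,dc=yandex,dc=net"     # service account used for binds
BIND_PW = "robouserPassword"                           # placeholder

def member_of(entry):
    # Case-insensitive lookup of the memberOf values on an ldap3 entry.
    attrs = {k.lower(): v for k, v in entry.entry_attributes_as_dict.items()}
    return set(attrs.get("memberof", []))

def resolve_groups(login, ldap_uri="ldaps://localhost:636"):   # placeholder URI
    conn = Connection(Server(ldap_uri), user=BIND_DN, password=BIND_PW, auto_bind=True)

    # Step 1: find the user entry and its direct groups.
    conn.search(BASE_DN, f"(uid={login})", SUBTREE, attributes=["memberOf"])
    if not conn.entries:
        raise LookupError(f"LDAP user {login} does not exist")
    groups = member_of(conn.entries[0])

    # Step 2: walk the group tree upwards, one search per level, until no new
    # parent groups appear (mirrors the (|(entryDn=...)...) filters in the
    # trace; DN values are not escaped here, which real code would have to do).
    frontier = set(groups)
    while frontier:
        flt = "(|" + "".join(f"(entryDn={dn})" for dn in frontier) + ")"
        conn.search(BASE_DN, flt, SUBTREE, attributes=["memberOf"])
        parents = set()
        for entry in conn.entries:
            parents |= member_of(entry)
        frontier = parents - groups
        groups |= parents
    return groups

A failed bind with an invalid bindDn or password short-circuits this flow, which is what the "Could not perform initial LDAP bind ... Invalid credentials" tickets above exercise.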
2025-06-24T21:13:22.672144Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****8B_w (CA6D0329) () has now valid token of ldapuser@ldap 2025-06-24T21:13:23.555346Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519627018096346692:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:23.555473Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:13:27.566486Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****8B_w (CA6D0329) 2025-06-24T21:13:27.566649Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:18978, port: 18978 2025-06-24T21:13:27.566765Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:27.623770Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:27.668149Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****8B_w (CA6D0329) () has now valid token of ldapuser@ldap ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD] Test command err: 2025-06-24T21:12:50.715999Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626896588587841:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:50.719468Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042c1/r3tmp/tmprqBjCh/pdisk_1.dat 2025-06-24T21:12:51.229178Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19066, node 1 2025-06-24T21:12:51.269409Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:51.269531Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:51.273365Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:51.383629Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:51.383654Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:51.383661Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:51.383793Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:51.722516Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:51.756793Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:51.761173Z node 
1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:51.761229Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:51.762138Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:3314, port: 3314 2025-06-24T21:12:51.762877Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:51.773057Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-24T21:12:51.816233Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****_BnQ (5CEDC0D1) () has now valid token of ldapuser@ldap test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042c1/r3tmp/tmpZ9XsXB/pdisk_1.dat 2025-06-24T21:12:54.451194Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626915589316708:2246];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:54.451376Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:54.649470Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:54.652499Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:54.652574Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:54.657052Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7786, node 2 2025-06-24T21:12:54.839821Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:54.839846Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:54.839853Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:54.839951Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:55.019261Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:55.023023Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:55.023045Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:55.023733Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:16098, port: 16098 2025-06-24T21:12:55.023805Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=invalidRobouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:55.026922Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldap://localhost:16098. 
Invalid credentials 2025-06-24T21:12:55.027406Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****bo8Q (F6122E5A) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldap://localhost:16098. Invalid credentials)' 2025-06-24T21:12:58.646903Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519626932889130795:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:58.646960Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042c1/r3tmp/tmpVWiHG1/pdisk_1.dat 2025-06-24T21:12:58.847684Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:58.847761Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:58.848843Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:58.864617Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11523, node 3 2025-06-24T21:12:58.948027Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:58.948049Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:58.948057Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:58.948188Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:59.353871Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:59.358377Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:59.358404Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:59.359088Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:4930, port: 4930 2025-06-24T21:12:59.359161Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:59.379923Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:4930. Invalid credentials 2025-06-24T21:12:59.380142Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****4QNw (76E43A87) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:4930. 
Invalid credentials)' 2025-06-24T21:13:02.960562Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626950001986828:2172];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:02.963111Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042c1/r3tmp/tmpOkQ24C/pdisk_1.dat 2025-06-24T21:13:03.076260Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:03.077687Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519626950001986678:2079] 1750799582951126 != 1750799582951129 TServer::EnableGrpc on GrpcPort 22400, node 4 2025-06-24T21:13:03.121867Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:03.121957Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:03.124285Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:03.163821Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:03.163847Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:03.163856Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:03.164004Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:03.363293Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:03.365144Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:03.365175Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:03.365853Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:17992, port: 17992 2025-06-24T21:13:03.365924Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:03.374203Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:03.374794Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:340: LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldap://localhost:17992 return no entries 2025-06-24T21:13:03.375031Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****neVw (267AF12F) () has now permanent error message 'Could not login via LDAP (LDAP user ... 
ize from file: (empty maybe) 2025-06-24T21:13:07.364111Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:07.495311Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:07.497992Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:07.498016Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:07.498774Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:15514, port: 15514 2025-06-24T21:13:07.498888Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:07.513465Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:07.555535Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:13:07.556848Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:13:07.556905Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:07.599515Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:07.648214Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:07.649505Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****u2AQ (FC4C03C9) () has now valid token of ldapuser@ldap 2025-06-24T21:13:08.099836Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:11.107611Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****u2AQ (FC4C03C9) 2025-06-24T21:13:11.107685Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:15514, port: 15514 2025-06-24T21:13:11.107765Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:11.111097Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:11.159412Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 
2025-06-24T21:13:11.160287Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:13:11.160344Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:11.207474Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:11.251497Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:11.252342Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****u2AQ (FC4C03C9) () has now valid token of ldapuser@ldap 2025-06-24T21:13:12.071936Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519626971196828518:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:12.072062Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:13:15.109582Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****u2AQ (FC4C03C9) 2025-06-24T21:13:15.109679Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:15514, port: 15514 2025-06-24T21:13:15.109757Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:15.113810Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:15.155561Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:13:15.156567Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:13:15.156627Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:15.207402Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:15.251522Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:15.252482Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****u2AQ (FC4C03C9) () has now valid token of ldapuser@ldap 2025-06-24T21:13:18.315477Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519627017527765017:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:18.315562Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042c1/r3tmp/tmpbHGsKg/pdisk_1.dat 2025-06-24T21:13:18.515827Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:18.531507Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519627017527764997:2079] 1750799598305311 != 1750799598305314 TServer::EnableGrpc on GrpcPort 32082, node 6 2025-06-24T21:13:18.643948Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:18.644036Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:18.645231Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:18.647872Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:18.647894Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:18.647903Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:18.648031Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:18.931328Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:18.933046Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:18.933079Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:18.933789Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:63018, port: 63018 2025-06-24T21:13:18.933856Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:18.945258Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:18.994655Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****9_TQ (BA246898) () has now valid token of ldapuser@ldap 2025-06-24T21:13:19.385868Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:22.384324Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****9_TQ (BA246898) 2025-06-24T21:13:22.384462Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:63018, port: 63018 2025-06-24T21:13:22.384562Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:22.387854Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: 
search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:22.438951Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****9_TQ (BA246898) () has now valid token of ldapuser@ldap 2025-06-24T21:13:23.312996Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519627017527765017:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:23.313131Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:13:26.387423Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****9_TQ (BA246898) 2025-06-24T21:13:26.387552Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:63018, port: 63018 2025-06-24T21:13:26.387619Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:26.391971Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:26.443716Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****9_TQ (BA246898) () has now valid token of ldapuser@ldap >> TKesusTest::TestAttachOutOfSequence [GOOD] >> TKesusTest::TestAttachOutOfSequenceInTx >> TKesusTest::TestQuoterAccountResourcesOnDemand [GOOD] >> TKesusTest::TestQuoterAccountResourcesPaced >> TKesusTest::TestReleaseSemaphore [GOOD] >> TKesusTest::TestSemaphoreData >> TKesusTest::TestAttachOutOfSequenceInTx [GOOD] >> TKesusTest::TestAttachThenReRegister >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-54 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-55 >> TKesusTest::TestSemaphoreData [GOOD] >> TKesusTest::TestSemaphoreReleaseReacquire >> TKesusTest::TestAttachThenReRegister [GOOD] >> TKesusTest::TestAttachTimeoutTooBig >> TKesusTest::TestQuoterAccountResourcesBurst >> KqpKv::ReadRows_ExternalBlobs-UseExtBlobsPrecharge [GOOD] >> KqpKv::ReadRows_Decimal >> TKesusTest::TestDescribeSemaphoreWatches [GOOD] >> TKesusTest::TestGetQuoterResourceCounters >> LdapAuthProviderTest_StartTls::LdapRefreshRemoveUserBad [GOOD] >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoWithError >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-18 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-19 >> TKesusTest::TestAttachTimeoutTooBig [GOOD] >> TKesusTest::TestCreateSemaphore >> TopicAutoscaling::Simple_PQv1 [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_BeforeAutoscaleAwareSDK >> TKesusTest::TestSemaphoreReleaseReacquire [GOOD] >> TKesusTest::TestSemaphoreSessionFailures >> TKesusTest::TestCreateSemaphore [GOOD] >> TKesusTest::TestGetQuoterResourceCounters [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-66 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-67 >> SchemeReqAdminAccessInTenant::ClusterAdminCanAdministerTenant-DomainLoginOnly-StrictAclCheck [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnEmptyTenant >> CommitOffset::DistributedTxCommit [GOOD] >> CommitOffset::DistributedTxCommit_ChildFirst >> LdapAuthProviderTest_StartTls::LdapFetchGroupsUseInvalidSearchFilterBad 
[GOOD] >> TKesusTest::TestSemaphoreSessionFailures [GOOD] >> TKesusTest::TestQuoterAccountResourcesPaced [GOOD] >> TKesusTest::TestQuoterAccountResourcesDeduplicateClient ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestGetQuoterResourceCounters [GOOD] Test command err: 2025-06-24T21:13:28.430890Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:28.431014Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:28.458472Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:28.458762Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:28.491529Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:28.491871Z node 1 :KESUS_TABLET DEBUG: tx_config_set.cpp:28: [72057594037927937] TTxConfigSet::Execute (sender=[1:135:2159], cookie=5385636465708298489, path="/foo/bar/baz") 2025-06-24T21:13:28.504678Z node 1 :KESUS_TABLET DEBUG: tx_config_set.cpp:94: [72057594037927937] TTxConfigSet::Complete (sender=[1:135:2159], cookie=5385636465708298489, status=SUCCESS) 2025-06-24T21:13:28.505178Z node 1 :KESUS_TABLET DEBUG: tx_config_get.cpp:23: [72057594037927937] TTxConfigGet::Execute (sender=[1:144:2166], cookie=6366563090502373351) 2025-06-24T21:13:28.517692Z node 1 :KESUS_TABLET DEBUG: tx_config_get.cpp:44: [72057594037927937] TTxConfigGet::Complete (sender=[1:144:2166], cookie=6366563090502373351) 2025-06-24T21:13:28.518110Z node 1 :KESUS_TABLET DEBUG: tx_config_set.cpp:28: [72057594037927937] TTxConfigSet::Execute (sender=[1:149:2171], cookie=7174665335798606856, path="/foo/bar/baz") 2025-06-24T21:13:28.530323Z node 1 :KESUS_TABLET DEBUG: tx_config_set.cpp:94: [72057594037927937] TTxConfigSet::Complete (sender=[1:149:2171], cookie=7174665335798606856, status=SUCCESS) 2025-06-24T21:13:28.530776Z node 1 :KESUS_TABLET DEBUG: tx_config_get.cpp:23: [72057594037927937] TTxConfigGet::Execute (sender=[1:154:2176], cookie=15386146407193955693) 2025-06-24T21:13:28.545104Z node 1 :KESUS_TABLET DEBUG: tx_config_get.cpp:44: [72057594037927937] TTxConfigGet::Complete (sender=[1:154:2176], cookie=15386146407193955693) 2025-06-24T21:13:28.559441Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:28.559572Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:28.560091Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:28.560563Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:28.603850Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:28.604196Z node 1 :KESUS_TABLET DEBUG: tx_config_get.cpp:23: [72057594037927937] TTxConfigGet::Execute (sender=[1:196:2208], cookie=16594670961986367055) 2025-06-24T21:13:28.626373Z node 1 :KESUS_TABLET DEBUG: tx_config_get.cpp:44: [72057594037927937] TTxConfigGet::Complete (sender=[1:196:2208], cookie=16594670961986367055) 2025-06-24T21:13:28.627028Z node 1 :KESUS_TABLET DEBUG: tx_config_set.cpp:28: [72057594037927937] TTxConfigSet::Execute (sender=[1:204:2215], cookie=10493511159729742078, path="/foo/bar/baz") 2025-06-24T21:13:28.639555Z node 1 :KESUS_TABLET DEBUG: tx_config_set.cpp:94: [72057594037927937] 
TTxConfigSet::Complete (sender=[1:204:2215], cookie=10493511159729742078, status=SUCCESS) 2025-06-24T21:13:28.640215Z node 1 :KESUS_TABLET DEBUG: tx_config_set.cpp:28: [72057594037927937] TTxConfigSet::Execute (sender=[1:209:2220], cookie=9440125528418762618, path="/foo/bar/baz") 2025-06-24T21:13:28.640310Z node 1 :KESUS_TABLET DEBUG: tx_config_set.cpp:94: [72057594037927937] TTxConfigSet::Complete (sender=[1:209:2220], cookie=9440125528418762618, status=PRECONDITION_FAILED) 2025-06-24T21:13:29.063078Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:29.063194Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:29.081477Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:29.081741Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:29.106258Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:29.106664Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:135:2159], cookie=2245227810338259598, name="Lock1") 2025-06-24T21:13:29.106747Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:135:2159], cookie=2245227810338259598) 2025-06-24T21:13:29.526168Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:29.526271Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:29.552448Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:29.552731Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:29.578112Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:29.578581Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:135:2159], cookie=17516250019460988487, session=0, seqNo=0) 2025-06-24T21:13:29.578714Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:29.590528Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:135:2159], cookie=17516250019460988487, session=1) 2025-06-24T21:13:29.590846Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[3:135:2159], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-24T21:13:29.590996Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-24T21:13:29.591076Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-24T21:13:29.603832Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[3:135:2159], cookie=111) 2025-06-24T21:13:29.604414Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[3:146:2168], cookie=1769307843111990487, name="Lock1", force=0) 2025-06-24T21:13:29.620789Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] 
TTxSemaphoreDelete::Complete (sender=[3:146:2168], cookie=1769307843111990487) 2025-06-24T21:13:29.621351Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[3:151:2173], cookie=3346533996777380542, name="Sem1", force=0) 2025-06-24T21:13:29.640202Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[3:151:2173], cookie=3346533996777380542) 2025-06-24T21:13:29.640716Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[3:156:2178], cookie=3274228094023316446, name="Sem1", limit=42) 2025-06-24T21:13:29.640851Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 2 "Sem1" 2025-06-24T21:13:29.655860Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[3:156:2178], cookie=3274228094023316446) 2025-06-24T21:13:29.656450Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[3:161:2183], cookie=15652206970579424090, name="Sem1", force=0) 2025-06-24T21:13:29.656550Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:58: [72057594037927937] Deleting semaphore 2 "Sem1" 2025-06-24T21:13:29.676364Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[3:161:2183], cookie=15652206970579424090) 2025-06-24T21:13:29.676957Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[3:166:2188], cookie=2356422493873797426, name="Sem1", force=0) 2025-06-24T21:13:29.691936Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[3:166:2188], cookie=2356422493873797426) 2025-06-24T21:13:30.169497Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:30.169626Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:30.192726Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:30.195793Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:30.229177Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:30.229570Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:135:2159], cookie=7040374845894230613, session=0, seqNo=0) 2025-06-24T21:13:30.229709Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:30.241825Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:135:2159], cookie=7040374845894230613, session=1) 2025-06-24T21:13:30.242175Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:135:2159], cookie=9282682793885336465, session=0, seqNo=0) 2025-06-24T21:13:30.242297Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-24T21:13:30.257594Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:135:2159], cookie=9282682793885336465, session=2) 2025-06-24T21:13:30.257884Z node 4 :KESUS_TABLET DEBUG: 
tx_session_detach.cpp:100: [72057594037927937] Fast-path detach session=2 from sender=[4:135:2159], cookie=7432524682668968539 2025-06-24T21:13:30.258399Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[4:147:2169], cookie=7954678574130660192, name="Sem1", limit=3) 2025-06-24T21:13:30.258543Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1" 2025-06-24T21:13:30.270757Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[4:147:2169], cookie=7954678574130660192) 2025-06-24T21:13:30.271097Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:135:2159], cookie=112, name="Sem1") 2025-06-24T21:13:30.271203Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:135:2159], cookie=112) 2025-06-24T21:13:30.271442Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:135:2159], cookie=113, name="Sem1") 2025-06-24T21:13:30.271522Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:135:2159], cookie=113) 2025-06-24T21:13:30.271717Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:135:2159], cookie=12582512061040673549, session=2, seqNo=0) 2025-06-24T21:13:30.283905Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessi ... 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:31.717390Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:31.728022Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:135:2159], cookie=129, session=1, semaphore="Sem2" count=2) 2025-06-24T21:13:31.740164Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:135:2159], cookie=129) 2025-06-24T21:13:31.740535Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:135:2159], cookie=130, name="Sem2") 2025-06-24T21:13:31.740625Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:135:2159], cookie=130) 2025-06-24T21:13:31.740833Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:135:2159], cookie=131, session=1, semaphore="Sem2" count=1) 2025-06-24T21:13:31.753214Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:135:2159], cookie=131) 2025-06-24T21:13:31.753695Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:135:2159], cookie=132, name="Sem2") 2025-06-24T21:13:31.753792Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:135:2159], cookie=132) 2025-06-24T21:13:31.754054Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:135:2159], cookie=133, name="Sem2") 2025-06-24T21:13:31.754125Z 
node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:135:2159], cookie=133) 2025-06-24T21:13:32.237738Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:32.237844Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:32.252354Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:32.252472Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:32.273177Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:32.282087Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:133:2157], cookie=2207299022992378524, path="/Root1", config={ MaxUnitsPerSecond: 1000 }) 2025-06-24T21:13:32.282372Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root1" 2025-06-24T21:13:32.312800Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:133:2157], cookie=2207299022992378524) 2025-06-24T21:13:32.313425Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:142:2164], cookie=13777567793190417256, path="/Root1/Res", config={ }) 2025-06-24T21:13:32.313686Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Root1/Res" 2025-06-24T21:13:32.326653Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:142:2164], cookie=13777567793190417256) 2025-06-24T21:13:32.327380Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:147:2169], cookie=16672041391989086494, path="/Root2", config={ MaxUnitsPerSecond: 1000 }) 2025-06-24T21:13:32.327611Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 3 "Root2" 2025-06-24T21:13:32.340093Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:147:2169], cookie=16672041391989086494) 2025-06-24T21:13:32.340761Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:152:2174], cookie=2851191069856355087, path="/Root2/Res", config={ }) 2025-06-24T21:13:32.340996Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 4 "Root2/Res" 2025-06-24T21:13:32.353793Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:152:2174], cookie=2851191069856355087) 2025-06-24T21:13:32.354457Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:157:2179], cookie=16732556541127709931, path="/Root2/Res/Subres", config={ }) 2025-06-24T21:13:32.354701Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 5 "Root2/Res/Subres" 2025-06-24T21:13:32.367025Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:157:2179], cookie=16732556541127709931) 
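The quoter counter traces below illustrate how allocations roll up the resource tree: the three allocations of 100 charged to "Root1/Res" are reported as Allocated: 300 on both "Root1/Res" and its parent "Root1", and the two allocations of 100 charged to "Root2/Res/Subres" show up as Allocated: 200 on the subresource and on each of its ancestors ("Root2/Res", "Root2"). A minimal sketch of that aggregation, assuming counters simply accumulate on every ancestor path (an illustration only, not the Kesus implementation):

from collections import defaultdict

def roll_up(allocations):
    # allocations: {"resource/path": [list of allocated amounts], ...}
    counters = defaultdict(int)
    for path, amounts in allocations.items():
        parts = path.split("/")
        for i in range(1, len(parts) + 1):        # charge the resource and every ancestor
            counters["/".join(parts[:i])] += sum(amounts)
    return dict(counters)

# roll_up({"Root1/Res": [100] * 3, "Root2/Res/Subres": [100] * 2}) ->
# {"Root1": 300, "Root1/Res": 300, "Root2": 200, "Root2/Res": 200, "Root2/Res/Subres": 200}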
2025-06-24T21:13:32.368539Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:162:2184]. Cookie: 16432137904670992269. Data: { Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Root1/Res" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 1000 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:32.368618Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:162:2184], cookie=16432137904670992269) 2025-06-24T21:13:32.415906Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:162:2184]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 100 StateNotification { Status: SUCCESS } } } 2025-06-24T21:13:32.468677Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:162:2184]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 100 StateNotification { Status: SUCCESS } } } 2025-06-24T21:13:32.503296Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:162:2184]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 100 StateNotification { Status: SUCCESS } } } 2025-06-24T21:13:32.504102Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:313: [72057594037927937] Send TEvGetQuoterResourceCountersResult to [5:170:2188]. Cookie: 13675681618228825859. Data: { ResourceCounters { ResourcePath: "Root2/Res" } ResourceCounters { ResourcePath: "Root2/Res/Subres" } ResourceCounters { ResourcePath: "Root2" } ResourceCounters { ResourcePath: "Root1/Res" Allocated: 300 } ResourceCounters { ResourcePath: "Root1" Allocated: 300 } } 2025-06-24T21:13:32.504977Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:173:2191]. Cookie: 17121286146284695214. Data: { Results { ResourceId: 5 Error { Status: SUCCESS } EffectiveProps { ResourceId: 5 ResourcePath: "Root2/Res/Subres" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 1000 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:32.505044Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:173:2191], cookie=17121286146284695214) 2025-06-24T21:13:32.547215Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:173:2191]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 5 Amount: 100 StateNotification { Status: SUCCESS } } } 2025-06-24T21:13:32.590967Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:173:2191]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 5 Amount: 100 StateNotification { Status: SUCCESS } } } 2025-06-24T21:13:32.591856Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:313: [72057594037927937] Send TEvGetQuoterResourceCountersResult to [5:179:2195]. 
Cookie: 12331094367566324159. Data: { ResourceCounters { ResourcePath: "Root2/Res" Allocated: 200 } ResourceCounters { ResourcePath: "Root2/Res/Subres" Allocated: 200 } ResourceCounters { ResourcePath: "Root2" Allocated: 200 } ResourceCounters { ResourcePath: "Root1/Res" Allocated: 300 } ResourceCounters { ResourcePath: "Root1" Allocated: 300 } } 2025-06-24T21:13:32.592829Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:162:2184]. Cookie: 891310847563380202. Data: { Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Root1/Res" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 1000 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:32.592901Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:162:2184], cookie=891310847563380202) 2025-06-24T21:13:32.593708Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:173:2191]. Cookie: 8417584138150073302. Data: { Results { ResourceId: 5 Error { Status: SUCCESS } EffectiveProps { ResourceId: 5 ResourcePath: "Root2/Res/Subres" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 1000 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:32.593774Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:173:2191], cookie=8417584138150073302) 2025-06-24T21:13:32.625136Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:173:2191]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 5 Amount: 50 StateNotification { Status: SUCCESS } } } 2025-06-24T21:13:32.625260Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:162:2184]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 20 StateNotification { Status: SUCCESS } } } 2025-06-24T21:13:32.626011Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:313: [72057594037927937] Send TEvGetQuoterResourceCountersResult to [5:186:2202]. Cookie: 15815111682213253074. 
Data: { ResourceCounters { ResourcePath: "Root2/Res" Allocated: 250 } ResourceCounters { ResourcePath: "Root2/Res/Subres" Allocated: 250 } ResourceCounters { ResourcePath: "Root2" Allocated: 250 } ResourceCounters { ResourcePath: "Root1/Res" Allocated: 320 } ResourceCounters { ResourcePath: "Root1" Allocated: 320 } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestCreateSemaphore [GOOD] Test command err: 2025-06-24T21:13:30.756952Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:30.757106Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:30.777388Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:30.777751Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:30.808105Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:30.809052Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:135:2159], cookie=12566162184604830136, session=0, seqNo=222) 2025-06-24T21:13:30.809274Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:30.822894Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:135:2159], cookie=12566162184604830136, session=1) 2025-06-24T21:13:30.823303Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:136:2160], cookie=16586183982504860606, session=1, seqNo=111) 2025-06-24T21:13:30.835790Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:136:2160], cookie=16586183982504860606, session=1) 2025-06-24T21:13:31.168461Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:31.168573Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:31.187191Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:31.187336Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:31.212291Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:31.212814Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:135:2159], cookie=111, session=0, seqNo=42) 2025-06-24T21:13:31.212988Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:31.213177Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:135:2159], cookie=222, session=1, seqNo=41) 2025-06-24T21:13:31.227891Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:135:2159], cookie=111, session=1) 2025-06-24T21:13:31.228031Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:135:2159], cookie=222, session=1) 2025-06-24T21:13:31.611454Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:31.611562Z node 3 :KESUS_TABLET DEBUG: 
tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:31.627605Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:31.627901Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:31.652640Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:31.653106Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:135:2159], cookie=16810778102018130668, session=0, seqNo=0) 2025-06-24T21:13:31.653249Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:31.665038Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:135:2159], cookie=16810778102018130668, session=1) 2025-06-24T21:13:31.666050Z node 3 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[3:152:2174], cookie=3534704200210013532) 2025-06-24T21:13:31.666113Z node 3 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[3:152:2174], cookie=3534704200210013532) 2025-06-24T21:13:32.035649Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:32.035787Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:32.054227Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:32.054823Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:32.079112Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:32.486836Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:32.486944Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:32.501989Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:32.502087Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:32.516823Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:32.517343Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:133:2157], cookie=9030954537940735060, session=0, seqNo=0) 2025-06-24T21:13:32.517491Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:32.547763Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:133:2157], cookie=9030954537940735060, session=1) 2025-06-24T21:13:32.548128Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:133:2157], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-24T21:13:32.548278Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-24T21:13:32.548374Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-24T21:13:32.561172Z node 5 :KESUS_TABLET DEBUG: 
tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:133:2157], cookie=111) 2025-06-24T21:13:32.562043Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:147:2169], cookie=12303669727592934802, name="Sem1", limit=42) 2025-06-24T21:13:32.562174Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 2 "Sem1" 2025-06-24T21:13:32.574815Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:147:2169], cookie=12303669727592934802) 2025-06-24T21:13:32.575391Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:152:2174], cookie=12230807916109527623, name="Sem1", limit=42) 2025-06-24T21:13:32.587703Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:152:2174], cookie=12230807916109527623) 2025-06-24T21:13:32.588390Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:157:2179], cookie=10757656799217407655, name="Sem1", limit=51) 2025-06-24T21:13:32.601634Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:157:2179], cookie=10757656799217407655) 2025-06-24T21:13:32.602262Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:162:2184], cookie=4220672025525123683, name="Lock1", limit=42) 2025-06-24T21:13:32.614670Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:162:2184], cookie=4220672025525123683) 2025-06-24T21:13:32.615316Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:167:2189], cookie=4412074753762643670, name="Lock1", limit=18446744073709551615) 2025-06-24T21:13:32.627639Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:167:2189], cookie=4412074753762643670) 2025-06-24T21:13:32.628129Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:172:2194], cookie=11765237640837765702, name="Sem1") 2025-06-24T21:13:32.628206Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:172:2194], cookie=11765237640837765702) 2025-06-24T21:13:32.628619Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:175:2197], cookie=9948676734012406594, name="Sem2") 2025-06-24T21:13:32.628682Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:175:2197], cookie=9948676734012406594) 2025-06-24T21:13:32.641852Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:32.641929Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:32.642321Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:32.642791Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:32.684686Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: 
[72057594037927937] TTxInit::Complete 2025-06-24T21:13:32.684859Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-24T21:13:32.685261Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:215:2227], cookie=9251186329202689854, name="Sem1") 2025-06-24T21:13:32.685381Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:215:2227], cookie=9251186329202689854) 2025-06-24T21:13:32.686030Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:222:2233], cookie=12389831884804763199, name="Sem2") 2025-06-24T21:13:32.686109Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:222:2233], cookie=12389831884804763199) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestSemaphoreSessionFailures [GOOD] Test command err: 2025-06-24T21:13:30.049783Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:30.049921Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:30.069839Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:30.070127Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:30.102306Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:30.103010Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:135:2159], cookie=8700525011308380916, session=0, seqNo=0) 2025-06-24T21:13:30.103207Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:30.121705Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:135:2159], cookie=8700525011308380916, session=1) 2025-06-24T21:13:30.122093Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:135:2159], cookie=1287438381359640403, session=0, seqNo=0) 2025-06-24T21:13:30.122240Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-24T21:13:30.144179Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:135:2159], cookie=1287438381359640403, session=2) 2025-06-24T21:13:30.144588Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[1:135:2159], cookie=111, name="Lock1") 2025-06-24T21:13:30.158771Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[1:135:2159], cookie=111) 2025-06-24T21:13:30.159128Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:135:2159], cookie=222, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-24T21:13:30.159317Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-24T21:13:30.159415Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] 
Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-24T21:13:30.175893Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:135:2159], cookie=222) 2025-06-24T21:13:30.176181Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[1:135:2159], cookie=333, name="Lock1") 2025-06-24T21:13:30.188260Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[1:135:2159], cookie=333) 2025-06-24T21:13:30.704773Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:30.704906Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:30.725448Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:30.725633Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:30.750849Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:30.751439Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:135:2159], cookie=12167105930597640527, session=0, seqNo=0) 2025-06-24T21:13:30.751656Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:30.764376Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:135:2159], cookie=12167105930597640527, session=1) 2025-06-24T21:13:30.764777Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:135:2159], cookie=2554592725551641037, session=0, seqNo=0) 2025-06-24T21:13:30.764931Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-24T21:13:30.777161Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:135:2159], cookie=2554592725551641037, session=2) 2025-06-24T21:13:30.777845Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[2:146:2168], cookie=18307051026274761770, name="Sem1", limit=1) 2025-06-24T21:13:30.778007Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1" 2025-06-24T21:13:30.790703Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[2:146:2168], cookie=18307051026274761770) 2025-06-24T21:13:30.791998Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:135:2159], cookie=111, session=1, semaphore="Sem1" count=1) 2025-06-24T21:13:30.792195Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #1 session 1 2025-06-24T21:13:30.792445Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:135:2159], cookie=222, session=2, semaphore="Sem1" count=1) 2025-06-24T21:13:30.804776Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:135:2159], cookie=111) 2025-06-24T21:13:30.804892Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: 
[72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:135:2159], cookie=222) 2025-06-24T21:13:30.805566Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:154:2176], cookie=2341511733468598773, name="Sem1") 2025-06-24T21:13:30.805727Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:154:2176], cookie=2341511733468598773) 2025-06-24T21:13:30.806230Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:157:2179], cookie=4605795611464926284, name="Sem1") 2025-06-24T21:13:30.806300Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:157:2179], cookie=4605795611464926284) 2025-06-24T21:13:30.806579Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[2:135:2159], cookie=333, name="Sem1") 2025-06-24T21:13:30.806703Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:124: [72057594037927937] Deleting session 2 / semaphore 1 "Sem1" waiter link 2025-06-24T21:13:30.819895Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[2:135:2159], cookie=333) 2025-06-24T21:13:30.820572Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:162:2184], cookie=13899505389499498413, name="Sem1") 2025-06-24T21:13:30.820696Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:162:2184], cookie=13899505389499498413) 2025-06-24T21:13:30.821284Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:165:2187], cookie=6137200694467720715, name="Sem1") 2025-06-24T21:13:30.821369Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:165:2187], cookie=6137200694467720715) 2025-06-24T21:13:30.821658Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[2:135:2159], cookie=444, name="Sem1") 2025-06-24T21:13:30.821771Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 1 "Sem1" owner link 2025-06-24T21:13:30.834387Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[2:135:2159], cookie=444) 2025-06-24T21:13:30.835178Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:170:2192], cookie=15877815760495434096, name="Sem1") 2025-06-24T21:13:30.835288Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:170:2192], cookie=15877815760495434096) 2025-06-24T21:13:30.835959Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:173:2195], cookie=1697028322753887768, name="Sem1") 2025-06-24T21:13:30.836040Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:173:2195], cookie=1697028322753887768) 2025-06-24T21:13:31.334981Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:31.335082Z node 3 
:KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:31.353450Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:31.353659Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:31.380008Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:31.380445Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[3:135:2159], cookie=10131325597012365256, name="Sem1", limit=1) 2025-06-24T21:13:31.380643Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1" 2025-06-24T21:13:31.393067Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[3:135:2159], cookie=10131325597012365256) 2025-06-24T21:13:31.393804Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[3:144:2166], cookie=16786770133703997737, name="Sem2", limit=1) 2025-06-24T21:13:31.393982Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 2 "Sem2" 2025-06-24T21:13:31.411086Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[3:144:2166], cookie=16786770133703997737) 2025-06-24T21:13:31.411821Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[3:149:2171], cookie=15855502533765663622, name="Sem1") 2025-06-24T21:13:31.411937Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[3:149:2171], cookie=15855502533765663622) 2025-06-24T21:13:31.412445Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[3:152:2174], cookie=15405546089007435914, name="Sem2") 2025-06-24T21:13:31.412513Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[3:152:2174], cookie=15405546089007435914) 2025-06-24T21:13:31.434398Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:31.434532Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema: ... 
TTxSemaphoreCreate::Complete (sender=[4:247:2268], cookie=11769172153579803347) 2025-06-24T21:13:32.352655Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:135:2159], cookie=111, session=1, semaphore="Sem1" count=1) 2025-06-24T21:13:32.352818Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 11 "Sem1" queue: next order #1 session 1 2025-06-24T21:13:32.365631Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:135:2159], cookie=111) 2025-06-24T21:13:32.366185Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:135:2159], cookie=222, session=2, semaphore="Sem1" count=1) 2025-06-24T21:13:32.390127Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:135:2159], cookie=222) 2025-06-24T21:13:32.390693Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[4:135:2159], cookie=333, name="Sem1") 2025-06-24T21:13:32.390818Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:124: [72057594037927937] Deleting session 2 / semaphore 11 "Sem1" waiter link 2025-06-24T21:13:32.403158Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[4:135:2159], cookie=333) 2025-06-24T21:13:32.403771Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:135:2159], cookie=444, session=2, semaphore="Sem1" count=1) 2025-06-24T21:13:32.417898Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:135:2159], cookie=444) 2025-06-24T21:13:32.418437Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[4:135:2159], cookie=555, name="Sem1") 2025-06-24T21:13:32.418546Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 11 "Sem1" owner link 2025-06-24T21:13:32.418611Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 11 "Sem1" queue: next order #3 session 2 2025-06-24T21:13:32.431462Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[4:135:2159], cookie=555) 2025-06-24T21:13:32.832795Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:32.832929Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:32.852173Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:32.852271Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:32.866453Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:32.867014Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:133:2157], cookie=5413939259277285737, session=0, seqNo=0) 2025-06-24T21:13:32.867208Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:32.890455Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete 
(sender=[5:133:2157], cookie=5413939259277285737, session=1) 2025-06-24T21:13:32.890926Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:133:2157], cookie=112, name="Sem1", limit=5) 2025-06-24T21:13:32.891120Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1" 2025-06-24T21:13:32.903885Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:133:2157], cookie=112) 2025-06-24T21:13:32.904316Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:28: [72057594037927937] TTxSemaphoreUpdate::Execute (sender=[5:133:2157], cookie=113, name="Sem1") 2025-06-24T21:13:32.917085Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:84: [72057594037927937] TTxSemaphoreUpdate::Complete (sender=[5:133:2157], cookie=113) 2025-06-24T21:13:32.917473Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[5:133:2157], cookie=114, name="Sem1", force=0) 2025-06-24T21:13:32.917580Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:58: [72057594037927937] Deleting semaphore 1 "Sem1" 2025-06-24T21:13:32.929962Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[5:133:2157], cookie=114) 2025-06-24T21:13:32.930296Z node 5 :KESUS_TABLET DEBUG: tx_session_detach.cpp:100: [72057594037927937] Fast-path detach session=1 from sender=[5:133:2157], cookie=11244710954072654894 2025-06-24T21:13:32.930643Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:133:2157], cookie=115, name="Sem1", limit=5) 2025-06-24T21:13:32.943212Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:133:2157], cookie=115) 2025-06-24T21:13:32.943651Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:28: [72057594037927937] TTxSemaphoreUpdate::Execute (sender=[5:133:2157], cookie=116, name="Sem1") 2025-06-24T21:13:32.957609Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:84: [72057594037927937] TTxSemaphoreUpdate::Complete (sender=[5:133:2157], cookie=116) 2025-06-24T21:13:32.958019Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[5:133:2157], cookie=117, name="Sem1", force=0) 2025-06-24T21:13:32.972357Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[5:133:2157], cookie=117) 2025-06-24T21:13:32.973031Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:133:2157], cookie=118, session=1, semaphore="Sem1" count=1) 2025-06-24T21:13:32.987614Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:133:2157], cookie=118) 2025-06-24T21:13:32.988062Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[5:133:2157], cookie=119, name="Sem1") 2025-06-24T21:13:33.017083Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[5:133:2157], cookie=119) 2025-06-24T21:13:33.017623Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:133:2157], cookie=120, name="Sem1") 2025-06-24T21:13:33.017746Z 
node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:133:2157], cookie=120) 2025-06-24T21:13:33.018044Z node 5 :KESUS_TABLET DEBUG: tx_session_destroy.cpp:37: [72057594037927937] TTxSessionDestroy::Execute (sender=[5:133:2157], cookie=5510626405141074925, session=1) 2025-06-24T21:13:33.018171Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] Deleting session 1 2025-06-24T21:13:33.032704Z node 5 :KESUS_TABLET DEBUG: tx_session_destroy.cpp:75: [72057594037927937] TTxSessionDestroy::Complete (sender=[5:133:2157], cookie=5510626405141074925) 2025-06-24T21:13:33.033112Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:133:2157], cookie=121, name="Sem1", limit=5) 2025-06-24T21:13:33.045882Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:133:2157], cookie=121) 2025-06-24T21:13:33.046269Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:28: [72057594037927937] TTxSemaphoreUpdate::Execute (sender=[5:133:2157], cookie=122, name="Sem1") 2025-06-24T21:13:33.059184Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:84: [72057594037927937] TTxSemaphoreUpdate::Complete (sender=[5:133:2157], cookie=122) 2025-06-24T21:13:33.059606Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[5:133:2157], cookie=123, name="Sem1", force=0) 2025-06-24T21:13:33.072180Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[5:133:2157], cookie=123) 2025-06-24T21:13:33.072559Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:133:2157], cookie=124, session=1, semaphore="Sem1" count=1) 2025-06-24T21:13:33.089686Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:133:2157], cookie=124) 2025-06-24T21:13:33.090100Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[5:133:2157], cookie=125, name="Sem1") 2025-06-24T21:13:33.102836Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[5:133:2157], cookie=125) 2025-06-24T21:13:33.103250Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:133:2157], cookie=126, name="Sem1") 2025-06-24T21:13:33.103359Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:133:2157], cookie=126) 2025-06-24T21:13:33.104036Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:133:2157], cookie=127, name="Sem1", limit=5) 2025-06-24T21:13:33.104125Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:133:2157], cookie=127) 2025-06-24T21:13:33.104683Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:28: [72057594037927937] TTxSemaphoreUpdate::Execute (sender=[5:133:2157], cookie=128, name="Sem1") 2025-06-24T21:13:33.104768Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:84: [72057594037927937] TTxSemaphoreUpdate::Complete (sender=[5:133:2157], cookie=128) 2025-06-24T21:13:33.105012Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: 
[72057594037927937] TTxSemaphoreDelete::Execute (sender=[5:133:2157], cookie=129, name="Sem1", force=0) 2025-06-24T21:13:33.105078Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[5:133:2157], cookie=129) 2025-06-24T21:13:33.105531Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:133:2157], cookie=130, session=1, semaphore="Sem1" count=1) 2025-06-24T21:13:33.105606Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:133:2157], cookie=130) 2025-06-24T21:13:33.105863Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[5:133:2157], cookie=131, name="Sem1") 2025-06-24T21:13:33.105925Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[5:133:2157], cookie=131) 2025-06-24T21:13:33.106147Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:133:2157], cookie=132, name="Sem1") 2025-06-24T21:13:33.106208Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:133:2157], cookie=132) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_StartTls::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] Test command err: 2025-06-24T21:12:56.063119Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626924960791199:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:56.063202Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004272/r3tmp/tmpd5wSLu/pdisk_1.dat 2025-06-24T21:12:56.562091Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:56.562174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:56.567076Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:56.609624Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626920665823713:2079] 1750799576032468 != 1750799576032471 2025-06-24T21:12:56.617703Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4657, node 1 2025-06-24T21:12:56.811894Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:56.811919Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:56.811926Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:56.812041Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:57.055314Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for 
task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:57.059252Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:57.063707Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:57.063746Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:57.065437Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:12548, port: 12548 2025-06-24T21:12:57.065520Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:57.131697Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:12:57.177504Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:12:57.183346Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:12:57.183422Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:57.233062Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:57.283280Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:57.288442Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****OQQA (84C5292B) () has now valid token of ldapuser@ldap 2025-06-24T21:13:01.067277Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626924960791199:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:01.067390Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:13:02.085344Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****OQQA (84C5292B) 2025-06-24T21:13:02.085469Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:12548, port: 12548 2025-06-24T21:13:02.085740Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:02.163817Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:02.164397Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:340: LDAP user 
ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldaps://localhost:12548 return no entries 2025-06-24T21:13:02.164868Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****OQQA (84C5292B) () has now permanent error message 'Could not login via LDAP (LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldaps://localhost:12548 return no entries)' 2025-06-24T21:13:07.095279Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****OQQA (84C5292B) test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004272/r3tmp/tmpDeVBQ2/pdisk_1.dat 2025-06-24T21:13:08.365981Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:13:08.449772Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:08.455293Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626976406446734:2079] 1750799588160721 != 1750799588160724 2025-06-24T21:13:08.459581Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:08.459653Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:08.464302Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21162, node 2 2025-06-24T21:13:08.519810Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:08.519833Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:08.519841Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:08.519958Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:08.608801Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:08.612129Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:08.612167Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:08.612827Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:6820, port: 6820 2025-06-24T21:13:08.612897Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:08.671604Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:08.675581Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldaps://localhost:6820. 
Server is busy 2025-06-24T21:13:08.676027Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****cJ0A (2B7916C3) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldaps://localhost:6820. Server is busy)' 2025-06-24T21:13:08.676265Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:08.676284Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:08.677100Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:6820, port: 6820 2025-06-24T21:13:08.677176Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:08.731732Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:08.732228Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldaps://localhost:6820. Server is busy 2025-06-24T21:13:08.732522Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****cJ0A (2B7916C3) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldaps://localhost:6820. Server is busy)' 2025-06-24T21:13:09.188356Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:10.185204Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****cJ0A (2B7916C3) 2025-06-24T21:13:10.185562Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:10.185589Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:10.186551Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:6820, port: 6820 2025-06-24T21:13:10.186621Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:10.255655Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:10.256156Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldaps://localhost:6820. Server is busy 2025-06-24T21:13:10.256612Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****cJ0A (2B7916C3) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldaps://localhost:6820. Server is busy)' 2025-06-24T21:13:13.187512Z node 2 :TICKET_PARSER ... 
r_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:19.573450Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:19.574083Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:31698, port: 31698 2025-06-24T21:13:19.574142Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:13:19.585330Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:19.627711Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:19.675483Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:13:19.720297Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****4HVw (94A2C5D5) () has now valid token of ldapuser@ldap 2025-06-24T21:13:22.824004Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519627036086985248:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:22.824092Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004272/r3tmp/tmpxyGW2R/pdisk_1.dat 2025-06-24T21:13:22.931308Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519627036086985230:2079] 1750799602822267 != 1750799602822270 2025-06-24T21:13:22.958891Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:22.967544Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:22.967726Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:22.971323Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4336, node 4 2025-06-24T21:13:23.073241Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:23.073262Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:23.073289Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:23.073435Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:23.175304Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:23.177153Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:23.177199Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 
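
The LDAP records in this test show the provider first searching for the user with the filter uid=ldapuser, then resolving nested groups: one search with the matching-rule-in-chain filter (member:1.2.840.113556.1.4.1941:=<user DN>), followed by iterative searches whose filter OR-combines the entryDn of every group discovered so far. A rough sketch of how such filter strings could be assembled (hypothetical helpers, not taken from ldap_auth_provider.cpp):

```cpp
// Illustrative construction of the two LDAP filters seen in the log:
// the matching-rule-in-chain filter for the first pass and the
// (|(entryDn=...)(entryDn=...)) filter used for the tree traversal.
#include <iostream>
#include <string>
#include <vector>

std::string ChainFilter(const std::string& userDn) {
    return "(member:1.2.840.113556.1.4.1941:=" + userDn + ")";
}

std::string NestedGroupsFilter(const std::vector<std::string>& groupDns) {
    std::string filter = "(|";
    for (const auto& dn : groupDns)
        filter += "(entryDn=" + dn + ")";
    return filter + ")";
}

int main() {
    std::cout << ChainFilter("uid=ldapuser,dc=search,dc=yandex,dc=net") << "\n";
    std::cout << NestedGroupsFilter({
        "cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net",
        "cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net"}) << "\n";
}
```
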
2025-06-24T21:13:23.177991Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:17496, port: 17496 2025-06-24T21:13:23.178079Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:13:23.189262Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:23.235728Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:23.280088Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****GX4w (787EF6E6) () has now valid token of ldapuser@ldap 2025-06-24T21:13:26.253982Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519627050074307792:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:26.254044Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004272/r3tmp/tmpRaSdeF/pdisk_1.dat 2025-06-24T21:13:26.384345Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:26.385643Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519627050074307773:2079] 1750799606253586 != 1750799606253589 TServer::EnableGrpc on GrpcPort 15948, node 5 2025-06-24T21:13:26.418153Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:26.418298Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:26.421390Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:26.444393Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:26.444418Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:26.444426Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:26.444595Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:26.631071Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:26.634320Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:26.634356Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:26.635201Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:4221, port: 4221 2025-06-24T21:13:26.635284Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:13:26.645221Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:26.687860Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: 
dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-24T21:13:26.731636Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:13:26.732089Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:13:26.732125Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-24T21:13:26.780468Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-24T21:13:26.827523Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-24T21:13:26.828552Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****e2dA (63FB3684) () has now valid token of ldapuser@ldap 2025-06-24T21:13:29.616520Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519627063084678059:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:29.616596Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004272/r3tmp/tmpxeiW8a/pdisk_1.dat 2025-06-24T21:13:29.750972Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:29.752234Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519627063084678032:2079] 1750799609616097 != 1750799609616100 2025-06-24T21:13:29.769421Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:29.769504Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:29.775914Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63451, node 6 2025-06-24T21:13:29.816033Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:29.816060Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:29.816072Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:29.816198Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:30.055299Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 
2025-06-24T21:13:30.055963Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:30.055991Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:30.056727Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:25014, port: 25014 2025-06-24T21:13:30.056809Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:13:30.068752Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:30.111677Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: &(uid=ldapuser)(), attributes: memberOf 2025-06-24T21:13:30.111738Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter &(uid=ldapuser)() on server ldap://localhost:25014. Bad search filter 2025-06-24T21:13:30.112191Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****T2oQ (D9B70062) () has now permanent error message 'Could not login via LDAP (Could not perform search for filter &(uid=ldapuser)() on server ldap://localhost:25014. Bad search filter)' >> TKesusTest::TestQuoterAccountResourcesBurst [GOOD] >> TKesusTest::TestQuoterAccountResourcesAggregateClients >> KqpRbo::Bench_10Joins >> KqpRbo::Bench_Filter >> KqpRbo::LeftJoinToKqpOpJoin >> KqpRbo::JoinFilter >> KqpRbo::Bench_CrossFilter >> KqpRbo::Bench_JoinFilter >> KqpRbo::Bench_Select >> KqpRbo::CrossFilter >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-42 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-43 >> TKesusTest::TestQuoterAccountResourcesDeduplicateClient [GOOD] >> TKesusTest::TestQuoterAccountResourcesForgetClient >> YdbOlapStore::LogNonExistingUserId [GOOD] >> YdbOlapStore::LogPagingBefore >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-49 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-50 >> KqpRbo::Filter >> TKesusTest::TestQuoterAccountResourcesAggregateClients [GOOD] >> TKesusTest::TestQuoterAccountResourcesAggregateResources >> TopicAutoscaling::ControlPlane_CreateAlterDescribe [GOOD] >> TopicAutoscaling::ControlPlane_DisableAutoPartitioning >> KqpRbo::Select >> Balancing::Balancing_ManyTopics_TopicApi [GOOD] >> Balancing::Balancing_ManyTopics_PQv1 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-55 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-56 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-19 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-20 >> TKesusTest::TestQuoterAccountResourcesAggregateResources [GOOD] >> TKesusTest::TestQuoterAccountLabels >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-67 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-68 >> KqpRbo::Bench_Select [GOOD] >> KqpRbo::CrossFilter [GOOD] >> KqpRbo::JoinFilter [GOOD] >> KqpRbo::Filter [GOOD] >> KqpRbo::LeftJoinToKqpOpJoin [GOOD] >> TopicAutoscaling::ControlPlane_CDC_Disable [GOOD] >> TopicAutoscaling::BalancingAfterSplit_sessionsWithPartition >> KqpRbo::Bench_Filter [GOOD] >> KqpRbo::Bench_CrossFilter [GOOD] >> TopicAutoscaling::PartitionMerge_PreferedPartition_PQv1 
[GOOD] >> TopicAutoscaling::PartitionSplit_ManySession_BeforeAutoscaleAwareSDK >> KqpRbo::Bench_JoinFilter [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::Bench_Select [GOOD] Test command err: Trying to start YDB, gRPC: 64800, MsgBus: 63175 2025-06-24T21:13:35.495924Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627091559146047:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:35.496025Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004376/r3tmp/tmpgR4WBL/pdisk_1.dat 2025-06-24T21:13:35.954699Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:35.954797Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:35.955088Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:35.984814Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64800, node 1 2025-06-24T21:13:36.271689Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:36.271716Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:36.271725Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:36.271820Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:36.507892Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63175 TClient is connected to server localhost:63175 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:13:37.136994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:13:38.629966Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627104444048541:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:38.629967Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627104444048549:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:38.630062Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:38.636457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:13:38.653293Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627104444048555:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:13:38.716807Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627104444048606:2334] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::CrossFilter [GOOD] Test command err: Trying to start YDB, gRPC: 14436, MsgBus: 3659 2025-06-24T21:13:35.593659Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627089299685296:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:35.593733Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00437f/r3tmp/tmpD907RV/pdisk_1.dat 2025-06-24T21:13:36.137599Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:36.137723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:36.140986Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:36.192794Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:36.193748Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627089299685110:2079] 1750799615536002 != 1750799615536005 TServer::EnableGrpc on GrpcPort 14436, node 1 2025-06-24T21:13:36.291201Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:36.291225Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:36.291233Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:36.295419Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:36.629325Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3659 TClient is connected to server localhost:3659 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:13:37.186512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:13:38.852768Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627102184587637:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:38.852885Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.109462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:39.229497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:39.303985Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627106479555109:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.304105Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.304562Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627106479555114:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.308622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:13:39.321856Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627106479555116:2313], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-24T21:13:39.412813Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627106479555167:2440] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpRbo::Select [GOOD] >> TKesusTest::TestQuoterAccountLabels [GOOD] >> TKesusTest::TestPassesUpdatedPropsToSession >> KqpKv::ReadRows_Decimal [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::LeftJoinToKqpOpJoin [GOOD] Test command err: Trying to start YDB, gRPC: 27570, MsgBus: 11171 2025-06-24T21:13:35.443085Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627091636121308:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:35.444290Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004380/r3tmp/tmpSZQBm6/pdisk_1.dat 2025-06-24T21:13:35.955873Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:35.956966Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627091636121280:2079] 1750799615439462 != 1750799615439465 2025-06-24T21:13:35.977079Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:35.977235Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:35.980596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27570, node 1 2025-06-24T21:13:36.275685Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:36.275707Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:36.275715Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:36.275829Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:36.465941Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11171 TClient is connected to server localhost:11171 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:13:37.143924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:13:38.681446Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627104521023812:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:38.681572Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.111124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:39.260568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:39.294021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:39.324731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:39.402678Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627108815991424:2323], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.402747Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.402950Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627108815991429:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.406910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:13:39.418130Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627108815991431:2327], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T21:13:39.511891Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627108815991484:2533] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::JoinFilter [GOOD] Test command err: Trying to start YDB, gRPC: 18095, MsgBus: 29679 2025-06-24T21:13:35.444703Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627091071273123:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:35.446880Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004387/r3tmp/tmpYt7iDD/pdisk_1.dat 2025-06-24T21:13:35.997218Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:35.997324Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:36.016190Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:36.028166Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:36.030351Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627091071273103:2079] 1750799615442454 != 1750799615442457 TServer::EnableGrpc on GrpcPort 18095, node 1 2025-06-24T21:13:36.267753Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:36.267780Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:36.267797Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:36.267922Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:36.479349Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29679 TClient is connected to server localhost:29679 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:13:37.140885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:13:38.722320Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627103956175638:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:38.722488Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.109471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:39.252192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:39.320459Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627108251143110:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.320576Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.320843Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627108251143115:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.324838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:13:39.334271Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627108251143117:2313], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-24T21:13:39.427328Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627108251143168:2441] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::Filter [GOOD] Test command err: Trying to start YDB, gRPC: 5007, MsgBus: 2144 2025-06-24T21:13:36.077265Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627095392141255:2232];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00436a/r3tmp/tmpFCYjOy/pdisk_1.dat 2025-06-24T21:13:36.083223Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:13:36.473293Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:36.475240Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627095392141036:2079] 1750799616039686 != 1750799616039689 2025-06-24T21:13:36.525537Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:36.525665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 5007, node 1 2025-06-24T21:13:36.529125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:36.630892Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:36.630923Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:36.631371Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:36.631558Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2144 2025-06-24T21:13:37.070903Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2144 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:13:37.299086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:13:37.316579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:13:39.267133Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627108277043564:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.267327Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.544057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:39.682371Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627108277043669:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.682463Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627108277043674:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.682495Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.686763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:13:39.698610Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627108277043676:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:13:39.768019Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627108277043727:2394] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TKesusTest::TestPassesUpdatedPropsToSession [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::Bench_CrossFilter [GOOD] Test command err: Trying to start YDB, gRPC: 18497, MsgBus: 19190 2025-06-24T21:13:35.444726Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627091801839939:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:35.445456Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004394/r3tmp/tmpr0l7ZZ/pdisk_1.dat 2025-06-24T21:13:35.959769Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:35.959872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:35.991825Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:35.994729Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627091801839918:2079] 1750799615440474 != 1750799615440477 2025-06-24T21:13:36.028714Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18497, node 1 2025-06-24T21:13:36.267805Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:36.267834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:36.267842Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:36.267956Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:36.463661Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19190 TClient is connected to server localhost:19190 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:13:37.151640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:13:37.166731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:13:38.668714Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627104686742450:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:38.668835Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.109486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:39.234523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:39.287079Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627108981709922:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.287179Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.287416Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627108981709927:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.292075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:13:39.307041Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627108981709929:2313], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-24T21:13:39.367263Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627108981709980:2441] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:13:40.444919Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627091801839939:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:40.445030Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::Bench_Filter [GOOD] Test command err: Trying to start YDB, gRPC: 65448, MsgBus: 24568 2025-06-24T21:13:35.451680Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627092463560504:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:35.460917Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043a8/r3tmp/tmppM9Jn1/pdisk_1.dat 2025-06-24T21:13:35.974123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:35.974207Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:35.993217Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:36.053310Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627092463560463:2079] 1750799615439693 != 1750799615439696 2025-06-24T21:13:36.056613Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65448, node 1 2025-06-24T21:13:36.267853Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:36.267886Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:36.267897Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:36.268025Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:36.465841Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24568 TClient is connected to server localhost:24568 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:13:37.143056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:13:37.164985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:13:38.880538Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627105348462996:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:38.880694Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.187180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:39.352842Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627109643430398:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.352932Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.353143Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627109643430403:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.356736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:13:39.365793Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627109643430405:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:13:39.437325Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627109643430456:2395] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:13:40.452170Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627092463560504:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:40.452268Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::Bench_JoinFilter [GOOD] Test command err: Trying to start YDB, gRPC: 24430, MsgBus: 14968 2025-06-24T21:13:35.492351Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627089328441465:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:35.514001Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004395/r3tmp/tmpMDPeQ8/pdisk_1.dat 2025-06-24T21:13:35.979991Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627089328441280:2079] 1750799615444897 != 1750799615444900 2025-06-24T21:13:35.981413Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:36.004755Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:36.004939Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:36.008800Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24430, node 1 2025-06-24T21:13:36.266434Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:36.266459Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:36.266470Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:36.266637Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:36.461093Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14968 TClient is connected to server localhost:14968 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:13:37.194091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:13:38.806164Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627102213343811:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:38.806336Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.109735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:39.258993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:39.353232Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627106508311285:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.353302Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.353410Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627106508311290:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.357938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:13:39.367872Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627106508311292:2313], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-24T21:13:39.456100Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627106508311343:2440] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:13:40.461886Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627089328441465:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:40.461969Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> YdbIndexTable::OnlineBuild [GOOD] >> YdbIndexTable::OnlineBuildWithDataColumn >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-43 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-44 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-50 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-51 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::Select [GOOD] Test command err: Trying to start YDB, gRPC: 6524, MsgBus: 25919 2025-06-24T21:13:38.081455Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627101248827541:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:38.081570Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00435a/r3tmp/tmp8B6qzi/pdisk_1.dat 2025-06-24T21:13:38.423051Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:38.423373Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627101248827519:2079] 1750799618080702 != 1750799618080705 TServer::EnableGrpc on GrpcPort 6524, node 1 2025-06-24T21:13:38.471362Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:38.471394Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:38.471401Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:38.471515Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:38.472174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:38.472244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:38.474006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25919 TClient is connected to server localhost:25919 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:13:39.032861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:13:39.047940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:13:39.088778Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:40.776500Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627109838762752:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:40.776503Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627109838762760:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:40.776608Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:40.780030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:13:40.789383Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627109838762766:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:13:40.892090Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627109838762817:2332] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestPassesUpdatedPropsToSession [GOOD] Test command err: 2025-06-24T21:13:32.331277Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:32.331414Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:32.360128Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:32.360453Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:32.401471Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:32.409712Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:135:2159], cookie=900024316450484002, path="/Root", config={ MaxUnitsPerSecond: 300 PrefetchCoefficient: 1 }) 2025-06-24T21:13:32.410151Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-24T21:13:32.422383Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:135:2159], cookie=900024316450484002) 2025-06-24T21:13:32.423000Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:145:2167], cookie=9354608804783911989, path="/Root/Res", config={ }) 2025-06-24T21:13:32.423280Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Root/Res" 2025-06-24T21:13:32.435326Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:145:2167], cookie=9354608804783911989) 2025-06-24T21:13:32.437020Z node 1 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [1:150:2172]. Cookie: 3099696710171385077. Data: { Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Root/Res" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { BillingPeriodSec: 2 } OnDemand { BillingPeriodSec: 2 } Overshoot { BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:32.437095Z node 1 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[1:150:2172], cookie=3099696710171385077) 2025-06-24T21:13:32.437587Z node 1 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [1:150:2172]. Cookie: 3725568141463909083. 
Data: { ResourcesInfo { ResourceId: 2 AcceptedUs: 29000 } } 2025-06-24T21:13:32.437630Z node 1 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[1:150:2172], cookie=3725568141463909083) 2025-06-24T21:13:34.565538Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:34.565642Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:34.584426Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:34.584591Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:34.612054Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:34.612567Z node 2 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[2:135:2159], cookie=15170030412204621551, path="/Root", config={ MaxUnitsPerSecond: 300 PrefetchCoefficient: 1 }) 2025-06-24T21:13:34.612919Z node 2 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-24T21:13:34.625518Z node 2 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[2:135:2159], cookie=15170030412204621551) 2025-06-24T21:13:34.626330Z node 2 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [2:145:2167]. Cookie: 7009057257609255795. Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Root" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { Enabled: true ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedUnitsPerSecond: 100 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { Enabled: true BillingPeriodSec: 2 } OnDemand { Enabled: true BillingPeriodSec: 2 } Overshoot { Enabled: true BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:34.626376Z node 2 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[2:145:2167], cookie=7009057257609255795) 2025-06-24T21:13:34.626875Z node 2 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [2:145:2167]. Cookie: 3676904725639249101. Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Root" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { Enabled: true ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedUnitsPerSecond: 100 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { Enabled: true BillingPeriodSec: 2 } OnDemand { Enabled: true BillingPeriodSec: 2 } Overshoot { Enabled: true BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:34.626940Z node 2 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[2:145:2167], cookie=3676904725639249101) 2025-06-24T21:13:34.627385Z node 2 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [2:145:2167]. Cookie: 16866277437787801776. 
Data: { ResourcesInfo { ResourceId: 1 AcceptedUs: 1017500 } } 2025-06-24T21:13:34.627437Z node 2 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[2:145:2167], cookie=16866277437787801776) 2025-06-24T21:13:34.627844Z node 2 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [2:145:2167]. Cookie: 9670790448646172450. Data: { ResourcesInfo { ResourceId: 1 AcceptedUs: 1017500 } } 2025-06-24T21:13:34.627878Z node 2 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[2:145:2167], cookie=9670790448646172450) 2025-06-24T21:13:36.949140Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:36.949250Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:36.969076Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:36.969424Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:36.994767Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:36.995289Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[3:135:2159], cookie=16464380766645097262, path="/Root", config={ MaxUnitsPerSecond: 300 PrefetchCoefficient: 1 }) 2025-06-24T21:13:36.995713Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-24T21:13:37.007981Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[3:135:2159], cookie=16464380766645097262) 2025-06-24T21:13:37.008625Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[3:145:2167], cookie=15709577167710078698, path="/Root/Res1", config={ }) 2025-06-24T21:13:37.008885Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Root/Res1" 2025-06-24T21:13:37.021416Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[3:145:2167], cookie=15709577167710078698) 2025-06-24T21:13:37.022255Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[3:150:2172], cookie=13905888961980655949, path="/Root/Res2", config={ }) 2025-06-24T21:13:37.022453Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 3 "Root/Res2" 2025-06-24T21:13:37.035123Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[3:150:2172], cookie=13905888961980655949) 2025-06-24T21:13:37.036106Z node 3 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [3:155:2177]. Cookie: 10053872587939664131. 
Data: { Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Root/Res1" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { BillingPeriodSec: 2 } OnDemand { BillingPeriodSec: 2 } Overshoot { BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:37.036175Z node 3 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[3:155:2177], cookie=10053872587939664131) 2025-06-24T21:13:37.036870Z node 3 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [3:155:2177]. Cookie: 1732579933085308805. Data: { Results { ResourceId: 3 Error { Status: SUCCESS } EffectiveProps { ResourceId: 3 ResourcePath: "Root/Res2" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { BillingPeriodSec: 2 } OnDemand { BillingPeriodSec: 2 } Overshoot { BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:37.036934Z node 3 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[3:155:2177], cookie=1732579933085308805) 2025-06-24T21:13:37.037541Z node 3 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [3:155:2177]. Cookie: 1176446007298152424. Data: { ResourcesInfo { ResourceId: 2 AcceptedUs: 1020500 } ResourcesInfo { ResourceId: 3 AcceptedUs: 1020500 } } 2025-06-24T21:13:37.037611Z node 3 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[3:155:2177], cookie=1176446007298152424) 2025-06-24T21:13:39.413804Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:39.413927Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:39.430834Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:39.431377Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:39.458618Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:39.459018Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[4:135:2159], cookie=7926664760875338185, path="/Root", config={ MaxUnitsPerSecond: 100 PrefetchCoefficient: 300 }) 2025-06-24T21:13:39.459312Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-24T21:13:39.474255Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[4:135:2159], cookie=7926664760875338185) 2025-06-24T21:13:39.475089Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [4:145:2167]. Cookie: 17058042321109656266. 
Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Root" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 100 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 300 } AccountingConfig { Enabled: true ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { Enabled: true BillingPeriodSec: 2 Labels { key: "k1" value: "v1" } Labels { key: "k2" value: "v2" } } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:39.475166Z node 4 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[4:145:2167], cookie=17058042321109656266) 2025-06-24T21:13:39.475665Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [4:145:2167]. Cookie: 14498372106582189149. Data: { ResourcesInfo { ResourceId: 1 AcceptedUs: 27500 } } 2025-06-24T21:13:39.475716Z node 4 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[4:145:2167], cookie=14498372106582189149) 2025-06-24T21:13:41.656099Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:41.656227Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:41.677843Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:41.677966Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:41.694727Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:41.695203Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:133:2157], cookie=1021041790566709380, path="/Root", config={ MaxUnitsPerSecond: 100 }) 2025-06-24T21:13:41.695414Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-24T21:13:41.719910Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:133:2157], cookie=1021041790566709380) 2025-06-24T21:13:41.720499Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:142:2164], cookie=15761019979616881893, path="/Root/Res", config={ }) 2025-06-24T21:13:41.720742Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Root/Res" 2025-06-24T21:13:41.732992Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:142:2164], cookie=15761019979616881893) 2025-06-24T21:13:41.733815Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:147:2169]. Cookie: 10473518547962371964. 
Data: { Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Root/Res" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 100 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:41.733874Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:147:2169], cookie=10473518547962371964) 2025-06-24T21:13:41.734366Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_update.cpp:34: [72057594037927937] TTxQuoterResourceUpdate::Execute (sender=[5:151:2173], cookie=6361330105935218742, id=0, path="/Root", config={ MaxUnitsPerSecond: 150 }) 2025-06-24T21:13:41.734525Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_update.cpp:61: [72057594037927937] Updated quoter resource 1 "Root" 2025-06-24T21:13:41.734693Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:147:2169]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 EffectiveProps { ResourceId: 2 ResourcePath: "Root/Res" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 150 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } StateNotification { Status: SUCCESS } } } 2025-06-24T21:13:41.750042Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_update.cpp:75: [72057594037927937] TTxQuoterResourceUpdate::Complete (sender=[5:151:2173], cookie=6361330105935218742) 2025-06-24T21:13:41.750642Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:193: [72057594037927937] Send TEvUpdateConsumptionStateAck to [5:147:2169]. Cookie: 8256434514924324046. 
Data: { } 2025-06-24T21:13:41.750695Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:198: [72057594037927937] Update quoter resources consumption state (sender=[5:147:2169], cookie=8256434514924324046) >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnEmptyTenant [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnEmptyTenant-StrictAclCheck >> TKesusTest::TestQuoterAccountResourcesForgetClient [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpKv::ReadRows_Decimal [GOOD] Test command err: Trying to start YDB, gRPC: 30573, MsgBus: 29863 2025-06-24T21:08:57.202458Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519625895932551355:2188];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:08:57.202659Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031e7/r3tmp/tmpGvly4V/pdisk_1.dat 2025-06-24T21:08:57.612696Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:08:57.619270Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519625895932551186:2079] 1750799337123577 != 1750799337123580 2025-06-24T21:08:57.625521Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:08:57.627446Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:08:57.633953Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30573, node 1 2025-06-24T21:08:57.819427Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:08:57.819458Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:08:57.819467Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:08:57.819655Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29863 2025-06-24T21:08:58.202071Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29863 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:08:58.611942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:08:58.628164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:08:58.637951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:58.813527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:58.993793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:08:59.067635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:09:00.876938Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625908817454707:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:00.877046Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:01.179514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:01.256802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:01.300898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:01.386185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:01.437484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:01.489190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:01.542784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:09:01.629584Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625913112422666:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:01.629663Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:01.629924Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519625913112422671:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:09:01.634787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:09:01.655469Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519625913112422673:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:09:01.724016Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519625913112422724:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:09:02.200602Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519625895932551355:2188];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:09:02.200724Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:09:02.934628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... meshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:13:19.250959Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:13:19.423269Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 3419, MsgBus: 1280 2025-06-24T21:13:27.668975Z node 22 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [22:258:2304], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:13:27.669283Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:13:27.669556Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031e7/r3tmp/tmpfNMV4G/pdisk_1.dat 2025-06-24T21:13:28.117797Z node 22 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 22 Type# 268639257 TServer::EnableGrpc on GrpcPort 3419, node 22 2025-06-24T21:13:28.427860Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:28.428006Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:28.428096Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:28.428657Z node 22 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:28.429788Z node 22 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:28.430166Z node 22 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [22:32:2079] 1750799601709328 != 1750799601709332 2025-06-24T21:13:28.526559Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:28.526795Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:28.539741Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:28.737905Z node 22 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1280 TClient is connected to server localhost:1280 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:13:29.511409Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:13:29.593567Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 21237, MsgBus: 3106 2025-06-24T21:13:32.264480Z node 23 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[23:7519627076649514662:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:32.264607Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031e7/r3tmp/tmpDzTwM2/pdisk_1.dat 2025-06-24T21:13:32.516983Z node 23 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:32.517172Z node 23 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [23:7519627076649514642:2079] 1750799612263507 != 1750799612263510 2025-06-24T21:13:32.542695Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:32.542872Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:32.546065Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21237, node 23 2025-06-24T21:13:32.632002Z node 23 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:32.632033Z node 23 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:32.632057Z node 23 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:32.632309Z node 23 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3106 2025-06-24T21:13:33.307320Z node 23 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3106 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:13:33.966951Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:13:37.264502Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[23:7519627076649514662:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:37.264633Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:13:39.875916Z node 23 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [23:7519627106714286371:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.876117Z node 23 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:39.927521Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:40.109340Z node 23 :RPC_REQUEST ERROR: rpc_read_rows.cpp:777: TReadRowsRPC ReplyWithError: Type mismatch, got type Uint64 for column Key22, but expected Decimal(22,9) 2025-06-24T21:13:40.121045Z node 23 :RPC_REQUEST ERROR: rpc_read_rows.cpp:777: TReadRowsRPC ReplyWithError: Type mismatch, got type Decimal(35,10) for column Key22, but expected Decimal(22,9) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestQuoterAccountResourcesForgetClient [GOOD] Test command err: 2025-06-24T21:13:28.551467Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:28.551629Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:28.568610Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:28.568918Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:28.595062Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:28.601043Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:135:2159], cookie=17079880276090191804, path="/Res", config={ MaxUnitsPerSecond: -100 }) 2025-06-24T21:13:28.601272Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:135:2159], cookie=17079880276090191804) 2025-06-24T21:13:28.601877Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:142:2164], cookie=5346465714583663228, path="/ResWithoutMaxUnitsPerSecond", config={ }) 2025-06-24T21:13:28.602001Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:142:2164], cookie=5346465714583663228) 2025-06-24T21:13:28.602489Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:145:2167], cookie=8400380669407353232, path="/ResWithMaxUnitsPerSecond", config={ MaxUnitsPerSecond: 1 }) 2025-06-24T21:13:28.602699Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "ResWithMaxUnitsPerSecond" 2025-06-24T21:13:28.615296Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:145:2167], cookie=8400380669407353232) 2025-06-24T21:13:28.615970Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:150:2172], cookie=15580912652450198817, path="/ResWithMaxUnitsPerSecond/ChildWithoutMaxUnitsPerSecond", config={ }) 2025-06-24T21:13:28.616206Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "ResWithMaxUnitsPerSecond/ChildWithoutMaxUnitsPerSecond" 
2025-06-24T21:13:28.628412Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:150:2172], cookie=15580912652450198817) 2025-06-24T21:13:28.973695Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:28.973779Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:28.993387Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:28.993499Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:29.018971Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:29.019379Z node 2 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[2:135:2159], cookie=3517361658174955768, path="/Root", config={ MaxUnitsPerSecond: 100 PrefetchCoefficient: 300 }) 2025-06-24T21:13:29.020420Z node 2 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-24T21:13:29.032750Z node 2 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[2:135:2159], cookie=3517361658174955768) 2025-06-24T21:13:29.033299Z node 2 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[2:145:2167], cookie=13438250818477781295, path="/Root/Res", config={ }) 2025-06-24T21:13:29.033502Z node 2 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Root/Res" 2025-06-24T21:13:29.047159Z node 2 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[2:145:2167], cookie=13438250818477781295) 2025-06-24T21:13:29.049094Z node 2 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [2:150:2172]. Cookie: 80480737597205419. Data: { Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Root/Res" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 100 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 300 } AccountingConfig { ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 2 Version: "version" Schema: "schema" CloudId: "cloud" FolderId: "folder" ResourceId: "resource" SourceId: "source" Tags { key: "key" value: "value" } } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:29.049175Z node 2 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[2:150:2172], cookie=80480737597205419) 2025-06-24T21:13:29.049811Z node 2 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [2:150:2172]. Cookie: 9421533277875525140. 
Data: { ResourcesInfo { ResourceId: 2 AcceptedUs: 29000 } } 2025-06-24T21:13:29.049866Z node 2 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[2:150:2172], cookie=9421533277875525140) 2025-06-24T21:13:31.251535Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:31.251653Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:31.269220Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:31.269534Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:31.300142Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:31.300684Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[3:135:2159], cookie=9879827357490108080, path="/Root", config={ MaxUnitsPerSecond: 300 PrefetchCoefficient: 1 }) 2025-06-24T21:13:31.301049Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-24T21:13:31.313442Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[3:135:2159], cookie=9879827357490108080) 2025-06-24T21:13:31.313966Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[3:145:2167], cookie=354870166974779708, path="/Root/Res", config={ }) 2025-06-24T21:13:31.314207Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Root/Res" 2025-06-24T21:13:31.327977Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[3:145:2167], cookie=354870166974779708) 2025-06-24T21:13:31.328853Z node 3 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [3:150:2172]. Cookie: 13096894329552023048. Data: { Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Root/Res" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { BillingPeriodSec: 2 } OnDemand { BillingPeriodSec: 2 } Overshoot { BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:31.328921Z node 3 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[3:150:2172], cookie=13096894329552023048) 2025-06-24T21:13:31.329409Z node 3 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [3:150:2172]. Cookie: 5471974640854972348. 
Data: { ResourcesInfo { ResourceId: 2 AcceptedUs: 1019000 } } 2025-06-24T21:13:31.329460Z node 3 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[3:150:2172], cookie=5471974640854972348) 2025-06-24T21:13:33.526156Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:33.526279Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:33.547071Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:33.547721Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:33.572781Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:33.573220Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[4:135:2159], cookie=14323248151004332947, path="/Root", config={ MaxUnitsPerSecond: 300 PrefetchCoefficient: 1 }) 2025-06-24T21:13:33.573515Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-24T21:13:33.585639Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[4:135:2159], cookie=14323248151004332947) 2025-06-24T21:13:33.586605Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [4:145:2167]. Cookie: 3639685722054173020. Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Root" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { Enabled: true ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedUnitsPerSecond: 100 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { Enabled: true BillingPeriodSec: 2 } OnDemand { Enabled: true BillingPeriodSec: 2 } Overshoot { Enabled: true BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:33.586676Z node 4 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[4:145:2167], cookie=3639685722054173020) 2025-06-24T21:13:33.587210Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [4:145:2167]. Cookie: 963897913636193123. Data: { ResourcesInfo { ResourceId: 1 AcceptedUs: 1017500 } } 2025-06-24T21:13:33.587275Z node 4 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[4:145:2167], cookie=963897913636193123) 2025-06-24T21:13:33.587770Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [4:145:2167]. Cookie: 17919169512851192931. 
Data: { ResourcesInfo { ResourceId: 1 AcceptedUs: 1017500 } } 2025-06-24T21:13:33.587818Z node 4 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[4:145:2167], cookie=17919169512851192931) 2025-06-24T21:13:35.904904Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:35.905019Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:35.924569Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:35.924722Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:35.941556Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:35.942093Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:133:2157], cookie=374440006271038078, path="/Root", config={ MaxUnitsPerSecond: 300 PrefetchCoefficient: 1 }) 2025-06-24T21:13:35.942462Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-24T21:13:35.965213Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:133:2157], cookie=374440006271038078) 2025-06-24T21:13:35.966193Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:143:2165]. Cookie: 17453019971732146278. Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Root" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { Enabled: true ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedUnitsPerSecond: 100 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { Enabled: true BillingPeriodSec: 2 } OnDemand { Enabled: true BillingPeriodSec: 2 } Overshoot { Enabled: true BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:35.966267Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:143:2165], cookie=17453019971732146278) 2025-06-24T21:13:35.966853Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [5:143:2165]. Cookie: 8299679025545260471. Data: { ResourcesInfo { ResourceId: 1 AcceptedUs: 3000000 } } 2025-06-24T21:13:35.966914Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[5:143:2165], cookie=8299679025545260471) 2025-06-24T21:13:38.539758Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:182:2189]. Cookie: 1575843500815740935. 
Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Root" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { Enabled: true ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedUnitsPerSecond: 100 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { Enabled: true BillingPeriodSec: 2 } OnDemand { Enabled: true BillingPeriodSec: 2 } Overshoot { Enabled: true BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:38.539852Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:182:2189], cookie=1575843500815740935) 2025-06-24T21:13:38.540347Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [5:182:2189]. Cookie: 15342300718432419825. Data: { ResourcesInfo { ResourceId: 1 AcceptedUs: 9000000 } } 2025-06-24T21:13:38.540389Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[5:182:2189], cookie=15342300718432419825) 2025-06-24T21:13:40.685985Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:215:2215]. Cookie: 3539182453366398304. Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Root" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { Enabled: true ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedUnitsPerSecond: 100 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { Enabled: true BillingPeriodSec: 2 } OnDemand { Enabled: true BillingPeriodSec: 2 } Overshoot { Enabled: true BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:40.686059Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:215:2215], cookie=3539182453366398304) 2025-06-24T21:13:40.686609Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [5:215:2215]. Cookie: 15087670564764959126. 
Data: { ResourcesInfo { ResourceId: 1 AcceptedUs: 15000000 } } 2025-06-24T21:13:40.686655Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[5:215:2215], cookie=15087670564764959126) >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoWithError [GOOD] >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_PQv1 >> YdbIndexTable::MultiShardTableUniqAndNonUniqIndex [GOOD] >> YdbIndexTable::MultiShardTableTwoIndexes >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-56 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-57 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-20 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-21 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoWithError [GOOD] Test command err: 2025-06-24T21:12:50.721462Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626898699807541:2164];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:50.721967Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042d0/r3tmp/tmpOLWqMy/pdisk_1.dat 2025-06-24T21:12:51.149442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:51.149554Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:51.153742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:51.193096Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27346, node 1 2025-06-24T21:12:51.383661Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:51.383694Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:51.383703Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:51.383793Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:51.723290Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:51.753953Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:51.757684Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:51.757728Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:51.758494Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:32684, port: 32684 2025-06-24T21:12:51.759194Z node 1 :LDAP_AUTH_PROVIDER 
DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:51.770263Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:12:51.812268Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****0YPw (3FA78263) () has now valid token of ldapuser@ldap 2025-06-24T21:12:54.055791Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519626915051753340:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:54.055899Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042d0/r3tmp/tmp5w2eFF/pdisk_1.dat 2025-06-24T21:12:54.237914Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:54.237988Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:54.239870Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:54.245164Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:54.246007Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519626915051753321:2079] 1750799574051560 != 1750799574051563 TServer::EnableGrpc on GrpcPort 24540, node 2 2025-06-24T21:12:54.293788Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:54.293810Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:54.293816Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:54.293944Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:54.512298Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:54.513504Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:54.513547Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:54.514202Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:20163, port: 20163 2025-06-24T21:12:54.514297Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:12:54.528306Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:54.579620Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:12:54.580207Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:340: LDAP user ldapuser does not exist. 
LDAP search for filter uid=ldapuser on server ldap://localhost:20163 return no entries 2025-06-24T21:12:54.580727Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****MOzg (89E1D818) () has now permanent error message 'Could not login via LDAP (LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldap://localhost:20163 return no entries)' 2025-06-24T21:12:57.823874Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519626927935863933:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:57.843223Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042d0/r3tmp/tmpOHs7td/pdisk_1.dat 2025-06-24T21:12:58.189798Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:58.190735Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:58.190801Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:58.191252Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519626927935863913:2079] 1750799577822521 != 1750799577822524 2025-06-24T21:12:58.194252Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16534, node 3 2025-06-24T21:12:58.359737Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:58.359756Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:58.359763Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:58.359901Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:58.543292Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:12:58.545681Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:12:58.545707Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:12:58.546385Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:23613, port: 23613 2025-06-24T21:12:58.546442Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:12:58.570413Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:12:58.615647Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:12:58.663541Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 
2025-06-24T21:12:58.664121Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:12:58.664179Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:58.712477Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:58.759535Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:12:58.761851Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****SkJQ (EABD1A40) () has now valid token of ldapuser@ldap 2025-06-24T21:12:58.785542Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:02.799692Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****SkJQ (EABD1A40) 2025-06-24T21:13:02.803265Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:23613, port: 23613 2025-06-24T21:13:02.803381Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:13:02.827765Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519626927935863933:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:02.827856Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:13:02.828626Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:02.881235Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:02.9255 ... 
lter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:21.563643Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:21.607553Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:21.608905Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****A9Vg (40856186) () has now valid token of ldapuser@ldap 2025-06-24T21:13:22.013575Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:25.992881Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519627024517406720:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:25.992975Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:13:27.000433Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****A9Vg (40856186) 2025-06-24T21:13:27.000541Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:8936, port: 8936 2025-06-24T21:13:27.000608Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:13:27.012950Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:27.055650Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:27.056264Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:340: LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldap://localhost:8936 return no entries 2025-06-24T21:13:27.056676Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****A9Vg (40856186) () has now permanent error message 'Could not login via LDAP (LDAP user ldapuser does not exist. 
LDAP search for filter uid=ldapuser on server ldap://localhost:8936 return no entries)' 2025-06-24T21:13:32.315025Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519627076751950830:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:32.315091Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0042d0/r3tmp/tmp8jGxkX/pdisk_1.dat 2025-06-24T21:13:32.486331Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:32.488790Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519627076751950812:2079] 1750799612314360 != 1750799612314363 2025-06-24T21:13:32.505345Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:32.505447Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:32.508749Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6174, node 6 2025-06-24T21:13:32.563780Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:32.563812Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:32.563821Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:32.563959Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:32.673649Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-24T21:13:32.677038Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:32.677082Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:32.677829Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:31077, port: 31077 2025-06-24T21:13:32.677928Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:13:32.689976Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:32.731757Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:32.732461Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:31077. Server is busy 2025-06-24T21:13:32.733008Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****nLrQ (D0EB64FA) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:31077. 
Server is busy)' 2025-06-24T21:13:32.733320Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:32.733342Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:32.734331Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:31077, port: 31077 2025-06-24T21:13:32.734397Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:13:32.746225Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:32.787974Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:32.788581Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:31077. Server is busy 2025-06-24T21:13:32.789021Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****nLrQ (D0EB64FA) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:31077. Server is busy)' 2025-06-24T21:13:33.338053Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:35.327424Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****nLrQ (D0EB64FA) 2025-06-24T21:13:35.327803Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:35.327837Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:35.337432Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:31077, port: 31077 2025-06-24T21:13:35.337543Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:13:35.348860Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:35.391729Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:35.392210Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:31077. Server is busy 2025-06-24T21:13:35.393569Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****nLrQ (D0EB64FA) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:31077. 
Server is busy)' 2025-06-24T21:13:37.315277Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519627076751950830:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:37.315382Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:13:39.337750Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****nLrQ (D0EB64FA) 2025-06-24T21:13:39.338041Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-24T21:13:39.338058Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-24T21:13:39.338926Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:31077, port: 31077 2025-06-24T21:13:39.338993Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-24T21:13:39.357632Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-24T21:13:39.399911Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-24T21:13:39.444081Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-24T21:13:39.444649Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-24T21:13:39.444690Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:39.492263Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:39.543560Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-24T21:13:39.544810Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****nLrQ (D0EB64FA) () has now valid token of ldapuser@ldap |95.4%| [TA] $(B)/ydb/core/security/ldap_auth_provider/ut/test-results/unittest/{meta.json ... results_accumulator.log} |95.4%| [TA] {RESULT} $(B)/ydb/core/security/ldap_auth_provider/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-68 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-69 |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest >> TopicAutoscaling::ControlPlane_DisableAutoPartitioning [GOOD] >> TopicAutoscaling::ControlPlane_PauseAutoPartitioning >> AssignTxId::Basic |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> KqpRbo::Bench_10Joins [GOOD] |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-44 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-45 |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::FoldersStyleDeduction [GOOD] >> ColumnShardTiers::DSConfigsWithQueryServiceDdl >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-51 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-52 >> TopicAutoscaling::PartitionSplit_PreferedPartition_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_PreferedPartition_PQv1 |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::FoldersStyleDeduction [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::Bench_10Joins [GOOD] Test command err: Trying to start YDB, gRPC: 29845, MsgBus: 16727 2025-06-24T21:13:35.448247Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627088527498633:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:35.448330Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043a0/r3tmp/tmpqLFFPp/pdisk_1.dat 2025-06-24T21:13:35.993478Z node 1 :CONFIGS_DISPATCHER 
ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627088527498611:2079] 1750799615439423 != 1750799615439426 2025-06-24T21:13:36.031465Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:36.033442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:36.033521Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:36.042199Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29845, node 1 2025-06-24T21:13:36.271741Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:36.271762Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:36.271769Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:36.271911Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:36.459612Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16727 TClient is connected to server localhost:16727 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:13:37.138657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:13:37.164040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 26891, MsgBus: 10826 2025-06-24T21:13:38.801961Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627102991053068:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:38.802205Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043a0/r3tmp/tmpp1J5Ic/pdisk_1.dat 2025-06-24T21:13:38.931708Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:38.936352Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519627102991053048:2079] 1750799618801108 != 1750799618801111 TServer::EnableGrpc on GrpcPort 26891, node 2 2025-06-24T21:13:38.955380Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:38.955457Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:38.957650Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:38.977691Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:38.977727Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:38.977737Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:38.977868Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10826 TClient is connected to server localhost:10826 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:13:39.413401Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:13:39.813803Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:13:40.448430Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627088527498633:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:40.448532Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:13:41.817207Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627115875955569:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:41.817355Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:42.102065Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:42.212710Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:42.245582Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:42.279891Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:42.314267Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:42.384309Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:42.408985Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:42.435916Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:42.466766Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo 
unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:42.498478Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:13:42.545678Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627120170923605:2363], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:42.545745Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:42.545887Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627120170923610:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:42.549314Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715668:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:13:42.558587Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519627120170923612:2367], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715668 completed, doublechecking } 2025-06-24T21:13:42.640586Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519627120170923663:2805] txid# 281474976715669, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 15], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:13:43.801992Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519627102991053068:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:43.802056Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TKesusTest::TestSessionTimeoutAfterDetach [GOOD] >> TKesusTest::TestSessionTimeoutAfterReboot >> TKesusTest::TestAcquireSemaphoreTimeout [GOOD] >> TKesusTest::TestAcquireSemaphoreTimeoutTooBig >> YdbOlapStore::LogExistingRequest [GOOD] >> YdbOlapStore::LogExistingUserId |95.5%| [TA] $(B)/ydb/core/kqp/ut/rbo/test-results/unittest/{meta.json ... results_accumulator.log} |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.5%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/rbo/test-results/unittest/{meta.json ... results_accumulator.log} >> ColumnShardTiers::DSConfigs |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> TKesusTest::TestAcquireSemaphoreTimeoutTooBig [GOOD] >> TKesusTest::TestAcquireSemaphoreTimeoutInfinite |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> TKesusTest::TestAcquireTimeout [GOOD] >> TKesusTest::TestAcquireSharedBlocked >> ColumnShardTiers::TTLUsage |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> CommitOffset::Commit_WithoutSession_ParentNotFinished [GOOD] >> CommitOffset::Commit_WithoutSession_ToPastParentPartition >> TKesusTest::TestSessionTimeoutAfterUnregister [GOOD] >> TKesusTest::TestStopResourceAllocationWhenPipeDestroyed >> TKesusTest::TestAcquireSemaphoreTimeoutInfinite [GOOD] >> TKesusTest::TestAcquireSemaphoreRebootTimeout >> TKesusTest::TestAcquireSharedBlocked [GOOD] >> TKesusTest::TestAcquireTimeoutAfterReboot |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-57 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-58 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-21 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-22 |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> TKesusTest::TestStopResourceAllocationWhenPipeDestroyed [GOOD] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_BeforeAutoscaleAwareSDK [GOOD] >> TSchemeShardServerLess::StorageBilling 
[GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_PQv1 |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestStopResourceAllocationWhenPipeDestroyed [GOOD] Test command err: 2025-06-24T21:13:26.420169Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.420309Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:26.448916Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:26.449188Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:26.474522Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:26.961307Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.961418Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:26.977385Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:26.977554Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:27.006390Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:27.410753Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:27.410878Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:27.430227Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:27.430543Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:27.457539Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:27.458091Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:135:2159], cookie=14055099090560957430, session=0, seqNo=0) 2025-06-24T21:13:27.458298Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:27.470317Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:135:2159], cookie=14055099090560957430, session=1) 2025-06-24T21:13:27.470879Z node 3 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[3:144:2166], cookie=18003109809187793211) 2025-06-24T21:13:27.470963Z node 3 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[3:144:2166], cookie=18003109809187793211) 2025-06-24T21:13:27.916517Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:27.929764Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:28.285700Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:28.299644Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:28.657301Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 
2025-06-24T21:13:28.669326Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:29.029768Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:29.044007Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:29.438044Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:29.452644Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:29.820415Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:29.836117Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:30.198836Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:30.212984Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:30.577025Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:30.590046Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:30.954614Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:30.967039Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:31.380514Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:31.393179Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:31.769244Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:31.781386Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:32.151346Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:32.165151Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:32.533594Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:32.547903Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:32.924030Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:32.940057Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:33.341233Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:33.353597Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:33.733699Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:33.748514Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:34.116877Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:34.129477Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:34.493692Z node 3 :KESUS_TABLET DEBUG: 
tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:34.506214Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:34.877025Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:34.889537Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:35.268987Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:35.281447Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:35.663501Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:35.684321Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:36.115837Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:36.129787Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:36.517111Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:36.530291Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:36.914251Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:36.926980Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:37.323486Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:37.350207Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:37.732091Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:37.744787Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:38.115941Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:38.132971Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:38.510073Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:38.528010Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:38.911335Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:38.925646Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:39.332038Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:39.344765Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:39.715472Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:39.727546Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:40.102983Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:40.115108Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] 
TTxSelfCheck::Complete 2025-06-24T21:13:40.472806Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:40.487864Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:40.854070Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:40.865993Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:41.238974Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:41.251415Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:41.619037Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:41.631199Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:42.005573Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:42.018972Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:42.375481Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:42.394019Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:42.749922Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:42.765480Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:43.149736Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:43.163698Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:43.517864Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:43.531996Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:43.908356Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:43.920337Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:44.286172Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:44.298298Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:44.662247Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:44.675116Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:45.062594Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:45.074722Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:45.442030Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:45.455678Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:45.830208Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:45.842120Z node 3 
:KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:46.208270Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:46.220947Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:46.587642Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:46.599813Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:46.968518Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:46.981183Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:47.340214Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:47.352897Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:47.720027Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:47.732480Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:48.086544Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:48.102927Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:48.462518Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:48.476077Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:48.819537Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:48.831982Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:49.194847Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:49.208303Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:49.567535Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:49.580382Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:49.978458Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:50.001009Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:50.376917Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:50.389276Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:50.819249Z node 3 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:27: [72057594037927937] TTxSessionTimeout::Execute (session=1) 2025-06-24T21:13:50.819355Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] Deleting session 1 2025-06-24T21:13:50.836017Z node 3 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:56: [72057594037927937] TTxSessionTimeout::Complete (session=1) 2025-06-24T21:13:50.848053Z node 3 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[3:533:2480], 
cookie=6593637939167186165) 2025-06-24T21:13:50.848179Z node 3 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[3:533:2480], cookie=6593637939167186165) 2025-06-24T21:13:51.372390Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:51.372535Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:51.394653Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:51.395789Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:51.424009Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:51.431471Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[4:135:2159], cookie=6732944810984532276, path="Root", config={ MaxUnitsPerSecond: 100 }) 2025-06-24T21:13:51.431802Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-24T21:13:51.445069Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[4:135:2159], cookie=6732944810984532276) 2025-06-24T21:13:51.446940Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [4:144:2166]. Cookie: 0. Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Root" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 100 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:51.447038Z node 4 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[4:144:2166], cookie=0) 2025-06-24T21:13:51.447366Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [4:146:2168]. Cookie: 0. Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Root" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 100 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-24T21:13:51.447403Z node 4 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[4:146:2168], cookie=0) 2025-06-24T21:13:51.489715Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [4:144:2166]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 1 Amount: 5 StateNotification { Status: SUCCESS } } } 2025-06-24T21:13:51.489842Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [4:146:2168]. Cookie: 0. 
Data: { ResourcesInfo { ResourceId: 1 Amount: 5 StateNotification { Status: SUCCESS } } } 2025-06-24T21:13:51.490176Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:318: Got TEvServerDisconnected([4:149:2171]) 2025-06-24T21:13:51.490366Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:37: [72057594037927937] Send TEvResourcesAllocated to [4:146:2168]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 1 StateNotification { Status: SESSION_EXPIRED Issues { message: "Disconected." } } } } 2025-06-24T21:13:51.544460Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [4:144:2166]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 1 Amount: 10 StateNotification { Status: SUCCESS } } } |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> AssignTxId::Basic [GOOD] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-69 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-70 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::StorageBilling [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:13:06.497113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:13:06.497231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:13:06.497282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:13:06.497335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:13:06.501271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:13:06.501364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:13:06.501551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:13:06.501654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:13:06.502495Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:13:06.507619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:13:06.653002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:13:06.653074Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:06.679841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:13:06.684567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:13:06.684757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:13:06.701602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:13:06.701865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:13:06.713658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:06.714162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:13:06.736370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:13:06.737370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:13:06.750985Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:13:06.751082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:13:06.751219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:13:06.751290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:13:06.751378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:13:06.751549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:13:06.764359Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:13:06.916622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-24T21:13:06.916865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:06.917100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:13:06.917151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:13:06.917402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:13:06.917526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:13:06.921944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:06.922189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:13:06.922423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:06.922511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:13:06.922562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:13:06.922601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:13:06.928613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:06.928691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:13:06.928751Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:13:06.932128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:06.932187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:06.932231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:06.932269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:13:06.941677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:13:06.944193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:13:06.944466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:13:06.945385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:06.945535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:13:06.945586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:06.945948Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:13:06.946011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:06.946150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:13:06.946215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:13:06.949282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:13:06.949333Z node 1 :FLAT_TX_SCHEMESHARD ... 
e 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:663:2573], at schemeshard: 72075186233409549, txId: 107, path id: 2 2025-06-24T21:13:51.759077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72075186233409549 2025-06-24T21:13:51.759136Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 107:0 ProgressState at tablet: 72075186233409549 2025-06-24T21:13:51.759305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 107:0, at schemeshard: 72075186233409549 2025-06-24T21:13:51.759359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 107:0, datashard: 72075186233409552, at schemeshard: 72075186233409549 2025-06-24T21:13:51.759408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 107:0 129 -> 240 2025-06-24T21:13:51.760609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409549, cookie: 107 2025-06-24T21:13:51.760723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409549, cookie: 107 2025-06-24T21:13:51.760766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409549, txId: 107 2025-06-24T21:13:51.760809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409549, txId: 107, pathId: [OwnerId: 72075186233409549, LocalPathId: 1], version: 9 2025-06-24T21:13:51.760856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409549, LocalPathId: 1] was 5 2025-06-24T21:13:51.762728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409549, cookie: 107 2025-06-24T21:13:51.762858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409549, cookie: 107 2025-06-24T21:13:51.762901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409549, txId: 107 2025-06-24T21:13:51.762932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409549, txId: 107, pathId: [OwnerId: 72075186233409549, LocalPathId: 2], version: 18446744073709551615 2025-06-24T21:13:51.762967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 4 2025-06-24T21:13:51.763046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 107, ready parts: 0/1, is published: true 2025-06-24T21:13:51.765882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72075186233409549 2025-06-24T21:13:51.765947Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 107:0 ProgressState, at schemeshard: 72075186233409549 2025-06-24T21:13:51.766368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 3 2025-06-24T21:13:51.766537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#107:0 progress is 1/1 2025-06-24T21:13:51.766580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-24T21:13:51.766622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#107:0 progress is 1/1 2025-06-24T21:13:51.766658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-24T21:13:51.766699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 107, ready parts: 1/1, is published: true 2025-06-24T21:13:51.766779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:805:2684] message: TxId: 107 2025-06-24T21:13:51.766830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-24T21:13:51.766870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 107:0 2025-06-24T21:13:51.766905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 107:0 2025-06-24T21:13:51.767039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 2 2025-06-24T21:13:51.768192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409549, cookie: 107 2025-06-24T21:13:51.770356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409549, cookie: 107 2025-06-24T21:13:51.776473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-24T21:13:51.776558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:2176:4018] TestWaitNotification: OK eventTxId 107 2025-06-24T21:13:51.795982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72075186233409549, message: Source { RawX1: 777 RawX2: 4294969960 } TabletId: 72075186233409552 State: 4 2025-06-24T21:13:51.796104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409552, state: Offline, at schemeshard: 72075186233409549 
2025-06-24T21:13:51.799281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72075186233409549:4 hive 72057594037968897 at ss 72075186233409549 2025-06-24T21:13:51.800042Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72075186233409549 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409552 2025-06-24T21:13:51.802784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72075186233409549 ShardLocalIdx: 4, at schemeshard: 72075186233409549 2025-06-24T21:13:51.803506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 1 2025-06-24T21:13:51.804642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72075186233409549 2025-06-24T21:13:51.804705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72075186233409549, LocalPathId: 2], at schemeshard: 72075186233409549 2025-06-24T21:13:51.804791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72075186233409549, LocalPathId: 1] was 4 2025-06-24T21:13:51.816730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72075186233409549:4 2025-06-24T21:13:51.816862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72075186233409549:4 tabletId 72075186233409552 2025-06-24T21:13:51.817379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72075186233409549 2025-06-24T21:13:51.944073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6712: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409549 2025-06-24T21:13:51.944197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409549 2025-06-24T21:13:51.944254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409549 2025-06-24T21:13:51.944320Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6712: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546 2025-06-24T21:13:51.944358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546 2025-06-24T21:13:51.944394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546 2025-06-24T21:13:51.944437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6712: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944 2025-06-24T21:13:51.944471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-24T21:13:51.944503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-24T21:13:52.007344Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:13:52.007761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:191: TTxServerlessStorageBilling: make a bill, record: '{"usage":{"start":1600452180,"quantity":59,"finish":1600452239,"type":"delta","unit":"byte*second"},"tags":{"ydb_size":0},"id":"72057594046678944-3-1600452180-1600452239-0","cloud_id":"CLOUD_ID_VAL","source_wt":1600452240,"source_id":"sless-docapi-ydb-storage","resource_id":"DATABASE_ID_VAL","schema":"ydb.serverless.v1","folder_id":"FOLDER_ID_VAL","version":"1.0.0"} ', schemeshardId: 72075186233409549, domainId: [OwnerId: 72057594046678944, LocalPathId: 3], now: 2020-09-18T18:04:00.027000Z, LastBillTime: 2020-09-18T18:02:00.000000Z, lastBilled: 2020-09-18T18:02:00.000000Z--2020-09-18T18:02:59.000000Z, toBill: 2020-09-18T18:03:00.000000Z--2020-09-18T18:03:59.000000Z, next retry at: 2020-09-18T18:05:00.000000Z 2025-06-24T21:13:52.012331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete grabMeteringMessage has happened 2025-06-24T21:13:52.012539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:335: tests -- TFakeMetering got TEvMetering::TEvWriteMeteringJson quantity: 59, 59 unit: "byte*second", "byte*second" type: "delta", "delta" >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnEmptyTenant-StrictAclCheck [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnEmptyTenant-DomainLoginOnly |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::TieringUsage ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest >> AssignTxId::Basic [GOOD] Test command err: 2025-06-24T21:13:47.626959Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627143699434493:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:47.627027Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0016a3/r3tmp/tmpcl8UPD/pdisk_1.dat 2025-06-24T21:13:48.035003Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:48.050639Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627143699434463:2079] 1750799627624801 != 1750799627624804 2025-06-24T21:13:48.101093Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:48.101224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:48.115452Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62262 TServer::EnableGrpc on GrpcPort 31849, node 1 2025-06-24T21:13:48.470823Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:48.470853Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:48.470862Z node 1 :NET_CLASSIFIER 
WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:48.471002Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:48.635211Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:62262 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:13:49.026397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:13:50.872680Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627156584337001:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:50.872824Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:13:51.566142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateReplication, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp:473) 2025-06-24T21:13:51.583516Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:41: [controller 72075186224037888] OnActivateExecutor 2025-06-24T21:13:51.583650Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_init_schema.cpp:17: [controller 72075186224037888][TxInitSchema] Execute 2025-06-24T21:13:51.590115Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_init_schema.cpp:26: [controller 72075186224037888][TxInitSchema] Complete 2025-06-24T21:13:51.590219Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_init.cpp:240: [controller 72075186224037888][TxInit] Execute 2025-06-24T21:13:51.590487Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_init.cpp:245: [controller 72075186224037888][TxInit] Complete 2025-06-24T21:13:51.590497Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:113: [controller 72075186224037888] SwitchToWork 2025-06-24T21:13:51.634798Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:142: [controller 72075186224037888] Handle NKikimrReplication.TEvCreateReplication PathId { OwnerId: 72057594046644480 LocalId: 2 } OperationId { TxId: 281474976710658 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:31849" Database: "/Root" OAuthToken { Token: "***" } EnableSsl: false } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Global { CommitIntervalMilliSeconds: 10000 } } } Database: "/Root" 2025-06-24T21:13:51.635093Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_create_replication.cpp:22: [controller 72075186224037888][TxCreateReplication] Execute: NKikimrReplication.TEvCreateReplication PathId { OwnerId: 72057594046644480 LocalId: 2 } OperationId { TxId: 281474976710658 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:31849" Database: "/Root" OAuthToken { Token: "***" } EnableSsl: false } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Global { CommitIntervalMilliSeconds: 10000 } } } Database: "/Root" 2025-06-24T21:13:51.635199Z node 1 :REPLICATION_CONTROLLER NOTICE: tx_create_replication.cpp:43: [controller 72075186224037888][TxCreateReplication] Add replication: rid# 1, pathId# [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T21:13:51.636219Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_create_replication.cpp:58: [controller 72075186224037888][TxCreateReplication] Complete 2025-06-24T21:13:51.640248Z node 1 :REPLICATION_CONTROLLER TRACE: tenant_resolver.cpp:33: [TenantResolver][rid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root/replication TableId: [72057594046644480:2:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindReplication DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 
2025-06-24T21:13:51.642650Z node 1 :REPLICATION_CONTROLLER TRACE: tenant_resolver.cpp:33: [TenantResolver][rid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:13:51.650374Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:252: [controller 72075186224037888] Handle NKikimr::NReplication::NController::TEvPrivate::TEvResolveTenantResult { ReplicationId: 1 Tenant: /Root Sucess: 1 } 2025-06-24T21:13:51.650406Z node 1 :REPLICATION_CONTROLLER NOTICE: controller.cpp:267: [controller 72075186224037888] Tenant resolved: rid# 1, tenant# /Root 2025-06-24T21:13:51.650431Z node 1 :REPLICATION_CONTROLLER INFO: controller.cpp:271: [controller 72075186224037888] Discover tenant nodes: tenant# /Root 2025-06-24T21:13:51.661149Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:297: [controller 72075186224037888] Handle NKikimr::TEvDiscovery::TEvDiscoveryData 2025-06-24T21:13:51.661201Z node 1 :REPLICATION_CONTROLLER DEBUG: controller.cpp:321: [controller 72075186224037888] Create session: nodeId# 1 TClient::Ls request: /Root/replication 2025-06-24T21:13:51.716548Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { status: SCHEME_ERROR, issues: {
: Error: Path not found } } } 2025-06-24T21:13:51.716618Z node 1 :REPLICATION_CONTROLLER ERROR: target_discoverer.cpp:78: [TargetDiscoverer][rid 1] Describe path failed: path# /Root/table, status# SCHEME_ERROR, issues# {
: Error: Path not found } 2025-06-24T21:13:51.716821Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:172: [controller 72075186224037888] Handle NKikimr::NReplication::NController::TEvPrivate::TEvDiscoveryTargetsResult { ReplicationId: 1 ToAdd [] ToDelete [] Failed [/Root/table: SCHEME_ERROR ({
: Error: Path not found })] } 2025-06-24T21:13:51.716973Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_discovery_targets_result.cpp:24: [controller 72075186224037888][TxDiscoveryTargetsResult] Execute: NKikimr::NReplication::NController::TEvPrivate::TEvDiscoveryTargetsResult { ReplicationId: 1 ToAdd [] ToDelete [] Failed [/Root/table: SCHEME_ERROR ({
: Error: Path not found })] } 2025-06-24T21:13:51.717023Z node 1 :REPLICATION_CONTROLLER ERROR: tx_discovery_targets_result.cpp:76: [controller 72075186224037888][TxDiscoveryTargetsResult] Discovery error: rid# 1, error# /Root/table: SCHEME_ERROR ({
: Error: Path not found }) 2025-06-24T21:13:51.717946Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_discovery_targets_result.cpp:89: [controller 72075186224037888][TxDiscoveryTargetsResult] Complete TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "replication" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeReplication CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750799631686 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ReplicationVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsIns... (TRUNCATED) 2025-06-24T21:13:51.736327Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 1 TxId: 0 } 2025-06-24T21:13:51.736405Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 0, allocated# 0 2025-06-24T21:13:51.736458Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 1, assigned# 0, allocated# 0, exhausted# 1 2025-06-24T21:13:51.736542Z node 1 :REPLICATION_CONTROLLER TRACE: tx_assign_tx_id.cpp:174: [controller 72075186224037888] Handle NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-24T21:13:51.737908Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 0, allocated# 5 2025-06-24T21:13:51.738563Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 1, allocated# 4, exhausted# 0 2025-06-24T21:13:51.739116Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 9999 TxId: 0 } 2025-06-24T21:13:51.739198Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 1, allocated# 4 2025-06-24T21:13:51.739251Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 1, allocated# 4, exhausted# 0 2025-06-24T21:13:51.739737Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 9999 TxId: 18446744073709551615 } 2025-06-24T21:13:51.739776Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 1, allocated# 4 2025-06-24T21:13:51.739813Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 1, allocated# 4, exhausted# 0 2025-06-24T21:13:51.740286Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 10000 TxId: 0 } 2025-06-24T21:13:51.740315Z node 1 
:REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 1, allocated# 4 2025-06-24T21:13:51.741564Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 2, allocated# 3, exhausted# 0 2025-06-24T21:13:51.741997Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 5000 TxId: 0 } 2025-06-24T21:13:51.742135Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 2, allocated# 3 2025-06-24T21:13:51.742222Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 2, allocated# 3, exhausted# 0 2025-06-24T21:13:51.742766Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 20000 TxId: 0 } Versions { Step: 30000 TxId: 0 } Versions { Step: 40000 TxId: 0 } 2025-06-24T21:13:51.742800Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 3, assigned# 2, allocated# 3 2025-06-24T21:13:51.743425Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 5, allocated# 0, exhausted# 0 2025-06-24T21:13:51.743514Z node 1 :REPLICATION_CONTROLLER TRACE: tx_assign_tx_id.cpp:174: [controller 72075186224037888] Handle NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-24T21:13:51.743544Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 0, assigned# 5, allocated# 5 2025-06-24T21:13:51.743599Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 5, allocated# 5, exhausted# 0 2025-06-24T21:13:51.744155Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 50000 TxId: 0 } 2025-06-24T21:13:51.744186Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 5, allocated# 5 2025-06-24T21:13:51.744718Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 5, allocated# 5, exhausted# 0 |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::DSConfigsStub |95.5%| [TA] $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/test-results/unittest/{meta.json ... results_accumulator.log} |95.5%| [TA] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/test-results/unittest/{meta.json ... 
results_accumulator.log} |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> TKesusTest::TestAcquireLocks [GOOD] >> TKesusTest::TestAcquireRepeat |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> YdbLogStore::LogTable [GOOD] >> YdbLogStore::AlterLogTable |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> TKesusTest::TestAcquireRepeat [GOOD] >> TKesusTest::TestAcquireDowngrade |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> TKesusTest::TestAcquireDowngrade [GOOD] >> TKesusTest::TestAcquireBeforeTimeoutViaSessionTimeout |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::Port |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::Port [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-45 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-46 |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-52 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-53 >> S3SettingsConversion::FoldersStrictStyle [GOOD] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::Port [GOOD] >> S3SettingsConversion::Basic [GOOD] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::FoldersStrictStyle [GOOD] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::Basic [GOOD] >> YdbIndexTable::MultiShardTableOneIndexIndexOverlap [GOOD] >> Balancing::Balancing_ManyTopics_PQv1 [GOOD] >> CommitOffset::Commit_Flat_WithWrongSession |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> TopicAutoscaling::ControlPlane_PauseAutoPartitioning [GOOD] >> TopicAutoscaling::ControlPlane_CDC_Enable >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-22 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-23 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-58 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-59 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::MultiShardTableOneIndexIndexOverlap [GOOD] Test command err: Trying to start YDB, gRPC: 20146, MsgBus: 5898 2025-06-24T21:11:26.297839Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626536212600027:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:26.297914Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003f87/r3tmp/tmpGWNHaL/pdisk_1.dat 2025-06-24T21:11:26.687710Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626536212600001:2079] 1750799486293810 != 1750799486293813 2025-06-24T21:11:26.696802Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20146, node 1 2025-06-24T21:11:26.739741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:26.739861Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:26.741588Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:26.877136Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:26.877158Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:26.877170Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:26.877282Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:27.333019Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5898 TClient is connected to server localhost:5898 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:27.832539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:27.880555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:28.047973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:28.238149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:28.403192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:29.844118Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626549097503540:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:29.844233Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.457467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.487253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.515366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.546398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.583635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.618841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.690625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.754320Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626553392471497:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.754394Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.754819Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626553392471502:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.758829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:30.777520Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626553392471504:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:30.866529Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626553392471557:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:31.298071Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626536212600027:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:31.298140Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:32.133107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:33.053713Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { Tr ... jVkZmUtZWEyYjBkOTUtNjA4NGVmYTItYjJiMDA1MWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.209781Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719342. Ctx: { TraceId: 01jyhwmjrq36nspq11x7rs2jsn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YmI2Yjg5OGItMzU1YzExZGQtOGI3MzNmOTItNWMzZjFjZTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.214072Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719343. Ctx: { TraceId: 01jyhwmjrwes0syxen2d6z4gsh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MjFkZTE0YzktMTJkMDk5NzItMjMwNjJlMGEtOGViZWEwZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.227790Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719344. Ctx: { TraceId: 01jyhwmjrwes0syxen2d6z4gsh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MjFkZTE0YzktMTJkMDk5NzItMjMwNjJlMGEtOGViZWEwZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.233816Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719345. Ctx: { TraceId: 01jyhwmjt84xcz4ghfqd4zgq7p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjdlMTdhNTItNGY0OGYyMDUtODcwNTg5ZDUtNjg3YjVjNWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.236103Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719346. Ctx: { TraceId: 01jyhwmjtgdd135mrzx76tgvp5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZTg2Yjg3YWItZTYyZWY2ZmItYTlkYzQwMGMtYjU0NmIxYTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.245621Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719347. 
Ctx: { TraceId: 01jyhwmjt84xcz4ghfqd4zgq7p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjdlMTdhNTItNGY0OGYyMDUtODcwNTg5ZDUtNjg3YjVjNWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.262780Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719348. Ctx: { TraceId: 01jyhwmjv36q7509ytdww08cbh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NWYxYWExYWYtZDJjMTdjMmUtZGJiMDE1Y2UtMTk4Y2YxYjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.264950Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719349. Ctx: { TraceId: 01jyhwmjv35kveckap3m12ts92, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YmI2Yjg5OGItMzU1YzExZGQtOGI3MzNmOTItNWMzZjFjZTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.270343Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719350. Ctx: { TraceId: 01jyhwmjv36q7509ytdww08cbh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NWYxYWExYWYtZDJjMTdjMmUtZGJiMDE1Y2UtMTk4Y2YxYjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.275313Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719351. Ctx: { TraceId: 01jyhwmjv35kveckap3m12ts92, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YmI2Yjg5OGItMzU1YzExZGQtOGI3MzNmOTItNWMzZjFjZTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.279372Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719352. Ctx: { TraceId: 01jyhwmjv35kveckap3m12ts92, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YmI2Yjg5OGItMzU1YzExZGQtOGI3MzNmOTItNWMzZjFjZTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.285311Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719353. Ctx: { TraceId: 01jyhwmjvzdk4zw5j4c5eczjmt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTUzNjVkZmUtZWEyYjBkOTUtNjA4NGVmYTItYjJiMDA1MWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.289700Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719354. Ctx: { TraceId: 01jyhwmjvze7nzsybg8qyesbtg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MjFkZTE0YzktMTJkMDk5NzItMjMwNjJlMGEtOGViZWEwZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.292877Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719355. Ctx: { TraceId: 01jyhwmjvqeg43cn2my23v80sy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZTVmOThmZTEtYTVkMGNhMmMtNDkxZWQ2NzItY2Q1MjY5ZDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.299512Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719356. Ctx: { TraceId: 01jyhwmjvze7nzsybg8qyesbtg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MjFkZTE0YzktMTJkMDk5NzItMjMwNjJlMGEtOGViZWEwZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.302364Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719357. 
Ctx: { TraceId: 01jyhwmjvqeg43cn2my23v80sy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZTVmOThmZTEtYTVkMGNhMmMtNDkxZWQ2NzItY2Q1MjY5ZDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.306445Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719358. Ctx: { TraceId: 01jyhwmjvqeg43cn2my23v80sy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZTVmOThmZTEtYTVkMGNhMmMtNDkxZWQ2NzItY2Q1MjY5ZDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.319059Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719359. Ctx: { TraceId: 01jyhwmjwxf0fenw3vdvgwr9gm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZTg2Yjg3YWItZTYyZWY2ZmItYTlkYzQwMGMtYjU0NmIxYTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.324645Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719361. Ctx: { TraceId: 01jyhwmjwxf0fenw3vdvgwr9gm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZTg2Yjg3YWItZTYyZWY2ZmItYTlkYzQwMGMtYjU0NmIxYTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.324704Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719360. Ctx: { TraceId: 01jyhwmjx47sdn2ncyg4zfvyz4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjdlMTdhNTItNGY0OGYyMDUtODcwNTg5ZDUtNjg3YjVjNWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.332109Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719362. Ctx: { TraceId: 01jyhwmjwxf0fenw3vdvgwr9gm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZTg2Yjg3YWItZTYyZWY2ZmItYTlkYzQwMGMtYjU0NmIxYTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.340090Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719363. Ctx: { TraceId: 01jyhwmjx47sdn2ncyg4zfvyz4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjdlMTdhNTItNGY0OGYyMDUtODcwNTg5ZDUtNjg3YjVjNWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.344305Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719364. Ctx: { TraceId: 01jyhwmjxm6kynz849c577dhpr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YmI2Yjg5OGItMzU1YzExZGQtOGI3MzNmOTItNWMzZjFjZTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.346309Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719365. Ctx: { TraceId: 01jyhwmjxm76mwt934bfgbxdth, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTUzNjVkZmUtZWEyYjBkOTUtNjA4NGVmYTItYjJiMDA1MWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-24T21:13:53.362557Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719366. Ctx: { TraceId: 01jyhwmjy8fc5khfn4rb4eha8y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MjFkZTE0YzktMTJkMDk5NzItMjMwNjJlMGEtOGViZWEwZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:13:53.381716Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719367. Ctx: { TraceId: 01jyhwmjy8fc5khfn4rb4eha8y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MjFkZTE0YzktMTJkMDk5NzItMjMwNjJlMGEtOGViZWEwZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.385765Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719368. Ctx: { TraceId: 01jyhwmjy8fc5khfn4rb4eha8y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MjFkZTE0YzktMTJkMDk5NzItMjMwNjJlMGEtOGViZWEwZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.402805Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719370. Ctx: { TraceId: 01jyhwmjze3a0pkcaxbbywmx5v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YmI2Yjg5OGItMzU1YzExZGQtOGI3MzNmOTItNWMzZjFjZTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-24T21:13:53.403709Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719369. Ctx: { TraceId: 01jyhwmjze7y1bp7p7kjf6226g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZTVmOThmZTEtYTVkMGNhMmMtNDkxZWQ2NzItY2Q1MjY5ZDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.410779Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719371. Ctx: { TraceId: 01jyhwmjxm76mwt934bfgbxdth, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTUzNjVkZmUtZWEyYjBkOTUtNjA4NGVmYTItYjJiMDA1MWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.413369Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719372. Ctx: { TraceId: 01jyhwmjze3a0pkcaxbbywmx5v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YmI2Yjg5OGItMzU1YzExZGQtOGI3MzNmOTItNWMzZjFjZTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:13:53.416794Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719373. Ctx: { TraceId: 01jyhwmjxm76mwt934bfgbxdth, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTUzNjVkZmUtZWEyYjBkOTUtNjA4NGVmYTItYjJiMDA1MWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-24T21:13:53.418480Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719374. Ctx: { TraceId: 01jyhwmjze3a0pkcaxbbywmx5v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YmI2Yjg5OGItMzU1YzExZGQtOGI3MzNmOTItNWMzZjFjZTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS >> YdbOlapStore::LogWithUnionAllAscending [GOOD] >> YdbOlapStore::LogWithUnionAllDescending >> TopicAutoscaling::BalancingAfterSplit_sessionsWithPartition [GOOD] >> TPersQueueMirrorer::ValidStartStream >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-70 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-71 |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::UpdateAndDelete >> KqpSystemView::Join >> KqpSystemView::ReadSuccess >> KqpSystemView::QueryStatsScan >> KqpSystemView::Sessions >> KqpSystemView::PartitionStatsRange1 >> KqpSysColV1::StreamSelectRange >> KqpSystemView::PartitionStatsSimple >> KqpSysColV1::StreamInnerJoinSelectAsterisk >> TopicAutoscaling::PartitionSplit_ManySession_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_ManySession_AutoscaleAwareSDK >> YdbLogStore::AlterLogTable [FAIL] >> KqpSysColV1::SelectRowById >> KqpSystemView::NodesRange2 >> KqpSysColV0::InnerJoinTables >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-53 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-54 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-46 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-47 >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnEmptyTenant-DomainLoginOnly [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnEmptyTenant-DomainLoginOnly-StrictAclCheck >> CommitOffset::DistributedTxCommit_ChildFirst [GOOD] >> CommitOffset::DistributedTxCommit_CheckSessionResetAfterCommit >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-23 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-59 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-24 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-60 >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_AutoscaleAwareSDK >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-71 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-72 >> KqpSysColV1::StreamSelectRange [GOOD] >> KqpSystemView::ReadSuccess [GOOD] >> KqpSystemView::PartitionStatsRange1 [GOOD] >> KqpSystemView::PartitionStatsSimple [GOOD] >> TKesusTest::TestAcquireSemaphoreRebootTimeout [GOOD] >> TKesusTest::TestAcquireSemaphoreViaDecrease >> KqpSysColV1::StreamInnerJoinSelectAsterisk [GOOD] >> TKesusTest::TestAcquireBeforeTimeoutViaSessionTimeout [GOOD] >> TKesusTest::TestAcquireSemaphore >> TKesusTest::TestAcquireSemaphoreViaDecrease [GOOD] >> KqpSysColV1::SelectRowById [GOOD] >> TKesusTest::TestAcquireSemaphore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamSelectRange [GOOD] Test command err: Trying to start YDB, gRPC: 10247, MsgBus: 30076 2025-06-24T21:14:00.932482Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627195855768944:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:00.932600Z node 1 :METADATA_PROVIDER ERROR: 
log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a32/r3tmp/tmpe9mRni/pdisk_1.dat 2025-06-24T21:14:01.410748Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627195855768920:2079] 1750799640923703 != 1750799640923706 2025-06-24T21:14:01.411362Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:01.449798Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:01.449903Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 10247, node 1 2025-06-24T21:14:01.455355Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:01.619880Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:01.619907Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:01.619915Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:01.620052Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:14:01.947538Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30076 TClient is connected to server localhost:30076 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:02.606166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:14:02.638659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:02.845409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:03.030775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:14:03.095857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:04.507268Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627213035639725:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:04.507400Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.221202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.253027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.282149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.354823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.386576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.420532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.490324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.546045Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627217330607689:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.546116Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.546313Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627217330607694:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.549825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:05.561864Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627217330607696:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:14:05.661023Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627217330607747:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:05.934703Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627195855768944:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:05.934754Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:07.256496Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799647275, txId: 281474976710672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::ReadSuccess [GOOD] Test command err: Trying to start YDB, gRPC: 21341, MsgBus: 11971 2025-06-24T21:14:00.958285Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627196061479385:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:00.958355Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a34/r3tmp/tmp28QLJF/pdisk_1.dat 2025-06-24T21:14:01.415630Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:01.419363Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627196061479187:2079] 1750799640921837 != 1750799640921840 2025-06-24T21:14:01.431765Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:01.431860Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:01.436678Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21341, node 1 2025-06-24T21:14:01.623817Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:01.623842Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:01.623850Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:01.623975Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:14:01.959370Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11971 TClient is connected to server localhost:11971 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:02.626144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:02.648636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:14:02.666435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:02.823844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:03.010883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:03.095043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:04.507162Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627213241350026:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:04.507277Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.221172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.253058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.283647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.315079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.350203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.391843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.442024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.542517Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627217536317980:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.542631Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.542897Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627217536317985:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.546723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:05.563613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T21:14:05.563823Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627217536317987:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:14:05.655808Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627217536318038:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:05.958086Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627196061479385:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:05.958168Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:07.027554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:14:07.233824Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710674. Ctx: { TraceId: 01jyhwn0abcdqpm19zacngx1v8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Yjk3NDk3MjgtZDQ4NWVlYmItNGQwZWZhNTItZWViZmRmYTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:07.257562Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799647227, txId: 281474976710673] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsRange1 [GOOD] Test command err: Trying to start YDB, gRPC: 23033, MsgBus: 13758 2025-06-24T21:14:00.928848Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627199822111067:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:00.929488Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a51/r3tmp/tmpQdv9Vt/pdisk_1.dat 2025-06-24T21:14:01.427042Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627199822111047:2079] 1750799640921907 != 1750799640921910 2025-06-24T21:14:01.436576Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:01.438598Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:01.438690Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:01.440969Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23033, node 1 2025-06-24T21:14:01.623777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty 
maybe) 2025-06-24T21:14:01.623804Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:01.623813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:01.623926Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:14:02.011111Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13758 TClient is connected to server localhost:13758 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:02.730272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:02.754873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:02.950959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:03.135786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:14:03.222374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:04.620157Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627217001981873:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:04.620283Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.221284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.267772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.344264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.423177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.453932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.488288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.563400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.653239Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627221296949839:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.653303Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.653413Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627221296949844:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.657362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:05.674422Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627221296949846:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:14:05.764909Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627221296949897:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:05.929039Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627199822111067:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:05.929105Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:07.344513Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799647327, txId: 281474976710672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsSimple [GOOD] Test command err: Trying to start YDB, gRPC: 3818, MsgBus: 26794 2025-06-24T21:14:00.925609Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627199106054698:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:00.925674Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a45/r3tmp/tmpoLxqPE/pdisk_1.dat 2025-06-24T21:14:01.450816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:01.450913Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:01.453596Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:01.466794Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627199106054674:2079] 1750799640923635 != 1750799640923638 2025-06-24T21:14:01.484140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3818, node 1 2025-06-24T21:14:01.620062Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:01.620110Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:01.620120Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:01.620256Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:14:01.946551Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26794 TClient is connected to server localhost:26794 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:02.649015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:02.672732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:14:02.693867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:02.887444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:03.070673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:03.147176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:04.540298Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627216285925502:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:04.540431Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.221654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.295075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.331952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.367057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.402248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.443106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.474957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.548401Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627220580893458:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.548498Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.548792Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627220580893463:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.556389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:05.569434Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627220580893465:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:14:05.630317Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627220580893516:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:05.926081Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627199106054698:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:05.926163Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:07.350313Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799647287, txId: 281474976710672] shutting down >> KqpSystemView::QueryStatsScan [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestAcquireSemaphoreViaDecrease [GOOD] Test command err: 2025-06-24T21:13:26.420140Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.420303Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:26.447640Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:26.447928Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:26.475398Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:26.475890Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:135:2159], cookie=6523731838687504865, session=0, seqNo=0) 2025-06-24T21:13:26.476047Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:26.488468Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:135:2159], cookie=6523731838687504865, session=1) 2025-06-24T21:13:26.488770Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:135:2159], cookie=12691803540108418624, session=0, seqNo=0) 2025-06-24T21:13:26.488914Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-24T21:13:26.501701Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:135:2159], cookie=12691803540108418624, session=2) 2025-06-24T21:13:26.502364Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[1:146:2168], cookie=11823376464732360621, name="Sem1", limit=1) 2025-06-24T21:13:26.502512Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1" 2025-06-24T21:13:26.514737Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[1:146:2168], cookie=11823376464732360621) 2025-06-24T21:13:26.515091Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute 
(sender=[1:135:2159], cookie=111, session=1, semaphore="Sem1" count=1) 2025-06-24T21:13:26.515272Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #1 session 1 2025-06-24T21:13:26.515459Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:135:2159], cookie=222, session=2, semaphore="Sem1" count=1) 2025-06-24T21:13:26.527666Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:135:2159], cookie=111) 2025-06-24T21:13:26.527751Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:135:2159], cookie=222) 2025-06-24T21:13:26.528388Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:154:2176], cookie=10672253660863823460, name="Sem1") 2025-06-24T21:13:26.528497Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:154:2176], cookie=10672253660863823460) 2025-06-24T21:13:26.530083Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:157:2179], cookie=5116817491063850533, name="Sem1") 2025-06-24T21:13:26.530190Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:157:2179], cookie=5116817491063850533) 2025-06-24T21:13:26.990625Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:27.004375Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:27.350551Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:27.362708Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:27.741273Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:27.754104Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:28.105396Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:28.117638Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:28.479669Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:28.492973Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:28.847687Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:28.861162Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:29.199571Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:29.211838Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:29.574749Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:29.589396Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:29.968303Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: 
[72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:29.985073Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:30.411087Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:30.424119Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:30.800209Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:30.815370Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:31.180272Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:31.192472Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:31.567955Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:31.580282Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:31.948469Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:31.960764Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:32.362799Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:32.376180Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:32.758185Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:32.771906Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:33.164170Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:33.176308Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:33.545962Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:33.561251Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:33.926790Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:33.938941Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:34.337614Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:34.352062Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:34.730368Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:34.742667Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:35.112163Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:35.124430Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:35.502499Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:35.515004Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 
2025-06-24T21:13:35.897132Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:35.916720Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:36.319477Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:36.339506Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:36.725625Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:36.744144Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:37.119460Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:37.132028Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:37.511514Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:37.526201Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:37.884847Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:37.897287Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:38.293081Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:38.305318Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:38.686166Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:38.699221Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:39.069203Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:39.082381Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:39.447726Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:39.461696Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:39.829546Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:39.844012Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck: ... 
DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:02.423927Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:02.822636Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:02.835861Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:03.212620Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:03.227608Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:03.712040Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:03.728082Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:04.143517Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:04.163857Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:04.551520Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:04.572349Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:04.962521Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:04.975910Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:05.355446Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:05.372276Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:05.775440Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:05.789752Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:06.177773Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:06.198860Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:06.591507Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:06.619272Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:07.008533Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:07.023668Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:07.404665Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:07.422522Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:07.826698Z node 4 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:27: [72057594037927937] TTxSessionTimeout::Execute (session=1) 2025-06-24T21:14:07.826783Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] Deleting session 1 2025-06-24T21:14:07.826837Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 1 "Sem1" owner link 2025-06-24T21:14:07.843685Z node 4 :KESUS_TABLET DEBUG: 
tx_session_timeout.cpp:56: [72057594037927937] TTxSessionTimeout::Complete (session=1) 2025-06-24T21:14:07.854661Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:454:2412], cookie=6936521514435570500, name="Sem1") 2025-06-24T21:14:07.854776Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:454:2412], cookie=6936521514435570500) 2025-06-24T21:14:08.478542Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:14:08.478690Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:14:08.500350Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:14:08.500494Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:14:08.521970Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:14:08.522560Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:133:2157], cookie=9592700442197474109, session=0, seqNo=0) 2025-06-24T21:14:08.522733Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:14:08.551886Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:133:2157], cookie=9592700442197474109, session=1) 2025-06-24T21:14:08.552250Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:133:2157], cookie=8489023878014481231, session=0, seqNo=0) 2025-06-24T21:14:08.552413Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-24T21:14:08.566612Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:133:2157], cookie=8489023878014481231, session=2) 2025-06-24T21:14:08.566968Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:133:2157], cookie=5655411575989973563, session=0, seqNo=0) 2025-06-24T21:14:08.567115Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 3 2025-06-24T21:14:08.579594Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:133:2157], cookie=5655411575989973563, session=3) 2025-06-24T21:14:08.580270Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:146:2168], cookie=4362784073821486349, name="Sem1", limit=3) 2025-06-24T21:14:08.580455Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1" 2025-06-24T21:14:08.593043Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:146:2168], cookie=4362784073821486349) 2025-06-24T21:14:08.593462Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:133:2157], cookie=111, session=1, semaphore="Sem1" count=2) 2025-06-24T21:14:08.593664Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #1 session 1 2025-06-24T21:14:08.593899Z node 5 :KESUS_TABLET DEBUG: 
tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:133:2157], cookie=222, session=2, semaphore="Sem1" count=1) 2025-06-24T21:14:08.593984Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #2 session 2 2025-06-24T21:14:08.594082Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:133:2157], cookie=333, session=3, semaphore="Sem1" count=1) 2025-06-24T21:14:08.613271Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:133:2157], cookie=111) 2025-06-24T21:14:08.613383Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:133:2157], cookie=222) 2025-06-24T21:14:08.613420Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:133:2157], cookie=333) 2025-06-24T21:14:08.614154Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:154:2176], cookie=9237060981256384712, name="Sem1") 2025-06-24T21:14:08.614270Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:154:2176], cookie=9237060981256384712) 2025-06-24T21:14:08.614772Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:157:2179], cookie=12291084241931055811, name="Sem1") 2025-06-24T21:14:08.614866Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:157:2179], cookie=12291084241931055811) 2025-06-24T21:14:08.615174Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:133:2157], cookie=444, session=1, semaphore="Sem1" count=1) 2025-06-24T21:14:08.615351Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #3 session 3 2025-06-24T21:14:08.632524Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:133:2157], cookie=444) 2025-06-24T21:14:08.633253Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:162:2184], cookie=5070126430996414228, name="Sem1") 2025-06-24T21:14:08.633368Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:162:2184], cookie=5070126430996414228) 2025-06-24T21:14:08.633874Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:165:2187], cookie=17730597319203272966, name="Sem1") 2025-06-24T21:14:08.633949Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:165:2187], cookie=17730597319203272966) 2025-06-24T21:14:08.654795Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:14:08.654935Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:14:08.656089Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:14:08.657114Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] 
TTxInit::Execute 2025-06-24T21:14:08.708405Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:14:08.708624Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #1 session 1 2025-06-24T21:14:08.708681Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #2 session 2 2025-06-24T21:14:08.708712Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #3 session 3 2025-06-24T21:14:08.709120Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:205:2217], cookie=16313078606994162192, name="Sem1") 2025-06-24T21:14:08.709228Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:205:2217], cookie=16313078606994162192) 2025-06-24T21:14:08.709904Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:214:2225], cookie=6474918429343962571, name="Sem1") 2025-06-24T21:14:08.710011Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:214:2225], cookie=6474918429343962571) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamInnerJoinSelectAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 14532, MsgBus: 8073 2025-06-24T21:14:00.964120Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627198929960641:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:00.964179Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a43/r3tmp/tmpoGzjmJ/pdisk_1.dat 2025-06-24T21:14:01.473624Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:01.500852Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627198929960619:2079] 1750799640952648 != 1750799640952651 2025-06-24T21:14:01.510193Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:01.510316Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:01.516295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14532, node 1 2025-06-24T21:14:01.631984Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:01.632009Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:01.632021Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:01.632155Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:14:01.983605Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8073 TClient is connected to server localhost:8073 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:02.599735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:02.630590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:14:02.650685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:02.851958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:03.059861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:03.161310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:14:04.812678Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627216109831466:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:04.812797Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.221216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.270294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.300823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.353230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.420048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.507211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.547506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.610499Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627220404799428:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.610577Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.610646Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627220404799433:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.614982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:05.632502Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627220404799435:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:14:05.723262Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627220404799486:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:05.964280Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627198929960641:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:05.964373Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:07.778720Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799647772, txId: 281474976715672] shutting down >> KqpSysColV0::InnerJoinTables [GOOD] >> KqpSysColV1::UpdateAndDelete [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestAcquireSemaphore [GOOD] Test command err: 2025-06-24T21:13:26.420128Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.420279Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:26.447012Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:26.447268Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:26.471449Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:26.471955Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:135:2159], cookie=769460036015395312, session=0, seqNo=0) 2025-06-24T21:13:26.472112Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:26.483958Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:135:2159], cookie=769460036015395312, session=1) 2025-06-24T21:13:26.484254Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:135:2159], cookie=4647228505684384798, session=0, seqNo=0) 2025-06-24T21:13:26.484384Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-24T21:13:26.496423Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:135:2159], cookie=4647228505684384798, session=2) 2025-06-24T21:13:26.497178Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:135:2159], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-24T21:13:26.497337Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-24T21:13:26.497457Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-24T21:13:26.497665Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] 
TTxSemaphoreAcquire::Execute (sender=[1:135:2159], cookie=222, session=2, semaphore="Lock2" count=1) 2025-06-24T21:13:26.497749Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 2 "Lock2" 2025-06-24T21:13:26.497811Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #2 session 2 2025-06-24T21:13:26.497912Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:135:2159], cookie=333, session=1, semaphore="Lock2" count=1) 2025-06-24T21:13:26.497975Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #3 session 1 2025-06-24T21:13:26.509706Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:135:2159], cookie=111) 2025-06-24T21:13:26.509776Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:135:2159], cookie=222) 2025-06-24T21:13:26.509795Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:135:2159], cookie=333) 2025-06-24T21:13:26.510257Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:151:2173], cookie=13418185283148605581, name="Lock1") 2025-06-24T21:13:26.511574Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:151:2173], cookie=13418185283148605581) 2025-06-24T21:13:26.512166Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:154:2176], cookie=4204792471255060502, name="Lock2") 2025-06-24T21:13:26.512243Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:154:2176], cookie=4204792471255060502) 2025-06-24T21:13:26.526968Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.527082Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:26.527601Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:26.528271Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:26.544878Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:26.545019Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-24T21:13:26.545069Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #2 session 2 2025-06-24T21:13:26.545103Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #3 session 1 2025-06-24T21:13:26.545427Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:194:2206], cookie=3995180625426496857, name="Lock1") 2025-06-24T21:13:26.545497Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:194:2206], cookie=3995180625426496857) 2025-06-24T21:13:26.546045Z node 1 :KESUS_TABLET DEBUG: 
tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:202:2213], cookie=17141156251457571908, name="Lock2") 2025-06-24T21:13:26.546117Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:202:2213], cookie=17141156251457571908) 2025-06-24T21:13:27.037751Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:27.049921Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:27.420494Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:27.435993Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:27.795493Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:27.807765Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:28.176047Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:28.188459Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:28.549996Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:28.562445Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:28.921421Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:28.937405Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:29.292273Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:29.308817Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:29.687290Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:29.700999Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:30.057845Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:30.070317Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:30.476164Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:30.492086Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:30.869258Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:30.884076Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:31.259768Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:31.276261Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:31.649743Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:31.662186Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:32.018989Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: 
[72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:32.031872Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:32.455634Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:32.472097Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:32.862439Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:32.875946Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:33.247810Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:33.265686Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:33.635650Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:33.648173Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:34.042822Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:34.055282Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:34.441955Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:34.454115Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:34.823656Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:34.835811Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:35.201721Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:35.214075Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:35.572618Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:35.588116Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:35.992638Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:36.012057Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:36.397714Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927 ... 
.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:04.136128Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:04.518467Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:04.532530Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:04.915913Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:04.940943Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:05.327622Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:05.344081Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:05.721585Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:05.734145Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:06.133929Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:06.148766Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:06.523822Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:06.540163Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:06.921777Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:06.946463Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:07.334541Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:07.354312Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:07.732661Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:07.745939Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:08.127601Z node 4 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:27: [72057594037927937] TTxSessionTimeout::Execute (session=1) 2025-06-24T21:14:08.127713Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] Deleting session 1 2025-06-24T21:14:08.127790Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 1 "Lock1" owner link 2025-06-24T21:14:08.127909Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #3 session 2 2025-06-24T21:14:08.127977Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 2 "Lock2" owner link 2025-06-24T21:14:08.128016Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #4 session 2 2025-06-24T21:14:08.151981Z node 4 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:56: [72057594037927937] TTxSessionTimeout::Complete (session=1) 2025-06-24T21:14:08.152809Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:365:2345], cookie=1555395052768106145, name="Lock1") 
2025-06-24T21:14:08.152928Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:365:2345], cookie=1555395052768106145) 2025-06-24T21:14:08.153489Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:368:2348], cookie=12659723417464755100, name="Lock2") 2025-06-24T21:14:08.153570Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:368:2348], cookie=12659723417464755100) 2025-06-24T21:14:08.154068Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[4:371:2351], cookie=12773533423083699517) 2025-06-24T21:14:08.154139Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[4:371:2351], cookie=12773533423083699517) 2025-06-24T21:14:08.195380Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:14:08.195513Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:14:08.196136Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:14:08.197262Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:14:08.249816Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:14:08.249972Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #3 session 2 2025-06-24T21:14:08.250028Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #4 session 2 2025-06-24T21:14:08.250392Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[4:411:2381], cookie=16872413385473401861) 2025-06-24T21:14:08.250502Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[4:411:2381], cookie=16872413385473401861) 2025-06-24T21:14:08.251188Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:418:2387], cookie=16938387051484431829, name="Lock1") 2025-06-24T21:14:08.251284Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:418:2387], cookie=16938387051484431829) 2025-06-24T21:14:08.251841Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:421:2390], cookie=7721006243915716501, name="Lock2") 2025-06-24T21:14:08.251909Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:421:2390], cookie=7721006243915716501) 2025-06-24T21:14:08.841866Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:14:08.841955Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:14:08.860595Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:14:08.860725Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:14:08.875806Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] 
TTxInit::Complete 2025-06-24T21:14:08.876336Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:133:2157], cookie=16691530062625263025, session=0, seqNo=0) 2025-06-24T21:14:08.876665Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:14:08.903950Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:133:2157], cookie=16691530062625263025, session=1) 2025-06-24T21:14:08.904564Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:133:2157], cookie=13600577826032555012, session=0, seqNo=0) 2025-06-24T21:14:08.904705Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-24T21:14:08.917067Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:133:2157], cookie=13600577826032555012, session=2) 2025-06-24T21:14:08.917425Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:133:2157], cookie=111, session=1, semaphore="Sem1" count=1) 2025-06-24T21:14:08.931598Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:133:2157], cookie=111) 2025-06-24T21:14:08.932296Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:146:2168], cookie=4609925206748840662, name="Sem1", limit=1) 2025-06-24T21:14:08.932492Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1" 2025-06-24T21:14:08.946345Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:146:2168], cookie=4609925206748840662) 2025-06-24T21:14:08.946863Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:133:2157], cookie=333, session=1, semaphore="Sem1" count=100500) 2025-06-24T21:14:08.961794Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:133:2157], cookie=333) 2025-06-24T21:14:08.962190Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:133:2157], cookie=222, session=1, semaphore="Sem1" count=1) 2025-06-24T21:14:08.962391Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #1 session 1 2025-06-24T21:14:08.962638Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:133:2157], cookie=333, session=2, semaphore="Sem1" count=1) 2025-06-24T21:14:08.977032Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:133:2157], cookie=222) 2025-06-24T21:14:08.977137Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:133:2157], cookie=333) 2025-06-24T21:14:08.977792Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:156:2178], cookie=7867094861284621596, name="Sem1") 2025-06-24T21:14:08.977899Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] 
TTxSemaphoreDescribe::Complete (sender=[5:156:2178], cookie=7867094861284621596) 2025-06-24T21:14:08.978422Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:159:2181], cookie=4894639911818777961, name="Sem1") 2025-06-24T21:14:08.978517Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:159:2181], cookie=4894639911818777961) 2025-06-24T21:14:08.979039Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[5:162:2184], cookie=8132147107109017973, name="Sem1", force=0) 2025-06-24T21:14:08.999517Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[5:162:2184], cookie=8132147107109017973) 2025-06-24T21:14:09.000251Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[5:167:2189], cookie=5562826762106788758, name="Sem1", force=1) 2025-06-24T21:14:09.000380Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:58: [72057594037927937] Deleting semaphore 1 "Sem1" 2025-06-24T21:14:09.013347Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[5:167:2189], cookie=5562826762106788758) >> TopicAutoscaling::ControlPlane_CDC_Enable [GOOD] >> TopicAutoscaling::MidOfRange [GOOD] >> YdbOlapStore::LogPagingBefore [GOOD] >> YdbOlapStore::LogPagingAfter ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::SelectRowById [GOOD] Test command err: Trying to start YDB, gRPC: 13168, MsgBus: 17166 2025-06-24T21:14:02.651587Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627208100207476:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:02.652242Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a2c/r3tmp/tmpUkpOQd/pdisk_1.dat 2025-06-24T21:14:03.049870Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:03.049970Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:03.053504Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:03.079462Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:03.080156Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627208100207293:2079] 1750799642586248 != 1750799642586251 TServer::EnableGrpc on GrpcPort 13168, node 1 2025-06-24T21:14:03.180090Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:03.180119Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:03.180128Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:03.180256Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17166 2025-06-24T21:14:03.633777Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17166 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:03.877590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:03.907338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:14:03.930739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:04.106686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:04.305606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:14:04.394658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:06.057931Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627225280078114:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:06.058041Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:06.437598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:06.518045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:06.565555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:06.595939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:06.628094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:06.668272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:06.707498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:06.799482Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627225280078776:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:06.799638Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:06.801189Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627225280078781:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:06.805849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:06.820815Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627225280078783:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:14:06.882161Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627225280078834:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:07.676570Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627208100207476:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:07.676834Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-54 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::QueryStatsScan [GOOD] Test command err: Trying to start YDB, gRPC: 21659, MsgBus: 22072 2025-06-24T21:14:00.946428Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627197597402931:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:00.946482Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a42/r3tmp/tmpyUQJHu/pdisk_1.dat 2025-06-24T21:14:01.491632Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:01.491768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:01.494497Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:01.525998Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:01.527347Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627197597402745:2079] 1750799640935632 != 1750799640935635 TServer::EnableGrpc on GrpcPort 21659, node 1 2025-06-24T21:14:01.669856Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:01.669899Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:01.669913Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:01.670037Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:14:01.940968Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22072 TClient is connected to server localhost:22072 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:02.795669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:02.815207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:14:02.826968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:02.986038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:14:03.159942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:03.235226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:04.948372Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627214777273554:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:04.948528Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.301671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.334745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.377526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.417212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.446203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.516746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.591747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.670319Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627219072241514:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.670407Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.670606Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627219072241519:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.674834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:05.685962Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627219072241521:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:14:05.780786Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627219072241572:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:05.946251Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627197597402931:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:05.946359Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:08.671478Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799647534, txId: 281474976710672] shutting down 2025-06-24T21:14:08.845535Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799648815, txId: 281474976710675] shutting down >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-47 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-48 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::UpdateAndDelete [GOOD] Test command err: Trying to start YDB, gRPC: 13076, MsgBus: 21646 2025-06-24T21:14:00.945966Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627198864088960:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:00.946102Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a35/r3tmp/tmpQiMMCr/pdisk_1.dat 2025-06-24T21:14:01.479261Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627198864088936:2079] 1750799640921848 != 1750799640921851 2025-06-24T21:14:01.490601Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:01.497278Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:01.497675Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 13076, node 1 2025-06-24T21:14:01.499417Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:01.619710Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:01.619736Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:01.619741Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:01.619854Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:14:01.976797Z node 1 
:TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21646 TClient is connected to server localhost:21646 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:02.597158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:02.625872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:14:02.638322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:02.881729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:14:03.095457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:03.169972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:14:04.627888Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627216043959773:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:04.628045Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.225852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.301076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.344268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.390949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.459806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.495935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.533066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.628523Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627220338927736:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.628616Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.629371Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627220338927741:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.634237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:05.646714Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627220338927743:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:14:05.711406Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627220338927794:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:05.947345Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627198864088960:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:05.947453Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::InnerJoinTables [GOOD] Test command err: Trying to start YDB, gRPC: 26625, MsgBus: 23654 2025-06-24T21:14:02.760765Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627204301412467:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:02.760797Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a2a/r3tmp/tmprgB2nd/pdisk_1.dat 2025-06-24T21:14:03.379393Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627204301412449:2079] 1750799642744562 != 1750799642744565 2025-06-24T21:14:03.394374Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26625, node 1 2025-06-24T21:14:03.422200Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:03.422884Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:03.430387Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:03.471252Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:03.471277Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:03.471286Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:03.471527Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23654 2025-06-24T21:14:03.843201Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23654 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:04.112437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:04.132514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:14:04.151115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:04.305860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:04.478107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:04.569994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:06.395628Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627221481283277:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:06.395760Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:06.673684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:06.707139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:06.737450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:06.767670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:06.843749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:06.916413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:06.964886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:07.077729Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627225776251240:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:07.077826Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:07.078039Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627225776251245:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:07.082741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:07.096218Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627225776251247:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:14:07.199849Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627225776251298:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:07.765774Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627204301412467:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:07.779155Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsRange2 >> KqpSysColV1::StreamInnerJoinTables >> KqpSysColV1::StreamSelectRowById ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-54 [GOOD] Test command err: Starting YDB, grpc: 14268, msgbus: 23176 2025-06-24T21:11:03.258512Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626435834574941:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:03.258616Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003614/r3tmp/tmpUyk0Fv/pdisk_1.dat 2025-06-24T21:11:03.788535Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:03.788659Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:03.795874Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:03.796569Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14268, node 1 2025-06-24T21:11:04.011567Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:04.011592Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:04.011605Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:04.011747Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:04.270101Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23176 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T21:11:04.432620Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519626435834575150:2117] Handle TEvNavigate describe path dc-1 2025-06-24T21:11:04.453461Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519626440129542974:2452] HANDLE EvNavigateScheme dc-1 2025-06-24T21:11:04.454792Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519626440129542974:2452] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:04.511548Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519626440129542974:2452] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-24T21:11:04.523542Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519626440129542974:2452] Handle TEvDescribeSchemeResult Forward to# [1:7519626440129542972:2450] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:11:04.541127Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519626435834575150:2117] Handle TEvProposeTransaction 2025-06-24T21:11:04.541163Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519626435834575150:2117] TxId# 281474976710657 ProcessProposeTransaction 2025-06-24T21:11:04.541291Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519626435834575150:2117] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7519626440129542983:2457] 2025-06-24T21:11:04.647031Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519626440129542983:2457] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T21:11:04.647128Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519626440129542983:2457] txid# 281474976710657 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:11:04.647166Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519626440129542983:2457] txid# 281474976710657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:11:04.647243Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519626440129542983:2457] txid# 281474976710657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:11:04.647638Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519626440129542983:2457] txid# 281474976710657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:04.647800Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519626440129542983:2457] HANDLE EvNavigateKeySetResult, txid# 281474976710657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-24T21:11:04.647906Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519626440129542983:2457] txid# 281474976710657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710657 TabletId# 72057594046644480} 2025-06-24T21:11:04.648130Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519626440129542983:2457] txid# 281474976710657 HANDLE EvClientConnected 2025-06-24T21:11:04.648976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:04.651554Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519626440129542983:2457] txid# 281474976710657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710657} 2025-06-24T21:11:04.651647Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519626440129542983:2457] txid# 281474976710657 SEND to# [1:7519626440129542982:2456] Source {TEvProposeTransactionStatus txid# 281474976710657 Status# 53} waiting... 
2025-06-24T21:11:04.669640Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519626435834575150:2117] Handle TEvProposeTransaction 2025-06-24T21:11:04.669664Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519626435834575150:2117] TxId# 281474976710658 ProcessProposeTransaction 2025-06-24T21:11:04.669689Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519626435834575150:2117] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519626440129543027:2497] 2025-06-24T21:11:04.671980Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519626440129543027:2497] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T21:11:04.672033Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519626440129543027:2497] txid# 281474976710658 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:11:04.672046Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519626440129543027:2497] txid# 281474976710658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:11:04.672131Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519626440129543027:2497] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:11:04.672424Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519626440129543027:2497] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:04.672532Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519626440129543027:2497] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:11:04.672570Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519626440129543027:2497] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-24T21:11:04.672713Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519626440129543027:2497] txid# 281474976710658 HANDLE EvClientConnected 2025-06-24T21:11:04.673306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 2814749767 ... 
tself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:08.659290Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519627231634890883:2525] txid# 281474976710660 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710660} 2025-06-24T21:14:08.659354Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627231634890883:2525] txid# 281474976710660 SEND to# [59:7519627231634890882:2302] Source {TEvProposeTransactionStatus txid# 281474976710660 Status# 53} 2025-06-24T21:14:08.704920Z node 59 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [59:7519627231634890882:2302], DatabaseId: /dc-1, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-24T21:14:08.755832Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519627210160053535:2110] Handle TEvProposeTransaction 2025-06-24T21:14:08.755877Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519627210160053535:2110] TxId# 281474976710661 ProcessProposeTransaction 2025-06-24T21:14:08.755957Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519627210160053535:2110] Cookie# 0 userReqId# "" txid# 281474976710661 SEND to# [59:7519627231634890960:2578] 2025-06-24T21:14:08.760190Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519627231634890960:2578] txid# 281474976710661 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003\n#\010\000\022\037\010\001\020\377\377\003\032\025cluster_admin@builtin \003\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/dc-1" 2025-06-24T21:14:08.760280Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519627231634890960:2578] txid# 281474976710661 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:14:08.760310Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519627231634890960:2578] txid# 281474976710661 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 0 2025-06-24T21:14:08.761335Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [59:7519627231634890960:2578] txid# 281474976710661 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:14:08.761457Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519627231634890960:2578] txid# 281474976710661 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:14:08.761705Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519627231634890960:2578] txid# 281474976710661 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:14:08.761885Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519627231634890960:2578] HANDLE EvNavigateKeySetResult, txid# 281474976710661 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:14:08.761954Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519627231634890960:2578] txid# 281474976710661 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710661 TabletId# 72057594046644480} 2025-06-24T21:14:08.762128Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# 
[59:7519627231634890960:2578] txid# 281474976710661 HANDLE EvClientConnected 2025-06-24T21:14:08.770554Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519627231634890960:2578] txid# 281474976710661 Status StatusAlreadyExists HANDLE {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976710661 Reason# Check failed: path: '/dc-1/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)} 2025-06-24T21:14:08.770732Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519627231634890960:2578] txid# 281474976710661, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:08.770776Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627231634890960:2578] txid# 281474976710661 SEND to# [59:7519627231634890882:2302] Source {TEvProposeTransactionStatus txid# 281474976710661 Status# 48} 2025-06-24T21:14:08.807746Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519627210160053535:2110] Handle TEvProposeTransaction 2025-06-24T21:14:08.807805Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519627210160053535:2110] TxId# 281474976710662 ProcessProposeTransaction 2025-06-24T21:14:08.807871Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519627210160053535:2110] Cookie# 0 userReqId# "" txid# 281474976710662 SEND to# [59:7519627231634890983:2589] 2025-06-24T21:14:08.817655Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519627231634890983:2589] txid# 281474976710662 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "targetuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:53572" 2025-06-24T21:14:08.817764Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519627231634890983:2589] txid# 281474976710662 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:14:08.817796Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519627231634890983:2589] txid# 281474976710662 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:14:08.817874Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519627231634890983:2589] txid# 281474976710662 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:14:08.818368Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519627231634890983:2589] txid# 281474976710662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:14:08.818491Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519627231634890983:2589] HANDLE EvNavigateKeySetResult, txid# 281474976710662 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:14:08.818541Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519627231634890983:2589] txid# 281474976710662 
SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710662 TabletId# 72057594046644480} 2025-06-24T21:14:08.818809Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519627231634890983:2589] txid# 281474976710662 HANDLE EvClientConnected 2025-06-24T21:14:08.832864Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519627231634890983:2589] txid# 281474976710662 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976710662} 2025-06-24T21:14:08.832937Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627231634890983:2589] txid# 281474976710662 SEND to# [59:7519627231634890982:2294] Source {TEvProposeTransactionStatus txid# 281474976710662 Status# 48} 2025-06-24T21:14:08.923849Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519627210160053535:2110] Handle TEvProposeTransaction 2025-06-24T21:14:08.923891Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519627210160053535:2110] TxId# 281474976710663 ProcessProposeTransaction 2025-06-24T21:14:08.923948Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519627210160053535:2110] Cookie# 0 userReqId# "" txid# 281474976710663 SEND to# [59:7519627231634891018:2605] 2025-06-24T21:14:08.927208Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519627231634891018:2605] txid# 281474976710663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveUser { User: "targetuser" MissingOk: false } } } } UserToken: "\n\025cluster_admin@builtin\022\030\022\026\n\024all-users@well-known\032\025cluster_admin@builtin\"\007Builtin*\027clus****ltin (2AB0E265)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:53602" 2025-06-24T21:14:08.927311Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519627231634891018:2605] txid# 281474976710663 Bootstrap, UserSID: cluster_admin@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:14:08.927338Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519627231634891018:2605] txid# 281474976710663 Bootstrap, UserSID: cluster_admin@builtin IsClusterAdministrator: 1 2025-06-24T21:14:08.927401Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519627231634891018:2605] txid# 281474976710663 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:14:08.927844Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519627231634891018:2605] txid# 281474976710663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:14:08.927902Z node 59 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [59:7519627231634891018:2605] txid# 281474976710663, Access denied for cluster_admin@builtin on path /dc-1, with access AlterSchema 2025-06-24T21:14:08.928057Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519627231634891018:2605] txid# 281474976710663, issues: { message: "Access denied for cluster_admin@builtin on path /dc-1" issue_code: 200000 severity: 1 } 2025-06-24T21:14:08.928096Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627231634891018:2605] txid# 281474976710663 SEND to# [59:7519627231634891017:2313] Source {TEvProposeTransactionStatus Status# 5} 2025-06-24T21:14:08.928948Z node 59 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=59&id=NTM3M2UzYjctNDdiNTc1NmUtMmQxOTk2NmEtNTY5M2ZlYWU=, ActorId: [59:7519627231634891003:2313], ActorState: ExecuteState, TraceId: 01jyhwn2442t7m91cw1xn46vx7, Create 
QueryResponse for error on request, msg: 2025-06-24T21:14:08.929349Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [59:7519627210160053535:2110] Handle TEvExecuteKqpTransaction 2025-06-24T21:14:08.929372Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [59:7519627210160053535:2110] TxId# 281474976710664 ProcessProposeKqpTransaction >> KqpSystemView::PartitionStatsFollower ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::MidOfRange [GOOD] Test command err: 2025-06-24T21:12:43.577231Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626868010101878:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:43.577467Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031a8/r3tmp/tmp6bTQmF/pdisk_1.dat 2025-06-24T21:12:43.802992Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:12:44.085378Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:44.085502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:44.105588Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626868010101765:2079] 1750799563556888 != 1750799563556891 2025-06-24T21:12:44.117916Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:44.145014Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22185, node 1 2025-06-24T21:12:44.388080Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0031a8/r3tmp/yandexaG2PvK.tmp 2025-06-24T21:12:44.388112Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0031a8/r3tmp/yandexaG2PvK.tmp 2025-06-24T21:12:44.388893Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0031a8/r3tmp/yandexaG2PvK.tmp 2025-06-24T21:12:44.389032Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:44.579868Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:44.639842Z INFO: TTestServer started on Port 22882 GrpcPort 22185 TClient is connected to server localhost:22882 PQClient connected to localhost:22185 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:45.005036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:12:45.071400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:12:45.083121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:12:45.286556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:12:47.018271Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626880895004437:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.018369Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626885189971760:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.018467Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.022856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:47.038469Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626885189971762:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T21:12:47.325739Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626885189971826:2444] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:47.594943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.618198Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626885189971834:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:12:47.627975Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NzU4ZTljNjgtYTRjM2E3ZjMtODQ3ZDAxZTYtMTZmMDRhODg=, ActorId: [1:7519626880895004434:2297], ActorState: ExecuteState, TraceId: 01jyhwjj4n3cmgm3hy2ee6xsbn, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:12:47.630248Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:12:47.660421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.763372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519626889484939417:2621] 2025-06-24T21:12:48.564949Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626868010101878:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:48.565087Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-24T21:12:54.238184Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-24T21:12:54.265698Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T21:12:54.266877Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519626915254743389:2698], Recipient [1:7519626868010102139:2174]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:12:54.266913Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:12:54.266928Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:12:54.266973Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519626915254743385:2695], Recipient [1:7519626868010102139:2174]: {TEvModifySchemeTransaction txid# 281474976710673 TabletId# 72057594046644480} 2025-06-24T21:12:54.266987Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T21:12:54.354735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 2 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceId ... 81474976710673] PredicateAcks: 0/0 2025-06-24T21:14:09.461556Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4621: [PQ: 72075186224037893] add an TxId 281474976710673 to the list for deletion 2025-06-24T21:14:09.461578Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037893] TxId 281474976710673, NewState DELETING 2025-06-24T21:14:09.461605Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3852: [PQ: 72075186224037893] delete key for TxId 281474976710673 2025-06-24T21:14:09.461607Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 14 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976710673 2025-06-24T21:14:09.461629Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046644480, txId: 281474976710673 2025-06-24T21:14:09.461659Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037893] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T21:14:09.461660Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710673, pathId: [OwnerId: 72057594046644480, LocalPathId: 14], version: 4 2025-06-24T21:14:09.461681Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 14] was 3 2025-06-24T21:14:09.461723Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 270794756, Sender [6:7519627238074791991:2429], Recipient [6:7519627238074791991:2429]: NKikimr::TEvKeyValue::TEvCollect 2025-06-24T21:14:09.461762Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:14:09.461865Z node 6 :PERSQUEUE TRACE: 
pq_impl.cpp:5264: HandleHook, received event# 270794752, Sender [6:7519627238074791991:2429], Recipient [6:7519627238074791991:2429]: NKikimrClient.TKeyValueRequest Cookie: 5 CmdDeleteRange { Range { From: "tx_00000281474976710673" IncludeFrom: true To: "tx_00000281474976710673" IncludeTo: true } } CmdWrite { Key: "_txinfo" Value: "\020\317\225\252\236\3722\030\221\200\200\200\200\200@(\240\215\0060\317\225\252\236\37228\221\200\200\200\200\200@" StorageChannel: INLINE } 2025-06-24T21:14:09.461988Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 274137603, Sender [6:7519627190830150761:2244], Recipient [6:7519627190830150624:2164]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046644480 Generation: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 15] Version: 2 } 2025-06-24T21:14:09.462009Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5035: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-24T21:14:09.462033Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 270794753, Sender [6:7519627238074792109:2429], Recipient [6:7519627238074791991:2429]: NKikimr::TEvKeyValue::TEvIntermediate 2025-06-24T21:14:09.462083Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 15 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976710673 2025-06-24T21:14:09.462145Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 15 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976710673 2025-06-24T21:14:09.462157Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976710673 2025-06-24T21:14:09.462172Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710673, pathId: [OwnerId: 72057594046644480, LocalPathId: 15], version: 2 2025-06-24T21:14:09.462185Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 15] was 4 2025-06-24T21:14:09.462242Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976710673, subscribers: 1 2025-06-24T21:14:09.462258Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [6:7519627238074791952:2426] 2025-06-24T21:14:09.462276Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-24T21:14:09.462391Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:14:09.462556Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710673 2025-06-24T21:14:09.462568Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: 
TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:14:09.462878Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710673 2025-06-24T21:14:09.462889Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:14:09.462945Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [6:7519627238074791952:2426] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710673 at schemeshard: 72057594046644480 2025-06-24T21:14:09.463297Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 270794760, Sender [6:7519627238074792107:2439], Recipient [6:7519627238074791991:2429]: NKikimr::TEvKeyValue::TEvCompleteGC 2025-06-24T21:14:09.463603Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 270795264, Sender [6:7519627238074791991:2429], Recipient [6:7519627238074791991:2429]: NKikimrClient.TResponse Status: 1 Cookie: 5 DeleteRangeResult { Status: 0 } WriteResult { Status: 0 StatusFlags: 1 } 2025-06-24T21:14:09.463622Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5274: HandleHook, processing event TEvKeyValue::TEvResponse 2025-06-24T21:14:09.463644Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037893] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:14:09.463664Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037893] Try execute txs with state DELETING 2025-06-24T21:14:09.463683Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037893] TxId 281474976710673, State DELETING 2025-06-24T21:14:09.463709Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4571: [PQ: 72075186224037893] delete TxId 281474976710673 2025-06-24T21:14:09.464241Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [6:7519627238074791959:2740], Recipient [6:7519627190830150624:2164]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:14:09.464276Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:14:09.464290Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-24T21:14:09.464624Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 270794756, Sender [6:7519627238074791991:2429], Recipient [6:7519627238074791991:2429]: NKikimr::TEvKeyValue::TEvCollect 2025-06-24T21:14:09.464901Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 270794760, Sender [6:7519627238074792113:2440], Recipient [6:7519627238074791991:2429]: NKikimr::TEvKeyValue::TEvCompleteGC 2025-06-24T21:14:09.479659Z node 6 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:140: new alter topic request 2025-06-24T21:14:09.509250Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [6:7519627190830150624:2164]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:14:09.509294Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:14:09.509342Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender 
[6:7519627190830150624:2164], Recipient [6:7519627190830150624:2164]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:14:09.509361Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:14:09.544937Z node 6 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7519627238074791991:2429], Partition 0, Sender [0:0:0], Recipient [6:7519627238074792059:2434], Cookie: 0 2025-06-24T21:14:09.545024Z node 6 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7519627238074792059:2434]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:14:09.545058Z node 6 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:14:09.545116Z node 6 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:14:09.545212Z node 6 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:14:09.545251Z node 6 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:14:09.545286Z node 6 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:14:09.646269Z node 6 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7519627238074791991:2429], Partition 0, Sender [0:0:0], Recipient [6:7519627238074792059:2434], Cookie: 0 2025-06-24T21:14:09.646352Z node 6 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7519627238074792059:2434]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:14:09.646398Z node 6 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:14:09.646460Z node 6 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:14:09.646548Z node 6 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:14:09.646602Z node 6 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:14:09.646637Z node 6 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 >> KqpSysColV0::SelectRange >> KqpSystemView::FailNavigate >> TopicAutoscaling::PartitionSplit_PreferedPartition_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_AutoscaleAwareSDK >> KqpSysColV1::SelectRowAsterisk >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-60 [GOOD] >> KqpSystemView::Sessions [GOOD] >> KqpSystemView::PartitionStatsRange3 >> KqpSysColV1::StreamSelectRowAsterisk >> KqpSysColV1::StreamInnerJoinSelect >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-24 [GOOD] >> KqpSysColV0::SelectRowAsterisk >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_PQv1 [GOOD] >> TopicAutoscaling::WithDir_PartitionSplit_AutosplitByLoad |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-72 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-60 [GOOD] Test command err: Starting YDB, grpc: 18964, msgbus: 27254 2025-06-24T21:11:03.258494Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626435743319499:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:03.258557Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003633/r3tmp/tmpZ4zN4M/pdisk_1.dat 2025-06-24T21:11:03.766767Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:03.822611Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:03.822795Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:03.825260Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18964, node 1 2025-06-24T21:11:04.011560Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:04.011584Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:04.011590Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:04.011701Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:04.286425Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27254 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T21:11:04.434539Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519626435743319705:2117] Handle TEvNavigate describe path dc-1 2025-06-24T21:11:04.454901Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519626440038287521:2445] HANDLE EvNavigateScheme dc-1 2025-06-24T21:11:04.455277Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519626440038287521:2445] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:04.487209Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519626440038287521:2445] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-24T21:11:04.496981Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519626440038287521:2445] Handle TEvDescribeSchemeResult Forward to# [1:7519626440038287519:2443] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:11:04.525464Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519626435743319705:2117] Handle TEvProposeTransaction 2025-06-24T21:11:04.525523Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519626435743319705:2117] TxId# 281474976710657 ProcessProposeTransaction 2025-06-24T21:11:04.526295Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519626435743319705:2117] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7519626440038287527:2450] 2025-06-24T21:11:04.603491Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519626440038287527:2450] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T21:11:04.604445Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519626440038287527:2450] txid# 281474976710657 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:11:04.604486Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519626440038287527:2450] txid# 281474976710657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:11:04.604560Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519626440038287527:2450] txid# 281474976710657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:11:04.605020Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519626440038287527:2450] txid# 281474976710657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:04.605332Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519626440038287527:2450] HANDLE EvNavigateKeySetResult, txid# 281474976710657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-24T21:11:04.605392Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519626440038287527:2450] txid# 281474976710657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710657 TabletId# 72057594046644480} 2025-06-24T21:11:04.605659Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519626440038287527:2450] txid# 281474976710657 HANDLE EvClientConnected 2025-06-24T21:11:04.612667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:04.617257Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519626440038287527:2450] txid# 281474976710657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710657} 2025-06-24T21:11:04.617336Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519626440038287527:2450] txid# 281474976710657 SEND to# [1:7519626440038287526:2449] Source {TEvProposeTransactionStatus txid# 281474976710657 Status# 53} waiting... 
2025-06-24T21:11:04.639471Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519626435743319705:2117] Handle TEvProposeTransaction 2025-06-24T21:11:04.639493Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519626435743319705:2117] TxId# 281474976710658 ProcessProposeTransaction 2025-06-24T21:11:04.639516Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519626435743319705:2117] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519626440038287572:2488] 2025-06-24T21:11:04.641675Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519626440038287572:2488] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T21:11:04.641733Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519626440038287572:2488] txid# 281474976710658 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:11:04.641748Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519626440038287572:2488] txid# 281474976710658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:11:04.641938Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519626440038287572:2488] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:11:04.642343Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519626440038287572:2488] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:04.642474Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519626440038287572:2488] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:11:04.642523Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519626440038287572:2488] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-24T21:11:04.642682Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519626440038287572:2488] txid# 281474976710658 HANDLE EvClientConnected 2025-06-24T21:11:04.643301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 2814749767 ... 
ainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:14:11.300075Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519627243978769892:2586] txid# 281474976715661 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715661 TabletId# 72057594046644480} 2025-06-24T21:14:11.300263Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519627243978769892:2586] txid# 281474976715661 HANDLE EvClientConnected 2025-06-24T21:14:11.305500Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519627243978769892:2586] txid# 281474976715661 Status StatusAlreadyExists HANDLE {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715661 Reason# Check failed: path: '/dc-1/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)} 2025-06-24T21:14:11.305649Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519627243978769892:2586] txid# 281474976715661, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:11.305685Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627243978769892:2586] txid# 281474976715661 SEND to# [59:7519627243978769815:2302] Source {TEvProposeTransactionStatus txid# 281474976715661 Status# 48} 2025-06-24T21:14:11.329879Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519627222503932536:2113] Handle TEvProposeTransaction 2025-06-24T21:14:11.329918Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519627222503932536:2113] TxId# 281474976715662 ProcessProposeTransaction 2025-06-24T21:14:11.329971Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519627222503932536:2113] Cookie# 0 userReqId# "" txid# 281474976715662 SEND to# [59:7519627243978769916:2598] 2025-06-24T21:14:11.333049Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519627243978769916:2598] txid# 281474976715662 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "targetuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:47180" 2025-06-24T21:14:11.333128Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519627243978769916:2598] txid# 281474976715662 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T21:14:11.333151Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519627243978769916:2598] txid# 281474976715662 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:14:11.333206Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519627243978769916:2598] txid# 281474976715662 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:14:11.333869Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519627243978769916:2598] txid# 281474976715662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 
0 2025-06-24T21:14:11.334006Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519627243978769916:2598] HANDLE EvNavigateKeySetResult, txid# 281474976715662 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:14:11.334067Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519627243978769916:2598] txid# 281474976715662 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715662 TabletId# 72057594046644480} 2025-06-24T21:14:11.334220Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519627243978769916:2598] txid# 281474976715662 HANDLE EvClientConnected 2025-06-24T21:14:11.342598Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519627243978769916:2598] txid# 281474976715662 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715662} 2025-06-24T21:14:11.342656Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627243978769916:2598] txid# 281474976715662 SEND to# [59:7519627243978769915:2295] Source {TEvProposeTransactionStatus txid# 281474976715662 Status# 48} 2025-06-24T21:14:11.352093Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519627222503932536:2113] Handle TEvProposeTransaction 2025-06-24T21:14:11.352129Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519627222503932536:2113] TxId# 281474976715663 ProcessProposeTransaction 2025-06-24T21:14:11.352177Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519627222503932536:2113] Cookie# 0 userReqId# "" txid# 281474976715663 SEND to# [59:7519627243978769929:2607] 2025-06-24T21:14:11.355262Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519627243978769929:2607] txid# 281474976715663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "" NewOwner: "db_admin@builtin" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:47180" 2025-06-24T21:14:11.355352Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519627243978769929:2607] txid# 281474976715663 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T21:14:11.355376Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519627243978769929:2607] txid# 281474976715663 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:14:11.355443Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519627243978769929:2607] txid# 281474976715663 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:14:11.355880Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519627243978769929:2607] txid# 281474976715663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:14:11.356008Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519627243978769929:2607] HANDLE EvNavigateKeySetResult, txid# 281474976715663 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:14:11.356068Z node 59 :TX_PROXY 
DEBUG: schemereq.cpp:103: Actor# [59:7519627243978769929:2607] txid# 281474976715663 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715663 TabletId# 72057594046644480} 2025-06-24T21:14:11.356229Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519627243978769929:2607] txid# 281474976715663 HANDLE EvClientConnected 2025-06-24T21:14:11.356669Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:14:11.360745Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519627243978769929:2607] txid# 281474976715663 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715663} 2025-06-24T21:14:11.360814Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627243978769929:2607] txid# 281474976715663 SEND to# [59:7519627243978769928:2308] Source {TEvProposeTransactionStatus txid# 281474976715663 Status# 48} 2025-06-24T21:14:11.415580Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519627222503932536:2113] Handle TEvProposeTransaction 2025-06-24T21:14:11.415616Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519627222503932536:2113] TxId# 281474976715664 ProcessProposeTransaction 2025-06-24T21:14:11.415665Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519627222503932536:2113] Cookie# 0 userReqId# "" txid# 281474976715664 SEND to# [59:7519627243978769961:2621] 2025-06-24T21:14:11.418482Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519627243978769961:2621] txid# 281474976715664 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveUser { User: "targetuser" MissingOk: false } } } } UserToken: "\n\020db_admin@builtin\022\030\022\026\n\024all-users@well-known\032\020db_admin@builtin\"\007Builtin*\027db_a****ltin (DEFA2CD5)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:47204" 2025-06-24T21:14:11.418559Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519627243978769961:2621] txid# 281474976715664 Bootstrap, UserSID: db_admin@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T21:14:11.418582Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519627243978769961:2621] txid# 281474976715664 Bootstrap, UserSID: db_admin@builtin IsClusterAdministrator: 0 2025-06-24T21:14:11.418761Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1434: Actor# [59:7519627243978769961:2621] txid# 281474976715664 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-24T21:14:11.418813Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1469: Actor# [59:7519627243978769961:2621] txid# 281474976715664 HandleResolveDatabase, UserSID: db_admin@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 1 DatabaseOwner: db_admin@builtin 2025-06-24T21:14:11.418871Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519627243978769961:2621] txid# 281474976715664 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:14:11.419249Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519627243978769961:2621] txid# 281474976715664 HANDLE EvNavigateKeySetResult 
TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:14:11.419367Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519627243978769961:2621] HANDLE EvNavigateKeySetResult, txid# 281474976715664 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:14:11.419423Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519627243978769961:2621] txid# 281474976715664 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715664 TabletId# 72057594046644480} 2025-06-24T21:14:11.419574Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519627243978769961:2621] txid# 281474976715664 HANDLE EvClientConnected 2025-06-24T21:14:11.424021Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519627243978769961:2621] txid# 281474976715664 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715664} 2025-06-24T21:14:11.424093Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627243978769961:2621] txid# 281474976715664 SEND to# [59:7519627243978769960:2314] Source {TEvProposeTransactionStatus txid# 281474976715664 Status# 48} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::Sessions [GOOD] Test command err: Trying to start YDB, gRPC: 64381, MsgBus: 10497 2025-06-24T21:14:00.931468Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627197373502250:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:00.931523Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a3d/r3tmp/tmpCAbdwq/pdisk_1.dat 2025-06-24T21:14:01.437609Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:01.437719Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:01.442657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:01.463219Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:01.467284Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627197373502226:2079] 1750799640926203 != 1750799640926206 TServer::EnableGrpc on GrpcPort 64381, node 1 2025-06-24T21:14:01.619867Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:01.619893Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:01.619900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:01.620046Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:14:01.948087Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10497 TClient is connected to server localhost:10497 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:02.626503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:02.666833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:14:02.687851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:14:02.700475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:02.878365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:03.091187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:14:03.202888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:04.712677Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627214553373056:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:04.712826Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.221263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.257625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.285271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.326921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.366741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.403916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.475426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.545352Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627218848341013:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.545442Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.545557Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627218848341018:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.549714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:05.564958Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627218848341020:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2025-06-24T21:14:05.665719Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627218848341073:3430] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:05.931594Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627197373502250:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:05.931671Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 1 ydb-cpp-sdk/dev 2025-06-24T21:14:12.164638Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799652159, txId: 281474976710685] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-24 [GOOD] Test command err: Starting YDB, grpc: 19176, msgbus: 3015 2025-06-24T21:11:03.257685Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626435680129653:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:03.259016Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003660/r3tmp/tmprZZA8b/pdisk_1.dat 2025-06-24T21:11:03.773438Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:03.808784Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:03.808937Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:03.825682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19176, node 1 2025-06-24T21:11:04.011561Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:04.011582Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:04.011589Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:04.011708Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:04.266114Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3015 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T21:11:04.446670Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519626435680129853:2117] Handle TEvNavigate describe path dc-1 2025-06-24T21:11:04.465380Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519626439975097661:2438] HANDLE EvNavigateScheme dc-1 2025-06-24T21:11:04.465920Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519626439975097661:2438] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:04.497642Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519626439975097661:2438] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-24T21:11:04.506308Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519626439975097661:2438] Handle TEvDescribeSchemeResult Forward to# [1:7519626439975097659:2436] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:11:04.525467Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519626435680129853:2117] Handle TEvProposeTransaction 2025-06-24T21:11:04.525496Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519626435680129853:2117] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T21:11:04.526314Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519626435680129853:2117] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:7519626439975097667:2443] 2025-06-24T21:11:04.624754Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519626439975097667:2443] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T21:11:04.624852Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519626439975097667:2443] txid# 281474976715657 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:11:04.624877Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519626439975097667:2443] txid# 281474976715657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:11:04.624938Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519626439975097667:2443] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:11:04.625327Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519626439975097667:2443] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:04.625520Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519626439975097667:2443] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-24T21:11:04.625582Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519626439975097667:2443] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:11:04.625786Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519626439975097667:2443] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T21:11:04.626943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:04.629733Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519626439975097667:2443] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T21:11:04.629806Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519626439975097667:2443] txid# 281474976715657 SEND to# [1:7519626439975097666:2442] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} waiting... 
2025-06-24T21:11:04.644213Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519626435680129853:2117] Handle TEvProposeTransaction 2025-06-24T21:11:04.644238Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519626435680129853:2117] TxId# 281474976715658 ProcessProposeTransaction 2025-06-24T21:11:04.644267Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519626435680129853:2117] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7519626439975097711:2480] 2025-06-24T21:11:04.646778Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519626439975097711:2480] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T21:11:04.646829Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519626439975097711:2480] txid# 281474976715658 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:11:04.646850Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519626439975097711:2480] txid# 281474976715658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:11:04.646927Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519626439975097711:2480] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:11:04.647555Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519626439975097711:2480] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:04.647651Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519626439975097711:2480] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:11:04.647695Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519626439975097711:2480] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-24T21:11:04.647817Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519626439975097711:2480] txid# 281474976715658 HANDLE EvClientConnected 2025-06-24T21:11:04.648282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715 ... 
T21:14:11.470569Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519627243239309937:2577] txid# 281474976710660 Status StatusAlreadyExists HANDLE {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976710660 Reason# Check failed: path: '/dc-1/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)} 2025-06-24T21:14:11.470720Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519627243239309937:2577] txid# 281474976710660, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:11.470748Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627243239309937:2577] txid# 281474976710660 SEND to# [59:7519627243239309853:2301] Source {TEvProposeTransactionStatus txid# 281474976710660 Status# 48} 2025-06-24T21:14:11.489505Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519627221764472523:2114] Handle TEvProposeTransaction 2025-06-24T21:14:11.489541Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519627221764472523:2114] TxId# 281474976710661 ProcessProposeTransaction 2025-06-24T21:14:11.489593Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519627221764472523:2114] Cookie# 0 userReqId# "" txid# 281474976710661 SEND to# [59:7519627243239309961:2589] 2025-06-24T21:14:11.492257Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519627243239309961:2589] txid# 281474976710661 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "ordinaryuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:35116" 2025-06-24T21:14:11.492350Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519627243239309961:2589] txid# 281474976710661 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T21:14:11.492376Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519627243239309961:2589] txid# 281474976710661 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:14:11.492418Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519627243239309961:2589] txid# 281474976710661 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:14:11.492858Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519627243239309961:2589] txid# 281474976710661 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:14:11.492985Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519627243239309961:2589] HANDLE EvNavigateKeySetResult, txid# 281474976710661 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:14:11.493061Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519627243239309961:2589] txid# 281474976710661 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 
281474976710661 TabletId# 72057594046644480} 2025-06-24T21:14:11.493221Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519627243239309961:2589] txid# 281474976710661 HANDLE EvClientConnected 2025-06-24T21:14:11.503955Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519627243239309961:2589] txid# 281474976710661 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976710661} 2025-06-24T21:14:11.504020Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627243239309961:2589] txid# 281474976710661 SEND to# [59:7519627243239309960:2294] Source {TEvProposeTransactionStatus txid# 281474976710661 Status# 48} 2025-06-24T21:14:11.637348Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519627221764472523:2114] Handle TEvProposeTransaction 2025-06-24T21:14:11.637401Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519627221764472523:2114] TxId# 281474976710662 ProcessProposeTransaction 2025-06-24T21:14:11.637462Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519627221764472523:2114] Cookie# 0 userReqId# "" txid# 281474976710662 SEND to# [59:7519627243239309986:2605] 2025-06-24T21:14:11.640388Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519627243239309986:2605] txid# 281474976710662 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\022\010\001\022\016\032\014ordinaryuser\n\032\010\000\022\026\010\001\020\200\200\002\032\014ordinaryuser \000" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:35130" 2025-06-24T21:14:11.640618Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519627243239309986:2605] txid# 281474976710662 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T21:14:11.640650Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519627243239309986:2605] txid# 281474976710662 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:14:11.640716Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519627243239309986:2605] txid# 281474976710662 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:14:11.641110Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519627243239309986:2605] txid# 281474976710662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:14:11.641223Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519627243239309986:2605] HANDLE EvNavigateKeySetResult, txid# 281474976710662 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:14:11.641279Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519627243239309986:2605] txid# 281474976710662 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710662 TabletId# 72057594046644480} 2025-06-24T21:14:11.641425Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519627243239309986:2605] txid# 281474976710662 HANDLE EvClientConnected 2025-06-24T21:14:11.642009Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpModifyACL, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:14:11.645590Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519627243239309986:2605] txid# 281474976710662 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976710662} 2025-06-24T21:14:11.645658Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627243239309986:2605] txid# 281474976710662 SEND to# [59:7519627243239309985:2308] Source {TEvProposeTransactionStatus txid# 281474976710662 Status# 48} 2025-06-24T21:14:11.735202Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519627221764472523:2114] Handle TEvProposeTransaction 2025-06-24T21:14:11.735245Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519627221764472523:2114] TxId# 281474976710663 ProcessProposeTransaction 2025-06-24T21:14:11.735306Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519627221764472523:2114] Cookie# 0 userReqId# "" txid# 281474976710663 SEND to# [59:7519627243239310025:2626] 2025-06-24T21:14:11.738321Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519627243239310025:2626] txid# 281474976710663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "targetuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014ordinaryuser\022\030\022\026\n\024all-users@well-known\032\334\003eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDg0Mjg1MSwiaWF0IjoxNzUwNzk5NjUxLCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.tz_QlPvitYwMIr5INbNBlTeSgXWToKKBFwNRwYPMgB9dm-dfoJ8CpoglcfA8DOkKtnV3APnoyc06TWgmbNfbERHeUT3DSw361juQ_Q6mSRoCZMXmNJkUtWilOKCOlsQqXgeBmHoGCcjSOrhDCXT__rp3snW-juCHj3WN-a2XAccOHrBtYlpjv-8p81ad7l1pTuEKODNndn7tRkcjnQFpnaL21b9AadmndH9DEeaiw4oS6E608vJYTsRw4hRi0jz0Yh5X50Kee_L8Jg1NOLXMYKjZMb0OLDSsD5FZujEQnKwlOlarpMnWjg8knN50EO9tFW-Jra6kPUcaiDZy8QDn5Q\"\005Login*\210\001eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDg0Mjg1MSwiaWF0IjoxNzUwNzk5NjUxLCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.**" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:35164" 2025-06-24T21:14:11.738402Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519627243239310025:2626] txid# 281474976710663 Bootstrap, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T21:14:11.738432Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519627243239310025:2626] txid# 281474976710663 Bootstrap, UserSID: ordinaryuser IsClusterAdministrator: 0 2025-06-24T21:14:11.738601Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1434: Actor# [59:7519627243239310025:2626] txid# 281474976710663 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-24T21:14:11.738650Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1469: Actor# [59:7519627243239310025:2626] txid# 281474976710663 HandleResolveDatabase, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-24T21:14:11.738702Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519627243239310025:2626] txid# 281474976710663 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:14:11.739142Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519627243239310025:2626] 
txid# 281474976710663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:14:11.739194Z node 59 :TX_PROXY ERROR: schemereq.cpp:1103: Actor# [59:7519627243239310025:2626] txid# 281474976710663, Access denied for ordinaryuser, attempt to manage user 2025-06-24T21:14:11.739310Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519627243239310025:2626] txid# 281474976710663, issues: { message: "Access denied for ordinaryuser" issue_code: 200000 severity: 1 } 2025-06-24T21:14:11.739358Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627243239310025:2626] txid# 281474976710663 SEND to# [59:7519627243239310024:2314] Source {TEvProposeTransactionStatus Status# 5} 2025-06-24T21:14:11.739701Z node 59 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=59&id=ZGM1NWY3Ny0yNzcwMjFkNi0xNDA0MThkOC04YWU5MjUyNA==, ActorId: [59:7519627243239310010:2314], ActorState: ExecuteState, TraceId: 01jyhwn4w06tsnanhkc06a9vvx, Create QueryResponse for error on request, msg: 2025-06-24T21:14:11.740131Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [59:7519627221764472523:2114] Handle TEvExecuteKqpTransaction 2025-06-24T21:14:11.740157Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [59:7519627221764472523:2114] TxId# 281474976710664 ProcessProposeKqpTransaction >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnEmptyTenant-DomainLoginOnly-StrictAclCheck [FAIL] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant >> CommitOffset::Commit_Flat_WithWrongSession [GOOD] >> CommitOffset::Commit_Flat_WithWrongSession_ToPast >> TPersQueueMirrorer::ValidStartStream [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-72 [GOOD] Test command err: Starting YDB, grpc: 1671, msgbus: 17182 2025-06-24T21:11:03.257715Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626438538170731:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:03.257775Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00360e/r3tmp/tmp0e8ff4/pdisk_1.dat 2025-06-24T21:11:03.784996Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:03.785151Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:03.795087Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:03.795996Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1671, node 1 2025-06-24T21:11:04.011598Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:04.011638Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:04.011652Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:04.011765Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got 
bad distributable configuration 2025-06-24T21:11:04.274340Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17182 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T21:11:04.448866Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519626438538170942:2117] Handle TEvNavigate describe path dc-1 2025-06-24T21:11:04.473353Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519626442833138763:2451] HANDLE EvNavigateScheme dc-1 2025-06-24T21:11:04.473836Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519626442833138763:2451] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:04.515845Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519626442833138763:2451] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-24T21:11:04.529254Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519626442833138763:2451] Handle TEvDescribeSchemeResult Forward to# [1:7519626442833138761:2449] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 
72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:11:04.547191Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519626438538170942:2117] Handle TEvProposeTransaction 2025-06-24T21:11:04.547219Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519626438538170942:2117] TxId# 281474976710657 ProcessProposeTransaction 2025-06-24T21:11:04.547347Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519626438538170942:2117] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7519626442833138772:2456] 2025-06-24T21:11:04.649796Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519626442833138772:2456] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T21:11:04.649876Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519626442833138772:2456] txid# 281474976710657 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:11:04.649899Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519626442833138772:2456] txid# 281474976710657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:11:04.649953Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519626442833138772:2456] txid# 281474976710657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:11:04.650302Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519626442833138772:2456] txid# 281474976710657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:04.650463Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519626442833138772:2456] HANDLE EvNavigateKeySetResult, txid# 281474976710657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-24T21:11:04.650518Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519626442833138772:2456] txid# 281474976710657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710657 TabletId# 72057594046644480} 2025-06-24T21:11:04.650680Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519626442833138772:2456] txid# 281474976710657 HANDLE EvClientConnected 2025-06-24T21:11:04.651522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:04.653820Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519626442833138772:2456] txid# 281474976710657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710657} 2025-06-24T21:11:04.653909Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519626442833138772:2456] txid# 281474976710657 SEND to# [1:7519626442833138771:2455] Source {TEvProposeTransactionStatus txid# 281474976710657 Status# 53} 2025-06-24T21:11:04.667579Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519626438538170942:2117] Handle TEvProposeTransaction 2025-06-24T21:11:04.667600Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519626438538170942:2117] TxId# 281474976710658 ProcessProposeTransaction 2025-06-24T21:11:04.667635Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519626438538170942:2117] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519626442833138811:2491] 2025-06-24T21:11:04.669976Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519626442833138811:2491] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T21:11:04.670039Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519626442833138811:2491] txid# 281474976710658 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:11:04.670054Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519626442833138811:2491] txid# 281474976710658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:11:04.670119Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519626442833138811:2491] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:11:04.670458Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519626442833138811:2491] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:04.670592Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519626442833138811:2491] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:11:04.670635Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519626442833138811:2491] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-24T21:11:04.670785Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519626442833138811:2491] txid# 281474976710658 HANDLE EvClientConnected 2025-06-24T21:11:04.671334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710 ... 
ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:12.218015Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519627228087812140:2112] Handle TEvProposeTransaction 2025-06-24T21:14:12.218056Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519627228087812140:2112] TxId# 281474976715661 ProcessProposeTransaction 2025-06-24T21:14:12.218114Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519627228087812140:2112] Cookie# 0 userReqId# "" txid# 281474976715661 SEND to# [59:7519627249562649559:2578] 2025-06-24T21:14:12.221402Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519627249562649559:2578] txid# 281474976715661 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/dc-1" 2025-06-24T21:14:12.221477Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519627249562649559:2578] txid# 281474976715661 Bootstrap, UserSID: metadata@system CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T21:14:12.221504Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519627249562649559:2578] txid# 281474976715661 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 0 2025-06-24T21:14:12.221694Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1434: Actor# [59:7519627249562649559:2578] txid# 281474976715661 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-24T21:14:12.221726Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1469: Actor# [59:7519627249562649559:2578] txid# 281474976715661 HandleResolveDatabase, UserSID: metadata@system CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-24T21:14:12.222556Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [59:7519627249562649559:2578] txid# 281474976715661 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:14:12.222650Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519627249562649559:2578] txid# 281474976715661 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:14:12.222875Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519627249562649559:2578] txid# 281474976715661 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:14:12.223040Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519627249562649559:2578] HANDLE EvNavigateKeySetResult, txid# 281474976715661 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 
72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:14:12.223098Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519627249562649559:2578] txid# 281474976715661 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715661 TabletId# 72057594046644480} 2025-06-24T21:14:12.223291Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519627249562649559:2578] txid# 281474976715661 HANDLE EvClientConnected 2025-06-24T21:14:12.234548Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519627249562649559:2578] txid# 281474976715661 Status StatusAlreadyExists HANDLE {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715661 Reason# Check failed: path: '/dc-1/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)} 2025-06-24T21:14:12.234710Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519627249562649559:2578] txid# 281474976715661, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:12.234750Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627249562649559:2578] txid# 281474976715661 SEND to# [59:7519627249562649478:2302] Source {TEvProposeTransactionStatus txid# 281474976715661 Status# 48} 2025-06-24T21:14:12.267226Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519627228087812140:2112] Handle TEvProposeTransaction 2025-06-24T21:14:12.267270Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519627228087812140:2112] TxId# 281474976715662 ProcessProposeTransaction 2025-06-24T21:14:12.267329Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519627228087812140:2112] Cookie# 0 userReqId# "" txid# 281474976715662 SEND to# [59:7519627249562649583:2590] 2025-06-24T21:14:12.270244Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519627249562649583:2590] txid# 281474976715662 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "targetuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:59214" 2025-06-24T21:14:12.270334Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519627249562649583:2590] txid# 281474976715662 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T21:14:12.270360Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519627249562649583:2590] txid# 281474976715662 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:14:12.270416Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519627249562649583:2590] txid# 281474976715662 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:14:12.270788Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519627249562649583:2590] txid# 281474976715662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 
2025-06-24T21:14:12.270935Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519627249562649583:2590] HANDLE EvNavigateKeySetResult, txid# 281474976715662 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:14:12.271003Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519627249562649583:2590] txid# 281474976715662 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715662 TabletId# 72057594046644480} 2025-06-24T21:14:12.271958Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519627249562649583:2590] txid# 281474976715662 HANDLE EvClientConnected 2025-06-24T21:14:12.294454Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519627249562649583:2590] txid# 281474976715662 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715662} 2025-06-24T21:14:12.294531Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627249562649583:2590] txid# 281474976715662 SEND to# [59:7519627249562649582:2295] Source {TEvProposeTransactionStatus txid# 281474976715662 Status# 48} 2025-06-24T21:14:12.383430Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519627228087812140:2112] Handle TEvProposeTransaction 2025-06-24T21:14:12.383473Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519627228087812140:2112] TxId# 281474976715663 ProcessProposeTransaction 2025-06-24T21:14:12.383524Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519627228087812140:2112] Cookie# 0 userReqId# "" txid# 281474976715663 SEND to# [59:7519627249562649620:2606] 2025-06-24T21:14:12.386494Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519627249562649620:2606] txid# 281474976715663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveUser { User: "targetuser" MissingOk: false } } } } UserToken: "\n\024ordinaryuser@builtin\022\030\022\026\n\024all-users@well-known\032\024ordinaryuser@builtin\"\007Builtin*\027ordi****ltin (32520BBF)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:47162" 2025-06-24T21:14:12.386594Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519627249562649620:2606] txid# 281474976715663 Bootstrap, UserSID: ordinaryuser@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T21:14:12.386620Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519627249562649620:2606] txid# 281474976715663 Bootstrap, UserSID: ordinaryuser@builtin IsClusterAdministrator: 0 2025-06-24T21:14:12.386805Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1434: Actor# [59:7519627249562649620:2606] txid# 281474976715663 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-24T21:14:12.386845Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1469: Actor# [59:7519627249562649620:2606] txid# 281474976715663 HandleResolveDatabase, UserSID: ordinaryuser@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-24T21:14:12.386893Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519627249562649620:2606] txid# 281474976715663 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:14:12.387209Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# 
[59:7519627249562649620:2606] txid# 281474976715663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:14:12.387238Z node 59 :TX_PROXY ERROR: schemereq.cpp:1103: Actor# [59:7519627249562649620:2606] txid# 281474976715663, Access denied for ordinaryuser@builtin, attempt to manage user 2025-06-24T21:14:12.387337Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519627249562649620:2606] txid# 281474976715663, issues: { message: "Access denied for ordinaryuser@builtin" issue_code: 200000 severity: 1 } 2025-06-24T21:14:12.387367Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627249562649620:2606] txid# 281474976715663 SEND to# [59:7519627249562649619:2313] Source {TEvProposeTransactionStatus Status# 5} 2025-06-24T21:14:12.387633Z node 59 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=59&id=M2U1YmM5MGEtNTMyNDBmOGEtY2VhMTQ2NzgtODRlZDI3YmI=, ActorId: [59:7519627249562649601:2313], ActorState: ExecuteState, TraceId: 01jyhwn5g66dv21g7gwv41wew8, Create QueryResponse for error on request, msg: 2025-06-24T21:14:12.387903Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [59:7519627228087812140:2112] Handle TEvExecuteKqpTransaction 2025-06-24T21:14:12.387928Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [59:7519627228087812140:2112] TxId# 281474976715664 ProcessProposeKqpTransaction >> CommitOffset::Commit_WithoutSession_ToPastParentPartition [GOOD] >> CommitOffset::Commit_WithSession_ParentNotFinished_SameSession >> KqpSystemView::NodesSimple |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> TKesusTest::TestAcquireTimeoutAfterReboot [GOOD] >> TKesusTest::TestAcquireSemaphoreViaRelease >> KqpSysColV0::InnerJoinSelectAsterisk ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TPersQueueMirrorer::ValidStartStream [GOOD] Test command err: 2025-06-24T21:12:43.571729Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626865331804680:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:43.571778Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:43.897609Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003168/r3tmp/tmpdN8d0l/pdisk_1.dat 2025-06-24T21:12:44.153965Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:44.154064Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:44.158010Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:44.222059Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:44.227273Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626865331804655:2079] 1750799563563588 != 1750799563563591 TServer::EnableGrpc on GrpcPort 
27148, node 1 2025-06-24T21:12:44.390715Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/003168/r3tmp/yandex07YKda.tmp 2025-06-24T21:12:44.390738Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/003168/r3tmp/yandex07YKda.tmp 2025-06-24T21:12:44.390868Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/003168/r3tmp/yandex07YKda.tmp 2025-06-24T21:12:44.390953Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:44.587976Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:44.639492Z INFO: TTestServer started on Port 11772 GrpcPort 27148 TClient is connected to server localhost:11772 PQClient connected to localhost:27148 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:45.057362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:45.083777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:12:45.099756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:12:45.106277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T21:12:46.881223Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626878216707339:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:46.881349Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:46.882350Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626878216707352:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:46.890486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:46.909216Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626878216707354:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T21:12:46.990877Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626878216707418:2442] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:47.474668Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626878216707426:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:12:47.481399Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YjlhZTI2MTYtNTdkMmFjNWEtZmQ5ZWM2NDMtOTQwMTZkYmM=, ActorId: [1:7519626878216707337:2297], ActorState: ExecuteState, TraceId: 01jyhwjj021dwsrmg8p1j47vc1, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:12:47.483704Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:12:47.591820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.689066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.789532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519626886806642306:2618] 2025-06-24T21:12:48.575264Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626865331804680:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:48.575334Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-24T21:12:54.188669Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-24T21:12:54.231706Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T21:12:54.236432Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519626912576446271:2691], Recipient [1:7519626865331804994:2150]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:12:54.236479Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:12:54.236498Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:12:54.236554Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519626912576446267:2688], Recipient [1:7519626865331804994:2150]: {TEvModifySchemeTransaction txid# 281474976710673 TabletId# 72057594046644480} 2025-06-24T21:12:54.236573Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T21:12:54.333410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "back-compatibility-test" TotalGroupCount: 3 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: ... Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:958: session cookie 2 consumer shared/user session shared/user_7_2_8021864572531532121_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) ready for read with readOffset 5 endOffset 10 2025-06-24T21:14:13.214473Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2309: session cookie 2 consumer shared/user session shared/user_7_2_8021864572531532121_v1 partition ready for read: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 5, endOffset# 10, WTime# 1750799653021, sizeLag# 1179 2025-06-24T21:14:13.214491Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2320: session cookie 2 consumer shared/user session shared/user_7_2_8021864572531532121_v1TEvPartitionReady. 
Aval parts: 1 2025-06-24T21:14:13.214540Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2243: session cookie 2 consumer shared/user session shared/user_7_2_8021864572531532121_v1 performing read request: guid# 4d626c83-7a494ad0-be3c1ea8-507bdbc2, from# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), count# 6, size# 1414, partitionsAsked# 1, maxTimeLag# 0ms 2025-06-24T21:14:13.214645Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1384: session cookie 2 consumer shared/user session shared/user_7_2_8021864572531532121_v1 READ FROM TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1)maxCount 6 maxSize 1414 maxTimeLagMs 0 readTimestampMs 0 readOffset 5 EndOffset 10 ClientCommitOffset 1 committedOffset 1 Guid 4d626c83-7a494ad0-be3c1ea8-507bdbc2 2025-06-24T21:14:13.215140Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-06-24T21:14:13.215198Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--topic1' partition 0 2025-06-24T21:14:13.215332Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 5 Topic 'rt3.dc1--topic1' partition 0 user user offset 5 count 6 size 1414 endOffset 10 max time lag 0ms effective offset 5 2025-06-24T21:14:13.215954Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 5 added 2 blobs, size 670 count 5 last offset 6, current partition end offset: 10 2025-06-24T21:14:13.215982Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 5. Send blob request. 2025-06-24T21:14:13.216042Z node 8 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 5 partno 0 count 1 parts_count 0 source 1 size 161 accessed 0 times before, last time 2025-06-24T21:14:13.000000Z 2025-06-24T21:14:13.216063Z node 8 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 6 partno 0 count 4 parts_count 0 source 1 size 509 accessed 0 times before, last time 2025-06-24T21:14:13.000000Z 2025-06-24T21:14:13.216099Z node 8 :PERSQUEUE DEBUG: read.h:121: Reading cookie 5. All 2 blobs are from cache. 2025-06-24T21:14:13.216151Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 2 blobs 2025-06-24T21:14:13.216189Z node 8 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 0 offset 5 partno 0 count 1 parts 0 suffix '63' 2025-06-24T21:14:13.216216Z node 8 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037892' partition 0 offset 6 partno 0 count 4 parts 0 suffix '63' 2025-06-24T21:14:13.216307Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 5 totakecount 1 count 1 size 141 from pos 0 cbcount 1 2025-06-24T21:14:13.216500Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 6 totakecount 4 count 4 size 489 from pos 0 cbcount 4 2025-06-24T21:14:13.216655Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--topic1' partition: 0 messageNo: 0 requestId: cookie: 5 2025-06-24T21:14:13.217545Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 2 consumer shared/user session shared/user_7_2_8021864572531532121_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { CmdReadResult { MaxOffset: 10 Result { Offset: 5 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 6 WriteTimestampMS: 1750799653056 CreateTimestampMS: 1750799653048 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 6 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 7 WriteTimestampMS: 1750799653066 CreateTimestampMS: 1750799653048 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 7 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 8 WriteTimestampMS: 1750799653067 CreateTimestampMS: 1750799653048 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 8 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 9 WriteTimestampMS: 1750799653067 CreateTimestampMS: 1750799653048 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 9 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 10 WriteTimestampMS: 1750799653067 CreateTimestampMS: 1750799653048 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 2 SizeLag: 18446744073709551233 RealReadOffset: 9 WaitQuotaTimeMs: 0 EndOffset: 10 StartOffset: 0 } Cookie: 5 } 2025-06-24T21:14:13.217760Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 2 consumer shared/user session shared/user_7_2_8021864572531532121_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset 10 2025-06-24T21:14:13.217802Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 2 consumer shared/user session shared/user_7_2_8021864572531532121_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 10 ReadOffset 10 ReadGuid 4d626c83-7a494ad0-be3c1ea8-507bdbc2 has messages 1 2025-06-24T21:14:13.217948Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 2 consumer shared/user session shared/user_7_2_8021864572531532121_v1 read done: guid# 4d626c83-7a494ad0-be3c1ea8-507bdbc2, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 673 2025-06-24T21:14:13.218001Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 2 consumer shared/user session shared/user_7_2_8021864572531532121_v1 response to read: guid# 4d626c83-7a494ad0-be3c1ea8-507bdbc2 2025-06-24T21:14:13.218530Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 2 consumer shared/user session shared/user_7_2_8021864572531532121_v1 Process answer. 
Aval parts: 0 2025-06-24T21:14:13.219392Z :DEBUG: [] [] [867ba39a-bd73b195-e395af21-a0b0274d] [] Got ReadResponse, serverBytesSize = 673, now ReadSizeBudget = 0, ReadSizeServerDelta = 52428127 2025-06-24T21:14:13.219562Z :DEBUG: [] [] [867ba39a-bd73b195-e395af21-a0b0274d] [] In ContinueReadingDataImpl, ReadSizeBudget = 0, ReadSizeServerDelta = 52428127 2025-06-24T21:14:13.219878Z :DEBUG: [] Decompression task done. Partition/PartitionSessionId: 1 (5-9) 2025-06-24T21:14:13.219940Z :DEBUG: [] [] [867ba39a-bd73b195-e395af21-a0b0274d] [] Returning serverBytesSize = 673 to budget 2025-06-24T21:14:13.219991Z :DEBUG: [] [] [867ba39a-bd73b195-e395af21-a0b0274d] [] In ContinueReadingDataImpl, ReadSizeBudget = 673, ReadSizeServerDelta = 52428127 2025-06-24T21:14:13.220272Z :DEBUG: [] [] [867ba39a-bd73b195-e395af21-a0b0274d] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 2025-06-24T21:14:13.220588Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_7_2_8021864572531532121_v1 grpc read done: success# 1, data# { read_request { bytes_size: 673 } } 2025-06-24T21:14:13.220703Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 2 consumer shared/user session shared/user_7_2_8021864572531532121_v1 got read request: guid# a79fc910-cc274552-1199725f-3de1f25b 2025-06-24T21:14:13.223458Z :DEBUG: [] Take Data. Partition 0. Read: {0, 0} (5-5) 2025-06-24T21:14:13.223552Z :DEBUG: [] Take Data. Partition 0. Read: {1, 0} (6-6) 2025-06-24T21:14:13.223602Z :DEBUG: [] Take Data. Partition 0. Read: {2, 0} (7-7) 2025-06-24T21:14:13.223640Z :DEBUG: [] Take Data. Partition 0. Read: {2, 1} (8-8) 2025-06-24T21:14:13.223684Z :DEBUG: [] Take Data. Partition 0. Read: {2, 2} (9-9) 2025-06-24T21:14:13.223747Z :DEBUG: [] [] [867ba39a-bd73b195-e395af21-a0b0274d] [] The application data is transferred to the client. Number of messages 5, size 115 bytes 2025-06-24T21:14:13.223847Z :DEBUG: [] [] [867ba39a-bd73b195-e395af21-a0b0274d] [] Returning serverBytesSize = 0 to budget 2025-06-24T21:14:13.224063Z :INFO: [] [] [867ba39a-bd73b195-e395af21-a0b0274d] Closing read session. Close timeout: 0.000000s 2025-06-24T21:14:13.224126Z :INFO: [] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic1:0:1:9:1 2025-06-24T21:14:13.224201Z :INFO: [] [] [867ba39a-bd73b195-e395af21-a0b0274d] Counters: { Errors: 0 CurrentSessionLifetimeMs: 71 BytesRead: 115 MessagesRead: 5 BytesReadCompressed: 115 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:14:13.224364Z :NOTICE: [] [] [867ba39a-bd73b195-e395af21-a0b0274d] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-24T21:14:13.224424Z :DEBUG: [] [] [867ba39a-bd73b195-e395af21-a0b0274d] [] Abort session to cluster 2025-06-24T21:14:13.225115Z :NOTICE: [] [] [867ba39a-bd73b195-e395af21-a0b0274d] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:14:13.226065Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_7_2_8021864572531532121_v1 grpc read done: success# 0, data# { } 2025-06-24T21:14:13.226108Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer shared/user session shared/user_7_2_8021864572531532121_v1 grpc read failed 2025-06-24T21:14:13.226142Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer shared/user session shared/user_7_2_8021864572531532121_v1 grpc closed 2025-06-24T21:14:13.226190Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer shared/user session shared/user_7_2_8021864572531532121_v1 is DEAD 2025-06-24T21:14:13.226773Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session shared/user_7_2_8021864572531532121_v1 2025-06-24T21:14:13.226828Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [7:7519627251645011079:2478] destroyed 2025-06-24T21:14:13.226878Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_7_2_8021864572531532121_v1 2025-06-24T21:14:13.227211Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--topic1] pipe [7:7519627251645011076:2475] disconnected; active server actors: 1 2025-06-24T21:14:13.227252Z node 7 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--topic1] pipe [7:7519627251645011076:2475] client user disconnected session shared/user_7_2_8021864572531532121_v1 2025-06-24T21:14:13.227485Z :DEBUG: [] MessageGroupId [src-id-test] SessionId [src-id-test|6bcf4eec-e3044ad5-6c49922c-f89842fe_0] Write session: destroy >> KqpSystemView::Join [GOOD] >> KqpSysColV0::UpdateAndDelete >> KqpSystemView::NodesRange2 [GOOD] >> TKesusTest::TestAcquireSemaphoreViaRelease [GOOD] >> KqpSystemView::PartitionStatsRange2 [GOOD] >> KqpSystemView::QueryStatsSimple |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestAcquireSemaphoreViaRelease [GOOD] Test command err: 2025-06-24T21:13:26.420767Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.420869Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:26.447021Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:26.447247Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:26.471719Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:26.472210Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:135:2159], cookie=4328370216719343308, session=0, seqNo=0) 2025-06-24T21:13:26.472360Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:26.484107Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:135:2159], cookie=4328370216719343308, session=1) 2025-06-24T21:13:26.486286Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] 
TTxSemaphoreAcquire::Execute (sender=[1:135:2159], cookie=111, session=1, semaphore="Lock1" count=1) 2025-06-24T21:13:26.487568Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-24T21:13:26.487688Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-24T21:13:26.499718Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:135:2159], cookie=111) 2025-06-24T21:13:26.500011Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:135:2159], cookie=222, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-24T21:13:26.512027Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:135:2159], cookie=222) 2025-06-24T21:13:26.512634Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:151:2173], cookie=5235930755129717234, name="Lock1") 2025-06-24T21:13:26.512735Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:151:2173], cookie=5235930755129717234) 2025-06-24T21:13:26.984400Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.984498Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:26.997952Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:26.998076Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:27.022046Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:27.022739Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:135:2159], cookie=18217244167183964798, session=0, seqNo=0) 2025-06-24T21:13:27.022836Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:27.036345Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:135:2159], cookie=18217244167183964798, session=1) 2025-06-24T21:13:27.036710Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:136:2160], cookie=2412570073330894279, session=0, seqNo=0) 2025-06-24T21:13:27.036848Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-24T21:13:27.049127Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:136:2160], cookie=2412570073330894279, session=2) 2025-06-24T21:13:27.050367Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:135:2159], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-24T21:13:27.050541Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-24T21:13:27.050630Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-24T21:13:27.063033Z node 2 :KESUS_TABLET DEBUG: 
tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:135:2159], cookie=111) 2025-06-24T21:13:27.063363Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:135:2159], cookie=112, session=1, semaphore="Lock2" count=1) 2025-06-24T21:13:27.063502Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 2 "Lock2" 2025-06-24T21:13:27.063572Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #2 session 1 2025-06-24T21:13:27.076177Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:135:2159], cookie=112) 2025-06-24T21:13:27.076550Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=222, session=2, semaphore="Lock1" count=1) 2025-06-24T21:13:27.076776Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=223, session=2, semaphore="Lock2" count=18446744073709551615) 2025-06-24T21:13:27.088860Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=222) 2025-06-24T21:13:27.088953Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=223) 2025-06-24T21:13:27.089286Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=333, session=2, semaphore="Lock1" count=1) 2025-06-24T21:13:27.089563Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=334, session=2, semaphore="Lock2" count=18446744073709551615) 2025-06-24T21:13:27.101512Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=333) 2025-06-24T21:13:27.101577Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=334) 2025-06-24T21:13:27.539355Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:27.556201Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:27.915400Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:27.931931Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:28.285324Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:28.297561Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:28.664936Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:28.680288Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:29.067393Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:29.079735Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 
2025-06-24T21:13:29.424115Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:29.439900Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:29.817620Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:29.830823Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:30.195771Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:30.216291Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:30.599479Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:30.616328Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:31.020202Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:31.033665Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:31.401054Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:31.413201Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:31.775763Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:31.788139Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:32.150078Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:32.162591Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:32.555657Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:32.572760Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:32.971429Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:32.983807Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:33.350683Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:33.364096Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:33.733770Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:33.749536Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:34.130033Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:34.142192Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:34.512031Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:34.524590Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:34.922236Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:34.934587Z node 2 :KESUS_TABLET DEBUG: 
tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:35.301045Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927 ... 4T21:14:09.914566Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:10.319526Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:10.331907Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:10.730838Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:10.744359Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:11.121078Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:11.134242Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:11.505588Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:11.520714Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:11.905742Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:11.919334Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:12.375795Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:12.395843Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:12.771570Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:12.784193Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:13.175502Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:13.195862Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:13.587101Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:13.604278Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:13.975205Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:13.992941Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:14.403469Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:14.417247Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:14.795552Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:14.814196Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:15.187489Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:15.203432Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:15.575487Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 
2025-06-24T21:14:15.595834Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:15.956624Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:15.973457Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:16.466845Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_timeout.cpp:30: [72057594037927937] TTxSemaphoreTimeout::Execute (session=2, semaphore=1) 2025-06-24T21:14:16.466931Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:124: [72057594037927937] Deleting session 2 / semaphore 1 "Lock1" waiter link 2025-06-24T21:14:16.483976Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_timeout.cpp:71: [72057594037927937] TTxSemaphoreTimeout::Complete (session=2, semaphore=1) 2025-06-24T21:14:16.512149Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[4:598:2534], cookie=3939543717253884009) 2025-06-24T21:14:16.512256Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[4:598:2534], cookie=3939543717253884009) 2025-06-24T21:14:16.512774Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[4:601:2537], cookie=15366930460227891582) 2025-06-24T21:14:16.512968Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[4:601:2537], cookie=15366930460227891582) 2025-06-24T21:14:16.513509Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:604:2540], cookie=7259471770841721168, name="Lock1") 2025-06-24T21:14:16.513591Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:604:2540], cookie=7259471770841721168) 2025-06-24T21:14:16.514097Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:607:2543], cookie=7095925927268332653, name="Lock1") 2025-06-24T21:14:16.514169Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:607:2543], cookie=7095925927268332653) 2025-06-24T21:14:16.947944Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:14:16.948081Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:14:16.969686Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:14:16.969840Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:14:16.994053Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:14:16.994656Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:133:2157], cookie=18077996347771220029, session=0, seqNo=0) 2025-06-24T21:14:16.994834Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:14:17.020106Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:133:2157], cookie=18077996347771220029, session=1) 2025-06-24T21:14:17.020483Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] 
TTxSessionAttach::Execute (sender=[5:133:2157], cookie=15449425196640766885, session=0, seqNo=0) 2025-06-24T21:14:17.020630Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-24T21:14:17.035956Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:133:2157], cookie=15449425196640766885, session=2) 2025-06-24T21:14:17.036299Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:133:2157], cookie=3503947416399522171, session=0, seqNo=0) 2025-06-24T21:14:17.036438Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 3 2025-06-24T21:14:17.055892Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:133:2157], cookie=3503947416399522171, session=3) 2025-06-24T21:14:17.056516Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:146:2168], cookie=2015377901371965907, name="Sem1", limit=3) 2025-06-24T21:14:17.056674Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1" 2025-06-24T21:14:17.073236Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:146:2168], cookie=2015377901371965907) 2025-06-24T21:14:17.073620Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:133:2157], cookie=111, session=1, semaphore="Sem1" count=2) 2025-06-24T21:14:17.073786Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #1 session 1 2025-06-24T21:14:17.073977Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:133:2157], cookie=222, session=2, semaphore="Sem1" count=2) 2025-06-24T21:14:17.074192Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:133:2157], cookie=333, session=3, semaphore="Sem1" count=1) 2025-06-24T21:14:17.088368Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:133:2157], cookie=111) 2025-06-24T21:14:17.088475Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:133:2157], cookie=222) 2025-06-24T21:14:17.088513Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:133:2157], cookie=333) 2025-06-24T21:14:17.089175Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:155:2177], cookie=17922555496948629968, name="Sem1") 2025-06-24T21:14:17.089279Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:155:2177], cookie=17922555496948629968) 2025-06-24T21:14:17.089771Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:158:2180], cookie=16297167242196584909, name="Sem1") 2025-06-24T21:14:17.089847Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:158:2180], cookie=16297167242196584909) 2025-06-24T21:14:17.090102Z node 5 
:KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[5:133:2157], cookie=444, name="Sem1") 2025-06-24T21:14:17.090220Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 1 "Sem1" owner link 2025-06-24T21:14:17.090292Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #2 session 2 2025-06-24T21:14:17.090352Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #3 session 3 2025-06-24T21:14:17.104270Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[5:133:2157], cookie=444) 2025-06-24T21:14:17.105014Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:163:2185], cookie=6703638914982282465, name="Sem1") 2025-06-24T21:14:17.105113Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:163:2185], cookie=6703638914982282465) 2025-06-24T21:14:17.105569Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:166:2188], cookie=14632162524237716478, name="Sem1") 2025-06-24T21:14:17.105643Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:166:2188], cookie=14632162524237716478) >> KqpSystemView::FailResolve ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::Join [GOOD] Test command err: Trying to start YDB, gRPC: 25326, MsgBus: 24825 2025-06-24T21:14:00.933196Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627198855370756:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:00.933227Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a3f/r3tmp/tmph12HJP/pdisk_1.dat 2025-06-24T21:14:01.409777Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627198855370735:2079] 1750799640931451 != 1750799640931454 2025-06-24T21:14:01.437467Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:01.440221Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:01.440284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:01.448553Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25326, node 1 2025-06-24T21:14:01.623826Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:01.623851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:01.623859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:01.623984Z 
node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:14:01.961815Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24825 TClient is connected to server localhost:24825 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:02.608323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:02.625671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:14:02.640788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:02.824487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:02.998648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:14:03.079759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:04.572417Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627216035241555:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:04.572555Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.221217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.252385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.285074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.316275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.354859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.427028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.464287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:05.566931Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627220330209526:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.567025Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.569477Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627220330209531:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.573379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:05.586057Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627220330209533:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:14:05.670945Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627220330209584:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:05.933986Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627198855370756:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:05.934050Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:07.387873Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799647332, txId: 281474976710672] shutting down waiting... 2025-06-24T21:14:08.598321Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799648587, txId: 281474976710674] shutting down waiting... 2025-06-24T21:14:09.818055Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799649810, txId: 281474976710676] shutting down waiting... 2025-06-24T21:14:10.998263Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799650991, txId: 281474976710678] shutting down waiting... 2025-06-24T21:14:12.226580Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799652211, txId: 281474976710680] shutting down waiting... 2025-06-24T21:14:13.424497Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799653415, txId: 281474976710682] shutting down waiting... 2025-06-24T21:14:14.642746Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799654628, txId: 281474976710684] shutting down waiting... 
2025-06-24T21:14:15.841060Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799655800, txId: 281474976710686] shutting down 2025-06-24T21:14:16.286629Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799656271, txId: 281474976710688] shutting down 2025-06-24T21:14:16.425767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:14:16.425802Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded >> KqpSysColV1::StreamInnerJoinTables [GOOD] >> KqpSysColV1::StreamSelectRowById [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-48 [GOOD] >> KqpSysColV0::SelectRange [GOOD] |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::FailNavigate [GOOD] >> TKesusTest::TestSessionTimeoutAfterReboot [GOOD] >> TKesusTest::TestSessionStealingSameKey ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::NodesRange2 [GOOD] Test command err: Trying to start YDB, gRPC: 24043, MsgBus: 7167 2025-06-24T21:14:03.503940Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627208787745968:2125];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:03.528717Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:03.616351Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519627211913552902:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:03.616654Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:03.625005Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519627209136214386:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:03.625093Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:03.667018Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627209064244075:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:03.668596Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519627210701199513:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:03.668641Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:04.360487Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/2btm/004a2b/r3tmp/tmpIiGw0d/pdisk_1.dat 2025-06-24T21:14:04.689386Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:04.693035Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:04.694702Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:04.701620Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:14:04.705206Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:14:04.701616Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:04.702195Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:14:04.754077Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:04.761309Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:14:05.080712Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:05.083304Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:05.089417Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:05.094667Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:05.094728Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:05.112003Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:05.112139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:05.112443Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:05.112518Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:05.112720Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:05.112757Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:05.155279Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Connecting -> Connected 2025-06-24T21:14:05.156297Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-24T21:14:05.156348Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-24T21:14:05.156375Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:14:05.156441Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-24T21:14:05.159503Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:05.159781Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:05.160986Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:05.161563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:05.206433Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 24043, node 1 2025-06-24T21:14:05.489347Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:05.489376Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:05.489385Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:05.489534Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7167 TClient is connected to server localhost:7167 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:06.737867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:14:06.796416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:07.023339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:07.496456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:07.681630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:08.501169Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627208787745968:2125];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:08.501276Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:08.597489Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519627211913552902:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:08.597542Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:08.620942Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519627209064244075:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:08.621036Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:08.625050Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627209136214386:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:08.625130Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:08.669023Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519627210701199513:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:08.669066Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:10.316866Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627238852518827:2319], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:10.317039Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:10.750315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:10.851459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:10.940299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:11.049398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:11.153243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:11.252583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:11.318065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:11.416276Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627243147486907:2358], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:11.416379Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:11.416857Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627243147486912:2361], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:11.421966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:11.458192Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627243147486914:2362], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:14:11.558779Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627243147486987:4108] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:13.289691Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799653275, txId: 281474976715672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsRange2 [GOOD] Test command err: Trying to start YDB, gRPC: 26814, MsgBus: 14311 2025-06-24T21:14:11.497440Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627245011851030:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:11.497560Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a23/r3tmp/tmpl9vWzZ/pdisk_1.dat 2025-06-24T21:14:11.964344Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:11.965228Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627245011851011:2079] 1750799651495970 != 1750799651495973 2025-06-24T21:14:11.975644Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:11.975763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:11.977745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26814, node 1 2025-06-24T21:14:12.067226Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:12.067255Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:12.067270Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:12.067450Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14311 2025-06-24T21:14:12.516414Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14311 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:12.751814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:12.778908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:14:12.800915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:12.995210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:13.183862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:13.277980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:14.933074Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627257896754534:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:14.933312Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:15.306494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.389334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.426767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.460601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.501599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.583948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.629412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.716772Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627262191722493:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:15.716900Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:15.717225Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627262191722498:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:15.722390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:15.741371Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627262191722500:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:14:15.816098Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627262191722551:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:16.497821Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627245011851030:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:16.497914Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:17.180350Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799657166, txId: 281474976715672] shutting down >> KqpSysColV1::SelectRowAsterisk [GOOD] >> TKesusTest::TestSessionStealingSameKey [GOOD] >> TKesusTest::TestSessionStealingDifferentKey ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamInnerJoinTables [GOOD] Test command err: Trying to start YDB, gRPC: 18675, MsgBus: 6046 2025-06-24T21:14:11.585203Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627243991929594:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:11.586937Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a26/r3tmp/tmpQI0f7F/pdisk_1.dat 2025-06-24T21:14:11.987632Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:11.988029Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627243991929497:2079] 1750799651575213 != 1750799651575216 2025-06-24T21:14:12.006783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:12.006926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:12.018868Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18675, node 1 2025-06-24T21:14:12.260765Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:12.260790Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:12.260798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:12.260959Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6046 2025-06-24T21:14:12.586996Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server 
localhost:6046 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:12.981375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:13.001753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:14:13.018322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:13.184207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:13.366808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:13.455863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:15.110506Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627261171800332:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:15.110609Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:15.429279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.467993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.504769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.583525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.625499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.702524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.749437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.852300Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627261171800998:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:15.852446Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:15.855447Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627261171801003:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:15.860691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:15.875256Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627261171801005:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:14:15.970188Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627261171801056:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:16.583297Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627243991929594:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:16.583383Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:17.621554Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799657635, txId: 281474976710672] shutting down [[[108u];["One"];[8];["Value5"];[108u];["One"];#;["Value31"]]] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamSelectRowById [GOOD] Test command err: Trying to start YDB, gRPC: 14374, MsgBus: 3750 2025-06-24T21:14:11.842356Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627244318567984:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:11.842406Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a1d/r3tmp/tmprAJ0gk/pdisk_1.dat 2025-06-24T21:14:12.268766Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:12.268870Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:12.279708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:12.305603Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627244318567960:2079] 1750799651840340 != 1750799651840343 2025-06-24T21:14:12.316181Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14374, node 1 2025-06-24T21:14:12.384033Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:12.384062Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:12.384072Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:12.384226Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3750 TClient is connected to server localhost:3750 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:14:12.856190Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:12.978909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:12.991426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:14:12.996901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:13.149815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:13.341312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:13.443252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:14:15.261701Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627261498438794:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:15.261837Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:15.609769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.653979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.696268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.784348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.863001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:15.943454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.021302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.113833Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627265793406763:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:16.113914Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:16.114138Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627265793406768:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:16.117813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:16.132561Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627265793406770:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:14:16.211404Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627265793406821:3431] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:16.842952Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627244318567984:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:16.843046Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:17.617884Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799657642, txId: 281474976710672] shutting down >> KqpSysColV1::StreamSelectRowAsterisk [GOOD] >> TKesusTest::TestSessionStealingDifferentKey [GOOD] >> KqpSysColV0::SelectRowAsterisk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-48 [GOOD] Test command err: Starting YDB, grpc: 7778, msgbus: 11065 2025-06-24T21:11:03.291048Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626435968198850:2191];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:03.291319Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0035fc/r3tmp/tmpNgvAir/pdisk_1.dat 2025-06-24T21:11:03.766203Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:03.807266Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:03.807398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:03.809957Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7778, node 1 2025-06-24T21:11:04.011560Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:04.011582Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:04.011589Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:04.011708Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:04.291117Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11065 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-24T21:11:04.447011Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519626435968198933:2117] Handle TEvNavigate describe path dc-1 2025-06-24T21:11:04.466308Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519626440263166745:2440] HANDLE EvNavigateScheme dc-1 2025-06-24T21:11:04.466769Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519626440263166745:2440] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:04.501314Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519626440263166745:2440] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-24T21:11:04.510146Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519626440263166745:2440] Handle TEvDescribeSchemeResult Forward to# [1:7519626440263166743:2438] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:11:04.531553Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519626435968198933:2117] Handle TEvProposeTransaction 2025-06-24T21:11:04.531580Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519626435968198933:2117] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T21:11:04.531692Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519626435968198933:2117] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:7519626440263166751:2445] 2025-06-24T21:11:04.633341Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519626440263166751:2445] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T21:11:04.633443Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519626440263166751:2445] txid# 281474976715657 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:11:04.633491Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519626440263166751:2445] txid# 281474976715657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:11:04.633557Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519626440263166751:2445] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:11:04.633896Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519626440263166751:2445] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:04.634086Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519626440263166751:2445] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-24T21:11:04.634157Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519626440263166751:2445] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:11:04.634366Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519626440263166751:2445] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T21:11:04.635271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:04.637563Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519626440263166751:2445] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T21:11:04.637620Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519626440263166751:2445] txid# 281474976715657 SEND to# [1:7519626440263166750:2444] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} waiting... 
2025-06-24T21:11:04.650892Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519626435968198933:2117] Handle TEvProposeTransaction 2025-06-24T21:11:04.650917Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519626435968198933:2117] TxId# 281474976715658 ProcessProposeTransaction 2025-06-24T21:11:04.650951Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519626435968198933:2117] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7519626440263166798:2485] 2025-06-24T21:11:04.653687Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519626440263166798:2485] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T21:11:04.653761Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519626440263166798:2485] txid# 281474976715658 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:11:04.653777Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519626440263166798:2485] txid# 281474976715658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:11:04.653871Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519626440263166798:2485] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:11:04.654204Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519626440263166798:2485] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:04.654328Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519626440263166798:2485] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:11:04.654375Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519626440263166798:2485] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-24T21:11:04.654513Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519626440263166798:2485] txid# 281474976715658 HANDLE EvClientConnected 2025-06-24T21:11:04.655072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715 ... 
DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:14:16.836568Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519627265999484261:2601] txid# 281474976715661 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715661 TabletId# 72057594046644480} 2025-06-24T21:14:16.836740Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519627265999484261:2601] txid# 281474976715661 HANDLE EvClientConnected 2025-06-24T21:14:16.843820Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519627265999484261:2601] txid# 281474976715661 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715661} 2025-06-24T21:14:16.843884Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627265999484261:2601] txid# 281474976715661 SEND to# [59:7519627265999484260:2293] Source {TEvProposeTransactionStatus txid# 281474976715661 Status# 48} 2025-06-24T21:14:17.049585Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519627244524646914:2114] Handle TEvProposeTransaction 2025-06-24T21:14:17.049622Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519627244524646914:2114] TxId# 281474976715662 ProcessProposeTransaction 2025-06-24T21:14:17.049675Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519627244524646914:2114] Cookie# 0 userReqId# "" txid# 281474976715662 SEND to# [59:7519627270294451580:2618] 2025-06-24T21:14:17.052688Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519627270294451580:2618] txid# 281474976715662 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\022\010\001\022\016\032\014ordinaryuser\n\032\010\000\022\026\010\001\020\200\200\002\032\014ordinaryuser \000" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:41844" 2025-06-24T21:14:17.052774Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519627270294451580:2618] txid# 281474976715662 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T21:14:17.052805Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519627270294451580:2618] txid# 281474976715662 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:14:17.052867Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519627270294451580:2618] txid# 281474976715662 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:14:17.053273Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519627270294451580:2618] txid# 281474976715662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:14:17.053400Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519627270294451580:2618] HANDLE EvNavigateKeySetResult, txid# 281474976715662 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:14:17.053466Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519627270294451580:2618] txid# 281474976715662 SEND to# 72057594046644480 
shardToRequest {TEvModifySchemeTransaction txid# 281474976715662 TabletId# 72057594046644480} 2025-06-24T21:14:17.053657Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519627270294451580:2618] txid# 281474976715662 HANDLE EvClientConnected 2025-06-24T21:14:17.054246Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:14:17.057445Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519627270294451580:2618] txid# 281474976715662 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715662} 2025-06-24T21:14:17.057509Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627270294451580:2618] txid# 281474976715662 SEND to# [59:7519627270294451579:2308] Source {TEvProposeTransactionStatus txid# 281474976715662 Status# 48} 2025-06-24T21:14:17.107620Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519627244524646914:2114] Handle TEvProposeTransaction 2025-06-24T21:14:17.107659Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519627244524646914:2114] TxId# 281474976715663 ProcessProposeTransaction 2025-06-24T21:14:17.107713Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519627244524646914:2114] Cookie# 0 userReqId# "" txid# 281474976715663 SEND to# [59:7519627270294451617:2641] 2025-06-24T21:14:17.110568Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519627270294451617:2641] txid# 281474976715663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "targetuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:41874" 2025-06-24T21:14:17.110644Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519627270294451617:2641] txid# 281474976715663 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T21:14:17.110672Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519627270294451617:2641] txid# 281474976715663 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:14:17.110726Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519627270294451617:2641] txid# 281474976715663 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:14:17.111101Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519627270294451617:2641] txid# 281474976715663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:14:17.111232Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519627270294451617:2641] HANDLE EvNavigateKeySetResult, txid# 281474976715663 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:14:17.111292Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519627270294451617:2641] txid# 281474976715663 SEND to# 72057594046644480 
shardToRequest {TEvModifySchemeTransaction txid# 281474976715663 TabletId# 72057594046644480} 2025-06-24T21:14:17.111445Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519627270294451617:2641] txid# 281474976715663 HANDLE EvClientConnected 2025-06-24T21:14:17.119839Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519627270294451617:2641] txid# 281474976715663 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715663} 2025-06-24T21:14:17.119901Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627270294451617:2641] txid# 281474976715663 SEND to# [59:7519627270294451616:2310] Source {TEvProposeTransactionStatus txid# 281474976715663 Status# 48} 2025-06-24T21:14:17.195820Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519627244524646914:2114] Handle TEvProposeTransaction 2025-06-24T21:14:17.195855Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519627244524646914:2114] TxId# 281474976715664 ProcessProposeTransaction 2025-06-24T21:14:17.195908Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519627244524646914:2114] Cookie# 0 userReqId# "" txid# 281474976715664 SEND to# [59:7519627270294451645:2653] 2025-06-24T21:14:17.198768Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519627270294451645:2653] txid# 281474976715664 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { ModifyUser { User: "targetuser" Password: "passwd" IsHashedPassword: false } } } } UserToken: "\n\014ordinaryuser\022\030\022\026\n\024all-users@well-known\032\334\003eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDg0Mjg1NywiaWF0IjoxNzUwNzk5NjU3LCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.wRndUEV8XJHISAjYju9zo72Ik8BjKXXyjM9zUHSslGZv2rs3_SjymEEfZYL4E69XoNbXoMIsL2LezqhgvWlFsH809mZlhCznLrWtvPiY9SoYcSfiPG3lrKZt-jBW66X7kTrxGa0oLW_Fdnv9T1Q3x8q74OrsAJ6yhQweK3iKxRliMlI20C243ROmDnhNlBwpED5J0JFcX1wEX1S_qFIuXewJ3A9S--I_TznagcCmy7RQSLmWcXEHiWBJ7uBVCH1sph4LM5pdycs_YkLXPERlbUn6_xZaB-W-d4e4OC5lVsws7T7kiLNiy9VQ9yqk2qAL4kaLM74WgecZTo4RwlVaCw\"\005Login*\210\001eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDg0Mjg1NywiaWF0IjoxNzUwNzk5NjU3LCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.**" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:41896" 2025-06-24T21:14:17.198838Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519627270294451645:2653] txid# 281474976715664 Bootstrap, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-24T21:14:17.198866Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519627270294451645:2653] txid# 281474976715664 Bootstrap, UserSID: ordinaryuser IsClusterAdministrator: 0 2025-06-24T21:14:17.199062Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1434: Actor# [59:7519627270294451645:2653] txid# 281474976715664 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-24T21:14:17.199124Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1469: Actor# [59:7519627270294451645:2653] txid# 281474976715664 HandleResolveDatabase, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-24T21:14:17.199205Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519627270294451645:2653] txid# 281474976715664 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:14:17.199545Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519627270294451645:2653] 
txid# 281474976715664 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:14:17.199588Z node 59 :TX_PROXY ERROR: schemereq.cpp:1103: Actor# [59:7519627270294451645:2653] txid# 281474976715664, Access denied for ordinaryuser, attempt to manage user 2025-06-24T21:14:17.199701Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519627270294451645:2653] txid# 281474976715664, issues: { message: "Access denied for ordinaryuser" issue_code: 200000 severity: 1 } 2025-06-24T21:14:17.199737Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519627270294451645:2653] txid# 281474976715664 SEND to# [59:7519627270294451644:2322] Source {TEvProposeTransactionStatus Status# 5} 2025-06-24T21:14:17.200043Z node 59 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=59&id=YjZhNTM2MGMtOTk2MDk2NDEtOTdjOGM4YTAtMzNhNzYzZWM=, ActorId: [59:7519627270294451635:2322], ActorState: ExecuteState, TraceId: 01jyhwna6p7xqh7mead26sda2b, Create QueryResponse for error on request, msg: 2025-06-24T21:14:17.200271Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [59:7519627244524646914:2114] Handle TEvExecuteKqpTransaction 2025-06-24T21:14:17.200297Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [59:7519627244524646914:2114] TxId# 281474976715665 ProcessProposeKqpTransaction ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::SelectRange [GOOD] Test command err: Trying to start YDB, gRPC: 23128, MsgBus: 22046 2025-06-24T21:14:12.173212Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627251154559362:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:12.173254Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a1b/r3tmp/tmpBhUOx4/pdisk_1.dat 2025-06-24T21:14:12.705479Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:12.707311Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627251154559337:2079] 1750799652154200 != 1750799652154203 TServer::EnableGrpc on GrpcPort 2025-06-24T21:14:12.717388Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 23128, node 1 2025-06-24T21:14:12.717521Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:12.719104Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:12.823622Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:12.823654Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:12.823672Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:12.823781Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22046 2025-06-24T21:14:13.212422Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22046 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:13.484116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:13.527349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:14:13.553582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:13.772351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:13.955602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:14.042097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:14:15.732144Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627264039462856:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:15.732255Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:16.033253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.067692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.099568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.137165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.184045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.272584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.307271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.409456Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627268334430820:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:16.409536Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:16.409752Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627268334430825:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:16.413853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:16.428577Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627268334430827:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:14:16.507787Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627268334430878:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:17.176611Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627251154559362:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:17.176716Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::FailNavigate [GOOD] Test command err: Trying to start YDB, gRPC: 14393, MsgBus: 16308 2025-06-24T21:14:12.511526Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627249357990068:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:12.511637Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a1a/r3tmp/tmpTcs3Im/pdisk_1.dat 2025-06-24T21:14:12.918359Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14393, node 1 2025-06-24T21:14:12.984216Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:12.984353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:12.986005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:13.006187Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:13.006212Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:13.006257Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:13.006404Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16308 2025-06-24T21:14:13.539256Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16308 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:13.757563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:13.776195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:14:13.790144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:14:13.801513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:13.956824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:14:14.108187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:14.171580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:16.044214Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627266537860871:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:16.044383Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:16.374024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.401953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.440896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.482399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.531354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.564682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.636633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.726414Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627266537861536:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:16.726607Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:16.726928Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627266537861541:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:16.731462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:16.744560Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627266537861543:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2025-06-24T21:14:16.823009Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627266537861594:3430] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:17.511636Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627249357990068:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:17.520445Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:17.954997Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7519627270832829179:3610], for# user0@builtin, access# DescribeSchema 2025-06-24T21:14:17.955039Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7519627270832829179:3610], for# user0@builtin, access# DescribeSchema 2025-06-24T21:14:17.966854Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519627270832829169:2478], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: KiReadTable!
:2:13: Error: Cannot find table 'db.[/Root/.sys/partition_stats]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:14:17.967896Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NDk1ZjA3MDMtYTVjODE4ZDAtYzk4MTRiNTItZjYzMTA1ZWE=, ActorId: [1:7519627270832829162:2474], ActorState: ExecuteState, TraceId: 01jyhwnaxdam74gdfx4vqknhv4, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: >> KqpSysColV1::InnerJoinTables >> KqpSystemView::PartitionStatsRange3 [GOOD] >> KqpSystemView::PartitionStatsRanges >> KqpSysColV1::StreamInnerJoinSelect [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::SelectRowAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 18094, MsgBus: 6843 2025-06-24T21:14:13.005843Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627252626492605:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:13.005907Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a18/r3tmp/tmplx7hjw/pdisk_1.dat 2025-06-24T21:14:13.388152Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:13.388954Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627248331525289:2079] 1750799653004510 != 1750799653004513 TServer::EnableGrpc on GrpcPort 18094, node 1 2025-06-24T21:14:13.482626Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:13.482741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:13.532473Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:13.558593Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:13.558616Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:13.558629Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:13.558733Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6843 2025-06-24T21:14:14.035936Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6843 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:14.219745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:14.259590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:14:14.274817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:14.433892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:14.623282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:14.701781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:16.436877Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627265511396104:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:16.436970Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:16.714029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.753610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.790799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.826380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.863228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.939381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:16.977484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.051801Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627269806364056:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.051895Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.052151Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627269806364062:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.055657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:17.069440Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627269806364064:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:14:17.138829Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627269806364115:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:18.007373Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627252626492605:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:18.007471Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpSystemView::NodesRange1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestSessionStealingDifferentKey [GOOD] Test command err: 2025-06-24T21:13:26.420118Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:13:26.420254Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:13:26.444694Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:13:26.444927Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:13:26.469348Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:13:26.469886Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:135:2159], cookie=15437833146362020119, session=0, seqNo=0) 2025-06-24T21:13:26.470051Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:13:26.483920Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:135:2159], cookie=15437833146362020119, session=1) 2025-06-24T21:13:26.484637Z node 1 :KESUS_TABLET DEBUG: tx_session_detach.cpp:100: [72057594037927937] Fast-path detach session=1 from sender=[1:135:2159], cookie=12648066135928855106 2025-06-24T21:13:26.486643Z node 1 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[1:148:2170], cookie=481713940362086833) 2025-06-24T21:13:26.486763Z node 1 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[1:148:2170], cookie=481713940362086833) 2025-06-24T21:13:26.940778Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:26.959940Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:27.320737Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:27.340122Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:27.703128Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:27.715902Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:28.063349Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 
2025-06-24T21:13:28.075513Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:28.457884Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:28.475888Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:28.820626Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:28.833457Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:29.177262Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:29.191465Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:29.550811Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:29.565139Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:29.945971Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:29.961457Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:30.384999Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:30.408203Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:30.797780Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:30.816519Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:31.172981Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:31.188280Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:31.550917Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:31.563498Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:31.924931Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:31.941158Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:32.347574Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:32.360816Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:32.755870Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:32.769398Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:33.140321Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:33.155919Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:33.516689Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:33.529118Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:33.918444Z node 1 :KESUS_TABLET DEBUG: 
tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:33.930838Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:34.316061Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:34.332038Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:34.712979Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:34.725405Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:35.083732Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:35.095987Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:35.465593Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:35.480001Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:35.865735Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:35.883983Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:36.279780Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:36.299332Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:36.708589Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:36.724837Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:37.122270Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:37.140052Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:37.533090Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:37.546250Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:37.913033Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:37.928130Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:38.331563Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:38.343887Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:38.720235Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:38.732638Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:39.113568Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:39.126134Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:39.498926Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:39.511182Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] 
TTxSelfCheck::Complete 2025-06-24T21:13:39.876992Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:39.890057Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:40.278075Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:40.290224Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:40.668820Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:40.681659Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:41.059612Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:41.072054Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:41.429655Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:41.442192Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:41.808370Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:41.821493Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:42.229088Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:42.248329Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:42.612567Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:42.627501Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:43.003027Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:43.015897Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:13:43.385868Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:13:43.400008Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::C ... 
: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:04.528123Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:04.905145Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:04.924045Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:05.294386Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:05.307487Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:05.678952Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:05.695911Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:06.072367Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:06.089419Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:06.524566Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:06.542032Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:06.921304Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:06.936048Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:07.317730Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:07.330108Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:07.703245Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:07.717639Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:08.091166Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:08.110514Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:08.522543Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:08.544021Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:08.946646Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:08.968173Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:09.351578Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:09.368209Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:09.760039Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:09.775578Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:10.146728Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:10.160057Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] 
TTxSelfCheck::Complete 2025-06-24T21:14:10.552965Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:10.565819Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:10.937181Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:10.949469Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:11.309282Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:11.326808Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:11.687441Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:11.703507Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:12.087525Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:12.107986Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:12.508467Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:12.527691Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:12.931535Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:12.951983Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:13.343487Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:13.360227Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:13.735596Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:13.752121Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:14.127480Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:14.144087Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:14.683472Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:14.697234Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:15.093653Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:15.108395Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:15.491798Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:15.507990Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:15.906935Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:15.920017Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:16.347508Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:16.361490Z node 2 
:KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:16.750406Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:16.763719Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:17.143490Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:17.155900Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:17.561443Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:17.576276Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:17.971629Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:17.988207Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:18.367882Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-24T21:14:18.381994Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-24T21:14:18.724715Z node 2 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:27: [72057594037927937] TTxSessionTimeout::Execute (session=1) 2025-06-24T21:14:18.724824Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] Deleting session 1 2025-06-24T21:14:18.740116Z node 2 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:56: [72057594037927937] TTxSessionTimeout::Complete (session=1) 2025-06-24T21:14:18.751207Z node 2 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[2:642:2567], cookie=12390787645970249957) 2025-06-24T21:14:18.751318Z node 2 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[2:642:2567], cookie=12390787645970249957) 2025-06-24T21:14:19.202221Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:14:19.202342Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:14:19.224332Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:14:19.224708Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:14:19.252026Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:14:19.252940Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:135:2159], cookie=12345, session=0, seqNo=0) 2025-06-24T21:14:19.253125Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:14:19.273859Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:135:2159], cookie=12345, session=1) 2025-06-24T21:14:19.274704Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:142:2164], cookie=23456, session=1, seqNo=0) 2025-06-24T21:14:19.289336Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:142:2164], cookie=23456, session=1) 2025-06-24T21:14:19.701901Z node 4 :KESUS_TABLET INFO: 
tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-24T21:14:19.702022Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-24T21:14:19.729526Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-24T21:14:19.731934Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-24T21:14:19.769574Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-24T21:14:19.770581Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:135:2159], cookie=12345, session=0, seqNo=0) 2025-06-24T21:14:19.770753Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-24T21:14:19.788130Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:135:2159], cookie=12345, session=1) 2025-06-24T21:14:19.789071Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:142:2164], cookie=23456, session=1, seqNo=0) 2025-06-24T21:14:19.804753Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:142:2164], cookie=23456, session=1) >> KqpSysColV0::InnerJoinSelect |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamSelectRowAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 28464, MsgBus: 17905 2025-06-24T21:14:13.509191Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627251588080510:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:13.517111Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a17/r3tmp/tmpBatp2F/pdisk_1.dat 2025-06-24T21:14:13.961747Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:14.010824Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:14.010922Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 28464, node 1 2025-06-24T21:14:14.013095Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:14.087970Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:14.088002Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:14.088016Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:14.088156Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17905 2025-06-24T21:14:14.523306Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17905 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:14.769036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:14.797509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:14.978697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:15.166879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:15.272689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:17.021959Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627268767951288:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.022109Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.323869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.373500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.454636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.532192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.574653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.652950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.698365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.770247Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627268767951954:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.770346Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.770797Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627268767951959:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.774624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:17.786425Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627268767951961:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:14:17.881305Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627268767952012:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:18.508159Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627251588080510:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:18.508257Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:19.109594Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799659140, txId: 281474976710672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::SelectRowAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 1309, MsgBus: 6487 2025-06-24T21:14:13.598510Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627255181082049:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:13.598587Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a0d/r3tmp/tmp09GZwj/pdisk_1.dat 2025-06-24T21:14:14.007265Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627255181081856:2079] 1750799653578869 != 1750799653578872 2025-06-24T21:14:14.017450Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1309, node 1 2025-06-24T21:14:14.097139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:14.101675Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:14.115695Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:14.146000Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:14.146023Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:14.146036Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:14.146169Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6487 2025-06-24T21:14:14.607093Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6487 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:14.825925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:14.854740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:15.073762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:15.274395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:15.361910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:17.255012Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627272360952670:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.255294Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.561023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.602552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.642101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.676627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.722321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.760247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.802797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.874687Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627272360953327:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.874777Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.875074Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627272360953332:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.879052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:17.896992Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627272360953334:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:14:17.977662Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627272360953385:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:18.598850Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627255181082049:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:18.618688Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |95.6%| [TA] $(B)/ydb/core/kesus/tablet/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpSysColV1::InnerJoinSelect |95.6%| [TA] {RESULT} $(B)/ydb/core/kesus/tablet/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamInnerJoinSelect [GOOD] Test command err: Trying to start YDB, gRPC: 27802, MsgBus: 21579 2025-06-24T21:14:13.438609Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627253444265935:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:13.438678Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a15/r3tmp/tmpj17Shj/pdisk_1.dat 2025-06-24T21:14:13.864608Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:13.864752Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:13.897646Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:13.899457Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627253444265915:2079] 1750799653437435 != 1750799653437438 2025-06-24T21:14:13.909825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27802, node 1 2025-06-24T21:14:14.005149Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:14.005170Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:14.005177Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:14.005286Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21579 2025-06-24T21:14:14.508410Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21579 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:14.702437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:14.731553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:14:14.748008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:14.902769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:15.071210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:15.163251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:16.944117Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627266329169442:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:16.944234Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.267845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.307301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.347131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.390790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.419444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.456494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.510469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.596990Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627270624137395:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.597058Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.597133Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627270624137400:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.601781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:17.645925Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627270624137402:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:14:17.724921Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627270624137453:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:18.438947Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627253444265935:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:18.439023Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:19.536561Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799659560, txId: 281474976715672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsRange3 [GOOD] Test command err: Trying to start YDB, gRPC: 3722, MsgBus: 15045 2025-06-24T21:14:13.423929Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627253940594134:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:13.424310Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a14/r3tmp/tmpkWRU8q/pdisk_1.dat 2025-06-24T21:14:13.844483Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627253940593945:2079] 1750799653376600 != 1750799653376603 2025-06-24T21:14:13.857789Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3722, node 1 2025-06-24T21:14:13.892432Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:13.892554Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:13.893551Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:14.018916Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:14.018940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:14.018958Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:14.024773Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15045 2025-06-24T21:14:14.403787Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15045 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:14.699596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:14.717013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:14:14.733761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:14.890660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:15.061273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:15.133431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:17.105531Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627271120464776:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.105699Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.505176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.559573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.601978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.636223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.670368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.707984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.751755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:17.831800Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627271120465434:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.831893Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.832143Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627271120465439:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:17.836412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:17.847855Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627271120465441:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:14:17.926353Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627271120465492:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:18.403306Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627253940594134:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:18.403379Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:19.360742Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799659347, txId: 281474976710672] shutting down >> KqpSysColV1::SelectRange >> KqpSystemView::PartitionStatsParametricRanges >> KqpSysColV0::SelectRowById >> TSchemeShardServerLess::StorageBillingLabels [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::StorageBillingLabels [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:13:06.495554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:13:06.495706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:13:06.495764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:13:06.495806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:13:06.501269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:13:06.501371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:13:06.501498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:13:06.501614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:13:06.504331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: 
HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:13:06.504819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:13:06.650097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:13:06.650171Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:06.683779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:13:06.684339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:13:06.684539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:13:06.701556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:13:06.701868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:13:06.709199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:06.709776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:13:06.736411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:13:06.737434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:13:06.746462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:13:06.746564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:13:06.746859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:13:06.746920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:13:06.747024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:13:06.747122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:13:06.758535Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:13:06.917199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:13:06.917490Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:06.917693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:13:06.917755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:13:06.917960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:13:06.918091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:13:06.924279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:06.924507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:13:06.924717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:06.924789Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:13:06.924852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:13:06.924902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:13:06.932591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:06.932666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:13:06.932727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:13:06.948443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:06.948530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:13:06.948583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:06.948642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:13:06.961460Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:13:06.968591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:13:06.968847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:13:06.969889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:13:06.970049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:13:06.970130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:06.970431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:13:06.970488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:13:06.970662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:13:06.970735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:13:06.973769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:13:06.973817Z node 1 :FLAT_TX_SCHEMESHARD ... 
ributesVersion: 2, at schemeshard: 72075186233409549 2025-06-24T21:13:07.425790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:589: Cannot publish paths for unknown operation id#0 2025-06-24T21:13:07.426361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T21:13:07.426464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 105 2025-06-24T21:13:07.426508Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-24T21:13:07.426544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 5 2025-06-24T21:13:07.426598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-24T21:13:07.426684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-24T21:13:07.430601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5889: Handle TEvSyncTenantSchemeShard, at schemeshard: 72057594046678944, msg: DomainSchemeShard: 72057594046678944 DomainPathId: 3 TabletID: 72075186233409549 Generation: 2 EffectiveACLVersion: 0 SubdomainVersion: 2 UserAttributesVersion: 2 TenantHive: 18446744073709551615 TenantSysViewProcessor: 18446744073709551615 TenantRootACL: "" TenantStatisticsAggregator: 18446744073709551615 TenantGraphShard: 18446744073709551615 2025-06-24T21:13:07.430691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:26: TTxSyncTenant DoExecute, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:13:07.430780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:568: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 3], Generation: 2, ActorId:[1:572:2508], EffectiveACLVersion: 0, SubdomainVersion: 2, UserAttributesVersion: 2, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 2, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:13:07.431053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409549 2025-06-24T21:13:07.431104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409549, txId: 0, path id: [OwnerId: 72075186233409549, LocalPathId: 1] 2025-06-24T21:13:07.431292Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409549 2025-06-24T21:13:07.431335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:663:2573], at schemeshard: 72075186233409549, txId: 0, path id: 1 2025-06-24T21:13:07.431966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-24T21:13:07.432032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:13:07.432179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72075186233409549, cookie: 0 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-24T21:13:07.432420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-24T21:13:07.432463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-24T21:13:07.432846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-24T21:13:07.432960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-24T21:13:07.432998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [1:743:2633] TestWaitNotification: OK eventTxId 105 ... 
waiting for metering 2025-06-24T21:13:12.187461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:13:12.187533Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:12.241707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:13:12.241782Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:12.284729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:13:12.284798Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:30.843536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:13:30.843701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:90: TTxServerlessStorageBilling: initiate at first time, schemeshardId: 72075186233409549, domainId: [OwnerId: 72057594046678944, LocalPathId: 3], now: 1970-01-01T00:01:00.000000Z, set LastBillTime: 1970-01-01T00:01:00.000000Z, next retry at: 1970-01-01T00:02:00.000000Z 2025-06-24T21:13:30.852080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:13:30.947439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6712: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944 2025-06-24T21:13:30.947592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-24T21:13:30.947686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-24T21:13:31.023128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6712: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546 2025-06-24T21:13:31.023277Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546 2025-06-24T21:13:31.023383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546 2025-06-24T21:13:31.055989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6712: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409549 2025-06-24T21:13:31.056162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409549 2025-06-24T21:13:31.056250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409549 2025-06-24T21:13:55.878536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:13:55.878688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:121: TTxServerlessStorageBilling: too soon call, wait until current period ends, schemeshardId: 72075186233409549, domainId: [OwnerId: 72057594046678944, LocalPathId: 3], now: 1970-01-01T00:02:00.000000Z, LastBillTime: 1970-01-01T00:01:00.000000Z, lastBilled: 1970-01-01T00:01:00.000000Z--1970-01-01T00:01:59.000000Z, toBill: 1970-01-01T00:01:00.000000Z--1970-01-01T00:01:59.000000Z, next retry at: 1970-01-01T00:03:00.000000Z 
2025-06-24T21:13:55.878777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:13:55.979131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6712: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944 2025-06-24T21:13:55.979288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-24T21:13:55.979367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-24T21:13:56.043473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6712: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546 2025-06-24T21:13:56.043587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546 2025-06-24T21:13:56.043676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546 2025-06-24T21:13:56.099525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6712: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409549 2025-06-24T21:13:56.099634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409549 2025-06-24T21:13:56.099719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409549 2025-06-24T21:14:22.059935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:14:22.060389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:191: TTxServerlessStorageBilling: make a bill, record: '{"usage":{"start":120,"quantity":59,"finish":179,"type":"delta","unit":"byte*second"},"tags":{"ydb_size":0},"id":"72057594046678944-3-120-179-0","cloud_id":"CLOUD_ID_VAL","source_wt":180,"source_id":"sless-docapi-ydb-storage","resource_id":"DATABASE_ID_VAL","schema":"ydb.serverless.v1","labels":{"k":"v"},"folder_id":"FOLDER_ID_VAL","version":"1.0.0"} ', schemeshardId: 72075186233409549, domainId: [OwnerId: 72057594046678944, LocalPathId: 3], now: 1970-01-01T00:03:00.000000Z, LastBillTime: 1970-01-01T00:01:00.000000Z, lastBilled: 1970-01-01T00:01:00.000000Z--1970-01-01T00:01:59.000000Z, toBill: 1970-01-01T00:02:00.000000Z--1970-01-01T00:02:59.000000Z, next retry at: 1970-01-01T00:04:00.000000Z 2025-06-24T21:14:22.063867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete ... blocking NKikimr::NMetering::TEvMetering::TEvWriteMeteringJson from FLAT_SCHEMESHARD_ACTOR to TFakeMetering cookie 0 ... 
waiting for metering (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbLogStore::AlterLogTable [FAIL] Test command err: 2025-06-24T21:11:27.196229Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626542330796773:2195];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.196294Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00447f/r3tmp/tmp8qIabI/pdisk_1.dat 2025-06-24T21:11:27.870261Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.870346Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.885067Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.901403Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25675, node 1 2025-06-24T21:11:28.191681Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:28.191700Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:28.191706Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:28.191814Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:28.199372Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11623 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:28.719434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:28.992408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateColumnStore CreateColumnStore { Name: "LogStore" ColumnShardCount: 4 SchemaPresets { Name: "default" Schema { Columns { Name: "timestamp" Type: "Uint8" NotNull: true } Columns { Name: "resource_type" Type: "Utf8" NotNull: true } Columns { Name: "resource_id" Type: "Utf8" NotNull: true } Columns { Name: "uid" Type: "Utf8" NotNull: true } Columns { Name: "level" Type: "Int32" } Columns { Name: "message" Type: "Utf8" } Columns { Name: "json_payload" Type: "JsonDocument" } Columns { Name: "request_id" Type: "Utf8" } Columns { Name: "ingested_at" Type: "Timestamp" } Columns { Name: "saved_at" Type: "Timestamp" } KeyColumnNames: "timestamp" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" DefaultCompression { Codec: ColumnCodecLZ4 } } } } } TxId: 281474976715658 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:38072" , at schemeshard: 72057594046644480 2025-06-24T21:11:28.992908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: create_store.cpp:331: TCreateOlapStore Propose, path: /Root/LogStore, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-24T21:11:28.993476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: LogStore, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-24T21:11:28.993528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-06-24T21:11:28.993546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976715658:0 type: TxCreateOlapStore target path: [OwnerId: 72057594046644480, LocalPathId: 2] source path: 2025-06-24T21:11:28.993624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-24T21:11:28.993664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-24T21:11:28.993728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-24T21:11:28.993772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 4 2025-06-24T21:11:28.994090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 5 2025-06-24T21:11:28.997199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715658:0 1 -> 2 2025-06-24T21:11:28.997525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:11:28.997555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation 
type: ESchemeOpCreateColumnStore, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_store.cpp:451) 2025-06-24T21:11:28.997702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-24T21:11:28.997743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 6 2025-06-24T21:11:29.000220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976715658, response: Status: StatusAccepted TxId: 281474976715658 SchemeshardId: 72057594046644480 PathId: 2, at schemeshard: 72057594046644480 2025-06-24T21:11:29.000439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715658, database: /Root, subject: , status: StatusAccepted, operation: CREATE COLUMN STORE, path: /Root/LogStore 2025-06-24T21:11:29.000663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-24T21:11:29.000686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715658, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-24T21:11:29.000810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715658, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T21:11:29.000885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-24T21:11:29.000900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519626542330797188:2373], at schemeshard: 72057594046644480, txId: 281474976715658, path id: 1 2025-06-24T21:11:29.000916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519626542330797188:2373], at schemeshard: 72057594046644480, txId: 281474976715658, path id: 2 2025-06-24T21:11:29.000958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-24T21:11:29.000993Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976715658:0 ProgressState, operation type: TxCreateOlapStore, at tablet# 72057594046644480 2025-06-24T21:11:29.001802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:358: TCreateParts opId# 281474976715658:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 1 TabletType: ColumnShard ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 2 BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } 
BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StorageP ... IVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 64, TabletId: 72075186224037889 not found 2025-06-24T21:13:50.012755Z node 64 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 64, TabletId: 72075186224037891 not found 2025-06-24T21:13:50.012778Z node 64 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 64, TabletId: 72075186224037888 not found 2025-06-24T21:13:50.012983Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-24T21:13:50.013208Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-24T21:13:50.014495Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-24T21:13:50.016395Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-24T21:13:50.016431Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-24T21:13:50.016483Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-24T21:13:50.016493Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-24T21:13:50.016576Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-24T21:13:50.017515Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 
72057594046644480, LocalPathId: 2] was 1 2025-06-24T21:13:50.017635Z node 64 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[64:7519627148147081932:2285];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:865;event=tablet_die; 2025-06-24T21:13:50.017741Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-24T21:13:50.017761Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-24T21:13:50.017845Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T21:13:50.017864Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-24T21:13:50.017910Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-24T21:13:50.019411Z node 64 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 64, TabletId: 72075186224037890 not found 2025-06-24T21:13:50.021831Z node 64 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[64:7519627148147081925:2283];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:865;event=tablet_die; 2025-06-24T21:13:50.026102Z node 64 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[64:7519627148147081950:2286];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:865;event=tablet_die; 2025-06-24T21:13:50.030629Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-24T21:13:50.030677Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-24T21:13:50.030750Z node 64 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-24T21:13:55.201883Z node 67 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[67:7519627176737309477:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:13:55.204307Z node 67 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00447f/r3tmp/tmptBnpVt/pdisk_1.dat 2025-06-24T21:13:55.475503Z node 67 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:55.505502Z node 67 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 67 Type# 268639257 TServer::EnableGrpc on GrpcPort 2835, node 67 2025-06-24T21:13:55.521628Z node 67 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(67, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:55.521747Z node 67 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(67, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-24T21:13:55.533777Z node 67 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(67, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:55.576405Z node 67 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:55.576435Z node 67 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:55.576446Z node 67 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:55.576630Z node 67 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14318 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:13:56.037006Z node 67 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:13:56.146009Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateColumnStore CreateColumnStore { Name: "LogStore" ColumnShardCount: 4 SchemaPresets { Name: "default" Schema { Columns { Name: "timestamp" Type: "Timestamp" NotNull: true } Columns { Name: "resource_type" Type: "Utf8" NotNull: true } Columns { Name: "resource_id" Type: "Utf8" NotNull: true } Columns { Name: "uid" Type: "Utf8" NotNull: true } Columns { Name: "level" Type: "Int32" } Columns { Name: "message" Type: "Utf8" } Columns { Name: "json_payload" Type: "JsonDocument" } Columns { Name: "request_id" Type: "Utf8" } Columns { Name: "ingested_at" Type: "Timestamp" } Columns { Name: "saved_at" Type: "Timestamp" } KeyColumnNames: "timestamp" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" DefaultCompression { Codec: ColumnCodecLZ4 } } } } } TxId: 281474976715658 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:58312" , at schemeshard: 72057594046644480 2025-06-24T21:13:56.146579Z node 67 :FLAT_TX_SCHEMESHARD NOTICE: create_store.cpp:331: TCreateOlapStore Propose, path: /Root/LogStore, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-24T21:13:56.146616Z node 67 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715658:1, propose status:StatusPreconditionFailed, reason: Column stores are not supported, at schemeshard: 72057594046644480 2025-06-24T21:13:56.152351Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976715658, response: Status: StatusPreconditionFailed Reason: "Column stores are not supported" TxId: 281474976715658 SchemeshardId: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T21:13:56.152671Z node 67 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715658, database: /Root, subject: , status: StatusPreconditionFailed, reason: Column stores are not supported, operation: CREATE COLUMN STORE, path: /Root/LogStore 2025-06-24T21:13:56.152885Z node 67 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [67:7519627181032277676:2596] txid# 281474976715658, issues: { message: "Column stores are not supported" severity: 1 } 2025-06-24T21:13:56.245005Z node 67 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; assertion failed at ydb/services/ydb/ydb_logstore_ut.cpp:435, virtual void NTestSuiteYdbLogStore::TTestCaseAlterLogTable::Execute_(NUnitTest::TTestContext &): (res.GetStatus() == EStatus::SUCCESS) failed: (PRECONDITION_FAILED != SUCCESS)
: Error: Column stores are not supported , with diff: (PRE|SUC)C(ONDITION_FAIL|)E(D|SS) TBackTrace::Capture()+28 (0x1D47B5AC) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x1D93A5E0) NTestSuiteYdbLogStore::TTestCaseAlterLogTable::Execute_(NUnitTest::TTestContext&)+8821 (0x1CFA02C5) std::__y1::__function::__func, void ()>::operator()()+280 (0x1CFC9228) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x1D9717C6) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x1D941169) NTestSuiteYdbLogStore::TCurrentTest::Execute()+1204 (0x1CFC83F4) NUnitTest::TTestFactory::Execute()+2438 (0x1D942A36) NUnitTest::RunMain(int, char**)+5213 (0x1D96BD3D) ??+0 (0x7F9518BE0D90) __libc_start_main+128 (0x7F9518BE0E40) _start+41 (0x19C70029) |95.7%| [TA] $(B)/ydb/core/tx/schemeshard/ut_serverless/test-results/unittest/{meta.json ... results_accumulator.log} |95.7%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_serverless/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpSysColV1::InnerJoinSelectAsterisk >> KqpSysColV0::InnerJoinSelectAsterisk [GOOD] >> KqpSystemView::FailResolve [GOOD] >> TopicAutoscaling::PartitionSplit_ManySession_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_AutosplitByLoad >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReBalancingAfterSplit_sessionsWithPartition ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::InnerJoinSelectAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 24135, MsgBus: 21756 2025-06-24T21:14:16.953378Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627265570345132:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:16.953746Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a00/r3tmp/tmpbFUkS5/pdisk_1.dat 2025-06-24T21:14:17.404944Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627265570345028:2079] 1750799656942098 != 1750799656942101 2025-06-24T21:14:17.406328Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:17.406437Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:17.408322Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:17.413961Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24135, node 1 2025-06-24T21:14:17.499037Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:17.499066Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:17.499074Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:17.499221Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration TClient is connected to server localhost:21756 2025-06-24T21:14:17.959292Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21756 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:18.166536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:18.208455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:18.355625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:18.535741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:18.631517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:14:20.421654Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627282750215865:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:20.421815Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:20.776250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:20.817057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:20.899000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:20.941221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:21.017535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:21.098370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:21.171849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:21.243772Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627287045183833:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:21.243889Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:21.247445Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627287045183838:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:21.252337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:21.267396Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627287045183840:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:14:21.368218Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627287045183891:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:21.949098Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627265570345132:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:21.949183Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant [FAIL] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant-StrictAclCheck ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::FailResolve [GOOD] Test command err: Trying to start YDB, gRPC: 18454, MsgBus: 14005 2025-06-24T21:14:18.292675Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627275837749619:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:18.292765Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049ee/r3tmp/tmpdcVbcY/pdisk_1.dat 2025-06-24T21:14:18.678407Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627275837749597:2079] 1750799658291212 != 1750799658291215 2025-06-24T21:14:18.688645Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18454, node 1 2025-06-24T21:14:18.741836Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:18.741944Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:18.754353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:18.842276Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:18.842301Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:18.842308Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:18.842429Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14005 TClient is connected to server localhost:14005 2025-06-24T21:14:19.316553Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:19.497423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:19.516932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:14:19.524790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:19.677809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:19.852676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:14:19.933386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:21.812189Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627288722653120:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:21.812338Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:22.218655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:22.269123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:22.314404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:22.354317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:22.382106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:22.415206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:22.470899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:22.569778Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627293017621077:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:22.569856Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:22.569900Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627293017621082:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:22.573278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:22.589346Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627293017621084:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:14:22.654545Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627293017621135:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:23.295272Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627275837749619:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:23.295364Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:23.774103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:14:23.924223Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7519627297312588759:3630], for# user0@builtin, access# SelectRow 2025-06-24T21:14:23.924502Z node 1 :KQP_EXECUTER ERROR: kqp_table_resolver.cpp:275: TxId: 281474976715674. Error resolving keys for entry: { TableId: [OwnerId: 72057594046644480, LocalPathId: 1] Access: 1 SyncVersion: false Status: AccessDenied Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Uint64 : NULL, Uint64 : NULL, Uint64 : NULL, Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-24T21:14:23.952322Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MjY0NzlmYmMtMzBiMmFlODktMzE3NTYyZjItZWYxNzA2ZmM=, ActorId: [1:7519627297312588732:2478], ActorState: ExecuteState, TraceId: 01jyhwngntebft43ahm34s9h7e, Create QueryResponse for error on request, msg: 2025-06-24T21:14:23.953126Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799663922, txId: 281474976715673] shutting down 2025-06-24T21:14:23.953814Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jyhwngntebft43ahm34s9h7e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjY0NzlmYmMtMzBiMmFlODktMzE3NTYyZjItZWYxNzA2ZmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root
>> KqpSysColV0::UpdateAndDelete [GOOD]
>> KqpSystemView::NodesSimple [GOOD]
>> ReadSessionImplTest::UsesOnRetryStateDuringRetries
>> CompressExecutor::TestReorderedExecutor
>> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression
>> ReadSessionImplTest::ReconnectOnTmpError [GOOD]
>> ReadSessionImplTest::ReconnectOnTmpErrorAndThenTimeout [GOOD]
>> ReadSessionImplTest::ReconnectOnTimeout [GOOD]
>> ReadSessionImplTest::ReconnectOnTimeoutAndThenCreate [GOOD]
>> ReadSessionImplTest::ReconnectsAfterFailure [GOOD]
>> ReadSessionImplTest::SimpleDataHandlers
>> ReadSessionImplTest::ForcefulDestroyPartitionStream [GOOD]
>> ReadSessionImplTest::DestroyPartitionStreamRequest [GOOD]
>> ReadSessionImplTest::DecompressZstdEmptyMessage [GOOD]
>> ReadSessionImplTest::PacksBatches_BatchABitBiggerThanLimit [GOOD]
>> ReadSessionImplTest::PacksBatches_BatchesEqualToServerBatches [GOOD]
>> ReadSessionImplTest::HoleBetweenOffsets [GOOD]
>> ReadSessionImplTest::LOGBROKER_7702 [GOOD]
>> ReadSessionImplTest::SuccessfulInit [GOOD]
>> ReadSessionImplTest::DecompressRaw [GOOD]
>> ReadSessionImplTest::SuccessfulInitAndThenTimeoutCallback [GOOD]
>> ReadSessionImplTest::DecompressGzip [GOOD]
>> ReadSessionImplTest::DecompressZstd [GOOD]
>> ReadSessionImplTest::StopsRetryAfterFailedAttempt [GOOD]
>> ReadSessionImplTest::DecompressRawEmptyMessage [GOOD]
>> ReadSessionImplTest::StopsRetryAfterTimeout [GOOD]
>> ReadSessionImplTest::DecompressGzipEmptyMessage [GOOD]
>> ReadSessionImplTest::UnpackBigBatchWithTwoPartitions [GOOD]
>> ReadSessionImplTest::UsesOnRetryStateDuringRetries [GOOD]
>> RetryPolicy::TWriteSession_TestPolicy
>> ReadSessionImplTest::DecompressWithSynchronousExecutor [GOOD]
>> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease
>> ReadSessionImplTest::DataReceivedCallbackReal
>> YdbOlapStore::LogExistingUserId [GOOD]
>> ReadSessionImplTest::ProperlyOrdersDecompressedData [GOOD]
>> ReadSessionImplTest::PacksBatches_ExactlyTwoMessagesInBatch [GOOD]
>> ReadSessionImplTest::PacksBatches_OneMessageInEveryBatch
>> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly
>> ReadSessionImplTest::PacksBatches_OneMessageInEveryBatch [GOOD]
>> ReadSessionImplTest::PacksBatches_BigBatchDecompressWithTwoBatchTasks
>> ReadSessionImplTest::SimpleDataHandlers [GOOD]
>> ReadSessionImplTest::SimpleDataHandlersWithCommit
>> ApplyClusterEndpointTest::NoPorts [GOOD]
>> ApplyClusterEndpointTest::PortFromCds [GOOD]
>> ApplyClusterEndpointTest::PortFromDriver [GOOD]
>> BasicUsage::MaxByteSizeEqualZero
>> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD]
>> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease [GOOD]
>> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit
>> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD]
>> TopicAutoscaling::WithDir_PartitionSplit_AutosplitByLoad [GOOD]
>> WithSDK::DescribeConsumer
------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::LOGBROKER_7702 [GOOD]
Test command err: 2025-06-24T21:14:26.203113Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.203191Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.203220Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.203614Z :DEBUG: [db] [sessionid] [cluster]
Successfully connected. Initializing session 2025-06-24T21:14:26.204092Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.215294Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.215657Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:26.216647Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.216661Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.216673Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.216964Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.217408Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.217496Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.217634Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:26.217903Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-24T21:14:26.218639Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.218700Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.218721Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.218981Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.219605Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.219682Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.219943Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:26.221480Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.223545Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:14:26.223648Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.223683Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-24T21:14:26.224472Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.224536Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.224561Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.224892Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-24T21:14:26.225370Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.225514Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.225861Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 11 Compressed message data size: 31 2025-06-24T21:14:26.228421Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:14:26.228711Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-24T21:14:26.229020Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-24T21:14:26.229178Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-24T21:14:26.229268Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.229314Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-24T21:14:26.229342Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-24T21:14:26.229517Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 GOT RANGE 0 3 Getting new event 2025-06-24T21:14:26.230410Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-24T21:14:26.230439Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-24T21:14:26.230492Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-24T21:14:26.230626Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 GOT RANGE 3 5 Getting new event 2025-06-24T21:14:26.230697Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-24T21:14:26.230719Z :DEBUG: Take Data. Partition 1. 
Read: {0, 1} (6-6) 2025-06-24T21:14:26.230738Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-24T21:14:26.230816Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 GOT RANGE 5 7 Getting new event 2025-06-24T21:14:26.230870Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-24T21:14:26.230898Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-24T21:14:26.230927Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-24T21:14:26.231023Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [7, 9). Partition stream id: 1 GOT RANGE 7 9 2025-06-24T21:14:26.233199Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.233216Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.233271Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.233593Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.247398Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.247560Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.247800Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 10 Compressed message data size: 30 2025-06-24T21:14:26.248691Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:14:26.248930Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-24T21:14:26.249703Z :DEBUG: Decompression task done. 
Partition/PartitionSessionId: 1 (5-8) 2025-06-24T21:14:26.249923Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-24T21:14:26.250117Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.250157Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-24T21:14:26.250190Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-24T21:14:26.250210Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-24T21:14:26.250247Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 4, size 40 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-24T21:14:26.250461Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 5). Partition stream id: 1 GOT RANGE 0 5 Getting new event 2025-06-24T21:14:26.250571Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-24T21:14:26.250595Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-06-24T21:14:26.250616Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-24T21:14:26.250636Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-24T21:14:26.250658Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 4, size 40 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-24T21:14:26.250793Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 9). Partition stream id: 1 GOT RANGE 5 9 2025-06-24T21:14:26.251942Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.251971Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.251991Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.252363Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.252849Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.253058Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.253279Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:26.254252Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:14:26.255129Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:14:26.255438Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (10-11) 2025-06-24T21:14:26.255521Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-24T21:14:26.255596Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.255614Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-24T21:14:26.255633Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (10-10) 2025-06-24T21:14:26.255649Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (11-11) 2025-06-24T21:14:26.255684Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes 2025-06-24T21:14:26.255705Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes got data event: DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 11 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2025-06-24T21:14:26.255819Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 Got commit req { offset_ranges { assign_id: 1 end_offset: 3 } } RANGE 0 3 2025-06-24T21:14:26.255939Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 12). Partition stream id: 1 Got commit req { offset_ranges { assign_id: 1 start_offset: 3 end_offset: 12 } } RANGE 3 12 >> KqpSystemView::PartitionStatsRanges [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD] Test command err: 2025-06-24T21:14:26.152040Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.152091Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.152125Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.159480Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-24T21:14:26.159556Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.159602Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.171688Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.009851s 2025-06-24T21:14:26.173091Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.179280Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-24T21:14:26.179424Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.186959Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.186990Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.187012Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.187412Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-24T21:14:26.187464Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.187506Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.187571Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.008805s 2025-06-24T21:14:26.188142Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-24T21:14:26.190673Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-24T21:14:26.190748Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.191998Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.192024Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.192045Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.192602Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-24T21:14:26.192658Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.192682Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.192786Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.182241s 2025-06-24T21:14:26.193287Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.193725Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-24T21:14:26.193797Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.194769Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.194794Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.194834Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.195215Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-24T21:14:26.195257Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.195344Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.195439Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.209818s 2025-06-24T21:14:26.195856Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.196188Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-24T21:14:26.196251Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.197669Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.197699Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.197726Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.197990Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.198402Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.210321Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.210678Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TRANSPORT_UNAVAILABLE. Description:
: Error: GRpc error: (14): 2025-06-24T21:14:26.210714Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.210739Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.210817Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.238447s 2025-06-24T21:14:26.211075Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-24T21:14:26.212455Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.212546Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.212605Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.212916Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.213296Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.213429Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.213878Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:26.314918Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.315432Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-24T21:14:26.315519Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.315581Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-24T21:14:26.315672Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-24T21:14:26.416265Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-24T21:14:26.416480Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-24T21:14:26.417699Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.417917Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.417947Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.418282Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.418736Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.418903Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.419950Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:26.524088Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.524654Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-24T21:14:26.524729Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.524776Z :DEBUG: Take Data. Partition 1. 
Read: {1, 0} (2-2) 2025-06-24T21:14:26.524872Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 2025-06-24T21:14:26.524990Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-24T21:14:26.526672Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-24T21:14:26.530388Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-24T21:14:26.530565Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::UpdateAndDelete [GOOD] Test command err: Trying to start YDB, gRPC: 11194, MsgBus: 12639 2025-06-24T21:14:17.326539Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627272272921348:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:17.326568Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049f4/r3tmp/tmpQpMcVS/pdisk_1.dat 2025-06-24T21:14:17.759294Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627272272921328:2079] 1750799657322783 != 1750799657322786 2025-06-24T21:14:17.776557Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:17.779355Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:17.779462Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:17.784281Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11194, node 1 2025-06-24T21:14:17.866873Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:17.866889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:17.866893Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:17.866982Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12639 TClient is connected to server localhost:12639 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:14:18.431295Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:18.588555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:18.632245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:14:18.655021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:18.818759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:14:19.003533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:19.088723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:14:20.927366Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627285157824879:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:20.927483Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:21.336637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:21.403984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:21.452274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:21.486379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:21.563405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:21.642959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:21.696575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:21.817530Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627289452792844:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:21.817633Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:21.817690Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627289452792849:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:21.821507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:21.834223Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627289452792851:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:14:21.918921Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627289452792904:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:22.327045Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627272272921348:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:22.327165Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD] Test command err: 2025-06-24T21:14:26.213921Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.213951Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.213995Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.214378Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.214780Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-24T21:14:26.214858Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.215749Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.215767Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.215782Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.216032Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.216317Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-24T21:14:26.216364Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.217140Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.217167Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.217199Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.217518Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-24T21:14:26.217565Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.217595Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.217705Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: INTERNAL_ERROR Issues: "
: Error: Failed to establish connection to server "" ( cluster cluster). Attempts done: 1 " } 2025-06-24T21:14:26.218281Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.218293Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.218304Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.219019Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-24T21:14:26.219062Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.219079Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.219140Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: TIMEOUT Issues: "
: Error: Failed to establish connection to server. Attempts done: 1 " } 2025-06-24T21:14:26.219819Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-24T21:14:26.220138Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-24T21:14:26.220163Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.220381Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.220787Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.231892Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-24T21:14:26.232245Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:26.232538Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 2. Cluster: "TestCluster". Topic: "TestTopic". Partition: 2. Read offset: (NULL) 2025-06-24T21:14:26.235397Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-50) 2025-06-24T21:14:26.235590Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.235623Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-24T21:14:26.235640Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-24T21:14:26.235652Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2025-06-24T21:14:26.235675Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2025-06-24T21:14:26.235702Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2025-06-24T21:14:26.235720Z :DEBUG: Take Data. Partition 1. Read: {0, 6} (7-7) 2025-06-24T21:14:26.235747Z :DEBUG: Take Data. Partition 1. Read: {0, 7} (8-8) 2025-06-24T21:14:26.235801Z :DEBUG: Take Data. Partition 1. Read: {0, 8} (9-9) 2025-06-24T21:14:26.235820Z :DEBUG: Take Data. Partition 1. Read: {0, 9} (10-10) 2025-06-24T21:14:26.235861Z :DEBUG: Take Data. Partition 1. Read: {0, 10} (11-11) 2025-06-24T21:14:26.235883Z :DEBUG: Take Data. Partition 1. Read: {0, 11} (12-12) 2025-06-24T21:14:26.235900Z :DEBUG: Take Data. Partition 1. Read: {0, 12} (13-13) 2025-06-24T21:14:26.235918Z :DEBUG: Take Data. Partition 1. Read: {0, 13} (14-14) 2025-06-24T21:14:26.235934Z :DEBUG: Take Data. Partition 1. Read: {0, 14} (15-15) 2025-06-24T21:14:26.235951Z :DEBUG: Take Data. Partition 1. Read: {0, 15} (16-16) 2025-06-24T21:14:26.236004Z :DEBUG: Take Data. Partition 1. Read: {0, 16} (17-17) 2025-06-24T21:14:26.236027Z :DEBUG: Take Data. Partition 1. Read: {0, 17} (18-18) 2025-06-24T21:14:26.236046Z :DEBUG: Take Data. Partition 1. Read: {0, 18} (19-19) 2025-06-24T21:14:26.236071Z :DEBUG: Take Data. Partition 1. Read: {0, 19} (20-20) 2025-06-24T21:14:26.236094Z :DEBUG: Take Data. Partition 1. Read: {0, 20} (21-21) 2025-06-24T21:14:26.236120Z :DEBUG: Take Data. Partition 1. Read: {0, 21} (22-22) 2025-06-24T21:14:26.236143Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (23-23) 2025-06-24T21:14:26.236173Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (24-24) 2025-06-24T21:14:26.236189Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (25-25) 2025-06-24T21:14:26.236204Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (26-26) 2025-06-24T21:14:26.236220Z :DEBUG: Take Data. Partition 1. Read: {1, 4} (27-27) 2025-06-24T21:14:26.236236Z :DEBUG: Take Data. Partition 1. 
Read: {1, 5} (28-28) 2025-06-24T21:14:26.236252Z :DEBUG: Take Data. Partition 1. Read: {1, 6} (29-29) 2025-06-24T21:14:26.236268Z :DEBUG: Take Data. Partition 1. Read: {1, 7} (30-30) 2025-06-24T21:14:26.236279Z :DEBUG: Take Data. Partition 1. Read: {1, 8} (31-31) 2025-06-24T21:14:26.236297Z :DEBUG: Take Data. Partition 1. Read: {1, 9} (32-32) 2025-06-24T21:14:26.236352Z :DEBUG: Take Data. Partition 1. Read: {1, 10} (33-33) 2025-06-24T21:14:26.236363Z :DEBUG: Take Data. Partition 1. Read: {1, 11} (34-34) 2025-06-24T21:14:26.236377Z :DEBUG: Take Data. Partition 1. Read: {1, 12} (35-35) 2025-06-24T21:14:26.236388Z :DEBUG: Take Data. Partition 1. Read: {1, 13} (36-36) 2025-06-24T21:14:26.236398Z :DEBUG: Take Data. Partition 1. Read: {1, 14} (37-37) 2025-06-24T21:14:26.236408Z :DEBUG: Take Data. Partition 1. Read: {1, 15} (38-38) 2025-06-24T21:14:26.236417Z :DEBUG: Take Data. Partition 1. Read: {1, 16} (39-39) 2025-06-24T21:14:26.236426Z :DEBUG: Take Data. Partition 1. Read: {1, 17} (40-40) 2025-06-24T21:14:26.236436Z :DEBUG: Take Data. Partition 1. Read: {1, 18} (41-41) 2025-06-24T21:14:26.236444Z :DEBUG: Take Data. Partition 1. Read: {1, 19} (42-42) 2025-06-24T21:14:26.236453Z :DEBUG: Take Data. Partition 1. Read: {1, 20} (43-43) 2025-06-24T21:14:26.236462Z :DEBUG: Take Data. Partition 1. Read: {1, 21} (44-44) 2025-06-24T21:14:26.236471Z :DEBUG: Take Data. Partition 1. Read: {1, 22} (45-45) 2025-06-24T21:14:26.236479Z :DEBUG: Take Data. Partition 1. Read: {1, 23} (46-46) 2025-06-24T21:14:26.236494Z :DEBUG: Take Data. Partition 1. Read: {1, 24} (47-47) 2025-06-24T21:14:26.236516Z :DEBUG: Take Data. Partition 1. Read: {1, 25} (48-48) 2025-06-24T21:14:26.236531Z :DEBUG: Take Data. Partition 1. Read: {1, 26} (49-49) 2025-06-24T21:14:26.236541Z :DEBUG: Take Data. Partition 1. Read: {1, 27} (50-50) 2025-06-24T21:14:26.236605Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2025-06-24T21:14:26.238675Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 2 (51-100) 2025-06-24T21:14:26.238822Z :DEBUG: Take Data. Partition 2. Read: {0, 0} (51-51) 2025-06-24T21:14:26.238847Z :DEBUG: Take Data. Partition 2. Read: {0, 1} (52-52) 2025-06-24T21:14:26.238870Z :DEBUG: Take Data. Partition 2. Read: {0, 2} (53-53) 2025-06-24T21:14:26.238886Z :DEBUG: Take Data. Partition 2. Read: {0, 3} (54-54) 2025-06-24T21:14:26.238903Z :DEBUG: Take Data. Partition 2. Read: {0, 4} (55-55) 2025-06-24T21:14:26.238914Z :DEBUG: Take Data. Partition 2. Read: {0, 5} (56-56) 2025-06-24T21:14:26.238924Z :DEBUG: Take Data. Partition 2. Read: {0, 6} (57-57) 2025-06-24T21:14:26.238933Z :DEBUG: Take Data. Partition 2. Read: {0, 7} (58-58) 2025-06-24T21:14:26.238950Z :DEBUG: Take Data. Partition 2. Read: {0, 8} (59-59) 2025-06-24T21:14:26.238970Z :DEBUG: Take Data. Partition 2. Read: {0, 9} (60-60) 2025-06-24T21:14:26.238982Z :DEBUG: Take Data. Partition 2. Read: {0, 10} (61-61) 2025-06-24T21:14:26.238997Z :DEBUG: Take Data. Partition 2. Read: {0, 11} (62-62) 2025-06-24T21:14:26.239006Z :DEBUG: Take Data. Partition 2. Read: {0, 12} (63-63) 2025-06-24T21:14:26.239024Z :DEBUG: Take Data. Partition 2. Read: {0, 13} (64-64) 2025-06-24T21:14:26.239036Z :DEBUG: Take Data. Partition 2. Read: {0, 14} (65-65) 2025-06-24T21:14:26.239046Z :DEBUG: Take Data. Partition 2. Read: {0, 15} (66-66) 2025-06-24T21:14:26.239072Z :DEBUG: Take Data. Partition 2. Read: {0, 16} (67-67) 2025-06-24T21:14:26.239082Z :DEBUG: Take Data. Partition 2. 
Read: {0, 17} (68-68) 2025-06-24T21:14:26.239099Z :DEBUG: Take Data. Partition 2. Read: {0, 18} (69-69) 2025-06-24T21:14:26.239114Z :DEBUG: Take Data. Partition 2. Read: {0, 19} (70-70) 2025-06-24T21:14:26.239123Z :DEBUG: Take Data. Partition 2. Read: {0, 20} (71-71) 2025-06-24T21:14:26.239132Z :DEBUG: Take Data. Partition 2. Read: {0, 21} (72-72) 2025-06-24T21:14:26.239157Z :DEBUG: Take Data. Partition 2. Read: {1, 0} (73-73) 2025-06-24T21:14:26.239174Z :DEBUG: Take Data. Partition 2. Read: {1, 1} (74-74) 2025-06-24T21:14:26.239186Z :DEBUG: Take Data. Partition 2. Read: {1, 2} (75-75) 2025-06-24T21:14:26.239199Z :DEBUG: Take Data. Partition 2. Read: {1, 3} (76-76) 2025-06-24T21:14:26.239208Z :DEBUG: Take Data. Partition 2. Read: {1, 4} (77-77) 2025-06-24T21:14:26.239225Z :DEBUG: Take Data. Partition 2. Read: {1, 5} (78-78) 2025-06-24T21:14:26.239235Z :DEBUG: Take Data. Partition 2. Read: {1, 6} (79-79) 2025-06-24T21:14:26.239252Z :DEBUG: Take Data. Partition 2. Read: {1, 7} (80-80) 2025-06-24T21:14:26.239265Z :DEBUG: Take Data. Partition 2. Read: {1, 8} (81-81) 2025-06-24T21:14:26.239274Z :DEBUG: Take Data. Partition 2. Read: {1, 9} (82-82) 2025-06-24T21:14:26.239319Z :DEBUG: Take Data. Partition 2. Read: {1, 10} (83-83) 2025-06-24T21:14:26.239334Z :DEBUG: Take Data. Partition 2. Read: {1, 11} (84-84) 2025-06-24T21:14:26.239350Z :DEBUG: Take Data. Partition 2. Read: {1, 12} (85-85) 2025-06-24T21:14:26.239364Z :DEBUG: Take Data. Partition 2. Read: {1, 13} (86-86) 2025-06-24T21:14:26.239372Z :DEBUG: Take Data. Partition 2. Read: {1, 14} (87-87) 2025-06-24T21:14:26.239381Z :DEBUG: Take Data. Partition 2. Read: {1, 15} (88-88) 2025-06-24T21:14:26.239390Z :DEBUG: Take Data. Partition 2. Read: {1, 16} (89-89) 2025-06-24T21:14:26.239399Z :DEBUG: Take Data. Partition 2. Read: {1, 17} (90-90) 2025-06-24T21:14:26.239408Z :DEBUG: Take Data. Partition 2. Read: {1, 18} (91-91) 2025-06-24T21:14:26.239420Z :DEBUG: Take Data. Partition 2. Read: {1, 19} (92-92) 2025-06-24T21:14:26.239429Z :DEBUG: Take Data. Partition 2. Read: {1, 20} (93-93) 2025-06-24T21:14:26.239440Z :DEBUG: Take Data. Partition 2. Read: {1, 21} (94-94) 2025-06-24T21:14:26.239449Z :DEBUG: Take Data. Partition 2. Read: {1, 22} (95-95) 2025-06-24T21:14:26.239498Z :DEBUG: Take Data. Partition 2. Read: {1, 23} (96-96) 2025-06-24T21:14:26.239564Z :DEBUG: Take Data. Partition 2. Read: {1, 24} (97-97) 2025-06-24T21:14:26.239576Z :DEBUG: Take Data. Partition 2. Read: {1, 25} (98-98) 2025-06-24T21:14:26.239586Z :DEBUG: Take Data. Partition 2. Read: {1, 26} (99-99) 2025-06-24T21:14:26.239595Z :DEBUG: Take Data. Partition 2. Read: {1, 27} (100-100) 2025-06-24T21:14:26.239628Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2025-06-24T21:14:26.239720Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-24T21:14:26.240632Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.240662Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.240727Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.241045Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-24T21:14:26.241408Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.241511Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.241820Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:26.342951Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.343207Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-24T21:14:26.343276Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.343322Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-24T21:14:26.343395Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-24T21:14:26.549042Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 2025-06-24T21:14:26.650317Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-24T21:14:26.650855Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-24T21:14:26.651084Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-24T21:14:26.652588Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.652614Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.652635Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.653058Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.659570Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.659815Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.663732Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:26.764868Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.767362Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-24T21:14:26.767488Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.767545Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-24T21:14:26.767648Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 2025-06-24T21:14:26.767893Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-24T21:14:26.767964Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-24T21:14:26.770989Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". 
Partition: 1 2025-06-24T21:14:26.771205Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster >> KqpSysColV1::InnerJoinTables [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::NodesSimple [GOOD] Test command err: Trying to start YDB, gRPC: 28398, MsgBus: 10494 2025-06-24T21:14:16.177536Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627266412170156:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:16.177629Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:16.219735Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627266355308471:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:16.219794Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:16.259648Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519627268446486465:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:16.325004Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a07/r3tmp/tmph0GhPc/pdisk_1.dat 2025-06-24T21:14:16.862411Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:16.923838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:16.923974Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:16.924164Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:16.924204Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:16.924763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:16.924807Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:16.928805Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-24T21:14:16.928854Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:14:16.929098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:16.929984Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:16.931586Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28398, 
node 1 2025-06-24T21:14:17.155840Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:17.155869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:17.155882Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:17.156070Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:14:17.202576Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:17.247926Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:17.329433Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10494 TClient is connected to server localhost:10494 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:18.282935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:18.335532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:18.578030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:14:19.159301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:19.324188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:21.177982Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627266412170156:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:21.178057Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:21.223396Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519627266355308471:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:21.223473Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:21.251245Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519627268446486465:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:21.251312Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:21.380055Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627287887008582:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:21.380170Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:21.786586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:21.899672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:22.003324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:22.099519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:22.206924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:22.297882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:22.403499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:22.537704Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627292181976707:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:22.537811Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:22.538060Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627292181976712:2374], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:22.541377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:22.585490Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627292181976714:2375], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:14:22.664168Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627292181976787:4205] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:24.042379Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799664035, txId: 281474976715672] shutting down >> KqpSysColV0::InnerJoinSelect [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsRanges [GOOD] Test command err: Trying to start YDB, gRPC: 28519, MsgBus: 24719 2025-06-24T21:14:20.343903Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627284514451394:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:20.343961Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049d3/r3tmp/tmpGfcTUD/pdisk_1.dat 2025-06-24T21:14:20.811500Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:20.815319Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627284514451373:2079] 1750799660341601 != 1750799660341604 2025-06-24T21:14:20.817169Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:20.817303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:20.843470Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28519, node 1 2025-06-24T21:14:20.971851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:20.971874Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:20.971881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:20.971993Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24719 2025-06-24T21:14:21.362545Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24719 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:21.686794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:14:21.716237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:21.914491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:22.106534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:22.193922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:23.951734Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627297399354895:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:23.951856Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:24.213868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.242251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.266791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.292961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.320536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.406438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.441122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.546919Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627301694322857:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:24.546992Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:24.549764Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627301694322862:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:24.554062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:24.564860Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627301694322864:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:14:24.658667Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627301694322915:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:25.344832Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627284514451394:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:25.344912Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:26.231058Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799666219, txId: 281474976715672] shutting down >> KqpSysColV1::InnerJoinSelect [GOOD] >> KqpSysColV1::SelectRange [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::InnerJoinTables [GOOD] Test command err: Trying to start YDB, gRPC: 7123, MsgBus: 21322 2025-06-24T21:14:20.325918Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627285265544240:2172];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:20.334645Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049d9/r3tmp/tmpy5SgtH/pdisk_1.dat 2025-06-24T21:14:20.823566Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627285265544106:2079] 1750799660286981 != 1750799660286984 2025-06-24T21:14:20.828968Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:20.832845Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:20.832972Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:20.838169Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7123, node 1 2025-06-24T21:14:21.011952Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:21.011988Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:21.011999Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:21.012111Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21322 2025-06-24T21:14:21.325430Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21322 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:21.652270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:21.679540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:14:21.693409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:21.900449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:22.083398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:14:22.180234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:23.929749Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627298150447625:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:23.929879Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:24.382017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.420233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.464605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.540388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.614314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.685960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.721779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.814406Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627302445415592:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:24.814484Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:24.814603Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627302445415597:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:24.818086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:24.827552Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627302445415599:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:14:24.928540Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627302445415650:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:25.320459Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627285265544240:2172];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:25.320524Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpSystemView::PartitionStatsParametricRanges [GOOD] >> KqpSysColV0::SelectRowById [GOOD] >> ReadSessionImplTest::PacksBatches_BigBatchDecompressWithTwoBatchTasks [GOOD] >> ReadSessionImplTest::PacksBatches_DecompressesOneMessagePerTime >> ReadSessionImplTest::PacksBatches_DecompressesOneMessagePerTime [GOOD] >> ReadSessionImplTest::PartitionStreamStatus [GOOD] >> ReadSessionImplTest::PartitionStreamCallbacks [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::InnerJoinSelect [GOOD] Test command err: Trying to start YDB, gRPC: 15183, MsgBus: 26083 2025-06-24T21:14:21.139620Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627286063221673:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:21.139704Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049cc/r3tmp/tmpI7Sg0q/pdisk_1.dat 2025-06-24T21:14:21.567594Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:21.567880Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:21.568047Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:21.571983Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627286063221651:2079] 1750799661138541 != 1750799661138544 2025-06-24T21:14:21.581934Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15183, node 1 2025-06-24T21:14:21.702812Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:21.702835Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:21.702843Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:21.702968Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26083 2025-06-24T21:14:22.167745Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26083 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:22.385587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:22.407650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:14:22.418867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:22.569957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:22.779571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:22.867245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:14:24.552897Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627298948125171:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:24.553027Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:24.844003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.881030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.910883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.937843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.975312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.047362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.076806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.128882Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627303243093127:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:25.128961Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:25.129130Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627303243093132:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:25.132456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:25.145015Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627303243093134:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:14:25.244924Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627303243093187:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:26.139717Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627286063221673:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.139807Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::InnerJoinSelect [GOOD] Test command err: Trying to start YDB, gRPC: 18590, MsgBus: 14615 2025-06-24T21:14:21.589278Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627289281745153:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:21.589692Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049c3/r3tmp/tmpk2JpRg/pdisk_1.dat 2025-06-24T21:14:22.003018Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:22.028006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:22.028141Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 18590, node 1 2025-06-24T21:14:22.030188Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:22.146727Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:22.146761Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:22.146773Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:22.146910Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14615 2025-06-24T21:14:22.589628Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14615 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:22.851069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:22.866894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:14:22.874868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:22.987480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:23.154383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:23.226471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:24.936575Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627302166648556:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:24.936680Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:25.228314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.256671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.286803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.323108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.351946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.394777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.467943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.564449Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627306461616516:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:25.564534Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:25.564806Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627306461616521:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:25.569185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:25.584488Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627306461616523:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:14:25.685508Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627306461616576:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:26.579483Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627289281745153:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.579549Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbOlapStore::LogExistingUserId [GOOD] Test command err: 2025-06-24T21:12:00.857911Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626683405101037:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:00.859642Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004448/r3tmp/tmpzJJ9cO/pdisk_1.dat 2025-06-24T21:12:01.455902Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:01.455970Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:01.476714Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:01.541906Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:01.568197Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 7442, node 1 2025-06-24T21:12:01.748191Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:01.748214Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:01.748220Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:01.748325Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:01.867801Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7970 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:02.265456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:7970 2025-06-24T21:12:02.630307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_store.cpp:451) waiting... 
2025-06-24T21:12:02.902153Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626691995036463:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:12:02.902399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626691995036463:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:12:02.902647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626691995036463:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:12:02.902784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626691995036463:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:12:02.902888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626691995036463:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:12:02.902999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626691995036463:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:12:02.903094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626691995036463:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:12:02.903609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626691995036463:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:12:02.903735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626691995036463:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:12:02.903862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626691995036463:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:12:02.903962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626691995036463:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:12:02.992012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519626691995036461:2283];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:12:02.992074Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7519626691995036461:2283];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:12:02.992300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519626691995036461:2283];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:12:02.992399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519626691995036461:2283];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:12:02.992506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519626691995036461:2283];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:12:02.992630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519626691995036461:2283];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:12:02.992731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519626691995036461:2283];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:12:02.992848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519626691995036461:2283];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:12:02.992941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519626691995036461:2283];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:12:02.993027Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519626691995036461:2283];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:12:02.993112Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519626691995036461:2283];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:12:03.034297Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626691995036467:2285];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:12:03.034370Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626691995036467:2285];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:12:03.034586Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519626691995036467:2285];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:12:03.034679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626691995036467:2285];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:12:03.034782Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626691995036467:2285];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:12:03.034888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626691995036467:2285];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:12:03.034975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:751962 ... ished, don't wait for ack delivery in input channelId: 42, seqNo: [1] 2025-06-24T21:14:24.172995Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 43, seqNo: [1] 2025-06-24T21:14:24.173011Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 44, seqNo: [1] 2025-06-24T21:14:24.173028Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 45, seqNo: [1] 2025-06-24T21:14:24.173046Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 46, seqNo: [1] 2025-06-24T21:14:24.173063Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 47, seqNo: [1] 2025-06-24T21:14:24.173080Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 48, seqNo: [1] 2025-06-24T21:14:24.173098Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 49, seqNo: [1] 2025-06-24T21:14:24.173115Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 50, seqNo: [1] 2025-06-24T21:14:24.173130Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 51, seqNo: [1] 2025-06-24T21:14:24.173148Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 52, seqNo: [1] 2025-06-24T21:14:24.173164Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. 
Tasks execution finished, don't wait for ack delivery in input channelId: 53, seqNo: [1] 2025-06-24T21:14:24.173179Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 54, seqNo: [1] 2025-06-24T21:14:24.173196Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 55, seqNo: [1] 2025-06-24T21:14:24.173211Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 56, seqNo: [1] 2025-06-24T21:14:24.173230Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 57, seqNo: [1] 2025-06-24T21:14:24.173246Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 58, seqNo: [1] 2025-06-24T21:14:24.173263Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 59, seqNo: [1] 2025-06-24T21:14:24.173278Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 60, seqNo: [1] 2025-06-24T21:14:24.173295Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 61, seqNo: [1] 2025-06-24T21:14:24.173311Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 62, seqNo: [1] 2025-06-24T21:14:24.173328Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 63, seqNo: [1] 2025-06-24T21:14:24.173346Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 64, seqNo: [1] 2025-06-24T21:14:24.173371Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715670, task: 65. Tasks execution finished 2025-06-24T21:14:24.173410Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [28:7519627291896217967:3149], TxId: 281474976715670, task: 65. Ctx: { TraceId : 01jyhwneskevephdhqbv0en9z6. SessionId : ydb://session/3?node_id=28&id=NGRmYTM4ZGUtM2RiOWQ4MC05MjI1MzRjZC04ODdiNzU0OQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-24T21:14:24.173633Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715670, task: 65. pass away 2025-06-24T21:14:24.173870Z node 28 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715670;task_id=65;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T21:14:24.173868Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [28:7519627291896217885:3078] TxId: 281474976715670. Ctx: { TraceId: 01jyhwneskevephdhqbv0en9z6, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=NGRmYTM4ZGUtM2RiOWQ4MC05MjI1MzRjZC04ODdiNzU0OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [28:7519627291896217967:3149], task: 65, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 10771 Tasks { TaskId: 65 StageId: 1 CpuTimeUs: 787 FinishTimeMs: 1750799664172 InputRows: 1 InputBytes: 310 OutputRows: 1 OutputBytes: 310 ResultRows: 1 ResultBytes: 310 ComputeCpuTimeUs: 307 BuildCpuTimeUs: 480 HostName: "ghrun-4pjfmvffsq" NodeId: 28 CreateTimeMs: 1750799662704 UpdateTimeMs: 1750799664173 } MaxMemoryUsage: 1048576 } 2025-06-24T21:14:24.173959Z node 28 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715670. Ctx: { TraceId: 01jyhwneskevephdhqbv0en9z6, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=NGRmYTM4ZGUtM2RiOWQ4MC05MjI1MzRjZC04ODdiNzU0OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [28:7519627291896217967:3149] 2025-06-24T21:14:24.174158Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [28:7519627291896217885:3078] TxId: 281474976715670. Ctx: { TraceId: 01jyhwneskevephdhqbv0en9z6, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=NGRmYTM4ZGUtM2RiOWQ4MC05MjI1MzRjZC04ODdiNzU0OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T21:14:24.174255Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [28:7519627291896217885:3078] TxId: 281474976715670. Ctx: { TraceId: 01jyhwneskevephdhqbv0en9z6, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=NGRmYTM4ZGUtM2RiOWQ4MC05MjI1MzRjZC04ODdiNzU0OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.051454s ReadRows: 30 ReadBytes: 9600 ru: 34 rate limiter was not found force flag: 1 2025-06-24T21:14:24.174375Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=28&id=NGRmYTM4ZGUtM2RiOWQ4MC05MjI1MzRjZC04ODdiNzU0OQ==, ActorId: [28:7519627287601250542:3078], ActorState: ExecuteState, TraceId: 01jyhwneskevephdhqbv0en9z6, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-06-24T21:14:24.174755Z node 28 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=28&id=NGRmYTM4ZGUtM2RiOWQ4MC05MjI1MzRjZC04ODdiNzU0OQ==, ActorId: [28:7519627287601250542:3078], ActorState: ExecuteState, TraceId: 01jyhwneskevephdhqbv0en9z6, txInfo Status: Active Kind: ReadOnly TotalDuration: 0 ServerDuration: 1615.658 QueriesCount: 1 2025-06-24T21:14:24.174840Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=28&id=NGRmYTM4ZGUtM2RiOWQ4MC05MjI1MzRjZC04ODdiNzU0OQ==, ActorId: [28:7519627287601250542:3078], ActorState: ExecuteState, TraceId: 01jyhwneskevephdhqbv0en9z6, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-24T21:14:24.174970Z node 28 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=28&id=NGRmYTM4ZGUtM2RiOWQ4MC05MjI1MzRjZC04ODdiNzU0OQ==, ActorId: [28:7519627287601250542:3078], ActorState: ExecuteState, TraceId: 01jyhwneskevephdhqbv0en9z6, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T21:14:24.175026Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=28&id=NGRmYTM4ZGUtM2RiOWQ4MC05MjI1MzRjZC04ODdiNzU0OQ==, ActorId: [28:7519627287601250542:3078], ActorState: ExecuteState, TraceId: 01jyhwneskevephdhqbv0en9z6, EndCleanup, isFinal: 1 2025-06-24T21:14:24.175114Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=28&id=NGRmYTM4ZGUtM2RiOWQ4MC05MjI1MzRjZC04ODdiNzU0OQ==, ActorId: [28:7519627287601250542:3078], ActorState: ExecuteState, TraceId: 01jyhwneskevephdhqbv0en9z6, Sent query response back to proxy, proxyRequestId: 5, proxyId: [28:7519627227471704562:2113] 2025-06-24T21:14:24.175192Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=28&id=NGRmYTM4ZGUtM2RiOWQ4MC05MjI1MzRjZC04ODdiNzU0OQ==, ActorId: [28:7519627287601250542:3078], ActorState: unknown state, TraceId: 01jyhwneskevephdhqbv0en9z6, Cleanup temp tables: 0 2025-06-24T21:14:24.178253Z node 28 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799662000, txId: 18446744073709551615] shutting down 2025-06-24T21:14:24.178436Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=28&id=NGRmYTM4ZGUtM2RiOWQ4MC05MjI1MzRjZC04ODdiNzU0OQ==, ActorId: [28:7519627287601250542:3078], ActorState: unknown state, TraceId: 01jyhwneskevephdhqbv0en9z6, Session actor destroyed RESULT: [[42000u;"nginx";"resource_6";"19";[2];["message"];["{\"auth\":{\"org_id\":7704,\"service\":{\"internal\":\"false\",\"ip\":\"258.258.258.258\"},\"type\":\"token\",\"user\":{\"id\":1000042,\"ip\":\"257.257.257.257\",\"is_cloud\":\"false\"}}}"]]] --------------------- STATS: total CPU: 1145 duration: 1610539 usec cpu: 382737 usec { name: "/Root/OlapStore/log1" reads { rows: 50 bytes: 16000 } } 2025-06-24T21:14:24.296475Z node 28 :TX_COLUMNSHARD DEBUG: 
log.cpp:784: tablet_id=72075186224037889;parent=[28:7519627236061640138:2285];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-24T21:14:24.296571Z node 28 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037891;parent=[28:7519627236061640140:2286];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-24T21:14:24.326909Z node 28 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;parent=[28:7519627236061640141:2287];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-24T21:14:24.327001Z node 28 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037890;parent=[28:7519627236061640134:2284];fline=actor.cpp:33;event=skip_flush_writing; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::SelectRange [GOOD] Test command err: Trying to start YDB, gRPC: 27174, MsgBus: 13797 2025-06-24T21:14:21.981403Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627289821239994:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:21.984113Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049bb/r3tmp/tmpJwyTat/pdisk_1.dat 2025-06-24T21:14:22.438439Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:22.438548Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:22.472583Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:22.491267Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627289821239974:2079] 1750799661973596 != 1750799661973599 2025-06-24T21:14:22.494932Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27174, node 1 2025-06-24T21:14:22.590028Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:22.590061Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:22.590069Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:22.590174Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13797 2025-06-24T21:14:22.988982Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13797 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:23.176743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:23.212829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:23.409963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:23.582691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:23.668861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:25.346579Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627307001110803:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:25.346697Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:25.653866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.692044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.731564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.808606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.844905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.918684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.988731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:26.052352Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627311296078766:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:26.052421Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:26.052445Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627311296078771:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:26.055563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:26.066712Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627311296078773:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:14:26.126843Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627311296078824:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:26.976065Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627289821239994:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.976147Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::PartitionStreamCallbacks [GOOD] Test command err: 2025-06-24T21:14:26.210176Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.210236Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.210263Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.210698Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.212244Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.229291Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.229883Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:26.231048Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:14:26.231563Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:14:26.231804Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2025-06-24T21:14:26.231971Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:14:26.232090Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.232147Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (2-2) 2025-06-24T21:14:26.232192Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-24T21:14:26.232219Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-24T21:14:26.237567Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.237594Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.237624Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.237987Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-24T21:14:26.238432Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.238575Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.238886Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 10 Compressed message data size: 30 2025-06-24T21:14:26.239847Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:14:26.240079Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-24T21:14:26.242431Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-24T21:14:26.242703Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-24T21:14:26.243005Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.243052Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-24T21:14:26.243089Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-24T21:14:26.243285Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 GOT RANGE 0 3 Getting new event 2025-06-24T21:14:26.243404Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-24T21:14:26.243427Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-24T21:14:26.243447Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-24T21:14:26.243845Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 GOT RANGE 3 5 Getting new event 2025-06-24T21:14:26.243937Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-24T21:14:26.243961Z :DEBUG: Take Data. Partition 1. 
Read: {0, 1} (6-6) 2025-06-24T21:14:26.243989Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-24T21:14:26.244089Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 GOT RANGE 5 7 Getting new event 2025-06-24T21:14:26.247704Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-24T21:14:26.247769Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-24T21:14:26.247813Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-24T21:14:26.248026Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [7, 9). Partition stream id: 1 GOT RANGE 7 9 2025-06-24T21:14:26.252740Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.252774Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.252803Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.319729Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.320327Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.320521Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.320930Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 100 Compressed message data size: 91 2025-06-24T21:14:26.321961Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:14:26.322241Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-24T21:14:26.322568Z :DEBUG: Decompression task done. 
Partition/PartitionSessionId: 1 (5-8) 2025-06-24T21:14:26.322811Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-24T21:14:26.322926Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.322968Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-24T21:14:26.323076Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 2). Partition stream id: 1 GOT RANGE 0 2 Getting new event 2025-06-24T21:14:26.323197Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-24T21:14:26.323219Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-24T21:14:26.323283Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [2, 3). Partition stream id: 1 GOT RANGE 2 3 Getting new event 2025-06-24T21:14:26.323342Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-24T21:14:26.323369Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-24T21:14:26.323433Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 4). Partition stream id: 1 GOT RANGE 3 4 Getting new event 2025-06-24T21:14:26.323480Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-24T21:14:26.323499Z :DEBUG: [db] [sessionid] [cluster] The application data ... er". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 190 SeqNo: 231 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 191 SeqNo: 232 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 192 SeqNo: 233 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 193 SeqNo: 234 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 194 SeqNo: 235 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 195 SeqNo: 236 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 196 SeqNo: 237 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 197 SeqNo: 238 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 198 SeqNo: 239 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 199 SeqNo: 240 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 200 SeqNo: 241 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-24T21:14:28.894116Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 201). 
Partition stream id: 1 GOT RANGE 0 201 2025-06-24T21:14:28.976599Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-06-24T21:14:28.977091Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-06-24T21:14:28.977137Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:28.977689Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:28.978099Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:28.978285Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-06-24T21:14:28.978605Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 1000000 Compressed message data size: 3028 Post function Getting new event 2025-06-24T21:14:29.099871Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-10) 2025-06-24T21:14:29.104355Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:29.113362Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-24T21:14:29.116606Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-24T21:14:29.117458Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2025-06-24T21:14:29.124891Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2025-06-24T21:14:29.125937Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2025-06-24T21:14:29.126844Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (7-7) 2025-06-24T21:14:29.127746Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (8-8) 2025-06-24T21:14:29.138068Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (9-9) 2025-06-24T21:14:29.138957Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (10-10) 2025-06-24T21:14:29.139033Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 10, size 10000000 bytes 2025-06-24T21:14:29.139241Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 5, ReadSizeServerDelta = 0 DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 9 SeqNo: 50 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 51 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-24T21:14:29.143046Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 11). Partition stream id: 1 GOT RANGE 0 11 2025-06-24T21:14:29.153943Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:29.153977Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:29.154002Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:29.161892Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:29.162783Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:29.162968Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:29.163236Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. 
Read offset: (NULL) 2025-06-24T21:14:29.163683Z :DEBUG: [db] [sessionid] [cluster] Requesting status for partition stream id: 1 2025-06-24T21:14:29.164953Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:29.164987Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:29.165040Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:29.165356Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:29.165887Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:29.166072Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:29.166647Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:29.167481Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:14:29.167623Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:29.167701Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-24T21:14:29.167905Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsParametricRanges [GOOD] Test command err: Trying to start YDB, gRPC: 21548, MsgBus: 4545 2025-06-24T21:14:22.002382Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627289611781433:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:22.004028Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049bf/r3tmp/tmp48swbm/pdisk_1.dat 2025-06-24T21:14:22.499135Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:22.499395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:22.503918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:22.533285Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627289611781329:2079] 1750799661984028 != 1750799661984031 2025-06-24T21:14:22.573821Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21548, node 1 2025-06-24T21:14:22.744965Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:22.744986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:22.744993Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:22.745110Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got 
bad distributable configuration 2025-06-24T21:14:23.003185Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4545 TClient is connected to server localhost:4545 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:23.409990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:23.429015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:14:23.442062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:23.593799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:23.778171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
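The read-session trace earlier in this section commits half-open offset intervals: "Commit offsets [0, 201)" is acknowledged as "GOT RANGE 0 201" and covers offsets 0 through 200 inclusive. Below is a minimal standalone sketch of that interval convention only; it uses nothing but the standard library, and the TOffsetRange type is an illustrative stand-in, not part of the YDB SDK.

// Sketch (not YDB SDK code): half-open commit interval as seen in the
// read-session trace, e.g. "Commit offsets [0, 201)" == offsets 0..200.
#include <cstdint>
#include <iostream>

struct TOffsetRange {               // illustrative helper type
    uint64_t Begin = 0;             // first committed offset (inclusive)
    uint64_t End = 0;               // one past the last committed offset (exclusive)

    bool Contains(uint64_t offset) const {
        return offset >= Begin && offset < End;
    }
    uint64_t Count() const {
        return End - Begin;
    }
};

int main() {
    TOffsetRange committed{0, 201};  // mirrors "GOT RANGE 0 201" in the log
    std::cout << "covers offset 200: " << committed.Contains(200) << "\n";  // prints 1
    std::cout << "covers offset 201: " << committed.Contains(201) << "\n";  // prints 0
    std::cout << "messages acknowledged: " << committed.Count() << "\n";    // prints 201
    return 0;
}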
2025-06-24T21:14:23.855629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:25.373146Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627306791652171:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:25.373295Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:25.712510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.754949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.792795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.828358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.908975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.952168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:26.025690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:26.114794Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627311086620137:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:26.114902Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:26.115182Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627311086620142:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:26.120918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:26.135610Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627311086620144:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:14:26.211437Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627311086620195:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:26.995473Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627289611781433:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.995556Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:27.996014Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799667979, txId: 281474976710672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::SelectRowById [GOOD] Test command err: Trying to start YDB, gRPC: 9362, MsgBus: 9031 2025-06-24T21:14:22.284207Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627291318222222:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:22.284569Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049b7/r3tmp/tmpwm9BVh/pdisk_1.dat 2025-06-24T21:14:22.693400Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:22.693538Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:22.696126Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:22.739778Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627291318222041:2079] 1750799662263999 != 1750799662264002 2025-06-24T21:14:22.766427Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9362, node 1 2025-06-24T21:14:22.862893Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:22.862914Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:22.862920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:22.863041Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9031 2025-06-24T21:14:23.256963Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9031 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:23.589496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:23.607763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:14:23.620626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:23.755205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:23.899650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:23.978203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:25.633253Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627304203125560:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:25.633360Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:25.975543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:26.013901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:26.054925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:26.099002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:26.169381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:26.213656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:26.269390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:26.367448Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627308498093525:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:26.367528Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:26.367606Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627308498093530:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:26.373754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:26.387800Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627308498093532:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:14:26.475974Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627308498093583:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:27.271942Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627291318222222:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:27.272037Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpSysColV1::InnerJoinSelectAsterisk [GOOD] >> YdbOlapStore::BulkUpsert [GOOD] >> YdbOlapStore::DuplicateRows |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest >> ObjectStorageListingTest::ListingNoFilter >> ObjectStorageListingTest::FilterListing ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::InnerJoinSelectAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 16459, MsgBus: 17395 2025-06-24T21:14:24.004694Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627300895059707:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:24.004778Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049b4/r3tmp/tmpYYAXYG/pdisk_1.dat 2025-06-24T21:14:24.387780Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627296600092393:2079] 1750799664003612 != 1750799664003615 2025-06-24T21:14:24.399714Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16459, node 1 2025-06-24T21:14:24.421629Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:24.421969Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:24.431496Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:24.458979Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:24.459004Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:24.459017Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:24.459168Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17395 TClient is connected to server localhost:17395 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:24.987469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:25.010716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:25.022124Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:25.154054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
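The "Resource pool default not found" / "Scheduled retry ... doublechecking" / "path exist, request accepts it" warnings repeated throughout these test logs show a tolerated bootstrap race: the fetch returns NOT_FOUND, the pool is created, and a concurrent create is accepted because the path already exists. The following is a minimal sketch of that create-if-missing-then-doublecheck pattern under stated assumptions; FetchPool and CreatePool are hypothetical stand-ins backed by an in-memory set, not the real scheme operations.

// Sketch (not the YDB implementation): idempotent "ensure exists" with a
// doublecheck, mirroring the race the warnings above tolerate.
#include <iostream>
#include <mutex>
#include <set>
#include <string>

std::mutex g_lock;                      // hypothetical in-memory "scheme"
std::set<std::string> g_paths;

enum class EStatus { Success, NotFound, AlreadyExists };

EStatus FetchPool(const std::string& path) {
    std::lock_guard<std::mutex> guard(g_lock);
    return g_paths.count(path) ? EStatus::Success : EStatus::NotFound;
}

EStatus CreatePool(const std::string& path) {
    std::lock_guard<std::mutex> guard(g_lock);
    return g_paths.insert(path).second ? EStatus::Success : EStatus::AlreadyExists;
}

bool EnsureDefaultPool(const std::string& path) {
    if (FetchPool(path) == EStatus::Success) {
        return true;                     // pool already there
    }
    const EStatus created = CreatePool(path);
    if (created == EStatus::Success || created == EStatus::AlreadyExists) {
        // "path exist, request accepts it" is treated as success;
        // doublecheck afterwards, like the TPoolCreatorActor retry in the log.
        return FetchPool(path) == EStatus::Success;
    }
    return false;
}

int main() {
    std::cout << EnsureDefaultPool("/Root/.metadata/workload_manager/pools/default") << "\n"; // prints 1
    return 0;
}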
2025-06-24T21:14:25.308937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:25.394781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:27.117696Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627313779963222:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:27.117803Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:27.502267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:27.575760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:27.660699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:27.702623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:27.776227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:27.851092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:27.900321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:27.983554Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627313779963891:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:27.983654Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:27.984644Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627313779963896:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:27.989449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:28.006025Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627313779963898:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:14:28.080332Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627318074931249:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:29.004511Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627300895059707:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:29.004612Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_ManySession_PQv1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::TTLUsage Test command err: 2025-06-24T21:13:54.066117Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:13:54.066543Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:13:54.066734Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049a6/r3tmp/tmppGePyP/pdisk_1.dat 2025-06-24T21:13:54.395006Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 31918, node 1 TClient is connected to server localhost:14578 2025-06-24T21:13:54.629800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:13:54.683766Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:54.683850Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:54.683891Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:54.684220Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:54.688944Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:54.689426Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799631030476 != 1750799631030480 2025-06-24T21:13:54.736681Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:54.736842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:54.748433Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:54.884946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_store.cpp:451) 2025-06-24T21:13:55.007063Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828672, Sender [1:647:2539], Recipient [1:690:2570]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:13:55.008586Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828673, Sender [1:647:2539], Recipient [1:690:2570]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:13:55.008906Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:690:2570];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:13:55.039355Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:13:55.039694Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037888 2025-06-24T21:13:55.047823Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:13:55.048120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:13:55.048427Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:13:55.048576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:13:55.048686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:13:55.048803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:13:55.048912Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:13:55.049044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:13:55.049208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:13:55.049341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:13:55.049452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:13:55.052623Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828684, Sender [1:647:2539], Recipient [1:690:2570]: 
NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:13:55.074770Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 72075186224037888 2025-06-24T21:13:55.075105Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:13:55.075205Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:13:55.075414Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:13:55.075585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:13:55.075685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:13:55.075735Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:13:55.075859Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:13:55.075936Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:13:55.075986Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:13:55.076018Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:13:55.076217Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:13:55.076295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:13:55.076344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:13:55.076383Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:13:55.076493Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:13:55.076548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:13:55.076590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:13:55.076632Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:13:55.076706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:13:55.076747Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:13:55.076775Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0; ... unt=0;;53:size=0;count=0;;54:size=0;count=0;;55:size=0;count=0;;56:size=0;count=0;;57:size=0;count=0;;58:size=0;count=0;;59:size=0;count=0;;60:size=0;count=0;;61:size=0;count=0;;62:size=0;count=0;;63:size=0;count=0;;64:size=0;count=0;;65:size=0;count=0;; TEvBlobStorage::TEvPut tId=72075186224037888;c=0;:77/0:size=69;count=1;size=4709;count=22;;1:size=90;count=1;size=43712;count=9;;2:size=0;count=0;;3:size=1466448;count=1;;4:size=1479208;count=1;;5:size=1458600;count=1;;6:size=1445448;count=1;;7:size=1445920;count=1;;8:size=1445528;count=1;;9:size=3556144;count=4;;10:size=1445744;count=1;;11:size=1445408;count=1;;12:size=1445360;count=1;;13:size=1445360;count=1;;14:size=2328312;count=3;;15:size=1445928;count=1;;16:size=1445608;count=1;;17:size=1445400;count=1;;18:size=1746304;count=2;;19:size=2525448;count=6;;20:size=1445376;count=1;;21:size=808584;count=1;;22:size=907240;count=1;;23:size=1749872;count=4;;24:size=0;count=0;;25:size=0;count=0;;26:size=0;count=0;;27:size=0;count=0;;28:size=0;count=0;;29:size=0;count=0;;30:size=0;count=0;;31:size=0;count=0;;32:size=0;count=0;;33:size=0;count=0;;34:size=0;count=0;;35:size=0;count=0;;36:size=0;count=0;;37:size=0;count=0;;38:size=0;count=0;;39:size=0;count=0;;40:size=0;count=0;;41:size=0;count=0;;42:size=0;count=0;;43:size=0;count=0;;44:size=0;count=0;;45:size=0;count=0;;46:size=0;count=0;;47:size=0;count=0;;48:size=0;count=0;;49:size=0;count=0;;50:size=0;count=0;;51:size=0;count=0;;52:size=0;count=0;;53:size=0;count=0;;54:size=0;count=0;;55:size=0;count=0;;56:size=0;count=0;;57:size=0;count=0;;58:size=0;count=0;;59:size=0;count=0;;60:size=0;count=0;;61:size=0;count=0;;62:size=0;count=0;;63:size=0;count=0;;64:size=0;count=0;;65:size=0;count=0;; 2025-06-24T21:14:27.816962Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-24T21:14:27.817039Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;fline=with_appended.cpp:65;portions=32,33,34,35,;task_id=35369d96-514011f0-b2e76703-49c88cc7; 2025-06-24T21:14:27.817219Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;path_id=3757128055669836289;fline=common_level.h:121;from=412174000000,uid_412174,;to=599999000000,uid_599999,; 2025-06-24T21:14:27.817288Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;path_id=3757128055669836289;fline=common_level.h:141;itFrom=1;itTo=1;raw=35455593;count=1;packed=1046784; 2025-06-24T21:14:27.817446Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;path_id=3757128055669836289;fline=common_level.h:121;from=412174000000,uid_412174,;to=599999000000,uid_599999,; 2025-06-24T21:14:27.817488Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;path_id=3757128055669836289;fline=common_level.h:141;itFrom=1;itTo=1;raw=0;count=0;packed=0; 2025-06-24T21:14:27.817619Z node 1 :TX_COLUMNSHARD TRACE: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;fline=granule.cpp:17;event=upsert_portion;portion=(portion_id:32;path_id:3757128055669836289;records_count:30935;schema_version:1;level:2;;column_size:1087928;index_size:0;meta:(()););path_id=3757128055669836289; 2025-06-24T21:14:27.817741Z node 1 :TX_COLUMNSHARD TRACE: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;fline=tiering.cpp:49;tiering_info=__DEFAULT/0.000000s;$$DELETE/424797.000000s;; 2025-06-24T21:14:27.817835Z node 1 :TX_COLUMNSHARD TRACE: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;fline=granule.cpp:17;event=upsert_portion;portion=(portion_id:33;path_id:3757128055669836289;records_count:30935;schema_version:1;level:2;;column_size:1088408;index_size:0;meta:(()););path_id=3757128055669836289; 2025-06-24T21:14:27.817921Z node 1 :TX_COLUMNSHARD TRACE: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;fline=tiering.cpp:49;tiering_info=__DEFAULT/0.000000s;$$DELETE/538128.000000s;; 2025-06-24T21:14:27.817985Z node 1 :TX_COLUMNSHARD TRACE: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;fline=granule.cpp:17;event=upsert_portion;portion=(portion_id:34;path_id:3757128055669836289;records_count:30935;schema_version:1;level:2;;column_size:1087744;index_size:0;meta:(()););path_id=3757128055669836289; 2025-06-24T21:14:27.818064Z node 1 :TX_COLUMNSHARD TRACE: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;fline=tiering.cpp:49;tiering_info=__DEFAULT/0.000000s;$$DELETE/569063.000000s;; 2025-06-24T21:14:27.818122Z node 1 :TX_COLUMNSHARD TRACE: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;fline=granule.cpp:17;event=upsert_portion;portion=(portion_id:35;path_id:3757128055669836289;records_count:30936;schema_version:1;level:2;;column_size:1088240;index_size:0;meta:(()););path_id=3757128055669836289; 2025-06-24T21:14:27.818201Z node 1 :TX_COLUMNSHARD TRACE: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;fline=tiering.cpp:49;tiering_info=__DEFAULT/0.000000s;$$DELETE/599999.000000s;; 2025-06-24T21:14:27.818265Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::35369d96-514011f0-b2e76703-49c88cc7; 2025-06-24T21:14:27.818333Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:3757128055669836289;path_id:3757128055669836289;size:21158776;portions_count:35;); 2025-06-24T21:14:27.818410Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;tablet_id=72075186224037888;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:14:27.818502Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;tablet_id=72075186224037888;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-24T21:14:27.818574Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;tablet_id=72075186224037888;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=0;tx_id=18446744073709551615;;current_snapshot_ts=21000; 2025-06-24T21:14:27.818620Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;tablet_id=72075186224037888;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:14:27.818674Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;tablet_id=72075186224037888;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:14:27.818733Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;tablet_id=72075186224037888;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:14:27.818805Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;tablet_id=72075186224037888;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.399000s; 2025-06-24T21:14:27.818857Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;tablet_id=72075186224037888;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:14:27.819044Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 72075186224037888 Save Batch GenStep: 1:21 Blob count: 4 VERIFY failed (2025-06-24T21:14:27.819347Z): tablet_id=72075186224037888;task_id=35369d96-514011f0-b2e76703-49c88cc7;verification=CompactionsLimit.Dec() >= 0;fline=ro_controller.cpp:35; ydb/library/actors/core/log.cpp:800 ~TVerifyFormattedRecordWriter(): requirement false failed NPrivate::InternalPanicImpl(int, char const*, char const*, int, int, int, TBasicStringBuf>, char const*, unsigned long)+873 (0x19728E39) NPrivate::Panic(NPrivate::TStaticBuf const&, int, char const*, char const*, char const*, ...)+571 (0x197174CB) NActors::TVerifyFormattedRecordWriter::~TVerifyFormattedRecordWriter()+326 (0x1AD8CEB6) NKikimr::NYDBTest::NColumnShard::TReadOnlyController::DoOnWriteIndexComplete(NKikimr::NOlap::TColumnEngineChanges const&, NKikimr::NColumnShard::TColumnShard const&)+3892 (0x49D68D64) NKikimr::NColumnShard::TTxWriteIndex::Complete(NActors::TActorContext const&)+4781 (0x3106DB5D) NKikimr::NTabletFlatExecutor::TSeat::Complete(NActors::TActorContext const&, bool)+899 (0x1F5CDC53) 
NKikimr::NTabletFlatExecutor::TLogicRedo::Confirm(unsigned int, NActors::TActorContext const&, NActors::TActorId const&)+3856 (0x1F4B2240) NKikimr::NTabletFlatExecutor::TExecutor::Handle(TAutoPtr, TDelete>&, NActors::TActorContext const&)+1521 (0x1F2FD4E1) NKikimr::NTabletFlatExecutor::TExecutor::StateWork(TAutoPtr&)+3039 (0x1F29981F) NActors::IActor::Receive(TAutoPtr&)+237 (0x1ACB9DAD) NActors::TTestActorRuntimeBase::SendInternal(TAutoPtr, unsigned int, bool)+3557 (0x368F2375) NActors::TTestActorRuntimeBase::DispatchEventsInternal(NActors::TDispatchOptions const&, TInstant)+12602 (0x368EABEA) NActors::TTestActorRuntimeBase::WaitForEdgeEvents(std::__y1::function&)>, TSet, std::__y1::allocator> const&, TDuration)+1076 (0x368F4F64) NActors::TEvents::TEvWakeup::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEventIf(TSet, std::__y1::allocator> const&, std::__y1::function const&, TDuration)+292 (0x36ACCC14) NActors::TEvents::TEvWakeup::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEvent(NActors::TActorId const&, TDuration)+419 (0x36ACBD33) NActors::TEvents::TEvWakeup::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEventRethrow(NActors::TActorId const&, TDuration)+307 (0x36AC3AD3) NActors::TTestActorRuntime::SimulateSleep(TDuration)+1115 (0x36AC36AB) NKikimr::NTestSuiteColumnShardTiers::TTestCaseTTLUsage::Execute_(NUnitTest::TTestContext&)+4913 (0x1930D061) std::__y1::__function::__func, void ()>::operator()()+280 (0x1931F198) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x19BD8096) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x19BA7A19) NKikimr::NTestSuiteColumnShardTiers::TCurrentTest::Execute()+1204 (0x1931E144) NUnitTest::TTestFactory::Execute()+2438 (0x19BA92E6) NUnitTest::RunMain(int, char**)+5213 (0x19BD260D) ??+0 (0x7FFBA0112D90) __libc_start_main+128 (0x7FFBA0112E40) _start+41 (0x16BC6029) >> CommitOffset::DistributedTxCommit_CheckSessionResetAfterCommit [GOOD] >> CommitOffset::DistributedTxCommit_CheckOffsetCommitForDifferentCases |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest >> KqpSystemView::NodesRange1 [GOOD] >> CommitOffset::Commit_Flat_WithWrongSession_ToPast [GOOD] >> CommitOffset::Commit_WithSession_ParentNotFinished_OtherSession ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::NodesRange1 [GOOD] Test command err: Trying to start YDB, gRPC: 19076, MsgBus: 19698 2025-06-24T21:14:21.314268Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627286588315658:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:21.314354Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:21.375360Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627287446078337:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:21.375413Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:21.414115Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519627287849698431:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:21.414186Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:21.505348Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519627286404533575:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:21.571308Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519627288796094037:2214];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:21.967276Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049d0/r3tmp/tmpBpsoOT/pdisk_1.dat 2025-06-24T21:14:22.300658Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:22.390653Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:22.463238Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:22.511476Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:14:22.527921Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:14:22.519935Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:14:22.547301Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:22.562631Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:22.639252Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:22.815235Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:22.853119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:22.853260Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:22.860790Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:22.860867Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:22.861084Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:22.861129Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:22.861274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:22.861313Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:22.882122Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-24T21:14:22.895660Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-24T21:14:22.895733Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:14:22.895747Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-24T21:14:22.896955Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:22.897664Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:22.898537Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:22.898715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:22.910861Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:22.910953Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:22.961373Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19076, node 1 2025-06-24T21:14:23.357937Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:23.357973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:23.357987Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:23.358139Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19698 TClient is connected to server localhost:19698 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:24.359445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976730657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:24.405628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976730658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:24.593106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976730659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:24.838497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976730660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:24.921675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976730661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:14:26.314662Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627286588315658:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.314777Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:26.375434Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519627287446078337:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.375519Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:26.415253Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519627287849698431:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.415325Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:26.499361Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519627286404533575:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.499426Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:26.503267Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627288796094037:2214];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.503342Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:27.596131Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627312358121309:2321], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:27.596243Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:27.941529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976730662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:28.098305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976730663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:28.163684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976730664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:28.253640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976730665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:28.393177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976730666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:28.505202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976730667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:28.595724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976730668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:28.746763Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627316653089401:2360], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:28.746841Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:28.747127Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627316653089406:2363], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:28.752036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976730669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:28.776093Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627316653089408:2364], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976730669 completed, doublechecking } 2025-06-24T21:14:28.858546Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627316653089483:4158] txid# 281474976730670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:30.553600Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799670538, txId: 281474976730672] shutting down >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant-StrictAclCheck [FAIL] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant-DomainLoginOnly >> TSequence::CreateSequence >> TSequence::CreateSequenceParallel |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> GroupWriteTest::WriteHardRateDispatcher >> GroupWriteTest::WithRead >> GroupWriteTest::ByTableName |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::SimpleEcho >> TGRpcStreamingTest::WritesDoneFromClient >> TSequence::CreateSequence [GOOD] >> TSequence::CreateDropRecreate >> TGRpcStreamingTest::ReadFinish >> TGRpcStreamingTest::ClientDisconnects >> TGRpcStreamingTest::WriteAndFinishWorks >> TGRpcStreamingTest::ClientNeverWrites |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> ObjectStorageListingTest::ListingNoFilter [GOOD] >> ObjectStorageListingTest::FilterListing [GOOD] >> GroupWriteTest::TwoTables >> TSequence::CreateSequenceParallel [GOOD] >> TSequence::CreateSequenceSequential >> TSequence::CreateDropRecreate [GOOD] >> TSequence::CreateSequenceInsideSequenceNotAllowed >> TSequence::CreateSequenceSequential [GOOD] >> TSequence::CreateSequenceInsideTableThenDropSequence ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest >> ObjectStorageListingTest::ListingNoFilter [GOOD] Test command err: 2025-06-24T21:14:35.646389Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:14:35.646773Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:14:35.646929Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002e8f/r3tmp/tmpn7Rvx2/pdisk_1.dat 2025-06-24T21:14:36.121659Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:14:36.131434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:14:36.189857Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:36.205770Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799672655837 != 1750799672655841 2025-06-24T21:14:36.254753Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:36.254888Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:36.266680Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:36.359909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:36.423915Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:14:36.424213Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:14:36.476258Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:14:36.476403Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:14:36.478262Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:14:36.478356Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:14:36.478417Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:14:36.478784Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:14:36.478931Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:14:36.479022Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:14:36.489942Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:14:36.522228Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:14:36.522446Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:14:36.522597Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:14:36.522661Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:14:36.522699Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:14:36.522746Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:14:36.523249Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:14:36.523394Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:14:36.523651Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:14:36.523710Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:14:36.523771Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:14:36.523856Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:14:36.524020Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:14:36.524475Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:14:36.524707Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:14:36.524817Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:14:36.526693Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:14:36.537539Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:14:36.537692Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:14:36.698042Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:659:2549], serverId# [1:660:2550], sessionId# [0:0:0] 2025-06-24T21:14:36.708513Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:14:36.708622Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:14:36.709681Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:14:36.709749Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:14:36.709836Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:14:36.710082Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:14:36.710264Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:14:36.710758Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:14:36.710842Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:14:36.713161Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:14:36.713733Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:14:36.715642Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:14:36.715744Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:14:36.716804Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:14:36.716880Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:14:36.717820Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:14:36.717869Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:14:36.717932Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:14:36.717999Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:14:36.718062Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:14:36.718174Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:14:36.722943Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:14:36.725189Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:14:36.725275Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:14:36.726344Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:14:36.774922Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:36.775123Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:702:2579], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:36.776146Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:36.784931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:36.798214Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:14:36.843396Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:36.959992Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:14:36.963137Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:706:2582], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:14:37.040321Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:776:2621] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:37.991798Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwnxb37drh3v08vetz27xc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzU5OWUwODctNGY0MGVkNDMtMTk4OGZjMmItYzM4MGY3OWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:38.148101Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:807:2638], serverId# [1:808:2639], sessionId# [0:0:0] 2025-06-24T21:14:38.152071Z node 1 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-24T21:14:38.152423Z node 1 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-06-24T21:14:38.164089Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:14:38.218070Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:815:2645], serverId# [1:816:2646], sessionId# [0:0:0] 2025-06-24T21:14:38.218342Z node 1 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037888 S3 Listing: start at key ((type:2, value:"d\0\0\0") (type:4608, value:"/test/")), end at key ((type:2, value:"d\0\0\0") (type:4608, value:"/test0")) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-24T21:14:38.218609Z node 1 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037888 S3 Listing: finished status: 0 description: "" contents: 3 common prefixes: 2 2025-06-24T21:14:38.218876Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [1:815:2645], serverId# [1:816:2646], sessionId# [0:0:0] |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> TSequence::CreateSequenceInsideSequenceNotAllowed [GOOD] >> TSequence::CreateSequenceInsideIndexTableNotAllowed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest >> ObjectStorageListingTest::FilterListing [GOOD] Test command err: 2025-06-24T21:14:35.646458Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:14:35.646923Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:14:35.647122Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002edc/r3tmp/tmpfNAIGL/pdisk_1.dat 2025-06-24T21:14:36.121538Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:14:36.131164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:14:36.192077Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:36.205723Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799672655845 != 1750799672655849 2025-06-24T21:14:36.253923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:36.254060Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:36.266748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:36.359914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:36.423370Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:14:36.423632Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:14:36.466902Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:14:36.467026Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:14:36.469984Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:14:36.470153Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:14:36.470215Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:14:36.472071Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:14:36.472270Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:14:36.472369Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:14:36.483251Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:14:36.518856Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:14:36.520211Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:14:36.520448Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:14:36.520508Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:14:36.520542Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:14:36.520600Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:14:36.522047Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:14:36.522231Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:14:36.522363Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:14:36.522412Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:14:36.522505Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:14:36.522551Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:14:36.522703Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:14:36.523237Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:14:36.523614Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:14:36.523727Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:14:36.525758Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:14:36.538374Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:14:36.538497Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:14:36.697706Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:659:2549], serverId# [1:660:2550], sessionId# [0:0:0] 2025-06-24T21:14:36.703064Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:14:36.703180Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:14:36.704200Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:14:36.704268Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:14:36.704337Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:14:36.704607Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:14:36.704806Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:14:36.705184Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:14:36.705251Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:14:36.708896Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:14:36.710450Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:14:36.712232Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:14:36.712305Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:14:36.715428Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:14:36.715522Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:14:36.716528Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:14:36.716565Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:14:36.716614Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:14:36.716674Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:14:36.716718Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:14:36.716846Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:14:36.720539Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:14:36.722750Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:14:36.722838Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:14:36.723980Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:14:36.774986Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:36.775472Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:702:2579], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:36.776298Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:36.784844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:36.797764Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:14:36.843739Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:36.959590Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:14:36.962160Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:706:2582], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:14:37.023863Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:776:2621] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:37.991348Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhwnxb37yajvs6hnwr38cp2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTJlZTUyZWYtNGNlZmFkZmQtZWMxNWE2YS00Zjg0MDhhNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:38.148785Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:807:2638], serverId# [1:808:2639], sessionId# [0:0:0] 2025-06-24T21:14:38.153226Z node 1 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-24T21:14:38.153563Z node 1 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-06-24T21:14:38.164710Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:14:38.227459Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:815:2645], serverId# [1:816:2646], sessionId# [0:0:0] 2025-06-24T21:14:38.227719Z node 1 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037888 S3 Listing: start at key ((type:2, value:"d\0\0\0") (type:4608, value:"/test/")), end at key ((type:2, value:"d\0\0\0") (type:4608, value:"/test0")) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-24T21:14:38.227923Z node 1 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037888 S3 Listing: finished status: 0 description: "" contents: 2 common prefixes: 1 2025-06-24T21:14:38.228116Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [1:815:2645], serverId# [1:816:2646], sessionId# [0:0:0] 2025-06-24T21:14:38.262380Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:821:2651], serverId# [1:822:2652], sessionId# [0:0:0] 2025-06-24T21:14:38.262695Z node 1 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037888 S3 Listing: start at key ((type:2, value:"d\0\0\0") (type:4608, value:"/test/")), end at key ((type:2, value:"d\0\0\0") (type:4608, value:"/test0")) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-24T21:14:38.262972Z node 1 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037888 S3 Listing: finished status: 0 description: "" contents: 1 common prefixes: 1 2025-06-24T21:14:38.263369Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [1:821:2651], serverId# [1:822:2652], sessionId# [0:0:0] |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> KqpSystemView::PartitionStatsFollower [GOOD] |95.7%| [TA] $(B)/ydb/core/tx/datashard/ut_object_storage_listing/test-results/unittest/{meta.json ... 
results_accumulator.log} |95.7%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/test-results/unittest/{meta.json ... results_accumulator.log} >> TSequence::CreateSequenceInsideTableThenDropSequence [GOOD] >> TSequence::CreateSequenceInsideTableThenDropTable >> TSequence::CreateSequenceInsideIndexTableNotAllowed [GOOD] >> TSequence::CopyTableWithSequence ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsFollower [GOOD] Test command err: Trying to start YDB, gRPC: 10655, MsgBus: 6285 2025-06-24T21:14:11.897141Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627243293294347:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:11.897188Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a1e/r3tmp/tmpK3espw/pdisk_1.dat 2025-06-24T21:14:12.364441Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:12.386132Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:12.386241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:12.392259Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10655, node 1 2025-06-24T21:14:12.535902Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:12.535924Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:12.535931Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:12.536050Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6285 2025-06-24T21:14:12.923165Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6285 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:14:13.202130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:14:13.224417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:14:13.383437Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519627247588261940:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:14:13.383511Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:14:13.383637Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:7519627247588261940:2147], Recipient [1:7519627247588261940:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:14:13.383664Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:14:14.383631Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519627247588261940:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:14:14.383666Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:14:14.383725Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:7519627247588261940:2147], Recipient [1:7519627247588261940:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:14:14.383751Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:14:15.360887Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627260473164141:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:15.360995Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:15.387324Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519627247588261940:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:14:15.387364Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:14:15.387414Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:7519627247588261940:2147], Recipient [1:7519627247588261940:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:14:15.387427Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:14:15.598047Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519627260473164166:2308], Recipient [1:7519627247588261940:2147]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:14:15.598077Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:14:15.598092Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:14:15.598152Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519627260473164162:2305], Recipient [1:7519627247588261940:2147]: {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-24T21:14:15.598165Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T21:14:15.663755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "Followers" Columns { Name: "Key" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } FollowerGroups { FollowerCount: 3 RequireAllDataCenters: false } } Temporary: false } } TxId: 281474976710658 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:14:15.664199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/Followers, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-24T21:14:15.664342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/Followers, opId: 281474976710658:0, schema: Name: "Followers" Columns { Name: "Key" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } FollowerGroups { FollowerCount: 3 RequireAllDataCenters: false } } Temporary: false, at schemeshard: 72057594046644480 2025-06-24T21:14:15.664855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: Followers, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-24T21:14:15.664902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-06-24T21:14:15.664929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976710658:0 type: TxCreateTable target path: [OwnerId: 72057594046644480, LocalPathId: 2] source path: 2025-06-24T21:14:15.664988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-24T21:14:15.665105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new path created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-24T21:14:15.665128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710658:0 1 -> 2 2025-06-24T21:14:15.665836Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_create_table.cpp:744: TCreateTable Propose creating new table opId# 281474976710658:0 path# /Root/Followers pathId# [OwnerId: 72057594046644480, LocalPathId: 2] schemeshard# 72057594046644480 tx# WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "Followers" Columns { Name: "Key" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } FollowerGroups { FollowerCount: 3 RequireAllDataCenters: false } } Temporary: false } FailOnExist: false 2025-06-24T21:14:15.665964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:14:15.666002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard ... 
d=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], pathId map=Followers, is column=0, is olap=0, RowCount 4, DataSize 800 2025-06-24T21:14:35.866337Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037888, followerId 0 2025-06-24T21:14:35.866404Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:1 with partCount# 0, rowCount# 4, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-24T21:14:35.866450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037888 2025-06-24T21:14:35.866477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046644480:1 data size 0 row count 0 2025-06-24T21:14:35.866496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037888 maps to shardIdx: 72057594046644480:1 followerId=2, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], pathId map=Followers, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-24T21:14:35.866504Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037888, followerId 2 2025-06-24T21:14:35.866544Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-24T21:14:35.866807Z node 1 :SYSTEM_VIEWS TRACE: partition_stats.cpp:152: TEvSysView::TEvSendPartitionStats: domainKey [OwnerId: 72057594046644480, LocalPathId: 1] pathId [OwnerId: 72057594046644480, LocalPathId: 2] shardIdx 72057594046644480 1 followerId 0 stats DataSize: 800 RowCount: 4 IndexSize: 0 CPUCores: 0.000849 TabletId: 72075186224037888 NodeId: 1 StartTime: 1750799655679 AccessTime: 1750799656243 UpdateTime: 1750799656081 InFlightTxCount: 0 RowUpdates: 4 RowDeletes: 0 RowReads: 1 RangeReads: 0 RangeReadRows: 0 ImmediateTxCompleted: 1 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 ByKeyFilterSize: 0 FollowerId: 0 LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:14:35.866932Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [1:7519627247588261940:2147]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-24T21:14:35.866934Z node 1 :SYSTEM_VIEWS TRACE: partition_stats.cpp:152: TEvSysView::TEvSendPartitionStats: domainKey [OwnerId: 72057594046644480, LocalPathId: 1] pathId [OwnerId: 72057594046644480, LocalPathId: 2] shardIdx 72057594046644480 1 followerId 2 stats DataSize: 0 RowCount: 0 IndexSize: 0 CPUCores: 0.000128 TabletId: 72075186224037888 NodeId: 1 StartTime: 1750799655766 AccessTime: 0 UpdateTime: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 1 RangeReadRows: 2 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 ByKeyFilterSize: 0 FollowerId: 2 LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-24T21:14:35.866952Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-24T21:14:35.866966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, 
queue size# 0 2025-06-24T21:14:36.000170Z node 1 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:593: Handle TEvPrivate::TEvProcessInterval: service id# [1:7519627243293294299:2068], interval end# 2025-06-24T21:14:36.000000Z, event interval end# 2025-06-24T21:14:36.000000Z 2025-06-24T21:14:36.000173Z node 1 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:593: Handle TEvPrivate::TEvProcessInterval: service id# [1:7519627243293294364:2076], interval end# 2025-06-24T21:14:36.000000Z, event interval end# 2025-06-24T21:14:36.000000Z 2025-06-24T21:14:36.000202Z node 1 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:408: Rotate logs: service id# [1:7519627243293294364:2076], query logs count# 1, processor ids count# 1, processor id to database count# 0 2025-06-24T21:14:36.000217Z node 1 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:408: Rotate logs: service id# [1:7519627243293294299:2068], query logs count# 0, processor ids count# 0, processor id to database count# 0 2025-06-24T21:14:36.413930Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519627247588261940:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:14:36.413957Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:14:36.413988Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:7519627247588261940:2147], Recipient [1:7519627247588261940:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:14:36.413998Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:14:37.055084Z node 1 :SYSTEM_VIEWS INFO: sysview_service.cpp:886: Navigate by database succeeded: service id# [1:7519627243293294364:2076], database# /Root, no sysview processor 2025-06-24T21:14:37.414447Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519627247588261940:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:14:37.414499Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:14:37.414547Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:7519627247588261940:2147], Recipient [1:7519627247588261940:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:14:37.414560Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:14:38.417425Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519627247588261940:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:14:38.417462Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:14:38.417509Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:7519627247588261940:2147], Recipient [1:7519627247588261940:2147]: 
NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:14:38.417525Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime ... SELECT from partition_stats for /Root/Followers , attempt 2 2025-06-24T21:14:38.887380Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519627359257412550:2457], owner: [1:7519627359257412546:2455], scan id: 0, sys view info: Type: EPartitionStats SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:14:38.889335Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519627359257412550:2457], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-24T21:14:38.889578Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 274595843, Sender [1:7519627359257412550:2457], Recipient [1:7519627247588261940:2147]: NKikimrSysView.TEvGetPartitionStats DomainKeyOwnerId: 72057594046644480 DomainKeyPathId: 1 From { } FromInclusive: true To { } ToInclusive: false IncludePathColumn: true 2025-06-24T21:14:38.889602Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5016: StateWork, processing event NSysView::TEvSysView::TEvGetPartitionStats 2025-06-24T21:14:38.889766Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519627359257412550:2457], row count: 2, finished: 1 2025-06-24T21:14:38.889837Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519627359257412550:2457], owner: [1:7519627359257412546:2455], scan id: 0, sys view info: Type: EPartitionStats SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:14:38.893060Z node 1 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [1:7519627243293294364:2076], database# /Root, query hash# 3266603936201095014, cpu time# 297358 SELECT * FROM `/Root/.sys/partition_stats` WHERE FollowerId != 0 AND (RowReads != 0 OR RangeReadRows != 0) AND Path = '/Root/Followers' ... 
SELECT from partition_stats, attempt 0 2025-06-24T21:14:39.309537Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519627363552379867:2467], owner: [1:7519627363552379864:2465], scan id: 0, sys view info: Type: EPartitionStats SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:14:39.310564Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519627363552379867:2467], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-24T21:14:39.310766Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 274595843, Sender [1:7519627363552379867:2467], Recipient [1:7519627247588261940:2147]: NKikimrSysView.TEvGetPartitionStats DomainKeyOwnerId: 72057594046644480 DomainKeyPathId: 1 From { } FromInclusive: true To { } ToInclusive: false IncludePathColumn: true 2025-06-24T21:14:39.310794Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5016: StateWork, processing event NSysView::TEvSysView::TEvGetPartitionStats 2025-06-24T21:14:39.310939Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519627363552379867:2467], row count: 2, finished: 1 2025-06-24T21:14:39.310996Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519627363552379867:2467], owner: [1:7519627363552379864:2465], scan id: 0, sys view info: Type: EPartitionStats SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:14:39.322722Z node 1 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [1:7519627243293294364:2076], database# /Root, query hash# 14960494650040056739, cpu time# 401445 2025-06-24T21:14:39.419029Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519627247588261940:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:14:39.419065Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:14:39.419113Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:7519627247588261940:2147], Recipient [1:7519627247588261940:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:14:39.419127Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> GroupWriteTest::Simple >> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly [GOOD] >> PersQueueSdkReadSessionTest::StopResumeReadingData >> TGRpcStreamingTest::WritesDoneFromClient [GOOD] >> BasicUsage::MaxByteSizeEqualZero [GOOD] >> BasicUsage::TSimpleWriteSession_AutoSeqNo_BasicUsage >> TGRpcStreamingTest::WriteAndFinishWorks [GOOD] >> TGRpcStreamingTest::ReadFinish [GOOD] >> TGRpcStreamingTest::ClientNeverWrites [GOOD] >> TSequence::CreateSequenceInsideTableThenDropTable [GOOD] >> TSequence::CreateSequencesWithIndexedTable >> TGRpcStreamingTest::SimpleEcho [GOOD] >> TGRpcStreamingTest::ClientDisconnects [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression >> TSequence::CopyTableWithSequence [GOOD] >> TSequence::AlterSequence ------- [TM] {asan, 
default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::WritesDoneFromClient [GOOD] Test command err: 2025-06-24T21:14:38.722723Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627360168431616:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:38.725024Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004651/r3tmp/tmpGPjzn2/pdisk_1.dat 2025-06-24T21:14:39.247133Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:39.268742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:39.268861Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:39.310542Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:39.428374Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x51f000027680] stream accepted Name# Session ok# true peer# ipv6:[::1]:50314 2025-06-24T21:14:39.428810Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x51f000027680] facade attach Name# Session actor# [1:7519627364463399382:2258] peer# ipv6:[::1]:50314 2025-06-24T21:14:39.428847Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:325: [0x51f000027680] facade read Name# Session peer# ipv6:[::1]:50314 2025-06-24T21:14:39.429085Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:353: [0x51f000027680] read finished Name# Session ok# false data# peer# ipv6:[::1]:50314 2025-06-24T21:14:39.429152Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:302: Received TEvReadFinished, success = 0 2025-06-24T21:14:39.429185Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:511: [0x51f000027680] facade finish Name# Session peer# ipv6:[::1]:50314 grpc status# (9) message# Everything is A-OK 2025-06-24T21:14:39.429716Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x51f000027680] stream done notification Name# Session ok# true peer# ipv6:[::1]:50314 2025-06-24T21:14:39.429758Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x51f000027680] stream finished Name# Session ok# true peer# ipv6:[::1]:50314 grpc status# (9) message# Everything is A-OK 2025-06-24T21:14:39.429786Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x51f000027680] deregistering request Name# Session peer# ipv6:[::1]:50314 (finish done) 2025-06-24T21:14:39.429941Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:312: Received TEvNotifiedWhenDone ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::WriteAndFinishWorks [GOOD] Test command err: 2025-06-24T21:14:38.754219Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627359918195411:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:38.754281Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004610/r3tmp/tmpPlZiAj/pdisk_1.dat 
2025-06-24T21:14:39.302495Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:39.308050Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627359918195205:2079] 1750799678735432 != 1750799678735435 2025-06-24T21:14:39.335926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:39.336040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:39.339098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:39.427523Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x51f000029280] stream accepted Name# Session ok# true peer# ipv6:[::1]:36856 2025-06-24T21:14:39.427849Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x51f000029280] facade attach Name# Session actor# [1:7519627364213163015:2258] peer# ipv6:[::1]:36856 2025-06-24T21:14:39.427889Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:401: [0x51f000029280] facade write Name# Session data# peer# ipv6:[::1]:36856 2025-06-24T21:14:39.428184Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:396: [0x51f000029280] facade write Name# Session data# peer# ipv6:[::1]:36856 grpc status# (0) message# 2025-06-24T21:14:39.428915Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:456: [0x51f000029280] write finished Name# Session ok# true peer# ipv6:[::1]:36856 2025-06-24T21:14:39.429205Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x51f000029280] stream done notification Name# Session ok# true peer# ipv6:[::1]:36856 2025-06-24T21:14:39.429232Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:456: [0x51f000029280] write finished Name# Session ok# true peer# ipv6:[::1]:36856 2025-06-24T21:14:39.429252Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x51f000029280] stream finished Name# Session ok# true peer# ipv6:[::1]:36856 grpc status# (0) message# 2025-06-24T21:14:39.429294Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x51f000029280] deregistering request Name# Session peer# ipv6:[::1]:36856 (finish done) 2025-06-24T21:14:39.429425Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:347: Received TEvWriteFinished, success = 1 2025-06-24T21:14:39.429439Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:347: Received TEvWriteFinished, success = 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::SimpleEcho [GOOD] Test command err: 2025-06-24T21:14:38.711556Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627361065127820:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:38.711646Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00461b/r3tmp/tmpoDK1SB/pdisk_1.dat 2025-06-24T21:14:39.279414Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:39.279533Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:39.286549Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:39.344191Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:39.347183Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627361065127775:2079] 1750799678699434 != 1750799678699437 2025-06-24T21:14:39.518480Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x51f000029280] stream accepted Name# Session ok# true peer# ipv6:[::1]:58432 2025-06-24T21:14:39.518929Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x51f000029280] facade attach Name# Session actor# [1:7519627365360095588:2258] peer# ipv6:[::1]:58432 2025-06-24T21:14:39.518968Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:325: [0x51f000029280] facade read Name# Session peer# ipv6:[::1]:58432 2025-06-24T21:14:39.520197Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:353: [0x51f000029280] read finished Name# Session ok# true data# peer# ipv6:[::1]:58432 2025-06-24T21:14:39.520412Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:142: Received TEvReadFinished, success = 1 2025-06-24T21:14:39.520442Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:401: [0x51f000029280] facade write Name# Session data# peer# ipv6:[::1]:58432 2025-06-24T21:14:39.520664Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:511: [0x51f000029280] facade finish Name# Session peer# ipv6:[::1]:58432 grpc status# (0) message# 2025-06-24T21:14:39.520753Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:456: [0x51f000029280] write finished Name# Session ok# true peer# ipv6:[::1]:58432 2025-06-24T21:14:39.521017Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x51f000029280] stream done notification Name# Session ok# true peer# ipv6:[::1]:58432 2025-06-24T21:14:39.521048Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x51f000029280] stream finished Name# Session ok# true peer# ipv6:[::1]:58432 grpc status# (0) message# 2025-06-24T21:14:39.521102Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x51f000029280] deregistering request Name# Session peer# ipv6:[::1]:58432 (finish done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::ClientNeverWrites [GOOD] Test command err: 2025-06-24T21:14:38.711554Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627361517790128:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:38.711686Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004649/r3tmp/tmpPU744F/pdisk_1.dat 2025-06-24T21:14:39.258795Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:39.258906Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:39.273915Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:39.276377Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:39.283428Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription 
[1:7519627361517790083:2079] 1750799678699244 != 1750799678699247 2025-06-24T21:14:39.427600Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x51f000029280] stream accepted Name# Session ok# true peer# ipv6:[::1]:33272 2025-06-24T21:14:39.428059Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x51f000029280] facade attach Name# Session actor# [1:7519627365812757890:2257] peer# ipv6:[::1]:33272 2025-06-24T21:14:39.428100Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:325: [0x51f000029280] facade read Name# Session peer# ipv6:[::1]:33272 2025-06-24T21:14:39.428177Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:401: [0x51f000029280] facade write Name# Session data# peer# ipv6:[::1]:33272 2025-06-24T21:14:39.428468Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:511: [0x51f000029280] facade finish Name# Session peer# ipv6:[::1]:33272 grpc status# (0) message# 2025-06-24T21:14:39.429010Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:456: [0x51f000029280] write finished Name# Session ok# true peer# ipv6:[::1]:33272 2025-06-24T21:14:39.429109Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:187: Received TEvWriteFinished, success = 1 2025-06-24T21:14:39.429290Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x51f000029280] stream done notification Name# Session ok# true peer# ipv6:[::1]:33272 2025-06-24T21:14:39.429358Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x51f000029280] stream finished Name# Session ok# true peer# ipv6:[::1]:33272 grpc status# (0) message# 2025-06-24T21:14:39.429390Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:194: Received TEvNotifiedWhenDone 2025-06-24T21:14:39.429454Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:353: [0x51f000029280] read finished Name# Session ok# false data# peer# ipv6:[::1]:33272 2025-06-24T21:14:39.429502Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:376: [0x51f000029280] deregistering request Name# Session peer# ipv6:[::1]:33272 (read done) 2025-06-24T21:14:39.429628Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:181: Received TEvReadFinished, success = 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::ReadFinish [GOOD] Test command err: 2025-06-24T21:14:38.713313Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627360015621183:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:38.713377Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004608/r3tmp/tmpZN8NTx/pdisk_1.dat 2025-06-24T21:14:39.241383Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:39.245924Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627360015621142:2079] 1750799678703953 != 1750799678703956 2025-06-24T21:14:39.253529Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:39.253660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:39.326704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:39.426010Z node 1 :GRPC_SERVER DEBUG: 
grpc_streaming.h:227: [0x51f000027680] stream accepted Name# Session ok# true peer# ipv6:[::1]:43562 2025-06-24T21:14:39.426542Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x51f000027680] facade attach Name# Session actor# [1:7519627364310588951:2258] peer# ipv6:[::1]:43562 2025-06-24T21:14:39.426585Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:325: [0x51f000027680] facade read Name# Session peer# ipv6:[::1]:43562 2025-06-24T21:14:39.426704Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:511: [0x51f000027680] facade finish Name# Session peer# ipv6:[::1]:43562 grpc status# (0) message# 2025-06-24T21:14:39.427657Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:353: [0x51f000027680] read finished Name# Session ok# false data# peer# ipv6:[::1]:43562 2025-06-24T21:14:39.427724Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x51f000027680] stream done notification Name# Session ok# true peer# ipv6:[::1]:43562 2025-06-24T21:14:39.427724Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:265: Received TEvReadFinished, success = 0 2025-06-24T21:14:39.427758Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x51f000027680] stream finished Name# Session ok# true peer# ipv6:[::1]:43562 grpc status# (0) message# 2025-06-24T21:14:39.427812Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x51f000027680] deregistering request Name# Session peer# ipv6:[::1]:43562 (finish done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::ClientDisconnects [GOOD] Test command err: 2025-06-24T21:14:38.719534Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627362606733059:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:38.719599Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004603/r3tmp/tmpw8uLZt/pdisk_1.dat 2025-06-24T21:14:39.245138Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:39.251581Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:39.251715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:39.273123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:39.273875Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627362606733024:2079] 1750799678708121 != 1750799678708124 2025-06-24T21:14:39.467962Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x51f000027680] stream accepted Name# Session ok# true peer# ipv6:[::1]:59538 2025-06-24T21:14:39.479446Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x51f000027680] stream done notification Name# Session ok# true peer# ipv6:[::1]:59538 2025-06-24T21:14:39.479500Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x51f000027680] facade attach Name# Session actor# [1:7519627366901700834:2258] peer# ipv6:[::1]:59538 2025-06-24T21:14:39.483349Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:230: Received TEvNotifiedWhenDone 2025-06-24T21:14:39.499506Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: 
[0x51f000027680] stream finished Name# Session ok# false peer# unknown grpc status# (1) message# Request abandoned 2025-06-24T21:14:39.499560Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x51f000027680] deregistering request Name# Session peer# unknown (finish done) >> TSequence::CreateSequencesWithIndexedTable [GOOD] >> TSequence::CreateTableWithDefaultFromSequence >> TColumnShardTestReadWrite::ReadGroupBy >> TColumnShardTestReadWrite::CompactionGC >> TColumnShardTestReadWrite::WriteExoticTypes >> CompressExecutor::TestReorderedExecutor [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithAbort |95.8%| [TA] $(B)/ydb/core/grpc_streaming/ut/test-results/unittest/{meta.json ... results_accumulator.log} |95.8%| [TA] {RESULT} $(B)/ydb/core/grpc_streaming/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TColumnShardTestReadWrite::CompactionSplitGranule_PKTimestamp >> TColumnShardTestReadWrite::CompactionGCFailingBs >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt64 >> TColumnShardTestReadWrite::WriteReadNoCompression >> CommitOffset::Commit_WithSession_ParentNotFinished_SameSession [GOOD] >> CommitOffset::Commit_WithSession_ParentNotFinished_OtherSession_ParentCommittedToEnd >> TColumnShardTestReadWrite::CompactionSplitGranule_PKInt32 >> TColumnShardTestReadWrite::ReadAggregate >> TSequence::AlterSequence [GOOD] >> TSequence::AlterTableSetDefaultFromSequence >> TSequence::CreateTableWithDefaultFromSequence [GOOD] >> TSequence::CreateTableWithDefaultFromSequenceAndIndex >> TColumnShardTestReadWrite::ReadWithProgram >> TColumnShardTestReadWrite::Write >> EvWrite::WriteInTransaction >> TColumnShardTestReadWrite::ReadWithProgramLike >> YdbIndexTable::MultiShardTableTwoIndexes [GOOD] >> WithSDK::DescribeConsumer [GOOD] >> GroupWriteTest::WithRead [GOOD] >> TColumnShardTestReadWrite::ReadWithProgram [GOOD] >> EvWrite::WriteInTransaction [GOOD] >> TSequence::CreateTableWithDefaultFromSequenceAndIndex [GOOD] >> TopicAutoscaling::ReBalancingAfterSplit_sessionsWithPartition [GOOD] >> TopicAutoscaling::ReadFromTimestamp_AutoscaleAwareSDK >> TColumnShardTestReadWrite::ReadWithProgramLike [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> EvWrite::WriteInTransaction [GOOD] Test command err: 2025-06-24T21:14:45.543979Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:45.563720Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:45.563958Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:45.569297Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:45.569473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:45.569647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:45.569716Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:45.569803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:45.569862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:45.569953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:45.570044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:45.570110Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:45.570177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:45.570249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:45.591099Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:45.591279Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:45.591362Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:45.591531Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:45.591693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:45.591769Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:45.591809Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:45.592009Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:45.592086Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:45.592145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:45.592183Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:45.592370Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:45.592439Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:45.592497Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:45.592527Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:45.592623Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:45.592692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:45.592788Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:45.592839Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:45.592898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:45.592944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:45.592982Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:45.593192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:45.593233Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:45.593269Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:45.593448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:45.593559Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:45.593590Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:45.593710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:45.593756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:45.593788Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:45.593891Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:45.593962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:45.594004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:45.594040Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:45.594365Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=33; 2025-06-24T21:14:45.594433Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=35; 2025-06-24T21:14:45.594496Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=25; 2025-06-24T21:14:45.594566Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=29; 2025-06-24T21:14:45.594627Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:45.594683Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:45.594711Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:45.594744Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... _SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-24T21:14:46.391050Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=1; 2025-06-24T21:14:46.391121Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=2048;merger=0;interval_id=1; 2025-06-24T21:14:46.391181Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-24T21:14:46.391276Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:46.391304Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=2048;finished=1; 2025-06-24T21:14:46.391339Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:14:46.391566Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:14:46.391720Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:2048;schema=key: uint64 field: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:46.391784Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:14:46.391915Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready 
result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;);columns=2;rows=2048; 2025-06-24T21:14:46.392022Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=229376;num_rows=2048;batch_columns=key,field; 2025-06-24T21:14:46.392243Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:288:2300];bytes=229376;rows=2048;faults=0;finished=0;fault=0;schema=key: uint64 field: string; 2025-06-24T21:14:46.392427Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:46.392538Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:46.392631Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:46.392916Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:14:46.393040Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:46.393140Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:46.393178Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:289:2301] finished for tablet 9437184 2025-06-24T21:14:46.393667Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:288:2300];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.006},{"events":["f_ack","l_task_result"],"t":0.031},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.033}],"full":{"a":1750799686360008,"name":"_full_task","f":1750799686360008,"d_finished":0,"c":0,"l":1750799686393237,"d":33229},"events":[{"name":"bootstrap","f":1750799686360306,"d_finished":3568,"c":1,"l":1750799686363874,"d":3568},{"a":1750799686392895,"name":"ack","f":1750799686391538,"d_finished":1121,"c":1,"l":1750799686392659,"d":1463},{"a":1750799686392871,"name":"processing","f":1750799686366695,"d_finished":15578,"c":9,"l":1750799686392661,"d":15944},{"name":"ProduceResults","f":1750799686362251,"d_finished":2758,"c":12,"l":1750799686393165,"d":2758},{"a":1750799686393167,"name":"Finish","f":1750799686393167,"d_finished":0,"c":0,"l":1750799686393237,"d":70},{"name":"task_result","f":1750799686366726,"d_finished":14262,"c":8,"l":1750799686391394,"d":14262}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:46.393775Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:288:2300];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:14:46.394197Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:288:2300];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.006},{"events":["f_ack","l_task_result"],"t":0.031},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.033}],"full":{"a":1750799686360008,"name":"_full_task","f":1750799686360008,"d_finished":0,"c":0,"l":1750799686393834,"d":33826},"events":[{"name":"bootstrap","f":1750799686360306,"d_finished":3568,"c":1,"l":1750799686363874,"d":3568},{"a":1750799686392895,"name":"ack","f":1750799686391538,"d_finished":1121,"c":1,"l":1750799686392659,"d":2060},{"a":1750799686392871,"name":"processing","f":1750799686366695,"d_finished":15578,"c":9,"l":1750799686392661,"d":16541},{"name":"ProduceResults","f":1750799686362251,"d_finished":2758,"c":12,"l":1750799686393165,"d":2758},{"a":1750799686393167,"name":"Finish","f":1750799686393167,"d_finished":0,"c":0,"l":1750799686393834,"d":667},{"name":"task_result","f":1750799686366726,"d_finished":14262,"c":8,"l":1750799686391394,"d":14262}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:46.394292Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:14:46.359375Z;index_granules=0;index_portions=1;index_batches=44;schema_columns=2;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=237240;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=237240;selected_rows=0; 2025-06-24T21:14:46.394356Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:14:46.394623Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:289:2301];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> GroupWriteTest::WithRead [GOOD] Test command err: RandomSeed# 1717011915384384901 2025-06-24T21:14:38.418228Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 3 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 3 MinGeneration# 1 ReadBody# false DiscoverBlockedGeneration# true ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-06-24T21:14:38.464661Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 3 Generation# 1 recieved TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-06-24T21:14:38.464743Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 3 
Generation# 1 going to send TEvBlock {TabletId# 3 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-06-24T21:14:38.476042Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 3 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-06-24T21:14:38.498905Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 3 Generation# 2 going to send TEvCollectGarbage {TabletId# 3 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:14:38.508141Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 3 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 3 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-24T21:14:46.187225Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-24T21:14:46.187350Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: TabletId# 3 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 3 RecordGeneration# 2 PerGenerationCounter# 12 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:14:46.187406Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-24T21:14:46.187448Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: TabletId# 3 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 3 RecordGeneration# 2 PerGenerationCounter# 13 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:14:46.262068Z 1 00h01m10.010512s :BS_LOAD_TEST INFO: TabletId# 3 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 3 RecordGeneration# 2 PerGenerationCounter# 12 Channel# 0 Status# OK} 2025-06-24T21:14:46.262175Z 1 00h01m10.010512s :BS_LOAD_TEST INFO: TabletId# 3 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 3 RecordGeneration# 2 PerGenerationCounter# 13 Channel# 0 Status# OK} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::ReadWithProgram [GOOD] Test command err: 2025-06-24T21:14:45.574222Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:45.602990Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:45.603311Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:45.611078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:45.611341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:45.611576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:45.611703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:45.611880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:45.612007Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:45.612185Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:45.612311Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:45.612400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:45.612466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:45.612547Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:45.643551Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:45.643807Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:45.643865Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:45.644072Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:45.644266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:45.644357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:45.644403Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:45.644512Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:45.644593Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:45.644643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:45.644676Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:45.644842Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:45.644919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:45.644958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:45.644991Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:45.645094Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:45.645150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:45.645226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:45.645274Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:45.645339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:45.645398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:45.645437Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:45.645667Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:45.645711Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:45.645749Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:45.645923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:45.645967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:45.645996Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:45.646132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:45.646205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:45.646243Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:45.646343Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:45.646422Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:45.646468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:45.646500Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:45.646938Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=54; 2025-06-24T21:14:45.647032Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=41; 2025-06-24T21:14:45.647110Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=35; 2025-06-24T21:14:45.647231Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=79; 2025-06-24T21:14:45.647326Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:45.647424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:45.647469Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:45.647518Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... _ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-24T21:14:46.446310Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-06-24T21:14:46.446337Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:14:46.446388Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-24T21:14:46.446424Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=fetching.cpp:17;event=apply; 2025-06-24T21:14:46.446461Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=interval.cpp:28;event=fetched;interval_idx=0; 2025-06-24T21:14:46.446498Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=interval.cpp:17;event=start_construct_result;interval_idx=0;interval_id=2;memory=8398003;count=1; 2025-06-24T21:14:46.446891Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=merge.cpp:152;event=DoExecute;interval_idx=0; 2025-06-24T21:14:46.449401Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=source.cpp:52;event=source_ready;intervals_count=1;source_idx=0; 2025-06-24T21:14:46.449540Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-24T21:14:46.449584Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-06-24T21:14:46.449617Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:14:46.449817Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-24T21:14:46.449861Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-24T21:14:46.449906Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=2; 2025-06-24T21:14:46.449945Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=0;merger=0;interval_id=2; 2025-06-24T21:14:46.449990Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-24T21:14:46.450093Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-24T21:14:46.450223Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-24T21:14:46.450442Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:14:46.450550Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-24T21:14:46.450649Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-24T21:14:46.450683Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:294:2306] finished for tablet 9437184 2025-06-24T21:14:46.451257Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:293:2305];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish","l_task_result"],"t":0.012}],"full":{"a":1750799686437862,"name":"_full_task","f":1750799686437862,"d_finished":0,"c":0,"l":1750799686450757,"d":12895},"events":[{"name":"bootstrap","f":1750799686438095,"d_finished":2887,"c":1,"l":1750799686440982,"d":2887},{"a":1750799686450420,"name":"ack","f":1750799686450420,"d_finished":0,"c":0,"l":1750799686450757,"d":337},{"a":1750799686450399,"name":"processing","f":1750799686441045,"d_finished":5734,"c":9,"l":1750799686450304,"d":6092},{"name":"ProduceResults","f":1750799686439865,"d_finished":2037,"c":11,"l":1750799686450669,"d":2037},{"a":1750799686450672,"name":"Finish","f":1750799686450672,"d_finished":0,"c":0,"l":1750799686450757,"d":85},{"name":"task_result","f":1750799686441059,"d_finished":5612,"c":9,"l":1750799686450302,"d":5612}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-24T21:14:46.451355Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:293:2305];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:14:46.451818Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:293:2305];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_ProduceResults","f_Finish","l_task_result"],"t":0.012},{"events":["l_ack","l_processing","l_Finish"],"t":0.013}],"full":{"a":1750799686437862,"name":"_full_task","f":1750799686437862,"d_finished":0,"c":0,"l":1750799686451409,"d":13547},"events":[{"name":"bootstrap","f":1750799686438095,"d_finished":2887,"c":1,"l":1750799686440982,"d":2887},{"a":1750799686450420,"name":"ack","f":1750799686450420,"d_finished":0,"c":0,"l":1750799686451409,"d":989},{"a":1750799686450399,"name":"processing","f":1750799686441045,"d_finished":5734,"c":9,"l":1750799686450304,"d":6744},{"name":"ProduceResults","f":1750799686439865,"d_finished":2037,"c":11,"l":1750799686450669,"d":2037},{"a":1750799686450672,"name":"Finish","f":1750799686450672,"d_finished":0,"c":0,"l":1750799686451409,"d":737},{"name":"task_result","f":1750799686441059,"d_finished":5612,"c":9,"l":1750799686450302,"d":5612}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-24T21:14:46.451915Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:14:46.437274Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=3;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=8392;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=8392;selected_rows=0; 2025-06-24T21:14:46.451983Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:14:46.452306Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:294:2306];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::MultiShardTableTwoIndexes [GOOD] Test command err: Trying to start YDB, gRPC: 1590, MsgBus: 26403 2025-06-24T21:11:26.297607Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626536423752644:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:26.297684Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003fb4/r3tmp/tmpXEZQ2B/pdisk_1.dat 2025-06-24T21:11:26.683307Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626536423752613:2079] 1750799486293723 != 1750799486293726 2025-06-24T21:11:26.685728Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:26.735524Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:26.735710Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:26.737740Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1590, node 1 2025-06-24T21:11:26.875089Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:26.875116Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:26.875133Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:26.875302Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:27.304671Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26403 TClient is connected to server localhost:26403 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:27.958942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:11:27.980029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:11:27.991666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:11:28.145717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:28.395577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:28.529078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:30.093598Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626553603623452:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.093788Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.456800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.485204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.514349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.556192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.586166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.633554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.671312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.750204Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626553603624109:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.750314Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.750524Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626553603624114:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.756288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:30.769032Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626553603624116:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:30.869198Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626553603624167:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:31.298071Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626536423752644:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:31.298141Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:32.174277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... ion/3?node_id=2&id=YmE5N2E2ZC0zNWJjMWM3Zi1iNWZmNWUxLWUyNTViMjJj, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.622642Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719490. Ctx: { TraceId: 01jyhwp12w6ghr1d70rr7meexx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjczZTQ1M2ItNWMwM2RkZWMtMjRjYzIzYTctMjEyODk2YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.623347Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719491. Ctx: { TraceId: 01jyhwp134c8gdxy88nj1svhhp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmMzZmM0ZWUtMTI3NDQ1MGYtZTg0NjM3ZGItN2NhNTNhNTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.640287Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719492. Ctx: { TraceId: 01jyhwp13fa8hjq4a66yexdkge, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzFhNzZjMmUtZTU5OTk2NTktOTY2ZTFmNDQtNWUzODRjZDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.642851Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719493. Ctx: { TraceId: 01jyhwp134c8gdxy88nj1svhhp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmMzZmM0ZWUtMTI3NDQ1MGYtZTg0NjM3ZGItN2NhNTNhNTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.644410Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719494. Ctx: { TraceId: 01jyhwp12w6ghr1d70rr7meexx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjczZTQ1M2ItNWMwM2RkZWMtMjRjYzIzYTctMjEyODk2YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.664653Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719495. Ctx: { TraceId: 01jyhwp12v8sw0jezke6r94a4r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YWMzN2UyODUtNjAyMGFmZWItZTA3ODE1OTctYjZjMGY5MzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:14:40.679536Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719496. Ctx: { TraceId: 01jyhwp14s22bt27d6nqmm1bpm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTFiYzgwOGUtYTBmODg4ODMtNmU2ZWQ1NDMtYzNiYmVkOWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.685733Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719497. Ctx: { TraceId: 01jyhwp159869x11t9nn215bkw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmE5N2E2ZC0zNWJjMWM3Zi1iNWZmNWUxLWUyNTViMjJj, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.687196Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719498. Ctx: { TraceId: 01jyhwp14s22bt27d6nqmm1bpm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTFiYzgwOGUtYTBmODg4ODMtNmU2ZWQ1NDMtYzNiYmVkOWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.733322Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719499. Ctx: { TraceId: 01jyhwp16n126n6r7re54v6ewf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjczZTQ1M2ItNWMwM2RkZWMtMjRjYzIzYTctMjEyODk2YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.733437Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719501. Ctx: { TraceId: 01jyhwp16m1yx0q00epkemv7mw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmMzZmM0ZWUtMTI3NDQ1MGYtZTg0NjM3ZGItN2NhNTNhNTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.735501Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719500. Ctx: { TraceId: 01jyhwp16g0rr966bm1www0r93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzFhNzZjMmUtZTU5OTk2NTktOTY2ZTFmNDQtNWUzODRjZDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.744728Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719502. Ctx: { TraceId: 01jyhwp16m1yx0q00epkemv7mw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmMzZmM0ZWUtMTI3NDQ1MGYtZTg0NjM3ZGItN2NhNTNhNTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.749871Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719503. Ctx: { TraceId: 01jyhwp16g0rr966bm1www0r93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzFhNzZjMmUtZTU5OTk2NTktOTY2ZTFmNDQtNWUzODRjZDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.752728Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719504. Ctx: { TraceId: 01jyhwp16t1p1qkd37zenf76hw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YWMzN2UyODUtNjAyMGFmZWItZTA3ODE1OTctYjZjMGY5MzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.761498Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719505. Ctx: { TraceId: 01jyhwp16m1yx0q00epkemv7mw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmMzZmM0ZWUtMTI3NDQ1MGYtZTg0NjM3ZGItN2NhNTNhNTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:14:40.771457Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719506. Ctx: { TraceId: 01jyhwp1759hzjgkacjkbwnk5z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZGNhMzE5YTQtY2UyNWFhOTUtNzY2ODFlYzQtN2Y4NTliYjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.772624Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719507. Ctx: { TraceId: 01jyhwp16t1p1qkd37zenf76hw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YWMzN2UyODUtNjAyMGFmZWItZTA3ODE1OTctYjZjMGY5MzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.786394Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719508. Ctx: { TraceId: 01jyhwp1759hzjgkacjkbwnk5z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZGNhMzE5YTQtY2UyNWFhOTUtNzY2ODFlYzQtN2Y4NTliYjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.796080Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719509. Ctx: { TraceId: 01jyhwp18b0p94skjknrgz1p9s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmE5N2E2ZC0zNWJjMWM3Zi1iNWZmNWUxLWUyNTViMjJj, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.805618Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719510. Ctx: { TraceId: 01jyhwp18b0p94skjknrgz1p9s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmE5N2E2ZC0zNWJjMWM3Zi1iNWZmNWUxLWUyNTViMjJj, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.832459Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719512. Ctx: { TraceId: 01jyhwp19b5f2fgt2kfbv201cx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjczZTQ1M2ItNWMwM2RkZWMtMjRjYzIzYTctMjEyODk2YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.838632Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719511. Ctx: { TraceId: 01jyhwp19n48v40njdaeaxvfdq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmMzZmM0ZWUtMTI3NDQ1MGYtZTg0NjM3ZGItN2NhNTNhNTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.839805Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719513. Ctx: { TraceId: 01jyhwp19q1mhdcjwz2d9rhvmx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzFhNzZjMmUtZTU5OTk2NTktOTY2ZTFmNDQtNWUzODRjZDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.854664Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719514. Ctx: { TraceId: 01jyhwp19b5f2fgt2kfbv201cx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjczZTQ1M2ItNWMwM2RkZWMtMjRjYzIzYTctMjEyODk2YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-24T21:14:40.858233Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719515. 
Ctx: { TraceId: 01jyhwp19b5f2fgt2kfbv201cx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjczZTQ1M2ItNWMwM2RkZWMtMjRjYzIzYTctMjEyODk2YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.861580Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719516. Ctx: { TraceId: 01jyhwp1akd2tbczjzxdbxyraf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YWMzN2UyODUtNjAyMGFmZWItZTA3ODE1OTctYjZjMGY5MzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-24T21:14:40.875433Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719517. Ctx: { TraceId: 01jyhwp1akd2tbczjzxdbxyraf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YWMzN2UyODUtNjAyMGFmZWItZTA3ODE1OTctYjZjMGY5MzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.902595Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719518. Ctx: { TraceId: 01jyhwp1b8dn46320x7tsyhfr2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmE5N2E2ZC0zNWJjMWM3Zi1iNWZmNWUxLWUyNTViMjJj, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-24T21:14:40.911354Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719519. Ctx: { TraceId: 01jyhwp1b8dn46320x7tsyhfr2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmE5N2E2ZC0zNWJjMWM3Zi1iNWZmNWUxLWUyNTViMjJj, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.920285Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719520. Ctx: { TraceId: 01jyhwp1awc71c9pk489etefez, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZGNhMzE5YTQtY2UyNWFhOTUtNzY2ODFlYzQtN2Y4NTliYjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.927039Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719521. Ctx: { TraceId: 01jyhwp1awc71c9pk489etefez, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZGNhMzE5YTQtY2UyNWFhOTUtNzY2ODFlYzQtN2Y4NTliYjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:40.931018Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719522. Ctx: { TraceId: 01jyhwp1awc71c9pk489etefez, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZGNhMzE5YTQtY2UyNWFhOTUtNzY2ODFlYzQtN2Y4NTliYjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS >> TSequence::AlterTableSetDefaultFromSequence [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sequence/unittest >> TSequence::CreateTableWithDefaultFromSequenceAndIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:14:37.582421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:14:37.582527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:14:37.582564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:14:37.582599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:14:37.584120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:14:37.584200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:14:37.584331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:14:37.584557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:14:37.585332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:14:37.594564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:14:37.693254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:14:37.693312Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:37.711484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:14:37.711967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:14:37.712164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:14:37.725854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:14:37.726000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 
2025-06-24T21:14:37.726657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:14:37.726965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:14:37.734353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:14:37.735499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:14:37.741325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:14:37.741420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:14:37.741647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:14:37.741695Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:14:37.741778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:14:37.741868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:14:37.748808Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:14:37.907359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:14:37.907597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:14:37.907856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:14:37.907928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:14:37.908184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:14:37.908273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:14:37.915258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:14:37.915485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:14:37.915758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:14:37.915837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:14:37.915872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:14:37.915931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:14:37.918483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:14:37.918564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:14:37.918617Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:14:37.924739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:14:37.924813Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:14:37.924855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:14:37.924928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:14:37.936710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:14:37.939921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:14:37.940151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:14:37.941264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:14:37.941470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:14:37.941543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:14:37.941861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:14:37.941914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:14:37.942108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:14:37.942182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:14:37.948563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:14:37.948625Z node 1 :FLAT_TX_SCHEMESHARD ... ntPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:14:46.420522Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T21:14:46.420567Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:2 progress is 3/4 2025-06-24T21:14:46.420624Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 3/4 2025-06-24T21:14:46.420682Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:2 progress is 3/4 2025-06-24T21:14:46.420734Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 3/4 2025-06-24T21:14:46.420796Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 3/4, is published: true 2025-06-24T21:14:46.421160Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:14:46.421202Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:14:46.421876Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [7:132:2155], Recipient [7:132:2155]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T21:14:46.421926Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T21:14:46.421988Z node 7 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:14:46.422035Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:14:46.422338Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:14:46.422470Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T21:14:46.422507Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 4/4 2025-06-24T21:14:46.422539Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 4/4 2025-06-24T21:14:46.422581Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 4/4 2025-06-24T21:14:46.422613Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 4/4 2025-06-24T21:14:46.422652Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 4/4, is published: true 2025-06-24T21:14:46.422747Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [7:414:2370] message: TxId: 102 2025-06-24T21:14:46.422816Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 4/4 2025-06-24T21:14:46.422888Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:14:46.422942Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:14:46.423088Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:14:46.423211Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:1 2025-06-24T21:14:46.423242Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:1 2025-06-24T21:14:46.423284Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:14:46.423317Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:2 2025-06-24T21:14:46.423343Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:2 2025-06-24T21:14:46.423397Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-24T21:14:46.423433Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:3 2025-06-24T21:14:46.423460Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: 
RemoveTx for txid 102:3 2025-06-24T21:14:46.423527Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-24T21:14:46.424571Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435084, Sender [7:132:2155], Recipient [7:132:2155]: NKikimr::NSchemeShard::TEvPrivate::TEvCleanDroppedPaths 2025-06-24T21:14:46.424631Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5123: StateWork, processing event TEvPrivate::TEvCleanDroppedPaths 2025-06-24T21:14:46.424750Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:14:46.424826Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-24T21:14:46.424932Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:14:46.426833Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:14:46.426893Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:14:46.427048Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:14:46.427078Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:14:46.427130Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:14:46.427212Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:14:46.427272Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:14:46.427300Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:14:46.433209Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:14:46.433304Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:14:46.433441Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:14:46.433557Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:14:46.433682Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [7:414:2370] msg type: 271124998 msg: 
NKikimrScheme.TEvNotifyTxCompletionResult TxId: 102 at schemeshard: 72057594046678944 2025-06-24T21:14:46.433884Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:14:46.433970Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:14:46.434035Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [7:520:2469] 2025-06-24T21:14:46.434327Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [7:522:2471], Recipient [7:132:2155]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:14:46.434375Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:14:46.434409Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 2025-06-24T21:14:46.435020Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [7:597:2546], Recipient [7:132:2155]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-24T21:14:46.435100Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T21:14:46.435447Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:14:46.435771Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 344us result status StatusPathDoesNotExist 2025-06-24T21:14:46.436024Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeTable, state: EPathStateNotExist), drop stepId: 5000003, drop txId: 102" Path: "/MyRoot/Table" PathId: 2 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::ReadWithProgramLike [GOOD] Test command err: 2025-06-24T21:14:45.694833Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:45.724591Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:45.724897Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:45.733121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:45.733396Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:45.733657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:45.733808Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:45.733948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:45.734075Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:45.734248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:45.734363Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:45.734488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:45.734611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:45.734742Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:45.766217Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:45.766513Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:45.766577Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:45.766801Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:45.766992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:45.767071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:45.767115Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:45.767273Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:45.767356Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:45.767413Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:45.767447Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:45.767610Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:45.767677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:45.767718Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:45.767749Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:45.767860Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:45.767939Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:45.768024Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:45.768082Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:45.768154Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:45.768226Z node 
1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:45.768265Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:45.768491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:45.768536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:45.768575Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:45.768782Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:45.768829Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:45.768860Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:45.769004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:45.769049Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:45.769092Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:45.769184Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:45.769259Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:45.769317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:45.769364Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:45.769815Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=45; 2025-06-24T21:14:45.769909Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=46; 2025-06-24T21:14:45.769982Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=30; 2025-06-24T21:14:45.770063Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=40; 2025-06-24T21:14:45.770153Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:45.770245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:45.770290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:45.770337Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-24T21:14:46.824762Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=6; 2025-06-24T21:14:46.824834Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=10;merger=0;interval_id=6; 2025-06-24T21:14:46.824896Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-24T21:14:46.824983Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;); 2025-06-24T21:14:46.825022Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=10;finished=1; 2025-06-24T21:14:46.825055Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:14:46.825302Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:14:46.825425Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:10;schema=message: string;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;); 2025-06-24T21:14:46.825501Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:14:46.825613Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;);columns=1;rows=10; 2025-06-24T21:14:46.825665Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=61;num_rows=10;batch_columns=message; 2025-06-24T21:14:46.825928Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:309:2321];bytes=61;rows=10;faults=0;finished=0;fault=0;schema=message: string; 2025-06-24T21:14:46.826054Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;); 2025-06-24T21:14:46.826170Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;); 2025-06-24T21:14:46.826287Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;); 2025-06-24T21:14:46.826488Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:14:46.826594Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;); 2025-06-24T21:14:46.826705Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;); 2025-06-24T21:14:46.826747Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:310:2322] finished for tablet 9437184 2025-06-24T21:14:46.827251Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:309:2321];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.011},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.013}],"full":{"a":1750799686813721,"name":"_full_task","f":1750799686813721,"d_finished":0,"c":0,"l":1750799686826797,"d":13076},"events":[{"name":"bootstrap","f":1750799686813984,"d_finished":2800,"c":1,"l":1750799686816784,"d":2800},{"a":1750799686826468,"name":"ack","f":1750799686825279,"d_finished":1036,"c":1,"l":1750799686826315,"d":1365},{"a":1750799686826401,"name":"processing","f":1750799686818020,"d_finished":6125,"c":9,"l":1750799686826318,"d":6521},{"name":"ProduceResults","f":1750799686815515,"d_finished":2677,"c":12,"l":1750799686826733,"d":2677},{"a":1750799686826736,"name":"Finish","f":1750799686826736,"d_finished":0,"c":0,"l":1750799686826797,"d":61},{"name":"task_result","f":1750799686818038,"d_finished":4913,"c":8,"l":1750799686825135,"d":4913}],"id":"9437184::6"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;); 2025-06-24T21:14:46.827352Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:309:2321];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:14:46.827760Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:309:2321];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.011},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.013}],"full":{"a":1750799686813721,"name":"_full_task","f":1750799686813721,"d_finished":0,"c":0,"l":1750799686827408,"d":13687},"events":[{"name":"bootstrap","f":1750799686813984,"d_finished":2800,"c":1,"l":1750799686816784,"d":2800},{"a":1750799686826468,"name":"ack","f":1750799686825279,"d_finished":1036,"c":1,"l":1750799686826315,"d":1976},{"a":1750799686826401,"name":"processing","f":1750799686818020,"d_finished":6125,"c":9,"l":1750799686826318,"d":7132},{"name":"ProduceResults","f":1750799686815515,"d_finished":2677,"c":12,"l":1750799686826733,"d":2677},{"a":1750799686826736,"name":"Finish","f":1750799686826736,"d_finished":0,"c":0,"l":1750799686827408,"d":672},{"name":"task_result","f":1750799686818038,"d_finished":4913,"c":8,"l":1750799686825135,"d":4913}],"id":"9437184::6"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;); 2025-06-24T21:14:46.827847Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:14:46.813206Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=8392;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=8392;selected_rows=0; 2025-06-24T21:14:46.827892Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:14:46.828151Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:310:2322];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> WithSDK::DescribeConsumer [GOOD] Test command err: 2025-06-24T21:12:43.568093Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626867565856489:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:43.568146Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:43.918300Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/2btm/00315f/r3tmp/tmpDdwZOY/pdisk_1.dat 2025-06-24T21:12:44.181673Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:44.181752Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:44.194119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:44.209837Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626867565856469:2079] 1750799563566572 != 1750799563566575 2025-06-24T21:12:44.221243Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11584, node 1 2025-06-24T21:12:44.386137Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00315f/r3tmp/yandexqrovYK.tmp 2025-06-24T21:12:44.386166Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00315f/r3tmp/yandexqrovYK.tmp 2025-06-24T21:12:44.386352Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00315f/r3tmp/yandexqrovYK.tmp 2025-06-24T21:12:44.386495Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:44.531381Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:44.640852Z INFO: TTestServer started on Port 28821 GrpcPort 11584 TClient is connected to server localhost:28821 PQClient connected to localhost:11584 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:45.011120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:45.041800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:12:45.051188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:12:45.069247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T21:12:45.291296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-24T21:12:47.080057Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626884745726458:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.080057Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626884745726445:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.080204Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.091861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:47.113824Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626884745726460:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-24T21:12:47.452842Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626884745726524:2444] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:47.615957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.651824Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626884745726534:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:12:47.659532Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NjUyMWQzYjEtODE5MWJkODctNjBkN2QwYWQtNWUxNTI5MTk=, ActorId: [1:7519626884745726443:2297], ActorState: ExecuteState, TraceId: 01jyhwjj6d5kn25dqv5ys69x9t, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:12:47.661907Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:12:47.686279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.811227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519626889040694121:2621] 2025-06-24T21:12:48.568928Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626867565856489:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:48.569000Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-24T21:12:54.216142Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-24T21:12:54.237104Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T21:12:54.239641Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519626914810498093:2698], Recipient [1:7519626871860824088:2146]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:12:54.239681Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:12:54.239693Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:12:54.239733Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519626914810498089:2695], Recipient [1:7519626871860824088:2146]: {TEvModifySchemeTransaction txid# 281474976715673 TabletId# 72057594046644480} 2025-06-24T21:12:54.239754Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T21:12:54.333981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQu ... 037893][test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 6 DataSize: 0 UsedReserveSize: 0 2025-06-24T21:14:44.852718Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][test-topic] ProcessPendingStats. PendingUpdates size 1 2025-06-24T21:14:44.853037Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271188001, Sender [7:7519627360496906844:2414], Recipient [7:7519627313252265540:2142]: NKikimrPQ.TEvPeriodicTopicStats PathId: 13 Generation: 1 Round: 6 DataSize: 0 UsedReserveSize: 0 SubDomainOutOfSpace: false 2025-06-24T21:14:44.853077Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4989: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2025-06-24T21:14:44.853099Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046644480, LocalPathId: 13] DataSize 0 UsedReserveSize 0 2025-06-24T21:14:44.853127Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.099996s, queue# 1 2025-06-24T21:14:44.868365Z node 7 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:157: new Describe consumer request 2025-06-24T21:14:44.868467Z node 7 :PQ_READ_PROXY DEBUG: schema_actors.cpp:473: TDescribeConsumerActor for request operation_params { } path: "test-topic" consumer: "test-consumer" include_stats: true include_location: true 2025-06-24T21:14:44.871065Z node 7 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [7:7519627386266711714:2745]: Request location 2025-06-24T21:14:44.871099Z node 7 :PQ_READ_PROXY DEBUG: schema_actors.cpp:686: DescribeTopicImpl [7:7519627386266711714:2745]: Request sessions 2025-06-24T21:14:44.871404Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [7:7519627360496906844:2414], Recipient [7:7519627313252265540:2142]: NKikimrSchemeOp.TDescribePath PathId: 13 SchemeshardId: 72057594046644480 2025-06-24T21:14:44.871437Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event 
TEvSchemeShard::TEvDescribeScheme 2025-06-24T21:14:44.871930Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 269877761, Sender [7:7519627386266711718:3170], Recipient [7:7519627360496906845:2415]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:14:44.871975Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5282: HandleHook, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:14:44.871992Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2880: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerConnected 2025-06-24T21:14:44.872020Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037892] server connected, pipe [7:7519627386266711716:2746], now have 1 active actors on pipe 2025-06-24T21:14:44.872344Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][test-topic] pipe [7:7519627386266711717:2747] connected; active server actors: 1 2025-06-24T21:14:44.872571Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][test-topic] addPartitionToResponse tabletId 72075186224037892, partitionId 0, NodeId 7, Generation 1 2025-06-24T21:14:44.872602Z node 7 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [7:7519627386266711714:2745]: Got location 2025-06-24T21:14:44.872622Z node 7 :PQ_READ_PROXY DEBUG: schema_actors.cpp:729: DescribeTopicImpl [7:7519627386266711714:2745]: Got sessions 2025-06-24T21:14:44.872725Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271187975, Sender [7:7519627386266711714:2745], Recipient [7:7519627360496906845:2415]: NKikimrPQ.TStatus ClientId: "test-consumer" 2025-06-24T21:14:44.872740Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5272: HandleHook, processing event TEvPersQueue::TEvStatus 2025-06-24T21:14:44.872756Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:1805: [PQ: 72075186224037892] Handle TEvPersQueue::TEvStatus 2025-06-24T21:14:44.872876Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188491 (NKikimr::TEvPQ::TEvPartitionStatus), Tablet [7:7519627360496906845:2415], Partition 0, Sender [7:7519627360496906845:2415], Recipient [7:7519627360496906903:2418], Cookie: 0 2025-06-24T21:14:44.872915Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188491, Sender [7:7519627360496906845:2415], Recipient [7:7519627360496906903:2418]: NKikimr::TEvPQ::TEvPartitionStatus 2025-06-24T21:14:44.872939Z node 7 :PERSQUEUE TRACE: partition.h:597: StateIdle, processing event TEvPQ::TEvPartitionStatus 2025-06-24T21:14:44.873130Z node 7 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2025-06-24T21:14:44.874166Z node 7 :PERSQUEUE_READ_BALANCER INFO: 
read_balancer__balancing.cpp:1664: [72075186224037893][test-topic] pipe [7:7519627386266711717:2747] disconnected; active server actors: 1 2025-06-24T21:14:44.874195Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][test-topic] pipe [7:7519627386266711717:2747] disconnected no session 2025-06-24T21:14:44.874258Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 269877764, Sender [7:7519627386266711718:3170], Recipient [7:7519627360496906845:2415]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:14:44.874280Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5283: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:14:44.874296Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2893: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:14:44.874318Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [7:7519627386266711716:2746] destroyed 2025-06-24T21:14:44.876247Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627360496906845:2415], Partition 0, Sender [0:0:0], Recipient [7:7519627360496906903:2418], Cookie: 0 2025-06-24T21:14:44.876329Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627360496906903:2418]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:14:44.876363Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:14:44.876415Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:14:44.876507Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:14:44.876535Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:14:44.876585Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-24T21:14:44.954698Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435095, Sender [0:0:0], Recipient [7:7519627313252265540:2142]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTopicStats 2025-06-24T21:14:44.954746Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5128: StateWork, processing event TEvPrivate::TEvPersistTopicStats 2025-06-24T21:14:44.954774Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-24T21:14:44.954788Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:128: Will execute TTxStoreStats, queue# 1 2025-06-24T21:14:44.954852Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.000000s, queue# 1 2025-06-24T21:14:44.955499Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435095, Sender [0:0:0], Recipient [7:7519627313252265540:2142]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTopicStats 2025-06-24T21:14:44.955532Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5128: StateWork, processing event TEvPrivate::TEvPersistTopicStats 2025-06-24T21:14:44.955559Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-24T21:14:44.976602Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627360496906845:2415], Partition 0, Sender [0:0:0], Recipient [7:7519627360496906903:2418], Cookie: 0 2025-06-24T21:14:44.976676Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627360496906903:2418]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:14:44.976707Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:14:44.976758Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:14:44.976839Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:14:44.976866Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:14:44.976912Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-24T21:14:45.078175Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627360496906845:2415], Partition 0, Sender [0:0:0], Recipient [7:7519627360496906903:2418], Cookie: 0 2025-06-24T21:14:45.078259Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627360496906903:2418]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:14:45.078290Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:14:45.078339Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:14:45.078415Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:14:45.078440Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:14:45.078476Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant-DomainLoginOnly [FAIL] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant-DomainLoginOnly-StrictAclCheck ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sequence/unittest >> TSequence::AlterTableSetDefaultFromSequence [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:14:37.584655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:14:37.584786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:14:37.584829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:14:37.584866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:14:37.584913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:14:37.584945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:14:37.585026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:14:37.585144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, 
InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:14:37.585996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:14:37.594478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:14:37.693353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:14:37.693418Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:37.712702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:14:37.713124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:14:37.713310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:14:37.722872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:14:37.723203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:14:37.725506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:14:37.725990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:14:37.734375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:14:37.735510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:14:37.741377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:14:37.741478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:14:37.741743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:14:37.741798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:14:37.741850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:14:37.741948Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:14:37.748809Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:14:37.890662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:14:37.892610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:14:37.896635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:14:37.896719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:14:37.898544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:14:37.898651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:14:37.902020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:14:37.902910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:14:37.903166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:14:37.903306Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:14:37.903349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:14:37.903389Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:14:37.905937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:14:37.906010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:14:37.906055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:14:37.908138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:14:37.908186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:14:37.908227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:14:37.908298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:14:37.912938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:14:37.918095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:14:37.918879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:14:37.919732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:14:37.919863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:14:37.919933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:14:37.921261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:14:37.921320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:14:37.921529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:14:37.921587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:14:37.927190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:14:37.927248Z node 1 :FLAT_TX_SCHEMESHARD ... 
409549 Status: COMPLETE TxId: 114 Step: 5000014 OrderId: 114 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 1806 } } CommitVersion { Step: 5000014 TxId: 114 } 2025-06-24T21:14:47.080374Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T21:14:47.081844Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [7:1055:2990], Recipient [7:127:2151]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:14:47.081888Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:14:47.081917Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T21:14:47.082985Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269551620, Sender [7:993:2936], Recipient [7:127:2151]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 993 RawX2: 30064774008 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2025-06-24T21:14:47.083040Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4983: StateWork, processing event TEvDataShard::TEvSchemaChanged 2025-06-24T21:14:47.083180Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 993 RawX2: 30064774008 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2025-06-24T21:14:47.083238Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 114, tablet: 72075186233409549, partId: 0 2025-06-24T21:14:47.083446Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 114:0, at schemeshard: 72057594046678944, message: Source { RawX1: 993 RawX2: 30064774008 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2025-06-24T21:14:47.083529Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 114:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:14:47.083772Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 114:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 993 RawX2: 30064774008 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2025-06-24T21:14:47.083871Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 114:0, shardIdx: 72057594046678944:4, shard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:14:47.083976Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 114:0, at schemeshard: 72057594046678944 2025-06-24T21:14:47.084037Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 114:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-06-24T21:14:47.084105Z node 7 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 114:0 129 -> 240 2025-06-24T21:14:47.084342Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T21:14:47.085508Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:14:47.085667Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 114 2025-06-24T21:14:47.085724Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:14:47.088955Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 114 2025-06-24T21:14:47.089005Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:14:47.089125Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 114:0, at schemeshard: 72057594046678944 2025-06-24T21:14:47.089156Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:14:47.089301Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 114:0, at schemeshard: 72057594046678944 2025-06-24T21:14:47.089366Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:14:47.089433Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 114:0 2025-06-24T21:14:47.089587Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [7:993:2936] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 114 at schemeshard: 72057594046678944 2025-06-24T21:14:47.089986Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435072, Sender [7:127:2151], Recipient [7:127:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-24T21:14:47.090031Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-24T21:14:47.090094Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 114:0, at schemeshard: 72057594046678944 2025-06-24T21:14:47.090183Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 114:0 ProgressState 2025-06-24T21:14:47.090360Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T21:14:47.090403Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#114:0 progress is 1/1 2025-06-24T21:14:47.090446Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 114 ready parts: 1/1 2025-06-24T21:14:47.090527Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#114:0 progress is 1/1 
2025-06-24T21:14:47.090573Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 114 ready parts: 1/1 2025-06-24T21:14:47.090626Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 114, ready parts: 1/1, is published: true 2025-06-24T21:14:47.090733Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [7:394:2360] message: TxId: 114 2025-06-24T21:14:47.090804Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 114 ready parts: 1/1 2025-06-24T21:14:47.090856Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 114:0 2025-06-24T21:14:47.090903Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 114:0 2025-06-24T21:14:47.091077Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-24T21:14:47.093634Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:14:47.093767Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:631: Send to actor: [7:394:2360] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 114 at schemeshard: 72057594046678944 2025-06-24T21:14:47.094002Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 114: got EvNotifyTxCompletionResult 2025-06-24T21:14:47.094072Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 114: satisfy waiter [7:1022:2957] 2025-06-24T21:14:47.094324Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [7:1024:2959], Recipient [7:127:2151]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:14:47.094378Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:14:47.094415Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 114 TestModificationResults wait txId: 115 2025-06-24T21:14:47.095731Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [7:1063:2998], Recipient [7:127:2151]: {TEvModifySchemeTransaction txid# 115 TabletId# 72057594046678944} 2025-06-24T21:14:47.095813Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T21:14:47.098864Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterTable AlterTable { Name: "Table3" Columns { Name: "value" DefaultFromSequence: "/MyRoot/seq1" } } } TxId: 115 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:14:47.099212Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:508: TAlterTable Propose, path: /MyRoot/Table3, pathId: , opId: 115:0, at schemeshard: 72057594046678944 2025-06-24T21:14:47.099756Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:139: IgniteOperation, opId: 115:1, propose status:StatusInvalidParameter, reason: Column 'value' is of type Bool but default expression is of type Int64, at schemeshard: 72057594046678944 2025-06-24T21:14:47.100072Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T21:14:47.108495Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 115, response: Status: StatusInvalidParameter Reason: "Column \'value\' is of type Bool but default expression is of type Int64" TxId: 115 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:14:47.108873Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 115, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Column 'value' is of type Bool but default expression is of type Int64, operation: ALTER TABLE, path: /MyRoot/Table3 2025-06-24T21:14:47.108948Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 115, wait until txId: 115 |95.8%| [TA] $(B)/ydb/core/tx/schemeshard/ut_sequence/test-results/unittest/{meta.json ... results_accumulator.log} |95.8%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sequence/test-results/unittest/{meta.json ... results_accumulator.log} >> TColumnShardTestReadWrite::CompactionSplitGranule_PKDatetime >> TColumnShardTestReadWrite::WriteOverload-InStore >> TColumnShardTestReadWrite::CompactionSplitGranule_PKInt64 >> TColumnShardTestReadWrite::CompactionInGranule_PKInt64_Reboot >> TColumnShardTestReadWrite::ReadAggregate [GOOD] >> TColumnShardTestReadWrite::WriteReadModifications >> TColumnShardTestReadWrite::ReadWithProgramNoProjection >> YdbOlapStore::LogPagingAfter [GOOD] >> Normalizers::SchemaVersionsNormalizer >> GroupWriteTest::TwoTables [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::ReadAggregate [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8328;columns=19; 2025-06-24T21:14:45.290346Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:45.324922Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:45.325254Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:45.335198Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:45.335465Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:45.335750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:45.335883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:45.336122Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:45.336247Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:45.336386Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:45.336513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:45.336656Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:45.336781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:45.336899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:45.373367Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:45.373759Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:45.373825Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:45.374003Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:45.374197Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:45.374266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:45.374308Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:45.374399Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:45.374457Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:45.374498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:45.374525Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:45.374723Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:45.374805Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:45.374856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:45.374886Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:45.374980Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:45.375044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:45.375104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:45.375172Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:45.375239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:45.375280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:45.375312Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:45.375532Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:45.375577Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:45.375651Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:45.375841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:45.375898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:45.375932Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:45.376096Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:45.376141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:45.376226Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:45.376313Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:45.376380Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:45.376422Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:45.376459Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:45.376905Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=52; 2025-06-24T21:14:45.376983Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=34; 2025-06-24T21:14:45.377060Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=35; 2025-06-24T21:14:45.377139Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=38; 2025-06-24T21:14:45.377227Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:45.377321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:45.377386Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract ... n=0;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-24T21:14:49.001766Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-24T21:14:49.001808Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=76; 2025-06-24T21:14:49.001872Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1;merger=0;interval_id=76; 2025-06-24T21:14:49.001915Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-24T21:14:49.002009Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-06-24T21:14:49.002043Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=1;finished=1; 2025-06-24T21:14:49.002081Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:14:49.002632Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:14:49.002789Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:1;schema=100: binary 101: binary 102: binary 103: uint64;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-06-24T21:14:49.002831Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:14:49.002931Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready 
result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;);columns=4;rows=1; 2025-06-24T21:14:49.002993Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=26;num_rows=1;batch_columns=100,101,102,103; 2025-06-24T21:14:49.003293Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[2:421:2433];bytes=26;rows=1;faults=0;finished=0;fault=0;schema=100: binary 101: binary 102: binary 103: uint64; 2025-06-24T21:14:49.003423Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-06-24T21:14:49.003535Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-06-24T21:14:49.003628Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-06-24T21:14:49.003962Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:14:49.004088Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-06-24T21:14:49.004179Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-06-24T21:14:49.004215Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [2:422:2434] finished for tablet 9437184 2025-06-24T21:14:49.004652Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[2:421:2433];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.013},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.015}],"full":{"a":1750799688989158,"name":"_full_task","f":1750799688989158,"d_finished":0,"c":0,"l":1750799689004266,"d":15108},"events":[{"name":"bootstrap","f":1750799688989403,"d_finished":2631,"c":1,"l":1750799688992034,"d":2631},{"a":1750799689003940,"name":"ack","f":1750799689002606,"d_finished":1047,"c":1,"l":1750799689003653,"d":1373},{"a":1750799689003922,"name":"processing","f":1750799688993303,"d_finished":7333,"c":10,"l":1750799689003656,"d":7677},{"name":"ProduceResults","f":1750799688990903,"d_finished":2710,"c":13,"l":1750799689004201,"d":2710},{"a":1750799689004204,"name":"Finish","f":1750799689004204,"d_finished":0,"c":0,"l":1750799689004266,"d":62},{"name":"task_result","f":1750799688993321,"d_finished":6138,"c":9,"l":1750799689002161,"d":6138}],"id":"9437184::76"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-06-24T21:14:49.004746Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[2:421:2433];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:14:49.005157Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[2:421:2433];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.013},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.015}],"full":{"a":1750799688989158,"name":"_full_task","f":1750799688989158,"d_finished":0,"c":0,"l":1750799689004788,"d":15630},"events":[{"name":"bootstrap","f":1750799688989403,"d_finished":2631,"c":1,"l":1750799688992034,"d":2631},{"a":1750799689003940,"name":"ack","f":1750799689002606,"d_finished":1047,"c":1,"l":1750799689003653,"d":1895},{"a":1750799689003922,"name":"processing","f":1750799688993303,"d_finished":7333,"c":10,"l":1750799689003656,"d":8199},{"name":"ProduceResults","f":1750799688990903,"d_finished":2710,"c":13,"l":1750799689004201,"d":2710},{"a":1750799689004204,"name":"Finish","f":1750799689004204,"d_finished":0,"c":0,"l":1750799689004788,"d":584},{"name":"task_result","f":1750799688993321,"d_finished":6138,"c":9,"l":1750799689002161,"d":6138}],"id":"9437184::76"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-06-24T21:14:49.005236Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:14:48.988628Z;index_granules=0;index_portions=1;index_batches=2;schema_columns=2;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=14056;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=14056;selected_rows=0; 2025-06-24T21:14:49.005276Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:14:49.005552Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:422:2434];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;; >> Normalizers::EmptyTablesNormalizer ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> GroupWriteTest::TwoTables [GOOD] Test command err: RandomSeed# 4614278823797721104 2025-06-24T21:14:39.855336Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058679074007041 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 72058679074007041 MinGeneration# 1 ReadBody# false DiscoverBlockedGeneration# true ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-06-24T21:14:39.855436Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058502699329537 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 72058502699329537 MinGeneration# 1 ReadBody# false 
DiscoverBlockedGeneration# true ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-06-24T21:14:39.883803Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058679074007041 Generation# 1 recieved TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-06-24T21:14:39.883874Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058679074007041 Generation# 1 going to send TEvBlock {TabletId# 72058679074007041 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-06-24T21:14:39.884001Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058502699329537 Generation# 1 recieved TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-06-24T21:14:39.884029Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058502699329537 Generation# 1 going to send TEvBlock {TabletId# 72058502699329537 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-06-24T21:14:39.888571Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058679074007041 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-06-24T21:14:39.888695Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058502699329537 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-06-24T21:14:39.909322Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058502699329537 Generation# 2 going to send TEvCollectGarbage {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:14:39.909427Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058679074007041 Generation# 2 going to send TEvCollectGarbage {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:14:39.913963Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058502699329537 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-24T21:14:39.914057Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058679074007041 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-24T21:14:49.421459Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-24T21:14:49.421569Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058679074007041 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 22 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:14:49.421636Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058502699329537 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 22 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:14:49.421673Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-24T21:14:49.421712Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058679074007041 
Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 23 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:14:49.421753Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058502699329537 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 23 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:14:49.421783Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-24T21:14:49.421821Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058679074007041 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 24 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:14:49.421861Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058502699329537 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 24 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:14:49.525319Z 1 00h01m20.010512s :BS_LOAD_TEST INFO: TabletId# 72058679074007041 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 22 Channel# 0 Status# OK} 2025-06-24T21:14:49.525422Z 1 00h01m20.010512s :BS_LOAD_TEST INFO: TabletId# 72058502699329537 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 22 Channel# 0 Status# OK} 2025-06-24T21:14:49.525470Z 1 00h01m20.010512s :BS_LOAD_TEST INFO: TabletId# 72058679074007041 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 23 Channel# 0 Status# OK} 2025-06-24T21:14:49.525515Z 1 00h01m20.010512s :BS_LOAD_TEST INFO: TabletId# 72058502699329537 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 23 Channel# 0 Status# OK} 2025-06-24T21:14:49.525558Z 1 00h01m20.010512s :BS_LOAD_TEST INFO: TabletId# 72058679074007041 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 24 Channel# 0 Status# OK} 2025-06-24T21:14:49.525601Z 1 00h01m20.010512s :BS_LOAD_TEST INFO: TabletId# 72058502699329537 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 24 Channel# 0 Status# OK} >> TColumnShardTestReadWrite::ReadWithProgramNoProjection [GOOD] >> TColumnShardTestReadWrite::WriteExoticTypes [GOOD] >> TColumnShardTestReadWrite::WriteReadModifications [GOOD] >> TColumnShardTestReadWrite::WriteReadNoCompression [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::ReadWithProgramNoProjection [GOOD] Test command err: 2025-06-24T21:14:49.784443Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:49.816564Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:49.816868Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:49.824767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:49.825024Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:49.825270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:49.825409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:49.825544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:49.825666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:49.825876Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:49.825995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:49.826113Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:49.826246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.826362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:49.861760Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:49.862040Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:49.862108Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:49.862290Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:49.862439Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:49.862511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:49.862560Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:49.862667Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:49.862729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:49.862782Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:49.862812Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:49.862983Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:49.863047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:49.863086Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:49.863115Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:49.863238Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:49.863309Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:49.863356Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:49.863401Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:49.863440Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:49.863467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:49.863495Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:49.863686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:49.863733Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:49.863769Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:49.863918Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:49.863993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:49.864015Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:49.864142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:49.864202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.864237Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.864304Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:49.864374Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:49.864415Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:49.864450Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:49.864809Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=39; 2025-06-24T21:14:49.864883Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=31; 2025-06-24T21:14:49.864941Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=23; 2025-06-24T21:14:49.865018Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=41; 2025-06-24T21:14:49.865115Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:49.865198Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:49.865244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:49.865281Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-24T21:14:50.654208Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:280:2292];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:14:50.654346Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:280:2292];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;);columns=2;rows=100; 2025-06-24T21:14:50.654430Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:280:2292];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=1200;num_rows=100;batch_columns=level,timestamp; 2025-06-24T21:14:50.654772Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:280:2292];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:272:2284];bytes=1200;rows=100;faults=0;finished=0;fault=0;schema=level: int32 timestamp: timestamp[us]; 2025-06-24T21:14:50.654958Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:280:2292];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-24T21:14:50.655112Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:280:2292];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-24T21:14:50.655426Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:280:2292];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-24T21:14:50.655604Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:280:2292];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:14:50.655755Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:280:2292];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-24T21:14:50.656119Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:280:2292];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-24T21:14:50.656160Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:280:2292] finished for tablet 9437184 2025-06-24T21:14:50.656668Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:280:2292];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:272:2284];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.003},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.008},{"events":["l_task_result"],"t":0.019},{"events":["f_ack"],"t":0.02},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.022}],"full":{"a":1750799690633849,"name":"_full_task","f":1750799690633849,"d_finished":0,"c":0,"l":1750799690656215,"d":22366},"events":[{"name":"bootstrap","f":1750799690634083,"d_finished":4692,"c":1,"l":1750799690638775,"d":4692},{"a":1750799690655572,"name":"ack","f":1750799690653886,"d_finished":1570,"c":1,"l":1750799690655456,"d":2213},{"a":1750799690655548,"name":"processing","f":1750799690642144,"d_finished":8359,"c":10,"l":1750799690655459,"d":9026},{"name":"ProduceResults","f":1750799690637301,"d_finished":3688,"c":13,"l":1750799690656145,"d":3688},{"a":1750799690656148,"name":"Finish","f":1750799690656148,"d_finished":0,"c":0,"l":1750799690656215,"d":67},{"name":"task_result","f":1750799690642164,"d_finished":6647,"c":9,"l":1750799690653758,"d":6647}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-24T21:14:50.656807Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:280:2292];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:272:2284];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:14:50.657326Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:280:2292];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:272:2284];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.003},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.008},{"events":["l_task_result"],"t":0.019},{"events":["f_ack"],"t":0.02},{"events":["l_ProduceResults","f_Finish"],"t":0.022},{"events":["l_ack","l_processing","l_Finish"],"t":0.023}],"full":{"a":1750799690633849,"name":"_full_task","f":1750799690633849,"d_finished":0,"c":0,"l":1750799690656855,"d":23006},"events":[{"name":"bootstrap","f":1750799690634083,"d_finished":4692,"c":1,"l":1750799690638775,"d":4692},{"a":1750799690655572,"name":"ack","f":1750799690653886,"d_finished":1570,"c":1,"l":1750799690655456,"d":2853},{"a":1750799690655548,"name":"processing","f":1750799690642144,"d_finished":8359,"c":10,"l":1750799690655459,"d":9666},{"name":"ProduceResults","f":1750799690637301,"d_finished":3688,"c":13,"l":1750799690656145,"d":3688},{"a":1750799690656148,"name":"Finish","f":1750799690656148,"d_finished":0,"c":0,"l":1750799690656855,"d":707},{"name":"task_result","f":1750799690642164,"d_finished":6647,"c":9,"l":1750799690653758,"d":6647}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-24T21:14:50.657414Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:280:2292];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:14:50.632635Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=3;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=8392;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=8392;selected_rows=0; 2025-06-24T21:14:50.657474Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:280:2292];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:14:50.657811Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:280:2292];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;; 2025-06-24T21:14:50.658552Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Finished read cookie: 1 at tablet 9437184 2025-06-24T21:14:50.658787Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 100 scanId: 0 version: {1750799690751:100} readable: {1750799690751:max} at tablet 9437184 2025-06-24T21:14:50.658907Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 100 scanId: 0 at tablet 9437184 2025-06-24T21:14:50.659137Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=100;scan_id=0;gen=0;table=;snapshot={1750799690751:100};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 100 } Function { Id: 1 Arguments { Id: 1 } Arguments { Id: 9 } } } } Command { Filter { Predicate { Id: 100 } } } ; 2025-06-24T21:14:50.659262Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=100;scan_id=0;gen=0;table=;snapshot={1750799690751:100};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 100 } Function { Id: 1 Arguments { Id: 1 } Arguments { Id: 9 } } } } Command { Filter { Predicate { Id: 100 } } } ; 2025-06-24T21:14:50.659383Z node 1 :TX_COLUMNSHARD_SCAN WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=100;scan_id=0;gen=0;table=;snapshot={1750799690751:100};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:14;event=TTxScan failed;problem=cannot parse program;details=Can't parse SsaProgram: program has no projections; >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt32 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteExoticTypes [GOOD] Test command err: 2025-06-24T21:14:44.476953Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:44.501503Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:44.502565Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:44.515276Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:44.515529Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:44.515808Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:44.515979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:44.516135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:44.516281Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:44.516425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:44.516557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:44.516673Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:44.516795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.516975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:44.556327Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:44.556625Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:44.556704Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:44.556917Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:44.557981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:44.558069Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:44.558133Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:44.558263Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:44.558371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:44.558427Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:44.558464Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:44.558643Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:44.558707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:44.558745Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:44.558778Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:44.558905Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:44.558963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:44.559003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:44.559055Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:44.559109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:44.559186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:44.559228Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:44.559468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:44.559509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:44.559549Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:44.559737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:44.559799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:44.559830Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:44.559983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:44.560033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.560071Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.560172Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:44.560250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:44.560316Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:44.560349Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:44.560783Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=45; 2025-06-24T21:14:44.560876Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=40; 2025-06-24T21:14:44.560952Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=35; 2025-06-24T21:14:44.562712Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=1706; 2025-06-24T21:14:44.562819Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:44.562915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:44.562954Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:44.562996Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: ta ... 
[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"19,19,19,19,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"20,20,20,20,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"21,21,21,21,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"22,22,22,22,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"23,23,23,23,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"24,24,24,24,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"25,25,25,25,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"26,26,26,26,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"27,27,27,27,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"28,28,28,28,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"29,29,29,29,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"30,30,30,30,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"31,31,31,31,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"32,32,32,32,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"33,33,33,33,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"34,34,34,34,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"35,35,35,35,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"36,36,36,36,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"37,37,37,37,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"38,38,38,38,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"39,39,39,39,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"40,40,40,40,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"41,41,41,41,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"42,42,42,42,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1}
,"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"43,43,43,43,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"44,44,44,44,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"45,45,45,45,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"46,46,46,46,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"47,47,47,47,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"48,48,48,48,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"49,49,49,49,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"50,50,50,50,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"51,51,51,51,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"52,52,52,52,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"53,53,53,53,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"54,54,54,54,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"55,55,55,55,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"56,56,56,56,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"57,57,57,57,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"58,58,58,58,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"59,59,59,59,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"60,60,60,60,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"61,61,61,61,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"62,62,62,62,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"63,63,63,63,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"64,64,64,64,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"65,65,65,65,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"66,66,66,66,"}},{"i":{"txs":[],"starts":[{"inc
":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"67,67,67,67,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"68,68,68,68,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"69,69,69,69,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"70,70,70,70,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"71,71,71,71,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"72,72,72,72,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"73,73,73,73,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"74,74,74,74,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"75,75,75,75,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"76,76,76,76,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"77,77,77,77,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"78,78,78,78,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"79,79,79,79,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"80,80,80,80,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"81,81,81,81,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"82,82,82,82,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"83,83,83,83,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"84,84,84,84,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"85,85,85,85,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"86,86,86,86,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"87,87,87,87,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"88,88,88,88,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"89,89,89,89,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"90,90,90,90,"}},{"i":{"tx
s":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"91,91,91,91,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"92,92,92,92,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"93,93,93,93,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"94,94,94,94,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"95,95,95,95,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"96,96,96,96,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"97,97,97,97,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"98,98,98,98,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"99,99,99,99,"}}]}; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteReadModifications [GOOD] Test command err: 2025-06-24T21:14:49.651465Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:49.676983Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:49.677231Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:49.684878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:49.685143Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:49.685416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:49.685554Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:49.685747Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:49.685884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:49.685999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:49.686116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:49.686224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:49.686342Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.686459Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:49.723794Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:49.724108Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:49.724182Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:49.724385Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:49.724614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:49.724707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:49.724755Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:49.724879Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:49.724958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:49.725007Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:49.725063Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:49.725281Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:49.725366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:49.725424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:49.725463Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:49.725580Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:49.725644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:49.725715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:49.725765Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:49.725828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:49.725869Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:49.725899Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:49.726151Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:49.726206Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:49.726255Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:49.726444Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:49.726496Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:49.726531Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:49.726700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:49.726758Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.726798Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.726879Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:49.726953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:49.727026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:49.727062Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:49.727593Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=56; 2025-06-24T21:14:49.727686Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=36; 2025-06-24T21:14:49.727779Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=42; 2025-06-24T21:14:49.727871Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=47; 2025-06-24T21:14:49.727989Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:49.728131Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:49.728188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:49.728242Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
0Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=interval.cpp:28;event=fetched;interval_idx=0; 2025-06-24T21:14:51.089496Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=interval.cpp:17;event=start_construct_result;interval_idx=0;interval_id=6;memory=8394164;count=4; 2025-06-24T21:14:51.090142Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=merge.cpp:152;event=DoExecute;interval_idx=0; 2025-06-24T21:14:51.090750Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=source.cpp:52;event=source_ready;intervals_count=1;source_idx=3; 2025-06-24T21:14:51.090854Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:14:51.090880Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-06-24T21:14:51.090900Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:14:51.090969Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-24T21:14:51.091013Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:14:51.091076Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-06-24T21:14:51.091111Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:14:51.091315Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-24T21:14:51.091343Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-24T21:14:51.091386Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=6; 2025-06-24T21:14:51.091432Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=0;merger=0;interval_id=6; 2025-06-24T21:14:51.091480Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-24T21:14:51.091547Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:14:51.091671Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:14:51.091901Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:14:51.092079Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:14:51.092196Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:14:51.092240Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:394:2406] finished for tablet 9437184 2025-06-24T21:14:51.092697Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:390:2402];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.005},{"events":["f_ack","l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish","l_task_result"],"t":0.022}],"full":{"a":1750799691069476,"name":"_full_task","f":1750799691069476,"d_finished":0,"c":0,"l":1750799691092300,"d":22824},"events":[{"name":"bootstrap","f":1750799691069655,"d_finished":5223,"c":1,"l":1750799691074878,"d":5223},{"a":1750799691091873,"name":"ack","f":1750799691091873,"d_finished":0,"c":0,"l":1750799691092300,"d":427},{"a":1750799691091857,"name":"processing","f":1750799691075012,"d_finished":6484,"c":26,"l":1750799691091743,"d":6927},{"name":"ProduceResults","f":1750799691072424,"d_finished":3638,"c":28,"l":1750799691092224,"d":3638},{"a":1750799691092228,"name":"Finish","f":1750799691092228,"d_finished":0,"c":0,"l":1750799691092300,"d":72},{"name":"task_result","f":1750799691075028,"d_finished":6202,"c":26,"l":1750799691091741,"d":6202}],"id":"9437184::9"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:14:51.092794Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:390:2402];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:14:51.093117Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:390:2402];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.005},{"events":["f_ack","l_ProduceResults","f_Finish","l_task_result"],"t":0.022},{"events":["l_ack","l_processing","l_Finish"],"t":0.023}],"full":{"a":1750799691069476,"name":"_full_task","f":1750799691069476,"d_finished":0,"c":0,"l":1750799691092840,"d":23364},"events":[{"name":"bootstrap","f":1750799691069655,"d_finished":5223,"c":1,"l":1750799691074878,"d":5223},{"a":1750799691091873,"name":"ack","f":1750799691091873,"d_finished":0,"c":0,"l":1750799691092840,"d":967},{"a":1750799691091857,"name":"processing","f":1750799691075012,"d_finished":6484,"c":26,"l":1750799691091743,"d":7467},{"name":"ProduceResults","f":1750799691072424,"d_finished":3638,"c":28,"l":1750799691092224,"d":3638},{"a":1750799691092228,"name":"Finish","f":1750799691092228,"d_finished":0,"c":0,"l":1750799691092840,"d":612},{"name":"task_result","f":1750799691075028,"d_finished":6202,"c":26,"l":1750799691091741,"d":6202}],"id":"9437184::9"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:14:51.093219Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:14:51.068974Z;index_granules=0;index_portions=4;index_batches=4;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=9344;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=9344;selected_rows=0; 2025-06-24T21:14:51.093262Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:14:51.093623Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:394:2406];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbOlapStore::LogPagingAfter [GOOD] Test command err: 2025-06-24T21:12:13.962772Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626737563935863:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:13.967320Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004414/r3tmp/tmpN2FrL2/pdisk_1.dat 
2025-06-24T21:12:14.614748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:14.614869Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:14.635246Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:14.665819Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24809, node 1 2025-06-24T21:12:14.985011Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:15.046169Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:15.046210Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:15.046224Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:15.046356Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27047 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:15.560854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:27047 2025-06-24T21:12:16.036093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_store.cpp:451) waiting... 
2025-06-24T21:12:16.214939Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519626750448838608:2285];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:12:16.215257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519626750448838608:2285];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:12:16.215561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519626750448838608:2285];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:12:16.215692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519626750448838608:2285];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:12:16.215818Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519626750448838608:2285];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:12:16.215956Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519626750448838608:2285];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:12:16.216105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519626750448838608:2285];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:12:16.216221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519626750448838608:2285];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:12:16.216387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519626750448838608:2285];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:12:16.216491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519626750448838608:2285];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:12:16.216604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519626750448838608:2285];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:12:16.255767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626750448838625:2287];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:12:16.255833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7519626750448838625:2287];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:12:16.256077Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626750448838625:2287];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:12:16.256197Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626750448838625:2287];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:12:16.256317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626750448838625:2287];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:12:16.256416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626750448838625:2287];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:12:16.256514Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626750448838625:2287];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:12:16.256630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626750448838625:2287];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:12:16.256754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626750448838625:2287];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:12:16.256868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626750448838625:2287];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:12:16.256990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519626750448838625:2287];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:12:16.303092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626750448838611:2286];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:12:16.303618Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626750448838611:2286];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:12:16.303883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519626750448838611:2286];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:12:16.303980Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626750448838611:2286];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:12:16.304089Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626750448838611:2286];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:12:16.304202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626750448838611:2286];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:12:16.304302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519626750448838611:2286];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;descripti ... 3103], CA [28:7519627400970892632:3154], CA [28:7519627400970892626:3149], CA [28:7519627400970892598:3128], CA [28:7519627400970892586:3125], CA [28:7519627400970892635:3157], CA [28:7519627400970892556:3106], CA [28:7519627400970892550:3101], CA [28:7519627400970892630:3152], CA [28:7519627400970892602:3131], CA [28:7519627400970892553:3104], CA [28:7519627400970892638:3160], CA [28:7519627400970892628:3150], CA [28:7519627400970892606:3134], CA [28:7519627400970892600:3129], CA [28:7519627400970892557:3107], CA [28:7519627400970892642:3163], CA [28:7519627400970892636:3158], CA [28:7519627400970892604:3132], CA [28:7519627400970892610:3137], CA [28:7519627400970892576:3118], CA [28:7519627400970892571:3113], CA [28:7519627400970892614:3140], CA [28:7519627400970892608:3135], CA [28:7519627400970892574:3116], CA [28:7519627400970892580:3121], CA [28:7519627400970892558:3108], CA [28:7519627400970892612:3138], CA [28:7519627400970892578:3119], CA [28:7519627400970892585:3124], CA [28:7519627400970892562:3111], CA [28:7519627400970892615:3141], CA [28:7519627400970892582:3122], CA [28:7519627400970892619:3144], CA [28:7519627400970892623:3147], CA [28:7519627400970892617:3142], CA [28:7519627400970892547:3099], CA [28:7519627400970892633:3155], CA [28:7519627400970892621:3145], CA [28:7519627400970892587:3126], 2025-06-24T21:14:47.179282Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [28:7519627400970892542:3086] TxId: 281474976715670. Ctx: { TraceId: 01jyhwp6020rp0tbm9hn807g24, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=MzQ4NzNmNWYtNTIwNjYyMzMtNTExMzVkZi00MWQwMTU1ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [28:7519627400970892637:3159], task: 61, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-24T21:14:47.179395Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [28:7519627400970892542:3086] TxId: 281474976715670. Ctx: { TraceId: 01jyhwp6020rp0tbm9hn807g24, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=MzQ4NzNmNWYtNTIwNjYyMzMtNTExMzVkZi00MWQwMTU1ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Waiting for: CA [28:7519627400970892551:3102], CA [28:7519627400970892631:3153], CA [28:7519627400970892625:3148], CA [28:7519627400970892634:3156], CA [28:7519627400970892549:3100], CA [28:7519627400970892555:3105], CA [28:7519627400970892639:3161], CA [28:7519627400970892601:3130], CA [28:7519627400970892629:3151], CA [28:7519627400970892643:3164], CA [28:7519627400970892637:3159], CA [28:7519627400970892605:3133], CA [28:7519627400970892572:3114], CA [28:7519627400970892640:3162], CA [28:7519627400970892609:3136], CA [28:7519627400970892575:3117], CA [28:7519627400970892613:3139], CA [28:7519627400970892560:3109], CA [28:7519627400970892579:3120], CA [28:7519627400970892573:3115], CA [28:7519627400970892564:3112], CA [28:7519627400970892583:3123], CA [28:7519627400970892561:3110], CA [28:7519627400970892618:3143], CA [28:7519627400970892622:3146], CA [28:7519627400970892589:3127], CA [28:7519627400970892552:3103], CA [28:7519627400970892632:3154], CA [28:7519627400970892626:3149], CA [28:7519627400970892598:3128], CA [28:7519627400970892586:3125], CA [28:7519627400970892635:3157], CA [28:7519627400970892556:3106], CA [28:7519627400970892550:3101], CA [28:7519627400970892630:3152], CA [28:7519627400970892602:3131], CA [28:7519627400970892553:3104], CA [28:7519627400970892638:3160], CA [28:7519627400970892628:3150], CA [28:7519627400970892606:3134], CA [28:7519627400970892600:3129], CA [28:7519627400970892557:3107], CA [28:7519627400970892642:3163], CA [28:7519627400970892636:3158], CA [28:7519627400970892604:3132], CA [28:7519627400970892610:3137], CA [28:7519627400970892576:3118], CA [28:7519627400970892571:3113], CA [28:7519627400970892614:3140], CA [28:7519627400970892608:3135], CA [28:7519627400970892574:3116], CA [28:7519627400970892580:3121], CA [28:7519627400970892558:3108], CA [28:7519627400970892612:3138], CA [28:7519627400970892578:3119], CA [28:7519627400970892585:3124], CA [28:7519627400970892562:3111], CA [28:7519627400970892615:3141], CA [28:7519627400970892582:3122], CA [28:7519627400970892619:3144], CA [28:7519627400970892623:3147], CA [28:7519627400970892617:3142], CA [28:7519627400970892547:3099], CA [28:7519627400970892633:3155], CA [28:7519627400970892621:3145], CA [28:7519627400970892587:3126], 2025-06-24T21:14:47.179402Z node 28 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[28:7519627400970892682:3167];TabletId=72075186224037888;ScanId=40;TxId=281474976715670;ScanGen=1;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[28:7519627400970892644:3165];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0.001},{"events":["l_bootstrap","f_ProduceResults"],"t":0.005},{"events":["f_ack","f_processing","l_ProduceResults","f_Finish"],"t":0.006},{"events":["l_ack","l_processing","l_Finish"],"t":0.007}],"full":{"a":1750799687170547,"name":"_full_task","f":1750799687170547,"d_finished":0,"c":0,"l":1750799687178508,"d":7961},"events":[{"name":"bootstrap","f":1750799687172119,"d_finished":4297,"c":1,"l":1750799687176416,"d":4297},{"a":1750799687176730,"name":"ack","f":1750799687176730,"d_finished":0,"c":0,"l":1750799687178508,"d":1778},{"a":1750799687176688,"name":"processing","f":1750799687176688,"d_finished":0,"c":0,"l":1750799687178508,"d":1820},{"name":"ProduceResults","f":1750799687176369,"d_finished":636,"c":2,"l":1750799687177374,"d":636},{"a":1750799687177380,"name":"Finish","f":1750799687177380,"d_finished":0,"c":0,"l":1750799687178508,"d":1128}],"id":"72075186224037888::40"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=3,6;column_names=resource_id,resource_type;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=3,4,5,6;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,3,4,5,6,7;column_names=level,message,resource_id,resource_type,timestamp,uid;);;program_input=(column_ids=1,3,4,5,6,7;column_names=level,message,resource_id,resource_type,timestamp,uid;);;;); 2025-06-24T21:14:47.179494Z node 28 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[28:7519627400970892682:3167];TabletId=72075186224037888;ScanId=40;TxId=281474976715670;ScanGen=1;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:14:47.168561Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=6;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-24T21:14:47.179541Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [28:7519627400970892542:3086] TxId: 281474976715670. Ctx: { TraceId: 01jyhwp6020rp0tbm9hn807g24, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=MzQ4NzNmNWYtNTIwNjYyMzMtNTExMzVkZi00MWQwMTU1ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [28:7519627400970892589:3127], task: 29, state: COMPUTE_STATE_EXECUTING, stats: { CpuTimeUs: 558 Tasks { TaskId: 29 CpuTimeUs: 425 Tables { TablePath: "/Root/OlapStore/log1" } ComputeCpuTimeUs: 32 BuildCpuTimeUs: 393 Sources { IngressName: "CS" Ingress { } } HostName: "ghrun-4pjfmvffsq" NodeId: 28 CreateTimeMs: 1750799687100 CurrentWaitInputTimeUs: 52 UpdateTimeMs: 1750799687134 } MaxMemoryUsage: 1048576 } 2025-06-24T21:14:47.179544Z node 28 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[28:7519627400970892682:3167];TabletId=72075186224037888;ScanId=40;TxId=281474976715670;ScanGen=1;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:14:47.179681Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [28:7519627400970892542:3086] TxId: 281474976715670. 
Ctx: { TraceId: 01jyhwp6020rp0tbm9hn807g24, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=MzQ4NzNmNWYtNTIwNjYyMzMtNTExMzVkZi00MWQwMTU1ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [28:7519627400970892551:3102], CA [28:7519627400970892631:3153], CA [28:7519627400970892625:3148], CA [28:7519627400970892634:3156], CA [28:7519627400970892549:3100], CA [28:7519627400970892555:3105], CA [28:7519627400970892639:3161], CA [28:7519627400970892601:3130], CA [28:7519627400970892629:3151], CA [28:7519627400970892643:3164], CA [28:7519627400970892637:3159], CA [28:7519627400970892605:3133], CA [28:7519627400970892572:3114], CA [28:7519627400970892640:3162], CA [28:7519627400970892609:3136], CA [28:7519627400970892575:3117], CA [28:7519627400970892613:3139], CA [28:7519627400970892560:3109], CA [28:7519627400970892579:3120], CA [28:7519627400970892573:3115], CA [28:7519627400970892564:3112], CA [28:7519627400970892583:3123], CA [28:7519627400970892561:3110], CA [28:7519627400970892618:3143], CA [28:7519627400970892622:3146], CA [28:7519627400970892589:3127], CA [28:7519627400970892552:3103], CA [28:7519627400970892632:3154], CA [28:7519627400970892626:3149], CA [28:7519627400970892598:3128], CA [28:7519627400970892586:3125], CA [28:7519627400970892635:3157], CA [28:7519627400970892556:3106], CA [28:7519627400970892550:3101], CA [28:7519627400970892630:3152], CA [28:7519627400970892602:3131], CA [28:7519627400970892553:3104], CA [28:7519627400970892638:3160], CA [28:7519627400970892628:3150], CA [28:7519627400970892606:3134], CA [28:7519627400970892600:3129], CA [28:7519627400970892557:3107], CA [28:7519627400970892642:3163], CA [28:7519627400970892636:3158], CA [28:7519627400970892604:3132], CA [28:7519627400970892610:3137], CA [28:7519627400970892576:3118], CA [28:7519627400970892571:3113], CA [28:7519627400970892614:3140], CA [28:7519627400970892608:3135], CA [28:7519627400970892574:3116], CA [28:7519627400970892580:3121], CA [28:7519627400970892558:3108], CA [28:7519627400970892612:3138], CA [28:7519627400970892578:3119], CA [28:7519627400970892585:3124], CA [28:7519627400970892562:3111], CA [28:7519627400970892615:3141], CA [28:7519627400970892582:3122], CA [28:7519627400970892619:3144], CA [28:7519627400970892623:3147], CA [28:7519627400970892617:3142], CA [28:7519627400970892547:3099], CA [28:7519627400970892633:3155], CA [28:7519627400970892621:3145], CA [28:7519627400970892587:3126], 2025-06-24T21:14:47.179803Z node 28 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[28:7519627400970892682:3167];TabletId=72075186224037888;ScanId=40;TxId=281474976715670;ScanGen=1;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=3,6;column_names=resource_id,resource_type;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=3,4,5,6;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,3,4,5,6,7;column_names=level,message,resource_id,resource_type,timestamp,uid;);;program_input=(column_ids=1,3,4,5,6,7;column_names=level,message,resource_id,resource_type,timestamp,uid;);;; 2025-06-24T21:14:47.179848Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [28:7519627400970892542:3086] TxId: 281474976715670. Ctx: { TraceId: 01jyhwp6020rp0tbm9hn807g24, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=MzQ4NzNmNWYtNTIwNjYyMzMtNTExMzVkZi00MWQwMTU1ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: ExecuteState, got execution state from compute actor: [28:7519627400970892598:3128], task: 30, state: COMPUTE_STATE_EXECUTING, stats: { CpuTimeUs: 786 Tasks { TaskId: 30 CpuTimeUs: 702 Tables { TablePath: "/Root/OlapStore/log1" } ComputeCpuTimeUs: 135 BuildCpuTimeUs: 567 Sources { IngressName: "CS" Ingress { } } HostName: "ghrun-4pjfmvffsq" NodeId: 28 CreateTimeMs: 1750799687101 CurrentWaitInputTimeUs: 54 UpdateTimeMs: 1750799687135 } MaxMemoryUsage: 1048576 } >> EvWrite::WriteWithLock ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteReadNoCompression [GOOD] Test command err: 2025-06-24T21:14:44.477183Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:44.506076Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:44.506388Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:44.515343Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:44.515564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:44.515791Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:44.515893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:44.516050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:44.516201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:44.516319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:44.516424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:44.516528Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:44.516669Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.516779Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:44.554959Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:44.555301Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:44.555366Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:44.555547Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:44.557570Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:44.557668Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:44.557714Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:44.557844Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:44.557984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:44.558028Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:44.558071Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:44.558264Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:44.558344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:44.558387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:44.558414Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:44.558531Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:44.558585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:44.558642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:44.558689Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:44.558740Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:44.558774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:44.558807Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:44.559038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:44.559078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:44.559115Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:44.559334Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:44.559395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:44.559434Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:44.559574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:44.559620Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.559651Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.559722Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:44.559790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:44.559828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:44.559854Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:44.560281Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=42; 2025-06-24T21:14:44.560387Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=49; 2025-06-24T21:14:44.560465Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=33; 2025-06-24T21:14:44.561331Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=811; 2025-06-24T21:14:44.561444Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:44.561548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:44.561623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:44.561677Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tab ... 
4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:14:51.105594Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:14:51.105754Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-06-24T21:14:51.105855Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=1984;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-06-24T21:14:51.106163Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1000:2855];bytes=1984;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: string json_payload: string ingested_at: timestamp[us] saved_at: timestamp[us] request_id: string; 2025-06-24T21:14:51.106324Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:14:51.106487Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:14:51.106632Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:14:51.106878Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:14:51.107034Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:14:51.107221Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:14:51.107272Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:1001:2856] finished for tablet 9437184 2025-06-24T21:14:51.107785Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:1000:2855];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.003},{"events":["l_bootstrap"],"t":0.005},{"events":["f_processing","f_task_result"],"t":0.007},{"events":["f_ack","l_task_result"],"t":0.019},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.022}],"full":{"a":1750799691085240,"name":"_full_task","f":1750799691085240,"d_finished":0,"c":0,"l":1750799691107336,"d":22096},"events":[{"name":"bootstrap","f":1750799691085601,"d_finished":5394,"c":1,"l":1750799691090995,"d":5394},{"a":1750799691106848,"name":"ack","f":1750799691105239,"d_finished":1428,"c":1,"l":1750799691106667,"d":1916},{"a":1750799691106833,"name":"processing","f":1750799691093031,"d_finished":8420,"c":10,"l":1750799691106669,"d":8923},{"name":"ProduceResults","f":1750799691088328,"d_finished":5480,"c":13,"l":1750799691107256,"d":5480},{"a":1750799691107259,"name":"Finish","f":1750799691107259,"d_finished":0,"c":0,"l":1750799691107336,"d":77},{"name":"task_result","f":1750799691093064,"d_finished":6725,"c":9,"l":1750799691105044,"d":6725}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:14:51.107923Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1000:2855];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:14:51.108411Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:1000:2855];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.003},{"events":["l_bootstrap"],"t":0.005},{"events":["f_processing","f_task_result"],"t":0.007},{"events":["f_ack","l_task_result"],"t":0.019},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.022}],"full":{"a":1750799691085240,"name":"_full_task","f":1750799691085240,"d_finished":0,"c":0,"l":1750799691107974,"d":22734},"events":[{"name":"bootstrap","f":1750799691085601,"d_finished":5394,"c":1,"l":1750799691090995,"d":5394},{"a":1750799691106848,"name":"ack","f":1750799691105239,"d_finished":1428,"c":1,"l":1750799691106667,"d":2554},{"a":1750799691106833,"name":"processing","f":1750799691093031,"d_finished":8420,"c":10,"l":1750799691106669,"d":9561},{"name":"ProduceResults","f":1750799691088328,"d_finished":5480,"c":13,"l":1750799691107256,"d":5480},{"a":1750799691107259,"name":"Finish","f":1750799691107259,"d_finished":0,"c":0,"l":1750799691107974,"d":715},{"name":"task_result","f":1750799691093064,"d_finished":6725,"c":9,"l":1750799691105044,"d":6725}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:14:51.108496Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:14:51.084309Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=8392;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=8392;selected_rows=0; 2025-06-24T21:14:51.108541Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:14:51.108941Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; >> TColumnShardTestReadWrite::Write [GOOD] >> Normalizers::EmptyTablesNormalizer [GOOD] >> Normalizers::SchemaVersionsNormalizer [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> 
Normalizers::EmptyTablesNormalizer [GOOD] Test command err: 2025-06-24T21:14:50.422243Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:50.449999Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:50.450308Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:50.458306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=PortionsCleaner; 2025-06-24T21:14:50.458849Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=NO_VALUE_OPTIONAL; 2025-06-24T21:14:50.459081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:50.459279Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:50.459404Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:50.459526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:50.459676Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:50.459826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:50.459962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:50.460102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:50.460234Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:50.460508Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:50.490912Z node 1 :TX_COLUMNSHARD 
DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:50.491101Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=PortionsCleaner; 2025-06-24T21:14:50.491180Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=NO_VALUE_OPTIONAL;type=NO_VALUE_OPTIONAL; 2025-06-24T21:14:50.491734Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=54; 2025-06-24T21:14:50.491836Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=43; 2025-06-24T21:14:50.491914Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=32; 2025-06-24T21:14:50.492008Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=52; 2025-06-24T21:14:50.492171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=PortionsCleaner;id=NO_VALUE_OPTIONAL; 2025-06-24T21:14:50.492243Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Granules;id=Granules; 2025-06-24T21:14:50.492303Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=1;type=Granules; 2025-06-24T21:14:50.492489Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:50.492588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:50.492641Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:50.492675Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=2;type=Chunks; 2025-06-24T21:14:50.492771Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:50.492828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:50.492900Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:50.492955Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:50.493153Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:50.493219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:50.493275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:50.493337Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:50.493442Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:50.493524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:50.493564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:50.493615Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:50.493664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:50.493704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:50.493732Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:50.493802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:50.493867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:50.493900Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:50.494142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:50.494197Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:50.494233Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:50.494366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:50.494410Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:50.494454Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:50.494531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:50.494581Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0Chunk ... posite_init/tables_manager;fline=tables_manager.cpp:195;event=load_preset;preset_id=1;snapshot={1750799691393:10};version=1; 2025-06-24T21:14:52.008998Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:composite_init/tables_manager;fline=tables_manager.cpp:199;event=index_schema;preset_id=1;snapshot={1750799691393:10};version=1; 2025-06-24T21:14:52.013959Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:composite_init/tables_manager;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=5129; 2025-06-24T21:14:52.014104Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tables_managerLoadingTime=5707; 2025-06-24T21:14:52.015651Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:column_enginesLoadingTime=15; 2025-06-24T21:14:52.015938Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:countersLoadingTime=123; 2025-06-24T21:14:52.016133Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:countersLoadingTime=90; 2025-06-24T21:14:52.016228Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:sharding_infoLoadingTime=32; 2025-06-24T21:14:52.016296Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:sharding_infoLoadingTime=28; 2025-06-24T21:14:52.016341Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=8; 2025-06-24T21:14:52.016406Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=7; 2025-06-24T21:14:52.016447Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=680; 2025-06-24T21:14:52.016591Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=62; 2025-06-24T21:14:52.016710Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=67; 2025-06-24T21:14:52.016863Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=93; 2025-06-24T21:14:52.016983Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=80; 2025-06-24T21:14:52.017174Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=147; 2025-06-24T21:14:52.028425Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=11179; 2025-06-24T21:14:52.028563Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=16; 2025-06-24T21:14:52.028637Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=12; 2025-06-24T21:14:52.028678Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=5; 2025-06-24T21:14:52.028812Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=88; 2025-06-24T21:14:52.028862Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=7; 2025-06-24T21:14:52.028964Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=69; 2025-06-24T21:14:52.029003Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=4; 2025-06-24T21:14:52.029099Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=39; 2025-06-24T21:14:52.029257Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=77; 2025-06-24T21:14:52.029360Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=58; 2025-06-24T21:14:52.029413Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=22074; 2025-06-24T21:14:52.029582Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 0 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=0;raw_bytes=0;count=0;records=0} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:14:52.029711Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:14:52.029801Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:14:52.029888Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:14:52.029944Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:14:52.030015Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:14:52.030106Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-24T21:14:52.065725Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:14:52.065839Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:14:52.065898Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:14:52.066019Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:14:52.073370Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:14:52.073509Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:14:52.073547Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-24T21:14:52.073580Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:14:52.073638Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:14:52.073730Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-24T21:14:52.073817Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:14:52.073883Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:14:52.073933Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:14:52.074037Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:14:52.175626Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 111 scanId: 0 version: {1750799691438:111} readable: {1750799691438:max} at tablet 9437184 2025-06-24T21:14:52.175706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::TEvDataShard::TEvKqpScan;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=uint64;records=0;count=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=uint64;records=0;count=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:49;event=insert_to_cache;key=string;records=0;size=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=string;records=0;count=0; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::Write [GOOD] Test command err: 2025-06-24T21:14:45.615729Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:45.644404Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:45.644715Z node 1 
:TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:45.652779Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:45.653040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:45.653296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:45.653413Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:45.653562Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:45.653690Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:45.653837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:45.653959Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:45.654063Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:45.654182Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:45.654302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:45.688729Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:45.689018Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:45.689083Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:45.689288Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 
2025-06-24T21:14:45.689464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:45.689536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:45.689583Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:45.689714Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:45.689801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:45.689857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:45.689897Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:45.690078Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:45.690164Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:45.690208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:45.690245Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:45.690349Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:45.690427Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:45.690473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:45.690520Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:45.690586Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:45.690646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:45.690684Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:45.690917Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:45.690968Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:45.691005Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:45.691239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:45.691295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:45.691328Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:45.691458Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:45.691514Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:45.691548Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:45.691653Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:45.691748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:45.691811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:45.691843Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:45.692341Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=51; 2025-06-24T21:14:45.692434Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=36; 2025-06-24T21:14:45.692521Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=36; 2025-06-24T21:14:45.692615Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=48; 2025-06-24T21:14:45.692725Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:45.692831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:45.692886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:45.692940Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... [{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"19,19,19,19,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"20,20,20,20,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"21,21,21,21,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"22,22,22,22,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"23,23,23,23,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"24,24,24,24,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"25,25,25,25,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"26,26,26,26,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"27,27,27,27,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"28,28,28,28,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"29,29,29,29,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"30,30,30,30,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"31,31,31,31,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"32,32,32,32,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"33,33,33,33,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"34,34,34,34,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"
id":1}]},"p":{"include":0,"pk":"35,35,35,35,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"36,36,36,36,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"37,37,37,37,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"38,38,38,38,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"39,39,39,39,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"40,40,40,40,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"41,41,41,41,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"42,42,42,42,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"43,43,43,43,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"44,44,44,44,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"45,45,45,45,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"46,46,46,46,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"47,47,47,47,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"48,48,48,48,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"49,49,49,49,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"50,50,50,50,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"51,51,51,51,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"52,52,52,52,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"53,53,53,53,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"54,54,54,54,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"55,55,55,55,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"56,56,56,56,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"57,57,57,57,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"58,58,58,58,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":
{"count_include":1},"id":1}]},"p":{"include":0,"pk":"59,59,59,59,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"60,60,60,60,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"61,61,61,61,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"62,62,62,62,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"63,63,63,63,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"64,64,64,64,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"65,65,65,65,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"66,66,66,66,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"67,67,67,67,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"68,68,68,68,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"69,69,69,69,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"70,70,70,70,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"71,71,71,71,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"72,72,72,72,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"73,73,73,73,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"74,74,74,74,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"75,75,75,75,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"76,76,76,76,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"77,77,77,77,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"78,78,78,78,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"79,79,79,79,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"80,80,80,80,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"81,81,81,81,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"82,82,82,82,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}
],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"83,83,83,83,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"84,84,84,84,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"85,85,85,85,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"86,86,86,86,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"87,87,87,87,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"88,88,88,88,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"89,89,89,89,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"90,90,90,90,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"91,91,91,91,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"92,92,92,92,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"93,93,93,93,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"94,94,94,94,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"95,95,95,95,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"96,96,96,96,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"97,97,97,97,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"98,98,98,98,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"99,99,99,99,"}}]}; >> TColumnShardTestReadWrite::RebootWriteRead ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> Normalizers::SchemaVersionsNormalizer [GOOD] Test command err: 2025-06-24T21:14:49.890865Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:49.921070Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:49.921394Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:49.929644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SchemaVersionCleaner; 2025-06-24T21:14:49.929957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=NO_VALUE_OPTIONAL; 2025-06-24T21:14:49.930177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:49.930377Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:49.930514Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:49.930652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:49.930807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:49.930953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:49.931092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:49.931226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:49.931361Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.931497Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:49.962874Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:49.963072Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=SchemaVersionCleaner; 2025-06-24T21:14:49.963162Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=NO_VALUE_OPTIONAL;type=NO_VALUE_OPTIONAL; 2025-06-24T21:14:49.963496Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SchemaVersionCleaner;id=NO_VALUE_OPTIONAL; 2025-06-24T21:14:49.963595Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Granules;id=Granules; 2025-06-24T21:14:49.963657Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=1;type=Granules; 2025-06-24T21:14:49.963831Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:49.963926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:49.964013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:49.964057Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=2;type=Chunks; 2025-06-24T21:14:49.964152Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:49.964221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:49.964266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:49.964303Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:49.964512Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:49.964581Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:49.964658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:49.964711Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:49.964818Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:49.964896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:49.964963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:49.964998Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:49.965061Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:49.965102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:49.965130Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:49.965184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:49.965246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:49.965296Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:49.965557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:49.965651Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:49.965685Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:49.965815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:49.965870Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.965929Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.965988Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:49.966033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:49.966062Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:49.966111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:49.966182Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:49.966242Z node 1 : ... line=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-24T21:14:52.401652Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-24T21:14:52.401712Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=2; 2025-06-24T21:14:52.401815Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=20048;merger=0;interval_id=2; 2025-06-24T21:14:52.401875Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-24T21:14:52.401996Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:14:52.402041Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=20048;finished=1; 2025-06-24T21:14:52.402086Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:14:52.402364Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:14:52.402556Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:20048;schema=key1: uint64 key2: uint64 field: 
string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:14:52.402614Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:14:52.402792Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;);columns=3;rows=20048; 2025-06-24T21:14:52.402877Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=2405760;num_rows=20048;batch_columns=key1,key2,field; 2025-06-24T21:14:52.403130Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:515:2515];bytes=2405760;rows=20048;faults=0;finished=0;fault=0;schema=key1: uint64 key2: uint64 field: string; 2025-06-24T21:14:52.403325Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:14:52.403461Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:14:52.403586Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:14:52.404652Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:14:52.404828Z node 1 
:TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:14:52.405003Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:14:52.405113Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:520:2519] finished for tablet 9437184 2025-06-24T21:14:52.405690Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:515:2515];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.006},{"events":["f_ack","l_task_result"],"t":0.43},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.433}],"full":{"a":1750799691972011,"name":"_full_task","f":1750799691972011,"d_finished":0,"c":0,"l":1750799692405177,"d":433166},"events":[{"name":"bootstrap","f":1750799691972251,"d_finished":4220,"c":1,"l":1750799691976471,"d":4220},{"a":1750799692404618,"name":"ack","f":1750799692402329,"d_finished":1291,"c":1,"l":1750799692403620,"d":1850},{"a":1750799692404596,"name":"processing","f":1750799691978691,"d_finished":313895,"c":16,"l":1750799692403623,"d":314476},{"name":"ProduceResults","f":1750799691974666,"d_finished":4746,"c":19,"l":1750799692405091,"d":4746},{"a":1750799692405095,"name":"Finish","f":1750799692405095,"d_finished":0,"c":0,"l":1750799692405177,"d":82},{"name":"task_result","f":1750799691978711,"d_finished":312257,"c":15,"l":1750799692402170,"d":312257}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:14:52.405793Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:515:2515];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:14:52.406345Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:515:2515];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.006},{"events":["f_ack","l_task_result"],"t":0.43},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.433}],"full":{"a":1750799691972011,"name":"_full_task","f":1750799691972011,"d_finished":0,"c":0,"l":1750799692405876,"d":433865},"events":[{"name":"bootstrap","f":1750799691972251,"d_finished":4220,"c":1,"l":1750799691976471,"d":4220},{"a":1750799692404618,"name":"ack","f":1750799692402329,"d_finished":1291,"c":1,"l":1750799692403620,"d":2549},{"a":1750799692404596,"name":"processing","f":1750799691978691,"d_finished":313895,"c":16,"l":1750799692403623,"d":315175},{"name":"ProduceResults","f":1750799691974666,"d_finished":4746,"c":19,"l":1750799692405091,"d":4746},{"a":1750799692405095,"name":"Finish","f":1750799692405095,"d_finished":0,"c":0,"l":1750799692405876,"d":781},{"name":"task_result","f":1750799691978711,"d_finished":312257,"c":15,"l":1750799692402170,"d":312257}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:14:52.406472Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:14:51.971313Z;index_granules=0;index_portions=2;index_batches=748;schema_columns=3;filter_columns=0;additional_columns=0;compacted_portions_bytes=2776560;inserted_portions_bytes=0;committed_portions_bytes=2488696;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=5265256;selected_rows=0; 2025-06-24T21:14:52.406528Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:14:52.406863Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:520:2519];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;; >> TColumnShardTestReadWrite::CompactionInGranule_PKTimestamp_Reboot >> EvWrite::WriteWithLock [GOOD] >> TColumnShardTestReadWrite::PortionInfoSize [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKDatetime_Reboot >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt32_Reboot ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::PortionInfoSize [GOOD] Test command err: 208 136 28 48 32 24 16 24 56 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> EvWrite::WriteWithLock [GOOD] Test command err: 2025-06-24T21:14:52.424911Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:52.451969Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:52.452346Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:52.460370Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:52.460632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:52.460885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:52.460992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:52.461129Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:52.461287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:52.461428Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:52.461554Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:52.461666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:52.461772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:52.461888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:52.490174Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:52.490379Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:52.490453Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:52.490653Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:52.490851Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:52.490938Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:52.490992Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:52.491124Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:52.491250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:52.491313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:52.491351Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:52.491573Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:52.491660Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:52.491715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:52.491749Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:52.491876Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:52.491950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:52.492020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:52.492082Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:52.492152Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:52.492194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:52.492240Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:52.492497Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:52.492545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:52.492582Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:52.492777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:52.492830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:52.492865Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:52.493004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:52.493055Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:52.493092Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:52.493193Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:52.493273Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:52.493317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:52.493361Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:52.493878Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=63; 2025-06-24T21:14:52.494001Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=47; 2025-06-24T21:14:52.494089Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=31; 2025-06-24T21:14:52.494160Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=31; 2025-06-24T21:14:52.494225Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:52.494294Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:52.494344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:52.494383Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... lumn_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;);columns=2;rows=2048; 2025-06-24T21:14:53.361756Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=229376;num_rows=2048;batch_columns=key,field; 2025-06-24T21:14:53.362036Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:303:2315];bytes=229376;rows=2048;faults=0;finished=0;fault=0;schema=key: uint64 field: string; 2025-06-24T21:14:53.362209Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:1;records_count:2048;schema=key: uint64 field: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:53.362373Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:2048;schema=key: uint64 field: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:53.362424Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:14:53.362469Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit 
exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:14:53.362775Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:14:53.362931Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:2048;schema=key: uint64 field: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:53.362979Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:14:53.363136Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;);columns=2;rows=2048; 2025-06-24T21:14:53.363249Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=229376;num_rows=2048;batch_columns=key,field; 2025-06-24T21:14:53.363444Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:303:2315];bytes=229376;rows=2048;faults=0;finished=0;fault=0;schema=key: uint64 field: string; 2025-06-24T21:14:53.363606Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:53.363739Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:53.363885Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:53.364145Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:14:53.364237Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:53.364333Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:53.364414Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:304:2316] finished for tablet 9437184 2025-06-24T21:14:53.364900Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:303:2315];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.003},{"events":["l_bootstrap"],"t":0.005},{"events":["f_processing","f_task_result"],"t":0.008},{"events":["f_ack","l_task_result"],"t":0.054},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.058}],"full":{"a":1750799693306316,"name":"_full_task","f":1750799693306316,"d_finished":0,"c":0,"l":1750799693364478,"d":58162},"events":[{"name":"bootstrap","f":1750799693306538,"d_finished":5484,"c":1,"l":1750799693312022,"d":5484},{"a":1750799693364126,"name":"ack","f":1750799693361233,"d_finished":2432,"c":2,"l":1750799693363916,"d":2784},{"a":1750799693364115,"name":"processing","f":1750799693314499,"d_finished":30647,"c":18,"l":1750799693363920,"d":31010},{"name":"ProduceResults","f":1750799693309652,"d_finished":5554,"c":22,"l":1750799693364354,"d":5554},{"a":1750799693364356,"name":"Finish","f":1750799693364356,"d_finished":0,"c":0,"l":1750799693364478,"d":122},{"name":"task_result","f":1750799693314522,"d_finished":27912,"c":16,"l":1750799693361095,"d":27912}],"id":"9437184::3"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:53.364985Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:303:2315];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 
2025-06-24T21:14:53.365429Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:303:2315];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.003},{"events":["l_bootstrap"],"t":0.005},{"events":["f_processing","f_task_result"],"t":0.008},{"events":["f_ack","l_task_result"],"t":0.054},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.058}],"full":{"a":1750799693306316,"name":"_full_task","f":1750799693306316,"d_finished":0,"c":0,"l":1750799693365033,"d":58717},"events":[{"name":"bootstrap","f":1750799693306538,"d_finished":5484,"c":1,"l":1750799693312022,"d":5484},{"a":1750799693364126,"name":"ack","f":1750799693361233,"d_finished":2432,"c":2,"l":1750799693363916,"d":3339},{"a":1750799693364115,"name":"processing","f":1750799693314499,"d_finished":30647,"c":18,"l":1750799693363920,"d":31565},{"name":"ProduceResults","f":1750799693309652,"d_finished":5554,"c":22,"l":1750799693364354,"d":5554},{"a":1750799693364356,"name":"Finish","f":1750799693364356,"d_finished":0,"c":0,"l":1750799693365033,"d":677},{"name":"task_result","f":1750799693314522,"d_finished":27912,"c":16,"l":1750799693361095,"d":27912}],"id":"9437184::3"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:53.365528Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:14:53.305636Z;index_granules=0;index_portions=2;index_batches=88;schema_columns=2;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=474480;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=474480;selected_rows=0; 2025-06-24T21:14:53.365583Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:14:53.365883Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:304:2316];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;; >> Normalizers::ChunksV0MetaNormalizer >> Backup::ProposeBackup >> TopicAutoscaling::PartitionSplit_ManySession_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ManySession_existed_AutoscaleAwareSDK >> TColumnShardTestReadWrite::CompactionInGranule_PKUtf8 >> YdbIndexTable::MultiShardTableOneUniqIndexDataColumn [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKString_Reboot >> TColumnShardTestReadWrite::CompactionInGranule_PKInt32 >> Backup::ProposeBackup [GOOD] >> EvWrite::AbortInTransaction >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithNoCompression >> PersQueueSdkReadSessionTest::ReadSessionWithAbort [GOOD] >> 
PersQueueSdkReadSessionTest::ReadSessionWithClose ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::MultiShardTableOneUniqIndexDataColumn [GOOD] Test command err: Trying to start YDB, gRPC: 25922, MsgBus: 3472 2025-06-24T21:11:26.297606Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626534943662309:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:26.297668Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003f9d/r3tmp/tmpUL4pQh/pdisk_1.dat 2025-06-24T21:11:26.687464Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626534943662283:2079] 1750799486293751 != 1750799486293754 2025-06-24T21:11:26.696540Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:26.737244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:26.737316Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:26.738539Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25922, node 1 2025-06-24T21:11:26.875118Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:26.875169Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:26.875183Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:26.875310Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:27.310365Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3472 TClient is connected to server localhost:3472 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:11:27.906339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:27.935772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:11:27.949867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:28.131587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:28.290284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:28.405898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:29.828958Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626547828565813:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:29.829051Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.456686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.528824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.565699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.598703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.667589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.713412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.786823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.850845Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626552123533780:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.850947Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.851124Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626552123533785:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.855109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:30.869375Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626552123533787:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:11:30.945343Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626552123533838:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:31.308033Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626534943662309:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:31.308105Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:32.134689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... ThhNjQtMTJjMzUzOTYtZGNmMTAzNGMtYTk3MTk4MmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.718081Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727029. Ctx: { TraceId: 01jyhwp9zd621q45b83ds87v3j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmU4ZWZjMmEtZDUwMTE3NDEtZDg4NDE5NGItMWQ3ZjYwYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.722123Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727030. Ctx: { TraceId: 01jyhwp9yt4pk86rgvcvkg47ph, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGIwYTlkNWEtNmU1NGRiNDktYTcxMDIzYjQtZTQxY2UxZGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.727072Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727031. Ctx: { TraceId: 01jyhwp9za69r6wnj0ee5fz59q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzU4YTM1NTEtYmFiNjQzZDktOGQ1M2VlMjYtMzhhOGViOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.729286Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727032. Ctx: { TraceId: 01jyhwp9zd8n2aqs4f3yk0ks8f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmU4YzhlN2EtM2Y2MjVkMGEtNGEzZTI3OGEtNTNjNTc4YTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.737520Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727033. Ctx: { TraceId: 01jyhwp9yt4pk86rgvcvkg47ph, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGIwYTlkNWEtNmU1NGRiNDktYTcxMDIzYjQtZTQxY2UxZGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.738534Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727034. Ctx: { TraceId: 01jyhwp9za69r6wnj0ee5fz59q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzU4YTM1NTEtYmFiNjQzZDktOGQ1M2VlMjYtMzhhOGViOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:14:49.742975Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727036. Ctx: { TraceId: 01jyhwp9zd8n2aqs4f3yk0ks8f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmU4YzhlN2EtM2Y2MjVkMGEtNGEzZTI3OGEtNTNjNTc4YTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.743059Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727035. Ctx: { TraceId: 01jyhwp9za69r6wnj0ee5fz59q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzU4YTM1NTEtYmFiNjQzZDktOGQ1M2VlMjYtMzhhOGViOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.748267Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727037. Ctx: { TraceId: 01jyhwpa0d9fzmz6zx9skkvgxa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjQwMDE4ZmItNmJhZWNmNzYtN2I2ZTE0YzEtN2FkNzJmODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.752476Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727038. Ctx: { TraceId: 01jyhwp9zd8n2aqs4f3yk0ks8f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmU4YzhlN2EtM2Y2MjVkMGEtNGEzZTI3OGEtNTNjNTc4YTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.762888Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727039. Ctx: { TraceId: 01jyhwpa0h02eg7ekt7kt42ss2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODYxZThhNjQtMTJjMzUzOTYtZGNmMTAzNGMtYTk3MTk4MmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.769432Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727040. Ctx: { TraceId: 01jyhwpa0h02eg7ekt7kt42ss2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODYxZThhNjQtMTJjMzUzOTYtZGNmMTAzNGMtYTk3MTk4MmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.777629Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727041. Ctx: { TraceId: 01jyhwpa0h02eg7ekt7kt42ss2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODYxZThhNjQtMTJjMzUzOTYtZGNmMTAzNGMtYTk3MTk4MmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.782559Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727042. Ctx: { TraceId: 01jyhwpa189450a49m9r792tm3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmU4ZWZjMmEtZDUwMTE3NDEtZDg4NDE5NGItMWQ3ZjYwYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.788832Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727043. Ctx: { TraceId: 01jyhwpa0h02eg7ekt7kt42ss2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODYxZThhNjQtMTJjMzUzOTYtZGNmMTAzNGMtYTk3MTk4MmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.791123Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727044. Ctx: { TraceId: 01jyhwpa1m93vc7rb1zzkxq718, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzU4YTM1NTEtYmFiNjQzZDktOGQ1M2VlMjYtMzhhOGViOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:14:49.793066Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727045. Ctx: { TraceId: 01jyhwpa189450a49m9r792tm3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmU4ZWZjMmEtZDUwMTE3NDEtZDg4NDE5NGItMWQ3ZjYwYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.801632Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727046. Ctx: { TraceId: 01jyhwpa1fcq55hx5stzcpkn4p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGIwYTlkNWEtNmU1NGRiNDktYTcxMDIzYjQtZTQxY2UxZGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.804181Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727047. Ctx: { TraceId: 01jyhwpa1m93vc7rb1zzkxq718, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzU4YTM1NTEtYmFiNjQzZDktOGQ1M2VlMjYtMzhhOGViOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.806792Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727048. Ctx: { TraceId: 01jyhwpa189450a49m9r792tm3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmU4ZWZjMmEtZDUwMTE3NDEtZDg4NDE5NGItMWQ3ZjYwYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.813502Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727049. Ctx: { TraceId: 01jyhwpa1m93vc7rb1zzkxq718, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzU4YTM1NTEtYmFiNjQzZDktOGQ1M2VlMjYtMzhhOGViOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.814933Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727050. Ctx: { TraceId: 01jyhwpa1fcq55hx5stzcpkn4p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGIwYTlkNWEtNmU1NGRiNDktYTcxMDIzYjQtZTQxY2UxZGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.817240Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727051. Ctx: { TraceId: 01jyhwpa22bwfg9283w3f69f6a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjQwMDE4ZmItNmJhZWNmNzYtN2I2ZTE0YzEtN2FkNzJmODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.826026Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727052. Ctx: { TraceId: 01jyhwpa1fcq55hx5stzcpkn4p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGIwYTlkNWEtNmU1NGRiNDktYTcxMDIzYjQtZTQxY2UxZGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.828037Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727053. Ctx: { TraceId: 01jyhwpa2r68hxns0qjx17qfwk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmU4YzhlN2EtM2Y2MjVkMGEtNGEzZTI3OGEtNTNjNTc4YTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.833474Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727054. Ctx: { TraceId: 01jyhwpa22bwfg9283w3f69f6a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjQwMDE4ZmItNmJhZWNmNzYtN2I2ZTE0YzEtN2FkNzJmODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root finished with status: SUCCESS 2025-06-24T21:14:49.839829Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727055. Ctx: { TraceId: 01jyhwpa22bwfg9283w3f69f6a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjQwMDE4ZmItNmJhZWNmNzYtN2I2ZTE0YzEtN2FkNzJmODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS 2025-06-24T21:14:49.849974Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727056. Ctx: { TraceId: 01jyhwpa3f3e0pt6080jqczfb3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODYxZThhNjQtMTJjMzUzOTYtZGNmMTAzNGMtYTk3MTk4MmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.855579Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727057. Ctx: { TraceId: 01jyhwpa3f3e0pt6080jqczfb3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODYxZThhNjQtMTJjMzUzOTYtZGNmMTAzNGMtYTk3MTk4MmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.859685Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727058. Ctx: { TraceId: 01jyhwpa3y4z14f69javhn4dmp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmU4ZWZjMmEtZDUwMTE3NDEtZDg4NDE5NGItMWQ3ZjYwYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.861050Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727059. Ctx: { TraceId: 01jyhwpa3f3e0pt6080jqczfb3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODYxZThhNjQtMTJjMzUzOTYtZGNmMTAzNGMtYTk3MTk4MmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:14:49.866363Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727060. Ctx: { TraceId: 01jyhwpa3y4z14f69javhn4dmp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmU4ZWZjMmEtZDUwMTE3NDEtZDg4NDE5NGItMWQ3ZjYwYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-24T21:14:49.872549Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727061. Ctx: { TraceId: 01jyhwpa3y4z14f69javhn4dmp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmU4ZWZjMmEtZDUwMTE3NDEtZDg4NDE5NGItMWQ3ZjYwYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root finished with status: SUCCESS >> EvWrite::AbortInTransaction [GOOD] >> BasicUsage::TSimpleWriteSession_AutoSeqNo_BasicUsage [GOOD] >> BasicUsage::TWriteSession_AutoBatching [GOOD] >> BasicUsage::TWriteSession_BatchingProducesContinueTokens [GOOD] >> BasicUsage::BrokenCredentialsProvider >> Normalizers::ChunksV0MetaNormalizer [GOOD] >> ReadSessionImplTest::DataReceivedCallbackReal [GOOD] >> PersQueueSdkReadSessionTest::StopResumeReadingData [GOOD] >> ReadSessionImplTest::CreatePartitionStream [GOOD] >> ReadSessionImplTest::BrokenCompressedData [GOOD] >> ReadSessionImplTest::CommitOffsetTwiceIsError [GOOD] >> ReadSessionImplTest::DataReceivedCallback ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> EvWrite::AbortInTransaction [GOOD] Test command err: 2025-06-24T21:14:55.283044Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:55.313624Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:55.313982Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:55.322249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:55.322523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:55.322795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:55.322920Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:55.323072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:55.323246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:55.323397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:55.323526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:55.323631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:55.323748Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:55.323876Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:55.356493Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:55.356684Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:55.356756Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:55.357026Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:55.357230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:55.357317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:55.357367Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:55.357482Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:55.357556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:55.357609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:55.357657Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:55.357854Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:55.357930Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:55.357978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:55.358012Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 
2025-06-24T21:14:55.358138Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:55.358210Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:55.358261Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:55.358307Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:55.358374Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:55.358430Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:55.358472Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:55.358710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:55.358758Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:55.358797Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:55.359003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:55.359059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:55.359090Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:55.359275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:55.359355Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:55.359398Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 
2025-06-24T21:14:55.359494Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:55.359573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:55.359617Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:55.359654Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:55.360190Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=51; 2025-06-24T21:14:55.360287Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=42; 2025-06-24T21:14:55.360379Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=36; 2025-06-24T21:14:55.360485Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=51; 2025-06-24T21:14:55.360592Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:55.360691Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:55.360739Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:55.360793Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
l_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=1588;data_size=1564;sum=2224;count=8;size_of_meta=136; 2025-06-24T21:14:57.402236Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:110:2140];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=1660;data_size=1636;sum=2512;count=4;size_of_portion=208; 2025-06-24T21:14:57.404348Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 44 2025-06-24T21:14:57.404816Z node 2 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:110:2140];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=2;operation_id=1; 2025-06-24T21:14:57.417651Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 44 2025-06-24T21:14:57.418296Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;self_id=[2:110:2140];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=222;problem=finished; 2025-06-24T21:14:57.418364Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;self_id=[2:110:2140];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=222;problem=finished; 2025-06-24T21:14:57.418551Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750799697743 at tablet 9437184, mediator 0 2025-06-24T21:14:57.418596Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[5] execute at tablet 9437184 2025-06-24T21:14:57.418638Z node 2 :TX_COLUMNSHARD ERROR: ctor_logger.h:56: TxPlanStep[5] Ignore old txIds [112] for step 1750799697743 last planned step 1750799697743 at tablet 9437184 2025-06-24T21:14:57.418679Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[5] complete at tablet 9437184 2025-06-24T21:14:57.419035Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750799697743:max} readable: {1750799697743:max} at tablet 9437184 2025-06-24T21:14:57.419202Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-24T21:14:57.420923Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:110:2140];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750799697743:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } Columns { Id: 2 } } } ; 2025-06-24T21:14:57.421006Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:110:2140];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750799697743:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } Columns { Id: 2 } } } ; 2025-06-24T21:14:57.421791Z node 2 
:TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:110:2140];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750799697743:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":4,"inputs":[{"from":5}]},{"owner_id":5,"inputs":[{"from":6}]},{"owner_id":6,"inputs":[]}],"nodes":{"2":{"p":{"i":"1","p":{"address":{"name":"key","id":1}},"o":"1","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"p":{"data":[{"name":"key","id":1},{"name":"field","id":2}]},"o":"0","t":"ReserveMemory"},"w":0,"id":6},"5":{"p":{"i":"0","p":{"data":[{"name":"key","id":1},{"name":"field","id":2}]},"o":"1,2","t":"FetchOriginalData"},"w":4,"id":5},"4":{"p":{"i":"2","p":{"address":{"name":"field","id":2}},"o":"2","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"1,2","t":"Projection"},"w":18,"id":0}}}; 2025-06-24T21:14:57.421902Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:110:2140];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750799697743:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-24T21:14:57.422450Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:110:2140];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750799697743:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[2:180:2192];trace_detailed=; 2025-06-24T21:14:57.422986Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:84;ff_first=(column_ids=1,2;column_names=field,key;);; 2025-06-24T21:14:57.423256Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;; 2025-06-24T21:14:57.423593Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:180:2192];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:14:57.423702Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:180:2192];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:57.423824Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:180:2192];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:57.423873Z node 2 
:TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [2:180:2192] finished for tablet 9437184 2025-06-24T21:14:57.424338Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:180:2192];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[2:179:2191];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","l_ack","f_processing","l_processing","f_ProduceResults","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750799697422384,"name":"_full_task","f":1750799697422384,"d_finished":0,"c":0,"l":1750799697423955,"d":1571},"events":[{"name":"bootstrap","f":1750799697422559,"d_finished":862,"c":1,"l":1750799697423421,"d":862},{"a":1750799697423564,"name":"ack","f":1750799697423564,"d_finished":0,"c":0,"l":1750799697423955,"d":391},{"a":1750799697423527,"name":"processing","f":1750799697423527,"d_finished":0,"c":0,"l":1750799697423955,"d":428},{"name":"ProduceResults","f":1750799697423405,"d_finished":247,"c":2,"l":1750799697423853,"d":247},{"a":1750799697423857,"name":"Finish","f":1750799697423857,"d_finished":0,"c":0,"l":1750799697423955,"d":98}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:57.424440Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:180:2192];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[2:179:2191];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:14:57.424848Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:180:2192];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[2:179:2191];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","f_ProduceResults","l_ProduceResults","f_Finish"],"t":0.001},{"events":["l_ack","l_processing","l_Finish"],"t":0.002}],"full":{"a":1750799697422384,"name":"_full_task","f":1750799697422384,"d_finished":0,"c":0,"l":1750799697424488,"d":2104},"events":[{"name":"bootstrap","f":1750799697422559,"d_finished":862,"c":1,"l":1750799697423421,"d":862},{"a":1750799697423564,"name":"ack","f":1750799697423564,"d_finished":0,"c":0,"l":1750799697424488,"d":924},{"a":1750799697423527,"name":"processing","f":1750799697423527,"d_finished":0,"c":0,"l":1750799697424488,"d":961},{"name":"ProduceResults","f":1750799697423405,"d_finished":247,"c":2,"l":1750799697423853,"d":247},{"a":1750799697423857,"name":"Finish","f":1750799697423857,"d_finished":0,"c":0,"l":1750799697424488,"d":631}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:14:57.424941Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[2:180:2192];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:14:57.421872Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=2;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-24T21:14:57.424990Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:180:2192];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:14:57.425107Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:180:2192];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=uint64;records=0;count=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:49;event=insert_to_cache;key=string;records=0;size=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=string;records=0;count=0; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> Normalizers::ChunksV0MetaNormalizer [GOOD] Test command err: 2025-06-24T21:14:55.172948Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:55.200453Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:55.200797Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:55.208847Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:55.209165Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=NO_VALUE_OPTIONAL; 2025-06-24T21:14:55.209374Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:55.209542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:55.209644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:55.209773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:55.209920Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:55.210050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:55.210164Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:55.210266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:55.210428Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:55.210557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:55.241520Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:55.241717Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:55.241783Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=NO_VALUE_OPTIONAL;type=NO_VALUE_OPTIONAL; 2025-06-24T21:14:55.241938Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=NO_VALUE_OPTIONAL; 2025-06-24T21:14:55.242018Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Granules;id=Granules; 2025-06-24T21:14:55.242065Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=1;type=Granules; 2025-06-24T21:14:55.242254Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:55.242362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:55.242431Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:55.242467Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=2;type=Chunks; 2025-06-24T21:14:55.242577Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:55.242639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:55.242679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:55.242712Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:55.242901Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:55.242973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:55.243019Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:55.243057Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:55.243207Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:55.243297Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:55.243351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:55.243380Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:55.243431Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:55.243471Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:55.243498Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:55.243545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:55.243621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:55.243654Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:55.243866Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:55.243914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:55.243943Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:55.244093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:55.244143Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:55.244185Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:55.244247Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:55.244286Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:55.244319Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:55.244360Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:55.244425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:55.244482Z node 1 :TX_ ... 
ARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-24T21:14:57.315112Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=2; 2025-06-24T21:14:57.315328Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=20048;merger=0;interval_id=2; 2025-06-24T21:14:57.315513Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-24T21:14:57.315683Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:14:57.315741Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=20048;finished=1; 2025-06-24T21:14:57.315794Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:14:57.316424Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:14:57.316938Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:20048;schema=key1: uint64 key2: uint64 field: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:14:57.317077Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:14:57.317487Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;);columns=3;rows=20048; 2025-06-24T21:14:57.317676Z node 1 
:TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=2405760;num_rows=20048;batch_columns=key1,key2,field; 2025-06-24T21:14:57.318139Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:512:2512];bytes=2405760;rows=20048;faults=0;finished=0;fault=0;schema=key1: uint64 key2: uint64 field: string; 2025-06-24T21:14:57.318541Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:14:57.318713Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:14:57.318960Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:14:57.320335Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:14:57.320614Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:14:57.320924Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:14:57.321004Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:514:2513] finished for 
tablet 9437184 2025-06-24T21:14:57.321725Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:512:2512];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["l_task_result"],"t":0.341},{"events":["f_ack"],"t":0.342},{"events":["l_ProduceResults","f_Finish"],"t":0.346},{"events":["l_ack","l_processing","l_Finish"],"t":0.347}],"full":{"a":1750799696973982,"name":"_full_task","f":1750799696973982,"d_finished":0,"c":0,"l":1750799697321119,"d":347137},"events":[{"name":"bootstrap","f":1750799696974163,"d_finished":3902,"c":1,"l":1750799696978065,"d":3902},{"a":1750799697320279,"name":"ack","f":1750799697316333,"d_finished":2670,"c":1,"l":1750799697319003,"d":3510},{"a":1750799697320199,"name":"processing","f":1750799696979942,"d_finished":243857,"c":16,"l":1750799697319013,"d":244777},{"name":"ProduceResults","f":1750799696976322,"d_finished":6740,"c":19,"l":1750799697320974,"d":6740},{"a":1750799697320979,"name":"Finish","f":1750799697320979,"d_finished":0,"c":0,"l":1750799697321119,"d":140},{"name":"task_result","f":1750799696979960,"d_finished":240756,"c":15,"l":1750799697315974,"d":240756}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:14:57.321837Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:512:2512];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:14:57.322363Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:512:2512];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["l_task_result"],"t":0.341},{"events":["f_ack"],"t":0.342},{"events":["l_ProduceResults","f_Finish"],"t":0.346},{"events":["l_ack","l_processing","l_Finish"],"t":0.347}],"full":{"a":1750799696973982,"name":"_full_task","f":1750799696973982,"d_finished":0,"c":0,"l":1750799697321905,"d":347923},"events":[{"name":"bootstrap","f":1750799696974163,"d_finished":3902,"c":1,"l":1750799696978065,"d":3902},{"a":1750799697320279,"name":"ack","f":1750799697316333,"d_finished":2670,"c":1,"l":1750799697319003,"d":4296},{"a":1750799697320199,"name":"processing","f":1750799696979942,"d_finished":243857,"c":16,"l":1750799697319013,"d":245563},{"name":"ProduceResults","f":1750799696976322,"d_finished":6740,"c":19,"l":1750799697320974,"d":6740},{"a":1750799697320979,"name":"Finish","f":1750799697320979,"d_finished":0,"c":0,"l":1750799697321905,"d":926},{"name":"task_result","f":1750799696979960,"d_finished":240756,"c":15,"l":1750799697315974,"d":240756}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:14:57.322526Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:14:56.973461Z;index_granules=0;index_portions=2;index_batches=748;schema_columns=3;filter_columns=0;additional_columns=0;compacted_portions_bytes=2776560;inserted_portions_bytes=0;committed_portions_bytes=2488696;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=5265256;selected_rows=0; 2025-06-24T21:14:57.322589Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:14:57.323032Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:514:2513];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;; >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant-DomainLoginOnly-StrictAclCheck [FAIL] >> TColumnShardTestReadWrite::ReadSomePrograms ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::DataReceivedCallbackReal [GOOD] Test command err: 2025-06-24T21:14:26.178987Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.179015Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.179045Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session 
to cluster cluster in 0.000000s 2025-06-24T21:14:26.179621Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.202162Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.202339Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.204077Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:26.206142Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.206305Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:14:26.206406Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.206451Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-24T21:14:26.207127Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.207265Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.207292Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.207669Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.208368Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.208537Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.208719Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:26.209073Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.209230Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:14:26.209311Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.209355Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-24T21:14:26.210172Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.210194Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.210226Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.210468Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.211037Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.211191Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.211369Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:26.211924Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.212875Z :DEBUG: Decompression task done. 
Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:14:26.214069Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.214111Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-24T21:14:26.214869Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.214933Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.214959Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.215237Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.215825Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.215947Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.216218Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:26.221516Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.223747Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:14:26.223900Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.223956Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-24T21:14:26.224821Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.224844Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.224887Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.225209Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.225820Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.225953Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.226203Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:26.226590Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.226696Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:14:26.226771Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.226802Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-24T21:14:26.227391Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.227429Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.227457Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.227789Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-24T21:14:26.229913Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.230296Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.230529Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:26.230934Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.231033Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:14:26.231124Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.231180Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-24T21:14:26.232064Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.232109Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.232136Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.232423Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.232991Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.233092Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.233257Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:26.233928Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.234086Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:14:26.234156Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:26.234193Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-24T21:14:26.234961Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.235037Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.235084Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.235367Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:26.235983Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:26.236088Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.236253Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:26.237818Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.238211Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:14:26.238282Z :DEBUG: Take Data. Partition 1. 
Read: {0, 0} (1-1) 2025-06-24T21:14:26.238325Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-24T21:14:26.242975Z :ReadSession INFO: Random seed for debugging is 1750799666242949 2025-06-24T21:14:26.683882Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627309158842717:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.683935Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:27.214217Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002f02/r3tmp/tmpZQ4rKG/pdisk_1.dat 2025-06-24T21:14:27.249129Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:14:27.439043Z node 2 :METADATA_ ... 0 2025-06-24T21:14:46.624717Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_1_1_13504621149265799465_v1 grpc read done: success# 1, data# { read { } } 2025-06-24T21:14:46.624803Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer shared/user session shared/user_1_1_13504621149265799465_v1 got read request: guid# d3915dde-7e9e13d0-5207104e-c3a8b67c 2025-06-24T21:14:46.625623Z :DEBUG: [/Root] Decompression task done. Partition/PartitionSessionId: 0 (2-2) 2025-06-24T21:14:46.625721Z :DEBUG: [/Root] Take Data. Partition 0. Read: {0, 0} (2-2) GOT MESSAGE: Message { Data: "message3" Partition stream id: 1 Cluster: "dc1". Topic: "test-topic" Partition: 0 PartitionKey: "" Information: { Offset: 2 SeqNo: 3 MessageGroupId: "test-message-group-id" CreateTime: 2025-06-24T21:14:46.501000Z WriteTime: 2025-06-24T21:14:46.505000Z Ip: "ipv6:[::1]:35480" UncompressedSize: 8 Meta: { "logtype": "unknown", "ident": "unknown", "server": "ipv6:[::1]:35480" } } } 2025-06-24T21:14:46.625906Z :DEBUG: [/Root] [/Root] [493c618c-d50f319-f046d855-735bd080] [dc1] Commit offsets [2, 3). Partition stream id: 1 2025-06-24T21:14:46.626271Z :DEBUG: [/Root] [/Root] [493c618c-d50f319-f046d855-735bd080] [dc1] The application data is transferred to the client. 
Number of messages 1, size 8 bytes 2025-06-24T21:14:46.626970Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_1_1_13504621149265799465_v1 grpc read done: success# 1, data# { commit { offset_ranges { assign_id: 1 start_offset: 2 end_offset: 3 } } } 2025-06-24T21:14:46.627239Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:192: session cookie 1 consumer shared/user session shared/user_1_1_13504621149265799465_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) committing to position 3 prev 2 end 3 by cookie 4 2025-06-24T21:14:46.627460Z :DEBUG: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|70ab275b-b4e7a54a-80ba4dfb-78f93db6_0] Write session got write response: sequence_numbers: 3 offsets: 2 already_written: false write_statistics { persist_duration_ms: 9 queued_in_partition_duration_ms: 102 } 2025-06-24T21:14:46.627510Z :DEBUG: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|70ab275b-b4e7a54a-80ba4dfb-78f93db6_0] Write session: acknoledged message 1 2025-06-24T21:14:46.627701Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T21:14:46.627744Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T21:14:46.627865Z node 2 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user offset is set to 3 (startOffset 0) session shared/user_1_1_13504621149265799465_v1 2025-06-24T21:14:46.628009Z node 2 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T21:14:46.631601Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/user session shared/user_1_1_13504621149265799465_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 4 } 2025-06-24T21:14:46.631654Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer shared/user session shared/user_1_1_13504621149265799465_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) commit done to position 3 endOffset 3 with cookie 4 2025-06-24T21:14:46.631694Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer shared/user session shared/user_1_1_13504621149265799465_v1 replying for commits: assignId# 1, from# 4, to# 4, offset# 3 2025-06-24T21:14:46.632273Z :DEBUG: [/Root] [/Root] [493c618c-d50f319-f046d855-735bd080] [dc1] Committed response: offset_ranges { assign_id: 1 start_offset: 2 end_offset: 3 } 2025-06-24T21:14:46.631218Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp for offset 3 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:14:46.631291Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:14:46.631300Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 4 2025-06-24T21:14:46.631377Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, 
Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=468, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:14:46.704993Z :INFO: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|70ab275b-b4e7a54a-80ba4dfb-78f93db6_0] Write session will now close 2025-06-24T21:14:46.705075Z :DEBUG: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|70ab275b-b4e7a54a-80ba4dfb-78f93db6_0] Write session: aborting 2025-06-24T21:14:46.705708Z :INFO: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|70ab275b-b4e7a54a-80ba4dfb-78f93db6_0] Write session: gracefully shut down, all writes complete 2025-06-24T21:14:46.705762Z :DEBUG: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|70ab275b-b4e7a54a-80ba4dfb-78f93db6_0] Write session: destroy 2025-06-24T21:14:46.708007Z node 1 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: test-message-group-id|70ab275b-b4e7a54a-80ba4dfb-78f93db6_0 grpc read done: success: 0 data: 2025-06-24T21:14:46.708034Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: test-message-group-id|70ab275b-b4e7a54a-80ba4dfb-78f93db6_0 grpc read failed 2025-06-24T21:14:46.708075Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: test-message-group-id|70ab275b-b4e7a54a-80ba4dfb-78f93db6_0 grpc closed 2025-06-24T21:14:46.708090Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: test-message-group-id|70ab275b-b4e7a54a-80ba4dfb-78f93db6_0 is DEAD 2025-06-24T21:14:46.708866Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T21:14:46.711331Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [1:7519627395058191428:2566] destroyed 2025-06-24T21:14:46.711396Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-24T21:14:48.610750Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=468, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:14:49.123314Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/user session shared/user_1_1_13504621149265799465_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 5 from offset 3 2025-06-24T21:14:53.610872Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=468, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:14:56.621831Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/user session shared/user_1_1_13504621149265799465_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 6 from offset 3 2025-06-24T21:14:56.707247Z :INFO: [/Root] [/Root] [493c618c-d50f319-f046d855-735bd080] Closing read session. 
Close timeout: 0.000000s 2025-06-24T21:14:56.707340Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:3 2025-06-24T21:14:56.707407Z :INFO: [/Root] [/Root] [493c618c-d50f319-f046d855-735bd080] Counters: { Errors: 0 CurrentSessionLifetimeMs: 16623 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 24 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:14:56.707575Z :NOTICE: [/Root] [/Root] [493c618c-d50f319-f046d855-735bd080] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-24T21:14:56.707652Z :DEBUG: [/Root] [/Root] [493c618c-d50f319-f046d855-735bd080] [dc1] Abort session to cluster 2025-06-24T21:14:56.708372Z :NOTICE: [/Root] [/Root] [493c618c-d50f319-f046d855-735bd080] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:14:56.709011Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_1_1_13504621149265799465_v1 grpc read done: success# 0, data# { } 2025-06-24T21:14:56.709049Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_1_1_13504621149265799465_v1 grpc read failed 2025-06-24T21:14:56.709097Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:1645: session cookie 1 consumer shared/user session shared/user_1_1_13504621149265799465_v1 closed 2025-06-24T21:14:56.709970Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_1_1_13504621149265799465_v1 is DEAD 2025-06-24T21:14:56.711206Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [1:7519627369288387196:2486] disconnected; active server actors: 1 2025-06-24T21:14:56.711402Z node 1 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [1:7519627369288387196:2486] client user disconnected session shared/user_1_1_13504621149265799465_v1 2025-06-24T21:14:56.732333Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session shared/user_1_1_13504621149265799465_v1 2025-06-24T21:14:56.732382Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [1:7519627369288387199:2489] destroyed 2025-06-24T21:14:56.732445Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_1_1_13504621149265799465_v1 2025-06-24T21:14:57.118090Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [1:7519627442302832163:2655] TxId: 281474976710724. Ctx: { TraceId: 01jyhwpgwgav3rx3nyerrrdpcc, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2M4NzA1YTQtOWUzZTU1M2UtMTdjYWU2MWEtYzFhMjI3MzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 2 2025-06-24T21:14:57.118902Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7519627442302832167:2655], TxId: 281474976710724, task: 3. Ctx: { TraceId : 01jyhwpgwgav3rx3nyerrrdpcc. SessionId : ydb://session/3?node_id=1&id=N2M4NzA1YTQtOWUzZTU1M2UtMTdjYWU2MWEtYzFhMjI3MzY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [1:7519627442302832163:2655], status: UNAVAILABLE, reason: {
: Error: Terminate execution } >> TColumnShardTestReadWrite::WriteOverload-InStore [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKTimestamp >> TColumnShardTestReadWrite::CompactionInGranule_PKInt64 >> ReadSessionImplTest::DataReceivedCallback [GOOD] >> ReadSessionImplTest::CommonHandler >> ReadSessionImplTest::CommonHandler [GOOD] >> TColumnShardTestReadWrite::ReadSomePrograms [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteOverload-InStore [GOOD] Test command err: 2025-06-24T21:14:49.172823Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:49.197885Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:49.198132Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:49.205397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:49.205592Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:49.205802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:49.205890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:49.205971Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:49.206048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:49.206144Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:49.206218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:49.206276Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:49.206357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.206428Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:49.233783Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:49.234092Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:49.234159Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:49.234348Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:49.234523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:49.234603Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:49.234647Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:49.234766Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:49.234838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:49.234893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:49.234922Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:49.235048Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:49.235111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:49.235179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:49.235212Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:49.235313Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:49.235364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:49.235422Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:49.235458Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:49.235495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:49.235524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:49.235554Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:49.235815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:49.235857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:49.235889Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:49.236041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:49.236093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:49.236126Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:49.236266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:49.236329Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.236360Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.236415Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:49.236478Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:49.236510Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:49.236531Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:49.236936Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=39; 2025-06-24T21:14:49.237015Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=35; 2025-06-24T21:14:49.237121Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=33; 2025-06-24T21:14:49.237187Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=30; 2025-06-24T21:14:49.237280Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:49.237375Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:49.237412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:49.237451Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
te::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=21;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=4428;count=36;size_of_meta=136; 2025-06-24T21:14:59.223089Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=21;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=5724;count=18;size_of_portion=208; 2025-06-24T21:14:59.224096Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:18 Blob count: 1 2025-06-24T21:14:59.224198Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=21;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=19;operation_id=18; 2025-06-24T21:14:59.236287Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:18 Blob count: 1 2025-06-24T21:14:59.238280Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6330728;event=data_write_finished;writing_id=4787e7e8-514011f0-86ba90ae-1590572a; 2025-06-24T21:14:59.238431Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=1786;count=37; 2025-06-24T21:14:59.238499Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=4674;count=38;size_of_meta=136; 2025-06-24T21:14:59.238553Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=6042;count=19;size_of_portion=208; 2025-06-24T21:14:59.239448Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:19 Blob count: 1 2025-06-24T21:14:59.239544Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=20;operation_id=19; 2025-06-24T21:14:59.251492Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:19 Blob count: 1 2025-06-24T21:14:59.271060Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6330728;event=data_write_finished;writing_id=47bf3680-514011f0-bb700ff4-78e0c7c; 2025-06-24T21:14:59.271313Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=1880;count=39; 2025-06-24T21:14:59.271395Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=4920;count=40;size_of_meta=136; 2025-06-24T21:14:59.271473Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=6360;count=20;size_of_portion=208; 2025-06-24T21:14:59.272599Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:20 Blob count: 1 2025-06-24T21:14:59.272721Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=21;operation_id=20; 2025-06-24T21:14:59.285314Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:20 Blob count: 1 2025-06-24T21:14:59.287608Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6330728;event=data_write_finished;writing_id=47f31b62-514011f0-bc052148-d77884c9; 2025-06-24T21:14:59.287850Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=1974;count=41; 2025-06-24T21:14:59.287935Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=5166;count=42;size_of_meta=136; 2025-06-24T21:14:59.288019Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=6678;count=21;size_of_portion=208; 2025-06-24T21:14:59.289242Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:21 Blob count: 1 2025-06-24T21:14:59.289377Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=22;operation_id=21; 2025-06-24T21:14:59.301908Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:21 Blob count: 1 2025-06-24T21:14:59.310099Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:210;event=register_operation;operation_id=22;last=22; 2025-06-24T21:14:59.310198Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=write_queue.cpp:14;writing_size=6330728;operation_id=4901da98-514011f0-83c794c2-6f07ea8d;in_flight=1;size_in_flight=6330728; 2025-06-24T21:14:59.604601Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;scope=TBuildBatchesTask::DoExecute;tablet_id=9437184;parent_id=[1:127:2157];write_id=22;path_id={internal: 9438184000001, ss: 1};fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=8246112;count=1;actions=__DEFAULT,;waiting=1;; 2025-06-24T21:14:59.675739Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6330728;event=data_write_finished;writing_id=4901da98-514011f0-83c794c2-6f07ea8d; 2025-06-24T21:14:59.675983Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=2068;count=43; 2025-06-24T21:14:59.676087Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=5412;count=44;size_of_meta=136; 2025-06-24T21:14:59.676184Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=6996;count=22;size_of_portion=208; 2025-06-24T21:14:59.677348Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:22 Blob count: 1 2025-06-24T21:14:59.677488Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=23;operation_id=22; 2025-06-24T21:14:59.689848Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:22 Blob count: 1 >> TColumnShardTestReadWrite::RebootWriteRead [GOOD] >> CommitOffset::Commit_WithSession_ParentNotFinished_OtherSession [GOOD] >> CommitOffset::Commit_FromSession_ToNewChild_WithoutCommitToParent >> TColumnShardTestReadWrite::CompactionSplitGranuleStrKey_PKUtf8 >> GroupWriteTest::Simple [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::ReadSomePrograms [GOOD] Test command err: 2025-06-24T21:14:59.476419Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:59.500816Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:59.501102Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:59.507405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:59.507578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:59.507771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:59.507894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:59.507975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:59.508073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:59.508179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:59.508265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:59.508394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:59.508503Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:59.508619Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:59.532887Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:59.533146Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:59.533190Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:59.533353Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:59.533508Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:59.533572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:59.533609Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 
2025-06-24T21:14:59.533695Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:59.533756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:59.533804Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:59.533830Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:59.533980Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:59.534036Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:59.534066Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:59.534086Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:59.534162Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:59.534207Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:59.534245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:59.534290Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:59.534350Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:59.534391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:59.534417Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:59.534587Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:59.534618Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:59.534644Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:59.534789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:59.534841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:59.534862Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:59.534981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:59.535033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:59.535076Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:59.535166Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:59.535259Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:59.535293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:59.535343Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:59.535845Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=65; 2025-06-24T21:14:59.535940Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=38; 2025-06-24T21:14:59.536028Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=37; 2025-06-24T21:14:59.536127Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=40; 2025-06-24T21:14:59.536226Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:59.536326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:59.536377Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:59.536438Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... lem=Background activities cannot be started: no index at tablet; 2025-06-24T21:15:00.154723Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=10;this=88923003844864;method=TTxController::StartProposeOnExecute;tx_info=10:TX_KIND_SCHEMA;min=1750799700437;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-24T21:15:00.154798Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=10;this=88923003844864;method=TTxController::StartProposeOnExecute;tx_info=10:TX_KIND_SCHEMA;min=1750799700437;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;fline=schema.h:38;event=sync_schema; 2025-06-24T21:15:00.167337Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750799700437;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;this=88923003844864;op_tx=10:TX_KIND_SCHEMA;min=1750799700437;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750799700437;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;int_this=89129165484416;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-24T21:15:00.167458Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750799700437;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;this=88923003844864;op_tx=10:TX_KIND_SCHEMA;min=1750799700437;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750799700437;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;int_this=89129165484416;method=TTxController::FinishProposeOnComplete;tx_id=10;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:102:2135]; 2025-06-24T21:15:00.167545Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750799700437;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;this=88923003844864;op_tx=10:TX_KIND_SCHEMA;min=1750799700437;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750799700437;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;int_this=89129165484416;method=TTxController::FinishProposeOnComplete;tx_id=10;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=10; 2025-06-24T21:15:00.167905Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-24T21:15:00.168105Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750799700437 at tablet 9437184, mediator 0 2025-06-24T21:15:00.168173Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] execute at tablet 9437184 2025-06-24T21:15:00.168574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-24T21:15:00.168684Z node 1 :TX_COLUMNSHARD INFO: ctor_logger.h:56: EnsureTable for pathId: {internal: 9438184000001, ss: 1} ttl settings: { Version: 1 } at tablet 9437184 2025-06-24T21:15:00.174978Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:15:00.175169Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tables_manager.cpp:304;method=RegisterTable;path_id=9438184000001; 2025-06-24T21:15:00.175263Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine.h:144;event=RegisterTable;path_id=9438184000001; 2025-06-24T21:15:00.182648Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=9438184000001; 2025-06-24T21:15:00.182867Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tx_controller.cpp:215;event=finished_tx;tx_id=10; 2025-06-24T21:15:00.208298Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] complete at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=3200;columns=5; 2025-06-24T21:15:00.212648Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:210;event=register_operation;operation_id=1;last=1; 2025-06-24T21:15:00.212727Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=write_queue.cpp:14;writing_size=3200;operation_id=498b92d8-514011f0-9cb9f527-f3527b5c;in_flight=1;size_in_flight=3200; 2025-06-24T21:15:00.226132Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;scope=TBuildBatchesTask::DoExecute;tablet_id=9437184;parent_id=[1:127:2157];write_id=1;path_id={internal: 9438184000001, ss: 1};fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=3768;count=1;actions=__DEFAULT,;waiting=1;; 2025-06-24T21:15:00.228223Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=3200;event=data_write_finished;writing_id=498b92d8-514011f0-9cb9f527-f3527b5c; 2025-06-24T21:15:00.228556Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=60;data_size=20;sum=60;count=1; 2025-06-24T21:15:00.228695Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=212;data_size=188;sum=212;count=2;size_of_meta=136; 2025-06-24T21:15:00.228829Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=284;data_size=260;sum=284;count=1;size_of_portion=208; 2025-06-24T21:15:00.229739Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 1 2025-06-24T21:15:00.229874Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=2;operation_id=1; 2025-06-24T21:15:00.241809Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 1 2025-06-24T21:15:00.254971Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750799700443 at tablet 9437184, mediator 0 2025-06-24T21:15:00.255067Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[6] execute at tablet 9437184 2025-06-24T21:15:00.255451Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=100;fline=abstract.h:83;progress_tx_id=100;lock_id=1;broken=0; 2025-06-24T21:15:00.255837Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=100;fline=tx_controller.cpp:215;event=finished_tx;tx_id=100; 2025-06-24T21:15:00.268973Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[6] complete at tablet 9437184 2025-06-24T21:15:00.269101Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;fline=abstract.h:93;progress_tx_id=100;lock_id=1;broken=0; 2025-06-24T21:15:00.269390Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;commit_tx_id=100;commit_lock_id=1;fline=manager.cpp:177;event=remove_by_insert_id;id=2;operation_id=1; 2025-06-24T21:15:00.269435Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;commit_tx_id=100;commit_lock_id=1;fline=manager.cpp:180;event=remove_operation;operation_id=1; 2025-06-24T21:15:00.269863Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:240;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:15:00.269934Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:15:00.270077Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-24T21:15:00.282495Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:15:00.282613Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:15:00.282673Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:15:00.282783Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:15:00.283418Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 100 scanId: 0 version: {1750799700443:100} readable: {1750799700443:max} at tablet 9437184 2025-06-24T21:15:00.295611Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 100 scanId: 0 at tablet 9437184 2025-06-24T21:15:00.295833Z node 1 :TX_COLUMNSHARD_SCAN WARN: log.cpp:784: tx_id=100;scan_id=0;gen=0;table=;snapshot={1750799700443:100};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:14;event=TTxScan failed;problem=cannot parse program;details=Can't parse SsaProgram: Can't parse TOlapProgram protobuf; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::RebootWriteRead [GOOD] Test command err: 2025-06-24T21:14:53.773241Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:53.800402Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:53.800663Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:53.808581Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:53.808816Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:53.809072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:53.809212Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 
2025-06-24T21:14:53.809359Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:53.809461Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:53.809582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:53.809687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:53.809784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:53.809904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:53.810055Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:53.839713Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:53.839962Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:53.840039Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:53.840231Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:53.840464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:53.840539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:53.840586Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:53.840710Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:53.840781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 
2025-06-24T21:14:53.840838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:53.840874Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:53.841052Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:53.841128Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:53.841175Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:53.841202Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:53.841309Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:53.841379Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:53.841448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:53.841503Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:53.841559Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:53.841611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:53.841647Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:53.841893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:53.841943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:53.841986Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:53.842180Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:53.842250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:53.842294Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:53.842445Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:53.842521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:53.842554Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:53.842629Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:53.842699Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:53.842744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:53.842771Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:53.843298Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=64; 2025-06-24T21:14:53.843395Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=40; 2025-06-24T21:14:53.843510Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=33; 2025-06-24T21:14:53.843593Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=40; 2025-06-24T21:14:53.843704Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:53.843808Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:53.843862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:53.843920Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
ayload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:00.357609Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:15:00.357767Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-06-24T21:15:00.357863Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=1984;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-06-24T21:15:00.358191Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1000:2855];bytes=1984;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: string json_payload: string ingested_at: timestamp[us] saved_at: timestamp[us] request_id: string; 2025-06-24T21:15:00.358388Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:00.358527Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:00.358658Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:00.358911Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:15:00.359060Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:00.359222Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:00.359273Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:1001:2856] finished for tablet 9437184 2025-06-24T21:15:00.359834Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:1000:2855];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["l_task_result"],"t":0.015},{"events":["f_ack"],"t":0.016},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.018}],"full":{"a":1750799700341148,"name":"_full_task","f":1750799700341148,"d_finished":0,"c":0,"l":1750799700359340,"d":18192},"events":[{"name":"bootstrap","f":1750799700341436,"d_finished":4024,"c":1,"l":1750799700345460,"d":4024},{"a":1750799700358879,"name":"ack","f":1750799700357259,"d_finished":1431,"c":1,"l":1750799700358690,"d":1892},{"a":1750799700358864,"name":"processing","f":1750799700347023,"d_finished":7248,"c":10,"l":1750799700358693,"d":7724},{"name":"ProduceResults","f":1750799700343725,"d_finished":3759,"c":13,"l":1750799700359256,"d":3759},{"a":1750799700359259,"name":"Finish","f":1750799700359259,"d_finished":0,"c":0,"l":1750799700359340,"d":81},{"name":"task_result","f":1750799700347044,"d_finished":5659,"c":9,"l":1750799700357051,"d":5659}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:00.359932Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1000:2855];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:15:00.360484Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:1000:2855];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["l_task_result"],"t":0.015},{"events":["f_ack"],"t":0.016},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.018}],"full":{"a":1750799700341148,"name":"_full_task","f":1750799700341148,"d_finished":0,"c":0,"l":1750799700359982,"d":18834},"events":[{"name":"bootstrap","f":1750799700341436,"d_finished":4024,"c":1,"l":1750799700345460,"d":4024},{"a":1750799700358879,"name":"ack","f":1750799700357259,"d_finished":1431,"c":1,"l":1750799700358690,"d":2534},{"a":1750799700358864,"name":"processing","f":1750799700347023,"d_finished":7248,"c":10,"l":1750799700358693,"d":8366},{"name":"ProduceResults","f":1750799700343725,"d_finished":3759,"c":13,"l":1750799700359256,"d":3759},{"a":1750799700359259,"name":"Finish","f":1750799700359259,"d_finished":0,"c":0,"l":1750799700359982,"d":723},{"name":"task_result","f":1750799700347044,"d_finished":5659,"c":9,"l":1750799700357051,"d":5659}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:00.360580Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:15:00.340395Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=7600;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=7600;selected_rows=0; 2025-06-24T21:15:00.360634Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:15:00.361051Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> GroupWriteTest::Simple [GOOD] Test command err: RandomSeed# 12530568002472231680 2025-06-24T21:14:42.509150Z 1 00h01m00.010512s :BS_LOAD_TEST 
DEBUG: TabletId# 1 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 1 MinGeneration# 1 ReadBody# false DiscoverBlockedGeneration# true ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-06-24T21:14:42.534029Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 1 recieved TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-06-24T21:14:42.534106Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 1 going to send TEvBlock {TabletId# 1 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-06-24T21:14:42.536803Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-06-24T21:14:42.551991Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 2 going to send TEvCollectGarbage {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:14:42.554704Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-24T21:15:00.735290Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-24T21:15:00.735418Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 32 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:15:00.735482Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-24T21:15:00.735530Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 33 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:15:00.811184Z 1 00h01m30.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 32 Channel# 0 Status# OK} 2025-06-24T21:15:00.811316Z 1 00h01m30.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 33 Channel# 0 Status# OK} ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::CommonHandler [GOOD] Test command err: 2025-06-24T21:14:26.229143Z :SpecifyClustersExplicitly INFO: Random seed for debugging is 1750799666229113 2025-06-24T21:14:26.687277Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627308531465870:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.687354Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:26.767219Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627307592194217:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.771445Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002eac/r3tmp/tmpuXMVZO/pdisk_1.dat 2025-06-24T21:14:27.149805Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:14:27.150701Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:14:27.560209Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:27.560316Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:27.590271Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:27.616496Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:27.616570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:27.629629Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:14:27.631213Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:27.638524Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21763, node 1 2025-06-24T21:14:27.727620Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:27.795565Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:27.892813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/002eac/r3tmp/yandexzUb06l.tmp 2025-06-24T21:14:27.892837Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/002eac/r3tmp/yandexzUb06l.tmp 2025-06-24T21:14:27.893009Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/002eac/r3tmp/yandexzUb06l.tmp 2025-06-24T21:14:27.893201Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:14:28.143837Z INFO: TTestServer started on Port 4110 GrpcPort 21763 TClient is connected to server localhost:4110 PQClient connected to localhost:21763 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:28.544278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-24T21:14:30.844638Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627324772063672:2270], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:30.844638Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627324772063683:2273], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:30.844741Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:30.851326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:30.889595Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519627324772063686:2274], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-24T21:14:30.986730Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519627324772063714:2132] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:31.376472Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519627324772063729:2278], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:14:31.376910Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519627325711336100:2304], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:14:31.377971Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZTkwNjBkMzUtODNmYjZkMzctYWQxMmVhZC04NzVmZmZl, ActorId: [2:7519627324772063670:2269], ActorState: ExecuteState, TraceId: 01jyhwnqhr58ktfr9ag7zrp21g, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:14:31.381968Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:14:31.397646Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NGE1ZGJiMDktNDk1ODYyM2YtYjY4NTllZTItZTU3OGVhZmI=, ActorId: [1:7519627325711336066:2297], ActorState: ExecuteState, TraceId: 01jyhwnqm0be91yy614ypvp2c9, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:14:31.397996Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." 
end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:14:31.447768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:31.609992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:31.687937Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627308531465870:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:31.688037Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:31.767708Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519627307592194217:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:31.767778Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Ro ... 664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519627434195007589:2493] disconnected; active server actors: 1 2025-06-24T21:14:56.893642Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519627434195007589:2493] client user disconnected session shared/user_3_1_7828948583989257990_v1 2025-06-24T21:14:56.893839Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session shared/user_3_1_7828948583989257990_v1 2025-06-24T21:14:56.893922Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [3:7519627434195007594:2497] destroyed 2025-06-24T21:14:56.894027Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_3_1_7828948583989257990_v1 2025-06-24T21:14:57.090548Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715698, task: 1, CA Id [3:7519627442784942375:2531]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-06-24T21:14:57.123357Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715698, task: 1, CA Id [3:7519627442784942375:2531]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T21:14:57.173840Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715698, task: 1, CA Id [3:7519627442784942375:2531]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T21:14:57.242668Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715698, task: 1, CA Id [3:7519627442784942375:2531]. 
Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T21:14:57.345758Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715698, task: 1, CA Id [3:7519627442784942375:2531]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T21:14:57.351375Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:14:57.351425Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:57.377940Z node 3 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715699. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-24T21:14:57.378062Z node 3 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [3:7519627442784942402:2524] TxId: 281474976715699. Ctx: { TraceId: 01jyhwpgxf0pjz1kf8a75gtwy3, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTdhYzQ4ZmMtYjg0MjFhMGItZjI3NTdhZmYtNGQwZGI1ZmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-24T21:14:57.378299Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NTdhYzQ4ZmMtYjg0MjFhMGItZjI3NTdhZmYtNGQwZGI1ZmM=, ActorId: [3:7519627438489975054:2524], ActorState: ExecuteState, TraceId: 01jyhwpgxf0pjz1kf8a75gtwy3, Create QueryResponse for error on request, msg: 2025-06-24T21:14:57.379344Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jyhwphaf20x153h0cyh8wcrh" } } YdbStatus: UNAVAILABLE ConsumedRu: 275 } 2025-06-24T21:14:57.464927Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715698, task: 1, CA Id [3:7519627442784942375:2531]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T21:14:58.210819Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:58.210855Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:58.210888Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:58.211298Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:58.211919Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:58.212133Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:58.212562Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: 13. Commit offset: 31 2025-06-24T21:14:58.214082Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:58.214124Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:58.214153Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:58.214489Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:58.215016Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:58.215211Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:58.216592Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:58.217655Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:14:58.218249Z :INFO: Error decompressing data: (TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check) 2025-06-24T21:14:58.218357Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-3) 2025-06-24T21:14:58.218525Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:58.218577Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-24T21:14:58.218607Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-24T21:14:58.218653Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 3, size 16 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { DataDecompressionError: "(TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check)" Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2025-06-24T21:14:58.220871Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:58.220901Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:58.220941Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:58.221415Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:58.222273Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:58.222428Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:58.222884Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:14:58.223680Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:58.224003Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:14:58.225059Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:58.225145Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-24T21:14:58.225250Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 2). Partition stream id: 1 2025-06-24T21:14:58.226890Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:58.226946Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:58.227000Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:58.227406Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:14:58.227933Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:14:58.228160Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:58.228446Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. 
Read offset: (NULL) 2025-06-24T21:14:58.229238Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:14:58.229837Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:14:58.231192Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2025-06-24T21:14:58.231282Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:14:58.231345Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:14:58.231392Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (2-2) 2025-06-24T21:14:58.231535Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-24T21:14:58.231578Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-24T21:15:00.238317Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:15:00.238372Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:15:00.238411Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:15:00.266618Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:15:00.267385Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:15:00.267561Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:15:00.269213Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:15:00.269460Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:15:00.269537Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:15:00.269625Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 1, size 8 bytes >> Normalizers::CleanUnusedTablesNormalizer >> Normalizers::CleanEmptyPortionsNormalizer >> CommitOffset::DistributedTxCommit_CheckOffsetCommitForDifferentCases [GOOD] >> CommitOffset::DistributedTxCommit_Flat_CheckOffsetCommitForDifferentCases >> TColumnShardTestReadWrite::WriteOverload+InStore >> Normalizers::PortionsNormalizer >> TColumnShardTestReadWrite::CompactionInGranule_PKDatetime >> Normalizers::CleanUnusedTablesNormalizer [GOOD] >> Normalizers::PortionsNormalizer [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> Normalizers::CleanUnusedTablesNormalizer [GOOD] Test command err: 2025-06-24T21:15:02.889556Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:02.919601Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:02.919954Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:02.928659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanIndexColumns; 2025-06-24T21:15:02.928987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=NO_VALUE_OPTIONAL; 2025-06-24T21:15:02.929206Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:02.929417Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:02.929563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:02.929670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:02.929844Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:02.929997Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:02.930124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:02.930237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:02.930365Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:02.930507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:02.961007Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:02.961200Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=CleanIndexColumns; 2025-06-24T21:15:02.961268Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=NO_VALUE_OPTIONAL;type=NO_VALUE_OPTIONAL; 2025-06-24T21:15:02.961539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanIndexColumns;id=NO_VALUE_OPTIONAL; 2025-06-24T21:15:02.961632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Granules;id=Granules; 2025-06-24T21:15:02.961700Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=1;type=Granules; 2025-06-24T21:15:02.961876Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:02.961994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:02.962056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:02.962093Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=2;type=Chunks; 2025-06-24T21:15:02.962200Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:02.962270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:02.962315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:02.962354Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:02.962563Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:02.962647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:02.962721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:02.962753Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:02.962871Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:02.962952Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:02.963012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:02.963057Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:02.963126Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:02.963198Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:02.963228Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:02.963286Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:02.963328Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:02.963389Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:02.963646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:02.963693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:02.963722Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:02.963899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:02.963967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:02.964006Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:02.964079Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:02.964122Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:02.964176Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:02.964240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:02.964283Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:02.964340Z node 1 :TX_COLUMN ... 
TEvTaskProcessedResult; 2025-06-24T21:15:05.232052Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-24T21:15:05.232106Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=2; 2025-06-24T21:15:05.232170Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=20048;merger=0;interval_id=2; 2025-06-24T21:15:05.232214Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-24T21:15:05.232291Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:05.232318Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=20048;finished=1; 2025-06-24T21:15:05.232357Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:15:05.232535Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:15:05.232678Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:20048;schema=key1: uint64 key2: uint64 field: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:05.232719Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:15:05.232838Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready 
result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;);columns=3;rows=20048; 2025-06-24T21:15:05.232909Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=2405760;num_rows=20048;batch_columns=key1,key2,field; 2025-06-24T21:15:05.233075Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:517:2517];bytes=2405760;rows=20048;faults=0;finished=0;fault=0;schema=key1: uint64 key2: uint64 field: string; 2025-06-24T21:15:05.233243Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:05.233370Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:05.233482Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:05.234333Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:15:05.234502Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:05.234644Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:05.234686Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:522:2521] finished for tablet 9437184 2025-06-24T21:15:05.235119Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:517:2517];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["f_ack","l_task_result"],"t":0.287},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.289}],"full":{"a":1750799704945163,"name":"_full_task","f":1750799704945163,"d_finished":0,"c":0,"l":1750799705234739,"d":289576},"events":[{"name":"bootstrap","f":1750799704945402,"d_finished":3167,"c":1,"l":1750799704948569,"d":3167},{"a":1750799705234302,"name":"ack","f":1750799705232509,"d_finished":1001,"c":1,"l":1750799705233510,"d":1438},{"a":1750799705234282,"name":"processing","f":1750799704950559,"d_finished":201536,"c":16,"l":1750799705233514,"d":201993},{"name":"ProduceResults","f":1750799704947281,"d_finished":3810,"c":19,"l":1750799705234671,"d":3810},{"a":1750799705234674,"name":"Finish","f":1750799705234674,"d_finished":0,"c":0,"l":1750799705234739,"d":65},{"name":"task_result","f":1750799704950587,"d_finished":200248,"c":15,"l":1750799705232411,"d":200248}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:05.235239Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:517:2517];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:15:05.235681Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:517:2517];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["f_ack","l_task_result"],"t":0.287},{"events":["l_ProduceResults","f_Finish"],"t":0.289},{"events":["l_ack","l_processing","l_Finish"],"t":0.29}],"full":{"a":1750799704945163,"name":"_full_task","f":1750799704945163,"d_finished":0,"c":0,"l":1750799705235300,"d":290137},"events":[{"name":"bootstrap","f":1750799704945402,"d_finished":3167,"c":1,"l":1750799704948569,"d":3167},{"a":1750799705234302,"name":"ack","f":1750799705232509,"d_finished":1001,"c":1,"l":1750799705233510,"d":1999},{"a":1750799705234282,"name":"processing","f":1750799704950559,"d_finished":201536,"c":16,"l":1750799705233514,"d":202554},{"name":"ProduceResults","f":1750799704947281,"d_finished":3810,"c":19,"l":1750799705234671,"d":3810},{"a":1750799705234674,"name":"Finish","f":1750799705234674,"d_finished":0,"c":0,"l":1750799705235300,"d":626},{"name":"task_result","f":1750799704950587,"d_finished":200248,"c":15,"l":1750799705232411,"d":200248}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:05.235798Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:15:04.944551Z;index_granules=0;index_portions=2;index_batches=748;schema_columns=3;filter_columns=0;additional_columns=0;compacted_portions_bytes=2776560;inserted_portions_bytes=0;committed_portions_bytes=2488696;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=5265256;selected_rows=0; 2025-06-24T21:15:05.235847Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:15:05.236173Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:522:2521];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> Normalizers::PortionsNormalizer [GOOD] Test command err: 2025-06-24T21:15:03.822003Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:03.849725Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:03.850024Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:03.857900Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=EmptyPortionsCleaner; 2025-06-24T21:15:03.858187Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=LeakedBlobsNormalizer; 2025-06-24T21:15:03.858299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=NO_VALUE_OPTIONAL; 2025-06-24T21:15:03.858501Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:03.858711Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:03.858835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:03.858963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:03.859105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:03.859252Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:03.859357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:03.859477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:03.859608Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:03.859727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:03.890780Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:03.890988Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=12;current_normalizer=CLASS_NAME=EmptyPortionsCleaner; 2025-06-24T21:15:03.891047Z 
node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=NO_VALUE_OPTIONAL;type=NO_VALUE_OPTIONAL; 2025-06-24T21:15:03.891386Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_empty.cpp:323;tasks_for_remove=0;distribution=; 2025-06-24T21:15:03.891524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=EmptyPortionsCleaner;id=NO_VALUE_OPTIONAL; 2025-06-24T21:15:03.891598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=LeakedBlobsNormalizer;id=NO_VALUE_OPTIONAL; 2025-06-24T21:15:03.891641Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=NO_VALUE_OPTIONAL;type=NO_VALUE_OPTIONAL; 2025-06-24T21:15:03.892119Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=67; 2025-06-24T21:15:03.892224Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=50; 2025-06-24T21:15:03.892322Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=37; 2025-06-24T21:15:03.892420Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=51; 2025-06-24T21:15:03.892547Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=LeakedBlobsNormalizer;id=NO_VALUE_OPTIONAL; 2025-06-24T21:15:03.892605Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Granules;id=Granules; 2025-06-24T21:15:03.892647Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=1;type=Granules; 2025-06-24T21:15:03.892827Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:03.892929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:03.892980Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:03.893015Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=2;type=Chunks; 2025-06-24T21:15:03.893115Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:03.893177Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:03.893239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:03.893285Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:03.893454Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:03.893509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:03.893550Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:03.893579Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:03.893675Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:03.893728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:03.893764Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:03.893792Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:03.893878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:03.893924Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:03.893977Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:03.894036Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:03.894075Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:03.894101Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:03.894332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:03.894377Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:03.894410Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_ ... :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-24T21:15:05.357540Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:15:05.357588Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:15:05.357670Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-24T21:15:05.357737Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:15:05.357797Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:15:05.357853Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:15:05.357956Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=1.000000s; 2025-06-24T21:15:05.358019Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:15:05.442543Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 111 scanId: 0 version: {1750799704827:111} readable: {1750799704827:max} at tablet 9437184 2025-06-24T21:15:05.442720Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 111 scanId: 0 at tablet 9437184 2025-06-24T21:15:05.442925Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=111;scan_id=0;gen=0;table=;snapshot={1750799704827:111};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } 
Columns { Id: 2 } Columns { Id: 3 } } } ; 2025-06-24T21:15:05.443016Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=111;scan_id=0;gen=0;table=;snapshot={1750799704827:111};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } Columns { Id: 2 } Columns { Id: 3 } } } ; 2025-06-24T21:15:05.443948Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=111;scan_id=0;gen=0;table=;snapshot={1750799704827:111};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[{"from":8}]},{"owner_id":0,"inputs":[{"from":2},{"from":4},{"from":6}]},{"owner_id":8,"inputs":[]},{"owner_id":2,"inputs":[{"from":7}]},{"owner_id":4,"inputs":[{"from":7}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"8":{"p":{"p":{"data":[{"name":"key1","id":1},{"name":"key2","id":2},{"name":"field","id":3}]},"o":"0","t":"ReserveMemory"},"w":0,"id":8},"2":{"p":{"i":"1","p":{"address":{"name":"key1","id":1}},"o":"1","t":"AssembleOriginalData"},"w":11,"id":2},"6":{"p":{"i":"3","p":{"address":{"name":"field","id":3}},"o":"3","t":"AssembleOriginalData"},"w":11,"id":6},"7":{"p":{"i":"0","p":{"data":[{"name":"key1","id":1},{"name":"key2","id":2},{"name":"field","id":3}]},"o":"1,2,3","t":"FetchOriginalData"},"w":6,"id":7},"4":{"p":{"i":"2","p":{"address":{"name":"key2","id":2}},"o":"2","t":"AssembleOriginalData"},"w":11,"id":4},"0":{"p":{"i":"1,2,3","t":"Projection"},"w":33,"id":0}}}; 2025-06-24T21:15:05.444113Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=111;scan_id=0;gen=0;table=;snapshot={1750799704827:111};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-24T21:15:05.444682Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:368:2376];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=111;scan_id=0;gen=0;table=;snapshot={1750799704827:111};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:442:2441];trace_detailed=; 2025-06-24T21:15:05.445323Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:84;ff_first=(column_ids=1,2,3;column_names=field,key1,key2;);; 2025-06-24T21:15:05.445575Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;; 2025-06-24T21:15:05.445966Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:442:2441];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:15:05.446115Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:442:2441];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:05.446238Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:442:2441];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:05.446287Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:442:2441] finished for tablet 9437184 2025-06-24T21:15:05.446763Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:442:2441];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:440:2440];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","l_ack","f_processing","l_processing","f_ProduceResults","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750799705444609,"name":"_full_task","f":1750799705444609,"d_finished":0,"c":0,"l":1750799705446350,"d":1741},"events":[{"name":"bootstrap","f":1750799705444835,"d_finished":911,"c":1,"l":1750799705445746,"d":911},{"a":1750799705445937,"name":"ack","f":1750799705445937,"d_finished":0,"c":0,"l":1750799705446350,"d":413},{"a":1750799705445897,"name":"processing","f":1750799705445897,"d_finished":0,"c":0,"l":1750799705446350,"d":453},{"name":"ProduceResults","f":1750799705445732,"d_finished":274,"c":2,"l":1750799705446267,"d":274},{"a":1750799705446270,"name":"Finish","f":1750799705446270,"d_finished":0,"c":0,"l":1750799705446350,"d":80}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:05.446860Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:442:2441];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:440:2440];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:15:05.447371Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:442:2441];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:440:2440];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","f_ProduceResults","l_ProduceResults","f_Finish"],"t":0.001},{"events":["l_ack","l_processing","l_Finish"],"t":0.002}],"full":{"a":1750799705444609,"name":"_full_task","f":1750799705444609,"d_finished":0,"c":0,"l":1750799705446923,"d":2314},"events":[{"name":"bootstrap","f":1750799705444835,"d_finished":911,"c":1,"l":1750799705445746,"d":911},{"a":1750799705445937,"name":"ack","f":1750799705445937,"d_finished":0,"c":0,"l":1750799705446923,"d":986},{"a":1750799705445897,"name":"processing","f":1750799705445897,"d_finished":0,"c":0,"l":1750799705446923,"d":1026},{"name":"ProduceResults","f":1750799705445732,"d_finished":274,"c":2,"l":1750799705446267,"d":274},{"a":1750799705446270,"name":"Finish","f":1750799705446270,"d_finished":0,"c":0,"l":1750799705446923,"d":653}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:05.447462Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:442:2441];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:15:05.444049Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=3;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-24T21:15:05.447519Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:442:2441];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:15:05.447632Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:442:2441];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=uint64;records=0;count=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=uint64;records=0;count=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:49;event=insert_to_cache;key=string;records=0;size=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=string;records=0;count=0; >> AnalyzeColumnshard::AnalyzeRebootColumnShard [FAIL] >> TopicAutoscaling::PartitionSplit_AutosplitByLoad [GOOD] >> TopicAutoscaling::PartitionSplit_AutosplitByLoad_AfterAlter >> TColumnShardTestReadWrite::CompactionInGranule_PKInt32_Reboot >> TColumnShardTestReadWrite::WriteReadDuplicate >> GroupWriteTest::ByTableName [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> GroupWriteTest::ByTableName [GOOD] 
Test command err: RandomSeed# 7810864646991138659 2025-06-24T21:14:38.416186Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058428954028033 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 72058428954028033 MinGeneration# 1 ReadBody# false DiscoverBlockedGeneration# true ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-06-24T21:14:38.468770Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058428954028033 Generation# 1 recieved TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-06-24T21:14:38.468845Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058428954028033 Generation# 1 going to send TEvBlock {TabletId# 72058428954028033 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-06-24T21:14:38.471801Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058428954028033 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-06-24T21:14:38.502510Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058428954028033 Generation# 2 going to send TEvCollectGarbage {TabletId# 72058428954028033 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:14:38.505232Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058428954028033 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058428954028033 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-24T21:14:51.276751Z 8 00h01m11.810512s :BS_LOGCUTTER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) KEEPER: RetryCutLogEvent: limit exceeded; FreeUpToLsn# 1098 2025-06-24T21:15:03.448657Z 6 00h01m23.910512s :BS_LOGCUTTER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) KEEPER: RetryCutLogEvent: limit exceeded; FreeUpToLsn# 2706 2025-06-24T21:15:09.615821Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-24T21:15:09.615943Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058428954028033 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058428954028033 RecordGeneration# 2 PerGenerationCounter# 32 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:15:09.615981Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-24T21:15:09.616008Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058428954028033 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058428954028033 RecordGeneration# 2 PerGenerationCounter# 33 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:15:09.664656Z 1 00h01m30.010512s :BS_LOAD_TEST INFO: TabletId# 72058428954028033 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058428954028033 RecordGeneration# 2 PerGenerationCounter# 32 Channel# 0 Status# OK} 2025-06-24T21:15:09.664736Z 1 00h01m30.010512s :BS_LOAD_TEST INFO: TabletId# 72058428954028033 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058428954028033 RecordGeneration# 2 PerGenerationCounter# 33 Channel# 0 Status# OK} >> BasicUsage::BrokenCredentialsProvider [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithNoCompression [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages >> 
PersQueueSdkReadSessionTest::ReadSessionWithClose [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithCloseNotCommitted >> TColumnShardTestReadWrite::CompactionInGranule_PKUtf8_Reboot ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> BasicUsage::BrokenCredentialsProvider [GOOD] Test command err: 2025-06-24T21:14:26.496707Z :MaxByteSizeEqualZero INFO: Random seed for debugging is 1750799666496671 2025-06-24T21:14:26.933831Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627307675917434:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.934136Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:26.975291Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627310526196836:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.975401Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002e81/r3tmp/tmpErqtNg/pdisk_1.dat 2025-06-24T21:14:27.316298Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:14:27.320565Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:14:27.751823Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:27.751957Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:27.753701Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:27.781844Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:14:27.785409Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18011, node 1 2025-06-24T21:14:27.947862Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:27.947931Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:27.953834Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:27.958343Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:27.991727Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/002e81/r3tmp/yandexOC7J0z.tmp 2025-06-24T21:14:27.992103Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:27.991746Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: /home/runner/.ya/build/build_root/2btm/002e81/r3tmp/yandexOC7J0z.tmp 2025-06-24T21:14:27.991876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/002e81/r3tmp/yandexOC7J0z.tmp 2025-06-24T21:14:27.991960Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:14:28.143523Z INFO: TTestServer started on Port 25209 GrpcPort 18011 TClient is connected to server localhost:25209 PQClient connected to localhost:18011 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:28.559548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-24T21:14:30.956255Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627327706066312:2272], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:30.960009Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627327706066301:2269], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:30.960752Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:30.968799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:30.991476Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519627327706066316:2273], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-24T21:14:31.085839Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519627332001033642:2132] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:31.446909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:31.471015Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519627329150754957:2304], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:14:31.473428Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MWU0YTdlODktMjBjYTUyZmUtNGM4NTM3OC04NjM1ZTY4Yg==, ActorId: [1:7519627329150754921:2295], ActorState: ExecuteState, TraceId: 01jyhwnqr53tmhan3wzcak16wh, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:14:31.475586Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:14:31.477875Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519627332001033657:2278], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:14:31.478156Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NjM5NDZhMWItNjM0NmIzZDgtZmJiNGQ5MjctYzkxZjc5Zjc=, ActorId: [2:7519627327706066299:2268], ActorState: ExecuteState, TraceId: 01jyhwnqkr1d6a3zyght3j3p7q, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:14:31.478531Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:14:31.613157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:31.792285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:18011", true, true, 1000); 2025-06-24T21:14:31.934818Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627307675917434:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21: ... 
_session_actor.cpp:566: init check schema 2025-06-24T21:15:09.324054Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-24T21:15:09.324255Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-24T21:15:09.324276Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-24T21:15:09.324289Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-24T21:15:09.324310Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [5:7519627493525116282:2466] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-06-24T21:15:09.327792Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [5:7519627493525116282:2466] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-24T21:15:09.509586Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [5:7519627493525116282:2466] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-24T21:15:09.510620Z node 6 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [5:7519627493525116319:2466] connected; active server actors: 1 2025-06-24T21:15:09.511002Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [5:7519627493525116282:2466] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-24T21:15:09.511038Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [5:7519627493525116282:2466] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-24T21:15:09.512129Z node 6 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [5:7519627493525116319:2466] disconnected; active server actors: 1 2025-06-24T21:15:09.512165Z node 6 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [5:7519627493525116319:2466] disconnected no session 2025-06-24T21:15:09.642481Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [5:7519627493525116282:2466] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 
Status=SUCCESS 2025-06-24T21:15:09.642526Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [5:7519627493525116282:2466] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-24T21:15:09.642548Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [5:7519627493525116282:2466] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-24T21:15:09.642583Z node 5 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-24T21:15:09.643208Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 5, Generation: 1 2025-06-24T21:15:09.643268Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037892] server connected, pipe [5:7519627493525116341:2466], now have 1 active actors on pipe 2025-06-24T21:15:09.643302Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T21:15:09.643320Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T21:15:09.643394Z node 5 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|b8250424-267a13c3-e96ad63-c9a28019_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-24T21:15:09.643498Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-24T21:15:09.643568Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T21:15:09.643722Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T21:15:09.643760Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T21:15:09.643858Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T21:15:09.644014Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|b8250424-267a13c3-e96ad63-c9a28019_0 2025-06-24T21:15:09.644814Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750799709644 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:15:09.644926Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|b8250424-267a13c3-e96ad63-c9a28019_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-24T21:15:09.645133Z :INFO: [] MessageGroupId [src] SessionId [src|b8250424-267a13c3-e96ad63-c9a28019_0] Write session: close. 
Timeout = 0 ms 2025-06-24T21:15:09.645192Z :INFO: [] MessageGroupId [src] SessionId [src|b8250424-267a13c3-e96ad63-c9a28019_0] Write session will now close 2025-06-24T21:15:09.645246Z :DEBUG: [] MessageGroupId [src] SessionId [src|b8250424-267a13c3-e96ad63-c9a28019_0] Write session: aborting 2025-06-24T21:15:09.646162Z :INFO: [] MessageGroupId [src] SessionId [src|b8250424-267a13c3-e96ad63-c9a28019_0] Write session: gracefully shut down, all writes complete 2025-06-24T21:15:09.646224Z :DEBUG: [] MessageGroupId [src] SessionId [src|b8250424-267a13c3-e96ad63-c9a28019_0] Write session is aborting and will not restart 2025-06-24T21:15:09.646294Z :DEBUG: [] MessageGroupId [src] SessionId [src|b8250424-267a13c3-e96ad63-c9a28019_0] Write session: destroy 2025-06-24T21:15:09.646434Z node 5 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|b8250424-267a13c3-e96ad63-c9a28019_0 grpc read done: success: 0 data: 2025-06-24T21:15:09.646477Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|b8250424-267a13c3-e96ad63-c9a28019_0 grpc read failed 2025-06-24T21:15:09.646508Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|b8250424-267a13c3-e96ad63-c9a28019_0 grpc closed 2025-06-24T21:15:09.646528Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|b8250424-267a13c3-e96ad63-c9a28019_0 is DEAD 2025-06-24T21:15:09.647554Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T21:15:09.647725Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [5:7519627493525116341:2466] destroyed 2025-06-24T21:15:09.647790Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-24T21:15:09.664077Z :INFO: [/Root] [/Root] [56a7fbf2-e56e1774-ddc17edb-209a7c52] Starting read session 2025-06-24T21:15:09.664196Z :DEBUG: [/Root] [/Root] [56a7fbf2-e56e1774-ddc17edb-209a7c52] Starting session to cluster null (localhost:30073) 2025-06-24T21:15:09.666603Z :DEBUG: [/Root] [/Root] [56a7fbf2-e56e1774-ddc17edb-209a7c52] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:15:09.666662Z :DEBUG: [/Root] [/Root] [56a7fbf2-e56e1774-ddc17edb-209a7c52] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:15:09.666729Z :DEBUG: [/Root] [/Root] [56a7fbf2-e56e1774-ddc17edb-209a7c52] [null] Reconnecting session to cluster null in 0.000000s 2025-06-24T21:15:09.668459Z :ERROR: [/Root] [/Root] [56a7fbf2-e56e1774-ddc17edb-209a7c52] [null] Got error. Status: CLIENT_UNAUTHENTICATED. Description:
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation 2025-06-24T21:15:09.668539Z :DEBUG: [/Root] [/Root] [56a7fbf2-e56e1774-ddc17edb-209a7c52] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:15:09.668578Z :DEBUG: [/Root] [/Root] [56a7fbf2-e56e1774-ddc17edb-209a7c52] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:15:09.668724Z :INFO: [/Root] [/Root] [56a7fbf2-e56e1774-ddc17edb-209a7c52] [null] Closing session to cluster: SessionClosed { Status: CLIENT_UNAUTHENTICATED Issues: "
: Error: Failed to establish connection to server "" ( cluster null). Attempts done: 1
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation " } Get event on client 2025-06-24T21:15:09.668915Z :NOTICE: [/Root] [/Root] [56a7fbf2-e56e1774-ddc17edb-209a7c52] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:15:09.668955Z :DEBUG: [/Root] [/Root] [56a7fbf2-e56e1774-ddc17edb-209a7c52] [null] Abort session to cluster Got close event: SessionClosed { Status: CLIENT_UNAUTHENTICATED Issues: "
: Error: Failed to establish connection to server "" ( cluster null). Attempts done: 1
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation " }2025-06-24T21:15:09.669044Z :INFO: [/Root] [/Root] [56a7fbf2-e56e1774-ddc17edb-209a7c52] Closing read session. Close timeout: 0.000000s 2025-06-24T21:15:09.669091Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-24T21:15:09.669147Z :INFO: [/Root] [/Root] [56a7fbf2-e56e1774-ddc17edb-209a7c52] Counters: { Errors: 1 CurrentSessionLifetimeMs: 5 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:15:09.669226Z :NOTICE: [/Root] [/Root] [56a7fbf2-e56e1774-ddc17edb-209a7c52] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } >> TopicAutoscaling::ReadFromTimestamp_AutoscaleAwareSDK [GOOD] >> Normalizers::CleanEmptyPortionsNormalizer [GOOD] >> TColumnShardTestReadWrite::CompactionSplitGranule_PKUInt32 >> TColumnShardTestReadWrite::WriteOverload+InStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> Normalizers::CleanEmptyPortionsNormalizer [GOOD] Test command err: 2025-06-24T21:15:03.269685Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:03.300239Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:03.300563Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:03.309248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=EmptyPortionsCleaner; 2025-06-24T21:15:03.309549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=NO_VALUE_OPTIONAL; 2025-06-24T21:15:03.309758Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:03.309945Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:03.310094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:03.310199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:03.310348Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:03.310540Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:03.310663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:03.310822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:03.310959Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:03.311102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:03.354229Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:03.354416Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=EmptyPortionsCleaner; 2025-06-24T21:15:03.354474Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=NO_VALUE_OPTIONAL;type=NO_VALUE_OPTIONAL; 2025-06-24T21:15:03.354803Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_empty.cpp:323;tasks_for_remove=0;distribution=; 2025-06-24T21:15:03.354962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=EmptyPortionsCleaner;id=NO_VALUE_OPTIONAL; 2025-06-24T21:15:03.355068Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Granules;id=Granules; 2025-06-24T21:15:03.355116Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=1;type=Granules; 2025-06-24T21:15:03.355298Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:03.355404Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:03.355467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:03.355500Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=2;type=Chunks; 2025-06-24T21:15:03.355601Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:03.355660Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:03.355700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:03.355740Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:03.355920Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:03.355988Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:03.356030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:03.356080Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:03.356194Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:03.356269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:03.356324Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:03.356362Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:03.356412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:03.356457Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:03.356487Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:03.356531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:03.356576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:03.356603Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:03.356801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:03.356864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:03.356918Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:03.357051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:03.357100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:03.357128Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:03.357179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:03.357220Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:03.357247Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:03.357292Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:03.357355Z ... 
521Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-24T21:15:13.323571Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=2; 2025-06-24T21:15:13.323682Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=20048;merger=0;interval_id=2; 2025-06-24T21:15:13.323739Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-24T21:15:13.323834Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:13.323884Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=20048;finished=1; 2025-06-24T21:15:13.323925Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:15:13.324185Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:15:13.324382Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:20048;schema=key1: uint64 key2: uint64 field: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:13.324430Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:15:13.324569Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;);columns=3;rows=20048; 
2025-06-24T21:15:13.324650Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=2405760;num_rows=20048;batch_columns=key1,key2,field; 2025-06-24T21:15:13.324885Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:549:2546];bytes=2405760;rows=20048;faults=0;finished=0;fault=0;schema=key1: uint64 key2: uint64 field: string; 2025-06-24T21:15:13.325033Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:13.325164Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:13.325337Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:13.326336Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:15:13.326510Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:13.326637Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:13.326683Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: 
actor.cpp:414: Scan [1:554:2550] finished for tablet 9437184 2025-06-24T21:15:13.327316Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:549:2546];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.007},{"events":["l_task_result"],"t":0.343},{"events":["f_ack"],"t":0.344},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.346}],"full":{"a":1750799712980081,"name":"_full_task","f":1750799712980081,"d_finished":0,"c":0,"l":1750799713326766,"d":346685},"events":[{"name":"bootstrap","f":1750799712980370,"d_finished":3625,"c":1,"l":1750799712983995,"d":3625},{"a":1750799713326270,"name":"ack","f":1750799713324156,"d_finished":1222,"c":1,"l":1750799713325378,"d":1718},{"a":1750799713326239,"name":"processing","f":1750799712987372,"d_finished":250674,"c":16,"l":1750799713325381,"d":251201},{"name":"ProduceResults","f":1750799712982370,"d_finished":4364,"c":19,"l":1750799713326663,"d":4364},{"a":1750799713326667,"name":"Finish","f":1750799713326667,"d_finished":0,"c":0,"l":1750799713326766,"d":99},{"name":"task_result","f":1750799712987395,"d_finished":249122,"c":15,"l":1750799713323996,"d":249122}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:13.327426Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:549:2546];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:15:13.327935Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:549:2546];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.007},{"events":["l_task_result"],"t":0.343},{"events":["f_ack"],"t":0.344},{"events":["l_ProduceResults","f_Finish"],"t":0.346},{"events":["l_ack","l_processing","l_Finish"],"t":0.347}],"full":{"a":1750799712980081,"name":"_full_task","f":1750799712980081,"d_finished":0,"c":0,"l":1750799713327480,"d":347399},"events":[{"name":"bootstrap","f":1750799712980370,"d_finished":3625,"c":1,"l":1750799712983995,"d":3625},{"a":1750799713326270,"name":"ack","f":1750799713324156,"d_finished":1222,"c":1,"l":1750799713325378,"d":2432},{"a":1750799713326239,"name":"processing","f":1750799712987372,"d_finished":250674,"c":16,"l":1750799713325381,"d":251915},{"name":"ProduceResults","f":1750799712982370,"d_finished":4364,"c":19,"l":1750799713326663,"d":4364},{"a":1750799713326667,"name":"Finish","f":1750799713326667,"d_finished":0,"c":0,"l":1750799713327480,"d":813},{"name":"task_result","f":1750799712987395,"d_finished":249122,"c":15,"l":1750799713323996,"d":249122}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-24T21:15:13.328054Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:15:12.979487Z;index_granules=0;index_portions=2;index_batches=748;schema_columns=3;filter_columns=0;additional_columns=0;compacted_portions_bytes=2776560;inserted_portions_bytes=0;committed_portions_bytes=2488696;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=5265256;selected_rows=0; 2025-06-24T21:15:13.328110Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:15:13.328470Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:554:2550];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant-DomainLoginOnly-StrictAclCheck [FAIL] Test command err: Starting YDB, grpc: 6825, msgbus: 5619 2025-06-24T21:11:03.257811Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626437562972120:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:03.257884Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00362e/r3tmp/tmpd6FJhi/pdisk_1.dat 2025-06-24T21:11:03.758534Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:03.805012Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:03.805112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:03.816934Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6825, node 1 2025-06-24T21:11:04.011625Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:04.011650Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:04.011659Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:04.011804Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:04.273779Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5619 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T21:11:04.464331Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519626437562972316:2117] Handle TEvNavigate describe path dc-1 2025-06-24T21:11:04.488387Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519626441857940136:2450] HANDLE EvNavigateScheme dc-1 2025-06-24T21:11:04.488770Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519626441857940136:2450] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:04.542665Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519626441857940136:2450] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-24T21:11:04.560055Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519626441857940136:2450] Handle TEvDescribeSchemeResult Forward to# [1:7519626441857940135:2449] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 
DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:11:04.580716Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519626437562972316:2117] Handle TEvProposeTransaction 2025-06-24T21:11:04.580751Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519626437562972316:2117] TxId# 281474976710657 ProcessProposeTransaction 2025-06-24T21:11:04.580905Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519626437562972316:2117] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7519626441857940145:2455] 2025-06-24T21:11:04.706249Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519626441857940145:2455] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T21:11:04.706352Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519626441857940145:2455] txid# 281474976710657 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 0 2025-06-24T21:11:04.706402Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519626441857940145:2455] txid# 281474976710657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:11:04.706504Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519626441857940145:2455] txid# 281474976710657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:11:04.706838Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519626441857940145:2455] txid# 281474976710657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:04.707011Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519626441857940145:2455] HANDLE EvNavigateKeySetResult, txid# 281474976710657 shardToRequest# 72057594046644480 
DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-24T21:11:04.707056Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519626441857940145:2455] txid# 281474976710657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710657 TabletId# 72057594046644480} 2025-06-24T21:11:04.707278Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519626441857940145:2455] txid# 281474976710657 HANDLE EvClientConnected 2025-06-24T21:11:04.708628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:11:04.711658Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519626441857940145:2455] txid# 281474976710657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710657} 2025-06-24T21:11:04.711712Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519626441857940145:2455] txid# 281474976710657 SEND to# [1:7519626441857940144:2454] Source {TEvProposeTransactionStatus txid# 281474976710657 Status# 53} waiting... 2025-06-24T21:11:04.729642Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519626437562972316:2117] Handle TEvProposeTransaction 2025-06-24T21:11:04.729666Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519626437562972316:2117] TxId# 281474976710658 ProcessProposeTransaction 2025-06-24T21:11:04.729694Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519626437562972316:2117] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519626441857940184:2490] 2025-06-24T21:11:04.732421Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519626441857940184:2490] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-24T21:11:04.732487Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519626441857940184:2490] txid# 281474976710658 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 0 2025-06-24T21:11:04.732502Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519626441857940184:2490] txid# 281474976710658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-24T21:11:04.732567Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519626441857940184:2490] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:11:04.732914Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519626441857940184:2490] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:11:04.733029Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519626441857940184:2490] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] 
DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:11:04.733084Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519626441857940184:2490] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-24T21:11:04.733271Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519626441857940184:2490] txid# 281474976710658 HANDLE EvClientConnected 2025-06-24T21:11:04.733779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 28147497671065 ... y msg operationId: 281474976710660:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710660 msg type: 269090816 2025-06-24T21:14:50.298106Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976710660, partId: 4294967295, tablet: 72057594046316545 2025-06-24T21:14:50.301446Z node 59 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750799690346, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T21:14:50.301638Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710660 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750799690346 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T21:14:50.301675Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:723: TTxOperationPlanStep Execute operation part is already done, operationId: 281474976710660:0 2025-06-24T21:14:50.301747Z node 59 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710660:1, at tablet# 72057594046644480 2025-06-24T21:14:50.302240Z node 59 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710660:1 128 -> 240 2025-06-24T21:14:50.302313Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710660:1, at tablet# 72057594046644480 2025-06-24T21:14:50.302498Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 7 2025-06-24T21:14:50.302634Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:568: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2], Generation: 1, ActorId:[60:7519627411200581393:2259], EffectiveACLVersion: 1, SubdomainVersion: 3, UserAttributesVersion: 1, TenantHive: 72075186224037888, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 3, actualEffectiveACLVersion: 1, actualUserAttrsVersion: 1, tenantHive: 72075186224037888, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046644480 2025-06-24T21:14:50.306368Z node 59 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-24T21:14:50.306402Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710660, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T21:14:50.306692Z node 59 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-24T21:14:50.306719Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [59:7519627405891477623:2375], at schemeshard: 72057594046644480, txId: 281474976710660, path id: 2 2025-06-24T21:14:50.306786Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710660:1, at schemeshard: 72057594046644480 2025-06-24T21:14:50.306824Z node 59 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:761: [72057594046644480] TSyncHive, operationId 281474976710660:1, ProgressState, NeedSyncHive: 0 2025-06-24T21:14:50.306841Z node 59 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710660:1 240 -> 240 2025-06-24T21:14:50.309074Z node 59 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046644480, cookie: 281474976710660 2025-06-24T21:14:50.309269Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046644480, cookie: 281474976710660 2025-06-24T21:14:50.309291Z node 59 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976710660 2025-06-24T21:14:50.309323Z node 59 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710660, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 6 2025-06-24T21:14:50.309355Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 8 2025-06-24T21:14:50.309471Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710660, ready parts: 1/2, is published: true 2025-06-24T21:14:50.310886Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710660:1, at schemeshard: 72057594046644480 2025-06-24T21:14:50.310938Z node 59 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976710660:1 ProgressState 2025-06-24T21:14:50.311098Z node 59 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710660:1 progress is 2/2 2025-06-24T21:14:50.311117Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710660 ready parts: 2/2 2025-06-24T21:14:50.311164Z node 59 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is 
done id#281474976710660:1 progress is 2/2 2025-06-24T21:14:50.311183Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710660 ready parts: 2/2 2025-06-24T21:14:50.311204Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710660, ready parts: 2/2, is published: true 2025-06-24T21:14:50.311285Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [59:7519627410186445251:2283] message: TxId: 281474976710660 2025-06-24T21:14:50.311324Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710660 ready parts: 2/2 2025-06-24T21:14:50.311361Z node 59 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710660:0 2025-06-24T21:14:50.311375Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710660:0 2025-06-24T21:14:50.311529Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 7 2025-06-24T21:14:50.311542Z node 59 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710660:1 2025-06-24T21:14:50.311548Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710660:1 2025-06-24T21:14:50.311585Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 6 2025-06-24T21:14:50.313515Z node 59 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710660 TEST create admin clusteradmin 2025-06-24T21:14:50.328026Z node 59 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:578: Skip check permission connect db, user is a admin, database: /dc-1, user: root@builtin, from ip: ipv6:[::1]:56136 2025-06-24T21:14:50.922962Z node 60 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:53.599339Z node 59 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[59:7519627405891477025:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:53.599447Z node 59 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:54.914178Z node 60 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[60:7519627406905613794:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:54.914315Z node 60 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/tenant-db/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:55.481198Z node 59 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 60 2025-06-24T21:14:55.481862Z node 59 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(60, (0,0,0,0)) 
VolatileState: Connected -> Disconnected 2025-06-24T21:14:55.487221Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:14:55.561175Z node 59 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1482: TraceId: "01jyhwpajqdb0gwb3pyc3bswbh", Request deadline has expired for 0.234995s seconds assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:24080 TBackTrace::Capture()+28 (0x19709CCC) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x19BC7AE0) NKikimr::NTxProxyUT::CreateLocalUser(NKikimr::NTxProxyUT::TTestEnv const&, TBasicString> const&, TBasicString> const&, TBasicString> const&)+2057 (0x192E02D9) void NKikimr::NTxProxyUT::NTestSuiteSchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant(NUnitTest::TTestContext&)+3067 (0x1935CBDB) std::__y1::__function::__func, void ()>::operator()()+280 (0x193370B8) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x19BFECE6) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x19BCE669) NKikimr::NTxProxyUT::NTestSuiteSchemeReqAdminAccessInTenant::TCurrentTest::Execute()+1275 (0x1933626B) NUnitTest::TTestFactory::Execute()+2438 (0x19BCFF36) NUnitTest::RunMain(int, char**)+5213 (0x19BF925D) ??+0 (0x7FD3BBC26D90) __libc_start_main+128 (0x7FD3BBC26E40) _start+41 (0x16BA9029) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::ReadFromTimestamp_AutoscaleAwareSDK [GOOD] Test command err: 2025-06-24T21:12:43.575094Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626866876872374:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:43.575180Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031a1/r3tmp/tmpwtzx03/pdisk_1.dat 2025-06-24T21:12:43.944732Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:12:44.170061Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:44.182989Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:44.183076Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:44.185037Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62219, node 1 2025-06-24T21:12:44.387788Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0031a1/r3tmp/yandexkkalh3.tmp 2025-06-24T21:12:44.387814Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0031a1/r3tmp/yandexkkalh3.tmp 2025-06-24T21:12:44.387969Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0031a1/r3tmp/yandexkkalh3.tmp 2025-06-24T21:12:44.388068Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:44.587124Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:44.642932Z INFO: TTestServer started on Port 12910 GrpcPort 62219 TClient is connected to server localhost:12910 PQClient connected to localhost:62219 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:45.133816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:45.148182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:12:45.167117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:12:45.309850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:12:47.257182Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626884056742229:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.259441Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.259996Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626884056742242:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.264686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:47.280401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710662, at schemeshard: 72057594046644480 2025-06-24T21:12:47.281143Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626884056742244:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T21:12:47.339032Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626884056742308:2443] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:47.656179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.659473Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626884056742316:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:12:47.661315Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YTRiNTczNGItN2RkZTMyMDYtOGJlMTNlOS04MDNhYzAzMw==, ActorId: [1:7519626884056742227:2297], ActorState: ExecuteState, TraceId: 01jyhwjjcnfk21g1bssdjwv6hr, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:12:47.683853Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:12:47.697599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.792596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519626888351709898:2619] 2025-06-24T21:12:48.575621Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626866876872374:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:48.575715Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-24T21:12:54.236196Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-24T21:12:54.262342Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T21:12:54.264005Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519626914121513872:2697], Recipient [1:7519626871171839874:2145]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:12:54.264052Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:12:54.264069Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:12:54.264121Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519626914121513868:2694], Recipient [1:7519626871171839874:2145]: {TEvModifySchemeTransaction txid# 281474976710673 TabletId# 72057594046644480} 2025-06-24T21:12:54.264144Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T21:12:54.394569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSecond ... eIdle, received event# 271188544, Sender [7:7519627447304715322:2420], Recipient [7:7519627447304715319:2418]: NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-24T21:15:12.868373Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186224037893][test-topic] Send TEvPersQueue::TEvStatus TabletId: 72075186224037896 Cookie: 20 2025-06-24T21:15:12.868381Z node 7 :PERSQUEUE TRACE: partition.h:625: StateIdle, processing event NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-24T21:15:12.868591Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271188536, Sender [7:7519627447304715260:2415], Recipient [7:7519627447304715259:2414]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-06-24T21:15:12.868630Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271188536, Sender [7:7519627447304715260:2415], Recipient [7:7519627498844324214:2802]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-06-24T21:15:12.868632Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5297: HandleHook, processing event TEvPQ::TEvSubDomainStatus 2025-06-24T21:15:12.868677Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5297: HandleHook, processing event TEvPQ::TEvSubDomainStatus 2025-06-24T21:15:12.868690Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271187975, Sender [7:7519627447304715260:2415], Recipient [7:7519627447304715259:2414]: NKikimrPQ.TStatus GetStatForAllConsumers: true 2025-06-24T21:15:12.868704Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5272: HandleHook, processing event TEvPersQueue::TEvStatus 2025-06-24T21:15:12.868724Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:1805: [PQ: 72075186224037892] Handle TEvPersQueue::TEvStatus 2025-06-24T21:15:12.868765Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271187975, Sender [7:7519627447304715260:2415], Recipient [7:7519627498844324214:2802]: NKikimrPQ.TStatus GetStatForAllConsumers: true 
2025-06-24T21:15:12.868783Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271188536, Sender [7:7519627447304715260:2415], Recipient [7:7519627498844324210:2801]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-06-24T21:15:12.868785Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5272: HandleHook, processing event TEvPersQueue::TEvStatus 2025-06-24T21:15:12.868794Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5297: HandleHook, processing event TEvPQ::TEvSubDomainStatus 2025-06-24T21:15:12.868809Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:1805: [PQ: 72075186224037897] Handle TEvPersQueue::TEvStatus 2025-06-24T21:15:12.868834Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271187975, Sender [7:7519627447304715260:2415], Recipient [7:7519627498844324210:2801]: NKikimrPQ.TStatus GetStatForAllConsumers: true 2025-06-24T21:15:12.868843Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5272: HandleHook, processing event TEvPersQueue::TEvStatus 2025-06-24T21:15:12.868852Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:1805: [PQ: 72075186224037896] Handle TEvPersQueue::TEvStatus 2025-06-24T21:15:12.868907Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188536 (NKikimr::TEvPQ::TEvSubDomainStatus), Tablet [7:7519627498844324214:2802], Partition 1, Sender [7:7519627498844324214:2802], Recipient [7:7519627498844324299:2812], Cookie: 0 2025-06-24T21:15:12.868927Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188536 (NKikimr::TEvPQ::TEvSubDomainStatus), Tablet [7:7519627447304715259:2414], Partition 0, Sender [7:7519627447304715259:2414], Recipient [7:7519627447304715319:2418], Cookie: 0 2025-06-24T21:15:12.868969Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188536, Sender [7:7519627498844324214:2802], Recipient [7:7519627498844324299:2812]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-06-24T21:15:12.868994Z node 7 :PERSQUEUE TRACE: partition.h:621: StateIdle, processing event TEvPQ::TEvSubDomainStatus 2025-06-24T21:15:12.869002Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188536, Sender [7:7519627447304715259:2414], Recipient [7:7519627447304715319:2418]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-06-24T21:15:12.869026Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188491 (NKikimr::TEvPQ::TEvPartitionStatus), Tablet [7:7519627498844324214:2802], Partition 1, Sender [7:7519627498844324214:2802], Recipient [7:7519627498844324299:2812], Cookie: 0 2025-06-24T21:15:12.869036Z node 7 :PERSQUEUE TRACE: partition.h:621: StateIdle, processing event TEvPQ::TEvSubDomainStatus 2025-06-24T21:15:12.869055Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188491, Sender [7:7519627498844324214:2802], Recipient [7:7519627498844324299:2812]: NKikimr::TEvPQ::TEvPartitionStatus 2025-06-24T21:15:12.869078Z node 7 :PERSQUEUE TRACE: partition.h:597: StateIdle, processing event TEvPQ::TEvPartitionStatus 2025-06-24T21:15:12.869081Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188491 (NKikimr::TEvPQ::TEvPartitionStatus), Tablet [7:7519627447304715259:2414], Partition 0, Sender [7:7519627447304715259:2414], Recipient [7:7519627447304715319:2418], Cookie: 0 2025-06-24T21:15:12.869119Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188491, Sender [7:7519627447304715259:2414], Recipient [7:7519627447304715319:2418]: NKikimr::TEvPQ::TEvPartitionStatus 2025-06-24T21:15:12.869139Z node 7 :PERSQUEUE TRACE: partition.h:597: 
StateIdle, processing event TEvPQ::TEvPartitionStatus 2025-06-24T21:15:12.869338Z node 7 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 3 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2025-06-24T21:15:12.869396Z node 7 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 3 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2025-06-24T21:15:12.869476Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188536 (NKikimr::TEvPQ::TEvSubDomainStatus), Tablet [7:7519627498844324210:2801], Partition 2, Sender [7:7519627498844324210:2801], Recipient [7:7519627498844324297:2810], Cookie: 0 2025-06-24T21:15:12.869520Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188536, Sender [7:7519627498844324210:2801], Recipient [7:7519627498844324297:2810]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-06-24T21:15:12.869538Z node 7 :PERSQUEUE TRACE: partition.h:621: StateIdle, processing event TEvPQ::TEvSubDomainStatus 2025-06-24T21:15:12.869581Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188491 (NKikimr::TEvPQ::TEvPartitionStatus), Tablet [7:7519627498844324210:2801], Partition 2, Sender [7:7519627498844324210:2801], Recipient [7:7519627498844324297:2810], Cookie: 0 2025-06-24T21:15:12.869621Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188491, Sender [7:7519627498844324210:2801], Recipient [7:7519627498844324297:2810]: NKikimr::TEvPQ::TEvPartitionStatus 2025-06-24T21:15:12.869632Z node 7 :PERSQUEUE TRACE: partition.h:597: StateIdle, processing event TEvPQ::TEvPartitionStatus 2025-06-24T21:15:12.869661Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271188503, Sender [7:7519627498844324299:2812], Recipient [7:7519627498844324214:2802]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-24T21:15:12.869680Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5278: HandleHook, processing event 
TEvPQ::TEvPartitionLabeledCounters 2025-06-24T21:15:12.869725Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271188503, Sender [7:7519627447304715319:2418], Recipient [7:7519627447304715259:2414]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-24T21:15:12.869734Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5278: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-24T21:15:12.869796Z node 7 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 3 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2025-06-24T21:15:12.869904Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271188503, Sender [7:7519627498844324297:2810], Recipient [7:7519627498844324210:2801]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-24T21:15:12.869929Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5278: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-24T21:15:12.870187Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 14 DataSize: 0 UsedReserveSize: 0 2025-06-24T21:15:12.870360Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][test-topic] ProcessPendingStats. 
PendingUpdates size 3 2025-06-24T21:15:12.870554Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271188001, Sender [7:7519627447304715260:2415], Recipient [7:7519627400060073971:2153]: NKikimrPQ.TEvPeriodicTopicStats PathId: 13 Generation: 1 Round: 14 DataSize: 0 UsedReserveSize: 0 SubDomainOutOfSpace: false 2025-06-24T21:15:12.870612Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4989: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2025-06-24T21:15:12.870640Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046644480, LocalPathId: 13] DataSize 0 UsedReserveSize 0 2025-06-24T21:15:12.870663Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.099995s, queue# 1 >> CommitOffset::Commit_WithSession_ParentNotFinished_OtherSession_ParentCommittedToEnd [GOOD] >> CommitOffset::Commit_WithSession_ToPastParentPartition ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteOverload+InStore [GOOD] Test command err: 2025-06-24T21:15:03.646980Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:03.670248Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:03.670542Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:03.677983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:03.678195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:03.678372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:03.678478Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:03.678564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:03.678660Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:03.678786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:03.678893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:03.678953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:03.679032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:03.679122Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:03.707991Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:03.708280Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:03.708334Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:03.708506Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:03.708661Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:03.708734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:03.708773Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:03.708888Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:03.708960Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:03.709006Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:03.709035Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:03.709187Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:03.709246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:03.709300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:03.709330Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:03.709423Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:03.709477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:03.709536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:03.709583Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:03.709638Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:03.709676Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:03.709704Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:03.709935Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:03.709979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:03.710011Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:03.710166Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:03.710221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:03.710253Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:03.710397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:03.710456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:03.710492Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:03.710562Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:03.710621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:03.710660Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:03.710686Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:03.711129Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=43; 2025-06-24T21:15:03.711239Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=37; 2025-06-24T21:15:03.711326Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=36; 2025-06-24T21:15:03.711415Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=38; 2025-06-24T21:15:03.711523Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:03.711633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:03.711681Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:03.711727Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
e::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=21;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=4428;count=36;size_of_meta=136; 2025-06-24T21:15:14.108903Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=21;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=5724;count=18;size_of_portion=208; 2025-06-24T21:15:14.109922Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:18 Blob count: 1 2025-06-24T21:15:14.110033Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=21;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=19;operation_id=18; 2025-06-24T21:15:14.122095Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:18 Blob count: 1 2025-06-24T21:15:14.123975Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6330728;event=data_write_finished;writing_id=505ffb8a-514011f0-91150289-b32b1466; 2025-06-24T21:15:14.124199Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=1786;count=37; 2025-06-24T21:15:14.124283Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=4674;count=38;size_of_meta=136; 2025-06-24T21:15:14.124350Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=6042;count=19;size_of_portion=208; 2025-06-24T21:15:14.125345Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:19 Blob count: 1 2025-06-24T21:15:14.125478Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=20;operation_id=19; 2025-06-24T21:15:14.137412Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:19 Blob count: 1 2025-06-24T21:15:14.149746Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6330728;event=data_write_finished;writing_id=509627a0-514011f0-bcb83b77-55cb6558; 2025-06-24T21:15:14.149917Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=1880;count=39; 2025-06-24T21:15:14.149989Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=4920;count=40;size_of_meta=136; 2025-06-24T21:15:14.150036Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=6360;count=20;size_of_portion=208; 2025-06-24T21:15:14.150771Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:20 Blob count: 1 2025-06-24T21:15:14.150852Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=21;operation_id=20; 2025-06-24T21:15:14.162637Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:20 Blob count: 1 2025-06-24T21:15:14.164632Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6330728;event=data_write_finished;writing_id=50c73b56-514011f0-aebd893b-a3b53cc0; 2025-06-24T21:15:14.164822Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=1974;count=41; 2025-06-24T21:15:14.164891Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=5166;count=42;size_of_meta=136; 2025-06-24T21:15:14.164944Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=6678;count=21;size_of_portion=208; 2025-06-24T21:15:14.165730Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:21 Blob count: 1 2025-06-24T21:15:14.165825Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=22;operation_id=21; 2025-06-24T21:15:14.180218Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:21 Blob count: 1 2025-06-24T21:15:14.188653Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:210;event=register_operation;operation_id=22;last=22; 2025-06-24T21:15:14.188763Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=write_queue.cpp:14;writing_size=6330728;operation_id=51e02426-514011f0-87f3f8de-7136b744;in_flight=1;size_in_flight=6330728; 2025-06-24T21:15:14.494179Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;scope=TBuildBatchesTask::DoExecute;tablet_id=9437184;parent_id=[1:127:2157];write_id=22;path_id={internal: 9438184000001, ss: 1};fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=8246112;count=1;actions=__DEFAULT,;waiting=1;; 2025-06-24T21:15:14.574281Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6330728;event=data_write_finished;writing_id=51e02426-514011f0-87f3f8de-7136b744; 2025-06-24T21:15:14.574508Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=2068;count=43; 2025-06-24T21:15:14.574596Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=5412;count=44;size_of_meta=136; 2025-06-24T21:15:14.574674Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=6996;count=22;size_of_portion=208; 2025-06-24T21:15:14.575853Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:22 Blob count: 1 2025-06-24T21:15:14.576212Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=23;operation_id=22; 2025-06-24T21:15:14.591741Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:22 Blob count: 1 |95.8%| [TA] $(B)/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/{meta.json ... results_accumulator.log} |95.9%| [TA] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TColumnShardTestReadWrite::CompactionSplitGranuleStrKey_PKString >> TColumnShardTestReadWrite::WriteStandaloneExoticTypes >> TopicAutoscaling::PartitionSplit_ManySession_existed_AutoscaleAwareSDK [GOOD] >> TColumnShardTestReadWrite::WriteReadZSTD >> EvWrite::WriteWithSplit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::PartitionSplit_ManySession_existed_AutoscaleAwareSDK [GOOD] Test command err: 2025-06-24T21:12:43.568364Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626865134319234:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:43.568414Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:43.847678Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00313f/r3tmp/tmpxBl6WD/pdisk_1.dat 2025-06-24T21:12:44.169389Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:44.180117Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:44.180233Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:44.188338Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626865134319210:2079] 1750799563563027 != 1750799563563030 2025-06-24T21:12:44.237231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15627, node 1 2025-06-24T21:12:44.436949Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00313f/r3tmp/yandexZ0aVfe.tmp 2025-06-24T21:12:44.436977Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00313f/r3tmp/yandexZ0aVfe.tmp 2025-06-24T21:12:44.437157Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00313f/r3tmp/yandexZ0aVfe.tmp 2025-06-24T21:12:44.437277Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:44.606329Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:44.639846Z INFO: TTestServer started on Port 22879 GrpcPort 15627 TClient is connected to server localhost:22879 PQClient connected to localhost:15627 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:45.018167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:45.051431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:12:45.070748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:12:45.076299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T21:12:45.286772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-24T21:12:47.420154Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626882314189177:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.420282Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.420376Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626882314189204:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.425188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:47.460082Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626882314189206:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T21:12:47.744915Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626882314189270:2445] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:47.800899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.909087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.918633Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626882314189280:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:12:47.920587Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=Zjc5MDc0ZC0zY2Y4YjExNS02ODBhNmQ3ZS1lZDkwYzM1Zg==, ActorId: [1:7519626882314189174:2297], ActorState: ExecuteState, TraceId: 01jyhwjjh742fj8cmp3xc6nn5y, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:12:47.922393Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:12:48.008652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519626886609156864:2623] 2025-06-24T21:12:48.568569Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626865134319234:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:48.568638Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-24T21:12:54.509473Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-24T21:12:54.541331Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T21:12:54.542601Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519626912378960838:2701], Recipient [1:7519626865134319531:2141]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:12:54.542626Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:12:54.542642Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:12:54.542690Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519626912378960834:2698], Recipient [1:7519626865134319531:2141]: {TEvModifySchemeTransaction txid# 281474976710673 TabletId# 72057594046644480} 2025-06-24T21:12:54.542704Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T21:12:54.659414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePers ... Id: 13 SchemeshardId: 72057594046644480 2025-06-24T21:15:14.031494Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T21:15:14.050397Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627480265238646:2414], Partition 0, Sender [0:0:0], Recipient [7:7519627480265238707:2418], Cookie: 0 2025-06-24T21:15:14.050499Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627480265238707:2418]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:14.050533Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:14.050581Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:14.050666Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:14.050705Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:14.050745Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-24T21:15:14.051895Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627506035043527:2720], Partition 2, Sender [0:0:0], Recipient [7:7519627506035043607:2729], Cookie: 0 2025-06-24T21:15:14.051968Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627506035043607:2729]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:14.052009Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:14.052053Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:14.052148Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:14.052186Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:14.052221Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:14.052312Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627506035043529:2721], Partition 1, Sender [0:0:0], Recipient [7:7519627506035043610:2731], Cookie: 0 2025-06-24T21:15:14.052364Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627506035043610:2731]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:14.052390Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:14.052421Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:14.052466Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:14.052485Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:14.052506Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:14.150741Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627480265238646:2414], Partition 0, Sender [0:0:0], Recipient [7:7519627480265238707:2418], Cookie: 0 2025-06-24T21:15:14.150834Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627480265238707:2418]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:14.150881Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:14.150942Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:14.151053Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:14.151089Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:14.151132Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:14.152210Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627506035043527:2720], Partition 2, Sender [0:0:0], Recipient [7:7519627506035043607:2729], Cookie: 0 2025-06-24T21:15:14.152284Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627506035043607:2729]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:14.152320Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:14.152367Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:14.152443Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:14.152477Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:14.152509Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:14.152773Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627506035043529:2721], Partition 1, Sender [0:0:0], Recipient [7:7519627506035043610:2731], Cookie: 0 2025-06-24T21:15:14.152823Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627506035043610:2731]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:14.152848Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:14.152878Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:14.152921Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:14.152941Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:14.152962Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-24T21:15:14.251094Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627480265238646:2414], Partition 0, Sender [0:0:0], Recipient [7:7519627480265238707:2418], Cookie: 0 2025-06-24T21:15:14.251197Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627480265238707:2418]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:14.251231Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:14.251284Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:14.251382Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:14.251426Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:14.251473Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:14.252515Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627506035043527:2720], Partition 2, Sender [0:0:0], Recipient [7:7519627506035043607:2729], Cookie: 0 2025-06-24T21:15:14.252582Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627506035043607:2729]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:14.252622Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:14.252661Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:14.252732Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:14.252766Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:14.252794Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:14.253069Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627506035043529:2721], Partition 1, Sender [0:0:0], Recipient [7:7519627506035043610:2731], Cookie: 0 2025-06-24T21:15:14.253124Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627506035043610:2731]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:14.253142Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:14.253174Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:14.253216Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:14.253248Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:14.253272Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 >> YdbOlapStore::LogWithUnionAllDescending [GOOD] >> YdbOlapStore::LogTsRangeDescending >> TColumnShardTestReadWrite::WriteReadExoticTypes >> EvWrite::WriteWithSplit [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> EvWrite::WriteWithSplit [GOOD] Test command err: 2025-06-24T21:15:17.718473Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:17.746438Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:17.746780Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:17.754919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:17.755192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:17.755449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:17.755569Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:17.755717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:17.755842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:17.755989Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:17.756133Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:17.756263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:17.756398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:17.756514Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:17.792768Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:17.792947Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:17.793028Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:17.793261Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:17.793442Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:17.793530Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:17.793577Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:17.793697Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:17.793789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:17.793848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:17.793883Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:17.794088Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:17.794168Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:17.794212Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:17.794247Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:17.794365Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:17.794434Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:17.794500Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:17.794561Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:17.794628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:17.794670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:17.794720Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:17.794946Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:17.794995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:17.795031Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:17.795258Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:17.795334Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:17.795367Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:17.795504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:17.795560Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:17.795599Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:17.795694Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:17.795771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:17.795815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:17.795880Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:17.796377Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=57; 2025-06-24T21:15:17.796482Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=49; 2025-06-24T21:15:17.796578Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=42; 2025-06-24T21:15:17.796677Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=49; 2025-06-24T21:15:17.796769Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:17.796867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:17.796916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:17.796971Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:15:20.627218Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-06-24T21:15:20.627345Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:219;stage=no data is ready yet;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:15:20.627677Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-24T21:15:20.627736Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-24T21:15:20.627795Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=1; 2025-06-24T21:15:20.627873Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1024;merger=0;interval_id=1; 2025-06-24T21:15:20.628051Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-24T21:15:20.628210Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:15:20.628264Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=1024;finished=1; 2025-06-24T21:15:20.628411Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;);columns=2;rows=1024; 2025-06-24T21:15:20.628495Z node 1 
:TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=8400896;num_rows=1024;batch_columns=key,field; 2025-06-24T21:15:20.628774Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:853:2865];bytes=8400896;rows=1024;faults=0;finished=0;fault=0;schema=key: uint64 field: string; 2025-06-24T21:15:20.628926Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:15:20.629119Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:15:20.629235Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:15:20.632508Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:15:20.632720Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:15:20.632871Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:15:20.632923Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:859:2871] finished for 
tablet 9437184 2025-06-24T21:15:20.633456Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:853:2865];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.003},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["f_ack"],"t":0.624},{"events":["l_task_result"],"t":0.724},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.728}],"full":{"a":1750799719904384,"name":"_full_task","f":1750799719904384,"d_finished":0,"c":0,"l":1750799720632996,"d":728612},"events":[{"name":"bootstrap","f":1750799719904823,"d_finished":4525,"c":1,"l":1750799719909348,"d":4525},{"a":1750799720632474,"name":"ack","f":1750799720529333,"d_finished":94963,"c":2,"l":1750799720627400,"d":95485},{"a":1750799720632446,"name":"processing","f":1750799719909605,"d_finished":189806,"c":11,"l":1750799720629320,"d":190356},{"name":"ProduceResults","f":1750799719907811,"d_finished":98005,"c":15,"l":1750799720632905,"d":98005},{"a":1750799720632909,"name":"Finish","f":1750799720632909,"d_finished":0,"c":0,"l":1750799720632996,"d":87},{"name":"task_result","f":1750799719909628,"d_finished":94522,"c":9,"l":1750799720629316,"d":94522}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:15:20.633572Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:853:2865];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:15:20.634113Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:853:2865];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.003},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["f_ack"],"t":0.624},{"events":["l_task_result"],"t":0.724},{"events":["l_ProduceResults","f_Finish"],"t":0.728},{"events":["l_ack","l_processing","l_Finish"],"t":0.729}],"full":{"a":1750799719904384,"name":"_full_task","f":1750799719904384,"d_finished":0,"c":0,"l":1750799720633624,"d":729240},"events":[{"name":"bootstrap","f":1750799719904823,"d_finished":4525,"c":1,"l":1750799719909348,"d":4525},{"a":1750799720632474,"name":"ack","f":1750799720529333,"d_finished":94963,"c":2,"l":1750799720627400,"d":96113},{"a":1750799720632446,"name":"processing","f":1750799719909605,"d_finished":189806,"c":11,"l":1750799720629320,"d":190984},{"name":"ProduceResults","f":1750799719907811,"d_finished":98005,"c":15,"l":1750799720632905,"d":98005},{"a":1750799720632909,"name":"Finish","f":1750799720632909,"d_finished":0,"c":0,"l":1750799720633624,"d":715},{"name":"task_result","f":1750799719909628,"d_finished":94522,"c":9,"l":1750799720629316,"d":94522}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-24T21:15:20.634253Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:15:19.903084Z;index_granules=0;index_portions=1;index_batches=2050;schema_columns=2;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=17137952;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=17137952;selected_rows=0; 2025-06-24T21:15:20.634307Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:15:20.634605Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:859:2871];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootColumnShard [FAIL] Test command err: 2025-06-24T21:05:29.668222Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:05:29.668540Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:05:29.668691Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004310/r3tmp/tmpqLJ4L7/pdisk_1.dat 2025-06-24T21:05:29.976369Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29433, node 1 2025-06-24T21:05:30.174823Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:05:30.174877Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:05:30.174914Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:05:30.175070Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:05:30.177365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:05:30.285985Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:30.286144Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:30.299224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6261 2025-06-24T21:05:30.832296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:05:33.662700Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:05:33.691918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:33.692027Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:33.751519Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:05:33.753967Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:33.932995Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:33.967610Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:33.968329Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:33.968765Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:33.968887Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:33.968963Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:33.969176Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:33.969249Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:33.969352Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:33.969409Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:05:34.147437Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:05:34.147554Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:05:34.161246Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:05:34.296223Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:05:34.339294Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:05:34.339392Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:05:34.367247Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:05:34.368328Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:05:34.368514Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:05:34.368565Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:05:34.368640Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:05:34.368747Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:05:34.368818Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:05:34.368881Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:05:34.369920Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:05:34.402030Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:34.402119Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:05:34.408123Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:05:34.410632Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:05:34.410840Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:05:34.417235Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:05:34.437606Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:05:34.437663Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:05:34.437723Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:05:34.450895Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:05:34.465433Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:05:34.465618Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:05:34.633477Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:05:34.811372Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:05:34.856475Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:05:35.354601Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:05:35.587358Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:35.587499Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:05:35.603960Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:05:35.709621Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:05:35.709878Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:05:35.710210Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:05:35.710357Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2790];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:05:35.710499Z node 2 :TX_COLUMNSHARD WARN: ... de 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:14:43.656059Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:14:43.679857Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:14:43.679956Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:14:43.679994Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:14:45.074640Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:14:45.074725Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:14:45.074759Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-24T21:14:46.395731Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:14:46.395812Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:14:46.395846Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 
2025-06-24T21:14:47.749323Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:14:47.760191Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:14:47.760273Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:14:47.760326Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-24T21:14:49.075959Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:14:49.076063Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:14:49.076099Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:14:50.355022Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:14:50.355237Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:14:50.367821Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:14:50.367895Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:14:50.367931Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-24T21:14:51.643923Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:14:51.644007Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:14:51.644037Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:14:52.945837Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:14:52.945919Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:14:52.945955Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-24T21:14:54.243978Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:14:54.254996Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:14:54.255064Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:14:54.255090Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 
2025-06-24T21:14:55.603692Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-24T21:14:55.603773Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7957: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-24T21:14:55.603828Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7988: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-24T21:14:55.603863Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-24T21:14:55.814461Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:14:55.814540Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:14:55.814572Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-24T21:14:57.160583Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:14:57.160789Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:14:57.171877Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:14:57.171962Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:14:57.171997Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:14:58.510058Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:14:58.510143Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:14:58.510178Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-24T21:14:59.732674Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:14:59.732766Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:14:59.732794Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:15:01.019633Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:15:01.030665Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:15:01.030748Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:15:01.030778Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 
2025-06-24T21:15:02.419886Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:15:02.419976Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:15:02.420012Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-24T21:15:03.678210Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:15:03.678442Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:15:03.689520Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:15:03.689631Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:15:03.689673Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-24T21:15:04.930068Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-24T21:15:04.930148Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-24T21:15:04.930185Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. (TWithBackTrace) ydb/library/actors/testlib/test_runtime.h:579: Exception occured while waiting for NKikimr::NStat::TEvStatistics::TEvAnalyzeResponse: (NActors::TSchedulingLimitReachedException) TestActorRuntime Processed over 100000 events.ydb/library/actors/testlib/test_runtime.cpp:716: TBackTrace::Capture()+28 (0x1976384C) TWithBackTrace::TWithBackTrace<>()+80 (0x19391970) NKikimr::NStat::TEvStatistics::TEvAnalyzeResponse::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEventRethrow(NActors::TActorId const&, TDuration)+485 (0x19365FD5) NKikimr::NStat::NTestSuiteAnalyzeColumnshard::TTestCaseAnalyzeRebootColumnShard::Execute_(NUnitTest::TTestContext&)+4263 (0x19382B27) std::__y1::__function::__func, void ()>::operator()()+280 (0x1938D9B8) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x19C511F6) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x19C29C59) NKikimr::NStat::NTestSuiteAnalyzeColumnshard::TCurrentTest::Execute()+1204 (0x1938CB84) NUnitTest::TTestFactory::Execute()+2438 (0x19C2B526) NUnitTest::RunMain(int, char**)+5213 (0x19C4B76D) ??+0 (0x7F696445CD90) __libc_start_main+128 (0x7F696445CE40) _start+41 (0x16C23029) |95.9%| [TA] $(B)/ydb/core/statistics/aggregator/ut/test-results/unittest/{meta.json ... results_accumulator.log} |95.9%| [TA] {RESULT} $(B)/ydb/core/statistics/aggregator/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TColumnShardTestReadWrite::WriteReadStandalone >> TColumnShardTestReadWrite::WriteStandaloneExoticTypes [GOOD] >> TColumnShardTestReadWrite::WriteReadZSTD [GOOD] >> TColumnShardTestReadWrite::WriteReadStandaloneExoticTypes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteStandaloneExoticTypes [GOOD] Test command err: 2025-06-24T21:15:17.366063Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:17.387917Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:17.388204Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:17.394646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:17.394853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:17.395068Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:17.395177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:17.395272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:17.395357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:17.395453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:17.395530Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:17.395605Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:17.395706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:17.395791Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:17.417953Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:17.418183Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:17.418250Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:17.418407Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:17.418555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:17.418619Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:17.418657Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:17.418774Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:17.418830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:17.418861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:17.418883Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:17.419026Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:17.419079Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:17.419108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:17.419183Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:17.419306Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:17.419391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:17.419468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:17.419513Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:17.419556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:17.419587Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:17.419609Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:17.419804Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:17.419841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:17.419871Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:17.420009Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:17.420046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:17.420084Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:17.420221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:17.420259Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:17.420317Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:17.420376Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:17.420430Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:17.420461Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:17.420482Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:17.420910Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=55; 2025-06-24T21:15:17.420986Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=31; 2025-06-24T21:15:17.421067Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=36; 2025-06-24T21:15:17.421177Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=55; 2025-06-24T21:15:17.421276Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:17.421384Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:17.421427Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:17.421469Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"19,19,19,19,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"20,20,20,20,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"21,21,21,21,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"22,22,22,22,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"23,23,23,23,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"24,24,24,24,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"25,25,25,25,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"26,26,26,26,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"27,27,27,27,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"28,28,28,28,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"29,29,29,29,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"30,30,30,30,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"31,31,31,31,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"32,32,32,32,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"33,33,33,33,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"34,34,34,34,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"35,35,35,35,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"36,36,36,36,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"37,37,37,37,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"38,38,38,38,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"39,39,39,39,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"40,40,40,40,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"41,41,41,41,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"42,42,42,42,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1}
,"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"43,43,43,43,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"44,44,44,44,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"45,45,45,45,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"46,46,46,46,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"47,47,47,47,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"48,48,48,48,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"49,49,49,49,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"50,50,50,50,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"51,51,51,51,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"52,52,52,52,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"53,53,53,53,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"54,54,54,54,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"55,55,55,55,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"56,56,56,56,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"57,57,57,57,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"58,58,58,58,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"59,59,59,59,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"60,60,60,60,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"61,61,61,61,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"62,62,62,62,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"63,63,63,63,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"64,64,64,64,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"65,65,65,65,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"66,66,66,66,"}},{"i":{"txs":[],"starts":[{"inc
":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"67,67,67,67,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"68,68,68,68,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"69,69,69,69,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"70,70,70,70,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"71,71,71,71,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"72,72,72,72,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"73,73,73,73,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"74,74,74,74,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"75,75,75,75,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"76,76,76,76,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"77,77,77,77,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"78,78,78,78,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"79,79,79,79,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"80,80,80,80,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"81,81,81,81,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"82,82,82,82,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"83,83,83,83,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"84,84,84,84,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"85,85,85,85,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"86,86,86,86,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"87,87,87,87,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"88,88,88,88,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"89,89,89,89,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"90,90,90,90,"}},{"i":{"tx
s":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"91,91,91,91,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"92,92,92,92,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"93,93,93,93,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"94,94,94,94,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"95,95,95,95,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"96,96,96,96,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"97,97,97,97,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"98,98,98,98,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"99,99,99,99,"}}]}; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteReadZSTD [GOOD] Test command err: 2025-06-24T21:15:17.456397Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:17.480947Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:17.481249Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:17.488254Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:17.488488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:17.488721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:17.488856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:17.488990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:17.489134Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:17.489256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:17.489374Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:17.489483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:17.489643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:17.489773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:17.517019Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:17.517228Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:17.517272Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:17.517416Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:17.517551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:17.517602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:17.517631Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:17.517729Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:17.517813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:17.517866Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:17.517917Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:17.518068Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:17.518118Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:17.518147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:17.518169Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:17.518247Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:17.518288Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:17.518316Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:17.518347Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:17.518392Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:17.518437Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:17.518465Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:17.518637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:17.518668Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:17.518694Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:17.518826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:17.518874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:17.518898Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:17.519009Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:17.519044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:17.519067Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:17.519119Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:17.519192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:17.519235Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:17.519257Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:17.519619Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=36; 2025-06-24T21:15:17.519694Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=33; 2025-06-24T21:15:17.519758Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=27; 2025-06-24T21:15:17.519841Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=45; 2025-06-24T21:15:17.519908Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:17.519977Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:17.520044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:17.520082Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:24.189099Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:15:24.189235Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-06-24T21:15:24.189310Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=1984;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-06-24T21:15:24.189558Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1000:2855];bytes=1984;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: string json_payload: string ingested_at: timestamp[us] saved_at: timestamp[us] request_id: string; 2025-06-24T21:15:24.189727Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:24.189855Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:24.189990Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:24.190215Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:15:24.190369Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:24.190493Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:24.190531Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:1001:2856] finished for tablet 9437184 2025-06-24T21:15:24.190957Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:1000:2855];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["f_ack","l_task_result"],"t":0.015},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.017}],"full":{"a":1750799724173359,"name":"_full_task","f":1750799724173359,"d_finished":0,"c":0,"l":1750799724190584,"d":17225},"events":[{"name":"bootstrap","f":1750799724173615,"d_finished":3661,"c":1,"l":1750799724177276,"d":3661},{"a":1750799724190191,"name":"ack","f":1750799724188849,"d_finished":1171,"c":1,"l":1750799724190020,"d":1564},{"a":1750799724190181,"name":"processing","f":1750799724178785,"d_finished":6839,"c":10,"l":1750799724190023,"d":7242},{"name":"ProduceResults","f":1750799724175797,"d_finished":3455,"c":13,"l":1750799724190519,"d":3455},{"a":1750799724190521,"name":"Finish","f":1750799724190521,"d_finished":0,"c":0,"l":1750799724190584,"d":63},{"name":"task_result","f":1750799724178805,"d_finished":5528,"c":9,"l":1750799724188699,"d":5528}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:24.191019Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1000:2855];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:15:24.191401Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:1000:2855];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["f_ack","l_task_result"],"t":0.015},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.017}],"full":{"a":1750799724173359,"name":"_full_task","f":1750799724173359,"d_finished":0,"c":0,"l":1750799724191055,"d":17696},"events":[{"name":"bootstrap","f":1750799724173615,"d_finished":3661,"c":1,"l":1750799724177276,"d":3661},{"a":1750799724190191,"name":"ack","f":1750799724188849,"d_finished":1171,"c":1,"l":1750799724190020,"d":2035},{"a":1750799724190181,"name":"processing","f":1750799724178785,"d_finished":6839,"c":10,"l":1750799724190023,"d":7713},{"name":"ProduceResults","f":1750799724175797,"d_finished":3455,"c":13,"l":1750799724190519,"d":3455},{"a":1750799724190521,"name":"Finish","f":1750799724190521,"d_finished":0,"c":0,"l":1750799724191055,"d":534},{"name":"task_result","f":1750799724178805,"d_finished":5528,"c":9,"l":1750799724188699,"d":5528}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:24.191468Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:15:24.172603Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=4512;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=4512;selected_rows=0; 2025-06-24T21:15:24.191507Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:15:24.191869Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; >> KqpSystemView::QueryStatsSimple [GOOD] >> TColumnShardTestReadWrite::WriteReadExoticTypes [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKString ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> 
TColumnShardTestReadWrite::WriteReadExoticTypes [GOOD] Test command err: 2025-06-24T21:15:20.685579Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:20.713581Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:20.713912Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:20.722226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:20.722485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:20.722763Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:20.722877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:20.723037Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:20.723170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:20.723296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:20.723409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:20.723516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:20.723642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:20.723796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:20.749321Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:20.749569Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:20.749630Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:20.749821Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:20.749962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:20.750030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:20.750071Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:20.750174Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:20.750228Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:20.750258Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:20.750293Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:20.750442Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:20.750491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:20.750521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:20.750542Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:20.750633Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:20.750678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:20.750716Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 
2025-06-24T21:15:20.750752Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:20.750806Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:20.750832Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:20.750856Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:20.751015Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:20.751046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:20.751077Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:20.751245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:20.751304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:20.751348Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:20.751474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:20.751520Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:20.751547Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:20.751605Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:20.751653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:20.751680Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:20.751701Z node 1 
:TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:20.752114Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=47; 2025-06-24T21:15:20.752216Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=57; 2025-06-24T21:15:20.752295Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=36; 2025-06-24T21:15:20.752366Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=31; 2025-06-24T21:15:20.752446Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:20.752527Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:20.752575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:20.752637Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... load,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:25.742259Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:15:25.742407Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-06-24T21:15:25.742501Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=2759;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-06-24T21:15:25.742833Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:398:2409];bytes=2759;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: binary json_payload: binary ingested_at: timestamp[us] saved_at: timestamp[us] request_id: binary; 2025-06-24T21:15:25.742993Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:25.743120Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:25.743271Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:25.743503Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:15:25.743655Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:25.743805Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:25.743885Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:399:2410] finished for tablet 9437184 2025-06-24T21:15:25.744458Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:398:2409];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing"],"t":0.004},{"events":["f_task_result"],"t":0.005},{"events":["f_ack","l_task_result"],"t":0.036},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.038}],"full":{"a":1750799725705230,"name":"_full_task","f":1750799725705230,"d_finished":0,"c":0,"l":1750799725743951,"d":38721},"events":[{"name":"bootstrap","f":1750799725705437,"d_finished":3517,"c":1,"l":1750799725708954,"d":3517},{"a":1750799725743478,"name":"ack","f":1750799725741951,"d_finished":1349,"c":1,"l":1750799725743300,"d":1822},{"a":1750799725743458,"name":"processing","f":1750799725710226,"d_finished":7086,"c":10,"l":1750799725743303,"d":7579},{"name":"ProduceResults","f":1750799725707464,"d_finished":3655,"c":13,"l":1750799725743853,"d":3655},{"a":1750799725743858,"name":"Finish","f":1750799725743858,"d_finished":0,"c":0,"l":1750799725743951,"d":93},{"name":"task_result","f":1750799725710242,"d_finished":5602,"c":9,"l":1750799725741769,"d":5602}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:25.744543Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:398:2409];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:15:25.745043Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:398:2409];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing"],"t":0.004},{"events":["f_task_result"],"t":0.005},{"events":["f_ack","l_task_result"],"t":0.036},{"events":["l_ProduceResults","f_Finish"],"t":0.038},{"events":["l_ack","l_processing","l_Finish"],"t":0.039}],"full":{"a":1750799725705230,"name":"_full_task","f":1750799725705230,"d_finished":0,"c":0,"l":1750799725744594,"d":39364},"events":[{"name":"bootstrap","f":1750799725705437,"d_finished":3517,"c":1,"l":1750799725708954,"d":3517},{"a":1750799725743478,"name":"ack","f":1750799725741951,"d_finished":1349,"c":1,"l":1750799725743300,"d":2465},{"a":1750799725743458,"name":"processing","f":1750799725710226,"d_finished":7086,"c":10,"l":1750799725743303,"d":8222},{"name":"ProduceResults","f":1750799725707464,"d_finished":3655,"c":13,"l":1750799725743853,"d":3655},{"a":1750799725743858,"name":"Finish","f":1750799725743858,"d_finished":0,"c":0,"l":1750799725744594,"d":736},{"name":"task_result","f":1750799725710242,"d_finished":5602,"c":9,"l":1750799725741769,"d":5602}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:25.745127Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:15:25.704368Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=7928;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=7928;selected_rows=0; 2025-06-24T21:15:25.745174Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:15:25.745561Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery >> TColumnShardTestReadWrite::CompactionSplitGranule_PKUInt64 >> PersQueueSdkReadSessionTest::ReadSessionWithCloseNotCommitted [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithExplicitlySpecifiedPartitions ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::QueryStatsSimple [GOOD] Test command err: Trying to start YDB, gRPC: 8768, MsgBus: 61773 2025-06-24T21:14:18.395177Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627276570472679:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:18.395284Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:18.444002Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627275287877988:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:18.444145Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:18.554686Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519627276069541550:2248];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:18.554776Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049f1/r3tmp/tmpuLkX9n/pdisk_1.dat 2025-06-24T21:14:19.342549Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:19.376006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:19.376112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:19.376366Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:19.376421Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:19.377073Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:19.377116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, 
(0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:19.385190Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:14:19.385232Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-24T21:14:19.385608Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:19.386374Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:19.386580Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:19.391905Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 8768, node 1 2025-06-24T21:14:19.462537Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:19.555585Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:19.569462Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:14:19.569490Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:14:19.569498Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:14:19.569642Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61773 TClient is connected to server localhost:61773 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:20.812257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:14:20.869686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:21.236652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:21.582197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:21.776694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:14:23.384082Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627276570472679:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:23.384178Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:23.444076Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519627275287877988:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:23.444154Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:23.555239Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519627276069541550:2248];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:23.555326Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:24.037205Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627302340278305:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:24.037327Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:24.360221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.410794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.503283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.576664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.625113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:24.735981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/y ... 
-06-24T21:15:13.972534Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:13.972652Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:13.983609Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22216, node 16 2025-06-24T21:15:14.062917Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:14.062950Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:15:14.062961Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:14.063186Z node 16 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27797 2025-06-24T21:15:14.663025Z node 16 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:15:14.683924Z node 18 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:15:14.694203Z node 17 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27797 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:15:14.902636Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:15:14.950733Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:15:15.081637Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:15.263084Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:15.352666Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:18.619695Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[16:7519627511701739954:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:18.619782Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:15:18.655052Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[17:7519627511931165010:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:18.655197Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:15:18.670266Z node 18 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[18:7519627511889959767:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:18.670386Z node 18 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:15:19.625917Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519627537471545761:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:19.626101Z node 16 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:19.712434Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:19.812140Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:19.910006Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:20.013443Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:20.124353Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:20.255742Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:20.361667Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:20.514105Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519627541766513926:2372], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:20.514219Z node 16 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:20.514442Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519627541766513931:2375], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:20.522122Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:15:20.553799Z node 16 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [16:7519627541766513933:2376], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:15:20.620679Z node 16 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [16:7519627541766514004:4288] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:15:23.225373Z node 16 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799723212, txId: 281474976715674] shutting down >> CommitOffset::DistributedTxCommit_Flat_CheckOffsetCommitForDifferentCases [GOOD] >> TPersQueueMirrorer::TestBasicRemote [GOOD] >> CommitOffset::Commit_FromSession_ToNewChild_WithoutCommitToParent [GOOD] >> YdbOlapStore::DuplicateRows [GOOD] >> YdbOlapStore::LogCountByResource |95.9%| [TA] $(B)/ydb/core/kqp/ut/sysview/test-results/unittest/{meta.json ... results_accumulator.log} |95.9%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/sysview/test-results/unittest/{meta.json ... results_accumulator.log} >> TColumnShardTestReadWrite::ReadStale >> TColumnShardTestReadWrite::WriteReadStandalone [GOOD] >> TColumnShardTestReadWrite::RebootWriteReadStandalone ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TPersQueueMirrorer::TestBasicRemote [GOOD] Test command err: 2025-06-24T21:12:43.571701Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626865885496530:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:43.571787Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00317d/r3tmp/tmpn92fia/pdisk_1.dat 2025-06-24T21:12:43.844531Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:12:44.143308Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626865885496489:2079] 1750799563555051 != 1750799563555054 2025-06-24T21:12:44.180435Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:44.186007Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:44.186128Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:44.193856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64277, node 1 2025-06-24T21:12:44.395754Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00317d/r3tmp/yandexC37M0j.tmp 2025-06-24T21:12:44.395779Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00317d/r3tmp/yandexC37M0j.tmp 2025-06-24T21:12:44.403419Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00317d/r3tmp/yandexC37M0j.tmp 2025-06-24T21:12:44.403587Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:44.579389Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:44.647216Z INFO: TTestServer started on Port 13642 GrpcPort 64277 TClient is connected to server localhost:13642 PQClient connected to localhost:64277 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:45.050719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:45.075479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:12:45.109788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:12:45.283635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:12:47.183618Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626883065366468:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.183747Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.184830Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626883065366481:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.189480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:47.212655Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626883065366483:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T21:12:47.480912Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626883065366547:2443] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:47.602375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.672852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.713168Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626883065366555:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:12:47.715792Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ODk4NGIyZGEtZDJmYmFjMWMtMTcwMDRiY2MtMzJkYWJmZTY=, ActorId: [1:7519626883065366451:2297], ActorState: ExecuteState, TraceId: 01jyhwjj9a9b72z29an9caka2m, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:12:47.718014Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:12:47.775810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519626887360334141:2622] 2025-06-24T21:12:48.572205Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626865885496530:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:48.572290Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-24T21:12:54.184560Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-24T21:12:54.205924Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519626865885496869:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:12:54.205977Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:12:54.206024Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:7519626865885496869:2180], Recipient [1:7519626865885496869:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:12:54.206042Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:12:54.209379Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T21:12:54.211024Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519626913130138113:2699], Recipient [1:7519626865885496869:2180]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:12:54.211048Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:12:54.211059Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at ta ... Z node 6 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:25.762132Z node 6 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:25.762176Z node 6 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:25.762225Z node 6 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-24T21:15:25.837140Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435095, Sender [0:0:0], Recipient [6:7519627468931145309:2157]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTopicStats 2025-06-24T21:15:25.837200Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5128: StateWork, processing event TEvPrivate::TEvPersistTopicStats 2025-06-24T21:15:25.837245Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-24T21:15:25.837259Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:128: Will execute TTxStoreStats, queue# 1 2025-06-24T21:15:25.837342Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.000000s, queue# 1 2025-06-24T21:15:25.837736Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435095, Sender [0:0:0], Recipient [6:7519627468931145309:2157]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTopicStats 2025-06-24T21:15:25.837757Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5128: StateWork, processing event TEvPrivate::TEvPersistTopicStats 2025-06-24T21:15:25.837776Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-24T21:15:25.862210Z node 6 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7519627516175786586:2415], Partition 0, Sender [0:0:0], Recipient [6:7519627516175786648:2419], Cookie: 0 2025-06-24T21:15:25.862310Z node 6 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7519627516175786648:2419]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:25.862344Z node 6 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:25.862405Z node 6 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:25.862514Z node 6 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:25.862585Z node 6 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:25.862635Z node 6 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-24T21:15:25.962567Z node 6 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7519627516175786586:2415], Partition 0, Sender [0:0:0], Recipient [6:7519627516175786648:2419], Cookie: 0 2025-06-24T21:15:25.962646Z node 6 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7519627516175786648:2419]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:25.962672Z node 6 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:25.962718Z node 6 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:25.962802Z node 6 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:25.962826Z node 6 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:25.962858Z node 6 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:26.062895Z node 6 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7519627516175786586:2415], Partition 0, Sender [0:0:0], Recipient [6:7519627516175786648:2419], Cookie: 0 2025-06-24T21:15:26.062962Z node 6 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7519627516175786648:2419]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:26.062984Z node 6 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:26.063024Z node 6 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:26.063092Z node 6 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:26.063113Z node 6 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:26.063138Z node 6 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:26.161532Z :INFO: [/Root] [/Root] [90b635cf-d6da0f2c-53d9494d-9a238f95] Closing read session. Close timeout: 0.000000s 2025-06-24T21:15:26.161696Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:2:3 2025-06-24T21:15:26.161780Z :INFO: [/Root] [/Root] [90b635cf-d6da0f2c-53d9494d-9a238f95] Counters: { Errors: 0 CurrentSessionLifetimeMs: 5020 BytesRead: 27 MessagesRead: 3 BytesReadCompressed: 87 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:15:26.161940Z :NOTICE: [/Root] [/Root] [90b635cf-d6da0f2c-53d9494d-9a238f95] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-24T21:15:26.162002Z :DEBUG: [/Root] [/Root] [90b635cf-d6da0f2c-53d9494d-9a238f95] [] Abort session to cluster 2025-06-24T21:15:26.162768Z :NOTICE: [/Root] [/Root] [90b635cf-d6da0f2c-53d9494d-9a238f95] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:15:26.163335Z node 6 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7519627516175786586:2415], Partition 0, Sender [0:0:0], Recipient [6:7519627516175786648:2419], Cookie: 0 2025-06-24T21:15:26.163444Z node 6 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7519627516175786648:2419]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:26.163476Z node 6 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:26.163543Z node 6 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:26.163636Z node 6 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:26.163666Z node 6 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:26.163703Z node 6 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:26.163785Z node 6 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_6_1_12504064849938588791_v1 grpc read done: success# 0, data# { } 2025-06-24T21:15:26.163828Z node 6 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_6_1_12504064849938588791_v1 grpc read failed 2025-06-24T21:15:26.163858Z node 6 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_6_1_12504064849938588791_v1 grpc closed 2025-06-24T21:15:26.163915Z node 6 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_6_1_12504064849938588791_v1 is DEAD 2025-06-24T21:15:26.165521Z node 6 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][test-topic] pipe [6:7519627546240558719:2717] disconnected; active server actors: 1 2025-06-24T21:15:26.165581Z node 6 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][test-topic] pipe [6:7519627546240558719:2717] client test-consumer disconnected session test-consumer_6_1_12504064849938588791_v1 2025-06-24T21:15:26.165773Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 269877764, Sender [6:7519627546240558723:3166], Recipient [6:7519627516175786586:2415]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:15:26.165818Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5283: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:15:26.165844Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:2893: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:15:26.165876Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session test-consumer_6_1_12504064849938588791_v1 2025-06-24T21:15:26.165940Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [6:7519627546240558722:2720] destroyed 2025-06-24T21:15:26.166018Z node 6 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: 
test-consumer_6_1_12504064849938588791_v1 2025-06-24T21:15:26.263581Z node 6 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7519627516175786586:2415], Partition 0, Sender [0:0:0], Recipient [6:7519627516175786648:2419], Cookie: 0 2025-06-24T21:15:26.263682Z node 6 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7519627516175786648:2419]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:26.263716Z node 6 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:26.263771Z node 6 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:26.263868Z node 6 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:26.263904Z node 6 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:26.263942Z node 6 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteReadStandalone [GOOD] Test command err: 2025-06-24T21:15:23.861459Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:23.889377Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:23.889722Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:23.897190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:23.897470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:23.897712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:23.897822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:23.897964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:23.898086Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:23.898208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:23.898315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:23.898412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:23.898503Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:23.898663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:23.929339Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:23.929580Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:23.929638Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:23.929859Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:23.930021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:23.930092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:23.930134Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:23.930221Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:23.930280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:23.930336Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:23.930367Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:23.930538Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:23.930599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:23.930637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:23.930667Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:23.930763Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:23.930819Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:23.930861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:23.931103Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:23.931195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:23.931247Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:23.931286Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:23.931505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:23.931546Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:23.931579Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:23.931777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:23.931831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:23.931879Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:23.932035Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:23.932083Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:23.932121Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:23.932201Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:23.932269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:23.932306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:23.932334Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:23.932761Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=46; 2025-06-24T21:15:23.932882Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=67; 2025-06-24T21:15:23.932965Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=38; 2025-06-24T21:15:23.933084Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=33; 2025-06-24T21:15:23.933164Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:23.933261Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:23.933307Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:23.933394Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:28.217905Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:15:28.218058Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-06-24T21:15:28.218158Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=1984;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-06-24T21:15:28.218512Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:398:2409];bytes=1984;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: string json_payload: string ingested_at: timestamp[us] saved_at: timestamp[us] request_id: string; 2025-06-24T21:15:28.218689Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:28.218835Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:28.218976Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:28.219243Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:15:28.219414Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:28.219562Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:28.219605Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:399:2410] finished for tablet 9437184 2025-06-24T21:15:28.220142Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:398:2409];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["f_ack","l_task_result"],"t":0.028},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.03}],"full":{"a":1750799728188783,"name":"_full_task","f":1750799728188783,"d_finished":0,"c":0,"l":1750799728219664,"d":30881},"events":[{"name":"bootstrap","f":1750799728188995,"d_finished":3611,"c":1,"l":1750799728192606,"d":3611},{"a":1750799728219214,"name":"ack","f":1750799728217566,"d_finished":1445,"c":1,"l":1750799728219011,"d":1895},{"a":1750799728219199,"name":"processing","f":1750799728193985,"d_finished":21175,"c":10,"l":1750799728219013,"d":21640},{"name":"ProduceResults","f":1750799728191038,"d_finished":3662,"c":13,"l":1750799728219589,"d":3662},{"a":1750799728219592,"name":"Finish","f":1750799728219592,"d_finished":0,"c":0,"l":1750799728219664,"d":72},{"name":"task_result","f":1750799728194002,"d_finished":19592,"c":9,"l":1750799728217367,"d":19592}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:28.220237Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:398:2409];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:15:28.220730Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:398:2409];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["f_ack","l_task_result"],"t":0.028},{"events":["l_ProduceResults","f_Finish"],"t":0.03},{"events":["l_ack","l_processing","l_Finish"],"t":0.031}],"full":{"a":1750799728188783,"name":"_full_task","f":1750799728188783,"d_finished":0,"c":0,"l":1750799728220283,"d":31500},"events":[{"name":"bootstrap","f":1750799728188995,"d_finished":3611,"c":1,"l":1750799728192606,"d":3611},{"a":1750799728219214,"name":"ack","f":1750799728217566,"d_finished":1445,"c":1,"l":1750799728219011,"d":2514},{"a":1750799728219199,"name":"processing","f":1750799728193985,"d_finished":21175,"c":10,"l":1750799728219013,"d":22259},{"name":"ProduceResults","f":1750799728191038,"d_finished":3662,"c":13,"l":1750799728219589,"d":3662},{"a":1750799728219592,"name":"Finish","f":1750799728219592,"d_finished":0,"c":0,"l":1750799728220283,"d":691},{"name":"task_result","f":1750799728194002,"d_finished":19592,"c":9,"l":1750799728217367,"d":19592}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:28.220808Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:15:28.188075Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=7600;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=7600;selected_rows=0; 2025-06-24T21:15:28.220856Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:15:28.221270Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> CommitOffset::Commit_FromSession_ToNewChild_WithoutCommitToParent [GOOD] Test command err: 2025-06-24T21:12:43.575330Z node 1 
:METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626865950392826:2104];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:43.596416Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003190/r3tmp/tmpMi9Moc/pdisk_1.dat 2025-06-24T21:12:43.927777Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:12:44.184011Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:44.184106Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:44.191367Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:44.223695Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:44.227443Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626865950392759:2079] 1750799563561347 != 1750799563561350 TServer::EnableGrpc on GrpcPort 6141, node 1 2025-06-24T21:12:44.382661Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/003190/r3tmp/yandexkuHoT9.tmp 2025-06-24T21:12:44.382705Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/003190/r3tmp/yandexkuHoT9.tmp 2025-06-24T21:12:44.387351Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/003190/r3tmp/yandexkuHoT9.tmp 2025-06-24T21:12:44.387494Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:44.615160Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:44.643862Z INFO: TTestServer started on Port 20923 GrpcPort 6141 TClient is connected to server localhost:20923 PQClient connected to localhost:6141 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:45.098073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:12:45.137425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T21:12:47.093173Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626883130262744:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.093182Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626883130262739:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.093315Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.096156Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626883130262760:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.096614Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.098342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:47.114984Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626883130262753:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T21:12:47.418160Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626883130262809:2442] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:47.564567Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626883130262843:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:12:47.566769Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NTZiODgyODItMjg1MDIwNDctNzZjMGVmMWYtMmY4YWZjY2Q=, ActorId: [1:7519626883130262736:2297], ActorState: ExecuteState, TraceId: 01jyhwjj6jbtvh00h8m36r4v4s, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:12:47.569116Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:12:47.592416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.653679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.771110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519626887425230410:2621] 2025-06-24T21:12:48.580876Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626865950392826:2104];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:48.581007Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-24T21:12:54.009357Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-24T21:12:54.063614Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T21:12:54.089692Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519626913195034380:2697], Recipient [1:7519626865950393078:2140]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:12:54.089754Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:12:54.089773Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:12:54.089838Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519626913195034376:2694], Recipient [1:7519626865950393078:2140]: {TEvModifySchemeTransaction txid# 281474976710673 TabletId# 72057594046644480} 2025-06-24T21:12:54.089857Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T21:12:54.141035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingD ... r test-consumer session test-consumer_8_1_1561489250337679980_v1 grpc read done: success# 0, data# { } 2025-06-24T21:15:26.543275Z node 8 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_8_1_1561489250337679980_v1 grpc read failed 2025-06-24T21:15:26.543307Z node 8 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_8_1_1561489250337679980_v1 grpc closed 2025-06-24T21:15:26.543349Z node 8 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_8_1_1561489250337679980_v1 is DEAD 2025-06-24T21:15:26.544812Z node 8 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][test-topic] pipe [8:7519627536507574976:2718] disconnected; active server actors: 1 2025-06-24T21:15:26.544854Z node 8 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][test-topic] pipe [8:7519627536507574976:2718] client test-consumer disconnected session test-consumer_8_1_1561489250337679980_v1 2025-06-24T21:15:26.545013Z node 8 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 269877764, Sender [8:7519627540802542511:3284], Recipient [8:7519627540802542367:2747]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:15:26.545050Z node 8 :PERSQUEUE TRACE: pq_impl.cpp:5283: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:15:26.545075Z node 8 :PERSQUEUE TRACE: pq_impl.cpp:2893: [PQ: 72075186224037896] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:15:26.545099Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037896] Destroy direct read session test-consumer_8_1_1561489250337679980_v1 2025-06-24T21:15:26.545139Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037896] server disconnected, pipe [8:7519627540802542509:2772] destroyed 2025-06-24T21:15:26.545187Z node 8 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 269877764, Sender [8:7519627540802542515:3285], Recipient [8:7519627540802542366:2746]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:15:26.545206Z node 8 
:PERSQUEUE TRACE: pq_impl.cpp:5283: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:15:26.545217Z node 8 :PERSQUEUE TRACE: pq_impl.cpp:2893: [PQ: 72075186224037897] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:15:26.545230Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037897] Destroy direct read session test-consumer_8_1_1561489250337679980_v1 2025-06-24T21:15:26.545247Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037897] server disconnected, pipe [8:7519627540802542514:2771] destroyed 2025-06-24T21:15:26.545286Z node 8 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 269877764, Sender [8:7519627536507574980:3167], Recipient [8:7519627510737770136:2415]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:15:26.545300Z node 8 :PERSQUEUE TRACE: pq_impl.cpp:5283: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:15:26.545311Z node 8 :PERSQUEUE TRACE: pq_impl.cpp:2893: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:15:26.545322Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session test-consumer_8_1_1561489250337679980_v1 2025-06-24T21:15:26.545339Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [8:7519627536507574979:2721] destroyed 2025-06-24T21:15:26.545384Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_8_1_1561489250337679980_v1 2025-06-24T21:15:26.545412Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_8_1_1561489250337679980_v1 2025-06-24T21:15:26.545428Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_8_1_1561489250337679980_v1 2025-06-24T21:15:26.634280Z node 8 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7519627510737770136:2415], Partition 0, Sender [0:0:0], Recipient [8:7519627510737770196:2420], Cookie: 0 2025-06-24T21:15:26.634367Z node 8 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7519627510737770196:2420]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:26.634394Z node 8 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:26.634445Z node 8 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:26.634531Z node 8 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:26.634567Z node 8 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:26.634623Z node 8 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-24T21:15:26.634691Z node 8 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7519627540802542366:2746], Partition 1, Sender [0:0:0], Recipient [8:7519627540802542442:2753], Cookie: 0 2025-06-24T21:15:26.634723Z node 8 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7519627540802542442:2753]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:26.634743Z node 8 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:26.634770Z node 8 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:26.634813Z node 8 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:26.634835Z node 8 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:26.634852Z node 8 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:26.634895Z node 8 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7519627540802542367:2747], Partition 2, Sender [0:0:0], Recipient [8:7519627540802542445:2755], Cookie: 0 2025-06-24T21:15:26.634934Z node 8 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7519627540802542445:2755]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:26.634953Z node 8 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:26.634989Z node 8 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:26.635023Z node 8 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:26.635044Z node 8 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:26.635061Z node 8 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:26.734704Z node 8 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7519627510737770136:2415], Partition 0, Sender [0:0:0], Recipient [8:7519627510737770196:2420], Cookie: 0 2025-06-24T21:15:26.734821Z node 8 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7519627510737770196:2420]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:26.734866Z node 8 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:26.734932Z node 8 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:26.735052Z node 8 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:26.735102Z node 8 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:26.735168Z node 8 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:26.735247Z node 8 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7519627540802542366:2746], Partition 1, Sender [0:0:0], Recipient [8:7519627540802542442:2753], Cookie: 0 2025-06-24T21:15:26.735309Z node 8 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7519627540802542442:2753]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:26.735333Z node 8 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:26.735369Z node 8 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:26.735420Z node 8 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:26.735449Z node 8 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:26.735474Z node 8 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:26.735534Z node 8 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7519627540802542367:2747], Partition 2, Sender [0:0:0], Recipient [8:7519627540802542445:2755], Cookie: 0 2025-06-24T21:15:26.735579Z node 8 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7519627540802542445:2755]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:26.735596Z node 8 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:26.735624Z node 8 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:26.735675Z node 8 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:26.735694Z node 8 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:26.735716Z node 8 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 >> TColumnShardTestReadWrite::WriteReadStandaloneExoticTypes [GOOD] >> TColumnShardTestReadWrite::ReadStale [GOOD] >> TPopulatorQuorumTest::OneWriteOnlyRingGroup [GOOD] >> TPopulatorQuorumTest::OneRingGroup [GOOD] >> TPopulatorQuorumTest::OneDisconnectedRingGroup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteReadStandaloneExoticTypes [GOOD] Test command err: 2025-06-24T21:15:24.767759Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:24.794467Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:24.794751Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:24.801394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:24.801576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:24.801804Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:24.801882Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:24.801981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:24.802062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:24.802138Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:24.802229Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:24.802298Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:24.802423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:24.802521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:24.830560Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:24.830886Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:24.830957Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:24.831221Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:24.831443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:24.831531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:24.831586Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:24.831727Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:24.831802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:24.831850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:24.831887Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:24.832097Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:24.832175Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:24.832241Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:24.832275Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:24.832403Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:24.832474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:24.832520Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:24.832574Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:24.832634Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:24.832688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:24.832727Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:24.832982Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:24.833035Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:24.833078Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:24.833310Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:24.833372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:24.833449Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:24.833643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:24.833700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:24.833737Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:24.833821Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:24.833893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:24.833937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:24.833976Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:24.834489Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=62; 2025-06-24T21:15:24.834585Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=36; 2025-06-24T21:15:24.834724Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=51; 2025-06-24T21:15:24.834823Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=45; 2025-06-24T21:15:24.834923Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:24.835023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:24.835075Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:24.835176Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:29.405412Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:15:29.405560Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-06-24T21:15:29.405665Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=2759;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-06-24T21:15:29.405974Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:398:2409];bytes=2759;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: binary json_payload: binary ingested_at: timestamp[us] saved_at: timestamp[us] request_id: binary; 2025-06-24T21:15:29.406128Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:29.406253Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:29.406423Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:29.406659Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:15:29.406822Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:29.406963Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:29.407019Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:399:2410] finished for tablet 9437184 2025-06-24T21:15:29.407543Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:398:2409];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["f_ack","l_task_result"],"t":0.014},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.016}],"full":{"a":1750799729390829,"name":"_full_task","f":1750799729390829,"d_finished":0,"c":0,"l":1750799729407073,"d":16244},"events":[{"name":"bootstrap","f":1750799729391015,"d_finished":3444,"c":1,"l":1750799729394459,"d":3444},{"a":1750799729406634,"name":"ack","f":1750799729405128,"d_finished":1339,"c":1,"l":1750799729406467,"d":1778},{"a":1750799729406617,"name":"processing","f":1750799729395831,"d_finished":6857,"c":10,"l":1750799729406469,"d":7313},{"name":"ProduceResults","f":1750799729393036,"d_finished":3431,"c":13,"l":1750799729406998,"d":3431},{"a":1750799729407006,"name":"Finish","f":1750799729407006,"d_finished":0,"c":0,"l":1750799729407073,"d":67},{"name":"task_result","f":1750799729395848,"d_finished":5379,"c":9,"l":1750799729404944,"d":5379}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:29.407614Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:398:2409];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:15:29.408085Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:398:2409];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["f_ack","l_task_result"],"t":0.014},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.016}],"full":{"a":1750799729390829,"name":"_full_task","f":1750799729390829,"d_finished":0,"c":0,"l":1750799729407664,"d":16835},"events":[{"name":"bootstrap","f":1750799729391015,"d_finished":3444,"c":1,"l":1750799729394459,"d":3444},{"a":1750799729406634,"name":"ack","f":1750799729405128,"d_finished":1339,"c":1,"l":1750799729406467,"d":2369},{"a":1750799729406617,"name":"processing","f":1750799729395831,"d_finished":6857,"c":10,"l":1750799729406469,"d":7904},{"name":"ProduceResults","f":1750799729393036,"d_finished":3431,"c":13,"l":1750799729406998,"d":3431},{"a":1750799729407006,"name":"Finish","f":1750799729407006,"d_finished":0,"c":0,"l":1750799729407664,"d":658},{"name":"task_result","f":1750799729395848,"d_finished":5379,"c":9,"l":1750799729404944,"d":5379}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:29.408161Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:15:29.390127Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=7928;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=7928;selected_rows=0; 2025-06-24T21:15:29.408203Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:15:29.408596Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; >> TPopulatorQuorumTest::TwoRingGroups [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::ReadStale [GOOD] Test command err: 2025-06-24T21:15:28.822821Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:28.845122Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:28.845410Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:28.852686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:28.852928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:28.853157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:28.853249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:28.853370Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:28.853484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:28.853601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:28.853689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:28.853767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:28.853851Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:28.853934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:28.882645Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:28.882959Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:28.883025Z node 1 :TX_COLUMNSHARD NOTICE: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:28.883283Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:28.883472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:28.883556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:28.883608Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:28.883747Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:28.883841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:28.883909Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:28.883957Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:28.884167Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:28.884304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:28.884357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:28.884393Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:28.884512Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:28.884604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:28.884671Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:28.884723Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:28.884804Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:28.884853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:28.884892Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:28.885150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:28.885204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:28.885253Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:28.885477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:28.885571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:28.885612Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:28.885773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:28.885830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:28.885874Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:28.885968Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:28.886070Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:28.886123Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:28.886157Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:28.886668Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=57; 2025-06-24T21:15:28.886787Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=58; 2025-06-24T21:15:28.886897Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=52; 2025-06-24T21:15:28.886992Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=42; 2025-06-24T21:15:28.887112Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:28.887262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:28.887316Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:28.887384Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 6-24T21:15:29.503399Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:15:29.503588Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tables_manager.cpp:304;method=RegisterTable;path_id=9438184000001; 2025-06-24T21:15:29.503674Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine.h:144;event=RegisterTable;path_id=9438184000001; 2025-06-24T21:15:29.510956Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=9438184000001; 2025-06-24T21:15:29.511108Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tx_controller.cpp:215;event=finished_tx;tx_id=10; 2025-06-24T21:15:29.534927Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] complete at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=54320;columns=10; 2025-06-24T21:15:29.546161Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:210;event=register_operation;operation_id=1;last=1; 2025-06-24T21:15:29.546230Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=write_queue.cpp:14;writing_size=54320;operation_id=5b0782d8-514011f0-a219e268-1506ec95;in_flight=1;size_in_flight=54320; 2025-06-24T21:15:29.562628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;scope=TBuildBatchesTask::DoExecute;tablet_id=9437184;parent_id=[1:127:2157];write_id=1;path_id={internal: 9438184000001, ss: 
1};entity_id=3;size=11104;limit=10240;r_count=999;fline=column_info.h:139;sizes=5552,5552;s_splitted=5616,5720;r_splitted=499,500; 2025-06-24T21:15:29.563867Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;scope=TBuildBatchesTask::DoExecute;tablet_id=9437184;parent_id=[1:127:2157];write_id=1;path_id={internal: 9438184000001, ss: 1};fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=74272;count=11;actions=__DEFAULT,;waiting=1;; 2025-06-24T21:15:29.569379Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=54320;event=data_write_finished;writing_id=5b0782d8-514011f0-a219e268-1506ec95; 2025-06-24T21:15:29.569714Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=86;data_size=62;sum=86;count=1; 2025-06-24T21:15:29.569865Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=558;data_size=550;sum=558;count=2;size_of_meta=136; 2025-06-24T21:15:29.569957Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=630;data_size=622;sum=630;count=1;size_of_portion=208; 2025-06-24T21:15:29.571139Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 11 2025-06-24T21:15:29.571380Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=2;operation_id=1; 2025-06-24T21:15:29.583634Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 11 2025-06-24T21:15:29.597715Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750799729785 at tablet 9437184, mediator 0 2025-06-24T21:15:29.597813Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[6] execute at tablet 9437184 2025-06-24T21:15:29.598146Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=100;fline=abstract.h:83;progress_tx_id=100;lock_id=1;broken=0; 2025-06-24T21:15:29.598716Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=100;fline=tx_controller.cpp:215;event=finished_tx;tx_id=100; 2025-06-24T21:15:29.610849Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[6] complete at tablet 9437184 2025-06-24T21:15:29.611001Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;fline=abstract.h:93;progress_tx_id=100;lock_id=1;broken=0; 2025-06-24T21:15:29.611281Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;commit_tx_id=100;commit_lock_id=1;fline=manager.cpp:177;event=remove_by_insert_id;id=2;operation_id=1; 2025-06-24T21:15:29.611342Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;commit_tx_id=100;commit_lock_id=1;fline=manager.cpp:180;event=remove_operation;operation_id=1; 2025-06-24T21:15:29.611748Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:240;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:15:29.611813Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:15:29.611923Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-24T21:15:29.626171Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:15:29.626285Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:15:29.626354Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:15:29.626489Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:15:29.627048Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 1 version: {1750799369785:max} readable: {1750799729785:max} at tablet 9437184 2025-06-24T21:15:29.639379Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 1 at tablet 9437184 2025-06-24T21:15:29.639563Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tx_id=18446744073709551615;scan_id=1;gen=0;table=test_olap_table;snapshot={1750799369785:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=constructor.cpp:18;event=overriden_columns;ids=1,2,3,4,5,6,7,8,9,10,4294967040,4294967041,4294967042,4294967043; 2025-06-24T21:15:29.639686Z node 1 :TX_COLUMNSHARD_SCAN WARN: log.cpp:784: 
tx_id=18446744073709551615;scan_id=1;gen=0;table=test_olap_table;snapshot={1750799369785:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:14;event=TTxScan failed;problem=cannot build metadata withno ranges;details=Snapshot too old: {1750799369785:max}. CS min read snapshot: {1750799429785:max}. now: 2025-06-24T21:15:29.639647Z; 2025-06-24T21:15:29.655186Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750799369785:max} readable: {1750799729785:max} at tablet 9437184 2025-06-24T21:15:29.667404Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-24T21:15:29.673671Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750799369785:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } Columns { Id: 6 } } } ; 2025-06-24T21:15:29.673813Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750799369785:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } Columns { Id: 6 } } } ; 2025-06-24T21:15:29.674854Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750799369785:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":4,"inputs":[{"from":5}]},{"owner_id":5,"inputs":[{"from":6}]},{"owner_id":6,"inputs":[]}],"nodes":{"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"p":{"data":[{"name":"timestamp","id":1},{"name":"message","id":6}]},"o":"0","t":"ReserveMemory"},"w":0,"id":6},"5":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1},{"name":"message","id":6}]},"o":"1,6","t":"FetchOriginalData"},"w":4,"id":5},"4":{"p":{"i":"6","p":{"address":{"name":"message","id":6}},"o":"6","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"1,6","t":"Projection"},"w":18,"id":0}}}; 2025-06-24T21:15:29.675021Z node 1 :TX_COLUMNSHARD_SCAN WARN: log.cpp:784: tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750799369785:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:14;event=TTxScan failed;problem=cannot build metadata withno ranges;details=Snapshot too old: {1750799369785:max}. CS min read snapshot: {1750799429785:max}. now: 2025-06-24T21:15:29.674970Z; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorQuorumTest::OneWriteOnlyRingGroup [GOOD] Test command err: replicas: [1:24339059:0], [1:1099535966835:0], [1:2199047594611:0], [1:3298559222387:0], [1:4398070850163:0], [1:5497582477939:0] replicaActorToServiceMap: actor: [1:8:2055], service: [1:2199047594611:0] actor: [1:5:2052], service: [1:1099535966835:0] actor: [1:17:2064], service: [1:5497582477939:0] actor: [1:2:2049], service: [1:24339059:0] actor: [1:14:2061], service: [1:4398070850163:0] actor: [1:11:2058], service: [1:3298559222387:0] ... 
waiting for NKikimr::TEvStateStorage::TEvListSchemeBoardResult 2025-06-24T21:13:22.280682Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:795: [1:29:2075] Handle NKikimr::TEvStateStorage::TEvListSchemeBoardResult: sender# [1:19:2066] ... waiting for NKikimr::TEvStateStorage::TEvListSchemeBoardResult (done) 2025-06-24T21:13:22.328595Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:29:2075] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/TestPath" PathDescription { Self { Name: "TestPath" PathId: 100 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1 ParentPathId: 1 PathState: EPathStateNoChanges PathVersion: 1 } } PathId: 100 PathOwnerId: 72057594046678944 }: sender# [1:26:2073], cookie# 12345, event size# 76, preserialized size# 0 2025-06-24T21:13:22.328690Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:29:2075] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 100], cookie# 12345, is deletion# false, version: 1 ... waiting for updates from replica populators 2025-06-24T21:13:22.331016Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:32:2078] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:8:2055] 2025-06-24T21:13:22.331087Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:32:2078] Successful handshake: replica# [1:8:2055] 2025-06-24T21:13:22.331137Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:32:2078] Start full sync: replica# [1:8:2055] 2025-06-24T21:13:22.331266Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:33:2079] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:11:2058] 2025-06-24T21:13:22.331293Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:33:2079] Successful handshake: replica# [1:11:2058] 2025-06-24T21:13:22.331336Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:33:2079] Start full sync: replica# [1:11:2058] 2025-06-24T21:13:22.331395Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:34:2080] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:14:2061] 2025-06-24T21:13:22.331429Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:34:2080] Successful handshake: replica# [1:14:2061] 2025-06-24T21:13:22.331468Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:34:2080] Start full sync: replica# [1:14:2061] 2025-06-24T21:13:22.331507Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:35:2081] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:17:2064] 2025-06-24T21:13:22.331531Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:35:2081] Successful handshake: replica# [1:17:2064] 2025-06-24T21:13:22.331554Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:35:2081] Start full sync: replica# [1:17:2064] 2025-06-24T21:13:22.331647Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:2199047594611:0] }: sender# [1:32:2078] 2025-06-24T21:13:22.331731Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:30:2076] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:2:2049] 2025-06-24T21:13:22.331760Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: 
populator.cpp:251: [1:30:2076] Successful handshake: replica# [1:2:2049] 2025-06-24T21:13:22.331805Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:30:2076] Start full sync: replica# [1:2:2049] 2025-06-24T21:13:22.331865Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:31:2077] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:5:2052] 2025-06-24T21:13:22.331899Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:31:2077] Successful handshake: replica# [1:5:2052] 2025-06-24T21:13:22.331925Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:31:2077] Start full sync: replica# [1:5:2052] 2025-06-24T21:13:22.332016Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:32:2078] 2025-06-24T21:13:22.332104Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:32:2078] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-24T21:13:22.332306Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:3298559222387:0] }: sender# [1:33:2079] 2025-06-24T21:13:22.332378Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:32:2078] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-24T21:13:22.332469Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:33:2079] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-24T21:13:22.332585Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:32:2078] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:8:2055], cookie# 0 2025-06-24T21:13:22.332696Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:33:2079] 2025-06-24T21:13:22.332764Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:4398070850163:0] }: sender# [1:34:2080] 2025-06-24T21:13:22.332835Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:33:2079] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-24T21:13:22.332897Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:34:2080] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-24T21:13:22.333014Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:34:2080] 
2025-06-24T21:13:22.333065Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:33:2079] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:11:2058], cookie# 0 2025-06-24T21:13:22.333141Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:34:2080] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-24T21:13:22.333218Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:5497582477939:0] }: sender# [1:35:2081] 2025-06-24T21:13:22.333282Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:34:2080] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:14:2061], cookie# 0 2025-06-24T21:13:22.333365Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:35:2081] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-24T21:13:22.333455Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:35:2081] 2025-06-24T21:13:22.333516Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:35:2081] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-24T21:13:22.333586Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:35:2081] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:17:2064], cookie# 0 2025-06-24T21:13:22.333646Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:24339059:0] }: sender# [1:30:2076] 2025-06-24T21:13:22.333719Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:30:2076] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-24T21:13:22.333796Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:30:2076] 2025-06-24T21:13:22.333852Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:30:2076] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-24T21:13:22.333930Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:1099535966835:0] }: sender# [1:31:2077] 2025-06-24T21:13:22.333991Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:30:2076] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:2:2049], cookie# 0 
2025-06-24T21:13:22.334059Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:31:2077] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-24T21:13:22.334136Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:31:2077] 2025-06-24T21:13:22.334191Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:31:2077] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-24T21:13:22.334278Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:2199047594611:0] }: sender# [1:32:2078] 2025-06-24T21:13:22.334328Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:31:2077] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:5:2052], cookie# 0 2025-06-24T21:13:22.334401Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:32:2078] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-24T21:13:22.334505Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:3298559222387:0] }: sender# [1:33:2079] 2025-06-24T21:13:22.334573Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:32:2078] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:8:2055] 2025-06-24T21:13:22.334651Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:33:2079] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-24T21:13:22.334721Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:32:2078], cookie# 0 2025-06-24T21:13:22.334774Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:32:2078], cookie# 0 2025-06-24T21:13:22.334822Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:33:2079] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:11:2058] ... 
blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-24T21:13:22.334926Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:4398070850163:0] }: sender# [1:34:2080] 2025-06-24T21:13:22.334977Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:34:2080] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-24T21:13:22.335029Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:33:2079], cookie# 0 2025-06-24T21:13:22.335068Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:33:2079], cookie# 0 2025-06-24T21:13:22.335109Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:34:2080] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:14:2061] ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-24T21:13:22.335198Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:34:2080], cookie# 0 2025-06-24T21:13:22.335223Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:34:2080], cookie# 0 ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-24T21:13:22.335310Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:5497582477939:0] }: sender# [1:35:2081] 2025-06-24T21:13:22.335366Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:35:2081] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-24T21:13:22.335436Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:35:2081], cookie# 0 2025-06-24T21:13:22.335482Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:35:2081], cookie# 0 2025-06-24T21:13:22.335527Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:35:2081] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:17:2064] ... 
blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-24T21:13:22.335617Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:24339059:0] }: sender# [1:30:2076] 2025-06-24T21:13:22.335674Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:30:2076] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-24T21:13:22.335731Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:30:2076], cookie# 0 2025-06-24T21:13:22.335756Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:30:2076], cookie# 0 2025-06-24T21:13:22.335789Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:30:2076] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:2:2049] ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-24T21:13:22.335881Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:1099535966835:0] }: sender# [1:31:2077] 2025-06-24T21:13:22.335935Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:31:2077] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-24T21:13:22.336008Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:31:2077], cookie# 0 2025-06-24T21:13:22.336035Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:31:2077], cookie# 0 2025-06-24T21:13:22.336064Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:31:2077] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:5:2052] ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 ... 
waiting for updates from replica populators (done) populatorToReplicaMap: populator: [1:33:2079], replica: [1:3298559222387:0] populator: [1:30:2076], replica: [1:24339059:0] populator: [1:31:2077], replica: [1:1099535966835:0] populator: [1:34:2080], replica: [1:4398070850163:0] populator: [1:35:2081], replica: [1:5497582477939:0] populator: [1:32:2078], replica: [1:2199047594611:0] 2025-06-24T21:13:22.336348Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:32:2078], cookie# 12345 2025-06-24T21:15:29.774150Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:30:2076], cookie# 12345 2025-06-24T21:15:29.774274Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:29:2075] Ack update: ack to# [1:26:2073], cookie# 12345, pathId# [OwnerId: 72057594046678944, LocalPathId: 100], version# 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorQuorumTest::OneRingGroup [GOOD] Test command err: replicas: [1:24339059:0], [1:1099535966835:0], [1:2199047594611:0] replicaActorToServiceMap: actor: [1:8:2055], service: [1:2199047594611:0] actor: [1:5:2052], service: [1:1099535966835:0] actor: [1:2:2049], service: [1:24339059:0] ... waiting for NKikimr::TEvStateStorage::TEvListSchemeBoardResult 2025-06-24T21:13:22.278798Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:795: [1:20:2066] Handle NKikimr::TEvStateStorage::TEvListSchemeBoardResult: sender# [1:10:2057] ... waiting for NKikimr::TEvStateStorage::TEvListSchemeBoardResult (done) 2025-06-24T21:13:22.326959Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:20:2066] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/TestPath" PathDescription { Self { Name: "TestPath" PathId: 100 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1 ParentPathId: 1 PathState: EPathStateNoChanges PathVersion: 1 } } PathId: 100 PathOwnerId: 72057594046678944 }: sender# [1:17:2064], cookie# 12345, event size# 76, preserialized size# 0 2025-06-24T21:13:22.327064Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:20:2066] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 100], cookie# 12345, is deletion# false, version: 1 ... 
waiting for updates from replica populators 2025-06-24T21:13:22.330813Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:23:2069] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:8:2055] 2025-06-24T21:13:22.330895Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:23:2069] Successful handshake: replica# [1:8:2055] 2025-06-24T21:13:22.330937Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:23:2069] Start full sync: replica# [1:8:2055] 2025-06-24T21:13:22.331111Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:20:2066] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:2199047594611:0] }: sender# [1:23:2069] 2025-06-24T21:13:22.331208Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:21:2067] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:2:2049] 2025-06-24T21:13:22.331238Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:21:2067] Successful handshake: replica# [1:2:2049] 2025-06-24T21:13:22.331281Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:21:2067] Start full sync: replica# [1:2:2049] 2025-06-24T21:13:22.331345Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:22:2068] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:5:2052] 2025-06-24T21:13:22.331371Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:22:2068] Successful handshake: replica# [1:5:2052] 2025-06-24T21:13:22.331393Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:22:2068] Start full sync: replica# [1:5:2052] 2025-06-24T21:13:22.331485Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:20:2066] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:23:2069] 2025-06-24T21:13:22.331573Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:23:2069] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:20:2066] 2025-06-24T21:13:22.331746Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:20:2066] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:24339059:0] }: sender# [1:21:2067] 2025-06-24T21:13:22.331825Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:21:2067] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:20:2066] 2025-06-24T21:13:22.331895Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:23:2069] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:20:2066], cookie# 0 2025-06-24T21:13:22.332010Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:23:2069] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:8:2055], cookie# 0 2025-06-24T21:13:22.332125Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:20:2066] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:21:2067] 
2025-06-24T21:13:22.332191Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:21:2067] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:20:2066], cookie# 0 2025-06-24T21:13:22.332305Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:20:2066] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:1099535966835:0] }: sender# [1:22:2068] 2025-06-24T21:13:22.332356Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:21:2067] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:2:2049], cookie# 0 2025-06-24T21:13:22.332426Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:22:2068] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:20:2066] 2025-06-24T21:13:22.332515Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:20:2066] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:22:2068] 2025-06-24T21:13:22.332574Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:22:2068] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:20:2066], cookie# 0 2025-06-24T21:13:22.332649Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:20:2066] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:2199047594611:0] }: sender# [1:23:2069] 2025-06-24T21:13:22.332702Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:22:2068] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:5:2052], cookie# 0 2025-06-24T21:13:22.332761Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:23:2069] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:20:2066] 2025-06-24T21:13:22.332872Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:20:2066] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:24339059:0] }: sender# [1:21:2067] 2025-06-24T21:13:22.332913Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:21:2067] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:20:2066] 2025-06-24T21:13:22.332983Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:23:2069] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:8:2055] 2025-06-24T21:13:22.333046Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:21:2067] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:2:2049] 2025-06-24T21:13:22.333102Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:20:2066] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:23:2069], cookie# 0 2025-06-24T21:13:22.333143Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: 
populator.cpp:756: [1:20:2066] Ack for unknown update (already acked?): sender# [1:23:2069], cookie# 0 ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-24T21:13:22.333248Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:20:2066] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:21:2067], cookie# 0 2025-06-24T21:13:22.333278Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:20:2066] Ack for unknown update (already acked?): sender# [1:21:2067], cookie# 0 ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-24T21:13:22.333347Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:20:2066] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:1099535966835:0] }: sender# [1:22:2068] 2025-06-24T21:13:22.333402Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:22:2068] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:20:2066] 2025-06-24T21:13:22.333486Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:20:2066] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:22:2068], cookie# 0 2025-06-24T21:13:22.333518Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:20:2066] Ack for unknown update (already acked?): sender# [1:22:2068], cookie# 0 2025-06-24T21:13:22.333552Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:22:2068] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:5:2052] ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 ... 
waiting for updates from replica populators (done) populatorToReplicaMap: populator: [1:22:2068], replica: [1:1099535966835:0] populator: [1:23:2069], replica: [1:2199047594611:0] populator: [1:21:2067], replica: [1:24339059:0] 2025-06-24T21:13:22.333725Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:20:2066] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:23:2069], cookie# 12345 2025-06-24T21:15:30.062409Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:20:2066] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:21:2067], cookie# 12345 2025-06-24T21:15:30.062488Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:20:2066] Ack update: ack to# [1:17:2064], cookie# 12345, pathId# [OwnerId: 72057594046678944, LocalPathId: 100], version# 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorQuorumTest::OneDisconnectedRingGroup [GOOD] Test command err: replicas: [1:24339059:0], [1:1099535966835:0], [1:2199047594611:0], [1:3298559222387:0], [1:4398070850163:0], [1:5497582477939:0] replicaActorToServiceMap: actor: [1:8:2055], service: [1:2199047594611:0] actor: [1:5:2052], service: [1:1099535966835:0] actor: [1:17:2064], service: [1:5497582477939:0] actor: [1:2:2049], service: [1:24339059:0] actor: [1:14:2061], service: [1:4398070850163:0] actor: [1:11:2058], service: [1:3298559222387:0] ... waiting for NKikimr::TEvStateStorage::TEvListSchemeBoardResult 2025-06-24T21:13:22.278253Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:795: [1:29:2075] Handle NKikimr::TEvStateStorage::TEvListSchemeBoardResult: sender# [1:19:2066] ... waiting for NKikimr::TEvStateStorage::TEvListSchemeBoardResult (done) 2025-06-24T21:13:22.324219Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:29:2075] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/TestPath" PathDescription { Self { Name: "TestPath" PathId: 100 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1 ParentPathId: 1 PathState: EPathStateNoChanges PathVersion: 1 } } PathId: 100 PathOwnerId: 72057594046678944 }: sender# [1:26:2073], cookie# 12345, event size# 76, preserialized size# 0 2025-06-24T21:13:22.324318Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:29:2075] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 100], cookie# 12345, is deletion# false, version: 1 ... 
waiting for updates from replica populators 2025-06-24T21:13:22.326576Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:32:2078] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:8:2055] 2025-06-24T21:13:22.326652Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:32:2078] Successful handshake: replica# [1:8:2055] 2025-06-24T21:13:22.326690Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:32:2078] Start full sync: replica# [1:8:2055] 2025-06-24T21:13:22.326776Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:33:2079] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:11:2058] 2025-06-24T21:13:22.326805Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:33:2079] Successful handshake: replica# [1:11:2058] 2025-06-24T21:13:22.326845Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:33:2079] Start full sync: replica# [1:11:2058] 2025-06-24T21:13:22.326901Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:34:2080] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:14:2061] 2025-06-24T21:13:22.326929Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:34:2080] Successful handshake: replica# [1:14:2061] 2025-06-24T21:13:22.326952Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:34:2080] Start full sync: replica# [1:14:2061] 2025-06-24T21:13:22.326989Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:35:2081] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:17:2064] 2025-06-24T21:13:22.327011Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:35:2081] Successful handshake: replica# [1:17:2064] 2025-06-24T21:13:22.327034Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:35:2081] Start full sync: replica# [1:17:2064] 2025-06-24T21:13:22.327114Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:2199047594611:0] }: sender# [1:32:2078] 2025-06-24T21:13:22.327328Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:30:2076] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:2:2049] 2025-06-24T21:13:22.327359Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:30:2076] Successful handshake: replica# [1:2:2049] 2025-06-24T21:13:22.327397Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:30:2076] Start full sync: replica# [1:2:2049] 2025-06-24T21:13:22.327473Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:31:2077] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:5:2052] 2025-06-24T21:13:22.327500Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:31:2077] Successful handshake: replica# [1:5:2052] 2025-06-24T21:13:22.327522Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:31:2077] Start full sync: replica# [1:5:2052] 2025-06-24T21:13:22.327602Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:32:2078] 2025-06-24T21:13:22.327684Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:32:2078] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false 
DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-24T21:13:22.327867Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:3298559222387:0] }: sender# [1:33:2079] 2025-06-24T21:13:22.327926Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:32:2078] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-24T21:13:22.328001Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:33:2079] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-24T21:13:22.328113Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:32:2078] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:8:2055], cookie# 0 2025-06-24T21:13:22.328219Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:33:2079] 2025-06-24T21:13:22.328291Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:4398070850163:0] }: sender# [1:34:2080] 2025-06-24T21:13:22.328353Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:33:2079] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-24T21:13:22.328427Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:34:2080] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-24T21:13:22.328520Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:34:2080] 2025-06-24T21:13:22.328567Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:33:2079] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:11:2058], cookie# 0 2025-06-24T21:13:22.328633Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:34:2080] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-24T21:13:22.328700Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:5497582477939:0] }: sender# [1:35:2081] 2025-06-24T21:13:22.328768Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:34:2080] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:14:2061], cookie# 0 2025-06-24T21:13:22.328833Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:35:2081] Handle 
NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-24T21:13:22.328902Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:35:2081] 2025-06-24T21:13:22.328987Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:35:2081] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-24T21:13:22.329048Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:35:2081] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:17:2064], cookie# 0 2025-06-24T21:13:22.329102Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:24339059:0] }: sender# [1:30:2076] 2025-06-24T21:13:22.329175Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:30:2076] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-24T21:13:22.329250Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:30:2076] 2025-06-24T21:13:22.329299Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:30:2076] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-24T21:13:22.329380Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:1099535966835:0] }: sender# [1:31:2077] 2025-06-24T21:13:22.329434Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:30:2076] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:2:2049], cookie# 0 2025-06-24T21:13:22.329501Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:31:2077] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-24T21:13:22.329586Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:31:2077] 2025-06-24T21:13:22.329629Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:31:2077] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-24T21:13:22.329698Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:2199047594611:0] }: sender# [1:32:2078] 2025-06-24T21:13:22.329748Z node 1 
:SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:31:2077] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:5:2052], cookie# 0 2025-06-24T21:13:22.329804Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:32:2078] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-24T21:13:22.329884Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:3298559222387:0] }: sender# [1:33:2079] 2025-06-24T21:13:22.329948Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:32:2078] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:8:2055] 2025-06-24T21:13:22.330026Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:33:2079] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-24T21:13:22.330104Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:32:2078], cookie# 0 2025-06-24T21:13:22.330148Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:32:2078], cookie# 0 2025-06-24T21:13:22.330188Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:33:2079] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:11:2058] ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-24T21:13:22.330296Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:4398070850163:0] }: sender# [1:34:2080] 2025-06-24T21:13:22.330353Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:34:2080] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-24T21:13:22.330413Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:33:2079], cookie# 0 2025-06-24T21:13:22.330442Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:33:2079], cookie# 0 2025-06-24T21:13:22.330481Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:34:2080] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:14:2061] ... 
blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-24T21:13:22.330544Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:34:2080], cookie# 0 2025-06-24T21:13:22.330566Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:34:2080], cookie# 0 ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-24T21:13:22.330632Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:5497582477939:0] }: sender# [1:35:2081] 2025-06-24T21:13:22.330680Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:35:2081] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-24T21:13:22.330750Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:35:2081], cookie# 0 2025-06-24T21:13:22.330778Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:35:2081], cookie# 0 2025-06-24T21:13:22.330820Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:35:2081] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:17:2064] ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-24T21:13:22.330907Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:24339059:0] }: sender# [1:30:2076] 2025-06-24T21:13:22.330972Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:30:2076] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-24T21:13:22.331024Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:30:2076], cookie# 0 2025-06-24T21:13:22.331049Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:30:2076], cookie# 0 2025-06-24T21:13:22.331079Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:30:2076] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:2:2049] ... 
blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-24T21:13:22.331219Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:1099535966835:0] }: sender# [1:31:2077] 2025-06-24T21:13:22.331277Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:31:2077] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-24T21:13:22.331351Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:31:2077], cookie# 0 2025-06-24T21:13:22.331394Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:31:2077], cookie# 0 2025-06-24T21:13:22.331434Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:31:2077] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:5:2052] ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 ... waiting for updates from replica populators (done) populatorToReplicaMap: populator: [1:33:2079], replica: [1:3298559222387:0] populator: [1:30:2076], replica: [1:24339059:0] populator: [1:31:2077], replica: [1:1099535966835:0] populator: [1:34:2080], replica: [1:4398070850163:0] populator: [1:35:2081], replica: [1:5497582477939:0] populator: [1:32:2078], replica: [1:2199047594611:0] 2025-06-24T21:13:22.331710Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:32:2078], cookie# 12345 2025-06-24T21:15:29.987160Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:30:2076], cookie# 12345 2025-06-24T21:15:29.987262Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:29:2075] Ack update: ack to# [1:26:2073], cookie# 12345, pathId# [OwnerId: 72057594046678944, LocalPathId: 100], version# 1 >> TColumnShardTestReadWrite::WriteRead ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorQuorumTest::TwoRingGroups [GOOD] Test command err: replicas: [1:24339059:0], [1:1099535966835:0], [1:2199047594611:0], [1:3298559222387:0], [1:4398070850163:0], [1:5497582477939:0] replicaActorToServiceMap: actor: [1:8:2055], service: [1:2199047594611:0] actor: [1:5:2052], service: [1:1099535966835:0] actor: [1:17:2064], service: [1:5497582477939:0] actor: [1:2:2049], service: [1:24339059:0] actor: [1:14:2061], service: [1:4398070850163:0] actor: [1:11:2058], service: [1:3298559222387:0] ... waiting for NKikimr::TEvStateStorage::TEvListSchemeBoardResult 2025-06-24T21:13:22.278253Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:795: [1:29:2075] Handle NKikimr::TEvStateStorage::TEvListSchemeBoardResult: sender# [1:19:2066] ... 
waiting for NKikimr::TEvStateStorage::TEvListSchemeBoardResult (done) 2025-06-24T21:13:22.319018Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:29:2075] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/TestPath" PathDescription { Self { Name: "TestPath" PathId: 100 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1 ParentPathId: 1 PathState: EPathStateNoChanges PathVersion: 1 } } PathId: 100 PathOwnerId: 72057594046678944 }: sender# [1:26:2073], cookie# 12345, event size# 76, preserialized size# 0 2025-06-24T21:13:22.319131Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:29:2075] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 100], cookie# 12345, is deletion# false, version: 1 ... waiting for updates from replica populators 2025-06-24T21:13:22.322456Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:32:2078] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:8:2055] 2025-06-24T21:13:22.322535Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:32:2078] Successful handshake: replica# [1:8:2055] 2025-06-24T21:13:22.322578Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:32:2078] Start full sync: replica# [1:8:2055] 2025-06-24T21:13:22.322664Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:33:2079] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:11:2058] 2025-06-24T21:13:22.322698Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:33:2079] Successful handshake: replica# [1:11:2058] 2025-06-24T21:13:22.322750Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:33:2079] Start full sync: replica# [1:11:2058] 2025-06-24T21:13:22.322808Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:34:2080] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:14:2061] 2025-06-24T21:13:22.322839Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:34:2080] Successful handshake: replica# [1:14:2061] 2025-06-24T21:13:22.322869Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:34:2080] Start full sync: replica# [1:14:2061] 2025-06-24T21:13:22.322906Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:35:2081] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:17:2064] 2025-06-24T21:13:22.322928Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:35:2081] Successful handshake: replica# [1:17:2064] 2025-06-24T21:13:22.322951Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:35:2081] Start full sync: replica# [1:17:2064] 2025-06-24T21:13:22.323043Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:2199047594611:0] }: sender# [1:32:2078] 2025-06-24T21:13:22.323133Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:30:2076] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:2:2049] 2025-06-24T21:13:22.323180Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:30:2076] Successful handshake: replica# [1:2:2049] 2025-06-24T21:13:22.323215Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:30:2076] Start full sync: replica# [1:2:2049] 2025-06-24T21:13:22.323263Z node 1 
:SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:31:2077] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:5:2052] 2025-06-24T21:13:22.323291Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:31:2077] Successful handshake: replica# [1:5:2052] 2025-06-24T21:13:22.323316Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:31:2077] Start full sync: replica# [1:5:2052] 2025-06-24T21:13:22.323390Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:32:2078] 2025-06-24T21:13:22.323479Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:32:2078] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-24T21:13:22.324282Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:3298559222387:0] }: sender# [1:33:2079] 2025-06-24T21:13:22.324352Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:32:2078] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-24T21:13:22.324416Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:33:2079] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-24T21:13:22.324520Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:32:2078] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:8:2055], cookie# 0 2025-06-24T21:13:22.324615Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:33:2079] 2025-06-24T21:13:22.324698Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:4398070850163:0] }: sender# [1:34:2080] 2025-06-24T21:13:22.324749Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:33:2079] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-24T21:13:22.324789Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:34:2080] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-24T21:13:22.324846Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:34:2080] 2025-06-24T21:13:22.324879Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:33:2079] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: 
sender# [1:11:2058], cookie# 0 2025-06-24T21:13:22.324934Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:34:2080] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-24T21:13:22.325008Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:5497582477939:0] }: sender# [1:35:2081] 2025-06-24T21:13:22.325048Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:34:2080] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:14:2061], cookie# 0 2025-06-24T21:13:22.325090Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:35:2081] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-24T21:13:22.325132Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:35:2081] 2025-06-24T21:13:22.325171Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:35:2081] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-24T21:13:22.325223Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:35:2081] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:17:2064], cookie# 0 2025-06-24T21:13:22.325257Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:24339059:0] }: sender# [1:30:2076] 2025-06-24T21:13:22.325298Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:30:2076] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-24T21:13:22.325339Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:30:2076] 2025-06-24T21:13:22.325370Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:30:2076] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-24T21:13:22.325422Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:1099535966835:0] }: sender# [1:31:2077] 2025-06-24T21:13:22.325461Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:30:2076] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:2:2049], cookie# 0 2025-06-24T21:13:22.325494Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:31:2077] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: 
[OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-24T21:13:22.325533Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:31:2077] 2025-06-24T21:13:22.325562Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:31:2077] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-24T21:13:22.325615Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:2199047594611:0] }: sender# [1:32:2078] 2025-06-24T21:13:22.325644Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:31:2077] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:5:2052], cookie# 0 2025-06-24T21:13:22.325684Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:32:2078] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-24T21:13:22.325744Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:3298559222387:0] }: sender# [1:33:2079] 2025-06-24T21:13:22.325777Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:32:2078] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:8:2055] 2025-06-24T21:13:22.325839Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:33:2079] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-24T21:13:22.325884Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:32:2078], cookie# 0 2025-06-24T21:13:22.325913Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:32:2078], cookie# 0 2025-06-24T21:13:22.325948Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:33:2079] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:11:2058] ... 
blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-24T21:13:22.326036Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:4398070850163:0] }: sender# [1:34:2080] 2025-06-24T21:13:22.326067Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:34:2080] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-24T21:13:22.326096Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:33:2079], cookie# 0 2025-06-24T21:13:22.326138Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:33:2079], cookie# 0 2025-06-24T21:13:22.326160Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:34:2080] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:14:2061] ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-24T21:13:22.326214Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:34:2080], cookie# 0 2025-06-24T21:13:22.326229Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:34:2080], cookie# 0 ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-24T21:13:22.326264Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:5497582477939:0] }: sender# [1:35:2081] 2025-06-24T21:13:22.326296Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:35:2081] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-24T21:13:22.326334Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:35:2081], cookie# 0 2025-06-24T21:13:22.326360Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:35:2081], cookie# 0 2025-06-24T21:13:22.326383Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:35:2081] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:17:2064] ... 
blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-24T21:13:22.326428Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:24339059:0] }: sender# [1:30:2076] 2025-06-24T21:13:22.326467Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:30:2076] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-24T21:13:22.326498Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:30:2076], cookie# 0 2025-06-24T21:13:22.326511Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:30:2076], cookie# 0 2025-06-24T21:13:22.326530Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:30:2076] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:2:2049] ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-24T21:13:22.326581Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:1099535966835:0] }: sender# [1:31:2077] 2025-06-24T21:13:22.326617Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:31:2077] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-24T21:13:22.326660Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:31:2077], cookie# 0 2025-06-24T21:13:22.326677Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:31:2077], cookie# 0 2025-06-24T21:13:22.326706Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:31:2077] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:5:2052] ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 ... 
waiting for updates from replica populators (done) populatorToReplicaMap: populator: [1:33:2079], replica: [1:3298559222387:0] populator: [1:30:2076], replica: [1:24339059:0] populator: [1:31:2077], replica: [1:1099535966835:0] populator: [1:34:2080], replica: [1:4398070850163:0] populator: [1:35:2081], replica: [1:5497582477939:0] populator: [1:32:2078], replica: [1:2199047594611:0] 2025-06-24T21:13:22.326866Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:32:2078], cookie# 12345 2025-06-24T21:13:22.326905Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:33:2079], cookie# 12345 2025-06-24T21:13:22.326935Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:34:2080], cookie# 12345 2025-06-24T21:15:30.196150Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:30:2076], cookie# 12345 2025-06-24T21:15:30.196266Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:29:2075] Ack update: ack to# [1:26:2073], cookie# 12345, pathId# [OwnerId: 72057594046678944, LocalPathId: 100], version# 1 >> TColumnShardTestReadWrite::WriteStandalone |95.9%| [TA] $(B)/ydb/core/tx/scheme_board/ut_populator/test-results/unittest/{meta.json ... results_accumulator.log} |95.9%| [TA] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_populator/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt64_Reboot |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::WriteTable >> LocalTableWriter::WaitTxIds >> LocalTableWriter::DataAlongWithHeartbeat >> LocalTableWriter::ConsistentWrite >> TColumnShardTestReadWrite::WriteRead [GOOD] >> TColumnShardTestReadWrite::WriteStandalone [GOOD] >> TColumnShardTestReadWrite::RebootWriteReadStandalone [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteRead [GOOD] Test command err: 2025-06-24T21:15:31.203754Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:31.233496Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:31.233858Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:31.242261Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:31.242507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:31.242761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:31.242909Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:31.243049Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:31.243202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:31.243355Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:31.243468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:31.243575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:31.243715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:31.243867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:31.275919Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:31.276264Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:31.276336Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:31.276551Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:31.276788Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:31.276890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:31.276949Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:31.277084Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:31.277173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:31.277229Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:31.277266Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:31.277472Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:31.277545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:31.277608Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:31.277642Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:31.277767Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:31.277840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:31.277888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:31.277995Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:31.278074Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:31.278131Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:31.278163Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:31.278409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:31.278461Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:31.278503Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:31.278765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:31.278840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:31.278876Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:31.279048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:31.279123Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:31.279192Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:31.279281Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:31.279391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:31.279461Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:31.279502Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:31.280005Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=58; 2025-06-24T21:15:31.280106Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=47; 2025-06-24T21:15:31.280253Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=55; 2025-06-24T21:15:31.280375Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=69; 2025-06-24T21:15:31.280476Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:31.280588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:31.280645Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:31.280699Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:36.131537Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:15:36.131687Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-06-24T21:15:36.131787Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=1984;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-06-24T21:15:36.132137Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:398:2409];bytes=1984;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: string json_payload: string ingested_at: timestamp[us] saved_at: timestamp[us] request_id: string; 2025-06-24T21:15:36.132334Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:36.132481Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:36.132617Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:36.132873Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:15:36.133042Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:36.133187Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:36.133237Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:399:2410] finished for tablet 9437184 2025-06-24T21:15:36.133770Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:398:2409];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["f_ack","l_task_result"],"t":0.023},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.025}],"full":{"a":1750799736107336,"name":"_full_task","f":1750799736107336,"d_finished":0,"c":0,"l":1750799736133295,"d":25959},"events":[{"name":"bootstrap","f":1750799736107571,"d_finished":3884,"c":1,"l":1750799736111455,"d":3884},{"a":1750799736132848,"name":"ack","f":1750799736131191,"d_finished":1459,"c":1,"l":1750799736132650,"d":1906},{"a":1750799736132829,"name":"processing","f":1750799736112946,"d_finished":15330,"c":10,"l":1750799736132652,"d":15796},{"name":"ProduceResults","f":1750799736109838,"d_finished":3790,"c":13,"l":1750799736133219,"d":3790},{"a":1750799736133223,"name":"Finish","f":1750799736133223,"d_finished":0,"c":0,"l":1750799736133295,"d":72},{"name":"task_result","f":1750799736112970,"d_finished":13701,"c":9,"l":1750799736130954,"d":13701}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:36.133852Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:398:2409];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:15:36.134423Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:398:2409];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["f_ack","l_task_result"],"t":0.023},{"events":["l_ProduceResults","f_Finish"],"t":0.025},{"events":["l_ack","l_processing","l_Finish"],"t":0.026}],"full":{"a":1750799736107336,"name":"_full_task","f":1750799736107336,"d_finished":0,"c":0,"l":1750799736133904,"d":26568},"events":[{"name":"bootstrap","f":1750799736107571,"d_finished":3884,"c":1,"l":1750799736111455,"d":3884},{"a":1750799736132848,"name":"ack","f":1750799736131191,"d_finished":1459,"c":1,"l":1750799736132650,"d":2515},{"a":1750799736132829,"name":"processing","f":1750799736112946,"d_finished":15330,"c":10,"l":1750799736132652,"d":16405},{"name":"ProduceResults","f":1750799736109838,"d_finished":3790,"c":13,"l":1750799736133219,"d":3790},{"a":1750799736133223,"name":"Finish","f":1750799736133223,"d_finished":0,"c":0,"l":1750799736133904,"d":681},{"name":"task_result","f":1750799736112970,"d_finished":13701,"c":9,"l":1750799736130954,"d":13701}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:36.134522Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:15:36.106593Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=7600;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=7600;selected_rows=0; 2025-06-24T21:15:36.134574Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:15:36.134973Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:399:2410];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteStandalone [GOOD] Test command err: 2025-06-24T21:15:31.443514Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:31.473791Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:31.474142Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:31.482496Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:31.482755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:31.483030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:31.483184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:31.483358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:31.483504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:31.483659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:31.483783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:31.483893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:31.484020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:31.484145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:31.516834Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:31.517136Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:31.517214Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:31.517425Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:31.517600Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:31.517681Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:31.517731Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:31.517859Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:31.517951Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:31.518004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:31.518045Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:31.518253Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:31.518327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:31.518373Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:31.518414Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:31.518547Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:31.518613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:31.518659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:31.518711Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:31.518772Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:31.518834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:31.518876Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:31.519119Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:31.519194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:31.519260Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:31.519487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:31.519547Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:31.519582Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:31.519739Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:31.519792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:31.519842Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:31.519955Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:31.520036Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:31.520085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:31.520122Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:31.520694Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=58; 2025-06-24T21:15:31.520807Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=44; 2025-06-24T21:15:31.520897Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=41; 2025-06-24T21:15:31.521003Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=52; 2025-06-24T21:15:31.521116Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:31.521262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:31.521325Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:31.521385Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... [{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"19,19,19,19,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"20,20,20,20,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"21,21,21,21,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"22,22,22,22,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"23,23,23,23,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"24,24,24,24,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"25,25,25,25,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"26,26,26,26,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"27,27,27,27,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"28,28,28,28,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"29,29,29,29,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"30,30,30,30,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"31,31,31,31,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"32,32,32,32,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"33,33,33,33,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"
count_include":1},"id":1}]},"p":{"include":0,"pk":"34,34,34,34,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"35,35,35,35,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"36,36,36,36,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"37,37,37,37,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"38,38,38,38,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"39,39,39,39,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"40,40,40,40,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"41,41,41,41,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"42,42,42,42,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"43,43,43,43,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"44,44,44,44,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"45,45,45,45,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"46,46,46,46,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"47,47,47,47,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"48,48,48,48,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"49,49,49,49,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"50,50,50,50,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"51,51,51,51,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"52,52,52,52,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"53,53,53,53,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"54,54,54,54,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"55,55,55,55,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"56,56,56,56,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"57,57,57,57,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],
"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"58,58,58,58,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"59,59,59,59,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"60,60,60,60,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"61,61,61,61,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"62,62,62,62,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"63,63,63,63,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"64,64,64,64,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"65,65,65,65,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"66,66,66,66,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"67,67,67,67,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"68,68,68,68,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"69,69,69,69,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"70,70,70,70,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"71,71,71,71,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"72,72,72,72,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"73,73,73,73,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"74,74,74,74,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"75,75,75,75,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"76,76,76,76,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"77,77,77,77,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"78,78,78,78,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"79,79,79,79,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"80,80,80,80,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"81,81,81,81,"}},{"i":{"txs":[],"starts":[{"inc":{"count_
include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"82,82,82,82,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"83,83,83,83,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"84,84,84,84,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"85,85,85,85,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"86,86,86,86,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"87,87,87,87,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"88,88,88,88,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"89,89,89,89,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"90,90,90,90,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"91,91,91,91,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"92,92,92,92,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"93,93,93,93,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"94,94,94,94,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"95,95,95,95,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"96,96,96,96,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"97,97,97,97,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"98,98,98,98,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"99,99,99,99,"}}]}; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::RebootWriteReadStandalone [GOOD] Test command err: 2025-06-24T21:15:29.346678Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:29.373972Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:29.374274Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:29.381569Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 
2025-06-24T21:15:29.381785Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:29.382030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:29.382136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:29.382280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:29.382391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:29.382500Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:29.382607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:29.382702Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:29.382830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:29.382956Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:29.412356Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:29.412576Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:29.412627Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:29.412817Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:29.412974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:29.413037Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:29.413077Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:29.413187Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:29.413253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:29.413293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:29.413329Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:29.413484Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:29.413539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:29.413576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:29.413606Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:29.413712Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:29.413768Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:29.413838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:29.413881Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:29.413930Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:29.413964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:29.413992Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 
2025-06-24T21:15:29.414189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:29.414229Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:29.414274Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:29.414452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:29.414513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:29.414547Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:29.414674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:29.414721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:29.414752Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:29.414817Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:29.414876Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:29.414911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:29.414937Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:29.415383Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=55; 2025-06-24T21:15:29.415469Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=33; 2025-06-24T21:15:29.415537Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=29; 2025-06-24T21:15:29.415609Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=35; 
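The TTxUpdateSchema records above are flat key=value pairs separated by semicolons, and a value may itself contain '=' (description=CLASS_NAME=Granules), so naive splitting on every '=' loses data. A minimal reader, assuming only the record format shown here, that also recovers the order in which the ten normalizers finished (Granules, Chunks, TablesCleaner, CleanGranuleId, GCCountersNormalizer, SyncPortionFromChunks, RestoreV1Chunks_V2, RestoreV2Chunks, CleanDeprecatedSnapshot, RestoreV0ChunksMeta):

    import re
    import sys

    def parse_fields(record):
        # Split a 'k=v;k=v;...' columnshard record into a dict.
        # Split each field on the first '=' only, since values such as
        # description=CLASS_NAME=Granules contain '=' themselves.
        fields = {}
        for part in record.split(";"):
            part = part.strip()
            if "=" in part:
                key, value = part.split("=", 1)
                fields[key] = value
        return fields

    def normalizer_finish_order(log_text):
        # Class names in the order their normalizer_finished events appear.
        pattern = r"event=normalizer_finished;description=CLASS_NAME=([^;]+);"
        return re.findall(pattern, log_text)

    if __name__ == "__main__":
        print(normalizer_finish_order(sys.stdin.read()))

Run against the columnshard output above, it returns the full set of ten class names for each tablet startup, which is a quick check that no normalizer was skipped.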
2025-06-24T21:15:29.415704Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:29.415789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:29.415841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:29.415915Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:36.274065Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:15:36.274242Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-06-24T21:15:36.274353Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=1984;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-06-24T21:15:36.274739Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1000:2855];bytes=1984;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: string json_payload: string ingested_at: timestamp[us] saved_at: timestamp[us] request_id: string; 2025-06-24T21:15:36.274937Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:36.275090Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:36.275266Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:36.275560Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:15:36.275735Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:36.275905Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:36.275958Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:1001:2856] finished for tablet 9437184 2025-06-24T21:15:36.276590Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:1000:2855];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.006},{"events":["f_ack","l_task_result"],"t":0.017},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.019}],"full":{"a":1750799736256118,"name":"_full_task","f":1750799736256118,"d_finished":0,"c":0,"l":1750799736276032,"d":19914},"events":[{"name":"bootstrap","f":1750799736256417,"d_finished":4292,"c":1,"l":1750799736260709,"d":4292},{"a":1750799736275533,"name":"ack","f":1750799736273667,"d_finished":1638,"c":1,"l":1750799736275305,"d":2137},{"a":1750799736275507,"name":"processing","f":1750799736262368,"d_finished":8048,"c":10,"l":1750799736275311,"d":8573},{"name":"ProduceResults","f":1750799736258876,"d_finished":4192,"c":13,"l":1750799736275938,"d":4192},{"a":1750799736275943,"name":"Finish","f":1750799736275943,"d_finished":0,"c":0,"l":1750799736276032,"d":89},{"name":"task_result","f":1750799736262395,"d_finished":6231,"c":9,"l":1750799736273412,"d":6231}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:36.276700Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1000:2855];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:15:36.277269Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:1000:2855];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.006},{"events":["f_ack","l_task_result"],"t":0.017},{"events":["l_ProduceResults","f_Finish"],"t":0.019},{"events":["l_ack","l_processing","l_Finish"],"t":0.02}],"full":{"a":1750799736256118,"name":"_full_task","f":1750799736256118,"d_finished":0,"c":0,"l":1750799736276759,"d":20641},"events":[{"name":"bootstrap","f":1750799736256417,"d_finished":4292,"c":1,"l":1750799736260709,"d":4292},{"a":1750799736275533,"name":"ack","f":1750799736273667,"d_finished":1638,"c":1,"l":1750799736275305,"d":2864},{"a":1750799736275507,"name":"processing","f":1750799736262368,"d_finished":8048,"c":10,"l":1750799736275311,"d":9300},{"name":"ProduceResults","f":1750799736258876,"d_finished":4192,"c":13,"l":1750799736275938,"d":4192},{"a":1750799736275943,"name":"Finish","f":1750799736275943,"d_finished":0,"c":0,"l":1750799736276759,"d":816},{"name":"task_result","f":1750799736262395,"d_finished":6231,"c":9,"l":1750799736273412,"d":6231}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-24T21:15:36.277383Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:15:36.255323Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=7600;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=7600;selected_rows=0; 2025-06-24T21:15:36.277440Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:15:36.277880Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:1001:2856];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; >> LocalTableWriter::DecimalKeys >> LocalTableWriter::SupportedTypes >> LocalTableWriter::DataAlongWithHeartbeat [GOOD] >> LocalTableWriter::WaitTxIds [GOOD] >> LocalTableWriter::WriteTable [GOOD] >> 
LocalTableWriter::ConsistentWrite [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::DataAlongWithHeartbeat [GOOD] Test command err: 2025-06-24T21:15:36.569667Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627610121619800:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:36.569730Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0020fd/r3tmp/tmpUnuwlP/pdisk_1.dat 2025-06-24T21:15:36.974239Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627610121619766:2079] 1750799736563746 != 1750799736563749 2025-06-24T21:15:36.996801Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:15:37.046332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:37.046460Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:37.048352Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15001 TServer::EnableGrpc on GrpcPort 18784, node 1 2025-06-24T21:15:37.355640Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:37.355669Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:15:37.355675Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:37.355808Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:15:37.598069Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15001 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:15:37.971703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:15:38.012748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750799738142 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) 2025-06-24T21:15:38.182713Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618711555044:2353] Handshake: worker# [1:7519627618711555045:2354] 2025-06-24T21:15:38.183238Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618711555044:2353] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:15:38.183683Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618711555044:2353] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] 
} From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-24T21:15:38.183747Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618711555044:2353] Send handshake: worker# [1:7519627618711555045:2354] 2025-06-24T21:15:38.184170Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618711555044:2353] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 48b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 19b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-24T21:15:38.190928Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618711555044:2353] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 10 TxId: 0 } TxId: 1 } 2025-06-24T21:15:38.191139Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618711555044:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 48 }] } 2025-06-24T21:15:38.191371Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627618711555048:2353] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-24T21:15:38.191412Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618711555044:2353] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T21:15:38.191543Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627618711555048:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 1 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-24T21:15:38.193657Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627618711555048:2353] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-24T21:15:38.193748Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618711555044:2353] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T21:15:38.193832Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618711555044:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1] } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::WriteTable [GOOD] Test command err: 2025-06-24T21:15:36.569640Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627611523869389:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:36.569721Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect 
path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002178/r3tmp/tmpuxYG1H/pdisk_1.dat 2025-06-24T21:15:37.000963Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627611523869355:2079] 1750799736563750 != 1750799736563753 2025-06-24T21:15:37.015668Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:15:37.022079Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:37.022316Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:37.044611Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61891 TServer::EnableGrpc on GrpcPort 24038, node 1 2025-06-24T21:15:37.352784Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:37.352811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:15:37.352830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:37.352987Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:15:37.583351Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:61891 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:15:37.995108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:15:38.014833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750799738142 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) 2025-06-24T21:15:38.175351Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627620113804635:2352] Handshake: worker# [1:7519627620113804542:2291] 2025-06-24T21:15:38.175792Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627620113804635:2352] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:15:38.176089Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627620113804635:2352] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-24T21:15:38.176130Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627620113804635:2352] Send handshake: worker# [1:7519627620113804542:2291] 2025-06-24T21:15:38.180224Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, 
LocalPathId: 2][1:7519627620113804635:2352] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 36b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 36b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 36b Offset: 3 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-24T21:15:38.180452Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627620113804635:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 36 },{ Order: 2 BodySize: 36 },{ Order: 3 BodySize: 36 }] } 2025-06-24T21:15:38.183512Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627620113804638:2352] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-24T21:15:38.183589Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627620113804635:2352] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T21:15:38.183708Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627620113804638:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b },{ Order: 2 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b },{ Order: 3 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b }] } 2025-06-24T21:15:38.188702Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627620113804638:2352] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-24T21:15:38.188795Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627620113804635:2352] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T21:15:38.188851Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627620113804635:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1,2,3] } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::ConsistentWrite [GOOD] Test command err: 2025-06-24T21:15:36.569592Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627610065337934:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:36.569677Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0021d2/r3tmp/tmpc2w1RK/pdisk_1.dat 2025-06-24T21:15:36.989985Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription 
[1:7519627610065337900:2079] 1750799736563787 != 1750799736563790 2025-06-24T21:15:37.001060Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:15:37.045575Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:37.045688Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:37.048002Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14924 TServer::EnableGrpc on GrpcPort 13198, node 1 2025-06-24T21:15:37.355626Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:37.355651Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:15:37.355658Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:37.355778Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:15:37.610083Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14924 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:15:38.041287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:15:38.085288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750799738198 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) 2025-06-24T21:15:38.265324Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handshake: worker# [1:7519627618655273085:2291] 2025-06-24T21:15:38.265798Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:15:38.266275Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-24T21:15:38.266342Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Send handshake: worker# [1:7519627618655273085:2291] 2025-06-24T21:15:38.266803Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 48b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 48b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 48b Offset: 3 SeqNo: 0 CreateTime: 
1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-24T21:15:38.271034Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 10 TxId: 0 } TxId: 1 } 2025-06-24T21:15:38.271416Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 48 },{ Order: 2 BodySize: 48 },{ Order: 3 BodySize: 48 }] } 2025-06-24T21:15:38.271622Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627618655273180:2351] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-24T21:15:38.271689Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T21:15:38.271794Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627618655273180:2351] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 1 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b },{ Order: 2 Group: 0 Step: 2 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b },{ Order: 3 Group: 0 Step: 3 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-24T21:15:38.274417Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627618655273180:2351] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-24T21:15:38.274505Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T21:15:38.274581Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1,2,3] } 2025-06-24T21:15:38.275486Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 19b Offset: 4 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-24T21:15:38.276232Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 49b Offset: 5 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 6 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 7 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 
49b Offset: 8 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-24T21:15:38.277068Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 20 TxId: 0 } TxId: 2 } VersionTxIds { Version { Step: 30 TxId: 0 } TxId: 3 } 2025-06-24T21:15:38.277253Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 5 BodySize: 49 },{ Order: 6 BodySize: 49 },{ Order: 7 BodySize: 49 },{ Order: 8 BodySize: 49 }] } 2025-06-24T21:15:38.277499Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627618655273180:2351] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 5 Group: 0 Step: 11 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 6 Group: 0 Step: 12 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 7 Group: 0 Step: 21 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 8 Group: 0 Step: 22 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b }] } 2025-06-24T21:15:38.281915Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627618655273180:2351] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-24T21:15:38.282032Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T21:15:38.282115Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [5,6,7,8] } 2025-06-24T21:15:38.282780Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 49b Offset: 9 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 10 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-24T21:15:38.282942Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 9 BodySize: 49 },{ Order: 10 BodySize: 49 }] } 2025-06-24T21:15:38.283116Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627618655273180:2351] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 9 Group: 0 Step: 13 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 10 Group: 0 Step: 23 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b }] } 2025-06-24T21:15:38.285172Z node 1 :REPLICATION_SERVICE DEBUG: 
base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627618655273180:2351] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-24T21:15:38.285265Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T21:15:38.285318Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [9,10] } 2025-06-24T21:15:38.285868Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627618655273177:2351] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 19b Offset: 11 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::WaitTxIds [GOOD] Test command err: 2025-06-24T21:15:36.569735Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627610472181484:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:36.569840Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0020d3/r3tmp/tmpmaJhLy/pdisk_1.dat 2025-06-24T21:15:37.050646Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:15:37.058196Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627610472181462:2079] 1750799736567787 != 1750799736567790 2025-06-24T21:15:37.067695Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:37.067798Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:37.069800Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20623 TServer::EnableGrpc on GrpcPort 14277, node 1 2025-06-24T21:15:37.355484Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:37.355517Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:15:37.355573Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:37.355755Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:15:37.595247Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20623 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:15:37.973556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:15:38.012992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:15:38.024327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750799738135 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) 2025-06-24T21:15:38.185215Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627619062116742:2352] Handshake: worker# [1:7519627619062116743:2353] 2025-06-24T21:15:38.185556Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627619062116742:2352] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:15:38.185874Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627619062116742:2352] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-24T21:15:38.185929Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627619062116742:2352] Send handshake: worker# [1:7519627619062116743:2353] 2025-06-24T21:15:38.186367Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627619062116742:2352] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 48b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-24T21:15:38.194978Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627619062116742:2352] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 10 TxId: 0 } TxId: 1 } 2025-06-24T21:15:38.195180Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627619062116742:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 48 }] } 2025-06-24T21:15:38.195374Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627619062116746:2352] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-24T21:15:38.195412Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: 
[LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627619062116742:2352] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T21:15:38.195512Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627619062116746:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 1 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-24T21:15:38.199127Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627619062116746:2352] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-24T21:15:38.199220Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627619062116742:2352] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T21:15:38.199277Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627619062116742:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1] } 2025-06-24T21:15:39.187586Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627619062116742:2352] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 20 TxId: 0 } TxId: 2 } 2025-06-24T21:15:39.187703Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627619062116742:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 2 BodySize: 49 }] } 2025-06-24T21:15:39.187830Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627619062116746:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 2 Group: 0 Step: 11 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b }] } 2025-06-24T21:15:39.193351Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627619062116746:2352] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-24T21:15:39.193417Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627619062116742:2352] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T21:15:39.193451Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627619062116742:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [2] } >> PersQueueSdkReadSessionTest::ReadSessionWithExplicitlySpecifiedPartitions [GOOD] >> PersQueueSdkReadSessionTest::SettingsValidation >> LocalTableWriter::DecimalKeys [GOOD] >> TopicAutoscaling::PartitionSplit_AutosplitByLoad_AfterAlter [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery [GOOD] >> BasicUsage::TWriteSession_WriteEncoded >> TCdcStreamTests::VirtualTimestamps ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::DecimalKeys [GOOD] Test command err: 2025-06-24T21:15:38.108485Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627617651310746:2191];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:38.108574Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002089/r3tmp/tmp12guLL/pdisk_1.dat 2025-06-24T21:15:38.462011Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:15:38.468629Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627617651310572:2079] 1750799738090265 != 1750799738090268 2025-06-24T21:15:38.547572Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:38.547724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:38.556815Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3997 TServer::EnableGrpc on GrpcPort 4696, node 1 2025-06-24T21:15:38.820003Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:38.820049Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:15:38.820058Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:38.820248Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3997 2025-06-24T21:15:39.108627Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:15:39.238487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:15:39.264180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:15:39.270021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750799739388 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Decimal(1,0)" TypeId: 4865 Id: 1 NotNull: false TypeInfo { DecimalPrecision: 1 DecimalScale: 0 } IsBuildInProgress: false } Columns { Name: "value" Type: "Decimal(35,10)" TypeId: 4865 I... 
(TRUNCATED) 2025-06-24T21:15:39.423442Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627621946278555:2352] Handshake: worker# [1:7519627621946278462:2291] 2025-06-24T21:15:39.423883Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627621946278555:2352] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:15:39.424302Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627621946278555:2352] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Decimal(1,0) : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-24T21:15:39.424359Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627621946278555:2352] Send handshake: worker# [1:7519627621946278462:2291] 2025-06-24T21:15:39.424795Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627621946278555:2352] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 57b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 57b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 57b Offset: 3 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-24T21:15:39.425011Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627621946278555:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 57 },{ Order: 2 BodySize: 57 },{ Order: 3 BodySize: 57 }] } 2025-06-24T21:15:39.425178Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627621946278558:2352] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-24T21:15:39.425218Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627621946278555:2352] Handle 
NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T21:15:39.425297Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627621946278558:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b },{ Order: 2 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b },{ Order: 3 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b }] } 2025-06-24T21:15:39.432577Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627621946278558:2352] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-24T21:15:39.432666Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627621946278555:2352] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T21:15:39.432712Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627621946278555:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1,2,3] } >> TCdcStreamTests::Basic >> CommitOffset::Commit_WithSession_ToPastParentPartition [GOOD] >> LocalTableWriter::SupportedTypes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::PartitionSplit_AutosplitByLoad_AfterAlter [GOOD] Test command err: 2025-06-24T21:12:43.848307Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626868220024101:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:43.848364Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003127/r3tmp/tmpvhUsD0/pdisk_1.dat 2025-06-24T21:12:44.182381Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:12:44.406570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:44.406708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:44.409176Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:44.445009Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:44.448643Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626868220024074:2079] 1750799563847197 != 1750799563847200 TServer::EnableGrpc on GrpcPort 8663, node 1 2025-06-24T21:12:44.543844Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/003127/r3tmp/yandexzCNuzg.tmp 2025-06-24T21:12:44.543894Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: 
/home/runner/.ya/build/build_root/2btm/003127/r3tmp/yandexzCNuzg.tmp 2025-06-24T21:12:44.544145Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/003127/r3tmp/yandexzCNuzg.tmp 2025-06-24T21:12:44.544276Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:44.638035Z INFO: TTestServer started on Port 24901 GrpcPort 8663 TClient is connected to server localhost:24901 2025-06-24T21:12:44.872272Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; PQClient connected to localhost:8663 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:45.006805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:12:45.071088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:12:45.297343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:12:47.306928Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626885399894054:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.307098Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.311302Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626885399894067:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.339708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:47.359333Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626885399894069:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-24T21:12:47.425065Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626885399894133:2444] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:47.793564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.802058Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626885399894141:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:12:47.802928Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YzQ3M2I5YzgtY2RmYjc5ZGEtZDYyYzAzNTctNzA0MWQ3NzE=, ActorId: [1:7519626885399894050:2297], ActorState: ExecuteState, TraceId: 01jyhwjje88hk88ddztymjgfyg, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:12:47.806029Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:12:47.874881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:48.001608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519626889694861726:2620] 2025-06-24T21:12:48.851358Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626868220024101:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:48.851491Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-24T21:12:54.247506Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-24T21:12:54.279579Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T21:12:54.282799Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519626915464665698:2697], Recipient [1:7519626872514991690:2140]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:12:54.282831Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:12:54.282843Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:12:54.282890Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519626915464665694:2694], Recipient [1:7519626872514991690:2140]: {TEvModifySchemeTransaction txid# 281474976715673 TabletId# 72057594046644480} 2025-06-24T21:12:54.282908Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T21:12:54.357951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfile ... E TRACE: partition.h:607: StateIdle, processing event TEvPQ::TEvPipeDisconnected 2025-06-24T21:15:41.033984Z node 7 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-24T21:15:41.034033Z node 7 :PERSQUEUE TRACE: partition_write.cpp:910: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessChangeOwnerRequests. 2025-06-24T21:15:41.034085Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:41.034164Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:41.034200Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:41.034243Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-24T21:15:41.034322Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188506 (NKikimr::TEvPQ::TEvPipeDisconnected), Tablet [7:7519627538828251620:2414], Partition 0, Sender [7:7519627538828251620:2414], Recipient [7:7519627538828251680:2418], Cookie: 0 2025-06-24T21:15:41.034392Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188506, Sender [7:7519627538828251620:2414], Recipient [7:7519627538828251680:2418]: NKikimr::TEvPQ::TEvPipeDisconnected 2025-06-24T21:15:41.034420Z node 7 :PERSQUEUE TRACE: partition.h:607: StateIdle, processing event TEvPQ::TEvPipeDisconnected 2025-06-24T21:15:41.034456Z node 7 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-24T21:15:41.034498Z node 7 :PERSQUEUE TRACE: partition_write.cpp:910: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessChangeOwnerRequests. 2025-06-24T21:15:41.034534Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:41.034604Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:41.034636Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:41.034671Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:41.035444Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 21 DataSize: 0 UsedReserveSize: 0 2025-06-24T21:15:41.035587Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][test-topic] ProcessPendingStats. 
PendingUpdates size 0 2025-06-24T21:15:41.035947Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271188001, Sender [7:7519627538828251621:2415], Recipient [7:7519627491583610314:2142]: NKikimrPQ.TEvPeriodicTopicStats PathId: 13 Generation: 1 Round: 21 DataSize: 0 UsedReserveSize: 0 SubDomainOutOfSpace: false 2025-06-24T21:15:41.035984Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4989: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2025-06-24T21:15:41.036006Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046644480, LocalPathId: 13] DataSize 0 UsedReserveSize 0 2025-06-24T21:15:41.036037Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.099994s, queue# 1 2025-06-24T21:15:41.117125Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627611842697581:2930], Partition 3, Sender [0:0:0], Recipient [7:7519627611842697674:2938], Cookie: 0 2025-06-24T21:15:41.117202Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627611842697674:2938]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.117231Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.117274Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:41.117357Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:41.117396Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:41.117436Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:41.117481Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627590367860566:2791], Partition 2, Sender [0:0:0], Recipient [7:7519627590367860642:2798], Cookie: 0 2025-06-24T21:15:41.117514Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627590367860642:2798]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.117533Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.117554Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:41.117581Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:41.117594Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:41.117608Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-24T21:15:41.117643Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627611842697586:2931], Partition 4, Sender [0:0:0], Recipient [7:7519627611842697675:2939], Cookie: 0 2025-06-24T21:15:41.117676Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627611842697675:2939]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.117690Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.117710Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:41.117736Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:41.117751Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:41.117767Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:41.117805Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627538828251620:2414], Partition 0, Sender [0:0:0], Recipient [7:7519627538828251680:2418], Cookie: 0 2025-06-24T21:15:41.117844Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627538828251680:2418]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.117856Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.117878Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:41.117908Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:41.117927Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:41.117944Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:41.117983Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627590367860570:2792], Partition 1, Sender [0:0:0], Recipient [7:7519627590367860645:2800], Cookie: 0 2025-06-24T21:15:41.118014Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627590367860645:2800]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.118031Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.118053Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:41.118080Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:41.118093Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:41.118111Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:41.136448Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435095, Sender [0:0:0], Recipient [7:7519627491583610314:2142]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTopicStats 2025-06-24T21:15:41.136509Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5128: StateWork, processing event TEvPrivate::TEvPersistTopicStats 2025-06-24T21:15:41.136526Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-24T21:15:41.136541Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:128: Will execute TTxStoreStats, queue# 1 2025-06-24T21:15:41.136599Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.000000s, queue# 1 2025-06-24T21:15:41.136916Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435095, Sender [0:0:0], Recipient [7:7519627491583610314:2142]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTopicStats 2025-06-24T21:15:41.136941Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5128: StateWork, processing event TEvPrivate::TEvPersistTopicStats 2025-06-24T21:15:41.136954Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 >> KqpQuery::QueryCacheTtl >> KqpExplain::Explain >> KqpQuery::QueryClientTimeout >> KqpStats::DeferredEffects+UseSink >> KqpQuery::CurrentUtcTimestamp ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::SupportedTypes [GOOD] Test command err: 2025-06-24T21:15:40.001560Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627628832883323:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:40.001632Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002071/r3tmp/tmpID1J42/pdisk_1.dat 2025-06-24T21:15:40.369342Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:15:40.413351Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:40.413501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:40.415666Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23859 TServer::EnableGrpc on GrpcPort 20730, node 1 2025-06-24T21:15:40.626510Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:40.626539Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try 
to initialize from file: (empty maybe) 2025-06-24T21:15:40.626567Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:40.626763Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23859 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:15:41.010659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:15:41.027001Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:15:41.037446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750799741145 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "int32_value" Type: "Int32" TypeId: 1 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "ui... 
(TRUNCATED) 2025-06-24T21:15:41.170932Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627633127851267:2350] Handshake: worker# [1:7519627633127851174:2289] 2025-06-24T21:15:41.171464Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627633127851267:2350] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:15:41.171843Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627633127851267:2350] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-24T21:15:41.171905Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627633127851267:2350] Send handshake: worker# [1:7519627633127851174:2289] 2025-06-24T21:15:41.175237Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627633127851267:2350] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 45b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 3 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 4 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 41b Offset: 5 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 41b Offset: 6 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 7 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 44b Offset: 8 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 66b Offset: 9 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 71b Offset: 10 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 
72b Offset: 11 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 12 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 48b Offset: 13 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 51b Offset: 14 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 58b Offset: 15 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 51b Offset: 16 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 54b Offset: 17 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 57b Offset: 18 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 76b Offset: 19 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 20 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 54b Offset: 21 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 61b Offset: 22 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 51b Offset: 23 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 24 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 46b Offset: 25 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 47b Offset: 26 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 50b Offset: 27 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 28 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 72b Offset: 29 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 57b Offset: 30 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 64b Offset: 31 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-24T21:15:41.176254Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627633127851267:2350] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 45 },{ Order: 2 BodySize: 45 },{ Order: 3 BodySize: 45 },{ Order: 4 BodySize: 45 },{ Order: 5 BodySize: 41 },{ Order: 6 BodySize: 41 },{ Order: 7 BodySize: 45 },{ Order: 8 BodySize: 44 },{ Order: 9 BodySize: 66 },{ Order: 10 BodySize: 71 },{ Order: 11 BodySize: 72 },{ Order: 12 BodySize: 49 },{ Order: 13 BodySize: 48 },{ Order: 14 BodySize: 51 },{ Order: 15 BodySize: 58 },{ Order: 16 BodySize: 51 },{ Order: 17 BodySize: 54 },{ Order: 18 BodySize: 57 },{ Order: 19 BodySize: 76 },{ Order: 20 BodySize: 45 },{ Order: 21 BodySize: 54 },{ Order: 22 BodySize: 61 },{ Order: 23 BodySize: 51 },{ Order: 24 BodySize: 45 },{ Order: 25 BodySize: 46 },{ Order: 26 BodySize: 47 },{ Order: 27 BodySize: 50 },{ Order: 28 BodySize: 49 },{ Order: 29 BodySize: 72 },{ Order: 30 BodySize: 57 },{ Order: 31 BodySize: 64 }] } 2025-06-24T21:15:41.176798Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: 
[TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627633127851270:2350] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-24T21:15:41.176874Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627633127851267:2350] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T21:15:41.177212Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627633127851270:2350] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 2 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 3 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 4 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 5 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 41b },{ Order: 6 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 41b },{ Order: 7 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 8 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 44b },{ Order: 9 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 66b },{ Order: 10 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 71b },{ Order: 11 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 72b },{ Order: 12 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 13 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b },{ Order: 14 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 51b },{ Order: 15 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 58b },{ Order: 16 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 51b },{ Order: 17 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 54b },{ Order: 18 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b },{ Order: 19 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 76b },{ Order: 20 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 21 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 54b },{ Order: 22 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 61b },{ Order: 23 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 51b },{ Order: 24 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 25 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 46b },{ Order: 26 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 47b },{ Order: 27 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 50b },{ Order: 28 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 29 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 72b },{ Order: 30 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b },{ Order: 31 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 64b }] } 2025-06-24T21:15:41.250632Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: 
[TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519627633127851270:2350] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-24T21:15:41.250714Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627633127851267:2350] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-24T21:15:41.250809Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519627633127851267:2350] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31] } >> KqpLimits::DatashardProgramSize+useSink >> TCdcStreamTests::VirtualTimestamps [GOOD] >> TCdcStreamTests::ResolvedTimestamps >> TCdcStreamTests::Basic [GOOD] >> TCdcStreamTests::Attributes |95.9%| [TA] $(B)/ydb/core/tx/replication/service/ut_table_writer/test-results/unittest/{meta.json ... results_accumulator.log} |95.9%| [TA] {RESULT} $(B)/ydb/core/tx/replication/service/ut_table_writer/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> CommitOffset::Commit_WithSession_ToPastParentPartition [GOOD] Test command err: 2025-06-24T21:12:43.596086Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626866417578490:2173];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:43.609511Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003147/r3tmp/tmpGoOmSM/pdisk_1.dat 2025-06-24T21:12:43.851435Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:12:44.111957Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:44.120518Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:44.120608Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:44.183403Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14914, node 1 2025-06-24T21:12:44.389904Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/003147/r3tmp/yandexBI4dS6.tmp 2025-06-24T21:12:44.389929Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/003147/r3tmp/yandexBI4dS6.tmp 2025-06-24T21:12:44.395274Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/003147/r3tmp/yandexBI4dS6.tmp 2025-06-24T21:12:44.395444Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:44.580500Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:44.637946Z INFO: TTestServer started on Port 30179 GrpcPort 14914 TClient is connected to server localhost:30179 PQClient connected to localhost:14914 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:45.096467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:45.113582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:12:45.129263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:12:45.135327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T21:12:45.282784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-24T21:12:47.050120Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626883597448335:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.050886Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626883597448323:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.051013Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:47.055400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:12:47.078240Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626883597448338:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-24T21:12:47.318186Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626883597448402:2443] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:12:47.472570Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519626883597448417:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:12:47.480426Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NWIzNGIyNGQtOTA5ZTY4MmMtNWUxNTFjNDctMTIwOTQ4OGY=, ActorId: [1:7519626883597448321:2297], ActorState: ExecuteState, TraceId: 01jyhwjj5z616nj69tcahjn77b, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:12:47.485771Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:12:47.598141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.683648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:47.754916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519626887892416005:2622] 2025-06-24T21:12:48.580876Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626866417578490:2173];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:48.580946Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-24T21:12:54.239730Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-24T21:12:54.261337Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-24T21:12:54.262534Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519626913662219977:2699], Recipient [1:7519626866417578672:2145]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:12:54.262565Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:12:54.262579Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:12:54.262617Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519626913662219973:2696], Recipient [1:7519626866417578672:2145]: {TEvModifySchemeTransaction txid# 281474976715673 TabletId# 72057594046644480} 2025-06-24T21:12:54.262634Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T21:12:54.347640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSecond ... teIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627617273130436:2864], Partition 4, Sender [0:0:0], Recipient [7:7519627617273130515:2870], Cookie: 0 2025-06-24T21:15:41.844680Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627617273130515:2870]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.844712Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.844763Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:41.844837Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:41.844865Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:41.844891Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-24T21:15:41.857068Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627617273130435:2863], Partition 3, Sender [0:0:0], Recipient [7:7519627617273130527:2873], Cookie: 0 2025-06-24T21:15:41.857178Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627617273130527:2873]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.857225Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.857287Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:41.857393Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:41.857447Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:41.857494Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:41.869809Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627600093260771:2725], Partition 1, Sender [0:0:0], Recipient [7:7519627600093260851:2734], Cookie: 0 2025-06-24T21:15:41.869915Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627600093260851:2734]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.869975Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.870037Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:41.870159Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:41.870208Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:41.870248Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-24T21:15:41.876494Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:7519627518488880009:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:15:41.876564Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:15:41.876615Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [7:7519627518488880009:2153], Recipient [7:7519627518488880009:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:15:41.876638Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:15:41.886642Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627600093260770:2724], Partition 2, Sender [0:0:0], Recipient [7:7519627600093260848:2732], Cookie: 0 2025-06-24T21:15:41.886742Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627600093260848:2732]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.886787Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.886847Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:41.886948Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:41.887011Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:41.887080Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:41.887277Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627565733521276:2409], Partition 0, Sender [0:0:0], Recipient [7:7519627565733521338:2413], Cookie: 0 2025-06-24T21:15:41.887381Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627565733521338:2413]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.887422Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.887480Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:41.887573Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:41.887612Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:41.887653Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-24T21:15:41.900496Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 270794757, Sender [0:0:0], Recipient [7:7519627565733521276:2409]: NKikimr::TEvKeyValue::TEvPeriodicRefresh 2025-06-24T21:15:41.902996Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 65538, Sender [0:0:0], Recipient [7:7519627565733521276:2409]: NActors::TEvents::TEvWakeup 2025-06-24T21:15:41.924066Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 65538 (NActors::TEvents::TEvWakeup), Tablet [7:7519627565733521276:2409], Partition 0, Sender [0:0:0], Recipient [7:7519627565733521338:2413], Cookie: 0 2025-06-24T21:15:41.924164Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 65538, Sender [0:0:0], Recipient [7:7519627565733521338:2413]: NActors::TEvents::TEvWakeup 2025-06-24T21:15:41.924430Z node 7 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=471, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:15:41.924574Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271188501, Sender [7:7519627565733521338:2413], Recipient [7:7519627565733521276:2409]: NKikimr::TEvPQ::TEvPartitionCounters 2025-06-24T21:15:41.924647Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5276: HandleHook, processing event TEvPQ::TEvPartitionCounters 2025-06-24T21:15:41.924690Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:1266: [PQ: 72075186224037892] Handle TEvPQ::TEvPartitionCounters PartitionId 0 2025-06-24T21:15:41.945073Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627617273130436:2864], Partition 4, Sender [0:0:0], Recipient [7:7519627617273130515:2870], Cookie: 0 2025-06-24T21:15:41.945186Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627617273130515:2870]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.945230Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.945288Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:41.945403Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:41.945447Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:41.945492Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-24T21:15:41.961986Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627617273130435:2863], Partition 3, Sender [0:0:0], Recipient [7:7519627617273130527:2873], Cookie: 0 2025-06-24T21:15:41.962077Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627617273130527:2873]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.962123Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:15:41.962183Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:15:41.962283Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:15:41.962326Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:15:41.962374Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:15:41.970149Z node 7 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519627600093260771:2725], Partition 1, Sender [0:0:0], Recipient [7:7519627600093260851:2734], Cookie: 0 2025-06-24T21:15:41.970261Z node 7 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519627600093260851:2734]: NKikimr::TEvPQ::TEvUpdateAvailableSize >> ColumnShardTiers::DSConfigsStub [GOOD] >> KqpQuery::RewriteIfPresentToMap >> TCdcStreamTests::Attributes [GOOD] >> TCdcStreamTests::DocApi >> TCdcStreamTests::ResolvedTimestamps [GOOD] >> TCdcStreamTests::SchemaChanges ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::DSConfigsStub [GOOD] Test command err: 2025-06-24T21:13:57.295852Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:13:57.296248Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:13:57.296433Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004993/r3tmp/tmp1wfCen/pdisk_1.dat 2025-06-24T21:13:57.628497Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 31278, node 1 TClient is connected to server localhost:25532 2025-06-24T21:13:57.853786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:13:57.909427Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:57.909509Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:57.909544Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:57.909898Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:57.914501Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:57.914980Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799634370125 != 1750799634370129 2025-06-24T21:13:57.961534Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:57.961683Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:57.973205Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:58.100486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_store.cpp:451) 2025-06-24T21:13:58.219395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:13:58.219658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:13:58.219950Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:13:58.220057Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:13:58.220155Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:13:58.220230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:13:58.220300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:13:58.220371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:13:58.220449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:13:58.220575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:13:58.220728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:13:58.238032Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:128;event=start_subscribing_metadata; 2025-06-24T21:13:58.240021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:13:58.240175Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:13:58.240351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:13:58.240412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:13:58.240620Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:13:58.240674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:13:58.240786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:13:58.240854Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:13:58.240919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:13:58.240970Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:13:58.241195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:13:58.241236Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:13:58.241391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:13:58.241448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:13:58.241561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:13:58.241609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:13:58.241708Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:13:58.241769Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:13:58.241799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:13:58.242461Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:13:58.242526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:13:58.267827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:693:2572];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:13:58.267933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:693:2572];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:13:58.268184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:693:2572];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:13:58.268322Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:693:2572];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register; ... ROXY ERROR: schemereq.cpp:553: Actor# [1:3530:4660] txid# 281474976715753, issues: { message: "Other entities depend on this data source, please remove them at the beginning: /Root/olapStore/olapTable" severity: 1 } REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;RESULT=
: Error: Execution, code: 1060
:1:27: Error: Executing DROP OBJECT EXTERNAL_DATA_SOURCE
: Error:
: Error: Other entities depend on this data source, please remove them at the beginning: /Root/olapStore/olapTable, code: 2003 , code: 2003 ;EXPECTATION=0 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=0;WAITING=1 REQUEST=DROP TABLE `/Root/olapStore/olapTable`;EXPECTATION=1;WAITING=1 2025-06-24T21:15:20.177216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropTable, opId: 281474976715764:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/drop_table.cpp:438) 2025-06-24T21:15:21.157414Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715764;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715764; 2025-06-24T21:15:21.158153Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715764;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715764; 2025-06-24T21:15:21.158383Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715764;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715764; REQUEST=DROP TABLE `/Root/olapStore/olapTable`;RESULT=
: Info: Execution, code: 1060
:1:12: Info: Executing DROP TABLE
: Info: Success, code: 4 ;EXPECTATION=1 FINISHED_REQUEST=DROP TABLE `/Root/olapStore/olapTable`;EXPECTATION=1;WAITING=1 REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 2025-06-24T21:15:31.653721Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-24T21:15:31.654495Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-24T21:15:31.654563Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-06-24T21:15:31.654630Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-24T21:15:31.654695Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier2' at tablet 0 2025-06-24T21:15:31.654751Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 2025-06-24T21:15:31.654827Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier2' started at tablet 0 2025-06-24T21:15:31.654899Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:31.655536Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-24T21:15:31.655596Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-24T21:15:31.655635Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-24T21:15:31.656434Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-24T21:15:31.656481Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037888;has_config=0; 2025-06-24T21:15:31.656520Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037888 2025-06-24T21:15:31.656559Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier2' at tablet 72075186224037888 2025-06-24T21:15:31.656590Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037888 2025-06-24T21:15:31.656640Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier2' started at tablet 72075186224037888 2025-06-24T21:15:31.656694Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:31.656729Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-24T21:15:31.656756Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037889;has_config=0; 2025-06-24T21:15:31.656784Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037889 2025-06-24T21:15:31.656813Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier2' at tablet 72075186224037889 2025-06-24T21:15:31.656837Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037889 
2025-06-24T21:15:31.656875Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier2' started at tablet 72075186224037889 2025-06-24T21:15:31.656915Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:31.657064Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-24T21:15:31.657094Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037890;has_config=0; 2025-06-24T21:15:31.657127Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037890 2025-06-24T21:15:31.657158Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier2' at tablet 72075186224037890 2025-06-24T21:15:31.657182Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037890 2025-06-24T21:15:31.657219Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier2' started at tablet 72075186224037890 2025-06-24T21:15:31.657259Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:31.657934Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-06-24T21:15:31.658034Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:693:2572];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-06-24T21:15:31.658101Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:697:2576];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;EXPECTATION=1;WAITING=1 2025-06-24T21:15:42.997981Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2025-06-24T21:15:42.998103Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2025-06-24T21:15:42.998151Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2025-06-24T21:15:42.998617Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2025-06-24T21:15:42.999283Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-24T21:15:42.999375Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037888;has_config=0; 2025-06-24T21:15:42.999456Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037888 2025-06-24T21:15:42.999568Z node 1 :TX_TIERING DEBUG: log.cpp:784: 
fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:42.999643Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-24T21:15:42.999680Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037889;has_config=0; 2025-06-24T21:15:42.999713Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037889 2025-06-24T21:15:42.999766Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:42.999945Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-24T21:15:42.999991Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037890;has_config=0; 2025-06-24T21:15:43.000026Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037890 2025-06-24T21:15:43.000079Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:43.000430Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-24T21:15:43.000480Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=0;has_config=0; 2025-06-24T21:15:43.000516Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 2025-06-24T21:15:43.000578Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:43.001448Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:690:2570];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-06-24T21:15:43.001562Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:693:2572];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-06-24T21:15:43.001638Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:697:2576];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;EXPECTATION=1;WAITING=1 >> KqpQuery::UdfTerminate >> TCdcStreamTests::DocApi [GOOD] >> TCdcStreamTests::DocApiNegative >> KqpExplain::ExplainStream >> TCdcStreamTests::SchemaChanges [GOOD] >> TCdcStreamTests::RetentionPeriod |95.9%| [TA] $(B)/ydb/core/persqueue/ut/ut_with_sdk/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TCdcStreamTests::DocApiNegative [GOOD] >> TCdcStreamTests::Negative >> KqpLimits::KqpMkqlMemoryLimitException >> TCdcStreamTests::RetentionPeriod [GOOD] >> TCdcStreamTests::TopicPartitions >> TCdcStreamTests::Negative [GOOD] >> TCdcStreamTests::DisableProtoSourceIdInfo |95.9%| [TA] {RESULT} $(B)/ydb/core/persqueue/ut/ut_with_sdk/test-results/unittest/{meta.json ... results_accumulator.log} >> TCdcStreamTests::DisableProtoSourceIdInfo [GOOD] >> TCdcStreamTests::CreateStream >> KqpQuery::CurrentUtcTimestamp [GOOD] >> KqpQuery::DdlInDataQuery >> TCdcStreamTests::TopicPartitions [GOOD] >> TCdcStreamTests::ReplicationAttribute >> YdbIndexTable::OnlineBuildWithDataColumn [GOOD] >> KqpExplain::Explain [GOOD] >> KqpExplain::ExplainDataQuery >> TCdcStreamTests::CreateStream [GOOD] >> TCdcStreamTests::AlterStream >> KqpStats::DeferredEffects+UseSink [GOOD] >> KqpStats::DataQueryWithEffects+UseSink >> KqpLimits::DatashardProgramSize+useSink [GOOD] >> KqpLimits::DatashardProgramSize-useSink >> KqpQuery::RewriteIfPresentToMap [GOOD] >> KqpQuery::RowsLimit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::OnlineBuildWithDataColumn [GOOD] Test command err: Trying to start YDB, gRPC: 31039, MsgBus: 5533 2025-06-24T21:11:26.304360Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626536159388413:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:26.304436Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003f42/r3tmp/tmp5tn7yq/pdisk_1.dat 2025-06-24T21:11:26.688677Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519626536159388394:2079] 1750799486303354 != 1750799486303357 2025-06-24T21:11:26.696860Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:26.738029Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:26.738124Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:26.739947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31039, node 1 2025-06-24T21:11:26.875914Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:26.875945Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:26.875953Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:26.876087Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:27.314154Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5533 TClient is connected to server localhost:5533 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:27.834051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:11:27.876828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:28.055616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:28.285928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:28.384286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:11:29.780502Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626549044291909:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:29.780599Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.457534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.499027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.578741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.607867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.643426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.680125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.711596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:30.803242Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626553339259870:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.803329Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.803446Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626553339259875:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:11:30.806522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:11:30.815571Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519626553339259877:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:11:30.913980Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626553339259928:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:11:31.326194Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519626536159388413:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:31.326253Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:11:32.121348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:11:33.053905Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { Tr ... Ctx: { TraceId: 01jyhwr1jpfjd41mm4t12cs8n6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWRmNzk3NTctOWUwNmViZGItOGMzOTc0N2UtNWFiZmVjMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:46.660062Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720639. Ctx: { TraceId: 01jyhwr1jpfjd41mm4t12cs8n6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWRmNzk3NTctOWUwNmViZGItOGMzOTc0N2UtNWFiZmVjMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:46.703007Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720640. Ctx: { TraceId: 01jyhwr1m8e5w7wds0rvsvazfb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQyOWYxOTQtN2EzNjcxYTQtNzY3MjM4MmYtNzkyMzIwODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:46.713279Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720641. Ctx: { TraceId: 01jyhwr1m8e5w7wds0rvsvazfb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQyOWYxOTQtN2EzNjcxYTQtNzY3MjM4MmYtNzkyMzIwODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:46.772996Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720642. Ctx: { TraceId: 01jyhwr1pc8r97m3axc9jageeg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWRmNzk3NTctOWUwNmViZGItOGMzOTc0N2UtNWFiZmVjMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:46.780642Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720643. Ctx: { TraceId: 01jyhwr1pc8r97m3axc9jageeg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWRmNzk3NTctOWUwNmViZGItOGMzOTc0N2UtNWFiZmVjMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:46.823237Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720644. 
Ctx: { TraceId: 01jyhwr1r1ex394wn8s5mgqac4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQyOWYxOTQtN2EzNjcxYTQtNzY3MjM4MmYtNzkyMzIwODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:46.869849Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720645. Ctx: { TraceId: 01jyhwr1r1ex394wn8s5mgqac4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQyOWYxOTQtN2EzNjcxYTQtNzY3MjM4MmYtNzkyMzIwODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:46.908353Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720646. Ctx: { TraceId: 01jyhwr1tpbpwcdn1zqvnzjyme, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWRmNzk3NTctOWUwNmViZGItOGMzOTc0N2UtNWFiZmVjMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:46.918112Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720647. Ctx: { TraceId: 01jyhwr1tpbpwcdn1zqvnzjyme, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWRmNzk3NTctOWUwNmViZGItOGMzOTc0N2UtNWFiZmVjMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:46.981513Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720648. Ctx: { TraceId: 01jyhwr1wq1fmdnqdbfz2gxwby, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQyOWYxOTQtN2EzNjcxYTQtNzY3MjM4MmYtNzkyMzIwODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:46.993177Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720649. Ctx: { TraceId: 01jyhwr1wq1fmdnqdbfz2gxwby, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQyOWYxOTQtN2EzNjcxYTQtNzY3MjM4MmYtNzkyMzIwODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.043830Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720650. Ctx: { TraceId: 01jyhwr1yz7dyvj9ysdp0d08bq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWRmNzk3NTctOWUwNmViZGItOGMzOTc0N2UtNWFiZmVjMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.050230Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720651. Ctx: { TraceId: 01jyhwr1yz7dyvj9ysdp0d08bq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWRmNzk3NTctOWUwNmViZGItOGMzOTc0N2UtNWFiZmVjMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.099301Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720652. Ctx: { TraceId: 01jyhwr20pdpfs121c4be9cnt8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQyOWYxOTQtN2EzNjcxYTQtNzY3MjM4MmYtNzkyMzIwODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.106692Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720653. Ctx: { TraceId: 01jyhwr20pdpfs121c4be9cnt8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQyOWYxOTQtN2EzNjcxYTQtNzY3MjM4MmYtNzkyMzIwODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.188468Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720654. 
Ctx: { TraceId: 01jyhwr23b222y6sg3fg3qynfd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWRmNzk3NTctOWUwNmViZGItOGMzOTc0N2UtNWFiZmVjMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.197405Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720655. Ctx: { TraceId: 01jyhwr23b222y6sg3fg3qynfd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWRmNzk3NTctOWUwNmViZGItOGMzOTc0N2UtNWFiZmVjMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.249498Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720656. Ctx: { TraceId: 01jyhwr25cdjm90wpsdwy7rnkz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQyOWYxOTQtN2EzNjcxYTQtNzY3MjM4MmYtNzkyMzIwODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.259251Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720657. Ctx: { TraceId: 01jyhwr25cdjm90wpsdwy7rnkz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQyOWYxOTQtN2EzNjcxYTQtNzY3MjM4MmYtNzkyMzIwODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.304003Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720658. Ctx: { TraceId: 01jyhwr27269nznpp7j0v615q2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWRmNzk3NTctOWUwNmViZGItOGMzOTc0N2UtNWFiZmVjMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.314410Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720659. Ctx: { TraceId: 01jyhwr27269nznpp7j0v615q2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWRmNzk3NTctOWUwNmViZGItOGMzOTc0N2UtNWFiZmVjMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.361800Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720660. Ctx: { TraceId: 01jyhwr28v1sa2njw7atc8csac, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQyOWYxOTQtN2EzNjcxYTQtNzY3MjM4MmYtNzkyMzIwODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.408198Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720661. Ctx: { TraceId: 01jyhwr28v1sa2njw7atc8csac, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQyOWYxOTQtN2EzNjcxYTQtNzY3MjM4MmYtNzkyMzIwODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.450385Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720662. Ctx: { TraceId: 01jyhwr2bm3heg00zc9vrm15as, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWRmNzk3NTctOWUwNmViZGItOGMzOTc0N2UtNWFiZmVjMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.460128Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720663. Ctx: { TraceId: 01jyhwr2bm3heg00zc9vrm15as, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWRmNzk3NTctOWUwNmViZGItOGMzOTc0N2UtNWFiZmVjMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.503830Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720664. 
Ctx: { TraceId: 01jyhwr2d90crmjkjbfkbr9sbr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQyOWYxOTQtN2EzNjcxYTQtNzY3MjM4MmYtNzkyMzIwODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.512663Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720665. Ctx: { TraceId: 01jyhwr2d90crmjkjbfkbr9sbr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQyOWYxOTQtN2EzNjcxYTQtNzY3MjM4MmYtNzkyMzIwODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.550179Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720666. Ctx: { TraceId: 01jyhwr2epa78tjd2zfw30jete, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWRmNzk3NTctOWUwNmViZGItOGMzOTc0N2UtNWFiZmVjMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.557891Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720667. Ctx: { TraceId: 01jyhwr2epa78tjd2zfw30jete, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWRmNzk3NTctOWUwNmViZGItOGMzOTc0N2UtNWFiZmVjMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.606412Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720668. Ctx: { TraceId: 01jyhwr2gcdaznne3r4rnkxy9f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQyOWYxOTQtN2EzNjcxYTQtNzY3MjM4MmYtNzkyMzIwODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.612884Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720669. Ctx: { TraceId: 01jyhwr2gcdaznne3r4rnkxy9f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQyOWYxOTQtN2EzNjcxYTQtNzY3MjM4MmYtNzkyMzIwODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.649018Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720670. Ctx: { TraceId: 01jyhwr2hw5kpykzstcsp1r5bx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWRmNzk3NTctOWUwNmViZGItOGMzOTc0N2UtNWFiZmVjMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:15:47.656666Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720671. Ctx: { TraceId: 01jyhwr2hw5kpykzstcsp1r5bx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWRmNzk3NTctOWUwNmViZGItOGMzOTc0N2UtNWFiZmVjMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS >> TCdcStreamTests::ReplicationAttribute [GOOD] >> TCdcStreamTests::StreamOnIndexTableNegative >> KqpQuery::UdfTerminate [GOOD] >> KqpQuery::UdfMemoryLimit >> TCdcStreamTests::AlterStream [GOOD] >> TCdcStreamTests::DropStream |95.9%| [TA] $(B)/ydb/core/kqp/ut/idx_test/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpExplain::ExplainStream [GOOD] >> KqpExplain::ExplainScanQueryWithParams >> TCdcStreamTests::StreamOnIndexTableNegative [GOOD] >> TCdcStreamTests::StreamOnIndexTable |95.9%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/idx_test/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpQuery::QueryClientTimeout [GOOD] >> KqpQuery::QueryClientTimeoutPrecompiled >> KqpParams::ExplicitSameParameterTypesQueryCacheCheck >> KqpQuery::QueryCacheTtl [GOOD] >> KqpQuery::QueryCachePermissionsLoss >> YdbOlapStore::LogTsRangeDescending [GOOD] >> TCdcStreamTests::DropStream [GOOD] >> TCdcStreamTests::AlterStreamImplShouldFail >> TCdcStreamTests::StreamOnIndexTable [GOOD] >> TCdcStreamTests::StreamOnBuildingIndexTable >> ColumnShardTiers::DSConfigs [GOOD] >> KqpLimits::KqpMkqlMemoryLimitException [GOOD] >> KqpLimits::LargeParametersAndMkqlFailure >> ColumnShardTiers::DSConfigsWithQueryServiceDdl [GOOD] >> KqpQuery::DdlInDataQuery [GOOD] >> KqpQuery::CreateAsSelect_BadCases >> PersQueueSdkReadSessionTest::SettingsValidation [GOOD] >> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds >> TCdcStreamTests::AlterStreamImplShouldFail [GOOD] >> TCdcStreamTests::DropStreamImplShouldFail ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::DSConfigs [GOOD] Test command err: 2025-06-24T21:13:53.662143Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:13:53.662604Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:13:53.662785Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049ae/r3tmp/tmpOjHhRz/pdisk_1.dat 2025-06-24T21:13:54.013424Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 31714, node 1 TClient is connected to server localhost:10245 2025-06-24T21:13:54.236863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:13:54.271606Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:54.271679Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:54.271708Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:54.272095Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:54.276605Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:54.277086Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799630469514 != 1750799630469518 2025-06-24T21:13:54.325120Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:54.325287Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:54.336897Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:54.556696Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Initialization finished REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 2025-06-24T21:14:06.631825Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:707:2585], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:06.632034Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:06.637902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:06.873073Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:823:2663], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:06.873213Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:06.873534Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:828:2668], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:06.878765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:06.952538Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:830:2670], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:14:07.944447Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:924:2735] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:08.615404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:14:09.181594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:10.008050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:10.734121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:14:11.297938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:14:12.743083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:13.093222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;RESULT=;EXPECTATION=1 
FINISHED_REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-06-24T21:14:28.584844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715702:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;RESULT=;EXPECTATION=1 2025-06-24T21:14:30.430667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:14:30.430739Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded FINISHED_REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-06-24T21:14:30.840910Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:215;event=skip_tier_manager_start;tier=/Root/tier1;has_secrets=1;tier_config=0; 2025-06-24T21:14:30.841001Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:196;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2025-06-24T21:14:30.841053Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=0};SECRETS={}; 2025-06-24T21:14:30.841136Z node 1 :TX_TIE ... 
ect_deleted;path=/Root/tier2; 2025-06-24T21:15:43.161266Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2025-06-24T21:15:43.161511Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-24T21:15:43.161573Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037892;has_config=0; 2025-06-24T21:15:43.161644Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037892 2025-06-24T21:15:43.161713Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 72075186224037892 2025-06-24T21:15:43.161768Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037892 2025-06-24T21:15:43.161851Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 72075186224037892 2025-06-24T21:15:43.161942Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:43.161999Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-24T21:15:43.162024Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037893;has_config=0; 2025-06-24T21:15:43.162049Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037893 2025-06-24T21:15:43.162084Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 72075186224037893 2025-06-24T21:15:43.162108Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037893 2025-06-24T21:15:43.162141Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 72075186224037893 2025-06-24T21:15:43.162182Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:43.162212Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-24T21:15:43.162231Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037894;has_config=0; 2025-06-24T21:15:43.162255Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037894 2025-06-24T21:15:43.162275Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 72075186224037894 2025-06-24T21:15:43.162293Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037894 2025-06-24T21:15:43.162316Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 72075186224037894 2025-06-24T21:15:43.162341Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:43.162493Z node 1 :TX_TIERING DEBUG: log.cpp:784: 
fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-24T21:15:43.162514Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=0;has_config=0; 2025-06-24T21:15:43.162538Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 2025-06-24T21:15:43.162560Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 0 2025-06-24T21:15:43.162579Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-24T21:15:43.162601Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 0 2025-06-24T21:15:43.162627Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:43.163115Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-24T21:15:43.163177Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=0;has_config=0; 2025-06-24T21:15:43.163214Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 2025-06-24T21:15:43.163245Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 0 2025-06-24T21:15:43.163267Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-24T21:15:43.163302Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 0 2025-06-24T21:15:43.163339Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:43.163816Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2973:4242];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-06-24T21:15:43.163922Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2979:4244];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-06-24T21:15:43.163996Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037894;self_id=[1:2981:4246];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;EXPECTATION=1;WAITING=1 REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 2025-06-24T21:15:54.319073Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-24T21:15:54.319196Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-24T21:15:54.319238Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-24T21:15:54.319280Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-24T21:15:54.319492Z node 1 :TX_TIERING DEBUG: log.cpp:784: 
fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-24T21:15:54.320274Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-24T21:15:54.320395Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-24T21:15:54.320462Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037892;has_config=0; 2025-06-24T21:15:54.320531Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037892 2025-06-24T21:15:54.320639Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:54.320692Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-24T21:15:54.320727Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037893;has_config=0; 2025-06-24T21:15:54.320761Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037893 2025-06-24T21:15:54.320807Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:54.320843Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-24T21:15:54.320870Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037894;has_config=0; 2025-06-24T21:15:54.320900Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037894 2025-06-24T21:15:54.320948Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:54.321000Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-24T21:15:54.321025Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-06-24T21:15:54.321053Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-24T21:15:54.321111Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:54.321349Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-24T21:15:54.321381Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-06-24T21:15:54.321417Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-24T21:15:54.321465Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:54.322541Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 
2025-06-24T21:15:54.322590Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-06-24T21:15:54.322628Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-24T21:15:54.322682Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:54.323035Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2973:4242];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-06-24T21:15:54.323172Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2979:4244];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-06-24T21:15:54.323241Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037894;self_id=[1:2981:4246];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 >> KqpLimits::DatashardProgramSize-useSink [GOOD] >> KqpLimits::DatashardReplySize >> KqpStats::DataQueryWithEffects+UseSink [GOOD] >> KqpStats::DataQueryWithEffects-UseSink >> TCdcStreamTests::StreamOnBuildingIndexTable [GOOD] >> TCdcStreamWithInitialScanTests::InitialScanEnabled ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::DSConfigsWithQueryServiceDdl [GOOD] Test command err: 2025-06-24T21:13:52.671913Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:13:52.672541Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:13:52.672754Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049b3/r3tmp/tmpqzTlVc/pdisk_1.dat 2025-06-24T21:13:53.196154Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 28120, node 1 TClient is connected to server localhost:1554 2025-06-24T21:13:53.848050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:13:53.896301Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:53.896365Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:53.896392Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:53.896685Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:53.908247Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:53.909722Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799629486696 != 1750799629486700 2025-06-24T21:13:53.961626Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:53.961806Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:53.974681Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:13:54.194994Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Initialization finished REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 2025-06-24T21:14:05.778153Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:709:2589], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.778320Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:719:2594], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.778421Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:05.788405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:05.815436Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:723:2597], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-24T21:14:05.888560Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:774:2629] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:06.679286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:08.155518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:14:08.708605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:09.676268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:10.490084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:14:11.032124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:14:12.746565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:13.090566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, 
opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:14:17.543956Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwmz14evtpt8ymrxey0k1v", SessionId: ydb://session/3?node_id=1&id=ODBkYzgwMzQtY2RjMWNkZmQtNzYzMjg4YmYtYjU5YjUzM2M=, Slow query, duration: 11.772473s, status: STATUS_CODE_UNSPECIFIED, user: root@builtin, results: 0b, text: "\n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n ", parameters: 0b REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;RESULT=;EXPECTATION=1 FINISHED_REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-06-24T21:14:28.627934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715702:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;RESULT=;EXPECTATION=1 2025-06-24T21:14:30.534007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:14:30.534061Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded FINISHED_REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-06-24T21:14:30.931781Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:215;event=skip_tier_manager_start;tier=/Root/tier1;has_secrets=1;tier_config=0; 2025-06-24T21:14:30.931886Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:196;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2025-06-24T21:14:30.931945Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=0};SECRETS={}; 2025-06-24T21:14:30.932033Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:128;event=start_subscribing_metadata; 2025-06- ... 
ect_deleted;path=/Root/tier2; 2025-06-24T21:15:43.292457Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2025-06-24T21:15:43.292717Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-24T21:15:43.292787Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037892;has_config=0; 2025-06-24T21:15:43.292850Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037892 2025-06-24T21:15:43.292915Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 72075186224037892 2025-06-24T21:15:43.292972Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037892 2025-06-24T21:15:43.293054Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 72075186224037892 2025-06-24T21:15:43.293140Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:43.293209Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-24T21:15:43.293240Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037893;has_config=0; 2025-06-24T21:15:43.293278Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037893 2025-06-24T21:15:43.293312Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 72075186224037893 2025-06-24T21:15:43.293342Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037893 2025-06-24T21:15:43.293384Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 72075186224037893 2025-06-24T21:15:43.293427Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:43.293466Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-24T21:15:43.293494Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037894;has_config=0; 2025-06-24T21:15:43.293523Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037894 2025-06-24T21:15:43.293552Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 72075186224037894 2025-06-24T21:15:43.293578Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037894 2025-06-24T21:15:43.293614Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 72075186224037894 2025-06-24T21:15:43.293655Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:43.293705Z node 1 :TX_TIERING DEBUG: log.cpp:784: 
fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-24T21:15:43.293735Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=0;has_config=0; 2025-06-24T21:15:43.293762Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 2025-06-24T21:15:43.293796Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 0 2025-06-24T21:15:43.293822Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-24T21:15:43.293856Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 0 2025-06-24T21:15:43.293890Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:43.294977Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-24T21:15:43.295024Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=0;has_config=0; 2025-06-24T21:15:43.295060Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 2025-06-24T21:15:43.295100Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 0 2025-06-24T21:15:43.295130Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-24T21:15:43.295193Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 0 2025-06-24T21:15:43.295237Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:43.295786Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2974:4246];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-06-24T21:15:43.295900Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2981:4248];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-06-24T21:15:43.295967Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037894;self_id=[1:2983:4250];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;EXPECTATION=1;WAITING=1 REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 2025-06-24T21:15:54.491854Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-24T21:15:54.491930Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-24T21:15:54.491957Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-24T21:15:54.491987Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-24T21:15:54.492033Z node 1 :TX_TIERING DEBUG: log.cpp:784: 
fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-24T21:15:54.492296Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-24T21:15:54.492399Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-24T21:15:54.492468Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037892;has_config=0; 2025-06-24T21:15:54.492533Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037892 2025-06-24T21:15:54.492628Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:54.492684Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-24T21:15:54.492714Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037893;has_config=0; 2025-06-24T21:15:54.492745Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037893 2025-06-24T21:15:54.492790Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:54.492823Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-24T21:15:54.492854Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037894;has_config=0; 2025-06-24T21:15:54.492883Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037894 2025-06-24T21:15:54.492924Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:54.492988Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-24T21:15:54.493038Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-06-24T21:15:54.493071Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-24T21:15:54.493119Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:54.493189Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-24T21:15:54.493217Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-06-24T21:15:54.493247Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-24T21:15:54.493284Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:54.493686Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 
2025-06-24T21:15:54.493732Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-06-24T21:15:54.493769Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-24T21:15:54.493816Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-24T21:15:54.493970Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2974:4246];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-06-24T21:15:54.494053Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2981:4248];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-06-24T21:15:54.494113Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037894;self_id=[1:2983:4250];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbOlapStore::LogTsRangeDescending [GOOD] Test command err: 2025-06-24T21:11:27.126967Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626542717838968:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:27.127024Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00449f/r3tmp/tmpucpfNS/pdisk_1.dat 2025-06-24T21:11:27.812011Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:27.833674Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 6167, node 1 2025-06-24T21:11:27.908238Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:27.908267Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:27.908273Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:27.908372Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:27.953232Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:27.953327Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:27.956207Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:11:28.191359Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is 
connected to server localhost:21032 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:28.487413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:21032 2025-06-24T21:11:28.941855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateColumnStore CreateColumnStore { Name: "OlapStore" ColumnShardCount: 4 SchemaPresets { Name: "default" Schema { Columns { Name: "message" Type: "Utf8" } Columns { Name: "json_payload" Type: "JsonDocument" } Columns { Name: "resource_id" Type: "Utf8" NotNull: true } Columns { Name: "uid" Type: "Utf8" NotNull: true } Columns { Name: "timestamp" Type: "Timestamp" NotNull: true } Columns { Name: "resource_type" Type: "Utf8" NotNull: true } Columns { Name: "level" Type: "Int32" } Columns { Name: "ingested_at" Type: "Timestamp" } Columns { Name: "saved_at" Type: "Timestamp" } Columns { Name: "request_id" Type: "Utf8" } KeyColumnNames: "timestamp" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" } } } } TxId: 281474976715658 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:11:28.942408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: create_store.cpp:331: TCreateOlapStore Propose, path: /Root/OlapStore, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-24T21:11:28.955772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: OlapStore, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-24T21:11:28.955842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-06-24T21:11:28.955859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976715658:0 type: TxCreateOlapStore target path: [OwnerId: 72057594046644480, LocalPathId: 2] source path: 2025-06-24T21:11:28.955945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: 
IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-24T21:11:28.956017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-24T21:11:28.956059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-24T21:11:28.956109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 4 2025-06-24T21:11:28.956407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 5 2025-06-24T21:11:28.958314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715658:0 1 -> 2 2025-06-24T21:11:28.958572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:11:28.958603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_store.cpp:451) 2025-06-24T21:11:28.958744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-24T21:11:28.958783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 6 2025-06-24T21:11:28.968321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976715658, response: Status: StatusAccepted TxId: 281474976715658 SchemeshardId: 72057594046644480 PathId: 2, at schemeshard: 72057594046644480 2025-06-24T21:11:28.968617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715658, database: /Root, subject: , status: StatusAccepted, operation: CREATE COLUMN STORE, path: /Root/OlapStore 2025-06-24T21:11:28.968868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-24T21:11:28.968888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715658, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-24T21:11:28.969026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715658, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T21:11:28.969142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-24T21:11:28.969161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: 
TTxPublishToSchemeBoard Send, to populator: [1:7519626542717839561:2386], at schemeshard: 72057594046644480, txId: 281474976715658, path id: 1 2025-06-24T21:11:28.969175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519626542717839561:2386], at schemeshard: 72057594046644480, txId: 281474976715658, path id: 2 2025-06-24T21:11:28.969232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-24T21:11:28.969267Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976715658:0 ProgressState, operation type: TxCreateOlapStore, at tablet# 72057594046644480 waiting... 2025-06-24T21:11:28.970038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:358: TCreateParts opId# 281474976715658:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 1 TabletType: ColumnShard ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 2 BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { ... : 65. Tasks execution finished, don't wait for ack delivery in input channelId: 38, seqNo: [1] 2025-06-24T21:15:53.458676Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 39, seqNo: [1] 2025-06-24T21:15:53.458693Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 40, seqNo: [1] 2025-06-24T21:15:53.458712Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 41, seqNo: [1] 2025-06-24T21:15:53.458730Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. 
Tasks execution finished, don't wait for ack delivery in input channelId: 42, seqNo: [1] 2025-06-24T21:15:53.458747Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 43, seqNo: [1] 2025-06-24T21:15:53.458763Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 44, seqNo: [1] 2025-06-24T21:15:53.458779Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 45, seqNo: [1] 2025-06-24T21:15:53.458795Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 46, seqNo: [1] 2025-06-24T21:15:53.458810Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 47, seqNo: [1] 2025-06-24T21:15:53.458827Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 48, seqNo: [1] 2025-06-24T21:15:53.458843Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 49, seqNo: [1] 2025-06-24T21:15:53.458859Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 50, seqNo: [1] 2025-06-24T21:15:53.458876Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 51, seqNo: [1] 2025-06-24T21:15:53.458892Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 52, seqNo: [1] 2025-06-24T21:15:53.458909Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 53, seqNo: [1] 2025-06-24T21:15:53.458924Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 54, seqNo: [1] 2025-06-24T21:15:53.458940Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 55, seqNo: [1] 2025-06-24T21:15:53.458956Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 56, seqNo: [1] 2025-06-24T21:15:53.458972Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 57, seqNo: [1] 2025-06-24T21:15:53.458988Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. 
Tasks execution finished, don't wait for ack delivery in input channelId: 58, seqNo: [1] 2025-06-24T21:15:53.459004Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 59, seqNo: [1] 2025-06-24T21:15:53.459020Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 60, seqNo: [1] 2025-06-24T21:15:53.459037Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 61, seqNo: [1] 2025-06-24T21:15:53.459051Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 62, seqNo: [1] 2025-06-24T21:15:53.459067Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 63, seqNo: [1] 2025-06-24T21:15:53.459083Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 64, seqNo: [1] 2025-06-24T21:15:53.459111Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710670, task: 65. Tasks execution finished 2025-06-24T21:15:53.459172Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [28:7519627684940611760:3161], TxId: 281474976710670, task: 65. Ctx: { TraceId : 01jyhwr71h2f9zazzgn2036mj1. SessionId : ydb://session/3?node_id=28&id=Nzk2NjIxNzQtY2VjYTEwZWUtMTU1NjJjMDYtOGEzYzIzNGI=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Compute state finished. All channels and sinks finished 2025-06-24T21:15:53.459406Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710670, task: 65. pass away 2025-06-24T21:15:53.459620Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [28:7519627684940611665:3086] TxId: 281474976710670. Ctx: { TraceId: 01jyhwr71h2f9zazzgn2036mj1, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=Nzk2NjIxNzQtY2VjYTEwZWUtMTU1NjJjMDYtOGEzYzIzNGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [28:7519627684940611760:3161], task: 65, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 10537 Tasks { TaskId: 65 StageId: 1 CpuTimeUs: 584 FinishTimeMs: 1750799753457 ComputeCpuTimeUs: 124 BuildCpuTimeUs: 460 HostName: "ghrun-4pjfmvffsq" NodeId: 28 CreateTimeMs: 1750799753279 UpdateTimeMs: 1750799753459 } MaxMemoryUsage: 1048576 } 2025-06-24T21:15:53.459664Z node 28 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976710670;task_id=65;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-24T21:15:53.459713Z node 28 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976710670. Ctx: { TraceId: 01jyhwr71h2f9zazzgn2036mj1, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=Nzk2NjIxNzQtY2VjYTEwZWUtMTU1NjJjMDYtOGEzYzIzNGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [28:7519627684940611760:3161] 2025-06-24T21:15:53.459908Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [28:7519627684940611665:3086] TxId: 281474976710670. Ctx: { TraceId: 01jyhwr71h2f9zazzgn2036mj1, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=Nzk2NjIxNzQtY2VjYTEwZWUtMTU1NjJjMDYtOGEzYzIzNGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-24T21:15:53.460006Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [28:7519627684940611665:3086] TxId: 281474976710670. Ctx: { TraceId: 01jyhwr71h2f9zazzgn2036mj1, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=Nzk2NjIxNzQtY2VjYTEwZWUtMTU1NjJjMDYtOGEzYzIzNGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.159418s ReadRows: 0 ReadBytes: 0 ru: 106 rate limiter was not found force flag: 1 2025-06-24T21:15:53.460120Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=28&id=Nzk2NjIxNzQtY2VjYTEwZWUtMTU1NjJjMDYtOGEzYzIzNGI=, ActorId: [28:7519627680645644314:3086], ActorState: ExecuteState, TraceId: 01jyhwr71h2f9zazzgn2036mj1, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS RESULT: [] --------------------- STATS: total CPU: 3224 duration: 1538 usec cpu: 1538 usec duration: 275325 usec cpu: 290671 usec { name: "/Root/OlapStore/log1" } 2025-06-24T21:15:53.460628Z node 28 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=28&id=Nzk2NjIxNzQtY2VjYTEwZWUtMTU1NjJjMDYtOGEzYzIzNGI=, ActorId: [28:7519627680645644314:3086], ActorState: ExecuteState, TraceId: 01jyhwr71h2f9zazzgn2036mj1, txInfo Status: Active Kind: ReadOnly TotalDuration: 0 ServerDuration: 288.235 QueriesCount: 1 2025-06-24T21:15:53.460741Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=28&id=Nzk2NjIxNzQtY2VjYTEwZWUtMTU1NjJjMDYtOGEzYzIzNGI=, ActorId: [28:7519627680645644314:3086], ActorState: ExecuteState, TraceId: 01jyhwr71h2f9zazzgn2036mj1, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-24T21:15:53.460919Z node 28 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=28&id=Nzk2NjIxNzQtY2VjYTEwZWUtMTU1NjJjMDYtOGEzYzIzNGI=, ActorId: [28:7519627680645644314:3086], ActorState: ExecuteState, TraceId: 01jyhwr71h2f9zazzgn2036mj1, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-24T21:15:53.460990Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=28&id=Nzk2NjIxNzQtY2VjYTEwZWUtMTU1NjJjMDYtOGEzYzIzNGI=, ActorId: [28:7519627680645644314:3086], ActorState: ExecuteState, TraceId: 01jyhwr71h2f9zazzgn2036mj1, EndCleanup, isFinal: 1 2025-06-24T21:15:53.461100Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=28&id=Nzk2NjIxNzQtY2VjYTEwZWUtMTU1NjJjMDYtOGEzYzIzNGI=, ActorId: [28:7519627680645644314:3086], ActorState: ExecuteState, TraceId: 01jyhwr71h2f9zazzgn2036mj1, Sent query 
response back to proxy, proxyRequestId: 5, proxyId: [28:7519627616221131017:2148] 2025-06-24T21:15:53.461159Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=28&id=Nzk2NjIxNzQtY2VjYTEwZWUtMTU1NjJjMDYtOGEzYzIzNGI=, ActorId: [28:7519627680645644314:3086], ActorState: unknown state, TraceId: 01jyhwr71h2f9zazzgn2036mj1, Cleanup temp tables: 0 2025-06-24T21:15:53.465326Z node 28 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799753000, txId: 18446744073709551615] shutting down 2025-06-24T21:15:53.465542Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=28&id=Nzk2NjIxNzQtY2VjYTEwZWUtMTU1NjJjMDYtOGEzYzIzNGI=, ActorId: [28:7519627680645644314:3086], ActorState: unknown state, TraceId: 01jyhwr71h2f9zazzgn2036mj1, Session actor destroyed 2025-06-24T21:15:53.475459Z node 28 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037891;parent=[28:7519627624811066566:2286];fline=actor.cpp:33;event=skip_flush_writing; >> KqpQuery::RowsLimit [GOOD] >> KqpQuery::RowsLimitServiceOverride >> TCdcStreamTests::DropStreamImplShouldFail [GOOD] >> TCdcStreamTests::CopyTableShouldNotCopyStream >> TCdcStreamWithInitialScanTests::InitialScanEnabled [GOOD] >> TCdcStreamWithInitialScanTests::InitialScanDisabled >> KqpExplain::ExplainScanQueryWithParams [GOOD] >> KqpExplain::FewEffects+UseSink >> KqpStats::MultiTxStatsFullYql >> KqpQuery::Now >> KqpExplain::ExplainDataQuery [GOOD] >> KqpExplain::ExplainDataQueryWithParams >> TCdcStreamWithInitialScanTests::InitialScanDisabled [GOOD] >> TCdcStreamWithInitialScanTests::InitialScanProgress >> KqpExplain::UpdateOn+UseSink >> TCdcStreamTests::CopyTableShouldNotCopyStream [GOOD] >> TCdcStreamTests::MoveTableShouldFail >> KqpParams::ExplicitSameParameterTypesQueryCacheCheck [GOOD] >> KqpParams::ImplicitDifferentParameterTypesQueryCacheCheck >> TCdcStreamWithInitialScanTests::InitialScanProgress [GOOD] >> TCdcStreamWithInitialScanTests::WithoutPqTransactions >> TCdcStreamTests::MoveTableShouldFail [GOOD] >> TCdcStreamTests::CheckSchemeLimits >> KqpLimits::LargeParametersAndMkqlFailure [GOOD] >> KqpLimits::ManyPartitions >> KqpQuery::UdfMemoryLimit [GOOD] >> KqpQuery::TryToUpdateNonExistentColumn >> KqpQuery::QueryClientTimeoutPrecompiled [GOOD] >> KqpQuery::QueryExplain >> YdbOlapStore::LogCountByResource [GOOD] >> KqpQuery::QueryCachePermissionsLoss [GOOD] >> KqpQuery::QueryCancelWrite >> TCdcStreamWithInitialScanTests::WithoutPqTransactions [GOOD] >> TCdcStreamWithInitialScanTests::WithPqTransactions >> KqpStats::DataQueryWithEffects-UseSink [GOOD] >> KqpStats::DataQueryMulti >> BasicUsage::TWriteSession_WriteEncoded [GOOD] >> CompressExecutor::TestExecutorMemUsage >> KqpQuery::RowsLimitServiceOverride [GOOD] >> KqpQuery::SelectCountAsteriskFromVar >> TCdcStreamTests::CheckSchemeLimits [GOOD] >> TCdcStreamTests::RebootSchemeShard >> KqpStats::MultiTxStatsFullYql [GOOD] >> KqpStats::MultiTxStatsFullScan >> TCdcStreamWithInitialScanTests::WithPqTransactions [GOOD] >> TCdcStreamWithInitialScanTests::AlterStream >> KqpExplain::FewEffects+UseSink [GOOD] >> KqpExplain::FewEffects-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbOlapStore::LogCountByResource [GOOD] Test command err: 2025-06-24T21:12:10.581447Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626726737276908:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:10.589045Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004418/r3tmp/tmpXdjjO4/pdisk_1.dat 2025-06-24T21:12:11.428580Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:11.453876Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:11.453991Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:11.463831Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2505, node 1 2025-06-24T21:12:11.587614Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:11.815055Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:11.815097Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:11.815105Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:11.815225Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12262 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:12.202790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-1" reason: "YELLOW-e9e2-1231c6b1-2" reason: "YELLOW-e9e2-1231c6b1-3" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-1" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 1 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-2" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 2 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-3" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 3 host: "::1" port: 12003 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 1 host: "::1" port: 12001 } 2025-06-24T21:12:16.595282Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626749170016800:2092];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:16.580034Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519626750976674087:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:16.580085Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:16.653099Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519626750922220350:2244];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:16.653159Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:12:16.663585Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004418/r3tmp/tmpsVZfU4/pdisk_1.dat 2025-06-24T21:12:17.149381Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:17.173517Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:17.173606Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:17.174342Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:17.174399Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:17.175837Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:17.175897Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, 
(0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:17.185414Z node 4 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 6 Cookie 6 2025-06-24T21:12:17.185541Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:17.186649Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:12:17.197469Z node 4 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-24T21:12:17.199198Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26029, node 4 2025-06-24T21:12:17.235845Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 2025-06-24T21:12:17.524763Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:17.524789Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:17.524799Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:17.524938Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:12:17.615749Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:17.636479Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:17.694911Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7630 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:12:17.903641Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:12:21.567844Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519626749170016800:2092];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:21.567970Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:21.581748Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519626750976674087:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:21.581825Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:12:21.639262Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519626750922220350:2244];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:21.639346Z node 6 :METADATA ... pp:472;ProcessSingleRangeResult={ Blob: DS:2181038080:[72075186224037888:1:24:26:0:4784:0] Offset: 1808 Size: 1608 }; 2025-06-24T21:16:00.451539Z node 47 :BLOB_CACHE DEBUG: log.cpp:784: fline=blob_cache.cpp:562;insert_cache={ Blob: DS:2181038080:[72075186224037888:1:24:26:0:4784:0] Offset: 1808 Size: 1608 }; 2025-06-24T21:16:00.451563Z node 47 :BLOB_CACHE DEBUG: log.cpp:784: fline=blob_cache.cpp:502;ProcessSingleRangeResult={ Blob: DS:2181038080:[72075186224037888:1:24:26:0:4784:0] Offset: 1808 Size: 1608 };send_replies=1; 2025-06-24T21:16:00.451578Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927528:3152];TabletId=72075186224037889;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=3,6;column_names=resource_id,resource_type;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=3,4,5,6;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=3,6;column_names=resource_id,resource_type;);;program_input=(column_ids=3,6;column_names=resource_id,resource_type;);;;); 2025-06-24T21:16:00.451582Z node 47 :BLOB_CACHE DEBUG: ctor_logger.h:56: Send result: { Blob: DS:2181038080:[72075186224037888:1:24:26:0:4784:0] Offset: 1808 Size: 1608 } to: [47:7519627713437894890:3178] status: OK 2025-06-24T21:16:00.451597Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927528:3152];TabletId=72075186224037889;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-06-24T21:16:00.451690Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927528:3152];TabletId=72075186224037889;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;method=produce result;fline=actor.cpp:219;stage=no data is ready yet;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=3,6;column_names=resource_id,resource_type;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=3,4,5,6;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=3,6;column_names=resource_id,resource_type;);;program_input=(column_ids=3,6;column_names=resource_id,resource_type;);;;); 2025-06-24T21:16:00.451733Z node 47 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=; 2025-06-24T21:16:00.451742Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927528:3152];TabletId=72075186224037889;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-24T21:16:00.451841Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927528:3152];TabletId=72075186224037889;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=3,6;column_names=resource_id,resource_type;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=3,4,5,6;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=3,6;column_names=resource_id,resource_type;);;program_input=(column_ids=3,6;column_names=resource_id,resource_type;);;;); 2025-06-24T21:16:00.451861Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927528:3152];TabletId=72075186224037889;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-06-24T21:16:00.451863Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927529:3153];TabletId=72075186224037888;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-24T21:16:00.451958Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927528:3152];TabletId=72075186224037889;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;method=produce result;fline=actor.cpp:219;stage=no data is ready yet;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=3,6;column_names=resource_id,resource_type;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=3,4,5,6;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=3,6;column_names=resource_id,resource_type;);;program_input=(column_ids=3,6;column_names=resource_id,resource_type;);;;); 2025-06-24T21:16:00.452004Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927528:3152];TabletId=72075186224037889;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-24T21:16:00.452014Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927529:3153];TabletId=72075186224037888;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=3,6;column_names=resource_id,resource_type;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=3,4,5,6;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=3,6;column_names=resource_id,resource_type;);;program_input=(column_ids=3,6;column_names=resource_id,resource_type;);;;); 2025-06-24T21:16:00.452039Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927529:3153];TabletId=72075186224037888;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-06-24T21:16:00.452106Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[47:7519627709142927528:3152];TabletId=72075186224037889;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=3,6;column_names=resource_id,resource_type;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=3,4,5,6;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=3,6;column_names=resource_id,resource_type;);;program_input=(column_ids=3,6;column_names=resource_id,resource_type;);;;); 2025-06-24T21:16:00.452127Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927528:3152];TabletId=72075186224037889;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-06-24T21:16:00.452168Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927529:3153];TabletId=72075186224037888;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;method=produce result;fline=actor.cpp:219;stage=no data is ready yet;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=3,6;column_names=resource_id,resource_type;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=3,4,5,6;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=3,6;column_names=resource_id,resource_type;);;program_input=(column_ids=3,6;column_names=resource_id,resource_type;);;;); 2025-06-24T21:16:00.452222Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927529:3153];TabletId=72075186224037888;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-24T21:16:00.452224Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927528:3152];TabletId=72075186224037889;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;method=produce result;fline=actor.cpp:219;stage=no data is ready yet;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=3,6;column_names=resource_id,resource_type;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=3,4,5,6;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=3,6;column_names=resource_id,resource_type;);;program_input=(column_ids=3,6;column_names=resource_id,resource_type;);;;); 2025-06-24T21:16:00.452268Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927528:3152];TabletId=72075186224037889;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-24T21:16:00.452325Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927529:3153];TabletId=72075186224037888;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=3,6;column_names=resource_id,resource_type;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=3,4,5,6;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=3,6;column_names=resource_id,resource_type;);;program_input=(column_ids=3,6;column_names=resource_id,resource_type;);;;); 2025-06-24T21:16:00.452344Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927529:3153];TabletId=72075186224037888;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;method=produce 
result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-06-24T21:16:00.452368Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927528:3152];TabletId=72075186224037889;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=3,6;column_names=resource_id,resource_type;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=3,4,5,6;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=3,6;column_names=resource_id,resource_type;);;program_input=(column_ids=3,6;column_names=resource_id,resource_type;);;;); 2025-06-24T21:16:00.452389Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927528:3152];TabletId=72075186224037889;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-06-24T21:16:00.452454Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927529:3153];TabletId=72075186224037888;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;method=produce result;fline=actor.cpp:219;stage=no data is ready yet;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=3,6;column_names=resource_id,resource_type;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=3,4,5,6;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=3,6;column_names=resource_id,resource_type;);;program_input=(column_ids=3,6;column_names=resource_id,resource_type;);;;); 2025-06-24T21:16:00.452496Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927528:3152];TabletId=72075186224037889;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;method=produce result;fline=actor.cpp:219;stage=no data is ready yet;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=3,6;column_names=resource_id,resource_type;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=3,4,5,6;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=3,6;column_names=resource_id,resource_type;);;program_input=(column_ids=3,6;column_names=resource_id,resource_type;);;;); 2025-06-24T21:16:00.452498Z node 47 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[47:7519627709142927529:3153];TabletId=72075186224037888;ScanId=40;TxId=281474976710673;ScanGen=1;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; >> KqpExplain::ExplainDataQueryWithParams [GOOD] >> KqpExplain::CreateTableAs+Stats >> KqpQuery::Now [GOOD] >> KqpQuery::GenericQueryNoRowsLimit >> KqpParams::ImplicitDifferentParameterTypesQueryCacheCheck [GOOD] >> KqpParams::DefaultParameterValue >> KqpExplain::UpdateOn+UseSink [GOOD] >> KqpExplain::UpdateOn-UseSink >> TCdcStreamTests::RebootSchemeShard [GOOD] >> TCdcStreamTests::MeteringServerless >> KqpLimits::ManyPartitions [GOOD] >> KqpLimits::ManyPartitionsSorting >> TCdcStreamWithInitialScanTests::AlterStream [GOOD] >> TCdcStreamWithInitialScanTests::DropStream >> TColumnShardTestReadWrite::CompactionGC [GOOD] >> KqpParams::CheckQueryCacheForUnpreparedQuery >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt64 [GOOD] >> TColumnShardTestReadWrite::CompactionGCFailingBs [GOOD] >> KqpQuery::TryToUpdateNonExistentColumn [GOOD] >> KqpQuery::UpdateThenDelete+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest 
>> TColumnShardTestReadWrite::CompactionGC [GOOD] Test command err: 2025-06-24T21:14:44.479526Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:44.531321Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:44.531664Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:44.539756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:44.540029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:44.540285Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:44.540429Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:44.540554Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:44.540663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:44.540770Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:44.540893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:44.541003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:44.541134Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.541247Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:44.572532Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:44.572790Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:44.572858Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:44.573051Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:44.573221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:44.573321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:44.573367Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:44.573485Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:44.573557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:44.573613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:44.573655Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:44.573833Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:44.573898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:44.573940Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:44.573978Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:44.574095Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:44.574156Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:44.574201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 
2025-06-24T21:14:44.574247Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:44.574304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:44.574370Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:44.574402Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:44.574626Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:44.574671Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:44.574713Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:44.574902Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:44.574954Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:44.574984Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:44.575124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:44.575200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.575246Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.575334Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:44.575407Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:44.575446Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:44.575478Z node 1 
:TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:44.575904Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=55; 2025-06-24T21:14:44.576027Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=46; 2025-06-24T21:14:44.576104Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=31; 2025-06-24T21:14:44.576202Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=53; 2025-06-24T21:14:44.576305Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:44.576993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:44.577055Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:44.577107Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... =9437184;task_id=65c6d160-514011f0-b2532e77-d98ff8f5;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::CLEANUP::PORTIONS;success=1; 2025-06-24T21:15:47.628291Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=65c6d160-514011f0-b2532e77-d98ff8f5;fline=manager.cpp:15;event=unlock;process_id=CS::CLEANUP::PORTIONS::PORTIONS_DROP::65c6d160-514011f0-b2532e77-d98ff8f5; 2025-06-24T21:15:47.628444Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=65c6d160-514011f0-b2532e77-d98ff8f5;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:15:47.628546Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;task_id=65c6d160-514011f0-b2532e77-d98ff8f5;tablet_id=9437184;fline=columnshard_impl.cpp:482;event=skip_compaction;reason=disabled; 2025-06-24T21:15:47.628629Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=65c6d160-514011f0-b2532e77-d98ff8f5;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-24T21:15:47.628739Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=65c6d160-514011f0-b2532e77-d98ff8f5;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:15:47.628822Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=65c6d160-514011f0-b2532e77-d98ff8f5;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:15:47.628881Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=65c6d160-514011f0-b2532e77-d98ff8f5;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:15:47.628983Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;task_id=65c6d160-514011f0-b2532e77-d98ff8f5;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.499500s; 2025-06-24T21:15:47.629064Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=65c6d160-514011f0-b2532e77-d98ff8f5;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:15:47.629280Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:18:2:0:6043488:0] 2025-06-24T21:15:47.629358Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:45:2:0:6171112:0] 2025-06-24T21:15:47.629400Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:46:3:0:6043488:0] 2025-06-24T21:15:47.629468Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:38:4:0:6043488:0] 2025-06-24T21:15:47.629533Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:24:2:0:6171112:0] 2025-06-24T21:15:47.629580Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:21:2:0:6171112:0] 2025-06-24T21:15:47.629632Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:16:3:0:6171112:0] 2025-06-24T21:15:47.629686Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:8:4:0:6171112:0] 2025-06-24T21:15:47.629738Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:43:3:0:6171112:0] 2025-06-24T21:15:47.629787Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:13:3:0:6043488:0] 2025-06-24T21:15:47.629838Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:55:3:0:6171112:0] 2025-06-24T21:15:47.629884Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:36:2:0:6171112:0] 2025-06-24T21:15:47.629945Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:22:3:0:6043488:0] 2025-06-24T21:15:47.629988Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:27:2:0:6043488:0] 2025-06-24T21:15:47.630027Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:23:4:0:6043488:0] 2025-06-24T21:15:47.630067Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:51:2:0:6043488:0] 2025-06-24T21:15:47.630121Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:32:4:0:6043488:0] 2025-06-24T21:15:47.630196Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:30:2:0:6043488:0] 2025-06-24T21:15:47.630256Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:28:3:0:6043488:0] 2025-06-24T21:15:47.630300Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:34:3:0:1792:0] 2025-06-24T21:15:47.630339Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob 
DS:0:[9437184:2:2:4:0:6171112:0] 2025-06-24T21:15:47.630380Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:44:4:0:6043488:0] 2025-06-24T21:15:47.630418Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:49:3:0:6043488:0] 2025-06-24T21:15:47.630462Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:48:2:0:6171112:0] 2025-06-24T21:15:47.630520Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:52:3:0:6043488:0] 2025-06-24T21:15:47.630581Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:10:3:0:6043488:0] 2025-06-24T21:15:47.630628Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:40:3:0:6171112:0] 2025-06-24T21:15:47.630669Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:41:4:0:6043488:0] 2025-06-24T21:15:47.630707Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:12:2:0:6043488:0] 2025-06-24T21:15:47.630748Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:1:3:0:6171112:0] 2025-06-24T21:15:47.630790Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:7:3:0:6043488:0] 2025-06-24T21:15:47.630834Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:9:2:0:6043488:0] 2025-06-24T21:15:47.630874Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:35:4:0:6043488:0] 2025-06-24T21:15:47.630911Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:42:2:0:6043488:0] 2025-06-24T21:15:47.630946Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:19:3:0:6171112:0] 2025-06-24T21:15:47.630984Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:54:2:0:6043488:0] 2025-06-24T21:15:47.631022Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:15:2:0:6043488:0] 2025-06-24T21:15:47.631071Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:57:2:0:6043488:0] 2025-06-24T21:15:47.631464Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:20:4:0:6043488:0] 2025-06-24T21:15:47.631512Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:29:4:0:6171112:0] 2025-06-24T21:15:47.631570Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:26:4:0:6171112:0] 2025-06-24T21:15:47.631617Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:11:4:0:6171112:0] 2025-06-24T21:15:47.631657Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:31:3:0:6171112:0] 2025-06-24T21:15:47.631695Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:14:4:0:6171112:0] 2025-06-24T21:15:47.631737Z 
node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:53:4:0:6171112:0] 2025-06-24T21:15:47.631794Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:50:4:0:6171112:0] 2025-06-24T21:15:47.631838Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:33:2:0:6043488:0] 2025-06-24T21:15:47.631889Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:56:4:0:6043488:0] 2025-06-24T21:15:47.631937Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:39:2:0:6043488:0] 2025-06-24T21:15:47.631979Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:6:2:0:6171112:0] 2025-06-24T21:15:47.632018Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:47:4:0:6043488:0] 2025-06-24T21:15:47.632064Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:37:3:0:6171112:0] 2025-06-24T21:15:47.632103Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:25:3:0:6043488:0] 2025-06-24T21:15:47.632139Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:17:4:0:6043488:0] 2025-06-24T21:15:47.632185Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:4:3:0:6171112:0] GC for channel 3 deletes blobs: WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 Compactions happened: 34 Cleanups happened: 1 Old portions: 1 2 4 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 58 Cleaned up portions: 1 2 4 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 58 >> KqpQuery::CreateAsSelect_BadCases [GOOD] >> KqpQuery::CreateAsSelectView >> KqpQuery::QueryExplain [GOOD] >> KqpQuery::QueryFromSqs >> KqpQuery::QueryCancelWrite [GOOD] >> KqpQuery::QueryCancelWriteImmediate >> TCdcStreamWithInitialScanTests::DropStream [GOOD] >> TCdcStreamWithInitialScanTests::RacyAlterStreamAndRestart ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt64 [GOOD] Test command err: 2025-06-24T21:14:44.477592Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:44.505276Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:44.505601Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:44.515413Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:44.515674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:44.515976Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:44.516149Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:44.516290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:44.516409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:44.516520Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:44.516630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:44.516753Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:44.516890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.516992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:44.567026Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:44.567379Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:44.567456Z node 1 :TX_COLUMNSHARD NOTICE: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:44.567655Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:44.567833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:44.567923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:44.567996Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:44.568139Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:44.568215Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:44.568264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:44.568303Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:44.568475Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:44.568554Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:44.568601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:44.568636Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:44.568775Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:44.568877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:44.568944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:44.569003Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:44.569087Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:44.569139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:44.569169Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:44.569435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:44.569502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:44.569548Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:44.569752Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:44.569827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:44.569868Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:44.570012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:44.570069Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.570104Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.570208Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:44.570287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:44.570332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:44.570366Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:44.575271Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=93; 2025-06-24T21:14:44.575403Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=48; 2025-06-24T21:14:44.575536Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=61; 2025-06-24T21:14:44.575633Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=39; 2025-06-24T21:14:44.575753Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:44.575901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:44.575979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:44.576042Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... d:9;chunk_idx:46;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:9240];;column_id:10;chunk_idx:0;blob_range:[NO_BLOB:0:7272];;column_id:10;chunk_idx:1;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:2;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:3;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:4;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:5;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:6;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:7;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:8;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:9;blob_range:[NO_BLOB:0:8656];;column_id:10;chunk_idx:10;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:11;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:12;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:13;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:14;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:15;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:16;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:17;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:18;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:19;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:20;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:21;bl
ob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:22;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:23;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:24;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:25;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:26;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:27;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:28;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:29;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:30;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:31;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:32;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:33;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:34;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:35;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:36;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:37;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:38;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:39;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:40;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:41;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:42;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:43;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:44;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:45;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:46;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:47;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:48;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:49;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:50;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:51;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:52;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:53;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:54;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:55;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:56;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:57;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:58;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:59;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:60;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:61;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:62;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:63;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:64;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:65;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:66;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:67;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:68;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:69;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:70;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:71;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:72;blob_range:[NO_BLOB:0:9424];;;;switched=(portion_id:220;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7504840;index_size:0;meta:(()););(portion_id:218;path_id:9438184000001;records_count:75000;schema_version:1;level:2;;column_size:7503120;index_size:0;meta:(()););(portion_id:221;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7504840;index_size:0;meta:(()););; 2025-06-24T21:16:07.291962Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;event=on_execution;consumer=GENERAL_COMPACTION;task_id=6e6896aa-514011f0-842045e9-395ca1c5;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=6e6896aa-514011f0-842045e9-395ca1c5;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=9437184;parent_id=[1:5976:7963];task_id=6e6896aa-514011f0-842045e9-395ca1c5;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=3; 2025-06-24T21:16:07.294432Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-24T21:16:07.300052Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:59;event=TTxWriteDraft; 2025-06-24T21:16:07.404876Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=tx_draft.cpp:16;event=draft_completed; 2025-06-24T21:16:07.405010Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=7503120;count=812;actions=__MEMORY,__DEFAULT,;waiting=2;; 2025-06-24T21:16:08.127683Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 9437184 2025-06-24T21:16:08.127837Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:121;from=0,0,0,0,;to=74999,74999,74999,74999,; 2025-06-24T21:16:08.127907Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:141;itFrom=1;itTo=1;raw=7369450;count=1;packed=7504840; 2025-06-24T21:16:08.127992Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=95658;count=1749; 2025-06-24T21:16:08.128073Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:71;memory_size=26198;data_size=26188;sum=2562418;count=1750;size_of_meta=136; 2025-06-24T21:16:08.128151Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=26270;data_size=26260;sum=2625418;count=875;size_of_portion=208; 2025-06-24T21:16:08.128916Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[2] (CS::GENERAL) apply at tablet 9437184 2025-06-24T21:16:08.292175Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 666 2025-06-24T21:16:08.299519Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=37548672;raw_bytes=36867050;count=5;records=375200} inactive {blob_bytes=110272840;raw_bytes=107127800;count=216;records=1200200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:16:08.737467Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;task_id=6e6896aa-514011f0-842045e9-395ca1c5;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-24T21:16:08.737532Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=6e6896aa-514011f0-842045e9-395ca1c5;fline=with_appended.cpp:65;portions=222,;task_id=6e6896aa-514011f0-842045e9-395ca1c5; 2025-06-24T21:16:08.738195Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=6e6896aa-514011f0-842045e9-395ca1c5;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::6e6896aa-514011f0-842045e9-395ca1c5; 2025-06-24T21:16:08.738269Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=6e6896aa-514011f0-842045e9-395ca1c5;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:9438184000001;path_id:9438184000001;size:22538992;portions_count:222;); 2025-06-24T21:16:08.738316Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=6e6896aa-514011f0-842045e9-395ca1c5;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:16:08.738406Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=6e6896aa-514011f0-842045e9-395ca1c5;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:16:08.738506Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=6e6896aa-514011f0-842045e9-395ca1c5;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799387954;tx_id=18446744073709551615;;current_snapshot_ts=1750799686024; 2025-06-24T21:16:08.738560Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=6e6896aa-514011f0-842045e9-395ca1c5;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:16:08.738617Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=6e6896aa-514011f0-842045e9-395ca1c5;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:08.738662Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=6e6896aa-514011f0-842045e9-395ca1c5;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:08.738745Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=6e6896aa-514011f0-842045e9-395ca1c5;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.871000s; 2025-06-24T21:16:08.738807Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=6e6896aa-514011f0-842045e9-395ca1c5;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:16:08.738963Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 666 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionGCFailingBs [GOOD] Test command err: 2025-06-24T21:14:44.479453Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:44.506226Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 
2025-06-24T21:14:44.506516Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:44.515350Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:44.515602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:44.515868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:44.516101Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:44.516296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:44.516457Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:44.516604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:44.516766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:44.516898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:44.517105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.517231Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:44.555002Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:44.555296Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:44.555363Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:44.555567Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:44.557992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:44.558098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:44.558151Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:44.558289Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:44.558376Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:44.558424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:44.558456Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:44.558649Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:44.558722Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:44.558773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:44.558819Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:44.558924Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:44.558983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:44.559043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:44.559091Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:44.559170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:44.559209Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:44.559240Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:44.559520Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:44.559578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:44.559645Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:44.559842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:44.559897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:44.559930Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:44.560121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:44.560175Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.560267Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.560360Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:44.560435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:44.560479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:44.560509Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:44.560945Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=56; 2025-06-24T21:14:44.561034Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=39; 2025-06-24T21:14:44.561121Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=41; 2025-06-24T21:14:44.561369Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=197; 2025-06-24T21:14:44.561488Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:44.561614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:44.561667Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:44.561719Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tab ... .cpp:784: tablet_id=9437184;task_id=665c6cde-514011f0-842aaa56-a1da7ae3;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::CLEANUP::PORTIONS;success=1; 2025-06-24T21:15:48.593148Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=665c6cde-514011f0-842aaa56-a1da7ae3;fline=manager.cpp:15;event=unlock;process_id=CS::CLEANUP::PORTIONS::PORTIONS_DROP::665c6cde-514011f0-842aaa56-a1da7ae3; 2025-06-24T21:15:48.593238Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=665c6cde-514011f0-842aaa56-a1da7ae3;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:15:48.593347Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;task_id=665c6cde-514011f0-842aaa56-a1da7ae3;tablet_id=9437184;fline=columnshard_impl.cpp:482;event=skip_compaction;reason=disabled; 2025-06-24T21:15:48.593422Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=665c6cde-514011f0-842aaa56-a1da7ae3;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-24T21:15:48.593510Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=665c6cde-514011f0-842aaa56-a1da7ae3;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:15:48.593567Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=665c6cde-514011f0-842aaa56-a1da7ae3;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:15:48.593618Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=665c6cde-514011f0-842aaa56-a1da7ae3;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:15:48.593721Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=665c6cde-514011f0-842aaa56-a1da7ae3;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.501000s; 2025-06-24T21:15:48.593799Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=665c6cde-514011f0-842aaa56-a1da7ae3;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:15:48.594093Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at 
tablet 9437184 Delete Blob DS:0:[9437184:3:8:4:0:6043488:0] 2025-06-24T21:15:48.594187Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:35:4:0:6171112:0] 2025-06-24T21:15:48.594235Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:55:3:0:6043488:0] 2025-06-24T21:15:48.594284Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:39:2:0:6043488:0] 2025-06-24T21:15:48.594327Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:50:4:0:6043488:0] 2025-06-24T21:15:48.594388Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:27:2:0:6171112:0] 2025-06-24T21:15:48.594434Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:18:2:0:6043488:0] 2025-06-24T21:15:48.594476Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:36:2:0:6043488:0] 2025-06-24T21:15:48.594527Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:45:2:0:6043488:0] 2025-06-24T21:15:48.594585Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:44:4:0:6043488:0] 2025-06-24T21:15:48.594632Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:23:4:0:6043488:0] 2025-06-24T21:15:48.594672Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:11:4:0:6043488:0] 2025-06-24T21:15:48.594748Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:51:2:0:6171112:0] 2025-06-24T21:15:48.594792Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:34:3:0:6171112:0] 2025-06-24T21:15:48.594833Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:41:4:0:6171112:0] 2025-06-24T21:15:48.594875Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:53:4:0:6171112:0] 2025-06-24T21:15:48.594916Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:5:4:0:6043488:0] 2025-06-24T21:15:48.594954Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:54:2:0:6043488:0] 2025-06-24T21:15:48.594996Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:7:3:0:6043488:0] 2025-06-24T21:15:48.595034Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:20:4:0:6043488:0] 2025-06-24T21:15:48.595076Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:2:4:0:6171112:0] 2025-06-24T21:15:48.595110Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:46:3:0:6171112:0] 2025-06-24T21:15:48.595138Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:26:4:0:6043488:0] 2025-06-24T21:15:48.595202Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:24:2:0:6171112:0] 
2025-06-24T21:15:48.595245Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:1:3:0:6171112:0] 2025-06-24T21:15:48.595295Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:43:3:0:6171112:0] 2025-06-24T21:15:48.595350Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:22:3:0:6171112:0] 2025-06-24T21:15:48.595390Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:30:2:0:6043488:0] 2025-06-24T21:15:48.595447Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:40:3:0:6043488:0] 2025-06-24T21:15:48.595505Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:16:3:0:6043488:0] 2025-06-24T21:15:48.595557Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:31:3:0:6043488:0] 2025-06-24T21:15:48.595605Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:49:3:0:6043488:0] 2025-06-24T21:15:48.595645Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:48:2:0:6171112:0] 2025-06-24T21:15:48.595699Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:33:2:0:6043488:0] 2025-06-24T21:15:48.595759Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:29:4:0:6171112:0] 2025-06-24T21:15:48.595810Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:2:4:0:6171112:0] 2025-06-24T21:15:48.595872Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:13:3:0:6043488:0] 2025-06-24T21:15:48.595914Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:47:4:0:6043488:0] 2025-06-24T21:15:48.595980Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:38:4:0:6171112:0] 2025-06-24T21:15:48.596027Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:28:3:0:6043488:0] 2025-06-24T21:15:48.596085Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:32:4:0:1792:0] 2025-06-24T21:15:48.596131Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:6:2:0:6171112:0] 2025-06-24T21:15:48.596177Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:15:2:0:6043488:0] 2025-06-24T21:15:48.596219Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:4:3:0:6171112:0] 2025-06-24T21:15:48.596259Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:17:4:0:6171112:0] 2025-06-24T21:15:48.596309Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:21:2:0:6043488:0] 2025-06-24T21:15:48.596366Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:37:3:0:6043488:0] 2025-06-24T21:15:48.596404Z node 1 :TX_COLUMNSHARD DEBUG: 
ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:14:4:0:6171112:0] 2025-06-24T21:15:48.596447Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:10:3:0:6043488:0] 2025-06-24T21:15:48.596485Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:12:2:0:6171112:0] 2025-06-24T21:15:48.596523Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:19:3:0:6171112:0] 2025-06-24T21:15:48.596565Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:52:3:0:6043488:0] 2025-06-24T21:15:48.596603Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:25:3:0:6043488:0] 2025-06-24T21:15:48.596643Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:42:2:0:6043488:0] 2025-06-24T21:15:48.596691Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:9:2:0:6171112:0] GC for channel 4 deletes blobs: WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 Compactions happened: 35 Cleanups happened: 1 Old portions: 1 2 4 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 58 Cleaned up portions: 1 2 4 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 58 >> KqpQuery::YqlSyntaxV0 >> KqpStats::DataQueryMulti [GOOD] >> KqpQuery::SelectCountAsteriskFromVar [GOOD] >> KqpStats::MultiTxStatsFullScan [GOOD] >> KqpStats::OneShardLocalExec+UseSink >> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds [GOOD] >> KqpLimits::OutOfSpaceBulkUpsertFail >> TCdcStreamWithInitialScanTests::RacyAlterStreamAndRestart [GOOD] >> TCdcStreamWithInitialScanTests::MeteringServerless >> KqpParams::MissingOptionalParameter+UseSink >> TColumnShardTestReadWrite::WriteReadDuplicate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpStats::DataQueryMulti [GOOD] Test command err: Trying to start YDB, gRPC: 15216, MsgBus: 17309 2025-06-24T21:15:44.334730Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627644655137222:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:44.334831Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003292/r3tmp/tmptnG9Fc/pdisk_1.dat 2025-06-24T21:15:44.763334Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627644655137187:2079] 1750799744323424 != 1750799744323427 2025-06-24T21:15:44.771578Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:15:44.834450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:44.834619Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:44.848140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15216, node 1 2025-06-24T21:15:45.020722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:45.020751Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:15:45.020759Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:45.020895Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:15:45.357382Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17309 TClient is connected to server localhost:17309 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:15:46.034364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:15:46.058051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:15:46.073958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:46.245119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:15:46.415893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:46.510686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:47.893400Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627657540040735:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:47.893507Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.374898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.424077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.462944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.497334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.532083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.572177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.648426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.736567Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627661835008700:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.736641Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.736685Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627661835008705:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.740578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:15:48.754994Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627661835008707:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:15:48.819540Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627661835008758:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:15:49.334774Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627644655137222:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:49.334857Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Warning: Type annotation, code: 1030
:3:46: Warning: At lambda, At function: Coalesce
:3:58: Warning: At function: SqlIn
:3:58: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 ... 644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:03.096304Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519627706725245202:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:03.096383Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 29021, MsgBus: 16707 2025-06-24T21:16:04.649355Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519627732464064734:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:04.649417Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003292/r3tmp/tmpJkYVZD/pdisk_1.dat 2025-06-24T21:16:04.818760Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:04.822501Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:04.822600Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:04.823294Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519627732464064708:2079] 1750799764645123 != 1750799764645126 2025-06-24T21:16:04.826421Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29021, node 4 2025-06-24T21:16:04.881473Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:04.881496Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:04.881505Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:04.881657Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16707 TClient is connected to server localhost:16707 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:05.554053Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:16:05.575190Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:05.662949Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:05.674307Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:05.877091Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:05.969036Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:08.809449Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627749643935526:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:08.809550Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:08.884180Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:08.924240Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:08.998535Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:09.044556Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:09.118885Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:09.200496Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:09.245377Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:09.325496Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627753938903485:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:09.325579Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:09.325731Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627753938903490:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:09.329568Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:09.345909Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627753938903492:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:09.446036Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627753938903543:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:09.649370Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627732464064734:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:09.649451Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::SelectCountAsteriskFromVar [GOOD] Test command err: Trying to start YDB, gRPC: 23371, MsgBus: 9784 2025-06-24T21:15:45.822696Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627646643812238:2153];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:45.823544Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00327b/r3tmp/tmp1zDKog/pdisk_1.dat 2025-06-24T21:15:46.281037Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:15:46.293517Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:46.293627Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:46.294820Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23371, node 1 2025-06-24T21:15:46.402826Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:46.402861Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:15:46.402875Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:46.402993Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9784 2025-06-24T21:15:46.823317Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9784 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:15:47.053650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:15:47.085522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:47.232263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:47.414083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:47.478288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:49.277647Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627663823682934:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:49.277811Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:49.661234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:49.691017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:49.723743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:49.751932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:49.782229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:49.854643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:49.929337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:49.988556Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627663823683599:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:49.988632Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:49.988812Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627663823683604:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:49.992759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:15:50.008961Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627663823683606:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:15:50.095428Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627668118650953:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:15:50.813925Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627646643812238:2153];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:50.814014Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 16957, MsgBus: 9587 2025-06-24T21:15:52.003589Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627677231433272:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:52.003657Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00327b/r3tmp/tmpmEDwki/pdisk_1.dat 2025-06-24T21:15:52.112100Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subs ... 046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:03.723249Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519627703165950896:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:03.723331Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 15374, MsgBus: 1981 2025-06-24T21:16:04.902312Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519627730187794080:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:04.902390Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00327b/r3tmp/tmp9ENG3G/pdisk_1.dat 2025-06-24T21:16:05.034044Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519627730187794053:2079] 1750799764901878 != 1750799764901881 2025-06-24T21:16:05.046849Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:05.049753Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:05.049857Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:05.056501Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15374, node 4 
2025-06-24T21:16:05.100006Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:05.100035Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:05.100046Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:05.100194Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1981 TClient is connected to server localhost:1981 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:05.731999Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:05.755976Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:16:05.823277Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:05.954077Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:06.000554Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:06.091069Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:09.159286Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627751662632169:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:09.159410Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:09.192172Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:09.237483Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:09.278943Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:09.315409Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:09.352422Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:09.426649Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:09.500350Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:09.588513Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627751662632831:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:09.588636Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:09.588886Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627751662632836:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:09.593488Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:09.608034Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627751662632838:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:09.693513Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627751662632889:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:09.905382Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627730187794080:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:09.905453Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQuery::GenericQueryNoRowsLimit [GOOD] >> KqpQuery::GenericQueryNoRowsLimitLotsOfRows >> KqpParams::DefaultParameterValue [GOOD] >> KqpParams::Decimal-QueryService-UseSink >> KqpExplain::CreateTableAs+Stats [GOOD] >> KqpExplain::CreateTableAs-Stats >> KqpLimits::ManyPartitionsSorting [GOOD] >> KqpLimits::DatashardReplySize [GOOD] >> KqpLimits::DataShardReplySizeExceeded ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds [GOOD] Test command err: 2025-06-24T21:14:26.174970Z :TestReorderedExecutor INFO: Random seed for debugging is 1750799666174934 2025-06-24T21:14:26.675113Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627307752239564:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.675322Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:26.751244Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627309249932457:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.767295Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:27.046314Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ee4/r3tmp/tmplsBCnI/pdisk_1.dat 2025-06-24T21:14:27.108599Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:14:27.554523Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:27.554606Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:27.556371Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:27.556446Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:27.597313Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:27.607737Z node 1 :HIVE WARN: 
hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:14:27.607930Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:27.613363Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29795, node 1 2025-06-24T21:14:27.673234Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:27.790597Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:27.967717Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/002ee4/r3tmp/yandex69X719.tmp 2025-06-24T21:14:27.967742Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/002ee4/r3tmp/yandex69X719.tmp 2025-06-24T21:14:27.967927Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/002ee4/r3tmp/yandex69X719.tmp 2025-06-24T21:14:27.968041Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:14:28.143577Z INFO: TTestServer started on Port 2698 GrpcPort 29795 TClient is connected to server localhost:2698 PQClient connected to localhost:29795 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:28.703488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-24T21:14:31.112114Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627330724769223:2273], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:31.112807Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627330724769200:2270], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:31.112918Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:31.115428Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627329227076886:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:31.115496Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627329227076894:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:31.115638Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:31.118992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:31.126118Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627329227076911:2637] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T21:14:31.144382Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627329227076910:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-24T21:14:31.144505Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519627330724769229:2274], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-24T21:14:31.208423Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627329227077010:2703] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:31.229913Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519627330724769257:2132] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:31.438704Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519627329227077020:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:14:31.443479Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ODNmNzQwODYtOTljODEwYWYtM2NjNWNkMzYtYzQxOTYxYzQ=, ActorId: [1:7519627329227076878:2297], ActorState: ExecuteState, TraceId: 01jyhwnqt8b510gcgefawfthmb, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:14:31.445958Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:14:31.447202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:31.449373Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519627330724769272:2278], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:14:31.451563Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NGRkYzYwNDctOTcxMzQyMDctMmRlYTIwMDMtNmRhOTNmMmE=, ActorId: [2:7519627330724769198:2269], ActorState: Exe ... act_chooser_actor.h:142: TPartitionChooser [13:7519627753874733953:2443] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-24T21:16:10.170927Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [13:7519627753874733953:2443] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-24T21:16:10.171876Z node 13 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [13:7519627758169701304:2443] connected; active server actors: 1 2025-06-24T21:16:10.171939Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [13:7519627753874733953:2443] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-24T21:16:10.171959Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [13:7519627753874733953:2443] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-24T21:16:10.172411Z node 13 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [13:7519627758169701304:2443] disconnected; active server actors: 1 2025-06-24T21:16:10.172445Z node 13 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [13:7519627758169701304:2443] disconnected no session 2025-06-24T21:16:10.325890Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [13:7519627753874733953:2443] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-24T21:16:10.325944Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [13:7519627753874733953:2443] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-24T21:16:10.325971Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [13:7519627753874733953:2443] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-24T21:16:10.326007Z node 13 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-24T21:16:10.332648Z node 13 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 14, Generation: 1 2025-06-24T21:16:10.331291Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037892] server connected, pipe [13:7519627758169701329:2443], now have 1 active actors on pipe 2025-06-24T21:16:10.335170Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T21:16:10.335230Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T21:16:10.335363Z node 14 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|c6269c89-e9a25708-e36cca1a-75aa47c5_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-24T21:16:10.336959Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|c6269c89-e9a25708-e36cca1a-75aa47c5_0 2025-06-24T21:16:10.335498Z node 14 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-24T21:16:10.335586Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T21:16:10.336426Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T21:16:10.336457Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T21:16:10.339374Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750799770339 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:16:10.336566Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T21:16:10.339551Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|c6269c89-e9a25708-e36cca1a-75aa47c5_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-24T21:16:10.343496Z :INFO: [] MessageGroupId [src] SessionId [src|c6269c89-e9a25708-e36cca1a-75aa47c5_0] Write session: close. 
Timeout = 0 ms 2025-06-24T21:16:10.343565Z :INFO: [] MessageGroupId [src] SessionId [src|c6269c89-e9a25708-e36cca1a-75aa47c5_0] Write session will now close 2025-06-24T21:16:10.343624Z :DEBUG: [] MessageGroupId [src] SessionId [src|c6269c89-e9a25708-e36cca1a-75aa47c5_0] Write session: aborting 2025-06-24T21:16:10.344260Z :INFO: [] MessageGroupId [src] SessionId [src|c6269c89-e9a25708-e36cca1a-75aa47c5_0] Write session: gracefully shut down, all writes complete 2025-06-24T21:16:10.344331Z :DEBUG: [] MessageGroupId [src] SessionId [src|c6269c89-e9a25708-e36cca1a-75aa47c5_0] Write session: destroy 2025-06-24T21:16:10.347126Z node 13 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|c6269c89-e9a25708-e36cca1a-75aa47c5_0 grpc read done: success: 0 data: 2025-06-24T21:16:10.347175Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|c6269c89-e9a25708-e36cca1a-75aa47c5_0 grpc read failed 2025-06-24T21:16:10.347226Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|c6269c89-e9a25708-e36cca1a-75aa47c5_0 grpc closed 2025-06-24T21:16:10.347250Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|c6269c89-e9a25708-e36cca1a-75aa47c5_0 is DEAD 2025-06-24T21:16:10.349478Z node 13 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T21:16:10.351586Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [13:7519627758169701329:2443] destroyed 2025-06-24T21:16:10.351656Z node 14 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-24T21:16:10.415428Z :INFO: [/Root] [/Root] [cc46f419-695c1d41-1f68bb4d-a2a923d0] Starting read session 2025-06-24T21:16:10.415485Z :DEBUG: [/Root] [/Root] [cc46f419-695c1d41-1f68bb4d-a2a923d0] Starting cluster discovery 2025-06-24T21:16:10.415759Z :INFO: [/Root] [/Root] [cc46f419-695c1d41-1f68bb4d-a2a923d0] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:13752: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:13752
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:13752. " 2025-06-24T21:16:10.415809Z :DEBUG: [/Root] [/Root] [cc46f419-695c1d41-1f68bb4d-a2a923d0] Restart cluster discovery in 0.009931s 2025-06-24T21:16:10.427409Z :DEBUG: [/Root] [/Root] [cc46f419-695c1d41-1f68bb4d-a2a923d0] Starting cluster discovery 2025-06-24T21:16:10.427775Z :INFO: [/Root] [/Root] [cc46f419-695c1d41-1f68bb4d-a2a923d0] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:13752: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:13752
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:13752. " 2025-06-24T21:16:10.427834Z :DEBUG: [/Root] [/Root] [cc46f419-695c1d41-1f68bb4d-a2a923d0] Restart cluster discovery in 0.013047s 2025-06-24T21:16:10.443428Z :DEBUG: [/Root] [/Root] [cc46f419-695c1d41-1f68bb4d-a2a923d0] Starting cluster discovery 2025-06-24T21:16:10.443651Z :INFO: [/Root] [/Root] [cc46f419-695c1d41-1f68bb4d-a2a923d0] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:13752: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:13752
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:13752. " 2025-06-24T21:16:10.443689Z :DEBUG: [/Root] [/Root] [cc46f419-695c1d41-1f68bb4d-a2a923d0] Restart cluster discovery in 0.038597s 2025-06-24T21:16:10.487349Z :DEBUG: [/Root] [/Root] [cc46f419-695c1d41-1f68bb4d-a2a923d0] Starting cluster discovery 2025-06-24T21:16:10.487733Z :NOTICE: [/Root] [/Root] [cc46f419-695c1d41-1f68bb4d-a2a923d0] Aborting read session. Description: SessionClosed { Status: TRANSPORT_UNAVAILABLE Issues: "
: Error: Failed to discover clusters
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:13752: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:13752
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:13752. " } 2025-06-24T21:16:10.487971Z :NOTICE: [/Root] [/Root] [cc46f419-695c1d41-1f68bb4d-a2a923d0] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } SessionClosed { Status: TRANSPORT_UNAVAILABLE Issues: "
: Error: Failed to discover clusters
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:13752: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:13752
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:13752. " } 2025-06-24T21:16:10.488116Z :INFO: [/Root] [/Root] [cc46f419-695c1d41-1f68bb4d-a2a923d0] Closing read session. Close timeout: 0.000000s 2025-06-24T21:16:10.488242Z :NOTICE: [/Root] [/Root] [cc46f419-695c1d41-1f68bb4d-a2a923d0] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:16:10.813886Z node 13 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710684, task: 1, CA Id [13:7519627758169701359:2462]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-06-24T21:16:10.852748Z node 13 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710684, task: 1, CA Id [13:7519627758169701359:2462]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T21:16:10.906643Z node 13 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710684, task: 1, CA Id [13:7519627758169701359:2462]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T21:16:10.982733Z node 13 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710684, task: 1, CA Id [13:7519627758169701359:2462]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T21:16:11.079164Z node 13 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710684, task: 1, CA Id [13:7519627758169701359:2462]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T21:16:11.259739Z node 13 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710684, task: 1, CA Id [13:7519627758169701359:2462]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 >> KqpExplain::UpdateOn-UseSink [GOOD] >> KqpExplain::UpdateOnSecondary+UseSink >> KqpExplain::FewEffects-UseSink [GOOD] >> KqpExplain::FullOuterJoin >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt32 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::ManyPartitionsSorting [GOOD] Test command err: Trying to start YDB, gRPC: 25371, MsgBus: 17407 2025-06-24T21:15:48.583718Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627662062897822:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:48.583872Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00324f/r3tmp/tmpdr9UUk/pdisk_1.dat 2025-06-24T21:15:48.866394Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:15:48.867857Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627662062897804:2079] 1750799748583005 != 1750799748583008 TServer::EnableGrpc on GrpcPort 25371, node 1 2025-06-24T21:15:48.943659Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:48.943677Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:15:48.943689Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:48.943818Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:15:48.962911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:48.962988Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:48.964960Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17407 TClient is connected to server localhost:17407 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:15:49.450167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:15:49.469675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:49.598671Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:15:49.614321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:49.767584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:49.849801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:15:51.319810Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627674947801328:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:51.320010Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:51.614428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:51.639331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:51.668542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:51.698460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:51.772935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:51.842710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:51.880951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:51.945804Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627674947801995:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:51.945947Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:51.946185Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627674947802000:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:51.950416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:15:51.962796Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627674947802002:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:15:52.035087Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627679242769349:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:15:52.890240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:53.584040Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627662062897822:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:53.584116Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:15:55.564797Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7 ... \"PlanNodeId\":4,\"Plans\":[{\"PlanNodeId\":3,\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"Tables\":[\"ManyShardsTable\"],\"PlanNodeId\":1,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"0\",\"ReadRanges\":[\"Key (-\342\210\236, +\342\210\236)\"],\"Reverse\":false,\"Name\":\"TableFullScan\",\"Inputs\":[],\"Path\":\"\\/Root\\/ManyShardsTable\",\"ReadRangesPointPrefixLen\":\"0\",\"E-Rows\":\"0\",\"Table\":\"ManyShardsTable\",\"ReadColumns\":[\"Data\",\"Key\"],\"E-Cost\":\"0\"}],\"Node Type\":\"TableFullScan\"}],\"Node 
Type\":\"Stage\",\"Stats\":{\"UseLlvm\":\"undefined\",\"Output\":[{\"Pop\":{\"Chunks\":{\"Count\":4,\"Sum\":100,\"Max\":25,\"Min\":25},\"Rows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"LastMessageMs\":{\"Count\":4,\"Sum\":326,\"Max\":91,\"Min\":75},\"ActiveMessageMs\":{\"Count\":4,\"Max\":91,\"Min\":2},\"FirstMessageMs\":{\"Count\":4,\"Sum\":16,\"Max\":6,\"Min\":2},\"Bytes\":{\"Count\":4,\"Sum\":8168,\"Max\":2075,\"Min\":2004,\"History\":[22,581,23,1848,24,2429,45,2927,47,4172,64,4753,67,5609,79,5927,81,6840,84,7255,92,8168]},\"ActiveTimeUs\":{\"Count\":4,\"Sum\":310000,\"Max\":88000,\"Min\":69000}},\"Name\":\"4\",\"Push\":{\"Rows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"LastMessageMs\":{\"Count\":4,\"Sum\":326,\"Max\":91,\"Min\":75},\"Chunks\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"ResumeMessageMs\":{\"Count\":4,\"Sum\":325,\"Max\":91,\"Min\":75},\"FirstMessageMs\":{\"Count\":4,\"Sum\":15,\"Max\":6,\"Min\":2},\"ActiveMessageMs\":{\"Count\":4,\"Max\":91,\"Min\":2},\"PauseMessageMs\":{\"Count\":4,\"Sum\":3,\"Max\":2,\"Min\":0},\"ActiveTimeUs\":{\"Count\":4,\"Sum\":311000,\"Max\":88000,\"Min\":69000},\"WaitTimeUs\":{\"Count\":4,\"Sum\":303125,\"Max\":87285,\"Min\":65459,\"History\":[22,20013,23,60467,24,80022,45,98996,47,138568,64,158818,67,199179,79,207115,81,278833,84,294596,92,303125]},\"WaitPeriods\":{\"Count\":4,\"Sum\":100,\"Max\":25,\"Min\":25}}}],\"DurationUs\":{\"Count\":4,\"Sum\":317000,\"Max\":88000,\"Min\":72000},\"MaxMemoryUsage\":{\"Count\":4,\"Sum\":4194304,\"Max\":1048576,\"Min\":1048576,\"History\":[0,2097152,1,3145728,3,4194304,92,4194304]},\"Tasks\":4,\"OutputRows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"FinishedTasks\":4,\"IngressRows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"PhysicalStageId\":0,\"StageDurationUs\":89000,\"Table\":[{\"Path\":\"\\/Root\\/ManyShardsTable\",\"ReadRows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"ReadBytes\":{\"Count\":4,\"Sum\":8800,\"Max\":2208,\"Min\":2192}}],\"BaseTimeMs\":1750799772483,\"WaitInputTimeUs\":{\"Count\":4,\"Sum\":282868,\"Max\":82816,\"Min\":59638,\"History\":[22,17008,23,50933,24,66140,45,84652,47,122977,64,142033,67,181399,79,189069,81,259846,84,275199,92,282868]},\"OutputBytes\":{\"Count\":4,\"Sum\":8168,\"Max\":2075,\"Min\":2004},\"CpuTimeUs\":{\"Count\":4,\"Sum\":21699,\"Max\":7758,\"Min\":4218,\"History\":[0,1206,1,1295,3,1373,22,2510,23,5883,24,7382,45,8887,47,14966,64,16017,67,17424,79,17871,81,19702,84,20324,92,21699]},\"Ingress\":[{\"Pop\":{\"Chunks\":{\"Count\":4,\"Sum\":100,\"Max\":25,\"Min\":25},\"Rows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"LastMessageMs\":{\"Count\":4,\"Sum\":325,\"Max\":91,\"Min\":75},\"ActiveMessageMs\":{\"Count\":4,\"Max\":91,\"Min\":2},\"FirstMessageMs\":{\"Count\":4,\"Sum\":15,\"Max\":6,\"Min\":2},\"Bytes\":{\"Count\":4,\"Sum\":35200,\"Max\":8832,\"Min\":8768,\"History\":[22,2464,23,8128,24,10592,45,12704,47,17984,64,20448,67,24320,79,25696,81,29568,84,31328,92,35200]},\"ActiveTimeUs\":{\"Count\":4,\"Sum\":310000,\"Max\":88000,\"Min\":69000}},\"External\":{},\"Name\":\"KqpReadRangesSource\",\"Ingress\":{},\"Push\":{\"Rows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"LastMessageMs\":{\"Count\":4,\"Sum\":325,\"Max\":91,\"Min\":75},\"Chunks\":{\"Count\":4,\"Sum\":100,\"Max\":25,\"Min\":25},\"ResumeMessageMs\":{\"Count\":4,\"Sum\":325,\"Max\":91,\"Min\":75},\"FirstMessageMs\":{\"Count\":4,\"Sum\":15,\"Max\":6,\"Min\":2},\"ActiveMessageMs\":{\"Count\":4,\"Max\":91,\"Min\":2},\"Bytes\":{\"Count\
":4,\"Sum\":35200,\"Max\":8832,\"Min\":8768,\"History\":[22,2464,23,8128,24,10592,45,12704,47,17984,64,20448,67,24320,79,25696,81,29568,84,31328,92,35200]},\"PauseMessageMs\":{\"Count\":4,\"Sum\":3,\"Max\":2,\"Min\":0},\"ActiveTimeUs\":{\"Count\":4,\"Sum\":310000,\"Max\":88000,\"Min\":69000},\"WaitTimeUs\":{\"Count\":4,\"Sum\":303476,\"Max\":87285,\"Min\":65609,\"History\":[22,19999,23,60585,24,80148,45,99140,47,138745,64,159039,67,199497,79,207448,81,279157,84,294927,92,303476]},\"WaitPeriods\":{\"Count\":4,\"Sum\":99,\"Max\":25,\"Min\":24}}}],\"UpdateTimeMs\":91}}],\"Node Type\":\"Merge\",\"SortColumns\":[\"Key (Asc)\"],\"PlanNodeType\":\"Connection\"}],\"Node Type\":\"Stage\",\"Stats\":{\"UseLlvm\":\"undefined\",\"OutputRows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"PhysicalStageId\":1,\"FinishedTasks\":1,\"InputBytes\":{\"Count\":1,\"Sum\":8168,\"Max\":8168,\"Min\":8168},\"DurationUs\":{\"Count\":1,\"Sum\":91000,\"Max\":91000,\"Min\":91000},\"MaxMemoryUsage\":{\"Count\":1,\"Sum\":1048576,\"Max\":1048576,\"Min\":1048576,\"History\":[4,1048576,95,1048576]},\"BaseTimeMs\":1750799772483,\"Output\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":34,\"Max\":34,\"Min\":34},\"Rows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"LastMessageMs\":{\"Count\":1,\"Sum\":94,\"Max\":94,\"Min\":94},\"ActiveMessageMs\":{\"Count\":1,\"Max\":94,\"Min\":7},\"FirstMessageMs\":{\"Count\":1,\"Sum\":7,\"Max\":7,\"Min\":7},\"Bytes\":{\"Count\":1,\"Sum\":7774,\"Max\":7774,\"Min\":7774,\"History\":[24,597,47,1178,69,1593,88,3496,95,7774]},\"ActiveTimeUs\":{\"Count\":1,\"Sum\":87000,\"Max\":87000,\"Min\":87000}},\"Name\":\"RESULT\",\"Push\":{\"Rows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"LastMessageMs\":{\"Count\":1,\"Sum\":94,\"Max\":94,\"Min\":94},\"Chunks\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":91,\"Max\":91,\"Min\":91},\"FirstMessageMs\":{\"Count\":1,\"Sum\":6,\"Max\":6,\"Min\":6},\"ActiveMessageMs\":{\"Count\":1,\"Max\":94,\"Min\":6},\"PauseMessageMs\":{\"Count\":1,\"Sum\":5,\"Max\":5,\"Min\":5},\"ActiveTimeUs\":{\"Count\":1,\"Sum\":88000,\"Max\":88000,\"Min\":88000},\"WaitTimeUs\":{\"Count\":1,\"Sum\":78733,\"Max\":78733,\"Min\":78733,\"History\":[24,19796,47,38248,69,57761,88,75382,95,78733]},\"WaitPeriods\":{\"Count\":1,\"Sum\":33,\"Max\":33,\"Min\":33},\"WaitMessageMs\":{\"Count\":1,\"Max\":91,\"Min\":5}}}],\"CpuTimeUs\":{\"Count\":1,\"Sum\":15460,\"Max\":15460,\"Min\":15460,\"History\":[4,650,24,2473,47,7713,69,8735,88,11752,95,15460]},\"StageDurationUs\":91000,\"ResultRows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"ResultBytes\":{\"Count\":1,\"Sum\":7774,\"Max\":7774,\"Min\":7774},\"OutputBytes\":{\"Count\":1,\"Sum\":7774,\"Max\":7774,\"Min\":7774},\"Input\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":41,\"Max\":41,\"Min\":41},\"Rows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"LastMessageMs\":{\"Count\":1,\"Sum\":93,\"Max\":93,\"Min\":93},\"ActiveMessageMs\":{\"Count\":1,\"Max\":93,\"Min\":3},\"FirstMessageMs\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"Bytes\":{\"Count\":1,\"Sum\":8168,\"Max\":8168,\"Min\":8168,\"History\":[24,852,47,1433,69,1848,88,3757,95,8168]},\"ActiveTimeUs\":{\"Count\":1,\"Sum\":90000,\"Max\":90000,\"Min\":90000}},\"Name\":\"2\",\"Push\":{\"Rows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"LastMessageMs\":{\"Count\":1,\"Sum\":91,\"Max\":91,\"Min\":91},\"Chunks\":{\"Count\":1,\"Sum\":100,\"Max\":100,\"Min\":100},\"ResumeMessageMs\":{\"Count\":1,\"Sum\
":91,\"Max\":91,\"Min\":91},\"FirstMessageMs\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"ActiveMessageMs\":{\"Count\":1,\"Max\":91,\"Min\":3},\"Bytes\":{\"Count\":1,\"Sum\":8168,\"Max\":8168,\"Min\":8168,\"History\":[24,2512,47,4753,69,6190,88,7670,95,8168]},\"PauseMessageMs\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"ActiveTimeUs\":{\"Count\":1,\"Sum\":88000,\"Max\":88000,\"Min\":88000},\"WaitTimeUs\":{\"Count\":1,\"Sum\":18846,\"Max\":18846,\"Min\":18846,\"History\":[24,4662,47,9115,69,13868,88,18095,95,18846]},\"WaitPeriods\":{\"Count\":1,\"Sum\":37,\"Max\":37,\"Min\":37},\"WaitMessageMs\":{\"Count\":1,\"Max\":91,\"Min\":3}}}],\"UpdateTimeMs\":94,\"InputRows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"Tasks\":1}}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"Stats\":{\"Compilation\":{\"FromCache\":false,\"DurationUs\":155438,\"CpuTimeUs\":151761},\"ProcessCpuTimeUs\":312,\"TotalDurationUs\":359370,\"ResourcePoolId\":\"default\",\"QueuedTimeUs\":92866},\"PlanNodeType\":\"Query\"},\"meta\":{\"version\":\"0.2\",\"type\":\"query\"},\"SimplifiedPlan\":{\"PlanNodeId\":0,\"Plans\":[{\"PlanNodeId\":1,\"Plans\":[{\"PlanNodeId\":5,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"0\",\"ReadRanges\":[\"Key (-\342\210\236, +\342\210\236)\"],\"Reverse\":false,\"Name\":\"TableFullScan\",\"Path\":\"\\/Root\\/ManyShardsTable\",\"ReadRangesPointPrefixLen\":\"0\",\"E-Rows\":\"0\",\"Table\":\"ManyShardsTable\",\"ReadColumns\":[\"Data\",\"Key\"],\"E-Cost\":\"0\"}],\"Node Type\":\"TableFullScan\"}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"PlanNodeType\":\"Query\"}}" query_ast: "(\n(let $1 (KqpTable \'\"/Root/ManyShardsTable\" \'\"72057594046644480:2\" \'\"\" \'1))\n(let $2 (KqpRowsSourceSettings $1 \'(\'\"Data\" \'\"Key\") \'(\'(\'\"Sorted\")) (Void) \'()))\n(let $3 (StructType \'(\'\"Data\" (OptionalType (DataType \'Int32))) \'(\'\"Key\" (OptionalType (DataType \'Uint32)))))\n(let $4 \'(\'(\'\"_logical_id\" \'367) \'(\'\"_id\" \'\"da14aefe-a7dd7280-d04b8203-c9cb729b\") \'(\'\"_wide_channels\" $3)))\n(let $5 (DqPhyStage \'((DqSource (DataSource \'\"KqpReadRangesSource\") $2)) (lambda \'($9) (block \'(\n (let $10 (lambda \'($11) (Member $11 \'\"Data\") (Member $11 \'\"Key\")))\n (return (FromFlow (ExpandMap (ToFlow $9) $10)))\n))) $4))\n(let $6 (DqCnMerge (TDqOutput $5 \'\"0\") \'(\'(\'1 \'\"Asc\"))))\n(let $7 (DqPhyStage \'($6) (lambda \'($12) (FromFlow (NarrowMap (ToFlow $12) (lambda \'($13 $14) (AsStruct \'(\'\"Data\" $13) \'(\'\"Key\" $14)))))) \'(\'(\'\"_logical_id\" \'379) \'(\'\"_id\" \'\"cdd1385a-998dbeee-f4be6f44-c7a158ca\"))))\n(let $8 (DqCnResult (TDqOutput $7 \'\"0\") \'(\'\"Key\" \'\"Data\")))\n(return (KqpPhysicalQuery \'((KqpPhysicalTx \'($5 $7) \'($8) \'() \'(\'(\'\"type\" \'\"generic\")))) \'((KqpTxResultBinding (ListType $3) \'\"0\" \'\"0\")) \'(\'(\'\"type\" \'\"query\"))))\n)\n" total_duration_us: 359370 total_cpu_time_us: 280519 query_meta: 
"{\"query_database\":\"/Root\",\"query_parameter_types\":{},\"table_metadata\":[\"{\\\"DoesExist\\\":true,\\\"Cluster\\\":\\\"db\\\",\\\"Name\\\":\\\"/Root/ManyShardsTable\\\",\\\"SysView\\\":\\\"\\\",\\\"PathId\\\":{\\\"OwnerId\\\":72057594046644480,\\\"TableId\\\":2},\\\"SchemaVersion\\\":1,\\\"Kind\\\":1,\\\"Columns\\\":[{\\\"Name\\\":\\\"Data\\\",\\\"Id\\\":2,\\\"Type\\\":\\\"Int32\\\",\\\"TypeId\\\":1,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"Key\\\",\\\"Id\\\":1,\\\"Type\\\":\\\"Uint32\\\",\\\"TypeId\\\":2,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}}],\\\"KeyColunmNames\\\":[\\\"Key\\\"],\\\"RecordsCount\\\":0,\\\"DataSize\\\":0,\\\"StatsLoaded\\\":false}\"],\"table_meta_serialization_type\":2,\"created_at\":\"1750799772\",\"query_type\":\"QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY\",\"query_syntax\":\"1\",\"query_cluster\":\"db\",\"query_id\":\"9f84d117-2c51e0ee-f40d48e4-17a693e9\",\"version\":\"1.0\"}" >> TCdcStreamWithInitialScanTests::MeteringServerless [GOOD] >> TCdcStreamWithInitialScanTests::MeteringDedicated >> KqpQuery::RandomNumber >> KqpLimits::QueryReplySize ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteReadDuplicate [GOOD] Test command err: 2025-06-24T21:15:08.308753Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:08.334942Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:08.335300Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:08.343791Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:08.344067Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:08.344342Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:08.344474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:08.344610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:08.344751Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:08.344889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:08.345010Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:08.345120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:08.345225Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:08.345390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:08.383949Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:08.384299Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:08.384385Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:08.384595Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:08.384781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:08.384861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:08.384908Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:08.385036Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:08.385132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:08.385213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:08.385253Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:08.385458Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:08.385543Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:08.385601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:08.385717Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:08.385842Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:08.385908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:08.385951Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:08.386001Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:08.386062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:08.386126Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:08.386168Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:08.386399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:08.386451Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:08.386488Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:08.386672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:08.386741Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:08.386774Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:08.386996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:08.387079Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:08.387116Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:08.387221Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:08.387302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:08.387371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:08.387413Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:08.387899Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=65; 2025-06-24T21:15:08.388072Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=106; 2025-06-24T21:15:08.388177Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=37; 2025-06-24T21:15:08.388290Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=59; 2025-06-24T21:15:08.388395Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:08.388517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:08.388574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:08.388631Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tab ... 
r.cpp:47;event=interval_result;interval_idx=0;count=10;merger=0;interval_id=49; 2025-06-24T21:16:12.444146Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-24T21:16:12.444264Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:16:12.444311Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=10;finished=1; 2025-06-24T21:16:12.444359Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:16:12.445857Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:16:12.446038Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:10;schema=timestamp: timestamp[us];);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:16:12.446082Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:16:12.446201Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);columns=1;rows=10; 2025-06-24T21:16:12.446261Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=80;num_rows=10;batch_columns=timestamp; 2025-06-24T21:16:12.446551Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:10327:12333];bytes=80;rows=10;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us]; 2025-06-24T21:16:12.446676Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:16:12.446790Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:16:12.446887Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:16:12.447435Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:16:12.447555Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:16:12.447652Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:16:12.447692Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:10331:12337] finished for tablet 9437184 2025-06-24T21:16:12.448173Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:10327:12333];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.188},{"events":["l_bootstrap"],"t":0.419},{"events":["f_processing","f_task_result"],"t":0.421},{"events":["l_task_result"],"t":2.256},{"events":["f_ack"],"t":2.258},{"events":["l_ProduceResults","f_Finish"],"t":2.259},{"events":["l_ack","l_processing","l_Finish"],"t":2.26}],"full":{"a":1750799770187746,"name":"_full_task","f":1750799770187746,"d_finished":0,"c":0,"l":1750799772447748,"d":2260002},"events":[{"name":"bootstrap","f":1750799770188360,"d_finished":418575,"c":1,"l":1750799770606935,"d":418575},{"a":1750799772447408,"name":"ack","f":1750799772445824,"d_finished":1086,"c":1,"l":1750799772446910,"d":1426},{"a":1750799772447384,"name":"processing","f":1750799770609228,"d_finished":809027,"c":1766,"l":1750799772446913,"d":809391},{"name":"ProduceResults","f":1750799770376008,"d_finished":342799,"c":1769,"l":1750799772447673,"d":342799},{"a":1750799772447676,"name":"Finish","f":1750799772447676,"d_finished":0,"c":0,"l":1750799772447748,"d":72},{"name":"task_result","f":1750799770609271,"d_finished":773716,"c":1765,"l":1750799772444478,"d":773716}],"id":"9437184::49"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:16:12.448244Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:10327:12333];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:16:12.448682Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:10327:12333];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.188},{"events":["l_bootstrap"],"t":0.419},{"events":["f_processing","f_task_result"],"t":0.421},{"events":["l_task_result"],"t":2.256},{"events":["f_ack"],"t":2.258},{"events":["l_ProduceResults","f_Finish"],"t":2.259},{"events":["l_ack","l_processing","l_Finish"],"t":2.26}],"full":{"a":1750799770187746,"name":"_full_task","f":1750799770187746,"d_finished":0,"c":0,"l":1750799772448283,"d":2260537},"events":[{"name":"bootstrap","f":1750799770188360,"d_finished":418575,"c":1,"l":1750799770606935,"d":418575},{"a":1750799772447408,"name":"ack","f":1750799772445824,"d_finished":1086,"c":1,"l":1750799772446910,"d":1961},{"a":1750799772447384,"name":"processing","f":1750799770609228,"d_finished":809027,"c":1766,"l":1750799772446913,"d":809926},{"name":"ProduceResults","f":1750799770376008,"d_finished":342799,"c":1769,"l":1750799772447673,"d":342799},{"a":1750799772447676,"name":"Finish","f":1750799772447676,"d_finished":0,"c":0,"l":1750799772448283,"d":607},{"name":"task_result","f":1750799770609271,"d_finished":773716,"c":1765,"l":1750799772444478,"d":773716}],"id":"9437184::49"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:16:12.448759Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:16:10.186608Z;index_granules=0;index_portions=294;index_batches=294;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=686784;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=686784;selected_rows=0; 2025-06-24T21:16:12.448798Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:16:12.449084Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:10331:12337];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; >> KqpParams::CheckQueryCacheForUnpreparedQuery [GOOD] >> KqpParams::Decimal+QueryService-UseSink >> KqpExplain::SortStage ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt32 [GOOD] Test command err: 2025-06-24T21:14:52.070984Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:52.100993Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:52.101300Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:52.109343Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:52.109581Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:52.109827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:52.109979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:52.110130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:52.110269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:52.110393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:52.110508Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:52.110628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:52.110790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:52.110915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:52.145156Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:52.145427Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:52.145487Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:52.145682Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:52.145886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:52.145973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:52.146017Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:52.146140Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:52.146225Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:52.146275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:52.146312Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:52.146503Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:52.146582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:52.146627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:52.146655Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:52.146746Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:52.146811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:52.146861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:52.146912Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:52.146993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:52.147050Z node 
1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:52.147079Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:52.147353Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:52.147409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:52.147466Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:52.147673Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:52.147733Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:52.147767Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:52.147919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:52.147976Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:52.148025Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:52.148122Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:52.148206Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:52.148251Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:52.148282Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:52.148767Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=49; 2025-06-24T21:14:52.148861Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=34; 2025-06-24T21:14:52.148938Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=31; 2025-06-24T21:14:52.149058Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=63; 2025-06-24T21:14:52.149180Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:52.149283Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:52.149330Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:52.149389Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... ;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:44;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:45;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:46;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:9240];;column_id:10;chunk_idx:0;blob_range:[NO_BLOB:0:7272];;column_id:10;chunk_idx:1;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:2;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:3;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:4;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:5;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:6;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:7;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:8;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:9;blob_range:[NO_BLOB:0:8656];;column_id:10;chunk_idx:10;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:11;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:12;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:13;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:14;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:15;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:16;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:17;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:18;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:19;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:20;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:21;blob_range:[NO_BLOB:0:94
08];;column_id:10;chunk_idx:22;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:23;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:24;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:25;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:26;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:27;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:28;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:29;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:30;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:31;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:32;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:33;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:34;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:35;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:36;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:37;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:38;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:39;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:40;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:41;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:42;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:43;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:44;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:45;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:46;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:47;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:48;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:49;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:50;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:51;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:52;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:53;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:54;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:55;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:56;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:57;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:58;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:59;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:60;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:61;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:62;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:63;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:64;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:65;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:66;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:67;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:68;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:69;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:70;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:71;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:72;blob_range:[NO_BLOB:0:9424];;;;switched=(portion_id:221;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7200040;index_size:0;meta:(()););(portion_id:220;path_id:9438184000001;records_count:75000;schema_version:1;level:2;;column_size:7198464;index_size:0;meta:(()););; 2025-06-24T21:16:13.083175Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;event=on_execution;consumer=GENERAL_COMPACTION;task_id=73a06b48-514011f0-be8414b2-93e57bf;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=73a06b48-514011f0-be8414b2-93e57bf;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=9437184;parent_id=[1:5588:7575];task_id=73a06b48-514011f0-be8414b2-93e57bf;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=2; 2025-06-24T21:16:13.084559Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-24T21:16:13.089248Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:59;event=TTxWriteDraft; 2025-06-24T21:16:13.206259Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=tx_draft.cpp:16;event=draft_completed; 2025-06-24T21:16:13.206388Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=7198464;count=779;actions=__MEMORY,__DEFAULT,;waiting=2;; 2025-06-24T21:16:13.907350Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 9437184 2025-06-24T21:16:13.907502Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:121;from=0,0,0,0,;to=74999,74999,74999,74999,; 2025-06-24T21:16:13.907565Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:141;itFrom=1;itTo=1;raw=7069450;count=1;packed=7200040; 2025-06-24T21:16:13.907647Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:51;memory_size=86;data_size=60;sum=88752;count=1743; 2025-06-24T21:16:13.907766Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:71;memory_size=25134;data_size=25124;sum=2348976;count=1744;size_of_meta=136; 2025-06-24T21:16:13.907845Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=25206;data_size=25196;sum=2411760;count=872;size_of_portion=208; 2025-06-24T21:16:13.908495Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[2] (CS::GENERAL) apply at tablet 9437184 2025-06-24T21:16:14.011606Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 633 2025-06-24T21:16:14.017535Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=28824008;raw_bytes=28296800;count=4;records=300200} inactive {blob_bytes=112525736;raw_bytes=109396450;count=217;records=1275200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:16:14.375314Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;task_id=73a06b48-514011f0-be8414b2-93e57bf;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-24T21:16:14.375381Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=73a06b48-514011f0-be8414b2-93e57bf;fline=with_appended.cpp:65;portions=222,;task_id=73a06b48-514011f0-be8414b2-93e57bf; 2025-06-24T21:16:14.376107Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=73a06b48-514011f0-be8414b2-93e57bf;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::73a06b48-514011f0-be8414b2-93e57bf; 2025-06-24T21:16:14.376194Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=73a06b48-514011f0-be8414b2-93e57bf;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:9438184000001;path_id:9438184000001;size:21623968;portions_count:222;); 2025-06-24T21:16:14.376244Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=73a06b48-514011f0-be8414b2-93e57bf;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:16:14.376325Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=73a06b48-514011f0-be8414b2-93e57bf;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:16:14.376416Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=73a06b48-514011f0-be8414b2-93e57bf;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799395462;tx_id=18446744073709551615;;current_snapshot_ts=1750799693643; 2025-06-24T21:16:14.376469Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=73a06b48-514011f0-be8414b2-93e57bf;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:16:14.376534Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=73a06b48-514011f0-be8414b2-93e57bf;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:14.376576Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=73a06b48-514011f0-be8414b2-93e57bf;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:14.376658Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=73a06b48-514011f0-be8414b2-93e57bf;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.895000s; 2025-06-24T21:16:14.376711Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=73a06b48-514011f0-be8414b2-93e57bf;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:16:14.376901Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 633 >> KqpLimits::TooBigQuery-useSink >> KqpQuery::UpdateThenDelete+UseSink [GOOD] >> KqpQuery::QueryCancelWriteImmediate [GOOD] >> KqpLimits::QSReplySizeEnsureMemoryLimits+useSink >> KqpQuery::QueryFromSqs [GOOD] >> KqpExplain::UpdateSecondaryConditionalPrimaryKey+UseSink >> KqpQuery::YqlSyntaxV0 [GOOD] >> KqpQuery::YqlTableSample ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::UpdateThenDelete+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 3144, MsgBus: 3046 2025-06-24T21:15:46.606582Z node 1 :METADATA_PROVIDER 
WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627651151336837:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:46.606635Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003279/r3tmp/tmpdTryKc/pdisk_1.dat 2025-06-24T21:15:47.027957Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:15:47.028680Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627651151336801:2079] 1750799746603835 != 1750799746603838 TServer::EnableGrpc on GrpcPort 3144, node 1 2025-06-24T21:15:47.064842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:47.064924Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:47.067032Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:15:47.130281Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:47.130324Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:15:47.130334Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:47.130515Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3046 TClient is connected to server localhost:3046 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:15:47.633901Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:15:47.718310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:15:47.746372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:47.894846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:48.049089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:48.126770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:50.062732Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627668331207630:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:50.062845Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:50.371055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:50.397963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:50.426579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:50.452533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:50.480187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:50.546471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:50.613468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:50.661986Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627668331208297:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:50.662061Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:50.662085Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627668331208302:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:50.665474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:15:50.674473Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627668331208304:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:15:50.757359Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627668331208355:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:15:51.609152Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627651151336837:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:51.609225Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:15:52.174451Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519627676921143240:2482], TxId: 281474976710672, task: 1. Ctx: { TraceId : 01jyhwr6m8f4pw4ehhdbjzhmtd. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=Yjg4YjNmY2UtMjVlOTAwMGItMmY1OWNhYjItNmYwNjUzNzY=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED DEFAULT_ERROR: {
: Error: Terminate was called, reason(17): Bad filter value. }. 2025- ... -24T21:16:08.733695Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=N2MxNmFiMTctY2RjODBiMWMtZGRkZDA5MWMtNzI5MjhhMWY=, ActorId: [3:7519627749148455876:2473], ActorState: ExecuteState, TraceId: 01jyhwrq3ybqh8w0cmknb901mz, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: Trying to start YDB, gRPC: 2421, MsgBus: 28620 2025-06-24T21:16:09.773639Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519627753686571267:2137];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003279/r3tmp/tmpUlzLLl/pdisk_1.dat 2025-06-24T21:16:09.855554Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:16:09.938148Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:09.952005Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519627753686571166:2079] 1750799769755489 != 1750799769755492 2025-06-24T21:16:09.956555Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:09.956658Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:09.959076Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2421, node 4 2025-06-24T21:16:10.021861Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:10.021899Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:10.021909Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:10.022096Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28620 TClient is connected to server localhost:28620 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:16:10.687022Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:10.695348Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:10.703763Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:10.773192Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:10.781500Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:10.951732Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:11.083652Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:13.747056Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627770866441971:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:13.747122Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:13.869887Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:13.910202Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:13.960090Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:13.999483Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:14.047372Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:14.131375Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:14.212425Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:14.376948Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627775161409939:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:14.377043Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:14.377311Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627775161409944:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:14.382495Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:14.400779Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627775161409946:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:14.470581Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627775161409997:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:14.762121Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627753686571267:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:14.762194Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; [] >> KqpQuery::CreateAsSelectView [GOOD] >> KqpStats::OneShardLocalExec+UseSink [GOOD] >> KqpStats::OneShardLocalExec-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::QueryCancelWriteImmediate [GOOD] Test command err: Trying to start YDB, gRPC: 14487, MsgBus: 30182 2025-06-24T21:15:44.336881Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627646529592795:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:44.336965Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003307/r3tmp/tmpAVqkKs/pdisk_1.dat 2025-06-24T21:15:44.763317Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:15:44.769530Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627646529592694:2079] 1750799744323725 != 1750799744323728 2025-06-24T21:15:44.794644Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:44.794740Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:44.797806Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14487, node 1 2025-06-24T21:15:45.019635Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:45.019673Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:15:45.019683Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:45.019834Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:15:45.349164Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30182 TClient is connected to server localhost:30182 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:15:46.010560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:15:46.024380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:15:46.030866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:46.230300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:46.432349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:15:46.516265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:47.703928Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627659414496217:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:47.704159Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.374895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.415621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.445144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.470169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.500714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.537537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.607337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.700588Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627663709464181:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.700663Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.700822Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627663709464186:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.704660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:15:48.718192Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627663709464188:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:15:48.800389Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627663709464239:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:15:49.339082Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627646529592795:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:49.339162Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 16277, MsgBus: 16010 2025-06-24T21:15:55.383525Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627690800219381:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:55.383580Z node 2 :METADATA_PROVIDER ERROR: log.cp ... ists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519627727852822203:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:08.503346Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 62686, MsgBus: 3083 2025-06-24T21:16:10.419499Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519627754246081153:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:10.419571Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003307/r3tmp/tmp9FHewM/pdisk_1.dat 2025-06-24T21:16:10.622345Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:10.643403Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519627754246081134:2079] 1750799770418774 != 1750799770418777 2025-06-24T21:16:10.645108Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:10.645213Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:10.647686Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62686, node 4 2025-06-24T21:16:10.710252Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:10.710280Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:10.710291Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:10.710487Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3083 TClient is connected to 
server localhost:3083 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:11.350872Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:11.359261Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:11.365397Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:11.451857Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:16:11.480415Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:11.699879Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:11.787730Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:14.454414Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627771425951958:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:14.454522Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:14.535830Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:14.578038Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:14.612729Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:14.657200Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:14.700851Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:14.777883Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:14.853426Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:14.941000Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627771425952625:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:14.941170Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:14.941498Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627771425952630:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:14.946173Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:14.970566Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627771425952632:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:15.040325Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627775720919979:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:15.419894Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627754246081153:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:15.419978Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpParams::MissingOptionalParameter+UseSink [GOOD] >> KqpParams::ImplicitParameterTypes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::QueryFromSqs [GOOD] Test command err: Trying to start YDB, gRPC: 30185, MsgBus: 28313 2025-06-24T21:15:44.341919Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627642996754491:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:44.342962Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0032ce/r3tmp/tmpnrHYGZ/pdisk_1.dat 2025-06-24T21:15:44.782888Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:15:44.793614Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627642996754383:2079] 1750799744323452 != 1750799744323455 2025-06-24T21:15:44.804230Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:44.804484Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:44.846002Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30185, node 1 2025-06-24T21:15:45.021565Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:45.021608Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:15:45.021642Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:45.021800Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:15:45.348074Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28313 TClient is connected to server localhost:28313 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:15:45.974323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:15:46.022794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:15:46.030490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:46.202431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:46.402120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:46.483193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:47.825956Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627655881657911:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:47.826091Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.375069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.423651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.457149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.490066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.519509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.558376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.593710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.675330Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627660176625862:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.675410Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.675685Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627660176625867:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.683788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:15:48.694953Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627660176625869:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:15:48.767778Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627660176625920:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:15:49.339267Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627642996754491:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:49.383637Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:15:49.984150Z node 1 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00036ef80] finished request Name# CreateSession ok# true peer# ipv6:%5B::1%5D:46336 2025-06-24T21:15:50.002225Z node 1 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00036fd80] received request Name# ExecuteDataQuery ok# true data# session_id: "ydb://session/3?nod ... tSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} Trying to start YDB, gRPC: 64152, MsgBus: 19941 2025-06-24T21:16:10.318239Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519627756716049575:2226];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0032ce/r3tmp/tmpvYewhH/pdisk_1.dat 2025-06-24T21:16:10.362978Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:16:10.521452Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:10.521565Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:10.529377Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519627756716049385:2079] 1750799770271800 != 1750799770271803 2025-06-24T21:16:10.541803Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:10.547745Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64152, node 4 2025-06-24T21:16:10.630922Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:10.630956Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:10.630970Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:10.631128Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19941 TClient is connected to server localhost:19941 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:11.259883Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:11.284425Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:11.294041Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:11.373906Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:11.614942Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:11.700019Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:14.508664Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627773895920208:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:14.508828Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:14.570076Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:14.613023Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:14.657009Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:14.695254Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:14.734512Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:14.781758Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:14.857781Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:14.952131Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627773895920869:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:14.952282Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:14.952377Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627773895920874:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:14.956883Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:14.973418Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627773895920876:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:15.073531Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627778190888223:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:15.291660Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627756716049575:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:15.291887Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:16.287353Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpExplain::CreateTableAs-Stats [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::CreateAsSelectView [GOOD] Test command err: Trying to start YDB, gRPC: 21512, MsgBus: 10644 2025-06-24T21:15:44.333749Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627643497849157:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:44.333885Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0032ab/r3tmp/tmpRwbZrq/pdisk_1.dat 2025-06-24T21:15:44.775135Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627643497849125:2079] 1750799744326515 != 1750799744326518 2025-06-24T21:15:44.791065Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:15:44.796590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:44.796689Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:44.798739Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21512, node 1 2025-06-24T21:15:45.022600Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:45.022640Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:15:45.022648Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:45.022786Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:15:45.360968Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10644 TClient is connected to server localhost:10644 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:15:45.975240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:15:46.026098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:46.211818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:46.390553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:46.468577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:47.704108Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627656382752641:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:47.704275Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.375100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.436645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.465938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.491007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.517178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.550185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.626799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.712166Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627660677720608:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.712276Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.712336Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627660677720613:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.755773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:15:48.769911Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627660677720615:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:15:48.847107Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627660677720666:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:15:49.333861Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627643497849157:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:49.333947Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 22317, MsgBus: 63839 2025-06-24T21:15:50.944789Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627668146551618:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:50.944919Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPa ... 37906;self_id=[3:7519627727475486899:2613];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:865;event=tablet_die; 2025-06-24T21:16:06.922066Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037963;self_id=[3:7519627727475487675:2718];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:865;event=tablet_die; 2025-06-24T21:16:06.922499Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037959;self_id=[3:7519627727475487630:2678];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:865;event=tablet_die; 2025-06-24T21:16:06.925418Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037904;self_id=[3:7519627727475486863:2600];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:865;event=tablet_die; 2025-06-24T21:16:07.012413Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519627744655359772:5242] txid# 281474976715685, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:07.029091Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715687:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:07.179497Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519627744655359915:5331] txid# 281474976715689, issues: { message: "Check failed: path: \'/Root/RowSrc\', error: path exist, request doesn\'t accept it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:07.179752Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NjhhYTJiZmQtNmMyYTc3NTktZmMxZGQxOWQtOTRlMzZmZTA=, ActorId: [3:7519627740360392452:3351], ActorState: ExecuteState, TraceId: 
01jyhwrndyaqefvnvke6dk2zdg, Create QueryResponse for error on request, msg: 2025-06-24T21:16:07.411707Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519627744655359992:5364] txid# 281474976715691, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:07.424171Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715693:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:08.407043Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519627748950327651:5522] txid# 281474976715697, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:08.419939Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715699:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 29423, MsgBus: 24457 2025-06-24T21:16:10.186385Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519627757409961648:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:10.186445Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0032ab/r3tmp/tmpIsu9Hb/pdisk_1.dat 2025-06-24T21:16:10.369981Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:10.370636Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519627757409961626:2079] 1750799770184737 != 1750799770184740 2025-06-24T21:16:10.386878Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:10.387010Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:10.388854Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29423, node 4 2025-06-24T21:16:10.464673Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:10.464703Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:10.464713Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:10.464870Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is 
connected to server localhost:24457 TClient is connected to server localhost:24457 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:11.114741Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:11.211426Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:14.981682Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627774589831424:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:14.981765Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627774589831436:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:14.981919Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:14.988117Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:15.010291Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627774589831461:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:16:15.073652Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627778884798808:2334] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:15.125691Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:15.186704Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627757409961648:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:15.186784Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:15.188568Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:15.847113Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:16.449741Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627783179766753:2709] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:16.474261Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpQuery::CreateAsSelectBadTypes+IsOlap >> KqpExplain::UpdateSecondaryConditional+UseSink >> KqpQuery::GenericQueryNoRowsLimitLotsOfRows [GOOD] >> KqpQuery::NoEvaluate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::CreateTableAs-Stats [GOOD] Test command err: Trying to start YDB, gRPC: 30227, MsgBus: 9053 2025-06-24T21:15:44.338708Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627642553730493:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:44.338924Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0032f9/r3tmp/tmpjHFsjh/pdisk_1.dat 2025-06-24T21:15:44.803314Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627642553730378:2079] 1750799744323855 != 1750799744323858 2025-06-24T21:15:44.810590Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:15:44.812169Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:44.812267Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:44.834068Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30227, node 1 2025-06-24T21:15:45.020163Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:45.020192Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:15:45.020202Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:45.020408Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:15:45.347564Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9053 TClient is connected to server localhost:9053 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:15:46.011121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:15:46.032786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:46.178021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:46.355489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:46.427022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:47.756023Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627655438633910:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:47.756227Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.374905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.414801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.445620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.485787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.521507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.561662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.595374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.676990Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627659733601871:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.677096Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.677430Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627659733601876:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.683747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:15:48.703172Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627659733601878:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:15:48.768470Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627659733601931:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:15:49.338946Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627642553730493:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:49.339018Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Operators":[{"Inputs":[],"Iterator":"precompute_0_0","Name":"Iterator"}],"Node Type":"ConstantExpr","CTE Name":"precompute_0_0"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"},{"PlanNodeId":6,"Subplan Name":"CTE precompute_0_0","Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"Tables":["EightShard"],"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["KeyValue"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":[ ... Tables":["Source"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Col1 (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/Source","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"Source","ReadColumns":["Col1","Col2"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Stage"}],"Node Type":"Map","PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Operators":[{"Inputs":[],"Path":"\/Root\/test\/test2\/Destination3","Name":"FillTable","Table":"test\/test2\/Destination3","SinkType":"KqpTableSink"}],"Node Type":"Sink"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/Source","reads":[{"columns":["Col1","Col2"],"scan_by":["Col1 (-∞, +∞)"],"type":"FullScan"}]},{"name":"\/Root\/test\/test2\/Destination3","writes":[{"columns":["Col1","Col2"],"type":"MultiReplace"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Operators":[{"Path":"\/Root\/test\/test2\/Destination3","Name":"FillTable","Table":"test\/test2\/Destination3","SinkType":"KqpTableSink"}],"Node Type":"FillTable"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} Trying to start YDB, gRPC: 6570, MsgBus: 2705 2025-06-24T21:16:13.820612Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519627769653912491:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:13.829490Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0032f9/r3tmp/tmpFJG5DL/pdisk_1.dat 2025-06-24T21:16:14.000093Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:14.015310Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519627769653912472:2079] 1750799773819063 != 
1750799773819066 2025-06-24T21:16:14.018265Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:14.018369Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:14.025654Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6570, node 5 2025-06-24T21:16:14.075851Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:14.075881Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:14.075891Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:14.076054Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2705 TClient is connected to server localhost:2705 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:14.766528Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:14.863893Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:18.462001Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519627791128749587:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:18.462000Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519627791128749595:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:18.462216Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:18.466992Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:18.487001Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7519627791128749602:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:16:18.566674Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7519627791128749653:2333] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:18.604042Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:18.821140Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519627769653912491:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:18.821242Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; PLAN::{"Plan":{"Plans":[{"Tables":["Destination"],"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Source"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Col1 (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/Source","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"Source","ReadColumns":["Col1","Col2"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Stage"}],"Node Type":"Map","PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Operators":[{"Inputs":[],"Path":"\/Root\/Destination","Name":"FillTable","Table":"Destination","SinkType":"KqpTableSink"}],"Node Type":"Sink"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/Destination","writes":[{"columns":["Col1","Col2"],"type":"MultiReplace"}]},{"name":"\/Root\/Source","reads":[{"columns":["Col1","Col2"],"scan_by":["Col1 (-∞, +∞)"],"type":"FullScan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Operators":[{"Path":"\/Root\/Destination","Name":"FillTable","Table":"Destination","SinkType":"KqpTableSink"}],"Node Type":"FillTable"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} PLAN::{"Plan":{"Plans":[{"Tables":["test\/Destination2"],"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Source"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Col1 (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/Source","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"Source","ReadColumns":["Col1","Col2"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Stage"}],"Node Type":"Map","PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Operators":[{"Inputs":[],"Path":"\/Root\/test\/Destination2","Name":"FillTable","Table":"test\/Destination2","SinkType":"KqpTableSink"}],"Node Type":"Sink"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/Source","reads":[{"columns":["Col1","Col2"],"scan_by":["Col1 (-∞, 
+∞)"],"type":"FullScan"}]},{"name":"\/Root\/test\/Destination2","writes":[{"columns":["Col1","Col2"],"type":"MultiReplace"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Operators":[{"Path":"\/Root\/test\/Destination2","Name":"FillTable","Table":"test\/Destination2","SinkType":"KqpTableSink"}],"Node Type":"FillTable"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} PLAN::{"Plan":{"Plans":[{"Tables":["test\/test2\/Destination3"],"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Source"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Col1 (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/Source","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"Source","ReadColumns":["Col1","Col2"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Stage"}],"Node Type":"Map","PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Operators":[{"Inputs":[],"Path":"\/Root\/test\/test2\/Destination3","Name":"FillTable","Table":"test\/test2\/Destination3","SinkType":"KqpTableSink"}],"Node Type":"Sink"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/Source","reads":[{"columns":["Col1","Col2"],"scan_by":["Col1 (-∞, +∞)"],"type":"FullScan"}]},{"name":"\/Root\/test\/test2\/Destination3","writes":[{"columns":["Col1","Col2"],"type":"MultiReplace"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Operators":[{"Path":"\/Root\/test\/test2\/Destination3","Name":"FillTable","Table":"test\/test2\/Destination3","SinkType":"KqpTableSink"}],"Node Type":"FillTable"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} >> KqpQuery::QueryTimeout >> KqpQuery::RandomNumber [GOOD] >> KqpQuery::RandomUuid >> KqpExplain::SortStage [GOOD] >> KqpExplain::SelfJoin3xSameLabels >> KqpLimits::ComputeActorMemoryAllocationFailure+useSink >> TCdcStreamWithInitialScanTests::MeteringDedicated [GOOD] >> KqpParams::Decimal-QueryService-UseSink [GOOD] >> KqpExplain::FullOuterJoin [GOOD] >> KqpLimits::StreamWrite+Allowed >> KqpExplain::UpdateOnSecondary+UseSink [GOOD] >> KqpExplain::UpdateOnSecondary-UseSink >> KqpParams::Decimal+QueryService-UseSink [GOOD] >> KqpParams::Decimal-QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_cdc_stream/unittest >> TCdcStreamWithInitialScanTests::MeteringDedicated [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:15:43.225151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:15:43.225237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:15:43.225288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, 
StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:15:43.225331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:15:43.226286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:15:43.226345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:15:43.226447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:15:43.226522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:15:43.227328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:15:43.229021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:15:43.317265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:15:43.317328Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:15:43.335658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:15:43.336042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:15:43.336246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:15:43.343912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:15:43.344141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:15:43.347263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:15:43.347790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:15:43.353362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:15:43.354250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:15:43.360082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:15:43.360158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:15:43.360951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:15:43.361010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: 
TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:15:43.361091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:15:43.361171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:15:43.367879Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:15:43.513860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:15:43.514104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:15:43.514766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:15:43.514891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:15:43.515174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:15:43.515250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:15:43.517736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:15:43.517946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:15:43.518131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:15:43.518175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:15:43.518216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:15:43.518251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:15:43.520482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:15:43.520551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:15:43.520590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:15:43.523080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:15:43.523129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:15:43.523206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:15:43.523273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:15:43.528996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:15:43.531637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:15:43.531847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:15:43.533000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:15:43.533120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:15:43.533169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:15:43.533431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:15:43.533470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:15:43.533610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:15:43.533665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant 
no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:15:43.536150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:15:43.536223Z node 1 :FLAT_TX_SCHEMESHARD ... wner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72075186233409546, cookie: 281474976715657 2025-06-24T21:16:16.898435Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72075186233409546, cookie: 281474976715657 2025-06-24T21:16:16.898471Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 281474976715657 2025-06-24T21:16:16.898595Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 281474976715657, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 5 2025-06-24T21:16:16.898643Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 6 2025-06-24T21:16:16.898741Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 2/3, is published: true 2025-06-24T21:16:16.904333Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 281474976715657 2025-06-24T21:16:16.904481Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 281474976715657 2025-06-24T21:16:16.917859Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72075186233409546, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 281474976715657 Step: 250 OrderId: 281474976715657 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72075186233409547 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 1559 } } CommitVersion { Step: 250 TxId: 281474976715657 } 2025-06-24T21:16:16.917952Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 281474976715657, tablet: 72075186233409552, partId: 1 2025-06-24T21:16:16.918129Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 281474976715657:1, at schemeshard: 72075186233409546, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 281474976715657 Step: 250 OrderId: 281474976715657 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72075186233409547 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 1559 } } CommitVersion { Step: 250 TxId: 281474976715657 } 2025-06-24T21:16:16.918292Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72075186233409546, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 281474976715657 Step: 250 OrderId: 
281474976715657 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72075186233409547 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 1559 } } CommitVersion { Step: 250 TxId: 281474976715657 } 2025-06-24T21:16:16.919738Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72075186233409546, at schemeshard: 72075186233409546, message: Source { RawX1: 758 RawX2: 81604381269 } Origin: 72075186233409552 State: 2 TxId: 281474976715657 Step: 0 Generation: 2 2025-06-24T21:16:16.919837Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 281474976715657, tablet: 72075186233409552, partId: 1 2025-06-24T21:16:16.920071Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 281474976715657:1, at schemeshard: 72075186233409546, message: Source { RawX1: 758 RawX2: 81604381269 } Origin: 72075186233409552 State: 2 TxId: 281474976715657 Step: 0 Generation: 2 2025-06-24T21:16:16.920190Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 281474976715657:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72075186233409546 2025-06-24T21:16:16.920371Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 281474976715657:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72075186233409546 message: Source { RawX1: 758 RawX2: 81604381269 } Origin: 72075186233409552 State: 2 TxId: 281474976715657 Step: 0 Generation: 2 2025-06-24T21:16:16.920499Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715657:1, shardIdx: 72075186233409546:4, shard: 72075186233409552, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72075186233409546 2025-06-24T21:16:16.920586Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-06-24T21:16:16.920659Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 281474976715657:1, datashard: 72075186233409552, at schemeshard: 72075186233409546 2025-06-24T21:16:16.920759Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:1 129 -> 240 2025-06-24T21:16:16.924447Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-06-24T21:16:16.925980Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-06-24T21:16:16.926231Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-06-24T21:16:16.926321Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72075186233409546] TDone opId# 281474976715657:1 ProgressState 2025-06-24T21:16:16.926571Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715657:1 progress is 3/3 2025-06-24T21:16:16.926649Z node 19 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715657 ready parts: 3/3 2025-06-24T21:16:16.926730Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715657:1 progress is 3/3 2025-06-24T21:16:16.926794Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715657 ready parts: 3/3 2025-06-24T21:16:16.926862Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 3/3, is published: true 2025-06-24T21:16:16.926929Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715657 ready parts: 3/3 2025-06-24T21:16:16.927001Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715657:0 2025-06-24T21:16:16.927073Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715657:0 2025-06-24T21:16:16.927208Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 3] was 3 2025-06-24T21:16:16.927265Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715657:1 2025-06-24T21:16:16.927290Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715657:1 2025-06-24T21:16:16.927380Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 5 2025-06-24T21:16:16.927427Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715657:2 2025-06-24T21:16:16.927463Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715657:2 2025-06-24T21:16:16.927496Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 4 2025-06-24T21:16:19.937737Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 4 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-24T21:16:19.938338Z node 19 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 4 took 614us result status StatusNameConflict 2025-06-24T21:16:19.938656Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusNameConflict Reason: "Check failed: path: \'/MyRoot/Shared/Table/Stream/streamImpl\', error: path is not a common path (id: [OwnerId: 72075186233409546, LocalPathId: 4], type: EPathTypePersQueueGroup, state: EPathStateNoChanges)" Path: "/MyRoot/Shared/Table/Stream/streamImpl" PathId: 4 LastExistedPrefixPath: "/MyRoot/Shared/Table/Stream/streamImpl" LastExistedPrefixPathId: 4 LastExistedPrefixDescription { Self { Name: "streamImpl" PathId: 4 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 106 CreateStep: 200 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" 
EffectiveACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409554 } } PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-24T21:16:22.654593Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 4 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-24T21:16:22.655120Z node 19 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 4 took 555us result status StatusNameConflict 2025-06-24T21:16:22.655436Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusNameConflict Reason: "Check failed: path: \'/MyRoot/Shared/Table/Stream/streamImpl\', error: path is not a common path (id: [OwnerId: 72075186233409546, LocalPathId: 4], type: EPathTypePersQueueGroup, state: EPathStateNoChanges)" Path: "/MyRoot/Shared/Table/Stream/streamImpl" PathId: 4 LastExistedPrefixPath: "/MyRoot/Shared/Table/Stream/streamImpl" LastExistedPrefixPathId: 4 LastExistedPrefixDescription { Self { Name: "streamImpl" PathId: 4 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 106 CreateStep: 200 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409554 } } PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 >> KqpLimits::DataShardReplySizeExceeded [GOOD] >> KqpQuery::YqlTableSample [GOOD] >> KqpQuery::UpdateWhereInSubquery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpParams::Decimal-QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 17405, MsgBus: 29421 2025-06-24T21:15:54.972568Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627687193808821:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:54.972934Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003248/r3tmp/tmplKk4jm/pdisk_1.dat 2025-06-24T21:15:55.334186Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627687193808723:2079] 1750799754962464 != 1750799754962467 2025-06-24T21:15:55.334374Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17405, node 1 2025-06-24T21:15:55.404515Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:55.404724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:55.420550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:15:55.445367Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:55.445412Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:15:55.445426Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:55.445607Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29421 TClient is connected to server localhost:29421 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:15:55.972463Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:15:56.055513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:15:56.070567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:15:56.085359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:56.210905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:15:56.339336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:15:56.403021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:58.160178Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627704373679539:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:58.160300Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:58.498202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:58.530756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:58.562235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:58.592214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:58.623005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:58.653862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:58.686734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:58.777389Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627704373680197:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:58.777477Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:58.777853Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627704373680202:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:58.782568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:15:58.797117Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627704373680204:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:15:58.901976Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627704373680255:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:15:59.968091Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627687193808821:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:59.979355Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 8933, MsgBus: 12300 2025-06-24T21:16:01.183639Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627717829067648:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:01.186652Z node 2 :METADATA_PROVIDER ERROR: log.cpp ... subdomain.cpp:311) 2025-06-24T21:16:14.682003Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:14.786506Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:14.792781Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:14.987831Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:15.070178Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:17.843431Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627785369152634:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:17.843527Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:17.903702Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:17.942850Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:18.015698Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:18.048580Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:18.078434Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:18.111171Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:18.153435Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:18.229789Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627789664120589:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:18.229879Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:18.229950Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627789664120594:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:18.233090Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:18.242791Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627789664120596:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:18.320627Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627789664120647:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:18.783262Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627768189281832:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:18.783350Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:19.731094Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:21.195647Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519627802549022978:2511], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:4:17: Error: At function: RemovePrefixMembers, At function: Unordered, At function: PersistableRepr, At function: OrderedSqlProject, At tuple, At function: SqlProjectItem, At lambda
:3:25: Error: At function: Parameter, At function: DataType
:3:25: Error: Invalid decimal precision: 99 2025-06-24T21:16:21.195975Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=ZWVjNWU5MTktYTQzYzlmODgtNTMzYTE5NDAtOWYwZGY3YTA=, ActorId: [4:7519627793959088174:2464], ActorState: ExecuteState, TraceId: 01jyhws39b624k51dv2jap5czm, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:16:21.345129Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=ZWVjNWU5MTktYTQzYzlmODgtNTMzYTE5NDAtOWYwZGY3YTA=, ActorId: [4:7519627793959088174:2464], ActorState: ExecuteState, TraceId: 01jyhws3andj6vcmcb6b0ptzwa, Create QueryResponse for error on request, msg: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1374: ydb/core/kqp/query_data/kqp_query_data.cpp:271: Parameter $value22 type mismatch, expected: { Kind: Data Data { Scheme: 4865 DecimalParams { Precision: 22 Scale: 9 } } }, actual: Type (Data), schemeType: Decimal(35,10), schemeTypeId: 4865 2025-06-24T21:16:21.379639Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519627802549022994:2517], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:7:29: Error: At function: KiWriteTable!
:7:50: Error: Failed to convert type: Struct<'Key':Int32,'Value22':Decimal(35,10),'Value35':Decimal(35,10)> to Struct<'Key':Int32?,'Value22':Decimal(22,9)?,'Value35':Decimal(35,10)?>
:4:25: Error: Implicit decimal cast would lose precision
:7:50: Error: Failed to convert 'Value22': Decimal(35,10) to Optional
:7:50: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-24T21:16:21.379971Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=ZWVjNWU5MTktYTQzYzlmODgtNTMzYTE5NDAtOWYwZGY3YTA=, ActorId: [4:7519627793959088174:2464], ActorState: ExecuteState, TraceId: 01jyhws3fc38stbhmwyb118n95, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:16:21.411959Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519627802549023003:2521], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:3:29: Error: At function: KiWriteTable!
:3:50: Error: Failed to convert type: Struct<'Key':Int32,'Value22':Decimal(35,10),'Value35':Decimal(35,10)> to Struct<'Key':Int32?,'Value22':Decimal(22,9)?,'Value35':Decimal(35,10)?>
:0:14: Error: Implicit decimal cast would lose precision
:3:50: Error: Failed to convert 'Value22': Decimal(35,10) to Optional
:3:50: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-24T21:16:21.413617Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=ZWVjNWU5MTktYTQzYzlmODgtNTMzYTE5NDAtOWYwZGY3YTA=, ActorId: [4:7519627793959088174:2464], ActorState: ExecuteState, TraceId: 01jyhws3gg07mm9b0tdmxjrknn, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::FullOuterJoin [GOOD] Test command err: Trying to start YDB, gRPC: 7845, MsgBus: 64526 2025-06-24T21:15:47.240398Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627657977311388:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:47.250890Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00325a/r3tmp/tmpulnpNq/pdisk_1.dat 2025-06-24T21:15:47.594536Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:47.594649Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:47.608935Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627657977311361:2079] 1750799747217028 != 1750799747217031 2025-06-24T21:15:47.617757Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:15:47.625856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7845, node 1 2025-06-24T21:15:47.721024Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:47.721054Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:15:47.721066Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:47.721253Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64526 TClient is connected to server localhost:64526 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:15:48.259536Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:15:48.346026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:15:48.378982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:15:48.559463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:48.697366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:48.768220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:50.736903Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627670862214879:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:50.737040Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:50.983218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:51.013407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:51.045369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:51.082370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:51.116045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:51.185225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:51.253272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:15:51.307949Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627675157182840:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:51.308031Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:51.308137Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627675157182845:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:51.312282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:15:51.327358Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627675157182847:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:15:51.408450Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627675157182898:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:15:52.240582Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627657977311388:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:52.240681Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Operators":[{"Inputs":[],"Iterator":"precompute_0_0","Name":"Iterator"}],"Node Type":"ConstantExpr","CTE Name":"precompute_0_0"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"},{"PlanNodeId":6,"Subplan Name":"CTE precompute_0_0","Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"Tables":["EightShard"],"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["KeyValue"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges": ... onnected 2025-06-24T21:16:15.019860Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:15.022996Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:15.024379Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519627774859393833:2079] 1750799774835510 != 1750799774835513 2025-06-24T21:16:15.036993Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23989, node 5 2025-06-24T21:16:15.092290Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:15.092324Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:15.092337Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:15.092530Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16865 TClient is connected to server localhost:16865 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:15.771458Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:15.793502Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:15.886627Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:15.929159Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:16.152577Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:16.238561Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:19.254873Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519627796334231955:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:19.255013Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:19.319469Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.362225Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.408201Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.451959Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.491403Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.537690Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.589247Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.680332Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519627796334232614:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:19.680447Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:19.680454Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519627796334232619:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:19.684892Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:19.700600Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7519627796334232621:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:19.760503Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7519627796334232672:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:19.836172Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519627774859393850:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:19.836255Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:21.335897Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:21.665662Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:21.708372Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpLimits::QueryReplySize [GOOD] >> KqpLimits::ReadsetCountLimit >> KqpQuery::CreateAsSelectBadTypes+IsOlap [GOOD] >> KqpQuery::CreateAsSelectBadTypes-IsOlap >> KqpParams::ImplicitParameterTypes [GOOD] >> KqpParams::ImplicitSameParameterTypesQueryCacheCheck >> KqpLimits::QSReplySizeEnsureMemoryLimits+useSink [GOOD] >> KqpLimits::QSReplySizeEnsureMemoryLimits-useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::DataShardReplySizeExceeded [GOOD] Test command err: Trying to start YDB, gRPC: 15653, MsgBus: 13665 2025-06-24T21:15:44.611422Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627645542754044:2130];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:44.612073Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003287/r3tmp/tmpHcGI2M/pdisk_1.dat 2025-06-24T21:15:44.953221Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627645542753953:2079] 1750799744586903 != 1750799744586906 2025-06-24T21:15:44.959902Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were 
not loaded TServer::EnableGrpc on GrpcPort 15653, node 1 2025-06-24T21:15:45.032192Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:45.033431Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:45.037465Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:15:45.066623Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:45.066641Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:15:45.066646Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:45.066748Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13665 2025-06-24T21:15:45.611743Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13665 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:15:45.993576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:15:46.035383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:48.960780Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627662722624125:2317], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.960886Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627662722624131:2320], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.960966Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:48.965012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:15:48.974605Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627662722624139:2321], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:15:49.057906Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627667017591488:2565] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:15:49.590480Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627645542754044:2130];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:49.590555Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 21993, MsgBus: 26716 2025-06-24T21:15:51.947502Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627673478691860:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:15:51.947575Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003287/r3tmp/tmpoqPiwM/pdisk_1.dat 2025-06-24T21:15:52.097586Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:15:52.101609Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519627673478691841:2079] 1750799751947099 != 1750799751947102 2025-06-24T21:15:52.118169Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:15:52.118265Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:15:52.120490Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21993, node 2 2025-06-24T21:15:52.171314Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:15:52.171329Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:15:52.171335Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:15:52.171465Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26716 TClient is connected to server localhost:26716 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:15:52.632970Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:15:52.650028Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:15:52.967109Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:15:56.232491Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627694953529316:2317], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:56.232599Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:56.232844Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627694953529328:2320], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:15:56.236499Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976 ... tart YDB, gRPC: 11198, MsgBus: 4451 2025-06-24T21:16:13.947973Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519627768884380273:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:13.948048Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003287/r3tmp/tmpYP2Fth/pdisk_1.dat 2025-06-24T21:16:14.101599Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:14.102394Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:14.102461Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:14.113560Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11198, node 4 2025-06-24T21:16:14.176062Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:14.176103Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:14.176113Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:14.176292Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4451 TClient is connected to server localhost:4451 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:14.797466Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:16:14.822189Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:14.922727Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:15.041964Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:15.171862Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:15.256163Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:17.898372Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627786064251045:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:17.898542Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:17.971550Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:18.012789Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:18.048521Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:18.084678Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:18.120515Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:18.160092Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:18.204804Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:18.270323Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627790359219001:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:18.270403Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:18.270491Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627790359219006:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:18.274123Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:18.284564Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627790359219008:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:18.360947Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627790359219059:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:19.016833Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627768884380273:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:19.016938Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:19.633507Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:23.413042Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=NDMyNDZhMjAtZTg3ZTA5ZTktNzA1NTdkYmYtOTRkNmY5YzE=, ActorId: [4:7519627794654186628:2473], ActorState: ExecuteState, TraceId: 01jyhws51jbby3tq3kbz9t26f8, Create QueryResponse for error on request, msg: >> TColumnShardTestReadWrite::CompactionInGranule_PKUtf8 [GOOD] >> KqpLimits::ComputeActorMemoryAllocationFailure-useSink >> KqpExplain::UpdateSecondaryConditionalPrimaryKey+UseSink [GOOD] >> KqpExplain::UpdateSecondaryConditionalPrimaryKey-UseSink >> KqpStats::OneShardLocalExec-UseSink [GOOD] >> KqpTypes::QuerySpecialTypes >> KqpStats::RequestUnitForSuccessExplicitPrepare >> TColumnShardTestReadWrite::CompactionInGranule_PKInt64 [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKDatetime [GOOD] >> KqpLimits::ComputeActorMemoryAllocationFailure+useSink [GOOD] >> KqpLimits::CancelAfterRwTx+useSink >> TColumnShardTestReadWrite::CompactionInGranule_PKTimestamp [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKUtf8 [GOOD] Test command err: 2025-06-24T21:14:55.624888Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:55.652781Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:55.653140Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:55.661300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:55.661528Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:55.661784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:55.661908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:55.662048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:55.662181Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:55.662297Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:55.662398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:55.662493Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:55.662641Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:55.662760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:55.693811Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:55.694147Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:55.694224Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:55.694416Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:55.694612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:55.694693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:55.694742Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 
2025-06-24T21:14:55.694876Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:55.694946Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:55.695000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:55.695042Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:55.695235Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:55.695301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:55.695348Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:55.695376Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:55.695488Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:55.695559Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:55.695630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:55.695681Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:55.695737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:55.695783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:55.695813Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:55.696073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:55.696119Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:55.696157Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:55.696366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:55.696438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:55.696473Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:55.696607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:55.696666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:55.696700Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:55.696778Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:55.696844Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:55.696886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:55.696917Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:55.697454Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=54; 2025-06-24T21:14:55.697551Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=39; 2025-06-24T21:14:55.697634Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=32; 2025-06-24T21:14:55.697720Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=40; 2025-06-24T21:14:55.697840Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:55.697959Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:55.698022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:55.698088Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... mn_id:9;chunk_idx:46;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:9240];;column_id:10;chunk_idx:0;blob_range:[NO_BLOB:0:9288];;column_id:10;chunk_idx:1;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:2;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:3;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:4;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:5;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:6;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:7;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:8;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:9;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:10;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:11;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:12;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:13;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:14;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:15;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:16;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:17;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:18;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:19;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:20;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:21;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:22;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:23;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:24;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:25;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:26;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:27;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:28;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:29;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:30;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:31;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:32;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:33;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:34;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:35;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:36;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:37;blob_range:[NO_BLOB:0:9296]
;;column_id:10;chunk_idx:38;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:39;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:40;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:41;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:42;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:43;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:44;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:45;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:46;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:47;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:48;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:49;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:50;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:51;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:52;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:53;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:54;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:55;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:56;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:57;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:58;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:59;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:60;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:61;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:62;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:63;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:64;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:65;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:66;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:67;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:68;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:69;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:70;blob_range:[NO_BLOB:0:8592];;column_id:10;chunk_idx:71;blob_range:[NO_BLOB:0:8280];;column_id:10;chunk_idx:72;blob_range:[NO_BLOB:0:8288];;;;switched=(portion_id:220;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7574640;index_size:0;meta:(()););(portion_id:218;path_id:9438184000001;records_count:75000;schema_version:1;level:2;;column_size:7570008;index_size:0;meta:(()););(portion_id:221;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7574640;index_size:0;meta:(()););; 2025-06-24T21:16:23.914775Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;event=on_execution;consumer=GENERAL_COMPACTION;task_id=782d7958-514011f0-98f796a9-df6f5e7c;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=782d7958-514011f0-98f796a9-df6f5e7c;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=9437184;parent_id=[1:6031:8017];task_id=782d7958-514011f0-98f796a9-df6f5e7c;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=3; 2025-06-24T21:16:23.916514Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6031:8017];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-24T21:16:23.921387Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6031:8017];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:59;event=TTxWriteDraft; 2025-06-24T21:16:24.076787Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=tx_draft.cpp:16;event=draft_completed; 2025-06-24T21:16:24.076910Z node 1 
:TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=7570008;count=819;actions=__MEMORY,__DEFAULT,;waiting=2;; 2025-06-24T21:16:24.818975Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 9437184 2025-06-24T21:16:24.819122Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6031:8017];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:121;from=0,0,0,0,;to=9999,9999,9999,9999,; 2025-06-24T21:16:24.819207Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6031:8017];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:141;itFrom=1;itTo=1;raw=7433340;count=1;packed=7574640; 2025-06-24T21:16:24.819294Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6031:8017];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:51;memory_size=86;data_size=62;sum=95362;count=1749; 2025-06-24T21:16:24.819369Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6031:8017];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:71;memory_size=26414;data_size=26406;sum=2613226;count=1750;size_of_meta=136; 2025-06-24T21:16:24.819441Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6031:8017];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=26486;data_size=26478;sum=2676226;count=875;size_of_portion=208; 2025-06-24T21:16:24.820133Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[2] (CS::GENERAL) apply at tablet 9437184 2025-06-24T21:16:24.969121Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 673 2025-06-24T21:16:24.975692Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=37893352;raw_bytes=37186700;count=5;records=375200} inactive {blob_bytes=111591656;raw_bytes=108150240;count=216;records=1200200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:16:25.493764Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=782d7958-514011f0-98f796a9-df6f5e7c;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-24T21:16:25.493828Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=782d7958-514011f0-98f796a9-df6f5e7c;fline=with_appended.cpp:65;portions=222,;task_id=782d7958-514011f0-98f796a9-df6f5e7c; 2025-06-24T21:16:25.494505Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=782d7958-514011f0-98f796a9-df6f5e7c;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::782d7958-514011f0-98f796a9-df6f5e7c; 2025-06-24T21:16:25.494578Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=782d7958-514011f0-98f796a9-df6f5e7c;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:9438184000001;path_id:9438184000001;size:22744072;portions_count:222;); 2025-06-24T21:16:25.494615Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=782d7958-514011f0-98f796a9-df6f5e7c;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:16:25.494678Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;task_id=782d7958-514011f0-98f796a9-df6f5e7c;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:16:25.494734Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=782d7958-514011f0-98f796a9-df6f5e7c;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799399137;tx_id=18446744073709551615;;current_snapshot_ts=1750799697198; 2025-06-24T21:16:25.494771Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=782d7958-514011f0-98f796a9-df6f5e7c;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:16:25.494813Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=782d7958-514011f0-98f796a9-df6f5e7c;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:25.494849Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=782d7958-514011f0-98f796a9-df6f5e7c;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:25.494938Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=782d7958-514011f0-98f796a9-df6f5e7c;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.886000s; 2025-06-24T21:16:25.494991Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=782d7958-514011f0-98f796a9-df6f5e7c;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:16:25.495186Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 673 >> KqpStats::RequestUnitForBadRequestExecute >> KqpQuery::QueryTimeout [GOOD] >> KqpQuery::QueryResultsTruncated ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpStats::OneShardLocalExec-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 15365, MsgBus: 1947 2025-06-24T21:16:00.013666Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627711991353778:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:00.013854Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003243/r3tmp/tmpNT0BXg/pdisk_1.dat 2025-06-24T21:16:00.389677Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:00.390367Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627711991353755:2079] 1750799760011998 != 1750799760012001 TServer::EnableGrpc on GrpcPort 15365, node 1 2025-06-24T21:16:00.449886Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:00.450030Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:00.450930Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:00.450961Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:00.450967Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:00.451100Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:16:00.453341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1947 TClient is connected to server localhost:1947 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:16:01.048268Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:01.127221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:01.155567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:01.166361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:01.341890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:01.523020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:01.603070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:03.299903Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627724876257292:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:03.300033Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:03.690258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:03.725401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:03.763600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:03.793307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:03.861032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:03.896455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:03.934874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:03.991406Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627724876257949:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:03.991499Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:03.991564Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627724876257954:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:03.994810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:04.004102Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627724876257956:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:04.095199Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627729171225305:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:05.017978Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627711991353778:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:05.022228Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:05.413211Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799765400, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 2337, MsgBus: 19322 2025-06-24T21:16:06.322523Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=tabl ... sts.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519627762658408651:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:17.474733Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 3613, MsgBus: 30250 2025-06-24T21:16:19.027373Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519627792740052034:2091];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:19.042820Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003243/r3tmp/tmpmCktI8/pdisk_1.dat 2025-06-24T21:16:19.158153Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:19.164717Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519627788445084683:2079] 1750799779014782 != 1750799779014785 2025-06-24T21:16:19.178063Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:19.178163Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:19.181947Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3613, node 4 2025-06-24T21:16:19.227720Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:19.227744Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:19.227753Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:19.227901Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30250 TClient is connected 
to server localhost:30250 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:19.848007Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:19.855400Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:19.866703Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:19.946401Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:20.075039Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:16:20.119757Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:20.189523Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:22.996827Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627805624955495:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:22.996943Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:23.061093Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:23.114530Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:23.157348Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:23.202381Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:23.240511Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:23.286551Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:23.341840Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:23.471215Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627809919923452:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:23.471378Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:23.472135Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627809919923457:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:23.477453Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:23.490554Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627809919923459:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:23.589140Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627809919923510:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:24.030681Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627792740052034:2091];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:24.030731Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQuery::RandomUuid [GOOD] >> KqpQuery::ReadOverloaded+StreamLookup >> CompressExecutor::TestExecutorMemUsage [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKInt64 [GOOD] Test command err: 2025-06-24T21:15:00.317050Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:00.344964Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:00.345271Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:00.353155Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:00.353365Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:00.353598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:00.353738Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:00.353870Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:00.353991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:00.354100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:00.354199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:00.354335Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:00.354484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:00.354590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:00.386809Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:00.387072Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:00.387128Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:00.387345Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:00.387581Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:00.387660Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:00.387705Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:00.387826Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:00.387889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:00.387936Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:00.387968Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:00.388167Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:00.388236Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:00.388281Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:00.388310Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:00.388418Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:00.388495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:00.388545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:00.388604Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:00.388657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:00.388697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:00.388726Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:00.388941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:00.388999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:00.389044Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:00.389222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:00.389268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:00.389318Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:00.389466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:00.389518Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:00.389551Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:00.389662Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:00.389726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:00.389765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:00.389796Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:00.390261Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=57; 2025-06-24T21:15:00.390359Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=43; 2025-06-24T21:15:00.390436Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=32; 2025-06-24T21:15:00.390545Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=45; 2025-06-24T21:15:00.390651Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:00.390741Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:00.390786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:00.390841Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
d:9;chunk_idx:46;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:9240];;column_id:10;chunk_idx:0;blob_range:[NO_BLOB:0:7272];;column_id:10;chunk_idx:1;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:2;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:3;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:4;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:5;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:6;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:7;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:8;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:9;blob_range:[NO_BLOB:0:8656];;column_id:10;chunk_idx:10;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:11;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:12;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:13;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:14;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:15;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:16;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:17;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:18;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:19;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:20;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:21;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:22;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:23;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:24;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:25;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:26;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:27;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:28;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:29;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:30;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:31;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:32;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:33;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:34;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:35;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:36;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:37;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:38;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:39;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:40;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:41;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:42;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:43;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:44;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk
_idx:45;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:46;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:47;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:48;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:49;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:50;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:51;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:52;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:53;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:54;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:55;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:56;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:57;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:58;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:59;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:60;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:61;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:62;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:63;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:64;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:65;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:66;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:67;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:68;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:69;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:70;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:71;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:72;blob_range:[NO_BLOB:0:9424];;;;switched=(portion_id:220;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7504840;index_size:0;meta:(()););(portion_id:218;path_id:9438184000001;records_count:75000;schema_version:1;level:2;;column_size:7503120;index_size:0;meta:(()););(portion_id:221;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7504840;index_size:0;meta:(()););; 2025-06-24T21:16:25.015018Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;event=on_execution;consumer=GENERAL_COMPACTION;task_id=7856d4d8-514011f0-8a5f0ea6-b7682df3;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=7856d4d8-514011f0-8a5f0ea6-b7682df3;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=9437184;parent_id=[1:5976:7963];task_id=7856d4d8-514011f0-8a5f0ea6-b7682df3;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=3; 2025-06-24T21:16:25.017691Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-24T21:16:25.029104Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:59;event=TTxWriteDraft; 2025-06-24T21:16:25.143247Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=tx_draft.cpp:16;event=draft_completed; 2025-06-24T21:16:25.143432Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=7503120;count=812;actions=__MEMORY,__DEFAULT,;waiting=2;; 2025-06-24T21:16:25.916598Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 9437184 2025-06-24T21:16:25.916720Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:121;from=0,0,0,0,;to=74999,74999,74999,74999,; 2025-06-24T21:16:25.916780Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:141;itFrom=1;itTo=1;raw=7369450;count=1;packed=7504840; 2025-06-24T21:16:25.916852Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=95658;count=1749; 2025-06-24T21:16:25.916920Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:71;memory_size=26198;data_size=26188;sum=2562418;count=1750;size_of_meta=136; 2025-06-24T21:16:25.916989Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=26270;data_size=26260;sum=2625418;count=875;size_of_portion=208; 2025-06-24T21:16:25.917613Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[2] (CS::GENERAL) apply at tablet 9437184 2025-06-24T21:16:26.064809Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 666 2025-06-24T21:16:26.073711Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=37548672;raw_bytes=36867050;count=5;records=375200} inactive {blob_bytes=110272840;raw_bytes=107127800;count=216;records=1200200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:16:26.566805Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7856d4d8-514011f0-8a5f0ea6-b7682df3;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-24T21:16:26.566887Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7856d4d8-514011f0-8a5f0ea6-b7682df3;fline=with_appended.cpp:65;portions=222,;task_id=7856d4d8-514011f0-8a5f0ea6-b7682df3; 2025-06-24T21:16:26.567901Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7856d4d8-514011f0-8a5f0ea6-b7682df3;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::7856d4d8-514011f0-8a5f0ea6-b7682df3; 2025-06-24T21:16:26.568001Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7856d4d8-514011f0-8a5f0ea6-b7682df3;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:9438184000001;path_id:9438184000001;size:22538992;portions_count:222;); 2025-06-24T21:16:26.568071Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7856d4d8-514011f0-8a5f0ea6-b7682df3;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:16:26.568169Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7856d4d8-514011f0-8a5f0ea6-b7682df3;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:16:26.568249Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;task_id=7856d4d8-514011f0-8a5f0ea6-b7682df3;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799403819;tx_id=18446744073709551615;;current_snapshot_ts=1750799701890; 2025-06-24T21:16:26.568303Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7856d4d8-514011f0-8a5f0ea6-b7682df3;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:16:26.568360Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7856d4d8-514011f0-8a5f0ea6-b7682df3;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:26.568407Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7856d4d8-514011f0-8a5f0ea6-b7682df3;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:26.568500Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7856d4d8-514011f0-8a5f0ea6-b7682df3;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.871000s; 2025-06-24T21:16:26.568557Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7856d4d8-514011f0-8a5f0ea6-b7682df3;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:16:26.568752Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 666 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKDatetime [GOOD] Test command err: 2025-06-24T21:15:04.051295Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:04.080737Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:04.081097Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:04.089682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:04.089922Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:04.090214Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:04.090356Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:04.090489Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:04.090694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:04.090822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:04.090953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:04.091087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:04.091261Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:04.091387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:04.123004Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:04.123295Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:04.123363Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:04.123576Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:04.123772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:04.123857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:04.123927Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:04.124079Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:04.124160Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:04.124217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:04.124258Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:04.124468Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:04.124546Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:04.124592Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:04.124630Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:04.124749Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:04.124837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:04.124890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:04.124942Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:04.125002Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:04.125055Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:04.125099Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:04.125345Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:04.125393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:04.125436Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:04.125652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:04.125706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:04.125742Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:04.125916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:04.125970Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:04.126010Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:04.126093Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:04.126166Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:04.126208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:04.126240Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:04.126743Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=50; 2025-06-24T21:15:04.126860Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=51; 2025-06-24T21:15:04.126996Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=57; 2025-06-24T21:15:04.127087Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=37; 2025-06-24T21:15:04.127207Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:04.127341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:04.127393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:04.127452Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
LOB:0:9240];;column_id:9;chunk_idx:44;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:45;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:46;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:9240];;column_id:10;chunk_idx:0;blob_range:[NO_BLOB:0:7272];;column_id:10;chunk_idx:1;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:2;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:3;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:4;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:5;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:6;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:7;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:8;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:9;blob_range:[NO_BLOB:0:8656];;column_id:10;chunk_idx:10;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:11;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:12;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:13;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:14;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:15;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:16;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:17;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:18;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:19;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:20;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:21;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:22;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:23;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:24;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:25;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:26;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:27;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:28;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:29;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:30;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:31;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:32;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:33;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:34;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:35;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:36;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:37;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:38;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:39;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:40;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:41;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:42;blob_range:[NO_BLOB:0:9408];
;column_id:10;chunk_idx:43;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:44;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:45;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:46;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:47;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:48;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:49;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:50;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:51;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:52;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:53;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:54;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:55;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:56;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:57;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:58;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:59;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:60;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:61;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:62;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:63;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:64;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:65;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:66;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:67;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:68;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:69;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:70;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:71;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:72;blob_range:[NO_BLOB:0:9424];;;;switched=(portion_id:221;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7200040;index_size:0;meta:(()););(portion_id:220;path_id:9438184000001;records_count:75000;schema_version:1;level:2;;column_size:7198464;index_size:0;meta:(()););; 2025-06-24T21:16:25.138255Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;event=on_execution;consumer=GENERAL_COMPACTION;task_id=7ae41daa-514011f0-ba5c5b82-6f91cfec;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=7ae41daa-514011f0-ba5c5b82-6f91cfec;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=9437184;parent_id=[1:5588:7575];task_id=7ae41daa-514011f0-ba5c5b82-6f91cfec;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=2; 2025-06-24T21:16:25.139444Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-24T21:16:25.143918Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:59;event=TTxWriteDraft; 2025-06-24T21:16:25.255510Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=tx_draft.cpp:16;event=draft_completed; 2025-06-24T21:16:25.255620Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=7198464;count=779;actions=__MEMORY,__DEFAULT,;waiting=2;; 2025-06-24T21:16:25.915658Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 9437184 2025-06-24T21:16:25.915813Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:121;from=0,0,0,0,;to=74999,74999,74999,74999,; 2025-06-24T21:16:25.915888Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:141;itFrom=1;itTo=1;raw=7069450;count=1;packed=7200040; 2025-06-24T21:16:25.915987Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:51;memory_size=86;data_size=60;sum=88752;count=1743; 2025-06-24T21:16:25.916110Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:71;memory_size=25134;data_size=25124;sum=2348976;count=1744;size_of_meta=136; 2025-06-24T21:16:25.916195Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=25206;data_size=25196;sum=2411760;count=872;size_of_portion=208; 2025-06-24T21:16:25.916910Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[2] (CS::GENERAL) apply at tablet 9437184 2025-06-24T21:16:26.013906Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 633 2025-06-24T21:16:26.023663Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=28824008;raw_bytes=28296800;count=4;records=300200} inactive {blob_bytes=112525736;raw_bytes=109396450;count=217;records=1275200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:16:26.334484Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7ae41daa-514011f0-ba5c5b82-6f91cfec;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-24T21:16:26.334563Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7ae41daa-514011f0-ba5c5b82-6f91cfec;fline=with_appended.cpp:65;portions=222,;task_id=7ae41daa-514011f0-ba5c5b82-6f91cfec; 2025-06-24T21:16:26.335392Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7ae41daa-514011f0-ba5c5b82-6f91cfec;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::7ae41daa-514011f0-ba5c5b82-6f91cfec; 2025-06-24T21:16:26.335475Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7ae41daa-514011f0-ba5c5b82-6f91cfec;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:9438184000001;path_id:9438184000001;size:21623968;portions_count:222;); 2025-06-24T21:16:26.335523Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7ae41daa-514011f0-ba5c5b82-6f91cfec;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:16:26.335597Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7ae41daa-514011f0-ba5c5b82-6f91cfec;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:16:26.335679Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;task_id=7ae41daa-514011f0-ba5c5b82-6f91cfec;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799407442;tx_id=18446744073709551615;;current_snapshot_ts=1750799705623; 2025-06-24T21:16:26.335727Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7ae41daa-514011f0-ba5c5b82-6f91cfec;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:16:26.335777Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7ae41daa-514011f0-ba5c5b82-6f91cfec;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:26.335820Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7ae41daa-514011f0-ba5c5b82-6f91cfec;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:26.335896Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7ae41daa-514011f0-ba5c5b82-6f91cfec;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.895000s; 2025-06-24T21:16:26.335946Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=7ae41daa-514011f0-ba5c5b82-6f91cfec;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:16:26.336111Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 633 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKTimestamp [GOOD] Test command err: 2025-06-24T21:15:00.310615Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:00.333060Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:00.333373Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:00.339929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:00.340142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:00.340344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:00.340444Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:00.340541Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:00.340639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:00.340714Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:00.340783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:00.340849Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:00.340962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:00.341032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:00.372725Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:00.372922Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:00.372961Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:00.373122Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:00.373262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:00.373318Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:00.373349Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:00.373429Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:00.373483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:00.373522Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:00.373540Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:00.373670Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:00.373715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:00.373745Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:00.373762Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:00.373834Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:00.373875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:00.373905Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:00.373937Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:00.373973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:00.374012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:00.374034Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:00.374183Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:00.374211Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:00.374245Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:00.374375Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:00.374411Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:00.374433Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:00.374528Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:00.374561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:00.374582Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:00.374629Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:00.374692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:00.374725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:00.374750Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:00.375093Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=32; 2025-06-24T21:15:00.375202Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=63; 2025-06-24T21:15:00.375306Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=38; 2025-06-24T21:15:00.375365Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=24; 2025-06-24T21:15:00.375458Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:00.375537Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:00.375576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:00.375617Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
d:9;chunk_idx:46;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:9240];;column_id:10;chunk_idx:0;blob_range:[NO_BLOB:0:7272];;column_id:10;chunk_idx:1;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:2;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:3;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:4;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:5;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:6;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:7;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:8;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:9;blob_range:[NO_BLOB:0:8656];;column_id:10;chunk_idx:10;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:11;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:12;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:13;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:14;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:15;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:16;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:17;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:18;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:19;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:20;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:21;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:22;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:23;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:24;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:25;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:26;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:27;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:28;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:29;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:30;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:31;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:32;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:33;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:34;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:35;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:36;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:37;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:38;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:39;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:40;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:41;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:42;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:43;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:44;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk
_idx:45;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:46;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:47;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:48;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:49;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:50;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:51;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:52;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:53;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:54;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:55;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:56;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:57;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:58;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:59;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:60;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:61;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:62;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:63;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:64;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:65;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:66;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:67;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:68;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:69;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:70;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:71;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:72;blob_range:[NO_BLOB:0:9424];;;;switched=(portion_id:220;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7504840;index_size:0;meta:(()););(portion_id:218;path_id:9438184000001;records_count:75000;schema_version:1;level:2;;column_size:7503120;index_size:0;meta:(()););(portion_id:221;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7504840;index_size:0;meta:(()););; 2025-06-24T21:16:25.611197Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;event=on_execution;consumer=GENERAL_COMPACTION;task_id=78eea9fc-514011f0-8e6c2610-f9cfba10;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=78eea9fc-514011f0-8e6c2610-f9cfba10;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=9437184;parent_id=[1:5976:7963];task_id=78eea9fc-514011f0-8e6c2610-f9cfba10;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=3; 2025-06-24T21:16:25.613777Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-24T21:16:25.621026Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:59;event=TTxWriteDraft; 2025-06-24T21:16:25.711744Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=tx_draft.cpp:16;event=draft_completed; 2025-06-24T21:16:25.711869Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=7503120;count=812;actions=__MEMORY,__DEFAULT,;waiting=2;; 2025-06-24T21:16:26.405047Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 9437184 2025-06-24T21:16:26.405225Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:121;from=0,0,0,0,;to=74999,74999,74999,74999,; 2025-06-24T21:16:26.405302Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:141;itFrom=1;itTo=1;raw=7369450;count=1;packed=7504840; 2025-06-24T21:16:26.405397Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=95658;count=1749; 2025-06-24T21:16:26.405485Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:71;memory_size=26198;data_size=26188;sum=2562418;count=1750;size_of_meta=136; 2025-06-24T21:16:26.405568Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5976:7963];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=26270;data_size=26260;sum=2625418;count=875;size_of_portion=208; 2025-06-24T21:16:26.406271Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[2] (CS::GENERAL) apply at tablet 9437184 2025-06-24T21:16:26.546858Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 666 2025-06-24T21:16:26.556970Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=37548672;raw_bytes=36867050;count=5;records=375200} inactive {blob_bytes=110272840;raw_bytes=107127800;count=216;records=1200200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:16:26.995655Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=78eea9fc-514011f0-8e6c2610-f9cfba10;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-24T21:16:26.995731Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=78eea9fc-514011f0-8e6c2610-f9cfba10;fline=with_appended.cpp:65;portions=222,;task_id=78eea9fc-514011f0-8e6c2610-f9cfba10; 2025-06-24T21:16:26.996622Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=78eea9fc-514011f0-8e6c2610-f9cfba10;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::78eea9fc-514011f0-8e6c2610-f9cfba10; 2025-06-24T21:16:26.996714Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=78eea9fc-514011f0-8e6c2610-f9cfba10;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:9438184000001;path_id:9438184000001;size:22538992;portions_count:222;); 2025-06-24T21:16:26.996765Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=78eea9fc-514011f0-8e6c2610-f9cfba10;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:16:26.996847Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=78eea9fc-514011f0-8e6c2610-f9cfba10;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:16:26.996919Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;task_id=78eea9fc-514011f0-8e6c2610-f9cfba10;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799403818;tx_id=18446744073709551615;;current_snapshot_ts=1750799701889; 2025-06-24T21:16:26.996966Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=78eea9fc-514011f0-8e6c2610-f9cfba10;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:16:26.997018Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=78eea9fc-514011f0-8e6c2610-f9cfba10;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:26.997062Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=78eea9fc-514011f0-8e6c2610-f9cfba10;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:26.997144Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=78eea9fc-514011f0-8e6c2610-f9cfba10;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.871000s; 2025-06-24T21:16:26.997200Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=78eea9fc-514011f0-8e6c2610-f9cfba10;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:16:26.997373Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 666 >> KqpExplain::SelfJoin3xSameLabels [GOOD] >> KqpExplain::SqlIn >> KqpQuery::NoEvaluate [GOOD] >> KqpExplain::UpdateSecondaryConditional+UseSink [GOOD] >> KqpExplain::UpdateSecondaryConditional-UseSink >> KqpExplain::LimitOffset >> KqpQuery::CreateAsSelectBadTypes-IsOlap [GOOD] >> KqpQuery::CreateAsSelectPath+UseTablePathPrefix ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> CompressExecutor::TestExecutorMemUsage [GOOD] Test command err: 2025-06-24T21:14:26.187247Z :WriteAndReadSomeMessagesWithAsyncCompression INFO: Random seed for debugging is 1750799666187208 2025-06-24T21:14:26.694075Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627310504518534:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.710414Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:26.783554Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627308864497908:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:27.027164Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:14:27.026803Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002eef/r3tmp/tmp3UpTfa/pdisk_1.dat 2025-06-24T21:14:27.233659Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:27.684436Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:27.684573Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:27.708494Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:27.711475Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:27.744669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:27.744748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:27.762193Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:27.763254Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:14:27.763371Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:14:27.764449Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:27.784873Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 17514, node 1 2025-06-24T21:14:27.915369Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/002eef/r3tmp/yandexD9OgDM.tmp 2025-06-24T21:14:27.915399Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/002eef/r3tmp/yandexD9OgDM.tmp 2025-06-24T21:14:27.915602Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/002eef/r3tmp/yandexD9OgDM.tmp 2025-06-24T21:14:27.915724Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:14:28.146751Z INFO: TTestServer started on Port 25371 GrpcPort 17514 TClient is connected to server localhost:25371 PQClient connected to localhost:17514 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:14:28.541388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-24T21:14:30.801653Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627326044367276:2269], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:30.801653Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627326044367284:2272], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:30.801776Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:30.808865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:30.840221Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519627326044367290:2273], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-24T21:14:30.932012Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519627326044367318:2131] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:31.413805Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519627327684388768:2304], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:14:31.415837Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519627326044367333:2277], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:14:31.416783Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YmYwZjZjZjAtM2ZiNzEzZjUtNTRiOWY5YTEtZDIyYjE0Mw==, ActorId: [1:7519627327684388717:2295], ActorState: ExecuteState, TraceId: 01jyhwnqjk8xbfdxh3y61w691y, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:14:31.417739Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NTNkMTIxODYtZjNhMWE2ZDUtODYxNTYxNDYtM2FhYTIwZTk=, ActorId: [2:7519627326044367274:2268], ActorState: ExecuteState, TraceId: 01jyhwnqgf3jzd6s291amhj6rc, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:14:31.419572Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:14:31.420547Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." 
end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:14:31.446541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:31.647343Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627310504518534:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:31.647513Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:14:31.649921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:31.764542Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTab ... erver" ip=ipv6:[::1]:40692 proto=v1 topic=test-topic durationSec=0 2025-06-24T21:16:24.810376Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-24T21:16:24.812604Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 3 sessionId: describe result for acl check 2025-06-24T21:16:24.812814Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-24T21:16:24.812848Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-24T21:16:24.812866Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-24T21:16:24.812894Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [13:7519627815892865013:2545] (SourceId=test-message-group-id, PreferedPartition=(NULL)) StartKqpSession 2025-06-24T21:16:24.817881Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: 
TPartitionChooser [13:7519627815892865013:2545] (SourceId=test-message-group-id, PreferedPartition=(NULL)) Select from the table Test retry state: get retry delay 2025-06-24T21:16:25.013777Z node 13 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976710697. Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-24T21:16:25.013927Z node 13 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [13:7519627815892865024:2547] TxId: 281474976710697. Ctx: { TraceId: 01jyhws6vj4zp6j5pc70v9pj2w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=MWFlMWI4ZjItNjhjOTgwNTctODI2OGFlZGItNmM3ODczNDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-24T21:16:25.019379Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|28533df9-3a29d651-7c9a8e19-860721bf_0] Got error. Status: UNAVAILABLE, Description:
: Error: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=13&id=MWFlMWI4ZjItNjhjOTgwNTctODI2OGFlZGItNmM3ODczNDk=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jyhws6vj4zp6j5pc73gsf1bw" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 , code: 500001 2025-06-24T21:16:25.014175Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=13&id=MWFlMWI4ZjItNjhjOTgwNTctODI2OGFlZGItNmM3ODczNDk=, ActorId: [13:7519627815892865014:2547], ActorState: ExecuteState, TraceId: 01jyhws6vj4zp6j5pc70v9pj2w, Create QueryResponse for error on request, msg: 2025-06-24T21:16:25.016470Z node 13 :PQ_PARTITION_CHOOSER INFO: partition_chooser_impl__abstract_chooser_actor.h:312: TPartitionChooser [13:7519627815892865013:2545] (SourceId=test-message-group-id, PreferedPartition=(NULL)) ReplyError: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=13&id=MWFlMWI4ZjItNjhjOTgwNTctODI2OGFlZGItNmM3ODczNDk=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jyhws6vj4zp6j5pc73gsf1bw" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 2025-06-24T21:16:25.019419Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|28533df9-3a29d651-7c9a8e19-860721bf_0] Write session will restart in 2.000000s 2025-06-24T21:16:25.016615Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 3 reason: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=13&id=MWFlMWI4ZjItNjhjOTgwNTctODI2OGFlZGItNmM3ODczNDk=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jyhws6vj4zp6j5pc73gsf1bw" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 sessionId: 2025-06-24T21:16:25.017035Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 3 sessionId: is DEAD 2025-06-24T21:16:25.019570Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|28533df9-3a29d651-7c9a8e19-860721bf_0] Write session: Do CDS request 2025-06-24T21:16:25.019609Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|28533df9-3a29d651-7c9a8e19-860721bf_0] Do schedule cds request after 2000 ms 2025-06-24T21:16:25.576906Z node 13 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976710699. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-24T21:16:25.577089Z node 13 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [13:7519627820187832392:2549] TxId: 281474976710699. Ctx: { TraceId: 01jyhws6wgez6x8paddk4tynzz, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjZlOWE0NzQtZGVjNjVjODYtZDc3YTYzZDEtYzViMzdiODk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-24T21:16:25.577364Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=13&id=YjZlOWE0NzQtZGVjNjVjODYtZDc3YTYzZDEtYzViMzdiODk=, ActorId: [13:7519627815892865033:2549], ActorState: ExecuteState, TraceId: 01jyhws6wgez6x8paddk4tynzz, Create QueryResponse for error on request, msg: 2025-06-24T21:16:25.579073Z node 13 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jyhws7dvfdjjss4x6bpns706" } } YdbStatus: UNAVAILABLE ConsumedRu: 364 } 2025-06-24T21:16:25.792867Z node 14 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720683. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-24T21:16:25.793016Z node 14 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [14:7519627822524204837:2420] TxId: 281474976720683. Ctx: { TraceId: 01jyhws73x6wqj0qsm1zwf08c1, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=ZDliMzEyN2MtZWU4ZmUxYjItZGUyMTgzYWEtNTQ5OTZiM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-24T21:16:25.793286Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=14&id=ZDliMzEyN2MtZWU4ZmUxYjItZGUyMTgzYWEtNTQ5OTZiM2Y=, ActorId: [14:7519627822524204820:2420], ActorState: ExecuteState, TraceId: 01jyhws73x6wqj0qsm1zwf08c1, Create QueryResponse for error on request, msg: 2025-06-24T21:16:25.795570Z node 14 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jyhws7kn79q4kd9r9m60kwf5" } } YdbStatus: UNAVAILABLE ConsumedRu: 331 } 2025-06-24T21:16:25.814644Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|28533df9-3a29d651-7c9a8e19-860721bf_0] Write session: close. Timeout = 0 ms 2025-06-24T21:16:25.814740Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|28533df9-3a29d651-7c9a8e19-860721bf_0] Write session will now close 2025-06-24T21:16:25.814818Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|28533df9-3a29d651-7c9a8e19-860721bf_0] Write session: aborting 2025-06-24T21:16:25.816235Z :WARNING: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|28533df9-3a29d651-7c9a8e19-860721bf_0] Write session: could not confirm all writes in time or session aborted, perform hard shutdown 2025-06-24T21:16:25.816301Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|28533df9-3a29d651-7c9a8e19-860721bf_0] Write session: destroy 2025-06-24T21:16:25.835180Z node 14 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720684. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-24T21:16:25.835403Z node 14 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [14:7519627822524204868:2428] TxId: 281474976720684. Ctx: { TraceId: 01jyhws7pf7mmt94aqts6z5rkj, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=ZGQxMTZmYmEtYjc1OTYzOTQtZTY5M2QzMjAtMjUyYjIzZDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-24T21:16:25.835744Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=14&id=ZGQxMTZmYmEtYjc1OTYzOTQtZTY5M2QzMjAtMjUyYjIzZDY=, ActorId: [14:7519627822524204865:2428], ActorState: ExecuteState, TraceId: 01jyhws7pf7mmt94aqts6z5rkj, Create QueryResponse for error on request, msg: 2025-06-24T21:16:25.836957Z node 14 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jyhws7pf7mmt94aqtvkmg18n" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } 2025-06-24T21:16:25.978924Z node 13 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976710701. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-24T21:16:25.979098Z node 13 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [13:7519627820187832480:2557] TxId: 281474976710701. Ctx: { TraceId: 01jyhws7tjd3apejec09b6b9h9, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZjhmN2ZhMjEtNWE0YmY2YmQtYjA2YTU2YWUtNGI1ZjZjMzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-24T21:16:25.979383Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=13&id=ZjhmN2ZhMjEtNWE0YmY2YmQtYjA2YTU2YWUtNGI1ZjZjMzU=, ActorId: [13:7519627820187832477:2557], ActorState: ExecuteState, TraceId: 01jyhws7tjd3apejec09b6b9h9, Create QueryResponse for error on request, msg: 2025-06-24T21:16:25.980632Z node 13 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jyhws7tjd3apejec0d44qsvs" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } >> KqpLimits::BigParameter >> KqpQuery::SelectWhereInSubquery >> KqpExplain::PrecomputeRange ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::NoEvaluate [GOOD] Test command err: Trying to start YDB, gRPC: 30211, MsgBus: 24322 2025-06-24T21:16:00.277750Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627713017622834:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:00.277869Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00323d/r3tmp/tmpNVuz6i/pdisk_1.dat 2025-06-24T21:16:00.681511Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:00.681600Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627713017622816:2079] 1750799760276858 != 1750799760276861 TServer::EnableGrpc on GrpcPort 30211, node 1 2025-06-24T21:16:00.712667Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:00.712976Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:00.716276Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:00.752010Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:00.752041Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:00.752052Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:00.752183Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24322 TClient is connected to server localhost:24322 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:16:01.291119Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:01.428948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:01.447239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:01.465017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:01.605688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:01.757178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:01.837777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:03.627121Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627725902526367:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:03.627263Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:04.050932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:04.125059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:04.157049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:04.192926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:04.269636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:04.344763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:04.417230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:04.523500Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627730197494338:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:04.523620Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:04.524005Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627730197494343:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:04.528097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:04.542790Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627730197494345:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:04.600940Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627730197494396:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:05.278010Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627713017622834:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:05.278097Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 3774, MsgBus: 3144 2025-06-24T21:16:07.166457Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627743638547538:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:07.166566Z node 2 :METADATA_PROVIDER ERROR: log.cpp: ... ll use file: (empty maybe) 2025-06-24T21:16:21.867781Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:21.867789Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:21.867969Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8093 TClient is connected to server localhost:8093 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:22.465769Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:16:22.472814Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:22.494184Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:22.569803Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:22.633154Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:22.775849Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:22.866211Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:25.930102Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627819297054663:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:25.930195Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:25.975615Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:26.050491Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:26.102448Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:26.180106Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:26.265323Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:26.316533Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:26.392394Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:26.507830Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627823592022627:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:26.507939Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:26.508193Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627823592022632:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:26.512518Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:26.528644Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627823592022634:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:26.582795Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627823592022685:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:26.610952Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627802117183866:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:26.611040Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:27.821665Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519627827886990273:2480], status: UNSUPPORTED, issues:
: Error: Default error
:7:24: Error: EVALUATE IF is not supported in YDB queries., code: 2030 2025-06-24T21:16:27.821902Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=NDNhYWFiYWEtNjU4Y2U3NzEtOWE0Y2VkODItNzNiNzY5NjQ=, ActorId: [4:7519627827886990265:2475], ActorState: ExecuteState, TraceId: 01jyhws9r517t8d9gvzghnkf82, ReplyQueryCompileError, status UNSUPPORTED remove tx with tx_id: 2025-06-24T21:16:27.854513Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519627827886990277:2482], status: UNSUPPORTED, issues:
: Error: Default error
:4:28: Error: EVALUATE is not supported in YDB queries., code: 2030 2025-06-24T21:16:27.854761Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=NDNhYWFiYWEtNjU4Y2U3NzEtOWE0Y2VkODItNzNiNzY5NjQ=, ActorId: [4:7519627827886990265:2475], ActorState: ExecuteState, TraceId: 01jyhws9sqekb9f96zc4mzg59x, ReplyQueryCompileError, status UNSUPPORTED remove tx with tx_id: 2025-06-24T21:16:28.017362Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519627827886990291:2487], status: UNSUPPORTED, issues:
: Error: Default error
:8:78: Error: ATOM evaluation is not supported in YDB queries., code: 2030 2025-06-24T21:16:28.018431Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=NDNhYWFiYWEtNjU4Y2U3NzEtOWE0Y2VkODItNzNiNzY5NjQ=, ActorId: [4:7519627827886990265:2475], ActorState: ExecuteState, TraceId: 01jyhws9yqee0mhefse3qnphf6, ReplyQueryCompileError, status UNSUPPORTED remove tx with tx_id: >> KqpLimits::ComputeActorMemoryAllocationFailure-useSink [GOOD] >> KqpExplain::UpdateConditional+UseSink >> KqpLimits::ComputeActorMemoryAllocationFailureQueryService+useSink >> KqpQuery::UpdateWhereInSubquery [GOOD] >> KqpQuery::UpdateThenDelete-UseSink >> KqpQuery::CreateAsSelectTypes-NotNull-IsOlap >> KqpParams::ImplicitSameParameterTypesQueryCacheCheck [GOOD] >> KqpParams::InvalidJson >> KqpLimits::ReadsetCountLimit [GOOD] >> KqpLimits::QueryExecTimeoutCancel >> KqpLimits::QSReplySizeEnsureMemoryLimits-useSink [GOOD] >> KqpLimits::QSReplySize+useSink >> KqpAnalyze::AnalyzeTable+ColumnStore >> KqpParams::Decimal-QueryService+UseSink [GOOD] >> KqpParams::Decimal+QueryService+UseSink >> KqpTypes::QuerySpecialTypes [GOOD] >> KqpTypes::DyNumberCompare >> KqpStats::RequestUnitForSuccessExplicitPrepare [GOOD] >> KqpStats::RequestUnitForExecute >> KqpStats::RequestUnitForBadRequestExecute [GOOD] >> KqpStats::RequestUnitForBadRequestExplicitPrepare >> KqpQuery::QueryResultsTruncated [GOOD] >> KqpQuery::QueryStats+UseSink >> KqpExplain::UpdateOnSecondary-UseSink [GOOD] >> KqpLimits::ComputeActorMemoryAllocationFailureQueryService+useSink [GOOD] >> KqpLimits::ComputeActorMemoryAllocationFailureQueryService-useSink >> KqpQuery::CreateAsSelectPath+UseTablePathPrefix [GOOD] >> KqpQuery::CreateAsSelectPath-UseTablePathPrefix >> KqpExplain::UpdateSecondaryConditionalPrimaryKey-UseSink [GOOD] >> KqpExplain::UpdateSecondaryConditionalSecondaryKey+UseSink >> KqpExplain::LimitOffset [GOOD] >> KqpExplain::MultiUsedStage ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::UpdateOnSecondary-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 18712, MsgBus: 21565 2025-06-24T21:16:00.820993Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627712843515002:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:00.827789Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003236/r3tmp/tmpNU6fr5/pdisk_1.dat 2025-06-24T21:16:01.135536Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627712843514977:2079] 1750799760818445 != 1750799760818448 2025-06-24T21:16:01.180928Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18712, node 1 2025-06-24T21:16:01.199619Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:01.200952Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:01.204155Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:01.263596Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:01.263624Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:01.263635Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:01.263792Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21565 TClient is connected to server localhost:21565 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:16:01.827293Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:01.858337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:01.895502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:02.041900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:02.204041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:02.278365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:04.328300Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627730023385805:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:04.328419Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:04.701118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:04.735095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:04.802969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:04.871801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:04.904940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:04.975194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:05.047634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:05.137574Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627734318353778:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:05.137682Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:05.138260Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627734318353783:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:05.142595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:05.159342Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627734318353785:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:05.240635Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627734318353836:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:05.820844Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627712843515002:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:05.820927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"Tables":["EightShard"],"PlanNodeId":2,"Operators":[{"Inputs":[],"Path":"\/Root\/EightShard","Name":"Update","SinkType":"KqpTableSink","Table":"EightShard"}],"Plans":[{"PlanNodeId":1,"Operators":[{"Inputs":[],"Iterator":"[{Data: 0,Key: 100}]","Name":"Iterator"}],"Node Type":"ConstantExpr"}],"Node Type":"Sink"}],"Node Type":"Query","PlanNodeType":"Query","Stats":{"ResourcePoolId":"default"}},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/EightShard","writes":[{"columns":["Data","K ... [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:28.135628Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:28.179916Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:28.234772Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:28.281995Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:28.334515Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:28.380758Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:28.456917Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:28.578791Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627832209224133:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:28.578889Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:28.579203Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627832209224138:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:28.583349Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:28.597252Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627832209224140:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:28.697510Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627832209224191:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:28.947370Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627810734385393:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:28.947458Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:30.340144Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:30.389587Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:30.445637Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) {"Plan":{"Plans":[{"PlanNodeId":29,"Plans":[{"Tables":["SecondaryKeys"],"PlanNodeId":28,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"},{"Inputs":[],"Iterator":"precompute_2_1","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_2_1"}],"Node Type":"Effect"},{"PlanNodeId":27,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":26,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Delete","Table":"SecondaryKeys\/Index\/indexImplTable"},{"Inputs":[],"Iterator":"precompute_3_1","Name":"Iterator"}],"Node Type":"Delete-ConstantExpr","CTE Name":"precompute_3_1"}],"Node Type":"Effect"},{"PlanNodeId":25,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":24,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Upsert","Table":"SecondaryKeys\/Index\/indexImplTable"},{"Inputs":[],"Iterator":"precompute_3_0","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_3_0"}],"Node Type":"Effect"},{"PlanNodeId":22,"Plans":[{"PlanNodeId":21,"Plans":[{"PlanNodeId":20,"Plans":[{"PlanNodeId":19,"Operators":[{"Inputs":[{"Other":"ConstantExpression"},{"Other":"ConstantExpression"},{"Other":"ConstantExpression"}],"Iterator":"FlatMap","Name":"Iterator"}],"Node 
Type":"ConstantExpr"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_3_0","Node Type":"Precompute_3_0","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":18,"Plans":[{"PlanNodeId":17,"Plans":[{"PlanNodeId":16,"Plans":[{"PlanNodeId":15,"Operators":[{"Inputs":[{"InternalOperatorId":1},{"InternalOperatorId":1}],"Iterator":"Map","Name":"Iterator"},{"E-Rows":"1","Inputs":[],"Predicate":"","E-Cost":"0","E-Size":"5","Name":"Filter"}],"Node Type":"ConstantExpr-Filter"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_3_1","Node Type":"Precompute_3_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":13,"Plans":[{"PlanNodeId":12,"Operators":[{"Inputs":[{"Other":"ConstantExpression"}],"Iterator":"[ToDict]","Name":"Iterator"}],"Node Type":"ConstantExpr"}],"Subplan Name":"CTE precompute_2_0","Node Type":"Precompute_2_0","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":11,"Plans":[{"PlanNodeId":10,"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Iterator":"Filter","Name":"Iterator"},{"E-Rows":"2","Inputs":[],"Predicate":"Contains","E-Cost":"0","E-Size":"10","Name":"Filter"}],"Node Type":"ConstantExpr-Filter"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_2_1","Node Type":"Precompute_2_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"E-Size":"0","LookupKeyColumns":["Key"],"Node Type":"TableLookup","PlanNodeId":2,"Path":"\/Root\/SecondaryKeys","Columns":["Fk","Key"],"E-Rows":"2","Plans":[{"PlanNodeId":1,"Operators":[{"Inputs":[],"Iterator":"precompute_0_1","Name":"Iterator"}],"Node Type":"ConstantExpr","CTE Name":"precompute_0_1"}],"Table":"SecondaryKeys","PlanNodeType":"Connection","E-Cost":"0"}],"Node Type":"Stage"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Subplan Name":"CTE precompute_1_0","Node Type":"Precompute_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node Type":"Query","PlanNodeType":"Query","Stats":{"ResourcePoolId":"default"}},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/SecondaryKeys","reads":[{"lookup_by":["Key"],"columns":["Fk","Key"],"type":"Lookup"}],"writes":[{"columns":["Fk","Key","Value"],"type":"MultiUpsert"}]},{"name":"\/Root\/SecondaryKeys\/Index\/indexImplTable","writes":[{"columns":["Fk","Key"],"type":"MultiUpsert"},{"type":"MultiErase"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"}],"Plans":[{"PlanNodeId":8,"Operators":[{"E-Rows":"2","Predicate":"Contains","E-Cost":"0","E-Size":"10","Name":"Filter"}],"Node Type":"Filter"}],"Node Type":"Upsert"}],"Node Type":"Effect"},{"PlanNodeId":9,"Plans":[{"PlanNodeId":10,"Operators":[{"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Delete","Table":"SecondaryKeys\/Index\/indexImplTable"}],"Plans":[{"PlanNodeId":16,"Operators":[{"E-Rows":"1","Predicate":"","E-Cost":"0","E-Size":"5","Name":"Filter"}],"Node Type":"Filter"}],"Node Type":"Delete"}],"Node 
Type":"Effect"},{"PlanNodeId":17,"Plans":[{"PlanNodeId":18,"Operators":[{"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Upsert","Table":"SecondaryKeys\/Index\/indexImplTable"}],"Node Type":"Upsert"}],"Node Type":"Effect"}],"Node Type":"Query","PlanNodeType":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0}}} >> KqpExplain::SqlIn [GOOD] >> KqpExplain::SsaProgramInJsonPlan >> ColumnShardTiers::TieringUsage [GOOD] >> KqpExplain::PrecomputeRange [GOOD] >> KqpExplain::PureExpr >> KqpQuery::SelectWhereInSubquery [GOOD] >> KqpQuery::TableSink_ReplaceDataShardDataQuery+UseSink >> KqpQuery::CreateAsSelectTypes-NotNull-IsOlap [GOOD] >> KqpQuery::CreateAsSelectTypes+NotNull-IsOlap >> KqpExplain::UpdateConditional+UseSink [GOOD] >> KqpExplain::UpdateConditional-UseSink >> KqpLimits::BigParameter [GOOD] >> KqpLimits::AffectedShardsLimit >> KqpQuery::OlapCreateAsSelect_Simple ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::TieringUsage [GOOD] Test command err: 2025-06-24T21:13:56.824708Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:13:56.825106Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:13:56.825279Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004994/r3tmp/tmpgXMDNW/pdisk_1.dat 2025-06-24T21:13:57.155241Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 61664, node 1 TClient is connected to server localhost:12039 2025-06-24T21:13:57.379557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:13:57.429529Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:13:57.429617Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:13:57.429652Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:13:57.429965Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:13:57.438139Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:13:57.438616Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750799633854256 != 1750799633854260 2025-06-24T21:13:57.484541Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:13:57.484694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:13:57.496174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 2025-06-24T21:14:07.906628Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:643:2533], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:07.906807Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:08.028314Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:08.041831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:08.487973Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:781:2620], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:08.488112Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:08.488529Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:786:2625], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:08.494262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:08.636717Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:788:2627], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:14:08.984132Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:880:2690] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:09.643069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:14:10.204970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:10.949211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:11.802899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:14:12.440923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:14:13.771590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:14:14.154760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;RESULT=;EXPECTATION=1 
FINISHED_REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/fake", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-06-24T21:14:29.820756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715702:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/fake", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;RESULT=;EXPECTATION=1 FINISHED_REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/fake", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier2` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/fake", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-06-24T21:14:42.348325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715709:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schem ... 
f1470-514011f0-b3543925-a3687ba8;tablet_id=72075186224037893;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:16:36.066715Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 72075186224037893 Delete Blob DS:2181038080:[72075186224037893:1:13:15:0:1136:0] 2025-06-24T21:16:36.066767Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 72075186224037893 Delete Blob DS:2181038080:[72075186224037893:1:12:14:0:1520:0] 2025-06-24T21:16:36.066938Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=827f8cc0-514011f0-a682c080-69cfb8eb;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-24T21:16:36.066983Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=827f8cc0-514011f0-a682c080-69cfb8eb;fline=with_appended.cpp:65;portions=19,;task_id=827f8cc0-514011f0-a682c080-69cfb8eb; 2025-06-24T21:16:36.067737Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=827f8cc0-514011f0-a682c080-69cfb8eb;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::827f8cc0-514011f0-a682c080-69cfb8eb; 2025-06-24T21:16:36.067815Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=827f8cc0-514011f0-a682c080-69cfb8eb;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:16;path_id:16;size:110960;portions_count:5;); 2025-06-24T21:16:36.067859Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=827f8cc0-514011f0-a682c080-69cfb8eb;tablet_id=72075186224037893;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:16:36.067943Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=827f8cc0-514011f0-a682c080-69cfb8eb;tablet_id=72075186224037893;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=1; 2025-06-24T21:16:36.068031Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=827f8cc0-514011f0-a682c080-69cfb8eb;tablet_id=72075186224037893;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=2;drop=0;skip=0;portions_counter=2;chunks=18;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:16:36.068109Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=827f8cc0-514011f0-a682c080-69cfb8eb;tablet_id=72075186224037893;fline=manager.cpp:10;event=lock;process_id=CS::CLEANUP::PORTIONS::PORTIONS_DROP::82adf100-514011f0-98e803c3-985593c; 2025-06-24T21:16:36.068232Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=827f8cc0-514011f0-a682c080-69cfb8eb;tablet_id=72075186224037893;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:36.068302Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=827f8cc0-514011f0-a682c080-69cfb8eb;tablet_id=72075186224037893;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=1.000000s; 2025-06-24T21:16:36.068346Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=827f8cc0-514011f0-a682c080-69cfb8eb;tablet_id=72075186224037893;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:16:36.068459Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 72075186224037893 Save Batch GenStep: 1:16 Blob count: 1 2025-06-24T21:16:36.068698Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 
72075186224037893 Save Batch GenStep: 1:17 Blob count: 1 2025-06-24T21:16:36.069928Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2772:4076];ev=NKikimr::NColumnShard::TEvPrivate::TEvStartCompaction;path_id=16;fline=storage.cpp:87;event=granule_compaction_weight;priority=(10,19999998864); 2025-06-24T21:16:36.070043Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2772:4076];ev=NKikimr::NColumnShard::TEvPrivate::TEvStartCompaction;path_id=16;fline=optimizer.h:900;stop_instant=NO_VALUE_OPTIONAL;size=2656;next=;count=2;info={bytes=1136;count=1;records=1};event=start_optimization;stop_point=;main_portion=19; 2025-06-24T21:16:36.070307Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2772:4076];ev=NKikimr::NColumnShard::TEvPrivate::TEvStartCompaction;fline=manager.cpp:10;event=lock;process_id=CS::GENERAL::82ae4308-514011f0-9ac7e8ad-1781d08a; 2025-06-24T21:16:36.071551Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:2817:4105];tablet_id=72075186224037893;parent=[1:2772:4076];fline=manager.cpp:88;event=ask_data;request=request_id=82;16={portions_count=2};; 2025-06-24T21:16:36.071931Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:2817:4105];tablet_id=72075186224037893;parent=[1:2772:4076];fline=manager.cpp:88;event=ask_data;request=request_id=84;16={portions_count=2};; 2025-06-24T21:16:36.073148Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 72075186224037893 2025-06-24T21:16:36.073276Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[47] (CS::CLEANUP::PORTIONS) apply at tablet 72075186224037893 2025-06-24T21:16:36.074255Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=3808;raw_bytes=48720;count=2;records=43} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=108288;raw_bytes=3516209;count=2;records=2927} inactive {blob_bytes=2656;raw_bytes=2178;count=2;records=2} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 72075186224037893 2025-06-24T21:16:36.074865Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=82ae4308-514011f0-9ac7e8ad-1781d08a;fline=actor.cpp:48;task=agents_waiting=1;additional_info=();; 2025-06-24T21:16:36.075291Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=82ae4308-514011f0-9ac7e8ad-1781d08a; 2025-06-24T21:16:36.081041Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
event=on_execution;consumer=GENERAL_COMPACTION;task_id=82ae4308-514011f0-9ac7e8ad-1781d08a;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=82ae4308-514011f0-9ac7e8ad-1781d08a;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=72075186224037893;parent_id=[1:2772:4076];task_id=82ae4308-514011f0-9ac7e8ad-1781d08a;task_class=CS::GENERAL;fline=general_compaction.cpp:138;event=blobs_created_diff;appended=0;;column_id:1;chunk_idx:0;blob_range:[NO_BLOB:0:192];;column_id:2;chunk_idx:0;blob_range:[NO_BLOB:192:232];;column_id:3;chunk_idx:0;blob_range:[NO_BLOB:424:256];;column_id:4;chunk_idx:0;blob_range:[NO_BLOB:680:192];;column_id:5;chunk_idx:0;blob_range:[NO_BLOB:872:264];;column_id:4294967040;chunk_idx:0;blob_range:[NO_BLOB:1136:192];;column_id:4294967041;chunk_idx:0;blob_range:[NO_BLOB:1328:192];;;;switched=(portion_id:20;path_id:16;records_count:1;schema_version:2;level:0;cs:plan_step=1742161894000;tx_id=18446744073709551615;;wi:11;;column_size:1136;index_size:0;meta:(()););(portion_id:19;path_id:16;records_count:1;schema_version:2;level:0;;column_size:1520;index_size:0;meta:(()););; 2025-06-24T21:16:36.081145Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: event=on_execution;consumer=GENERAL_COMPACTION;task_id=82ae4308-514011f0-9ac7e8ad-1781d08a;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=82ae4308-514011f0-9ac7e8ad-1781d08a;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=72075186224037893;parent_id=[1:2772:4076];task_id=82ae4308-514011f0-9ac7e8ad-1781d08a;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=2; 2025-06-24T21:16:36.081385Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2772:4076];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-24T21:16:36.081493Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2772:4076];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:63;event=Limiter; 2025-06-24T21:16:36.081846Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 72075186224037893 2025-06-24T21:16:36.081931Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2772:4076];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:51;memory_size=94;data_size=70;sum=2726;count=57; 2025-06-24T21:16:36.081992Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2772:4076];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:71;memory_size=246;data_size=238;sum=7134;count=58;size_of_meta=136; 2025-06-24T21:16:36.082051Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2772:4076];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=318;data_size=310;sum=9222;count=29;size_of_portion=208; 2025-06-24T21:16:36.082206Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[49] (CS::GENERAL) apply at tablet 72075186224037893 2025-06-24T21:16:36.085638Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 72075186224037893 Save Batch GenStep: 1:18 Blob count: 1 2025-06-24T21:16:36.085790Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=3808;raw_bytes=48720;count=2;records=43} compacted 
{blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=108288;raw_bytes=3516209;count=2;records=2927} inactive {blob_bytes=2656;raw_bytes=2178;count=2;records=2} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 72075186224037893 Cleaning waiting... Fake storage clean FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037892 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037892 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037892 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037892 FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037893 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037893 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037893 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037893 >> KqpQuery::UpdateThenDelete-UseSink [GOOD] >> KqpExplain::UpdateSecondaryConditional-UseSink [GOOD] >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey+UseSink |96.0%| [TA] $(B)/ydb/core/tx/tiering/ut/test-results/unittest/{meta.json ... results_accumulator.log} |96.0%| [TA] {RESULT} $(B)/ydb/core/tx/tiering/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpParams::InvalidJson [GOOD] >> KqpTypes::DyNumberCompare [GOOD] >> KqpTypes::SelectNull >> KqpStats::RequestUnitForExecute [GOOD] >> KqpStats::StatsProfile >> KqpParams::RowsList >> KqpLimits::ComputeActorMemoryAllocationFailureQueryService-useSink [GOOD] >> KqpLimits::ComputeNodeMemoryLimit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::UpdateThenDelete-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 63422, MsgBus: 8410 2025-06-24T21:16:12.059224Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627763670123061:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:12.059290Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00321b/r3tmp/tmp7uvXf8/pdisk_1.dat 2025-06-24T21:16:12.383474Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627763670123042:2079] 1750799772057970 != 1750799772057973 2025-06-24T21:16:12.393197Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63422, node 1 2025-06-24T21:16:12.456439Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:12.456548Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:12.458111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:12.471966Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:12.472001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:12.472011Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:12.472208Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8410 TClient is connected to server localhost:8410 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:16:12.991774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:13.010134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:13.018327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:13.072483Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:13.163051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:13.329487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:13.401388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:15.446688Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627776555026572:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:15.446804Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:15.852094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:15.905803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:15.944172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:16.015357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:16.047424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:16.085745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:16.160430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:16.245727Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627780849994536:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:16.245817Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:16.246126Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627780849994541:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:16.250054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:16.261548Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627780849994543:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:16.350271Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627780849994594:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:17.059511Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627763670123061:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:17.059603Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:17.799567Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519627785144962189:2483], status: GENERIC_ERROR, issues:
:3:26: Error: mismatched input '[' expecting {'*', '(', '@', '$', ABORT, ACTION, ADD, AFTER, ALL, ALTER, ANALYZE, AND, ANSI, ANY, ARRAY, AS, ASC, ASSUME, AS ... pp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519627814582131369:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:29.433913Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 20300, MsgBus: 16180 2025-06-24T21:16:31.284627Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519627845696404591:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:31.284700Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00321b/r3tmp/tmpi51xVg/pdisk_1.dat 2025-06-24T21:16:31.443255Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:31.443591Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519627845696404572:2079] 1750799791283522 != 1750799791283525 2025-06-24T21:16:31.461277Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:31.461367Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:31.464111Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20300, node 4 2025-06-24T21:16:31.532021Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:31.532049Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:31.532058Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:31.532205Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16180 TClient is connected to server localhost:16180 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:16:32.132966Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:32.139940Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:32.151038Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:16:32.233584Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:32.366330Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:32.424340Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:32.517660Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:35.046379Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627862876275398:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:35.046476Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:35.113380Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:35.156455Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:35.233774Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:35.272373Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:35.324541Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:35.385101Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:35.465152Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:35.571322Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627862876276067:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:35.571425Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:35.571576Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627862876276072:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:35.575432Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:35.588670Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627862876276074:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:35.679000Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627862876276125:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:36.287984Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627845696404591:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:36.288072Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; [] >> KqpStats::RequestUnitForBadRequestExplicitPrepare [GOOD] >> KqpStats::OneShardNonLocalExec+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpParams::InvalidJson [GOOD] Test command err: Trying to start YDB, gRPC: 29087, MsgBus: 10463 2025-06-24T21:16:13.098205Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627769261827625:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:13.098279Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003211/r3tmp/tmpqwBrwM/pdisk_1.dat 2025-06-24T21:16:13.456624Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:13.457044Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627769261827604:2079] 1750799773096828 != 1750799773096831 TServer::EnableGrpc on GrpcPort 29087, node 1 2025-06-24T21:16:13.497917Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:13.501670Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:13.503980Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:13.523775Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:13.523799Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:13.523811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:13.523947Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10463 TClient is connected to server localhost:10463 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:14.079587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:14.107015Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:14.107943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:14.270745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:14.466339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:14.526605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:16.436868Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627782146731126:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:16.437010Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:16.815238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:16.848696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:16.887582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:16.917520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:16.949797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:17.020506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:17.056235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:17.135434Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627786441699082:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:17.135510Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:17.135954Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627786441699087:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:17.139797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:17.150390Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627786441699089:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:17.218667Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627786441699140:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:18.098050Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627769261827625:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:18.098144Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 62650, MsgBus: 1518 2025-06-24T21:16:19.363127Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627794139553143:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:19.363253Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPat ... 03211/r3tmp/tmpne0YcT/pdisk_1.dat 2025-06-24T21:16:32.636510Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:32.639367Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519627851617776513:2079] 1750799792470922 != 1750799792470925 2025-06-24T21:16:32.657264Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:32.657376Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:32.660281Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22593, node 4 2025-06-24T21:16:32.749535Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:32.749572Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:32.749582Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:32.749741Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18661 TClient is connected to server localhost:18661 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:33.342649Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:33.350628Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:33.370113Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:33.446336Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:33.555646Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:16:33.631188Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:33.707953Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:36.663168Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627868797647349:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:36.663269Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:36.740618Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:36.815800Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:36.857589Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:36.903695Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:36.943219Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:37.019294Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:37.061244Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:37.155994Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627873092615310:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:37.156087Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:37.156582Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627873092615315:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:37.161829Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:37.177028Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627873092615317:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:37.249876Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627873092615368:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:37.512531Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627851617776535:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:37.512742Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:38.708894Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:38.860367Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=ZTJmNGM3ZjUtNmQ0NmZhZTktNGM3YjA2ZjAtMTYxOTI4Zg==, ActorId: [4:7519627877387582947:2475], ActorState: ExecuteState, TraceId: 01jyhwsmfn012xrfaqnrk4p0xa, Create QueryResponse for error on request, msg: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1052: Invalid Json value
: Error: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1052: Invalid Json value >> KqpQuery::QueryStats+UseSink [GOOD] >> KqpQuery::QueryStats-UseSink >> KqpQuery::CreateAsSelectPath-UseTablePathPrefix [GOOD] >> KqpQuery::ReadOverloaded+StreamLookup [GOOD] >> KqpQuery::ReadOverloaded-StreamLookup >> KqpParams::Decimal+QueryService+UseSink [GOOD] >> KqpQuery::CreateAsSelectTypes+NotNull-IsOlap [GOOD] >> KqpQuery::CreateAsSelectTypes-NotNull+IsOlap ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::CreateAsSelectPath-UseTablePathPrefix [GOOD] Test command err: Trying to start YDB, gRPC: 28038, MsgBus: 14820 2025-06-24T21:16:20.930618Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627798656599178:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:20.930677Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031de/r3tmp/tmp0H1V26/pdisk_1.dat 2025-06-24T21:16:21.224247Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627798656599156:2079] 1750799780929363 != 1750799780929366 2025-06-24T21:16:21.264473Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28038, node 1 2025-06-24T21:16:21.356763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:21.356870Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:21.358880Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:21.378500Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:21.378531Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:21.378540Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:21.378711Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14820 TClient is connected to server localhost:14820 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:21.960016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:21.975395Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:21.977782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:24.160954Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627815836468977:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:24.161122Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:24.161820Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627815836468991:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:24.166838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:24.183872Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627815836468993:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:16:24.280937Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627815836469044:2333] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:24.666888Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519627815836469065:2300], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:6:62: Error: At function: KiCreateTable!
:6:20: Error: Invalid type for column: Value. Only YQL data types and PG types are currently supported, code: 2031 2025-06-24T21:16:24.668505Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NTdiNGM5YS1kOTJiYTg1Ni1hYzM3MTI5NS1jZDhmYTlkMQ==, ActorId: [1:7519627815836468954:2289], ActorState: ExecuteState, TraceId: 01jyhws4396yh8nqazaxbzv6s9, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-24T21:16:24.721102Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519627815836469089:2309], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:6:45: Error: At function: KiCreateTable!
:6:20: Error: Invalid type for column: Value. Only YQL data types and PG types are currently supported, code: 2031 2025-06-24T21:16:24.721309Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YWNiODVmODEtYWFlNmI4ODEtYzZmY2U4ZmUtMWYzNzEyODU=, ActorId: [1:7519627815836469078:2303], ActorState: ExecuteState, TraceId: 01jyhws6qa70q517rbzk6pdvtp, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-24T21:16:24.764989Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519627815836469101:2315], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:6:43: Error: At function: KiCreateTable!
:6:20: Error: Invalid type for column: Value. Only YQL data types and PG types are currently supported, code: 2031 2025-06-24T21:16:24.766689Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZTQ2MWM2ZTAtOWY0MmNhNjgtYTJiYmJmMWUtOGY3NDdiY2I=, ActorId: [1:7519627815836469095:2312], ActorState: ExecuteState, TraceId: 01jyhws6rr3xv7rgfkxnzrsqfr, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: Trying to start YDB, gRPC: 6572, MsgBus: 20656 2025-06-24T21:16:25.464682Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627822372189053:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:25.464769Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031de/r3tmp/tmp7aVqmP/pdisk_1.dat 2025-06-24T21:16:25.641464Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:25.645398Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519627822372189029:2079] 1750799785463381 != 1750799785463384 2025-06-24T21:16:25.664139Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:25.664251Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:25.668358Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6572, node 2 2025-06-24T21:16:25.729762Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:25.729787Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:25.729794Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:25.729923Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20656 TClient is connected to server localhost:20656 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-24T21:16:26.237978Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:16:26.247541Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:26.472733Z node ... do unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:16:30.921286Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:33.780661Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519627855918688547:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:33.780719Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519627855918688524:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:33.780831Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:33.784864Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:33.802383Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519627855918688553:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-24T21:16:33.899701Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519627855918688604:2360] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:33.959133Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.326314Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.904570Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519627838738818799:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:34.904662Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 15358, MsgBus: 64884 2025-06-24T21:16:35.711117Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519627865090946901:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:35.711211Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031de/r3tmp/tmpDbFwBR/pdisk_1.dat 2025-06-24T21:16:35.906467Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:35.919849Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:35.919943Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:35.924628Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15358, node 4 2025-06-24T21:16:35.983809Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:35.983836Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:35.983846Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:35.983991Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64884 TClient is connected to server localhost:64884 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:36.656771Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:36.665607Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:36.674924Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:16:36.719062Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:36.728750Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-24T21:16:36.741406Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:16:39.672290Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627882270816719:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:39.672490Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:39.672780Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627882270816744:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:39.677764Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:39.703410Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627882270816746:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-24T21:16:39.777780Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627882270816799:2361] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:39.807473Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:40.089997Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710666, at schemeshard: 72057594046644480 2025-06-24T21:16:40.095336Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:40.711140Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627865090946901:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:40.711220Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpExplain::MultiUsedStage [GOOD] >> KqpExplain::MergeConnection >> TColumnShardTestReadWrite::CompactionSplitGranule_PKInt32 [GOOD] >> KqpExplain::PureExpr [GOOD] >> KqpExplain::ReadTableRangesFullScan >> KqpStats::StreamLookupStats+StreamLookupJoin ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpParams::Decimal+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 19310, MsgBus: 3926 2025-06-24T21:16:09.103186Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627750680829594:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:09.105331Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003232/r3tmp/tmpe6A3L3/pdisk_1.dat 2025-06-24T21:16:09.495315Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627750680829571:2079] 1750799769100673 != 1750799769100676 2025-06-24T21:16:09.504456Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19310, node 1 2025-06-24T21:16:09.536321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:09.540514Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-24T21:16:09.567238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:09.593833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:09.593859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:09.593873Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:09.594063Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3926 TClient is connected to server localhost:3926 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:16:10.119832Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:10.219770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:10.264832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:10.433856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:10.632893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:10.717662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:12.683710Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627763565733086:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:12.683838Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:13.053430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:13.089334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:13.129665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:13.171678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:13.240866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:13.291883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:13.334107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:13.437326Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627767860701049:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:13.437413Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:13.437755Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627767860701054:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:13.442041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:13.454331Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627767860701056:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:13.519274Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627767860701107:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:14.104996Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627750680829594:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:14.105085Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 20811, MsgBus: 64509 2025-06-24T21:16:15.943994Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627779757258052:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:15.944051Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath ... meshard: 72057594046644480 2025-06-24T21:16:34.090331Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:34.182845Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:34.249327Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:34.386444Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:34.484145Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:37.482303Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627872855373566:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:37.482384Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:37.555089Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:37.600131Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:37.680606Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:37.764525Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:37.847029Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:37.905459Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:37.986253Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:38.104283Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627877150341532:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:38.104469Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:38.104909Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627877150341537:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:38.109878Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:38.128161Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627877150341539:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:38.229298Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627877150341590:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:38.229562Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627855675502778:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:38.229659Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:39.714076Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:40.801702Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519627885740276692:2527], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:4:17: Error: At function: RemovePrefixMembers, At function: Unordered, At function: PersistableRepr, At function: OrderedSqlProject, At tuple, At function: SqlProjectItem, At lambda
:3:25: Error: At function: Parameter, At function: DataType
:3:25: Error: Invalid decimal precision: 99 2025-06-24T21:16:40.802110Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=ZDc5MjRmMTMtZGY5NTFjYS05MDdkYmE0Zi0yNGJiMDQ2ZA==, ActorId: [4:7519627885740276690:2526], ActorState: ExecuteState, TraceId: 01jyhwspee4hw6x2530ax9fhbp, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:16:40.942064Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=N2E1ZjcyLTY0OWI3OWMyLTE4M2RlZTk3LWQwOWY0MGEw, ActorId: [4:7519627885740276696:2529], ActorState: ExecuteState, TraceId: 01jyhwspfabwmm0c63vxwkqg7x, Create QueryResponse for error on request, msg: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1223: ydb/core/kqp/query_data/kqp_query_data.cpp:271: Parameter $value22 type mismatch, expected: { Kind: Data Data { Scheme: 4865 DecimalParams { Precision: 22 Scale: 9 } } }, actual: Type (Data), schemeType: Decimal(35,10), schemeTypeId: 4865 2025-06-24T21:16:40.980813Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519627885740276712:2535], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:7:29: Error: At function: KiWriteTable!
:7:50: Error: Failed to convert type: Struct<'Key':Int32,'Value22':Decimal(35,10),'Value35':Decimal(35,10)> to Struct<'Key':Int32?,'Value22':Decimal(22,9)?,'Value35':Decimal(35,10)?>
:4:25: Error: Implicit decimal cast would lose precision
:7:50: Error: Failed to convert 'Value22': Decimal(35,10) to Optional
:7:50: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-24T21:16:40.983110Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=NDMwNzE5NDctM2QyMDA5NGMtMmI4MTRhYTctZTE1OGQwMw==, ActorId: [4:7519627885740276710:2534], ActorState: ExecuteState, TraceId: 01jyhwspky1h10hedcakk7m646, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:16:41.020399Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519627890035244019:2540], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:3:29: Error: At function: KiWriteTable!
:3:50: Error: Failed to convert type: Struct<'Key':Int32,'Value22':Decimal(35,10),'Value35':Decimal(35,10)> to Struct<'Key':Int32?,'Value22':Decimal(22,9)?,'Value35':Decimal(35,10)?>
:0:14: Error: Implicit decimal cast would lose precision
:3:50: Error: Failed to convert 'Value22': Decimal(35,10) to Optional
:3:50: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-24T21:16:41.021162Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=YmRkMDExNWYtOTVlZGI0YzQtZTZhNDU0ZGYtYTFkOWI0, ActorId: [4:7519627885740276721:2539], ActorState: ExecuteState, TraceId: 01jyhwspn44z2ydkg1426cjdgn, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> KqpExplain::UpdateConditional-UseSink [GOOD] >> KqpExplain::UpdateConditionalKey+UseSink >> KqpQuery::TableSink_ReplaceDataShardDataQuery+UseSink [GOOD] >> KqpQuery::TableSink_ReplaceDataShardDataQuery-UseSink >> KqpLimits::StreamWrite+Allowed [GOOD] >> KqpLimits::StreamWrite-Allowed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranule_PKInt32 [GOOD] Test command err: 2025-06-24T21:14:45.292572Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:45.319109Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:45.319447Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:45.327004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:45.327238Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:45.327507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:45.327604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:45.327686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:45.327796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:45.327919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:45.328045Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:45.328126Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:45.328210Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:45.328300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:45.363733Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:45.364059Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:45.364114Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:45.364312Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:45.364509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:45.364598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:45.364652Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:45.364804Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:45.364893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:45.364943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:45.364986Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:45.365202Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:45.365283Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:45.365333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 
2025-06-24T21:14:45.365366Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:45.365481Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:45.365546Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:45.365623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:45.365675Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:45.365734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:45.365788Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:45.365828Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:45.366068Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:45.366120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:45.366174Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:45.366359Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:45.366429Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:45.366466Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:45.366634Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:45.366686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:45.366722Z 
node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:45.366803Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:45.366875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:45.366949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:45.366982Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:45.367538Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=51; 2025-06-24T21:14:45.367636Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=36; 2025-06-24T21:14:45.367732Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=39; 2025-06-24T21:14:45.367814Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=40; 2025-06-24T21:14:45.367906Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:45.368026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:45.368068Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:45.368121Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=2126;data_size=2102;sum=522292;count=216;size_of_portion=208; 2025-06-24T21:16:42.640576Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=26719; 2025-06-24T21:16:42.640718Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=14; 2025-06-24T21:16:42.641662Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=878; 2025-06-24T21:16:42.641742Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=28113; 2025-06-24T21:16:42.641797Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=28286; 2025-06-24T21:16:42.641872Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=14; 2025-06-24T21:16:42.642220Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=280; 2025-06-24T21:16:42.642288Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=29308; 2025-06-24T21:16:42.642493Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=130; 2025-06-24T21:16:42.642664Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=111; 2025-06-24T21:16:42.642893Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=165; 2025-06-24T21:16:42.643102Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=150; 2025-06-24T21:16:42.650106Z node 1 :TX_COLUMNSHARD 
INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=6929; 2025-06-24T21:16:42.657944Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=7713; 2025-06-24T21:16:42.658064Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=13; 2025-06-24T21:16:42.658129Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=13; 2025-06-24T21:16:42.658177Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=7; 2025-06-24T21:16:42.658306Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=57; 2025-06-24T21:16:42.658367Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=7; 2025-06-24T21:16:42.658489Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=80; 2025-06-24T21:16:42.658543Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=7; 2025-06-24T21:16:42.658631Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=47; 2025-06-24T21:16:42.658737Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=60; 2025-06-24T21:16:42.658839Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=61; 2025-06-24T21:16:42.658957Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=54381; 2025-06-24T21:16:42.659191Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=108238352;raw_bytes=183045560;count=15;records=1915000} inactive {blob_bytes=205426288;raw_bytes=316809958;count=39;records=3635000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:16:42.659341Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:16:42.659413Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:16:42.659515Z node 1 
:TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:16:42.659579Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:16:42.659743Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:16:42.659840Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-24T21:16:42.659925Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799389394;tx_id=18446744073709551615;;current_snapshot_ts=1750799686322; 2025-06-24T21:16:42.659989Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:16:42.660053Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:42.660134Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:42.660264Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:16:42.662229Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:4112:6074];tablet_id=9437184;parent=[1:4077:6047];fline=manager.cpp:88;event=ask_data;request=request_id=205;9438184000001={portions_count=54};; 2025-06-24T21:16:42.665102Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:16:42.665521Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:16:42.665569Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:16:42.665612Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:16:42.665681Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:16:42.665791Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-24T21:16:42.665884Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799389394;tx_id=18446744073709551615;;current_snapshot_ts=1750799686322; 2025-06-24T21:16:42.665955Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:16:42.666022Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:42.666081Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:42.666187Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.999000s; 2025-06-24T21:16:42.666289Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; >> TColumnShardTestReadWrite::ReadGroupBy [GOOD] >> KqpLimits::AffectedShardsLimit [GOOD] >> KqpLimits::CancelAfterRoTx >> KqpExplain::UpdateSecondaryConditionalSecondaryKey+UseSink [GOOD] >> KqpExplain::UpdateSecondaryConditionalSecondaryKey-UseSink >> KqpTypes::SelectNull [GOOD] >> KqpTypes::MultipleCurrentUtcTimestamp >> KqpQuery::OlapCreateAsSelect_Simple [GOOD] >> KqpQuery::OltpCreateAsSelect_Simple >> TColumnShardTestReadWrite::CompactionSplitGranule_PKDatetime [GOOD] >> TColumnShardTestReadWrite::CompactionSplitGranule_PKTimestamp [GOOD] >> KqpParams::RowsList [GOOD] >> KqpParams::MissingParameter >> KqpStats::StatsProfile [GOOD] >> KqpStats::SelfJoin >> KqpExplain::SsaProgramInJsonPlan [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::ReadGroupBy [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8328;columns=19; -- group by key: 0 2025-06-24T21:14:44.477588Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:44.515717Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:44.516033Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:44.524201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:44.524436Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:44.524645Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:44.524752Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:44.524933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:44.525032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:44.525162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:44.525314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:44.525441Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:44.525566Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.525675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:44.555923Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:44.556204Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:44.556257Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:44.556412Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:44.559314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:44.559402Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:44.559452Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:44.559573Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:44.559646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:44.559686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:44.559716Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:44.559923Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:44.560026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:44.560072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:44.560101Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:44.560190Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:44.560251Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:44.560300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:44.560346Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:44.560398Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:44.560447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:44.560479Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:44.560698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:44.560742Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:44.560777Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:44.560956Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:44.561013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:44.561044Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:44.561167Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:44.561207Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.561241Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.561319Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:44.561379Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:44.561414Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:44.561443Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:44.561984Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=126; 2025-06-24T21:14:44.562080Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=38; 2025-06-24T21:14:44.562153Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=31; 2025-06-24T21:14:44.562229Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=33; 2025-06-24T21:14:44.562332Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:44.562418Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:44.562464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Ex ... :2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=2052; 2025-06-24T21:16:44.885610Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1;merger=0;interval_id=2052; 2025-06-24T21:16:44.885655Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-24T21:16:44.885769Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;); 2025-06-24T21:16:44.885808Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=1;finished=1; 2025-06-24T21:16:44.885851Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:16:44.886458Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:16:44.886655Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:1;schema=100: binary 101: binary 102: binary 103: 
uint64;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;); 2025-06-24T21:16:44.886705Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:16:44.886818Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;);columns=4;rows=1; 2025-06-24T21:16:44.886893Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=26;num_rows=1;batch_columns=100,101,102,103; 2025-06-24T21:16:44.887197Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[54:419:2431];bytes=26;rows=1;faults=0;finished=0;fault=0;schema=100: binary 101: binary 102: binary 103: uint64; 2025-06-24T21:16:44.887339Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;); 2025-06-24T21:16:44.887466Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;); 2025-06-24T21:16:44.887568Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;); 2025-06-24T21:16:44.887881Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:16:44.887996Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;); 2025-06-24T21:16:44.888102Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;); 2025-06-24T21:16:44.888140Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [54:420:2432] finished for tablet 9437184 2025-06-24T21:16:44.888643Z node 54 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[54:419:2431];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.015},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.017}],"full":{"a":1750799804870790,"name":"_full_task","f":1750799804870790,"d_finished":0,"c":0,"l":1750799804888194,"d":17404},"events":[{"name":"bootstrap","f":1750799804870997,"d_finished":3030,"c":1,"l":1750799804874027,"d":3030},{"a":1750799804887857,"name":"ack","f":1750799804886427,"d_finished":1167,"c":1,"l":1750799804887594,"d":1504},{"a":1750799804887843,"name":"processing","f":1750799804874126,"d_finished":8639,"c":10,"l":1750799804887596,"d":8990},{"name":"ProduceResults","f":1750799804872827,"d_finished":3225,"c":13,"l":1750799804888124,"d":3225},{"a":1750799804888128,"name":"Finish","f":1750799804888128,"d_finished":0,"c":0,"l":1750799804888194,"d":66},{"name":"task_result","f":1750799804874144,"d_finished":7310,"c":9,"l":1750799804885945,"d":7310}],"id":"9437184::2052"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;); 2025-06-24T21:16:44.888735Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[54:419:2431];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:16:44.889166Z node 54 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[54:419:2431];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.015},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.017}],"full":{"a":1750799804870790,"name":"_full_task","f":1750799804870790,"d_finished":0,"c":0,"l":1750799804888776,"d":17986},"events":[{"name":"bootstrap","f":1750799804870997,"d_finished":3030,"c":1,"l":1750799804874027,"d":3030},{"a":1750799804887857,"name":"ack","f":1750799804886427,"d_finished":1167,"c":1,"l":1750799804887594,"d":2086},{"a":1750799804887843,"name":"processing","f":1750799804874126,"d_finished":8639,"c":10,"l":1750799804887596,"d":9572},{"name":"ProduceResults","f":1750799804872827,"d_finished":3225,"c":13,"l":1750799804888124,"d":3225},{"a":1750799804888128,"name":"Finish","f":1750799804888128,"d_finished":0,"c":0,"l":1750799804888776,"d":648},{"name":"task_result","f":1750799804874144,"d_finished":7310,"c":9,"l":1750799804885945,"d":7310}],"id":"9437184::2052"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;); 2025-06-24T21:16:44.889249Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:16:44.870199Z;index_granules=0;index_portions=1;index_batches=2;schema_columns=4;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=14056;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=14056;selected_rows=0; 2025-06-24T21:16:44.889285Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:16:44.889662Z node 54 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[54:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranule_PKTimestamp [GOOD] Test command err: 2025-06-24T21:14:44.477224Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:44.505931Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:44.506199Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:44.515250Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:44.515500Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:44.515782Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:44.515920Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:44.516070Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:44.516225Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:44.516344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:44.516451Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:44.516572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:44.516698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.516802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:44.556136Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:44.556386Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:44.556451Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:44.556622Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:44.557974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:44.558065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:44.558116Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:44.558233Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:44.558306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:44.558347Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:44.558379Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:44.558558Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:44.558625Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:44.558665Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:44.558697Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:44.558777Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:44.558829Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:44.558898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:44.558951Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:44.559005Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:44.559059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2025-06-24T21:14:44.559098Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:44.559352Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:44.559394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:44.559434Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:44.559634Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:44.559683Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:44.559728Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:44.559867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:44.559911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.559959Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:44.560041Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:44.560100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:44.560163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:44.560203Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:44.560604Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=38; 2025-06-24T21:14:44.560700Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=41; 2025-06-24T21:14:44.560780Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=33; 2025-06-24T21:14:44.561343Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=508; 2025-06-24T21:14:44.561451Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:44.561546Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:44.561590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:44.561643Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tab ... 84;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=2158;data_size=2150;sum=529040;count=216;size_of_portion=208; 2025-06-24T21:16:45.987705Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=33125; 2025-06-24T21:16:45.987801Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=19; 2025-06-24T21:16:45.988628Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=758; 2025-06-24T21:16:45.988688Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=34380; 2025-06-24T21:16:45.988749Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=34605; 2025-06-24T21:16:45.988851Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=23; 2025-06-24T21:16:45.989191Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=270; 2025-06-24T21:16:45.989238Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=35821; 2025-06-24T21:16:45.989443Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=131; 2025-06-24T21:16:45.989610Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=95; 2025-06-24T21:16:45.989848Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=177; 2025-06-24T21:16:45.990051Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=145; 2025-06-24T21:16:45.997073Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=6933; 2025-06-24T21:16:46.002443Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=5237; 2025-06-24T21:16:46.002546Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=24; 2025-06-24T21:16:46.002602Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=17; 2025-06-24T21:16:46.002659Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-24T21:16:46.002728Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=41; 2025-06-24T21:16:46.002782Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=14; 2025-06-24T21:16:46.002919Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=99; 2025-06-24T21:16:46.002984Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=11; 2025-06-24T21:16:46.003049Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=29; 2025-06-24T21:16:46.003134Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=52; 2025-06-24T21:16:46.003282Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=65; 2025-06-24T21:16:46.003323Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=60555; 2025-06-24T21:16:46.003499Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=108275528;raw_bytes=198365560;count=15;records=1915000} inactive {blob_bytes=205496480;raw_bytes=345889958;count=39;records=3635000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:16:46.003609Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:16:46.003659Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:16:46.003753Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:16:46.003810Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:16:46.003922Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:16:46.004006Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-24T21:16:46.004095Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799388554;tx_id=18446744073709551615;;current_snapshot_ts=1750799685473; 2025-06-24T21:16:46.004172Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:16:46.004228Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:46.004284Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:46.004409Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:16:46.006045Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:4143:6103];tablet_id=9437184;parent=[1:4108:6076];fline=manager.cpp:88;event=ask_data;request=request_id=205;9438184000001={portions_count=54};; 2025-06-24T21:16:46.008864Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:16:46.009271Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:16:46.009317Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-24T21:16:46.009379Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:16:46.009460Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:16:46.009582Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-24T21:16:46.009669Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799388554;tx_id=18446744073709551615;;current_snapshot_ts=1750799685473; 2025-06-24T21:16:46.009728Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:16:46.009790Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:46.009848Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:46.009968Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.999000s; 2025-06-24T21:16:46.010056Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranule_PKDatetime [GOOD] Test command err: 2025-06-24T21:14:49.172937Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:49.195758Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:49.196030Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:49.204168Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 
2025-06-24T21:14:49.204444Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:49.204725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:49.204855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:49.205009Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:49.205159Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:49.205284Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:49.205403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:49.205522Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:49.205654Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.205799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:49.236577Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:49.236850Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:49.236910Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:49.237145Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:49.237326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:49.237420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:49.237474Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:49.237611Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:49.237696Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:49.237744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:49.237780Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:49.237995Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:49.238073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:49.238123Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:49.238157Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:49.238278Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:49.238343Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:49.238391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:49.238443Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:49.238505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:49.238571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:49.238614Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 
2025-06-24T21:14:49.238847Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:49.238893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:49.238958Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:49.239190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:49.239265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:49.239308Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:49.239471Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:49.239520Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.239559Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.239669Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:49.239756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:49.239802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:49.239834Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:49.240357Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=55; 2025-06-24T21:14:49.240459Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=40; 2025-06-24T21:14:49.240548Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=39; 2025-06-24T21:14:49.240637Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=41; 
2025-06-24T21:14:49.240755Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:49.240866Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:49.240923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:49.240978Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 37184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=2126;data_size=2102;sum=522292;count=216;size_of_portion=208; 2025-06-24T21:16:45.979663Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=26264; 2025-06-24T21:16:45.979737Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=9; 2025-06-24T21:16:45.980399Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=597; 2025-06-24T21:16:45.980446Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=27247; 2025-06-24T21:16:45.980497Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=27398; 2025-06-24T21:16:45.980580Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=15; 2025-06-24T21:16:45.980850Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=217; 2025-06-24T21:16:45.980908Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=28314; 2025-06-24T21:16:45.981109Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=133; 
2025-06-24T21:16:45.981226Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=79; 2025-06-24T21:16:45.981378Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=111; 2025-06-24T21:16:45.981521Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=103; 2025-06-24T21:16:45.985127Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=3548; 2025-06-24T21:16:45.988999Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=3772; 2025-06-24T21:16:45.989094Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=19; 2025-06-24T21:16:45.989138Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=9; 2025-06-24T21:16:45.989177Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=8; 2025-06-24T21:16:45.989260Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=39; 2025-06-24T21:16:45.989309Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=4; 2025-06-24T21:16:45.989466Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=112; 2025-06-24T21:16:45.989514Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=5; 2025-06-24T21:16:45.989573Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=26; 2025-06-24T21:16:45.989653Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=46; 2025-06-24T21:16:45.989733Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=52; 2025-06-24T21:16:45.989785Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=44796; 2025-06-24T21:16:45.989977Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted 
{blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=108238352;raw_bytes=183045560;count=15;records=1915000} inactive {blob_bytes=205426288;raw_bytes=316809958;count=39;records=3635000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:16:45.990084Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:16:45.990148Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:16:45.990221Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:16:45.990281Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:16:45.990404Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:16:45.990486Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-24T21:16:45.990575Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799393269;tx_id=18446744073709551615;;current_snapshot_ts=1750799690197; 2025-06-24T21:16:45.990627Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:16:45.990674Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:45.990740Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:45.990875Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:16:45.992594Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:4112:6074];tablet_id=9437184;parent=[1:4077:6047];fline=manager.cpp:88;event=ask_data;request=request_id=205;9438184000001={portions_count=54};; 2025-06-24T21:16:45.995544Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:16:45.995969Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:16:45.996006Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:16:45.996033Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:16:45.996082Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:16:45.996168Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-24T21:16:45.996239Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799393269;tx_id=18446744073709551615;;current_snapshot_ts=1750799690197; 2025-06-24T21:16:45.996299Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:16:45.996360Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:45.996408Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:45.996524Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.999000s; 2025-06-24T21:16:45.996595Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey+UseSink [GOOD] >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey-UseSink >> KqpQuery::QueryStats-UseSink [GOOD] >> KqpJoinOrder::TPCDS16-ColumnStore >> KqpJoinOrder::CanonizedJoinOrderTPCH8 >> KqpJoinOrder::ShuffleEliminationManyKeysJoinPredicate >> KqpJoinOrder::CanonizedJoinOrderTPCH6 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::SsaProgramInJsonPlan [GOOD] Test command err: Trying to start YDB, gRPC: 21606, MsgBus: 2979 2025-06-24T21:16:16.255754Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627783567817904:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:16.257650Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031f9/r3tmp/tmp638bSZ/pdisk_1.dat 2025-06-24T21:16:16.638063Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table 
profiles were not loaded 2025-06-24T21:16:16.638660Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627783567817880:2079] 1750799776254178 != 1750799776254181 TServer::EnableGrpc on GrpcPort 21606, node 1 2025-06-24T21:16:16.694416Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:16.694494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:16.701554Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:16.732055Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:16.732088Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:16.732109Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:16.732288Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2979 TClient is connected to server localhost:2979 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:16:17.280570Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:17.317683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:17.341128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:17.358003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:17.525365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:17.683987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:17.771619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:19.556972Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627796452721409:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:19.557105Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:19.873455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.909260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.940407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.973713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:20.016106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:20.056266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:20.132685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:20.198754Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627800747689361:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:20.198834Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:20.199077Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627800747689366:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:20.202821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:20.213954Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627800747689368:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:20.314894Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627800747689421:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:21.256170Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627783567817904:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:21.256231Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["EightShard"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"SortBy":"row.Text","Name":"Sort"},{"Scan":"Parallel","ReadRange":["Key [150, 266]"],"E-Size":"0","Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/Eigh ... : tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.446010Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.446027Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037944;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.446750Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037984;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.446792Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037966;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.453089Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037984;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.453103Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037966;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.453843Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.453848Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037958;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.460070Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.460240Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037958;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.460810Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037948;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.460938Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037967;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.467162Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037948;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.467347Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037967;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.467944Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037981;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.467964Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.474240Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.474833Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.475184Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.475671Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037980;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.481293Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037941;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.481858Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037980;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.482023Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037975;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.482566Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037973;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.488324Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037975;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.488879Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037973;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.489100Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.489602Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037943;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.495520Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.496243Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037939;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.496438Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037943;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.497106Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037971;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.502789Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037939;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.503460Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037971;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.503574Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037957;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.504373Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037976;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-24T21:16:45.510273Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037957;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:16:45.510703Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037976;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["OlapTable"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"E-Rows":"0","Predicate":"Value \u003E 0","Pushdown":"True","Name":"Filter","E-Size":"0","E-Cost":"0"},{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/OlapTable","E-Rows":"0","Table":"OlapTable","ReadColumns":["Key","Value"],"SsaProgram":{"Command":[{"Assign":{"Constant":{"Int32":0},"Column":{"Id":3}}},{"Assign":{"Function":{"YqlOperationId":15,"KernelIdx":0,"FunctionType":2,"Arguments":[{"Id":2},{"Id":3}]},"Column":{"Id":4}}},{"Assign":{"Constant":{"Uint8":0},"Column":{"Id":5}}},{"Assign":{"Function":{"YqlOperationId":17,"KernelIdx":1,"FunctionType":2,"Arguments":[{"Id":4},{"Id":5}]},"Column":{"Id":6}}},{"Filter":{"Predicate":{"Id":6}}},{"Projection":{"Columns":[{"Id":1},{"Id":2}]}}]},"E-Cost":"0"}],"Node Type":"Filter-TableFullScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/OlapTable","reads":[{"columns":["Key","Value"],"scan_by":["Key (-∞, +∞)"],"type":"FullScan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":5,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Path":"\/Root\/OlapTable","E-Rows":"0","Table":"OlapTable","ReadColumns":["Key","Value"],"SsaProgram":{"Command":[{"Assign":{"Constant":{"Int32":0},"Column":{"Id":3}}},{"Assign":{"Function":{"YqlOperationId":15,"KernelIdx":0,"FunctionType":2,"Arguments":[{"Id":2},{"Id":3}]},"Column":{"Id":4}}},{"Assign":{"Constant":{"Uint8":0},"Column":{"Id":5}}},{"Assign":{"Function":{"YqlOperationId":17,"KernelIdx":1,"FunctionType":2,"Arguments":[{"Id":4},{"Id":5}]},"Column":{"Id":6}}},{"Filter":{"Predicate":{"Id":6}}},{"Projection":{"Columns":[{"Id":1},{"Id":2}]}}]},"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Operators":[{"E-Rows":"0","Predicate":"Value \u003E 0","Pushdown":"True","Name":"Filter","E-Size":"0","E-Cost":"0"}],"Node Type":"Filter"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} >> KqpIndexLookupJoin::MultiJoins >> KqpExplain::MergeConnection [GOOD] >> KqpExplain::IdxFullscan >> TColumnShardTestReadWrite::CompactionSplitGranule_PKInt64 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::QueryStats-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 24816, MsgBus: 28555 2025-06-24T21:16:21.932294Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627801946689436:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:21.932868Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031c3/r3tmp/tmpj7i71r/pdisk_1.dat 2025-06-24T21:16:22.292030Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:22.292296Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:22.294939Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:22.315741Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:22.317115Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627801946689317:2079] 1750799781898063 != 1750799781898066 TServer::EnableGrpc on GrpcPort 24816, node 1 2025-06-24T21:16:22.374149Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:22.374171Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:22.374178Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:22.374314Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28555 TClient is connected to server localhost:28555 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:16:22.935376Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:22.987070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:16:23.020410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:23.154272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:23.304216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:23.391382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:25.203346Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627819126560142:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:25.203463Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:25.481950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:25.516752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:25.549466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:25.627962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:25.683218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:25.760244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:25.791589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:25.851842Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627819126560805:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:25.851928Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:25.852115Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627819126560810:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:25.857068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:25.869811Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627819126560812:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:25.961876Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627819126560865:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:26.929580Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627801946689436:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:26.929650Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:27.109327Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YjdmMzZhZTItYmQwMGViZmItODJkYjljYy00YmU5MmE0YQ==, ActorId: [1:7519627827716495726:2473], ActorState: ExecuteState, TraceId: 01jyhws91jeaj2hz4gwks9dm3r, Create QueryResponse for error on request, msg:
: Error: Request timeout 50ms exceeded
: Error: Cancelling after 50ms during compilation Trying to start YDB, gRPC: 32289, MsgBus: 19680 2025-06-24T21:16:27.977597Z node 2 :METADATA_PROVID ... uration_us: 8854 table_access { name: "/Root/EightShard" updates { rows: 3 bytes: 47 } partitions_count: 1 } table_access { name: "/Root/TwoShard" reads { rows: 3 bytes: 35 } partitions_count: 1 } cpu_time_us: 3493 affected_shards: 2 } compilation { duration_us: 271740 cpu_time_us: 267811 } process_cpu_time_us: 529 total_duration_us: 289120 total_cpu_time_us: 271833 Trying to start YDB, gRPC: 9159, MsgBus: 15929 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031c3/r3tmp/tmpxCVqqK/pdisk_1.dat 2025-06-24T21:16:41.567346Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:16:41.578329Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:41.581784Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519627887340486126:2079] 1750799801330749 != 1750799801330752 2025-06-24T21:16:41.592640Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:41.592765Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:41.595903Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9159, node 4 2025-06-24T21:16:41.647719Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:41.647747Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:41.647755Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:41.647898Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15929 TClient is connected to server localhost:15929 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:16:42.341546Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:42.349460Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:42.362162Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:42.422511Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:42.455069Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:42.652411Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:42.737032Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:45.574494Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627904520356952:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:45.574605Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:45.646524Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:45.685546Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:45.725046Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:45.798204Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:45.834314Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:45.906582Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:45.945378Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:46.010978Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627908815324908:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:46.011074Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:46.011244Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627908815324913:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:46.016419Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:46.027310Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627908815324915:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:46.106412Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627908815324966:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } query_phases { duration_us: 5143 table_access { name: "/Root/TwoShard" reads { rows: 3 bytes: 35 } partitions_count: 1 } cpu_time_us: 3663 affected_shards: 1 } query_phases { duration_us: 6208 table_access { name: "/Root/EightShard" updates { rows: 3 bytes: 47 } partitions_count: 1 } cpu_time_us: 2717 affected_shards: 2 } compilation { duration_us: 259789 cpu_time_us: 255874 } process_cpu_time_us: 778 total_duration_us: 276165 total_cpu_time_us: 263032 >> KqpExplain::ReadTableRangesFullScan [GOOD] >> KqpExplain::ReadTableRanges >> KqpLimits::QSReplySize+useSink [GOOD] >> KqpLimits::QSReplySize-useSink >> KqpJoin::AllowJoinsForComplexPredicates-StreamLookup >> KqpJoinOrder::FiveWayJoinStatsOverride-ColumnStore >> KqpStats::StreamLookupStats+StreamLookupJoin [GOOD] >> KqpStats::StreamLookupStats-StreamLookupJoin >> KqpQuery::CreateAsSelectTypes-NotNull+IsOlap [GOOD] >> KqpQuery::CreateAsSelectTypes+NotNull+IsOlap >> KqpExplain::UpdateConditionalKey+UseSink [GOOD] >> KqpExplain::UpdateConditionalKey-UseSink >> KqpJoinOrder::TestJoinHint2+ColumnStore >> KqpStats::OneShardNonLocalExec+UseSink [GOOD] >> KqpStats::OneShardNonLocalExec-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranule_PKInt64 [GOOD] Test command err: 2025-06-24T21:14:49.352975Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:49.380308Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:49.380579Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:49.387852Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:49.388106Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:49.388342Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:49.388453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:49.388582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:49.388716Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:49.388827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:49.388974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:49.389078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:49.389200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.389302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:49.419303Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:49.419550Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:49.419606Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:49.419789Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:49.420002Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:49.420078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:49.420123Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:49.420236Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:49.420305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:49.420345Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:49.420371Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:49.420532Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:49.420597Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:49.420635Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:49.420662Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:49.420772Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:49.420840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:49.420886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:49.420930Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:49.420979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:49.421025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:49.421059Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:49.421279Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:49.421319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:49.421355Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:49.421542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:49.421592Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:49.421619Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:49.421746Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:49.421787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.421835Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.421928Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:49.422001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:49.422060Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:49.422089Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:49.422491Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=43; 2025-06-24T21:14:49.422569Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=35; 2025-06-24T21:14:49.422645Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=32; 2025-06-24T21:14:49.422708Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=30; 2025-06-24T21:14:49.422809Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:49.422908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:49.422960Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:49.423006Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=2158;data_size=2150;sum=529040;count=216;size_of_portion=208; 2025-06-24T21:16:49.148696Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=29169; 2025-06-24T21:16:49.148823Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=13; 2025-06-24T21:16:49.149819Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=924; 2025-06-24T21:16:49.149893Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=30597; 2025-06-24T21:16:49.149950Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=30801; 2025-06-24T21:16:49.150030Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=16; 2025-06-24T21:16:49.150382Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=273; 2025-06-24T21:16:49.150451Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=31949; 2025-06-24T21:16:49.150674Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=143; 2025-06-24T21:16:49.150873Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=128; 2025-06-24T21:16:49.151109Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=175; 2025-06-24T21:16:49.151452Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=262; 2025-06-24T21:16:49.159115Z node 1 :TX_COLUMNSHARD 
INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=7578; 2025-06-24T21:16:49.167820Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=8573; 2025-06-24T21:16:49.167928Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=13; 2025-06-24T21:16:49.167983Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=11; 2025-06-24T21:16:49.168047Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-24T21:16:49.168166Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=64; 2025-06-24T21:16:49.168211Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=6; 2025-06-24T21:16:49.168346Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=84; 2025-06-24T21:16:49.168397Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=6; 2025-06-24T21:16:49.168474Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=44; 2025-06-24T21:16:49.168577Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=67; 2025-06-24T21:16:49.168680Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=56; 2025-06-24T21:16:49.168748Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=60361; 2025-06-24T21:16:49.168977Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=108275536;raw_bytes=198365560;count=15;records=1915000} inactive {blob_bytes=205496480;raw_bytes=345889958;count=39;records=3635000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:16:49.169117Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:16:49.169186Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:16:49.169286Z node 1 
:TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:16:49.169342Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:16:49.169513Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:16:49.169604Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-24T21:16:49.169710Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799393460;tx_id=18446744073709551615;;current_snapshot_ts=1750799690379; 2025-06-24T21:16:49.169789Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:16:49.169870Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:49.169922Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:49.170036Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:16:49.172138Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:4143:6103];tablet_id=9437184;parent=[1:4108:6076];fline=manager.cpp:88;event=ask_data;request=request_id=205;9438184000001={portions_count=54};; 2025-06-24T21:16:49.177098Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:16:49.177497Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:16:49.177538Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:16:49.177568Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:16:49.177615Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:16:49.177717Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-24T21:16:49.177792Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799393460;tx_id=18446744073709551615;;current_snapshot_ts=1750799690379; 2025-06-24T21:16:49.177839Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:16:49.177885Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:49.177925Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:49.178009Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.999000s; 2025-06-24T21:16:49.178071Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; >> KqpQuery::OltpCreateAsSelect_Simple [GOOD] >> KqpQuery::OltpCreateAsSelect_Disable >> KqpQuery::TableSink_ReplaceDataShardDataQuery-UseSink [GOOD] >> KqpQuery::TableSinkWithSubquery >> KqpJoinOrder::ShuffleEliminationOneJoin >> KqpParams::MissingParameter [GOOD] >> KqpParams::MissingOptionalParameter-UseSink >> RetryPolicy::TWriteSession_TestPolicy [GOOD] >> RetryPolicy::TWriteSession_TestBrokenPolicy >> KqpJoin::IndexLoookupJoinStructJoin+StreamLookupJoin >> KqpTypes::MultipleCurrentUtcTimestamp [GOOD] >> KqpStats::SelfJoin [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpTypes::MultipleCurrentUtcTimestamp [GOOD] Test command err: Trying to start YDB, gRPC: 12567, MsgBus: 14438 2025-06-24T21:16:26.962736Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627824733490187:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:26.962828Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003145/r3tmp/tmp6LOikR/pdisk_1.dat 2025-06-24T21:16:27.390287Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12567, node 1 2025-06-24T21:16:27.438491Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:27.438591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:27.440086Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:27.503713Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:27.503768Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:27.503872Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:27.504021Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14438 2025-06-24T21:16:27.971309Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14438 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:28.153521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:16:28.175871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:28.199346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:16:28.335785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:28.506673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:16:28.591878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:30.404203Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627841913360974:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:30.404311Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:30.762264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:30.798343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:30.839185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:30.874663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:30.910900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:30.984627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:31.061404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:31.139731Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627846208328934:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:31.139801Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:31.139994Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627846208328939:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:31.144368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:31.160895Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627846208328941:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:31.254609Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627846208328992:3430] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:31.966106Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627824733490187:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:31.966177Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 22221, MsgBus: 27021 2025-06-24T21:16:33.601459Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627853292449817:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:33.601510Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/run ... sts.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519627882062782472:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:44.941541Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 2460, MsgBus: 65418 2025-06-24T21:16:46.533084Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519627910445767869:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:46.533151Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003145/r3tmp/tmpS3CTLN/pdisk_1.dat 2025-06-24T21:16:46.661226Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:46.661977Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519627910445767846:2079] 1750799806529495 != 1750799806529498 2025-06-24T21:16:46.676356Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:46.676443Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:46.679619Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2460, node 4 2025-06-24T21:16:46.728691Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:46.728738Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:46.728748Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: 
(empty maybe) 2025-06-24T21:16:46.728925Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65418 TClient is connected to server localhost:65418 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:47.354491Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:47.360757Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:47.378880Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:47.467137Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:47.626022Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:47.704744Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:47.787739Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:50.515312Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627927625638667:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:50.515425Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:50.583103Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:50.622305Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:50.665624Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:50.711183Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:50.754998Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:50.856296Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:50.937889Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:51.022601Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627931920606630:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:51.022690Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:51.023275Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627931920606635:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:51.028787Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:51.050378Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627931920606637:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:51.110535Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627931920606688:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:51.535271Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627910445767869:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:51.535361Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQuery::ReadOverloaded-StreamLookup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpStats::SelfJoin [GOOD] Test command err: Trying to start YDB, gRPC: 13350, MsgBus: 28884 2025-06-24T21:16:26.938391Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627822884457015:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:26.938450Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00313d/r3tmp/tmpvDH9P9/pdisk_1.dat 2025-06-24T21:16:27.295923Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627822884456973:2079] 1750799786919696 != 1750799786919699 2025-06-24T21:16:27.313658Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13350, node 1 2025-06-24T21:16:27.354582Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:27.355282Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:27.368069Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:27.411261Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:27.411283Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:27.411291Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:27.411791Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28884 TClient is connected to server localhost:28884 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:16:27.952469Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:28.119795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:28.153259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:28.314650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:28.470859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:16:28.552659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:30.325787Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627840064327812:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:30.325900Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:30.761633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:30.834263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:30.874150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:30.907911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:30.943227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:30.985275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:31.033742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:31.175409Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627844359295772:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:31.175521Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:31.175783Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627844359295777:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:31.180013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:31.191650Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627844359295779:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:31.259872Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627844359295830:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:31.933966Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627822884457015:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:31.934039Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 2946, MsgBus: 8935 2025-06-24T21:16:33.582516Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627853064355881:2086];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:33.582786Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath ... existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:52.230632Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627935707404807:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } {"Plan":{"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Plans":[{"PlanNodeId":7,"Plans":[{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"E-Size":"0","PlanNodeId":3,"LookupKeyColumns":["Key"],"Node Type":"TableLookupJoin","Path":"\/Root\/TwoShard","Columns":["Key"],"E-Rows":"0","Table":"TwoShard","Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["TwoShard"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/TwoShard","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"TwoShard","ReadColumns":["Key"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node 
Type":"Stage","Stats":{"UseLlvm":"undefined","Table":[{"Path":"\/Root\/TwoShard","ReadRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"ReadBytes":{"Count":2,"Sum":48,"Max":24,"Min":24}}],"OutputRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"PhysicalStageId":0,"FinishedTasks":2,"IngressRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"DurationUs":{"Count":2,"Sum":2000,"Max":1000,"Min":1000},"MaxMemoryUsage":{"Count":2,"Sum":2097152,"Max":1048576,"Min":1048576,"History":[5,2097152]},"BaseTimeMs":1750799813938,"Output":[{"Pop":{"Chunks":{"Count":2,"Sum":2,"Max":1,"Min":1},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"LastMessageMs":{"Count":2,"Sum":8,"Max":4,"Min":4},"FirstMessageMs":{"Count":2,"Sum":8,"Max":4,"Min":4},"Bytes":{"Count":2,"Sum":48,"Max":36,"Min":12,"History":[5,48]}},"Name":"RESULT","Push":{"WaitTimeUs":{"Count":2,"Sum":6938,"Max":3511,"Min":3427,"History":[5,6938]},"WaitPeriods":{"Count":2,"Sum":2,"Max":1,"Min":1},"Chunks":{"Count":2,"Sum":6,"Max":3,"Min":3},"ResumeMessageMs":{"Count":2,"Sum":8,"Max":4,"Min":4},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"LastMessageMs":{"Count":2,"Sum":8,"Max":4,"Min":4},"FirstMessageMs":{"Count":2,"Sum":8,"Max":4,"Min":4}}}],"CpuTimeUs":{"Count":2,"Sum":784,"Max":648,"Min":136,"History":[5,784]},"Ingress":[{"Pop":{"Chunks":{"Count":2,"Sum":2,"Max":1,"Min":1},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"LastMessageMs":{"Count":2,"Sum":8,"Max":4,"Min":4},"FirstMessageMs":{"Count":2,"Sum":8,"Max":4,"Min":4},"Bytes":{"Count":2,"Sum":96,"Max":48,"Min":48,"History":[5,96]}},"External":{},"Name":"KqpReadRangesSource","Ingress":{},"Push":{"LastMessageMs":{"Count":2,"Sum":8,"Max":4,"Min":4},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"Chunks":{"Count":2,"Sum":2,"Max":1,"Min":1},"ResumeMessageMs":{"Count":2,"Sum":8,"Max":4,"Min":4},"FirstMessageMs":{"Count":2,"Sum":8,"Max":4,"Min":4},"Bytes":{"Count":2,"Sum":96,"Max":48,"Min":48,"History":[5,96]},"WaitTimeUs":{"Count":2,"Sum":7002,"Max":3552,"Min":3450,"History":[5,7002]},"WaitPeriods":{"Count":2,"Sum":2,"Max":1,"Min":1}}}],"StageDurationUs":1000,"ResultRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"ResultBytes":{"Count":2,"Sum":48,"Max":36,"Min":12},"OutputBytes":{"Count":2,"Sum":48,"Max":36,"Min":12},"UpdateTimeMs":5,"Tasks":2}}],"PlanNodeType":"Connection","E-Cost":"0"}],"Node 
Type":"Collect","Stats":{"UseLlvm":"undefined","Table":[{"Path":"\/Root\/TwoShard","ReadRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"ReadBytes":{"Count":2,"Sum":24,"Max":12,"Min":12}}],"OutputRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"PhysicalStageId":1,"FinishedTasks":2,"InputBytes":{"Count":2,"Sum":48,"Max":36,"Min":12},"DurationUs":{"Count":2,"Sum":8000,"Max":4000,"Min":4000},"MaxMemoryUsage":{"Count":2,"Sum":2097152,"Max":1048576,"Min":1048576,"History":[9,2097152]},"BaseTimeMs":1750799813938,"Output":[{"Pop":{"Chunks":{"Count":2,"Sum":2,"Max":1,"Min":1},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"LastMessageMs":{"Count":2,"Sum":14,"Max":7,"Min":7},"FirstMessageMs":{"Count":2,"Sum":14,"Max":7,"Min":7},"Bytes":{"Count":2,"Sum":48,"Max":36,"Min":12,"History":[9,48]}},"Name":"6","Push":{"LastMessageMs":{"Count":2,"Sum":14,"Max":7,"Min":7},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"Chunks":{"Count":2,"Sum":6,"Max":3,"Min":3},"ResumeMessageMs":{"Count":2,"Sum":14,"Max":7,"Min":7},"FirstMessageMs":{"Count":2,"Sum":14,"Max":7,"Min":7},"PauseMessageMs":{"Count":2,"Sum":8,"Max":4,"Min":4},"WaitTimeUs":{"Count":2,"Sum":11525,"Max":5827,"Min":5698,"History":[9,11525]},"WaitPeriods":{"Count":2,"Sum":2,"Max":1,"Min":1},"WaitMessageMs":{"Count":2,"Max":7,"Min":4}}}],"CpuTimeUs":{"Count":2,"Sum":759,"Max":492,"Min":267,"History":[9,759]},"StageDurationUs":4000,"WaitInputTimeUs":{"Count":2,"Sum":11280,"Max":5713,"Min":5567,"History":[9,11280]},"OutputBytes":{"Count":2,"Sum":48,"Max":36,"Min":12},"Input":[{"Pop":{"Chunks":{"Count":2,"Sum":2,"Max":1,"Min":1},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"LastMessageMs":{"Count":2,"Sum":8,"Max":4,"Min":4},"FirstMessageMs":{"Count":2,"Sum":8,"Max":4,"Min":4},"Bytes":{"Count":2,"Sum":48,"Max":36,"Min":12,"History":[9,48]}},"Name":"2","Push":{"LastMessageMs":{"Count":2,"Sum":8,"Max":4,"Min":4},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"Chunks":{"Count":2,"Sum":2,"Max":1,"Min":1},"ResumeMessageMs":{"Count":2,"Sum":8,"Max":4,"Min":4},"FirstMessageMs":{"Count":2,"Sum":8,"Max":4,"Min":4},"Bytes":{"Count":2,"Sum":48,"Max":36,"Min":12,"History":[9,48]},"PauseMessageMs":{"Count":2,"Sum":2,"Max":1,"Min":1},"WaitTimeUs":{"Count":2,"Sum":5806,"Max":3000,"Min":2806,"History":[9,5806]},"WaitPeriods":{"Count":2,"Sum":2,"Max":1,"Min":1},"WaitMessageMs":{"Count":2,"Max":4,"Min":1}}}],"UpdateTimeMs":8,"InputRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"Tasks":2}}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":5}],"Name":"Limit","Limit":"1001"}],"Node 
Type":"Limit","Stats":{"UseLlvm":"undefined","Output":[{"Pop":{"Chunks":{"Count":1,"Sum":1,"Max":1,"Min":1},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"LastMessageMs":{"Count":1,"Sum":8,"Max":8,"Min":8},"FirstMessageMs":{"Count":1,"Sum":8,"Max":8,"Min":8},"Bytes":{"Count":1,"Sum":24,"Max":24,"Min":24,"History":[9,24]}},"Name":"8","Push":{"LastMessageMs":{"Count":1,"Sum":8,"Max":8,"Min":8},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"Chunks":{"Count":1,"Sum":6,"Max":6,"Min":6},"ResumeMessageMs":{"Count":1,"Sum":8,"Max":8,"Min":8},"FirstMessageMs":{"Count":1,"Sum":8,"Max":8,"Min":8},"PauseMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"WaitTimeUs":{"Count":1,"Sum":5356,"Max":5356,"Min":5356,"History":[9,5356]},"WaitPeriods":{"Count":1,"Sum":1,"Max":1,"Min":1},"WaitMessageMs":{"Count":1,"Max":8,"Min":2}}}],"MaxMemoryUsage":{"Count":1,"Sum":1048576,"Max":1048576,"Min":1048576,"History":[9,1048576]},"DurationUs":{"Count":1,"Sum":2000,"Max":2000,"Min":2000},"InputBytes":{"Count":1,"Sum":48,"Max":48,"Min":48},"Tasks":1,"OutputRows":{"Count":1,"Sum":6,"Max":6,"Min":6},"FinishedTasks":1,"InputRows":{"Count":1,"Sum":6,"Max":6,"Min":6},"PhysicalStageId":2,"StageDurationUs":2000,"BaseTimeMs":1750799813938,"OutputBytes":{"Count":1,"Sum":24,"Max":24,"Min":24},"CpuTimeUs":{"Count":1,"Sum":776,"Max":776,"Min":776,"History":[9,776]},"UpdateTimeMs":9,"Input":[{"Pop":{"Chunks":{"Count":1,"Sum":2,"Max":2,"Min":2},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"LastMessageMs":{"Count":1,"Sum":8,"Max":8,"Min":8},"FirstMessageMs":{"Count":1,"Sum":8,"Max":8,"Min":8},"Bytes":{"Count":1,"Sum":48,"Max":48,"Min":48,"History":[9,48]}},"Name":"4","Push":{"LastMessageMs":{"Count":1,"Sum":7,"Max":7,"Min":7},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"Chunks":{"Count":1,"Sum":2,"Max":2,"Min":2},"ResumeMessageMs":{"Count":1,"Sum":7,"Max":7,"Min":7},"FirstMessageMs":{"Count":1,"Sum":7,"Max":7,"Min":7},"Bytes":{"Count":1,"Sum":48,"Max":48,"Min":48,"History":[9,48]},"PauseMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"WaitTimeUs":{"Count":1,"Sum":5284,"Max":5284,"Min":5284,"History":[9,5284]},"WaitPeriods":{"Count":1,"Sum":2,"Max":2,"Min":2},"WaitMessageMs":{"Count":1,"Max":7,"Min":2}}}]}}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":7}],"Name":"Limit","Limit":"1001"}],"Node 
Type":"Limit","Stats":{"UseLlvm":"undefined","OutputRows":{"Count":1,"Sum":6,"Max":6,"Min":6},"PhysicalStageId":3,"FinishedTasks":1,"InputBytes":{"Count":1,"Sum":24,"Max":24,"Min":24},"DurationUs":{"Count":1,"Sum":1000,"Max":1000,"Min":1000},"MaxMemoryUsage":{"Count":1,"Sum":1048576,"Max":1048576,"Min":1048576,"History":[10,1048576]},"BaseTimeMs":1750799813938,"Output":[{"Pop":{"Chunks":{"Count":1,"Sum":1,"Max":1,"Min":1},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"LastMessageMs":{"Count":1,"Sum":9,"Max":9,"Min":9},"FirstMessageMs":{"Count":1,"Sum":9,"Max":9,"Min":9},"Bytes":{"Count":1,"Sum":24,"Max":24,"Min":24,"History":[10,24]}},"Name":"RESULT","Push":{"LastMessageMs":{"Count":1,"Sum":9,"Max":9,"Min":9},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"Chunks":{"Count":1,"Sum":6,"Max":6,"Min":6},"ResumeMessageMs":{"Count":1,"Sum":9,"Max":9,"Min":9},"FirstMessageMs":{"Count":1,"Sum":9,"Max":9,"Min":9},"PauseMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"WaitTimeUs":{"Count":1,"Sum":6339,"Max":6339,"Min":6339,"History":[10,6339]},"WaitPeriods":{"Count":1,"Sum":1,"Max":1,"Min":1},"WaitMessageMs":{"Count":1,"Max":9,"Min":2}}}],"CpuTimeUs":{"Count":1,"Sum":637,"Max":637,"Min":637,"History":[10,637]},"StageDurationUs":1000,"ResultRows":{"Count":1,"Sum":6,"Max":6,"Min":6},"ResultBytes":{"Count":1,"Sum":24,"Max":24,"Min":24},"OutputBytes":{"Count":1,"Sum":24,"Max":24,"Min":24},"Input":[{"Pop":{"Chunks":{"Count":1,"Sum":1,"Max":1,"Min":1},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"LastMessageMs":{"Count":1,"Sum":9,"Max":9,"Min":9},"FirstMessageMs":{"Count":1,"Sum":9,"Max":9,"Min":9},"Bytes":{"Count":1,"Sum":24,"Max":24,"Min":24,"History":[10,24]}},"Name":"6","Push":{"LastMessageMs":{"Count":1,"Sum":8,"Max":8,"Min":8},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"Chunks":{"Count":1,"Sum":1,"Max":1,"Min":1},"ResumeMessageMs":{"Count":1,"Sum":8,"Max":8,"Min":8},"FirstMessageMs":{"Count":1,"Sum":8,"Max":8,"Min":8},"Bytes":{"Count":1,"Sum":24,"Max":24,"Min":24,"History":[10,24]},"PauseMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"WaitTimeUs":{"Count":1,"Sum":6256,"Max":6256,"Min":6256,"History":[10,6256]},"WaitPeriods":{"Count":1,"Sum":1,"Max":1,"Min":1},"WaitMessageMs":{"Count":1,"Max":8,"Min":2}}}],"UpdateTimeMs":9,"InputRows":{"Count":1,"Sum":6,"Max":6,"Min":6},"Tasks":1}}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"Compilation":{"FromCache":false,"DurationUs":307711,"CpuTimeUs":302924},"ProcessCpuTimeUs":463,"TotalDurationUs":340830,"ResourcePoolId":"default","QueuedTimeUs":569},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":7,"Plans":[{"PlanNodeId":9,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Path":"\/Root\/TwoShard","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"TwoShard","ReadColumns":["Key"],"E-Cost":"0"}],"Node Type":"TableFullScan"},{"Operators":[{"E-Rows":"0","Columns":["Key"],"E-Size":"0","E-Cost":"0","Name":"TableLookup","Table":"TwoShard","LookupKeyColumns":["Key"]}],"Node Type":"TableLookup","PlanNodeType":"TableLookup"}],"Operators":[{"Name":"LookupJoin","LookupKeyColumns":["Key"]}],"Node Type":"LookupJoin","PlanNodeType":"Connection"}],"Operators":[{"A-Rows":6,"A-SelfCpu":0.776,"A-Cpu":0.776,"A-Size":24,"Name":"Limit","Limit":"1001"}],"Node 
Type":"Limit"}],"Operators":[{"A-Rows":6,"A-SelfCpu":0.637,"A-Cpu":1.413,"A-Size":24,"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","PlanNodeType":"Query"}} >> KqpExplain::UpdateSecondaryConditionalSecondaryKey-UseSink [GOOD] >> KqpStats::StreamLookupStats-StreamLookupJoin [GOOD] >> KqpStats::SysViewClientLost >> KqpQuery::OltpCreateAsSelect_Disable [GOOD] >> KqpQuery::OlapCreateAsSelect_Complex >> KqpLimits::StreamWrite-Allowed [GOOD] >> KqpLimits::TooBigColumn+useSink >> KqpJoinOrder::TPCHEveryQueryWorks-ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::ReadOverloaded-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 6729, MsgBus: 6522 2025-06-24T21:16:15.851638Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627777894428388:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:15.851723Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00320c/r3tmp/tmp066HqE/pdisk_1.dat 2025-06-24T21:16:16.219351Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:16.219729Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627777894428367:2079] 1750799775849848 != 1750799775849851 2025-06-24T21:16:16.236774Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:16.236883Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:16.262612Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6729, node 1 2025-06-24T21:16:16.341745Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:16.341766Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:16.341771Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:16.341910Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6522 TClient is connected to server localhost:6522 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:16:16.870747Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:16.881829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:16.899836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:16.907218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:17.049007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:17.207052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:17.286858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:19.029992Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627795074299193:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:19.030118Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:19.356767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.389056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.420721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.492900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.527588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.568153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.605529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.668347Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627795074299851:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:19.668419Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:19.668684Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627795074299856:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:19.672514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:19.683782Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627795074299858:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:19.748971Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627795074299909:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:20.852037Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627777894428388:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:20.852150Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 3009, MsgBus: 20024 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00320c/r3tmp/tmp2pKHWZ/pdisk_1.dat 2025-06-24T21:16:22.096413Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migratio ... alhost:11856 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:46.929127Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:47.010802Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:47.152582Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:47.349236Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:47.790208Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:48.126872Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:48.840441Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1682:3278], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:48.840740Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:48.874533Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:49.090243Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:49.351127Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:49.628885Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:49.903875Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:50.272703Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:50.565217Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:50.981184Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:2351:3773], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:50.981323Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:50.981717Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:2356:3778], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:50.988939Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:51.148448Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:2358:3780], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:51.211438Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:2416:3819] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:52.687822Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:52.942355Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.300421Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.316576Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [4:3105:4334], TxId: 281474976715675, task: 1. Ctx: { TraceId : 01jyhwt32sfm9sk0376n6fe3z7. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NjJkODk1MTQtY2Q5NmI5ODAtNTI0ODZkNGMtOGVmMmFhN2U=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Source[0] fatal error: {
: Error: Table '/Root/SecondaryKeys' retry limit exceeded. } 2025-06-24T21:16:55.316759Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [4:3105:4334], TxId: 281474976715675, task: 1. Ctx: { TraceId : 01jyhwt32sfm9sk0376n6fe3z7. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NjJkODk1MTQtY2Q5NmI5ODAtNTI0ODZkNGMtOGVmMmFhN2U=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: OVERLOADED DEFAULT_ERROR: {
: Error: Table '/Root/SecondaryKeys' retry limit exceeded. }. 2025-06-24T21:16:55.318139Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:3106:4335], TxId: 281474976715675, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NjJkODk1MTQtY2Q5NmI5ODAtNTI0ODZkNGMtOGVmMmFhN2U=. TraceId : 01jyhwt32sfm9sk0376n6fe3z7. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:3099:4023], status: OVERLOADED, reason: {
: Error: Terminate execution } 2025-06-24T21:16:55.319061Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=NjJkODk1MTQtY2Q5NmI5ODAtNTI0ODZkNGMtOGVmMmFhN2U=, ActorId: [4:2679:4023], ActorState: ExecuteState, TraceId: 01jyhwt32sfm9sk0376n6fe3z7, Create QueryResponse for error on request, msg: >> KqpExplain::IdxFullscan [GOOD] >> KqpExplain::MultiJoinCteLinks >> TColumnShardTestReadWrite::CompactionInGranule_PKString [GOOD] >> KqpQuery::TableSinkWithSubquery [GOOD] >> KqpExplain::ReadTableRanges [GOOD] >> KqpExplain::Predicates ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::UpdateSecondaryConditionalSecondaryKey-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 26565, MsgBus: 21670 2025-06-24T21:16:18.335320Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627790546165375:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:18.335524Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031e5/r3tmp/tmp79CSnK/pdisk_1.dat 2025-06-24T21:16:18.733230Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:18.735311Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627790546165262:2079] 1750799778323735 != 1750799778323738 TServer::EnableGrpc on GrpcPort 26565, node 1 2025-06-24T21:16:18.777638Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:18.777767Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:18.779380Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:18.792098Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:18.792128Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:18.792135Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:18.792272Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21670 TClient is connected to server localhost:21670 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:16:19.335045Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:19.409579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:16:19.427570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.586473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:19.741658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:19.808141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:21.636773Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627803431068796:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:21.636892Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:22.040448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:22.076484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:22.126417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:22.153028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:22.183480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:22.260479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:22.305771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:22.374028Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627807726036753:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:22.374121Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:22.374352Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627807726036758:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:22.378587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:22.388700Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627807726036760:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:22.499681Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627807726036811:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:23.331239Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627790546165375:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:23.373492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:23.600461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:23.671500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:50.329493Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:50.466656Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:50.512345Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:50.566075Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:50.629209Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:50.674148Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:50.728072Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:50.810909Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:50.969886Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627928022890593:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:50.970015Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:50.970129Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627928022890598:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:50.975618Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:50.991159Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627928022890600:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:51.071438Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627910843019144:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:51.071517Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:51.081466Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627932317857947:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:52.748547Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:52.790917Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:52.834400Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) {"Plan":{"Plans":[{"PlanNodeId":18,"Plans":[{"Tables":["SecondaryKeys"],"PlanNodeId":17,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"},{"Inputs":[],"Iterator":"precompute_1_2","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_1_2"}],"Node Type":"Effect"},{"PlanNodeId":16,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":15,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Delete","Table":"SecondaryKeys\/Index\/indexImplTable"},{"Inputs":[],"Iterator":"precompute_1_1","Name":"Iterator"}],"Node Type":"Delete-ConstantExpr","CTE Name":"precompute_1_1"}],"Node Type":"Effect"},{"PlanNodeId":14,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":13,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Upsert","Table":"SecondaryKeys\/Index\/indexImplTable"},{"Inputs":[],"Iterator":"precompute_1_0","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_1_0"}],"Node Type":"Effect"},{"PlanNodeId":11,"Plans":[{"PlanNodeId":10,"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":7,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Fk [1, 
4)"],"Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","E-Rows":"0","ReadRangesPointPrefixLen":"1","ReadRangesKeys":["Fk"],"Table":"SecondaryKeys\/Index\/indexImplTable","ReadColumns":["Fk","Key"],"E-Cost":"0","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Subplan Name":"CTE Stage_5","Node Type":"Stage","Parent Relationship":"InitPlan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_1_0","Node Type":"Precompute_1_0","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Node Type":"UnionAll","CTE Name":"Stage_5","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_1_1","Node Type":"Precompute_1_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":1,"Node Type":"UnionAll","CTE Name":"Stage_5","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_1_2","Node Type":"Precompute_1_2","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node Type":"Query","PlanNodeType":"Query","Stats":{"ResourcePoolId":"default"}},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/SecondaryKeys","writes":[{"columns":["Key","Fk"],"type":"MultiUpsert"}]},{"name":"\/Root\/SecondaryKeys\/Index\/indexImplTable","reads":[{"columns":["Fk","Key"],"scan_by":["Fk [1, 4)"],"type":"Scan"}],"writes":[{"columns":["Key","Fk"],"type":"MultiUpsert"},{"type":"MultiErase"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"}],"Plans":[{"PlanNodeId":7,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Fk [1, 4)"],"Name":"TableRangeScan","Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","ReadRangesPointPrefixLen":"1","E-Rows":"0","ReadRangesKeys":["Fk"],"Table":"SecondaryKeys\/Index\/indexImplTable","ReadColumns":["Fk","Key"],"E-Cost":"0","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Node Type":"Upsert"}],"Node Type":"Effect"},{"PlanNodeId":8,"Plans":[{"PlanNodeId":9,"Operators":[{"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Delete","Table":"SecondaryKeys\/Index\/indexImplTable"}],"Plans":[{"PlanNodeId":14,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Fk [1, 4)"],"Name":"TableRangeScan","Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","ReadRangesPointPrefixLen":"1","E-Rows":"0","ReadRangesKeys":["Fk"],"Table":"SecondaryKeys\/Index\/indexImplTable","ReadColumns":["Fk","Key"],"E-Cost":"0","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Node Type":"Delete"}],"Node Type":"Effect"},{"PlanNodeId":15,"Plans":[{"PlanNodeId":16,"Operators":[{"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Upsert","Table":"SecondaryKeys\/Index\/indexImplTable"}],"Plans":[{"PlanNodeId":22,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Fk [1, 4)"],"Name":"TableRangeScan","Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","ReadRangesPointPrefixLen":"1","E-Rows":"0","ReadRangesKeys":["Fk"],"Table":"SecondaryKeys\/Index\/indexImplTable","ReadColumns":["Fk","Key"],"E-Cost":"0","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Node Type":"Upsert"}],"Node Type":"Effect"}],"Node Type":"Query","PlanNodeType":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0}}} >> 
KqpJoinOrder::OltpJoinTypeHintCBOTurnOFF >> KqpExplain::UpdateConditionalKey-UseSink [GOOD] >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey-UseSink [GOOD] >> KqpIndexLookupJoin::MultiJoins [GOOD] >> KqpIndexLookupJoin::LeftSemiJoinWithLeftFilter-StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKString [GOOD] Test command err: 2025-06-24T21:15:26.554820Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:26.580572Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:26.580864Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:26.587433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:26.587674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:26.587879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:26.587954Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:26.588099Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:26.588199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:26.588301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:26.588387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:26.588455Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:26.588571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:26.588652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:26.615515Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:26.615793Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:26.615855Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:26.616070Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:26.616275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:26.616364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:26.616413Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:26.616530Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:26.616595Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:26.616646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:26.616683Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:26.616869Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:26.616934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:26.616974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:26.617012Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:26.617113Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:26.617171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:26.617215Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:26.617258Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:26.617325Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:26.617370Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:26.617403Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:26.617632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:26.617677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:26.617713Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:26.617922Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:26.618002Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:26.618033Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:26.618167Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:26.618208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:26.618243Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:26.618347Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:26.618415Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:26.618455Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:26.618482Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:26.618954Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=47; 2025-06-24T21:15:26.619040Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=39; 2025-06-24T21:15:26.619119Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=36; 2025-06-24T21:15:26.619218Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=36; 2025-06-24T21:15:26.619327Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:26.619425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:26.619482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:26.619537Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
mn_id:9;chunk_idx:46;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:9240];;column_id:10;chunk_idx:0;blob_range:[NO_BLOB:0:9288];;column_id:10;chunk_idx:1;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:2;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:3;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:4;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:5;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:6;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:7;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:8;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:9;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:10;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:11;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:12;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:13;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:14;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:15;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:16;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:17;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:18;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:19;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:20;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:21;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:22;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:23;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:24;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:25;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:26;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:27;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:28;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:29;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:30;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:31;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:32;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:33;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:34;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:35;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:36;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:37;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:38;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:39;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:40;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:41;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:42;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:43;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:44;blob_range:[NO_BLOB:0:9312];;column_id:10;c
hunk_idx:45;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:46;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:47;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:48;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:49;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:50;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:51;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:52;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:53;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:54;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:55;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:56;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:57;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:58;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:59;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:60;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:61;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:62;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:63;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:64;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:65;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:66;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:67;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:68;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:69;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:70;blob_range:[NO_BLOB:0:8592];;column_id:10;chunk_idx:71;blob_range:[NO_BLOB:0:8280];;column_id:10;chunk_idx:72;blob_range:[NO_BLOB:0:8288];;;;switched=(portion_id:220;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7574640;index_size:0;meta:(()););(portion_id:218;path_id:9438184000001;records_count:75000;schema_version:1;level:2;;column_size:7570008;index_size:0;meta:(()););(portion_id:221;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7574640;index_size:0;meta:(()););; 2025-06-24T21:16:56.100682Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;event=on_execution;consumer=GENERAL_COMPACTION;task_id=8b67aef8-514011f0-b58913a3-13cbb3a7;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=8b67aef8-514011f0-b58913a3-13cbb3a7;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=9437184;parent_id=[1:6031:8017];task_id=8b67aef8-514011f0-b58913a3-13cbb3a7;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=3; 2025-06-24T21:16:56.102233Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6031:8017];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-24T21:16:56.106718Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6031:8017];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:59;event=TTxWriteDraft; 2025-06-24T21:16:56.255267Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=tx_draft.cpp:16;event=draft_completed; 2025-06-24T21:16:56.255411Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=7570008;count=819;actions=__MEMORY,__DEFAULT,;waiting=2;; 2025-06-24T21:16:56.951527Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 9437184 2025-06-24T21:16:56.951668Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:6031:8017];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:121;from=0,0,0,0,;to=9999,9999,9999,9999,; 2025-06-24T21:16:56.951727Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6031:8017];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:141;itFrom=1;itTo=1;raw=7433340;count=1;packed=7574640; 2025-06-24T21:16:56.951807Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6031:8017];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:51;memory_size=86;data_size=62;sum=95362;count=1749; 2025-06-24T21:16:56.951874Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6031:8017];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:71;memory_size=26414;data_size=26406;sum=2613226;count=1750;size_of_meta=136; 2025-06-24T21:16:56.951940Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6031:8017];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=26486;data_size=26478;sum=2676226;count=875;size_of_portion=208; 2025-06-24T21:16:56.952655Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[2] (CS::GENERAL) apply at tablet 9437184 2025-06-24T21:16:57.094029Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 673 2025-06-24T21:16:57.099428Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=37893352;raw_bytes=37186700;count=5;records=375200} inactive {blob_bytes=111591656;raw_bytes=108150240;count=216;records=1200200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:16:57.555027Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=8b67aef8-514011f0-b58913a3-13cbb3a7;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-24T21:16:57.555117Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=8b67aef8-514011f0-b58913a3-13cbb3a7;fline=with_appended.cpp:65;portions=222,;task_id=8b67aef8-514011f0-b58913a3-13cbb3a7; 2025-06-24T21:16:57.556053Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=8b67aef8-514011f0-b58913a3-13cbb3a7;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::8b67aef8-514011f0-b58913a3-13cbb3a7; 2025-06-24T21:16:57.556157Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=8b67aef8-514011f0-b58913a3-13cbb3a7;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:9438184000001;path_id:9438184000001;size:22744072;portions_count:222;); 2025-06-24T21:16:57.556213Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=8b67aef8-514011f0-b58913a3-13cbb3a7;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:16:57.556304Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=8b67aef8-514011f0-b58913a3-13cbb3a7;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:16:57.556384Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;task_id=8b67aef8-514011f0-b58913a3-13cbb3a7;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799430063;tx_id=18446744073709551615;;current_snapshot_ts=1750799728123; 2025-06-24T21:16:57.556432Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=8b67aef8-514011f0-b58913a3-13cbb3a7;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:16:57.556491Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=8b67aef8-514011f0-b58913a3-13cbb3a7;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:57.556536Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=8b67aef8-514011f0-b58913a3-13cbb3a7;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:16:57.556626Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=8b67aef8-514011f0-b58913a3-13cbb3a7;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.886000s; 2025-06-24T21:16:57.556684Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=8b67aef8-514011f0-b58913a3-13cbb3a7;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:16:57.556871Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 673 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::TableSinkWithSubquery [GOOD] Test command err: Trying to start YDB, gRPC: 15617, MsgBus: 3781 2025-06-24T21:16:30.524579Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627843635085165:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:30.524672Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003114/r3tmp/tmpoKtqbF/pdisk_1.dat 2025-06-24T21:16:30.959386Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627843635085136:2079] 1750799790523561 != 1750799790523564 2025-06-24T21:16:30.968961Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15617, node 1 2025-06-24T21:16:31.005884Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:31.005974Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:31.007502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:31.115975Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:31.116007Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:31.116025Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize 
from file: (empty maybe) 2025-06-24T21:16:31.116175Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3781 2025-06-24T21:16:31.543648Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3781 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:31.853422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:31.876021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:31.890381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:32.065095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:32.213339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:32.299950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:34.161499Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627860814955975:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:34.161655Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:34.471004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.509049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.554143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.600533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.671229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.707134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.789229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.886185Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627860814956639:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:34.886290Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:34.886873Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627860814956644:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:34.890233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:34.900972Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627860814956646:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:34.994966Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627860814956697:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:35.525060Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627843635085165:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:35.525125Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 17747, MsgBus: 9945 2025-06-24T21:16:37.585204Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627870449075786:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:37.643224Z node 2 :METADATA_PROVIDER ERROR: log.cpp:78 ... rd__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:45.719996Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:48.364599Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519627917495318740:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:48.364693Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:48.400409Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:48.602172Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:48.860934Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519627917495320120:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:48.861037Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:48.861342Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519627917495320125:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:48.866303Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:48.880314Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519627917495320127:2405], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-24T21:16:48.953320Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519627917495320178:3199] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:49.706227Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519627900315448942:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:49.706286Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 4566, MsgBus: 6243 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003114/r3tmp/tmpgUABt7/pdisk_1.dat 2025-06-24T21:16:52.597349Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:52.597560Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:16:52.603262Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519627938153715113:2079] 1750799812350733 != 1750799812350736 2025-06-24T21:16:52.618715Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:52.618843Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 4566, node 4 2025-06-24T21:16:52.625571Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:52.707848Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:52.707887Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:52.707896Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:52.708045Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6243 TClient is connected to server localhost:6243 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:53.259328Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:53.264503Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:53.348885Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:56.591320Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627955333584935:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:56.591434Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:56.631424Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:56.681060Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:56.739174Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627955333585108:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:56.739280Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:56.739461Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627955333585113:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:56.744340Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:56.760711Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627955333585115:2313], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-24T21:16:56.842455Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627955333585166:2440] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:57.192577Z node 4 :TX_DATASHARD WARN: datashard__read_iterator.cpp:3439: 72075186224037888 Cancelled read: {[4:7519627959628552522:2328], 0} 2025-06-24T21:16:57.468056Z node 4 :TX_DATASHARD WARN: datashard__read_iterator.cpp:3439: 72075186224037889 Cancelled read: {[4:7519627959628552548:2337], 0} >> KqpIndexLookupJoin::InnerJoinOnlyLeftColumn+StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::UpdateConditionalKey-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 6151, MsgBus: 3492 2025-06-24T21:16:31.044472Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627845117250494:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:31.046072Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003112/r3tmp/tmpNAeDCo/pdisk_1.dat 2025-06-24T21:16:31.522045Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:31.529023Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627845117250476:2079] 1750799791043745 != 1750799791043748 2025-06-24T21:16:31.534017Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:31.534098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:31.538066Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6151, node 1 2025-06-24T21:16:31.620553Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:31.620599Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:31.620627Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:31.620795Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3492 2025-06-24T21:16:32.074698Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3492 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:32.299174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:32.336187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:32.518179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:32.687171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:32.760957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:34.664330Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627858002153989:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:34.664447Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:35.015422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:35.052068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:35.092099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:35.123283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:35.167954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:35.239532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:35.312794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:35.403871Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627862297121954:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:35.403960Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:35.404206Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627862297121959:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:35.408765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:35.427858Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627862297121961:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:35.513036Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627862297122012:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:36.046830Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627845117250494:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:36.046905Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"Tables":["EightShard"],"PlanNodeId":5,"Operators":[{"Inputs":[],"Path":"\/Root\/EightShard","Name":"Upsert","SinkType":"KqpTableSink","Table":"EightShard"}],"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Operators":[{"E-Rows":"0","Inputs":[{"ExternalPlanNodeId":1}],"Predicate":"item.Data \u003E 0","E-Cost":"0","E-Size":"0","Name":"Filter"}],"Plans":[{"Tables":["EightShard"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"Tabl ... 499759Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:51.499768Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:51.499898Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5178 TClient is connected to server localhost:5178 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:52.197833Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:16:52.209013Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:52.218790Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:52.343097Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:16:52.535338Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:52.623681Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.104481Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627950842291782:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:55.104584Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:55.170136Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.240880Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.277099Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.352865Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.407297Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.488860Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.545219Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.632189Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627950842292446:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:55.632308Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:55.633058Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627950842292451:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:55.637785Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:55.652646Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627950842292453:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:55.758072Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627950842292504:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:56.199272Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627933662420982:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:56.199368Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"PlanNodeId":8,"Plans":[{"Tables":["EightShard"],"PlanNodeId":7,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/EightShard","Name":"Upsert","Table":"EightShard"},{"Inputs":[],"Iterator":"precompute_1_0","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_1_0"}],"Node Type":"Effect"},{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["EightShard"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key [100, 100]","Key [200, 200]","Key [300, 300]"],"Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/EightShard","E-Rows":"0","ReadRangesPointPrefixLen":"1","ReadRangesKeys":["Key"],"Table":"EightShard","ReadColumns":["Data","Key"],"E-Cost":"0","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Node Type":"Stage"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_1_0","Node Type":"Precompute_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node Type":"Query","PlanNodeType":"Query","Stats":{"ResourcePoolId":"default"}},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/EightShard","reads":[{"columns":["Data","Key"],"scan_by":["Key [100, 100]","Key [200, 200]","Key [300, 300]"],"type":"Scan"}],"writes":[{"columns":["Data","Key"],"type":"MultiUpsert"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"Path":"\/Root\/EightShard","Name":"Upsert","Table":"EightShard"}],"Plans":[{"PlanNodeId":8,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key [100, 100]","Key [200, 200]","Key [300, 300]"],"Name":"TableRangeScan","Path":"\/Root\/EightShard","ReadRangesPointPrefixLen":"1","E-Rows":"0","ReadRangesKeys":["Key"],"Table":"EightShard","ReadColumns":["Data","Key"],"E-Cost":"0","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Node Type":"Upsert"}],"Node Type":"Effect"}],"Node Type":"Query","PlanNodeType":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0}}} >> KqpQuery::CreateAsSelectTypes+NotNull+IsOlap [GOOD] >> KqpParams::MissingOptionalParameter-UseSink [GOOD] >> KqpParams::ParameterTypes >> KqpIndexLookupJoin::CheckCastUint32ToUint16+StreamLookupJoin-NotNull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 13873, MsgBus: 26485 2025-06-24T21:16:21.122558Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627804372481135:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:21.122673Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031d9/r3tmp/tmpDIzdCW/pdisk_1.dat 2025-06-24T21:16:21.451570Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627804372481117:2079] 1750799781121840 != 1750799781121843 2025-06-24T21:16:21.461795Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13873, node 1 2025-06-24T21:16:21.493740Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:21.498276Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:21.513392Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:21.547861Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:21.547887Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:21.547893Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:21.548024Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26485 TClient is connected to server localhost:26485 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:16:22.143801Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:22.294525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:16:22.335615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:22.354747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:22.498843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:22.655742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:16:22.740931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:24.636447Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627817257384644:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:24.636603Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:25.030675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:25.075263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:25.109776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:25.158096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:25.197047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:25.270971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:25.343487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:25.392015Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627821552352606:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:25.392162Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:25.392441Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627821552352611:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:25.396873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:25.411166Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627821552352613:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:25.515795Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627821552352664:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:26.123031Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627804372481135:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:26.123097Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:26.634842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... epricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:49.482591Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:49.501211Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:49.560017Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:49.581505Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:49.811466Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:16:49.912400Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:52.840547Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627937652519324:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:52.840663Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:52.920831Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:52.959640Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:52.996540Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.034693Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.075133Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.172632Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.247431Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.342499Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627941947487284:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:53.342600Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:53.342792Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627941947487289:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:53.348299Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:53.360441Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627941947487291:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:53.445759Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627941947487342:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:53.562440Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627920472648729:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:53.562520Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:55.198948Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.283412Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.380235Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) {"Plan":{"Plans":[{"PlanNodeId":14,"Plans":[{"Tables":["SecondaryKeys"],"PlanNodeId":13,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"},{"Inputs":[],"Iterator":"precompute_2_0","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_2_0"}],"Node Type":"Effect"},{"PlanNodeId":11,"Plans":[{"PlanNodeId":10,"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Iterator":"Filter","Name":"Iterator"},{"E-Rows":"2","Inputs":[],"Predicate":"Contains","E-Cost":"0","E-Size":"10","Name":"Filter"}],"Node Type":"ConstantExpr-Filter"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_2_0","Node Type":"Precompute_2","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"E-Size":"0","LookupKeyColumns":["Key"],"Node Type":"TableLookup","PlanNodeId":2,"Path":"\/Root\/SecondaryKeys","Columns":["Key"],"E-Rows":"2","Plans":[{"PlanNodeId":1,"Operators":[{"Inputs":[],"Iterator":"precompute_0_1","Name":"Iterator"}],"Node Type":"ConstantExpr","CTE Name":"precompute_0_1"}],"Table":"SecondaryKeys","PlanNodeType":"Connection","E-Cost":"0"}],"Node Type":"Stage"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node 
Type":"Stage"}],"Subplan Name":"CTE precompute_1_0","Node Type":"Precompute_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node Type":"Query","PlanNodeType":"Query","Stats":{"ResourcePoolId":"default"}},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/SecondaryKeys","reads":[{"lookup_by":["Key"],"columns":["Key"],"type":"Lookup"}],"writes":[{"columns":["Key","Value"],"type":"MultiUpsert"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"}],"Plans":[{"PlanNodeId":8,"Operators":[{"E-Rows":"2","Predicate":"Contains","E-Cost":"0","E-Size":"10","Name":"Filter"}],"Node Type":"Filter"}],"Node Type":"Upsert"}],"Node Type":"Effect"}],"Node Type":"Query","PlanNodeType":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0}}} >> KqpJoin::IndexLoookupJoinStructJoin+StreamLookupJoin [GOOD] >> KqpJoin::IdxLookupSelf >> KqpJoinOrder::SortingsSimpleOrderByPKAlias+RemoveLimitOperator >> KqpJoinOrder::CanonizedJoinOrderTPCH4 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::CreateAsSelectTypes+NotNull+IsOlap [GOOD] Test command err: Trying to start YDB, gRPC: 4464, MsgBus: 25130 2025-06-24T21:16:32.277445Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627851854678147:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:32.277497Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00310d/r3tmp/tmp3fMy2g/pdisk_1.dat 2025-06-24T21:16:32.656051Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627851854678128:2079] 1750799792270968 != 1750799792270971 2025-06-24T21:16:32.673669Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4464, node 1 2025-06-24T21:16:32.680502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:32.680618Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:32.682635Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:32.766834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:32.766860Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:32.766872Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:32.767016Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25130 TClient is connected to server localhost:25130 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:16:33.305042Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:33.402671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:33.419245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:35.552259Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627864739580655:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:35.552286Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627864739580635:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:35.552415Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:35.556964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:35.571091Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627864739580672:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:16:35.660154Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627864739580723:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:36.006810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:36.564026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 14485, MsgBus: 9440 2025-06-24T21:16:37.729672Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627870051872103:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:37.729761Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00310d/r3tmp/tmpgF4y9B/pdisk_1.dat 2025-06-24T21:16:37.877929Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:37.879485Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519627870051872080:2079] 1750799797728639 != 1750799797728642 2025-06-24T21:16:37.892890Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:37.892976Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:37.898831Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14485, node 2 2025-06-24T21:16:37.990386Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:37.990415Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:37.990423Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:37.990598Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9440 TClient is connected to server localhost:9440 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:38.493377Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:38.500216Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:38.739199Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:41.089455Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627887231741896:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:41.089541Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627887231741903:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:41.089594Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:41.093681Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (Get ... 04588Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.811723Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.813085Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.815868Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.816487Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.820335Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.821177Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037943;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.825865Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.826491Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.828266Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037943;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.829011Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.834597Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.835359Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.836495Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.837172Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.842877Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.842881Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.843613Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.843625Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.850571Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.850572Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.851388Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037921;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.851682Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.858206Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037921;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.859177Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.859496Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.859780Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.866805Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.866806Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.867626Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.868164Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037937;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.874757Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.875675Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.882654Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.883653Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037919;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.891532Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037919;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.893387Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037949;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.900910Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037949;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.901726Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.908512Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.909319Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037917;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.916064Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037917;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.916841Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037945;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.923423Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037945;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.924173Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037939;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.925124Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037937;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.926063Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:16:58.931181Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037939;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:16:58.938537Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; >> KqpJoinOrder::TPCDS96-ColumnStore >> KqpJoin::AllowJoinsForComplexPredicates-StreamLookup [GOOD] >> KqpJoin::ComplexJoin >> KqpJoinOrder::TPCDS34+ColumnStore >> KqpIndexLookupJoin::CheckCastInt32ToInt16+StreamLookupJoin-NotNull >> KqpStats::OneShardNonLocalExec-UseSink [GOOD] >> TGroupMapperTest::MonteCarlo [GOOD] >> KqpLimits::TooBigColumn+useSink [GOOD] >> KqpLimits::ReplySizeExceeded |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::MonteCarlo [GOOD] >> KqpQuery::OlapCreateAsSelect_Complex [GOOD] >> KqpExplain::MultiJoinCteLinks [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpStats::OneShardNonLocalExec-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 27285, MsgBus: 13075 2025-06-24T21:16:27.912821Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627827583462754:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:27.912923Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003136/r3tmp/tmpiqfuVM/pdisk_1.dat 2025-06-24T21:16:28.280738Z node 1 :CONFIGS_DISPATCHER ERROR: 
configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627827583462731:2079] 1750799787911307 != 1750799787911310 2025-06-24T21:16:28.293149Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:28.325298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:28.325386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 27285, node 1 2025-06-24T21:16:28.332957Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:28.447068Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:28.447092Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:28.447100Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:28.447258Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13075 TClient is connected to server localhost:13075 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:16:28.923961Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:28.942366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:28.963639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:28.981161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:29.146765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:29.308866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:29.379177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:31.464910Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627844763333558:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:31.465023Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:31.809542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:31.845107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:31.882722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:31.923552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:31.971734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:32.047013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:32.091087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:32.186589Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627849058301516:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:32.186710Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:32.187195Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627849058301521:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:32.192129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:32.211475Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627849058301523:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:32.269996Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627849058301574:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:32.913604Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627827583462754:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:32.913684Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:33.493885Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519627853353269151:2479], status: GENERIC_ERROR, issues:
:2:12: Error: mismatched input 'INCORRECT_STMT' expecting {';', '(', '$', ALTER, ANALYZE, BACKUP, BATCH, COMMIT, CREATE, DECLARE, DEFINE, DELETE, DISCARD ... hard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:52.234671Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519627933291852707:2071] 1750799811932539 != 1750799811932536 2025-06-24T21:16:52.256293Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:52.256395Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:52.257323Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:52.257404Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:52.265088Z node 5 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 6 Cookie 6 2025-06-24T21:16:52.266511Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:52.268587Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5032, node 5 2025-06-24T21:16:52.385199Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:52.385226Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:52.385236Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:52.385432Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24938 TClient is connected to server localhost:24938 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:16:52.945559Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:52.946466Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:16:52.963202Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:52.992390Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:53.094718Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:53.268317Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:53.375963Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:56.696815Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519627952801013275:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:56.696948Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:56.740873Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:56.820022Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:56.870814Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519627931326174651:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:56.870900Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:56.916365Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:56.946478Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519627933291852869:2190];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:56.946550Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:57.004587Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:57.103625Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:57.162638Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:57.280408Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose 
itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:57.418861Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519627957095981516:2385], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:57.418987Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:57.419357Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519627957095981521:2388], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:57.425078Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:57.450118Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7519627957095981523:2389], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:57.516848Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7519627957095981600:4443] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } |96.1%| [TA] $(B)/ydb/core/mind/bscontroller/ut/test-results/unittest/{meta.json ... results_accumulator.log} |96.1%| [TA] {RESULT} $(B)/ydb/core/mind/bscontroller/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpExplain::Predicates [GOOD] >> TCdcStreamTests::MeteringServerless [GOOD] >> TCdcStreamTests::MeteringDedicated >> KqpIndexLookupJoin::LeftSemiJoinWithLeftFilter-StreamLookup [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKDatetime_Reboot [GOOD] >> KqpIndexLookupJoin::InnerJoinOnlyLeftColumn+StreamLookup [GOOD] >> KqpIndexLookupJoin::InnerJoinOnlyLeftColumn-StreamLookup >> KqpIndexLookupJoin::CheckCastUint32ToUint16+StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastUint32ToUint16+StreamLookupJoin+NotNull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::OlapCreateAsSelect_Complex [GOOD] Test command err: Trying to start YDB, gRPC: 27419, MsgBus: 6087 2025-06-24T21:16:38.495276Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627875083019427:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:38.495334Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003106/r3tmp/tmpWY97GO/pdisk_1.dat 2025-06-24T21:16:38.879530Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:38.880625Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627875083019399:2079] 1750799798494062 != 1750799798494065 TServer::EnableGrpc on GrpcPort 27419, node 1 2025-06-24T21:16:38.900041Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:38.900145Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:38.902238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:38.960882Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:38.960905Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:38.960915Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:38.961048Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6087 TClient is connected to server localhost:6087 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:16:39.503617Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:39.604240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:39.624999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:41.729582Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627887967921915:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:41.729601Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627887967921940:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:41.729688Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:41.733687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:41.745646Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627887967921943:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:16:41.817079Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627887967921994:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:42.145420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:16:42.281169Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519627892262889439:2303];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:16:42.282223Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519627892262889447:2305];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:16:42.318397Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519627892262889447:2305];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:16:42.318631Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037893 2025-06-24T21:16:42.322471Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519627892262889439:2303];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:16:42.322663Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037897 2025-06-24T21:16:42.327937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519627892262889447:2305];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:16:42.328225Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519627892262889447:2305];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:16:42.328578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519627892262889447:2305];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:16:42.328748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519627892262889447:2305];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:16:42.328872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519627892262889447:2305];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:16:42.328986Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037893;self_id=[1:7519627892262889447:2305];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:16:42.329128Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519627892262889447:2305];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:16:42.329245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519627892262889447:2305];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:16:42.329392Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519627892262889447:2305];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:16:42.329534Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519627892262889447:2305];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:16:42.329658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519627892262889447:2305];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:16:42.329904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519627892262889439:2303];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:16:42.329950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519627892262889439:2303];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:16:42.330155Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519627892262889439:2303];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:16:42.330378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519627892262889439:2303];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:16:42.330525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519627892262889439:2303];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:16:42.330655Z node 1 :TX_C ... 
2Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037908;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:17:03.721527Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[4:7519627982721466336:2474];ev=NActors::IEventHandle;tablet_id=72075186224037910;tx_id=281474976710666;this=88923022595008;method=TTxController::StartProposeOnExecute;tx_info=281474976710666:TX_KIND_SCHEMA;min=1750799823720;max=18446744073709551615;plan=0;src=[4:7519627956951661046:2151];cookie=232:3;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-24T21:17:03.722254Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037909;self_id=[4:7519627982721466324:2471];ev=NActors::IEventHandle;tablet_id=72075186224037909;tx_id=281474976710666;this=88923022495552;method=TTxController::StartProposeOnExecute;tx_info=281474976710666:TX_KIND_SCHEMA;min=1750799823721;max=18446744073709551615;plan=0;src=[4:7519627956951661046:2151];cookie=222:3;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-24T21:17:03.723742Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037908;self_id=[4:7519627982721466329:2472];ev=NActors::IEventHandle;tablet_id=72075186224037908;tx_id=281474976710666;this=88923022489952;method=TTxController::StartProposeOnExecute;tx_info=281474976710666:TX_KIND_SCHEMA;min=1750799823723;max=18446744073709551615;plan=0;src=[4:7519627956951661046:2151];cookie=212:3;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-24T21:17:03.748916Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;self_id=[4:7519627982721466338:2475];tablet_id=72075186224037911;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:03.749016Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;self_id=[4:7519627982721466338:2475];tablet_id=72075186224037911;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:03.749362Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;self_id=[4:7519627982721466338:2475];tablet_id=72075186224037911;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:03.749518Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;self_id=[4:7519627982721466338:2475];tablet_id=72075186224037911;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:03.749652Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;self_id=[4:7519627982721466338:2475];tablet_id=72075186224037911;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:03.749786Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;self_id=[4:7519627982721466338:2475];tablet_id=72075186224037911;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:03.749933Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037911;self_id=[4:7519627982721466338:2475];tablet_id=72075186224037911;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:03.750100Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;self_id=[4:7519627982721466338:2475];tablet_id=72075186224037911;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:03.750236Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;self_id=[4:7519627982721466338:2475];tablet_id=72075186224037911;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:17:03.750367Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;self_id=[4:7519627982721466338:2475];tablet_id=72075186224037911;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:17:03.750489Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;self_id=[4:7519627982721466338:2475];tablet_id=72075186224037911;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:17:03.755532Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:17:03.755611Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:17:03.755766Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:17:03.755826Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:17:03.756086Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:17:03.756125Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:17:03.756255Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:17:03.756303Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:17:03.756352Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:17:03.756384Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:17:03.756645Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:17:03.756811Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:17:03.757102Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:17:03.757143Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:17:03.757305Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:17:03.757360Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:17:03.757456Z node 4 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:17:03.757515Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:17:03.757562Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:17:03.758501Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:17:03.758545Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:17:03.768405Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;self_id=[4:7519627982721466338:2475];ev=NActors::IEventHandle;tablet_id=72075186224037911;tx_id=281474976710666;this=88923022488608;method=TTxController::StartProposeOnExecute;tx_info=281474976710666:TX_KIND_SCHEMA;min=1750799823767;max=18446744073709551615;plan=0;src=[4:7519627956951661046:2151];cookie=242:3;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-24T21:17:03.774739Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710666;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-24T21:17:03.779917Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710666;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-24T21:17:03.780756Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710666;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710666; 2025-06-24T21:17:03.781869Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710666;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-24T21:17:03.786486Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710666;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710666; 2025-06-24T21:17:03.788009Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710666;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710666; 2025-06-24T21:17:03.788593Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710666;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-24T21:17:03.794189Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710666;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710666; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::MultiJoinCteLinks [GOOD] Test command err: Trying to start YDB, gRPC: 28285, MsgBus: 14950 2025-06-24T21:16:29.745206Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627836963623621:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:29.745447Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00312a/r3tmp/tmpNZ9P0q/pdisk_1.dat 2025-06-24T21:16:30.190887Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:30.210608Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:30.210725Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 28285, node 1 2025-06-24T21:16:30.218498Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:30.269908Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:30.269938Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:30.269946Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:30.270122Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:14950 2025-06-24T21:16:30.771333Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14950 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:30.980188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:31.015892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:31.039739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:31.196433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:31.367031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:31.461806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:33.260312Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627854143494416:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:33.260430Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:33.576907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:33.609217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:33.653293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:33.696507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:33.768906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:33.806135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:33.842854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:33.915581Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627854143495078:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:33.915673Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:33.915925Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627854143495083:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:33.920467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:33.974162Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627854143495085:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:34.074056Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627858438462432:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:34.745449Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627836963623621:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:34.745532Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["EightShard"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"TopSort","Limit":"SUM(10,15)","TopSortBy":"row.Text"},{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/EightShard","E-Rows":"0","Table":"EightShard","ReadColumns":["Data","Key","Text"],"E-Cost":"0"}],"Node Type":"TopSort-TableFullSc ... 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:59.204978Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:59.212068Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:59.234409Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:59.333369Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:59.532560Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:59.634615Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:03.146031Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519627982196230752:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:03.146142Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:03.225786Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.301445Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.349863Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.390897Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.438266Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.515305Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.566492Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.672075Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519627982196231419:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:03.672158Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:03.675457Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519627982196231425:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:03.680671Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:03.695509Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7519627982196231427:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:17:03.770546Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7519627982196231478:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } {"Plan":{"Plans":[{"PlanNodeId":12,"Plans":[{"PlanNodeId":11,"Plans":[{"PlanNodeId":10,"Plans":[{"PlanNodeId":9,"Plans":[{"E-Size":"0","PlanNodeId":8,"LookupKeyColumns":["Key"],"Node Type":"TableLookup","Path":"\/Root\/EightShard","Columns":["Data","Key","Text"],"E-Rows":"0","Table":"EightShard","Plans":[{"PlanNodeId":7,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Iterator":"PartitionByKey","Name":"Iterator"},{"Inputs":[],"Name":"PartitionByKey","Input":"precompute_0_0"}],"Node Type":"ConstantExpr-Aggregate","CTE Name":"precompute_0_0"}],"PlanNodeType":"Connection","E-Cost":"0"}],"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"Limit","Limit":"1001"},{"Inputs":[{"InternalOperatorId":3},{"InternalOperatorId":2}],"E-Rows":"0","Condition":"es.Key = kv.Key","Name":"InnerJoin (MapJoin)","E-Size":"0","E-Cost":"0"},{"Inputs":[],"ToFlow":"precompute_0_0","Name":"ToFlow"},{"Inputs":[{"ExternalPlanNodeId":8}],"E-Rows":"0","Predicate":"Exist(item.Key)","Name":"Filter","E-Size":"0","E-Cost":"0"}],"Node Type":"Limit-InnerJoin (MapJoin)-ConstantExpr-Filter","CTE Name":"precompute_0_0"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":10}],"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"},{"PlanNodeId":5,"Subplan Name":"CTE precompute_0_0","Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["KeyValue"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/KeyValue","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"KeyValue","ReadColumns":["Key","Value"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Collect"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Node Type":"Precompute_0","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/EightShard","reads":[{"lookup_by":["Key"],"columns":["Data","Key","Text"],"type":"Lookup"}]},{"name":"\/Root\/KeyValue","reads":[{"columns":["Key","Value"],"scan_by":["Key (-∞, +∞)"],"type":"FullScan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":6,"Plans":[{"PlanNodeId":7,"Operators":[{"E-Rows":"0","Columns":["Data","Key","Text"],"E-Size":"0","E-Cost":"0","Name":"TableLookup","Table":"EightShard","LookupKeyColumns":["Key"]}],"Node Type":"TableLookup","PlanNodeType":"Connection"}],"Operators":[{"E-Rows":"0","Predicate":"Exist(item.Key)","Name":"Filter","E-Size":"0","E-Cost":"0"}],"Node Type":"Filter"},{"PlanNodeId":13,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Path":"\/Root\/KeyValue","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"KeyValue","ReadColumns":["Key","Value"],"E-Cost":"0"}],"Node 
Type":"TableFullScan"}],"Operators":[{"E-Rows":"0","Condition":"es.Key = kv.Key","Name":"InnerJoin (MapJoin)","E-Size":"0","E-Cost":"0"}],"Node Type":"InnerJoin (MapJoin)"}],"Operators":[{"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Operators":[{"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} >> KqpJoin::IdxLookupSelf [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::Predicates [GOOD] Test command err: Trying to start YDB, gRPC: 16492, MsgBus: 6456 2025-06-24T21:16:30.574082Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627841870998330:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:30.581657Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00311b/r3tmp/tmp0r0fn5/pdisk_1.dat 2025-06-24T21:16:31.183835Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:31.183934Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:31.202225Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:31.222564Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:31.223374Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627841870998309:2079] 1750799790570177 != 1750799790570180 TServer::EnableGrpc on GrpcPort 16492, node 1 2025-06-24T21:16:31.307972Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:31.308002Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:31.308009Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:31.308172Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6456 2025-06-24T21:16:31.622029Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6456 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:32.054589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:32.075707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:32.109890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:32.282308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:32.458208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:32.547140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:34.299748Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627859050869138:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:34.299849Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:34.636733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.673612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.767242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.810122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.882587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.952895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.994829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:35.064706Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627863345837104:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:35.064796Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:35.065028Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627863345837109:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:35.069804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:35.084104Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627863345837111:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:35.174447Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627863345837162:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:35.574464Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627841870998330:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:35.574599Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["EightShard"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"TopSort","Limit":"4","TopSortBy":"row.Data"},{"Scan":"Parallel","ReadRange":["Key [150, 266]"],"E-Size":"0","Name":"TableRangeScan","Inputs":[],"P ... sultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} Trying to start YDB, gRPC: 18532, MsgBus: 5261 2025-06-24T21:16:58.533055Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519627962896427629:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:58.533109Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00311b/r3tmp/tmpoT9qut/pdisk_1.dat 2025-06-24T21:16:58.740135Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:58.741592Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519627962896427608:2079] 1750799818531645 != 1750799818531648 2025-06-24T21:16:58.757730Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:58.757833Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:58.766246Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18532, node 5 2025-06-24T21:16:58.870924Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:58.870944Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:58.870950Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:58.871057Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5261 TClient is connected to server localhost:5261 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:16:59.559863Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:59.609462Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:59.634632Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:59.728665Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:59.951675Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:00.045647Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:03.184689Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519627984371265714:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:03.184847Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:03.297090Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.382134Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.437132Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.492570Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.535851Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519627962896427629:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:03.536409Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:03.549434Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.603026Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.667440Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.759736Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[5:7519627984371266381:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:03.759821Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:03.759884Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519627984371266386:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:03.763334Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:03.774534Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7519627984371266388:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:17:03.851523Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7519627984371266440:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:05.300550Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::SortingsByPKWithLookupJoin+RemoveLimitOperator >> KqpParams::ParameterTypes [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKInt64_Reboot [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftSemiJoinWithLeftFilter-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 12821, MsgBus: 5542 2025-06-24T21:16:49.660112Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627925596312548:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:49.660335Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003c22/r3tmp/tmpdYuFTl/pdisk_1.dat 2025-06-24T21:16:50.086683Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:50.103491Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:50.103598Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:50.105657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12821, node 1 2025-06-24T21:16:50.205850Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:50.205881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:50.205893Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:50.206057Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5542 2025-06-24T21:16:50.620574Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5542 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:51.057850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:51.120630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:51.350108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:51.523487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:51.601295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:53.168959Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627942776183181:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:53.169098Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:53.544822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.594025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.663016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.699837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.735165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.768524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.808176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.892529Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627942776183840:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:53.892624Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:53.892823Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627942776183845:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:53.898386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:53.915276Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627942776183847:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:53.970963Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627942776183898:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:54.662206Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627925596312548:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:54.662294Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:55.668122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.708613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/ ... 14 2025-06-24T21:17:00.657371Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25814 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:00.744633Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:17:00.757672Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:00.854349Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:01.058530Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:17:01.151191Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.554324Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627983863195464:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:03.554446Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:03.657664Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.729762Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.815645Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.898741Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.934016Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.993688Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:04.044192Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:04.120854Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627988158163429:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:04.120952Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:04.121253Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627988158163434:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:04.126054Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:04.142787Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519627988158163436:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:17:04.220646Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519627988158163487:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:04.631271Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519627966683324677:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:04.631352Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:05.489642Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.546549Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.627906Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.679419Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.724893Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.776237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoin::FullOuterJoin ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKDatetime_Reboot [GOOD] 
Test command err: 2025-06-24T21:14:54.242043Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:54.276517Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:54.276873Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:54.285793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:54.286072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:54.286381Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:54.286563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:54.286722Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:54.286868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:54.287029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:54.287191Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:54.287333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:54.287469Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:54.287618Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:54.323653Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:54.323964Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:54.324055Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:54.324279Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:54.324485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:54.324564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:54.324615Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:54.324774Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:54.324878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:54.324939Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:54.324978Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:54.325193Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:54.325276Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:54.325330Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:54.325368Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:54.325479Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:54.325550Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:54.325619Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 
2025-06-24T21:14:54.325676Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:54.325739Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:54.325801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:54.325840Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:54.326070Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:54.326116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:54.326154Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:54.326347Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:54.326394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:54.326424Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:54.326551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:54.326611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:54.326646Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:54.326720Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:54.326821Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:54.326878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:54.326916Z node 1 
:TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:54.327516Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=58; 2025-06-24T21:14:54.327620Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=41; 2025-06-24T21:14:54.327728Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=51; 2025-06-24T21:14:54.327826Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=46; 2025-06-24T21:14:54.327926Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:54.328043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:54.328099Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:54.328163Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... me=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=25230;data_size=25196;sum=13004960;count=7164;size_of_portion=208; 2025-06-24T21:17:06.535223Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=110575; 2025-06-24T21:17:06.535316Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=14; 2025-06-24T21:17:06.537223Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=1839; 2025-06-24T21:17:06.537286Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=112792; 2025-06-24T21:17:06.537339Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=112916; 2025-06-24T21:17:06.537410Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=14; 2025-06-24T21:17:06.538443Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=977; 2025-06-24T21:17:06.538494Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=114541; 2025-06-24T21:17:06.538664Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=114; 2025-06-24T21:17:06.538783Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=71; 2025-06-24T21:17:06.539230Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=395; 2025-06-24T21:17:06.539614Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=333; 2025-06-24T21:17:06.578987Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=39288; 2025-06-24T21:17:06.656268Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=77138; 2025-06-24T21:17:06.656412Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=18; 2025-06-24T21:17:06.656500Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=15; 2025-06-24T21:17:06.656571Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=9; 2025-06-24T21:17:06.656690Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=59; 2025-06-24T21:17:06.656767Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=10; 2025-06-24T21:17:06.656920Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=90; 2025-06-24T21:17:06.656995Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=10; 2025-06-24T21:17:06.657092Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=40; 2025-06-24T21:17:06.657211Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=62; 2025-06-24T21:17:06.657335Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=63; 2025-06-24T21:17:06.657393Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=244054; 2025-06-24T21:17:06.657573Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=21623968;raw_bytes=21227350;count=3;records=225200} inactive {blob_bytes=141321168;raw_bytes=137674250;count=221;records=1575200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:17:06.657730Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10010:11603];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:17:06.657805Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10010:11603];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:17:06.657892Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10010:11603];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:17:06.657951Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10010:11603];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:17:06.658239Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:17:06.658341Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:17:06.658433Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799399666;tx_id=18446744073709551615;;current_snapshot_ts=1750799696026; 2025-06-24T21:17:06.658493Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:17:06.658555Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:06.658610Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:06.658727Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:17:06.662773Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
self_id=[1:10133:11718];tablet_id=9437184;parent=[1:10010:11603];fline=manager.cpp:88;event=ask_data;request=request_id=212;9438184000001={portions_count=224};; 2025-06-24T21:17:06.675235Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10010:11603];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:17:06.676614Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10010:11603];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:17:06.676678Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-24T21:17:06.676716Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:17:06.676777Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10010:11603];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:17:06.676911Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10010:11603];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:17:06.677006Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10010:11603];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799399666;tx_id=18446744073709551615;;current_snapshot_ts=1750799696026; 2025-06-24T21:17:06.677065Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10010:11603];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:17:06.677133Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10010:11603];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:06.677189Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10010:11603];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:06.677292Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10010:11603];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=1.000000s; 2025-06-24T21:17:06.677362Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10010:11603];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::IdxLookupSelf [GOOD] Test command err: Trying to start YDB, gRPC: 61836, MsgBus: 5660 2025-06-24T21:16:53.919312Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627941208493180:2058];send_to=[0:7307199536658146131:7762515]; 
2025-06-24T21:16:53.919379Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003c0b/r3tmp/tmpspL3ht/pdisk_1.dat 2025-06-24T21:16:54.306241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:54.306335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:54.314963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:54.318936Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:54.327017Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627941208493162:2079] 1750799813918219 != 1750799813918222 TServer::EnableGrpc on GrpcPort 61836, node 1 2025-06-24T21:16:54.463516Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:54.463539Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:54.463553Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:54.463666Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5660 TClient is connected to server localhost:5660 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:16:54.961534Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:55.091134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:16:55.129322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:55.138635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:55.285749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:16:55.452630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.539213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:57.382550Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627958388363991:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:57.382700Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:57.667572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:57.743625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:57.797136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:57.866768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:57.902905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:57.995559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:58.031772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:58.107860Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627962683331957:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:58.107953Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:58.108055Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627962683331962:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:58.112902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:58.130797Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627962683331964:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:58.190793Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627962683332015:3430] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:58.921743Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627941208493180:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:58.921813Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:59.404417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... 5-06-24T21:17:01.577202Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:01.577290Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:01.583096Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6648, node 2 2025-06-24T21:17:01.667684Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:01.667704Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:01.667711Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:01.667819Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12715 TClient is connected to server localhost:12715 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:17:02.284729Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:02.294487Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:17:02.306275Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:02.397328Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:17:02.410127Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:02.658883Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:02.742429Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:04.994890Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627988347096530:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:04.994990Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.054882Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.088718Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.124020Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.169551Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.210779Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.259646Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.308197Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.386781Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627992642064481:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.386875Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.387162Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519627992642064486:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.393235Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:05.406046Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519627992642064488:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:17:05.481395Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519627992642064539:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:06.379266Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519627975462193172:2181];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:06.379350Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:06.533297Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:06.574684Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:06.645869Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::SortingsDifferentDirs-RemoveLimitOperator >> KqpIndexLookupJoin::CheckCastInt32ToInt16-StreamLookupJoin-NotNull >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt32_Reboot [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpParams::ParameterTypes [GOOD] Test command err: Trying to start YDB, gRPC: 23277, MsgBus: 18330 2025-06-24T21:16:40.522378Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627886830803157:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:40.540353Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003102/r3tmp/tmpPYwT59/pdisk_1.dat 2025-06-24T21:16:40.957017Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:40.958081Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627886830803129:2079] 1750799800517688 != 1750799800517691 TServer::EnableGrpc on GrpcPort 23277, node 1 2025-06-24T21:16:40.978489Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> 
Disconnected 2025-06-24T21:16:40.978577Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:40.982412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:41.030302Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:41.030329Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:41.030336Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:41.030489Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18330 TClient is connected to server localhost:18330 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:16:41.539250Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:41.690540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:41.732238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:41.878327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:42.064832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:42.130308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:44.011738Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627904010673958:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:44.011837Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:44.356065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:44.391737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:44.427802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:44.461745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:44.490325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:44.529995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:44.608335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:44.706532Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627904010674623:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:44.706634Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:44.706679Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627904010674628:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:44.710561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:44.722623Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627904010674630:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:44.778873Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627904010674681:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:45.522511Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627886830803157:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:45.522576Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 8839, MsgBus: 28111 2025-06-24T21:16:47.023076Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627915563698529:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:47.023193Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPat ... ists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519627941037269402:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:58.424281Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 22974, MsgBus: 5427 2025-06-24T21:17:01.076515Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519627975264409500:2152];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:01.076645Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003102/r3tmp/tmprIhv89/pdisk_1.dat 2025-06-24T21:17:01.281109Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:01.286988Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519627975264409372:2079] 1750799821029896 != 1750799821029899 2025-06-24T21:17:01.309760Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:01.309871Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:01.312735Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22974, node 4 2025-06-24T21:17:01.472513Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:01.472543Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:01.472554Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T21:17:01.472715Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5427 2025-06-24T21:17:02.079097Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5427 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:02.259848Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:02.273699Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:02.287599Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:02.381976Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:02.691867Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:17:02.799567Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:05.419237Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627992444280210:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.419343Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.491066Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.535249Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.591005Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.631365Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.671133Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.730648Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.819807Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.924973Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627992444280870:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.925072Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.925148Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627992444280875:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.931301Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:05.946274Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627992444280877:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:17:06.009013Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627996739248224:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:06.065962Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627975264409500:2152];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:06.066023Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKInt64_Reboot [GOOD] Test command err: 2025-06-24T21:14:49.389426Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:49.408624Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:49.408894Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:49.415947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:49.416141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:49.416356Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:49.416471Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:49.416585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:49.416712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:49.416853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:49.416972Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:49.417096Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:49.417191Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.417305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:49.453342Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:49.453578Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:49.453636Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:49.453857Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:49.454014Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:49.454082Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:49.454123Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:49.454249Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:49.454315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:49.454361Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:49.454388Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:49.454596Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:49.454670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:49.454712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 
2025-06-24T21:14:49.454748Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:49.454833Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:49.454886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:49.454931Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:49.454979Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:49.455062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:49.455112Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:49.455176Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:49.455397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:49.455445Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:49.455485Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:49.455656Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:49.455702Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:49.455730Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:49.455951Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:49.456020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.456061Z 
node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:49.456161Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:49.456243Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:49.456291Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:49.456319Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:49.456803Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=63; 2025-06-24T21:14:49.456884Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=30; 2025-06-24T21:14:49.456960Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=36; 2025-06-24T21:14:49.457037Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=36; 2025-06-24T21:14:49.457127Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:49.457221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:49.457271Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:49.457329Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
ame=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=26286;data_size=26260;sum=13688824;count=7164;size_of_portion=208; 2025-06-24T21:17:07.620211Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=129519; 2025-06-24T21:17:07.620319Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=16; 2025-06-24T21:17:07.622235Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=1838; 2025-06-24T21:17:07.622306Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=131807; 2025-06-24T21:17:07.622366Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=131971; 2025-06-24T21:17:07.622452Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=16; 2025-06-24T21:17:07.623787Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=1264; 2025-06-24T21:17:07.623851Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=134032; 2025-06-24T21:17:07.624057Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=135; 2025-06-24T21:17:07.624198Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=85; 2025-06-24T21:17:07.624643Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=386; 2025-06-24T21:17:07.625020Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=319; 2025-06-24T21:17:07.654321Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=29214; 2025-06-24T21:17:07.694250Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=39788; 2025-06-24T21:17:07.694394Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=18; 2025-06-24T21:17:07.694465Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=15; 2025-06-24T21:17:07.694521Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=7; 2025-06-24T21:17:07.694622Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=57; 2025-06-24T21:17:07.694678Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=8; 2025-06-24T21:17:07.694787Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=65; 2025-06-24T21:17:07.694847Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=8; 2025-06-24T21:17:07.694931Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=38; 2025-06-24T21:17:07.695043Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=61; 2025-06-24T21:17:07.695164Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=53; 2025-06-24T21:17:07.695211Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=213713; 2025-06-24T21:17:07.695395Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=22538992;raw_bytes=22128150;count=3;records=225200} inactive {blob_bytes=147791880;raw_bytes=143975050;count=221;records=1575200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:17:07.695550Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10316:11909];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:17:07.695616Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10316:11909];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:17:07.695696Z node 1 :TX_COLUMNSHARD 
DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10316:11909];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:17:07.695751Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10316:11909];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:17:07.695979Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:17:07.696068Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:17:07.696148Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799394983;tx_id=18446744073709551615;;current_snapshot_ts=1750799691186; 2025-06-24T21:17:07.696201Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:17:07.696257Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:07.696302Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:07.696410Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:17:07.700707Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:10439:12024];tablet_id=9437184;parent=[1:10316:11909];fline=manager.cpp:88;event=ask_data;request=request_id=215;9438184000001={portions_count=224};; 2025-06-24T21:17:07.703642Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10316:11909];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:17:07.704122Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10316:11909];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:17:07.704166Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:17:07.704201Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:17:07.704257Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10316:11909];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:17:07.704360Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10316:11909];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:17:07.704446Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10316:11909];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799394983;tx_id=18446744073709551615;;current_snapshot_ts=1750799691186; 2025-06-24T21:17:07.704503Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10316:11909];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:17:07.704562Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10316:11909];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:07.704611Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10316:11909];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:07.704705Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10316:11909];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=1.000000s; 2025-06-24T21:17:07.704768Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10316:11909];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; >> KqpJoinOrder::TPCDS92-ColumnStore >> TColumnShardTestReadWrite::CompactionInGranule_PKInt32 [GOOD] >> OlapEstimationRowsCorrectness::TPCH10 >> KqpLimits::QSReplySize-useSink [GOOD] >> KqpJoinOrder::SortingsWithLookupJoin3+RemoveLimitOperator >> KqpIndexLookupJoin::CheckCastInt32ToInt16+StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastInt32ToInt16+StreamLookupJoin+NotNull >> KqpJoinOrder::CanonizedJoinOrderTPCH1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt32_Reboot [GOOD] Test command err: 2025-06-24T21:14:54.378456Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:54.396780Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:54.397025Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:54.403050Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:54.403260Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:54.403439Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:54.403526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:54.403603Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:54.403689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:54.403795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:54.403868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:54.403948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:54.404058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:54.404139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:54.431134Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:54.431377Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:54.431431Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:54.431578Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:54.431713Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:54.431776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:54.431830Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:54.431940Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:54.431991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:54.432046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:54.432070Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:54.432217Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:54.432289Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:54.432328Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:54.432348Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:54.432439Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:54.432486Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:54.432530Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:54.432573Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:54.432631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:54.432672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2025-06-24T21:14:54.432707Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:54.432931Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:54.432974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:54.433008Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:54.433201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:54.433245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:54.433271Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:54.433379Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:54.433409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:54.433438Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:54.433507Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:54.433569Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:54.433621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:54.433645Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:54.434064Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=52; 2025-06-24T21:14:54.434141Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=28; 2025-06-24T21:14:54.434194Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=21; 2025-06-24T21:14:54.434269Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=33; 2025-06-24T21:14:54.434348Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:54.434413Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:54.434454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:54.434502Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... ame=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=25230;data_size=25196;sum=13004960;count=7164;size_of_portion=208; 2025-06-24T21:17:08.938031Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=147397; 2025-06-24T21:17:08.938157Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=16; 2025-06-24T21:17:08.940327Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=2080; 2025-06-24T21:17:08.940407Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=149965; 2025-06-24T21:17:08.940468Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=150112; 2025-06-24T21:17:08.940552Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=17; 2025-06-24T21:17:08.941546Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=917; 2025-06-24T21:17:08.941603Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=151759; 2025-06-24T21:17:08.941780Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=112; 2025-06-24T21:17:08.941895Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=65; 2025-06-24T21:17:08.942259Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=312; 2025-06-24T21:17:08.942566Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=245; 2025-06-24T21:17:08.970850Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=28179; 2025-06-24T21:17:09.008880Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=37851; 2025-06-24T21:17:09.009025Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=19; 2025-06-24T21:17:09.009105Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=16; 2025-06-24T21:17:09.009165Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=11; 2025-06-24T21:17:09.009270Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=55; 2025-06-24T21:17:09.009333Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=8; 2025-06-24T21:17:09.009443Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=68; 2025-06-24T21:17:09.009503Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=8; 2025-06-24T21:17:09.009592Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=39; 2025-06-24T21:17:09.009713Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=69; 2025-06-24T21:17:09.009821Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=59; 2025-06-24T21:17:09.009871Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=227635; 2025-06-24T21:17:09.010051Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=21623968;raw_bytes=21227350;count=3;records=225200} inactive {blob_bytes=141321168;raw_bytes=137674250;count=221;records=1575200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:17:09.010196Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10011:11604];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:17:09.010270Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10011:11604];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:17:09.010350Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10011:11604];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:17:09.010404Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10011:11604];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:17:09.010669Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:17:09.010757Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:17:09.010838Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799399810;tx_id=18446744073709551615;;current_snapshot_ts=1750799696170; 2025-06-24T21:17:09.010890Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:17:09.010945Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:09.010993Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:09.011098Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:17:09.015225Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:10134:11719];tablet_id=9437184;parent=[1:10011:11604];fline=manager.cpp:88;event=ask_data;request=request_id=212;9438184000001={portions_count=224};; 2025-06-24T21:17:09.018403Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10011:11604];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:17:09.019530Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:10011:11604];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:17:09.019583Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-24T21:17:09.019618Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:17:09.019676Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10011:11604];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:17:09.019783Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10011:11604];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:17:09.019869Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10011:11604];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799399810;tx_id=18446744073709551615;;current_snapshot_ts=1750799696170; 2025-06-24T21:17:09.019923Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10011:11604];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:17:09.019980Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10011:11604];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:09.020030Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10011:11604];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:09.020130Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10011:11604];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=1.000000s; 2025-06-24T21:17:09.020209Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10011:11604];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKInt32 [GOOD] Test command err: 2025-06-24T21:14:56.427664Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:56.454260Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:56.454574Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:56.462108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:56.462319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:56.462540Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:56.462664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:56.462795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:56.462921Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:56.463031Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:56.463129Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:56.463256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:56.463405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:56.463504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:56.493250Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:56.493491Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:56.493545Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:56.493716Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:56.493872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:56.493951Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:56.494002Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:56.494122Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:56.494212Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:56.494256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:56.494287Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:56.494436Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:56.494498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:56.494534Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:56.494560Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:56.494673Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:56.494741Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:56.494808Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:56.494858Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:56.494912Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:56.494974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2025-06-24T21:14:56.495003Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:56.495238Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:56.495282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:56.520237Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:56.520590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:56.520685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:56.520722Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:56.520843Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:56.520878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:56.520907Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:56.521015Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:56.521081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:56.521135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:56.521172Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:56.521661Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=80; 2025-06-24T21:14:56.521764Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=40; 2025-06-24T21:14:56.521837Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=32; 2025-06-24T21:14:56.521945Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=58; 2025-06-24T21:14:56.522052Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:56.522132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:56.522189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:56.522236Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... LOB:0:9240];;column_id:9;chunk_idx:44;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:45;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:46;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:9240];;column_id:10;chunk_idx:0;blob_range:[NO_BLOB:0:7272];;column_id:10;chunk_idx:1;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:2;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:3;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:4;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:5;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:6;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:7;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:8;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:9;blob_range:[NO_BLOB:0:8656];;column_id:10;chunk_idx:10;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:11;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:12;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:13;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:14;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:15;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:16;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:17;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:18;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:19;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:20;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:21;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:22;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:23;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:24;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_id
x:25;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:26;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:27;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:28;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:29;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:30;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:31;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:32;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:33;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:34;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:35;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:36;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:37;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:38;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:39;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:40;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:41;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:42;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:43;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:44;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:45;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:46;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:47;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:48;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:49;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:50;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:51;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:52;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:53;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:54;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:55;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:56;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:57;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:58;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:59;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:60;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:61;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:62;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:63;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:64;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:65;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:66;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:67;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:68;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:69;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:70;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:71;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:72;blob_range:[NO_BLOB:0:9424];;;;switched=(portion_id:221;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7200040;index_size:0;meta:(()););(portion_id:220;path_id:9438184000001;records_count:75000;schema_version:1;level:2;;column_size:7198464;index_size:0;meta:(()););; 2025-06-24T21:17:09.403658Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;event=on_execution;consumer=GENERAL_COMPACTION;task_id=94d690d0-514011f0-b78d3a46-4737bbce;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=94d690d0-514011f0-b78d3a46-4737bbce;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=9437184;parent_id=[1:5588:7575];task_id=94d690d0-514011f0-b78d3a46-4737bbce;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=2; 2025-06-24T21:17:09.405347Z node 1 :TX_COLUMNSHARD DEBUG: 
log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-24T21:17:09.409818Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:59;event=TTxWriteDraft; 2025-06-24T21:17:09.555756Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=tx_draft.cpp:16;event=draft_completed; 2025-06-24T21:17:09.555902Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=7198464;count=779;actions=__MEMORY,__DEFAULT,;waiting=2;; 2025-06-24T21:17:10.208340Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 9437184 2025-06-24T21:17:10.208507Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:121;from=0,0,0,0,;to=74999,74999,74999,74999,; 2025-06-24T21:17:10.208577Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:141;itFrom=1;itTo=1;raw=7069450;count=1;packed=7200040; 2025-06-24T21:17:10.208661Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:51;memory_size=86;data_size=60;sum=88752;count=1743; 2025-06-24T21:17:10.208772Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:71;memory_size=25134;data_size=25124;sum=2348976;count=1744;size_of_meta=136; 2025-06-24T21:17:10.208853Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5588:7575];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=25206;data_size=25196;sum=2411760;count=872;size_of_portion=208; 2025-06-24T21:17:10.209603Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[2] (CS::GENERAL) apply at tablet 9437184 2025-06-24T21:17:10.316061Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 633 2025-06-24T21:17:10.322283Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=28824008;raw_bytes=28296800;count=4;records=300200} inactive {blob_bytes=112525736;raw_bytes=109396450;count=217;records=1275200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:17:10.674938Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=94d690d0-514011f0-b78d3a46-4737bbce;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-24T21:17:10.675022Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=94d690d0-514011f0-b78d3a46-4737bbce;fline=with_appended.cpp:65;portions=222,;task_id=94d690d0-514011f0-b78d3a46-4737bbce; 2025-06-24T21:17:10.675825Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=94d690d0-514011f0-b78d3a46-4737bbce;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::94d690d0-514011f0-b78d3a46-4737bbce; 2025-06-24T21:17:10.675913Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;task_id=94d690d0-514011f0-b78d3a46-4737bbce;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:9438184000001;path_id:9438184000001;size:21623968;portions_count:222;); 2025-06-24T21:17:10.675962Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=94d690d0-514011f0-b78d3a46-4737bbce;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:17:10.676044Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=94d690d0-514011f0-b78d3a46-4737bbce;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:17:10.676116Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=94d690d0-514011f0-b78d3a46-4737bbce;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799399822;tx_id=18446744073709551615;;current_snapshot_ts=1750799698003; 2025-06-24T21:17:10.676166Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=94d690d0-514011f0-b78d3a46-4737bbce;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:17:10.676234Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=94d690d0-514011f0-b78d3a46-4737bbce;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:10.676277Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=94d690d0-514011f0-b78d3a46-4737bbce;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:10.676361Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=94d690d0-514011f0-b78d3a46-4737bbce;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.895000s; 2025-06-24T21:17:10.676414Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=94d690d0-514011f0-b78d3a46-4737bbce;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:17:10.676600Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 633 >> KqpIndexLookupJoin::InnerJoinOnlyRightColumn+StreamLookup >> RetryPolicy::TWriteSession_TestBrokenPolicy [GOOD] >> RetryPolicy::TWriteSession_RetryOnTargetCluster >> KqpIndexLookupJoin::LeftJoinCustomColumnOrder+StreamLookup >> KqpLimits::QueryExecTimeoutCancel [GOOD] >> KqpLimits::QueryExecTimeout ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::QSReplySize-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 27418, MsgBus: 61032 2025-06-24T21:16:18.114926Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627790005371479:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:18.115062Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031f0/r3tmp/tmpZGr9Uw/pdisk_1.dat 2025-06-24T21:16:18.417278Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:18.417859Z node 1 :CONFIGS_DISPATCHER ERROR: 
configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627790005371461:2079] 1750799778113458 != 1750799778113461 TServer::EnableGrpc on GrpcPort 27418, node 1 2025-06-24T21:16:18.481050Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:18.482439Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:18.505342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:18.520971Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:18.520991Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:18.521004Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:18.521140Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61032 TClient is connected to server localhost:61032 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:16:19.134524Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:19.152265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:19.174349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:19.186392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:19.327128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:19.502120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:16:19.581246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:21.515174Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627802890274985:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:21.515358Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:21.868621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:21.906752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:21.939837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:21.981605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:22.012921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:22.089506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:22.128904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:22.217596Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627807185242945:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:22.217674Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:22.217900Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627807185242950:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:22.221775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:22.233606Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627807185242952:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:22.311119Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627807185243003:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:23.116145Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627790005371479:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:23.116226Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:23.410001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... 2025-06-24T21:16:50.700494Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:50.700597Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:50.706178Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14917, node 4 2025-06-24T21:16:50.775767Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:50.775795Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:50.775804Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:50.775948Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15457 TClient is connected to server localhost:15457 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:16:51.433655Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:51.442253Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:51.455307Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:51.524278Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:51.548048Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:51.803504Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:51.907496Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:54.765955Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627946212616365:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:54.766061Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:54.848809Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.903019Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.976691Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.022733Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.106104Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.155403Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.250496Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.365634Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627950507584333:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:55.365745Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:55.365971Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627950507584338:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:55.370212Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:55.386786Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627950507584340:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:55.450877Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627950507584391:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:55.511242Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627929032745575:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:55.511314Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:57.126784Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:05.637274Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:17:05.637307Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:10.248918Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=NTljZWUyNDYtOTdjODM2NGItY2E3YmNiYi05MWY5MTFhMA==, ActorId: [4:7519628006342160435:2636], ActorState: ExecuteState, TraceId: 01jyhwth3vdh1vb8nffrrgjqb5, Create QueryResponse for error on request, msg:
: Error: Intermediate data materialization exceeded size limit (88240925 > 50331648). This usually happens when trying to write large amounts of data or to perform lookup by big collection of keys in single query. Consider using smaller batches of data., code: 2013 >> KqpJoin::ComplexJoin [GOOD] >> TColumnShardTestReadWrite::CompactionSplitGranule_PKUInt32 [GOOD] >> KqpJoinOrder::TPCHEveryQueryWorks+ColumnStore >> KqpIndexLookupJoin::LeftOnlyJoinValueColumn+StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::ComplexJoin [GOOD] Test command err: Trying to start YDB, gRPC: 63515, MsgBus: 4419 2025-06-24T21:16:50.668898Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627928461764099:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:50.669245Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003c1d/r3tmp/tmpryd8AX/pdisk_1.dat 2025-06-24T21:16:51.202569Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:51.202912Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:51.203561Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627928461763893:2079] 1750799810640685 != 1750799810640688 2025-06-24T21:16:51.215622Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:51.217615Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63515, node 1 2025-06-24T21:16:51.317781Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:51.317807Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:51.317814Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:51.317917Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4419 2025-06-24T21:16:51.671285Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4419 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:52.006292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:52.040855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:52.219614Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:52.387485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:52.483807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:54.305614Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627945641634732:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:54.305757Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:54.617773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.660869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.737430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.774132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.809478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.876646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.922189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.994469Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627945641635393:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:54.994550Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:54.994728Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627945641635398:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:55.001261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:55.018420Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627945641635400:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:55.078144Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627949936602747:3430] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:55.673745Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627928461764099:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:55.673830Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 62350, MsgBus: 20218 2025-06-24T21:17:03.396094Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627985900342544:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:03.397186Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath ... classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:03.711984Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20218 TClient is connected to server localhost:20218 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:04.308730Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:17:04.323906Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:17:04.341665Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:04.420181Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:17:04.436114Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:04.651108Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:04.744302Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:07.420134Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628003080213340:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:07.420246Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:07.501095Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.549487Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.589154Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.623674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.706923Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.787834Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.883390Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.963490Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628003080214006:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:07.963604Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:07.967493Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628003080214011:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:07.971988Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:07.989848Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628003080214013:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:17:08.055424Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628007375181360:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:08.391270Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519627985900342544:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:08.391346Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:09.324917Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:09.392067Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:09.486809Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:09.534818Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:09.584050Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::TPCDS87-ColumnStore >> KqpIndexLookupJoin::CheckCastUint32ToUint16+StreamLookupJoin+NotNull [GOOD] >> KqpIndexLookupJoin::InnerJoinOnlyLeftColumn-StreamLookup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranule_PKUInt32 [GOOD] Test command err: 2025-06-24T21:15:14.876507Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:14.898952Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:14.899276Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:14.906335Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:14.906578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:14.906840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:14.906923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:14.907017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:14.907172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:14.907290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:14.907371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:14.907439Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:14.907549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:14.907662Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:14.939793Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:14.940066Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:14.940142Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:14.940363Z node 1 
:TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:14.940613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:14.940710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:14.940759Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:14.940884Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:14.940955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:14.941007Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:14.941039Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:14.941212Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:14.941305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:14.941351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:14.941383Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:14.941483Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:14.941551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:14.941606Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:14.941650Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:14.941704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 
2025-06-24T21:15:14.941765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:14.941800Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:14.942034Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:14.942079Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:14.942117Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:14.942308Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:14.942383Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:14.942415Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:14.942552Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:14.942602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:14.942637Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:14.942725Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:14.942814Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:14.942866Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:14.942896Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:14.943367Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=46; 2025-06-24T21:15:14.943476Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=48; 2025-06-24T21:15:14.943564Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=37; 2025-06-24T21:15:14.943642Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=35; 2025-06-24T21:15:14.943741Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:14.943831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:14.943887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:14.943938Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=2126;data_size=2102;sum=522292;count=216;size_of_portion=208; 2025-06-24T21:17:13.585542Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=27556; 2025-06-24T21:17:13.585641Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=15; 2025-06-24T21:17:13.586599Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=898; 2025-06-24T21:17:13.586677Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=28894; 2025-06-24T21:17:13.586735Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=29082; 2025-06-24T21:17:13.586816Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=15; 2025-06-24T21:17:13.587229Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=341; 2025-06-24T21:17:13.587292Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=30242; 2025-06-24T21:17:13.587500Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=133; 2025-06-24T21:17:13.587699Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=139; 2025-06-24T21:17:13.587940Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=184; 2025-06-24T21:17:13.588163Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=164; 2025-06-24T21:17:13.594255Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=6007; 2025-06-24T21:17:13.601654Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=7221; 2025-06-24T21:17:13.601802Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=18; 2025-06-24T21:17:13.601873Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=16; 2025-06-24T21:17:13.601923Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=7; 2025-06-24T21:17:13.602054Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=70; 2025-06-24T21:17:13.602116Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=7; 2025-06-24T21:17:13.602227Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=69; 2025-06-24T21:17:13.602286Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=7; 2025-06-24T21:17:13.602378Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=44; 2025-06-24T21:17:13.602491Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=64; 2025-06-24T21:17:13.602619Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=87; 2025-06-24T21:17:13.602671Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=54753; 2025-06-24T21:17:13.602912Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=108238344;raw_bytes=183045560;count=15;records=1915000} inactive {blob_bytes=205426288;raw_bytes=316809958;count=39;records=3635000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:17:13.603066Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:17:13.603366Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:17:13.603469Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:17:13.603551Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:17:13.603739Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:17:13.603847Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-24T21:17:13.603944Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799418976;tx_id=18446744073709551615;;current_snapshot_ts=1750799715904; 2025-06-24T21:17:13.604020Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:17:13.604103Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:13.604156Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:13.604327Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:17:13.606424Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:4112:6074];tablet_id=9437184;parent=[1:4077:6047];fline=manager.cpp:88;event=ask_data;request=request_id=205;9438184000001={portions_count=54};; 2025-06-24T21:17:13.621350Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:4077:6047];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:17:13.621900Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:17:13.621957Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-24T21:17:13.622005Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:17:13.622082Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:17:13.622232Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-24T21:17:13.622330Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799418976;tx_id=18446744073709551615;;current_snapshot_ts=1750799715904; 2025-06-24T21:17:13.622412Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:17:13.622490Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:13.622565Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:13.622683Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.999000s; 2025-06-24T21:17:13.622760Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4077:6047];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; >> KqpJoin::FullOuterJoin [GOOD] >> KqpJoin::FullOuterJoin2 >> KqpLimits::ReplySizeExceeded [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastUint32ToUint16+StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 6088, MsgBus: 27969 2025-06-24T21:17:01.095969Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627974765911666:2234];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003bf7/r3tmp/tmpvIHGdN/pdisk_1.dat 
2025-06-24T21:17:01.348655Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:17:01.543651Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:01.543948Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:01.549718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:01.593782Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:01.598541Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627970470944160:2079] 1750799820978639 != 1750799820978642 TServer::EnableGrpc on GrpcPort 6088, node 1 2025-06-24T21:17:01.690471Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:01.690507Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:01.690528Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:01.690683Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27969 2025-06-24T21:17:02.059483Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27969 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:02.492166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:17:02.515872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:17:02.528509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:02.689944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:02.903474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:03.011624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:04.767886Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627987650814973:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:04.768008Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.076143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.115231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.163958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.205244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.244821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.320343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.367357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:05.467777Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627991945782939:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.467897Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.468185Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627991945782944:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.473370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:05.486360Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627991945782946:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:17:05.588471Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627991945782997:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:06.059736Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627974765911666:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:06.059808Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:06.776572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... a/build/build_root/2btm/003bf7/r3tmp/tmpHqLXzJ/pdisk_1.dat 2025-06-24T21:17:08.505730Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:17:08.619301Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628003367113455:2079] 1750799828380635 != 1750799828380638 2025-06-24T21:17:08.691719Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:08.703617Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:08.703706Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:08.712006Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27202, node 2 2025-06-24T21:17:08.803651Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:08.803668Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:08.803673Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:08.803765Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1836 TClient is connected to server localhost:1836 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:09.337190Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:09.361326Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:09.467305Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:09.471895Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:17:09.636776Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:09.719639Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:12.475301Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628020546984264:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:12.475611Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:12.555415Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:12.634709Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:12.688094Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:12.773945Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:12.816348Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:12.882764Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:12.967498Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:13.083552Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628024841952232:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:13.083648Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:13.083961Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628024841952237:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:13.088477Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:13.107298Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628024841952239:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:17:13.170574Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628024841952290:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:13.423670Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628003367113605:2173];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:13.423726Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:14.421522Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:14.539668Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::InnerJoinOnlyLeftColumn-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 23952, MsgBus: 26048 2025-06-24T21:17:00.286710Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627969415670283:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:00.300090Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003bfd/r3tmp/tmppo1HOY/pdisk_1.dat 2025-06-24T21:17:00.856030Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:00.856124Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:00.862424Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:00.897925Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:00.899294Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627969415670161:2079] 1750799820272523 != 1750799820272526 TServer::EnableGrpc on GrpcPort 23952, node 1 2025-06-24T21:17:01.003741Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:01.003765Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty 
maybe) 2025-06-24T21:17:01.003772Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:01.003876Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:01.299516Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26048 TClient is connected to server localhost:26048 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:01.871916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:01.899228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:01.911873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:02.063258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:02.246111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:17:02.330482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:04.098367Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627986595540978:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:04.098491Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:04.453579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:04.526845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:04.585857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:04.622291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:04.654448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:04.737745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:04.813014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:04.882624Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627986595541647:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:04.882729Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:04.882809Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627986595541652:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:04.886766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:04.905702Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627986595541654:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:17:04.983973Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627986595541705:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:05.294268Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627969415670283:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:05.294455Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:06.186425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... lf { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:08.996416Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:09.011894Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:17:09.030757Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:09.117714Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:17:09.229451Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:17:09.312501Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:09.413037Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:11.817144Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628017645280628:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:11.817267Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:11.929776Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:12.010247Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:12.053337Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:12.090786Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:12.129922Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:12.213732Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:12.327184Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:12.415362Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628021940248590:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:12.415460Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:12.415772Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628021940248595:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:12.419113Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:12.435483Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628021940248597:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:17:12.521995Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628021940248648:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:13.183301Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628004760377139:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:13.183364Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:13.759527Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:13.811104Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:13.891505Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:13.932946Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:13.972582Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:14.048868Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TColumnShardTestReadWrite::CompactionInGranule_PKTimestamp_Reboot [GOOD] >> KqpJoinOrder::ShuffleEliminationReuseShuffleTwoJoins >> KqpJoinOrder::Sortings4Year+RemoveLimitOperator >> 
KqpIndexLookupJoin::CheckCastInt32ToInt16-StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastInt32ToInt16-StreamLookupJoin+NotNull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::ReplySizeExceeded [GOOD] Test command err: Trying to start YDB, gRPC: 6880, MsgBus: 24596 2025-06-24T21:16:23.873093Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627812915731293:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:23.879893Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031b4/r3tmp/tmpnOl398/pdisk_1.dat 2025-06-24T21:16:24.232220Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:24.232356Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:24.236776Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:24.246373Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:24.247286Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627812915731274:2079] 1750799783869624 != 1750799783869627 TServer::EnableGrpc on GrpcPort 6880, node 1 2025-06-24T21:16:24.323169Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:24.323209Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:24.323219Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:24.323366Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24596 TClient is connected to server localhost:24596 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:16:24.895843Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:16:24.902253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:24.951740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:28.872116Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627812915731293:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:28.872167Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:34.291196Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627860160373089:2413], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:34.291321Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:34.291575Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627860160373101:2416], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:34.296753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:34.307536Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627860160373103:2417], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:16:34.365168Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627860160373154:2975] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:34.746272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:39.199633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:16:39.199664Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded Trying to start YDB, gRPC: 29689, MsgBus: 63997 2025-06-24T21:16:44.861796Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627901254287129:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:44.866308Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031b4/r3tmp/tmpqXlkC2/pdisk_1.dat 2025-06-24T21:16:45.044525Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:45.045898Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519627901254287106:2079] 1750799804841886 != 1750799804841889 2025-06-24T21:16:45.064109Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:45.064235Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:45.068161Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29689, node 2 2025-06-24T21:16:45.132660Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:45.132695Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:45.132716Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:45.132860Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63997 TClient is connected to server localhost:63997 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:45.690832Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:45.696748Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:45.707537Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:45.870618Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:49.865899Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519627901254287129:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:49.871378Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot det ... 
istence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031b4/r3tmp/tmpvrfPpX/pdisk_1.dat 2025-06-24T21:17:05.949148Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:05.952255Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519627992515645019:2079] 1750799825706401 != 1750799825706404 2025-06-24T21:17:05.966168Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:05.966260Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:05.968942Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2522, node 4 2025-06-24T21:17:06.065369Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:06.065393Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:06.065401Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:06.065562Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62022 TClient is connected to server localhost:62022 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:06.669868Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:17:06.677707Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:17:06.683789Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:06.756332Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:06.764997Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:17:06.959542Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:07.050692Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:10.117499Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519628013990483117:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:10.117603Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:10.179962Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:10.227839Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:10.282719Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:10.334441Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:10.383063Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:10.435241Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:10.534228Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:10.674705Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519628013990483776:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:10.674835Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:10.675539Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519628013990483781:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:10.681497Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:10.701835Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519628013990483783:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:17:10.715441Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627992515645044:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:10.715597Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:10.802852Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519628013990483837:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:12.288422Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.717297Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzczZjQxNDEtNjM5YmUxNWYtMmZjMWFjYmMtZGExNTdmZmY=, ActorId: [4:7519628022580418715:2476], ActorState: ExecuteState, TraceId: 01jyhwts1mfjm5b8bwd6nzx0s6, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKTimestamp_Reboot [GOOD] Test command err: 2025-06-24T21:14:53.963827Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:53.993251Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:53.993545Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:53.999476Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:53.999632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:53.999823Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:53.999965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:54.000121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:54.000253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:54.000407Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:54.000561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:54.000683Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:54.000814Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:54.000955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:54.030161Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:54.030366Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:54.030409Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:54.030555Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:54.030688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:54.030737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:54.030768Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:54.030849Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:54.030906Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:54.030987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:54.031010Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:54.031176Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:54.031238Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:54.031279Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:54.031309Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:54.031414Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:54.031486Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:54.031536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:54.031585Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:54.031642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:54.031719Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:54.031748Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:54.031904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:54.031933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:54.031962Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:54.032132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:54.032209Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:54.032246Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:54.032372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:54.032407Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:54.032430Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:54.032494Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:54.032567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:54.032631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:54.032679Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:54.033184Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=50; 2025-06-24T21:14:54.033284Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=44; 2025-06-24T21:14:54.033348Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=26; 2025-06-24T21:14:54.033409Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=31; 2025-06-24T21:14:54.033514Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:54.033610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:54.033647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:54.033689Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
ame=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=26286;data_size=26260;sum=13688824;count=7164;size_of_portion=208; 2025-06-24T21:17:16.125975Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=108630; 2025-06-24T21:17:16.126065Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=14; 2025-06-24T21:17:16.127826Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=1695; 2025-06-24T21:17:16.127887Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=110696; 2025-06-24T21:17:16.127941Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=110825; 2025-06-24T21:17:16.128020Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=14; 2025-06-24T21:17:16.129167Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=1086; 2025-06-24T21:17:16.129222Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=112582; 2025-06-24T21:17:16.129399Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=117; 2025-06-24T21:17:16.129530Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=75; 2025-06-24T21:17:16.129933Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=348; 2025-06-24T21:17:16.130297Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=308; 2025-06-24T21:17:16.156327Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=25950; 2025-06-24T21:17:16.186393Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=29955; 2025-06-24T21:17:16.186527Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=16; 2025-06-24T21:17:16.186591Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=12; 2025-06-24T21:17:16.186640Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=7; 2025-06-24T21:17:16.186736Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=55; 2025-06-24T21:17:16.186786Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=6; 2025-06-24T21:17:16.186884Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=61; 2025-06-24T21:17:16.186927Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=6; 2025-06-24T21:17:16.186999Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=34; 2025-06-24T21:17:16.187115Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=69; 2025-06-24T21:17:16.187234Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=54; 2025-06-24T21:17:16.187284Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=177685; 2025-06-24T21:17:16.187452Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=22538992;raw_bytes=22128150;count=3;records=225200} inactive {blob_bytes=147791880;raw_bytes=143975050;count=221;records=1575200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:17:16.187593Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10318:11911];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:17:16.187658Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10318:11911];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:17:16.187730Z node 1 :TX_COLUMNSHARD 
DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10318:11911];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:17:16.187780Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10318:11911];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:17:16.188008Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:17:16.188097Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:17:16.188177Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799399559;tx_id=18446744073709551615;;current_snapshot_ts=1750799695762; 2025-06-24T21:17:16.188227Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:17:16.188278Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:16.188319Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:16.188419Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:17:16.192584Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:10441:12026];tablet_id=9437184;parent=[1:10318:11911];fline=manager.cpp:88;event=ask_data;request=request_id=215;9438184000001={portions_count=224};; 2025-06-24T21:17:16.195514Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10318:11911];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:17:16.196380Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10318:11911];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:17:16.196422Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:17:16.196458Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:17:16.196510Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10318:11911];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:17:16.196602Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10318:11911];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:17:16.196681Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10318:11911];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799399559;tx_id=18446744073709551615;;current_snapshot_ts=1750799695762; 2025-06-24T21:17:16.196734Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10318:11911];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:17:16.196788Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10318:11911];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:16.196832Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10318:11911];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:16.196935Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10318:11911];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=1.000000s; 2025-06-24T21:17:16.196991Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10318:11911];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; >> KqpJoin::JoinWithDuplicates >> KqpIndexLookupJoin::CheckCastInt32ToInt16+StreamLookupJoin+NotNull [GOOD] >> KqpJoinOrder::CanonizedJoinOrderTPCH7 >> KqpIndexLookupJoin::LeftJoinCustomColumnOrder+StreamLookup [GOOD] >> KqpIndexLookupJoin::LeftJoinCustomColumnOrder-StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastInt32ToInt16+StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 17632, MsgBus: 16386 2025-06-24T21:17:04.804695Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627986108361124:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:04.804760Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003bed/r3tmp/tmp7LoJ6C/pdisk_1.dat 2025-06-24T21:17:05.270983Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were 
not loaded 2025-06-24T21:17:05.275344Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627986108361101:2079] 1750799824803261 != 1750799824803264 2025-06-24T21:17:05.321076Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:05.321178Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 17632, node 1 2025-06-24T21:17:05.324862Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:05.454606Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:05.454637Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:05.454646Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:05.454799Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16386 2025-06-24T21:17:05.819657Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16386 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:06.149772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:06.183699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:06.201611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:17:06.338205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:06.498206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:06.578158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:08.611032Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628003288231923:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:17:08.611172Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:08.992306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:09.048310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:09.091249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:09.128581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:09.169735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:09.217592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:09.273664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:09.411504Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628007583199880:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:17:09.411590Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:17:09.411850Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628007583199885:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:17:09.420893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-24T21:17:09.443895Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628007583199887:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:17:09.547887Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628007583199938:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:09.805053Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627986108361124:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:09.805116Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:10.909331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... n.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 16217, MsgBus: 9769 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003bed/r3tmp/tmpx21J9B/pdisk_1.dat 2025-06-24T21:17:12.706735Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:17:12.736806Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:12.738166Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628024371480782:2079] 1750799832521635 != 1750799832521638 2025-06-24T21:17:12.750287Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:12.750375Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:12.752702Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16217, node 2 2025-06-24T21:17:12.811622Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:12.811645Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:12.811653Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:12.811771Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9769 TClient is connected to server localhost:9769 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:13.428433Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:13.459297Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:13.563081Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:17:13.566079Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:13.767015Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:13.855286Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:16.511291Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628041551351607:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:17:16.511417Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:16.582968Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.621562Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.703258Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.752744Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.793812Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.828674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.867519Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.957969Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628041551352270:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:17:16.958072Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:17:16.958431Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628041551352275:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:17:16.964563Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-24T21:17:16.981987Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628041551352277:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:17:17.078190Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628045846319624:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:18.313386Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.428243Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::SortingsByPK-RemoveLimitOperator >> KqpJoinOrder::CanonizedJoinOrderTPCH22 >> KqpIndexLookupJoin::InnerJoinOnlyRightColumn+StreamLookup [GOOD] >> KqpIndexLookupJoin::InnerJoinOnlyRightColumn-StreamLookup >> TColumnShardTestReadWrite::CompactionInGranule_PKInt32_Reboot [GOOD] >> KqpJoinOrder::CanonizedJoinOrderTPCH12 >> KqpIndexLookupJoin::LeftOnlyJoinValueColumn+StreamLookup [GOOD] >> KqpIndexLookupJoin::LeftOnlyJoinValueColumn-StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKInt32_Reboot [GOOD] Test command err: 2025-06-24T21:15:08.376429Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:08.404018Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:08.404418Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:08.412506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:08.412781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:08.413081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:08.413245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:08.413382Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:08.413507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:08.413644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:08.413777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:08.413915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:08.414061Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:08.414193Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:08.442494Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:08.442775Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:08.442853Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:08.443057Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:08.443293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:08.443390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:08.443447Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:08.443587Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:08.443663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:08.443709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:08.443740Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:08.443940Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:08.444012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:08.444057Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:08.444105Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:08.444209Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:08.444273Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:08.444336Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:08.444376Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:08.444422Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:08.444499Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:08.444540Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:08.444729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:08.444764Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:08.444793Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:08.444939Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:08.444979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:08.444999Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:08.445163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:08.445215Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:08.445251Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:08.445333Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:08.445395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:08.445467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:08.445514Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:08.445983Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=44; 2025-06-24T21:15:08.446087Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=40; 2025-06-24T21:15:08.446173Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=36; 2025-06-24T21:15:08.446258Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=41; 2025-06-24T21:15:08.446347Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:08.446444Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:08.446508Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:08.446562Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
e=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=25230;data_size=25196;sum=13004960;count=7164;size_of_portion=208; 2025-06-24T21:17:22.268805Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=124431; 2025-06-24T21:17:22.268904Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=15; 2025-06-24T21:17:22.270921Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=1928; 2025-06-24T21:17:22.270991Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=126827; 2025-06-24T21:17:22.271052Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=126988; 2025-06-24T21:17:22.271138Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=16; 2025-06-24T21:17:22.272432Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=1160; 2025-06-24T21:17:22.272500Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=129756; 2025-06-24T21:17:22.272708Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=137; 2025-06-24T21:17:22.272857Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=91; 2025-06-24T21:17:22.273376Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=458; 2025-06-24T21:17:22.273822Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=385; 2025-06-24T21:17:22.309598Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=35680; 2025-06-24T21:17:22.377295Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=67529; 2025-06-24T21:17:22.377454Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=19; 2025-06-24T21:17:22.377540Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=19; 2025-06-24T21:17:22.377601Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=10; 2025-06-24T21:17:22.377715Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=63; 2025-06-24T21:17:22.377780Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=10; 2025-06-24T21:17:22.377893Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=67; 2025-06-24T21:17:22.377954Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=8; 2025-06-24T21:17:22.378035Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=36; 2025-06-24T21:17:22.378156Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=71; 2025-06-24T21:17:22.378266Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=61; 2025-06-24T21:17:22.378318Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=255647; 2025-06-24T21:17:22.378500Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=21623968;raw_bytes=21227350;count=3;records=225200} inactive {blob_bytes=141321168;raw_bytes=137674250;count=221;records=1575200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:17:22.378652Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10009:11602];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:17:22.378728Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10009:11602];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:17:22.378811Z node 1 :TX_COLUMNSHARD 
DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10009:11602];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:17:22.378864Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10009:11602];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:17:22.379127Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:17:22.379499Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:17:22.379594Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799413809;tx_id=18446744073709551615;;current_snapshot_ts=1750799710169; 2025-06-24T21:17:22.379650Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:17:22.379704Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:22.379748Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:22.379871Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:17:22.383900Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:10132:11717];tablet_id=9437184;parent=[1:10009:11602];fline=manager.cpp:88;event=ask_data;request=request_id=215;9438184000001={portions_count=224};; 2025-06-24T21:17:22.387140Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10009:11602];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:17:22.388197Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10009:11602];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:17:22.388246Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:17:22.388281Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:17:22.388335Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10009:11602];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:17:22.388433Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10009:11602];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:17:22.388518Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10009:11602];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799413809;tx_id=18446744073709551615;;current_snapshot_ts=1750799710169; 2025-06-24T21:17:22.388575Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10009:11602];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:17:22.388637Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10009:11602];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:22.388685Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10009:11602];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:22.388776Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10009:11602];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=1.000000s; 2025-06-24T21:17:22.388835Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10009:11602];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; >> KqpJoin::FullOuterJoin2 [GOOD] >> KqpIndexLookupJoin::CheckCastInt32ToInt16-StreamLookupJoin+NotNull [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::FullOuterJoin2 [GOOD] Test command err: Trying to start YDB, gRPC: 24731, MsgBus: 23286 2025-06-24T21:17:09.713643Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628009912131340:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:09.713735Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003be8/r3tmp/tmpAWQ4TL/pdisk_1.dat 2025-06-24T21:17:10.258245Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:10.261526Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628009912131116:2079] 1750799829654833 != 
1750799829654836 2025-06-24T21:17:10.277527Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:10.277659Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 24731, node 1 2025-06-24T21:17:10.281039Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:10.429471Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:10.429493Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:10.429518Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:10.429642Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:10.707708Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23286 TClient is connected to server localhost:23286 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:11.201890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:11.230748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:11.427118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:17:11.595100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:11.698358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:13.540909Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628027092001927:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:13.541026Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:13.822938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:13.869008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:13.903736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:13.965667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:14.057438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:14.113578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:14.164473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:14.260628Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628031386969884:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:14.260715Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:14.260815Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628031386969889:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:14.264520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:14.276863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T21:17:14.278526Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628031386969891:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:17:14.331055Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628031386969942:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:14.719457Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628009912131340:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:14.719583Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:15.582791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 61021, node 2 2025-06-24T21:17:17.971305Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:18.031396Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:18.031418Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:18.031430Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:18.031576Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13310 TClient is connected to server localhost:13310 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:17:18.655242Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:17:18.728638Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:18.739912Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:18.765667Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:18.856981Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:19.040649Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:19.123078Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:21.936428Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628059500988348:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:21.936518Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:22.049709Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:22.097915Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:22.157325Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:22.217356Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:22.277293Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:22.385502Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:22.489828Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:22.609132Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628063795956306:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:22.609253Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:22.609700Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628063795956311:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:22.614404Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:22.632361Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628042321117546:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:22.632437Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:22.632923Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T21:17:22.633197Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628063795956313:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:17:22.731063Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628063795956366:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:24.041442Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.112274Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.201431Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoin::RightSemiJoin_SimpleKey >> KqpJoin::JoinWithDuplicates [GOOD] >> KqpJoin::LeftJoinPushdownPredicate_NestedJoin ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastInt32ToInt16-StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 18331, MsgBus: 24497 2025-06-24T21:17:11.032793Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628012742836280:2152];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:11.039687Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003be7/r3tmp/tmptjBlgH/pdisk_1.dat 2025-06-24T21:17:11.480628Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:11.482289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:11.482386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:11.484711Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628012742836165:2079] 1750799830968304 != 1750799830968307 2025-06-24T21:17:11.486013Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18331, node 1 2025-06-24T21:17:11.643683Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-24T21:17:11.643704Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:11.643711Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:11.643802Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24497 2025-06-24T21:17:12.041858Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24497 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:12.361673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:12.377300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:17:12.395453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:12.545762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:12.740300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:17:12.827683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:14.653755Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628029922706984:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:14.653863Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:15.105688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:15.148214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:15.189252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:15.268485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:15.308146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:15.372090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:15.439598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:15.551500Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628034217674947:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:15.551590Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:15.552020Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628034217674952:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:15.555970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:15.571197Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628034217674954:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:17:15.660322Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628034217675007:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:15.990394Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628012742836280:2152];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:15.990496Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:16.855015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003be7/r3tmp/tmpmxZnOJ/pdisk_1.dat 2025-06-24T21:17:19.255176Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:19.255289Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:19.256589Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:19.271264Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628046075655169:2079] 1750799839055279 != 1750799839055282 2025-06-24T21:17:19.271668Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11052, node 2 2025-06-24T21:17:19.411577Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:19.411611Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:19.411620Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:19.411756Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13697 2025-06-24T21:17:20.075282Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13697 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:20.161956Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:20.169997Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:17:20.182408Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:20.314620Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:20.491124Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:20.575397Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:24.055303Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628071845460579:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:24.055412Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:24.067380Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628050370622523:2083];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:24.067469Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:24.168596Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.222596Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.283626Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.330966Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.381182Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.452514Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.501441Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.625677Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519628071845461243:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:24.625787Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:24.626184Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628071845461248:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:24.630939Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:24.643075Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628071845461250:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:17:24.720084Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628071845461301:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:26.103678Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:26.229727Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::TPCHRandomJoinViewJustWorks+ColumnStore >> KqpIndexLookupJoin::LeftJoinCustomColumnOrder-StreamLookup [GOOD] >> KqpIndexLookupJoin::InnerJoinOnlyRightColumn-StreamLookup [GOOD] >> TColumnShardTestReadWrite::CompactionSplitGranule_PKUInt64 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftJoinCustomColumnOrder-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 5520, MsgBus: 11431 2025-06-24T21:17:13.460246Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628028539893160:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:13.460298Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003bb0/r3tmp/tmpnp3NMi/pdisk_1.dat 2025-06-24T21:17:14.004761Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:14.004860Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:14.015843Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:14.041420Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:14.042434Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628028539892972:2079] 1750799833422840 != 1750799833422843 TServer::EnableGrpc on GrpcPort 5520, node 1 2025-06-24T21:17:14.290630Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:14.290648Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:14.290655Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:14.291723Z 
node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:14.442625Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11431 TClient is connected to server localhost:11431 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:15.078394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:15.111710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:15.128534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:15.305963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:15.513794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:17:15.603018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:17.555942Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628045719763778:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:17.556076Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:17.937525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:17.973403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.013922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.055275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.100637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.148280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.194194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.299395Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628050014731735:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:18.299458Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:18.299783Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628050014731740:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:18.304898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:18.354680Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628050014731742:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:17:18.432656Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628050014731793:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:18.469515Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628028539893160:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:18.469577Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:19.592390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... 00 TClient is connected to server localhost:27300 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:17:22.679261Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:22.756345Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:22.769793Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:17:22.851755Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:17:23.028976Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:23.138324Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:25.934007Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628078161635602:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:25.934099Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:26.006098Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:26.056612Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:26.133844Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:26.213271Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:26.286092Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:26.399115Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:26.493903Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:26.621333Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628082456603556:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:26.621401Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:26.621536Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628082456603561:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:26.625052Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:26.662329Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628082456603563:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:17:26.671246Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628060981765021:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:26.671339Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:26.734072Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628082456603616:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:28.305561Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:28.349157Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:28.438795Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:28.471397Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:28.562918Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:28.610359Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpIndexLookupJoin::CheckCastUint32ToUint16-StreamLookupJoin-NotNull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> 
KqpIndexLookupJoin::InnerJoinOnlyRightColumn-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 15366, MsgBus: 23509 2025-06-24T21:17:13.277615Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628026955534905:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:13.284614Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003bb1/r3tmp/tmpeJME9O/pdisk_1.dat 2025-06-24T21:17:13.910259Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:13.910366Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:13.920225Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628026955534883:2079] 1750799833274151 != 1750799833274154 2025-06-24T21:17:13.920513Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:13.938786Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15366, node 1 2025-06-24T21:17:14.155302Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:14.155328Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:14.155336Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:14.155470Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:14.330903Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23509 TClient is connected to server localhost:23509 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-24T21:17:15.320883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:17:15.344133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:15.363336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:15.537585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:15.721729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:15.822319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:17.713327Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628044135405722:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:17.713446Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:18.070247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.128449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.187894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.239635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.279516Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628026955534905:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:18.279625Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:18.282039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.365026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.408456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.488883Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519628048430373679:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:18.488959Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:18.489223Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628048430373684:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:18.493898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:18.512643Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628048430373686:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:17:18.574844Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628048430373737:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:19.893328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... d to server localhost:13465 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:23.786004Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:23.792813Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:23.813663Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:23.969254Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:17:24.176476Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:24.358555Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:27.251321Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628066962455693:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:27.251410Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:27.284232Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628088437293600:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:27.284331Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:27.385023Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:27.427831Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:27.474683Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:27.508455Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:27.549951Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:27.635578Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:27.716120Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:27.819370Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628088437294264:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:27.819503Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:27.819791Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628088437294269:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:27.824324Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:27.843790Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628088437294271:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:17:27.929832Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628088437294322:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:29.294860Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:29.359991Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:29.442614Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:29.492420Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:29.539856Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:29.576519Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpIndexLookupJoin::LeftOnlyJoinValueColumn-StreamLookup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranule_PKUInt64 [GOOD] Test command err: 2025-06-24T21:15:27.169393Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:27.196005Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:27.196334Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:27.204181Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:27.204403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:27.204606Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:27.204686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:27.204792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:27.204888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:27.204969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:27.205084Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:27.205192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:27.205322Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:27.205454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:27.233535Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:27.233808Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:27.233862Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:27.234025Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:27.234190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:27.234254Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:27.234306Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:27.234404Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:27.234467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:27.234500Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:27.234520Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:27.234656Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:27.234723Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:27.234758Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:27.234780Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:27.234859Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:27.234909Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:27.234941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:27.234976Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:27.235015Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:27.235056Z node 
1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:27.235089Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:27.235305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:27.235340Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:27.235369Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:27.235514Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:27.235551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:27.235570Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:27.235670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:27.235719Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:27.235760Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:27.235832Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:27.235889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:27.235938Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:27.235980Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:27.236415Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=54; 2025-06-24T21:15:27.236503Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=35; 2025-06-24T21:15:27.236594Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=38; 2025-06-24T21:15:27.236677Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=37; 2025-06-24T21:15:27.236830Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:27.236949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:27.237009Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:27.237066Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 84;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=2158;data_size=2150;sum=529040;count=216;size_of_portion=208; 2025-06-24T21:17:31.656090Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=39970; 2025-06-24T21:17:31.656185Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=13; 2025-06-24T21:17:31.657145Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=881; 2025-06-24T21:17:31.657262Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=41365; 2025-06-24T21:17:31.657320Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=41630; 2025-06-24T21:17:31.657401Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=15; 2025-06-24T21:17:31.657766Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=310; 2025-06-24T21:17:31.657820Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=42938; 2025-06-24T21:17:31.658035Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=127; 2025-06-24T21:17:31.658218Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=121; 2025-06-24T21:17:31.658500Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=187; 2025-06-24T21:17:31.658748Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=160; 2025-06-24T21:17:31.665363Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=6513; 2025-06-24T21:17:31.672541Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=7009; 2025-06-24T21:17:31.672720Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=25; 2025-06-24T21:17:31.672801Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=13; 2025-06-24T21:17:31.672851Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=8; 2025-06-24T21:17:31.673170Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=96; 2025-06-24T21:17:31.673266Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=17; 2025-06-24T21:17:31.673410Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=81; 2025-06-24T21:17:31.673459Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=6; 2025-06-24T21:17:31.673535Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=39; 2025-06-24T21:17:31.673648Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=73; 2025-06-24T21:17:31.673795Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=71; 2025-06-24T21:17:31.673855Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=86017; 2025-06-24T21:17:31.674112Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=108275528;raw_bytes=198365560;count=15;records=1915000} inactive {blob_bytes=205496480;raw_bytes=345889958;count=39;records=3635000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:17:31.674268Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:17:31.674339Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:17:31.674462Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:17:31.674525Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:17:31.674703Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:17:31.674812Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-24T21:17:31.674902Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799431280;tx_id=18446744073709551615;;current_snapshot_ts=1750799728199; 2025-06-24T21:17:31.674971Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:17:31.675039Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:31.675138Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:31.675351Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:17:31.677387Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:4143:6103];tablet_id=9437184;parent=[1:4108:6076];fline=manager.cpp:88;event=ask_data;request=request_id=205;9438184000001={portions_count=54};; 2025-06-24T21:17:31.680295Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:4108:6076];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:17:31.680782Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:17:31.680860Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-24T21:17:31.680900Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:17:31.680978Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:17:31.681110Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-24T21:17:31.681209Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799431280;tx_id=18446744073709551615;;current_snapshot_ts=1750799728199; 2025-06-24T21:17:31.681293Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:17:31.681365Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:31.681413Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:31.681545Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.999000s; 2025-06-24T21:17:31.681620Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4108:6076];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; >> KqpJoinOrder::SortingsSimpleOrderByPKAlias-RemoveLimitOperator ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftOnlyJoinValueColumn-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 16414, MsgBus: 26643 2025-06-24T21:17:15.281321Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628035379757365:2247];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:15.281388Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ba2/r3tmp/tmpASYvbK/pdisk_1.dat 2025-06-24T21:17:15.754525Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:15.756121Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628035379757123:2079] 1750799835181397 != 1750799835181400 2025-06-24T21:17:15.775668Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:15.775798Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:15.781036Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16414, node 1 2025-06-24T21:17:15.899775Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:15.899796Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:15.899806Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:15.899928Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26643 2025-06-24T21:17:16.209024Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26643 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:16.696564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:17:16.729064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:16.877294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:17.109176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:17.208445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:19.235749Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628052559627943:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:19.235877Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:19.665634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:19.738994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:19.774754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:19.850732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:19.901213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:19.994661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:20.061660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:20.230600Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628056854595905:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:20.230703Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:20.231036Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628056854595910:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:20.239128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:20.256061Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628056854595912:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:17:20.287336Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628035379757365:2247];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:20.287428Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:20.358994Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628056854595963:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:21.778643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:21.858500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:25.783274Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:25.795333Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:17:25.805185Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:25.906615Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:26.114157Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:26.225876Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:29.008311Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628094061973109:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:29.008397Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:29.063682Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:29.109366Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:29.146227Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:29.190807Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:29.243375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:29.313388Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:29.398281Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:29.498775Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628072587135105:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:29.510051Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:29.594385Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519628094061973766:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:29.594493Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:29.594795Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628094061973771:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:29.600187Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:29.621174Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T21:17:29.621460Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628094061973773:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:17:29.719755Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628094061973824:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:31.158504Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:31.213618Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:31.256965Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:31.303757Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:31.359735Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:31.410262Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::TestJoinHint1-ColumnStore >> KqpJoinOrder::ShuffleEliminationDifferentJoinPredicateKeyTypeCorrectness2 >> KqpJoinOrder::CanonizedJoinOrderTPCDS64_small >> KqpLimits::ComputeNodeMemoryLimit [GOOD] >> KqpJoinOrder::FiveWayJoinStatsOverride-ColumnStore [GOOD] >> KqpJoin::LeftJoinPushdownPredicate_NestedJoin [GOOD] >> KqpJoin::RightSemiJoin_SimpleKey [GOOD] >> KqpJoin::RightTableIndexPredicate >> KqpIndexLookupJoin::CheckCastUint32ToUint16-StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastUint32ToUint16-StreamLookupJoin+NotNull ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/kqp/ut/join/unittest >> KqpJoin::LeftJoinPushdownPredicate_NestedJoin [GOOD] Test command err: Trying to start YDB, gRPC: 27386, MsgBus: 12522 2025-06-24T21:17:20.185863Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628057541340653:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:20.185910Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b90/r3tmp/tmpuGZExI/pdisk_1.dat 2025-06-24T21:17:20.750794Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:20.750913Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:20.756816Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:20.760396Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628057541340633:2079] 1750799840180590 != 1750799840180593 2025-06-24T21:17:20.780108Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27386, node 1 2025-06-24T21:17:21.035860Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:21.035893Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:21.035902Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:21.036038Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:21.244088Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12522 TClient is connected to server localhost:12522 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:17:21.997460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:22.031701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:22.319871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:22.602643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:22.754013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:25.192536Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628057541340653:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:25.192668Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:25.240750Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628079016178746:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:25.240897Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:25.677312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:25.724127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:25.806193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:25.862046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:25.909558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:25.975908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:26.037599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:26.148948Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628083311146706:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:26.149083Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:26.149335Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628083311146711:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:26.153681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:26.182178Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628083311146713:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:17:26.258676Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628083311146770:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:27.523535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:27.583416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... 371245Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628099350212516:2079] 1750799850066094 != 1750799850066097 2025-06-24T21:17:30.384519Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:30.385983Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25603, node 2 2025-06-24T21:17:30.596690Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:30.596719Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:30.596728Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:30.596881Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25238 2025-06-24T21:17:31.101603Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25238 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:17:31.389611Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:31.396455Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:17:31.405619Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:31.494652Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:31.715673Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:31.861308Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:34.627285Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628116530083324:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:34.627391Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:34.703404Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:34.747648Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:34.794501Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:34.856214Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:34.919737Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:34.996224Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:35.102813Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:35.108708Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628099350212722:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:35.108988Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:35.216770Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519628120825051283:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:35.216860Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:35.217256Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628120825051288:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:35.221863Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:35.254268Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628120825051290:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:17:35.314616Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628120825051343:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:36.648451Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:36.740522Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:36.833738Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinStatsOverride-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 9170, MsgBus: 16837 2025-06-24T21:16:50.741337Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627926884114624:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:50.756421Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003c1a/r3tmp/tmpqNk72a/pdisk_1.dat 2025-06-24T21:16:51.152936Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627926884114604:2079] 1750799810730751 != 1750799810730754 2025-06-24T21:16:51.153843Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9170, node 1 2025-06-24T21:16:51.239895Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:51.239970Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:51.241720Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:51.295651Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:51.295675Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:16:51.295683Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:51.295798Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16837 2025-06-24T21:16:51.767367Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16837 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:51.984774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:52.017035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:54.090503Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627944063984439:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:54.090610Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627944063984431:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:54.090772Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:54.095135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:54.113518Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627944063984445:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:16:54.204809Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627944063984496:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:54.564006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.729779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.805340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.846408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.888789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.102344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.136874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.167508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.203835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.245960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.282555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.349941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.387980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.731941Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627926884114624:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:55.732039Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:56.077186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subopera ... 
50125Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038430;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.755469Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.756168Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.756206Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038430;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.756771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038432;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.762257Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038432;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.762258Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.762913Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038440;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.762947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038466;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.771053Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038466;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.771760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038434;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.772962Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038440;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.773441Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038442;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.777514Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038434;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.778188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038485;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.778284Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038442;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.779390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038462;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.784217Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038462;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.784847Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038458;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.788721Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038485;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.789239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038474;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.791417Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038458;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.792531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038563;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.794626Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038474;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.795485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038470;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.796895Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038563;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.797387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038436;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.801560Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038436;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.801730Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038470;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.802086Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.802381Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038476;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.806736Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.807953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038472;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.808080Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038476;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.808699Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038438;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.812929Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038472;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.813536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038454;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.814876Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038438;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.817505Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038454;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.818034Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038528;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:29.822113Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038528;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:29.931500Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwt7mf30dr2ftrawyx4nnr", SessionId: ydb://session/3?node_id=1&id=OWU2NDRiZTktNjE2OTcxY2UtYzJjYjVkYTItMmY3N2QxMmQ=, Slow query, duration: 31.547525s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:17:30.270897Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:17:30.271453Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:17:30.272072Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519628051438192677:5853];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038331; 2025-06-24T21:17:30.272538Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> TColumnShardTestReadWrite::CompactionInGranule_PKUtf8_Reboot [GOOD] >> KqpJoinOrder::CanonizedJoinOrderTPCH20 >> KqpJoin::LeftJoinWithNull-StreamLookupJoin ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::ComputeNodeMemoryLimit [GOOD] Test command err: Trying to start YDB, gRPC: 4483, MsgBus: 23830 2025-06-24T21:16:26.357507Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627824193076804:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:26.357561Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031ad/r3tmp/tmpHusVix/pdisk_1.dat 2025-06-24T21:16:26.752919Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4483, node 1 2025-06-24T21:16:26.807044Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:26.807205Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:26.815517Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:26.836677Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:26.836705Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:26.836713Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:26.836904Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is 
connected to server localhost:23830 TClient is connected to server localhost:23830 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:16:27.371398Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:27.453758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:27.468888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:27.484694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:29.632731Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627837077979635:2316], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:29.632920Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:29.635742Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627837077979647:2319], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:29.641230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:29.658817Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627837077979649:2320], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:16:29.755845Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627837077979700:2557] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:30.185599Z node 1 :KQP_COMPUTE WARN: log.cpp:784: fline=kqp_compute_actor_factory.cpp:41;problem=cannot_allocate_memory;tx_id=281474976715661;task_id=2;memory=1048576; 2025-06-24T21:16:30.185641Z node 1 :KQP_COMPUTE WARN: dq_compute_memory_quota.h:152: TxId: 281474976715661, task: 2. [Mem] memory 1048576 NOT granted 2025-06-24T21:16:30.198640Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519627841372947039:2329], TxId: 281474976715661, task: 2. Ctx: { TraceId : 01jyhwsbhxe6dpr8hy41hs4hpn. SessionId : ydb://session/3?node_id=1&id=MjhlZTkxNDktYWI5YzljMTUtYzA0MTE0NS04YjUwYWE4Ng==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: OVERLOADED KIKIMR_PRECONDITION_FAILED: {
: Error: Mkql memory limit exceeded, allocated by task 2: 10, host: ghrun-4pjfmvffsq, canAllocateExtraMemory: 1, memory manager details for current node: TxResourcesInfo { TxId: 281474976715661, Database: /Root, PoolId: default, MemoryPoolPercent: 100.00, tx initially granted memory: 20B, tx total memory allocations: 1MiB, tx largest successful memory allocation: 1MiB, tx last failed memory allocation: 1MiB, tx total execution units: 2, started at: 2025-06-24T21:16:30.180357Z }, code: 2029 }. 2025-06-24T21:16:30.199618Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7519627841372947038:2328], TxId: 281474976715661, task: 1. Ctx: { TraceId : 01jyhwsbhxe6dpr8hy41hs4hpn. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=MjhlZTkxNDktYWI5YzljMTUtYzA0MTE0NS04YjUwYWE4Ng==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [1:7519627841372947027:2314], status: OVERLOADED, reason: {
: Error: Terminate execution } 2025-06-24T21:16:30.203953Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MjhlZTkxNDktYWI5YzljMTUtYzA0MTE0NS04YjUwYWE4Ng==, ActorId: [1:7519627837077979632:2314], ActorState: ExecuteState, TraceId: 01jyhwsbhxe6dpr8hy41hs4hpn, Create QueryResponse for error on request, msg:
: Error: Mkql memory limit exceeded, allocated by task 2: 10, host: ghrun-4pjfmvffsq, canAllocateExtraMemory: 1, memory manager details for current node: TxResourcesInfo { TxId: 281474976715661, Database: /Root, PoolId: default, MemoryPoolPercent: 100.00, tx initially granted memory: 20B, tx total memory allocations: 1MiB, tx largest successful memory allocation: 1MiB, tx last failed memory allocation: 1MiB, tx total execution units: 2, started at: 2025-06-24T21:16:30.180357Z } , code: 2029 Trying to start YDB, gRPC: 14029, MsgBus: 8593 2025-06-24T21:16:31.010134Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627845111794012:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:31.010199Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031ad/r3tmp/tmpwg3yHV/pdisk_1.dat 2025-06-24T21:16:31.222401Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:31.222503Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:31.234183Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:31.234675Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:31.237306Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519627840816826698:2079] 1750799791009473 != 1750799791009476 TServer::EnableGrpc on GrpcPort 14029, node 2 2025-06-24T21:16:31.383874Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:31.383900Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:31.383908Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:31.384028Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8593 TClient is connected to server localhost:8593 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root ... 
) 2025-06-24T21:16:40.703931Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14359 TClient is connected to server localhost:14359 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:41.197970Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:41.203507Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:41.210483Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:41.297076Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:41.479868Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:41.576238Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 
2025-06-24T21:16:41.609715Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:44.240711Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627901131895691:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:44.240812Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:44.360364Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:44.406533Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:44.442413Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:44.489952Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:44.550539Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:44.599223Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:44.671757Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:44.773204Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627901131896359:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:44.773305Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:44.774061Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519627901131896364:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:44.778395Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:44.795185Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519627901131896366:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:44.877644Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519627901131896417:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:45.530161Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519627883952024898:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:45.530235Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:55.623257Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:16:55.623312Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:37.304127Z node 4 :KQP_EXECUTER WARN: kqp_literal_executer.cpp:103: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhwsvm3f6hdj0458nn5259y, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=MTdhNjMyYTAtYjAyMTU2NTctNTcwOWM1OC0xZTgwNjBmYQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, memory limit exceeded. 2025-06-24T21:17:37.305190Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=MTdhNjMyYTAtYjAyMTU2NTctNTcwOWM1OC0xZTgwNjBmYQ==, ActorId: [4:7519627909721831280:2473], ActorState: ExecuteState, TraceId: 01jyhwsvm3f6hdj0458nn5259y, Create QueryResponse for error on request, msg: 2025-06-24T21:17:37.305413Z node 4 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwsvm3f6hdj0458nn5259y", SessionId: ydb://session/3?node_id=4&id=MTdhNjMyYTAtYjAyMTU2NTctNTcwOWM1OC0xZTgwNjBmYQ==, Slow query, duration: 51.221569s, status: PRECONDITION_FAILED, user: UNAUTHENTICATED, results: 0b, text: "\n SELECT ToDict(\n ListMap(\n ListFromRange(0ul, 5000000ul),\n ($x) -> { RETURN AsTuple($x, $x + 1); }\n )\n );\n ", parameters: 0b
: Warning: Type annotation, code: 1030
:2:13: Warning: At function: RemovePrefixMembers, At function: Unordered, At function: PersistableRepr, At function: OrderedSqlProject, At tuple, At function: SqlProjectItem, At lambda
:2:20: Warning: At function: ToDict
:3:17: Warning: At function: OrderedMap
:5:26: Warning: At lambda
:5:38: Warning: At tuple
:5:53: Warning: At function: +
:5:53: Warning: Integral type implicit bitcast: Uint64 and Int32, code: 1107
: Error: Memory limit exceeded, code: 2029 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKUtf8_Reboot [GOOD] Test command err: 2025-06-24T21:15:12.482224Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:12.506786Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:12.507072Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:12.514058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:12.514293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:12.514617Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:12.514968Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:12.515186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:12.515316Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:12.515475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:12.515618Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:12.515748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:12.515881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:12.516001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:12.552191Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:12.552454Z node 1 
:TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:12.552518Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:12.552750Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:12.552953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:12.553054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:12.553105Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:12.553222Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:12.553293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:12.553339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:12.553371Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:12.553566Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:12.553651Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:12.553699Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:12.553731Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:12.553830Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:12.553888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:12.553932Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:12.553981Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:12.554037Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:12.554103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:12.554154Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:12.554395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:12.554442Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:12.554482Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:12.554701Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:12.554755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:12.554787Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:12.554928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:12.554983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:12.555018Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:12.555114Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:12.555215Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:12.555260Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:12.555293Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:12.555799Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=54; 2025-06-24T21:15:12.555919Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=51; 2025-06-24T21:15:12.556012Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=37; 2025-06-24T21:15:12.556099Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=34; 2025-06-24T21:15:12.556227Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:12.556383Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:12.556440Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:12.556494Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... e=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=26510;data_size=26478;sum=13963248;count=7164;size_of_portion=208; 2025-06-24T21:17:42.440921Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=150588; 2025-06-24T21:17:42.441010Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=13; 2025-06-24T21:17:42.442936Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=1854; 2025-06-24T21:17:42.443004Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=152874; 2025-06-24T21:17:42.443061Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=153006; 2025-06-24T21:17:42.443137Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=13; 2025-06-24T21:17:42.444489Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=1168; 2025-06-24T21:17:42.444554Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=155045; 2025-06-24T21:17:42.444745Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=128; 2025-06-24T21:17:42.444886Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=87; 2025-06-24T21:17:42.445370Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=431; 2025-06-24T21:17:42.445758Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=332; 2025-06-24T21:17:42.483066Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=37212; 2025-06-24T21:17:42.530667Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=46951; 2025-06-24T21:17:42.530811Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=16; 2025-06-24T21:17:42.530881Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=15; 2025-06-24T21:17:42.530932Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=8; 2025-06-24T21:17:42.531024Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=49; 2025-06-24T21:17:42.531073Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=7; 2025-06-24T21:17:42.531775Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=648; 2025-06-24T21:17:42.531862Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=12; 2025-06-24T21:17:42.531955Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=42; 2025-06-24T21:17:42.532077Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=74; 2025-06-24T21:17:42.532179Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=54; 2025-06-24T21:17:42.532223Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=250526; 2025-06-24T21:17:42.532397Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=22744072;raw_bytes=22320020;count=3;records=225200} inactive {blob_bytes=149450960;raw_bytes=145316940;count=221;records=1575200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:17:42.532541Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10389:11981];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:17:42.532609Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10389:11981];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:17:42.532682Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10389:11981];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:17:42.532735Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10389:11981];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:17:42.532951Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:17:42.533032Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:17:42.533128Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799418081;tx_id=18446744073709551615;;current_snapshot_ts=1750799714273; 2025-06-24T21:17:42.533176Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:17:42.533228Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:42.533269Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:42.533364Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 
2025-06-24T21:17:42.536889Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:10513:12097];tablet_id=9437184;parent=[1:10389:11981];fline=manager.cpp:88;event=ask_data;request=request_id=212;9438184000001={portions_count=224};; 2025-06-24T21:17:42.540525Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10389:11981];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:17:42.540960Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10389:11981];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:17:42.541001Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-24T21:17:42.541035Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:17:42.541105Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10389:11981];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:17:42.541206Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10389:11981];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:17:42.541283Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10389:11981];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799418081;tx_id=18446744073709551615;;current_snapshot_ts=1750799714273; 2025-06-24T21:17:42.541336Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10389:11981];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:17:42.541394Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10389:11981];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:42.541442Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10389:11981];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:17:42.541530Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10389:11981];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=1.000000s; 2025-06-24T21:17:42.541587Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10389:11981];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; >> KqpJoinOrder::SortingsSimpleOrderByPKAlias+RemoveLimitOperator [GOOD] >> KqpJoinOrder::SortingsDifferentDirs+RemoveLimitOperator >> KqpJoinOrder::TestJoinOrderHintsSimple-ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsSimpleOrderByPKAlias+RemoveLimitOperator [GOOD] 
Test command err: Trying to start YDB, gRPC: 12551, MsgBus: 21526 2025-06-24T21:17:02.036139Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627978535889521:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:02.036182Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003bf5/r3tmp/tmpm0ZGg5/pdisk_1.dat 2025-06-24T21:17:02.648666Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:02.648829Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:02.651219Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:02.652999Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:02.655427Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627978535889499:2079] 1750799822031604 != 1750799822031607 TServer::EnableGrpc on GrpcPort 12551, node 1 2025-06-24T21:17:02.895736Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:02.895781Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:02.895797Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:02.895906Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:03.091275Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21526 TClient is connected to server localhost:21526 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:17:03.503264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:05.639988Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627991420792035:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.640120Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.640433Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627991420792047:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.645027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:05.658661Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627991420792049:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:05.763826Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627991420792100:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:06.119939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:06.237886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:06.267032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:06.299504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:06.330834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:06.475089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:06.502916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:06.542184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:06.637598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:06.680202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:06.724053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:06.761989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:06.793685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.036505Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627978535889521:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:07.102466Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:07.508232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/ ... 
45511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038425;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.251371Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038586;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.251878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.258748Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038425;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.261256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038427;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.266354Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038427;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.266994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.267378Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.267794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.272982Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.273593Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.279015Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.279826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.289433Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.289940Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.292117Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.292762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.302559Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.303093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.305926Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.306575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.309022Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.309833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.320784Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.321977Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.322450Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.328381Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.329085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.331327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.338988Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.341950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.346184Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.347490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.353472Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.354408Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.357709Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.358303Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.360940Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.362682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.366196Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.369731Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.471688Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwtjng5788x5zjerh0p5d9", SessionId: ydb://session/3?node_id=1&id=MmRhNzhiMmItMWNjMGU1ZTItZTgxNDM3NDAtZjMxZWMwZQ==, Slow query, duration: 30.790681s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:17:40.987469Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:17:40.987721Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519628111679904218:6286];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038331; 2025-06-24T21:17:40.988278Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:17:40.989254Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpIndexLookupJoin::CheckCastUint32ToUint16-StreamLookupJoin+NotNull [GOOD] >> KqpJoinOrder::OltpJoinTypeHintCBOTurnOFF [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastUint32ToUint16-StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 27943, MsgBus: 65216 2025-06-24T21:17:32.760291Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628109820614150:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:32.760337Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b7e/r3tmp/tmphnJ0OC/pdisk_1.dat 2025-06-24T21:17:33.370971Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:33.371060Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:33.377156Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:33.413573Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628109820614128:2079] 1750799852756234 != 1750799852756237 2025-06-24T21:17:33.424871Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27943, node 1 2025-06-24T21:17:33.639628Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:33.639650Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:33.639661Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:33.639750Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:33.799720Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:65216 TClient is connected to server localhost:65216 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:34.647525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:34.675394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:34.690081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:34.954300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:35.166200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:17:35.288716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:37.408644Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628131295452246:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:37.408749Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:37.762801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:37.763615Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628109820614150:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:37.763666Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:37.809933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:37.862434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:37.910353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:37.943316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:38.002926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:38.070737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:38.146704Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519628135590420199:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:38.146797Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:38.147137Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628135590420204:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:38.150851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:38.164235Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628135590420206:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:17:38.262618Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628135590420258:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:39.503053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... ed at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 31694, MsgBus: 24137 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b7e/r3tmp/tmpVTVObH/pdisk_1.dat 2025-06-24T21:17:41.719308Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:17:41.735295Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628146595909842:2079] 1750799861484414 != 1750799861484417 2025-06-24T21:17:41.740283Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:41.748879Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:41.749853Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 31694, node 2 2025-06-24T21:17:41.757856Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:41.823701Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:41.823722Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:41.823730Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:41.823848Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24137 TClient is connected to server localhost:24137 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:42.552560Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:42.567785Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:17:42.568625Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:17:42.581666Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:42.671508Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:42.875012Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:43.007400Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:17:45.407345Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628163775780640:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:45.407441Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:45.494356Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:45.568774Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:45.625570Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:45.671779Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:45.707496Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:45.763767Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:45.846073Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:45.946200Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628163775781311:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:45.946282Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:45.946463Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628163775781316:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:45.950608Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:45.966777Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628163775781318:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:17:46.026656Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628168070748665:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:47.225262Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:47.356557Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::SortingsWithLookupJoin1-RemoveLimitOperator >> KqpJoin::RightTableIndexPredicate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::OltpJoinTypeHintCBOTurnOFF [GOOD] Test command err: Trying to start YDB, gRPC: 20208, MsgBus: 28442 2025-06-24T21:16:59.057299Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627966724237232:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:59.057363Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003c01/r3tmp/tmp24EqSL/pdisk_1.dat 2025-06-24T21:16:59.509909Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:59.510073Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:59.515056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:59.524857Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:59.525674Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627966724237136:2079] 1750799819014461 != 1750799819014464 TServer::EnableGrpc on GrpcPort 20208, node 1 2025-06-24T21:16:59.677369Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:59.677390Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:59.677397Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:59.677518Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28442 2025-06-24T21:17:00.025707Z node 1 
:TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28442 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:00.478888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:00.516087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:02.618956Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627979609139663:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:02.619088Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:02.619440Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627979609139675:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:02.628203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:02.646802Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627979609139677:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:02.731740Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627979609139728:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:03.087664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.205027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.235909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.270310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.305639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.479891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.519384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.603528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.670728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.715257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.746839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.776143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:03.811298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:04.089181Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627966724237232:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:04.089290Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:04.517287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
96506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.904451Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.905095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.909049Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.909545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.914227Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.917848Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.918498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.919832Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.926547Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.929416Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.930346Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.931747Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.939522Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.940142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.943537Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.944023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.950616Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.953473Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.954102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.955315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.963406Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.963931Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.968153Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.968638Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.974527Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.975018Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.977993Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.978598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.986573Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.988340Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:40.988969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:40.993637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.007896Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.008687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.012823Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.013399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.018872Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.021680Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.072080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.085957Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.239591Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwtfrmdbcka35v598a2t3k", SessionId: ydb://session/3?node_id=1&id=ZmQ4Y2RjNGItZTE1OWJmOTYtOWNjZDE5YTgtYmZlMDNmMDE=, Slow query, duration: 34.530572s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:17:41.528903Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:17:41.529368Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519628005378949529:2806];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038629; 2025-06-24T21:17:41.529518Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:17:41.530678Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::CanonizedJoinOrderTPCDS78 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::RightTableIndexPredicate [GOOD] Test command err: Trying to start YDB, gRPC: 20601, MsgBus: 18854 2025-06-24T21:17:28.747628Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628089582429628:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:28.751687Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b80/r3tmp/tmpV3pWzX/pdisk_1.dat 2025-06-24T21:17:29.465004Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:29.465111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:29.467247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:29.543138Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:29.551373Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628089582429597:2079] 1750799848739307 != 1750799848739310 TServer::EnableGrpc on GrpcPort 20601, node 1 2025-06-24T21:17:29.758802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:29.758824Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:29.758831Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:29.758928Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:29.780604Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18854 TClient is connected to server localhost:18854 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:30.736902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:30.761171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:17:30.782045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:30.973661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:31.273885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
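For reference, the query text quoted in the KQP_SLOW_LOG entry earlier in this output (duration 34.530572s) expands, once the escaped newlines are unfolded, to approximately the following YQL; table names, column types, and options are taken verbatim from the log entry, nothing is added:

CREATE TABLE t1 (
    id1 Int32 NOT NULL,
    PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
    id2 Int64 NOT NULL,
    t1_id1 Int64 NOT NULL,
    -- random_field2 Int32
    PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
    id3 Int16 NOT NULL,
    -- random_field3 Int32
    PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);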
2025-06-24T21:17:31.414183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:33.751272Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628089582429628:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:33.870975Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:34.425678Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628115352235040:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:34.425839Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:34.944149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:35.036068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:35.079950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:35.115559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:35.153233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:35.211495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:35.270769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:35.376637Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628119647203006:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:35.376723Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:35.376967Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628119647203011:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:35.466837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:35.493988Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628119647203013:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:17:35.580618Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628119647203066:3431] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:36.852800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:37.035874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 5778, MsgBus: 2861 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b80/r3tmp/tmpfyug5H/pdisk_1.dat 2025-06-24T21:17:41.609440Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:17:41.681023Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:41.681265Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628145829818311:2079] 1750799861426770 != 1750799861426773 2025-06-24T21:17:41.696985Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:41.697100Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:41.698486Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5778, node 2 2025-06-24T21:17:41.831834Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:41.831876Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:41.831886Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:41.832008Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2861 2025-06-24T21:17:42.451284Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2861 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:42.653908Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:42.663430Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:17:42.689632Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:42.771877Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:42.975723Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:43.075636Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:46.523010Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628167304656415:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:46.523158Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:46.610482Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:46.685272Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:46.737707Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:46.813883Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:46.895812Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:46.986616Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:47.035543Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:47.132924Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628171599624376:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:47.133054Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:47.133506Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628171599624381:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:47.138312Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:47.159120Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628171599624383:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:17:47.250383Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628171599624434:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:49.033222Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpLimits::QueryExecTimeout [GOOD] >> KqpJoinOrder::FiveWayJoinWithConstantFold-ColumnStore >> KqpJoin::LeftJoinWithNull-StreamLookupJoin [GOOD] >> KqpJoin::PushdownPredicateNoFullScan ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::QueryExecTimeout [GOOD] Test command err: Trying to start YDB, gRPC: 15559, MsgBus: 9507 2025-06-24T21:16:15.803932Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627779534733098:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:15.804102Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003203/r3tmp/tmpltrzoj/pdisk_1.dat 2025-06-24T21:16:16.205600Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:16.207272Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627779534733080:2079] 1750799775801311 != 1750799775801314 TServer::EnableGrpc on GrpcPort 15559, node 1 2025-06-24T21:16:16.215936Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:16.216040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:16.220262Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:16.278900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:16.278931Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:16.278940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:16.279075Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9507 TClient is connected to server localhost:9507 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:16:16.817633Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:16.849686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:16.874160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:17.060632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:16:17.214974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:17.304683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:18.980762Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627792419636600:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:18.980880Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:19.307477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.347608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.383185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.417986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.465246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.499702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.579817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:19.641982Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627796714604557:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:19.642089Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:19.642336Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627796714604562:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:19.646630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:19.661909Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627796714604564:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:19.752137Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627796714604615:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:20.802338Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627779534733098:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:20.802404Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:20.818195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:24.388321Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb:/ ... 025-06-24T21:17:13.384077Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519628028858548304:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:13.384143Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003203/r3tmp/tmpM81Vx8/pdisk_1.dat 2025-06-24T21:17:13.625202Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:13.629818Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519628028858548286:2079] 1750799833382969 != 1750799833382972 2025-06-24T21:17:13.646221Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:13.646334Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:13.649249Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20192, node 4 2025-06-24T21:17:13.843313Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:13.843341Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:13.843352Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:13.843564Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23235 2025-06-24T21:17:14.439891Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23235 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:14.761068Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:14.773659Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:14.801993Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:14.949794Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:15.222620Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:15.327839Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:18.085651Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519628050333386398:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:18.085789Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:18.181849Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.240847Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.297252Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.349241Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.384883Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519628028858548304:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:18.384993Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:18.432168Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.494814Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.569381Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:18.691878Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[4:7519628050333387067:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:18.692013Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:18.692493Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519628050333387072:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:18.698148Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:18.727612Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519628050333387074:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:17:18.814640Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519628050333387125:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:28.603336Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:17:28.603374Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded
: Error: Query did not complete within specified timeout 500ms, session id ydb://session/3?node_id=4&id=OTc5Y2IyMC04MTY1MDNjZi1hZGIzYmE4Zi0yYjBlZjYxYQ== >> KqpJoin::JoinAggregateSingleRow >> KqpJoinOrder::TPCDS96-ColumnStore [GOOD] >> KqpJoinOrder::TPCDS16-ColumnStore [GOOD] >> KqpJoinOrder::TPCDS88+ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS96-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 14212, MsgBus: 14365 2025-06-24T21:17:02.856006Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627979615539887:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:02.856302Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003bf3/r3tmp/tmpUt5aDF/pdisk_1.dat 2025-06-24T21:17:03.352076Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627979615539852:2079] 1750799822842183 != 1750799822842186 2025-06-24T21:17:03.360938Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:03.378680Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:03.378996Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 14212, node 1 2025-06-24T21:17:03.386012Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:03.460020Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:03.460043Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:03.460049Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:03.460186Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14365 2025-06-24T21:17:03.887298Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14365 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:04.316660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:06.379597Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627996795409682:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:06.379739Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:06.380010Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627996795409694:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:06.384271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:06.398451Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627996795409696:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:06.496150Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627996795409747:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:06.825404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:06.951486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:06.986052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.022286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.063249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.250642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.337026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.391771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.469413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.502196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.538749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.598587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.632954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:07.865727Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627979615539887:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:07.865854Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:08.237967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/ ... 
27682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.529970Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038564;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.530538Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038467;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.533132Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.533710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038580;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.535957Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038467;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.536565Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038486;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.539484Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038580;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.540064Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038489;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.541687Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038486;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.542202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038581;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.545788Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038489;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.546462Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.547967Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038581;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.548588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038493;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.552109Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.552725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038556;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.553689Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038493;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.554255Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038503;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.558310Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038556;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.558936Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038517;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.559704Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038503;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.560272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038461;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.564342Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038517;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.564991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.565356Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038461;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.565890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.570591Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.570783Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.571188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038548;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.571210Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038528;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.576551Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038528;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.576551Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038548;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.577245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038479;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.577569Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.582724Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038479;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.582724Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.583395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.588840Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.656575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038510;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:41.665119Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038510;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:41.713441Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwtke06pqj9p1qq4635xak", SessionId: ydb://session/3?node_id=1&id=YjU5YTU2NjctNWE5M2Q2MTEtN2M4MGQxYzAtZTE0OTNjZWY=, Slow query, duration: 31.248619s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:17:42.327078Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:17:42.327610Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519628044040061769:3905];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-24T21:17:42.327732Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:17:42.328062Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> RetryPolicy::TWriteSession_RetryOnTargetCluster [GOOD] >> RetryPolicy::TWriteSession_SwitchBackToLocalCluster >> KqpJoinOrder::SortingsDifferentDirs-RemoveLimitOperator [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS16-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 19400, MsgBus: 32518 2025-06-24T21:16:49.271238Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627925711821576:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:49.271316Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003c32/r3tmp/tmpFGq9nT/pdisk_1.dat 2025-06-24T21:16:49.790681Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:49.790762Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:49.796470Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:49.864263Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:49.864594Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627925711821539:2079] 1750799809268767 != 1750799809268770 TServer::EnableGrpc on GrpcPort 19400, node 1 2025-06-24T21:16:50.093250Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:50.093291Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:50.093312Z 
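For readability, the statement text quoted in the preceding KQP_SLOW_LOG record (Slow query, duration 31.248619s) is reproduced below with its escaped newlines expanded. This is the same DDL that appears inside the log message above, not new content; table names and settings are exactly as logged.

    CREATE TABLE t1 (
        id1 Int32 NOT NULL,
        PRIMARY KEY (id1)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

    CREATE TABLE t2 (
        id2 Int64 NOT NULL,
        t1_id1 Int64 NOT NULL,
        -- random_field2 Int32
        PRIMARY KEY (id2)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

    CREATE TABLE t3 (
        id3 Int16 NOT NULL,
        -- random_field3 Int32
        PRIMARY KEY (id3)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);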
node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:50.093510Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:16:50.296119Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:32518 TClient is connected to server localhost:32518 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:51.027430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:51.051385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:52.958610Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627938596724075:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:52.958640Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627938596724081:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:52.958732Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:52.963008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:52.978136Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627938596724089:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:16:53.051326Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627942891691436:2336] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:53.563051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.681281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.715187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.747318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.774066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.948462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:53.980212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.014532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.055914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.127791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.207649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.248875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:54.276045Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627925711821576:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:54.276200Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:54.301727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:55.075541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:27.913236Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:17:27.913795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038474;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:27.917240Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038540;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:17:27.917754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:27.923017Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:17:27.927640Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038474;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:17:27.928174Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038520;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:27.931817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038518;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:27.934043Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038520;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:17:27.935602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:27.937108Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038518;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:17:27.937727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:27.940947Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:17:27.941605Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 
2025-06-24T21:17:27.942598Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:17:27.943110Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:27.946895Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:17:27.947710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038450;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:27.948135Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:17:27.948676Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:27.953499Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038450;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:17:27.954141Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:17:27.954386Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:27.954698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:27.960222Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:17:27.961118Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:27.962735Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:17:27.963505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:27.966698Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:17:27.967250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:27.970331Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:17:27.970946Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:27.972193Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:17:27.986399Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:17:28.074784Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwt6db119a7vcednxx17wa", SessionId: ydb://session/3?node_id=1&id=YjUzODllNmUtMTM3ZWQ3MTItZmZmOGQ0MDEtNjc4NGQ4ZTk=, Slow query, duration: 30.942385s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:17:28.773221Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:17:28.773899Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519628063150803889:6304];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038170; 2025-06-24T21:17:28.774335Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:17:28.775297Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:17:53.828433Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwvh9v04rfdtc58jzj844b", SessionId: ydb://session/3?node_id=1&id=YjUzODllNmUtMTM3ZWQ3MTItZmZmOGQ0MDEtNjc4NGQ4ZTk=, Slow query, duration: 12.776020s, status: 
STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "-- NB: Subquerys\n$orders_with_several_warehouses = (\n select cs_order_number\n from `/Root/test/ds/catalog_sales`\n group by cs_order_number\n having count(distinct cs_warehouse_sk) > 1\n);\n\n-- start query 1 in stream 0 using template query16.tpl and seed 171719422\nselect\n count(distinct cs1.cs_order_number) as `order count`\n ,sum(cs_ext_ship_cost) as `total shipping cost`\n ,sum(cs_net_profit) as `total net profit`\nfrom\n `/Root/test/ds/catalog_sales` cs1\n cross join `/Root/test/ds/date_dim`\n cross join `/Root/test/ds/customer_address`\n cross join `/Root/test/ds/call_center`\n left semi join $orders_with_several_warehouses cs2 on cs1.cs_order_number = cs2.cs_order_number\n left only join `/Root/test/ds/catalog_returns` cr1 on cs1.cs_order_number = cr1.cr_order_number\nwhere\n cast(d_date as date) between cast('1999-4-01' as date) and\n (cast('1999-4-01' as date) + DateTime::IntervalFromDays(60))\nand cs1.cs_ship_date_sk = d_date_sk\nand cs1.cs_ship_addr_sk = ca_address_sk\nand ca_state = 'IL'\nand cs1.cs_call_center_sk = cc_call_center_sk\nand cc_county in ('Richland County','Bronx County','Maverick County','Mesa County',\n 'Raleigh County'\n)\norder by `order count`\nlimit 100;\n", parameters: 0b >> KqpJoinOrder::SortingsByPKWithLookupJoin+RemoveLimitOperator [GOOD] >> KqpIndexLookupJoin::CheckCastInt64ToUint64+StreamLookupJoin-NotNull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsDifferentDirs-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 6947, MsgBus: 20392 2025-06-24T21:17:10.955001Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628014039351389:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:10.961544Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003bd7/r3tmp/tmpyt0FZw/pdisk_1.dat 2025-06-24T21:17:11.377471Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:11.377632Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:11.390970Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:11.434090Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:11.435248Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628014039351197:2079] 1750799830910721 != 1750799830910724 TServer::EnableGrpc on GrpcPort 6947, node 1 2025-06-24T21:17:11.567680Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:11.567701Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:11.567718Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:11.567828Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is 
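For readability, the query text quoted in the preceding KQP_SLOW_LOG record (Slow query, duration 12.776020s; TPC-DS query 16 from the KqpJoinOrder::TPCDS16-ColumnStore test) is reproduced below with its escaped newlines expanded. It is copied verbatim from the log message above, including its original comments:

    -- NB: Subquerys
    $orders_with_several_warehouses = (
        select cs_order_number
        from `/Root/test/ds/catalog_sales`
        group by cs_order_number
        having count(distinct cs_warehouse_sk) > 1
    );

    -- start query 1 in stream 0 using template query16.tpl and seed 171719422
    select
        count(distinct cs1.cs_order_number) as `order count`
        ,sum(cs_ext_ship_cost) as `total shipping cost`
        ,sum(cs_net_profit) as `total net profit`
    from
        `/Root/test/ds/catalog_sales` cs1
        cross join `/Root/test/ds/date_dim`
        cross join `/Root/test/ds/customer_address`
        cross join `/Root/test/ds/call_center`
        left semi join $orders_with_several_warehouses cs2 on cs1.cs_order_number = cs2.cs_order_number
        left only join `/Root/test/ds/catalog_returns` cr1 on cs1.cs_order_number = cr1.cr_order_number
    where
        cast(d_date as date) between cast('1999-4-01' as date) and
            (cast('1999-4-01' as date) + DateTime::IntervalFromDays(60))
        and cs1.cs_ship_date_sk = d_date_sk
        and cs1.cs_ship_addr_sk = ca_address_sk
        and ca_state = 'IL'
        and cs1.cs_call_center_sk = cc_call_center_sk
        and cc_county in ('Richland County','Bronx County','Maverick County','Mesa County',
            'Raleigh County'
        )
    order by `order count`
    limit 100;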
connected to server localhost:20392 2025-06-24T21:17:11.951262Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20392 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:12.285208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:12.304007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:14.869455Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628031219221019:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:14.869598Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:14.869922Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628031219221031:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:14.874546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:14.893354Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628031219221033:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:14.991477Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628031219221084:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:15.353390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:15.500316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:15.540852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:15.578828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:15.615130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:15.791325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:15.869747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:15.907574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:15.942265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:15.961791Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628014039351389:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:15.961870Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:15.997595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.037330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.077881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.119970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.809809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subopera ... 
10668Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.514823Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.515517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.522523Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.522974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.526822Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.527743Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.528211Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.532012Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.532636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.535597Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.537380Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.538049Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.541021Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.541551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.543675Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.544332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.552082Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.552712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.556151Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.556777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.561400Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.562117Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.565712Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.566402Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.570128Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.570801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.579890Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.580439Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.584364Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.584924Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.589922Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.590450Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.594515Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.595509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.600687Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.603593Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.604365Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.607851Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.612513Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.616526Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.755069Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwtvwbckqpke4a6ptcbdaq", SessionId: ydb://session/3?node_id=1&id=NzZlODdkMGYtOTJjNDE5OWMtY2QxODczMTktM2IzMTdhZjA=, Slow query, duration: 36.638621s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:17:56.074548Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:17:56.075116Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:17:56.076084Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519628061283998329:2848];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038629; 2025-06-24T21:17:56.076562Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> OlapEstimationRowsCorrectness::TPCDS78 >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt64_Reboot [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsByPKWithLookupJoin+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 10299, MsgBus: 30348 2025-06-24T21:17:09.536035Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628007478968422:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:09.772338Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003bec/r3tmp/tmp40BHwC/pdisk_1.dat 2025-06-24T21:17:10.064696Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:10.064847Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:10.070167Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:10.114321Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:10.119317Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628007478968327:2079] 1750799829446573 != 1750799829446576 TServer::EnableGrpc on GrpcPort 10299, node 1 2025-06-24T21:17:10.268272Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:10.268294Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:10.268301Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:10.268397Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:10.534489Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30348 TClient is connected to server localhost:30348 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:11.085441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:11.163399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:13.427580Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628024658838151:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:13.427688Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:13.427768Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628024658838162:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:13.433856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:13.451367Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628024658838165:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:13.522259Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628024658838216:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:13.917978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:14.029302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:14.106345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:14.142021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:14.179683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:14.391005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:14.470261Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628007478968422:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:14.470344Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:14.481955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:14.570080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:14.625276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:14.676867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:14.720099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:14.769361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:14.813158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:15.573097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.200790Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.201378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.205927Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.206496Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.214952Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.215534Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.219106Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.219730Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.225699Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.226288Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.228760Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.229510Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.241471Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.244417Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.245030Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.248836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038489;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.261450Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.262023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038529;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.263524Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038489;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.264062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.273265Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038529;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.273823Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.275186Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.275734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.282036Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.285814Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.286614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.291937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.297648Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.298321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.299313Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.299847Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.306146Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.306791Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038531;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.315928Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.316523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.317896Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038531;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.318556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.327612Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.328188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:55.333479Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.342187Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:55.448974Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwttev74c2eb71znxcvq1v", SessionId: 
ydb://session/3?node_id=1&id=MjQ5YzE5MDctZjZmZGQ3ZGItNzlkNjIwNTktYjcxMGQxMw==, Slow query, duration: 37.789027s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:17:56.061069Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:17:56.061683Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:17:56.062533Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoin::PushdownPredicateNoFullScan [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt64_Reboot [GOOD] Test command err: 2025-06-24T21:15:32.822447Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:32.849509Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:32.849790Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:32.857046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:32.857269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:32.857484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:32.857605Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:32.857730Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:32.857851Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:32.857962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:32.858073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:32.858185Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:32.858311Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:32.858416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:32.888094Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:32.888311Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:32.888363Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:32.888524Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:32.888673Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:32.888737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:32.888781Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:32.888869Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:32.888915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:32.888950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:32.888978Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:32.889096Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:32.889145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:32.889170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:32.889191Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:32.889255Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:32.889294Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:32.889320Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:32.889363Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:32.889394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:32.889426Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:32.889450Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:32.889622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:32.889651Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:32.889676Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:32.889827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:32.889859Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:32.889878Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:32.889970Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:32.890000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:32.890033Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:32.890096Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:32.890149Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:32.890176Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:32.890207Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:32.890544Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=39; 2025-06-24T21:15:32.890615Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=33; 2025-06-24T21:15:32.890677Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=24; 2025-06-24T21:15:32.890732Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=26; 2025-06-24T21:15:32.890792Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:32.890868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:32.890904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:32.890939Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
ame=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=26286;data_size=26260;sum=13688824;count=7164;size_of_portion=208; 2025-06-24T21:18:02.004217Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=179785; 2025-06-24T21:18:02.004338Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=15; 2025-06-24T21:18:02.006293Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=1871; 2025-06-24T21:18:02.006370Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=182120; 2025-06-24T21:18:02.006430Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=182273; 2025-06-24T21:18:02.006520Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=18; 2025-06-24T21:18:02.007896Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=1301; 2025-06-24T21:18:02.007966Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=184926; 2025-06-24T21:18:02.008177Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=139; 2025-06-24T21:18:02.008319Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=86; 2025-06-24T21:18:02.008808Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=432; 2025-06-24T21:18:02.009279Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=388; 2025-06-24T21:18:02.053550Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=44165; 2025-06-24T21:18:02.105494Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=51773; 2025-06-24T21:18:02.105649Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=18; 2025-06-24T21:18:02.105723Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=17; 2025-06-24T21:18:02.105780Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=9; 2025-06-24T21:18:02.105887Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=63; 2025-06-24T21:18:02.105944Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=7; 2025-06-24T21:18:02.106061Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=72; 2025-06-24T21:18:02.106118Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=8; 2025-06-24T21:18:02.106203Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=43; 2025-06-24T21:18:02.106313Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=65; 2025-06-24T21:18:02.106409Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=53; 2025-06-24T21:18:02.106463Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=299401; 2025-06-24T21:18:02.106650Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=22538992;raw_bytes=22128150;count=3;records=225200} inactive {blob_bytes=147791880;raw_bytes=143975050;count=221;records=1575200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:18:02.106802Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10317:11910];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:18:02.106873Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10317:11910];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:18:02.106957Z node 1 :TX_COLUMNSHARD 
DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10317:11910];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:18:02.107017Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10317:11910];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:18:02.107427Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:18:02.107557Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:18:02.107665Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799438414;tx_id=18446744073709551615;;current_snapshot_ts=1750799734617; 2025-06-24T21:18:02.107719Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:18:02.107780Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:18:02.107826Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:18:02.107941Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:18:02.111816Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:10440:12025];tablet_id=9437184;parent=[1:10317:11910];fline=manager.cpp:88;event=ask_data;request=request_id=212;9438184000001={portions_count=224};; 2025-06-24T21:18:02.115319Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10317:11910];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:18:02.116328Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10317:11910];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:18:02.116380Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:18:02.116417Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:18:02.116472Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10317:11910];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:18:02.116577Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10317:11910];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:18:02.116663Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10317:11910];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799438414;tx_id=18446744073709551615;;current_snapshot_ts=1750799734617; 2025-06-24T21:18:02.116718Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10317:11910];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:18:02.116785Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10317:11910];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:18:02.116851Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10317:11910];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:18:02.116951Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10317:11910];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=1.000000s; 2025-06-24T21:18:02.117014Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10317:11910];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; >> KqpJoinOrder::DatetimeConstantFold-ColumnStore >> KqpJoinOrder::SortingsWithLookupJoin3+RemoveLimitOperator [GOOD] >> OlapEstimationRowsCorrectness::TPCH11 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::PushdownPredicateNoFullScan [GOOD] Test command err: Trying to start YDB, gRPC: 20089, MsgBus: 2526 2025-06-24T21:17:45.268524Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628162075884365:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:45.268603Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b74/r3tmp/tmpyPVmOP/pdisk_1.dat 2025-06-24T21:17:46.090872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:46.091008Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:46.107627Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:46.155320Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628162075884347:2079] 1750799865258698 != 1750799865258701 2025-06-24T21:17:46.178607Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20089, node 1 2025-06-24T21:17:46.324198Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:17:46.427790Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:46.427814Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:46.427822Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:46.427945Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2526 TClient is connected to server localhost:2526 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:47.671395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:47.736585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:47.758135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:17:48.018595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:48.259186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:48.377874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:50.271351Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628162075884365:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:50.271445Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:50.524019Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628183550722475:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:50.524133Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:51.030841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:51.120692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:51.161143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:51.234338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:51.275422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:51.328164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:51.411377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:51.559427Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628187845690442:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:51.559490Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:51.559778Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628187845690447:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:51.564430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:51.590517Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628187845690449:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:17:51.694855Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628187845690500:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:53.289572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... :2,\"OutputBytes\":4}],\"PeakMemoryUsageBytes\":65536,\"CpuTimeUs\":842}],\"UseLlvm\":\"undefined\",\"OutputRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"PhysicalStageId\":1,\"FinishedTasks\":1,\"InputBytes\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4},\"MaxMemoryUsage\":{\"Count\":1,\"Sum\":1048576,\"Max\":1048576,\"Min\":1048576,\"History\":[5,1048576]},\"BaseTimeMs\":1750799885330,\"Output\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"LastMessageMs\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4},\"FirstMessageMs\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4},\"Bytes\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4,\"History\":[5,4]}},\"Name\":\"RESULT\",\"Push\":{\"LastMessageMs\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4},\"FirstMessageMs\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4},\"PauseMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"WaitTimeUs\":{\"Count\":1,\"Sum\":3379,\"Max\":3379,\"Min\":3379,\"History\":[5,3379]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"WaitMessageMs\":{\"Count\":1,\"Max\":4,\"Min\":1}}}],\"CpuTimeUs\":{\"Count\":1,\"Sum\":451,\"Max\":451,\"Min\":451,\"History\":[5,451]},\"StageDurationUs\":0,\"ResultRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResultBytes\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4},\"OutputBytes\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4},\"Input\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"LastMessageMs\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4},\"FirstMessageMs\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4},\"Bytes\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4,\"History\":[5,4]}},\"Name\":\"2\",\"Push\":{\"LastMessageMs\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4},\"FirstMessageMs\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4},\"Bytes\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4,\"History\":[5,4]},\"PauseMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"WaitTimeUs\":{\"Count\":1,\"Sum\":3319,\"Max\":3319,\"Min\":3319,\"History\":[5,3319]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"WaitMessageMs\":{\"Count\":1,\"Max\":4,\"Min\":1}}}],\"UpdateTimeMs\":5,\"InputRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Tasks\":1}}],\"Node Type\":\"Precompute_1\",\"Parent 
Relationship\":\"InitPlan\",\"PlanNodeType\":\"Materialize\"}],\"Node Type\":\"Query\",\"Stats\":{\"Compilation\":{\"FromCache\":false,\"DurationUs\":789615,\"CpuTimeUs\":782524},\"ProcessCpuTimeUs\":2319,\"TotalDurationUs\":852016,\"ResourcePoolId\":\"default\",\"QueuedTimeUs\":0},\"PlanNodeType\":\"Query\"},\"meta\":{\"version\":\"0.2\",\"type\":\"query\"},\"SimplifiedPlan\":{\"PlanNodeId\":0,\"Plans\":[{\"PlanNodeId\":1,\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"PlanNodeId\":4,\"Plans\":[{\"PlanNodeId\":5,\"Plans\":[{\"PlanNodeId\":6,\"Plans\":[{\"PlanNodeId\":7,\"Operators\":[{\"E-Rows\":\"1\",\"Columns\":[\"id\",\"value\"],\"E-Size\":\"0\",\"E-Cost\":\"0\",\"Name\":\"TableLookup\",\"Table\":\"TableRight\",\"LookupKeyColumns\":[\"id\"]}],\"Node Type\":\"TableLookup\",\"PlanNodeType\":\"Connection\"}],\"Operators\":[{\"E-Rows\":\"1\",\"Predicate\":\"Exist(item.id)\",\"Name\":\"Filter\",\"E-Size\":\"0\",\"E-Cost\":\"0\"}],\"Node Type\":\"Filter\"},{\"PlanNodeId\":12,\"Plans\":[{\"PlanNodeId\":13,\"Operators\":[{\"Scan\":\"Parallel\",\"ReadRange\":[\"hash_key (9488119898155926451)\"],\"E-Size\":\"0\",\"Name\":\"TablePointLookup\",\"Path\":\"\\/Root\\/TableLeft\",\"E-Rows\":\"1\",\"Table\":\"TableLeft\",\"ReadColumns\":[\"ref_id\"],\"E-Cost\":\"0\"}],\"Node Type\":\"TablePointLookup\"}],\"Operators\":[{\"E-Size\":\"0\",\"A-SelfCpu\":0.655,\"Name\":\"Filter\",\"Predicate\":\"Exist(item.ref_id)\",\"A-Rows\":1,\"E-Rows\":\"1\",\"A-Cpu\":0.655,\"E-Cost\":\"0\",\"A-Size\":4}],\"Node Type\":\"Filter\"}],\"Operators\":[{\"E-Rows\":\"0.2\",\"Condition\":\"r.id = l.ref_id\",\"Name\":\"InnerJoin (MapJoin)\",\"E-Size\":\"0\",\"E-Cost\":\"4.5\"}],\"Node Type\":\"InnerJoin (MapJoin)\"}],\"Operators\":[{\"A-Rows\":1,\"A-SelfCpu\":1.579,\"A-Cpu\":2.234,\"A-Size\":7,\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Operators\":[{\"A-Rows\":1,\"A-SelfCpu\":0.6,\"A-Cpu\":2.834,\"A-Size\":7,\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"ResultSet_2\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"PlanNodeType\":\"Query\"}}" query_ast: "(\n(declare %kqp%tx_result_binding_0_0 (TupleType (DataType \'Uint64) (DataType \'Uint64)))\n(declare %kqp%tx_result_binding_1_0 (ListType (StructType \'(\'\"ref_id\" (OptionalType (DataType \'Uint64))))))\n(let $1 (DataType \'Uint64))\n(let $2 (OptionalType $1))\n(let $3 \'(\'\"_partition_mode\" \'\"single\"))\n(let $4 \'(\'(\'\"_logical_id\" \'1146) \'(\'\"_id\" \'\"bbe75e61-297b362d-55ca411e-c6fe39e7\") $3))\n(let $5 (DqPhyStage \'() (lambda \'() (block \'(\n (let $40 \'((DataType \'String) \'\"\" \'1))\n (let $41 (CallableType \'(\'1) \'($1) $40 \'($2 \'\"Init\")))\n (let $42 (Udf \'\"Digest.MurMurHash\" (Void) (VoidType) \'\"\" $41 (VoidType) \'\"\" \'(\'(\'\"strict\"))))\n (let $43 (Apply $42 (ToString (Utf8 \'\"target\"))))\n (return (ToStream (Just \'($43 $43))))\n))) $4))\n(let $6 (DqCnValue (TDqOutput $5 \'0)))\n(let $7 (KqpPhysicalTx \'($5) \'($6) \'() \'(\'(\'\"type\" \'\"compute\"))))\n(let $8 (KqpTable \'\"/Root/TableLeft\" \'\"72057594046644480:18\" \'\"\" \'1))\n(let $9 \'(\'\"ref_id\"))\n(let $10 \'\"%kqp%tx_result_binding_0_0\")\n(let $11 (TupleType $1 $1))\n(let $12 (KqlKeyInc (Nth %kqp%tx_result_binding_0_0 \'1)))\n(let $13 (KqpRowsSourceSettings $8 $9 \'() \'($12 $12)))\n(let $14 (DqPhyStage \'((DqSource (DataSource \'\"KqpReadRangesSource\") $13)) (lambda \'($44) (FromFlow (OrderedFilter (ToFlow $44) (lambda \'($45) (Exists (Member $45 \'\"ref_id\")))))) \'(\'(\'\"_logical_id\" \'1228) 
\'(\'\"_id\" \'\"f75543fe-f74e425f-c037ef9b-9585a516\"))))\n(let $15 (DqCnUnionAll (TDqOutput $14 \'0)))\n(let $16 (DqPhyStage \'($15) (lambda \'($46) $46) \'(\'(\'\"_logical_id\" \'1744) \'(\'\"_id\" \'\"ee8423b5-7b669607-6aee4cdd-e93925ba\"))))\n(let $17 (DqCnResult (TDqOutput $16 \'0) \'()))\n(let $18 (KqpTxResultBinding $11 \'0 \'0))\n(let $19 \'(\'(\'\"type\" \'\"data\")))\n(let $20 (KqpPhysicalTx \'($14 $16) \'($17) \'(\'($10 $18)) $19))\n(let $21 \'\"%kqp%tx_result_binding_1_0\")\n(let $22 (StructType \'(\'\"ref_id\" $2)))\n(let $23 (ListType $22))\n(let $24 %kqp%tx_result_binding_1_0)\n(let $25 \'(\'(\'\"_logical_id\" \'1270) \'(\'\"_id\" \'\"e970c8f9-b65f941d-44996cec-57412017\") $3))\n(let $26 (DqPhyStage \'() (lambda \'() (Iterator (PartitionByKey $24 (lambda \'($47) (Member $47 \'\"ref_id\")) (Void) (Void) (lambda \'($48) (Map (Filter (FlatMap $48 (lambda \'($49) (Take (Nth $49 \'1) (Uint64 \'1)))) (lambda \'($50) (Exists (Member $50 \'\"ref_id\")))) (lambda \'($51) (AsStruct \'(\'\"id\" (Member $51 \'\"ref_id\"))))))))) $25))\n(let $27 (KqpTable \'\"/Root/TableRight\" \'\"72057594046644480:17\" \'\"\" \'1))\n(let $28 (KqpCnStreamLookup (TDqOutput $26 \'0) $27 \'(\'\"id\" \'\"value\") (ListType (StructType \'(\'\"id\" $2))) \'(\'(\'\"Strategy\" \'\"LookupRows\"))))\n(let $29 (Uint64 \'\"1001\"))\n(let $30 (StructType \'(\'\"r.value\" (OptionalType (DataType \'Utf8)))))\n(let $31 \'(\'(\'\"_logical_id\" \'1536) \'(\'\"_id\" \'\"b6c51b7b-642e1803-5b29e5b9-47b2bf03\") \'(\'\"_wide_channels\" $30)))\n(let $32 (DqPhyStage \'($28) (lambda \'($52) (block \'(\n (let $53 \'(\'Many \'Hashed \'Compact))\n (let $54 (SqueezeToDict (FlatMap (ToFlow $24) (lambda \'($55) (block \'(\n (let $56 (Member $55 \'\"ref_id\"))\n (let $57 (Nothing (OptionalType (TupleType $1 $22))))\n (let $58 (IfPresent $56 (lambda \'($59) (Just \'($59 $55))) $57))\n (return (If (Exists $56) $58 $57))\n )))) (lambda \'($60) (Nth $60 \'0)) (lambda \'($61) (Nth $61 \'1)) $53))\n (return (FromFlow (ExpandMap (Take (FlatMap $54 (lambda \'($62) (MapJoinCore (OrderedFilter (ToFlow $52) (lambda \'($63) (Exists (Member $63 \'\"id\")))) $62 \'\"Inner\" \'(\'\"id\") $9 \'(\'\"value\" \'\"r.value\") \'() \'(\'\"r.id\") \'(\'\"l.ref_id\")))) $29) (lambda \'($64) (Member $64 \'\"r.value\")))))\n))) $31))\n(let $33 (DqCnUnionAll (TDqOutput $32 \'0)))\n(let $34 (DqPhyStage \'($33) (lambda \'($65) (FromFlow (NarrowMap (Take (ToFlow $65) $29) (lambda \'($66) (AsStruct \'(\'\"r.value\" $66)))))) \'(\'(\'\"_logical_id\" \'1549) \'(\'\"_id\" \'\"e0813148-b0168efd-b25a8c5e-dcc5e4b5\"))))\n(let $35 \'($26 $32 $34))\n(let $36 (DqCnResult (TDqOutput $34 \'0) \'(\'\"r.value\")))\n(let $37 (KqpTxResultBinding $23 \'1 \'0))\n(let $38 (KqpPhysicalTx $35 \'($36) \'(\'($21 $37)) $19))\n(let $39 \'($7 $20 $38))\n(return (KqpPhysicalQuery $39 \'((KqpTxResultBinding (ListType $30) \'\"2\" \'0)) \'(\'(\'\"type\" \'\"data_query\"))))\n)\n" total_duration_us: 852016 total_cpu_time_us: 807030 query_meta: 
"{\"query_database\":\"/Root\",\"query_parameter_types\":{},\"table_metadata\":[\"{\\\"DoesExist\\\":true,\\\"Cluster\\\":\\\"db\\\",\\\"Name\\\":\\\"/Root/TableLeft\\\",\\\"SysView\\\":\\\"\\\",\\\"PathId\\\":{\\\"OwnerId\\\":72057594046644480,\\\"TableId\\\":18},\\\"SchemaVersion\\\":1,\\\"Kind\\\":1,\\\"Columns\\\":[{\\\"Name\\\":\\\"data\\\",\\\"Id\\\":3,\\\"Type\\\":\\\"Utf8\\\",\\\"TypeId\\\":4608,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"hash_key\\\",\\\"Id\\\":1,\\\"Type\\\":\\\"Uint64\\\",\\\"TypeId\\\":4,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"ref_id\\\",\\\"Id\\\":2,\\\"Type\\\":\\\"Uint64\\\",\\\"TypeId\\\":4,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}}],\\\"KeyColunmNames\\\":[\\\"hash_key\\\"],\\\"RecordsCount\\\":0,\\\"DataSize\\\":0,\\\"StatsLoaded\\\":false}\",\"{\\\"DoesExist\\\":true,\\\"Cluster\\\":\\\"db\\\",\\\"Name\\\":\\\"/Root/TableRight\\\",\\\"SysView\\\":\\\"\\\",\\\"PathId\\\":{\\\"OwnerId\\\":72057594046644480,\\\"TableId\\\":17},\\\"SchemaVersion\\\":1,\\\"Kind\\\":1,\\\"Columns\\\":[{\\\"Name\\\":\\\"id\\\",\\\"Id\\\":1,\\\"Type\\\":\\\"Uint64\\\",\\\"TypeId\\\":4,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"value\\\",\\\"Id\\\":2,\\\"Type\\\":\\\"Utf8\\\",\\\"TypeId\\\":4608,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}}],\\\"KeyColunmNames\\\":[\\\"id\\\"],\\\"RecordsCount\\\":0,\\\"DataSize\\\":0,\\\"StatsLoaded\\\":false}\"],\"table_meta_serialization_type\":2,\"created_at\":\"1750799885\",\"query_type\":\"QUERY_TYPE_SQL_DML\",\"query_syntax\":\"1\",\"query_cluster\":\"db\",\"query_id\":\"2ceb5535-d51d0215-cf0a45bf-a694c2f1\",\"version\":\"1.0\"}" >> KqpJoin::JoinAggregateSingleRow [GOOD] >> KqpJoin::JoinConvert >> KqpJoinOrder::SortingsWithLookupJoin4-RemoveLimitOperator ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoin3+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 18881, MsgBus: 18853 2025-06-24T21:17:12.091942Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628021990785326:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:12.116204Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/2btm/003bc4/r3tmp/tmpvwNWds/pdisk_1.dat 2025-06-24T21:17:12.616348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:12.616473Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:12.654669Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:12.668661Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628021990785300:2079] 1750799832072089 != 1750799832072092 2025-06-24T21:17:12.670252Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18881, node 1 2025-06-24T21:17:12.782627Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:12.782652Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:12.782666Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:12.782816Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18853 2025-06-24T21:17:13.129861Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18853 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:13.504745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:13.518816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:15.787521Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628034875687831:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:15.787623Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:15.788021Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628034875687843:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:15.792150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:15.812422Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628034875687845:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:15.883677Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628034875687896:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:16.252037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.389524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.424061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.457309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.494757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.658696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.695599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.735976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.794207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.828695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.861514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.905145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.944851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:17.092222Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628021990785326:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:17.092342Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:17.817380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
pp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.711727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.715667Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.722654Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.723199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.727650Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.728239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.733614Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.734267Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.736728Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.737305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.743989Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.744647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.750979Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.751673Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.756841Z node 1 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.757516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.761992Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.762587Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.763235Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.763833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.769665Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.770340Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.775437Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.776651Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.779822Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.780564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.786148Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.789832Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.823550Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.829759Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.830881Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.832907Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.839802Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.840228Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.840390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.840990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.846474Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.850827Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.855963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.864545Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:01.019450Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwtwxwf3b195b33n2wjtes", SessionId: ydb://session/3?node_id=1&id=OGVlMzZhMDQtMTM5YzY0NDgtYWMzNjdjOTAtNzkyYmVjZDk=, Slow query, duration: 40.830343s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 
NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:01.321651Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:01.322007Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:01.322412Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519628129364985309:4574];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038629; 2025-06-24T21:18:01.322907Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
:7:3: Warning: ORDER BY without LIMIT in subquery will be ignored, code: 4504
:7:3: Warning: ORDER BY without LIMIT in subquery will be ignored, code: 4504 >> TCdcStreamTests::MeteringDedicated [GOOD] >> TCdcStreamTests::ChangeOwner >> KqpIndexLookupJoin::CheckCastUtf8ToString-StreamLookupJoin-NotNull >> KqpJoinOrder::Sortings4Year+RemoveLimitOperator [GOOD] >> KqpJoinOrder::SortingsWithLookupJoin2+RemoveLimitOperator >> KqpIndexLookupJoin::CheckCastInt64ToUint64+StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastInt64ToUint64+StreamLookupJoin+NotNull >> TCdcStreamTests::ChangeOwner [GOOD] >> TCdcStreamTests::DropIndexWithStream ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::Sortings4Year+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 12488, MsgBus: 12654 2025-06-24T21:17:18.959222Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628047970666618:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:18.960990Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b92/r3tmp/tmpks4gO8/pdisk_1.dat 2025-06-24T21:17:19.463330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:19.463431Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:19.474753Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:19.530799Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:19.535292Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628047970666522:2079] 1750799838918577 != 1750799838918580 TServer::EnableGrpc on GrpcPort 12488, node 1 2025-06-24T21:17:19.747612Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:19.747634Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:19.747641Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:19.747751Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:19.907374Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12654 TClient is connected to server localhost:12654 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:20.634462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:20.661008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:23.160492Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628069445503646:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:23.160768Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:23.163272Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628069445503658:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:23.170676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:23.191716Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628069445503660:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:23.284655Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628069445503711:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:23.797125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:23.926371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:23.927332Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628047970666618:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:23.927430Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:24.007648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.061287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.103447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.293849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.347672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.393498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.441183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.493167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.528693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.621078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:24.664729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:25.581317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.014387Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.015001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.028184Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.028733Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.029281Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.029727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.034533Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.038837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.044042Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.044541Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.049593Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.050190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.051488Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.051947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 
2025-06-24T21:18:06.057085Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.057681Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.059734Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.060192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.062909Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.064561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.065308Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.065863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.069932Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.070564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.073686Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.074275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.076449Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.077066Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.079083Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.079667Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.082200Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.082687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.084816Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.085762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.087962Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.088535Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.090862Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.091415Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.093102Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.094039Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:06.096467Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.098683Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:06.238542Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwv4j74n7cswqctk9w5rqz", SessionId: 
ydb://session/3?node_id=1&id=ZDAyNGY4NTctODM1ZWZjOWEtMmQwNTA0MmEtNTU4ODg1MWQ=, Slow query, duration: 38.230085s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:06.753388Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:06.754185Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:06.754228Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::SortingsByPK-RemoveLimitOperator [GOOD] >> TCdcStreamTests::DropIndexWithStream [GOOD] >> TCdcStreamTests::DropTableWithIndexWithStream >> KqpJoin::JoinConvert [GOOD] >> KqpJoinOrder::FiveWayJoinWithPreds-ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsByPK-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 65347, MsgBus: 6607 2025-06-24T21:17:21.847662Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628061817859698:2168];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:21.847852Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b88/r3tmp/tmpYzoRB1/pdisk_1.dat 2025-06-24T21:17:22.420197Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:22.420326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:22.427209Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:22.515518Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:22.516848Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628061817859568:2079] 1750799841804026 != 1750799841804029 TServer::EnableGrpc on GrpcPort 65347, node 1 2025-06-24T21:17:22.807215Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:22.807239Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:22.807246Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:22.807389Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:22.879359Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6607 TClient is connected to server localhost:6607 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:23.932019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:23.957452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:26.506977Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628083292696691:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:26.507089Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:26.511300Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628083292696703:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:26.517419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:26.541913Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628083292696705:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:26.651604Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628083292696757:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:26.843259Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628061817859698:2168];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:26.843336Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:27.422365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:27.626230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:27.668699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:27.708560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:27.762464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:27.995807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:28.040394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:28.126985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:28.203585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:28.252363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:28.294909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:28.326934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:28.372059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:29.185904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperat ... 
06677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038502;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.406690Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038556;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.407185Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038496;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.412450Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038502;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.413052Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.421234Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038496;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.421742Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038582;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.427812Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038582;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.428502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038437;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.433950Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.434448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038548;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.434602Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038437;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.435277Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038441;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.440415Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038548;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.440620Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038441;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.441066Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038584;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.441161Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038443;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.446641Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038584;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.446690Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038443;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.447256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038526;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.447257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038445;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.453262Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038526;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.454190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038431;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.457289Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038445;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.457886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038544;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.459912Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038431;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.460486Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038439;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.463240Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038544;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.463861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.466436Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038439;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.467030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038435;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.469213Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.470324Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.472320Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038435;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.473253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.476978Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.478940Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.545275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038550;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.551575Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038550;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.554776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038560;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:10.561080Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038560;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:10.599783Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwv8awax3rpkpkh2q00xv9", SessionId: ydb://session/3?node_id=1&id=Nzc1Njk3ZWMtMmYwZDMwMDctMTI2YzhhMmMtYjg3OGJiMjc=, Slow query, duration: 38.730837s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:10.930883Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:10.931471Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:10.932237Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519628237911546270:6058];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038331; 2025-06-24T21:18:10.932701Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> TCdcStreamTests::DropTableWithIndexWithStream [GOOD] >> KqpIndexLookupJoin::CheckCastUtf8ToString-StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastUtf8ToString-StreamLookupJoin+NotNull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::JoinConvert [GOOD] Test command err: Trying to start YDB, gRPC: 64544, MsgBus: 17831 2025-06-24T21:17:57.353677Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628213994311087:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:57.353884Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b59/r3tmp/tmpAqWx3y/pdisk_1.dat 2025-06-24T21:17:57.937167Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:57.937278Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:57.940001Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:57.963329Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628213994310903:2079] 1750799877269184 != 1750799877269187 2025-06-24T21:17:57.989210Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64544, node 1 2025-06-24T21:17:58.118564Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:58.118589Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:17:58.118601Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:58.118706Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:58.341033Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17831 TClient is connected to server localhost:17831 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:59.195540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:59.250074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:59.564660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:17:59.884148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:18:00.045924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:02.351292Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628213994311087:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:02.386050Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:02.668014Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628235469149007:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:02.668125Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:03.162738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:03.200945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:03.239872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:03.273842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:03.331975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:03.378294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:03.441240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:03.583482Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628239764116965:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:03.583549Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:03.583685Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628239764116970:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:03.587961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:03.611016Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628239764116972:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:18:03.687680Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628239764117025:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:05.017212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:05.072837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... 160 2025-06-24T21:18:08.812404Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:08.812483Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:08.824006Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15860, node 2 2025-06-24T21:18:08.971724Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:08.971745Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:08.971753Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:08.971857Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62617 2025-06-24T21:18:09.499574Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:62617 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:18:09.644444Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:09.651133Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:09.669443Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:09.819855Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:10.098145Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:10.230166Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:13.235580Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628282711308165:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:13.235665Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:13.395105Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:13.474272Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:13.521348Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:13.564463Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:13.608235Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:13.681067Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:13.755664Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:13.875748Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628282711308827:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:13.875839Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:13.879277Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628282711308832:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:13.889292Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:13.915823Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628282711308834:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:18:14.010301Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628287006276181:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:15.286768Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:15.356439Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:15.414848Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:15.936131Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::TPCDS92-ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_cdc_stream/unittest >> TCdcStreamTests::DropTableWithIndexWithStream [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:15:43.225145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:15:43.225234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:15:43.225270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:15:43.225302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:15:43.226308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:15:43.226359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:15:43.226443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:15:43.226515Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:15:43.227293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:15:43.229038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:15:43.322351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:15:43.322406Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:15:43.339275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:15:43.339660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:15:43.339816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:15:43.346672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:15:43.346846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:15:43.347458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:15:43.347860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:15:43.353361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:15:43.354242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:15:43.360266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:15:43.360361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:15:43.360962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:15:43.361019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:15:43.361077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:15:43.361173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:15:43.367894Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:15:43.499954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:15:43.501148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:15:43.502259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:15:43.502327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:15:43.503513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:15:43.503623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:15:43.508394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:15:43.510604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:15:43.510889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:15:43.511043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:15:43.511084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:15:43.511118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:15:43.513698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:15:43.513774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 
ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:15:43.513819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:15:43.516399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:15:43.516452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:15:43.516502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:15:43.516549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:15:43.521859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:15:43.525892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:15:43.527172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:15:43.528276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:15:43.528424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:15:43.528473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:15:43.530183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:15:43.530253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:15:43.530456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:15:43.530546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:15:43.533058Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:15:43.533120Z node 1 :FLAT_TX_SCHEMESHARD ... 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:18:19.828742Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:18:19.828781Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-24T21:18:19.829962Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:18:19.830048Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:18:19.830081Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-24T21:18:19.830117Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-24T21:18:19.830155Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-24T21:18:19.830266Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 4/5, is published: true 2025-06-24T21:18:19.833123Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:18:19.833188Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 103:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:18:19.833539Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:18:19.833696Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 5/5 2025-06-24T21:18:19.833737Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 5/5 2025-06-24T21:18:19.833799Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#103:0 progress is 5/5 2025-06-24T21:18:19.833837Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 5/5 2025-06-24T21:18:19.833881Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 5/5, is published: true 2025-06-24T21:18:19.833988Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [20:381:2347] message: TxId: 103 2025-06-24T21:18:19.834095Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 103 ready parts: 5/5 2025-06-24T21:18:19.834187Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:0 2025-06-24T21:18:19.834256Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:0 2025-06-24T21:18:19.834424Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:18:19.834509Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:1 2025-06-24T21:18:19.834537Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:1 2025-06-24T21:18:19.834576Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:18:19.834605Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:2 2025-06-24T21:18:19.834630Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:2 2025-06-24T21:18:19.834682Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:18:19.834715Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:3 2025-06-24T21:18:19.834738Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:3 2025-06-24T21:18:19.834774Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-24T21:18:19.834802Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 103:4 2025-06-24T21:18:19.834827Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 103:4 2025-06-24T21:18:19.834917Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 1 2025-06-24T21:18:19.836895Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:18:19.836999Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 6], at schemeshard: 72057594046678944 2025-06-24T21:18:19.837135Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-24T21:18:19.837223Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], 
at schemeshard: 72057594046678944 2025-06-24T21:18:19.837267Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-24T21:18:19.838803Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:18:19.839236Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:18:19.839970Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:18:19.840026Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:18:19.840139Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:18:19.842318Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:18:19.844865Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T21:18:19.844983Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [20:762:2663] 2025-06-24T21:18:19.845344Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 103 2025-06-24T21:18:19.846104Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable/Stream" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:18:19.846554Z node 20 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable/Stream" took 515us result status StatusPathDoesNotExist 2025-06-24T21:18:19.846831Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/Index/indexImplTable/Stream\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table/Index/indexImplTable\' (id: [OwnerId: 72057594046678944, LocalPathId: 4])" Path: "/MyRoot/Table/Index/indexImplTable/Stream" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:18:19.847823Z node 20 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable/Stream/streamImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:18:19.848225Z node 20 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable/Stream/streamImpl" took 453us result status StatusPathDoesNotExist 2025-06-24T21:18:19.848539Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/Index/indexImplTable/Stream/streamImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table/Index/indexImplTable\' (id: [OwnerId: 72057594046678944, LocalPathId: 4])" Path: "/MyRoot/Table/Index/indexImplTable/Stream/streamImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> KqpIndexLookupJoin::CheckCastInt64ToUint64+StreamLookupJoin+NotNull [GOOD] |96.2%| [TA] $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/test-results/unittest/{meta.json ... results_accumulator.log} |96.2%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpJoinOrder::CanonizedJoinOrderLookupBug >> KqpIndexLookupJoin::LeftJoinOnlyLeftColumn+StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastInt64ToUint64+StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 2544, MsgBus: 14878 2025-06-24T21:18:02.999855Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628236171144769:2163];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:03.000105Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b51/r3tmp/tmpSLqZ73/pdisk_1.dat 2025-06-24T21:18:03.598926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:03.599019Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:03.609882Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:03.712636Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:03.719925Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628236171144644:2079] 1750799882953122 != 1750799882953125 TServer::EnableGrpc on GrpcPort 2544, node 1 2025-06-24T21:18:03.919733Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:03.919763Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:03.919771Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:03.919869Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:18:04.024350Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14878 TClient is connected to server localhost:14878 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:18:04.992380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:18:05.065559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:05.322080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:05.583425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:05.688181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:07.993690Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628236171144769:2163];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:08.012866Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:08.125992Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628261940950065:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:08.126123Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:08.504482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:08.586477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:08.634227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:08.714377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:08.755338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:08.831084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:08.910228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:09.024430Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628266235918035:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:09.024507Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:09.024830Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628266235918040:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:09.029224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:09.060416Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628266235918042:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:18:09.137097Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628266235918095:3430] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:10.746143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:10.834013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part ... ath existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:18:13.633007Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:13.633096Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:13.647280Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628282543709921:2079] 1750799893288296 != 1750799893288299 2025-06-24T21:18:13.669758Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:13.670688Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19785, node 2 2025-06-24T21:18:13.890570Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:13.890602Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:13.890610Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:13.890763Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15466 2025-06-24T21:18:14.351274Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15466 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:14.764347Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:14.772449Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:14.780587Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:14.912664Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:15.160725Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:15.303022Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:18:18.351854Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628282543710155:2243];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:18.351900Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:18.412597Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628304018548034:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:18.412686Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:18.550040Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:18.610476Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:18.668733Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:18.714545Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:18.806925Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:18.905194Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:19.043649Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:19.154166Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628308313516003:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:19.154262Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:19.154521Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628308313516008:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:19.158390Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:19.187276Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628308313516010:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:18:19.270626Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628308313516061:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:20.924507Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:21.049718Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS92-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 30998, MsgBus: 11252 2025-06-24T21:17:11.622672Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628016524105856:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:11.622752Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003bd0/r3tmp/tmpF8JOIJ/pdisk_1.dat 2025-06-24T21:17:12.160455Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628016524105675:2079] 1750799831569308 != 1750799831569311 2025-06-24T21:17:12.165424Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:12.167094Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:12.167191Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:12.174056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30998, node 1 2025-06-24T21:17:12.388812Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:12.388839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:12.388846Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:12.395006Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:12.603042Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server 
localhost:11252 TClient is connected to server localhost:11252 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:13.483631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:13.503834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:15.910967Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628033703975503:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:15.911225Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:15.913488Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628033703975515:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:15.918121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:15.933994Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628033703975517:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:16.007401Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628037998942865:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:16.372578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.534318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.577844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.607497Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628016524105856:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:16.607574Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:16.622693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.694916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.878414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.927925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:16.972705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:17.045515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:17.086708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:17.129098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:17.162545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:17.207476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:17.929986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
58.196418Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.201728Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.202933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.209381Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.210354Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.215700Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.216355Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.221557Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.222245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.223382Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.223898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.228109Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.228541Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.228740Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.229057Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.234534Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.235676Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.236908Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.239459Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.240576Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.241180Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.244374Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.244955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.247887Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.248447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.250234Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.250794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.253920Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.254506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.256328Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.257056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.260862Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.261581Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.262457Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.263038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.267028Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.267766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038613;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.268697Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.273572Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038613;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.274472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:17:58.282267Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:17:58.391424Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwtwy9c50psqqpgxsht5zh", SessionId: ydb://session/3?node_id=1&id=YmQ3YjY1NS04YzM5MTE1MC03YzI2NTQyMS0yZTZlOWZm, Slow query, duration: 38.189603s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:17:59.028306Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:17:59.028971Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:17:59.029371Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519628068063720685:2948];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-24T21:17:59.029821Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::TPCDS87+ColumnStore >> KqpJoin::JoinDupColumnRight >> KqpLimits::CancelAfterRwTx+useSink [GOOD] >> KqpLimits::CancelAfterRwTx-useSink >> KqpJoinOrder::SortingsByPrefixWithConstant+RemoveLimitOperator >> KqpJoinOrder::TPCDS87-ColumnStore [GOOD] >> KqpIndexLookupJoin::CheckCastUtf8ToString-StreamLookupJoin+NotNull [GOOD] >> KqpJoinOrder::SortingsSimpleOrderByPKAlias-RemoveLimitOperator [GOOD] >> KqpJoinOrder::CanonizedJoinOrderTPCH6 [GOOD] >> KqpJoinOrder::TestJoinHint1-ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS87-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 11217, MsgBus: 16148 2025-06-24T21:17:16.311079Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628041684736535:2172];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:16.311475Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b9f/r3tmp/tmphYVzEP/pdisk_1.dat 2025-06-24T21:17:16.913953Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:16.923327Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628041684736401:2079] 1750799836286312 != 1750799836286315 2025-06-24T21:17:16.927553Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:16.927669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:16.930166Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 
11217, node 1 2025-06-24T21:17:17.207762Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:17.207788Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:17.207794Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:17.207950Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:17.347306Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16148 TClient is connected to server localhost:16148 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:18.145055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:18.163839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:20.345409Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628058864606226:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:20.345574Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:20.346096Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628058864606238:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:20.350232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:20.365999Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628058864606240:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:20.455706Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628058864606293:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:20.963381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:21.092848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:21.139392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:21.175829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:21.216582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:21.299260Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628041684736535:2172];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:21.299371Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:21.399106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:21.446785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:21.497687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:21.551218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:21.630610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:21.733389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:21.779744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:21.835025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:22.780520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
hod=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.592905Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.593515Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.601914Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.602541Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.606161Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.606807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.616501Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.617157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038530;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.626106Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038530;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.626106Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.626689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.626706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038566;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.633132Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038566;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.633910Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.636333Z node 1 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.636917Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.642656Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.643403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.648155Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.648760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038540;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.656533Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.657203Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.658944Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038540;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.659665Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038518;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.663370Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.664073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.665572Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038518;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.666263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038522;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.669981Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.670721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:00.671727Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038522;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.676495Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:00.851626Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwv1x13q3yhf5g1z97k7qj", SessionId: ydb://session/3?node_id=1&id=YzQ1ZWU0YzAtZjFiYWEyMy05NTgzZmNkLTk4YzcyOGVj, Slow query, duration: 35.569567s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:01.645527Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:01.647069Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519628093224351082:2931];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038629; 2025-06-24T21:18:01.648013Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:01.649601Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:23.662311Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwwghsdntj4b1t3xbk03cd", SessionId: ydb://session/3?node_id=1&id=YzQ1ZWU0YzAtZjFiYWEyMy05NTgzZmNkLTk4YzcyOGVj, Slow query, duration: 10.612178s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "PRAGMA TablePathPrefix='/Root/test/ds';\n\n-- NB: Subquerys\n$bla1 = (select distinct\n COALESCE(c_last_name,'') as c_last_name,\n COALESCE(c_first_name,'') as c_first_name,\n COALESCE(cast(d_date as date), cast(0 as Date)) as d_date\n from store_sales as store_sales\n cross join date_dim as date_dim\n cross join customer as customer\n where store_sales.ss_sold_date_sk = date_dim.d_date_sk\n and store_sales.ss_customer_sk = 
customer.c_customer_sk\n and d_month_seq between 1221 and 1221+11);\n\n$bla2 = ((select distinct\n COALESCE(c_last_name,'') as c_last_name,\n COALESCE(c_first_name,'') as c_first_name,\n COALESCE(cast(d_date as date), cast(0 as Date)) as d_date\n from catalog_sales as catalog_sales\n cross join date_dim as date_dim\n cross join customer as customer\n where catalog_sales.cs_sold_date_sk = date_dim.d_date_sk\n and catalog_sales.cs_bill_customer_sk = customer.c_customer_sk\n and d_month_seq between 1221 and 1221+11)\n union all\n (select distinct\n COALESCE(c_last_name,'') as c_last_name,\n COALESCE(c_first_name,'') as c_first_name,\n COALESCE(cast(d_date as date), cast(0 as Date)) as d_date\n from web_sales as web_sales\n cross join date_dim as date_dim\n cross join customer as customer\n where web_sales.ws_sold_date_sk = date_dim.d_date_sk\n and web_sales.ws_bill_customer_sk = customer.c_customer_sk\n and d_month_seq between 1221 and 1221+11));\n\n-- start query 1 in stream 0 using template query87.tpl and seed 1819994127\nselect count(*)\nfrom $bla1 bla1 left only join $bla2 bla2 using (c_last_name, c_first_name, d_date)\n;\n\n-- end query 1 in stream 0 using template query87.tpl", parameters: 0b ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastUtf8ToString-StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 22775, MsgBus: 31797 2025-06-24T21:18:11.209384Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628276157307137:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:11.209423Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b44/r3tmp/tmp4Y0oyi/pdisk_1.dat 2025-06-24T21:18:11.779016Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:11.779131Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:11.788034Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:11.797737Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22775, node 1 2025-06-24T21:18:11.907728Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:11.907750Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:11.907757Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:11.907856Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31797 2025-06-24T21:18:12.221669Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31797 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:12.837911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:12.881662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:13.056750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:13.309068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:13.432314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:15.688157Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628293337177928:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:15.688294Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:16.089507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:16.143086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:16.212131Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628276157307137:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:16.212307Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:16.227358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:16.321164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:16.374702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:16.460183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:16.526568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:16.647678Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519628297632145896:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:16.647802Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:16.648096Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628297632145901:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:16.653041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:16.687370Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628297632145903:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:18:16.779073Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628297632145956:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:18.414512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:18.496110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /ho ... path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:18:20.778652Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:20.778733Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:20.779933Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:20.782457Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:20.785576Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628315848874136:2079] 1750799900347923 != 1750799900347926 TServer::EnableGrpc on GrpcPort 20393, node 2 2025-06-24T21:18:20.999701Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:20.999729Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:20.999738Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:20.999847Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8014 2025-06-24T21:18:21.370865Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8014 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:21.874827Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:21.882134Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:18:21.894777Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:22.027820Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:22.284569Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:22.399215Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:24.955259Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628333028744935:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:24.955342Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:25.028855Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.068359Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.106314Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.146237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.187931Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.262895Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.317155Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.375514Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628315848874345:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:25.375756Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:25.404091Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519628337323712888:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:25.404186Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:25.404551Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628337323712893:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:25.409462Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:25.423453Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628337323712895:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:18:25.520769Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628337323712946:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:27.075375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:27.186535Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsSimpleOrderByPKAlias-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 65468, MsgBus: 1796 2025-06-24T21:17:35.223769Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628123010669279:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:35.223955Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b7d/r3tmp/tmppiR6c3/pdisk_1.dat 2025-06-24T21:17:35.730814Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:35.730923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:35.734186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:35.782341Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65468, node 1 2025-06-24T21:17:35.931130Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:35.931181Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:35.931193Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:35.931323Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:36.211274Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1796 TClient is connected to server localhost:1796 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:36.842802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:39.543815Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628140190538887:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:39.543928Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:39.544200Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628140190538899:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:39.548250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:39.564515Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628140190538901:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:39.634952Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628140190538952:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:40.022662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:40.147874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:40.224583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:40.227375Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628123010669279:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:40.227438Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:40.293726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:40.336089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:40.623574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:40.660128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:40.690226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:40.728287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:40.760295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:40.861928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:40.892463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:40.928929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:41.665224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:41.703259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself ... 
33731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.536153Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038574;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.536723Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038472;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.546977Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038472;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.548035Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.549640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.552000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038539;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.562726Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038539;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.564572Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.565927Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038554;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.568050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.578714Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.579492Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038526;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.585370Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038554;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.585984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.594075Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038526;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.594706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038518;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.596524Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.597237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038524;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.609198Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038518;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.609769Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038552;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.612006Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038524;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.612571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038516;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.618908Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038516;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.619810Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038492;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.623934Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038552;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.624937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.626394Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038492;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.627972Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038454;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.634954Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.636487Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038454;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.639929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.644126Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.650440Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.651026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038538;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.652583Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.654244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038484;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.661017Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038484;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.661763Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:23.664269Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038538;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.667914Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:23.890988Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwvm7q5ne5988tvscqrfqq", SessionId: ydb://session/3?node_id=1&id=YTI0Y2FjYTYtZmIzZjY1ZjMtNzE2MzRmYjAtZmMyMDM5ODA=, Slow query, duration: 39.832373s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:24.280575Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:24.281098Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:24.281775Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519628286219453275:5999];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038331; 2025-06-24T21:18:24.282193Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpStats::SysViewClientLost [FAIL] >> KqpStats::SysViewCancelled ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TestJoinHint1-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 13141, MsgBus: 16948 2025-06-24T21:17:36.470233Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628127627207419:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:36.502025Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b7a/r3tmp/tmpI5ooXn/pdisk_1.dat 2025-06-24T21:17:37.235357Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:37.235466Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:37.281314Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:37.284702Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628127627207391:2079] 1750799856465170 != 1750799856465173 2025-06-24T21:17:37.296702Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13141, node 1 2025-06-24T21:17:37.527179Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:17:37.559778Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:37.559806Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: (empty maybe) 2025-06-24T21:17:37.559813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:37.559927Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16948 TClient is connected to server localhost:16948 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:38.570273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:41.208271Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628149102044515:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:41.208392Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:41.211234Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628149102044527:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:41.215683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:41.231362Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628149102044529:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:41.335919Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628149102044580:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:41.470753Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628127627207419:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:41.470830Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:41.779859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:41.931112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:41.985707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:42.024650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:42.074638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:42.383598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:42.429539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:42.477579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:42.509442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:42.564521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:42.629786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:42.678719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:42.739373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:43.625498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/ ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.385497Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.386023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.390721Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.391611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.397749Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.399911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.403603Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.404205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.407223Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.407966Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038515;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.419577Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038515;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.419632Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.454036Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.458142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 
2025-06-24T21:18:24.460616Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.461208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.464525Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.465265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.469418Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.470604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.473336Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.474010Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.476894Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.477590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.479761Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.480463Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.483947Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.484601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.485961Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.486577Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.490738Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.491688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.492262Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.492859Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.497768Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.498047Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.498453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.498584Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.506278Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.507010Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.508834Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.514126Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.683693Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwvpe06haqr13w7taaqjnh", SessionId: 
ydb://session/3?node_id=1&id=MjU4NzE4YzktNWVjMTIyMjAtZjE3MmQ1MWUtZGRhYzI1YTI=, Slow query, duration: 38.378878s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:25.214788Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:25.215519Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:25.219017Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpIndexLookupJoin::LeftJoinOnlyLeftColumn+StreamLookup [GOOD] >> KqpIndexLookupJoin::LeftJoinOnlyLeftColumn-StreamLookup >> KqpJoinOrder::TestJoinHint2+ColumnStore [GOOD] >> KqpJoinOrder::FiveWayJoinWithPredsAndEquiv-ColumnStore >> KqpJoinOrder::TestJoinOrderHintsManyHintTrees ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH6 [GOOD] Test command err: Trying to start YDB, gRPC: 23943, MsgBus: 5462 2025-06-24T21:16:49.266534Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627925268962840:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:49.266595Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003c35/r3tmp/tmppHAZg5/pdisk_1.dat 2025-06-24T21:16:49.797818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:49.798915Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:49.805537Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:49.807987Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:49.808683Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627925268962741:2079] 1750799809258264 != 1750799809258267 TServer::EnableGrpc on GrpcPort 23943, node 1 2025-06-24T21:16:50.097649Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:50.097682Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty 
maybe) 2025-06-24T21:16:50.097688Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:50.097823Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:16:50.275362Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5462 TClient is connected to server localhost:5462 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:51.028153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:51.060179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:52.656557Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627938153865286:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:52.657014Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627938153865274:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:52.657094Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:52.664489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:52.682766Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627938153865288:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:16:52.759009Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627938153865339:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:53.572720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:16:53.804046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519627942448832877:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:16:53.804303Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519627942448832877:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:16:53.804563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519627942448832877:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:16:53.804672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519627942448832877:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:16:53.804786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519627942448832877:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:16:53.804910Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519627942448832877:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:16:53.805031Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519627942448832877:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:16:53.805135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519627942448832877:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:16:53.805253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519627942448832877:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:16:53.805362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[1:7519627942448832877:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:16:53.805481Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519627942448832877:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:16:53.826141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627942448832867:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:16:53.826213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627942448832867:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:16:53.826437Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627942448832867:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:16:53.826555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627942448832867:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:16:53.826701Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627942448832867:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:16:53.826816Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627942448832867:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:16:53.826992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627942448832867:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:16:53.827105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627942448832867:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:16:53.827217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627942448832867:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; ... 
.493745Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039239;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:16.501402Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039290;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.502072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:16.516433Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039239;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.517121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039230;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:16.524969Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.525606Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:16.539595Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039230;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.540293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:16.548677Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.549357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039294;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:16.563396Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.564103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039332;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:16.572762Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039294;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.573448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:16.578873Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039332;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.587888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039268;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:16.596658Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.597342Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039234;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:16.603017Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039268;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.615862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039296;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:16.620595Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039234;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.621250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039241;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:16.622291Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039296;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.622973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039225;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:16.626730Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039241;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.627701Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039252;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:16.633285Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039252;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.633996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:16.634692Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039225;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.635343Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039211;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:16.647431Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.648152Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:16.653596Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.654469Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039211;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.871303Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039247;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:16.877392Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039247;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:16.973731Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwvdb235kk7nx9nn1vssvs", SessionId: ydb://session/3?node_id=1&id=ZmViYTI5NGUtZTNiM2RjMjEtOTc2ZmQwNzMtMzRkZDcwNzA=, Slow query, duration: 39.979289s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:17.396668Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:17.396942Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519628140017364461:7818];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 
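The KQP_SLOW_LOG entries in this section all report the same setup query text with \n escapes. For readability only, the statements unescape to the DDL below (reproduced verbatim from the log text; whitespace is approximate, and this is not additional test output):

CREATE TABLE t1 (
  id1 Int32 NOT NULL,
  PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
  id2 Int64 NOT NULL,
  t1_id1 Int64 NOT NULL,
  -- random_field2 Int32
  PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
  id3 Int16 NOT NULL,
  -- random_field3 Int32
  PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);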
2025-06-24T21:18:17.397258Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:17.398408Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:18.106439Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyhwwn03cyh45h4qq6xcsg71, SessionId: CompileActor 2025-06-24 21:18:18.105 WARN ydb-core-kqp-ut-join(pid=877454, tid=0x00007FE15EAF5640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed 2025-06-24T21:18:19.533273Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyhwwpbrdjdwktfj95vx77b2, SessionId: CompileActor 2025-06-24 21:18:19.532 WARN ydb-core-kqp-ut-join(pid=877454, tid=0x00007FE15E2E6640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed >> KqpJoinOrder::DatetimeConstantFold+ColumnStore >> OlapEstimationRowsCorrectness::TPCH9 >> KqpJoinOrder::ShuffleEliminationOneJoin [GOOD] >> KqpJoinOrder::ShuffleEliminationManyKeysJoinPredicate [GOOD] >> KqpJoinOrder::TPCDS90+ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TestJoinHint2+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 10051, MsgBus: 13813 2025-06-24T21:16:51.348992Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627932243916076:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:51.349361Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003c12/r3tmp/tmpYN2TGq/pdisk_1.dat 2025-06-24T21:16:51.828072Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627932243916058:2079] 1750799811347851 != 1750799811347854 2025-06-24T21:16:51.842327Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10051, node 1 2025-06-24T21:16:51.850971Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:51.851140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:51.854196Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:51.984014Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:51.984037Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty 
maybe) 2025-06-24T21:16:51.984059Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:51.985191Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13813 2025-06-24T21:16:52.369410Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13813 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:52.660829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:52.671869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:54.902582Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627945128818590:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:54.902715Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:54.902820Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627945128818601:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:54.908240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:54.919323Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627945128818604:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:16:55.004043Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627945128818655:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:55.415217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:16:55.763999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519627949423786206:2319];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:16:55.764257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519627949423786206:2319];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:16:55.764560Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519627949423786208:2321];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:16:55.764605Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519627949423786208:2321];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:16:55.764642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519627949423786206:2319];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:16:55.764859Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519627949423786206:2319];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:16:55.764893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519627949423786208:2321];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:16:55.765336Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519627949423786206:2319];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:16:55.765413Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519627949423786208:2321];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:16:55.765524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037900;self_id=[1:7519627949423786206:2319];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:16:55.765598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519627949423786208:2321];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:16:55.765666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519627949423786206:2319];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:16:55.765761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519627949423786208:2321];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:16:55.765827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519627949423786206:2319];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:16:55.765898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519627949423786208:2321];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:16:55.765965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519627949423786206:2319];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:16:55.766054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519627949423786208:2321];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:16:55.766092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519627949423786206:2319];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:16:55.766212Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519627949423786208:2321];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:16:55.766221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519627949423786206:2319];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMet ... 
93276Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.596104Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.596719Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.602617Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.611588Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.612121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.622015Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.622572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.623698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.629436Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.630094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.641595Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.642135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.643980Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.645268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.656883Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.657487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.663122Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.663691Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.664346Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.670394Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.671073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.671915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.682012Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.688692Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.689241Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.691854Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.697934Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.698621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.701953Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.702801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.712158Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.713074Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.720436Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.721207Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.721841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.728000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.731618Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.735482Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:20.939928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:20.946517Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.007487Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwvhbp9mdkdy9db7930hq5", SessionId: ydb://session/3?node_id=1&id=NjE5YmExNjctOTdiZjFkNjQtZGM3NDAzOWUtNzY0MjI4NGI=, Slow query, duration: 39.896294s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:21.355320Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:21.355930Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:21.356676Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519628211416836649:9207];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-24T21:18:21.357134Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::ShuffleEliminationDifferentJoinPredicateKeyTypeCorrectness2 [GOOD] >> KqpJoin::JoinDupColumnRight [GOOD] >> KqpJoin::JoinDupColumnRightPure >> KqpJoinOrder::SortingsWithLookupJoin1+RemoveLimitOperator ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::ShuffleEliminationOneJoin [GOOD] Test command err: Trying to start YDB, gRPC: 21991, MsgBus: 19814 2025-06-24T21:16:52.579263Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627937373247781:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:52.579308Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003c0c/r3tmp/tmplbfI5N/pdisk_1.dat 2025-06-24T21:16:52.994609Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:52.995830Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627937373247760:2079] 1750799812577665 != 1750799812577668 TServer::EnableGrpc on GrpcPort 21991, node 1 2025-06-24T21:16:53.023418Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:53.023523Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:53.027700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:53.047461Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:53.047489Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: 
(empty maybe) 2025-06-24T21:16:53.047511Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:53.047640Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19814 TClient is connected to server localhost:19814 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:53.586580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:16:53.608341Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:16:53.614178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:55.833544Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627950258150287:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:55.833701Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:55.834004Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627950258150299:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:55.839539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:55.857139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:16:55.858821Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627950258150301:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:16:55.947641Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627950258150353:2333] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:56.266691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:16:56.497911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519627954553117904:2320];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:16:56.497908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519627954553117896:2312];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:16:56.498144Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519627954553117904:2320];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:16:56.498419Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519627954553117904:2320];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:16:56.498536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519627954553117904:2320];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:16:56.498610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519627954553117896:2312];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:16:56.498645Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519627954553117904:2320];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:16:56.498750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519627954553117904:2320];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:16:56.498835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519627954553117896:2312];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:16:56.498886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037901;self_id=[1:7519627954553117904:2320];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:16:56.498944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519627954553117896:2312];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:16:56.499001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519627954553117904:2320];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:16:56.499042Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519627954553117896:2312];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:16:56.499131Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519627954553117904:2320];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:16:56.499169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519627954553117896:2312];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:16:56.499332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519627954553117896:2312];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:16:56.499435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519627954553117896:2312];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:16:56.499436Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519627954553117904:2320];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:16:56.499555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519627954553117896:2312];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:16:56.499577Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037 ... 
1961Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.567898Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.568630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.569537Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039259;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.570044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.575596Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.576265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.576360Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.576981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.583182Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.583872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.585029Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.585771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.590453Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.591137Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.591475Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.592083Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.599974Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.600761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.601995Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.602497Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.611489Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.612044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.617413Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.618028Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.619819Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.620583Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.630808Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.631837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.631895Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.632998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039304;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.638600Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.639335Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.645831Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.646325Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039304;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.646878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.647000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.654798Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.658301Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.659945Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:21.666805Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:21.943758Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwvhdz0neqvcrs84bqts3e", SessionId: ydb://session/3?node_id=1&id=M2MwMDI5NDAtMzQ3MGE5ODItZTc4NzViZDItODY3ZDBiNzI=, Slow query, duration: 40.759249s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:22.486767Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:22.487367Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519628263790820119:11183];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-24T21:18:22.493690Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:22.494870Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::ShuffleEliminationManyKeysJoinPredicate [GOOD] Test command err: Trying to start YDB, gRPC: 7501, MsgBus: 1964 2025-06-24T21:16:49.263686Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627923612034059:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:49.263790Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003c30/r3tmp/tmpSMo2xH/pdisk_1.dat 2025-06-24T21:16:49.794696Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627923612034032:2079] 1750799809254371 != 1750799809254374 2025-06-24T21:16:49.805896Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:49.825271Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:49.825403Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:49.836483Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7501, node 1 2025-06-24T21:16:50.091036Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:50.091076Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:50.091095Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:50.091285Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:16:50.278028Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1964 TClient is connected to server localhost:1964 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:51.044955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:52.770483Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627936496936566:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:52.770659Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:52.771034Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627936496936578:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:52.775847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:52.791410Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627936496936580:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:16:52.875918Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627936496936631:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:53.569977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:16:53.785533Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627940791904168:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:16:53.785816Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627940791904168:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:16:53.786107Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627940791904168:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:16:53.786235Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627940791904168:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:16:53.786338Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627940791904168:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:16:53.786464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627940791904168:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:16:53.786607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627940791904168:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:16:53.786732Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627940791904168:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:16:53.786850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627940791904168:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:16:53.787004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519627940791904168:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:16:53.787159Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519627940791904168:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:16:53.788403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627940791904162:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:16:53.788450Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627940791904162:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:16:53.788646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627940791904162:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:16:53.788776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627940791904162:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:16:53.788899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627940791904162:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:16:53.788998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627940791904162:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:16:53.789142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627940791904162:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:16:53.789253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627940791904162:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:16:53.789344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627940791904162:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:16:53.789440Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627940791904162:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execu ... 
48758Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.253886Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.258898Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.259756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.263641Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.268704Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.269324Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.272890Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.273400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.278321Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.281592Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.282315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.283447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.292030Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.292669Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.295531Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.296116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.308158Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.308804Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.314222Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.314727Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.314915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039291;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.315714Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.325523Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039291;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.326167Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.329250Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.329863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.339481Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.340049Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.342482Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.343067Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.350755Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.354838Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.355762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.359630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.365392Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.365916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.368903Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.369802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:22.383073Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.388043Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:22.658069Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwvecve12ev29kqg27y9z2", SessionId: ydb://session/3?node_id=1&id=NGNhYmU0ODYtMTIyYTljMjgtYjhjMjA3NGQtM2FkMjljNWM=, Slow query, duration: 44.582547s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:23.107791Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:23.108386Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:23.109065Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519628142655402764:7649];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-24T21:18:23.109504Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::ShuffleEliminationDifferentJoinPredicateKeyTypeCorrectness2 [GOOD] Test command err: Trying to start YDB, gRPC: 18937, MsgBus: 24220 2025-06-24T21:17:37.243867Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628130625239543:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:37.286603Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b78/r3tmp/tmplPfetp/pdisk_1.dat 2025-06-24T21:17:37.986924Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:37.988135Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628130625239514:2079] 1750799857220145 != 1750799857220148 2025-06-24T21:17:38.049704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:38.049808Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:38.051960Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18937, node 1 2025-06-24T21:17:38.292375Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:17:38.303657Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:38.303675Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:17:38.303682Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:38.303779Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24220 TClient is connected to server localhost:24220 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:39.349388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:39.383502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:41.732557Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628147805109341:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:41.732681Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:41.732871Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628147805109353:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:41.737011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:41.754805Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628147805109355:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:41.847941Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628147805109406:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:42.246431Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628130625239543:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:42.246501Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:42.267831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:42.519481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:42.606182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:42.666067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:42.734499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:42.943605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:42.985940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:43.019793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:43.098575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:43.144300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:43.172507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:43.243000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:43.280712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:44.032206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
47881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.854047Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.854748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.858907Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038507;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.863647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.868992Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.869514Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038491;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.873470Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.874017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038581;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.883956Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038491;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.884547Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038531;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.890768Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038531;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.891857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038443;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.897632Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038443;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.898342Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.903813Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.904493Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.909546Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.910199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.915100Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.915774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.920983Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.921774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038471;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.927073Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038471;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.927879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.932772Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.933339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038489;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.938310Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038489;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.938829Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038441;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.952001Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038441;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.952559Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.964835Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.965610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038473;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.970994Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038473;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.971820Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038445;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.979452Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038445;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.980525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038529;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.986378Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038529;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.987050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:24.992975Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:24.999263Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038581;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:25.223672Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwvpq98tqs547e451kmgw1", SessionId: ydb://session/3?node_id=1&id=YmZjYWI3MTItMTU4ODJlZDAtMzE0OTEwNDktMzQzMjZhNDg=, Slow query, duration: 38.621308s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:25.674796Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:25.675425Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:25.676351Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519628233704471425:4451];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-24T21:18:25.676827Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpAnalyze::AnalyzeTable+ColumnStore [FAIL] >> KqpAnalyze::AnalyzeTable-ColumnStore >> KqpJoinOrder::TestJoinOrderHintsSimple-ColumnStore [GOOD] >> KqpIndexLookupJoin::Left+StreamLookup >> KqpJoin::TwoJoinsWithQueryService >> KqpJoinOrder::ShuffleEliminationDifferentJoinPredicateKeyTypeCorrectness1 >> KqpLimits::CancelAfterRoTx [GOOD] >> KqpLimits::CancelAfterRoTxWithFollowerStreamLookup >> KqpJoinOrder::SortingsDifferentDirs+RemoveLimitOperator [GOOD] >> KqpIndexLookupJoin::LeftJoinOnlyLeftColumn-StreamLookup [GOOD] >> KqpJoinOrder::SortingsWithLookupJoin1-RemoveLimitOperator [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TestJoinOrderHintsSimple-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 21525, MsgBus: 11077 2025-06-24T21:17:49.359690Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628182488623154:2186];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:49.359892Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b6a/r3tmp/tmp3dDQ2l/pdisk_1.dat 2025-06-24T21:17:50.126985Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:50.143332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:50.148429Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:50.239305Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:50.242797Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription 
[1:7519628182488623006:2079] 1750799869308398 != 1750799869308401 TServer::EnableGrpc on GrpcPort 21525, node 1 2025-06-24T21:17:50.400549Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:17:50.511760Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:50.511779Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:50.511790Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:50.511897Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11077 TClient is connected to server localhost:11077 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:51.637184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:54.339517Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628182488623154:2186];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:54.339583Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:54.621180Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628203963460132:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:54.621301Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:54.621762Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628203963460144:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:54.625836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:54.643430Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628203963460146:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:54.730790Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628203963460197:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:55.083309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.221363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.304240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.338869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.368146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.638472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.695601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.774256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.820747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.863548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.911120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.964927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:56.045330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:57.023752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/ ... 
4976710714; 2025-06-24T21:18:34.346471Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.351714Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.357152Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.357685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.362058Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.362555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.367030Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.369896Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.370399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038602;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.375626Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.379575Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038602;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.380170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038604;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.385192Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.385743Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.390664Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.392563Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038604;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.393064Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.396077Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038582;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.402032Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.402659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.405185Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038582;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.405725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.411711Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.412190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.416187Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.416630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.423080Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.425104Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.425653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.427855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.434156Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.435616Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.436448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.440760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.445278Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.446238Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.449870Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.460652Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.517679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038564;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:34.539988Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038564;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:34.639429Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhww36kdpsjbrhq36g4mxsn", SessionId: ydb://session/3?node_id=1&id=YzE1Y2Q3NTYtNzI2MTgzMzktODU4MzM5ZjktZDZmMDUyYjY=, Slow query, duration: 35.259475s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 
NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:35.012683Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:35.013303Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:35.013675Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519628294157792528:4952];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-24T21:18:35.014145Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
: Warning: Execution, code: 1060
: Warning: Unapplied hint: Rows(R T # 1), code: 4534
: Warning: Execution, code: 1060
: Warning: Unapplied hint: Rows(R T # 1), code: 4534 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftJoinOnlyLeftColumn-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 7322, MsgBus: 4828 2025-06-24T21:18:23.589367Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628325569554860:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:23.589892Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b2d/r3tmp/tmphtZHxo/pdisk_1.dat 2025-06-24T21:18:24.138255Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:24.138360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:24.147974Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:24.158061Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:24.168034Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628325569554662:2079] 1750799903427642 != 1750799903427645 TServer::EnableGrpc on GrpcPort 7322, node 1 2025-06-24T21:18:24.431281Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:18:24.460488Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:24.460527Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:24.460537Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:24.460663Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4828 TClient is connected to server localhost:4828 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:18:25.318238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:25.339721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:25.365035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:25.607290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:25.919356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:26.018491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:28.238064Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628347044392775:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:28.238186Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:28.504215Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628325569554860:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:28.504295Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:28.676702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:28.718299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:28.762827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:28.842212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:28.884705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:28.980148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:29.063516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:29.153758Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519628351339360741:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:29.153864Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:29.154097Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628351339360746:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:29.158730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:29.178504Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628351339360748:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:18:29.264600Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628351339360801:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:31.171999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work ... RN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:34.492022Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27510 2025-06-24T21:18:35.063272Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27510 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:35.289783Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:35.303541Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:35.314615Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:18:35.417605Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:35.611461Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:35.697606Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:38.676336Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628391764770683:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:38.676433Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:38.793418Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:38.911947Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:38.999785Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:39.067785Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:39.166952Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:39.269290Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:39.352123Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:39.498885Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628396059738654:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:39.498982Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:39.499262Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628396059738659:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:39.504211Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:39.521970Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628396059738661:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:18:39.599058Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628396059738712:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:41.345594Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:41.389613Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:41.465863Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:41.508409Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:41.558579Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:41.609316Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsDifferentDirs+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 5184, MsgBus: 15006 2025-06-24T21:17:48.734401Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628178883715185:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:48.734720Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b6b/r3tmp/tmpEaGC4E/pdisk_1.dat 2025-06-24T21:17:49.505588Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:49.515211Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628178883715001:2079] 1750799868677439 != 1750799868677442 2025-06-24T21:17:49.524972Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:49.525057Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:49.535097Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5184, node 1 2025-06-24T21:17:49.711284Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:17:49.891877Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:49.891898Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:49.891905Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:49.892017Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15006 TClient is connected to server localhost:15006 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:51.170311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:17:51.217142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:53.723257Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628178883715185:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:53.744279Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:54.131871Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628204653519428:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:54.131970Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:54.132049Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628204653519440:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:54.136067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:54.149274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:17:54.149939Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628204653519442:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:54.250294Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628204653519493:2341] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:54.683965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:54.896925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:54.935904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:54.978152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.014258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.245607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.284284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.367301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.414996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.455589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.530825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.621684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:55.673916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operatio ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.388359Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038532;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.389437Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038500;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.389963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.395925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038488;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.400218Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.400758Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038550;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.406516Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038488;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.407101Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038524;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.414694Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038550;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.423404Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038542;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.426449Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038524;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.427033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038522;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.444190Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038542;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.444723Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 
2025-06-24T21:18:37.450388Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.451290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038514;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.452845Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038522;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.453330Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.457183Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038514;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.457760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038576;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.458715Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.459379Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.463785Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038576;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.464479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038518;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.464839Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.465266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038566;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.470455Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038518;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.475535Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038566;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.476148Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038502;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.480099Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038498;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.485674Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038502;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.486251Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038540;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.491829Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038540;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.492414Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038588;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.493669Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038498;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.494301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038530;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.500081Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038588;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.501610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038536;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.505341Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038530;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.521585Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038536;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.544001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038473;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:37.558295Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038473;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:37.667581Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhww2sx3pchqkn7pb6jgh56", SessionId: 
ydb://session/3?node_id=1&id=OTQ4NzJlNjUtOTdkMzNkMDgtODA4MThhOTgtYTNiMWE1NjY=, Slow query, duration: 38.693188s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:38.148445Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:38.149028Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:38.149596Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpLimits::TooBigQuery-useSink [GOOD] >> KqpLimits::WaitCAsStateOnAbort ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoin1-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 12057, MsgBus: 5825 2025-06-24T21:17:52.753785Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628195683839956:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:52.754443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b62/r3tmp/tmpbUxDqH/pdisk_1.dat 2025-06-24T21:17:53.530471Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:53.530921Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628195683839758:2079] 1750799872686154 != 1750799872686157 2025-06-24T21:17:53.558340Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:53.558472Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:53.561353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12057, node 1 2025-06-24T21:17:53.952859Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:17:53.953480Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:53.953487Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:17:53.953493Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:53.953600Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5825 TClient is connected to server localhost:5825 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:54.938358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:54.963118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:57.583958Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628217158676885:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:57.584100Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:57.587233Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628217158676897:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:57.591630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:57.610971Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628217158676899:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:57.683657Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628217158676950:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:57.755121Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628195683839956:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:57.755242Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:57.987373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:58.127239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:58.161238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:58.213837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:58.254465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:58.455608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:58.507365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:58.557585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:58.588723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:58.625000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:58.664076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:58.732013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:58.784464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:59.511574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperat ... 
04983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.608562Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.609054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038596;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.618544Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.622056Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038596;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.622552Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.623758Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.633135Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.633734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.636295Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.636780Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.642922Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.646026Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.646608Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.647984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.657343Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.657987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.659993Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.660473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038517;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.669523Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038517;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.670199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.671634Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.672090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.681211Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.681684Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.686715Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.687625Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.688059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.693406Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.694029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.695939Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.705271Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.706836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.707806Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.708468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.717883Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.718464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038556;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.720293Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.720923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:35.730095Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:35.732082Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038556;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:36.108747Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhww5zcakkgvhqvgmxrtxz6", SessionId: ydb://session/3?node_id=1&id=N2ZkOGQxMDQtYzY5ZDE5MDMtZjI4NzRiMGUtMmRmMzY5NTc=, Slow query, duration: 33.887333s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:36.614094Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:36.614523Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519628251518421848:2992];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-24T21:18:36.614893Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:36.615539Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoin::JoinDupColumnRightPure [GOOD] >> KqpJoinOrder::SortingsSimpleOrderByAliasIndexDesc-RemoveLimitOperator >> KqpJoinOrder::TPCDS23 >> KqpIndexLookupJoin::SimpleLeftSemiJoin-StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::JoinDupColumnRightPure [GOOD] Test command err: Trying to start YDB, gRPC: 23728, MsgBus: 25307 2025-06-24T21:18:26.771983Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628340272966637:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:26.801597Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b28/r3tmp/tmpqk0w23/pdisk_1.dat 2025-06-24T21:18:27.569385Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628340272966603:2079] 1750799906741642 != 1750799906741645 2025-06-24T21:18:27.629420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:27.629519Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:27.647721Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:27.656117Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23728, node 1 2025-06-24T21:18:27.808117Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:18:28.031811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use 
file: (empty maybe) 2025-06-24T21:18:28.031835Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:28.031846Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:28.031980Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25307 TClient is connected to server localhost:25307 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:29.102416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:29.163188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:29.472669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:29.808346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:18:29.947967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:31.775272Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628340272966637:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:31.861740Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:32.412093Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628366042772015:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:32.412241Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:32.769953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:32.851693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:32.900917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:32.940876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:32.981391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:33.042180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:33.100577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:33.215319Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628370337739971:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:33.215434Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:33.219290Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628370337739976:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:33.224045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:33.244145Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628370337739978:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:18:33.315602Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628370337740029:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:34.816537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:34.910737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... -06-24T21:18:38.788600Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:38.788695Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:38.796764Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27587, node 2 2025-06-24T21:18:38.987821Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:38.987840Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:38.987847Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:38.987974Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10960 2025-06-24T21:18:39.459332Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10960 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:18:39.836530Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:39.849300Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:39.857349Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:39.971341Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:40.215665Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:40.348016Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:43.239242Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628414019618422:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:43.239335Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:43.318757Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:43.365836Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:43.429330Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:43.439731Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628392544780463:2161];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:43.452647Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:43.485500Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:43.529075Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:43.591664Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:43.678113Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:43.804678Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519628414019619085:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:43.804774Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:43.804983Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628414019619090:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:43.809479Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:43.827190Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628414019619092:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:18:43.918334Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628414019619143:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:45.834180Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:45.906684Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:45.956999Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpFlipJoin::RightSemi_1 >> KqpJoin::TwoJoinsWithQueryService [GOOD] >> KqpAnalyze::AnalyzeTable-ColumnStore [GOOD] >> KqpExplain::AggGroupLimit >> KqpJoinOrder::CanonizedJoinOrderTPCH8 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::TwoJoinsWithQueryService [GOOD] Test command err: Trying to start YDB, gRPC: 64510, MsgBus: 9070 2025-06-24T21:18:44.085385Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628417690299390:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:44.085456Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003afd/r3tmp/tmpOP34wD/pdisk_1.dat 2025-06-24T21:18:44.671516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:44.671596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:44.728338Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:44.731454Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628417690299208:2079] 1750799924008748 != 1750799924008751 2025-06-24T21:18:44.741527Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64510, node 1 2025-06-24T21:18:45.067832Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or 
outdated, will use file: (empty maybe) 2025-06-24T21:18:45.067884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:45.067892Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:45.068012Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:18:45.083246Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9070 TClient is connected to server localhost:9070 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:46.258343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:46.293848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:48.608988Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628434870169034:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:48.609078Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:48.937193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:49.086575Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628417690299390:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:49.086651Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:49.179512Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628439165136439:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:49.179593Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:49.206806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:49.302021Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628439165136518:2317], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:49.302093Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:49.315644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:49.401012Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628439165136597:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:49.401081Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:49.402057Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628439165136602:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:49.406621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:49.422225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-24T21:18:49.422485Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628439165136604:2332], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-24T21:18:49.525778Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628439165136656:2494] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpFlipJoin::Inner_1 >> KqpJoinOrder::CanonizedJoinOrderTPCH4 [GOOD] >> KqpIndexLookupJoin::Left+StreamLookup [GOOD] >> KqpIndexLookupJoin::Left-StreamLookup >> KqpLimits::OutOfSpaceBulkUpsertFail [GOOD] >> KqpLimits::OutOfSpaceYQLUpsertFail+useSink >> KqpJoinOrder::FiveWayJoinWithConstantFold-ColumnStore [GOOD] >> KqpJoinOrder::SortingsComplexOrderBy+RemoveLimitOperator ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH8 [GOOD] Test command err: Trying to start YDB, gRPC: 24517, MsgBus: 19958 2025-06-24T21:16:49.269680Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627921768807773:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:49.269865Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003c25/r3tmp/tmpeBQj7M/pdisk_1.dat 2025-06-24T21:16:49.783364Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:49.783490Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:49.792992Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:49.830717Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:49.832870Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627921768807679:2079] 1750799809263343 != 1750799809263346 TServer::EnableGrpc on GrpcPort 24517, node 1 2025-06-24T21:16:50.099828Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:50.099884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:50.099895Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:50.100059Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:16:50.283631Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19958 TClient is connected to server localhost:19958 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:51.038104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:52.656515Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627934653710211:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:52.656725Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:52.657341Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627934653710223:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:52.664953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:52.682387Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627934653710225:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:16:52.756265Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627934653710276:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:53.572289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:16:53.804449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627938948677829:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:16:53.804449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519627938948677844:2324];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:16:53.804652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519627938948677844:2324];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:16:53.804789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627938948677829:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:16:53.805122Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519627938948677844:2324];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:16:53.805264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519627938948677844:2324];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:16:53.805415Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519627938948677844:2324];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:16:53.805560Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519627938948677844:2324];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:16:53.805674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519627938948677844:2324];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:16:53.805780Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037898;self_id=[1:7519627938948677844:2324];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:16:53.805890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519627938948677844:2324];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:16:53.805996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519627938948677844:2324];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:16:53.806094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519627938948677844:2324];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:16:53.811637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627938948677829:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:16:53.811862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627938948677829:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:16:53.811976Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627938948677829:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:16:53.812103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627938948677829:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:16:53.812232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627938948677829:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:16:53.812368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627938948677829:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:16:53.812473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627938948677829:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:16:53.812561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519627938948677829:2316];tablet_id=72075186224037888;process=TTxInitSchema:: ... 
:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:18.274458Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039364;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:18.274994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039370;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:18.278928Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:18.279615Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:18.280359Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039370;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:18.280989Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:18.285236Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:18.285839Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:18.286275Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:18.287178Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:18.291075Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:18.291628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:18.291748Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:18.292273Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:18.296964Z node 1 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:18.297111Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:18.297549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:18.298015Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:18.302311Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:18.302311Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:18.302871Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:18.302871Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:18.308143Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:18.308279Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:18.308822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:18.310196Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:18.313945Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:18.314860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:18.315602Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:18.316262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039374;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:18.321089Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:18.321300Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039374;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:18.321820Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:18.326786Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:18.599931Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwvc2vc64yf16x346cr5zb", SessionId: ydb://session/3?node_id=1&id=OGVhNWJhN2UtZjFkZTgwZWYtZTQ1YjYxOWUtM2U1NjEwYWU=, Slow query, duration: 42.891422s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:19.014821Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:19.015290Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:19.016269Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519628252481346636:11083];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039094; 2025-06-24T21:18:19.016858Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:41.571322Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwwzxm6ph30x1yvbbrpp0a", SessionId: ydb://session/3?node_id=1&id=OGVhNWJhN2UtZjFkZTgwZWYtZTQ1YjYxOWUtM2U1NjEwYWU=, Slow query, duration: 12.768603s, 
status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/\";\n\n-- TPC-H/TPC-R National Market Share Query (Q8)\n-- TPC TPC-H Parameter Substitution (Version 2.17.2 build 0)\n-- using 1680793381 as a seed to the RNG\n\nselect\n o_year,\n sum(case\n when nation = 'MOZAMBIQUE' then volume\n else 0\n end) / sum(volume) as mkt_share\nfrom\n (\n select\n DateTime::GetYear(o_orderdate) as o_year,\n l_extendedprice * (1 - l_discount) as volume,\n n2.n_name as nation\n from\n part\n cross join supplier\n cross join lineitem\n cross join orders\n cross join customer\n cross join nation n1\n cross join nation n2\n cross join region\n where\n p_partkey = l_partkey\n and s_suppkey = l_suppkey\n and l_orderkey = o_orderkey\n and o_custkey = c_custkey\n and c_nationkey = n1.n_nationkey\n and n1.n_regionkey = r_regionkey\n and r_name = 'AFRICA'\n and s_nationkey = n2.n_nationkey\n and o_orderdate between date('1995-01-01') and date('1996-12-31')\n and p_type = 'ECONOMY PLATED COPPER'\n ) as all_nations\ngroup by\n o_year\norder by\n o_year;", parameters: 0b ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH4 [GOOD] Test command err: Trying to start YDB, gRPC: 22127, MsgBus: 13113 2025-06-24T21:17:02.320966Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627980627995541:2185];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:02.321155Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003bf4/r3tmp/tmpo0fpAQ/pdisk_1.dat 2025-06-24T21:17:02.874655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:02.874759Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:02.943634Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:02.944320Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627980627995394:2079] 1750799822282592 != 1750799822282595 2025-06-24T21:17:02.956922Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22127, node 1 2025-06-24T21:17:03.039877Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:03.039900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:03.039907Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:03.040007Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:03.275407Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13113 TClient is connected to server localhost:13113 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:03.858158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:05.977129Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627993512897932:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.982460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:05.983226Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627993512897924:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.983380Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:05.994186Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627993512897938:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:06.057270Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627997807865285:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:06.460084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:17:06.698385Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519627997807865521:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:06.698383Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519627997807865584:2318];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:06.698576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519627997807865584:2318];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:06.698637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519627997807865521:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:06.698880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519627997807865584:2318];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:06.699008Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519627997807865584:2318];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:06.699112Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519627997807865584:2318];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:06.699241Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519627997807865584:2318];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:06.699387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519627997807865584:2318];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:06.699514Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037894;self_id=[1:7519627997807865584:2318];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:06.699626Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519627997807865584:2318];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:17:06.699730Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519627997807865584:2318];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:17:06.699849Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519627997807865584:2318];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:17:06.701909Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519627997807865521:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:06.702044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519627997807865521:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:06.702139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519627997807865521:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:06.702231Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519627997807865521:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:06.702352Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519627997807865521:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:06.702452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519627997807865521:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:06.702564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519627997807865521:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:17:06.702674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519627997807865521:2312];tablet_id=72075186224037895;process=TTxInitSchema:: ... 
58398Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.458996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.458996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.464221Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.464877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.465290Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.465838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.470911Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.471616Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.478595Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.482526Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.483027Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.483258Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.492633Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.493295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039372;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.499021Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039372;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.499796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.499900Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.501133Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039330;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.505649Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.510856Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039330;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.511867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039352;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.514596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.522555Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039352;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.523056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.523957Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.524544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.532746Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.533330Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039350;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.539845Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.540458Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.543314Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039350;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.543901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.545950Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.551767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.553953Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.554513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.561753Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.561753Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.562500Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:33.575086Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:33.763618Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwvymh3tzk37dg0h8zp1r4", SessionId: ydb://session/3?node_id=1&id=NDc4OWE4N2QtNTZmOTc2M2QtYjc3MWNjNzYtZjdlMzJiNGQ=, Slow query, duration: 39.057812s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:34.118256Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:34.118870Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:34.119843Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519628268390851067:9258];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-24T21:18:34.120412Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithConstantFold-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 19503, MsgBus: 17324 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b5c/r3tmp/tmpptp45q/pdisk_1.dat 2025-06-24T21:17:55.770227Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628207625293655:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:55.770606Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:17:56.448013Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:56.448111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:56.464049Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:56.511237Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628207625293471:2079] 1750799875712430 != 1750799875712433 2025-06-24T21:17:56.525494Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19503, node 1 2025-06-24T21:17:56.747381Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:17:56.753688Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:56.753709Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
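For readability, the DDL reported by the KQP_SLOW_LOG entry above (and repeated verbatim in the later slow-query entries in this section) unescapes from its \n-encoded form to the statements below. This is only a reconstruction of the query text already present in the log, not additional test output:

CREATE TABLE t1 (
    id1 Int32 NOT NULL,
    PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
    id2 Int64 NOT NULL,
    t1_id1 Int64 NOT NULL,
    -- random_field2 Int32
    PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
    id3 Int16 NOT NULL,
    -- random_field3 Int32
    PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);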
2025-06-24T21:17:56.753717Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:56.753837Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17324 TClient is connected to server localhost:17324 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:57.649515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:57.671910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:00.099497Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628229100130594:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:00.099630Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:00.100226Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628229100130606:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:00.104703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:00.117617Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628229100130608:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:00.183769Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628229100130659:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:00.671793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:00.735715Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628207625293655:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:00.736553Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:00.814732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:00.850723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:00.884091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:00.919071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:01.087496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:01.155119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:01.211623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:01.278935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:01.317084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:01.373924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:01.407784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:01.446832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:02.720261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
32188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.434146Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.434751Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.438418Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.438965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038439;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.444637Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038439;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.444636Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.445219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.445251Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.450699Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.450699Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.451371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038501;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.451786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.457590Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038501;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.457735Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.458356Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.458975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.463855Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.463893Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.464503Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038602;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.464503Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.470018Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.470018Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038602;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.470694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.470694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.475953Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.476595Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038435;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.477013Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.477962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.482143Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038435;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.482812Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.483542Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.485368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.488072Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.488881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.491380Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.492133Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.494154Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.500059Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.525576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:44.533061Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:44.686810Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhww97809cymkwryz9cjdah", SessionId: ydb://session/3?node_id=1&id=NjQxMjM2YWItNGM5NWY0OGUtODdiZGM2ZWEtOWUxYWJiZDU=, Slow query, duration: 39.141596s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:45.289059Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:45.289614Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:45.299545Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519628263459874855:2879];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038629; 2025-06-24T21:18:45.300162Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::SortingsWithLookupJoin4+RemoveLimitOperator >> KqpJoin::IdxLookupLeftPredicate >> KqpJoinOrder::DatetimeConstantFold-ColumnStore [GOOD] >> KqpJoinOrder::TestJoinHint2-ColumnStore >> KqpIndexLookupJoin::SimpleLeftSemiJoin-StreamLookup [GOOD] >> KqpJoin::AllowJoinsForComplexPredicates+StreamLookup >> KqpFlipJoin::RightSemi_1 [GOOD] >> KqpFlipJoin::RightOnly_3 >> RetryPolicy::TWriteSession_SwitchBackToLocalCluster [GOOD] >> RetryPolicy::TWriteSession_SeqNoShift >> KqpExplain::AggGroupLimit [GOOD] >> KqpExplain::ComplexJoin >> KqpFlipJoin::Inner_1 [GOOD] >> KqpFlipJoin::Inner_2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::DatetimeConstantFold-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 16858, MsgBus: 19545 2025-06-24T21:18:07.393364Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628257406680590:2167];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:07.426998Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b4f/r3tmp/tmpYwutlk/pdisk_1.dat 2025-06-24T21:18:08.248680Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:08.248801Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:08.264256Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:08.378437Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:18:08.403072Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: 
Table profiles were not loaded 2025-06-24T21:18:08.415411Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628257406680461:2079] 1750799887351666 != 1750799887351669 TServer::EnableGrpc on GrpcPort 16858, node 1 2025-06-24T21:18:08.691722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:08.691748Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:08.691765Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:08.691870Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19545 TClient is connected to server localhost:19545 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:09.885189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:09.910887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:12.007537Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628278881517586:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:12.007659Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:12.008126Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628278881517598:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:12.013011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:12.023829Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628278881517600:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:12.087021Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628278881517651:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:12.397893Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628257406680590:2167];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:12.397993Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:12.591965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:12.798795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:12.856722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:12.937196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:13.011967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:13.274019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:13.313634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:13.367261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:13.434258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:13.507091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:13.569839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:13.623111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:13.713396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:14.603139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
56739Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.177190Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.177687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.183686Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.184283Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038528;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.189262Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038528;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.189850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038574;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.194360Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038574;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.194915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.202247Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.202955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.211021Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.211635Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038556;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.233115Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038556;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.233718Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038451;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.244963Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.245461Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038596;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.249759Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038451;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.250249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038550;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.254631Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038596;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.260495Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038550;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.261000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038568;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.263724Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.272172Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038568;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.272960Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.276212Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.276698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.286042Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.287321Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.287866Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.291514Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.297077Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.297712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.302194Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.302780Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.308573Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.309123Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038578;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.312167Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.312840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038520;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:55.314972Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038578;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.318439Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038520;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:55.475940Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwwmcs6nnxz0qm7qmv6j8w", SessionId: ydb://session/3?node_id=1&id=NWEwMWNhNTItNjhkNTM5NjAtZmUyZDcxYzMtZDIwYWJhNTM=, Slow query, duration: 38.488915s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:55.780908Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:55.781286Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519628308946295207:2896];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038629; 2025-06-24T21:18:55.781604Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:55.782862Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpIndexLookupJoin::Left-StreamLookup [GOOD] >> KqpLimits::WaitCAsStateOnAbort [GOOD] >> KqpLimits::WaitCAsTimeout >> KqpJoin::JoinLeftPureInner ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::Left-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 21685, MsgBus: 4693 2025-06-24T21:18:44.051852Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628414495613082:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:44.052260Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b04/r3tmp/tmpq3LqCV/pdisk_1.dat 2025-06-24T21:18:44.699271Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628414495612901:2079] 1750799923988704 != 1750799923988707 2025-06-24T21:18:44.747919Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:44.748039Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:44.757913Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:44.797267Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21685, node 1 2025-06-24T21:18:44.987354Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:18:45.039324Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-24T21:18:45.043334Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:45.043357Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:45.043495Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4693 TClient is connected to server localhost:4693 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:46.495314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:46.538549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:46.556427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:46.843074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:47.119073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:18:47.214263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:49.043250Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628414495613082:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:49.043322Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:49.783930Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628440265418319:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:49.784031Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:50.310667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:50.354116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:50.442852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:50.528213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:50.567926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:50.626725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:50.667892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:50.762333Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628444560386278:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:50.762410Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:50.762764Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628444560386283:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:50.766256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:50.778365Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628444560386285:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:18:50.863404Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628444560386338:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:52.150810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... on 2025-06-24T21:18:55.915312Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11535 TClient is connected to server localhost:11535 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:56.541294Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:56.551443Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:56.578534Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:56.702568Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:18:56.934887Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:57.052222Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:00.355335Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628484801260116:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:00.355451Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:00.500205Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:00.551976Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:00.612161Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:00.672240Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:00.730922Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:00.830483Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:00.904434Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:01.035542Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628489096228081:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:01.035654Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:01.039440Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628489096228086:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:01.047224Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:01.075349Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T21:19:01.075610Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628489096228088:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:01.170083Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628489096228139:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:02.681229Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:02.725940Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:02.769591Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:02.815446Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:02.856652Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:02.910578Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::SortingsWithLookupJoin4-RemoveLimitOperator [GOOD] >> KqpJoinOrder::TestJoinOrderHintsComplex-ColumnStore >> KqpJoinOrder::ShuffleEliminationReuseShuffleTwoJoins [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoin4-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 2328, MsgBus: 24308 2025-06-24T21:18:10.279248Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628272168160260:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:10.279538Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b46/r3tmp/tmpxSImly/pdisk_1.dat 2025-06-24T21:18:10.990491Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:10.990607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:11.000489Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:11.059017Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:11.075362Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628272168160075:2079] 1750799890151549 != 1750799890151552 TServer::EnableGrpc on GrpcPort 2328, node 1 2025-06-24T21:18:11.150583Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:18:11.259710Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:11.259731Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:11.259742Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:11.259837Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24308 TClient is connected to server localhost:24308 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:12.185759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:18:12.227094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:14.779429Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628289348029902:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:14.779552Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:14.779888Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628289348029914:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:14.783928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:14.797951Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628289348029916:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:14.867646Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628289348029967:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:15.234548Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628272168160260:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:15.234607Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:15.302872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:15.438218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:15.502334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:15.545251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:15.579775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:15.850210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:15.917412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:15.982467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:16.025890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:16.082838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:16.126094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:16.170959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:16.214080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:17.423751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subopera ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:58.918871Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:58.919653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:58.924178Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:58.927028Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:58.927675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038596;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:58.931836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:58.935940Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038596;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:58.936682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038578;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:58.940394Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:58.941156Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:58.946492Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038578;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:58.947051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038580;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:58.951032Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:58.955815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 
2025-06-24T21:18:58.960944Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038580;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:58.961530Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:58.965492Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:58.966046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:58.970630Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:58.977173Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:58.978549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:58.979012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:58.988445Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:58.989189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:58.993380Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:58.993920Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:58.998863Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:59.003444Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:59.003881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:59.004923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:59.009459Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:59.010036Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:59.010238Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:59.011697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:59.017403Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:59.018138Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:59.021082Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:59.022114Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:59.024973Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:59.025714Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:18:59.032454Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:59.033194Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:59.227516Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwwqqreyavv2jpn5fgky57", SessionId: 
ydb://session/3?node_id=1&id=NmZjZjNlMzAtZDk2ZjI3ZmYtNWZmZWUwNmItNWNmY2U0OGY=, Slow query, duration: 38.818510s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:59.660701Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:59.661224Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:59.661842Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoin::IdxLookupLeftPredicate [GOOD] >> KqpJoin::HashJoinWithAsTable >> KqpFlipJoin::RightOnly_3 [GOOD] >> KqpJoinOrder::SortingsWithLookupJoin2+RemoveLimitOperator [GOOD] >> KqpFlipJoin::Inner_2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpFlipJoin::RightOnly_3 [GOOD] Test command err: Trying to start YDB, gRPC: 14517, MsgBus: 18701 2025-06-24T21:18:51.913021Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628447765243427:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:51.913320Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ace/r3tmp/tmpIH0cnE/pdisk_1.dat 2025-06-24T21:18:52.710448Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628447765243289:2079] 1750799931834431 != 1750799931834434 2025-06-24T21:18:52.721257Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:52.742176Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:52.742263Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:52.744559Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14517, node 1 2025-06-24T21:18:52.899488Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:18:53.019726Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:53.019754Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:53.019761Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:53.019877Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18701 TClient is connected to server localhost:18701 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:53.874072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:53.903357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:53.929649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:54.254903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:54.599460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:18:54.735446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:56.875248Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628447765243427:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:56.875313Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:57.197004Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628473535048703:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:57.197105Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:57.679868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:57.723785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:57.781687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:57.823326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:57.872134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:57.925881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:58.017382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:58.145710Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628477830016667:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:58.145797Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:58.151946Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628477830016672:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:58.159110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:58.179853Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628477830016674:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:18:58.247255Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628477830016729:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:59.638487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... ecting -> Connected TServer::EnableGrpc on GrpcPort 12303, node 2 2025-06-24T21:19:02.643794Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:02.643817Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:02.643825Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:02.643943Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27292 2025-06-24T21:19:03.159265Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27292 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:03.394502Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:19:03.411449Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:03.421811Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:03.539985Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:03.764740Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:03.883631Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:06.883302Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628510211916004:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:06.883442Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:06.964603Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.007722Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.064113Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.111813Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628493032045247:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:07.111879Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:07.139860Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.210178Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.302327Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.401454Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.496093Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519628514506883973:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:07.496206Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:07.496560Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628514506883978:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:07.500964Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:07.522640Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628514506883980:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:07.588647Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628514506884031:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:08.989441Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:09.039711Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:09.089848Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:09.156795Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoin::IndexLoookupJoinStructJoin-StreamLookupJoin ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpFlipJoin::Inner_2 [GOOD] Test command err: Trying to start YDB, gRPC: 18278, MsgBus: 13636 2025-06-24T21:18:53.755386Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628456114487520:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:53.755723Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003acd/r3tmp/tmpwQ9XGO/pdisk_1.dat 2025-06-24T21:18:54.340279Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:54.340385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:54.348273Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:54.371107Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:54.383340Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification 
cookie mismatch for subscription [1:7519628456114487336:2079] 1750799933664485 != 1750799933664488 TServer::EnableGrpc on GrpcPort 18278, node 1 2025-06-24T21:18:54.640535Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:54.640555Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:54.640578Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:54.640692Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:18:54.759304Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13636 TClient is connected to server localhost:13636 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:55.974421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:55.999354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:18:56.019215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:56.236749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:18:56.509400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:56.638707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:58.745471Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628477589325468:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:58.745589Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:58.751313Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628456114487520:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:58.790384Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:59.174282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:59.214032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:59.255764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:59.337092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:59.406831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:59.533660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:59.622911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:59.757391Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519628481884293430:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:59.757509Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:59.757869Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628481884293435:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:59.762640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:59.781244Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628481884293437:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:18:59.877843Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628481884293490:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:01.457176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... 1:19:03.990376Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:03.992364Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:03.993610Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628497193186461:2079] 1750799943599090 != 1750799943599093 TServer::EnableGrpc on GrpcPort 11481, node 2 2025-06-24T21:19:04.299028Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:04.299049Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:04.299057Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:04.299187Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62788 2025-06-24T21:19:04.657150Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:62788 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:05.029448Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:19:05.042142Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:19:05.062053Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:05.149407Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:05.357365Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:05.455666Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:08.196378Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628518668024585:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:08.196465Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:08.317444Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:08.431873Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:08.525922Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:08.612028Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:08.673847Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:08.759792Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:08.842077Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:08.944913Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628518668025254:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:08.945017Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:08.945116Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628518668025259:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:08.949046Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:08.965126Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628518668025261:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:09.034041Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628522962992611:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:10.465106Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:10.527515Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:10.614234Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:10.680038Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> OlapEstimationRowsCorrectness::TPCH10 [GOOD] >> KqpExplain::ComplexJoin [GOOD] >> KqpExplain::CompoundKeyRange >> KqpJoin::AllowJoinsForComplexPredicates+StreamLookup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoin2+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 3613, MsgBus: 5623 2025-06-24T21:18:13.267949Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628283849010301:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:13.268224Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b33/r3tmp/tmpmR4l9Z/pdisk_1.dat 2025-06-24T21:18:14.067325Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628283849010117:2079] 1750799893147510 != 1750799893147513 2025-06-24T21:18:14.139835Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:14.139926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-24T21:18:14.140062Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:14.150197Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3613, node 1 2025-06-24T21:18:14.227970Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:18:14.418904Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:14.418946Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:14.418957Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:14.419080Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5623 TClient is connected to server localhost:5623 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:15.318482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:15.340502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:18.148491Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628305323847242:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:18.148602Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:18.151289Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628305323847253:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:18.156343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:18.185351Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628305323847256:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:18.231253Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628283849010301:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:18.231338Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:18.248873Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628305323847307:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:18.967712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:19.170640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:19.215048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:19.249248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:19.325888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:19.540995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:19.578866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:19.631688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:19.678155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:19.713446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:19.746764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:19.785186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:19.821761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:20.552769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperatio ... 
:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.902341Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.903092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.907083Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038564;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.911836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038532;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.917766Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.918350Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.922156Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038532;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.922709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038530;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.932653Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038530;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.933230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.935745Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.936270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.940720Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.941513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.945348Z node 1 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.946081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038604;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.946762Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.947720Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038586;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.951856Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038604;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.952769Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.953700Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038586;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.954344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038568;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.958286Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.958956Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.959342Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038568;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.959926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038590;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.964913Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.965035Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038590;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.965572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038578;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.965616Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038576;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.970957Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038578;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.971081Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038576;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.971834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038574;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.971866Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038570;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.978076Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038574;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.981338Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038570;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.982104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038562;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.989641Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038562;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:04.989743Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:04.996926Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:05.111576Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwwt4z7dmgkdvedjcy06x8", SessionId: ydb://session/3?node_id=1&id=NDg4MGI0MDYtMWNjMjkwYTUtMTJjODk2MmUtNGUwOTAyZg==, Slow query, duration: 42.231524s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 
NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:05.386629Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:05.387205Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:05.387993Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519628335388624463:2827];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038629; 2025-06-24T21:19:05.388442Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
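[Editor's note: for readability, the DDL embedded in the KQP_SLOW_LOG entry above is reproduced here with its escaped newlines expanded. The statements are copied verbatim from the log text (YQL column-store tables created by the test); only whitespace is reformatted.]

CREATE TABLE t1 (
  id1 Int32 NOT NULL,
  PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
  id2 Int64 NOT NULL,
  t1_id1 Int64 NOT NULL,
  -- random_field2 Int32
  PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
  id3 Int16 NOT NULL,
  -- random_field3 Int32
  PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);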
:8:3: Warning: ORDER BY without LIMIT in subquery will be ignored, code: 4504
:8:3: Warning: ORDER BY without LIMIT in subquery will be ignored, code: 4504 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::ShuffleEliminationReuseShuffleTwoJoins [GOOD] Test command err: Trying to start YDB, gRPC: 9130, MsgBus: 30707 2025-06-24T21:17:18.665943Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628046959886033:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:18.675785Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b9d/r3tmp/tmpkI1NaV/pdisk_1.dat 2025-06-24T21:17:19.114993Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628046959885937:2079] 1750799838642000 != 1750799838642003 2025-06-24T21:17:19.143139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:19.143693Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:19.143947Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:19.149638Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9130, node 1 2025-06-24T21:17:19.267519Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:19.267560Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:19.267576Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:19.267685Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30707 2025-06-24T21:17:19.672359Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30707 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:17:20.082339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:20.104133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:22.351293Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628064139755767:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:22.351419Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:22.351502Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628064139755779:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:22.356145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:22.370900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:17:22.371160Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628064139755781:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:22.459388Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628064139755832:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:22.955013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:17:23.063776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628068434723243:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:23.064096Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628068434723243:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:23.064321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628068434723243:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:23.064430Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628068434723243:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:23.064521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628068434723243:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:23.064621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628068434723243:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:23.064732Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628068434723243:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:23.064862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628068434723243:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:23.064987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628068434723243:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:17:23.065121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7519628068434723243:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:17:23.065234Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628068434723243:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:17:23.146975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:17:23.147052Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:17:23.147162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:17:23.147189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:17:23.149256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:17:23.149307Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:17:23.149400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:17:23.149443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:17:23.149486Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:17:23.149576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:17:23.149776 ... 
0626Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.083827Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.084410Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039223;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.085793Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.086351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.089782Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039223;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.090324Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.091203Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.091806Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.095986Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.096571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.098762Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.099433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.103089Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.103691Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.104842Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.105386Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.109287Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.109928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.111053Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.112584Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.114906Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.115726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.121036Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.121671Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.121746Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.122357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.131596Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.132108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.136020Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.136762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.144199Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.145119Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.145601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.150573Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.152086Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.155919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.161530Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.169363Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.169850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:56.174663Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:56.543436Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwwg8bfhe1kccmy2afhc25", SessionId: ydb://session/3?node_id=1&id=MmRiMjlhMWItZjc1MzJhNDItYzQyYzcxNjktZWNlYWIwZmQ=, Slow query, duration: 43.794976s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:56.884131Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:56.885277Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:56.885789Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519628424917067406:11646];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-24T21:18:56.886296Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::CanonizedJoinOrderTPCH1 [GOOD] >> KqpJoinOrder::CanonizedJoinOrderTPCH14 >> KqpJoin::JoinLeftPureInner [GOOD] >> KqpJoin::JoinLeftPureFull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::AllowJoinsForComplexPredicates+StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 62096, MsgBus: 23524 2025-06-24T21:18:50.538638Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628443669335087:2191];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:50.538838Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ad0/r3tmp/tmpIJdVRa/pdisk_1.dat 2025-06-24T21:18:51.438760Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:51.438912Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:51.449928Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:51.463685Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:18:51.471807Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:51.475383Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628443669334934:2079] 1750799930462834 != 1750799930462837 TServer::EnableGrpc on GrpcPort 62096, node 1 2025-06-24T21:18:51.853968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty 
maybe) 2025-06-24T21:18:51.854001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:51.854010Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:51.854115Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23524 TClient is connected to server localhost:23524 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:52.988347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:53.022589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:53.319122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:53.612224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:18:53.730590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:55.534970Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628443669335087:2191];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:55.535059Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:56.057567Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628469439140346:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:56.057659Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:56.396541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:56.449044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:56.541228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:56.612417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:56.660250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:56.703847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:56.776893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:56.884138Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628469439141012:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:56.884249Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:56.884707Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628469439141017:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:56.889770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:56.908802Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628469439141019:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:18:57.001185Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628469439141070:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:58.551886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:58.654871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... ed at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:58.883717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:58.973384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 26220, MsgBus: 25549 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ad0/r3tmp/tmpAh13wb/pdisk_1.dat 2025-06-24T21:19:02.074830Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:02.075089Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:02.075292Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:19:02.076367Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:02.085449Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628489169732224:2079] 1750799941646019 != 1750799941646022 2025-06-24T21:19:02.099942Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26220, node 2 2025-06-24T21:19:02.316554Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:02.316578Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:02.316586Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T21:19:02.316700Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:02.711280Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25549 TClient is connected to server localhost:25549 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:03.278096Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:03.295784Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:03.309604Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:03.429719Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:03.605426Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:19:03.689653Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:06.168361Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628510644570325:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:06.168443Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:06.237921Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:06.334785Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:06.425527Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:06.490697Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:06.557774Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:06.685434Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:06.769763Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:06.871132Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628510644570988:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:06.871251Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:06.871638Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628510644570993:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:06.875908Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:06.896133Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628510644570995:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:06.961425Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628510644571046:3413] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpIndexLookupJoin::InnerJoinLeftFilter+StreamLookup >> KqpIndexLookupJoin::JoinWithComplexCondition+StreamLookupJoin >> KqpFlipJoin::Right_1 >> KqpJoinOrder::TPCDS34+ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCH10 [GOOD] Test command err: Trying to start YDB, gRPC: 31458, MsgBus: 31261 2025-06-24T21:17:11.743867Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628016691418014:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:11.753243Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003bcc/r3tmp/tmpdQxMuD/pdisk_1.dat 2025-06-24T21:17:12.345790Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:12.349235Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:12.349338Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:12.352340Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31458, node 1 2025-06-24T21:17:12.577218Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:12.577243Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:12.577257Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:12.577366Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:12.737022Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31261 TClient is connected to server localhost:31261 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:13.418580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:13.456534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:15.981415Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628033871287635:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:15.981553Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:15.991322Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628033871287647:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:15.999793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:16.014898Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628033871287649:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:16.113969Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628038166254996:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:16.456415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:17:16.705154Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628038166255242:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:16.705393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628038166255242:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:16.705653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628038166255242:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:16.705772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628038166255242:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:16.705896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628038166255242:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:16.706001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628038166255242:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:16.706111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628038166255242:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:16.706238Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628038166255242:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:16.706349Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628038166255242:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:17:16.706486Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037903;self_id=[1:7519628038166255242:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:17:16.706597Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628038166255242:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:17:16.715234Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628038166255244:2312];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:16.715748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628038166255244:2312];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:16.723267Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628038166255244:2312];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:16.723485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628038166255244:2312];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:16.723595Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628038166255244:2312];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:16.723883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628038166255244:2312];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:16.723995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628038166255244:2312];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:16.724109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628038166255244:2312];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:16.724206Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628038166255244:2312];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:17:16.724458Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628038166255244:2312];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline= ... 
0705Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039249;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.009320Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.013182Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039249;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.013734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.015855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.025769Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.026326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.028141Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.028633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.038542Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.039113Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039251;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.040608Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.041095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.047092Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.047930Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.053435Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039251;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.059773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039289;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.062928Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.063476Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.070714Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039289;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.073336Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.073858Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.075468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.084898Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.085521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.095944Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.096516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.103238Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.105132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.111258Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.111830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.116825Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.117328Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.121290Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.121808Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.127066Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.132055Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.136104Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.136765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:53.142637Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.147065Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:53.435767Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhww9bf0r0k1jfbvy9jsv8t", SessionId: ydb://session/3?node_id=1&id=ZTI3OGExMTEtZjYwYjBiY2YtN2ExYTYyZjEtNzg3OThhY2E=, Slow query, duration: 47.755367s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:53.900551Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:53.901152Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:53.901734Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519628398943566314:11591];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-24T21:18:53.902276Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::CanonizedJoinOrderTPCH12 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH1 [GOOD] Test command err: Trying to start YDB, gRPC: 13872, MsgBus: 11605 2025-06-24T21:17:12.856920Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628023135197421:2125];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:12.856956Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003bb6/r3tmp/tmpJTAymm/pdisk_1.dat 2025-06-24T21:17:13.474665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:13.474790Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:13.476055Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:13.486740Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:13.488116Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628023135197336:2079] 1750799832853374 != 1750799832853377 TServer::EnableGrpc on GrpcPort 13872, node 1 2025-06-24T21:17:13.605306Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:13.605333Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:13.605341Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:13.605483Z 
node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11605 2025-06-24T21:17:13.890169Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11605 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:14.456694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:14.484957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:16.778450Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628040315067168:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:17:16.778599Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:17:16.778884Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628040315067180:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:17:16.784025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-24T21:17:16.797479Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628040315067182:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:16.890843Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628040315067233:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:17.322460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:17:17.598632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628044610034800:2319];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:17.598920Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519628044610034773:2314];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:17.598928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628044610034800:2319];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:17.598949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519628044610034773:2314];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:17.599575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519628044610034773:2314];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:17.599575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628044610034800:2319];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:17.599708Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628044610034800:2319];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:17.599782Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519628044610034773:2314];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:17.599834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628044610034800:2319];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:17.599887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037894;self_id=[1:7519628044610034773:2314];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:17.599993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628044610034800:2319];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:17.600003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519628044610034773:2314];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:17.600119Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628044610034800:2319];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:17.600148Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519628044610034773:2314];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:17.600315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628044610034800:2319];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:17.600428Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628044610034800:2319];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:17:17.600564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628044610034800:2319];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:17:17.600601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519628044610034773:2314];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:17.600674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628044610034800:2319];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:17:17.600697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519628044610034773:2314];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunk ... 
.377900Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:51.382553Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.383352Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:51.388086Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.388744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039253;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:51.393959Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039253;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.394650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:51.395467Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039241;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.395927Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:51.400593Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.401097Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.401278Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039237;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:51.401678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:51.410841Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.413481Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039237;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.414061Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039235;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:51.415694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:51.423662Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039235;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.429446Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.429983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:51.438391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039203;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:51.444481Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.445047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:51.447583Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039203;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.448304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:51.451628Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.452351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039201;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:51.454160Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.454794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:51.457896Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039201;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.458577Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:51.464695Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.465401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:51.466940Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.467781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039229;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:51.471337Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.475574Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039229;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:18:51.700028Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwwbtk3r6mymev3ehap30e", SessionId: ydb://session/3?node_id=1&id=ZTY4OTk1NDUtOTNhOGYyNDctYmVjYjEzMjItZTVmZWMyZjQ=, Slow query, duration: 43.488099s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:52.279245Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:52.279848Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:52.279908Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038933;self_id=[1:7519628272243337743:7820];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-24T21:18:52.280369Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:18:53.393960Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyhwxq4c01zxxf0wz3sdc4k7, SessionId: CompileActor 2025-06-24 21:18:53.393 WARN ydb-core-kqp-ut-join(pid=881426, tid=0x00007F78CDCB6640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed 2025-06-24T21:18:56.228484Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyhwxt4f14hmb9z7z8bk3kvv, SessionId: CompileActor 2025-06-24 21:18:56.227 WARN ydb-core-kqp-ut-join(pid=881426, tid=0x00007F78CE4C5640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed >> KqpJoinOrder::CanonizedJoinOrderTPCH2 >> KqpJoin::HashJoinWithAsTable [GOOD] >> KqpJoinOrder::FiveWayJoinWithPreds-ColumnStore [GOOD] >> KqpIndexLookupJoin::LeftJoinSkipNullFilter+StreamLookup >> KqpFlipJoin::Inner_3 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::HashJoinWithAsTable [GOOD] Test command err: Trying to start YDB, gRPC: 17650, MsgBus: 7166 2025-06-24T21:19:01.162745Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628492319740789:2187];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:01.162969Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ac0/r3tmp/tmpr6SGtQ/pdisk_1.dat 2025-06-24T21:19:01.877217Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:01.877933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:01.878007Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:01.913055Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628492319740629:2079] 1750799941080166 != 1750799941080169 2025-06-24T21:19:01.914985Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17650, node 1 2025-06-24T21:19:02.202886Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:02.203370Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or 
outdated, will use file: (empty maybe) 2025-06-24T21:19:02.203379Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:02.203384Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:02.203491Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7166 TClient is connected to server localhost:7166 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:03.526142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:03.567305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:19:03.593378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:03.849714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:04.117127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:19:04.237425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:06.155079Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628492319740789:2187];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:06.155139Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:06.280196Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628513794578731:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:19:06.280320Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:06.799865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:06.848067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:06.904727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:06.954266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.033418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.113234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.162493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.266133Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628518089546694:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:19:07.266229Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:19:07.266473Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628518089546699:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:19:07.270012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-24T21:19:07.293131Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628518089546701:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:07.359839Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628518089546753:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:08.701526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... runner/.ya/build/build_root/2btm/003ac0/r3tmp/tmp0wouUO/pdisk_1.dat 2025-06-24T21:19:11.846070Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519628533578338623:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:12.018742Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:19:12.207907Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:12.211331Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628533578338501:2079] 1750799951694588 != 1750799951694591 2025-06-24T21:19:12.226564Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:12.226699Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:12.229528Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20636, node 2 2025-06-24T21:19:12.435245Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:12.435263Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:12.435269Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:12.435411Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6707 2025-06-24T21:19:12.848510Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6707 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:13.205793Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:13.238719Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:13.359847Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:13.617723Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:13.791006Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:16.686211Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628555053176615:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:19:16.686309Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:16.783248Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628533578338623:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:16.783310Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:16.837502Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:16.923988Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:16.985372Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:17.037065Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:17.077319Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:17.170782Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:17.246662Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:17.388177Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519628559348144580:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:19:17.388294Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:19:17.391386Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628559348144585:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:19:17.396018Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-24T21:19:17.412080Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480
2025-06-24T21:19:17.412494Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628559348144587:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:19:17.494490Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628559348144638:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:18.751219Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS34+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 24073, MsgBus: 3096 2025-06-24T21:17:03.698259Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627983871904357:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:03.698727Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003bee/r3tmp/tmpeDWbbo/pdisk_1.dat 2025-06-24T21:17:04.172727Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627983871904154:2079] 1750799823668881 != 1750799823668884 2025-06-24T21:17:04.184655Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24073, node 1 2025-06-24T21:17:04.192124Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:04.192216Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:04.197018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:04.281561Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:04.281585Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:04.281600Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:04.281719Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3096 2025-06-24T21:17:04.686247Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3096 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:05.060507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:05.077516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:17:07.433159Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628001051773982:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:07.433301Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:07.433574Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628001051773994:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:07.438239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:07.453087Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628001051773996:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:17:07.518962Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628001051774047:2335] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:07.912023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:17:08.168089Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628005346741589:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:08.168385Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628005346741589:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:08.168741Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628005346741589:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:08.168919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628005346741589:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:08.169064Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628005346741589:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:08.169194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628005346741589:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:08.169349Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628005346741589:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:08.169482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628005346741589:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:08.169574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628005346741589:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:17:08.169677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037902;self_id=[1:7519628005346741589:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:17:08.169776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628005346741589:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:17:08.170184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628005346741601:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:08.170242Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628005346741601:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:08.170515Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628005346741601:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:08.170644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628005346741601:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:08.170796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628005346741601:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:08.170917Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628005346741601:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:08.171040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628005346741601:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:08.171170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628005346741601:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:08.171261Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628005346741601:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; ... 
;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:43.471161Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:43.471710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:43.479080Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:43.485086Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:43.485654Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:43.488981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:43.497592Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:43.500073Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:43.500571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:43.510927Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:43.517069Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:43.520339Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:43.520923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:43.524016Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:43.529569Z node 1 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:43.533791Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:43.534368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:43.541419Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:43.549628Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:43.550331Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:43.552525Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:43.556296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:43.566248Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:43.570044Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:43.570612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:43.571452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:43.576350Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:43.577045Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:43.577179Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:43.584620Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:43.682799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039294;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:43.688312Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039294;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:43.800559Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhww05h45f02638xehkqd7m", SessionId: ydb://session/3?node_id=1&id=NTUzZTBhMjktNmY1NGMxNzQtMWI0ZWE1OGMtM2FmNWZiZTI=, Slow query, duration: 47.526749s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:44.486975Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:18:44.487519Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:18:44.487745Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519628288814629607:9473];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-24T21:18:44.488262Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:19:08.766489Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwxt4ccz38m9phn80h889y", SessionId: ydb://session/3?node_id=1&id=NTUzZTBhMjktNmY1NGMxNzQtMWI0ZWE1OGMtM2FmNWZiZTI=, Slow query, duration: 13.137779s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query34.tpl and seed 1971067816\nselect c_last_name\n ,c_first_name\n ,c_salutation\n ,c_preferred_cust_flag\n ,ss_ticket_number\n ,cnt from\n (select store_sales.ss_ticket_number ss_ticket_number\n ,store_sales.ss_customer_sk ss_customer_sk\n ,count(*) cnt\n from store_sales as store_sales \n cross join 
date_dim as date_dim\n cross join store as store\n cross join household_demographics as household_demographics\n where store_sales.ss_sold_date_sk = date_dim.d_date_sk\n and store_sales.ss_store_sk = store.s_store_sk\n and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk\n and (date_dim.d_dom between 1 and 3 or date_dim.d_dom between 25 and 28)\n and (household_demographics.hd_buy_potential = '>10000' or\n household_demographics.hd_buy_potential = 'Unknown')\n and household_demographics.hd_vehicle_count > 0\n and (case when household_demographics.hd_vehicle_count > 0\n\t then household_demographics.hd_dep_count/ household_demographics.hd_vehicle_count\n\t else null\n\t end) > 1.2\n and date_dim.d_year in (2000,2000+1,2000+2)\n and store.s_county in ('Salem County','Terrell County','Arthur County','Oglethorpe County',\n 'Lunenburg County','Perry County','Halifax County','Sumner County')\n group by store_sales.ss_ticket_number,store_sales.ss_customer_sk) dn \n cross join customer as customer\n where ss_customer_sk = c_customer_sk\n and cnt between 15 and 20\n order by c_last_name,c_first_name,c_salutation,c_preferred_cust_flag desc, ss_ticket_number;\n\n-- end query 1 in stream 0 using template query34.tpl", parameters: 0b ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH12 [GOOD] Test command err: Trying to start YDB, gRPC: 23619, MsgBus: 25906 2025-06-24T21:17:24.356680Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628075866690545:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:24.357023Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b82/r3tmp/tmpvfIEvs/pdisk_1.dat 2025-06-24T21:17:25.027272Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628075866690361:2079] 1750799844315847 != 1750799844315850 2025-06-24T21:17:25.040106Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:25.042032Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:25.042142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:25.046833Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23619, node 1 2025-06-24T21:17:25.195652Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:25.195677Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:25.195686Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:25.195782Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:25.339296Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25906 
TClient is connected to server localhost:25906 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:26.239571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:28.619721Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628093046560186:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:28.619827Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:28.623261Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628093046560191:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:28.632048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:28.646445Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628093046560200:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:28.748186Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628093046560251:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:29.137161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:17:29.351756Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628075866690545:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:29.351847Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:29.465320Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628097341527764:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:29.465591Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628097341527764:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:29.465889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628097341527764:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:29.466020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628097341527764:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:29.466134Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628097341527764:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:29.466249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628097341527764:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:29.466360Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628097341527764:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:29.466474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628097341527764:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:29.466592Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628097341527764:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:17:29.466722Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628097341527764:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:17:29.466832Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628097341527764:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:17:29.504578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628097341527781:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:29.504651Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628097341527781:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:29.504860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628097341527781:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:29.505002Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628097341527781:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:29.505129Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628097341527781:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:29.505262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628097341527781:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:29.505453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628097341527781:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:29.505578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628097341527781:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:29.505697Z node 1 :TX_COLU ... 
96966Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.197653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039289;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.199814Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.205618Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.206268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.208626Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039289;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.209091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039197;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.212095Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.212813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.220042Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.220698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.222777Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039197;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.223744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.226528Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.227508Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.229412Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.229941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039283;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.233203Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.233807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.235861Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039283;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.236432Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.239131Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.239804Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.242668Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.243192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.244905Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.245497Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.248659Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.249225Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039203;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.250362Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.250934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.254987Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039203;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.255659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.256193Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.256786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.261177Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.261725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.262163Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.262916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:01.267355Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.267976Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:01.580312Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwwpamd3xw26s87rzx08sh", SessionId: ydb://session/3?node_id=1&id=MTA2ZGFkOS03YTMyYjNiOS1mNDgxZGIwNy1hYTViNDUwNg==, Slow query, duration: 42.615567s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:01.939898Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:01.940412Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519628380809415963:9393];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-24T21:19:01.941501Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:01.942835Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::CanonizedJoinOrderLookupBug [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithPreds-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 30811, MsgBus: 12648 2025-06-24T21:18:19.401674Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628310234041278:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:19.401876Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b32/r3tmp/tmprP1vDd/pdisk_1.dat 2025-06-24T21:18:20.007077Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628310234041140:2079] 1750799899360830 != 1750799899360833 2025-06-24T21:18:20.026394Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30811, node 1 2025-06-24T21:18:20.110403Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:20.110488Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:20.119413Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:20.307875Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:20.307910Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:20.307917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T21:18:20.308021Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:18:20.406352Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12648 TClient is connected to server localhost:12648 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:21.490352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:21.513765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:24.317331Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628331708878262:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:24.317497Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:24.317762Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628331708878274:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:24.324755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:24.371613Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628331708878276:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:24.387252Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628310234041278:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:24.387361Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:24.480481Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628331708878329:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:25.031343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.230876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.269929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.303603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.338310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.578890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.630779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.686754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.749308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.820221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.866695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.905662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:25.948816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:26.809929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.304315Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.304315Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.304854Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.305116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.309528Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.309527Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.310090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.310090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.314957Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.314957Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.315447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.315448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038563;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.320192Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.320192Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038563;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.320761Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.320762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.325603Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.325603Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.326180Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.326507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038499;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.330911Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038499;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.330953Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.331487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.331520Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.336164Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.336164Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.336747Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.336849Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.341803Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.341803Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.342401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.342401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.347569Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.347570Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.348275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.348678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.353688Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.353707Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.361673Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038451;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.367554Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038451;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.436284Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038602;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:10.442784Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038602;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:10.478364Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwx0pf4b6b89b1jk843dxw", SessionId: 
ydb://session/3?node_id=1&id=NThkNjA1NjItNWJkYTU3OTAtODVjODcxMmEtZTgwMzE3YjI=, Slow query, duration: 40.894594s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:10.876153Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:10.877155Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:10.879533Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpLimits::WaitCAsTimeout [GOOD] >> KqpParams::BadParameterType >> KqpJoin::IndexLoookupJoinStructJoin-StreamLookupJoin [GOOD] >> KqpJoin::JoinAggregate >> KqpJoinOrder::CanonizedJoinOrderTPCH7 [GOOD] >> KqpJoin::JoinLeftPureFull [GOOD] >> KqpJoinOrder::SortingsWithLookupJoinByPrefix+RemoveLimitOperator >> KqpExplain::CompoundKeyRange [GOOD] >> KqpJoinOrder::TPCHRandomJoinViewJustWorks+ColumnStore [GOOD] >> KqpJoinOrder::CanonizedJoinOrderTPCH17 >> KqpJoin::RightTableValuePredicate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderLookupBug [GOOD] Test command err: Trying to start YDB, gRPC: 1935, MsgBus: 28447 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b30/r3tmp/tmpvxYizJ/pdisk_1.dat 2025-06-24T21:18:23.338331Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628326322668276:2164];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:23.338758Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:18:23.982516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:23.982613Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:23.995519Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:24.023258Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628326322668111:2079] 1750799903188752 != 1750799903188755 2025-06-24T21:18:24.030882Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1935, node 1 2025-06-24T21:18:24.287201Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:24.287231Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:24.287244Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:24.287369Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:18:24.336979Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28447 TClient is connected to server localhost:28447 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:25.396909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:25.417426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:28.151428Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628347797505250:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:28.155225Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628347797505239:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:28.155364Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:28.158026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:28.181271Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628347797505253:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:28.283099Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628347797505304:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:28.287261Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628326322668276:2164];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:28.287338Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:28.727057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:28.903260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:28.956344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:28.997086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:29.029440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:29.238591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:29.274180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:29.312231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:29.392079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:29.443870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:29.531062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:29.569037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:29.641525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:30.611026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subopera ... 
0Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038444;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.136284Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.136825Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.140459Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038444;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.141166Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.143837Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.144698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.147824Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.148471Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038452;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.150011Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.150633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038430;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.154319Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038452;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.154984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.156412Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038430;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.157003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038484;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.161405Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.162099Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.162255Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038484;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.164323Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038456;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.168564Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.169288Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.170431Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038456;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.171074Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038466;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.175431Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.176115Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038442;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.176988Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038466;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.177666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.182242Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.182703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038490;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.186980Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038442;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.187657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038486;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.188667Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038490;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.189356Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.193555Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038486;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.194307Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038499;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.195040Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.196545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038511;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:13.201338Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038499;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.201918Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038511;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:13.374354Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwx49q5g08fkp3stye062c", SessionId: ydb://session/3?node_id=1&id=YjE5Nzc3ZTItMjk0NThjMWEtZTcxNmI1MWEtYjJiMDRiMg==, Slow query, duration: 40.102219s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:13.772816Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:13.773404Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:13.785810Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519628386452217766:2968];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-24T21:19:13.786544Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
:3:9: Warning: Symbol $limit is not used, code: 4527
:2:9: Warning: Symbol $browserGroup is not used, code: 4527
:1:9: Warning: Symbol $quotaName is not used, code: 4527
:4:9: Warning: Symbol $offset is not used, code: 4527
:3:9: Warning: Symbol $limit is not used, code: 4527
:2:9: Warning: Symbol $browserGroup is not used, code: 4527
:1:9: Warning: Symbol $quotaName is not used, code: 4527
:4:9: Warning: Symbol $offset is not used, code: 4527 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::JoinLeftPureFull [GOOD] Test command err: Trying to start YDB, gRPC: 7130, MsgBus: 24422 2025-06-24T21:19:07.493952Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628517183634016:2179];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:07.494021Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ab8/r3tmp/tmppU7H00/pdisk_1.dat 2025-06-24T21:19:08.298569Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:08.298655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:08.304251Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:08.309499Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628517183633874:2079] 1750799947448293 != 1750799947448296 2025-06-24T21:19:08.345656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7130, node 1 2025-06-24T21:19:08.524344Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:08.715900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:08.715947Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:08.715961Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:08.716083Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24422 TClient is connected to server localhost:24422 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:19:09.966144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:10.000707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:10.022420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:10.225398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:10.469240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:10.602255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:12.490742Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628517183634016:2179];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:12.490826Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:12.655134Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628538658472000:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:12.655256Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:12.980888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:13.038833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:13.100902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:13.141614Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:13.184197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:13.282872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:13.355171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:13.465241Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628542953439962:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:13.465315Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:13.465387Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628542953439967:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:13.469551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:13.495555Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628542953439969:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:13.584401Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628542953440024:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 31360, MsgBus: 15317 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ab8/r3tmp/tmpZFh7rL/pdisk_1.dat 2025-06-24T21:19:16.931368Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:19:16.974524Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:16.974612Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:16.976192Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:16.999352Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628554225138980:2079] 1750799956655918 != 1750799956655921 2025-06-24T21:19:17.001815Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31360, node 2 2025-06-24T21:19:17.231777Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:17.231796Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:17.231805Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:17.231911Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15317 2025-06-24T21:19:17.735282Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15317 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:19:18.045418Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:18.073561Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:18.180037Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:18.395124Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:18.482789Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:20.896308Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628571405009782:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:20.896405Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:20.960126Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:20.999889Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:21.051786Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:21.124899Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:21.178839Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:21.261017Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:21.355271Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:21.470204Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628575699977738:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:21.470286Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:21.470448Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628575699977743:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:21.474345Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:21.503339Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628575699977745:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:19:21.594113Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628575699977796:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpJoin::RightSemiJoin_FullScan >> KqpJoinOrder::CanonizedJoinOrderTPCH22 [GOOD] >> KqpJoinOrder::SortingsByPrefixWithConstant+RemoveLimitOperator [GOOD] >> KqpIndexLookupJoin::InnerJoinLeftFilter+StreamLookup [GOOD] >> KqpIndexLookupJoin::InnerJoinLeftFilter-StreamLookup >> KqpFlipJoin::Right_1 [GOOD] >> KqpFlipJoin::Right_2 >> KqpIndexLookupJoin::JoinWithComplexCondition+StreamLookupJoin [GOOD] >> KqpIndexLookupJoin::JoinWithComplexCondition-StreamLookupJoin >> KqpFlipJoin::RightSemi_2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH7 [GOOD] Test command err: Trying to start YDB, gRPC: 11219, MsgBus: 4538 2025-06-24T21:17:20.435371Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628055812373425:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:20.435476Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b89/r3tmp/tmpCvRZyK/pdisk_1.dat 2025-06-24T21:17:21.082942Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:21.083045Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:21.090921Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:21.130015Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:21.131401Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628055812373408:2079] 1750799840434157 != 1750799840434160 TServer::EnableGrpc on GrpcPort 11219, node 1 2025-06-24T21:17:21.379657Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:21.379681Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:21.379687Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:21.379777Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:21.551335Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4538 TClient is connected to server localhost:4538 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:22.222517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:22.240039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:17:24.933871Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628072992243236:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:24.933980Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:24.934961Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628072992243248:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:24.940478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:24.952730Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628072992243250:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:17:25.051463Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628077287210597:2336] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:25.439438Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628055812373425:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:25.439728Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:25.481892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:17:25.820629Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628077287210841:2313];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:25.820854Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628077287210841:2313];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:25.821125Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628077287210841:2313];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:25.821240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628077287210841:2313];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:25.821329Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628077287210841:2313];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:25.821420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628077287210841:2313];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:25.821512Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628077287210841:2313];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:25.821631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628077287210841:2313];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:25.821737Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628077287210841:2313];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:17:25.821836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628077287210841:2313];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:17:25.821936Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628077287210841:2313];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:17:25.824185Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628077287210849:2315];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:25.824237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628077287210849:2315];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:25.824418Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628077287210849:2315];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:25.824521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628077287210849:2315];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:25.824608Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628077287210849:2315];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:25.824693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628077287210849:2315];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:25.824775Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628077287210849:2315];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:25.824893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628077287210849:2315];tablet_ ... 
3274Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:58.983931Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:58.984286Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:58.984559Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:58.984736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039218;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:58.991414Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039218;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:58.997222Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:58.998041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039204;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:58.999477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:59.005748Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:59.006498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039346;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:59.012121Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039204;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:59.012495Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039346;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:59.012725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039314;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:59.013141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:59.024123Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:59.024743Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039282;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:59.027306Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039314;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:59.027910Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039348;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:59.038593Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039348;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:59.038885Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039282;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:59.043974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:59.047372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:59.050590Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:59.053799Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:59.054411Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:59.059281Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039374;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:59.060760Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:59.061438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:59.070187Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039374;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:59.070785Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:59.084295Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:59.085140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039260;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:59.085168Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:59.085644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:59.093404Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:59.097606Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039260;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:59.098206Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:59.102715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:18:59.104366Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:59.114266Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:18:59.379100Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwwjpccwm8bfpab92dnc15", SessionId: ydb://session/3?node_id=1&id=ZGE2ZGNlYjktYmUyN2FhYTktOTk4ODhkY2UtZjhjOTY4NGQ=, Slow query, duration: 44.134132s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:18:59.823259Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:18:59.823268Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:18:59.823912Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519628425179619220:11387];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-24T21:18:59.824437Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> KqpJoinOrder::FiveWayJoinWithComplexPreds-ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCHRandomJoinViewJustWorks+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 29801, MsgBus: 12948 2025-06-24T21:17:31.168435Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628102146147745:2175];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:31.223692Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b7f/r3tmp/tmplUo1J1/pdisk_1.dat 2025-06-24T21:17:31.928844Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:31.929400Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:31.929501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:31.940586Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29801, node 1 2025-06-24T21:17:32.163065Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:17:32.181860Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:32.181890Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:32.181903Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:32.182061Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12948 TClient is connected to server localhost:12948 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:33.127070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:33.149177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:35.461434Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628119326017415:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:35.461555Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:35.461967Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628119326017427:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:35.466609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:35.488303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:17:35.491366Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628119326017429:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:35.560322Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628119326017480:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:35.924978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:17:36.163367Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628102146147745:2175];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:36.163429Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:36.241804Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628123620985053:2317];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:36.241801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628123620984956:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:36.242019Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628123620984956:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:36.242023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628123620985053:2317];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:36.242313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628123620985053:2317];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:36.242425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628123620985053:2317];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:36.242526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628123620985053:2317];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:36.242642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628123620985053:2317];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:36.242779Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037899;self_id=[1:7519628123620985053:2317];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:36.242889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628123620985053:2317];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:36.243013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628123620985053:2317];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:17:36.243118Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628123620984956:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:36.243125Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628123620985053:2317];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:17:36.243292Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628123620985053:2317];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:17:36.243306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628123620984956:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:36.243423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628123620984956:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:36.243516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628123620984956:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:36.243625Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628123620984956:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:36.243711Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628123620984956:2310];tablet_id=72075186 ... 
63586Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039225;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.164487Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.165030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039205;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.168546Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039225;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.169110Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039197;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.171353Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039205;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.171923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039255;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.174061Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039197;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.174608Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039185;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.176856Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039255;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.177327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039207;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.181825Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039185;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.182158Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039207;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.182442Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039231;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.182712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.186124Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039231;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.186694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.190430Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.190944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039247;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.192611Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.193222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039295;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.194504Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039247;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.195022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039253;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.198679Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039295;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.199793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.200153Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039253;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.200721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039271;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.205465Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039271;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.205986Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.206081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.206515Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.211279Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.211503Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.212065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.212608Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.217479Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.217495Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.218126Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039281;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.223377Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039281;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.314004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039254;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:11.320201Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039254;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:11.434084Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwwyyzasry376kmy1z3mcc", SessionId: ydb://session/3?node_id=1&id=NjNhODMwZTEtZTRhMzk1NjctODY2ZmI0NzMtZjIyMTQwNDU=, Slow query, duration: 43.626287s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:11.860047Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:11.860679Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:11.862994Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519628419973774661:9462];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039392; 2025-06-24T21:19:11.863571Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsByPrefixWithConstant+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 5737, MsgBus: 8780 2025-06-24T21:18:27.866177Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628342592615681:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:27.869733Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b27/r3tmp/tmpu43jma/pdisk_1.dat 2025-06-24T21:18:28.533770Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:28.533870Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:28.579456Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:28.580879Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628342592615497:2079] 1750799907821001 != 1750799907821004 2025-06-24T21:18:28.594234Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5737, node 1 2025-06-24T21:18:28.819026Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:28.819047Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:28.819055Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:28.819181Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:18:28.851376Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8780 TClient is connected to server localhost:8780 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:30.236974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:30.325561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:32.865025Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628342592615681:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:32.865115Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:33.272316Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628368362419921:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:33.272483Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:33.279201Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628368362419933:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:33.294180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:33.315768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:18:33.316027Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628368362419935:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:33.399778Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628368362419986:2341] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:34.078438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:34.242917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:34.303444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:34.352859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:34.414175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:34.687476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:34.736583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:34.773956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:34.813217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:34.849028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:34.930965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:34.996455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:35.051408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_c ... 
86182Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.291019Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.294600Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.295103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.297966Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.306891Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.307620Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.311450Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.311950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.317172Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.317950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.320651Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.321097Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.322863Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.323720Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.328605Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.329194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.330118Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.330670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.334201Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.334789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.340924Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.341519Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.344374Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.344856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.350121Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.351735Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.351737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.352203Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.357235Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.360079Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.362343Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.362874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.365124Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.365657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.368866Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.369533Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.371097Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.372000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:18.374998Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.376572Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:18.540320Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwxae416ce9f31nh8c3hx7", SessionId: ydb://session/3?node_id=1&id=YTQ3ZmVlMTctYTg5ZjFmNjItYTQ0MTNjNDAtYjYxYzhhOTE=, Slow query, duration: 38.980601s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:18.950722Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:18.951303Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:18.954202Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519628407017132082:2897];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-24T21:19:18.954658Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH22 [GOOD] Test command err: Trying to start YDB, gRPC: 27204, MsgBus: 29777 2025-06-24T21:17:22.154799Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628064093692092:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:22.167515Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b86/r3tmp/tmpDvamIn/pdisk_1.dat 2025-06-24T21:17:22.817643Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:22.818752Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628064093691911:2079] 1750799842135868 != 1750799842135871 2025-06-24T21:17:22.832033Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:22.832119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:22.833180Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27204, node 1 2025-06-24T21:17:23.136140Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:17:23.155791Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:23.155811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:23.155818Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:23.155911Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29777 TClient is connected to server localhost:29777 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:24.394524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:26.959255Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628081273561746:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:26.959417Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:26.959803Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628081273561758:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:26.965282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:26.982816Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628081273561760:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:27.064860Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628085568529107:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:27.151253Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628064093692092:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:27.151346Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:27.537259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:17:27.827018Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628085568529381:2323];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:27.827558Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628085568529381:2323];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:27.827868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628085568529381:2323];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:27.828000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628085568529381:2323];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:27.828122Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628085568529381:2323];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:27.828260Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628085568529381:2323];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:27.828382Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628085568529381:2323];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:27.828485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628085568529381:2323];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:27.828591Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628085568529381:2323];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:17:27.828706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628085568529381:2323];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:17:27.828814Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628085568529381:2323];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:17:27.869403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628085568529371:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:27.869475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628085568529371:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:27.869715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628085568529371:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:27.869839Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628085568529371:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:27.869935Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628085568529371:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:27.870073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628085568529371:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:27.870210Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628085568529371:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:27.870336Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628085568529371:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:27.870435Z node 1 :TX_COLU ... 
1592Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039310;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.458840Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039348;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.459474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039374;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.464461Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039374;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.465191Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039346;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.468016Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039310;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.468492Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.470052Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039346;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.471239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039298;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.473981Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.474574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.482338Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039298;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.482844Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.487955Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.488619Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039296;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.494193Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.495237Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039296;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.495885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.500540Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.505241Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.505907Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039316;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.509906Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.510551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.519925Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039316;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.520420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.523925Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.524509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.529434Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.530204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.533560Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.534256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.543360Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.543879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.547254Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.547882Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.552969Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.555071Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.555681Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.559575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:06.565128Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.569059Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:06.947396Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwwqxz4zz1jypvqxsspg90", SessionId: ydb://session/3?node_id=1&id=Zjk4N2VlZGQtNTgyZTVlYzktNDNlMzAyMDItZjkyNDMzOTE=, Slow query, duration: 46.339384s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:07.312336Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:07.312960Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:07.313586Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519628467820677422:11659];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039094; 2025-06-24T21:19:07.314107Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpIndexLookupJoin::LeftJoinSkipNullFilter+StreamLookup [GOOD] >> KqpIndexLookupJoin::LeftJoinSkipNullFilter-StreamLookup >> OlapEstimationRowsCorrectness::TPCH3 >> KqpJoinOrder::FiveWayJoinWithPredsAndEquiv-ColumnStore [GOOD] >> KqpJoinOrder::TestJoinOrderHintsSimple+ColumnStore >> KqpJoinOrder::CanonizedJoinOrderTPCH16 >> KqpFlipJoin::Inner_3 [GOOD] >> KqpFlipJoin::LeftSemi_1 >> KqpJoin::ExclusionJoin >> KqpParams::BadParameterType [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithPredsAndEquiv-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 12694, MsgBus: 4208 2025-06-24T21:18:34.180084Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628375437853942:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:34.180242Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b21/r3tmp/tmpXdGE17/pdisk_1.dat 2025-06-24T21:18:35.062351Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:35.069586Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:35.078815Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:35.087181Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:35.093200Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628375437853730:2079] 1750799914151680 != 1750799914151683 2025-06-24T21:18:35.180699Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 12694, node 1 2025-06-24T21:18:35.508345Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:35.508372Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:35.508380Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:35.508507Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4208 TClient is connected to server localhost:4208 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:36.518357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:38.743010Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628392617723558:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:38.743178Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:38.743554Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628392617723570:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:38.748621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:38.773753Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628392617723572:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:38.833319Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628392617723623:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:39.177609Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628375437853942:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:39.177708Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:39.251355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:39.395165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:39.469482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:39.521419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:39.580194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:39.744515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:39.780831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:39.824134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:39.870639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:39.916900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:39.951501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:40.012666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:40.070845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:40.858135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/cor ... 
42231Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.646514Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.647491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.653085Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.653771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038511;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.660510Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038511;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.661243Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.663800Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.664360Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.667267Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.668040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.672752Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.673481Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.677421Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.678172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.684867Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.685474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.693609Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.694199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.696221Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.696779Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.704378Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.704957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.710863Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.711600Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.718951Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.722070Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.722650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.723695Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.731209Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.731909Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038531;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.732094Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.732882Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.738022Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038531;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.738773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.744584Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.745152Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.749858Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.750433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:22.760425Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.763473Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:22.921139Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwxe977gd7a3ezrp16pf7h", SessionId: ydb://session/3?node_id=1&id=NmU0Yjg4ZDUtM2NjYWRhOWEtYzZlYTliNzQtYzBhODhmMTc=, Slow query, duration: 39.424644s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:23.308427Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:23.308985Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519628495696958101:4951];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038629; 2025-06-24T21:19:23.316405Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:23.318127Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::CompoundKeyRange [GOOD] Test command err: 2025-06-24T21:16:33.202957Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627854713070167:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:33.204414Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003109/r3tmp/tmpNiKdcb/pdisk_1.dat 2025-06-24T21:16:33.657410Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:33.694865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:33.694970Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 11021, node 1 2025-06-24T21:16:33.704081Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:33.792071Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:33.792098Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:33.792124Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:33.792279Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:16:33.853230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at 
schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) TClient is connected to server localhost:16606 2025-06-24T21:16:34.128928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:16:34.154357Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627859300890503:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:34.154481Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Database/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:16:34.206899Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:34.299706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:34.299792Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:34.327513Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:16:34.331707Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:34.352862Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:16:34.402222Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:34.402334Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:34.410339Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:34.555104Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:16:34.555367Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:16:34.555458Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:16:34.555522Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:16:34.555603Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:16:34.555692Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:16:34.555787Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:16:34.555856Z node 2 :HIVE WARN: 
tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:16:34.555925Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:16:34.651866Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:16:34.651971Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:16:34.701990Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:16:34.702060Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:16:34.702359Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:16:34.702423Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:16:34.702459Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:16:34.702495Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:16:34.702521Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:16:34.702545Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:16:34.703838Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:34.704526Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7519627859300891011:2290] 2025-06-24T21:16:34.704553Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:16:34.704626Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:16:34.716729Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:16:34.717264Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:16:34.717286Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:16:34.717376Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:16:34.746339Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519627859300891083:2347] txid# 281474976720657, issues: { message: "Schemeshard not available" severity: 1 } 2025-06-24T21:16:34.753952Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. 
TEvProposeTransactionStatus: { Status: 20 Issues { message: "Schemeshard not available" severity: 1 } SchemeShardStatus: 13 SchemeShardReason: "Schemeshard not available" } 2025-06-24T21:16:34.755592Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:16:34.755668Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:7519627859300891169:2289], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:16:34.759240Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7519627859300891175:2384] 2025-06-24T21:16:34.759786Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:7519627859300891175:2384], schemeshard id = 72075186224037897 2025-06-24T21:16:34.830587Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:16:34.835825Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720658:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.843745Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720658 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:16:34.843798Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720658 2025-06-24T21:16:35.030145Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720658. Doublechecking... 2025-06-24T21:16:35.162051Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:35.179165Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:16:36.989313Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627867597973174:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:36.989404Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:37.336967Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:16:37.732087Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037912;self_id=[2:7519627872185793433:2322];tablet_id=72075186224037912;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=norma ... cting -> Connected TServer::EnableGrpc on GrpcPort 9719, node 7 2025-06-24T21:19:16.191822Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:16.191852Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:16.191862Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:16.192026Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:16.527490Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31109 TClient is connected to server localhost:31109 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:17.121950Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:19:17.132362Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:19:17.158213Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:19:17.291445Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:17.542256Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:17.654465Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:20.479469Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519628550067917109:2202];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:20.479562Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:21.707382Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519628575837722355:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:21.707536Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:21.875975Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:21.996933Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:22.081092Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:22.144798Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:22.228963Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:22.302447Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:22.389692Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:22.512032Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519628580132690334:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:22.512155Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:22.512629Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519628580132690339:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:22.524315Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:22.561118Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519628580132690341:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:19:22.654650Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519628580132690395:3428] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Logs"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"Limit","Limit":"10"},{"Scan":"Parallel","ReadRange":["App (new_app_1)","Ts (49)","Host (null, xyz)"],"E-Size":"0","Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/Logs","E-Rows":"1","Table":"Logs","ReadColumns":["App","Host","Message","Ts"],"E-Cost":"0"}],"Node Type":"Limit-TableRangeScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":2}],"Name":"Limit","Limit":"10"}],"Node Type":"Limit"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/Logs","reads":[{"lookup_by":["App (new_app_1)","Ts (49)"],"columns":["App","Host","Message","Ts"],"scan_by":["Host (null, xyz)"],"type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":5,"Operators":[{"Scan":"Parallel","ReadRange":["App (new_app_1)","Ts (49)","Host (null, xyz)"],"E-Size":"0","Name":"TableRangeScan","Path":"\/Root\/Logs","E-Rows":"1","Table":"Logs","ReadColumns":["App","Host","Message","Ts"],"E-Cost":"0"}],"Node Type":"TableRangeScan"}],"Operators":[{"Name":"Limit","Limit":"10"}],"Node Type":"Limit"}],"Operators":[{"Name":"Limit","Limit":"10"}],"Node Type":"Limit"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} >> GroupWriteTest::WriteHardRateDispatcher [GOOD] >> KqpStats::SysViewCancelled [GOOD] >> KqpJoinOrder::SortingsWithLookupJoin1+RemoveLimitOperator [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> GroupWriteTest::WriteHardRateDispatcher [GOOD] Test command err: RandomSeed# 10388329716059704794 2025-06-24T21:14:38.424790Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 5 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 5 MinGeneration# 1 ReadBody# false DiscoverBlockedGeneration# true ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-06-24T21:14:38.461717Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 5 Generation# 1 recieved TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-06-24T21:14:38.461794Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 5 Generation# 1 going to send TEvBlock {TabletId# 5 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-06-24T21:14:38.464883Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 5 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-06-24T21:14:38.497984Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 5 Generation# 2 going to send TEvCollectGarbage {TabletId# 5 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true 
IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:14:38.505012Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 5 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 5 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-24T21:19:32.064264Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-24T21:19:32.064387Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: TabletId# 5 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 5 RecordGeneration# 2 PerGenerationCounter# 12 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:19:32.064440Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-24T21:19:32.064480Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: TabletId# 5 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 5 RecordGeneration# 2 PerGenerationCounter# 13 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-24T21:19:32.469991Z 1 00h01m10.010512s :BS_LOAD_TEST INFO: TabletId# 5 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 5 RecordGeneration# 2 PerGenerationCounter# 12 Channel# 0 Status# OK} 2025-06-24T21:19:32.470110Z 1 00h01m10.010512s :BS_LOAD_TEST INFO: TabletId# 5 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 5 RecordGeneration# 2 PerGenerationCounter# 13 Channel# 0 Status# OK} >> KqpJoin::RightTableValuePredicate [GOOD] |96.2%| [TA] $(B)/ydb/core/load_test/ut/test-results/unittest/{meta.json ... results_accumulator.log} |96.2%| [TA] {RESULT} $(B)/ydb/core/load_test/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpParams::BadParameterType [GOOD] Test command err: Trying to start YDB, gRPC: 29856, MsgBus: 17922 2025-06-24T21:16:17.285829Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627785438276562:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:17.285892Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031f3/r3tmp/tmpIz7nKi/pdisk_1.dat 2025-06-24T21:16:17.603464Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29856, node 1 2025-06-24T21:16:17.682678Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:17.682818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:17.684400Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:17.691796Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:17.691821Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:17.691828Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:17.691984Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17922 TClient is connected to server localhost:17922 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:18.299304Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:16:18.308195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:16:18.324276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:16:18.333629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:18.491880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:18.636333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:18.706109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:20.742324Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627798323180044:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:20.742452Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:21.062982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:21.093151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:21.125148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:21.159906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:21.202739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:21.237328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:21.272069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:21.328168Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627802618147994:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:21.328243Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627802618147999:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:21.328250Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:21.331893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:21.366909Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627802618148001:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:21.451248Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627802618148052:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:22.286243Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627785438276562:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:22.286324Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:22.746922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:32.571782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console c ... ];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031f3/r3tmp/tmpf2cAvr/pdisk_1.dat 2025-06-24T21:19:24.290221Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:19:24.371047Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:24.375638Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519628590440610640:2079] 1750799964176026 != 1750799964176029 2025-06-24T21:19:24.397982Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:24.398078Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:24.400641Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3648, node 4 2025-06-24T21:19:24.535846Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:24.535875Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:24.535886Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:24.536070Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63985 2025-06-24T21:19:25.218808Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63985 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:25.509053Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:25.517170Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:25.530315Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:25.666624Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:25.912469Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:26.065591Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:19:29.199550Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519628590440610778:2151];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:29.199664Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:30.486101Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519628616210416066:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:30.486197Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:30.594604Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:30.653957Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:30.795512Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:30.844337Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:30.926818Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:31.016312Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:31.081102Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:31.199335Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519628620505384033:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:31.199455Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:31.207335Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519628620505384038:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:31.221458Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:31.246982Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519628620505384041:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:31.308438Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519628620505384102:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:33.624260Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Mjg4NzhmYzUtMTllYTViZDQtN2NiYTQ1ODgtMTIzYzhiMDY=, ActorId: [4:7519628629095318974:2482], ActorState: ExecuteState, TraceId: 01jyhwyz07fh6g5kn491r4mhnd, Create QueryResponse for error on request, msg: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1374: ydb/core/kqp/query_data/kqp_query_data.cpp:271: Parameter $group type mismatch, expected: { Kind: Data Data { Scheme: 2 } }, actual: Type (Data), schemeType: Int32, schemeTypeId: 1 >> KqpJoin::JoinAggregate [GOOD] >> KqpIndexLookupJoin::InnerJoinLeftFilter-StreamLookup [GOOD] >> KqpJoinOrder::CanonizedJoinOrderTPCH11 >> KqpJoinOrder::SortingsByPK+RemoveLimitOperator ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::RightTableValuePredicate [GOOD] Test command err: Trying to start YDB, gRPC: 15359, MsgBus: 30014 2025-06-24T21:19:26.473501Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628597408021034:2195];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:26.526400Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a75/r3tmp/tmpXRNGLr/pdisk_1.dat 2025-06-24T21:19:27.197229Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:27.197355Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:27.215498Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:27.216510Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628597408020868:2079] 1750799966369152 != 1750799966369155 2025-06-24T21:19:27.285711Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15359, node 1 2025-06-24T21:19:27.369064Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:27.547706Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:27.547730Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:27.547737Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:27.547840Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30014 TClient is connected to server localhost:30014 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:28.814877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:28.851325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:19:28.869200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:29.167398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:29.504974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:29.655614Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:19:31.451271Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628597408021034:2195];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:31.451372Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:32.012994Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628623177826297:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:32.013116Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:32.389253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:32.452007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:32.534642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:32.601556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:32.644374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:32.696063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:32.750808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:32.857923Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628623177826963:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:32.858030Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:32.858656Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628623177826968:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:32.867753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:32.883747Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628623177826970:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:19:32.972933Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628623177827021:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:34.448382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpFlipJoin::Right_2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoin1+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 16859, MsgBus: 13004 2025-06-24T21:18:40.984972Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628400377960437:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:40.985396Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b05/r3tmp/tmpYbNH8a/pdisk_1.dat 2025-06-24T21:18:41.606152Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:41.606251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:41.617248Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:41.656023Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:41.659538Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628400377960248:2079] 1750799920858031 != 1750799920858034 TServer::EnableGrpc on GrpcPort 16859, node 1 2025-06-24T21:18:41.863585Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:41.863618Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:41.863626Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:41.863728Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:18:41.967285Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13004 TClient is connected to server localhost:13004 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:42.896771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:45.376356Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628421852797370:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:45.376503Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:45.376875Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628421852797382:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:45.387945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:45.414677Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628421852797384:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:45.512859Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628421852797435:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:45.923527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:45.972840Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628400377960437:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:45.972939Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:46.065877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:46.115369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:46.201044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:46.301861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:46.666200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:46.725761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:46.774283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:46.877864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:46.917923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:46.989063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:47.051759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:47.110788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:48.261140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/ ... 
pp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.257062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.259657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038531;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.262949Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038531;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.265952Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.266535Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.267527Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.278448Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.278961Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.279379Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.279841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.284731Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.285326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.286750Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.287796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.290364Z node 1 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.290966Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.293220Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.293815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.296567Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.297322Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.299446Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.300087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.302619Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.303278Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.305038Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.305627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.308735Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.309322Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.310755Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.311621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.315731Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.316392Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.316437Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.317004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.322723Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.322847Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.323461Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.324205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:29.329132Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.330042Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:29.438410Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwxnsac0qwfb40wnrnbg8w", SessionId: ydb://session/3?node_id=1&id=YjY3ZTkzNmMtOTk5ZjFlMmUtYzM0ZjNiMTctZDFmMDUyOTQ=, Slow query, duration: 38.259574s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- 
random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:29.787130Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:29.787674Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:29.788026Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519628460507509333:2877];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038629; 2025-06-24T21:19:29.788553Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
:7:3: Warning: ORDER BY without LIMIT in subquery will be ignored, code: 4504
:7:3: Warning: ORDER BY without LIMIT in subquery will be ignored, code: 4504 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::JoinAggregate [GOOD] Test command err: Trying to start YDB, gRPC: 1531, MsgBus: 13783 2025-06-24T21:19:14.149470Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628546939074045:2174];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:14.149512Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ab2/r3tmp/tmpaTn5sU/pdisk_1.dat 2025-06-24T21:19:14.851164Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628546939073909:2079] 1750799954124888 != 1750799954124891 2025-06-24T21:19:14.866999Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:14.868364Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:14.868458Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:14.875222Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1531, node 1 2025-06-24T21:19:15.091823Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:15.091844Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:15.091866Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:15.091987Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:15.186097Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13783 TClient is connected to server localhost:13783 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:19:16.111882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:16.159711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:16.213535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:16.500125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:16.719997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:16.835252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:19.085178Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628568413912034:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:19.085276Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:19.151260Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628546939074045:2174];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:19.151332Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:19.374732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:19.417603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:19.501272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:19.562263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:19.603996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:19.659261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:19.747891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:19.844836Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519628568413912697:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:19.844915Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:19.845097Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628568413912702:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:19.851395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:19.865402Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628568413912704:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:19.931976Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628568413912756:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:21.392017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:24.820979Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11518, node 2 2025-06-24T21:19:25.051812Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:25.051838Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:25.051847Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:25.051981Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23386 2025-06-24T21:19:25.513340Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23386 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:25.912674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:19:25.923189Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:25.945564Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:26.059880Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:26.257240Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:26.359575Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:29.252356Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628611844584952:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:29.252466Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:29.319334Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:29.381392Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:29.452974Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:29.469802Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628590369746873:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:29.469865Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:29.501530Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:29.548471Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:29.608947Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:29.674214Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:29.801529Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519628611844585608:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:29.801623Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:29.801865Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628611844585613:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:29.806549Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:29.828792Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T21:19:29.829914Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628611844585615:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:29.888406Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628611844585667:3412] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:31.462953Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:31.542630Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:31.634092Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoin::RightSemiJoin_FullScan [GOOD] >> KqpJoin::RightSemiJoin_ComplexSecondaryIndexPrefix >> KqpJoinOrder::FourWayJoinWithPredsAndEquivAndLeft+ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::InnerJoinLeftFilter-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 23979, MsgBus: 63741 2025-06-24T21:19:17.759986Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628560465028111:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:17.760535Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003aac/r3tmp/tmpgNuQXT/pdisk_1.dat 2025-06-24T21:19:18.518087Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:18.518210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:18.520668Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:18.521907Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628560465027930:2079] 1750799957749852 != 1750799957749855 2025-06-24T21:19:18.531413Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23979, node 1 2025-06-24T21:19:18.743691Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:18.835246Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:18.835274Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:18.835298Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:18.835418Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63741 TClient is connected to server localhost:63741 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:19.767596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:19:19.822202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:20.074472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:20.333666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:19:20.489493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:22.557790Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628581939866059:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:22.557927Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:22.763317Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628560465028111:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:22.763427Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:22.906870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:22.959594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.024241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.110623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.154403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.253727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.319831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.423278Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519628586234834013:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:23.423370Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:23.427302Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628586234834018:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:23.432190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:23.459317Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628586234834020:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:23.527793Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628586234834071:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:25.055783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:25.118845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... RN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:28.601481Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16041 2025-06-24T21:19:29.039913Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16041 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:29.497806Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:29.511911Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:29.522981Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:19:29.622209Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:29.857731Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:29.967099Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:32.767423Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628623280329616:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:32.767536Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:32.897435Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:32.943774Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:33.003806Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:33.063243Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:33.122875Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:33.178378Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:33.258484Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:33.376541Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628627575297576:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:33.376630Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:33.376980Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628627575297581:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:33.381153Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:33.407319Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628627575297583:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:33.466284Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628627575297635:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:34.997017Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:35.086168Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:35.164340Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:35.257572Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:35.304152Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:35.346209Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::CanonizedJoinOrderTPCH21 >> KqpFlipJoin::RightSemi_2 [GOOD] >> KqpFlipJoin::RightSemi_3 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpFlipJoin::Right_2 [GOOD] Test command err: Trying to start YDB, gRPC: 23671, MsgBus: 29857 2025-06-24T21:19:18.401887Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628564055211837:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:18.401927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a98/r3tmp/tmpDjBYLQ/pdisk_1.dat 2025-06-24T21:19:18.976472Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628564055211819:2079] 1750799958400787 != 1750799958400790 2025-06-24T21:19:18.998626Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:18.998707Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:19.001218Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:19.028711Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23671, node 1 2025-06-24T21:19:19.045730Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-24T21:19:19.055001Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-24T21:19:19.155750Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:19.155771Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:19.155783Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:19.155893Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:19.443270Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29857 TClient is connected to server localhost:29857 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:20.382448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:19:20.427125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:20.641863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:20.865959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:20.964313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:22.892523Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628581235082642:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:22.892665Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:23.297199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.370561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.403905Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628564055211837:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:23.404002Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:23.452900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.569696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.618707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.702585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.785709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.883195Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519628585530050614:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:23.883284Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:23.883575Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628585530050619:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:23.892150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:23.910336Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628585530050621:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:23.999839Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628585530050672:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:25.835787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB fir ... nnecting -> Connected TServer::EnableGrpc on GrpcPort 11352, node 2 2025-06-24T21:19:29.139751Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:29.139779Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:29.139792Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:29.139903Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:29.524454Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7416 TClient is connected to server localhost:7416 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:30.104767Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:19:30.113995Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:30.121928Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:30.222743Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:30.447894Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:30.548769Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:33.527258Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628606275431589:2154];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:33.527332Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:33.764278Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628627750269558:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:33.764385Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:33.910641Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:33.970186Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:34.039300Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:34.079250Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:34.127286Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:34.210609Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:34.294908Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:34.404147Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628632045237521:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:34.404261Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:34.404836Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628632045237526:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:34.407957Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:34.468403Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628632045237528:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:34.550081Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628632045237581:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:35.920419Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:35.983629Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:36.030617Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:36.079723Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::CanonizedJoinOrderTPCDS64_small [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpStats::SysViewCancelled [GOOD] Test command err: Trying to start YDB, gRPC: 24723, MsgBus: 3154 2025-06-24T21:16:43.844017Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627896476569872:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:43.844098Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003100/r3tmp/tmpeN0nRC/pdisk_1.dat 2025-06-24T21:16:44.207218Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627896476569849:2079] 1750799803842828 != 1750799803842831 2025-06-24T21:16:44.234069Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24723, node 1 2025-06-24T21:16:44.275563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:44.275656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-24T21:16:44.285770Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:44.352822Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:44.352842Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:44.352847Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:44.352995Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3154 TClient is connected to server localhost:3154 2025-06-24T21:16:44.872848Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:45.055917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:45.073263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:16:45.087318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:45.240059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:16:45.417370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:45.502504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:47.272992Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627913656440667:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:47.273124Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:47.640337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:47.672206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:47.701309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:47.744341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:47.774784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:47.849515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:47.895480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:47.968845Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627913656441330:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:47.968937Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:47.969141Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627913656441335:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:47.973787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:48.016542Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627913656441337:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:48.074569Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627917951408684:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:48.846355Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627896476569872:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:48.846422Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Plans":[{"PlanNodeId":7,"Plans":[{"PlanNodeId":6,"Plans":[{"E-Size":"0","PlanNodeId":5,"LookupKeyColumns":["Key"],"Node Type":"TableLookup","Path":"\/Root\/TwoShard","Columns":["Key","Value1","Value2"],"E-Rows":"0","Table":"TwoShard","Plans":[{"PlanNodeId":4,"Plans":[{"PlanNod ... nExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:35.381579Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:35.392012Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:18:35.402562Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:35.685201Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:36.064786Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:18:36.210374Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:38.615130Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519628372199179844:2086];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:38.615280Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:40.497642Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519628402263952478:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:40.497832Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:40.585563Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:40.658173Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:40.750963Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:40.833470Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:40.933689Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:41.050790Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:41.156193Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:41.245368Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519628406558920452:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:41.245472Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:41.245739Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519628406558920457:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:41.251056Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:41.281267Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519628406558920459:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:18:41.371990Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519628406558920510:3428] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:43.317514Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:18:48.968185Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:18:48.968215Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:32.420594Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799972399, txId: 281474976715673] shutting down 2025-06-24T21:19:33.543552Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519628629897221735:2796] TxId: 281474976715675. Ctx: { TraceId: 01jyhwyz20204v3vkvntvxpgcw, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NDY5YjVkOWItNzU3MmM0N2ItOGJlMWRjYmItMzhjNTdjMjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 100ms } {
: Error: Cancelling after 102ms during execution } ] 2025-06-24T21:19:33.544418Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519628629897221756:2820], TxId: 281474976715675, task: 1. Ctx: { TraceId : 01jyhwyz20204v3vkvntvxpgcw. SessionId : ydb://session/3?node_id=4&id=NDY5YjVkOWItNzU3MmM0N2ItOGJlMWRjYmItMzhjNTdjMjA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7519628629897221735:2796], status: CANCELLED, reason: {
: Error: Terminate execution } 2025-06-24T21:19:33.544939Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519628629897221764:2828], TxId: 281474976715675, task: 9. Ctx: { SessionId : ydb://session/3?node_id=4&id=NDY5YjVkOWItNzU3MmM0N2ItOGJlMWRjYmItMzhjNTdjMjA=. TraceId : 01jyhwyz20204v3vkvntvxpgcw. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7519628629897221735:2796], status: CANCELLED, reason: {
: Error: Terminate execution } 2025-06-24T21:19:33.545510Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=NDY5YjVkOWItNzU3MmM0N2ItOGJlMWRjYmItMzhjNTdjMjA=, ActorId: [4:7519628621307287074:2796], ActorState: ExecuteState, TraceId: 01jyhwyz20204v3vkvntvxpgcw, Create QueryResponse for error on request, msg:
: Error: Request canceled after 100ms
: Error: Cancelling after 102ms during execution 2025-06-24T21:19:33.561122Z node 4 :TX_DATASHARD WARN: datashard__read_iterator.cpp:3439: 72075186224037922 Cancelled read: {[4:7519628629897221767:2820], 0} 2025-06-24T21:19:34.089313Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750799973971, txId: 281474976715677] shutting down >> KqpJoinOrder::CanonizedJoinOrderTPCC >> KqpIndexLookupJoin::LeftJoinSkipNullFilter-StreamLookup [GOOD] >> KqpIndexLookupJoin::JoinWithComplexCondition-StreamLookupJoin [GOOD] >> KqpJoinOrder::SortingsSimpleOrderByAliasIndexDesc-RemoveLimitOperator [GOOD] >> KqpIndexLookupJoin::LeftJoinOnlyRightColumn+StreamLookup >> KqpJoinOrder::ShuffleEliminationDifferentJoinPredicateKeyTypeCorrectness1 [GOOD] >> OlapEstimationRowsCorrectness::TPCH21 >> KqpJoinOrder::CanonizedJoinOrderTPCH15 >> KqpJoinOrder::CanonizedJoinOrderTPCH19 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftJoinSkipNullFilter-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 25365, MsgBus: 25560 2025-06-24T21:19:21.897092Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628575104236228:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:21.920805Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a7f/r3tmp/tmp9FxCNB/pdisk_1.dat 2025-06-24T21:19:22.416816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:22.416922Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:22.418997Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:22.478742Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:22.482809Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628575104236108:2079] 1750799961821045 != 1750799961821048 TServer::EnableGrpc on GrpcPort 25365, node 1 2025-06-24T21:19:22.595778Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:22.595829Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:22.595837Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:22.595978Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:22.923829Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25560 TClient is connected to server localhost:25560 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:23.666529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:23.704264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:23.898366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:24.080312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:24.164719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:26.457481Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628596579074223:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:26.457596Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:26.870973Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628575104236228:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:26.871066Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:26.936156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:26.982297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:27.022239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:27.074162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:27.134821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:27.204898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:27.275540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:27.393102Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519628600874042186:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:27.393213Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:27.393490Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628600874042191:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:27.399283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:27.425402Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628600874042193:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:27.532131Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628600874042247:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:29.009738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:29.080679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... RN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:32.573452Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11023 2025-06-24T21:19:33.205975Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11023 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:33.322848Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:33.336137Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:19:33.357950Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:19:33.437706Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:33.654651Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:33.734098Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:36.455310Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628640488791637:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:36.455427Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:36.512513Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:36.568659Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:36.630762Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:36.670776Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:36.745101Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:36.836337Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:36.935754Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:37.025589Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628644783759594:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:37.025690Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:37.025966Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628644783759599:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:37.030425Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:37.059764Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628644783759601:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:19:37.122393Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628644783759652:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:38.793187Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:38.853317Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:38.895976Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:38.938305Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:39.007206Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:39.064185Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::JoinWithComplexCondition-StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 10949, MsgBus: 13354 2025-06-24T21:19:18.435594Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628561773123976:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:18.467273Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003aa3/r3tmp/tmpFPIMZt/pdisk_1.dat 2025-06-24T21:19:18.967414Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628561773123920:2079] 1750799958351630 != 1750799958351633 2025-06-24T21:19:19.004072Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10949, node 1 2025-06-24T21:19:19.016291Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:19.016411Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:19.019058Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:19.114192Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:19.114208Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:19.114216Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:19.114305Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13354 2025-06-24T21:19:19.458280Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13354 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:19.857708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:19:19.898502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:20.117543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:20.315525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:20.429603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:22.851287Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628578952994739:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:22.851408Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:23.440662Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628561773123976:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:23.440735Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:23.561302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.616557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.670229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.738433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.789086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.898577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:23.960489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:24.092445Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519628587542929994:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:24.092519Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:24.092904Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628587542929999:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:24.097177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:24.153735Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628587542930001:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:24.232416Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628587542930052:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:25.797191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:25.850694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 28135, MsgBus: 26704 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003aa3/r3tmp/tmp77fzY0/pdisk_1.dat 2025-06-24T21:19:29.699327Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:19:29.863390Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:29.867249Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628611050580355:2079] 1750799969524249 != 1750799969524252 2025-06-24T21:19:29.902918Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:29.903033Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:29.912279Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28135, node 2 2025-06-24T21:19:30.047871Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:30.047895Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:30.047904Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:30.048042Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26704 2025-06-24T21:19:30.588138Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26704 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:19:31.004537Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:31.020224Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:31.122160Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:31.315428Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:31.426151Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:33.780222Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628628230451180:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:33.780332Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:33.865338Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:33.935725Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:33.994443Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:34.040476Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:34.081672Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:34.148027Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:34.197253Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:34.328172Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628632525419129:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:34.328248Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:34.328266Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628632525419134:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:34.332420Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:34.346413Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628632525419136:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:34.446082Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628632525419187:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:35.792524Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:35.858843Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> OlapEstimationRowsCorrectness::TPCH2 >> KqpFlipJoin::LeftSemi_1 [GOOD] >> KqpJoin::ExclusionJoin [GOOD] >> KqpJoin::CrossJoinCount ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsSimpleOrderByAliasIndexDesc-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 63375, MsgBus: 17020 2025-06-24T21:18:48.918407Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628433574488159:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:48.950428Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003af6/r3tmp/tmptgQM6T/pdisk_1.dat 2025-06-24T21:18:49.766340Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:49.766451Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:49.774118Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:49.805014Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:49.806256Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628433574487973:2079] 1750799928835663 != 1750799928835666 TServer::EnableGrpc on GrpcPort 63375, node 1 2025-06-24T21:18:50.081882Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:18:50.082375Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:50.082398Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:50.082408Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize 
from file: (empty maybe) 2025-06-24T21:18:50.082510Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17020 TClient is connected to server localhost:17020 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:51.077842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:53.922017Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628433574488159:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:53.922088Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:53.933489Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628455049325101:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:53.933608Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:53.934011Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628455049325113:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:53.938323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:53.957563Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628455049325115:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:54.051510Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628459344292462:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:54.420521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:54.628578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:54.683205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:54.778968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:54.829551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:55.094552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:55.130331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:55.187817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:55.232886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:55.273062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:55.325485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:55.369322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:55.416490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:56.261594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/ ... 
76710714; 2025-06-24T21:19:35.484114Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.488332Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.489036Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.494748Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.499607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.504174Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.504801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.509966Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.510578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.519664Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.520240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.524586Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.525240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.530636Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.531373Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.537699Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.538519Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.541280Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.541801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.545047Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.545727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.549303Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.550232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.552686Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.554596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.561820Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.562547Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.567766Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.568408Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.568870Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.569524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.576062Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.576835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.578214Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.578833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.587327Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.587999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.589199Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.589731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:35.594640Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.600771Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:35.762027Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwxxcref6wg8qppqzxwwe0", SessionId: ydb://session/3?node_id=1&id=MzFiMmZhOWYtZDZlYzhjYjktMzRhOWFkY2QtYmZjYzI1NzA=, Slow query, duration: 36.792998s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:36.107449Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:36.107495Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:36.108228Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
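For reference, the query text quoted in the KQP_SLOW_LOG entry above, with the logged \n and \" escapes expanded into plain YQL (this is only an unescaped rendering of the logged text, nothing beyond what the log records):

    CREATE TABLE t1 (
        id1 Int32 NOT NULL,
        PRIMARY KEY (id1)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

    CREATE TABLE t2 (
        id2 Int64 NOT NULL,
        t1_id1 Int64 NOT NULL,
        -- random_field2 Int32
        PRIMARY KEY (id2)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

    CREATE TABLE t3 (
        id3 Int16 NOT NULL,
        -- random_field3 Int32
        PRIMARY KEY (id3)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

The same statements appear again in the later KqpJoinOrder::CanonizedJoinOrderTPCDS64_small slow-query entry and are not repeated there.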
: Warning: Execution, code: 1060
:2:1: Warning: Given predicate is not suitable for used index: ix_bank_document_exec_dt_accounts, code: 2503
: Warning: Execution, code: 1060
:2:1: Warning: Given predicate is not suitable for used index: ix_bank_document_exec_dt_accounts, code: 2503 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCDS64_small [GOOD] Test command err: Trying to start YDB, gRPC: 27877, MsgBus: 31190 2025-06-24T21:17:38.768868Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628135375802146:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:38.769525Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b77/r3tmp/tmpkQMwJz/pdisk_1.dat 2025-06-24T21:17:39.469701Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:39.469790Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:39.470528Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628135375802027:2079] 1750799858690173 != 1750799858690176 2025-06-24T21:17:39.472425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:39.487411Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27877, node 1 2025-06-24T21:17:39.788168Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:17:39.803677Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:39.803698Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:39.803705Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:39.803806Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31190 TClient is connected to server localhost:31190 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:17:40.826209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:40.860494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:43.367993Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628156850639150:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:43.368101Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:43.368516Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628156850639162:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:43.372561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:43.387274Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628156850639164:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:43.491811Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628156850639215:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:43.732233Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628135375802146:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:43.732333Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:43.946852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:17:44.233799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628161145606775:2320];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:44.234000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628161145606775:2320];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:44.234285Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628161145606775:2320];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:44.234468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628161145606775:2320];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:44.234610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628161145606775:2320];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:44.234730Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628161145606775:2320];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:44.234831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628161145606775:2320];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:44.234950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628161145606775:2320];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:44.235066Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628161145606775:2320];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:17:44.236339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628161145606775:2320];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:17:44.236582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628161145606775:2320];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:17:44.237730Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628161145606779:2322];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:44.237809Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628161145606779:2322];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:44.237994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628161145606779:2322];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:44.238131Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628161145606779:2322];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:44.238245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628161145606779:2322];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:44.238363Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628161145606779:2322];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:44.238473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628161145606779:2322];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:44.238571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628161145606779:2322];tabl ... 
16061Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.619714Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.620283Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.626269Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.627050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.630369Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.630908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.640770Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.641333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.645169Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.645720Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.655647Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.656232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.660261Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.660802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.666934Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.670093Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.670682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.675823Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.680575Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.682005Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.685825Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.686561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.696512Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.697154Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.700491Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.701268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.711777Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.712474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.715757Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.716410Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.725237Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.725933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.728519Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.729160Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.740307Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.743473Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.822653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.822923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:19.829778Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.829906Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:19.942438Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwx7mt5jjr567vjfs8k04y", SessionId: ydb://session/3?node_id=1&id=ZjVlMmYyZDAtNzUyNWY1OTAtODg1NzQxNmUtNGE1OWNjYTU=, Slow query, duration: 43.243493s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:20.693366Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:20.694055Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519628457498396823:9540];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-24T21:19:20.694563Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:20.695584Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpFlipJoin::LeftSemi_1 [GOOD] Test command err: Trying to start YDB, gRPC: 11998, MsgBus: 61910 2025-06-24T21:19:22.200261Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628582067636423:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:22.200837Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a7e/r3tmp/tmpnrVnT7/pdisk_1.dat 2025-06-24T21:19:22.805662Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:22.805803Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:22.830360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:22.903237Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628582067636235:2079] 1750799962161959 != 1750799962161962 2025-06-24T21:19:22.922913Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11998, node 1 2025-06-24T21:19:23.131321Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:23.131345Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:23.131352Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:23.131486Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration 2025-06-24T21:19:23.276365Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:61910 TClient is connected to server localhost:61910 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:24.395503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:24.442659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:24.664728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:24.872817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:25.017901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:19:27.203316Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628582067636423:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:27.203388Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:27.684916Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628603542474354:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:27.685034Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:28.018795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:28.102867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:28.185713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:28.254351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:28.345496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:28.453234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:28.517654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:28.638300Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628607837442322:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:28.638371Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:28.638508Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628607837442327:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:28.642179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:28.658216Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628607837442329:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:28.767762Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628607837442380:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:30.289205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:30.349371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... onnecting -> Connected TServer::EnableGrpc on GrpcPort 4780, node 2 2025-06-24T21:19:33.999755Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:33.999780Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:33.999788Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:33.999905Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3763 TClient is connected to server localhost:3763 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:19:34.644737Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:34.677459Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:19:34.690400Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:34.705253Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:34.825745Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:35.027094Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:35.114819Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:38.622629Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628628308445068:2154];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:38.622702Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:38.944433Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628649783283037:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:38.944517Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:39.027181Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:39.070249Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:39.138642Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:39.190412Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:39.240697Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:39.289248Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:39.373090Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:39.487437Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628654078250995:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:39.487515Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:39.487747Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628654078251000:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:39.491926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:39.505402Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628654078251002:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:39.599359Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628654078251053:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:41.017037Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:41.108374Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:41.154303Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:41.199462Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::ShuffleEliminationDifferentJoinPredicateKeyTypeCorrectness1 [GOOD] Test command err: Trying to start YDB, gRPC: 61663, MsgBus: 13373 2025-06-24T21:18:44.183725Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628415813938875:2135];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b02/r3tmp/tmpTzRm94/pdisk_1.dat 2025-06-24T21:18:44.554655Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:18:44.863287Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628415813938770:2079] 1750799924097606 != 1750799924097609 2025-06-24T21:18:44.930578Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:44.934424Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:44.934529Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:44.940704Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61663, node 1 2025-06-24T21:18:45.172684Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:18:45.219856Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:45.219890Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:45.219897Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:45.219999Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13373 TClient is connected to server localhost:13373 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:46.317260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:46.344144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:49.063183Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628437288775891:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:49.063327Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:49.063420Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628437288775903:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:49.072481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:49.093901Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628437288775905:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:49.167378Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628415813938875:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:49.167697Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:49.175045Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628437288775956:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:49.583761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:49.779114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:49.831908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:49.869805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:49.922806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:50.205677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:50.290006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:50.358081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:50.403903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:50.442597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:50.484042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:50.557579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:50.607102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:18:51.865324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
56264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.460635Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.460635Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.461082Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.461135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038497;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.465976Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.465976Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038497;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.466541Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038471;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.466542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.471697Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038471;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.471711Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.472303Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.472394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038481;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.477089Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.477649Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.482246Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.482828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038513;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.483803Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038481;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.484238Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038475;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.488761Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038513;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.489352Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.489747Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038475;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.490273Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.495021Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.495037Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.495657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.495656Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038455;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.500581Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.501008Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.504005Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038455;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.504519Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.508704Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.509343Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.512740Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.513306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038495;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.513328Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.514642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.518598Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038495;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.520083Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:31.525258Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.525315Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:31.652845Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwxsccedvwzk4q2c7v6a88", SessionId: ydb://session/3?node_id=1&id=ZDFlMThlY2EtY2NlMzNiMTktZTVmMDA3NWItYzQwZTRkZTI=, Slow query, duration: 36.791640s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:32.078058Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:32.078611Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519628471648520164:2765];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-24T21:19:32.080395Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:32.082189Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoin::JoinLeftPureExclusion >> KqpJoinOrder::FiveWayJoinWithPreds+ColumnStore >> KqpJoinOrder::CanonizedJoinOrderTPCH13 >> KqpJoinOrder::FiveWayJoinWithConstantFold+ColumnStore >> KqpJoinOrder::CanonizedJoinOrderTPCH20 [GOOD] >> KqpJoinOrder::TPCDS94+ColumnStore >> KqpIndexLookupJoin::LeftSemi >> KqpFlipJoin::RightSemi_3 [GOOD] >> KqpJoin::RightSemiJoin_ComplexSecondaryIndexPrefix [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH20 [GOOD] Test command err: Trying to start YDB, gRPC: 5054, MsgBus: 14281 2025-06-24T21:17:45.264792Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628163216687765:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:45.265354Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b6d/r3tmp/tmp7FFdQu/pdisk_1.dat 2025-06-24T21:17:45.925199Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628163216687579:2079] 1750799865196982 != 1750799865196985 2025-06-24T21:17:45.939649Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5054, node 1 2025-06-24T21:17:45.997320Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:45.998025Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:46.023599Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:46.107944Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable 
config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:46.107965Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:46.107972Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:46.108071Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:46.246943Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14281 TClient is connected to server localhost:14281 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:47.129383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:49.934487Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628180396557407:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:49.934651Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:49.935026Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628180396557419:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:49.939946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:49.956520Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628180396557421:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:50.103707Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628184691524768:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:50.255255Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628163216687765:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:50.255375Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:50.534392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:17:50.900027Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628184691525020:2317];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:50.900329Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628184691525020:2317];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:50.900762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628184691525020:2317];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:50.900870Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628184691525020:2317];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:50.900998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628184691525020:2317];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:50.901156Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628184691525020:2317];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:50.901279Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628184691525020:2317];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:50.901391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628184691525020:2317];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:50.901530Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628184691525020:2317];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:17:50.901648Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628184691525020:2317];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:17:50.901748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628184691525020:2317];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:17:50.901970Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628184691525013:2310];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:50.902046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628184691525013:2310];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:50.902368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628184691525013:2310];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:50.902505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628184691525013:2310];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:50.902631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628184691525013:2310];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:50.902851Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628184691525013:2310];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:50.903002Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628184691525013:2310];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:50.903165Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628184691525013:2310];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:50.903332Z node 1 :TX_COLUMN ... 
46543Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.456611Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.457184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.460454Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.460965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.466623Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.470988Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.471524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.474868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.482340Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.482901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.484451Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.485033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.490879Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.496623Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.497320Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.503104Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.503834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039352;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.509945Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039352;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.512532Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.516234Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.522004Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.525729Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.526283Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.527724Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.541439Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.541984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.548344Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.548896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.552322Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.552858Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.558609Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.564329Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.565431Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.568673Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.575043Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.575575Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.576305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.580445Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:24.587081Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:24.594733Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:25.122711Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwxcdm324k8kcwwae8y2hf", SessionId: ydb://session/3?node_id=1&id=ODlhMWIxYS05ZTljZTZmYy01MmY1ZjIyYi1lODRmMTFiZQ==, Slow query, duration: 43.533721s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:25.579157Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:25.579812Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:25.581065Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519628481044315001:9475];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-24T21:19:25.581575Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpIndexLookupJoin::LeftJoinOnlyRightColumn+StreamLookup [GOOD] >> KqpIndexLookupJoin::LeftJoinOnlyRightColumn-StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpFlipJoin::RightSemi_3 [GOOD] Test command err: Trying to start YDB, gRPC: 61062, MsgBus: 9937 2025-06-24T21:19:30.046887Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628614148693445:2189];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:30.047095Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a67/r3tmp/tmpvhB1zS/pdisk_1.dat 2025-06-24T21:19:30.821185Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628609853725998:2079] 1750799969987094 != 1750799969987097 2025-06-24T21:19:30.826349Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:30.828564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:30.828670Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:30.845304Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61062, node 1 2025-06-24T21:19:31.165458Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:31.183101Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:31.183127Z node 1 :NET_CLASSIFIER 
WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:31.183134Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:31.183261Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9937 TClient is connected to server localhost:9937 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:32.186969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:32.227495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:32.247122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:32.474932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:32.776109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:19:32.907304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:35.047731Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628614148693445:2189];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:35.062789Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:35.130075Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628635623531409:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:35.130205Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:35.557928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:35.645995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:35.742315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:35.791344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:35.859560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:35.947017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:36.036148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:36.151533Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628639918499382:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:36.151644Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:36.151982Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628639918499387:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:36.155357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:36.166822Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628639918499389:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:36.267858Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628639918499442:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:37.650582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... nnecting -> Connected TServer::EnableGrpc on GrpcPort 10627, node 2 2025-06-24T21:19:40.927784Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:40.927811Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:40.927820Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:40.927950Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:41.295440Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9739 TClient is connected to server localhost:9739 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:42.153050Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:42.166751Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:19:42.286392Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:42.507114Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:42.690225Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:45.072075Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628680959774688:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:45.072188Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:45.185342Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:45.251848Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:45.290265Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628659484936594:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:45.290358Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:45.341933Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:45.412594Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:45.471978Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:45.550998Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:45.594296Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:45.762596Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519628680959775353:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:45.762683Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:45.763027Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628680959775358:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:45.770563Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:45.792462Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-24T21:19:45.795916Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628680959775360:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:19:45.858546Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628680959775411:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:47.601689Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.660559Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.761103Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.821639Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::RightSemiJoin_ComplexSecondaryIndexPrefix [GOOD] Test command err: Trying to start YDB, gRPC: 27127, MsgBus: 3628 2025-06-24T21:19:27.674706Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628603005441305:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:27.674904Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a6d/r3tmp/tmp5V4LKu/pdisk_1.dat 2025-06-24T21:19:28.478568Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628603005441193:2079] 1750799967602913 != 1750799967602916 2025-06-24T21:19:28.481501Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:28.483562Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:28.483656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:28.488564Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27127, node 1 2025-06-24T21:19:28.513378Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:19:28.719369Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:28.719880Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:28.719889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:28.719899Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:28.719994Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3628 TClient is connected to server localhost:3628 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:29.887083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:29.925302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:29.949706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:30.215651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:19:30.502317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:30.640644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:32.671249Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628603005441305:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:32.671310Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:32.999993Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628624480279349:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:33.000102Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:33.343598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:33.373972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:33.408033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:33.472415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:33.548080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:33.609258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:33.696232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:33.816725Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628628775247311:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:33.816796Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:33.817043Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628628775247316:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:33.821437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:33.839370Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628628775247318:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:33.898251Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628628775247371:3432] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:35.807759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, ... t_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:40.136026Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2848 2025-06-24T21:19:40.711449Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2848 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:41.010562Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:41.024089Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:19:41.039986Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:41.148414Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:19:41.471259Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:19:41.674410Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:44.360380Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628675453715148:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:44.360467Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:44.495259Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:44.565669Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:44.612854Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:44.642603Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628653978877168:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:44.642655Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:44.674188Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:44.758242Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:44.840557Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:44.893341Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:45.098620Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519628679748683109:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:45.098739Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:45.098981Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628679748683114:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:45.103749Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:45.118196Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628679748683116:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:19:45.181166Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628679748683167:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:46.575179Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:46.658756Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:46.706066Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:46.750934Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:46.797903Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::SortingsComplexOrderBy+RemoveLimitOperator [GOOD] >> KqpJoinOrder::TestJoinHint2-ColumnStore [GOOD] >> KqpJoin::CrossJoinCount [GOOD] >> KqpJoin::JoinLeftPureExclusion [GOOD] >> KqpJoin::JoinLeftPureCross >> KqpIndexLookupJoin::SimpleLeftOnlyJoin-StreamLookup >> KqpJoinOrder::SortingsByPrefixWithAttrEquiToPK-RemoveLimitOperator >> KqpJoinOrder::TPCDS95-ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::CrossJoinCount [GOOD] Test command err: Trying to start YDB, gRPC: 26627, MsgBus: 2916 2025-06-24T21:19:34.516444Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628633357629200:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:34.516485Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a52/r3tmp/tmpt4xOYJ/pdisk_1.dat 2025-06-24T21:19:35.119564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:35.119687Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:35.165570Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:35.167919Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628633357629179:2079] 1750799974498958 != 1750799974498961 2025-06-24T21:19:35.169120Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26627, node 1 2025-06-24T21:19:35.411711Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:35.411732Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:35.411740Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:35.411844Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:35.555438Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2916 TClient is connected to server localhost:2916 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:36.460879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:19:36.481389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:36.501283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:36.682350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:36.868879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:36.967607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:39.308853Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628654832467303:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:39.308954Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:39.528018Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628633357629200:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:39.528095Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:39.692509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:39.736859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:39.784671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:39.826226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:39.866154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:39.938081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:40.002459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:40.106864Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519628659127435260:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:40.106948Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:40.107133Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628659127435265:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:40.111542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:40.136289Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628659127435267:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:40.229206Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628659127435319:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:42.213929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:19:44.720898Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:44.720985Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:44.730130Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628675706910970:2079] 1750799984307659 != 1750799984307662 2025-06-24T21:19:44.737683Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:44.739244Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10235, node 2 2025-06-24T21:19:44.943816Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:44.943844Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:44.943851Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:44.943950Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8825 2025-06-24T21:19:45.387265Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8825 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:19:45.588431Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:45.603782Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:45.617057Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:45.720672Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:46.027684Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:46.119064Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:49.655347Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628697181749090:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:49.655463Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:49.774832Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:49.825244Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:49.889964Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:49.977952Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:50.025791Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:50.089873Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:50.172399Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:50.396304Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628701476717049:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:50.396387Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:50.396739Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628701476717054:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:50.401780Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:50.429167Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628701476717056:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:50.490566Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628701476717107:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:52.329331Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:52.432906Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:52.540533Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsComplexOrderBy+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 18037, MsgBus: 13116 2025-06-24T21:18:56.538908Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628470247800805:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:56.539345Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003acc/r3tmp/tmpZm05zV/pdisk_1.dat 2025-06-24T21:18:57.165331Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628470247800622:2079] 1750799936492980 != 1750799936492983 2025-06-24T21:18:57.175398Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:57.188386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:57.188491Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:57.190773Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18037, node 1 2025-06-24T21:18:57.426763Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:57.426788Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:18:57.426798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:57.426931Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:18:57.494597Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13116 TClient is connected to server localhost:13116 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:58.510630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:58.525077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:00.881979Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628487427670449:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:00.882138Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:00.882470Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628487427670461:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:00.898040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:00.920698Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628487427670463:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:01.020164Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628491722637810:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:01.524480Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628470247800805:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:01.524533Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:01.559596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:01.842313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:01.922740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:01.980933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:02.063537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:02.359650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:02.419668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:02.477547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:02.522452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:02.571197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:02.606869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:02.683479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:02.720713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:03.584590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
92255Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.495529Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.496155Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.497659Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.498280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.502429Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.503134Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.503948Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.504499Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.509241Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.509848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.509967Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.510671Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.516186Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.516282Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.516849Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.516873Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.522892Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.522892Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.523589Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.523885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.529386Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.529580Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.530131Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.530164Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.536319Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.536319Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.536931Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.536949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.543053Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.543122Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.543789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.544166Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.549614Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.550003Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.550664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.550670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.557026Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.557027Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.596596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:45.603428Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:45.691461Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwy4g85scdf53cf1z7ter2", SessionId: ydb://session/3?node_id=1&id=MjZiNGM2NTItYTIzNjQ3ZjgtY2JmODVhOC03MGZlYTllZg==, Slow query, duration: 39.442385s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:46.201526Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:46.202191Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519628526082382372:2858];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-24T21:19:46.202638Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:46.203692Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::SortingsWithLookupJoin4+RemoveLimitOperator [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TestJoinHint2-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 24365, MsgBus: 20974 2025-06-24T21:19:01.795854Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628489708402013:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:01.796214Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003abe/r3tmp/tmp8XgSMW/pdisk_1.dat 2025-06-24T21:19:02.518261Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:02.518847Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628489708401832:2079] 1750799941735671 != 1750799941735674 TServer::EnableGrpc on GrpcPort 24365, node 1 2025-06-24T21:19:02.603878Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:02.603977Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:02.739344Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:02.759690Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:02.839042Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:02.839061Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: (empty maybe) 2025-06-24T21:19:02.839079Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:02.839199Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20974 TClient is connected to server localhost:20974 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:04.027813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:04.055684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:06.638360Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628511183238955:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:06.638474Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:06.638975Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628511183238967:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:06.643515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:06.658786Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628511183238969:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:06.735429Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628511183239021:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:06.763273Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628489708402013:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:06.763373Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:07.251138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.402424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.444702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.496988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.565385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.847924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.915011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.955006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:07.996329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:08.034795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:08.079631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:08.168389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:08.210517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:09.032735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.551244Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.551789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.557113Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.557682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038529;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.559655Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.560073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.562890Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038529;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.563482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038604;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.568396Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038604;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.568595Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.569095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.569414Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038563;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.573952Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038563;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.574552Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 
2025-06-24T21:19:46.577907Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.578446Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.579126Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.579990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.584742Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.585330Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.587579Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.588128Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.593857Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.598673Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.599139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.600130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038483;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.609780Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038483;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.610240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.615467Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.616051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.619410Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.620041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.621490Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.622106Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.627515Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.628102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.629074Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.630291Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.633422Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.643445Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.712490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:46.719996Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:46.759590Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwy9jfadamvt5brb5dgm9r", SessionId: 
ydb://session/3?node_id=1&id=NjY5NTY0OGEtZGQ2YTljYzMtNjg0Yzc3YTctOGMwMGMyNjk=, Slow query, duration: 35.319345s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:47.065977Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:47.066471Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:47.067496Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpIndexLookupJoin::LeftSemi [GOOD] >> KqpIndexLookupJoin::LeftSemiJoinWithDuplicatesInRightTable+StreamLookupJoin >> KqpIndexLookupJoin::LeftOnly+StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoin4+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 24140, MsgBus: 9951 2025-06-24T21:18:59.819905Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628481478757305:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:59.820298Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ac3/r3tmp/tmpXyx1g6/pdisk_1.dat 2025-06-24T21:19:00.515772Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628481478757102:2079] 1750799939737809 != 1750799939737812 2025-06-24T21:19:00.557466Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:00.557578Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:00.559336Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:00.562274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24140, node 1 2025-06-24T21:19:00.811400Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:00.870574Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:00.870601Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:00.870608Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:00.870708Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9951 TClient is connected to server localhost:9951 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:02.174936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:02.231700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:04.777123Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628502953594225:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:04.777261Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:04.777680Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628502953594237:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:04.782190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:04.800908Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628502953594239:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:04.807233Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628481478757305:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:04.807328Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:04.880525Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628502953594290:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:05.252623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:05.390219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:05.438956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:05.543826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:05.593547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:05.787343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:05.857209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:05.948325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:06.032772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:06.083618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:06.136021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:06.174129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:06.226763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:06.956080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperat ... 
:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.642779Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038539;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.643346Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.649046Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038501;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.654357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.657529Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.658050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.664061Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.668513Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.669078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.670392Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.680397Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.682609Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.683351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.686036Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038435;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.697023Z node 1 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.697598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.702266Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038435;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.702862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.703824Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.704594Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.708898Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.710455Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.711703Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.712471Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.716577Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.717892Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.717948Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.718611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.724341Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.724341Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.725077Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.726493Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.731759Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.732525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.738134Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.738789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.741858Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.742509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:48.747318Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:48.754127Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:49.022445Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwy7vz96gyyt56zfn5c1an", SessionId: ydb://session/3?node_id=1&id=YTViYzI3NS1mODI1N2U3OS04MDFlOTRhZS01NTE0ZWRhYg==, Slow query, duration: 39.326365s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- 
random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:49.493229Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:49.493770Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:49.494401Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519628537313338439:2763];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038629; 2025-06-24T21:19:49.494858Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
:8:3: Warning: ORDER BY without LIMIT in subquery will be ignored, code: 4504
:8:3: Warning: ORDER BY without LIMIT in subquery will be ignored, code: 4504 >> KqpJoinOrder::TPCDS90-ColumnStore >> KqpJoin::RightTableKeyPredicate >> KqpJoinOrder::CanonizedJoinOrderTPCH3 >> KqpIndexLookupJoin::LeftJoinOnlyRightColumn-StreamLookup [GOOD] >> KqpJoin::JoinLeftPureCross [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftJoinOnlyRightColumn-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 12737, MsgBus: 12008 2025-06-24T21:19:41.997472Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628662176397413:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:41.997873Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a30/r3tmp/tmp2jDW4J/pdisk_1.dat 2025-06-24T21:19:42.753395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:42.753498Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:42.771974Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:42.811771Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:42.812334Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628662176397212:2079] 1750799981939440 != 1750799981939443 TServer::EnableGrpc on GrpcPort 12737, node 1 2025-06-24T21:19:43.084227Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:43.085098Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:43.085106Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:43.085113Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:43.085248Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12008 TClient is connected to server localhost:12008 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:44.158441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:44.195641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:44.481392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:44.721140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:44.843376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:19:46.971231Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628662176397413:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:46.971297Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:46.993726Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628683651235342:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:46.993818Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:47.374950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.421633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.458386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.534844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.585513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.641373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.708653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.823289Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628687946203298:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:47.823405Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:47.827281Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628687946203303:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:47.832047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:47.845699Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628687946203305:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:19:47.920816Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628687946203356:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:49.879572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:49.953225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... d to server localhost:25200 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:53.776741Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:53.791743Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:19:53.806082Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:53.942335Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:19:54.320118Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:54.440614Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:57.383346Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628709301625054:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:57.383432Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:57.462179Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628730776462966:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:57.462243Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:57.515125Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:57.581546Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:57.632182Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:57.688574Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:57.759009Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:57.838044Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:57.912781Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:58.080687Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628735071430920:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:58.080800Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:58.081197Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628735071430925:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:58.087354Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:58.135822Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628735071430927:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:19:58.226733Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628735071430978:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:59.856883Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:59.920130Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:00.016611Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:00.070514Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:00.203681Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:00.254708Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::JoinLeftPureCross [GOOD] Test command err: Trying to start YDB, gRPC: 62441, MsgBus: 10973 2025-06-24T21:19:45.890934Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628678937090355:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:45.901905Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a0d/r3tmp/tmpDHPSEv/pdisk_1.dat 2025-06-24T21:19:46.623514Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628678937090156:2079] 1750799985807127 != 1750799985807130 2025-06-24T21:19:46.630158Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:46.651289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:46.651387Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:46.658518Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62441, node 1 2025-06-24T21:19:46.843242Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:46.963920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:46.963938Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:46.963945Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:46.964038Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10973 TClient is connected to server localhost:10973 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:47.893841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:19:47.930788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:47.957627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:48.127695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:48.438649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:48.601105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:50.872797Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628700411928292:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:50.872885Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:50.895615Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628678937090355:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:50.939176Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:51.277013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:51.337766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:51.382269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:51.429408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:51.494937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:51.581701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:51.670737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:51.761219Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519628704706896252:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:51.761289Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:51.761652Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628704706896257:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:51.765815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:51.783971Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628704706896259:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:51.871129Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628704706896312:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 16786, MsgBus: 4085 2025-06-24T21:19:55.375286Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519628724398198439:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:55.375332Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a0d/r3tmp/tmpVyXmuY/pdisk_1.dat 2025-06-24T21:19:55.685265Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:55.685350Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:55.685558Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:55.699260Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628724398198414:2079] 1750799995355329 != 1750799995355332 2025-06-24T21:19:55.699433Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16786, node 2 2025-06-24T21:19:55.910771Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:55.910797Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:55.910805Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:55.910926Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4085 2025-06-24T21:19:56.415281Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4085 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:56.869864Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:56.896021Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:56.910207Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:57.003760Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:57.349906Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:57.478519Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:00.299278Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628745873036517:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:00.299454Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:00.324079Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:00.373617Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:00.379262Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628724398198439:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:00.379315Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:00.418283Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:00.493674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:00.538106Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:00.605196Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:00.711132Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:00.821942Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519628745873037183:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:00.822036Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:00.822438Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628745873037188:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:00.825850Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:00.842505Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628745873037190:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:20:00.929892Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628745873037241:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpIndexLookupJoin::SimpleLeftOnlyJoin-StreamLookup [GOOD] >> KqpIndexLookupJoin::SimpleLeftSemiJoin+StreamLookup >> KqpIndexLookupJoin::LeftSemiJoinWithDuplicatesInRightTable+StreamLookupJoin [GOOD] >> KqpJoinOrder::TestJoinOrderHintsComplex-ColumnStore [GOOD] >> KqpJoinOrder::FourWayJoinLeftFirst-ColumnStore >> RetryPolicy::TWriteSession_SeqNoShift [GOOD] >> RetryPolicy::RetryWithBatching >> KqpJoinOrder::TPCDSEveryQueryWorks-ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftSemiJoinWithDuplicatesInRightTable+StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 13174, MsgBus: 62498 2025-06-24T21:19:48.072375Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628690839680185:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:48.072425Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039f9/r3tmp/tmp19ay8S/pdisk_1.dat 2025-06-24T21:19:48.696297Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:48.696398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:48.710218Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:48.732672Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:48.781286Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628690839680165:2079] 1750799988064583 != 1750799988064586 TServer::EnableGrpc on GrpcPort 13174, node 1 2025-06-24T21:19:48.966510Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:48.966530Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:48.966537Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:48.966651Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:49.127519Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:62498 TClient is connected to server localhost:62498 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:49.966117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:49.984017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:49.995307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:50.241257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:50.520221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:50.707792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:19:53.074684Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628690839680185:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:53.074742Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:53.195758Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628712314518309:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:53.195861Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:53.663545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:53.764400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:53.802004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:53.839473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:53.881770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:53.937373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:53.993522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:54.090615Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628716609486269:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:54.090687Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:54.091049Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628716609486274:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:54.099441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:54.127591Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628716609486276:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:19:54.191611Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628716609486331:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:55.423553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... e; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039f9/r3tmp/tmpSNLsdx/pdisk_1.dat 2025-06-24T21:19:58.785326Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:58.793557Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:58.793823Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:58.798803Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628735250849666:2079] 1750799998305921 != 1750799998305924 2025-06-24T21:19:58.813726Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25468, node 2 2025-06-24T21:19:59.058251Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:59.058278Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:59.058287Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:59.058397Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2729 2025-06-24T21:19:59.421069Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2729 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:19:59.901813Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:59.909083Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:59.918007Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:20:00.044250Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:00.276479Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:00.380492Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:02.873853Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628752430720483:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:02.873973Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:02.935915Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:02.981409Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:03.025477Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:03.094110Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:03.184377Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:03.259863Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:03.318887Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:03.321190Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628735250849686:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:03.321660Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:03.430758Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519628756725688443:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:03.430841Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:03.431709Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628756725688448:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:03.437566Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:03.456073Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628756725688450:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:20:03.520786Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628756725688501:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:05.126186Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:05.191796Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpIndexLookupJoin::LeftOnly+StreamLookup [GOOD] >> KqpIndexLookupJoin::LeftOnly-StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TestJoinOrderHintsComplex-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 14750, MsgBus: 63473 2025-06-24T21:19:10.317576Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628528829619488:2171];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:10.317838Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ab4/r3tmp/tmpGxDmKS/pdisk_1.dat 2025-06-24T21:19:11.073579Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:11.073702Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:11.085175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:11.142355Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:11.147341Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628528829619355:2079] 1750799950224808 != 1750799950224811 TServer::EnableGrpc on GrpcPort 14750, node 1 2025-06-24T21:19:11.364625Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:11.375546Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:11.375574Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:11.375580Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:11.375702Z node 
1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63473 TClient is connected to server localhost:63473 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:12.558661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:12.631570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:15.266652Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628528829619488:2171];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:15.266748Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:15.349573Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628550304456496:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:15.355202Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628550304456484:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:15.355378Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:15.363408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:15.400896Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628550304456498:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:15.467748Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628550304456551:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:16.031338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:16.285887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:16.350058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:16.395203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:16.434909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:16.626429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:16.664905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:16.700060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:16.728368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:16.759446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:16.792649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:16.831264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:16.863531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:17.763039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
4: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.643498Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.644148Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.656985Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.657547Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.657553Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038509;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.658012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.666677Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.671550Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038509;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.671845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.672007Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038541;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.682860Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.683464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038568;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.684767Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038541;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.685233Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.699351Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038568;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.699387Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.699891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.699892Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.712544Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.715217Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.715786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038584;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.721463Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038495;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.733621Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038584;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.734158Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.737419Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038495;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.737928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038511;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.755365Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038511;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.755914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.759353Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.759906Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.772889Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.773860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.780013Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.780612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.795235Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.796777Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:57.824921Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038542;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:19:57.850561Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038542;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:58.022973Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwyjdc81w1g7gbaw3na8nn", SessionId: ydb://session/3?node_id=1&id=OGRkYTY3ZGUtYTVkYTE4Zi01MDg4ZGEyZi0zZTY0NzUyMg==, Slow query, duration: 37.527418s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:58.527307Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:58.527626Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:58.528093Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519628713513245492:6973];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038170; 2025-06-24T21:19:58.528503Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
: Warning: Execution, code: 1060
: Warning: Unapplied hint: JoinOrder( (Unused1 Unused2) (Unused3 Unused4) ), code: 4534
: Warning: Unapplied hint: Rows(Unused # 10e8), code: 4534
: Warning: Unapplied hint: Rows(R T # 1), code: 4534
: Warning: Execution, code: 1060
: Warning: Unapplied hint: JoinOrder( (Unused1 Unused2) (Unused3 Unused4) ), code: 4534
: Warning: Unapplied hint: Rows(Unused # 10e8), code: 4534
: Warning: Unapplied hint: Rows(R T # 1), code: 4534 >> KqpIndexLookupJoin::CheckCastUtf8ToString+StreamLookupJoin-NotNull >> KqpJoin::RightTableKeyPredicate [GOOD] >> OlapEstimationRowsCorrectness::TPCH11 [GOOD] >> KqpIndexLookupJoin::CheckCastInt64ToUint64-StreamLookupJoin-NotNull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::RightTableKeyPredicate [GOOD] Test command err: Trying to start YDB, gRPC: 7437, MsgBus: 1195 2025-06-24T21:20:01.192387Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628747530593755:2198];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:01.203950Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039d9/r3tmp/tmpkmilPZ/pdisk_1.dat 2025-06-24T21:20:01.882505Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628747530593595:2079] 1750800001158920 != 1750800001158923 2025-06-24T21:20:01.890062Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:01.897821Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:01.897923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:01.900324Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7437, node 1 2025-06-24T21:20:02.211114Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:02.224062Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:02.224083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:02.224090Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:02.224214Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1195 TClient is connected to server localhost:1195 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:03.284089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:03.307961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:20:03.315507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:03.530501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:03.887119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:04.023040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:06.169510Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628769005431710:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:06.169666Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:06.191297Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628747530593755:2198];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:06.228190Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:06.597484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:06.655497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:06.702948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:06.796096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:06.882895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:06.978345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:07.084715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:07.195464Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519628773300399677:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:07.195566Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:07.195965Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628773300399682:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:07.209190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:07.244188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T21:20:07.244571Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628773300399684:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:20:07.308115Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628773300399739:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:09.175849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::TPCHRandomJoinViewJustWorks-ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCH11 [GOOD] Test command err: Trying to start YDB, gRPC: 10194, MsgBus: 16602 2025-06-24T21:18:08.315085Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628262169798575:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:08.315505Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b47/r3tmp/tmpE9I8P9/pdisk_1.dat 2025-06-24T21:18:08.882687Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:08.882795Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:08.889852Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:08.958498Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:08.962567Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628262169798372:2079] 1750799888260538 != 1750799888260541 TServer::EnableGrpc on GrpcPort 10194, node 1 2025-06-24T21:18:09.195806Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:09.195831Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:09.195839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:09.195947Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:18:09.283249Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16602 TClient is connected to server localhost:16602 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:10.233091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:10.255066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:12.501250Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628279349668199:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:12.501402Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:12.501873Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628279349668211:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:12.509542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:12.530733Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628279349668213:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:12.615959Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628279349668264:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:13.042498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:18:13.299458Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628262169798575:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:13.299525Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:13.452064Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628283644635782:2324];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:13.452287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628283644635782:2324];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:13.452545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628283644635782:2324];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:13.452689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628283644635782:2324];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:13.452814Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628283644635782:2324];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:13.452941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628283644635782:2324];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:13.453051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628283644635782:2324];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:18:13.453185Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628283644635782:2324];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:18:13.453288Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628283644635782:2324];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:18:13.453408Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628283644635782:2324];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:18:13.453525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628283644635755:2311];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:13.453529Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628283644635782:2324];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:18:13.453585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628283644635755:2311];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:13.453709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628283644635755:2311];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:13.453808Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628283644635755:2311];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:13.453896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628283644635755:2311];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:13.453983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628283644635755:2311];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:13.454074Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628283644635755:2311];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:18:13.454179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628283644635755:2311];tabl ... 
39391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039185;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.147062Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039333;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.148271Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039185;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.148810Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039211;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.151803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.158163Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039211;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.158860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.160923Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.161555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.168287Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.168855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.174962Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.175592Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.182120Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.182682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039241;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.185009Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.185518Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039237;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.192148Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039241;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.192697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039235;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.205335Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039235;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.205919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.207312Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039237;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.207851Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.215475Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.216378Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.216931Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.216949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039221;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.222364Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.222364Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039221;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.223043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039209;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.223296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039281;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.228800Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039209;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.228832Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039281;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.229455Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.230136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.235109Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.235707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.236499Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.237436Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:50.240340Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.242680Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:50.472131Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwy69t9cb1b9t4cs43a430", SessionId: ydb://session/3?node_id=1&id=NmY5NWE2MjItNDI0NDUzNzQtZTUyZTdjZWEtODE0YTEwZmI=, Slow query, duration: 42.380884s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:50.967671Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:50.968289Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:50.968999Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519628588587360482:9589];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-24T21:19:50.969485Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpIndexLookupJoin::SimpleLeftSemiJoin+StreamLookup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::SimpleLeftSemiJoin+StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 25994, MsgBus: 25366 2025-06-24T21:19:55.619722Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628722474872556:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:55.619975Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039f0/r3tmp/tmpPr48AH/pdisk_1.dat 2025-06-24T21:19:56.241298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:56.241394Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:56.252176Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:56.291301Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628722474872372:2079] 1750799995544097 != 1750799995544100 2025-06-24T21:19:56.322331Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25994, node 1 2025-06-24T21:19:56.541907Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:56.541929Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:56.541939Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T21:19:56.542029Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:56.542305Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25366 TClient is connected to server localhost:25366 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:57.746314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:57.765772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:57.999626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:58.271803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:19:58.351649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:20:00.538781Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628743949710500:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:00.538881Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:00.593266Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628722474872556:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:00.593356Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:00.928175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:00.975830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:01.028872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:01.104825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:01.144923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:01.234790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:01.309559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:01.430143Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519628748244678467:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:01.430284Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:01.430804Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628748244678472:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:01.436410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:01.457451Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628748244678474:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:20:01.543986Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628748244678527:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:02.944852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:03.036224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... d to server localhost:29565 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:08.037102Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:08.045773Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:20:08.066154Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:08.148533Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:20:08.374257Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:08.522886Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:11.407267Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628771024436390:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:11.407341Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:11.559329Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628792499274447:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:11.559473Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:11.693545Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:11.765324Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:11.837594Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:11.939641Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:12.022532Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:12.099934Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:12.150870Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:12.275937Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628796794242405:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:12.276048Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:12.276352Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628796794242410:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:12.281242Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:12.299665Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628796794242412:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:20:12.370059Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628796794242463:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:14.133688Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:14.197783Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:14.273223Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:14.319100Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:14.411182Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:14.496123Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::FourWayJoinLeftFirst+ColumnStore >> KqpIndexLookupJoin::LeftOnly-StreamLookup [GOOD] >> KqpIndexLookupJoin::CheckCastUtf8ToString+StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastUtf8ToString+StreamLookupJoin+NotNull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftOnly-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 11145, MsgBus: 32584 2025-06-24T21:19:59.899710Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628741634799258:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:59.900232Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039e4/r3tmp/tmpXRiKB4/pdisk_1.dat 2025-06-24T21:20:00.588598Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:00.589445Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:00.589532Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:00.592341Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628741634799077:2079] 1750799999802285 != 1750799999802288 2025-06-24T21:20:00.610195Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11145, node 1 2025-06-24T21:20:00.807282Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:00.863694Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:00.863728Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:00.863739Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:00.863858Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32584 TClient is connected to server localhost:32584 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:01.806063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:20:01.851529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:20:01.865728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:02.173855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:02.401402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:02.518917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:04.640742Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628763109637204:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:04.640841Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:04.831608Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628741634799258:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:04.845702Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:05.011885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:05.065312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:05.163453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:05.213391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:05.302213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:05.407768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:05.455381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:05.548942Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519628767404605162:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:05.549072Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:05.559284Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628767404605167:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:05.566283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:05.586768Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628767404605169:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:20:05.691630Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628767404605223:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:07.053112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... YOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21361 TClient is connected to server localhost:21361 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:11.792264Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:20:11.827552Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:11.937865Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:20:12.208011Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:12.308702Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:14.987277Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628781615492321:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:14.987361Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:15.258131Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628807385297719:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:15.258254Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:15.313803Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:15.379067Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:15.424823Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:15.468337Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:15.517467Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:15.580898Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:15.669661Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:15.845833Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628807385298384:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:15.845917Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:15.846187Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628807385298389:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:15.850426Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:15.884361Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628807385298391:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:20:15.946209Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628807385298442:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:17.506345Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:17.580302Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:17.626926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:17.666841Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:17.744961Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:17.790465Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpIndexLookupJoin::CheckCastUint64ToInt64+StreamLookupJoin-NotNull >> KqpIndexLookupJoin::CheckCastInt64ToUint64-StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastInt64ToUint64-StreamLookupJoin+NotNull >> KqpJoinOrder::SortingsWithLookupJoinByPrefix+RemoveLimitOperator [GOOD] >> KqpJoinOrder::TPCDS94-ColumnStore >> KqpJoinOrder::DatetimeConstantFold+ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoinByPrefix+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 7538, MsgBus: 5311 
2025-06-24T21:19:25.496915Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628595196069187:2183];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:25.497207Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a79/r3tmp/tmpJNCOrv/pdisk_1.dat 2025-06-24T21:19:26.361894Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:26.362011Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:26.386173Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:26.482780Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:26.486560Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628595196069042:2079] 1750799965418722 != 1750799965418725 2025-06-24T21:19:26.499597Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 7538, node 1 2025-06-24T21:19:26.866295Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:26.866326Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:26.866333Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:26.866450Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5311 TClient is connected to server localhost:5311 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:27.935202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:19:27.958005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:30.479354Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628595196069187:2183];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:30.479417Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:30.722745Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628616670906172:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:30.722830Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628616670906178:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:30.722880Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:30.727642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:30.740223Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628616670906186:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:30.802914Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628616670906237:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:31.158307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:31.280747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:31.327060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:31.377429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:31.422586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:31.655956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:31.711505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:31.799108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:31.880317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:31.981616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:32.030413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:32.102907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:32.142972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:32.955036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperatio ... 
15.074773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.080811Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.081453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.085273Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.085770Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.088474Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.089093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.096250Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.096789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.102505Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.103048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.106096Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.106738Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.117008Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.117471Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.120003Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.120509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.125441Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.126023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.130398Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.130899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.135276Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.139829Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.140412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.143716Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.150142Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.150618Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.153459Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.153943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.155778Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.156322Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.161493Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.163192Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.163751Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.167962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.172961Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.173540Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.179044Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.181373Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.181852Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:15.191246Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:15.387409Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwz1376h77xvv0vtg95d8d", SessionId: ydb://session/3?node_id=1&id=NWFjOTNhLTc1NWM1YzAzLTlhYTA0N2NiLWIzODc3Yzc4, Slow query, duration: 39.859591s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:20:15.814292Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:15.814853Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:15.815578Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519628715455172063:4677];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038629; 2025-06-24T21:20:15.816090Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::TestJoinOrderHintsManyHintTrees [GOOD] >> KqpIndexLookupJoin::CheckCastUtf8ToString+StreamLookupJoin+NotNull [GOOD] >> KqpIndexLookupJoin::LeftJoinRightNullFilter+StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::DatetimeConstantFold+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 10690, MsgBus: 14480 2025-06-24T21:18:35.699782Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628380269502222:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:35.713037Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b1c/r3tmp/tmp8e4GEk/pdisk_1.dat 2025-06-24T21:18:36.335922Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:36.336020Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:36.343129Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:36.353876Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:36.358049Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628380269502201:2079] 1750799915690881 != 1750799915690884 TServer::EnableGrpc on GrpcPort 10690, node 1 2025-06-24T21:18:36.555637Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:36.555669Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:18:36.555693Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:36.555798Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:18:36.731453Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14480 TClient is connected to server localhost:14480 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:37.565776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:40.618197Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628401744339333:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:40.618338Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:40.618817Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628401744339345:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:40.623281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:40.643078Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628401744339347:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:40.704479Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628380269502222:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:40.704552Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:40.713476Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628401744339398:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:41.132900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:18:41.574602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628406039306911:2316];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:41.574881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628406039306911:2316];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:41.576366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628406039306906:2311];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:41.576468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628406039306906:2311];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:41.576838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628406039306906:2311];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:41.576983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628406039306906:2311];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:41.577090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628406039306906:2311];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:41.577179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628406039306906:2311];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:41.577266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037898;self_id=[1:7519628406039306906:2311];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:18:41.577364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628406039306906:2311];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:18:41.577458Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628406039306906:2311];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:18:41.577582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628406039306906:2311];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:18:41.577718Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628406039306906:2311];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:18:41.587616Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628406039306911:2316];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:41.587863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628406039306911:2316];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:41.587989Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628406039306911:2316];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:41.588087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628406039306911:2316];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:41.588184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628406039306911:2316];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:18:41.588278Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628406039306911:2316];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:18:41.588370Z node 1 :TX_COLU ... 
8871Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.932516Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:12.933304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.936254Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:12.936872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039243;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.939234Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:12.939831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.942846Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039243;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:12.943535Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.946025Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:12.946731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.952250Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:12.952937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.955370Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:12.955868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.958889Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:12.959561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.962306Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:12.962973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.965313Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:12.965988Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.972617Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:12.972617Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:12.973198Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.973260Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039237;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.978876Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:12.978876Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039237;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:12.979591Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.979701Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.985689Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:12.985689Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:12.986376Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.986376Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039287;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.992054Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039287;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:12.992773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.994719Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:12.996415Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.998682Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:13.006415Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:13.093589Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:13.099883Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:13.286266Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwywpw59hg08xe0tqga3ty", SessionId: ydb://session/3?node_id=1&id=YWIzNjI5N2YtNmM5ZDMwYjYtYzkxNDhlMWQtOGQxYzhjNTk=, Slow query, duration: 42.241467s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:20:13.716460Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:13.716848Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:13.716874Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519628753931715723:11528];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-24T21:20:13.717690Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastUtf8ToString+StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 14398, MsgBus: 8858 2025-06-24T21:20:11.998300Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628792000773863:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:12.003867Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039ca/r3tmp/tmpKmpRaH/pdisk_1.dat 2025-06-24T21:20:12.721827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:12.721931Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:12.733156Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:20:12.743404Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:12.746680Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628792000773758:2079] 1750800011927613 != 1750800011927616 TServer::EnableGrpc on GrpcPort 14398, node 1 2025-06-24T21:20:13.087218Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:13.087932Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:13.087941Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:20:13.087948Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:13.093485Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8858 TClient is connected to server localhost:8858 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:14.362521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:14.388765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:20:14.409451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:14.646771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:14.917416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:20:15.035496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:16.943476Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628792000773863:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:16.943553Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:17.309805Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628817770579173:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:17.309951Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:17.656063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:17.744265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:17.827511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:17.881243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:17.961331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:18.039369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:18.094535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:18.192120Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628822065547132:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:18.192246Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:18.197718Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628822065547137:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:18.207597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:18.227427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T21:20:18.228259Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628822065547139:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:20:18.328075Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628822065547194:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:19.552413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo ... TA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039ca/r3tmp/tmptLU2hI/pdisk_1.dat 2025-06-24T21:20:21.565576Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:21.565658Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:21.571720Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:21.575295Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628834432211558:2079] 1750800021244509 != 1750800021244512 2025-06-24T21:20:21.591739Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29375, node 2 2025-06-24T21:20:21.767726Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:21.767753Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:21.767760Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:21.767875Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1850 2025-06-24T21:20:22.356574Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1850 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:20:22.764697Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:22.789160Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:22.908441Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:23.206709Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:23.371064Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:26.067347Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628855907049676:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:26.067440Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:26.165967Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:26.226575Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:26.279350Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628834432211725:2203];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:26.279406Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:26.306498Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:26.339845Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:26.383511Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:26.445426Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:26.544472Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:26.628673Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519628855907050336:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:26.628759Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:26.629068Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628855907050343:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:26.632910Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:26.654441Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628855907050345:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:20:26.713876Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628855907050396:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:27.968363Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:28.099260Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::FiveWayJoinWithComplexPreds-ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TestJoinOrderHintsManyHintTrees [GOOD] Test command err: Trying to start YDB, gRPC: 27120, MsgBus: 12626 2025-06-24T21:18:34.707345Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628373595544115:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:34.707722Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b1d/r3tmp/tmpOlUVTF/pdisk_1.dat 2025-06-24T21:18:35.491360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:35.491460Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:35.503454Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:35.535256Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:35.539266Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628373595543934:2079] 1750799914640726 != 1750799914640729 TServer::EnableGrpc on GrpcPort 27120, node 1 2025-06-24T21:18:35.697339Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:18:35.721464Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:35.721483Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:35.721489Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:35.721589Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12626 TClient is connected to server localhost:12626 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:36.692969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:36.743941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:39.304404Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628395070381066:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:39.304586Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628395070381058:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:39.304713Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:39.310057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:39.329802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:18:39.330223Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628395070381072:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:39.384987Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628395070381123:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:39.700294Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628373595544115:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:39.716906Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:39.884687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:18:40.218106Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628399365348675:2321];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:40.218408Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628399365348675:2321];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:40.218723Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628399365348675:2321];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:40.218854Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628399365348675:2321];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:40.218961Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628399365348675:2321];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:40.219091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628399365348675:2321];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:40.219498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628399365348675:2321];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:18:40.219659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628399365348675:2321];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:18:40.219774Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628399365348675:2321];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:18:40.219933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628399365348675:2321];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:18:40.220066Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628399365348675:2321];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:18:40.228005Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628399365348667:2313];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:40.228085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628399365348667:2313];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:40.228318Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628399365348667:2313];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:40.228443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628399365348667:2313];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:40.228551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628399365348667:2313];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:40.228667Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628399365348667:2313];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:40.228802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628399365348667:2313];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_regis ... 
ss_local=63;result=not_found; 2025-06-24T21:20:09.902883Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.903393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.904863Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.905466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.909034Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.909621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.918136Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.919008Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.921505Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.921962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.929769Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.930299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.933094Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.933566Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.939134Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.944102Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.944572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.945655Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.951101Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.951896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.952665Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.953244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039207;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.958063Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.960715Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039207;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.961391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.961526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.967560Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.968138Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.968150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.968669Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.972943Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.974160Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.974830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.974887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.981360Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.982068Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.984775Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.985351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:09.987889Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:09.998494Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:10.241228Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwyww842q4zh3drn526pbq", SessionId: ydb://session/3?node_id=1&id=YTc2ZmMxMDgtNjc3NGMzMmUtYWEzODA4MmQtODNlZDQ4Nzc=, Slow query, duration: 39.031857s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 
NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:20:11.132427Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:11.132960Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:11.133319Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519628626998652020:7880];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-24T21:20:11.133741Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
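The slow-query entry above carries the DDL it timed as a single escaped string; unescaped here for readability (same statements, only the \n escapes and indentation restored — a reconstruction of the logged text, not additional test output):

CREATE TABLE t1 (
    id1 Int32 NOT NULL,
    PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
    id2 Int64 NOT NULL,
    t1_id1 Int64 NOT NULL,
    -- random_field2 Int32
    PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
    id3 Int16 NOT NULL,
    -- random_field3 Int32
    PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

The same three CREATE TABLE statements reappear, identically escaped, in the KqpIndexLookupJoin slow-query entry later in this section.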
: Warning: Execution, code: 1060
: Warning: Unapplied hint: Rows(R T # 1), code: 4534
: Warning: Execution, code: 1060
: Warning: Unapplied hint: Rows(R T # 1), code: 4534 >> KqpIndexLookupJoin::CheckCastUint64ToInt64+StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastUint64ToInt64+StreamLookupJoin+NotNull >> KqpJoinOrder::TPCDS88-ColumnStore >> KqpIndexLookupJoin::CheckCastInt64ToUint64-StreamLookupJoin+NotNull [GOOD] >> KqpJoinOrder::SortingsWithLookupJoin2-RemoveLimitOperator >> KqpLimits::CancelAfterRwTx-useSink [GOOD] >> KqpLimits::CancelAfterRoTxWithFollowerStreamLookupDepededRead ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithComplexPreds-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 30584, MsgBus: 23661 2025-06-24T21:19:30.691843Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628614896003884:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:30.738739Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a5f/r3tmp/tmpiZ5r4u/pdisk_1.dat 2025-06-24T21:19:31.309759Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:31.309866Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:31.331871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:31.386872Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:31.395325Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628614896003837:2079] 1750799970664251 != 1750799970664254 TServer::EnableGrpc on GrpcPort 30584, node 1 2025-06-24T21:19:31.691697Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:31.691718Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:31.691725Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:31.691826Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:31.712106Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23661 TClient is connected to server localhost:23661 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:33.049147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:33.083907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:35.453380Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628636370840960:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:35.453573Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:35.453881Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628636370840972:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:35.458887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:35.481649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:19:35.483395Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628636370840974:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:35.559637Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628636370841025:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:35.692738Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628614896003884:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:35.692816Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:36.295527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:36.472506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:36.515637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:36.556957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:36.610969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:36.809489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:36.893914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:36.940294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:36.991227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:37.035390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:37.115405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:37.159715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:37.232280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operat ... 
81405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038457;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.089916Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038489;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.090364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038517;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.094867Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038517;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.095738Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038457;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.096137Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038461;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.100548Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038461;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.101130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.103761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038507;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.109301Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.109825Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.112189Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038507;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.112686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038459;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.118344Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.120500Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038459;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.121027Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038497;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.123623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.129551Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038497;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.130100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038487;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.131988Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.132511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.140983Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.141536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038531;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.143601Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038487;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.144019Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038447;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.150368Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038531;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.150917Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038602;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.153025Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038447;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.153544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038499;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.162314Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038499;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.162874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.164160Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038602;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.164552Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038483;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.173572Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038483;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.174173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038473;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.176319Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.176716Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.185795Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.187134Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:20.187299Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038473;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.200417Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:20.371419Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwz6fwa2fknxejtrjapt1b", SessionId: ydb://session/3?node_id=1&id=ODIyZTUyMGMtNjAyZDg0OTUtZTIyNjMzZjAtOTdhZTExMDU=, Slow query, duration: 39.318294s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:20:20.694647Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:20.695259Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519628730860137722:4396];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-24T21:20:20.695663Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:20.696522Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastInt64ToUint64-StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 20211, MsgBus: 27149 2025-06-24T21:20:13.400103Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628801479949729:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:13.400175Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039c7/r3tmp/tmpIRC9G9/pdisk_1.dat 2025-06-24T21:20:14.310755Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:14.316761Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:14.316970Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:14.319654Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628801479949711:2079] 1750800013392088 != 1750800013392091 2025-06-24T21:20:14.328721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20211, node 1 2025-06-24T21:20:14.470801Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:14.631639Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:14.631660Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:20:14.631667Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:14.631761Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27149 TClient is connected to server localhost:27149 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:15.918565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:15.940361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:20:15.954620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:16.206480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:16.607915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:20:16.777586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:18.403663Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628801479949729:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:18.417610Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:19.036971Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628827249755126:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:19.037060Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:19.474154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:19.535886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:19.578999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:19.660604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:19.709713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:19.798290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:19.862915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:19.991577Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628827249755790:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:19.991660Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:19.995054Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628827249755795:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:20.000255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:20.027604Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628827249755797:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:20:20.136823Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628831544723146:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:21.959185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... lled at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 2556, MsgBus: 24379 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039c7/r3tmp/tmp8cm6PK/pdisk_1.dat 2025-06-24T21:20:24.567829Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:20:24.671570Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:24.675428Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628846544419482:2079] 1750800024267974 != 1750800024267977 2025-06-24T21:20:24.696722Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:24.696824Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:24.705089Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2556, node 2 2025-06-24T21:20:24.863927Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:24.863950Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:24.863958Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:24.864068Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24379 2025-06-24T21:20:25.363260Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24379 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:25.677940Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:25.691930Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:20:25.713235Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:25.828087Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:26.085533Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:26.206348Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:29.355128Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628868019257599:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:29.365243Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:29.416701Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:29.461251Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:29.532040Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:29.621782Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:29.681808Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:29.770853Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:29.850325Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:29.956228Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628868019258264:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:29.956300Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:29.956508Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628868019258269:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:29.960659Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:29.979327Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628868019258271:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:20:30.049871Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628872314225618:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:31.258191Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:31.385308Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::TPCDS95+ColumnStore >> KqpJoinOrder::CanonizedJoinOrderTPCDS78 [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKString_Reboot [GOOD] >> KqpJoinOrder::Sortings4Year-RemoveLimitOperator >> KqpJoinOrder::CanonizedJoinOrderTPCH5 >> KqpJoinOrder::SortingsByPK+RemoveLimitOperator [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKString_Reboot [GOOD] Test command err: 2025-06-24T21:14:56.200662Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:14:56.224128Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:14:56.224429Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:14:56.231129Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:14:56.231349Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:14:56.231569Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:14:56.231664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:14:56.231740Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:14:56.231811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:14:56.231886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:14:56.231965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:14:56.232051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:14:56.232141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:14:56.232206Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:14:56.257924Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:14:56.258114Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:14:56.258166Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:14:56.258339Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:56.258504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:14:56.258583Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:14:56.258623Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:14:56.258723Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:14:56.258783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:14:56.258821Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:14:56.258847Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:14:56.258983Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:14:56.259038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:14:56.259071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:14:56.259093Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:14:56.259190Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:14:56.259235Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:14:56.259274Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:14:56.259307Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:14:56.259341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:14:56.259374Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:14:56.259405Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:14:56.259596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:14:56.259624Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:14:56.277569Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:14:56.277883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:14:56.277975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:14:56.278023Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:14:56.278192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:14:56.278272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:14:56.278322Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:14:56.278412Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:14:56.278485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:14:56.278539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:14:56.278571Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:14:56.278979Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=52; 2025-06-24T21:14:56.279097Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=48; 2025-06-24T21:14:56.279209Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=29; 2025-06-24T21:14:56.279291Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=37; 2025-06-24T21:14:56.279379Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:14:56.279480Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:14:56.279528Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:14:56.279575Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
XECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=26510;data_size=26478;sum=13963248;count=7164;size_of_portion=208; 2025-06-24T21:20:34.528570Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=3829647; 2025-06-24T21:20:34.528675Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=14; 2025-06-24T21:20:34.530623Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=1874; 2025-06-24T21:20:34.530693Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=3831946; 2025-06-24T21:20:34.530748Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=3832091; 2025-06-24T21:20:34.530827Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=15; 2025-06-24T21:20:34.532157Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=1263; 2025-06-24T21:20:34.532222Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=3834097; 2025-06-24T21:20:34.532417Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=126; 2025-06-24T21:20:34.532589Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=84; 2025-06-24T21:20:34.533052Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=391; 2025-06-24T21:20:34.533423Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=310; 2025-06-24T21:20:34.583697Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=50177; 2025-06-24T21:20:34.632974Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=49122; 2025-06-24T21:20:34.633110Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=17; 2025-06-24T21:20:34.633179Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=14; 2025-06-24T21:20:34.633231Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=7; 2025-06-24T21:20:34.633322Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=48; 2025-06-24T21:20:34.633373Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=7; 2025-06-24T21:20:34.633490Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=80; 2025-06-24T21:20:34.633545Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=9; 2025-06-24T21:20:34.633616Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=35; 2025-06-24T21:20:34.633712Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=57; 2025-06-24T21:20:34.633798Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=48; 2025-06-24T21:20:34.633838Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=3944806; 2025-06-24T21:20:34.634030Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=22744072;raw_bytes=22320020;count=3;records=225200} inactive {blob_bytes=149450960;raw_bytes=145316940;count=221;records=1575200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:20:34.634170Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10390:11982];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:20:34.634237Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10390:11982];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:20:34.634316Z node 1 :TX_COLUMNSHARD 
DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10390:11982];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:20:34.634365Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10390:11982];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:20:34.634635Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:20:34.634719Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:20:34.634794Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799401813;tx_id=18446744073709551615;;current_snapshot_ts=1750799698005; 2025-06-24T21:20:34.634858Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:20:34.634917Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:20:34.634955Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:20:34.635054Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:20:34.640181Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:10514:12098];tablet_id=9437184;parent=[1:10390:11982];fline=manager.cpp:88;event=ask_data;request=request_id=209;9438184000001={portions_count=224};; 2025-06-24T21:20:34.642339Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10390:11982];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:20:34.642951Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10390:11982];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:20:34.642993Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:20:34.643024Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:20:34.643072Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10390:11982];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:20:34.643180Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10390:11982];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-24T21:20:34.643256Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10390:11982];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799401813;tx_id=18446744073709551615;;current_snapshot_ts=1750799698005; 2025-06-24T21:20:34.643306Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10390:11982];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:20:34.643364Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10390:11982];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:20:34.643404Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10390:11982];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:20:34.643492Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10390:11982];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=1.000000s; 2025-06-24T21:20:34.719775Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10390:11982];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; >> YdbYqlClient::SimpleColumnFamilies [GOOD] >> YdbYqlClient::TableKeyRangesSinglePartition >> OlapEstimationRowsCorrectness::TPCH9 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCDS78 [GOOD] Test command err: Trying to start YDB, gRPC: 12073, MsgBus: 24868 2025-06-24T21:17:54.083797Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628204756933846:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:54.084254Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b5e/r3tmp/tmpCyMwiH/pdisk_1.dat 2025-06-24T21:17:54.802748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:54.802851Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Disconnected -> Connecting 2025-06-24T21:17:54.824601Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:54.902252Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628204756933654:2079] 1750799874021180 != 1750799874021183 2025-06-24T21:17:54.920458Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12073, node 1 2025-06-24T21:17:55.063462Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:17:55.275645Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:55.275665Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:55.275672Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:55.275797Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24868 TClient is connected to server localhost:24868 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:56.342650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:17:56.391641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:17:59.067376Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628204756933846:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:59.067492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:59.368043Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628226231770786:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:59.368190Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:59.376207Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628226231770798:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:59.383528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:59.402480Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628226231770800:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:59.491403Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628226231770852:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:59.997125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:18:00.299067Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628230526738407:2324];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:00.299282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628230526738407:2324];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:00.299558Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628230526738407:2324];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:00.299680Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628230526738407:2324];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:00.299793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628230526738407:2324];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:00.299901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628230526738407:2324];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:00.300005Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628230526738407:2324];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:18:00.300109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628230526738407:2324];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:18:00.300230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628230526738407:2324];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:18:00.300367Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037897;self_id=[1:7519628230526738407:2324];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:18:00.300479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628230526738407:2324];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:18:00.346622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628230526738403:2320];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:00.346744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628230526738403:2320];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:00.346914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628230526738403:2320];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:00.347025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628230526738403:2320];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:00.347133Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628230526738403:2320];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:00.363396Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628230526738403:2320];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:00.363510Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628230526738403:2320];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:18:00.363621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628230526738403:2320];tabl ... 
blet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:32.644805Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:32.645385Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:32.649868Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:32.655760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:32.659382Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:32.659896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:32.665789Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:32.669677Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:32.670404Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:32.671487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:32.680482Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:32.680937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:32.684082Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:32.684685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:32.685193Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:32.685996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:32.691348Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:32.692081Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:32.692111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:32.692700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:32.698255Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:32.698255Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:32.698901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:32.699218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:32.702737Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:32.708200Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:32.883672Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwxq3e3jmvrdyvm7j3rja7", SessionId: ydb://session/3?node_id=1&id=MTg4NGFiMTEtZDAxYTg2NjgtYjIzYTJmZi1iNDQ2N2ZhNA==, Slow query, duration: 40.357193s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:33.524526Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:33.525116Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:33.525708Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519628582714113619:11356];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039094; 2025-06-24T21:19:33.526196Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:22.502491Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwzkyqd1qq52z7b65bhrvk", SessionId: ydb://session/3?node_id=1&id=MTg4NGFiMTEtZDAxYTg2NjgtYjIzYTJmZi1iNDQ2N2ZhNA==, Slow query, duration: 27.658386s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "PRAGMA TablePathPrefix='/Root/test/ds';\n\n-- NB: Subquerys\n\n$ws =\n\n (select date_dim.d_year AS ws_sold_year, web_sales.ws_item_sk ws_item_sk,\n\n web_sales.ws_bill_customer_sk ws_customer_sk,\n\n sum(ws_quantity) ws_qty,\n\n sum(ws_wholesale_cost) ws_wc,\n\n sum(ws_sales_price) ws_sp\n\n from web_sales as web_sales\n\n left join web_returns as web_returns on web_returns.wr_order_number=web_sales.ws_order_number and web_sales.ws_item_sk=web_returns.wr_item_sk\n\n join date_dim as date_dim on web_sales.ws_sold_date_sk = date_dim.d_date_sk\n\n where wr_order_number is null\n\n group by date_dim.d_year, web_sales.ws_item_sk, web_sales.ws_bill_customer_sk\n\n );\n\n$cs =\n\n (select date_dim.d_year AS cs_sold_year, catalog_sales.cs_item_sk cs_item_sk,\n\n catalog_sales.cs_bill_customer_sk cs_customer_sk,\n\n sum(cs_quantity) cs_qty,\n\n sum(cs_wholesale_cost) cs_wc,\n\n sum(cs_sales_price) cs_sp\n\n from catalog_sales as catalog_sales\n\n left join catalog_returns as catalog_returns on catalog_returns.cr_order_number=catalog_sales.cs_order_number and catalog_sales.cs_item_sk=catalog_returns.cr_item_sk\n\n join date_dim as date_dim on catalog_sales.cs_sold_date_sk = date_dim.d_date_sk\n\n where cr_order_number is null\n\n group by date_dim.d_year, catalog_sales.cs_item_sk, catalog_sales.cs_bill_customer_sk\n\n );\n\n$ss=\n\n (select date_dim.d_year AS ss_sold_year, store_sales.ss_item_sk ss_item_sk,\n\n store_sales.ss_customer_sk ss_customer_sk,\n\n sum(ss_quantity) ss_qty,\n\n sum(ss_wholesale_cost) ss_wc,\n\n sum(ss_sales_price) ss_sp\n\n from store_sales as store_sales\n\n left join store_returns as store_returns on store_returns.sr_ticket_number=store_sales.ss_ticket_number and 
store_sales.ss_item_sk=store_returns.sr_item_sk\n\n join date_dim as date_dim on store_sales.ss_sold_date_sk = date_dim.d_date_sk\n\n where sr_ticket_number is null\n\n group by date_dim.d_year, store_sales.ss_item_sk, store_sales.ss_customer_sk\n\n );\n\n-- start query 1 in stream 0 using template query78.tpl and seed 1819994127\n\n select\n\nss_sold_year, ss_item_sk, ss_customer_sk,\n\ncast(ss_qty as double)/(coalesce(ws_qty,0)+coalesce(cs_qty,0)) ratio,\n\nss_qty store_qty, ss_wc store_wholesale_cost, ss_sp store_sales_price,\n\ncoalesce(ws_qty,0)+coalesce(cs_qty,0) other_chan_qty,\n\ncoalesce(ws_wc,0)+coalesce(cs_wc,0) other_chan_wholesale_cost,\n\ncoalesce(ws_sp,0)+coalesce(cs_sp,0) other_chan_sales_price\n\nfrom $ss ss\n\nleft join $ws ws on (ws.ws_sold_year=ss.ss_sold_year and ws.ws_item_sk=ss.ss_item_sk and ws.ws_customer_sk=ss.ss_customer_sk)\n\nleft join $cs cs on (cs.cs_sold_year=ss.ss_sold_year and cs.cs_item_sk=ss.ss_item_sk and cs.cs_customer_sk=ss.ss_customer_sk)\n\nwhere (coalesce(ws_qty,0)>0 or coalesce(cs_qty, 0)>0) and ss_sold_year=2001\n\norder by\n\n ss_sold_year, ss_item_sk, ss_customer_sk,\n\n store_qty desc, store_wholesale_cost desc, store_sales_price desc,\n\n other_chan_qty,\n\n other_chan_wholesale_cost,\n\n other_chan_sales_price,\n\n ratio\n\nlimit 100;\n\n\n\n-- end query 1 in stream 0 using template query78.tpl", parameters: 0b ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsByPK+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 11812, MsgBus: 11963 2025-06-24T21:19:38.319554Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628647596758177:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:38.331415Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a48/r3tmp/tmpaicQx0/pdisk_1.dat 2025-06-24T21:19:38.951627Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:38.951711Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:38.969159Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:39.060452Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:39.063324Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628647596757993:2079] 1750799978230005 != 1750799978230008 TServer::EnableGrpc on GrpcPort 11812, node 1 2025-06-24T21:19:39.291949Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:39.291970Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:39.291985Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:39.292086Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:39.299262Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11963 TClient is connected to server localhost:11963 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:40.398441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:40.413775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:43.032337Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628669071595121:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:43.032466Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:43.032914Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628669071595133:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:43.037704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:43.059130Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628669071595135:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:43.114638Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628669071595186:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:43.303279Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628647596758177:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:43.303398Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:43.761208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:43.915043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:43.951117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:43.995681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:44.082237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:44.298985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:44.378800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:44.423783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:44.460693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:44.532727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:44.635266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:44.698434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:44.771042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:45.536580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
60109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.063570Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.064189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.069721Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.070258Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.074074Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.074566Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.084610Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.085309Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.088820Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.089410Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.095139Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.104609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.107100Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.107681Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.111707Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.112995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.117525Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.122517Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.123090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.123801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.134733Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.136515Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.137074Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.139555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.147093Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.149493Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.150068Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.151499Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.157582Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.158162Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.158869Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.159694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.165890Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.166904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038581;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.168701Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.169316Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.173765Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038581;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.175395Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.176554Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:31.182624Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:31.362104Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwzd701d5c12vact6naw1d", SessionId: ydb://session/3?node_id=1&id=NGYyYjc0NjAtYTFkZDUxY2YtZTdlMjA5Mi0zNWVhYzlmOA==, Slow query, duration: 43.425007s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:20:31.737725Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:31.738228Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:31.738864Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519628699136371638:2702];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-24T21:20:31.739297Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpIndexLookupJoin::LeftJoinRightNullFilter+StreamLookup [GOOD] >> KqpIndexLookupJoin::LeftJoinRightNullFilter-StreamLookup >> KqpJoin::JoinLeftPureInnerConverted >> KqpIndexLookupJoin::CheckCastUint64ToInt64+StreamLookupJoin+NotNull [GOOD] >> KqpLimits::CancelAfterRoTxWithFollowerStreamLookup [GOOD] >> KqpJoinOrder::Chain65Nodes >> KqpJoinOrder::TPCDS92+ColumnStore >> KqpJoinOrder::CanonizedJoinOrderTPCC [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastUint64ToInt64+StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 4437, MsgBus: 14558 2025-06-24T21:20:22.859707Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628840511075387:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:22.860190Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039c1/r3tmp/tmp8EPUUu/pdisk_1.dat 2025-06-24T21:20:23.605467Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:23.607421Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628840511075189:2079] 1750800022737880 != 1750800022737883 2025-06-24T21:20:23.654911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:23.655031Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 4437, node 1 2025-06-24T21:20:23.661868Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:20:23.854967Z node 1 :TX_CONVEYOR 
ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:23.935645Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:23.935667Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:23.935673Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:23.935780Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14558 TClient is connected to server localhost:14558 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:25.288363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:25.315804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:20:25.355387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:25.579291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:20:25.862917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:25.953416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:27.839259Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628840511075387:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:27.839339Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:28.497439Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628866280880611:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:28.497545Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:29.043907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:29.084066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:29.166010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:29.205748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:29.241524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:29.286755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:29.372347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:29.507516Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628870575848579:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:29.507599Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:29.507946Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628870575848584:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:29.512264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:29.539469Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628870575848586:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:20:29.645025Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628870575848637:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:31.105429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:20:34.033441Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628887376401664:2079] 1750800033416598 != 1750800033416601 2025-06-24T21:20:34.044951Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:34.045207Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:34.045305Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:34.060985Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2573, node 2 2025-06-24T21:20:34.299852Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:34.299882Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:34.299891Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:34.300053Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:20:34.519802Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63056 TClient is connected to server localhost:63056 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:20:35.344490Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:35.351951Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:20:35.368862Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:35.561765Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:35.906208Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:36.046173Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:38.491748Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628887376401739:2089];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:38.491847Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:38.512024Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628908851239787:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:38.512137Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:38.653636Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:38.705847Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:38.796086Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:38.856013Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:38.913039Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:39.007629Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:39.112325Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:39.235318Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628913146207750:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:39.235418Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:39.238482Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628913146207755:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:39.241999Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:39.263767Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628913146207757:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:20:39.338401Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628913146207808:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:40.801949Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:40.938823Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> RetryPolicy::RetryWithBatching [GOOD] >> OlapEstimationRowsCorrectness::TPCDS78 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCH9 [GOOD] Test command err: Trying to start YDB, gRPC: 1770, MsgBus: 4204 2025-06-24T21:18:36.375756Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628382174063976:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:36.376141Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b10/r3tmp/tmpta1vuf/pdisk_1.dat 2025-06-24T21:18:37.027954Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:37.028180Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:37.033000Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:37.157632Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:37.159320Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628382174063795:2079] 1750799916300373 != 1750799916300376 TServer::EnableGrpc on GrpcPort 1770, node 1 2025-06-24T21:18:37.360702Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:18:37.427757Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:37.427781Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:37.427788Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:37.427924Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4204 TClient is connected to server localhost:4204 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:38.727596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:38.754922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:41.363437Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628382174063976:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:41.363533Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:41.677610Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628403648900921:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:41.677738Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:41.678049Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628403648900933:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:41.682647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:41.710196Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628403648900935:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:41.817529Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628403648900986:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:42.293474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:18:42.618904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628407943868552:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:42.623443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628407943868516:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:42.623637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628407943868516:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:42.623956Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628407943868516:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:42.624087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628407943868516:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:42.624192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628407943868516:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:42.624291Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628407943868516:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:42.624378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628407943868516:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:18:42.624475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628407943868516:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:18:42.624563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037901;self_id=[1:7519628407943868516:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:18:42.624692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628407943868516:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:18:42.624806Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628407943868516:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:18:42.627751Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628407943868552:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:42.627993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628407943868552:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:42.628091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628407943868552:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:42.628207Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628407943868552:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:42.628339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628407943868552:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:42.628432Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628407943868552:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:18:42.628531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628407943868552:2325];tablet_id ... 
5874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.731745Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.732333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.737941Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.738487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.738819Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.739449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.745608Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.746287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.751496Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.752046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.757078Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.763050Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.763737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.767686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.774451Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.775025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.779749Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.780362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.790733Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.791776Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.792373Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.795506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.798655Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.799471Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.809060Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.809636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.811001Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.811537Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.817941Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.823696Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.824337Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.827992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.835104Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.840823Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.841491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.853843Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.940789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039338;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:17.948406Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039338;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:17.986259Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:18.003100Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:18.080207Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwyzcqfeerax29ce3pnva6", SessionId: ydb://session/3?node_id=1&id=NWQyZTcwODItMmM5NDdlODEtNjQ2MjM4NWMtZTk2NGMyZDQ=, Slow query, duration: 44.296261s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:20:18.397211Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:18.397718Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:18.398959Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519628785901051381:12126];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-24T21:20:18.399439Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::CancelAfterRoTxWithFollowerStreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 32756, MsgBus: 27603 2025-06-24T21:16:30.302892Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627841049067089:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:30.308034Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003125/r3tmp/tmpgkUE0w/pdisk_1.dat 2025-06-24T21:16:30.745067Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627841049067044:2079] 1750799790290637 != 1750799790290640 2025-06-24T21:16:30.777618Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:30.786302Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:30.786413Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 32756, node 1 2025-06-24T21:16:30.787698Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:30.892848Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:30.892874Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:30.892884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:30.892994Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27603 2025-06-24T21:16:31.331281Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27603 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:31.538853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:31.575777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:31.779518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:31.959088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:16:32.031820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:33.780966Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627853933970568:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:33.781074Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:34.093009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.167464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.198908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.232795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.308057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.379457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.451929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:34.516736Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627858228938536:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:34.516793Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627858228938541:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:34.516802Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:34.520171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:34.531868Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627858228938543:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:16:34.625612Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627858228938594:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:35.299759Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627841049067089:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:35.299816Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:35.807088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 23972, MsgBus: 65318 2025-06-24T21:16:38.208367Z node 2 :METADATA_PROVIDER WARN ... SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhwzhcyff1yhsj8yxsxcqh9, Create QueryResponse for error on request, msg: 2025-06-24T21:19:53.024695Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhwzhts8mfj6becatnnjt32, Create QueryResponse for error on request, msg: 2025-06-24T21:19:54.767545Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhwzkh50jhy87ffsgtc5ryc, Create QueryResponse for error on request, msg: 2025-06-24T21:19:55.848194Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhwzmjq4bzxyqzvvwqh0183, Create QueryResponse for error on request, msg: 2025-06-24T21:19:57.017325Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhwznpzfpwme38va6mb7xzw, Create QueryResponse for error on request, msg: 2025-06-24T21:19:57.795494Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhwzpfeb7nw4f3krg5an6qz, Create QueryResponse for error on request, msg: 2025-06-24T21:19:58.371420Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhwzq1544b7hf39rtys0z2n, Create QueryResponse for error 
on request, msg: 2025-06-24T21:19:58.925001Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhwzqjf6y3pnk5e5atsw9nm, Create QueryResponse for error on request, msg: 2025-06-24T21:20:01.411796Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhwzszv9f3e75skp4ma95gj, Create QueryResponse for error on request, msg: 2025-06-24T21:20:02.507741Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519628752215671814:2483] TxId: 281474976710847. Ctx: { TraceId: 01jyhwzv246tt8srfm9bvjsxct, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 391ms } {
: Error: Cancelling after 391ms during execution } ] 2025-06-24T21:20:02.507997Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhwzv246tt8srfm9bvjsxct, Create QueryResponse for error on request, msg: 2025-06-24T21:20:04.639472Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhwzx4d7sjwbpk1jt2zthvc, Create QueryResponse for error on request, msg: 2025-06-24T21:20:06.929091Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519628769395541274:2483] TxId: 281474976710860. Ctx: { TraceId: 01jyhwzzbx3n36dfezbf29mcar, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 404ms } {
: Error: Cancelling after 402ms during execution } ] 2025-06-24T21:20:06.929310Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519628769395541279:4101], TxId: 281474976710860, task: 2. Ctx: { TraceId : 01jyhwzzbx3n36dfezbf29mcar. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7519628769395541274:2483], status: CANCELLED, reason: {
: Error: Terminate execution } 2025-06-24T21:20:06.929631Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519628769395541278:4100], TxId: 281474976710860, task: 1. Ctx: { TraceId : 01jyhwzzbx3n36dfezbf29mcar. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7519628769395541274:2483], status: CANCELLED, reason: {
: Error: Terminate execution } 2025-06-24T21:20:06.930395Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhwzzbx3n36dfezbf29mcar, Create QueryResponse for error on request, msg: 2025-06-24T21:20:12.135435Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhx04e1e3gb5evnndj44d04, Create QueryResponse for error on request, msg: 2025-06-24T21:20:16.067832Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhx088nancr8rss9axwmqy0, Create QueryResponse for error on request, msg: 2025-06-24T21:20:16.555390Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhx08qm5mcqzt51j2zec2vr, Create QueryResponse for error on request, msg: 2025-06-24T21:20:17.202385Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhx09btb07y2dt9gs4c3yb6, Create QueryResponse for error on request, msg: 2025-06-24T21:20:27.431878Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhx0kaxcr13zzwrdevtxwfb, Create QueryResponse for error on request, msg: 2025-06-24T21:20:27.991890Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhx0kw97dheercqvnqbqtfr, Create QueryResponse for error on request, msg: 2025-06-24T21:20:28.540375Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519628863884822964:2483] TxId: 281474976710917. Ctx: { TraceId: 01jyhx0md6ekq0qz55r045d8r6, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 466ms } {
: Error: Cancelling after 469ms during execution } ] 2025-06-24T21:20:28.540545Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519628863884822969:4510], TxId: 281474976710917, task: 2. Ctx: { TraceId : 01jyhx0md6ekq0qz55r045d8r6. SessionId : ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7519628863884822964:2483], status: CANCELLED, reason: {
: Error: Terminate execution } 2025-06-24T21:20:28.540805Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519628863884822968:4509], TxId: 281474976710917, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyhx0md6ekq0qz55r045d8r6. SessionId : ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7519628863884822964:2483], status: CANCELLED, reason: {
: Error: Terminate execution } 2025-06-24T21:20:28.541378Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhx0md6ekq0qz55r045d8r6, Create QueryResponse for error on request, msg: 2025-06-24T21:20:30.359756Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhx0p613b0e7ryscvbvpw4e, Create QueryResponse for error on request, msg: 2025-06-24T21:20:33.719486Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhx0ses3k4900tae7vpdg8a, Create QueryResponse for error on request, msg: 2025-06-24T21:20:34.211525Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519628889654627053:2483] TxId: 281474976710932. Ctx: { TraceId: 01jyhx0sy45krg50batxawvkpk, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 482ms } {
: Error: Cancelling after 478ms during execution } ] 2025-06-24T21:20:34.272312Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519628889654627060:4615], TxId: 281474976710932, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jyhx0sy45krg50batxawvkpk. SessionId : ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7519628889654627053:2483], status: CANCELLED, reason: {
: Error: Terminate execution } 2025-06-24T21:20:34.272992Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhx0sy45krg50batxawvkpk, Create QueryResponse for error on request, msg: 2025-06-24T21:20:37.895398Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGRlYzFkMTItOGQ3OWMyZDMtZGM0OTFiMzctMjlhOTVhYTI=, ActorId: [4:7519628455862924254:2483], ActorState: ExecuteState, TraceId: 01jyhx0xgs8rfrgccqp4tyezpb, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> RetryPolicy::RetryWithBatching [GOOD] Test command err: 2025-06-24T21:14:26.154550Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.154588Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.154617Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:14:26.159444Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-24T21:14:26.159502Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.159549Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.170513Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.008217s 2025-06-24T21:14:26.172414Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-24T21:14:26.172454Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.172476Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.172541Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.007629s 2025-06-24T21:14:26.173082Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. 
Description: 2025-06-24T21:14:26.173111Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.173130Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:14:26.173188Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.008233s 2025-06-24T21:14:26.196940Z :TWriteSession_TestPolicy INFO: Random seed for debugging is 1750799666196915 2025-06-24T21:14:26.704687Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627309097443462:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.704765Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:14:26.807040Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627309064671654:2205];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:14:26.807114Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ec7/r3tmp/tmpQdcGEK/pdisk_1.dat 2025-06-24T21:14:27.065795Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:14:27.075387Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:14:27.494796Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:14:27.529158Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:27.529293Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:27.534224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:14:27.534320Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:14:27.543693Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:14:27.552476Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:14:27.553849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28207, node 1 2025-06-24T21:14:27.698927Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:27.811277Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:14:27.890639Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/002ec7/r3tmp/yandexo1yIbs.tmp 2025-06-24T21:14:27.890681Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize 
from file: /home/runner/.ya/build/build_root/2btm/002ec7/r3tmp/yandexo1yIbs.tmp 2025-06-24T21:14:27.896460Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/002ec7/r3tmp/yandexo1yIbs.tmp 2025-06-24T21:14:27.896694Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:14:28.158390Z INFO: TTestServer started on Port 24657 GrpcPort 28207 TClient is connected to server localhost:24657 PQClient connected to localhost:28207 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:14:28.582955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... 2025-06-24T21:14:28.667768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:14:30.732728Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627326277313434:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:30.733061Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:30.733450Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627326277313447:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:14:30.861707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:14:30.891662Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627326277313449:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-24T21:14:30.980188Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627326277313539:2670] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:14:31.378496Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519627326244540996:2275], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:14:31.378815Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NjM0MzZiZTgtZDU2Mzc5NTAtZGQ1OTFiNWEtMzU4ZTQ1ZGI=, ActorId: [2:7519627326244540954:2268], ActorState: ExecuteState, TraceId: 01jyhwnqjfebwpff8m0rbkwx4c, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:14:31.376479Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519627326277313549:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:14:31.381705Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:14:31.397209Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ODk4Mjg3ZjMtYzE0M2QxYWUtZjZlOGU1NzYtNzdkMzU3ZWU=, ActorId: [1:7519627326277313432:2297], ActorState: ExecuteState, TraceId: 01jyhwnqea42c2eben0jvd0dmh, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:14:31.397712Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 ... 6224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:20:41.001248Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 2, partNo: 0, Offset: 1 is stored on disk 2025-06-24T21:20:41.001290Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:20:41.001335Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-06-24T21:20:41.001361Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:20:41.001403Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 4, partNo: 0, Offset: 3 is stored on disk 2025-06-24T21:20:41.001436Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:20:41.001488Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 5, partNo: 0, Offset: 4 is stored on disk 2025-06-24T21:20:41.001516Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-24T21:20:41.001563Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 6, partNo: 0, Offset: 5 is stored on disk 2025-06-24T21:20:41.001594Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:20:41.001632Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 6 is stored on disk 2025-06-24T21:20:41.001668Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:20:41.001714Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 8, partNo: 0, Offset: 7 is stored on disk 2025-06-24T21:20:41.001758Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:20:41.001816Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 9, partNo: 0, Offset: 8 is stored on disk 2025-06-24T21:20:41.001842Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:20:41.001884Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 10, partNo: 0, Offset: 9 is stored on disk 2025-06-24T21:20:41.002282Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:20:41.002336Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-24T21:20:41.002427Z node 17 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=1208, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:20:41.002682Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-24T21:20:41.002879Z node 17 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-24T21:20:41.003502Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 Topic 'rt3.dc1--test-topic' partition 0 user user offset 0 count 1 size 1024000 endOffset 10 max time lag 0ms effective offset 0 2025-06-24T21:20:41.003822Z node 17 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037892' partition 0 offset 0 partno 0 count 10 parts 0 suffix '63' size 1208 2025-06-24T21:20:41.003975Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 1208 count 10 last offset 0, current partition end offset: 10 2025-06-24T21:20:41.004032Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 2025-06-24T21:20:41.004149Z node 17 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 10 parts_count 0 source 1 size 1208 accessed 0 times before, last time 2025-06-24T21:20:41.000000Z 2025-06-24T21:20:41.004206Z node 17 :PERSQUEUE DEBUG: read.h:121: Reading cookie 2. All 1 blobs are from cache. 2025-06-24T21:20:41.004296Z node 17 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 0 offset 0 partno 0 count 10 parts 0 suffix '63' 2025-06-24T21:20:41.004300Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-24T21:20:41.006858Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0] Write session got write response: sequence_numbers: 1 sequence_numbers: 2 sequence_numbers: 3 sequence_numbers: 4 sequence_numbers: 5 sequence_numbers: 6 sequence_numbers: 7 sequence_numbers: 8 sequence_numbers: 9 sequence_numbers: 10 offsets: 0 offsets: 1 offsets: 2 offsets: 3 offsets: 4 offsets: 5 offsets: 6 offsets: 7 offsets: 8 offsets: 9 already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false write_statistics { persist_duration_ms: 12 queued_in_partition_duration_ms: 1 } 2025-06-24T21:20:41.006970Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0] Write session: acknoledged message 1 2025-06-24T21:20:41.007057Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0] Write session: acknoledged message 2 2025-06-24T21:20:41.007117Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0] Write session: acknoledged message 3 2025-06-24T21:20:41.007194Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0] Write session: acknoledged message 4 2025-06-24T21:20:41.007250Z :DEBUG: [/Root] 
MessageGroupId [test-message-group-id] SessionId [test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0] Write session: acknoledged message 5 2025-06-24T21:20:41.007319Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0] Write session: acknoledged message 6 2025-06-24T21:20:41.007381Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0] Write session: acknoledged message 7 2025-06-24T21:20:41.007443Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0] Write session: acknoledged message 8 2025-06-24T21:20:41.007501Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0] Write session: acknoledged message 9 2025-06-24T21:20:41.007543Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0] Write session: acknoledged message 10 2025-06-24T21:20:41.004704Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 10 count 10 size 1188 from pos 0 cbcount 10 2025-06-24T21:20:41.004882Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:964: Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp done, result 1750800040987 queuesize 0 startOffset 0 2025-06-24T21:20:41.011499Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0] Write session: close. Timeout = 0 ms 2025-06-24T21:20:41.011598Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0] Write session will now close 2025-06-24T21:20:41.011705Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0] Write session: aborting 2025-06-24T21:20:41.012538Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0] Write session: gracefully shut down, all writes complete 2025-06-24T21:20:41.012627Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0] Write session: destroy 2025-06-24T21:20:41.017325Z node 17 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 7 sessionId: test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0 grpc read done: success: 0 data: 2025-06-24T21:20:41.017387Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 7 sessionId: test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0 grpc read failed 2025-06-24T21:20:41.017443Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 7 sessionId: test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0 grpc closed 2025-06-24T21:20:41.017488Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 7 sessionId: test-message-group-id|118aa45f-401b563d-ad8cee58-58996de9_0 is DEAD 2025-06-24T21:20:41.018648Z node 17 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T21:20:41.019369Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [17:7519628915321402676:2619] destroyed 
2025-06-24T21:20:41.019448Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCC [GOOD] Test command err: Trying to start YDB, gRPC: 27131, MsgBus: 11212 2025-06-24T21:19:41.395859Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628663528713563:2178];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:41.395902Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a34/r3tmp/tmpu9tHCB/pdisk_1.dat 2025-06-24T21:19:41.992000Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:41.992109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:41.996101Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:42.043285Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628663528713424:2079] 1750799981377822 != 1750799981377825 2025-06-24T21:19:42.062159Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27131, node 1 2025-06-24T21:19:42.336852Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:42.336873Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:42.336880Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:42.336988Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:42.431271Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11212 TClient is connected to server localhost:11212 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:19:43.360954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:43.393809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:19:46.163183Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628685003550551:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:46.163292Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:46.171301Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628685003550563:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:46.180266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:46.196601Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628685003550565:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:19:46.268848Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628685003550616:2337] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:46.399310Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628663528713563:2178];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:46.491958Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:46.799837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:46.994299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.061243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.169056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.256306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.482182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.524580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.597074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.639091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.679217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.769828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.826087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:47.883740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:19:48.965716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
92676Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.298409Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.299129Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038577;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.308026Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038550;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.310133Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038577;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.310701Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.315905Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038518;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.321196Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038518;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.321966Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.324074Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.324556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038507;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.331840Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038507;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.332595Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038457;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.337806Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038457;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.344971Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.345576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.349631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038503;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.355051Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038503;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.359539Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.360871Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.364880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038469;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.372460Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.372983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.375778Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038469;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.376295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038517;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.381422Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038517;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.384541Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.385159Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038489;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.387887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.393310Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038489;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.397320Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.397912Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038467;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.399841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.405529Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.410394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.415436Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038467;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.416086Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038452;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.421645Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038452;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.424193Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.429617Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:32.440683Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:32.623504Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwzgzc7xghggj2qsq577jx", SessionId: ydb://session/3?node_id=1&id=N2M5MjY3OWUtMWE2NWE4YTQtMWE1ZTQ4ZjAtYzIyNWQ5MzM=, Slow query, duration: 40.832591s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:20:33.096463Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:20:33.097049Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:20:33.097712Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519628779492847653:4403];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-24T21:20:33.098110Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> KqpJoinOrder::TPCH12_100 >> KqpJoinOrder::UdfConstantFold+ColumnStore >> KqpJoinOrder::TPCDS90+ColumnStore [GOOD] |96.3%| [TA] $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/test-results/unittest/{meta.json ... results_accumulator.log} |96.3%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCDS78 [GOOD] Test command err: Trying to start YDB, gRPC: 5955, MsgBus: 62340 2025-06-24T21:18:04.991503Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628245447199374:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:05.040664Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b50/r3tmp/tmpbGzJzI/pdisk_1.dat 2025-06-24T21:18:05.799708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:05.799798Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:05.807268Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:05.859050Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:05.875333Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628245447199346:2079] 1750799884987865 != 1750799884987868 TServer::EnableGrpc on GrpcPort 5955, node 1 2025-06-24T21:18:06.047272Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:18:06.187766Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:06.187795Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:06.187819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:06.187938Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62340 TClient is connected to server localhost:62340 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:18:07.285403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:07.300169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:09.833964Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628266922036471:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:09.834120Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:09.842145Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628266922036483:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:09.847896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:09.871747Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628266922036485:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:09.964469Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628266922036536:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:09.995242Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628245447199374:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:09.995309Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:10.350277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:18:10.644349Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628271217004032:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:10.644583Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628271217004032:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:10.644837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628271217004032:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:10.644944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628271217004032:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:10.645090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628271217004032:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:10.645188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628271217004032:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:10.645322Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628271217004032:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:18:10.645446Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628271217004032:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:18:10.645544Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628271217004032:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:18:10.645659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628271217004032:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:18:10.645768Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628271217004032:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:18:10.649629Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628271217004025:2310];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:10.649684Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628271217004025:2310];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:10.649860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628271217004025:2310];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:10.649955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628271217004025:2310];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:10.650055Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628271217004025:2310];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:10.650167Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628271217004025:2310];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:10.650302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628271217004025:2310];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:18:10.650420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628271217004025:2310];tablet ... 
70402Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.676018Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.676616Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.679597Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.680194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.680964Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.681554Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.685781Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.686376Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.686934Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.687545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.692186Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.692891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.695014Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.695671Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.697973Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.698570Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.701427Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.702052Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.703352Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.703983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.707857Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.708486Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.709963Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.710916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.714586Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.715560Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.715631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.716127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.721191Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.721802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.723081Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.724314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.727368Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.727958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.729334Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.729901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.733619Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.734804Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.735896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:44.747571Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:44.917673Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwy1tndzcs0vnq29181g5g", SessionId: ydb://session/3?node_id=1&id=YjIxNmVmNmYtZjRlNzc5NmMtMWYyYTJiZjItNjBhNzhkNGQ=, Slow query, duration: 41.408208s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:45.530736Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:45.531406Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:45.532353Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519628511735208043:7718];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-24T21:19:45.532841Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpIndexLookupJoin::SimpleLeftJoin-StreamLookup >> KqpJoinOrder::TPCDS87+ColumnStore [GOOD] >> KqpJoinOrder::UdfConstantFold-ColumnStore >> KqpFlipJoin::Right_3 >> YdbYqlClient::TableKeyRangesSinglePartition [GOOD] >> KqpJoin::FullOuterJoinSizeCheck >> KqpJoinOrder::SortingsByPrefixWithAttrEquiToPK-RemoveLimitOperator [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS90+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 5915, MsgBus: 10994 2025-06-24T21:18:37.703429Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628385747752982:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:37.703472Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b0e/r3tmp/tmpnXoSWa/pdisk_1.dat 2025-06-24T21:18:38.500517Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:38.500631Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:38.513149Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:38.609625Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:38.619297Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628385747752941:2079] 1750799917666236 != 1750799917666239 TServer::EnableGrpc on GrpcPort 5915, node 1 2025-06-24T21:18:38.799825Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 
2025-06-24T21:18:38.802480Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:38.802488Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:38.802507Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:38.802614Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10994 TClient is connected to server localhost:10994 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:39.883055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:42.374244Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628407222590072:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:42.374391Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:42.379282Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628407222590084:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:42.389107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:42.412589Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628407222590086:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:18:42.476174Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628407222590137:2336] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:42.688879Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628385747752982:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:42.688964Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:42.874720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:18:43.172178Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628411517557629:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:43.172413Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628411517557629:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:43.172687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628411517557629:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:43.172834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628411517557629:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:43.172949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628411517557629:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:43.173072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628411517557629:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:43.173197Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628411517557629:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:18:43.173309Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519628411517557632:2314];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:43.173337Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519628411517557632:2314];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:43.173337Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628411517557629:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:18:43.173442Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519628411517557632:2314];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:43.173442Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628411517557629:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:18:43.173560Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628411517557629:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:18:43.174130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519628411517557632:2314];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:43.174345Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628411517557629:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:18:43.174552Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519628411517557632:2314];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:43.174679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519628411517557632:2314];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:43.174803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519628411517557632:2314];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:18:43.174908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519628411517557632:2314];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:18:43.174996Z node 1 :TX_COLUMN ... 
t_id=72075186224039229;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:12.898807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.900127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039290;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.903047Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:12.903770Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039243;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.909709Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039290;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:12.913439Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039243;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:12.919483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.923278Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039281;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.928703Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039281;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:12.929234Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039218;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.929705Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:12.930405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039209;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.934649Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039218;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:12.935490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039234;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.940127Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039234;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:12.940686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039326;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.947490Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039326;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:12.948081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039203;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.955268Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039209;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:12.956259Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039201;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.956321Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039203;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:12.956886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039220;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.965056Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039201;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:12.965689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039262;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.966679Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039220;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:12.968005Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039268;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.972974Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039268;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:12.973587Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039212;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.976605Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039262;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:12.977102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039232;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.982419Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039212;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:12.983500Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039308;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:12.987462Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039232;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:12.988096Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039308;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:13.165224Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwyz1n1cq3rfqrpery8k4n", SessionId: ydb://session/3?node_id=1&id=NjhhZGE3YWQtMjNjOWIxYzQtZWU2MGJkMWMtNmY0ZjVjMjg=, Slow query, duration: 39.734858s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:20:13.782800Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:20:13.783338Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519628639150861041:7882];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-24T21:20:13.790235Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:20:13.791104Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:20:34.449215Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx0fv4780qnkjffgfhhnhm", SessionId: ydb://session/3?node_id=1&id=NjhhZGE3YWQtMjNjOWIxYzQtZWU2MGJkMWMtNmY0ZjVjMjg=, Slow query, duration: 11.052433s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query90.tpl and seed 2031708268\nselect cast(amc as decimal(15,4))/cast(pmc as decimal(15,4)) am_pm_ratio\n from ( select count(*) amc\n from web_sales cross join household_demographics cross join time_dim cross join web_page\n where ws_sold_time_sk = time_dim.t_time_sk\n and ws_ship_hdemo_sk = household_demographics.hd_demo_sk\n and ws_web_page_sk = web_page.wp_web_page_sk\n and time_dim.t_hour between 9 and 9+1\n and household_demographics.hd_dep_count = 3\n and web_page.wp_char_count between 5000 and 5200) at cross join\n ( select count(*) pmc\n from web_sales cross join household_demographics cross join time_dim cross join web_page\n where ws_sold_time_sk = time_dim.t_time_sk\n and ws_ship_hdemo_sk = household_demographics.hd_demo_sk\n and ws_web_page_sk = web_page.wp_web_page_sk\n and time_dim.t_hour between 16 and 16+1\n and household_demographics.hd_dep_count = 3\n and web_page.wp_char_count between 5000 and 5200) pt\n order by am_pm_ratio\n limit 100;\n ", parameters: 0b >> KqpIndexLookupJoin::LeftJoinRightNullFilter-StreamLookup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TableKeyRangesSinglePartition [GOOD] Test command err: 2025-06-24T21:11:56.994430Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519626665293528299:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:11:56.994931Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00445d/r3tmp/tmpMuVoVp/pdisk_1.dat 2025-06-24T21:11:57.460902Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:11:57.473914Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:11:57.474024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:11:57.492165Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23325, node 1 2025-06-24T21:11:57.746600Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:11:57.746624Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:11:57.746632Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:11:57.746748Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:11:58.004088Z node 1 :TX_CONVEYOR 
ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:64448 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:11:58.218365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:00.492009Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626682473398498:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:00.492154Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:00.839082Z node 1 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [1:7519626682473398534:2621] txid# 281474976715658, Access denied for badguy@builtin on path /Root, with access CreateTable 2025-06-24T21:12:00.839300Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519626682473398534:2621] txid# 281474976715658, issues: { message: "Access denied for badguy@builtin on path /Root" issue_code: 200000 severity: 1 } 2025-06-24T21:12:01.006707Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519626686768365845:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:01.007076Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:01.019856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:02.977376Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519626690320968579:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:02.977429Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00445d/r3tmp/tmp7Ldmzh/pdisk_1.dat 2025-06-24T21:12:03.277438Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:03.305409Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:12:03.305485Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:12:03.311758Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 2025-06-24T21:12:03.312650Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19502, node 4 2025-06-24T21:12:03.427658Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:12:03.427677Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:12:03.427684Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:12:03.427782Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61104 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:12:03.610526Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:12:04.004303Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:12:06.336041Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626707500838750:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:06.336133Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:06.365401Z node 4 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [4:7519626707500838771:2626] txid# 281474976710658, Access denied for badguy@builtin on path /Root, with access CreateTable 2025-06-24T21:12:06.365536Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519626707500838771:2626] txid# 281474976710658, issues: { message: "Access denied for badguy@builtin on path /Root" issue_code: 200000 severity: 1 } 2025-06-24T21:12:06.451306Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519626707500838783:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:06.451418Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:12:06.468419Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:12:08.399209Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519626717759391881:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:12:08.407407Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00445d/r3tmp/tmpCitwkf/pdisk_1.dat 2025-06-24T21:12:08.603649Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:12:08.606016Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown ... anner.cpp:120: TxId: 281474976719648. Ctx: { TraceId: 01jyhx0szv5m47wfm90qnfeknd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:34.048430Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719649. Ctx: { TraceId: 01jyhx0t44d6pn39q6k2yv8zfw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:34.200178Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719650. Ctx: { TraceId: 01jyhx0t8w1dzy62e32d0ysxj3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:34.339997Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719651. Ctx: { TraceId: 01jyhx0tdpcp8rzxf4qmd65b8w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:34.496460Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719652. Ctx: { TraceId: 01jyhx0tjjdksvfq7kgyqnhf68, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:34.712724Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719653. Ctx: { TraceId: 01jyhx0tsr9zb3sajkzvwgke4n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:34.883542Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719654. 
Ctx: { TraceId: 01jyhx0txgfwy2512g6q3ytpc8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:35.072492Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719655. Ctx: { TraceId: 01jyhx0v3s9w6f224xm52bez9t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:35.263485Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719656. Ctx: { TraceId: 01jyhx0v90fbvvg1s54a91p3q5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:35.450084Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719657. Ctx: { TraceId: 01jyhx0vfe6b5h2zpdyd1mqmxx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:35.636673Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719658. Ctx: { TraceId: 01jyhx0vng9fvn4p0r0fwz9fn0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:35.852504Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719659. Ctx: { TraceId: 01jyhx0vtv9n0dtv8tn18s7tbh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:36.003885Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719660. Ctx: { TraceId: 01jyhx0w1v797svqtz5rbkcgsh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:36.148576Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719661. Ctx: { TraceId: 01jyhx0w65ej45qehnwp3hyq1k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:36.309142Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719662. Ctx: { TraceId: 01jyhx0waz4mv7j39m7c5rhyp2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:36.452760Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719663. Ctx: { TraceId: 01jyhx0wfw66w27y6pa4xz35mq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:36.655619Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719664. 
Ctx: { TraceId: 01jyhx0wme0szxbbcqqznr80v2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:36.856544Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719665. Ctx: { TraceId: 01jyhx0wtp0srq17j14bd4b9ck, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:37.004115Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719666. Ctx: { TraceId: 01jyhx0x0q2jerb72eqtm3qca2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:37.231534Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719667. Ctx: { TraceId: 01jyhx0x5y0fmy01c7pqazqd7a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:37.384585Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719668. Ctx: { TraceId: 01jyhx0xc9fr4tday2ry9mfrt3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjBhODI5ZS1jOTI1NWU0Ny01MmFiNWNkZS1mM2ExZWIxYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:20:37.436681Z node 7 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 9 2025-06-24T21:20:37.437669Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:20:40.810361Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519628917919550344:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:40.810420Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00445d/r3tmp/tmp2mm03V/pdisk_1.dat 2025-06-24T21:20:41.606705Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:41.639931Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:41.640028Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:41.652322Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30097, node 10 2025-06-24T21:20:41.915459Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:41.967847Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:41.967872Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: (empty maybe) 2025-06-24T21:20:41.967881Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:41.968059Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14997 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:42.469069Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:45.815310Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519628917919550344:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:45.815411Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:46.962216Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoin::JoinLeftPureInnerConverted [GOOD] >> KqpJoin::JoinMismatchDictKeyTypes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS87+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 9362, MsgBus: 8602 2025-06-24T21:18:25.303957Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628336794041428:2182];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:25.304249Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b29/r3tmp/tmp0iM8hI/pdisk_1.dat 2025-06-24T21:18:25.968180Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:25.968293Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:25.972982Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:26.040853Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628336794041250:2079] 1750799905182187 != 1750799905182190 2025-06-24T21:18:26.074222Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9362, node 1 2025-06-24T21:18:26.327269Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:18:26.329996Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:26.330004Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:26.330011Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:26.330105Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8602 TClient is connected to server localhost:8602 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:27.253195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:27.278535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:29.752809Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628353973911078:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:29.752899Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:29.759233Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628353973911090:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:29.763653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:29.775651Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628353973911092:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:29.843515Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628353973911143:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:30.300121Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628336794041428:2182];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:30.300239Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:30.626587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:18:30.945200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628358268878654:2312];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:30.945497Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628358268878654:2312];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:30.945818Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628358268878654:2312];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:30.945933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628358268878654:2312];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:30.946044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628358268878654:2312];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:30.946149Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628358268878654:2312];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:30.946257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628358268878654:2312];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:18:30.946393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628358268878654:2312];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:18:30.946501Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628358268878654:2312];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:18:30.946667Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628358268878654:2312];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:18:30.946780Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628358268878654:2312];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:18:30.950406Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628358268878655:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:30.950461Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628358268878655:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:30.950705Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628358268878655:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:30.950811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628358268878655:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:30.950913Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628358268878655:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:30.951080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628358268878655:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:30.951224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628358268878655:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:18:30.951339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628358268878655:2313];tablet_id ... 
od=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:06.870156Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:06.870688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:06.876966Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:06.877506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:06.882296Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:06.882859Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:06.883448Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:06.883891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:06.888911Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:06.889560Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:06.895997Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:06.896525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:06.902882Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:06.905801Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:06.906448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:06.907697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:06.917351Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:06.917898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039305;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:06.920491Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:06.921358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:06.930951Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:06.931818Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039305;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:06.932327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:06.936042Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039233;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:06.942421Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:06.942979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:06.945496Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039233;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:06.956478Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:07.006890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039210;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:07.015419Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039210;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:07.024479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039228;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:07.052264Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039228;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:07.140932Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwypfc92et5jvpj3xepvzk", SessionId: ydb://session/3?node_id=1&id=MzFhODVkNGUtZDhhM2YwOC1iZjEzY2M5Yy03NWM0MTA2, Slow query, duration: 42.488522s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:20:08.005864Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:08.008059Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:08.008614Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519628714751221463:11403];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039094; 2025-06-24T21:20:08.012307Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:39.028021Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx0e7be6x30cj3te0e7y09", SessionId: ydb://session/3?node_id=1&id=MzFhODVkNGUtZDhhM2YwOC1iZjEzY2M5Yy03NWM0MTA2, Slow query, duration: 17.284218s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "PRAGMA TablePathPrefix='/Root/test/ds';\n\n-- NB: Subquerys\n$bla1 = (select distinct\n COALESCE(c_last_name,'') as c_last_name,\n COALESCE(c_first_name,'') as c_first_name,\n COALESCE(cast(d_date as date), cast(0 as Date)) as d_date\n from store_sales as store_sales\n cross join date_dim as date_dim\n cross join customer as customer\n where store_sales.ss_sold_date_sk = date_dim.d_date_sk\n and 
store_sales.ss_customer_sk = customer.c_customer_sk\n and d_month_seq between 1221 and 1221+11);\n\n$bla2 = ((select distinct\n COALESCE(c_last_name,'') as c_last_name,\n COALESCE(c_first_name,'') as c_first_name,\n COALESCE(cast(d_date as date), cast(0 as Date)) as d_date\n from catalog_sales as catalog_sales\n cross join date_dim as date_dim\n cross join customer as customer\n where catalog_sales.cs_sold_date_sk = date_dim.d_date_sk\n and catalog_sales.cs_bill_customer_sk = customer.c_customer_sk\n and d_month_seq between 1221 and 1221+11)\n union all\n (select distinct\n COALESCE(c_last_name,'') as c_last_name,\n COALESCE(c_first_name,'') as c_first_name,\n COALESCE(cast(d_date as date), cast(0 as Date)) as d_date\n from web_sales as web_sales\n cross join date_dim as date_dim\n cross join customer as customer\n where web_sales.ws_sold_date_sk = date_dim.d_date_sk\n and web_sales.ws_bill_customer_sk = customer.c_customer_sk\n and d_month_seq between 1221 and 1221+11));\n\n-- start query 1 in stream 0 using template query87.tpl and seed 1819994127\nselect count(*)\nfrom $bla1 bla1 left only join $bla2 bla2 using (c_last_name, c_first_name, d_date)\n;\n\n-- end query 1 in stream 0 using template query87.tpl", parameters: 0b ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftJoinRightNullFilter-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 17600, MsgBus: 18176 2025-06-24T21:20:30.610203Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628871476076773:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:30.623220Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039b9/r3tmp/tmpIhsZqg/pdisk_1.dat 2025-06-24T21:20:31.263475Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628871476076679:2079] 1750800030594846 != 1750800030594849 2025-06-24T21:20:31.282916Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:31.348146Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:31.348247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:31.349662Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17600, node 1 2025-06-24T21:20:31.635598Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:31.636059Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:31.636068Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:31.636079Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:31.636177Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:18176 TClient is connected to server localhost:18176 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:32.680890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:32.722273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:20:32.737132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:20:32.973889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:33.273937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:33.368949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:20:35.607282Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628871476076773:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:35.607343Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:35.826815Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628892950914807:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:35.826928Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:36.507411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:36.543437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:36.577440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:36.616732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:36.653957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:36.734674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:36.798743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:36.915180Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628897245882779:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:36.915280Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:36.915507Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628897245882784:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:36.919652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:36.932870Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628897245882786:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:20:37.037390Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628901540850133:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:38.444918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... maybe) 2025-06-24T21:20:42.231667Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:42.231676Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:42.231758Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15351 TClient is connected to server localhost:15351 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:20:42.724087Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:42.780388Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:42.800598Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:20:42.868696Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:43.062941Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:43.151885Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:46.063381Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628940518266006:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:46.063443Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:46.128687Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:46.186852Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:46.278426Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:46.332914Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:46.421690Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:46.526053Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:46.594774Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:46.700648Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628940518266668:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:46.700740Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:46.701043Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628940518266673:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:46.705002Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:46.721892Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628940518266675:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:20:46.801920Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628940518266726:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:48.400248Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:48.473259Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:48.514898Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:48.551071Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:48.603528Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:48.662567Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsByPrefixWithAttrEquiToPK-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 13117, MsgBus: 2609 2025-06-24T21:19:55.812352Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628721653028982:2210];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:55.812805Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039eb/r3tmp/tmpFSZkYm/pdisk_1.dat 2025-06-24T21:19:56.484949Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:56.485046Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:56.487358Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:56.534409Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628721653028810:2079] 1750799995783261 != 1750799995783264 2025-06-24T21:19:56.552309Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13117, node 1 2025-06-24T21:19:56.677121Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:56.677148Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:56.677155Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:56.677273Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:56.806965Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2609 TClient is connected to server localhost:2609 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:57.998512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:19:58.028189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:20:00.629100Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628743127865941:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:00.629205Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:00.632450Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628743127865953:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:00.642058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:00.663088Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628743127865955:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:20:00.724336Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628743127866006:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:00.803496Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628721653028982:2210];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:00.803600Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:01.387967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:01.609128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:01.691833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:01.741421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:01.803188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:02.044350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:02.086930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:02.120057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:02.167799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:02.221729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:02.301474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:02.361894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:02.413453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:03.207300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperat ... 
=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.051407Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.056940Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.057717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.063933Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.064677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.065045Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.065805Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.071843Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.072491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.077611Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.078158Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.087502Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.088070Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.092661Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.093181Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.093896Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.094489Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.100605Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.101427Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.103115Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.103675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.107708Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.108492Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.114817Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.117814Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.118498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.133046Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.133686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.142812Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.147666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038509;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.157133Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038509;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.157648Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.167011Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.172286Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.172605Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.178345Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.179348Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:44.182737Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.185522Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:44.480581Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwzyt06r6f5h8ptefbanb2", SessionId: ydb://session/3?node_id=1&id=ZTU5ZWZlNTMtMmMyNzkyYTQtYzlmOWVlYzYtZDgzOWYyYzU=, Slow query, duration: 38.527706s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 
NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:20:44.764716Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:44.765209Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:44.765715Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519628833322196947:4522];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-24T21:20:44.766160Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
: Warning: Execution, code: 1060
:4:1: Warning: Given predicate is not suitable for used index: ix_bank_document_exec_dt_accounts, code: 2503
: Warning: Execution, code: 1060
:4:1: Warning: Given predicate is not suitable for used index: ix_bank_document_exec_dt_accounts, code: 2503 >> KqpJoinOrder::CanonizedJoinOrderTPCH10 >> KqpJoinOrder::SortingsComplexOrderBy-RemoveLimitOperator >> KqpJoinOrder::SortingsWithLookupJoin3-RemoveLimitOperator >> TColumnShardTestReadWrite::CompactionSplitGranuleStrKey_PKUtf8 [GOOD] >> KqpJoinOrder::TPCDS34-ColumnStore >> KqpJoinOrder::TPCH9_100 >> KqpIndexLookupJoin::SimpleLeftJoin-StreamLookup [GOOD] >> KqpIndexLookupJoin::SimpleLeftOnlyJoin+StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranuleStrKey_PKUtf8 [GOOD] Test command err: 2025-06-24T21:15:01.357262Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:01.380860Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:01.381181Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:01.389180Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:01.389421Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:01.389684Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:01.389827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:01.389975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:01.390142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:01.390274Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:01.390400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:01.390519Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:01.390659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:01.390803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:01.419192Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:01.419453Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:01.419517Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:01.419687Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:01.419863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:01.419945Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:01.419988Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:01.420128Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:01.420208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:01.420263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:01.420295Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:01.420479Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:01.420549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:01.420603Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:01.420644Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:01.420752Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:01.420816Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:01.420872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:01.420929Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:01.420985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:01.421037Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:01.421072Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:01.421353Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:01.421424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:01.421474Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:01.421657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:01.421732Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:01.421767Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:01.421917Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:01.421968Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:01.422005Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:01.422060Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:01.422107Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:01.422133Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:01.422154Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:01.422514Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=47; 2025-06-24T21:15:01.422625Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=62; 2025-06-24T21:15:01.422707Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=37; 2025-06-24T21:15:01.422766Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=31; 2025-06-24T21:15:01.422854Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:01.422926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:01.422959Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:01.422995Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
4;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=2446;data_size=2437;sum=283954;count=255;size_of_portion=208; 2025-06-24T21:20:56.463590Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=38686; 2025-06-24T21:20:56.463675Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=13; 2025-06-24T21:20:56.464327Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=561; 2025-06-24T21:20:56.464398Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=39758; 2025-06-24T21:20:56.464456Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=39963; 2025-06-24T21:20:56.464555Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=22; 2025-06-24T21:20:56.464874Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=256; 2025-06-24T21:20:56.464921Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=41241; 2025-06-24T21:20:56.465153Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=148; 2025-06-24T21:20:56.465328Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=105; 2025-06-24T21:20:56.465575Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=186; 2025-06-24T21:20:56.465777Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=147; 2025-06-24T21:20:56.468410Z node 1 :TX_COLUMNSHARD 
INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=2557; 2025-06-24T21:20:56.471221Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=2701; 2025-06-24T21:20:56.471336Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=25; 2025-06-24T21:20:56.471421Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=15; 2025-06-24T21:20:56.471484Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=17; 2025-06-24T21:20:56.471612Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=64; 2025-06-24T21:20:56.471707Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=15; 2025-06-24T21:20:56.471848Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=89; 2025-06-24T21:20:56.471920Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=15; 2025-06-24T21:20:56.472039Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=62; 2025-06-24T21:20:56.472176Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=71; 2025-06-24T21:20:56.472357Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=92; 2025-06-24T21:20:56.472414Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=59492; 2025-06-24T21:20:56.472659Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=119665952;raw_bytes=192854450;count=5;records=1855000} inactive {blob_bytes=632703072;raw_bytes=989320282;count=54;records=9818750} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:20:56.472819Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:3133:5088];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:20:56.472915Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:3133:5088];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:20:56.473007Z node 1 
:TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3133:5088];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:20:56.473075Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3133:5088];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:20:56.473246Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:20:56.473391Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-24T21:20:56.473521Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799404438;tx_id=18446744073709551615;;current_snapshot_ts=1750799702359; 2025-06-24T21:20:56.473586Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:20:56.473660Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:20:56.473718Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:20:56.473894Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:20:56.475459Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:3168:5115];tablet_id=9437184;parent=[1:3133:5088];fline=manager.cpp:88;event=ask_data;request=request_id=222;9438184000001={portions_count=59};; 2025-06-24T21:20:56.477887Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3133:5088];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:20:56.478119Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3133:5088];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:20:56.478168Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:20:56.478205Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:20:56.478265Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3133:5088];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:20:56.478401Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3133:5088];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-24T21:20:56.478527Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3133:5088];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799404438;tx_id=18446744073709551615;;current_snapshot_ts=1750799702359; 2025-06-24T21:20:56.478613Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3133:5088];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:20:56.478690Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3133:5088];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:20:56.478760Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3133:5088];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:20:56.478885Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3133:5088];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.999000s; 2025-06-24T21:20:56.478989Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3133:5088];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; |96.4%| [TA] $(B)/ydb/services/ydb/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpFlipJoin::Right_3 [GOOD] >> KqpJoin::JoinMismatchDictKeyTypes [GOOD] >> KqpIndexLookupJoin::CheckAllKeyTypesCast >> KqpJoin::FullOuterJoinSizeCheck [GOOD] >> KqpJoinOrder::TestJoinOrderHintsComplex+ColumnStore >> KqpJoin::FullOuterJoinNotNullJoinKey ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::JoinMismatchDictKeyTypes [GOOD] Test command err: Trying to start YDB, gRPC: 7441, MsgBus: 19000 2025-06-24T21:20:42.116702Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628923453363011:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:42.116937Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00399d/r3tmp/tmpeq2iPA/pdisk_1.dat 2025-06-24T21:20:42.668356Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:42.671376Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628923453362913:2079] 1750800042061524 != 1750800042061527 2025-06-24T21:20:42.704842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:42.704971Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:42.739801Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7441, node 1 2025-06-24T21:20:42.911889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:42.911925Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:42.911935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:42.912065Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:20:43.111421Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19000 TClient is connected to server localhost:19000 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:43.924980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:43.946982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:20:43.963084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:44.236784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:44.523193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:44.658083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:46.807182Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628940633233720:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:46.807276Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:47.103410Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628923453363011:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:47.110502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:47.376497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:47.414238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:47.481426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:47.520198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:47.553851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:47.642363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:47.692183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:47.798075Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519628944928201685:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:47.798148Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:47.798358Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628944928201690:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:47.802532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:47.827346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T21:20:47.827565Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628944928201692:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:20:47.929785Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628944928201743:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:49.789123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is und ... /runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 11012, MsgBus: 20096 2025-06-24T21:20:53.203695Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519628969476249510:2208];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00399d/r3tmp/tmpWvpPGu/pdisk_1.dat 2025-06-24T21:20:53.384662Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:20:53.571834Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:53.572348Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:53.572424Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:53.575369Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519628969476249338:2079] 1750800053164157 != 1750800053164160 2025-06-24T21:20:53.590804Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11012, node 2 2025-06-24T21:20:53.803821Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:53.803847Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:53.803855Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:53.803974Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:20:54.180816Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20096 TClient is connected to server localhost:20096 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:54.716335Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:54.749864Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:54.891671Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:55.222575Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:55.343008Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:20:58.179270Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519628969476249510:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:58.193694Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:58.355269Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628990951087467:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:58.355373Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:58.438614Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:58.502740Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:58.586167Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:58.626182Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:58.704139Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:58.771281Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:58.849546Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:58.921860Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628990951088132:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:58.921944Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:58.922393Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519628990951088137:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:58.926856Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:58.942116Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519628990951088139:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:20:59.019184Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519628995246055486:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:00.342738Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) |96.4%| [TA] {RESULT} $(B)/ydb/services/ydb/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpJoinOrder::FiveWayJoinStatsOverride+ColumnStore >> KqpJoinOrder::TPCDS90-ColumnStore [GOOD] >> KqpIndexLookupJoin::SimpleLeftOnlyJoin+StreamLookup [GOOD] >> KqpJoinOrder::FourWayJoinLeftFirst-ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS90-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 21532, MsgBus: 62198 2025-06-24T21:20:00.485089Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628743536572155:2125];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:00.485123Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039e2/r3tmp/tmps6Q3s6/pdisk_1.dat 2025-06-24T21:20:01.175725Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:01.175827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:01.204849Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:01.206470Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:20:01.206881Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628743536572070:2079] 1750800000473918 != 1750800000473921 TServer::EnableGrpc on GrpcPort 21532, node 1 2025-06-24T21:20:01.376257Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:01.376276Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:01.376283Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:01.376381Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:20:01.515236Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:62198 TClient is connected to server localhost:62198 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:02.434938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:02.488169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:20:05.034142Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628765011409201:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:05.034299Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:05.034718Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628765011409213:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:05.040506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:05.065940Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628765011409215:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:20:05.158809Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628765011409266:2339] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:05.487263Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628743536572155:2125];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:05.487336Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:05.538603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:05.676512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:05.731493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:05.769331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:05.828064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:06.094829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:06.134793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:06.179198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:06.224436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:06.306493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:06.345595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:06.383697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:06.434947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:07.448140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
97001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.102097Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.102667Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.107331Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.107891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.113264Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.114203Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.114838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.116397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.120221Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.120776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.121129Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.121616Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.125891Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.126502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.127121Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.128038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.131980Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.132218Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.132537Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.132714Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.137777Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.138230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.138470Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.139035Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.143037Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.143636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.143709Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.144511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.149162Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.149894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.150096Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.154688Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.181439Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.181464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.186444Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.186485Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.207027Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.212004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038432;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.217498Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038432;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.222798Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:20:47.259457Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx02xj8ft63qbcjtwwf5we", SessionId: ydb://session/3?node_id=1&id=Mjk1OWUwODItNzRlZmNlZTgtODMyZWVmOGUtNGE1NWUxNGQ=, Slow query, duration: 37.096823s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:20:48.029571Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:20:48.030191Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:20:48.030923Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519628855205740646:4680];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038629; 2025-06-24T21:20:48.031481Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::SimpleLeftOnlyJoin+StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 21811, MsgBus: 19097 2025-06-24T21:20:48.565078Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628951868842845:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:48.592228Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00398a/r3tmp/tmpXh3c6B/pdisk_1.dat 2025-06-24T21:20:49.318860Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:49.318988Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:49.325630Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:20:49.336788Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:49.339330Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628951868842646:2079] 1750800048459045 != 1750800048459048 TServer::EnableGrpc on GrpcPort 21811, node 1 2025-06-24T21:20:49.543684Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:49.751678Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:49.751706Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:20:49.751713Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:49.751846Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19097 TClient is connected to server localhost:19097 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:51.012759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:51.035457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:20:51.046998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:51.315767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:51.699839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:20:51.814120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:53.531577Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628951868842845:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:53.531632Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:54.108153Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628977638648065:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:54.108263Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:54.744485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:54.791350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:54.844977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:54.905431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:54.969567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:55.025826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:55.104185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:55.263112Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628981933616021:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:55.263231Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:55.263538Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628981933616026:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:55.267896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:55.282270Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628981933616028:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:20:55.372528Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628981933616079:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:56.857880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... tion TClient is connected to server localhost:6413 2025-06-24T21:21:01.207433Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6413 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:21:01.557601Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:21:01.599600Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:21:01.613252Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:01.744029Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:21:02.013475Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:02.131322Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:05.379316Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629022407227067:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:05.379657Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:05.456997Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:05.519325Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:05.582362Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:05.643340Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:05.706970Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:05.812491Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:05.918007Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:06.023290Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629026702195034:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:06.023402Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:06.027443Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629026702195039:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:06.031872Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:06.049883Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T21:21:06.051930Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629026702195041:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:21:06.105345Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629026702195092:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:07.694300Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:07.769564Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:07.814009Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:07.859526Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:07.936646Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:07.989888Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoin::FullOuterJoinNotNullJoinKey [GOOD] >> KqpJoinOrder::TPCDSEveryQueryWorks+ColumnStore >> KqpJoinOrder::TPCDS96+ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::FullOuterJoinNotNullJoinKey [GOOD] Test command err: Trying to start YDB, gRPC: 25283, MsgBus: 2648 2025-06-24T21:20:51.411228Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628961763173122:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:51.411992Z node 1 :METADATA_PROVIDER ERROR: 
log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003977/r3tmp/tmpAWdJSm/pdisk_1.dat 2025-06-24T21:20:52.214853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:52.227803Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:52.240801Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:52.241933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:20:52.254872Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628961763172941:2079] 1750800051306090 != 1750800051306093 TServer::EnableGrpc on GrpcPort 25283, node 1 2025-06-24T21:20:52.568472Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:52.639644Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:52.639665Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:52.639672Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:52.639768Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2648 TClient is connected to server localhost:2648 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:53.939332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:20:53.976129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:20:53.994992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:54.242793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:54.575678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:54.713976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:56.379880Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628961763173122:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:56.409740Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:57.056166Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628987532978367:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:57.056295Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:57.577984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:57.657626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:57.702110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:57.744759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:57.796866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:57.857887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:57.926663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:58.041311Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628991827946325:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:58.041406Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:58.041747Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628991827946330:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:58.045157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:58.062320Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628991827946332:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:20:58.172433Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628991827946383:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:59.726061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... iles were not loaded 2025-06-24T21:21:03.893367Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629016479795384:2079] 1750800063422318 != 1750800063422321 2025-06-24T21:21:03.928870Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:03.928933Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:03.932401Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18701, node 2 2025-06-24T21:21:04.066238Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:04.066263Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:04.066270Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:04.066372Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62007 2025-06-24T21:21:04.471270Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:62007 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:21:04.909362Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:04.919812Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:04.938271Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:05.042718Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:05.361520Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:05.503644Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:08.479306Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519629016479795565:2193];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:08.479378Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:08.887342Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629037954633511:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:08.887448Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:08.968461Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:09.034048Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:09.098150Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:09.162106Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:09.216555Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:09.314029Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:09.412350Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:09.538990Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629042249601469:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:09.539073Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:09.539526Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629042249601474:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:09.544375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:09.572045Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T21:21:09.572739Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629042249601476:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:21:09.633888Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629042249601527:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:11.505140Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:11.581991Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FourWayJoinLeftFirst-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 22496, MsgBus: 30943 2025-06-24T21:20:07.659875Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628775544636581:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:07.659939Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039d5/r3tmp/tmpd1yQ6u/pdisk_1.dat 2025-06-24T21:20:08.370746Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:08.370884Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:08.373300Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:20:08.498568Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22496, node 1 2025-06-24T21:20:08.691677Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:08.699707Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:08.699722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:08.699736Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:08.699828Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30943 TClient is connected to server localhost:30943 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:09.968455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:12.663242Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628775544636581:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:12.689457Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:13.072291Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628801314440971:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:13.075632Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628801314440960:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:13.076872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:13.083278Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:13.096736Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628801314440974:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:20:13.187358Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628801314441025:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:13.510922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:13.651461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:13.703739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:13.773776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:13.808239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:14.086001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:14.119480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:14.151944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:14.226956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:14.278878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:14.328865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:14.372021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:14.455019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:15.236279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:15.287046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itse ... 
15741Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.316378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.319776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.325117Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.325165Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.325755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.325755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.334578Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.338581Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.339175Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.339449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.348244Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.348767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.351936Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.352424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.354254Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.354888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.364577Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.365142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.366733Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.367221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.371955Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.373268Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.373867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038584;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.378836Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038584;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.379862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.383562Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.388623Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.389221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.392617Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.393413Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.398321Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.402895Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.403627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.407878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.413224Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.417386Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.534025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038508;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.550015Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038508;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.610663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:58.633918Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:58.679763Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx0an8crg27tmy6bp0ccpk", SessionId: ydb://session/3?node_id=1&id=NTk5NWRmMzktZjgxNmQwYjItYTAwMWY2NmYtN2ZmYTRkMTg=, Slow query, duration: 40.590716s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:20:59.117765Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:59.118228Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:59.118941Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519628904393676261:5056];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038629; 2025-06-24T21:20:59.119424Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
>> TColumnShardTestReadWrite::CompactionSplitGranuleStrKey_PKString [GOOD]
>> KqpJoinOrder::TPCHRandomJoinViewJustWorks-ColumnStore [GOOD]
>> KqpIndexLookupJoin::Inner+StreamLookup
>> KqpIndexLookupJoin::JoinWithSubquery+StreamLookup
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranuleStrKey_PKString [GOOD]
Test command err: 2025-06-24T21:15:17.359955Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:15:17.387438Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:15:17.387797Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:15:17.396196Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:15:17.396430Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:15:17.396688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:15:17.396828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:15:17.396974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784:
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:15:17.397103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:15:17.397225Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:15:17.397351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:15:17.397464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:15:17.397585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:15:17.397731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:15:17.427252Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:15:17.427538Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:15:17.427603Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:15:17.427824Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:17.428018Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:15:17.428088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:15:17.428133Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:15:17.428271Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:15:17.428351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:15:17.428405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:15:17.428443Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:15:17.428646Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:15:17.428729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:15:17.428793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:15:17.428841Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:15:17.428967Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:15:17.429027Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:15:17.429083Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:15:17.429139Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:15:17.429195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:15:17.429253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:15:17.429290Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:15:17.429520Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:15:17.429583Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:15:17.429634Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:15:17.429844Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:15:17.429910Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:15:17.429949Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:15:17.430112Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:15:17.430171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:15:17.430224Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:15:17.430311Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:15:17.430387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:15:17.430458Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:15:17.430499Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:15:17.431004Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=59; 2025-06-24T21:15:17.431103Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=38; 2025-06-24T21:15:17.431259Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=59; 2025-06-24T21:15:17.431359Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=46; 2025-06-24T21:15:17.431468Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:15:17.431573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:15:17.431627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:15:17.431687Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
4;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=2446;data_size=2437;sum=283954;count=255;size_of_portion=208; 2025-06-24T21:21:15.525480Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=33839; 2025-06-24T21:21:15.525590Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=12; 2025-06-24T21:21:15.526235Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=575; 2025-06-24T21:21:15.526321Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=34881; 2025-06-24T21:21:15.526375Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=35089; 2025-06-24T21:21:15.526464Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=23; 2025-06-24T21:21:15.526739Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=213; 2025-06-24T21:21:15.526794Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=36324; 2025-06-24T21:21:15.527028Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=162; 2025-06-24T21:21:15.527260Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=162; 2025-06-24T21:21:15.527539Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=200; 2025-06-24T21:21:15.527760Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=153; 2025-06-24T21:21:15.530726Z node 1 :TX_COLUMNSHARD 
INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=2889; 2025-06-24T21:21:15.534381Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=3557; 2025-06-24T21:21:15.534519Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=13; 2025-06-24T21:21:15.534603Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=20; 2025-06-24T21:21:15.534699Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=17; 2025-06-24T21:21:15.534837Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=71; 2025-06-24T21:21:15.534912Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=16; 2025-06-24T21:21:15.535057Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=91; 2025-06-24T21:21:15.535140Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=28; 2025-06-24T21:21:15.535308Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=64; 2025-06-24T21:21:15.535446Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=70; 2025-06-24T21:21:15.535578Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=71; 2025-06-24T21:21:15.535642Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=55536; 2025-06-24T21:21:15.535883Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=119665944;raw_bytes=192854450;count=5;records=1855000} inactive {blob_bytes=632703064;raw_bytes=989320282;count=54;records=9818750} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:21:15.536037Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:3130:5085];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:21:15.536133Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:3130:5085];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:21:15.536251Z node 1 
:TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3130:5085];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:21:15.536316Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3130:5085];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:21:15.536513Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:21:15.536613Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-24T21:21:15.536721Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799420427;tx_id=18446744073709551615;;current_snapshot_ts=1750799718348; 2025-06-24T21:21:15.536783Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:21:15.536844Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:21:15.536918Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:21:15.537063Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:21:15.538566Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:3165:5112];tablet_id=9437184;parent=[1:3130:5085];fline=manager.cpp:88;event=ask_data;request=request_id=222;9438184000001={portions_count=59};; 2025-06-24T21:21:15.541289Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3130:5085];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:21:15.541897Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3130:5085];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:21:15.541940Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:21:15.542002Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:21:15.542078Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3130:5085];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:21:15.542204Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3130:5085];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-24T21:21:15.542295Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3130:5085];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750799420427;tx_id=18446744073709551615;;current_snapshot_ts=1750799718348; 2025-06-24T21:21:15.542371Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3130:5085];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:21:15.542455Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3130:5085];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:21:15.542508Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3130:5085];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:21:15.542659Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3130:5085];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.999000s; 2025-06-24T21:21:15.542740Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3130:5085];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes;
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCHRandomJoinViewJustWorks-ColumnStore [GOOD]
Test command err: Trying to start YDB, gRPC: 19948, MsgBus: 65108 2025-06-24T21:20:16.388491Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628812009067455:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:16.388535Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039c6/r3tmp/tmp2e3x32/pdisk_1.dat 2025-06-24T21:20:17.159680Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:17.159768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:17.161809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0))
VolatileState: Connecting -> Connected 2025-06-24T21:20:17.241323Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19948, node 1 2025-06-24T21:20:17.420555Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:17.535524Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:17.535548Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:17.535555Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:17.535667Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65108 TClient is connected to server localhost:65108 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:18.567756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:21.355126Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628833483904545:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:21.355236Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:21.355438Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628833483904557:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:21.358221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:21.368849Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628833483904559:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:20:21.391505Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628812009067455:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:21.391566Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:21.433048Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628833483904610:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:21.712633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:21.877225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:21.929472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:21.966755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:21.999607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:22.188798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:22.225319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:22.307569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:22.378947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:22.438212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:22.478524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:22.559523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:22.611434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:23.732120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:23.776943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itse ... 
54825Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.655525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038590;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.662002Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038590;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.662684Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.670191Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.675714Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.676412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.680782Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.681272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038578;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.686772Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038578;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.690164Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.690700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.691677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.697304Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.702668Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.703174Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038582;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.707824Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038604;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.713199Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038604;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.713858Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.716821Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038582;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.717299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.725082Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.725617Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.729255Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.729794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.738738Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.739272Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.740772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.741738Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.746992Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.752707Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.753298Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.755771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.762635Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.763288Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038541;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.769691Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.770280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.776215Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038541;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.776784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:08.779860Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.790457Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.960323Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx0jsz92xdecfsdp6vax65", SessionId: ydb://session/3?node_id=1&id=YzUzMWFiN2QtZmYyZWQ4MDMtZTk0OTNjMjEtNDJhOWI0M2Y=, Slow query, duration: 42.528447s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:09.547465Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:09.547971Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:09.548953Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519628988102753873:6034];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038331; 2025-06-24T21:21:09.549412Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::CanonizedJoinOrderTPCH14 [GOOD] >> KqpJoinOrder::TPCDS95-ColumnStore [GOOD] >> KqpJoinOrder::FiveWayJoinWithConstantFoldOpt+ColumnStore >> KqpIndexLookupJoin::RightSemi ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH14 [GOOD] Test command err: Trying to start YDB, gRPC: 21871, MsgBus: 12386 2025-06-24T21:19:16.307465Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628553198574032:2129];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:16.307542Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ab0/r3tmp/tmpl1tWba/pdisk_1.dat 2025-06-24T21:19:16.907298Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628553198573943:2079] 1750799956217818 != 1750799956217821 2025-06-24T21:19:16.941332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:16.941426Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:16.947977Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:16.972314Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21871, node 1 2025-06-24T21:19:17.213459Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:17.213492Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:19:17.213501Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:17.213639Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:17.283300Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12386 TClient is connected to server localhost:12386 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:18.316081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:18.340462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:20.709383Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628570378443778:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:20.709508Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:20.709838Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628570378443790:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:20.714064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:20.742734Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628570378443792:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:20.808396Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628570378443843:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:21.252423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:19:21.259469Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628553198574032:2129];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:21.259539Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:21.501870Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628574673411400:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:21.502092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628574673411400:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:21.502332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628574673411400:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:21.502436Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628574673411400:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:21.502539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628574673411400:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:21.502640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628574673411400:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:21.502757Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628574673411400:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:21.502881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628574673411400:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:21.502985Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628574673411400:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:19:21.503130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628574673411400:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:19:21.511463Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628574673411400:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:19:21.514860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628574673411389:2315];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:21.514915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628574673411389:2315];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:21.515048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628574673411389:2315];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:21.515274Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628574673411389:2315];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:21.515372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628574673411389:2315];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:21.515739Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628574673411389:2315];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:21.515869Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628574673411389:2315];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:21.515989Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628574673411389:2315];tabl ... 
8866Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.195106Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.199904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.204378Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.204964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.207569Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.208302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.214432Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.215542Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.215740Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.216037Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.221981Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.222811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.228216Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.228767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.229277Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.229903Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.238020Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.238794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.238852Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.243542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.245357Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.246053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.249793Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.250335Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.257260Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.257828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.264482Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.265045Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.268753Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.270262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.279928Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.280623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.284821Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.285499Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.291004Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.300277Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.373534Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.377496Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039334;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:02.386215Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.389184Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039334;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:02.583178Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx0asb3eh66rr12wz00zz0", SessionId: ydb://session/3?node_id=1&id=YWM4Yzk4NGEtYjVlZjBjMmItMmJjYjllNmQtZTk0OTc0Y2E=, Slow query, duration: 44.363167s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:02.990563Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:02.991213Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:02.991636Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519628961220526426:11740];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039094; 2025-06-24T21:21:02.992151Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS95-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 10131, MsgBus: 9672 2025-06-24T21:19:56.562780Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628724767693471:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:56.577422Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039e8/r3tmp/tmpjuLNTC/pdisk_1.dat 2025-06-24T21:19:57.264550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:57.264658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:57.283341Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628724767693374:2079] 1750799996546256 != 1750799996546259 2025-06-24T21:19:57.326014Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:57.327141Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10131, node 1 2025-06-24T21:19:57.559711Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:57.592941Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:57.592961Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:57.592968Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:57.593071Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9672 TClient is connected to server localhost:9672 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:59.034597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:59.078482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:20:01.559379Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628724767693471:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:01.559496Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:01.783363Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628746242530504:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:01.783482Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:01.783558Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628746242530514:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:01.801593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:01.832015Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628746242530518:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:20:01.907978Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628746242530569:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:02.351606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:02.619029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:02.672330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:02.730460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:02.796291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:02.991457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:03.033322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:03.071976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:03.104231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:03.158677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:03.200531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:03.255022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:03.328384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:04.289371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperat ... 
anager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.649916Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:47.651906Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038529;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:47.652479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.662573Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:47.665431Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.670978Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:47.671662Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.671662Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.677424Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:47.678252Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.683998Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:47.684561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.692169Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:47.692766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.699007Z node 1 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:47.699856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.705547Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:47.706406Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.709383Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:47.709897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038467;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.711773Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:47.712427Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.716430Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038467;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:47.719349Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:47.721191Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038503;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.727498Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038503;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:47.728272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.732174Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.741513Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:47.742948Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:47.849041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038463;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.855389Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038463;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:47.857718Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:20:47.875509Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:47.959562Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx0048apymdmr0b9gm192s", SessionId: ydb://session/3?node_id=1&id=MTBiZDgzZWMtNjE0ODM1ZDEtNTQ3ZDhlY2UtZTQ3ZDRlMzc=, Slow query, duration: 40.654184s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:20:48.786668Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:48.787276Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:48.791763Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519628780602274750:2785];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038629; 2025-06-24T21:20:48.792317Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:16.086117Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx1nwz4e1qb1jw1g6ze7p3", SessionId: ydb://session/3?node_id=1&id=MTBiZDgzZWMtNjE0ODM1ZDEtNTQ3ZDhlY2UtZTQ3ZDRlMzc=, Slow query, duration: 13.718058s, status: 
STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n-- NB: Subquerys\n$ws_wh =\n(select ws1.ws_order_number ws_order_number,ws1.ws_warehouse_sk wh1,ws2.ws_warehouse_sk wh2\n from web_sales ws1 cross join web_sales ws2\n where ws1.ws_order_number = ws2.ws_order_number\n and ws1.ws_warehouse_sk <> ws2.ws_warehouse_sk);\n-- start query 1 in stream 0 using template query95.tpl and seed 2031708268\n select\n count(distinct ws1.ws_order_number) as `order count`\n ,sum(ws_ext_ship_cost) as `total shipping cost`\n ,sum(ws_net_profit) as `total net profit`\nfrom\n web_sales ws1\n cross join date_dim\n cross join customer_address\n cross join web_site\nwhere\n cast(d_date as date) between cast('2002-4-01' as date) and\n (cast('2002-4-01' as date) + DateTime::IntervalFromDays(60))\nand ws1.ws_ship_date_sk = d_date_sk\nand ws1.ws_ship_addr_sk = ca_address_sk\nand ca_state = 'AL'\nand ws1.ws_web_site_sk = web_site_sk\nand web_company_name = 'pri'\nand ws1.ws_order_number in (select ws_order_number\n from $ws_wh)\nand ws1.ws_order_number in (select wr_order_number\n from web_returns cross join $ws_wh ws_wh\n where wr_order_number = ws_wh.ws_order_number)\norder by `order count`\nlimit 100;\n", parameters: 0b >> KqpJoinOrder::SortingsSimpleOrderByAliasIndexDesc+RemoveLimitOperator >> OlapEstimationRowsCorrectness::TPCH5 >> KqpIndexLookupJoin::Inner+StreamLookup [GOOD] >> KqpIndexLookupJoin::Inner-StreamLookup |96.4%| [TA] $(B)/ydb/core/tx/columnshard/ut_rw/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpIndexLookupJoin::JoinWithSubquery+StreamLookup [GOOD] >> KqpJoinOrder::CanonizedJoinOrderTPCH17 [GOOD] >> KqpIndexLookupJoin::JoinWithSubquery-StreamLookup >> KqpJoinOrder::CanonizedJoinOrderTPCH2 [GOOD] |96.4%| [TA] {RESULT} $(B)/ydb/core/tx/columnshard/ut_rw/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH17 [GOOD] Test command err: Trying to start YDB, gRPC: 20048, MsgBus: 13121 2025-06-24T21:19:26.527807Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628599591936124:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:26.528080Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a73/r3tmp/tmp1J3Ior/pdisk_1.dat 2025-06-24T21:19:27.271950Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:27.272061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:27.292809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:27.385424Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:27.387365Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628599591935991:2079] 1750799966445585 != 1750799966445588 TServer::EnableGrpc on GrpcPort 20048, node 1 2025-06-24T21:19:27.596420Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:27.639757Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:27.639784Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:27.639791Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:27.639931Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13121 TClient is connected to server localhost:13121 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:19:28.864551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:31.523758Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628599591936124:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:31.523858Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:31.719385Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628621066773121:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:31.719519Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:31.719973Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628621066773133:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:31.724592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:31.756100Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628621066773135:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:31.823588Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628621066773186:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:32.209276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:19:32.570097Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628625361740721:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:32.570354Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628625361740721:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:32.570659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628625361740721:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:32.570767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628625361740721:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:32.570859Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628625361740721:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:32.570973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628625361740721:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:32.571085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628625361740721:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:32.574642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628625361740715:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:32.574706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628625361740715:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:32.574969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037896;self_id=[1:7519628625361740715:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:32.575065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628625361740715:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:32.575476Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628625361740721:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:32.575610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628625361740721:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:19:32.575747Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628625361740721:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:19:32.575881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628625361740721:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:19:32.578841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628625361740715:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:32.579072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628625361740715:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:32.579189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628625361740715:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:32.579318Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628625361740715:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:32.579440Z node 1 :TX_COLU ... 
06916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.712979Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.713577Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.717115Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.717691Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.727088Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.727693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.732639Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.733190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.737369Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.737884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.742985Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.747303Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.752670Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.753250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.757276Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.758307Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.764381Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.764943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.776864Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.777465Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.783739Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.784325Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.790993Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.791629Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.796495Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.797088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.806018Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.806580Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.811470Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.813266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.817005Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.817537Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039257;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.824878Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.825729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.830715Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039257;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.835530Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.840255Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.840955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:07.845379Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:07.851306Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:08.228627Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx0h295bashjrhpp35vten", SessionId: ydb://session/3?node_id=1&id=YTQ2OThiZTgtMzA3OGRiNy1kMDFmY2IwZS1jNWVjNjA4Yw==, Slow query, duration: 43.576617s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:09.072200Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:09.083419Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:09.084032Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519628861584977449:7623];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-24T21:21:09.084588Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::TestJoinOrderHintsSimple+ColumnStore [GOOD] >> KqpJoinOrder::SortingsWithLookupJoin2-RemoveLimitOperator [GOOD] >> KqpIndexLookupJoin::RightSemi [GOOD] >> KqpIndexLookupJoin::SimpleInnerJoin+StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH2 [GOOD] Test command err: Trying to start YDB, gRPC: 18485, MsgBus: 5219 2025-06-24T21:19:20.079930Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628574155835382:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:20.091533Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a82/r3tmp/tmpmkhg1F/pdisk_1.dat 2025-06-24T21:19:20.830402Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:20.830504Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:20.831393Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628574155835201:2079] 1750799960030780 != 1750799960030783 2025-06-24T21:19:20.833020Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:20.835358Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18485, node 1 2025-06-24T21:19:21.031282Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:21.035665Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is 
empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:21.035684Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:21.035690Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:21.035785Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5219 TClient is connected to server localhost:5219 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:22.006007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:24.545505Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628591335705028:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:24.545649Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:24.546182Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628591335705040:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:24.550967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:24.571545Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628591335705042:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:24.640375Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628591335705093:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:25.075283Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628574155835382:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:25.075371Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:25.210432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:19:25.512683Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628595630672705:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:25.512937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628595630672705:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:25.513245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628595630672705:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:25.513368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628595630672705:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:25.513470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628595630672705:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:25.513576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628595630672705:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:25.513675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628595630672705:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:25.513826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628595630672705:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:25.513952Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628595630672705:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:19:25.514083Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628595630672705:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:19:25.514201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628595630672705:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:19:25.577767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628595630672619:2315];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:25.577839Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628595630672619:2315];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:25.578137Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628595630672619:2315];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:25.578282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628595630672619:2315];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:25.578399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628595630672619:2315];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:25.578509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628595630672619:2315];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:25.578614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628595630672619:2315];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:25.578735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628595630672619:2315];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:25.578831Z node 1 :TX_COLUMNS ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.157379Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.158250Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.158790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.166783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.172424Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.172963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039370;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.180621Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.181187Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.186608Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039370;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.190521Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.195321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.199822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.209633Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.210221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 
2025-06-24T21:21:04.211390Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.211913Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.225989Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.226539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.228317Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.228842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.243321Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.243875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.244787Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.245348Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039330;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.265744Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.266399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.267237Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039330;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.267781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.283712Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.284495Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.284709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.285291Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.306410Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.307103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.308388Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.321722Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.457664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.460736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.474584Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.479708Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.513582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039368;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:04.540293Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039368;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:04.874801Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx0d4zemhxwshn11zpadwy", SessionId: 
ydb://session/3?node_id=1&id=ZmIzMGIyOTgtNGQ0ZTlmZDYtMzNiM2ZlOGYtYzdkMjNhMWE=, Slow query, duration: 44.234316s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:05.441972Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:05.442746Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:05.446027Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::TPCDS16+ColumnStore >> KqpJoinOrder::CanonizedJoinOrderTPCH16 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TestJoinOrderHintsSimple+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 3860, MsgBus: 27901 2025-06-24T21:19:33.455731Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628627819485598:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:33.455925Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a58/r3tmp/tmpljhP5z/pdisk_1.dat 2025-06-24T21:19:34.095763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:34.095890Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:34.101428Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:34.102640Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:34.105077Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628627819485504:2079] 1750799973372033 != 1750799973372036 TServer::EnableGrpc on GrpcPort 3860, node 1 2025-06-24T21:19:34.417776Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:34.417798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:34.417805Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:34.417908Z node 1 :NET_CLASSIFIER 
ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:34.439326Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27901 TClient is connected to server localhost:27901 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:35.585896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:35.600456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:37.980355Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628644999355333:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:37.980527Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:37.980961Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628644999355345:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:37.985717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:38.004368Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628644999355347:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:38.103111Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628649294322694:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:38.438323Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628627819485598:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:38.438418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:38.702601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:19:38.998022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628649294322938:2310];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:38.998215Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628649294322938:2310];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:38.998511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628649294322938:2310];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:38.998658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628649294322938:2310];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:38.998773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628649294322938:2310];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:38.998886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628649294322938:2310];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:38.999012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628649294322938:2310];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:38.999237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628649294322938:2310];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:38.999356Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628649294322938:2310];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:19:38.999501Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628649294322938:2310];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:19:38.999644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628649294322938:2310];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:19:39.012677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628649294322947:2319];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:39.012733Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628649294322947:2319];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:39.012952Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628649294322947:2319];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:39.013082Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628649294322947:2319];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:39.013195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628649294322947:2319];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:39.013295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628649294322947:2319];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:39.013393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628649294322947:2319];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:39.013489Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628649294322947:2319];tablet ... 
s_local=63;result=not_found; 2025-06-24T21:21:17.090376Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.092461Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.093043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.095023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.098542Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.099120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.100571Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.101166Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.106454Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.106455Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.107036Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.107036Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.114228Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.114779Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.115824Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.116282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.122554Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.123087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.123694Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.124167Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.129737Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.130250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.133455Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.133943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.138236Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.138455Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.138891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.138934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.143976Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.144136Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.144623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.145793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.150154Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.150235Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.150863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.151646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.156604Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.156604Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.157181Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:17.162377Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:17.384853Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx0s577d6pxx5ckhmwmp5r", SessionId: ydb://session/3?node_id=1&id=ZmZiZTU1ZDctMTI5YzNhNjItYTQ0ZDc3NGItNTQ0NmI1OTU=, Slow query, duration: 44.448753s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- 
random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:17.832604Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:17.833177Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:17.833955Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519629027251502911:11504];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039094; 2025-06-24T21:21:17.834441Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
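For readability, the query text from the KQP_SLOW_LOG entry above (the ~44.4s "Slow query" batch) is shown here with its escaped newlines unfolded. This is only a rendering of the statement already captured in the log, not additional DDL:

    CREATE TABLE t1 (
        id1 Int32 NOT NULL,
        PRIMARY KEY (id1)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

    CREATE TABLE t2 (
        id2 Int64 NOT NULL,
        t1_id1 Int64 NOT NULL,
        -- random_field2 Int32
        PRIMARY KEY (id2)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

    CREATE TABLE t3 (
        id3 Int16 NOT NULL,
        -- random_field3 Int32
        PRIMARY KEY (id3)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

The same batch appears again in the later KQP_SLOW_LOG entry for the second test run; it is not repeated here.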
: Warning: Execution, code: 1060
: Warning: Unapplied hint: Rows(R T # 1), code: 4534
: Warning: Execution, code: 1060
: Warning: Unapplied hint: Rows(R T # 1), code: 4534 >> KqpIndexLookupJoin::Inner-StreamLookup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoin2-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 23659, MsgBus: 62994 2025-06-24T21:20:34.350432Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628888063987210:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:34.350718Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039b2/r3tmp/tmpoVpFA6/pdisk_1.dat 2025-06-24T21:20:35.275090Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:35.275278Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:35.277397Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:20:35.308530Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:35.312394Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628888063987029:2079] 1750800034316046 != 1750800034316049 2025-06-24T21:20:35.343346Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 23659, node 1 2025-06-24T21:20:35.671808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:35.671829Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:35.671835Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:35.671941Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62994 TClient is connected to server localhost:62994 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:20:36.769630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:39.351249Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628888063987210:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:39.351314Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:39.700069Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628909538824160:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:39.700158Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628909538824168:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:39.700220Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:39.709168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:39.744242Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628909538824174:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:20:39.811749Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628909538824225:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:40.188334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:40.405626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:40.456110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:40.493227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:40.556877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:40.875827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:40.919231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:40.988726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:41.035824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:41.075336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:41.123257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:41.183283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:41.242125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:42.108172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/ ... 
80789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038544;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.486164Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038544;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.486712Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038468;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.486893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038536;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.487304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.493188Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.493188Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038536;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.494012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.494173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038504;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.500068Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038504;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.500585Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.500777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038474;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.501171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.506899Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.506900Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038474;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.507597Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038466;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.507896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038492;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.513376Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038492;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.513377Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038466;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.514080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038512;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.514815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038532;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.519758Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038532;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.519758Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038512;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.520410Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038470;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.520420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038506;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.526309Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038470;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.526309Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038506;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.527018Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038540;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.527039Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038528;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.533050Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038528;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.533058Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038540;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.533975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038494;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.534041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038498;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.539653Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038494;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.539653Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038498;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.540339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038472;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.540345Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038524;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.546170Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038472;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.546169Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038524;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.546882Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038462;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:24.553012Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038462;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:24.759458Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx14vz03yaj18hwm1t526q", SessionId: ydb://session/3?node_id=1&id=ODU2NzhjNzUtMzNmZjIyYmYtM2U0ZTQwNDAtNGIzZDAwZjE=, Slow query, duration: 39.831598s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:25.153827Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:25.154375Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:25.154912Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519628943898568201:2733];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038629; 2025-06-24T21:21:25.155337Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> OlapEstimationRowsCorrectness::TPCH3 [GOOD] >> KqpJoin::RightSemiJoin_KeyPrefix ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::Inner-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 9315, MsgBus: 9350 2025-06-24T21:21:18.169226Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629079440533563:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:18.169750Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00394f/r3tmp/tmp9OYYzs/pdisk_1.dat 2025-06-24T21:21:18.946673Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:18.946772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:18.955599Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:21:19.032014Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:19.035394Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629079440533466:2079] 1750800078136175 != 1750800078136178 TServer::EnableGrpc on GrpcPort 9315, node 1 2025-06-24T21:21:19.267919Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:19.307678Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:19.307705Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will 
try to initialize from file: (empty maybe) 2025-06-24T21:21:19.307716Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:19.307815Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9350 TClient is connected to server localhost:9350 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:20.570963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:20.604405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:21:20.619559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:20.873841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:21.098493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:21:21.245574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:23.155537Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629079440533563:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:23.225031Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:23.427361Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629100915371585:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:23.427568Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:23.801715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:23.845223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:23.911536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:23.953848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:24.040289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:24.136088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:24.222270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:24.343172Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629105210339549:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:24.343244Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:24.343656Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629105210339554:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:24.353596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:24.367206Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629105210339556:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:21:24.472312Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629105210339611:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:25.902553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work ... maybe) 2025-06-24T21:21:28.855656Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:28.855663Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:28.855770Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21916 2025-06-24T21:21:29.343424Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21916 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:29.740320Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:29.767036Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:21:29.863946Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:30.110275Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:30.207138Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:32.916419Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629139129214683:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:32.916538Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:32.989588Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.051494Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.088353Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.133766Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.194801Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.263030Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.367103Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.469061Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629143424182636:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:33.469147Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:33.469515Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629143424182641:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:33.473565Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:33.488560Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629143424182643:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:21:33.549743Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629143424182694:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:34.879269Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:34.937529Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:35.004919Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:35.105113Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:35.188712Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:35.246215Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::Sortings4Year-RemoveLimitOperator [GOOD] >> KqpJoinOrder::FiveWayJoin+ColumnStore >> KqpJoin::RightSemiJoin_ComplexKey ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH16 [GOOD] Test command err: Trying to start YDB, gRPC: 23723, MsgBus: 61235 2025-06-24T21:19:33.740247Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628629088990842:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:33.740597Z node 1 :METADATA_PROVIDER ERROR: 
log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a5a/r3tmp/tmpj2I0rY/pdisk_1.dat 2025-06-24T21:19:34.361644Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:34.361748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:34.464954Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628629088990632:2079] 1750799973653150 != 1750799973653153 2025-06-24T21:19:34.474541Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:34.489682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23723, node 1 2025-06-24T21:19:34.721800Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:34.735666Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:34.735685Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:34.735692Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:34.735793Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61235 TClient is connected to server localhost:61235 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:35.966463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:19:35.993450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:38.735485Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628629088990842:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:38.736406Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:38.781652Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628650563827761:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:38.781820Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:38.782133Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628650563827773:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:38.786098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:38.805913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:19:38.806155Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628650563827775:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:38.901279Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628650563827828:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:39.294118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:19:39.634428Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628654858795353:2315];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:39.634637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628654858795353:2315];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:39.634647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628654858795321:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:39.634687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628654858795321:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:39.634930Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628654858795353:2315];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:39.634986Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628654858795321:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:39.635071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628654858795353:2315];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:39.635100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628654858795321:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:39.636449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628654858795353:2315];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:39.636609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037901;self_id=[1:7519628654858795353:2315];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:39.636697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628654858795353:2315];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:39.636800Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628654858795353:2315];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:39.636902Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628654858795353:2315];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:19:39.637035Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628654858795353:2315];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:19:39.637143Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628654858795353:2315];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:19:39.643274Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628654858795321:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:39.643524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628654858795321:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:39.643647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628654858795321:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_regis ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.253071Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039289;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.253648Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039291;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.256176Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039287;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.257477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.265578Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.266349Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.267945Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039291;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.268979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.275465Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.282435Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.381344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.390651Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.395182Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.404392Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.404889Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.414441Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.414952Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.424554Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.425036Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.438643Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.444022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.453432Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.454025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.467332Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.467824Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.477156Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.477792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.491250Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.491761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.501112Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.501715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.506247Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.515200Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.516307Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.521188Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.521771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.527025Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.529774Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.530725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.541747Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.617084Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039237;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:12.635759Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039237;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:12.867386Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx0pf306d68jx862jdngzj", SessionId: 
ydb://session/3?node_id=1&id=ZjljN2M3NDEtZTkxZmQzMC1lNGVhZjBjOC03MzMyMjg1Ng==, Slow query, duration: 42.687575s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:13.314039Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:13.314502Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:13.315308Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::TPCDS88+ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCH3 [GOOD] Test command err: Trying to start YDB, gRPC: 15189, MsgBus: 20162 2025-06-24T21:19:32.341176Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628624451677068:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:32.341240Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a5c/r3tmp/tmpKRVDOo/pdisk_1.dat 2025-06-24T21:19:32.949491Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:32.965899Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:32.966011Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:33.004543Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15189, node 1 2025-06-24T21:19:33.195815Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:33.195854Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:33.195887Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:33.196049Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:33.280751Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to 
server localhost:20162 TClient is connected to server localhost:20162 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:34.083115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:34.127412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:36.406418Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628641631546852:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:36.406549Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:36.406996Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628641631546864:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:36.411487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:36.426504Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628641631546866:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:36.510012Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628641631546917:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:37.008260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:19:37.301458Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628645926514447:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:37.301777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628645926514447:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:37.302076Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628645926514447:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:37.302209Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628645926514447:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:37.302320Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628645926514447:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:37.302435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628645926514447:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:37.302531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628645926514447:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:37.302666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628645926514447:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:37.302788Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628645926514447:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:19:37.302947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519628645926514447:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:19:37.303067Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628645926514447:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:19:37.345864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628645926514482:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:37.345980Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628645926514482:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:37.346227Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628645926514482:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:37.346344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628645926514482:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:37.346473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628645926514482:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:37.346608Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628645926514482:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:37.346732Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628645926514482:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:37.346891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628645926514482:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:37.347047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628645926514482:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:19:37.352872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628645926514482:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline= ... 
11855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.217885Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.218856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.222995Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039333;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.223535Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.228605Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.229271Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.231060Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.231639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039257;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.237221Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.237872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.240929Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039257;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.241556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.247447Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.247987Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.248443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.248473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.254028Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.254659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.254692Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.255269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039237;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.263879Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039237;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.264382Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.264706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039199;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.264777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039189;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.272368Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039199;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.273007Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.273046Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039189;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.273639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039320;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.279543Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039320;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.280200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.281156Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.281782Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039229;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.286573Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.287628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.293935Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039229;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.294408Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.297964Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.298511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039259;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:19.308120Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039259;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.312357Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:19.608800Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx0q2fewcj19rr4qjcxr09", SessionId: ydb://session/3?node_id=1&id=YWVkNDQ4YTItN2QwZWM2NjAtZTE2YzU2M2UtMjdjZjMzNjk=, Slow query, duration: 48.808881s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:20.128186Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:20.128803Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:20.129575Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519628959459173974:9648];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039392; 2025-06-24T21:21:20.130058Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::SortingsByPrefixWithAttrEquiToPK+RemoveLimitOperator ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::Sortings4Year-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 24354, MsgBus: 13882 2025-06-24T21:20:38.071770Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628908095541062:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:38.072160Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00399f/r3tmp/tmp4FkN8Q/pdisk_1.dat 2025-06-24T21:20:38.710579Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:38.710704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:38.743322Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628908095540878:2079] 1750800038014660 != 1750800038014663 2025-06-24T21:20:38.754302Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:38.769198Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24354, node 1 2025-06-24T21:20:38.963640Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:38.963666Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:38.963690Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T21:20:38.963841Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:20:39.089382Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13882 TClient is connected to server localhost:13882 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:40.054087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:40.104629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:20:43.021432Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628929570378009:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:43.021589Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:43.021951Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628929570378021:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:43.026855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:43.042764Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628929570378023:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:20:43.067562Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628908095541062:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:43.067665Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:43.118370Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628929570378074:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:43.688033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:43.976018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:44.022043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:44.062123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:44.131023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:44.394860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:44.452384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:44.551325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:44.611773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:44.696156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:44.736452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:44.797479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:44.853855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:46.361721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
75944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.594422Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.595038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.597191Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.597962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.608607Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.609226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.611629Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.612161Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.621599Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.622189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.623396Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.623941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.633389Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.633992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.635755Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.636263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.645175Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.645883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.647987Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.648453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.655069Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.657492Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.658192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.659972Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.668786Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.669391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.674229Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.674957Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.675522Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.680505Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.681140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.686477Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.687549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.687615Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.693661Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.696546Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.696663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.697728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:32.702581Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.719686Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.949597Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx19bqe7rnw4mg4je75j0g", SessionId: ydb://session/3?node_id=1&id=Y2E5NGViYy1mZTlmYzQ4NS0yODY4YTMxNC1kMTY1NzdhMg==, Slow query, duration: 43.421649s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:33.323001Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:33.323675Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:33.324476Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519629092779161964:6003];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038170; 2025-06-24T21:21:33.324990Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpIndexLookupJoin::JoinWithSubquery-StreamLookup [GOOD] >> KqpJoinOrder::FiveWayJoinWithComplexPreds2-ColumnStore >> KqpJoinOrder::TestJoinHint1+ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::JoinWithSubquery-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 4931, MsgBus: 24230 2025-06-24T21:21:19.020040Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629081795463358:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:19.020081Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00394c/r3tmp/tmp1h9c6q/pdisk_1.dat 2025-06-24T21:21:19.853189Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:19.853276Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:19.862059Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:21:19.887316Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629077500496042:2079] 1750800078978495 != 1750800078978498 2025-06-24T21:21:19.890373Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4931, node 1 2025-06-24T21:21:20.103315Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:20.267699Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty 
maybe) 2025-06-24T21:21:20.267718Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:20.267725Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:20.267852Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24230 TClient is connected to server localhost:24230 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:21.433661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:21.491578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:21.698501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:21.940977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:22.049770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:21:24.023275Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629081795463358:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:24.043956Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:24.647885Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629103270301473:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:24.647989Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:25.065505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:25.161547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:25.260694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:25.334710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:25.385259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:25.469487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:25.548692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:25.712385Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629107565269437:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:25.712480Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:25.712878Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629107565269442:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:25.717861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:25.738195Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629107565269444:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:21:25.827878Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629107565269495:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:27.289225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:27.342421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part ... RN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:31.207947Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:21:31.527297Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12515 TClient is connected to server localhost:12515 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:32.122209Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:32.131017Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:32.150507Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:21:32.256803Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:21:32.629488Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:32.825897Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:36.461943Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629155589242402:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:36.462037Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:36.565868Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:36.637773Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:36.677559Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:36.733190Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:36.800990Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:36.907662Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:37.001353Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:37.091584Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629159884210359:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:37.091693Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:37.092002Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629159884210364:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:37.096391Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:37.117929Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629159884210366:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:21:37.182181Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629159884210417:3435] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:39.140124Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:39.218373Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:39.290557Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:39.379556Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:39.456644Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:39.548268Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::CanonizedJoinOrderTPCH11 [GOOD] >> KqpIndexLookupJoin::SimpleInnerJoin-StreamLookup >> KqpIndexLookupJoin::SimpleInnerJoin+StreamLookup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS88+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 22477, MsgBus: 1530 2025-06-24T21:17:59.837752Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628225229147245:2177];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:59.837813Z node 1 
:METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b58/r3tmp/tmpKrKizZ/pdisk_1.dat 2025-06-24T21:18:00.499329Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:00.499435Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:00.517630Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:18:00.531276Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:00.533714Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628225229147094:2079] 1750799879751994 != 1750799879751997 TServer::EnableGrpc on GrpcPort 22477, node 1 2025-06-24T21:18:00.831789Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:00.831811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:00.831830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:00.831948Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:18:00.847532Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1530 TClient is connected to server localhost:1530 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:01.713955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:18:01.782686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:04.828620Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628225229147245:2177];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:04.828686Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:05.003602Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628250998951520:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:05.003742Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:05.004299Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628250998951532:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:05.009165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:05.033272Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628250998951534:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:05.103877Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628250998951585:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:05.661294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:18:06.057558Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628250998951854:2327];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:06.057818Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628250998951854:2327];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:06.058107Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628250998951854:2327];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:06.058226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628250998951854:2327];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:06.058326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628250998951854:2327];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:06.058428Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628250998951854:2327];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:06.058535Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628250998951854:2327];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:18:06.058636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628250998951854:2327];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:18:06.058732Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628250998951854:2327];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:18:06.058850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037901;self_id=[1:7519628250998951854:2327];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:18:06.058952Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628250998951854:2327];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:18:06.135459Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628250998951784:2314];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:06.135553Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628250998951784:2314];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:06.135803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628250998951784:2314];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:06.135912Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628250998951784:2314];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:06.136014Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628250998951784:2314];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:06.136113Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628250998951784:2314];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:06.136209Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628250998951784:2314];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:18:06.136307Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628250998951784:2314];tablet_ ... 
5-06-24T21:19:43.607425Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:43.608172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:43.610022Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039300;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:43.610619Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:43.614505Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:43.620948Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:43.621505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:43.623424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039240;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:43.632103Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:43.632937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:43.633444Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039240;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:43.633895Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:43.639087Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:43.639789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:43.645837Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:43.646355Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:19:43.651936Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:43.660318Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:19:43.901282Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwxyzxezvhjyxj84tncpzv", SessionId: ydb://session/3?node_id=1&id=YWVhMjE2NjgtZDdkMzNlYTQtOTcyMWE4MzAtYWI5Y2EzYmM=, Slow query, duration: 43.295361s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:19:44.780099Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:44.780233Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:19:44.781031Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:27.441384Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx0p9fcrycb00x92bfynyp", SessionId: ydb://session/3?node_id=1&id=YWVhMjE2NjgtZDdkMzNlYTQtOTcyMWE4MzAtYWI5Y2EzYmM=, Slow query, duration: 57.440656s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query88.tpl and seed 318176889\nselect *\nfrom\n (select count(*) h8_30_to_9\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 8\n and time_dim.t_minute >= 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and 
store.s_store_name = 'ese') s1 cross join\n (select count(*) h9_to_9_30\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 9\n and time_dim.t_minute < 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s2 cross join\n (select count(*) h9_30_to_10\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 9\n and time_dim.t_minute >= 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s3 cross join\n (select count(*) h10_to_10_30\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 10\n and time_dim.t_minute < 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s4 cross join\n (select count(*) h10_30_to_11\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 10\n and time_dim.t_minute >= 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s5 cross join\n (select count(*) h11_to_11_30\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 11\n and time_dim.t_minute < 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s6 cross join\n (select count(*) h11_30_to_12\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 11\n 
and time_dim.t_minute >= 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s7 cross join\n (select count(*) h12_to_12_30\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 12\n and time_dim.t_minute < 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s8\n;", parameters: 0b ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::SimpleInnerJoin+StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 11543, MsgBus: 4353 2025-06-24T21:21:24.631574Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629105163057026:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:24.639381Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003947/r3tmp/tmpEfEocs/pdisk_1.dat 2025-06-24T21:21:25.534735Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629105163056841:2079] 1750800084562742 != 1750800084562745 2025-06-24T21:21:25.560781Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:25.618027Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:25.644825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:25.644933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:25.687576Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:21:25.695393Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 11543, node 1 2025-06-24T21:21:26.071814Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:26.071837Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:26.071843Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:26.071953Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:4353 TClient is connected to server localhost:4353 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:27.308588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:27.350770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:27.649664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:27.972539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:28.205948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:21:29.633679Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629105163057026:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:29.644465Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:30.373092Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629130932862255:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:30.373186Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:30.849363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:30.907501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:30.944784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:30.980263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:31.024413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:31.092249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:31.184485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:31.310261Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629135227830209:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:31.310334Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:31.310552Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629135227830214:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:31.318063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:31.331268Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629135227830216:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:21:31.403079Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629135227830269:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:32.793891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB ... d to server localhost:16861 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:37.512818Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:37.539344Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:37.561537Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:37.665302Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:21:37.867751Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:37.999506Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:40.703306Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629172235147562:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:40.703406Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:40.770852Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:40.821962Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:40.888802Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:40.947600Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519629150760309512:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:40.947651Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:40.951486Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:41.015786Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:41.091349Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:41.187409Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:41.286790Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519629176530115520:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:41.286903Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:41.287733Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629176530115525:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:41.292721Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:41.326336Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629176530115527:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:21:41.414233Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629176530115579:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:42.888033Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:42.947920Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:43.009404Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:43.062128Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:43.121022Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:43.169410Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::FourWayJoinWithPredsAndEquivAndLeft+ColumnStore [GOOD] >> KqpJoinOrder::FiveWayJoinWithPredsAndEquiv+ColumnStore >> KqpJoinOrder::UdfConstantFold-ColumnStore [GOOD] >> KqpJoinOrder::CanonizedJoinOrderTPCH15 [GOOD] >> KqpFlipJoin::RightOnly_1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH11 [GOOD] Test command err: Trying to start YDB, gRPC: 20049, MsgBus: 18775 2025-06-24T21:19:38.215410Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628651521363769:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:38.216039Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a4e/r3tmp/tmpIimEx5/pdisk_1.dat 2025-06-24T21:19:38.873645Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:38.878071Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:38.890084Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:38.952720Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:38.955265Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628651521363559:2079] 1750799978133658 != 1750799978133661 TServer::EnableGrpc on GrpcPort 20049, node 1 2025-06-24T21:19:39.257503Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:39.258017Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:39.258026Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:39.258034Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:39.258138Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18775 TClient is connected to server localhost:18775 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:40.169139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:19:40.212610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:42.694107Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628668701233390:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:42.694230Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:42.694653Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628668701233402:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:42.699728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:42.723412Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628668701233404:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:42.818340Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628668701233455:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:43.199984Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628651521363769:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:43.218815Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:43.270515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:19:43.634170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628672996200999:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:43.636141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628672996201003:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:43.636450Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628672996201003:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:43.636752Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628672996201003:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:43.636878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628672996201003:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:43.636978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628672996201003:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:43.637084Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628672996201003:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:43.637181Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628672996201003:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:43.637298Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628672996201003:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:43.637434Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628672996201003:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:19:43.637588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628672996201003:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:19:43.637709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628672996201003:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:19:43.639744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628672996200999:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:43.639950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628672996200999:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:43.640040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628672996200999:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:43.640146Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628672996200999:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:43.640239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628672996200999:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:43.640340Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628672996200999:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:43.640451Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628672996200999:2312];tabl ... 
9555Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.720242Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.727557Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.728193Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.734616Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.735211Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.741800Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.742680Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.746993Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.750566Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.754945Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.756105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.761900Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.762880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039291;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.767904Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039291;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.768561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.775451Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.775842Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.776979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.781235Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.781861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.782973Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.783594Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.787653Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.788672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.789213Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.789762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.794162Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.795088Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.795702Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.795727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.801693Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.802441Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.803025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.803264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.808652Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.808652Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.809262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.809290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.815034Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.815035Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.970246Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx0x5qfrdmypac511v10va", SessionId: ydb://session/3?node_id=1&id=YTAwMGI0N2YtNTY3NjkyZjAtNDk0ZGNmN2QtNTFhYTY0MA==, Slow query, duration: 46.921447s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:24.320959Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:24.321330Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519629046658414130:11602];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039094; 2025-06-24T21:21:24.321331Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:24.321753Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::CanonizedJoinOrderTPCH13 [GOOD] >> KqpJoinOrder::FiveWayJoinWithPreds+ColumnStore [GOOD] >> KqpJoinOrder::TPCDS94-ColumnStore [GOOD] >> KqpJoinOrder::FiveWayJoinWithConstantFoldOpt-ColumnStore >> KqpJoinOrder::FiveWayJoinWithConstantFold+ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::UdfConstantFold-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 30129, MsgBus: 63735 2025-06-24T21:20:49.694581Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628953497427504:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:49.694741Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003985/r3tmp/tmplQhdHc/pdisk_1.dat 2025-06-24T21:20:50.541502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:50.541617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:50.617897Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:20:50.621729Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:50.623664Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:50.631408Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628953497427366:2079] 1750800049625695 != 1750800049625698 TServer::EnableGrpc on GrpcPort 30129, node 1 2025-06-24T21:20:51.003671Z node 1 :NET_CLASSIFIER 
WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:51.003693Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:51.003700Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:51.003816Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63735 TClient is connected to server localhost:63735 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:52.126507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:52.140237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:20:54.663248Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628953497427504:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:54.663322Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:54.832309Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628974972264492:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:54.832436Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:54.833027Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628974972264504:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:54.837722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:54.910043Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628974972264506:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:20:54.975139Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628974972264558:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:55.356042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:55.564791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:55.650898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:55.694070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:55.731272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.071057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.167763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.243575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.343278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.379868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.420490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.506636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.551476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:57.359421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
40.364706Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.365213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.367812Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.374818Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.377627Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.378127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.383774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.384847Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.385486Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.389875Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.390842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.391585Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.392179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.396597Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.397269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.397617Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.398160Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.403370Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.403379Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.404017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.404017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.409773Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.409849Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.410457Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.410638Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.416636Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.416678Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.417321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.417373Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.423173Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.423319Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.423865Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.423865Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.430091Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.431607Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.432581Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.433050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.443056Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.443683Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.445162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038442;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:40.455322Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038442;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:40.635515Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx1kkpfkg26pxg6s51saat", SessionId: ydb://session/3?node_id=1&id=MTQ2MzE0LWU3YjJjZThlLTZmMGEwMDAzLWZlNTNmOTM=, Slow query, duration: 40.612756s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:41.168004Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:41.168623Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:41.169751Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519629026511880762:3309];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-24T21:21:41.170323Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoin::RightSemiJoin_KeyPrefix [GOOD] >> KqpJoin::RightSemiJoin_SecondaryIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FourWayJoinWithPredsAndEquivAndLeft+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 15398, MsgBus: 17456 2025-06-24T21:19:39.787905Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628654853398420:2129];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:39.788099Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a44/r3tmp/tmpVapTb1/pdisk_1.dat 2025-06-24T21:19:40.503891Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:40.504000Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:40.508638Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:40.552556Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:40.555325Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628654853398331:2079] 1750799979733291 != 1750799979733294 TServer::EnableGrpc on GrpcPort 15398, node 1 2025-06-24T21:19:40.801977Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:40.802634Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:40.802650Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:40.802697Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:40.802823Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17456 TClient is connected to server localhost:17456 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:41.724502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:41.756527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:44.336052Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628676328235463:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:44.336163Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:44.336591Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628676328235475:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:44.341715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:44.356234Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628676328235477:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:44.460018Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628676328235528:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:44.863887Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628654853398420:2129];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:44.864247Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:44.980638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:19:45.703547Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628680623203061:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:45.703547Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628680623203053:2311];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:45.703790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628680623203061:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:45.704098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628680623203061:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:45.704223Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628680623203061:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:45.704337Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628680623203061:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:45.704443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628680623203061:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:45.704615Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628680623203061:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:45.704735Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628680623203061:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:45.704856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628680623203061:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:19:45.704985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628680623203061:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:19:45.705100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628680623203061:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:19:45.705292Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628680623203053:2311];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:45.705501Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628680623203053:2311];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:45.705604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628680623203053:2311];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:45.705698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628680623203053:2311];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:45.705805Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628680623203053:2311];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:45.705938Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628680623203053:2311];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:45.706064Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628680623203053:2311];tabl ... 
34492Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039336;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:22.936743Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039368;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:22.937314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039354;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:22.948342Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039354;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:22.948947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039358;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:22.951537Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039336;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:22.952095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039318;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:22.962401Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039358;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:22.962981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:22.967829Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039318;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:22.968506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039312;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:22.977093Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:22.977706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:22.979363Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039312;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:22.979901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039372;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:22.986733Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:22.989226Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039372;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:22.989816Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039308;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:22.991486Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:22.998808Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039308;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.000619Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.001024Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039234;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.003646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039304;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.009667Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039234;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.010279Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039320;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.012473Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039304;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.012882Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039276;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.019078Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039320;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.021369Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039276;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.021984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039298;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.023588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039330;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.027435Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039330;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.028307Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039314;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.031711Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039298;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.032197Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039350;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.034471Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039314;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.035865Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039266;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.038366Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039350;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.038933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039244;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:23.042019Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039266;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.044755Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039244;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:23.207522Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx0y78147rq0kje24jdrx8", SessionId: ydb://session/3?node_id=1&id=MTIxOWI2ZTgtZGY1ODU2YmYtMjYxNTVjYWYtZGIyOWNmZTg=, Slow query, duration: 45.084407s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:23.992453Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:23.993039Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:23.993609Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519628981270960244:9547];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-24T21:21:23.994066Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpIndexLookupJoin::LeftSemiJoinWithLeftFilter+StreamLookup >> KqpJoinOrder::CanonizedJoinOrderTPCH19 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS94-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 30998, MsgBus: 14267 2025-06-24T21:20:25.489507Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628851009058274:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:25.489808Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039be/r3tmp/tmpUf5OyS/pdisk_1.dat 2025-06-24T21:20:26.184984Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:26.187262Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628851009058079:2079] 1750800025453080 != 1750800025453083 2025-06-24T21:20:26.225674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:26.225755Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 30998, node 1 2025-06-24T21:20:26.230452Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:20:26.506208Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:26.506752Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:26.506759Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:26.506769Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:26.506873Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14267 TClient is connected to server localhost:14267 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:27.633041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:30.057302Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628872483895206:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:30.057448Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:30.058040Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628872483895218:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:30.062897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:30.087305Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628872483895220:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:20:30.160026Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628872483895271:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:30.489078Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628851009058274:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:30.489161Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:30.789872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:30.959355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:31.008656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:31.052075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:31.096050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:31.310851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:31.372196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:31.463443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:31.542704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:31.587619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:31.631054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:31.713778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:31.770793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:32.684325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/ ... 
ent=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.568144Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:16.572059Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038513;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.572544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:16.577026Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.577624Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038577;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:16.581742Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.582343Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038497;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:16.588370Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038577;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.588818Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:16.594042Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038497;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.594483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038517;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:16.601984Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.602462Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038485;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:16.619580Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038485;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.620055Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:16.624011Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038517;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.624481Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:16.632251Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.632781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:16.636816Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.637263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:16.637582Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.638113Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038515;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:16.642997Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038515;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.646912Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.647924Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038449;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:16.651120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:16.657826Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038449;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.658871Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:16.660112Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.660698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038451;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:16.665743Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.670457Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038451;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.675607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:16.680786Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:16.947511Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx0vsg1ah5zxswqjjf29qz", SessionId: ydb://session/3?node_id=1&id=OThlNGMwMS1mNDY4OGNmNC0yODlkNWY1My0zYzA4NzgzNA==, Slow query, duration: 41.314158s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:17.595514Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:17.596001Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:17.596525Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519629022807777209:5946];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038170; 2025-06-24T21:21:17.596919Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:43.051406Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx2ha67nvnwnn2wxe76r4t", SessionId: ydb://session/3?node_id=1&id=OThlNGMwMS1mNDY4OGNmNC0yODlkNWY1My0zYzA4NzgzNA==, Slow query, duration: 12.611883s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n\n-- NB: Subquerys\n$bla1 = (select ws_order_number\n from web_sales\n group by ws_order_number\n having COUNT(DISTINCT ws_warehouse_sk) > 1);\n\n-- start query 1 in stream 0 using template query94.tpl and seed 2031708268\nselect\n count(distinct ws1.ws_order_number) as `order count`\n ,sum(ws_ext_ship_cost) as `total shipping cost`\n ,sum(ws_net_profit) as `total net profit`\nfrom\n web_sales ws1\n cross join date_dim\n cross join customer_address\n cross join web_site\n left semi join $bla1 bla1 on (ws1.ws_order_number = bla1.ws_order_number)\n left only join web_returns on (ws1.ws_order_number = web_returns.wr_order_number)\nwhere\n cast(d_date as date) between cast('1999-4-01' as date) and\n (cast('1999-4-01' as date) + DateTime::IntervalFromDays(60))\nand ws1.ws_ship_date_sk = d_date_sk\nand ws1.ws_ship_addr_sk = ca_address_sk\nand ca_state = 'NE'\nand ws1.ws_web_site_sk = web_site_sk\nand web_company_name = 'pri'\norder by `order count`\nlimit 100;\n", parameters: 0b ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH13 [GOOD] Test command err: Trying to start YDB, gRPC: 4829, MsgBus: 29194 2025-06-24T21:19:47.331765Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628688266591119:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:47.345318Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a00/r3tmp/tmpCx6zRf/pdisk_1.dat 2025-06-24T21:19:48.031967Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:48.032067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:48.034907Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:48.107702Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:48.109996Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628688266591090:2079] 1750799987297789 != 1750799987297792 TServer::EnableGrpc on GrpcPort 4829, node 1 2025-06-24T21:19:48.255348Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:48.255369Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:48.255375Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:48.255506Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:48.301313Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29194 TClient is connected to server localhost:29194 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:49.184513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:49.200334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:51.695489Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628705446460921:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:51.695577Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628705446460933:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:51.695628Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:51.704212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:51.735383Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628705446460935:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:51.839676Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628705446460986:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:52.335253Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628688266591119:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:52.335339Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:52.339993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:19:52.676124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628709741428488:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:52.676409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628709741428488:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:52.676670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628709741428488:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:52.676798Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628709741428488:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:52.676898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628709741428488:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:52.677002Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628709741428488:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:52.677135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628709741428488:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:52.677242Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628709741428488:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:52.677349Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628709741428488:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:19:52.677481Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628709741428488:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:19:52.677617Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628709741428488:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:19:52.681511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628709741428565:2323];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:52.681574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628709741428565:2323];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:52.681799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628709741428565:2323];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:52.681966Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628709741428565:2323];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:52.682067Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628709741428565:2323];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:52.682200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628709741428565:2323];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:52.682341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628709741428565:2323];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:52.682460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628709741428565:2323];tablet ... 
03777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.310301Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.311526Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039362;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.311628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.312511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039310;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.318757Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039310;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.322081Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.322873Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039342;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.327077Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.331752Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039342;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.332295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039322;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.337806Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039322;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.338389Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.341611Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.342145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.345490Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.346184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039302;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.348399Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.349112Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.355888Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.356562Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.359290Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039302;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.359825Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.363050Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.363732Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.366423Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.367074Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.369772Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.370664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.373067Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.373699Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.376885Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.378350Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.379541Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.380246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039374;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.384850Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.385384Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039374;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.385640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039334;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.388920Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.392027Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039334;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.394811Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.686441Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx14t0e4yefjqksm0mzq6b", SessionId: ydb://session/3?node_id=1&id=MWMyNmViNDAtMWI4ZWIzOWEtZGQ1MmNjMzItMjZkNzI4YQ==, Slow query, duration: 46.821870s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:32.160912Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:32.161497Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:32.162077Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519628945964665321:7660];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224039392; 2025-06-24T21:21:32.162542Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH15 [GOOD] Test command err: Trying to start YDB, gRPC: 27793, MsgBus: 15756 2025-06-24T21:19:42.883889Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628665791875326:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:42.895645Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a14/r3tmp/tmpfWhKXQ/pdisk_1.dat 2025-06-24T21:19:43.640715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:43.640807Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:43.654439Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:43.660384Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628665791875106:2079] 1750799982853653 != 1750799982853656 2025-06-24T21:19:43.680170Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27793, node 1 2025-06-24T21:19:43.879645Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:43.879674Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:43.879682Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:43.879798Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:43.890363Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15756 TClient is connected to server localhost:15756 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:44.901763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:44.925796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:47.410161Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628687266712234:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:47.410285Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:47.410697Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628687266712246:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:47.414359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:47.439521Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628687266712248:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:47.511664Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628687266712299:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:47.887896Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628665791875326:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:47.888016Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:47.995020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:19:48.322355Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628691561679869:2318];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:48.325127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628691561679841:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:48.325358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628691561679841:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:48.325640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628691561679841:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:48.325765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628691561679841:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:48.325868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628691561679841:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:48.325971Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628691561679841:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:48.326057Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628691561679841:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:48.326158Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628691561679841:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:48.326257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628691561679841:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:19:48.326391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628691561679841:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:19:48.326515Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628691561679841:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:19:48.330972Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628691561679869:2318];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:48.331218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628691561679869:2318];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:48.331333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628691561679869:2318];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:48.331442Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628691561679869:2318];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:48.331541Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628691561679869:2318];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:48.331656Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628691561679869:2318];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:48.331771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628691561679869:2318];tabl ... 
985327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039330;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:25.989596Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:25.990141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:25.994942Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039330;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:25.999246Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:25.999812Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:26.000154Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039362;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:26.006511Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039362;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:26.010077Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:26.010860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039320;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:26.016162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039356;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:26.022861Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039356;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:26.024900Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039320;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:26.025525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:26.031057Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:26.031895Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039358;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:26.035267Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:26.037990Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039358;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:26.038636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:26.041375Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:26.042098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:26.048960Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:26.049558Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:26.059720Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:26.060294Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:26.060878Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:26.061402Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:26.066336Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:26.066998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:26.073465Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:26.074221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:26.080711Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:26.080934Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:26.081425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:26.081425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:26.093063Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:26.096532Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:26.344198Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx129b1x0b506qasx1bdnd", SessionId: ydb://session/3?node_id=1&id=ODJhM2U2OWEtNTUwZjM4ZWEtM2ZmMGM3YzItZjdkZmZiNzA=, Slow query, duration: 44.060084s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:26.711717Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:26.711851Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:26.712791Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039392;self_id=[1:7519629056633958411:11563];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-24T21:21:26.713357Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:27.901278Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyhx2dzgew6t90hxn7yx0gz6, SessionId: CompileActor 2025-06-24 21:21:27.900 WARN ydb-core-kqp-ut-join(pid=892776, tid=0x00007F66E15D5640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed 2025-06-24T21:21:32.404134Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyhx2j808xb7fgrfc3dtavh8, SessionId: CompileActor 2025-06-24 21:21:32.403 WARN ydb-core-kqp-ut-join(pid=892776, tid=0x00007F66E0DC6640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed >> KqpJoin::RightSemiJoin_ComplexKey [GOOD] >> KqpJoin::RightSemiJoin_ComplexSecondaryIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithPreds+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 13682, MsgBus: 9787 2025-06-24T21:19:46.376728Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628681718169480:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:46.376774Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a06/r3tmp/tmpHOqP1F/pdisk_1.dat 2025-06-24T21:19:47.028595Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:47.028717Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 13682, node 1 2025-06-24T21:19:47.075332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:47.194282Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:47.232924Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:47.232951Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:47.232958Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:47.233103Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:47.379352Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9787 TClient is connected to server localhost:9787 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:48.195573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:48.219946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:19:50.694829Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628698898039261:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:50.694949Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:50.695031Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628698898039273:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:50.699682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:50.719346Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628698898039275:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:19:50.799693Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628698898039326:2336] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:51.263638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:19:51.379328Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628681718169480:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:51.379386Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:51.580765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628703193006823:2315];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:51.581015Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628703193006823:2315];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:51.581281Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628703193006823:2315];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:51.581434Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628703193006823:2315];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:51.581555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628703193006823:2315];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:51.581659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628703193006823:2315];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:51.581781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628703193006823:2315];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:51.581880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628703193006823:2315];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:51.581992Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628703193006823:2315];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:19:51.582101Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628703193006823:2315];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:19:51.582232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628703193006823:2315];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:19:51.586095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628703193006822:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:51.586144Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628703193006822:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:51.586301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628703193006822:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:51.586406Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628703193006822:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:51.586501Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628703193006822:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:51.586620Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628703193006822:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:51.586753Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628703193006822:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:51.586860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628703193006822:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:51.586967Z node 1 :TX_COLUMNSHARD WARN: log ... 
12629Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039349;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.616344Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039269;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.616786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.623817Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.624422Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039349;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.624957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.630613Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.631222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.635776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039195;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.637637Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.638232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.644476Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039195;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.645142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.650570Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.650771Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.651186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.651506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.658515Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.660155Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.660847Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.661014Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039333;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.669933Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.670563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.672370Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039333;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.672817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.682113Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.682751Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.683689Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.684728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.694057Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.694585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.696171Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.696654Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.701601Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.706785Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.707741Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.711510Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.717315Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.717870Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039223;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.721128Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:29.735674Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039223;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:30.054173Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx12vnbz6250mm6bbn0qkg", SessionId: ydb://session/3?node_id=1&id=NzZlYTNmOWQtMjU4NmU3ZWEtNWYwZWE1OTItZGNhYzhhMzU=, Slow query, duration: 47.184036s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:30.416625Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:21:30.417059Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:21:30.417736Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519629008135731237:9513];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-24T21:21:30.418230Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> KqpJoinOrder::CanonizedJoinOrderTPCDS64 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithConstantFold+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 32746, MsgBus: 5191 2025-06-24T21:19:47.638977Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628687565402659:2085];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:47.644437Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039fa/r3tmp/tmp6aOF4Y/pdisk_1.dat 2025-06-24T21:19:48.298888Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:48.302745Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628687565402591:2079] 1750799987524311 != 1750799987524314 2025-06-24T21:19:48.309288Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:48.309391Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:48.317637Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32746, node 1 2025-06-24T21:19:48.619920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:48.619937Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:48.619944Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T21:19:48.620066Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:48.643782Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5191 TClient is connected to server localhost:5191 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:49.457555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:49.484245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:52.079794Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628709040239710:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:52.079875Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:52.083230Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628709040239722:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:52.087209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:52.108932Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628709040239724:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:52.187449Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628709040239775:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:52.600290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:19:52.641459Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628687565402659:2085];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:52.641592Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:52.884755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628709040240027:2318];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:52.884948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628709040240027:2318];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:52.885217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628709040240027:2318];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:52.885380Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628709040240027:2318];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:52.885514Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628709040240027:2318];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:52.885620Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628709040240027:2318];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:52.885742Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628709040240027:2318];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:52.885862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628709040240027:2318];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:52.885975Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628709040240027:2318];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:19:52.886126Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628709040240027:2318];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:19:52.886246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628709040240027:2318];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:19:52.889163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628709040240029:2320];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:52.889223Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628709040240029:2320];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:52.889442Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628709040240029:2320];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:52.889555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628709040240029:2320];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:52.889652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628709040240029:2320];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:52.889774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628709040240029:2320];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:52.889878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628709040240029:2320];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:52.895728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628709040240029:2320];tablet_ ... 
5768Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.781573Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.782069Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.789486Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.790013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.795969Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.796693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.796860Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.797345Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.806042Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.808393Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.808906Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.811791Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.818609Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.820759Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.822040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.826185Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.834026Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.835372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.839953Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.840437Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.841040Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.842022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039315;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.845971Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.846652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.848461Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039315;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.849570Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.852512Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.853223Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039295;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.855992Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.856682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.858548Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039295;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.859930Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.863056Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.863766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.866669Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.867322Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.868872Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.869498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.873379Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.875505Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.035516Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx141bdssvjymg65z8g83k", SessionId: ydb://session/3?node_id=1&id=OGYxN2IzNDgtZDA0MDY1NzYtYTg5YWI1MTEtYTMwZmY1NjM=, Slow query, duration: 46.958825s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:31.384978Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:31.385363Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:31.386192Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519629078407485127:11454];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-24T21:21:31.386762Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpIndexLookupJoin::InnerJoinCustomColumnOrder+StreamLookup >> OlapEstimationRowsCorrectness::TPCH2 [GOOD] >> KqpJoin::IdxLookupPartialLeftPredicate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH19 [GOOD] Test command err: Trying to start YDB, gRPC: 29832, MsgBus: 24443 2025-06-24T21:19:43.187557Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628671922982923:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:43.187696Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a13/r3tmp/tmpqU1RXP/pdisk_1.dat 2025-06-24T21:19:43.923284Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628671922982902:2079] 1750799983178190 != 1750799983178193 2025-06-24T21:19:43.976647Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:43.976743Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:43.984415Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:44.003223Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29832, node 1 2025-06-24T21:19:44.283542Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:44.284302Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-24T21:19:44.284310Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:44.284316Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:44.284443Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24443 TClient is connected to server localhost:24443 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:45.396916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:45.419817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:48.079799Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628693397820026:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:48.079897Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:48.083231Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628693397820038:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:48.088176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:48.109617Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628693397820040:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:48.191252Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628671922982923:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:48.195364Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:48.207741Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628693397820091:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:48.678422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:19:49.120282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628693397820283:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:49.120527Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628693397820283:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:49.120803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628693397820283:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:49.120923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628693397820283:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:49.121032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628693397820283:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:49.121142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628693397820283:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:49.121298Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628693397820283:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:49.121431Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628693397820283:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:49.121532Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628693397820283:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:19:49.121661Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628693397820283:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:19:49.121795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628693397820283:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:19:49.123397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693397820284:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:49.123439Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693397820284:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:49.123666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693397820284:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:49.123782Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693397820284:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:49.123886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693397820284:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:49.123981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693397820284:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:49.124073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693397820284:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:49.124194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693397820284:2313];tabl ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.074658Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039362;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.075336Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039370;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.078685Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.079331Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.085075Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039370;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.085721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.091663Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.092195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.099755Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.100321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039344;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.105767Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.106504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.108471Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039344;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.109132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039342;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 
2025-06-24T21:21:32.115957Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.116548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039356;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.121909Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039356;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.122120Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039342;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.122557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.122597Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.128055Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.128137Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.128712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.128896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.134632Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.136174Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.140416Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.141711Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.234087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039296;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.234739Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.243571Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.245124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.248808Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039296;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.250173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.256757Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.257300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.259496Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.260080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.262310Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.263040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:32.265558Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.268345Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:32.399409Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx14ds6sm4vywj650bdxnj", SessionId: 
ydb://session/3?node_id=1&id=YTBiYmIzOTgtOWMzNThjY2ItZmJlNWExYzgtZmNjNTg0ODM=, Slow query, duration: 47.923603s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:32.739960Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:32.740710Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:32.741335Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::FiveWayJoin-ColumnStore >> OlapEstimationRowsCorrectness::TPCH21 [GOOD] >> KqpJoinOrder::TPCDS61-ColumnStore >> KqpJoinOrder::SortingsByPrefixWithConstant-RemoveLimitOperator >> KqpIndexLookupJoin::SimpleInnerJoin-StreamLookup [GOOD] >> KqpIndexLookupJoin::SimpleLeftJoin+StreamLookup >> KqpJoin::LeftJoinWithNull+StreamLookupJoin >> KqpJoinOrder::SortingsWithLookupJoin3-RemoveLimitOperator [GOOD] >> KqpJoinOrder::SortingsComplexOrderBy-RemoveLimitOperator [GOOD] >> KqpJoinOrder::FiveWayJoinWithComplexPreds+ColumnStore >> KqpJoinOrder::CanonizedJoinOrderTPCH21 [GOOD] >> KqpFlipJoin::RightOnly_1 [GOOD] >> KqpFlipJoin::RightOnly_2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCH2 [GOOD] Test command err: Trying to start YDB, gRPC: 21410, MsgBus: 10336 2025-06-24T21:19:43.607591Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628671276685259:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:43.623744Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a0e/r3tmp/tmplMbzKE/pdisk_1.dat 2025-06-24T21:19:44.180330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:44.180432Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:44.188366Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:44.197410Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21410, node 1 2025-06-24T21:19:44.471769Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or 
outdated, will use file: (empty maybe) 2025-06-24T21:19:44.471789Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:44.471795Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:44.471887Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:44.635015Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10336 TClient is connected to server localhost:10336 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:45.394384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:45.408968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:47.741635Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628688456555030:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:47.741763Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:47.742110Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628688456555042:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:47.746746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:47.763966Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628688456555044:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:47.846951Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628688456555096:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:48.257059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:19:48.516176Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628692751522638:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:48.516430Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628692751522638:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:48.516684Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628692751522638:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:48.516815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628692751522638:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:48.516934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628692751522638:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:48.517042Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628692751522638:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:48.517165Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628692751522638:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:48.517284Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628692751522638:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:48.517377Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628692751522638:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:19:48.517526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7519628692751522638:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:19:48.517644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628692751522638:2313];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:19:48.594121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628692751522646:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:48.594190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628692751522646:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:48.594349Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628692751522650:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:48.594389Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628692751522650:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:48.594425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628692751522646:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:48.594536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628692751522646:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:48.594545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628692751522650:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:48.594642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628692751522646:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:48.594657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628692751522650:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:48.595124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628692751522646:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=C ... 
ecute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.316356Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039194;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.316933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039220;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.320185Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.320763Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.323837Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039220;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.324469Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039210;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.330993Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039210;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.331660Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039198;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.332680Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.333153Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039200;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.340738Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039198;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.341474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039242;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.348538Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039200;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.349123Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039204;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.354006Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039242;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.359556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039218;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.366419Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039204;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.367095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039202;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.371274Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039218;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.371813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039250;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.378721Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039202;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.382532Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039250;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.383091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039252;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.385372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039212;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.394420Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039252;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.395975Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039212;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.396571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039224;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.399882Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039254;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.406376Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039254;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.407056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039216;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.407229Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039224;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.408240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039236;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.413232Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039216;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.413956Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039236;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.414405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039240;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.414573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039208;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.420004Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039240;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.420553Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039208;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.509774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039196;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.521082Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039196;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.654957Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx13578ygdk8961z2a3z6h", SessionId: ydb://session/3?node_id=1&id=MWVkNzkzZjUtZTA1NzA2MTMtMzU4MjJhYWEtMzYyNGNiYzY=, Slow query, duration: 47.478705s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:31.072778Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:31.073423Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:31.073479Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:31.074593Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224038933;local_tx_no=11;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976710716; 2025-06-24T21:21:31.075124Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224039094;local_tx_no=11;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCH21 [GOOD] Test command err: Trying to start YDB, gRPC: 20315, MsgBus: 27788 2025-06-24T21:19:42.723968Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628668218871493:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:42.757628Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a33/r3tmp/tmpIEJiz1/pdisk_1.dat 2025-06-24T21:19:43.442639Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:43.442763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:43.448225Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:43.561784Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:43.563210Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628668218871312:2079] 1750799982648666 != 1750799982648669 TServer::EnableGrpc on GrpcPort 20315, node 1 2025-06-24T21:19:43.736306Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:19:43.784104Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:43.784133Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:43.784185Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:43.784323Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27788 TClient is connected to server localhost:27788 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:44.886506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:47.451037Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628689693708437:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:47.451181Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:47.455282Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628689693708449:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:47.463921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:47.483354Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628689693708451:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:47.583868Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628689693708502:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:47.699304Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628668218871493:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:47.699391Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:48.031242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:19:48.372306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693988675976:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:48.372544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693988675976:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:48.372809Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693988675976:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:48.372923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693988675976:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:48.373021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693988675976:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:48.373135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693988675976:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:48.373251Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693988675976:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:48.373356Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693988675976:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:48.373442Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693988675976:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:19:48.373556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693988675976:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:19:48.373647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628693988675976:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:19:48.433092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628693988676105:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:48.433156Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628693988676105:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:48.433385Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628693988676105:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:48.433502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628693988676105:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:48.433644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628693988676105:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:48.433755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628693988676105:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:48.433877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628693988676105:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:48.433976Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519628693988676105:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:48.434070Z node 1 :TX_COLU ... 
20963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.525937Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.526584Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.528581Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039222;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.529097Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.536023Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.536653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039292;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.541604Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.542120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.549328Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039292;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.549807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039294;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.551328Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.551888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.556754Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039294;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.560299Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.560900Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.563985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039256;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.569831Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.570445Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.583210Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039256;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.583781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039356;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.589112Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.589609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.595177Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.595805Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039288;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.597077Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039356;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.598106Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.601257Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039288;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.602369Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.608055Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.608627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.613937Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.614536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.615032Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.615679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.620295Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.620953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.627184Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.633214Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.741388Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039320;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:29.755599Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039320;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.879393Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx140v1y3yrdqf1ydz7bzq", SessionId: ydb://session/3?node_id=1&id=NzRjMzUzNjgtNTZlZGUzYTQtNzg0NDJmMDgtYzkwNjE0NGM=, Slow query, duration: 45.819385s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:30.457969Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:30.458506Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519628943096814655:7689];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-24T21:21:30.459014Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:30.462465Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoin3-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 29784, MsgBus: 22910 2025-06-24T21:20:56.880568Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628986625943484:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:56.880603Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003967/r3tmp/tmpZrUW46/pdisk_1.dat 2025-06-24T21:20:57.722270Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:57.723517Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628986625943457:2079] 1750800056834150 != 1750800056834153 TServer::EnableGrpc on GrpcPort 29784, node 1 2025-06-24T21:20:57.929895Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:57.929993Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:57.949824Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:57.976802Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:20:58.012199Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:58.012220Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
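(For readability: the escaped query text reported by the KQP_SLOW_LOG entries above — the ~46–48 s CREATE TABLE statements flagged in each of these tests — decodes to the YQL below. This is only an unescaped rendering of the "\n" sequences already present in the logged text; indentation is approximate and nothing is added beyond what the log records.)

-- rendering of the slow-query text from the KQP_SLOW_LOG warnings (indentation approximate)
CREATE TABLE t1 (
    id1 Int32 NOT NULL,
    PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
    id2 Int64 NOT NULL,
    t1_id1 Int64 NOT NULL,
    -- random_field2 Int32
    PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
    id3 Int16 NOT NULL,
    -- random_field3 Int32
    PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);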
2025-06-24T21:20:58.012227Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:58.012330Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22910 TClient is connected to server localhost:22910 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:59.222994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:59.247454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:01.883170Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628986625943484:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:01.883240Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:02.282889Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629012395747890:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:02.283266Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629012395747902:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:02.284937Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:02.288172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:02.368250Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629012395747904:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:21:02.453286Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629012395747955:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:02.802080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.007407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.097503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.179628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.263309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.498779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.579297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.621590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.683665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.720291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.782556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.825909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.899134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:04.669863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
38422Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.249072Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.249647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.251496Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.252086Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.261833Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.262435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.267707Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.268366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.274396Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.276808Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.277471Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038491;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.283303Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.289849Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038491;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.290465Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.291342Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.291873Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.301748Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.304779Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.305369Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.308032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038590;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.317920Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.318493Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.324051Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038590;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.325382Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.331285Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.332077Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038588;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.333793Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.334328Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038493;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.340993Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038493;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.345329Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038588;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.349521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.352875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038481;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.360980Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.361523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.363775Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038481;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.365838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.376491Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.376788Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.412177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038512;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:49.427026Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038512;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.565978Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx1tjb1rqfb2smsan5x60t", SessionId: ydb://session/3?node_id=1&id=ZmM0ODViZDAtYThjYzVmOTItMjU4MjMyYWQtM2JhYWU2MjM=, Slow query, duration: 42.417866s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:49.977998Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:49.978769Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519629119769949409:4955];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-24T21:21:49.979806Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:49.981259Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsComplexOrderBy-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 2637, MsgBus: 5395 2025-06-24T21:20:55.929296Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628981255767925:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:55.943654Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00396b/r3tmp/tmpGODCDM/pdisk_1.dat 2025-06-24T21:20:56.629263Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:56.629354Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:56.652283Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:20:56.652569Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:56.658578Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628981255767714:2079] 1750800055843817 != 1750800055843820 TServer::EnableGrpc on GrpcPort 2637, node 1 2025-06-24T21:20:56.927691Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:56.970187Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:56.970209Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:20:56.970242Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:56.970348Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5395 TClient is connected to server localhost:5395 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:58.118708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:58.152339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:00.774772Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629002730604844:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:00.774943Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:00.775295Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629002730604856:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:00.780022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:00.800349Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629002730604858:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:21:00.867835Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629002730604909:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:00.985200Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628981255767925:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:00.985531Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:01.427256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:01.613242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:01.655355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:01.745332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:01.830582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:02.041142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:02.109664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:02.201517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:02.277267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:02.331987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:02.383598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:02.420742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:02.456787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.353666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperatio ... 
21212Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.532866Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.533366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.535632Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.536119Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.551669Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.552177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.556841Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.557361Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.562970Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.565672Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.566219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.567809Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.574330Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.574941Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.574970Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.575880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.587167Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.587679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.592101Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.592623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.592782Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.594102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.597673Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.598322Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.608114Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.608650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.612098Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.612750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.631187Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.631779Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.631976Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.634001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.637962Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.638637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038486;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.642506Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.645314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038480;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.645684Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038486;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.654079Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038480;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.711729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038493;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:48.717640Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038493;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:48.775405Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx1scn103zabrka7p3rq2d", SessionId: ydb://session/3?node_id=1&id=NzczYzk2ODMtNTNmMTBhNDItNDk2MTcxMDItY2ViZTU3OTE=, Slow query, duration: 42.833546s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:49.028724Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:49.029330Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:49.030433Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519629174529325560:6357];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038331; 2025-06-24T21:21:49.030958Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> OlapEstimationRowsCorrectness::TPCDS96 >> KqpIndexLookupJoin::JoinByComplexKeyWithNullComponents+StreamLookupJoin ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH21 [GOOD] Test command err: Trying to start YDB, gRPC: 26523, MsgBus: 32453 2025-06-24T21:19:40.334936Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628659016056129:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:40.335205Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a35/r3tmp/tmp6z37x3/pdisk_1.dat 2025-06-24T21:19:40.978545Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:40.979354Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628659016055948:2079] 1750799980245558 != 1750799980245561 2025-06-24T21:19:41.007994Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:41.008081Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:41.012066Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26523, node 1 2025-06-24T21:19:41.167674Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:41.167694Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:41.167702Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed 
to initialize from file: (empty maybe) 2025-06-24T21:19:41.167797Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:41.339744Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:32453 TClient is connected to server localhost:32453 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:42.184669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:42.220005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:19:44.768171Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628676195925778:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:44.768287Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:44.768645Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628676195925790:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:44.773555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:44.803264Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628676195925792:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:19:44.879530Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628676195925843:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:45.322727Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628659016056129:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:45.327312Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:45.371209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:19:45.688124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628680490893410:2323];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:45.688318Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628680490893410:2323];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:45.688573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628680490893410:2323];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:45.688710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628680490893410:2323];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:45.688800Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628680490893410:2323];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:45.688909Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628680490893410:2323];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:45.689004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628680490893410:2323];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:45.689098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628680490893410:2323];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:45.689189Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628680490893410:2323];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:19:45.689282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628680490893410:2323];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:19:45.689371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628680490893410:2323];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:19:45.695849Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628680490893384:2310];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:45.695910Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628680490893384:2310];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:45.696132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628680490893384:2310];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:45.696246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628680490893384:2310];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:45.696366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628680490893384:2310];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:45.696487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628680490893384:2310];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:45.696614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628680490893384:2310];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:45.696721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628680490893384:2310];tabl ... 
58679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.964781Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.965479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.971548Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.971628Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.972146Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.972167Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.991907Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.992502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.993959Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:30.994462Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:30.998371Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.003881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039289;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.007642Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.008192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.013519Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039289;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.014061Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039189;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.017405Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.018056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.027862Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039189;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.028418Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.031558Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.032074Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.039100Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.040283Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.045724Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.046283Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.053590Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.054121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.055825Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.056342Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.065452Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.066109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.067814Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.068301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.082219Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.082775Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.087481Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.088015Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:31.092724Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.095822Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:31.350407Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx0zz73r5hegyth6gwqe4z", SessionId: ydb://session/3?node_id=1&id=OTczYTVlNmItNWM0ODlmZDMtMzdiNTFjYmYtOGUyNmYzNDk=, Slow query, duration: 51.437644s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:31.691572Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:31.692118Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:31.693153Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519629002613487416:9612];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039392; 2025-06-24T21:21:31.693631Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpIndexLookupJoin::LeftSemiJoinWithLeftFilter+StreamLookup [GOOD] >> KqpIndexLookupJoin::LeftSemiJoinWithDuplicatesInRightTable-StreamLookupJoin >> KqpIndexLookupJoin::CheckCastUint64ToInt64-StreamLookupJoin-NotNull >> KqpJoinOrder::CanonizedJoinOrderTPCH18 >> KqpJoinOrder::SortingsWithLookupJoinByPrefix-RemoveLimitOperator >> KqpJoin::RightSemiJoin_SecondaryIndex [GOOD] >> KqpIndexLookupJoin::InnerJoinCustomColumnOrder+StreamLookup [GOOD] >> KqpIndexLookupJoin::InnerJoinCustomColumnOrder-StreamLookup >> KqpJoin::IdxLookupPartialLeftPredicate [GOOD] >> KqpJoin::IdxLookupPartialWithTempTable >> KqpJoin::RightSemiJoin_ComplexSecondaryIndex [GOOD] >> KqpJoin::LeftJoinWithNull+StreamLookupJoin [GOOD] >> KqpJoin::LeftJoinPushdownPredicate_Simple ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::RightSemiJoin_SecondaryIndex [GOOD] Test command err: Trying to start YDB, gRPC: 14909, MsgBus: 17878 2025-06-24T21:21:39.328765Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629170202649527:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:39.332200Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00393f/r3tmp/tmph07sbL/pdisk_1.dat 2025-06-24T21:21:40.044574Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:40.044659Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:40.051995Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:40.054663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> 
Connected TServer::EnableGrpc on GrpcPort 14909, node 1 2025-06-24T21:21:40.327683Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:40.327706Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:40.327712Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:40.327808Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:21:40.371476Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17878 TClient is connected to server localhost:17878 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:41.166329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:41.188021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:41.202384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:41.370243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:21:41.579220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:41.685907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:43.580471Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629187382520293:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:43.580584Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:44.027870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:44.074816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:44.107510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:44.192051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:44.262970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:44.333794Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629170202649527:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:44.334590Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:44.374944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:44.494771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:44.584241Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519629191677488253:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:44.584319Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:44.584606Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629191677488258:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:44.588605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:44.603100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T21:21:44.603360Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629191677488260:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:21:44.691417Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629191677488311:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:46.288781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ ... ed to server localhost:5217 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:51.925926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:51.935803Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:21:51.951524Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:52.038332Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:21:52.281480Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:52.391498Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:55.424438Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629239612211496:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:55.424529Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:55.491673Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:55.578198Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:55.611209Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519629218137373390:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:55.611838Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:55.645689Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:55.737386Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:55.806178Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:55.873159Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:55.953081Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:56.047292Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519629243907179453:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:56.047394Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:56.047764Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629243907179458:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:56.052841Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:56.074496Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629243907179460:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:21:56.166626Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629243907179511:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:58.083010Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:58.160339Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:58.210887Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:58.263076Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:58.305468Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:00.172913Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpIndexLookupJoin::SimpleLeftJoin+StreamLookup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::RightSemiJoin_ComplexSecondaryIndex [GOOD] Test command err: Trying to start YDB, gRPC: 12538, MsgBus: 10278 2025-06-24T21:21:40.729209Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629173030597926:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:40.729389Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003938/r3tmp/tmp14y3sW/pdisk_1.dat 2025-06-24T21:21:41.327445Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629173030597815:2079] 1750800100696400 != 1750800100696403 2025-06-24T21:21:41.374452Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:41.378357Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:41.378467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:41.382215Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12538, node 1 2025-06-24T21:21:41.645028Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:41.645047Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:41.645060Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:41.645199Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:21:41.743271Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10278 TClient is connected to server localhost:10278 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:42.471568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:21:42.489681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:42.501976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:42.762037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:43.003852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:43.142755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:45.508210Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629194505435927:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:45.508349Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:45.727305Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629173030597926:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:45.727410Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:45.985464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:46.020191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:46.060664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:46.119648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:46.179068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:46.230568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:46.322983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:46.409046Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519629198800403885:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:46.409108Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:46.409388Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629198800403890:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:46.413923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:46.460244Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629198800403892:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:21:46.539680Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629198800403945:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:48.387882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... t_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:53.427959Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3461 2025-06-24T21:21:53.991389Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3461 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:54.268912Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:54.275855Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:54.294554Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:21:54.402391Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:54.645561Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:54.834174Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:57.991252Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519629224265151478:2242];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:57.991343Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:58.248538Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629250034956662:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:58.248635Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:58.400430Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:58.449635Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:58.496690Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:58.550126Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:58.613715Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:58.702149Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:58.779560Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:58.880780Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629250034957320:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:58.880873Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:58.881241Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629250034957325:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:58.885623Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:58.911369Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629250034957327:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:21:58.989863Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629250034957378:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:00.763972Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:00.839982Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:00.925709Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:00.973903Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.055992Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpFlipJoin::RightOnly_2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::SimpleLeftJoin+StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 7515, MsgBus: 3103 2025-06-24T21:21:45.238882Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629197090183510:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:45.261649Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003932/r3tmp/tmpaovlX7/pdisk_1.dat 2025-06-24T21:21:46.048706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:46.048831Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:46.057178Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:21:46.064510Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:46.066071Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629197090183326:2079] 1750800105161702 != 1750800105161705 TServer::EnableGrpc on GrpcPort 7515, node 1 2025-06-24T21:21:46.353341Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:46.431672Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:46.431692Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:46.431698Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:46.431826Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3103 TClient is connected to server localhost:3103 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:47.805686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:47.845783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:21:48.137793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:48.393391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:48.515895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:50.219647Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629197090183510:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:50.219775Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:50.704095Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629218565021447:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:50.704208Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:51.001328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:51.042880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:51.079627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:51.159749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:51.213198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:51.297079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:51.381737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:51.491428Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629222859989419:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:51.491501Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:51.491727Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629222859989424:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:51.497061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:51.508253Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629222859989426:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:21:51.611485Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629222859989477:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:52.958080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:53.064931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part pr ... d to server localhost:62835 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:57.048108Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:57.053463Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:57.058047Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:57.135450Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:21:57.353650Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:57.461351Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:00.771359Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519629238560818623:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:00.771460Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:01.014026Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629264330623909:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:01.014116Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:01.104957Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.146127Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.231247Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.282991Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.398673Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.507543Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.606166Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.751983Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629264330624571:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:01.752097Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:01.752396Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629264330624576:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:01.757471Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:01.791021Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629264330624578:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:01.894816Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629264330624631:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:04.348752Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:04.402753Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:04.453315Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:04.498049Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:04.573673Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:04.653498Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::CanonizedJoinOrderTPCH3 [GOOD] >> KqpJoinOrder::FiveWayJoinWithComplexPreds2+ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpFlipJoin::RightOnly_2 [GOOD] Test command err: Trying to start YDB, gRPC: 21695, MsgBus: 20722 2025-06-24T21:21:48.742434Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629208882432483:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:48.976820Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003925/r3tmp/tmpdS8zkB/pdisk_1.dat 2025-06-24T21:21:49.275993Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:49.276134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:49.277377Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:49.291373Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629208882432305:2079] 1750800108568488 != 1750800108568491 2025-06-24T21:21:49.302416Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21695, node 1 2025-06-24T21:21:49.568470Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:49.627795Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:49.627844Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:49.627857Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:49.628002Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20722 TClient is connected to server localhost:20722 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:50.686993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:21:50.731563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:21:50.749743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:50.954998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:51.160103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:51.263706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:53.325178Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629230357270420:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:53.325290Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:53.655262Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629208882432483:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:53.655338Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:53.814552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:53.895621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:53.939786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:54.017707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:54.087757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:54.135033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:54.201367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:54.324429Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519629234652238389:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:54.324497Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:54.324734Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629234652238394:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:54.328800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:54.349675Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629234652238396:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:21:54.448455Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629234652238447:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:56.133453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... ecting -> Connected TServer::EnableGrpc on GrpcPort 31354, node 2 2025-06-24T21:21:58.763744Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:58.763765Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:58.763779Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:58.763928Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20253 2025-06-24T21:21:59.294786Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20253 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:59.668487Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:21:59.687376Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:59.709864Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:59.873797Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:00.078174Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:00.178404Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:02.455289Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629267285123882:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:02.455382Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:02.502375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:02.543654Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:02.592433Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:02.649898Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:02.713641Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:02.817357Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:02.897588Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:03.002516Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629267285124539:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:03.002643Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:03.002780Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629267285124544:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:03.007767Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:03.023049Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629267285124546:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:03.090290Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629271580091893:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:03.294187Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519629250105253293:2243];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:03.397766Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:04.983451Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:05.038743Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:05.101523Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:05.161409Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::FourWayJoinWithPredsAndEquivAndLeft-ColumnStore >> KqpJoinOrder::CanonizedJoinOrderTPCH9 >> KqpIndexLookupJoin::JoinByComplexKeyWithNullComponents+StreamLookupJoin [GOOD] >> KqpIndexLookupJoin::JoinByComplexKeyWithNullComponents-StreamLookupJoin >> KqpJoinOrder::TPCDS34-ColumnStore [GOOD] >> KqpIndexLookupJoin::LeftSemiJoinWithDuplicatesInRightTable-StreamLookupJoin [GOOD] >> KqpIndexLookupJoin::CheckCastUint64ToInt64-StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastUint64ToInt64-StreamLookupJoin+NotNull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH3 [GOOD] Test command err: Trying to start YDB, gRPC: 6486, MsgBus: 32299 2025-06-24T21:20:02.794685Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628753554979103:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:02.795130Z node 1 :METADATA_PROVIDER ERROR: 
log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039d6/r3tmp/tmpdxO6iB/pdisk_1.dat 2025-06-24T21:20:03.492546Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:03.507350Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628753554978997:2079] 1750800002759817 != 1750800002759820 2025-06-24T21:20:03.524194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:03.524288Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:03.532887Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6486, node 1 2025-06-24T21:20:03.798389Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:03.854429Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:03.854449Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:03.854455Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:03.854563Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32299 TClient is connected to server localhost:32299 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:04.877319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:20:04.904870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:20:07.308589Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628775029816123:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:07.308748Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:07.310121Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628775029816135:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:07.318442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:07.340208Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628775029816137:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:20:07.432102Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628775029816188:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:07.791409Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628753554979103:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:07.791538Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:07.893167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:20:08.468535Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628779324783734:2315];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:20:08.468793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628779324783734:2315];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:20:08.469104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628779324783734:2315];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:20:08.469229Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628779324783734:2315];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:20:08.469336Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628779324783734:2315];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:20:08.469448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628779324783734:2315];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:20:08.469558Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628779324783734:2315];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:20:08.469694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628779324783734:2315];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:20:08.469799Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628779324783734:2315];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:20:08.469918Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628779324783734:2315];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:20:08.470027Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628779324783734:2315];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:20:08.455136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628779324783754:2323];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:20:08.471693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628779324783754:2323];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:20:08.471910Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628779324783754:2323];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:20:08.472020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628779324783754:2323];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:20:08.472131Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628779324783754:2323];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:20:08.472229Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628779324783754:2323];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:20:08.472326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628779324783754:2323];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:20:08.472438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519628779324783754:2323];tablet ... 
73056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.476327Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.476872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.482564Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.483094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.486466Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.491335Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.496987Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.497513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.500967Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.501578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.507212Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.507744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.511410Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.511998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.513788Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.514443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.521045Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.521753Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039247;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.528109Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039247;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.528816Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.532840Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.533911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.535441Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.536056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.540552Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.541269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.541945Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.543087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039364;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.549589Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039364;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.549965Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.550293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.550795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.556924Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.557630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.560699Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.567916Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.569266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.569436Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:49.581208Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.589623Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:49.775172Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx1pm453eydmxy2b74t6e7", SessionId: ydb://session/3?node_id=1&id=MjFiODJkMjctOWE3OTcyNTgtMjFiNDZhMDMtY2M5ODA1ZmY=, Slow query, duration: 46.664693s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:50.320615Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:50.321252Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:50.321876Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519629032727890905:7919];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224039392; 2025-06-24T21:21:50.322361Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::TPCDS61+ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftSemiJoinWithDuplicatesInRightTable-StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 8414, MsgBus: 24070 2025-06-24T21:21:51.507829Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629218786727753:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:51.508303Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003921/r3tmp/tmprq1amk/pdisk_1.dat 2025-06-24T21:21:52.247280Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629218786727551:2079] 1750800111370435 != 1750800111370438 2025-06-24T21:21:52.272992Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:52.280828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:52.280933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:52.293938Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8414, node 1 2025-06-24T21:21:52.491667Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:52.543662Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:52.543691Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: (empty maybe) 2025-06-24T21:21:52.543698Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:52.543819Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24070 TClient is connected to server localhost:24070 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:53.428359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:53.464667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:53.475128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:53.642068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:53.954057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:21:54.094880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:56.404985Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629240261565670:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:56.405100Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:56.468058Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629218786727753:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:56.471430Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:56.844932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:56.903803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:56.987850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:57.060085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:57.109884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:57.185584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:57.250537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:57.326105Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519629244556533631:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:57.326191Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:57.326539Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629244556533636:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:57.330789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:57.370378Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629244556533638:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:21:57.464928Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629244556533691:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:59.152307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003921/r3tmp/tmpnx1Wol/pdisk_1.dat 2025-06-24T21:22:02.542406Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:02.542485Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:02.545826Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:02.557489Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629268375120601:2079] 1750800122156479 != 1750800122156482 2025-06-24T21:22:02.609306Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12806, node 2 2025-06-24T21:22:02.883600Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:02.883620Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:02.883626Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:02.883735Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:22:03.295298Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13036 TClient is connected to server localhost:13036 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:22:03.972277Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:03.995323Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:22:04.010012Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:04.113869Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:04.324706Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:04.489069Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:07.163301Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629289849958718:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:07.163389Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:07.197418Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519629268375120621:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:07.197473Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:07.217362Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:07.261931Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:07.299634Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:07.386252Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:07.451923Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:07.508646Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:07.626781Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:07.756962Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519629289849959384:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:07.757059Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:07.757341Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629289849959389:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:07.761498Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:07.784471Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629289849959391:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:07.859541Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629289849959442:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:09.622455Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:09.718752Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS34-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 4757, MsgBus: 30346 2025-06-24T21:20:57.320937Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628990037394763:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:57.340506Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00395f/r3tmp/tmplVTWFx/pdisk_1.dat 2025-06-24T21:20:58.190161Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:58.190255Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:58.201402Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:20:58.285117Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:58.343274Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:58.347353Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628990037394735:2079] 1750800057296431 != 1750800057296434 TServer::EnableGrpc on GrpcPort 4757, node 1 2025-06-24T21:20:58.579231Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:58.579250Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:58.579263Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:58.579382Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:30346 TClient is connected to server localhost:30346 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:59.830098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:02.323342Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628990037394763:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:02.341398Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:03.029044Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629015807199163:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:03.029187Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:03.029537Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629015807199175:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:03.034092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:03.061885Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629015807199177:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:21:03.123795Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629015807199228:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:03.455108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.576351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.619064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.654564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.690775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.913558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.954383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:03.994441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:04.029056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:04.063709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:04.106490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:04.149443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:04.192115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:04.933281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/co ... 
16691Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.219528Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.219997Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.223420Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.228444Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.229092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.231939Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.237366Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.237985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.242666Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.243096Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.252574Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.253073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.256044Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.256506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.262192Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.262930Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.266119Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.266676Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.276336Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.277602Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.278257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.283931Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.288551Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.289172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.293556Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.294056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.298756Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.303491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.307602Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.308123Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.312970Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.313494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.317589Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.318848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.323621Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.328761Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.329336Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.343652Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.418314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038532;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:47.428656Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038532;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:47.607395Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx1v2zdvn5rxzfptjqa6sg", SessionId: ydb://session/3?node_id=1&id=NGFhOTU2YzQtOGI1MGViNzEtZTM0ZjMyNjEtNGZmNGUyZTc=, Slow query, duration: 39.927540s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:48.608502Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:48.609100Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:48.609778Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519629161836114086:6082];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038170; 2025-06-24T21:21:48.610360Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::TPCHEveryQueryWorks-ColumnStore [GOOD] >> KqpIndexLookupJoin::InnerJoinCustomColumnOrder-StreamLookup [GOOD] >> KqpJoinOrder::ShuffleEliminationTpcdsMapJoinBug >> KqpJoin::IdxLookupPartialWithTempTable [GOOD] >> KqpJoin::LeftJoinPushdownPredicate_NoPushdown ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::InnerJoinCustomColumnOrder-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 20173, MsgBus: 25770 2025-06-24T21:21:53.816308Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629229531546603:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:53.855532Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003918/r3tmp/tmpHPvVvW/pdisk_1.dat 2025-06-24T21:21:54.669597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:54.669713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:54.678982Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629229531546422:2079] 1750800113764901 != 1750800113764904 2025-06-24T21:21:54.679917Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:54.687944Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20173, node 1 2025-06-24T21:21:54.954883Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:55.091786Z node 
1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:55.091811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:55.091819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:55.091947Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25770 TClient is connected to server localhost:25770 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:56.241697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:56.276120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:56.436952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:56.762073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
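For readability, the DDL captured in the KQP_SLOW_LOG entry above (the 39.927540s CREATE TABLE statement for tx_id 281474976710714's session) is reproduced here with its escaped \n sequences expanded into real line breaks. The statement text is taken verbatim from the logged query; only the indentation is a best-effort reconstruction, not taken from the test source.

CREATE TABLE t1 (
    id1 Int32 NOT NULL,
    PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
    id2 Int64 NOT NULL,
    t1_id1 Int64 NOT NULL,
    -- random_field2 Int32
    PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
    id3 Int16 NOT NULL,
    -- random_field3 Int32
    PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);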
2025-06-24T21:21:56.971221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:58.815293Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629229531546603:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:58.815361Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:59.081310Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629255301351829:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:59.081457Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:59.662821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:59.728121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:59.801023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:59.840189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:59.886261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:59.949297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:00.035650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:00.153862Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629259596319790:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:00.154053Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:00.155800Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629259596319795:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:00.161349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:00.188763Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629259596319797:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:00.278956Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629259596319850:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:02.717940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:02.763074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... d to server localhost:16573 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:06.472917Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:06.488565Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:22:06.505597Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:06.621850Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:22:06.923008Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:07.039540Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:10.115786Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519629279034290364:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:10.115877Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:10.426828Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629300509128457:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:10.426939Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:10.523418Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:10.577540Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:10.623915Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:10.694403Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:10.748138Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:10.813737Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:10.905158Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:11.004912Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629304804096411:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:11.004997Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:11.005228Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629304804096416:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:11.009185Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:11.030096Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629304804096418:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:11.138280Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629304804096471:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:12.581573Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:12.669854Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:12.732047Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:12.785390Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:12.837853Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:12.895842Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCHEveryQueryWorks-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 15010, MsgBus: 17034 2025-06-24T21:16:57.801627Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627956074605300:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:57.801689Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003c09/r3tmp/tmpAdIBhQ/pdisk_1.dat 2025-06-24T21:16:58.387976Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:58.388098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:58.445418Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:58.447360Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627956074605202:2079] 1750799817771556 != 1750799817771559 2025-06-24T21:16:58.457377Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15010, node 1 2025-06-24T21:16:58.567681Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:58.567704Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:58.567716Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:58.567834Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17034 2025-06-24T21:16:58.846942Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17034 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:59.253042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:16:59.285939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:17:01.278379Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627973254475031:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:01.278505Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:01.278961Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627973254475043:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:01.283380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:01.296857Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627973254475045:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:17:01.382366Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627973254475096:2336] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:01.763908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:01.928590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:01.979812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:02.049974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:02.086680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:02.122192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:02.165323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:02.214394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:17:02.848430Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627956074605300:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:02.848525Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:17:13.311513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:17:13.311545Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:39.801728Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwx0cd2dj13krsy78tb87b", SessionId: ydb://session/3?node_id=1&id=YTIxOWI2MmItZTcyMmE0MDEtYTgxYjc5YTYtOTM0ZTc5MDY=, Slow query, duration: 10.537929s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "\n\n-- TPC-H/TPC-R Minimum Cost Supplier Query (Q2)\n-- TPC TPC-H Parameter Substitution (Version 2.17.2 build 0)\n-- using 1680793381 as a seed to the RNG\n\nselect\n s_acctbal,\n s_name,\n n_name,\n p_partkey,\n p_mfgr,\n s_address,\n s_phone,\n s_comment\nfrom\n `/Root/part`\n cross join `/Root/supplier`\n cross join `/Root/partsupp`\n cross join `/Root/nation`\n cross join `/Root/region`\n cross join (\n select\n `/Root/partsupp`.ps_partkey as sc_ps_partkey,\n min(ps_supplycost) as min_ps_supplycost\n from\n `/Root/partsupp`\n cross join `/Root/supplier`\n cross join `/Root/nation`\n cross join `/Root/region`\n where\n s_suppkey = ps_suppkey\n and s_nationkey = n_nationkey\n and n_regionkey = r_regionkey\n and r_name = 'AMERICA'\n group by `/Root/partsupp`.ps_partkey\n ) as min_ps_supplycosts\nwhere\n p_partkey = ps_partkey\n and s_suppkey = ps_suppkey\n and p_size = 10\n and p_type like '%COPPER'\n and s_nationkey = n_nationkey\n and n_regionkey = r_regionkey\n and r_name = 'AMERICA'\n and ps_supplycost = min_ps_supplycost\n and p_partkey = ps_partkey\norder by\n s_acctbal desc,\n n_name,\n s_name,\n p_partkey\nlimit 100;\n", parameters: 0b 2025-06-24T21:19:26.813157Z node 1 :TX_DATASHARD WARN: datashard__read_iterator.cpp:3439: 72075186224037891 Cancelled read: {[1:7519628596024738249:3497], 0} 2025-06-24T21:20:12.256484Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwzqaf9f4f4wrg0j7r0207", SessionId: ydb://session/3?node_id=1&id=YTIxOWI2MmItZTcyMmE0MDEtYTgxYjc5YTYtOTM0ZTc5MDY=, Slow query, duration: 13.968350s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "\n\n-- TPC-H/TPC-R Suppliers Who Kept Orders Waiting Query (Q21)\n-- TPC TPC-H Parameter Substitution (Version 2.17.2 build 0)\n-- using 1680793381 as a seed to the RNG\n\nselect\n `/Root/supplier`.s_name as s_name,\n count(*) as numwait\nfrom\n `/Root/supplier`\n cross join `/Root/lineitem` l1\n cross join `/Root/orders`\n cross join `/Root/nation`\n left semi join (\n select\n l2.l_orderkey as l_orderkey\n from\n `/Root/lineitem` l2\n cross join `/Root/supplier`\n cross join `/Root/lineitem` l1\n cross join `/Root/orders`\n cross join `/Root/nation`\n where\n s_suppkey = l1.l_suppkey\n and o_orderkey = l1.l_orderkey\n and o_orderstatus = 'F'\n and l1.l_receiptdate > l1.l_commitdate\n and s_nationkey = 
n_nationkey\n and n_name = 'EGYPT'\n and l2.l_orderkey = l1.l_orderkey\n and l2.l_suppkey <> l1.l_suppkey\n ) as l2 on l2.l_orderkey = l1.l_orderkey\n left only join (\n select\n l3.l_orderkey as l_orderkey\n from\n `/Root/lineitem` l3\n cross join `/Root/supplier`\n cross join `/Root/lineitem` l1\n cross join `/Root/orders`\n cross join `/Root/nation`\n where\n s_suppkey = l1.l_suppkey\n and o_orderkey = l1.l_orderkey\n and o_orderstatus = 'F'\n and l1.l_receiptdate > l1.l_commitdate\n and s_nationkey = n_nationkey\n and n_name = 'EGYPT'\n and l3.l_orderkey = l1.l_orderkey\n and l3.l_suppkey <> l1.l_suppkey\n and l3.l_receiptdate > l3.l_commitdate\n ) as l3 on l3.l_orderkey = l1.l_orderkey\nwhere\n s_suppkey = l1.l_suppkey\n and o_orderkey = l1.l_orderkey\n and o_orderstatus = 'F'\n and l1.l_receiptdate > l1.l_commitdate\n and s_nationkey = n_nationkey\n and n_name = 'EGYPT'\ngroup by\n `/Root/supplier`.s_name\norder by\n numwait desc,\n s_name\nlimit 100;\n", parameters: 0b 2025-06-24T21:21:02.867407Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx1b93d4kae8ak5c0wq49h", SessionId: ydb://session/3?node_id=1&id=YTIxOWI2MmItZTcyMmE0MDEtYTgxYjc5YTYtOTM0ZTc5MDY=, Slow query, duration: 11.372631s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "-- TPC-H/TPC-R National Market Share Query (Q8)\n-- TPC TPC-H Parameter Substitution (Version 2.17.2 build 0)\n-- using 1680793381 as a seed to the RNG\n\n$join1 = (\nselect\n l.l_extendedprice * (1 - l.l_discount) as volume,\n l.l_suppkey as l_suppkey,\n l.l_orderkey as l_orderkey\nfrom\n `/Root/part` as p\njoin\n `/Root/lineitem` as l\non\n p.p_partkey = l.l_partkey\nwhere\n p.p_type = 'ECONOMY PLATED COPPER'\n);\n$join2 = (\nselect\n j.volume as volume,\n j.l_orderkey as l_orderkey,\n s.s_nationkey as s_nationkey\nfrom\n $join1 as j\njoin\n `/Root/supplier` as s\non\n s.s_suppkey = j.l_suppkey\n);\n$join3 = (\nselect\n j.volume as volume,\n j.l_orderkey as l_orderkey,\n n.n_name as nation\nfrom\n $join2 as j\njoin\n `/Root/nation` as n\non\n n.n_nationkey = j.s_nationkey\n);\n$join4 = (\nselect\n j.volume as volume,\n j.nation as nation,\n DateTime::GetYear(cast(o.o_orderdate as Timestamp)) as o_year,\n o.o_custkey as o_custkey\nfrom\n $join3 as j\njoin\n `/Root/orders` as o\non\n o.o_orderkey = j.l_orderkey\nwhere o_orderdate between Date('1995-01-01') and Date('1996-12-31')\n);\n$join5 = (\nselect\n j.volume as volume,\n j.nation as nation,\n j.o_year as o_year,\n c.c_nationkey as c_nationkey\nfrom\n $join4 as j\njoin\n `/Root/customer` as c\non\n c.c_custkey = j.o_custkey\n);\n$join6 = (\nselect\n j.volume as volume,\n j.nation as nation,\n j.o_year as o_year,\n n.n_regionkey as n_regionkey\nfrom\n $join5 as j\njoin\n `/Root/nation` as n\non\n n.n_nationkey = j.c_nationkey\n);\n$join7 = (\nselect\n j.volume as volume,\n j.nation as nation,\n j.o_year as o_year\nfrom\n $join6 as j\njoin\n `/Root/region` as r\non\n r.r_regionkey = j.n_regionkey\nwhere\n r.r_name = 'AFRICA'\n);\n\nselect\n o_year,\n sum(case\n when nation = 'MOZAMBIQUE' then volume\n else 0\n end) / sum(volume) as mkt_share\nfrom\n $join7 as all_nations\ngroup by\n o_year\norder by\n o_year;\n", parameters: 0b >> KqpJoin::LeftJoinPushdownPredicate_Simple [GOOD] >> KqpFlipJoin::LeftSemi_2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::IdxLookupPartialWithTempTable [GOOD] Test command err: Trying to start YDB, gRPC: 1956, MsgBus: 23051 2025-06-24T21:21:54.950155Z node 1 :METADATA_PROVIDER 
WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629233453953232:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:54.950348Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003917/r3tmp/tmpOtWThv/pdisk_1.dat 2025-06-24T21:21:55.628982Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629233453953043:2079] 1750800114874382 != 1750800114874385 2025-06-24T21:21:55.629128Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:55.629694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:55.644976Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:55.649457Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1956, node 1 2025-06-24T21:21:55.923380Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:55.923403Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:55.923415Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:55.923526Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:21:55.968357Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23051 TClient is connected to server localhost:23051 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:57.366831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:21:57.412625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:57.427087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:57.706233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:58.002755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:58.124427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:59.951519Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629233453953232:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:59.951612Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:00.220270Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629259223758481:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:00.220401Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:00.752392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:00.795324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:00.838955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:00.890443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:00.955342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.022579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.128937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.261007Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629263518726444:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:01.261077Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:01.261525Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629263518726449:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:01.265520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:01.336707Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629263518726451:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:01.439413Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629263518726502:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:03.214327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... .349958Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:06.351297Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629282866959612:2079] 1750800125932622 != 1750800125932625 2025-06-24T21:22:06.379866Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8822, node 2 2025-06-24T21:22:06.687468Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:06.687491Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:06.687502Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:06.687635Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:22:06.959298Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13536 TClient is connected to server localhost:13536 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:07.488803Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:22:07.504253Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:22:07.521308Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:07.674983Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:07.897611Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:08.034887Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:10.963259Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519629282866959672:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:10.963332Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:11.135331Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629308636765014:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:11.135435Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:11.181136Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:11.227622Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:11.272155Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:11.317845Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:11.417712Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:11.559413Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:11.617684Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:11.731191Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629308636765677:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:11.731299Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:11.731401Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629308636765682:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:11.735902Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:11.760211Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629308636765684:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:11.854499Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629308636765735:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:13.822807Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:13.907207Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:13.983269Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::LeftJoinPushdownPredicate_Simple [GOOD] Test command err: Trying to start YDB, gRPC: 6306, MsgBus: 12058 2025-06-24T21:21:56.499050Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629240524125217:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:56.508733Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00390b/r3tmp/tmp7vSaVT/pdisk_1.dat 2025-06-24T21:21:57.112563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:57.112665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:57.124393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:21:57.189488Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:57.191288Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629240524125014:2079] 1750800116392158 != 1750800116392161 TServer::EnableGrpc on GrpcPort 6306, node 1 2025-06-24T21:21:57.479469Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:57.479751Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use 
file: (empty maybe) 2025-06-24T21:21:57.479767Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:57.479775Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:57.479885Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12058 TClient is connected to server localhost:12058 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:58.482175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:58.511681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:21:58.532155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:58.756805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:21:59.011713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:21:59.115306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.165015Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629261998963125:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:01.165111Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:01.459426Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629240524125217:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:01.459504Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:01.662684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.701740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.753670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.810770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.878634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.977498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:02.069608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:02.199096Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519629266293931090:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:02.199244Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:02.200671Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629266293931095:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:02.204543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:02.222890Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629266293931097:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:02.319587Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629266293931148:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:03.721022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... th # /home/runner/.ya/build/build_root/2btm/00390b/r3tmp/tmpWWeG6D/pdisk_1.dat 2025-06-24T21:22:06.578640Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:22:06.768587Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:06.799811Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:06.799920Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:06.802507Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23751, node 2 2025-06-24T21:22:07.075818Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:07.075845Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:07.075860Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:07.076000Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:22:07.399382Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1089 TClient is connected to server localhost:1089 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:22:08.288660Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:08.295433Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:22:08.310814Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:08.424063Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:08.683457Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:08.785600Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:11.863374Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629308530300129:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:11.863465Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:11.910907Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:11.977733Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:12.045487Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:12.100714Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:12.172068Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:12.318801Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:12.405770Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:12.503833Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629312825268089:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:12.503961Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:12.504180Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629312825268094:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:12.509327Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:12.544152Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629312825268096:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:22:12.622345Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629312825268147:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:14.411317Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:14.460637Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:14.528598Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpJoinOrder::SortingsSimpleOrderByAliasIndexDesc+RemoveLimitOperator [GOOD] >> KqpIndexLookupJoin::JoinByComplexKeyWithNullComponents-StreamLookupJoin [GOOD] >> KqpJoinOrder::SortingsByPKWithLookupJoin-RemoveLimitOperator ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsSimpleOrderByAliasIndexDesc+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 25172, MsgBus: 64609 2025-06-24T21:21:27.721205Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629118404326170:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:27.721483Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003946/r3tmp/tmpsk9Ltd/pdisk_1.dat 2025-06-24T21:21:28.300205Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:28.300294Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:28.312625Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:21:28.332860Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:28.340118Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629118404325949:2079] 1750800087627320 != 1750800087627323 TServer::EnableGrpc on GrpcPort 25172, node 1 2025-06-24T21:21:28.659778Z node 1 :NET_CLASSIFIER 
WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:28.659810Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:28.659825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:28.659938Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:21:28.723466Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:64609 TClient is connected to server localhost:64609 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:29.582343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:29.600184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:32.432279Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629139879163074:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:32.432407Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:32.433785Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629139879163086:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:32.438246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:32.455931Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629139879163088:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:21:32.567006Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629139879163140:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:32.687335Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629118404326170:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:32.687443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:32.936056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.203869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.264470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.300276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.339369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.558692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.646960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.704172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.777440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.828076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.888021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.941483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:33.999378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:34.748311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
=72075186224038481;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:12.962058Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038435;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:12.962651Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038487;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:12.963774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:12.972767Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:12.973314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038465;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:12.976355Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038487;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:12.976838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038491;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:12.987400Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038465;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:12.987935Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038501;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:12.990651Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038491;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:12.991125Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:12.997531Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038501;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:12.998049Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:13.000854Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:13.001315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038513;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:13.011016Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038513;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:13.013014Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:13.013509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038495;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:13.015704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038499;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:13.028756Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038495;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:13.029378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038497;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:13.031871Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038499;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:13.032341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:13.038416Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:13.039064Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:13.044048Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038497;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:13.044609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038453;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:13.052887Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:13.053387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038477;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:13.054206Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038453;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:13.054630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038473;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:13.067294Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038477;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:13.068319Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038473;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:13.068651Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038517;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:13.082789Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038517;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:13.205281Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038430;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:13.224443Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038430;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:13.237807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:13.263892Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:13.423530Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx2r6z17t1pg4t1ay24v54", SessionId: ydb://session/3?node_id=1&id=N2RkMDM0MTctMmQwNzE2NTUtY2YyNzAxODgtNzFiMmYwOTg=, Slow query, duration: 35.919476s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- 
random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:13.839967Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:13.840419Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519629277318143268:6075];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038331; 2025-06-24T21:22:13.840921Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:13.843103Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
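Note: the KQP_SLOW_LOG entry above logs its statement text with escaped newlines. Decoded for readability (a plain unescaping of the logged query text, nothing added or changed), the DDL that took 35.9 s reads:

CREATE TABLE t1 (
    id1 Int32 NOT NULL,
    PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
    id2 Int64 NOT NULL,
    t1_id1 Int64 NOT NULL,
    -- random_field2 Int32
    PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
    id3 Int16 NOT NULL,
    -- random_field3 Int32
    PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

The "-- random_field2/-- random_field3" comment lines are part of the logged query text itself.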
: Warning: Execution, code: 1060
:2:1: Warning: Given predicate is not suitable for used index: ix_bank_document_exec_dt_accounts, code: 2503
: Warning: Execution, code: 1060
:2:1: Warning: Given predicate is not suitable for used index: ix_bank_document_exec_dt_accounts, code: 2503 >> KqpJoinOrder::TPCDS94+ColumnStore [GOOD] >> KqpIndexLookupJoin::CheckCastUint64ToInt64-StreamLookupJoin+NotNull [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::JoinByComplexKeyWithNullComponents-StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 8519, MsgBus: 16786 2025-06-24T21:22:01.736457Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629261693049718:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:01.741388Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0038fe/r3tmp/tmpReh1j9/pdisk_1.dat 2025-06-24T21:22:02.184237Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:02.184363Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:02.187967Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:02.188171Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629261693049698:2079] 1750800121734509 != 1750800121734512 2025-06-24T21:22:02.202446Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8519, node 1 2025-06-24T21:22:02.379859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:02.379898Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:02.379907Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:02.380021Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16786 2025-06-24T21:22:02.758237Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16786 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:22:03.162197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:03.190131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:03.371038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:03.602708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:03.738673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:06.309585Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629283167887812:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:06.309718Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:06.735359Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629261693049718:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:06.735487Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:06.869773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:06.908934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:06.949407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:07.007875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:07.101729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:07.192914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:07.312892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:07.428330Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519629287462855774:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:07.428419Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:07.428663Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629287462855779:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:07.433023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:07.451617Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629287462855781:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:07.524253Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629287462855832:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:09.208849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:09.274563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part ... ed at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 32730, MsgBus: 18101 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0038fe/r3tmp/tmp9TsYAP/pdisk_1.dat 2025-06-24T21:22:11.658196Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:22:11.765901Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:11.775335Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629308383275819:2079] 1750800131351548 != 1750800131351551 2025-06-24T21:22:11.790895Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:11.790983Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:11.806579Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32730, node 2 2025-06-24T21:22:12.087806Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:12.087831Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:12.087839Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:12.088003Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:22:12.355302Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18101 TClient is connected to server localhost:18101 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:13.137582Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:13.147958Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:22:13.162589Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:13.286083Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:13.655394Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:13.801929Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:16.652476Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629329858113929:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:16.652589Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:16.803898Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:16.900788Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:16.969982Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:17.046134Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:17.128601Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:17.226095Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:17.302090Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:17.429260Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629334153081883:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:17.429354Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:17.429505Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629334153081888:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:17.433359Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:17.447682Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629334153081890:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:17.531623Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629334153081941:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:19.132890Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:19.178217Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastUint64ToInt64-StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 2423, MsgBus: 11968 2025-06-24T21:22:02.474417Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629268942826532:2111];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:02.475983Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0038ec/r3tmp/tmpi5Y79t/pdisk_1.dat 2025-06-24T21:22:02.989740Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:02.995523Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:03.012911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:22:03.065944Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:03.067310Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629268942826458:2079] 1750800122453451 != 1750800122453454 TServer::EnableGrpc on GrpcPort 2423, node 1 2025-06-24T21:22:03.245748Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:03.245768Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:03.245779Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:03.245876Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:22:03.475337Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11968 TClient is connected to server localhost:11968 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:04.368265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:04.394550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:04.626101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:04.834398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:04.950794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:07.424976Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629290417664568:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:07.425079Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:07.458142Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629268942826532:2111];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:07.458212Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:07.964930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:08.022119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:08.083572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:08.153135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:08.213721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:08.269617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:08.334504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:08.441199Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519629294712632531:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:08.441276Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:08.441611Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629294712632536:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:08.446136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:08.459678Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629294712632538:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:08.543375Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629294712632591:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:10.275855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:10.362012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part ... ath existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:22:12.923141Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:12.923255Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:12.924123Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:12.925687Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629310439959985:2079] 1750800132565197 != 1750800132565200 2025-06-24T21:22:12.941233Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12719, node 2 2025-06-24T21:22:13.175710Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:13.175737Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:13.175744Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:13.175876Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14345 2025-06-24T21:22:13.567453Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14345 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:13.999265Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:14.012037Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:22:14.025241Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:14.171890Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:14.437147Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:14.553883Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:22:17.612057Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519629310439960159:2199];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:17.612126Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:17.716326Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629331914798087:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:17.716421Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:17.813354Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:17.876535Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:17.921451Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:17.984647Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:18.067528Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:18.145885Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:18.228419Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:18.388567Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629336209766043:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:18.388638Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:18.388967Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629336209766048:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:18.392996Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:18.407481Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629336209766050:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:18.502830Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629336209766101:3413] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:20.620347Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:20.786441Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS94+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 26851, MsgBus: 25139 2025-06-24T21:19:48.117352Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628691685806702:2218];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:48.117581Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039f5/r3tmp/tmpanw9B2/pdisk_1.dat 2025-06-24T21:19:48.790452Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:19:48.790578Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:19:48.793258Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:19:48.839136Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:19:48.847185Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628691685806522:2079] 1750799988085509 != 1750799988085512 TServer::EnableGrpc on GrpcPort 26851, node 1 2025-06-24T21:19:49.051808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:19:49.051827Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:19:49.051851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:19:49.051973Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:19:49.119502Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server 
localhost:25139 TClient is connected to server localhost:25139 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:19:50.009845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:19:50.033083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:19:52.709957Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628708865676354:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:52.710096Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:52.710386Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628708865676366:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:19:52.714398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:19:52.751053Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628708865676368:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:19:52.847935Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628708865676419:2337] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:19:53.125122Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628691685806702:2218];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:19:53.125179Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:19:53.327796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:19:53.684169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628713160643959:2310];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:53.684169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628713160643965:2316];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:19:53.684382Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628713160643965:2316];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:53.684710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628713160643965:2316];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:53.684827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628713160643965:2316];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:53.684934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628713160643965:2316];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:53.685043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628713160643965:2316];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:53.685175Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628713160643965:2316];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:53.685285Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628713160643965:2316];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:19:53.685390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628713160643965:2316];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:19:53.685498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628713160643965:2316];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:19:53.685593Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628713160643959:2310];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:19:53.685600Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628713160643965:2316];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:19:53.685769Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628713160643959:2310];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:19:53.685898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628713160643959:2310];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:19:53.686020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628713160643959:2310];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:19:53.686121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628713160643959:2310];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:19:53.686223Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628713160643959:2310];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:19:53.686316Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519628713160643959:2310];tabl ... 
=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:36.234475Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039372;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.235074Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039370;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:36.237852Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039332;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.239118Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039340;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:36.249391Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039370;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.250012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:36.253448Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039340;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.254065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:36.264177Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.264786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:36.268144Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.273053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:36.283048Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.283742Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:36.287448Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.288078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:36.294218Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.295033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:36.298403Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.304360Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:36.310106Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.310779Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:36.314746Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.319625Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:36.325103Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.325762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039210;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:36.329410Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.330118Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:36.340068Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.340700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:36.344185Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039210;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.345020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:36.354746Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.355322Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.359798Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:21:36.366509Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:21:36.789006Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx18gk3dqykhdc90vxsb23", SessionId: ydb://session/3?node_id=1&id=YTJkYzc4MGQtMTVmZTg5NGEtNTRlYTlmNGYtZWM1NTZkODM=, Slow query, duration: 48.126098s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:37.480024Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:21:37.480458Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:21:37.481626Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:22:10.117718Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx35wf5z8m6jpc1xgteyw3", SessionId: ydb://session/3?node_id=1&id=YTJkYzc4MGQtMTVmZTg5NGEtNTRlYTlmNGYtZWM1NTZkODM=, Slow query, duration: 18.612438s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n\n-- NB: Subquerys\n$bla1 = (select ws_order_number\n from web_sales\n group by ws_order_number\n having COUNT(DISTINCT ws_warehouse_sk) > 1);\n\n-- start query 1 in stream 0 using template query94.tpl and seed 2031708268\nselect\n count(distinct ws1.ws_order_number) as `order count`\n ,sum(ws_ext_ship_cost) as `total shipping cost`\n ,sum(ws_net_profit) as `total net profit`\nfrom\n web_sales ws1\n cross join date_dim\n cross join customer_address\n cross join web_site\n left semi join $bla1 bla1 on (ws1.ws_order_number = bla1.ws_order_number)\n left only join web_returns on (ws1.ws_order_number = web_returns.wr_order_number)\nwhere\n cast(d_date as date) between cast('1999-4-01' as date) and\n (cast('1999-4-01' as date) + DateTime::IntervalFromDays(60))\nand ws1.ws_ship_date_sk = d_date_sk\nand ws1.ws_ship_addr_sk = ca_address_sk\nand ca_state = 'NE'\nand ws1.ws_web_site_sk = web_site_sk\nand web_company_name = 'pri'\norder by `order count`\nlimit 100;\n", parameters: 0b >> KqpJoin::LeftJoinPushdownPredicate_NoPushdown [GOOD] >> KqpJoin::LeftJoinPushdownPredicate_Nulls >> KqpFlipJoin::LeftSemi_2 [GOOD] >> KqpFlipJoin::LeftSemi_3 >> KqpNewEngine::DeleteOn+UseSink >> KqpJoinOrder::FourWayJoinLeftFirst+ColumnStore [GOOD] >> TJaegerTracingConfiguratorTests::DefaultConfig >> TConsoleTests::TestCreateTenant >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionCreate >> TConfigsCacheTests::TestNoNotificationIfConfigIsCached >> TModificationsValidatorTests::TestIndexAndModificationsShrink_AddItems_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_AddItems_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_AddItems_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_AddItems_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsExpandScope_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsExpandScope_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsExpandScope_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsExpandScope_TENANTS_AND_NODE_TYPES >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsExpandScope_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsNarrowScope_DOMAIN [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainAffected_DOMAIN [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainAffected_TENANTS [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainAffected_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainUnaffected_TENANTS [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainUnaffected_TENANTS_AND_NODE_TYPES [GOOD] >> 
TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainAffected_DOMAIN [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainAffected_TENANTS [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainAffected_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainUnaffected_TENANTS [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainUnaffected_TENANTS_AND_NODE_TYPES [GOOD] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainUnaffected_TENANTS_AND_NODE_TYPES [GOOD] >> TConfigsCacheTests::TestNoNotificationIfConfigIsCached [GOOD] >> TConfigsCacheTests::TestFullConfigurationRestore >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionCreate [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClient >> TJaegerTracingConfiguratorTests::DefaultConfig [GOOD] >> TJaegerTracingConfiguratorTests::GlobalRules ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FourWayJoinLeftFirst+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 9531, MsgBus: 2110 2025-06-24T21:20:20.379753Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628828495650862:2171];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:20.443939Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039c5/r3tmp/tmpKUsRPE/pdisk_1.dat 2025-06-24T21:20:21.088150Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:21.088236Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:21.104741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:20:21.133010Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:21.135356Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628828495650705:2079] 1750800020247005 != 1750800020247008 TServer::EnableGrpc on GrpcPort 9531, node 1 2025-06-24T21:20:21.447363Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:21.467070Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:21.467584Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:21.467612Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:21.473021Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2110 TClient is connected to server localhost:2110 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:22.645391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:22.692407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:20:25.335957Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628828495650862:2171];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:25.336186Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:26.146659Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628854265455130:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:26.146806Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:26.149736Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628854265455142:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:26.154494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:26.169306Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628854265455144:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:20:26.274966Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628854265455195:2341] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:26.761370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:20:27.270460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628858560422770:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:20:27.272090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628858560422709:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:20:27.272394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628858560422709:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:20:27.272735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628858560422709:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:20:27.272905Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628858560422709:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:20:27.273015Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628858560422709:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:20:27.273127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628858560422709:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:20:27.273251Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628858560422709:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:20:27.273391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628858560422709:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:20:27.273497Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7519628858560422709:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:20:27.273640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628858560422709:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:20:27.273761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628858560422709:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:20:27.279290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628858560422770:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:20:27.279533Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628858560422770:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:20:27.279658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628858560422770:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:20:27.279787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628858560422770:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:20:27.279897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628858560422770:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:20:27.279997Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628858560422770:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:20:27.280114Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628858560422770:2318];tablet_id ... 
3829Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039316;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.108815Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.109462Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.111957Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039316;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.112597Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039338;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.114951Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.115934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.120889Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.121579Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.127364Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.127987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.128204Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039338;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.128666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039340;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.133917Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.135240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039348;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.137771Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039340;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.138372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039332;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.145158Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039348;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.145667Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039344;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.152217Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039332;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.152798Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039364;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.155013Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039344;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.158259Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039364;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.158933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039320;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.159924Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039336;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.180826Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039336;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.181435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039328;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.182646Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039320;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.183140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.190893Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039328;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.191679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.197458Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.197940Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039356;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.200879Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.201483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039362;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.208446Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039356;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.212102Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039362;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.212899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.220116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:10.220513Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.225922Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:10.401613Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx2ej72npxr756s0fzhq0h", SessionId: ydb://session/3?node_id=1&id=NTI2YmM2MDAtNGY5NzcyOWUtYWMxMGE2NTUtOGEzOTkzYmU=, Slow query, duration: 42.775830s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:10.772203Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:10.772868Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:10.774603Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519629245107537295:11556];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-24T21:22:10.775183Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClient [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientManyUpdates >> TConfigsCacheTests::TestFullConfigurationRestore [GOOD] >> TConfigsCacheTests::TestConfigurationSaveOnNotification >> TJaegerTracingConfiguratorTests::GlobalRules [GOOD] >> TJaegerTracingConfiguratorTests::ExternalTracePlusSampling |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> TConfigsCacheTests::TestConfigurationSaveOnNotification [GOOD] >> TConfigsCacheTests::TestOverwrittenConfigurationDoesntCauseNotification >> TJaegerTracingConfiguratorTests::ExternalTracePlusSampling [GOOD] >> TJaegerTracingConfiguratorTests::RequestTypeThrottler |96.5%| [TA] $(B)/ydb/core/tx/datashard/ut_external_blobs/test-results/unittest/{meta.json ... results_accumulator.log} |96.5%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_external_blobs/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TConsoleTests::TestCreateTenant [GOOD] >> TConsoleTests::TestCreateTenantExtSubdomain >> KqpJoinOrder::SortingsByPrefixWithAttrEquiToPK+RemoveLimitOperator [GOOD] >> KqpNewEngine::DeleteOn+UseSink [GOOD] >> KqpNewEngine::DeleteOn-UseSink >> TJaegerTracingConfiguratorTests::RequestTypeThrottler [GOOD] >> TJaegerTracingConfiguratorTests::RequestTypeSampler >> KqpFlipJoin::LeftSemi_3 [GOOD] >> TConfigsCacheTests::TestOverwrittenConfigurationDoesntCauseNotification [GOOD] >> TConfigsCacheTests::TestConfigurationChangeSensor >> TSchemeShardUserAttrsTest::SpecialAttributes >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientManyUpdates [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientManyUpdatesAddRemove >> TJaegerTracingConfiguratorTests::RequestTypeSampler [GOOD] >> TJaegerTracingConfiguratorTests::SamplingSameScope ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpFlipJoin::LeftSemi_3 [GOOD] Test command err: Trying to start YDB, gRPC: 4509, MsgBus: 30393 2025-06-24T21:22:17.655045Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629334285742418:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:17.663699Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0038c8/r3tmp/tmphxdQGc/pdisk_1.dat 2025-06-24T21:22:18.279475Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:18.279561Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:18.290913Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:22:18.368598Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:18.379287Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629334285742237:2079] 1750800137642632 != 1750800137642635 TServer::EnableGrpc on GrpcPort 4509, node 1 2025-06-24T21:22:18.707556Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:22:18.743680Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:18.743708Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:18.743714Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:18.743825Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30393 TClient is connected to server localhost:30393 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:19.807891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:19.840863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:20.063999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:20.322292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:20.447332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:22:22.655347Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629334285742418:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:22.655445Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:23.035949Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629360055547663:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:23.036073Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:23.435646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:23.505067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:23.556100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:23.602899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:23.637174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:23.682573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:23.771489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:23.903657Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629360055548331:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:23.903767Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:23.904199Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629360055548336:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:23.908977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:23.948959Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629360055548338:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:24.031609Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629364350515687:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:25.740878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:25.786974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part ... != 1750800148649897 TServer::EnableGrpc on GrpcPort 16331, node 2 2025-06-24T21:22:29.334191Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:29.334217Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:29.334225Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:29.334381Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16339 2025-06-24T21:22:29.802132Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16339 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:30.256776Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:22:30.267791Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:22:30.281389Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:30.384867Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:30.791348Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:30.930088Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:33.680433Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629401642916570:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:33.680547Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:33.715261Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519629380168078629:2186];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:33.715342Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:33.793587Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:33.843309Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:33.898901Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:33.994418Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:34.061229Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:34.122395Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:34.212831Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:34.315598Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519629405937884539:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:34.315702Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:34.316156Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629405937884544:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:34.320892Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:34.352210Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629405937884546:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:34.414139Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629405937884597:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:36.136208Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:36.208108Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:36.277672Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:36.385147Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TConfigsCacheTests::TestConfigurationChangeSensor [GOOD] >> TConfigsDispatcherTests::TestSubscriptionNotification >> KqpJoin::LeftJoinPushdownPredicate_Nulls [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsByPrefixWithAttrEquiToPK+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 12394, MsgBus: 2568 2025-06-24T21:21:42.177978Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629180203463309:2204];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:42.178112Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003936/r3tmp/tmp7XaB8A/pdisk_1.dat 2025-06-24T21:21:42.853769Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:42.855861Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629180203463142:2079] 1750800102139402 != 1750800102139405 2025-06-24T21:21:42.875845Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:42.876005Z node 1 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:42.878315Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12394, node 1 2025-06-24T21:21:43.276592Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:43.277405Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:43.279870Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:43.279894Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:43.280027Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2568 TClient is connected to server localhost:2568 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:44.343582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:44.380017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:46.999724Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629197383332972:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:46.999852Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:47.000257Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629197383332984:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:47.005050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:47.021543Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629197383332986:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:21:47.108135Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629201678300333:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:47.176298Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629180203463309:2204];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:47.176423Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:47.498034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:47.658069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:47.703847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:47.782921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:47.825723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:48.037251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:48.092041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:48.133661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:48.214127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:48.273560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:48.328572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:48.406616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:48.451900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:49.364672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperat ... 
=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.633129Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.638291Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.638874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.642284Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.642916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.652250Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.652944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.655927Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.656420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.661333Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.661922Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.665965Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.666453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.670852Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.676488Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.676987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.679792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.684544Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.685177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.690037Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.690525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.694234Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.694831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.698810Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.699496Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.699761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.700032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.704870Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.705440Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.707773Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.708424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.713827Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.714458Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.715787Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.716928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.719949Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.720977Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.721675Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.727038Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.896673Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx36a07c16tbhba3acmpqg", SessionId: ydb://session/3?node_id=1&id=YmU1M2NhNTctNDI4YmU5ODItY2ExNzQ4MC1iMmVmOGEwZA==, Slow query, duration: 38.960070s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- 
random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:31.170862Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:31.171438Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:31.171634Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519629283282695404:4528];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038629; 2025-06-24T21:22:31.172102Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
: Warning: Execution, code: 1060
:4:1: Warning: Given predicate is not suitable for used index: ix_bank_document_exec_dt_accounts, code: 2503
: Warning: Execution, code: 1060
:4:1: Warning: Given predicate is not suitable for used index: ix_bank_document_exec_dt_accounts, code: 2503 >> KqpJoinOrder::FiveWayJoinWithComplexPreds2-ColumnStore [GOOD] >> TJaegerTracingConfiguratorTests::SamplingSameScope [GOOD] >> TJaegerTracingConfiguratorTests::ThrottlingByDb >> TConfigsDispatcherTests::TestSubscriptionNotification [GOOD] >> TConfigsDispatcherTests::TestSubscriptionNotificationForNewSubscriberAfterUpdate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::LeftJoinPushdownPredicate_Nulls [GOOD] Test command err: Trying to start YDB, gRPC: 31175, MsgBus: 30234 2025-06-24T21:22:16.850291Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629328433264883:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:16.850371Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0038cf/r3tmp/tmpywHyXI/pdisk_1.dat 2025-06-24T21:22:17.363138Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629328433264720:2079] 1750800136754480 != 1750800136754483 2025-06-24T21:22:17.393091Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:17.393198Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:17.397798Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:22:17.398763Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31175, node 1 2025-06-24T21:22:17.622273Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:17.622293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:17.622299Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:17.622414Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:22:17.855507Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30234 TClient is connected to server localhost:30234 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:18.809780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:18.895305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:22:18.918866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:19.136843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:19.360128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:19.490820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:22:21.859553Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629328433264883:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:21.925459Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:22.011284Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629354203070146:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:22.011396Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:22.516061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:22.601186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:22.656000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:22.709778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:22.756476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:22.824931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:22.890899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:23.052409Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629358498038109:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:23.052494Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:23.052860Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629358498038114:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:23.057177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:23.081954Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629358498038116:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:23.175276Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629358498038167:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:24.593123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... .427429Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:27.431459Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629376527590604:2079] 1750800147022804 != 1750800147022807 2025-06-24T21:22:27.446481Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9826, node 2 2025-06-24T21:22:27.647647Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:27.647666Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:27.647673Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:27.647770Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29784 2025-06-24T21:22:28.135653Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29784 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:28.614058Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:22:28.628186Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:22:28.637218Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:28.771862Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:28.966585Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:29.097879Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:31.651370Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629393707461401:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:31.651452Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:31.713064Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:31.805127Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:31.857638Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:31.910749Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:31.964795Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:32.045319Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:32.116811Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:32.131260Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519629376527590687:2091];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:32.131324Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:32.234809Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519629398002429357:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:32.234898Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:32.235320Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629398002429362:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:32.240593Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:32.254627Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629398002429364:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:32.321411Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629398002429417:3412] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:34.023601Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:34.098548Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:34.213921Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TSchemeShardUserAttrsTest::SpecialAttributes [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex-UseSink-UseDataQuery >> TConfigsDispatcherTests::TestSubscriptionNotificationForNewSubscriberAfterUpdate [GOOD] >> TConfigsDispatcherTests::TestSubscriptionNotificationForNewSubscriberDuringUpdate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::SpecialAttributes [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:22:42.331397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:22:42.331513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:22:42.331569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:22:42.331617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:22:42.334402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:22:42.334485Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:22:42.334608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:22:42.334827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:22:42.335634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:22:42.347271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:22:42.505835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:42.505903Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:42.527333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:22:42.527783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:22:42.528046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:22:42.548418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:22:42.548729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:22:42.555621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:22:42.556203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:22:42.595902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:22:42.600426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:22:42.631494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:22:42.631612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:22:42.631895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:22:42.631952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:22:42.632075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:22:42.632184Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:22:42.662860Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:22:42.859353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:22:42.863463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:22:42.866024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:22:42.866119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:22:42.871333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:22:42.871529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:42.884038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:22:42.890712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:22:42.890990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:22:42.891159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:22:42.891202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:22:42.891236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:22:42.904527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:22:42.904613Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:22:42.904674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for 
txid 1:0 3 -> 128 2025-06-24T21:22:42.907208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:22:42.907271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:22:42.907319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:22:42.907370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:22:42.914924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:22:42.917910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:22:42.919284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:22:42.920557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:22:42.920711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:22:42.920764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:22:42.922997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:22:42.923071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:22:42.923366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:22:42.923458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:22:42.926548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:22:42.926620Z node 1 :FLAT_TX_SCHEMESHARD ... 
] was 2 2025-06-24T21:22:43.044264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:22:43.044356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:22:43.044393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:22:43.044434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-24T21:22:43.044472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:22:43.044563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-24T21:22:43.047432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-06-24T21:22:43.047562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000002 2025-06-24T21:22:43.048464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:22:43.048573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:22:43.048636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:33: MkDir::TPropose operationId# 102:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000002, at schemeshard: 72057594046678944 2025-06-24T21:22:43.048780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 128 -> 240 2025-06-24T21:22:43.048939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:22:43.049004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:22:43.050887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:22:43.051463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 FAKE_COORDINATOR: Erasing txId 102 2025-06-24T21:22:43.053564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:22:43.053600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:22:43.053747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:22:43.053885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:22:43.053940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-24T21:22:43.054000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-24T21:22:43.054271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:22:43.054318Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:22:43.054416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:22:43.054449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:22:43.054499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:22:43.054536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:22:43.054577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-24T21:22:43.054626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:22:43.054663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:22:43.054690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:22:43.054761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:22:43.054802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-24T21:22:43.054850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, 
LocalPathId: 1], 5 2025-06-24T21:22:43.054887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-24T21:22:43.055627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:22:43.055720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:22:43.055757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:22:43.055793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T21:22:43.055827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:22:43.056710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:22:43.056800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:22:43.056838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:22:43.056882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:22:43.056925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:22:43.057007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-24T21:22:43.061452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:22:43.061725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 2025-06-24T21:22:43.064690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: "DirD" } AlterUserAttributes { UserAttributes { Key: "__extra_path_symbols_allowed" Value: "./_" } } } 
TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:22:43.064956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/DirD, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:22:43.065060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 103:1, propose status:StatusInvalidParameter, reason: UserAttributes: attribute '__extra_path_symbols_allowed' has invalid value './_', forbidden symbols are found, at schemeshard: 72057594046678944 2025-06-24T21:22:43.067663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 103, response: Status: StatusInvalidParameter Reason: "UserAttributes: attribute \'__extra_path_symbols_allowed\' has invalid value \'./_\', forbidden symbols are found" TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:22:43.067941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: UserAttributes: attribute '__extra_path_symbols_allowed' has invalid value './_', forbidden symbols are found, operation: CREATE DIRECTORY, path: /MyRoot/DirD TestModificationResult got TxId: 103, wait until txId: 103 >> TJaegerTracingConfiguratorTests::ThrottlingByDb [GOOD] >> TJaegerTracingConfiguratorTests::SamplingByDb ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithComplexPreds2-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 12994, MsgBus: 64121 2025-06-24T21:21:43.952364Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629187299250758:2195];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:43.952465Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003934/r3tmp/tmpFX5VYQ/pdisk_1.dat 2025-06-24T21:21:44.827580Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:44.827689Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:44.836911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:21:44.975964Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:44.989161Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:45.000212Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629187299250601:2079] 1750800103846723 != 1750800103846726 TServer::EnableGrpc on GrpcPort 12994, node 1 2025-06-24T21:21:45.275525Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:45.275551Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:45.275559Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:45.275672Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64121 TClient is connected to server localhost:64121 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:46.457161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:46.472442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:48.945126Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629187299250758:2195];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:48.945246Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:49.178334Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629213069055027:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:49.178471Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:49.179264Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629213069055039:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:49.186390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:49.212147Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629213069055041:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:21:49.275595Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629213069055092:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:49.708201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:49.849883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:49.887605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:49.934287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:50.008427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:50.203101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:50.281761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:50.352265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:50.390057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:50.434527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:50.472617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:50.505048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:50.546414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:51.238394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
45513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.849769Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.850203Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.850427Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.850729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.855730Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.856429Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.857936Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.858590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.862262Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.862888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.868819Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.869152Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.869479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.869620Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.874986Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.875055Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.875655Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.875655Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.880844Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.881062Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.881468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.881586Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.888041Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.888196Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.888738Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.888881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.893923Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.894685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038575;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.904115Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.905300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.908086Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038575;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.908585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.914291Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.914955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.917536Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.918685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.924277Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.925495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:30.926484Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:30.930956Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:31.059577Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx384t706pz8w80k9p36s0", SessionId: ydb://session/3?node_id=1&id=ZjE3MmY0MmQtOTNhNTQzNDEtMzM0OGJhNDAtNzc5YzljMjk=, Slow query, duration: 37.240347s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:31.383451Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:31.384018Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:31.385158Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519629243133831815:2807];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038629; 2025-06-24T21:22:31.385659Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> TConsoleTests::TestCreateTenantExtSubdomain [GOOD] >> TConsoleTests::TestCreateSharedTenant |96.5%| [TA] $(B)/ydb/core/tx/schemeshard/ut_user_attributes/test-results/unittest/{meta.json ... results_accumulator.log} |96.5%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TConfigsDispatcherTests::TestSubscriptionNotificationForNewSubscriberDuringUpdate [GOOD] >> TConfigsDispatcherTests::TestRemoveSubscription >> TBackupTests::ShouldSucceedOnSingleShardTable[Zstd] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientManyUpdatesAddRemove [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientDeadCausesSubscriptionDeregistration >> KqpJoinOrder::UdfConstantFold+ColumnStore [GOOD] >> TConfigsDispatcherTests::TestRemoveSubscription [GOOD] >> TConfigsDispatcherTests::TestRemoveSubscriptionWhileUpdateInProcess >> TJaegerTracingConfiguratorTests::SamplingByDb [GOOD] >> TJaegerTracingConfiguratorTests::SharedThrottlingLimits >> KqpJoinOrder::CanonizedJoinOrderTPCH5 [GOOD] >> KqpRm::NotEnoughMemory >> KqpRm::SingleTask >> KqpRm::SingleSnapshotByExchanger >> KqpRm::DisonnectNodes >> KqpNewEngine::PkRangeSelect1 >> KqpJoinOrder::TPCH12_100 [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientDeadCausesSubscriptionDeregistration [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientReconnectsOnConnectionLoose >> TConfigsDispatcherTests::TestRemoveSubscriptionWhileUpdateInProcess [GOOD] >> TConfigsDispatcherTests::TestEmptyChangeCausesNoNotification >> KqpNewEngine::DeleteOn-UseSink [GOOD] >> KqpNewEngine::DeleteWithBuiltin+UseSink >> TJaegerTracingConfiguratorTests::SharedThrottlingLimits [GOOD] >> TJaegerTracingConfiguratorTests::SharedSamplingLimits >> KqpJoinOrder::FiveWayJoinWithConstantFoldOpt-ColumnStore [GOOD] >> KqpRm::NotEnoughMemory [GOOD] >> KqpRm::SingleTask [GOOD] >> TConfigsDispatcherTests::TestEmptyChangeCausesNoNotification [GOOD] >> TConfigsDispatcherTests::TestYamlAndNonYamlCoexist >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientReconnectsOnConnectionLoose [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithKnownConfig ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::SingleTask [GOOD] Test command err: 2025-06-24T21:22:48.464729Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T21:22:48.465258Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/00468d/r3tmp/tmpPGYx8x/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T21:22:48.465850Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/00468d/r3tmp/tmpPGYx8x/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/00468d/r3tmp/tmpPGYx8x/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 10444944998800713651 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T21:22:48.572738Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T21:22:48.573027Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T21:22:48.609698Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:460:2101] with ResourceBroker at [2:431:2100] 2025-06-24T21:22:48.609850Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:461:2102] 2025-06-24T21:22:48.610053Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:459:2339] with ResourceBroker at [1:430:2320] 2025-06-24T21:22:48.610135Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:462:2340] 2025-06-24T21:22:48.610204Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T21:22:48.610241Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-24T21:22:48.610279Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T21:22:48.610302Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
2025-06-24T21:22:48.619237Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:48.662184Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750800168 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:48.662468Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:48.662570Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750800168 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:48.662882Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T21:22:48.663017Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T21:22:48.663238Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T21:22:48.663282Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:48.663387Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750800168 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:48.663631Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T21:22:48.663659Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:48.663740Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750800168 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:48.664339Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2025-06-24T21:22:48.664590Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:48.664956Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:48.665381Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-06-24T21:22:48.665554Z node 2 :KQP_RESOURCE_MANAGER 
DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:48.665663Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-24T21:22:48.665898Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T21:22:48.666043Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:48.666141Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-24T21:22:48.666217Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T21:22:48.669132Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-2-1 (1 by [1:459:2339]) priority=0 resources={0, 100} 2025-06-24T21:22:48.669222Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-2-1 (1 by [1:459:2339]) to queue queue_kqp_resource_manager 2025-06-24T21:22:48.669290Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-2-1 (1 by [1:459:2339]) from queue queue_kqp_resource_manager 2025-06-24T21:22:48.669341Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-2-1 (1 by [1:459:2339]) to queue queue_kqp_resource_manager 2025-06-24T21:22:48.669399Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.250000 (insert task kqp-1-2-1 (1 by [1:459:2339])) 2025-06-24T21:22:48.685008Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 2. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-24T21:22:48.685333Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-1-2-1 (1 by [1:459:2339]) (release resources {0, 100}) 2025-06-24T21:22:48.685399Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.250000 to 0.000000 (remove task kqp-1-2-1 (1 by [1:459:2339])) 2025-06-24T21:22:48.685446Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 1, taskId: 2. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 0. 
>> TJaegerTracingConfiguratorTests::SharedSamplingLimits [GOOD] >> TLogSettingsConfiguratorTests::TestNoChanges >> TBackupTests::ShouldSucceedOnSingleShardTable[Zstd] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::UdfConstantFold+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 63586, MsgBus: 28789 2025-06-24T21:20:47.705699Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628945004875210:2124];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:47.705860Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00398b/r3tmp/tmpypLHyt/pdisk_1.dat 2025-06-24T21:20:48.528095Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:48.528196Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:48.541859Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:20:48.598615Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:48.611304Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628945004875127:2079] 1750800047676390 != 1750800047676393 TServer::EnableGrpc on GrpcPort 63586, node 1 2025-06-24T21:20:48.696600Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:48.932496Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:48.932522Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:48.932530Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:48.932679Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28789 TClient is connected to server localhost:28789 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:20:50.206811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:52.691399Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628945004875210:2124];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:52.691496Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:53.291100Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628970774679556:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:53.291276Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:53.291815Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628970774679568:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:53.308486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:53.334700Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628970774679570:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:20:53.420314Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628970774679621:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:53.973452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:20:54.430579Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628975069647136:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:20:54.430919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628975069647136:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:20:54.431164Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628975069647136:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:20:54.431259Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628975069647136:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:20:54.431325Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628975069647136:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:20:54.431390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628975069647136:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:20:54.431463Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628975069647136:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:20:54.432051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628975069647136:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:20:54.432178Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628975069647136:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:20:54.432272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037895;self_id=[1:7519628975069647136:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:20:54.432346Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628975069647136:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:20:54.469979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628975069647369:2332];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:20:54.470053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628975069647369:2332];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:20:54.470302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628975069647369:2332];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:20:54.470414Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628975069647369:2332];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:20:54.470528Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628975069647369:2332];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:20:54.470632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628975069647369:2332];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:20:54.470762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628975069647369:2332];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:20:54.470898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519628975069647369:2332];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:20:54.471005Z node 1 :TX_COLU ... 
60338Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039227;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.363380Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039241;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.364137Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.370107Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.370776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.376192Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039227;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.376783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039229;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.376834Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.377543Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.383762Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039229;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.384512Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039263;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.392267Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.392825Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.401060Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039263;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.401642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039223;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.411636Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.412245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039194;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.416316Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039223;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.416890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.422177Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039194;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.422717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039212;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.426820Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.431532Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039214;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.432667Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039212;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.433169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039196;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.437441Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039214;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.438619Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039226;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.444247Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039196;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.444807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039192;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.450527Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039226;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.450565Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039192;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.451570Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039204;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.458124Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039204;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.458832Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039190;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.465619Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039190;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.587213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.593717Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.612419Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039364;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.626264Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039364;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.627544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039189;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:33.634364Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039189;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:33.713732Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx33d52632aegg7qywfceg", SessionId: ydb://session/3?node_id=1&id=YTA2Mjc3MGItYmRhMGNkYTctODQ3M2UwOTMtNmU4ZTFmZDQ=, Slow query, duration: 44.747359s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:34.063891Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:34.064991Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519629284307339395:9581];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-24T21:22:34.067933Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:34.076227Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::NotEnoughMemory [GOOD] Test command err: 2025-06-24T21:22:48.451935Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T21:22:48.452465Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/00468c/r3tmp/tmpiaikiL/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T21:22:48.453015Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/00468c/r3tmp/tmpiaikiL/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/00468c/r3tmp/tmpiaikiL/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 4061661971777180545 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T21:22:48.575095Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T21:22:48.576001Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T21:22:48.606625Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:461:2101] with ResourceBroker at [2:432:2100] 2025-06-24T21:22:48.606803Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:462:2102] 2025-06-24T21:22:48.606998Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:460:2340] with ResourceBroker at [1:431:2321] 2025-06-24T21:22:48.607081Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:463:2341] 2025-06-24T21:22:48.613589Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T21:22:48.613692Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-24T21:22:48.613828Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T21:22:48.613882Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
2025-06-24T21:22:48.614081Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:48.650798Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750800168 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:48.651063Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:48.651212Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750800168 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:48.651584Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T21:22:48.651808Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T21:22:48.651951Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T21:22:48.651986Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:48.652128Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750800168 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:48.652392Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T21:22:48.652420Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:48.652492Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750800168 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:48.653010Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2025-06-24T21:22:48.653229Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:48.653538Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:48.654099Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:48.654239Z node 2 :KQP_RESOURCE_MANAGER 
DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T21:22:48.654485Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:48.654530Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:48.654646Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-24T21:22:48.654831Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-24T21:22:48.654946Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH5 [GOOD] Test command err: Trying to start YDB, gRPC: 29937, MsgBus: 17478 2025-06-24T21:20:37.967071Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628901215791787:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:37.977867Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039a6/r3tmp/tmp7vAFQ4/pdisk_1.dat 2025-06-24T21:20:38.748468Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:38.751404Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628901215791606:2079] 1750800037890357 != 1750800037890360 2025-06-24T21:20:38.793272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:38.793357Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:38.803552Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29937, node 1 2025-06-24T21:20:39.000821Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:39.063822Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:39.063847Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:39.063854Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:39.063950Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17478 TClient is connected to server localhost:17478 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:40.053839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:40.079952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:20:42.737025Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628922690628728:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:42.737146Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:42.743462Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628922690628740:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:42.748382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:42.763898Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628922690628742:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:20:42.895552Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628922690628793:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:42.947932Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628901215791787:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:42.947991Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:43.253280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:20:43.551620Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628926985596339:2315];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:20:43.551836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628926985596339:2315];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:20:43.551863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628926985596352:2322];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:20:43.551888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628926985596352:2322];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:20:43.552103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628926985596352:2322];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:20:43.552300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628926985596339:2315];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:20:43.552521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628926985596352:2322];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:20:43.552651Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628926985596352:2322];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:20:43.552777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7519628926985596352:2322];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:20:43.552866Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628926985596352:2322];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:20:43.552964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628926985596352:2322];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:20:43.553064Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628926985596352:2322];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:20:43.553173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628926985596352:2322];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:20:43.553278Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519628926985596352:2322];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:20:43.554406Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628926985596339:2315];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:20:43.554526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628926985596339:2315];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:20:43.554647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628926985596339:2315];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:20:43.554743Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628926985596339:2315];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:20:43.554853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519628926985596339:2315];tabl ... 
28796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.638841Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.639435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.643279Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.643843Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.650265Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.651000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.665489Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.666092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.680271Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.680861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.694890Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.695495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.709459Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.710019Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.716219Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.716983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.722588Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.723635Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.728867Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.729548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.735936Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.736662Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.739793Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.740332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.748313Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.748896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.750973Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.754723Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.755793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.756125Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.761565Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.766968Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.767840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.773774Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.774478Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.780742Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.874331Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.876545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:21.881064Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:21.891990Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:22.076282Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx2tdzbhc1z62hpy3hv4yb", SessionId: ydb://session/3?node_id=1&id=OGU5ZmZiNjQtZGJhNjAxZWItYzY2OTMwY2YtMTJiZDI3NWY=, Slow query, duration: 42.296589s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:22.484565Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:22.485130Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:22.486285Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519629184683669624:7728];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-24T21:22:22.486753Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> TConfigsDispatcherTests::TestYamlAndNonYamlCoexist [GOOD] >> TConfigsDispatcherTests::TestYamlEndToEnd >> KqpJoinOrder::SortingsByPrefixWithConstant-RemoveLimitOperator [GOOD] >> KqpRm::SingleSnapshotByExchanger [GOOD] >> KqpRm::DisonnectNodes [GOOD] >> TLogSettingsConfiguratorTests::TestNoChanges [GOOD] >> TLogSettingsConfiguratorTests::TestAddComponentEntries ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithConstantFoldOpt-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 10591, MsgBus: 18697 2025-06-24T21:21:49.606202Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629211116445519:2194];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:49.606728Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003922/r3tmp/tmpkjCYoV/pdisk_1.dat 2025-06-24T21:21:50.229623Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:50.229727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:50.236285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:21:50.296770Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:50.299299Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629211116445350:2079] 1750800109553944 != 1750800109553947 TServer::EnableGrpc on GrpcPort 10591, node 1 2025-06-24T21:21:50.511693Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is 
empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:50.511728Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:50.511735Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:50.511833Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:21:50.604002Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18697 TClient is connected to server localhost:18697 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:51.582208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:51.627340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:54.127116Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629232591282476:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:54.127228Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:54.127924Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629232591282488:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:54.132021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:54.148008Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629232591282490:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:21:54.227992Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629232591282541:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:54.610780Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629211116445519:2194];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:54.610862Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:54.685976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:54.823846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:54.891549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:54.973912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:55.061503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:55.291030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:55.327210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:55.413415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:55.453616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:55.494858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:55.557128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:55.609097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:55.649027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:21:56.428280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
78980Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.886899Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038560;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.888315Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.889046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038488;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.892217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.909853Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038488;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.910464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038568;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.915451Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.916031Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.926779Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.927546Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.927736Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038568;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.928295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038582;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.936815Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.937551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.939028Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038582;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.939605Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038509;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.947577Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.948193Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038531;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.952760Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038509;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.953652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.957521Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038531;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.958176Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038526;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.962587Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.967463Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.972925Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038526;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.973506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.977061Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.978802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038506;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.984372Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038506;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.984589Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.985111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038484;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.985454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038496;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.990900Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038496;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.990924Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038484;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.991589Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.992590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038576;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:37.998542Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.999632Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038576;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:38.067596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:38.086200Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:38.130548Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx3ddk9sachvrbf6k5t4hy", SessionId: ydb://session/3?node_id=1&id=NGJjOGNmODctMjYyMWFjZTMtMTA1MmI4YTYtOWRmMjA2Nw==, Slow query, duration: 38.910606s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:38.487764Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:38.488376Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:38.488875Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519629262656060057:2899];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-24T21:22:38.490331Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCH12_100 [GOOD] Test command err: Trying to start YDB, gRPC: 12521, MsgBus: 10435 2025-06-24T21:20:47.422601Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628947768544761:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:47.423418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003991/r3tmp/tmp8LzHMv/pdisk_1.dat 2025-06-24T21:20:48.103294Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628947768544629:2079] 1750800047341376 != 1750800047341379 2025-06-24T21:20:48.126244Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:48.141590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:48.141684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:48.208511Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12521, node 1 2025-06-24T21:20:48.423310Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:48.455951Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:48.455974Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:48.455982Z node 1 :NET_CLASSIFIER 
WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:48.456141Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10435 TClient is connected to server localhost:10435 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:49.401479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:49.435829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:20:52.353487Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628969243381757:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:52.353689Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:52.354104Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628969243381769:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:52.358899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:52.378715Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628969243381771:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:20:52.423289Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628947768544761:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:52.427400Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:52.476613Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628969243381824:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:53.082376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:20:53.507784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628973538349349:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:20:53.508118Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628973538349349:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:20:53.508438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628973538349349:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:20:53.508555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628973538349349:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:20:53.508675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628973538349349:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:20:53.508797Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628973538349349:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:20:53.508907Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628973538349349:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:20:53.509027Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628973538349349:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:20:53.509299Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628973538349349:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:20:53.509432Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628973538349349:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:20:53.509542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519628973538349349:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:20:53.514521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628973538349350:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:20:53.514580Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628973538349350:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:20:53.514811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628973538349350:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:20:53.514923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628973538349350:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:20:53.515030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628973538349350:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:20:53.518338Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628973538349350:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:20:53.518641Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628973538349350:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:20:53.518772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519628973538349350:2314];tabl ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.739536Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.740223Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.743094Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.743835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.746040Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.747245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.750480Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.751339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039247;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.753301Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.754303Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.757564Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039247;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.758260Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.760854Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.761750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039199;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 
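For readability, the query text quoted (with \n escapes) in the KQP_SLOW_LOG entries of these KqpJoinOrder reports — one appears a few lines below in this test's output, and an identical one appears earlier in the FiveWayJoinWithConstantFoldOpt-ColumnStore output — corresponds to the following YQL DDL. This is a direct transcription of the quoted statement text; only the line breaks and indentation are reconstructed, nothing is added:

    CREATE TABLE t1 (
        id1 Int32 NOT NULL,
        PRIMARY KEY (id1)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

    CREATE TABLE t2 (
        id2 Int64 NOT NULL,
        t1_id1 Int64 NOT NULL,
        -- random_field2 Int32
        PRIMARY KEY (id2)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

    CREATE TABLE t3 (
        id3 Int16 NOT NULL,
        -- random_field3 Int32
        PRIMARY KEY (id3)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

STORE = COLUMN creates column-oriented (OLAP) tables, and AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240 asks for at least 240 partitions per table up front; that partition count is the likely reason the statement runs for tens of seconds under the ASan build and shows up in the slow-query log with STATUS_CODE_UNSPECIFIED and 0b of results.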
2025-06-24T21:22:28.764762Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.765596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.773249Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039199;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.773799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.779806Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.781881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039265;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.783877Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.785685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.795975Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.798331Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039241;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.799862Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039265;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.802894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.808134Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039241;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.810859Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.811600Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.813879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039354;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.819770Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.820626Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039354;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.822442Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.828983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.836489Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.838207Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.839784Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.841400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.852529Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.855496Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:28.981192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039206;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:28.990289Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039206;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:29.100916Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx30zx2m9fe9dz4n2eawyk", SessionId: 
ydb://session/3?node_id=1&id=MjAwYTg1MjMtZTNjYTIyNDctMTI0OWI1ZmUtNjE4MzYyYzY=, Slow query, duration: 42.607111s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:29.475072Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:29.475181Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:29.475875Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithKnownConfig [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForPending >> TConsoleTests::TestCreateSharedTenant [GOOD] >> TConsoleTests::TestCreateServerlessTenant ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::DisonnectNodes [GOOD] Test command err: 2025-06-24T21:22:48.449791Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T21:22:48.450330Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/004681/r3tmp/tmpMp7lUK/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T21:22:48.450966Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/004681/r3tmp/tmpMp7lUK/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/004681/r3tmp/tmpMp7lUK/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 8744054582084642197 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T21:22:48.572710Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T21:22:48.572982Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T21:22:48.613948Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:460:2101] with ResourceBroker at [2:431:2100] 2025-06-24T21:22:48.614071Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:461:2102] 2025-06-24T21:22:48.614238Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:459:2339] with ResourceBroker at [1:430:2320] 2025-06-24T21:22:48.614314Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:462:2340] 2025-06-24T21:22:48.614377Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T21:22:48.614413Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-24T21:22:48.614451Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T21:22:48.614481Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
2025-06-24T21:22:48.623283Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:48.660320Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750800168 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:48.660803Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:48.660933Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750800168 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:48.661315Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T21:22:48.661480Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T21:22:48.661636Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T21:22:48.661685Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:48.661834Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750800168 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:48.662130Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T21:22:48.662159Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:48.662251Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750800168 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:48.662896Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2025-06-24T21:22:48.663204Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:48.663700Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:48.664203Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-06-24T21:22:48.671414Z node 2 :KQP_RESOURCE_MANAGER 
DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:48.671610Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-24T21:22:48.671911Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T21:22:48.672135Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:48.672276Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-24T21:22:48.672353Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T21:22:50.012452Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-24T21:22:50.012563Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-24T21:22:50.019396Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046447617] NodeDisconnected NodeId# 2 2025-06-24T21:22:50.019682Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 2 2025-06-24T21:22:50.019845Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 2 2025-06-24T21:22:50.020917Z node 2 :TX_PROXY WARN: proxy_impl.cpp:227: actor# [2:150:2088] HANDLE TEvClientDestroyed from tablet# 72057594046447617 2025-06-24T21:22:50.021089Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-06-24T21:22:50.021323Z node 2 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:60:2074] ServerId# [1:352:2270] TabletId# 72057594037932033 PipeClientId# [2:60:2074] 2025-06-24T21:22:50.021626Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:495: Subcriber is not available for info exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T21:22:50.021664Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:167: Kill previous info exchanger subscriber for 'kqpexch+/dc-1' at [2:464:2104], reason: tenant updated 2025-06-24T21:22:50.021792Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:50.024764Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:50.024925Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:50.400221Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request >> KqpLimits::OutOfSpaceYQLUpsertFail+useSink [GOOD] >> KqpLimits::OutOfSpaceYQLUpsertFail-useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::SingleSnapshotByExchanger [GOOD] Test command err: 2025-06-24T21:22:48.457931Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from 
IoContext->Setup PDiskId# 1000 2025-06-24T21:22:48.458464Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/004696/r3tmp/tmpFVJOji/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T21:22:48.459132Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/004696/r3tmp/tmpFVJOji/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/004696/r3tmp/tmpFVJOji/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 12578243176917050906 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T21:22:48.580328Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T21:22:48.580632Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T21:22:48.616954Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:461:2101] with ResourceBroker at [2:432:2100] 2025-06-24T21:22:48.617107Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:462:2102] 2025-06-24T21:22:48.617271Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:460:2340] with ResourceBroker at [1:431:2321] 2025-06-24T21:22:48.617360Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:463:2341] 2025-06-24T21:22:48.617460Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T21:22:48.617497Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
2025-06-24T21:22:48.617598Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T21:22:48.617622Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-24T21:22:48.622951Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:48.647743Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750800168 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:48.648010Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:48.648123Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750800168 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:48.648495Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T21:22:48.648714Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T21:22:48.648858Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T21:22:48.648899Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:48.649013Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750800168 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:48.649249Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T21:22:48.649275Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:48.649348Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750800168 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:48.649927Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2025-06-24T21:22:48.650134Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:48.650405Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, 
serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:48.650933Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:48.651071Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T21:22:48.655615Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:48.655679Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:48.655811Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-24T21:22:48.656016Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-24T21:22:48.656221Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T21:22:48.658998Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-2-1 (1 by [1:460:2340]) priority=0 resources={0, 100} 2025-06-24T21:22:48.659073Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-2-1 (1 by [1:460:2340]) to queue queue_kqp_resource_manager 2025-06-24T21:22:48.659134Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-2-1 (1 by [1:460:2340]) from queue queue_kqp_resource_manager 2025-06-24T21:22:48.663269Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-2-1 (1 by [1:460:2340]) to queue queue_kqp_resource_manager 2025-06-24T21:22:48.663331Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.250000 (insert task kqp-1-2-1 (1 by [1:460:2340])) 2025-06-24T21:22:48.687320Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 2. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-24T21:22:48.687459Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-2-1-2 (2 by [1:460:2340]) priority=0 resources={0, 100} 2025-06-24T21:22:48.687519Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-2-1-2 (2 by [1:460:2340]) to queue queue_kqp_resource_manager 2025-06-24T21:22:48.687571Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-2-1-2 (2 by [1:460:2340]) from queue queue_kqp_resource_manager 2025-06-24T21:22:48.687612Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-2-1-2 (2 by [1:460:2340]) to queue queue_kqp_resource_manager 2025-06-24T21:22:48.687671Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.250000 to 0.500000 (insert task kqp-2-1-2 (2 by [1:460:2340])) 2025-06-24T21:22:48.687799Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 2, taskId: 1. 
Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-24T21:22:48.688051Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:48.688182Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: alloc, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750800168 AvailableComputeActors: 80 UsedMemory: 200 TotalMemory: 1000 Memory { Pool: 1 Available: 800 } ExecutionUnits: 80 2025-06-24T21:22:48.688427Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T21:22:50.026260Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-24T21:22:50.026415Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-1-2-1 (1 by [1:460:2340]) (release resources {0, 100}) 2025-06-24T21:22:50.026498Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.500000 to 0.300150 (remove task kqp-1-2-1 (1 by [1:460:2340])) 2025-06-24T21:22:50.026578Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.100300 2025-06-24T21:22:50.026669Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 1, taskId: 2. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 10. 2025-06-24T21:22:50.026750Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-2-1-2 (2 by [1:460:2340]) (release resources {0, 100}) 2025-06-24T21:22:50.026814Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.300150 to 0.100300 (remove task kqp-2-1-2 (2 by [1:460:2340])) 2025-06-24T21:22:50.026871Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 2, taskId: 1. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 10. 
2025-06-24T21:22:50.027134Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:50.027402Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: alloc, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750800169 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:50.027852Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T21:22:50.351710Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnSingleShardTable[Zstd] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:22:47.459664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:22:47.459790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:22:47.459837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:22:47.459872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:22:47.460957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:22:47.461023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:22:47.461179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:22:47.461323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:22:47.462112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:22:47.464494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:22:47.591882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:47.591960Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:47.638954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: 
TxInitSchema.Complete 2025-06-24T21:22:47.639433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:22:47.639636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:22:47.679844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:22:47.680261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:22:47.699405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:22:47.703594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:22:47.753195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:22:47.758885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:22:47.804196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:22:47.804305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:22:47.806262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:22:47.806346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:22:47.806461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:22:47.806579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:22:47.832264Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:22:48.187653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:22:48.191554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:22:48.193172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:22:48.193258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: 
[OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:22:48.195107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:22:48.195264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:48.198819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:22:48.204678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:22:48.205003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:22:48.205199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:22:48.205249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:22:48.205316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:22:48.208133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:22:48.208213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:22:48.208262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:22:48.216346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:22:48.216450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:22:48.216533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:22:48.216590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:22:48.225039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:22:48.231523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from 
tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:22:48.233939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:22:48.235267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:22:48.235432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:22:48.235497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:22:48.239824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:22:48.239915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:22:48.240147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:22:48.240231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:22:48.245831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:22:48.245893Z node 1 :FLAT_TX_SCHEMESHARD ... 
hard: 72057594046678944 2025-06-24T21:22:49.317476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-24T21:22:49.317658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 128 -> 129 2025-06-24T21:22:49.317826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:22:49.456922Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:797: [Export] [s3] Bootstrap: self# [1:416:2385], attempt# 0 2025-06-24T21:22:49.518176Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:441: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:416:2385], sender# [1:415:2384] FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-24T21:22:49.543796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:22:49.543864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:22:49.544161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:22:49.544214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 FAKE_COORDINATOR: Erasing txId 102 2025-06-24T21:22:49.544698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:22:49.544762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:22:49.545269Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:22:49.545400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:22:49.545446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:22:49.545482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:22:49.545530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:22:49.545604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-24T21:22:49.563967Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:12831 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: FD90F008-7204-4682-9DB4-6955B8BD8FBB amz-sdk-request: attempt=1 content-length: 94 content-md5: ZpDejBbuBPHjGq8ZC8z8QA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 94 2025-06-24T21:22:49.619323Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:401: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:416:2385], result# PutObjectResult { ETag: 6690de8c16ee04f1e31aaf190bccfc40 } REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:12831 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: A11DDC32-31C5-4117-ADE4-847056FFF929 amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-24T21:22:49.634809Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:416:2385], result# PutObjectResult { ETag: 72cbc2e67a8d4d9b122f2e329a5a74fd } 2025-06-24T21:22:49.634951Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:415:2384] 2025-06-24T21:22:49.642004Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:416:2385], sender# [1:415:2384], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:12831 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 8CBC9047-5BE7-4C30-95B9-939277FE7B15 amz-sdk-request: attempt=1 content-length: 20 content-md5: 2qFn9G0TW8wfvJ9C+A5Jbw== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 20 2025-06-24T21:22:49.648509Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:416:2385], result# PutObjectResult { ETag: daa167f46d135bcc1fbc9f42f80e496f } 2025-06-24T21:22:49.648608Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:416:2385], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-24T21:22:49.648777Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:415:2384], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-24T21:22:49.668798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" 
BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:22:49.668871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T21:22:49.669031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:22:49.669134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:22:49.669228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:22:49.669287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:22:49.669333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:22:49.669395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T21:22:49.669566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:22:49.673264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:22:49.673832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:22:49.673897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:22:49.674007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:22:49.674043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:22:49.674101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:22:49.674135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:22:49.674175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation 
IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T21:22:49.674247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:339:2316] message: TxId: 102 2025-06-24T21:22:49.674299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:22:49.674348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:22:49.674387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:22:49.674505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:22:49.677298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:22:49.677355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:401:2371] TestWaitNotification: OK eventTxId 102 >> KqpJoinOrder::FiveWayJoin-ColumnStore [GOOD] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TConfigsDispatcherTests::TestYamlEndToEnd [GOOD] >> TConsoleConfigHelpersTests::TestConfigCourier |96.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> TLogSettingsConfiguratorTests::TestAddComponentEntries [GOOD] >> TLogSettingsConfiguratorTests::TestRemoveComponentEntries >> KqpRm::Reduce ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsByPrefixWithConstant-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 25634, MsgBus: 30618 2025-06-24T21:21:55.580319Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629239302353148:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:55.580379Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00390f/r3tmp/tmpvauOZI/pdisk_1.dat 2025-06-24T21:21:56.309188Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:56.309279Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:56.340960Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:21:56.406449Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629239302353125:2079] 1750800115578227 != 1750800115578230 2025-06-24T21:21:56.422722Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25634, node 1 2025-06-24T21:21:56.586244Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:56.586263Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: (empty maybe) 2025-06-24T21:21:56.586270Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:56.586374Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:21:56.619363Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30618 TClient is connected to server localhost:30618 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:57.490387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:00.233186Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629260777190263:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:00.233187Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629260777190251:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:00.233262Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:00.237602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:00.263981Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629260777190265:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:22:00.366355Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629260777190316:2337] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:00.583267Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629239302353148:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:00.583341Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:00.797245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:00.954289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.016006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.095406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.151827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.333074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.415668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.458828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.493481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.540075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.606382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.645950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.721637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:02.386182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/ ... 
42.297631Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038479;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.297644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.298234Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038481;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.303435Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.303976Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038481;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.304090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.304559Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038485;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.310141Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038485;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.310141Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.310758Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038483;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.310758Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.316641Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.316646Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038483;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.317321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038469;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.317572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038473;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.323065Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038469;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.323069Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038473;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.323717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038477;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.323717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038497;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.329439Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038477;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.329444Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038497;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.330092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038465;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.330505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.335937Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038465;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.335937Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.336601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038471;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.337026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.343270Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038471;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.343395Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.344010Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.344153Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038489;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.350015Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.350014Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038489;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.350674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038487;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.351132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.358975Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038487;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.359765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.361658Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.362349Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038501;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.365984Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.368898Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038501;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:42.479422Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx3k6nfw6q4q36fbxvz1s9", SessionId: ydb://session/3?node_id=1&id=NDkzZWNmLWQyMGNiM2RjLWM0MTkzODFjLWE1YWQxZTM3, Slow query, duration: 37.337813s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:42.886740Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:22:42.891976Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:22:42.892647Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519629355266487765:4567];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-24T21:22:42.893091Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> TConsoleConfigHelpersTests::TestConfigCourier [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriber >> KqpNewEngine::MultiSelect >> TBackupTests::ShouldSucceedOnMultiShardTable[Raw] >> KqpJoinOrder::TPCDS88-ColumnStore [GOOD] >> TLogSettingsConfiguratorTests::TestRemoveComponentEntries [GOOD] >> TLogSettingsConfiguratorTests::TestChangeDefaults >> KqpRm::Reduce [GOOD] >> KqpNewEngine::BlindWrite ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoin-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 5035, MsgBus: 2211 2025-06-24T21:21:55.132074Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629236032019789:2173];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:55.132295Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003911/r3tmp/tmpzARabw/pdisk_1.dat 2025-06-24T21:21:55.918107Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:55.918193Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:55.921125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:21:55.934261Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629236032019654:2079] 1750800115046229 != 1750800115046232 2025-06-24T21:21:55.941715Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5035, node 1 2025-06-24T21:21:56.243227Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:56.273287Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:56.273311Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:56.273324Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:56.273427Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2211 TClient is connected to server localhost:2211 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:57.302716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:57.317910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:22:00.092295Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629236032019789:2173];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:00.092363Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:00.310832Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629257506856779:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:00.310956Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:00.311220Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629257506856791:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:00.319608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:00.338708Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629257506856793:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:22:00.399663Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629257506856844:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:00.974663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.089652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.140664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.180588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.230548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.434774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.495627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.547136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.638220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.725977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.788366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.842296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.891093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:03.043893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperatio ... 
32126Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.637718Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.638350Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038507;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.651221Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.651825Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.656652Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038507;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.657211Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.663017Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.663631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.678736Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.679389Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038577;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.683269Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.684148Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.685148Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038577;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.685789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.690005Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.690685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038529;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.691862Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.692586Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.704083Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.704664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.708044Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038529;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.708857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.714987Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.715754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.718620Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.719073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.721582Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.722193Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.724704Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.725361Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.732874Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.733437Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.738965Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.739982Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.743656Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.744863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038613;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.752025Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.753104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:42.759067Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038613;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.764590Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:42.931540Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx3m1z4r622v0pa3rnnkq5", SessionId: ydb://session/3?node_id=1&id=NDFjY2RhNDItNjYxYjk1ODctZDg5MDk4ZC1iNTVmOTI0Mg==, Slow query, duration: 36.915782s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:43.289100Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:43.289645Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519629347701186079:4436];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-24T21:22:43.294064Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:43.294923Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::Reduce [GOOD] Test command err: 2025-06-24T21:22:53.893652Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T21:22:53.894248Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/00467b/r3tmp/tmpi2JtmI/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T21:22:53.897610Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/00467b/r3tmp/tmpi2JtmI/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/00467b/r3tmp/tmpi2JtmI/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 1365116271682418787 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T21:22:53.953926Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T21:22:53.954215Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T21:22:53.969432Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:457:2101] with ResourceBroker at [2:428:2100] 2025-06-24T21:22:53.969589Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:458:2102] 2025-06-24T21:22:53.969752Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:456:2336] with ResourceBroker at [1:427:2317] 2025-06-24T21:22:53.969857Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:459:2337] 2025-06-24T21:22:53.969982Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T21:22:53.970022Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-24T21:22:53.970054Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-24T21:22:53.970075Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
2025-06-24T21:22:53.970225Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:54.000909Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750800173 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:54.001235Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:54.001338Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750800173 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:54.001615Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T21:22:54.001701Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T21:22:54.001732Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:54.001821Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750800173 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:54.001934Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-24T21:22:54.001972Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-24T21:22:54.002044Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750800173 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-24T21:22:54.002150Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-24T21:22:54.002950Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2025-06-24T21:22:54.003035Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:54.007700Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:54.008276Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T21:22:54.008434Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info 
from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-24T21:22:54.008591Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-06-24T21:22:54.008837Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-24T21:22:54.009028Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-24T21:22:54.012196Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-1-1 (1 by [1:456:2336]) priority=0 resources={0, 100} 2025-06-24T21:22:54.012287Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-1-1 (1 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T21:22:54.012350Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-1-1 (1 by [1:456:2336]) from queue queue_kqp_resource_manager 2025-06-24T21:22:54.012396Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-1-1 (1 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T21:22:54.012439Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.250000 (insert task kqp-1-1-1 (1 by [1:456:2336])) 2025-06-24T21:22:54.012622Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 1. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-24T21:22:54.013526Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:441: Update task kqp-1-1-1 (1 by [1:456:2336]) (priority=0 type=kqp_query resources={0, 30} resubmit=0) 2025-06-24T21:22:54.013584Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-1-1 (1 by [1:456:2336]) to queue queue_kqp_resource_manager 2025-06-24T21:22:54.013644Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.075000 (insert task kqp-1-1-1 (1 by [1:456:2336])) 2025-06-24T21:22:54.013686Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 1, taskId: 1. Released resources, Memory: 70, Free Tier: 0, ExecutionUnits: 0. >> KqpNewEngine::PkRangeSelect1 [GOOD] >> KqpNewEngine::PkRangeSelect2 >> KqpLimits::CancelAfterRoTxWithFollowerStreamLookupDepededRead [GOOD] >> KqpSort::TopSortParameter >> TLogSettingsConfiguratorTests::TestChangeDefaults [GOOD] >> TModificationsValidatorTests::TestApplyValidators_TENANTS [GOOD] >> TModificationsValidatorTests::TestApplyValidators_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestApplyValidatorsWithOldConfig [GOOD] >> TModificationsValidatorTests::TestChecksLimitError [GOOD] >> TModificationsValidatorTests::TestChecksLimitWarning [GOOD] >> TBackupTests::ShouldSucceedOnMultiShardTable[Raw] [GOOD] |96.5%| [TA] $(B)/ydb/core/kqp/rm_service/ut/test-results/unittest/{meta.json ... results_accumulator.log} |96.5%| [TA] {RESULT} $(B)/ydb/core/kqp/rm_service/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpNewEngine::DeleteWithBuiltin+UseSink [GOOD] >> KqpNewEngine::DeleteWithBuiltin-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS88-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 64733, MsgBus: 22423 2025-06-24T21:20:33.570429Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628886557748115:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:33.595239Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039b8/r3tmp/tmpo37M2M/pdisk_1.dat 2025-06-24T21:20:34.332951Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:34.333056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:34.348610Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:20:34.441745Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64733, node 1 2025-06-24T21:20:34.498555Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628886557747926:2079] 1750800033504894 != 1750800033504897 2025-06-24T21:20:34.575258Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:34.747690Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:34.747710Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:34.747717Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:34.747816Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22423 TClient is connected to server localhost:22423 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:20:35.758931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:35.772762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:20:38.575191Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628886557748115:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:38.575441Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:38.668056Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628908032585054:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:38.668196Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:38.668492Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628908032585066:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:38.672691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:38.686425Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628908032585068:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:20:38.791220Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628908032585122:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:39.132197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:39.241713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:39.278072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:39.323488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:39.369953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:39.638537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:39.673996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:39.730533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:39.804857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:39.855016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:39.916408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:39.975664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:40.047732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:40.817587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
5-06-24T21:21:28.749591Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:28.754502Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038509;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:28.755285Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:28.760563Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:28.763877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:28.773948Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:28.774489Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:28.788830Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:28.789391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:28.803599Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:28.804213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:28.814081Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:28.814670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:28.828791Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:28.829366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:28.844019Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:28.844635Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:21:28.854457Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:21:29.095664Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx13wn3a0atc32mszx0ssk", SessionId: ydb://session/3?node_id=1&id=NTdlOTI2Ni0xYTg0ZmFhMy0xMDk3MTJlNy01MzhhOWZlOQ==, Slow query, duration: 45.170122s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:21:29.892031Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:29.892035Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:21:29.893568Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:46.977556Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx3k6y3y8vypearkwnjm8v", SessionId: ydb://session/3?node_id=1&id=NTdlOTI2Ni0xYTg0ZmFhMy0xMDk3MTJlNy01MzhhOWZlOQ==, Slow query, duration: 41.826104s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query88.tpl and seed 318176889\nselect *\nfrom\n (select count(*) h8_30_to_9\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 8\n and time_dim.t_minute >= 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and 
household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s1 cross join\n (select count(*) h9_to_9_30\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 9\n and time_dim.t_minute < 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s2 cross join\n (select count(*) h9_30_to_10\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 9\n and time_dim.t_minute >= 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s3 cross join\n (select count(*) h10_to_10_30\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 10\n and time_dim.t_minute < 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s4 cross join\n (select count(*) h10_30_to_11\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 10\n and time_dim.t_minute >= 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s5 cross join\n (select count(*) h11_to_11_30\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 11\n and time_dim.t_minute < 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s6 cross join\n (select count(*) h11_30_to_12\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and 
ss_store_sk = s_store_sk\n and time_dim.t_hour = 11\n and time_dim.t_minute >= 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s7 cross join\n (select count(*) h12_to_12_30\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 12\n and time_dim.t_minute < 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s8\n;", parameters: 0b |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> TModificationsValidatorTests::TestChecksLimitWarning [GOOD] Test command err: 2025-06-24T21:22:33.029764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:33.029827Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:33.192798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:35.437379Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:35.437465Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:35.479820Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:36.681818Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:36.681885Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:36.722172Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:38.306991Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:38.307064Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:38.356538Z node 4 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:40.193444Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:40.193530Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:40.252079Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:41.773770Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:41.773851Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:41.817772Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:43.295970Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:43.296066Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:43.343594Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:45.350425Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:45.350507Z node 8 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:45.403508Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:47.223739Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:47.223826Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:47.325294Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 
2025-06-24T21:22:48.795890Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:48.795985Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:48.843656Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:50.651717Z node 11 :CMS_CONFIGS TRACE: console_configs_manager.h:259: StateWork, received event# 268637729, Sender [11:167:2174], Recipient [11:351:2292]: {TEvControllerProposeConfigRequest Record# } 2025-06-24T21:22:50.655235Z node 11 :CMS_CONFIGS TRACE: console_configs_manager.h:293: StateWork, processing event TEvBlobStorage::TEvControllerProposeConfigRequest 2025-06-24T21:22:50.667935Z node 11 :CMS_CONFIGS TRACE: console_configs_subscriber.cpp:105: StateWork, received event# 269877760, Sender [11:312:2281], Recipient [11:311:2278]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936131 Status: OK ServerId: [11:402:2338] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-24T21:22:50.668066Z node 11 :CMS_CONFIGS TRACE: console_configs_subscriber.cpp:115: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-24T21:22:50.684310Z node 11 :CMS_CONFIGS TRACE: console_configs_manager.h:259: StateWork, received event# 273285144, Sender [11:311:2278], Recipient [11:351:2292]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionRequest { Generation: 1 Options { NodeId: 11 Host: "ghrun-4pjfmvffsq.auto.internal" Tenant: "" NodeType: "" } ConfigItemKinds: 29 ConfigItemKinds: 1 ConfigItemKinds: 89 ConfigItemKinds: 2 ConfigItemKinds: 90 ConfigItemKinds: 32 ConfigItemKinds: 3 ConfigItemKinds: 33 ConfigItemKinds: 34 ConfigItemKinds: 6 ConfigItemKinds: 36 ConfigItemKinds: 37 ConfigItemKinds: 8 ConfigItemKinds: 38 ConfigItemKinds: 10 ConfigItemKinds: 39 ConfigItemKinds: 43 ConfigItemKinds: 73 ConfigItemKinds: 75 ConfigItemKinds: 46 ConfigItemKinds: 77 ConfigItemKinds: 80 ConfigItemKinds: 81 ConfigItemKinds: 52 ConfigItemKinds: 54 ConfigItemKinds: 25 ConfigItemKinds: 55 ConfigItemKinds: 26 ServeYaml: true YamlApiVersion: 1 } 2025-06-24T21:22:50.684661Z node 11 :CMS_CONFIGS TRACE: console_configs_provider.h:229: StateWork, received event# 273285144, Sender [11:311:2278], Recipient [11:355:2304]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionRequest { Generation: 1 Options { NodeId: 11 Host: "ghrun-4pjfmvffsq.auto.internal" Tenant: "" NodeType: "" } ConfigItemKinds: 29 ConfigItemKinds: 1 ConfigItemKinds: 89 ConfigItemKinds: 2 ConfigItemKinds: 90 ConfigItemKinds: 32 ConfigItemKinds: 3 ConfigItemKinds: 33 ConfigItemKinds: 34 ConfigItemKinds: 6 ConfigItemKinds: 36 ConfigItemKinds: 37 ConfigItemKinds: 8 ConfigItemKinds: 38 ConfigItemKinds: 10 ConfigItemKinds: 39 ConfigItemKinds: 43 ConfigItemKinds: 73 ConfigItemKinds: 75 ConfigItemKinds: 46 ConfigItemKinds: 77 ConfigItemKinds: 80 ConfigItemKinds: 81 ConfigItemKinds: 52 ConfigItemKinds: 54 ConfigItemKinds: 25 ConfigItemKinds: 55 ConfigItemKinds: 26 ServeYaml: true YamlApiVersion: 1 } 2025-06-24T21:22:50.684734Z node 11 :CMS_CONFIGS TRACE: console_configs_provider.h:232: StateWork, processing event TEvConsole::TEvConfigSubscriptionRequest 2025-06-24T21:22:50.684866Z node 11 
:CMS_CONFIGS DEBUG: console_configs_provider.cpp:866: TConfigsProvider registered new subscription [11:311:2278]:1 2025-06-24T21:22:50.684978Z node 11 :CMS_CONFIGS TRACE: console_configs_provider.cpp:627: TConfigsProvider: check if update is required for volatile subscription [11:311:2278]:1 2025-06-24T21:22:50.685059Z node 11 :CMS_CONFIGS TRACE: console_configs_provider.cpp:710: TConfigsProvider: new config found for subscription [11:311:2278]:1 version= 2025-06-24T21:22:50.685232Z node 11 :CMS_CONFIGS TRACE: console_configs_provider.cpp:312: TSubscriptionClientSender([11:311:2278]) send TEvConfigSubscriptionResponse 2025-06-24T21:22:50.686437Z node 11 :CMS_CONFIGS TRACE: console_configs_subscriber.cpp:105: StateWork, received event# 273286169, Sender [11:403:2304], Recipient [11:311:2278]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionResponse { Generation: 1 Status { Code: SUCCESS } } 2025-06-24T21:22:50.686502Z node 11 :CMS_CONFIGS TRACE: console_configs_subscriber.cpp:111: StateWork, processing event TEvConsole::TEvConfigSubscriptionResponse 2025-06-24T21:22:50.686772Z node 11 :CMS_CONFIGS TRACE: console_configs_provider.cpp:320: StateWork, received event# 273285146, Sender [11:355:2304], Recipient [11:403:2304]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionNotification { Generation: 1 Config { } MainYamlConfigNotChanged: true } 2025-06-24T21:22:50.686843Z node 11 :CMS_CONFIGS TRACE: console_configs_provider.cpp:323: StateWork, processing event TEvConsole::TEvConfigSubscriptionNotification 2025-06-24T21:22:50.686950Z node 11 :CMS_CONFIGS TRACE: console_configs_provider.cpp:379: TSubscriptionClientSender([11:311:2278]) send TEvConfigSubscriptionNotificationRequest: Order: 1 Generation: 1 Config { } MainYamlConfigNotChanged: true 2025-06-24T21:22:50.690467Z node 11 :CMS_CONFIGS TRACE: console_configs_subscriber.cpp:105: StateWork, received event# 273285146, Sender [11:403:2304], Recipient [11:311:2278]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionNotification { Order: 1 Generation: 1 Config { } MainYamlConfigNotChanged: true } 2025-06-24T21:22:50.690563Z node 11 :CMS_CONFIGS TRACE: console_configs_subscriber.cpp:113: StateWork, processing event TEvConsole::TEvConfigSubscriptionNotification 2025-06-24T21:22:50.697095Z node 11 :CMS_CONFIGS INFO: log_settings_configurator.cpp:86: TLogSettingsConfigurator: got new config: 2025-06-24T21:22:50.697222Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component GLOBAL has been changed from WARN to NOTICE 2025-06-24T21:22:50.697299Z node 11 :CMS_CONFIGS NOTICE: log_settings_configur ... 
NOTICE to ALERT 2025-06-24T21:22:55.222270Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component TX_CONVEYOR has been changed from DEBUG to ALERT 2025-06-24T21:22:55.222294Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component TX_CONVEYOR has been changed from 0 to 10 2025-06-24T21:22:55.222320Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component TX_LIMITER has been changed from NOTICE to ALERT 2025-06-24T21:22:55.222345Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component TX_LIMITER has been changed from DEBUG to ALERT 2025-06-24T21:22:55.222383Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component TX_LIMITER has been changed from 0 to 10 2025-06-24T21:22:55.222429Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component ARROW_HELPER has been changed from NOTICE to ALERT 2025-06-24T21:22:55.222456Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component ARROW_HELPER has been changed from DEBUG to ALERT 2025-06-24T21:22:55.222481Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component ARROW_HELPER has been changed from 0 to 10 2025-06-24T21:22:55.222560Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component SSA_GRAPH_EXECUTION has been changed from NOTICE to ALERT 2025-06-24T21:22:55.222596Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component SSA_GRAPH_EXECUTION has been changed from DEBUG to ALERT 2025-06-24T21:22:55.222620Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component SSA_GRAPH_EXECUTION has been changed from 0 to 10 2025-06-24T21:22:55.222647Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KAFKA_PROXY has been changed from NOTICE to ALERT 2025-06-24T21:22:55.222673Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KAFKA_PROXY has been changed from DEBUG to ALERT 2025-06-24T21:22:55.222698Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component KAFKA_PROXY has been changed from 0 to 10 2025-06-24T21:22:55.222724Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component OBJECTS_MONITORING has been changed from NOTICE to ALERT 2025-06-24T21:22:55.222748Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component OBJECTS_MONITORING has been changed from DEBUG to ALERT 2025-06-24T21:22:55.222773Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component OBJECTS_MONITORING has been changed from 0 to 10 2025-06-24T21:22:55.222801Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component 
STATISTICS has been changed from NOTICE to ALERT 2025-06-24T21:22:55.222827Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component STATISTICS has been changed from DEBUG to ALERT 2025-06-24T21:22:55.222869Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component STATISTICS has been changed from 0 to 10 2025-06-24T21:22:55.222900Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_REQUEST_COST has been changed from NOTICE to ALERT 2025-06-24T21:22:55.222926Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_REQUEST_COST has been changed from DEBUG to ALERT 2025-06-24T21:22:55.222949Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_REQUEST_COST has been changed from 0 to 10 2025-06-24T21:22:55.222974Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_VDISK_BALANCING has been changed from NOTICE to ALERT 2025-06-24T21:22:55.222998Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_VDISK_BALANCING has been changed from DEBUG to ALERT 2025-06-24T21:22:55.223027Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_VDISK_BALANCING has been changed from 0 to 10 2025-06-24T21:22:55.223069Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_PROXY_GETBLOCK has been changed from NOTICE to ALERT 2025-06-24T21:22:55.223098Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_PROXY_GETBLOCK has been changed from DEBUG to ALERT 2025-06-24T21:22:55.223123Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_PROXY_GETBLOCK has been changed from 0 to 10 2025-06-24T21:22:55.223171Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_SHRED has been changed from NOTICE to ALERT 2025-06-24T21:22:55.223197Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_SHRED has been changed from DEBUG to ALERT 2025-06-24T21:22:55.223220Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_SHRED has been changed from 0 to 10 2025-06-24T21:22:55.223245Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_PROXY_CHECKINTEGRITY has been changed from NOTICE to ALERT 2025-06-24T21:22:55.223272Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_PROXY_CHECKINTEGRITY has been changed from DEBUG to ALERT 2025-06-24T21:22:55.223296Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_PROXY_CHECKINTEGRITY has been changed from 0 to 10 2025-06-24T21:22:55.223337Z node 14 :CMS_CONFIGS NOTICE: 
log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_PROXY_BRIDGE has been changed from NOTICE to ALERT 2025-06-24T21:22:55.223364Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_PROXY_BRIDGE has been changed from DEBUG to ALERT 2025-06-24T21:22:55.223385Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_PROXY_BRIDGE has been changed from 0 to 10 2025-06-24T21:22:55.223431Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component LDAP_AUTH_PROVIDER has been changed from NOTICE to ALERT 2025-06-24T21:22:55.223462Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component LDAP_AUTH_PROVIDER has been changed from DEBUG to ALERT 2025-06-24T21:22:55.223486Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component LDAP_AUTH_PROVIDER has been changed from 0 to 10 2025-06-24T21:22:55.223512Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component GROUPED_MEMORY_LIMITER has been changed from NOTICE to ALERT 2025-06-24T21:22:55.223539Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component GROUPED_MEMORY_LIMITER has been changed from DEBUG to ALERT 2025-06-24T21:22:55.223563Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component GROUPED_MEMORY_LIMITER has been changed from 0 to 10 2025-06-24T21:22:55.223592Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component DATA_INTEGRITY has been changed from NOTICE to ALERT 2025-06-24T21:22:55.223615Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component DATA_INTEGRITY has been changed from DEBUG to ALERT 2025-06-24T21:22:55.223638Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component DATA_INTEGRITY has been changed from 0 to 10 2025-06-24T21:22:55.223666Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component TX_PRIORITIES_QUEUE has been changed from NOTICE to ALERT 2025-06-24T21:22:55.223705Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component TX_PRIORITIES_QUEUE has been changed from DEBUG to ALERT 2025-06-24T21:22:55.223730Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component TX_PRIORITIES_QUEUE has been changed from 0 to 10 2025-06-24T21:22:55.223767Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BSCONFIG has been changed from NOTICE to ALERT 2025-06-24T21:22:55.223796Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BSCONFIG has been changed from DEBUG to ALERT 2025-06-24T21:22:55.223821Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BSCONFIG has been 
changed from 0 to 10 2025-06-24T21:22:55.223849Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component NAMESERVICE has been changed from NOTICE to ALERT 2025-06-24T21:22:55.223872Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component NAMESERVICE has been changed from DEBUG to ALERT 2025-06-24T21:22:55.223894Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component NAMESERVICE has been changed from 0 to 10 2025-06-24T21:22:55.223922Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BRIDGE has been changed from NOTICE to ALERT 2025-06-24T21:22:55.223983Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BRIDGE has been changed from DEBUG to ALERT 2025-06-24T21:22:55.224012Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BRIDGE has been changed from 0 to 10 2025-06-24T21:22:55.224138Z node 14 :CMS_CONFIGS TRACE: log_settings_configurator.cpp:100: TLogSettingsConfigurator: Send TEvConfigNotificationResponse: SubscriptionId: 0 ConfigId { } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::CancelAfterRoTxWithFollowerStreamLookupDepededRead [GOOD] Test command err: Trying to start YDB, gRPC: 11387, MsgBus: 26111 2025-06-24T21:16:22.490839Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627808124314223:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:22.490900Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031bc/r3tmp/tmpVh8zGd/pdisk_1.dat 2025-06-24T21:16:22.843321Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627808124314203:2079] 1750799782489650 != 1750799782489653 2025-06-24T21:16:22.850873Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11387, node 1 2025-06-24T21:16:22.896775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:22.897389Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:22.922006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:22.943829Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:22.943869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:22.943896Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:22.944016Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26111 TClient is connected to server 
localhost:26111 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:16:23.512712Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:16:23.555869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:23.572368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:16:23.589362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:25.640245Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627821009217095:2316], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:25.640382Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:25.641199Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627821009217107:2319], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:25.645859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:25.662874Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627821009217109:2320], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:16:25.747544Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627821009217160:2563] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:26.231565Z node 1 :KQP_COMPUTE WARN: log.cpp:784: fline=kqp_compute_actor_factory.cpp:41;problem=cannot_allocate_memory;tx_id=281474976715661;task_id=2;memory=1048576; 2025-06-24T21:16:26.231613Z node 1 :KQP_COMPUTE WARN: dq_compute_memory_quota.h:152: TxId: 281474976715661, task: 2. [Mem] memory 1048576 NOT granted 2025-06-24T21:16:26.269301Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519627825304184500:2329], TxId: 281474976715661, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=NjFmNzUxOWMtYTllNDBiNDMtMzA0OGIxZWMtYTMyMzI3Zjg=. TraceId : 01jyhws7n32sf6enw3td69sezg. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: OVERLOADED KIKIMR_PRECONDITION_FAILED: {
: Error: Mkql memory limit exceeded, allocated by task 2: 10, host: ghrun-4pjfmvffsq, canAllocateExtraMemory: 1, memory manager details for current node: TxResourcesInfo { TxId: 281474976715661, Database: /Root, PoolId: default, MemoryPoolPercent: 100.00, tx initially granted memory: 20B, tx total memory allocations: 1MiB, tx largest successful memory allocation: 1MiB, tx last failed memory allocation: 1MiB, tx total execution units: 2, started at: 2025-06-24T21:16:26.228654Z }, code: 2029 }. 2025-06-24T21:16:26.271552Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7519627825304184498:2328], TxId: 281474976715661, task: 1. Ctx: { TraceId : 01jyhws7n32sf6enw3td69sezg. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NjFmNzUxOWMtYTllNDBiNDMtMzA0OGIxZWMtYTMyMzI3Zjg=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [1:7519627825304184487:2314], status: OVERLOADED, reason: {
: Error: Terminate execution } 2025-06-24T21:16:26.274444Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NjFmNzUxOWMtYTllNDBiNDMtMzA0OGIxZWMtYTMyMzI3Zjg=, ActorId: [1:7519627821009217092:2314], ActorState: ExecuteState, TraceId: 01jyhws7n32sf6enw3td69sezg, Create QueryResponse for error on request, msg:
: Error: Mkql memory limit exceeded, allocated by task 2: 10, host: ghrun-4pjfmvffsq, canAllocateExtraMemory: 1, memory manager details for current node: TxResourcesInfo { TxId: 281474976715661, Database: /Root, PoolId: default, MemoryPoolPercent: 100.00, tx initially granted memory: 20B, tx total memory allocations: 1MiB, tx largest successful memory allocation: 1MiB, tx last failed memory allocation: 1MiB, tx total execution units: 2, started at: 2025-06-24T21:16:26.228654Z } , code: 2029 Trying to start YDB, gRPC: 6660, MsgBus: 25338 2025-06-24T21:16:27.139382Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519627827618508281:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:27.150468Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0031bc/r3tmp/tmpAwYyO9/pdisk_1.dat 2025-06-24T21:16:27.338309Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:27.338410Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:16:27.342346Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:27.364102Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6660, node 2 2025-06-24T21:16:27.414069Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:27.414096Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:27.414106Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:27.414245Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25338 TClient is connected to server localhost:25338 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'R ... SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 453ms } {
: Error: Cancelling after 454ms during execution } ] 2025-06-24T21:22:32.396202Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519629397646255381:3928], TxId: 281474976710823, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=. TraceId : 01jyhx4dc5fyn9y9sbv8hhkpan. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7519629397646255374:2483], status: CANCELLED, reason: {
: Error: Terminate execution } 2025-06-24T21:22:32.396733Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519629397646255382:3929], TxId: 281474976710823, task: 3. Ctx: { CustomerSuppliedId : . TraceId : 01jyhx4dc5fyn9y9sbv8hhkpan. SessionId : ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7519629397646255374:2483], status: CANCELLED, reason: {
: Error: Terminate execution } 2025-06-24T21:22:32.397352Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4dc5fyn9y9sbv8hhkpan, Create QueryResponse for error on request, msg: 2025-06-24T21:22:32.867991Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4dtwfmqarc82h2qs9ht4, Create QueryResponse for error on request, msg: 2025-06-24T21:22:33.342790Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4e9rdwcwexv6xgjvrtmd, Create QueryResponse for error on request, msg: 2025-06-24T21:22:33.819542Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4erf60qf02jxc8em93pd, Create QueryResponse for error on request, msg: 2025-06-24T21:22:34.501609Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4fdsedqqasagb9scqs9t, Create QueryResponse for error on request, msg: 2025-06-24T21:22:34.973846Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4fwje83cn4drpk5e6h4t, Create QueryResponse for error on request, msg: 2025-06-24T21:22:35.795634Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4gp37c9zbbgmj9a5c1ap, Create QueryResponse for error on request, msg: 2025-06-24T21:22:36.274054Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4h534b29a63g8k7tbefs, Create QueryResponse for error on request, msg: 2025-06-24T21:22:37.158663Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4j0m4ewb2n2h6pgg5hax, Create QueryResponse for error on request, msg: 2025-06-24T21:22:37.632993Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4jfj0nsr38cqfhtppykk, Create QueryResponse for error on request, msg: 2025-06-24T21:22:38.119928Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4jyh0nndsfz05fsy1f84, Create QueryResponse for 
error on request, msg: 2025-06-24T21:22:39.111423Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4kxfa3t6vk5g4mkktsxk, Create QueryResponse for error on request, msg: 2025-06-24T21:22:39.595470Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4mcq1gst557sn45sqn6h, Create QueryResponse for error on request, msg: 2025-06-24T21:22:40.555390Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4naf0czgr50mwzncf1a8, Create QueryResponse for error on request, msg: 2025-06-24T21:22:41.046178Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4nsq9vqxvm81xd7snxgm, Create QueryResponse for error on request, msg: 2025-06-24T21:22:41.659354Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4pd26zke5b6nedkaf3jr, Create QueryResponse for error on request, msg: 2025-06-24T21:22:42.159045Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4pwm8pq6agwjbqz2bp6z, Create QueryResponse for error on request, msg: 2025-06-24T21:22:42.991656Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4qphd4znf1enjtmpp3g2, Create QueryResponse for error on request, msg: 2025-06-24T21:22:43.495435Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4r5yffzcf9s3ca4d98tp, Create QueryResponse for error on request, msg: 2025-06-24T21:22:44.217127Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4rwre22sqxq5mjwnkrdz, Create QueryResponse for error on request, msg: 2025-06-24T21:22:44.715818Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4sc9610z8hxy65j729zf, Create QueryResponse for error on request, msg: 2025-06-24T21:22:45.395625Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4t1e5dtrsc9dfmqbmdjx, Create QueryResponse for error on 
request, msg: 2025-06-24T21:22:45.899158Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4th5e9hgmtc16m81xdja, Create QueryResponse for error on request, msg: 2025-06-24T21:22:46.649376Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4v8j6knxmkavdceffdbr, Create QueryResponse for error on request, msg: 2025-06-24T21:22:47.155401Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4vr62ybjdb25q73y10wg, Create QueryResponse for error on request, msg: 2025-06-24T21:22:47.829858Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4wdbax6h1h3jmkdge4me, Create QueryResponse for error on request, msg: 2025-06-24T21:22:48.338696Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4wx7cvkcs9ap5xxnrecm, Create QueryResponse for error on request, msg: 2025-06-24T21:22:49.320415Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4xvw1dtb5565b7gpd8fa, Create QueryResponse for error on request, msg: 2025-06-24T21:22:50.383407Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4yvdaa6nwhg8tk9ryk7e, Create QueryResponse for error on request, msg: 2025-06-24T21:22:50.902183Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4zd22cqdp5t0fp32v5yd, Create QueryResponse for error on request, msg: 2025-06-24T21:22:51.495421Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx4zzm52pp8n4x6t1fktdq, Create QueryResponse for error on request, msg: 2025-06-24T21:22:52.024677Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx50g29x4gkzqxsc0m05f6, Create QueryResponse for error on request, msg: 2025-06-24T21:22:52.661841Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzI3NTk1OWUtNDg5Y2FjNWMtNWRkOWI3MGItNWQxZjJkYTM=, ActorId: [4:7519628933789783518:2483], ActorState: ExecuteState, TraceId: 01jyhx513n9rtkhtrmhzjfg3zp, Create QueryResponse for error on request, 
msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnMultiShardTable[Raw] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:22:54.360144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:22:54.360273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:22:54.360317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:22:54.360355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:22:54.360421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:22:54.360453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:22:54.360517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:22:54.360593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:22:54.361389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:22:54.361750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:22:54.447536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:54.447601Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:54.474183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:22:54.478875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:22:54.479070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:22:54.488514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:22:54.488818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:22:54.489502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 
72057594046678944 2025-06-24T21:22:54.489968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:22:54.497224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:22:54.497457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:22:54.498697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:22:54.498764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:22:54.498875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:22:54.498928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:22:54.498974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:22:54.499120Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:22:54.505953Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:22:54.655491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:22:54.655723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:22:54.655948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:22:54.656014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:22:54.656259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:22:54.656332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:54.659057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:22:54.659294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:22:54.659598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:22:54.659686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:22:54.659727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:22:54.659775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:22:54.663119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:22:54.663235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:22:54.663294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:22:54.666360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:22:54.666436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:22:54.666511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:22:54.666576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:22:54.670463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:22:54.675970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:22:54.676191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:22:54.677300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:22:54.677443Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:22:54.677493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:22:54.677795Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:22:54.677858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:22:54.678037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:22:54.678151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:22:54.681089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:22:54.681171Z node 1 :FLAT_TX_SCHEMESHARD ... 248053Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:478:2434] HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: DA3B354E-5C85-41F9-B905-BEE6DADA1080 amz-sdk-request: attempt=1 content-length: 11 content-md5: jsMhyzH+cyrvZpBm0dQVGQ== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_01.csv / 2025-06-24T21:22:55.248167Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:479:2436], sender# [1:478:2434], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } / 11 2025-06-24T21:22:55.248358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:22:55.248618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:22:55.248675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:22:55.248716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:22:55.248778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:22:55.248897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: 
TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-24T21:22:55.249265Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:486:2441], result# PutObjectResult { ETag: 8ec321cb31fe732aef669066d1d41519 } 2025-06-24T21:22:55.249342Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:486:2441], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-24T21:22:55.249631Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:485:2439], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:22461 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 0F779F51-BC52-4E35-A834-8F44DD6965F9 amz-sdk-request: attempt=1 content-length: 11 content-md5: bj4KQf2rit2DOGLxvSlUww== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv / / 11 2025-06-24T21:22:55.258691Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:479:2436], result# PutObjectResult { ETag: 6e3e0a41fdab8add833862f1bd2954c3 } 2025-06-24T21:22:55.258761Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:479:2436], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-24T21:22:55.259118Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:478:2434], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 FAKE_COORDINATOR: Erasing txId 102 2025-06-24T21:22:55.271736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:22:55.315823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 327 RawX2: 4294969602 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:22:55.315900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-24T21:22:55.316082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 327 RawX2: 4294969602 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:22:55.316225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 327 RawX2: 4294969602 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:22:55.316302Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 1, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:22:55.316461Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:22:55.317379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 323 RawX2: 4294969601 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:22:55.317426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T21:22:55.317567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 323 RawX2: 4294969601 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:22:55.317655Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 323 RawX2: 4294969601 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:22:55.317703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:22:55.317737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:22:55.317797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:22:55.317854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:22:55.317897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T21:22:55.318036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 
1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:22:55.329276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:22:55.329968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:22:55.330409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:22:55.330459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:22:55.330565Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:22:55.330602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:22:55.330649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:22:55.330711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:22:55.330759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T21:22:55.330834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:376:2341] message: TxId: 102 2025-06-24T21:22:55.330892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:22:55.330935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:22:55.330966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:22:55.331132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:22:55.340111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:22:55.340188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:459:2417] TestWaitNotification: OK eventTxId 102 >> TConsoleConfigHelpersTests::TestConfigSubscriber [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantTenant >> TConsoleTests::TestAlterTenantModifyStorageResourcesForPending [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForPendingExtSubdomain >> KqpJoinOrder::TPCDS92+ColumnStore [GOOD] >> KqpKv::ReadRows_UnknownTable >> TConsoleTests::TestCreateServerlessTenant [GOOD] >> TConsoleTests::TestCreateServerlessTenantWrongSharedDb |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> KqpJoinOrder::TPCH9_100 [GOOD] >> KqpJoinOrder::SortingsWithLookupJoinByPrefix-RemoveLimitOperator [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantTenant [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantMultipleTenants >> 
KqpJoinOrder::TPCDS23 [GOOD] >> TBackupTests::ShouldSucceedOnLargeData[Raw] >> KqpNotNullColumns::InsertNotNullPk >> KqpNewEngine::MultiSelect [GOOD] >> KqpNewEngine::MultiOutput ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS92+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 6081, MsgBus: 15680 2025-06-24T21:20:43.824369Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628929779505041:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:43.824823Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003994/r3tmp/tmpldSllT/pdisk_1.dat 2025-06-24T21:20:44.428225Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:44.428324Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:44.471723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:20:44.524983Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:44.527168Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628929779504947:2079] 1750800043768726 != 1750800043768729 TServer::EnableGrpc on GrpcPort 6081, node 1 2025-06-24T21:20:44.844479Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:44.871102Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:44.871121Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:44.871129Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:44.871248Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15680 TClient is connected to server localhost:15680 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:20:45.976705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:46.003315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:20:48.795243Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628929779505041:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:48.795311Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:48.940386Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628951254342077:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:48.940606Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628951254342089:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:48.940718Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:48.945248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:48.963173Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628951254342091:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:20:49.044875Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628955549309439:2339] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:49.477937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:20:49.886142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628955549309675:2318];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:20:49.886373Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628955549309675:2318];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:20:49.886657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628955549309675:2318];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:20:49.886822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628955549309675:2318];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:20:49.886944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628955549309675:2318];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:20:49.887051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628955549309675:2318];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:20:49.887435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628955549309675:2318];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:20:49.887606Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628955549309675:2318];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:20:49.887737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628955549309675:2318];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:20:49.887845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519628955549309675:2318];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:20:49.887960Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519628955549309675:2318];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:20:49.888889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628955549309670:2313];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:20:49.888926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628955549309670:2313];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:20:49.889108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628955549309670:2313];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:20:49.889206Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628955549309670:2313];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:20:49.889289Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628955549309670:2313];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:20:49.889400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628955549309670:2313];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:20:49.889495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628955549309670:2313];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:20:49.889584Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519628955549309670:2313];tablet ... 
1474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:29.062980Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:29.062980Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039237;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:29.063589Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:29.063922Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:29.068672Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:29.068747Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:29.069270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:29.069516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:29.074507Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:29.074506Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:29.075077Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:29.075441Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:29.080513Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:29.080513Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 
2025-06-24T21:22:29.081104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039261;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:29.081397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:29.086599Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039261;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:29.086613Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:29.087256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:29.087261Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:29.092289Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:29.092460Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:29.092931Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:29.093033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:29.098250Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:29.098250Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:29.098857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:29.099367Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:29.104316Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:29.104367Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:29.105534Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:29.105601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:29.112605Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:29.112605Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:29.113184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:29.118188Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:29.358813Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx33g8e0hjm4mkbjvw07xw", SessionId: ydb://session/3?node_id=1&id=YTc5MjU1YTQtNmU1NTJlMzAtMmMyMGEzYmYtZTEzM2NiNTI=, Slow query, duration: 40.293493s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:29.936591Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:22:29.936710Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:22:29.937380Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:22:49.606354Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: 
"01jyhx4ms5e09rj3n46ek1zyp7", SessionId: ydb://session/3?node_id=1&id=YTc5MjU1YTQtNmU1NTJlMzAtMmMyMGEzYmYtZTEzM2NiNTI=, Slow query, duration: 10.079213s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n\n$bla = (\n SELECT\n web_sales.ws_item_sk bla_item_sk,\n avg(ws_ext_discount_amt) bla_ext_discount_amt\n FROM\n web_sales\n cross join date_dim\n WHERE\n cast(d_date as date) between cast('2001-03-12' as date) and\n (cast('2001-03-12' as date) + DateTime::IntervalFromDays(90))\n and d_date_sk = ws_sold_date_sk\n group by web_sales.ws_item_sk\n );\n\n-- start query 1 in stream 0 using template query92.tpl and seed 2031708268\nselect\n sum(ws_ext_discount_amt) as `Excess Discount Amount`\nfrom\n web_sales\n cross join item\n cross join date_dim\n join $bla bla on (item.i_item_sk = bla.bla_item_sk)\nwhere\ni_manufact_id = 356\nand i_item_sk = ws_item_sk\nand cast(d_date as date) between cast('2001-03-12' as date) and\n (cast('2001-03-12' as date) + DateTime::IntervalFromDays(90))\nand d_date_sk = ws_sold_date_sk\nand ws_ext_discount_amt\n > 1.3 * bla.bla_ext_discount_amt\norder by `Excess Discount Amount`\nlimit 100;\n", parameters: 0b >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantMultipleTenants [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantDomain ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoinByPrefix-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 2331, MsgBus: 12271 2025-06-24T21:22:04.191960Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629277807792462:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:04.192508Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0038ea/r3tmp/tmpaFjv4g/pdisk_1.dat 2025-06-24T21:22:05.057943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:05.058042Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:05.075745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:22:05.102795Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:22:05.155326Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629277807792250:2079] 1750800124136273 != 1750800124136276 2025-06-24T21:22:05.160055Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2331, node 1 2025-06-24T21:22:05.424020Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:05.424044Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:05.424053Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T21:22:05.424192Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12271 TClient is connected to server localhost:12271 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:06.538989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:09.195310Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629277807792462:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:09.195374Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:09.340093Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629299282629395:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:09.340194Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629299282629384:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:09.340514Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:09.344752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:09.369721Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629299282629398:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:22:09.443443Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629299282629449:2339] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:09.784354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:09.931347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:09.969231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:10.006149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:10.035694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:10.194476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:10.257511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:10.315638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:10.398261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:10.442301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:10.524260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:10.588229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:10.629350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:11.694146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/co ... 
03122Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038552;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.506030Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.506706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.516519Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.517100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.519833Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038552;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.520387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038550;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.525586Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038550;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.526275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.530915Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.535939Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.540204Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.540727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.543069Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.543745Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.546737Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.547404Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.549225Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.549832Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.555629Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.556369Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.557985Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.558448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.562143Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.563950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.569872Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.570426Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.572078Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.572644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.578410Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.579470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.580517Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.581271Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.590028Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.590733Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.604053Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.604653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.611037Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.611945Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:51.617808Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.629121Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:51.739234Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx3wdv1kvh2dq1srfnshx3", SessionId: ydb://session/3?node_id=1&id=MTUzM2FiYTYtN2Y0ZWQ2MTQtODI2MzU5OGUtNzQ1ODA3ZTI=, Slow query, duration: 37.150709s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:52.173357Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:22:52.173537Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:22:52.174210Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519629393771927418:4626];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-24T21:22:52.174726Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> KqpJoinOrder::CanonizedJoinOrderTPCH10 [GOOD] >> KqpNewEngine::PkRangeSelect2 [GOOD] >> KqpNewEngine::OnlineRO_Consistent >> YdbProxy::CopyTable >> YdbProxy::ListDirectory >> YdbProxy::RemoveDirectory >> YdbProxy::CreateTopic >> KqpNewEngine::BlindWrite [GOOD] >> KqpNewEngine::BlindWriteParameters >> YdbProxy::CreateTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCH9_100 [GOOD] Test command err: Trying to start YDB, gRPC: 20649, MsgBus: 10276 2025-06-24T21:20:57.721095Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628986720179965:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:57.721693Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003963/r3tmp/tmpMe1kmy/pdisk_1.dat 2025-06-24T21:20:58.486126Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:58.486221Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:58.549613Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628986720179784:2079] 1750800057633518 != 1750800057633521 2025-06-24T21:20:58.570017Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:58.570836Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20649, node 1 2025-06-24T21:20:58.687386Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 
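For reference, the statement captured (with escaped newlines) in the KQP_SLOW_LOG entry above is the column-store DDL these join-order suites create before running their queries; expanded for readability, it reads:

    CREATE TABLE t1 (
        id1 Int32 NOT NULL,
        PRIMARY KEY (id1)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

    CREATE TABLE t2 (
        id2 Int64 NOT NULL,
        t1_id1 Int64 NOT NULL,
        -- random_field2 Int32
        PRIMARY KEY (id2)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

    CREATE TABLE t3 (
        id3 Int16 NOT NULL,
        -- random_field3 Int32
        PRIMARY KEY (id3)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

The statement text is exactly as logged; only the indentation is editorial. The same DDL recurs verbatim in KQP_SLOW_LOG entries later in this output (TPCDS23, TPCH9_100, CanonizedJoinOrderTPCH10), differing only in the reported durations and session identifiers, so it is not repeated there.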
2025-06-24T21:20:58.864841Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:58.864865Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:58.864876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:58.864993Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10276 TClient is connected to server localhost:10276 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:00.216008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:02.680123Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628986720179965:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:02.680189Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:03.442761Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629012489984211:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:03.442852Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629012489984219:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:03.442906Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:03.450882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:03.477151Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629012489984225:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:21:03.569266Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629012489984276:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:03.977466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:21:04.268687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629016784951771:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:04.268901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629016784951771:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:04.269158Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629016784951771:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:04.269264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629016784951771:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:04.269351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629016784951771:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:04.269508Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629016784951771:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:04.269613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629016784951771:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:04.269713Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629016784951771:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:04.269857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629016784951771:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:21:04.269985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7519629016784951771:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:21:04.270121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629016784951771:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:21:04.275098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519629016784951774:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:04.275271Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519629016784951774:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:04.275473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519629016784951774:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:04.275601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519629016784951774:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:04.275697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519629016784951774:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:04.275791Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519629016784951774:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:04.275925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519629016784951774:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:04.276029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519629016784951774:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:04.276150Z node 1 :TX_COLU ... 
17991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.019934Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.020479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.030292Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.030858Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.031745Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.032235Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.036935Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.037557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.043931Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.044494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.050123Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.050630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.057419Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.057936Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.059866Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.060579Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.065927Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.067667Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.068199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.071794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.077591Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.078257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.081968Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.082500Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.106453Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.107132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.120662Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.121916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.127090Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.134013Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.134649Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.135777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.140353Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.141885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.144554Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.145493Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.148207Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.159176Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.217018Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039199;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:37.224921Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039199;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:37.388507Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx3am1djqgtzdt29b295pd", SessionId: ydb://session/3?node_id=1&id=ZmE1YjkzNTctMWZmMWNiZWYtYTUzOWIyMTUtYzVlMWM4NDU=, Slow query, duration: 41.035067s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:37.718808Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:37.719432Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:37.719528Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519629304547806738:9407];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-24T21:22:37.720013Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> TConsoleTests::TestCreateServerlessTenantWrongSharedDb [GOOD] >> TConsoleTests::TestCreateTenantWrongName >> KqpJoinOrder::TestJoinOrderHintsComplex+ColumnStore [GOOD] >> KqpJoinOrder::FiveWayJoinStatsOverride+ColumnStore [GOOD] >> KqpJoinOrder::TPCDS95+ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS23 [GOOD] Test command err: Trying to start YDB, gRPC: 19832, MsgBus: 25316 2025-06-24T21:18:49.192424Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628438367955902:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:49.192467Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ae3/r3tmp/tmp4zH8MN/pdisk_1.dat 2025-06-24T21:18:49.943274Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628438367955883:2079] 1750799929169027 != 1750799929169030 2025-06-24T21:18:50.014427Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:18:50.014517Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:18:50.014701Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:18:50.025270Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19832, node 1 2025-06-24T21:18:50.372550Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:18:50.373424Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:18:50.373432Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:18:50.373439Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:18:50.373567Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25316 TClient is connected to server localhost:25316 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:18:51.402280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:18:51.444352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:18:54.199239Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628438367955902:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:18:54.199315Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:18:54.305144Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628459842793008:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:54.305298Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:54.305710Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628459842793020:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:18:54.310205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:18:54.331531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:18:54.331789Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628459842793022:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:18:54.407579Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628459842793076:2342] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:18:54.806602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:18:55.094516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628464137760606:2321];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:55.094773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628464137760606:2321];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:55.095080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628464137760606:2321];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:55.095413Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628464137760606:2321];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:55.095551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628464137760606:2321];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:55.095677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628464137760606:2321];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:55.095790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628464137760606:2321];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:18:55.095911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628464137760606:2321];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:18:55.096026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628464137760606:2321];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:18:55.096157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[1:7519628464137760606:2321];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:18:55.096268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628464137760606:2321];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:18:55.143109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628464137760623:2324];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:18:55.143467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628464137760623:2324];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:18:55.143696Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628464137760623:2324];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:18:55.143792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628464137760623:2324];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:18:55.143893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628464137760623:2324];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:18:55.143998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628464137760623:2324];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:18:55.144095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519628464137760623:2324];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_regis ... 
3;result=not_found; 2025-06-24T21:20:28.135182Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:28.135811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:28.139903Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:28.140611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:28.141984Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:28.142806Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:28.146879Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:28.147539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:28.149262Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:28.150133Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:28.153196Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:28.153894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:28.161181Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:28.161822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:28.168090Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:28.168788Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:28.175085Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:28.176033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:28.176313Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:28.176838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:28.183104Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:28.183828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039349;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:20:28.190239Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039349;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:28.191017Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:20:28.413117Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhwzb7q9mbdcafq3zdz0v3e", SessionId: ydb://session/3?node_id=1&id=NWZlNDY0YmQtYjJlMWM0MGItZjZhYTE2YjYtN2IyYTQ1NGQ=, Slow query, duration: 42.501131s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:20:29.291021Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:29.291555Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:20:29.294076Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519628816325136628:11424];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-24T21:20:29.294567Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:51.316517Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx2pnt5cfhhwc98gb7yqhz", SessionId: ydb://session/3?node_id=1&id=NWZlNDY0YmQtYjJlMWM0MGItZjZhYTE2YjYtN2IyYTQ1NGQ=, Slow query, duration: 75.385227s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n\n-- NB: Subquerys\n\n$blabla = (\n\n select substring(cast(item.i_item_desc as string),0,30) itemdesc,item.i_item_sk item_sk,date_dim.d_date solddate\n\n from store_sales as store_sales\n\n cross join date_dim as date_dim\n\n cross join item as item\n\n where ss_sold_date_sk = d_date_sk\n\n and ss_item_sk = i_item_sk\n\n and d_year in (2000,2000+1,2000+2,2000+3)\n\n);\n\n$frequent_ss_items =\n\n (select itemdesc, item_sk, solddate,count(*) cnt\n\n from $blabla\n\n group by itemdesc,item_sk,solddate\n\n having count(*) >4);\n\n\n\n$max_store_sales =\n\n (select max(csales) tpcds_cmax\n\n from (select customer.c_customer_sk c_customer_sk,sum(ss_quantity*ss_sales_price) csales\n\n from store_sales as store_sales\n\n cross join customer as customer\n\n cross join date_dim as date_dim\n\n where ss_customer_sk = c_customer_sk\n\n and ss_sold_date_sk = d_date_sk\n\n and d_year in (2000,2000+1,2000+2,2000+3)\n\n group by customer.c_customer_sk) x);\n\n\n\n$best_ss_customer =\n\n (select customer.c_customer_sk c_customer_sk,sum(ss_quantity*ss_sales_price) ssales\n\n from store_sales as store_sales\n\n cross join customer as customer\n\n where ss_customer_sk = c_customer_sk\n\n group by customer.c_customer_sk\n\n having sum(ss_quantity*ss_sales_price) > (95/100.0) * $max_store_sales);\n\n\n\n-- start query 1 in stream 0 using template query23.tpl and seed 2031708268\n\nselect sum(sales)\n\n from (select cs_quantity*cs_list_price sales\n\n from catalog_sales as catalog_sales\n\n cross join date_dim as date_dim\n\n where d_year = 2000\n\n and d_moy = 3\n\n and cs_sold_date_sk = d_date_sk\n\n and cs_item_sk in (select item_sk from $frequent_ss_items)\n\n and cs_bill_customer_sk in (select c_customer_sk from $best_ss_customer)\n\n union all\n\n select ws_quantity*ws_list_price sales\n\n from web_sales as web_sales\n\n cross join date_dim as date_dim\n\n where d_year = 2000\n\n and d_moy = 3\n\n and ws_sold_date_sk = d_date_sk\n\n and ws_item_sk in (select item_sk from $frequent_ss_items)\n\n and ws_bill_customer_sk in (select c_customer_sk from $best_ss_customer)) y\n\n limit 100;\n\n\n\nselect c_last_name,c_first_name,sales\n\n from (select customer.c_last_name c_last_name,customer.c_first_name c_first_name,sum(cs_quantity*cs_list_price) sales\n\n from catalog_sales as catalog_sales\n\n cross join customer as customer\n\n cross join date_dim as date_dim\n\n where d_year = 
2000\n\n and d_moy = 3\n\n and cs_sold_date_sk = d_date_sk\n\n and cs_item_sk in (select item_sk from $frequent_ss_items)\n\n and cs_bill_customer_sk in (select c_customer_sk from $best_ss_customer)\n\n and cs_bill_customer_sk = c_customer_sk\n\n group by customer.c_last_name,customer.c_first_name\n\n union all\n\n select customer.c_last_name c_last_name,customer.c_first_name c_first_name,sum(ws_quantity*ws_list_price) sales\n\n from web_sales as web_sales\n\n cross join customer as customer\n\n cross join date_dim as date_dim\n\n where d_year = 2000\n\n and d_moy = 3\n\n and ws_sold_date_sk = d_date_sk\n\n and ws_item_sk in (select item_sk from $frequent_ss_items)\n\n and ws_bill_customer_sk in (select c_customer_sk from $best_ss_customer)\n\n and ws_bill_customer_sk = c_customer_sk\n\n group by customer.c_last_name,customer.c_first_name) y\n\n order by c_last_name,c_first_name,sales\n\n limit 100;\n\n\n\n-- end query 1 in stream 0 using template query23.tpl", parameters: 0b >> KqpKv::ReadRows_UnknownTable [GOOD] >> KqpMergeCn::TopSortByDesc_Double_Limit3 >> KqpSort::TopSortParameter [GOOD] >> KqpSort::TopSortExpr >> KqpJoinOrder::FourWayJoinWithPredsAndEquivAndLeft-ColumnStore [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForPendingExtSubdomain [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForRunning >> KqpNewEngine::DeleteWithBuiltin-UseSink [GOOD] >> KqpNewEngine::DeleteON >> KqpNotNullColumns::ReplaceNotNullPk >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantDomain [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriptionEraser ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH10 [GOOD] Test command err: Trying to start YDB, gRPC: 21520, MsgBus: 19620 2025-06-24T21:20:54.845387Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628976760494005:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:54.845443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003972/r3tmp/tmpkmC4dd/pdisk_1.dat 2025-06-24T21:20:55.540625Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:55.540715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:55.541688Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628976760493909:2079] 1750800054750769 != 1750800054750772 2025-06-24T21:20:55.623841Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:55.624732Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21520, node 1 2025-06-24T21:20:55.899249Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:56.031691Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:56.031716Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:56.031724Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:56.031820Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19620 TClient is connected to server localhost:19620 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:56.979483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:59.839249Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628976760494005:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:59.860049Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:00.013003Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629002530298345:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:00.013086Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629002530298334:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:00.013241Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:00.017513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:00.051296Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629002530298348:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:21:00.151956Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629002530298399:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:00.643924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:21:01.172786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519629006825265946:2318];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:01.172986Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519629006825265946:2318];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:01.173230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519629006825265946:2318];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:01.173344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519629006825265946:2318];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:01.173456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519629006825265946:2318];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:01.173556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519629006825265946:2318];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:01.173652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519629006825265946:2318];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:01.173799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519629006825265946:2318];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:01.173921Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519629006825265946:2318];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:21:01.174021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037892;self_id=[1:7519629006825265946:2318];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:21:01.174127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519629006825265946:2318];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:21:01.176811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629002530298627:2313];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:01.176861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629002530298627:2313];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:01.177048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629002530298627:2313];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:01.177145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629002530298627:2313];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:01.177242Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629002530298627:2313];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:01.177345Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629002530298627:2313];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:01.177490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629002530298627:2313];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:01.177602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629002530298627:2313];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:01.177717Z node 1 :TX_COLU ... 
88639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.591285Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.592300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.594902Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.595885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.597351Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.597955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.602679Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.603457Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.607945Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.608567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.617974Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.619014Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.621256Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.621764Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.630780Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.631432Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.632824Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.633548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.637391Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.638045Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.644050Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.644871Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.645291Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.645349Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039282;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.650576Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.651912Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.657450Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.658308Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.658640Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039282;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.659926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.664278Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.667555Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.668389Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.668476Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.674755Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.683968Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.761820Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.771937Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.796151Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:40.803017Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:40.919916Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx377v0bp268zvr4mwjcev", SessionId: ydb://session/3?node_id=1&id=ZjZiMmUzOTgtYWY3N2NhYzItYjAwMmM3YzAtODQ1YTNhYWU=, Slow query, duration: 48.027927s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:41.352129Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:41.352676Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:41.353247Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519629311767990514:9568];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-24T21:22:41.353677Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TestJoinOrderHintsComplex+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 24669, MsgBus: 13653 2025-06-24T21:21:03.618118Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629016220896572:2174];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:03.618328Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003957/r3tmp/tmpd2Lyni/pdisk_1.dat 2025-06-24T21:21:04.204034Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629016220896436:2079] 1750800063542835 != 1750800063542838 2025-06-24T21:21:04.223909Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:04.246317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:04.246437Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:04.252114Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24669, node 1 2025-06-24T21:21:04.459642Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:04.459661Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:04.459684Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:04.459801Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:21:04.639321Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13653 TClient is connected to server localhost:13653 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:05.403601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:05.438328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:08.215924Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629037695733563:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:08.216055Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:08.216495Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629037695733575:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:08.220782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:08.238260Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629037695733577:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:21:08.351876Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629037695733628:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:08.587291Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629016220896572:2174];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:08.587392Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:08.939228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:21:09.454028Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629041990701152:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:09.454280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629041990701152:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:09.454580Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629041990701152:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:09.454690Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629041990701152:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:09.454791Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629041990701152:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:09.454896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629041990701152:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:09.455009Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629041990701152:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:09.455134Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629041990701152:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:09.458417Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629041990701206:2323];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:09.458484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629041990701206:2323];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:09.458705Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629041990701206:2323];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:09.458816Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629041990701206:2323];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:09.458908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629041990701206:2323];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:09.459004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629041990701206:2323];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:09.459098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629041990701206:2323];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:09.459322Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629041990701152:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:21:09.459500Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629041990701152:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:21:09.459612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629041990701152:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:21:09.461184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629041990701206:2323];tabl ... 
4: tablet_id=72075186224039215;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.791396Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039315;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.792017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.797145Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.799776Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039215;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.800313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.804697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.810156Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.810641Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.816457Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.816966Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.821933Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.822543Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039305;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.823847Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.824314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.830243Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.830837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.836017Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.836751Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.836791Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039305;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.837209Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039249;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.842344Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.843819Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.847930Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039249;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.848417Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.848488Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.849025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.853458Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.854550Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.855384Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.855965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.862924Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.864606Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.865404Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.868173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.872259Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.876474Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.877093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:46.905623Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:47.111901Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx3hfk8f9gq9kh7c4nmgyc", SessionId: ydb://session/3?node_id=1&id=NDk0YzVjMy1hZDM4Zjk5Yy03MjBhNTBkNi00YjAwNjAyYQ==, Slow query, duration: 43.731319s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:47.466689Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:47.467289Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:47.468310Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519629346933425837:9516];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039392; 2025-06-24T21:22:47.468783Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
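For readability, the DDL that appears escaped (with \n sequences) inside the KQP_SLOW_LOG entries above is reproduced below. This is only an unescaped rendering of the logged query text (whitespace approximate), not new or modified schema:

    CREATE TABLE t1 (
        id1 Int32 NOT NULL,
        PRIMARY KEY (id1)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

    CREATE TABLE t2 (
        id2 Int64 NOT NULL,
        t1_id1 Int64 NOT NULL,
        -- random_field2 Int32
        PRIMARY KEY (id2)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

    CREATE TABLE t3 (
        id3 Int16 NOT NULL,
        -- random_field3 Int32
        PRIMARY KEY (id3)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

Each statement creates a column-oriented table (STORE = COLUMN) pre-split into at least 240 partitions, which is presumably why the CREATE TABLE batch takes tens of seconds under this ASAN build and triggers the slow-query log entries.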
: Warning: Execution, code: 1060
: Warning: Unapplied hint: JoinOrder( (Unused1 Unused2) (Unused3 Unused4) ), code: 4534
: Warning: Unapplied hint: Rows(Unused # 10e8), code: 4534
: Warning: Unapplied hint: Rows(R T # 1), code: 4534
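The "Unapplied hint" warnings above (code 4534) mean the cost-based optimizer parsed the hints but could not bind the referenced relation labels (Unused, Unused1..Unused4, R, T) to tables in the join graph, so those hints were ignored — presumably the intended behavior for this test, given the labels are literally named Unused*. As a hedged sketch only, assuming the hints were supplied through the ydb.OptimizerHints pragma (the pragma name and the SELECT below are illustrative assumptions, not taken from this log; the hint text itself is copied from the warnings), the producing query would look roughly like:

    -- hypothetical sketch; pragma name and SELECT body are illustrative assumptions
    PRAGMA ydb.OptimizerHints =
        'JoinOrder( (Unused1 Unused2) (Unused3 Unused4) )
         Rows(Unused # 10e8)
         Rows(R T # 1)';
    -- t1/t2 are the column tables created above; none of the hint labels match them,
    -- so the optimizer reports every hint as unapplied
    SELECT COUNT(*) FROM t1 JOIN t2 ON t1.id1 = t2.t1_id1;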
: Warning: Execution, code: 1060
: Warning: Unapplied hint: JoinOrder( (Unused1 Unused2) (Unused3 Unused4) ), code: 4534
: Warning: Unapplied hint: Rows(Unused # 10e8), code: 4534
: Warning: Unapplied hint: Rows(R T # 1), code: 4534 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FourWayJoinWithPredsAndEquivAndLeft-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 26069, MsgBus: 9757 2025-06-24T21:22:11.322453Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629308433040547:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:11.322532Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0038e3/r3tmp/tmpJj0EfY/pdisk_1.dat 2025-06-24T21:22:11.897750Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:11.897853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:12.005810Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:22:12.036815Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:12.040258Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629308433040343:2079] 1750800131275838 != 1750800131275841 TServer::EnableGrpc on GrpcPort 26069, node 1 2025-06-24T21:22:12.330228Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:22:12.391208Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:12.391231Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:12.391242Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:12.391348Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9757 TClient is connected to server localhost:9757 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:22:13.530641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:13.571690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:22:16.206988Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629329907877465:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:16.207125Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:16.207680Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629329907877477:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:16.212300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:16.230912Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629329907877479:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:22:16.322688Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629308433040547:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:16.322765Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:16.332894Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629329907877530:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:16.678908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:16.833280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:16.885879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:16.945545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:16.985964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:17.177111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:17.237377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:17.285094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:17.349815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:17.399966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:17.455681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:17.490261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:17.536189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:18.264323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperat ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.649876Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.650407Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.655569Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.656026Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.656193Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.656789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.662104Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.662207Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.662786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.662786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.668662Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.668662Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.669304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.669673Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 
2025-06-24T21:22:55.674990Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.674990Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.675652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038577;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.676113Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.680112Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038577;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.680951Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.685311Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.685919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038515;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.687226Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.687726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.691590Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038515;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.692106Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.692264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038539;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.692647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.697604Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.697741Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038539;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.698254Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.698539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.703433Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.703433Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.704073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.704529Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.709423Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.709498Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.710145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.711108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:55.715718Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.715744Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:55.823578Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx42jk2qwj4gjyay1d7h25", SessionId: 
ydb://session/3?node_id=1&id=N2FkYmI1NWEtYzVkMzNjMTItOGQzMDA1NTctNDJjMDM2N2U=, Slow query, duration: 34.936433s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:56.104196Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:56.104203Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:56.105322Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> TConsoleTests::TestCreateTenantWrongName [GOOD] >> TConsoleTests::TestCreateTenantWrongNameExtSubdomain |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinStatsOverride+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 28869, MsgBus: 5546 2025-06-24T21:21:07.359794Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629033519549636:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:07.387673Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003955/r3tmp/tmpYQ4M9d/pdisk_1.dat 2025-06-24T21:21:08.236446Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:08.236560Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:08.256087Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:21:08.375120Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:08.378858Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629033519549591:2079] 1750800067333530 != 1750800067333533 TServer::EnableGrpc on GrpcPort 28869, node 1 2025-06-24T21:21:08.397560Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:08.600022Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable 
config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:08.600050Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:08.600058Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:08.600195Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5546 TClient is connected to server localhost:5546 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:09.837589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:09.859561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:12.363315Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629033519549636:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:12.363424Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:12.650784Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629054994386721:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:12.650914Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:12.651288Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629054994386733:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:12.659418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:12.728949Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629054994386735:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:21:12.803095Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629054994386786:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:13.607738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:21:13.969400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629059289354320:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:13.969650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629059289354320:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:13.969891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629059289354320:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:13.969998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629059289354320:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:13.970092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629059289354320:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:13.970191Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629059289354320:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:13.970285Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629059289354320:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:13.970383Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629059289354320:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:13.970474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629059289354320:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:21:13.970573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037900;self_id=[1:7519629059289354320:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:21:13.970668Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629059289354320:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:21:13.998780Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629059289354322:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:13.998841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629059289354322:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:13.999076Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629059289354322:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:14.003308Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629059289354322:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:14.003506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629059289354322:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:14.003613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629059289354322:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:14.003703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629059289354322:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:14.003798Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629059289354322:2318];tablet_ ... 
87758Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.091707Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039333;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.097206Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.097851Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.101196Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.105920Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.109282Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.110106Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.115186Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.115781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.115853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.120500Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.124887Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.125518Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.127728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.139774Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.140273Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.144884Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.145501Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.151740Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.152390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.157418Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.158030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.163105Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.163819Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.169357Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.170007Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.175395Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.176087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.181474Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.182063Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.187664Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.188322Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.194577Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.195224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.201124Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.203445Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.215028Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.237790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:48.239766Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.242950Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:48.466176Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx3pv3f5pgra5584e17y4z", SessionId: ydb://session/3?node_id=1&id=NDcxNzFhZmQtYWZiZjM1MjAtYzA3NmY5ZGMtZmQzM2E2NWM=, Slow query, duration: 39.598170s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:48.745530Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:48.746141Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:48.746907Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519629308397493849:7875];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224039392; 2025-06-24T21:22:48.747386Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpNotNullColumns::InsertNotNullPk [GOOD] >> KqpNotNullColumns::InsertNotNullPkPg+useSink >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchTimeout >> TConsoleConfigHelpersTests::TestConfigSubscriptionEraser [GOOD] >> FeatureFlagsConfiguratorTest::TestFeatureFlagsUpdates >> TSchemeshardStatsBatchingTest::PeriodicTopicStatsReload >> TSchemeshardStatsBatchingTest::ShouldNotBatchWhenDisabled >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeReserved >> YdbProxy::CreateTopic [GOOD] >> YdbProxy::DescribeConsumer >> YdbProxy::RemoveDirectory [GOOD] >> YdbProxy::StaticCreds ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS95+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 65168, MsgBus: 25711 2025-06-24T21:20:35.616074Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628893940520024:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:35.616135Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039b1/r3tmp/tmpABWZ6x/pdisk_1.dat 2025-06-24T21:20:36.317047Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:36.317161Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:36.319063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:20:36.344845Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628893940520004:2079] 1750800035614919 != 1750800035614922 2025-06-24T21:20:36.352093Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65168, node 1 2025-06-24T21:20:36.583658Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:36.583678Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:36.583685Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:36.583789Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:20:36.671926Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25711 TClient is connected to server localhost:25711 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:37.818439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:40.608140Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628915415357131:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:40.608297Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:40.615307Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628915415357143:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:40.619290Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628893940520024:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:40.619364Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:40.623475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:40.639352Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628915415357145:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:20:40.725413Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628915415357198:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:41.056917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:20:41.356804Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628919710324723:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:20:41.357215Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628919710324723:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:20:41.357504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628919710324723:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:20:41.357622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628919710324723:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:20:41.357721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628919710324723:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:20:41.357845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628919710324723:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:20:41.358226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628919710324723:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:20:41.358372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628919710324723:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:20:41.358480Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628919710324723:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:20:41.358615Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037898;self_id=[1:7519628919710324723:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:20:41.358717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519628919710324723:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:20:41.361703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628919710324725:2314];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:20:41.361777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628919710324725:2314];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:20:41.361942Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628919710324725:2314];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:20:41.362502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628919710324725:2314];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:20:41.362657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628919710324725:2314];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:20:41.362759Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628919710324725:2314];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:20:41.362861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628919710324725:2314];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:20:41.362962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519628919710324725:2314];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:20:41.363070Z node 1 :TX_COLU ... 
ntroller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:20.589512Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:20.596244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:20.601670Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:20.602224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:20.608505Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:20.609141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:20.614708Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:20.616208Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:20.616731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:20.622129Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:20.622642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:20.627821Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:20.629842Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:20.630738Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:20.640088Z node 1 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:20.640712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:20.644515Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:20.645113Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:20.650529Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:20.651101Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:20.659821Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:20.660062Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:20.660435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:20.660640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:20.666035Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:20.666644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:20.666817Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:20.667352Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:20.672469Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:20.673070Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:20.673399Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:20.674002Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:20.678976Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:20.684940Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:20.960556Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx2qce7jepz9mggxgbspbh", SessionId: ydb://session/3?node_id=1&id=MTkyMWI5MDMtYWZkZGViNTAtYzc4MDdmYzQtODk4ZjRiZWU=, Slow query, duration: 44.306024s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:22.040501Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:22.041305Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519629233242984257:9602];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-24T21:22:22.041790Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:22.047104Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:53.971477Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx4kkf5s375dm7get9bcvp", SessionId: ydb://session/3?node_id=1&id=MTkyMWI5MDMtYWZkZGViNTAtYzc4MDdmYzQtODk4ZjRiZWU=, Slow query, duration: 15.651447s, status: 
STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n-- NB: Subquerys\n$ws_wh =\n(select ws1.ws_order_number ws_order_number,ws1.ws_warehouse_sk wh1,ws2.ws_warehouse_sk wh2\n from web_sales ws1 cross join web_sales ws2\n where ws1.ws_order_number = ws2.ws_order_number\n and ws1.ws_warehouse_sk <> ws2.ws_warehouse_sk);\n-- start query 1 in stream 0 using template query95.tpl and seed 2031708268\n select\n count(distinct ws1.ws_order_number) as `order count`\n ,sum(ws_ext_ship_cost) as `total shipping cost`\n ,sum(ws_net_profit) as `total net profit`\nfrom\n web_sales ws1\n cross join date_dim\n cross join customer_address\n cross join web_site\nwhere\n cast(d_date as date) between cast('2002-4-01' as date) and\n (cast('2002-4-01' as date) + DateTime::IntervalFromDays(60))\nand ws1.ws_ship_date_sk = d_date_sk\nand ws1.ws_ship_addr_sk = ca_address_sk\nand ca_state = 'AL'\nand ws1.ws_web_site_sk = web_site_sk\nand web_company_name = 'pri'\nand ws1.ws_order_number in (select ws_order_number\n from $ws_wh)\nand ws1.ws_order_number in (select wr_order_number\n from web_returns cross join $ws_wh ws_wh\n where wr_order_number = ws_wh.ws_order_number)\norder by `order count`\nlimit 100;\n", parameters: 0b >> YdbProxy::ListDirectory [GOOD] >> YdbProxy::DropTopic >> YdbProxy::CopyTable [GOOD] >> YdbProxy::CopyTables >> YdbProxy::CreateTable [GOOD] >> YdbProxy::CreateCdcStream >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeRequest >> KqpNewEngine::LocksSingleShard >> KqpJoinOrder::SortingsByPKWithLookupJoin-RemoveLimitOperator [GOOD] >> FeatureFlagsConfiguratorTest::TestFeatureFlagsUpdates [GOOD] >> KqpNewEngine::MultiOutput [GOOD] >> KqpNewEngine::MultiStatement >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchSize >> TSchemeshardStatsBatchingTest::TopicAccountSizeAndUsedReserveSize >> KqpNewEngine::KeyColumnOrder ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> FeatureFlagsConfiguratorTest::TestFeatureFlagsUpdates [GOOD] Test command err: 2025-06-24T21:22:33.081302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:33.081374Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:33.193023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:35.019171Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:35.019241Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:35.068318Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:36.468273Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:36.468340Z node 3 :IMPORT WARN: 
schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:36.521703Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:38.335211Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:38.335334Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:38.400442Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:40.249381Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:40.249467Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:40.319291Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:54.550450Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:54.550549Z node 15 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:54.649003Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:56.180842Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:56.180929Z node 16 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:56.236802Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:02.419802Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:02.419930Z node 21 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:02.471300Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first 
called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:04.308570Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:04.308677Z node 22 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:04.372970Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:05.851616Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:05.851717Z node 23 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:05.934771Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:07.360351Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:07.360445Z node 24 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:07.476843Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) >> TSchemeshardStatsBatchingTest::PeriodicTopicStatsReload [GOOD] >> TBackupTests::BackupUuidColumn[Zstd] >> KqpNewEngine::BlindWriteParameters [GOOD] >> KqpNewEngine::BlindWriteListParameter >> TConsoleTests::TestCreateTenantWrongNameExtSubdomain [GOOD] >> TConsoleTests::TestCreateTenantWrongPool >> KqpNewEngine::OnlineRO_Consistent [GOOD] >> KqpNewEngine::OnlineRO_Inconsistent ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::PeriodicTopicStatsReload [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:07.732816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:07.732902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:07.732938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: 
StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:07.732990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:07.734022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:07.734075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:07.734149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:07.734775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:07.735599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:07.737400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:07.862566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:07.862621Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:07.885432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:07.889934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:07.890120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:07.902042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:07.902321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:07.902981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:07.903353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:07.906478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:07.906687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:07.915988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:07.916113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:07.916259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:07.916310Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:07.916416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:07.916578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:07.940310Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:23:08.106941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:08.108835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.109958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:08.110059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:08.111624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:08.111754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:08.121812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:08.122040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:08.122270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.122351Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:08.122398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:08.122431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:08.124689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.124761Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:08.124887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:08.127580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.127641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.127690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:08.127743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:08.132600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:08.134763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:08.134994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:08.137397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:08.137525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:08.137578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:08.139173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:08.139248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:08.139483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:08.139564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:08.141923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:08.141977Z node 1 :FLAT_TX_SCHEMESHARD ... : TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.994173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.994406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.994526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.994618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.994870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.995217Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.995553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.995863Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.995946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.996116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.996179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.996234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.996559Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T21:23:09.001324Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:23:09.001539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:09.002653Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435083, Sender [1:567:2494], Recipient [1:567:2494]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2025-06-24T21:23:09.002706Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5014: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2025-06-24T21:23:09.008322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
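For readability, the escaped query text in the truncated query-log entry at the start of this excerpt (pragma TablePathPrefix "/Root/test/ds/", TPC-DS template query95.tpl, seed 2031708268) decodes to the YQL below. Only the \n and \" escapes are expanded; the text, including the template's own "Subquerys" comment and single-space indentation, is reproduced exactly as captured in the log.

    pragma TablePathPrefix = "/Root/test/ds/";
    -- NB: Subquerys
    $ws_wh =
    (select ws1.ws_order_number ws_order_number,ws1.ws_warehouse_sk wh1,ws2.ws_warehouse_sk wh2
     from web_sales ws1 cross join web_sales ws2
     where ws1.ws_order_number = ws2.ws_order_number
     and ws1.ws_warehouse_sk <> ws2.ws_warehouse_sk);
    -- start query 1 in stream 0 using template query95.tpl and seed 2031708268
     select
     count(distinct ws1.ws_order_number) as `order count`
     ,sum(ws_ext_ship_cost) as `total shipping cost`
     ,sum(ws_net_profit) as `total net profit`
    from
     web_sales ws1
     cross join date_dim
     cross join customer_address
     cross join web_site
    where
     cast(d_date as date) between cast('2002-4-01' as date) and
     (cast('2002-4-01' as date) + DateTime::IntervalFromDays(60))
    and ws1.ws_ship_date_sk = d_date_sk
    and ws1.ws_ship_addr_sk = ca_address_sk
    and ca_state = 'AL'
    and ws1.ws_web_site_sk = web_site_sk
    and web_company_name = 'pri'
    and ws1.ws_order_number in (select ws_order_number
     from $ws_wh)
    and ws1.ws_order_number in (select wr_order_number
     from web_returns cross join $ws_wh ws_wh
     where wr_order_number = ws_wh.ws_order_number)
    order by `order count`
    limit 100;

The $ws_wh subquery self-joins web_sales on ws_order_number to find orders shipped from more than one warehouse; the outer query counts such orders that also appear in web_returns, restricted by ship date, customer state, and web site.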
2025-06-24T21:23:09.008433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:09.008951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:09.009018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:09.009083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:09.009127Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:23:09.013838Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 274399233, Sender [1:603:2494], Recipient [1:567:2494]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-24T21:23:09.013914Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5105: StateWork, processing event TEvTxAllocatorClient::TEvAllocateResult 2025-06-24T21:23:09.013959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:567:2494] sender: [1:625:2058] recipient: [1:15:2062] 2025-06-24T21:23:09.080741Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [1:624:2538], Recipient [1:567:2494]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-24T21:23:09.080818Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T21:23:09.080956Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:23:09.081229Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 244us result status StatusSuccess 2025-06-24T21:23:09.081825Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 1 WriteSpeedInBytesPerSecond: 7 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 
TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 7 AccountSize: 17 DataSize: 17 UsedReserveSize: 7 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:09.082566Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271188001, Sender [1:626:2539], Recipient [1:567:2494]: NKikimrPQ.TEvPeriodicTopicStats PathId: 2 Generation: 1 Round: 96 DataSize: 19 UsedReserveSize: 7 2025-06-24T21:23:09.082629Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4989: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2025-06-24T21:23:09.082692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 2] DataSize 19 UsedReserveSize 7 2025-06-24T21:23:09.082741Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:128: Will execute TTxStoreStats, queue# 1 2025-06-24T21:23:09.082808Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.000000s, queue# 1 2025-06-24T21:23:09.083059Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [1:627:2540], Recipient [1:567:2494]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-24T21:23:09.083104Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T21:23:09.083352Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:23:09.083603Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 227us result status StatusSuccess 2025-06-24T21:23:09.084158Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 
1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 1 WriteSpeedInBytesPerSecond: 7 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 7 AccountSize: 17 DataSize: 17 UsedReserveSize: 7 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpNotNullColumns::ReplaceNotNullPk [GOOD] >> KqpNotNullColumns::ReplaceNotNullPkPg >> TStoragePoolsStatsPersistence::SameAggregatedStatsAfterRestart >> TSchemeshardStatsBatchingTest::TopicAccountSizeAndUsedReserveSize [GOOD] >> YdbProxy::DescribeConsumer [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsByPKWithLookupJoin-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 9811, MsgBus: 21532 2025-06-24T21:22:21.457075Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629349742150488:2186];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:21.457250Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0038c3/r3tmp/tmpBnIAfR/pdisk_1.dat 2025-06-24T21:22:22.230949Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:22.231066Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:22.250250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:22:22.313658Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:22.315316Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription 
[1:7519629349742150340:2079] 1750800141399849 != 1750800141399852 TServer::EnableGrpc on GrpcPort 9811, node 1 2025-06-24T21:22:22.463685Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:22:22.599718Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:22.599736Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:22.599742Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:22.599838Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21532 TClient is connected to server localhost:21532 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:23.750135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:23.792232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:22:26.451655Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629349742150488:2186];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:26.451777Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:26.608191Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629371216987468:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:26.608322Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:26.611255Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629371216987481:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:26.615567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:26.637632Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629371216987483:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:22:26.716254Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629371216987534:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:27.105032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:27.231968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:27.317358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:27.368570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:27.420154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:27.656679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:27.709694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:27.752565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:27.801938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:27.858284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:27.906560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:27.949566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:28.012188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:29.167657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, subopera ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.711257Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038558;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.711904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.712076Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038602;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.712575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.717262Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.717263Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.717935Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.718261Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038576;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.723077Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.723803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038574;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.724109Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038576;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.724703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038580;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.729249Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038574;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.729681Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038580;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.729886Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038584;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.730249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038588;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.735550Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038584;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.735551Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038588;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.736175Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.739272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.744347Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.745032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038586;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.749490Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.750021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038570;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.752416Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038586;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.753859Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.755302Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038570;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.755994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.760024Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.760679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.761332Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.761867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.770880Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.771415Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.778257Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.778731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.780650Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.781265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.784352Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.785358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038578;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:23:02.786752Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.790931Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038578;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:02.881101Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx4d5eaajdv1z3rc103f9p", SessionId: 
ydb://session/3?node_id=1&id=OGNkMzczNDMtYmEwOWE0MGYtZjI1MzUyOGQtMTU1ZjZmMTY=, Slow query, duration: 31.153559s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:23:03.187057Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:03.187964Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:03.189517Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpMergeCn::TopSortByDesc_Double_Limit3 [GOOD] >> KqpMergeCn::TopSortBy_Date_Limit4 >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeReserved [GOOD] >> KqpSort::TopSortExpr [GOOD] >> KqpSort::TopSortExprPk >> TConsoleTests::TestAlterTenantModifyStorageResourcesForRunning [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForRunningExtSubdomain >> YdbProxy::StaticCreds [GOOD] >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeRequest [GOOD] >> TBackupTests::BackupUuidColumn[Zstd] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::TopicAccountSizeAndUsedReserveSize [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:09.523948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:09.524037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:09.524079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:09.524117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:09.524170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:09.524210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:09.524269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:09.524363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:09.525118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:09.525465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:09.625614Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:09.625679Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:09.662557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:09.671340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:09.671526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:09.715596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:09.715941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:09.716570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:09.716891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:09.724901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:09.725093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:09.726238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:09.726303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:09.726441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:09.726489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:09.726526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:09.726670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: 
TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:09.733632Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:23:09.862845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:09.863092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:09.863360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:09.863428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:09.863681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:09.863771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:09.866181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:09.866422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:09.866699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:09.866750Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:09.866807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:09.866837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:09.869625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:09.869683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:09.869740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 
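Similarly, the 31.153559s KQP_SLOW_LOG entry from the KqpJoinOrder::SortingsByPKWithLookupJoin-RemoveLimitOperator run above embeds the DDL below, decoded here from the escaped log text with contents reproduced as captured. It creates the three column-store tables used by that test, each declared with a minimum of 240 partitions:

    CREATE TABLE t1 (
     id1 Int32 NOT NULL,
     PRIMARY KEY (id1)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

    CREATE TABLE t2 (
     id2 Int64 NOT NULL,
     t1_id1 Int64 NOT NULL,
     -- random_field2 Int32
     PRIMARY KEY (id2)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

    CREATE TABLE t3 (
     id3 Int16 NOT NULL,
     -- random_field3 Int32
     PRIMARY KEY (id3)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);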
2025-06-24T21:23:09.871658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:09.871710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:09.871762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:09.871826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:09.882096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:09.884311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:09.884471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:09.885453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:09.885583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:09.885634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:09.885998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:09.886065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:09.886221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:09.886291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:09.889006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:09.889050Z node 1 :FLAT_TX_SCHEMESHARD ... 
SQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186233409551][Topic3] Discovered subdomain [OwnerId: 72057594046678944, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186233409551 2025-06-24T21:23:10.683592Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186233409547][Topic1] Discovered subdomain [OwnerId: 72057594046678944, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186233409547 2025-06-24T21:23:10.684177Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:23:10.686330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:23:10.686408Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:23:10.686506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:23:10.686564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:23:10.686601Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:23:10.688123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T21:23:10.688251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-24T21:23:10.688438Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877763, Sender [1:1031:2887], Recipient [1:288:2273]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594037968897 ClientId: [1:1031:2887] ServerId: [1:1034:2890] } 2025-06-24T21:23:10.688471Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5047: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T21:23:10.688502Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5843: Client pipe, to tablet: 72057594037968897, from:72057594046678944 is reset TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-24T21:23:10.688892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-24T21:23:10.688943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-24T21:23:10.689450Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:1047:2903], Recipient [1:288:2273]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:23:10.689492Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:23:10.689523Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T21:23:10.689617Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124996, Sender [1:551:2482], Recipient [1:288:2273]: NKikimrScheme.TEvNotifyTxCompletion 
TxId: 104 2025-06-24T21:23:10.689646Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4964: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-24T21:23:10.689726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-24T21:23:10.689809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T21:23:10.689839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:1045:2901] 2025-06-24T21:23:10.690000Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877764, Sender [1:1047:2903], Recipient [1:288:2273]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:23:10.690031Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5049: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-24T21:23:10.690084Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5881: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 104 2025-06-24T21:23:10.690670Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [1:1048:2904], Recipient [1:288:2273]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-24T21:23:10.690717Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T21:23:10.690796Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:23:10.691041Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 186us result status StatusSuccess 2025-06-24T21:23:10.691553Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { 
SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 808 AccountSize: 808 DataSize: 31 UsedReserveSize: 31 } } PQPartitionsInside: 4 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:10.692447Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271188001, Sender [1:1049:2905], Recipient [1:288:2273]: NKikimrPQ.TEvPeriodicTopicStats PathId: 4 Generation: 1 Round: 6 DataSize: 151 UsedReserveSize: 151 2025-06-24T21:23:10.692491Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4989: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2025-06-24T21:23:10.692527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 4] DataSize 151 UsedReserveSize 151 2025-06-24T21:23:10.692561Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:128: Will execute TTxStoreStats, queue# 1 2025-06-24T21:23:10.692969Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [1:1050:2906], Recipient [1:288:2273]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-24T21:23:10.693004Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T21:23:10.693095Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:23:10.695329Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 171us result status StatusSuccess 2025-06-24T21:23:10.695810Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 
WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 808 AccountSize: 808 DataSize: 182 UsedReserveSize: 182 } } PQPartitionsInside: 4 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> YdbProxy::DropTopic [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeReserved [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:07.732331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:07.732435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:07.732493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:07.732534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:07.733901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:07.733973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:07.734110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:07.734865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, 
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:07.735967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:07.737583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:07.845681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:07.845757Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:07.891524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:07.896239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:07.896458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:07.920494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:07.920839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:07.921392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:07.921659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:07.923757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:07.923922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:07.924788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:07.924831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:07.924933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:07.924968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:07.925009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:07.925121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:07.930401Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:23:08.107401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } 
StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:08.109481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.110200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:08.110294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:08.111921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:08.112035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:08.124794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:08.125053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:08.125337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.125413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:08.125476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:08.125515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:08.132560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.132643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:08.132713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:08.144275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.144346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.144404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-24T21:23:08.144476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:08.148607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:08.152089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:08.152266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:08.153335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:08.153476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:08.153534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:08.153839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:08.153911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:08.154096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:08.154178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:08.160219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:08.160282Z node 1 :FLAT_TX_SCHEMESHARD ... 
tionSize: 0 UsedReserveSize: 0 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2025-06-24T21:23:10.404186Z node 1 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186233409546, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2025-06-24T21:23:10.404325Z node 1 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186233409546, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 16975298 UsedReserveSize: 16975298 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2025-06-24T21:23:10.404656Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186233409547][Topic1] Send TEvPeriodicTopicStats PathId: 2 Generation: 2 StatsReportRound: 2 DataSize: 16975298 UsedReserveSize: 16975298 2025-06-24T21:23:10.404814Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186233409547][Topic1] ProcessPendingStats. PendingUpdates size 0 2025-06-24T21:23:10.405080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 2] DataSize 16975298 UsedReserveSize 16975298 2025-06-24T21:23:10.423488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-24T21:23:10.435692Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:10.435899Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 2 took 227us result status StatusSuccess 2025-06-24T21:23:10.436366Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } 
DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 136598400 AccountSize: 136598400 DataSize: 16975298 UsedReserveSize: 16975298 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:10.875448Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:157: [72075186233409547][Topic1] TPersQueueReadBalancer::HandleWakeup 2025-06-24T21:23:10.875554Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186233409547][Topic1] Send TEvPersQueue::TEvStatus TabletId: 72075186233409546 Cookie: 3 2025-06-24T21:23:10.875996Z node 1 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186233409546, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 16975298 UsedReserveSize: 16975298 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2025-06-24T21:23:10.876132Z node 1 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186233409546, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2025-06-24T21:23:10.876231Z node 1 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186233409546, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2025-06-24T21:23:10.876591Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186233409547][Topic1] Send TEvPeriodicTopicStats PathId: 2 Generation: 2 StatsReportRound: 3 DataSize: 16975298 UsedReserveSize: 16975298 2025-06-24T21:23:10.876716Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186233409547][Topic1] ProcessPendingStats. 
PendingUpdates size 0 2025-06-24T21:23:10.877013Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 2] DataSize 16975298 UsedReserveSize 16975298 2025-06-24T21:23:10.899474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-24T21:23:10.911766Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:10.911987Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 2 took 241us result status StatusSuccess 2025-06-24T21:23:10.912479Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 136598400 AccountSize: 136598400 DataSize: 16975298 UsedReserveSize: 16975298 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:10.958217Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:23:10.958472Z node 1 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 286us result status StatusSuccess 2025-06-24T21:23:10.958961Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 136598400 AccountSize: 136598400 DataSize: 16975298 UsedReserveSize: 16975298 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::DescribeConsumer [GOOD] Test command err: 2025-06-24T21:23:02.515340Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629525144425907:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:02.515397Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0026e9/r3tmp/tmp9eGAcy/pdisk_1.dat 2025-06-24T21:23:03.213073Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:03.213179Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:03.231351Z node 1 :CONFIGS_DISPATCHER ERROR: 
configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629525144425880:2079] 1750800182512367 != 1750800182512370 2025-06-24T21:23:03.254076Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:03.257195Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:03.536872Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9717 TServer::EnableGrpc on GrpcPort 10065, node 1 2025-06-24T21:23:03.947881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:03.947915Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:03.947928Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:03.948171Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9717 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:04.900479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:23:05.032578Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629538029328371:2294] txid# 281474976710658, issues: { message: "Invalid retention period: specified: 31536000s, min: 1s, max: 2678400s" severity: 1 } 2025-06-24T21:23:07.057524Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629549153665674:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:07.057575Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0026e9/r3tmp/tmpzVqr7Y/pdisk_1.dat 2025-06-24T21:23:07.323079Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:07.347294Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:07.347395Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:07.355412Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29556 TServer::EnableGrpc on GrpcPort 19403, node 2 2025-06-24T21:23:07.658078Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:07.658098Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:07.658103Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:07.658206Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29556 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:08.032179Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:23:08.047496Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:08.071526Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpNotNullColumns::InsertNotNullPkPg+useSink [GOOD] >> KqpNotNullColumns::InsertNotNullPkPg-useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeRequest [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:08.396166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:08.396272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:08.396313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:08.396350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:08.396411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:08.396442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:08.396514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:08.396607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:08.397376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:08.397754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:08.486924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:08.486996Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:08.514741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:08.519390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:08.519599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, 
schemeshardId: 72057594046678944 2025-06-24T21:23:08.529438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:08.529735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:08.530414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:08.530764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:08.533782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:08.533986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:08.535167Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:08.535265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:08.535402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:08.535446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:08.535512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:08.535668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.543223Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:23:08.705998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:08.706243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.706448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:08.706504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:08.706761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:08.706846Z 
node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:08.720140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:08.720387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:08.720659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.720723Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:08.720778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:08.720816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:08.724645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.724719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:08.724783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:08.729269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.729337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.729392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:08.729463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:08.733626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:08.737791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:08.737985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:08.739007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:08.739189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:08.739251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:08.739575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:08.739627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:08.739824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:08.739921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:08.742272Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:08.742321Z node 1 :FLAT_TX_SCHEMESHARD ... 
6233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 16975298 DataSize: 16975298 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:11.159450Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:157: [72075186233409547][Topic1] TPersQueueReadBalancer::HandleWakeup 2025-06-24T21:23:11.159561Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186233409547][Topic1] Send TEvPersQueue::TEvStatus TabletId: 72075186233409546 Cookie: 3 2025-06-24T21:23:11.160417Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186233409547][Topic1] Send TEvPeriodicTopicStats PathId: 2 Generation: 2 StatsReportRound: 4 DataSize: 16975298 UsedReserveSize: 0 2025-06-24T21:23:11.160554Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186233409547][Topic1] ProcessPendingStats. 
PendingUpdates size 0 2025-06-24T21:23:11.160788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 2] DataSize 16975298 UsedReserveSize 0 2025-06-24T21:23:11.187464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-24T21:23:11.198077Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:11.198335Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 2 took 281us result status StatusSuccess 2025-06-24T21:23:11.198890Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 11 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_REQUEST_UNITS } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 16975298 DataSize: 16975298 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:11.244962Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:23:11.245227Z node 1 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 312us result status StatusSuccess 2025-06-24T21:23:11.245829Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 11 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_REQUEST_UNITS } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 16975298 DataSize: 16975298 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:11.246784Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186233409547][Topic1] pipe [1:673:2586] connected; active server actors: 1 2025-06-24T21:23:11.281576Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186233409547][Topic1] BALANCER INIT DONE for Topic1: (0, 72075186233409546) (1, 72075186233409546) (2, 72075186233409546) 2025-06-24T21:23:11.282055Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186233409547][Topic1] Discovered subdomain [OwnerId: 72057594046678944, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186233409547 2025-06-24T21:23:11.292544Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:11.292792Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 2 took 273us result status StatusSuccess 2025-06-24T21:23:11.293393Z node 1 :SCHEMESHARD_DESCRIBE 
DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 11 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_REQUEST_UNITS } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 16975298 DataSize: 16975298 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:11.293576Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186233409547][Topic1] TEvClientConnected TabletId 72057594046678944, NodeId 1, Generation 3 2025-06-24T21:23:11.294159Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186233409547][Topic1] TEvClientConnected TabletId 72075186233409546, NodeId 1, Generation 2 2025-06-24T21:23:11.336364Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186233409547][Topic1] pipe [1:720:2621] connected; active server actors: 1 >> YdbProxy::CopyTables [GOOD] >> YdbProxy::AlterTopic ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::StaticCreds [GOOD] Test command err: 2025-06-24T21:23:02.564276Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629524353099215:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:02.583737Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/2btm/0026d9/r3tmp/tmpETEXv8/pdisk_1.dat 2025-06-24T21:23:03.204390Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629524353099034:2079] 1750800182534332 != 1750800182534335 2025-06-24T21:23:03.215943Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:03.228491Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:03.228606Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:03.244147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27138 TServer::EnableGrpc on GrpcPort 21902, node 1 2025-06-24T21:23:03.570738Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:03.940181Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:03.940211Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:03.940220Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:03.940363Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27138 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:04.875890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:23:04.911996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:05.006348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:23:05.024535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:67) 2025-06-24T21:23:05.032777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-24T21:23:05.051254Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629537238001562:2326] txid# 281474976710660, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-24T21:23:07.025221Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629546343232112:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:07.025292Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0026d9/r3tmp/tmpWkxkcT/pdisk_1.dat 2025-06-24T21:23:07.306377Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:07.306456Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:07.310438Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:07.313956Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:07.321671Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629546343232059:2079] 1750800187023347 != 1750800187023350 TClient is connected to server localhost:10731 TServer::EnableGrpc on GrpcPort 65466, node 2 2025-06-24T21:23:07.703937Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:07.703966Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:07.703973Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:07.704104Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10731 2025-06-24T21:23:08.059427Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:08.189580Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:08.198333Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:08.252895Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750800188242 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "user1" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 1 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 ... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750800188242 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "user1" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 ... (TRUNCATED) >> YdbProxy::CreateCdcStream [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::DropTopic [GOOD] Test command err: 2025-06-24T21:23:02.532536Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629527609583165:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:02.532596Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002724/r3tmp/tmp53kx6S/pdisk_1.dat 2025-06-24T21:23:03.309882Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:03.309995Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:03.316896Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629527609583143:2079] 1750800182530702 != 1750800182530705 2025-06-24T21:23:03.325293Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:03.326695Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:03.575294Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16735 TServer::EnableGrpc on GrpcPort 7140, node 1 2025-06-24T21:23:03.940195Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:03.940228Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:03.940241Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:03.940415Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16735 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:04.875251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:04.911293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002724/r3tmp/tmpKSdLWR/pdisk_1.dat 2025-06-24T21:23:07.515135Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:23:07.561997Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:07.563969Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629545453252934:2079] 1750800187180567 != 1750800187180570 2025-06-24T21:23:07.569573Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:07.569651Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:07.571564Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28449 TServer::EnableGrpc on GrpcPort 64777, node 2 2025-06-24T21:23:07.979563Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:07.979590Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:07.979598Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:07.979731Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:23:08.227292Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28449 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:08.494296Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:08.693586Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:422) 2025-06-24T21:23:08.716233Z node 2 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2025-06-24T21:23:08.716304Z node 2 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found 2025-06-24T21:23:08.716344Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2025-06-24T21:23:08.716375Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037889 not found 2025-06-24T21:23:08.736215Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629549748220996:2400] txid# 281474976710660, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } >> KqpNewEngine::DeleteON [GOOD] >> KqpNewEngine::DeleteWithInputMultiConsumption+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::BackupUuidColumn[Zstd] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:10.097067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:10.097173Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:10.097219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:10.097257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:10.097320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:10.097367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:10.097428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:10.097525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:10.098310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:10.098697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:10.200678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:10.200749Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:10.219553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:10.219720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:10.219925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:10.228743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:10.229388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:10.230126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:10.230416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:10.233955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:10.234168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:10.235532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:10.235600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: 
TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:10.235850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:10.235903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:10.235954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:10.236039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:10.243277Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:23:10.412767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:10.414186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:10.414445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:10.414500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:10.414732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:10.414819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:10.425634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:10.425875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:10.426105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:10.426226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:10.426281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:10.426336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:10.430245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:10.430390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:10.430448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:10.432882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:10.432951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:10.433036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:10.433098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:10.437159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:10.440335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:10.440583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:10.441689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:10.441870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:10.441930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:10.442242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:10.442297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-24T21:23:10.442489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:10.442571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:10.445393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:10.445450Z node 1 :FLAT_TX_SCHEMESHARD ... hard: 72057594046678944 2025-06-24T21:23:11.288415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-24T21:23:11.288559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 128 -> 129 2025-06-24T21:23:11.288709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:23:11.313651Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:797: [Export] [s3] Bootstrap: self# [1:416:2385], attempt# 0 2025-06-24T21:23:11.438145Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:441: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:416:2385], sender# [1:415:2384] REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:13378 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 127841FE-99BA-4E7B-BE32-BBB8CDD65BD6 amz-sdk-request: attempt=1 content-length: 94 content-md5: ZpDejBbuBPHjGq8ZC8z8QA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 94 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-24T21:23:11.454493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:11.454562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:23:11.454897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:11.454958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 FAKE_COORDINATOR: Erasing txId 102 2025-06-24T21:23:11.455522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:11.455603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:11.455995Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:401: [Export] [s3] HandleMetadata 
TEvExternalStorage::TEvPutObjectResponse: self# [1:416:2385], result# PutObjectResult { ETag: 6690de8c16ee04f1e31aaf190bccfc40 } 2025-06-24T21:23:11.458229Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:23:11.458384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:23:11.458465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:23:11.458524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:23:11.458573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:23:11.458700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:13378 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 284A3004-EF49-4C79-9EB6-B43B267E4AEA amz-sdk-request: attempt=1 content-length: 357 content-md5: IxJB3qM/y2xlsv8qcwTF7g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-24T21:23:11.471722Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:416:2385], result# PutObjectResult { ETag: 231241dea33fcb6c65b2ff2a7304c5ee } 2025-06-24T21:23:11.472113Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:415:2384] 2025-06-24T21:23:11.472367Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:416:2385], sender# [1:415:2384], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } 2025-06-24T21:23:11.474757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 REQUEST: PUT /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:13378 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: DC6E9E88-8472-4655-A9A9-4500A95C874D amz-sdk-request: attempt=1 content-length: 40 content-md5: LXbLDYru8NmFsYXNSXjnpQ== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 40 2025-06-24T21:23:11.479302Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:416:2385], result# PutObjectResult { ETag: 2d76cb0d8aeef0d985b185cd4978e7a5 } 
2025-06-24T21:23:11.479396Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:416:2385], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-24T21:23:11.479600Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:415:2384], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-24T21:23:11.492500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-24T21:23:11.492592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T21:23:11.492835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-24T21:23:11.492963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-24T21:23:11.493060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:11.493121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:11.493168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:23:11.493227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T21:23:11.493425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:11.496700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:11.497163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:11.497226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone 
opId# 102:0 ProgressState 2025-06-24T21:23:11.497365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:23:11.497415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:23:11.497464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:23:11.497502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:23:11.497547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T21:23:11.497652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:339:2316] message: TxId: 102 2025-06-24T21:23:11.497716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:23:11.497776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:23:11.497821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:23:11.497969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:23:11.500659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:23:11.500721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:401:2371] TestWaitNotification: OK eventTxId 102 |96.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> TConsoleTests::TestCreateTenantWrongPool [GOOD] >> TConsoleTests::TestCreateTenantWrongPoolExtSubdomain |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::CreateCdcStream [GOOD] Test command err: 2025-06-24T21:23:02.552227Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629526128701895:2149];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:02.552892Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0026f6/r3tmp/tmpmYrGff/pdisk_1.dat 2025-06-24T21:23:03.244703Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:03.244790Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:03.253001Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:03.264817Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 
2025-06-24T21:23:03.563369Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26012 TServer::EnableGrpc on GrpcPort 9040, node 1 2025-06-24T21:23:03.940266Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:03.940293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:03.940302Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:03.940429Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26012 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:04.872932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:23:04.912099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:06.782252Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629543308571604:2302] txid# 281474976710658, issues: { message: "Column key has wrong key type Float" severity: 1 } 2025-06-24T21:23:06.794853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:06.947754Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629543308571692:2362] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/table\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:07.837791Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629546043731253:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:07.897982Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0026f6/r3tmp/tmpTrbrTW/pdisk_1.dat 2025-06-24T21:23:08.040677Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:08.040743Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629546043731130:2079] 1750800187798672 != 1750800187798675 2025-06-24T21:23:08.061328Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:08.061413Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:08.064627Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63322 TServer::EnableGrpc on GrpcPort 8221, node 2 2025-06-24T21:23:08.530217Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:08.530249Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:08.530257Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:08.530405Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:23:08.876140Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63322 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:09.132569Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:09.155364Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:11.854919Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:12.040289Z node 2 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037888:1][2:7519629567518568447:2307] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:4:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-24T21:23:12.123214Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629567518568504:2450] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/table/updates\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeCdcStream, state: EPathStateNoChanges)" severity: 1 } >> KqpJoinOrder::TPCDS96+ColumnStore [GOOD] >> KqpNewEngine::ReadAfterWrite >> TBackupTests::ShouldSucceedOnLargeData[Zstd] >> KqpNotNullColumns::ReplaceNotNull >> KqpNotNullColumns::ReplaceNotNullPkPg [GOOD] >> KqpNotNullColumns::SelectNotNullColumns >> KqpNewEngine::LocksSingleShard [GOOD] >> KqpNewEngine::LocksMultiShardOk >> KqpNewEngine::MultiStatement [GOOD] >> KqpNewEngine::MultiStatementMixPure >> KqpNewEngine::KeyColumnOrder [GOOD] >> KqpNewEngine::KeyColumnOrder2 >> TStoragePoolsStatsPersistence::SameAggregatedStatsAfterRestart [GOOD] >> YdbProxy::AlterTopic [GOOD] >> KqpNewEngine::BlindWriteListParameter [GOOD] >> KqpNewEngine::BrokenLocksAtROTx >> TBackupTests::BackupUuidColumn[Raw] >> TConsoleTests::TestCreateTenantWrongPoolExtSubdomain [GOOD] >> TConsoleTests::TestCreateTenantAlreadyExists ------- [TM] {asan, 
default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS96+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 64251, MsgBus: 62395 2025-06-24T21:21:15.323812Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629065174953293:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:15.324147Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003950/r3tmp/tmpUyNmaq/pdisk_1.dat 2025-06-24T21:21:16.064259Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:16.064366Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:16.074504Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:21:16.138093Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:16.139302Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629065174953108:2079] 1750800075265048 != 1750800075265051 TServer::EnableGrpc on GrpcPort 64251, node 1 2025-06-24T21:21:16.303651Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:16.448411Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:16.448430Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:16.448437Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:16.448555Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62395 TClient is connected to server localhost:62395 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:21:17.776693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:17.827887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:21:20.305864Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629065174953293:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:20.305920Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:21.288179Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629090944757537:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:21.288288Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:21.291240Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629090944757549:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:21.295401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:21.319338Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629090944757551:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:21:21.392591Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629090944757604:2342] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:21.857758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:21:22.161322Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629095239725152:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:22.161558Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629095239725152:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:22.161808Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629095239725152:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:22.161926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629095239725152:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:22.162109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629095239725152:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:22.162258Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629095239725152:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:22.162261Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629095239725146:2315];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:22.162301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629095239725146:2315];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:22.162369Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629095239725152:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:22.162468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037894;self_id=[1:7519629095239725146:2315];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:22.162487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629095239725152:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:22.162614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629095239725146:2315];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:22.162632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629095239725152:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:21:22.162732Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629095239725146:2315];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:22.162743Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629095239725152:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:21:22.162880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629095239725152:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:21:22.162885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629095239725146:2315];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:22.162998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629095239725146:2315];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:22.163128Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629095239725146:2315];tabl ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.645749Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.646252Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039320;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.649807Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039328;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.650690Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039242;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.651595Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039320;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.652202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039254;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.655838Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039242;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.656430Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039330;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.657202Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039254;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.657731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039222;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.661588Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039330;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.662253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039314;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.662536Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039222;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.663119Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039197;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 
2025-06-24T21:22:53.667381Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039314;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.668015Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039286;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.668169Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039197;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.668652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039238;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.672821Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039286;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.673428Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039250;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.673682Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039238;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.674176Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039252;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.677857Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039250;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.678398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039334;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.760230Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039334;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.760749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039270;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.765955Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039252;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.766463Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.770223Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039270;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.775512Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039338;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.776199Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.776694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039262;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.782335Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039262;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.786318Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039338;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.786822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039350;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.787706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039195;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.797250Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039195;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.797750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039290;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.799741Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039350;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.810863Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039290;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:53.892986Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:22:53.915392Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:22:54.038011Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx3t653018ze4scqdb1vp5", SessionId: 
ydb://session/3?node_id=1&id=ZjE3ZTI3YjYtMWYzNGY4ZGMtOWI4ZTU1ZTAtODZjMzkwMg==, Slow query, duration: 41.742391s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:54.655281Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:22:54.655326Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:22:54.656426Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TStoragePoolsStatsPersistence::SameAggregatedStatsAfterRestart [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:11.453384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:11.453474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:11.453512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:11.453547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:11.453599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:11.453632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:11.453679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:11.453772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 
600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:11.454488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:11.454823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:11.541293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:11.541370Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:11.558965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:11.559437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:11.559622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:11.567758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:11.567992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:11.568656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:11.568968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:11.572000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:11.572207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:11.573383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:11.573442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:11.573663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:11.573711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:11.573773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:11.573862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:11.581585Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:23:11.743659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: 
"pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:11.743915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:11.744126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:11.744168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:11.744461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:11.744552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:11.747120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:11.747390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:11.747611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:11.747664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:11.747720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:11.747754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:11.752395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:11.752478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:11.752560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:11.754989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:11.755045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:11.755092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-24T21:23:11.755171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:11.759063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:11.761514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:11.761780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:11.762838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:11.762990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:11.763047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:11.763427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:11.763504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:11.763689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:11.763807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:11.766063Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:11.766104Z node 1 :FLAT_TX_SCHEMESHARD D ... 
shard: 72057594046678944 2025-06-24T21:23:16.605577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:16.605750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-24T21:23:16.605835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-24T21:23:16.606069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 2, at schemeshard: 72057594046678944 2025-06-24T21:23:16.606206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.606293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:16.606330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:23:16.606436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:16.607176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:16.607529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 3, at schemeshard: 72057594046678944 2025-06-24T21:23:16.607889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.608001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.608390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.608467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.608690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.608771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.608867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.609042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.609142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.609286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.609499Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.609596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.609718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.609763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.609824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.622482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:16.625111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:16.625191Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:16.625928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:16.625990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:16.626038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:16.628054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:759:2710] sender: [1:813:2058] recipient: [1:15:2062] 2025-06-24T21:23:16.681456Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:23:16.681751Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeTable" took 339us result status StatusSuccess 2025-06-24T21:23:16.682244Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeTable" PathDescription { Self { Name: "SomeTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "SomeTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 Family: 1 FamilyName: "alternative" NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" 
KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 4140 RowCount: 100 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { PoolsUsage { PoolKind: "pool-kind-1" DataSize: 1020 IndexSize: 0 } PoolsUsage { PoolKind: "pool-kind-2" DataSize: 3120 IndexSize: 0 } } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 82488 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 4140 DataSize: 4140 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 1020 DataSize: 1020 IndexSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-2" TotalSize: 3120 DataSize: 3120 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:16.689124Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:23:16.689280Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 178us result status StatusSuccess 2025-06-24T21:23:16.689726Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "SomeTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 
ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 4140 DataSize: 4140 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 1020 DataSize: 1020 IndexSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-2" TotalSize: 3120 DataSize: 3120 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> KqpNewEngine::OnlineRO_Inconsistent [GOOD] >> KqpNewEngine::Nondeterministic ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::AlterTopic [GOOD] Test command err: 2025-06-24T21:23:02.515306Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629526556113284:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:02.515363Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0026c3/r3tmp/tmpiSMY2Y/pdisk_1.dat 2025-06-24T21:23:03.219138Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:03.235330Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629526556113257:2079] 1750800182509813 != 1750800182509816 2025-06-24T21:23:03.254030Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:03.254141Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:03.268634Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:03.539280Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14263 TServer::EnableGrpc on GrpcPort 22283, node 1 2025-06-24T21:23:03.974483Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:03.974510Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:23:03.974517Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:03.974627Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14263 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:04.902598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:06.580048Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629543735983086:2302] txid# 281474976710658, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-24T21:23:06.604854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:07.698317Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629546326310962:2167];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0026c3/r3tmp/tmpxzvsLV/pdisk_1.dat 2025-06-24T21:23:07.725560Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:23:07.850446Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:07.854060Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629546326310830:2079] 1750800187678384 != 1750800187678387 2025-06-24T21:23:07.859287Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:07.859362Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:07.898532Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) 
VolatileState: Connecting -> Connected TClient is connected to server localhost:2962 TServer::EnableGrpc on GrpcPort 22776, node 2 2025-06-24T21:23:08.143730Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:08.143752Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:08.143758Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:08.143877Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2962 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:08.523678Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:23:08.531413Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:08.770071Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:11.409962Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:11.500371Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:12.591017Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519629569014160797:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:12.591071Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0026c3/r3tmp/tmp7yIwIM/pdisk_1.dat 2025-06-24T21:23:12.843564Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519629569014160774:2079] 1750800192573967 != 1750800192573970 2025-06-24T21:23:12.850599Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:12.859249Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:12.859328Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:12.864097Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24109 TServer::EnableGrpc on GrpcPort 27560, node 3 2025-06-24T21:23:13.170159Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:13.170182Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:13.170191Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:13.170310Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24109 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:13.612974Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:13.635397Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:13.860553Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:23:13.893249Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519629573309128841:2396] txid# 281474976715660, issues: { message: "Invalid retention period: specified: 31536000s, min: 1s, max: 2678400s" severity: 1 } >> TSchemeshardStatsBatchingTest::ShouldNotBatchWhenDisabled [GOOD] >> KqpNotNullColumns::InsertNotNullPkPg-useSink [GOOD] >> KqpNotNullColumns::InsertNotNullPg+useSink >> TConsoleTests::TestAlterTenantModifyStorageResourcesForRunningExtSubdomain [GOOD] >> TConsoleTests::TestAlterUnknownTenant >> KikimrIcGateway::TestLoadTableMetadata >> KikimrIcGateway::TestLoadExternalTable >> KikimrIcGateway::TestLoadBasicSecretValueFromExternalDataSourceMetadata |96.6%| [TA] $(B)/ydb/core/tx/replication/ydb_proxy/ut/test-results/unittest/{meta.json ... results_accumulator.log} |96.6%| [TA] {RESULT} $(B)/ydb/core/tx/replication/ydb_proxy/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TBackupTests::BackupUuidColumn[Raw] [GOOD] >> KikimrIcGateway::TestListPath >> KikimrProvider::TestFillAuthPropertiesNone [GOOD] >> KikimrProvider::TestFillAuthPropertiesServiceAccount [GOOD] >> KikimrProvider::TestFillAuthPropertiesMdbBasic [GOOD] >> KqpMergeCn::TopSortBy_Date_Limit4 [GOOD] >> KqpMergeCn::TopSortByDesc_Datetime_Limit3 >> KqpSort::TopSortExprPk [GOOD] >> KqpSort::TopSortTableExpr ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::ShouldNotBatchWhenDisabled [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:07.732316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:07.732444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:07.732489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:07.732548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:07.733905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:07.733980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:07.734059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:07.734742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:07.735599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:07.737570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:07.854065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:07.854131Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:07.871552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:07.872020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:07.872295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 
2025-06-24T21:23:07.882941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:07.885864Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:07.889086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:07.890883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:07.898773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:07.901357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:07.914935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:07.915040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:07.918548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:07.918627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:07.918707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:07.918815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:07.933048Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:23:08.110555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:08.110833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.111024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:08.111069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:08.113336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:08.113497Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:08.120537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:08.121676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:08.121960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.122095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:08.122145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:08.122189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:08.126331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.126402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:08.126454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:08.129668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.129725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.129771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:08.129820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:08.142800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:08.148376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:08.148608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at 
step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:08.149647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:08.149814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:08.149873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:08.150233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:08.150290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:08.150502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:08.150602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:08.156415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:08.156496Z node 1 :FLAT_TX_SCHEMESHARD ... 
_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:17.801083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:17.801294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 3, at schemeshard: 72057594046678944 2025-06-24T21:23:17.801626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:17.801744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:17.802171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:17.802262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:17.802524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:17.802635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:17.802740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:17.802938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:17.803021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:17.803369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:17.803627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:17.803726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:17.803920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:17.803985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:17.804037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:17.804293Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T21:23:17.813501Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:23:17.813700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:17.816052Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 
2146435083, Sender [1:1013:2954], Recipient [1:1013:2954]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2025-06-24T21:23:17.816118Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5014: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2025-06-24T21:23:17.817709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:17.817804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:17.818032Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:1013:2954], Recipient [1:1013:2954]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:23:17.818077Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:23:17.818258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:17.818310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:17.818358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:17.818413Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:23:17.818792Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 274399233, Sender [1:1049:2954], Recipient [1:1013:2954]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-24T21:23:17.818833Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5105: StateWork, processing event TEvTxAllocatorClient::TEvAllocateResult 2025-06-24T21:23:17.818872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:1013:2954] sender: [1:1067:2058] recipient: [1:15:2062] 2025-06-24T21:23:17.865265Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [1:1066:2996], Recipient [1:1013:2954]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2025-06-24T21:23:17.865362Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T21:23:17.865619Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:23:17.865982Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Simple" took 413us result status StatusSuccess 2025-06-24T21:23:17.866898Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Simple" PathDescription { Self { Name: "Simple" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1001 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Simple" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 MaxPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 13984 RowCount: 100 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 
PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 82488 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 13984 DataSize: 13984 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrProvider::TestFillAuthPropertiesMdbBasic [GOOD] >> KqpJoinOrder::TPCDS61-ColumnStore [GOOD] >> KqpJoinOrder::FiveWayJoinWithConstantFoldOpt+ColumnStore [GOOD] |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> KqpNotNullColumns::UpsertNotNullPkPg >> ReadAttributesUtils::AttributesGatheringEmpry [GOOD] >> ReadAttributesUtils::AttributesGatheringFilter [GOOD] >> ReadAttributesUtils::AttributesGatheringRecursive [GOOD] >> KqpSort::ReverseOptimized ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::BackupUuidColumn[Raw] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:17.737934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:17.738043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:17.738088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:17.738122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:17.738193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:17.738237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: 
OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:17.738312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:17.738405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:17.739278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:17.739557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:17.825064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:17.825143Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:17.848141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:17.848541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:17.848682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:17.856084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:17.856270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:17.856844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:17.857205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:17.860085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:17.860306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:17.861420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:17.861473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:17.861660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:17.861711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:17.861750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:17.861853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at 
schemeshard: 72057594046678944 2025-06-24T21:23:17.868153Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:23:18.013871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:18.014114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:18.014379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:18.014432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:18.014687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:18.014758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:18.021538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:18.021789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:18.021986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:18.022078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:18.022132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:18.022181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:18.030225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:18.030383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:18.030433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:18.032821Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:18.032880Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:18.032965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:18.033029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:18.036657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:18.038848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:18.039032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:18.040165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:18.040314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:18.040387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:18.040686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:18.040749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:18.040923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:18.040993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:18.043383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:18.043430Z node 1 :FLAT_TX_SCHEMESHARD ... 
schemeshard: 72057594046678944 2025-06-24T21:23:18.426334Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-24T21:23:18.426456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 128 -> 129 2025-06-24T21:23:18.426578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:23:18.440574Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:797: [Export] [s3] Bootstrap: self# [1:416:2385], attempt# 0 2025-06-24T21:23:18.482408Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:441: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:416:2385], sender# [1:415:2384] REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:19147 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: E68BB41A-AEFD-44FE-9517-5530D230FF7F amz-sdk-request: attempt=1 content-length: 94 content-md5: ZpDejBbuBPHjGq8ZC8z8QA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 94 2025-06-24T21:23:18.493434Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:401: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:416:2385], result# PutObjectResult { ETag: 6690de8c16ee04f1e31aaf190bccfc40 } REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:19147 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: C9B789BF-20E3-41A7-8BFC-28284DA1D35B amz-sdk-request: attempt=1 content-length: 357 content-md5: IxJB3qM/y2xlsv8qcwTF7g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-24T21:23:18.500048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:18.500111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:23:18.500353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:18.500389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 FAKE_COORDINATOR: Erasing txId 102 2025-06-24T21:23:18.500785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:18.500837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:18.501282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle 
TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:23:18.501381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:23:18.501427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:23:18.501470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:23:18.501514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:23:18.501577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-24T21:23:18.503538Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:416:2385], result# PutObjectResult { ETag: 231241dea33fcb6c65b2ff2a7304c5ee } 2025-06-24T21:23:18.503761Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:415:2384] 2025-06-24T21:23:18.503859Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:416:2385], sender# [1:415:2384], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } 2025-06-24T21:23:18.505733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:19147 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 09D5F8DD-9212-44DF-AC47-6DBD0B81DF7B amz-sdk-request: attempt=1 content-length: 39 content-md5: GLX1nc5/cKhlAfxBHlykQA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv / / 39 2025-06-24T21:23:18.508124Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:416:2385], result# PutObjectResult { ETag: 18b5f59dce7f70a86501fc411e5ca440 } 2025-06-24T21:23:18.508190Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:416:2385], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-24T21:23:18.508369Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:415:2384], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-24T21:23:18.516875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: 
"" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-24T21:23:18.516978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T21:23:18.517164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-24T21:23:18.517258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-24T21:23:18.517314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:18.517361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:18.517401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:23:18.517435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T21:23:18.517571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:18.519761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:18.520150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:18.520219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:23:18.520332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:23:18.520375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:23:18.520404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:23:18.520428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:23:18.520462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation 
IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T21:23:18.520524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:339:2316] message: TxId: 102 2025-06-24T21:23:18.520562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:23:18.520593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:23:18.520666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:23:18.520772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:23:18.522603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:23:18.522641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:401:2371] TestWaitNotification: OK eventTxId 102 >> KqpNotNullColumns::ReplaceNotNull [GOOD] >> KqpNotNullColumns::JoinLeftTableWithNotNullPk+StreamLookup |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> ReadAttributesUtils::AttributesGatheringRecursive [GOOD] >> KqpNotNullColumns::SelectNotNullColumns [GOOD] >> KqpNotNullColumns::ReplaceNotNullPg >> ReadAttributesUtils::ReplaceAttributesEmpty [GOOD] >> ReadAttributesUtils::ReplaceAttributesFilter [GOOD] >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchSize [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex-UseSink-UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex+UseSink-UseDataQuery |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS61-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 3408, MsgBus: 13564 2025-06-24T21:21:55.581085Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629236527408857:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:55.581463Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003910/r3tmp/tmpdFmG8Y/pdisk_1.dat 2025-06-24T21:21:56.228282Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:56.228376Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:56.248493Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:21:56.279322Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629236527408672:2079] 1750800115517722 != 1750800115517725 2025-06-24T21:21:56.315176Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 
3408, node 1 2025-06-24T21:21:56.587223Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:56.643781Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:56.643803Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:56.643811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:56.643924Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13564 TClient is connected to server localhost:13564 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:57.762398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:57.797365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:22:00.499907Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629258002245798:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:00.500046Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:00.503250Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629258002245810:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:00.515027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:00.529702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:22:00.535361Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629258002245812:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:22:00.568998Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629236527408857:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:00.569077Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:00.627081Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629258002245863:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:01.058595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.221416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.282411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.325180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.366878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.536422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.623639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.672454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.708680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.747402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.835848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.903620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:01.952350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operatio ... 
474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.232620Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038503;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:46.240262Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038503;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.240664Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038560;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.240990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:46.241127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038562;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:46.248120Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.248849Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038504;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:46.252253Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038562;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.252814Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038493;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:46.255347Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038504;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.255994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038524;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:46.261768Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038524;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.265120Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038493;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.266080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:46.267957Z 
node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038449;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:46.271942Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.272612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038479;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:46.272729Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038449;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.273270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038483;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:46.278128Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038479;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.278630Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038483;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.278858Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038464;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:46.279284Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:46.285063Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.286832Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038464;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.287607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038455;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:46.289944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038468;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:46.298856Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038455;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.300264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038554;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:46.303302Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038468;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.304133Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038507;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-24T21:22:46.309613Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038507;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.311309Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038554;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:22:46.439867Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx3kkmama7p70mq36ncxmz", SessionId: ydb://session/3?node_id=1&id=MTAyZTJjMzktNDVkM2E5MGMtZWU4NDQ4ZjYtZGFjZmQ2ODQ=, Slow query, duration: 40.882885s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:22:47.215522Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:47.216187Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:22:47.219426Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519629404031160362:5897];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038170; 2025-06-24T21:22:47.219937Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:15.295121Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx59mw6xfvyfdwveanhv06", SessionId: ydb://session/3?node_id=1&id=MTAyZTJjMzktNDVkM2E5MGMtZWU4NDQ4ZjYtZGFjZmQ2ODQ=, Slow query, duration: 14.399254s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query61.tpl and seed 1930872976\nselect 
promotions,total,cast(promotions as float)/cast(total as float)*100\nfrom\n (select sum(ss_ext_sales_price) promotions\n from store_sales\n cross join store\n cross join promotion\n cross join date_dim\n cross join customer\n cross join customer_address\n cross join item\n where ss_sold_date_sk = d_date_sk\n and ss_store_sk = s_store_sk\n and ss_promo_sk = p_promo_sk\n and ss_customer_sk= c_customer_sk\n and ca_address_sk = c_current_addr_sk\n and ss_item_sk = i_item_sk\n and ca_gmt_offset = -6\n and i_category = 'Sports'\n and (p_channel_dmail = 'Y' or p_channel_email = 'Y' or p_channel_tv = 'Y')\n and s_gmt_offset = -6\n and d_year = 2001\n and d_moy = 12) promotional_sales cross join\n (select sum(ss_ext_sales_price) total\n from store_sales\n cross join store\n cross join date_dim\n cross join customer\n cross join customer_address\n cross join item\n where ss_sold_date_sk = d_date_sk\n and ss_store_sk = s_store_sk\n and ss_customer_sk= c_customer_sk\n and ca_address_sk = c_current_addr_sk\n and ss_item_sk = i_item_sk\n and ca_gmt_offset = -6\n and i_category = 'Sports'\n and s_gmt_offset = -6\n and d_year = 2001\n and d_moy = 12) all_sales\norder by promotions, total\nlimit 100;\n", parameters: 0b >> KqpNewEngine::ReadAfterWrite [GOOD] >> KqpNewEngine::Replace |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> ReadAttributesUtils::ReplaceAttributesFilter [GOOD] >> TConsoleTests::TestAlterUnknownTenant [GOOD] >> TConsoleTests::TestAlterUnknownTenantExtSubdomain >> KqpJoinOrder::TestJoinHint1+ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchSize [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:09.346243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:09.346341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:09.346386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:09.346426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:09.346474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:09.346513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:09.346568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:09.346672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: 
[RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:09.347459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:09.347784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:09.434073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:09.434144Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:09.451526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:09.451999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:09.452178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:09.463394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:09.463644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:09.464366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:09.464736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:09.473201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:09.473399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:09.474661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:09.474725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:09.474969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:09.475018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:09.475065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:09.475202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:09.492063Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:23:09.652689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: 
Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:09.652930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:09.653152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:09.653204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:09.653543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:09.653643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:09.664096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:09.664316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:09.664576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:09.664636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:09.664687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:09.664725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:09.669393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:09.669469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:09.669525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:09.671904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:09.671966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:09.672011Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:09.672063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:09.675846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:09.678032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:09.678250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:09.679312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:09.679474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:09.679533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:09.679857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:09.679917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:09.680080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:09.680161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:09.682245Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:09.682308Z node 1 :FLAT_TX_SCHEMESHARD ... 
chemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 2, at schemeshard: 72057594046678944 2025-06-24T21:23:21.085888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:21.086160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 6, at schemeshard: 72057594046678944 2025-06-24T21:23:21.086530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:21.086684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:21.087072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:21.087187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:21.087431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:21.087530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:21.087626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:21.087849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:21.088006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:21.088250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:21.088520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:21.088607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:21.088740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:21.088805Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:21.088879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:21.089165Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T21:23:21.101732Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:23:21.101945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:21.103548Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435083, Sender 
[1:1131:3060], Recipient [1:1131:3060]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2025-06-24T21:23:21.103614Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5014: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2025-06-24T21:23:21.105052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:21.105137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:21.106241Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:1131:3060], Recipient [1:1131:3060]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:23:21.106304Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:23:21.106496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:21.106558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:21.106717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:21.106759Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:23:21.107594Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 274399233, Sender [1:1167:3060], Recipient [1:1131:3060]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-24T21:23:21.107661Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5105: StateWork, processing event TEvTxAllocatorClient::TEvAllocateResult 2025-06-24T21:23:21.107716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:1131:3060] sender: [1:1187:2058] recipient: [1:15:2062] 2025-06-24T21:23:21.158790Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [1:1186:3104], Recipient [1:1131:3060]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2025-06-24T21:23:21.158890Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T21:23:21.159035Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:23:21.159439Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Simple" took 356us result status StatusSuccess 2025-06-24T21:23:21.160470Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: 
TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Simple" PathDescription { Self { Name: "Simple" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1001 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Simple" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 1 MinPartitionsCount: 20 MaxPartitionsCount: 20 } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 13984 RowCount: 100 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 
StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 18941 Memory: 141368 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 13984 DataSize: 13984 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TConsoleTests::TestCreateTenantAlreadyExists [GOOD] >> TConsoleTests::TestCreateTenantAlreadyExistsExtSubdomain >> KqpNewEngine::LocksMultiShardOk [GOOD] >> KqpNewEngine::LocksNoMutations >> KqpNewEngine::KeyColumnOrder2 [GOOD] >> KqpNewEngine::LocksMultiShard >> KqpNewEngine::DeleteWithInputMultiConsumption+UseSink [GOOD] >> KqpNewEngine::DeleteWithInputMultiConsumption-UseSink >> KqpNewEngine::InShardsWrite ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithConstantFoldOpt+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 26894, MsgBus: 18187 2025-06-24T21:21:22.434564Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629095256924671:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:22.434832Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003948/r3tmp/tmpRSlnyv/pdisk_1.dat 2025-06-24T21:21:23.200669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:23.200765Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:23.212306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:21:23.229128Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:23.247268Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629095256924577:2079] 1750800082398862 != 1750800082398865 TServer::EnableGrpc on GrpcPort 26894, node 1 2025-06-24T21:21:23.467712Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:23.555619Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:23.555643Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:23.555650Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:23.555755Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18187 TClient is connected to server localhost:18187 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:24.403419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:24.463501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:27.327010Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629116731761699:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:27.327162Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:27.327727Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629116731761711:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:27.339442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:27.372326Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629116731761713:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:21:27.423783Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629095256924671:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:27.423848Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:27.435790Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629116731761764:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:27.934097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:21:28.250873Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629121026729255:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:28.251058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629121026729255:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:28.252306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629121026729254:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:28.252361Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629121026729254:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:28.252636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629121026729254:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:28.252776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629121026729254:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:28.252882Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629121026729254:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:28.252994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629121026729254:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:28.253121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519629121026729254:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:28.253220Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629121026729254:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:28.253328Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629121026729254:2311];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:21:28.253939Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629121026729255:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:28.254074Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629121026729255:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:28.254182Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629121026729255:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:28.254276Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629121026729255:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:28.254374Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629121026729255:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:28.254498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629121026729255:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:28.255099Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629121026729255:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:21:28.255252Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629121026729255:2312];tablet_id=720 ... 
21650Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:03.922368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:03.928403Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:03.929142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:03.935334Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:03.936128Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:03.946573Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:03.947258Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:03.954401Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:03.955516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:03.961921Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:03.962713Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:03.969111Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:03.969807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:03.975958Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:03.980360Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:03.983807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:03.989176Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:03.990605Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:03.998000Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:03.999313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:04.009793Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:04.010374Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:04.014110Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:04.014672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:04.015778Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:04.016403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:04.020727Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:04.021434Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:04.030493Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:04.030834Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:04.031205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:04.031668Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:04.038097Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:04.040001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:04.045687Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:04.046362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:04.048296Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:04.049048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:04.063211Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:04.067381Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:04.246008Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx43cz8fgy9shh6x028z24", SessionId: ydb://session/3?node_id=1&id=NWZjYzRhYWEtZTY2YTgxMTgtYjEyNzRjNmMtNTYwNmZjNzE=, Slow query, duration: 42.517996s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:23:04.539932Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:04.540528Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:04.541208Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519629365839900888:7704];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-24T21:23:04.541641Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> TBackupTests::ShouldSucceedOnMultiShardTable[Zstd] >> KqpNotNullColumns::InsertNotNullPg+useSink [GOOD] >> KqpNotNullColumns::InsertNotNullPg-useSink >> KqpNewEngine::MultiStatementMixPure [GOOD] >> KqpNewEngine::MultiUsagePrecompute >> OlapEstimationRowsCorrectness::TPCH5 [GOOD] >> KqpNewEngine::BrokenLocksAtROTx [GOOD] >> KqpNewEngine::BrokenLocksAtROTxSharded ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TestJoinHint1+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 7815, MsgBus: 23037 2025-06-24T21:21:44.370379Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629192432583857:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:44.410625Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003933/r3tmp/tmpnZUEG7/pdisk_1.dat 2025-06-24T21:21:44.974935Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629192432583632:2079] 1750800104308135 != 1750800104308138 2025-06-24T21:21:44.979301Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:44.995348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:44.995461Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:45.000859Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7815, node 1 2025-06-24T21:21:45.247824Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or 
outdated, will use file: (empty maybe) 2025-06-24T21:21:45.247847Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:45.247881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:45.247999Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:21:45.367346Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23037 TClient is connected to server localhost:23037 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:46.150335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:46.164320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:48.270274Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629209612453456:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:48.270415Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:48.270672Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629209612453468:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:48.274585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:48.284739Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629209612453470:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:21:48.382919Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629209612453521:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:48.792956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:21:49.048723Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519629209612453770:2312];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:49.049012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519629209612453770:2312];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:49.049263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519629209612453770:2312];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:49.049384Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519629209612453770:2312];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:49.049505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519629209612453770:2312];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:49.049610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519629209612453770:2312];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:49.049705Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519629209612453770:2312];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:49.049802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519629209612453770:2312];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:49.049896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519629209612453770:2312];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:21:49.050024Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037897;self_id=[1:7519629209612453770:2312];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:21:49.050146Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519629209612453770:2312];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:21:49.053695Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519629209612453769:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:49.053764Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519629209612453769:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:49.053947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519629209612453769:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:49.054043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519629209612453769:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:49.054124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519629209612453769:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:49.054241Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519629209612453769:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:49.054333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519629209612453769:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:49.054441Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519629209612453769:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:49.054547Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519629209612453769:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.025433Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.026241Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.028749Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.029313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.032613Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.033350Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.035648Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.036378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.040329Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.046775Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.047681Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.051649Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.052881Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.053602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 
2025-06-24T21:23:12.057750Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.058362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.059486Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.060201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.064655Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.065558Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.065810Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.066208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.071729Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.071729Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.072470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.072861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.078703Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.078725Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.079465Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.079476Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.086094Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.086094Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.086871Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.087390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.093657Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.093657Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.094509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.094748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.101226Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.101225Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.102957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:12.110357Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:12.246470Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx4nbmc94c06wabp9tb96a", SessionId: 
ydb://session/3?node_id=1&id=OGQ4OTg5YTMtODgyZWE5Ny00YWJiNWM0Ni0zNGYyMzBiNw==, Slow query, duration: 32.129402s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:23:12.518523Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:12.518592Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:12.519366Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KikimrIcGateway::TestLoadTableMetadata [GOOD] >> KikimrIcGateway::TestLoadTokenSecretValueFromExternalDataSourceMetadata >> KikimrIcGateway::TestListPath [GOOD] >> KikimrIcGateway::TestDropTable >> TBackupTests::ShouldSucceedOnMultiShardTable[Zstd] [GOOD] >> KqpNotNullColumns::UpsertNotNullPkPg [GOOD] >> KqpRanges::DateKeyPredicate >> KqpNewEngine::Nondeterministic [GOOD] >> KqpNewEngine::OrderedScalarContext >> KikimrIcGateway::TestLoadExternalTable [GOOD] >> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata >> TConsoleTests::TestAlterUnknownTenantExtSubdomain [GOOD] >> TConsoleTests::TestAlterBorrowedStorage >> KqpNotNullColumns::ReplaceNotNullPg [GOOD] >> KqpNotNullColumns::SecondaryKeyWithNotNullColumn ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnMultiShardTable[Zstd] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:24.174613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:24.174722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:24.174911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:24.174937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:24.174990Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:24.175020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:24.175098Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:24.175199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:24.175912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:24.176181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:24.248819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:24.248878Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:24.274182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:24.274667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:24.274890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:24.307770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:24.308059Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:24.308889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:24.309272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:24.313200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:24.313458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:24.314793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:24.314868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:24.315117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:24.315199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:24.315260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:24.315353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:24.326855Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:23:24.480012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:24.480272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:24.480552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:24.480605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:24.480847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:24.480926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:24.484003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:24.484235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:24.484478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:24.484566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:24.484621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:24.484686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:24.488604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:24.488707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 
ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:24.488755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:24.491086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:24.491178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:24.491271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:24.491332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:24.495359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:24.498570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:24.498830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:24.500023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:24.500204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:24.500268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:24.500608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:24.500674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:24.500937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:24.501026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:24.503822Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:24.503882Z node 1 :FLAT_TX_SCHEMESHARD ... , operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:25.028159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:25.029028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:23:25.029134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:23:25.029178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:23:25.029236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:23:25.029284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-24T21:23:25.029384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:12928 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 6C0F7954-951C-4AFF-9C86-E0B1686AB692 amz-sdk-request: attempt=1 content-length: 638 content-md5: Myp3UygaBNGp6+7AMgyRnQ== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 638 FAKE_COORDINATOR: Erasing txId 102 2025-06-24T21:23:25.033922Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:487:2443], result# PutObjectResult { ETag: 332a7753281a04d1a9ebeec0320c919d } 2025-06-24T21:23:25.034085Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:486:2440] 2025-06-24T21:23:25.034241Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:487:2443], sender# [1:486:2440], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } 2025-06-24T21:23:25.036709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 REQUEST: PUT /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:12928 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: D00204A9-B2B4-4F19-942E-AB6B4E837FED amz-sdk-request: attempt=1 content-length: 20 content-md5: 2qFn9G0TW8wfvJ9C+A5Jbw== content-type: binary/octet-stream 
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 20 2025-06-24T21:23:25.037845Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:487:2443], result# PutObjectResult { ETag: daa167f46d135bcc1fbc9f42f80e496f } 2025-06-24T21:23:25.037894Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:487:2443], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-24T21:23:25.038058Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:486:2440], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-24T21:23:25.058564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 322 RawX2: 4294969600 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:23:25.058633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T21:23:25.058775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 322 RawX2: 4294969600 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:23:25.058960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 322 RawX2: 4294969600 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:23:25.059051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 1, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:25.059230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:25.059674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 326 RawX2: 4294969603 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:23:25.059732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-24T21:23:25.059849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 326 RawX2: 4294969603 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:23:25.059923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 326 RawX2: 4294969603 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:23:25.059962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:25.059988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:25.060029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:23:25.060067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:23:25.060092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T21:23:25.060219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:25.063296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:25.063441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:25.064196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:25.064253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:23:25.064376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:23:25.064414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:23:25.064450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:23:25.064482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 
2025-06-24T21:23:25.064515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T21:23:25.064578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:375:2341] message: TxId: 102 2025-06-24T21:23:25.064630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:23:25.064685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:23:25.064720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:23:25.064871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:23:25.066730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:23:25.066782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:459:2418] TestWaitNotification: OK eventTxId 102
>> KqpJoinOrder::FiveWayJoin+ColumnStore [GOOD]
>> KqpSort::TopSortTableExpr [GOOD]
>> KqpSort::TopSortTableExprOffset
>> TConsoleTests::TestCreateTenantAlreadyExistsExtSubdomain [GOOD]
>> TConsoleTests::TestCreateSubSubDomain
>> KqpSort::ReverseOptimized [GOOD]
>> KqpSort::ReverseOptimizedWithPredicate
>> KqpMergeCn::TopSortByDesc_Datetime_Limit3 [GOOD]
>> KqpMergeCn::TopSortByDesc_Bool_And_PKUint64_Limit4
>> TBackupTests::ShouldSucceedOnLargeData_MinWriteBatch
>> SystemView::ShowCreateTableDefaultLiteral
>> SystemView::PartitionStatsOneSchemeShard
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCH5 [GOOD]
Test command err: Trying to start YDB, gRPC: 22291, MsgBus: 7078
2025-06-24T21:21:27.865754Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629115765146587:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:27.865926Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003945/r3tmp/tmpEkX9l0/pdisk_1.dat 2025-06-24T21:21:28.508722Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629115765146491:2079] 1750800087801851 != 1750800087801854 2025-06-24T21:21:28.542380Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:28.542483Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:28.551647Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:28.553292Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22291, node 1 2025-06-24T21:21:28.807797Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190:
distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:28.807819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:28.807827Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:28.807955Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:21:28.931259Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7078 TClient is connected to server localhost:7078 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:29.796593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:32.850419Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629115765146587:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:32.850530Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:33.051039Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629141534950911:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:33.051178Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:33.051652Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629141534950923:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:33.056216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:33.071647Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629141534950925:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:21:33.138040Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629141534950976:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:33.521559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:21:33.888360Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629141534951239:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:33.888612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629141534951239:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:33.888947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629141534951239:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:33.889098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629141534951239:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:33.889201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629141534951239:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:33.889310Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629141534951239:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:33.889524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629141534951239:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:33.889650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629141534951239:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:33.889783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629141534951239:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:21:33.889914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037895;self_id=[1:7519629141534951239:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:21:33.890047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629141534951239:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:21:33.912366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629141534951206:2313];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:33.912480Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629141534951206:2313];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:33.912786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629141534951206:2313];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:33.912914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629141534951206:2313];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:33.913053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629141534951206:2313];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:33.913198Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629141534951206:2313];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:33.913353Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629141534951206:2313];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:33.913469Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519629141534951206:2313];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:33.913616Z node 1 :TX_COLUMNS ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.649702Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.649702Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.650383Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.650383Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.655955Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.655978Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.656664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.656664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.661840Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.661841Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.662465Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.662466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.667869Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.667869Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.668531Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.669626Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.673661Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.674048Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.674347Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.674594Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.679459Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.679459Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.680131Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.680131Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.685165Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.685166Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.686333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.686679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.691644Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.692352Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.692661Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.693085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.698176Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.698177Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.699358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.699605Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.704768Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.704811Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.705504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.706083Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.711102Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.711101Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.880613Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx47mbapkcrqsvanzzm9t1", SessionId: 
ydb://session/3?node_id=1&id=YzRkODI1OTYtZTc5ZmUwMDQtYmZiNTBlMDItNzJkYzU2Njg=, Slow query, duration: 40.820605s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:23:07.244023Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:07.245140Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:07.246067Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
>> ShowCreateView::WithTablePathPrefix
>> KqpNotNullColumns::JoinLeftTableWithNotNullPk+StreamLookup [GOOD]
>> KqpNotNullColumns::JoinLeftTableWithNotNullPk-StreamLookup
>> SystemView::Nodes
>> SystemView::StoragePoolsRanges
>> DbCounters::TabletsSimple
>> KqpNewEngine::Replace [GOOD]
>> KqpNewEngine::ReadRangeWithParams
>> KqpNewEngine::LocksNoMutations [GOOD]
>> KqpNewEngine::LocksNoMutationsSharded
>> KqpNotNullColumns::InsertNotNullPg-useSink [GOOD]
>> KqpNotNullColumns::JoinBothTablesWithNotNullPk+StreamLookup
>> KqpNewEngine::LocksMultiShard [GOOD]
>> KqpNewEngine::LocksEffects
>> KqpNewEngine::InShardsWrite [GOOD]
>> KqpNewEngine::Join
>> KikimrIcGateway::TestDropTable [GOOD]
>> KikimrIcGateway::TestDropResourcePool
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoin+ColumnStore [GOOD]
Test command err: Trying to start YDB, gRPC: 61443, MsgBus: 62347
2025-06-24T21:21:40.309877Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629174604291421:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:40.309922Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00393d/r3tmp/tmpH5M8V1/pdisk_1.dat 2025-06-24T21:21:40.948164Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:40.948316Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:40.958507Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:21:41.029712Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61443, node 1 2025-06-24T21:21:41.117091Z node 1
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:41.117118Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:41.117126Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:41.117197Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:21:41.372681Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:62347 TClient is connected to server localhost:62347 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:41.964030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:44.544231Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629191784161144:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:44.544334Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:44.547440Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629191784161156:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:44.551944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:44.578138Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629191784161158:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:21:44.649992Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629191784161209:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:45.086268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:21:45.373356Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519629196079128729:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:45.373562Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519629196079128729:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:45.373817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519629196079128729:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:45.373949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519629196079128729:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:45.374050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519629196079128729:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:45.374147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519629196079128729:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:45.374246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519629196079128729:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:45.374351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519629196079128729:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:45.374449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519629196079128729:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:21:45.374577Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037899;self_id=[1:7519629196079128729:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:21:45.374687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519629196079128729:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:21:45.389020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629196079128748:2319];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:45.389082Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629196079128748:2319];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:45.389271Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629196079128748:2319];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:45.389372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629196079128748:2319];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:45.389469Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629196079128748:2319];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:45.389565Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629196079128748:2319];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:45.389658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629196079128748:2319];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:45.389755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629196079128748:2319];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:45.389856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629196079128748:2319];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:21:45.389972Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629196079128748:2319];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:21:45.390117Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;s ... 
9395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.569701Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.570279Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.573096Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.573690Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.575953Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.576609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.579277Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.579911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.582648Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.583327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.587441Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.588168Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.590309Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.590946Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.594405Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.595094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.597241Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.597881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.601044Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.601764Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.604322Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.605036Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.608014Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.609080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.611238Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.612200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.614924Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.615639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.618476Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.619205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.621276Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.622407Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.624786Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.625421Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039295;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.627703Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.628431Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.633013Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039295;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.633797Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:13.634007Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.638905Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:13.805714Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx4k777dt7mn600r6tbvqh", SessionId: ydb://session/3?node_id=1&id=OTY1ZGQzZDgtZGRlOTg0Y2UtNzM3YTgzODAtY2FlZjdmMzU=, Slow query, duration: 35.877164s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:23:14.147493Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:14.147778Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:14.148279Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519629501021856487:10152];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-24T21:23:14.148776Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
>> SystemView::AuthGroups_Access
>> TConsoleTests::TestAlterBorrowedStorage [GOOD]
>> TConsoleTests::TestAlterStorageUnitsOfSharedTenant
>> KikimrIcGateway::TestLoadBasicSecretValueFromExternalDataSourceMetadata [GOOD]
>> KikimrIcGateway::TestLoadAwsSecretValueFromExternalDataSourceMetadata
>> KqpNotNullColumns::SecondaryKeyWithNotNullColumn [GOOD]
>> KqpNotNullColumns::SecondaryIndexWithNotNullDataColumn
>> KqpNewEngine::MultiUsagePrecompute [GOOD]
>> KqpNewEngine::MultiUsageInnerConnection
>> KqpNewEngine::BrokenLocksAtROTxSharded [GOOD]
>> KqpNewEngine::BrokenLocksOnUpdate
>> KqpNewEngine::DeleteWithInputMultiConsumption-UseSink [GOOD]
>> KqpRanges::DateKeyPredicate [GOOD]
>> KqpRanges::DuplicateKeyPredicateLiteral
>> KqpQueryServiceScripts::ExecuteScript
>> KqpService::CloseSessionsWithLoad
>> TConsoleTests::TestCreateSubSubDomain [GOOD]
>> TConsoleTests::TestCreateSubSubDomainExtSubdomain
>> KikimrIcGateway::TestDropResourcePool [GOOD]
>> KqpNewEngine::OrderedScalarContext [GOOD]
>> KqpNewEngine::PagingNoPredicateExtract
>> KqpSort::ReverseOptimizedWithPredicate [GOOD]
>> KqpSort::ReverseMixedOrderNotOptimized
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::DeleteWithInputMultiConsumption-UseSink [GOOD]
Test command err: Trying to start YDB, gRPC: 19918, MsgBus: 28681
2025-06-24T21:22:30.236133Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629388154645710:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:30.236193Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003199/r3tmp/tmpppTbS0/pdisk_1.dat 2025-06-24T21:22:31.008508Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:31.011536Z node 1
:CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629388154645691:2079] 1750800150233247 != 1750800150233250 2025-06-24T21:22:31.027533Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:31.027630Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:31.039769Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19918, node 1 2025-06-24T21:22:31.356181Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:22:31.367715Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:31.367734Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:31.367741Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:31.367850Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28681 TClient is connected to server localhost:28681 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:32.382243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:32.414626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:22:32.430679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:22:32.634430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:32.838300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:32.941682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:35.239898Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629388154645710:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:35.240024Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:35.400077Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629409629483808:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:35.400230Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:35.789157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:35.879193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:35.955237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:35.996684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:36.072475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:36.138158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:36.200773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:36.339221Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629413924451773:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:36.339287Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:36.339803Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629413924451778:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:36.343967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:36.363882Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629413924451782:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:36.453570Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629413924451835:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 9182, MsgBus: 26697 2025-06-24T21:22:39.437787Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629428249714711:2188];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00 ... ], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:23:18.676396Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7519629593772244208:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 29995, MsgBus: 27936 2025-06-24T21:23:23.240145Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519629617021032100:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:23.240202Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003199/r3tmp/tmpuxVYg0/pdisk_1.dat 2025-06-24T21:23:23.455341Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:23.455863Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519629617021032067:2079] 1750800203238799 != 1750800203238802 2025-06-24T21:23:23.481603Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:23.481735Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:23.487559Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29995, node 7 2025-06-24T21:23:23.543949Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:23.543976Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:23.543990Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:23.544151Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27936 TClient is connected to server localhost:27936 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:23:24.271759Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:24.332031Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:24.359523Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:24.458402Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:24.663896Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:24.758154Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:28.071386Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629638495870170:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:28.075297Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:28.243278Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519629617021032100:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:28.243388Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:28.293518Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:28.380373Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:28.429290Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:28.508402Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:28.568391Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:28.636199Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:28.681969Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:28.779804Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519629638495870836:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:28.779914Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:28.780325Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629638495870841:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:28.787253Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:28.803847Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519629638495870843:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:23:28.863581Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519629638495870894:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpNewEngine::ReadRangeWithParams [GOOD] >> KqpNewEngine::ReadDifferentColumns ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestDropResourcePool [GOOD] Test command err: Trying to start YDB, gRPC: 3236, MsgBus: 26219 2025-06-24T21:23:19.083506Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629600038514137:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:19.113478Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036d5/r3tmp/tmpJmX223/pdisk_1.dat 2025-06-24T21:23:19.623494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:19.623606Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:19.643575Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:19.651755Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3236, node 1 2025-06-24T21:23:20.084999Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:20.085027Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:20.085043Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:20.085196Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:23:20.112825Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26219 TClient is connected to server localhost:26219 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:21.217219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:21.244172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:21.315131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-24T21:23:22.950997Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629612923416674:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:22.951129Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:23.667978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.801532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.840639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.881215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.972432Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629617218384288:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:23.972516Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:23.972969Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629617218384293:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:23.980714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710664:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:23.991573Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629617218384295:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710664 completed, doublechecking } 2025-06-24T21:23:24.057983Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629621513351642:2569] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:24.084030Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629600038514137:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:24.084102Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 8776, MsgBus: 6117 2025-06-24T21:23:25.482247Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629624737390283:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:25.482309Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036d5/r3tmp/tmplcZWGD/pdisk_1.dat 2025-06-24T21:23:25.654201Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:25.669543Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:25.669638Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:25.672485Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8776, node 2 2025-06-24T21:23:25.712770Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:25.712793Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:25.712807Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:25.712926Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6117 TClient is connected to server localhost:6117 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:26.236631Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:26.255295Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:23:26.523386Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:28.828980Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629637622292816:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:28.829080Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:28.867859Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:28.915737Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:28.998618Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:29.054778Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:29.119710Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629641917260425:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:29.119774Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:29.120147Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629641917260430:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:29.124824Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715664:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:29.142698Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629641917260432:2329], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715664 completed, doublechecking } 2025-06-24T21:23:29.216019Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629641917260483:2561] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:29.451706Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found
: Info: Success, code: 4 Trying to start YDB, gRPC: 7296, MsgBus: 22720 2025-06-24T21:23:30.409018Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519629644380288727:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:30.409079Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036d5/r3tmp/tmpaZnBMm/pdisk_1.dat TServer::EnableGrpc on GrpcPort 7296, node 3 2025-06-24T21:23:30.590790Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:30.591703Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:30.596170Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:30.604781Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:30.605278Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519629644380288709:2079] 1750800210408285 != 1750800210408288 2025-06-24T21:23:30.624405Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:30.624428Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:30.624436Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:30.624557Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22720 TClient is connected to server localhost:22720 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:31.236850Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:23:31.253692Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:23:31.280214Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:31.422788Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> SystemView::StoragePoolsRanges [GOOD] >> SystemView::TopPartitionsByCpuFields >> KqpJoinOrder::FiveWayJoinWithPredsAndEquiv+ColumnStore [GOOD] >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchTimeout [GOOD] >> KqpNewEngine::Join [GOOD] >> KqpNewEngine::JoinIdxLookup >> KqpQueryService::ExecStats >> KqpNotNullColumns::JoinLeftTableWithNotNullPk-StreamLookup [GOOD] >> KqpNotNullColumns::JoinRightTableWithNotNullColumns+StreamLookup >> KqpMergeCn::TopSortByDesc_Bool_And_PKUint64_Limit4 [GOOD] >> KqpMergeCn::TopSortBy_Date_And_Datetime_Limit4 >> KqpNewEngine::LocksNoMutationsSharded [GOOD] >> KqpNewEngine::MultiEffects ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchTimeout [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:07.732377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:07.732476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:07.732524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:07.732567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:07.733913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:07.733974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:07.734100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:07.734762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, 
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:07.735610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:07.737388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:07.852221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:07.852281Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:07.877879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:07.878268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:07.878431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:07.894612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:07.894868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:07.895706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:07.896066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:07.899371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:07.903340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:07.914576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:07.914664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:07.920103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:07.920216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:07.920329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:07.920466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:07.936258Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:23:08.118302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } 
StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:08.118595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.118892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:08.118946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:08.119241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:08.119336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:08.125362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:08.125599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:08.125824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.125891Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:08.125953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:08.126000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:08.128490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.128566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:08.128626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:08.132199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.132262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:08.132315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-24T21:23:08.132373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:08.136610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:08.139219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:08.139453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:08.140533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:08.140702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:08.140771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:08.141146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:08.141207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:08.141408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:08.141502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:08.144088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:08.144163Z node 1 :FLAT_TX_SCHEMESHARD ... 
MESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:36.459105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:36.459381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 3, at schemeshard: 72057594046678944 2025-06-24T21:23:36.459736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:36.459871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:36.460259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:36.460338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:36.460554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:36.460654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:36.460748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:36.460954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:36.461044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:36.461202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:36.461447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:36.461541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:36.461677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:36.461730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:36.461788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T21:23:36.462025Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T21:23:36.489498Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:23:36.489714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:36.491834Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435083, 
Sender [1:1749:3669], Recipient [1:1749:3669]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2025-06-24T21:23:36.491903Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5014: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2025-06-24T21:23:36.498110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:36.498223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:36.498726Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:1749:3669], Recipient [1:1749:3669]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:23:36.498788Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:23:36.499776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:36.499855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:36.499929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:36.499973Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:23:36.517146Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 274399233, Sender [1:1787:3669], Recipient [1:1749:3669]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-24T21:23:36.517220Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5105: StateWork, processing event TEvTxAllocatorClient::TEvAllocateResult 2025-06-24T21:23:36.517264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:1749:3669] sender: [1:1807:2058] recipient: [1:15:2062] 2025-06-24T21:23:36.581756Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122945, Sender [1:1806:3715], Recipient [1:1749:3669]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2025-06-24T21:23:36.581845Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-24T21:23:36.581979Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:23:36.582312Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Simple" took 315us result status StatusSuccess 2025-06-24T21:23:36.590004Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: 
TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Simple" PathDescription { Self { Name: "Simple" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1001 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Simple" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 MaxPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 27456 RowCount: 200 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } 
ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 22334 Memory: 156728 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 27456 DataSize: 27456 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KikimrIcGateway::TestLoadTokenSecretValueFromExternalDataSourceMetadata [GOOD] >> KikimrIcGateway::TestSecretsExistingValidation >> KqpNewEngine::LocksEffects [GOOD] >> KqpNewEngine::LeftSemiJoin >> KqpNotNullColumns::JoinBothTablesWithNotNullPk+StreamLookup [GOOD] >> KqpNotNullColumns::JoinBothTablesWithNotNullPk-StreamLookup >> TConsoleTests::TestAlterStorageUnitsOfSharedTenant [GOOD] >> TConsoleTests::TestAlterServerlessTenant |96.6%| [TA] $(B)/ydb/core/tx/schemeshard/ut_stats/test-results/unittest/{meta.json ... results_accumulator.log} |96.6%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_stats/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpSort::TopSortTableExprOffset [GOOD] >> KqpSort::TopSortResults >> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata [GOOD] >> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata >> KqpNotNullColumns::SecondaryIndexWithNotNullDataColumn [GOOD] >> KqpNotNullColumns::SecondaryIndexWithNotNullDataColumnPg |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithPredsAndEquiv+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 64560, MsgBus: 7240 2025-06-24T21:21:48.174244Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629207440668205:2185];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:48.182131Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00392e/r3tmp/tmptpPL0e/pdisk_1.dat 2025-06-24T21:21:48.812401Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:48.812556Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:48.846265Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:48.847320Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629207440668058:2079] 1750800108135816 != 1750800108135819 2025-06-24T21:21:48.863661Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64560, node 1 2025-06-24T21:21:49.043796Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:49.043817Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:49.043825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:49.043950Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:21:49.177663Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7240 TClient is connected to server localhost:7240 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:50.270085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:50.288197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:52.792103Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629224620537901:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:52.792207Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629224620537890:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:52.792372Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:52.797587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:52.814526Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629224620537904:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:21:52.911863Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629224620537955:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:53.173560Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629207440668205:2185];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:53.173623Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:53.447795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:21:53.794415Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629228915505512:2319];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:53.794683Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629228915505512:2319];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:53.795003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629228915505512:2319];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:53.796398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629228915505512:2319];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:53.796615Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629228915505512:2319];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:53.796623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629228915505480:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:53.796686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629228915505480:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:53.796798Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629228915505512:2319];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:53.796912Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519629228915505480:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:53.796927Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629228915505512:2319];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:53.797022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629228915505480:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:53.797073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629228915505512:2319];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:53.797122Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629228915505480:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:53.797204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629228915505512:2319];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:21:53.797231Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629228915505480:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:53.797916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629228915505512:2319];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:21:53.798053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629228915505512:2319];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:21:53.801206Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629228915505480:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:53.801346Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629228915505480:2310];tablet_ ... 
4;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.061136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.061483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039233;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.067103Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.067103Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039233;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.067897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039197;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.068233Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.073947Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039197;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.073947Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.074621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.074998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039231;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.079966Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039231;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.079965Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.080680Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039195;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.081319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 
2025-06-24T21:23:20.085569Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039195;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.086227Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039241;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.091135Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.091734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.093427Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039241;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.094177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039263;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.097832Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.098563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039211;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.100285Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039263;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.100976Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039269;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.104056Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039211;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.104633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039203;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.109686Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039203;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.110373Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.112200Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039269;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.112709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039217;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.116522Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.117213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039239;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.118314Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039217;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.119337Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039237;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.122740Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039239;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.124758Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039237;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.135915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039271;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.142240Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039271;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.150535Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039305;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.151780Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:20.156510Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.158954Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039305;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:20.285253Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx4vv48wq22s9pqypfm3kj", SessionId: 
ydb://session/3?node_id=1&id=NTdlZWQ3YzgtNTY4YjU2Ny1kYTI5MWRkMy1hMzFjODI2OA==, Slow query, duration: 33.528445s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:23:20.593555Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:20.593555Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:20.594431Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> ColumnBuildTest::ValidDefaultValue >> KqpRanges::DuplicateKeyPredicateLiteral [GOOD] >> KqpRanges::DuplicateCompositeKeyPredicate >> TConsoleTests::TestCreateSubSubDomainExtSubdomain [GOOD] >> TConsoleTests::TestAuthorization |96.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpSort::ReverseMixedOrderNotOptimized [GOOD] >> KqpSort::ReverseRangeOptimized >> TAsyncIndexTests::MergeIndexWithReboots[PipeResets] >> KqpNewEngine::BrokenLocksOnUpdate [GOOD] >> KqpNewEngine::ComplexLookupLimit >> KqpNewEngine::MultiUsageInnerConnection [GOOD] >> KqpNewEngine::MultipleBroadcastJoin >> KqpJoinOrder::TPCDS16+ColumnStore [GOOD] >> KqpNewEngine::ReadDifferentColumns [GOOD] >> KqpNewEngine::ReadDifferentColumnsPk >> TBackupTests::ShouldSucceedOnSingleShardTable[Raw] >> KqpQueryService::TableSink_OltpUpsert >> KqpQueryService::ExecStats [GOOD] >> KqpQueryService::ExecStatsPlan >> KikimrIcGateway::TestSecretsExistingValidation [GOOD] >> KqpNewEngine::PagingNoPredicateExtract [GOOD] >> KqpNewEngine::JoinIdxLookup [GOOD] >> KqpNewEngine::JoinIdxLookupWithPredicate >> SystemView::Nodes [GOOD] >> SystemView::PartitionStatsFields >> SystemView::PartitionStatsOneSchemeShard [GOOD] >> SystemView::PartitionStatsOneSchemeShardDataQuery >> KqpNotNullColumns::JoinRightTableWithNotNullColumns+StreamLookup [GOOD] >> KqpNotNullColumns::JoinRightTableWithNotNullColumns-StreamLookup >> KqpNewEngine::MultiEffects [GOOD] >> KqpNewEngine::MultiEffectsOnSameTable >> KqpQueryServiceScripts::ExecuteScript [GOOD] >> KqpQueryServiceScripts::ExecuteMultiScript >> TConsoleTests::TestAlterServerlessTenant [GOOD] >> TConsoleTests::TestAttributes >> TBackupTests::ShouldSucceedOnSingleShardTable[Raw] [GOOD] >> KqpMergeCn::TopSortBy_Date_And_Datetime_Limit4 [GOOD] >> KqpMergeCn::SortBy_PK_Uint64_Desc >> KqpNewEngine::LeftSemiJoin [GOOD] >> KqpNewEngine::LocksInRoTx ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestSecretsExistingValidation [GOOD] Test command err: Trying to start YDB, gRPC: 11478, MsgBus: 17658 
2025-06-24T21:23:19.070419Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629597518956386:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:19.070484Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036cd/r3tmp/tmpAZkbPF/pdisk_1.dat 2025-06-24T21:23:19.606392Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:19.606512Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:19.619763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:19.655299Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629597518956270:2079] 1750800199060795 != 1750800199060798 2025-06-24T21:23:19.668694Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11478, node 1 2025-06-24T21:23:20.071939Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:20.071963Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:20.071970Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:20.072069Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:23:20.086456Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17658 TClient is connected to server localhost:17658 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:21.389518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:23:21.415555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:21.478219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:23:22.945401Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629610403858848:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:22.945530Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:23.666028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.808600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.847619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.925248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.975590Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629614698826464:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:23.975715Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:23.977686Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629614698826469:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:23.987459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710664:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:24.001085Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629614698826471:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710664 completed, doublechecking } 2025-06-24T21:23:24.067353Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629618993793818:2566] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:24.069097Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629597518956386:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:24.069141Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 26042, MsgBus: 19820 2025-06-24T21:23:25.464220Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629623874259685:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:25.464270Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036cd/r3tmp/tmpIfgRFM/pdisk_1.dat 2025-06-24T21:23:25.598693Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:25.603334Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629623874259661:2079] 1750800205452592 != 1750800205452595 TServer::EnableGrpc on GrpcPort 26042, node 2 2025-06-24T21:23:25.625721Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:25.625804Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:25.626901Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:25.719742Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:25.719765Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:25.719775Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:25.719900Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19820 TClient is connected to server localhost:19820 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRU ... ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:23:37.342193Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715714:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Trying to start YDB, gRPC: 28679, MsgBus: 6988 2025-06-24T21:23:38.445904Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519629678422350684:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:38.445986Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036cd/r3tmp/tmp9S1SqX/pdisk_1.dat 2025-06-24T21:23:38.690426Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:38.693904Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519629678422350666:2079] 1750800218437953 != 1750800218437956 2025-06-24T21:23:38.715362Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:38.715461Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:38.718619Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28679, node 3 2025-06-24T21:23:38.855820Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:38.855858Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:38.855869Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:38.856019Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6988 2025-06-24T21:23:39.475304Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6988 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:39.722270Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:39.754777Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:39.862253Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:40.118497Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:40.225919Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:42.699253Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519629695602221503:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:42.699357Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:42.768931Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:42.849448Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:42.906210Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:42.985793Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:43.071700Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:43.113107Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:43.153794Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:43.250652Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519629699897189459:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:43.250735Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:43.250802Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519629699897189464:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:43.256045Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:43.270424Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519629699897189466:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:23:43.363133Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519629699897189517:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:43.440341Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519629678422350684:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:43.440422Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::PagingNoPredicateExtract [GOOD] Test command err: Trying to start YDB, gRPC: 4228, MsgBus: 18786 2025-06-24T21:22:47.251719Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629461745342969:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:47.266185Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003182/r3tmp/tmp2Lovjr/pdisk_1.dat 2025-06-24T21:22:48.062656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:48.062760Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:48.066756Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:22:48.085779Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:48.089077Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629461745342938:2079] 1750800167241290 != 1750800167241293 TServer::EnableGrpc on GrpcPort 4228, node 1 2025-06-24T21:22:48.279966Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:22:48.328151Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:48.328177Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:48.328185Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:48.328302Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18786 TClient is connected to server localhost:18786 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:49.379315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:22:49.456648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:49.655344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:49.966693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:50.064685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:22:52.247332Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629461745342969:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:52.247392Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:52.252154Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629483220181058:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:52.252289Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:52.671517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:52.750410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:52.786749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:52.822796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:52.862502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:52.909649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:52.950760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:53.023093Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629487515149012:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:53.023236Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:53.023569Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629487515149017:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:53.027205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:53.040239Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629487515149019:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:22:53.115386Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629487515149071:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 19793, MsgBus: 32291 2025-06-24T21:22:55.324781Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629495090687522:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:55.324818Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath ... runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:31.661709Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T21:23:31.662661Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519629649296623390:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:23:31.733059Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7519629649296623441:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 24600, MsgBus: 27017 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003182/r3tmp/tmp6qdzs2/pdisk_1.dat 2025-06-24T21:23:35.264146Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:23:35.344275Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:35.345586Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519629666619581421:2079] 1750800215047588 != 1750800215047591 2025-06-24T21:23:35.371370Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:35.371507Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:35.384902Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24600, node 7 2025-06-24T21:23:35.599822Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:35.599850Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:35.599862Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:35.600017Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27017 2025-06-24T21:23:36.043327Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27017 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:23:36.509105Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:36.527671Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:23:36.542040Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:36.672099Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:36.914507Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:37.032428Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:40.888350Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629688094419556:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:40.888480Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:40.954643Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:41.004837Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:41.084646Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:41.148699Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:41.194550Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:41.325286Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:41.385763Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:41.470205Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629692389387514:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:41.470361Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:41.470700Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629692389387519:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:41.476181Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:41.494628Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519629692389387521:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:23:41.579666Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519629692389387572:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS16+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 17204, MsgBus: 27674 2025-06-24T21:21:37.099402Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629156222333270:2179];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003944/r3tmp/tmpOoNw5D/pdisk_1.dat 2025-06-24T21:21:37.637107Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:21:37.877929Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:37.878018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:37.881156Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17204, node 1 2025-06-24T21:21:37.995536Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629156222333129:2079] 1750800096920438 != 1750800096920441 2025-06-24T21:21:37.998233Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:38.094974Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:38.212051Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:38.212083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:38.212090Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:38.212180Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27674 TClient is connected to server localhost:27674 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:39.274154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:39.303366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:21:41.897742Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629177697170251:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:41.897856Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:41.898306Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629177697170263:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:41.902585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:41.924563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:21:41.924921Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629177697170265:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:21:41.991238Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629156222333270:2179];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:41.995441Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:42.007509Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629177697170316:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:42.422853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:21:42.730434Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629181992137830:2320];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:42.730650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629181992137830:2320];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:42.730902Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629181992137830:2320];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:42.731004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629181992137830:2320];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:42.731139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629181992137830:2320];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:42.731268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629181992137830:2320];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:42.731367Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629181992137830:2320];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:42.731450Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629181992137830:2320];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:42.731543Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629181992137830:2320];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:21:42.731659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629181992137830:2320];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:21:42.731776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629181992137830:2320];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:21:42.749588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519629181992137810:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:42.749696Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519629181992137810:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:42.749908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519629181992137810:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:42.750199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519629181992137810:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:42.750343Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519629181992137810:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:42.750491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519629181992137810:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:42.750601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519629181992137810:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_regis ... 
_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.668147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039185;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.671692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039346;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.673382Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039185;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.674079Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039352;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.677618Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039346;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.678203Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.679352Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039352;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.680085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.683706Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.684497Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039193;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.685086Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.685796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.690274Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039193;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.690844Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 
2025-06-24T21:23:06.690935Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039247;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.691394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.697320Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.697321Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039247;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.698097Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039219;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.698513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039201;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.705114Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039219;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.705116Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039201;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.705850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039233;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.706147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039333;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.712331Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039333;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.712333Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039233;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.713103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039315;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.713516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.719778Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.719778Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039315;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.721107Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039187;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.721452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039271;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:06.726842Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039187;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.727726Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039271;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:06.892588Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx4dzk2km10n4w91yd4wrj", SessionId: ydb://session/3?node_id=1&id=N2JhMWUzOS1lMDkxNGNlYi0zMzM5MGU0ZC04ZjVlZmZiMg==, Slow query, duration: 34.328462s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:23:07.549610Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:07.550094Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519629461165057881:9313];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-24T21:23:07.550727Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:07.551858Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:34.505005Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx5wvv16x60nyk0nkmq0et", SessionId: ydb://session/3?node_id=1&id=N2JhMWUzOS1lMDkxNGNlYi0zMzM5MGU0ZC04ZjVlZmZiMg==, Slow query, duration: 13.932883s, status: 
STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "-- NB: Subquerys\n$orders_with_several_warehouses = (\n select cs_order_number\n from `/Root/test/ds/catalog_sales`\n group by cs_order_number\n having count(distinct cs_warehouse_sk) > 1\n);\n\n-- start query 1 in stream 0 using template query16.tpl and seed 171719422\nselect\n count(distinct cs1.cs_order_number) as `order count`\n ,sum(cs_ext_ship_cost) as `total shipping cost`\n ,sum(cs_net_profit) as `total net profit`\nfrom\n `/Root/test/ds/catalog_sales` cs1\n cross join `/Root/test/ds/date_dim`\n cross join `/Root/test/ds/customer_address`\n cross join `/Root/test/ds/call_center`\n left semi join $orders_with_several_warehouses cs2 on cs1.cs_order_number = cs2.cs_order_number\n left only join `/Root/test/ds/catalog_returns` cr1 on cs1.cs_order_number = cr1.cr_order_number\nwhere\n cast(d_date as date) between cast('1999-4-01' as date) and\n (cast('1999-4-01' as date) + DateTime::IntervalFromDays(60))\nand cs1.cs_ship_date_sk = d_date_sk\nand cs1.cs_ship_addr_sk = ca_address_sk\nand ca_state = 'IL'\nand cs1.cs_call_center_sk = cc_call_center_sk\nand cc_county in ('Richland County','Bronx County','Maverick County','Mesa County',\n 'Raleigh County'\n)\norder by `order count`\nlimit 100;\n", parameters: 0b >> KqpJoinOrder::CanonizedJoinOrderTPCH18 [GOOD] >> KqpJoinOrder::FiveWayJoinWithComplexPreds+ColumnStore [GOOD] >> KqpNotNullColumns::JoinBothTablesWithNotNullPk-StreamLookup [GOOD] >> ColumnBuildTest::ValidDefaultValue [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnSingleShardTable[Raw] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:45.275087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:45.275227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:45.275270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:45.275320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:45.275377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:45.275415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:45.275514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:45.275590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, 
InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:45.276361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:45.276665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:45.355451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:45.355523Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:45.373912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:45.378149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:45.378325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:45.386470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:45.386724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:45.387405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:45.387855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:45.390848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:45.391059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:45.392327Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:45.392401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:45.392522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:45.392578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:45.392654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:45.392842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:45.400530Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:23:45.552106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:45.552329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:45.552532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:45.552577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:45.552802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:45.552886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:45.555358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:45.555608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:45.555847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:45.555934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:45.555974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:45.556021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:45.558267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:45.558341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:45.558394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:45.560596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:45.560667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:45.560736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:45.560797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:45.566173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:45.576018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:45.576247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:45.577293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:45.577433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:45.577489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:45.577772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:45.577826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:45.577996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:45.578114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:45.580636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:45.580702Z node 1 :FLAT_TX_SCHEMESHARD ... 
schemeshard: 72057594046678944 2025-06-24T21:23:46.456195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-24T21:23:46.456336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 128 -> 129 2025-06-24T21:23:46.456463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:23:46.469867Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:797: [Export] [s3] Bootstrap: self# [1:417:2386], attempt# 0 2025-06-24T21:23:46.574670Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:441: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:417:2386], sender# [1:416:2385] FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-24T21:23:46.588227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:46.588315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:26183 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: C09322D0-32F4-406A-96C7-AB9B39181CBA amz-sdk-request: attempt=1 content-length: 94 content-md5: ZpDejBbuBPHjGq8ZC8z8QA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD 2025-06-24T21:23:46.588629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 S3_MOCK::HttpServeWrite: /metadata.json / / 94 2025-06-24T21:23:46.588674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-24T21:23:46.589259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:46.589339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 102 2025-06-24T21:23:46.590101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:23:46.590235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:23:46.590273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:23:46.590315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, 
at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:23:46.590375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:23:46.590469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-24T21:23:46.599577Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:401: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:417:2386], result# PutObjectResult { ETag: 6690de8c16ee04f1e31aaf190bccfc40 } 2025-06-24T21:23:46.613307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:26183 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 07C7E538-4BCB-4E41-93A4-7CA74D07804B amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-24T21:23:46.617774Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:417:2386], result# PutObjectResult { ETag: 72cbc2e67a8d4d9b122f2e329a5a74fd } 2025-06-24T21:23:46.617900Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:416:2385] 2025-06-24T21:23:46.618027Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:417:2386], sender# [1:416:2385], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:26183 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 1D499AEA-426D-4727-A91A-7CFB25A9CB6E amz-sdk-request: attempt=1 content-length: 11 content-md5: bj4KQf2rit2DOGLxvSlUww== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv / / 11 2025-06-24T21:23:46.629739Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:417:2386], result# PutObjectResult { ETag: 6e3e0a41fdab8add833862f1bd2954c3 } 2025-06-24T21:23:46.629818Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:417:2386], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-24T21:23:46.630009Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:416:2385], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-24T21:23:46.656630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 310 RawX2: 4294969591 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" 
BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:23:46.656719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T21:23:46.656893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 310 RawX2: 4294969591 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:23:46.657012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 310 RawX2: 4294969591 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-24T21:23:46.657082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:46.657127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:46.657163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:23:46.663421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T21:23:46.663672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:46.666936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:46.667398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:46.667467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:23:46.667602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:23:46.667634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:23:46.667670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:23:46.667706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:23:46.667742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation 
IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T21:23:46.667825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:338:2315] message: TxId: 102 2025-06-24T21:23:46.667984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:23:46.668024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:23:46.668055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:23:46.668171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:23:46.674401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:23:46.674499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:402:2372] TestWaitNotification: OK eventTxId 102 >> TConsoleTests::TestAuthorization [GOOD] >> TConsoleTests::TestAuthorizationExtSubdomain >> KikimrIcGateway::TestLoadAwsSecretValueFromExternalDataSourceMetadata [GOOD] >> KikimrIcGateway::TestLoadDataSourceProperties >> OlapEstimationRowsCorrectness::TPCDS96 [GOOD] |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> SystemView::AuthGroups_Access [GOOD] >> SystemView::AuthGroups_ResultOrder >> ColumnBuildTest::BuildColumnDoesnotRestoreDeletedRows ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::ValidDefaultValue [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:42.041086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:42.041210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:42.041262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:42.041307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:42.042230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:42.042282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:42.042428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, 
Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:42.042532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:42.043416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:42.048320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:42.184081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:42.184159Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:42.208185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:42.208686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:42.208949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:42.224314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:42.224677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:42.231501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:42.232117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:42.272001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:42.277570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:42.299998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:42.300125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:42.300400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:42.300462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:42.300584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:42.300698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:42.332451Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: 
[1:241:2058] recipient: [1:15:2062] 2025-06-24T21:23:42.486373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:42.491789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:42.493519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:42.493636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:42.495108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:42.495293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:42.500468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:42.501822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:42.502113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:42.502254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:42.502303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:42.502342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:42.504956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:42.505020Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:42.505072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:42.507688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:42.507738Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:42.507783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:42.507865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:42.512795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:42.515405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:42.518196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:42.519358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:42.519532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:42.519584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:42.521026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:42.521095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:42.521301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:42.521414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:42.525498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:42.525564Z node 1 :FLAT_TX_SCHEMESHARD ... 
} ExecLevel: 0 TxId: 281474976725761 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72075186233409550 2025-06-24T21:23:48.067128Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking 2025-06-24T21:23:48.067250Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1149:3017], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-24T21:23:48.067647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976725761:4294967295 from tablet: 72075186233409549 to tablet: 72075186233409550 cookie: 0:281474976725761 msg type: 269090816 2025-06-24T21:23:48.067794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976725761, partId: 4294967295, tablet: 72075186233409550 2025-06-24T21:23:48.068005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976725761, at schemeshard: 72075186233409549 2025-06-24T21:23:48.068040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 0/1, is published: true 2025-06-24T21:23:48.068091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976725761, at schemeshard: 72075186233409549 2025-06-24T21:23:48.088357Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [1:1815:3675], Recipient [1:757:2644]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186233409549 ClientId: [1:1815:3675] ServerId: [1:1817:3677] } 2025-06-24T21:23:48.088431Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T21:23:48.156415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 650, transactions count in step: 1, at schemeshard: 72075186233409549 2025-06-24T21:23:48.156582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976725761 AckTo { RawX1: 0 RawX2: 0 } } Step: 650 MediatorID: 72075186233409551 TabletID: 72075186233409549, at schemeshard: 72075186233409549 2025-06-24T21:23:48.156649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: 
[72075186233409549] TDropLock TPropose opId# 281474976725761:0 HandleReply TEvOperationPlan: step# 650 2025-06-24T21:23:48.156705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976725761:0 128 -> 240 2025-06-24T21:23:48.163643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976725761:0, at schemeshard: 72075186233409549 2025-06-24T21:23:48.163731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72075186233409549] TDone opId# 281474976725761:0 ProgressState 2025-06-24T21:23:48.163825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976725761:0 progress is 1/1 2025-06-24T21:23:48.163859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-24T21:23:48.163894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976725761:0 progress is 1/1 2025-06-24T21:23:48.163922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-24T21:23:48.163955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 1/1, is published: true 2025-06-24T21:23:48.164044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:572:2508] message: TxId: 281474976725761 2025-06-24T21:23:48.164088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-24T21:23:48.164120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976725761:0 2025-06-24T21:23:48.164151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976725761:0 2025-06-24T21:23:48.164245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 3 2025-06-24T21:23:48.176993Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976725761 2025-06-24T21:23:48.177103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976725761 2025-06-24T21:23:48.177206Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 106, txId# 281474976725761 2025-06-24T21:23:48.177301Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1149:3017], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, 
InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }}, txId# 281474976725761 2025-06-24T21:23:48.180130Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking 2025-06-24T21:23:48.180267Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1149:3017], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-24T21:23:48.180339Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-24T21:23:48.186988Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Done 2025-06-24T21:23:48.187121Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Done TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1149:3017], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-24T21:23:48.187202Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:334: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 106, subscribers count# 1 2025-06-24T21:23:48.187448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for 
txId 106: got EvNotifyTxCompletionResult 2025-06-24T21:23:48.187495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:1167:3035] TestWaitNotification: OK eventTxId 106 2025-06-24T21:23:48.190154Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot/ServerLessDB" IndexBuildId: 106 2025-06-24T21:23:48.190537Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "ColumnValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 1111 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "ColumnValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 1111 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } >> TBackupTests::ShouldSucceedOnLargeData[Raw] [GOOD] >> ShowCreateView::WithTablePathPrefix [GOOD] >> ShowCreateView::WithSingleQuotedTablePathPrefix >> KqpQueryServiceScripts::ExecuteScriptStatsProfile ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::JoinBothTablesWithNotNullPk-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 15288, MsgBus: 14509 2025-06-24T21:23:01.579137Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629521827659839:2186];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:01.586069Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003164/r3tmp/tmpEQqUO9/pdisk_1.dat 2025-06-24T21:23:02.073308Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:02.073407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:02.076249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:02.126706Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15288, node 1 2025-06-24T21:23:02.289118Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:02.289145Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:02.289174Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:02.289306Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:23:02.580339Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event 
for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14509 TClient is connected to server localhost:14509 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:03.126545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:05.245620Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629539007529508:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:05.245759Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:05.520751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:05.674183Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629539007529612:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:05.674285Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:05.676114Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629539007529617:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:05.681275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:05.702375Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629539007529619:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:23:05.803843Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629539007529670:2394] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:05.986192Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519629539007529713:2316], status: PRECONDITION_FAILED, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:13: Error: Missing key column in input: Key for table: /Root/TestInsertNotNullPk, code: 2029 2025-06-24T21:23:05.987384Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZjVhZjczNS1lOWVjZjBhMC0zY2EzYTAxYS01ZGU2M2IyZQ==, ActorId: [1:7519629539007529487:2288], ActorState: ExecuteState, TraceId: 01jyhx5ek9cae4am54ecjnzps5, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id: 2025-06-24T21:23:06.017496Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519629539007529722:2320], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:47: Error: Failed to convert type: Struct<'Key':Null,'Value':String> to Struct<'Key':Uint64,'Value':String?>
:1:47: Error: Failed to convert 'Key': Null to Uint64
:1:47: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-24T21:23:06.019653Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZjVhZjczNS1lOWVjZjBhMC0zY2EzYTAxYS01ZGU2M2IyZQ==, ActorId: [1:7519629539007529487:2288], ActorState: ExecuteState, TraceId: 01jyhx5emd276ztdxz31jwcgq1, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 65072, MsgBus: 18734 2025-06-24T21:23:06.895533Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629541903453059:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:06.936029Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003164/r3tmp/tmpXN0G4F/pdisk_1.dat 2025-06-24T21:23:07.143283Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629541903453023:2079] 1750800186888222 != 1750800186888225 2025-06-24T21:23:07.162936Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:07.169813Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:07.169893Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:07.173570Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 65072, node 2 2025-06-24T21:23:07.331728Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:07.331749Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:07.331757Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:07.331876Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18734 TClient is connected to server localhost:18734 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:23:07.935327Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:23:07.957977Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_sub ... de 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003164/r3tmp/tmpIMDEnh/pdisk_1.dat 2025-06-24T21:23:39.078853Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:39.089925Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:39.090035Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:39.097800Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61007, node 7 2025-06-24T21:23:39.170182Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:39.170208Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:39.170222Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:39.170423Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15984 2025-06-24T21:23:39.847488Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15984 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:40.070075Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:23:40.083462Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:40.096410Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:40.200537Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:40.485880Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:40.633768Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:43.823947Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629702274551803:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:43.824084Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:43.832563Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519629680799713735:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:43.832651Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:43.873078Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:43.961494Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:44.045489Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:44.101933Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:44.185147Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:44.326577Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:44.379289Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:44.486412Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519629706569519772:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:44.486592Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:44.486936Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629706569519777:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:44.493255Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:44.514155Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519629706569519779:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:23:44.570317Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519629706569519830:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:46.068523Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:46.305506Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpNotNullColumns::SecondaryIndexWithNotNullDataColumnPg [GOOD] >> KqpQueryService::TableSink_OltpUpsert [GOOD] >> KqpQueryService::TableSink_OltpUpdate >> ColumnBuildTest::CancelBuild >> KqpSort::ReverseRangeOptimized [GOOD] >> KqpSort::ReverseRangeLimitOptimized ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH18 [GOOD] Test command err: Trying to start YDB, gRPC: 12981, MsgBus: 30239 2025-06-24T21:22:02.652273Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629266938176637:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:02.653634Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0038ef/r3tmp/tmpQabi3L/pdisk_1.dat 2025-06-24T21:22:03.223747Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:03.223863Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:03.269607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:22:03.304839Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:03.311318Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629266938176617:2079] 1750800122628381 != 1750800122628384 TServer::EnableGrpc on GrpcPort 12981, node 1 2025-06-24T21:22:03.583943Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:03.583961Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:03.583968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:03.584062Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:22:03.655505Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30239 TClient is connected to server localhost:30239 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:04.740343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:07.443213Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629288413013747:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:07.443302Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:07.443420Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629288413013759:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:07.447509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:07.472102Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629288413013761:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:22:07.572063Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629288413013812:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:07.687753Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629266938176637:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:07.688021Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:08.014530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:22:08.414979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629292707981349:2311];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:22:08.415900Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629292707981349:2311];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:22:08.416095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629292707981356:2318];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:22:08.416137Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629292707981356:2318];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:22:08.416182Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629292707981349:2311];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:22:08.416306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629292707981349:2311];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:22:08.416311Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629292707981356:2318];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:22:08.416432Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629292707981356:2318];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:22:08.416436Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037901;self_id=[1:7519629292707981349:2311];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:22:08.416551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629292707981356:2318];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:22:08.416563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629292707981349:2311];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:22:08.416656Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629292707981356:2318];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:22:08.416680Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629292707981349:2311];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:22:08.416770Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629292707981356:2318];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:22:08.416794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629292707981349:2311];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:22:08.417383Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629292707981356:2318];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:22:08.417525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629292707981356:2318];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:22:08.417649Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629292707981356:2318];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:22:08.417758Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629292707981356:2318];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:22:08.420761Z node 1 :TX_COLU ... 
4956Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039232;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.270466Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039232;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.271491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039298;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.272956Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.273563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039372;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.277298Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039298;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.277925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.279581Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039372;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.280170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039186;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.283670Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.284330Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039368;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.289985Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039368;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.290678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.296350Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.297071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.297190Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039186;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.297623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039310;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.302906Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.303724Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.309386Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.310565Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.311446Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039310;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.311956Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.316390Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.317062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.317235Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.321420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.322197Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.322832Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.326746Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.331806Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.332456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.334589Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.341139Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.342462Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.347686Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.348450Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039234;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.352282Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.353624Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.353962Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039234;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.358704Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.541911Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx57858dqy9vycpexsyxtp", SessionId: ydb://session/3?node_id=1&id=YWJiNGFiYzMtZDk1Y2E2M2QtNzQwNjJiZTgtZmU2MjRhMTY=, Slow query, duration: 33.103625s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:23:31.884222Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:31.884849Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:31.885250Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519629610535617473:11024];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039094; 2025-06-24T21:23:31.885791Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithComplexPreds+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 10600, MsgBus: 3159 2025-06-24T21:21:57.684639Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629245076181312:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:57.684696Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003900/r3tmp/tmpaXPawn/pdisk_1.dat 2025-06-24T21:21:58.533041Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:58.533134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:58.548238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:21:58.625798Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:58.630655Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629245076181292:2079] 1750800117666261 != 1750800117666264 TServer::EnableGrpc on GrpcPort 10600, node 1 2025-06-24T21:21:58.776176Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:58.855675Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:58.855692Z node 1 :NET_CLASSIFIER 
WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:58.855700Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:58.855806Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3159 TClient is connected to server localhost:3159 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:59.990506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:00.017728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:22:02.691296Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629245076181312:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:02.691391Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:02.929251Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629266551018427:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:02.929394Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:02.929737Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629266551018439:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:02.933887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:02.961522Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629266551018441:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:22:03.051930Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629270845985788:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:03.532324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:22:03.835923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629270845986012:2322];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:22:03.836146Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629270845986012:2322];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:22:03.836417Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629270845986012:2322];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:22:03.836539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629270845986012:2322];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:22:03.836678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629270845986012:2322];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:22:03.836794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629270845986012:2322];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:22:03.836868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519629270845985988:2314];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:22:03.836897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629270845986012:2322];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:22:03.836901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519629270845985988:2314];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:22:03.837004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037898;self_id=[1:7519629270845986012:2322];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:22:03.837021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519629270845985988:2314];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:22:03.837134Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519629270845985988:2314];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:22:03.837139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629270845986012:2322];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:22:03.837244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519629270845985988:2314];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:22:03.837271Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629270845986012:2322];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:22:03.837362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519629270845985988:2314];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:22:03.837388Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519629270845986012:2322];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:22:03.838217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519629270845985988:2314];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:22:03.838398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519629270845985988:2314];tablet_ ... 
95097Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.001264Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.001980Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.005068Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.005658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.011396Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.013992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039349;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.017651Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.018301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.020379Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039349;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.021165Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.027676Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.028428Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.029582Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.030323Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.044567Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.045169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.051089Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.052056Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.052687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.058963Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.060002Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.063877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.066460Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.067290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.073202Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.073202Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.073850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.074267Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.080294Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.080509Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.081179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.081191Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.088909Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.089742Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.090500Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.091483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.097179Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.097960Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:31.100786Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.104126Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:31.365066Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx53x42q695gpy0z0v6fz1", SessionId: ydb://session/3?node_id=1&id=YjZkNDJkNDgtYWNkZWE4YzAtNmQyMzQzNWYtNWY1M2Q1YWU=, Slow query, duration: 36.352509s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:23:31.648613Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:31.648977Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:31.649007Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519629502774255623:7663];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-24T21:23:31.649836Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::SecondaryIndexWithNotNullDataColumnPg [GOOD] Test command err: Trying to start YDB, gRPC: 28890, MsgBus: 64528 2025-06-24T21:23:05.123028Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629539759447725:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:05.123206Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003163/r3tmp/tmpDlRXdN/pdisk_1.dat 2025-06-24T21:23:05.615565Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:05.615699Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:05.622088Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:05.662178Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:05.663201Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629539759447560:2079] 1750800185092677 != 1750800185092680 TServer::EnableGrpc on GrpcPort 28890, node 1 2025-06-24T21:23:05.762705Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:05.762730Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:05.762745Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:05.762875Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64528 2025-06-24T21:23:06.118692Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:64528 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:06.550770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:06.568219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:08.889323Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629552644350087:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:08.889447Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:09.213892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:09.393429Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629556939317489:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:09.393528Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:09.395264Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629556939317494:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:09.411955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:09.432664Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629556939317496:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:23:09.491058Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629556939317547:2397] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:09.689957Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519629556939317589:2316], status: PRECONDITION_FAILED, issues:
: Error: Type annotation, code: 1030
:1:14: Error: At function: KiWriteTable!
:1:14: Error: Missing key column in input: Key for table: /Root/TestReplaceNotNullPk, code: 2029 2025-06-24T21:23:09.691532Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YTBmNGMyY2MtMjM0NTdmY2EtNzAxZWM2N2ItNWRlY2QxNjg=, ActorId: [1:7519629552644350069:2289], ActorState: ExecuteState, TraceId: 01jyhx5j6g0aprq9b2w5nf1yfj, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id: 2025-06-24T21:23:09.723855Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519629556939317598:2320], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:14: Error: At function: KiWriteTable!
:1:49: Error: Failed to convert type: Struct<'Key':Null,'Value':String> to Struct<'Key':Uint64,'Value':String?>
:1:49: Error: Failed to convert 'Key': Null to Uint64
:1:49: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-24T21:23:09.725605Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YTBmNGMyY2MtMjM0NTdmY2EtNzAxZWM2N2ItNWRlY2QxNjg=, ActorId: [1:7519629552644350069:2289], ActorState: ExecuteState, TraceId: 01jyhx5j866hp5nj7ftr2kg3pm, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 32497, MsgBus: 7602 2025-06-24T21:23:10.610398Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629558384688699:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:10.638716Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003163/r3tmp/tmp1sMFKl/pdisk_1.dat 2025-06-24T21:23:10.855048Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:10.855141Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:10.857154Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:10.857553Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629558384688681:2079] 1750800190606198 != 1750800190606201 2025-06-24T21:23:10.870277Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32497, node 2 2025-06-24T21:23:11.042754Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:11.042779Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:11.042787Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:11.042907Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7602 TClient is connected to server localhost:7602 2025-06-24T21:23:11.639753Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
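For context on the KQP compile errors above (code 2029 "Missing key column in input: Key" and code 2031 "Failed to convert 'Key': Null to Uint64"): a minimal YQL sketch of write statements that would be expected to produce them, assuming a row table shaped like the log's target type Struct<'Key':Uint64,'Value':String?>. The DDL and statements below are illustrative assumptions, not taken from the test source.

CREATE TABLE TestReplaceNotNullPk (
    Key Uint64 NOT NULL,
    Value String,
    PRIMARY KEY (Key)
);

-- Omitting the primary key column entirely fails type annotation with code 2029
-- ("Missing key column in input: Key").
REPLACE INTO TestReplaceNotNullPk (Value) VALUES ("foo");

-- Supplying an explicit NULL for the key fails with code 2031
-- ("Failed to convert 'Key': Null to Uint64").
REPLACE INTO TestReplaceNotNullPk (Key, Value) VALUES (NULL, "foo");

-- A valid write provides a non-null key of the declared type.
REPLACE INTO TestReplaceNotNullPk (Key, Value) VALUES (1u, "foo");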
2025-06-24T21:23:11.728113Z ... 44073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:41.668999Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:41.684247Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:45.030302Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629710090871220:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:45.030411Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:45.068024Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:45.185284Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629710090871373:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:45.185422Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:45.186066Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629710090871378:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:45.195368Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:45.215309Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519629710090871380:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:23:45.287041Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519629710090871431:2428] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:45.459319Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519629688616034294:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:45.459407Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:46.659490Z node 7 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [7:7519629714385838877:2343], TxId: 281474976710663, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=OGU2NDg5ODktMjZjYjI1M2UtZmFhY2E2NTgtODg3NzIzZWY=. TraceId : 01jyhx6nsmf9bx3dr02kf7dpp9. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: BAD_REQUEST KIKIMR_BAD_COLUMN_TYPE: {
: Error: Tried to insert NULL value into NOT NULL column: Index1, code: 2031 }. 2025-06-24T21:23:46.660561Z node 7 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [7:7519629714385838878:2344], TxId: 281474976710663, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jyhx6nsmf9bx3dr02kf7dpp9. SessionId : ydb://session/3?node_id=7&id=OGU2NDg5ODktMjZjYjI1M2UtZmFhY2E2NTgtODg3NzIzZWY=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [7:7519629714385838873:2290], status: BAD_REQUEST, reason: {
: Error: Terminate execution } 2025-06-24T21:23:46.660951Z node 7 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [7:7519629714385838879:2345], TxId: 281474976710663, task: 3. Ctx: { SessionId : ydb://session/3?node_id=7&id=OGU2NDg5ODktMjZjYjI1M2UtZmFhY2E2NTgtODg3NzIzZWY=. TraceId : 01jyhx6nsmf9bx3dr02kf7dpp9. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [7:7519629714385838873:2290], status: BAD_REQUEST, reason: {
: Error: Terminate execution } 2025-06-24T21:23:46.661118Z node 7 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [7:7519629714385838880:2346], TxId: 281474976710663, task: 4. Ctx: { SessionId : ydb://session/3?node_id=7&id=OGU2NDg5ODktMjZjYjI1M2UtZmFhY2E2NTgtODg3NzIzZWY=. CustomerSuppliedId : . TraceId : 01jyhx6nsmf9bx3dr02kf7dpp9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [7:7519629714385838873:2290], status: BAD_REQUEST, reason: {
: Error: Terminate execution } 2025-06-24T21:23:46.661858Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=7&id=OGU2NDg5ODktMjZjYjI1M2UtZmFhY2E2NTgtODg3NzIzZWY=, ActorId: [7:7519629710090871202:2290], ActorState: ExecuteState, TraceId: 01jyhx6nsmf9bx3dr02kf7dpp9, Create QueryResponse for error on request, msg: 2025-06-24T21:23:46.722753Z node 7 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-24T21:23:46.759679Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7519629714385838907:2351], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:13: Error: Missing not null column in input: Index1. All not null columns should be initialized, code: 2032 2025-06-24T21:23:46.762661Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=7&id=OGU2NDg5ODktMjZjYjI1M2UtZmFhY2E2NTgtODg3NzIzZWY=, ActorId: [7:7519629710090871202:2290], ActorState: ExecuteState, TraceId: 01jyhx6pdaat1g7sr2hah58bnw, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-24T21:23:46.800154Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7519629714385838927:2359], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:13: Error: Missing not null column in input: Index1. All not null columns should be initialized, code: 2032 2025-06-24T21:23:46.802898Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=7&id=OGU2NDg5ODktMjZjYjI1M2UtZmFhY2E2NTgtODg3NzIzZWY=, ActorId: [7:7519629710090871202:2290], ActorState: ExecuteState, TraceId: 01jyhx6pescxgcnqsdf2qh13pv, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-24T21:23:46.843612Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7519629714385838946:2367], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:14: Error: At function: KiWriteTable!
:1:14: Error: Missing not null column in input: Index1. All not null columns should be initialized, code: 2032 2025-06-24T21:23:46.846419Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=7&id=OGU2NDg5ODktMjZjYjI1M2UtZmFhY2E2NTgtODg3NzIzZWY=, ActorId: [7:7519629710090871202:2290], ActorState: ExecuteState, TraceId: 01jyhx6pg08fenk4zcz0ttth96, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-24T21:23:47.287471Z node 7 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-24T21:23:47.305761Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7519629714385838965:2375], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
: Error: Tried to insert NULL value into NOT NULL column: Index1, code: 2031 2025-06-24T21:23:47.307449Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=7&id=OGU2NDg5ODktMjZjYjI1M2UtZmFhY2E2NTgtODg3NzIzZWY=, ActorId: [7:7519629710090871202:2290], ActorState: ExecuteState, TraceId: 01jyhx6phgc5h1vjrps5qrgzx3, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-24T21:23:48.075634Z node 7 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-24T21:23:48.083031Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7519629718680806284:2385], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
: Error: Tried to insert NULL value into NOT NULL column: Index1, code: 2031 2025-06-24T21:23:48.089393Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=7&id=OGU2NDg5ODktMjZjYjI1M2UtZmFhY2E2NTgtODg3NzIzZWY=, ActorId: [7:7519629710090871202:2290], ActorState: ExecuteState, TraceId: 01jyhx6pzw7gt56ts2p99a9cm7, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-24T21:23:48.820272Z node 7 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-24T21:23:48.824807Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7519629722975773607:2397], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
: Error: Tried to insert NULL value into NOT NULL column: Index1, code: 2031 2025-06-24T21:23:48.827220Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=7&id=OGU2NDg5ODktMjZjYjI1M2UtZmFhY2E2NTgtODg3NzIzZWY=, ActorId: [7:7519629710090871202:2290], ActorState: ExecuteState, TraceId: 01jyhx6qre2fdn3hjxsndax4bq, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: >> KqpQueryService::ExecStatsPlan [GOOD] >> KqpQueryService::ExecStatsAst ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnLargeData[Raw] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:01.187542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:01.187662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:01.187702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:01.187736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:01.187797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:01.187840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:01.187931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:01.188022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:01.188817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:01.189210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:01.270594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:01.270656Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:01.288244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:01.292909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:01.293125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as 
Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:01.302188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:01.302428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:01.303090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:01.303448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:01.306545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:01.306735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:01.307990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:01.308058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:01.308162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:01.308210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:01.308258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:01.308406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:01.315941Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:23:01.449088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:01.449310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:01.449516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:01.449567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:01.449794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 
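Regarding the NOT NULL column Index1 errors reported earlier in this log (compile-time code 2032 "Missing not null column in input" and code 2031 "Tried to insert NULL value into NOT NULL column"): a hedged YQL sketch of statements that would raise them. The table's actual schema is not shown in the log, so the table name and DDL below are assumptions for illustration only.

CREATE TABLE TestNotNullColumn (
    Key Uint64 NOT NULL,
    Index1 Uint64 NOT NULL,
    Value String,
    PRIMARY KEY (Key)
);

-- Leaving Index1 out of the column list fails at compile time with code 2032
-- ("Missing not null column in input: Index1. All not null columns should be initialized").
UPSERT INTO TestNotNullColumn (Key, Value) VALUES (1u, "foo");

-- Writing a NULL into Index1, whether literal or produced at runtime, fails with code 2031
-- ("Tried to insert NULL value into NOT NULL column: Index1").
UPSERT INTO TestNotNullColumn (Key, Index1, Value) VALUES (1u, NULL, "foo");

-- A valid write initializes every NOT NULL column.
UPSERT INTO TestNotNullColumn (Key, Index1, Value) VALUES (1u, 10u, "foo");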
2025-06-24T21:23:01.449868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:01.452575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:01.452816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:01.453039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:01.453118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:01.453159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:01.453206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:01.455527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:01.455604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:01.455645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:01.460134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:01.460216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:01.460283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:01.460338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:01.464110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:01.467026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:01.467269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 
72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:01.468318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:01.468455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:01.468508Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:01.468802Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:01.468857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:01.469025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:01.469135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:01.471548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:01.471617Z node 1 :FLAT_TX_SCHEMESHARD ... 
:3457:5418], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 0 Checksum: } REQUEST: PUT /data_00.csv?partNumber=100&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:20792 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 9B7A94DE-3F2F-4B1F-B2C6-804745D9C5D0 amz-sdk-request: attempt=1 content-length: 130 content-md5: Wyd1w7MZYbbZucaVvuRDAw== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv / partNumber=100&uploadId=1 / 130 2025-06-24T21:23:48.935353Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:592: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3458:5419], result# UploadPartResult { ETag: 5b2775c3b31961b6d9b9c695bee44303 } 2025-06-24T21:23:48.935597Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:3457:5418] 2025-06-24T21:23:48.935694Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:3458:5419], sender# [1:3457:5418], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv?partNumber=101&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:20792 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 88976806-D5E8-401E-AA55-1D09816241DD amz-sdk-request: attempt=1 content-length: 0 content-md5: 1B2M2Y8AsgTpgAmY7PhCfg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv / partNumber=101&uploadId=1 / 0 2025-06-24T21:23:48.939764Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:592: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3458:5419], result# UploadPartResult { ETag: d41d8cd98f00b204e9800998ecf8427e } 2025-06-24T21:23:48.939830Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:3458:5419], success# 1, error# , multipart# 1, uploadId# 1 2025-06-24T21:23:48.947658Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:526: [Export] [s3] Handle TEvDataShard::TEvS3Upload: self# [1:3458:5419], upload# { Id: 1 Status: Complete Error: (empty maybe) Parts: 
[a59dd9a97cf3685e69093fb2d96653c6,bdbb215613239cb3a835fee1fe7e7ca3,cb38dbc776d5763f1926dfb22d508c87,3c430d66d07a0a4b1fa889f321fce197,43baf91083f286b60bf15e7786459cd9,90b5581bef612fa3bf9b38b336af405f,fd4869c26a12d22ee79256d778954d04,a9459bc28198b0b6bd67732c492fd740,697a3f8386ea1ff4e327de943224cb1a,614da0b4ec9464e69cd0c59909e80fbb,9b94eb3f67aa4c8a0bcbf546833ed966,fd45c3afacec641ad19e59d2b31aeba4,fd69678aecbc149601f58cf13c64d33e,90c09ab4923bc9f97f825d36e32bf362,c1586416a281a4cca2b2b4e333d9b079,f31908576272623f9f0a19bf774cde8e,6fe3b42388304d2af07c629aeb683581,7bc90eec21ca5bb3648e6a48e83c5730,8e1dda26de1af89bdffe2eefdcebea1d,14dc42d90caa1575bbfffa9dc8f21d66,92efb2368eecb32d4075c09294fde0b7,98efff5f7c7ecb42e7af65142ce05af9,6206c81807b3b9283b0173ee2c682100,616b431b91aedc9de4593321eb42ba96,9ae4762563ffdec596cc9ca4cb8913e1,946ebf2d95b4796ea2faee21f017be79,45834a9948bb4ab8b62d1894156d13ed,6ad3fe7286856927c1e00422bc8da697,ef89464d20eae46829e1bf557e4d04ce,f128e5de32097d205453080b01c94ac3,c13e650ee2cfcecfdf4f578a2e5b1c2d,fc26314711b25d20fc654cf59301b806,56f6f2c574fba86496a87a7dd5fab46c,c7951eace72cfe0f14f808173e07bc64,3d9ad3340e58b973eaf8d4f14ba3b0f9,fc41d6fdfb52389dda8b26d7a0a3a889,9974b6ae96ffd0b756acb67088e890f9,cde8a5604010abe8fccfa9492144036f,0364e048eaac35c26d48b0c5072b5255,aac5a84927124d6ae4931e2650c80d9f,eab068fe4ca35c2f3e35890bd727eb4f,bc3646bdbcbc7f97dcddf2202ea9421f,6d3f63d672eda4a4617c9e7589a68bfc,0401bade6c3031b5be872238520b993a,1c6405688f86423480173e3e316a20bd,52395f68e877cbb8d7115a247331b0a7,4b0673ac18058554d2c53bf9f99b34b2,87bc1b9e650b31e81a9ad2531e3ef9da,b29053c8cd093c8b92ad3954c42cb7be,faf1084f6b33b00e2e822d1d3c3f0083,eedec03ee8d7eda4654db7206ad0889e,be4469dd028d5519a67098055f25513f,a7afa9827ec27c565cff1ed505a06f4b,91fe8109d2ad934c4364d90c29aaba71,73b81ea00e11db12d66497d30eb48446,cce69ef69777afeab34eefa515abc7f4,4e4ac1a421353964356400b8be8e21da,32cd6083b12660bcd4062af08d89eb05,71957b9db37811c7680638b82dc6384b,a8787e692c423a2dfa07dd261e72790a,283838ab16206b27738ea6653110f833,88bf084fb3029f0d5c0705eece930d70,1ed2f9f7221f1718b81fdf2d846347dd,406706cfbc454922dcad50b9c534b8d1,dbb606c993d798974ed4f5c9ebf195ca,1a4a3868dc6fa26c6b019d237f9ea6f4,82660a3c6b576a1b3fea925f3c179a2e,d393db2749ae42e854e85eeec2ea3592,b42c92ad14ee0e5351fec7e5a045a91b,2c7af27f9dc77efbcbe71c2d7997d6e9,278aba62ab1d9e3ff16df2d82ac5f5c7,6b8380404a7e7ec95ad5f3941d5d404c,c9813b9fc1d6b5087e64849076edd0f8,160785e4dac02a91c43a497ee59eea06,db529a9ba22f60f404031cfe85e966e9,9b70af168e2d3769bd8bc4dffa3202ea,9ac39c3843b6621ace44acf430a59e06,4603ff564a46e93951f246ed18926071,66b85f35ee76a7f71f50e9aad56758de,1665c284ad04d6b893b69372bf8fc6b9,8c1c27ec88fb52f06de6e7516a392672,0a5f992db51277a05ec12f0d6459ef21,8debe3a6023155561cb0890fc05bd7fb,938ece258b7596f8eea7e82bc2b8f88c,767ca0dcf0b154fa3c818044bbfc58fd,914cc7165d994bb05824332ac120446f,ab0ece250f5959a510170ee07aa21b5d,8bf4b44d67f062026b0010a8a0b39cc0,e0aa13fa8246e68c18905d3abadfc44d,27b021b75b6a95f63ea27f7ec238c05f,673e661e4cfea1e431678dd9881c2a8c,f101b34943f1831ae8c0b46ffcb1c2d6,562b32a8142b29c1a88e507ab1981a6b,fdea4c6fc2befb44614992ca8bf34b21,b7c8ec6acc45b037978482996e910b75,aec72fbd2e171b798900b22897d00941,710ef5b5e8eba750b6acc9b32dff42a3,821c7e22ef9c22098171e7f837dcfcc8,aecc9f6d0e6f54e938a10d40fda96d7b,5b2775c3b31961b6d9b9c695bee44303,d41d8cd98f00b204e9800998ecf8427e] } REQUEST: POST /data_00.csv?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:20792 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 
79DCD7AB-1033-4E94-BD7C-1D113729D871 amz-sdk-request: attempt=1 content-length: 11529 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /data_00.csv / uploadId=1 2025-06-24T21:23:48.967008Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:623: [Export] [s3] Handle TEvExternalStorage::TEvCompleteMultipartUploadResponse: self# [1:3458:5419], result# CompleteMultipartUploadResult { Bucket: Key: data_00.csv ETag: 5d8c28efc812b445ddd02900ff3ee599 } 2025-06-24T21:23:48.967498Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:3457:5418], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-24T21:23:48.984341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 310 RawX2: 4294969591 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-24T21:23:48.984432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T21:23:48.984613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 310 RawX2: 4294969591 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-24T21:23:48.984732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 310 RawX2: 4294969591 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-24T21:23:48.984811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:48.984860Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:48.984940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:23:48.985011Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T21:23:48.985228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:48.990159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:48.990893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:23:48.990961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:23:48.991089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:23:48.991133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:23:48.991235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:23:48.991279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:23:48.991326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T21:23:48.991445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:338:2315] message: TxId: 102 2025-06-24T21:23:48.991500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:23:48.991545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:23:48.991582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:23:48.991773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:23:48.996975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:23:48.997049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:3443:5405] TestWaitNotification: OK eventTxId 102 >> ColumnBuildTest::BaseCase >> KqpRanges::DuplicateCompositeKeyPredicate [GOOD] >> KqpRanges::DeleteNotFullScan+UseSink >> ColumnBuildTest::AlreadyExists >> KqpJoinOrder::FiveWayJoinWithComplexPreds2+ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCDS96 [GOOD] Test command err: Trying to start YDB, gRPC: 14948, MsgBus: 65370 2025-06-24T21:22:01.818382Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629263214785189:2168];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:01.819182Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0038f3/r3tmp/tmpwonhW9/pdisk_1.dat 2025-06-24T21:22:02.363691Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-24T21:22:02.363806Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:02.372615Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:22:02.409107Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:02.411766Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629263214785043:2079] 1750800121785424 != 1750800121785427 TServer::EnableGrpc on GrpcPort 14948, node 1 2025-06-24T21:22:02.547724Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:02.547754Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:02.547769Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:02.547871Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65370 2025-06-24T21:22:02.816822Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:65370 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:03.379428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:03.417014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:22:06.024248Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629284689622171:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:06.024585Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:06.024929Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629284689622183:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:06.029205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:06.045208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:22:06.051386Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629284689622185:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:22:06.119930Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629284689622236:2337] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:06.592503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:22:06.823251Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629263214785189:2168];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:06.827921Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:06.968620Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629284689622477:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:22:06.968844Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629284689622477:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:22:06.969144Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629284689622477:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:22:06.969285Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629284689622477:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:22:06.969405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629284689622477:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:22:06.969504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629284689622477:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:22:06.969634Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629284689622477:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:22:06.969767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629284689622477:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:22:06.969902Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629284689622477:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:22:06.970037Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629284689622477:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:22:06.970150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629284689622477:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:22:07.001114Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519629284689622487:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:22:07.001177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519629284689622487:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:22:07.001388Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519629284689622487:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:22:07.001517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519629284689622487:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:22:07.001655Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519629284689622487:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:22:07.001790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519629284689622487:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:22:07.001920Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519629284689622487:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_regis ... 
07089Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.611764Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039243;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.612518Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039255;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.613328Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.613891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.618347Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.618863Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039255;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.618973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039259;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.619482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039187;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.625243Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039187;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.625981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039346;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.630961Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039259;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.631653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.633003Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039346;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.633545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039310;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.638511Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.638988Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039310;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.639312Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.639907Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039235;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.644936Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.644935Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039235;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.645485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039201;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.645933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039247;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.651434Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039201;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.652207Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039247;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.652648Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.652810Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039227;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.659132Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039227;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.659133Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.659875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039261;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.659884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.665766Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039261;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.665766Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.666700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039253;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.666836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.672862Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039253;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.672954Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.673612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039295;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.673823Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039251;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:29.679731Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039251;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.682611Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039295;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-24T21:23:29.817937Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx56zf1577ycqjmn1gjtf6", SessionId: ydb://session/3?node_id=1&id=Nzg5YTM1MmItYzYzZmIxZmYtY2E0NDQwMGEtM2IxMmUyNTY=, Slow query, duration: 31.657978s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:23:30.372244Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:23:30.372303Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-24T21:23:30.372733Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519629516617892916:7839];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-24T21:23:30.374502Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> KqpNewEngine::ReadDifferentColumnsPk [GOOD] >> KqpNewEngine::PushFlatmapInnerConnectionsToStageInput >> KqpSort::TopSortResults [GOOD] >> KqpSort::UnionAllSortLimit >> KqpNewEngine::ComplexLookupLimit [GOOD] |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> TConsoleTests::TestAttributes [GOOD] >> TConsoleTests::TestAlterTenantTooManyStorageResourcesForRunning ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::ComplexLookupLimit [GOOD] Test command err: Trying to start YDB, gRPC: 18649, MsgBus: 9710 2025-06-24T21:22:54.647630Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629490657655150:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:54.647674Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003179/r3tmp/tmpUuQtdH/pdisk_1.dat 2025-06-24T21:22:55.289452Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:55.289590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:55.293004Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:55.296030Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629490657655046:2079] 1750800174605945 != 1750800174605948 2025-06-24T21:22:55.329935Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18649, node 1 2025-06-24T21:22:55.407869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: 
distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:55.407903Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:55.407912Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:55.408062Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:22:55.723387Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9710 TClient is connected to server localhost:9710 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:56.305210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:56.320918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:22:56.341860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:56.565239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:56.769928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:22:56.869701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:58.717459Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629507837525867:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:58.717595Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:59.046235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:59.120032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:59.212772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:59.247240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:59.291999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:59.376882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:59.446474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:59.560267Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629512132493834:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:59.560390Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:59.567312Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629512132493839:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:59.576003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:59.603525Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629512132493841:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:59.643649Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629490657655150:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:59.663886Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:59.679006Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629512132493892:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 14786, MsgBus: 4626 2025-06-24T21:23:02.191453Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629526498107558:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:02.191502Z node 2 :METADATA_PROVIDER ERROR: log.cpp:78 ... Issues: { message: "Distributed transaction aborted due to commit failure" issue_code: 2011 severity: 1 } Trying to start YDB, gRPC: 24139, MsgBus: 23685 2025-06-24T21:23:43.130915Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519629703238194188:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:43.131886Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003179/r3tmp/tmpvYey1U/pdisk_1.dat 2025-06-24T21:23:43.321220Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519629703238194163:2079] 1750800223123071 != 1750800223123074 2025-06-24T21:23:43.328021Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:43.335182Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:43.335291Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:43.340856Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24139, node 7 2025-06-24T21:23:43.407804Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:43.407831Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:43.407843Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:43.408008Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23685 2025-06-24T21:23:44.161213Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23685 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:44.364795Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:44.399355Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:44.521897Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:44.764625Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:44.864506Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:23:48.134138Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519629703238194188:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:48.134232Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:48.265265Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629724713032266:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:48.265430Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:48.350070Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:48.397595Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:48.450772Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:48.499336Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:48.550759Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:48.641609Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:48.717675Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:48.841920Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629724713032933:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:48.842042Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:48.842744Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629724713032938:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:48.849093Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:48.865186Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519629724713032940:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:23:48.968267Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519629724713032991:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:50.673264Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpNotNullColumns::JoinRightTableWithNotNullColumns-StreamLookup [GOOD] >> KqpNotNullColumns::OptionalParametersDataQuery >> KqpNewEngine::JoinIdxLookupWithPredicate [GOOD] >> KqpNewEngine::ItemsLimit >> TConsoleTests::TestAuthorizationExtSubdomain [GOOD] >> TConsoleTests::TestAttributesExtSubdomain >> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata [GOOD] >> ColumnBuildTest::CancelBuild [GOOD] >> KqpJoinOrder::CanonizedJoinOrderTPCH9 [GOOD] >> KqpNewEngine::MultiEffectsOnSameTable [GOOD] >> KqpNewEngine::LookupColumns >> KqpQueryService::TableSink_OltpUpdate [GOOD] >> KqpQueryService::TableSink_Oltp_Replace+UseSink >> SystemView::PartitionStatsFields [GOOD] >> SystemView::ConcurrentScans >> ColumnBuildTest::AlreadyExists [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithComplexPreds2+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 9778, MsgBus: 23721 2025-06-24T21:22:10.418172Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629300288126568:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:10.418595Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0038e9/r3tmp/tmp2t7rkZ/pdisk_1.dat 2025-06-24T21:22:11.080083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:11.080188Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:11.090035Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:22:11.160427Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:11.163355Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629300288126380:2079] 1750800130346304 != 1750800130346307 TServer::EnableGrpc on GrpcPort 9778, node 1 2025-06-24T21:22:11.398294Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:22:11.500461Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:11.500499Z 
node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:11.500507Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:11.501078Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23721 TClient is connected to server localhost:23721 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:12.510871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:12.535888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:22:15.351787Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629321762963508:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:15.351930Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:15.359280Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629321762963520:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:15.370112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:15.375624Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629300288126568:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:15.375782Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:15.392032Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629321762963522:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:22:15.456095Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629321762963573:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:15.906527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:22:16.218648Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629326057931125:2321];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:22:16.218885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629326057931125:2321];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:22:16.224369Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629326057931126:2322];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:22:16.224436Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629326057931126:2322];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:22:16.224646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629326057931126:2322];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:22:16.224763Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629326057931126:2322];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:22:16.224857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629326057931126:2322];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:22:16.224950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629326057931126:2322];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:22:16.225114Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629326057931126:2322];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:22:16.225244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037900;self_id=[1:7519629326057931126:2322];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:22:16.225354Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629326057931126:2322];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:22:16.225452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629326057931126:2322];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:22:16.225547Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629326057931126:2322];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:22:16.232452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629326057931125:2321];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:22:16.232715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629326057931125:2321];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:22:16.232821Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629326057931125:2321];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:22:16.232912Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629326057931125:2321];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:22:16.233004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629326057931125:2321];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:22:16.233094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519629326057931125:2321];tablet ... 
2909Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.803623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.803940Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.809412Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.810101Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.813751Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.814425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.815765Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.816251Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.820572Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.821250Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.821290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.821792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.827784Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.827784Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.828503Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.828522Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.834442Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.834442Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.835195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.835531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.841181Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.841181Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.841989Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.848956Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.849838Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.850475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.856084Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.856720Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.856875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.857300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.863264Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.863309Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.864061Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.864195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.869337Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.870115Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.872933Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.874373Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:36.876401Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:36.880886Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:37.052277Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx5bmwe05b1shbjerwr2wp", SessionId: ydb://session/3?node_id=1&id=MjBjZWVmZDItMTBhODlkZmQtNjc5OGI5YTYtNzgzMDUwZGI=, Slow query, duration: 34.111705s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:23:37.413607Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:37.414144Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:37.414876Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519629613820791845:10518];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-24T21:23:37.415373Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpQueryServiceScripts::ExecuteMultiScript [GOOD] >> KqpQueryServiceScripts::ExecuteScriptPg >> KqpNewEngine::MultipleBroadcastJoin [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::CancelBuild [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:51.117165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:51.117250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:51.117286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:51.117317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:51.117371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:51.117393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:51.117433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:51.117518Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:51.118115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:51.118441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:51.202862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:51.202931Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:51.222979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:51.223697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:51.223850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:51.232050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:51.232270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:51.232917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:51.233264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:51.236361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:51.236563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:51.237739Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:51.237808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:51.238021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:51.238072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:51.238124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:51.238215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:51.245529Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:23:51.394921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:51.395292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:51.395562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:51.395610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:51.395853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:51.395947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:51.399623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:51.399876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:51.400088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:51.400140Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:51.400179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:51.400214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:51.408175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:51.408251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:51.408314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:51.410490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:51.410546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-24T21:23:51.410601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:51.410669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:51.414458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:51.416861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:51.417037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:51.417974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:51.418115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:51.418197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:51.418487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:51.418539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:51.418727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:51.418850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:51.421161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:51.421207Z node 1 :FLAT_TX_SCHEMESHARD ... 
4T21:23:55.148092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710761:0 HandleReply TEvOperationPlan: step# 5000007 2025-06-24T21:23:55.148148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710761:0 128 -> 240 2025-06-24T21:23:55.156240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-24T21:23:55.156311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 281474976710761:0 ProgressState 2025-06-24T21:23:55.156408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-24T21:23:55.156460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-24T21:23:55.156506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-24T21:23:55.156531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-24T21:23:55.156560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: true 2025-06-24T21:23:55.156628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:127:2151] message: TxId: 281474976710761 2025-06-24T21:23:55.156669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-24T21:23:55.156699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710761:0 2025-06-24T21:23:55.156724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710761:0 2025-06-24T21:23:55.156795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 12 FAKE_COORDINATOR: Erasing txId 281474976710761 2025-06-24T21:23:55.162429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-24T21:23:55.162516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976710761 2025-06-24T21:23:55.162588Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 102, txId# 281474976710761 2025-06-24T21:23:55.162683Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Cancellation_Unlocking, IsBroken: 0, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [1:1169:3020], AlterMainTableTxId: 281474976710757, AlterMainTableTxStatus: StatusAccepted, 
AlterMainTableTxDone: 1, LockTxId: 281474976710758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, txId# 281474976710761 2025-06-24T21:23:55.167805Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancellation_Unlocking 2025-06-24T21:23:55.167915Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancellation_Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Cancellation_Unlocking, IsBroken: 0, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [1:1169:3020], AlterMainTableTxId: 281474976710757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976710758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T21:23:55.167981Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Cancellation_Unlocking to Cancelled 2025-06-24T21:23:55.172111Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancelled 2025-06-24T21:23:55.172229Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancelled TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Cancelled, IsBroken: 0, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [1:1169:3020], AlterMainTableTxId: 281474976710757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976710758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T21:23:55.172275Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:334: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo 
SendNotifications: : id# 102, subscribers count# 1 2025-06-24T21:23:55.172456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:23:55.172499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:1193:3044] TestWaitNotification: OK eventTxId 102 2025-06-24T21:23:55.174799Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-06-24T21:23:55.175182Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings { source_path: "/MyRoot/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings { source_path: "/MyRoot/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } EndTime { } } 2025-06-24T21:23:55.177531Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:23:55.177792Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 282us result status StatusSuccess 2025-06-24T21:23:55.178233Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 4 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: "DefaultValue" Type: "Uint64" TypeId: 4 Id: 4 NotNull: false DefaultFromLiteral { type { type_id: UINT64 } value { uint64_value: 10 } } IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 4 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 10 RangeReadRows: 0 
StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 10 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpNewEngine::LocksInRoTx [GOOD] >> KqpNewEngine::LiteralKeys >> SystemView::TopPartitionsByCpuFields [GOOD] >> SystemView::TopPartitionsByCpuFollowers ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata [GOOD] Test command err: Trying to start YDB, gRPC: 10103, MsgBus: 23626 2025-06-24T21:23:19.067738Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629597982650384:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:19.067839Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00369f/r3tmp/tmpgENlWi/pdisk_1.dat 2025-06-24T21:23:19.568725Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:19.568841Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:19.576455Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:19.615738Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629597982650360:2079] 1750800199063138 != 1750800199063141 2025-06-24T21:23:19.676863Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10103, node 1 2025-06-24T21:23:20.147194Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:20.147615Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:20.147635Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:23:20.147644Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:20.147808Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23626 TClient is connected to server localhost:23626 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:21.152956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:21.204534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:21.230206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:21.494409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:21.729764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:23:21.813062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:23.050831Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629615162521169:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:23.050949Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:23.667937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.755064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.795960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.836026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.888271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.953082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.993956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:24.068198Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629597982650384:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:24.068249Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:24.068332Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519629619457489128:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:24.068385Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:24.068641Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629619457489133:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:24.071693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:24.082162Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629619457489135:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:23:24.178088Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629619457489189:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:25.372747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/ac ... db/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:41.304195Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:41.314863Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:41.410003Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:41.636958Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:41.715616Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:44.309571Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519629706841551775:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:44.309663Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:44.378423Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:44.465989Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:44.526178Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:44.556453Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:44.595226Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:44.669161Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:44.707784Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:44.779601Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519629706841552440:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:44.779750Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:44.783103Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519629706841552445:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:44.791357Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:44.806372Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-24T21:23:44.809889Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519629706841552447:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:23:44.890642Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519629706841552498:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:45.259763Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519629689661681071:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:45.260004Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:46.250214Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:46.885769Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:23:47.606414Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710682:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:48.262571Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710687:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:48.897977Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710694:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:23:49.537687Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710697:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:23:50.228068Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:50.309378Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:23:54.213366Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710737:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::AlreadyExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:52.386504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:52.386594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:52.386630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:52.386668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:52.386726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:52.386765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:52.386828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:52.386916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:52.387689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:52.388055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:52.472073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:52.472119Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:52.491804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:52.492180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:52.492367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:52.514986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:52.515234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:52.515934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:52.516241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:52.519462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:52.519639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:52.520807Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:52.520868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:52.521081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:52.521125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:52.521178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:52.521258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:52.527626Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:23:52.672927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:52.673242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at 
schemeshard: 72057594046678944 2025-06-24T21:23:52.673486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:52.673549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:52.673776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:52.673852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:52.684075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:52.684310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:52.684528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:52.684581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:52.684618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:52.684654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:52.688358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:52.688477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:52.688548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:52.695119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:52.695207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:52.695257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:52.695330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:52.705012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to 
coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:52.707869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:52.708054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:52.708961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:52.709114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:52.709168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:52.709491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:52.709551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:52.709722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:52.709804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:52.712093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:52.712141Z node 1 :FLAT_TX_SCHEMESHARD ... 
LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409551 SchemeShard: 72075186233409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SharedHive: 72057594037968897 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72075186233409549, at schemeshard: 72075186233409549 2025-06-24T21:23:55.595596Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__create.cpp:23: TIndexBuilder::TXTYPE_CREATE_INDEX_BUILD: DoExecute TxId: 106 DatabaseName: "/MyRoot/ServerLessDB" Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } 2025-06-24T21:23:55.598490Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 AlterMainTable 2025-06-24T21:23:55.598591Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 AlterMainTable TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1147:3015], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T21:23:55.598633Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.cpp:181: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: AllocateTxId 106 2025-06-24T21:23:55.598812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 106, at schemeshard: 72075186233409549 2025-06-24T21:23:55.598888Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2216: 
TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvAllocateResult, id# 106, txId# 281474976725757 2025-06-24T21:23:55.598953Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2219: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvAllocateResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1147:3015], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, txId# 281474976725757 2025-06-24T21:23:55.601262Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 AlterMainTable 2025-06-24T21:23:55.601346Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 AlterMainTable TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1147:3015], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T21:23:55.601650Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:428: AlterMainTablePropose 106 AlterMainTable Transaction { WorkingDir: "/MyRoot/ServerLessDB" OperationType: ESchemeOpAlterTable AlterTable { Name: "Table" Columns { Name: "value" Type: "Uint64" DefaultFromLiteral { type { type_id: UINT64 } value { uint64_value: 10 } } IsBuildInProgress: true } } Internal: true } TxId: 281474976725757 TabletId: 72075186233409549 FailOnExist: true 2025-06-24T21:23:55.604308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/ServerLessDB" OperationType: ESchemeOpAlterTable AlterTable { Name: "Table" Columns { Name: "value" Type: "Uint64" DefaultFromLiteral { type { type_id: UINT64 } value { uint64_value: 10 } } IsBuildInProgress: true } } Internal: true } TxId: 281474976725757 TabletId: 72075186233409549 FailOnExist: true , at schemeshard: 72075186233409549 2025-06-24T21:23:55.604499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:508: TAlterTable Propose, 
path: /MyRoot/ServerLessDB/Table, pathId: , opId: 281474976725757:0, at schemeshard: 72075186233409549 2025-06-24T21:23:55.604817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976725757:1, propose status:StatusInvalidParameter, reason: Cannot alter type for column 'value', at schemeshard: 72075186233409549 2025-06-24T21:23:55.615637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976725757, response: Status: StatusInvalidParameter Reason: "Cannot alter type for column \'value\'" TxId: 281474976725757 SchemeshardId: 72075186233409549, at schemeshard: 72075186233409549 2025-06-24T21:23:55.615904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976725757, database: /MyRoot/ServerLessDB, subject: , status: StatusInvalidParameter, reason: Cannot alter type for column 'value', operation: ALTER TABLE, path: /MyRoot/ServerLessDB/Table 2025-06-24T21:23:55.616101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6777: Handle: TEvModifySchemeTransactionResult: txId# 281474976725757, status# StatusInvalidParameter 2025-06-24T21:23:55.616177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6779: Message: Status: StatusInvalidParameter Reason: "Cannot alter type for column \'value\'" TxId: 281474976725757 SchemeshardId: 72075186233409549 2025-06-24T21:23:55.616291Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2053: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvModifySchemeTransactionResult, id# 106, cookie: 106, record: Status: StatusInvalidParameter Reason: "Cannot alter type for column \'value\'" TxId: 281474976725757 SchemeshardId: 72075186233409549, status: StatusInvalidParameter 2025-06-24T21:23:55.616427Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2058: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvModifySchemeTransactionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1147:3015], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, cookie: 106, record: Status: StatusInvalidParameter Reason: "Cannot alter type for column \'value\'" TxId: 281474976725757 SchemeshardId: 72075186233409549, status: StatusInvalidParameter 2025-06-24T21:23:55.617622Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:2027: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuilder::TTxReply: ReplyOnCreation, BuildIndexId: 106, status: BAD_REQUEST, error: At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column 'value', replyTo: [1:1147:3015], message: TxId: 106 Status: BAD_REQUEST Issues { message: "At AlterMainTable state got unsuccess 
propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } IndexBuild { Id: 106 Issues { message: "At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } State: STATE_PREPARING Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } } BUILDCOLUMN RESPONSE CREATE: NKikimrIndexBuilder.TEvCreateResponse TxId: 106 Status: BAD_REQUEST Issues { message: "At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } IndexBuild { Id: 106 Issues { message: "At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } State: STATE_PREPARING Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } } >> KqpMergeCn::SortBy_PK_Uint64_Desc [GOOD] >> KqpMergeCn::SortBy_Int32 >> ColumnBuildTest::BuildColumnDoesnotRestoreDeletedRows [GOOD] >> ColumnBuildTest::BaseCase [GOOD] >> KqpQueryService::ExecStatsAst [GOOD] >> KqpQueryService::DmlNoTx ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::MultipleBroadcastJoin [GOOD] Test command err: Trying to start YDB, gRPC: 65048, MsgBus: 28146 2025-06-24T21:22:53.828593Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629487206534829:2156];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:53.839578Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003181/r3tmp/tmpHdRr1u/pdisk_1.dat 2025-06-24T21:22:54.367925Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:54.371197Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629487206534711:2079] 1750800173759389 != 1750800173759392 2025-06-24T21:22:54.371638Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:54.371921Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:54.380061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 65048, node 1 2025-06-24T21:22:54.651376Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:54.651399Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:54.651408Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T21:22:54.651543Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:22:54.867237Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28146 TClient is connected to server localhost:28146 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:55.518928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:55.606715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:55.904799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:56.119237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:56.257309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:22:57.983869Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629504386405528:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:57.984021Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:58.294185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:58.334774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:58.381576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:58.447817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:58.487455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:58.552709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:58.604167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:58.685468Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629508681373482:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:58.685587Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:58.685829Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629508681373487:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:58.689618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:58.703267Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629508681373489:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:58.787037Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629508681373540:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:58.820599Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629487206534829:2156];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:58.820675Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 28834, MsgBus: 29423 2025-06-24T21:23:01.684535Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629523236018837:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:01.684596Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPa ... 220Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:44.459260Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519629705753617960:2079] 1750800224092417 != 1750800224092420 2025-06-24T21:23:44.476906Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63950, node 7 2025-06-24T21:23:44.563758Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:44.563791Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:44.563806Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:44.563983Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17342 2025-06-24T21:23:45.162326Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17342 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:45.465617Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:45.487510Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:23:45.502884Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:45.608861Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:45.845169Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:45.971119Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:23:49.111258Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519629705753617986:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:49.111344Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:49.637087Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629727228456073:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:49.637229Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:49.754467Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:49.862321Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:49.915883Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:49.965853Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:50.019113Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:50.080992Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:50.161609Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:50.320652Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629731523424031:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:50.320787Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:50.321077Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629731523424036:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:50.326647Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:50.345583Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519629731523424038:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:23:50.403324Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519629731523424089:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:52.203600Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:52.266785Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:52.318930Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) [] >> KqpQueryServiceScripts::ExecuteScriptStatsProfile [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithCancelAfter >> SystemView::PartitionStatsOneSchemeShardDataQuery [GOOD] >> SystemView::PgTablesOneSchemeShardDataQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::BaseCase [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:51.937409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:51.937506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:51.937545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:51.937580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:51.937636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:51.937684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 
10000 2025-06-24T21:23:51.937743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:51.937812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:51.938513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:51.938883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:52.026155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:52.026222Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:52.046181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:52.046618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:52.046783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:52.054938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:52.055160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:52.055856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:52.056193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:52.059070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:52.059284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:52.060514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:52.060584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:52.060803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:52.060855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:52.060915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:52.061017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:52.068219Z node 1 
:HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:23:52.211584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:52.211892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:52.212168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:52.212220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:52.212504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:52.212586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:52.215065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:52.215319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:52.215583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:52.215639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:52.215679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:52.215714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:52.217899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:52.217977Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:52.218029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:52.220153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress 
Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:52.220205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:52.220249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:52.220331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:52.224409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:52.226808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:52.227023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:52.228052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:52.228190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:52.228238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:52.228523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:52.228571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:52.228761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:52.228848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:52.231321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:52.231388Z node 1 :FLAT_TX_SCHEMESHARD ... 
2 } ExecLevel: 0 TxId: 281474976725761 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72075186233409550 2025-06-24T21:23:56.777576Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking 2025-06-24T21:23:56.777680Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1149:3017], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-24T21:23:56.777918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976725761:4294967295 from tablet: 72075186233409549 to tablet: 72075186233409550 cookie: 0:281474976725761 msg type: 269090816 2025-06-24T21:23:56.777995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976725761, partId: 4294967295, tablet: 72075186233409550 2025-06-24T21:23:56.778104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976725761, at schemeshard: 72075186233409549 2025-06-24T21:23:56.778127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 0/1, is published: true 2025-06-24T21:23:56.778161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976725761, at schemeshard: 72075186233409549 2025-06-24T21:23:56.790844Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [1:1815:3675], Recipient [1:757:2644]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186233409549 ClientId: [1:1815:3675] ServerId: [1:1817:3677] } 2025-06-24T21:23:56.790909Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T21:23:56.859253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 650, transactions count in step: 1, at schemeshard: 72075186233409549 2025-06-24T21:23:56.859412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976725761 AckTo { RawX1: 0 RawX2: 0 } } Step: 650 MediatorID: 72075186233409551 TabletID: 72075186233409549, at schemeshard: 72075186233409549 2025-06-24T21:23:56.859471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: 
[72075186233409549] TDropLock TPropose opId# 281474976725761:0 HandleReply TEvOperationPlan: step# 650 2025-06-24T21:23:56.859516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976725761:0 128 -> 240 2025-06-24T21:23:56.868680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976725761:0, at schemeshard: 72075186233409549 2025-06-24T21:23:56.868757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72075186233409549] TDone opId# 281474976725761:0 ProgressState 2025-06-24T21:23:56.868848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976725761:0 progress is 1/1 2025-06-24T21:23:56.868876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-24T21:23:56.868912Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976725761:0 progress is 1/1 2025-06-24T21:23:56.868938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-24T21:23:56.868971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 1/1, is published: true 2025-06-24T21:23:56.869039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:572:2508] message: TxId: 281474976725761 2025-06-24T21:23:56.869084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-24T21:23:56.869115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976725761:0 2025-06-24T21:23:56.869140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976725761:0 2025-06-24T21:23:56.869231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 3 2025-06-24T21:23:56.887944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976725761 2025-06-24T21:23:56.888035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976725761 2025-06-24T21:23:56.888108Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 106, txId# 281474976725761 2025-06-24T21:23:56.888205Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1149:3017], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, 
InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }}, txId# 281474976725761 2025-06-24T21:23:56.894044Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking 2025-06-24T21:23:56.894164Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1149:3017], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-24T21:23:56.894240Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-24T21:23:56.897732Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Done 2025-06-24T21:23:56.897840Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Done TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1149:3017], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-24T21:23:56.897897Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:334: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 106, subscribers count# 1 2025-06-24T21:23:56.898094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for 
txId 106: got EvNotifyTxCompletionResult 2025-06-24T21:23:56.898136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:1167:3035] TestWaitNotification: OK eventTxId 106 2025-06-24T21:23:56.900815Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot/ServerLessDB" IndexBuildId: 106 2025-06-24T21:23:56.901145Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::BuildColumnDoesnotRestoreDeletedRows [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:49.943334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:49.943444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:49.943486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:49.943550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:49.943598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:49.943627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:49.943700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:49.943789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:49.944588Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:49.944967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:50.037243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:50.037303Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:50.053799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:50.054227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:50.054397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:50.063015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:50.063237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:50.063998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:50.064332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:50.067840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:50.068033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:50.069255Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:50.069323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:50.069511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:50.069569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:50.070345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:50.070479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:50.077647Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:23:50.215738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-24T21:23:50.216026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:50.216300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:50.216393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:50.216621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:50.216689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:50.218957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:50.219169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:50.219399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:50.219457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:50.219496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:50.219527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:50.221650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:50.221703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:50.221760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:50.223696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:50.223744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:50.223786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:50.223861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:50.228323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:50.230830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:50.231013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:50.231973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:50.232107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:50.232155Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:50.232533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:50.232603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:50.232770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:50.232843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:50.235224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:50.235274Z node 1 :FLAT_TX_SCHEMESHARD ... 
lMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'28))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.606578Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2058:3918], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'29))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.615735Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2059:3919], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'30))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.625157Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2060:3920], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'31))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.634725Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2061:3921], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'32))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.644029Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2062:3922], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'33))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.652091Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2063:3923], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'34))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.660562Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2064:3924], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'35))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.668687Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2065:3925], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'36))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.676923Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2066:3926], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let 
key \'(\'(\'key (Uint64 \'37))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.685274Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2067:3927], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'38))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.693758Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2068:3928], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'39))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.702041Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2069:3929], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'40))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.710275Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2070:3930], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'41))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.719511Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2071:3931], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'42))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.727650Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2072:3932], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'43))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.736595Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2073:3933], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'44))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.745085Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2074:3934], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'45))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.753881Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2075:3935], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'46))))\n (let 
select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.762780Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2076:3936], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'47))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.771423Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2077:3937], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'48))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.780228Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2078:3938], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'49))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-24T21:23:56.788652Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2079:3939], Recipient [1:757:2644]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'50))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.7%| [TA] $(B)/ydb/core/tx/schemeshard/ut_column_build/test-results/unittest/{meta.json ... results_accumulator.log} |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> KqpSort::ReverseRangeLimitOptimized [GOOD] >> KqpSort::TopParameter >> BasicStatistics::TwoDatabases |96.7%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_column_build/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH9 [GOOD] Test command err: Trying to start YDB, gRPC: 3642, MsgBus: 11732 2025-06-24T21:22:11.453205Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629306172189723:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:11.453274Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0038db/r3tmp/tmphIfjYp/pdisk_1.dat 2025-06-24T21:22:12.123298Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629306172189537:2079] 1750800131354314 != 1750800131354317 2025-06-24T21:22:12.166728Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:12.168724Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:12.171313Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:12.185854Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3642, node 1 2025-06-24T21:22:12.562497Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:22:12.571707Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:12.571738Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:12.571749Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:12.571862Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11732 TClient is connected to server localhost:11732 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:22:13.754705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:16.290523Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629327647026660:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:16.290611Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629327647026672:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:16.290644Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:16.294841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:16.327371Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629327647026674:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:22:16.431702Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629327647026725:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:16.447263Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629306172189723:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:16.447318Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:16.875701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:22:17.196492Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629331941994227:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:22:17.196691Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629331941994227:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:22:17.196933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629331941994227:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:22:17.197043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629331941994227:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:22:17.197136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629331941994227:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:22:17.197273Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629331941994227:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:22:17.197396Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629331941994227:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:22:17.197507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629331941994227:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:22:17.197604Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629331941994227:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:22:17.197708Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629331941994227:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:22:17.197803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519629331941994227:2319];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:22:17.199173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519629331941994230:2322];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:22:17.199210Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519629331941994230:2322];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:22:17.199383Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519629331941994230:2322];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:22:17.199535Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519629331941994230:2322];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:22:17.199645Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519629331941994230:2322];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:22:17.199745Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519629331941994230:2322];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:22:17.199848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519629331941994230:2322];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:22:17.199978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519629331941994230:2322];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:22:17.200101Z node 1 :TX_COLUMN ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:34.914630Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039240;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:34.915360Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:34.917727Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039280;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:34.919093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039246;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:34.921200Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:34.921886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:34.928298Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:34.929050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039220;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:34.933435Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039246;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:34.934023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039228;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:34.934682Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039220;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:34.935606Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039192;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:34.944827Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039192;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:34.945451Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039310;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 
2025-06-24T21:23:34.945490Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039228;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:34.946045Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039248;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:34.955595Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039310;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:34.956206Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039298;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:34.958741Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039248;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:34.959445Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039300;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:34.969708Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039300;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:34.970271Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039274;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:34.975627Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039298;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:34.976269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039330;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:34.977498Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039274;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:34.978241Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039268;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:34.983040Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039330;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:34.984995Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039268;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:34.985088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039258;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:34.985628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039276;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:34.992923Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039276;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:34.997434Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039258;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:34.998028Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039242;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:35.000072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039226;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:35.009926Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039242;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:35.009967Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039226;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:35.010529Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039278;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:35.011387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:35.018207Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039278;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:35.018230Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:35.155060Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039216;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:35.170846Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039216;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:35.344997Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx5d400vec42wqsf303ng7", SessionId: 
ydb://session/3?node_id=1&id=Mzg2OGM2YmEtY2ZhODA0YWQtY2JhYTNmOTUtNmQyOTM5NWQ=, Slow query, duration: 30.896344s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:23:35.706426Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:35.706430Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:35.707446Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> ColumnStatistics::CountMinSketchServerlessStatistics >> BasicStatistics::NotFullStatisticsColumnshard >> KqpRanges::DeleteNotFullScan+UseSink [GOOD] >> KqpRanges::DeleteNotFullScan-UseSink >> SystemView::AuthGroups_ResultOrder [GOOD] >> SystemView::AuthGroups_TableRange |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> HttpRequest::ProbeServerless |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::NotFullStatisticsDatashard |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> TConsoleTests::TestAttributesExtSubdomain [GOOD] >> TConsoleTests::TestDatabaseQuotas >> KqpNewEngine::PushFlatmapInnerConnectionsToStageInput [GOOD] >> KqpNewEngine::PushPureFlatmapInnerConnectionsToStage |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> KqpSort::UnionAllSortLimit [GOOD] >> HttpRequest::Status |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::SimpleGlobalIndex |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest 
>> KqpNamedExpressions::NamedExpressionRandomUpsertIndex+UseSink-UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex-UseSink+UseDataQuery >> HttpRequest::AnalyzeServerless >> HttpRequest::Probe >> KqpJoinOrder::ShuffleEliminationTpcdsMapJoinBug [GOOD] >> KqpQueryService::TableSink_Oltp_Replace+UseSink [GOOD] >> SystemView::ConcurrentScans [GOOD] >> SystemView::PDisksFields >> TBackupTests::ShouldSucceedOnLargeData[Zstd] [GOOD] >> DbCounters::TabletsSimple [GOOD] >> LabeledDbCounters::OneTablet >> ColumnStatistics::CountMinSketchStatistics ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpSort::UnionAllSortLimit [GOOD] Test command err: Trying to start YDB, gRPC: 12138, MsgBus: 29011 2025-06-24T21:22:55.807898Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629494264187060:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:55.835481Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00316e/r3tmp/tmpqFDYb8/pdisk_1.dat 2025-06-24T21:22:56.458922Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:56.470150Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629494264187034:2079] 1750800175802254 != 1750800175802257 2025-06-24T21:22:56.483426Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:56.483534Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:56.489109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12138, node 1 2025-06-24T21:22:56.655793Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:56.655822Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:56.655830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:56.655974Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:22:56.827475Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29011 TClient is connected to server localhost:29011 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:57.522588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:57.584366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:57.794064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:58.013980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:58.114247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:59.805169Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629511444057849:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:59.805297Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:00.145005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:00.224253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:00.282254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:00.322277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:00.382954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:00.451074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:00.504780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:00.590643Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629515739025807:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:00.590725Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:00.590936Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629515739025812:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:00.595755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:00.613208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-24T21:23:00.613502Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629515739025814:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:23:00.687482Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629515739025865:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:00.815259Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629494264187060:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:00.815327Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 8290, MsgBus: 16031 2025-06-24T21:23:03.823570Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629531003895179:2240];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00 ... id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:46.432056Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 10147, MsgBus: 2185 2025-06-24T21:23:53.080261Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519629746158407793:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:53.080669Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00316e/r3tmp/tmpfG6Qql/pdisk_1.dat 2025-06-24T21:23:53.400609Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:53.400727Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:53.403498Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:53.420869Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10147, node 7 2025-06-24T21:23:53.515949Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:53.515976Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:53.515988Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:53.516205Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2185 
2025-06-24T21:23:54.125549Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2185 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:54.329630Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:54.337728Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:54.347083Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:54.422888Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:54.708236Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:54.804905Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:23:58.080565Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519629746158407793:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:58.080656Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:58.151451Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629767633245856:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:58.151618Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:58.249426Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:58.299129Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:58.382378Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:58.434275Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:58.493282Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:58.585949Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:58.652041Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:58.764866Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629767633246519:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:58.765040Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:58.765355Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629767633246524:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:58.771347Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:58.785552Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519629767633246526:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:23:58.872166Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519629767633246577:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_Oltp_Replace+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 21415, MsgBus: 15714 2025-06-24T21:23:44.915443Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629704084771831:2087];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:44.925099Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00310b/r3tmp/tmpF9tUhI/pdisk_1.dat 2025-06-24T21:23:45.343574Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629704084771767:2079] 1750800224886592 != 1750800224886595 2025-06-24T21:23:45.347594Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:45.394091Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:45.394211Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 21415, node 1 2025-06-24T21:23:45.396050Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:45.520608Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:45.520642Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:45.520656Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:45.520775Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15714 2025-06-24T21:23:45.925304Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15714 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:46.298599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:46.323733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:48.509397Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629721264641595:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:48.509528Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:48.782277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:48.946654Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629721264641700:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:48.946722Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:48.946954Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629721264641705:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:48.951349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:48.964756Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629721264641707:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:23:49.050511Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629725559609054:2394] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:49.544247Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519629725559609146:2335], status: PRECONDITION_FAILED, issues:
: Error: Type annotation, code: 1030
:2:29: Error: At function: KiWriteTable!
:2:29: Error: Missing key column in input: Col1 for table: /Root/DataShard, code: 2029 2025-06-24T21:23:49.546060Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OTJlMjE1NTMtNzFlNWZhN2MtOTA0Mzc3YWMtOGFhZTNjYjY=, ActorId: [1:7519629725559609144:2334], ActorState: ExecuteState, TraceId: 01jyhx6s4b25vfver3btafsmc9, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id: Trying to start YDB, gRPC: 6582, MsgBus: 63856 2025-06-24T21:23:50.524620Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629731833894064:2244];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00310b/r3tmp/tmpVfBRJO/pdisk_1.dat 2025-06-24T21:23:50.562435Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:23:50.642869Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629731833893829:2079] 1750800230429177 != 1750800230429180 2025-06-24T21:23:50.656126Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:50.664686Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:50.664775Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:50.669615Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6582, node 2 2025-06-24T21:23:50.812981Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:50.813006Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:50.813013Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:50.813122Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63856 TClient is connected to server localhost:63856 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:23:51.364733Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:51.403005Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:23:51.511637Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:53.855764Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629744718796350:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:53.855889Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:53.865135Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:53.953680Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629744718796454:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:53.953770Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:53.954654Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629744718796459:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:53.958711Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:53.975265Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519629744718796461:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:23:54.049252Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629749013763808:2390] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 19702, MsgBus: 64478 2025-06-24T21:23:55.606271Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519629753144939910:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:55.606382Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00310b/r3tmp/tmpDk7VxG/pdisk_1.dat 2025-06-24T21:23:55.724245Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:55.725302Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519629753144939891:2079] 1750800235605860 != 1750800235605863 TServer::EnableGrpc on GrpcPort 19702, node 3 2025-06-24T21:23:55.749888Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:55.750036Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:55.775644Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:55.820033Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:55.820054Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:55.820064Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:55.820221Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64478 TClient is connected to server localhost:64478 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:23:56.444286Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:56.456966Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:23:56.614326Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:59.083259Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519629770324809710:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:59.083356Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:59.132046Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:59.332814Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:59.585369Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519629770324811083:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:59.585474Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:59.585747Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519629770324811088:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:59.590487Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:59.603045Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519629770324811090:2405], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-24T21:23:59.691994Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519629770324811143:3200] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:24:00.606801Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519629753144939910:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:00.606902Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> BasicStatistics::TwoNodes >> KqpNotNullColumns::OptionalParametersDataQuery [GOOD] >> KqpNotNullColumns::OptionalParametersScanQuery >> BasicStatistics::Simple |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::Serverless >> KqpNewEngine::LookupColumns [GOOD] >> KikimrIcGateway::TestLoadDataSourceProperties [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnLargeData[Zstd] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:15.821881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:15.822003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:15.822048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:15.822088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:15.822255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:15.822301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:15.822386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:15.822466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:15.823425Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:15.823841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:15.924790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:15.924859Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:15.942462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:15.942789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:15.942917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:15.951272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:15.951504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:15.952223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:15.952572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:15.956223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:15.956433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:15.957735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:15.957805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:15.958030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:15.958085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:15.958138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:15.958251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:15.968223Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:23:16.129797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-24T21:23:16.130062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.130343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:16.130409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:16.130650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:16.130751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:16.136292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:16.136590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:16.136824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.136928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:16.136982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:16.137037Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:16.142020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.142108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:16.142159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:16.144665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.144741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:16.144820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:16.144881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:16.148801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:16.152171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:16.152458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:16.153633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:16.153790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:16.153845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:16.154202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:16.154263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:16.154447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:16.154522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:16.159063Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:16.159126Z node 1 :FLAT_TX_SCHEMESHARD ... 
mr::NDataShard::TEvExportScan::TEvBuffer { Last: 0 Checksum: } REQUEST: PUT /data_00.csv.zst?partNumber=100&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:4802 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: C6DD8A2B-72D3-42D9-B60F-0B396583ECDF amz-sdk-request: attempt=1 content-length: 55 content-md5: B5SOCmjwb1RI3tHamcoRHA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv.zst / partNumber=100&uploadId=1 / 55 2025-06-24T21:24:02.871451Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:592: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3457:5418], result# UploadPartResult { ETag: 07948e0a68f06f5448ded1da99ca111c } 2025-06-24T21:24:02.871756Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:3456:5417] 2025-06-24T21:24:02.871852Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:3457:5418], sender# [1:3456:5417], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv.zst?partNumber=101&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:4802 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 5D5FE332-B8A9-4687-AB75-4867C88C4762 amz-sdk-request: attempt=1 content-length: 0 content-md5: 1B2M2Y8AsgTpgAmY7PhCfg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv.zst / partNumber=101&uploadId=1 / 0 2025-06-24T21:24:02.875645Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:592: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3457:5418], result# UploadPartResult { ETag: d41d8cd98f00b204e9800998ecf8427e } 2025-06-24T21:24:02.875706Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:3457:5418], success# 1, error# , multipart# 1, uploadId# 1 2025-06-24T21:24:02.881611Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:526: [Export] [s3] Handle TEvDataShard::TEvS3Upload: self# [1:3457:5418], upload# { Id: 1 Status: Complete Error: (empty maybe) Parts: 
[f8f51a1e4a70db44fa91cc2ab9680824,9eba675fd7f187274786dff2f47292df,921325fb6b8811df3d06a44dbe1f8523,4eeb6b90e8e61075275bd8a42f56bd69,2840a487abe8cb9502b3d9c8a8e1c942,607d8f6e3b235a360d63796efd3a51c2,ed22e08df7fb8840f7cabc779cc86885,efeff2c7731061edd9a39059cc078045,4af01cb3455932f28e3bba713dcd57c9,dc94d36ecf3b36d183d75c84b9b2fac6,e2ce425dd2bb582abcc13d0d714c3554,b71e46686939d2cdf046520dd2774281,ab731a82a161e5e044b24e895a1713d6,1df51aaec89711e13a6f95c13113e36c,b6066b2ed343831b1b0ee0076179981e,332d34d77adc2b024a33d87e07d4233f,cf0093cc99590a0e8f9c199ed6deca07,8cc923ec76224e69263ac93b7bfabd30,690d66897e0780f2dfe3614e5a659a22,7502aae0ec253663b1cbfdc8ede92ab9,7d2c6f728ee0c12097dfe5441970b946,5fc7b9b675e0a125eea67cf05f82627f,fc8c5faa99cc7f4ce7ca320f8e7adb58,8e305c5aca758683ff25407a7bbd9220,181bce9c6393e22a0ac359a7b45d8187,639677548f0a8b776a6db92f44d96505,390ff8f57cfa4c04bfbed0d7a63c90e8,3dd76756e6558fd6c8c918210f7dc136,a3f5254fdad3ded54edef910e704c151,e9186373f80dbaa55dd04d07621de277,8898b965060a431b499261ec0cd3cee3,3ed51c736e64defe04980ce328b17aa4,bb0e45971888796588c12ea1c1bec162,e2b3defa84005d3892986ca6894b811f,656c7c809c8c8485f6e91892591cd284,779c6827126f255bde25ae242bf4c8ff,8883fc9b073e683558f1231c5f2142d0,19390a0e3340bcb6ccfe866a790f05cb,305182d3e9745fba3aad1973bb1bfc93,002819d72a6dc7954ecc1bcd2bd20254,325c6bc3cdd6fd83083cf0126c606218,b86932903843b9626e80bd9ccb5d0571,b5054116537a7c467bdb488c9d67dee7,fc3a45bd17a00b147e4f9c55bc2493da,1118e2f41e8839211163250796a65dce,b403ff17c2c269a79201a03ce439dc2a,88f2692ee439cfadef1cd21d58aac8d3,e5bef12f89b101af84d52299a5867d99,ed613335180c53f69d450ef8b176a4d5,150fd7dcdc86eb38c7f821ff4698d8bc,a0c18bf08acc6ebecac04a2520efee9b,e8463d7ce8f502d1575a433c1b30a9af,f123e0fc879e2fdc2c3e2f698fc4176d,d7ab79d73e4648e0a2bf8dec3a19c019,4e74b82f6a8ea7fad8790ee7dfcdb76e,f72bb1d8aa0f5c9265bae10a3784d8e8,924b317371d16363a37962b17a2ae4bb,7214b458c7e25c791e54bd430b835a6e,e79dba1b56122372af3fe7b06ea91bda,6aae345b94d78fc7c1ed0b8697cf5e62,fd3636ed699facb5f0c12f81741cabc5,2c4a198408c3eb9577fcd339ca62c539,59fbf761f9b7574b65fa6877b167bb8c,14f9f5cfdf3a6c33c577a54429b19cb6,c6d078b3be9cd7943e8145fd982baeef,198f55ae25539fbd54a4a6075beac2d1,939123b44e362c76a151a85af0247fb7,0147f8bd741be7780cbc900b6f4b0899,43453200aeaf201420737354cd73cfe4,de26d1339779fe0c538d01d5963fd423,5c903650e719f959dc9f37ea360c6319,23607b3f36e0a2abae7f1ed8e38596f3,0db9af920c6d1cf868e470bf7a349747,aed6ac19c60d08500582eea9dadcdfee,3f4e37ddd3e2e56a725323fad4d85cf6,942b269af420b4277d025cea489dcb25,89eddc25ba615b6cf09b9cd9a11a16bb,1d8e7f0613dc1919ee90133c468380bd,8bf1e4c1266d8437c1bd85e0fca6640a,e9eabcf5b61cf257f530b156dbd77a88,411f1661ae7650d2144e8c6f8a33b28f,6706ec5b8771e555779d5cbeca41aa75,b3a33ef21a8224ddc78a52e8d7ca8357,58749d344f42c192e572eda4ee66fb01,381aeb5ee3014e2c0fd9b85bd59ce005,9aed2297cd10dce10d68de3ff1830b42,be88e095fc3a13708b714db03b1f2744,5628e81ee17fb22fc828ed1b2169578b,a1cfb563fa4af884fe02ced05c26c881,fc602b8ee2e9746fb52823f8fd1f0f28,a1de256e94c7baa9b8ab905c892d1a14,6bff895b0b5f3552ad4bdc61b0d24148,fcba1d258a8651d831767b42e010e439,bef6e3d7088e671809fe584531f96971,f0b489242271d11200dbdbc78e4ce715,372d2d6877fff7c04433e492ad4dbd45,32191cf1972dcccd59c0b5a8b53d4f23,25928b7997b97ac58f18fbbe589573e8,472e53a27497661c6400410909405c4e,07948e0a68f06f5448ded1da99ca111c,d41d8cd98f00b204e9800998ecf8427e] } REQUEST: POST /data_00.csv.zst?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:4802 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 
4FCF2A76-3133-403D-B73B-2336F74B2192 amz-sdk-request: attempt=1 content-length: 11529 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /data_00.csv.zst / uploadId=1 2025-06-24T21:24:02.891959Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:623: [Export] [s3] Handle TEvExternalStorage::TEvCompleteMultipartUploadResponse: self# [1:3457:5418], result# CompleteMultipartUploadResult { Bucket: Key: data_00.csv.zst ETag: c902b621cdd1ee89b9f1c4e6c36e6e45 } 2025-06-24T21:24:02.892547Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:3456:5417], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-24T21:24:02.918253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-24T21:24:02.918343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T21:24:02.918531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-24T21:24:02.918688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 311 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-24T21:24:02.918768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:24:02.918812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:24:02.918855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:24:02.918906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T21:24:02.919105Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:24:02.928958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:24:02.929780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:24:02.929851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:24:02.929973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:24:02.930014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:24:02.930058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:24:02.930096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:24:02.930147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T21:24:02.930239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:339:2316] message: TxId: 102 2025-06-24T21:24:02.930295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:24:02.930335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:24:02.930369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:24:02.930508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:24:02.941314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:24:02.941388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:3442:5404] TestWaitNotification: OK eventTxId 102 >> KqpQueryService::DmlNoTx [GOOD] >> KqpNewEngine::LiteralKeys [GOOD] >> KqpQueryServiceScripts::ExecuteScriptPg [GOOD] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::ShuffleEliminationTpcdsMapJoinBug [GOOD] Test command err: Trying to start YDB, gRPC: 61057, MsgBus: 17737 2025-06-24T21:22:16.047203Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629325427778512:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:16.047551Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0038d1/r3tmp/tmpCQjCU3/pdisk_1.dat 2025-06-24T21:22:16.850284Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:16.850392Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:16.860471Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:22:16.921601Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:16.924966Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629325427778331:2079] 1750800135976527 != 1750800135976530 TServer::EnableGrpc on GrpcPort 61057, node 1 2025-06-24T21:22:17.061732Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:22:17.228265Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:17.228290Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:17.228298Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:17.228433Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17737 TClient is connected to server localhost:17737 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:18.346606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:22:18.361837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:22:21.033377Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629325427778512:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:21.033448Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:21.263960Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629351197582752:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:21.264086Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:21.273456Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629351197582764:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:21.277781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:21.299849Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629351197582766:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:22:21.395575Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629351197582817:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:21.847732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:22:22.176549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629355492550376:2325];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:22:22.176757Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629355492550376:2325];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:22:22.177012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629355492550376:2325];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:22:22.177139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629355492550376:2325];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:22:22.177256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629355492550376:2325];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:22:22.177358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629355492550376:2325];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:22:22.177472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629355492550376:2325];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:22:22.177601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629355492550376:2325];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:22:22.177729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629355492550376:2325];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:22:22.177845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037900;self_id=[1:7519629355492550376:2325];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:22:22.177900Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519629355492550368:2317];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:22:22.177928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519629355492550368:2317];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:22:22.177964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519629355492550376:2325];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:22:22.178058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519629355492550368:2317];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:22:22.178146Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519629355492550368:2317];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:22:22.178235Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519629355492550368:2317];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:22:22.178350Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519629355492550368:2317];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:22:22.178446Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519629355492550368:2317];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:22:22.178556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519629355492550368:2317];tabl ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:40.980369Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:40.981037Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:40.982227Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:40.982769Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:40.986523Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:40.987120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:40.988115Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:40.988671Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:40.993334Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:40.993914Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:40.994033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:40.994416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:40.999807Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:40.999806Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:41.000465Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:41.000720Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:41.006290Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:41.006291Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:41.006999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:41.006998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:41.013175Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:41.013175Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:41.013834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:41.014463Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:41.020204Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:41.020203Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:41.020896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:41.021197Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:41.027057Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:41.027056Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:41.027832Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:41.027986Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:41.032958Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:41.033663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:41.036184Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:41.036903Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:41.039506Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:41.040171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:41.045890Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:41.046431Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:41.046668Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:41.052528Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:41.194667Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx5jbd34kf3j4nvp7gbt5g", SessionId: 
ydb://session/3?node_id=1&id=MzkzM2M5ODgtZmFhMWQwZDktZGY5OGMwZDYtNGVmMzAzMmE=, Slow query, duration: 31.388396s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:23:41.763514Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:41.763515Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:41.764276Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestLoadDataSourceProperties [GOOD] Test command err: Trying to start YDB, gRPC: 29345, MsgBus: 10379 2025-06-24T21:23:19.075540Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629599077256280:2216];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:19.075679Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0036b3/r3tmp/tmpHUB699/pdisk_1.dat 2025-06-24T21:23:19.682179Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:19.682295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:19.684572Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:19.691296Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629599077256090:2079] 1750800199063461 != 1750800199063464 2025-06-24T21:23:19.700080Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29345, node 1 2025-06-24T21:23:20.075402Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:20.078964Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:20.078980Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:23:20.078986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:20.079091Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10379 TClient is connected to server localhost:10379 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:21.166132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:21.214055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:23:21.229142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:21.512108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:21.734318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:23:21.842056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:23.297087Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629616257126918:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:23.297205Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:23.667021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.762565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.803546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.843928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.896795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:23.970675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:24.008463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:24.074513Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629599077256280:2216];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:24.074574Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:24.126773Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519629620552094880:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:24.126880Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:24.131275Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629620552094885:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:24.134956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:24.147461Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629620552094887:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:23:24.241230Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629620552094938:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:25.394404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:50.025609Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:50.037000Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:50.100991Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:50.138499Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:50.364890Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:50.475927Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:53.414398Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519629744929891185:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:53.414514Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:53.472479Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:53.511569Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:53.547708Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:53.585126Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:53.631167Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:53.691475Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:53.772605Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:53.858226Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519629744929891847:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:53.858343Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:53.858833Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519629744929891852:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:53.862666Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:53.875711Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519629744929891854:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:23:53.938764Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519629744929891905:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:54.083288Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519629727750020397:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:54.083402Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:55.310647Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:56.009378Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:23:56.594198Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:57.302775Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:57.985548Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710688:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:23:58.672864Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710693:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:23:59.395449Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:59.445171Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:24:03.594646Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710725:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) >> BasicStatistics::TwoTables ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::DmlNoTx [GOOD] Test command err: Trying to start YDB, gRPC: 19572, MsgBus: 21803 2025-06-24T21:23:37.783708Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629678027848575:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:37.783979Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003113/r3tmp/tmpPlv3xu/pdisk_1.dat 2025-06-24T21:23:38.398629Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:38.398729Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:38.412684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:38.432731Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:38.434353Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629678027848394:2079] 1750800217725570 != 1750800217725573 TServer::EnableGrpc on GrpcPort 19572, node 1 2025-06-24T21:23:38.660730Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:38.660769Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:38.660777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:38.660890Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:23:38.763267Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21803 TClient is connected to server localhost:21803 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:39.405766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:39.434449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:23:39.454545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:39.657860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:39.817535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:39.927242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:41.776093Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629695207719204:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:41.776233Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:42.118840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:42.192312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:42.277211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:42.349355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:42.395873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:42.478543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:42.510081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:42.587473Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629699502687167:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:42.587566Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:42.587615Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629699502687172:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:42.591133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:42.610222Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629699502687174:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:23:42.707973Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629699502687225:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:42.781757Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629678027848575:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:42.782209Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 27590, MsgBus: 8431 2025-06-24T21:23:44.932376Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629707743244175:2156];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00 ... d : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED DEFAULT_ERROR: {
: Error: Terminate was called, reason(43):
:1:8: Failed to unwrap empty optional }. 2025-06-24T21:23:56.776165Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=Njg3ODY5ZGUtOGQ4ZWRlMzAtMzY5NTA3YTItNzg4YjFhZjQ=, ActorId: [3:7519629756265055345:2485], ActorState: ExecuteState, TraceId: 01jyhx703d3mq1mf3tkbbh09re, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 26118, MsgBus: 20930 2025-06-24T21:23:57.837362Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519629761003959823:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:57.837432Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003113/r3tmp/tmpk2O3na/pdisk_1.dat 2025-06-24T21:23:57.986298Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:57.986401Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:57.999531Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:58.003783Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:58.007350Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519629761003959799:2079] 1750800237836935 != 1750800237836938 TServer::EnableGrpc on GrpcPort 26118, node 4 2025-06-24T21:23:58.120183Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:58.120211Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:58.120223Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:58.120375Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20930 TClient is connected to server localhost:20930 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:23:58.772481Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:58.798005Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:58.853430Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:58.900864Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:59.099841Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:59.197087Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:02.253044Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519629782478797932:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:02.253162Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:02.298650Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:02.343490Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:02.387852Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:02.433195Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:02.480802Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:02.528610Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:02.576684Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:02.663412Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519629782478798588:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:02.663535Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:02.664026Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519629782478798593:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:02.670783Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:24:02.690320Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519629782478798595:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:24:02.790868Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519629782478798646:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:24:02.845253Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519629761003959823:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:02.845411Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::LookupColumns [GOOD] Test command err: Trying to start YDB, gRPC: 27664, MsgBus: 22426 2025-06-24T21:23:08.687680Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629552083517956:2205];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:08.687881Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003151/r3tmp/tmpX0T6CX/pdisk_1.dat 2025-06-24T21:23:09.188570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:09.188682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:09.191393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:09.234784Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629552083517788:2079] 1750800188637013 != 1750800188637016 2025-06-24T21:23:09.244972Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27664, node 1 2025-06-24T21:23:09.399627Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:09.399653Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:09.399675Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:09.399785Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22426 2025-06-24T21:23:09.739291Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22426 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:10.376840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:10.411846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:10.601988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:10.850580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:10.939254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:12.804235Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629569263388625:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:12.804371Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:13.249322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:13.299506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:13.352148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:13.384495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:13.424014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:13.506864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:13.586603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:13.676305Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629552083517956:2205];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:13.676366Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:13.685974Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519629573558356593:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:13.686071Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:13.686291Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629573558356598:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:13.690216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:13.713382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-24T21:23:13.714530Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629573558356600:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:23:13.789539Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629573558356655:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 32197, MsgBus: 32635 2025-06-24T21:23:15.987511Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629583078898235:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:15.987740Z node 2 :METADATA_PROVIDER ERROR: log.cp ... ], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:23:52.167234Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7519629738716240412:3431] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 15900, MsgBus: 15399 2025-06-24T21:23:55.636895Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519629751423842729:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:55.637008Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003151/r3tmp/tmpy6UPY6/pdisk_1.dat 2025-06-24T21:23:55.788899Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519629751423842692:2079] 1750800235635971 != 1750800235635974 2025-06-24T21:23:55.794873Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:55.825246Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:55.825384Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:55.827687Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15900, node 7 2025-06-24T21:23:55.922602Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:55.922634Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:55.922655Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:55.922876Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15399 2025-06-24T21:23:56.647549Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15399 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:56.745431Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:56.764030Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:56.861560Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:57.131768Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:57.237067Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:00.638008Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519629751423842729:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:00.638108Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:24:00.689707Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629772898680821:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:00.689799Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:00.783863Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:00.880771Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:00.931563Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:01.015938Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:01.101719Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:01.165405Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:01.319950Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:01.426767Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629777193648780:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:01.426887Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:01.427179Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629777193648785:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:01.432827Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:24:01.446586Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519629777193648787:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:24:01.545275Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519629777193648838:3430] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> SystemView::PgTablesOneSchemeShardDataQuery [GOOD] >> SystemView::QueryStats |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.8%| [TA] $(B)/ydb/core/kqp/provider/ut/test-results/unittest/{meta.json ... results_accumulator.log} |96.8%| [TA] {RESULT} $(B)/ydb/core/kqp/provider/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::LiteralKeys [GOOD] Test command err: Trying to start YDB, gRPC: 3333, MsgBus: 63255 2025-06-24T21:23:09.446577Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629555711087257:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:09.447418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00314f/r3tmp/tmpVI47LK/pdisk_1.dat 2025-06-24T21:23:10.033131Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:10.051440Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629555711087036:2079] 1750800189359058 != 1750800189359061 TServer::EnableGrpc on GrpcPort 3333, node 1 2025-06-24T21:23:10.258782Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:10.258880Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:10.264005Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:10.264022Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:10.264048Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:10.264162Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:23:10.264229Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:10.432753Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63255 TClient is connected to server localhost:63255 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:11.188012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:11.224044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:11.250995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:11.438244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:11.667946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:11.755978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:13.557623Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629572890957881:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:13.557734Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:13.925253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:13.975276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:14.055298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:14.085467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:14.120732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:14.164598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:14.234362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:14.320714Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629577185925833:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:14.320803Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:14.321524Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629577185925838:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:14.326916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:14.340243Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629577185925840:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:23:14.426650Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629577185925891:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:14.429266Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629555711087257:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:14.429330Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:15.443746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... OR: schemereq.cpp:553: Actor# [6:7519629740785643599:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 2911, MsgBus: 27088 2025-06-24T21:23:56.612329Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519629758501715936:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:56.612391Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00314f/r3tmp/tmpyadxNC/pdisk_1.dat 2025-06-24T21:23:56.813980Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:56.835355Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519629758501715913:2079] 1750800236598315 != 1750800236598318 2025-06-24T21:23:56.835749Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:56.835860Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:56.838829Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2911, node 7 2025-06-24T21:23:56.923204Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:56.923247Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:56.923280Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:56.923521Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27088 TClient is connected to 
server localhost:27088 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:23:57.671497Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:23:57.707952Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:57.718034Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:57.730422Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:57.813115Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:58.083992Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:58.184987Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:01.282657Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629779976554027:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:01.282820Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:01.375966Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:01.429385Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:01.479766Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:01.526855Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:01.568505Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:01.616741Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519629758501715936:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:01.617162Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:24:01.646942Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:01.756436Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:01.853346Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519629779976554690:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:01.853472Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:01.853574Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629779976554695:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:01.859245Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:24:01.883022Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519629779976554697:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:24:01.965523Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519629779976554748:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpMergeCn::SortBy_Int32 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::ExecuteScriptPg [GOOD] Test command err: Trying to start YDB, gRPC: 62078, MsgBus: 26435 2025-06-24T21:23:34.574728Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629662550379759:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:34.586625Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003119/r3tmp/tmpkURYgi/pdisk_1.dat 2025-06-24T21:23:35.315023Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629662550379546:2079] 1750800214488129 != 1750800214488132 2025-06-24T21:23:35.353782Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:35.368792Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:35.368910Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:35.372596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62078, node 1 2025-06-24T21:23:35.543455Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:35.772008Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:35.772047Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:35.772058Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:35.772190Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26435 TClient is connected to server localhost:26435 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:37.044930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:37.138118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:37.405725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:37.643470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:37.727607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:38.924100Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629679730250377:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:38.924245Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:39.550778Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629662550379759:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:39.550842Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:40.081094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:40.119037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:40.213107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:40.255899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:40.296494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:40.384207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:40.424227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:40.575650Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519629688320185643:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:40.575751Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:40.576166Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629688320185648:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:40.580990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:40.602717Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629688320185650:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:23:40.664001Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629688320185703:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:42.176225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:42.178720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... 301982Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:56.302085Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:56.304734Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64479, node 3 2025-06-24T21:23:56.371527Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:56.371570Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:56.371582Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:56.371714Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3914 TClient is connected to server localhost:3914 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:56.937006Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:23:56.952364Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:57.025327Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:57.170301Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:57.258611Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:57.331473Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:59.880947Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519629769327280196:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:59.881033Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:59.936619Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:59.990078Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:00.025436Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:00.071852Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:00.120770Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:00.171812Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:00.247580Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:00.359356Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519629773622248158:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:00.359546Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:00.360029Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519629773622248163:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:00.364688Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:24:00.378488Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519629773622248165:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:24:00.441241Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519629773622248216:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:24:01.118450Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519629756442376707:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:01.118573Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:24:01.700610Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:01.702546Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:01.704406Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:04.950775Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800244977, txId: 281474976715704] shutting down >> KqpSort::TopParameter [GOOD] >> KqpSort::TopParameterFilter |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> TConsoleTests::TestDatabaseQuotas [GOOD] >> TConsoleTests::TestDatabaseQuotasBadOverallQuota >> BasicStatistics::ServerlessGlobalIndex >> KqpRanges::DeleteNotFullScan-UseSink [GOOD] >> KqpRanges::CastKeyBounds |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoServerlessDbs |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoServerlessTwoSharedDbs >> HttpRequest::Analyze ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpMergeCn::SortBy_Int32 [GOOD] Test command err: Trying to start YDB, gRPC: 26802, MsgBus: 6766 2025-06-24T21:22:58.735427Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629510253533273:2165];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:58.735497Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00316d/r3tmp/tmpqvYiQ2/pdisk_1.dat 2025-06-24T21:22:59.103247Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:59.107373Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629510253533146:2079] 1750800178702767 != 1750800178702770 2025-06-24T21:22:59.117969Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:59.118076Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:59.126029Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26802, node 1 2025-06-24T21:22:59.219337Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:59.219356Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:59.219364Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:59.219466Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6766 TClient is connected to server localhost:6766 2025-06-24T21:22:59.790665Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:59.999729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:23:00.051953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:02.123857Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629527433402972:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:02.123975Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:02.432627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:02.630876Z node 1 :RPC_REQUEST ERROR: rpc_read_rows.cpp:777: TReadRowsRPC ReplyWithError: Unknown table '/Root/WrongTable' Trying to start YDB, gRPC: 6290, MsgBus: 5947 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00316d/r3tmp/tmpmwiDX9/pdisk_1.dat 2025-06-24T21:23:03.911328Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:23:03.928979Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:03.929071Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:03.947260Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629530748226204:2079] 1750800183643765 != 1750800183643768 2025-06-24T21:23:03.951577Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:03.956841Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6290, node 2 2025-06-24T21:23:04.065281Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:04.065300Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:04.065307Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:04.065427Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5947 TClient is connected to server localhost:5947 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:23:04.654990Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:04.663670Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:04.664132Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:23:04.675314Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:04.815440Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:05.007251Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:05.084407Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:07.092845Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519629547928097006:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:07.092918Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:07.171861Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:07.214171Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23 ... R WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7519629759594088859:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:56.964676Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00316d/r3tmp/tmpziUGy5/pdisk_1.dat 2025-06-24T21:23:57.174054Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:57.174175Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:57.175409Z node 8 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:57.178122Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7519629759594088841:2079] 1750800236963579 != 1750800236963582 2025-06-24T21:23:57.197295Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1341, node 8 2025-06-24T21:23:57.321780Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:57.321816Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:57.321828Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:57.322011Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32458 2025-06-24T21:23:57.979381Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:32458 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:58.026641Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:58.042032Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:58.145501Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:58.376859Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:58.488413Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:01.976854Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[8:7519629759594088859:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:01.976942Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:24:02.235360Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519629785363894248:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:02.235506Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:02.331756Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:02.394610Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:02.458776Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:02.521720Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:02.600288Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:02.660921Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:02.744465Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:02.851713Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519629785363894909:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:02.851835Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:02.852033Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519629785363894914:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:02.860322Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:24:02.881959Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [8:7519629785363894916:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:24:02.937533Z node 8 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [8:7519629785363894967:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:24:04.720494Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:06.177279Z node 8 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800246202, txId: 281474976710674] shutting down |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> KqpNewEngine::ItemsLimit [GOOD] >> KqpNewEngine::JoinDictWithPure |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> SystemView::ShowCreateTableDefaultLiteral [GOOD] >> SystemView::ShowCreateTableColumn |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> ShowCreateView::WithSingleQuotedTablePathPrefix [GOOD] >> ShowCreateView::WithTwoTablePathPrefixes >> SystemView::PDisksFields [GOOD] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> SystemView::GroupsFields |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> SystemView::AuthGroups_TableRange [GOOD] >> SystemView::AuthOwners >> KqpNewEngine::PushPureFlatmapInnerConnectionsToStage [GOOD] >> TConsoleTests::TestDatabaseQuotasBadOverallQuota [GOOD] >> TConsoleTests::TestDatabaseQuotasBadStorageQuota |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::PushPureFlatmapInnerConnectionsToStage [GOOD] Test command err: Trying to start YDB, gRPC: 15086, MsgBus: 30568 2025-06-24T21:23:14.830669Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629577896577158:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:14.830766Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00314c/r3tmp/tmpP2wOvf/pdisk_1.dat 2025-06-24T21:23:15.223312Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629577896577140:2079] 1750800194829612 != 1750800194829615 2025-06-24T21:23:15.241150Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15086, node 1 2025-06-24T21:23:15.298159Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:15.301262Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:15.347960Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:15.395079Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:15.395099Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:15.395115Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:15.395242Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30568 2025-06-24T21:23:15.858159Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30568 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:16.022747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:16.091795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:16.266801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:23:16.447895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:16.531875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:18.529058Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629595076447972:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:18.529167Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:18.842645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:18.889834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:18.922305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:18.954437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:19.038961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:19.102599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:19.188825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:19.328525Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629599371415932:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:19.328603Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:19.328816Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629599371415937:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:19.332672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:19.359536Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629599371415939:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:23:19.421996Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629599371415990:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:19.831295Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629577896577158:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:19.831373Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 2351, MsgBus: 23875 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00314c/r3tmp/tmpi44e09/pdisk_1.dat 2025-06-24T21:23:21.989922Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:23:22.075393Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:22.076755Z nod ... 34], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:23:57.995176Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7519629761765999427:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 9628, MsgBus: 21873 2025-06-24T21:24:02.448264Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519629783875301577:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:02.448448Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00314c/r3tmp/tmpEnn9s8/pdisk_1.dat 2025-06-24T21:24:02.670669Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:02.675269Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519629783875301557:2079] 1750800242441485 != 1750800242441488 2025-06-24T21:24:02.688431Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:02.688547Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:02.691043Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9628, node 7 2025-06-24T21:24:02.831870Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:02.831898Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:02.831909Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:02.832074Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21873 2025-06-24T21:24:03.487350Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21873 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:24:03.766718Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:03.780914Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:03.876924Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:04.111834Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:04.220248Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:07.216209Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629805350139667:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:07.216338Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:07.273337Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:07.327272Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:07.385442Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:07.443107Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:07.448628Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519629783875301577:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:07.448729Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:24:07.498000Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:07.610121Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:07.669591Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:07.770848Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519629805350140327:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:07.770962Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:07.771275Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629805350140332:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:07.776201Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:24:07.793813Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519629805350140334:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:24:07.875528Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519629805350140385:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest >> TableWriter::Backup [GOOD] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest >> KqpNotNullColumns::OptionalParametersScanQuery [GOOD] >> SystemView::TopPartitionsByCpuFollowers [GOOD] >> SystemView::SystemViewFailOps >> TableWriter::Restore [GOOD] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest >> TableWriter::Backup [GOOD] >> TBackupTests::ShouldSucceedOnLargeData_MinWriteBatch [GOOD] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest >> TableWriter::Restore [GOOD] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest >> KqpQueryServiceScripts::ExecuteScriptWithCancelAfter [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithCancelAfterAndTimeout ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::OptionalParametersScanQuery [GOOD] Test command err: Trying to start YDB, gRPC: 26307, MsgBus: 8373 2025-06-24T21:23:15.636595Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629582808353929:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:15.636718Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003149/r3tmp/tmpZA83hP/pdisk_1.dat 2025-06-24T21:23:16.068924Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:16.115395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:16.115533Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 26307, node 1 2025-06-24T21:23:16.122294Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:16.194033Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:16.194057Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:16.194064Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T21:23:16.194181Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8373 2025-06-24T21:23:16.643359Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8373 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:16.860509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:18.852867Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629595693256410:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:18.852967Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:19.237442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:19.460182Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629599988223811:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:19.460283Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:19.460537Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629599988223816:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:19.465010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:19.489698Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629599988223818:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:23:19.573896Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629599988223869:2394] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:19.763623Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519629599988223913:2316], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:14: Error: At function: KiWriteTable!
:1:14: Error: Missing not null column in input: Value. All not null columns should be initialized, code: 2032 2025-06-24T21:23:19.765597Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZTU2MTQxZTYtODAwMDg5NzMtMzFkMTE5ZWUtMjg3NjE4Ng==, ActorId: [1:7519629595693256407:2289], ActorState: ExecuteState, TraceId: 01jyhx5w1t1vy2n9wbd8kqbakn, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-24T21:23:19.818182Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519629599988223922:2320], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:14: Error: At function: KiWriteTable!
:1:47: Error: Failed to convert type: Struct<'Key':Int32,'Value':Null> to Struct<'Key':Uint64?,'Value':String>
:1:47: Error: Failed to convert 'Value': Null to String
:1:47: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-24T21:23:19.820199Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZTU2MTQxZTYtODAwMDg5NzMtMzFkMTE5ZWUtMjg3NjE4Ng==, ActorId: [1:7519629595693256407:2289], ActorState: ExecuteState, TraceId: 01jyhx5w35360ck4jp37q0zbb4, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 21824, MsgBus: 13962 2025-06-24T21:23:20.744356Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629601560760217:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:20.744398Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003149/r3tmp/tmp7hRhx5/pdisk_1.dat 2025-06-24T21:23:21.004199Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:21.005894Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629601560760199:2079] 1750800200718905 != 1750800200718908 2025-06-24T21:23:21.018213Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:21.018309Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:21.022101Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21824, node 2 2025-06-24T21:23:21.291775Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:21.291801Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:21.291811Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:21.291939Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13962 2025-06-24T21:23:21.796012Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13962 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:22.107740Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) ... p: SetPath # /home/runner/.ya/build/build_root/2btm/003149/r3tmp/tmpMCg0hn/pdisk_1.dat 2025-06-24T21:24:04.632577Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:04.742481Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:04.745006Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519629793104557508:2079] 1750800244496798 != 1750800244496801 2025-06-24T21:24:04.770888Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:04.771027Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:04.773893Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21895, node 7 2025-06-24T21:24:04.916003Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:04.916033Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:04.916044Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:04.916248Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23280 2025-06-24T21:24:05.492700Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23280 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:24:05.844637Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:05.855265Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:24:05.863242Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:05.969563Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:06.236193Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:06.344352Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:09.592737Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629814579395616:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:09.592864Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:09.673958Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:09.771318Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:09.834630Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:09.881427Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:09.942067Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:10.030840Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:10.119051Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:10.224304Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629818874363584:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:10.224430Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:10.224739Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629818874363589:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:10.232378Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:24:10.251639Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519629818874363591:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:24:10.347042Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519629818874363642:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:24:11.860603Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:12.360463Z node 7 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800252390, txId: 281474976710674] shutting down 2025-06-24T21:24:12.643525Z node 7 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800252677, txId: 281474976710676] shutting down 2025-06-24T21:24:12.854176Z node 7 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800252880, txId: 281474976710678] shutting down >> TConsoleTests::TestDatabaseQuotasBadStorageQuota [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnLargeData_MinWriteBatch [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:23:28.893670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:28.893803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:28.893853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:28.893897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:28.893966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:28.894015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:28.894103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:28.894188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: 
Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:28.895051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:28.895486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:28.981393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:23:28.981470Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:29.007110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:29.012916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:29.013119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:29.022401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:29.022670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:29.023399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:29.023846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:29.026755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:29.026982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:29.028285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:29.028350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:29.028471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:29.028519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:29.028565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:29.028722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:23:29.036394Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:23:29.192677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" 
OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:29.192930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:29.193148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:29.193193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:29.193431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:29.193517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:29.196192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:29.196411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:29.196668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:29.196756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:29.196802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:29.196857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:29.201734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:29.201830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:29.201878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:29.204027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:29.204106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:29.204179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:29.204243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:29.216024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:29.218616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:29.218843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:29.219944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:29.220097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:23:29.220148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:29.220461Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:23:29.220522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:29.220712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:23:29.220818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:23:29.223422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:23:29.223499Z node 1 :FLAT_TX_SCHEMESHARD ... 
06-24T21:24:13.762404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-24T21:24:13.762561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 128 -> 129 2025-06-24T21:24:13.762709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:24:13.775449Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:797: [Export] [s3] Bootstrap: self# [1:3458:5419], attempt# 0 2025-06-24T21:24:13.803172Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:441: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:3458:5419], sender# [1:3457:5418] FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-24T21:24:13.816204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:24:13.816284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:24:13.816578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:24:13.816632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-24T21:24:13.817690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:24:13.817765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:24:13.819290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:24:13.819428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:24:13.819481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:24:13.819527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-24T21:24:13.819590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:24:13.819721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:1298 Accept: */* 
ConnectionFAKE_COORDINATOR: Erasing txId 102 : Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: BBD583DC-94FE-4620-9F2B-DD7FC50826CF amz-sdk-request: attempt=1 content-length: 94 content-md5: ZpDejBbuBPHjGq8ZC8z8QA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 94 2025-06-24T21:24:13.821666Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:401: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:3458:5419], result# PutObjectResult { ETag: 6690de8c16ee04f1e31aaf190bccfc40 } REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:1298 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 4EFF1950-D05B-49FF-B85E-947FF26CC520 amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-24T21:24:13.828789Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:3458:5419], result# PutObjectResult { ETag: 72cbc2e67a8d4d9b122f2e329a5a74fd } 2025-06-24T21:24:13.829058Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:3457:5418] 2025-06-24T21:24:13.829594Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:3458:5419], sender# [1:3457:5418], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } 2025-06-24T21:24:13.831276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 REQUEST: PUT /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:1298 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 1629BC55-A980-4906-A241-72330ED6D8D4 amz-sdk-request: attempt=1 content-length: 740 content-md5: P/a/uWmNWYxyRT1pAtAE7A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 740 2025-06-24T21:24:13.833631Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:3458:5419], result# PutObjectResult { ETag: 3ff6bfb9698d598c72453d6902d004ec } 2025-06-24T21:24:13.833713Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:3458:5419], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-24T21:24:13.834225Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:3457:5418], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-24T21:24:13.874014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 310 RawX2: 4294969591 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 
10000 RowsProcessed: 1000 } 2025-06-24T21:24:13.874098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-24T21:24:13.874282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 310 RawX2: 4294969591 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-24T21:24:13.874396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 310 RawX2: 4294969591 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-24T21:24:13.874484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:24:13.874532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:24:13.874585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:24:13.874643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 129 -> 240 2025-06-24T21:24:13.874814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:24:13.879496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:24:13.880347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:24:13.880431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:24:13.880569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:24:13.880609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:24:13.880654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:24:13.880702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:24:13.880741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation 
IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-24T21:24:13.880833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:338:2315] message: TxId: 102 2025-06-24T21:24:13.880892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:24:13.880936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:24:13.880970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:24:13.881112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:24:13.886074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:24:13.886145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:3443:5405] TestWaitNotification: OK eventTxId 102 |96.8%| [TA] $(B)/ydb/core/tx/schemeshard/ut_backup/test-results/unittest/{meta.json ... results_accumulator.log} |96.8%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpRanges::CastKeyBounds [GOOD] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> TConsoleTests::TestDatabaseQuotasBadStorageQuota [GOOD] Test command err: 2025-06-24T21:22:33.070285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:33.070353Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:33.195125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:34.510129Z node 9 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T21:22:34.510767Z node 9 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpKZIOOi/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T21:22:34.511412Z node 9 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpKZIOOi/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpKZIOOi/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 13119218657415639415 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T21:22:34.724555Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:22:34.724727Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:22:34.738010Z node 9 :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1000 VDISK[80000000:_:0:0:0]: (2147483648) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 
HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpKZIOOi/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T21:22:34.753944Z node 9 :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1000 VDISK[80000001:_:0:0:0]: (2147483649) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpKZIOOi/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T21:22:34.754450Z node 9 :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1000 VDISK[80000002:_:0:0:0]: (2147483650) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpKZIOOi/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T21:22:34.884914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:1, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:22:35.044811Z node 3 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T21:22:35.045383Z node 3 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpKZIOOi/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T21:22:35.045621Z node 3 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpKZIOOi/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpKZIOOi/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 4424997912255982907 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimS ... gWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T21:24:05.009684Z node 146 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T21:24:05.010191Z node 146 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpGIE77X/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T21:24:05.010387Z node 146 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpGIE77X/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpGIE77X/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 5338734469926028536 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T21:24:05.066705Z node 147 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T21:24:05.073654Z node 147 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpGIE77X/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T21:24:05.073895Z node 147 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpGIE77X/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpGIE77X/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 931142345280450303 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T21:24:05.108893Z node 149 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T21:24:05.109456Z node 149 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpGIE77X/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T21:24:05.109667Z node 149 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpGIE77X/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpGIE77X/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 4531991295309889908 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T21:24:05.153372Z node 151 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T21:24:05.153941Z node 151 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpGIE77X/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T21:24:05.154154Z node 151 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpGIE77X/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/002795/r3tmp/tmpGIE77X/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 3701291540521800478 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T21:24:05.443526Z node 145 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:24:05.443648Z node 145 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:05.580609Z node 145 :STATISTICS WARN: tx_init.cpp:287: [72075186233409554] TTxInit::Complete. EnableColumnStatistics=false 2025-06-24T21:24:08.792408Z node 154 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:24:08.792479Z node 154 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:08.855642Z node 154 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:12.661591Z node 163 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:24:12.661682Z node 163 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:12.735116Z node 163 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) >> KqpSort::TopParameterFilter [GOOD] |96.8%| [TA] $(B)/ydb/core/backup/impl/ut_table_writer/test-results/unittest/{meta.json ... results_accumulator.log} |96.8%| [TA] {RESULT} $(B)/ydb/core/backup/impl/ut_table_writer/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpNewEngine::JoinDictWithPure [GOOD] >> KqpNewEngine::IdxLookupExtractMembers ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpRanges::CastKeyBounds [GOOD] Test command err: Trying to start YDB, gRPC: 10402, MsgBus: 16352 2025-06-24T21:23:20.160186Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629603417053957:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:20.160408Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003140/r3tmp/tmpOp0jXo/pdisk_1.dat 2025-06-24T21:23:20.683346Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629603417053754:2079] 1750800200064763 != 1750800200064766 2025-06-24T21:23:20.711712Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:20.714026Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:20.714153Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:20.716436Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10402, node 1 2025-06-24T21:23:20.983375Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:20.983401Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:20.983409Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:20.983565Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:23:21.062186Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16352 TClient is connected to server localhost:16352 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:23:21.912353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:21.930450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:23.964164Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629616301956285:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:23.964304Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:24.241193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:24.412876Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629620596923686:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:24.412964Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:24.413359Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629620596923691:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:24.419550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:24.447377Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629620596923693:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:23:24.521378Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629620596923744:2395] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:24.728754Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519629620596923786:2316], status: PRECONDITION_FAILED, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:13: Error: Missing key column in input: Key for table: /Root/TestUpsertNotNullPk, code: 2029 2025-06-24T21:23:24.730378Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZDcxM2RjNC05MTZkNDI1My00MGQ0ZjM4ZC1kMmFkYzA4MA==, ActorId: [1:7519629616301956281:2289], ActorState: ExecuteState, TraceId: 01jyhx60ws4jw0k7e1qjp9ekr8, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id: 2025-06-24T21:23:24.817815Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519629620596923806:2289], TxId: 281474976710662, task: 1. Ctx: { TraceId : 01jyhx60y409fj4bp91smpnrqh. SessionId : ydb://session/3?node_id=1&id=ZDcxM2RjNC05MTZkNDI1My00MGQ0ZjM4ZC1kMmFkYzA4MA==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: BAD_REQUEST KIKIMR_BAD_COLUMN_TYPE: {
: Error: Tried to insert NULL value into NOT NULL column: Key, code: 2031 }. 2025-06-24T21:23:24.818272Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZDcxM2RjNC05MTZkNDI1My00MGQ0ZjM4ZC1kMmFkYzA4MA==, ActorId: [1:7519629616301956281:2289], ActorState: ExecuteState, TraceId: 01jyhx60y409fj4bp91smpnrqh, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 25710, MsgBus: 27981 2025-06-24T21:23:25.641912Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629625118707466:2243];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:25.651246Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003140/r3tmp/tmp9agGwG/pdisk_1.dat 2025-06-24T21:23:25.768368Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:25.769346Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629625118707235:2079] 1750800205599276 != 1750800205599279 2025-06-24T21:23:25.783449Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:25.783536Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 25710, node 2 2025-06-24T21:23:25.789361Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:25.875765Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:25.875788Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:25.875797Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:25.875905Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27981 TClient is connected to server localhost:27981 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:23:26.392274Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemes ... uery","Stats":{"Compilation":{"FromCache":false,"DurationUs":437807,"CpuTimeUs":426954},"ProcessCpuTimeUs":2114,"TotalDurationUs":455362,"ResourcePoolId":"default","QueuedTimeUs":780},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"A-SelfCpu":1.026,"A-Cpu":1.026,"Path":"\/Root\/Join2","Name":"Delete","Table":"Join2"}],"Node Type":"Delete"}],"Node Type":"Effect"}],"Node Type":"Query","PlanNodeType":"Query"}} Trying to start YDB, gRPC: 3483, MsgBus: 23862 2025-06-24T21:24:08.574632Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519629807801639719:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:08.574739Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003140/r3tmp/tmplbeYzK/pdisk_1.dat 2025-06-24T21:24:08.761199Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:08.762837Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519629807801639695:2079] 1750800248573826 != 1750800248573829 2025-06-24T21:24:08.780289Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:08.780442Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:08.783267Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3483, node 7 2025-06-24T21:24:08.856148Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:08.856186Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:08.856198Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:08.856412Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23862 2025-06-24T21:24:09.591683Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23862 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:24:09.703878Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:09.716885Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:09.798640Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:24:10.042046Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:10.151002Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:13.349874Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629829276477806:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:13.350005Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:13.433295Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:13.476784Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:13.528459Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:13.574285Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:13.574943Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519629807801639719:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:13.574997Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:24:13.625675Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:13.712983Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:13.764633Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:13.841840Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519629829276478474:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:13.841972Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:13.842295Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629829276478479:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:13.847734Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:24:13.861746Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519629829276478481:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:24:13.943098Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519629829276478532:3428] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpSort::TopParameterFilter [GOOD] Test command err: Trying to start YDB, gRPC: 5429, MsgBus: 9480 2025-06-24T21:23:20.562060Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629603531117325:2166];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:20.562390Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003134/r3tmp/tmpdbBQZ3/pdisk_1.dat 2025-06-24T21:23:21.151499Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:21.151621Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:21.161432Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:21.212507Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:21.215008Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629603531117185:2079] 1750800200514744 != 1750800200514747 TServer::EnableGrpc on GrpcPort 5429, node 1 2025-06-24T21:23:21.351747Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:21.351769Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:21.351780Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:21.351888Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:23:21.552180Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9480 TClient is connected to server localhost:9480 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:22.147066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:22.166123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:23:22.190680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:22.375534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:22.631371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:22.716884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:24.619277Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629620710988015:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:24.619378Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:25.046495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:25.078058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:25.106850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:25.139475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:25.172984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:25.212491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:25.259566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:25.357047Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629625005955976:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:25.357129Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:25.357416Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629625005955981:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:25.365937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:25.379747Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629625005955983:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:23:25.455602Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629625005956034:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:25.554609Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629603531117325:2166];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:25.554660Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 5572, MsgBus: 16760 2025-06-24T21:23:27.941109Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629632285178160:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:27.941161Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: ... tate: Unknown -> Disconnected 2025-06-24T21:24:08.330748Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:08.336146Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29784, node 7 2025-06-24T21:24:08.434210Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:08.434239Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:08.434251Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:08.434421Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9378 2025-06-24T21:24:09.147352Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9378 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-24T21:24:09.320324Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:09.339853Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:09.430821Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:09.662643Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:09.761740Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:13.077828Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629832604106616:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:13.077996Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:13.108589Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:13.135053Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519629811129268511:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:13.135183Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:24:13.161699Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:13.208072Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:13.250381Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:13.297116Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:13.352880Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:13.400768Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:13.486111Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519629832604107272:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:13.486238Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:13.486593Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629832604107277:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:13.492499Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:24:13.508520Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519629832604107279:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:24:13.563215Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519629832604107330:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ( (declare $limit (DataType 'Uint64)) (declare $value (DataType 'Int32)) (let $1 (KqpTable '"/Root/TwoShard" '"72057594046644480:2" '"" '1)) (let $2 '('"Key" '"Value1" '"Value2")) (let $3 (KqpRowsSourceSettings $1 $2 '() (Void) '())) (let $4 (DataType 'Int32)) (let $5 (Min (Uint64 '"1001") $limit)) (let $6 (StructType '('"Key" (OptionalType (DataType 'Uint32))) '('"Value1" (OptionalType (DataType 'String))) '('"Value2" (OptionalType $4)))) (let $7 '('('"_logical_id" '497) '('"_id" '"206adb77-f38b3e95-894b294d-b0f6a6ac") '('"_wide_channels" $6))) (let $8 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $3)) (lambda '($12) (block '( (let $13 (lambda '($16) (block '( (let $17 (Member $16 '"Value2")) (return (Member $16 '"Key") (Member $16 '"Value1") $17 (Coalesce (!= $17 $value) (Bool 'false))) )))) (let $14 (WideFilter (ExpandMap (ToFlow $12) $13) (lambda '($18 $19 $20 $21) $21) $5)) (let $15 (lambda '($22 $23 $24 $25) $22 $23 $24)) (return (FromFlow (WideMap $14 $15))) ))) $7)) (let $9 (DqCnUnionAll (TDqOutput $8 '"0"))) (let $10 (DqPhyStage '($9) (lambda '($26) (FromFlow (NarrowMap (Take (ToFlow $26) $5) (lambda '($27 $28 $29) (AsStruct '('"Key" $27) '('"Value1" $28) '('"Value2" $29)))))) '('('"_logical_id" '510) '('"_id" '"961fe206-80bcc505-a10a40e2-1c8ec5e")))) (let $11 (DqCnResult (TDqOutput $10 '"0") '())) (return (KqpPhysicalQuery '((KqpPhysicalTx '($8 $10) '($11) '('('"$limit") '('"$value")) '('('"type" '"data")))) '((KqpTxResultBinding (ListType $6) '"0" '"0")) '('('"type" '"data_query")))) ) >> SystemView::QueryStats [GOOD] >> SystemView::QueryStatsFields >> SystemView::GroupsFields [GOOD] >> SystemView::Describe >> KqpIndexLookupJoin::CheckAllKeyTypesCast [GOOD] >> TLocksTest::BrokenSameKeyLock >> TLocksFatTest::RangeSetBreak >> TFlatTest::CrossRW >> TObjectStorageListingTest::Listing >> TFlatTest::CopyCopiedTableAndRead >> TFlatTest::LargeDatashardReplyDistributed >> TObjectStorageListingTest::Split >> TLocksTest::Range_IncorrectDot1 >> TLocksTest::CK_Range_BrokenLock >> TFlatTest::ReadOnlyMode >> TFlatTest::SelectRangeNullArgs3 >> TFlatTest::ShardFreezeUnfreezeAlreadySet >> TFlatTest::Mix_DML_DDL >> KqpJoinOrder::TPCDS61+ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckAllKeyTypesCast [GOOD] Test command err: Trying to start YDB, gRPC: 5315, MsgBus: 4841 2025-06-24T21:20:50.333928Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628960653827968:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:50.347545Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003981/r3tmp/tmpMAMTFO/pdisk_1.dat 2025-06-24T21:20:51.023574Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-24T21:20:51.023673Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:51.036838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:20:51.043511Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:51.055359Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628960653827759:2079] 1750800050190235 != 1750800050190238 TServer::EnableGrpc on GrpcPort 5315, node 1 2025-06-24T21:20:51.355337Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:51.355949Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:51.355957Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:51.355965Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:51.356245Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4841 TClient is connected to server localhost:4841 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:52.776832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:52.809433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:20:52.838184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:20:53.097756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:20:53.500265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:53.655965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:20:55.326518Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628960653827968:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:55.326587Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:55.970896Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628982128665882:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:55.971026Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.355381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.409778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.464577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.520057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.580743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.665557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.714751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.824277Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628986423633842:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.824386Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.824771Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628986423633847:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.829679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:56.857451Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628986423633849:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:20:56.947715Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628986423633902:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:58.826202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work ... GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:36: Error: At function: EquiJoin
:3:36: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-24T21:23:36.339481Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NzlkMjBhODQtMTM4OTllMzEtOGZhNWFjMDctMzA4ZGJmNGU=, ActorId: [2:7519629043567149110:2479], ActorState: ExecuteState, TraceId: 01jyhx6c7ha99jnqmrxqp1cc70, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:23:36.378098Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519629670632385240:5879], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:36: Error: At function: EquiJoin
:3:36: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-24T21:23:36.378328Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NzlkMjBhODQtMTM4OTllMzEtOGZhNWFjMDctMzA4ZGJmNGU=, ActorId: [2:7519629043567149110:2479], ActorState: ExecuteState, TraceId: 01jyhx6c8y6p9p445nmz2z28wg, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:23:43.834144Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519629700697156778:6025], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:36: Error: At function: EquiJoin
:3:36: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-24T21:23:43.834437Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NzlkMjBhODQtMTM4OTllMzEtOGZhNWFjMDctMzA4ZGJmNGU=, ActorId: [2:7519629043567149110:2479], ActorState: ExecuteState, TraceId: 01jyhx6khb0r5sm06jmcvvbybv, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:23:47.740254Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519629717877026189:6101], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:36: Error: At function: EquiJoin
:3:36: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-24T21:23:47.740455Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NzlkMjBhODQtMTM4OTllMzEtOGZhNWFjMDctMzA4ZGJmNGU=, ActorId: [2:7519629043567149110:2479], ActorState: ExecuteState, TraceId: 01jyhx6qc2d0e960cz3v64gpez, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:23:47.780363Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519629717877026202:6107], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:36: Error: At function: EquiJoin
:3:36: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-24T21:23:47.791765Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NzlkMjBhODQtMTM4OTllMzEtOGZhNWFjMDctMzA4ZGJmNGU=, ActorId: [2:7519629043567149110:2479], ActorState: ExecuteState, TraceId: 01jyhx6qd784n6nd6ahqkxjkg8, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:23:47.827595Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519629717877026215:6113], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:36: Error: At function: EquiJoin
:3:36: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-24T21:23:47.830488Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NzlkMjBhODQtMTM4OTllMzEtOGZhNWFjMDctMzA4ZGJmNGU=, ActorId: [2:7519629043567149110:2479], ActorState: ExecuteState, TraceId: 01jyhx6qev0db39e5pd6w0za8k, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:23:55.582452Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519629752236765061:6259], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:34: Error: At function: EquiJoin
:3:34: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-24T21:23:55.582733Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NzlkMjBhODQtMTM4OTllMzEtOGZhNWFjMDctMzA4ZGJmNGU=, ActorId: [2:7519629043567149110:2479], ActorState: ExecuteState, TraceId: 01jyhx6z1210erckxkwwfvh70p, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:23:55.618047Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519629752236765074:6265], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:34: Error: At function: EquiJoin
:3:34: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-24T21:23:55.618268Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NzlkMjBhODQtMTM4OTllMzEtOGZhNWFjMDctMzA4ZGJmNGU=, ActorId: [2:7519629043567149110:2479], ActorState: ExecuteState, TraceId: 01jyhx6z29awzgs4q9t9rxt5q0, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:23:59.389824Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519629769416634499:6341], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:34: Error: At function: EquiJoin
:3:34: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-24T21:23:59.390760Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NzlkMjBhODQtMTM4OTllMzEtOGZhNWFjMDctMzA4ZGJmNGU=, ActorId: [2:7519629043567149110:2479], ActorState: ExecuteState, TraceId: 01jyhx72qybw0p5mv846086tnx, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:23:59.434570Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519629769416634512:6347], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:34: Error: At function: EquiJoin
:3:34: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-24T21:23:59.434930Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NzlkMjBhODQtMTM4OTllMzEtOGZhNWFjMDctMzA4ZGJmNGU=, ActorId: [2:7519629043567149110:2479], ActorState: ExecuteState, TraceId: 01jyhx72sd2tz52vjdg06gqtjc, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:24:06.807934Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519629799481406044:6491], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:34: Error: At function: EquiJoin
:3:34: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-24T21:24:06.808219Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NzlkMjBhODQtMTM4OTllMzEtOGZhNWFjMDctMzA4ZGJmNGU=, ActorId: [2:7519629043567149110:2479], ActorState: ExecuteState, TraceId: 01jyhx79z3bb7dcrkkgtekbnxc, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:24:10.730938Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519629816661275471:6570], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:34: Error: At function: EquiJoin
:3:34: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-24T21:24:10.731318Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NzlkMjBhODQtMTM4OTllMzEtOGZhNWFjMDctMzA4ZGJmNGU=, ActorId: [2:7519629043567149110:2479], ActorState: ExecuteState, TraceId: 01jyhx7dth022bf69ww31cx8rf, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:24:10.776246Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519629816661275484:6576], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:34: Error: At function: EquiJoin
:3:34: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-24T21:24:10.778969Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NzlkMjBhODQtMTM4OTllMzEtOGZhNWFjMDctMzA4ZGJmNGU=, ActorId: [2:7519629043567149110:2479], ActorState: ExecuteState, TraceId: 01jyhx7dvy4bkp1tvyv2p40xwm, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:24:10.811630Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519629816661275498:6582], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:34: Error: At function: EquiJoin
:3:34: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-24T21:24:10.811900Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NzlkMjBhODQtMTM4OTllMzEtOGZhNWFjMDctMzA4ZGJmNGU=, ActorId: [2:7519629043567149110:2479], ActorState: ExecuteState, TraceId: 01jyhx7dx287s43f3nzre29vz9, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> SystemView::SystemViewFailOps [GOOD] >> SystemView::TabletsFields >> SystemView::AuthOwners [GOOD] >> SystemView::AuthOwners_Access >> TLocksTest::NoLocksSet ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS61+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 27683, MsgBus: 28637 2025-06-24T21:22:13.663839Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629317075616882:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:13.663998Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0038d2/r3tmp/tmpk358fs/pdisk_1.dat 2025-06-24T21:22:14.391071Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:14.395564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:14.467631Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629317075616859:2079] 1750800133640165 != 1750800133640168 2025-06-24T21:22:14.493274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:22:14.500752Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27683, node 1 2025-06-24T21:22:14.724453Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:22:14.775762Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:14.775772Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:14.775779Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:14.775950Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28637 TClient is connected to server localhost:28637 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:15.974139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:15.999016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:22:18.667269Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629317075616882:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:18.667332Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:18.789773Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629338550453988:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:18.789913Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:18.790357Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629338550454001:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:18.794737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:18.807036Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629338550454003:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:22:18.897810Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629338550454054:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:19.396332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:22:19.794565Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519629342845421607:2322];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:22:19.802339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629342845421604:2319];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:22:19.802531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629342845421604:2319];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:22:19.802772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629342845421604:2319];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:22:19.802885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629342845421604:2319];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:22:19.802975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629342845421604:2319];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:22:19.803071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629342845421604:2319];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:22:19.803473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519629342845421607:2322];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:22:19.803624Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519629342845421607:2322];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:22:19.803728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037903;self_id=[1:7519629342845421607:2322];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:22:19.803828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519629342845421607:2322];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:22:19.803914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519629342845421607:2322];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:22:19.804001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519629342845421607:2322];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:22:19.804091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519629342845421607:2322];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:22:19.804184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519629342845421607:2322];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:22:19.804280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519629342845421607:2322];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:22:19.804398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519629342845421607:2322];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:22:19.807402Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629342845421604:2319];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:22:19.807566Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629342845421604:2319];tabl ... 
474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:38.207016Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:38.207755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:38.211287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:38.212976Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:38.213650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:38.219334Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:38.220091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:38.220587Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:38.221159Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:38.225482Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:38.226234Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:38.226585Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:38.227203Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:38.231687Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:38.231800Z 
node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:38.232387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:38.233153Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:38.237926Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:38.238128Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:38.305864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:38.306676Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:38.312678Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:38.313190Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:38.313404Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:38.314987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:38.321016Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:38.321117Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:38.321759Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:38.322187Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:38.328417Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:38.328417Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:38.330021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:38.336574Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:38.435949Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx5fq5c29aqbz8145m6pv2", SessionId: ydb://session/3?node_id=1&id=MzQ2Y2ZiYS1mMTkyODUzNC0yZDQ0OGJjNy0xMDY2YWRhZA==, Slow query, duration: 31.325861s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:23:39.108993Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:39.108993Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:39.109360Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519629557593823212:7977];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224039392; 2025-06-24T21:23:39.109817Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:24:12.646913Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx6y3cf4w25xn7f18pcp9k", SessionId: ydb://session/3?node_id=1&id=MzQ2Y2ZiYS1mMTkyODUzNC0yZDQ0OGJjNy0xMDY2YWRhZA==, Slow query, duration: 18.041424s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query61.tpl and seed 1930872976\nselect 
promotions,total,cast(promotions as float)/cast(total as float)*100\nfrom\n (select sum(ss_ext_sales_price) promotions\n from store_sales\n cross join store\n cross join promotion\n cross join date_dim\n cross join customer\n cross join customer_address\n cross join item\n where ss_sold_date_sk = d_date_sk\n and ss_store_sk = s_store_sk\n and ss_promo_sk = p_promo_sk\n and ss_customer_sk= c_customer_sk\n and ca_address_sk = c_current_addr_sk\n and ss_item_sk = i_item_sk\n and ca_gmt_offset = -6\n and i_category = 'Sports'\n and (p_channel_dmail = 'Y' or p_channel_email = 'Y' or p_channel_tv = 'Y')\n and s_gmt_offset = -6\n and d_year = 2001\n and d_moy = 12) promotional_sales cross join\n (select sum(ss_ext_sales_price) total\n from store_sales\n cross join store\n cross join date_dim\n cross join customer\n cross join customer_address\n cross join item\n where ss_sold_date_sk = d_date_sk\n and ss_store_sk = s_store_sk\n and ss_customer_sk= c_customer_sk\n and ca_address_sk = c_current_addr_sk\n and ss_item_sk = i_item_sk\n and ca_gmt_offset = -6\n and i_category = 'Sports'\n and s_gmt_offset = -6\n and d_year = 2001\n and d_moy = 12) all_sales\norder by promotions, total\nlimit 100;\n", parameters: 0b >> TFlatTest::Mix_DML_DDL [GOOD] >> TFlatTest::OutOfDiskSpace [GOOD] >> TFlatTest::SelectRangeNullArgs3 [GOOD] >> TFlatTest::SelectRangeNullArgs4 >> TFlatTest::ShardFreezeUnfreezeAlreadySet [GOOD] >> TFlatTest::ShardFreezeUnfreeze >> TFlatTest::CrossRW [GOOD] >> TFlatTest::GetTabletCounters >> TObjectStorageListingTest::Split [GOOD] >> TObjectStorageListingTest::SuffixColumns >> TFlatTest::CopyCopiedTableAndRead [GOOD] >> TFlatTest::CopyTableAndAddFollowers >> TFlatTest::ReadOnlyMode [GOOD] >> TFlatTest::RejectByIncomingReadSetSize ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::OutOfDiskSpace [GOOD] Test command err: 2025-06-24T21:24:20.464829Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629859785216492:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:20.465020Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aab/r3tmp/tmp5089Gq/pdisk_1.dat 2025-06-24T21:24:20.860697Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:20.863499Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629859785216474:2079] 1750800260464141 != 1750800260464144 2025-06-24T21:24:20.900720Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:20.900816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:20.902361Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19378 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:21.209021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:21.228307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:24:21.243916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:21.488859Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:21.498762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) waiting... proxy error code: Unknown error:
: Error: Resolve failed for table: /dc-1/Table, error: column 'value' not exist, code: 200400 2025-06-24T21:24:21.535019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) waiting... 2025-06-24T21:24:21.564804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) waiting... 2025-06-24T21:24:21.583311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) waiting... proxy error code: Unknown error:
:5:24: Error: At function: AsList
:5:32: Error: At function: SetResult
:4:27: Error: At function: SelectRow
:4:27: Error: Mismatch of key columns count for table [/dc-1/Table], expected: 2, but got 1., code: 2028 >> KqpService::CloseSessionsWithLoad [GOOD] >> KqpService::PatternCache >> TFlatTest::SelectRangeReverseItemsLimit >> SystemView::Describe [GOOD] >> SystemView::DescribeSystemFolder >> TObjectStorageListingTest::Listing [GOOD] >> TObjectStorageListingTest::ManyDeletes >> SystemView::TabletsFields [GOOD] >> SystemView::TabletsShards >> KqpNewEngine::IdxLookupExtractMembers [GOOD] >> TFlatTest::ShardFreezeUnfreeze [GOOD] >> TFlatTest::GetTabletCounters [GOOD] >> SystemView::QueryStatsFields [GOOD] >> SystemView::PartitionStatsTtlFields >> TObjectStorageListingTest::SuffixColumns [GOOD] >> HttpRequest::Status [GOOD] >> TFlatTest::SelectRangeNullArgs4 [GOOD] >> TFlatTest::CopyTableAndAddFollowers [GOOD] >> TFlatTest::CopyCopiedTableAndDropFirstCopy >> TFlatTest::CopyTableAndReturnPartAfterCompaction >> TLocksFatTest::RangeSetBreak [GOOD] >> TLocksFatTest::RangeSetNotBreak >> TFlatTest::RejectByIncomingReadSetSize [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::IdxLookupExtractMembers [GOOD] Test command err: Trying to start YDB, gRPC: 26246, MsgBus: 26949 2025-06-24T21:23:23.303754Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629617846977130:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:23.316224Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00311e/r3tmp/tmpNsmgvS/pdisk_1.dat 2025-06-24T21:23:23.821853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:23.821966Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:23.832265Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:23.844293Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26246, node 1 2025-06-24T21:23:24.055751Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:24.055776Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:24.055798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:24.055915Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26949 2025-06-24T21:23:24.326906Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26949 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:24.786185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:24.823483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:24.966600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:25.131276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:23:25.216897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:26.944060Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629630731880595:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:26.944169Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:27.250519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:27.300442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:27.356126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:27.399398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:27.471935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:27.518653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:27.573311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:27.707286Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629635026848548:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:27.707387Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:27.710052Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629635026848553:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:27.714716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:27.735031Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629635026848555:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:23:27.807706Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629635026848606:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:28.304332Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629617846977130:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:28.304401Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 17946, MsgBus: 6508 2025-06-24T21:23:30.244776Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629644854976054:2190];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:30.278642Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00311e/r3tmp/tmpdegcZl/pdisk_1.dat 2025-06-24T21:23:30.434010Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23: ... :2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:24:14.554709Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7519629833179391124:3432] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 7252, MsgBus: 6426 2025-06-24T21:24:17.857885Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519629848790767371:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:17.857992Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00311e/r3tmp/tmpF4xCO7/pdisk_1.dat 2025-06-24T21:24:17.978393Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:17.979972Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519629848790767351:2079] 1750800257857224 != 1750800257857227 TServer::EnableGrpc on GrpcPort 7252, node 7 2025-06-24T21:24:17.997662Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:17.997789Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:17.999470Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:18.041799Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:18.041826Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:18.041837Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:18.041996Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6426 TClient is connected to server localhost:6426 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:24:18.621994Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:18.639097Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:18.721377Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:18.873272Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:18.881334Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:18.960152Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:22.463287Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629870265605486:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:22.463405Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:22.563978Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:22.642665Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:22.719088Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:22.766248Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:22.811346Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:22.858065Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519629848790767371:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:22.858170Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:24:22.889518Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:22.970331Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:23.108077Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519629874560573455:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:23.108205Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:23.108303Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519629874560573460:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:23.113237Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:24:23.126146Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519629874560573462:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:24:23.220182Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519629874560573513:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::ShardFreezeUnfreeze [GOOD] Test command err: 2025-06-24T21:24:20.500160Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629860925573675:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:20.500204Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa7/r3tmp/tmps5lnWN/pdisk_1.dat 2025-06-24T21:24:20.950402Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:20.955271Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629860925573649:2079] 1750800260498887 != 1750800260498890 2025-06-24T21:24:20.957170Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:20.957332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:20.959256Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24632 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:21.249089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:24:21.271877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:21.442654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) waiting... 2025-06-24T21:24:21.474285Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629865220541649:2391] txid# 281474976710660, issues: { message: "Requested freeze state already set" severity: 1 } Error 1: Requested freeze state already set 2025-06-24T21:24:21.476457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) waiting... Error 1: Requested freeze state already set 2025-06-24T21:24:21.496971Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629865220541690:2426] txid# 281474976710662, issues: { message: "Requested freeze state already set" severity: 1 } 2025-06-24T21:24:21.505039Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:23.777639Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629873254563887:2123];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:23.777674Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa7/r3tmp/tmpFXMXPK/pdisk_1.dat 2025-06-24T21:24:23.955913Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:23.959304Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629873254563805:2079] 1750800263770503 != 1750800263770506 2025-06-24T21:24:23.976149Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:23.976272Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:23.978119Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29077 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:24.253355Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:24.259650Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:24:24.263251Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:24:24.329203Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:24:24.352156Z node 2 :TX_DATASHARD ERROR: datashard_pipeline.cpp:1570: Shard 72075186224037888 cannot parse tx 281474976715660: 2025-06-24T21:24:24.352344Z node 2 :TX_PROXY ERROR: datareq.cpp:1873: Actor# [2:7519629877549531811:2393] txid# 281474976715660 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# ERROR shard id 72075186224037888 read size 0 out readset size 0 marker# P6 2025-06-24T21:24:24.352455Z node 2 :TX_PROXY ERROR: datareq.cpp:2071: Actor# [2:7519629877549531811:2393] txid# 281474976715660 HANDLE PrepareErrors TEvProposeTransactionResult TDataReq TabletStatus# StatusWait shard id 72075186224037888 2025-06-24T21:24:24.352475Z node 2 :TX_PROXY ERROR: datareq.cpp:1274: Actor# [2:7519629877549531811:2393] txid# 281474976715660 invalidateDistCache: 0 DIE TDataReq MarkShardError TabletsLeft# 1 2025-06-24T21:24:24.354824Z node 2 :TX_DATASHARD ERROR: datashard_pipeline.cpp:1570: Shard 72075186224037888 cannot parse tx 281474976715661: 2025-06-24T21:24:24.355000Z node 2 :TX_PROXY ERROR: datareq.cpp:1873: Actor# [2:7519629877549531819:2398] txid# 281474976715661 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# ERROR shard id 72075186224037888 read size 0 out readset size 0 marker# P6 2025-06-24T21:24:24.355090Z node 2 :TX_PROXY ERROR: datareq.cpp:2071: Actor# [2:7519629877549531819:2398] txid# 281474976715661 HANDLE PrepareErrors TEvProposeTransactionResult TDataReq TabletStatus# StatusWait shard id 72075186224037888 2025-06-24T21:24:24.355107Z node 2 :TX_PROXY ERROR: datareq.cpp:1274: Actor# [2:7519629877549531819:2398] txid# 281474976715661 invalidateDistCache: 0 DIE TDataReq MarkShardError TabletsLeft# 1 2025-06-24T21:24:24.364626Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) waiting... ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> HttpRequest::Status [GOOD] Test command err: 2025-06-24T21:24:06.172187Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:417:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:24:06.172694Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:06.172785Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004832/r3tmp/tmpTSInlQ/pdisk_1.dat 2025-06-24T21:24:06.630342Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25748, node 1 2025-06-24T21:24:06.961177Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:06.961243Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:06.961286Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:06.961791Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:06.964764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:07.067994Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:07.068156Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:07.084314Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:65440 2025-06-24T21:24:07.673892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:10.990846Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:24:11.028091Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:11.028224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:11.070319Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:24:11.072389Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:11.287987Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:11.323392Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.324089Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.324641Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.324822Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.325046Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.325147Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.325228Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.325324Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.325406Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.539633Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:11.539741Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:11.556431Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:11.711992Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:11.776659Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:24:11.776766Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:24:11.809658Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:24:11.810389Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:24:11.810617Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:24:11.810696Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:24:11.810748Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:24:11.810803Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:24:11.810874Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:24:11.810936Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:24:11.811459Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:24:11.839740Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:11.839865Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1790:2560], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:11.845871Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:24:11.852804Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1838:2585] 2025-06-24T21:24:11.853976Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1838:2585], schemeshard id = 72075186224037897 2025-06-24T21:24:11.858777Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:24:11.877575Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:24:11.877638Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:24:11.877710Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:24:11.892187Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:11.899892Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:24:11.900035Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:24:12.092308Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:24:12.284368Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:24:12.349689Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:24:12.937318Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:13.194026Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2143:3020], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:13.194280Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:13.219054Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:24:13.539746Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2277:2829];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:24:13.540014Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2277:2829];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:24:13.540279Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2277:2829];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:24:13.540417Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2277:2829];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:24:13.540558Z node 2 :TX_COLUMNSHARD WARN: ... MNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-24T21:24:16.213532Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-24T21:24:16.214067Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-24T21:24:16.214552Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037904;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-24T21:24:16.215389Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-24T21:24:16.215889Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-24T21:24:16.216692Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-24T21:24:16.217158Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-24T21:24:16.218493Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-24T21:24:17.059774Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3707:3174], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:17.060287Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:17.064085Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715661:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-24T21:24:17.121020Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037900;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:17.121956Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:17.123712Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:17.124446Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:17.124907Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:17.126552Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037904;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:17.127585Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:17.128300Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:17.128767Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:17.129266Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:17.825510Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3869:3220], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:17.825663Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:17.845918Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715662:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-24T21:24:17.906565Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:17.907258Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037900;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:17.907742Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:17.908230Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:17.909478Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037904;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:17.910223Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:17.911356Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:17.911814Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:17.912607Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:17.913022Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; waiting actualization: 0/0.000019s 2025-06-24T21:24:25.904568Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:5754:5385] 2025-06-24T21:24:25.924279Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:478: [72075186224037894] Send TEvStatistics::TEvAnalyzeStatusResponse. 
Status STATUS_NO_OPERATION Answer: 'No analyze operation' FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::GetTabletCounters [GOOD] Test command err: 2025-06-24T21:24:20.449312Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629859231057397:2151];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:20.453543Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ab0/r3tmp/tmptyIbBz/pdisk_1.dat 2025-06-24T21:24:20.835259Z node 1 :CONFIGS_DISPATCHER 
ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629859231057269:2079] 1750800260438468 != 1750800260438471 2025-06-24T21:24:20.836644Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:20.889264Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:20.889353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:20.891306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22257 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:21.206762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:21.223274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:21.245687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:21.452678Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:23.791761Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629873685784207:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:23.791830Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ab0/r3tmp/tmpIzs0i2/pdisk_1.dat 2025-06-24T21:24:23.969646Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:23.975322Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629873685784187:2079] 1750800263787829 != 1750800263787832 2025-06-24T21:24:23.985134Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:23.985225Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:23.986927Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11265 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:24.273378Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:24.280675Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:24.300169Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750800264409 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... (TRUNCATED) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TObjectStorageListingTest::SuffixColumns [GOOD] Test command err: 2025-06-24T21:24:20.451279Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629860246968302:2184];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:20.451465Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aae/r3tmp/tmpYcUR63/pdisk_1.dat 2025-06-24T21:24:20.864038Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:20.876980Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629860246968141:2079] 1750800260438286 != 1750800260438289 2025-06-24T21:24:20.902017Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:20.902126Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:20.904111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5100, node 1 2025-06-24T21:24:21.183550Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:21.183579Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:21.183587Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:21.183731Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:21.453702Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9699 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:21.769752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:21.783975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:21.809003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:24:21.821077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /dc-1/Dir/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750800261973 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "Hash" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Path" ... (TRUNCATED) waiting... 
TClient::Ls request: /dc-1/Dir/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750800261973 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "Hash" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Path" ... (TRUNCATED) 2025-06-24T21:24:23.891175Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629872744432901:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:23.891240Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aae/r3tmp/tmpAEpi09/pdisk_1.dat 2025-06-24T21:24:24.097605Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629872744432884:2079] 1750800263890516 != 1750800263890519 2025-06-24T21:24:24.105164Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63404, node 2 2025-06-24T21:24:24.129134Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:24.129246Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:24.155556Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:24.178546Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:24.178576Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:24.178585Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:24.178738Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9999 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:24.417208Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:24.439185Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:24.880874Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553163, Sender [2:7519629877039401482:2442], Recipient [2:7519629877039400828:2269]: NKikimrTxDataShard.TEvObjectStorageListingRequest TableId: 3 SerializedKeyPrefix: "\002\000\010\000\000\0002\000\000\000\000\000\000\000\010\000\000\000Bucket50" PathColumnPrefix: "Music/AC DC/" PathColumnDelimiter: "/" SerializedStartAfterKeySuffix: "\002\000\037\000\000\000Music/AC DC/Shoot to Thrill.mp3\010\000\000\000B\000\000\000\000\000\000\000" ColumnsToReturn: 3 ColumnsToReturn: 4 ColumnsToReturn: 6 MaxKeys: 10 2025-06-24T21:24:24.880922Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3148: StateWork, processing event TEvDataShard::TEvObjectStorageListingRequest 2025-06-24T21:24:24.881157Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037888 S3 Listing: start at key ((type:4, value:"2\0\0\0\0\0\0\0") (type:4608, value:"Bucket50") (type:4608, value:"Music/AC DC/Shoot to Thrill.mp3") (type:4, value:"B\0\0\0\0\0\0\0")), end at key ((type:4, value:"2\0\0\0\0\0\0\0") (type:4608, value:"Bucket50") (type:4608, value:"Music/AC DC0") (type:0)) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-24T21:24:24.881369Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Shoot to Thrill.mp3" -> (Utf8 : Music/AC DC/Shoot to Thrill.mp3, Uint64 : 77, String : ) 2025-06-24T21:24:24.881419Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Shoot to Thrill.mp3" -> (Utf8 : Music/AC DC/Shoot to Thrill.mp3, Uint64 : 88, String : ) 2025-06-24T21:24:24.881456Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Shoot to Thrill.mp3" -> (Utf8 : Music/AC DC/Shoot to Thrill.mp3, Uint64 : 666, String : ) 2025-06-24T21:24:24.881486Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Thunderstruck.mp3" -> (Utf8 : Music/AC DC/Thunderstruck.mp3, Uint64 : 1, String : ) 2025-06-24T21:24:24.881522Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Thunderstruck.mp3" -> (Utf8 : Music/AC DC/Thunderstruck.mp3, Uint64 : 66, String : ) 2025-06-24T21:24:24.881603Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037888 S3 Listing: finished status: 0 description: "" contents: 5 common prefixes: 0 2025-06-24T21:24:24.906526Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553163, Sender [2:7519629877039401487:2444], Recipient [2:7519629877039400828:2269]: NKikimrTxDataShard.TEvObjectStorageListingRequest TableId: 3 SerializedKeyPrefix: "\002\000\010\000\000\0002\000\000\000\000\000\000\000\010\000\000\000Bucket50" PathColumnPrefix: "Music/AC DC/" PathColumnDelimiter: "/" SerializedStartAfterKeySuffix: "\001\000\037\000\000\000Music/AC DC/Shoot to Thrill.mp3" ColumnsToReturn: 3 ColumnsToReturn: 4 ColumnsToReturn: 5 MaxKeys: 10 2025-06-24T21:24:24.906557Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3148: StateWork, processing event TEvDataShard::TEvObjectStorageListingRequest 2025-06-24T21:24:24.906710Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037888 S3 Listing: start at key ((type:4, value:"2\0\0\0\0\0\0\0") (type:4608, value:"Bucket50") (type:4608, value:"Music/AC DC/Shoot 
to Thrill.mp3")), end at key ((type:4, value:"2\0\0\0\0\0\0\0") (type:4608, value:"Bucket50") (type:4608, value:"Music/AC DC0") (type:0)) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-24T21:24:24.906885Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Thunderstruck.mp3" -> (Utf8 : Music/AC DC/Thunderstruck.mp3, Uint64 : 1, Uint64 : 10) 2025-06-24T21:24:24.906929Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Thunderstruck.mp3" -> (Utf8 : Music/AC DC/Thunderstruck.mp3, Uint64 : 66, Uint64 : 10) 2025-06-24T21:24:24.906997Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037888 S3 Listing: finished status: 0 description: "" contents: 2 common prefixes: 0 2025-06-24T21:24:24.909193Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SelectRangeNullArgs4 [GOOD] Test command err: 2025-06-24T21:24:20.464500Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629861963272037:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:20.464587Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aad/r3tmp/tmp1ttXZk/pdisk_1.dat 2025-06-24T21:24:20.837779Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:20.854323Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629861963272017:2079] 1750800260463278 != 1750800260463281 2025-06-24T21:24:20.888718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:20.888865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:20.890610Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13900 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:24:21.212602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:21.247981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:21.487579Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:23.776891Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629872002312898:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:23.777038Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aad/r3tmp/tmp9h4VGO/pdisk_1.dat 2025-06-24T21:24:23.944525Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:23.950849Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629872002312881:2079] 1750800263776442 != 1750800263776445 2025-06-24T21:24:23.971077Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:23.971164Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:23.973425Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2546 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:24:24.316814Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:24.324466Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:24.336388Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:24:24.340875Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> TFlatTest::SelectRangeReverseItemsLimit [GOOD] >> TFlatTest::SelectRangeReverseIncludeKeys ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::RejectByIncomingReadSetSize [GOOD] Test command err: 2025-06-24T21:24:20.584721Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629859304250885:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:20.584838Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa8/r3tmp/tmpvPQnLq/pdisk_1.dat 2025-06-24T21:24:21.195762Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:21.196923Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629859304250857:2079] 1750800260577073 != 1750800260577076 2025-06-24T21:24:21.242747Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:21.242828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:21.248116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1896 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: 2025-06-24T21:24:21.592731Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:21.600858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:21.727868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/dc-1" OperationType: ESchemeOpMkDir MkDir { Name: "Dir1" } } TxId: 281474976715658 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:24:21.728086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /dc-1/Dir1, operationId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-24T21:24:21.728215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: dc-1, child name: Dir1, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-24T21:24:21.728249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-06-24T21:24:21.728267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976715658:0 type: TxMkDir target path: [OwnerId: 72057594046644480, LocalPathId: 2] source path: 2025-06-24T21:24:21.728314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:24:21.728493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-24T21:24:21.728548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-24T21:24:21.730828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976715658, response: 
Status: StatusAccepted TxId: 281474976715658 SchemeshardId: 72057594046644480 PathId: 2, at schemeshard: 72057594046644480 2025-06-24T21:24:21.731069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715658, database: /dc-1, subject: , status: StatusAccepted, operation: CREATE DIRECTORY, path: /dc-1/Dir1 2025-06-24T21:24:21.731318Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-24T21:24:21.731356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715658, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-24T21:24:21.731537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715658, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T21:24:21.731614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:21.731653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519629863599218816:2383], at schemeshard: 72057594046644480, txId: 281474976715658, path id: 1 2025-06-24T21:24:21.731673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519629863599218816:2383], at schemeshard: 72057594046644480, txId: 281474976715658, path id: 2 2025-06-24T21:24:21.731716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-24T21:24:21.731737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:63: MkDir::TPropose operationId# 281474976715658:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T21:24:21.731771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976715658 ready parts: 1/1 2025-06-24T21:24:21.735749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715658 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:24:21.737427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976715658 2025-06-24T21:24:21.737528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976715658 2025-06-24T21:24:21.737544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976715658 2025-06-24T21:24:21.737562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 
72057594046644480, txId: 281474976715658, pathId: [OwnerId: 72057594046644480, LocalPathId: 1], version: 4 2025-06-24T21:24:21.737580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-24T21:24:21.737840Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976715658 2025-06-24T21:24:21.737909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976715658 2025-06-24T21:24:21.737924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976715658 2025-06-24T21:24:21.737935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715658, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 2 2025-06-24T21:24:21.737943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-24T21:24:21.737979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715658, ready parts: 0/1, is published: true 2025-06-24T21:24:21.738134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:24:21.738156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715658, ready parts: 0/1, is published: true 2025-06-24T21:24:21.738174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:24:21.739788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976715658:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976715658 msg type: 269090816 Waiting for schemeshard to restart... TClient::Ls request: /dc-1 2025-06-24T21:24:21.739891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976715658, partId: 4294967295, tablet: 72057594046316545 2025-06-24T21:24:21.739984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715658 2025-06-24T21:24:21.740016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715658 2025-06-24T21:24:21.741886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanS ... 
94046644480 Generation: 4 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976715661 2025-06-24T21:24:22.047159Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:18} Tx{28, NKikimr::NSchemeShard::TSchemeShard::TTxAckPublishToSchemeBoard} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxAckPublishToSchemeBoard 2025-06-24T21:24:22.047177Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:18} Tx{28, NKikimr::NSchemeShard::TSchemeShard::TTxAckPublishToSchemeBoard} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:24:22.047238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 4 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976715661 2025-06-24T21:24:22.047246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976715661 2025-06-24T21:24:22.047256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715661, pathId: [OwnerId: 72057594046644480, LocalPathId: 4], version: 3 2025-06-24T21:24:22.047267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 1 2025-06-24T21:24:22.047324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715661, subscribers: 0 2025-06-24T21:24:22.047369Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:18} Tx{28, NKikimr::NSchemeShard::TSchemeShard::TTxAckPublishToSchemeBoard} hope 1 -> done Change{38, redo 166b alter 0b annex 0, ~{ 48, 59 } -{ }, 0 gb} 2025-06-24T21:24:22.047388Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:18} Tx{28, NKikimr::NSchemeShard::TSchemeShard::TTxAckPublishToSchemeBoard} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:24:22.049003Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:4:16:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:24:22.049005Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:14:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:24:22.049026Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:4:16:1:24576:122:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:24:22.049070Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:4:18:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:24:22.049080Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:14:1:24576:107:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:24:22.049125Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:4:18:1:24576:132:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:24:22.049129Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: 
TEvPutResult {Id# [72057594046644480:4:17:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:24:22.049146Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:4:17:1:24576:119:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:24:22.049191Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:15} commited cookie 1 for step 14 2025-06-24T21:24:22.049207Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:19} commited cookie 1 for step 16 2025-06-24T21:24:22.049301Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:19} commited cookie 1 for step 17 2025-06-24T21:24:22.049316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715661 2025-06-24T21:24:22.049335Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:19} commited cookie 1 for step 18 2025-06-24T21:24:22.049342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715661 2025-06-24T21:24:22.103828Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594046644480] ::Bootstrap [1:7519629867894186506:2262] 2025-06-24T21:24:22.103845Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594046644480] lookup [1:7519629867894186506:2262] 2025-06-24T21:24:22.103876Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594046644480] queue send [1:7519629867894186506:2262] 2025-06-24T21:24:22.104054Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594046644480] forward result local node, try to connect [1:7519629867894186506:2262] 2025-06-24T21:24:22.104106Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594046644480]::SendEvent [1:7519629867894186506:2262] 2025-06-24T21:24:22.104159Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594046644480] Accept Connect Originator# [1:7519629867894186506:2262] 2025-06-24T21:24:22.104252Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594046644480] connected with status OK role: Leader [1:7519629867894186506:2262] 2025-06-24T21:24:22.104275Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594046644480] send queued [1:7519629867894186506:2262] 2025-06-24T21:24:22.104301Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046644480] push event to server [1:7519629867894186506:2262] 2025-06-24T21:24:22.104352Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594046644480] HandleSend Sender# [1:7519629867894186505:2262] EventType# 271124996 2025-06-24T21:24:22.104448Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:19} Tx{29, NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion 2025-06-24T21:24:22.104469Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:19} Tx{29, NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:24:22.104499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-24T21:24:22.104531Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:19} Tx{29, 
NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion} hope 1 -> done Change{39, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T21:24:22.104581Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:19} Tx{29, NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:24:22.104939Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:397: TClient[72057594046644480] received poison pill [1:7519629867894186506:2262] 2025-06-24T21:24:22.104963Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594046644480] notify reset [1:7519629867894186506:2262] 2025-06-24T21:24:22.105028Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:182: [72057594046644480] Got PeerClosed from# [1:7519629867894186506:2262] 2025-06-24T21:24:24.172301Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629876743621189:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:24.172334Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa8/r3tmp/tmpMEZGBS/pdisk_1.dat 2025-06-24T21:24:24.352304Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:24.353725Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629876743621171:2079] 1750800264171673 != 1750800264171676 2025-06-24T21:24:24.392003Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:24.392056Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:24.395867Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5228 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:24.536384Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:24:24.544518Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:24.559793Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:25.187252Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:27.632798Z node 2 :TX_PROXY ERROR: datareq.cpp:2829: Actor# [2:7519629889628524156:2600] txid# 281474976715700 FailProposedRequest: Transaction incoming read set size 1000084 for tablet 72075186224037889 exceeded limit 1000 Status# ExecError 2025-06-24T21:24:27.632882Z node 2 :TX_PROXY ERROR: datareq.cpp:883: Actor# [2:7519629889628524156:2600] txid# 281474976715700 RESPONSE Status# ExecError marker# P13c >> HttpRequest::AnalyzeServerless [GOOD] >> TFlatTest::CopyCopiedTableAndDropFirstCopy [GOOD] >> TFlatTest::PathSorting >> TFlatTest::CopyTableAndReturnPartAfterCompaction [GOOD] >> TFlatTest::CopyTableDropOriginalAndReturnPartAfterCompaction >> TFlatTest::Ls >> TFlatTest::LargeDatashardReplyDistributed [GOOD] >> TFlatTest::LargeDatashardReplyRW >> TFlatTest::SelectRangeItemsLimit >> TLocksTest::Range_BrokenLock2 >> ShowCreateView::WithTwoTablePathPrefixes [GOOD] >> SystemView::AuthGroups >> TFlatTest::SelectRangeBytesLimit >> TLocksTest::GoodSameKeyLock ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> HttpRequest::AnalyzeServerless [GOOD] Test command err: 2025-06-24T21:24:06.272725Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:417:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:24:06.273244Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:06.273339Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00482e/r3tmp/tmpmN3OfQ/pdisk_1.dat 2025-06-24T21:24:06.671789Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30240, node 1 2025-06-24T21:24:07.025018Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:07.025076Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:07.025121Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:07.025653Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:07.028288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:07.138136Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:07.138324Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:07.164924Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25749 2025-06-24T21:24:07.815502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:11.220608Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:24:11.261713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:11.261848Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:11.292435Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:24:11.295702Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:11.539560Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:11.564311Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.564776Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.565394Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.565708Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.565791Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.565870Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.565994Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.566078Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.566155Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.764570Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:11.764713Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:11.784944Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:11.948342Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:12.011871Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:24:12.012009Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:24:12.068716Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:24:12.068899Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:24:12.069104Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:24:12.069165Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:24:12.069214Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:24:12.069267Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:24:12.069351Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:24:12.069412Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:24:12.069794Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:24:12.104612Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:12.104744Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1791:2563], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:12.111503Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1798:2569] 2025-06-24T21:24:12.127204Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1839:2589] 2025-06-24T21:24:12.127539Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1839:2589], schemeshard id = 72075186224037897 2025-06-24T21:24:12.135967Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-24T21:24:12.164228Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:24:12.164310Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:24:12.164406Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-24T21:24:12.178474Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:12.192691Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:24:12.192872Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:24:12.368022Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:24:12.574593Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:24:12.641165Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-24T21:24:13.335362Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:13.374280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:14.107866Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:14.308030Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7890: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-24T21:24:14.308105Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7906: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:24:14.308206Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:2501:2904], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:24:14.309548Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2502:2905] 2025-06-24T21:24:14.310501Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2502:2905], schemeshard id = 72075186224037899 2025-06-24T21:24:15.696776Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2629:3198], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:15.696901Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06 ... xProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:18.925619Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:18.925920Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:18.926226Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:18.926522Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:18.926816Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:18.927136Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037914;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:20.158835Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:4244:3366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:20.158936Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:20.172260Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715663:0, at schemeshard: 72075186224037899, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-24T21:24:20.220508Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-24T21:24:20.221344Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-24T21:24:20.222138Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037912;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-24T21:24:20.223887Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-24T21:24:20.224839Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-24T21:24:20.225360Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-24T21:24:20.225917Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037914;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-24T21:24:20.226754Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-24T21:24:20.227341Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-24T21:24:20.227889Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-24T21:24:20.977092Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:4403:3408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:20.977939Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:20.982613Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715664:0, at schemeshard: 72075186224037899, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-24T21:24:21.039032Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:24:21.039690Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:24:21.040431Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037912;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:24:21.041697Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:24:21.042448Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:24:21.042873Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:24:21.043374Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037914;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:24:21.044016Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:24:21.044501Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:24:21.044979Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; waiting actualization: 0/0.000015s 2025-06-24T21:24:29.757795Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:6295:5611] 2025-06-24T21:24:29.761513Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. ReplyToActorId [1:6291:4003] , Record { OperationId: "\000\000\000\000\"\tX\3774/6\231\302\004\253\331" Tables { PathId { OwnerId: 72057594046644480 LocalId: 2 } } } 2025-06-24T21:24:29.761592Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:47: [72075186224037894] TTxAnalyze::Execute. 
Create new force traversal operation, OperationId=" X4/6 2025-06-24T21:24:29.761642Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:65: [72075186224037894] TTxAnalyze::Execute. Create new force traversal table, OperationId=" X4/6 , PathId [OwnerId: 72057594046644480, LocalPathId: 2] Answer: 'Analyze sent. OperationId: 00000008g9b3zk8bspk7109ays' FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; >> TLocksTest::SetLockFail ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::CopyCopiedTableAndDropFirstCopy [GOOD] Test command err: 2025-06-24T21:24:20.446179Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629858754524345:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:20.446251Z node 1 :METADATA_PROVIDER ERROR: 
log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa6/r3tmp/tmpTLJvwr/pdisk_1.dat 2025-06-24T21:24:20.919436Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:20.921032Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:20.921174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:20.923325Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629858754524235:2079] 1750800260438304 != 1750800260438307 2025-06-24T21:24:20.933353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12711 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:21.217930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... 
2025-06-24T21:24:21.251224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:21.487835Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:21.558554Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.011s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-06-24T21:24:21.559920Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.004s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-24T21:24:21.587846Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1573 647 6413)b }, ecr=1.000 2025-06-24T21:24:21.600929Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.003s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 Copy TableOld to Table 2025-06-24T21:24:21.776882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/dc-1/Dir" OperationType: ESchemeOpCreateTable CreateTable { Name: "Table" CopyFromTable: "/dc-1/Dir/TableOld" } } TxId: 281474976710676 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:24:21.777292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_copy_table.cpp:344: TCopyTable Propose, path: /dc-1/Dir/Table, opId: 281474976710676:0, at schemeshard: 72057594046644480 2025-06-24T21:24:21.777891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 2], parent name: Dir, child name: Table, child id: [OwnerId: 72057594046644480, LocalPathId: 4], at schemeshard: 72057594046644480 2025-06-24T21:24:21.777945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 0 2025-06-24T21:24:21.777960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction source path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-24T21:24:21.778002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976710676:0 type: TxCopyTable target path: [OwnerId: 72057594046644480, LocalPathId: 4] source path: [OwnerId: 72057594046644480, LocalPathId: 3] 2025-06-24T21:24:21.778042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 1 2025-06-24T21:24:21.778066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 2 2025-06-24T21:24:21.778206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 3 2025-06-24T21:24:21.778330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710676:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:24:21.780598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-24T21:24:21.780643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 4 2025-06-24T21:24:21.781293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976710676, response: Status: StatusAccepted TxId: 281474976710676 SchemeshardId: 72057594046644480 PathId: 4, at schemeshard: 72057594046644480 2025-06-24T21:24:21.781541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710676, database: /dc-1, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /dc-1/Dir/Table 2025-06-24T21:24:21.781723Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-24T21:24:21.781744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710676, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T21:24:21.781880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710676, path id: [OwnerId: 72057594046644480, LocalPathId: 4] 2025-06-24T21:24:21.781990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-24T21:24:21.782011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519629858754524731:2241], at schemeshard: 72057594046644480, txId: 281474976710676, path id: 2 2025-06-24T21:24:21.782037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519629858754524731:2241], at schemeshard: 72057594046644480, txId: 281474976710676, path id: 4 2025-06-24T21:24:21.782076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710676:0, at schemeshard: 72057594046644480 2025-06-24T21:24:21.782118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976710676:0 ProgressState, operation type: TxCopyTable, at tablet# 72057594046644480 waiting... 
2025-06-24T21:24:21.782507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:358: TCreateParts opId# 281474976710676:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 3 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 4 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-06-24T21:24:21.782664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:358: TCreateParts opId# 281474976710676:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 4 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 4 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-06-24T21:24:21.784534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046644480, cookie: 281474976710676 2025-06-24T21:24:21.784625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046644480, cookie: 281474976710676 2025-06-24T21:24:21.784636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976710676 2025-06-24T21:24:21.784651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710676, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 6 2025-06-24T21:24:21.784665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-24T21:24:21.784825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generatio ... 
mentPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-24T21:24:28.680917Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037892 2025-06-24T21:24:28.681019Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037892 not found 2025-06-24T21:24:28.681035Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037889 not found 2025-06-24T21:24:28.681049Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037893 not found 2025-06-24T21:24:28.681118Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:1 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:24:28.681191Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:24:28.681878Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:6 2025-06-24T21:24:28.681906Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:6 tabletId 72075186224037893 2025-06-24T21:24:28.681951Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:6 2025-06-24T21:24:28.681972Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-24T21:24:28.681980Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:5 tabletId 72075186224037892 2025-06-24T21:24:28.681999Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-24T21:24:28.682362Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-24T21:24:28.682375Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-24T21:24:28.682421Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-24T21:24:28.682706Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-24T21:24:28.682889Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-24T21:24:28.682975Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-06-24T21:24:28.683035Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-24T21:24:28.683049Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037889 
2025-06-24T21:24:28.683182Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 2 2025-06-24T21:24:28.683281Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T21:24:28.683294Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-24T21:24:28.683328Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-24T21:24:28.684546Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037893 2025-06-24T21:24:28.684594Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037893 2025-06-24T21:24:28.685441Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-24T21:24:28.685455Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-24T21:24:28.685493Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-24T21:24:28.685511Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-24T21:24:28.685539Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-24T21:24:28.685855Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3635: Client pipe to tablet 72075186224037892 from 72075186224037890 is reset 2025-06-24T21:24:28.685900Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037888 state Offline 2025-06-24T21:24:28.685921Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037888 reason = ReasonStop 2025-06-24T21:24:28.685951Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:7519629895175088146:2385], serverId# [3:7519629895175088147:2386], sessionId# [0:0:0] 2025-06-24T21:24:28.685967Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037891 state Offline 2025-06-24T21:24:28.685978Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3635: Client pipe to tablet 72075186224037893 from 72075186224037891 is reset 2025-06-24T21:24:28.685989Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037891 reason = ReasonStop 2025-06-24T21:24:28.686005Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037891, clientId# [3:7519629895175088421:2565], serverId# [3:7519629895175088422:2566], sessionId# [0:0:0] 2025-06-24T21:24:28.686327Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519629895175088345 RawX2: 4503612512274700 } TabletId: 72075186224037890 State: 4 2025-06-24T21:24:28.686361Z node 3 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:24:28.686880Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037891 not found 2025-06-24T21:24:28.686951Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037891 2025-06-24T21:24:28.687001Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-24T21:24:28.687215Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2025-06-24T21:24:28.688516Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037888 2025-06-24T21:24:28.688563Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-06-24T21:24:28.690924Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-06-24T21:24:28.690935Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:24:28.692502Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-24T21:24:28.692744Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 1 2025-06-24T21:24:28.692936Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T21:24:28.692959Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 4], at schemeshard: 72057594046644480 2025-06-24T21:24:28.693022Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-24T21:24:28.693614Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-24T21:24:28.693646Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-24T21:24:28.694369Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-24T21:24:28.694676Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-06-24T21:24:28.694723Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037890, clientId# [3:7519629895175088439:2579], serverId# [3:7519629895175088442:2582], sessionId# [0:0:0] 2025-06-24T21:24:28.695236Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037890 not found 2025-06-24T21:24:28.695285Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: 
OnTabletDead: 72075186224037890 2025-06-24T21:24:28.696112Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 Check that tablet 72075186224037893 was deleted Check that tablet 72075186224037888 was deleted Check that tablet 72075186224037889 was deleted Check that tablet 72075186224037890 was deleted Check that tablet 72075186224037891 was deleted 2025-06-24T21:24:28.967647Z node 3 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037892) 2025-06-24T21:24:28.968038Z node 3 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037893) 2025-06-24T21:24:28.968358Z node 3 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037888) 2025-06-24T21:24:28.968612Z node 3 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037889) 2025-06-24T21:24:28.969143Z node 3 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037890) 2025-06-24T21:24:28.969443Z node 3 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037891) >> TFlatTest::SelectRangeReverseIncludeKeys [GOOD] >> SystemView::TabletsShards [GOOD] >> SystemView::TabletsFollowers >> KqpQueryServiceScripts::ExecuteScriptWithCancelAfterAndTimeout [GOOD] >> HttpRequest::Analyze [GOOD] >> SystemView::AuthOwners_Access [GOOD] >> SystemView::AuthOwners_ResultOrder >> TFlatTest::PathSorting [GOOD] >> TFlatTest::PartBloomFilter ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::ExecuteScriptWithCancelAfterAndTimeout [GOOD] Test command err: Trying to start YDB, gRPC: 3331, MsgBus: 5978 2025-06-24T21:23:49.813141Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629728617449546:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:49.813209Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003105/r3tmp/tmp2U3ZJ0/pdisk_1.dat 2025-06-24T21:23:50.314059Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629728617449527:2079] 1750800229811340 != 1750800229811343 2025-06-24T21:23:50.321383Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:50.333531Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:50.333644Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 3331, node 1 2025-06-24T21:23:50.368867Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:50.462735Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:50.462765Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: (empty maybe) 2025-06-24T21:23:50.462775Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:50.462897Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5978 2025-06-24T21:23:50.852534Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5978 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:51.154238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:51.175875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:23:51.198065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:51.397919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:51.585298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:23:51.656998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:53.433520Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629745797320364:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:53.433617Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:53.746739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:53.785213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:53.832334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:53.893043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:53.929828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:53.966447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:54.046270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:54.117200Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629750092288321:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:54.117286Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:54.117522Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629750092288326:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:54.121946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:54.138924Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629750092288328:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:23:54.242671Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629750092288379:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:54.815028Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629728617449546:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:54.815112Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:55.258018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work ... sconnected 2025-06-24T21:24:23.640945Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:23.645288Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2064, node 5 2025-06-24T21:24:23.703857Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:23.703884Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:23.703897Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:23.704094Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5572 TClient is connected to server localhost:5572 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:24:24.397880Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:24.412813Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:24.489468Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:24.500039Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:24.711968Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:24.813095Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:27.776835Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519629889442992646:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:27.776964Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:27.893613Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:27.941595Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:27.984409Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:28.033450Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:28.081555Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:28.168354Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:28.253404Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:28.352603Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519629893737960611:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:28.352682Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:28.352992Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519629893737960616:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:28.357061Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:24:28.371721Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7519629893737960618:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:24:28.429741Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7519629893737960669:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:24:28.463679Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519629872263121848:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:28.463750Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:24:29.922388Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:29.925444Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:29.927472Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:30.278169Z node 5 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=5&id=ZTRjMDZlMTUtNGI3NWFhMTMtN2Y3OWMxODItMWM1MDkzNGI=, ActorId: [5:7519629902327895817:2499], ActorState: ExecuteState, TraceId: 01jyhx80jv07kv260mff5tcpee, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SelectRangeReverseIncludeKeys [GOOD] Test command err: 2025-06-24T21:24:26.133221Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629885884241756:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:26.133532Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa3/r3tmp/tmpoZXQ7d/pdisk_1.dat 2025-06-24T21:24:26.426909Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:26.427626Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629885884241738:2079] 1750800266132394 != 1750800266132397 2025-06-24T21:24:26.513742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:26.513957Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:26.515471Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17815 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:26.797095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:26.821078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:24:26.829234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:27.140999Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:29.559540Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629898594787204:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:29.559606Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa3/r3tmp/tmpHCE5Ox/pdisk_1.dat 2025-06-24T21:24:29.675488Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:29.679028Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629898594787183:2079] 1750800269558655 != 1750800269558658 2025-06-24T21:24:29.704387Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:29.704510Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:29.705991Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1248 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:29.893320Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:29.910519Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
>> TFlatTest::SelectRangeItemsLimit [GOOD] >> TFlatTest::SelectRangeForbidNullArgs4 >> TFlatTest::CopyTableDropOriginalAndReturnPartAfterCompaction [GOOD] >> TFlatTest::Ls [GOOD] >> TFlatTest::LsPathId >> TFlatTest::CopyTableAndCompareColumnsSchema >> TFlatTest::SelectRangeBytesLimit [GOOD] >> TFlatTest::SelectRangeForbidNullArgs1 >> TLocksTest::UpdateLockedKey ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> HttpRequest::Analyze [GOOD] Test command err: 2025-06-24T21:24:12.308270Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:24:12.308678Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:12.308882Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047f7/r3tmp/tmpBElvth/pdisk_1.dat 2025-06-24T21:24:12.693641Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7001, node 1 2025-06-24T21:24:12.908758Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:12.908834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:12.908871Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:12.909212Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:12.919506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:13.033668Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:13.033842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:13.049656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3392 2025-06-24T21:24:13.676392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:16.777961Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:24:16.812926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:16.813065Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:16.851778Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:24:16.855049Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:17.054740Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:17.082893Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.083506Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.083989Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.084112Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.084366Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.084467Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.084544Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.084610Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.084730Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.258994Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:17.259111Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:17.272747Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:17.409531Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:17.445083Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:24:17.445180Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:24:17.468776Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:24:17.470259Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:24:17.470496Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:24:17.470559Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:24:17.470613Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:24:17.470672Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:24:17.470721Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:24:17.470775Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:24:17.472478Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:24:17.509109Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:17.509245Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1790:2560], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:17.516603Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2568] 2025-06-24T21:24:17.528821Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1852:2588] 2025-06-24T21:24:17.529498Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1852:2588], schemeshard id = 72075186224037897 2025-06-24T21:24:17.534598Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:24:17.553118Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:24:17.553177Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:24:17.553248Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:24:17.565413Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:17.573656Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:24:17.573823Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:24:17.735417Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:24:17.875189Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:24:17.964006Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:24:18.578522Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:18.822367Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2137:3019], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:18.822547Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:18.846137Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:24:19.132883Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2271:2820];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:24:19.133129Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2271:2820];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:24:19.133416Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2271:2820];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:24:19.133552Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2271:2820];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:24:19.133684Z node 2 :TX_COLUMNSHARD WARN: l ... essTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-24T21:24:21.901258Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-24T21:24:21.901764Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037904;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-24T21:24:21.902295Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-24T21:24:21.903404Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-24T21:24:21.903961Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-24T21:24:21.905134Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-24T21:24:22.859975Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3696:3173], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:22.860309Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:22.864051Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715661:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-24T21:24:22.915489Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:22.916401Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037900;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:22.917120Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:22.918029Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:22.919376Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:22.920298Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037904;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:22.920727Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:22.921142Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:22.921597Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:22.922754Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:24:23.658733Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3862:3221], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:23.659125Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:23.663268Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715662:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-24T21:24:23.718762Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037900;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:23.719462Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:23.721283Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:23.721898Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:23.722502Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:23.724397Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037904;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:23.724997Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:23.725584Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:23.726151Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-24T21:24:23.727400Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; waiting actualization: 0/0.000020s 2025-06-24T21:24:32.297263Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:5777:5397] 2025-06-24T21:24:32.301193Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. ReplyToActorId [1:5774:3827] , Record { OperationId: "\000\000\000\000\0340\024\375\370\334?#\375B\021\004" Tables { PathId { OwnerId: 72075186224037897 LocalId: 4 } } } 2025-06-24T21:24:32.301309Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:47: [72075186224037894] TTxAnalyze::Execute. 
Create new force traversal operation, OperationId= 0?#B 2025-06-24T21:24:32.301364Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:65: [72075186224037894] TTxAnalyze::Execute. Create new force traversal table, OperationId= 0?#B , PathId [OwnerId: 72075186224037897, LocalPathId: 4] Answer: 'Analyze sent. OperationId: 000000071g2kyzhq1z4fym4484' FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor with no CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor with no CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor with no CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor with no CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor with no CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor with no CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor with no CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor with no CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor with no CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor with no CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; >> TLocksFatTest::RangeSetNotBreak [GOOD] >> TLocksTest::SetLockFail [GOOD] >> TLocksTest::SetEraseSet ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::CopyTableDropOriginalAndReturnPartAfterCompaction [GOOD] Test command err: 2025-06-24T21:24:27.785608Z node 1 :METADATA_PROVIDER WARN: log.cpp:784:
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629889406747205:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:27.789109Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa2/r3tmp/tmpmJvgbz/pdisk_1.dat 2025-06-24T21:24:28.197610Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629889406747184:2079] 1750800267777313 != 1750800267777316 2025-06-24T21:24:28.206277Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:28.214916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:28.214983Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:28.218486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26350 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:28.549853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:28.565902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:28.587862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:28.743502Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-06-24T21:24:28.750616Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.003s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-24T21:24:28.778494Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1573 647 6413)b }, ecr=1.000 2025-06-24T21:24:28.783093Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 2025-06-24T21:24:28.796421Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750800268700 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... 
(TRUNCATED) Copy TableOld to Table 2025-06-24T21:24:28.941281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/dc-1/Dir" OperationType: ESchemeOpCreateTable CreateTable { Name: "Table" PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 100000 InMemStepsToSnapshot: 2 InMemForceStepsToSnapshot: 3 InMemForceSizeToSnapshot: 1000000 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 200000 ReadAheadLoThreshold: 100000 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 10000 CountToCompact: 2 ForceCountToCompact: 2 ForceSizeToCompact: 20000 CompactionBrokerQueue: 1 KeepInCache: true } } ColumnFamilies { Id: 0 ColumnCache: ColumnCacheNone Storage: ColumnStorageTest_1_2_1k } } CopyFromTable: "/dc-1/Dir/TableOld" } } TxId: 281474976710676 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:24:28.941814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_copy_table.cpp:344: TCopyTable Propose, path: /dc-1/Dir/Table, opId: 281474976710676:0, at schemeshard: 72057594046644480 2025-06-24T21:24:28.942588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 2], parent name: Dir, child name: Table, child id: [OwnerId: 72057594046644480, LocalPathId: 4], at schemeshard: 72057594046644480 2025-06-24T21:24:28.942630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 0 2025-06-24T21:24:28.942638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction source path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-24T21:24:28.942663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976710676:0 type: TxCopyTable target path: [OwnerId: 72057594046644480, LocalPathId: 4] source path: [OwnerId: 72057594046644480, LocalPathId: 3] 2025-06-24T21:24:28.942724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 1 2025-06-24T21:24:28.942735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 2 2025-06-24T21:24:28.942925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 3 2025-06-24T21:24:28.943095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710676:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:24:28.943843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-24T21:24:28.943878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 4 2025-06-24T21:24:28.944603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976710676, response: Status: StatusAccepted TxId: 281474976710676 
SchemeshardId: 72057594046644480 PathId: 4, at schemeshard: 72057594046644480 2025-06-24T21:24:28.944858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710676, database: /dc-1, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /dc-1/Dir/Table 2025-06-24T21:24:28.945096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-24T21:24:28.945127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710676, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T21:24:28.945251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710676, path id: [OwnerId: 72057594046644480, LocalPathId: 4] 2025-06-24T21:24:28.945335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-24T21:24:28.945358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519629893701714977:2245], at schemeshard: 72057594046644480, txId: 281474976710676, path id: 2 2025-06-24T21:24:28.945407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519629893701714977:2245], at schemeshard: 72057594046644480, txId: 281474976710676, path id: 4 2025-06-24T21:24:28.945458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710676:0, at schemeshard: 72057594046644480 2025-06-24T21:24:28.945496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976710676:0 ProgressState, operation type: TxCopyTable, at tablet# 72057594046644480 2025-06-24T21:24:28.945798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:358: TCreateParts opId# 281474976710676:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 3 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 4 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-06-24T21:24:28.945885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:358: TCreateParts opId# 281474976710676:0 CreateRequest Even ... 
DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715784 ready parts: 1/1 2025-06-24T21:24:33.239107Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715784:0 2025-06-24T21:24:33.239116Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715784:0 2025-06-24T21:24:33.239227Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715784 datashard 72075186224037891 state PreOffline 2025-06-24T21:24:33.239262Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037891 Got TEvSchemaChangedResult from SS at 72075186224037891 2025-06-24T21:24:33.239320Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 3 2025-06-24T21:24:33.239373Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715784 datashard 72075186224037890 state PreOffline 2025-06-24T21:24:33.239395Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037890 Got TEvSchemaChangedResult from SS at 72075186224037890 2025-06-24T21:24:33.239606Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037889 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T21:24:33.239724Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:128: 72075186224037891 parts [ [72075186224037889:1:16:1:12288:306:0] [72075186224037889:1:23:1:12288:253:0] ] return ack processed 2025-06-24T21:24:33.239761Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037891 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:24:33.239821Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037891 Initiating switch from PreOffline to Offline state 2025-06-24T21:24:33.241498Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037891 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T21:24:33.241615Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [2:7519629918510840844:2649], serverId# [2:7519629918510840848:3432], sessionId# [0:0:0] 2025-06-24T21:24:33.241659Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037890 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:24:33.241682Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519629909920904535 RawX2: 4503608217307345 } TabletId: 72075186224037889 State: 4 2025-06-24T21:24:33.241706Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037890 Initiating switch from PreOffline to Offline state 2025-06-24T21:24:33.241726Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037889, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:24:33.242082Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 
72057594046644480 Check that tablet 72075186224037888 was deleted 2025-06-24T21:24:33.242897Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037888) 2025-06-24T21:24:33.243123Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2025-06-24T21:24:33.243388Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037890 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T21:24:33.243417Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519629914215872127 RawX2: 4503608217307401 } TabletId: 72075186224037891 State: 4 2025-06-24T21:24:33.243456Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:24:33.243878Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-24T21:24:33.243895Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037889 reason = ReasonStop 2025-06-24T21:24:33.244104Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-24T21:24:33.244284Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519629914215872129 RawX2: 4503608217307402 } TabletId: 72075186224037890 State: 4 2025-06-24T21:24:33.244309Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-06-24T21:24:33.244315Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:24:33.244389Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037889 2025-06-24T21:24:33.244443Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:24:33.244511Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T21:24:33.244532Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-24T21:24:33.244576Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-24T21:24:33.244756Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037889 not found 2025-06-24T21:24:33.245154Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-24T21:24:33.245179Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-24T21:24:33.245412Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:24:33.245479Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-24T21:24:33.245750Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037891 state Offline 2025-06-24T21:24:33.245784Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline Check that tablet 72075186224037889 was deleted 2025-06-24T21:24:33.246261Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037889) 2025-06-24T21:24:33.246457Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-24T21:24:33.246489Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037891 reason = ReasonStop 2025-06-24T21:24:33.246513Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-06-24T21:24:33.246658Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 2 Check that tablet 72075186224037890 was deleted 2025-06-24T21:24:33.246811Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-24T21:24:33.246893Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037891 2025-06-24T21:24:33.246957Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 1 2025-06-24T21:24:33.246981Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-24T21:24:33.247073Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T21:24:33.247092Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 4], at schemeshard: 72057594046644480 2025-06-24T21:24:33.247130Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-24T21:24:33.247444Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037891 not found 2025-06-24T21:24:33.247469Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037890 not found Check that tablet 72075186224037891 was deleted 2025-06-24T21:24:33.247664Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-24T21:24:33.247690Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-24T21:24:33.247727Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-24T21:24:33.247743Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-24T21:24:33.247923Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-24T21:24:33.247968Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037890) 2025-06-24T21:24:33.248561Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-24T21:24:33.248619Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 2025-06-24T21:24:33.249465Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037891) >> SystemView::DescribeSystemFolder [GOOD] >> SystemView::DescribeAccessDenied >> TLocksTest::CK_GoodLock >> TObjectStorageListingTest::CornerCases ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksFatTest::RangeSetNotBreak [GOOD] Test command err: 2025-06-24T21:24:20.446176Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629860122176176:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:20.446277Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aaf/r3tmp/tmpqumPdv/pdisk_1.dat 2025-06-24T21:24:20.823336Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629860122176073:2079] 1750800260438228 != 1750800260438231 2025-06-24T21:24:20.838615Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:20.905311Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:20.905423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:20.906751Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15760 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:21.254989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:21.275035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:21.290435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:21.446510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:21.455847Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:21.507829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:25.446481Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629860122176176:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:25.446570Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aaf/r3tmp/tmp34ZDwZ/pdisk_1.dat 2025-06-24T21:24:28.086509Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:28.149664Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:28.151581Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629892313226408:2079] 1750800267991903 != 1750800267991906 2025-06-24T21:24:28.163922Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:28.164010Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:28.169231Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62995 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:28.402314Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:28.408734Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:24:28.420890Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:24:28.426357Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:28.514864Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:28.574251Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:29.030190Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TObjectStorageListingTest::MaxKeysAndSharding >> TFlatTest::PartBloomFilter [GOOD] >> TFlatTest::SelectRangeForbidNullArgs4 [GOOD] >> TFlatTest::SelectRangeForbidNullArgs1 [GOOD] >> TFlatTest::LsPathId [GOOD] >> TLocksTest::CK_Range_BrokenLock [GOOD] >> TLocksTest::CK_Range_BrokenLockInf >> TLocksTest::BrokenSameKeyLock [GOOD] >> TLocksTest::BrokenSameShardLock >> TLocksTest::Range_IncorrectNullDot1 >> TLocksTest::Range_IncorrectDot1 [GOOD] >> TLocksTest::Range_IncorrectDot2 >> SystemView::TabletsFollowers [GOOD] >> SystemView::TabletsRanges ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::PartBloomFilter [GOOD] Test command err: 2025-06-24T21:24:31.212208Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629909416487827:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:31.212440Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa1/r3tmp/tmpTIfvZ9/pdisk_1.dat 2025-06-24T21:24:31.594694Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:31.634318Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:31.634492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:31.636627Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20520 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:31.957542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:31.976160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-24T21:24:32.007078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... waiting... waiting... TClient::Ls request: /dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750800272018 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 15 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 15 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 13 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "A" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 281474976710663 CreateStep: 1750800272095 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "B" PathId: 4 Sche... 
(TRUNCATED) 2025-06-24T21:24:34.590977Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629919317081142:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:34.591065Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa1/r3tmp/tmplfVBvB/pdisk_1.dat 2025-06-24T21:24:34.732269Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:34.734264Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629919317081122:2079] 1750800274590125 != 1750800274590128 2025-06-24T21:24:34.756078Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:34.756201Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:34.757363Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62343 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-24T21:24:34.970648Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:34.977499Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:24:34.984980Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:35.530067Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715719:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) waiting... 2025-06-24T21:24:35.603075Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TLocksTest::Range_CorrectNullDot ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SelectRangeForbidNullArgs4 [GOOD] Test command err: 2025-06-24T21:24:31.541780Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629907647317488:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:31.542079Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a9e/r3tmp/tmp3rNzkb/pdisk_1.dat 2025-06-24T21:24:31.971916Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:31.975480Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629907647317307:2079] 1750800271530442 != 1750800271530445 2025-06-24T21:24:31.999726Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:31.999855Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:32.001545Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27558 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:32.268811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-24T21:24:32.310269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:32.541953Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:34.973267Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629921847840227:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:34.973368Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a9e/r3tmp/tmpkVAN1W/pdisk_1.dat 2025-06-24T21:24:35.181097Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:35.186978Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629921847840208:2079] 1750800274963220 != 1750800274963223 2025-06-24T21:24:35.195580Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:35.195661Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:35.197405Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8017 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:35.425405Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:35.431871Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:24:35.447699Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::LsPathId [GOOD] Test command err: 2025-06-24T21:24:31.410343Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629907403196296:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:31.410395Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa0/r3tmp/tmp0Oukzy/pdisk_1.dat 2025-06-24T21:24:31.844965Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:31.846264Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629907403196279:2079] 1750800271409829 != 1750800271409832 2025-06-24T21:24:31.901161Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:31.901289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:31.904636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17243 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:32.182082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
TClient::Ls request: / TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "/" PathId: 1 SchemeshardId: 0 PathType: EPathTypeDir CreateFinished: true } Children { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true } } Path: "/" TClient::Ls request: TClient::Ls response: Status: 128 StatusCode: ERROR Issues { message: "Default error" severity: 1 } SchemeStatus: 7 ErrorReason: "Invalid path" TClient::Ls request: // TClient::Ls response: Status: 128 StatusCode: ERROR Issues { message: "Default error" severity: 1 } SchemeStatus: 7 ErrorReason: "Invalid path" TClient::Ls request: / TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "/" PathId: 1 SchemeshardId: 0 PathType: EPathTypeDir CreateFinished: true } Children { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true } } Path: "/" TClient::Ls request: /dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750800272249 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePo... (TRUNCATED) TClient::Ls request: /dc-11 TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Root not found" TClient::Ls request: /dc-2 TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Root not found" waiting... 
2025-06-24T21:24:32.379686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 TClient::Ls request: / TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "/" PathId: 1 SchemeshardId: 0 PathType: EPathTypeDir CreateFinished: true } Children { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true } } Path: "/" TClient::Ls request: /dc-1 2025-06-24T21:24:32.429191Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750800272249 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Berkanavt" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750800272417 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depr... (TRUNCATED) TClient::Ls request: /dc-1/Berkanavt TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Berkanavt" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750800272417 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 1 PathsLimit: 10000 Shard... 
(TRUNCATED) 2025-06-24T21:24:32.453951Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629911698164184:2326] txid# 281474976715659, issues: { message: "Check failed: path: \'/dc-1/Berkanavt\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } Error 1: Check failed: path: '/dc-1/Berkanavt', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeDir, state: EPathStateNoChanges) TClient::Ls request: /dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750800272249 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Berkanavt" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750800272417 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depr... (TRUNCATED) TClient::Ls request: /dc-1/arcadia TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Path not found" waiting... TClient::Ls request: /dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750800272249 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Berkanavt" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750800272417 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "arcadia" Path... (TRUNCATED) TClient::Ls request: /dc-1/arcadia TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "arcadia" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 281474976715660 CreateStep: 1750800272550 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsI... 
(TRUNCATED) 2025-06-24T21:24:34.966941Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629921661094022:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:34.968183Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa0/r3tmp/tmpJ2TKdn/pdisk_1.dat 2025-06-24T21:24:35.141665Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:35.146155Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:35.146243Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:35.147062Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629921661093995:2079] 1750800274965324 != 1750800274965327 2025-06-24T21:24:35.160235Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13487 TClient::Ls request: / TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "/" PathId: 1 SchemeshardId: 0 PathType: EPathTypeDir CreateFinished: true } Children { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true } } Path: "/" WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:35.386354Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:35.392338Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:35.447805Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 waiting... 
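Each node bring-up above logs the Hive-side node state walking Unknown -> Disconnected -> Connecting -> Connected. As a reading aid, a minimal sketch of that observed transition order as a plain state check; EVolatileState and IsValidTransition are illustrative names for this note, not YDB code, and only the transitions printed in this trace are modeled.

#include <cassert>

// Illustrative enumeration of the states printed by the HIVE warnings above.
enum class EVolatileState { Unknown, Disconnected, Connecting, Connected };

// The only forward transitions observed in this log; anything else is rejected.
static bool IsValidTransition(EVolatileState from, EVolatileState to) {
    switch (from) {
        case EVolatileState::Unknown:      return to == EVolatileState::Disconnected;
        case EVolatileState::Disconnected: return to == EVolatileState::Connecting;
        case EVolatileState::Connecting:   return to == EVolatileState::Connected;
        case EVolatileState::Connected:    return false; // terminal for this trace
    }
    return false;
}

int main() {
    assert(IsValidTransition(EVolatileState::Unknown, EVolatileState::Disconnected));
    assert(IsValidTransition(EVolatileState::Connecting, EVolatileState::Connected));
    assert(!IsValidTransition(EVolatileState::Unknown, EVolatileState::Connected));
    return 0;
}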
2025-06-24T21:24:35.509759Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SelectRangeForbidNullArgs1 [GOOD] Test command err: 2025-06-24T21:24:31.658911Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629908894239560:2175];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:31.659830Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a9d/r3tmp/tmpPZwnao/pdisk_1.dat 2025-06-24T21:24:32.027254Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629908894239411:2079] 1750800271641453 != 1750800271641456 2025-06-24T21:24:32.063927Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:32.110112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:32.110214Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:32.112121Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4559 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:32.433181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:32.449464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:24:32.460637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:24:32.468796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:32.663953Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:35.108160Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629926775132184:2106];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:35.116457Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a9d/r3tmp/tmpY9K8ZS/pdisk_1.dat 2025-06-24T21:24:35.278297Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:35.281364Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629926775132116:2079] 1750800275092399 != 1750800275092402 2025-06-24T21:24:35.292344Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:35.292430Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:35.294332Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31423 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:35.540470Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:24:35.546149Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:35.560526Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> TObjectStorageListingTest::CornerCases [GOOD] >> TObjectStorageListingTest::Decimal >> TLocksTest::NoLocksSet [GOOD] >> TLocksTest::MultipleLocks >> SystemView::AuthGroups [GOOD] >> SystemView::AuthGroupMembers >> TLocksTest::BrokenLockUpdate >> TFlatTest::LargeDatashardReplyRW [GOOD] >> TFlatTest::SelectRangeReverse >> TLocksTest::SetEraseSet [GOOD] >> TFlatTest::ShardUnfreezeNonFrozen >> TFlatTest::WriteSplitKillRead ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::LargeDatashardReplyRW [GOOD] Test command err: 2025-06-24T21:24:20.452611Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629862513174593:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:20.452679Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa5/r3tmp/tmpM5eixK/pdisk_1.dat 2025-06-24T21:24:20.870387Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:20.873353Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629862513174376:2079] 1750800260439559 != 1750800260439562 2025-06-24T21:24:20.898423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:20.898519Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:20.908867Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21983 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:24:21.214245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:21.239621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:21.251227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:24:21.260870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:21.457520Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:25.452161Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629862513174593:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:25.452216Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:24:30.651181Z node 1 :MINIKQL_ENGINE ERROR: datashard__engine_host.cpp:516: Shard %72075186224037889, txid %281474976711360, engine error: Error executing transaction (read-only: 1): Datashard 72075186224037889: reply size limit exceeded. (61442990 > 50331648) 2025-06-24T21:24:30.669266Z node 1 :TX_DATASHARD ERROR: execute_data_tx_unit.cpp:268: Datashard execution error for [1750800270093:281474976711360] at 72075186224037889: Datashard 72075186224037889: reply size limit exceeded. (61442990 > 50331648) 2025-06-24T21:24:30.679979Z node 1 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [1:7519629905462853770:5931] txid# 281474976711360 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# RESULT_UNAVAILABLE shard id 72075186224037889 marker# P12 2025-06-24T21:24:30.680079Z node 1 :TX_PROXY ERROR: datareq.cpp:883: Actor# [1:7519629905462853770:5931] txid# 281474976711360 RESPONSE Status# ExecResultUnavailable marker# P13c DataShardErrors: [REPLY_SIZE_EXCEEDED] Datashard 72075186224037889: reply size limit exceeded. 
(61442990 > 50331648) proxy error code: ExecResultUnavailable 2025-06-24T21:24:31.439173Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629909140351744:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:31.439239Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa5/r3tmp/tmpzUgxvw/pdisk_1.dat 2025-06-24T21:24:31.591509Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:31.591593Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:31.598535Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:31.599534Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7053 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:31.833157Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:31.840940Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:31.859739Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
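Both LargeDatashardReplyRW failures are the same guard firing: the serialized reply (61442990 bytes here, 71580986 bytes in the second run below) is compared against a 50331648-byte (48 MiB) cap, and the transaction is failed with RESULT_UNAVAILABLE / REPLY_SIZE_EXCEEDED instead of shipping the oversized result. A self-contained sketch of such a check under that assumption; CheckReplySize and TReplyCheck are hypothetical names for this note, not the actual datashard code.

#include <cstdint>
#include <sstream>
#include <string>

// Illustrative outcome of the guard; not a YDB type.
struct TReplyCheck {
    bool ok;
    std::string error;   // message in the spirit of the log lines above
};

// Default limit matching the number printed in the log: 48 MiB.
constexpr uint64_t kDefaultReplySizeLimit = 48ull * 1024 * 1024;   // 50331648

TReplyCheck CheckReplySize(uint64_t tabletId, uint64_t replyBytes,
                           uint64_t limitBytes = kDefaultReplySizeLimit) {
    if (replyBytes <= limitBytes) {
        return {true, {}};
    }
    std::ostringstream msg;
    msg << "Datashard " << tabletId << ": reply size limit exceeded. ("
        << replyBytes << " > " << limitBytes << ")";
    return {false, msg.str()};
}

// Example with the first failure's numbers: 61442990 bytes against the 48 MiB cap,
// so CheckReplySize(72075186224037889ull, 61442990ull).ok evaluates to false.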
2025-06-24T21:24:32.448208Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:36.439233Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519629909140351744:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:36.439300Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:24:42.056772Z node 2 :MINIKQL_ENGINE ERROR: datashard__engine_host.cpp:516: Shard %72075186224037888, txid %281474976716361, engine error: Error executing transaction (read-only: 0): Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648) 2025-06-24T21:24:42.068870Z node 2 :TX_DATASHARD ERROR: execute_data_tx_unit.cpp:268: Datashard execution error for [0:281474976716361] at 72075186224037888: Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648) 2025-06-24T21:24:42.072154Z node 2 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976716361 at tablet 72075186224037888 status: RESULT_UNAVAILABLE errors: REPLY_SIZE_EXCEEDED (Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648)) | 2025-06-24T21:24:42.072341Z node 2 :TX_PROXY ERROR: datareq.cpp:883: Actor# [2:7519629952090031063:5910] txid# 281474976716361 RESPONSE Status# ExecResultUnavailable marker# P13c DataShardErrors: [REPLY_SIZE_EXCEEDED] Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648) proxy error code: ExecResultUnavailable >> TObjectStorageListingTest::Decimal [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::SetEraseSet [GOOD] Test command err: 2025-06-24T21:24:32.550438Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629912781832346:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:32.565395Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a9b/r3tmp/tmpcbyBDD/pdisk_1.dat 2025-06-24T21:24:32.995250Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:33.016118Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:33.016246Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:33.018073Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23339 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:33.302877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... 2025-06-24T21:24:33.325828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:33.482438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:33.554496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:33.571010Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:33.605009Z node 1 :TX_DATASHARD ERROR: datashard_pipeline.cpp:1570: Shard 72075186224037888 cannot parse tx 281474976715662: Validate (783): Key validation status: 3 2025-06-24T21:24:33.605343Z node 1 :TX_PROXY ERROR: datareq.cpp:1873: Actor# [1:7519629917076800458:2499] txid# 281474976715662 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# ERROR shard id 72075186224037888 read size 0 out readset size 0 marker# P6 2025-06-24T21:24:33.605409Z node 1 :TX_PROXY ERROR: datareq.cpp:2071: Actor# [1:7519629917076800458:2499] txid# 281474976715662 HANDLE PrepareErrors TEvProposeTransactionResult TDataReq TabletStatus# StatusWait shard id 72075186224037888 2025-06-24T21:24:33.605444Z node 1 :TX_PROXY ERROR: datareq.cpp:1274: Actor# [1:7519629917076800458:2499] txid# 281474976715662 invalidateDistCache: 1 DIE TDataReq MarkShardError TabletsLeft# 1 2025-06-24T21:24:33.608884Z node 1 :TX_DATASHARD ERROR: datashard_pipeline.cpp:1570: Shard 72075186224037888 cannot parse tx 281474976715663: Validate (783): Key validation status: 3 2025-06-24T21:24:33.609314Z node 1 :TX_PROXY ERROR: datareq.cpp:1873: Actor# [1:7519629917076800480:2506] txid# 281474976715663 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# ERROR shard id 72075186224037888 read size 0 out readset size 0 marker# P6 2025-06-24T21:24:33.609430Z node 1 :TX_PROXY ERROR: datareq.cpp:2071: Actor# [1:7519629917076800480:2506] txid# 281474976715663 HANDLE PrepareErrors TEvProposeTransactionResult TDataReq TabletStatus# StatusWait shard id 72075186224037888 2025-06-24T21:24:33.609457Z node 1 :TX_PROXY ERROR: datareq.cpp:1274: Actor# [1:7519629917076800480:2506] txid# 281474976715663 invalidateDistCache: 1 DIE TDataReq MarkShardError TabletsLeft# 1 DataShardErrors: [SCHEME_ERROR] Validate (783): Key validation status: 3 proxy error code: ProxyShardNotAvailable 2025-06-24T21:24:33.613479Z node 1 :TX_DATASHARD ERROR: datashard_pipeline.cpp:1570: Shard 72075186224037888 cannot parse tx 281474976715664: Validate (783): Key validation status: 3 2025-06-24T21:24:33.613888Z node 1 :TX_PROXY ERROR: datareq.cpp:1873: Actor# [1:7519629917076800487:2510] txid# 281474976715664 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# ERROR shard id 72075186224037888 read size 0 out readset size 0 marker# P6 2025-06-24T21:24:33.614004Z node 1 :TX_PROXY ERROR: datareq.cpp:2071: Actor# [1:7519629917076800487:2510] txid# 281474976715664 HANDLE PrepareErrors TEvProposeTransactionResult TDataReq TabletStatus# StatusWait shard id 72075186224037888 2025-06-24T21:24:33.614039Z node 1 :TX_PROXY ERROR: datareq.cpp:1274: Actor# [1:7519629917076800487:2510] txid# 281474976715664 invalidateDistCache: 1 DIE TDataReq MarkShardError TabletsLeft# 1 2025-06-24T21:24:33.618212Z node 1 :TX_DATASHARD ERROR: datashard_pipeline.cpp:1570: Shard 72075186224037888 cannot parse tx 281474976715665: Validate (783): Key validation status: 3 2025-06-24T21:24:33.618326Z node 1 :TX_PROXY ERROR: datareq.cpp:1873: Actor# [1:7519629917076800493:2513] txid# 281474976715665 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# ERROR shard id 72075186224037888 read size 0 out readset size 0 marker# P6 2025-06-24T21:24:33.618414Z node 1 :TX_PROXY ERROR: datareq.cpp:2071: Actor# 
[1:7519629917076800493:2513] txid# 281474976715665 HANDLE PrepareErrors TEvProposeTransactionResult TDataReq TabletStatus# StatusWait shard id 72075186224037888 2025-06-24T21:24:33.618441Z node 1 :TX_PROXY ERROR: datareq.cpp:1274: Actor# [1:7519629917076800493:2513] txid# 281474976715665 invalidateDistCache: 1 DIE TDataReq MarkShardError TabletsLeft# 1 DataShardErrors: [SCHEME_ERROR] Validate (783): Key validation status: 3 proxy error code: ProxyShardNotAvailable 2025-06-24T21:24:35.976629Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629925728795730:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:35.976675Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a9b/r3tmp/tmp9aU9oT/pdisk_1.dat 2025-06-24T21:24:36.178077Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:36.179885Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629925728795712:2079] 1750800275975777 != 1750800275975780 2025-06-24T21:24:36.190985Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:36.191094Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:36.197571Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3793 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-24T21:24:36.413506Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:24:36.435224Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:36.513914Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:36.562719Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:39.449656Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519629944368048025:2177];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a9b/r3tmp/tmpmzgLFp/pdisk_1.dat 2025-06-24T21:24:39.547093Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:24:39.597957Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:39.611362Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519629944368047860:2079] 1750800279390113 != 1750800279390116 2025-06-24T21:24:39.611496Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:39.611581Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:39.615848Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3388 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:39.813917Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... 2025-06-24T21:24:39.838956Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:39.890825Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:39.972119Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> TAsyncIndexTests::MergeIndexWithReboots[PipeResets] [GOOD] >> TObjectStorageListingTest::ManyDeletes [GOOD] >> SystemView::AuthOwners_ResultOrder [GOOD] >> SystemView::AuthOwners_TableRange ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::MergeIndexWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T21:23:43.744291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:23:43.744398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, 
InflightLimit# 10 2025-06-24T21:23:43.744450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:23:43.744485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:23:43.744537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:23:43.744583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:23:43.744647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:23:43.744731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:23:43.746447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:23:43.748278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:23:43.847906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:23:43.847964Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:43.848623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T21:23:43.877834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:23:43.878380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:23:43.878569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:23:43.929663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:23:43.930651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:23:43.935511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:43.936011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:23:43.944392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:43.957249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:23:43.978306Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at 
schemeshard: 72057594046678944 2025-06-24T21:23:43.978426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:23:43.986981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:23:43.987100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:23:43.991504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:23:43.991838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T21:23:44.006227Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:23:44.219318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:23:44.223421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:44.227308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:23:44.227408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:23:44.231345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:23:44.231575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:44.244259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:44.247487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:23:44.247796Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:44.247871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:23:44.247913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:23:44.247964Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:23:44.255956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:44.256018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:23:44.256050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:23:44.268528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:44.268611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:23:44.268669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:23:44.268743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:23:44.281317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:23:44.288006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:23:44.295294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:23:44.296602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:23:44.296800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969454 } } Step: 5000001 Media ... 
rceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableIndexes { Name: "UserDefinedIndex" LocalPathId: 4 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 
8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:24:44.887773Z node 26 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:24:44.888124Z node 26 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 400us result status StatusSuccess 2025-06-24T21:24:44.889097Z node 26 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess 
Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 
StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TFlatTest::ShardUnfreezeNonFrozen [GOOD] >> TFlatTest::ShardFreezeUnfreezeRejectScheme >> TFlatTest::SelectRangeReverse [GOOD] >> TFlatTest::SelectRangeReverseExcludeKeys >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex-UseSink+UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex+UseSink+UseDataQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TObjectStorageListingTest::Decimal [GOOD] Test command err: 2025-06-24T21:24:37.440632Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629934547115642:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:37.447655Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a97/r3tmp/tmpjxpQv2/pdisk_1.dat 2025-06-24T21:24:37.799495Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:37.799630Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:37.825834Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629934547115599:2079] 1750800277434018 != 1750800277434021 2025-06-24T21:24:37.834900Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:37.839656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27896, node 1 2025-06-24T21:24:37.932876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:37.932918Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:37.932929Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to 
initialize from file: (empty maybe) 2025-06-24T21:24:37.933075Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17531 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:38.298617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:38.327853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:38.344257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:24:38.358203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:38.469250Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:41.200796Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629952702000776:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:41.200872Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a97/r3tmp/tmp81xuTb/pdisk_1.dat 2025-06-24T21:24:41.359028Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:41.360395Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629952702000755:2079] 1750800281199107 != 1750800281199110 TServer::EnableGrpc on GrpcPort 21357, node 2 2025-06-24T21:24:41.389521Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:41.389625Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:41.419940Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:41.459736Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:41.459762Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:41.459770Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:41.459872Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23010 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:41.719587Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-24T21:24:41.743379Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> TFlatTest::WriteSplitKillRead [GOOD] >> TFlatTest::WriteSplitWriteSplit >> TFlatTest::ShardFreezeRejectBadProtobuf >> TLocksTest::Range_Pinhole ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TObjectStorageListingTest::ManyDeletes [GOOD] Test command err: 2025-06-24T21:24:20.447982Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629858913567980:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:20.452703Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aaa/r3tmp/tmpJScjn3/pdisk_1.dat 2025-06-24T21:24:20.834487Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629858913567860:2079] 1750800260438290 != 1750800260438293 2025-06-24T21:24:20.847692Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:20.908828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:20.908931Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:20.910570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31113, node 1 2025-06-24T21:24:21.182982Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:21.183011Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:21.183024Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:21.183214Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:21.459301Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13206 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:21.777712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:21.822187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:25.449096Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629858913567980:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:25.449203Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:24:26.594525Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629888084897748:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:26.594578Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aaa/r3tmp/tmpaSZirl/pdisk_1.dat 2025-06-24T21:24:26.769101Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:26.769186Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:26.770320Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:26.772025Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629888084897728:2079] 1750800266593938 != 1750800266593941 2025-06-24T21:24:26.786287Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23511, node 2 2025-06-24T21:24:26.843694Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:26.843725Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:26.843733Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:26.843843Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15192 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:24:27.118031Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:27.125570Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:27.142920Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... .2025-06-24T21:24:27.609677Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; .2025-06-24T21:24:31.595590Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519629888084897748:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:31.595692Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:24:36.417889Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T21:24:36.417917Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037890 2025-06-24T21:24:36.418649Z node 2 :TX_DATASHARD DEBUG: check_data_tx_unit.cpp:313: Prepared DataTx transaction txId 281474976716500 at tablet 72075186224037890 2025-06-24T21:24:36.418825Z node 2 :TX_DATASHARD DEBUG: check_data_tx_unit.cpp:313: Prepared DataTx transaction txId 281474976716500 at tablet 72075186224037889 2025-06-24T21:24:36.418916Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037891 2025-06-24T21:24:36.419123Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037892 2025-06-24T21:24:36.419693Z node 2 :TX_DATASHARD DEBUG: check_data_tx_unit.cpp:313: Prepared DataTx transaction txId 281474976716500 at tablet 72075186224037891 2025-06-24T21:24:36.419879Z node 2 :TX_DATASHARD DEBUG: check_data_tx_unit.cpp:313: Prepared DataTx transaction txId 281474976716500 at tablet 72075186224037892 2025-06-24T21:24:36.422419Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037890 2025-06-24T21:24:36.422521Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-24T21:24:36.422902Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037891 2025-06-24T21:24:36.423019Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037892 2025-06-24T21:24:36.432477Z node 2 :TX_DATASHARD DEBUG: 
datashard__plan_step.cpp:69: Planned transaction txId 281474976716500 at step 1750800276470 at tablet 72075186224037892 { Transactions { TxId: 281474976716500 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750800276470 MediatorID: 72057594046382081 TabletID: 72075186224037892 } 2025-06-24T21:24:36.432477Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976716500 at step 1750800276470 at tablet 72075186224037890 { Transactions { TxId: 281474976716500 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750800276470 MediatorID: 72057594046382081 TabletID: 72075186224037890 } 2025-06-24T21:24:36.432509Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:24:36.432510Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 720751862240378 ... 24037891 2025-06-24T21:24:44.825758Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:24:44.825797Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750800284870 : 281474976716911] from 72075186224037891 at tablet 72075186224037891 send result to client [2:7519629965394323347:3907], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:24:44.825819Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-24T21:24:44.825939Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037890 step# 1750800284870} 2025-06-24T21:24:44.825998Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-24T21:24:44.826089Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 1750800284870} 2025-06-24T21:24:44.826161Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-06-24T21:24:44.826669Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976716911 at 72075186224037890 restored its data 2025-06-24T21:24:44.826807Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976716911 at 72075186224037892 restored its data 2025-06-24T21:24:44.827535Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976716911 released its data 2025-06-24T21:24:44.827556Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:24:44.827605Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976716911 released its data 2025-06-24T21:24:44.827625Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:24:44.827652Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037892 step# 1750800284870} 2025-06-24T21:24:44.827768Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:24:44.828001Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-06-24T21:24:44.828390Z node 2 :TX_DATASHARD DEBUG: 
datashard_active_transaction.cpp:661: tx 281474976716911 at 72075186224037889 restored its data 2025-06-24T21:24:44.828866Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976716911 at 72075186224037892 restored its data 2025-06-24T21:24:44.829448Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:24:44.829595Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-24T21:24:44.829693Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976716911 released its data 2025-06-24T21:24:44.829729Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:24:44.830072Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976716911 at 72075186224037890 restored its data 2025-06-24T21:24:44.837707Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:24:44.838460Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976716911 at 72075186224037889 restored its data 2025-06-24T21:24:44.840241Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976716911 released its data 2025-06-24T21:24:44.840275Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:24:44.842973Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-24T21:24:44.843019Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750800284870 : 281474976716911] from 72075186224037892 at tablet 72075186224037892 send result to client [2:7519629965394323347:3907], exec latency: 4 ms, propose latency: 18 ms 2025-06-24T21:24:44.843044Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-24T21:24:44.849452Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-24T21:24:44.850275Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976716911 at 72075186224037889 restored its data 2025-06-24T21:24:44.853799Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:24:44.854899Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-24T21:24:44.854948Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750800284870 : 281474976716911] from 72075186224037890 at tablet 72075186224037890 send result to client [2:7519629965394323347:3907], exec latency: 30 ms, propose latency: 31 ms 2025-06-24T21:24:44.854969Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:24:44.855383Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:24:44.858470Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 
2025-06-24T21:24:44.858533Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750800284870 : 281474976716911] from 72075186224037889 at tablet 72075186224037889 send result to client [2:7519629965394323347:3907], exec latency: 30 ms, propose latency: 34 ms 2025-06-24T21:24:44.858568Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:24:44.877297Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037889 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-24T21:24:44.877990Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037889 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 1 last path: "" contents: 0 common prefixes: 0 2025-06-24T21:24:44.878601Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037889 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 2 last path: "" contents: 0 common prefixes: 0 2025-06-24T21:24:44.880077Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037889 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 3 last path: "" contents: 0 common prefixes: 0 2025-06-24T21:24:44.880285Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037889 S3 Listing: finished status: 0 description: "" contents: 0 common prefixes: 1 2025-06-24T21:24:44.880683Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037891 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-24T21:24:44.880808Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037891 S3 Listing: finished status: 0 description: "" contents: 0 common prefixes: 0 2025-06-24T21:24:44.881043Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037892 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-24T21:24:44.881563Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037892 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, 
value:"Bucket100") (type:4608, value:"/Videos/") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 1 last path: "" contents: 0 common prefixes: 0 2025-06-24T21:24:44.881948Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037892 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 2 last path: "" contents: 0 common prefixes: 0 2025-06-24T21:24:44.882369Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037892 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/Godfather.avi") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 3 last path: "/Videos/Godfather.avi" contents: 2 common prefixes: 0 2025-06-24T21:24:44.882785Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037892 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/House of Cards/Season 1/Chapter 1.avi") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 4 last path: "/Videos/House of Cards/Season 1/Chapter 1.avi" contents: 3 common prefixes: 1 2025-06-24T21:24:44.883274Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037892 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/Terminator 2.avi") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 5 last path: "/Videos/Terminator 2.avi" contents: 4 common prefixes: 1 2025-06-24T21:24:44.883420Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037892 S3 Listing: finished status: 0 description: "" contents: 4 common prefixes: 1 >> KqpJoinOrder::TPCHEveryQueryWorks+ColumnStore [GOOD] >> SystemView::DescribeAccessDenied [GOOD] >> SystemView::CollectScriptingQueries >> SystemView::TabletsRanges [GOOD] >> SystemView::TabletsRangesPredicateExtractDisabled >> TLocksTest::Range_BrokenLock0 >> TFlatTest::Init >> TFlatTest::ShardFreezeUnfreezeRejectScheme [GOOD] >> TFlatTest::SelectRangeReverseExcludeKeys [GOOD] >> TLocksTest::GoodLock >> TFlatTest::ShardFreezeRejectBadProtobuf [GOOD] >> TFlatTest::SelectRangeSkipNullKeys >> TFlatTest::WriteSplitWriteSplit [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCHEveryQueryWorks+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 3639, MsgBus: 1289 2025-06-24T21:17:15.136302Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628037019732965:2197];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:17:15.147004Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/2btm/003ba4/r3tmp/tmpZ6qwED/pdisk_1.dat 2025-06-24T21:17:15.707448Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:17:15.707553Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:17:15.714423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:17:15.765019Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:17:15.767140Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628037019732806:2079] 1750799835089047 != 1750799835089050 TServer::EnableGrpc on GrpcPort 3639, node 1 2025-06-24T21:17:15.950660Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:17:15.950685Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:17:15.950692Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:17:15.950783Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:17:16.148888Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1289 TClient is connected to server localhost:1289 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:17:16.686428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:17:18.873365Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628049904635335:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:18.873492Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:18.873726Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628049904635347:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:17:18.884365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:17:18.914036Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628049904635349:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:17:19.012252Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628054199602696:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:17:19.541531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:17:19.908472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628054199602973:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:19.908761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628054199602973:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:19.909080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628054199602973:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:19.909191Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628054199602973:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:19.909308Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628054199602973:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:19.909436Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628054199602973:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:19.909548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628054199602973:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:19.909658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628054199602973:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:19.909762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628054199602973:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:17:19.909907Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037903;self_id=[1:7519628054199602973:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:17:19.910011Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519628054199602973:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:17:19.933640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628054199602979:2324];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:17:19.933705Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628054199602979:2324];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:17:19.933871Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628054199602979:2324];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:17:19.933996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628054199602979:2324];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:17:19.934136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628054199602979:2324];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:17:19.934257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628054199602979:2324];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:17:19.934389Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628054199602979:2324];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:17:19.934498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628054199602979:2324];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:17:19.934625Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628054199602979:2324];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:17:19.934731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519628054199602979:2324];tablet_id=72075186224037891;process=TTxInitSchema::Execu ... 
ustomer`\n cross join `/Root/nation` n1\n cross join `/Root/nation` n2\n where\n s_suppkey = l_suppkey\n and o_orderkey = l_orderkey\n and c_custkey = o_custkey\n and s_nationkey = n1.n_nationkey\n and c_nationkey = n2.n_nationkey\n and (\n (n1.n_name = 'PERU' and n2.n_name = 'MOZAMBIQUE')\n or (n1.n_name = 'MOZAMBIQUE' and n2.n_name = 'PERU')\n )\n and l_shipdate between date('1995-01-01') and date('1996-12-31')\n ) as shipping\ngroup by\n supp_nation,\n cust_nation,\n l_year\norder by\n supp_nation,\n cust_nation,\n l_year;\n", parameters: 0b 2025-06-24T21:20:51.050078Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx0xgx8eyyyt6v2jyzhtrw", SessionId: ydb://session/3?node_id=1&id=NTk2YWIyMjYtMTY0MDE1OWQtNGVjNDRkMTEtZDdhODljMGE=, Slow query, duration: 13.643727s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "\n\n-- TPC-H/TPC-R National Market Share Query (Q8)\n-- TPC TPC-H Parameter Substitution (Version 2.17.2 build 0)\n-- using 1680793381 as a seed to the RNG\n\nselect\n o_year,\n sum(case\n when nation = 'MOZAMBIQUE' then volume\n else 0\n end) / sum(volume) as mkt_share\nfrom\n (\n select\n DateTime::GetYear(o_orderdate) as o_year,\n l_extendedprice * (1 - l_discount) as volume,\n n2.n_name as nation\n from\n `/Root/part`\n cross join `/Root/supplier`\n cross join `/Root/lineitem`\n cross join `/Root/orders`\n cross join `/Root/customer`\n cross join `/Root/nation` n1\n cross join `/Root/nation` n2\n cross join `/Root/region`\n where\n p_partkey = l_partkey\n and s_suppkey = l_suppkey\n and l_orderkey = o_orderkey\n and o_custkey = c_custkey\n and c_nationkey = n1.n_nationkey\n and n1.n_regionkey = r_regionkey\n and r_name = 'AFRICA'\n and s_nationkey = n2.n_nationkey\n and o_orderdate between date('1995-01-01') and date('1996-12-31')\n and p_type = 'ECONOMY PLATED COPPER'\n ) as all_nations\ngroup by\n o_year\norder by\n o_year;\n\n", parameters: 0b 2025-06-24T21:21:02.243258Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx1avpdgn0fd15mjpykvq1", SessionId: ydb://session/3?node_id=1&id=NTk2YWIyMjYtMTY0MDE1OWQtNGVjNDRkMTEtZDdhODljMGE=, Slow query, duration: 11.165622s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "\n\n-- TPC-H/TPC-R Product Type Profit Measure Query (Q9)\n-- TPC TPC-H Parameter Substitution (Version 2.17.2 build 0)\n-- using 1680793381 as a seed to the RNG\n\nselect\n nation,\n o_year,\n sum(amount) as sum_profit\nfrom\n (\n select\n n_name as nation,\n DateTime::GetYear(o_orderdate) as o_year,\n l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount\n from\n `/Root/part`\n cross join `/Root/supplier`\n cross join `/Root/lineitem`\n cross join `/Root/partsupp`\n cross join `/Root/orders`\n cross join `/Root/nation`\n where\n s_suppkey = l_suppkey\n and ps_suppkey = l_suppkey\n and ps_partkey = l_partkey\n and p_partkey = l_partkey\n and o_orderkey = l_orderkey\n and s_nationkey = n_nationkey\n and p_name like '%rose%'\n ) as profit\ngroup by\n nation,\n o_year\norder by\n nation,\n o_year desc;\n", parameters: 0b 2025-06-24T21:21:24.212172Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx1zdc245etkr6gkjeba41", SessionId: ydb://session/3?node_id=1&id=NTk2YWIyMjYtMTY0MDE1OWQtNGVjNDRkMTEtZDdhODljMGE=, Slow query, duration: 12.103267s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "\n\n-- TPC-H/TPC-R Important Stock Identification Query (Q11)\n-- TPC TPC-H Parameter Substitution (Version 2.17.2 build 
0)\n-- using 1680793381 as a seed to the RNG\n\nselect\n `/Root/partsupp`.ps_partkey as ps_partkey,\n sum(ps_supplycost * ps_availqty) as value\nfrom\n `/Root/partsupp`\n cross join `/Root/supplier`\n cross join `/Root/nation`\n cross join (\n select\n sum(ps_supplycost * ps_availqty) * 0.0001000000 as ps_threshold\n from\n `/Root/partsupp`\n cross join `/Root/supplier`\n cross join `/Root/nation`\n where\n ps_suppkey = s_suppkey\n and s_nationkey = n_nationkey\n and n_name = 'CANADA'\n ) as threshold\nwhere\n ps_suppkey = s_suppkey\n and s_nationkey = n_nationkey\n and n_name = 'CANADA'\ngroup by\n `/Root/partsupp`.ps_partkey, threshold.ps_threshold having\n sum(ps_supplycost * ps_availqty) > threshold.ps_threshold\norder by\n value desc;\n", parameters: 0b 2025-06-24T21:21:36.842096Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyhx2pcc0w3zvc74kyyx6mtq, SessionId: CompileActor 2025-06-24 21:21:36.841 WARN ydb-core-kqp-ut-join(pid=882050, tid=0x00007F7733E85640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed 2025-06-24T21:22:30.970762Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx3vac5yqe7sjsmdxyxdkp", SessionId: ydb://session/3?node_id=1&id=NTk2YWIyMjYtMTY0MDE1OWQtNGVjNDRkMTEtZDdhODljMGE=, Slow query, duration: 17.517221s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "\n\n-- TPC-H/TPC-R Suppliers Who Kept Orders Waiting Query (Q21)\n-- TPC TPC-H Parameter Substitution (Version 2.17.2 build 0)\n-- using 1680793381 as a seed to the RNG\n\nselect\n `/Root/supplier`.s_name as s_name,\n count(*) as numwait\nfrom\n `/Root/supplier`\n cross join `/Root/lineitem` l1\n cross join `/Root/orders`\n cross join `/Root/nation`\n left semi join (\n select\n l2.l_orderkey as l_orderkey\n from\n `/Root/lineitem` l2\n cross join `/Root/supplier`\n cross join `/Root/lineitem` l1\n cross join `/Root/orders`\n cross join `/Root/nation`\n where\n s_suppkey = l1.l_suppkey\n and o_orderkey = l1.l_orderkey\n and o_orderstatus = 'F'\n and l1.l_receiptdate > l1.l_commitdate\n and s_nationkey = n_nationkey\n and n_name = 'EGYPT'\n and l2.l_orderkey = l1.l_orderkey\n and l2.l_suppkey <> l1.l_suppkey\n ) as l2 on l2.l_orderkey = l1.l_orderkey\n left only join (\n select\n l3.l_orderkey as l_orderkey\n from\n `/Root/lineitem` l3\n cross join `/Root/supplier`\n cross join `/Root/lineitem` l1\n cross join `/Root/orders`\n cross join `/Root/nation`\n where\n s_suppkey = l1.l_suppkey\n and o_orderkey = l1.l_orderkey\n and o_orderstatus = 'F'\n and l1.l_receiptdate > l1.l_commitdate\n and s_nationkey = n_nationkey\n and n_name = 'EGYPT'\n and l3.l_orderkey = l1.l_orderkey\n and l3.l_suppkey <> l1.l_suppkey\n and l3.l_receiptdate > l3.l_commitdate\n ) as l3 on l3.l_orderkey = l1.l_orderkey\nwhere\n s_suppkey = l1.l_suppkey\n and o_orderkey = l1.l_orderkey\n and o_orderstatus = 'F'\n and l1.l_receiptdate > l1.l_commitdate\n and s_nationkey = n_nationkey\n and n_name = 'EGYPT'\ngroup by\n `/Root/supplier`.s_name\norder by\n numwait desc,\n s_name\nlimit 100;\n", parameters: 0b 2025-06-24T21:22:38.568969Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyhx4jvc9dqw3yy3gmhne3jx, SessionId: CompileActor 2025-06-24 21:22:38.568 WARN ydb-core-kqp-ut-join(pid=882050, tid=0x00007F7733676640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable 
to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed 2025-06-24T21:23:07.673169Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyhx5ftsdcm452fjc14vee52, SessionId: CompileActor 2025-06-24 21:23:07.672 WARN ydb-core-kqp-ut-join(pid=882050, tid=0x00007F7733E85640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed 2025-06-24T21:23:28.985344Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx5s23fhh6vy75afchtb5f", SessionId: ydb://session/3?node_id=1&id=NTk2YWIyMjYtMTY0MDE1OWQtNGVjNDRkMTEtZDdhODljMGE=, Slow query, duration: 12.309200s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "-- TPC-H/TPC-R National Market Share Query (Q8)\n-- TPC TPC-H Parameter Substitution (Version 2.17.2 build 0)\n-- using 1680793381 as a seed to the RNG\n\n$join1 = (\nselect\n l.l_extendedprice * (1 - l.l_discount) as volume,\n l.l_suppkey as l_suppkey,\n l.l_orderkey as l_orderkey\nfrom\n `/Root/part` as p\njoin\n `/Root/lineitem` as l\non\n p.p_partkey = l.l_partkey\nwhere\n p.p_type = 'ECONOMY PLATED COPPER'\n);\n$join2 = (\nselect\n j.volume as volume,\n j.l_orderkey as l_orderkey,\n s.s_nationkey as s_nationkey\nfrom\n $join1 as j\njoin\n `/Root/supplier` as s\non\n s.s_suppkey = j.l_suppkey\n);\n$join3 = (\nselect\n j.volume as volume,\n j.l_orderkey as l_orderkey,\n n.n_name as nation\nfrom\n $join2 as j\njoin\n `/Root/nation` as n\non\n n.n_nationkey = j.s_nationkey\n);\n$join4 = (\nselect\n j.volume as volume,\n j.nation as nation,\n DateTime::GetYear(cast(o.o_orderdate as Timestamp)) as o_year,\n o.o_custkey as o_custkey\nfrom\n $join3 as j\njoin\n `/Root/orders` as o\non\n o.o_orderkey = j.l_orderkey\nwhere o_orderdate between Date('1995-01-01') and Date('1996-12-31')\n);\n$join5 = (\nselect\n j.volume as volume,\n j.nation as nation,\n j.o_year as o_year,\n c.c_nationkey as c_nationkey\nfrom\n $join4 as j\njoin\n `/Root/customer` as c\non\n c.c_custkey = j.o_custkey\n);\n$join6 = (\nselect\n j.volume as volume,\n j.nation as nation,\n j.o_year as o_year,\n n.n_regionkey as n_regionkey\nfrom\n $join5 as j\njoin\n `/Root/nation` as n\non\n n.n_nationkey = j.c_nationkey\n);\n$join7 = (\nselect\n j.volume as volume,\n j.nation as nation,\n j.o_year as o_year\nfrom\n $join6 as j\njoin\n `/Root/region` as r\non\n r.r_regionkey = j.n_regionkey\nwhere\n r.r_name = 'AFRICA'\n);\n\nselect\n o_year,\n sum(case\n when nation = 'MOZAMBIQUE' then volume\n else 0\n end) / sum(volume) as mkt_share\nfrom\n $join7 as all_nations\ngroup by\n o_year\norder by\n o_year;\n", parameters: 0b 2025-06-24T21:24:01.155372Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyhx73fy6jh93safpm38cp3b, SessionId: CompileActor 2025-06-24 21:24:01.154 WARN ydb-core-kqp-ut-join(pid=882050, tid=0x00007F7733676640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. 
Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed >> TLocksTest::GoodSameKeyLock [GOOD] >> TLocksTest::GoodSameShardLock >> TLocksTest::Range_BrokenLock2 [GOOD] >> TLocksTest::Range_BrokenLock3 >> TLocksTest::MultipleLocks [GOOD] >> TFlatTest::CopyTableAndCompareColumnsSchema [GOOD] >> TFlatTest::CopyTableAndDropCopy >> SystemView::ShowCreateTableColumn [GOOD] >> SystemView::ShowCreateTableKeyBloomFilter ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::ShardFreezeUnfreezeRejectScheme [GOOD] Test command err: 2025-06-24T21:24:42.972114Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629957167489626:2194];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:42.972336Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a7c/r3tmp/tmpydgoBd/pdisk_1.dat 2025-06-24T21:24:43.365650Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629957167489461:2079] 1750800282947587 != 1750800282947590 2025-06-24T21:24:43.395246Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:43.395346Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:43.398829Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:43.401173Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TClient is connected to server localhost:62548 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:43.714204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:24:43.751480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:24:43.774601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:43.937945Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629961462457420:2363] txid# 281474976710659, issues: { message: "Requested freeze state already set" severity: 1 } Error 1: Requested freeze state already set 2025-06-24T21:24:43.971546Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:46.249239Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629972618533967:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:46.249320Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a7c/r3tmp/tmpELBVON/pdisk_1.dat 2025-06-24T21:24:46.400517Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:46.403291Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629972618533948:2079] 1750800286248321 != 1750800286248324 2025-06-24T21:24:46.423819Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:46.423895Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:46.428319Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6147 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:24:46.662620Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:46.668991Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:24:46.672451Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:46.767063Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) waiting... Error 128: Table is frozen. Only unfreeze alter is allowed 2025-06-24T21:24:46.781355Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519629972618534650:2392] txid# 281474976715660, issues: { message: "Table is frozen. Only unfreeze alter is allowed" severity: 1 } 2025-06-24T21:24:46.783811Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) waiting... waiting... 
2025-06-24T21:24:46.795425Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SelectRangeReverseExcludeKeys [GOOD] Test command err: 2025-06-24T21:24:42.958570Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629957140576473:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:42.958602Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a7b/r3tmp/tmpQd3cpS/pdisk_1.dat 2025-06-24T21:24:43.381978Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:43.394689Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629957140576454:2079] 1750800282957290 != 1750800282957293 2025-06-24T21:24:43.437979Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:43.438067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:43.444515Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3507 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:43.782424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:43.803784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:24:43.817051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:24:43.830170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:43.979109Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:46.281958Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629973076230332:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:46.298522Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a7b/r3tmp/tmpxsQWG3/pdisk_1.dat 2025-06-24T21:24:46.423564Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:46.425635Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629973076230299:2079] 1750800286280584 != 1750800286280587 2025-06-24T21:24:46.445209Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:46.445336Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:46.450489Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29106 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:46.648830Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-24T21:24:46.676324Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::WriteSplitWriteSplit [GOOD] Test command err: 2025-06-24T21:24:42.984738Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629955061435594:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:42.984844Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a7a/r3tmp/tmptA7ApR/pdisk_1.dat 2025-06-24T21:24:43.372799Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:43.373840Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629955061435576:2079] 1750800282984057 != 1750800282984060 2025-06-24T21:24:43.404109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:43.404255Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:43.405989Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1128 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:43.697408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:43.710861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:24:43.727582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:43.956636Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.009s,wait=0.001s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-06-24T21:24:43.960953Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.001s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1155 521 2626)b }, ecr=1.000 2025-06-24T21:24:44.005086Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:44.014720Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.004s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1621 647 6413)b }, ecr=1.000 2025-06-24T21:24:44.025362Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.005s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2390 1432 5183)b }, ecr=1.000 2025-06-24T21:24:44.066032Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.21, eph 3} end=Done, 4 blobs 8r (max 9), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (3543 2180 6413)b }, ecr=1.000 TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750800283848 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... 
(TRUNCATED) 2025-06-24T21:24:44.205620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { OperationType: ESchemeOpSplitMergeTablePartitions SplitMergeTablePartitions { TablePath: "/dc-1/Dir/TableOld" SourceTabletId: 72075186224037888 SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 100 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 200 } } } } } } TxId: 281474976710680 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:24:44.205926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:804: TSplitMerge Propose, tableStr: /dc-1/Dir/TableOld, tableId: , opId: 281474976710680:0, at schemeshard: 72057594046644480, request: TablePath: "/dc-1/Dir/TableOld" SourceTabletId: 72075186224037888 SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 100 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 200 } } } } 2025-06-24T21:24:44.206167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-24T21:24:44.206234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-06-24T21:24:44.206258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 5 2025-06-24T21:24:44.206466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 6 2025-06-24T21:24:44.206490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976710680:0 type: TxSplitTablePartition target path: [OwnerId: 72057594046644480, LocalPathId: 3] source path: 2025-06-24T21:24:44.206723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:1083: TSplitMerge Propose accepted, tableStr: /dc-1/Dir/TableOld, tableId: , opId: 281474976710680:0, at schemeshard: 72057594046644480, op: SourceRanges { KeyRangeBegin: "\001\000\000\000\000\200" KeyRangeEnd: "\001\000\004\000\000\000\377\377\377\177" TabletID: 72075186224037888 ShardIdx: 1 } DestinationRanges { KeyRangeBegin: "\001\000\000\000\000\200" KeyRangeEnd: "\001\000\004\000\000\000d\000\000\000" ShardIdx: 3 } DestinationRanges { KeyRangeBegin: "\001\000\004\000\000\000d\000\000\000" KeyRangeEnd: "\001\000\004\000\000\000\310\000\000\000" ShardIdx: 4 } DestinationRanges { KeyRangeBegin: "\001\000\004\000\000\000\310\000\000\000" KeyRangeEnd: "\001\000\004\000\000\000\377\377\377\177" ShardIdx: 5 }, request: TablePath: "/dc-1/Dir/TableOld" SourceTabletId: 72075186224037888 SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 100 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 200 } } } } 2025-06-24T21:24:44.206761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710680:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:24:44.207771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976710680, response: Status: StatusAccepted TxId: 281474976710680 SchemeshardId: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T21:24:44.207917Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710680, subject: , status: StatusAccepted, operation: ALTER TABLE PARTITIONS, path: /dc-1/Dir/TableOld 2025-06-24T21:24:44.208051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710680:0, at schemeshard: 72057594046644480 2025-06-24T21:24:44.208084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976710680:0 ProgressState, operation type: TxSplitTablePartition, at tablet# 72057594046644480 waiting... 2025-06-24T21:24:44.209562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:358: TCreateParts opId# 281474976710680:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 3 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 3 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-06-24T21:24:44.209726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:358: TCreateParts opId# 281474976710680:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 4 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 3 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-06-24T21:24:44.209801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:358: TCreateParts opId# 281474976710680:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 5 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 3 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-06-24T21:24:44.210019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710680, at schemeshard: 72057594046644480 2025-06-24T21:24:44.210036Z node 1 :FLAT_TX_SCHEMESHAR ... 
046644480 2025-06-24T21:24:47.455227Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-24T21:24:47.455556Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 7 2025-06-24T21:24:47.456403Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2025-06-24T21:24:47.456637Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-24T21:24:47.456669Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-24T21:24:47.460046Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519629975886620697 RawX2: 4503608217307441 } TabletId: 72075186224037894 State: 4 2025-06-24T21:24:47.460094Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037894, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:24:47.460542Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519629975886620696 RawX2: 4503608217307440 } TabletId: 72075186224037893 State: 4 2025-06-24T21:24:47.460573Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037893, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:24:47.460690Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519629975886620514 RawX2: 4503608217307415 } TabletId: 72075186224037892 State: 4 2025-06-24T21:24:47.460713Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037892, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:24:47.460790Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519629971591652862 RawX2: 4503608217307346 } TabletId: 72075186224037889 State: 4 2025-06-24T21:24:47.460824Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037889, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:24:47.460894Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519629975886620506 RawX2: 4503608217307413 } TabletId: 72075186224037890 State: 4 2025-06-24T21:24:47.460914Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 
2025-06-24T21:24:47.461023Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:7 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:24:47.461253Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519629975886620507 RawX2: 4503608217307414 } TabletId: 72075186224037891 State: 4 2025-06-24T21:24:47.461285Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:24:47.461643Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:6 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:24:47.461709Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:5 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:24:47.461748Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:24:47.461777Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:24:47.461921Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:24:47.463122Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 7 ShardOwnerId: 72057594046644480 ShardLocalIdx: 7, at schemeshard: 72057594046644480 2025-06-24T21:24:47.463320Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 6 2025-06-24T21:24:47.463490Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046644480 ShardLocalIdx: 6, at schemeshard: 72057594046644480 2025-06-24T21:24:47.463605Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 5 2025-06-24T21:24:47.463698Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046644480 ShardLocalIdx: 5, at schemeshard: 72057594046644480 2025-06-24T21:24:47.463806Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-06-24T21:24:47.463894Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-24T21:24:47.463998Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-24T21:24:47.464082Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 
ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-24T21:24:47.464176Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-24T21:24:47.464249Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-24T21:24:47.464374Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-24T21:24:47.464466Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T21:24:47.464490Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-24T21:24:47.464521Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Path not found" 2025-06-24T21:24:47.466122Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037894 not found 2025-06-24T21:24:47.466151Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037893 not found 2025-06-24T21:24:47.466163Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037892 not found 2025-06-24T21:24:47.466175Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037889 not found 2025-06-24T21:24:47.466188Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037890 not found 2025-06-24T21:24:47.466205Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037891 not found 2025-06-24T21:24:47.466274Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:7 2025-06-24T21:24:47.466286Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:7 tabletId 72075186224037894 2025-06-24T21:24:47.466319Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:6 2025-06-24T21:24:47.466326Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:6 tabletId 72075186224037893 2025-06-24T21:24:47.466342Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-24T21:24:47.466349Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:5 tabletId 72075186224037892 
2025-06-24T21:24:47.466364Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-24T21:24:47.466372Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-24T21:24:47.466388Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-24T21:24:47.466395Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-24T21:24:47.466412Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-24T21:24:47.466424Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-24T21:24:47.466447Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-24T21:24:47.557518Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::MultipleLocks [GOOD] Test command err: 2025-06-24T21:24:23.153855Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629872854844494:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:23.153999Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa4/r3tmp/tmp9tYTaS/pdisk_1.dat 2025-06-24T21:24:23.481092Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:23.482300Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629872854844475:2079] 1750800263152871 != 1750800263152874 2025-06-24T21:24:23.551676Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:23.551816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:23.553529Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5188 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:23.771860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:23.785543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:23.801650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:23.945937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:24.022100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:24.179883Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:26.289987Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629887439865001:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:26.290072Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa4/r3tmp/tmpC2xLj1/pdisk_1.dat 2025-06-24T21:24:26.415853Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:26.417311Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629887439864982:2079] 1750800266289284 != 1750800266289287 2025-06-24T21:24:26.443817Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:26.443921Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:26.445495Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7284 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:26.669448Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:26.694395Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:26.745818Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:26.802435Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:29.784371Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519629899847509396:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:29.784432Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa4/r3tmp/tmpociJQY/pdisk_1.dat 2025-06-24T21:24:29.920108Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:29.921436Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519629899847509377:2079] 1750800269783444 != 1750800269783447 2025-06-24T21:24:29.964847Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:29.964946Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:29.968146Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24957 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:24:30.143270Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:30.162033Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... -1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa4/r3tmp/tmpHPDjXF/pdisk_1.dat 2025-06-24T21:24:37.668175Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:37.697925Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:37.698019Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:37.699780Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6550 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:37.906427Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:37.925921Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:37.996703Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:38.070438Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:41.704613Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519629952985666847:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:41.704710Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa4/r3tmp/tmpjFbuxg/pdisk_1.dat 2025-06-24T21:24:41.885123Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:41.887426Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519629952985666826:2079] 1750800281703657 != 1750800281703660 2025-06-24T21:24:41.900874Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:41.900975Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:41.904893Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9103 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:24:42.125584Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:42.147449Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:42.219722Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:42.277111Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:46.132187Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519629972634848080:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:46.132265Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa4/r3tmp/tmpHNkBA3/pdisk_1.dat 2025-06-24T21:24:46.318581Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:46.341149Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:46.341265Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:46.343261Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2869 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:46.629427Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:46.639828Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:46.656367Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:46.736971Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:46.799043Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
>> TFlatTest::Init [GOOD] >> TFlatTest::LargeDatashardReply >> TLocksTest::BrokenLockErase >> SystemView::AuthGroupMembers [GOOD] >> SystemView::AuthGroupMembers_Access >> TFlatTest::SelectRangeSkipNullKeys [GOOD] >> TLocksTest::UpdateLockedKey [GOOD] >> TLocksTest::SetLockNothing >> TFlatTest::AutoSplitBySize >> TLocksFatTest::RangeSetRemove >> TFlatTest::SplitEmptyToMany >> TFlatTest::CopyTableAndDropCopy [GOOD] >> TObjectStorageListingTest::TestFilter ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SelectRangeSkipNullKeys [GOOD] Test command err: 2025-06-24T21:24:46.940876Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629972861807223:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:46.942953Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a79/r3tmp/tmp0yFzwa/pdisk_1.dat 2025-06-24T21:24:47.307487Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:47.307613Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:47.310596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:47.314967Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:47.316738Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629972861807127:2079] 1750800286921142 != 1750800286921145 TClient is connected to server localhost:28169 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:47.660702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:24:47.691529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:24:47.698898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:47.849104Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629977156775087:2363] txid# 281474976715659, issues: { message: "Mix freeze cmd with other options is forbidden" severity: 1 } Error 128: Mix freeze cmd with other options is forbidden 2025-06-24T21:24:47.853434Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629977156775100:2369] txid# 281474976715660, issues: { message: "Unexpected freeze state" severity: 1 } Error 128: Unexpected freeze state 2025-06-24T21:24:47.857571Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629977156775106:2374] txid# 281474976715661, issues: { message: "Mix freeze cmd with other options is forbidden" severity: 1 } Error 128: Mix freeze cmd with other options is forbidden 2025-06-24T21:24:47.860398Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629977156775112:2379] txid# 281474976715662, issues: { message: "Mix freeze cmd with other options is forbidden" severity: 1 } Error 128: Mix freeze cmd with other options is forbidden 2025-06-24T21:24:50.046563Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629990482743140:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:50.046647Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a79/r3tmp/tmpQ1W43L/pdisk_1.dat 2025-06-24T21:24:50.160043Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:50.161784Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629990482743124:2079] 1750800290045983 != 1750800290045986 2025-06-24T21:24:50.191883Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:50.191976Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:50.193344Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3851 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:50.385175Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:50.409541Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> TLocksTest::CK_GoodLock [GOOD] >> TLocksTest::CK_BrokenLock >> SystemView::TabletsRangesPredicateExtractDisabled [GOOD] >> SystemView::CollectScriptingQueries [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::CopyTableAndDropCopy [GOOD] Test command err: 2025-06-24T21:24:35.069178Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629923255663690:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:35.069509Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a9a/r3tmp/tmpFDiEin/pdisk_1.dat 2025-06-24T21:24:35.517692Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629923255663582:2079] 1750800275060592 != 1750800275060595 2025-06-24T21:24:35.517930Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:35.558167Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:35.558270Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:35.561426Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5724 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:35.852590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:35.879531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:35.893787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:24:35.898556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:24:36.074338Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls request: /dc-1/Dir/Table_1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table_1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750800276008 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table_1" Columns { Name: "col_0" Type: "Int32" TypeId: 1 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "col_0" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) TClient::Ls request: /dc-1/Dir/Table_1_Copy TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table_1_Copy" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1750800276113 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table_1_Copy" Columns { Name: "col_0" Type: "Int32" TypeId: 1 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "col_0" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot... (TRUNCATED) 2025-06-24T21:24:36.145813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... TClient::Ls request: /dc-1/Dir/Table_2 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table_2" PathId: 5 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710661 CreateStep: 1750800276218 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table_2" Columns { Name: "col_0" Type: "Int32" TypeId: 1 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "col_1" Type: "Int32" TypeId: 1 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "col_0" ... (TRUNCATED) TClient::Ls request: /dc-1/Dir/Table_2_Copy TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table_2_Copy" PathId: 6 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710662 CreateStep: 1750800276295 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table_2_Copy" Columns { Name: "col_0" Type: "Int32" TypeId: 1 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "col_1" Type: "Int32" TypeId: 1 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: ... (TRUNCATED) 2025-06-24T21:24:36.329333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
TClient::Ls request: /dc-1/Dir/Table_3 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table_3" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710663 CreateStep: 1750800276414 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table_3" Columns { Name: "col_0" Type: "Int32" TypeId: 1 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "col_1" Type: "Int32" TypeId: 1 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "col... (TRUNCATED) TClient::Ls request: /dc-1/Dir/Table_3_Copy TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table_3_Copy" PathId: 8 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710664 CreateStep: 1750800276456 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table_3_Copy" Columns { Name: "col_0" Type: "Int32" TypeId: 1 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "col_1" Type: "Int32" TypeId: 1 Id: 2 NotNull: false IsBuildInProgress: false } Columns { ... (TRUNCATED) 2025-06-24T21:24:36.480143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... TClient::Ls request: /dc-1/Dir/Table_4 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table_4" PathId: 9 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710665 CreateStep: 17 ... 
281474976715686 ready parts: 1/1 2025-06-24T21:24:51.901932Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037894 Got TEvSchemaChangedResult from SS at 72075186224037894 2025-06-24T21:24:51.901937Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715686:0 2025-06-24T21:24:51.901945Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715686:0 2025-06-24T21:24:51.902049Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 6] was 4 2025-06-24T21:24:51.905758Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:7519629993582337618:3004], serverId# [2:7519629993582337619:3005], sessionId# [0:0:0] 2025-06-24T21:24:51.905865Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-24T21:24:51.907257Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-24T21:24:51.907314Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-24T21:24:51.910527Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037895, clientId# [2:7519629993582337628:3011], serverId# [2:7519629993582337629:3012], sessionId# [0:0:0] 2025-06-24T21:24:51.910663Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037895 2025-06-24T21:24:51.911971Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037895 2025-06-24T21:24:51.912021Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037895 2025-06-24T21:24:51.915050Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-24T21:24:51.916222Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-24T21:24:51.916271Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-24T21:24:51.918940Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037895 2025-06-24T21:24:51.921139Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037895 2025-06-24T21:24:51.921204Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037895 2025-06-24T21:24:51.924052Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-24T21:24:51.925300Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-24T21:24:51.925355Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-24T21:24:51.926664Z node 2 :OPS_COMPACT INFO: Compact{72075186224037894.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put 
Spent{time=0.001s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-06-24T21:24:51.927491Z node 2 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037894, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-24T21:24:51.927537Z node 2 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037894, table# 1001, finished edge# 0, front# 0 2025-06-24T21:24:51.929917Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037895 2025-06-24T21:24:51.931294Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037895 2025-06-24T21:24:51.931350Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037895 2025-06-24T21:24:51.934895Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-24T21:24:51.936088Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-24T21:24:51.936144Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-24T21:24:51.936631Z node 2 :OPS_COMPACT INFO: Compact{72075186224037895.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.001s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-24T21:24:51.937507Z node 2 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037895, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-24T21:24:51.937555Z node 2 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037895, table# 1001, finished edge# 0, front# 0 2025-06-24T21:24:51.939699Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037895 2025-06-24T21:24:51.940885Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037895 2025-06-24T21:24:51.940941Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037895 2025-06-24T21:24:51.943902Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-24T21:24:51.945079Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-24T21:24:51.945138Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-24T21:24:51.948167Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037895 2025-06-24T21:24:51.949443Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037895 2025-06-24T21:24:51.949507Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037895 2025-06-24T21:24:51.953313Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-24T21:24:51.954843Z node 2 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-24T21:24:51.954898Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-24T21:24:51.957046Z node 2 :OPS_COMPACT INFO: Compact{72075186224037894.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1589 647 6413)b }, ecr=1.000 2025-06-24T21:24:51.957563Z node 2 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037894, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-24T21:24:51.957588Z node 2 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037894, table# 1001, finished edge# 0, front# 0 2025-06-24T21:24:51.957882Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037895 2025-06-24T21:24:51.959362Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037895 2025-06-24T21:24:51.959425Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037895 2025-06-24T21:24:51.962496Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-24T21:24:51.964556Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-24T21:24:51.964642Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-24T21:24:51.965420Z node 2 :OPS_COMPACT INFO: Compact{72075186224037895.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.001s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2342 1432 5183)b }, ecr=1.000 2025-06-24T21:24:51.966187Z node 2 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037895, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-24T21:24:51.966210Z node 2 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037895, table# 1001, finished edge# 0, front# 0 2025-06-24T21:24:51.967911Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037895 2025-06-24T21:24:51.969708Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037895 2025-06-24T21:24:51.969767Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037895 2025-06-24T21:24:51.972485Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-24T21:24:51.974370Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-24T21:24:51.974427Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-24T21:24:51.977764Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037895 2025-06-24T21:24:51.979499Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037895 
2025-06-24T21:24:51.979557Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037895 Check that tablet 72075186224037892 was deleted 2025-06-24T21:24:51.980316Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037892) Check that tablet 72075186224037893 was deleted 2025-06-24T21:24:51.980747Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037893) Check that tablet 72075186224037888 was deleted 2025-06-24T21:24:51.980942Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037888) Check that tablet 72075186224037889 was deleted 2025-06-24T21:24:51.981131Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037889) Check that tablet 72075186224037890 was deleted 2025-06-24T21:24:51.981374Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037890) Check that tablet 72075186224037891 was deleted 2025-06-24T21:24:51.981707Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037891) >> TLocksTest::Range_IncorrectNullDot1 [GOOD] >> TLocksTest::Range_IncorrectNullDot2 >> TLocksTest::GoodDupLock ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::TabletsRangesPredicateExtractDisabled [GOOD] Test command err: 2025-06-24T21:23:28.838247Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629637821510266:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:28.840744Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ae4/r3tmp/tmpyRyxci/pdisk_1.dat 2025-06-24T21:23:29.501583Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:29.515161Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629637821510071:2079] 1750800208823357 != 1750800208823360 2025-06-24T21:23:29.530021Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:29.530124Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:29.533018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28374, node 1 2025-06-24T21:23:29.840371Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:30.200333Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:30.200366Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:30.200374Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:30.200502Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26644 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:31.185023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:32.700613Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629655001379904:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:32.703248Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629655001379892:2288], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:32.703377Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:32.714041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:32.728864Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629655001379906:2292], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:23:32.800225Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629655001379957:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:33.843295Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629637821510266:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:33.843379Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:34.518271Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhx67d37vqk61vggeta52ax, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjE0ZDRiMDItMjE5N2IwNmQtZTQxOTdhZi1hYjhmYWE1OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:23:34.634176Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519629663591314601:2303], owner: [1:7519629663591314597:2301], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:23:34.636237Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519629663591314601:2303], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-24T21:23:34.682999Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519629663591314601:2303], row count: 4, finished: 1 2025-06-24T21:23:34.699327Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519629663591314601:2303], owner: [1:7519629663591314597:2301], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:23:34.730546Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800214413, txId: 281474976710660] shutting down 2025-06-24T21:23:35.131480Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. Ctx: { TraceId: 01jyhx6aq451kzcd0hvt1rfj20, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjhiOWUzYTItMTJlYjgyYzYtMjg5OWYxYTAtYmViZjU4ZDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:23:35.134655Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519629667886281942:2318], owner: [1:7519629667886281939:2316], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:23:35.140852Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519629667886281942:2318], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-24T21:23:35.141192Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519629667886281942:2318], row count: 2, finished: 1 2025-06-24T21:23:35.141275Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519629667886281942:2318], owner: [1:7519629667886281939:2316], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:23:35.143330Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800215123, txId: 281474976710662] shutting down 2025-06-24T21:23:35.328883Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710665. Ctx: { TraceId: 01jyhx6b44cjh1q1e3e8jpfz7v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTdkY2I3MTYtYTc1OWNkOWYtNGQ2M2YyYmItYzhmYWM4NzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:23:35.332439Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519629667886281974:2327], owner: [1:7519629667886281971:2325], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:23:35.343371Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519629667886281974:2327], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-24T21:23:35.347386Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519629667886281974:2327], row count: 3, finished: 1 2025-06-24T21:23:35.347498Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519629667886281974:2327], owner: [1:7519629667886281971:2325], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:23:35.350344Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800215327, txId: 281474976710664] shutting down 2025-06-24T21:23:35.528473Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710667. Ctx: { TraceId: 01jyhx6bad6pkwyzw5tpzd4aw3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjU0NWM0ODEtNTEzZDVlMTAtODQzNTE0ZTYtNGEwYmZiZTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:23:35.530692Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519629667886282006:2336], owner: [1:7519629667886282003:2334], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:23:35.531396Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519629667886282006:2336], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-24T21:23:35.531795Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:75 ... impl.h:47: Scan started, actor: [20:7519629976442536242:2407], owner: [20:7519629976442536239:2405], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:24:47.183761Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [20:7519629976442536242:2407], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-24T21:24:47.184159Z node 20 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [20:7519629976442536242:2407], row count: 4, finished: 1 2025-06-24T21:24:47.184255Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [20:7519629976442536242:2407], owner: [20:7519629976442536239:2405], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:24:47.187873Z node 20 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800287178, txId: 281474976715677] shutting down 2025-06-24T21:24:47.409263Z node 20 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715680. Ctx: { TraceId: 01jyhx8hes6r4brdnyq4473bt8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=20&id=N2Q3YTZlOS0xMzE2MGFiYy03MTlmNTA4Zi1iNjgwZjYwMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:24:47.412305Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [20:7519629976442536274:2416], owner: [20:7519629976442536271:2414], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:24:47.414455Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [20:7519629976442536274:2416], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-24T21:24:47.414880Z node 20 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [20:7519629976442536274:2416], row count: 4, finished: 1 2025-06-24T21:24:47.414977Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [20:7519629976442536274:2416], owner: [20:7519629976442536271:2414], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:24:47.417415Z node 20 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800287408, txId: 281474976715679] shutting down 2025-06-24T21:24:48.529143Z node 21 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[21:7519629980231639269:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:48.529260Z node 21 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ae4/r3tmp/tmpH9zHsP/pdisk_1.dat 2025-06-24T21:24:48.756769Z node 21 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:48.759308Z node 21 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [21:7519629980231639245:2079] 1750800288527938 != 1750800288527941 2025-06-24T21:24:48.775818Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:48.775950Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:48.780491Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14777, node 21 2025-06-24T21:24:48.840105Z node 21 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:48.840143Z node 21 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:48.840160Z node 21 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:48.840356Z node 21 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11822 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:24:49.347118Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:49.551967Z node 21 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:53.529636Z node 21 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[21:7519629980231639269:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:53.529738Z node 21 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:24:54.351037Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:54.512013Z node 21 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [21:7519630006001443923:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:54.512076Z node 21 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [21:7519630006001443931:2317], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:54.512135Z node 21 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:54.517911Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:24:54.537096Z node 21 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [21:7519630006001443937:2318], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:24:54.628116Z node 21 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [21:7519630006001443992:2496] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:24:54.977696Z node 21 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jyhx8rk96yrszf0wep4m30rb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=21&id=MWI0NGE3YzMtZGRlZGJhNWItZmRkZThiY2UtYzQ3MWNiYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:24:54.980573Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [21:7519630006001444036:2330], owner: [21:7519630006001444035:2329], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:24:54.981147Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [21:7519630006001444036:2330], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-24T21:24:54.981728Z node 21 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [21:7519630006001444036:2330], row count: 4, finished: 1 2025-06-24T21:24:54.981824Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [21:7519630006001444036:2330], owner: [21:7519630006001444035:2329], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:24:54.982076Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [21:7519630006001444042:2333], owner: [21:7519630006001444035:2329], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:24:54.983838Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [21:7519630006001444042:2333], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-24T21:24:54.984397Z node 21 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [21:7519630006001444042:2333], row count: 4, finished: 1 2025-06-24T21:24:54.984471Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [21:7519630006001444042:2333], owner: [21:7519630006001444035:2329], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:24:54.987229Z node 21 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800294974, txId: 281474976710661] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::CollectScriptingQueries [GOOD] Test command err: 2025-06-24T21:23:29.416105Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629641625307061:2151];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:29.416172Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b04/r3tmp/tmp78IsHa/pdisk_1.dat 2025-06-24T21:23:30.072967Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:30.092729Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:30.092920Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:30.127676Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:30.198328Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 3404, node 1 2025-06-24T21:23:30.437574Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:30.496143Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:30.496171Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:30.496178Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:30.496313Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15022 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:31.152996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:31.254914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 
2025-06-24T21:23:31.287938Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519629649779728281:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:31.287986Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:23:31.315298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:31.595823Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:31.595906Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:31.605237Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-24T21:23:31.643460Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:31.647501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:31.647619Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:31.667359Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-24T21:23:31.669306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:32.053563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 
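[editor's illustrative aside] The SYSTEM_VIEWS records in this log ("Scan started" / "Scan prepared" / "Sending scan batch" / "Scan finished") are produced when a query reads one of YDB's `.sys` system views. A minimal client-side sketch of such a read with the official Python SDK is shown below; the endpoint, database path and the `.sys/partition_stats` view chosen as the target are placeholders for the example, not values taken from this run.

import ydb

# Sketch only: read a system view through the table service.
# Connection parameters are placeholders; adjust for a real cluster.
with ydb.Driver(endpoint="grpc://localhost:2136", database="/Root") as driver:
    driver.wait(timeout=5, fail_fast=True)
    with ydb.SessionPool(driver) as session_pool:
        def read_system_view(session):
            # `.sys/partition_stats` is one documented system view,
            # used here purely as an example target.
            return session.transaction(ydb.SerializableReadWrite()).execute(
                "SELECT * FROM `.sys/partition_stats` LIMIT 10;",
                commit_tx=True,
            )

        result_sets = session_pool.retry_operation_sync(read_system_view)
        for row in result_sets[0].rows:
            print(row)

On the server side each such SELECT shows up as exactly the scan lifecycle logged above, with the row count reported in the "Sending scan batch" record.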
2025-06-24T21:23:32.175734Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629654227720304:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:32.281759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:23:32.278700Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519629656267929587:2190];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:32.175780Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; waiting... 2025-06-24T21:23:32.471226Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:32.564208Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:32.655605Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:32.658180Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:32.746321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:32.748735Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:32.749318Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:23:32.765239Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-24T21:23:32.767501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:32.772767Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:33.034990Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:23:33.211307Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:33.279648Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:34.435216Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629641625307061:2151];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:34.435330Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:36.291289Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519629649779728281:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:36.291395Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:37.152241Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519629656267929587:2190];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:37.152330Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:37.178265Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519629654227720304:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:37.178335Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:39.426590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:39.923984Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor ... 025-06-24T21:24:44.582934Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root/.sys
: Error: Access denied 2025-06-24T21:24:44.619978Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root/Tenant1/.sys
: Error: Access denied 2025-06-24T21:24:44.699602Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root/.sys/partition_stats
: Error: Access denied 2025-06-24T21:24:44.736792Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root/Tenant1/.sys/partition_stats
: Error: Access denied 2025-06-24T21:24:44.752971Z node 32 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 36 2025-06-24T21:24:44.753747Z node 32 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(36, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:24:44.753907Z node 32 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 34 2025-06-24T21:24:44.754380Z node 32 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(34, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:24:44.756644Z node 32 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 35 2025-06-24T21:24:44.757016Z node 32 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(35, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:24:44.758149Z node 32 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 33 2025-06-24T21:24:44.758831Z node 32 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(33, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:24:44.761486Z node 32 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[33:7519629939428414806:2108], Type=268959746 2025-06-24T21:24:44.761551Z node 32 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[33:7519629939428414806:2108], Type=268959746 2025-06-24T21:24:44.761576Z node 32 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[33:7519629939428414806:2108], Type=268959746 2025-06-24T21:24:44.761606Z node 32 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[33:7519629939428414806:2108], Type=268959746 2025-06-24T21:24:48.414050Z node 37 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[37:7519629979050369045:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:48.414154Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b04/r3tmp/tmp4eTxyL/pdisk_1.dat 2025-06-24T21:24:48.584109Z node 37 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:48.587348Z node 37 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [37:7519629979050369026:2079] 1750800288413483 != 1750800288413486 2025-06-24T21:24:48.600739Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:48.600881Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:48.605089Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1758, node 37 2025-06-24T21:24:48.690505Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:48.690538Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from 
file: (empty maybe) 2025-06-24T21:24:48.690554Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:48.690763Z node 37 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27916 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:24:49.166413Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:49.176302Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:49.181339Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:49.423097Z node 37 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:53.414178Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[37:7519629979050369045:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:53.414275Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:24:53.960656Z node 37 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [37:7519630000525206222:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:53.960743Z node 37 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [37:7519630000525206228:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:53.960760Z node 37 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:53.965468Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:24:53.980097Z node 37 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [37:7519630000525206236:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:24:54.038305Z node 37 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [37:7519630004820173583:2391] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:24:54.161008Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhx8r261jk5d6zffa6dffn8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=M2ZlOWQxMTgtYWE0YzUxM2EtZDVmMWJmMWUtOTc3Nzg5ZGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:24:54.476581Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhx8rbxe2dprgpy5qr70e5e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=MzdkMjI3NGQtZmRkNTkzMmItYjQ1Y2E3N2YtMzQ3NTAzMTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:24:54.507335Z node 37 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800294516, txId: 281474976715662] shutting down 2025-06-24T21:24:54.801470Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhx8rmwb0j3mnrb4t4c6ahc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=YjRkYTQ2OGUtZTZmYjFkMTAtMzJjNjRiMDQtMjQyZmM0ZTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:24:54.804131Z node 37 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [37:7519630004820173723:2333], owner: [37:7519630004820173719:2331], scan id: 0, sys view info: Type: ETopQueriesByReadBytesOneMinute SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:24:54.805179Z node 37 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [37:7519630004820173723:2333], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-24T21:24:54.806018Z node 37 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [37:7519630004820173723:2333], row count: 2, finished: 1 2025-06-24T21:24:54.806144Z node 37 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [37:7519630004820173723:2333], owner: [37:7519630004820173719:2331], scan id: 0, sys view info: Type: ETopQueriesByReadBytesOneMinute SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:24:54.808844Z node 37 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800294799, txId: 281474976715664] shutting down >> TObjectStorageListingTest::TestFilter [GOOD] >> TObjectStorageListingTest::TestSkipShards >> TLocksTest::Range_CorrectNullDot [GOOD] >> TLocksTest::Range_EmptyKey >> TLocksTest::Range_GoodLock0 >> TFlatTest::LargeProxyReply >> TFlatTest::MiniKQLRanges >> TLocksTest::BrokenLockUpdate [GOOD] >> TLocksTest::BrokenNullLock >> TLocksTest::CK_Range_BrokenLockInf [GOOD] >> SystemView::AuthOwners_TableRange [GOOD] >> SystemView::AuthPermissions >> TObjectStorageListingTest::TestSkipShards [GOOD] >> TLocksTest::BrokenSameShardLock 
[GOOD] >> TLocksTest::SetLockNothing [GOOD] >> TLocksTest::Range_IncorrectDot2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::CK_Range_BrokenLockInf [GOOD] Test command err: 2025-06-24T21:24:20.452729Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629858663059234:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:20.453204Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa9/r3tmp/tmpZvlLye/pdisk_1.dat 2025-06-24T21:24:20.916779Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:20.919241Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629858663059118:2079] 1750800260438756 != 1750800260438759 2025-06-24T21:24:20.922506Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:20.922587Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:20.924708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10004 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:21.214039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:21.246373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
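[editor's illustrative aside] The repeated "Resource pool default not found or you don't have access permissions" warnings are emitted while the workload service lazily creates /Root/.metadata/workload_manager/pools/default, which is the ESchemeOpCreateResourcePool operation seen right after them. As a hedged illustration of the same feature from the client side, the sketch below creates and drops a user-defined resource pool via DDL; the pool name, the parameter names and the use of execute_scheme for this statement are assumptions based on the public workload-management documentation, not something taken from this test run.

import ydb

# Sketch under assumptions: resource-pool DDL issued through the Python SDK.
with ydb.Driver(endpoint="grpc://localhost:2136", database="/Root") as driver:
    driver.wait(timeout=5, fail_fast=True)
    with ydb.SessionPool(driver) as session_pool:
        def create_resource_pool(session):
            # Parameter names follow the documented workload-management
            # settings and are illustrative assumptions here.
            session.execute_scheme(
                "CREATE RESOURCE POOL example_pool WITH ("
                "CONCURRENT_QUERY_LIMIT = 10, QUEUE_SIZE = 100);"
            )

        def drop_resource_pool(session):
            session.execute_scheme("DROP RESOURCE POOL example_pool;")

        session_pool.retry_operation_sync(create_resource_pool)
        session_pool.retry_operation_sync(drop_resource_pool)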
2025-06-24T21:24:21.396790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:21.455277Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:21.476433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:23.840102Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629871451487888:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:23.840156Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa9/r3tmp/tmpo6LPEY/pdisk_1.dat 2025-06-24T21:24:24.016080Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:24.020202Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629871451487868:2079] 1750800263833788 != 1750800263833791 2025-06-24T21:24:24.031711Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:24.031780Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:24.033997Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9798 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:24:24.256178Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:24.279121Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:24.360744Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:24.444893Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:27.152110Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519629890220135773:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:27.152164Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa9/r3tmp/tmpTu8N42/pdisk_1.dat 2025-06-24T21:24:27.276198Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:27.284273Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519629890220135751:2079] 1750800267151295 != 1750800267151298 2025-06-24T21:24:27.309540Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:27.309638Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:27.311014Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10999 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:27.510599Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:27.534247Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:27.590287Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Op ... n [8:7519629976415513382:2079] 1750800287201078 != 1750800287201081 2025-06-24T21:24:47.370668Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:47.370765Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:47.373939Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4221 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
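[editor's illustrative aside] The "TClient::Ls request / TClient::Ls response" blocks in this log are the internal test client describing a scheme path ("Root", "dc-1") and dumping its PathDescription. A roughly equivalent operation through the public Python SDK might look like the sketch below; it is an illustration only, the connection parameters are placeholders, and the attribute names on the returned entries are assumptions from the public SDK rather than anything read from this log.

import ydb

# Sketch only: describe and list a scheme path, similar in spirit to the
# TClient::Ls calls above. Connection parameters are placeholders.
with ydb.Driver(endpoint="grpc://localhost:2136", database="/Root") as driver:
    driver.wait(timeout=5, fail_fast=True)
    scheme = driver.scheme_client  # scheme client bundled with the driver

    entry = scheme.describe_path("/Root")
    print("path:", entry.name, "type:", entry.type)

    listing = scheme.list_directory("/Root")
    for child in listing.children:
        print("child:", child.name, child.type)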
2025-06-24T21:24:47.690146Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:47.714974Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:24:47.721121Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:47.814363Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:47.870788Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:51.318313Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519629995743929728:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:51.318439Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa9/r3tmp/tmp3JScoy/pdisk_1.dat 2025-06-24T21:24:51.439475Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:51.440622Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519629995743929709:2079] 1750800291317701 != 1750800291317704 2025-06-24T21:24:51.465794Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:51.465903Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:51.467684Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1996 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:51.758753Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:51.780042Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:51.859278Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:51.928354Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:56.317091Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519630014413818813:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:56.317206Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aa9/r3tmp/tmpkMwIQ2/pdisk_1.dat 2025-06-24T21:24:56.484283Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:56.489641Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519630014413818791:2079] 1750800296316001 != 1750800296316004 2025-06-24T21:24:56.500201Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:56.500291Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:56.502898Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11648 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-24T21:24:56.809124Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:56.840038Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:56.911111Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:56.994941Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> TLocksFatTest::RangeSetRemove [GOOD] >> TLocksFatTest::ShardLocks >> TFlatTest::LargeDatashardReply [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TObjectStorageListingTest::TestSkipShards [GOOD] Test command err: 2025-06-24T21:24:54.718283Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630008503606139:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:54.718358Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a70/r3tmp/tmp99isYj/pdisk_1.dat 2025-06-24T21:24:55.065377Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:55.075048Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630008503606116:2079] 1750800294716182 != 1750800294716185 TServer::EnableGrpc on GrpcPort 8499, node 1 2025-06-24T21:24:55.135987Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:55.136142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:55.138598Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:55.162929Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:55.162958Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:55.162965Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:55.163180Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6713 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:55.497425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:55.535675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:55.735270Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a70/r3tmp/tmpwtS0pO/pdisk_1.dat TServer::EnableGrpc on GrpcPort 21333, node 2 TClient is connected to server localhost:3376 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... waiting... waiting... 
>> TFlatTest::AutoSplitBySize [GOOD] >> TFlatTest::AutoMergeBySize ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::BrokenSameShardLock [GOOD] Test command err: 2025-06-24T21:24:20.461155Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629861548331291:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:20.461274Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aac/r3tmp/tmphUg0YS/pdisk_1.dat 2025-06-24T21:24:20.835366Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629861548331084:2079] 1750800260438326 != 1750800260438329 2025-06-24T21:24:20.854386Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:20.860088Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:20.860224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:20.870335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28041 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:21.244233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:21.263729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:24:21.286240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:21.410938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:21.453824Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:21.486295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:23.823686Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629871437712075:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:23.823777Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aac/r3tmp/tmppNkCKe/pdisk_1.dat 2025-06-24T21:24:24.122855Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:24.122931Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:24.125372Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:24.131320Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629871437712059:2079] 1750800263815487 != 1750800263815490 2025-06-24T21:24:24.136767Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3696 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:24.384819Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:24.392017Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:24.400801Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:24:24.404876Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:24.458312Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:24.518191Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:27.473045Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519629888650629657:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:27.473177Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aac/r3tmp/tmpyTZ7MB/pdisk_1.dat 2025-06-24T21:24:27.629524Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:27.630490Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519629888650629638:2079] 1750800267472581 != 1750800267472584 2025-06-24T21:24:27.646757Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:27.646833Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:27.648930Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61936 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-24T21:24:27.853849Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__ope ... 47.811864Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1495 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:48.098005Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:48.104309Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:48.120842Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:24:48.133596Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:48.211079Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:24:48.276821Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:52.143409Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519629996827879722:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:52.143558Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aac/r3tmp/tmpTJdr88/pdisk_1.dat 2025-06-24T21:24:52.276121Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:52.277303Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519629996827879703:2079] 1750800292142833 != 1750800292142836 2025-06-24T21:24:52.294326Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:52.294446Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:52.298568Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26470 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:52.640555Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-24T21:24:52.661719Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:52.729312Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:24:52.804634Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:56.820668Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519630013369294351:2243];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aac/r3tmp/tmpNlsj5O/pdisk_1.dat 2025-06-24T21:24:56.896988Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:24:56.963431Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:56.963532Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:56.966606Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:56.975327Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519630013369294119:2079] 1750800296781966 != 1750800296781969 2025-06-24T21:24:56.991987Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TClient is connected to server localhost:14480 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:57.317746Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:57.341237Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:24:57.348157Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:57.421921Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:57.552336Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> TFlatTest::MiniKQLRanges [GOOD] >> TFlatTest::MergeEmptyAndWrite ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::SetLockNothing [GOOD] Test command err: 2025-06-24T21:24:35.316332Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629927096684033:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:35.316850Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a99/r3tmp/tmpVEb0tB/pdisk_1.dat 2025-06-24T21:24:35.799192Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:35.800738Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629927096683914:2079] 1750800275308292 != 1750800275308295 2025-06-24T21:24:35.808298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:35.808396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:35.812112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61520 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:36.126775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:36.142249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:36.150246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:24:36.155813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:36.302562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:36.316393Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 
2025-06-24T21:24:36.364472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:38.942271Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629937562332862:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:38.942341Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a99/r3tmp/tmpzGqcaq/pdisk_1.dat 2025-06-24T21:24:39.145277Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:39.148629Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:39.148701Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:39.153147Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629937562332846:2079] 1750800278941579 != 1750800278941582 2025-06-24T21:24:39.168091Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15296 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:39.429397Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:39.436080Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:24:39.457075Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:39.523511Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:39.610450Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:42.549979Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519629953710849991:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:42.550092Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a99/r3tmp/tmpU9DFLS/pdisk_1.dat 2025-06-24T21:24:42.689910Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:42.691824Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519629953710849972:2079] 1750800282549473 != 1750800282549476 2025-06-24T21:24:42.706494Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:42.706588Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:42.708536Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14349 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:42.916446Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alt ... for subscription [5:7519629984269998630:2079] 1750800289760691 != 1750800289760694 2025-06-24T21:24:49.912867Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:49.912967Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:49.914789Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28303 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:50.155332Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:50.176686Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:24:50.247596Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:50.341505Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:53.523161Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519630003615486742:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:53.523256Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a99/r3tmp/tmphuVx3S/pdisk_1.dat 2025-06-24T21:24:53.672164Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:53.674608Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519630003615486720:2079] 1750800293522179 != 1750800293522182 2025-06-24T21:24:53.698335Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:53.698471Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:53.700696Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29696 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:24:53.930808Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:53.935869Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:53.948581Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:54.023212Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:54.095165Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:57.626855Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519630020845606734:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:57.630167Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a99/r3tmp/tmpDyNodg/pdisk_1.dat 2025-06-24T21:24:57.773224Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:57.776681Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519630020845606685:2079] 1750800297617859 != 1750800297617862 2025-06-24T21:24:57.786824Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:57.786930Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:57.788476Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8582 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-24T21:24:58.076070Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:58.101265Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:58.164954Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:58.216926Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::Range_IncorrectDot2 [GOOD] Test command err: 2025-06-24T21:24:20.456400Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629859932422028:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:20.457234Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ab1/r3tmp/tmpp0yznc/pdisk_1.dat 2025-06-24T21:24:20.933033Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629859932421810:2079] 1750800260438739 != 1750800260438742 2025-06-24T21:24:20.957572Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:20.965771Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:20.965870Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:20.967886Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25907 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:21.206422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:21.233125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:24:21.240826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:24:21.246538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:21.410263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:21.456343Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:21.485958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:23.876523Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629871726559398:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:23.876613Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ab1/r3tmp/tmpucMmbS/pdisk_1.dat 2025-06-24T21:24:24.056037Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:24.058020Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629871726559379:2079] 1750800263875218 != 1750800263875221 2025-06-24T21:24:24.070063Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:24.070160Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:24.073284Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1624 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-24T21:24:24.352450Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:24.358658Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:24.372142Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:24:24.376769Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:24.434027Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:24:24.486301Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:27.440709Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519629890291586278:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:27.440798Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ab1/r3tmp/tmpPcfFwe/pdisk_1.dat 2025-06-24T21:24:27.568212Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:27.570490Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519629890291586259:2079] 1750800267440286 != 1750800267440289 2025-06-24T21:24:27.604360Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:27.604462Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:27.612035Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9392 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:27.784259Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 2814749 ... 
Z node 8 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:48.287587Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7519629982407701537:2079] 1750800288062233 != 1750800288062236 2025-06-24T21:24:48.300539Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:48.300636Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:48.304858Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13973 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:48.589270Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:48.595491Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:48.612965Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:48.694482Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:48.751454Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:52.507786Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519629997140611551:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:52.507872Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ab1/r3tmp/tmpBVadpx/pdisk_1.dat 2025-06-24T21:24:52.687424Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:52.687537Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:52.687771Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:52.690199Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519629997140611529:2079] 1750800292506653 != 1750800292506656 2025-06-24T21:24:52.711865Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29685 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:53.015023Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-24T21:24:53.043206Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:53.151801Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:53.223042Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ab1/r3tmp/tmpkdl7x8/pdisk_1.dat 2025-06-24T21:24:57.419450Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:57.424290Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:57.435412Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519630021308786271:2079] 1750800297179645 != 1750800297179648 2025-06-24T21:24:57.443124Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:57.443248Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:57.446459Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29088 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 
2025-06-24T21:24:57.785389Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:57.818278Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:57.913156Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:57.973350Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::LargeDatashardReply [GOOD] Test command err: 2025-06-24T21:24:48.988063Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629980489221782:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:48.988118Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a76/r3tmp/tmpIXSGXL/pdisk_1.dat 2025-06-24T21:24:49.367337Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629980489221764:2079] 1750800288987358 != 1750800288987361 2025-06-24T21:24:49.374081Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:49.395247Z node 1 :HIVE DEBUG: hive_impl.cpp:141: HIVE#72057594037968897 Handle TEvLocal::TEvRegisterNode from [1:7519629984784189272:2100] HiveId: 72057594037968897 ServicedDomains { SchemeShard: 72057594046644480 PathId: 1 } TabletAvailability { Type: Mediator Priority: 0 } TabletAvailability { Type: Dummy Priority: 0 } TabletAvailability { Type: KeyValue Priority: 0 } TabletAvailability { Type: Coordinator Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } TabletAvailability { Type: SchemeShard Priority: 0 } TabletAvailability { Type: DataShard Priority: 0 } TabletAvailability { Type: PersQueue Priority: 0 } TabletAvailability { Type: PersQueueReadBalancer Priority: 0 } TabletAvailability { Type: Kesus Priority: 0 } TabletAvailability { Type: SysViewProcessor Priority: 0 } TabletAvailability { 
Type: ColumnShard Priority: 0 } TabletAvailability { Type: SequenceShard Priority: 0 } TabletAvailability { Type: ReplicationController Priority: 0 } TabletAvailability { Type: StatisticsAggregator Priority: 0 } 2025-06-24T21:24:49.395371Z node 1 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Execute 2025-06-24T21:24:49.395493Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:49.395530Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-24T21:24:49.395557Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-24T21:24:49.395598Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-24T21:24:49.395618Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-24T21:24:49.395722Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:49.395964Z node 1 :HIVE DEBUG: hive_impl.cpp:808: HIVE#72057594037968897 TEvInterconnect::TEvNodeInfo NodeId 1 Location DataCenter: "1" Module: "1" Rack: "1" Unit: "1" 2025-06-24T21:24:49.396029Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-24T21:24:49.396067Z node 1 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-24T21:24:49.396097Z node 1 :HIVE DEBUG: hive_impl.cpp:225: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-06-24T21:24:49.396127Z node 1 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-24T21:24:49.396780Z node 1 :HIVE DEBUG: tx__register_node.cpp:95: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Complete 2025-06-24T21:24:49.396836Z node 1 :HIVE DEBUG: node_info.cpp:373: HIVE#72057594037968897 Node(1) Ping([1:7519629984784189272:2100]) 2025-06-24T21:24:49.396927Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-24T21:24:49.397546Z node 1 :HIVE DEBUG: hive_impl.cpp:737: HIVE#72057594037968897 THive::Handle::TEvSyncTablets 2025-06-24T21:24:49.397638Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:41: HIVE#72057594037968897 THive::TTxSyncTablets([1:7519629984784189272:2100])::Execute 2025-06-24T21:24:49.397681Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-24T21:24:49.397743Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:130: HIVE#72057594037968897 THive::TTxSyncTablets([1:7519629984784189272:2100])::Complete 2025-06-24T21:24:49.397908Z node 1 :HIVE DEBUG: hive_impl.cpp:731: HIVE#72057594037968897 Handle TEvLocal::TEvStatus for Node 1: Status: 0 StartTime: 1750800289019118 ResourceMaximum { Memory: 270443339776 } 2025-06-24T21:24:49.397987Z node 1 :HIVE DEBUG: tx__status.cpp:22: HIVE#72057594037968897 THive::TTxStatus(1)::Execute 2025-06-24T21:24:49.398029Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:49.398179Z node 1 :HIVE DEBUG: hive_impl.cpp:2791: HIVE#72057594037968897 AddRegisteredDataCentersNode(1, 1) 2025-06-24T21:24:49.398227Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-24T21:24:49.398249Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 
ProcessBootQueue (0) 2025-06-24T21:24:49.398364Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-24T21:24:49.398395Z node 1 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-24T21:24:49.398408Z node 1 :HIVE DEBUG: hive_impl.cpp:225: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-06-24T21:24:49.398427Z node 1 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-24T21:24:49.399517Z node 1 :HIVE DEBUG: tx__status.cpp:65: HIVE#72057594037968897 THive::TTxStatus(1)::Complete 2025-06-24T21:24:49.399555Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete TClient is connected to server localhost:27946 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-24T21:24:49.547456Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519629984784189294:2104] Handle TEvNavigate describe path dc-1 2025-06-24T21:24:49.573668Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519629984784189570:2257] HANDLE EvNavigateScheme dc-1 2025-06-24T21:24:49.574964Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519629984784189570:2257] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:24:49.628016Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519629984784189570:2257] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-24T21:24:49.645327Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519629984784189570:2257] Handle TEvDescribeSchemeResult Forward to# [1:7519629984784189569:2256] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 
StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:49.671421Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519629984784189294:2104] Handle TEvProposeTransaction 2025-06-24T21:24:49.671460Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519629984784189294:2104] TxId# 281474976710657 ProcessProposeTransaction 2025-06-24T21:24:49.671676Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519629984784189294:2104] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7519629984784189583:2263] 2025-06-24T21:24:49.799672Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519629984784189583:2263] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "" PeerName: "" 2025-06-24T21:24:49.799759Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519629984784189583:2263] txid# 281474976710657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:24:49.799832Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519629984784189583:2263] txid# 281474976710657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:24:49.800194Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519629984784189583:2263] txid# 281474976710657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:24:49.800312Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519629984784189583:2263] HANDLE EvNavigateKeySetResult, txid# 281474976710657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-24T21:24:49.800366Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519629984784189583:2263] txid# 281474976710657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710657 TabletId# 72057594046644480} 2025-06-24T21:24:49.800533Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1: ... 
50.643997Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519629984784189294:2104] TxId# 281474976710674 ProcessProposeTransaction 2025-06-24T21:24:50.644035Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519629984784189294:2104] Cookie# 0 userReqId# "" txid# 281474976710674 SEND to# [1:7519629989079157869:2961] 2025-06-24T21:24:50.644542Z node 1 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:24:50.644589Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037889 Initiating switch from PreOffline to Offline state 2025-06-24T21:24:50.645939Z node 1 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037894 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:24:50.645991Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037894 Initiating switch from PreOffline to Offline state 2025-06-24T21:24:50.647018Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519629989079157869:2961] txid# 281474976710674 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1/Berkanavt/tables" OperationType: ESchemeOpDropTable Drop { Name: "Classes" } } } UserToken: "" PeerName: "" 2025-06-24T21:24:50.647054Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519629989079157869:2961] txid# 281474976710674 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:24:50.647136Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519629989079157869:2961] txid# 281474976710674 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:24:50.647315Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037897 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T21:24:50.647405Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037889 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T21:24:50.647448Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037894 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T21:24:50.647481Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037895 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T21:24:50.647499Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519629989079157869:2961] txid# 281474976710674 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:24:50.647567Z node 1 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037891 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:24:50.647597Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519629989079157869:2961] HANDLE EvNavigateKeySetResult, txid# 281474976710674 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:24:50.647638Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519629989079157869:2961] txid# 281474976710674 SEND to# 72057594046644480 shardToRequest 
{TEvModifySchemeTransaction txid# 281474976710674 TabletId# 72057594046644480} 2025-06-24T21:24:50.647645Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037891 Initiating switch from PreOffline to Offline state 2025-06-24T21:24:50.647811Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519629989079157869:2961] txid# 281474976710674 HANDLE EvClientConnected 2025-06-24T21:24:50.648912Z node 1 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037893 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:24:50.648952Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037893 Initiating switch from PreOffline to Offline state 2025-06-24T21:24:50.649666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/dc-1/Berkanavt/tables" OperationType: ESchemeOpDropTable Drop { Name: "Classes" } } TxId: 281474976710674 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:24:50.649820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_drop_table.cpp:492: TDropTable Propose, path: /dc-1/Berkanavt/tables/Classes, pathId: 0, opId: 281474976710674:0, at schemeshard: 72057594046644480 2025-06-24T21:24:50.649922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 6] was 2 2025-06-24T21:24:50.649934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976710674:0 type: TxDropTable target path: [OwnerId: 72057594046644480, LocalPathId: 6] source path: 2025-06-24T21:24:50.649983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710674:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:24:50.650159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-24T21:24:50.650190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 6] was 3 2025-06-24T21:24:50.650283Z node 1 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037896 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:24:50.650330Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037896 Initiating switch from PreOffline to Offline state 2025-06-24T21:24:50.650481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976710674, response: Status: StatusAccepted TxId: 281474976710674 SchemeshardId: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T21:24:50.650629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710674, database: /dc-1, subject: , status: StatusAccepted, operation: DROP TABLE, path: /dc-1/Berkanavt/tables/Classes 2025-06-24T21:24:52.349482Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629998319433343:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:52.349685Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a76/r3tmp/tmpXSD2CS/pdisk_1.dat 2025-06-24T21:24:52.469666Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:52.471112Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629998319433321:2079] 1750800292347582 != 1750800292347585 2025-06-24T21:24:52.490849Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:52.490919Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:52.492186Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20135 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-24T21:24:52.653465Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:52.675256Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:53.385821Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:57.349974Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519629998319433343:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:57.350041Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:25:02.617136Z node 2 :MINIKQL_ENGINE ERROR: datashard__engine_host.cpp:516: Shard %72075186224037888, txid %281474976716360, engine error: Error executing transaction (read-only: 1): Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648) 2025-06-24T21:25:02.629663Z node 2 :TX_DATASHARD ERROR: execute_data_tx_unit.cpp:268: Datashard execution error for [0:281474976716360] at 72075186224037888: Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648) 2025-06-24T21:25:02.632191Z node 2 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976716360 at tablet 72075186224037888 status: RESULT_UNAVAILABLE errors: REPLY_SIZE_EXCEEDED (Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648)) | 2025-06-24T21:25:02.650502Z node 2 :TX_PROXY ERROR: datareq.cpp:883: Actor# [2:7519630036974145373:5906] txid# 281474976716360 RESPONSE Status# ExecResultUnavailable marker# P13c DataShardErrors: [REPLY_SIZE_EXCEEDED] Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648) proxy error code: ExecResultUnavailable >> TLocksTest::Range_Pinhole [GOOD] >> TLocksTest::SetBreakSetEraseBreak >> TLocksFatTest::PointSetBreak >> TObjectStorageListingTest::MaxKeysAndSharding [GOOD] >> TObjectStorageListingTest::SchemaChecks >> TCancelTx::CrossShardReadOnly >> SystemView::AuthGroupMembers_Access [GOOD] >> SystemView::AuthGroupMembers_ResultOrder >> TFlatTest::WriteSplitByPartialKeyAndRead >> TLocksTest::Range_BrokenLockMax >> TFlatTest::LargeProxyReply [GOOD] >> TFlatTest::LargeProxyReplyRW >> TFlatTest::SplitInvalidPath >> TLocksTest::Range_BrokenLock0 [GOOD] >> TLocksTest::Range_BrokenLock1 >> TFlatTest::MergeEmptyAndWrite [GOOD] >> TFlatTest::CopyTableAndRead >> LabeledDbCounters::OneTablet [GOOD] >> LabeledDbCounters::OneTabletRemoveCounters >> TLocksTest::GoodLock [GOOD] >> TLocksTest::GoodNullLock >> TCancelTx::CrossShardReadOnly [GOOD] >> TCancelTx::CrossShardReadOnlyWithReadSets ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::MergeEmptyAndWrite [GOOD] Test command err: 2025-06-24T21:25:00.516270Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630030480132679:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:00.517283Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6c/r3tmp/tmptCD5Qy/pdisk_1.dat 2025-06-24T21:25:00.858401Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:00.921574Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown 
-> Disconnected 2025-06-24T21:25:00.921746Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:00.924363Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10816 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:01.136360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:01.173077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:03.875839Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630046062544101:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:03.875916Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6c/r3tmp/tmpsXp18T/pdisk_1.dat 2025-06-24T21:25:04.015851Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:04.017987Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630046062544078:2079] 1750800303874370 != 1750800303874373 2025-06-24T21:25:04.030478Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:04.030567Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:04.032730Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27829 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:04.286056Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:04.317491Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:04.413196Z node 2 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-06-24T21:25:04.420893Z node 2 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.001s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-24T21:25:04.451077Z node 2 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1573 647 6413)b }, ecr=1.000 2025-06-24T21:25:04.466387Z node 2 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.007s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750800304414 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... 
(TRUNCATED) 2025-06-24T21:25:04.524075Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:25:04.526070Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976715676 released its data 2025-06-24T21:25:04.526274Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T21:25:04.527669Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976715676 released its data 2025-06-24T21:25:04.527835Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:25:04.528493Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976715676 at 72075186224037888 restored its data 2025-06-24T21:25:04.529301Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976715676 released its data 2025-06-24T21:25:04.529439Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T21:25:04.529901Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976715676 at 72075186224037889 restored its data 2025-06-24T21:25:04.530572Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976715676 released its data 2025-06-24T21:25:04.530724Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:25:04.531223Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976715676 at 72075186224037888 restored its data 2025-06-24T21:25:04.531875Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976715676 released its data 2025-06-24T21:25:04.531985Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T21:25:04.532380Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976715676 at 72075186224037889 restored its data 2025-06-24T21:25:04.533056Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976715676 released its data 2025-06-24T21:25:04.533160Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:25:04.533578Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976715676 at 72075186224037888 restored its data 2025-06-24T21:25:04.534203Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976715676 released its data 2025-06-24T21:25:04.534807Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T21:25:04.535243Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976715676 at 72075186224037889 restored its data 2025-06-24T21:25:04.535927Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976715676 released its data 2025-06-24T21:25:04.536041Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:25:04.536482Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976715676 at 72075186224037888 restored its data 2025-06-24T21:25:04.537378Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 
281474976715 ... 87:2139], exec latency: 0 ms, propose latency: 3 ms 2025-06-24T21:25:04.809933Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037890 Sending notify to schemeshard 72057594046644480 txId 281474976715687 state PreOffline TxInFly 0 2025-06-24T21:25:04.809965Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:25:04.810041Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72057594046644480, message: TxKind: TX_KIND_SCHEME Origin: 72075186224037890 Status: COMPLETE TxId: 281474976715687 Step: 1750800304855 OrderId: 281474976715687 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186224037890 CpuTimeUsec: 896 } } CommitVersion { Step: 1750800304855 TxId: 281474976715687 } 2025-06-24T21:25:04.810054Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 281474976715687, tablet: 72075186224037890, partId: 0 2025-06-24T21:25:04.810161Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 281474976715687:0, at schemeshard: 72057594046644480, message: TxKind: TX_KIND_SCHEME Origin: 72075186224037890 Status: COMPLETE TxId: 281474976715687 Step: 1750800304855 OrderId: 281474976715687 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186224037890 CpuTimeUsec: 896 } } CommitVersion { Step: 1750800304855 TxId: 281474976715687 } 2025-06-24T21:25:04.810235Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046644480, ev# TxKind: TX_KIND_SCHEME Origin: 72075186224037890 Status: COMPLETE TxId: 281474976715687 Step: 1750800304855 OrderId: 281474976715687 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186224037890 CpuTimeUsec: 896 } } CommitVersion { Step: 1750800304855 TxId: 281474976715687 } 2025-06-24T21:25:04.810355Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715687 2025-06-24T21:25:04.810425Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715687:0, at schemeshard: 72057594046644480 2025-06-24T21:25:04.810781Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046644480, at schemeshard: 72057594046644480, message: Source { RawX1: 7519630050357512271 RawX2: 4503608217307404 } Origin: 72075186224037890 State: 5 TxId: 281474976715687 Step: 0 Generation: 1 2025-06-24T21:25:04.810809Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 281474976715687, tablet: 72075186224037890, partId: 0 2025-06-24T21:25:04.810919Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 281474976715687:0, at schemeshard: 72057594046644480, message: Source { RawX1: 7519630050357512271 RawX2: 4503608217307404 } Origin: 72075186224037890 State: 5 TxId: 281474976715687 Step: 0 Generation: 1 2025-06-24T21:25:04.810961Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: 
NTableState::TProposedWaitParts operationId# 281474976715687:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 2025-06-24T21:25:04.811058Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 281474976715687:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 7519630050357512271 RawX2: 4503608217307404 } Origin: 72075186224037890 State: 5 TxId: 281474976715687 Step: 0 Generation: 1 2025-06-24T21:25:04.811118Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715687:0, shardIdx: 72057594046644480:3, shard: 72075186224037890, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-24T21:25:04.811164Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 281474976715687:0, at schemeshard: 72057594046644480 2025-06-24T21:25:04.811179Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 281474976715687:0, datashard: 72075186224037890, at schemeshard: 72057594046644480 2025-06-24T21:25:04.811207Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715687:0 129 -> 240 2025-06-24T21:25:04.811588Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715687:0, at schemeshard: 72057594046644480 2025-06-24T21:25:04.811684Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715687 datashard 72075186224037890 state PreOffline 2025-06-24T21:25:04.811709Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715687:0, at schemeshard: 72057594046644480 2025-06-24T21:25:04.811741Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 281474976715687:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T21:25:04.811742Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037890 Got TEvSchemaChangedResult from SS at 72075186224037890 2025-06-24T21:25:04.812080Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-24T21:25:04.812239Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715687:0 progress is 1/1 2025-06-24T21:25:04.812259Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715687 ready parts: 1/1 2025-06-24T21:25:04.812279Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715687:0 progress is 1/1 2025-06-24T21:25:04.812294Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715687 ready parts: 1/1 2025-06-24T21:25:04.812310Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715687, ready parts: 1/1, is published: true 2025-06-24T21:25:04.812357Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send 
TEvNotifyTxCompletionResult to actorId: [2:7519630050357512492:2352] message: TxId: 281474976715687 2025-06-24T21:25:04.812382Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715687 ready parts: 1/1 2025-06-24T21:25:04.812399Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715687:0 2025-06-24T21:25:04.812409Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715687:0 2025-06-24T21:25:04.812491Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 TClient::Ls request: /dc-1/Dir/TableOld 2025-06-24T21:25:04.813077Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037890 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:25:04.813139Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037890 Initiating switch from PreOffline to Offline state 2025-06-24T21:25:04.814616Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037890 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T21:25:04.814937Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519630050357512271 RawX2: 4503608217307404 } TabletId: 72075186224037890 State: 4 2025-06-24T21:25:04.814989Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:25:04.815332Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:25:04.815357Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-06-24T21:25:04.816625Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-24T21:25:04.816824Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-24T21:25:04.817012Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T21:25:04.817039Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-24T21:25:04.817085Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-24T21:25:04.817277Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-06-24T21:25:04.817701Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-24T21:25:04.817771Z 
node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 2025-06-24T21:25:04.818290Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037890 not found 2025-06-24T21:25:04.818634Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-24T21:25:04.818653Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-24T21:25:04.818695Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Path not found" 2025-06-24T21:25:04.895773Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TFlatTest::WriteSplitByPartialKeyAndRead [GOOD] >> TFlatTest::WriteSplitAndReadFromFollower >> TObjectStorageListingTest::SchemaChecks [GOOD] >> TFlatTest::SplitInvalidPath [GOOD] >> TFlatTest::SplitThenMerge >> TLocksFatTest::ShardLocks [GOOD] >> TLocksTest::BrokenLockErase [GOOD] >> TLocksTest::BrokenDupLock >> TFlatTest::CopyTableAndRead [GOOD] >> TFlatTest::CopyTableAndDropOriginal ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TObjectStorageListingTest::SchemaChecks [GOOD] Test command err: 2025-06-24T21:24:37.884435Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629933948829781:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:37.884611Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a96/r3tmp/tmpHQAyTA/pdisk_1.dat 2025-06-24T21:24:38.215387Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629933948829761:2079] 1750800277883391 != 1750800277883394 2025-06-24T21:24:38.218573Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13133, node 1 2025-06-24T21:24:38.305927Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:38.309774Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:38.313284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:38.341311Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:38.341338Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:38.341350Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:38.341478Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9649 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:38.652486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:38.688923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:38.903448Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:42.886760Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629933948829781:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:42.889070Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:24:53.177458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:24:53.177490Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:05.705062Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630055435993629:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:05.705131Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a96/r3tmp/tmpklmYrG/pdisk_1.dat 2025-06-24T21:25:05.858648Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:05.878577Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630055435993609:2079] 1750800305704626 != 1750800305704629 2025-06-24T21:25:05.882613Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:05.882729Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:05.885429Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20672, node 2 2025-06-24T21:25:05.963806Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:25:05.963830Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:25:05.963838Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:25:05.963997Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3934 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:06.256212Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:06.263456Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:06.272956Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:25:06.280471Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:06.736442Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TFlatTest::SelectBigRangePerf >> TFlatTest::LargeProxyReplyRW [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksFatTest::ShardLocks [GOOD] Test command err: 2025-06-24T21:24:53.916584Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630001185850499:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:53.916723Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a72/r3tmp/tmp5Zl3Vv/pdisk_1.dat 2025-06-24T21:24:54.239776Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:54.241980Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630001185850479:2079] 1750800293915466 != 1750800293915469 2025-06-24T21:24:54.313147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:54.313294Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:54.315126Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62430 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:54.574018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:54.613031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:24:54.619430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:54.774600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:54.843337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:54.936614Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:58.630957Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630022049127094:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:58.631037Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a72/r3tmp/tmpWGkGZc/pdisk_1.dat 2025-06-24T21:24:58.776986Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:58.778280Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630022049127069:2079] 1750800298625199 != 1750800298625202 2025-06-24T21:24:58.793673Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:58.793756Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:58.795642Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19131 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:58.993559Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:58.999775Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:59.020635Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:59.086723Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:59.136648Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:59.650584Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:03.184465Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630044404478171:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:03.184515Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a72/r3tmp/tmpptfcE7/pdisk_1.dat 2025-06-24T21:25:03.323659Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:03.324753Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630044404478149:2079] 1750800303183643 != 1750800303183646 2025-06-24T21:25:03.348688Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:03.348779Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:03.349714Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10996 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:03.661547Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:03.668349Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:25:03.675945Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:25:03.680687Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:03.753510Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:03.825925Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:07.100216Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519630062517698269:2142];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a72/r3tmp/tmpSyW25v/pdisk_1.dat 2025-06-24T21:25:07.105624Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:25:07.236617Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:07.241823Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519630062517698163:2079] 1750800307080950 != 1750800307080953 2025-06-24T21:25:07.260667Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:07.260752Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:07.262083Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23441 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:07.512809Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:07.554861Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:25:07.639943Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:07.717168Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TLocksTest::GoodSameShardLock [GOOD] >> TLocksTest::Range_BrokenLock3 [GOOD] >> TFlatTest::SplitThenMerge [GOOD] >> TLocksFatTest::PointSetBreak [GOOD] >> TLocksFatTest::LocksLimit >> TLocksTest::SetBreakSetEraseBreak [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::LargeProxyReplyRW [GOOD] Test command err: 2025-06-24T21:25:00.416942Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630033369434627:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:00.417017Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6d/r3tmp/tmpPLNvRw/pdisk_1.dat 2025-06-24T21:25:00.788231Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630033369434596:2079] 1750800300414820 != 1750800300414823 2025-06-24T21:25:00.788432Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:00.838763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:00.838868Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:00.840508Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:32412 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:01.041514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... 
2025-06-24T21:25:01.077332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:01.437459Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:05.417484Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630033369434627:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:05.417548Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:25:05.853412Z node 1 :TX_PROXY ERROR: datareq.cpp:2703: Actor# [1:7519630054844274627:4133] txid# 281474976711010 MergeResult Result too large TDataReq marker# P18 2025-06-24T21:25:05.853488Z node 1 :TX_PROXY ERROR: datareq.cpp:883: Actor# [1:7519630054844274627:4133] txid# 281474976711010 RESPONSE Status# ExecResultUnavailable marker# P13c MiniKQLErrors: Query result size limit exceeded. (71692241 > 50331648) proxy error code: ExecResultUnavailable 2025-06-24T21:25:06.456429Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630060067916093:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:06.456486Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6d/r3tmp/tmpy3NlAS/pdisk_1.dat 2025-06-24T21:25:06.616672Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630060067916070:2079] 1750800306455073 != 1750800306455076 2025-06-24T21:25:06.630028Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:06.639194Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:06.639299Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:06.641201Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19845 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:06.875257Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:06.899853Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:07.476416Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:11.458390Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519630060067916093:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:11.458476Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:25:12.147778Z node 2 :TX_PROXY ERROR: datareq.cpp:2703: Actor# [2:7519630081542756101:4135] txid# 281474976716011 MergeResult Result too large TDataReq marker# P18 2025-06-24T21:25:12.147876Z node 2 :TX_PROXY ERROR: datareq.cpp:883: Actor# [2:7519630081542756101:4135] txid# 281474976716011 RESPONSE Status# ExecResultUnavailable marker# P13c MiniKQLErrors: Query result size limit exceeded. 
(71692241 > 50331648) proxy error code: ExecResultUnavailable >> TFlatTest::RejectByPerShardReadSize >> SystemView::AuthPermissions [GOOD] >> SystemView::AuthPermissions_Access >> TFlatTest::CopyTableAndDropOriginal [GOOD] >> TFlatTest::WriteSplitAndReadFromFollower [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::GoodSameShardLock [GOOD] Test command err: 2025-06-24T21:24:31.828491Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629909640395115:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:31.828775Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a9c/r3tmp/tmpGcyyOy/pdisk_1.dat 2025-06-24T21:24:32.201319Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:32.202530Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629909640395078:2079] 1750800271826778 != 1750800271826781 2025-06-24T21:24:32.235468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:32.235597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:32.236628Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11747 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:32.540594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-24T21:24:32.571820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:32.704453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:32.763643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:32.844672Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:35.403666Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629927095681998:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:35.403701Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a9c/r3tmp/tmpNeTEyT/pdisk_1.dat 2025-06-24T21:24:35.596995Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:35.678360Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:35.678455Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:35.680322Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6827 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:24:35.892773Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:35.917467Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:35.994573Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:36.064853Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a9c/r3tmp/tmp1HFXYl/pdisk_1.dat 2025-06-24T21:24:39.175499Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:39.178424Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:39.179845Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519629939269873833:2079] 1750800278970291 != 1750800278970294 2025-06-24T21:24:39.197455Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:39.197542Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:39.201021Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23334 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:39.459320Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:39.467596Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:39.478107Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:24:39.484225Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:24:39.551365Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but p ... chemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:59.020897Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:24:59.028040Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:59.045935Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:59.129406Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:59.189380Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:03.434618Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519630044502319539:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:03.434776Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a9c/r3tmp/tmpCdoBZa/pdisk_1.dat 2025-06-24T21:25:03.607820Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:03.620939Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519630044502319519:2079] 1750800303434040 != 1750800303434043 2025-06-24T21:25:03.624893Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:03.625006Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:03.627631Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1263 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:04.016285Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:04.025920Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:04.036359Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:25:04.042288Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:04.123748Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:25:04.183254Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:08.399526Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519630066856718704:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:08.407212Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a9c/r3tmp/tmpSQAY5f/pdisk_1.dat 2025-06-24T21:25:08.645033Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:08.649682Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519630066856718686:2079] 1750800308393491 != 1750800308393494 2025-06-24T21:25:08.662352Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:08.662466Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:08.672031Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2075 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:09.053735Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-24T21:25:09.075934Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:25:09.085639Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:25:09.171581Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:09.241305Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:09.423845Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TFlatTest::SelectBigRangePerf [GOOD] >> TFlatTest::SelectRangeBothLimit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SplitThenMerge [GOOD] Test command err: 2025-06-24T21:25:06.752577Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630060074462627:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:06.752908Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a67/r3tmp/tmpsFffc1/pdisk_1.dat 2025-06-24T21:25:07.224805Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630060074462528:2079] 1750800306738956 != 1750800306738959 2025-06-24T21:25:07.231016Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:07.243773Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:07.243900Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:07.245884Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24063 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:07.590453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:07.609447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:07.626718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation_split_merge.cpp:816: TSplitMerge Propose failed StatusNameConflict Check failed: path: '/dc-1/Dir1', error: path is not a table (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeDir, state: EPathStateNoChanges), tableStr: /dc-1/Dir1, tableId: , opId: 281474976710659:0, at schemeshard: 72057594046644480, request: TablePath: "/dc-1/Dir1" SourceTabletId: 100500 SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 42 } } } } 2025-06-24T21:25:07.630725Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630064369430394:2298] txid# 281474976710659, issues: { message: "Check failed: path: \'/dc-1/Dir1\', error: path is not a table (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } Error 128: Check failed: path: '/dc-1/Dir1', error: path is not a table (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeDir, state: EPathStateNoChanges) 2025-06-24T21:25:10.362602Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630077203291386:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:10.362629Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a67/r3tmp/tmp4xZ6AQ/pdisk_1.dat 2025-06-24T21:25:10.638717Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:10.641744Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630077203291366:2079] 1750800310361667 != 1750800310361670 2025-06-24T21:25:10.655556Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2025-06-24T21:25:10.655640Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:10.663094Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25955 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-24T21:25:10.854493Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:10.878592Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:11.045449Z node 2 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.010s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-06-24T21:25:11.059401Z node 2 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.001s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-24T21:25:11.095361Z node 2 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.003s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1573 647 6413)b }, ecr=1.000 2025-06-24T21:25:11.114262Z node 2 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.010s,wait=0.002s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750800310980 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... (TRUNCATED) 2025-06-24T21:25:11.174380Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:25:11.176520Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710676 released its data 2025-06-24T21:25:11.176734Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T21:25:11.178127Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710676 released its data 2025-06-24T21:25:11.178650Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:25:11.179343Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710676 at 72075186224037888 restored its data 2025-06-24T21:25:11.180307Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710676 released its data 2025-06-24T21:25:11.180451Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T21:25:11.180937Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710676 at 72075186224037889 restored its data 2025-06-24T21:25:11.181701Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710676 released its data 2025-06-24T21:25:11.182169Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:25:11.182646Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710676 at 72075186224037888 restored its data 2025-06-24T21:25:11.183427Z node 2 :TX_DATASHARD DEBUG: 
datashard_active_transaction.cpp:561: tx 281474976710676 released its data 2025-06-24T21:25:11.183547Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T21:25:11.183985Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710676 at 72075186224037889 restored its data 2025-06-24T21:25:11.184733Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710676 released its data 2025-06-24T21:25:11.185254Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:25:11.185683Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710676 at 720751 ... CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710693:0, shardIdx: 72057594046644480:7, shard: 72075186224037894, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-24T21:25:11.941735Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 281474976710693:0, at schemeshard: 72057594046644480 2025-06-24T21:25:11.941757Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 281474976710693:0, datashard: 72075186224037889, at schemeshard: 72057594046644480 2025-06-24T21:25:11.941770Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 281474976710693:0, datashard: 72075186224037894, at schemeshard: 72057594046644480 2025-06-24T21:25:11.941785Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710693:0 129 -> 240 2025-06-24T21:25:11.942213Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976710693:0, at schemeshard: 72057594046644480 2025-06-24T21:25:11.942288Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976710693:0, at schemeshard: 72057594046644480 2025-06-24T21:25:11.942567Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976710693:0, at schemeshard: 72057594046644480 2025-06-24T21:25:11.942616Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976710693:0, at schemeshard: 72057594046644480 2025-06-24T21:25:11.942699Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976710693 datashard 72075186224037889 state PreOffline 2025-06-24T21:25:11.942705Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710693:0, at schemeshard: 72057594046644480 2025-06-24T21:25:11.942734Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 281474976710693:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T21:25:11.942774Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-24T21:25:11.942899Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976710693 datashard 72075186224037894 state PreOffline 2025-06-24T21:25:11.942926Z node 2 
:TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037894 Got TEvSchemaChangedResult from SS at 72075186224037894 2025-06-24T21:25:11.943108Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-06-24T21:25:11.943328Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710693:0 progress is 1/1 2025-06-24T21:25:11.943350Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710693 ready parts: 1/1 2025-06-24T21:25:11.943368Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710693:0 progress is 1/1 2025-06-24T21:25:11.943378Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710693 ready parts: 1/1 2025-06-24T21:25:11.943392Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710693, ready parts: 1/1, is published: true 2025-06-24T21:25:11.943443Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:7519630081498260115:2387] message: TxId: 281474976710693 2025-06-24T21:25:11.943467Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710693 ready parts: 1/1 2025-06-24T21:25:11.943486Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710693:0 2025-06-24T21:25:11.943498Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710693:0 2025-06-24T21:25:11.943583Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-24T21:25:11.944245Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037894 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:25:11.944351Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037894 Initiating switch from PreOffline to Offline state 2025-06-24T21:25:11.945622Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-24T21:25:11.945691Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037889 Initiating switch from PreOffline to Offline state 2025-06-24T21:25:11.946851Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037894 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T21:25:11.947109Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037889 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T21:25:11.947416Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519630081498259893 RawX2: 4503608217307438 } TabletId: 72075186224037894 State: 4 2025-06-24T21:25:11.947466Z node 2 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037894, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:25:11.947627Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519630077203291966 RawX2: 4503608217307346 } TabletId: 72075186224037889 State: 4 2025-06-24T21:25:11.947657Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037889, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:25:11.947820Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037894 state Offline 2025-06-24T21:25:11.947844Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:7 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:25:11.947909Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:25:11.947999Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037889 state Offline TClient::Ls request: /dc-1/Dir/TableOld 2025-06-24T21:25:11.949559Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 7 ShardOwnerId: 72057594046644480 ShardLocalIdx: 7, at schemeshard: 72057594046644480 2025-06-24T21:25:11.949797Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-24T21:25:11.949979Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-24T21:25:11.950137Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-24T21:25:11.950254Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T21:25:11.950286Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-24T21:25:11.950325Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-24T21:25:11.951896Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:7 2025-06-24T21:25:11.951927Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:7 tabletId 72075186224037894 2025-06-24T21:25:11.951968Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-24T21:25:11.951983Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 
72075186224037889 2025-06-24T21:25:11.952019Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-24T21:25:11.955358Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037894 reason = ReasonStop 2025-06-24T21:25:11.955408Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037894, clientId# [2:7519630081498260006:2790], serverId# [2:7519630081498260007:2791], sessionId# [0:0:0] 2025-06-24T21:25:11.955428Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037889 reason = ReasonStop 2025-06-24T21:25:11.955442Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [2:7519630081498259383:2392], serverId# [2:7519630081498259384:2393], sessionId# [0:0:0] 2025-06-24T21:25:11.955707Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037894 2025-06-24T21:25:11.955778Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037894 2025-06-24T21:25:11.955870Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037894 not found 2025-06-24T21:25:11.956166Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037889 not found 2025-06-24T21:25:11.957180Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-06-24T21:25:11.957238Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037889 TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Path not found" ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::Range_BrokenLock3 [GOOD] Test command err: 2025-06-24T21:24:31.581415Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629908275916041:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:31.593501Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a9f/r3tmp/tmpM9akZ6/pdisk_1.dat 2025-06-24T21:24:32.088511Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:32.098975Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:32.099405Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:32.102157Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22472 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:32.365634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:32.411931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:32.555754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:32.590363Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:32.636148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:35.199630Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629923918661054:2165];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:35.209145Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a9f/r3tmp/tmp19fweN/pdisk_1.dat 2025-06-24T21:24:35.352606Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:35.359510Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629923918660911:2079] 1750800275176415 != 1750800275176418 2025-06-24T21:24:35.370893Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:35.371020Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:35.376629Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16142 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:35.600779Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:35.612187Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:35.628599Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:35.710402Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:35.784214Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:38.828214Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519629938668458896:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:38.828292Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a9f/r3tmp/tmpBU9lyy/pdisk_1.dat 2025-06-24T21:24:39.024009Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:39.025088Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519629938668458877:2079] 1750800278827594 != 1750800278827597 2025-06-24T21:24:39.037009Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:39.037092Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:39.038659Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26625 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:24:39.274512Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:39.300070Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:39.362295Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part ... :18576 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:59.167882Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:59.174856Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:59.191224Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:59.258608Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:59.335612Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:03.440268Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519630045491061058:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:03.440348Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a9f/r3tmp/tmplXfXCy/pdisk_1.dat 2025-06-24T21:25:03.692722Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:03.694161Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519630045491061037:2079] 1750800303439210 != 1750800303439213 2025-06-24T21:25:03.713434Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:03.713560Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:03.715903Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10685 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:04.066093Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:04.077603Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:25:04.094299Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:25:04.178429Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:04.241803Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:08.694199Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519630066693557613:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:08.694264Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a9f/r3tmp/tmpi4mBBp/pdisk_1.dat 2025-06-24T21:25:08.927269Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:08.931300Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519630066693557581:2079] 1750800308691470 != 1750800308691473 2025-06-24T21:25:08.948795Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:08.948911Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:08.954569Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19674 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:09.369908Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:09.376461Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:09.392766Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:09.484105Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:09.568505Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:09.732408Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::SetBreakSetEraseBreak [GOOD] Test command err: 2025-06-24T21:24:47.306097Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629975862844390:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:47.306137Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a78/r3tmp/tmpfB4WXF/pdisk_1.dat 2025-06-24T21:24:47.722253Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:47.724327Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629975862844360:2079] 1750800287305087 != 1750800287305090 2025-06-24T21:24:47.744159Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:47.744274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:47.746563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3025 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:48.046538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:48.068524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:24:48.086271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:48.233359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:24:48.317180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:48.337116Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:50.634465Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629988332345628:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:50.634548Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a78/r3tmp/tmpXKN6Qo/pdisk_1.dat 2025-06-24T21:24:50.756219Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:50.757853Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629988332345608:2079] 1750800290633799 != 1750800290633802 2025-06-24T21:24:50.784166Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:50.784275Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:50.786010Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63642 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:50.968655Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:50.974707Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:50.988625Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:51.038531Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:51.079403Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:54.110060Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630005521483056:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:54.110146Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a78/r3tmp/tmpMoVxcw/pdisk_1.dat 2025-06-24T21:24:54.229899Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:54.233958Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630005521483037:2079] 1750800294109652 != 1750800294109655 2025-06-24T21:24:54.263975Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:54.264058Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:54.266277Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25465 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:54.450316Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:54.455571Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at ... VolatileState: Unknown -> Disconnected 2025-06-24T21:25:01.832888Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:01.834140Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:65223 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:02.037702Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:02.053640Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:02.141589Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:02.198067Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:05.525584Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519630053594414823:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:05.525682Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a78/r3tmp/tmpkhdX53/pdisk_1.dat 2025-06-24T21:25:05.672858Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:05.673977Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519630053594414807:2079] 1750800305524898 != 1750800305524901 2025-06-24T21:25:05.694162Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:05.694229Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:05.695837Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3041 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:05.959668Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:05.984067Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:06.046000Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:06.100920Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:09.863321Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519630073007873337:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:09.863385Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a78/r3tmp/tmp2yArQ9/pdisk_1.dat 2025-06-24T21:25:10.025424Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:10.025513Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:10.032376Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:25:10.035898Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:10.039408Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519630073007873317:2079] 1750800309862467 != 1750800309862470 TClient is connected to server localhost:64056 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:25:10.350979Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:10.358020Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:10.370976Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:25:10.375640Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:10.458510Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:10.514113Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
>> TLocksTest::GoodDupLock [GOOD] >> TLocksTest::CK_Range_GoodLock ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::CopyTableAndDropOriginal [GOOD] Test command err: 2025-06-24T21:25:07.841572Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630063329973213:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:07.856976Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a66/r3tmp/tmpavHtc8/pdisk_1.dat 2025-06-24T21:25:08.288003Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:08.289254Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630063329973107:2079] 1750800307797318 != 1750800307797321 2025-06-24T21:25:08.328925Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:08.329019Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:08.331199Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6241 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:08.612679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:08.633570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:25:08.644840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:25:08.653727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:08.841232Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:08.854033Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-06-24T21:25:08.862954Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.004s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-24T21:25:08.902872Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.003s,wait=0.001s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1573 647 6413)b }, ecr=1.000 2025-06-24T21:25:08.911957Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 Copy TableOld to Table 2025-06-24T21:25:09.078934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/dc-1/Dir" OperationType: ESchemeOpCreateTable CreateTable { Name: "Table" CopyFromTable: "/dc-1/Dir/TableOld" } } TxId: 281474976710676 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:25:09.079411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_copy_table.cpp:344: TCopyTable Propose, path: /dc-1/Dir/Table, opId: 281474976710676:0, at schemeshard: 72057594046644480 2025-06-24T21:25:09.080024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 2], parent name: Dir, child name: Table, child id: [OwnerId: 72057594046644480, LocalPathId: 4], at schemeshard: 72057594046644480 2025-06-24T21:25:09.080086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 0 2025-06-24T21:25:09.080104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction source path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-24T21:25:09.080126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976710676:0 type: TxCopyTable target path: [OwnerId: 72057594046644480, LocalPathId: 4] source path: [OwnerId: 72057594046644480, LocalPathId: 3] 2025-06-24T21:25:09.080158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 1 2025-06-24T21:25:09.080176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 2 2025-06-24T21:25:09.080330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 3 2025-06-24T21:25:09.080472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710676:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:25:09.081226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-24T21:25:09.081254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 4 2025-06-24T21:25:09.081774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976710676, response: Status: StatusAccepted TxId: 281474976710676 SchemeshardId: 72057594046644480 PathId: 4, at schemeshard: 72057594046644480 2025-06-24T21:25:09.081897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710676, database: /dc-1, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /dc-1/Dir/Table 2025-06-24T21:25:09.082065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-24T21:25:09.082079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710676, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T21:25:09.082209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710676, path id: [OwnerId: 72057594046644480, LocalPathId: 4] 2025-06-24T21:25:09.082264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-24T21:25:09.082278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519630067624940888:2236], at schemeshard: 72057594046644480, txId: 281474976710676, path id: 2 2025-06-24T21:25:09.082289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519630067624940888:2236], at schemeshard: 72057594046644480, txId: 281474976710676, path id: 4 2025-06-24T21:25:09.082313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710676:0, at schemeshard: 72057594046644480 2025-06-24T21:25:09.082341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976710676:0 ProgressState, operation type: TxCopyTable, at tablet# 72057594046644480 2025-06-24T21:25:09.083895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:358: TCreateParts opId# 281474976710676:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 3 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 4 BindedChannels { 
StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-06-24T21:25:09.084025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:358: TCreateParts opId# 281474976710676:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 4 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 4 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } waiting... 2025-06-24T21:25:09.086210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046644480, cookie: 281474976710676 2025-06-24T21:25:09.086349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046644480, cookie: 281474976710676 2025-06-24T21:25:09.086364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976710676 2025-06-24T21:25:09.086399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710676, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 6 2025-06 ... 5-06-24T21:25:12.487526Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519630078759294150 RawX2: 4503608217307346 } TabletId: 72075186224037888 State: 4 2025-06-24T21:25:12.487546Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037888, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:25:12.487644Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519630083054261758 RawX2: 4503608217307405 } TabletId: 72075186224037891 State: 4 2025-06-24T21:25:12.487665Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:25:12.487748Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519630083054261757 RawX2: 4503608217307404 } TabletId: 72075186224037890 State: 4 2025-06-24T21:25:12.487767Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:25:12.487846Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519630083054261757 RawX2: 4503608217307404 } TabletId: 72075186224037890 
State: 4 2025-06-24T21:25:12.487861Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:25:12.488017Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:25:12.488086Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:1 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:25:12.488122Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:25:12.488151Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:25:12.488179Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:25:12.488325Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2025-06-24T21:25:12.488350Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037888 state Offline 2025-06-24T21:25:12.488375Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037891 state Offline 2025-06-24T21:25:12.488387Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-06-24T21:25:12.488397Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-06-24T21:25:12.489621Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-24T21:25:12.489887Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-24T21:25:12.490069Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-24T21:25:12.490209Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-24T21:25:12.490311Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-24T21:25:12.490435Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 2 2025-06-24T21:25:12.490525Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-24T21:25:12.490623Z node 2 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 1 2025-06-24T21:25:12.490714Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-24T21:25:12.490811Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 2 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T21:25:12.490828Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 4], at schemeshard: 72057594046644480 2025-06-24T21:25:12.490873Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-24T21:25:12.490889Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-24T21:25:12.490904Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-24T21:25:12.491172Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037889 reason = ReasonStop 2025-06-24T21:25:12.491204Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037888 reason = ReasonStop 2025-06-24T21:25:12.491233Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:7519630083054261558:2385], serverId# [2:7519630083054261559:2386], sessionId# [0:0:0] 2025-06-24T21:25:12.491251Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037891 reason = ReasonStop 2025-06-24T21:25:12.491263Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-06-24T21:25:12.491524Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-24T21:25:12.491546Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-24T21:25:12.491582Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-24T21:25:12.491589Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-24T21:25:12.492078Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2025-06-24T21:25:12.492093Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037891 not found 2025-06-24T21:25:12.492105Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037890 not found 2025-06-24T21:25:12.492118Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037889 not found 2025-06-24T21:25:12.492339Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-24T21:25:12.492349Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-24T21:25:12.492375Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-24T21:25:12.492388Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-24T21:25:12.492404Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-24T21:25:12.492429Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-24T21:25:12.492548Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [2:7519630083054261568:2392], serverId# [2:7519630083054261569:2393], sessionId# [0:0:0] 2025-06-24T21:25:12.492847Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-06-24T21:25:12.492983Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037889 2025-06-24T21:25:12.494299Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037888 2025-06-24T21:25:12.494361Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-06-24T21:25:12.495362Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037891 2025-06-24T21:25:12.495405Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-24T21:25:12.496325Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-24T21:25:12.496363Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 2025-06-24T21:25:12.498408Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Check that tablet 72075186224037889 was deleted 2025-06-24T21:25:12.795581Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037888) 2025-06-24T21:25:12.796965Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037889) Check that tablet 72075186224037890 was deleted 2025-06-24T21:25:12.797417Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037890) Check that tablet 72075186224037891 was deleted 2025-06-24T21:25:12.797896Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037891) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::WriteSplitAndReadFromFollower [GOOD] Test command err: 2025-06-24T21:25:06.410319Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630057634960359:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:06.426807Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a69/r3tmp/tmphnulTT/pdisk_1.dat 2025-06-24T21:25:06.831267Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630057634960262:2079] 1750800306390161 != 1750800306390164 2025-06-24T21:25:06.838906Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:06.853624Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:06.853751Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:06.855828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16389 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:07.146671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:07.161363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:07.185222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
TClient::Ls request: /dc-1/Dir/TableOld 2025-06-24T21:25:07.412557Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750800307291 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Key2" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Va... (TRUNCATED) 2025-06-24T21:25:07.444152Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:25:07.445697Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:25:07.445728Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:25:07.549925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { OperationType: ESchemeOpSplitMergeTablePartitions SplitMergeTablePartitions { TablePath: "/dc-1/Dir/TableOld" SourceTabletId: 72075186224037888 SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 100 } } } } } } TxId: 281474976710668 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:25:07.550281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:804: TSplitMerge Propose, tableStr: /dc-1/Dir/TableOld, tableId: , opId: 281474976710668:0, at schemeshard: 72057594046644480, request: TablePath: "/dc-1/Dir/TableOld" SourceTabletId: 72075186224037888 SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 100 } } } } 2025-06-24T21:25:07.550559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-24T21:25:07.550627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-24T21:25:07.550974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-06-24T21:25:07.551238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976710668:0 type: TxSplitTablePartition target path: [OwnerId: 72057594046644480, LocalPathId: 3] source path: 2025-06-24T21:25:07.554628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:1083: TSplitMerge Propose accepted, tableStr: /dc-1/Dir/TableOld, tableId: , opId: 281474976710668:0, at schemeshard: 72057594046644480, op: SourceRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "" TabletID: 72075186224037888 ShardIdx: 1 } DestinationRanges { KeyRangeBegin: 
"\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "\002\000\004\000\000\000d\000\000\000\000\000\000\200" ShardIdx: 2 } DestinationRanges { KeyRangeBegin: "\002\000\004\000\000\000d\000\000\000\000\000\000\200" KeyRangeEnd: "" ShardIdx: 3 }, request: TablePath: "/dc-1/Dir/TableOld" SourceTabletId: 72075186224037888 SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 100 } } } } 2025-06-24T21:25:07.554718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710668:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:25:07.555757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976710668, response: Status: StatusAccepted TxId: 281474976710668 SchemeshardId: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T21:25:07.555879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710668, subject: , status: StatusAccepted, operation: ALTER TABLE PARTITIONS, path: /dc-1/Dir/TableOld 2025-06-24T21:25:07.556176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-24T21:25:07.556225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976710668:0 ProgressState, operation type: TxSplitTablePartition, at tablet# 72057594046644480 2025-06-24T21:25:07.556604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:358: TCreateParts opId# 281474976710668:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 2 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 3 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-06-24T21:25:07.556782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:358: TCreateParts opId# 281474976710668:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 3 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 3 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-06-24T21:25:07.557273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976710668:0 from tablet: 72057594046644480 to tablet: 72057594037968897 cookie: 72057594046644480:2 msg type: 268697601 2025-06-24T21:25:07.557405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976710668:0 from tablet: 72057594046644480 to tablet: 72057594037968897 cookie: 72057594046644480:3 msg type: 268697601 2025-06-24T21:25:07.557488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976710668, partId: 0, tablet: 72057594037968897 2025-06-24T21:25:07.557526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1810: TOperation RegisterRelationByShardIdx, TxId: 281474976710668, shardIdx: 72057594046644480:2, partId: 0 2025-06-24T21:25:07.557536Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:1810: TOperation RegisterRelationByShardIdx, TxId: 281474976710668, shardIdx: 72057594046644480:3, partId: 0 waiting... 2025-06-24T21:25:07.559699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5947: Handle TEvCreateTabletReply at schemeshard: 72057594046644480 message: Status: OK Owner: 72057594046644480 OwnerIdx: 2 TabletID: 72075186224037889 Origin: 72057594037968897 2025-06-24T21:25:07.559741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation FindRelatedPartByShardIdx, TxId: 281474976710668, shardIdx: 72057594046644480:2, partId: 0 2025-06-24T21:25:07.559868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 281474976710668:0, at schemeshard: 72057594046644480, message: Status: OK Owner: 72057594046644480 OwnerIdx: 2 TabletID: 72075186224037889 Origin: 72057594037968897 2025-06-24T21:25:07.559909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:176: TCreateParts opId# 2814749767106 ... 91 reason = ReasonStop 2025-06-24T21:25:12.215653Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037891 2025-06-24T21:25:12.215804Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-06-24T21:25:12.215982Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-24T21:25:12.216009Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-24T21:25:12.216812Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037891 2025-06-24T21:25:12.216900Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-24T21:25:12.217733Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037891 not found 2025-06-24T21:25:12.218428Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037891 not found 2025-06-24T21:25:12.218444Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037891 not found TClient::Ls response: 2025-06-24T21:25:12.219351Z node 3 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037892 Initiating switch from PreOffline to Offline state Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Path not found" 2025-06-24T21:25:12.220355Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-24T21:25:12.220386Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-24T21:25:12.222850Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037891 2025-06-24T21:25:12.224271Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-06-24T21:25:12.225625Z node 3 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037890 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T21:25:12.227381Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server 
disconnected at leader tablet# 72075186224037888, clientId# [3:7519630082511446016:2274], serverId# [3:7519630082511446021:2492], sessionId# [0:0:0] 2025-06-24T21:25:12.227406Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:7519630082511446018:2275], serverId# [3:7519630082511446022:2493], sessionId# [0:0:0] 2025-06-24T21:25:12.229685Z node 3 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037892 Reporting state Offline to schemeshard 72057594046644480 2025-06-24T21:25:12.229798Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519630078216478233 RawX2: 4503612512274638 } TabletId: 72075186224037888 State: 4 2025-06-24T21:25:12.229851Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037888, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:25:12.230115Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519630078216478484 RawX2: 4503612512274644 } TabletId: 72075186224037890 State: 4 2025-06-24T21:25:12.230135Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:25:12.231008Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519630078216478485 RawX2: 4503612512274645 } TabletId: 72075186224037892 State: 4 2025-06-24T21:25:12.231044Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037892, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:25:12.244082Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:1 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:25:12.244200Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:25:12.244258Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:5 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:25:12.247269Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037888 state Offline 2025-06-24T21:25:12.247323Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-06-24T21:25:12.247342Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037892 state Offline 2025-06-24T21:25:12.249607Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-24T21:25:12.249824Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-24T21:25:12.250019Z node 2 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-24T21:25:12.250152Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-24T21:25:12.250243Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046644480 ShardLocalIdx: 5, at schemeshard: 72057594046644480 2025-06-24T21:25:12.250368Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-24T21:25:12.250433Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2025-06-24T21:25:12.250451Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037890 not found 2025-06-24T21:25:12.250459Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T21:25:12.250465Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037892 not found 2025-06-24T21:25:12.250483Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-24T21:25:12.250520Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-24T21:25:12.251047Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037888 2025-06-24T21:25:12.252006Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-24T21:25:12.256231Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037892 2025-06-24T21:25:12.257659Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037888 reason = ReasonStop 2025-06-24T21:25:12.263446Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037888 2025-06-24T21:25:12.271847Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037888 2025-06-24T21:25:12.271947Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-06-24T21:25:12.273816Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-24T21:25:12.274925Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-06-24T21:25:12.274982Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037892 reason = ReasonStop 2025-06-24T21:25:12.275887Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-24T21:25:12.275964Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 2025-06-24T21:25:12.276998Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 
2025-06-24T21:25:12.277050Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2025-06-24T21:25:12.277072Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037890 not found 2025-06-24T21:25:12.278370Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037892 2025-06-24T21:25:12.278441Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037892 2025-06-24T21:25:12.279534Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037892 2025-06-24T21:25:12.282126Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-24T21:25:12.282154Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-24T21:25:12.282204Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-24T21:25:12.282212Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-24T21:25:12.282229Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-24T21:25:12.282241Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:5 tabletId 72075186224037892 2025-06-24T21:25:12.282294Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-24T21:25:12.284706Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037890 not found 2025-06-24T21:25:12.284745Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037892 not found 2025-06-24T21:25:12.284764Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037892 not found >> TFlatTest::SplitEmptyAndWrite >> TCancelTx::CrossShardReadOnlyWithReadSets [GOOD] >> TCancelTx::ImmediateReadOnly >> TLocksTest::Range_GoodLock0 [GOOD] >> TLocksTest::Range_GoodLock1 >> SystemView::AuthGroupMembers_ResultOrder [GOOD] >> SystemView::AuthGroupMembers_TableRange >> TLocksTest::CK_BrokenLock [GOOD] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernel >> TestProgram::Like >> TestProgram::YqlKernelEndsWith >> TFlatTest::SelectRangeBothLimit [GOOD] >> TestProgram::YqlKernel [GOOD] >> TestProgram::YqlKernelEndsWith [GOOD] >> TestProgram::Like [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelEndsWith [GOOD] Test command err: 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\001H\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? \020EndsWith?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\001H\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? \020EndsWith?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(26):{\"i\":\"7,9\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:7,9"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"9\",\"p\":{\"address\":{\"name\":\"substring\",\"id\":9}},\"o\":\"9\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"7,9\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"7,9","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"15","t":"Projection"},"w":26,"id":5},"4":{"p":{"i":"9","p":{"address":{"name":"substring","id":9}},"o":"9","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"7,9","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":26,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; 
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::Like [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Bytes: "001" } } } Command { Assign { Column { Id: 16 } Constant { Bytes: "uid" } } } Command { Assign { Column { Id: 17 } Function { Id: 33 Arguments { Id: 7 } Arguments { Id: 16 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Assign { Column { Id: 18 } Function { Id: 34 Arguments { Id: 7 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 1 } } } Command { Assign { Column { Id: 19 } Function { Id: 18 Arguments { Id: 17 } FunctionType: SIMPLE_ARROW } } } Command { Assign { Column { Id: 20 } Function { Id: 18 Arguments { Id: 18 } FunctionType: SIMPLE_ARROW } } } Command { Assign { Column { Id: 21 } Function { Id: 11 Arguments { Id: 19 } Arguments { Id: 20 } FunctionType: SIMPLE_ARROW } } } Command { Projection { Columns { Id: 21 } } } Kernels: "O\006\006Arg\022BlockFunc\030BlockAsTuple\t\211\004\235\213\004\213\004\203\001H\203\005@\213\004\203\014?\006\001\235?\004\001\235?\010\001\n\000\t\211\004?\016\235?\000\001\235?\002\000\n\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\004?\020\235?\006\001?$\n\000\t\211\006?$\203\005@?\024?\026\006\000\003?(\024StartsWith?\034? \001\t\211\006?$\203\005@?\024?\026\006\000\003?0\020EndsWith?\034? \001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Bytes: "001" } } } Command { Assign { Column { Id: 16 } Constant { Bytes: "uid" } } } Command { Assign { Column { Id: 17 } Function { Id: 33 Arguments { Id: 7 } Arguments { Id: 16 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Assign { Column { Id: 18 } Function { Id: 34 Arguments { Id: 7 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 1 } } } Command { Assign { Column { Id: 19 } Function { Id: 18 Arguments { Id: 17 } FunctionType: SIMPLE_ARROW } } } Command { Assign { Column { Id: 20 } Function { Id: 18 Arguments { Id: 18 } FunctionType: SIMPLE_ARROW } } } Command { Assign { Column { Id: 21 } Function { Id: 11 Arguments { Id: 19 } Arguments { Id: 20 } FunctionType: SIMPLE_ARROW } } } Command { Projection { Columns { Id: 21 } } } Kernels: "O\006\006Arg\022BlockFunc\030BlockAsTuple\t\211\004\235\213\004\213\004\203\001H\203\005@\213\004\203\014?\006\001\235?\004\001\235?\010\001\n\000\t\211\004?\016\235?\000\001\235?\002\000\n\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\004?\020\235?\006\001?$\n\000\t\211\006?$\203\005@?\024?\026\006\000\003?(\024StartsWith?\034? \001\t\211\006?$\203\005@?\024?\026\006\000\003?0\020EndsWith?\034? 
\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N6(0):{\"p\":{\"v\":\"001\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N0(0):{\"p\":{\"v\":\"uid\"},\"o\":\"16\",\"t\":\"Const\"}\n"]; N2[shape=box, label="N4(15):{\"i\":\"7,16\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"17\",\"t\":\"Calculation\"}\nREMOVE:16"]; N1 -> N2[label="1"]; N4 -> N2[label="2"]; N3[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7}]},\"o\":\"7\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N10 -> N3[label="1"]; N4[shape=box, label="N3(7):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N3 -> N4[label="1"]; N5[shape=box, label="N7(15):{\"i\":\"7,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"18\",\"t\":\"Calculation\"}\nREMOVE:7,15"]; N0 -> N5[label="1"]; N4 -> N5[label="2"]; N6[shape=box, label="N5(23):{\"i\":\"17\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"19\",\"t\":\"Calculation\"}\nREMOVE:17"]; N2 -> N6[label="1"]; N7[shape=box, label="N8(23):{\"i\":\"18\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"20\",\"t\":\"Calculation\"}\nREMOVE:18"]; N5 -> N7[label="1"]; N8[shape=box, label="N9(54):{\"i\":\"19,20\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"21\",\"t\":\"Calculation\"}\nREMOVE:19,20"]; N6 -> N8[label="1"]; N7 -> N8[label="2"]; N9[shape=box, label="N10(54):{\"i\":\"21\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N8 -> N9[label="1"]; N10[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N1->N10->N3->N4->N2->N6->N0->N5->N7->N8->N9[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[]},{"owner_id":2,"inputs":[{"from":1},{"from":4}]},{"owner_id":3,"inputs":[{"from":10}]},{"owner_id":4,"inputs":[{"from":3}]},{"owner_id":5,"inputs":[{"from":0},{"from":4}]},{"owner_id":6,"inputs":[{"from":2}]},{"owner_id":7,"inputs":[{"from":5}]},{"owner_id":8,"inputs":[{"from":6},{"from":7}]},{"owner_id":9,"inputs":[{"from":8}]},{"owner_id":10,"inputs":[]}],"nodes":{"1":{"p":{"p":{"v":"uid"},"o":"16","t":"Const"},"w":0,"id":1},"3":{"p":{"i":"0","p":{"data":[{"name":"string","id":7}]},"o":"7","t":"FetchOriginalData"},"w":2,"id":3},"8":{"p":{"i":"19,20","p":{"kernel":{"class_name":"SIMPLE"}},"o":"21","t":"Calculation"},"w":54,"id":8},"2":{"p":{"i":"7,16","p":{"kernel":{"class_name":"SIMPLE"}},"o":"17","t":"Calculation"},"w":15,"id":2},"0":{"p":{"p":{"v":"001"},"o":"15","t":"Const"},"w":0,"id":0},"5":{"p":{"i":"7,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"18","t":"Calculation"},"w":15,"id":5},"9":{"p":{"i":"21","t":"Projection"},"w":54,"id":9},"7":{"p":{"i":"18","p":{"kernel":{"class_name":"SIMPLE"}},"o":"20","t":"Calculation"},"w":23,"id":7},"4":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":7,"id":4},"10":{"p":{"p":{"data":[{"name":"string","id":7}]},"o":"0","t":"ReserveMemory"},"w":0,"id":10},"6":{"p":{"i":"17","p":{"kernel":{"class_name":"SIMPLE"}},"o":"19","t":"Calculation"},"w":23,"id":6}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; 
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow11BooleanTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow11BooleanTypeE; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernel [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 3 } Arguments { Id: 4 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\002\213\002?\000\001\235?\002\001\235?\004\001\002\000\t\211\002?\n\235?\000\001\002\000\t\251\000?\020\014Arg\000\000\t\211\002?\014?\020\002\000\t\211\006?\020\203\005@?\020?\020$BlockFunc\000\003?\034\006Add?\026?\026\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 3 } Arguments { Id: 4 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\002\213\002?\000\001\235?\002\001\235?\004\001\002\000\t\211\002?\n\235?\000\001\002\000\t\251\000?\020\014Arg\000\000\t\211\002?\014?\020\002\000\t\211\006?\020\203\005@?\020?\020$BlockFunc\000\003?\034\006Add?\026?\026\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"sum\",\"id\":3},{\"name\":\"vat\",\"id\":4}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(26):{\"i\":\"3,4\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:3,4"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"3\",\"p\":{\"address\":{\"name\":\"sum\",\"id\":3}},\"o\":\"3\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"4\",\"p\":{\"address\":{\"name\":\"vat\",\"id\":4}},\"o\":\"4\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"sum\",\"id\":3},{\"name\":\"vat\",\"id\":4}]},\"o\":\"3,4\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"3","p":{"address":{"name":"sum","id":3}},"o":"3","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"sum","id":3},{"name":"vat","id":4}]},"o":"3,4","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"sum","id":3},{"name":"vat","id":4}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"15","t":"Projection"},"w":26,"id":5},"4":{"p":{"i":"4","p":{"address":{"name":"vat","id":4}},"o":"4","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"3,4","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":26,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; >> TFlatTest::SplitEmptyAndWrite [GOOD] >> TFlatTest::SplitBoundaryRead >> TLocksTest::Range_IncorrectNullDot2 [GOOD] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonValueBinary ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::CK_BrokenLock [GOOD] Test command err: 2025-06-24T21:24:37.299521Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629935012263365:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:37.299626Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a98/r3tmp/tmpOxR3XC/pdisk_1.dat 2025-06-24T21:24:37.647473Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629935012263342:2079] 1750800277297943 != 1750800277297946 2025-06-24T21:24:37.656911Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:37.688487Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:37.688619Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:37.690634Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26682 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:37.976261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:37.987582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:38.002691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:38.114165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:38.172656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:38.306322Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:40.842824Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629946249669593:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:40.854943Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a98/r3tmp/tmp6tKHis/pdisk_1.dat 2025-06-24T21:24:40.982794Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:40.984533Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629946249669525:2079] 1750800280832985 != 1750800280832988 2025-06-24T21:24:40.993239Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:40.993328Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:40.995089Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27523 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:41.242493Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:41.264382Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:41.342142Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:41.388310Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:44.291906Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519629963329807773:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:44.291967Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a98/r3tmp/tmpPbSONF/pdisk_1.dat 2025-06-24T21:24:44.419492Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:44.433393Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:44.433477Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:44.436135Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28828 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:44.634695Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-24T21:24:44.653200Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:44.726815Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part ... 97 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:04.275360Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:04.280457Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29087 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:04.576679Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:04.600770Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:04.681903Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:04.749292Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:09.274234Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519630069602208044:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:09.274295Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a98/r3tmp/tmpA2Ui1V/pdisk_1.dat 2025-06-24T21:25:09.460695Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:09.461803Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519630069602208024:2079] 1750800309273052 != 1750800309273055 2025-06-24T21:25:09.467841Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:09.467948Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:09.472279Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23688 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:09.892413Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:09.906681Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:25:09.932143Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:25:09.944108Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:10.041805Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:10.123300Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:10.320113Z node 9 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:14.130425Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519630094469964516:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:14.130579Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a98/r3tmp/tmp9eMAjF/pdisk_1.dat 2025-06-24T21:25:14.288488Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:14.310840Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:14.310959Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:14.316496Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15122 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:14.628017Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:14.649921Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:14.726500Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:14.779864Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
>> TestProgram::CountWithNulls >> TestProgram::CountWithNulls [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SelectRangeBothLimit [GOOD] Test command err: 2025-06-24T21:25:12.289742Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630082322853138:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:12.289811Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a55/r3tmp/tmp0bmbIP/pdisk_1.dat 2025-06-24T21:25:12.695331Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630082322853112:2079] 1750800312282583 != 1750800312282586 2025-06-24T21:25:12.703162Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:12.758583Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:12.758721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:12.759905Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1679 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:13.061235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:13.075717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... waiting... 
2025-06-24T21:25:13.103385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:13.308946Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; insert finished 14685 usec 10136 usec 11110 usec 10783 usec 11416 usec 10298 usec 14847 usec 10440 usec 10512 usec 10854 usec 2025-06-24T21:25:15.604835Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630096867215709:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:15.604912Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a55/r3tmp/tmpUvOsBL/pdisk_1.dat 2025-06-24T21:25:15.729866Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:15.762693Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:15.762796Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:15.764387Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28289 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:15.961178Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-24T21:25:15.979603Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:16.612263Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TCancelTx::ImmediateReadOnly [GOOD] >> TLocksFatTest::LocksLimit [GOOD] >> TestProgram::JsonValueBinary [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::CountWithNulls [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { GroupBy { Aggregates { Column { Id: 10001 } Function { Id: 2 Arguments { Id: 2 } } } } } Command { Projection { Columns { Id: 10001 } } } ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { GroupBy { Aggregates { Column { Id: 10001 } Function { Id: 2 Arguments { Id: 2 } } } } } Command { Projection { Columns { Id: 10001 } } } ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N3(15):{\"i\":\"2\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"10001\",\"t\":\"Calculation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N1[shape=box, label="N1(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N4 -> N1[label="1"]; N2[shape=box, label="N2(7):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"uid\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N1 -> N2[label="1"]; N3[shape=box, label="N4(15):{\"i\":\"10001\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N3[label="1"]; N4[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N4->N1->N2->N0->N3[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":4}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[{"from":0}]},{"owner_id":4,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"uid","id":2}]},"o":"2","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"i":"10001","t":"Projection"},"w":15,"id":3},"2":{"p":{"i":"2","p":{"address":{"name":"uid","id":2}},"o":"2","t":"AssembleOriginalData"},"w":7,"id":2},"4":{"p":{"p":{"data":[{"name":"uid","id":2}]},"o":"0","t":"ReserveMemory"},"w":0,"id":4},"0":{"p":{"i":"2","p":{"kernel":{"class_name":"SIMPLE"}},"o":"10001","t":"Calculation"},"w":15,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> 
TestScript::StepMerging [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonValueBinary [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207?\004\001\235?\006\001\235?\n\001\032\000\t\211\004?\020\235?\002\001\235?\004\000\032\000\t\251\000?\026\002\000\t\251\000?\030\002\000\000\t\211\002?\022\235?\010\001\032\000\t\211\n?&?\026?\030?\002?\004?\010,ScalarApply\000?\036?\"\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\010?\002?\000\207?\004?4$IfPresent\000?.\t\251\000?\000\002\000\t\211\n?4\201\213\004\213\004\203\n\203\005@\207\203\001H?@?4?D?D VisitAll\000\t\211\020?H\211\006?H\207\203\021H\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\006\026\000\t\211\010?X\203\005@\200\203\005@\202\022\000\003?nNJson2.JsonDocumentSqlValueConvertToUtf8\202\003?p\000\002\017\003?Z\000\003?\\\000\003?^\000\003?`\000\027?b?:\t\211\014?d\211\002?d\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\206\203\005@\200\203\005@\202\022\000\003?\222\"Json2.CompilePath\202\003?\224\000\002\017\003?\210\000\003?\212\000\003?\214\000\003?\216\000?2\036\010\000?j\276\t\251\000?@\002\000\'?4\t\251\000?D\002\000?\264\004\'?4\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207?\004\001\235?\006\001\235?\n\001\032\000\t\211\004?\020\235?\002\001\235?\004\000\032\000\t\251\000?\026\002\000\t\251\000?\030\002\000\000\t\211\002?\022\235?\010\001\032\000\t\211\n?&?\026?\030?\002?\004?\010,ScalarApply\000?\036?\"\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\010?\002?\000\207?\004?4$IfPresent\000?.\t\251\000?\000\002\000\t\211\n?4\201\213\004\213\004\203\n\203\005@\207\203\001H?@?4?D?D VisitAll\000\t\211\020?H\211\006?H\207\203\021H\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\006\026\000\t\211\010?X\203\005@\200\203\005@\202\022\000\003?nNJson2.JsonDocumentSqlValueConvertToUtf8\202\003?p\000\002\017\003?Z\000\003?\\\000\003?^\000\003?`\000\027?b?:\t\211\014?d\211\002?d\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\206\203\005@\200\203\005@\202\022\000\003?\222\"Json2.CompilePath\202\003?\224\000\002\017\003?\210\000\003?\212\000\003?\214\000\003?\216\000?2\036\010\000?j\276\t\251\000?@\002\000\'?4\t\251\000?D\002\000?\264\004\'?4\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; 
N1[shape=box, label="N4(15):{\"i\":\"6,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,6"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"6\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"6\",\"p\":{\"address\":{\"name\":\"json_binary\",\"id\":6}},\"o\":\"6\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"6,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"6","p":{"address":{"name":"json_binary","id":6}},"o":"6","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_binary","id":6}]},"o":"6","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_binary","id":6}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; json_binary: [ 7B226B6579223A2276616C7565227D, 7B226B6579223A31307D, 7B226B6579223A302E317D, 7B226B6579223A66616C73657D, 7B22616E6F74686572223A2276616C7565227D, 5B5D ] json_binary: [ 010200002100000014000000030300000200000000040000C00400006B65790076616C756500, 0102000021000000140000008403000001000000800300006B6579000000000000002440, 0102000021000000140000008403000001000000800300006B6579009A9999999999B93F, 0102000021000000140000000000000001000000800300006B657900, 01020000210000001400000003030000020000008004000040050000616E6F746865720076616C756500, 010100000000000000000000 ] Check output for Utf8 FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { 
Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203\014?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\203\021H\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?p N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"6\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"6\",\"p\":{\"address\":{\"name\":\"json_binary\",\"id\":6}},\"o\":\"6\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"6,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t" ... { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203B\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?6 VisitAll\000\t\211\020?J\211\006?J\207\203\021H\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?p@Json2.JsonDocumentSqlValueNumber\202\003?r\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?d?<\t\211\014?f\211\002?f\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\210\203\005@\200\203\005@\202\022\000\003?\224\"Json2.CompilePath\202\003?\226\000\002\017\003?\212\000\003?\214\000\003?\216\000\003?\220\000?4\036\010\000?l\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000\t\211\004?6\203\005@?F\030Invoke\000\003?\270\016Convert?\266\001\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"6,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,6"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"6\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"6\",\"p\":{\"address\":{\"name\":\"json_binary\",\"id\":6}},\"o\":\"6\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"6,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"6","p":{"address":{"name":"json_binary","id":6}},"o":"6","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_binary","id":6}]},"o":"6","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_binary","id":6}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; json_binary: [ 7B226B6579223A2276616C7565227D, 
7B226B6579223A31307D, 7B226B6579223A302E317D, 7B226B6579223A66616C73657D, 7B22616E6F74686572223A2276616C7565227D, 5B5D ] json_binary: [ 010200002100000014000000030300000200000000040000C00400006B65790076616C756500, 0102000021000000140000008403000001000000800300006B6579000000000000002440, 0102000021000000140000008403000001000000800300006B6579009A9999999999B93F, 0102000021000000140000000000000001000000800300006B657900, 01020000210000001400000003030000020000008004000040050000616E6F746865720076616C756500, 010100000000000000000000 ] Check output for Float FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203@\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\203\021H\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?p@Json2.JsonDocumentSqlValueNumber\202\003?r\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?d?<\t\211\014?f\211\002?f\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\210\203\005@\200\203\005@\202\022\000\003?\224\"Json2.CompilePath\202\003?\226\000\002\017\003?\212\000\003?\214\000\003?\216\000\003?\220\000?4\036\010\000?l\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\266\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203@\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\203\021H\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?p@Json2.JsonDocumentSqlValueNumber\202\003?r\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?d?<\t\211\014?f\211\002?f\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\210\203\005@\200\203\005@\202\022\000\003?\224\"Json2.CompilePath\202\003?\226\000\002\017\003?\212\000\003?\214\000\003?\216\000\003?\220\000?4\036\010\000?l\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\266\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"6,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,6"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"6\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"6\",\"p\":{\"address\":{\"name\":\"json_binary\",\"id\":6}},\"o\":\"6\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"6,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"6","p":{"address":{"name":"json_binary","id":6}},"o":"6","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_binary","id":6}]},"o":"6","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_binary","id":6}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; json_binary: [ 7B226B6579223A2276616C7565227D, 7B226B6579223A31307D, 7B226B6579223A302E317D, 
7B226B6579223A66616C73657D, 7B22616E6F74686572223A2276616C7565227D, 5B5D ] json_binary: [ 010200002100000014000000030300000200000000040000C00400006B65790076616C756500, 0102000021000000140000008403000001000000800300006B6579000000000000002440, 0102000021000000140000008403000001000000800300006B6579009A9999999999B93F, 0102000021000000140000000000000001000000800300006B657900, 01020000210000001400000003030000020000008004000040050000616E6F746865720076616C756500, 010100000000000000000000 ] Check output for Double FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; >> TLocksTest::Range_EmptyKey [GOOD] >> SystemView::ShowCreateTableKeyBloomFilter [GOOD] >> SystemView::ShowCreateTable |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestScript::StepMerging [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::Range_IncorrectNullDot2 [GOOD] Test command err: 2025-06-24T21:24:39.158632Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629943486384981:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:39.158678Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a95/r3tmp/tmp7yvFzh/pdisk_1.dat 2025-06-24T21:24:39.613127Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:39.613953Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629943486384939:2079] 1750800279154755 != 1750800279154758 2025-06-24T21:24:39.625143Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:39.625275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:39.627086Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10217 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:24:39.880675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:39.898989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:39.915890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:24:39.923489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:40.064176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:40.148145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:40.168954Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:42.508005Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629956270003794:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:42.509723Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a95/r3tmp/tmp6iWUsD/pdisk_1.dat 2025-06-24T21:24:42.647928Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:42.652803Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629956270003775:2079] 1750800282507509 != 1750800282507512 2025-06-24T21:24:42.668293Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:42.668375Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:42.670095Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25147 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:42.886686Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:42.891919Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:42.910215Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:42.985895Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:43.063112Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:45.701619Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519629969921875436:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:45.701709Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a95/r3tmp/tmpOpX1DL/pdisk_1.dat 2025-06-24T21:24:45.821249Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519629969921875416:2079] 1750800285701213 != 1750800285701216 2025-06-24T21:24:45.821353Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:45.849105Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:45.849202Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:45.850752Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13571 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:46.030336Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alt ... 
05.492699Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11451 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:05.767432Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:05.775436Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:05.793998Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:25:05.804582Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:05.915855Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:05.995302Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:10.335420Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519630074557480021:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:10.335469Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a95/r3tmp/tmp0c0zRH/pdisk_1.dat 2025-06-24T21:25:10.547755Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:10.547852Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:10.550202Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:25:10.554207Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:10.555858Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519630074557480000:2079] 1750800310334731 != 1750800310334734 TClient is connected to server localhost:19346 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:10.844606Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:10.851706Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:10.868717Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:10.945055Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:11.020373Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:15.388971Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519630096541435714:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:15.389064Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a95/r3tmp/tmpNypQvU/pdisk_1.dat 2025-06-24T21:25:15.548284Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:15.549664Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519630096541435695:2079] 1750800315388232 != 1750800315388235 2025-06-24T21:25:15.563459Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:15.563554Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:15.565077Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16075 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 
2025-06-24T21:25:15.944797Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:15.966786Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:16.031088Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:16.105007Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelEquals |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelEquals [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TCancelTx::ImmediateReadOnly [GOOD] Test command err: 2025-06-24T21:25:06.049520Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630057213416647:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:06.049755Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6a/r3tmp/tmp2KMklm/pdisk_1.dat 2025-06-24T21:25:06.484057Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:06.507239Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:06.507370Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:06.508705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4951 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:06.865600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:06.888952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:07.059995Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4951 2025-06-24T21:25:07.295176Z node 1 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [1:7519630061508384616:2381] txid# 281474976710660 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037888 marker# P12 2025-06-24T21:25:07.295277Z node 1 :TX_PROXY ERROR: datareq.cpp:883: Actor# [1:7519630061508384616:2381] txid# 281474976710660 RESPONSE Status# ExecCancelled marker# P13c 2025-06-24T21:25:07.311036Z node 1 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [1:7519630061508384629:2391] txid# 281474976710661 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037888 marker# P12 2025-06-24T21:25:07.311096Z node 1 :TX_PROXY ERROR: datareq.cpp:883: Actor# [1:7519630061508384629:2391] txid# 281474976710661 RESPONSE Status# ExecCancelled marker# P13c 2025-06-24T21:25:07.324037Z node 1 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [1:7519630061508384642:2401] txid# 281474976710662 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037888 marker# P12 2025-06-24T21:25:07.324102Z node 1 :TX_PROXY ERROR: datareq.cpp:883: Actor# [1:7519630061508384642:2401] txid# 281474976710662 RESPONSE Status# ExecCancelled marker# P13c 2025-06-24T21:25:07.351182Z node 1 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [1:7519630061508384668:2421] txid# 281474976710664 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037889 marker# P12 2025-06-24T21:25:07.351275Z node 1 :TX_PROXY ERROR: datareq.cpp:883: Actor# [1:7519630061508384668:2421] 
txid# 281474976710664 RESPONSE Status# ExecCancelled marker# P13c 2025-06-24T21:25:07.364463Z node 1 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [1:7519630061508384681:2431] txid# 281474976710665 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037889 marker# P12 2025-06-24T21:25:07.364520Z node 1 :TX_PROXY ERROR: datareq.cpp:883: Actor# [1:7519630061508384681:2431] txid# 281474976710665 RESPONSE Status# ExecCancelled marker# P13c 2025-06-24T21:25:07.388633Z node 1 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [1:7519630061508384694:2441] txid# 281474976710666 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037889 marker# P12 2025-06-24T21:25:07.388706Z node 1 :TX_PROXY ERROR: datareq.cpp:883: Actor# [1:7519630061508384694:2441] txid# 281474976710666 RESPONSE Status# ExecCancelled marker# P13c 2025-06-24T21:25:09.582232Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630071061749564:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:09.608922Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6a/r3tmp/tmpBCzyQU/pdisk_1.dat 2025-06-24T21:25:09.752955Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:09.753315Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630071061749442:2079] 1750800309573541 != 1750800309573544 2025-06-24T21:25:09.770953Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:09.771053Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:09.775405Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18912 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 
2025-06-24T21:25:09.992608Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:25:09.998761Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:25:10.006539Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient is connected to server localhost:18912 2025-06-24T21:25:13.279967Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630089569450283:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:13.280070Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6a/r3tmp/tmp15TJLU/pdisk_1.dat 2025-06-24T21:25:13.422860Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:13.424338Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630089569450264:2079] 1750800313279491 != 1750800313279494 2025-06-24T21:25:13.440526Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:13.440610Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:13.442145Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1146 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:25:13.657081Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:13.665433Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient is connected to server localhost:1146 2025-06-24T21:25:13.984100Z node 3 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [3:7519630089569450969:2381] txid# 281474976715660 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037888 marker# P12 2025-06-24T21:25:13.984172Z node 3 :TX_PROXY ERROR: datareq.cpp:883: Actor# [3:7519630089569450969:2381] txid# 281474976715660 RESPONSE Status# ExecCancelled marker# P13c 2025-06-24T21:25:13.995746Z node 3 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [3:7519630089569450985:2394] txid# 281474976715661 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037888 marker# P12 2025-06-24T21:25:13.995827Z node 3 :TX_PROXY ERROR: datareq.cpp:883: Actor# [3:7519630089569450985:2394] txid# 281474976715661 RESPONSE Status# ExecCancelled marker# P13c 2025-06-24T21:25:14.008401Z node 3 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [3:7519630093864418294:2404] txid# 281474976715662 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037888 marker# P12 2025-06-24T21:25:14.008470Z node 3 :TX_PROXY ERROR: datareq.cpp:883: Actor# [3:7519630093864418294:2404] txid# 281474976715662 RESPONSE Status# ExecCancelled marker# P13c 2025-06-24T21:25:14.035789Z node 3 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [3:7519630093864418321:2425] txid# 281474976715664 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037889 marker# P12 2025-06-24T21:25:14.035861Z node 3 :TX_PROXY ERROR: datareq.cpp:883: Actor# [3:7519630093864418321:2425] txid# 281474976715664 RESPONSE Status# ExecCancelled marker# P13c 2025-06-24T21:25:14.049102Z node 3 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [3:7519630093864418334:2435] txid# 281474976715665 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037889 marker# P12 2025-06-24T21:25:14.049175Z node 3 :TX_PROXY ERROR: datareq.cpp:883: Actor# [3:7519630093864418334:2435] txid# 281474976715665 RESPONSE Status# ExecCancelled marker# P13c 2025-06-24T21:25:14.063006Z node 3 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [3:7519630093864418347:2445] txid# 281474976715666 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037889 marker# P12 2025-06-24T21:25:14.063102Z node 3 :TX_PROXY ERROR: datareq.cpp:883: Actor# [3:7519630093864418347:2445] txid# 281474976715666 RESPONSE Status# ExecCancelled marker# P13c 2025-06-24T21:25:17.104064Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519630103603885350:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:17.104147Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6a/r3tmp/tmpgYjCsW/pdisk_1.dat 2025-06-24T21:25:17.244695Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:17.246922Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519630103603885330:2079] 1750800317103541 != 1750800317103544 2025-06-24T21:25:17.264378Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:17.264497Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:17.265960Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25025 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:17.521380Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:17.529908Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
TClient is connected to server localhost:25025 2025-06-24T21:25:17.827486Z node 4 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976710660 at tablet 72075186224037888 status: CANCELLED errors: EXECUTION_CANCELLED (Tx was cancelled) | 2025-06-24T21:25:17.827676Z node 4 :TX_PROXY ERROR: datareq.cpp:883: Actor# [4:7519630103603886032:2380] txid# 281474976710660 RESPONSE Status# ExecCancelled marker# P13c 2025-06-24T21:25:17.842240Z node 4 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976710662 at tablet 72075186224037889 status: CANCELLED errors: EXECUTION_CANCELLED (Tx was cancelled) | 2025-06-24T21:25:17.842329Z node 4 :TX_PROXY ERROR: datareq.cpp:883: Actor# [4:7519630103603886046:2388] txid# 281474976710662 RESPONSE Status# ExecCancelled marker# P13c ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksFatTest::LocksLimit [GOOD] Test command err: 2025-06-24T21:25:05.826327Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630055257428369:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:05.826390Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6b/r3tmp/tmpVCBBcX/pdisk_1.dat 2025-06-24T21:25:06.224601Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630055257428348:2079] 1750800305824828 != 1750800305824831 2025-06-24T21:25:06.242291Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:06.265620Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:06.265790Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:06.267730Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:65084 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:25:06.533875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:06.556598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:06.566940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:25:06.572700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:06.710030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:06.768893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:06.839916Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:10.826804Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630055257428369:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:10.826868Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:25:14.158822Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630092340039052:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:14.158887Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6b/r3tmp/tmpNVnqJ7/pdisk_1.dat 2025-06-24T21:25:14.282840Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630092340039033:2079] 1750800314158434 != 1750800314158437 2025-06-24T21:25:14.292781Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:14.299983Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:14.300073Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:14.301281Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2778 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:14.509041Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-24T21:25:14.527988Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:25:14.602614Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:14.677391Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:15.166459Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:17.560758Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630105575720196:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:17.560837Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6b/r3tmp/tmpcuPMB1/pdisk_1.dat 2025-06-24T21:25:17.698789Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:17.702039Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630105575720177:2079] 1750800317560382 != 1750800317560385 2025-06-24T21:25:17.728979Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:17.729068Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:17.730728Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31582 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:17.928375Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:17.951674Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:18.024953Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:18.096279Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:18.584795Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelContains ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelEquals [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 10 } Arguments { Id: 11 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\004\006Arg\030BlockAsTuple\t\211\004\235\213\004\213\004\203\020\203B\213\002\203\014\001\235?\004\001\235?\010\001\006\000\t\211\004?\016\235?\000\001\235?\002\001\006\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\002?\020\235?\006\001\006\000\t\211\006?$\203\005@?\024?\026$BlockFunc\000\003?(\014Equals?\034? \001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 10 } Arguments { Id: 11 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\004\006Arg\030BlockAsTuple\t\211\004\235\213\004\213\004\203\020\203B\213\002\203\014\001\235?\004\001\235?\010\001\006\000\t\211\004?\016\235?\000\001\235?\002\001\006\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\002?\020\235?\006\001\006\000\t\211\006?$\203\005@?\024?\026$BlockFunc\000\003?(\014Equals?\034? \001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"i16\",\"id\":10},{\"name\":\"float\",\"id\":11}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(26):{\"i\":\"10,11\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:10,11"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"10\",\"p\":{\"address\":{\"name\":\"i16\",\"id\":10}},\"o\":\"10\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"11\",\"p\":{\"address\":{\"name\":\"float\",\"id\":11}},\"o\":\"11\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"i16\",\"id\":10},{\"name\":\"float\",\"id\":11}]},\"o\":\"10,11\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"10","p":{"address":{"name":"i16","id":10}},"o":"10","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"i16","id":10},{"name":"float","id":11}]},"o":"10,11","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"i16","id":10},{"name":"float","id":11}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"15","t":"Projection"},"w":26,"id":5},"4":{"p":{"i":"11","p":{"address":{"name":"float","id":11}},"o":"11","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"10,11","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":26,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int16TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9FloatTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int16TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9FloatTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int16TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9FloatTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int16TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9FloatTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int16TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9FloatTypeE; digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"i16\",\"id\":10},{\"name\":\"float\",\"id\":11}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(26):{\"i\":\"10,11\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:10,11"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"10\",\"p\":{\"address\":{\"name\":\"i16\",\"id\":10}},\"o\":\"10\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"11\",\"p\":{\"address\":{\"name\":\"float\",\"id\":11}},\"o\":\"11\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"i16\",\"id\":10},{\"name\":\"float\",\"id\":11}]},\"o\":\"10,11\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; } FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; 
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; >> TestProgram::YqlKernelStartsWithScalar >> TestProgram::YqlKernelContains [GOOD] >> TestProgram::YqlKernelStartsWithScalar [GOOD] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TFlatTest::SplitBoundaryRead [GOOD] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelContains [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\005@\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? \034StringContains?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\005@\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? 
\034StringContains?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(26):{\"i\":\"7,9\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:7,9"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"9\",\"p\":{\"address\":{\"name\":\"substring\",\"id\":9}},\"o\":\"9\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"7,9\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"7,9","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"15","t":"Projection"},"w":26,"id":5},"4":{"p":{"i":"9","p":{"address":{"name":"substring","id":9}},"o":"9","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"7,9","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":26,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; 
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; >> TestProgram::JsonExistsBinary |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::Range_EmptyKey [GOOD] Test command err: 2025-06-24T21:24:40.263601Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629945622899960:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:40.267070Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a94/r3tmp/tmpXhE98P/pdisk_1.dat 2025-06-24T21:24:40.727913Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:40.730521Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629945622899911:2079] 1750800280231385 != 1750800280231388 2025-06-24T21:24:40.735375Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:40.735484Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:40.738375Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61369 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:41.020654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:41.035331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:24:41.048810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:41.186583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:41.269911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:41.284882Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:43.631382Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629959393233552:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:43.631577Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a94/r3tmp/tmpIZDsYs/pdisk_1.dat 2025-06-24T21:24:43.805612Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:43.808082Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629959393233531:2079] 1750800283628307 != 1750800283628310 2025-06-24T21:24:43.819253Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:43.819350Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:43.820906Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5384 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:44.045369Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:44.049379Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:44.063711Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:44.125500Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:44.174529Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:47.158314Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519629976990750185:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:47.158371Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a94/r3tmp/tmpT0rtZI/pdisk_1.dat 2025-06-24T21:24:47.279977Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:47.281438Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519629976990750164:2079] 1750800287157815 != 1750800287157818 2025-06-24T21:24:47.310172Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:47.315999Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:47.317381Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22091 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:47.516581Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:47.522132Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at ... .363917Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23473 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:07.710252Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:07.718551Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:07.734835Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:07.823488Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:07.891782Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:12.148906Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519630084277065356:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:12.148983Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a94/r3tmp/tmp7xi2Fs/pdisk_1.dat 2025-06-24T21:25:12.458372Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:12.462186Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519630084277065326:2079] 1750800312147680 != 1750800312147683 2025-06-24T21:25:12.475603Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:12.475722Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:12.477147Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30751 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:12.819497Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:12.845650Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:12.926057Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:12.990103Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:17.062866Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519630103417600476:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:17.062987Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a94/r3tmp/tmpSJTZml/pdisk_1.dat 2025-06-24T21:25:17.224228Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:17.226529Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519630103417600452:2079] 1750800317061761 != 1750800317061764 2025-06-24T21:25:17.235999Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:17.236085Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:17.238232Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14831 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:25:17.564854Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:17.572618Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:17.590653Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:25:17.596828Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:17.677986Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:17.739514Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonExistsBinary [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelStartsWithScalar [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Bytes: "Lorem" } } } Command { Assign { Column { Id: 16 } Function { Arguments { Id: 7 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\004\006Arg\030BlockAsTuple\t\211\004\235\213\004\213\004\203\001H\203\005@\213\002\203\014\001\235?\004\001\235?\010\001\006\000\t\211\004?\016\235?\000\001\235?\002\000\006\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\002?\020\235?\006\001\006\000\t\211\006?$\203\005@?\024?\026$BlockFunc\000\003?(\024StartsWith?\034? 
\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Bytes: "Lorem" } } } Command { Assign { Column { Id: 16 } Function { Arguments { Id: 7 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\004\006Arg\030BlockAsTuple\t\211\004\235\213\004\213\004\203\001H\203\005@\213\002\203\014\001\235?\004\001\235?\010\001\006\000\t\211\004?\016\235?\000\001\235?\002\000\006\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\002?\020\235?\006\001\006\000\t\211\006?$\203\005@?\024?\026$BlockFunc\000\003?(\024StartsWith?\034? \001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"Lorem\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"7,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:7,15"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7}]},\"o\":\"7\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"7,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"string","id":7}]},"o":"7","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"string","id":7}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"Lorem"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TLocksTest::BrokenNullLock [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonExistsBinary [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 
16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\020JsonNode\006Arg\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\006\000\t\251\000?\032\006\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\006\000\t\251\000?\004\006\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\006\000\t\211\022?6\211\010?6\207\203\021H\214\n\210\203\001H\214\002?6\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\002\207\203\014\026\000\t\211\010?J\203\005@\200\203\005@\202\022\000\003?d6Json2.JsonDocumentSqlExists\202\003?f\000\002\017\003?L\000\003?N\000\003?P\000\003?R\000\027?T?<\t\211\014?V\211\002?V\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?|\203\005@\200\203\005@\202\022\000\003?\210\"Json2.CompilePath\202\003?\212\000\002\017\003?~\000\003?\200\000\003?\202\000\003?\204\000?4\036\010\000?\\7?`\003?^\000\276\001\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\020JsonNode\006Arg\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\006\000\t\251\000?\032\006\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\006\000\t\251\000?\004\006\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\006\000\t\211\022?6\211\010?6\207\203\021H\214\n\210\203\001H\214\002?6\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\002\207\203\014\026\000\t\211\010?J\203\005@\200\203\005@\202\022\000\003?d6Json2.JsonDocumentSqlExists\202\003?f\000\002\017\003?L\000\003?N\000\003?P\000\003?R\000\027?T?<\t\211\014?V\211\002?V\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?|\203\005@\200\203\005@\202\022\000\003?\210\"Json2.CompilePath\202\003?\212\000\002\017\003?~\000\003?\200\000\003?\202\000\003?\204\000?4\036\010\000?\\7?`\003?^\000\276\001\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"6,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,6"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"6\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"6\",\"p\":{\"address\":{\"name\":\"json_binary\",\"id\":6}},\"o\":\"6\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"6,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"6","p":{"address":{"name":"json_binary","id":6}},"o":"6","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_binary","id":6}]},"o":"6","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_binary","id":6}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; json_binary: [ 7B226B6579223A2276616C7565227D, 5B5D ] json_binary: [ 010200002100000014000000030300000200000000040000C00400006B65790076616C756500, 010100000000000000000000 ] FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; >> TestProgram::JsonValue |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelStartsWith >> 
TestProgram::YqlKernelStartsWith [GOOD] >> TLocksTest::Range_BrokenLockMax [GOOD] >> TLocksTest::Range_CorrectDot >> TestProgram::JsonExists >> TestProgram::JsonValue [GOOD] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SplitBoundaryRead [GOOD] Test command err: 2025-06-24T21:25:16.878126Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630103211858379:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:16.881735Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a50/r3tmp/tmpJSSPjm/pdisk_1.dat 2025-06-24T21:25:17.222464Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:17.223626Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630103211858356:2079] 1750800316876463 != 1750800316876466 2025-06-24T21:25:17.303241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:17.303367Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:17.305058Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20528 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:17.557574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:17.573312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:25:17.582941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:25:17.591305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:17.747901Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-06-24T21:25:17.751868Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.001s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-24T21:25:17.784328Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1573 647 6413)b }, ecr=1.000 2025-06-24T21:25:17.788376Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.001s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750800317700 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... (TRUNCATED) waiting... 
2025-06-24T21:25:17.924092Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:17.943818Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.23, eph 1} end=Done, 0 blobs 0r (max 1), put Spent{time=0.000s,wait=0.000s,interrupts=0} 2025-06-24T21:25:17.943922Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.22, eph 1} end=Done, 0 blobs 0r (max 1), put Spent{time=0.000s,wait=0.000s,interrupts=0} 2025-06-24T21:25:17.944255Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.24, eph 1} end=Done, 0 blobs 0r (max 1), put Spent{time=0.000s,wait=0.000s,interrupts=0} 2025-06-24T21:25:17.944275Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.25, eph 1} end=Done, 0 blobs 0r (max 1), put Spent{time=0.000s,wait=0.000s,interrupts=0} 2025-06-24T21:25:17.945710Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.26, eph 3} end=Done, 4 blobs 2r (max 2), put Spent{time=0.001s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 0 +0, (1907 1533 0)b }, ecr=1.000 2025-06-24T21:25:17.951935Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.33, eph 3} end=Done, 4 blobs 8r (max 8), put Spent{time=0.004s,wait=0.001s,interrupts=1} Part{ 2 pk, lobs 5 +0, (3250 2180 6413)b }, ecr=1.000 TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750800317700 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... (TRUNCATED) waiting... 
TClient::Ls request: /dc-1/Dir/TableOld 2025-06-24T21:25:18.073317Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-24T21:25:18.076630Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-24T21:25:18.080708Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037892 not found 2025-06-24T21:25:18.080754Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-24T21:25:18.080865Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Path not found" 2025-06-24T21:25:20.178643Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630117097346651:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:20.178749Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a50/r3tmp/tmpjjocSM/pdisk_1.dat 2025-06-24T21:25:20.347112Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:20.359736Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630117097346632:2079] 1750800320177331 != 1750800320177334 2025-06-24T21:25:20.365566Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:20.365646Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:20.368366Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10510 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:25:20.566237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/y ... kie: 281474976715678 TabletId: 72075186224037890 2025-06-24T21:25:20.998362Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:38: TSplitMerge TConfigureDestination operationId# 281474976715678:0 HandleReply TEvInitSplitMergeDestinationAck, operationId: 281474976715678:0, at schemeshard: 72057594046644480 message# OperationCookie: 281474976715678 TabletId: 72075186224037890 2025-06-24T21:25:20.998786Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-24T21:25:20.999559Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 281474976715678:0, at schemeshard: 72057594046644480, message: OperationCookie: 281474976715678 TabletId: 72075186224037891 2025-06-24T21:25:20.999599Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:38: TSplitMerge TConfigureDestination operationId# 281474976715678:0 HandleReply TEvInitSplitMergeDestinationAck, operationId: 281474976715678:0, at schemeshard: 72057594046644480 message# OperationCookie: 281474976715678 TabletId: 72075186224037891 2025-06-24T21:25:20.999631Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715678:0 3 -> 131 2025-06-24T21:25:20.999973Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-24T21:25:21.000054Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-24T21:25:21.000082Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:334: TSplitMerge TTransferData operationId# 281474976715678:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T21:25:21.000112Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_split_merge.cpp:353: TSplitMerge TTransferData operationId# 281474976715678:0 Starting split on src datashard 72075186224037888 splitOpId# 281474976715678:0 at tablet 72057594046644480 2025-06-24T21:25:21.000388Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976715678:0 from tablet: 72057594046644480 to tablet: 72075186224037888 cookie: 72057594046644480:1 msg type: 269553154 2025-06-24T21:25:21.000470Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976715678, partId: 0, tablet: 72075186224037888 2025-06-24T21:25:21.005834Z node 2 :OPS_COMPACT INFO: Compact{72075186224037888.1.25, eph 1} end=Done, 0 blobs 0r (max 1), put Spent{time=0.000s,wait=0.000s,interrupts=0} 2025-06-24T21:25:21.006150Z node 2 :OPS_COMPACT INFO: Compact{72075186224037888.1.26, eph 1} end=Done, 0 blobs 0r (max 1), put Spent{time=0.000s,wait=0.000s,interrupts=0} 2025-06-24T21:25:21.006364Z node 2 :OPS_COMPACT INFO: Compact{72075186224037888.1.27, eph 1} end=Done, 0 blobs 0r (max 1), put Spent{time=0.000s,wait=0.000s,interrupts=0} 
2025-06-24T21:25:21.006537Z node 2 :OPS_COMPACT INFO: Compact{72075186224037888.1.28, eph 1} end=Done, 0 blobs 0r (max 1), put Spent{time=0.000s,wait=0.000s,interrupts=0} 2025-06-24T21:25:21.006734Z node 2 :OPS_COMPACT INFO: Compact{72075186224037888.1.29, eph -9223372036854775808} end=Done, 0 blobs 0r (max 0), put Spent{time=0.000s,wait=0.000s,interrupts=0} 2025-06-24T21:25:21.011693Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 281474976715678:0, at schemeshard: 72057594046644480, message: OperationCookie: 281474976715678 TabletId: 72075186224037888 2025-06-24T21:25:21.011759Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:207: TSplitMerge TTransferData operationId# 281474976715678:0 HandleReply TEvSplitAck, at schemeshard: 72057594046644480, message: OperationCookie: 281474976715678 TabletId: 72075186224037888 2025-06-24T21:25:21.012069Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715678:0 131 -> 132 2025-06-24T21:25:21.012179Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 6 2025-06-24T21:25:21.012530Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-24T21:25:21.012664Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-24T21:25:21.012685Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715678, path id: [OwnerId: 72057594046644480, LocalPathId: 3] 2025-06-24T21:25:21.012901Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-24T21:25:21.012927Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:7519630117097347117:2240], at schemeshard: 72057594046644480, txId: 281474976715678, path id: 3 2025-06-24T21:25:21.012964Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-24T21:25:21.012999Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:437: TSplitMerge TNotifySrc, operationId: 281474976715678:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T21:25:21.013019Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_split_merge.cpp:468: Notify src datashard 72075186224037888 on partitioning changed splitOp# 281474976715678 at tablet 72057594046644480 2025-06-24T21:25:21.014753Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976715678 2025-06-24T21:25:21.014838Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976715678 2025-06-24T21:25:21.014853Z node 2 :FLAT_TX_SCHEMESHARD 
INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976715678 2025-06-24T21:25:21.014869Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715678, pathId: [OwnerId: 72057594046644480, LocalPathId: 3], version: 4 2025-06-24T21:25:21.014886Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 7 2025-06-24T21:25:21.014940Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715678, ready parts: 0/1, is published: true 2025-06-24T21:25:21.015072Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976715678:0 from tablet: 72057594046644480 to tablet: 72075186224037888 cookie: 72057594046644480:1 msg type: 269553158 2025-06-24T21:25:21.016401Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715678 2025-06-24T21:25:21.017027Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 281474976715678:0, at schemeshard: 72057594046644480, message: OperationCookie: 281474976715678 TabletId: 72075186224037888 2025-06-24T21:25:21.017063Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:392: TSplitMerge TNotifySrc, operationId: 281474976715678:0 HandleReply TEvSplitPartitioningChangedAck, from datashard: 72075186224037888, at schemeshard: 72057594046644480 2025-06-24T21:25:21.017114Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715678:0 progress is 1/1 2025-06-24T21:25:21.017125Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715678 ready parts: 1/1 2025-06-24T21:25:21.017142Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715678:0 progress is 1/1 2025-06-24T21:25:21.017155Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715678 ready parts: 1/1 2025-06-24T21:25:21.017168Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715678, ready parts: 1/1, is published: true 2025-06-24T21:25:21.017204Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:7519630117097347542:2318] message: TxId: 281474976715678 2025-06-24T21:25:21.017233Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715678 ready parts: 1/1 2025-06-24T21:25:21.017251Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715678:0 2025-06-24T21:25:21.017261Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976715678:0 2025-06-24T21:25:21.017400Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 6 2025-06-24T21:25:21.017714Z node 2 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-24T21:25:21.017726Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:260: Unable to activate 281474976715678:0 TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750800320696 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... (TRUNCATED) >> TestProgram::JsonExists [GOOD] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelStartsWith [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\001H\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? \024StartsWith?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\001H\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? 
\024StartsWith?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(26):{\"i\":\"7,9\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:7,9"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"9\",\"p\":{\"address\":{\"name\":\"substring\",\"id\":9}},\"o\":\"9\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"7,9\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"7,9","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"15","t":"Projection"},"w":26,"id":5},"4":{"p":{"i":"9","p":{"address":{"name":"substring","id":9}},"o":"9","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"7,9","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":26,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonValue [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 
Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207?\004\001\235?\006\001\235?\n\001\"\000\t\211\004?\020\235?\002\001\235?\004\000\"\000\t\251\000?\026\002\000\t\251\000?\030\002\000\000\t\211\002?\022\235?\010\001\"\000\t\211\n?&?\026?\030?\002?\004?\010,ScalarApply\000?\036?\"\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\010?\002?\000\207?\004?4$IfPresent\000?.\t\251\000?\000\002\000\t\211\n?4\201\213\004\213\004\203\n\203\005@\207\203\001H?@?4?D?D VisitAll\000\t\211\020?H\211\006?H\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?X\203\005@\200\203\005@\202\022\000\003?p6Json2.SqlValueConvertToUtf8\202\003?r\000\002\017\003?Z\000\003?\\\000\003?^\000\003?`\000\027?d\t\211\014?b\311\002?b\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\214\005\205\004\203\010\203\005@\032\036\003?\222\002\003?\224\000\003\001\003?\216\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\244\203\005@\200\203\005@\202\022\000\003?\260\026Json2.Parse\202\003?\262\000\002\017\003?\246\000\003?\250\000\003?\252\000\003?\254\000?:\036\t\211\014?f\211\002?f\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\312\203\005@\200\203\005@\202\022\000\003?\326\"Json2.CompilePath\202\003?\330\000\002\017\003?\314\000\003?\316\000\003?\320\000\003?\322\000?2\036\010\000?l\276\t\251\000?@\002\000\'?4\t\251\000?D\002\000?\370\004\'?4\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207?\004\001\235?\006\001\235?\n\001\"\000\t\211\004?\020\235?\002\001\235?\004\000\"\000\t\251\000?\026\002\000\t\251\000?\030\002\000\000\t\211\002?\022\235?\010\001\"\000\t\211\n?&?\026?\030?\002?\004?\010,ScalarApply\000?\036?\"\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\010?\002?\000\207?\004?4$IfPresent\000?.\t\251\000?\000\002\000\t\211\n?4\201\213\004\213\004\203\n\203\005@\207\203\001H?@?4?D?D 
VisitAll\000\t\211\020?H\211\006?H\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?X\203\005@\200\203\005@\202\022\000\003?p6Json2.SqlValueConvertToUtf8\202\003?r\000\002\017\003?Z\000\003?\\\000\003?^\000\003?`\000\027?d\t\211\014?b\311\002?b\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\214\005\205\004\203\010\203\005@\032\036\003?\222\002\003?\224\000\003\001\003?\216\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\244\203\005@\200\203\005@\202\022\000\003?\260\026Json2.Parse\202\003?\262\000\002\017\003?\246\000\003?\250\000\003?\252\000\003?\254\000?:\036\t\211\014?f\211\002?f\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\312\203\005@\200\203\005@\202\022\000\003?\326\"Json2.CompilePath\202\003?\330\000\002\017\003?\314\000\003?\316\000\003?\320\000\003?\322\000?2\036\010\000?l\276\t\251\000?@\002\000\'?4\t\251\000?D\002\000?\370\004\'?4\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,5"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"5\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"5\",\"p\":{\"address\":{\"name\":\"json_string\",\"id\":5}},\"o\":\"5\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"5,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"5","p":{"address":{"name":"json_string","id":5}},"o":"5","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_string","id":5}]},"o":"5","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_string","id":5}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; 
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; json_string: [ "{"key":"value"}", "{"key":10}", "{"key":0.1}", "{"key":false}", "{"another":"value"}", "[]" ] Check output for Utf8 FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203\014?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r$Json2.SqlValueBool\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\372\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203\014?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r$Json2.SqlValueBool\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\372\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\" ... } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203B\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?6 VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r(Json2.SqlValueNumber\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000\t\211\004?6\203\005@?F\030Invoke\000\003?\374\016Convert?\372\001\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,5"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"5\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"5\",\"p\":{\"address\":{\"name\":\"json_string\",\"id\":5}},\"o\":\"5\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"5,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"5","p":{"address":{"name":"json_string","id":5}},"o":"5","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_string","id":5}]},"o":"5","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_string","id":5}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; 
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; json_string: [ "{"key":"value"}", "{"key":10}", "{"key":0.1}", "{"key":false}", "{"another":"value"}", "[]" ] Check output for Float FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203@\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r(Json2.SqlValueNumber\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\372\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203@\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r(Json2.SqlValueNumber\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\372\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,5"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"5\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"5\",\"p\":{\"address\":{\"name\":\"json_string\",\"id\":5}},\"o\":\"5\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"5,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"5","p":{"address":{"name":"json_string","id":5}},"o":"5","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_string","id":5}]},"o":"5","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_string","id":5}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; 
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; json_string: [ "{"key":"value"}", "{"key":10}", "{"key":0.1}", "{"key":false}", "{"another":"value"}", "[]" ] Check output for Double FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; >> TestProgram::SimpleFunction [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonExists [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\020JsonNode\006Arg\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\006\000\t\251\000?\032\006\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\006\000\t\251\000?\004\006\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\006\000\t\211\022?6\211\010?6\207\214\002\214\n\210\203\001H?>?6\016\000\203\004\203\005@\203\004\203\004\207\214\002\214\n\210\203\001H\214\002\207\203\014\026\000\t\211\010?H\203\005@\200\203\005@\202\022\000\003?d\036Json2.SqlExists\202\003?f\000\002\017\003?J\000\003?L\000\003?N\000\003?P\000\027?T\t\211\014?R\311\002?R\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\200\005\205\004\203\010\203\005@\032\036\003?\206\002\003?\210\000\003\001\003?\202\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\230\203\005@\200\203\005@\202\022\000\003?\244\026Json2.Parse\202\003?\246\000\002\017\003?\232\000\003?\234\000\003?\236\000\003?\240\000?<\036\t\211\014?V\211\002?V\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\276\203\005@\200\203\005@\202\022\000\003?\312\"Json2.CompilePath\202\003?\314\000\002\017\003?\300\000\003?\302\000\003?\304\000\003?\306\000?4\036\010\000?\\7?`\003?^\000\276\001\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\020JsonNode\006Arg\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\006\000\t\251\000?\032\006\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\006\000\t\251\000?\004\006\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\006\000\t\211\022?6\211\010?6\207\214\002\214\n\210\203\001H?>?6\016\000\203\004\203\005@\203\004\203\004\207\214\002\214\n\210\203\001H\214\002\207\203\014\026\000\t\211\010?H\203\005@\200\203\005@\202\022\000\003?d\036Json2.SqlExists\202\003?f\000\002\017\003?J\000\003?L\000\003?N\000\003?P\000\027?T\t\211\014?R\311\002?R\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\200\005\205\004\203\010\203\005@\032\036\003?\206\002\003?\210\000\003\001\003?\202\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\230\203\005@\200\203\005@\202\022\000\003?\244\026Json2.Parse\202\003?\246\000\002\017\003?\232\000\003?\234\000\003?\236\000\003?\240\000?<\036\t\211\014?V\211\002?V\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\276\203\005@\200\203\005@\202\022\000\003?\312\"Json2.CompilePath\202\003?\314\000\002\017\003?\300\000\003?\302\000\003?\304\000\003?\306\000?4\036\010\000?\\7?`\003?^\000\276\001\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,5"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"5\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"5\",\"p\":{\"address\":{\"name\":\"json_string\",\"id\":5}},\"o\":\"5\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"5,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"5","p":{"address":{"name":"json_string","id":5}},"o":"5","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_string","id":5}]},"o":"5","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_string","id":5}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; json_string: [ "{"key":"value"}", "[]" ] FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; 
|97.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::SimpleFunction [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Id: 8 Arguments { Id: 2 } } } } Command { Projection { Columns { Id: 15 } } } ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Id: 8 Arguments { Id: 2 } } } } Command { Projection { Columns { Id: 15 } } } ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N3(15):{\"i\":\"2\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N1[shape=box, label="N1(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N4 -> N1[label="1"]; N2[shape=box, label="N2(7):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"uid\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N1 -> N2[label="1"]; N3[shape=box, label="N4(15):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N3[label="1"]; N4[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N4->N1->N2->N0->N3[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":4}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[{"from":0}]},{"owner_id":4,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"uid","id":2}]},"o":"2","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"i":"15","t":"Projection"},"w":15,"id":3},"2":{"p":{"i":"2","p":{"address":{"name":"uid","id":2}},"o":"2","t":"AssembleOriginalData"},"w":7,"id":2},"4":{"p":{"p":{"data":[{"name":"uid","id":2}]},"o":"0","t":"ReserveMemory"},"w":0,"id":4},"0":{"p":{"i":"2","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":15,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::BrokenNullLock [GOOD] Test command err: 2025-06-24T21:24:42.509965Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629956796043382:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:42.517357Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a7d/r3tmp/tmp1fwOnW/pdisk_1.dat 2025-06-24T21:24:42.883480Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:42.921859Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:42.921997Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:42.923463Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28913 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:43.232837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:43.258189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:24:43.263298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:43.408563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:43.496449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:43.517213Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a7d/r3tmp/tmpLqOgEi/pdisk_1.dat 2025-06-24T21:24:46.135212Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:46.136931Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:46.137233Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629967753180682:2079] 1750800286001842 != 1750800286001845 2025-06-24T21:24:46.162539Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:46.162616Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:46.164381Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8402 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-24T21:24:46.372321Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:24:46.386283Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:24:46.391347Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:24:46.472735Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:46.523752Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a7d/r3tmp/tmpR4IuKl/pdisk_1.dat 2025-06-24T21:24:49.530422Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:49.532857Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:49.534420Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519629984881633731:2079] 1750800289421836 != 1750800289421839 2025-06-24T21:24:49.572379Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:49.572471Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:49.579651Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24443 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:24:49.724472Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:49.741104Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:49.792705Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/cor ... ecting 2025-06-24T21:25:09.972434Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15991 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-24T21:25:10.340777Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:25:10.349539Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:25:10.366054Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:25:10.375054Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:10.461956Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:25:10.529286Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:10.704355Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:14.597433Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519630092571795918:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:14.597583Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a7d/r3tmp/tmpGj6ixG/pdisk_1.dat 2025-06-24T21:25:14.774159Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:14.775351Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519630092571795897:2079] 1750800314596489 != 1750800314596492 2025-06-24T21:25:14.788572Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:14.788694Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:14.792286Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4356 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:15.066952Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:15.094431Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:15.172448Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:15.244238Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:19.269629Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519630112454227384:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:19.269887Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a7d/r3tmp/tmpNedfdy/pdisk_1.dat 2025-06-24T21:25:19.416459Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:19.419084Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519630112454227361:2079] 1750800319268817 != 1750800319268820 2025-06-24T21:25:19.431272Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:19.431388Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:19.433240Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1950 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:19.741220Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:19.766280Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:19.853017Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:19.929277Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::NumRowsWithNulls [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::NumRowsWithNulls [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 10001 } Function { Id: 7 Arguments { Id: 2 } } } } Command { Filter { Predicate { Id: 10001 } } } Command { GroupBy { Aggregates { Column { Id: 10002 } Function { Id: 2 } } } } Command { Projection { Columns { Id: 10002 } } } ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 10001 } Function { Id: 7 Arguments { Id: 2 } } } } Command { Filter { Predicate { Id: 10001 } } } Command { GroupBy { Aggregates { Column { Id: 10002 } Function { Id: 2 } } } } Command { Projection { Columns { Id: 10002 } } } ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N3(15):{\"i\":\"2\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"10001\",\"t\":\"Calculation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N1[shape=box, label="N1(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N1[label="1"]; N2[shape=box, label="N2(7):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"uid\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N1 -> N2[label="1"]; N3[shape=box, label="N4(15):{\"i\":\"10001\",\"t\":\"Filter\"}\nREMOVE:10001",style=filled,color="#FFAAAA"]; N0 -> N3[label="1"]; N4[shape=box, label="N5(8):{\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"10002\",\"t\":\"Calculation\"}\n"]; N5[shape=box, label="N6(8):{\"i\":\"10002\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N4 -> N5[label="1"]; N6[shape=box, 
label="N0(0):{\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N6->N1->N2->N0->N3->N4->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":6}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[{"from":0}]},{"owner_id":4,"inputs":[]},{"owner_id":5,"inputs":[{"from":4}]},{"owner_id":6,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"uid","id":2}]},"o":"2","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"i":"10001","t":"Filter"},"w":15,"id":3},"2":{"p":{"i":"2","p":{"address":{"name":"uid","id":2}},"o":"2","t":"AssembleOriginalData"},"w":7,"id":2},"6":{"p":{"p":{"data":[{"name":"uid","id":2}]},"o":"0","t":"ReserveMemory"},"w":0,"id":6},"5":{"p":{"i":"10002","t":"Projection"},"w":8,"id":5},"4":{"p":{"p":{"kernel":{"class_name":"SIMPLE"}},"o":"10002","t":"Calculation"},"w":8,"id":4},"0":{"p":{"i":"2","p":{"kernel":{"class_name":"SIMPLE"}},"o":"10001","t":"Calculation"},"w":15,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; >> TestProgram::YqlKernelEndsWithScalar |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelEndsWithScalar [GOOD] >> SystemView::AuthPermissions_Access [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelEndsWithScalar [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Bytes: "amet." } } } Command { Assign { Column { Id: 16 } Function { Arguments { Id: 7 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\004\006Arg\030BlockAsTuple\t\211\004\235\213\004\213\004\203\001H\203\005@\213\002\203\014\001\235?\004\001\235?\010\001\006\000\t\211\004?\016\235?\000\001\235?\002\000\006\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\002?\020\235?\006\001\006\000\t\211\006?$\203\005@?\024?\026$BlockFunc\000\003?(\020EndsWith?\034? \001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Bytes: "amet." } } } Command { Assign { Column { Id: 16 } Function { Arguments { Id: 7 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\004\006Arg\030BlockAsTuple\t\211\004\235\213\004\213\004\203\001H\203\005@\213\002\203\014\001\235?\004\001\235?\010\001\006\000\t\211\004?\016\235?\000\001\235?\002\000\006\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\002?\020\235?\006\001\006\000\t\211\006?$\203\005@?\024?\026$BlockFunc\000\003?(\020EndsWith?\034? 
\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"amet.\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"7,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:7,15"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7}]},\"o\":\"7\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"7,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"string","id":7}]},"o":"7","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"string","id":7}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"amet."},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex+UseSink+UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning+UseSink-UseDataQuery |97.1%| [TA] $(B)/ydb/core/tx/columnshard/engines/ut/test-results/unittest/{meta.json ... results_accumulator.log} |97.1%| [TA] {RESULT} $(B)/ydb/core/tx/columnshard/engines/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::AuthPermissions_Access [GOOD] Test command err: 2025-06-24T21:23:32.620200Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629656052459657:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:32.627279Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ad0/r3tmp/tmpdKgmfd/pdisk_1.dat 2025-06-24T21:23:33.207450Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:33.229277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:33.229388Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:33.238480Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:33.272042Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 19724, node 1 2025-06-24T21:23:33.371914Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:33.371937Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:33.371947Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:33.372055Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:23:33.647207Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28614 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:23:33.916858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:34.115456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "Tenant1" } } TxId: 281474976710658 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:23:34.115761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_extsubdomain.cpp:58: TCreateExtSubDomain Propose, path/Root/Tenant1, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-24T21:23:34.115846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: Tenant1, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-24T21:23:34.116011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-06-24T21:23:34.116030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976710658:0 type: TxCreateExtSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 2] source path: 2025-06-24T21:23:34.116165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-24T21:23:34.116264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:23:34.116288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:23:34.116372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-24T21:23:34.116428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-24T21:23:34.118639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976710658, response: Status: StatusAccepted TxId: 281474976710658 SchemeshardId: 72057594046644480 PathId: 2, at schemeshard: 72057594046644480 2025-06-24T21:23:34.118861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710658, database: /Root, subject: root@builtin, status: StatusAccepted, operation: CREATE DATABASE, path: /Root/Tenant1 
2025-06-24T21:23:34.119093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-24T21:23:34.119116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710658, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-24T21:23:34.119264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710658, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T21:23:34.119373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-24T21:23:34.119407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519629660347427513:2396], at schemeshard: 72057594046644480, txId: 281474976710658, path id: 1 2025-06-24T21:23:34.119421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519629660347427513:2396], at schemeshard: 72057594046644480, txId: 281474976710658, path id: 2 2025-06-24T21:23:34.119470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-24T21:23:34.119488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-24T21:23:34.119517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710658:0, at tablet# 72057594046644480 2025-06-24T21:23:34.119559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976710658 ready parts: 1/1 2025-06-24T21:23:34.126827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710658 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 waiting... 
2025-06-24T21:23:34.128915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976710658 2025-06-24T21:23:34.129025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976710658 2025-06-24T21:23:34.129039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976710658 2025-06-24T21:23:34.129056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710658, pathId: [OwnerId: 72057594046644480, LocalPathId: 1], version: 4 2025-06-24T21:23:34.129087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-24T21:23:34.129394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976710658 2025-06-24T21:23:34.129438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976710658 2025-06-24T21:23:34.129444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976710658 2025-06-24T21:23:34.129454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710658, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 2 2025-06-24T21:23:34.129469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-24T21:23:34.129506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710658, ready parts: 0/1, is published: true 2025-06-24T21:23:34.131499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710658, a ... 
tatus: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user6tenant1admin },{ Sid: user2 },{ Sid: user1rootadmin }] Groups: [] } Children [.metadata,Dir1,Dir2,Table0,Tenant1,Tenant2] }] } 2025-06-24T21:25:24.340203Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519630136142698750:2391], row count: 4, finished: 0 2025-06-24T21:25:24.340367Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:25:24.340558Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata TableId: [72057594046644480:5:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [workload_manager] }] } 2025-06-24T21:25:24.340595Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519630136142698750:2391], row count: 0, finished: 0 2025-06-24T21:25:24.341063Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:25:24.341450Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager TableId: [72057594046644480:6:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [pools] }] } 2025-06-24T21:25:24.341628Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519630136142698750:2391], row count: 0, finished: 0 2025-06-24T21:25:24.342335Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/pools TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: 
false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:25:24.343326Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/pools TableId: [72057594046644480:7:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [default] }] } 2025-06-24T21:25:24.343371Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519630136142698750:2391], row count: 0, finished: 0 2025-06-24T21:25:24.343450Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/pools/default TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:25:24.343670Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/pools/default TableId: [72057594046644480:8:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindResourcePool DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:25:24.343764Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519630136142698750:2391], row count: 6, finished: 0 2025-06-24T21:25:24.343878Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:25:24.344059Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [72057594046644480:9:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-24T21:25:24.344097Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519630136142698750:2391], row count: 1, finished: 0 
2025-06-24T21:25:24.344679Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir2 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:25:24.345142Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir2 TableId: [72057594046644480:10:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-24T21:25:24.345194Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519630136142698750:2391], row count: 0, finished: 0 2025-06-24T21:25:24.345942Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:25:24.346157Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table0 TableId: [72057594046644480:4:1] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:25:24.346188Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519630136142698750:2391], row count: 0, finished: 0 2025-06-24T21:25:24.346259Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [41:7519630136142698750:2391], owner: [41:7519630136142698746:2389], scan id: 0, sys view info: Type: EAuthPermissions SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:25:24.347455Z node 41 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [41:7519630097487990905:2099], database# , query hash# 12107705915200741666, cpu time# 177275 2025-06-24T21:25:24.348194Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800324294, txId: 281474976710692] shutting down 2025-06-24T21:25:24.357549Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 45 2025-06-24T21:25:24.358074Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(45, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:25:24.358772Z node 42 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:25:24.375664Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 42 2025-06-24T21:25:24.376375Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(42, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:25:24.376543Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 43 2025-06-24T21:25:24.376440Z node 44 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:25:24.380063Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(43, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:25:24.380401Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 44 2025-06-24T21:25:24.381234Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(44, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:25:24.385630Z node 41 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[44:7519630101855286798:2105], Type=268959746 >> TLocksTest::Range_BrokenLock1 [GOOD] >> TFlatTest::RejectByPerShardReadSize [GOOD] >> TFlatTest::RejectByPerRequestSize >> TLocksTest::GoodNullLock [GOOD] >> SystemView::AuthGroupMembers_TableRange [GOOD] >> SystemView::AuthEffectivePermissions ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::Range_BrokenLock1 [GOOD] Test command err: 2025-06-24T21:24:48.719802Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629982254039448:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:48.720355Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a77/r3tmp/tmpTy5kJz/pdisk_1.dat 2025-06-24T21:24:49.061697Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:49.063016Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629982254039430:2079] 1750800288718945 != 1750800288718948 2025-06-24T21:24:49.124181Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:49.124307Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:49.128563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18947 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:49.398303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:49.423637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:49.434755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:24:49.439994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:49.598403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:49.672551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:49.729467Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:51.975438Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629992680073317:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:51.975501Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a77/r3tmp/tmpeEglIU/pdisk_1.dat 2025-06-24T21:24:52.095214Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:52.096844Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519629992680073289:2079] 1750800291972064 != 1750800291972067 2025-06-24T21:24:52.126335Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:52.126414Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:52.127905Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9836 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:52.300598Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:52.327610Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:52.400490Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:52.474350Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:55.500219Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630012826945164:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:55.500394Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a77/r3tmp/tmpqfPDSa/pdisk_1.dat 2025-06-24T21:24:55.620449Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630012826945145:2079] 1750800295499766 != 1750800295499769 2025-06-24T21:24:55.628732Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:55.649088Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:55.649171Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:55.650840Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27341 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:24:55.827084Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:55.835478Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at ... [8:7519630101775770457:2079] 1750800316139174 != 1750800316139177 2025-06-24T21:25:16.322841Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:16.322946Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:16.325761Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1391 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:16.623864Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:16.644274Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:16.723742Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:16.786231Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:20.614960Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519630120157181188:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:20.615253Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a77/r3tmp/tmp3VzGbk/pdisk_1.dat 2025-06-24T21:25:20.842156Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:20.842262Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:20.846653Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:20.851397Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519630120157181167:2079] 1750800320613015 != 1750800320613018 2025-06-24T21:25:20.873977Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30593 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:21.222100Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:21.228156Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:25:21.243722Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:21.314695Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:21.374984Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:25.301324Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519630139816413381:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:25.301422Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a77/r3tmp/tmpVrtz5T/pdisk_1.dat 2025-06-24T21:25:25.483215Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:25.495327Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519630139816413356:2079] 1750800325299601 != 1750800325299604 2025-06-24T21:25:25.495834Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:25.495934Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:25.499240Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26081 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:25.816080Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:25.837573Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:25.926865Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:25.992887Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> TBlobStorageWardenTest::TestSendUsefulMonitoring >> TBlobStorageWardenTest::TestUnmonitoredEventsThenNoMonitorings >> TBlobStorageWardenTest::TestLimitedKeylessGroupThenNoMonitoring >> TBlobStorageWardenTest::TestSendToInvalidGroupId >> BindQueue::Basic >> TBlobStorageWardenTest::TestHttpMonPage |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/slow/unittest |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/slow/unittest >> TPQTestSlow::TestWriteVeryBigMessage >> SlowTopicAutopartitioning::CDC_Write >> TPQTestSlow::TestOnDiskStoredSourceIds ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::GoodNullLock [GOOD] Test command err: 2025-06-24T21:24:49.942339Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629984842094809:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:49.942490Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a75/r3tmp/tmprFUOvF/pdisk_1.dat 2025-06-24T21:24:50.243465Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629984842094791:2079] 1750800289941487 != 1750800289941490 2025-06-24T21:24:50.245592Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:50.311509Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:50.311624Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:50.313417Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30356 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:50.562128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:50.603708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:50.735783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:24:50.779834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:53.104825Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630000794948539:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:53.104900Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a75/r3tmp/tmp9ZzESq/pdisk_1.dat 2025-06-24T21:24:53.251677Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:53.253124Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630000794948518:2079] 1750800293103995 != 1750800293103998 2025-06-24T21:24:53.281188Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:53.281277Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:53.283500Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18147 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:53.458226Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:53.467619Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... waiting... 
2025-06-24T21:24:53.483657Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:53.577518Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:53.633258Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:56.715863Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630013238862988:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:56.716387Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a75/r3tmp/tmpzcf7om/pdisk_1.dat 2025-06-24T21:24:56.843879Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:56.855273Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:56.855298Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630013238862939:2079] 1750800296700983 != 1750800296700986 2025-06-24T21:24:56.855365Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:56.858584Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21083 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:57.067294Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:57.082136Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:24:57.086502Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/ ... Unknown -> Disconnected 2025-06-24T21:25:17.641686Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:17.644348Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5142 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:17.894957Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:17.903474Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:17.919442Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:25:17.924684Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:18.004641Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:18.064570Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:22.118755Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519630126968879210:2148];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a75/r3tmp/tmpeNYbEj/pdisk_1.dat 2025-06-24T21:25:22.178027Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:25:22.282409Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:22.284797Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519630126968879085:2079] 1750800322100094 != 1750800322100097 2025-06-24T21:25:22.303218Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:22.303344Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:22.305732Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20408 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 
2025-06-24T21:25:22.614237Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:22.638696Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:22.716932Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:22.780550Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:26.942013Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519630143206019678:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:26.942123Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a75/r3tmp/tmpAmf4Hw/pdisk_1.dat 2025-06-24T21:25:27.079474Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:27.081452Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519630143206019659:2079] 1750800326941419 != 1750800326941422 2025-06-24T21:25:27.101343Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:27.101440Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:27.103070Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21424 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:27.404587Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:27.422595Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:27.502331Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:27.559995Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
>> TBlobStorageWardenTest::TestUnmonitoredEventsThenNoMonitorings [GOOD] >> TBlobStorageWardenTest::TestSendToInvalidGroupId [GOOD] >> TBlobStorageWardenTest::TestSendUsefulMonitoring [GOOD] >> TBlobStorageWardenTest::TestLimitedKeylessGroupThenNoMonitoring [GOOD] >> TPQTabletTests::Multiple_PQTablets_1 >> TPartitionTests::SetOffset >> TPartitionTests::CorrectRange_Multiple_Transactions >> TPQTestInternal::TestPartitionedBlobSimpleTest [GOOD] >> TPQTestInternal::TestPartitionedBigTest >> TPartitionTests::TabletConfig_Is_Newer_That_PartitionConfig >> TTypeCodecsTest::TestFixedLenCodec [GOOD] >> TTypeCodecsTest::TestVarLenCodec [GOOD] >> TTypeCodecsTest::TestVarIntCodec [GOOD] >> TTypeCodecsTest::TestZigZagCodec [GOOD] >> TTypeCodecsTest::TestDeltaZigZagCodec [GOOD] >> TLocksTest::BrokenDupLock [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestUnmonitoredEventsThenNoMonitorings [GOOD] Test command err: 2025-06-24T21:25:32.410313Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.413014Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.413799Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a5a/r3tmp/tmpR2bkgy/pdisk_1.dat 2025-06-24T21:25:32.591934Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.592065Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.592324Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:33.062622Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [e2e5f1b9c917f854] bootstrap ActorId# [1:481:2459] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:8:0:0:1341:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-24T21:25:33.062819Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1341:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.062867Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1341:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.062895Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: 
[e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1341:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.062922Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1341:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.062949Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1341:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.062975Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1341:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.063017Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [e2e5f1b9c917f854] restore Id# [72057594037932033:2:8:0:0:1341:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T21:25:33.063111Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [e2e5f1b9c917f854] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:8:0:0:1341:1] Marker# BPG33 2025-06-24T21:25:33.063185Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2e5f1b9c917f854] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:8:0:0:1341:1] Marker# BPG32 2025-06-24T21:25:33.063230Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [e2e5f1b9c917f854] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:8:0:0:1341:2] Marker# BPG33 2025-06-24T21:25:33.063257Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2e5f1b9c917f854] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:8:0:0:1341:2] Marker# BPG32 2025-06-24T21:25:33.063294Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [e2e5f1b9c917f854] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:8:0:0:1341:3] Marker# BPG33 2025-06-24T21:25:33.063323Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2e5f1b9c917f854] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:8:0:0:1341:3] Marker# BPG32 2025-06-24T21:25:33.063486Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:42:2087] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1341:3] FDS# 1341 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.063552Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:35:2080] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1341:2] FDS# 1341 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.063600Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:56:2101] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1341:1] FDS# 1341 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.065873Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2e5f1b9c917f854] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1341:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 4 } Cost# 90559 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 5 }}}} from# [2000000:1:0:1:0] Marker# BPP01 2025-06-24T21:25:33.066092Z node 
1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2e5f1b9c917f854] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1341:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 90559 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:3:0] Marker# BPP01 2025-06-24T21:25:33.066180Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2e5f1b9c917f854] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1341:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 90559 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:0:0] Marker# BPP01 2025-06-24T21:25:33.066257Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [e2e5f1b9c917f854] Result# TEvPutResult {Id# [72057594037932033:2:8:0:0:1341:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 33554432 Marker# BPP12 2025-06-24T21:25:33.066319Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [e2e5f1b9c917f854] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:8:0:0:1341:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-24T21:25:33.066515Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 1.198 sample PartId# [72057594037932033:2:8:0:0:1341:3] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.198 sample PartId# [72057594037932033:2:8:0:0:1341:2] QueryCount# 1 VDiskId# [2000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.199 sample PartId# [72057594037932033:2:8:0:0:1341:1] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 3.509 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 3.693 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 3.779 VDiskId# [2000000:1:0:0:0] NodeId# 1 Status# OK } ] } 2025-06-24T21:25:33.130426Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [a55b41de52eb2a08] bootstrap ActorId# [1:527:2496] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:9:0:0:224:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-24T21:25:33.130594Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [a55b41de52eb2a08] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.130644Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [a55b41de52eb2a08] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.130673Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [a55b41de52eb2a08] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.130702Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [a55b41de52eb2a08] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.130728Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [a55b41de52eb2a08] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 3 part# 1 situation# ESituation::Unknown 
Marker# BPG51 2025-06-24T21:25:33.130754Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [a55b41de52eb2a08] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.130789Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [a55b41de52eb2a08] restore Id# [72057594037932033:2:9:0:0:224:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T21:25:33.130864Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [a55b41de52eb2a08] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:9:0:0:224:1] Marker# BPG33 2025-06-24T21:25:33.130911Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [a55b41de52eb2a08] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:9:0:0:224:1] Marker# BPG32 2025-06-24T21:25:33.130953Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [a55b41de52eb2a08] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:9:0:0:224:2] Marker# BPG33 2025-06-24T21:25:33.130989Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [a55b41de52eb2a08] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:9:0:0:224:2] Marker# BPG32 2025-06-24T21:25:33.131023Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [a55b41de52eb2a08] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:9:0:0:224:3] Marker# BPG33 2025-06-24T21:25:33.131050Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [a55b41de52eb2a08] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:9:0:0:224:3] Marker# BPG32 2025-06-24T21:25:33.131223Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:35:2080] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:224:3] FDS# 224 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.131289Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:56:2101] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:224:2] FDS# 224 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.131371Z node 1 :BS_PROXY DEBUG: group_sessions.h: ... 
vVPut# {ID# [72057594037932033:2:10:0:0:238:3] FDS# 238 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.165622Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:49:2094] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:10:0:0:238:2] FDS# 238 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.165658Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:42:2087] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:10:0:0:238:1] FDS# 238 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.168665Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [1a43693427d0a82b] received {EvVPutResult Status# OK ID# [72057594037932033:2:10:0:0:238:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 5 } Cost# 81874 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 6 }}}} from# [2000000:1:0:1:0] Marker# BPP01 2025-06-24T21:25:33.168878Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [1a43693427d0a82b] received {EvVPutResult Status# OK ID# [72057594037932033:2:10:0:0:238:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 81874 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:2:0] Marker# BPP01 2025-06-24T21:25:33.168970Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [1a43693427d0a82b] received {EvVPutResult Status# OK ID# [72057594037932033:2:10:0:0:238:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 12 } Cost# 81874 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 13 }}}} from# [2000000:1:0:3:0] Marker# BPP01 2025-06-24T21:25:33.169053Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [1a43693427d0a82b] Result# TEvPutResult {Id# [72057594037932033:2:10:0:0:238:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 33554432 Marker# BPP12 2025-06-24T21:25:33.169112Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [1a43693427d0a82b] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:10:0:0:238:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-24T21:25:33.169301Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 1.042 sample PartId# [72057594037932033:2:10:0:0:238:3] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.043 sample PartId# [72057594037932033:2:10:0:0:238:2] QueryCount# 1 VDiskId# [2000000:1:0:2:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.043 sample PartId# [72057594037932033:2:10:0:0:238:1] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 4.113 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 4.27 VDiskId# [2000000:1:0:2:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 4.368 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } ] } 2025-06-24T21:25:33.170902Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 2181038082 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false 
Marker# DSP02 2025-06-24T21:25:33.170956Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-24T21:25:33.173268Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:528:2497] Create Queue# [1:532:2500] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.173427Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:528:2497] Create Queue# [1:533:2501] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.173538Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:528:2497] Create Queue# [1:534:2502] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.173658Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:528:2497] Create Queue# [1:535:2503] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.173771Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:528:2497] Create Queue# [1:536:2504] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.173887Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:528:2497] Create Queue# [1:537:2505] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.174001Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:528:2497] Create Queue# [1:538:2506] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.174030Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 2181038082 SetStateEstablishingSessions Marker# DSP03 2025-06-24T21:25:33.174713Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 1 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.174820Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 2 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.174898Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 3 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.174964Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 4 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.175013Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 5 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 
2025-06-24T21:25:33.175084Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 6 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.175123Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 7 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.175140Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:194: Group# 2181038082 -> StateWork Marker# DSP11 2025-06-24T21:25:33.175196Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:80: Group# 2181038082 SetStateWork Marker# DSP15 2025-06-24T21:25:33.175339Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:150: [bba3bffd2e286f4b] bootstrap ActorId# [1:539:2507] Group# 2181038082 TabletId# 1234 Generation# 1 Deadline# 586524-01-19T08:01:49.551615Z RestartCounter# 0 Marker# DSPB05 2025-06-24T21:25:33.175393Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:111: [bba3bffd2e286f4b] Sending TEvVBlock Tablet# 1234 Generation# 1 vdiskId# [82000002:1:0:0:0] node# 1 Marker# DSPB03 2025-06-24T21:25:33.175594Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:532:2500] NKikimr::TEvBlobStorage::TEvVBlock# NKikimrBlobStorage.TEvVBlock TabletId: 1234 Generation: 1 VDiskID { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } IssuerGuid: 4487274662089644063 MsgQoS { ExtQueueId: PutTabletLog } cookie# 0 2025-06-24T21:25:33.176821Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:43: [bba3bffd2e286f4b] Handle TEvVBlockResult status# OK From# [82000002:1:0:0:0] NodeId# 1 Marker# DSPB01 2025-06-24T21:25:33.176901Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:100: [bba3bffd2e286f4b] Result# TEvBlockResult {Status# OK} Marker# DSPB04 2025-06-24T21:25:33.177257Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:532:2500] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[1234:4294967295:4294967295:0] collect=[4294967295:4294967295] cookie# 0 2025-06-24T21:25:33.178998Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:301: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 0 Marker# DSP57 initialize full monitoring 2025-06-24T21:25:33.179730Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:150: [d70ef3c23a1a2346] bootstrap ActorId# [1:541:2509] Group# 2181038082 TabletId# 1234 Generation# 3 Deadline# 586524-01-19T08:01:49.551615Z RestartCounter# 0 Marker# DSPB05 2025-06-24T21:25:33.179793Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:111: [d70ef3c23a1a2346] Sending TEvVBlock Tablet# 1234 Generation# 3 vdiskId# [82000002:1:0:0:0] node# 1 Marker# DSPB03 2025-06-24T21:25:33.179965Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:532:2500] NKikimr::TEvBlobStorage::TEvVBlock# NKikimrBlobStorage.TEvVBlock TabletId: 1234 Generation: 3 VDiskID { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } IssuerGuid: 1231177051124577385 MsgQoS { ExtQueueId: PutTabletLog } cookie# 0 2025-06-24T21:25:33.184961Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:43: [d70ef3c23a1a2346] Handle 
TEvVBlockResult status# OK From# [82000002:1:0:0:0] NodeId# 1 Marker# DSPB01 2025-06-24T21:25:33.185052Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:100: [d70ef3c23a1a2346] Result# TEvBlockResult {Status# OK} Marker# DSPB04 2025-06-24T21:25:33.185560Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:150: [91379e686f748e92] bootstrap ActorId# [1:542:2510] Group# 2181038082 TabletId# 1234 Generation# 4 Deadline# 586524-01-19T08:01:49.551615Z RestartCounter# 0 Marker# DSPB05 2025-06-24T21:25:33.185623Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:111: [91379e686f748e92] Sending TEvVBlock Tablet# 1234 Generation# 4 vdiskId# [82000002:1:0:0:0] node# 1 Marker# DSPB03 2025-06-24T21:25:33.185808Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:532:2500] NKikimr::TEvBlobStorage::TEvVBlock# NKikimrBlobStorage.TEvVBlock TabletId: 1234 Generation: 4 VDiskID { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } IssuerGuid: 14760176079845937175 MsgQoS { ExtQueueId: PutTabletLog } cookie# 0 2025-06-24T21:25:33.186944Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:43: [91379e686f748e92] Handle TEvVBlockResult status# OK From# [82000002:1:0:0:0] NodeId# 1 Marker# DSPB01 2025-06-24T21:25:33.187020Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:100: [91379e686f748e92] Result# TEvBlockResult {Status# OK} Marker# DSPB04 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestSendToInvalidGroupId [GOOD] Test command err: 2025-06-24T21:25:32.413234Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.413819Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.415103Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.416352Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.416551Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.417160Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.417789Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # 
/home/runner/.ya/build/build_root/2btm/004a57/r3tmp/tmpRBpeKD/pdisk_1.dat 2025-06-24T21:25:33.023194Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [e2e5f1b9c917f854] bootstrap ActorId# [1:485:2463] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:8:0:0:1338:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-24T21:25:33.023435Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1338:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.023509Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1338:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.023540Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1338:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.023582Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1338:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.023609Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1338:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.023648Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1338:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.023691Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [e2e5f1b9c917f854] restore Id# [72057594037932033:2:8:0:0:1338:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T21:25:33.023775Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [e2e5f1b9c917f854] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:8:0:0:1338:1] Marker# BPG33 2025-06-24T21:25:33.023850Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2e5f1b9c917f854] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:8:0:0:1338:1] Marker# BPG32 2025-06-24T21:25:33.023904Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [e2e5f1b9c917f854] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:8:0:0:1338:2] Marker# BPG33 2025-06-24T21:25:33.023938Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2e5f1b9c917f854] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:8:0:0:1338:2] Marker# BPG32 2025-06-24T21:25:33.023971Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [e2e5f1b9c917f854] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:8:0:0:1338:3] Marker# BPG33 2025-06-24T21:25:33.024000Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2e5f1b9c917f854] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:8:0:0:1338:3] Marker# BPG32 2025-06-24T21:25:33.024192Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:42:2087] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1338:3] FDS# 1338 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.024266Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:35:2080] NKikimr::TEvBlobStorage::TEvVPut# {ID# 
[72057594037932033:2:8:0:0:1338:2] FDS# 1338 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.024314Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:56:2101] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1338:1] FDS# 1338 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.026463Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2e5f1b9c917f854] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1338:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 9 } Cost# 90535 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 10 }}}} from# [2000000:1:0:3:0] Marker# BPP01 2025-06-24T21:25:33.026952Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2e5f1b9c917f854] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1338:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 8 } Cost# 90535 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 9 }}}} from# [2000000:1:0:0:0] Marker# BPP01 2025-06-24T21:25:33.027068Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2e5f1b9c917f854] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1338:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 90535 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:1:0] Marker# BPP01 2025-06-24T21:25:33.027166Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [e2e5f1b9c917f854] Result# TEvPutResult {Id# [72057594037932033:2:8:0:0:1338:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 33554432 Marker# BPP12 2025-06-24T21:25:33.027247Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [e2e5f1b9c917f854] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:8:0:0:1338:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-24T21:25:33.027446Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 2.201 sample PartId# [72057594037932033:2:8:0:0:1338:3] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPut{ TimestampMs# 2.201 sample PartId# [72057594037932033:2:8:0:0:1338:2] QueryCount# 1 VDiskId# [2000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 2.202 sample PartId# [72057594037932033:2:8:0:0:1338:1] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 4.403 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 4.854 VDiskId# [2000000:1:0:0:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 4.955 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } ] } 2025-06-24T21:25:33.045981Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 4294967295 IsLimitedKeyless# 0 fullIfPossible# 1 Marker# DSP58 2025-06-24T21:25:33.048803Z node 1 :BS_PROXY CRIT: dsproxy_impl.h:309: The request was sent for an invalid groupID Group# 4294967295 HandleError ev# TEvBlock {TabletId# 1234 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} Response# TEvBlockResult {Status# ERROR ErrorReason# "Created 
as unconfigured in error state (DSPE11). It happens when the request was sent for an invalid groupID"} Marker# DSP31 Sending TEvPut 2025-06-24T21:25:33.049189Z node 1 :BS_PROXY DEBUG: dsproxy_impl.h:309: The request was sent for an invalid groupID Group# 4294967295 HandleError ev# TEvPut {Id# [1234:1:0:0:0:5:0] Size# 5 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} Response# TEvPutResult {Id# [1234:1:0:0:0:5:0] Status# ERROR StatusFlags# { } ErrorReason# "Created as unconfigured in error state (DSPE11). It happens when the request was sent for an invalid groupID" ApproximateFreeSpaceShare# 0} Marker# DSP31 2025-06-24T21:25:33.049366Z node 1 :BS_PROXY DEBUG: dsproxy_impl.h:309: The request was sent for an invalid groupID Group# 4294967295 HandleError ev# TEvCollectGarbage {TabletId# 1234 RecordGeneration# 4294967295 PerGenerationCounter# 4294967295 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 4294967295 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 1 IsMonitored# 1} Response# TEvCollectGarbageResult {TabletId# 1234 RecordGeneration# 4294967295 PerGenerationCounter# 4294967295 Channel# 0 Status# ERROR ErrorReason# "Created as unconfigured in error state (DSPE11). It happens when the request was sent for an invalid groupID"} Marker# DSP31 >> TPQTest::TestCmdReadWithLastOffset ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestSendUsefulMonitoring [GOOD] Test command err: 2025-06-24T21:25:32.417004Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.418060Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.419736Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.419893Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.419984Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.421755Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a48/r3tmp/tmpWu4HxD/pdisk_1.dat 2025-06-24T21:25:33.031136Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [3ca1a99c83a6f037] bootstrap ActorId# [1:551:2463] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:8:0:0:1329:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 
Marker# BPP13 2025-06-24T21:25:33.031336Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1329:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.031381Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1329:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.031421Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1329:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.031451Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1329:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.031476Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1329:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.031505Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1329:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.031545Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [3ca1a99c83a6f037] restore Id# [72057594037932033:2:8:0:0:1329:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T21:25:33.031620Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [3ca1a99c83a6f037] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:8:0:0:1329:1] Marker# BPG33 2025-06-24T21:25:33.031666Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [3ca1a99c83a6f037] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:8:0:0:1329:1] Marker# BPG32 2025-06-24T21:25:33.031709Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [3ca1a99c83a6f037] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:8:0:0:1329:2] Marker# BPG33 2025-06-24T21:25:33.031741Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [3ca1a99c83a6f037] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:8:0:0:1329:2] Marker# BPG32 2025-06-24T21:25:33.031773Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [3ca1a99c83a6f037] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:8:0:0:1329:3] Marker# BPG33 2025-06-24T21:25:33.031796Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [3ca1a99c83a6f037] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:8:0:0:1329:3] Marker# BPG32 2025-06-24T21:25:33.031949Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:67:2092] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1329:3] FDS# 1329 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.032018Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:60:2085] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1329:2] FDS# 1329 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.032067Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:81:2106] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1329:1] FDS# 1329 HandleClass# 
TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.034094Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [3ca1a99c83a6f037] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1329:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 9 } Cost# 90464 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 10 }}}} from# [2000000:1:0:0:0] Marker# BPP01 2025-06-24T21:25:33.034351Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [3ca1a99c83a6f037] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1329:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 90464 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:1:0] Marker# BPP01 2025-06-24T21:25:33.034448Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [3ca1a99c83a6f037] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1329:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 8 } Cost# 90464 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 9 }}}} from# [2000000:1:0:3:0] Marker# BPP01 2025-06-24T21:25:33.034548Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [3ca1a99c83a6f037] Result# TEvPutResult {Id# [72057594037932033:2:8:0:0:1329:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 33554432 Marker# BPP12 2025-06-24T21:25:33.034614Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [3ca1a99c83a6f037] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:8:0:0:1329:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-24T21:25:33.034814Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 1.125 sample PartId# [72057594037932033:2:8:0:0:1329:3] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.126 sample PartId# [72057594037932033:2:8:0:0:1329:2] QueryCount# 1 VDiskId# [2000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.127 sample PartId# [72057594037932033:2:8:0:0:1329:1] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 3.204 VDiskId# [2000000:1:0:0:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 3.414 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 3.51 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } ] } 2025-06-24T21:25:33.090260Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [bba3bffd2e286f4b] bootstrap ActorId# [1:597:2500] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:9:0:0:229:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-24T21:25:33.090445Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.090504Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.090546Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: 
[bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.090572Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.090598Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.090624Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.090666Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [bba3bffd2e286f4b] restore Id# [72057594037932033:2:9:0:0:229:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T21:25:33.090750Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [bba3bffd2e286f4b] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:9:0:0:229:1] Marker# BPG33 2025-06-24T21:25:33.090799Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [bba3bffd2e286f4b] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:9:0:0:229:1] Marker# BPG32 2025-06-24T21:25:33.090845Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [bba3bffd2e286f4b] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:9:0:0:229:2] Marker# BPG33 2025-06-24T21:25:33.090871Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [bba3bffd2e286f4b] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:9:0:0:229:2] Marker# BPG32 2025-06-24T21:25:33.090900Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [bba3bffd2e286f4b] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:9:0:0:229:3] Marker# BPG33 2025-06-24T21:25:33.090925Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [bba3bffd2e286f4b] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:9:0:0:229:3] Marker# BPG32 2025-06-24T21:25:33.091092Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:60:2085] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:229:3] FDS# 229 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.091174Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:81:2106] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:229:2] FDS# 229 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.091226Z node 1 :BS_PROXY DEBUG: group_sessions.h:16 ... 
oup# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 1 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.137157Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 2 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.137253Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 3 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.137334Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 4 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.137406Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 5 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.137455Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 6 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.137507Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 7 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.137539Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:194: Group# 2181038082 -> StateWork Marker# DSP11 2025-06-24T21:25:33.137597Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:80: Group# 2181038082 SetStateWork Marker# DSP15 2025-06-24T21:25:33.137644Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:301: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 0 Marker# DSP57 initialize full monitoring 2025-06-24T21:25:33.138533Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [d70ef3c23a1a2346] bootstrap ActorId# [1:609:2511] Group# 2181038082 BlobCount# 1 BlobIDs# [[1234:2:0:0:0:5:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-24T21:25:33.138656Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [d70ef3c23a1a2346] Id# 
[1234:2:0:0:0:5:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.138691Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [d70ef3c23a1a2346] restore Id# [1234:2:0:0:0:5:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T21:25:33.138732Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [d70ef3c23a1a2346] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [1234:2:0:0:0:5:1] Marker# BPG33 2025-06-24T21:25:33.138760Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [d70ef3c23a1a2346] Sending missing VPut part# 0 to# 0 blob Id# [1234:2:0:0:0:5:1] Marker# BPG32 2025-06-24T21:25:33.138855Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:602:2504] NKikimr::TEvBlobStorage::TEvVPut# {ID# [1234:2:0:0:0:5:1] FDS# 5 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.141944Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [d70ef3c23a1a2346] received {EvVPutResult Status# OK ID# [1234:2:0:0:0:5:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 80039 ExtQueueId# PutTabletLog IntQueueId# IntPutLog CostSettings# { SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257} Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 1 }}}} from# [82000002:1:0:0:0] Marker# BPP01 2025-06-24T21:25:33.142043Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [d70ef3c23a1a2346] Result# TEvPutResult {Id# [1234:2:0:0:0:5:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 2181038082 Marker# BPP12 2025-06-24T21:25:33.142096Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [d70ef3c23a1a2346] SendReply putResult# TEvPutResult {Id# [1234:2:0:0:0:5:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-24T21:25:33.142258Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2181038082 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.52 sample PartId# [1234:2:0:0:0:5:1] QueryCount# 1 VDiskId# [82000002:1:0:0:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 3.62 VDiskId# [82000002:1:0:0:0] NodeId# 1 Status# OK } ] } 2025-06-24T21:25:33.142823Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 2181038082 TEvConfigureProxy received GroupGeneration# IsLimitedKeyless# false Marker# DSP02 2025-06-24T21:25:33.142871Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:57: Group# 2181038082 SetStateUnconfigured Marker# DSP07 2025-06-24T21:25:33.143030Z node 2 :BS_PROXY DEBUG: dsproxy_impl.h:205: Group# 2181038082 HandleEnqueue# TEvCollectGarbage {TabletId# 1234 RecordGeneration# 4294967295 PerGenerationCounter# 4294967295 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 4294967295 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 1 IsMonitored# 1} Marker# DSP17 2025-06-24T21:25:33.144231Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 2181038082 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# true Marker# DSP02 2025-06-24T21:25:33.144296Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 1 fullIfPossible# 0 Marker# DSP58 2025-06-24T21:25:33.146402Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# 
[2:611:2106] Create Queue# [2:613:2107] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.146570Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:611:2106] Create Queue# [2:614:2108] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.146690Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:611:2106] Create Queue# [2:615:2109] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.146831Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:611:2106] Create Queue# [2:616:2110] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.146939Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:611:2106] Create Queue# [2:617:2111] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.147050Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:611:2106] Create Queue# [2:618:2112] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.147190Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:611:2106] Create Queue# [2:619:2113] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.147222Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 2181038082 SetStateEstablishingSessions Marker# DSP03 2025-06-24T21:25:33.148620Z node 2 :BS_NODE ERROR: {NW19@node_warden_group.cpp:212} error while parsing group GroupId# 2181038082 Err# LifeCyclePhase# KEY_NOT_LOADED Key.Id# "" Key.Version# 0 MainKey.Id# "/home/runner/.ya/build/build_root/2btm/004a48/r3tmp/tmpWu4HxD//key.txt" MainKey.Version# 1 GroupKeyNonce# 2181038082 2025-06-24T21:25:33.149053Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 1 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.149301Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 2 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.149390Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 3 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.149621Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 4 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.149706Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 5 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 
0.000000s Marker# DSP04 2025-06-24T21:25:33.149778Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 6 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.149830Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 7 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.149858Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:194: Group# 2181038082 -> StateWork Marker# DSP11 2025-06-24T21:25:33.149898Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:80: Group# 2181038082 SetStateWork Marker# DSP15 2025-06-24T21:25:33.150076Z node 2 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [2:613:2107] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[1234:4294967295:4294967295:0] collect=[4294967295:4294967295] cookie# 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestLimitedKeylessGroupThenNoMonitoring [GOOD] Test command err: 2025-06-24T21:25:32.415295Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.416031Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.417383Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.417523Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.417597Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.419307Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a4e/r3tmp/tmpG8nam6/pdisk_1.dat 2025-06-24T21:25:33.034288Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [3ca1a99c83a6f037] bootstrap ActorId# [1:551:2463] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:8:0:0:1328:0]] HandleClass# TabletLog Tactic# MinLatency 
RestartCounter# 0 Marker# BPP13 2025-06-24T21:25:33.034548Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1328:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.034610Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1328:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.034652Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1328:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.034692Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1328:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.034744Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1328:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.034780Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1328:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.034827Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [3ca1a99c83a6f037] restore Id# [72057594037932033:2:8:0:0:1328:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T21:25:33.034915Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [3ca1a99c83a6f037] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:8:0:0:1328:1] Marker# BPG33 2025-06-24T21:25:33.034975Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [3ca1a99c83a6f037] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:8:0:0:1328:1] Marker# BPG32 2025-06-24T21:25:33.035033Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [3ca1a99c83a6f037] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:8:0:0:1328:2] Marker# BPG33 2025-06-24T21:25:33.035074Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [3ca1a99c83a6f037] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:8:0:0:1328:2] Marker# BPG32 2025-06-24T21:25:33.035118Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [3ca1a99c83a6f037] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:8:0:0:1328:3] Marker# BPG33 2025-06-24T21:25:33.035171Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [3ca1a99c83a6f037] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:8:0:0:1328:3] Marker# BPG32 2025-06-24T21:25:33.035351Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:67:2092] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1328:3] FDS# 1328 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.035421Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:60:2085] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1328:2] FDS# 1328 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.035489Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:81:2106] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1328:1] FDS# 
1328 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.037471Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [3ca1a99c83a6f037] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1328:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 9 } Cost# 90456 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 10 }}}} from# [2000000:1:0:0:0] Marker# BPP01 2025-06-24T21:25:33.037670Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [3ca1a99c83a6f037] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1328:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 90456 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:1:0] Marker# BPP01 2025-06-24T21:25:33.037737Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [3ca1a99c83a6f037] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1328:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 8 } Cost# 90456 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 9 }}}} from# [2000000:1:0:3:0] Marker# BPP01 2025-06-24T21:25:33.037797Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [3ca1a99c83a6f037] Result# TEvPutResult {Id# [72057594037932033:2:8:0:0:1328:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 33554432 Marker# BPP12 2025-06-24T21:25:33.037855Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [3ca1a99c83a6f037] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:8:0:0:1328:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-24T21:25:33.038024Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 1.438 sample PartId# [72057594037932033:2:8:0:0:1328:3] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.439 sample PartId# [72057594037932033:2:8:0:0:1328:2] QueryCount# 1 VDiskId# [2000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.44 sample PartId# [72057594037932033:2:8:0:0:1328:1] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 3.447 VDiskId# [2000000:1:0:0:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 3.605 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 3.67 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } ] } 2025-06-24T21:25:33.090421Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [bba3bffd2e286f4b] bootstrap ActorId# [1:597:2500] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:9:0:0:229:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-24T21:25:33.090607Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.090669Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.090699Z node 1 :BS_PROXY_PUT DEBUG: 
dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.090729Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.090759Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.090790Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.090835Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [bba3bffd2e286f4b] restore Id# [72057594037932033:2:9:0:0:229:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T21:25:33.090913Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [bba3bffd2e286f4b] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:9:0:0:229:1] Marker# BPG33 2025-06-24T21:25:33.090967Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [bba3bffd2e286f4b] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:9:0:0:229:1] Marker# BPG32 2025-06-24T21:25:33.091017Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [bba3bffd2e286f4b] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:9:0:0:229:2] Marker# BPG33 2025-06-24T21:25:33.091047Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [bba3bffd2e286f4b] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:9:0:0:229:2] Marker# BPG32 2025-06-24T21:25:33.091078Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [bba3bffd2e286f4b] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:9:0:0:229:3] Marker# BPG33 2025-06-24T21:25:33.091107Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [bba3bffd2e286f4b] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:9:0:0:229:3] Marker# BPG32 2025-06-24T21:25:33.091308Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:60:2085] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:229:3] FDS# 229 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.091387Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:81:2106] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:229:2] FDS# 229 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.091435Z node 1 :BS_PROXY DEBUG: group_sessions.h:165 ... 
utLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 3 }}}} from# [82000002:1:0:0:0] Marker# BPP01 2025-06-24T21:25:33.145727Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [cd65997ea3b51537] Result# TEvPutResult {Id# [1234:2:0:0:0:5:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 2181038082 Marker# BPP12 2025-06-24T21:25:33.145790Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [cd65997ea3b51537] SendReply putResult# TEvPutResult {Id# [1234:2:0:0:0:5:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-24T21:25:33.145903Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2181038082 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.56 sample PartId# [1234:2:0:0:0:5:1] QueryCount# 1 VDiskId# [82000002:1:0:0:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 3.632 VDiskId# [82000002:1:0:0:0] NodeId# 1 Status# OK } ] } 2025-06-24T21:25:33.146443Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 2181038082 TEvConfigureProxy received GroupGeneration# IsLimitedKeyless# false Marker# DSP02 2025-06-24T21:25:33.146510Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:57: Group# 2181038082 SetStateUnconfigured Marker# DSP07 2025-06-24T21:25:33.146612Z node 2 :BS_PROXY DEBUG: dsproxy_impl.h:205: Group# 2181038082 HandleEnqueue# TEvBlock {TabletId# 1234 Generation# 3 Deadline# 18446744073709551 IsMonitored# 1} Marker# DSP17 2025-06-24T21:25:33.147632Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 2181038082 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# true Marker# DSP02 2025-06-24T21:25:33.147668Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 1 fullIfPossible# 0 Marker# DSP58 2025-06-24T21:25:33.149347Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:613:2106] Create Queue# [2:615:2107] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.149487Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:613:2106] Create Queue# [2:616:2108] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.149611Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:613:2106] Create Queue# [2:617:2109] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.149724Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:613:2106] Create Queue# [2:618:2110] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.149797Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:613:2106] Create Queue# [2:619:2111] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.149863Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:613:2106] Create Queue# [2:620:2112] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.149963Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:613:2106] Create Queue# [2:621:2113] targetNodeId# 1 Marker# DSP01 2025-06-24T21:25:33.149990Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 2181038082 SetStateEstablishingSessions Marker# DSP03 2025-06-24T21:25:33.151102Z node 2 :BS_NODE ERROR: {NW19@node_warden_group.cpp:212} error while parsing group GroupId# 2181038082 Err# LifeCyclePhase# KEY_NOT_LOADED Key.Id# "" Key.Version# 0 MainKey.Id# "/home/runner/.ya/build/build_root/2btm/004a4e/r3tmp/tmpG8nam6//key.txt" MainKey.Version# 1 
GroupKeyNonce# 2181038082 2025-06-24T21:25:33.151505Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 1 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.151723Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 2 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.151819Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 3 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.151935Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 4 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.152005Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 5 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.152110Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 6 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.152151Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 7 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-24T21:25:33.152180Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:194: Group# 2181038082 -> StateWork Marker# DSP11 2025-06-24T21:25:33.152215Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:80: Group# 2181038082 SetStateWork Marker# DSP15 2025-06-24T21:25:33.152363Z node 2 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:150: [efc53170c63234c6] bootstrap ActorId# [2:622:2114] Group# 2181038082 TabletId# 1234 Generation# 3 Deadline# 586524-01-19T08:01:49.551615Z RestartCounter# 0 Marker# DSPB05 2025-06-24T21:25:33.152415Z node 2 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:111: [efc53170c63234c6] Sending TEvVBlock Tablet# 1234 Generation# 3 vdiskId# [82000002:1:0:0:0] node# 1 Marker# DSPB03 
2025-06-24T21:25:33.152587Z node 2 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [2:615:2107] NKikimr::TEvBlobStorage::TEvVBlock# NKikimrBlobStorage.TEvVBlock TabletId: 1234 Generation: 3 VDiskID { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } IssuerGuid: 7952081625286566355 MsgQoS { ExtQueueId: PutTabletLog } cookie# 0 2025-06-24T21:25:33.153818Z node 2 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:43: [efc53170c63234c6] Handle TEvVBlockResult status# OK From# [82000002:1:0:0:0] NodeId# 1 Marker# DSPB01 2025-06-24T21:25:33.153874Z node 2 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:100: [efc53170c63234c6] Result# TEvBlockResult {Status# OK} Marker# DSPB04 Sending TEvPut 2025-06-24T21:25:33.154140Z node 2 :BS_PROXY INFO: dsproxy_impl.h:309: Group# 2181038082 HandleError ev# TEvPut {Id# [1234:3:0:0:0:10:0] Size# 10 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} Response# TEvPutResult {Id# [1234:3:0:0:0:10:0] Status# ERROR StatusFlags# { } ErrorReason# "Created as LIMITED without keys. It happens when tenant keys are missing on the node." ApproximateFreeSpaceShare# 0} Marker# DSP31 Sending TEvPut 2025-06-24T21:25:33.154300Z node 2 :BS_PROXY DEBUG: dsproxy_impl.h:309: Group# 2181038082 HandleError ev# TEvPut {Id# [1234:4:0:0:0:10:0] Size# 10 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} Response# TEvPutResult {Id# [1234:4:0:0:0:10:0] Status# ERROR StatusFlags# { } ErrorReason# "Created as LIMITED without keys. It happens when tenant keys are missing on the node." ApproximateFreeSpaceShare# 0} Marker# DSP31 Sending TEvPut 2025-06-24T21:25:33.154582Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [c85e1a21dcb31b54] bootstrap ActorId# [1:623:2514] Group# 2181038082 BlobCount# 1 BlobIDs# [[1234:2:0:0:0:11:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-24T21:25:33.154690Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [c85e1a21dcb31b54] Id# [1234:2:0:0:0:11:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-24T21:25:33.154724Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [c85e1a21dcb31b54] restore Id# [1234:2:0:0:0:11:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-24T21:25:33.154790Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [c85e1a21dcb31b54] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [1234:2:0:0:0:11:1] Marker# BPG33 2025-06-24T21:25:33.154820Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [c85e1a21dcb31b54] Sending missing VPut part# 0 to# 0 blob Id# [1234:2:0:0:0:11:1] Marker# BPG32 2025-06-24T21:25:33.154928Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:602:2504] NKikimr::TEvBlobStorage::TEvVPut# {ID# [1234:2:0:0:0:11:1] FDS# 11 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-24T21:25:33.155112Z node 1 :BS_VDISK_PUT ERROR: blobstorage_skeleton.cpp:569: PDiskId# 1000 VDISK[82000002:_:0:0:0]: (2181038082) TEvVPut: failed to pass the Hull check; id# [1234:2:0:0:0:11:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-24T21:25:33.155420Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [c85e1a21dcb31b54] received {EvVPutResult Status# BLOCKED ErrorReason# "blocked" ID# [1234:2:0:0:0:11:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 3 } Cost# 80086 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { 
SequenceId: 1 MsgId: 4 }}}} from# [82000002:1:0:0:0] Marker# BPP01 2025-06-24T21:25:33.155518Z node 1 :BS_PROXY_PUT ERROR: dsproxy_put_impl.cpp:72: [c85e1a21dcb31b54] Result# TEvPutResult {Id# [1234:2:0:0:0:11:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000002:1:0:0:0]" ApproximateFreeSpaceShare# 0} GroupId# 2181038082 Marker# BPP12 2025-06-24T21:25:33.155591Z node 1 :BS_PROXY_PUT NOTICE: dsproxy_put.cpp:486: [c85e1a21dcb31b54] SendReply putResult# TEvPutResult {Id# [1234:2:0:0:0:11:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000002:1:0:0:0]" ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-24T21:25:33.155684Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2181038082 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.476 sample PartId# [1234:2:0:0:0:11:1] QueryCount# 1 VDiskId# [82000002:1:0:0:0] NodeId# 1 } ] } 2025-06-24T21:25:33.155991Z node 2 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [2:615:2107] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[1234:4294967295:4294967295:0] collect=[4294967295:4294967295] cookie# 0 >> TPQTest::TestPartitionTotalQuota >> TBlobStorageWardenTest::TestHttpMonPage [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TTypeCodecsTest::TestDeltaZigZagCodec [GOOD] Test command err: Size: 8002 Create chunk: 0.000189s Read by index: 0.000023s Iterate: 0.000148s Size: 8256 Create chunk: 0.000185s Read by index: 0.000030s Iterate: 0.000071s Size: 8532 Create chunk: 0.000129s Read by index: 0.000043s Iterate: 0.000036s Size: 7769 Create chunk: 0.000116s Read by index: 0.000049s Iterate: 0.000049s Size: 2853 Create chunk: 0.000131s Read by index: 0.000079s Iterate: 0.000036s Size: 2419 Create chunk: 0.000105s Read by index: 0.000098s Iterate: 0.000045s Size: 2929 Create chunk: 0.000136s Read by index: 0.000087s Iterate: 0.000039s Size: 2472 Create chunk: 0.000112s Read by index: 0.000094s Iterate: 0.000043s Size: 2407 Create chunk: 0.000093s Read by index: 0.000090s Iterate: 0.000042s Size: 2061 Create chunk: 0.000111s Read by index: 0.000093s Iterate: 0.000047s ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestHttpMonPage [GOOD] Test command err: 2025-06-24T21:25:32.413139Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.413725Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.415088Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.416179Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# 
[2000000:1:0:3:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.416465Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.417168Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:32.418148Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a4a/r3tmp/tmpUFeLSV/pdisk_1.dat 2025-06-24T21:25:33.663082Z node 2 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:33.663885Z node 2 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a4a/r3tmp/tmp4anFvh/pdisk_1.dat 2025-06-24T21:25:33.756658Z node 2 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:33.756788Z node 2 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-24T21:25:33.756861Z node 2 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 >> TPQTabletTests::Multiple_PQTablets_1 [GOOD] >> TFlatTest::RejectByPerRequestSize [GOOD] >> TPartitionTests::WriteSubDomainOutOfSpace_DisableExpiration >> TPQTestInternal::TestPartitionedBigTest [GOOD] >> TPQTestInternal::TestToHex [GOOD] >> TPQUserInfoTest::UserDataDeprecatedSerializaion [GOOD] >> TPQUtilsTest::TLastCounter [GOOD] >> TPartitionTests::TabletConfig_Is_Newer_That_PartitionConfig [GOOD] >> TPQTabletTests::DropTablet_And_Tx >> TPartitionTests::SetOffset [GOOD] >> TPartitionTests::CorrectRange_Multiple_Transactions [GOOD] >> TPartitionTests::ShadowPartitionCountersRestore >> TPartitionTests::OldPlanStep >> TPartitionTests::WriteSubDomainOutOfSpace_DisableExpiration [GOOD] >> TPartitionTests::CorrectRange_Rollback >> TPQTabletTests::DropTablet_And_Tx [GOOD] >> TPQTabletTests::DropTablet_Before_Write >> TPartitionTests::WriteSubDomainOutOfSpace_IgnoreQuotaDeadline >> TPartitionTests::WriteSubDomainOutOfSpace_IgnoreQuotaDeadline [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> 
TPQUtilsTest::TLastCounter [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::BrokenDupLock [GOOD] Test command err: 2025-06-24T21:24:52.737692Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629996448664396:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:52.737815Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a74/r3tmp/tmpbhgT5G/pdisk_1.dat 2025-06-24T21:24:53.073572Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:53.074313Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629996448664378:2079] 1750800292736679 != 1750800292736682 2025-06-24T21:24:53.141185Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:53.141369Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:53.144549Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5771 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:53.371510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:53.405922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:53.562491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:53.647181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:53.749507Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:55.845694Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630012325417046:2163];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:55.845938Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a74/r3tmp/tmpJFrpM0/pdisk_1.dat 2025-06-24T21:24:55.940883Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:55.947301Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630012325416919:2079] 1750800295808471 != 1750800295808474 2025-06-24T21:24:55.984957Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:55.985042Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:55.988528Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22414 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:24:56.214490Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:56.219527Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:56.238005Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:56.311249Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:56.383081Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:59.223683Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630028863749512:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:59.223751Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a74/r3tmp/tmpXSjpjs/pdisk_1.dat 2025-06-24T21:24:59.347410Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:59.350137Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630028863749493:2079] 1750800299223262 != 1750800299223265 2025-06-24T21:24:59.379442Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:59.379524Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:59.380720Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23602 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:59.575696Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:59.581470Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:24:59.597545Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is un ... patcher.cpp:1228: Notification cookie mismatch for subscription [8:7519630119124122249:2079] 1750800320164882 != 1750800320164885 TClient is connected to server localhost:16565 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-24T21:25:20.645174Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:25:20.659018Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:25:20.679392Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:25:20.684963Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:20.786988Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:20.858076Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:24.737359Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519630135765865529:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:24.737581Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a74/r3tmp/tmpzc6wqI/pdisk_1.dat 2025-06-24T21:25:24.893785Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:24.896548Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519630135765865508:2079] 1750800324732008 != 1750800324732011 2025-06-24T21:25:24.914352Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:24.914484Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:24.916501Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8035 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:25.197750Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:25.204097Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:25.219968Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:25.301128Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:25.364873Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:29.142098Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519630159125045413:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:29.142176Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a74/r3tmp/tmpaOEx3D/pdisk_1.dat 2025-06-24T21:25:29.287023Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:29.288836Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519630159125045394:2079] 1750800329141501 != 1750800329141504 2025-06-24T21:25:29.307451Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:29.307561Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:29.309598Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20485 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:29.592579Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:29.614987Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:29.688823Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:29.748671Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> TPQTabletTests::DropTablet_Before_Write [GOOD] >> TMeteringSink::FlushThroughputV1 [GOOD] >> TMeteringSink::UsedStorageV1 [GOOD] >> TMicrosecondsSlidingWindow::Basic [GOOD] >> TMultiBucketCounter::InsertAndUpdate [GOOD] >> TMultiBucketCounter::ManyCounters [GOOD] >> TPQRBDescribes::PartitionLocations >> TPQTabletTests::DropTablet_And_UnplannedConfigTransaction >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_OldPartitionExists_NotBoundary_Test >> TPartitionTests::OldPlanStep [GOOD] >> TQuotaTracker::TestSmallMessages [GOOD] >> TQuotaTracker::TestBigMessages [GOOD] >> TSourceIdTests::ExpensiveCleanup >> TPartitionTests::CorrectRange_Rollback [GOOD] >> TPartitionTests::ShadowPartitionCountersRestore [GOOD] >> TSourceIdTests::SourceIdStorageAdd [GOOD] >> TSourceIdTests::ProtoSourceIdStorageParseAndAdd [GOOD] >> TSourceIdTests::SourceIdStorageComplexDelete >> TPQTabletTests::Partition_Send_Predicate_With_False >> TSourceIdTests::SourceIdStorageComplexDelete [GOOD] >> TSourceIdTests::HeartbeatEmitter [GOOD] >> TSourceIdTests::SourceIdMinSeqNo [GOOD] >> TPartitionTests::ReserveSubDomainOutOfSpace >> TPartitionTests::DataTxCalcPredicateOk >> TPartitionTests::TestNonConflictingActsBatchOk >> TPQTabletTests::DropTablet_And_UnplannedConfigTransaction [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionInactive_1_Test >> TPQTabletTests::Huge_ProposeTransacton >> BindQueue::Basic [GOOD] >> TBlobStorageWardenTest::ObtainPDiskKeySamePin [GOOD] >> CacheEviction::DeleteKeys [GOOD] >> PQCountersLabeled::Partition >> TPQTabletTests::Partition_Send_Predicate_With_False [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::RejectByPerRequestSize [GOOD] Test command err: 2025-06-24T21:25:14.548387Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630090612659486:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:14.548623Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a53/r3tmp/tmpEXg2bc/pdisk_1.dat 2025-06-24T21:25:14.872026Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:14.878691Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630090612659468:2079] 1750800314547653 != 1750800314547656 2025-06-24T21:25:14.962608Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:14.962768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:14.964985Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14458 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:15.161523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:15.194815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:15.560685Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:19.548817Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630090612659486:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:19.548907Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:25:22.377876Z node 1 :TX_DATASHARD ERROR: check_data_tx_unit.cpp:133: Transaction read size 51002469 exceeds limit 10000 at tablet 72075186224037888 txId 281474976715760 2025-06-24T21:25:22.378045Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715760 at tablet 72075186224037888 status: BAD_REQUEST errors: READ_SIZE_EXECEEDED (Transaction read size 51002469 exceeds limit 10000 at tablet 72075186224037888 txId 281474976715760) | 2025-06-24T21:25:22.378255Z node 1 :TX_PROXY ERROR: datareq.cpp:883: Actor# [1:7519630124972399455:2923] txid# 281474976715760 RESPONSE Status# WrongRequest marker# P13c 2025-06-24T21:25:23.084980Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630131575297045:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:23.085071Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a53/r3tmp/tmpUTQdEf/pdisk_1.dat 2025-06-24T21:25:23.217364Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:23.228473Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630131575297027:2079] 1750800323084436 != 1750800323084439 2025-06-24T21:25:23.241090Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:23.241174Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:23.242141Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4228 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:23.421913Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:23.443034Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:24.111389Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:28.085379Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519630131575297045:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:28.085457Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:25:30.119673Z node 2 :TX_DATASHARD ERROR: check_data_tx_unit.cpp:133: Transaction read size 51002357 exceeds limit 10000 at tablet 72075186224037888 txId 281474976715760 2025-06-24T21:25:30.119753Z node 2 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715760 at tablet 72075186224037888 status: BAD_REQUEST errors: READ_SIZE_EXECEEDED (Transaction read size 51002357 exceeds limit 10000 at tablet 72075186224037888 txId 281474976715760) | 2025-06-24T21:25:30.119884Z node 2 :TX_PROXY ERROR: datareq.cpp:883: Actor# [2:7519630161640069710:2921] txid# 281474976715760 RESPONSE Status# WrongRequest marker# P13c 2025-06-24T21:25:30.859827Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630162723942562:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:30.859899Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a53/r3tmp/tmp7cwE9y/pdisk_1.dat 2025-06-24T21:25:30.974600Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:30.978160Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630162723942543:2079] 1750800330859273 != 1750800330859276 2025-06-24T21:25:30.999665Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:30.999748Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:31.001240Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28257 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:31.195971Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:31.220095Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:31.868665Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:34.060133Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [3:7519630162723942675:2096] Handle TEvProposeTransaction 2025-06-24T21:25:34.060164Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [3:7519630162723942675:2096] TxId# 281474976715700 ProcessProposeTransaction 2025-06-24T21:25:34.060209Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:273: actor# [3:7519630162723942675:2096] Cookie# 0 userReqId# "" txid# 281474976715700 SEND to# [3:7519630179903812790:2597] DataReq marker# P0 2025-06-24T21:25:34.060262Z node 3 :TX_PROXY DEBUG: datareq.cpp:1330: Actor# [3:7519630179903812790:2597] Cookie# 0 txid# 281474976715700 HANDLE TDataReq marker# P1 2025-06-24T21:25:34.060797Z node 3 :TX_PROXY DEBUG: datareq.cpp:1245: Actor [3:7519630179903812790:2597] txid 281474976715700 disallow followers cause of operation 2 read target mode 0 2025-06-24T21:25:34.060820Z node 3 :TX_PROXY DEBUG: datareq.cpp:1245: Actor [3:7519630179903812790:2597] txid 281474976715700 disallow followers cause of operation 2 read target mode 0 2025-06-24T21:25:34.060855Z node 3 :TX_PROXY DEBUG: datareq.cpp:1453: Actor# [3:7519630179903812790:2597] txid# 281474976715700 SEND to# [3:7519630162723942784:2114] TSchemeCache with 2 scheme entries. 
DataReq marker# P2 2025-06-24T21:25:34.061060Z node 3 :TX_PROXY DEBUG: datareq.cpp:1620: Actor# [3:7519630179903812790:2597] txid# 281474976715700 HANDLE EvResolveKeySetResult TDataReq marker# P3 ErrorCount# 0 2025-06-24T21:25:34.062673Z node 3 :TX_PROXY DEBUG: datareq.cpp:1115: Actor# [3:7519630179903812790:2597] txid# 281474976715700 SEND TEvProposeTransaction to datashard 72075186224037888 with 734 bytes program affected shards 2 followers disallowed marker# P4 2025-06-24T21:25:34.062967Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:25:34.063003Z node 3 :TX_PROXY DEBUG: datareq.cpp:1115: Actor# [3:7519630179903812790:2597] txid# 281474976715700 SEND TEvProposeTransaction to datashard 72075186224037889 with 734 bytes program affected shards 2 followers disallowed marker# P4 2025-06-24T21:25:34.064439Z node 3 :TX_DATASHARD DEBUG: check_data_tx_unit.cpp:313: Prepared DataTx transaction txId 281474976715700 at tablet 72075186224037888 2025-06-24T21:25:34.064750Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-24T21:25:34.065694Z node 3 :TX_DATASHARD DEBUG: check_data_tx_unit.cpp:313: Prepared DataTx transaction txId 281474976715700 at tablet 72075186224037889 2025-06-24T21:25:34.066695Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:25:34.066783Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-24T21:25:34.066787Z node 3 :TX_PROXY DEBUG: datareq.cpp:1873: Actor# [3:7519630179903812790:2597] txid# 281474976715700 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# PREPARED shard id 72075186224037888 read size 17000855 out readset size 0 marker# P6 2025-06-24T21:25:34.066823Z node 3 :TX_PROXY DEBUG: datareq.cpp:1873: Actor# [3:7519630179903812790:2597] txid# 281474976715700 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# PREPARED shard id 72075186224037889 read size 9000463 out readset size 0 marker# P6 2025-06-24T21:25:34.066874Z node 3 :TX_PROXY ERROR: datareq.cpp:2829: Actor# [3:7519630179903812790:2597] txid# 281474976715700 FailProposedRequest: Transaction total read size 26001318 exceeded limit 10000 Status# ExecError 2025-06-24T21:25:34.066938Z node 3 :TX_PROXY ERROR: datareq.cpp:883: Actor# [3:7519630179903812790:2597] txid# 281474976715700 RESPONSE Status# ExecError marker# P13c 2025-06-24T21:25:34.067032Z node 3 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:73: Got TEvDataShard::TEvCancelTransactionProposal 72075186224037888 txId 281474976715700 2025-06-24T21:25:34.067064Z node 3 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:44: Start TTxCancelTransactionProposal at tablet 72075186224037888 txId 281474976715700 2025-06-24T21:25:34.067538Z node 3 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:73: Got TEvDataShard::TEvCancelTransactionProposal 72075186224037889 txId 281474976715700 2025-06-24T21:25:34.067565Z node 3 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:44: Start TTxCancelTransactionProposal at tablet 72075186224037889 txId 281474976715700 |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TSourceIdTests::SourceIdMinSeqNo [GOOD] >> TPartitionTests::ReserveSubDomainOutOfSpace [GOOD] >> 
TSourceIdTests::ExpensiveCleanup [GOOD] >> TPQTabletTests::ProposeTx_Missing_Operations >> TPartitionTests::ShadowPartitionCounters >> TPQTabletTests::ProposeTx_Missing_Operations [GOOD] >> TPQTabletTests::ProposeTx_Unknown_Partition_1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TSourceIdTests::ExpensiveCleanup [GOOD] Test command err: 2025-06-24T21:25:35.360878Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:35.360966Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:35.759775Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:35.759828Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info processed_blobs=41800 quoted_time=9.980000s Iteration 0 Iteration 1 Iteration 2 Iteration 3 Iteration 4 Iteration 5 Iteration 6 Iteration 7 Iteration 8 Iteration 9 Iteration 10 Iteration 11 Iteration 12 Iteration 13 Iteration 14 Iteration 15 Iteration 16 Iteration 17 Iteration 18 Iteration 19 Iteration 20 Iteration 21 Iteration 22 Iteration 23 Iteration 24 Iteration 25 Iteration 26 Iteration 27 Iteration 28 Iteration 29 Iteration 30 Iteration 31 Iteration 32 Iteration 33 Iteration 34 Iteration 35 Iteration 36 Iteration 37 Iteration 38 Iteration 39 Iteration 40 Iteration 41 Iteration 42 Iteration 43 Iteration 44 Iteration 45 Iteration 46 Iteration 47 Iteration 48 Iteration 49 Iteration 50 Iteration 51 Iteration 52 Iteration 53 Iteration 54 Iteration 55 Iteration 56 Iteration 57 Iteration 58 Iteration 59 Iteration 60 Iteration 61 Iteration 62 Iteration 63 Iteration 64 Iteration 65 Iteration 66 Iteration 67 Iteration 68 Iteration 69 Iteration 70 Iteration 71 Iteration 72 Iteration 73 Iteration 74 Iteration 75 Iteration 76 Iteration 77 Iteration 78 Iteration 79 Iteration 80 Iteration 81 Iteration 82 Iteration 83 Iteration 84 Iteration 85 Iteration 86 Iteration 87 Iteration 88 Iteration 89 Iteration 90 Iteration 91 Iteration 92 Iteration 93 Iteration 94 Iteration 95 Iteration 96 Iteration 97 Iteration 98 Iteration 99 Iteration 100 Iteration 101 Iteration 102 Iteration 103 Iteration 104 Iteration 105 Iteration 106 Iteration 107 Iteration 108 Iteration 109 Iteration 110 Iteration 111 Iteration 112 Iteration 113 Iteration 114 Iteration 115 Iteration 116 Iteration 117 Iteration 118 Iteration 119 Iteration 120 Iteration 121 Iteration 122 Iteration 123 Iteration 124 Iteration 125 Iteration 126 Iteration 127 Iteration 128 Iteration 129 Iteration 130 Iteration 131 Iteration 132 Iteration 133 Iteration 134 Iteration 135 Iteration 136 Iteration 137 Iteration 138 Iteration 139 Iteration 140 Iteration 141 Iteration 142 Iteration 143 Iteration 144 Iteration 145 Iteration 146 Iteration 147 Iteration 148 Iteration 149 Iteration 150 Iteration 151 Iteration 152 Iteration 153 Iteration 154 Iteration 155 Iteration 156 Iteration 157 Iteration 158 Iteration 159 Iteration 160 Iteration 161 Iteration 162 Iteration 163 Iteration 164 Iteration 165 Iteration 166 Iteration 167 Iteration 168 Iteration 169 Iteration 170 Iteration 171 Iteration 172 Iteration 173 Iteration 174 Iteration 175 Iteration 176 Iteration 177 Iteration 178 Iteration 179 Iteration 180 Iteration 181 Iteration 182 Iteration 183 Iteration 184 Iteration 185 Iteration 186 Iteration 187 Iteration 188 Iteration 189 
Iteration 190 Iteration 191 Iteration 192 Iteration 193 Iteration 194 Iteration 195 Iteration 196 Iteration 197 Iteration 198 Iteration 199 Iteration 200 Iteration 201 Iteration 202 Iteration 203 Iteration 204 Iteration 205 Iteration 206 Iteration 207 Iteration 208 Iteration 209 Iteration 210 Iteration 211 Iteration 212 Iteration 213 Iteration 214 Iteration 215 Iteration 216 Iteration 217 Iteration 218 Iteration 219 Iteration 220 Iteration 221 Iteration 222 Iteration 223 Iteration 224 Iteration 225 Iteration 226 Iteration 227 Iteration 228 Iteration 229 Iteration 230 Iteration 231 Iteration 232 Iteration 233 Iteration 234 Iteration 235 Iteration 236 Iteration 237 Iteration 238 Iteration 239 Iteration 240 Iteration 241 Iteration 242 Iteration 243 Iteration 244 Iteration 245 Iteration 246 Iteration 247 Iteration 248 Iteration 249 Iteration 250 Iteration 251 Iteration 252 Iteration 253 Iteration 254 Iteration 255 Iteration 256 Iteration 257 Iteration 258 Iteration 259 Iteration 260 Iteration 261 Iteration 262 Iteration 263 Iteration 264 Iteration 265 Iteration 266 Iteration 267 Iteration 268 Iteration 269 Iteration 270 Iteration 271 Iteration 272 Iteration 273 Iteration 274 Iteration 275 Iteration 276 Iteration 277 Iteration 278 Iteration 279 Iteration 280 Iteration 281 Iteration 282 Iteration 283 Iteration 284 Iteration 285 Iteration 286 Iteration 287 Iteration 288 Iteration 289 Iteration 290 Iteration 291 Iteration 292 Iteration 293 Iteration 294 Iteration 295 Iteration 296 Iteration 297 Iteration 298 Iteration 299 Iteration 300 Iteration 301 Iteration 302 Iteration 303 Iteration 304 Iteration 305 Iteration 306 Iteration 307 Iteration 308 Iteration 309 Iteration 310 Iteration 311 Iteration 312 Iteration 313 Iteration 314 Iteration 315 Iteration 316 Iteration 317 Iteration 318 Iteration 319 Iteration 320 Iteration 321 Iteration 322 Iteration 323 Iteration 324 Iteration 325 Iteration 326 Iteration 327 Iteration 328 Iteration 329 Iteration 330 Iteration 331 Iteration 332 Iteration 333 Iteration 334 Iteration 335 Iteration 336 Iteration 337 Iteration 338 Iteration 339 Iteration 340 Iteration 341 Iteration 342 Iteration 343 Iteration 344 Iteration 345 Iteration 346 Iteration 347 Iteration 348 Iteration 349 Iteration 350 Iteration 351 Iteration 352 Iteration 353 Iteration 354 Iteration 355 Iteration 356 Iteration 357 Iteration 358 Iteration 359 Iteration 360 Iteration 361 Iteration 362 Iteration 363 Iteration 364 Iteration 365 Iteration 366 Iteration 367 Iteration 368 Iteration 369 Iteration 370 Iteration 371 Iteration 372 Iteration 373 Iteration 374 Iteration 375 Iteration 376 Iteration 377 Iteration 378 Iteration 379 Iteration 380 Iteration 381 Iteration 382 Iteration 383 Iteration 384 Iteration 385 Iteration 386 Iteration 387 Iteration 388 Iteration 389 Iteration 390 Iteration 391 Iteration 392 Iteration 393 Iteration 394 Iteration 395 Iteration 396 Iteration 397 Iteration 398 Iteration 399 Iteration 400 Iteration 401 Iteration 402 Iteration 403 Iteration 404 Iteration 405 Iteration 406 Iteration 407 Iteration 408 Iteration 409 Iteration 410 Iteration 411 Iteration 412 Iteration 413 Iteration 414 Iteration 415 Iteration 416 Iteration 417 Iteration 418 Iteration 419 Iteration 420 Iteration 421 Iteration 422 Iteration 423 Iteration 424 Iteration 425 Iteration 426 Iteration 427 Iteration 428 Iteration 429 Iteration 430 Iteration 431 Iteration 432 Iteration 433 Iteration 434 Iteration 435 Iteration 436 Iteration 437 Iteration 438 Iteration 439 Iteration 440 Iteration 441 Iteration 442 Iteration 
443 Iteration 444 Iteration 445 Iteration 446 Iteration 447 Iteration 448 Iteration 449 Iteration 450 Iteration 451 Iteration 452 Iteration 453 Iteration 454 Iteration 455 Iteration 456 Iteration 457 Iteration 458 Iteration 459 Iteration 460 Iteration 461 Iteration 462 Iteration 463 Iteration 464 Iteration 465 Iteration 466 Iteration 467 Iteration 468 Iteration 469 Iteration 470 Iteration 471 Iteration 472 Iteration 473 Iteration 474 Iteration 475 Iteration 476 Iteration 477 Iteration 478 Iteration 479 Iteration 480 Iteration 481 Iteration 482 Iteration 483 Iteration 484 Iteration 485 Iteration 486 Iteration 487 Iteration 488 Iteration 489 Iteration 490 Iteration 491 Iteration 492 Iteration 493 Iteration 494 Iteration 495 Iteration 496 Iteration 497 Iteration 498 Iteration 499 Iteration 500 Iteration 501 Iteration 502 Iteration 503 Iteration 504 Iteration 505 Iteration 506 Iteration 507 Iteration 508 Iteration 509 Iteration 510 Iteration 511 Iteration 512 Iteration 513 Iteration 514 Iteration 515 Iteration 516 Iteration 517 Iteration 518 Iteration 519 Iteration 520 Iteration 521 Iteration 522 Iteration 523 Iteration 524 Iteration 525 Iteration 526 Iteration 527 Iteration 528 Iteration 529 Iteration 530 Iteration 531 Iteration 532 Iteration 533 Iteration 534 Iteration 535 Iteration 536 Iteration 537 Iteration 538 Iteration 539 Iteration 540 Iteration 541 Iteration 542 Iteration 543 Iteration 544 Iteration 545 Iteration 546 Iteration 547 Iteration 548 Iteration 549 Iteration 550 Iteration 551 Iteration 552 Iteration 553 Iteration 554 Iteration 555 Iteration 556 Iteration 557 Iteration 558 Iteration 559 Iteration 560 Iteration 561 Iteration 562 Iteration 563 Iteration 564 Iteration 565 Iteration 566 Iteration 567 Iteration 568 Iteration 569 Iteration 570 Iteration 571 Iteration 572 Iteration 573 Iteration 574 Iteration 575 Iteration 576 Iteration 577 Iteration 578 Iteration 579 Iteration 580 Iteration 581 Iteration 582 Iteration 583 Iteration 584 Iteration 585 Iteration 586 Iteration 587 Iteration 588 Iteration 589 Iteration 590 Iteration 591 Iteration 592 Iteration 593 Iteration 594 Iteration 595 Iteration 596 Iteration 597 Iteration 598 Iteration 599 Iteration 600 Iteration 601 Iteration 602 Iteration 603 Iteration 604 Iteration 605 Iteration 606 Iteration 607 Iteration 608 Iteration 609 Iteration 610 Iteration 611 Iteration 612 Iteration 613 Iteration 614 Iteration 615 Iteration 616 Iteration 617 Iteration 618 Iteration 619 Iteration 620 Iteration 621 Iteration 622 Iteration 623 Iteration 624 Iteration 625 Iteration 626 Iteration 627 Iteration 628 Iteration 629 Iteration 630 Iteration 631 Iteration 632 Iteration 633 Iteration 634 Iteration 635 Iteration 636 Iteration 637 Iteration 638 Iteration 639 Iteration 640 Iteration 641 Iteration 642 Iteration 643 Iteration 644 Iteration 645 Iteration 646 Iteration 647 Iteration 648 Iteration 649 Iteration 650 Iteration 651 Iteration 652 Iteration 653 Iteration 654 Iteration 655 Iteration 656 Iteration 657 Iteration 658 Iteration 659 Iteration 660 Iteration 661 Iteration 662 Iteration 663 Iteration 664 Iteration 665 Iteration 666 Iteration 667 Iteration 668 Iteration 669 Iteration 670 Iteration 671 Iteration 672 Iteration 673 Iteration 674 Iteration 675 Iteration 676 Iteration 677 Iteration 678 Iteration 679 Iteration 680 Iteration 681 Iteration 682 Iteration 683 Iteration 684 Iteration 685 Iteration 686 Iteration 687 Iteration 688 Iteration 689 Iteration 690 Iteration 691 Iteration 692 Iteration 693 Iteration 694 Iteration 695 Iteration 696 
Iteration 697 Iteration 698 Iteration 699 Iteration 700 Iteration 701 Iteration 702 Iteration 703 Iteration 704 Iteration 705 Iteration 706 Iteration 707 Iteration 708 Iteration 709 Iteration 710 Iteration 711 Iteration 712 Iteration 713 Iteration 714 Iteration 715 Iteration 716 Iteration 717 Iteration 718 Iteration 719 Iteration 720 Iteration 721 Iteration 722 Iteration 723 Iteration 724 Iteration 725 Iteration 726 Iteration 727 Iteration 728 Iteration 729 Iteration 730 Iteration 731 Iteration 732 Iteration 733 Iteration 734 Iteration 735 Iteration 736 Iteration 737 Iteration 738 Iteration 739 Iteration 740 Iteration 741 Iteration 742 Iteration 743 Iteration 744 Iteration 745 Iteration 746 Iteration 747 Iteration 748 Iteration 749 Iteration 750 Iteration 751 Iteration 752 Iteration 753 Iteration 754 Iteration 755 Iteration 756 Iteration 757 Iteration 758 Iteration 759 Iteration 760 Iteration 761 Iteration 762 Iteration 763 Iteration 764 Iteration 765 Iteration 766 Iteration 767 Iteration 768 Iteration 769 Iteration 770 Iteration 771 Iteration 772 Iteration 773 Iteration 774 Iteration 775 Iteration 776 Iteration 777 Iteration 778 Iteration 779 Iteration 780 Iteration 781 Iteration 782 Iteration 783 Iteration 784 Iteration 785 Iteration 786 Iteration 787 Iteration 788 Iteration 789 Iteration 790 Iteration 791 Iteration 792 Iteration 793 Iteration 794 Iteration 795 Iteration 796 Iteration 797 Iteration 798 Iteration 799 Iteration 800 Iteration 801 Iteration 802 Iteration 803 Iteration 804 Iteration 805 Iteration 806 Iteration 807 Iteration 808 Iteration 809 Iteration 810 Iteration 811 Iteration 812 Iteration 813 Iteration 814 Iteration 815 Iteration 816 Iteration 817 Iteration 818 Iteration 819 Iteration 820 Iteration 821 Iteration 822 Iteration 823 Iteration 824 Iteration 825 Iteration 826 Iteration 827 Iteration 828 Iteration 829 Iteration 830 Iteration 831 Iteration 832 Iteration 833 Iteration 834 Iteration 835 Iteration 836 Iteration 837 Iteration 838 Iteration 839 Iteration 840 Iteration 841 Iteration 842 Iteration 843 Iteration 844 Iteration 845 Iteration 846 Iteration 847 Iteration 848 Iteration 849 Iteration 850 Iteration 851 Iteration 852 Iteration 853 Iteration 854 Iteration 855 Iteration 856 Iteration 857 Iteration 858 Iteration 859 Iteration 860 Iteration 861 Iteration 862 Iteration 863 Iteration 864 Iteration 865 Iteration 866 Iteration 867 Iteration 868 Iteration 869 Iteration 870 Iteration 871 Iteration 872 Iteration 873 Iteration 874 Iteration 875 Iteration 876 Iteration 877 Iteration 878 Iteration 879 Iteration 880 Iteration 881 Iteration 882 Iteration 883 Iteration 884 Iteration 885 Iteration 886 Iteration 887 Iteration 888 Iteration 889 Iteration 890 Iteration 891 Iteration 892 Iteration 893 Iteration 894 Iteration 895 Iteration 896 Iteration 897 Iteration 898 Iteration 899 Iteration 900 Iteration 901 Iteration 902 Iteration 903 Iteration 904 Iteration 905 Iteration 906 Iteration 907 Iteration 908 Iteration 909 Iteration 910 Iteration 911 Iteration 912 Iteration 913 Iteration 914 Iteration 915 Iteration 916 Iteration 917 Iteration 918 Iteration 919 Iteration 920 Iteration 921 Iteration 922 Iteration 923 Iteration 924 Iteration 925 Iteration 926 Iteration 927 Iteration 928 Iteration 929 Iteration 930 Iteration 931 Iteration 932 Iteration 933 Iteration 934 Iteration 935 Iteration 936 Iteration 937 Iteration 938 Iteration 939 Iteration 940 Iteration 941 Iteration 942 Iteration 943 Iteration 944 Iteration 945 Iteration 946 Iteration 947 Iteration 948 Iteration 949 Iteration 
950 Iteration 951 Iteration 952 Iteration 953 Iteration 954 Iteration 955 Iteration 956 Iteration 957 Iteration 958 Iteration 959 Iteration 960 Iteration 961 Iteration 962 Iteration 963 Iteration 964 Iteration 965 Iteration 966 Iteration 967 Iteration 968 Iteration 969 Iteration 970 Iteration 971 Iteration 972 Iteration 973 Iteration 974 Iteration 975 Iteration 976 Iteration 977 Iteration 978 Iteration 979 Iteration 980 Iteration 981 Iteration 982 Iteration 983 Iteration 984 Iteration 985 Iteration 986 Iteration 987 Iteration 988 Iteration 989 Iteration 990 Iteration 991 Iteration 992 Iteration 993 Iteration 994 Iteration 995 Iteration 996 Iteration 997 Iteration 998 Iteration 999 >> TPQTabletTests::ProposeTx_Unknown_Partition_1 [GOOD] >> TPQTestInternal::TestBatchPacking >> TPartitionTests::ConflictingSrcIdForTxInDifferentBatches >> TPQTestInternal::TestBatchPacking [GOOD] >> TPQTestInternal::TestKeyRange [GOOD] >> TPQTestInternal::TestAsInt [GOOD] >> TPQTestInternal::TestAsIntWide [GOOD] >> TPQTestInternal::StoreKeys [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::ObtainPDiskKeySamePin [GOOD] Test command err: Pick Delete nodeId# 87 Add nodeId# 101 Pick Disable nodeId# 43 Enable nodeId# 43 Disable nodeId# 100 Disable nodeId# 92 Delete nodeId# 24 Pick Disable nodeId# 53 Delete nodeId# 21 Disable nodeId# 48 Delete nodeId# 12 Disable nodeId# 75 Delete nodeId# 27 Enable nodeId# 75 Enable nodeId# 48 Add nodeId# 102 Add nodeId# 103 Delete nodeId# 58 Pick Add nodeId# 104 Add nodeId# 105 Add nodeId# 106 Delete nodeId# 101 Delete nodeId# 8 Delete nodeId# 75 Pick Delete nodeId# 6 Delete nodeId# 104 Delete nodeId# 17 Disable nodeId# 23 Delete nodeId# 83 Enable nodeId# 100 Enable nodeId# 23 Enable nodeId# 92 Disable nodeId# 52 Add nodeId# 107 Enable nodeId# 52 Delete nodeId# 30 Pick Delete nodeId# 39 Delete nodeId# 14 Delete nodeId# 26 Disable nodeId# 64 Pick Enable nodeId# 64 Add nodeId# 108 Delete nodeId# 2 Delete nodeId# 7 Disable nodeId# 69 Add nodeId# 109 Add nodeId# 110 Disable nodeId# 55 Pick Pick Disable nodeId# 57 Add nodeId# 111 Add nodeId# 112 Add nodeId# 113 Enable nodeId# 55 Pick Disable nodeId# 78 Enable nodeId# 78 Disable nodeId# 108 Delete nodeId# 48 Enable nodeId# 53 Delete nodeId# 89 Pick Pick Delete nodeId# 34 Add nodeId# 114 Enable nodeId# 69 Disable nodeId# 103 Delete nodeId# 71 Disable nodeId# 72 Pick Disable nodeId# 10 Enable nodeId# 103 Pick Pick Add nodeId# 115 Pick Enable nodeId# 10 Delete nodeId# 41 Add nodeId# 116 Pick Add nodeId# 117 Disable nodeId# 46 Add nodeId# 118 Add nodeId# 119 Pick Delete nodeId# 103 Pick Add nodeId# 120 Enable nodeId# 46 Pick Delete nodeId# 60 Add nodeId# 121 Delete nodeId# 62 Pick Add nodeId# 122 Delete nodeId# 95 Pick Add nodeId# 123 Delete nodeId# 78 Enable nodeId# 57 Disable nodeId# 110 Pick Pick Delete nodeId# 31 Disable nodeId# 18 Enable nodeId# 72 Add nodeId# 124 Delete nodeId# 94 Enable nodeId# 18 Add nodeId# 125 Delete nodeId# 51 Pick Delete nodeId# 35 Delete nodeId# 42 Add nodeId# 126 Pick Disable nodeId# 32 Enable nodeId# 108 Pick Pick Pick Enable nodeId# 32 Pick Pick Delete nodeId# 106 Pick Add nodeId# 127 Pick Enable nodeId# 110 Delete nodeId# 3 Delete nodeId# 65 Delete nodeId# 115 Delete nodeId# 44 Delete nodeId# 99 Delete nodeId# 32 Delete nodeId# 79 Add nodeId# 128 Pick Pick Pick Pick Pick Disable nodeId# 90 Enable nodeId# 90 Pick Add nodeId# 129 Disable nodeId# 33 Add nodeId# 130 Add nodeId# 131 Disable nodeId# 67 Add nodeId# 132 Disable nodeId# 120 
Disable nodeId# 111 Disable nodeId# 50 Disable nodeId# 122 Add nodeId# 133 Delete nodeId# 123 Delete nodeId# 1 Disable nodeId# 76 Pick Pick Disable nodeId# 28 Pick Pick Delete nodeId# 92 Delete nodeId# 81 Enable nodeId# 50 Pick Delete nodeId# 93 Delete nodeId# 38 Disable nodeId# 46 Delete nodeId# 127 Add nodeId# 134 Enable nodeId# 122 Add nodeId# 135 Enable nodeId# 67 Enable nodeId# 46 Pick Delete nodeId# 53 Enable nodeId# 111 Disable nodeId# 86 Enable nodeId# 28 Pick Add nodeId# 136 Disable nodeId# 66 Enable nodeId# 66 Enable nodeId# 86 Enable nodeId# 76 Disable nodeId# 80 Disable nodeId# 113 Add nodeId# 137 Add nodeId# 138 Delete nodeId# 67 Enable nodeId# 33 Pick Pick Disable nodeId# 111 Pick Enable nodeId# 80 Disable nodeId# 52 Enable nodeId# 113 Add nodeId# 139 Disable nodeId# 128 Pick Pick Disable nodeId# 56 Delete nodeId# 84 Add nodeId# 140 Enable nodeId# 128 Add nodeId# 141 Add nodeId# 142 Add nodeId# 143 Disable nodeId# 131 Delete nodeId# 132 Disable nodeId# 23 Delete nodeId# 56 Pick Disable nodeId# 133 Disable nodeId# 129 Delete nodeId# 63 Add nodeId# 144 Disable nodeId# 107 Disable nodeId# 47 Enable nodeId# 107 Enable nodeId# 129 Add nodeId# 145 Pick Enable nodeId# 131 Pick Disable nodeId# 96 Delete nodeId# 143 Enable nodeId# 133 Enable nodeId# 23 Disable nodeId# 126 Enable nodeId# 126 Enable nodeId# 47 Disable nodeId# 138 Disable nodeId# 136 Delete nodeId# 98 Enable nodeId# 96 Disable nodeId# 15 Delete nodeId# 82 Disable nodeId# 25 Pick Delete nodeId# 25 Enable nodeId# 138 Disable nodeId# 91 Delete nodeId# 139 Enable nodeId# 91 Enable nodeId# 52 Pick Delete nodeId# 50 Enable nodeId# 15 Pick Disable nodeId# 18 Pick Disable nodeId# 107 Add nodeId# 146 Pick Pick Enable nodeId# 111 Pick Add nodeId# 147 Pick Pick Disable nodeId# 33 Disable nodeId# 137 Pick Delete nodeId# 66 Add nodeId# 148 Delete nodeId# 134 Pick Enable nodeId# 33 Pick Add nodeId# 149 Enable nodeId# 107 Add nodeId# 150 Enable nodeId# 137 Pick Pick Add nodeId# 151 Add nodeId# 152 Pick Disable nodeId# 45 Disable nodeId# 105 Enable nodeId# 105 Enable nodeId# 120 Pick Enable nodeId# 18 Add nodeId# 153 Add nodeId# 154 Disable nodeId# 91 Disable nodeId# 150 Enable nodeId# 91 Delete nodeId# 55 Disable nodeId# 130 Disable nodeId# 154 Delete nodeId# 59 Pick Add nodeId# 155 Enable nodeId# 45 Disable nodeId# 91 Disable nodeId# 155 Add nodeId# 156 Enable nodeId# 150 Pick Pick Pick Pick Enable nodeId# 91 Delete nodeId# 145 Disable nodeId# 4 Disable nodeId# 88 Delete nodeId# 29 Disable nodeId# 110 Pick Enable nodeId# 130 Disable nodeId# 102 Pick Pick Enable nodeId# 110 Enable nodeId# 136 Delete nodeId# 155 Delete nodeId# 40 Pick Enable nodeId# 154 Pick Enable nodeId# 4 Delete nodeId# 108 Pick Disable nodeId# 124 Add nodeId# 157 Add nodeId# 158 Disable nodeId# 69 Pick Add nodeId# 159 Disable nodeId# 28 Delete nodeId# 33 Enable nodeId# 28 Add nodeId# 160 Pick Add nodeId# 161 Disable nodeId# 151 Disable nodeId# 96 Add nodeId# 162 Pick Delete nodeId# 28 Enable nodeId# 102 Pick Pick Add nodeId# 163 Enable nodeId# 69 Delete nodeId# 46 Add nodeId# 164 Disable nodeId# 112 Enable nodeId# 96 Add nodeId# 165 Delete nodeId# 140 Disable nodeId# 4 Disable nodeId# 120 Add nodeId# 166 Add nodeId# 167 Enable nodeId# 4 Pick Add nodeId# 168 Disable nodeId# 52 Delete nodeId# 91 Add nodeId# 169 Pick Add nodeId# 170 Disable nodeId# 116 Enable nodeId# 88 Add nodeId# 171 Delete nodeId# 156 Add nodeId# 172 Delete nodeId# 64 Add nodeId# 173 Add nodeId# 174 Pick Pick Add nodeId# 175 Enable nodeId# 151 Add nodeId# 176 Disable nodeId# 146 Pick Enable nodeId# 
120 Add nodeId# 177 Pick Pick Enable nodeId# 116 Enable nodeId# 146 Delete nodeId# 160 Add nodeId# 178 Delete nodeId# 76 Disable nodeId# 130 Delete nodeId# 166 Add nodeId# 179 Enable nodeId# 130 Pick Enable nodeId# 112 Add nodeId# 180 Delete nodeId# 178 Delete nodeId# 163 Add nodeId# 181 Add nodeId# 182 Enable nodeId# 52 Disable nodeId# 73 Delete nodeId# 174 Disable nodeId# 133 Enable nodeId# 73 Pick Add nodeId# 183 Pick Delete nodeId# 137 Pick Disable nodeId# 57 Pick Pick Delete nodeId# 150 Disable nodeId# 142 Pick Add nodeId# 184 Add nodeId# 185 Disable nodeId# 20 Pick Delete nodeId# 37 Enable nodeId# 133 Delete nodeId# 118 Enable nodeId# 124 Add nodeId# 186 Delete nodeId# 23 Add nodeId# 187 Disable nodeId# 187 Disable nodeId# 149 Pick Add nodeId# 188 Enable nodeId# 149 Pick Delete nodeId# 109 Pick Enable nodeId# 57 Delete nodeId# 149 Delete nodeId# 188 Pick Add nodeId# 189 Pick Disable nodeId# 153 Disable nodeId# 154 Disable nodeId# 10 Enable nodeId# 153 Delete nodeId# 144 Pick Disable nodeId# 131 Disable nodeId# 36 Enable nodeId# 10 Pick Disable nodeId# 72 Pick Add nodeId# 190 Add nodeId# 191 Disable nodeId# 148 Enable nodeId# 142 Enable nodeId# 72 Disable nodeId# 135 Enable nodeId# 148 Add nodeId# 192 Disable nodeId# 80 Delete nodeId# 73 Add nodeId# 193 Pick Disable nodeId# 151 Delete nodeId# 148 Disable nodeId# 9 Disable nodeId# 77 Add nodeId# 194 Pick Delete nodeId# 116 Enable nodeId# 154 Disable nodeId# 54 Enable nodeId# 187 Add nodeId# 195 Delete nodeId# 190 Disable nodeId# 114 Disable nodeId# 120 Add nodeId# 196 Pick Delete nodeId# 135 Disable nodeId# 159 Enable nodeId# 77 Pick Disable nodeId# 185 Disable nodeId# 121 Delete nodeId# 177 Pick Delete nodeId# 16 Disable nodeId# 161 Disable nodeId# 157 Enable nodeId# 120 Disable nodeId# 129 Disable nodeId# 77 Disable nodeId# 170 Delete nodeId# 129 Delete nodeId# 70 Pick Add nodeId# 197 Enable nodeId# 157 Delete nodeId# 45 Delete nodeId# 193 Pick Add nodeId# 198 Enable nodeId# 9 Pick Delete nodeId# 152 Enable nodeId# 20 Enable nodeId# 131 Disable nodeId# 167 Disable nodeId# 189 Enable nodeId# 159 Add nodeId# 199 Add nodeId# 200 Enable nodeId# 54 Delete nodeId# 187 Enable nodeId# 185 Add nodeId# 201 Pick Delete nodeId# 133 Pick Pick Disable nodeId# 179 Pick Disable nodeId# 192 Pick Enable nodeId# 114 Enable nodeId# 121 Pick Delete nodeId# 69 Disable nodeId# 131 Delete nodeId# 122 Enable nodeId# 167 Add nodeId# 202 Pick Add nodeId# 203 Pick Enable nodeId# 179 Disable nodeId# 124 Delete nodeId# 126 Add nodeId# 204 Pick Disable nodeId# 183 Add nodeId# 205 Delete nodeId# 114 Pick Pick Pick Pick Pick Enable nodeId# 36 Delete nodeId# 4 Add nodeId# 206 Add nodeId# 207 Disable nodeId# 47 Disable nodeId# 204 Delete nodeId# 161 Add nodeId# 208 Delete nodeId# 52 Enable nodeId# 151 Enable nodeId# 131 Disable nodeId# 36 Enable nodeId# 36 Delete nodeId# 208 Disable nodeId# 131 Enable nodeId# 124 Pick Enable nodeId# 204 Pick Add nodeId# 209 Pick Pick Enable nodeId# 183 Enable nodeId# 189 Disable nodeId# 22 Add nodeId# 210 Delete nodeId# 22 Pick Add nodeId# 211 Add nodeId# 212 Enable nodeId# 47 Pick Disable nodeId# 100 Disable nodeId# 19 Add nodeId# 213 Delete nodeId# 191 Add nodeId# 214 Pick Add nodeId# 215 Add nodeId# 216 Pick Enable nodeId# 80 Disable nodeId# 10 Disable nodeId# 105 Disable nodeId# 90 Enable nodeId# 192 Delete nodeId# 157 Enable nodeId# 105 Disable nodeId# 43 Delete nodeId# 170 Enable nodeId# 100 Add nodeId# 217 Delete nodeId# 217 Pick Delete nodeId# 201 Add nodeId# 218 Disable nodeId# 211 Add nodeId# 219 Disable nodeId# 216 Delete 
nodeId# 102 Pick Enable nodeId# 19 Pick Delete nodeId# 77 Disable nodeId# 205 Disable nodeId# 11 Delete nodeId# 219 Pick Delete nodeId# 18 Disable nodeId# 107 Pick Enable nodeId# 131 Pick Delete nodeId# 142 Disable nodeId# 198 Enable nodeId# 216 Disable nodeId# 212 Enable nodeId# 205 Disable nodeId# 196 Enable nodeId# 11 Delete nodeId# 195 Disable nodeId# 124 Add nodeId# 220 Delete nodeId# 220 Add nodeId# 221 Enable nodeId# 107 Pick Enable nodeId# 211 Disable nodeId# 36 Enable nodeId# 10 Add nodeId# 222 Delete nodeId# 194 Disable nodeId# 184 Enable nodeId# 36 Pick Add nodeId# 223 Pick Delete nodeId# 49 Add nodeId# 224 Enable nodeId# 184 Enable nodeId# 212 Enable nodeId# 198 Delete nodeId# 182 Pick Add nodeId# 225 Add nodeId# 226 Pick Add nodeId# 227 Disable nodeId# 151 Disable nodeId# 128 Pick Add nodeId# 228 Delete nodeId# 209 Pick Delete nodeId# 117 Pick Delete nodeId# 141 Delete nodeId# 216 Pick Add nodeId# 229 Enable nodeId# 196 Delete nodeId# 204 Disable nodeId# 224 Pick Enable nodeId# 224 Pick Disable nodeId# 199 Disable nodeId# 171 Enable nodeId# 43 Delete nodeId# 198 Delete nodeId# 173 Add nodeId# 230 Pick Disable nodeId# 218 Enable nodeId# 90 Delete nodeId# 223 Delete nodeId# 218 Disable nodeId# 131 Delete nodeId# 196 Delete nodeId# 202 Enable nodeId# 171 Delete nodeId# 88 Enable nodeId# 199 Delete nodeId# 96 Add nodeId# 231 Enable nodeId# 124 Disable nodeId# 213 Pick Enable nodeId# 213 Delete nodeId# 168 Enable nodeId# 128 Delete nodeId# 213 Enable nodeId# 151 Disable nodeId# 5 Delete nodeId# 147 Delete nodeId# 206 Add nodeId# 232 Disable nodeId# 199 Add nodeId# 233 Pick Add nodeId# 234 Delete nodeId# 192 Delete nodeId# 19 Add nodeId# 235 Enable nodeId# 5 Delete nodeId# 175 Pick Delete nodeId# 171 Delete nodeId# 205 Delete nodeId# 210 Delete nodeId# 176 Add nodeId# 236 Delete nodeId# 221 Pick Pick Enable nodeId# 131 Pick Enable nodeId# 199 Disable nodeId# 85 Disable nodeId# 13 Enable nodeId# 13 Enable nodeId# 85 Pick Add nodeId# 237 Disable nodeId# 128 Add nodeId# 238 Add nodeId# 239 Delete nodeId# 80 Enable nodeId# 128 Add nodeId# 240 Pick Pick Delete nodeId# 13 Add nodeId# 241 Delete nodeId# 128 Disable nodeId# 138 Pick Add nodeId# 242 Delete nodeId# 179 Disable nodeId# 238 Add nodeId# 243 Enable nodeId# 138 Delete nodeId# 184 Pick Disable nodeId# 5 Enable nodeId# 5 Add nodeId# 244 Add nodeId# 245 Pick Disable nodeId# 164 Disable nodeId# 199 Delete nodeId# 74 Enable nodeId# 164 Delete nodeId# 85 Delete nodeId# 68 Delete nodeId# 211 Delete nodeId# 165 Pick Delete nodeId# 186 Pick Pick Delete no ... 
nodeId# 20217 Delete nodeId# 20195 Enable nodeId# 20228 Delete nodeId# 20201 Delete nodeId# 20160 Add nodeId# 20239 Enable nodeId# 20177 Add nodeId# 20240 Enable nodeId# 20224 Add nodeId# 20241 Enable nodeId# 20221 Pick Add nodeId# 20242 Add nodeId# 20243 Enable nodeId# 20174 Delete nodeId# 20242 Disable nodeId# 20200 Pick Pick Disable nodeId# 20229 Add nodeId# 20244 Disable nodeId# 20241 Delete nodeId# 20228 Pick Disable nodeId# 20230 Add nodeId# 20245 Delete nodeId# 20244 Delete nodeId# 20178 Add nodeId# 20246 Delete nodeId# 20237 Enable nodeId# 20230 Disable nodeId# 20208 Add nodeId# 20247 Add nodeId# 20248 Pick Add nodeId# 20249 Delete nodeId# 20240 Pick Pick Pick Pick Add nodeId# 20250 Add nodeId# 20251 Pick Add nodeId# 20252 Disable nodeId# 20226 Pick Add nodeId# 20253 Add nodeId# 20254 Disable nodeId# 20230 Add nodeId# 20255 Disable nodeId# 20175 Pick Delete nodeId# 20245 Delete nodeId# 20169 Enable nodeId# 20229 Add nodeId# 20256 Enable nodeId# 20230 Add nodeId# 20257 Disable nodeId# 20231 Pick Enable nodeId# 20241 Pick Add nodeId# 20258 Pick Pick Delete nodeId# 20253 Disable nodeId# 20148 Pick Delete nodeId# 20182 Delete nodeId# 20215 Add nodeId# 20259 Pick Add nodeId# 20260 Delete nodeId# 20177 Add nodeId# 20261 Disable nodeId# 20204 Delete nodeId# 20225 Delete nodeId# 20255 Add nodeId# 20262 Enable nodeId# 20175 Pick Add nodeId# 20263 Delete nodeId# 20188 Disable nodeId# 20213 Add nodeId# 20264 Pick Delete nodeId# 20221 Disable nodeId# 20207 Delete nodeId# 20247 Disable nodeId# 20248 Add nodeId# 20265 Pick Add nodeId# 20266 Enable nodeId# 20213 Add nodeId# 20267 Disable nodeId# 20214 Add nodeId# 20268 Disable nodeId# 20210 Add nodeId# 20269 Add nodeId# 20270 Delete nodeId# 20110 Disable nodeId# 20233 Enable nodeId# 20226 Disable nodeId# 20226 Pick Disable nodeId# 20222 Pick Delete nodeId# 20175 Delete nodeId# 20254 Enable nodeId# 20148 Disable nodeId# 20144 Disable nodeId# 20241 Delete nodeId# 20252 Add nodeId# 20271 Pick Disable nodeId# 20232 Disable nodeId# 20243 Delete nodeId# 20213 Pick Add nodeId# 20272 Pick Pick Pick Enable nodeId# 20204 Pick Enable nodeId# 20200 Enable nodeId# 20231 Pick Pick Pick Pick Disable nodeId# 20265 Add nodeId# 20273 Enable nodeId# 20265 Pick Delete nodeId# 20260 Enable nodeId# 20214 Delete nodeId# 20265 Add nodeId# 20274 Delete nodeId# 20230 Delete nodeId# 20233 Add nodeId# 20275 Pick Disable nodeId# 20229 Delete nodeId# 20226 Delete nodeId# 20272 Pick Pick Disable nodeId# 20264 Enable nodeId# 20229 Disable nodeId# 20174 Enable nodeId# 20207 Delete nodeId# 20238 Disable nodeId# 20267 Disable nodeId# 20271 Enable nodeId# 20271 Disable nodeId# 20176 Disable nodeId# 20171 Disable nodeId# 20186 Pick Disable nodeId# 20271 Disable nodeId# 20214 Disable nodeId# 20263 Disable nodeId# 20200 Enable nodeId# 20186 Delete nodeId# 20249 Disable nodeId# 20137 Pick Pick Enable nodeId# 20214 Disable nodeId# 20246 Disable nodeId# 20229 Add nodeId# 20276 Disable nodeId# 20266 Disable nodeId# 20251 Enable nodeId# 20229 Add nodeId# 20277 Enable nodeId# 20174 Delete nodeId# 20234 Delete nodeId# 20239 Pick Pick Add nodeId# 20278 Enable nodeId# 20263 Disable nodeId# 20277 Add nodeId# 20279 Pick Add nodeId# 20280 Disable nodeId# 20236 Disable nodeId# 20250 Add nodeId# 20281 Pick Add nodeId# 20282 Enable nodeId# 20171 Enable nodeId# 20200 Pick Add nodeId# 20283 Delete nodeId# 20261 Enable nodeId# 20264 Enable nodeId# 20243 Enable nodeId# 20208 Disable nodeId# 20231 Delete nodeId# 20207 Disable nodeId# 20280 Add nodeId# 20284 Delete nodeId# 20218 Disable nodeId# 20256 
Enable nodeId# 20277 Delete nodeId# 20217 Enable nodeId# 20144 Enable nodeId# 20267 Disable nodeId# 20282 Delete nodeId# 20274 Add nodeId# 20285 Enable nodeId# 20246 Disable nodeId# 20144 Delete nodeId# 20275 Pick Disable nodeId# 20277 Disable nodeId# 20276 Pick Delete nodeId# 20285 Enable nodeId# 20231 Add nodeId# 20286 Delete nodeId# 20248 Disable nodeId# 20262 Pick Enable nodeId# 20256 Add nodeId# 20287 Disable nodeId# 20185 Enable nodeId# 20210 Pick Disable nodeId# 20208 Add nodeId# 20288 Delete nodeId# 20231 Add nodeId# 20289 Enable nodeId# 20250 Pick Enable nodeId# 20222 Disable nodeId# 20168 Pick Delete nodeId# 20283 Enable nodeId# 20251 Disable nodeId# 20281 Delete nodeId# 20168 Delete nodeId# 20281 Add nodeId# 20290 Disable nodeId# 20229 Delete nodeId# 20288 Add nodeId# 20291 Pick Delete nodeId# 20267 Pick Disable nodeId# 20219 Disable nodeId# 20264 Delete nodeId# 20276 Pick Enable nodeId# 20277 Enable nodeId# 20264 Pick Pick Enable nodeId# 20280 Add nodeId# 20292 Pick Delete nodeId# 20278 Delete nodeId# 20171 Add nodeId# 20293 Add nodeId# 20294 Disable nodeId# 20292 Add nodeId# 20295 Delete nodeId# 20289 Delete nodeId# 20259 Delete nodeId# 20241 Delete nodeId# 20256 Enable nodeId# 20185 Enable nodeId# 20232 Enable nodeId# 20208 Enable nodeId# 20292 Pick Pick Delete nodeId# 20268 Add nodeId# 20296 Delete nodeId# 20279 Add nodeId# 20297 Disable nodeId# 20290 Delete nodeId# 20211 Delete nodeId# 20222 Pick Add nodeId# 20298 Pick Pick Pick Enable nodeId# 20271 Disable nodeId# 20185 Pick Enable nodeId# 20137 Enable nodeId# 20219 Disable nodeId# 20246 Enable nodeId# 20236 Delete nodeId# 20297 Disable nodeId# 20122 Delete nodeId# 20137 Disable nodeId# 20227 Pick Enable nodeId# 20246 Enable nodeId# 20176 Pick Delete nodeId# 20292 Add nodeId# 20299 Delete nodeId# 20148 Disable nodeId# 20200 Pick Enable nodeId# 20282 Disable nodeId# 20293 Pick Enable nodeId# 20144 Disable nodeId# 20284 Disable nodeId# 20291 Enable nodeId# 20293 Add nodeId# 20300 Disable nodeId# 20224 Add nodeId# 20301 Enable nodeId# 20266 Delete nodeId# 20208 Delete nodeId# 20286 Enable nodeId# 20284 Delete nodeId# 20280 Disable nodeId# 20270 Add nodeId# 20302 Enable nodeId# 20224 Enable nodeId# 20229 Enable nodeId# 20200 Delete nodeId# 20200 Enable nodeId# 20262 Add nodeId# 20303 Enable nodeId# 20185 Add nodeId# 20304 Pick Enable nodeId# 20291 Pick Add nodeId# 20305 Add nodeId# 20306 Pick Add nodeId# 20307 Enable nodeId# 20290 Enable nodeId# 20122 Pick Pick Enable nodeId# 20270 Enable nodeId# 20227 Pick Add nodeId# 20308 Pick Pick Pick Add nodeId# 20309 Add nodeId# 20310 Disable nodeId# 20273 Add nodeId# 20311 Delete nodeId# 20176 Pick Add nodeId# 20312 Delete nodeId# 20308 Disable nodeId# 20210 Pick Delete nodeId# 20271 Add nodeId# 20313 Disable nodeId# 20293 Pick Enable nodeId# 20273 Delete nodeId# 20302 Enable nodeId# 20210 Pick Delete nodeId# 20303 Enable nodeId# 20293 Add nodeId# 20314 Disable nodeId# 20210 Enable nodeId# 20210 Pick Add nodeId# 20315 Add nodeId# 20316 Disable nodeId# 20310 Delete nodeId# 20313 Delete nodeId# 20214 Disable nodeId# 20257 Disable nodeId# 20236 Add nodeId# 20317 Disable nodeId# 20301 Pick Add nodeId# 20318 Delete nodeId# 20220 Delete nodeId# 20294 Pick Pick Disable nodeId# 20309 Pick Delete nodeId# 20258 Disable nodeId# 20290 Disable nodeId# 20306 Delete nodeId# 20318 Add nodeId# 20319 Disable nodeId# 20307 Enable nodeId# 20301 Delete nodeId# 20229 Add nodeId# 20320 Add nodeId# 20321 Add nodeId# 20322 Add nodeId# 20323 Add nodeId# 20324 Delete nodeId# 20224 Disable nodeId# 20128 Enable 
nodeId# 20257 Delete nodeId# 20212 Disable nodeId# 20264 Add nodeId# 20325 Delete nodeId# 20287 Pick Disable nodeId# 20324 Add nodeId# 20326 Disable nodeId# 20312 Pick Add nodeId# 20327 Delete nodeId# 20310 Enable nodeId# 20290 Delete nodeId# 20273 Add nodeId# 20328 Enable nodeId# 20306 Disable nodeId# 20257 Delete nodeId# 20284 Delete nodeId# 20305 Delete nodeId# 20328 Enable nodeId# 20324 Pick Enable nodeId# 20312 Enable nodeId# 20309 Disable nodeId# 20269 Pick Pick Add nodeId# 20329 Pick Add nodeId# 20330 Enable nodeId# 20236 Pick Disable nodeId# 20293 Add nodeId# 20331 Delete nodeId# 20321 Add nodeId# 20332 Add nodeId# 20333 Disable nodeId# 20282 Enable nodeId# 20293 Add nodeId# 20334 Delete nodeId# 20192 Pick Add nodeId# 20335 Pick Enable nodeId# 20264 Add nodeId# 20336 Enable nodeId# 20282 Add nodeId# 20337 Pick Add nodeId# 20338 Disable nodeId# 20319 Delete nodeId# 20263 Disable nodeId# 20282 Enable nodeId# 20128 Disable nodeId# 20322 Delete nodeId# 20236 Pick Pick Enable nodeId# 20322 Pick Disable nodeId# 20290 Disable nodeId# 20311 Pick Add nodeId# 20339 Add nodeId# 20340 Pick Add nodeId# 20341 Delete nodeId# 20300 Disable nodeId# 20174 Add nodeId# 20342 Disable nodeId# 20227 Enable nodeId# 20227 Enable nodeId# 20290 Pick Enable nodeId# 20307 Pick Disable nodeId# 20204 Enable nodeId# 20174 Delete nodeId# 20309 Pick Delete nodeId# 20311 Disable nodeId# 20295 Add nodeId# 20343 Enable nodeId# 20269 Delete nodeId# 20209 Pick Disable nodeId# 20320 Pick Add nodeId# 20344 Delete nodeId# 20246 Add nodeId# 20345 Add nodeId# 20346 Enable nodeId# 20282 Enable nodeId# 20295 Add nodeId# 20347 Enable nodeId# 20204 Add nodeId# 20348 Add nodeId# 20349 Pick Delete nodeId# 20291 Pick Disable nodeId# 20337 Pick Disable nodeId# 20348 Add nodeId# 20350 Add nodeId# 20351 Disable nodeId# 20339 Disable nodeId# 20335 Delete nodeId# 20266 Enable nodeId# 20257 Enable nodeId# 20339 Enable nodeId# 20319 Add nodeId# 20352 Enable nodeId# 20348 Disable nodeId# 20341 Enable nodeId# 20341 Delete nodeId# 20322 Enable nodeId# 20320 Pick Add nodeId# 20353 Enable nodeId# 20335 Delete nodeId# 20349 Disable nodeId# 20329 Enable nodeId# 20337 Enable nodeId# 20329 Disable nodeId# 20346 Disable nodeId# 20312 Add nodeId# 20354 Pick Pick Enable nodeId# 20346 Add nodeId# 20355 Disable nodeId# 20298 Pick Disable nodeId# 20354 Enable nodeId# 20354 Delete nodeId# 20128 Delete nodeId# 20331 Disable nodeId# 20185 Disable nodeId# 20219 Disable nodeId# 20354 Add nodeId# 20356 Enable nodeId# 20354 Add nodeId# 20357 Pick Enable nodeId# 20185 Delete nodeId# 20326 Enable nodeId# 20312 Disable nodeId# 20270 Disable nodeId# 20337 Pick Pick Add nodeId# 20358 Pick Pick Disable nodeId# 20358 Add nodeId# 20359 Pick Disable nodeId# 20340 Enable nodeId# 20340 Enable nodeId# 20358 Add nodeId# 20360 Disable nodeId# 20345 Disable nodeId# 20334 Delete nodeId# 20174 Delete nodeId# 20342 Pick Disable nodeId# 20351 Delete nodeId# 20282 Delete nodeId# 20340 Delete nodeId# 20355 Delete nodeId# 20354 Add nodeId# 20361 Disable nodeId# 20324 Delete nodeId# 20270 Add nodeId# 20362 Enable nodeId# 20334 Pick Enable nodeId# 20345 Delete nodeId# 20338 Add nodeId# 20363 Enable nodeId# 20351 Disable nodeId# 20363 Delete nodeId# 20360 Pick Pick Delete nodeId# 20332 Add nodeId# 20364 Pick Disable nodeId# 20299 Add nodeId# 20365 Delete nodeId# 20277 Delete nodeId# 20250 Delete nodeId# 20324 Delete nodeId# 20312 Add nodeId# 20366 Add nodeId# 20367 Disable nodeId# 20262 Add nodeId# 20368 Disable nodeId# 20257 Add nodeId# 20369 Add nodeId# 20370 Delete nodeId# 20296 
Add nodeId# 20371 Delete nodeId# 20352 Disable nodeId# 20235 Disable nodeId# 20319 Add nodeId# 20372 Pick Delete nodeId# 20327 Disable nodeId# 20144 Add nodeId# 20373 Delete nodeId# 20361 Add nodeId# 20374 Delete nodeId# 20264 Add nodeId# 20375 Disable nodeId# 20210 Pick Enable nodeId# 20235 Disable nodeId# 20367 Add nodeId# 20376 Pick Disable nodeId# 20373 Add nodeId# 20377 Add nodeId# 20378 Enable nodeId# 20299 Disable nodeId# 20204 Add nodeId# 20379 Enable nodeId# 20257 Add nodeId# 20380 Enable nodeId# 20337 Add nodeId# 20381 Disable nodeId# 20257 Disable nodeId# 20337 Pick Enable nodeId# 20319 Pick Disable nodeId# 20347 Enable nodeId# 20144 Disable nodeId# 20369 Add nodeId# 20382 Delete nodeId# 20372 Enable nodeId# 20257 Delete nodeId# 20382 Delete nodeId# 20306 Enable nodeId# 20367 Pick Add nodeId# 20383 Pick Add nodeId# 20384 Enable nodeId# 20298 Add nodeId# 20385 Enable nodeId# 20337 Add nodeId# 20386 Enable nodeId# 20210 Pick Delete nodeId# 20232 Enable nodeId# 20363 Pick Disable nodeId# 20359 Disable nodeId# 20251 Delete nodeId# 20368 Delete nodeId# 20298 Add nodeId# 20387 Disable nodeId# 20380 Pick Enable nodeId# 20373 Pick Disable nodeId# 20381 Disable nodeId# 20346 Disable nodeId# 20387 Pick Delete nodeId# 20323 Disable nodeId# 20373 Pick Delete nodeId# 20204 Disable nodeId# 20362 Disable nodeId# 20377 Pick Pick >> TPQTabletTests::ProposeTx_Unknown_WriteId >> TPQTabletTests::ProposeTx_Unknown_WriteId [GOOD] >> TPQTabletTests::ProposeTx_Unknown_Partition_2 |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTestInternal::StoreKeys [GOOD] |97.1%| [TA] $(B)/ydb/core/blobstorage/nodewarden/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> PQCountersLabeled::Partition [GOOD] >> PQCountersLabeled::PartitionFirstClass |97.1%| [TA] {RESULT} $(B)/ydb/core/blobstorage/nodewarden/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TFlatTest::AutoMergeBySize [GOOD] >> TFlatTest::AutoSplitMergeQueue >> TPQTabletTests::ProposeTx_Unknown_Partition_2 [GOOD] >> TPartitionTests::IncorrectRange >> TLocksTest::CK_Range_GoodLock [GOOD] >> TPQTest::TestDirectReadHappyWay >> TPQTabletTests::ProposeTx_Command_After_Propose >> TPQTabletTests::ProposeTx_Command_After_Propose [GOOD] >> TPartitionTests::IncorrectRange [GOOD] >> TPQTest::DirectReadBadSessionOrPipe >> TPartitionTests::GetPartitionWriteInfoSuccess ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTabletTests::ProposeTx_Command_After_Propose [GOOD] Test command err: 2025-06-24T21:25:36.447965Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:25:36.452647Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037927937] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:25:36.453021Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037927937] doesn't have tx info 2025-06-24T21:25:36.453090Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:25:36.453131Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-24T21:25:36.453181Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:25:36.453255Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:36.453339Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:36.487752Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:209:2213], now have 1 active actors on pipe 2025-06-24T21:25:36.487930Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T21:25:36.507017Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-24T21:25:36.510193Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-24T21:25:36.510368Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:36.511409Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1 actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 FederationAccount: 
"federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-24T21:25:36.511620Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:25:36.512051Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:36.512467Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:217:2219] 2025-06-24T21:25:36.513479Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-24T21:25:36.513545Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:217:2219] 2025-06-24T21:25:36.513598Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:25:36.514160Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T21:25:36.514266Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 1 2025-06-24T21:25:36.514315Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 1 done 2025-06-24T21:25:36.514511Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:36.514685Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-24T21:25:36.514919Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:36.517900Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:36.517984Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:36.518415Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:224:2224], now have 1 active actors on pipe 2025-06-24T21:25:36.519176Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:227:2226], now have 1 active actors on pipe 2025-06-24T21:25:36.519982Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3225: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 180 RawX2: 4294969489 } TxId: 67890 Data { Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 2 Consumer: "user" Path: "/topic" } SendingShards: 22222 ReceivingShards: 22222 Immediate: false } 2025-06-24T21:25:36.520038Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3401: [PQ: 72057594037927937] distributed transaction 2025-06-24T21:25:36.520118Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3715: [PQ: 72057594037927937] Propose TxId 67890, WriteId (empty maybe) 2025-06-24T21:25:36.520170Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-06-24T21:25:36.520209Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State UNKNOWN 2025-06-24T21:25:36.520253Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3940: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-24T21:25:36.520292Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState PREPARING 2025-06-24T21:25:36.520349Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72057594037927937] write key for TxId 67890 2025-06-24T21:25:36.520519Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: PREPARED MinStep: 134 MaxStep: 30134 PredicatesReceived { TabletId: 22222 } PredicateRecipients: 22222 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 2 Consumer: "user" Path: "/topic" } Kind: KIND_DATA SourceActor { RawX1: 180 RawX2: 4294969489 } Partitions { } 2025-06-24T21:25:36.520648Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T21:25:36.527618Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:25:36.527706Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:36.527761Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State PREPARING 2025-06-24T21:25:36.527804Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState PREPARED 2025-06-24T21:25:36.531806Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3427: [PQ: 72057594037927937] Handle TEvTxProcessing::TEvPlanStep Transactions { TxId: 67890 AckTo { RawX1: 180 RawX2: 4294969489 } } Step: 100 2025-06-24T21:25:36.531908Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute 
txs with state PREPARED 2025-06-24T21:25:36.531972Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State PREPARED 2025-06-24T21:25:36.532039Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState PLANNING 2025-06-24T21:25:36.532083Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3816: [PQ: 72057594037927937] PlanStep 100, PlanTxId 67890 2025-06-24T21:25:36.532140Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72057594037927937] write key for TxId 67890 2025-06-24T21:25:36.532288Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: PLANNED MinStep: 134 MaxStep: 30134 PredicatesReceived { TabletId: 22222 } PredicateRecipients: 22222 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 2 Consumer: "user" Path: "/topic" } Step: 100 Kind: KIND_DATA SourceActor { RawX1: 180 RawX2: 4294969489 } Partitions { } 2025-06-24T21:25:36.532388Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T21:25:36.536619Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:25:36.536711Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PLANNING 2025-06-24T21:25:36.536779Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State PLANNING 2025-06-24T21:25:36.536852Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState PLANNED 2025-06-24T21:25:36.536926Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from PLANNING to PLANNED 2025-06-24T21:25:36.536973Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4427: [PQ: 72057594037927937] TxQueue.size 1 2025-06-24T21:25:36.537013Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:836: [PQ: 72057594037927937] New ExecStep 100, ExecTxId 67890 2025-06-24T21:25:36.537089Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState CALCULATING 2025-06-24T21:25:36.537144Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from PLANNED to CALCULATING 2025-06-24T21:25:36.537342Z node 1 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 100, TxId 67890 2025-06-24T21:25:36.537442Z node 1 :PERSQUEUE DEBUG: partition.cpp:2468: [PQ: 72057594037927937, Parti ... 
Ms: 0 Generation: 6 Important: false } 2025-06-24T21:25:38.863613Z node 6 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:38.864301Z node 6 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 6 actor [6:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 6 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 6 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 6 Important: false } 2025-06-24T21:25:38.864412Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:25:38.864776Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:38.865037Z node 6 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [6:189:2200] 2025-06-24T21:25:38.866065Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-24T21:25:38.866123Z node 6 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [6:189:2200] 2025-06-24T21:25:38.866185Z node 6 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:25:38.866629Z node 6 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T21:25:38.866724Z node 6 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 6 2025-06-24T21:25:38.866771Z node 6 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 6 done 2025-06-24T21:25:38.866945Z node 6 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:38.867081Z node 6 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-24T21:25:38.867306Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:38.870199Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:38.870281Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:38.870670Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [6:196:2205], now have 1 active actors on pipe 2025-06-24T21:25:38.871324Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [6:199:2207], now have 1 active actors on pipe 2025-06-24T21:25:38.871423Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic' requestId: 2025-06-24T21:25:38.871477Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-24T21:25:38.871526Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2721: [PQ: 72057594037927937] partition {0, {0, 3}, 100000} for WriteId {0, 3} 2025-06-24T21:25:38.871735Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3600: [PQ: 72057594037927937] send TEvSubscribeLock for WriteId {0, 3} 2025-06-24T21:25:38.871836Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T21:25:38.874261Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:25:38.874848Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:25:38.875249Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:38.875515Z node 6 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateInit] bootstrapping {0, {0, 3}, 100000} [6:205:2212] 2025-06-24T21:25:38.876459Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitDiskStatusStep 2025-06-24T21:25:38.877945Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitMetaStep 2025-06-24T21:25:38.878253Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitInfoRangeStep 2025-06-24T21:25:38.878645Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitDataRangeStep 2025-06-24T21:25:38.878886Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitDataStep 2025-06-24T21:25:38.878931Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T21:25:38.878975Z node 6 :PERSQUEUE INFO: partition_init.cpp:895: [topic:{0, {0, 3}, 100000}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:25:38.879022Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:{0, {0, 3}, 100000}:Initializer] Initializing completed. 
2025-06-24T21:25:38.879078Z node 6 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateInit] init complete for topic 'topic' partition {0, {0, 3}, 100000} generation 2 [6:205:2212] 2025-06-24T21:25:38.879161Z node 6 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateInit] SYNC INIT topic topic partitition {0, {0, 3}, 100000} so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:25:38.879217Z node 6 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateIdle] Process pending events. Count 0 2025-06-24T21:25:38.879497Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateIdle] no data for compaction 2025-06-24T21:25:38.879602Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie -=[ 0wn3r ]=-|34cb2507-9ba4d1cd-e095cb7-9ec09650_0 generated for partition {0, {0, 3}, 100000} topic 'topic' owner -=[ 0wn3r ]=- 2025-06-24T21:25:38.879734Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateIdle] TPartition::ReplyOwnerOk. Partition: {0, {0, 3}, 100000} 2025-06-24T21:25:38.879954Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 4 2025-06-24T21:25:38.880341Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037927937] server disconnected, pipe [6:199:2207] destroyed 2025-06-24T21:25:38.880431Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateIdle] TPartition::DropOwner. 2025-06-24T21:25:38.880626Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [6:217:2219], now have 1 active actors on pipe 2025-06-24T21:25:38.880844Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3225: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 180 RawX2: 25769805969 } TxId: 2 Data { Operations { PartitionId: 0 Path: "/topic" SupportivePartition: 100000 } Immediate: false WriteId { NodeId: 0 KeyId: 3 KafkaTransaction: false } } 2025-06-24T21:25:38.880929Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3265: [PQ: 72057594037927937] PartitionId {0, {0, 3}, 100000} for WriteId {0, 3} 2025-06-24T21:25:38.880978Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3355: [PQ: 72057594037927937] TxId 2 has WriteId {0, 3} 2025-06-24T21:25:38.881019Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3401: [PQ: 72057594037927937] distributed transaction 2025-06-24T21:25:38.881088Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3715: [PQ: 72057594037927937] Propose TxId 2, WriteId {0, 3} 2025-06-24T21:25:38.881127Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3727: [PQ: 72057594037927937] Link TxId 2 with WriteId {0, 3} 2025-06-24T21:25:38.881165Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-06-24T21:25:38.881206Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 2, State UNKNOWN 2025-06-24T21:25:38.881249Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3940: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-24T21:25:38.881291Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 2, NewState PREPARING 2025-06-24T21:25:38.881341Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72057594037927937] write key for TxId 2 
2025-06-24T21:25:38.881475Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 2] save tx TxId: 2 State: PREPARED MinStep: 231 MaxStep: 30231 Operations { PartitionId: 0 Path: "/topic" SupportivePartition: 100000 } Kind: KIND_DATA SourceActor { RawX1: 180 RawX2: 25769805969 } WriteId { NodeId: 0 KeyId: 3 KafkaTransaction: false } Partitions { } 2025-06-24T21:25:38.881573Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T21:25:38.885400Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:25:38.885476Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:38.885516Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 2, State PREPARING 2025-06-24T21:25:38.885564Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 2, NewState PREPARED 2025-06-24T21:25:38.885912Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [6:224:2225], now have 1 active actors on pipe 2025-06-24T21:25:38.886038Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic' requestId: 2025-06-24T21:25:38.886085Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-24T21:25:38.886132Z node 6 :PERSQUEUE WARN: event_helpers.cpp:42: tablet 72057594037927937 topic 'topic error: it is forbidden to write after a commit 2025-06-24T21:25:38.886221Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1426: [PQ: 72057594037927937] Handle TEvPQ::TEvError Cookie 2, Error it is forbidden to write after a commit 2025-06-24T21:25:38.886259Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:401: Answer error topic: 'topic' partition: 0 messageNo: 0 requestId: error: it is forbidden to write after a commit >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionActive_BoundaryTrue_Test >> TLocksTest::Range_GoodLock1 [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_NewSourceId_Test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::CK_Range_GoodLock [GOOD] Test command err: 2025-06-24T21:24:57.650840Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630019936020432:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:57.651915Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6f/r3tmp/tmp5hETYJ/pdisk_1.dat 2025-06-24T21:24:58.001780Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:58.003067Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630019936020219:2079] 1750800297631801 != 1750800297631804 2025-06-24T21:24:58.073874Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:58.074006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:58.076115Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13794 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:58.321966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:58.362131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:58.498866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:58.545676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:58.648112Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:00.827534Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630034030201647:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:00.827593Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6f/r3tmp/tmptTZauq/pdisk_1.dat 2025-06-24T21:25:00.958079Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:00.960218Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630034030201626:2079] 1750800300826633 != 1750800300826636 2025-06-24T21:25:00.977263Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:00.977383Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:00.979275Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26286 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:01.193245Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:01.199977Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:01.215132Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:01.271434Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:25:01.311520Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:04.279313Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630050450597179:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:04.279458Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6f/r3tmp/tmpP70HTN/pdisk_1.dat 2025-06-24T21:25:04.477374Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:04.483287Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630050450597152:2079] 1750800304271390 != 1750800304271393 2025-06-24T21:25:04.494772Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:04.494905Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:04.496997Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12996 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 
2025-06-24T21:25:04.734133Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:04.757778Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... [8:7519630140327363556:2079] 1750800325109011 != 1750800325109014 2025-06-24T21:25:25.296750Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:25.296867Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:25.298949Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1379 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:25.655261Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:25.680453Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:25.759177Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:25.815790Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:29.353654Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519630154919565381:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:29.353745Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6f/r3tmp/tmpZhERZO/pdisk_1.dat 2025-06-24T21:25:29.476116Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519630154919565362:2079] 1750800329353048 != 1750800329353051 2025-06-24T21:25:29.476805Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:29.500074Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:29.500157Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:29.501932Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10293 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:25:29.780702Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:29.803326Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:29.858509Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:29.910612Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:33.425669Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519630173630290359:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:33.425768Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6f/r3tmp/tmpk7ASka/pdisk_1.dat 2025-06-24T21:25:33.626987Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:33.628371Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:33.628492Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:33.629482Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519630173630290343:2079] 1750800333425131 != 1750800333425134 2025-06-24T21:25:33.652109Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8952 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:34.006170Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:34.012781Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:34.032919Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:34.099935Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:34.163045Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
>> TPartitionTests::ShadowPartitionCounters [GOOD] >> TPQTest::TestDirectReadHappyWay [GOOD] >> TPQTest::TestMessageNo >> TPartitionTests::ShadowPartitionCountersFirstClass >> TPartitionTests::GetPartitionWriteInfoSuccess [GOOD] >> TPartitionTests::GetPartitionWriteInfoError >> TPartitionTests::UserActCount >> TPartitionTests::ConflictingSrcIdForTxInDifferentBatches [GOOD] >> TPartitionTests::DataTxCalcPredicateOk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::Range_GoodLock1 [GOOD] Test command err: 2025-06-24T21:24:58.870852Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630023463443375:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:58.870972Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6e/r3tmp/tmp2PUGJz/pdisk_1.dat 2025-06-24T21:24:59.266306Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:59.267649Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630023463443352:2079] 1750800298868591 != 1750800298868594 2025-06-24T21:24:59.284737Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:59.284842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:59.286564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18787 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:59.563405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-24T21:24:59.587767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:24:59.592854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:59.726252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:59.781962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:24:59.882488Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:02.223566Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630041771105510:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:02.223618Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6e/r3tmp/tmpzSy0Lh/pdisk_1.dat 2025-06-24T21:25:02.362697Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:02.365469Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630041771105483:2079] 1750800302221855 != 1750800302221858 2025-06-24T21:25:02.382755Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:02.382822Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:02.388071Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9630 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:02.606193Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:02.629571Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:02.705501Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:02.751487Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:05.763401Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630054972244711:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:05.763489Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6e/r3tmp/tmpULuL1o/pdisk_1.dat 2025-06-24T21:25:05.924033Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:05.927258Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630054972244688:2079] 1750800305761100 != 1750800305761103 2025-06-24T21:25:05.942234Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:05.942318Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:05.947484Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62294 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:06.150036Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:06.159686Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:06.168238Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 28147497 ... 
shard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:26.198895Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7519630144120916217:2079] 1750800326023572 != 1750800326023575 2025-06-24T21:25:26.217941Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:26.218064Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:26.220954Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2122 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:26.534288Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:26.555753Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:26.632730Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:26.686971Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:30.374089Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519630160777226907:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:30.374195Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6e/r3tmp/tmp4FockF/pdisk_1.dat 2025-06-24T21:25:30.496074Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:30.497430Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519630160777226888:2079] 1750800330373454 != 1750800330373457 2025-06-24T21:25:30.515365Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:30.515452Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:30.557789Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25647 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:30.817949Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:30.839752Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:30.912803Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:31.038207Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:34.764804Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519630179290558412:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:34.764922Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a6e/r3tmp/tmpXCH07U/pdisk_1.dat 2025-06-24T21:25:34.939624Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:34.941074Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519630179290558392:2079] 1750800334764090 != 1750800334764093 2025-06-24T21:25:34.963026Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:34.963168Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:34.965979Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19592 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:25:35.303030Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:35.326920Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:35.406178Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:35.483442Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> TPartitionTests::ConflictingSrcIdForTxWithHead >> TPartitionTests::GetPartitionWriteInfoError [GOOD] >> TPartitionTests::DataTxCalcPredicateError >> TPQTest::TestMessageNo [GOOD] >> TPQTest::TestGetTimestamps >> TPartitionTests::NonConflictingCommitsBatch >> TPQTest::DirectReadBadSessionOrPipe [GOOD] >> TPQTest::DirectReadOldPipe >> TPartitionTests::TestNonConflictingActsBatchOk [GOOD] >> TPartitionTests::TestBatchingWithChangeConfig >> TPQTabletTests::Single_PQTablet_And_Multiple_Partitions >> TPQTabletTests::Single_PQTablet_And_Multiple_Partitions [GOOD] >> TPQTabletTests::Test_Waiting_For_TEvReadSet_When_The_Number_Of_Senders_And_Recipients_Match >> TPQTest::DirectReadOldPipe [GOOD] >> TPQTest::TestAccountReadQuota >> TPQTabletTests::Test_Waiting_For_TEvReadSet_When_The_Number_Of_Senders_And_Recipients_Match [GOOD] >> TPQTest::TestCmdReadWithLastOffset [GOOD] >> TPQTest::TestDescribeBalancer >> TPQTabletTests::TEvReadSet_comes_before_TEvPlanStep >> TPQTest::TestSeveralOwners >> KqpJoinOrder::CanonizedJoinOrderTPCDS64 [GOOD] >> TPQTabletTests::TEvReadSet_comes_before_TEvPlanStep [GOOD] >> TPQTest::TestGetTimestamps [GOOD] >> TPQTest::TestOffsetEstimation [GOOD] >> TPQTest::TestMaxTimeLagRewind >> TPQTabletTests::Read_TEvTxCommit_After_Restart >> SystemView::AuthEffectivePermissions [GOOD] >> TPQTabletTests::Read_TEvTxCommit_After_Restart [GOOD] >> TPartitionTests::ShadowPartitionCountersFirstClass [GOOD] >> TPQTabletTests::TEvReadSet_Is_Not_Sent_Ahead_Of_Time >> TPQTest::TestSeveralOwners [GOOD] >> TPQTest::TestReserveBytes >> TPartitionTests::ConflictingSrcIdForTxWithHead [GOOD] >> TPQTest::TestDescribeBalancer [GOOD] >> TPQTest::TestCheckACL >> TPartitionTests::DataTxCalcPredicateError [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> 
TPartitionTests::ShadowPartitionCountersFirstClass [GOOD] Test command err: 2025-06-24T21:25:34.660637Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:34.661179Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:34.692036Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:182:2195] 2025-06-24T21:25:34.695558Z node 1 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T21:25:34.000000Z 2025-06-24T21:25:34.695649Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [1:182:2195] Got cmd write: CmdWrite { Key: "i0000000000" Value: "\030\000(\260\371\323\236\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient" Value: "\010\000\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000000" Value: "\030\000(\260\371\323\236\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient" Value: "\010\005\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient" Value: "\005\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000000" Value: "\030\000(\260\371\323\236\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient" Value: "\010\005\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient" Value: "\005\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } 2025-06-24T21:25:35.126590Z node 1 :PERSQUEUE WARN: partition.cpp:3248: [PQ: 72057594037927937, Partition: 0, State: StateIdle] commit to future - topic Root/PQ/rt3.dc1--account--topic partition 0 client client EndOffset 10 offset 13 Got cmd write: CmdWrite { Key: "i0000000000" Value: "\030\000(\260\371\323\236\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient" Value: "\010\n\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient" Value: "\n\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } 2025-06-24T21:25:35.558332Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:35.558414Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:35.575579Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [2:182:2195] 2025-06-24T21:25:35.577612Z node 2 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-24T21:25:35.000000Z 2025-06-24T21:25:35.577696Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [2:182:2195] 2025-06-24T21:25:36.393082Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:36.393138Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:36.408312Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [3:180:2193] 2025-06-24T21:25:36.409218Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 1 generation 0 [3:180:2193] 2025-06-24T21:25:36.410059Z node 3 :PERSQUEUE INFO: partition.cpp:3800: [PQ: 72057594037927937, Partition: 1, State: StateIdle] SubDomainOutOfSpace was changed. Topic: "Root/PQ/rt3.dc1--account--topic". Partition: 1. SubDomainOutOfSpace: 1 2025-06-24T21:25:36.410182Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|1265f8c7-5a052ccc-e1c781d9-6a9517dd_0 generated for partition 1 topic 'Root/PQ/rt3.dc1--account--topic' owner owner1 Send disk status response with cookie: 0 2025-06-24T21:25:36.747110Z node 3 :PERSQUEUE INFO: partition.cpp:3800: [PQ: 72057594037927937, Partition: 1, State: StateIdle] SubDomainOutOfSpace was changed. Topic: "Root/PQ/rt3.dc1--account--topic". Partition: 1. SubDomainOutOfSpace: 0 2025-06-24T21:25:37.215761Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:37.215842Z node 4 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:37.234693Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {0, {0, 1111}, 123}, State: StateInit] bootstrapping {0, {0, 1111}, 123} [4:182:2195] 2025-06-24T21:25:37.239336Z node 4 :PERSQUEUE INFO: partition_init.cpp:911: [rt3.dc1--account--topic:{0, {0, 1111}, 123}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-24T21:25:37.000000Z 2025-06-24T21:25:37.239435Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {0, {0, 1111}, 123}, State: StateInit] init complete for topic 'rt3.dc1--account--topic' partition {0, {0, 1111}, 123} generation 0 [4:182:2195] 2025-06-24T21:25:37.604135Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|fcd83e82-eb79568e-11deb2a-fa92b379_0 generated for partition {0, {0, 1111}, 123} topic 'rt3.dc1--account--topic' owner owner1 Send write: 0 Send write: 1 Send write: 2 Send write: 3 Send write: 4 Send write: 5 Send write: 6 Send write: 7 Send write: 8 Send write: 9 2025-06-24T21:25:40.856699Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:40.856760Z node 5 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:40.871024Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {0, {0, 1111}, 123}, State: StateInit] bootstrapping {0, {0, 1111}, 123} [5:182:2195] 2025-06-24T21:25:40.872449Z node 5 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:{0, {0, 1111}, 123}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T21:25:40.000000Z 2025-06-24T21:25:40.872509Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {0, {0, 1111}, 123}, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition {0, {0, 1111}, 123} generation 0 [5:182:2195] 2025-06-24T21:25:41.215211Z node 5 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|dfa67339-d80bd117-b6997180-dfb2928e_0 generated for partition {0, {0, 1111}, 123} topic 'Root/PQ/rt3.dc1--account--topic' owner owner1 Send write: 0 Send write: 1 Send write: 2 Send write: 3 Send write: 4 Send write: 5 Send write: 6 Send write: 7 Send write: 8 Send write: 9 >> TPartitionTests::DataTxCalcPredicateOrder >> TPartitionTests::ConflictingCommitsInSeveralBatches >> TPartitionTests::NonConflictingCommitsBatch [GOOD] >> TPartitionTests::GetUsedStorage >> TPQTabletTests::TEvReadSet_Is_Not_Sent_Ahead_Of_Time [GOOD] >> TPQTabletTests::TEvReadSet_For_A_Non_Existent_Tablet >> PQCountersLabeled::PartitionFirstClass [GOOD] >> PQCountersLabeled::ImportantFlagSwitching >> TPQTabletTests::TEvReadSet_For_A_Non_Existent_Tablet [GOOD] >> TPartitionTests::GetUsedStorage [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::AuthEffectivePermissions [GOOD] Test command err: 2025-06-24T21:23:29.550071Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629642164294441:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:29.550247Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aea/r3tmp/tmpOABn23/pdisk_1.dat 2025-06-24T21:23:30.341772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:30.341883Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:30.365522Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:30.372783Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:30.556212Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 26268, node 1 2025-06-24T21:23:30.787078Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:30.787103Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:30.787109Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:30.787266Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6987 TClient is connected to server localhost:6987 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:31.737709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:34.124175Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629663639131870:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:34.124288Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629663639131859:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:34.124441Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:34.128287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:34.156266Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629663639131873:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:23:34.218768Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629663639131940:2733] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:34.547269Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629642164294441:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:34.547344Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:34.625592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:36.051864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:23:36.632632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:37.388932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:38.027276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:23:38.656478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:23:39.210903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:39.368956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:23:43.254971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710715:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-24T21:23:45.286515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:23:45.286553Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:46.530360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710744:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-24T21:23:46.605720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710747:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:47.029435Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519629719473710529:3077], owner: [1:7519629719473710526:3075], scan id: 0, sys view info: Type: EShowCreate SourceObject { OwnerId: 1 LocalId: 0 } 2025-06-24T21:23:47.029522Z node 1 :SYSTEM_VIEWS INFO: show_create.cpp:107: Scan prepared, actor: [1:7519629719473710529:3077] 2025-06-24T21:23:47.195988Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519629719473710529:3077], row count: 1, finished: 1 2025-06-24T21:23:47.196103Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519629719473710529:3077], owner: [1:7519629719473710526:3075], scan id: 0, sys view info: Type: EShowCreate SourceObject { OwnerId: 1 LocalId: 0 } 2025-06-24T21:23:50.439263Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519629729948344386:2075];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aea/r3tmp/tmpfdreRO/pdisk_1.dat 2025-06- ... 
DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table0 TableId: [72057594046644480:4:1] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:25:39.796069Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519630200784779149:2347], row count: 1, finished: 0 2025-06-24T21:25:39.796348Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [41:7519630200784779149:2347], owner: [41:7519630200784779146:2345], scan id: 0, sys view info: Type: EAuthEffectivePermissions SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:25:39.797792Z node 41 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [41:7519630170720006212:2095], database# , query hash# 11342553055430868283, cpu time# 144360 2025-06-24T21:25:39.798498Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800339781, txId: 281474976710676] shutting down 2025-06-24T21:25:39.949774Z node 41 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710679. Ctx: { TraceId: 01jyhxa4v29xmh0vy8sryxewsy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=41&id=OTUyODNhNTUtNGY5OTZlYzItMWU5MjhmNTItYzgwMzlmY2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:25:39.953177Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [41:7519630200784779202:2356], owner: [41:7519630200784779199:2354], scan id: 0, sys view info: Type: EAuthEffectivePermissions SourceObject { OwnerId: 72075186224037888 LocalId: 1 } 2025-06-24T21:25:39.954819Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [41:7519630200784779202:2356], schemeshard id: 72075186224037888, hive id: 72057594037968897, database: /Root/Tenant1, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 2], database node count: 2 2025-06-24T21:25:39.954855Z node 41 :SYSTEM_VIEWS DEBUG: auth_scan_base.h:100: ProceedToScan, tenant name: /Root/Tenant1 tenant owner: root@builtin subject sid: empty require admin access: 0 is admin: 1 2025-06-24T21:25:39.954939Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Tenant1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:25:39.955265Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Tenant1 TableId: [72075186224037888:1:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 
72075186224037889 Coordinators: 72075186224037890 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037891 Mediators: 72075186224037892 SchemeShard: 72075186224037888 SysViewProcessor: 72075186224037893 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 }] Groups: [] } Children [Dir2,Table1] }] } 2025-06-24T21:25:39.955346Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519630200784779202:2356], row count: 1, finished: 0 2025-06-24T21:25:39.955514Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Tenant1/Dir2 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:25:39.956938Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Tenant1/Dir2 TableId: [72075186224037888:3:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037889 Coordinators: 72075186224037890 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037891 Mediators: 72075186224037892 SchemeShard: 72075186224037888 SysViewProcessor: 72075186224037893 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-24T21:25:39.957074Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519630200784779202:2356], row count: 2, finished: 0 2025-06-24T21:25:39.957838Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Tenant1/Table1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:25:39.959213Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Tenant1/Table1 TableId: [72075186224037888:2:1] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037889 Coordinators: 72075186224037890 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037891 Mediators: 72075186224037892 SchemeShard: 72075186224037888 SysViewProcessor: 72075186224037893 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:25:39.959271Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519630200784779202:2356], row count: 1, finished: 0 2025-06-24T21:25:39.960009Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [41:7519630200784779202:2356], owner: [41:7519630200784779199:2354], scan id: 0, sys view info: Type: EAuthEffectivePermissions SourceObject { OwnerId: 72075186224037888 LocalId: 1 } 
2025-06-24T21:25:39.961798Z node 41 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [41:7519630170720006212:2095], database# , query hash# 17325808444334437222, cpu time# 139452 2025-06-24T21:25:39.962555Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800339948, txId: 281474976710678] shutting down 2025-06-24T21:25:39.971898Z node 43 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:25:39.978304Z node 45 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:25:39.984577Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 42 2025-06-24T21:25:39.985694Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(42, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:25:39.986049Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 43 2025-06-24T21:25:39.986549Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(43, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:25:39.986688Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 45 2025-06-24T21:25:39.990220Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(45, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:25:39.992668Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 44 2025-06-24T21:25:39.993949Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(44, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:25:39.995684Z node 41 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[44:7519630176147576451:2106], Type=268959746 2025-06-24T21:25:39.995729Z node 41 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[44:7519630176147576451:2106], Type=268959746 2025-06-24T21:25:39.995758Z node 41 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[44:7519630176147576451:2106], Type=268959746 2025-06-24T21:25:39.995792Z node 41 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[44:7519630176147576451:2106], Type=268959746 2025-06-24T21:25:39.997759Z node 41 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[44:7519630176147576451:2106], Type=268959746 2025-06-24T21:25:39.997824Z node 41 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[44:7519630176147576451:2106], Type=268959746 2025-06-24T21:25:40.182680Z node 45 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:25:40.191608Z node 45 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [45:7519630204975762140:2305], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:25:40.252613Z node 45 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [45:7519630204975762140:2305], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:25:40.440450Z node 45 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [45:7519630204975762140:2305], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:25:40.820420Z node 45 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [45:7519630204975762140:2305], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:25:41.183445Z node 45 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:25:41.496815Z node 45 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [45:7519630204975762140:2305], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } >> TPQTabletTests::Huge_ProposeTransacton [GOOD] >> TLocksTest::Range_CorrectDot [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTabletTests::TEvReadSet_For_A_Non_Existent_Tablet [GOOD] Test command err: 2025-06-24T21:25:42.386260Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:25:42.391409Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037927937] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:25:42.391777Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037927937] doesn't have tx info 2025-06-24T21:25:42.391845Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:25:42.391909Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-24T21:25:42.391966Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:25:42.392013Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:42.392089Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:42.413039Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:181:2194], now have 1 active actors on pipe 2025-06-24T21:25:42.413178Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T21:25:42.432462Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-24T21:25:42.435957Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-24T21:25:42.436164Z node 1 :PERSQUEUE 
NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:42.438001Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1 actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-24T21:25:42.438157Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:25:42.438266Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:1:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:25:42.438856Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:42.439376Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:189:2200] 2025-06-24T21:25:42.440501Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-24T21:25:42.440571Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:189:2200] 2025-06-24T21:25:42.440660Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:25:42.441208Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T21:25:42.441318Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 1 2025-06-24T21:25:42.441369Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 1 done 2025-06-24T21:25:42.441555Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:42.441714Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T21:25:42.442104Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:42.442453Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:191:2202] 2025-06-24T21:25:42.443370Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:1:Initializer] Initializing completed. 
2025-06-24T21:25:42.443443Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'topic' partition 1 generation 2 [1:191:2202] 2025-06-24T21:25:42.443493Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 1, State: StateInit] SYNC INIT topic topic partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:25:42.443879Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-24T21:25:42.443960Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user reinit request with generation 1 2025-06-24T21:25:42.444001Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user reinit with generation 1 done 2025-06-24T21:25:42.444126Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:42.444247Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T21:25:42.444543Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:42.444710Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 1, State: StateIdle] no data for compaction 2025-06-24T21:25:42.449549Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:42.449650Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:42.450516Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 1, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:42.450588Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 1, State: StateIdle] no data for compaction 2025-06-24T21:25:42.451062Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:204:2211], now have 1 active actors on pipe 2025-06-24T21:25:42.451879Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:207:2213], now have 1 active actors on pipe 2025-06-24T21:25:42.452875Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3225: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 180 RawX2: 4294969489 } TxId: 67890 Data { Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Operations { PartitionId: 1 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Immediate: false } 2025-06-24T21:25:42.452951Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3401: [PQ: 72057594037927937] distributed transaction 2025-06-24T21:25:42.453046Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3715: [PQ: 72057594037927937] Propose TxId 67890, WriteId (empty maybe) 2025-06-24T21:25:42.453105Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 
72057594037927937] Try execute txs with state UNKNOWN 2025-06-24T21:25:42.453157Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State UNKNOWN 2025-06-24T21:25:42.453204Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3940: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-24T21:25:42.453257Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState PREPARING 2025-06-24T21:25:42.453330Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72057594037927937] write key for TxId 67890 2025-06-24T21:25:42.453493Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: PREPARED MinStep: 231 MaxStep: 30231 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Operations { PartitionId: 1 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Kind: KIND_DATA SourceActor { RawX1: 180 RawX2: 4294969489 } Partitions { } 2025-06-24T21:25:42.453633Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T21:25:42.457938Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:25:42.458010Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:42.458062Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State PREPARING 2025-06-24T21:25:42.458102Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState PREPARED 2025-0 ... 8 ReadSet: "\010\001" Seqno: 0 2025-06-24T21:25:45.300263Z node 6 :PERSQUEUE DEBUG: transaction.cpp:274: [TxId: 67890] Handle TEvReadSet 2025-06-24T21:25:45.300298Z node 6 :PERSQUEUE DEBUG: transaction.cpp:291: [TxId: 67890] Predicates 1/1 2025-06-24T21:25:45.300351Z node 6 :PERSQUEUE DEBUG: pqtablet_mock.cpp:72: Connected to tablet 72057594037927937 from tablet 72057594037950158 2025-06-24T21:25:45.301588Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:25:45.301643Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PLANNING 2025-06-24T21:25:45.301675Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State PLANNING 2025-06-24T21:25:45.301713Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState PLANNED 2025-06-24T21:25:45.301757Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from PLANNING to PLANNED 2025-06-24T21:25:45.301790Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4427: [PQ: 72057594037927937] TxQueue.size 1 2025-06-24T21:25:45.301827Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:836: [PQ: 72057594037927937] New ExecStep 100, ExecTxId 67890 2025-06-24T21:25:45.301877Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState CALCULATING 2025-06-24T21:25:45.301910Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from PLANNED to CALCULATING 2025-06-24T21:25:45.301994Z node 6 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 100, TxId 67890 2025-06-24T21:25:45.302147Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3507: [PQ: 72057594037927937] Handle TEvPQ::TEvTxCalcPredicateResult Step 100, 
TxId 67890, Partition 0, Predicate 1 2025-06-24T21:25:45.302178Z node 6 :PERSQUEUE DEBUG: transaction.cpp:218: [TxId: 67890] Handle TEvTxCalcPredicateResult 2025-06-24T21:25:45.302208Z node 6 :PERSQUEUE DEBUG: transaction.cpp:267: [TxId: 67890] Partition responses 1/1 2025-06-24T21:25:45.302256Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state CALCULATING 2025-06-24T21:25:45.302289Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State CALCULATING 2025-06-24T21:25:45.302318Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72057594037927937] TxId 67890 State CALCULATING FrontTxId 67890 2025-06-24T21:25:45.302346Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4443: [PQ: 72057594037927937] Received 1, Expected 1 2025-06-24T21:25:45.302380Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState CALCULATED 2025-06-24T21:25:45.302425Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from CALCULATING to CALCULATED 2025-06-24T21:25:45.302472Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72057594037927937] write key for TxId 67890 2025-06-24T21:25:45.302591Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: CALCULATED MinStep: 134 MaxStep: 30134 PredicatesReceived { TabletId: 72057594037950158 Predicate: true } PredicateRecipients: 72057594037950158 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Step: 100 Predicate: true Kind: KIND_DATA SourceActor { RawX1: 180 RawX2: 25769805969 } Partitions { } 2025-06-24T21:25:45.302664Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T21:25:45.308320Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:25:45.308385Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state CALCULATED 2025-06-24T21:25:45.308421Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State CALCULATED 2025-06-24T21:25:45.308469Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72057594037927937] TxId 67890 State CALCULATED FrontTxId 67890 2025-06-24T21:25:45.308515Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState WAIT_RS 2025-06-24T21:25:45.308566Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from CALCULATED to WAIT_RS 2025-06-24T21:25:45.308619Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3988: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSet to 1 receivers. Wait TEvTxProcessing::TEvReadSet from 1 senders. 
2025-06-24T21:25:45.308663Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3998: [PQ: 72057594037927937] Send TEvReadSet to tablet 72057594037950158 2025-06-24T21:25:45.308784Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4482: [PQ: 72057594037927937] HaveParticipantsDecision 1 2025-06-24T21:25:45.308858Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState EXECUTING 2025-06-24T21:25:45.308908Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from WAIT_RS to EXECUTING 2025-06-24T21:25:45.308954Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4512: [PQ: 72057594037927937] Received 0, Expected 1 2025-06-24T21:25:45.309166Z node 6 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 100, TxId 67890 2025-06-24T21:25:45.309235Z node 6 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 67890 2025-06-24T21:25:45.309447Z node 6 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T21:25:45.310746Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2918: [PQ: 72057594037927937] Handle TEvTabletPipe::TEvClientConnected 2025-06-24T21:25:45.310813Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037927937] Connected to tablet 72057594037950158 2025-06-24T21:25:45.310902Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2918: [PQ: 72057594037927937] Handle TEvTabletPipe::TEvClientConnected 2025-06-24T21:25:45.310956Z node 6 :PERSQUEUE DEBUG: transaction.cpp:324: [TxId: 67890] Predicate acks 1/1 2025-06-24T21:25:45.311017Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state EXECUTING 2025-06-24T21:25:45.311082Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State EXECUTING 2025-06-24T21:25:45.311130Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72057594037927937] TxId 67890 State EXECUTING FrontTxId 67890 2025-06-24T21:25:45.311201Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4512: [PQ: 72057594037927937] Received 0, Expected 1 2025-06-24T21:25:45.311244Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4327: [PQ: 72057594037927937] TxId 67890 status has not changed 2025-06-24T21:25:45.314069Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:45.314172Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:45.314237Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3553: [PQ: 72057594037927937] Handle TEvPQ::TEvTxCommitDone Step 100, TxId 67890, Partition 0 2025-06-24T21:25:45.314286Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state EXECUTING 2025-06-24T21:25:45.314326Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State EXECUTING 2025-06-24T21:25:45.314374Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72057594037927937] TxId 67890 State EXECUTING FrontTxId 67890 2025-06-24T21:25:45.314434Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4512: [PQ: 72057594037927937] Received 1, Expected 1 2025-06-24T21:25:45.314478Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4185: [PQ: 72057594037927937] TxId: 67890 send TEvPersQueue::TEvProposeTransactionResult(COMPLETE) 2025-06-24T21:25:45.314524Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4516: [PQ: 
72057594037927937] complete TxId 67890 2025-06-24T21:25:45.314572Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4534: [PQ: 72057594037927937] delete partitions for TxId 67890 2025-06-24T21:25:45.314615Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState EXECUTED 2025-06-24T21:25:45.314660Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from EXECUTING to EXECUTED 2025-06-24T21:25:45.314714Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72057594037927937] write key for TxId 67890 2025-06-24T21:25:45.314864Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: EXECUTED MinStep: 134 MaxStep: 30134 PredicatesReceived { TabletId: 72057594037950158 Predicate: true } PredicateRecipients: 72057594037950158 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Step: 100 Predicate: true Kind: KIND_DATA SourceActor { RawX1: 180 RawX2: 25769805969 } Partitions { } 2025-06-24T21:25:45.314936Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T21:25:45.317536Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:25:45.317581Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state EXECUTED 2025-06-24T21:25:45.317608Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State EXECUTED 2025-06-24T21:25:45.317634Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72057594037927937] TxId 67890 State EXECUTED FrontTxId 67890 2025-06-24T21:25:45.317674Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4007: [PQ: 72057594037927937] TPersQueue::SendEvReadSetAckToSenders 2025-06-24T21:25:45.317725Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4009: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSetAck {TEvReadSet step# 100 txid# 67890 TabletSource# 72057594037950158 TabletDest# 72057594037927937 SetTabletConsumer# 72057594037927937 Flags# 0 Seqno# 0} 2025-06-24T21:25:45.317765Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState WAIT_RS_ACKS 2025-06-24T21:25:45.317799Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from EXECUTED to WAIT_RS_ACKS 2025-06-24T21:25:45.317858Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 67890] PredicateAcks: 1/1 2025-06-24T21:25:45.317886Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4560: [PQ: 72057594037927937] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-24T21:25:45.317914Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 67890] PredicateAcks: 1/1 2025-06-24T21:25:45.317946Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4621: [PQ: 72057594037927937] add an TxId 67890 to the list for deletion 2025-06-24T21:25:45.317989Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState DELETING 2025-06-24T21:25:45.318029Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3852: [PQ: 72057594037927937] delete key for TxId 67890 2025-06-24T21:25:45.318087Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T21:25:45.321487Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:25:45.321541Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state 
DELETING 2025-06-24T21:25:45.321567Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State DELETING 2025-06-24T21:25:45.321607Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4571: [PQ: 72057594037927937] delete TxId 67890 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::GetUsedStorage [GOOD] Test command err: 2025-06-24T21:25:38.792340Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:38.792460Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:38.813738Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [1:182:2195] 2025-06-24T21:25:38.815704Z node 1 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T21:25:38.000000Z 2025-06-24T21:25:38.815774Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [1:182:2195] Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\320\230\324\236\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\000\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\320\230\324\236\3722" StorageChannel: INLINE } CmdWrite { Key: "I0000000003" Value: "\010\271`\020\262\222\004" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\320\230\324\236\3722" StorageChannel: INLINE } CmdWrite { Key: "I0000000003" Value: "\010\271`\020\263\222\004" StorageChannel: INLINE } 2025-06-24T21:25:39.588403Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:39.588475Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:39.606286Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:{2, {0, 10}, 100001}:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:25:39.606559Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:{2, {0, 10}, 100001}:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:39.606807Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateInit] bootstrapping {2, {0, 10}, 100001} [2:181:2194] 2025-06-24T21:25:39.607792Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:{2, {0, 10}, 100001}:Initializer] Initializing completed. 
2025-06-24T21:25:39.607882Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} generation 0 [2:181:2194] 2025-06-24T21:25:39.607946Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition {2, {0, 10}, 100001} so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:25:39.608000Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Process pending events. Count 0 2025-06-24T21:25:39.608188Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|e23986fb-9705ac67-f428683f-58823118_0 generated for partition {2, {0, 10}, 100001} topic 'Root/PQ/rt3.dc1--account--topic' owner owner1 2025-06-24T21:25:39.608307Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] TPartition::ReplyOwnerOk. Partition: {2, {0, 10}, 100001} 2025-06-24T21:25:39.608475Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] no data for compaction 2025-06-24T21:25:39.608768Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} part blob processing sourceId 'SourceId' seqNo 2 partNo 0 2025-06-24T21:25:39.609630Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} part blob complete sourceId 'SourceId' seqNo 2 partNo 0 FormedBlobsCount 0 NewHead: Offset 100 PartNo 0 PackedSize 118 count 1 nextOffset 101 batches 1 2025-06-24T21:25:39.610214Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} compactOffset 100,1 HeadOffset 0 endOffset 0 curOffset 101 D0000100001_00000000000000000100_00000_0000000001_00000? size 104 WTime 128 2025-06-24T21:25:39.662349Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 22 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:39.662502Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] TPartition::ReplyWrite. Partition: {2, {0, 10}, 100001} 2025-06-24T21:25:39.662605Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Answering for message sourceid: 'SourceId', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: {2, {0, 10}, 100001}, SeqNo: 2, partNo: 0, Offset: 100 is stored on disk 2025-06-24T21:25:39.662866Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] need more data for compaction. 
cumulativeSize=104, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:25:39.954933Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} part blob processing sourceId 'SourceId' seqNo 4 partNo 0 2025-06-24T21:25:39.955761Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} part blob complete sourceId 'SourceId' seqNo 4 partNo 0 FormedBlobsCount 0 NewHead: Offset 101 PartNo 0 PackedSize 118 count 1 nextOffset 102 batches 1 2025-06-24T21:25:39.956165Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} compactOffset 101,1 HeadOffset 101 endOffset 101 curOffset 102 D0000100001_00000000000000000101_00000_0000000001_00000? size 104 WTime 1129 2025-06-24T21:25:39.997792Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 22 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:39.997900Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] TPartition::ReplyWrite. Partition: {2, {0, 10}, 100001} 2025-06-24T21:25:39.997968Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Answering for message sourceid: 'SourceId', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: {2, {0, 10}, 100001}, SeqNo: 4, partNo: 0, Offset: 101 is stored on disk 2025-06-24T21:25:39.998176Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] need more data for compaction. cumulativeSize=208, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:25:40.216421Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} part blob processing sourceId 'SourceId' seqNo 6 partNo 0 2025-06-24T21:25:40.220262Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} part blob complete sourceId 'SourceId' seqNo 6 partNo 0 FormedBlobsCount 0 NewHead: Offset 102 PartNo 0 PackedSize 118 count 1 nextOffset 103 batches 1 2025-06-24T21:25:40.220743Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} compactOffset 102,1 HeadOffset 102 endOffset 102 curOffset 103 D0000100001_00000000000000000102_00000_0000000001_00000? 
size 104 WTime 2130 2025-06-24T21:25:40.251843Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 22 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:40.251952Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] TPartition::ReplyWrite. Partition: {2, {0, 10}, 100001} 2025-06-24T21:25:40.252031Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Answering for message sourceid: 'SourceId', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: {2, {0, 10}, 100001}, SeqNo: 6, partNo: 0, Offset: 102 is stored on disk 2025-06-24T21:25:40.252216Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] need more data for compaction. cumulativeSize=312, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:25:40.490459Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} part blob processing sourceId 'SourceId' seqNo 7 partNo 0 2025-06-24T21:25:40.491536Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} part blob complete sourceId 'SourceId' seqNo 7 partNo 0 FormedBlobsCount 0 NewHead: Offset 110 PartNo 0 PackedSize 118 count 1 nextOffset 111 batches 1 2025-06-24T21:25:40.492207Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} compactOffset 110,1 HeadOffset 103 endOffset 103 curOffset 111 D0000100001_00000000000000000110_00000_0000000001_00000? size 104 WTime 3231 2025-06-24T21:25:40.523498Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 22 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:40.523647Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] TPartition::ReplyWrite. Partition: {2, {0, 10}, 100001} 2025-06-24T21:25:40.523761Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: {2, {0, 10 ... _00000_0000000001_00000?, size: 0 Body key last D0000100001_00000000000000000110_00000_0000000001_00000?, size: 312 2025-06-24T21:25:41.175859Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:41.175942Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:41.189793Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateInit] bootstrapping {2, {0, 10}, 100001} [3:180:2193] 2025-06-24T21:25:41.191682Z node 3 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:{2, {0, 10}, 100001}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-24T21:25:41.000000Z 2025-06-24T21:25:41.191757Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} generation 0 [3:180:2193] 2025-06-24T21:25:41.534939Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|ca93b2a4-ed53aa20-dc685b6d-832f32b6_0 generated for partition {2, {0, 10}, 100001} topic 'Root/PQ/rt3.dc1--account--topic' owner owner1 Wait write info error(2) 2025-06-24T21:25:41.984410Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:41.984481Z node 4 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:41.998341Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-24T21:25:41.998678Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:41.998958Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [4:181:2194] 2025-06-24T21:25:41.999994Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-24T21:25:42.000162Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-24T21:25:42.000334Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-24T21:25:42.001410Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-24T21:25:42.001743Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-24T21:25:42.001913Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-24T21:25:42.002020Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-24T21:25:42.002053Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T21:25:42.002100Z node 4 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T21:25:42.000000Z 2025-06-24T21:25:42.002142Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 
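The "need more data for compaction" messages above expose the thresholds the partition checks before compacting its head: cumulativeSize against cumulativeSizeLimit (8388608 bytes, i.e. 8 MiB) and the accumulated blob count against bodyKeysCountLimit (300). A plausible reading of those messages is that compaction starts once either limit is reached; the sketch below encodes that assumption only and is not the actual condition from partition_compaction.cpp.

    #include <cstddef>
    #include <cstdio>

    // Assumed compaction trigger, inferred from the log: compact once the head either
    // grows past the size limit or collects too many body keys. The real check in
    // partition_compaction.cpp may include additional conditions.
    bool NeedCompaction(std::size_t cumulativeSize, std::size_t count,
                        std::size_t cumulativeSizeLimit = 8u * 1024u * 1024u,
                        std::size_t bodyKeysCountLimit = 300) {
        return cumulativeSize >= cumulativeSizeLimit || count >= bodyKeysCountLimit;
    }

    int main() {
        // Mirrors the logged states: 104/1, 208/2 and 312/3 all stay below both limits,
        // which is why the log keeps reporting "need more data for compaction".
        std::printf("%d\n", NeedCompaction(104, 1));       // 0 -> keep accumulating
        std::printf("%d\n", NeedCompaction(312, 3));       // 0 -> keep accumulating
        std::printf("%d\n", NeedCompaction(9u << 20, 5));  // 1 -> size limit reached
        std::printf("%d\n", NeedCompaction(1024, 300));    // 1 -> key-count limit reached
        return 0;
    }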
2025-06-24T21:25:42.002186Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [4:181:2194] 2025-06-24T21:25:42.002231Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 50 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-24T21:25:42.002278Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T21:25:42.002342Z node 4 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-2 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:42.002393Z node 4 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-2 send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-24T21:25:42.002446Z node 4 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-24T21:25:42.002470Z node 4 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 readTimeStamp for offset 0 initiated queuesize 1 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-24T21:25:42.002698Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:42.002814Z node 4 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 2 Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-2 offset 0 count 1 size 1024000 endOffset 50 max time lag 0ms effective offset 0 2025-06-24T21:25:42.003042Z node 4 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 684 count 50 last offset 0, current partition end offset: 50 2025-06-24T21:25:42.003090Z node 4 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 
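The TInit*Step lines above trace the partition bootstrap sequence: TInitConfigStep, TInitInternalFieldsStep, TInitDiskStatusStep, TInitMetaStep, TInitInfoRangeStep, TInitDataRangeStep, TInitDataStep and TInitEndWriteTimestampStep run in order before the partition reports "Initializing completed." and switches to StateIdle. A minimal synchronous sketch of such a step chain follows; the real initializer in partition_init.cpp is actor-based and each step waits on KV responses, so this is a simplified illustration with made-up type names.

    #include <cstdio>
    #include <functional>
    #include <string>
    #include <vector>

    // Simplified, synchronous model of a step-based initializer: each step runs in
    // order and is announced the same way the log announces "Start initializing step ...".
    struct TInitStep {
        std::string Name;
        std::function<bool()> Run;   // returns false to abort initialization
    };

    bool RunInitializer(const std::vector<TInitStep>& steps) {
        for (const auto& step : steps) {
            std::printf("Start initializing step %s\n", step.Name.c_str());
            if (!step.Run()) {
                return false;
            }
        }
        std::printf("Initializing completed.\n");
        return true;
    }

    int main() {
        // Step names are taken from the log; the bodies here are stubs.
        std::vector<TInitStep> steps = {
            {"TInitConfigStep",            [] { return true; }},
            {"TInitInternalFieldsStep",    [] { return true; }},
            {"TInitDiskStatusStep",        [] { return true; }},
            {"TInitMetaStep",              [] { return true; }},
            {"TInitInfoRangeStep",         [] { return true; }},
            {"TInitDataRangeStep",         [] { return true; }},
            {"TInitDataStep",              [] { return true; }},
            {"TInitEndWriteTimestampStep", [] { return true; }},
        };
        return RunInitializer(steps) ? 0 : 1;
    }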
2025-06-24T21:25:42.344878Z node 4 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 session is set to 0 (startOffset 50) session session-client-0 Got batch complete: 1 Got KV request Got KV request Got KV request Got cmd write: CmdWrite { Key: "i0000000000" Value: "\030\000(\360\267\324\236\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient-0" Value: "\010\000\020\001\030\001\"\020session-client-0(\0000\000@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient-0" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session-client-0" StorageChannel: INLINE } 2025-06-24T21:25:42.376423Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:42.376530Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:43.347593Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 Created Tx with id 3 as act# 3 Created Tx with id 4 as act# 4 2025-06-24T21:25:43.347900Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-24T21:25:43.347993Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 3 2025-06-24T21:25:43.348016Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 4 2025-06-24T21:25:44.659631Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:44.659791Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 6 Wait batch completion Wait kv request 2025-06-24T21:25:44.660100Z node 4 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 offset is set to 5 (startOffset 50) session session-client-0 2025-06-24T21:25:44.660166Z node 4 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 3 2025-06-24T21:25:44.660202Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 3 2025-06-24T21:25:44.660245Z node 4 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 4 2025-06-24T21:25:44.660278Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 4 2025-06-24T21:25:44.660332Z node 4 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 offset is set to 10 (startOffset 50) session session-client-0 2025-06-24T21:25:44.673096Z node 4 :PERSQUEUE DEBUG: 
partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(ABORTED), reason=incorrect offset range (gap) Got KV request Wait tx committed for tx 3 2025-06-24T21:25:44.694097Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:44.694179Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Wait tx committed for tx 4 Wait immediate tx complete 6 Got propose resutl: Origin: 72057594037927937 Status: ABORTED TxId: 6 Errors { Kind: BAD_REQUEST Reason: "incorrect offset range (gap)" } 2025-06-24T21:25:45.018073Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:45.018148Z node 5 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:45.036018Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateInit] bootstrapping {2, {0, 10}, 100001} [5:182:2195] 2025-06-24T21:25:45.037739Z node 5 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:{2, {0, 10}, 100001}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T21:25:45.000000Z 2025-06-24T21:25:45.037812Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} generation 0 [5:182:2195] >> TPartitionTests::TestBatchingWithChangeConfig [GOOD] >> TListAllTopicsTests::PlainList >> TPQTabletTests::Limit_On_The_Number_Of_Transactons ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCDS64 [GOOD] Test command err: Trying to start YDB, gRPC: 15719, MsgBus: 28734 2025-06-24T21:21:53.544096Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629228646431819:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:53.544454Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00391a/r3tmp/tmprJccAh/pdisk_1.dat 2025-06-24T21:21:54.310627Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:54.319360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:54.324536Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:21:54.402025Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:54.403385Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629228646431616:2079] 1750800113449006 != 1750800113449009 TServer::EnableGrpc on GrpcPort 15719, node 1 2025-06-24T21:21:54.444618Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:54.651373Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:54.651395Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:54.651401Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:54.651497Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28734 TClient is connected to server localhost:28734 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:55.844478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:58.523283Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629228646431819:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:58.569243Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:58.851764Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629250121268740:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:58.851862Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:58.852160Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629250121268752:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:58.856396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:58.883586Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629250121268754:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:21:58.965134Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629250121268805:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:59.408985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:21:59.699660Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629254416236302:2311];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:59.699907Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629254416236302:2311];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:59.700188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629254416236302:2311];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:59.700320Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629254416236302:2311];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:59.700453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629254416236302:2311];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:59.700585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629254416236302:2311];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:59.700765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629254416236302:2311];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:59.700881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629254416236302:2311];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:59.700985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629254416236302:2311];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:21:59.701094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7519629254416236302:2311];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:21:59.701201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519629254416236302:2311];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:21:59.707017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629254416236359:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:59.707089Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629254416236359:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:59.707319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629254416236359:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:59.707472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629254416236359:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:59.707595Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629254416236359:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:59.707712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629254416236359:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:59.707810Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629254416236359:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:59.707930Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629254416236359:2316];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:59.708036Z node 1 :TX_COLU ... 
76710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:22.533980Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:22.544999Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:22.545607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:22.549546Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:22.550105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:22.550878Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:22.551904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:22.556739Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:22.557353Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:22.557505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:22.557969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:22.564150Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:22.564681Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:22.564867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 
2025-06-24T21:23:22.565249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:22.571379Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:22.571831Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:22.572185Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:22.572783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-24T21:23:22.577922Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:22.578299Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-24T21:23:22.728615Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx50gke1wy6tmhz6yg0jf5", SessionId: ydb://session/3?node_id=1&id=NDg5YTRkMDctNjc0YjE0MTAtOGZjMWQyNmMtYTdlYjJhMDM=, Slow query, duration: 31.188678s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-24T21:23:23.414153Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:23.414779Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:23:23.415504Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519629486344506665:7871];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-24T21:23:23.416126Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-24T21:25:31.279477Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhx7t5y34j727rksgvs528w", SessionId: ydb://session/3?node_id=1&id=NDg5YTRkMDctNjc0YjE0MTAtOGZjMWQyNmMtYTdlYjJhMDM=, Slow query, duration: 67.920380s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "PRAGMA TablePathPrefix='/Root/test/ds';\n\n-- NB: Subquerys\n\n$cs_ui =\n\n (select catalog_sales.cs_item_sk cs_item_sk\n\n ,sum(cs_ext_list_price) as sale,sum(cr_refunded_cash+cr_reversed_charge+cr_store_credit) as refund\n\n from catalog_sales as catalog_sales\n\n cross join catalog_returns as catalog_returns\n\n where cs_item_sk = cr_item_sk\n\n and cs_order_number = cr_order_number\n\n group by catalog_sales.cs_item_sk\n\n having sum(cs_ext_list_price)>2*sum(cr_refunded_cash+cr_reversed_charge+cr_store_credit));\n\n$cross_sales =\n\n (select item.i_product_name product_name\n\n ,item.i_item_sk item_sk\n\n ,store.s_store_name store_name\n\n ,store.s_zip store_zip\n\n ,ad1.ca_street_number b_street_number\n\n ,ad1.ca_street_name b_street_name\n\n ,ad1.ca_city b_city\n\n ,ad1.ca_zip b_zip\n\n ,ad2.ca_street_number c_street_number\n\n ,ad2.ca_street_name c_street_name\n\n ,ad2.ca_city c_city\n\n ,ad2.ca_zip c_zip\n\n ,d1.d_year as syear\n\n ,d2.d_year as fsyear\n\n ,d3.d_year s2year\n\n ,count(*) cnt\n\n ,sum(ss_wholesale_cost) s1\n\n ,sum(ss_list_price) s2\n\n ,sum(ss_coupon_amt) s3\n\n FROM store_sales as store_sales\n\n cross join store_returns as store_returns\n\n cross join $cs_ui cs_ui\n\n cross join date_dim d1\n\n cross join date_dim d2\n\n cross join date_dim d3\n\n cross join store as store\n\n cross join customer as customer\n\n cross join customer_demographics cd1\n\n cross join customer_demographics cd2\n\n cross join promotion as promotion\n\n cross join household_demographics hd1\n\n cross join household_demographics hd2\n\n cross join customer_address ad1\n\n cross join customer_address ad2\n\n cross join income_band ib1\n\n cross join income_band ib2\n\n cross join item as item\n\n WHERE ss_store_sk = s_store_sk AND\n\n ss_sold_date_sk = d1.d_date_sk AND\n\n ss_customer_sk = c_customer_sk AND\n\n ss_cdemo_sk= cd1.cd_demo_sk AND\n\n ss_hdemo_sk = hd1.hd_demo_sk AND\n\n ss_addr_sk = ad1.ca_address_sk and\n\n ss_item_sk = i_item_sk and\n\n ss_item_sk = sr_item_sk and\n\n ss_ticket_number = sr_ticket_number and\n\n ss_item_sk = cs_ui.cs_item_sk and\n\n c_current_cdemo_sk = cd2.cd_demo_sk AND\n\n c_current_hdemo_sk = hd2.hd_demo_sk AND\n\n c_current_addr_sk = ad2.ca_address_sk and\n\n c_first_sales_date_sk = d2.d_date_sk and\n\n c_first_shipto_date_sk = d3.d_date_sk and\n\n ss_promo_sk = p_promo_sk and\n\n hd1.hd_income_band_sk = ib1.ib_income_band_sk and\n\n hd2.hd_income_band_sk = ib2.ib_income_band_sk and\n\n cd1.cd_marital_status <> cd2.cd_marital_status and\n\n i_color in ('azure','gainsboro','misty','blush','hot','lemon') and\n\n i_current_price between 80 and 80 + 10 and\n\n i_current_price between 80 + 1 and 80 + 15\n\ngroup by item.i_product_name\n\n ,item.i_item_sk\n\n ,store.s_store_name\n\n ,store.s_zip\n\n ,ad1.ca_street_number\n\n ,ad1.ca_street_name\n\n ,ad1.ca_city\n\n ,ad1.ca_zip\n\n ,ad2.ca_street_number\n\n ,ad2.ca_street_name\n\n ,ad2.ca_city\n\n ,ad2.ca_zip\n\n ,d1.d_year\n\n ,d2.d_year\n\n ,d3.d_year\n\n);\n\n-- start query 1 in stream 0 using template query64.tpl 
and seed 1220860970\n\nselect cs1.product_name\n\n ,cs1.store_name\n\n ,cs1.store_zip\n\n ,cs1.b_street_number\n\n ,cs1.b_street_name\n\n ,cs1.b_city\n\n ,cs1.b_zip\n\n ,cs1.c_street_number\n\n ,cs1.c_street_name\n\n ,cs1.c_city\n\n ,cs1.c_zip\n\n ,cs1.syear\n\n ,cs1.cnt\n\n ,cs1.s1 as s11\n\n ,cs1.s2 as s21\n\n ,cs1.s3 as s31\n\n ,cs2.s1 as s12\n\n ,cs2.s2 as s22\n\n ,cs2.s3 as s32\n\n ,cs2.syear\n\n ,cs2.cnt\n\nfrom $cross_sales cs1 cross join $cross_sales cs2\n\nwhere cs1.item_sk=cs2.item_sk and\n\n cs1.syear = 1999 and\n\n cs2.syear = 1999 + 1 and\n\n cs2.cnt <= cs1.cnt and\n\n cs1.store_name = cs2.store_name and\n\n cs1.store_zip = cs2.store_zip\n\norder by cs1.product_name\n\n ,cs1.store_name\n\n ,cs2.cnt\n\n ,s11\n\n ,s21\n\n ,s22;\n\n\n\n-- end query 1 in stream 0 using template query64.tpl\n", parameters: 0b >> TPartitionTests::TestBatchingWithProposeConfig >> TPQTest::TestAccountReadQuota [GOOD] >> TPQTest::TestAlreadyWritten >> TPQTest::TestPQPartialRead >> TPQTabletTests::Limit_On_The_Number_Of_Transactons [GOOD] >> TPQTest::TestWriteSplit >> TPQTabletTests::Multiple_PQTablets_2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::Range_CorrectDot [GOOD] Test command err: 2025-06-24T21:25:06.486267Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630058699303549:2198];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:06.491965Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a68/r3tmp/tmp3CfgJK/pdisk_1.dat 2025-06-24T21:25:06.900951Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:06.901071Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:06.911958Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:06.918095Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630058699303389:2079] 1750800306468722 != 1750800306468725 2025-06-24T21:25:06.927058Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24482 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:25:07.330373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:07.347824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:07.364320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:07.484476Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:07.542202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:07.629150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:10.111611Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630074477869061:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:10.131375Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a68/r3tmp/tmpIieOHh/pdisk_1.dat 2025-06-24T21:25:10.353953Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:10.354028Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:10.355901Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:10.356883Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:25:10.359377Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630074477869008:2079] 1750800310051287 != 1750800310051290 TClient is connected to server localhost:28638 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:10.573636Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:10.580106Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:10.594613Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:10.665395Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:10.713546Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:13.812313Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630090071183895:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:13.812506Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a68/r3tmp/tmpJ07cP1/pdisk_1.dat 2025-06-24T21:25:13.949298Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:13.963279Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630090071183876:2079] 1750800313810570 != 1750800313810573 2025-06-24T21:25:13.976432Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:13.976512Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:13.980423Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3305 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-24T21:25:14.189660Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:14.205029Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 28147497 ... Unknown -> Disconnected 2025-06-24T21:25:32.618293Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:32.620485Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2927 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:32.937383Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:32.962949Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:33.038735Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:33.093318Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:36.836058Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519630188163047091:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:36.836175Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a68/r3tmp/tmpNANwzM/pdisk_1.dat 2025-06-24T21:25:37.024618Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:37.024733Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:37.024985Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519630188163047071:2079] 1750800336835473 != 1750800336835476 2025-06-24T21:25:37.040861Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:37.045149Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7691 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:25:37.353744Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:37.367628Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:25:37.376076Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-24T21:25:37.381443Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:37.500773Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:37.565455Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:41.753502Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519630209276071862:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:41.753598Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a68/r3tmp/tmp5ze6vq/pdisk_1.dat 2025-06-24T21:25:41.919748Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:41.921506Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519630209276071843:2079] 1750800341752872 != 1750800341752875 2025-06-24T21:25:41.938526Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:41.938634Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:41.940589Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28546 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-24T21:25:42.209114Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:42.228180Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:42.295344Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:42.348292Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> TPartitionChooserSuite::TBoundaryChooserTest >> TPQTabletTests::Multiple_PQTablets_2 [GOOD] >> TPartitionChooserSuite::TBoundaryChooserTest [GOOD] >> TPartitionChooserSuite::TBoundaryChooser_GetTabletIdTest [GOOD] >> TPartitionChooserSuite::THashChooserTest [GOOD] >> TPartitionChooserSuite::THashChooser_GetTabletIdTest [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_BadSourceId_Test >> TPQRBDescribes::PartitionLocations [GOOD] >> TPartitionTests::ConflictingCommitsInSeveralBatches [GOOD] >> TPQTabletTests::Parallel_Transactions_1 >> TPartitionTests::DataTxCalcPredicateOrder [GOOD] >> TPartitionTests::ConflictingCommitFails >> TPQTabletTests::Parallel_Transactions_1 [GOOD] >> TPQTabletTests::Parallel_Transactions_2 >> TPQTabletTests::Parallel_Transactions_2 [GOOD] >> TPQTabletTests::PQTablet_Send_RS_With_Abort ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::DataTxCalcPredicateOrder [GOOD] Test command err: 2025-06-24T21:25:34.660776Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:34.661198Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:34.692049Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [1:182:2195] 2025-06-24T21:25:34.695583Z node 1 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-24T21:25:34.000000Z 2025-06-24T21:25:34.695687Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [1:182:2195] Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\260\371\323\236\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\000\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\260\371\323\236\3722" StorageChannel: INLINE } CmdWrite { Key: "I0000000003" Value: "\010\271`\020\264\222\004" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\001\020\001\030\001\"\007session(\0000\001@\001" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\001\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } 2025-06-24T21:25:35.564039Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:35.564118Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:35.585027Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [2:182:2195] 2025-06-24T21:25:35.587328Z node 2 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T21:25:35.000000Z 2025-06-24T21:25:35.587407Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [2:182:2195] Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\230\201\324\236\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\000\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } 2025-06-24T21:25:36.376546Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:36.376619Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:36.393838Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-24T21:25:36.394063Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:36.394325Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [3:179:2192] 2025-06-24T21:25:36.395475Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-24T21:25:36.395672Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 
2025-06-24T21:25:36.395847Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-24T21:25:36.396049Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-24T21:25:36.396324Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-24T21:25:36.396541Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-24T21:25:36.396686Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-24T21:25:36.396728Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T21:25:36.396782Z node 3 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T21:25:36.000000Z 2025-06-24T21:25:36.396826Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 2025-06-24T21:25:36.396880Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [3:179:2192] 2025-06-24T21:25:36.396942Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 50 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-24T21:25:36.396998Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-06-24T21:25:36.397244Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:37.753066Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:37.753436Z node 3 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client session is set to 0 (startOffset 50) session session Got KV request Got batch complete: 1 Got KV request Got KV request Got cmd write: CmdWrite { Key: "i0000000000" Value: "\030\000(\200\211\324\236\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient" Value: "\010\000\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } 2025-06-24T21:25:37.764704Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:37.764849Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 2025-06-24T21:25:37.765170Z node 3 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-24T21:25:39.025510Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Wait first predicate result 2025-06-24T21:25:39.025697Z node 3 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 1 Create distr tx with id = 2 and act no: 3 2025-06-24T21:25:39.025994Z node 3 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 2 2025-06-24T21:25:40.209345Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Wait second predicate result 2025-06-24T21:25:40.209493Z node 3 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T21:25:40.209551Z node 3 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 0 2025-06-24T21:25:40.209617Z node 3 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 0 2025-06-24T21:25:40.209679Z node 3 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 Got batch complete: 1 2025-06-24T21:25:40.209888Z node 3 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 2 2025-06-24T21:25:40.209928Z node 3 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 
2 2025-06-24T21:25:40.209969Z node 3 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 Got KV request Send disk status response with cookie: 0 2025-06-24T21:25:40.210290Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|a5de28f7-a6ccce11-6d279b0b-296f49da_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner owner1 2025-06-24T21:25:40.221396Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 2 2025-06-24T21:25:40.221543Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-24T21:25:40.221611Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Got batch complete: 1 2025-06-24T21:25:40.222301Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'SourceId' seqNo 5 partNo 0 2025-06-24T21:25:40.223312Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'SourceId' seqNo 5 partNo 0 FormedBlobsCount 0 NewHead: Offset 51 PartNo 0 PackedSize 118 count 1 nextOffset 52 batches 1 2025-06-24T21:25:40.223914Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 0 compactOffset 51,1 HeadOffset 50 endOffset 50 curOffset 52 d0000000000_00000000000000000051_00000_0000000001_00000? size 104 WTime 15243 Got KV request Got batch complete ... tart initializing step TInitMetaStep Got KV request 2025-06-24T21:25:41.830509Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-24T21:25:41.830757Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-24T21:25:41.831033Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-24T21:25:41.831270Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 1 size 684 so 0 eo 1 d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-24T21:25:41.831409Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-24T21:25:41.831479Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T21:25:41.831535Z node 4 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-24T21:25:41.000000Z 2025-06-24T21:25:41.831578Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 2025-06-24T21:25:41.831628Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [4:181:2194] 2025-06-24T21:25:41.831683Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 1 endOffset 1 Head Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 2025-06-24T21:25:41.831734Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T21:25:41.831971Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:43.169050Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:43.169284Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie SourceId|26c38c39-80260f1-ce7b7d62-6c641f97_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner SourceId 2025-06-24T21:25:43.169444Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 Got batch complete: 1 Wait write response Wait kv request 2025-06-24T21:25:43.169767Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'SourceId' seqNo 4 partNo 0 2025-06-24T21:25:43.170921Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'SourceId' seqNo 4 partNo 0 FormedBlobsCount 0 NewHead: Offset 11 PartNo 0 PackedSize 118 count 1 nextOffset 12 batches 1 2025-06-24T21:25:43.171584Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 0 compactOffset 11,1 HeadOffset 1 endOffset 1 curOffset 12 d0000000000_00000000000000000011_00000_0000000001_00000? size 104 WTime 5132 Got KV request Got batch complete: 1 Got KV request Got KV request 2025-06-24T21:25:43.182327Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 22 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:43.182464Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:25:43.182557Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'SourceId', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 4, partNo: 0, Offset: 11 is stored on disk 2025-06-24T21:25:43.182813Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=104, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Wait second predicate result Create distr tx with id = 0 and act no: 1 2025-06-24T21:25:43.183069Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-24T21:25:44.456528Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=104, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:25:44.456710Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 1 2025-06-24T21:25:44.833959Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:44.834033Z node 5 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:44.849426Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request Got KV request 2025-06-24T21:25:44.849649Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:44.849934Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [5:181:2194] 2025-06-24T21:25:44.851041Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-24T21:25:44.851261Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-24T21:25:44.851435Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-24T21:25:44.851627Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-24T21:25:44.851899Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-24T21:25:44.852105Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-24T21:25:44.852245Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-24T21:25:44.852287Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T21:25:44.852340Z node 5 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T21:25:44.000000Z 2025-06-24T21:25:44.852378Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 
2025-06-24T21:25:44.852430Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [5:181:2194] 2025-06-24T21:25:44.852487Z node 5 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 50 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-24T21:25:44.852533Z node 5 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T21:25:44.852780Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:46.187485Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 2025-06-24T21:25:46.187738Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-24T21:25:47.458054Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:47.458254Z node 5 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 1 Create distr tx with id = 2 and act no: 3 2025-06-24T21:25:47.458553Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 2 2025-06-24T21:25:47.458635Z node 5 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 0 2025-06-24T21:25:47.458688Z node 5 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 0 2025-06-24T21:25:47.458763Z node 5 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-24T21:25:48.703646Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:48.703837Z node 5 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 1 2025-06-24T21:25:48.704068Z node 5 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 2 2025-06-24T21:25:48.704125Z node 5 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 2 2025-06-24T21:25:48.704186Z node 5 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 Got KV request Send disk status response with cookie: 0 Wait tx committed for tx 0 2025-06-24T21:25:48.715624Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, 
State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 2 2025-06-24T21:25:48.715713Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Wait tx committed for tx 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTabletTests::Limit_On_The_Number_Of_Transactons [GOOD] Test command err: 2025-06-24T21:25:34.649041Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:25:34.660205Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037927937] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:25:34.660580Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037927937] doesn't have tx info 2025-06-24T21:25:34.660651Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:25:34.660699Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-24T21:25:34.660754Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:25:34.660811Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:34.661176Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:34.699309Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:209:2213], now have 1 active actors on pipe 2025-06-24T21:25:34.699474Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T21:25:34.712634Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "consumer" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "consumer" Generation: 1 Important: true } 2025-06-24T21:25:34.715934Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "consumer" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } 
Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "consumer" Generation: 1 Important: true } 2025-06-24T21:25:34.716066Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:34.717281Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1 actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "consumer" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "consumer" Generation: 1 Important: true } 2025-06-24T21:25:34.717478Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:25:34.717977Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:34.718461Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:217:2219] 2025-06-24T21:25:34.719569Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-24T21:25:34.719633Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:217:2219] 2025-06-24T21:25:34.719705Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:25:34.720658Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-06-24T21:25:34.720764Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 1 2025-06-24T21:25:34.720836Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 1 done 2025-06-24T21:25:34.720887Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer reinit request with generation 1 2025-06-24T21:25:34.720914Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer reinit with generation 1 done 2025-06-24T21:25:34.721126Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:34.721166Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:34.721341Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T21:25:34.721569Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:34.726873Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:34.726968Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:34.727415Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:224:2224], now have 1 active actors on pipe 2025-06-24T21:25:34.730233Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:227:2226], now have 1 active actors on pipe 2025-06-24T21:25:34.731173Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3225: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 180 RawX2: 4294969489 } TxId: 67890 Data { Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } SendingShards: 22222 ReceivingShards: 22222 Immediate: false } 2025-06-24T21:25:34.731234Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3401: [PQ: 72057594037927937] distributed transaction 2025-06-24T21:25:34.731337Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3715: [PQ: 72057594037927937] Propose TxId 67890, WriteId (empty maybe) 2025-06-24T21:25:34.731392Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-06-24T21:25:34.731430Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State UNKNOWN 2025-06-24T21:25:34.731469Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3940: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-24T21:25:34.731524Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState PREPARING 2025-06-24T21:25:34.731578Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72057594037927937] write 
key for TxId 67890 2025-06-24T21:25:34.731867Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: PREPARED MinStep: 134 MaxStep: 30134 PredicatesReceived { TabletId: 22222 } PredicateRecipients: 22222 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } Kind: KIND_DATA SourceActor { RawX1: 180 RawX2: 4294969489 } Partitions { } 2025-06-24T21:25:34.731996Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T21:25:34.736221Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:25:34.736287Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:34.736524Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State PREPARING 2025-06-24T21:25:34.736567Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState PREPARED 2025-06-24T21:25:34.736939Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3225: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 180 RawX2: 4294969489 } TxId: 67891 Data { Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } SendingShards: 22222 ReceivingShards: 22222 Immediate: false } 2025-06-24T21:25:34.737010Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3401: [PQ: 72057594037927937] distributed transaction 2025-06-24T21:25:34.737084Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3715: [PQ: 72057594037927937] Propose TxId 67891, WriteId (empty maybe) 2025-06-24T21:25:34.737144Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-06-24T21:25:34.737197Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67891, State UNKNOWN 2025-06-24T21:25:34.737244Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3940: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-24T21:25:34.737289Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67891, NewState PREPARING 2025-06-24T21:25:34.737346Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72057594037927937] write key for TxId 67891 2025-06-24T21:25:34.737471Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67891] save tx TxId: 67891 State: PREPARED MinStep: 136 MaxStep: 30136 PredicatesReceived { TabletId: 22222 } PredicateRecipients: 22222 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } Kind: KIND_DATA SourceActor { RawX1: 180 RawX2: 4294969489 } Partitions { } 2025-06-24T21:25:34.737561Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE ... 
node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68858, State PREPARING 2025-06-24T21:25:47.658014Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68858, NewState PREPARED 2025-06-24T21:25:47.658045Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.658072Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68859, State PREPARING 2025-06-24T21:25:47.658094Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68859, NewState PREPARED 2025-06-24T21:25:47.658114Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.658133Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68860, State PREPARING 2025-06-24T21:25:47.658151Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68860, NewState PREPARED 2025-06-24T21:25:47.658170Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.658188Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68861, State PREPARING 2025-06-24T21:25:47.658206Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68861, NewState PREPARED 2025-06-24T21:25:47.658225Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.658245Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68862, State PREPARING 2025-06-24T21:25:47.658336Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68862, NewState PREPARED 2025-06-24T21:25:47.658360Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.658379Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68863, State PREPARING 2025-06-24T21:25:47.658423Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68863, NewState PREPARED 2025-06-24T21:25:47.658453Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.658473Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68864, State PREPARING 2025-06-24T21:25:47.658492Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68864, NewState PREPARED 2025-06-24T21:25:47.658515Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.658534Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68865, State PREPARING 2025-06-24T21:25:47.658553Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68865, NewState PREPARED 2025-06-24T21:25:47.658585Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.658608Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68866, State PREPARING 2025-06-24T21:25:47.658644Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68866, NewState PREPARED 2025-06-24T21:25:47.658666Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.658687Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68867, State PREPARING 2025-06-24T21:25:47.658709Z node 6 
:PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68867, NewState PREPARED 2025-06-24T21:25:47.658729Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.658749Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68868, State PREPARING 2025-06-24T21:25:47.658769Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68868, NewState PREPARED 2025-06-24T21:25:47.658788Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.658814Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68869, State PREPARING 2025-06-24T21:25:47.658844Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68869, NewState PREPARED 2025-06-24T21:25:47.658864Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.658899Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68870, State PREPARING 2025-06-24T21:25:47.658932Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68870, NewState PREPARED 2025-06-24T21:25:47.658969Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.658991Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68871, State PREPARING 2025-06-24T21:25:47.659012Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68871, NewState PREPARED 2025-06-24T21:25:47.659029Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.659047Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68872, State PREPARING 2025-06-24T21:25:47.659070Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68872, NewState PREPARED 2025-06-24T21:25:47.659088Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.659114Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68873, State PREPARING 2025-06-24T21:25:47.659136Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68873, NewState PREPARED 2025-06-24T21:25:47.659172Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.659190Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68874, State PREPARING 2025-06-24T21:25:47.659219Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68874, NewState PREPARED 2025-06-24T21:25:47.659239Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.659257Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68875, State PREPARING 2025-06-24T21:25:47.659276Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68875, NewState PREPARED 2025-06-24T21:25:47.659297Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.659317Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68876, State PREPARING 2025-06-24T21:25:47.659341Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68876, NewState PREPARED 2025-06-24T21:25:47.659364Z node 6 
:PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.659384Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68877, State PREPARING 2025-06-24T21:25:47.659405Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68877, NewState PREPARED 2025-06-24T21:25:47.659439Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.659466Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68878, State PREPARING 2025-06-24T21:25:47.659501Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68878, NewState PREPARED 2025-06-24T21:25:47.659523Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.659547Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68879, State PREPARING 2025-06-24T21:25:47.659570Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68879, NewState PREPARED 2025-06-24T21:25:47.659588Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.659607Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68880, State PREPARING 2025-06-24T21:25:47.659628Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68880, NewState PREPARED 2025-06-24T21:25:47.659649Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.659670Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68881, State PREPARING 2025-06-24T21:25:47.659691Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68881, NewState PREPARED 2025-06-24T21:25:47.659710Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.659729Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68882, State PREPARING 2025-06-24T21:25:47.659769Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68882, NewState PREPARED 2025-06-24T21:25:47.659801Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.659820Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68883, State PREPARING 2025-06-24T21:25:47.659841Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68883, NewState PREPARED 2025-06-24T21:25:47.659859Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.659880Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68884, State PREPARING 2025-06-24T21:25:47.659898Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68884, NewState PREPARED 2025-06-24T21:25:47.659975Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.660003Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68885, State PREPARING 2025-06-24T21:25:47.660025Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68885, NewState PREPARED 2025-06-24T21:25:47.660043Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.660065Z node 6 
:PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68886, State PREPARING 2025-06-24T21:25:47.660094Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68886, NewState PREPARED 2025-06-24T21:25:47.660116Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.660135Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68887, State PREPARING 2025-06-24T21:25:47.660152Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68887, NewState PREPARED 2025-06-24T21:25:47.660171Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.660189Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68888, State PREPARING 2025-06-24T21:25:47.660207Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68888, NewState PREPARED 2025-06-24T21:25:47.660227Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:47.660245Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 68889, State PREPARING 2025-06-24T21:25:47.660267Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 68889, NewState PREPARED ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQRBDescribes::PartitionLocations [GOOD] Test command err: Bucket: 100 elems count: 97 Bucket: 200 elems count: 104 Bucket: 500 elems count: 288 Bucket: 1000 elems count: 528 Bucket: 2000 elems count: 1008 Bucket: 5000 elems count: 2976 2025-06-24T21:25:36.301109Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630185371180041:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:36.301317Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:25:36.375358Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630186633188216:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:36.375471Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:25:36.512642Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:25:36.528491Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b41/r3tmp/tmpeZ8e81/pdisk_1.dat 2025-06-24T21:25:36.779384Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:36.799015Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:36.799140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:36.801821Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:36.801923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, 
(0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:36.807343Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:25:36.807887Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:25:36.808563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9105, node 1 2025-06-24T21:25:37.040497Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/003b41/r3tmp/yandexDCSwqd.tmp 2025-06-24T21:25:37.040529Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/003b41/r3tmp/yandexDCSwqd.tmp 2025-06-24T21:25:37.040812Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/003b41/r3tmp/yandexDCSwqd.tmp 2025-06-24T21:25:37.041017Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:25:37.303348Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:37.309062Z INFO: TTestServer started on Port 21120 GrpcPort 9105 2025-06-24T21:25:37.384891Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21120 PQClient connected to localhost:9105 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:25:37.633549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:37.710534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
waiting... waiting... 2025-06-24T21:25:39.430585Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630199518090446:2273], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:39.430655Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630199518090458:2276], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:39.430739Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:39.445792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:25:39.463569Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630199518090461:2277], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-24T21:25:39.536368Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630199518090488:2175] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:25:39.916604Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519630198256082855:2304], status: SCHEME_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:3:16: Error: At function: KiReadTable!
<main>:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:25:39.918270Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519630199518090502:2281], status: SCHEME_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:3:16: Error: At function: KiReadTable!
<main>:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:25:39.918536Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZjYxODNjZmEtMjg5ZTMzMDQtZTAzYjc0NTAtYTQxMWIyNmE=, ActorId: [2:7519630199518090445:2272], ActorState: ExecuteState, TraceId: 01jyhxa4f38bfsrrdbs3v4pbr5, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:25:39.921905Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=Mzg1NTAwYTUtNGE5ZDhhZC0zOGVhOWM1Zi03NjhkZTE1NA==, ActorId: [1:7519630198256082772:2295], ActorState: ExecuteState, TraceId: 01jyhxa4fkez3a2jhf5z69tdak, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:25:39.922281Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:25:39.922296Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:25:39.982997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:40.063843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:40.169572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB firs ...
4T21:25:46.629159Z node 2 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976710677] PredicateAcks: 0/0 2025-06-24T21:25:46.629185Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4621: [PQ: 72075186224037892] add an TxId 281474976710677 to the list for deletion 2025-06-24T21:25:46.629204Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976710677, NewState DELETING 2025-06-24T21:25:46.629228Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3852: [PQ: 72075186224037892] delete key for TxId 281474976710677 2025-06-24T21:25:46.629293Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T21:25:46.631685Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:25:46.631713Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-06-24T21:25:46.631727Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037892] TxId 281474976710677, State DELETING 2025-06-24T21:25:46.631746Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4571: [PQ: 72075186224037892] delete TxId 281474976710677 ===Query complete TClient::Ls request: /Root/PQ/rt3.dc1--topic TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "rt3.dc1--topic" PathId: 13 SchemeshardId: 72057594046644480 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 281474976710677 CreateStep: 1750800346610 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186224037893 } PersQueueGroup { Name: "rt3.dc1--topic" PathId: 13 TotalGroupCount: 5 PartitionPerTablet: 5 PQTabletConfig { PartitionConfig { LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 2000... 
(TRUNCATED) GetTopicVersionFromPath: record Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "rt3.dc1--topic" PathId: 13 SchemeshardId: 72057594046644480 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 281474976710677 CreateStep: 1750800346610 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186224037893 } PersQueueGroup { Name: "rt3.dc1--topic" PathId: 13 TotalGroupCount: 5 PartitionPerTablet: 5 PQTabletConfig { PartitionConfig { LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 SourceIdMaxCounts: 6000000 } LocalDC: true ReadRules: "user" ReadFromTimestampsMs: 0 ConsumerFormatVersions: 0 ConsumerCodecs { } Codecs { Ids: 0 Ids: 1 Ids: 2 Codecs: "raw" Codecs: "gzip" Codecs: "lzop" } ReadRuleVersions: 0 YdbDatabasePath: "/Root" } Partitions { PartitionId: 0 TabletId: 72075186224037892 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186224037892 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186224037892 Status: Active } Partitions { PartitionId: 3 TabletId: 72075186224037892 Status: Active } Partitions { PartitionId: 4 TabletId: 72075186224037892 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186224037893 NextPartitionId: 5 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 12 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 5 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } Path: "/Root/PQ/rt3.dc1--topic" name rt3.dc1--topic version1 CallPersQueueGRPC request to localhost:9105 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--topic" } } CallPersQueueGRPC response: Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC 2025-06-24T21:25:46.690054Z node 1 :PERSQUEUE INFO: msgbus_server_persqueue.cpp:1531: proxy answer CallPersQueueGRPC request to localhost:9105 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--topic" } } 2025-06-24T21:25:47.193916Z node 1 :PERSQUEUE INFO: msgbus_server_persqueue.cpp:1531: proxy answer CallPersQueueGRPC response: Status: 1 ErrorCode: OK MetaResponse { CmdGetTopicMetadataResult { TopicInfo { Topic: "rt3.dc1--topic" NumPartitions: 5 Config { PartitionConfig { LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 
SourceIdMaxCounts: 6000000 } Version: 1 LocalDC: true Codecs { Ids: 0 Ids: 1 Ids: 2 Codecs: "raw" Codecs: "gzip" Codecs: "lzop" } TopicPath: "/Root/PQ/rt3.dc1--topic" YdbDatabasePath: "/Root" Consumers { Name: "user" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } Version: 0 Important: false } } ErrorCode: OK } } } === Topic created, have version: 1 TClient::Ls request: /Root/PQ/rt3.dc1--topic TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "rt3.dc1--topic" PathId: 13 SchemeshardId: 72057594046644480 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 281474976710677 CreateStep: 1750800346610 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186224037893 } PersQueueGroup { Name: "rt3.dc1--topic" PathId: 13 TotalGroupCount: 5 PartitionPerTablet: 5 PQTabletConfig { PartitionConfig { LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 2000... (TRUNCATED) 2025-06-24T21:25:47.220289Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--topic] pipe [1:7519630232615822373:3439] connected; active server actors: 1 2025-06-24T21:25:47.220350Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 0, NodeId 2, Generation 1 2025-06-24T21:25:47.220364Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 1, NodeId 2, Generation 1 2025-06-24T21:25:47.220374Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 2, NodeId 2, Generation 1 2025-06-24T21:25:47.220385Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 2, Generation 1 2025-06-24T21:25:47.220396Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 4, NodeId 2, Generation 1 2025-06-24T21:25:47.220636Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--topic] pipe [1:7519630232615822375:3441] connected; active server actors: 1 2025-06-24T21:25:47.220667Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 0, NodeId 2, Generation 1 2025-06-24T21:25:47.220675Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 1, NodeId 2, Generation 1 response: 2025-06-24T21:25:47.220681Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 2, NodeId 2, Generation 1 2025-06-24T21:25:47.220688Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse 
tabletId 72075186224037892, partitionId 3, NodeId 2, Generation 1 2025-06-24T21:25:47.220696Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 4, NodeId 2, Generation 1 Status: true Locations { PartitionId: 0 NodeId: 2 Generation: 1 } Locations { PartitionId: 1 NodeId: 2 Generation: 1 } Locations { PartitionId: 2 NodeId: 2 Generation: 1 } Locations { PartitionId: 3 NodeId: 2 Generation: 1 } Locations { PartitionId: 4 NodeId: 2 Generation: 1 } 2025-06-24T21:25:47.221093Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--topic] pipe [1:7519630232615822377:3443] connected; active server actors: 1 response: 2025-06-24T21:25:47.221137Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 2, Generation 1 Status: true Locations { PartitionId: 3 NodeId: 2 Generation: 1 } 2025-06-24T21:25:47.221395Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--topic] pipe [1:7519630232615822379:3445] connected; active server actors: 1 response: Status: false >> TPQTabletTests::PQTablet_Send_RS_With_Abort [GOOD] >> TSourceIdTests::SourceIdStorageParseAndAdd [GOOD] >> TSourceIdTests::SourceIdStorageMinDS [GOOD] >> TSourceIdTests::SourceIdStorageTestClean >> TPQTest::TestWriteSplit [GOOD] >> TPQTest::TestWriteTimeStampEstimate >> TPQTabletTests::One_Tablet_For_All_Partitions >> TSourceIdTests::SourceIdStorageTestClean [GOOD] >> TSourceIdTests::SourceIdStorageDeleteByMaxCount [GOOD] >> TSourceIdTests::SourceIdStorageDeleteAndOwnersMark [GOOD] >> TPQTest::TestCheckACL [GOOD] >> TPQTest::TestAlreadyWrittenWithoutDeduplication [GOOD] >> TPQTest::TestComactifiedWithRetention >> TPQTabletTests::One_Tablet_For_All_Partitions [GOOD] |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TSourceIdTests::SourceIdStorageDeleteAndOwnersMark [GOOD] >> TPQTest::TestWriteTimeStampEstimate [GOOD] >> TPQTest::TestWriteTimeLag >> TPQTabletTests::One_New_Partition_In_Another_Tablet >> TPartitionTests::TestBatchingWithProposeConfig [GOOD] >> TPQTabletTests::One_New_Partition_In_Another_Tablet [GOOD] >> KqpLimits::OutOfSpaceYQLUpsertFail-useSink [GOOD] >> KqpLimits::ManyPartitionsSortingLimit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::TestBatchingWithProposeConfig [GOOD] Test command err: 2025-06-24T21:25:34.660610Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:34.661173Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:34.693565Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [1:182:2195] 2025-06-24T21:25:34.700251Z node 1 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-24T21:25:34.000000Z 2025-06-24T21:25:34.700404Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [1:182:2195] Got cmd write: CmdDeleteRange { Range { From: "m0000000003cclient-1" IncludeFrom: true To: "m0000000003cclient-1" IncludeTo: true } } CmdDeleteRange { Range { From: "m0000000003uclient-1" IncludeFrom: true To: "m0000000003uclient-1" IncludeTo: true } } CmdWrite { Key: "i0000000003" Value: "\030\000(\260\371\323\236\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-2" Value: "\010\000\020\000\030\000\"\000(\0000\000@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-2" Value: "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" StorageChannel: INLINE } CmdWrite { Key: "_config_3" Value: "\022\t\030\200\243\0058\200\200\200\005\030\000\"\027rt3.dc1--account--topic(\0020\001\272\001 /Root/PQ/rt3.dc1--account--topic\352\001\000\372\001\002\010\000\212\002\007account\220\002\001\242\002\002\010\000\252\002\016\n\010client-2@\000H\000" StorageChannel: INLINE } 2025-06-24T21:25:35.577396Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:35.577483Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:35.595787Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {0, {0, 1111}, 123}, State: StateInit] bootstrapping {0, {0, 1111}, 123} [2:182:2195] 2025-06-24T21:25:35.600611Z node 2 :PERSQUEUE INFO: partition_init.cpp:911: [rt3.dc1--account--topic:{0, {0, 1111}, 123}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-24T21:25:35.000000Z 2025-06-24T21:25:35.600698Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {0, {0, 1111}, 123}, State: StateInit] init complete for topic 'rt3.dc1--account--topic' partition {0, {0, 1111}, 123} generation 0 [2:182:2195] 2025-06-24T21:25:36.361885Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:36.361954Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:36.374185Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-24T21:25:36.374341Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:36.374526Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [3:179:2192] 2025-06-24T21:25:36.375375Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-24T21:25:36.375579Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-24T21:25:36.375735Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-24T21:25:36.375926Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-24T21:25:36.376167Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-24T21:25:36.376354Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-24T21:25:36.376477Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-24T21:25:36.376520Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T21:25:36.376568Z node 3 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T21:25:36.000000Z 2025-06-24T21:25:36.376605Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 2025-06-24T21:25:36.376649Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [3:179:2192] 2025-06-24T21:25:36.376702Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 50 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-24T21:25:36.376751Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-06-24T21:25:36.376974Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:36.731168Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src3|174d2874-3d20c384-344a98ed-c79fc4ff_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src3 2025-06-24T21:25:36.731362Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 Got batch complete: 1 2025-06-24T21:25:36.731591Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src4|fde330d7-631005f3-8da52ee2-ddc35d62_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src4 2025-06-24T21:25:36.731667Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 Got batch complete: 1 2025-06-24T21:25:37.720914Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:39.001347Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 Create immediate tx with id = 3 and act no: 4 Create immediate tx with id = 6 and act no: 7 2025-06-24T21:25:39.001832Z node 3 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 Create distr tx with id = 8 and act no: 9 Create distr tx with id = 10 and act no: 11 2025-06-24T21:25:39.461816Z node 3 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 8 2025-06-24T21:25:39.461933Z node 3 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 10 2025-06-24T21:25:39.462012Z node 3 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T21:25:39.462063Z node 3 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T21:25:40.231572Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:40.231758Z node 3 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T21:25:41.567003Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:41.567210Z node 3 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T21:25:41.567493Z node 3 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 17 Wait batch completion 2025-06-24T21:25:41.567779Z node 3 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 10 Wait kv request 2025-06-24T21:25:41.837755Z node 3 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: 
StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 0 2025-06-24T21:25:41.837844Z node 3 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 0 2025-06-24T21:25:41.837941Z node 3 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-24T21:25:41.838049Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-24T21:25:41.838095Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-24T21:25:41.838241Z node 3 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-24T21:25:41.838301Z node 3 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-24T21:25:41.866716Z node 3 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-24T21:25:41.866907Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1257: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Already written message. Topic: 'Root/PQ/rt3.dc1--account--topic' Partition: 0 SourceId: 'src4'. Message seqNo: 7. Committed seqNo: (NULL). Writing seqNo: 7. EndOffset: 50. CurOffset: 50. Offset: 50 2025-06-24T21:25:41.867068Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 8 partNo 0 2025-06-24T21:25:41.868040Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 8 partNo 0 FormedBlobsCount 0 NewHead: Offset 51 PartNo 0 PackedSize ... 
: 2 Wait batch completion Send disk status response with cookie: 0 2025-06-24T21:25:46.174035Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:46.174297Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-24T21:25:46.174451Z node 4 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-24T21:25:46.174914Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Got KV request Got batch complete: 1 Got KV request Got KV request Got KV request Got KV request Got KV request Got KV request Wait batch completion Send disk status response with cookie: 0 Wait immediate tx complete 3 2025-06-24T21:25:46.207559Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:46.207775Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 3 2025-06-24T21:25:46.682936Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:46.683017Z node 5 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:46.704554Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request Got KV request 2025-06-24T21:25:46.704938Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:46.705277Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [5:181:2194] 2025-06-24T21:25:46.706497Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-24T21:25:46.706704Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-24T21:25:46.706906Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-24T21:25:46.708135Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-24T21:25:46.708488Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-24T21:25:46.708720Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-24T21:25:46.708879Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] 
Start initializing step TInitDataStep 2025-06-24T21:25:46.708923Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T21:25:46.708980Z node 5 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T21:25:46.000000Z 2025-06-24T21:25:46.709022Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 2025-06-24T21:25:46.709081Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [5:181:2194] 2025-06-24T21:25:46.709140Z node 5 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 50 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-24T21:25:46.709186Z node 5 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T21:25:46.709262Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:46.709300Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-24T21:25:46.709347Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-24T21:25:46.709578Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:46.709687Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 2 Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 offset 0 count 1 size 1024000 endOffset 50 max time lag 0ms effective offset 0 2025-06-24T21:25:46.709906Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 684 count 50 last offset 0, current partition end offset: 50 2025-06-24T21:25:46.709961Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 
2025-06-24T21:25:48.051113Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 2025-06-24T21:25:48.051431Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-24T21:25:48.051566Z node 5 :PERSQUEUE DEBUG: partition.cpp:1079: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvProposePartitionConfig Step 1, TxId 3 2025-06-24T21:25:49.279369Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Wait batch completion 2025-06-24T21:25:49.279544Z node 5 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T21:25:49.279679Z node 5 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-24T21:25:49.279737Z node 5 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= Got KV request Got batch complete: 2 Got KV request Got KV request 2025-06-24T21:25:50.462570Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Send disk status response with cookie: 0 Wait immediate tx complete 2 2025-06-24T21:25:50.462907Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:50.463041Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Got batch complete: 1 Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 2 Wait batch completion 2025-06-24T21:25:50.463338Z node 5 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 3 2025-06-24T21:25:50.463479Z node 5 :PERSQUEUE DEBUG: partition.cpp:3297: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 drop done Got KV request Send disk status response with cookie: 0 2025-06-24T21:25:50.475340Z node 5 :PERSQUEUE DEBUG: event_helpers.cpp:40: tablet 72057594037927937 topic 'Root/PQ/rt3.dc1--account--topic' partition 0 error: cannot finish read request. 
Consumer client-1 is gone from partition 2025-06-24T21:25:50.475541Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:50.475640Z node 5 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-24T21:25:50.475697Z node 5 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-24T21:25:50.475855Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Got KV request Got KV request 2025-06-24T21:25:50.476392Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 readTimeStamp for offset 5 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:50.476455Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 send read request for offset 5 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 Got KV request Got batch complete: 1 Got KV request Got KV request Got KV request 2025-06-24T21:25:50.476792Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 3 Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 offset 5 count 1 size 1024000 endOffset 50 max time lag 0ms effective offset 5 2025-06-24T21:25:50.477030Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 3 added 1 blobs, size 0 count 45 last offset 0, current partition end offset: 50 2025-06-24T21:25:50.477084Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Reading cookie 3. Send blob request. Got KV request Got KV request Wait batch completion Send disk status response with cookie: 0 Wait immediate tx complete 4 2025-06-24T21:25:50.508095Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:50.508234Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 4 >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_OldPartitionExists_NotBoundary_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_Active_Test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTabletTests::One_New_Partition_In_Another_Tablet [GOOD] Test command err: 2025-06-24T21:25:48.169051Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:25:48.174058Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037927937] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:25:48.174398Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037927937] doesn't have tx info 2025-06-24T21:25:48.174460Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:25:48.174494Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-24T21:25:48.174531Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:25:48.174589Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:48.174660Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:48.209322Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:209:2213], now have 1 active actors on pipe 2025-06-24T21:25:48.209493Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T21:25:48.232592Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "consumer-1" ImportantClientId: "consumer-2" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "consumer-1" Generation: 1 Important: true } Consumers { Name: "consumer-2" Generation: 1 Important: true } 2025-06-24T21:25:48.236069Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "consumer-1" ImportantClientId: "consumer-2" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "consumer-1" Generation: 1 Important: true } Consumers { Name: "consumer-2" Generation: 1 Important: true } 2025-06-24T21:25:48.236227Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:48.237122Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1 actor [1:180:2193] txId 12345 
config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "consumer-1" ImportantClientId: "consumer-2" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "consumer-1" Generation: 1 Important: true } Consumers { Name: "consumer-2" Generation: 1 Important: true } 2025-06-24T21:25:48.237271Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:25:48.237696Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:48.238034Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:217:2219] 2025-06-24T21:25:48.238779Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-24T21:25:48.238833Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:217:2219] 2025-06-24T21:25:48.238886Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:25:48.239787Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-06-24T21:25:48.239889Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 1 2025-06-24T21:25:48.239955Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 1 done 2025-06-24T21:25:48.239995Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer-1 reinit request with generation 1 2025-06-24T21:25:48.240019Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer-1 reinit with generation 1 done 2025-06-24T21:25:48.240040Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer-2 reinit request with generation 1 2025-06-24T21:25:48.240065Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer-2 reinit with generation 1 done 2025-06-24T21:25:48.240241Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:48.240269Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer-2 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:48.240288Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer-1 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:48.240459Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-24T21:25:48.240646Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:48.243242Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:48.243332Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:48.243830Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:224:2224], now have 1 active actors on pipe 2025-06-24T21:25:48.244574Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:227:2226], now have 1 active actors on pipe 2025-06-24T21:25:48.245463Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3225: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 180 RawX2: 4294969489 } TxId: 67890 Data { Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer-1" Path: "/topic" } SendingShards: 22222 ReceivingShards: 22222 Immediate: false } 2025-06-24T21:25:48.245528Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3401: [PQ: 72057594037927937] distributed transaction 2025-06-24T21:25:48.245648Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3715: [PQ: 72057594037927937] Propose TxId 67890, WriteId (empty maybe) 2025-06-24T21:25:48.245698Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-06-24T21:25:48.245756Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State UNKNOWN 2025-06-24T21:25:48.245799Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3940: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-24T21:25:48.245842Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState PREPARING 2025-06-24T21:25:48.245906Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72057594037927937] write key for TxId 67890 2025-06-24T21:25:48.246071Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: PREPARED MinStep: 134 MaxStep: 30134 PredicatesReceived { TabletId: 22222 } PredicateRecipients: 22222 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer-1" Path: "/topic" } Kind: KIND_DATA SourceActor { RawX1: 180 RawX2: 4294969489 } Partitions { } 2025-06-24T21:25:48.246164Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T21:25:48.250263Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:25:48.250341Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-24T21:25:48.250396Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State PREPARING 2025-06-24T21:25:48.250440Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState PREPARED 2025-06-24T21:25:48.250804Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3225: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 180 RawX2: 4294969489 } TxId: 67891 Data { Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer-2" Path: 
"/topic" } SendingShards: 22222 ReceivingShards: 22222 Immediate: false } 2025-06-24T21:25:48.250866Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3401: [PQ: 72057594037927937] distributed transaction 2025-06-24T21:25:48.251069Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3715: [PQ: 72057594037927937] Propose TxId 67891, WriteId (empty maybe) 2025-06-24T21:25:48.251103Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute t ... 27937] TxId 67890, NewState CALCULATED 2025-06-24T21:25:50.780717Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from CALCULATING to CALCULATED 2025-06-24T21:25:50.780763Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72057594037927937] write key for TxId 67890 2025-06-24T21:25:50.781096Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: CALCULATED MinStep: 134 MaxStep: 18446744073709551615 PredicateRecipients: 22222 Step: 100 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 10485760 } TopicName: "rt3.dc1--account--topic" Version: 2 LocalDC: true TopicPath: "/Root/PQ/rt3.dc1--account--topic" YdbDatabasePath: "" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 ReadRuleGenerations: 2 FederationAccount: "account" MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 ChildPartitionIds: 1 ChildPartitionIds: 2 TabletId: 72057594037927937 } AllPartitions { PartitionId: 1 ParentPartitionIds: 0 TabletId: 72057594037927937 } AllPartitions { PartitionId: 2 ParentPartitionIds: 0 TabletId: 22222 } Consumers { Name: "client-1" Generation: 2 Important: false } Consumers { Name: "client-3" Generation: 2 Important: false } } BootstrapConfig { } SourceActor { RawX1: 180 RawX2: 25769805969 } Partitions { Partition { PartitionId: 0 } Partition { PartitionId: 1 } } 2025-06-24T21:25:50.781224Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T21:25:50.784541Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:25:50.784602Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state CALCULATED 2025-06-24T21:25:50.784646Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State CALCULATED 2025-06-24T21:25:50.784690Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72057594037927937] TxId 67890 State CALCULATED FrontTxId 67890 2025-06-24T21:25:50.784731Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState WAIT_RS 2025-06-24T21:25:50.784775Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from CALCULATED to WAIT_RS 2025-06-24T21:25:50.784840Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3988: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSet to 1 receivers. Wait TEvTxProcessing::TEvReadSet from 0 senders. 
2025-06-24T21:25:50.784885Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3998: [PQ: 72057594037927937] Send TEvReadSet to tablet 22222 2025-06-24T21:25:50.784999Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4482: [PQ: 72057594037927937] HaveParticipantsDecision 1 2025-06-24T21:25:50.785151Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState EXECUTING 2025-06-24T21:25:50.785193Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from WAIT_RS to EXECUTING 2025-06-24T21:25:50.785228Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4512: [PQ: 72057594037927937] Received 0, Expected 2 2025-06-24T21:25:50.785363Z node 6 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 100, TxId 67890 2025-06-24T21:25:50.785520Z node 6 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user client-1 reinit with generation 2 done 2025-06-24T21:25:50.785561Z node 6 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user client-3 reinit with generation 2 done 2025-06-24T21:25:50.785599Z node 6 :PERSQUEUE DEBUG: partition.cpp:3297: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user drop done 2025-06-24T21:25:50.785889Z node 6 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 100, TxId 67890 2025-06-24T21:25:50.786094Z node 6 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T21:25:50.786338Z node 6 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T21:25:50.788749Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2918: [PQ: 72057594037927937] Handle TEvTabletPipe::TEvClientConnected 2025-06-24T21:25:50.788819Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037927937] Connected to tablet 22222 2025-06-24T21:25:50.791601Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:50.791682Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:50.792108Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3553: [PQ: 72057594037927937] Handle TEvPQ::TEvTxCommitDone Step 100, TxId 67890, Partition 0 2025-06-24T21:25:50.792163Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state EXECUTING 2025-06-24T21:25:50.792202Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State EXECUTING 2025-06-24T21:25:50.792245Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72057594037927937] TxId 67890 State EXECUTING FrontTxId 67890 2025-06-24T21:25:50.792282Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4512: [PQ: 72057594037927937] Received 1, Expected 2 2025-06-24T21:25:50.792316Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4327: [PQ: 72057594037927937] TxId 67890 status has not changed 2025-06-24T21:25:50.793800Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 1, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:50.793866Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 
1, State: StateIdle] no data for compaction 2025-06-24T21:25:50.793994Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3553: [PQ: 72057594037927937] Handle TEvPQ::TEvTxCommitDone Step 100, TxId 67890, Partition 1 2025-06-24T21:25:50.794032Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state EXECUTING 2025-06-24T21:25:50.794063Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State EXECUTING 2025-06-24T21:25:50.794093Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72057594037927937] TxId 67890 State EXECUTING FrontTxId 67890 2025-06-24T21:25:50.794120Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4512: [PQ: 72057594037927937] Received 2, Expected 2 2025-06-24T21:25:50.794158Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4185: [PQ: 72057594037927937] TxId: 67890 send TEvPersQueue::TEvProposeTransactionResult(COMPLETE) 2025-06-24T21:25:50.794206Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4516: [PQ: 72057594037927937] complete TxId 67890 2025-06-24T21:25:50.794469Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037927937] Apply new config PartitionConfig { LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 10485760 } TopicName: "rt3.dc1--account--topic" Version: 2 LocalDC: true TopicPath: "/Root/PQ/rt3.dc1--account--topic" YdbDatabasePath: "" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 ReadRuleGenerations: 2 FederationAccount: "account" MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 ChildPartitionIds: 1 ChildPartitionIds: 2 TabletId: 72057594037927937 } AllPartitions { PartitionId: 1 ParentPartitionIds: 0 TabletId: 72057594037927937 } AllPartitions { PartitionId: 2 ParentPartitionIds: 0 TabletId: 22222 } Consumers { Name: "client-1" Generation: 2 Important: false } Consumers { Name: "client-3" Generation: 2 Important: false } 2025-06-24T21:25:50.794543Z node 6 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:50.794629Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4534: [PQ: 72057594037927937] delete partitions for TxId 67890 2025-06-24T21:25:50.794671Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState EXECUTED 2025-06-24T21:25:50.794713Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from EXECUTING to EXECUTED 2025-06-24T21:25:50.794760Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72057594037927937] write key for TxId 67890 2025-06-24T21:25:50.795030Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: EXECUTED MinStep: 134 MaxStep: 18446744073709551615 PredicateRecipients: 22222 Step: 100 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 10485760 } TopicName: "rt3.dc1--account--topic" Version: 2 LocalDC: true TopicPath: "/Root/PQ/rt3.dc1--account--topic" YdbDatabasePath: "" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 ReadRuleGenerations: 2 FederationAccount: "account" MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 ChildPartitionIds: 1 ChildPartitionIds: 2 TabletId: 72057594037927937 } AllPartitions { PartitionId: 1 ParentPartitionIds: 0 TabletId: 72057594037927937 } AllPartitions { PartitionId: 2 ParentPartitionIds: 0 TabletId: 22222 } Consumers { Name: "client-1" Generation: 2 Important: false } Consumers { Name: "client-3" Generation: 2 
Important: false } } BootstrapConfig { } SourceActor { RawX1: 180 RawX2: 25769805969 } Partitions { Partition { PartitionId: 0 } Partition { PartitionId: 1 } } 2025-06-24T21:25:50.795281Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T21:25:50.798479Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:25:50.798531Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72057594037927937] Try execute txs with state EXECUTED 2025-06-24T21:25:50.798562Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72057594037927937] TxId 67890, State EXECUTED 2025-06-24T21:25:50.798591Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72057594037927937] TxId 67890 State EXECUTED FrontTxId 67890 2025-06-24T21:25:50.798626Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4007: [PQ: 72057594037927937] TPersQueue::SendEvReadSetAckToSenders 2025-06-24T21:25:50.798676Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890, NewState WAIT_RS_ACKS 2025-06-24T21:25:50.798714Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72057594037927937] TxId 67890 moved from EXECUTED to WAIT_RS_ACKS 2025-06-24T21:25:50.798754Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 67890] PredicateAcks: 0/1 2025-06-24T21:25:50.798789Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4560: [PQ: 72057594037927937] HaveAllRecipientsReceive 0, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-24T21:25:50.798822Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 67890] PredicateAcks: 0/1 2025-06-24T21:25:50.803160Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [6:368:2338], now have 1 active actors on pipe 2025-06-24T21:25:50.803354Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3441: [PQ: 72057594037927937] Handle TEvTxProcessing::TEvReadSet Step: 100 TxId: 67890 TabletSource: 22222 TabletDest: 72057594037927937 TabletProducer: 22222 ReadSet: "\010\001" Seqno: 0 2025-06-24T21:25:50.803398Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3469: [PQ: 72057594037927937] send TEvReadSetAck to 22222 2025-06-24T21:25:50.803448Z node 6 :PERSQUEUE DEBUG: pqtablet_mock.cpp:72: Connected to tablet 72057594037927937 from tablet 22222 >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionInactive_1_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionNotExists_Test >> TPQTest::TestWriteTimeLag [GOOD] >> TPQTest::The_Value_Of_CreationUnixTime_Must_Not_Decrease >> TPQTest::TestComactifiedWithRetention [GOOD] >> TPQTest::TestChangeConfig >> TSourceIdTests::SourceIdWriterAddMessage [GOOD] >> TSourceIdTests::SourceIdWriterClean [GOOD] >> TSourceIdTests::SourceIdWriterFormCommand [GOOD] >> TTypeCodecsTest::TestBoolCodec [GOOD] >> TTypeCodecsTest::TestDeltaVarIntCodecAndRev [GOOD] >> TPQTest::TestReserveBytes [GOOD] >> TPQTest::TestSourceIdDropBySourceIdCount ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TTypeCodecsTest::TestDeltaVarIntCodecAndRev [GOOD] Test command err: Size: 128 Create chunk: 0.000032s Read by index: 0.000012s Iterate: 0.000012s Size: 252 Create chunk: 0.000052s Read by index: 0.000010s Iterate: 0.000011s Size: 1887 Create chunk: 0.000060s Read by index: 0.000082s Iterate: 0.000067s Size: 1658 Create chunk: 0.000054s Read by index: 0.000089s Iterate: 0.000055s Size: 1889 Create chunk: 0.000051s Read by index: 0.000061s Iterate: 0.000024s Size: 
1660 Create chunk: 0.000068s Read by index: 0.000056s Iterate: 0.000032s >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionActive_BoundaryTrue_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionActive_BoundaryFalse_Test >> TPQTest::TestChangeConfig [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_NewSourceId_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_BadSourceId_Test |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::TestChangeConfig [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:111:2057] recipient: [1:104:2136] 2025-06-24T21:25:34.649062Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:25:34.660229Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037927937] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:25:34.660723Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037927937] doesn't have tx info 2025-06-24T21:25:34.660800Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:25:34.660861Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-24T21:25:34.660919Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:25:34.660978Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:34.661203Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927938 is [1:156:2175] sender: [1:157:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:183:2057] recipient: [1:14:2061] 2025-06-24T21:25:34.684241Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:182:2195], now have 1 active actors on pipe 2025-06-24T21:25:34.684432Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T21:25:34.704680Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 
1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "user1" Generation: 1 Important: true } 2025-06-24T21:25:34.708117Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "user1" Generation: 1 Important: true } 2025-06-24T21:25:34.708302Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:34.709234Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1 actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "user1" Generation: 1 Important: true } 2025-06-24T21:25:34.710495Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:25:34.711173Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:34.711643Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:189:2200] 2025-06-24T21:25:34.714560Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--asdfgs--topic:0:Initializer] Initializing completed. 2025-06-24T21:25:34.714647Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:189:2200] 2025-06-24T21:25:34.714712Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--asdfgs--topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:25:34.717409Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-06-24T21:25:34.718780Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user reinit request with generation 1 2025-06-24T21:25:34.718872Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user reinit with generation 1 done 2025-06-24T21:25:34.718949Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user1 reinit request with generation 1 2025-06-24T21:25:34.718990Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user1 reinit with generation 1 done 2025-06-24T21:25:34.719263Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:34.719309Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user1 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:34.719485Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T21:25:34.719833Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:34.726890Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:34.727004Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:34.727455Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:196:2205], now have 1 active actors on pipe 2025-06-24T21:25:34.739008Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:199:2207], now have 1 active actors on pipe 2025-06-24T21:25:34.739182Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--asdfgs--topic' requestId: 2025-06-24T21:25:34.739234Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72057594037927937] got client message batch for topic 'rt3.dc1--asdfgs--topic' partition 0 2025-06-24T21:25:34.739622Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 1 partNo : 0 messageNo: 0 size 102400 offset: 0 2025-06-24T21:25:34.739679Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 2 partNo : 0 messageNo: 0 size 102400 offset: 1 2025-06-24T21:25:34.739713Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 3 partNo : 0 messageNo: 0 size 102400 offset: 2 2025-06-24T21:25:34.739744Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 
SourceId: 'sourceid0' SeqNo: 4 partNo : 0 messageNo: 0 size 102400 offset: 3 2025-06-24T21:25:34.739791Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 5 partNo : 0 messageNo: 0 size 102400 offset: 4 2025-06-24T21:25:34.739821Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 6 partNo : 0 messageNo: 0 size 102400 offset: 5 2025-06-24T21:25:34.739850Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 7 partNo : 0 messageNo: 0 size 102400 offset: 6 2025-06-24T21:25:34.739884Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 8 partNo : 0 messageNo: 0 size 102400 offset: 7 2025-06-24T21:25:34.739916Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 9 partNo : 0 messageNo: 0 size 102400 offset: 8 2025-06-24T21:25:34.739963Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 10 partNo : 0 messageNo: 0 size 102400 offset: 9 2025-06-24T21:25:34.740004Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 11 partNo : 0 messageNo: 0 size 102400 offset: 10 2025-06-24T21:25:34.740035Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 12 partNo : 0 messageNo: 0 size 102400 offset: 11 2025-06-24T21:25:34.740078Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 13 partNo : 0 messageNo: 0 size 102400 offset: 12 2025-06-24T21:25:34.740113Z node 1 :PERSQUEUE DEB ... 
ate: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 7 generation 2 [14:238:2236] 2025-06-24T21:25:53.153167Z node 14 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 8, State: StateInit] bootstrapping 8 [14:239:2237] 2025-06-24T21:25:53.154683Z node 14 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 8, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 8 generation 2 [14:239:2237] 2025-06-24T21:25:53.157252Z node 14 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 9, State: StateInit] bootstrapping 9 [14:240:2238] 2025-06-24T21:25:53.158773Z node 14 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 9, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 9 generation 2 [14:240:2238] 2025-06-24T21:25:53.184591Z node 14 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 13 actor [14:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 5 MaxSizeInPartition: 1048576 LifetimeSeconds: 86400 ImportantClientId: "bbb" ImportantClientId: "ccc" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 PartitionIds: 2 PartitionIds: 3 PartitionIds: 4 PartitionIds: 5 PartitionIds: 6 PartitionIds: 7 PartitionIds: 8 PartitionIds: 9 TopicName: "rt3.dc1--asdfgs--topic" Version: 13 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } Partitions { PartitionId: 2 } Partitions { PartitionId: 3 } Partitions { PartitionId: 4 } Partitions { PartitionId: 5 } Partitions { PartitionId: 6 } Partitions { PartitionId: 7 } Partitions { PartitionId: 8 } Partitions { PartitionId: 9 } ReadRuleGenerations: 12 ReadRuleGenerations: 13 ReadRuleGenerations: 13 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } AllPartitions { PartitionId: 2 } AllPartitions { PartitionId: 3 } AllPartitions { PartitionId: 4 } AllPartitions { PartitionId: 5 } AllPartitions { PartitionId: 6 } AllPartitions { PartitionId: 7 } AllPartitions { PartitionId: 8 } AllPartitions { PartitionId: 9 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 12 Important: false } Consumers { Name: "bbb" Generation: 13 Important: true } Consumers { Name: "ccc" Generation: 13 Important: true } 2025-06-24T21:25:53.192507Z node 14 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|353e3268-250b92a0-49bfcb52-5da38d79_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:53.202071Z node 14 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|91df0b97-791894de-41daff87-803beb6c_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:53.209620Z node 14 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3305f697-f184cbc7-fb7e7205-f0dbd300_0 generated for partition 9 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:106:2057] recipient: [15:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:106:2057] recipient: [15:104:2136] Leader for TabletID 72057594037927937 is [15:110:2140] sender: [15:111:2057] recipient: [15:104:2136] 2025-06-24T21:25:53.702680Z node 15 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in 
BillingMeteringConfig 2025-06-24T21:25:53.702767Z node 15 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [15:152:2057] recipient: [15:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [15:152:2057] recipient: [15:150:2171] Leader for TabletID 72057594037927938 is [15:156:2175] sender: [15:157:2057] recipient: [15:150:2171] Leader for TabletID 72057594037927937 is [15:110:2140] sender: [15:180:2057] recipient: [15:14:2061] 2025-06-24T21:25:53.719192Z node 15 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:53.721097Z node 15 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 14 actor [15:178:2191] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 100 MaxSizeInPartition: 104857600 LifetimeSeconds: 172800 ImportantClientId: "aaa" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 PartitionIds: 2 PartitionIds: 3 PartitionIds: 4 TopicName: "rt3.dc1--asdfgs--topic" Version: 14 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } Partitions { PartitionId: 2 } Partitions { PartitionId: 3 } Partitions { PartitionId: 4 } ReadRuleGenerations: 14 ReadRuleGenerations: 14 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } AllPartitions { PartitionId: 2 } AllPartitions { PartitionId: 3 } AllPartitions { PartitionId: 4 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 14 Important: false } Consumers { Name: "aaa" Generation: 14 Important: true } 2025-06-24T21:25:53.721985Z node 15 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [15:186:2197] 2025-06-24T21:25:53.724009Z node 15 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [15:186:2197] 2025-06-24T21:25:53.726305Z node 15 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [15:187:2198] 2025-06-24T21:25:53.728188Z node 15 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [15:187:2198] 2025-06-24T21:25:53.730168Z node 15 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [15:188:2199] 2025-06-24T21:25:53.731739Z node 15 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 2 generation 2 [15:188:2199] 2025-06-24T21:25:53.733708Z node 15 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [15:189:2200] 2025-06-24T21:25:53.735163Z node 15 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 3 generation 2 [15:189:2200] 2025-06-24T21:25:53.737023Z node 15 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 4, State: StateInit] bootstrapping 4 [15:190:2201] 
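
As a reading aid for the protobuf-text "Config applied version ..." records above (the blocks listing PartitionConfig, ReadRuleGenerations and Consumers), the following standalone Python sketch shows one way to pull the consumer name/generation/important fields out of such a dump. It is an illustration for log analysis only, not part of YDB or ya, and the regex assumes exactly the field order visible in this trace.

import re

# Regex built from the field order seen in the "Config applied" records above;
# this is an assumption about this particular trace, not a general protobuf-text parser.
CONSUMER_RE = re.compile(
    r'Consumers\s*\{\s*Name:\s*"(?P<name>[^"]+)"'
    r'(?:\s*ReadFromTimestampsMs:\s*\d+)?'
    r'\s*Generation:\s*(?P<gen>\d+)'
    r'\s*Important:\s*(?P<important>true|false)\s*\}'
)

def consumers_in(record: str):
    """Yield (name, generation, important) for every Consumers block in a log record."""
    for m in CONSUMER_RE.finditer(record):
        yield m["name"], int(m["gen"]), m["important"] == "true"

sample = ('Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 14 Important: false } '
          'Consumers { Name: "bbb" Generation: 15 Important: true }')
print(list(consumers_in(sample)))   # [('user', 14, False), ('bbb', 15, True)]
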
2025-06-24T21:25:53.738438Z node 15 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 4, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 4 generation 2 [15:190:2201] 2025-06-24T21:25:53.747807Z node 15 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3e5e77a1-6d92ee5b-ccfdd69f-63b2a3f7_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:53.755638Z node 15 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:53.759245Z node 15 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 5, State: StateInit] bootstrapping 5 [15:234:2232] 2025-06-24T21:25:53.761124Z node 15 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 5, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 5 generation 2 [15:234:2232] 2025-06-24T21:25:53.764165Z node 15 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 6, State: StateInit] bootstrapping 6 [15:235:2233] 2025-06-24T21:25:53.765715Z node 15 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 6, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 6 generation 2 [15:235:2233] 2025-06-24T21:25:53.768297Z node 15 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 7, State: StateInit] bootstrapping 7 [15:236:2234] 2025-06-24T21:25:53.769766Z node 15 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 7, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 7 generation 2 [15:236:2234] 2025-06-24T21:25:53.772495Z node 15 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 8, State: StateInit] bootstrapping 8 [15:237:2235] 2025-06-24T21:25:53.773914Z node 15 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 8, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 8 generation 2 [15:237:2235] 2025-06-24T21:25:53.777378Z node 15 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 9, State: StateInit] bootstrapping 9 [15:238:2236] 2025-06-24T21:25:53.778933Z node 15 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 9, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 9 generation 2 [15:238:2236] 2025-06-24T21:25:53.810359Z node 15 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 15 actor [15:178:2191] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 5 MaxSizeInPartition: 1048576 LifetimeSeconds: 86400 ImportantClientId: "bbb" ImportantClientId: "ccc" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 PartitionIds: 2 PartitionIds: 3 PartitionIds: 4 PartitionIds: 5 PartitionIds: 6 PartitionIds: 7 PartitionIds: 8 PartitionIds: 9 TopicName: "rt3.dc1--asdfgs--topic" Version: 15 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } Partitions { PartitionId: 2 } Partitions { PartitionId: 3 } Partitions { PartitionId: 4 } Partitions { PartitionId: 5 } Partitions { PartitionId: 6 } Partitions { PartitionId: 7 } Partitions { PartitionId: 8 } Partitions { PartitionId: 9 } ReadRuleGenerations: 14 ReadRuleGenerations: 15 ReadRuleGenerations: 15 
MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } AllPartitions { PartitionId: 2 } AllPartitions { PartitionId: 3 } AllPartitions { PartitionId: 4 } AllPartitions { PartitionId: 5 } AllPartitions { PartitionId: 6 } AllPartitions { PartitionId: 7 } AllPartitions { PartitionId: 8 } AllPartitions { PartitionId: 9 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 14 Important: false } Consumers { Name: "bbb" Generation: 15 Important: true } Consumers { Name: "ccc" Generation: 15 Important: true } 2025-06-24T21:25:53.812432Z node 15 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|9ecf24f2-2292596e-da290026-9c6b26b_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:53.821352Z node 15 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|674cd2dd-17696b64-c1ebf0d9-f0dc378d_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:53.826977Z node 15 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|8939627c-2b418ba0-f8be5401-a9f4e804_0 generated for partition 9 topic 'rt3.dc1--asdfgs--topic' owner default >> TargetDiscoverer::SystemObjects >> TargetDiscoverer::Dirs >> TargetDiscoverer::Transfer >> TargetDiscoverer::Negative >> TargetDiscoverer::InvalidCredentials >> TConsoleTests::TestAlterTenantTooManyStorageResourcesForRunning [GOOD] >> TConsoleTests::TestAlterTenantTooManyStorageResourcesForRunningExtSubdomain >> TPartitionTests::ConflictingCommitFails [GOOD] >> TPartitionTests::ConflictingCommitProccesAfterRollback |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> KqpLimits::ManyPartitionsSortingLimit [GOOD] >> PQCountersLabeled::ImportantFlagSwitching [GOOD] >> PQCountersLabeled::NewConsumersCountersAppear >> TargetDiscoverer::IndexedTable >> TListAllTopicsTests::PlainList [GOOD] >> TListAllTopicsTests::RecursiveList >> TPartitionTests::ConflictingCommitProccesAfterRollback [GOOD] >> TargetDiscoverer::Basic >> TargetDiscoverer::Transfer [GOOD] >> TargetDiscoverer::Negative [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::ConflictingCommitProccesAfterRollback [GOOD] Test command err: 2025-06-24T21:25:38.032714Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:38.032815Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:38.053450Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-24T21:25:38.053751Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:38.054186Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:181:2194] 2025-06-24T21:25:38.055412Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-24T21:25:38.055617Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-24T21:25:38.055801Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: 
[Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-24T21:25:38.055996Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-24T21:25:38.056400Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-24T21:25:38.056625Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-24T21:25:38.056772Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-24T21:25:38.056816Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T21:25:38.056862Z node 1 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T21:25:38.000000Z 2025-06-24T21:25:38.056902Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 2025-06-24T21:25:38.056952Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [1:181:2194] 2025-06-24T21:25:38.057009Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 50 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-24T21:25:38.057069Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T21:25:38.057381Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:38.403314Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src1|ac122313-29c7aaa1-58908f2-64801512_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src1 2025-06-24T21:25:38.403505Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 Got batch complete: 1 2025-06-24T21:25:39.397054Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 Create distr tx with id = 2 and act no: 3 Create distr tx with id = 4 and act no: 5 Create distr tx with id = 6 and act no: 7 2025-06-24T21:25:39.397447Z node 1 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-24T21:25:39.397574Z node 1 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 2 2025-06-24T21:25:39.397650Z node 1 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 4 2025-06-24T21:25:39.397756Z node 1 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 6 2025-06-24T21:25:40.732305Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:40.732498Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T21:25:40.732612Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T21:25:40.732686Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T21:25:40.732745Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 1 Wait batch of 1 completion Wait batch completion Expect no KV request 2025-06-24T21:25:40.732957Z node 1 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 0 2025-06-24T21:25:40.733008Z node 1 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 0 2025-06-24T21:25:40.733092Z node 1 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 Got batch complete: 1 Waif or tx 3 predicate failure 2025-06-24T21:25:40.980660Z node 1 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 2 2025-06-24T21:25:40.980721Z node 1 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 2 2025-06-24T21:25:40.980766Z node 1 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 Got batch complete: 2 Waif or tx 4 predicate failure Wait batch of 3 completion Wait batch completion Expect no KV request Wait batch completion Wait for no tx committed Got KV request Wait kv request Wait for commits Wait tx committed for tx 0 2025-06-24T21:25:41.466197Z node 1 
:PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 2 2025-06-24T21:25:41.466308Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Wait tx committed for tx 2 2025-06-24T21:25:41.804940Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:41.805002Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:41.819666Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-24T21:25:41.819905Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:41.820199Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:181:2194] 2025-06-24T21:25:41.821142Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-24T21:25:41.821313Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-24T21:25:41.821450Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-24T21:25:41.821635Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-24T21:25:41.821853Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-24T21:25:41.822017Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 1 size 684 so 0 eo 1 d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-24T21:25:41.822144Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-24T21:25:41.822179Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T21:25:41.822219Z node 2 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T21:25:41.000000Z 2025-06-24T21:25:41.822252Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 
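
The traces above repeatedly show a partition initializer walking the same pipeline (TInitConfigStep, TInitInternalFieldsStep, TInitDiskStatusStep, TInitMetaStep, TInitInfoRangeStep, TInitDataRangeStep, TInitDataStep, TInitEndWriteTimestampStep, then "Initializing completed"). The sketch below, again purely illustrative and outside YDB, groups those "Start initializing step" lines by topic and partition so the per-partition order can be inspected; it only assumes the line shape seen in this log.

import re
from collections import defaultdict

# Matches "[<topic>:<partition>:Initializer] Start initializing step <TInit...Step>"
STEP_RE = re.compile(
    r'\[(?P<topic>[^\]:]+):(?P<partition>\d+):Initializer\] '
    r'Start initializing step (?P<step>\w+)'
)

def init_steps(log_text: str):
    """Return {(topic, partition): [step, step, ...]} in order of appearance."""
    steps = defaultdict(list)
    for m in STEP_RE.finditer(log_text):
        steps[(m["topic"], int(m["partition"]))].append(m["step"])
    return dict(steps)

line = ("2025-06-24T21:25:38.053450Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: "
        "[Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep")
print(init_steps(line))  # {('Root/PQ/rt3.dc1--account--topic', 0): ['TInitConfigStep']}
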
2025-06-24T21:25:41.822298Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [2:181:2194] 2025-06-24T21:25:41.822347Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 1 endOffset 1 Head Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 2025-06-24T21:25:41.822395Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T21:25:41.822628Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:42.166203Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src1|3d641e31-bf7cf8fc-9c0f2e7b-eab449d6_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src1 2025-06-24T21:25:42.166395Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 Got batch complete: 1 2025-06-24T21:25:43.159367Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 2025-06-24T21:25:43.159723Z node 2 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-24T21:25:44.421329Z node 2 :PERSQUEUE DEBUG: partition_comp ... E DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:51.801005Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 2 Wait batch completion 2025-06-24T21:25:51.801291Z node 4 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 2 2025-06-24T21:25:51.801347Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 2 2025-06-24T21:25:51.801431Z node 4 :PERSQUEUE DEBUG: partition.cpp:2461: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Partition 0 Consumer 'client-0' Bad request (gap) Offset 5 Begin 0 Got batch complete: 1 Wait kv request Got KV request Wait tx committed for tx 2 2025-06-24T21:25:51.822534Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:51.822659Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Wait for no tx committed 2025-06-24T21:25:53.059584Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 4 and act no: 5 Created Tx with id 7 as act# 7 2025-06-24T21:25:53.059905Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 4 2025-06-24T21:25:53.060038Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: 
StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 7 2025-06-24T21:25:54.314072Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:54.314403Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 2 2025-06-24T21:25:54.314793Z node 4 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 offset is set to 3 (startOffset 50) session session-client-1 Got KV request Wait batch completion Wait kv request 2025-06-24T21:25:54.565333Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:54.565471Z node 4 :PERSQUEUE DEBUG: partition.cpp:2461: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Partition 0 Consumer 'client-1' Bad request (gap) Offset 3 Begin 0 2025-06-24T21:25:54.565549Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Got batch complete: 1 Wait batch completion Wait kv request Got KV request Create distr tx with id = 8 and act no: 9 2025-06-24T21:25:54.566227Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 8 2025-06-24T21:25:54.587194Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:54.587308Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:55.601366Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:55.601620Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 3 Wait kv request 2025-06-24T21:25:55.601978Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-24T21:25:55.602046Z node 4 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-24T21:25:55.602128Z node 4 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(ABORTED), reason=incorrect offset range (gap) Got KV request Wait immediate tx complete 10 2025-06-24T21:25:55.612807Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:55.612893Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 10 Wait immediate tx complete 11 Got propose resutl: Origin: 72057594037927937 Status: ABORTED TxId: 11 
Errors { Kind: BAD_REQUEST Reason: "incorrect offset range (gap)" } 2025-06-24T21:25:55.941201Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:55.941273Z node 5 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:55.957409Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request Got KV request 2025-06-24T21:25:55.957714Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:55.957964Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [5:181:2194] 2025-06-24T21:25:55.959000Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-24T21:25:55.959174Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-24T21:25:55.959328Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-24T21:25:55.960009Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-24T21:25:55.960283Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-24T21:25:55.960479Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-24T21:25:55.960627Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-24T21:25:55.960670Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T21:25:55.960718Z node 5 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T21:25:55.000000Z 2025-06-24T21:25:55.960757Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 2025-06-24T21:25:55.960804Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [5:181:2194] 2025-06-24T21:25:55.960858Z node 5 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 50 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-24T21:25:55.960909Z node 5 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-06-24T21:25:55.960994Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:55.961039Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-24T21:25:55.961081Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-24T21:25:55.961285Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:55.961416Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 2 Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 offset 0 count 1 size 1024000 endOffset 50 max time lag 0ms effective offset 0 2025-06-24T21:25:55.961629Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 684 count 50 last offset 0, current partition end offset: 50 2025-06-24T21:25:55.961679Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 2025-06-24T21:25:57.305219Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Created Tx with id 0 as act# 0 Created Tx with id 1 as act# 1 2025-06-24T21:25:57.305452Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-24T21:25:57.305592Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 1 Got batch complete: 1 Wait batch completion Got batch complete: 1 Wait batch completion Wait kv request 2025-06-24T21:25:57.523086Z node 5 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 1 2025-06-24T21:25:57.523192Z node 5 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 1 Got KV request Wait tx committed for tx 1 2025-06-24T21:25:57.544126Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:57.544238Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Wait for no tx committed >> TargetDiscoverer::InvalidCredentials [GOOD] >> TargetDiscoverer::SystemObjects [GOOD] >> TargetDiscoverer::Dirs [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::Negative [GOOD] Test command err: 2025-06-24T21:25:55.270395Z node 1 :METADATA_PROVIDER WARN: 
log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630267402781228:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:55.270478Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044df/r3tmp/tmpRolJST/pdisk_1.dat 2025-06-24T21:25:55.635731Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630267402781196:2079] 1750800355266196 != 1750800355266199 2025-06-24T21:25:55.651168Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:55.692700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:55.692835Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:55.697702Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62261 TServer::EnableGrpc on GrpcPort 20692, node 1 2025-06-24T21:25:55.972363Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:25:55.972394Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:25:55.972399Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:25:55.972506Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:25:56.277428Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:62261 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:25:56.521325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:25:56.603041Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { status: SCHEME_ERROR, issues: {
: Error: Path not found } } } 2025-06-24T21:25:56.603105Z node 1 :REPLICATION_CONTROLLER ERROR: target_discoverer.cpp:78: [TargetDiscoverer][rid 1] Describe path failed: path# /Root/Table, status# SCHEME_ERROR, issues# {
: Error: Path not found } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::Transfer [GOOD] Test command err: 2025-06-24T21:25:55.270841Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630269906113089:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:55.270911Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044f8/r3tmp/tmp0yDFb3/pdisk_1.dat 2025-06-24T21:25:55.627370Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630269906113056:2079] 1750800355266139 != 1750800355266142 2025-06-24T21:25:55.632040Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:55.691532Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:55.691640Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:55.693502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24244 TServer::EnableGrpc on GrpcPort 24065, node 1 2025-06-24T21:25:55.972154Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:25:55.972181Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:25:55.972188Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:25:55.972321Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:25:56.282412Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24244 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:25:56.529141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:56.702090Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Topic, owner: root@builtin, type: Topic, size_bytes: 0, created_at: { plan_step: 1750800356669, tx_id: 281474976710658 } } } 2025-06-24T21:25:56.702132Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root/Topic 2025-06-24T21:25:56.712853Z node 1 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-24T21:25:56.734728Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:166: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTopicResponse { Result: { status: SUCCESS, issues: } } 2025-06-24T21:25:56.734760Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:181: [TargetDiscoverer][rid 1] Describe topic succeeded: path# /Root/Topic 2025-06-24T21:25:56.734794Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:191: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Topic, dstPath# /Root/Replicated/Table, kind# Transfer ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::InvalidCredentials [GOOD] Test command err: 2025-06-24T21:25:55.270500Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630267458192133:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:55.270559Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044ed/r3tmp/tmpGPY4yW/pdisk_1.dat 2025-06-24T21:25:55.655724Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:55.658489Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630267458192101:2079] 1750800355266176 != 1750800355266179 2025-06-24T21:25:55.690455Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:55.690596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:55.693123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9027 TServer::EnableGrpc on GrpcPort 12033, node 1 2025-06-24T21:25:55.972360Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:25:55.972384Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:25:55.972391Z node 1 :NET_CLASSIFIER 
WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:25:55.972541Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:25:56.282304Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9027 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:25:56.549126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:56.566782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:56.879998Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { status: CLIENT_UNAUTHENTICATED, issues: {
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/types/credentials/login/login.cpp:217: Cannot find user: user } } } 2025-06-24T21:25:56.880072Z node 1 :REPLICATION_CONTROLLER ERROR: target_discoverer.cpp:78: [TargetDiscoverer][rid 1] Describe path failed: path# /Root, status# CLIENT_UNAUTHENTICATED, issues# {
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/types/credentials/login/login.cpp:217: Cannot find user: user } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::SystemObjects [GOOD] Test command err: 2025-06-24T21:25:55.270632Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630268950186594:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:55.270691Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044f5/r3tmp/tmpWNJCBx/pdisk_1.dat 2025-06-24T21:25:55.647450Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630268950186562:2079] 1750800355266194 != 1750800355266197 2025-06-24T21:25:55.654269Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:55.691443Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:55.691544Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:55.696160Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23076 TServer::EnableGrpc on GrpcPort 11041, node 1 2025-06-24T21:25:55.972176Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:25:55.972223Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:25:55.972239Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:25:55.972389Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:25:56.280560Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23076 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:25:56.522121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:56.547252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:25:56.697949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:56.767553Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Root, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1750800356578, tx_id: 1 } } } 2025-06-24T21:25:56.767582Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root 2025-06-24T21:25:56.791515Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800356669, tx_id: 281474976710658 } }, { name: export-100500, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1750800356739, tx_id: 281474976710659 } }, { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } }] } } 2025-06-24T21:25:56.791553Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root 2025-06-24T21:25:58.002041Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:98: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800356669, tx_id: 281474976710658 } } } 2025-06-24T21:25:58.002072Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:113: [TargetDiscoverer][rid 1] Describe table succeeded: path# /Root/Table 2025-06-24T21:25:58.002099Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:120: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Table, dstPath# /Root/Replicated/Table, kind# Table >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_BadSourceId_Test [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::Dirs [GOOD] Test command err: 2025-06-24T21:25:55.270444Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630270268950141:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:55.270517Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044d4/r3tmp/tmpKbox0C/pdisk_1.dat 2025-06-24T21:25:55.615776Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:55.625135Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630270268950110:2079] 1750800355266247 != 1750800355266250 2025-06-24T21:25:55.690298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:55.690462Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:55.693070Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27549 TServer::EnableGrpc on GrpcPort 11425, node 1 2025-06-24T21:25:55.976046Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:25:55.976078Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:25:55.976092Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:25:55.976264Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:25:56.281858Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27549 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:25:56.528063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-24T21:25:56.571719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:56.714110Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Root, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1750800356599, tx_id: 1 } } } 2025-06-24T21:25:56.714148Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root 2025-06-24T21:25:56.740047Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Dir, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1750800356613, tx_id: 281474976710658 } }, { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } }] } } 2025-06-24T21:25:56.740087Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root 2025-06-24T21:25:56.764493Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800356683, tx_id: 281474976710659 } }] } } 2025-06-24T21:25:56.764529Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root/Dir 2025-06-24T21:25:58.227624Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:98: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800356683, tx_id: 281474976710659 } } } 2025-06-24T21:25:58.227658Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:113: [TargetDiscoverer][rid 1] Describe table succeeded: path# /Root/Dir/Table 2025-06-24T21:25:58.227704Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:120: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Dir/Table, dstPath# /Root/Replicated/Dir/Table, kind# Table >> SystemView::ShowCreateTable [GOOD] >> SystemView::ShowCreateTableChangefeeds >> TargetDiscoverer::IndexedTable [GOOD] >> TPQTest::TestPartitionTotalQuota [GOOD] >> TPQTest::TestPartitionPerConsumerQuota >> SystemView::PartitionStatsTtlFields [GOOD] >> SystemView::PartitionStatsLocksFields >> TPQTest::The_Value_Of_CreationUnixTime_Must_Not_Decrease [GOOD] >> TPQTestInternal::RestoreKeys [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_BadSourceId_Test [GOOD] Test command err: 2025-06-24T21:25:48.564796Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630239388441828:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:48.565087Z node 1 
:METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:25:48.605610Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630237724088017:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:48.605683Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a62/r3tmp/tmp5Axhtp/pdisk_1.dat 2025-06-24T21:25:48.764275Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:25:48.764598Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:25:48.936073Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:48.936161Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:48.941208Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:25:48.947382Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:48.950586Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9286, node 1 2025-06-24T21:25:48.988129Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:48.988213Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:49.001075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:25:49.050827Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/003a62/r3tmp/yandexDXfMfK.tmp 2025-06-24T21:25:49.050872Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/003a62/r3tmp/yandexDXfMfK.tmp 2025-06-24T21:25:49.051064Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/003a62/r3tmp/yandexDXfMfK.tmp 2025-06-24T21:25:49.051241Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:25:49.115337Z INFO: TTestServer started on Port 4655 GrpcPort 9286 TClient is connected to server localhost:4655 PQClient connected to localhost:9286 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:25:49.411451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:49.460436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:49.574841Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:49.611817Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... waiting... 2025-06-24T21:25:51.643388Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630250608990179:2269], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:51.643506Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:51.643448Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630252273344823:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:51.643534Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:51.643612Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630250608990187:2272], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:51.643810Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630252273344838:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:51.647908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:25:51.652727Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630252273344880:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:51.652831Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:51.653015Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630250608990194:2125] txid# 281474976720657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T21:25:51.666258Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630252273344840:2307], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T21:25:51.666330Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630250608990193:2273], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T21:25:51.725461Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630252273344928:2805] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:25:51.763698Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630250608990221:2131] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:25:51.929524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:51.946778Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519630252273344946:2313], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:25:51.946779Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519630250608990236:2277], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:25:51.947002Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NDBmMDFhNWUtNzcwZTc2YjEtZmY1NTAwZDItYWI0MTEwMDE=, ActorId: [2:7519630250608990177:2268], ActorState: ExecuteState, TraceId: 01jyhxagcjem45g5t7knny5gqy, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:25:51.947016Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZTkxNGExYTgtNTE1YmM3ZDctNGRjYTNkN2EtN2Q5MDlhM2I=, ActorId: [1:7519630252273344806:2300], ActorState: ExecuteState, TraceId: 01jyhxagccekp6kstx2cq54647, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:25:51.949369Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:25:51.949364Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:25:52.011020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:52.096457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-24T21:25:52.295318Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710667. 
Ctx: { TraceId: 01jyhxagxa4rz5z9s808gxq0af, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjdjOWRjOGItZDljM2EzMjctYTU5YzYyMjktZDUwZTc3MjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7519630256568312615:3087] 2025-06-24T21:25:53.564965Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630239388441828:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:53.565047Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:25:53.605922Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519630237724088017:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:53.605993Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-24T21:25:58.552537Z node 1 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [1:7519630282338116714:3226] (SourceId=base64:a***, PreferedPartition=(NULL)) Start idle 2025-06-24T21:25:58.552610Z node 1 :PQ_PARTITION_CHOOSER INFO: partition_chooser_impl__abstract_chooser_actor.h:312: TPartitionChooser [1:7519630282338116714:3226] (SourceId=base64:a***, PreferedPartition=(NULL)) ReplyError: Bad SourceId Received TEvChooseError: Bad SourceId ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::IndexedTable [GOOD] Test command err: 2025-06-24T21:25:56.850127Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630271863583466:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:56.850237Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044b2/r3tmp/tmp8rHyBj/pdisk_1.dat 2025-06-24T21:25:57.168945Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:57.170368Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630271863583446:2079] 1750800356849343 != 1750800356849346 2025-06-24T21:25:57.206904Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:57.207018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:57.209164Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7441 TServer::EnableGrpc on GrpcPort 23774, node 1 2025-06-24T21:25:57.359268Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:25:57.359299Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize 
from file: (empty maybe) 2025-06-24T21:25:57.359313Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:25:57.359483Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7441 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:25:57.711884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:57.745346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:25:57.991581Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:58.137733Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Root, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1750800357775, tx_id: 1 } } } 2025-06-24T21:25:58.137771Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root 2025-06-24T21:25:58.161383Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800358055, tx_id: 281474976710658 } }, { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } }] } } 2025-06-24T21:25:58.161423Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root 2025-06-24T21:25:59.444634Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:98: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800358055, tx_id: 281474976710658 } } } 2025-06-24T21:25:59.444674Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:113: [TargetDiscoverer][rid 1] Describe table succeeded: path# /Root/Table 2025-06-24T21:25:59.444724Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:120: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Table, dstPath# /Root/Replicated/Table, kind# Table 2025-06-24T21:25:59.444828Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:140: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Table/Index, dstPath# /Root/Replicated/Table/Index/indexImplTable, kind# IndexTable |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::Basic [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTestInternal::RestoreKeys [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:111:2057] recipient: [1:104:2136] 2025-06-24T21:25:48.201722Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:48.201840Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927938 is [1:156:2175] sender: [1:157:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:182:2057] recipient: [1:14:2061] 2025-06-24T21:25:48.220110Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not 
enabled in BillingMeteringConfig 2025-06-24T21:25:48.238574Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1 actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "user1" Generation: 1 Important: true } 2025-06-24T21:25:48.239380Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:188:2199] 2025-06-24T21:25:48.241914Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:188:2199] 2025-06-24T21:25:48.245210Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:189:2200] 2025-06-24T21:25:48.247025Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:189:2200] 2025-06-24T21:25:48.258491Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|8b16c01d-230d4196-627894b4-29197745_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:235:2057] recipient: [1:102:2135] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:238:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:239:2057] recipient: [1:237:2237] Leader for TabletID 72057594037927937 is [1:240:2238] sender: [1:241:2057] recipient: [1:237:2237] 2025-06-24T21:25:48.365058Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:48.365129Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:48.365659Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:289:2279] 2025-06-24T21:25:48.367667Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:290:2280] 2025-06-24T21:25:48.375094Z node 1 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:25:48.375213Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [1:290:2280] 2025-06-24T21:25:48.387794Z node 1 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-24T21:25:48.387889Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [1:289:2279] Leader for TabletID 72057594037927937 is [1:240:2238] sender: [1:316:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:106:2057] recipient: [2:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:106:2057] recipient: [2:104:2136] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:111:2057] recipient: [2:104:2136] 2025-06-24T21:25:48.800662Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:48.800747Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:152:2057] recipient: [2:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:152:2057] recipient: [2:150:2171] Leader for TabletID 72057594037927938 is [2:156:2175] sender: [2:157:2057] recipient: [2:150:2171] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:180:2057] recipient: [2:14:2061] 2025-06-24T21:25:48.817737Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:48.818743Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 2 actor [2:178:2191] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } Consumers { Name: "user1" Generation: 2 Important: true } 2025-06-24T21:25:48.819482Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:186:2197] 2025-06-24T21:25:48.822109Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [2:186:2197] 2025-06-24T21:25:48.824970Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:187:2198] 2025-06-24T21:25:48.826920Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [2:187:2198] 2025-06-24T21:25:48.838603Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|439cc1b8-1cd9b469-8f8b784e-49184157_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:233:2057] recipient: [2:102:2135] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:236:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is 
[2:110:2140] sender: [2:237:2057] recipient: [2:235:2235] Leader for TabletID 72057594037927937 is [2:238:2236] sender: [2:239:2057] recipient: [2:235:2235] 2025-06-24T21:25:48.937302Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:48.937370Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:48.938014Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:287:2277] 2025-06-24T21:25:48.940486Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:288:2278] 2025-06-24T21:25:48.949177Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:25:48.949262Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [2:288:2278] 2025-06-24T21:25:48.967078Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:25:48.967255Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [2:287:2277] Leader for TabletID 72057594037927937 is [2:238:2236] sender: [2:314:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:106:2057] recipient: [3:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:106:2057] recipient: [3:104:2136] Leader for TabletID 72057594037927937 is [3:110:2140] sender: [3:111:2057] recipient: [3:104:2136] 2025-06-24T21:25:49.426940Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:49.427031Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:152:2057] recipient: [3:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:152:2057] recipient: [3:150:2171] Leader for TabletID 72057594037927938 is [3:156:2175] sender: [3:157:2057] recipient: [3:150:2171] Leader for TabletID 72057594037927937 is [3:110:2140] sender: [3:182:2057] recipient: [3:14:2061] 2025-06-24T21:25:49.446951Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:49.447909Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 3 actor [3:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { 
PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } Consumers { Name: "user1" Generation: 3 Important: tru ... 'topic' owner default 2025-06-24T21:25:54.080525Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|affbcc94-6e98ac1f-ed74dcd3-c2da4003_16 generated for partition 0 topic 'topic' owner default 2025-06-24T21:25:54.180267Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|e7d71cbe-382e5d5d-fab24d3c-97dced00_17 generated for partition 0 topic 'topic' owner default 2025-06-24T21:25:54.260297Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|243bf35f-ece61b9-c2b81f27-199a1de1_18 generated for partition 0 topic 'topic' owner default 2025-06-24T21:25:54.349764Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|1afb5165-f9ebc1a8-21d6ba29-8a42cc95_19 generated for partition 0 topic 'topic' owner default 2025-06-24T21:25:54.444088Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|bfb089ec-45cf32b9-a7cbc00c-7bb69035_20 generated for partition 0 topic 'topic' owner default 2025-06-24T21:25:54.536113Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|2ff5214b-ba316e75-ff94c13-697f2fa1_21 generated for partition 0 topic 'topic' owner default 2025-06-24T21:25:54.617914Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|88d334d5-58b2cd76-8335ad59-82c34d17_22 generated for partition 0 topic 'topic' owner default 2025-06-24T21:25:54.717216Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3cffbe6e-759631f9-295b2bf5-a3973c69_23 generated for partition 0 topic 'topic' owner default 2025-06-24T21:25:54.905280Z node 6 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:54.905346Z node 6 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:54.905897Z node 6 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [6:652:2588] 2025-06-24T21:25:54.912267Z node 6 :PERSQUEUE INFO: partition_init.cpp:895: [topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:25:54.912377Z node 6 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 3 [6:652:2588] 2025-06-24T21:25:55.098957Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 1 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-24T21:25:55.099047Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 2 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-24T21:25:55.099087Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 3 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-24T21:25:55.099127Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 4 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-24T21:25:55.099185Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 5 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-24T21:25:55.099226Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 6 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-24T21:25:55.099260Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 7 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-24T21:25:55.099294Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 8 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-24T21:25:55.099328Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 9 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-24T21:25:55.099368Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 10 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-24T21:25:55.099403Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 11 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-24T21:25:55.099442Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 12 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-24T21:25:55.099475Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 13 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-24T21:25:55.099508Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 14 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-24T21:25:55.099540Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 15 partno 0 count 1 parts 2 suffix '63' size 1048783 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 1 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 2 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 3 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 4 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 5 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 6 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 7 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 8 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 9 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 10 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 11 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 12 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 13 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 14 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 15 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 16 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] 2025-06-24T21:26:00.404515Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 16 partno 0 count 1 parts 2 suffix '63' size 1048783 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 17 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] 2025-06-24T21:26:00.424261Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 17 partno 0 count 1 parts 2 suffix '63' size 1048783 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 18 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] 2025-06-24T21:26:00.436624Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 18 partno 0 count 1 parts 2 suffix '63' size 1048783 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 19 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] 2025-06-24T21:26:00.448325Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 19 partno 0 count 1 parts 2 suffix '63' size 1048783 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 20 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] 2025-06-24T21:26:00.453005Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 20 partno 0 count 1 parts 0 suffix '63' size 41021 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 21 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] 2025-06-24T21:26:00.455105Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 21 partno 0 count 1 parts 0 suffix '63' size 41021 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 22 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] 2025-06-24T21:26:00.457002Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 22 partno 0 count 1 parts 0 suffix '63' size 41021 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 23 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] 2025-06-24T21:26:00.458666Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 23 partno 0 count 1 parts 0 suffix '63' size 41021 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 24 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [6:180:2193] 2025-06-24T21:26:00.460555Z node 6 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 24 partno 0 count 1 parts 0 suffix '63' size 41021 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::Basic [GOOD] Test command err: 2025-06-24T21:25:58.311770Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630279894293102:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:58.311915Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044af/r3tmp/tmp7ve5MR/pdisk_1.dat 2025-06-24T21:25:58.624225Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:58.627828Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630279894293079:2079] 1750800358310520 != 1750800358310523 2025-06-24T21:25:58.695758Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:58.695896Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:58.697898Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22134 TServer::EnableGrpc on GrpcPort 1676, node 1 2025-06-24T21:25:58.849463Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:25:58.849488Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:25:58.849494Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:25:58.849597Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22134 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:25:59.179870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:25:59.194152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:59.317609Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:59.339790Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Root, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1750800359231, tx_id: 1 } } } 2025-06-24T21:25:59.339830Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root 2025-06-24T21:25:59.365309Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800359301, tx_id: 281474976715658 } }, { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } }] } } 2025-06-24T21:25:59.365341Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root 2025-06-24T21:26:00.826620Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:98: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800359301, tx_id: 281474976715658 } } } 2025-06-24T21:26:00.826648Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:113: [TargetDiscoverer][rid 1] Describe table succeeded: path# /Root/Table 2025-06-24T21:26:00.826674Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:120: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Table, dstPath# /Root/Replicated/Table, kind# Table |97.2%| [TA] $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/test-results/unittest/{meta.json ... results_accumulator.log} |97.2%| [TA] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_Active_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_InactiveConfig_Test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::ManyPartitionsSortingLimit [GOOD] Test command err: Trying to start YDB, gRPC: 15612, MsgBus: 61404 2025-06-24T21:16:12.939903Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519627762738373102:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:12.940010Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003214/r3tmp/tmpr8SUOT/pdisk_1.dat 2025-06-24T21:16:13.444904Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519627762738373084:2079] 1750799772939170 != 1750799772939173 2025-06-24T21:16:13.460788Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:16:13.509180Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:16:13.509254Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 15612, node 1 2025-06-24T21:16:13.512872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:16:13.571797Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:16:13.571825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:16:13.571833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:16:13.571963Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61404 2025-06-24T21:16:13.947714Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:61404 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:16:14.186098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:16:14.233860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:14.395686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:14.606283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:14.719338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:16.245725Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627779918244899:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:16.245820Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:16.624153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:16.682222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:16.741155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:16.797866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:16.869192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:16.932951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:17.006133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:16:17.092683Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627784213213308:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:17.092761Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:17.092973Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519627784213213313:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:16:17.096598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:16:17.119207Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519627784213213315:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:16:17.207411Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519627784213213395:4854] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:16:17.943248Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519627762738373102:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:16:17.959021Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:16:18.245437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:16:28.395368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot ... rvice] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:05.862591Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519629538144319031:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:05.868465Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:05.904995Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519629538144319033:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:23:06.002361Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519629538144319108:2698] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:14.435669Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:23:14.435705Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:16.536974Z node 3 :TX_DATASHARD ERROR: datashard__stats.cpp:649: CPU usage 68.8419 is higher than threshold of 60 in-flight Tx: 0 immediate Tx: 1 readIterators: 0 at datashard: 72075186224037888 table: [/Root/LargeTable] 2025-06-24T21:25:27.707312Z node 3 :TX_DATASHARD ERROR: datashard__stats.cpp:649: CPU usage 62.3841 is higher than threshold of 60 in-flight Tx: 0 immediate Tx: 0 readIterators: 0 at datashard: 72075186224037890 table: [/Root/LargeTable] 2025-06-24T21:25:39.019476Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2025-06-24T21:25:49.138303Z node 3 :HIVE ERROR: tx__update_tablet_groups.cpp:135: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{88923008055616}: tablet 72075186224037890 could not find a group for channel 0 pool /Root:test 2025-06-24T21:25:49.138379Z node 3 :HIVE ERROR: tx__update_tablet_groups.cpp:135: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{88923008055616}: tablet 72075186224037890 could not find a group for channel 1 pool /Root:test 2025-06-24T21:25:49.138400Z node 3 :HIVE WARN: tx__update_tablet_groups.cpp:272: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{88923008055616}: tablet 72075186224037890 wasn't changed 2025-06-24T21:25:49.138425Z node 3 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{88923008055616}: tablet 72075186224037890 skipped channel 0 2025-06-24T21:25:49.138461Z node 3 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{88923008055616}: tablet 72075186224037890 skipped channel 1 2025-06-24T21:25:49.339824Z node 3 :BS_SKELETON WARN: skeleton_oos_tracker.cpp:62: PDiskId# 1 VDISK[82000000:_:0:0:0]: (2181038080) TDskSpaceTrackerActor: YELLOW ZONE Marker# BSVSOOST01 2025-06-24T21:25:49.891739Z node 3 :BS_SKELETON WARN: skeleton_oos_tracker.cpp:62: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TDskSpaceTrackerActor: YELLOW ZONE Marker# BSVSOOST01 2025-06-24T21:25:50.138689Z node 3 :TX_DATASHARD ERROR: check_data_tx_unit.cpp:83: Cannot perform transaction: out of disk space at tablet 72075186224037890 txId 281474976715760 2025-06-24T21:25:50.138775Z node 3 :TX_DATASHARD ERROR: finish_propose_unit.cpp:245: Prepare transaction failed. 
txid 281474976715760 at tablet 72075186224037890 errors: OUT_OF_SPACE (Cannot perform transaction: out of disk space at tablet 72075186224037890 txId 281474976715760) | 2025-06-24T21:25:50.138883Z node 3 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715760 at tablet 72075186224037890 status: ERROR errors: OUT_OF_SPACE (Cannot perform transaction: out of disk space at tablet 72075186224037890 txId 281474976715760) | 2025-06-24T21:25:50.139109Z node 3 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:864: ActorId: [3:7519630242518968822:2302] TxId: 281474976715760. Ctx: { TraceId: 01jyhxae973x2awty3dp0kk9mv, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NWE2YmE3YjItZjI3ZjUwN2EtYjdjYzIzZWMtYTY0NzMwYzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ERROR: [OUT_OF_SPACE] Cannot perform transaction: out of disk space at tablet 72075186224037890 txId 281474976715760; 2025-06-24T21:25:50.139693Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NWE2YmE3YjItZjI3ZjUwN2EtYjdjYzIzZWMtYTY0NzMwYzI=, ActorId: [3:7519629538144319009:2302], ActorState: ExecuteState, TraceId: 01jyhxae973x2awty3dp0kk9mv, Create QueryResponse for error on request, msg: Got out of space. Successfully inserted 30 x 0 lines, each of size 1048576bytes2025-06-24T21:25:50.343763Z node 3 :BS_SKELETON ERROR: skeleton_oos_tracker.cpp:62: PDiskId# 1 VDISK[82000000:_:0:0:0]: (2181038080) TDskSpaceTrackerActor: LIGHT_ORANGE ZONE Marker# BSVSOOST01 Trying to start YDB, gRPC: 16820, MsgBus: 21170 2025-06-24T21:25:51.188436Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519630253501746722:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:51.188536Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003214/r3tmp/tmpQ8Hj83/pdisk_1.dat 2025-06-24T21:25:51.335885Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:51.338476Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519630253501746699:2079] 1750800351187601 != 1750800351187604 2025-06-24T21:25:51.358932Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:51.359044Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:51.361747Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16820, node 4 2025-06-24T21:25:51.407808Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:25:51.407835Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:25:51.407849Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:25:51.408038Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21170 TClient is 
connected to server localhost:21170 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:25:51.957051Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:25:51.982085Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:53.335819Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:54.958247Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519630266386653572:2598], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:54.958322Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519630266386653553:2595], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:54.958660Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:54.963577Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:25:54.976968Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519630266386653582:2599], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:25:55.067374Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519630270681620931:5239] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TPQTest::TestPQPartialRead [GOOD] >> TPQTest::TestPQRead [GOOD] >> TPQTest::TestPQSmallRead |97.2%| [TA] $(B)/ydb/core/kqp/ut/query/test-results/unittest/{meta.json ... results_accumulator.log} |97.2%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/query/test-results/unittest/{meta.json ... results_accumulator.log} >> THealthCheckTest::Issues100GroupsListing >> THealthCheckTest::RedGroupIssueWhenDisintegratedGroupStatus >> THealthCheckTest::StaticGroupIssue >> THealthCheckTest::StorageLimit95 >> THealthCheckTest::Basic >> THealthCheckTest::OneIssueListing >> THealthCheckTest::ShardsLimit999 >> THealthCheckTest::Issues100Groups100VCardListing >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_BadSourceId_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_NewSourceId_Test >> TListAllTopicsTests::RecursiveList [GOOD] >> TListAllTopicsTests::ListLimitAndPaging >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning+UseSink-UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning-UseSink+UseDataQuery >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionNotExists_Test [GOOD] >> TPartitionGraphTest::BuildGraph [GOOD] >> TPartitionTests::AfterRestart_1 |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest >> TPartitionTests::AfterRestart_1 [GOOD] |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest >> TPartitionTests::AfterRestart_2 >> TPartitionTests::AfterRestart_2 [GOOD] >> THealthCheckTest::Basic [GOOD] >> THealthCheckTest::BasicNodeCheckRequest >> TPartitionTests::UserActCount [GOOD] |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest >> PQCountersLabeled::NewConsumersCountersAppear [GOOD] >> PQCountersSimple::Partition >> Worker::Basic >> TPartitionTests::TooManyImmediateTxs >> TPQTest::TestPartitionPerConsumerQuota [GOOD] >> TPQTest::TestPartitionWriteQuota >> PQCountersSimple::Partition [GOOD] >> TPQTestSlow::TestWriteVeryBigMessage [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionActive_BoundaryFalse_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionInactive_0_Test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::AfterRestart_2 [GOOD] Test command err: 2025-06-24T21:25:36.611358Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630188729051515:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:36.615376Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:25:36.676398Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630185232513756:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:36.676624Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:25:36.852207Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b25/r3tmp/tmpsKu15A/pdisk_1.dat 2025-06-24T21:25:36.866362Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:25:37.020276Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:37.035778Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:37.035883Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:37.038750Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:25:37.039668Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:25:37.073357Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:37.073438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 9482, node 1 2025-06-24T21:25:37.086977Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:25:37.120276Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/003b25/r3tmp/yandexRRrWaB.tmp 2025-06-24T21:25:37.120319Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/003b25/r3tmp/yandexRRrWaB.tmp 2025-06-24T21:25:37.120525Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/003b25/r3tmp/yandexRRrWaB.tmp 2025-06-24T21:25:37.120665Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:25:37.303882Z INFO: TTestServer started on Port 7388 GrpcPort 9482 TClient is connected to server localhost:7388 PQClient connected to localhost:9482 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:25:37.624692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:25:37.636414Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... waiting... 2025-06-24T21:25:37.693877Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:37.711009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T21:25:39.859758Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630198117415922:2272], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:39.859837Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630198117415905:2269], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:39.859894Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:39.865596Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630201613954508:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:39.865660Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630201613954516:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:39.865707Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:39.867666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:25:39.890151Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630201613954524:2739] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T21:25:39.891240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976720657, at schemeshard: 72057594046644480 2025-06-24T21:25:39.893269Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630201613954523:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-24T21:25:39.893420Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630198117415932:2273], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-24T21:25:39.965114Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630198117415960:2131] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:25:39.972248Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630201613954620:2804] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:25:40.242217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:40.245093Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519630201613954630:2313], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:25:40.245350Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OTM3YWIzYjItOTQyOWM1MjQtNGU2NzI1MDAtM2JlYWZmN2E=, ActorId: [1:7519630201613954506:2301], ActorState: ExecuteState, TraceId: 01jyhxa4wr2cmbkratww5mhwgp, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:25:40.247884Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:25:40.248294Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519630198117415975:2277], status: SCHEME_ERROR, issues:
: Error: Type annota ... f8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-24T21:26:05.571709Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__sm_chooser_actor.h:116: TPartitionChooser [3:7519630313692638904:3722] (SourceId=A_Source_5, PreferedPartition=(NULL)) GetOwnershipFast Partition=1 TabletId=1001 2025-06-24T21:26:05.571872Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 269877760, Sender [3:7519630313692638905:3722], Recipient [3:7519630300807736071:3184]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 1001 Status: OK ServerId: [3:7519630313692638904:3722] Leader: 1 Dead: 0 Generation: 1 VersionInfo: } 2025-06-24T21:26:05.571954Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 271188557, Sender [3:7519630313692638904:3722], Recipient [3:7519630300807736071:3184]: NKikimrPQ.TEvCheckPartitionStatusRequest Partition: 1 SourceId: "A_Source_5" 2025-06-24T21:26:05.572025Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:139: StateOwnershipFast, received event# 271188558, Sender [3:7519630300807736071:3184], Recipient [3:7519630313692638904:3722]: NKikimrPQ.TEvCheckPartitionStatusResponse Status: Active 2025-06-24T21:26:05.572052Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:88: TPartitionChooser [3:7519630313692638904:3722] (SourceId=A_Source_5, PreferedPartition=(NULL)) InitTable: SourceId=A_Source_5 TopicsAreFirstClassCitizen=1 UseSrcIdMetaMappingInFirstClass=1 2025-06-24T21:26:05.572096Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 65543, Sender [3:7519630313692638904:3722], Recipient [3:7519630300807736071:3184]: NActors::TEvents::TEvPoison 2025-06-24T21:26:05.572206Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:101: StateInitTable, received event# 277020685, Sender [3:7519630257858061248:2071], Recipient [3:7519630313692638904:3722]: NKikimr::NMetadata::NProvider::TEvManagerPrepared 2025-06-24T21:26:05.572236Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [3:7519630313692638904:3722] (SourceId=A_Source_5, PreferedPartition=(NULL)) StartKqpSession 2025-06-24T21:26:05.574385Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:132: StateCreateKqpSession, received event# 271646728, Sender [3:7519630257858061359:2173], Recipient [3:7519630313692638904:3722]: NKikimrKqp.TEvCreateSessionResponse Error: "" Response { SessionId: "ydb://session/3?node_id=3&id=Njg4YWEyNWMtM2Q0MTdmOGUtYWM0ZjdiYTItNTMxYTZmYzY=" NodeId: 3 } YdbStatus: SUCCESS ResourceExhausted: false 2025-06-24T21:26:05.574419Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [3:7519630313692638904:3722] (SourceId=A_Source_5, PreferedPartition=(NULL)) Select from the table 2025-06-24T21:26:05.720335Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:163: StateSelect, received event# 271646721, Sender [3:7519630257858061359:2173], Recipient [3:7519630313692638904:3722]: NKikimrKqp.TEvQueryResponse 
Response { SessionId: "ydb://session/3?node_id=3&id=Njg4YWEyNWMtM2Q0MTdmOGUtYWM0ZjdiYTItNTMxYTZmYzY=" PreparedQuery: "5681cf06-2a113bf3-da606898-afcc8b7f" QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { id: "01jyhxay4h5tmvc60y8k11vrxn" } YdbResults { columns { name: "Partition" type { optional_type { item { type_id: UINT32 } } } } columns { name: "CreateTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "AccessTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "SeqNo" type { optional_type { item { type_id: UINT64 } } } } rows { items { uint32_value: 0 } items { uint64_value: 1750800365424 } items { uint64_value: 1750800365424 } items { uint64_value: 13 } } } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 80 2025-06-24T21:26:05.720534Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:151: TPartitionChooser [3:7519630313692638904:3722] (SourceId=A_Source_5, PreferedPartition=(NULL)) Selected from table PartitionId=0 SeqNo=13 2025-06-24T21:26:05.720564Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:209: TPartitionChooser [3:7519630313692638904:3722] (SourceId=A_Source_5, PreferedPartition=(NULL)) OnPartitionChosen 2025-06-24T21:26:05.720763Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 269877760, Sender [3:7519630313692638956:3722], Recipient [3:7519630300807736071:3184]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 1001 Status: OK ServerId: [3:7519630313692638904:3722] Leader: 1 Dead: 0 Generation: 1 VersionInfo: } 2025-06-24T21:26:05.720864Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 271188557, Sender [3:7519630313692638904:3722], Recipient [3:7519630300807736071:3184]: NKikimrPQ.TEvCheckPartitionStatusRequest Partition: 1 2025-06-24T21:26:05.720945Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:240: StateCheckPartition, received event# 271188558, Sender [3:7519630300807736071:3184], Recipient [3:7519630313692638904:3722]: NKikimrPQ.TEvCheckPartitionStatusResponse Status: Active 2025-06-24T21:26:05.720980Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [3:7519630313692638904:3722] (SourceId=A_Source_5, PreferedPartition=(NULL)) Update the table 2025-06-24T21:26:05.721318Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 65543, Sender [3:7519630313692638904:3722], Recipient [3:7519630300807736071:3184]: NActors::TEvents::TEvPoison 2025-06-24T21:26:05.819856Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:212: StateUpdate, received event# 271646721, Sender [3:7519630257858061359:2173], Recipient [3:7519630313692638904:3722]: NKikimrKqp.TEvQueryResponse Response { SessionId: "ydb://session/3?node_id=3&id=Njg4YWEyNWMtM2Q0MTdmOGUtYWM0ZjdiYTItNTMxYTZmYzY=" PreparedQuery: "c6d045f3-8297d479-c0ed5265-e5c96a2c" QueryParameters { Name: "$AccessTime" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$CreateTime" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Partition" Type { Kind: Data 
Data { Scheme: 2 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SeqNo" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 58 Received TEvChooseResult: 2025-06-24T21:26:05.819923Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7519630313692638904:3722] (SourceId=A_Source_5, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 1 2025-06-24T21:26:05.819960Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7519630313692638904:3722] (SourceId=A_Source_5, PreferedPartition=(NULL)) ReplyResult: Partition=1, SeqNo=13 2025-06-24T21:26:05.819990Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7519630313692638904:3722] (SourceId=A_Source_5, PreferedPartition=(NULL)) Start idle Run query: --!syntax_v1 SELECT Partition, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash = 11131928866524144434 AND Topic = "Root" AND ProducerId = "00415F536F757263655F35" 2025-06-24T21:26:05.977510Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715708. Ctx: { TraceId: 01jyhxay8fcs3fxnqgsvpfgt02, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzRkNGQ4Zi04MGM4MDRjOS1iZjI5ZTBjOS04OTk5YTU0ZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:26:06.243213Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [3:7519630317987606331:2640] TxId: 281474976715709. Ctx: { TraceId: 01jyhxaybm5cyftbqh5pet84c0, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTU4ZTdhZDMtZGZmZTZiZTgtMzlmMzU2OWQtZGU5ZGE4Y2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 4 2025-06-24T21:26:06.243912Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7519630317987606335:2640], TxId: 281474976715709, task: 3. Ctx: { SessionId : ydb://session/3?node_id=3&id=NTU4ZTdhZDMtZGZmZTZiZTgtMzlmMzU2OWQtZGU5ZGE4Y2M=. CustomerSuppliedId : . TraceId : 01jyhxaybm5cyftbqh5pet84c0. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [3:7519630317987606331:2640], status: UNAVAILABLE, reason: {
: Error: Terminate execution } 2025-06-24T21:26:07.623539Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:07.623680Z node 5 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:26:07.647306Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [5:182:2195] 2025-06-24T21:26:07.650488Z node 5 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-24T21:26:07.000000Z 2025-06-24T21:26:07.650576Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [5:182:2195] Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\230\373\325\236\3722" StorageChannel: INLINE } CmdWrite { Key: "I0000000003" Value: "\010\271`\020\316\255\001" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\004\020\000\030\000\"\007session(\0000\000@\001" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\004\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000session" StorageChannel: INLINE } 2025-06-24T21:26:08.472950Z node 6 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:08.473036Z node 6 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:26:08.493166Z node 6 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [6:182:2195] 2025-06-24T21:26:08.496578Z node 6 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-24T21:26:08.000000Z 2025-06-24T21:26:08.496678Z node 6 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [6:182:2195] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/slow/unittest >> TPQTestSlow::TestWriteVeryBigMessage [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:111:2057] recipient: [1:104:2136] 2025-06-24T21:25:33.075934Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:33.076034Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927938 is [1:156:2175] sender: [1:157:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:182:2057] recipient: [1:14:2061] 2025-06-24T21:25:33.120028Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:33.148319Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1 actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-24T21:25:33.151461Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:188:2199] 2025-06-24T21:25:33.155622Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:188:2199] 2025-06-24T21:25:33.158816Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:189:2200] 2025-06-24T21:25:33.160596Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:189:2200] 2025-06-24T21:25:33.174350Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|23520ff2-949f514d-deb54d51-55751d3d_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:33.210314Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|bae4d8b5-bcfa594a-751b75de-f61fc20e_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:33.236489Z node 1 :PERSQUEUE INFO: 
ownerinfo.cpp:30: new Cookie default|6a44068c-63aa0894-9b9a85b0-41983491_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:241:2057] recipient: [1:102:2135] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:244:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:245:2057] recipient: [1:243:2243] Leader for TabletID 72057594037927937 is [1:246:2244] sender: [1:247:2057] recipient: [1:243:2243] 2025-06-24T21:25:33.309660Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:33.309734Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:33.310252Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:295:2285] 2025-06-24T21:25:33.312557Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:296:2286] 2025-06-24T21:25:33.319251Z node 1 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:25:33.319338Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [1:295:2285] 2025-06-24T21:25:33.320824Z node 1 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:25:33.320887Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [1:296:2286] 2025-06-24T21:25:33.331934Z node 1 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 0 partno 0 count 1 parts 2 suffix '63' size 1048786 Leader for TabletID 72057594037927937 is [1:246:2244] sender: [1:322:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:106:2057] recipient: [2:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:106:2057] recipient: [2:104:2136] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:111:2057] recipient: [2:104:2136] 2025-06-24T21:25:33.783518Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:33.783645Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:152:2057] recipient: [2:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:152:2057] recipient: [2:150:2171] Leader for TabletID 72057594037927938 is [2:156:2175] sender: [2:157:2057] recipient: [2:150:2171] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:180:2057] recipient: [2:14:2061] 2025-06-24T21:25:33.805666Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:33.806871Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 2 actor [2:178:2191] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-24T21:25:33.807617Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:186:2197] 2025-06-24T21:25:33.810284Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [2:186:2197] 2025-06-24T21:25:33.812183Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:187:2198] 2025-06-24T21:25:33.814270Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [2:187:2198] 2025-06-24T21:25:33.822562Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|7571c93c-1197d021-bee69279-ea7c65c9_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:33.831319Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|123b1a0-99892266-b7b77f6b-88043882_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:33.852769Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|7d6c61c5-a9a1f5b2-2c7c0fdc-3a8b6677_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default !Reboot 72057594037927937 (actor [2:110:2140]) on event 
NKikimr::TEvPersQueue::TEvOffsets ! Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:240:2057] recipient: [2:102:2135] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:243:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:244:2057] recipient: [2:242:2242] Leader for TabletID 72057594037927937 is [2:245:2243] sender: [2:246:2057] recipient: [2:242:2242] 2025-06-24T21:25:33.915962Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:33.916025Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:33.916792Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:294:2284] 2025-06-24T21:25:33.918468Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:295:2285] 2025-06-24T21:25:33.925352Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:25:33.925438Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [2:294:2284] 2025-06-24T21:25:33.928776Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:25:33.928846Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [2:295:2285] 2025-06-24T21:25:33.939711Z node 2 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 0 partno 0 count 1 parts 2 suffix '63' size 1048786 !Reboot 72057594037927937 (actor [2:110:2140]) rebooted! !Reboot 72057594037927937 (actor [2:110:2140]) tablet resolver refreshed! new actor is[2:245:2243] Leader for TabletID 72057594037927937 is [2:245:2243] sender: [2:349:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:245:2243] sender: [2:352:2057] recipient: [2:102:2135] Leader for TabletID 72057594037927937 is [2:245:2243] sender: [2:355:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:245:2243] sender: [2:356:2057] recipient: [2:354:2316] Leader for TabletID 72057594037927937 is [2:357:2317] sender: [2:358:2057] recipient: [2:354:2316] 2025-06-24T21:25:35.250747Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:35.250813Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:35.251517Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:408:2360] 2025-06- ... Init] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 4 [52:384:2350] 2025-06-24T21:26:07.310544Z node 52 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-24T21:26:07.310617Z node 52 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 4 [52:385:2351] 2025-06-24T21:26:07.336940Z node 52 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 0 partno 0 count 1 parts 2 suffix '63' size 1048786 !Reboot 72057594037927937 (actor [52:248:2246]) rebooted! !Reboot 72057594037927937 (actor [52:248:2246]) tablet resolver refreshed! new actor is[52:333:2307] Leader for TabletID 72057594037927937 is [52:333:2307] sender: [52:439:2057] recipient: [52:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [53:106:2057] recipient: [53:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [53:106:2057] recipient: [53:104:2136] Leader for TabletID 72057594037927937 is [53:110:2140] sender: [53:111:2057] recipient: [53:104:2136] 2025-06-24T21:26:08.977994Z node 53 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:08.978082Z node 53 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [53:152:2057] recipient: [53:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [53:152:2057] recipient: [53:150:2171] Leader for TabletID 72057594037927938 is [53:156:2175] sender: [53:157:2057] recipient: [53:150:2171] Leader for TabletID 72057594037927937 is [53:110:2140] sender: [53:182:2057] recipient: [53:14:2061] 2025-06-24T21:26:09.003353Z node 53 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:09.004409Z node 53 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 53 actor [53:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 53 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 53 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 53 Important: false } 2025-06-24T21:26:09.005109Z node 53 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [53:188:2199] 2025-06-24T21:26:09.008298Z node 53 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [53:188:2199] 2025-06-24T21:26:09.010374Z node 53 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [53:189:2200] 2025-06-24T21:26:09.012794Z node 53 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [53:189:2200] 2025-06-24T21:26:09.020993Z node 53 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|504a8b88-4226c529-983e4dce-2207e3b_0 generated for 
partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:09.028586Z node 53 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|cd0aef52-a9fb8222-42a97327-2f736768_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:09.049681Z node 53 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b6c463ee-3f8d5a25-c4b8459-117281bd_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [53:110:2140] sender: [53:243:2057] recipient: [53:102:2135] Leader for TabletID 72057594037927937 is [53:110:2140] sender: [53:246:2057] recipient: [53:14:2061] Leader for TabletID 72057594037927937 is [53:110:2140] sender: [53:247:2057] recipient: [53:245:2245] Leader for TabletID 72057594037927937 is [53:248:2246] sender: [53:249:2057] recipient: [53:245:2245] 2025-06-24T21:26:09.109795Z node 53 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:09.109866Z node 53 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:26:09.110585Z node 53 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [53:297:2287] 2025-06-24T21:26:09.112882Z node 53 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [53:298:2288] 2025-06-24T21:26:09.120296Z node 53 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:26:09.120359Z node 53 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [53:297:2287] 2025-06-24T21:26:09.123358Z node 53 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:26:09.123414Z node 53 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [53:298:2288] 2025-06-24T21:26:09.132763Z node 53 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 0 partno 0 count 1 parts 2 suffix '63' size 1048786 Leader for TabletID 72057594037927937 is [53:248:2246] sender: [53:326:2057] recipient: [53:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [54:106:2057] recipient: [54:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [54:106:2057] recipient: [54:104:2136] Leader for TabletID 72057594037927937 is [54:110:2140] sender: [54:111:2057] recipient: [54:104:2136] 2025-06-24T21:26:09.582894Z node 54 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:09.582980Z node 54 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [54:152:2057] recipient: [54:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [54:152:2057] recipient: [54:150:2171] Leader for TabletID 72057594037927938 is [54:156:2175] sender: [54:157:2057] recipient: [54:150:2171] Leader for TabletID 72057594037927937 is [54:110:2140] sender: [54:182:2057] recipient: [54:14:2061] 2025-06-24T21:26:09.604966Z node 54 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:09.605914Z node 54 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 54 actor [54:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 54 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 54 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 54 Important: false } 2025-06-24T21:26:09.606596Z node 54 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [54:188:2199] 2025-06-24T21:26:09.609500Z node 54 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [54:188:2199] 2025-06-24T21:26:09.611553Z node 54 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [54:189:2200] 2025-06-24T21:26:09.613794Z node 54 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [54:189:2200] 2025-06-24T21:26:09.620397Z node 54 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|67850c72-ace634aa-2615d0a8-93abbe6a_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:09.627412Z node 54 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|a5f43a7b-a8cb04cf-f66abce6-41165a9b_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:09.646787Z node 54 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|a25c8063-94847d8-9d89eaa2-d5ae5750_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 
is [54:110:2140] sender: [54:241:2057] recipient: [54:102:2135] Leader for TabletID 72057594037927937 is [54:110:2140] sender: [54:243:2057] recipient: [54:14:2061] Leader for TabletID 72057594037927937 is [54:110:2140] sender: [54:245:2057] recipient: [54:244:2243] Leader for TabletID 72057594037927937 is [54:246:2244] sender: [54:247:2057] recipient: [54:244:2243] 2025-06-24T21:26:09.706856Z node 54 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:09.706926Z node 54 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:26:09.707769Z node 54 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [54:295:2285] 2025-06-24T21:26:09.710681Z node 54 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [54:296:2286] 2025-06-24T21:26:09.718920Z node 54 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:26:09.719004Z node 54 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [54:295:2285] 2025-06-24T21:26:09.722461Z node 54 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:26:09.722536Z node 54 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [54:296:2286] 2025-06-24T21:26:09.733245Z node 54 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 0 partno 0 count 1 parts 2 suffix '63' size 1048786 Leader for TabletID 72057594037927937 is [54:246:2244] sender: [54:324:2057] recipient: [54:14:2061] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> PQCountersSimple::Partition [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:129:2057] recipient: [1:127:2159] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:129:2057] recipient: [1:127:2159] Leader for TabletID 72057594037927937 is [1:133:2163] sender: [1:134:2057] recipient: [1:127:2159] 2025-06-24T21:25:36.855539Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:36.855626Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:175:2057] recipient: [1:173:2194] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:175:2057] recipient: [1:173:2194] Leader for TabletID 72057594037927938 is [1:179:2198] sender: [1:180:2057] recipient: [1:173:2194] Leader for TabletID 72057594037927937 is [1:133:2163] sender: [1:203:2057] recipient: [1:14:2061] 2025-06-24T21:25:36.875832Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:36.891248Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1 actor [1:201:2214] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-24T21:25:36.892018Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:209:2220] 2025-06-24T21:25:36.893623Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:209:2220] 2025-06-24T21:25:36.895075Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:210:2221] 2025-06-24T21:25:36.896310Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:210:2221] 2025-06-24T21:25:36.903381Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|93450e5-748956cc-e01f3b27-d1cf19a9_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:36.910862Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|1101dd14-3121387f-9f381e80-1c05eeb7_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:36.916423Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|4824b68a-ca4c38c4-f2a04b11-beb499a0_2 
generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Expected: { "sensors": [ { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/MessageLagByCommitted" }, "value": 30 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/MessageLagByLastRead" }, "value": 29 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/PartitionMaxReadQuotaUsage" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesAvailAvgMin" }, "value": 1000000000 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesAvailAvgSec" }, "value": 1000000000 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesMaxPerDay" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesMaxPerHour" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesMaxPerMin" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesMaxPerSec" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesPerDay" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesPerHour" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesPerMin" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesPerSec" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesQuota" }, "value": 1000000000 }, { "kind": "RATE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadOffsetRewindSum" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadTimeLagMs" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/SizeLagByCommitted" }, "value": 747 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/SizeLagByLastRead" }, "value": 747 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": 
"rt3.dc1--asdfgs--topic", "sensor": "PQ/TimeSinceLastReadMs" }, "value": 5000 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/TotalMessageLagByLastRead" }, "value": 29 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/TotalSizeLagByLastRead" }, "value": 747 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/TotalTimeLagMsByLastRead" }, "value": 5000 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/UserPartitionsAnswered" }, "value": 2 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/WriteTimeLagMsByLastRead" }, "value": 30 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/WriteTimeLagMsByLastReadOld" }, "value": 5000 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "total", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/MessageLagByCommitted" }, "value": 30 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "total", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/MessageLagByLastRead" }, "value": 29 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "total", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/PartitionMaxReadQuotaUsage" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "total", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesAvailAvgMin" }, "value": 1000000000 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "total", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesAvailAvgSec" }, "value": 1000000000 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "total", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesMaxPerDay" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "total", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesMaxPerHour" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "total", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesMaxPerMin" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_count ... 
BillingMeteringConfig 2025-06-24T21:25:47.533861Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 5 actor [3:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 5 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } NEW ANS: ANS GROUP total/total/rt3.dc1--asdfgs--topic ANS GROUP user/0/total ANS GROUP user/total/total ANS GROUP user/0/rt3.dc1--asdfgs--topic ANS GROUP total/total/total ANS GROUP rt3.dc1--asdfgs--topic ANS GROUP total/0/rt3.dc1--asdfgs--topic ANS GROUP total CHECKING GROUP user/0/rt3.dc1--asdfgs--topic 2025-06-24T21:25:49.617775Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:49.625982Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 6 actor [3:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user" ImportantClientId: "user2" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 6 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 ReadRuleGenerations: 6 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: true } Consumers { Name: "user2" Generation: 6 Important: true } NEW ANS: ANS GROUP total/total/rt3.dc1--asdfgs--topic ANS GROUP user2/1/total ANS GROUP user/1/rt3.dc1--asdfgs--topic ANS GROUP user/1/total ANS GROUP user2/total/total ANS GROUP user/total/total ANS GROUP user2/1/rt3.dc1--asdfgs--topic ANS GROUP total/total/total ANS GROUP rt3.dc1--asdfgs--topic ANS GROUP total ANS GROUP total/1/rt3.dc1--asdfgs--topic CHECKING GROUP user/1/rt3.dc1--asdfgs--topic CHECKING GROUP user2/1/rt3.dc1--asdfgs--topic 2025-06-24T21:25:51.794293Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:51.803181Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 7 actor [3:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 7 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 ReadRuleGenerations: 6 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } 
AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: true } Consumers { Name: "user2" Generation: 6 Important: false } NEW ANS: ANS GROUP total/total/rt3.dc1--asdfgs--topic ANS GROUP user/1/rt3.dc1--asdfgs--topic ANS GROUP user2/0/total ANS GROUP user/1/total ANS GROUP user2/total/total ANS GROUP user/total/total ANS GROUP user2/0/rt3.dc1--asdfgs--topic ANS GROUP total/total/total ANS GROUP total/0/rt3.dc1--asdfgs--topic ANS GROUP rt3.dc1--asdfgs--topic ANS GROUP total ANS GROUP total/1/rt3.dc1--asdfgs--topic CHECKING GROUP user/1/rt3.dc1--asdfgs--topic CHECKING GROUP user2/0/rt3.dc1--asdfgs--topic 2025-06-24T21:25:53.981011Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:53.986437Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 8 actor [3:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 8 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: true } NEW ANS: ANS GROUP total/total/rt3.dc1--asdfgs--topic ANS GROUP user/1/rt3.dc1--asdfgs--topic ANS GROUP user/1/total ANS GROUP user/total/total ANS GROUP total/total/total ANS GROUP rt3.dc1--asdfgs--topic ANS GROUP total ANS GROUP total/1/rt3.dc1--asdfgs--topic CHECKING GROUP user/1/rt3.dc1--asdfgs--topic 2025-06-24T21:25:56.691090Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:56.691241Z node 4 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:56.713892Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:56.715219Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 9 actor [4:201:2214] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 3600 ImportantClientId: "client" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 9 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 9 ReadRuleGenerations: 9 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 9 Important: false } Consumers { Name: "client" Generation: 9 Important: true } 2025-06-24T21:25:56.716122Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 
72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [4:209:2220] 2025-06-24T21:25:56.717274Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [4:209:2220] 2025-06-24T21:25:56.718774Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [4:210:2221] 2025-06-24T21:25:56.719674Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'topic' partition 1 generation 2 [4:210:2221] 2025-06-24T21:25:56.739647Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][] pipe [4:254:2251] connected; active server actors: 1 2025-06-24T21:25:56.744182Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|a7a891fc-fba80f7d-13448774-42e968ac_0 generated for partition 0 topic 'topic' owner default 2025-06-24T21:25:56.750858Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|bb0b90fe-8a670455-45b72f59-95459b25_1 generated for partition 0 topic 'topic' owner default 2025-06-24T21:25:56.758287Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|8311d59a-a8a85268-49cd33f9-3f8302e5_2 generated for partition 0 topic 'topic' owner default 2025-06-24T21:25:56.764311Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][topic] pipe [4:301:2292] connected; active server actors: 1 2025-06-24T21:26:02.662460Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][topic] pipe [4:430:2380] connected; active server actors: 1 2025-06-24T21:26:09.797989Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:09.798087Z node 5 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:26:09.819589Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:09.820636Z node 5 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 10 actor [5:201:2214] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 10 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 10 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 10 Important: false } 2025-06-24T21:26:09.821400Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [5:209:2220] 2025-06-24T21:26:09.824457Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [5:209:2220] 2025-06-24T21:26:09.826620Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [5:210:2221] 2025-06-24T21:26:09.828827Z node 5 :PERSQUEUE 
INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [5:210:2221] 2025-06-24T21:26:09.836791Z node 5 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|bb3b1972-15ed3f24-d5ebcbe9-eb27dd9e_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:09.843984Z node 5 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|fcd90db1-2fb49b21-b943f56c-c81e781b_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:09.853222Z node 5 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3fa6a29c-4ae3b3de-99d6ba4d-61ad340b_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:09.859766Z node 5 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|1206c00b-e154dc10-d37dc63f-498984f0_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:09.861631Z node 5 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|524dbcb5-6b55b97d-5e37db4b-3fa2d431_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest >> LabeledDbCounters::OneTabletRemoveCounters [GOOD] >> LabeledDbCounters::OneTabletRestart >> TPartitionTests::TooManyImmediateTxs [GOOD] >> TPartitionTests::WriteSubDomainOutOfSpace >> TPartitionTests::WriteSubDomainOutOfSpace [GOOD] >> THealthCheckTest::BasicNodeCheckRequest [GOOD] >> THealthCheckTest::BlueGroupIssueWhenPartialGroupStatusAndReplicationDisks >> TPartitionTests::TestTxBatchInFederation >> THealthCheckTest::OneIssueListing [GOOD] >> THealthCheckTest::OnlyDiskIssueOnFaultyPDisks >> THealthCheckTest::StaticGroupIssue [GOOD] >> THealthCheckTest::StorageLimit50 >> SystemView::PartitionStatsLocksFields [GOOD] >> SystemView::QueryStatsAllTables >> THealthCheckTest::ShardsLimit999 [GOOD] >> THealthCheckTest::ShardsLimit995 |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest >> THealthCheckTest::Issues100GroupsListing [GOOD] >> THealthCheckTest::Issues100VCardListing >> THealthCheckTest::StorageLimit95 [GOOD] >> THealthCheckTest::StorageLimit87 >> THealthCheckTest::Issues100Groups100VCardListing [GOOD] >> THealthCheckTest::Issues100Groups100VCardMerging >> THealthCheckTest::RedGroupIssueWhenDisintegratedGroupStatus [GOOD] >> THealthCheckTest::ProtobufUnderLimitFor70LargeVdisksIssues >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_InactiveConfig_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_InactiveActor_Test >> TFlatTest::AutoSplitMergeQueue [GOOD] >> TCacheTest::WatchRoot >> TCacheTest::MigrationCommon >> TCacheTest::List >> TCacheTest::Navigate >> TCacheTest::CheckSystemViewAccess >> TCacheTest::Navigate [GOOD] >> TCacheTest::PathBelongsToDomain >> TCacheTest::CheckSystemViewAccess [GOOD] >> TCacheTest::CookiesArePreserved ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::AutoSplitMergeQueue [GOOD] Test command err: 2025-06-24T21:24:53.871661Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630003314800371:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:53.871716Z node 
1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a73/r3tmp/tmpOdsyUj/pdisk_1.dat 2025-06-24T21:24:54.188969Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630003314800347:2079] 1750800293865959 != 1750800293865962 2025-06-24T21:24:54.211733Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:54.288397Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:54.288552Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:54.292460Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2571 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:54.541988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:24:54.559520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... waiting... 
2025-06-24T21:24:54.575742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) TClient::Ls request: /dc-1/Dir/T1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "T1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750800294684 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "String" TypeId: 4097 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... (TRUNCATED) A-0 2025-06-24T21:24:54.894216Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:55.074627Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.9, eph 1} end=Done, 2 blobs 1r (max 1), put Spent{time=0.015s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (6291598 0 0)b }, ecr=1.000 2025-06-24T21:24:55.079752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] state 'Ready' dataSize 6291502 rowCount 1 cpuUsage 0 B-0 2025-06-24T21:24:55.095021Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 2 blobs 1r (max 1), put Spent{time=0.016s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (6291598 0 0)b }, ecr=1.000 2025-06-24T21:24:55.096737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] state 'Ready' dataSize 6291502 rowCount 1 cpuUsage 0 2025-06-24T21:24:55.182124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-24T21:24:55.182263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 3 shard idx 72057594046644480:1 data size 6291502 row count 1 2025-06-24T21:24:55.182349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037888 maps to shardIdx: 72057594046644480:1 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 3], pathId map=T1, is column=0, is olap=0, RowCount 1, DataSize 6291502 2025-06-24T21:24:55.182478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037888 2025-06-24T21:24:55.182605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 A-1 2025-06-24T21:24:55.442500Z node 1 
:OPS_COMPACT INFO: Compact{72075186224037888.1.14, eph 2} end=Done, 2 blobs 1r (max 1), put Spent{time=0.040s,wait=0.002s,interrupts=1} Part{ 1 pk, lobs 0 +0, (6291598 0 0)b }, ecr=1.000 2025-06-24T21:24:55.446932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] state 'Ready' dataSize 12583004 rowCount 2 cpuUsage 0 2025-06-24T21:24:55.471408Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 3 blobs 2r (max 2), put Spent{time=0.025s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (12583126 0 0)b }, ecr=1.000 2025-06-24T21:24:55.473286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] state 'Ready' dataSize 12583004 rowCount 2 cpuUsage 0 2025-06-24T21:24:55.549302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-24T21:24:55.549430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 3 shard idx 72057594046644480:1 data size 12583004 row count 2 2025-06-24T21:24:55.549495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037888 maps to shardIdx: 72057594046644480:1 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 3], pathId map=T1, is column=0, is olap=0, RowCount 2, DataSize 12583004 2025-06-24T21:24:55.549585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037888 2025-06-24T21:24:55.551304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 B-1 2025-06-24T21:24:55.759968Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.19, eph 3} end=Done, 2 blobs 1r (max 1), put Spent{time=0.021s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (6291598 0 0)b }, ecr=1.000 2025-06-24T21:24:55.789378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] state 'Ready' dataSize 18874506 rowCount 3 cpuUsage 0 2025-06-24T21:24:55.843626Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.21, eph 3} end=Done, 4 blobs 3r (max 3), put Spent{time=0.058s,wait=0.001s,interrupts=1} Part{ 1 pk, lobs 0 +0, (18874656 0 0)b }, ecr=1.000 2025-06-24T21:24:55.887579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-24T21:24:55.887690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 3 shard idx 72057594046644480:1 data size 18874506 row count 3 2025-06-24T21:24:55.887745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037888 maps to shardIdx: 72057594046644480:1 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 3], pathId map=T1, is column=0, is olap=0, RowCount 3, DataSize 18874506 2025-06-24T21:24:55.887893Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__background_compaction.cpp:31: [BackgroundCompaction] [Start] Compacting for pathId# [OwnerId: 72057594046644480, LocalPathId: 3], datashard# 72075186224037888, compactionInfo# {72057594046644480:1, SH# 2, Rows# 3, Deletes# 0, Compaction# 1970-01-01T00:00:00.000000Z}, next wakeup in# 0.000000s, rate# 5.787037037e-06, in queue# 1 shards, waiting after compaction# 0 shards, running# 0 shards at schemeshard 72057594046644480 2025-06-24T21:24:55.892818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: operation_queue_timer.h:84: Operation queue set wakeup after delta# 599 seconds 2025-06-24T21:24:55.892892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037888 2025-06-24T21:24:55.893048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-24T21:24:55.897831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_compaction.cpp:112: [BackgroundCompaction] [Finished] Compaction completed for pathId# [OwnerId: 72057594046644480, LocalPathId: 3], datashard# 72075186224037888, shardIdx# 72057594046644480:1 in# 9 ms, with status# 1, next wakeup in# 599.990017s, rate# 5.787037037e-06, in queue# 1 shards, waiting after compaction# 1 shards, running# 0 shards at schemeshard 72057594046644480 2025-06-24T21:24:55.927551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [Owner ... 659 CreateStep: 1750800339036 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 24 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 24 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 22 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... (TRUNCATED) TClient::Ls request: /dc-1/Dir/T1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "T1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750800339036 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 24 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 24 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 22 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... 
(TRUNCATED) TClient::Ls request: /dc-1/Dir/T1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "T1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750800339036 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 24 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 24 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 22 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... (TRUNCATED) TClient::Ls request: /dc-1/Dir/T1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "T1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750800339036 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 24 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 24 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 22 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... (TRUNCATED) TClient::Ls request: /dc-1/Dir/T1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "T1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750800339036 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 24 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 24 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 22 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... (TRUNCATED) TClient::Ls request: /dc-1/Dir/T1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "T1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750800339036 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 24 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 24 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 22 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... 
(TRUNCATED) TClient::Ls request: /dc-1/Dir/T1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "T1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750800339036 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 24 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 24 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 22 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... (TRUNCATED) 2025-06-24T21:26:12.882961Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037912 not found 2025-06-24T21:26:12.882991Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037919 not found 2025-06-24T21:26:12.884973Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037918 not found TClient::Ls request: /dc-1/Dir/T1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "T1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750800339036 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 25 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 25 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 23 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... 
(TRUNCATED) 2025-06-24T21:26:13.868766Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037922 not found 2025-06-24T21:26:13.868806Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037920 not found 2025-06-24T21:26:13.869067Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037923 not found 2025-06-24T21:26:13.869125Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037921 not found 2025-06-24T21:26:13.929360Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037925 not found 2025-06-24T21:26:13.929394Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037924 not found TClient::Ls request: /dc-1/Dir/T1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "T1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750800339036 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 28 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 28 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 26 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... (TRUNCATED) TClient::Ls request: /dc-1/Dir/T1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "T1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750800339036 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 28 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 28 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 26 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... 
(TRUNCATED) >> TCacheTest::List [GOOD] >> TCacheTest::MigrationCommit >> TCacheTest::WatchRoot [GOOD] >> TCacheTestWithDrops::LookupErrorUponEviction >> TCacheTest::PathBelongsToDomain [GOOD] >> TCacheTest::CookiesArePreserved [GOOD] >> TCacheTest::MigrationCommon [GOOD] >> TCacheTest::MigrationDeletedPathNavigate >> TListAllTopicsTests::ListLimitAndPaging [GOOD] >> TMeteringSink::FlushPutEventsV1 [GOOD] >> TMeteringSink::FlushResourcesReservedV1 [GOOD] >> TMeteringSink::FlushStorageV1 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::PathBelongsToDomain [GOOD] Test command err: 2025-06-24T21:26:15.973392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:15.973438Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:16.181374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:26:16.205920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 2025-06-24T21:26:16.590969Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:16.591035Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:16.645292Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 2025-06-24T21:26:16.651223Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:26:16.657970Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until txId: 102 2025-06-24T21:26:16.667241Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:287: Path does not belong to the specified domain: self# [2:228:2205], domain# [OwnerId: 72057594046678944, LocalPathId: 1], path's domain# [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:26:16.667467Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:287: Path does not belong to the specified domain: self# [2:230:2207], domain# [OwnerId: 72057594046678944, LocalPathId: 1], path's domain# [OwnerId: 72057594046678944, LocalPathId: 2] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::CookiesArePreserved [GOOD] Test command err: 2025-06-24T21:26:15.976375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:15.976431Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:16.175052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 2025-06-24T21:26:16.188942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:26:16.197199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: 
NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-24T21:26:16.199862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TestModificationResult got TxId: 102, wait until txId: 102 2025-06-24T21:26:16.204840Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:214:2197], for# user1@builtin, access# DescribeSchema 2025-06-24T21:26:16.205578Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:220:2203], for# user1@builtin, access# 2025-06-24T21:26:16.590358Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:16.590416Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:16.646496Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 2025-06-24T21:26:16.652437Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:26:16.658650Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until txId: 102 >> TPQTest::TestAlreadyWritten [GOOD] >> TPQTest::PQ_Tablet_Removes_Blobs_Asynchronously >> TCacheTest::MigrationCommit [GOOD] >> TPartitionTests::TestTxBatchInFederation [GOOD] >> TPQTest::TestMaxTimeLagRewind [GOOD] >> TPQTest::TestManyConsumers >> Worker::Basic [GOOD] >> 
TPartitionTests::The_DeletePartition_Message_Arrives_Before_The_ApproveWriteQuota_Message ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::MigrationCommit [GOOD] Test command err: 2025-06-24T21:26:15.980925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:15.980996Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:16.168079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 FAKE_COORDINATOR: Add transaction: 103 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000004 FAKE_COORDINATOR: Erasing txId 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 101 TestWaitNotification wait txId: 102 TestWaitNotification wait txId: 103 2025-06-24T21:26:16.200341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:26:16.200433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:26:16.200678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 2025-06-24T21:26:16.552695Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:16.552756Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:16.609173Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called 
at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 Leader for TabletID 72057594046678944 is [2:72:2110] sender: [2:177:2067] recipient: [2:47:2094] Leader for TabletID 72057594046678944 is [2:72:2110] sender: [2:180:2067] recipient: [2:24:2071] Leader for TabletID 72057594046678944 is [2:72:2110] sender: [2:181:2067] recipient: [2:179:2172] Leader for TabletID 72057594046678944 is [2:182:2173] sender: [2:183:2067] recipient: [2:179:2172] 2025-06-24T21:26:16.656782Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:16.656841Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 101 Leader for TabletID 72057594046678944 is [2:182:2173] sender: [2:213:2067] recipient: [2:24:2071] 2025-06-24T21:26:16.685334Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-24T21:26:16.692156Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:249:2067] recipient: [2:240:2214] IGNORE Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:249:2067] recipient: [2:240:2214] Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:250:2067] recipient: [2:243:2216] IGNORE Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:250:2067] recipient: [2:243:2216] Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:253:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:253:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:254:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:254:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409546 is [2:252:2220] sender: [2:255:2067] recipient: [2:240:2214] Leader for TabletID 72075186233409547 is [2:257:2222] sender: [2:258:2067] recipient: [2:243:2216] TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 TestWaitNotification wait txId: 102 2025-06-24T21:26:16.724126Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown 
transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 Leader for TabletID 72075186233409546 is [2:252:2220] sender: [2:292:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409547 is [2:257:2222] sender: [2:293:2067] recipient: [2:24:2071] FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestModificationResults wait txId: 104 2025-06-24T21:26:16.819303Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 104:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 103 TestWaitNotification wait txId: 104 Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:343:2067] recipient: [2:339:2286] IGNORE Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:343:2067] recipient: [2:339:2286] Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:344:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:344:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409548 is [2:346:2290] sender: [2:347:2067] recipient: [2:339:2286] Leader for TabletID 72075186233409548 is [2:346:2290] sender: [2:348:2067] recipient: [2:24:2071] TestWaitNotification: OK eventTxId 103 TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-06-24T21:26:17.002695Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpUpgradeSubDomain, opId: 105:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp:1232) Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:422:2067] recipient: [2:418:2334] IGNORE Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:422:2067] recipient: [2:418:2334] Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:423:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:423:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409549 is [2:425:2338] sender: [2:426:2067] recipient: [2:418:2334] Leader for TabletID 72075186233409549 is [2:425:2338] sender: [2:427:2067] recipient: [2:24:2071] 2025-06-24T21:26:17.044998Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:17.045065Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 TestWaitNotification: OK eventTxId 105 { Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 
72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 SchemeShard: 72075186233409549 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } TestModificationResults wait txId: 106 2025-06-24T21:26:17.065126Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5471: Mark as Migrated path id [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:26:17.065188Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5471: Mark as Migrated path id [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-24T21:26:17.065515Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard__operation_upgrade_subdomain.cpp:1464: TWait ProgressState, dependent transaction: 106, parent transaction: 105, at schemeshard: 72057594046678944 2025-06-24T21:26:17.065670Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpUpgradeSubDomainDecision, opId: 106:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp:571) TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-24T21:26:17.085853Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5932: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 2025-06-24T21:26:17.086365Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5932: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 { Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 SchemeShard: 72075186233409549 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } Leader for TabletID 72057594046678944 is [2:182:2173] sender: [2:513:2067] recipient: [2:47:2094] Leader for TabletID 72057594046678944 is [2:182:2173] sender: [2:515:2067] recipient: [2:24:2071] Leader for TabletID 72057594046678944 is [2:182:2173] sender: [2:517:2067] recipient: [2:516:2408] Leader for TabletID 72057594046678944 is [2:518:2409] sender: [2:519:2067] recipient: [2:516:2408] 2025-06-24T21:26:17.136178Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:17.136230Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded Leader for TabletID 72057594046678944 is [2:518:2409] sender: [2:546:2067] recipient: [2:24:2071] { Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 SchemeShard: 72075186233409549 
} ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } >> TCacheTest::Recreate >> TPQTest::PQ_Tablet_Removes_Blobs_Asynchronously [GOOD] >> TPQTest::PQ_Tablet_Does_Not_Remove_The_Blob_Until_The_Reading_Is_Complete ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TMeteringSink::FlushStorageV1 [GOOD] Test command err: 2025-06-24T21:25:46.475609Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630231100515210:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:46.475806Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003a90/r3tmp/tmpeLcyr9/pdisk_1.dat 2025-06-24T21:25:46.669333Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:25:46.823598Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:46.824896Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630231100515184:2079] 1750800346474328 != 1750800346474331 TServer::EnableGrpc on GrpcPort 6161, node 1 2025-06-24T21:25:46.883636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:46.884574Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:46.891649Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:25:46.909238Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/003a90/r3tmp/yandexAFDNgz.tmp 2025-06-24T21:25:46.909269Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/003a90/r3tmp/yandexAFDNgz.tmp 2025-06-24T21:25:46.909505Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/003a90/r3tmp/yandexAFDNgz.tmp 2025-06-24T21:25:46.909668Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:25:46.958236Z INFO: TTestServer started on Port 26421 GrpcPort 6161 TClient is connected to server localhost:26421 PQClient connected to localhost:6161 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:25:47.192705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:47.219500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T21:25:47.487294Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:49.278806Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630243985417870:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:49.278880Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630243985417862:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:49.279050Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:49.282768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:25:49.293513Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630243985417876:2300], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T21:25:49.520814Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630243985417941:2441] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:25:49.555184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:49.608037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:49.649266Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519630243985417951:2308], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:25:49.649612Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=M2ZjMTVkZDAtZGNhNWUzZGUtMzY2ZDVjNWYtMzkxZjlhMGI=, ActorId: [1:7519630243985417860:2295], ActorState: ExecuteState, TraceId: 01jyhxae2w0ffgexxzxvh18d3b, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:25:49.652175Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:25:49.693320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-24T21:25:49.917025Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710667. Ctx: { TraceId: 01jyhxaej42dxz7g4vam0pbnae, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWIzMGRlYmQtNWIxMzA1YzEtYTViMWMyODctOWI0MGZhNzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7519630243985418241:2619] 2025-06-24T21:25:51.475882Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630231100515210:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:51.475955Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-24T21:25:55.985970Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:186: new Create topic request 2025-06-24T21:25:55.988589Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:186: new Create topic request 2025-06-24T21:25:56.021951Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72075186224037894] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:25:56.022009Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:25:56.022245Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3094: [PQ: 72075186224037894] Registered with mediator time cast 2025-06-24T21:25:56.022245Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3094: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-24T21:25:56.022875Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72075186224037894] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:25:56.022875Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:25:56.023057Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72075186224037892] doesn't have tx info 2025-06-24T21:25:56.023072Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:25:56.023091Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-24T21:25:56.023119Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72075186224037892] Tx ... 281474976715677, NewState CALCULATED 2025-06-24T21:26:16.546438Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72075186224037896] TxId 281474976715677 moved from CALCULATING to CALCULATED 2025-06-24T21:26:16.546456Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72075186224037896] write key for TxId 281474976715677 2025-06-24T21:26:16.546758Z node 3 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 281474976715677] save tx TxId: 281474976715677 State: CALCULATED MinStep: 1750800376542 MaxStep: 18446744073709551615 Step: 1750800376591 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 MaxSizeInPartition: 9223372036854775807 LifetimeSeconds: 64800 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 2097152 BurstSize: 2097152 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "topic3" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/dir2/topic3" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } } BootstrapConfig { } SourceActor { RawX1: 7519630316733827017 RawX2: 12884904035 } Partitions { Partition { PartitionId: 0 } } 2025-06-24T21:26:16.546845Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 
72075186224037896] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T21:26:16.546969Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037894] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:26:16.546988Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037894] Try execute txs with state DELETING 2025-06-24T21:26:16.547001Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037894] TxId 281474976715676, State DELETING 2025-06-24T21:26:16.547016Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4571: [PQ: 72075186224037894] delete TxId 281474976715676 2025-06-24T21:26:16.547555Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037896] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:26:16.547577Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037896] Try execute txs with state CALCULATED 2025-06-24T21:26:16.547590Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037896] TxId 281474976715677, State CALCULATED 2025-06-24T21:26:16.547603Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72075186224037896] TxId 281474976715677 State CALCULATED FrontTxId 281474976715677 2025-06-24T21:26:16.547618Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037896] TxId 281474976715677, NewState WAIT_RS 2025-06-24T21:26:16.547636Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72075186224037896] TxId 281474976715677 moved from CALCULATED to WAIT_RS 2025-06-24T21:26:16.547669Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3988: [PQ: 72075186224037896] Send TEvTxProcessing::TEvReadSet to 0 receivers. Wait TEvTxProcessing::TEvReadSet from 0 senders. 2025-06-24T21:26:16.547686Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4482: [PQ: 72075186224037896] HaveParticipantsDecision 1 2025-06-24T21:26:16.547733Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037896] TxId 281474976715677, NewState EXECUTING 2025-06-24T21:26:16.547749Z node 3 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72075186224037896, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1750800376591, TxId 281474976715677 2025-06-24T21:26:16.547750Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72075186224037896] TxId 281474976715677 moved from WAIT_RS to EXECUTING 2025-06-24T21:26:16.547761Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4512: [PQ: 72075186224037896] Received 0, Expected 1 2025-06-24T21:26:16.547943Z node 3 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-24T21:26:16.548569Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3553: [PQ: 72075186224037896] Handle TEvPQ::TEvTxCommitDone Step 1750800376591, TxId 281474976715677, Partition 0 2025-06-24T21:26:16.548591Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037896] Try execute txs with state EXECUTING 2025-06-24T21:26:16.548605Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037896] TxId 281474976715677, State EXECUTING 2025-06-24T21:26:16.548617Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037896, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:26:16.548620Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72075186224037896] TxId 281474976715677 State EXECUTING FrontTxId 281474976715677 2025-06-24T21:26:16.548630Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4512: [PQ: 72075186224037896] Received 1, Expected 1 2025-06-24T21:26:16.548649Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4185: [PQ: 72075186224037896] TxId: 281474976715677 send TEvPersQueue::TEvProposeTransactionResult(COMPLETE) 2025-06-24T21:26:16.548654Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037896, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:26:16.548665Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4516: [PQ: 72075186224037896] complete TxId 281474976715677 2025-06-24T21:26:16.548892Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72075186224037896] Apply new config PartitionConfig { MaxCountInPartition: 2147483647 MaxSizeInPartition: 9223372036854775807 LifetimeSeconds: 64800 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 2097152 BurstSize: 2097152 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "topic3" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/dir2/topic3" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } 2025-06-24T21:26:16.548966Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72075186224037896] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:16.549023Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4534: [PQ: 72075186224037896] delete partitions for TxId 281474976715677 2025-06-24T21:26:16.549043Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037896] TxId 281474976715677, NewState EXECUTED 2025-06-24T21:26:16.549060Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72075186224037896] TxId 281474976715677 moved from EXECUTING to EXECUTED 2025-06-24T21:26:16.549079Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3836: [PQ: 72075186224037896] write key for TxId 281474976715677 2025-06-24T21:26:16.549330Z node 3 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 281474976715677] save tx TxId: 
281474976715677 State: EXECUTED MinStep: 1750800376542 MaxStep: 18446744073709551615 Step: 1750800376591 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 MaxSizeInPartition: 9223372036854775807 LifetimeSeconds: 64800 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 2097152 BurstSize: 2097152 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "topic3" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/dir2/topic3" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } } BootstrapConfig { } SourceActor { RawX1: 7519630316733827017 RawX2: 12884904035 } Partitions { Partition { PartitionId: 0 } } 2025-06-24T21:26:16.549457Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037896] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-24T21:26:16.550363Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037896] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:26:16.550388Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037896] Try execute txs with state EXECUTED 2025-06-24T21:26:16.550403Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037896] TxId 281474976715677, State EXECUTED 2025-06-24T21:26:16.550418Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4306: [PQ: 72075186224037896] TxId 281474976715677 State EXECUTED FrontTxId 281474976715677 2025-06-24T21:26:16.550434Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4007: [PQ: 72075186224037896] TPersQueue::SendEvReadSetAckToSenders 2025-06-24T21:26:16.550449Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037896] TxId 281474976715677, NewState WAIT_RS_ACKS 2025-06-24T21:26:16.550462Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4284: [PQ: 72075186224037896] TxId 281474976715677 moved from EXECUTED to WAIT_RS_ACKS 2025-06-24T21:26:16.550482Z node 3 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715677] PredicateAcks: 0/0 2025-06-24T21:26:16.550490Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4560: [PQ: 72075186224037896] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-24T21:26:16.550500Z node 3 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715677] PredicateAcks: 0/0 2025-06-24T21:26:16.550512Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4621: [PQ: 72075186224037896] add an TxId 281474976715677 to the list for deletion 2025-06-24T21:26:16.550531Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037896] TxId 281474976715677, NewState DELETING 2025-06-24T21:26:16.550552Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3852: [PQ: 72075186224037896] delete key for TxId 281474976715677 2025-06-24T21:26:16.550592Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3653: [PQ: 72075186224037896] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 
2025-06-24T21:26:16.551072Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1233: [PQ: 72075186224037896] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-24T21:26:16.551093Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4314: [PQ: 72075186224037896] Try execute txs with state DELETING 2025-06-24T21:26:16.551106Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4359: [PQ: 72075186224037896] TxId 281474976715677, State DELETING 2025-06-24T21:26:16.551122Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4571: [PQ: 72075186224037896] delete TxId 281474976715677 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest >> Worker::Basic [GOOD] Test command err: 2025-06-24T21:26:09.905781Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630328283365815:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:26:09.905851Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002deb/r3tmp/tmpAy5aw9/pdisk_1.dat 2025-06-24T21:26:10.349311Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630328283365783:2079] 1750800369901842 != 1750800369901845 2025-06-24T21:26:10.362105Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:26:10.388871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:26:10.388990Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:26:10.391674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4649 TServer::EnableGrpc on GrpcPort 12536, node 1 2025-06-24T21:26:10.694750Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:10.694770Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:10.694778Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:10.694927Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:10.916535Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4649 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:26:11.190105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:26:11.394136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750800371509 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) 2025-06-24T21:26:11.547929Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:32: [RemoteTopicReader][/Root/topic][0][1:7519630336873301179:2418] Handshake: worker# [1:7519630336873301178:2418] 2025-06-24T21:26:11.548090Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519630336873301180:2418] Handshake: worker# [1:7519630336873301178:2418] 2025-06-24T21:26:11.548470Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519630336873301180:2418] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:3:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:26:11.548910Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519630336873301180:2418] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 3] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-24T21:26:11.548955Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519630336873301180:2418] Send handshake: worker# [1:7519630336873301178:2418] 2025-06-24T21:26:11.549039Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:150: [Worker][1:7519630336873301178:2418] Handle NKikimr::NReplication::NService::TEvWorker::TEvHandshake 2025-06-24T21:26:11.549065Z node 1 :REPLICATION_SERVICE INFO: worker.cpp:162: [Worker][1:7519630336873301178:2418] Handshake with writer: sender# [1:7519630336873301180:2418] 2025-06-24T21:26:11.558067Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:41: [RemoteTopicReader][/Root/topic][0][1:7519630336873301179:2418] Create read session: session# [1:7519630336873301186:2290] 2025-06-24T21:26:11.558126Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:150: [Worker][1:7519630336873301178:2418] Handle NKikimr::NReplication::NService::TEvWorker::TEvHandshake 2025-06-24T21:26:11.558142Z node 1 :REPLICATION_SERVICE INFO: worker.cpp:154: [Worker][1:7519630336873301178:2418] Handshake with reader: sender# [1:7519630336873301179:2418] 2025-06-24T21:26:11.558194Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7519630336873301179:2418] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-24T21:26:11.606564Z node 1 :REPLICATION_SERVICE DEBUG: 
topic_reader.cpp:79: [RemoteTopicReader][/Root/topic][0][1:7519630336873301179:2418] Handle NKikimr::NReplication::TEvYdbProxy::TEvStartTopicReadingSession { Result: { ReadSessionId: consumer_1_1_18013592589695281982_v1 } } 2025-06-24T21:26:11.609745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:26:12.686712Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630341168268662:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:12.686730Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630341168268646:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:12.686846Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630341168268663:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:12.686938Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:12.690883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:26:12.697208Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630341168268668:2494] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T21:26:12.701443Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630341168268666:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T21:26:12.701443Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630341168268667:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transa ... schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:26:15.462396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:26:15.814283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:26:16.471796Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7519630336873301179:2418] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 36b Offset: 0 SeqNo: 1 CreateTime: 2025-06-24T21:26:16.462000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-24T21:26:16.471877Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:216: [Worker][1:7519630336873301178:2418] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 0 SeqNo: 1 CreateTime: 2025-06-24T21:26:16.462000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-24T21:26:16.471933Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519630336873301180:2418] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 0 SeqNo: 1 CreateTime: 2025-06-24T21:26:16.462000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-24T21:26:16.472115Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519630336873301180:2418] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 0 BodySize: 36 }] } 2025-06-24T21:26:16.473276Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7519630358348138600:2418] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-24T21:26:16.473351Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519630336873301180:2418] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037890 } 2025-06-24T21:26:16.473468Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7519630358348138600:2418] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 0 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b }] } 2025-06-24T21:26:16.474888Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7519630358348138600:2418] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-24T21:26:16.474937Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 
72057594046644480, LocalPathId: 3][1:7519630336873301180:2418] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037890 } 2025-06-24T21:26:16.475005Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519630336873301180:2418] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [0] } 2025-06-24T21:26:16.475082Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:176: [Worker][1:7519630336873301178:2418] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-24T21:26:16.475162Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7519630336873301179:2418] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-24T21:26:16.632406Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7519630336873301179:2418] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 36b Offset: 1 SeqNo: 2 CreateTime: 2025-06-24T21:26:16.625000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-24T21:26:16.632481Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:216: [Worker][1:7519630336873301178:2418] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 1 SeqNo: 2 CreateTime: 2025-06-24T21:26:16.625000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-24T21:26:16.632562Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519630336873301180:2418] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 1 SeqNo: 2 CreateTime: 2025-06-24T21:26:16.625000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-24T21:26:16.632664Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519630336873301180:2418] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 36 }] } 2025-06-24T21:26:16.632770Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7519630358348138600:2418] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b }] } 2025-06-24T21:26:16.634781Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7519630358348138600:2418] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-24T21:26:16.634841Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519630336873301180:2418] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037890 } 2025-06-24T21:26:16.634892Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519630336873301180:2418] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1] } 2025-06-24T21:26:16.634939Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:176: [Worker][1:7519630336873301178:2418] Handle 
NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-24T21:26:16.634990Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7519630336873301179:2418] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-24T21:26:16.781982Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7519630336873301179:2418] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 36b Offset: 2 SeqNo: 3 CreateTime: 2025-06-24T21:26:16.775000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-24T21:26:16.782048Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:216: [Worker][1:7519630336873301178:2418] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 2 SeqNo: 3 CreateTime: 2025-06-24T21:26:16.775000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-24T21:26:16.782098Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519630336873301180:2418] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 2 SeqNo: 3 CreateTime: 2025-06-24T21:26:16.775000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-24T21:26:16.782211Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519630336873301180:2418] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 2 BodySize: 36 }] } 2025-06-24T21:26:16.782274Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7519630358348138600:2418] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 2 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b }] } 2025-06-24T21:26:16.783658Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7519630358348138600:2418] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-24T21:26:16.783711Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519630336873301180:2418] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037890 } 2025-06-24T21:26:16.783741Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519630336873301180:2418] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [2] } 2025-06-24T21:26:16.783782Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:176: [Worker][1:7519630336873301178:2418] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-24T21:26:16.783819Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7519630336873301179:2418] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-24T21:26:16.895597Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:119: [RemoteTopicReader][/Root/topic][0][1:7519630336873301179:2418] Handle NKikimr::NReplication::TEvYdbProxy::TEvTopicReaderGone { Result: { status: UNAVAILABLE, issues: {
: Error: PartitionSessionClosed { Partition session id: 1 Topic: "topic" Partition: 0 Reason: ConnectionLost } } } } 2025-06-24T21:26:16.895616Z node 1 :REPLICATION_SERVICE INFO: topic_reader.cpp:131: [RemoteTopicReader][/Root/topic][0][1:7519630336873301179:2418] Leave 2025-06-24T21:26:16.895665Z node 1 :REPLICATION_SERVICE INFO: worker.cpp:235: [Worker][1:7519630336873301178:2418] Reader has gone: sender# [1:7519630336873301179:2418] 2025-06-24T21:26:16.895752Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:32: [RemoteTopicReader][/Root/topic][0][1:7519630358348138774:2418] Handshake: worker# [1:7519630336873301178:2418] 2025-06-24T21:26:16.898479Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:41: [RemoteTopicReader][/Root/topic][0][1:7519630358348138774:2418] Create read session: session# [1:7519630358348138775:2290] 2025-06-24T21:26:16.898527Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:150: [Worker][1:7519630336873301178:2418] Handle NKikimr::NReplication::NService::TEvWorker::TEvHandshake 2025-06-24T21:26:16.898535Z node 1 :REPLICATION_SERVICE INFO: worker.cpp:154: [Worker][1:7519630336873301178:2418] Handshake with reader: sender# [1:7519630358348138774:2418] 2025-06-24T21:26:16.898558Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7519630358348138774:2418] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } >> TCacheTest::Recreate [GOOD] >> TCacheTest::SysLocks >> TPartitionTests::The_DeletePartition_Message_Arrives_Before_The_ApproveWriteQuota_Message [GOOD] >> TCacheTest::RacyRecreateAndSync >> TCacheTest::SystemView |97.2%| [TA] $(B)/ydb/core/tx/replication/service/ut_worker/test-results/unittest/{meta.json ... results_accumulator.log} |97.2%| [TA] {RESULT} $(B)/ydb/core/tx/replication/service/ut_worker/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TCacheTest::SysLocks [GOOD] >> TCacheTest::RacyRecreateAndSync [GOOD] >> TCacheTest::RacyCreateAndSync >> TCacheTest::SystemView [GOOD] >> TCacheTest::TableSchemaVersion >> THealthCheckTest::BlueGroupIssueWhenPartialGroupStatusAndReplicationDisks [GOOD] >> THealthCheckTest::GreenStatusWhenCreatingGroup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::SysLocks [GOOD] Test command err: 2025-06-24T21:26:18.336864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:18.336924Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:18.501493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:26:18.523862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-24T21:26:18.525627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:67) FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T21:26:18.560971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 FAKE_COORDINATOR: Add transaction: 103 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000004 FAKE_COORDINATOR: Erasing txId 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-24T21:26:18.572257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at 
schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 103 2025-06-24T21:26:18.816202Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:18.816251Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:18.860239Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::The_DeletePartition_Message_Arrives_Before_The_ApproveWriteQuota_Message [GOOD] Test command err: 2025-06-24T21:25:41.685417Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:41.685521Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:25:41.705397Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:182:2195] 2025-06-24T21:25:41.706510Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 1 generation 0 [1:182:2195] Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: 
"\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: 
"\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: 
"\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChan ... 
ition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:26:15.945862Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T21:26:17.205408Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:26:17.205608Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-24T21:26:17.205821Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 17 Wait batch completion 2025-06-24T21:26:17.206040Z node 4 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 10 Wait kv request 2025-06-24T21:26:17.443968Z node 4 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 0 2025-06-24T21:26:17.444047Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 0 2025-06-24T21:26:17.444127Z node 4 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-24T21:26:17.444218Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-24T21:26:17.444262Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-24T21:26:17.444377Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-24T21:26:17.444432Z node 4 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-24T21:26:17.444489Z node 4 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-24T21:26:17.444569Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1257: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Already written message. Topic: 'rt3.dc1--account--topic' Partition: 0 SourceId: 'src4'. Message seqNo: 7. Committed seqNo: (NULL). Writing seqNo: 7. EndOffset: 50. CurOffset: 50. 
Offset: 50 2025-06-24T21:26:17.444694Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 8 partNo 0 2025-06-24T21:26:17.445574Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 8 partNo 0 FormedBlobsCount 0 NewHead: Offset 51 PartNo 0 PackedSize 84 count 1 nextOffset 52 batches 1 2025-06-24T21:26:17.445661Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 9 partNo 0 2025-06-24T21:26:17.445710Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 9 partNo 0 FormedBlobsCount 0 NewHead: Offset 51 PartNo 0 PackedSize 136 count 2 nextOffset 53 batches 1 2025-06-24T21:26:17.445752Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 10 partNo 0 2025-06-24T21:26:17.445787Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 10 partNo 0 FormedBlobsCount 0 NewHead: Offset 51 PartNo 0 PackedSize 188 count 3 nextOffset 54 batches 1 2025-06-24T21:26:17.445825Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 11 partNo 0 2025-06-24T21:26:17.445857Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 11 partNo 0 FormedBlobsCount 0 NewHead: Offset 51 PartNo 0 PackedSize 240 count 4 nextOffset 55 batches 1 2025-06-24T21:26:17.445892Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 12 partNo 0 2025-06-24T21:26:17.445928Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 12 partNo 0 FormedBlobsCount 0 NewHead: Offset 51 PartNo 0 PackedSize 292 count 5 nextOffset 56 batches 1 2025-06-24T21:26:17.445965Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-24T21:26:17.446008Z node 4 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 51 PartNo 0 PackedSize 292 count 5 nextOffset 56 batches 1 2025-06-24T21:26:17.446047Z node 4 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-24T21:26:17.446096Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 
72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 10 2025-06-24T21:26:17.446136Z node 4 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 51 PartNo 0 PackedSize 292 count 5 nextOffset 56 batches 1 2025-06-24T21:26:17.446722Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'rt3.dc1--account--topic' partition 0 compactOffset 51,5 HeadOffset 50 endOffset 50 curOffset 56 d0000000000_00000000000000000051_00000_0000000005_00000? size 189 WTime 21151 Got KV request Got KV request Got KV request Got KV request Got KV request Got KV request Got KV request Got KV request Wait tx committed for tx 0 2025-06-24T21:26:17.467876Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 85 WriteNewSizeFromSupportivePartitions# 4 2025-06-24T21:26:17.467969Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:26:17.468045Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'rt3.dc1--account--topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 50 is already written 2025-06-24T21:26:17.468088Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:26:17.468123Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'rt3.dc1--account--topic', Partition: 0, SeqNo: 8, partNo: 0, Offset: 50 is already written 2025-06-24T21:26:17.468150Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:26:17.468183Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'rt3.dc1--account--topic', Partition: 0, SeqNo: 9, partNo: 0, Offset: 50 is already written 2025-06-24T21:26:17.468208Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:26:17.468260Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'rt3.dc1--account--topic', Partition: 0, SeqNo: 10, partNo: 0, Offset: 50 is already written 2025-06-24T21:26:17.468287Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:26:17.468320Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'rt3.dc1--account--topic', Partition: 0, SeqNo: 11, partNo: 0, Offset: 50 is already written 2025-06-24T21:26:17.468345Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-24T21:26:17.468377Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'rt3.dc1--account--topic', Partition: 0, SeqNo: 12, partNo: 0, Offset: 50 is already written 2025-06-24T21:26:17.468598Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=189, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Wait immediate tx complete 3 Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 3 Wait immediate tx complete 6 Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 6 Wait tx committed for tx 10 2025-06-24T21:26:17.906138Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:17.906212Z node 5 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:26:17.920761Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {1, {2, 3}, 4}, State: StateInit] bootstrapping {1, {2, 3}, 4} [5:182:2195] 2025-06-24T21:26:17.921665Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {1, {2, 3}, 4}, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition {1, {2, 3}, 4} generation 0 [5:182:2195] Got cmd write: CmdDeleteRange { Range { From: "M0000000004" IncludeFrom: true To: "M0000000005" IncludeTo: false } } CmdDeleteRange { Range { From: "D0000000004" IncludeFrom: true To: "D0000000005" IncludeTo: false } } CmdDeleteRange { Range { From: "X0000000004" IncludeFrom: true To: "X0000000005" IncludeTo: false } } CmdDeleteRange { Range { From: "J0000000004" IncludeFrom: true To: "J0000000005" IncludeTo: false } } CmdDeleteRange { Range { From: "K0000000004" IncludeFrom: true To: "K0000000005" IncludeTo: false } } >> TCacheTest::MigrationLostMessage >> TCacheTest::RacyCreateAndSync [GOOD] >> THealthCheckTest::StorageLimit87 [GOOD] >> THealthCheckTest::StorageLimit80 >> TCacheTest::TableSchemaVersion [GOOD] >> THealthCheckTest::Issues100VCardListing [GOOD] >> THealthCheckTest::Issues100GroupsMerging >> TPQTest::TestPQSmallRead [GOOD] >> TPQTest::TestPQReadAhead [GOOD] >> TPQTest::TestOwnership ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::RacyCreateAndSync [GOOD] Test command err: 2025-06-24T21:26:19.064367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:19.064430Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:19.191375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait 
txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:26:19.206773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-24T21:26:19.208952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:67) FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T21:26:19.245464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 FAKE_COORDINATOR: Add transaction: 103 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000004 FAKE_COORDINATOR: Erasing txId 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-24T21:26:19.266057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 103 2025-06-24T21:26:19.530968Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:19.531036Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:19.575415Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:26:19.585900Z node 2 :FLAT_TX_SCHEMESHARD 
WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 >> THealthCheckTest::OnlyDiskIssueOnFaultyPDisks [GOOD] >> THealthCheckTest::NoStoragePools ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::TableSchemaVersion [GOOD] Test command err: 2025-06-24T21:26:19.046727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:19.046784Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:19.171278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 2025-06-24T21:26:19.522880Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:19.522947Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:19.575409Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 2025-06-24T21:26:19.602845Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-24T21:26:19.739132Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, 
opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestWaitNotification: OK eventTxId 102 >> THealthCheckTest::ShardsLimit995 [GOOD] >> THealthCheckTest::ShardsLimit905 >> THealthCheckTest::Issues100Groups100VCardMerging [GOOD] >> THealthCheckTest::GreenStatusWhenInitPending >> THealthCheckTest::StorageLimit50 [GOOD] >> THealthCheckTest::SpecificServerless >> TCacheTest::MigrationLostMessage [GOOD] >> TCacheTest::MigrationUndo >> TCacheTest::Attributes >> THealthCheckTest::ProtobufUnderLimitFor70LargeVdisksIssues [GOOD] >> THealthCheckTest::ServerlessBadTablets >> TCacheTest::MigrationUndo [GOOD] >> TCacheTest::Attributes [GOOD] >> TCacheTest::CheckAccess >> TCacheTest::CheckAccess [GOOD] |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::MigrationUndo [GOOD] Test command err: 2025-06-24T21:26:19.787524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:19.787577Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:19.945182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 Leader for TabletID 72057594046678944 is [1:72:2110] sender: [1:177:2067] recipient: [1:47:2094] Leader for TabletID 72057594046678944 is [1:72:2110] sender: [1:180:2067] recipient: [1:179:2172] Leader for TabletID 72057594046678944 is [1:72:2110] sender: [1:181:2067] recipient: [1:24:2071] Leader for TabletID 72057594046678944 is [1:182:2173] sender: [1:183:2067] recipient: [1:179:2172] 2025-06-24T21:26:20.001309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:20.001374Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 101 Leader for TabletID 
72057594046678944 is [1:182:2173] sender: [1:213:2067] recipient: [1:24:2071] 2025-06-24T21:26:20.030787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-24T21:26:20.039351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) Leader for TabletID 72075186233409546 is [0:0:0] sender: [1:249:2067] recipient: [1:240:2214] IGNORE Leader for TabletID 72075186233409546 is [0:0:0] sender: [1:249:2067] recipient: [1:240:2214] Leader for TabletID 72075186233409547 is [0:0:0] sender: [1:250:2067] recipient: [1:242:2216] IGNORE Leader for TabletID 72075186233409547 is [0:0:0] sender: [1:250:2067] recipient: [1:242:2216] Leader for TabletID 72075186233409546 is [0:0:0] sender: [1:251:2067] recipient: [1:24:2071] IGNORE Leader for TabletID 72075186233409546 is [0:0:0] sender: [1:251:2067] recipient: [1:24:2071] Leader for TabletID 72075186233409547 is [0:0:0] sender: [1:253:2067] recipient: [1:24:2071] IGNORE Leader for TabletID 72075186233409547 is [0:0:0] sender: [1:253:2067] recipient: [1:24:2071] Leader for TabletID 72075186233409546 is [1:254:2220] sender: [1:257:2067] recipient: [1:240:2214] Leader for TabletID 72075186233409547 is [1:256:2222] sender: [1:258:2067] recipient: [1:242:2216] TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 TestWaitNotification wait txId: 102 2025-06-24T21:26:20.074330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 Leader for TabletID 72075186233409546 is [1:254:2220] sender: [1:292:2067] recipient: [1:24:2071] Leader for TabletID 72075186233409547 is [1:256:2222] sender: [1:293:2067] recipient: [1:24:2071] FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestModificationResults wait txId: 104 2025-06-24T21:26:20.162028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 104:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 103 TestWaitNotification wait txId: 104 Leader for TabletID 72075186233409548 is [0:0:0] sender: [1:343:2067] recipient: [1:339:2286] IGNORE Leader for TabletID 72075186233409548 is [0:0:0] sender: [1:343:2067] recipient: [1:339:2286] Leader for TabletID 72075186233409548 is [0:0:0] sender: [1:344:2067] recipient: [1:24:2071] IGNORE Leader for TabletID 72075186233409548 is [0:0:0] sender: [1:344:2067] recipient: [1:24:2071] Leader for TabletID 72075186233409548 is [1:346:2290] sender: [1:347:2067] recipient: [1:339:2286] Leader for TabletID 72075186233409548 is [1:346:2290] sender: [1:348:2067] recipient: [1:24:2071] TestWaitNotification: OK eventTxId 103 TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-06-24T21:26:20.319633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpUpgradeSubDomain, opId: 105:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp:1232) Leader for TabletID 72075186233409549 is [0:0:0] sender: [1:423:2067] recipient: [1:419:2335] IGNORE Leader for TabletID 72075186233409549 is [0:0:0] sender: [1:423:2067] recipient: [1:419:2335] Leader for TabletID 72075186233409549 is [0:0:0] sender: [1:425:2067] recipient: [1:24:2071] IGNORE Leader for TabletID 72075186233409549 is [0:0:0] sender: [1:425:2067] recipient: [1:24:2071] Leader for TabletID 72075186233409549 is [1:426:2338] sender: [1:427:2067] recipient: [1:419:2335] 2025-06-24T21:26:20.361389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:20.361456Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 Leader for TabletID 72075186233409549 is [1:426:2338] sender: [1:454:2067] recipient: [1:24:2071] TestWaitNotification: OK eventTxId 105 { Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 SchemeShard: 72075186233409549 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } TestModificationResults wait txId: 106 2025-06-24T21:26:20.410704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5471: Mark as Migrated path id [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:26:20.410762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5471: Mark as Migrated path id [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-24T21:26:20.411023Z node 1 :FLAT_TX_SCHEMESHARD ERROR: schemeshard__operation_upgrade_subdomain.cpp:1464: TWait ProgressState, dependent transaction: 106, parent transaction: 105, at schemeshard: 72057594046678944 2025-06-24T21:26:20.411170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part 
proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpUpgradeSubDomainDecision, opId: 106:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp:571) TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-24T21:26:20.427615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5932: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 2025-06-24T21:26:20.427962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5932: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 { Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 SchemeShard: 72075186233409549 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } TestModificationResults wait txId: 107 TestModificationResult got TxId: 107, wait until txId: 107 TestWaitNotification wait txId: 107 skipDeleteNotification path: /Root/USER_0/DirA/Table1 pathId: [OwnerId: 72057594046678944, LocalPathId: 4] Strong: 1 TestWaitNotification: OK eventTxId 107 TestModificationResults wait txId: 108 2025-06-24T21:26:20.466936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 108:0, at schemeshard: 72075186233409549, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:67) TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 skipDeleteNotification path: /Root/USER_0/DirA pathId: [OwnerId: 72057594046678944, LocalPathId: 3] Strong: 1 TestWaitNotification: OK eventTxId 108 TestModificationResults wait txId: 109 skipDeleteNotification path: /Root/USER_0/DirA pathId: [OwnerId: 72057594046678944, LocalPathId: 3] Strong: 1 TestModificationResult got TxId: 109, wait until txId: 109 TestWaitNotification wait txId: 109 TestWaitNotification: OK eventTxId 109 TestModificationResults wait txId: 110 2025-06-24T21:26:20.564037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 110:0, at schemeshard: 72075186233409549, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Leader for TabletID 72075186233409550 is [0:0:0] sender: [1:629:2067] recipient: [1:618:2504] IGNORE Leader for TabletID 72075186233409550 is [0:0:0] sender: [1:629:2067] recipient: [1:618:2504] Leader for TabletID 72075186233409550 is [0:0:0] sender: [1:631:2067] recipient: [1:24:2071] IGNORE ... 
emeshard: 72057594046678944 2025-06-24T21:26:21.543576Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:21.543774Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:21.544175Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:21.544283Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:21.544494Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:21.544657Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:21.544749Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:21.544957Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:21.545055Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:21.545242Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:21.545523Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:21.545606Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:21.545738Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:21.545800Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:21.545848Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:21.546082Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T21:26:21.547360Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:26:21.547512Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:26:21.548405Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435083, Sender [2:517:2402], Recipient [2:517:2402]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2025-06-24T21:26:21.548448Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5014: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2025-06-24T21:26:21.549030Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-24T21:26:21.549087Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:26:21.549159Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:26:21.549192Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:26:21.549233Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:26:21.549282Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:26:21.549391Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 274399233, Sender [2:533:2402], Recipient [2:517:2402]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-24T21:26:21.549417Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5105: StateWork, processing event TEvTxAllocatorClient::TEvAllocateResult 2025-06-24T21:26:21.549458Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:26:21.582802Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:162:2158], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:26:21.582963Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:162:2158], cacheItem# { Subscriber: { Subscriber: [2:385:2320] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 8 TableKind: 0 Created: 1 CreateStep: 5000002 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] DomainId: [OwnerId: 72057594046678944, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:26:21.583247Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:546:2420], recipient# [2:545:2419], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/USER_0 TableId: [72057594046678944:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } { Path: Root/USER_0 TableId: [72057594046678944:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false 
SyncVersion: false Status: Ok Kind: KindSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:26:21.583741Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:162:2158], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/USER_0/DirA TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:26:21.583903Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:162:2158], cacheItem# { Subscriber: { Subscriber: [2:394:2323] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 200 PathId: [OwnerId: 72057594046678944, LocalPathId: 3] DomainId: [OwnerId: 72057594046678944, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/USER_0/DirA TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:26:21.584174Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:548:2422], recipient# [2:547:2421], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } { Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:26:21.584707Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:162:2158], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/USER_0/DirA/Table1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:26:21.584852Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:162:2158], cacheItem# { Subscriber: { Subscriber: [2:403:2326] DomainOwnerId: 72057594046678944 
Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 250 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] DomainId: [OwnerId: 72057594046678944, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: Root/USER_0/DirA/Table1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:26:21.585073Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:550:2424], recipient# [2:549:2423], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/USER_0/DirA/Table1 TableId: [72057594046678944:4:1] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_NewSourceId_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_RegisteredSourceId_Test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::CheckAccess [GOOD] Test command err: 2025-06-24T21:26:21.517080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:21.517137Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:21.652098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:26:21.668438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 2025-06-24T21:26:22.007230Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:22.007295Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:22.062107Z node 2 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-24T21:26:22.073060Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TestModificationResult got TxId: 102, wait until txId: 102 2025-06-24T21:26:22.076646Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [2:199:2188], for# user1@builtin, access# DescribeSchema 2025-06-24T21:26:22.077092Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [2:203:2192], for# user1@builtin, access# DescribeSchema >> TMonitoringTests::InvalidActorId >> TPQTest::PQ_Tablet_Does_Not_Remove_The_Blob_Until_The_Reading_Is_Complete [GOOD] >> TMonitoringTests::InvalidActorId [GOOD] |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest >> TMonitoringTests::InvalidActorId [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::PQ_Tablet_Does_Not_Remove_The_Blob_Until_The_Reading_Is_Complete [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:111:2057] recipient: [1:104:2136] 2025-06-24T21:25:39.684769Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:25:39.689943Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037927937] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:25:39.690320Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037927937] doesn't have tx info 2025-06-24T21:25:39.690367Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:25:39.690409Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-24T21:25:39.690504Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:25:39.690576Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:39.690656Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927938 is [1:156:2175] sender: [1:157:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:182:2057] recipient: [1:14:2061] 2025-06-24T21:25:39.709823Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:181:2194], now have 1 active actors on pipe 2025-06-24T21:25:39.709962Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T21:25:39.729690Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "user1" Generation: 1 Important: true } 2025-06-24T21:25:39.732980Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "user1" Generation: 1 Important: true } 2025-06-24T21:25:39.733157Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:39.734072Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] 
Config applied version 1 actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "user1" Generation: 1 Important: true } 2025-06-24T21:25:39.734178Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:25:39.734817Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:39.735275Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:188:2199] 2025-06-24T21:25:39.737833Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--asdfgs--topic:0:Initializer] Initializing completed. 2025-06-24T21:25:39.737909Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:188:2199] 2025-06-24T21:25:39.737966Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--asdfgs--topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:25:39.740335Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T21:25:39.740481Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user reinit request with generation 1 2025-06-24T21:25:39.740544Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user reinit with generation 1 done 2025-06-24T21:25:39.740617Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user1 reinit request with generation 1 2025-06-24T21:25:39.740666Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user1 reinit with generation 1 done 2025-06-24T21:25:39.740907Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:39.740958Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user1 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:39.741110Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-24T21:25:39.741382Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:39.744603Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:39.744702Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:39.771722Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:195:2204], now have 1 active actors on pipe 2025-06-24T21:25:39.774431Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:198:2206], now have 1 active actors on pipe 2025-06-24T21:25:39.774550Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--asdfgs--topic' requestId: 2025-06-24T21:25:39.774600Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72057594037927937] got client message batch for topic 'rt3.dc1--asdfgs--topic' partition 0 2025-06-24T21:25:39.775443Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2167: [PQ: 72057594037927937] got client PART message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid2' SeqNo: 1 partNo : 0 messageNo: 0 size: 511957 2025-06-24T21:25:39.775973Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2167: [PQ: 72057594037927937] got client PART message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid2' SeqNo: 1 partNo : 1 messageNo: 0 size: 511957 2025-06-24T21:25:39.776541Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2167: [PQ: 72057594037927937] got client PART message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid2' SeqNo: 1 partNo : 2 messageNo: 0 size: 511957 2025-06-24T21:25:39.777057Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2167: [PQ: 72057594037927937] got client PART message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid2' SeqNo: 1 partNo : 3 messageNo: 0 size: 511957 2025-06-24T21:25:39.777160Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2167: [PQ: 72057594037927937] got client PART message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid2' SeqNo: 1 partNo : 4 messageNo: 0 size: 49324 2025-06-24T21:25:39.777207Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid2' SeqNo: 1 partNo : 4 messageNo: 0 size 49324 offset: 0 2025-06-24T21:25:39.777311Z node 1 :PERSQUEUE DEBUG: event_helpers.cpp:40: tablet 72057594037927937 topic 'rt3.dc1--asdfgs--topic' partition 0 error: new GetOwnership request needed for owner 2025-06-24T21:25:39.777493Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1426: [PQ: 72057594037927937] Handle TEvPQ::TEvError Cookie 1, Error new GetOwnership request needed for owner 2025-06-24T21:25:39.777546Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:401: Answer error topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 0 requestId: error: new GetOwnership request needed for owner 2025-06-24T21:25:39.778010Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:200:2208], now have 1 active actors on pipe 2025-06-24T21:25:39.778108Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--asdfgs--topic' requestId: 2025-06-24T21:25:39.778169Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72057594037927937] got 
client message batch for topic 'rt3.dc1--asdfgs--topic' partition 0 2025-06-24T21:25:39.778273Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|5e0a454-daa2628f-acc9adb-3ab408f6_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:39.778389Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-24T21:25:39.778495Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T21:25:39.778826Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:202:2210], ... unt 1 count 0 size 512005 from pos 0 cbcount 1 2025-06-24T21:26:23.265966Z node 26 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 3 totakecount 1 count 0 size 512005 from pos 0 cbcount 1 2025-06-24T21:26:23.267071Z node 26 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 3 totakecount 1 count 0 size 512005 from pos 0 cbcount 1 2025-06-24T21:26:23.268148Z node 26 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 3 totakecount 1 count 0 size 512005 from pos 0 cbcount 1 2025-06-24T21:26:23.269201Z node 26 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 3 totakecount 1 count 0 size 512005 from pos 0 cbcount 1 2025-06-24T21:26:23.269675Z node 26 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 3 totakecount 1 count 1 size 172682 from pos 0 cbcount 1 2025-06-24T21:26:23.272181Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 123 2025-06-24T21:26:23.306759Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [26:627:2573], now have 1 active actors on pipe 2025-06-24T21:26:23.306949Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic' requestId: 2025-06-24T21:26:23.307022Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-24T21:26:23.307100Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72057594037927937] got client message topic: topic partition: 0 SourceId: 'sourceid1' SeqNo: 15 partNo : 0 messageNo: 1 size 102400 offset: 14 2025-06-24T21:26:23.307225Z node 26 :PERSQUEUE DEBUG: event_helpers.cpp:40: tablet 72057594037927937 topic 'topic' partition 0 error: new GetOwnership request needed for owner 2025-06-24T21:26:23.307433Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:1426: [PQ: 72057594037927937] Handle TEvPQ::TEvError Cookie 45, Error new GetOwnership request needed for owner 2025-06-24T21:26:23.307484Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:401: Answer error topic: 'topic' partition: 0 messageNo: 1 requestId: error: new GetOwnership request needed for owner 2025-06-24T21:26:23.307551Z node 26 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T21:26:23.307613Z node 26 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000002_00000_0000000001_00014(+) to d0000000000_00000000000000000002_00000_0000000001_00014(+) 2025-06-24T21:26:23.307652Z node 26 :PERSQUEUE DEBUG: read.h:348: CacheProxy. 
Delete blobs from d0000000000_00000000000000000003_00000_0000000001_00014(+) to d0000000000_00000000000000000003_00000_0000000001_00014(+) 2025-06-24T21:26:23.310922Z node 26 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:26:23.311038Z node 26 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:26:23.320230Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [26:647:2592], now have 1 active actors on pipe 2025-06-24T21:26:23.320394Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic' requestId: 2025-06-24T21:26:23.320452Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-24T21:26:23.320606Z node 26 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|eefdc78c-8a79baf5-6d25983f-d2c99b3f_14 generated for partition 0 topic 'topic' owner default 2025-06-24T21:26:23.333552Z node 26 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-24T21:26:23.333756Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T21:26:23.334499Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [26:649:2594], now have 1 active actors on pipe 2025-06-24T21:26:23.334578Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic' requestId: 2025-06-24T21:26:23.334610Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-24T21:26:23.334662Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72057594037927937] got client message topic: topic partition: 0 SourceId: 'sourceid1' SeqNo: 15 partNo : 0 messageNo: 0 size 102400 offset: 14 2025-06-24T21:26:23.334772Z node 26 :PERSQUEUE DEBUG: partition_write.cpp:1843: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Send write quota request. Topic: "topic". Partition: 0. Amount: 102409. Cookie: 15 2025-06-24T21:26:23.334876Z node 26 :PERSQUEUE DEBUG: partition.cpp:3720: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Got quota. Topic: "topic". Partition: 0: Cookie: 15 2025-06-24T21:26:23.335007Z node 26 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 part blob processing sourceId 'sourceid1' seqNo 15 partNo 0 2025-06-24T21:26:23.336029Z node 26 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 part blob complete sourceId 'sourceid1' seqNo 15 partNo 0 FormedBlobsCount 0 NewHead: Offset 14 PartNo 0 PackedSize 102472 count 1 nextOffset 15 batches 1 2025-06-24T21:26:23.336975Z node 26 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'topic' partition 0 compactOffset 14,1 HeadOffset 14 endOffset 14 curOffset 15 d0000000000_00000000000000000014_00000_0000000001_00000? size 102462 WTime 1199 2025-06-24T21:26:23.337206Z node 26 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-24T21:26:23.337294Z node 26 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 14 partNo 0 count 1 size 102462 2025-06-24T21:26:23.341667Z node 26 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 14 count 1 size 102462 actorID [26:137:2161] 2025-06-24T21:26:23.341842Z node 26 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72057594037927937' partition 0 offset 14 partno 0 count 1 parts 0 suffix '63' size 102462 2025-06-24T21:26:23.341965Z node 26 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 102409 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:26:23.342040Z node 26 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:26:23.342130Z node 26 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'sourceid1', Topic: 'topic', Partition: 0, SeqNo: 15, partNo: 0, Offset: 14 is stored on disk 2025-06-24T21:26:23.342406Z node 26 :PERSQUEUE DEBUG: partition_compaction.cpp:179: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need run compaction for 102462 bytes in 1 blobs 2025-06-24T21:26:23.342507Z node 26 :PERSQUEUE DEBUG: partition_compaction.cpp:191: [PQ: 72057594037927937, Partition: 0, State: StateIdle] begin compaction for 102462 bytes in 1 blobs 2025-06-24T21:26:23.342619Z node 26 :PERSQUEUE DEBUG: partition_compaction.cpp:230: [PQ: 72057594037927937, Partition: 0, State: StateIdle] request key d0000000000_00000000000000000014_00000_0000000001_00000?, size 102462 2025-06-24T21:26:23.342697Z node 26 :PERSQUEUE DEBUG: partition_compaction.cpp:238: [PQ: 72057594037927937, Partition: 0, State: StateIdle] request 1 blobs for compaction 2025-06-24T21:26:23.342777Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T21:26:23.342882Z node 26 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 14 partno 0 count 1 parts_count 0 source 1 size 102462 accessed 0 times before, last time 1970-01-01T00:00:01.000000Z 2025-06-24T21:26:23.342941Z node 26 :PERSQUEUE DEBUG: read.h:121: Reading cookie 0. All 1 blobs are from cache. 
2025-06-24T21:26:23.343037Z node 26 :PERSQUEUE DEBUG: partition_compaction.cpp:245: [PQ: 72057594037927937, Partition: 0, State: StateIdle] continue compaction 2025-06-24T21:26:23.343285Z node 26 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 part blob processing sourceId 'sourceid1' seqNo 15 partNo 0 2025-06-24T21:26:23.344148Z node 26 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 part blob complete sourceId 'sourceid1' seqNo 15 partNo 0 FormedBlobsCount 0 NewHead: Offset 14 PartNo 0 PackedSize 102472 count 1 nextOffset 15 batches 1 2025-06-24T21:26:23.345012Z node 26 :PERSQUEUE DEBUG: partition_compaction.cpp:401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'topic' partition 0 compactOffset 14,1 HeadOffset 14 endOffset 14 curOffset 15 d0000000000_00000000000000000014_00000_0000000001_00000| size 102462 WTime 1201 2025-06-24T21:26:23.345258Z node 26 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72057594037927937' partition 0 offset 14 partno 0 count 1 parts 0 suffix '63' 2025-06-24T21:26:23.345328Z node 26 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T21:26:23.345416Z node 26 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 14 partNo 0 count 1 size 102462 2025-06-24T21:26:23.349251Z node 26 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 14 count 1 size 102462 actorID [26:137:2161] 2025-06-24T21:26:23.349381Z node 26 :PERSQUEUE DEBUG: partition_compaction.cpp:323: [PQ: 72057594037927937, Partition: 0, State: StateIdle] compaction completed 2025-06-24T21:26:23.349805Z node 26 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:26:23.349908Z node 26 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72057594037927937' partition 0 offset 14 partno 0 count 1 parts 0 suffix '124' size 102462 2025-06-24T21:26:23.349965Z node 26 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T21:26:23.350006Z node 26 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000014_00000_0000000001_00000?(+) to d0000000000_00000000000000000014_00000_0000000001_00000?(+) 2025-06-24T21:26:23.352273Z node 26 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 14 count 1 actorID [26:137:2161] 2025-06-24T21:26:23.352404Z node 26 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:26:23.352470Z node 26 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:26:23.352676Z node 26 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72057594037927937' partition 0 offset 14 partno 0 count 1 parts 0 suffix '63' size 102462 2025-06-24T21:26:23.357711Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [26:672:2614], now have 1 active actors on pipe |97.3%| [TA] $(B)/ydb/core/tx/scheme_board/ut_monitoring/test-results/unittest/{meta.json ... 
results_accumulator.log} |97.3%| [TA] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_monitoring/test-results/unittest/{meta.json ... results_accumulator.log} >> THealthCheckTest::NoStoragePools [GOOD] >> THealthCheckTest::NoBscResponse >> THealthCheckTest::SpecificServerless [GOOD] >> THealthCheckTest::SpecificServerlessWithExclusiveNodes >> THealthCheckTest::ServerlessBadTablets [GOOD] >> THealthCheckTest::ServerlessWhenTroublesWithSharedNodes >> TTabletPipeTest::TestSendAfterOpen >> TResourceBroker::TestRealUsage >> TPipeTrackerTest::TestAddSameTabletTwice [GOOD] >> TPipeTrackerTest::TestAddTwoTablets [GOOD] >> TTabletPipeTest::TestSendAfterReboot >> TResourceBroker::TestOverusage >> TTabletPipeTest::TestConsumerSidePipeReset >> TFlatMetrics::MaximumValue3 [GOOD] >> TFlatMetrics::MaximumValue4 [GOOD] >> TTabletLabeledCountersAggregator::SimpleAggregation >> THealthCheckTest::GreenStatusWhenCreatingGroup [GOOD] >> THealthCheckTest::DontIgnoreServerlessWithExclusiveNodesWhenNotSpecific >> TTabletLabeledCountersAggregator::SimpleAggregation [GOOD] >> TTabletLabeledCountersAggregator::HeavyAggregation |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TFlatMetrics::MaximumValue4 [GOOD] |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TPipeTrackerTest::TestAddTwoTablets [GOOD] >> BasicStatistics::TwoTables [GOOD] >> TTabletPipeTest::TestSendAfterOpen [GOOD] >> TResourceBroker::TestRealUsage [GOOD] >> TResourceBroker::TestRandomQueue >> TResourceBroker::TestOverusage [GOOD] >> TResourceBroker::TestNotifyActorDied >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_InactiveActor_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_OtherPartition_Test >> TTabletPipeTest::TestSendAfterReboot [GOOD] >> TTabletPipeTest::TestConsumerSidePipeReset [GOOD] >> TTabletPipeTest::TestConnectReject >> TPQTest::TestManyConsumers [GOOD] >> TResourceBroker::TestRandomQueue [GOOD] >> TResourceBroker::TestNotifyActorDied [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionInactive_0_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_OldPartitionExists_Test >> THealthCheckTest::ShardsLimit905 [GOOD] >> THealthCheckTest::ShardsLimit800 |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendAfterOpen [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendAfterReboot [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:111:2057] recipient: [1:107:2138] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:111:2057] recipient: [1:107:2138] Leader for TabletID 9437185 is [0:0:0] sender: [1:112:2057] recipient: [1:108:2139] IGNORE Leader for TabletID 9437185 is [0:0:0] sender: [1:112:2057] recipient: [1:108:2139] Leader for TabletID 9437184 is [1:119:2146] sender: [1:120:2057] recipient: [1:107:2138] Leader for TabletID 9437185 is [1:121:2147] sender: [1:122:2057] recipient: [1:108:2139] Leader for TabletID 9437184 is [1:119:2146] sender: [1:157:2057] recipient: [1:14:2061] Leader for TabletID 9437185 is [1:121:2147] sender: [1:159:2057] recipient: [1:14:2061] Leader for TabletID 9437185 is [1:121:2147] sender: [1:162:2057] recipient: [1:104:2137] Leader for TabletID 9437185 is [1:121:2147] sender: [1:164:2057] recipient: [1:14:2061] Leader for TabletID 9437185 is 
[1:121:2147] sender: [1:166:2057] recipient: [1:165:2175] Leader for TabletID 9437185 is [1:167:2176] sender: [1:168:2057] recipient: [1:165:2175] Leader for TabletID 9437185 is [1:167:2176] sender: [1:196:2057] recipient: [1:14:2061] Leader for TabletID 9437184 is [1:119:2146] sender: [1:199:2057] recipient: [1:103:2136] Leader for TabletID 9437184 is [1:119:2146] sender: [1:202:2057] recipient: [1:14:2061] Leader for TabletID 9437184 is [1:119:2146] sender: [1:203:2057] recipient: [1:201:2199] Leader for TabletID 9437184 is [1:204:2200] sender: [1:205:2057] recipient: [1:201:2199] Leader for TabletID 9437184 is [1:204:2200] sender: [1:234:2057] recipient: [1:14:2061] >> TTabletPipeTest::TestConnectReject [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestRandomQueue [GOOD] Test command err: 2025-06-24T21:26:26.806379Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-10 (10 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.806509Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-18 (18 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.806554Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-21 (21 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.806589Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-22 (22 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.806655Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-25 (25 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.806725Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-28 (28 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.806779Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-30 (30 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.806820Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-32 (32 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.806855Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-33 (33 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.806887Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-34 (34 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.806935Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-36 (36 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.806988Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-39 (39 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.807038Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-42 (42 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.807203Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-53 (53 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.807265Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-57 (57 by [2:102:2135])' of unknown type 'wrong' to default queue 
2025-06-24T21:26:26.807299Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-59 (59 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.807352Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-61 (61 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.807423Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-66 (66 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.807519Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-74 (74 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.807557Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-76 (76 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.807600Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-79 (79 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.807639Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-82 (82 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.807760Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-93 (93 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.807785Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-94 (94 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.807814Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-96 (96 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.807874Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-99 (99 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.807913Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-101 (101 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808010Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-109 (109 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808044Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-110 (110 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808076Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-111 (111 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808142Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-115 (115 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808171Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-116 (116 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808212Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-119 (119 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808235Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-120 (120 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808264Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-121 (121 by 
[2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808316Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-124 (124 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808349Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-125 (125 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808505Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-136 (136 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808530Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-137 (137 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808562Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-139 (139 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808585Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-140 (140 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808619Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-141 (141 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808646Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-142 (142 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808691Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-145 (145 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808722Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-147 (147 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808744Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-148 (148 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808841Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-155 (155 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808891Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-158 (158 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.808953Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-162 (162 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.809010Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-164 (164 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.809063Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-168 (168 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.809094Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-170 (170 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.809134Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-173 (173 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.809231Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-180 (180 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.809266Z node 2 :RESOURCE_BROKER ERROR: 
resource_broker.cpp:675: Assigning waiting task 'task-182 (182 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.809288Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-183 (183 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.809323Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-185 (185 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.809348Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-186 (186 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.809438Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-192 (192 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.809515Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-196 (196 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.809582Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-202 (202 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.809604Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-203 (203 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.809668Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-207 (207 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.809696Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-208 (208 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.809730Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-209 (209 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.809887Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-219 (219 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.809996Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task ... 
ROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-562 (562 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839166Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-641 (641 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839212Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-643 (643 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839252Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-666 (666 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839277Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-696 (696 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839321Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-712 (712 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839340Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-720 (720 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839397Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-762 (762 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839428Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-774 (774 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839507Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-846 (846 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839607Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-933 (933 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839633Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-936 (936 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839662Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-944 (944 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839687Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-975 (975 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839724Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-22 (22 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839761Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-30 (30 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839865Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-42 (42 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839916Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-57 (57 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839932Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-59 (59 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.839987Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-82 (82 by [2:102:2135])' of unknown type 'wrong' to default queue 
2025-06-24T21:26:26.840042Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-93 (93 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840062Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-101 (101 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840131Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-139 (139 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840201Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-185 (185 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840260Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-202 (202 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840288Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-208 (208 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840346Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-219 (219 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840384Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-248 (248 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840422Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-253 (253 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840446Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-266 (266 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840482Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-292 (292 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840521Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-298 (298 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840572Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-338 (338 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840597Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-341 (341 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840633Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-365 (365 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840668Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-374 (374 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840690Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-379 (379 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840713Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-381 (381 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840737Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-392 (392 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840810Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-438 (438 by 
[2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840855Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-482 (482 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840893Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-518 (518 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840944Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-523 (523 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.840977Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-535 (535 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841017Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-550 (550 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841077Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-599 (599 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841132Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-647 (647 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841170Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-663 (663 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841197Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-670 (670 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841220Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-683 (683 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841258Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-687 (687 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841320Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-764 (764 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841353Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-793 (793 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841387Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-800 (800 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841421Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-823 (823 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841491Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-865 (865 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841516Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-872 (872 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841546Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-888 (888 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841591Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-943 (943 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841617Z node 2 :RESOURCE_BROKER ERROR: 
resource_broker.cpp:675: Assigning in-fly task 'task-947 (947 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841640Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-950 (950 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841670Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-953 (953 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841691Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-954 (954 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841757Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-958 (958 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841802Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-965 (965 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841856Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-970 (970 by [2:102:2135])' of unknown type 'wrong' to default queue 2025-06-24T21:26:26.841891Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-971 (971 by [2:102:2135])' of unknown type 'wrong' to default queue |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestNotifyActorDied [GOOD] >> THealthCheckTest::StorageLimit80 [GOOD] >> THealthCheckTest::StorageNoQuota |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestConnectReject [GOOD] >> THealthCheckTest::GreenStatusWhenInitPending [GOOD] >> THealthCheckTest::IgnoreOtherGenerations ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoTables [GOOD] Test command err: 2025-06-24T21:24:10.156310Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:24:10.156634Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:10.156773Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00480c/r3tmp/tmpm4dqj0/pdisk_1.dat 2025-06-24T21:24:10.552733Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13475, node 1 2025-06-24T21:24:10.853621Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:10.853671Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:10.853700Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:10.853934Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:10.856501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:10.984493Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:10.984655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:11.009597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25327 2025-06-24T21:24:11.574276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:14.886645Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:24:14.925025Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:14.925175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:14.985828Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:24:14.989235Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:15.191084Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:15.226434Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.226985Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.227511Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.227631Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.227728Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.227937Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.228000Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.228064Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.228140Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.409041Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:15.409175Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:15.422708Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:15.588465Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:15.646506Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:24:15.646636Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:24:15.698102Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:24:15.699696Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:24:15.699951Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:24:15.700016Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:24:15.700096Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:24:15.700163Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:24:15.700227Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:24:15.700286Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:24:15.701261Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:24:15.747461Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:15.747606Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:15.754990Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:24:15.758884Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:24:15.759228Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:24:15.767366Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:24:15.795038Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:24:15.795104Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:24:15.795228Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:24:15.812193Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:15.820896Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:24:15.821071Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:24:16.026716Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:24:16.184344Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:24:16.230073Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:24:16.776420Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:17.045626Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:17.045780Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:17.063728Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:17.316736Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2280:3056], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:17.316895Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:17.318379Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2285:3060]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:24:17.318639Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:24:17.318746Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2287:3062] 2025-06-24T21:24:17.318824Z no ... e 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:26:19.368461Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 5] is data table. 2025-06-24T21:26:19.368520Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-24T21:26:19.369025Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:26:19.403621Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:26:19.408828Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6641:4748], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:19.408944Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6651:4753], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:19.409494Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:19.422251Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:26:19.490232Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:6655:4756], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-24T21:26:19.665810Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:6753:4803] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:26:19.721210Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:6782:4818]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:19.721443Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-24T21:26:19.721492Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:6782:4818], StatRequests.size() = 1 2025-06-24T21:26:19.880381Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MzRlMzRlOGEtODFjYjYwZTEtY2M1ODRjMGMtZWRmOWFmMGQ=, TxId: 2025-06-24T21:26:19.880458Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MzRlMzRlOGEtODFjYjYwZTEtY2M1ODRjMGMtZWRmOWFmMGQ=, TxId: 2025-06-24T21:26:19.883533Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:26:19.898561Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-24T21:26:19.898650Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:26:20.392165Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:6810:4834]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:20.392406Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-24T21:26:20.392440Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:6810:4834], StatRequests.size() = 1 2025-06-24T21:26:21.690300Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:6847:4852]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:21.690672Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-24T21:26:21.690722Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:6847:4852], StatRequests.size() = 1 2025-06-24T21:26:22.329637Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:26:22.329867Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:26:22.329900Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:26:22.329936Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-24T21:26:22.329964Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. 
Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:26:22.330200Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:26:22.332567Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:26:22.343180Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZDY3MzU3NzItM2MxNjA0ZmMtZDkyNTM0M2QtNzY2Y2ZmMDA=, TxId: 2025-06-24T21:26:22.343239Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZDY3MzU3NzItM2MxNjA0ZmMtZDkyNTM0M2QtNzY2Y2ZmMDA=, TxId: 2025-06-24T21:26:22.343636Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:26:22.357825Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:26:22.357891Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:26:22.938923Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:6909:4888]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:22.939269Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-24T21:26:22.939321Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:6909:4888], StatRequests.size() = 1 2025-06-24T21:26:24.281282Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 125 ], ReplyToActorId[ [2:6950:4909]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:24.281599Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 125 ] 2025-06-24T21:26:24.281648Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 125, ReplyToActorId = [2:6950:4909], StatRequests.size() = 1 2025-06-24T21:26:24.945432Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-24T21:26:24.945661Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:26:24.945962Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:26:24.946006Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:26:24.946050Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-24T21:26:24.946086Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:26:24.946439Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:26:24.949494Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:26:24.949676Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:26:24.960764Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YTM5MzgxOTUtOGZlZjU3MTctMWQ0YTU2ZjMtMmEyYTc3MTI=, TxId: 2025-06-24T21:26:24.960853Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YTM5MzgxOTUtOGZlZjU3MTctMWQ0YTU2ZjMtMmEyYTc3MTI=, TxId: 2025-06-24T21:26:24.961442Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:26:24.975879Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:26:24.975941Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:26:25.567256Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 126 ], ReplyToActorId[ [2:7007:4940]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:25.567536Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 126 ] 2025-06-24T21:26:25.567581Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 126, ReplyToActorId = [2:7007:4940], StatRequests.size() = 1 2025-06-24T21:26:25.568314Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 127 ], ReplyToActorId[ [2:7009:4942]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:25.571729Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 127 ] 2025-06-24T21:26:25.571798Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 127, ReplyToActorId = [2:7009:4942], StatRequests.size() = 1 >> THealthCheckTest::Issues100GroupsMerging [GOOD] >> THealthCheckTest::Issues100VCardMerging >> TResourceBrokerInstant::Test >> TTabletPipeTest::TestSendWithoutWaitOpenToWrongTablet ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::TestManyConsumers [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:111:2057] recipient: [1:104:2136] 2025-06-24T21:25:38.931458Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:25:38.935750Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037927937] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:25:38.936024Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037927937] doesn't have tx info 2025-06-24T21:25:38.936059Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:25:38.936093Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-24T21:25:38.936146Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:25:38.936212Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:38.936290Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927938 is [1:156:2175] sender: [1:157:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:183:2057] recipient: [1:14:2061] 2025-06-24T21:25:38.956187Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:182:2195], now have 1 active actors on pipe 2025-06-24T21:25:38.956335Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T21:25:38.973447Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "user1" Generation: 1 Important: true } 2025-06-24T21:25:38.976604Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "user1" Generation: 1 Important: true } 2025-06-24T21:25:38.976777Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:38.977712Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] 
Config applied version 1 actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "user1" Generation: 1 Important: true } 2025-06-24T21:25:38.977829Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:25:38.978413Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:25:38.978852Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:189:2200] 2025-06-24T21:25:38.981425Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--asdfgs--topic:0:Initializer] Initializing completed. 2025-06-24T21:25:38.981499Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:189:2200] 2025-06-24T21:25:38.981559Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--asdfgs--topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:25:38.983981Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T21:25:38.984113Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user reinit request with generation 1 2025-06-24T21:25:38.984166Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user reinit with generation 1 done 2025-06-24T21:25:38.984225Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user1 reinit request with generation 1 2025-06-24T21:25:38.984268Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user1 reinit with generation 1 done 2025-06-24T21:25:38.984519Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:38.984559Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user1 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:25:38.984700Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-24T21:25:38.984979Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:38.988084Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:25:38.988167Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:25:38.988494Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:196:2205], now have 1 active actors on pipe 2025-06-24T21:25:38.991347Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:199:2207], now have 1 active actors on pipe 2025-06-24T21:25:38.991482Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--asdfgs--topic' requestId: 2025-06-24T21:25:38.991536Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72057594037927937] got client message batch for topic 'rt3.dc1--asdfgs--topic' partition 0 2025-06-24T21:25:38.992311Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2167: [PQ: 72057594037927937] got client PART message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 1 partNo : 0 messageNo: 0 size: 511957 2025-06-24T21:25:38.992835Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2167: [PQ: 72057594037927937] got client PART message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 1 partNo : 1 messageNo: 0 size: 511957 2025-06-24T21:25:38.993384Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2167: [PQ: 72057594037927937] got client PART message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 1 partNo : 2 messageNo: 0 size: 511957 2025-06-24T21:25:38.993921Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2167: [PQ: 72057594037927937] got client PART message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 1 partNo : 3 messageNo: 0 size: 511957 2025-06-24T21:25:38.994030Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2167: [PQ: 72057594037927937] got client PART message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 1 partNo : 4 messageNo: 0 size: 49324 2025-06-24T21:25:38.994093Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 1 partNo : 4 messageNo: 0 size 49324 offset: 0 2025-06-24T21:25:38.994209Z node 1 :PERSQUEUE DEBUG: event_helpers.cpp:40: tablet 72057594037927937 topic 'rt3.dc1--asdfgs--topic' partition 0 error: new GetOwnership request needed for owner 2025-06-24T21:25:38.994348Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1426: [PQ: 72057594037927937] Handle TEvPQ::TEvError Cookie 1, Error new GetOwnership request needed for owner 2025-06-24T21:25:38.994402Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:401: Answer error topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 0 requestId: error: new GetOwnership request needed for owner 2025-06-24T21:25:38.994804Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:201:2209], now have 1 active actors on pipe 2025-06-24T21:25:38.994907Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--asdfgs--topic' requestId: 2025-06-24T21:25:38.994987Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72057594037927937] got 
client message batch for topic 'rt3.dc1--asdfgs--topic' partition 0 2025-06-24T21:25:38.995086Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d324209a-fb0235ac-fa4fc0ec-ca216b95_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:38.995241Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-24T21:25:38.995345Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T21:25:38.995629Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [1:203:2211] ... UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:25.474217Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:25.507063Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [29:1004:2998], now have 1 active actors on pipe 2025-06-24T21:26:25.508574Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:25.520062Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:25.553855Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [29:1007:3001], now have 1 active actors on pipe 2025-06-24T21:26:25.555772Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:25.572031Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:25.609674Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [29:1010:3004], now have 1 active actors on pipe 2025-06-24T21:26:25.611576Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 
ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:25.629476Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:25.695924Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [29:1013:3007], now have 1 active actors on pipe 2025-06-24T21:26:25.697923Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:25.715091Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:25.752547Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [29:1016:3010], now have 1 active actors on pipe 2025-06-24T21:26:25.754388Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:25.771177Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:25.807027Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [29:1019:3013], now have 1 active actors on pipe 2025-06-24T21:26:25.808898Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:25.825656Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:25.863274Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [29:1022:3016], now have 1 active actors on pipe 
2025-06-24T21:26:25.865098Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:25.882255Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:26.011960Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [29:1025:3019], now have 1 active actors on pipe 2025-06-24T21:26:26.013387Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:26.024967Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:26.049801Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [29:1028:3022], now have 1 active actors on pipe 2025-06-24T21:26:26.051355Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:26.061888Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:26.086474Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [29:1031:3025], now have 1 active actors on pipe 2025-06-24T21:26:26.088138Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:26.099827Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 
MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:26.124313Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [29:1034:3028], now have 1 active actors on pipe 2025-06-24T21:26:26.125903Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:26.138612Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:26.168540Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037927937] server connected, pipe [29:1037:3031], now have 1 active actors on pipe 2025-06-24T21:26:26.170107Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:26.198873Z node 29 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:26.225777Z node 29 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [29:1040:3034] connected; active server actors: 1 >> TResourceBrokerInstant::Test [GOOD] >> TResourceBrokerInstant::TestErrors >> TTabletPipeTest::TestSendWithoutWaitOpenToWrongTablet [GOOD] >> TTabletResolver::TabletResolvePriority [GOOD] >> TPipeTrackerTest::TestSimpleAdd [GOOD] >> TResourceBroker::TestAutoTaskId >> TResourceBrokerInstant::TestErrors [GOOD] >> TResourceBroker::TestQueueWithConfigure >> TTabletPipeTest::TestKillClientBeforServerIdKnown |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendWithoutWaitOpenToWrongTablet [GOOD] >> TResourceBroker::TestErrors >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamed |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletResolver::TabletResolvePriority [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBrokerInstant::TestErrors [GOOD] Test command err: 2025-06-24T21:26:28.725368Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:1080: FinishTaskInstant failed for task 2: cannot finish unknown task >> TResourceBroker::TestAutoTaskId [GOOD] >> TResourceBroker::TestQueueWithConfigure [GOOD] >> TResourceBroker::TestOverusageDifferentResources >> TTabletPipeTest::TestSendAfterOpenUsingTabletWithoutAcceptor >> TTabletPipeTest::TestKillClientBeforServerIdKnown [GOOD] >> TTabletPipeTest::TestInterconnectSession >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamed [GOOD] >> 
TTabletCountersAggregator::IntegralPercentileAggregationHistNamedNoOverflowCheck |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestAutoTaskId [GOOD] >> TResourceBroker::TestErrors [GOOD] >> TResourceBroker::TestExecutionStat >> BasicStatistics::SimpleGlobalIndex [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamedNoOverflowCheck [GOOD] >> TResourceBroker::TestOverusageDifferentResources [GOOD] >> TTabletPipeTest::TestSendAfterOpenUsingTabletWithoutAcceptor [GOOD] >> THealthCheckTest::ServerlessWhenTroublesWithSharedNodes [GOOD] >> THealthCheckTest::ServerlessWithExclusiveNodesWhenTroublesWithSharedNodes >> TResourceBroker::TestExecutionStat [GOOD] >> TTabletPipeTest::TestInterconnectSession [GOOD] |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamedNoOverflowCheck [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestOverusageDifferentResources [GOOD] Test command err: 2025-06-24T21:26:29.330536Z node 1 :RESOURCE_BROKER ERROR: resource_broker.cpp:1240: Configure result: Success: false Message: "task \'compaction1\' uses unknown queue \'queue_default1\'" 2025-06-24T21:26:29.330769Z node 1 :RESOURCE_BROKER ERROR: resource_broker.cpp:1240: Configure result: Success: false Message: "task \'unknown\' is required" 2025-06-24T21:26:29.330915Z node 1 :RESOURCE_BROKER ERROR: resource_broker.cpp:1240: Configure result: Success: false Message: "task \'unknown\' uses unknown queue \'queue_default\'" >> BasicStatistics::Simple [GOOD] >> TTabletPipeTest::TestTwoNodesAndRebootOfProducer >> TPQTest::TestOwnership [GOOD] >> TPQTest::TestPQCacheSizeManagement [GOOD] |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestExecutionStat [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestInterconnectSession [GOOD] Test command err: 2025-06-24T21:26:29.377583Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:315: [9437185] Detach 2025-06-24T21:26:29.393042Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:338: [9437185] Activate 2025-06-24T21:26:29.401014Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:338: [9437185] Activate 2025-06-24T21:26:29.404021Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[9437185] ::Bootstrap [1:131:2155] 2025-06-24T21:26:29.404085Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[9437185] lookup [1:131:2155] 2025-06-24T21:26:29.404404Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[9437185] forward result local node, try to connect [1:131:2155] 2025-06-24T21:26:29.404461Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[9437185]::SendEvent [1:131:2155] 2025-06-24T21:26:29.404532Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:387: TClient[9437185] poison pill while connecting [1:131:2155] 2025-06-24T21:26:29.404563Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: TClient[9437185] connect failed [1:131:2155] 2025-06-24T21:26:29.404644Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [9437185] Accept Connect Originator# [1:131:2155] 2025-06-24T21:26:29.404793Z node 1 :PIPE_SERVER INFO: tablet_pipe_server.cpp:236: [9437185] Undelivered Target# [1:131:2155] Type# 269877249 Reason# ActorUnknown 2025-06-24T21:26:29.404956Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: 
TClient[9437185] ::Bootstrap [1:134:2157] 2025-06-24T21:26:29.404985Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[9437185] lookup [1:134:2157] 2025-06-24T21:26:29.405033Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[9437185] forward result local node, try to connect [1:134:2157] 2025-06-24T21:26:29.405071Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[9437185]::SendEvent [1:134:2157] 2025-06-24T21:26:29.405138Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:387: TClient[9437185] poison pill while connecting [1:134:2157] 2025-06-24T21:26:29.405162Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: TClient[9437185] connect failed [1:134:2157] 2025-06-24T21:26:29.405210Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [9437185] Accept Connect Originator# [1:134:2157] 2025-06-24T21:26:29.405326Z node 1 :PIPE_SERVER INFO: tablet_pipe_server.cpp:236: [9437185] Undelivered Target# [1:134:2157] Type# 269877249 Reason# ActorUnknown 2025-06-24T21:26:29.405410Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[9437185] ::Bootstrap [1:136:2159] 2025-06-24T21:26:29.405443Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[9437185] lookup [1:136:2159] 2025-06-24T21:26:29.405489Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[9437185] forward result local node, try to connect [1:136:2159] 2025-06-24T21:26:29.405536Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[9437185]::SendEvent [1:136:2159] 2025-06-24T21:26:29.405583Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:387: TClient[9437185] poison pill while connecting [1:136:2159] 2025-06-24T21:26:29.405606Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: TClient[9437185] connect failed [1:136:2159] 2025-06-24T21:26:29.405660Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [9437185] Accept Connect Originator# [1:136:2159] 2025-06-24T21:26:29.405756Z node 1 :PIPE_SERVER INFO: tablet_pipe_server.cpp:236: [9437185] Undelivered Target# [1:136:2159] Type# 269877249 Reason# ActorUnknown |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendAfterOpenUsingTabletWithoutAcceptor [GOOD] >> THealthCheckTest::SpecificServerlessWithExclusiveNodes [GOOD] >> THealthCheckTest::SharedWhenTroublesWithExclusiveNodes >> TTabletPipeTest::TestTwoNodes >> TPipeTrackerTest::TestShareTablet [GOOD] >> TPipeTrackerTest::TestIdempotentAttachDetach [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::TestPQCacheSizeManagement [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:111:2057] recipient: [1:104:2136] 2025-06-24T21:25:47.377623Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:47.377731Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927938 is [1:156:2175] sender: [1:157:2057] recipient: [1:150:2171] Leader for TabletID 
72057594037927937 is [1:110:2140] sender: [1:182:2057] recipient: [1:14:2061] 2025-06-24T21:25:47.400321Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:47.423988Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1 actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "aaa" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "aaa" Generation: 1 Important: true } 2025-06-24T21:25:47.425107Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:188:2199] 2025-06-24T21:25:47.427868Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:188:2199] 2025-06-24T21:25:47.431299Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:189:2200] 2025-06-24T21:25:47.433265Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:189:2200] 2025-06-24T21:25:47.446076Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|e4831450-920d367f-310a047a-3cc291db_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 1 } Cookie: 123 } via pipe: [1:180:2193] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:106:2057] recipient: [2:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:106:2057] recipient: [2:104:2136] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:111:2057] recipient: [2:104:2136] 2025-06-24T21:25:47.955549Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:47.955644Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:152:2057] recipient: [2:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:152:2057] recipient: [2:150:2171] Leader for TabletID 72057594037927938 is [2:156:2175] sender: [2:157:2057] recipient: [2:150:2171] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:180:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:110:2140]) on event NKikimr::TEvPersQueue::TEvUpdateConfigBuilder ! 
Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:182:2057] recipient: [2:102:2135] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:185:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:186:2057] recipient: [2:184:2194] Leader for TabletID 72057594037927937 is [2:187:2195] sender: [2:188:2057] recipient: [2:184:2194] 2025-06-24T21:25:47.997025Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:47.997110Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info !Reboot 72057594037927937 (actor [2:110:2140]) rebooted! !Reboot 72057594037927937 (actor [2:110:2140]) tablet resolver refreshed! new actor is[2:187:2195] Leader for TabletID 72057594037927937 is [2:187:2195] sender: [2:267:2057] recipient: [2:14:2061] 2025-06-24T21:25:49.576629Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:49.577732Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 2 actor [2:178:2191] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "aaa" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } Consumers { Name: "aaa" Generation: 2 Important: true } 2025-06-24T21:25:49.578701Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:273:2257] 2025-06-24T21:25:49.581370Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [2:273:2257] 2025-06-24T21:25:49.584563Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:274:2258] 2025-06-24T21:25:49.586486Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [2:274:2258] 2025-06-24T21:25:49.600559Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|bcecf957-9bca8ba0-b744d879-7ff75b21_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 1 } Cookie: 123 } via pipe: [2:178:2191] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:106:2057] recipient: [3:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:106:2057] recipient: [3:104:2136] Leader for TabletID 72057594037927937 is [3:110:2140] sender: [3:111:2057] recipient: [3:104:2136] 2025-06-24T21:25:49.980168Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in 
BillingMeteringConfig 2025-06-24T21:25:49.980237Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:152:2057] recipient: [3:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:152:2057] recipient: [3:150:2171] Leader for TabletID 72057594037927938 is [3:156:2175] sender: [3:157:2057] recipient: [3:150:2171] Leader for TabletID 72057594037927937 is [3:110:2140] sender: [3:182:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:110:2140]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:110:2140] sender: [3:184:2057] recipient: [3:102:2135] Leader for TabletID 72057594037927937 is [3:110:2140] sender: [3:186:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:110:2140] sender: [3:188:2057] recipient: [3:187:2196] Leader for TabletID 72057594037927937 is [3:189:2197] sender: [3:190:2057] recipient: [3:187:2196] 2025-06-24T21:25:50.019932Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:50.020010Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info !Reboot 72057594037927937 (actor [3:110:2140]) rebooted! !Reboot 72057594037927937 (actor [3:110:2140]) tablet resolver refreshed! new actor is[3:189:2197] Leader for TabletID 72057594037927937 is [3:189:2197] sender: [3:269:2057] recipient: [3:14:2061] 2025-06-24T21:25:51.584654Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:51.585597Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 3 actor [3:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "aaa" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } Consumers { Name: "aaa" Generation: 3 Important: true } 2025-06-24T21:25:51.586427Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [3:275:2259] 2025-06-24T21:25:51.588848Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [3:275:2259] 2025-06-24T21:25:51.591632Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [3:276:2260] 2025-06-24T21:25:51.593545Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [3:276:2260] 2025-06-24T21:25:51.606468Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|6522c281-aa5f6d47-2475569c-634ccb38_0 generated for partition 0 topic 
'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 1 } Cookie: 123 } via pipe: [3:180:2193] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:106:2057] recipient: [4:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:106:2057] recipient: [4:104:2136] Leader for TabletID 72057594037927937 is [4:110:2140] sender: [4:111:2057] recipient: ... info.cpp:30: new Cookie default|565ba82-da75b0a6-d57687de-1e84836_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:28.670894Z node 52 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|968aebec-a7a38733-28ded9e1-750458a2_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [53:106:2057] recipient: [53:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [53:106:2057] recipient: [53:104:2136] Leader for TabletID 72057594037927937 is [53:110:2140] sender: [53:111:2057] recipient: [53:104:2136] 2025-06-24T21:26:29.049551Z node 53 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:29.049638Z node 53 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [53:152:2057] recipient: [53:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [53:152:2057] recipient: [53:150:2171] Leader for TabletID 72057594037927938 is [53:156:2175] sender: [53:157:2057] recipient: [53:150:2171] Leader for TabletID 72057594037927937 is [53:110:2140] sender: [53:182:2057] recipient: [53:14:2061] 2025-06-24T21:26:29.072879Z node 53 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:29.073736Z node 53 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 53 actor [53:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 10 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 53 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 53 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 53 Important: false } 2025-06-24T21:26:29.074457Z node 53 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [53:188:2199] 2025-06-24T21:26:29.077282Z node 53 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [53:188:2199] 2025-06-24T21:26:29.079390Z node 53 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [53:189:2200] 2025-06-24T21:26:29.081013Z node 53 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [53:189:2200] 
2025-06-24T21:26:29.088617Z node 53 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|96f90fd8-eea2513a-75804333-160fc6d2_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default !Reboot 72057594037927937 (actor [53:110:2140]) on event NKikimr::TEvPersQueue::TEvRequest ! Leader for TabletID 72057594037927937 is [53:110:2140] sender: [53:209:2057] recipient: [53:102:2135] Leader for TabletID 72057594037927937 is [53:110:2140] sender: [53:212:2057] recipient: [53:14:2061] Leader for TabletID 72057594037927937 is [53:110:2140] sender: [53:213:2057] recipient: [53:211:2215] Leader for TabletID 72057594037927937 is [53:214:2216] sender: [53:215:2057] recipient: [53:211:2215] 2025-06-24T21:26:29.123924Z node 53 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:29.124013Z node 53 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:26:29.124871Z node 53 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [53:267:2261] 2025-06-24T21:26:29.127439Z node 53 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [53:268:2262] 2025-06-24T21:26:29.134755Z node 53 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:26:29.134827Z node 53 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [53:267:2261] 2025-06-24T21:26:29.135251Z node 53 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:26:29.135288Z node 53 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [53:268:2262] !Reboot 72057594037927937 (actor [53:110:2140]) rebooted! !Reboot 72057594037927937 (actor [53:110:2140]) tablet resolver refreshed! 
new actor is[53:214:2216] SCHEDULER LIMIT REACHED Leader for TabletID 72057594037927937 is [53:214:2216] sender: [53:300:2057] recipient: [53:14:2061] 2025-06-24T21:26:29.505291Z node 53 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b386edd4-2154f547-aefe0761-bb6f6a38_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [54:106:2057] recipient: [54:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [54:106:2057] recipient: [54:104:2136] Leader for TabletID 72057594037927937 is [54:110:2140] sender: [54:111:2057] recipient: [54:104:2136] 2025-06-24T21:26:29.889895Z node 54 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:29.889987Z node 54 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [54:152:2057] recipient: [54:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [54:152:2057] recipient: [54:150:2171] Leader for TabletID 72057594037927938 is [54:156:2175] sender: [54:157:2057] recipient: [54:150:2171] Leader for TabletID 72057594037927937 is [54:110:2140] sender: [54:182:2057] recipient: [54:14:2061] 2025-06-24T21:26:29.911779Z node 54 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:29.912686Z node 54 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 54 actor [54:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 10 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 54 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 54 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 54 Important: false } 2025-06-24T21:26:29.913434Z node 54 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [54:188:2199] 2025-06-24T21:26:29.915643Z node 54 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [54:188:2199] 2025-06-24T21:26:29.917227Z node 54 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [54:189:2200] 2025-06-24T21:26:29.918822Z node 54 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [54:189:2200] 2025-06-24T21:26:29.925269Z node 54 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|a02c2422-931e4ef1-32962478-a398efb8_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:29.925771Z node 54 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|e0349c95-9127d38e-1e6a8973-5d4315be_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [55:106:2057] recipient: 
[55:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [55:106:2057] recipient: [55:104:2136] Leader for TabletID 72057594037927937 is [55:110:2140] sender: [55:111:2057] recipient: [55:104:2136] 2025-06-24T21:26:30.320334Z node 55 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:30.320426Z node 55 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [55:152:2057] recipient: [55:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [55:152:2057] recipient: [55:150:2171] Leader for TabletID 72057594037927938 is [55:156:2175] sender: [55:157:2057] recipient: [55:150:2171] Leader for TabletID 72057594037927937 is [55:110:2140] sender: [55:180:2057] recipient: [55:14:2061] 2025-06-24T21:26:30.342185Z node 55 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:30.343296Z node 55 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 55 actor [55:178:2191] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 10 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 55 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 55 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 55 Important: false } 2025-06-24T21:26:30.344270Z node 55 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [55:186:2197] 2025-06-24T21:26:30.346854Z node 55 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [55:186:2197] 2025-06-24T21:26:30.349180Z node 55 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [55:187:2198] 2025-06-24T21:26:30.351737Z node 55 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [55:187:2198] 2025-06-24T21:26:30.366866Z node 55 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3de0adf7-597cc94f-4b17bf1e-8c921b42_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:30.367573Z node 55 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3bf2094c-bbfd0055-599c16b6-b39b9290_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::SimpleGlobalIndex [GOOD] Test command err: 2025-06-24T21:24:06.386529Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:24:06.386924Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:06.387114Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004831/r3tmp/tmpn2BUqC/pdisk_1.dat 2025-06-24T21:24:06.838916Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61307, node 1 2025-06-24T21:24:07.141973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:07.142043Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:07.142087Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:07.142409Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:07.156421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:07.275656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:07.275858Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:07.290209Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63048 2025-06-24T21:24:07.882715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:11.445588Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:24:11.497935Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:11.498061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:11.563787Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:24:11.568685Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:11.814870Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:11.849958Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.850636Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.851237Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.851389Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.851483Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.851720Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.851813Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.851900Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.851981Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:12.024804Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:12.024927Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:12.038453Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:12.192839Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:12.241318Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:24:12.241455Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:24:12.267048Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:24:12.268420Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:24:12.268623Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:24:12.268691Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:24:12.268759Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:24:12.268823Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:24:12.268889Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:24:12.268942Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:24:12.270895Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:24:12.315097Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:12.315264Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1790:2560], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:12.322832Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2568] 2025-06-24T21:24:12.336141Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1852:2588] 2025-06-24T21:24:12.336965Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1852:2588], schemeshard id = 72075186224037897 2025-06-24T21:24:12.342704Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:24:12.364714Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:24:12.364786Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:24:12.364871Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:24:12.376745Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:12.391908Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:24:12.392073Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:24:12.621067Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:24:12.868212Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:24:12.931696Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:24:13.484031Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:13.726557Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2142:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:13.726810Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:13.749871Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:14.164563Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2353:3065], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:14.164765Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:14.166384Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2358:3069]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:24:14.166658Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:24:14.166759Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2360:3071] 2025-06-24T21:24:14.166819Z no ... 736]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:21.928050Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 120 ] 2025-06-24T21:26:21.928090Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [2:6685:4736], StatRequests.size() = 1 2025-06-24T21:26:22.650304Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:26:22.650403Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:26:22.650460Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-24T21:26:22.650513Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:26:22.650894Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:26:22.667884Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:26:22.672708Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6708:4755], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:22.672843Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6718:4760], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:22.673106Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:22.688854Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:26:22.760040Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:6722:4763], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-24T21:26:22.969507Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:6820:4811] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:26:23.018602Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:6849:4826]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:23.018846Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-24T21:26:23.018896Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:6849:4826], StatRequests.size() = 1 2025-06-24T21:26:23.157699Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NDQ4NDVhZWEtNjlhNWRiODYtN2VmNmYwZTMtZjY1YjMwZGE=, TxId: 2025-06-24T21:26:23.157775Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NDQ4NDVhZWEtNjlhNWRiODYtN2VmNmYwZTMtZjY1YjMwZGE=, TxId: 2025-06-24T21:26:23.158264Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:26:23.172205Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:26:23.172275Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:26:23.632875Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:6877:4842]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:23.633204Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-24T21:26:23.633409Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:6877:4842], StatRequests.size() = 1 2025-06-24T21:26:24.875579Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:6912:4860]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:24.875852Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-24T21:26:24.875893Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:6912:4860], StatRequests.size() = 1 2025-06-24T21:26:25.549179Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:26:25.560153Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:26:25.560200Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:26:25.560228Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 6] is data table. 2025-06-24T21:26:25.560269Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. 
Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 6] 2025-06-24T21:26:25.560480Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:26:25.563229Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:26:25.573589Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NDA5NTY1NmUtNDE1YjJlZi1mOGQzNjJhNS0zYmEwY2FhYg==, TxId: 2025-06-24T21:26:25.573644Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NDA5NTY1NmUtNDE1YjJlZi1mOGQzNjJhNS0zYmEwY2FhYg==, TxId: 2025-06-24T21:26:25.574147Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:26:25.588228Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 6] 2025-06-24T21:26:25.588286Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:26:26.167831Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:6976:4896]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:26.168106Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-24T21:26:26.168143Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:6976:4896], StatRequests.size() = 1 2025-06-24T21:26:27.536145Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 125 ], ReplyToActorId[ [2:7015:4916]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:27.536468Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 125 ] 2025-06-24T21:26:27.536508Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 125, ReplyToActorId = [2:7015:4916], StatRequests.size() = 1 2025-06-24T21:26:28.241580Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-24T21:26:28.241712Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:26:28.242062Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:26:28.253010Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:26:28.253060Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:26:28.253089Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-24T21:26:28.253113Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:26:28.253380Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:26:28.255625Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:26:28.263776Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OTEzMTM1NmQtOWQxNzFlYmQtYjhmMWZlOTUtOTc5MzM4YWI=, TxId: 2025-06-24T21:26:28.263828Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OTEzMTM1NmQtOWQxNzFlYmQtYjhmMWZlOTUtOTc5MzM4YWI=, TxId: 2025-06-24T21:26:28.264204Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:26:28.278142Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:26:28.278197Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:26:28.869639Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 126 ], ReplyToActorId[ [2:7076:4949]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:28.869902Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 126 ] 2025-06-24T21:26:28.869928Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 126, ReplyToActorId = [2:7076:4949], StatRequests.size() = 1 >> TTabletCountersPercentile::SingleBucket [GOOD] >> TTabletCountersPercentile::StartFromZero [GOOD] |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TPipeTrackerTest::TestIdempotentAttachDetach [GOOD] >> TTabletPipeTest::TestTwoNodesAndRebootOfProducer [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::Simple [GOOD] Test command err: 2025-06-24T21:24:08.116169Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:24:08.116560Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:08.116728Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004820/r3tmp/tmpu0fAD6/pdisk_1.dat 2025-06-24T21:24:08.587545Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17117, node 1 2025-06-24T21:24:08.869723Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:08.869786Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:08.869824Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:08.870090Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:08.873844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:09.007766Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:09.007954Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:09.022027Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6292 2025-06-24T21:24:09.642639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:12.993844Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:24:13.037096Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:13.037244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:13.103201Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:24:13.107386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:13.334859Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:13.370259Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.370907Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.371473Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.371646Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.371887Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.371975Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.372061Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.372139Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.372215Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.572751Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:13.572870Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:13.593910Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:13.727951Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:13.776655Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:24:13.776779Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:24:13.806030Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:24:13.807289Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:24:13.807497Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:24:13.807550Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:24:13.807597Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:24:13.807637Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:24:13.807679Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:24:13.807732Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:24:13.808606Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:24:13.842168Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:13.842284Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:13.848833Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:24:13.851934Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:24:13.852232Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:24:13.859819Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:24:13.895255Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:24:13.895343Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:24:13.895441Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:24:13.913851Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:13.928674Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:24:13.928858Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:24:14.108166Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:24:14.280668Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:24:14.341196Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:24:14.848261Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:15.114596Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:15.114740Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:15.134690Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:15.380351Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2280:3056], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:15.380502Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:15.381778Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2285:3060]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:24:15.381963Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:24:15.382058Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2287:3062] 2025-06-24T21:24:15.382123Z nod ... e 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 119, ReplyToActorId = [2:6408:4621], StatRequests.size() = 1 2025-06-24T21:26:21.615646Z node 2 :SYSTEM_VIEWS WARN: tx_top_partitions.cpp:151: [72075186224037891] TEvSendTopPartitions, time mismath: , partition interval end# 1970-01-01T00:02:01.000000Z, event time# 1970-01-01T00:02:01.081252Z 2025-06-24T21:26:21.718447Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-24T21:26:21.718797Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:26:21.719241Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:26:21.774330Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8072: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037897 2025-06-24T21:26:21.774442Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 183.000000s, at schemeshard: 72075186224037897 2025-06-24T21:26:21.774749Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 49 2025-06-24T21:26:21.788467Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-24T21:26:22.398626Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 120 ], ReplyToActorId[ [2:6441:4637]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:22.398920Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 120 ] 2025-06-24T21:26:22.398967Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [2:6441:4637], StatRequests.size() = 1 2025-06-24T21:26:23.105021Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:26:23.105099Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:26:23.105146Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-24T21:26:23.105192Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:26:23.105564Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:26:23.119742Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:26:23.124308Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6464:4656], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:23.124451Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6474:4661], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:23.124602Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:23.138343Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:26:23.193879Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:6478:4664], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-24T21:26:23.348971Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:6573:4712] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:26:23.393142Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:6602:4727]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:23.393478Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-24T21:26:23.393534Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:6602:4727], StatRequests.size() = 1 2025-06-24T21:26:23.519025Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OGFmZDBiYzYtNDA5M2VkZWEtM2RhOWQ3ZTYtODQ0N2ZjM2Q=, TxId: 2025-06-24T21:26:23.519094Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OGFmZDBiYzYtNDA5M2VkZWEtM2RhOWQ3ZTYtODQ0N2ZjM2Q=, TxId: 2025-06-24T21:26:23.519575Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:26:23.533607Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:26:23.533667Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:26:24.038411Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:6633:4743]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:24.038631Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-24T21:26:24.038662Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:6633:4743], StatRequests.size() = 1 2025-06-24T21:26:25.299376Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:6668:4761]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:25.299639Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-24T21:26:25.299674Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:6668:4761], StatRequests.size() = 1 2025-06-24T21:26:25.994556Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:26:26.005387Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:26:26.005478Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:26:26.005519Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-24T21:26:26.005554Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. 
Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:26:26.005839Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:26:26.008499Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:26:26.017484Z node 2 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:212: [72075186224037891] TEvIntervalQuerySummary, time mismath: node id# 2, interval end# 1970-01-01T00:02:04.000000Z, event interval end# 2025-06-24T21:26:24.000000Z 2025-06-24T21:26:26.018762Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ODE5NTY1NDItOTVhMjgzZWQtMzIzZTc1YjMtZGZlNmQ5YTU=, TxId: 2025-06-24T21:26:26.018811Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ODE5NTY1NDItOTVhMjgzZWQtMzIzZTc1YjMtZGZlNmQ5YTU=, TxId: 2025-06-24T21:26:26.019242Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:26:26.032953Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:26:26.033015Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:26:26.621220Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:6732:4797]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:26.621522Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-24T21:26:26.621569Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:6732:4797], StatRequests.size() = 1 2025-06-24T21:26:27.977497Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 125 ], ReplyToActorId[ [2:6773:4818]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:27.977851Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 125 ] 2025-06-24T21:26:27.977901Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 125, ReplyToActorId = [2:6773:4818], StatRequests.size() = 1 2025-06-24T21:26:28.665112Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-24T21:26:28.665531Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:26:28.665812Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:26:28.676754Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:26:28.676830Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-24T21:26:29.254405Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 126 ], ReplyToActorId[ [2:6806:4834]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:29.254621Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 126 ] 2025-06-24T21:26:29.254669Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 126, ReplyToActorId = [2:6806:4834], StatRequests.size() = 1 >> TTabletPipeTest::TestTwoNodes [GOOD] >> THealthCheckTest::DontIgnoreServerlessWithExclusiveNodesWhenNotSpecific [GOOD] >> THealthCheckTest::CLusterNotBootstrapped |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletCountersPercentile::StartFromZero [GOOD] |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestTwoNodesAndRebootOfProducer [GOOD] >> TFlatMetrics::TimeSeriesKV2 [GOOD] >> TPipeCacheTest::TestAutoConnect >> TResourceBrokerConfig::UpdateQueues [GOOD] >> TResourceBrokerConfig::DefaultConfig [GOOD] |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestTwoNodes [GOOD] >> TTabletCountersPercentile::WithoutZero [GOOD] >> TTabletLabeledCountersAggregator::DbAggregation >> TTabletLabeledCountersAggregator::HeavyAggregation [GOOD] >> BootstrapperTest::LoneBootstrapper >> TFlatMetrics::TimeSeriesAvg4 [GOOD] >> TFlatMetrics::TimeSeriesKV [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBrokerConfig::DefaultConfig [GOOD] Test command err: Queues { Name: "queue_default" Weight: 30 Limit { Cpu: 2 } } Queues { Name: "queue_compaction_gen0" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_compaction_gen1" Weight: 100 Limit { Cpu: 6 } } Queues { Name: "queue_compaction_gen2" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_compaction_gen3" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_compaction_borrowed" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_cs_indexation" Weight: 100 Limit { Cpu: 3 Memory: 1073741824 } } Queues { Name: "queue_cs_ttl" Weight: 100 Limit { Cpu: 3 Memory: 1073741824 } } Queues { Name: "queue_cs_general" Weight: 100 Limit { Cpu: 3 Memory: 3221225472 } } Queues { Name: "queue_cs_scan_read" Weight: 100 Limit { Cpu: 3 Memory: 3221225472 } } Queues { Name: "queue_cs_normalizer" Weight: 100 Limit { Cpu: 3 Memory: 3221225472 } } Queues { Name: "queue_transaction" Weight: 100 Limit { Cpu: 4 } } Queues { Name: "queue_background_compaction" Weight: 10 Limit { Cpu: 1 } } Queues { Name: "queue_scan" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_backup" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_restore" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_kqp_resource_manager" Weight: 30 Limit { Cpu: 4 Memory: 10737418240 } } Queues { Name: "queue_build_index" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_ttl" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_datashard_build_stats" Weight: 100 Limit { Cpu: 1 } } Queues { Name: "queue_cdc_initial_scan" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_statistics_scan" Weight: 100 Limit { Cpu: 1 } } Tasks { Name: "unknown" QueueName: "queue_default" DefaultDuration: 60000000 } Tasks { Name: "compaction_gen0" QueueName: "queue_compaction_gen0" DefaultDuration: 10000000 } Tasks { Name: "compaction_gen1" QueueName: "queue_compaction_gen1" DefaultDuration: 30000000 } Tasks { Name: "compaction_gen2" QueueName: 
"queue_compaction_gen2" DefaultDuration: 120000000 } Tasks { Name: "compaction_gen3" QueueName: "queue_compaction_gen3" DefaultDuration: 600000000 } Tasks { Name: "compaction_borrowed" QueueName: "queue_compaction_borrowed" DefaultDuration: 600000000 } Tasks { Name: "CS::TTL" QueueName: "queue_cs_ttl" DefaultDuration: 600000000 } Tasks { Name: "CS::INDEXATION" QueueName: "queue_cs_indexation" DefaultDuration: 600000000 } Tasks { Name: "CS::GENERAL" QueueName: "queue_cs_general" DefaultDuration: 600000000 } Tasks { Name: "CS::SCAN_READ" QueueName: "queue_cs_scan_read" DefaultDuration: 600000000 } Tasks { Name: "CS::NORMALIZER" QueueName: "queue_cs_normalizer" DefaultDuration: 600000000 } Tasks { Name: "transaction" QueueName: "queue_transaction" DefaultDuration: 600000000 } Tasks { Name: "background_compaction" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen0" QueueName: "queue_background_compaction" DefaultDuration: 10000000 } Tasks { Name: "background_compaction_gen1" QueueName: "queue_background_compaction" DefaultDuration: 20000000 } Tasks { Name: "background_compaction_gen2" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen3" QueueName: "queue_background_compaction" DefaultDuration: 300000000 } Tasks { Name: "scan" QueueName: "queue_scan" DefaultDuration: 300000000 } Tasks { Name: "backup" QueueName: "queue_backup" DefaultDuration: 300000000 } Tasks { Name: "restore" QueueName: "queue_restore" DefaultDuration: 300000000 } Tasks { Name: "kqp_query" QueueName: "queue_kqp_resource_manager" DefaultDuration: 600000000 } Tasks { Name: "build_index" QueueName: "queue_build_index" DefaultDuration: 600000000 } Tasks { Name: "ttl" QueueName: "queue_ttl" DefaultDuration: 300000000 } Tasks { Name: "datashard_build_stats" QueueName: "queue_datashard_build_stats" DefaultDuration: 5000000 } Tasks { Name: "cdc_initial_scan" QueueName: "queue_cdc_initial_scan" DefaultDuration: 600000000 } Tasks { Name: "statistics_scan" QueueName: "queue_statistics_scan" DefaultDuration: 600000000 } ResourceLimit { Cpu: 256 Memory: 17179869184 } Total queues cpu: 89 >> TTabletLabeledCountersAggregator::DbAggregation [GOOD] |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TFlatMetrics::TimeSeriesKV [GOOD] >> TTabletLabeledCountersAggregator::Version3Aggregation >> BootstrapperTest::LoneBootstrapper [GOOD] >> BootstrapperTest::MultipleBootstrappers >> TResourceBrokerConfig::UpdateTasks [GOOD] >> TResourceBrokerConfig::UpdateResourceLimit [GOOD] >> TTabletPipeTest::TestSendWithoutWaitOpen |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletLabeledCountersAggregator::DbAggregation [GOOD] >> TTabletLabeledCountersAggregator::Version3Aggregation [GOOD] >> TTabletPipeTest::TestClientDisconnectAfterPipeOpen >> TTabletPipeTest::TestTwoNodesAndRebootOfConsumer >> TTabletResolver::NodeProblem |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBrokerConfig::UpdateResourceLimit [GOOD] >> TTabletPipeTest::TestSendWithoutWaitOpen [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletLabeledCountersAggregator::HeavyAggregation [GOOD] Test command err: 2025-06-24T21:26:26.544875Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1983: aggregator new request V2 [2:8:2055] 2025-06-24T21:26:26.545137Z node 2 :TABLET_AGGREGATOR INFO: 
tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:9:2056] worker 0 2025-06-24T21:26:26.545179Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:10:2057] worker 1 2025-06-24T21:26:26.545197Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:11:2058] worker 2 2025-06-24T21:26:26.545213Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:12:2059] worker 3 2025-06-24T21:26:26.545227Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:13:2060] worker 4 2025-06-24T21:26:26.545265Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:14:2061] worker 5 2025-06-24T21:26:26.545286Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:15:2062] worker 6 2025-06-24T21:26:26.545313Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:16:2063] worker 7 2025-06-24T21:26:26.545339Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:17:2064] worker 8 2025-06-24T21:26:26.545359Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:18:2065] worker 9 Sending message to [2:10:2057] from [2:8:2055] id 1 Sending message to [2:11:2058] from [2:8:2055] id 2 Sending message to [2:12:2059] from [2:8:2055] id 3 Sending message to [2:13:2060] from [2:8:2055] id 4 Sending message to [2:14:2061] from [2:8:2055] id 5 Sending message to [2:15:2062] from [2:8:2055] id 6 Sending message to [2:16:2063] from [2:8:2055] id 7 Sending message to [2:17:2064] from [2:8:2055] id 8 Sending message to [2:18:2065] from [2:8:2055] id 9 Sending message to [2:9:2056] from [2:8:2055] id 10 2025-06-24T21:26:27.169406Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 4 [2:13:2060] 2025-06-24T21:26:27.169492Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 5 [2:14:2061] 2025-06-24T21:26:27.169524Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 6 [2:15:2062] 2025-06-24T21:26:27.169554Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 7 [2:16:2063] 2025-06-24T21:26:27.169588Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 8 [2:17:2064] 2025-06-24T21:26:27.169621Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 9 [2:18:2065] 2025-06-24T21:26:27.169838Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 10 [2:9:2056] 2025-06-24T21:26:27.169870Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 1 [2:10:2057] 2025-06-24T21:26:27.169896Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 2 [2:11:2058] 2025-06-24T21:26:27.169945Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: 
aggregator actor request to node 3 [2:12:2059] 2025-06-24T21:26:27.169980Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 10 [2:9:2056] 2025-06-24T21:26:27.171091Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 10 [2:9:2056] 2025-06-24T21:26:27.199356Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:9:2056] Initiator [2:8:2055] 2025-06-24T21:26:27.215815Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 1 [2:10:2057] 2025-06-24T21:26:27.217113Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 1 [2:10:2057] 2025-06-24T21:26:27.244527Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:10:2057] Initiator [2:8:2055] 2025-06-24T21:26:27.260556Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 2 [2:11:2058] 2025-06-24T21:26:27.261549Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 2 [2:11:2058] 2025-06-24T21:26:27.289092Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:11:2058] Initiator [2:8:2055] 2025-06-24T21:26:27.309455Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 3 [2:12:2059] 2025-06-24T21:26:27.311236Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 3 [2:12:2059] 2025-06-24T21:26:27.339619Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:12:2059] Initiator [2:8:2055] 2025-06-24T21:26:27.355405Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 4 [2:13:2060] 2025-06-24T21:26:27.356799Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 4 [2:13:2060] 2025-06-24T21:26:27.385604Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:13:2060] Initiator [2:8:2055] 2025-06-24T21:26:27.403418Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 5 [2:14:2061] 2025-06-24T21:26:27.404526Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 5 [2:14:2061] 2025-06-24T21:26:27.431497Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:14:2061] Initiator [2:8:2055] 2025-06-24T21:26:27.451815Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 6 [2:15:2062] 2025-06-24T21:26:27.453367Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 6 [2:15:2062] 2025-06-24T21:26:27.482892Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:15:2062] Initiator [2:8:2055] 2025-06-24T21:26:27.499434Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 7 [2:16:2063] 2025-06-24T21:26:27.500864Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 7 [2:16:2063] 2025-06-24T21:26:27.527844Z node 2 
:TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:16:2063] Initiator [2:8:2055] 2025-06-24T21:26:27.543652Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 8 [2:17:2064] 2025-06-24T21:26:27.544956Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 8 [2:17:2064] 2025-06-24T21:26:27.571725Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:17:2064] Initiator [2:8:2055] 2025-06-24T21:26:27.587768Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 9 [2:18:2065] 2025-06-24T21:26:27.589063Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 9 [2:18:2065] 2025-06-24T21:26:27.614952Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:18:2065] Initiator [2:8:2055] 2025-06-24T21:26:27.629872Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 0 [2:8:2055] 2025-06-24T21:26:27.629992Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 0 [2:8:2055] 2025-06-24T21:26:27.633611Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 1 [2:8:2055] 2025-06-24T21:26:27.633778Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 1 [2:8:2055] 2025-06-24T21:26:27.637803Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 2 [2:8:2055] 2025-06-24T21:26:27.637928Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 2 [2:8:2055] 2025-06-24T21:26:27.641698Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 3 [2:8:2055] 2025-06-24T21:26:27.641824Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 3 [2:8:2055] 2025-06-24T21:26:27.646732Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 4 [2:8:2055] 2025-06-24T21:26:27.646843Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 4 [2:8:2055] 2025-06-24T21:26:27.650674Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 5 [2:8:2055] 2025-06-24T21:26:27.650762Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 5 [2:8:2055] 2025-06-24T21:26:27.654440Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 6 [2:8:2055] 2025-06-24T21:26:27.654526Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 6 [2:8:2055] 2025-06-24T21:26:27.660169Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 7 [2:8:2055] 2025-06-24T21:26:27.660291Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 7 [2:8:2055] 2025-06-24T21:26:27.663797Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 8 [2:8:2055] 2025-06-24T21:26:27.663897Z node 
2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 8 [2:8:2055] 2025-06-24T21:26:27.668201Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 9 [2:8:2055] 2025-06-24T21:26:27.668302Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 9 [2:8:2055] 2025-06-24T21:26:27.672249Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:8:2055] Initiator [2:7:2054] TEST 2 10 duration 1.239219s 2025-06-24T21:26:27.815234Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1983: aggregator new request V2 [3:8:2055] 2025-06-24T21:26:27.815754Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [3:8:2055] self [3:9:2056] worker 0 2025-06-24T21:26:27.815822Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [3:8:2055] self [3:10:2057] worker 1 2025-06-24T21:26:27.815862Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_ag ... ctor got response node 3 [3:8:2055] 2025-06-24T21:26:28.890858Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 3 [3:8:2055] 2025-06-24T21:26:28.894465Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 4 [3:8:2055] 2025-06-24T21:26:28.894567Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 4 [3:8:2055] 2025-06-24T21:26:28.898447Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 5 [3:8:2055] 2025-06-24T21:26:28.898537Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 5 [3:8:2055] 2025-06-24T21:26:28.902171Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [3:8:2055] Initiator [3:7:2054] TEST 2 20 duration 1.197717s 2025-06-24T21:26:29.094072Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1983: aggregator new request V2 [4:8:2055] 2025-06-24T21:26:29.094231Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [4:8:2055] self [4:9:2056] worker 0 Sending message to [4:9:2056] from [4:8:2055] id 1 Sending message to [4:9:2056] from [4:8:2055] id 2 Sending message to [4:9:2056] from [4:8:2055] id 3 Sending message to [4:9:2056] from [4:8:2055] id 4 Sending message to [4:9:2056] from [4:8:2055] id 5 Sending message to [4:9:2056] from [4:8:2055] id 6 Sending message to [4:9:2056] from [4:8:2055] id 7 Sending message to [4:9:2056] from [4:8:2055] id 8 Sending message to [4:9:2056] from [4:8:2055] id 9 Sending message to [4:9:2056] from [4:8:2055] id 10 2025-06-24T21:26:29.840563Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 1 [4:9:2056] 2025-06-24T21:26:29.840608Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 2 [4:9:2056] 2025-06-24T21:26:29.840624Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 3 [4:9:2056] 2025-06-24T21:26:29.840638Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 4 [4:9:2056] 2025-06-24T21:26:29.840692Z node 4 :TABLET_AGGREGATOR INFO: 
tablet_counters_aggregator.cpp:1964: aggregator actor request to node 5 [4:9:2056] 2025-06-24T21:26:29.840717Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 6 [4:9:2056] 2025-06-24T21:26:29.840739Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 7 [4:9:2056] 2025-06-24T21:26:29.840759Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 8 [4:9:2056] 2025-06-24T21:26:29.840779Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 9 [4:9:2056] 2025-06-24T21:26:29.840805Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 10 [4:9:2056] 2025-06-24T21:26:29.841038Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 1 [4:9:2056] 2025-06-24T21:26:29.842041Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 1 [4:9:2056] 2025-06-24T21:26:29.869756Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 2 [4:9:2056] 2025-06-24T21:26:29.871430Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 2 [4:9:2056] 2025-06-24T21:26:29.901801Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 3 [4:9:2056] 2025-06-24T21:26:29.902785Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 3 [4:9:2056] 2025-06-24T21:26:29.930749Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 4 [4:9:2056] 2025-06-24T21:26:29.931992Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 4 [4:9:2056] 2025-06-24T21:26:29.959470Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 5 [4:9:2056] 2025-06-24T21:26:29.960872Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 5 [4:9:2056] 2025-06-24T21:26:29.993005Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 6 [4:9:2056] 2025-06-24T21:26:29.994398Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 6 [4:9:2056] 2025-06-24T21:26:30.021266Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 7 [4:9:2056] 2025-06-24T21:26:30.022372Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 7 [4:9:2056] 2025-06-24T21:26:30.050737Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 8 [4:9:2056] 2025-06-24T21:26:30.051899Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 8 [4:9:2056] 2025-06-24T21:26:30.078461Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 9 [4:9:2056] 2025-06-24T21:26:30.079967Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 9 [4:9:2056] 2025-06-24T21:26:30.107442Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got 
response node 10 [4:9:2056] 2025-06-24T21:26:30.108724Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 10 [4:9:2056] 2025-06-24T21:26:30.156310Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [4:9:2056] Initiator [4:8:2055] 2025-06-24T21:26:30.386302Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 0 [4:8:2055] 2025-06-24T21:26:30.387023Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 0 [4:8:2055] 2025-06-24T21:26:30.442925Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [4:8:2055] Initiator [4:7:2054] TEST 2 1 duration 1.527073s 2025-06-24T21:26:30.777022Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [5:7:2054] self [5:8:2055] worker 0 Sending message to [5:8:2055] from [5:8:2055] id 1 Sending message to [5:8:2055] from [5:8:2055] id 2 Sending message to [5:8:2055] from [5:8:2055] id 3 Sending message to [5:8:2055] from [5:8:2055] id 4 Sending message to [5:8:2055] from [5:8:2055] id 5 Sending message to [5:8:2055] from [5:8:2055] id 6 Sending message to [5:8:2055] from [5:8:2055] id 7 Sending message to [5:8:2055] from [5:8:2055] id 8 Sending message to [5:8:2055] from [5:8:2055] id 9 Sending message to [5:8:2055] from [5:8:2055] id 10 2025-06-24T21:26:31.326285Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 1 [5:8:2055] 2025-06-24T21:26:31.326343Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 2 [5:8:2055] 2025-06-24T21:26:31.326367Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 3 [5:8:2055] 2025-06-24T21:26:31.326391Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 4 [5:8:2055] 2025-06-24T21:26:31.326414Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 5 [5:8:2055] 2025-06-24T21:26:31.326489Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 6 [5:8:2055] 2025-06-24T21:26:31.326526Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 7 [5:8:2055] 2025-06-24T21:26:31.326559Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 8 [5:8:2055] 2025-06-24T21:26:31.326592Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 9 [5:8:2055] 2025-06-24T21:26:31.326624Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 10 [5:8:2055] 2025-06-24T21:26:31.326915Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 1 [5:8:2055] 2025-06-24T21:26:31.328458Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 1 [5:8:2055] 2025-06-24T21:26:31.362153Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 2 [5:8:2055] 2025-06-24T21:26:31.363599Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 2 [5:8:2055] 2025-06-24T21:26:31.398537Z node 5 
:TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 3 [5:8:2055] 2025-06-24T21:26:31.400086Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 3 [5:8:2055] 2025-06-24T21:26:31.434999Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 4 [5:8:2055] 2025-06-24T21:26:31.436519Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 4 [5:8:2055] 2025-06-24T21:26:31.469299Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 5 [5:8:2055] 2025-06-24T21:26:31.470353Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 5 [5:8:2055] 2025-06-24T21:26:31.505092Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 6 [5:8:2055] 2025-06-24T21:26:31.506272Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 6 [5:8:2055] 2025-06-24T21:26:31.531584Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 7 [5:8:2055] 2025-06-24T21:26:31.532654Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 7 [5:8:2055] 2025-06-24T21:26:31.561726Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 8 [5:8:2055] 2025-06-24T21:26:31.563104Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 8 [5:8:2055] 2025-06-24T21:26:31.594281Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 9 [5:8:2055] 2025-06-24T21:26:31.595696Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 9 [5:8:2055] 2025-06-24T21:26:31.622341Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 10 [5:8:2055] 2025-06-24T21:26:31.623460Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 10 [5:8:2055] 2025-06-24T21:26:31.666128Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [5:8:2055] Initiator [5:7:2054] TEST 2 1 duration 1.231113s >> TTabletPipeTest::TestPipeWithVersionInfo >> TPipeCacheTest::TestAutoConnect [GOOD] >> TResourceBrokerInstant::TestMerge >> TTabletPipeTest::TestClientDisconnectAfterPipeOpen [GOOD] >> TTabletPipeTest::TestTwoNodesAndRebootOfConsumer [GOOD] |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendWithoutWaitOpen [GOOD] >> TBlockBlobStorageTest::DelayedErrorsNotIgnored >> TTabletPipeTest::TestPipeWithVersionInfo [GOOD] |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TPipeCacheTest::TestAutoConnect [GOOD] >> TResourceBrokerInstant::TestMerge [GOOD] >> TTabletCountersAggregator::ColumnShardCounters >> TTabletPipeTest::TestPipeConnectToHint >> TTabletResolver::NodeProblem [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestClientDisconnectAfterPipeOpen [GOOD] Test command err: { LabeledCountersByGroup { Group: "aba/caba/daba|man" LabeledCounter { Value: 13 AggregateFunc: EAF_SUM Type: CT_SIMPLE NameId: 0 
} Delimiter: "|" } LabeledCountersByGroup { Group: "cons/aaa|1|aba/caba/daba|man" LabeledCounter { Value: 13 AggregateFunc: EAF_SUM Type: CT_SIMPLE NameId: 0 } Delimiter: "|" } CounterNames: "value1" } 2025-06-24T21:26:33.333025Z node 3 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437185] NodeDisconnected NodeId# 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestTwoNodesAndRebootOfConsumer [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:164:2058] recipient: [1:162:2138] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:164:2058] recipient: [1:162:2138] Leader for TabletID 9437184 is [1:170:2142] sender: [1:171:2058] recipient: [1:162:2138] Leader for TabletID 9437185 is [0:0:0] sender: [2:174:2049] recipient: [2:165:2096] IGNORE Leader for TabletID 9437185 is [0:0:0] sender: [2:174:2049] recipient: [2:165:2096] Leader for TabletID 9437185 is [2:186:2099] sender: [2:188:2049] recipient: [2:165:2096] Leader for TabletID 9437184 is [1:170:2142] sender: [1:214:2058] recipient: [1:15:2062] Leader for TabletID 9437185 is [2:186:2099] sender: [1:216:2058] recipient: [1:15:2062] Leader for TabletID 9437185 is [2:186:2099] sender: [2:218:2049] recipient: [2:43:2053] Leader for TabletID 9437185 is [2:186:2099] sender: [2:219:2049] recipient: [2:159:2095] Leader for TabletID 9437185 is [2:186:2099] sender: [1:222:2058] recipient: [1:15:2062] Leader for TabletID 9437185 is [2:186:2099] sender: [2:224:2049] recipient: [2:43:2053] Leader for TabletID 9437185 is [2:186:2099] sender: [2:225:2049] recipient: [2:223:2112] Leader for TabletID 9437185 is [2:226:2113] sender: [2:227:2049] recipient: [2:223:2112] Leader for TabletID 9437185 is [2:226:2113] sender: [1:256:2058] recipient: [1:15:2062] >> TTabletCountersAggregator::ColumnShardCounters [GOOD] |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestPipeWithVersionInfo [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletResolver::NodeProblem [GOOD] Test command err: 2025-06-24T21:26:33.582482Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StInit ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-06-24T21:26:33.582725Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 123 entry.State: StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 123 Cookie: 0 CurrentLeader: [1:214:2137] CurrentLeaderTablet: [1:215:2138] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[1:24343667:0] : 3}, {[1:1099535971443:0] : 6}}}} 2025-06-24T21:26:33.582759Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 123 followers: 0 2025-06-24T21:26:33.582804Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [1:214:2137] 2025-06-24T21:26:33.582983Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StInit ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-06-24T21:26:33.583174Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 234 entry.State: StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 234 Cookie: 0 CurrentLeader: [1:220:2141] CurrentLeaderTablet: [1:221:2142] 
CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[1:24343667:0] : 3}, {[1:1099535971443:0] : 6}}}} 2025-06-24T21:26:33.583216Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 234 followers: 0 2025-06-24T21:26:33.583266Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [1:220:2141] 2025-06-24T21:26:33.584295Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StNormal ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-06-24T21:26:33.584343Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [1:214:2137] 2025-06-24T21:26:33.584523Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StNormal ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-06-24T21:26:33.584558Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [1:220:2141] 2025-06-24T21:26:33.584695Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:583: Handle TEvNodeProblem nodeId: 1 max(problemEpoch): 2 2025-06-24T21:26:33.584728Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:429: Delayed invalidation of tabletId: 123 leader: [1:214:2137] by NodeId 2025-06-24T21:26:33.584768Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StProblemResolve ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-06-24T21:26:33.584909Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 123 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 123 Cookie: 0 CurrentLeader: [2:230:2095] CurrentLeaderTablet: [2:231:2096] CurrentGeneration: 2 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[1:24343667:0] : 3}, {[1:1099535971443:0] : 6}}}} 2025-06-24T21:26:33.584934Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 123 followers: 0 2025-06-24T21:26:33.584963Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [2:230:2095] 2025-06-24T21:26:33.585127Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:429: Delayed invalidation of tabletId: 234 leader: [1:220:2141] by NodeId 2025-06-24T21:26:33.585221Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StProblemResolve ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-06-24T21:26:33.585493Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 234 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 234 Cookie: 0 CurrentLeader: [2:236:2097] CurrentLeaderTablet: [2:237:2098] CurrentGeneration: 2 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[1:24343667:0] : 3}, {[1:1099535971443:0] : 6}}}} 2025-06-24T21:26:33.585536Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry 
leader tabletId: 234 followers: 0 2025-06-24T21:26:33.585644Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [2:236:2097] 2025-06-24T21:26:33.586943Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:583: Handle TEvNodeProblem nodeId: 2 max(problemEpoch): 2 2025-06-24T21:26:33.586990Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StNormal ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-06-24T21:26:33.587023Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [2:230:2095] 2025-06-24T21:26:33.587285Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StNormal ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-06-24T21:26:33.587339Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [2:236:2097] 2025-06-24T21:26:33.587535Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:583: Handle TEvNodeProblem nodeId: 2 max(problemEpoch): 4 2025-06-24T21:26:33.587570Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:429: Delayed invalidation of tabletId: 123 leader: [2:230:2095] by NodeId 2025-06-24T21:26:33.587614Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StProblemResolve ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-06-24T21:26:33.587795Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 123 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 123 Cookie: 0 CurrentLeader: [3:248:2095] CurrentLeaderTablet: [3:249:2096] CurrentGeneration: 3 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[1:24343667:0] : 3}, {[1:1099535971443:0] : 6}}}} 2025-06-24T21:26:33.587824Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 123 followers: 0 2025-06-24T21:26:33.587854Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [3:248:2095] 2025-06-24T21:26:33.588036Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StNormal ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-06-24T21:26:33.588068Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [2:236:2097] 2025-06-24T21:26:33.588226Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:583: Handle TEvNodeProblem nodeId: 2 max(problemEpoch): 5 2025-06-24T21:26:33.588261Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StNormal ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-06-24T21:26:33.588292Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 123 followers: 0 
countLeader 1 allowFollowers 0 winner: [3:248:2095] 2025-06-24T21:26:33.588438Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:429: Delayed invalidation of tabletId: 234 leader: [2:236:2097] by NodeId 2025-06-24T21:26:33.588476Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StProblemResolve ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-06-24T21:26:33.588658Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 234 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 234 Cookie: 0 CurrentLeader: [3:254:2097] CurrentLeaderTablet: [3:255:2098] CurrentGeneration: 3 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[1:24343667:0] : 3}, {[1:1099535971443:0] : 6}}}} 2025-06-24T21:26:33.588692Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 234 followers: 0 2025-06-24T21:26:33.588730Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [3:254:2097] >> TFlatMetrics::MaximumValue1 [GOOD] >> TFlatMetrics::MaximumValue2 [GOOD] >> TTabletPipeTest::TestPipeConnectToHint [GOOD] >> TFlatMetrics::TimeSeriesAvg16 [GOOD] >> TFlatMetrics::TimeSeriesAVG [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamedSingleBucket |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletCountersAggregator::ColumnShardCounters [GOOD] |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TFlatMetrics::MaximumValue2 [GOOD] >> SystemView::ShowCreateTableChangefeeds [FAIL] >> SystemView::ShowCreateTableColumnAlterColumn >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamedSingleBucket [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationRegular |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestPipeConnectToHint [GOOD] |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TFlatMetrics::TimeSeriesAVG [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationRegularCheckSingleTablet >> TTabletCountersAggregator::IntegralPercentileAggregationRegular [GOOD] >> TTabletPipeTest::TestRebootUsingTabletWithoutAcceptor >> TTabletCountersAggregator::IntegralPercentileAggregationRegularCheckSingleTablet [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationRegularNoOverflowCheck >> BootstrapperTest::KeepExistingTablet >> THealthCheckTest::ServerlessWithExclusiveNodesWhenTroublesWithSharedNodes [GOOD] |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletCountersAggregator::IntegralPercentileAggregationRegular [GOOD] >> TTabletPipeTest::TestOpen >> TResourceBroker::TestResubmitTask >> TTabletCountersAggregator::IntegralPercentileAggregationRegularNoOverflowCheck [GOOD] >> TFlatMetrics::TimeSeriesAvg16x60 [GOOD] >> TFlatMetrics::TimeSeriesAvg16Signed [GOOD] >> THealthCheckTest::ShardsLimit800 [GOOD] >> THealthCheckTest::ShardsNoLimit >> THealthCheckTest::IgnoreOtherGenerations [GOOD] >> THealthCheckTest::IgnoreServerlessWhenNotSpecific >> THealthCheckTest::SharedWhenTroublesWithExclusiveNodes [GOOD] >> TTabletPipeTest::TestRebootUsingTabletWithoutAcceptor [GOOD] >> THealthCheckTest::StorageNoQuota [GOOD] >> THealthCheckTest::TestBootingTabletIsNotDead >> 
TTabletPipeTest::TestOpen [GOOD] >> TResourceBroker::TestResubmitTask [GOOD] >> TResourceBroker::TestUpdateCookie >> TTabletPipeTest::TestRewriteSameNode >> TBlockBlobStorageTest::DelayedErrorsNotIgnored [GOOD] >> TFlatMetrics::DecayingAverageAvg [GOOD] |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletCountersAggregator::IntegralPercentileAggregationRegularNoOverflowCheck [GOOD] |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TFlatMetrics::TimeSeriesAvg16Signed [GOOD] >> THealthCheckTest::NoBscResponse [GOOD] >> THealthCheckTest::LayoutIncorrect >> TTabletPipeTest::TestShutdown >> BootstrapperTest::RestartUnavailableTablet ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestRebootUsingTabletWithoutAcceptor [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:111:2057] recipient: [1:107:2138] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:111:2057] recipient: [1:107:2138] Leader for TabletID 9437185 is [0:0:0] sender: [1:112:2057] recipient: [1:108:2139] IGNORE Leader for TabletID 9437185 is [0:0:0] sender: [1:112:2057] recipient: [1:108:2139] Leader for TabletID 9437184 is [1:119:2146] sender: [1:120:2057] recipient: [1:107:2138] Leader for TabletID 9437185 is [1:121:2147] sender: [1:122:2057] recipient: [1:108:2139] Leader for TabletID 9437184 is [1:119:2146] sender: [1:157:2057] recipient: [1:14:2061] Leader for TabletID 9437185 is [1:121:2147] sender: [1:159:2057] recipient: [1:14:2061] Leader for TabletID 9437185 is [1:121:2147] sender: [1:162:2057] recipient: [1:104:2137] Leader for TabletID 9437185 is [1:121:2147] sender: [1:164:2057] recipient: [1:14:2061] Leader for TabletID 9437185 is [1:121:2147] sender: [1:166:2057] recipient: [1:165:2175] Leader for TabletID 9437185 is [1:167:2176] sender: [1:168:2057] recipient: [1:165:2175] Leader for TabletID 9437185 is [1:167:2176] sender: [1:196:2057] recipient: [1:14:2061] Leader for TabletID 9437184 is [1:119:2146] sender: [1:199:2057] recipient: [1:103:2136] Leader for TabletID 9437184 is [1:119:2146] sender: [1:202:2057] recipient: [1:14:2061] Leader for TabletID 9437184 is [1:119:2146] sender: [1:203:2057] recipient: [1:201:2199] Leader for TabletID 9437184 is [1:204:2200] sender: [1:205:2057] recipient: [1:201:2199] Leader for TabletID 9437184 is [1:204:2200] sender: [1:234:2057] recipient: [1:14:2061] |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestOpen [GOOD] >> TResourceBroker::TestCounters >> TResourceBroker::TestUpdateCookie [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TFlatMetrics::DecayingAverageAvg [GOOD] Test command err: ... waiting for all block results ... passing block result OK for [1:104:2136] ... blocking block result NO_GROUP for [1:105:2136] ... blocking block result NO_GROUP for [1:106:2136] ... blocking block result NO_GROUP for [1:107:2136] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::ServerlessWithExclusiveNodesWhenTroublesWithSharedNodes [GOOD] Test command err: 2025-06-24T21:26:11.847160Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:11.847870Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:11.847974Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:11.848031Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:11.848337Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:11.848375Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043d0/r3tmp/tmp98cfQR/pdisk_1.dat 2025-06-24T21:26:12.178277Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12464, node 1 TClient is connected to server localhost:29678 2025-06-24T21:26:12.556593Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:12.556653Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:12.556686Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:12.557090Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:19.538835Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:446:2372], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:19.539293Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:19.539494Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:19.540907Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:19.541252Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:19.541358Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043d0/r3tmp/tmp5NLDkn/pdisk_1.dat 2025-06-24T21:26:19.808026Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10428, node 3 TClient is connected to server localhost:29743 2025-06-24T21:26:20.139666Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:20.139739Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:20.139779Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:20.140267Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:24.374658Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:24.375002Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:24.375087Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043d0/r3tmp/tmpnEM67t/pdisk_1.dat 2025-06-24T21:26:24.651976Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20619, node 5 TClient is connected to server localhost:4263 2025-06-24T21:26:24.992545Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:24.992601Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:24.992629Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:24.992960Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:28.987256Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:271:2314], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:28.987909Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:28.988002Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043d0/r3tmp/tmpqJZDRQ/pdisk_1.dat 2025-06-24T21:26:29.316416Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 29555, node 7 TClient is connected to server localhost:20297 2025-06-24T21:26:34.017479Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:34.017788Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:34.017928Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043d0/r3tmp/tmpx5VG9v/pdisk_1.dat 2025-06-24T21:26:34.333338Z node 8 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5684, node 8 TClient is connected to server localhost:31413 2025-06-24T21:26:34.734507Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:34.734587Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:34.734632Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:34.735442Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> BootstrapperTest::KeepExistingTablet [GOOD] >> BootstrapperTest::DuplicateNodes >> TTabletPipeTest::TestShutdown [GOOD] >> THealthCheckTest::Issues100VCardMerging [GOOD] >> THealthCheckTest::LayoutCorrect >> THealthCheckTest::CLusterNotBootstrapped [GOOD] >> TTabletPipeTest::TestRewriteSameNode [GOOD] >> TResourceBroker::TestCounters [GOOD] >> TResourceBroker::TestChangeTaskType |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestUpdateCookie [GOOD] >> TPipeCacheTest::TestIdleRefresh >> BootstrapperTest::MultipleBootstrappers [GOOD] |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestShutdown [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::SharedWhenTroublesWithExclusiveNodes [GOOD] Test command err: 2025-06-24T21:26:11.334086Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:11.334756Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:11.334860Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:11.334915Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:11.335364Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:11.335415Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043c7/r3tmp/tmpaCgiLf/pdisk_1.dat 2025-06-24T21:26:11.707828Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28495, node 1 TClient is connected to server localhost:2516 2025-06-24T21:26:12.082199Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:12.082281Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:12.082319Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:12.082735Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: DEGRADED issue_log { id: "YELLOW-70fb-1231c6b1" status: YELLOW message: "Database has multiple issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" reason: "YELLOW-5321-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-1" reason: "YELLOW-e9e2-1231c6b1-2" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-1" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 1 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-2" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 2 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-5321-1231c6b1" status: YELLOW message: "Storage degraded" location { database { name: "/Root" } } reason: "YELLOW-595f-1231c6b1-80c02825" type: "STORAGE" level: 2 } issue_log { id: "YELLOW-595f-1231c6b1-80c02825" status: YELLOW message: "Pool degraded" location { storage { pool { name: "static" } } database { name: "/Root" } } reason: "YELLOW-ef3e-1231c6b1-0" type: "STORAGE_POOL" level: 3 } issue_log { id: "RED-4847-1231c6b1-1-0-3-55-0-55" status: RED message: "VDisk is not available" location { storage { node { id: 1 host: "::1" port: 12001 } pool { name: "static" group { vdisk { id: "0-3-55-0-55" } } } } database { name: "/Root" } } type: "VDISK" level: 5 } issue_log { id: "YELLOW-ef3e-1231c6b1-0" 
status: YELLOW message: "Group degraded" location { storage { pool { name: "static" group { id: "0" } } } database { name: "/Root" } } reason: "RED-4847-1231c6b1-1-0-3-55-0-55" type: "STORAGE_GROUP" level: 4 } location { id: 1 host: "::1" port: 12001 } 2025-06-24T21:26:18.602397Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:446:2372], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:18.602865Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:18.603089Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:18.604955Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:18.605439Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:18.605588Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043c7/r3tmp/tmp1A8CdO/pdisk_1.dat 2025-06-24T21:26:18.943583Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27783, node 3 TClient is connected to server localhost:9582 2025-06-24T21:26:19.310705Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:19.310759Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:19.310791Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:19.311360Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:23.826381Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:23.826696Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:23.826786Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043c7/r3tmp/tmpvpTNXz/pdisk_1.dat 2025-06-24T21:26:24.105086Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1716, node 5 TClient is connected to server localhost:30890 2025-06-24T21:26:24.468383Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:24.468452Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:24.468494Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:24.469218Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:28.977388Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:493:2375], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:28.977720Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:28.977933Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043c7/r3tmp/tmpwcJdfw/pdisk_1.dat 2025-06-24T21:26:29.366221Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20830, node 7 TClient is connected to server localhost:2353 2025-06-24T21:26:29.799512Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:29.799588Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:29.799637Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:29.800489Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:34.336348Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:417:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:34.336617Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:34.336781Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043c7/r3tmp/tmpB7ZtDz/pdisk_1.dat 2025-06-24T21:26:34.658626Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62266, node 10 TClient is connected to server localhost:10885 2025-06-24T21:26:35.094582Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:35.094662Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:35.094707Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:35.095082Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestRewriteSameNode [GOOD] >> BootstrapperTest::RestartUnavailableTablet [GOOD] >> BootstrapperTest::UnavailableStateStorage >> TResourceBroker::TestChangeTaskType [GOOD] >> TPipeCacheTest::TestIdleRefresh [GOOD] >> TPipeCacheTest::TestTabletNode ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> BootstrapperTest::MultipleBootstrappers [GOOD] Test command err: ... waiting for pipe to connect ... stopping current instance ... waiting for pipe to disconnect ... waiting for pipe to connect ... 
sleeping for 2 seconds 2025-06-24T21:26:33.040296Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-24T21:26:33.040379Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-24T21:26:33.040450Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-24T21:26:33.041392Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: NODATA, leader: [0:0:0] 2025-06-24T21:26:33.041443Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 8427358873417017059 2025-06-24T21:26:33.041809Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: NODATA, leader: [0:0:0] 2025-06-24T21:26:33.041843Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 314095936534775797 2025-06-24T21:26:33.041893Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: NODATA, leader: [0:0:0] 2025-06-24T21:26:33.041931Z node 3 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 4772764162469967008 2025-06-24T21:26:33.042945Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: UNKNOWN 2025-06-24T21:26:33.043069Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: FREE 2025-06-24T21:26:33.043242Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: FREE 2025-06-24T21:26:33.043294Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: FREE 2025-06-24T21:26:33.043345Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: FREE 2025-06-24T21:26:33.043377Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:499: tablet: 9437184, type: Dummy, lost round, wait for 0.106295s 2025-06-24T21:26:33.043491Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 5 state: FREE 2025-06-24T21:26:33.043522Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:499: tablet: 9437184, type: Dummy, lost round, wait for 0.101463s 2025-06-24T21:26:33.043575Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 5 state: FREE 2025-06-24T21:26:33.043602Z node 4 :BOOTSTRAPPER NOTICE: bootstrapper.cpp:680: tablet: 9437184, type: Dummy, boot 2025-06-24T21:26:33.248511Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-24T21:26:33.249247Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:283:2097] 2025-06-24T21:26:33.249723Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-06-24T21:26:33.249774Z node 3 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting 2025-06-24T21:26:33.260081Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-24T21:26:33.260765Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:283:2097] 2025-06-24T21:26:33.261258Z node 5 
:BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-06-24T21:26:33.261296Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting ... waiting for pipe to connect ... tablet initially started on node 4 (idx 2) in gen 2 ... disconnecting other nodes ... sleeping for 2 seconds (tablet expected to survive) 2025-06-24T21:26:34.196347Z node 4 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437184] NodeDisconnected NodeId# 3 2025-06-24T21:26:34.196411Z node 4 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437184] NodeDisconnected NodeId# 5 2025-06-24T21:26:34.196776Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:303: tablet: 9437184, type: Dummy, disconnected 2025-06-24T21:26:34.196831Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-24T21:26:34.196940Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:303: tablet: 9437184, type: Dummy, disconnected 2025-06-24T21:26:34.196958Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-24T21:26:34.199341Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:283:2097] 2025-06-24T21:26:34.199608Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:283:2097] 2025-06-24T21:26:34.201012Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-06-24T21:26:34.201084Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting 2025-06-24T21:26:34.201234Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-06-24T21:26:34.201256Z node 3 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting ... disconnecting other nodes (new tablet connections fail) ... sleeping for 2 seconds (tablet expected to survive) 2025-06-24T21:26:34.988784Z node 4 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437184] NodeDisconnected NodeId# 3 2025-06-24T21:26:34.988874Z node 4 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437184] NodeDisconnected NodeId# 5 2025-06-24T21:26:34.989026Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:303: tablet: 9437184, type: Dummy, disconnected 2025-06-24T21:26:34.989098Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-24T21:26:34.989175Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:303: tablet: 9437184, type: Dummy, disconnected 2025-06-24T21:26:34.989215Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-24T21:26:34.991517Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:283:2097] 2025-06-24T21:26:34.991768Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:283:2097] ... disconnecting nodes 2 <-> 1 (tablet connect attempt) ... blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 ... disconnecting nodes 2 <-> 3 (tablet connect attempt) ... 
blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 2025-06-24T21:26:34.992606Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: ERROR 2025-06-24T21:26:34.992660Z node 3 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 10053858333920509680 2025-06-24T21:26:34.992831Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: ERROR 2025-06-24T21:26:34.992859Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 11851482555838222794 2025-06-24T21:26:34.993473Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: OWNER 2025-06-24T21:26:34.993528Z node 3 :BOOTSTRAPPER INFO: bootstrapper.cpp:571: tablet: 9437184, type: Dummy, become watch on node 4 (owner) 2025-06-24T21:26:34.993667Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: FREE 2025-06-24T21:26:34.993800Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: WAITFOR 2025-06-24T21:26:34.993827Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:571: tablet: 9437184, type: Dummy, become watch on node 3 ... disconnect other nodes (new owner expected) ... sleeping for 2 seconds (new tablet expected to start once) 2025-06-24T21:26:35.769190Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:643: tablet: 9437184, type: Dummy, disconnected from 4, round 16045690984833335029 2025-06-24T21:26:35.769280Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:643: tablet: 9437184, type: Dummy, disconnected from 4, round 16045690984833335029 2025-06-24T21:26:35.769346Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-24T21:26:35.769634Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-24T21:26:35.770402Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:283:2097] 2025-06-24T21:26:35.770864Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:283:2097] ... disconnecting nodes 2 <-> 1 (tablet connect attempt) ... blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 2025-06-24T21:26:35.771309Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: ERROR 2025-06-24T21:26:35.771367Z node 3 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 8470239763125230813 ... disconnecting nodes 2 <-> 3 (tablet connect attempt) ... blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 ... disconnecting nodes 2 <-> 1 (bootstrap watch attempt) ... 
blocking NKikimr::TEvBootstrapper::TEvWatch from TABLET_BOOTSTRAPPER to TABLET_BOOTSTRAPPER cookie 16045690984833335031 2025-06-24T21:26:35.771888Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: ERROR 2025-06-24T21:26:35.771931Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 6622044195218853944 2025-06-24T21:26:35.772031Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 5 state: UNKNOWN 2025-06-24T21:26:35.772177Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:403: tablet: 9437184, type: Dummy, disconnected from 4, round 16045690984833335031 2025-06-24T21:26:35.772219Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: DISCONNECTED 2025-06-24T21:26:35.772260Z node 3 :BOOTSTRAPPER NOTICE: bootstrapper.cpp:680: tablet: 9437184, type: Dummy, boot ... disconnecting nodes 2 <-> 3 (bootstrap watch attempt) ... blocking NKikimr::TEvBootstrapper::TEvWatch from TABLET_BOOTSTRAPPER to TABLET_BOOTSTRAPPER cookie 16045690984833335031 2025-06-24T21:26:35.772848Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: OWNER 2025-06-24T21:26:35.772884Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:571: tablet: 9437184, type: Dummy, become watch on node 3 (owner) 2025-06-24T21:26:35.772954Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:643: tablet: 9437184, type: Dummy, disconnected from 4, round 16045690984833335031 2025-06-24T21:26:35.795725Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:715: tablet: 9437184, type: Dummy, tablet dead 2025-06-24T21:26:35.795817Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-24T21:26:35.799105Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [3:433:2097] 2025-06-24T21:26:35.814245Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-06-24T21:26:35.814312Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting ... waiting for pipe to connect ... disconnecting nodes 2 <-> 0 (tablet connect attempt) ... 
blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to cookie 1 >> TPipeCacheTest::TestTabletNode [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::CLusterNotBootstrapped [GOOD] Test command err: 2025-06-24T21:26:06.135323Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630315548216129:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:26:06.135430Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e5/r3tmp/tmpm1dKI3/pdisk_1.dat 2025-06-24T21:26:06.573723Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:26:06.597294Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630315548216095:2079] 1750800366121016 != 1750800366121019 2025-06-24T21:26:06.614676Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:26:06.614794Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:26:06.617538Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6053, node 1 2025-06-24T21:26:06.807013Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:06.807049Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:06.807068Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:06.807217Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:07.141172Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3833 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:26:07.444062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:26:08.982504Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630325229256853:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:26:08.982601Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e5/r3tmp/tmp8dbTpJ/pdisk_1.dat 2025-06-24T21:26:09.106971Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:26:09.108811Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630325229256834:2079] 1750800368981917 != 1750800368981920 TServer::EnableGrpc on GrpcPort 25729, node 2 2025-06-24T21:26:09.129286Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:26:09.129384Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:26:09.131177Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:26:09.153585Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:09.153609Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:09.153615Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:09.153722Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25747 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:26:09.329383Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:26:17.636486Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:446:2372], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:17.636905Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:17.637078Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:17.638550Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:17.638856Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:17.638977Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e5/r3tmp/tmpJvojms/pdisk_1.dat 2025-06-24T21:26:17.925133Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5436, node 3 TClient is connected to server localhost:14299 2025-06-24T21:26:18.259431Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:18.259485Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:18.259518Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:18.259881Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:24.692925Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:24.693378Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:24.693495Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:24.693705Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:24.693868Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:24.694058Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e5/r3tmp/tmpwM97VL/pdisk_1.dat 2025-06-24T21:26:24.987870Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18512, node 5 TClient is connected to server localhost:8885 2025-06-24T21:26:25.359014Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:25.359078Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:25.359119Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:25.360048Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-5" reason: "YELLOW-e9e2-1231c6b1-6" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-6" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 6 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 5 host: "::1" port: 12001 } 2025-06-24T21:26:29.881172Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:493:2375], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:29.881487Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:29.881622Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e5/r3tmp/tmp5lYtz1/pdisk_1.dat 2025-06-24T21:26:30.200309Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20089, node 7 TClient is connected to server localhost:13518 2025-06-24T21:26:30.619431Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:30.619521Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:30.619567Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:30.620247Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:35.334195Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:417:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:35.334411Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:35.334570Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e5/r3tmp/tmphuMEQ9/pdisk_1.dat 2025-06-24T21:26:35.703162Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61956, node 10 TClient is connected to server localhost:22011 2025-06-24T21:26:36.116738Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:36.116797Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:36.116834Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:36.117407Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestChangeTaskType [GOOD] >> BootstrapperTest::DuplicateNodes [GOOD] |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TPipeCacheTest::TestTabletNode [GOOD] >> BootstrapperTest::UnavailableStateStorage [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> BootstrapperTest::DuplicateNodes [GOOD] Test command err: ... waiting for pipe to connect ... sleeping (original instance should be preserved) ... waiting for original instance to stop ... waiting for original instance to stop (done) ... 
waiting for pipe to connect 2025-06-24T21:26:37.193424Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-24T21:26:37.193515Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-24T21:26:37.193979Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: NODATA, leader: [0:0:0] 2025-06-24T21:26:37.194012Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 2303809724928703835 2025-06-24T21:26:37.194183Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: NODATA, leader: [0:0:0] 2025-06-24T21:26:37.194212Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 13151740404452589043 2025-06-24T21:26:37.194785Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 5 state: UNKNOWN 2025-06-24T21:26:37.194840Z node 4 :BOOTSTRAPPER NOTICE: bootstrapper.cpp:680: tablet: 9437184, type: Dummy, boot 2025-06-24T21:26:37.195137Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: OWNER 2025-06-24T21:26:37.195187Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:571: tablet: 9437184, type: Dummy, become watch on node 4 (owner) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> BootstrapperTest::UnavailableStateStorage [GOOD] Test command err: ... waiting for pipe to connect ... waiting for blocked connect attempt ... blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 ... waiting for blocked connect attempt (done) ... disconnecting nodes 2 <-> 1 ... waiting for pipe to disconnect ... waiting for pipe to connect ... waiting for pipe to connect ... waiting for multiple state storage lookup attempts 2025-06-24T21:26:37.985392Z node 6 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) ... disconnecting nodes 2 <-> 0 ({EvReplicaLookup TabletID: 9437184 Cookie: 0} for [4:3:2050]) ... blocking NKikimr::TEvStateStorage::TEvReplicaLookup from SS_PROXY_REQUEST to SS_REPLICA cookie 0 ... disconnecting nodes 2 <-> 0 ({EvReplicaLookup TabletID: 9437184 Cookie: 1} for [4:6:2053]) ... blocking NKikimr::TEvStateStorage::TEvReplicaLookup from SS_PROXY_REQUEST to SS_REPLICA cookie 1 ... disconnecting nodes 2 <-> 0 ({EvReplicaLookup TabletID: 9437184 Cookie: 2} for [4:9:2056]) ... blocking NKikimr::TEvStateStorage::TEvReplicaLookup from SS_PROXY_REQUEST to SS_REPLICA cookie 2 2025-06-24T21:26:37.986178Z node 6 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: ERROR, leader: [0:0:0] 2025-06-24T21:26:37.986231Z node 6 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:242: tablet: 9437184, type: Dummy, state storage unavailable, sleeping for 0.182540s 2025-06-24T21:26:38.166945Z node 6 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) ... disconnecting nodes 2 <-> 0 ({EvReplicaLookup TabletID: 9437184 Cookie: 0} for [4:3:2050]) ... blocking NKikimr::TEvStateStorage::TEvReplicaLookup from SS_PROXY_REQUEST to SS_REPLICA cookie 0 ... disconnecting nodes 2 <-> 0 ({EvReplicaLookup TabletID: 9437184 Cookie: 1} for [4:6:2053]) ... 
blocking NKikimr::TEvStateStorage::TEvReplicaLookup from SS_PROXY_REQUEST to SS_REPLICA cookie 1 ... disconnecting nodes 2 <-> 0 ({EvReplicaLookup TabletID: 9437184 Cookie: 2} for [4:9:2056]) ... blocking NKikimr::TEvStateStorage::TEvReplicaLookup from SS_PROXY_REQUEST to SS_REPLICA cookie 2 ... waiting for multiple state storage lookup attempts (done) >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_RegisteredSourceId_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_PreferedPartition_Test >> TTabletPipeTest::TestSendBeforeBootTarget >> TReplicaTest::Handshake >> TReplicaTest::Merge >> TReplicaTest::Unsubscribe >> TReplicaTest::Update >> TReplicaTest::HandshakeWithStaleGeneration >> TReplicaCombinationTest::UpdatesCombinationsDomainRoot >> TReplicaTest::CommitWithoutHandshake >> TReplicaTest::UpdateWithoutHandshake >> TReplicaTest::Subscribe >> TReplicaTest::Merge [GOOD] >> TReplicaTest::IdempotencyUpdatesWithoutSubscribers >> TReplicaTest::Unsubscribe [GOOD] >> TReplicaTest::UnsubscribeUnknownPath >> TReplicaTest::UpdateWithoutHandshake [GOOD] >> TReplicaTest::UpdateWithStaleGeneration >> TReplicaTest::Update [GOOD] >> TReplicaTest::UnsubscribeWithoutSubscribe >> TReplicaTest::Subscribe [GOOD] >> TReplicaTest::SubscribeUnknownPath >> TReplicaTest::HandshakeWithStaleGeneration [GOOD] >> TReplicaTest::IdempotencyUpdatesAliveSubscriber >> TReplicaCombinationTest::UpdatesCombinationsDomainRoot [GOOD] >> TReplicaCombinationTest::UpdatesCombinationsMigratedPath >> TReplicaTest::CommitWithoutHandshake [GOOD] >> TReplicaTest::CommitWithStaleGeneration >> TReplicaTest::Handshake [GOOD] >> TReplicaTest::DoubleUnsubscribe >> TReplicaTest::Commit >> THealthCheckTest::IgnoreServerlessWhenNotSpecific [GOOD] >> THealthCheckTest::HealthCheckConfigUpdate >> TReplicaTest::CommitWithStaleGeneration [GOOD] >> TReplicaTest::Delete >> TReplicaTest::DoubleUnsubscribe [GOOD] >> TReplicaTest::DoubleDelete >> TReplicaTest::IdempotencyUpdatesWithoutSubscribers [GOOD] >> TReplicaTest::StrongNotificationAfterCommit >> TReplicaTest::UnsubscribeUnknownPath [GOOD] >> TReplicaTest::UpdateWithStaleGeneration [GOOD] >> TReplicaTest::UnsubscribeWithoutSubscribe [GOOD] >> TReplicaTest::SubscribeUnknownPath [GOOD] >> TReplicaTest::SyncVersion >> TReplicaTest::IdempotencyUpdatesAliveSubscriber [GOOD] >> TReplicaTest::IdempotencyUpdatesVariant2 >> TReplicaCombinationTest::UpdatesCombinationsMigratedPath [GOOD] >> TReplicaCombinationTest::MigratedPathRecreation >> TReplicaTest::Commit [GOOD] >> TReplicaTest::AckNotifications >> TReplicaTest::IdempotencyUpdatesVariant2 [GOOD] >> TReplicaCombinationTest::MigratedPathRecreation [GOOD] >> TReplicaTest::Delete [GOOD] >> TReplicaTest::DoubleDelete [GOOD] >> TReplicaTest::StrongNotificationAfterCommit [GOOD] >> TReplicaTest::SyncVersion [GOOD] >> THealthCheckTest::LayoutIncorrect [GOOD] >> TReplicaTest::AckNotifications [GOOD] >> TReplicaTest::AckNotificationsUponPathRecreation ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::UnsubscribeUnknownPath [GOOD] Test command err: 2025-06-24T21:26:40.674960Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-24T21:26:40.675050Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-24T21:26:40.675184Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: 
[1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:9:2056] 2025-06-24T21:26:40.675225Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7:2054] Upsert description: path# path 2025-06-24T21:26:40.680141Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:9:2056], path# path, domainOwnerId# 0, capabilities# 2025-06-24T21:26:40.680289Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:10:2057] 2025-06-24T21:26:40.680341Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:10:2057], path# path, domainOwnerId# 0, capabilities# 2025-06-24T21:26:40.680466Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:40.680504Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-24T21:26:40.686487Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-24T21:26:40.688459Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [1:9:2056] 2025-06-24T21:26:40.688512Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7:2054] Unsubscribe: subscriber# [1:9:2056], path# path 2025-06-24T21:26:40.688599Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 40 2025-06-24T21:26:40.688641Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-06-24T21:26:40.688671Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [1:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:40.935426Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:8:2055] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::UpdateWithStaleGeneration [GOOD] Test command err: 2025-06-24T21:26:40.678570Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:40.678620Z node 1 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:797: [1:7:2054] Reject update from unknown populator: sender# [1:8:2055], owner# 1, generation# 1 2025-06-24T21:26:40.678705Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:8:2055] 2025-06-24T21:26:40.678735Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7:2054] Upsert description: path# path 2025-06-24T21:26:40.680107Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-06-24T21:26:40.680241Z 
node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [1:8:2055] 2025-06-24T21:26:40.681401Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7:2054] Unsubscribe: subscriber# [1:8:2055], path# path 2025-06-24T21:26:40.681485Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [1:8:2055] 2025-06-24T21:26:40.681532Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7:2054] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:40.681591Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:8:2055], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-24T21:26:40.681686Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { PathId: [OwnerId: 1, LocalPathId: 1] }: sender# [1:8:2055] 2025-06-24T21:26:40.681737Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7:2054] Unsubscribe: subscriber# [1:8:2055], path# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:40.944919Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2025-06-24T21:26:40.945002Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-24T21:26:40.945124Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 0 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:40.945172Z node 2 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:805: [2:7:2054] Reject update from stale populator: sender# [2:8:2055], owner# 1, generation# 0, pending generation# 1 2025-06-24T21:26:40.945237Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [2:8:2055] 2025-06-24T21:26:40.945265Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7:2054] Upsert description: path# path 2025-06-24T21:26:40.945331Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-06-24T21:26:40.945418Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:8:2055] 2025-06-24T21:26:40.945459Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [2:7:2054] Unsubscribe: subscriber# [2:8:2055], path# path 2025-06-24T21:26:40.945509Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [2:8:2055] 2025-06-24T21:26:40.945538Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7:2054] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:40.945579Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:8:2055], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-24T21:26:40.945659Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { PathId: [OwnerId: 1, LocalPathId: 1] }: sender# [2:8:2055] 2025-06-24T21:26:40.945696Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [2:7:2054] Unsubscribe: subscriber# 
[2:8:2055], path# [OwnerId: 1, LocalPathId: 1] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::UnsubscribeWithoutSubscribe [GOOD] Test command err: 2025-06-24T21:26:40.672517Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-24T21:26:40.672567Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-24T21:26:40.675813Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:40.675862Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-24T21:26:40.680405Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-24T21:26:40.680565Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:8:2055] 2025-06-24T21:26:40.680645Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-06-24T21:26:40.680761Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [1:8:2055] 2025-06-24T21:26:40.681412Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7:2054] Unsubscribe: subscriber# [1:8:2055], path# path 2025-06-24T21:26:40.681521Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [1:8:2055] 2025-06-24T21:26:40.681584Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:8:2055], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-24T21:26:40.681678Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { PathId: [OwnerId: 1, LocalPathId: 1] }: sender# [1:8:2055] 2025-06-24T21:26:40.681724Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7:2054] Unsubscribe: subscriber# [1:8:2055], path# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:40.926403Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2025-06-24T21:26:40.926509Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-24T21:26:40.926610Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:40.926662Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-24T21:26:40.926707Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 
1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-24T21:26:40.926783Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:8:2055] >> ResultFormatter::FormatEmptySchema >> ResultFormatter::EmptyResultSet [GOOD] >> ResultFormatter::EmptyList >> ResultFormatter::Optional [GOOD] >> ResultFormatter::Pg >> ResultFormatter::Primitive [GOOD] >> ResultFormatter::Struct [GOOD] >> TReplicaTest::AckNotificationsUponPathRecreation [GOOD] >> ResultFormatter::List [GOOD] >> ResultFormatter::Null [GOOD] >> ResultFormatter::FormatEmptySchema [GOOD] >> ResultFormatter::FormatNonEmptySchema [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::IdempotencyUpdatesVariant2 [GOOD] >> ResultFormatter::EmptyList [GOOD] Test command err: 2025-06-24T21:26:40.673000Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 2 }: sender# [1:8:2055] 2025-06-24T21:26:40.673076Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 2 2025-06-24T21:26:40.673177Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-24T21:26:40.673214Z node 1 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:763: [1:7:2054] Reject handshake from stale populator: sender# [1:8:2055], owner# 1, generation# 1, pending generation# 2 2025-06-24T21:26:40.929079Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2025-06-24T21:26:40.929141Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-24T21:26:40.929281Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [2:9:2056] 2025-06-24T21:26:40.929336Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7:2054] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:40.929465Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:9:2056], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-24T21:26:40.929606Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:40.929647Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-24T21:26:40.934321Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-24T21:26:40.934499Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 40 
2025-06-24T21:26:40.934525Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-06-24T21:26:40.934553Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [2:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:40.934633Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:40.934663Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-24T21:26:40.934686Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:834: [2:7:2054] Path was explicitly deleted, ignoring: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:40.934746Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:40.934784Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2025-06-24T21:26:40.934846Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 2], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-24T21:26:40.934952Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 2] DomainOwnerId: 0 }: sender# [2:10:2057] 2025-06-24T21:26:40.935010Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:10:2057], path# [OwnerId: 1, LocalPathId: 2], domainOwnerId# 0, capabilities# 2025-06-24T21:26:41.191621Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-24T21:26:41.191673Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-24T21:26:41.191779Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:41.191815Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-24T21:26:41.191908Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-24T21:26:41.191985Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:41.192010Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2025-06-24T21:26:41.192035Z node 3 :SCHEME_BOARD_REPLICA INFO: 
replica.cpp:575: [3:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:41.192073Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 2], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-24T21:26:41.192142Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 40 2025-06-24T21:26:41.192169Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# true 2025-06-24T21:26:41.192190Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 2] 2025-06-24T21:26:41.192241Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:41.192267Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-24T21:26:41.192290Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:834: [3:7:2054] Path was explicitly deleted, ignoring: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:41.192352Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:41.192382Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2025-06-24T21:26:41.192408Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:834: [3:7:2054] Path was explicitly deleted, ignoring: path# path, pathId# [OwnerId: 1, LocalPathId: 2] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::Delete [GOOD] Test command err: 2025-06-24T21:26:40.672138Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-24T21:26:40.672249Z node 1 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:969: [1:7:2054] Reject commit from unknown populator: sender# [1:8:2055], owner# 1, generation# 1 2025-06-24T21:26:40.672318Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-24T21:26:40.672369Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-24T21:26:40.928840Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 0 }: sender# [2:8:2055] 2025-06-24T21:26:40.928914Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 0 2025-06-24T21:26:40.928979Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:9:2056] 2025-06-24T21:26:40.929018Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: 
owner# 1, generation# 1 2025-06-24T21:26:40.929068Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [2:9:2056] 2025-06-24T21:26:40.929101Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [2:7:2054] Commit generation: owner# 1, generation# 1 2025-06-24T21:26:40.929135Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 0 }: sender# [2:8:2055] 2025-06-24T21:26:40.929170Z node 2 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:979: [2:7:2054] Reject commit from stale populator: sender# [2:8:2055], owner# 1, generation# 0, pending generation# 1 2025-06-24T21:26:40.929205Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 2 }: sender# [2:8:2055] 2025-06-24T21:26:40.929227Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 2 2025-06-24T21:26:41.187587Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-24T21:26:41.187633Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-24T21:26:41.187735Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:41.187764Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 42, LocalPathId: 1], deletion# false 2025-06-24T21:26:41.192920Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 42, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 42, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-24T21:26:41.193094Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:9:2056] 2025-06-24T21:26:41.193176Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 0, capabilities# 2025-06-24T21:26:41.193305Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 42, LocalPathId: 1] DomainOwnerId: 0 }: sender# [3:10:2057] 2025-06-24T21:26:41.193346Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:10:2057], path# [OwnerId: 42, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-24T21:26:41.193430Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 40 2025-06-24T21:26:41.193453Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 42, LocalPathId: 1], deletion# true 2025-06-24T21:26:41.193473Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:7:2054] Delete description: path# path, pathId# [OwnerId: 42, LocalPathId: 1] 2025-06-24T21:26:41.193565Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path 
DomainOwnerId: 0 }: sender# [3:11:2058] 2025-06-24T21:26:41.193600Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:11:2058], path# path, domainOwnerId# 0, capabilities# 2025-06-24T21:26:41.193679Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 42, LocalPathId: 1] DomainOwnerId: 0 }: sender# [3:12:2059] 2025-06-24T21:26:41.193721Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:12:2059], path# [OwnerId: 42, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-24T21:26:41.193814Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:13:2060] 2025-06-24T21:26:41.193857Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:13:2060], path# path, domainOwnerId# 0, capabilities# ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::DoubleDelete [GOOD] Test command err: 2025-06-24T21:26:40.673096Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-24T21:26:40.673168Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-24T21:26:40.925567Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2025-06-24T21:26:40.925636Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-24T21:26:40.925754Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:40.925783Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-24T21:26:40.929902Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-24T21:26:40.930026Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [2:8:2055] 2025-06-24T21:26:40.930104Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-06-24T21:26:40.930210Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:8:2055] 2025-06-24T21:26:40.930263Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [2:7:2054] Unsubscribe: subscriber# [2:8:2055], path# path 2025-06-24T21:26:40.930302Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:8:2055] 2025-06-24T21:26:41.205430Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-24T21:26:41.205493Z node 3 
:SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-24T21:26:41.205623Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:9:2056] 2025-06-24T21:26:41.205663Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:7:2054] Upsert description: path# path 2025-06-24T21:26:41.205728Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 0, capabilities# 2025-06-24T21:26:41.205900Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:41.205941Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-24T21:26:41.205998Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-24T21:26:41.206189Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 40 2025-06-24T21:26:41.206226Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-06-24T21:26:41.206258Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:41.206402Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:10:2057] 2025-06-24T21:26:41.206467Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:10:2057], path# path, domainOwnerId# 0, capabilities# 2025-06-24T21:26:41.206556Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 40 2025-06-24T21:26:41.206597Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::StrongNotificationAfterCommit [GOOD] Test command err: 2025-06-24T21:26:40.674677Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:9:2056] 2025-06-24T21:26:40.674750Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7:2054] Upsert description: path# path 2025-06-24T21:26:40.680116Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:9:2056], path# path, domainOwnerId# 0, capabilities# 2025-06-24T21:26:40.680316Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [1:10:2057] 2025-06-24T21:26:40.680384Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7:2054] Upsert 
description: path# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:40.680461Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:10:2057], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-24T21:26:40.680540Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-24T21:26:40.680579Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-24T21:26:40.680688Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:40.680726Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-24T21:26:40.687395Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-24T21:26:40.688469Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 40 2025-06-24T21:26:40.688514Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-06-24T21:26:40.688566Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [1:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:40.925698Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2025-06-24T21:26:40.925796Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-24T21:26:40.925878Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [2:9:2056] 2025-06-24T21:26:40.925904Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7:2054] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:40.925952Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:9:2056], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-24T21:26:40.926077Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:40.926110Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-24T21:26:40.926152Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-24T21:26:40.926253Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle 
NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 40 2025-06-24T21:26:40.926302Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-06-24T21:26:40.926346Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [2:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:40.926434Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { PathId: [OwnerId: 1, LocalPathId: 1] }: sender# [2:9:2056] 2025-06-24T21:26:40.926487Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [2:7:2054] Unsubscribe: subscriber# [2:9:2056], path# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:40.926537Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:40.926575Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-24T21:26:40.926610Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:834: [2:7:2054] Path was explicitly deleted, ignoring: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:40.926667Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:40.926695Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2025-06-24T21:26:40.926736Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 2], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-24T21:26:40.926820Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 2] DomainOwnerId: 0 }: sender# [2:10:2057] 2025-06-24T21:26:40.926868Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:10:2057], path# [OwnerId: 1, LocalPathId: 2], domainOwnerId# 0, capabilities# 2025-06-24T21:26:41.205782Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 1 }: sender# [3:9:2056] 2025-06-24T21:26:41.205836Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:7:2054] Upsert description: path# path 2025-06-24T21:26:41.205907Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 1, capabilities# 2025-06-24T21:26:41.206041Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-24T21:26:41.206081Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-24T21:26:41.206161Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [3:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-24T21:26:41.206196Z node 3 :SCHEME_BOARD_REPLICA NOTICE: 
replica.cpp:985: [3:7:2054] Commit generation: owner# 1, generation# 1 2025-06-24T21:26:41.206308Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:997: [3:7:2054] Handle NKikimr::NSchemeBoard::TReplica::TEvPrivate::TEvSendStrongNotifications { Owner: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::SyncVersion [GOOD] Test command err: 2025-06-24T21:26:40.698846Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-24T21:26:40.698921Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-24T21:26:40.699097Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:40.699139Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-24T21:26:40.705183Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-24T21:26:40.705365Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:8:2055] 2025-06-24T21:26:40.705468Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-06-24T21:26:40.705592Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 40 2025-06-24T21:26:40.705632Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-06-24T21:26:40.705671Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [1:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:40.959977Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [2:8:2055] 2025-06-24T21:26:40.960057Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7:2054] Upsert description: path# path 2025-06-24T21:26:40.960114Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-06-24T21:26:41.229935Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-24T21:26:41.229980Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-24T21:26:41.230119Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 76 2025-06-24T21:26:41.230152Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 
2025-06-24T21:26:41.230197Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 100500, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 32} 2025-06-24T21:26:41.230264Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:8:2055] 2025-06-24T21:26:41.230309Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-06-24T21:26:41.230399Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [3:7:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [3:8:2055], cookie# 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaCombinationTest::MigratedPathRecreation [GOOD] Test command err: 2025-06-24T21:26:40.672386Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:8:2055] 2025-06-24T21:26:40.672451Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 800, generation# 1 2025-06-24T21:26:40.672533Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:8:2055] 2025-06-24T21:26:40.672561Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:7:2054] Commit generation: owner# 800, generation# 1 2025-06-24T21:26:40.672635Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:9:2056] 2025-06-24T21:26:40.672668Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 800, generation# 1 2025-06-24T21:26:40.672713Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:9:2056] 2025-06-24T21:26:40.672731Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:7:2054] Commit generation: owner# 800, generation# 1 2025-06-24T21:26:40.676601Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 103 2025-06-24T21:26:40.676680Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2025-06-24T21:26:40.680888Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:7:2054] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-06-24T21:26:40.681098Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:9:2056], cookie# 0, event size# 103 2025-06-24T21:26:40.681146Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 
2], deletion# false 2025-06-24T21:26:40.681200Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:7:2054] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-06-24T21:26:40.681316Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant DomainOwnerId: 0 }: sender# [1:10:2057] 2025-06-24T21:26:40.681400Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:10:2057], path# /Root/Tenant, domainOwnerId# 0, capabilities# =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 2025-06-24T21:26:40.710708Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:11:2058] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:12:2059] 2025-06-24T21:26:40.710746Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:11:2058] Successful handshake: owner# 800, generation# 1 2025-06-24T21:26:40.710808Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:11:2058] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:12:2059] 2025-06-24T21:26:40.710843Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:11:2058] Commit generation: owner# 800, generation# 1 2025-06-24T21:26:40.710896Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:11:2058] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Generation: 1 }: sender# [1:13:2060] 2025-06-24T21:26:40.710921Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:11:2058] Successful handshake: owner# 900, generation# 1 2025-06-24T21:26:40.711008Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:11:2058] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 900 Generation: 1 }: sender# [1:13:2060] 2025-06-24T21:26:40.711032Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:11:2058] Commit generation: owner# 900, generation# 1 2025-06-24T21:26:40.711110Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:11:2058] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:12:2059], cookie# 0, event size# 103 2025-06-24T21:26:40.711137Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:11:2058] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2025-06-24T21:26:40.711209Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:11:2058] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-06-24T21:26:40.711265Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:11:2058] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 900 Generation: 1 }: sender# [1:13:2060], cookie# 0, event size# 103 2025-06-24T21:26:40.711286Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:11:2058] 
Update description: path# /Root/Tenant, pathId# [OwnerId: 900, LocalPathId: 1], deletion# false 2025-06-24T21:26:40.711321Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:884: [1:11:2058] Replace GSS by TSS description: path# /Root/Tenant, pathId# [OwnerId: 900, LocalPathId: 1], domainId# [OwnerId: 800, LocalPathId: 2], curPathId# [OwnerId: 800, LocalPathId: 2], curDomainId# [OwnerId: 800, LocalPathId: 2] 2025-06-24T21:26:40.711372Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:11:2058] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 900, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 900, LocalPathId: 1], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-06-24T21:26:40.711441Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:11:2058] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant DomainOwnerId: 0 }: sender# [1:14:2061] 2025-06-24T21:26:40.711476Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:11:2058] Subscribe: subscriber# [1:14:2061], path# /Root/Tenant, domainOwnerId# 0, capabilities# =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 1 PathOwnerId: 900 2025-06-24T21:26:40.711697Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:15:2062] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:16:2063] 2025-06-24T21:26:40.711717Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:15:2062] Successful handshake: owner# 800, generation# 1 2025-06-24T21:26:40.711757Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:15:2062] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:16:2063] 2025-06-24T21:26:40.711780Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:15:2062] Commit generation: owner# 800, generation# 1 2025-06-24T21:26:40.711817Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:15:2062] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:17:2064] 2025-06-24T21:26:40.711833Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:15:2062] Successful handshake: owner# 800, generation# 1 2025-06-24T21:26:40.711877Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:15:2062] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:17:2064] 2025-06-24T21:26:40.711900Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:15:2062] Commit generation: owner# 800, generation# 1 2025-06-24T21:26:40.711944Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:15:2062] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:16:2063], cookie# 0, event size# 103 2025-06-24T21:26:40.711972Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:15:2062] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2025-06-24T21:26:40.712017Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:15:2062] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, 
LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-06-24T21:26:40.712067Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:15:2062] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:17:2064], cookie# 0, event size# 103 2025-06-24T21:26:40.712088Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:15:2062] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2025-06-24T21:26:40.712115Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:15:2062] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 2, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-06-24T21:26:40.712163Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:15:2062] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant DomainOwnerId: 0 }: sender# [1:18:2065] 2025-06-24T21:26:40.712184Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:15:2062] Subscribe: subscriber# [1:18:2065], path# /Root/Tenant, domainOwnerId# 0, capabilities# =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 2 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 2025-06-24T21:26:40.712392Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:19:2066] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:20:2067] 2025-06-24T21:26:40.712414Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:19:2066] Successful handshake: owner# 800, generation# 1 2025-06-24T21:26:40.712441Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:19:2066] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:20:2067] 2025-06-24T21:26:40.712455Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:19:2066] Commit generation: owner# 800, generation# 1 2025-06-24T21:26:40.712482Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:19:2066] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Gener ... 
DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 =========== super id == DomainId: [OwnerId: 800, LocalPathId: 333] IsDeletion: 1 PathId: [OwnerId: 910, LocalPathId: 9] Verions: 18446744073709551615 =========== WIN ==/Root/Tenant/table_inside PathID: [OwnerId: 0, LocalPathId: 0] deleted: 1 version: 0 domainId: [OwnerId: 0, LocalPathId: 0] 2025-06-24T21:26:41.073033Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:399:2446] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [2:400:2447] 2025-06-24T21:26:41.073061Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:399:2446] Successful handshake: owner# 910, generation# 1 2025-06-24T21:26:41.073133Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:399:2446] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [2:400:2447] 2025-06-24T21:26:41.073160Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [2:399:2446] Commit generation: owner# 910, generation# 1 2025-06-24T21:26:41.073195Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:399:2446] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [2:401:2448] 2025-06-24T21:26:41.073217Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:399:2446] Successful handshake: owner# 910, generation# 1 2025-06-24T21:26:41.073248Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:399:2446] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [2:401:2448] 2025-06-24T21:26:41.073264Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [2:399:2446] Commit generation: owner# 910, generation# 1 2025-06-24T21:26:41.073300Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:399:2446] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 910 Generation: 1 }: sender# [2:400:2447], cookie# 0, event size# 64 2025-06-24T21:26:41.073316Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:399:2446] Update description: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9], deletion# true 2025-06-24T21:26:41.073337Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:399:2446] Upsert description: path# [OwnerId: 910, LocalPathId: 9] 2025-06-24T21:26:41.073385Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:399:2446] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 910 Generation: 1 }: sender# [2:401:2448], cookie# 0, event size# 130 2025-06-24T21:26:41.073400Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:399:2446] Update description: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9], deletion# false 2025-06-24T21:26:41.073421Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:834: [2:399:2446] Path was explicitly deleted, ignoring: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9] 2025-06-24T21:26:41.073475Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:399:2446] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 0 }: sender# [2:402:2449] 2025-06-24T21:26:41.073492Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:399:2446] Upsert description: path# /Root/Tenant/table_inside 2025-06-24T21:26:41.073519Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:399:2446] Subscribe: subscriber# [2:402:2449], path# /Root/Tenant/table_inside, domainOwnerId# 0, capabilities# =========== Left ==Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey 
{ SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 =========== Right ==Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 2 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 =========== super id == DomainId: [OwnerId: 800, LocalPathId: 333] IsDeletion: 1 PathId: [OwnerId: 910, LocalPathId: 9] Verions: 18446744073709551615 =========== WIN ==/Root/Tenant/table_inside PathID: [OwnerId: 0, LocalPathId: 0] deleted: 1 version: 0 domainId: [OwnerId: 0, LocalPathId: 0] 2025-06-24T21:26:41.075036Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:403:2450] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [2:404:2451] 2025-06-24T21:26:41.075062Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:403:2450] Successful handshake: owner# 910, generation# 1 2025-06-24T21:26:41.075102Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:403:2450] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [2:404:2451] 2025-06-24T21:26:41.075119Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [2:403:2450] Commit generation: owner# 910, generation# 1 2025-06-24T21:26:41.075175Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:403:2450] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [2:405:2452] 2025-06-24T21:26:41.075198Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:403:2450] Successful handshake: owner# 910, generation# 1 2025-06-24T21:26:41.075245Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:403:2450] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [2:405:2452] 2025-06-24T21:26:41.075273Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [2:403:2450] Commit generation: owner# 910, generation# 1 2025-06-24T21:26:41.075356Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:403:2450] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 910 Generation: 1 }: sender# [2:404:2451], cookie# 0, event size# 64 2025-06-24T21:26:41.075378Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:403:2450] Update description: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9], deletion# true 2025-06-24T21:26:41.075394Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:403:2450] Upsert description: path# [OwnerId: 910, LocalPathId: 9] 2025-06-24T21:26:41.075438Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:403:2450] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 910 Generation: 1 }: sender# [2:405:2452], cookie# 0, event size# 64 2025-06-24T21:26:41.075465Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:403:2450] Update description: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9], deletion# true 2025-06-24T21:26:41.075506Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:403:2450] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 0 }: sender# [2:406:2453] 2025-06-24T21:26:41.075522Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:403:2450] Upsert description: path# /Root/Tenant/table_inside 2025-06-24T21:26:41.075555Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:403:2450] Subscribe: subscriber# [2:406:2453], path# /Root/Tenant/table_inside, domainOwnerId# 0, capabilities# =========== Left ==Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 
333 } } } PathId: 9 PathOwnerId: 910 =========== Right ==Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 =========== super id == DomainId: [OwnerId: 800, LocalPathId: 333] IsDeletion: 1 PathId: [OwnerId: 910, LocalPathId: 9] Verions: 18446744073709551615 =========== WIN ==/Root/Tenant/table_inside PathID: [OwnerId: 0, LocalPathId: 0] deleted: 1 version: 0 domainId: [OwnerId: 0, LocalPathId: 0] 2025-06-24T21:26:41.204425Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [3:8:2055] 2025-06-24T21:26:41.204491Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 800, generation# 1 2025-06-24T21:26:41.204572Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [3:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [3:8:2055] 2025-06-24T21:26:41.204616Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [3:7:2054] Commit generation: owner# 800, generation# 1 2025-06-24T21:26:41.204688Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Generation: 1 }: sender# [3:9:2056] 2025-06-24T21:26:41.204720Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 900, generation# 1 2025-06-24T21:26:41.204825Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [3:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 900 Generation: 1 }: sender# [3:9:2056] 2025-06-24T21:26:41.204852Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [3:7:2054] Commit generation: owner# 900, generation# 1 2025-06-24T21:26:41.204956Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 118 2025-06-24T21:26:41.204993Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# /root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111], deletion# false 2025-06-24T21:26:41.205043Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# /root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111], pathDescription# {Status StatusSuccess, Path /root/db/dir_inside, PathId [OwnerId: 800, LocalPathId: 1111], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 1], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 67} 2025-06-24T21:26:41.205200Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 900 Generation: 1 }: sender# [3:9:2056], cookie# 0, event size# 117 2025-06-24T21:26:41.205237Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], deletion# false 2025-06-24T21:26:41.205285Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:884: [3:7:2054] Update description by newest path form tenant schemeshard: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], domainId# [OwnerId: 800, LocalPathId: 1], curPathId# [OwnerId: 800, LocalPathId: 1111], curDomainId# [OwnerId: 800, LocalPathId: 1] 2025-06-24T21:26:41.205324Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:7:2054] Delete description: path# 
/root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111] 2025-06-24T21:26:41.205371Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], pathDescription# {Status StatusSuccess, Path /root/db/dir_inside, PathId [OwnerId: 900, LocalPathId: 11], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 1], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 67} 2025-06-24T21:26:41.205487Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/db/dir_inside DomainOwnerId: 0 }: sender# [3:10:2057] 2025-06-24T21:26:41.205556Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:10:2057], path# /root/db/dir_inside, domainOwnerId# 0, capabilities# =========== Path: "/root/db/dir_inside" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 1 } } } PathId: 1111 PathOwnerId: 800 =========== Path: "/root/db/dir_inside" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 1 } } } PathId: 11 PathOwnerId: 900 =========== DomainId: [OwnerId: 800, LocalPathId: 1] IsDeletion: 0 PathId: [OwnerId: 900, LocalPathId: 11] Versions: 1 >> ResultFormatter::EmptyTuple [GOOD] >> ResultFormatter::Pg [GOOD] >> TTabletPipeTest::TestSendBeforeBootTarget [GOOD] |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Null [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::AckNotificationsUponPathRecreation [GOOD] Test command err: 2025-06-24T21:26:41.091188Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-24T21:26:41.091256Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-24T21:26:41.091358Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-24T21:26:41.091392Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:7:2054] Commit generation: owner# 1, generation# 1 2025-06-24T21:26:41.091440Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 2 }: sender# [1:8:2055] 2025-06-24T21:26:41.091464Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 2 2025-06-24T21:26:41.348192Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [2:9:2056] 2025-06-24T21:26:41.348258Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7:2054] Upsert description: path# path 2025-06-24T21:26:41.348385Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:9:2056], path# path, domainOwnerId# 0, capabilities# AckNotifications: true 2025-06-24T21:26:41.348467Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2025-06-24T21:26:41.348502Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 1 
2025-06-24T21:26:41.348587Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:41.348613Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-24T21:26:41.354367Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-24T21:26:41.354520Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:9:2056] 2025-06-24T21:26:41.354596Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 40 2025-06-24T21:26:41.354631Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-06-24T21:26:41.354659Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [2:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:41.354711Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 1 }: sender# [2:9:2056] 2025-06-24T21:26:41.625159Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-24T21:26:41.625214Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-24T21:26:41.625328Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:41.625367Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-24T21:26:41.625453Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 2, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-24T21:26:41.625562Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:9:2056] 2025-06-24T21:26:41.625639Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 0, capabilities# AckNotifications: true 2025-06-24T21:26:41.625759Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:41.625797Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-24T21:26:41.625847Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# 
path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 3, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-24T21:26:41.626070Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-24T21:26:41.626125Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2025-06-24T21:26:41.626154Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-24T21:26:41.626217Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:7:2054] Upsert description: path# path 2025-06-24T21:26:41.626269Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 0, capabilities# AckNotifications: true 2025-06-24T21:26:41.626329Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 2], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-24T21:26:41.626417Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 3 }: sender# [3:9:2056] |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Struct [GOOD] |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Pg [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendBeforeBootTarget [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] Leader for TabletID 9437184 is [1:111:2141] sender: [1:112:2057] recipient: [1:105:2137] Leader for TabletID 9437184 is [1:111:2141] sender: [1:131:2057] recipient: [1:14:2061] Leader for TabletID 9437185 is [0:0:0] sender: [1:160:2057] recipient: [1:158:2163] IGNORE Leader for TabletID 9437185 is [0:0:0] sender: [1:160:2057] recipient: [1:158:2163] Leader for TabletID 9437185 is [1:164:2167] sender: [1:165:2057] recipient: [1:158:2163] Leader for TabletID 9437185 is [1:164:2167] sender: [1:190:2057] recipient: [1:14:2061] |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::FormatNonEmptySchema [GOOD] |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::EmptyTuple [GOOD] >> THealthCheckTest::LayoutCorrect [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::LayoutIncorrect [GOOD] Test command err: 2025-06-24T21:26:11.179399Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:11.180857Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:11.180960Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:11.181021Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:11.181459Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:11.181502Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043d8/r3tmp/tmp7s5FfV/pdisk_1.dat 2025-06-24T21:26:11.568961Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16348, node 1 TClient is connected to server localhost:64287 2025-06-24T21:26:11.899200Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:11.899247Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:11.899276Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:11.899572Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:18.393274Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:446:2372], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:18.393711Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:18.393915Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:18.395712Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:18.396157Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:18.396295Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043d8/r3tmp/tmpc3zAcs/pdisk_1.dat 2025-06-24T21:26:18.700099Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65296, node 3 TClient is connected to server localhost:10256 2025-06-24T21:26:19.032325Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:19.032393Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:19.032430Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:19.032831Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-3" reason: "YELLOW-e9e2-1231c6b1-4" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-3" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 3 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-4" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 4 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "RED-a594-3-3-42" status: RED message: "PDisk state is FAULTY" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-42" path: "/home/runner/.ya/build/build_root/2btm/0043d8/r3tmp/tmpc3zAcs/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "RED-a594-3-3-43" status: RED message: "PDisk state is FAULTY" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-43" path: "/home/runner/.ya/build/build_root/2btm/0043d8/r3tmp/tmpc3zAcs/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "RED-a594-3-3-44" status: RED message: "PDisk state is FAULTY" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-44" path: "/home/runner/.ya/build/build_root/2btm/0043d8/r3tmp/tmpc3zAcs/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } location { id: 3 host: "::1" port: 12001 } 2025-06-24T21:26:23.150785Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:23.151286Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:23.151423Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043d8/r3tmp/tmp5xTsj9/pdisk_1.dat 2025-06-24T21:26:23.465836Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11009, node 5 TClient is connected to server localhost:17721 2025-06-24T21:26:23.825893Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:23.825957Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:23.825998Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:23.826738Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:28.097940Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:28.098287Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:28.098344Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043d8/r3tmp/tmpPZZGUb/pdisk_1.dat 2025-06-24T21:26:28.446208Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61247, node 7 TClient is connected to server localhost:64772 2025-06-24T21:26:28.876801Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:28.876871Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:28.876916Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:28.877558Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:28.959685Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:26:28.959852Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:26:28.976932Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:26:29.660220Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:26:39.929383Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:411:2371], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:39.929776Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:39.929890Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043d8/r3tmp/tmp9HIoys/pdisk_1.dat 2025-06-24T21:26:40.259036Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30133, node 9 TClient is connected to server localhost:16802 2025-06-24T21:26:40.717837Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:40.717902Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:40.717942Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:40.718309Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration |97.4%| [TA] $(B)/ydb/core/tx/scheme_board/ut_replica/test-results/unittest/{meta.json ... results_accumulator.log} |97.4%| [TA] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_replica/test-results/unittest/{meta.json ... results_accumulator.log} |97.4%| [TA] $(B)/ydb/core/tablet/ut/test-results/unittest/{meta.json ... results_accumulator.log} |97.4%| [TA] {RESULT} $(B)/ydb/core/tablet/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> ResultFormatter::Tuple [GOOD] >> ResultFormatter::Tagged [GOOD] >> ResultFormatter::StructWithNoFields [GOOD] >> ResultFormatter::StructTypeNameAsString [GOOD] >> ResultFormatter::EmptyDict [GOOD] >> ResultFormatter::Dict [GOOD] >> ResultFormatter::Decimal [GOOD] |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::StructTypeNameAsString [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::LayoutCorrect [GOOD] Test command err: 2025-06-24T21:26:11.430297Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:11.430962Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:11.431068Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:11.431126Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:11.431599Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:11.431653Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e4/r3tmp/tmpcmr6c2/pdisk_1.dat 2025-06-24T21:26:11.812693Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18027, node 1 TClient is connected to server localhost:20402 2025-06-24T21:26:12.146088Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:12.146146Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:12.146192Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:12.146494Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:18.416269Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:446:2372], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:18.416571Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:18.416730Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:18.417914Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:18.418217Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:18.418318Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e4/r3tmp/tmpEUfX2Y/pdisk_1.dat 2025-06-24T21:26:18.710822Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27740, node 3 TClient is connected to server localhost:17257 2025-06-24T21:26:19.052371Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:19.052426Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:19.052456Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:19.052796Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:26.053198Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:26.053706Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:26.053799Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:26.053980Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:26.054193Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:26.054378Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e4/r3tmp/tmpYijx09/pdisk_1.dat 2025-06-24T21:26:26.353127Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14514, node 5 TClient is connected to server localhost:31504 2025-06-24T21:26:26.696667Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:26.696732Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:26.696778Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:26.697774Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:34.729101Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:34.729369Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:34.729687Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:34.729814Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:34.729869Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:34.730004Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e4/r3tmp/tmpq8lwep/pdisk_1.dat 2025-06-24T21:26:35.082006Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29763, node 7 TClient is connected to server localhost:8485 2025-06-24T21:26:35.526026Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:35.526124Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:35.526168Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:35.527419Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:40.665138Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:411:2371], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:40.665553Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:40.665663Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e4/r3tmp/tmphCjxQy/pdisk_1.dat 2025-06-24T21:26:40.990388Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30583, node 9 TClient is connected to server localhost:24913 2025-06-24T21:26:41.397829Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:41.397907Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:41.397953Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:41.398387Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Tagged [GOOD] |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Decimal [GOOD] >> THealthCheckTest::ShardsNoLimit [GOOD] >> TCacheTest::MigrationDeletedPathNavigate [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_OtherPartition_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_OldPartitionExists_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_OldPartitionExists_NotWritten_Test >> TPQTest::TestPartitionWriteQuota [GOOD] >> TPQTest::TestReadRuleVersions ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::MigrationDeletedPathNavigate [GOOD] Test command err: 2025-06-24T21:26:15.973317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:15.973377Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:16.154354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 2025-06-24T21:26:16.170931Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 65543, Sender [1:176:2171], Recipient [1:72:2110]: NActors::TEvents::TEvPoison 2025-06-24T21:26:16.171556Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Leader for TabletID 72057594046678944 is [1:72:2110] sender: [1:177:2067] recipient: [1:47:2094] Leader for TabletID 72057594046678944 is [1:72:2110] sender: [1:180:2067] recipient: [1:179:2172] Leader for TabletID 72057594046678944 is [1:72:2110] sender: [1:181:2067] recipient: [1:24:2071] Leader for TabletID 72057594046678944 is [1:182:2173] sender: [1:183:2067] recipient: [1:179:2172] 2025-06-24T21:26:16.178525Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4890: StateInit, received event# 268828672, Sender [1:179:2172], Recipient [1:182:2173]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:26:16.180704Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4890: StateInit, received event# 268828673, Sender [1:179:2172], Recipient [1:182:2173]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:26:16.180851Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4890: StateInit, received event# 268828684, Sender [1:179:2172], Recipient [1:182:2173]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:26:16.192720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:26:16.192813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:26:16.192850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:26:16.192885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:26:16.192927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:26:16.192946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:26:16.192983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:26:16.193046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:26:16.193607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:26:16.193831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:26:16.205610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:26:16.206618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:26:16.206755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:26:16.206926Z node 1 :FLAT_TX_SCHEMESHARD 
TRACE: schemeshard_impl.cpp:4890: StateInit, received event# 65542, Sender [1:7238242728502259555:7369577], Recipient [1:182:2173]: TSystem::Undelivered 2025-06-24T21:26:16.206962Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4892: StateInit, processing event TEvents::TEvUndelivered 2025-06-24T21:26:16.206996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:16.207017Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:26:16.207175Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__root_data_erasure_manager.cpp:92: [RootDataErasureManager] Clear operation queue and active pipes 2025-06-24T21:26:16.207207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:26:16.207814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:16.207908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.208934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.209460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.209566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-24T21:26:16.209804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.210704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.210828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.210929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.210999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.211183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.212381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.212518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.214549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.214749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.215626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: 
TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.215739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.215835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.216016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.216096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.216219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.217176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.217263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.217390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.217440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.217488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T21:26:16.218505Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-24T21:26:16.219821Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:26:16.219982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:26:16.220560Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 2146435083, Sender [1:182:2173], Recipient [1:182:2173]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2025-06-24T21:26:16.220600Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5014: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2025-06-24T21:26:16.221182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:26:16.221223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:26:16.221338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:26:16.221393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:26:16.221444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:26:16.221473Z node 1 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-24T21:26:16.221757Z no ... essor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 3, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:26:16.844093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:26:16.844129Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 { Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 SchemeShard: 72075186233409549 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-24T21:26:17.214264Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:17.214333Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:17.270605Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 Leader for TabletID 72057594046678944 is [2:72:2110] sender: [2:177:2067] recipient: [2:47:2094] Leader for TabletID 72057594046678944 is [2:72:2110] sender: [2:180:2067] recipient: [2:24:2071] Leader for TabletID 72057594046678944 is [2:72:2110] sender: [2:181:2067] recipient: [2:179:2172] Leader for TabletID 72057594046678944 is [2:182:2173] sender: [2:183:2067] recipient: [2:179:2172] 2025-06-24T21:26:17.319878Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:17.319948Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 101 Leader for TabletID 72057594046678944 is [2:182:2173] sender: [2:213:2067] recipient: [2:24:2071] 2025-06-24T21:26:17.349037Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-24T21:26:17.357220Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:249:2067] recipient: [2:240:2214] IGNORE Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:249:2067] recipient: [2:240:2214] Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:250:2067] recipient: [2:243:2216] IGNORE Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:250:2067] recipient: [2:243:2216] Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:253:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:253:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:254:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:254:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409546 is [2:252:2220] sender: [2:255:2067] recipient: [2:240:2214] Leader for TabletID 72075186233409547 is [2:257:2222] sender: [2:258:2067] recipient: [2:243:2216] TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 TestWaitNotification wait txId: 102 2025-06-24T21:26:17.378291Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 Leader for TabletID 72075186233409546 is [2:252:2220] sender: [2:292:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409547 is [2:257:2222] sender: [2:293:2067] recipient: [2:24:2071] FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestModificationResults wait txId: 104 2025-06-24T21:26:17.422387Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 104:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 103 TestWaitNotification wait txId: 104 Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:343:2067] recipient: [2:339:2286] IGNORE Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:343:2067] recipient: [2:339:2286] Leader for 
TabletID 72075186233409548 is [0:0:0] sender: [2:344:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:344:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409548 is [2:346:2290] sender: [2:347:2067] recipient: [2:339:2286] Leader for TabletID 72075186233409548 is [2:346:2290] sender: [2:348:2067] recipient: [2:24:2071] TestWaitNotification: OK eventTxId 103 TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-06-24T21:26:17.546035Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpUpgradeSubDomain, opId: 105:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp:1232) Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:422:2067] recipient: [2:418:2334] IGNORE Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:422:2067] recipient: [2:418:2334] Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:423:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:423:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409549 is [2:425:2338] sender: [2:426:2067] recipient: [2:418:2334] Leader for TabletID 72075186233409549 is [2:425:2338] sender: [2:427:2067] recipient: [2:24:2071] 2025-06-24T21:26:17.584738Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:17.584801Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 TestWaitNotification: OK eventTxId 105 TestModificationResults wait txId: 106 2025-06-24T21:26:17.602568Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5471: Mark as Migrated path id [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:26:17.602637Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5471: Mark as Migrated path id [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-24T21:26:17.602968Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard__operation_upgrade_subdomain.cpp:1464: TWait ProgressState, dependent transaction: 106, parent transaction: 105, at schemeshard: 72057594046678944 2025-06-24T21:26:17.603129Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpUpgradeSubDomainDecision, opId: 106:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp:571) TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-24T21:26:17.620847Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5932: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 2025-06-24T21:26:17.621432Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5932: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 TestModificationResults wait txId: 107 TestModificationResult got TxId: 107, wait until txId: 107 TestWaitNotification wait txId: 107 TestWaitNotification: OK eventTxId 107 TestModificationResults wait txId: 108 
2025-06-24T21:26:17.675263Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 108:0, at schemeshard: 72075186233409549, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Leader for TabletID 72075186233409550 is [0:0:0] sender: [2:556:2067] recipient: [2:553:2441] IGNORE Leader for TabletID 72075186233409550 is [0:0:0] sender: [2:556:2067] recipient: [2:553:2441] Leader for TabletID 72075186233409550 is [0:0:0] sender: [2:559:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409550 is [0:0:0] sender: [2:559:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409550 is [2:560:2445] sender: [2:561:2067] recipient: [2:553:2441] Leader for TabletID 72075186233409550 is [2:560:2445] sender: [2:562:2067] recipient: [2:24:2071] TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 Forgetting tablet 72075186233409548 TestWaitNotification: OK eventTxId 108 2025-06-24T21:26:20.023022Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:26:20.023113Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:26:20.086378Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:26:20.086440Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded >> TPQTestSlow::TestOnDiskStoredSourceIds [GOOD] >> ResultFormatter::Void [GOOD] >> ResultFormatter::VariantTuple [GOOD] >> ColumnStatistics::CountMinSketchStatistics [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::ShardsNoLimit [GOOD] Test command err: 2025-06-24T21:26:11.306095Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:11.306678Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:11.306777Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:11.306816Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:11.307219Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:11.307272Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e6/r3tmp/tmp9P852P/pdisk_1.dat 2025-06-24T21:26:11.674049Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15825, node 1 TClient is connected to server localhost:3576 2025-06-24T21:26:12.044709Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:12.044764Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:12.044798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:12.045171Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:18.667177Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:446:2372], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:18.667550Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:18.667687Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:18.669028Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:18.669298Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:18.669385Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e6/r3tmp/tmpPgrmke/pdisk_1.dat 2025-06-24T21:26:18.945556Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16156, node 3 TClient is connected to server localhost:31241 2025-06-24T21:26:19.221934Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:19.221989Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:19.222018Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:19.222424Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:25.882002Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:25.882617Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:25.882733Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:25.882993Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:25.883268Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:25.883506Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e6/r3tmp/tmpr40oOU/pdisk_1.dat 2025-06-24T21:26:26.139363Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29754, node 5 TClient is connected to server localhost:19142 2025-06-24T21:26:26.381491Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:26.381534Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:26.381555Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:26.382175Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:34.078250Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:34.078653Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:34.079131Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:34.079292Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:34.079359Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:34.079551Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e6/r3tmp/tmp16haEq/pdisk_1.dat 2025-06-24T21:26:34.435660Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12959, node 7 TClient is connected to server localhost:26089 2025-06-24T21:26:34.708515Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:34.708579Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:34.708609Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:34.709555Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:42.044491Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:42.044868Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:42.045102Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:42.045151Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:42.045222Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:42.045446Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e6/r3tmp/tmpBLK6KJ/pdisk_1.dat 2025-06-24T21:26:42.347715Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22466, node 9 TClient is connected to server localhost:25337 2025-06-24T21:26:42.664860Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:42.664923Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:42.664957Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:42.665196Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> BasicStatistics::TwoNodes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/slow/unittest >> TPQTestSlow::TestOnDiskStoredSourceIds [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:111:2057] recipient: [1:104:2136] 2025-06-24T21:25:33.075942Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:33.076032Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927938 is [1:156:2175] sender: [1:157:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:182:2057] recipient: [1:14:2061] 2025-06-24T21:25:33.119969Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:33.148323Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1 actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 
6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-24T21:25:33.151426Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:188:2199] 2025-06-24T21:25:33.155635Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:188:2199] 2025-06-24T21:25:33.158816Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:189:2200] 2025-06-24T21:25:33.160520Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:189:2200] 2025-06-24T21:25:33.174279Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|de7a4676-1df46490-1bc7ab44-fb377fb0_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:33.204118Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d332a00f-fbcc9a64-db384295-6a062232_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:33.228202Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|e6b8750f-ab869a66-7407a110-fbf1b395_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:33.237658Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|9c509d6f-a571d076-f68f0748-83c28781_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:33.245427Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|aed6817b-ba662510-b44806cf-2e7bffd8_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:33.254135Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b19a750c-94d6b933-9327b819-6e9f8321_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:106:2057] recipient: [2:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:106:2057] recipient: [2:104:2136] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:111:2057] recipient: [2:104:2136] 2025-06-24T21:25:33.781702Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:33.781801Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:152:2057] recipient: [2:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:152:2057] recipient: [2:150:2171] Leader for TabletID 72057594037927938 is [2:156:2175] sender: [2:157:2057] recipient: [2:150:2171] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:180:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:110:2140]) on event NKikimr::TEvPersQueue::TEvUpdateConfigBuilder ! 
Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:182:2057] recipient: [2:102:2135] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:185:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:186:2057] recipient: [2:184:2194] Leader for TabletID 72057594037927937 is [2:187:2195] sender: [2:188:2057] recipient: [2:184:2194] 2025-06-24T21:25:33.838637Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:33.838728Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info !Reboot 72057594037927937 (actor [2:110:2140]) rebooted! !Reboot 72057594037927937 (actor [2:110:2140]) tablet resolver refreshed! new actor is[2:187:2195] Leader for TabletID 72057594037927937 is [2:187:2195] sender: [2:267:2057] recipient: [2:14:2061] 2025-06-24T21:25:35.422133Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:35.423250Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 2 actor [2:178:2191] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-24T21:25:35.424148Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:273:2257] 2025-06-24T21:25:35.426037Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [2:273:2257] 2025-06-24T21:25:35.428312Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:274:2258] 2025-06-24T21:25:35.429639Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [2:274:2258] 2025-06-24T21:25:35.438924Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|a7af584-49e3de0e-32d40e6b-46b0b01b_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:35.444951Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b01a63f5-852f1b9a-648cc4b3-f34a6798_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:35.469792Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|75933a3-b9e2fca5-e5d285db-71eedfea_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:35.480125Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d8bd7f6d-151517fe-60fa5ee2-af782b44_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:35.490505Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 
default|72cf240b-93a320ef-9a136c2-9206d88d_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:25:35.500048Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|6789c2c3-ea205c55-5e7cce01-b43f77df_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:106:2057] recipient: [3:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:106:2057] recipient: [3:104:2136] Leader for TabletID 72057594037927937 is [3:110:2140] sender: [3:111:2057] recipient: [3:104:2136] 2025-06-24T21:25:35.998595Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:35.998672Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:152:2057] recipient: [3:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:152:2057] recipient: [3:150:2171] Leader for TabletID 72057594037927938 is [3:156:2175] sender: [3:157:2057] recipient: [3:150:2171] Leader for TabletID 72057594037927937 is [3:110:2140] sender: [3:182:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:110:2140]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:110:2140] sender: [3:184:2057] recipient: [3:102:2135] Leader for TabletID 72057594037927937 is [3:110:2140] sender: [3:186:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:110:2140] sender: [3:188:2057] recipient: [3:187:2196] Leader for TabletID 72057594037927937 is [3:189:2197] sender: [3:190:2057] recipient: [3:187:2196] 2025-06-24T21:25:36.043636Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:36.043722Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info !Reboot 72057594037927937 (actor [3:110:2140]) rebooted! !Reboot 72057594037927937 (actor [3:110:2140]) tablet resolver refreshed! new actor is[3:189:2197] Leader for TabletID 72057594037927937 is [3:189:2197] sender: [3:269:2057] recipient: [3:14:2061] 2025-06-24T21:25:37.640168Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:37.641028Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 3 actor [3:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } 2025-06-24T21:25:37.641811Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 720 ... 
27937, Partition: 0, State: StateInit] bootstrapping 0 [47:188:2199] 2025-06-24T21:26:41.969788Z node 47 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [47:188:2199] 2025-06-24T21:26:41.972149Z node 47 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [47:189:2200] 2025-06-24T21:26:41.974768Z node 47 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [47:189:2200] 2025-06-24T21:26:41.986082Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|64c640bd-21d4088a-b8e0949-8ba40361_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:41.991606Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|2cf9dcd6-6ec9df13-47b0e8d8-4e9df451_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:42.020174Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|438ef6e0-32bdc9df-9d061648-5d0b82b0_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:42.029345Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|bc4a7748-82de4c5d-bf4e5e1-8f5df0e9_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:42.038249Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|fccc1c89-93acaa97-19fb9880-967d5f56_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:42.047185Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|9ab45f35-7fc2e0d0-9624072d-b4ff75bc_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default !Reboot 72057594037927937 (actor [47:110:2140]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [47:110:2140] sender: [47:286:2057] recipient: [47:102:2135] Leader for TabletID 72057594037927937 is [47:110:2140] sender: [47:289:2057] recipient: [47:14:2061] Leader for TabletID 72057594037927937 is [47:110:2140] sender: [47:290:2057] recipient: [47:288:2281] Leader for TabletID 72057594037927937 is [47:291:2282] sender: [47:292:2057] recipient: [47:288:2281] 2025-06-24T21:26:42.103602Z node 47 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:42.103682Z node 47 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:26:42.104844Z node 47 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [47:340:2323] 2025-06-24T21:26:42.108052Z node 47 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [47:341:2324] 2025-06-24T21:26:42.117138Z node 47 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-24T21:26:42.117210Z node 47 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [47:340:2323] 2025-06-24T21:26:42.119333Z node 47 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:26:42.119398Z node 47 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [47:341:2324] !Reboot 72057594037927937 (actor [47:110:2140]) rebooted! !Reboot 72057594037927937 (actor [47:110:2140]) tablet resolver refreshed! new actor is[47:291:2282] Leader for TabletID 72057594037927937 is [47:291:2282] sender: [47:391:2057] recipient: [47:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [48:106:2057] recipient: [48:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [48:106:2057] recipient: [48:104:2136] Leader for TabletID 72057594037927937 is [48:110:2140] sender: [48:111:2057] recipient: [48:104:2136] 2025-06-24T21:26:43.747957Z node 48 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:43.748034Z node 48 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [48:152:2057] recipient: [48:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [48:152:2057] recipient: [48:150:2171] Leader for TabletID 72057594037927938 is [48:156:2175] sender: [48:157:2057] recipient: [48:150:2171] Leader for TabletID 72057594037927937 is [48:110:2140] sender: [48:182:2057] recipient: [48:14:2061] 2025-06-24T21:26:43.768636Z node 48 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:43.769609Z node 48 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 48 actor [48:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 48 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 48 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 48 Important: false } 2025-06-24T21:26:43.770455Z node 48 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [48:188:2199] 2025-06-24T21:26:43.773372Z node 48 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [48:188:2199] 2025-06-24T21:26:43.775357Z node 48 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [48:189:2200] 2025-06-24T21:26:43.777521Z node 48 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init 
complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [48:189:2200] 2025-06-24T21:26:43.786723Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|4e2a4f39-c17da450-2e5552e6-3fa5e78b_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:43.792440Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|38ee07af-dbf2b65e-9aa1d348-8d9ff289_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:43.820708Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|766a3064-27a89b2f-35fb9630-d81a4105_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:43.829855Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|90edd8b-c4816797-ac9b2d51-28c5c405_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:43.838688Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|32973cdc-1df42d9-9a23298f-6d710b99_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:43.846353Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|725e1e66-76b0f22d-7f78729f-f8d5de03_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [49:106:2057] recipient: [49:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [49:106:2057] recipient: [49:104:2136] Leader for TabletID 72057594037927937 is [49:110:2140] sender: [49:111:2057] recipient: [49:104:2136] 2025-06-24T21:26:44.328256Z node 49 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:44.328320Z node 49 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [49:152:2057] recipient: [49:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [49:152:2057] recipient: [49:150:2171] Leader for TabletID 72057594037927938 is [49:156:2175] sender: [49:157:2057] recipient: [49:150:2171] Leader for TabletID 72057594037927937 is [49:110:2140] sender: [49:182:2057] recipient: [49:14:2061] 2025-06-24T21:26:44.344348Z node 49 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:44.345312Z node 49 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 49 actor [49:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 49 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 49 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 49 Important: false } 2025-06-24T21:26:44.346214Z node 49 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [49:188:2199] 2025-06-24T21:26:44.348404Z node 49 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init 
complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [49:188:2199] 2025-06-24T21:26:44.350527Z node 49 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [49:189:2200] 2025-06-24T21:26:44.352868Z node 49 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [49:189:2200] 2025-06-24T21:26:44.362891Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|4bc3edda-50f169ad-5c67953b-f33672de_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:44.367942Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|e63b56c0-8c9db222-4a861a7d-9cf110a8_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:44.390025Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|23903983-e5f8f3b6-55a71963-e0f34188_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:44.398401Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3f443017-e6957cfa-aface510-ad0b5b22_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:44.405455Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|19073c3d-cb28e9ac-1ed34a24-1a12160c_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:44.413303Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|df548614-e505858f-67466a0e-8b92f471_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::VariantTuple [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_OtherPartition_Test [GOOD] Test command err: 2025-06-24T21:25:36.295789Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630186217061601:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:36.296113Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:25:36.388740Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630185321584776:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:36.388795Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003b2f/r3tmp/tmptPDMHp/pdisk_1.dat 2025-06-24T21:25:36.542938Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:25:36.542927Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:25:36.761671Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:36.761820Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-24T21:25:36.765067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:36.765137Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:36.771187Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:25:36.798364Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:36.806720Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:25:36.807438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26995, node 1 2025-06-24T21:25:37.037360Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/003b2f/r3tmp/yandexcgvfB4.tmp 2025-06-24T21:25:37.037388Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/003b2f/r3tmp/yandexcgvfB4.tmp 2025-06-24T21:25:37.038993Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/003b2f/r3tmp/yandexcgvfB4.tmp 2025-06-24T21:25:37.039180Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:25:37.291792Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:37.303892Z INFO: TTestServer started on Port 2520 GrpcPort 26995 2025-06-24T21:25:37.392985Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2520 PQClient connected to localhost:26995 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:25:37.627741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-24T21:25:37.711940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T21:25:39.620420Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630198206487008:2276], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:39.620505Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630198206486991:2273], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:39.620556Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:39.625688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:25:39.647005Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630198206487020:2277], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-24T21:25:39.713809Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630198206487047:2175] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:25:40.003892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:40.010218Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519630199101964446:2304], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:25:40.010486Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NDI4Mjk3NDYtNGVkNjhiYTEtNmM2MWQ5OTctYmMwODliYzQ=, ActorId: [1:7519630199101964405:2297], ActorState: ExecuteState, TraceId: 01jyhxa4s73bhjecytr31sb1z9, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:25:40.011587Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519630198206487061:2281], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:25:40.012300Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:25:40.013076Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=Mjg2YThlM2QtZDRmZDQ5ZDYtNjBlMTdjNjMtY2UxZmExNjc=, ActorId: [2:7519630198206486989:2272], ActorState: ExecuteState, TraceId: 01jyhxa4n2e317evjvk0egcjgb, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:25:40.013422Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:25:40.086698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:40.194241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` ... 
7199536658146131:7762515]; 2025-06-24T21:26:32.074167Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:26:32.173713Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-24T21:26:32.455366Z node 9 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhxbr2y4q0cncvh06rzb713, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=NGRiNDI4NmEtMmY0MTY3MjctODE0ZWYwMGYtNTAwMDBmZTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [9:7519630427637793287:3071] === CheckClustersList. Ok 2025-06-24T21:26:37.568459Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:26:38.350762Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:26:39.001143Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715685:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:26:39.668959Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715690:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:26:40.383207Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715693:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:26:41.015303Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation 
type: ESchemeOpModifyACL, opId: 281474976715699:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Run query: --!syntax_v1 UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES (16261273835729377752, "Root", "00415F536F757263655F3130", 1750800401820, 1750800401820, 0, 13); 2025-06-24T21:26:41.956090Z node 9 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715704. Ctx: { TraceId: 01jyhxc1dpcayf0x217g1bwg32, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=NjJiY2VkNzMtNzg3ZDQ0MzEtNmIyYzQ1YzItZGE5Zjk0M2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:26:41.973840Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-24T21:26:41.973873Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-24T21:26:41.973885Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-24T21:26:41.973906Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__sm_chooser_actor.h:116: TPartitionChooser [9:7519630466292500257:3763] (SourceId=A_Source_10, PreferedPartition=1) GetOwnershipFast Partition=1 TabletId=1001 2025-06-24T21:26:41.974054Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 269877760, Sender [9:7519630466292500258:3763], Recipient [9:7519630449112630023:3180]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 1001 Status: OK ServerId: [9:7519630466292500257:3763] Leader: 1 Dead: 0 Generation: 1 VersionInfo: } 2025-06-24T21:26:41.974149Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 271188557, Sender [9:7519630466292500257:3763], Recipient [9:7519630449112630023:3180]: NKikimrPQ.TEvCheckPartitionStatusRequest Partition: 1 SourceId: "A_Source_10" 2025-06-24T21:26:41.974250Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:139: StateOwnershipFast, received event# 271188558, Sender [9:7519630449112630023:3180], Recipient [9:7519630466292500257:3763]: NKikimrPQ.TEvCheckPartitionStatusResponse Status: Active 2025-06-24T21:26:41.974283Z node 9 :PQ_PARTITION_CHOOSER TRACE: 
partition_chooser_impl__abstract_chooser_actor.h:88: TPartitionChooser [9:7519630466292500257:3763] (SourceId=A_Source_10, PreferedPartition=1) InitTable: SourceId=A_Source_10 TopicsAreFirstClassCitizen=1 UseSrcIdMetaMappingInFirstClass=1 2025-06-24T21:26:41.974372Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 65543, Sender [9:7519630466292500257:3763], Recipient [9:7519630449112630023:3180]: NActors::TEvents::TEvPoison 2025-06-24T21:26:41.974446Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:101: StateInitTable, received event# 277020685, Sender [9:7519630406162955225:2070], Recipient [9:7519630466292500257:3763]: NKikimr::NMetadata::NProvider::TEvManagerPrepared 2025-06-24T21:26:41.974472Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [9:7519630466292500257:3763] (SourceId=A_Source_10, PreferedPartition=1) StartKqpSession 2025-06-24T21:26:41.978067Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:132: StateCreateKqpSession, received event# 271646728, Sender [9:7519630406162955316:2150], Recipient [9:7519630466292500257:3763]: NKikimrKqp.TEvCreateSessionResponse Error: "" Response { SessionId: "ydb://session/3?node_id=9&id=YzUxYzQyYTgtOTMzMjM1OTEtNGNkODRhMjYtMmQ2ZGVjODI=" NodeId: 9 } YdbStatus: SUCCESS ResourceExhausted: false 2025-06-24T21:26:41.978115Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [9:7519630466292500257:3763] (SourceId=A_Source_10, PreferedPartition=1) Select from the table 2025-06-24T21:26:42.170428Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:163: StateSelect, received event# 271646721, Sender [9:7519630406162955316:2150], Recipient [9:7519630466292500257:3763]: NKikimrKqp.TEvQueryResponse Response { SessionId: "ydb://session/3?node_id=9&id=YzUxYzQyYTgtOTMzMjM1OTEtNGNkODRhMjYtMmQ2ZGVjODI=" PreparedQuery: "386f358e-fd75f944-b10f7796-64b61799" QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { id: "01jyhxc1qk65wt5e80m39m7ngg" } YdbResults { columns { name: "Partition" type { optional_type { item { type_id: UINT32 } } } } columns { name: "CreateTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "AccessTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "SeqNo" type { optional_type { item { type_id: UINT64 } } } } rows { items { uint32_value: 0 } items { uint64_value: 1750800401820 } items { uint64_value: 1750800401820 } items { uint64_value: 13 } } } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 121 2025-06-24T21:26:42.170587Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:151: TPartitionChooser [9:7519630466292500257:3763] (SourceId=A_Source_10, PreferedPartition=1) Selected from table PartitionId=0 SeqNo=13 2025-06-24T21:26:42.170627Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:209: TPartitionChooser [9:7519630466292500257:3763] (SourceId=A_Source_10, PreferedPartition=1) OnPartitionChosen 2025-06-24T21:26:42.170660Z node 9 :PQ_PARTITION_CHOOSER INFO: partition_chooser_impl__abstract_chooser_actor.h:312: TPartitionChooser [9:7519630466292500257:3763] 
(SourceId=A_Source_10, PreferedPartition=1) ReplyError: MessageGroupId A_Source_10 is already bound to PartitionGroupId 1, but client provided 2. MessageGroupId->PartitionGroupId binding cannot be changed, either use another MessageGroupId, specify PartitionGroupId 1, or do not specify PartitionGroupId at all. Received TEvChooseError: MessageGroupId A_Source_10 is already bound to PartitionGroupId 1, but client provided 2. MessageGroupId->PartitionGroupId binding cannot be changed, either use another MessageGroupId, specify PartitionGroupId 1, or do not specify PartitionGroupId at all. Run query: --!syntax_v1 SELECT Partition, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash = 16261273835729377752 AND Topic = "Root" AND ProducerId = "00415F536F757263655F3130" 2025-06-24T21:26:42.285909Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:26:42.285956Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:26:42.390353Z node 9 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715708. Ctx: { TraceId: 01jyhxc1rnbmtcerdh17t1awt3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=M2E2MTQ4ZmEtYzRkOTJlNjctYzMwZGZhLWMzMDQ4ZGFk, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root |97.5%| [TA] $(B)/ydb/core/fq/libs/result_formatter/ut/test-results/unittest/{meta.json ... results_accumulator.log} |97.5%| [TA] {RESULT} $(B)/ydb/core/fq/libs/result_formatter/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> ColumnStatistics::CountMinSketchStatistics [GOOD] Test command err: 2025-06-24T21:24:07.673984Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:417:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:24:07.674597Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:07.674698Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004824/r3tmp/tmp9zmvID/pdisk_1.dat 2025-06-24T21:24:08.042047Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3543, node 1 2025-06-24T21:24:08.333574Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:08.333645Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:08.333697Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:08.334280Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:08.337127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:08.440963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:08.441148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:08.456969Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61391 2025-06-24T21:24:09.049038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:12.471652Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:24:12.521074Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:12.521229Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:12.561633Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:24:12.563686Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:12.794245Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:12.828833Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:12.829433Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:12.829989Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:12.830153Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:12.830394Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:12.830482Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:12.830583Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:12.830665Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:12.830743Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.027439Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:13.027550Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:13.041218Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:13.206711Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:13.256920Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:24:13.257017Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:24:13.314119Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:24:13.315813Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:24:13.316103Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:24:13.316178Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:24:13.316242Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:24:13.316323Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:24:13.316398Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:24:13.316470Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:24:13.317013Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:24:13.372803Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:13.372940Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1790:2560], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:13.381117Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:24:13.390179Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1838:2585] 2025-06-24T21:24:13.391585Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1838:2585], schemeshard id = 72075186224037897 2025-06-24T21:24:13.397077Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:24:13.418996Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:24:13.419067Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:24:13.419211Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:24:13.435854Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:13.445124Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:24:13.445311Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:24:13.641000Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:24:13.833883Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:24:13.926707Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:24:14.424335Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:14.699386Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2143:3020], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:14.699519Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:14.714215Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:24:14.836017Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2793];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:24:14.836256Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2793];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:24:14.836568Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2793];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:24:14.836703Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2217:2793];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:24:14.836844Z node 2 :TX_COLUMNSHARD WARN: ... ats size# 51 2025-06-24T21:26:39.266777Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-24T21:26:40.548776Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:26:40.548880Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:26:40.548945Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-24T21:26:40.548985Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:26:40.549338Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:26:40.552712Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:26:40.556521Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7211:5293], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:40.556606Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7221:5298], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:40.556671Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:40.566824Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:26:40.614859Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7225:5301], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-24T21:26:40.824774Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7321:5347] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:26:40.889311Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:7350:5362]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:40.889562Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:26:40.889641Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:7352:5364] 2025-06-24T21:26:40.889698Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:7352:5364] 2025-06-24T21:26:40.890004Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7353:5365] 2025-06-24T21:26:40.890149Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7353:5365], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:26:40.890206Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:26:40.890344Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7352:5364], server id = [2:7353:5365], tablet id = 72075186224037894, status = OK 2025-06-24T21:26:40.890399Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:26:40.890497Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:7350:5362], StatRequests.size() = 1 2025-06-24T21:26:41.027632Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ODE2NDQ4MGQtMmIxNzE2YWMtNzFmNDI3MGUtY2Q5NmM1ZDI=, TxId: 2025-06-24T21:26:41.027711Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ODE2NDQ4MGQtMmIxNzE2YWMtNzFmNDI3MGUtY2Q5NmM1ZDI=, TxId: 2025-06-24T21:26:41.028119Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:26:41.042179Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:26:41.042264Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:26:41.107354Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:26:41.107457Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:26:41.172628Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7352:5364], schemeshard count = 1 2025-06-24T21:26:43.533844Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:26:43.533926Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:26:43.533981Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-24T21:26:43.534067Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:26:43.537396Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:26:43.554679Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:26:43.555187Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:26:43.555271Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:26:43.556134Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:26:43.580657Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:26:43.580909Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-24T21:26:43.581545Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7461:5424], server id = [2:7462:5425], tablet id = 72075186224037899, status = OK 2025-06-24T21:26:43.581959Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7461:5424], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:26:43.585598Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:26:43.585717Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:26:43.586066Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:26:43.586271Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:26:43.586466Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7461:5424], server id = [2:7462:5425], tablet id = 72075186224037899 2025-06-24T21:26:43.586507Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:26:43.586749Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:26:43.589572Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:26:43.621307Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7482:5444]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:43.621489Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:26:43.621518Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7482:5444], StatRequests.size() = 1 2025-06-24T21:26:43.712863Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YWUzZjBlY2MtN2ZiZTkwZWQtMmZhMTg2MTItMzQ5OWExMWM=, TxId: 2025-06-24T21:26:43.712925Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YWUzZjBlY2MtN2ZiZTkwZWQtMmZhMTg2MTItMzQ5OWExMWM=, TxId: 2025-06-24T21:26:43.713503Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:26:43.714438Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:7495:5541]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:43.714656Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:26:43.714696Z node 1 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:26:43.716406Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:26:43.716477Z node 1 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:26:43.716534Z node 1 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:26:43.725549Z node 1 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 >> SlowTopicAutopartitioning::CDC_Write [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoNodes [GOOD] Test command err: 2025-06-24T21:24:07.663749Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:493:2375], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:24:07.663921Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:07.663986Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004823/r3tmp/tmpv4BG4d/pdisk_1.dat 2025-06-24T21:24:08.102108Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19308, node 1 2025-06-24T21:24:08.362572Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:08.362638Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:08.362667Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:08.363073Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:08.369023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:08.481880Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:08.482008Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:08.500751Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30418 2025-06-24T21:24:09.127429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:15.602144Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:24:15.602862Z node 3 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 3 2025-06-24T21:24:15.665835Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:15.665960Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:15.666333Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:15.666394Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:15.729593Z node 1 
:HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:24:15.729985Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-24T21:24:15.732596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:15.733885Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:15.945477Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:15.979897Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.980412Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.980899Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.981040Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.981314Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.981426Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.981536Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.981628Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.981743Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:16.169781Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:16.169881Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:16.170607Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:16.170679Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:16.183496Z node 2 :HIVE WARN: hive_impl.cpp:781: HIVE#72075186224037888 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-24T21:24:16.184284Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:16.185699Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:16.339356Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:16.396534Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:24:16.396661Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:24:16.437138Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: 
[72075186224037894] TTxInitSchema::Complete 2025-06-24T21:24:16.437754Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:24:16.437957Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:24:16.438010Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:24:16.438064Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:24:16.438133Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:24:16.438183Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:24:16.438237Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:24:16.438692Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:24:16.464844Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:16.464944Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:2168:2559], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:16.470835Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2182:2569] 2025-06-24T21:24:16.474579Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2200:2579] 2025-06-24T21:24:16.475497Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2200:2579], schemeshard id = 72075186224037897 2025-06-24T21:24:16.484724Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:24:16.506103Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:24:16.506173Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:24:16.506248Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:24:16.522139Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976725657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:16.529785Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976725657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:24:16.529925Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976725657 2025-06-24T21:24:16.745103Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:24:16.959530Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. 
Request: create. Transaction completed: 281474976725657. Doublechecking... 2025-06-24T21:24:17.069563Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:24:17.645138Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:17.645235Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:17.897261Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2557:3038], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:17.897400Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:17.915135Z node 2 :F ... 132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-24T21:26:36.380363Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 120 ], ReplyToActorId[ [2:7384:4553]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:36.380635Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 120 ] 2025-06-24T21:26:36.380669Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [2:7384:4553], StatRequests.size() = 1 2025-06-24T21:26:37.125853Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:26:37.125944Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:26:37.126009Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-24T21:26:37.126101Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:26:37.126622Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:26:37.139101Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:26:37.143107Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7411:4572], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:37.143233Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7421:4577], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:37.143350Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:37.158250Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976725658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:26:37.216802Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7425:4580], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976725658 completed, doublechecking } 2025-06-24T21:26:37.396977Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7518:4626] txid# 281474976725659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:26:37.453592Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:7548:4642]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:37.453851Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-24T21:26:37.453909Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:7548:4642], StatRequests.size() = 1 2025-06-24T21:26:37.604047Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NDNjZjJkMy1iN2M5NzgwOC01MDYwZTI5Yi0yODk2ODc4NQ==, TxId: 2025-06-24T21:26:37.604147Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NDNjZjJkMy1iN2M5NzgwOC01MDYwZTI5Yi0yODk2ODc4NQ==, TxId: 2025-06-24T21:26:37.604983Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:26:37.630885Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:26:37.630965Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:26:38.140993Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:7583:4652]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:38.141210Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-24T21:26:38.141240Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:7583:4652], StatRequests.size() = 1 2025-06-24T21:26:39.512200Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:7624:4667]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:39.512437Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-24T21:26:39.512466Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:7624:4667], StatRequests.size() = 1 2025-06-24T21:26:40.266845Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:26:40.277920Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:26:40.278000Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:26:40.278061Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-24T21:26:40.278101Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. 
Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:26:40.278726Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-24T21:26:40.282010Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:26:40.297794Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MTczZjY0NGEtOGQyNjI0ZGMtZDY1NGJkZDYtYjVlNWE0YzY=, TxId: 2025-06-24T21:26:40.297889Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MTczZjY0NGEtOGQyNjI0ZGMtZDY1NGJkZDYtYjVlNWE0YzY=, TxId: 2025-06-24T21:26:40.298737Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:26:40.313562Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:26:40.313644Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:26:40.968128Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:7692:4698]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:40.968469Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-24T21:26:40.968528Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:7692:4698], StatRequests.size() = 1 2025-06-24T21:26:42.427068Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 125 ], ReplyToActorId[ [2:7737:4714]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:42.427429Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 125 ] 2025-06-24T21:26:42.427480Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 125, ReplyToActorId = [2:7737:4714], StatRequests.size() = 1 2025-06-24T21:26:43.176207Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-24T21:26:43.176605Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:26:43.176949Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:26:43.187897Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:26:43.187954Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-24T21:26:43.818495Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 126 ], ReplyToActorId[ [2:7774:4730]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:43.818705Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 126 ] 2025-06-24T21:26:43.818734Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 126, ReplyToActorId = [2:7774:4730], StatRequests.size() = 1 2025-06-24T21:26:43.819223Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [3:7776:2977]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:43.821982Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:26:43.822099Z node 3 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [3:7786:2981] 2025-06-24T21:26:43.822149Z node 3 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [3:7786:2981] 2025-06-24T21:26:43.824751Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7794:4732] 2025-06-24T21:26:43.825214Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:7786:2981], server id = [2:7794:4732], tablet id = 72075186224037894, status = OK 2025-06-24T21:26:43.825566Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7794:4732], node id = 3, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:26:43.825632Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 3, schemeshard count = 1 2025-06-24T21:26:43.826137Z node 3 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 3 2025-06-24T21:26:43.826219Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [3:7776:2977], StatRequests.size() = 1 >> TPQTest::TestReadRuleVersions [GOOD] >> TPQTest::TestPartitionedBlobFails [GOOD] >> TPQTest::TestReadAndDeleteConsumer >> THealthCheckTest::HealthCheckConfigUpdate [GOOD] >> THealthCheckTest::TestBootingTabletIsNotDead [GOOD] >> TCacheTestWithDrops::LookupErrorUponEviction [GOOD] |97.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestGetWithMustRestoreFirst |97.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestSync >> ReadOnlyVDisk::TestWrites >> ReadOnlyVDisk::TestGarbageCollect ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::HealthCheckConfigUpdate [GOOD] Test command err: 2025-06-24T21:26:11.638505Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:11.639273Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:11.639384Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:11.639437Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:11.639899Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:11.639946Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043da/r3tmp/tmpT5urkX/pdisk_1.dat 2025-06-24T21:26:11.988249Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8356, node 1 TClient is connected to server localhost:21787 2025-06-24T21:26:12.346415Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:12.346489Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:12.346533Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:12.346949Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:18.615879Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:446:2372], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:18.616197Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:18.616372Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:18.617755Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:18.618140Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:18.618308Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043da/r3tmp/tmpu3JvqU/pdisk_1.dat 2025-06-24T21:26:18.936489Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28790, node 3 TClient is connected to server localhost:17870 2025-06-24T21:26:19.280971Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:19.281045Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:19.281084Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:19.281441Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:25.984643Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:25.985115Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:25.985199Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:25.985369Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:25.985519Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:25.985654Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043da/r3tmp/tmp3E9mFP/pdisk_1.dat 2025-06-24T21:26:26.268677Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1147, node 5 TClient is connected to server localhost:6686 2025-06-24T21:26:26.594830Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:26.594893Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:26.594929Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:26.595655Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-5" reason: "YELLOW-e9e2-1231c6b1-6" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-6" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 6 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 5 host: "::1" port: 12001 } 2025-06-24T21:26:34.070369Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:34.070772Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:34.071246Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:34.071417Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:34.071486Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:34.071666Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043da/r3tmp/tmpPxkqMw/pdisk_1.dat 2025-06-24T21:26:34.396155Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14256, node 7 TClient is connected to server localhost:21700 2025-06-24T21:26:34.877289Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:34.877362Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:34.877417Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:34.878746Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-7" reason: "YELLOW-e9e2-1231c6b1-8" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-7" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 7 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-8" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 8 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 7 host: "::1" port: 12001 } 2025-06-24T21:26:39.456976Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:411:2371], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:39.457318Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:39.457396Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043da/r3tmp/tmpdQ7tvk/pdisk_1.dat 2025-06-24T21:26:39.746983Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19826, node 9 TClient is connected to server localhost:21370 2025-06-24T21:26:40.138579Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:40.138637Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:40.138679Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:40.138989Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:44.759224Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:411:2371], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:44.759421Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:44.759559Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043da/r3tmp/tmpmCVRhM/pdisk_1.dat 2025-06-24T21:26:45.116213Z node 11 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1769, node 11 TClient is connected to server localhost:20765 2025-06-24T21:26:45.529484Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:45.529572Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:45.529629Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:45.531586Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTestWithDrops::LookupErrorUponEviction [GOOD] Test command err: 2025-06-24T21:26:15.981206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:15.981270Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:16.184411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:26:16.204434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-24T21:26:16.205933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:67) FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T21:26:16.237048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 2025-06-24T21:26:16.785691Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:16.785764Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-24T21:26:16.830290Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::TestBootingTabletIsNotDead [GOOD] Test command err: 2025-06-24T21:26:11.665946Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:11.666405Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:11.666473Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:11.666510Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:11.666778Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:11.666817Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e8/r3tmp/tmpTZgT8X/pdisk_1.dat 2025-06-24T21:26:11.997809Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 32065, node 1 TClient is connected to server localhost:4997 2025-06-24T21:26:12.320686Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:12.320744Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:12.320782Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:12.321116Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:18.154629Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:446:2372], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:18.155015Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:18.155266Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:18.156969Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:18.157381Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:18.157516Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e8/r3tmp/tmpO3w9c6/pdisk_1.dat 2025-06-24T21:26:18.453371Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16662, node 3 TClient is connected to server localhost:4602 2025-06-24T21:26:18.742363Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:18.742428Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:18.742457Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:18.742879Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:25.919075Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:25.919549Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:25.919630Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:25.919801Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:25.919988Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:25.920169Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e8/r3tmp/tmpSyEWQx/pdisk_1.dat 2025-06-24T21:26:26.168569Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5411, node 5 TClient is connected to server localhost:30696 2025-06-24T21:26:26.399988Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:26.400028Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:26.400054Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:26.400633Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:34.346388Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:34.346743Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:34.347204Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:34.347342Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:34.347400Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:34.347574Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e8/r3tmp/tmpjTK3SV/pdisk_1.dat 2025-06-24T21:26:34.693356Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18963, node 7 TClient is connected to server localhost:30328 2025-06-24T21:26:35.003980Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:35.004043Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:35.004076Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:35.004986Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:41.685499Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:366:2220], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:41.685640Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:26:41.685902Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:41.686228Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:703:2318], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:26:41.686448Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:26:41.686562Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0043e8/r3tmp/tmpqYbyqY/pdisk_1.dat 2025-06-24T21:26:42.004452Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31622, node 9 TClient is connected to server localhost:12685 2025-06-24T21:26:45.254111Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:26:45.254175Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:26:45.254210Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:26:45.255274Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:26:45.298423Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:26:45.298604Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:26:45.338878Z node 9 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 11 Cookie 11 2025-06-24T21:26:45.340208Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-10" reason: "YELLOW-e9e2-1231c6b1-11" reason: "YELLOW-e9e2-1231c6b1-9" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-9" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 9 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-10" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 10 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-11" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 11 host: "::1" port: 12003 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 9 host: "::1" port: 12001 } >> TSchemeShardTest::CreateTable >> TSchemeShardTest::InitRootAgain >> TSchemeShardTest::RmDirTwice >> TSchemeShardTest::DropIndexedTableAndForceDropSimultaneously >> TSchemeShardTest::PathName >> TSchemeShardTest::CacheEffectiveACL [GOOD] >> TSchemeShardTest::ConsistentCopyTable >> TSchemeShardPgTypesInTables::CreateTableWithPgTypeColumn-EnableTablePgTypes-false >> 
TSchemeShardTest::CreateIndexedTable |97.5%| [TA] $(B)/ydb/core/tx/scheme_board/ut_cache/test-results/unittest/{meta.json ... results_accumulator.log} |97.5%| [TA] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_cache/test-results/unittest/{meta.json ... results_accumulator.log} |97.5%| [TA] $(B)/ydb/core/health_check/ut/test-results/unittest/{meta.json ... results_accumulator.log} |97.5%| [TA] {RESULT} $(B)/ydb/core/health_check/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TSubscriberTest::InvalidNotification >> TSubscriberTest::Sync >> TSubscriberTest::ReconnectOnFailure >> TSubscriberTest::SyncPartial >> TSubscriberTest::StrongNotificationAfterCommit >> TSubscriberTest::NotifyDelete >> TSubscriberTest::SyncWithOutdatedReplica >> TSubscriberCombinationsTest::MigratedPathRecreation >> TSubscriberTest::NotifyUpdate >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning-UseSink+UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning+UseSink+UseDataQuery >> TSchemeShardPgTypesInTables::CreateTableWithPgTypeColumn-EnableTablePgTypes-false [GOOD] >> TSchemeShardPgTypesInTables::CreateTableWithPgTypeColumn-EnableTablePgTypes-true >> TSchemeShardTest::InitRootAgain [GOOD] >> TSchemeShardTest::InitRootWithOwner >> TSubscriberTest::NotifyUpdate [GOOD] >> TSubscriberTest::SyncPartial [GOOD] >> TSubscriberTest::StrongNotificationAfterCommit [GOOD] >> TSubscriberTest::NotifyDelete [GOOD] >> TSubscriberTest::InvalidNotification [GOOD] >> TSubscriberTest::Sync [GOOD] >> TSubscriberTest::SyncWithOutdatedReplica [GOOD] >> TSubscriberCombinationsTest::MigratedPathRecreation [GOOD] >> TSubscriberTest::Boot >> TSubscriberTest::ReconnectOnFailure [GOOD] >> TSchemeShardTest::RmDirTwice [GOOD] >> TSchemeShardTest::TopicMeteringMode >> TSchemeShardTest::PathName [GOOD] >> TSchemeShardTest::PathName_SetLocale >> TestDataErasure::SimpleTestForTables ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::StrongNotificationAfterCommit [GOOD] Test command err: 2025-06-24T21:26:48.930286Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:36:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T21:26:48.932802Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-24T21:26:48.932936Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-24T21:26:48.932980Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-06-24T21:26:48.933038Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2025-06-24T21:26:48.933421Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2025-06-24T21:26:48.933480Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:36:2066][path] Set up state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.933560Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: 
[main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:39:2066] 2025-06-24T21:26:48.933607Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.934034Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-24T21:26:48.934105Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2025-06-24T21:26:48.934149Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:858: [main][1:36:2066][path] Update to strong state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.934360Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-24T21:26:48.934414Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2025-06-24T21:26:48.934454Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::InvalidNotification [GOOD] Test command err: 2025-06-24T21:26:48.930271Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:36:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T21:26:48.933093Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-24T21:26:48.933222Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-24T21:26:48.933272Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-06-24T21:26:48.933346Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2025-06-24T21:26:48.933438Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2025-06-24T21:26:48.933496Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:36:2066][path] Set up state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.933578Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: 
subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:39:2066] 2025-06-24T21:26:48.933628Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.935736Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { PathId: [OwnerId: 1, LocalPathId: 1] Version: 0 }: sender# [1:35:2065] 2025-06-24T21:26:48.935843Z node 1 :SCHEME_BOARD_SUBSCRIBER ERROR: subscriber.cpp:821: [main][1:36:2066][path] Suspicious NKikimrSchemeBoard.TEvNotify { PathId: [OwnerId: 1, LocalPathId: 1] Version: 0 }: sender# [1:35:2065] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::NotifyUpdate [GOOD] Test command err: 2025-06-24T21:26:48.930273Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:36:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T21:26:48.932820Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-24T21:26:48.932947Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-24T21:26:48.932988Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-06-24T21:26:48.933073Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2025-06-24T21:26:48.933453Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2025-06-24T21:26:48.933517Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:36:2066][path] Set up state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.933589Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:39:2066] 2025-06-24T21:26:48.933642Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.935853Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:3:2050] 2025-06-24T21:26:48.935941Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:37:2066] 2025-06-24T21:26:48.936000Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:858: 
[main][1:36:2066][path] Update to strong state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } >> TSubscriberTest::Boot [GOOD] >> TSchemeShardTest::DropIndexedTableAndForceDropSimultaneously [GOOD] >> TSchemeShardTest::DependentOps ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::SyncPartial [GOOD] Test command err: 2025-06-24T21:26:48.930256Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:36:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T21:26:48.932130Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-24T21:26:48.932253Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-24T21:26:48.932294Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-06-24T21:26:48.932356Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2025-06-24T21:26:48.933478Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2025-06-24T21:26:48.933593Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:36:2066][path] Set up state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.933684Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:39:2066] 2025-06-24T21:26:48.933744Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.934033Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:36:2066][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:35:2065], cookie# 1 2025-06-24T21:26:48.934200Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:37:2066], cookie# 1 2025-06-24T21:26:48.934273Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:38:2066], cookie# 1 2025-06-24T21:26:48.934325Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:39:2066], cookie# 1 2025-06-24T21:26:48.934390Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: 
sender# [1:6:2053], cookie# 1 2025-06-24T21:26:48.934484Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:9:2056], cookie# 1 2025-06-24T21:26:48.934577Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:37:2066], cookie# 1 2025-06-24T21:26:48.934626Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:36:2066][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2025-06-24T21:26:48.934678Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2025-06-24T21:26:48.934752Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.934839Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:38:2066], cookie# 1 2025-06-24T21:26:48.934890Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:36:2066][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 1 2025-06-24T21:26:48.934928Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:39:2066], cookie# 1 2025-06-24T21:26:48.934974Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:36:2066][path] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2025-06-24T21:26:48.935118Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:36:2066][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:35:2065], cookie# 2 2025-06-24T21:26:48.935288Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:37:2066], cookie# 2 2025-06-24T21:26:48.935322Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:36:2066][path] Sync is in progress: cookie# 2, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2025-06-24T21:26:48.935363Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:38:2066], cookie# 2 2025-06-24T21:26:48.935419Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:39:2066], cookie# 2 2025-06-24T21:26:48.935474Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:9:2056], cookie# 2 2025-06-24T21:26:48.935521Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:38:2066], cookie# 2 
2025-06-24T21:26:48.935572Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:36:2066][path] Sync is in progress: cookie# 2, ring group# 0, size# 3, half# 1, successes# 0, failures# 2 2025-06-24T21:26:48.935620Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2025-06-24T21:26:48.935667Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.935705Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:39:2066], cookie# 2 2025-06-24T21:26:48.935748Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:986: [main][1:36:2066][path] Sync is done in the ring group: cookie# 2, ring group# 0, size# 3, half# 1, successes# 1, failures# 2, partial# 1 2025-06-24T21:26:48.935787Z node 1 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:991: [main][1:36:2066][path] Sync is incomplete in one of the ring groups: cookie# 2 2025-06-24T21:26:48.935908Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:36:2066][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:35:2065], cookie# 3 2025-06-24T21:26:48.936007Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:37:2066], cookie# 3 2025-06-24T21:26:48.936047Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:36:2066][path] Sync is in progress: cookie# 3, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2025-06-24T21:26:48.936080Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:38:2066], cookie# 3 2025-06-24T21:26:48.936104Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:36:2066][path] Sync is in progress: cookie# 3, ring group# 0, size# 3, half# 1, successes# 0, failures# 2 2025-06-24T21:26:48.936127Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:39:2066], cookie# 3 2025-06-24T21:26:48.936188Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:39:2066], cookie# 3 2025-06-24T21:26:48.936210Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:986: [main][1:36:2066][path] Sync is done in the ring group: cookie# 3, ring group# 0, size# 3, half# 1, successes# 0, failures# 3, partial# 1 2025-06-24T21:26:48.936226Z node 1 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:991: [main][1:36:2066][path] Sync is incomplete in one of the ring groups: cookie# 3 2025-06-24T21:26:48.936267Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:39:2066] 2025-06-24T21:26:48.936296Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:36:2066][path] Ignore empty state: owner# 
[1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::ReconnectOnFailure [GOOD] Test command err: 2025-06-24T21:26:48.930183Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][2:36:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T21:26:48.936078Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-24T21:26:48.936204Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-24T21:26:48.936258Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-06-24T21:26:48.936387Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:37:2066] 2025-06-24T21:26:48.936531Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:38:2066] 2025-06-24T21:26:48.936585Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][2:36:2066][path] Set up state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.936647Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:39:2066] 2025-06-24T21:26:48.936696Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][2:36:2066][path] Ignore empty state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.939200Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:37:2066] 2025-06-24T21:26:48.939281Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][2:36:2066][path] Ignore empty state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.939419Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:38:2066] 2025-06-24T21:26:48.939463Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][2:36:2066][path] Ignore empty state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.939496Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: 
sender# [2:39:2066] 2025-06-24T21:26:48.939522Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][2:36:2066][path] Ignore empty state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.950782Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:47:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-24T21:26:48.950944Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:37:2066] 2025-06-24T21:26:48.951030Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][2:36:2066][path] Ignore empty state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.951185Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:48:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-24T21:26:48.951240Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:49:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-06-24T21:26:48.951331Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:38:2066] 2025-06-24T21:26:48.951371Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][2:36:2066][path] Ignore empty state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.951420Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:39:2066] 2025-06-24T21:26:48.951441Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][2:36:2066][path] Ignore empty state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.952030Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:47:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:3:2050] 2025-06-24T21:26:48.952102Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [2:37:2066] 2025-06-24T21:26:48.952170Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:858: [main][2:36:2066][path] Update to strong state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> 
TSubscriberTest::NotifyDelete [GOOD] Test command err: 2025-06-24T21:26:48.936270Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:37:2067][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T21:26:48.938428Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:3:2050] 2025-06-24T21:26:48.938557Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:6:2053] 2025-06-24T21:26:48.938646Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:9:2056] 2025-06-24T21:26:48.938735Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:38:2067] 2025-06-24T21:26:48.938805Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:39:2067] 2025-06-24T21:26:48.938868Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:37:2067][path] Set up state: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.938968Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:40:2067] 2025-06-24T21:26:48.939029Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:37:2067][path] Path was already updated: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.939347Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:9:2056] 2025-06-24T21:26:48.939470Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:3:2050] 2025-06-24T21:26:48.939540Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:6:2053] 2025-06-24T21:26:48.939670Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:40:2067] 2025-06-24T21:26:48.939776Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:858: [main][1:37:2067][path] Path was updated to new version: owner# [1:35:2065], state# { Deleted: 0 
Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.939866Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:38:2067] 2025-06-24T21:26:48.939933Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:37:2067][path] Path was already updated: owner# [1:35:2065], state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.939985Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:39:2067] 2025-06-24T21:26:48.940033Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:37:2067][path] Path was already updated: owner# [1:35:2065], state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::Sync [GOOD] Test command err: 2025-06-24T21:26:48.936212Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:37:2067][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T21:26:48.938362Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:3:2050] 2025-06-24T21:26:48.938596Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:6:2053] 2025-06-24T21:26:48.938649Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:9:2056] 2025-06-24T21:26:48.938708Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:38:2067] 2025-06-24T21:26:48.938781Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:39:2067] 2025-06-24T21:26:48.938843Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:37:2067][path] Set up state: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 
2025-06-24T21:26:48.938932Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:40:2067] 2025-06-24T21:26:48.938986Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:37:2067][path] Path was already updated: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.939096Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:37:2067][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:35:2065], cookie# 1 2025-06-24T21:26:48.939221Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:38:2067], cookie# 1 2025-06-24T21:26:48.939283Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:39:2067], cookie# 1 2025-06-24T21:26:48.939392Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:43:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:40:2067], cookie# 1 2025-06-24T21:26:48.939456Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:3:2050], cookie# 1 2025-06-24T21:26:48.939492Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:6:2053], cookie# 1 2025-06-24T21:26:48.939520Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:43:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:9:2056], cookie# 1 2025-06-24T21:26:48.939575Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:38:2067], cookie# 1 2025-06-24T21:26:48.939619Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:37:2067][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T21:26:48.939657Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:39:2067], cookie# 1 2025-06-24T21:26:48.939682Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:37:2067][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T21:26:48.939713Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:40:2067], cookie# 1 2025-06-24T21:26:48.939773Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:37:2067][path] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::SyncWithOutdatedReplica [GOOD] Test command err: 2025-06-24T21:26:48.936286Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:37:2067][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T21:26:48.938020Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 2] Version: 2 }: sender# [1:3:2050] 2025-06-24T21:26:48.938134Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [1:6:2053] 2025-06-24T21:26:48.938167Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [1:9:2056] 2025-06-24T21:26:48.938217Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 2] Version: 2 }: sender# [1:38:2067] 2025-06-24T21:26:48.938274Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [1:39:2067] 2025-06-24T21:26:48.938319Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:37:2067][path] Set up state: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 2, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 2, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.938402Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [1:40:2067] 2025-06-24T21:26:48.938447Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:37:2067][path] Path was already updated: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 2, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 2, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 2, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 2, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.938557Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:37:2067][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:35:2065], cookie# 1 2025-06-24T21:26:48.938638Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:38:2067], cookie# 1 2025-06-24T21:26:48.938686Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:39:2067], cookie# 1 2025-06-24T21:26:48.938785Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:43:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:40:2067], cookie# 1 2025-06-24T21:26:48.938835Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:3:2050], 
cookie# 1 2025-06-24T21:26:48.938879Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:6:2053], cookie# 1 2025-06-24T21:26:48.938900Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:43:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:9:2056], cookie# 1 2025-06-24T21:26:48.938935Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:38:2067], cookie# 1 2025-06-24T21:26:48.938984Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:37:2067][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-24T21:26:48.939024Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:39:2067], cookie# 1 2025-06-24T21:26:48.939077Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:37:2067][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-24T21:26:48.939123Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:40:2067], cookie# 1 2025-06-24T21:26:48.939199Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:37:2067][path] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 >> TSchemeShardPgTypesInTables::CreateTableWithPgTypeColumn-EnableTablePgTypes-true [GOOD] >> TSchemeShardTest::AlterTableAndConcurrentSplit >> TSchemeShardTest::CreateTable [GOOD] >> TSchemeShardTest::CreateTableWithDate >> TSchemeShardTest::InitRootWithOwner [GOOD] >> TSchemeShardTest::MkRmDir >> TSchemeShardTest::CreateIndexedTable [GOOD] >> TSchemeShardTest::CreateIndexedTableAndForceDrop >> TSchemeShardTest::PathName_SetLocale [GOOD] >> TSchemeShardTest::ModifyACL ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::Boot [GOOD] Test command err: 2025-06-24T21:26:48.932291Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:3:2050] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:36:2066] 2025-06-24T21:26:48.932367Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:3:2050] Successful handshake: owner# 800, generation# 1 2025-06-24T21:26:48.932585Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:3:2050] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:36:2066] 2025-06-24T21:26:48.932624Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:3:2050] Commit generation: owner# 800, generation# 1 2025-06-24T21:26:48.932686Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Generation: 1 }: sender# [1:37:2067] 2025-06-24T21:26:48.932721Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:6:2053] Successful handshake: owner# 900, generation# 1 2025-06-24T21:26:48.933003Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 900 Generation: 1 }: sender# [1:37:2067] 2025-06-24T21:26:48.933043Z node 1 
:SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:6:2053] Commit generation: owner# 900, generation# 1 2025-06-24T21:26:48.933149Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:39:2069][/root/db/dir_inside] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T21:26:48.933681Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/db/dir_inside DomainOwnerId: 1 }: sender# [1:43:2069] 2025-06-24T21:26:48.933725Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:3:2050] Upsert description: path# /root/db/dir_inside 2025-06-24T21:26:48.936540Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:3:2050] Subscribe: subscriber# [1:43:2069], path# /root/db/dir_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-24T21:26:48.936776Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/db/dir_inside DomainOwnerId: 1 }: sender# [1:44:2069] 2025-06-24T21:26:48.936808Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:6:2053] Upsert description: path# /root/db/dir_inside 2025-06-24T21:26:48.936937Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:6:2053] Subscribe: subscriber# [1:44:2069], path# /root/db/dir_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-24T21:26:48.937120Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/db/dir_inside DomainOwnerId: 1 }: sender# [1:45:2069] 2025-06-24T21:26:48.937147Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:9:2056] Upsert description: path# /root/db/dir_inside 2025-06-24T21:26:48.937188Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:9:2056] Subscribe: subscriber# [1:45:2069], path# /root/db/dir_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-24T21:26:48.937273Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:3:2050] 2025-06-24T21:26:48.937334Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:43:2069] 2025-06-24T21:26:48.937382Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:44:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:6:2053] 2025-06-24T21:26:48.937426Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:44:2069] 2025-06-24T21:26:48.937462Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:45:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:9:2056] 2025-06-24T21:26:48.937533Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:9:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:45:2069] 2025-06-24T21:26:48.937628Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:39:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:40:2069] 2025-06-24T21:26:48.937713Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:39:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:41:2069] 
2025-06-24T21:26:48.937762Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:39:2069][/root/db/dir_inside] Set up state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:48.937820Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:39:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:42:2069] 2025-06-24T21:26:48.937858Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:39:2069][/root/db/dir_inside] Ignore empty state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } =========== !argsLeft.IsDeletion 2025-06-24T21:26:48.938141Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:3:2050] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:36:2066], cookie# 0, event size# 118 2025-06-24T21:26:48.938190Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:3:2050] Update description: path# /root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111], deletion# false 2025-06-24T21:26:48.945677Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:3:2050] Upsert description: path# /root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111], pathDescription# {Status StatusSuccess, Path /root/db/dir_inside, PathId [OwnerId: 800, LocalPathId: 1111], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 1], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 67} 2025-06-24T21:26:48.945950Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside PathId: [OwnerId: 800, LocalPathId: 1111] Version: 1 }: sender# [1:3:2050] 2025-06-24T21:26:48.946052Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 1 }: sender# [1:43:2069] 2025-06-24T21:26:48.946117Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:39:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside PathId: [OwnerId: 800, LocalPathId: 1111] Version: 1 }: sender# [1:40:2069] 2025-06-24T21:26:48.946187Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:858: [main][1:39:2069][/root/db/dir_inside] Update to strong state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 800, LocalPathId: 1111], Version: 1) DomainId: [OwnerId: 800, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } =========== argsLeft.GetSuperId() < argsRight.GetSuperId() =========== !argsRight.IsDeletion 2025-06-24T21:26:48.946461Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 900 Generation: 1 }: sender# [1:37:2067], cookie# 0, event size# 117 2025-06-24T21:26:48.946511Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:6:2053] Update description: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], deletion# false 2025-06-24T21:26:48.946579Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:6:2053] Upsert description: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], 
pathDescription# {Status StatusSuccess, Path /root/db/dir_inside, PathId [OwnerId: 900, LocalPathId: 11], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 1], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 67} 2025-06-24T21:26:48.946713Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:44:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside PathId: [OwnerId: 900, LocalPathId: 11] Version: 1 }: sender# [1:6:2053] 2025-06-24T21:26:48.946762Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 1 }: sender# [1:44:2069] 2025-06-24T21:26:48.946834Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:39:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside PathId: [OwnerId: 900, LocalPathId: 11] Version: 1 }: sender# [1:41:2069] 2025-06-24T21:26:48.946919Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:858: [main][1:39:2069][/root/db/dir_inside] Path was updated to new version: owner# [1:38:2068], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 800, LocalPathId: 1111], Version: 1) DomainId: [OwnerId: 800, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 900, LocalPathId: 11], Version: 1) DomainId: [OwnerId: 800, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:49.400143Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][3:36:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-24T21:26:49.400829Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:3:2050] 2025-06-24T21:26:49.400899Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:6:2053] 2025-06-24T21:26:49.400955Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:9:2056] 2025-06-24T21:26:49.401038Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:37:2066] 2025-06-24T21:26:49.401103Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:38:2066] 2025-06-24T21:26:49.401149Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][3:36:2066][path] Set up state: owner# [3:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-24T21:26:49.401230Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:39:2066] 2025-06-24T21:26:49.401307Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][3:36:2066][path] Ignore empty state: owner# [3:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } >> TSchemeShardTest::DependentOps [GOOD] >> TSchemeShardTest::DefaultColumnFamiliesWithNonCanonicName >> 
TSchemeShardTest::TopicMeteringMode [GOOD] >> TSchemeShardTest::Restart >> ReadOnlyVDisk::TestGetWithMustRestoreFirst [GOOD] >> TSchemeShardTest::MkRmDir [GOOD] >> TSchemeShardTest::DropTableTwice >> TSchemeShardTest::ModifyACL [GOOD] >> TSchemeShardTest::NameFormat |97.5%| [TA] $(B)/ydb/core/tx/scheme_board/ut_subscriber/test-results/unittest/{meta.json ... results_accumulator.log} |97.5%| [TA] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_subscriber/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeShardTest::CreateIndexedTableAndForceDrop [GOOD] >> TSchemeShardTest::CreateAlterTableWithCodec ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestGetWithMustRestoreFirst [GOOD] Test command err: RandomSeed# 9788060866151338944 === Trying to put and get a blob === SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #0 to read-only === Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Write 10 blobs, expect some VDisks refuse parts but writes go through === SEND TEvPut with key [1:1:1:0:0:32768:0] 2025-06-24T21:26:48.811228Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:131072:0] 2025-06-24T21:26:48.814772Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:3:0:0:32768:0] 2025-06-24T21:26:48.818497Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:4:0:0:131072:0] 2025-06-24T21:26:48.820476Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:5:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:6:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:7:0:0:32768:0] 2025-06-24T21:26:48.826174Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:7:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:8:0:0:131072:0] 2025-06-24T21:26:48.828219Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in 
read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:8:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:9:0:0:32768:0] 2025-06-24T21:26:48.830186Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:9:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:10:0:0:131072:0] 2025-06-24T21:26:48.832184Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:10:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 11 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:7:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:7:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:8:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:8:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:9:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:9:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:10:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:10:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Put 2 more VDisks to read-only === Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] === Write 10 more blobs, expect errors === SEND TEvPut with key [1:1:11:0:0:32768:0] 2025-06-24T21:26:50.479688Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-24T21:26:50.479784Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-24T21:26:50.479894Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 2025-06-24T21:26:50.480570Z 1 00h05m30.160512s :BS_PROXY_PUT ERROR: [e8f064356dff5e50] Result# TEvPutResult {Id# [1:1:11:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw 
optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} GroupId# 2181038080 Marker# BPP12 TEvPutResult: TEvPutResult {Id# [1:1:11:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:12:0:0:131072:0] 2025-06-24T21:26:50.481863Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-24T21:26:50.482195Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 2025-06-24T21:26:50.483168Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] TEvPutResult: TEvPutResult {Id# [1:1:12:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:12:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 4 Situations# SUUUUU } { OrderNumber# 5 Situations# USUUUU } { OrderNumber# 6 Situations# UUSUUU } { OrderNumber# 7 Situations# UUUSUU } { OrderNumber# 0 Situations# UUUUEU } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUEU } { OrderNumber# 3 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:13:0:0:32768:0] 2025-06-24T21:26:50.484599Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-24T21:26:50.485155Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 
2025-06-24T21:26:50.485765Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] TEvPutResult: TEvPutResult {Id# [1:1:13:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:13:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 3 Situations# PUUUUU } { OrderNumber# 4 Situations# UPUUUU } { OrderNumber# 5 Situations# UUPUUU } { OrderNumber# 6 Situations# UUUPUU } { OrderNumber# 7 Situations# UUUUPU } { OrderNumber# 0 Situations# UUUUUE } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUUE } ] " ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:14:0:0:131072:0] 2025-06-24T21:26:50.486719Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-24T21:26:50.487545Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-24T21:26:50.488032Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] TEvPutResult: TEvPutResult {Id# [1:1:14:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:14:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 2 Situations# EUUUUU } { OrderNumber# 3 Situations# UPUUUU } { OrderNumber# 4 Situations# UUPUUU } { OrderNumber# 5 Situations# UUUPUU } { OrderNumber# 6 Situations# UUUUPU } { OrderNumber# 7 Situations# UUUUUP } { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:15:0:0:32768:0] 2025-06-24T21:26:50.488883Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-24T21:26:50.488945Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 2025-06-24T21:26:50.489561Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:15:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:15:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 
ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 1 Situations# EUUUUU } { OrderNumber# 2 Situations# UEUUUU } { OrderNumber# 3 Situations# UUSUUU } { OrderNumber# 4 Situations# UUUSUU } { OrderNumber# 5 Situations# UUUUSU } { OrderNumber# 6 Situations# UUUUUS } { OrderNumber# 7 Situations# USUUUU } { OrderNumber# 0 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:16:0:0:131072:0] 2025-06-24T21:26:50.490865Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-24T21:26:50.490925Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 2025-06-24T21:26:50.491763Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:16:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:16:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 1 Situations# EUUUUU } { OrderNumber# 2 Situations# UEUUUU } { OrderNumber# 3 Situations# UUSUUU } { OrderNumber# 4 Situations# UUUSUU } { OrderNumber# 5 Situations# UUUUSU } { OrderNumber# 6 Situations# UUUUUS } { OrderNumber# 7 Situations# USUUUU } { OrderNumber# 0 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:17:0:0:32768:0] 2025-06-24T21:26:50.493140Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-24T21:26:50.493379Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-24T21:26:50.493441Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] TEvPutResult: TEvPutResult {Id# [1:1:17:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:17:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# UEUUUU } { OrderNumber# 2 Situations# UUEUUU } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUSU } { OrderNumber# 5 Situations# UUUUUS } { OrderNumber# 6 Situations# SUUUUU } { OrderNumber# 7 Situations# UUSUUU } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:18:0:0:131072:0] 2025-06-24T21:26:50.494974Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-24T21:26:50.495108Z 2 
00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 2025-06-24T21:26:50.495223Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] TEvPutResult: TEvPutResult {Id# [1:1:18:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:18:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 7 Situations# SUUUUU } { OrderNumber# 0 Situations# UEUUUU } { OrderNumber# 1 Situations# UUEUUU } { OrderNumber# 2 Situations# UUUEUU } { OrderNumber# 3 Situations# UUUUSU } { OrderNumber# 4 Situations# UUUUUS } { OrderNumber# 5 Situations# USUUUU } { OrderNumber# 6 Situations# UUSUUU } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:19:0:0:32768:0] 2025-06-24T21:26:50.496965Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-24T21:26:50.497118Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-24T21:26:50.497176Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] TEvPutResult: TEvPutResult {Id# [1:1:19:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:19:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 6 Situations# SUUUUU } { OrderNumber# 7 Situations# USUUUU } { OrderNumber# 0 Situations# UUEUUU } { OrderNumber# 1 Situations# UUUEUU } { OrderNumber# 2 Situations# UUUUEU } { OrderNumber# 3 Situations# UUUUUS } { OrderNumber# 4 Situations# UUSUUU } { OrderNumber# 5 Situations# UUUUSU } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:20:0:0:131072:0] 2025-06-24T21:26:50.498748Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-24T21:26:50.498836Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-24T21:26:50.498921Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] TEvPutResult: TEvPutResult {Id# [1:1:20:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:20:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 
VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvGet with key [1:1:11:0:0:32768:0] 2025-06-24T21:26:50.503031Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] 2025-06-24T21:26:50.503180Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] 2025-06-24T21:26:50.503222Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] 2025-06-24T21:26:50.503687Z 1 00h05m30.160512s :BS_PROXY_GET ERROR: [4260d938a10114f7] Response# TEvGetResult {Status# ERROR ResponseSz# 1 {[1:1:11:0:0:32768:0] ERROR Size# 0 RequestedSize# 32768} ErrorReason# "TStrategyBase saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# PUUUUU } { OrderNumber# 6 Situations# UPUUUU } { OrderNumber# 7 Situations# UUPUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# AAAPAA } { OrderNumber# 4 Situations# AAAAAA } ] "} Marker# BPG29 2025-06-24T21:26:50.503794Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] 2025-06-24T21:26:50.503848Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] TEvGetResult: TEvGetResult {Status# ERROR ResponseSz# 1 {[1:1:11:0:0:32768:0] ERROR Size# 0 RequestedSize# 32768} ErrorReason# "TStrategyBase saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# PUUUUU } { OrderNumber# 6 Situations# UPUUUU } { OrderNumber# 7 Situations# UUPUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# AAAPAA } { OrderNumber# 4 Situations# AAAAAA } ] "} |97.5%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest |97.5%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest |97.5%| [TM] {asan, default-linux-x86_64, release} 
ydb/library/ncloud/impl/ut/unittest |97.5%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest |97.5%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest >> TSchemeShardTest::ConsistentCopyTable [GOOD] >> TSchemeShardTest::ConsistentCopyTableAwait >> TNebiusAccessServiceTest::Authenticate [GOOD] |97.5%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest |97.5%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest >> TSchemeShardTest::AlterTableAndConcurrentSplit [GOOD] >> TSchemeShardTest::AlterTable >> TSchemeShardTest::CreateTableWithDate [GOOD] >> TSchemeShardTest::CreateIndexedTableRejects >> TNebiusAccessServiceTest::Authorize >> TSchemeShardTest::Restart [GOOD] >> TSchemeShardTest::SchemeErrors >> TSchemeShardTest::DropTableTwice [GOOD] >> TSchemeShardTest::IgnoreUserColumnIds >> TNebiusAccessServiceTest::Authorize [GOOD] >> TSchemeShardTest::DefaultColumnFamiliesWithNonCanonicName [GOOD] >> TSchemeShardTest::DropPQ >> ReadOnlyVDisk::TestWrites [GOOD] >> TSchemeShardTest::CreateAlterTableWithCodec [GOOD] >> TSchemeShardTest::CopyTableTwiceSimultaneously >> TSchemeShardTest::NameFormat [GOOD] >> TSchemeShardTest::ParallelCreateTable ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest >> TNebiusAccessServiceTest::Authenticate [GOOD] Test command err: 2025-06-24T21:26:51.591102Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000002b08] Connect to grpc://localhost:61262 2025-06-24T21:26:51.599907Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000002b08] Request AuthenticateRequest { iam_token: "**** (3C4833B6)" } 2025-06-24T21:26:51.618045Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000002b08] Status 7 Permission Denied 2025-06-24T21:26:51.618440Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000002b08] Request AuthenticateRequest { iam_token: "**** (86DDB286)" } 2025-06-24T21:26:51.621585Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000002b08] Response AuthenticateResponse { account { user_account { id: "1234" } } } ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest >> TNebiusAccessServiceTest::Authorize [GOOD] Test command err: 2025-06-24T21:26:51.732983Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000004388] Connect to grpc://localhost:11946 2025-06-24T21:26:51.742463Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000004388] Request AuthorizeRequest { checks { key: 0 value { permission { name: "perm" } resource_path { path { id: "path_id" } } iam_token: "**** (717F937C)" } } } 2025-06-24T21:26:51.753049Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000004388] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user_id" } } } } } 2025-06-24T21:26:51.753686Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000004388] Request AuthorizeRequest { checks { key: 0 value { permission { name: "perm" } resource_path { path { id: "path_id" } } iam_token: "**** (79225CA9)" } } } 2025-06-24T21:26:51.756027Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000004388] Status 7 Permission Denied 2025-06-24T21:26:51.756483Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000004388] Request AuthorizeRequest { checks { key: 0 value { permission { name: "denied" } resource_path { path { id: "path_id" } } iam_token: "**** (717F937C)" } } } 2025-06-24T21:26:51.758119Z 
node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000004388] Status 7 Permission Denied 2025-06-24T21:26:51.758588Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000004388] Request AuthorizeRequest { checks { key: 0 value { permission { name: "perm" } resource_path { path { id: "p" } } iam_token: "**** (717F937C)" } } } 2025-06-24T21:26:51.759964Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000004388] Status 7 Permission Denied >> TSchemeShardTest::SchemeErrors [GOOD] >> TSchemeShardTest::SerializedCellVec [GOOD] >> TSchemeShardTest::UpdateChannelsBindingSolomonShouldNotUpdate >> TSchemeShardTest::IgnoreUserColumnIds [GOOD] >> TSchemeShardTest::DropTableAndConcurrentSplit >> TSchemeShardTest::AlterTable [GOOD] >> TSchemeShardTest::AlterTableDropColumnReCreateSplit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestWrites [GOOD] Test command err: RandomSeed# 10773524453492930559 === Trying to put and get a blob === SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #0 to read-only === Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Write 10 blobs, expect some VDisks refuse parts but writes go through === SEND TEvPut with key [1:1:1:0:0:32768:0] 2025-06-24T21:26:48.824032Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:698] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:131072:0] 2025-06-24T21:26:48.828479Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:698] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:3:0:0:32768:0] 2025-06-24T21:26:48.833117Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:698] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:4:0:0:131072:0] 2025-06-24T21:26:48.835696Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:698] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:5:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:6:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:7:0:0:32768:0] 2025-06-24T21:26:48.843215Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:698] TEvPutResult: TEvPutResult {Id# [1:1:7:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND 
TEvPut with key [1:1:8:0:0:131072:0] 2025-06-24T21:26:48.845706Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:698] TEvPutResult: TEvPutResult {Id# [1:1:8:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:9:0:0:32768:0] 2025-06-24T21:26:48.848532Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:698] TEvPutResult: TEvPutResult {Id# [1:1:9:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:10:0:0:131072:0] 2025-06-24T21:26:48.851127Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:698] TEvPutResult: TEvPutResult {Id# [1:1:10:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 11 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:7:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:7:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:8:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:8:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:9:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:9:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:10:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:10:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Put 2 more VDisks to read-only === Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] === Write 10 more blobs, expect errors === SEND TEvPut with key [1:1:11:0:0:32768:0] 2025-06-24T21:26:49.819925Z 1 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:698] 2025-06-24T21:26:49.820045Z 3 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5333:712] 2025-06-24T21:26:49.820172Z 2 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5326:705] 2025-06-24T21:26:49.821114Z 1 
00h03m30.110512s :BS_PROXY_PUT ERROR: [e72568bb89983f2a] Result# TEvPutResult {Id# [1:1:11:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} GroupId# 2181038080 Marker# BPP12 TEvPutResult: TEvPutResult {Id# [1:1:11:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:12:0:0:131072:0] 2025-06-24T21:26:49.822837Z 1 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:698] 2025-06-24T21:26:49.822990Z 2 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5326:705] 2025-06-24T21:26:49.824103Z 3 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5333:712] TEvPutResult: TEvPutResult {Id# [1:1:12:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:12:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 4 Situations# SUUUUU } { OrderNumber# 5 Situations# USUUUU } { OrderNumber# 6 Situations# UUSUUU } { OrderNumber# 7 Situations# UUUSUU } { OrderNumber# 0 Situations# UUUUEU } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUEU } { OrderNumber# 3 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:13:0:0:32768:0] 2025-06-24T21:26:49.825996Z 1 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# 
[1:5319:698] 2025-06-24T21:26:49.826858Z 2 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5326:705] 2025-06-24T21:26:49.827753Z 3 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5333:712] TEvPutResult: TEvPutResult {Id# [1:1:13:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:13:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 3 Situations# PUUUUU } { OrderNumber# 4 Situations# UPUUUU } { OrderNumber# 5 Situations# UUPUUU } { OrderNumber# 6 Situations# UUUPUU } { OrderNumber# 7 Situations# UUUUPU } { OrderNumber# 0 Situations# UUUUUE } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUUE } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:14:0:0:131072:0] 2025-06-24T21:26:49.829042Z 3 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5333:712] 2025-06-24T21:26:49.829927Z 1 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5319:698] 2025-06-24T21:26:49.830372Z 2 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5326:705] TEvPutResult: TEvPutResult {Id# [1:1:14:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:14:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only m ... 
ey [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:7:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:7:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:8:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:8:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:9:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:9:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:10:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:10:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:11:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:11:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:12:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:12:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:13:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:13:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:14:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:14:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:15:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:15:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:16:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:16:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:17:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:17:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:18:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:18:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:19:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:19:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:20:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:20:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #0 === Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Write 10 blobs, expect some VDisks refuse parts but the writes still go through === SEND TEvPut with key [1:1:21:0:0:32768:0] 2025-06-24T21:26:51.725484Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5333:712] 2025-06-24T21:26:51.725651Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5326:705] TEvPutResult: TEvPutResult {Id# [1:1:21:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:22:0:0:131072:0] 2025-06-24T21:26:51.729682Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5326:705] 2025-06-24T21:26:51.730929Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only 
Sender# [1:5333:712] TEvPutResult: TEvPutResult {Id# [1:1:22:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:23:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:23:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:24:0:0:131072:0] 2025-06-24T21:26:51.734832Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5333:712] TEvPutResult: TEvPutResult {Id# [1:1:24:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:25:0:0:32768:0] 2025-06-24T21:26:51.737202Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5333:712] 2025-06-24T21:26:51.737278Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5326:705] TEvPutResult: TEvPutResult {Id# [1:1:25:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:26:0:0:131072:0] 2025-06-24T21:26:51.739629Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5333:712] 2025-06-24T21:26:51.739736Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5326:705] TEvPutResult: TEvPutResult {Id# [1:1:26:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:27:0:0:32768:0] 2025-06-24T21:26:51.742213Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5333:712] 2025-06-24T21:26:51.742282Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5326:705] TEvPutResult: TEvPutResult {Id# [1:1:27:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:28:0:0:131072:0] 2025-06-24T21:26:51.744722Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5326:705] 2025-06-24T21:26:51.745013Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5333:712] TEvPutResult: TEvPutResult {Id# [1:1:28:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:29:0:0:32768:0] 2025-06-24T21:26:51.747660Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5333:712] 2025-06-24T21:26:51.747747Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5326:705] TEvPutResult: TEvPutResult {Id# [1:1:29:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:30:0:0:131072:0] 2025-06-24T21:26:51.749874Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5333:712] 2025-06-24T21:26:51.750036Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5326:705] TEvPutResult: TEvPutResult {Id# [1:1:30:0:0:131072:0] Status# OK StatusFlags# { } 
ApproximateFreeSpaceShare# 0.999963} === Read all 31 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:7:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:7:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:8:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:8:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:9:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:9:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:10:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:10:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:11:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:11:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:12:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:12:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:13:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:13:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:14:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:14:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:15:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:15:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:16:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:16:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:17:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:17:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:18:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:18:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:19:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:19:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:20:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:20:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:21:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:21:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:22:0:0:131072:0] 
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:22:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:23:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:23:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:24:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:24:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:25:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:25:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:26:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:26:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:27:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:27:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:28:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:28:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:29:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:29:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:30:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:30:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} >> TSchemeShardTest::ConsistentCopyTableAwait [GOOD] >> TSchemeShardTest::ConsistentCopyTableRejects >> TNebiusAccessServiceTest::PassRequestId [GOOD] >> TSchemeShardTest::CopyTableTwiceSimultaneously [GOOD] >> TSchemeShardTest::CopyTableWithAlterConfig >> TSchemeShardTest::UpdateChannelsBindingSolomonShouldNotUpdate [GOOD] >> TSchemeShardTest::UpdateChannelsBindingSolomonShouldUpdate >> TSchemeShardTest::ParallelCreateTable [GOOD] >> TSchemeShardTest::ParallelCreateSameTable ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest >> TNebiusAccessServiceTest::PassRequestId [GOOD] Test command err: 2025-06-24T21:26:53.055169Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000003908]{reqId} Connect to grpc://localhost:9779 2025-06-24T21:26:53.059955Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000003908]{reqId} Request AuthenticateRequest { iam_token: "**** (717F937C)" } 2025-06-24T21:26:53.068763Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000003908]{reqId} Response AuthenticateResponse { account { user_account { id: "1234" } } } >> TSchemeShardTest::AlterTableDropColumnReCreateSplit [GOOD] >> TSchemeShardTest::AlterTableDropColumnSplitThenReCreate |97.5%| [TA] $(B)/ydb/library/ncloud/impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} |97.5%| [TA] {RESULT} $(B)/ydb/library/ncloud/impl/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TVPatchTests::PatchPartFastXorDiffWithEmptyDiffBuffer >> TVPatchTests::FindingPartsWhenSeveralPartsExist >> TOosLogicTests::RenderHtml [GOOD] >> TVPatchTests::FindingPartsWhenError >> TVPatchTests::PatchPartOk >> TVPatchTests::PatchPartPutError >> TVPatchTests::FullPatchTest [GOOD] >> TVPatchTests::FullPatchTestSpecialCase1 [GOOD] >> TVPatchTests::PatchPartFastXorDiffDisorder >> TVPatchTests::PatchPartGetError >> TSchemeShardTest::DropTableAndConcurrentSplit [GOOD] >> TSchemeShardTest::DropTable >> BasicStatistics::Serverless [GOOD] >> TSchemeShardTest::UpdateChannelsBindingSolomonShouldUpdate [GOOD] >> TSchemeShardTest::UpdateChannelsBindingSolomonStorageConfig >> TVPatchTests::PatchPartFastXorDiffBeyoundBlob >> TVPatchTests::PatchPartFastXorDiffDisorder [GOOD] >> TVPatchTests::PatchPartFastXorDiffBeyoundBlob [GOOD] >> TVPatchTests::FullPatchTestXorDiffFasterVGetResult [GOOD] >> TVPatchTests::PatchPartGetError [GOOD] >> TVPatchTests::PatchPartFastXorDiffWithEmptyDiffBuffer [GOOD] >> TVPatchTests::FindingPartsWhenSeveralPartsExist [GOOD] >> TVPatchTests::FindingPartsWithTimeout >> TVPatchTests::FindingPartsWhenError [GOOD] >> TVPatchTests::PatchPartOk [GOOD] >> TVPatchTests::PatchPartPutError [GOOD] >> TVPatchTests::FindingPartsWhenPartsAreDontExist |97.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::FullPatchTestSpecialCase1 [GOOD] >> TVPatchTests::FindingPartsWithTimeout [GOOD] >> TSchemeShardTest::CreateIndexedTableRejects [GOOD] >> TSchemeShardTest::CreateIndexedTableAndForceDropSimultaneously >> TVPatchTests::FindingPartsWhenPartsAreDontExist [GOOD] >> TVPatchTests::FindingPartsWhenOnlyOnePartExists ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartFastXorDiffWithEmptyDiffBuffer [GOOD] Test command err: Recv 65537 2025-06-24T21:26:54.089287Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:100:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-24T21:26:54.093609Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:100:0] Status# OK ResultSize# 1 2025-06-24T21:26:54.093665Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:100:0] FoundParts# [5] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchXorDiff 2025-06-24T21:26:54.093818Z node 1 :BS_VDISK_PATCH INFO: {BSVSP13@skeleton_vpatch_actor.cpp:674} [0:1:0:0:0] TEvVPatch: received xor diff; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] FromPart# 4 ToPart# 0 HasBuffer# no ReceivedXorDiffCount# 1/0 Send NKikimr::TEvBlobStorage::TEvVPatchXorDiffResult Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-24T21:26:54.094975Z node 1 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] OriginalPartId# 5 PatchedPartId# 5 XorReceiver# yes ParityPart# yes ForceEnd# no 2025-06-24T21:26:54.095019Z node 1 :BS_VDISK_PATCH INFO: {BSVSP05@skeleton_vpatch_actor.cpp:246} [0:1:0:0:0] TEvVPatch: send vGet for pulling part data; OriginalBlobId# [1:2:3:4:6:100:0] PullingPart# 5 
Send NKikimr::TEvBlobStorage::TEvVGet ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartFastXorDiffDisorder [GOOD] Test command err: Recv 65537 2025-06-24T21:26:54.089373Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:100:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-24T21:26:54.094545Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:100:0] Status# OK ResultSize# 1 2025-06-24T21:26:54.094616Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:100:0] FoundParts# [5] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchXorDiff 2025-06-24T21:26:54.094808Z node 1 :BS_VDISK_PATCH INFO: {BSVSP13@skeleton_vpatch_actor.cpp:674} [0:1:0:0:0] TEvVPatch: received xor diff; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] FromPart# 4 ToPart# 0 HasBuffer# no ReceivedXorDiffCount# 1/0 Send NKikimr::TEvBlobStorage::TEvVPatchXorDiffResult 2025-06-24T21:26:54.094933Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-24T21:26:54.095101Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: send patch result; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] OriginalPartId# 0 PatchedPartId# 0 Status# ERROR ErrorReason# [XorDiff from datapart] the start of the diff at index 0 righter than the start of the diff at index 1; PrevDiffStart# 2 DiffStart# 0 Send NKikimr::TEvBlobStorage::TEvVPatchResult Recv NKikimr::TEvVPatchDyingConfirm ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartGetError [GOOD] Test command err: Recv 65537 2025-06-24T21:26:54.089293Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-24T21:26:54.093777Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-06-24T21:26:54.093850Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [1] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-24T21:26:54.094044Z node 1 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorReceiver# no ParityPart# no ForceEnd# no 2025-06-24T21:26:54.094914Z node 1 :BS_VDISK_PATCH INFO: {BSVSP05@skeleton_vpatch_actor.cpp:246} [0:1:0:0:0] TEvVPatch: send vGet for pulling part data; OriginalBlobId# [1:2:3:4:6:10:0] PullingPart# 1 Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 
2025-06-24T21:26:54.095124Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: send patch result; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# ERROR ErrorReason# Recieve not OK status from VGetResult, received status# ERROR Send NKikimr::TEvBlobStorage::TEvVPatchResult 2025-06-24T21:26:54.095221Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::FullPatchTestXorDiffFasterVGetResult [GOOD] Test command err: Recv 65537 2025-06-24T21:26:54.114207Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:100:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-24T21:26:54.115316Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:100:0] Status# OK ResultSize# 1 2025-06-24T21:26:54.115380Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:100:0] FoundParts# [5] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchXorDiff 2025-06-24T21:26:54.115579Z node 1 :BS_VDISK_PATCH INFO: {BSVSP13@skeleton_vpatch_actor.cpp:674} [0:1:0:0:0] TEvVPatch: received xor diff; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] FromPart# 4 ToPart# 0 HasBuffer# no ReceivedXorDiffCount# 1/0 Send NKikimr::TEvBlobStorage::TEvVPatchXorDiffResult 2025-06-24T21:26:54.115663Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-24T21:26:54.115819Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: send patch result; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] OriginalPartId# 0 PatchedPartId# 0 Status# ERROR ErrorReason# The diff at index 0 went beyound the blob part; DiffStart# 100 DiffEnd# 96 BlobPartSize# 32 Send NKikimr::TEvBlobStorage::TEvVPatchResult Recv NKikimr::TEvVPatchDyingConfirm ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::FindingPartsWhenError [GOOD] Test command err: Recv 65537 2025-06-24T21:26:54.089359Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-24T21:26:54.093909Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# ERROR ResultSize# 1 2025-06-24T21:26:54.094012Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [] Status# ERROR Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts 2025-06-24T21:26:54.094102Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} 
[0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm >> TSchemeShardTest::UpdateChannelsBindingSolomonStorageConfig [GOOD] >> TSchemeShardTest::RejectAlterSolomon >> TVPatchTests::FindingPartsWhenOnlyOnePartExists [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartOk [GOOD] Test command err: Recv 65537 2025-06-24T21:26:54.089273Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-24T21:26:54.093722Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-06-24T21:26:54.093785Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [1] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-24T21:26:54.093969Z node 1 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorReceiver# no ParityPart# no ForceEnd# no 2025-06-24T21:26:54.094884Z node 1 :BS_VDISK_PATCH INFO: {BSVSP05@skeleton_vpatch_actor.cpp:246} [0:1:0:0:0] TEvVPatch: send vGet for pulling part data; OriginalBlobId# [1:2:3:4:6:10:0] PullingPart# 1 Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-24T21:26:54.095089Z node 1 :BS_VDISK_PATCH INFO: {BSVSP08@skeleton_vpatch_actor.cpp:383} [0:1:0:0:0] TEvVPatch: received part data; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 DataParts# 4 ReceivedBlobId# [1:2:3:4:6:10:1] Status# OK ResultSize# 1 ParityPart# no 2025-06-24T21:26:54.096765Z node 1 :BS_VDISK_PATCH INFO: {BSVSP14@skeleton_vpatch_actor.cpp:462} [0:1:0:0:0] TEvVPatch: send xor diffs; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorDiffCount# 0 2025-06-24T21:26:54.096859Z node 1 :BS_VDISK_PATCH INFO: {BSVSP15@skeleton_vpatch_actor.cpp:502} [0:1:0:0:0] TEvVPatch: send vPut; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 ReceivedXorDiffs# 0 ExpectedXorDiffs# 0 Send NKikimr::TEvBlobStorage::TEvVPut Recv NKikimr::TEvBlobStorage::TEvVPutResult 2025-06-24T21:26:54.097087Z node 1 :BS_VDISK_PATCH INFO: {BSVSP10@skeleton_vpatch_actor.cpp:627} [0:1:0:0:0] TEvVPatch: received put result; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# OK 2025-06-24T21:26:54.097198Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: send patch result; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# OK ErrorReason# Send NKikimr::TEvBlobStorage::TEvVPatchResult 2025-06-24T21:26:54.097279Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartPutError [GOOD] Test command err: Recv 65537 2025-06-24T21:26:54.089335Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-24T21:26:54.093899Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-06-24T21:26:54.093979Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [1] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-24T21:26:54.094180Z node 1 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorReceiver# no ParityPart# no ForceEnd# no 2025-06-24T21:26:54.094895Z node 1 :BS_VDISK_PATCH INFO: {BSVSP05@skeleton_vpatch_actor.cpp:246} [0:1:0:0:0] TEvVPatch: send vGet for pulling part data; OriginalBlobId# [1:2:3:4:6:10:0] PullingPart# 1 Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-24T21:26:54.095068Z node 1 :BS_VDISK_PATCH INFO: {BSVSP08@skeleton_vpatch_actor.cpp:383} [0:1:0:0:0] TEvVPatch: received part data; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 DataParts# 4 ReceivedBlobId# [1:2:3:4:6:10:1] Status# OK ResultSize# 1 ParityPart# no 2025-06-24T21:26:54.096750Z node 1 :BS_VDISK_PATCH INFO: {BSVSP14@skeleton_vpatch_actor.cpp:462} [0:1:0:0:0] TEvVPatch: send xor diffs; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorDiffCount# 0 2025-06-24T21:26:54.096838Z node 1 :BS_VDISK_PATCH INFO: {BSVSP15@skeleton_vpatch_actor.cpp:502} [0:1:0:0:0] TEvVPatch: send vPut; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 ReceivedXorDiffs# 0 ExpectedXorDiffs# 0 Send NKikimr::TEvBlobStorage::TEvVPut Recv NKikimr::TEvBlobStorage::TEvVPutResult 2025-06-24T21:26:54.097039Z node 1 :BS_VDISK_PATCH INFO: {BSVSP10@skeleton_vpatch_actor.cpp:627} [0:1:0:0:0] TEvVPatch: received put result; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# ERROR 2025-06-24T21:26:54.097101Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: send patch result; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# ERROR ErrorReason# Recieve not OK status from VPutResult, received status# ERROR Send NKikimr::TEvBlobStorage::TEvVPatchResult 2025-06-24T21:26:54.097170Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm >> TSchemeShardTest::ParallelCreateSameTable [GOOD] >> TSchemeShardTest::MultipleColumnFamilies ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::FindingPartsWithTimeout [GOOD] Test command err: Recv 65537 2025-06-24T21:26:54.089305Z node 1 :BS_VDISK_PATCH 
INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-24T21:26:54.094096Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-06-24T21:26:54.094160Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [1 2] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-24T21:26:54.094306Z node 1 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorReceiver# no ParityPart# no ForceEnd# yes 2025-06-24T21:26:54.094887Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: received force end; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# OK ErrorReason# Send NKikimr::TEvBlobStorage::TEvVPatchResult 2025-06-24T21:26:54.094952Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm Recv 65537 2025-06-24T21:26:54.371774Z node 2 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NActors::TEvents::TEvWakeup 2025-06-24T21:26:54.382224Z node 2 :BS_VDISK_PATCH ERROR: {BSVSP11@skeleton_vpatch_actor.cpp:734} [0:1:0:0:0] TEvVPatch: the vpatch actor died due to a deadline, before receiving diff; 2025-06-24T21:26:54.382314Z node 2 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [] Status# ERROR Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts 2025-06-24T21:26:54.382406Z node 2 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm >> TSchemeShardTest::ConsistentCopyTableRejects [GOOD] >> TSchemeShardTest::ConsistentCopyTableToDeletedPath >> TSchemeShardTest::AlterTableDropColumnSplitThenReCreate [GOOD] >> TSchemeShardTest::AlterTableById >> TestDataErasure::SimpleTestForTables [GOOD] >> TPQTest::TestReadAndDeleteConsumer [GOOD] >> TSchemeShardTest::DropPQ [GOOD] >> TSchemeShardTest::DropPQAbort ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::Serverless [GOOD] Test command err: 2025-06-24T21:24:08.527878Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:24:08.528239Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:08.528436Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004819/r3tmp/tmpR4WkNA/pdisk_1.dat 2025-06-24T21:24:08.920692Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19673, node 1 2025-06-24T21:24:09.184862Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:09.184925Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:09.184964Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:09.185217Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:09.188032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:09.300657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:09.300872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:09.315041Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27501 2025-06-24T21:24:09.942432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:13.386537Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:24:13.425978Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:13.426123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:13.489657Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:24:13.492998Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:13.683921Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:13.720971Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.721639Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.722218Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.722374Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.722636Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.722749Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.726137Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.726328Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.726463Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:13.942786Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:13.942918Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:13.957048Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:14.111490Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:14.158309Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:24:14.158414Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:24:14.183388Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:24:14.184794Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:24:14.185037Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:24:14.185102Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:24:14.185154Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:24:14.185216Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:24:14.185269Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:24:14.185347Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:24:14.186372Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:24:14.225998Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:14.226147Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:14.233248Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:24:14.236690Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:24:14.237011Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:24:14.251798Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-24T21:24:14.283681Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:24:14.283753Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:24:14.283833Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-24T21:24:14.302934Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:14.311455Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:24:14.311582Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:24:14.510554Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:24:14.716545Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:24:14.774773Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-24T21:24:15.319302Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:15.349568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:15.955344Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:16.127331Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7890: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-24T21:24:16.127407Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7906: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:24:16.127525Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:2496:2900], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:24:16.130062Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2504:2904] 2025-06-24T21:24:16.130336Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2504:2904], schemeshard id = 72075186224037899 2025-06-24T21:24:17.283544Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2623:3195], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:17.283758Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06 ... atistics] RequestId[ 118 ], ReplyToActorId[ [2:7513:5402]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:45.652928Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 118 ] 2025-06-24T21:26:45.652966Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 118, ReplyToActorId = [2:7513:5402], StatRequests.size() = 1 2025-06-24T21:26:46.872390Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:26:46.872482Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:26:46.872543Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-24T21:26:46.872594Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:26:46.873080Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Shared 2025-06-24T21:26:46.888437Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:26:46.892988Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7549:5428], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:46.893131Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7559:5433], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:46.893364Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Shared, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:46.909259Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:26:46.978633Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7563:5436], DatabaseId: /Root/Shared, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-24T21:26:47.082006Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 119 ], ReplyToActorId[ [2:7658:5484]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:47.082389Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 119 ] 2025-06-24T21:26:47.082441Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 119, ReplyToActorId = [2:7658:5484], StatRequests.size() = 1 2025-06-24T21:26:47.172331Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7663:5486] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Shared/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:26:47.222955Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 120 ], ReplyToActorId[ [2:7692:5501]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:47.223202Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 120 ] 2025-06-24T21:26:47.223374Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:171: [72075186224037894] EvRequestStats, node id = 2, schemeshard count = 1, urgent = 0 2025-06-24T21:26:47.223436Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:26:47.223583Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:26:47.223641Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [2:7692:5501], StatRequests.size() = 1 2025-06-24T21:26:47.339897Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=M2ZmMmFiYTQtY2ZiYTFiYjYtN2Y2ODVlMi0xZmZhNDc3Mw==, TxId: 2025-06-24T21:26:47.339969Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=M2ZmMmFiYTQtY2ZiYTFiYjYtN2Y2ODVlMi0xZmZhNDc3Mw==, TxId: 2025-06-24T21:26:47.340482Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:26:47.354719Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:26:47.354789Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:26:47.419698Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:26:47.419786Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:26:47.485722Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:2957:3058], schemeshard count = 1 2025-06-24T21:26:47.752132Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8072: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037899 2025-06-24T21:26:47.752195Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 218.000000s, at schemeshard: 72075186224037899 2025-06-24T21:26:47.752435Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037899, stats size# 26 2025-06-24T21:26:47.766136Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-24T21:26:48.770165Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:7754:5538]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:48.770375Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-24T21:26:48.770405Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:7754:5538], StatRequests.size() = 1 2025-06-24T21:26:50.090098Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:26:50.101188Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:26:50.101252Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:26:50.101305Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is data table. 2025-06-24T21:26:50.101342Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-24T21:26:50.101570Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-24T21:26:50.104162Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:26:50.114404Z node 2 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:212: [72075186224037891] TEvIntervalQuerySummary, time mismath: node id# 2, interval end# 1970-01-01T00:02:04.000000Z, event interval end# 2025-06-24T21:26:48.000000Z 2025-06-24T21:26:50.115048Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YzMyYmFjZmYtMTUzYWYzZTUtOTliMmY2ZTktNjNjZTMzOTU=, TxId: 2025-06-24T21:26:50.115102Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YzMyYmFjZmYtMTUzYWYzZTUtOTliMmY2ZTktNjNjZTMzOTU=, TxId: 2025-06-24T21:26:50.115532Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:26:50.129814Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-24T21:26:50.129878Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:26:50.198984Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:7822:5579]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:50.199377Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-24T21:26:50.199431Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:7822:5579], StatRequests.size() = 1 2025-06-24T21:26:51.668403Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:7870:5605]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:51.668783Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-24T21:26:51.668833Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:7870:5605], StatRequests.size() = 1 2025-06-24T21:26:52.931060Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 2 2025-06-24T21:26:52.931435Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:26:52.931954Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:26:52.943447Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:26:52.943510Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-24T21:26:53.039971Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:7910:5627]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:53.040259Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-24T21:26:53.040309Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:7910:5627], StatRequests.size() = 1 >> TSchemeShardTest::CopyTableWithAlterConfig [GOOD] >> TSchemeShardTest::CopyTableOmitFollowers ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::FindingPartsWhenOnlyOnePartExists [GOOD] Test command err: Recv 65537 2025-06-24T21:26:54.392033Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-24T21:26:54.392952Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-06-24T21:26:54.393004Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts 2025-06-24T21:26:54.393070Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm Recv 65537 2025-06-24T21:26:54.664341Z node 2 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-24T21:26:54.664615Z node 2 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-06-24T21:26:54.664677Z node 2 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [1] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-24T21:26:54.664865Z node 2 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorReceiver# no ParityPart# no ForceEnd# yes 2025-06-24T21:26:54.664942Z node 2 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: received force end; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# OK ErrorReason# Send NKikimr::TEvBlobStorage::TEvVPatchResult 2025-06-24T21:26:54.665005Z node 2 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm >> TSchemeShardTest::CreateIndexedTableAndForceDropSimultaneously [GOOD] >> TSchemeShardTest::CreateTableWithUniformPartitioning >> TSchemeShardTest::RejectAlterSolomon [GOOD] >> TSchemeShardTest::SimultaneousDropForceDrop |97.6%| [TA] 
$(B)/ydb/core/blobstorage/vdisk/skeleton/ut/test-results/unittest/{meta.json ... results_accumulator.log} |97.6%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::SimpleTestForTables [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:26:50.571477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:26:50.571585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:26:50.571634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:26:50.571699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:26:50.575939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:26:50.576038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:26:50.576264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:26:50.576395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:26:50.577321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:26:50.578595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:26:50.699903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:50.699980Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:26:50.719859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:26:50.720452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:26:50.720724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:26:50.733339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:26:50.733669Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:26:50.741968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:50.742626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:26:50.751904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:26:50.755388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:26:50.792506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:26:50.792630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:26:50.792963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:26:50.793031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:26:50.793174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:26:50.793295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:26:50.802306Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:26:50.955420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:26:50.958307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:50.959631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:26:50.959701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:26:50.965998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:26:50.966214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, 
first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:26:50.973030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:50.975181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:26:50.975468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:50.975598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:26:50.975660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:26:50.975701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:26:50.978299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:50.978377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:26:50.978413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:26:50.980332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:50.980382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:50.980421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:50.980473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:26:50.994365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:26:50.996825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:26:50.998221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:26:50.999130Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:50.999305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:26:50.999363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:51.000758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:26:51.000818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:51.001020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:26:51.001100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:26:51.003476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:26:51.003535Z node 1 :FLAT_TX_SCHEMESHARD ... [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-24T21:26:53.876755Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877760, Sender [1:1964:3630], Recipient [1:294:2276]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037932033 Status: OK ServerId: [1:1965:3631] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-24T21:26:53.876797Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5046: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-24T21:26:53.876825Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5783: Handle TEvClientConnected, tabletId: 72057594037932033, status: OK, at schemeshard: 72057594046678944 2025-06-24T21:26:53.876944Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 268637738, Sender [1:297:2278], Recipient [1:294:2276]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: false Progress10k: 0 2025-06-24T21:26:53.876973Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5151: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-24T21:26:53.877008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7879: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-24T21:26:53.877069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-24T21:26:53.877128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 0% 2025-06-24T21:26:53.877194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: 
TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-24T21:26:53.877247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-24T21:26:54.314384Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:26:54.314474Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:26:54.314562Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:460:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:26:54.314592Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:26:54.314639Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:838:2718]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:26:54.314661Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:26:54.314742Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:460:2410], Recipient [1:460:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:26:54.314782Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:26:54.314861Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:838:2718], Recipient [1:838:2718]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:26:54.314883Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:26:54.314934Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:294:2276], Recipient [1:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:26:54.314958Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:26:54.356225Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T21:26:54.356305Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5152: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T21:26:54.356349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-24T21:26:54.356612Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 268637738, Sender [1:297:2278], Recipient [1:294:2276]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: false Progress10k: 
5000 2025-06-24T21:26:54.356645Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5151: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-24T21:26:54.356674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7879: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-24T21:26:54.356741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-24T21:26:54.356781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-24T21:26:54.356873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-24T21:26:54.356933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-24T21:26:54.711841Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:26:54.711907Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:26:54.711971Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:460:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:26:54.712059Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:26:54.712102Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:838:2718]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:26:54.712118Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4959: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-24T21:26:54.712156Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:838:2718], Recipient [1:838:2718]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:26:54.712181Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:26:54.712242Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:294:2276], Recipient [1:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:26:54.712257Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:26:54.712285Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271124999, Sender [1:460:2410], Recipient [1:460:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:26:54.712305Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-24T21:26:54.753517Z node 1 :FLAT_TX_SCHEMESHARD 
TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T21:26:54.753595Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5152: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-24T21:26:54.753629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-24T21:26:54.753903Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 268637738, Sender [1:297:2278], Recipient [1:294:2276]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-06-24T21:26:54.753941Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5151: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-24T21:26:54.753983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7879: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-24T21:26:54.754052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-24T21:26:54.754084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-24T21:26:54.754148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.933000s, Timestamp# 1970-01-01T00:00:05.111000Z 2025-06-24T21:26:54.754208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 1, duration# 2 s 2025-06-24T21:26:54.756507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-24T21:26:54.757224Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:1986:3652], Recipient [1:294:2276]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:26:54.757290Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:26:54.757342Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046678944 2025-06-24T21:26:54.757465Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271125519, Sender [1:276:2265], Recipient [1:294:2276]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-24T21:26:54.757499Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5149: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-24T21:26:54.757533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7830: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesTimeout >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnFailedGetAllTopicsRequest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnFailedGetAllTopicsRequest >> 
TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest >> TSchemeShardTest::DropTable [GOOD] >> TSchemeShardTest::DropTableById >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::TestReadAndDeleteConsumer [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:111:2057] recipient: [1:104:2136] 2025-06-24T21:25:34.660659Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:34.661160Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927938 is [1:156:2175] sender: [1:157:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:182:2057] recipient: [1:14:2061] 2025-06-24T21:25:34.689210Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:34.714223Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1 actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "important_user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "important_user" Generation: 1 Important: true } 2025-06-24T21:25:34.715427Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:188:2199] 2025-06-24T21:25:34.718316Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:188:2199] 2025-06-24T21:25:34.737405Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d647a6fd-a56bfe3c-e09c71af-b2b3ac2_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user1" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [1:180:2193] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user2" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [1:180:2193] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:106:2057] recipient: [2:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: 
[2:106:2057] recipient: [2:104:2136] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:111:2057] recipient: [2:104:2136] 2025-06-24T21:25:43.468389Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:43.468468Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:152:2057] recipient: [2:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:152:2057] recipient: [2:150:2171] Leader for TabletID 72057594037927938 is [2:156:2175] sender: [2:157:2057] recipient: [2:150:2171] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:180:2057] recipient: [2:14:2061] 2025-06-24T21:25:43.483476Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:43.484179Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 2 actor [2:178:2191] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "important_user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 2 ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } Consumers { Name: "important_user" Generation: 2 Important: true } 2025-06-24T21:25:43.484769Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:186:2197] 2025-06-24T21:25:43.487249Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [2:186:2197] 2025-06-24T21:25:43.496259Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|efb609e6-b900a589-6cbc35f0-c27445ad_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user1" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [2:178:2191] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user2" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [2:178:2191] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:106:2057] recipient: [3:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:106:2057] recipient: [3:104:2136] Leader for TabletID 72057594037927937 is [3:110:2140] sender: [3:111:2057] recipient: [3:104:2136] 2025-06-24T21:25:51.906328Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:51.906433Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:152:2057] recipient: [3:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: 
[3:152:2057] recipient: [3:150:2171] Leader for TabletID 72057594037927938 is [3:156:2175] sender: [3:157:2057] recipient: [3:150:2171] Leader for TabletID 72057594037927937 is [3:110:2140] sender: [3:182:2057] recipient: [3:14:2061] 2025-06-24T21:25:51.924417Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:51.925021Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 3 actor [3:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "important_user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 3 ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } Consumers { Name: "important_user" Generation: 3 Important: true } 2025-06-24T21:25:51.925599Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [3:188:2199] 2025-06-24T21:25:51.928177Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [3:188:2199] 2025-06-24T21:25:51.938614Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3a620ac8-75bd956f-4dd07a69-f8e94587_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user1" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [3:180:2193] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user2" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [3:180:2193] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:106:2057] recipient: [4:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:106:2057] recipient: [4:104:2136] Leader for TabletID 72057594037927937 is [4:110:2140] sender: [4:111:2057] recipient: [4:104:2136] 2025-06-24T21:26:00.348551Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:00.348650Z node 4 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [4:152:2057] recipient: [4:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [4:152:2057] recipient: [4:150:2171] Leader for TabletID 72057594037927938 is [4:156:2175] sender: [4:157:2057] recipient: [4:150:2171] Leader for TabletID 72057594037927937 is [4:110:2140] sender: [4:180:2057] recipient: [4:14:2061] 2025-06-24T21:26:00.369831Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:00.370546Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 4 actor [4:178:2191] txId 12345 config: CacheSize: 10485760 
PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "important_user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 4 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 4 ReadRuleGenerations: 4 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 4 Important: false } Consumers { Name: "important_user" Generation: 4 Important: true } 2025-06-24T21:26:00.371194Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [4:186:2197] 2025-06-24T21:26:00.373800Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [4:186:2197] 2025-06-24T21:26:00.384872Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|6ccefb94-40963bbc-68a5b373-475332bd_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user1" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie ... 47543Z node 14 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [14:188:2199] 2025-06-24T21:26:49.350984Z node 14 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [14:188:2199] 2025-06-24T21:26:49.366453Z node 14 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b7a147eb-a30f6b57-759e0a13-262b0c19_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:50.301006Z node 14 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:50.552538Z node 14 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:50.594918Z node 14 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:50.649346Z node 14 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:50.838133Z node 14 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:51.039164Z node 14 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:51.078149Z node 14 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:51.265234Z node 14 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:51.311044Z node 14 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. 
Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:51.471529Z node 14 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:51.514043Z node 14 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:51.769133Z node 14 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 Leader for TabletID 72057594037927937 is [14:110:2140] sender: [14:394:2057] recipient: [14:102:2135] Leader for TabletID 72057594037927937 is [14:110:2140] sender: [14:396:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:110:2140] sender: [14:398:2057] recipient: [14:397:2374] Leader for TabletID 72057594037927937 is [14:399:2375] sender: [14:400:2057] recipient: [14:397:2374] 2025-06-24T21:26:51.827090Z node 14 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:51.827217Z node 14 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:26:51.827994Z node 14 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [14:448:2416] 2025-06-24T21:26:51.839204Z node 14 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:26:51.839333Z node 14 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [14:448:2416] Leader for TabletID 72057594037927937 is [14:399:2375] sender: [14:467:2057] recipient: [14:14:2061] 2025-06-24T21:26:51.877730Z node 14 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:51.882287Z node 14 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1003 actor [14:464:2426] txId 42 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 100 MaxSizeInPartition: 104857600 LifetimeSeconds: 172800 ImportantClientId: "user1" ImportantClientId: "user2" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 Version: 1003 LocalDC: true Topic: "topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1002 AllPartitions { PartitionId: 0 } Consumers { Name: "user2" Generation: 1002 Important: true } Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:106:2057] recipient: [15:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:106:2057] recipient: [15:104:2136] Leader for TabletID 72057594037927937 is [15:110:2140] sender: [15:111:2057] recipient: [15:104:2136] 2025-06-24T21:26:52.503113Z node 15 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:52.503209Z node 15 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [15:152:2057] recipient: [15:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [15:152:2057] recipient: [15:150:2171] Leader 
for TabletID 72057594037927938 is [15:156:2175] sender: [15:157:2057] recipient: [15:150:2171] Leader for TabletID 72057594037927937 is [15:110:2140] sender: [15:180:2057] recipient: [15:14:2061] 2025-06-24T21:26:52.520627Z node 15 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:52.521157Z node 15 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1004 actor [15:178:2191] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 100 MaxSizeInPartition: 104857600 LifetimeSeconds: 172800 ImportantClientId: "user1" ImportantClientId: "user2" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1004 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1004 ReadRuleGenerations: 1004 ReadRuleGenerations: 1004 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1004 Important: false } Consumers { Name: "user1" Generation: 1004 Important: true } Consumers { Name: "user2" Generation: 1004 Important: true } 2025-06-24T21:26:52.521618Z node 15 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [15:186:2197] 2025-06-24T21:26:52.523670Z node 15 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [15:186:2197] 2025-06-24T21:26:52.537006Z node 15 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|e8586263-1ad66fd5-2a3dfc67-6d29bc65_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:26:53.469795Z node 15 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:53.712078Z node 15 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:53.755068Z node 15 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:53.951867Z node 15 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:54.168145Z node 15 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:54.348986Z node 15 :PERSQUEUE ERROR: cache_eviction.h:422: Can't evict. No such blob in L1. Partition 0 offset 405 size 8296447 cause it's been evicted from L2. Actual L1 size: 4 2025-06-24T21:26:54.349081Z node 15 :PERSQUEUE ERROR: cache_eviction.h:422: Can't evict. No such blob in L1. Partition 0 offset 324 size 8296447 cause it's been evicted from L2. Actual L1 size: 4 2025-06-24T21:26:54.349115Z node 15 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:54.391296Z node 15 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. 
Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:54.579789Z node 15 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:54.616099Z node 15 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:54.669841Z node 15 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-24T21:26:54.878472Z node 15 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 Leader for TabletID 72057594037927937 is [15:110:2140] sender: [15:392:2057] recipient: [15:102:2135] Leader for TabletID 72057594037927937 is [15:110:2140] sender: [15:395:2057] recipient: [15:394:2372] Leader for TabletID 72057594037927937 is [15:110:2140] sender: [15:396:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:397:2373] sender: [15:398:2057] recipient: [15:394:2372] 2025-06-24T21:26:54.919564Z node 15 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:54.919630Z node 15 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:26:54.920070Z node 15 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [15:446:2414] 2025-06-24T21:26:54.927324Z node 15 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-24T21:26:54.927391Z node 15 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [15:446:2414] Leader for TabletID 72057594037927937 is [15:397:2373] sender: [15:465:2057] recipient: [15:14:2061] 2025-06-24T21:26:54.960896Z node 15 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:54.964212Z node 15 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1005 actor [15:462:2424] txId 42 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 100 MaxSizeInPartition: 104857600 LifetimeSeconds: 172800 ImportantClientId: "user1" ImportantClientId: "user2" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 Version: 1005 LocalDC: true Topic: "topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1004 AllPartitions { PartitionId: 0 } Consumers { Name: "user2" Generation: 1004 Important: true } >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest >> TSchemeShardTest::AlterTableById [GOOD] >> TSchemeShardTest::AlterTableConfig >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_PreferedPartition_Test [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnFailedGetAllTopicsRequest [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnFailedGetAllTopicsRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailesOnNotATopic >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnEmptyTopicName >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly >> ReadOnlyVDisk::TestGarbageCollect [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesTimeout [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::SuccessfullyPassesResponsesFromTablets >> TSchemeShardTest::CopyTableOmitFollowers [GOOD] >> TSchemeShardTest::CreateIndexedTableAfterBackup >> TSchemeShardTest::SimultaneousDropForceDrop [GOOD] >> TSchemeShardTest::RejectSystemViewPath |97.6%| [TA] $(B)/ydb/core/tx/schemeshard/ut_data_erasure/test-results/unittest/{meta.json ... results_accumulator.log} |97.6%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/slow/unittest >> SlowTopicAutopartitioning::CDC_Write [GOOD] Test command err: 2025-06-24T21:25:32.772788Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630169708059001:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:32.782746Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:25:32.993887Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044d2/r3tmp/tmpHQIqV2/pdisk_1.dat 2025-06-24T21:25:33.148687Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:33.148694Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630169708058807:2079] 1750800332761475 != 1750800332761478 2025-06-24T21:25:33.215741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:33.215891Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:33.217623Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2818, node 1 2025-06-24T21:25:33.455921Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0044d2/r3tmp/yandexJ8HTUq.tmp 2025-06-24T21:25:33.455950Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0044d2/r3tmp/yandexJ8HTUq.tmp 2025-06-24T21:25:33.457193Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0044d2/r3tmp/yandexJ8HTUq.tmp 2025-06-24T21:25:33.457516Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:25:33.772296Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:33.867506Z INFO: TTestServer started on Port 30389 GrpcPort 2818 TClient is connected to server localhost:30389 PQClient connected to localhost:2818 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:25:34.225326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:34.298047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T21:25:35.881651Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630182592961504:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:35.881651Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630182592961491:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:35.881850Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:35.894941Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630182592961533:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:35.895059Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:35.899839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:25:35.909725Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630182592961506:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T21:25:35.999546Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630182592961562:2443] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:25:36.465198Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519630186887928874:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:25:36.470031Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=Y2M2NjI1Y2MtNzM4OWYyY2UtYTIyOWI2NjEtNTU5YWU2YzI=, ActorId: [1:7519630182592961489:2297], ActorState: ExecuteState, TraceId: 01jyhxa0zj0ayb8dp25a570xpz, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:25:36.473695Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:25:36.529450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:36.562124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:36.658118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519630191182896459:2622] 2025-06-24T21:25:37.772054Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630169708059001:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:37.772142Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-24T21:25:43.108245Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-24T21:25:43.175741Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 269877761, Sender [1:7519630216952700452:2704], Recipient [1:7519630174003026427:2143]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:25:43.175780Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5048: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:25:43.175793Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5833: Pipe server connected, at tablet: 72057594046644480 2025-06-24T21:25:43.175825Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4951: StateWork, received event# 271122432, Sender [1:7519630216952700448:2701], Recipient [1:7519630174003026427:2143]: {TEvModifySchemeTransaction txid# 281474976710673 TabletId# 72057594046644480} 2025-06-24T21:25:43.175835Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-24T21:25:43.212950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "origin" Columns { Name: "id" Type: "U ... artition_write.cpp:162: [PQ: 72075186224037956, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:26:45.293824Z node 1 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037956, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:26:45.293994Z node 1 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [1:7519630216952703030:2632], Partition 1, Sender [0:0:0], Recipient [1:7519630221247670727:2754], Cookie: 0 2025-06-24T21:26:45.294023Z node 1 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [1:7519630440291005970:3104], Partition 3, Sender [0:0:0], Recipient [1:7519630440291006015:3110], Cookie: 0 2025-06-24T21:26:45.294026Z node 1 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [1:7519630221247670727:2754]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:26:45.294034Z node 1 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:26:45.294049Z node 1 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037956, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:26:45.294060Z node 1 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [1:7519630440291006015:3110]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:26:45.294067Z node 1 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037956, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:26:45.294074Z node 1 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:26:45.294075Z node 1 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037956, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:26:45.294086Z node 1 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037956, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-24T21:26:45.294102Z node 1 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037958, Partition: 3, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:26:45.294143Z node 1 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037958, Partition: 3, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:26:45.294159Z node 1 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037958, Partition: 3, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:26:45.294177Z node 1 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037958, Partition: 3, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:26:45.393932Z node 1 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [1:7519630440291005970:3104], Partition 2, Sender [0:0:0], Recipient [1:7519630440291006014:3109], Cookie: 0 2025-06-24T21:26:45.393931Z node 1 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [1:7519630216952703030:2632], Partition 0, Sender [0:0:0], Recipient [1:7519630221247670739:2764], Cookie: 0 2025-06-24T21:26:45.393988Z node 1 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [1:7519630440291006014:3109]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:26:45.393988Z node 1 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [1:7519630221247670739:2764]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:26:45.394028Z node 1 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:26:45.394029Z node 1 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:26:45.394056Z node 1 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037958, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:26:45.394060Z node 1 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037956, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:26:45.394104Z node 1 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037958, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:26:45.394104Z node 1 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037956, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:26:45.394115Z node 1 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037958, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:26:45.394118Z node 1 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037956, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:26:45.394133Z node 1 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037958, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:26:45.394133Z node 1 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037956, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-24T21:26:45.394349Z node 1 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [1:7519630440291005970:3104], Partition 3, Sender [0:0:0], Recipient [1:7519630440291006015:3110], Cookie: 0 2025-06-24T21:26:45.394356Z node 1 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [1:7519630216952703030:2632], Partition 1, Sender [0:0:0], Recipient [1:7519630221247670727:2754], Cookie: 0 2025-06-24T21:26:45.394375Z node 1 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [1:7519630221247670727:2754]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:26:45.394375Z node 1 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [1:7519630440291006015:3110]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:26:45.394381Z node 1 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:26:45.394384Z node 1 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-24T21:26:45.394397Z node 1 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037956, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:26:45.394398Z node 1 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037958, Partition: 3, State: StateIdle] Have 0 items to delete old stuff 2025-06-24T21:26:45.394419Z node 1 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037958, Partition: 3, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:26:45.394423Z node 1 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037956, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-24T21:26:45.394427Z node 1 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037958, Partition: 3, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:26:45.394436Z node 1 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037956, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-24T21:26:45.394439Z node 1 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037958, Partition: 3, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-24T21:26:45.394450Z node 1 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037956, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-24T21:26:45.410903Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 65538, Sender [0:0:0], Recipient [1:7519630440291005970:3104]: NActors::TEvents::TEvWakeup 2025-06-24T21:26:45.410908Z node 1 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 65538 (NActors::TEvents::TEvWakeup), Tablet [1:7519630440291005970:3104], Partition 2, Sender [0:0:0], Recipient [1:7519630440291006014:3109], Cookie: 0 2025-06-24T21:26:45.410957Z node 1 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 65538, Sender [0:0:0], Recipient [1:7519630440291006014:3109]: NActors::TEvents::TEvWakeup 2025-06-24T21:26:45.411094Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271188501, Sender [1:7519630440291006014:3109], Recipient [1:7519630440291005970:3104]: NKikimr::TEvPQ::TEvPartitionCounters 2025-06-24T21:26:45.411120Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5276: HandleHook, processing event TEvPQ::TEvPartitionCounters 2025-06-24T21:26:45.411135Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:1266: [PQ: 72075186224037958] Handle TEvPQ::TEvPartitionCounters PartitionId 2 2025-06-24T21:26:45.411191Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037958, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T21:26:45.411240Z node 1 :PERSQUEUE TRACE: partition.h:577: StateIdle event# 65538 (NActors::TEvents::TEvWakeup), Tablet [1:7519630440291005970:3104], Partition 3, Sender [0:0:0], Recipient [1:7519630440291006015:3110], Cookie: 0 2025-06-24T21:26:45.411281Z node 1 :PERSQUEUE TRACE: partition.h:579: StateIdle, received event# 65538, Sender [0:0:0], Recipient [1:7519630440291006015:3110]: NActors::TEvents::TEvWakeup 2025-06-24T21:26:45.411344Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271188503, Sender [1:7519630440291006014:3109], Recipient [1:7519630440291005970:3104]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-24T21:26:45.411367Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5278: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-24T21:26:45.411410Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271188503, Sender [1:7519630440291006014:3109], Recipient [1:7519630440291005970:3104]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-24T21:26:45.411426Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037958, Partition: 3, State: StateIdle] no data for compaction 2025-06-24T21:26:45.411429Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5278: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-24T21:26:45.411459Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271188501, Sender [1:7519630440291006015:3110], Recipient [1:7519630440291005970:3104]: NKikimr::TEvPQ::TEvPartitionCounters 2025-06-24T21:26:45.411474Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5276: HandleHook, processing event TEvPQ::TEvPartitionCounters 2025-06-24T21:26:45.411483Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:1266: [PQ: 72075186224037958] Handle TEvPQ::TEvPartitionCounters PartitionId 3 2025-06-24T21:26:45.411627Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5264: HandleHook, received event# 271188503, Sender [1:7519630440291006015:3110], Recipient [1:7519630440291005970:3104]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-24T21:26:45.411646Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5278: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-24T21:26:45.411676Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5264: 
HandleHook, received event# 271188503, Sender [1:7519630440291006015:3110], Recipient [1:7519630440291005970:3104]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-24T21:26:45.411691Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5278: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnNoBalancerInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNoBalancerInGetNodeRequest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailesOnNotATopic [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly >> TSchemeShardTest::DropTableById [GOOD] >> TSchemeShardTest::DropPQFail >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnEmptyTopicName [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnDuplicatedTopicName >> TSchemeShardTest::ConsistentCopyTableToDeletedPath [GOOD] >> TSchemeShardTest::CopyIndexedTable >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedTopicName >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestGarbageCollect [GOOD] Test command err: RandomSeed# 3859492922454229655 SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:1:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 2 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:1:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:1:0] NODATA Size# 0}} Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] SEND TEvPut with key [1:1:2:0:0:131072:0] 2025-06-24T21:26:48.963572Z 1 00h01m40.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} 2025-06-24T21:26:48.968320Z 1 00h01m40.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] SEND TEvGet with key [1:1:2:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:1:0] NODATA Size# 0}} Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] SEND TEvPut with key [1:1:3:0:0:32768:0] 
2025-06-24T21:26:49.817908Z 1 00h03m20.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-24T21:26:49.818691Z 2 00h03m20.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} 2025-06-24T21:26:50.297180Z 1 00h04m20.161024s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-24T21:26:50.297379Z 2 00h04m20.161024s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] SEND TEvGet with key [1:1:3:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:1:0] NODATA Size# 0}} Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] SEND TEvPut with key [1:1:4:0:0:131072:0] 2025-06-24T21:26:50.642510Z 1 00h05m00.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-24T21:26:50.643775Z 2 00h05m00.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 2025-06-24T21:26:50.644755Z 3 00h05m00.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-24T21:26:50.645160Z 1 00h05m00.200000s :BS_PROXY_PUT ERROR: [f9dfdf3945980d4c] Result# TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:4:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 3 Situations# PUUUUU } { OrderNumber# 4 Situations# UPUUUU } { OrderNumber# 5 Situations# UUPUUU } { OrderNumber# 6 Situations# UUUPUU } { OrderNumber# 7 Situations# UUUUPU } { OrderNumber# 0 Situations# UUUUUE } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUUE } ] " ApproximateFreeSpaceShare# 0.999988} GroupId# 2181038080 Marker# BPP12 TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:4:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 3 Situations# PUUUUU } { OrderNumber# 4 Situations# UPUUUU } { OrderNumber# 5 Situations# UUPUUU } { OrderNumber# 6 Situations# UUUPUU } { OrderNumber# 7 Situations# UUUUPU } { OrderNumber# 0 Situations# UUUUUE } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUUE } ] " ApproximateFreeSpaceShare# 0.999988} 2025-06-24T21:26:51.092078Z 1 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 
VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-24T21:26:51.092318Z 2 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 2025-06-24T21:26:51.092384Z 3 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] === Putting VDisk #3 to read-only === Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] 2025-06-24T21:26:51.816282Z 1 00h07m40.260512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-24T21:26:51.816498Z 2 00h07m40.260512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 2025-06-24T21:26:51.816563Z 3 00h07m40.260512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-24T21:26:51.816622Z 4 00h07m40.260512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5337:719] === Putting VDisk #4 to read-only === Setting VDisk read-only to 1 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] 2025-06-24T21:26:52.095033Z 1 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-24T21:26:52.095225Z 2 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 2025-06-24T21:26:52.095269Z 3 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-24T21:26:52.095304Z 4 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5337:719] 2025-06-24T21:26:52.095339Z 5 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5344:726] === Putting VDisk #5 to read-only === Setting VDisk read-only to 1 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] 2025-06-24T21:26:52.317673Z 1 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-24T21:26:52.317852Z 2 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 2025-06-24T21:26:52.317918Z 3 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-24T21:26:52.317996Z 4 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5337:719] 2025-06-24T21:26:52.318061Z 5 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5344:726] 2025-06-24T21:26:52.318104Z 6 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5351:733] === Putting VDisk #6 to read-only === Setting VDisk read-only to 1 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] 2025-06-24T21:26:52.516384Z 1 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: 
(2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-24T21:26:52.516541Z 2 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 2025-06-24T21:26:52.516602Z 3 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-24T21:26:52.516660Z 4 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5337:719] 2025-06-24T21:26:52.516703Z 5 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5344:726] 2025-06-24T21:26:52.516745Z 6 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5351:733] 2025-06-24T21:26:52.516782Z 7 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5358:740] === Putting VDisk #0 to normal === Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] 2025-06-24T21:26:52.713156Z 2 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 2025-06-24T21:26:52.713233Z 3 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-24T21:26:52.713272Z 4 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5337:719] 2025-06-24T21:26:52.713306Z 5 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5344:726] 2025-06-24T21:26:52.713342Z 6 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5351:733] 2025-06-24T21:26:52.713378Z 7 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5358:740] === Putting VDisk #1 to normal === Setting VDisk read-only to 0 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] 2025-06-24T21:26:52.976346Z 3 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-24T21:26:52.976418Z 4 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5337:719] 2025-06-24T21:26:52.976459Z 5 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5344:726] 2025-06-24T21:26:52.976512Z 6 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5351:733] 2025-06-24T21:26:52.976549Z 7 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5358:740] === Putting VDisk #2 to normal === Setting VDisk read-only to 0 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] 2025-06-24T21:26:53.264307Z 4 00h11m40.410512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5337:719] 2025-06-24T21:26:53.264407Z 5 00h11m40.410512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) 
Unavailable in read-only Sender# [1:5344:726] 2025-06-24T21:26:53.264465Z 6 00h11m40.410512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5351:733] 2025-06-24T21:26:53.264519Z 7 00h11m40.410512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5358:740] === Putting VDisk #3 to normal === Setting VDisk read-only to 0 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] 2025-06-24T21:26:53.651094Z 5 00h12m20.450512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5344:726] 2025-06-24T21:26:53.651219Z 6 00h12m20.450512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5351:733] 2025-06-24T21:26:53.651278Z 7 00h12m20.450512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5358:740] Setting VDisk read-only to 0 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] 2025-06-24T21:26:54.553124Z 6 00h14m00.461536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5351:733] 2025-06-24T21:26:54.553206Z 7 00h14m00.461536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5358:740] SEND TEvGet with key [1:1:3:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:1:0] NODATA Size# 0}} Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] 2025-06-24T21:26:54.974310Z 7 00h14m40.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5358:740] SEND TEvGet with key [1:1:3:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:1:0] NODATA Size# 0}} Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] SEND TEvGet with key [1:1:3:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:1:0] NODATA Size# 0}} SEND TEvPut with key [1:1:4:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999976} SEND TEvGet with key [1:1:4:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:1:0] NODATA Size# 0}} >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnDuplicatedTopicName [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnDuplicatedPartition >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::SuccessfullyPassesResponsesFromTablets [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailesOnNotATopic >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedTopicName [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedPartition >> TSchemeShardTest::RejectSystemViewPath [GOOD] >> TSchemeShardTest::SplitKey [GOOD] >> TSchemeShardTest::SplitAlterCopy >> 
TSchemeShardTest::CreateTableWithUniformPartitioning [GOOD] >> TSchemeShardTest::CreateTableWithSplitBoundaries >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnDuplicatedPartition [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailesOnNotATopic [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'/Root/PQ/\', Marker# PQ17" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "no path \'Root/PQ\', Marker# PQ150" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' has no balancer, Marker# PQ193" ErrorCode: UNKNOWN_TOPIC } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_PreferedPartition_Test [GOOD] Test command err: 2025-06-24T21:25:40.366392Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630205328396393:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:40.366485Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:25:40.394660Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630203273005616:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:40.394716Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:25:40.560304Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:25:40.564922Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ac1/r3tmp/tmpnBeFWc/pdisk_1.dat 2025-06-24T21:25:40.733334Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:40.746401Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:40.746535Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:40.751720Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:25:40.752688Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27475, node 1 2025-06-24T21:25:40.802017Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:40.802111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:40.804944Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:25:40.808914Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/003ac1/r3tmp/yandexi2OxzH.tmp 2025-06-24T21:25:40.808959Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/003ac1/r3tmp/yandexi2OxzH.tmp 2025-06-24T21:25:40.809129Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/003ac1/r3tmp/yandexi2OxzH.tmp 2025-06-24T21:25:40.809245Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:25:40.855706Z INFO: TTestServer started on Port 65232 GrpcPort 27475 TClient is connected to server localhost:65232 PQClient connected to localhost:27475 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:25:41.128487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:41.179115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T21:25:41.380570Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:41.400515Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:43.183212Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630216157907784:2271], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:43.183212Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630216157907773:2268], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:43.183298Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:43.187943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:25:43.203047Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630216157907787:2272], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-24T21:25:43.280965Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630216157907815:2130] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:25:43.543745Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519630218213299449:2307], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:25:43.543836Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519630216157907830:2276], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:25:43.544065Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZGQxYzFlNWUtMjljYjdmOTQtMzFmODJmMWEtNGQxNjNlZGY=, ActorId: [2:7519630216157907771:2267], ActorState: ExecuteState, TraceId: 01jyhxa84d15bksgtp0nvh9dev, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:25:43.544064Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZTBjZDE0OGYtNzM4ZDkzYTUtMzRiODkxOGEtNzZmMjhjZjY=, ActorId: [1:7519630218213299409:2300], ActorState: ExecuteState, TraceId: 01jyhxa88eckxxfq3r4w480229, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:25:43.545666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:43.545856Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:25:43.545869Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:25:43.626587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:43.714309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster ... 
oposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-24T21:26:44.108170Z node 9 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyhxc3e438xnay1apxshs992, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=OGMzNzRjZTgtOGU2Njg2Mi04YWY0OGIwYi1lZThkNDE3ZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [9:7519630478784316198:3074] 2025-06-24T21:26:44.662226Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[9:7519630457309478122:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:26:44.662336Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:26:44.668347Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519630458193314913:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:26:44.668428Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-24T21:26:49.330140Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-24T21:26:49.330164Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-24T21:26:49.330172Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-24T21:26:49.330188Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:88: TPartitionChooser [9:7519630500259153014:3232] (SourceId=A_Source, PreferedPartition=0) InitTable: SourceId=A_Source TopicsAreFirstClassCitizen=1 UseSrcIdMetaMappingInFirstClass=1 2025-06-24T21:26:49.334458Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:26:50.079667Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:26:50.865275Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715687:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:26:51.775794Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715692:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:26:52.507179Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpAlterTable, opId: 281474976715697:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:26:53.216028Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715701:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:26:53.918381Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:101: StateInitTable, received event# 277020685, Sender [9:7519630457309478118:2069], Recipient [9:7519630500259153014:3232]: NKikimr::NMetadata::NProvider::TEvManagerPrepared 2025-06-24T21:26:53.918430Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [9:7519630500259153014:3232] (SourceId=A_Source, PreferedPartition=0) StartKqpSession 2025-06-24T21:26:53.922121Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:132: StateCreateKqpSession, received event# 271646728, Sender [9:7519630457309478217:2151], Recipient [9:7519630500259153014:3232]: NKikimrKqp.TEvCreateSessionResponse Error: "" Response { SessionId: "ydb://session/3?node_id=9&id=NzA2ODkyMTQtNGUwOTM4MDEtMzNmOTQ1ZDUtYWQzZGY4ZGU=" NodeId: 9 } YdbStatus: SUCCESS ResourceExhausted: false 2025-06-24T21:26:53.922174Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [9:7519630500259153014:3232] (SourceId=A_Source, PreferedPartition=0) Select from the table 2025-06-24T21:26:54.128817Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:163: StateSelect, received event# 271646721, Sender [9:7519630457309478217:2151], Recipient [9:7519630500259153014:3232]: NKikimrKqp.TEvQueryResponse Response { SessionId: "ydb://session/3?node_id=9&id=NzA2ODkyMTQtNGUwOTM4MDEtMzNmOTQ1ZDUtYWQzZGY4ZGU=" PreparedQuery: "6921283d-6766d595-9ce577bb-db2d6f03" QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { id: "01jyhxcddaasmceh6jcnkng72q" } YdbResults { columns { name: "Partition" type { optional_type { item { type_id: UINT32 } } } } columns { name: "CreateTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "AccessTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "SeqNo" type { optional_type { item { type_id: UINT64 } } } } } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 131 2025-06-24T21:26:54.128980Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:151: TPartitionChooser [9:7519630500259153014:3232] (SourceId=A_Source, PreferedPartition=0) Selected from table PartitionId=(NULL) SeqNo=(NULL) 2025-06-24T21:26:54.129009Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__old_chooser_actor.h:113: TPartitionChooser [9:7519630500259153014:3232] (SourceId=A_Source, PreferedPartition=0) OnPartitionChosen 2025-06-24T21:26:54.129029Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser 
[9:7519630500259153014:3232] (SourceId=A_Source, PreferedPartition=0) Update the table 2025-06-24T21:26:54.261529Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:212: StateUpdate, received event# 271646721, Sender [9:7519630457309478217:2151], Recipient [9:7519630500259153014:3232]: NKikimrKqp.TEvQueryResponse Response { SessionId: "ydb://session/3?node_id=9&id=NzA2ODkyMTQtNGUwOTM4MDEtMzNmOTQ1ZDUtYWQzZGY4ZGU=" PreparedQuery: "8da36f2c-a30ccb30-1c9e793a-3342124e" QueryParameters { Name: "$AccessTime" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$CreateTime" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Partition" Type { Kind: Data Data { Scheme: 2 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SeqNo" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 81 Received TEvChooseResult: 0 2025-06-24T21:26:54.261583Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [9:7519630500259153014:3232] (SourceId=A_Source, PreferedPartition=0) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-24T21:26:54.261625Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [9:7519630500259153014:3232] (SourceId=A_Source, PreferedPartition=0) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-24T21:26:54.261654Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [9:7519630500259153014:3232] (SourceId=A_Source, PreferedPartition=0) Start idle 2025-06-24T21:26:54.784467Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:26:54.784508Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:26:55.144351Z node 9 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [9:7519630526028957919:2644] TxId: 281474976715709. Ctx: { TraceId: 01jyhxce7t31z2nxdd44gemavv, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=NmY2NTE4N2EtOWI5MGU1OGUtOTY0OTc2MmItYzZhYmVlYWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 10 2025-06-24T21:26:55.144519Z node 9 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [9:7519630526028957925:2644], TxId: 281474976715709, task: 2. Ctx: { TraceId : 01jyhxce7t31z2nxdd44gemavv. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=9&id=NmY2NTE4N2EtOWI5MGU1OGUtOTY0OTc2MmItYzZhYmVlYWE=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [9:7519630526028957919:2644], status: UNAVAILABLE, reason: {
: Error: Terminate execution } >> TSchemeShardTest::AlterTableConfig [GOOD] >> TSchemeShardTest::AlterTableCompactionPolicy ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'/Root/PQ/\', Marker# PQ17" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "no path \'Root/PQ\', Marker# PQ150" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' has no balancer, Marker# PQ193" ErrorCode: UNKNOWN_TOPIC } >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedPartition [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesTimeout >> TSchemeShardTest::CreateIndexedTableAfterBackup [GOOD] >> TSchemeShardTest::CreateFinishedInDescription ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailesOnNotATopic [GOOD] Test command err: Assert failed: Check response: { Status: 130 ErrorReason: "Timeout while waiting for response, may be just slow, Marker# PQ16" ErrorCode: ERROR } 2025-06-24T21:26:56.782388Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:26:56.787571Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:26:56.787994Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928037] doesn't have tx info 2025-06-24T21:26:56.788052Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:26:56.788099Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-24T21:26:56.788150Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:26:56.788241Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:56.788328Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-24T21:26:56.789134Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [2:265:2255], now have 1 active actors on pipe 2025-06-24T21:26:56.789230Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T21:26:56.815745Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T21:26:56.819201Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T21:26:56.819410Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:56.820426Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928037] Config applied version 1 actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T21:26:56.820638Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:26:56.821167Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:26:56.821539Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:273:2261] 2025-06-24T21:26:56.824329Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 
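For readability, the three YQL statements printed by the PQ_PARTITION_CHOOSER TTableHelper trace near the top of this section are repeated here with line breaks only; the statements, the table path and the parameter names are copied verbatim from that trace, nothing below is new.

    --!syntax_v1
    -- SelectQuery (partition_chooser_impl__table_helper.h:62)
    DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8;
    SELECT Partition, CreateTime, AccessTime, SeqNo
    FROM `//Root/.metadata/TopicPartitionsMapping`
    WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId;

    -- UpdateQuery (partition_chooser_impl__table_helper.h:63)
    DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64;
    DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64;
    DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64;
    UPSERT INTO `//Root/.metadata/TopicPartitionsMapping`
        (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo)
    VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo);

    -- UpdateAccessTimeQuery (partition_chooser_impl__table_helper.h:64)
    DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64;
    DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;
    UPDATE `//Root/.metadata/TopicPartitionsMapping`
    SET AccessTime = $AccessTime
    WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition;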
2025-06-24T21:26:56.824411Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:273:2261] 2025-06-24T21:26:56.824480Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:26:56.824539Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T21:26:56.824887Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928037, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:26:56.825562Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [2:276:2263], now have 1 active actors on pipe 2025-06-24T21:26:56.883728Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928138] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:26:56.887550Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928138] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:26:56.887935Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928138] doesn't have tx info 2025-06-24T21:26:56.887991Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928138] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:26:56.888035Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928138] no config, start with empty partitions and default config 2025-06-24T21:26:56.888076Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928138] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:26:56.888132Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:56.888209Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928138] doesn't have tx writes info 2025-06-24T21:26:56.888965Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928138] server connected, pipe [2:408:2361], now have 1 active actors on pipe 2025-06-24T21:26:56.889101Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928138] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T21:26:56.889309Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928138] Config update version 2(current 0) received from actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-24T21:26:56.891854Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-24T21:26:56.891985Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:56.892849Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: 
[PQ: 72057594037928138] Config applied version 2 actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-24T21:26:56.892969Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:26:56.893343Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:26:56.893585Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [2:416:2367] 2025-06-24T21:26:56.895872Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed. 2025-06-24T21:26:56.895941Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [2:416:2367] 2025-06-24T21:26:56.896003Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:26:56.896059Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-24T21:26:56.896328Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928138, Partition: 1, State: StateIdle] no data for compaction 2025-06-24T21:26:56.896851Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928138] server connected, pipe [2:419:2369], now have 1 active actors on pipe 2025-06-24T21:26:56.914661Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:26:56.918422Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:26:56.918696Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info 2025-06-24T21:26:56.918746Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:26:56.918780Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-24T21:26:56.918813Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:26:56.918851Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:56.918897Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-24T21:26:56.919448Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [2:468:2406], now have 1 active actors on pipe 2025-06-24T21:26:56.919546Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T21:26:56.919700Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928139] Config update version 3(current 0) received from actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T21:26:56.921437Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T21:26:56.921541Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:56.922191Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928139] Config applied version 3 actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T21:26:56.922284Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:26:56.922538Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:26:56.922697Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [2:476:2412] 2025-06-24T21:26:56.924071Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 
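The TEvUpdateConfig payload in these PERSQUEUE unittest blocks is essentially identical from entry to entry; only TopicName, Version and the partition ids change. It is reformatted once below for easier comparison, with every field value copied from the version 1 update for rt3.dc1--topic1 logged above.

    CacheSize: 10485760
    PartitionConfig {
      MaxCountInPartition: 20000000
      MaxSizeInPartition: 104857600
      LifetimeSeconds: 0
      LowWatermark: 6291456
      SourceIdLifetimeSeconds: 3600
      MaxWriteInflightSize: 90000000
    }
    PartitionIds: 0
    TopicName: "rt3.dc1--topic1"
    Version: 1
    Partitions { PartitionId: 0 }
    AllPartitions { PartitionId: 0 }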
2025-06-24T21:26:56.924145Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [2:476:2412] 2025-06-24T21:26:56.924207Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:26:56.924255Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-24T21:26:56.924508Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T21:26:56.924894Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [2:479:2414], now have 1 active actors on pipe REQUEST MetaRequest { CmdGetReadSessionsInfo { ClientId: "client_id" Topic: "rt3.dc1--topic1" Topic: "rt3.dc1--topic2" } } Ticket: "client_id@builtin" 2025-06-24T21:26:56.933825Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [2:486:2417], now have 1 active actors on pipe 2025-06-24T21:26:56.934093Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928138] server connected, pipe [2:489:2418], now have 1 active actors on pipe 2025-06-24T21:26:56.934378Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [2:490:2418], now have 1 active actors on pipe 2025-06-24T21:26:56.934749Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928037] server disconnected, pipe [2:486:2417] destroyed 2025-06-24T21:26:56.935250Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928138] server disconnected, pipe [2:489:2418] destroyed 2025-06-24T21:26:56.935301Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928139] server disconnected, pipe [2:490:2418] destroyed RESULT Status: 1 ErrorCode: OK MetaResponse { CmdGetReadSessionsInfoResult { TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 2 ErrorCode: OK } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 0 ErrorCode: INITIALIZING ErrorReason: "tablet for partition is not running" } PartitionResult { Partition: 1 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 2 ErrorCode: OK } PartitionResult { Partition: 2 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 2 ErrorCode: OK } ErrorCode: OK } } } Assert failed: Check response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnDuplicatedPartition [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR } Assert failed: Check response: { Status: 128 ErrorReason: "TopicRequest must have Topic field." 
ErrorCode: BAD_REQUEST } Assert failed: Check response: { Status: 128 ErrorReason: "multiple TopicRequest for topic \'rt3.dc1--topic1\'" ErrorCode: BAD_REQUEST } Assert failed: Check response: { Status: 128 ErrorReason: "multiple partition 2 in TopicRequest for topic \'rt3.dc1--topic2\'" ErrorCode: BAD_REQUEST } >> TSchemeShardTest::CopyIndexedTable [GOOD] >> TSchemeShardTest::CopyTable >> TSchemeShardTest::MultipleColumnFamilies [GOOD] >> TSchemeShardTest::MultipleColumnFamiliesWithStorage >> TSchemeShardTest::CreateTableWithSplitBoundaries [GOOD] >> TSchemeShardTest::CreateTableWithConfig >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesTimeout [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::SuccessfullyPassesResponsesFromTablets ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedPartition [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR } Assert failed: Check response: { Status: 128 ErrorReason: "multiple TopicRequest for topic \'rt3.dc1--topic1\'" ErrorCode: BAD_REQUEST } Assert failed: Check response: { Status: 128 ErrorReason: "multiple partition 2 in TopicRequest for topic \'rt3.dc1--topic2\'" ErrorCode: BAD_REQUEST } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' is not created, Marker# PQ94" ErrorCode: UNKNOWN_TOPIC } 2025-06-24T21:26:56.706394Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:26:56.717897Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:26:56.718308Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928037] doesn't have tx info 2025-06-24T21:26:56.719119Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:26:56.719203Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-24T21:26:56.719294Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:26:56.719419Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:56.719529Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-24T21:26:56.720297Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [2:265:2255], now have 1 active actors on pipe 2025-06-24T21:26:56.720391Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T21:26:56.741303Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T21:26:56.744159Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T21:26:56.744292Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:56.749547Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928037] Config applied version 1 actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T21:26:56.749796Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:26:56.751176Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:26:56.752440Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:273:2261] 2025-06-24T21:26:56.755177Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 
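Each partition bootstrap in these blocks walks the same initializer sequence; the step names below are taken verbatim from the surrounding partition_init.cpp entries. In the entries captured here, a cold start with no stored data runs only the first two steps before "Initializing completed.", while the tablet restart later in this section additionally walks the disk, meta and data steps.

    TInitConfigStep
    TInitInternalFieldsStep
    TInitDiskStatusStep         (restart path only)
    TInitMetaStep               (restart path only)
    TInitInfoRangeStep          (restart path only)
    TInitDataRangeStep          (restart path only)
    TInitDataStep               (restart path only)
    TInitEndWriteTimestampStep  (restart path only)

After the last step the partition logs "Initializing completed.", "init complete for topic ... generation N" and "SYNC INIT ...", then switches to StateIdle, processes pending events and reports "no data for compaction".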
2025-06-24T21:26:56.755991Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:273:2261] 2025-06-24T21:26:56.756068Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:26:56.756119Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T21:26:56.758153Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928037, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:26:56.758929Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [2:276:2263], now have 1 active actors on pipe 2025-06-24T21:26:56.822619Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928137] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:26:56.827417Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928137] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:26:56.827821Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928137] doesn't have tx info 2025-06-24T21:26:56.827882Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928137] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:26:56.827929Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928137] no config, start with empty partitions and default config 2025-06-24T21:26:56.827979Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928137] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:26:56.828033Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:56.828099Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928137] doesn't have tx writes info 2025-06-24T21:26:56.828852Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928137] server connected, pipe [2:409:2362], now have 1 active actors on pipe 2025-06-24T21:26:56.828977Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928137] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T21:26:56.829176Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928137] Config update version 2(current 0) received from actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T21:26:56.832213Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928137] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T21:26:56.832363Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:56.833303Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: 
[PQ: 72057594037928137] Config applied version 2 actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T21:26:56.833470Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:26:56.833900Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:26:56.834194Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928137, Partition: 0, State: StateInit] bootstrapping 0 [2:417:2368] 2025-06-24T21:26:56.836598Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:0:Initializer] Initializing completed. 2025-06-24T21:26:56.836686Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928137, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 0 generation 2 [2:417:2368] 2025-06-24T21:26:56.836753Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928137, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:26:56.836808Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928137, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T21:26:56.837126Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928137, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:26:56.837747Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928137] server connected, pipe [2:420:2370], now have 1 active actors on pipe 2025-06-24T21:26:56.857211Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928138] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:26:56.861500Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928138] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:26:56.861885Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928138] doesn't have tx info 2025-06-24T21:26:56.861962Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928138] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:26:56.862004Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928138] no config, start with empty partitions and default config 2025-06-24T21:26:56.862053Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928138] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:26:56.862100Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:56.862162Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928138] doesn't have tx writes info 2025-06-24T21:26:56.862868Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928138] server connected, pipe [2:469:2407], now have 1 active actors on pipe 2025-06-24T21:26:56.863003Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928138] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T21:26:56.863235Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928138] Config update version 3(current 0) received from actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-24T21:26:56.866026Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-24T21:26:56.866168Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:56.867028Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928138] Config applied version 3 actor [2 ... 
: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 8 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T21:26:57.605202Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:57.606190Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928139] Config applied version 8 actor [3:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 8 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T21:26:57.606338Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:26:57.606759Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:26:57.606996Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:537:2458] 2025-06-24T21:26:57.609124Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-24T21:26:57.609209Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [3:537:2458] 2025-06-24T21:26:57.609275Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:26:57.609368Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-06-24T21:26:57.609758Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T21:26:57.610425Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [3:540:2460], now have 1 active actors on pipe 2025-06-24T21:26:57.611596Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [3:546:2463], now have 1 active actors on pipe 2025-06-24T21:26:57.611939Z node 3 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:57.612069Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928138] server connected, pipe [3:547:2464], now have 1 active actors on pipe 2025-06-24T21:26:57.612325Z node 3 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:57.612386Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [3:548:2464], now have 1 active actors on pipe 2025-06-24T21:26:57.612560Z node 3 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-24T21:26:57.623708Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [3:556:2471], now have 1 active actors on pipe 2025-06-24T21:26:57.648321Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:26:57.650248Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:26:57.650477Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info 2025-06-24T21:26:57.650515Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:26:57.650620Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:26:57.651486Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:57.651533Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-24T21:26:57.651610Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:26:57.651848Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:26:57.652020Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:613:2516] 2025-06-24T21:26:57.653328Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-06-24T21:26:57.654204Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-06-24T21:26:57.654380Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-06-24T21:26:57.654622Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-06-24T21:26:57.654841Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-06-24T21:26:57.654873Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T21:26:57.654907Z node 3 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:26:57.654939Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-24T21:26:57.654994Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [3:613:2516] 2025-06-24T21:26:57.655043Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:26:57.655086Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-06-24T21:26:57.655354Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T21:26:57.655831Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928138] server disconnected, pipe [3:547:2464] destroyed 2025-06-24T21:26:57.655878Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928037] server disconnected, pipe [3:546:2463] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionStatusResult { TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 Status: STATUS_OK LastInitDurationSeconds: 0 CreationTimestamp: 0 GapCount: 0 GapSize: 0 AvgWriteSpeedPerSec: 0 AvgWriteSpeedPerMin: 0 AvgWriteSpeedPerHour: 0 AvgWriteSpeedPerDay: 0 AvgReadSpeedPerSec: 0 AvgReadSpeedPerMin: 0 AvgReadSpeedPerHour: 0 AvgReadSpeedPerDay: 0 ReadBytesQuota: 0 WriteBytesQuota: 50000000 PartitionSize: 0 StartOffset: 0 EndOffset: 0 LastWriteTimestampMs: 39 WriteLagMs: 0 AvgQuotaSpeedPerSec: 0 AvgQuotaSpeedPerMin: 0 AvgQuotaSpeedPerHour: 0 AvgQuotaSpeedPerDay: 0 SourceIdCount: 0 SourceIdRetentionPeriodSec: 0 UsedReserveSize: 0 AggregatedCounters { Values: 39 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 50000000 Values: 0 Values: 9223372036854775807 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 } Generation: 2 Cookie: 1 ScaleStatus: NORMAL } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 1 Status: STATUS_OK LastInitDurationSeconds: 0 CreationTimestamp: 0 GapCount: 0 GapSize: 0 AvgWriteSpeedPerSec: 0 AvgWriteSpeedPerMin: 0 AvgWriteSpeedPerHour: 0 AvgWriteSpeedPerDay: 0 AvgReadSpeedPerSec: 0 AvgReadSpeedPerMin: 0 AvgReadSpeedPerHour: 0 AvgReadSpeedPerDay: 0 ReadBytesQuota: 0 WriteBytesQuota: 50000000 PartitionSize: 0 StartOffset: 0 EndOffset: 0 LastWriteTimestampMs: 79 WriteLagMs: 0 AvgQuotaSpeedPerSec: 0 AvgQuotaSpeedPerMin: 0 AvgQuotaSpeedPerHour: 0 AvgQuotaSpeedPerDay: 0 SourceIdCount: 0 SourceIdRetentionPeriodSec: 0 UsedReserveSize: 0 AggregatedCounters { Values: 79 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 50000000 Values: 0 Values: 9223372036854775807 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 } Generation: 2 Cookie: 1 ScaleStatus: NORMAL } PartitionResult { Partition: 2 Status: STATUS_UNKNOWN } ErrorCode: OK } } } >> TSchemeShardTest::SplitAlterCopy [GOOD] >> TSchemeShardTest::TopicReserveSize >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::SuccessfullyPassesResponsesFromTablets [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond >> TSchemeShardTest::CreateFinishedInDescription [GOOD] >> TSchemeShardTest::CreateBlockStoreVolume >> TSchemeShardTest::AlterTableCompactionPolicy [GOOD] >> TSchemeShardTest::AlterPersQueueGroup >> TSchemeShardTest::CreateTableWithConfig [GOOD] >> TSchemeShardTest::CreateTableWithNamedConfig >> TSchemeShardTest::CopyTable 
[GOOD] >> TSchemeShardTest::CopyTableAndConcurrentChanges >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailesOnNotATopic >> TSchemeShardTest::CreateBlockStoreVolume [GOOD] >> TSchemeShardTest::CreateBlockStoreVolumeWithVolumeChannelsProfiles >> TSchemeShardTest::MultipleColumnFamiliesWithStorage [GOOD] >> TSchemeShardTest::ParallelModifying >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailesOnNotATopic [GOOD] >> TSchemeShardTest::DropPQFail [GOOD] >> TSchemeShardTest::ManyDirs >> TSchemeShardTest::CreateBlockStoreVolumeWithVolumeChannelsProfiles [GOOD] >> TSchemeShardTest::CreateBlockStoreVolumeWithNonReplicatedPartitions >> TSchemeShardTest::CreateTableWithNamedConfig [GOOD] >> TSchemeShardTest::CreateTableWithUnknownNamedConfig ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailesOnNotATopic [GOOD] Test command err: Assert failed: Check response: { Status: 130 ErrorReason: "Timeout while waiting for response, may be just slow, Marker# PQ16" ErrorCode: ERROR } 2025-06-24T21:26:58.484380Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:26:58.488952Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928037] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:26:58.489304Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928037] doesn't have tx info 2025-06-24T21:26:58.489352Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:26:58.489400Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-24T21:26:58.489455Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:26:58.489510Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:58.489616Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-24T21:26:58.490317Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [2:265:2255], now have 1 active actors on pipe 2025-06-24T21:26:58.490401Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T21:26:58.511893Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T21:26:58.514648Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { 
PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T21:26:58.514812Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:58.515668Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928037] Config applied version 1 actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T21:26:58.515826Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:26:58.516245Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:26:58.516545Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:273:2261] 2025-06-24T21:26:58.518906Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 2025-06-24T21:26:58.518974Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:273:2261] 2025-06-24T21:26:58.519021Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:26:58.519072Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-24T21:26:58.519388Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928037, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:26:58.519937Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [2:276:2263], now have 1 active actors on pipe 2025-06-24T21:26:58.568965Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:26:58.572666Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:26:58.573008Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info 2025-06-24T21:26:58.573060Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:26:58.573101Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-24T21:26:58.573137Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:26:58.573203Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:58.573268Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-24T21:26:58.573971Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [2:409:2362], now have 1 active actors on pipe 2025-06-24T21:26:58.574087Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T21:26:58.574261Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928139] Config update version 2(current 0) received from actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T21:26:58.576636Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T21:26:58.576756Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:58.577605Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928139] Config applied version 2 actor [2:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T21:26:58.577727Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:26:58.578121Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:26:58.578351Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [2:417:2368] 2025-06-24T21:26:58.580390Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 
2025-06-24T21:26:58.580464Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [2:417:2368] 2025-06-24T21:26:58.580517Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:26:58.580567Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-24T21:26:58.580814Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T21:26:58.581392Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [2:420:2370], now have 1 active actors on pipe 2025-06-24T21:26:58.582721Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [2:426:2373], now have 1 active actors on pipe 2025-06-24T21:26:58.582776Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [2:428:2374], now have 1 active actors on pipe 2025-06-24T21:26:58.583183Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928037] server disconnected, pipe [2:426:2373] destroyed 2025-06-24T21:26:58.583599Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928139] server disconnected, pipe [2:428:2374] destroyed 2025-06-24T21:26:59.032528Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:26:59.035320Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:26:59.035622Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928037] doesn't have tx info 2025-06-24T21:26:59.035679Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:26:59.035725Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-24T21:26:59.035768Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:26:59.035817Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:59.035868Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-24T21:26:59.036442Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [3:264:2254], now have 1 active actors on pipe 2025-06-24T21:26:59.036548Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T21:26:59.036741Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928037] Config update version 3(current 0) received from actor [3:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 3 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-24T21:26:59.038491Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928037] Apply new con ... titionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-24T21:26:59.116640Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:59.117337Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037928138] Config applied version 5 actor [3:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 5 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-24T21:26:59.117472Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:26:59.117840Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:26:59.118058Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [3:477:2413] 2025-06-24T21:26:59.119649Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed. 
2025-06-24T21:26:59.119704Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [3:477:2413] 2025-06-24T21:26:59.119768Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:26:59.119819Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-24T21:26:59.120071Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928138, Partition: 1, State: StateIdle] no data for compaction 2025-06-24T21:26:59.120603Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928138] server connected, pipe [3:480:2415], now have 1 active actors on pipe 2025-06-24T21:26:59.135810Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:26:59.139710Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:26:59.140048Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info 2025-06-24T21:26:59.140098Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:26:59.140144Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:977: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-24T21:26:59.140180Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:26:59.140231Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:59.140286Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-24T21:26:59.141012Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [3:529:2452], now have 1 active actors on pipe 2025-06-24T21:26:59.141141Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1462: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-24T21:26:59.141310Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1648: [PQ: 72057594037928139] Config update version 6(current 0) received from actor [3:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 6 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T21:26:59.143587Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 6 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T21:26:59.143696Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:59.144355Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1489: 
[PQ: 72057594037928139] Config applied version 6 actor [3:102:2135] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 6 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-24T21:26:59.144463Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:26:59.144744Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:26:59.144900Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:537:2458] 2025-06-24T21:26:59.146424Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-24T21:26:59.146477Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [3:537:2458] 2025-06-24T21:26:59.146526Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:26:59.146580Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-24T21:26:59.146827Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T21:26:59.147419Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [3:540:2460], now have 1 active actors on pipe 2025-06-24T21:26:59.148390Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928037] server connected, pipe [3:546:2463], now have 1 active actors on pipe 2025-06-24T21:26:59.148467Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928138] server connected, pipe [3:547:2464], now have 1 active actors on pipe 2025-06-24T21:26:59.148533Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [3:548:2464], now have 1 active actors on pipe 2025-06-24T21:26:59.159522Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72057594037928139] server connected, pipe [3:553:2468], now have 1 active actors on pipe 2025-06-24T21:26:59.179694Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3101: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-24T21:26:59.181998Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3133: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-24T21:26:59.182231Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info 2025-06-24T21:26:59.182269Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-24T21:26:59.182380Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4910: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-24T21:26:59.183031Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:26:59.183077Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-24T21:26:59.183182Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-24T21:26:59.183448Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-24T21:26:59.183651Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:610:2513] 2025-06-24T21:26:59.185006Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-06-24T21:26:59.186065Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-06-24T21:26:59.186258Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-06-24T21:26:59.186506Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-06-24T21:26:59.186752Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-06-24T21:26:59.186789Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-24T21:26:59.186826Z node 3 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-24T21:26:59.186870Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-24T21:26:59.186919Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [3:610:2513] 2025-06-24T21:26:59.186966Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-24T21:26:59.187007Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-06-24T21:26:59.187271Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T21:26:59.187812Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928138] server disconnected, pipe [3:547:2464] destroyed 2025-06-24T21:26:59.187886Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72057594037928037] server disconnected, pipe [3:546:2463] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionLocationsResult { TopicResult { Topic: "rt3.dc1--topic1" PartitionLocation { Partition: 0 Host: "::1" HostId: 3 ErrorCode: OK } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic2" PartitionLocation { Partition: 1 Host: "::1" HostId: 3 ErrorCode: OK } PartitionLocation { Partition: 2 Host: "::1" HostId: 3 ErrorCode: OK } ErrorCode: OK } } } Assert failed: Check response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC } >> TSchemeShardTest::TopicReserveSize [GOOD] >> TSchemeShardTest::TopicWithAutopartitioningReserveSize >> BasicStatistics::ServerlessGlobalIndex [GOOD] |97.6%| [TA] $(B)/ydb/core/client/server/ut/test-results/unittest/{meta.json ... results_accumulator.log} |97.6%| [TA] {RESULT} $(B)/ydb/core/client/server/ut/test-results/unittest/{meta.json ... results_accumulator.log} |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest >> TSchemeShardTest::CreateBlockStoreVolumeWithNonReplicatedPartitions [GOOD] >> TSchemeShardTest::CreateAlterBlockStoreVolumeWithInvalidPoolKinds >> TSchemeShardTest::CopyTableAndConcurrentChanges [GOOD] >> TSchemeShardTest::CopyTableAndConcurrentSplit >> TSchemeShardTest::CreateTableWithUnknownNamedConfig [GOOD] >> TSchemeShardTest::CreatePersQueueGroup >> TTxAllocatorClientTest::InitiatingRequest |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> TTxAllocatorClientTest::ZeroRange >> TTxAllocatorClientTest::AllocateOverTheEdge |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::ServerlessGlobalIndex [GOOD] Test command err: 2025-06-24T21:24:11.475470Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:24:11.475815Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:11.475977Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004806/r3tmp/tmpHQM8Ro/pdisk_1.dat 2025-06-24T21:24:11.878680Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9948, node 1 2025-06-24T21:24:12.108574Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:12.108636Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:12.108667Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:12.108924Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:12.111508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:12.227912Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:12.228092Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:12.242098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19844 2025-06-24T21:24:12.805408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:16.048366Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:24:16.102179Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:16.102334Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:16.169667Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:24:16.172918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:16.370964Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:16.406240Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:16.406868Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:16.407470Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:16.407614Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:16.407870Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:16.407960Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:16.408059Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:16.408155Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:16.408259Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:16.588013Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:16.588142Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:16.601801Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:16.748264Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:16.798812Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:24:16.798954Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:24:16.832700Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:24:16.834268Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:24:16.834531Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:24:16.834618Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:24:16.834674Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:24:16.834745Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:24:16.834814Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:24:16.834877Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:24:16.835888Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:24:16.874187Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:16.874330Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:16.880180Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:24:16.882834Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:24:16.883045Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:24:16.888929Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-24T21:24:16.911492Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:24:16.911562Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:24:16.911640Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-24T21:24:16.926695Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:16.934892Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:24:16.935016Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:24:17.111257Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:24:17.258798Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:24:17.317125Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-24T21:24:17.845197Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:17.873876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:18.438448Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:18.586710Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7890: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-24T21:24:18.586792Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7906: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:24:18.586890Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:2496:2900], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:24:18.590007Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2504:2904] 2025-06-24T21:24:18.590390Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2504:2904], schemeshard id = 72075186224037899 2025-06-24T21:24:19.683822Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2623:3195], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:19.683977Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06- ... :26:53.847498Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7770:5531], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:53.847661Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7780:5536], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:53.847911Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Shared, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:26:53.860618Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:26:53.925549Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7784:5539], DatabaseId: /Root/Shared, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-24T21:26:54.018195Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 119 ], ReplyToActorId[ [2:7879:5587]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:54.018490Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 119 ] 2025-06-24T21:26:54.018541Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 119, ReplyToActorId = [2:7879:5587], StatRequests.size() = 1 2025-06-24T21:26:54.106639Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7884:5589] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Shared/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:26:54.148887Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 120 ], ReplyToActorId[ [2:7913:5604]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:54.149105Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 120 ] 2025-06-24T21:26:54.149296Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:171: [72075186224037894] EvRequestStats, node id = 2, schemeshard count = 1, urgent = 0 2025-06-24T21:26:54.149342Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:26:54.149453Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:26:54.149518Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [2:7913:5604], StatRequests.size() = 1 2025-06-24T21:26:54.266799Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YTExY2YwZGUtNTRlNWU2YTItMjVkNDgzNWUtNzU4NDM0ZTc=, TxId: 2025-06-24T21:26:54.266875Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YTExY2YwZGUtNTRlNWU2YTItMjVkNDgzNWUtNzU4NDM0ZTc=, TxId: 2025-06-24T21:26:54.267455Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:26:54.281935Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:26:54.282040Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:26:54.337320Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:26:54.337424Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:26:54.392393Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:3129:3126], schemeshard count = 1 2025-06-24T21:26:54.666180Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8072: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037899 2025-06-24T21:26:54.666249Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 239.000000s, at schemeshard: 72075186224037899 2025-06-24T21:26:54.666468Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037899, stats size# 50 2025-06-24T21:26:54.680300Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-24T21:26:55.549295Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:7976:5643]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:55.549608Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-24T21:26:55.549662Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:7976:5643], StatRequests.size() = 1 2025-06-24T21:26:56.726246Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:26:56.737240Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:26:56.737325Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:26:56.737374Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 4] is data table. 2025-06-24T21:26:56.737405Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037899, LocalPathId: 4] 2025-06-24T21:26:56.737700Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-24T21:26:56.740878Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:26:56.754620Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZWI1NDJhMGItNzJkMTkyZTYtNjEzMDkwZjctMTJkNzIyYmU=, TxId: 2025-06-24T21:26:56.754711Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZWI1NDJhMGItNzJkMTkyZTYtNjEzMDkwZjctMTJkNzIyYmU=, TxId: 2025-06-24T21:26:56.755366Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:26:56.770258Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037899, LocalPathId: 4] 2025-06-24T21:26:56.770335Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:26:56.838178Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:8044:5684]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:56.838435Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-24T21:26:56.838473Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:8044:5684], StatRequests.size() = 1 2025-06-24T21:26:58.155474Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:8092:5710]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:58.155827Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-24T21:26:58.155882Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:8092:5710], StatRequests.size() = 1 2025-06-24T21:26:59.261614Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 2 2025-06-24T21:26:59.261938Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:26:59.262404Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:26:59.273965Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:26:59.274046Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:26:59.274097Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is data table. 2025-06-24T21:26:59.274153Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-24T21:26:59.274494Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-24T21:26:59.278080Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:26:59.287468Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NTk3NDMyZDktODYyNDczM2ItMjFiOTJiN2MtMzVjMDRjNTE=, TxId: 2025-06-24T21:26:59.287527Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NTk3NDMyZDktODYyNDczM2ItMjFiOTJiN2MtMzVjMDRjNTE=, TxId: 2025-06-24T21:26:59.287986Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:26:59.302651Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-24T21:26:59.302712Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:26:59.398825Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:8156:5747]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:26:59.399172Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-24T21:26:59.399220Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:8156:5747], StatRequests.size() = 1 >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_OldPartitionExists_NotWritten_Test [GOOD] >> TTxAllocatorClientTest::AllocateOverTheEdge [GOOD] >> TTxAllocatorClientTest::InitiatingRequest [GOOD] >> TSchemeShardTest::TopicWithAutopartitioningReserveSize [GOOD] >> HttpRequest::ProbeServerless [GOOD] >> TSchemeShardTest::CreateAlterBlockStoreVolumeWithInvalidPoolKinds [GOOD] >> TSchemeShardTest::CreateDropKesus ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest >> TTxAllocatorClientTest::AllocateOverTheEdge [GOOD] Test command err: 2025-06-24T21:27:01.560828Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-24T21:27:01.563536Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-24T21:27:01.565197Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-24T21:27:01.594899Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.599420Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-24T21:27:01.617334Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.617541Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.617690Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-24T21:27:01.617853Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.618046Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.618168Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-24T21:27:01.618300Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-24T21:27:01.619169Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:72:2106] requested range size#5000 2025-06-24T21:27:01.621651Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.621732Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.621827Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 5000 2025-06-24T21:27:01.621870Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:72:2106] TEvAllocateResult from# 0 to# 5000 2025-06-24T21:27:01.622110Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 1000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-06-24T21:27:01.622299Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 1000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-06-24T21:27:01.622479Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 1000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-06-24T21:27:01.622650Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. 
Just a warning, request is processed. Requested: 1000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-06-24T21:27:01.622791Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:72:2106] requested range size#5000 2025-06-24T21:27:01.625184Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.625273Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.625407Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 5000 Reserved to# 10000 2025-06-24T21:27:01.625455Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:72:2106] TEvAllocateResult from# 5000 to# 10000 2025-06-24T21:27:01.625687Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 500 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-06-24T21:27:01.625879Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 1000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-06-24T21:27:01.626128Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 2500 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-06-24T21:27:01.626430Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 1000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-06-24T21:27:01.626588Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:72:2106] requested range size#5000 2025-06-24T21:27:01.627052Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.627137Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.627273Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 10000 Reserved to# 15000 2025-06-24T21:27:01.627310Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:72:2106] TEvAllocateResult from# 10000 to# 15000 2025-06-24T21:27:01.627505Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. 
Requested: 3000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest >> TTxAllocatorClientTest::InitiatingRequest [GOOD] Test command err: 2025-06-24T21:27:01.560522Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-24T21:27:01.563546Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-24T21:27:01.565225Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-24T21:27:01.594801Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.599418Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-24T21:27:01.617430Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.617657Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.617821Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-24T21:27:01.618017Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.618190Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.618322Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-24T21:27:01.618451Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! 
Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-24T21:27:01.619445Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:72:2106] requested range size#5000 2025-06-24T21:27:01.621708Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.621797Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.621926Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 5000 2025-06-24T21:27:01.621976Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:72:2106] TEvAllocateResult from# 0 to# 5000 >> ReadOnlyVDisk::TestSync [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::TopicWithAutopartitioningReserveSize [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:26:48.764227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:26:48.764320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:26:48.764363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:26:48.764407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:26:48.765342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:26:48.765380Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:26:48.765436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:26:48.765505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:26:48.766216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:26:48.769061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:26:48.845758Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:48.845835Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:26:48.861973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:26:48.862352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:26:48.862599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:26:48.872152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:26:48.874890Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:26:48.876479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:48.880006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:26:48.888329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:26:48.889697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:26:48.897439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:26:48.897527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:26:48.898324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:26:48.898374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:26:48.898424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:26:48.898500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:26:48.903955Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:26:49.038322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:26:49.038553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.038819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:26:49.038870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:26:49.039073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:26:49.039134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:26:49.041173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:49.041349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:26:49.041505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.041552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:26:49.041605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:26:49.041641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:26:49.043382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.043430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:26:49.043470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:26:49.045033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.045088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.045133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.045182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:26:49.054336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 
2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:26:49.056368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:26:49.056532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:26:49.057362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:49.057496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:26:49.057549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.057875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:26:49.057933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.058106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:26:49.058184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:26:49.060123Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:26:49.060181Z node 1 :FLAT_TX_SCHEMESHARD ... 
075186233409549, partId: 0 2025-06-24T21:27:01.430098Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409549 Status: COMPLETE TxId: 104 Step: 5000005 2025-06-24T21:27:01.430134Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:624: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409549 Status: COMPLETE TxId: 104 Step: 5000005 2025-06-24T21:27:01.430164Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:265: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:4, shard: 72075186233409549, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-24T21:27:01.430192Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:629: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-06-24T21:27:01.430345Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 104:0 128 -> 240 2025-06-24T21:27:01.430546Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 2025-06-24T21:27:01.439666Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:27:01.439852Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:27:01.439957Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:27:01.440055Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:27:01.440133Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:27:01.440217Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:27:01.440389Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:27:01.440426Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:27:01.440668Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:27:01.440697Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [13:208:2208], at schemeshard: 72057594046678944, txId: 104, path id: 2 2025-06-24T21:27:01.440764Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 
72057594046678944 2025-06-24T21:27:01.440806Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-24T21:27:01.440966Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:27:01.441034Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:27:01.441092Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:27:01.441143Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:27:01.441207Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-24T21:27:01.441268Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:27:01.441332Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:0 2025-06-24T21:27:01.441380Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:0 2025-06-24T21:27:01.441642Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 10 2025-06-24T21:27:01.441707Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 104, publications: 1, subscribers: 0 2025-06-24T21:27:01.441763Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-24T21:27:01.442668Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:27:01.442758Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:27:01.442798Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:27:01.442871Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-24T21:27:01.442929Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 2025-06-24T21:27:01.443031Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-24T21:27:01.451214Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 
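
The trace above steps a schemeshard sub-operation through its numeric states (2 -> 3 -> 128 -> 240) and then publishes the new path version to the scheme board, completing once TEvUpdateAck arrives. Below is a minimal, hypothetical C++ sketch of that progression for readers skimming the log; the enum names and the transition table are reconstructed from the DEBUG lines here and are not YDB source code.

// Minimal illustrative sketch (NOT YDB source): the sub-operation state
// progression observed in the log lines above.
//   2 (CreateParts) -> 3 (ConfigureParts) -> 128 (Propose) -> 240 (Done)
// After Done, the schemeshard publishes the new path version to the scheme
// board and waits for TEvUpdateAck before notifying waiters.
#include <cstdint>
#include <iostream>
#include <map>

enum class ESubOpState : std::uint8_t {
    CreateParts    = 2,    // "Change state for txid 1:0 2 -> 3"
    ConfigureParts = 3,    // "Change state for txid 1:0 3 -> 128"
    Propose        = 128,  // coordinator plan step (FAKE_COORDINATOR) lands here
    Done           = 240,  // "Change state for txid 104:0 128 -> 240"
};

int main() {
    // Transition table reconstructed from the DEBUG output; purely illustrative.
    const std::map<ESubOpState, ESubOpState> next = {
        {ESubOpState::CreateParts,    ESubOpState::ConfigureParts},
        {ESubOpState::ConfigureParts, ESubOpState::Propose},
        {ESubOpState::Propose,        ESubOpState::Done},
    };
    for (ESubOpState s = ESubOpState::CreateParts; next.count(s) != 0; s = next.at(s)) {
        std::cout << static_cast<int>(s) << " -> "
                  << static_cast<int>(next.at(s)) << '\n';
    }
    return 0;
}

Running the sketch prints the same three transitions that appear in the log (2 -> 3, 3 -> 128, 128 -> 240).
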
TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-24T21:27:01.481094Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-24T21:27:01.481170Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-24T21:27:01.481728Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-24T21:27:01.481867Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T21:27:01.481942Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [13:1485:3287] TestWaitNotification: OK eventTxId 104 2025-06-24T21:27:01.482623Z node 13 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:27:01.482876Z node 13 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 288us result status StatusSuccess 2025-06-24T21:27:01.483600Z node 13 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 4 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 6 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY PartitionStrategy { MinPartitionCount: 1 MaxPartitionCount: 7 PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Inactive ChildPartitionIds: 1 ChildPartitionIds: 2 } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { ToBound: "A" } Status: Inactive ParentPartitionIds: 0 ChildPartitionIds: 3 ChildPartitionIds: 4 } Partitions { PartitionId: 2 TabletId: 72075186233409549 KeyRange { FromBound: "A" } Status: Inactive ParentPartitionIds: 0 ChildPartitionIds: 5 } Partitions { PartitionId: 3 TabletId: 72075186233409550 KeyRange { ToBound: "0" } Status: Active ParentPartitionIds: 1 } Partitions { PartitionId: 4 TabletId: 72075186233409551 KeyRange { FromBound: "0" ToBound: "A" } Status: Inactive ParentPartitionIds: 1 ChildPartitionIds: 5 } Partitions { PartitionId: 5 TabletId: 72075186233409552 KeyRange { FromBound: "0" } Status: Active ParentPartitionIds: 2 ParentPartitionIds: 4 } AlterVersion: 4 BalancerTabletID: 72075186233409547 NextPartitionId: 6 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 
50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 494 AccountSize: 494 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 6 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTest::CopyTableAndConcurrentSplit [GOOD] >> TSchemeShardTest::CopyTableAndConcurrentMerge >> KqpNamedExpressions::NamedExpressionSimple+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestSync [GOOD] Test command err: RandomSeed# 15860680689295324485 Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] SEND TEvPut with key [1:1:0:0:0:131072:0] 2025-06-24T21:26:48.993743Z 1 00h02m00.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:8816:940] 2025-06-24T21:26:48.994052Z 2 00h02m00.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:8823:947] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] Setting VDisk read-only to 0 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] SEND TEvPut with key [1:1:1:0:0:32768:0] 2025-06-24T21:26:50.803108Z 3 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:8830:954] 2025-06-24T21:26:50.803315Z 2 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:8823:947] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 0 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] SEND TEvPut with key [1:1:2:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 2 Invoking SetVDiskReadOnly for vdisk 
[82000000:1:0:2:0] Setting VDisk read-only to 0 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] Setting VDisk read-only to 1 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] SEND TEvPut with key [1:1:3:0:0:32768:0] 2025-06-24T21:26:54.671798Z 5 00h14m00.361536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:8844:968] 2025-06-24T21:26:54.671872Z 4 00h14m00.361536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:8837:961] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] Setting VDisk read-only to 0 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] Setting VDisk read-only to 1 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] Setting VDisk read-only to 1 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] SEND TEvPut with key [1:1:4:0:0:131072:0] 2025-06-24T21:26:56.788796Z 6 00h18m00.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:8851:975] 2025-06-24T21:26:56.788898Z 5 00h18m00.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:8844:968] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] Setting VDisk read-only to 1 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] Setting VDisk read-only to 1 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] SEND TEvPut with key [1:1:5:0:0:32768:0] 2025-06-24T21:26:58.795812Z 7 00h22m00.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:8858:982] 2025-06-24T21:26:58.795883Z 6 00h22m00.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:8851:975] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] Setting VDisk read-only to 1 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] SEND TEvPut with key [1:1:6:0:0:131072:0] 2025-06-24T21:27:00.607720Z 7 00h26m00.561536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:8858:982] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Read all 7 blob(s) === SEND TEvGet with key 
[1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_OldPartitionExists_NotWritten_Test [GOOD] Test command err: 2025-06-24T21:25:39.871585Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630200453776497:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:39.871910Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:25:39.899358Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630201179020697:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:25:39.905270Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:25:40.047385Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:25:40.059882Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ac5/r3tmp/tmp2elHPF/pdisk_1.dat 2025-06-24T21:25:40.291525Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:40.293311Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:40.293416Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:40.294544Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:25:40.294605Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:25:40.297357Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:25:40.298369Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:25:40.299001Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2812, node 1 2025-06-24T21:25:40.371416Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/003ac5/r3tmp/yandexqXBGRh.tmp 2025-06-24T21:25:40.371554Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/003ac5/r3tmp/yandexqXBGRh.tmp 2025-06-24T21:25:40.371819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/003ac5/r3tmp/yandexqXBGRh.tmp 2025-06-24T21:25:40.372010Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:25:40.430191Z INFO: TTestServer started on Port 8804 GrpcPort 2812 TClient is connected to server localhost:8804 PQClient connected to localhost:2812 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:25:40.739703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:25:40.777033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:25:40.881173Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:25:40.905700Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... waiting... 2025-06-24T21:25:43.076270Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630217633646765:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:43.076475Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630217633646800:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:43.076580Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:25:43.080450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:25:43.097988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715662, at schemeshard: 72057594046644480 2025-06-24T21:25:43.098344Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630217633646802:2307], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-24T21:25:43.288455Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630217633646888:2793] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:25:43.313682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:43.315160Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519630218358890173:2274], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:25:43.315399Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZTc5ODhkNjctMTFiNzM0NTQtNjUyNGYyYWUtODAxNTZjMw==, ActorId: [2:7519630218358890148:2268], ActorState: ExecuteState, TraceId: 01jyhxa826fw1r5s3qcf385x8m, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:25:43.315960Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519630217633646898:2313], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:25:43.316135Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MTM5N2VlOGUtN2NjMTJlN2YtNDBiMjhmMzQtYmRkYzc4Njc=, ActorId: [1:7519630217633646761:2300], ActorState: ExecuteState, TraceId: 01jyhxa80s0qc90hj9ed7xe78g, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:25:43.317238Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:25:43.317247Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:25:43.393489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:25:43.472283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (G ... 
, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-24T21:26:59.243436Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-24T21:26:59.243449Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-24T21:26:59.243477Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__sm_chooser_actor.h:116: TPartitionChooser [9:7519630544218783164:3810] (SourceId=A_Source_7, PreferedPartition=(NULL)) GetOwnershipFast Partition=1 TabletId=1001 2025-06-24T21:26:59.243627Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 269877760, Sender [9:7519630544218783165:3810], Recipient [9:7519630522743945648:3222]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 1001 Status: OK ServerId: [9:7519630544218783164:3810] Leader: 1 Dead: 0 Generation: 1 VersionInfo: } 2025-06-24T21:26:59.243722Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 271188557, Sender [9:7519630544218783164:3810], Recipient [9:7519630522743945648:3222]: NKikimrPQ.TEvCheckPartitionStatusRequest Partition: 1 SourceId: "A_Source_7" 2025-06-24T21:26:59.243810Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:139: StateOwnershipFast, received event# 271188558, Sender [9:7519630522743945648:3222], Recipient [9:7519630544218783164:3810]: NKikimrPQ.TEvCheckPartitionStatusResponse Status: Active 2025-06-24T21:26:59.243852Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:88: TPartitionChooser [9:7519630544218783164:3810] (SourceId=A_Source_7, PreferedPartition=(NULL)) InitTable: SourceId=A_Source_7 TopicsAreFirstClassCitizen=1 UseSrcIdMetaMappingInFirstClass=1 2025-06-24T21:26:59.243922Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 65543, Sender [9:7519630544218783164:3810], Recipient [9:7519630522743945648:3222]: NActors::TEvents::TEvPoison 2025-06-24T21:26:59.243975Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:101: StateInitTable, received event# 277020685, Sender [9:7519630479794270783:2070], Recipient [9:7519630544218783164:3810]: NKikimr::NMetadata::NProvider::TEvManagerPrepared 2025-06-24T21:26:59.244005Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [9:7519630544218783164:3810] (SourceId=A_Source_7, PreferedPartition=(NULL)) StartKqpSession 2025-06-24T21:26:59.246695Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:132: 
StateCreateKqpSession, received event# 271646728, Sender [9:7519630479794270906:2173], Recipient [9:7519630544218783164:3810]: NKikimrKqp.TEvCreateSessionResponse Error: "" Response { SessionId: "ydb://session/3?node_id=9&id=MWI0M2Q3N2ItMjM4ZWJmZjktNTM2NTA5M2ItZjM2ODAwNjQ=" NodeId: 9 } YdbStatus: SUCCESS ResourceExhausted: false 2025-06-24T21:26:59.246735Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [9:7519630544218783164:3810] (SourceId=A_Source_7, PreferedPartition=(NULL)) Select from the table 2025-06-24T21:26:59.399537Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:163: StateSelect, received event# 271646721, Sender [9:7519630479794270906:2173], Recipient [9:7519630544218783164:3810]: NKikimrKqp.TEvQueryResponse Response { SessionId: "ydb://session/3?node_id=9&id=MWI0M2Q3N2ItMjM4ZWJmZjktNTM2NTA5M2ItZjM2ODAwNjQ=" PreparedQuery: "6fbe9cde-12f12cc7-63ab2343-3fb155a" QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { id: "01jyhxcjj18vzchrq6nfj2xwge" } YdbResults { columns { name: "Partition" type { optional_type { item { type_id: UINT32 } } } } columns { name: "CreateTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "AccessTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "SeqNo" type { optional_type { item { type_id: UINT64 } } } } rows { items { uint32_value: 0 } items { uint64_value: 1750800419121 } items { uint64_value: 1750800419121 } items { uint64_value: 13 } } } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 96 2025-06-24T21:26:59.399770Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:151: TPartitionChooser [9:7519630544218783164:3810] (SourceId=A_Source_7, PreferedPartition=(NULL)) Selected from table PartitionId=0 SeqNo=13 2025-06-24T21:26:59.399814Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__sm_chooser_actor.h:151: TPartitionChooser [9:7519630544218783164:3810] (SourceId=A_Source_7, PreferedPartition=(NULL)) GetOldSeqNo 2025-06-24T21:26:59.399996Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 269877760, Sender [9:7519630544218783194:3810], Recipient [9:7519630522743945647:3221]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 1000 Status: OK ServerId: [9:7519630544218783164:3810] Leader: 1 Dead: 0 Generation: 1 VersionInfo: } 2025-06-24T21:26:59.400127Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 271187968, Sender [9:7519630544218783164:3810], Recipient [9:7519630522743945647:3221]: NKikimrClient.TPersQueueRequest PartitionRequest { Partition: 0 CmdGetMaxSeqNo { SourceId: "\000A_Source_7" } PipeClient { RawX1: 7519630544218783194 RawX2: 38654709474 } } 2025-06-24T21:26:59.400207Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:209: TPartitionChooser [9:7519630544218783164:3810] (SourceId=A_Source_7, PreferedPartition=(NULL)) OnPartitionChosen 2025-06-24T21:26:59.400318Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 65543, Sender [9:7519630544218783164:3810], Recipient [9:7519630522743945647:3221]: NActors::TEvents::TEvPoison 2025-06-24T21:26:59.400326Z node 9 :PQ_PARTITION_CHOOSER TRACE: 
partition_chooser_ut.cpp:382: StateMockWork, received event# 269877760, Sender [9:7519630544218783195:3810], Recipient [9:7519630522743945648:3222]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 1001 Status: OK ServerId: [9:7519630544218783164:3810] Leader: 1 Dead: 0 Generation: 1 VersionInfo: } 2025-06-24T21:26:59.400383Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 271188557, Sender [9:7519630544218783164:3810], Recipient [9:7519630522743945648:3222]: NKikimrPQ.TEvCheckPartitionStatusRequest Partition: 1 2025-06-24T21:26:59.400444Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:240: StateCheckPartition, received event# 271188558, Sender [9:7519630522743945648:3222], Recipient [9:7519630544218783164:3810]: NKikimrPQ.TEvCheckPartitionStatusResponse Status: Active 2025-06-24T21:26:59.400473Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [9:7519630544218783164:3810] (SourceId=A_Source_7, PreferedPartition=(NULL)) Update the table 2025-06-24T21:26:59.400641Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 65543, Sender [9:7519630544218783164:3810], Recipient [9:7519630522743945648:3222]: NActors::TEvents::TEvPoison 2025-06-24T21:26:59.503421Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:212: StateUpdate, received event# 271646721, Sender [9:7519630479794270906:2173], Recipient [9:7519630544218783164:3810]: NKikimrKqp.TEvQueryResponse Response { SessionId: "ydb://session/3?node_id=9&id=MWI0M2Q3N2ItMjM4ZWJmZjktNTM2NTA5M2ItZjM2ODAwNjQ=" PreparedQuery: "25a1432f-11e22370-6594f01d-f258a414" QueryParameters { Name: "$AccessTime" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$CreateTime" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Partition" Type { Kind: Data Data { Scheme: 2 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SeqNo" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 62 Received TEvChooseResult: 1 2025-06-24T21:26:59.503493Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [9:7519630544218783164:3810] (SourceId=A_Source_7, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-24T21:26:59.503536Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [9:7519630544218783164:3810] (SourceId=A_Source_7, PreferedPartition=(NULL)) ReplyResult: Partition=1, SeqNo=13 2025-06-24T21:26:59.503566Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [9:7519630544218783164:3810] (SourceId=A_Source_7, PreferedPartition=(NULL)) Start idle Run query: --!syntax_v1 SELECT Partition, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash = 6541068412312944787 AND Topic = "Root" AND ProducerId = "00415F536F757263655F37" 2025-06-24T21:26:59.689056Z node 9 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715706. 
Ctx: { TraceId: 01jyhxcjp6fbgrcd3r075gg9j5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=NDYwNjlkN2MtOWNmMmZkZjMtNTAzMDJkNmEtZDAyNTI3Mjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:26:59.760313Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:26:59.760338Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:00.141879Z node 9 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [9:7519630548513750588:2644] TxId: 281474976715707. Ctx: { TraceId: 01jyhxck4y8zt00byz1k1p6fd2, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=ZjI2MGQ5NzUtMzFlZjQ3OGYtNzU0OGY2NTktMmVlMWRjMTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 10 2025-06-24T21:27:00.142699Z node 9 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [9:7519630548513750595:2644], TxId: 281474976715707, task: 2. Ctx: { TraceId : 01jyhxck4y8zt00byz1k1p6fd2. SessionId : ydb://session/3?node_id=9&id=ZjI2MGQ5NzUtMzFlZjQ3OGYtNzU0OGY2NTktMmVlMWRjMTk=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [9:7519630548513750588:2644], status: UNAVAILABLE, reason: {
: Error: Terminate execution } >> KqpNewEngine::StreamLookupWithView >> KqpNewEngine::DuplicatedResults >> KqpRanges::UpdateMulti >> KqpNotNullColumns::UpsertNotNullPk >> KqpReturning::ReturningTwice >> TSchemeShardTest::CreateDropKesus [GOOD] >> TSchemeShardTest::CreateAlterKesus ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> HttpRequest::ProbeServerless [GOOD] Test command err: 2025-06-24T21:24:04.225616Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:24:04.225964Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:04.226152Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004855/r3tmp/tmpqTrOZx/pdisk_1.dat 2025-06-24T21:24:04.633555Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4517, node 1 2025-06-24T21:24:04.991802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:04.991874Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:04.991908Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:04.992158Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:04.994793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:05.112313Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:05.112495Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:05.126172Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3848 2025-06-24T21:24:05.824165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:09.354823Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:24:09.404206Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:09.404367Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:09.473984Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:24:09.477953Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:09.742288Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:09.780796Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.781419Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.781952Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.782132Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.782214Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.782424Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.782507Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.782591Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.782667Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.992489Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:09.992642Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:10.012929Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:10.210180Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:10.265400Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:24:10.265553Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:24:10.303529Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:24:10.304874Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:24:10.305114Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:24:10.305183Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:24:10.305236Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:24:10.305329Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:24:10.305386Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:24:10.305439Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:24:10.306449Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:24:10.341854Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:10.341974Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:10.348495Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:24:10.353059Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:24:10.353344Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:24:10.360523Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-24T21:24:10.384772Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:24:10.384837Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:24:10.384910Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-24T21:24:10.398186Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:10.404839Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:24:10.404957Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:24:10.604554Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:24:10.852633Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:24:10.903954Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-24T21:24:11.431309Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:11.461383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:12.037219Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:12.204904Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7890: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-24T21:24:12.204993Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7906: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:24:12.205134Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:2496:2900], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:24:12.207695Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2504:2904] 2025-06-24T21:24:12.207986Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2504:2904], schemeshard id = 72075186224037899 2025-06-24T21:24:13.353202Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2623:3195], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:13.353392Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-2 ... omplete 2025-06-24T21:27:00.853599Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:27:00.878574Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:27:00.878819Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-24T21:27:00.879755Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11493:8542], server id = [2:11498:8547], tablet id = 72075186224037905, status = OK 2025-06-24T21:27:00.879831Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11493:8542], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-24T21:27:00.880088Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11494:8543], server id = [2:11499:8548], tablet id = 72075186224037906, status = OK 2025-06-24T21:27:00.880129Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11494:8543], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-24T21:27:00.880657Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11495:8544], server id = [2:11501:8550], tablet id = 72075186224037907, status = OK 2025-06-24T21:27:00.880699Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11495:8544], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-24T21:27:00.881741Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11496:8545], server id = [2:11500:8549], tablet id = 72075186224037908, status = OK 2025-06-24T21:27:00.881805Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11496:8545], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-24T21:27:00.882002Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11497:8546], server id = [2:11502:8551], tablet id = 72075186224037909, status = OK 2025-06-24T21:27:00.882049Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11497:8546], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-24T21:27:00.883536Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037905 2025-06-24T21:27:00.884071Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037906 2025-06-24T21:27:00.884488Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11493:8542], server id = [2:11498:8547], tablet id = 72075186224037905 2025-06-24T21:27:00.884517Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:27:00.884835Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037907 2025-06-24T21:27:00.885135Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11494:8543], server id = [2:11499:8548], tablet id = 72075186224037906 2025-06-24T21:27:00.885164Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:27:00.885275Z node 2 
:STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037908 2025-06-24T21:27:00.885589Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037909 2025-06-24T21:27:00.885740Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11495:8544], server id = [2:11501:8550], tablet id = 72075186224037907 2025-06-24T21:27:00.885758Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:27:00.885960Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11508:8557], server id = [2:11511:8560], tablet id = 72075186224037910, status = OK 2025-06-24T21:27:00.886035Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11508:8557], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-24T21:27:00.886705Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11509:8558], server id = [2:11510:8559], tablet id = 72075186224037911, status = OK 2025-06-24T21:27:00.886756Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11509:8558], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-24T21:27:00.887451Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11496:8545], server id = [2:11500:8549], tablet id = 72075186224037908 2025-06-24T21:27:00.887475Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:27:00.887748Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11497:8546], server id = [2:11502:8551], tablet id = 72075186224037909 2025-06-24T21:27:00.887769Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:27:00.888040Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11512:8561], server id = [2:11514:8563], tablet id = 72075186224037912, status = OK 2025-06-24T21:27:00.888102Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11512:8561], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-24T21:27:00.888773Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11513:8562], server id = [2:11516:8565], tablet id = 72075186224037913, status = OK 2025-06-24T21:27:00.888832Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11513:8562], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-24T21:27:00.889369Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11515:8564], server id = [2:11517:8566], tablet id = 72075186224037914, status = OK 2025-06-24T21:27:00.889415Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11515:8564], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-24T21:27:00.889522Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037910 2025-06-24T21:27:00.890361Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037911 2025-06-24T21:27:00.890477Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11508:8557], server id = [2:11511:8560], tablet id = 72075186224037910 
2025-06-24T21:27:00.890498Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:27:00.890734Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037912 2025-06-24T21:27:00.891191Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11509:8558], server id = [2:11510:8559], tablet id = 72075186224037911 2025-06-24T21:27:00.891220Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:27:00.891518Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11512:8561], server id = [2:11514:8563], tablet id = 72075186224037912 2025-06-24T21:27:00.891550Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:27:00.891666Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037913 2025-06-24T21:27:00.891826Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037914 2025-06-24T21:27:00.891857Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:27:00.892022Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:27:00.892172Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:27:00.892481Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Shared 2025-06-24T21:27:00.894169Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11513:8562], server id = [2:11516:8565], tablet id = 72075186224037913 2025-06-24T21:27:00.894205Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:27:00.894513Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11515:8564], server id = [2:11517:8566], tablet id = 72075186224037914 2025-06-24T21:27:00.894532Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:27:00.894940Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:27:00.916682Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YzQyZjUxZjQtYzRlMTBhZmYtYjc1NTkzOGMtNzI1MjEyM2Q=, TxId: 2025-06-24T21:27:00.916769Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YzQyZjUxZjQtYzRlMTBhZmYtYjc1NTkzOGMtNzI1MjEyM2Q=, TxId: 2025-06-24T21:27:00.917205Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:27:00.932808Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-24T21:27:00.932891Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: 
[72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=CtsdA, ActorId=[1:6274:3996] 2025-06-24T21:27:00.934029Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:11545:6206]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:27:00.934219Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:27:00.934258Z node 1 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:27:00.934506Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:27:00.934550Z node 1 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-24T21:27:00.934608Z node 1 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037899, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 2 ] 2025-06-24T21:27:00.943261Z node 1 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 Answer: '/Root/Database/Table1[Value]=4' >> KqpRanges::IsNull >> KqpRanges::WhereInSubquery |97.6%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/test-results/unittest/{meta.json ... results_accumulator.log} |97.6%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeShardTest::CreatePersQueueGroup [GOOD] >> TSchemeShardTest::CreatePersQueueGroupWithKeySchema >> KqpNewEngine::Select1 >> KqpNotNullColumns::UpdateNotNullPk >> KqpSqlIn::TableSource |97.6%| [TA] $(B)/ydb/services/ext_index/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeShardTest::CreateAlterKesus [GOOD] >> TSchemeShardTest::CreateDropSolomon >> TSchemeShardTest::ParallelModifying [GOOD] >> TSchemeShardTest::PQGroupExplicitChannels >> TSchemeShardTest::AlterPersQueueGroup [GOOD] >> TSchemeShardTest::AlterPersQueueGroupWithKeySchema >> KqpReturning::ReturningWorksIndexedDelete+QueryService >> TSchemeShardTest::CopyTableAndConcurrentMerge [GOOD] >> TSchemeShardTest::CopyTableAndConcurrentSplitMerge |97.6%| [TA] {RESULT} $(B)/ydb/services/ext_index/ut/test-results/unittest/{meta.json ... results_accumulator.log} |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::AlterRemoveTest >> TGRpcCmsTest::SimpleTenantsTest >> TSchemeShardTest::AlterPersQueueGroupWithKeySchema [GOOD] >> TGRpcCmsTest::DisabledTxTest |97.6%| [TA] $(B)/ydb/core/persqueue/ut/slow/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TGRpcCmsTest::AuthTokenTest >> TSchemeShardTest::CreatePersQueueGroupWithKeySchema [GOOD] >> TSchemeShardTest::AlterBlockStoreVolume >> TSchemeShardTest::CreateTableWithCompactionStrategies >> TSchemeShardTest::PQGroupExplicitChannels [GOOD] >> TSchemeShardTest::CreateDropSolomon [GOOD] >> TSchemeShardTest::CreateAlterDropSolomon >> KqpNotNullColumns::UpsertNotNullPk [GOOD] >> KqpQueryService::TableSink_OlapUpdate >> TSchemeShardTest::CreateTableWithCompactionStrategies [GOOD] >> KqpQueryService::DdlUser >> SystemView::ShowCreateTableColumnAlterColumn [GOOD] >> SystemView::ShowCreateTableColumnUpsertOptions >> TSchemeShardTest::AlterBlockStoreVolume [GOOD] >> TSchemeShardTest::CopyTableAndConcurrentSplitMerge [GOOD] >> TSchemeShardTest::CreateWithIntermediateDirs >> TSchemeShardTest::CreateAlterDropSolomon [GOOD] >> TSchemeShardTest::ReadOnlyMode >> TSchemeShardTest::AlterBlockStoreVolumeWithNonReplicatedPartitions >> TSchemeShardTest::CopyTableForBackup >> KqpNotNullColumns::UpsertNotNull >> TSchemeShardTest::ReadOnlyMode [GOOD] |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> KqpNewEngine::Select1 [GOOD] >> KqpNewEngine::ShuffleWrite >> KqpNotNullColumns::UpdateNotNullPk [GOOD] >> TSchemeShardTest::CreateWithIntermediateDirs [GOOD] >> TSchemeShardViewTest::DropView >> TSchemeShardViewTest::EmptyQueryText >> TSchemeShardViewTest::AsyncDropSameView >> KqpNamedExpressions::NamedExpressionSimple+UseSink [GOOD] >> KqpNotNullColumns::UpdateNotNullPkPg >> TSchemeShardTest::AlterBlockStoreVolumeWithNonReplicatedPartitions [GOOD] >> TSchemeShardTest::PathErrors >> TSchemeShardTest::CreateTopicOverDiskSpaceQuotas >> TSchemeShardTest::AdoptDropSolomon >> TSchemeShardTest::PathErrors [GOOD] >> KqpNamedExpressions::NamedExpressionSimple-UseSink >> KqpNewEngine::DuplicatedResults [GOOD] >> BasicStatistics::TwoServerlessDbs [GOOD] >> TSchemeShardTest::CreateTopicOverDiskSpaceQuotas [GOOD] >> KqpRanges::UpdateMulti [GOOD] >> KqpRanges::WhereInSubquery [GOOD] >> TGRpcCmsTest::DisabledTxTest [GOOD] >> TGRpcCmsTest::AlterRemoveTest [GOOD] >> TColumnShardTestSchema::ExportAfterFail >> TSchemeShardViewTest::AsyncDropSameView [GOOD] >> KqpReturning::ReturningTwice [GOOD] >> TSchemeShardViewTest::DropView [GOOD] >> TSchemeShardViewTest::EmptyQueryText [GOOD] >> KqpRanges::IsNull [GOOD] >> TSchemeShardTest::AdoptDropSolomon [GOOD] >> TSchemeShardTest::NestedDirs >> TSchemeShardTest::CreateSystemColumn >> KqpRanges::UpdateWhereInNoFullScan+UseSink >> KqpRanges::UpdateWhereInBigLiteralList >> KqpRanges::IsNotNullSecondComponent >> KqpReturning::ReplaceSerial >> TSchemeShardTest::AlterTableAndAfterSplit >> TSchemeShardTest::NestedDirs [GOOD] >> KqpNewEngine::FlatmapLambdaMutiusedConnections >> TSchemeShardTest::NewOwnerOnDatabase |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest |97.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest |97.7%| [TA] {RESULT} $(B)/ydb/core/persqueue/ut/slow/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::DropView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:27:09.008682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:27:09.008806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:27:09.008863Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:27:09.008902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:27:09.011089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:27:09.011172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:27:09.011248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:27:09.011345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:27:09.012192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:27:09.012554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:27:09.110534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:27:09.110601Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:09.128442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:27:09.128895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:27:09.129088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:27:09.138610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:27:09.138815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:27:09.139693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 
72057594046678944 2025-06-24T21:27:09.140888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:27:09.149962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:27:09.150189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:27:09.156814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:27:09.156918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:27:09.157195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:27:09.157259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:27:09.157330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:27:09.157429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.164952Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:27:09.316808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:27:09.317161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.317409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:27:09.317462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:27:09.317710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:27:09.317888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:27:09.322561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:27:09.323698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:27:09.323932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.324067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:27:09.324113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:27:09.324152Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:27:09.326665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.326749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:27:09.326811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:27:09.329556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.329615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.329679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:27:09.329754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:27:09.338537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:27:09.342636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:27:09.344039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:27:09.345269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:27:09.345430Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:27:09.345480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:27:09.347034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:27:09.347111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:27:09.347430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:27:09.347536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:27:09.350893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:27:09.350945Z node 1 :FLAT_TX_SCHEMESHARD ... hard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 102 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:27:09.444441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-06-24T21:27:09.444598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 2025-06-24T21:27:09.444970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:27:09.445096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:27:09.445150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_view.cpp:43: [72057594046678944] TDropView TPropose, opId: 102:0 HandleReply TEvOperationPlan, step: 5000003 2025-06-24T21:27:09.445325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:0 128 -> 240 2025-06-24T21:27:09.445496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 
2025-06-24T21:27:09.445557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 FAKE_COORDINATOR: Erasing txId 102 2025-06-24T21:27:09.447886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:27:09.447942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:27:09.448145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:27:09.448311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:27:09.448370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-24T21:27:09.448420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-24T21:27:09.448782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.448832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:27:09.448928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:27:09.448964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:27:09.449005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:27:09.449038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:27:09.449072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-24T21:27:09.449113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:27:09.449150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:27:09.449182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:27:09.449271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:27:09.449324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-24T21:27:09.449374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-24T21:27:09.449406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-24T21:27:09.450358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:27:09.450462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:27:09.450517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:27:09.450579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T21:27:09.450651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:27:09.451901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:27:09.451987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:27:09.452020Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:27:09.452050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-24T21:27:09.452100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:27:09.452189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-24T21:27:09.452643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:27:09.452708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:27:09.452786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 
2025-06-24T21:27:09.454645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:27:09.456441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:27:09.456579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T21:27:09.456797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:27:09.456836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:27:09.457309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:27:09.457417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:27:09.457471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:325:2314] TestWaitNotification: OK eventTxId 102 2025-06-24T21:27:09.458011Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:27:09.458241Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyView" took 216us result status StatusPathDoesNotExist 2025-06-24T21:27:09.458428Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyView\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/MyView" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::CreateAlterDropSolomon [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:26:48.764188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single 
parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:26:48.764275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:26:48.764305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:26:48.764339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:26:48.765329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:26:48.765380Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:26:48.765437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:26:48.765515Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:26:48.766166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:26:48.769031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:26:48.862622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:48.862685Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:26:48.874466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:26:48.874833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:26:48.874999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:26:48.881670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:26:48.881842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:26:48.882433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:48.882705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:26:48.888244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:26:48.889693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:26:48.897305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: 
TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:26:48.897365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:26:48.898276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:26:48.898321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:26:48.898363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:26:48.898435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:26:48.903982Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:26:49.019514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:26:49.019736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.019943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:26:49.019976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:26:49.020116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:26:49.020162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:26:49.022293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:49.022428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:26:49.022549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.022585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: 
TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:26:49.022640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:26:49.022667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:26:49.024199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.024241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:26:49.024283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:26:49.025517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.025560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.025610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.025659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:26:49.028034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:26:49.029467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:26:49.029608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:26:49.030279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:49.030371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:26:49.030407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.030646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:26:49.030683Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.030815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:26:49.030863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:26:49.032420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:26:49.032477Z node 1 :FLAT_TX_SCHEMESHARD ... p:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:27:07.006875Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-24T21:27:07.009554Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:27:07.009630Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:27:07.009662Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:27:07.009692Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:27:07.010389Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:27:07.011651Z node 16 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-24T21:27:07.011987Z node 16 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-24T21:27:07.012157Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-24T21:27:07.012549Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409548 2025-06-24T21:27:07.012992Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-24T21:27:07.013238Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:27:07.014885Z node 16 :HIVE INFO: tablet_helpers.cpp:1356: 
[72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-24T21:27:07.015222Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-24T21:27:07.015427Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186233409546 2025-06-24T21:27:07.017521Z node 16 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409549 2025-06-24T21:27:07.018536Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T21:27:07.018769Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 Forgetting tablet 72075186233409547 2025-06-24T21:27:07.019097Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:27:07.020072Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:27:07.020163Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:27:07.020318Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:27:07.021311Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-24T21:27:07.021407Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-24T21:27:07.023452Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:27:07.023503Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T21:27:07.023600Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-24T21:27:07.023630Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-24T21:27:07.025316Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:27:07.025401Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T21:27:07.025606Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-24T21:27:07.026058Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T21:27:07.026143Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-24T21:27:07.026783Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T21:27:07.026926Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T21:27:07.026996Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [16:541:2493] TestWaitNotification: OK eventTxId 103 2025-06-24T21:27:07.027839Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Solomon" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:27:07.028139Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Solomon" took 348us result status StatusPathDoesNotExist 2025-06-24T21:27:07.028369Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Solomon\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Solomon" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted wait until 72075186233409548 is deleted wait until 72075186233409549 is deleted 2025-06-24T21:27:07.028989Z node 16 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-24T21:27:07.029090Z node 16 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 2025-06-24T21:27:07.029144Z node 16 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409548 2025-06-24T21:27:07.029216Z node 16 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409549 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 Deleted tabletId 72075186233409548 Deleted tabletId 72075186233409549 2025-06-24T21:27:07.029910Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false 
ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:27:07.030162Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 315us result status StatusSuccess 2025-06-24T21:27:07.030756Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::AsyncDropSameView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:27:09.008664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:27:09.008775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:27:09.008807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:27:09.008847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 
2025-06-24T21:27:09.009830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:27:09.009882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:27:09.010011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:27:09.010083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:27:09.010812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:27:09.012444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:27:09.110492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:27:09.110561Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:09.128366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:27:09.128804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:27:09.129044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:27:09.139607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:27:09.139823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:27:09.140496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:27:09.140850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:27:09.149192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:27:09.149387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:27:09.156900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:27:09.157006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:27:09.157246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:27:09.157321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:27:09.157374Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:27:09.157468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.164884Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:27:09.332599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:27:09.332941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.333178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:27:09.333229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:27:09.333486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:27:09.333621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:27:09.339181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:27:09.339444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:27:09.339685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.339759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:27:09.339807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:27:09.339845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:27:09.343742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.343815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: 
NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:27:09.343877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:27:09.346444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.346526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.346602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:27:09.346674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:27:09.351164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:27:09.353637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:27:09.353824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:27:09.354968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:27:09.355174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:27:09.355243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:27:09.355578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:27:09.355771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:27:09.355982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:27:09.356066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:27:09.358505Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:27:09.358599Z node 1 :FLAT_TX_SCHEMESHARD ... th for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:27:09.458821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 FAKE_COORDINATOR: Erasing txId 102 2025-06-24T21:27:09.461271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:27:09.461319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:27:09.461493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:27:09.461648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:27:09.461703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-24T21:27:09.461756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-24T21:27:09.462079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.462127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:27:09.462222Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:27:09.462257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:27:09.462293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:27:09.462326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:27:09.462388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-24T21:27:09.462432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:27:09.462485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:27:09.462520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:27:09.462599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 
2025-06-24T21:27:09.462638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-24T21:27:09.462697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-24T21:27:09.462733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-24T21:27:09.463571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:27:09.463723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:27:09.463773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:27:09.463812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-24T21:27:09.463851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:27:09.464831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:27:09.464923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:27:09.464957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:27:09.464984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-24T21:27:09.465045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:27:09.465126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-24T21:27:09.466068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:27:09.466119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, 
LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:27:09.466192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:27:09.468802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:27:09.470836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:27:09.470958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestModificationResults wait txId: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 102 2025-06-24T21:27:09.471290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:27:09.471335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 TestWaitNotification wait txId: 103 2025-06-24T21:27:09.471418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-24T21:27:09.471441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 TestWaitNotification wait txId: 104 2025-06-24T21:27:09.471483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-24T21:27:09.471503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-24T21:27:09.472145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:27:09.472242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:27:09.472280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:331:2320] 2025-06-24T21:27:09.472374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-24T21:27:09.472508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-24T21:27:09.472541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:331:2320] 2025-06-24T21:27:09.472651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-24T21:27:09.472719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T21:27:09.472758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- 
TTxNotificationSubscriber for txId 104: satisfy waiter [1:331:2320] TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 TestWaitNotification: OK eventTxId 104 2025-06-24T21:27:09.473288Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:27:09.473539Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyView" took 239us result status StatusPathDoesNotExist 2025-06-24T21:27:09.473712Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyView\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/MyView" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::EmptyQueryText [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:27:09.008670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:27:09.008800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:27:09.008842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:27:09.008878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:27:09.009819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:27:09.009883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:27:09.009955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:27:09.010074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: 
Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:27:09.010830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:27:09.012458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:27:09.110492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:27:09.110553Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:09.128362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:27:09.128809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:27:09.129081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:27:09.137161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:27:09.137423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:27:09.139630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:27:09.140853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:27:09.147759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:27:09.149125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:27:09.156848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:27:09.156959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:27:09.157230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:27:09.157298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:27:09.157377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:27:09.157471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.164883Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:27:09.309493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" 
OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:27:09.311636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.314493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:27:09.314617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:27:09.317187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:27:09.317387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:27:09.327304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:27:09.327602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:27:09.327839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.327918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:27:09.327966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:27:09.328006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:27:09.331514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.331597Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:27:09.331661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:27:09.336491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.336564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.336633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:27:09.336722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:27:09.340776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:27:09.343077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:27:09.344427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:27:09.345562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:27:09.345728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:27:09.345783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:27:09.347072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:27:09.347166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:27:09.347402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:27:09.347493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:27:09.353080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:27:09.353147Z node 1 :FLAT_TX_SCHEMESHARD ... 
: actor# [1:271:2260] Become StateWork (SchemeCache [1:276:2265]) 2025-06-24T21:27:09.391555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateView CreateView { Name: "MyView" QueryText: "" } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:27:09.391827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_view.cpp:118: [72057594046678944] TCreateView Propose, path: /MyRoot/MyView, opId: 101:0 2025-06-24T21:27:09.391911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_view.cpp:124: [72057594046678944] TCreateView Propose, path: /MyRoot/MyView, opId: 101:0, viewDescription: Name: "MyView" QueryText: "" 2025-06-24T21:27:09.393151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:343: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: MyView, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:27:09.393251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-24T21:27:09.393296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 101:0 type: TxCreateView target path: [OwnerId: 72057594046678944, LocalPathId: 2] source path: 2025-06-24T21:27:09.393352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 101:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:27:09.394337Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:27:09.401700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusAccepted TxId: 101 SchemeshardId: 72057594046678944 PathId: 2, at schemeshard: 72057594046678944 2025-06-24T21:27:09.401984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusAccepted, operation: CREATE VIEW, path: /MyRoot/MyView 2025-06-24T21:27:09.402247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.402305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:30: [72057594046678944] TCreateView::TPropose, opId: 101:0 ProgressState 2025-06-24T21:27:09.402364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 101 ready parts: 1/1 2025-06-24T21:27:09.402500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 101 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:27:09.403261Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T21:27:09.404950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send 
tablet strongly msg operationId: 101:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2025-06-24T21:27:09.405087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 2025-06-24T21:27:09.405428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:27:09.405541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:27:09.405613Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:45: [72057594046678944] TCreateView::TPropose, opId: 101:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000002 2025-06-24T21:27:09.405757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 128 -> 240 2025-06-24T21:27:09.405960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:27:09.406042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 FAKE_COORDINATOR: Erasing txId 101 2025-06-24T21:27:09.409817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:27:09.409888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:27:09.410101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:27:09.410218Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:27:09.410262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-24T21:27:09.410323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-24T21:27:09.410685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:27:09.410739Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:27:09.410847Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:27:09.410886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:27:09.410934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:27:09.410981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:27:09.411030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-24T21:27:09.411080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:27:09.411128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:27:09.411190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:27:09.411295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:27:09.411346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-24T21:27:09.411383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 4 2025-06-24T21:27:09.411412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-24T21:27:09.412283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:27:09.412400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:27:09.412444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:27:09.412494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-06-24T21:27:09.412549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:27:09.413286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:27:09.413367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-24T21:27:09.413416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-24T21:27:09.413448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-24T21:27:09.413484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:27:09.413556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-24T21:27:09.418164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:27:09.419407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoServerlessDbs [GOOD] Test command err: 2025-06-24T21:24:12.116137Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:24:12.116486Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:12.116641Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004802/r3tmp/tmpzQmLQM/pdisk_1.dat 2025-06-24T21:24:12.478689Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17313, node 1 2025-06-24T21:24:12.726440Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:12.726497Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:12.726532Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:12.726793Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:12.734229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:12.847665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:12.847839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:12.861575Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11170 2025-06-24T21:24:13.467844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:16.881185Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:24:16.917345Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:16.917479Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:16.978138Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:24:16.980814Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:17.199781Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:17.234588Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.235322Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.235860Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.235995Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.236076Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.236304Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.236400Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.236516Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.236590Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.400983Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:17.401105Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:17.414134Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:17.543472Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:17.583534Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:24:17.583658Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:24:17.607841Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:24:17.608874Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:24:17.609037Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:24:17.609081Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:24:17.609139Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:24:17.609179Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:24:17.609229Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:24:17.609272Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:24:17.610591Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:24:17.645155Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:17.645253Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1790:2560], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:17.652083Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2568] 2025-06-24T21:24:17.664058Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1852:2588] 2025-06-24T21:24:17.664795Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1852:2588], schemeshard id = 72075186224037897 2025-06-24T21:24:17.670169Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-24T21:24:17.689207Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:24:17.689273Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:24:17.689344Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-24T21:24:17.701128Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:17.708869Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:24:17.709012Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:24:17.888100Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:24:18.042985Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:24:18.101501Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-24T21:24:18.576639Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:18.605831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:19.165463Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:19.291378Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7890: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-24T21:24:19.291432Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7906: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:24:19.291501Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:2491:2896], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-24T21:24:19.293208Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2494:2898] 2025-06-24T21:24:19.293482Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2494:2898], schemeshard id = 72075186224037899 2025-06-24T21:24:20.115114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:20.715105Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:20.976616Z node 2 :STATISTICS DEBUG: schemeshard_impl. ... Pool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:01.931575Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:8843:6345], DatabaseId: /Root/Shared, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-24T21:27:02.114368Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:8941:6393] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Shared/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:02.166701Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 118 ], ReplyToActorId[ [2:8970:6408]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:27:02.167021Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 118 ] 2025-06-24T21:27:02.167332Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:171: [72075186224037894] EvRequestStats, node id = 2, schemeshard count = 1, urgent = 0 2025-06-24T21:27:02.167415Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:27:02.167593Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:27:02.167692Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 118, ReplyToActorId = [2:8970:6408], StatRequests.size() = 1 2025-06-24T21:27:02.314578Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZWNlOTRlZWYtOTQxMTJiYzQtODNmNTE0Ni0yNjI3MWZmNQ==, TxId: 2025-06-24T21:27:02.314697Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZWNlOTRlZWYtOTQxMTJiYzQtODNmNTE0Ni0yNjI3MWZmNQ==, TxId: 2025-06-24T21:27:02.316002Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:27:02.330600Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-24T21:27:02.330681Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:27:02.374907Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-24T21:27:02.374988Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:27:02.429892Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:3616:3336], schemeshard count = 1 2025-06-24T21:27:02.679450Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8072: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037899 2025-06-24T21:27:02.679529Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 191.000000s, at schemeshard: 72075186224037899 2025-06-24T21:27:02.679728Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037899, stats size# 26 2025-06-24T21:27:02.694888Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-24T21:27:02.871613Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 119 ], ReplyToActorId[ [2:9004:6430]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:27:02.871989Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 119 ] 2025-06-24T21:27:02.872040Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 119, ReplyToActorId = [2:9004:6430], StatRequests.size() = 1 2025-06-24T21:27:04.073432Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 120 ], ReplyToActorId[ [2:9053:6459]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:27:04.073818Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 120 ] 2025-06-24T21:27:04.073856Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [2:9053:6459], StatRequests.size() = 1 2025-06-24T21:27:04.499644Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8072: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037905 2025-06-24T21:27:04.499725Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 192.000000s, at schemeshard: 72075186224037905 2025-06-24T21:27:04.499955Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037905, stats size# 26 2025-06-24T21:27:04.515591Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-24T21:27:04.736635Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:27:04.748174Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:27:04.748242Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:27:04.748285Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is data table. 2025-06-24T21:27:04.748324Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-24T21:27:04.748777Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-24T21:27:04.753082Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:27:04.770394Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YmY2OWMyZTItMjQyZWQ4OTEtZDNkM2EwNTQtYmQxNzQyYjM=, TxId: 2025-06-24T21:27:04.770466Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YmY2OWMyZTItMjQyZWQ4OTEtZDNkM2EwNTQtYmQxNzQyYjM=, TxId: 2025-06-24T21:27:04.771029Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:27:04.786316Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-24T21:27:04.786369Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:27:05.362793Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:9130:6509]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:27:05.363300Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-24T21:27:05.363339Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:9130:6509], StatRequests.size() = 1 2025-06-24T21:27:06.713642Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:9183:6539]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:27:06.713874Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-24T21:27:06.713924Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:9183:6539], StatRequests.size() = 1 2025-06-24T21:27:07.337245Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 3 2025-06-24T21:27:07.337627Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:27:07.338270Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:27:07.349925Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:27:07.350001Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:27:07.350045Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037905, LocalPathId: 2] is data table. 2025-06-24T21:27:07.350082Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037905, LocalPathId: 2] 2025-06-24T21:27:07.350436Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-24T21:27:07.353831Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:27:07.366673Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NWJjZDM0MjItNGEwMTY4ZjYtYTJiYTcwZmUtZWE3OTVmYmY=, TxId: 2025-06-24T21:27:07.366757Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NWJjZDM0MjItNGEwMTY4ZjYtYTJiYTcwZmUtZWE3OTVmYmY=, TxId: 2025-06-24T21:27:07.367642Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:27:07.385230Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037905, LocalPathId: 2] 2025-06-24T21:27:07.385288Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:27:08.011640Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:9253:6583]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:27:08.012103Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-24T21:27:08.012156Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:9253:6583], StatRequests.size() = 1 2025-06-24T21:27:08.013074Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:9255:6585]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:27:08.017740Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-24T21:27:08.017822Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:9255:6585], StatRequests.size() = 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::DisabledTxTest [GOOD] Test command err: 2025-06-24T21:27:05.829962Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630567691640783:2213];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:05.830529Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003e5b/r3tmp/tmpRIaRno/pdisk_1.dat 2025-06-24T21:27:06.344249Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:06.361507Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:06.361605Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:06.384501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61956, node 1 2025-06-24T21:27:06.630606Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:06.630625Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:06.630632Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:06.630731Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:27:06.830033Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20507 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:07.302449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:27:07.483476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) 2025-06-24T21:27:07.519794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::AlterRemoveTest [GOOD] Test command err: 2025-06-24T21:27:05.832663Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630567619109637:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:05.832921Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003e65/r3tmp/tmpixhMSS/pdisk_1.dat 2025-06-24T21:27:06.339619Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:06.361626Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:06.361740Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:06.369396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2323, node 1 2025-06-24T21:27:06.630331Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:06.630368Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:06.630380Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:06.630556Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:27:06.842897Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26955 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:07.313478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:07.452214Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7519630576209044978:2273], Recipient [1:7519630571914077397:2203]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:53472" } 2025-06-24T21:27:07.452260Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-24T21:27:07.452287Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T21:27:07.452301Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T21:27:07.452437Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:53472" 2025-06-24T21:27:07.452564Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1750800427451445) 2025-06-24T21:27:07.455520Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1750800427451445 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-24T21:27:07.455775Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2025-06-24T21:27:07.459873Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2025-06-24T21:27:07.460746Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: 
Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750800427451445&action=1" } } } 2025-06-24T21:27:07.460903Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T21:27:07.460965Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-24T21:27:07.461077Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-24T21:27:07.461494Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-06-24T21:27:07.461591Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-24T21:27:07.469594Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-24T21:27:07.469656Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-24T21:27:07.469729Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7519630576209044986:2203], Recipient [1:7519630571914077397:2203]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-24T21:27:07.469747Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-24T21:27:07.469773Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T21:27:07.469785Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T21:27:07.469831Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-06-24T21:27:07.469866Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-06-24T21:27:07.469954Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3206: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-06-24T21:27:07.473305Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519630576209044993:2274], Recipient [1:7519630571914077397:2203]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750800427451445&action=1" } UserToken: "" } 2025-06-24T21:27:07.473354Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-24T21:27:07.473600Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: 
"ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750800427451445&action=1" } } 2025-06-24T21:27:07.481927Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-24T21:27:07.481960Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T21:27:07.481965Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T21:27:07.481972Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T21:27:07.482026Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-06-24T21:27:07.482052Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1750800427451445 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-24T21:27:07.486494Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-24T21:27:07.486642Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T21:27:07.486690Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-24T21:27:07.486702Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-24T21:27:07.493186Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" 2025-06-24T21:27:07.495509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:27:07.500516Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:832: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976715658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 2025-06-24T21:27:07.500606Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:768: TSubdomainManip(/Root/users/user-1) send notification request: ... 
MS_TENANTS TRACE: console_tenants_manager.cpp:651: TSubdomainManip(/Root/users/user-1) send subdomain drop cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root/users" OperationType: ESchemeOpForceDropExtSubDomain Drop { Name: "user-1" } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" 2025-06-24T21:27:07.603104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5492: Mark as Dropping path id [OwnerId: 72057594046644480, LocalPathId: 3] by tx: 281474976715660 2025-06-24T21:27:07.604822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpForceDropExtSubDomain, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp:309) 2025-06-24T21:27:07.607031Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519630576209045171:2284], Recipient [1:7519630571914077397:2203]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750800427593766&action=2" } UserToken: "" } 2025-06-24T21:27:07.607056Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-24T21:27:07.607230Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750800427593766&action=2" } } 2025-06-24T21:27:07.609042Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-24T21:27:07.609070Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T21:27:07.609597Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976715659 2025-06-24T21:27:07.609610Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-24T21:27:07.609656Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2025-06-24T21:27:07.609766Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:832: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976715660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 2025-06-24T21:27:07.609801Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:768: TSubdomainManip(/Root/users/user-1) send notification request: NKikimrScheme.TEvNotifyTxCompletion TxId: 281474976715660 2025-06-24T21:27:07.609858Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435076, Sender [1:7519630576209045084:2203], Recipient [1:7519630571914077397:2203]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2025-06-24T21:27:07.609914Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:979: StateWork, processing event TEvPrivate::TEvSubdomainReady 2025-06-24T21:27:07.609930Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:3661: Ignoring ready subdomain for tenant /Root/users/user-1 in REMOVING_SUBDOMAIN 
state 2025-06-24T21:27:07.610087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5492: Mark as Dropping path id [OwnerId: 72057594046644480, LocalPathId: 3] by tx: 281474976715660 2025-06-24T21:27:07.612367Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:804: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionRegistered: TxId: 281474976715660 2025-06-24T21:27:07.628040Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976715660 2025-06-24T21:27:07.628066Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-24T21:27:07.628122Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-24T21:27:07.628252Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435077, Sender [1:7519630576209045164:2203], Recipient [1:7519630571914077397:2203]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-24T21:27:07.628287Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:980: StateWork, processing event TEvPrivate::TEvSubdomainRemoved 2025-06-24T21:27:07.628324Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T21:27:07.628338Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T21:27:07.628383Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:20: TTxRemoveComputationalUnits Execute /Root/users/user-1 2025-06-24T21:27:07.628407Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_UNITS txid=1750800427593766 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-24T21:27:07.628475Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1750800427593766 issue= 2025-06-24T21:27:07.631860Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:34: TTxRemoveComputationalUnits Complete /Root/users/user-1 2025-06-24T21:27:07.631999Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2114: Send TEvTenantSlotBroker::TEvAlterTenant: TenantName: "/Root/users/user-1" 2025-06-24T21:27:07.632029Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T21:27:07.632212Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7519630571914077263:2197], Recipient [1:7519630571914077397:2203]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-24T21:27:07.632235Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-24T21:27:07.632251Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T21:27:07.632260Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T21:27:07.632284Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to REMOVING_POOLS 2025-06-24T21:27:07.632309Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_POOLS txid=1750800427593766 errorcode=STATUS_CODE_UNSPECIFIED issue= 
2025-06-24T21:27:07.635816Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-24T21:27:07.635864Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T21:27:07.635897Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-24T21:27:07.636014Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-24T21:27:07.636707Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 2 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 3 } ItemConfigGeneration: 3 } } Success: true ConfigTxSeqNo: 13 2025-06-24T21:27:07.636800Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:151: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 3 } } } 2025-06-24T21:27:07.641620Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:306: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 14 2025-06-24T21:27:07.641723Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435081, Sender [1:7519630576209045257:2203], Recipient [1:7519630571914077397:2203]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2025-06-24T21:27:07.644164Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:973: StateWork, processing event TEvPrivate::TEvPoolDeleted 2025-06-24T21:27:07.644196Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T21:27:07.644205Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T21:27:07.644248Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2025-06-24T21:27:07.644270Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=DELETED allocatednumgroups=0 2025-06-24T21:27:07.656607Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-24T21:27:07.656645Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T21:27:07.656655Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T21:27:07.656661Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T21:27:07.656719Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1750800427593766 2025-06-24T21:27:07.656752Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1750800427593766 issue= 2025-06-24T21:27:07.656772Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2958: Remove tenant /Root/users/user-1 from database txid=1750800427593766 issue= 2025-06-24T21:27:07.656783Z node 1 :CMS_TENANTS TRACE: 
console_tenants_manager.cpp:2963: Remove pool /Root/users/user-1:hdd from database 2025-06-24T21:27:07.656853Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3083: Add tenant removal info for /Root/users/user-1 txid=1750800427593766 code=SUCCESS errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-24T21:27:07.658721Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2025-06-24T21:27:07.658773Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T21:27:07.659963Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519630576209045275:2286], Recipient [1:7519630571914077397:2203]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750800427593766&action=2" } UserToken: "" } 2025-06-24T21:27:07.659993Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-24T21:27:07.660131Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750800427593766&action=2" ready: true status: SUCCESS } } >> TColumnShardTestSchema::RebootHotTiersAfterTtl >> TSchemeShardTest::CreateSystemColumn [GOOD] >> KqpNewEngine::StreamLookupWithView [GOOD] >> KqpNewEngine::Truncated |97.7%| [TA] $(B)/ydb/core/tx/schemeshard/ut_view/test-results/unittest/{meta.json ... results_accumulator.log} |97.7%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_view/test-results/unittest/{meta.json ... results_accumulator.log} >> TGRpcCmsTest::AuthTokenTest [GOOD] >> TGRpcCmsTest::SimpleTenantsTest [GOOD] >> KqpNotNullColumns::UpsertNotNull [GOOD] >> KqpNotNullColumns::UpsertNotNullPg >> TSchemeShardTest::NewOwnerOnDatabase [GOOD] >> TSchemeShardTest::PreserveColumnOrder ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::CreateSystemColumn [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:26:48.764180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:26:48.764295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:26:48.764336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:26:48.764366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:26:48.765283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:26:48.765317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:26:48.765367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:26:48.765451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:26:48.766175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:26:48.769076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:26:48.863317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:48.863372Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:26:48.877509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:26:48.877866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:26:48.878019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:26:48.884092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:26:48.884235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:26:48.884703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:48.884889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:26:48.888090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:26:48.889696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:26:48.897332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:26:48.897401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:26:48.898276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:26:48.898328Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:26:48.898369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:26:48.898457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: 
TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:26:48.904518Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:26:49.027550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:26:49.027822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.028022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:26:49.028062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:26:49.028249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:26:49.028299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:26:49.030170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:49.030324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:26:49.030450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.030491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:26:49.030540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:26:49.030575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:26:49.032073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.032126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:26:49.032162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 
2025-06-24T21:26:49.033535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.033584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.033638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.033681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:26:49.036272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:26:49.037788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:26:49.037917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:26:49.038650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:49.038756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:26:49.038805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.039044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:26:49.039082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.039225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:26:49.039319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:26:49.040935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:26:49.040970Z node 1 :FLAT_TX_SCHEMESHARD ... 
ached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: SystemColumnInCopyAllowed, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:27:10.237343Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-06-24T21:27:10.237410Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:27:10.237485Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 103:0 type: TxCopyTable target path: [OwnerId: 72057594046678944, LocalPathId: 3] source path: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:27:10.237576Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T21:27:10.237781Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:27:10.238066Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 103:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:27:10.238795Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:27:10.238897Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:27:10.242102Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 103, response: Status: StatusAccepted TxId: 103 SchemeshardId: 72057594046678944 PathId: 3, at schemeshard: 72057594046678944 2025-06-24T21:27:10.242513Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /MyRoot/SystemColumnInCopyAllowed 2025-06-24T21:27:10.242884Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:27:10.242973Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:27:10.243312Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:27:10.243457Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:27:10.243530Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [15:207:2207], at schemeshard: 72057594046678944, txId: 103, path id: 1 2025-06-24T21:27:10.243615Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [15:207:2207], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-24T21:27:10.244409Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:27:10.244513Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 103:0 ProgressState, operation type: TxCopyTable, at tablet# 72057594046678944 2025-06-24T21:27:10.244903Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:358: TCreateParts opId# 103:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046678944 OwnerIdx: 2 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 1 } ObjectId: 3 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 1 } 2025-06-24T21:27:10.246057Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:27:10.246244Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:27:10.246322Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-24T21:27:10.246396Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 6 2025-06-24T21:27:10.246478Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-24T21:27:10.247748Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 1 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:27:10.247856Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 1 PathOwnerId: 72057594046678944, cookie: 103 2025-06-24T21:27:10.247894Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-24T21:27:10.247929Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 1 2025-06-24T21:27:10.247966Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:27:10.248056Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 
2025-06-24T21:27:10.251505Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 103:0 from tablet: 72057594046678944 to tablet: 72057594037968897 cookie: 72057594046678944:2 msg type: 268697601 2025-06-24T21:27:10.251735Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 103, partId: 0, tablet: 72057594037968897 2025-06-24T21:27:10.251811Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1810: TOperation RegisterRelationByShardIdx, TxId: 103, shardIdx: 72057594046678944:2, partId: 0 2025-06-24T21:27:10.252515Z node 15 :HIVE INFO: tablet_helpers.cpp:1181: [72057594037968897] TEvCreateTablet, msg: Owner: 72057594046678944 OwnerIdx: 2 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 1 } ObjectId: 3 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 1 } 2025-06-24T21:27:10.252854Z node 15 :HIVE INFO: tablet_helpers.cpp:1245: [72057594037968897] TEvCreateTablet, Owner 72057594046678944, OwnerIdx 2, type DataShard, boot OK, tablet id 72075186233409547 2025-06-24T21:27:10.253067Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5947: Handle TEvCreateTabletReply at schemeshard: 72057594046678944 message: Status: OK Owner: 72057594046678944 OwnerIdx: 2 TabletID: 72075186233409547 Origin: 72057594037968897 2025-06-24T21:27:10.253141Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation FindRelatedPartByShardIdx, TxId: 103, shardIdx: 72057594046678944:2, partId: 0 2025-06-24T21:27:10.253321Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 103:0, at schemeshard: 72057594046678944, message: Status: OK Owner: 72057594046678944 OwnerIdx: 2 TabletID: 72075186233409547 Origin: 72057594037968897 2025-06-24T21:27:10.253411Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:176: TCreateParts opId# 103:0 HandleReply TEvCreateTabletReply, at tabletId: 72057594046678944 2025-06-24T21:27:10.253538Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:179: TCreateParts opId# 103:0 HandleReply TEvCreateTabletReply, message: Status: OK Owner: 72057594046678944 OwnerIdx: 2 TabletID: 72075186233409547 Origin: 72057594037968897 2025-06-24T21:27:10.253683Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 103:0 2 -> 3 2025-06-24T21:27:10.255377Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:27:10.256204Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-24T21:27:10.259816Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:27:10.260190Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-24T21:27:10.260283Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_copy_table.cpp:72: TCopyTable TConfigureParts operationId# 103:0 ProgressState at tablet# 72057594046678944 
2025-06-24T21:27:10.260388Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_copy_table.cpp:104: TCopyTable TConfigureParts operationId# 103:0 Propose modify scheme on dstDatashard# 72075186233409547 idx# 72057594046678944:2 srcDatashard# 72075186233409546 idx# 72057594046678944:1 operationId# 103:0 seqNo# 2:2 at tablet# 72057594046678944 2025-06-24T21:27:10.265276Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 103:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 269549568 2025-06-24T21:27:10.265449Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 103:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 269549568 2025-06-24T21:27:10.265544Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 103, partId: 0, tablet: 72075186233409547 2025-06-24T21:27:10.265580Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 103, partId: 0, tablet: 72075186233409546 TestModificationResult got TxId: 103, wait until txId: 103 >> TSchemeShardTest::AlterTableAndAfterSplit [GOOD] >> TSchemeShardTest::AlterIndexTableDirectly >> KqpSqlIn::TableSource [GOOD] >> KqpSqlIn::SimpleKey_Negated ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::SimpleTenantsTest [GOOD] Test command err: 2025-06-24T21:27:05.831058Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630568570102506:2174];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:05.833597Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003e82/r3tmp/tmpwxP6p9/pdisk_1.dat 2025-06-24T21:27:06.339577Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:06.351451Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:06.351560Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:06.368326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23892, node 1 2025-06-24T21:27:06.630273Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:06.630298Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:06.630309Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:06.630449Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:27:06.832217Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14112 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:07.302613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:07.455264Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7519630577160037748:2273], Recipient [1:7519630572865070152:2202]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:50572" } 2025-06-24T21:27:07.455338Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-24T21:27:07.455366Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T21:27:07.455384Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T21:27:07.455567Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:50572" 2025-06-24T21:27:07.455721Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1750800427454881) 2025-06-24T21:27:07.456487Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1750800427454881 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-24T21:27:07.456706Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2025-06-24T21:27:07.463897Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2025-06-24T21:27:07.465001Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: 
Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750800427454881&action=1" } } } 2025-06-24T21:27:07.465220Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T21:27:07.465299Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-24T21:27:07.465441Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-24T21:27:07.465998Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-06-24T21:27:07.466151Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-24T21:27:07.473836Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519630577160037761:2274], Recipient [1:7519630572865070152:2202]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750800427454881&action=1" } UserToken: "" } 2025-06-24T21:27:07.473887Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-24T21:27:07.474166Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750800427454881&action=1" } } 2025-06-24T21:27:07.476139Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-24T21:27:07.476193Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-24T21:27:07.476299Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7519630577160037756:2202], Recipient [1:7519630572865070152:2202]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-24T21:27:07.476319Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-24T21:27:07.476342Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T21:27:07.476351Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T21:27:07.476393Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-06-24T21:27:07.476411Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-06-24T21:27:07.476482Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3206: Update subdomain version in database for 
/Root/users/user-1 subdomainversion=2 2025-06-24T21:27:07.487964Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-24T21:27:07.488001Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T21:27:07.488010Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T21:27:07.488019Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T21:27:07.488092Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-06-24T21:27:07.488118Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1750800427454881 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-24T21:27:07.490414Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-24T21:27:07.490576Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T21:27:07.490619Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-24T21:27:07.490631Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-24T21:27:07.517376Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" 2025-06-24T21:27:07.519035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976710658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:27:07.522519Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:832: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 2025-06-24T21:27:07.522627Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:768: TSubdomainManip(/Root/users/user-1) send notification request ... 
/Root/users/user-1) done 2025-06-24T21:27:08.074353Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-24T21:27:08.074519Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435077, Sender [1:7519630581455005619:2202], Recipient [1:7519630572865070152:2202]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-24T21:27:08.074542Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:980: StateWork, processing event TEvPrivate::TEvSubdomainRemoved 2025-06-24T21:27:08.074557Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T21:27:08.074566Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T21:27:08.074602Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:20: TTxRemoveComputationalUnits Execute /Root/users/user-1 2025-06-24T21:27:08.074632Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_UNITS txid=1750800428035270 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-24T21:27:08.074676Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1750800428035270 issue= 2025-06-24T21:27:08.074459Z node 3 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72075186224037888 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found - using supplied 72075186224037888 2025-06-24T21:27:08.077162Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:34: TTxRemoveComputationalUnits Complete /Root/users/user-1 2025-06-24T21:27:08.077299Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2114: Send TEvTenantSlotBroker::TEvAlterTenant: TenantName: "/Root/users/user-1" 2025-06-24T21:27:08.077335Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T21:27:08.077643Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7519630572865070029:2201], Recipient [1:7519630572865070152:2202]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-24T21:27:08.077680Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-24T21:27:08.077716Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T21:27:08.077729Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T21:27:08.077768Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to REMOVING_POOLS 2025-06-24T21:27:08.077994Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_POOLS txid=1750800428035270 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-24T21:27:08.079747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:39: Got DeleteTabletReply with Forward response from Hive 72075186224037888 to Hive 72057594037968897 shardIdx 72057594046644480:1 2025-06-24T21:27:08.085225Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-24T21:27:08.085310Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: 
TTxProcessor(tenants) completed tx 2025-06-24T21:27:08.085376Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-24T21:27:08.085527Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-24T21:27:08.089743Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037897 not found 2025-06-24T21:27:08.089786Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037890 not found 2025-06-24T21:27:08.089799Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037893 not found 2025-06-24T21:27:08.089806Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037894 not found 2025-06-24T21:27:08.086954Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 2 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 3 } ItemConfigGeneration: 3 } } Success: true ConfigTxSeqNo: 13 2025-06-24T21:27:08.087072Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:151: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 3 } } } 2025-06-24T21:27:08.093216Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2025-06-24T21:27:08.095816Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:306: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 14 2025-06-24T21:27:08.095936Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435081, Sender [1:7519630581455005741:2202], Recipient [1:7519630572865070152:2202]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2025-06-24T21:27:08.095994Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:973: StateWork, processing event TEvPrivate::TEvPoolDeleted 2025-06-24T21:27:08.096020Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T21:27:08.096037Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T21:27:08.096088Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2025-06-24T21:27:08.096112Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=DELETED allocatednumgroups=0 2025-06-24T21:27:08.096940Z node 1 :HIVE WARN: tx__block_storage_result.cpp:56: HIVE#72057594037968897 THive::TTxBlockStorageResult retrying for 72075186224037888 because of ERROR 2025-06-24T21:27:08.104722Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-24T21:27:08.105038Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: 
TTxProcessor(tenants) enqueue tx 2025-06-24T21:27:08.105085Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T21:27:08.105093Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T21:27:08.105194Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1750800428035270 2025-06-24T21:27:08.105226Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1750800428035270 issue= 2025-06-24T21:27:08.105256Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2958: Remove tenant /Root/users/user-1 from database txid=1750800428035270 issue= 2025-06-24T21:27:08.105273Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2963: Remove pool /Root/users/user-1:hdd from database 2025-06-24T21:27:08.105527Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3083: Add tenant removal info for /Root/users/user-1 txid=1750800428035270 code=SUCCESS errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-24T21:27:08.107721Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519630581455005774:2349], Recipient [1:7519630572865070152:2202]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750800428035270&action=2" } UserToken: "" } 2025-06-24T21:27:08.107774Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-24T21:27:08.107944Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750800428035270&action=2" } } 2025-06-24T21:27:08.108642Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2025-06-24T21:27:08.108856Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T21:27:08.162830Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519630581455005779:2351], Recipient [1:7519630572865070152:2202]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750800428035270&action=2" } UserToken: "" } 2025-06-24T21:27:08.162867Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-24T21:27:08.163313Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750800428035270&action=2" ready: true status: SUCCESS } } 2025-06-24T21:27:08.165651Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7519630581455005782:2353], Recipient [1:7519630572865070152:2202]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "" PeerName: "ipv6:[::1]:50572" } 2025-06-24T21:27:08.165678Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-24T21:27:08.165798Z node 1 :CMS_TENANTS TRACE: 
console_tenants_manager.cpp:3377: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: NOT_FOUND issues { message: "Unknown tenant /Root/users/user-1" severity: 1 } } } 2025-06-24T21:27:08.169373Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285123, Sender [1:7519630581455005785:2354], Recipient [1:7519630572865070152:2202]: NKikimr::NConsole::TEvConsole::TEvListTenantsRequest { Request { } UserToken: "" PeerName: "ipv6:[::1]:50572" } 2025-06-24T21:27:08.169403Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:967: StateWork, processing event TEvConsole::TEvListTenantsRequest 2025-06-24T21:27:08.169558Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3421: Send TEvConsole::TEvListTenantsResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.ListDatabasesResult] { } } } } 2025-06-24T21:27:08.171869Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-24T21:27:08.172142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:27:08.602717Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::AuthTokenTest [GOOD] Test command err: 2025-06-24T21:27:05.833708Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630570828532294:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:05.839788Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003e24/r3tmp/tmpkWocoQ/pdisk_1.dat 2025-06-24T21:27:06.335955Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:06.379200Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:06.379325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:06.383893Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14491, node 1 2025-06-24T21:27:06.630256Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:06.630287Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:06.630297Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:06.630452Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:27:06.839420Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3457 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:07.302948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:07.451611Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7519630579418467578:2273], Recipient [1:7519630575123499976:2196]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:38688" } 2025-06-24T21:27:07.451667Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-24T21:27:07.451688Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T21:27:07.451703Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T21:27:07.451871Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:38688" 2025-06-24T21:27:07.452054Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1750800427451642) 2025-06-24T21:27:07.454838Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1750800427451642 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-24T21:27:07.457287Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } 
allocatednumgroups=0 state=NOT_ALLOCATED 2025-06-24T21:27:07.464107Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2025-06-24T21:27:07.472042Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750800427451642&action=1" } } } 2025-06-24T21:27:07.472192Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T21:27:07.472266Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-24T21:27:07.472428Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-24T21:27:07.472942Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-06-24T21:27:07.473062Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-24T21:27:07.478987Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-24T21:27:07.479072Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-24T21:27:07.479168Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7519630579418467586:2196], Recipient [1:7519630575123499976:2196]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-24T21:27:07.479191Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-24T21:27:07.479215Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T21:27:07.479229Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T21:27:07.479766Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-06-24T21:27:07.479805Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-06-24T21:27:07.479885Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3206: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-06-24T21:27:07.482794Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519630579418467594:2274], Recipient [1:7519630575123499976:2196]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750800427451642&action=1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" } 
2025-06-24T21:27:07.482827Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-24T21:27:07.483046Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750800427451642&action=1" } } 2025-06-24T21:27:07.488498Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-24T21:27:07.488527Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T21:27:07.488542Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T21:27:07.488550Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T21:27:07.488617Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-06-24T21:27:07.488646Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1750800427451642 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-24T21:27:07.492949Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-24T21:27:07.493137Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T21:27:07.493178Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-24T21:27:07.493188Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-24T21:27:07.498669Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "Root" 2025-06-24T21:27:07.506274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976710658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/cor ... 
serToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:38688" } 2025-06-24T21:27:07.915218Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-24T21:27:07.915262Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-24T21:27:07.915347Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7519630575123499853:2198], Recipient [1:7519630575123499976:2196]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-24T21:27:07.915384Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-24T21:27:07.915897Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2025-06-24T21:27:07.920097Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7519630579418468195:2349], Recipient [1:7519630575123499976:2196]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:38688" } 2025-06-24T21:27:07.920128Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-24T21:27:07.920184Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-24T21:27:07.920314Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7519630575123499853:2198], Recipient [1:7519630575123499976:2196]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-24T21:27:07.920326Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-24T21:27:07.920861Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2025-06-24T21:27:07.923095Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7519630579418468202:2350], Recipient [1:7519630575123499976:2196]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:38688" } 2025-06-24T21:27:07.923125Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: 
StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-24T21:27:07.923181Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-24T21:27:07.923312Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7519630575123499853:2198], Recipient [1:7519630575123499976:2196]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-24T21:27:07.923339Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-24T21:27:07.923873Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2025-06-24T21:27:07.923945Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976710659 2025-06-24T21:27:07.923960Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-24T21:27:07.923985Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2025-06-24T21:27:07.924136Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435076, Sender [1:7519630579418467689:2196], Recipient [1:7519630575123499976:2196]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2025-06-24T21:27:07.924166Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:979: StateWork, processing event TEvPrivate::TEvSubdomainReady 2025-06-24T21:27:07.924184Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-24T21:27:07.924194Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-24T21:27:07.924242Z node 1 :CMS_TENANTS DEBUG: console__update_confirmed_subdomain.cpp:22: TTxUpdateConfirmedSubdomain for tenant /Root/users/user-1 to 2 2025-06-24T21:27:07.924265Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=RUNNING txid=1750800427451642 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-24T21:27:07.924347Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2913: Update database for /Root/users/user-1 confirmedsubdomain=2 2025-06-24T21:27:07.925927Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7519630579418468206:2351], Recipient [1:7519630575123499976:2196]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:38688" } 2025-06-24T21:27:07.925949Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-24T21:27:07.926003Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: 
"/Root/users/user-1" 2025-06-24T21:27:07.926171Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7519630575123499853:2198], Recipient [1:7519630575123499976:2196]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-24T21:27:07.926194Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-24T21:27:07.926739Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2025-06-24T21:27:07.931766Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7519630579418468212:2352], Recipient [1:7519630575123499976:2196]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:38688" } 2025-06-24T21:27:07.931792Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-24T21:27:07.931826Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-24T21:27:07.931867Z node 1 :CMS_TENANTS DEBUG: console__update_confirmed_subdomain.cpp:42: TTxUpdateConfirmedSubdomain complete for /Root/users/user-1 2025-06-24T21:27:07.931889Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-24T21:27:07.931984Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7519630575123499853:2198], Recipient [1:7519630575123499976:2196]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-24T21:27:07.932001Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-24T21:27:07.932581Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: RUNNING required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } TClient is connected to server localhost:3457 TClient::Ls request: /Root/users/user-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root/users/user-1" PathId: 1 SchemeshardId: 72075186224037897 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72075186224037897 PathType: EPathTypeDir 
CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 3 ProcessingParams { Version: 3 PlanReso... (TRUNCATED) 2025-06-24T21:27:08.203125Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-24T21:27:08.203653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-24T21:27:08.547671Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/users/user-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:27:08.553144Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:09.548205Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/users/user-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; >> KqpQueryService::ClosedSessionRemovedWhileActiveWithQuery >> KqpDocumentApi::RestrictWrite >> TVectorIndexTests::CreateTablePrefixInvalidKeyType >> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap+useOltpSink >> KqpQueryService::Followers >> MoveTable::WithUncomittedData >> KqpQueryService::ExecuteQueryWithWorkloadManager >> KqpNewEngine::Update-UseSink >> TSchemeShardTest::PreserveColumnOrder [GOOD] >> TColumnShardTestSchema::RebootHotTiersTtl |97.7%| [TA] $(B)/ydb/services/cms/ut/test-results/unittest/{meta.json ... results_accumulator.log} |97.7%| [TA] {RESULT} $(B)/ydb/services/cms/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpNotNullColumns::UpdateNotNullPkPg [GOOD] >> KqpNotNullColumns::UpdateNotNull >> KqpQueryService::DdlUser [GOOD] >> KqpQueryService::DdlTx |97.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> KqpReturning::ReturningWorksIndexedDelete+QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedDelete-QueryService >> TVectorIndexTests::CreateTablePrefixInvalidKeyType [GOOD] >> KqpQueryService::DdlGroup |97.7%| [TA] $(B)/ydb/core/statistics/service/ut/ut_aggregation/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::PreserveColumnOrder [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:26:48.764262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:26:48.764359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:26:48.764406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:26:48.764443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:26:48.765383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:26:48.765442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:26:48.765530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:26:48.765615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:26:48.766421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:26:48.769092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:26:48.851245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:48.851298Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:26:48.867250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:26:48.867611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:26:48.867815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:26:48.875932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:26:48.876172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:26:48.876887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 
72057594046678944 2025-06-24T21:26:48.880035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:26:48.889006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:26:48.889737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:26:48.897395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:26:48.897475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:26:48.898361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:26:48.898429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:26:48.898490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:26:48.898581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:26:48.905689Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:26:49.046666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:26:49.046899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.047233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:26:49.047288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:26:49.047578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:26:49.047655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:26:49.049929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:49.050135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:26:49.050328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.050392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:26:49.050451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:26:49.050490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:26:49.052414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.052475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:26:49.052516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:26:49.054339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.054420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.054474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.054531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:26:49.058440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:26:49.060507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:26:49.060681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:26:49.061713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:49.061873Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:26:49.061929Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.062282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:26:49.062348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.062539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:26:49.062615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:26:49.064761Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:26:49.064813Z node 1 :FLAT_TX_SCHEMESHARD ... 09546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 2280 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-24T21:27:12.419914Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 2280 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-24T21:27:12.421111Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 308 RawX2: 64424511733 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:27:12.421197Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-24T21:27:12.421422Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 308 RawX2: 64424511733 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:27:12.421544Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:27:12.421732Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 308 RawX2: 
64424511733 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-24T21:27:12.421886Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:27:12.421969Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:27:12.422038Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:27:12.422116Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 129 -> 240 2025-06-24T21:27:12.425879Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:27:12.426761Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:27:12.426919Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:27:12.427499Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:27:12.427721Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:27:12.427798Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:27:12.428035Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:27:12.428113Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:27:12.428193Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 1/1 2025-06-24T21:27:12.428255Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:27:12.428336Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-24T21:27:12.428466Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [15:336:2313] message: TxId: 101 2025-06-24T21:27:12.428561Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-24T21:27:12.428649Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:27:12.428705Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:27:12.428930Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:27:12.431743Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:27:12.431852Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [15:337:2314] TestWaitNotification: OK eventTxId 101 2025-06-24T21:27:12.432723Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:27:12.433180Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 524us result status StatusSuccess 2025-06-24T21:27:12.434277Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "col01" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "col02" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "col03" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: "col04" Type: "Utf8" TypeId: 4608 Id: 4 NotNull: false IsBuildInProgress: false } Columns { Name: "col05" Type: "Utf8" TypeId: 4608 Id: 5 NotNull: false IsBuildInProgress: false } Columns { Name: "col06" Type: "Utf8" TypeId: 4608 Id: 6 NotNull: false IsBuildInProgress: false } Columns { Name: "col07" Type: "Utf8" TypeId: 4608 Id: 7 NotNull: false IsBuildInProgress: false } Columns { Name: "col08" Type: "Utf8" TypeId: 4608 Id: 8 NotNull: false IsBuildInProgress: false } Columns { Name: "col09" Type: "Utf8" TypeId: 4608 Id: 9 NotNull: false IsBuildInProgress: false } Columns { Name: "col10" Type: "Utf8" TypeId: 4608 Id: 10 NotNull: false IsBuildInProgress: false } Columns { Name: "col11" Type: "Utf8" TypeId: 4608 Id: 11 NotNull: false IsBuildInProgress: false } Columns { Name: "col12" Type: "Utf8" TypeId: 4608 Id: 12 NotNull: false IsBuildInProgress: false } Columns { Name: "col13" Type: "Utf8" TypeId: 4608 Id: 13 NotNull: false IsBuildInProgress: false } Columns { Name: "col14" Type: "Utf8" TypeId: 4608 Id: 14 NotNull: false IsBuildInProgress: false } Columns { Name: "col15" Type: "Utf8" TypeId: 4608 Id: 15 NotNull: false IsBuildInProgress: false } Columns { Name: "col16" Type: "Utf8" TypeId: 4608 Id: 16 NotNull: false IsBuildInProgress: false } Columns { Name: "col17" Type: "Utf8" TypeId: 4608 Id: 17 NotNull: false IsBuildInProgress: false } Columns { Name: "col18" Type: "Utf8" TypeId: 4608 Id: 18 NotNull: false IsBuildInProgress: false } Columns { Name: "col19" Type: "Utf8" TypeId: 4608 Id: 19 
NotNull: false IsBuildInProgress: false } Columns { Name: "col20" Type: "Utf8" TypeId: 4608 Id: 20 NotNull: false IsBuildInProgress: false } Columns { Name: "col21" Type: "Utf8" TypeId: 4608 Id: 21 NotNull: false IsBuildInProgress: false } Columns { Name: "col22" Type: "Utf8" TypeId: 4608 Id: 22 NotNull: false IsBuildInProgress: false } Columns { Name: "col23" Type: "Utf8" TypeId: 4608 Id: 23 NotNull: false IsBuildInProgress: false } Columns { Name: "col24" Type: "Utf8" TypeId: 4608 Id: 24 NotNull: false IsBuildInProgress: false } Columns { Name: "col25" Type: "Utf8" TypeId: 4608 Id: 25 NotNull: false IsBuildInProgress: false } Columns { Name: "col26" Type: "Utf8" TypeId: 4608 Id: 26 NotNull: false IsBuildInProgress: false } Columns { Name: "col27" Type: "Utf8" TypeId: 4608 Id: 27 NotNull: false IsBuildInProgress: false } Columns { Name: "col28" Type: "Utf8" TypeId: 4608 Id: 28 NotNull: false IsBuildInProgress: false } Columns { Name: "col29" Type: "Utf8" TypeId: 4608 Id: 29 NotNull: false IsBuildInProgress: false } KeyColumnNames: "col01" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |97.7%| [TA] {RESULT} $(B)/ydb/core/statistics/service/ut/ut_aggregation/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpNewEngine::ShuffleWrite [GOOD] >> KqpNewEngine::SelfJoin |97.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTablePrefixInvalidKeyType [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:27:12.850273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:27:12.850363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:27:12.850400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:27:12.850436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:27:12.850483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:27:12.850535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:27:12.850591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:27:12.850667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:27:12.851451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:27:12.851808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:27:12.925124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:27:12.925185Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:12.941679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:27:12.942123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:27:12.942295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:27:12.960258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:27:12.960464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners 
number: 0 2025-06-24T21:27:12.961059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:27:12.961402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:27:12.964610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:27:12.964807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:27:12.965826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:27:12.965893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:27:12.966171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:27:12.966214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:27:12.966250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:27:12.966341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:27:12.973666Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:27:13.102417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:27:13.102622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:13.102834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:27:13.102871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:27:13.103087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:27:13.103242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:27:13.106270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:27:13.106530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:27:13.106789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:13.106871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:27:13.106937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:27:13.106987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:27:13.109692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:13.109767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:27:13.109838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:27:13.112372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:13.112438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:13.112500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:27:13.112595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:27:13.117583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:27:13.120434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:27:13.120657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:27:13.121839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:27:13.122038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:27:13.122101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:27:13.122404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:27:13.122470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:27:13.122672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:27:13.122776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:27:13.125743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:27:13.125791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:27:13.126043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:27:13.126110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-24T21:27:13.126509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:27:13.126562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T21:27:13.126660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:27:13.126694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:27:13.126731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:27:13.126764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:27:13.126831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T21:27:13.126899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 
2025-06-24T21:27:13.126949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T21:27:13.126999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T21:27:13.127086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:27:13.127140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T21:27:13.127204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T21:27:13.129565Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:27:13.129710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:27:13.129759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T21:27:13.129836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T21:27:13.129934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:27:13.130057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T21:27:13.134730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T21:27:13.135452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-24T21:27:13.151895Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:271:2260] Bootstrap 2025-06-24T21:27:13.174435Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:271:2260] Become StateWork (SchemeCache [1:276:2265]) 2025-06-24T21:27:13.177446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "vectors" Columns { Name: "id" Type: "Uint64" } Columns { Name: "embedding" Type: "String" } Columns { Name: "covered" Type: "String" } Columns { Name: "prefix" Type: "Float" } KeyColumnNames: "id" } IndexDescription { Name: "idx_vector" KeyColumnNames: "prefix" KeyColumnNames: "embedding" Type: EIndexTypeGlobalVectorKmeansTree DataColumnNames: "covered" VectorIndexKmeansTreeDescription { Settings { settings { metric: 
DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } clusters: 4 levels: 5 } } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:27:13.177958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /MyRoot/vectors domain path id: [OwnerId: 72057594046678944, LocalPathId: 1] domain path: /MyRoot shardsToCreate: 2 GetShardsInside: 0 MaxShards: 200000 2025-06-24T21:27:13.178197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 101:0, explain: Column 'prefix' has wrong key type Float for being key, at schemeshard: 72057594046678944 2025-06-24T21:27:13.178246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 101:1, propose status:StatusInvalidParameter, reason: Column 'prefix' has wrong key type Float for being key, at schemeshard: 72057594046678944 2025-06-24T21:27:13.187713Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:27:13.191972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusInvalidParameter Reason: "Column \'prefix\' has wrong key type Float for being key" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:27:13.192269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Column 'prefix' has wrong key type Float for being key, operation: CREATE TABLE WITH INDEXES, path: /MyRoot/vectors 2025-06-24T21:27:13.193046Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-24T21:27:13.193293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-24T21:27:13.193344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-24T21:27:13.193943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-24T21:27:13.194101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:27:13.194145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:286:2275] TestWaitNotification: OK eventTxId 101 2025-06-24T21:27:13.194699Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/idx_vector" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:27:13.194951Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/idx_vector" took 273us result status StatusPathDoesNotExist 2025-06-24T21:27:13.195194Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/vectors/idx_vector\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/vectors/idx_vector" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> KqpQueryService::TempTablesDrop >> KqpNamedExpressions::NamedExpressionSimple-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning-UseSink-UseDataQuery >> KqpNewEngine::FlatmapLambdaMutiusedConnections [GOOD] >> KqpQueryService::TableSink_Htap+withOltpSink >> KqpNewEngine::EmptyMapWithBroadcast >> KqpReturning::ReplaceSerial [GOOD] >> KqpReturning::ReturningSerial >> KqpNotNullColumns::UpsertNotNullPg [GOOD] >> KqpNotNullColumns::UpdateTable_DontChangeNotNullWithIndex >> KqpQueryService::Write |97.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |97.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> KqpQueryService::TableSink_OlapUpdate [GOOD] >> KqpQueryService::TableSink_OlapOrder >> KqpRanges::UpdateWhereInNoFullScan+UseSink [GOOD] >> KqpRanges::UpdateWhereInNoFullScan-UseSink >> KqpRanges::UpdateWhereInBigLiteralList [GOOD] >> KqpRanges::UpdateWhereInBigLiteralListPrefix >> KqpNewEngine::Truncated [GOOD] >> KqpNewEngine::Update+UseSink >> KqpRanges::IsNotNullSecondComponent [GOOD] >> KqpRanges::IsNotNullInValue >> KqpNotNullColumns::UpdateNotNull [GOOD] >> KqpNotNullColumns::UpdateTable_DontChangeNotNull >> KqpQueryService::SessionFromPoolError >> KqpQueryService::TableSink_OltpReplace+HasSecondaryIndex >> KqpQueryService::DdlTx [GOOD] >> KqpQueryService::DdlWithExplicitTransaction >> KqpDocumentApi::RestrictWrite [GOOD] >> KqpDocumentApi::AllowRead >> KqpQueryService::ExecuteQueryWithWorkloadManager [GOOD] >> KqpQueryService::ExecuteQueryWithResourcePoolClassifier >> KqpQueryService::ClosedSessionRemovedWhileActiveWithQuery [GOOD] >> KqpQueryService::CloseSessionsWithLoad >> KqpNewEngine::Update-UseSink [GOOD] >> KqpNewEngine::UpdateFromParams >> KqpQueryService::TempTablesDrop [GOOD] >> KqpQueryService::Tcl >> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap+useOltpSink [GOOD] >> KqpSqlIn::SimpleKey_Negated [GOOD] >> KqpSqlIn::TupleParameter >> KqpNewEngine::SelfJoin [GOOD] >> KqpNewEngine::ScalarFunctions >> KqpQueryService::DdlGroup [GOOD] >> KqpQueryService::DdlPermission ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap+useOltpSink [GOOD] Test command err: Trying to start YDB, gRPC: 21064, MsgBus: 18298 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044e4/r3tmp/tmpDuUMVy/pdisk_1.dat TServer::EnableGrpc on GrpcPort 21064, node 1 TClient is connected to server localhost:18298 TClient is connected to server localhost:18298 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... >> KqpQueryService::Write [GOOD] >> KqpQueryServiceScripts::CancelScriptExecution >> KqpNotNullColumns::UpdateTable_DontChangeNotNullWithIndex [GOOD] >> KqpNotNullColumns::UpdateTable_UniqIndex >> KqpReturning::ReturningWorksIndexedDelete-QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedDeleteV2+QueryService >> KqpNewEngine::EmptyMapWithBroadcast [GOOD] >> KqpNewEngine::FlatMapLambdaInnerPrecompute >> KqpQueryService::Followers [GOOD] >> KqpQueryService::FlowControllOnHugeRealTable-LongRow >> TColumnShardTestSchema::TTL-Reboot-Internal-FirstPkColumn >> KqpNotNullColumns::UpdateTable_DontChangeNotNull [GOOD] >> KqpNotNullColumns::UpdateNotNullPg >> KqpNewEngine::Update+UseSink [GOOD] >> KqpNewEngine::StaleRO_IndexFollowers-EnableFollowers >> KqpQueryService::SessionFromPoolError [GOOD] >> KqpQueryService::ReturnAndCloseSameTime >> SystemView::QueryStatsAllTables [GOOD] >> SystemView::QueryStatsRetries >> TSchemeShardTest::CopyTableForBackup [GOOD] >> TSchemeShardTest::ConsistentCopyTablesForBackup >> KqpRanges::UpdateWhereInNoFullScan-UseSink [GOOD] >> KqpRanges::UpdateWhereInWithNull >> KqpNewEngine::UpdateFromParams [GOOD] >> KqpNewEngine::UpsertEmptyInput >> KqpQueryService::TableSink_OltpReplace+HasSecondaryIndex [GOOD] >> KqpQueryService::TableSink_OltpReplace-HasSecondaryIndex >> KqpQueryService::DdlWithExplicitTransaction [GOOD] >> KqpQueryService::Ddl_Dml >> KqpQueryService::Tcl [GOOD] >> KqpReturning::ReturningSerial [GOOD] >> KqpQueryService::TableSink_ReplaceFromSelectOlap >> KqpReturning::ReturningWorks+QueryService >> KqpDocumentApi::AllowRead [GOOD] >> KqpDocumentApi::RestrictAlter >> KqpRanges::UpdateWhereInBigLiteralListPrefix [GOOD] >> KqpRanges::UpdateWhereInMultipleUpdate >> KqpRanges::IsNotNullInValue [GOOD] >> KqpRanges::IsNotNullInJsonValue >> TSchemeShardTest::ConsistentCopyTablesForBackup [GOOD] >> TSchemeShardTest::CopyLockedTableForBackup >> TFlatTest::SplitEmptyToMany [GOOD] >> TFlatTest::SplitEmptyTwice ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> MoveTable::WithUncomittedData Test command err: 2025-06-24T21:27:12.732232Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:27:12.754627Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:27:12.754856Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: 
TxInitSchema.Execute at tablet 9437184 2025-06-24T21:27:12.761262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:12.761437Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:12.761633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:12.761766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:12.761840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:12.761939Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:12.762022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:12.762105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:12.762202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:12.762281Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:12.762366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:12.791036Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:27:12.791364Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:27:12.791428Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:27:12.791631Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:12.791806Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:27:12.791888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:27:12.791932Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:27:12.792019Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:27:12.792095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:27:12.792162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:27:12.792200Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:27:12.792398Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:12.792490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:27:12.792531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:27:12.792564Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:27:12.792654Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:27:12.792707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:27:12.792753Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:27:12.792789Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:27:12.792847Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:27:12.792896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:27:12.792956Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:27:12.793264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:27:12.793327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:27:12.793360Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:27:12.793563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:27:12.793617Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:27:12.793649Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:27:12.793782Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:27:12.793888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:27:12.793926Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:27:12.794004Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:27:12.794076Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:27:12.794115Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:27:12.794143Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:27:12.794603Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=49; 2025-06-24T21:27:12.794734Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=50; 2025-06-24T21:27:12.794832Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=43; 2025-06-24T21:27:12.794937Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=40; 2025-06-24T21:27:12.795044Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:27:12.795171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:27:12.795217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:27:12.795266Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... e/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:32:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224:5 #28 0x10607ea7 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169:12 #29 0x10607ea7 in std::__y1::__function::__func, void ()>::operator()() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314:10 #30 0x10e8aab5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12 #31 0x10e8aab5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10 #32 0x10e8aab5 in TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/utmain.cpp:525:20 #33 0x10e63518 in NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/registar.cpp:373:18 #34 0x10607073 in NKikimr::NTestSuiteMoveTable::TCurrentTest::Execute() /-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:32:1 #35 0x10e64de5 in NUnitTest::TTestFactory::Execute() /-S/library/cpp/testing/unittest/registar.cpp:494:19 #36 0x10e8502c in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #37 0x7fbb591f4d8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) previously allocated by thread T0 here: #0 0x106d317d in operator new(unsigned long) /-S/contrib/libs/clang18-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x2b088365 in __libcpp_operator_new /-S/contrib/libs/cxxsupp/libcxx/include/new:271:10 #2 0x2b088365 in __libcpp_allocate /-S/contrib/libs/cxxsupp/libcxx/include/new:295:10 #3 0x2b088365 in allocate /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocator.h:103:32 #4 0x2b088365 in get_node /-S/util/generic/hash_table.h:497:43 #5 0x2b088365 in new_node /-S/util/generic/hash_table.h:947:19 #6 0x2b088365 in emplace_unique_noresize /-S/util/generic/hash_table.h:1018:17 #7 0x2b088365 in std::__y1::pair<__yhashtable_iterator>, bool> THashTable, NKikimr::NColumnShard::TSchemeShardLocalPathId, THash, TSelect1st, TEqualTo, std::__y1::allocator>::emplace_unique(NKikimr::NColumnShard::TSchemeShardLocalPathId const&, NKikimr::NColumnShard::TInternalPathId const&) /-S/util/generic/hash_table.h:710:16 #8 0x2b080b49 in emplace /-S/util/generic/hash.h:177:20 #9 0x2b080b49 in 
NKikimr::NColumnShard::TTablesManager::MoveTablePropose(NKikimr::NColumnShard::TSchemeShardLocalPathId) /-S/ydb/core/tx/columnshard/tables_manager.cpp:454:5 #10 0x105187f1 in NKikimr::NColumnShard::TSchemaTransactionOperator::DoStartProposeOnExecute(NKikimr::NColumnShard::TColumnShard&, NKikimr::NTabletFlatExecutor::TTransactionContext&) /-S/ydb/core/tx/columnshard/transactions/operators/schema.cpp:147:33 #11 0x265f1628 in NKikimr::NColumnShard::TTxController::ITransactionOperator::StartProposeOnExecute(NKikimr::NColumnShard::TColumnShard&, NKikimr::NTabletFlatExecutor::TTransactionContext&) /-S/ydb/core/tx/columnshard/transactions/tx_controller.h:362:32 #12 0x265ed1a2 in NKikimr::NColumnShard::TTxController::StartProposeOnExecute(NKikimr::NColumnShard::TFullTxInfo const&, TBasicString> const&, NKikimr::NTabletFlatExecutor::TTransactionContext&) /-S/ydb/core/tx/columnshard/transactions/tx_controller.cpp:356:25 #13 0x266df482 in NKikimr::NColumnShard::TTxProposeTransaction::Execute(NKikimr::NTabletFlatExecutor::TTransactionContext&, NActors::TActorContext const&) /-S/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp:75:54 #14 0x18e3b944 in NKikimr::NTabletFlatExecutor::TExecutor::ExecuteTransaction(NKikimr::NTabletFlatExecutor::TSeat*) /-S/ydb/core/tablet_flat/flat_executor.cpp:1976:35 #15 0x18e37321 in NKikimr::NTabletFlatExecutor::TExecutor::DoExecute(TAutoPtr, NKikimr::NTabletFlatExecutor::TExecutor::ETxMode) /-S/ydb/core/tablet_flat/flat_executor.cpp:1880:13 #16 0x18e3e4de in NKikimr::NTabletFlatExecutor::TExecutor::Execute(TAutoPtr, NActors::TActorContext const&) /-S/ydb/core/tablet_flat/flat_executor.cpp:1894:5 #17 0x18dd546a in Execute /-S/ydb/core/tablet_flat/tablet_flat_executed.cpp:62:46 #18 0x18dd546a in NKikimr::NTabletFlatExecutor::TTabletExecutedFlat::Execute(TAutoPtr, NActors::TActorContext const&) /-S/ydb/core/tablet_flat/tablet_flat_executed.cpp:57:5 #19 0x266dc00a in NKikimr::NColumnShard::TColumnShard::Handle(TAutoPtr, TDelete>&, NActors::TActorContext const&) /-S/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp:123:5 #20 0x2665df8d in NKikimr::NColumnShard::TColumnShard::StateWork(TAutoPtr&) /-S/ydb/core/tx/columnshard/columnshard_impl.h:409:13 #21 0x11f29ccc in NActors::IActor::Receive(TAutoPtr&) /-S/ydb/library/actors/core/actor.cpp:280:13 #22 0x2d9498b4 in NActors::TTestActorRuntimeBase::SendInternal(TAutoPtr, unsigned int, bool) /-S/ydb/library/actors/testlib/test_runtime.cpp:1702:33 #23 0x2d942129 in NActors::TTestActorRuntimeBase::DispatchEventsInternal(NActors::TDispatchOptions const&, TInstant) /-S/ydb/library/actors/testlib/test_runtime.cpp:1295:45 #24 0x2d94c4a3 in NActors::TTestActorRuntimeBase::WaitForEdgeEvents(std::__y1::function&)>, TSet, std::__y1::allocator> const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.cpp:1554:22 #25 0x3211dc73 in NKikimr::TEvColumnShard::TEvProposeTransactionResult::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEventIf(TSet, std::__y1::allocator> const&, std::__y1::function const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.h:477:13 #26 0x3211d812 in GrabEdgeEvent /-S/ydb/library/actors/testlib/test_runtime.h:526:20 #27 0x3211d812 in NKikimr::TEvColumnShard::TEvProposeTransactionResult::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEvent(NActors::TActorId const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.h:532:20 #28 0x320fd0fa in NKikimr::NTxUT::(anonymous namespace)::ProposeSchemaTxOptional(NActors::TTestBasicRuntime&, NActors::TActorId&, TBasicString> const&, unsigned 
long) /-S/ydb/core/tx/columnshard/test_helper/columnshard_ut_common.cpp:66:23 #29 0x320feb76 in NKikimr::NTxUT::ProposeSchemaTx(NActors::TTestBasicRuntime&, NActors::TActorId&, TBasicString> const&, unsigned long) /-S/ydb/core/tx/columnshard/test_helper/columnshard_ut_common.cpp:85:25 #30 0x105feb13 in NKikimr::NTestSuiteMoveTable::TTestCaseWithUncomittedData::Execute_(NUnitTest::TTestContext&) /-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:65:20 #31 0x10607ea7 in operator() /-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:32:1 #32 0x10607ea7 in __invoke<(lambda at /-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:32:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:25 #33 0x10607ea7 in __call<(lambda at /-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:32:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224:5 #34 0x10607ea7 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169:12 #35 0x10607ea7 in std::__y1::__function::__func, void ()>::operator()() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314:10 #36 0x10e8aab5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12 #37 0x10e8aab5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10 #38 0x10e8aab5 in TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/utmain.cpp:525:20 #39 0x10e63518 in NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/registar.cpp:373:18 #40 0x10607073 in NKikimr::NTestSuiteMoveTable::TCurrentTest::Execute() /-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:32:1 #41 0x10e64de5 in NUnitTest::TTestFactory::Execute() /-S/library/cpp/testing/unittest/registar.cpp:494:19 #42 0x10e8502c in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #43 0x7fbb591f4d8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) SUMMARY: AddressSanitizer: heap-use-after-free /-S/contrib/libs/cxxsupp/libcxx/include/__utility/pair.h:149:22 in pair Shadow bytes around the buggy address: 0x503000a52b80: fa fa fd fd fd fd fa fa 00 00 00 00 fa fa 00 00 0x503000a52c00: 00 00 fa fa fd fd fd fd fa fa fd fd fd fd fa fa 0x503000a52c80: fd fd fd fd fa fa fd fd fd fd fa fa fd fd fd fd 0x503000a52d00: fa fa fd fd fd fd fa fa fd fd fd fd fa fa fd fd 0x503000a52d80: fd fd fa fa fd fd fd fd fa fa fd fd fd fd fa fa =>0x503000a52e00: fd fd[fd]fa fa fa 00 00 00 00 fa fa fd fd fd fd 0x503000a52e80: fa fa fd fd fd fd fa fa fd fd fd fd fa fa fd fd 0x503000a52f00: fd fa fa fa fd fd fd fa fa fa fd fd fd fa fa fa 0x503000a52f80: fd fd fd fd fa fa fd fd fd fd fa fa fd fd fd fd 0x503000a53000: fa fa 00 00 00 00 fa fa fd fd fd fa fa fa fd fd 0x503000a53080: fd fa fa fa 00 00 00 00 fa fa 00 00 00 00 fa fa Shadow byte legend (one shadow byte represents 8 application bytes): Addressable: 00 Partially addressable: 01 02 03 04 05 06 07 Heap left redzone: fa Freed heap region: fd Stack left redzone: f1 Stack mid redzone: f2 Stack right redzone: f3 Stack after return: f5 Stack use after scope: f8 Global redzone: f9 Global init order: f6 Poisoned by user: f7 Container overflow: fc Array cookie: ac Intra object redzone: bb ASan internal: fe Left alloca redzone: ca Right alloca redzone: cb ==974766==ABORTING >> 
TSchemeShardTest::DropPQAbort [GOOD] >> TSchemeShardTest::DropBlockStoreVolume >> TSchemeShardTest::CopyLockedTableForBackup [GOOD] >> TSchemeShardTest::ConfigColumnFamily >> TSchemeShardTest::DropBlockStoreVolume [GOOD] >> TSchemeShardTest::DropBlockStoreVolumeWithNonReplicatedPartitions >> KqpQueryService::DdlPermission [GOOD] >> KqpQueryService::DdlSecret >> KqpQueryService::TableSink_OltpReplace-HasSecondaryIndex [GOOD] >> KqpQueryService::TableSink_OltpOrder >> KqpQueryService::TableSink_ReplaceFromSelectLargeOlap >> KqpNotNullColumns::UpdateNotNullPg [GOOD] >> KqpNotNullColumns::UpdateOnNotNull >> KqpQueryService::TableSink_Htap+withOltpSink [GOOD] >> KqpQueryService::TableSink_Htap-withOltpSink >> TSchemeShardTest::ConfigColumnFamily [GOOD] >> TSchemeShardTest::ConsistentCopyAfterDropIndexes >> TSchemeShardTest::DropBlockStoreVolumeWithNonReplicatedPartitions [GOOD] >> TSchemeShardTest::DropBlockStoreVolume2 >> KqpQueryServiceScripts::CancelScriptExecution [GOOD] >> KqpQueryServiceScripts::EmptyNextFetchToken >> KqpSqlIn::TupleParameter [GOOD] >> KqpSqlIn::TupleLiteral >> KqpQueryService::ExecuteQueryWithResourcePoolClassifier [GOOD] >> KqpQueryService::ExecuteRetryQuery >> KqpNewEngine::FlatMapLambdaInnerPrecompute [GOOD] >> KqpNewEngine::DqSourceSequentialLimit >> KqpNewEngine::UpsertEmptyInput [GOOD] >> KqpNotNullColumns::AlterAddNotNullColumn >> TSchemeShardTest::DropBlockStoreVolume2 [GOOD] >> TSchemeShardTest::DropBlockStoreVolumeWithFillGeneration >> KqpDocumentApi::RestrictAlter [GOOD] >> KqpDocumentApi::RestrictDrop >> KqpNotNullColumns::UpdateTable_UniqIndex [GOOD] >> KqpNotNullColumns::UpdateTable_UniqIndexPg >> KqpQueryService::FlowControllOnHugeRealTable-LongRow [GOOD] >> KqpQueryService::ForbidInteractiveTxOnImplicitSession >> KqpReturning::ReturningWorksIndexedDeleteV2+QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedDeleteV2-QueryService >> TFlatTest::SplitEmptyTwice [GOOD] >> TSchemeShardTest::DropBlockStoreVolumeWithFillGeneration [GOOD] >> TSchemeShardTest::DocumentApiVersion >> TSchemeShardTest::ConsistentCopyAfterDropIndexes [GOOD] >> KqpRanges::UpdateWhereInWithNull [GOOD] >> KqpRanges::ValidatePredicates >> KqpQueryService::TableSink_ReplaceFromSelectOlap [GOOD] >> KqpRanges::IsNotNullInJsonValue [GOOD] >> KqpRanges::IsNotNullInJsonValue2 >> TSchemeShardTest::DocumentApiVersion [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_Dir ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::ConsistentCopyAfterDropIndexes [GOOD] Test command err: canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "4@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "1@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 7 } result: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "4@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "1@staff" 
InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 7 } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "4@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "1@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 7 } result: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "4@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "1@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 7 } canonic: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } result: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } result: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "44@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: 
"00@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "11@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 7 } result: ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "44@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "11@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 7 } canonic: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 3 Inherited: true } result: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 3 Inherited: true } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 3 Inherited: true } result: ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: 
"0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 3 Inherited: true } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "44@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "11@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 7 } result: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "44@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "11@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 7 } canonic: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@sta ... 
pe: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 TableSchemaVersion: 4 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 4 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 8 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:27:31.859363Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Copy1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:27:31.859832Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Copy1" took 450us result status StatusSuccess 2025-06-24T21:27:31.860514Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Copy1" PathDescription { Self { Name: "Copy1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000007 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Copy1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: 
false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "Sync" LocalPathId: 6 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 8 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:27:31.861919Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Copy2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:27:31.862279Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Copy2" took 376us result status StatusSuccess 2025-06-24T21:27:31.862892Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Copy2" PathDescription { Self { Name: "Copy2" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 105 CreateStep: 5000009 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Copy2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 
ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 8 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:27:31.864430Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Copy3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:27:31.864852Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Copy3" took 433us result status StatusSuccess 2025-06-24T21:27:31.865468Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Copy3" PathDescription { Self { Name: "Copy3" PathId: 9 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 105 CreateStep: 5000009 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Copy3" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "Sync" LocalPathId: 10 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 
RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 8 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 9 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpNotNullColumns::UpdateOnNotNull [GOOD] >> KqpNotNullColumns::UpdateOnNotNullPg >> KqpRanges::UpdateWhereInMultipleUpdate [GOOD] >> KqpRanges::UpdateWhereInFullScan+UseSink >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning+UseSink+UseDataQuery [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_ReplaceFromSelectOlap [GOOD] Test command err: Trying to start YDB, gRPC: 2047, MsgBus: 19859 2025-06-24T21:27:14.328614Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630608706206807:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:14.328721Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030e6/r3tmp/tmpYKUQOx/pdisk_1.dat 2025-06-24T21:27:14.645852Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630608706206787:2079] 1750800434327625 != 1750800434327628 2025-06-24T21:27:14.656973Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2047, node 1 2025-06-24T21:27:14.721962Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:14.726983Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:14.742630Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:14.769680Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:14.769751Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:14.769765Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:14.769956Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19859 TClient is connected to server localhost:19859 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:15.303909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:15.345425Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:17.496426Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630621591109325:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:17.496428Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630621591109317:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:17.496537Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:17.501076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:17.515990Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630621591109331:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:27:17.614958Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630621591109382:2333] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:17.946693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:18.231537Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630625886076916:2463] txid# 281474976710664, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:18.241627Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630625886076923:2468] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/ZmUwYzQzNTgtZTE0ZTk5OGQtN2Q0ZGQxZmUtN2RhOGFhZGI=\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:18.259732Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-24T21:27:18.273186Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519630625886076976:2326], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:17: Error: At function: KiReadTable!
:3:17: Error: Cannot find table 'db.[/Root/test/Temp]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:27:18.273408Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZmUwYzQzNTgtZTE0ZTk5OGQtN2Q0ZGQxZmUtN2RhOGFhZGI=, ActorId: [1:7519630621591109309:2289], ActorState: ExecuteState, TraceId: 01jyhxd4zm7bh6n1d75rnrd9x4, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:27:18.320626Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519630625886076987:2333], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:17: Error: At function: KiReadTable!
:3:17: Error: Cannot find table 'db.[/Root/test/Temp]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:27:18.320997Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NDgxZmJkZjgtYmQ4ZTU0NTYtYjI0OGUxM2EtM2M2NGFlZDk=, ActorId: [1:7519630625886076983:2330], ActorState: ExecuteState, TraceId: 01jyhxd5100hn58hjzxc9ggky6, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 11534, MsgBus: 17556 2025-06-24T21:27:19.091193Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630630446163688:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:19.091266Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030e6/r3tmp/tmpgdEBtO/pdisk_1.dat 2025-06-24T21:27:19.245910Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:19.248088Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630630446163662:2079] 1750800439090419 != 1750800439090422 2025-06-24T21:27:19.262925Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:19.263032Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:19.264699Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11534, node 2 2025-06-24T21:27:19.358661Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:19.358685Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:19.358691Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:19.358788Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17556 TClient is connected to server localhost:17556 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:19.832321Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195 ... Write;tablet_id=72075186224037890;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:27:30.493897Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[3:7519630669781249604:2305];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037892;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:27:30.493932Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[3:7519630669781249551:2302];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037893;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:27:30.493956Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[3:7519630669781249550:2301];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037894;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:27:31.283120Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-24T21:27:31.283126Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-24T21:27:31.286181Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037898;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-24T21:27:31.287259Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-24T21:27:31.287779Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-24T21:27:31.287824Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-24T21:27:31.288081Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-24T21:27:31.288261Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-24T21:27:31.288502Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-24T21:27:31.288721Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-24T21:27:31.288729Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-24T21:27:31.288923Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-24T21:27:31.776455Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:27:31.776700Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:27:31.776769Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:27:31.776996Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:27:31.779707Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:27:31.780174Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037915;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:27:31.780303Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:27:31.780742Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:27:31.780928Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:27:31.780943Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:27:31.781142Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:27:31.781719Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:27:31.781846Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[3:7519630669781250236:2459];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037903;local_tx_no=23;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037909,72075186224037910;receive=72075186224037915; 2025-06-24T21:27:31.781912Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[3:7519630669781250236:2459];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037903;local_tx_no=24;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037909,72075186224037910;receive=72075186224037915; 2025-06-24T21:27:31.781988Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[3:7519630669781250236:2459];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037903;local_tx_no=25;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037909,72075186224037910;receive=72075186224037915; 2025-06-24T21:27:31.782042Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[3:7519630669781250236:2459];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037903;local_tx_no=26;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037909,72075186224037910;receive=72075186224037915; 2025-06-24T21:27:31.782093Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[3:7519630669781250236:2459];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037903;local_tx_no=27;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037909,72075186224037910;receive=72075186224037915; 2025-06-24T21:27:31.782145Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[3:7519630669781250236:2459];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037903;local_tx_no=28;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037909,72075186224037910;receive=72075186224037915; 2025-06-24T21:27:31.782291Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[3:7519630669781250236:2459];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037903;local_tx_no=30;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037910;receive=72075186224037909; 2025-06-24T21:27:31.782364Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[3:7519630669781250236:2459];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037903;local_tx_no=31;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037910;receive=72075186224037909; 2025-06-24T21:27:31.782421Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037903;self_id=[3:7519630669781250236:2459];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037903;local_tx_no=32;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037910;receive=72075186224037909; 2025-06-24T21:27:31.782476Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[3:7519630669781250236:2459];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037903;local_tx_no=33;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037910;receive=72075186224037909; 2025-06-24T21:27:31.782510Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:27:31.782527Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[3:7519630669781250236:2459];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037903;local_tx_no=34;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037910;receive=72075186224037909; 2025-06-24T21:27:31.782578Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[3:7519630669781250236:2459];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037903;local_tx_no=35;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037910;receive=72075186224037909; 2025-06-24T21:27:31.784290Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; >> KqpReturning::ReturningWorks+QueryService [GOOD] >> KqpReturning::ReturningWorks-QueryService >> BasicStatistics::TwoDatabases [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_Dir [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_Table >> SystemView::QueryStatsRetries [GOOD] >> KqpQueryService::Ddl_Dml [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithWorkloadManager ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoDatabases [GOOD] Test command err: 2025-06-24T21:24:02.963302Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:333:2217], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:24:02.963909Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:24:02.964097Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0048a7/r3tmp/tmpL9w39G/pdisk_1.dat 2025-06-24T21:24:03.748216Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7297, node 1 2025-06-24T21:24:04.805328Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:04.805424Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:04.805458Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:04.805695Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:04.815324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:04.950665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:04.950852Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:04.974476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63668 2025-06-24T21:24:05.654362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:09.192692Z node 3 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 3 2025-06-24T21:24:09.276640Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:09.276781Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:09.331290Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-24T21:24:09.333807Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:09.558481Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:09.595263Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.598243Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.598905Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.599114Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.599440Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.599537Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.599622Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.599708Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.599794Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.795100Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:09.795280Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:09.810000Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:09.974890Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:10.033704Z node 3 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:24:10.033844Z node 3 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:24:10.068984Z node 3 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:24:10.072925Z node 3 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:24:10.073132Z node 3 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:24:10.073190Z node 3 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:24:10.073233Z node 3 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:24:10.073298Z node 3 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:24:10.073368Z node 3 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:24:10.073423Z node 3 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:24:10.073861Z node 3 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:24:10.115768Z node 3 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:10.115879Z node 3 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [3:1866:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:10.129905Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [3:1879:2569] 2025-06-24T21:24:10.136103Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [3:1900:2579] 2025-06-24T21:24:10.136768Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [3:1900:2579], schemeshard id = 72075186224037897 2025-06-24T21:24:10.149582Z node 3 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database1 2025-06-24T21:24:10.173734Z node 3 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:24:10.173800Z node 3 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:24:10.173867Z node 3 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database1/.metadata/_statistics 2025-06-24T21:24:10.202582Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:10.210818Z node 3 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:24:10.210958Z node 3 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:24:10.460115Z node 3 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:24:10.670055Z node 3 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:24:10.750432Z node 3 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-24T21:24:11.305413Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:11.465360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:14.997569Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:24:15.045909Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:15.046001Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:15.098248Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:24:15.100385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:15.324708Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.325399Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.325958Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.326110Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.326325Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.326411Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:15.326478Z node 2 :HIVE WARN: tx__cr ... ath_id; 2025-06-24T21:27:27.650083Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:10202:4410], DatabaseId: /Root/Database2, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:27.650477Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:10213:4415], DatabaseId: /Root/Database2, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:27.650732Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database2, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:27.668129Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976730658:2, at schemeshard: 72075186224038898, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:27.772174Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:10216:4418], DatabaseId: /Root/Database2, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976730658 completed, doublechecking } 2025-06-24T21:27:27.932041Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:10306:4467] txid# 281474976730659, issues: { message: "Check failed: path: \'/Root/Database2/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224038898, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:27.952349Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:10335:4482]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:27:27.952750Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:27:27.952864Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:10337:4484] 2025-06-24T21:27:27.952943Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:10337:4484] 2025-06-24T21:27:27.953537Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224038895] EvServerConnected, pipe server id = [2:10338:4485] 2025-06-24T21:27:27.953808Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10337:4484], server id = [2:10338:4485], tablet id = 72075186224038895, status = OK 2025-06-24T21:27:27.953926Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224038895] EvConnectNode, pipe server id = [2:10338:4485], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:27:27.953989Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224038895] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:27:27.954195Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:27:27.954306Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:10335:4482], StatRequests.size() = 1 2025-06-24T21:27:28.070039Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZGUwOTY4NGMtYzk4NDBmOTYtOWEzYTI2YzMtNWI5NTg3N2I=, TxId: 2025-06-24T21:27:28.070106Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZGUwOTY4NGMtYzk4NDBmOTYtOWEzYTI2YzMtNWI5NTg3N2I=, TxId: 2025-06-24T21:27:28.070757Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224038895] TTxFinishTraversal::Execute 2025-06-24T21:27:28.086039Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224038895] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224038898, LocalPathId: 3] 2025-06-24T21:27:28.086100Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224038895] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:27:28.164956Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224038895] EvFastPropagateCheck 2025-06-24T21:27:28.165029Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224038895] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:27:28.244726Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:10337:4484], schemeshard count = 1 2025-06-24T21:27:29.084457Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:27:29.100668Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:27:29.100741Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:27:29.100807Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-24T21:27:29.100847Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:27:29.101332Z node 3 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database1 2025-06-24T21:27:29.105133Z node 3 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:27:29.124046Z node 3 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=3&id=YzlhZDI5ODEtMzFmNTk5ZmMtNzIyNDI4NGMtZTU3ZjM1MjU=, TxId: 2025-06-24T21:27:29.124124Z node 3 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=3&id=YzlhZDI5ODEtMzFmNTk5ZmMtNzIyNDI4NGMtZTU3ZjM1MjU=, TxId: 2025-06-24T21:27:29.124712Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:27:29.144296Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:27:29.144365Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:27:29.228868Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [3:10427:4763]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:27:29.229357Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-24T21:27:29.229428Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [3:10427:4763], StatRequests.size() = 1 2025-06-24T21:27:31.282877Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [3:10488:4783]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:27:31.283385Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-24T21:27:31.283443Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [3:10488:4783], StatRequests.size() = 1 2025-06-24T21:27:32.099182Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224038895] ScheduleNextTraversal 2025-06-24T21:27:32.099262Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224038895] ScheduleNextTraversal. No force traversals. 2025-06-24T21:27:32.099328Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224038895] IsColumnTable. Path [OwnerId: 72075186224038898, LocalPathId: 4] is data table. 2025-06-24T21:27:32.099380Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224038895] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224038898, LocalPathId: 4] 2025-06-24T21:27:32.099855Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database2 2025-06-24T21:27:32.103187Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:27:32.122611Z node 2 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:204: [72075186224038891] TEvIntervalQuerySummary, wrong stage: node id# 2 2025-06-24T21:27:32.123457Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZDEzZjllYjctNzkwZDAxNWMtZTBlMzlmOGQtZjVmNjlmZjE=, TxId: 2025-06-24T21:27:32.123518Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZDEzZjllYjctNzkwZDAxNWMtZTBlMzlmOGQtZjVmNjlmZjE=, TxId: 2025-06-24T21:27:32.124075Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224038895] TTxFinishTraversal::Execute 2025-06-24T21:27:32.138725Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224038895] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224038898, LocalPathId: 4] 2025-06-24T21:27:32.138787Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224038895] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:27:33.084546Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 2 2025-06-24T21:27:33.085142Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:27:33.085785Z node 3 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 3 2025-06-24T21:27:33.097413Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:27:33.097497Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:27:33.180869Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [3:10567:4797]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:27:33.181228Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-24T21:27:33.181281Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [3:10567:4797], StatRequests.size() = 1 2025-06-24T21:27:33.182285Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:10569:4555]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:27:33.187182Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:27:33.187270Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:10569:4555], StatRequests.size() = 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning+UseSink+UseDataQuery [GOOD] Test command err: Trying to start YDB, gRPC: 15461, MsgBus: 26057 2025-06-24T21:22:44.293721Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629447350048974:2184];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:44.294453Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003186/r3tmp/tmpY3GiXH/pdisk_1.dat 2025-06-24T21:22:45.014584Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:22:45.014730Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:22:45.038180Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:22:45.114965Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15461, node 1 2025-06-24T21:22:45.307345Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:22:45.379717Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:22:45.379745Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:22:45.379753Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:22:45.380106Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26057 TClient is connected to server localhost:26057 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:22:46.353427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:22:46.399507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:46.678596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:46.918011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:22:47.005084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:22:49.288189Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629468824886944:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:49.288317Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:49.295306Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629447350048974:2184];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:22:49.295374Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:22:49.637330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:49.675797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:49.744485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:49.787492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:49.860342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:49.949644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:50.009529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:50.095994Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519629473119854900:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:50.096106Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:50.096478Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629473119854905:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:22:50.100102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:22:50.117626Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629473119854907:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:22:50.221054Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629473119854960:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:22:51.750583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:22:51.834121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /ho ... 5-06-24T21:27:22.476107Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:22.480462Z node 28 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [28:7519630644471254052:2079] 1750800442206459 != 1750800442206462 2025-06-24T21:27:22.499960Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14835, node 28 2025-06-24T21:27:22.571885Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:22.571923Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:22.571939Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:22.572134Z node 28 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2840 TClient is connected to server localhost:2840 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:27:23.220407Z node 28 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:23.233260Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:23.250720Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:23.346157Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:23.606028Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:23.745272Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:27.214527Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[28:7519630644471254175:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:27.214623Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:28.600302Z node 28 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [28:7519630670241059473:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:28.600541Z node 28 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:28.664709Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:28.716958Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:28.766042Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:28.840652Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:28.915488Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:28.961342Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:29.013211Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:29.123519Z node 28 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [28:7519630674536027444:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:29.123661Z node 28 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:29.123988Z node 28 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [28:7519630674536027449:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:29.131022Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:29.161547Z node 28 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [28:7519630674536027451:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:29.239800Z node 28 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [28:7519630674536027504:3430] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:31.275800Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:31.337406Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:31.398424Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) [[["739a1c70-8b53-4111-b5a0-d504fe7b67f6"]]] [[["739a1c70-8b53-4111-b5a0-d504fe7b67f6"]]] |97.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> TSchemeShardTest::DisablePublicationsOfDropping_Table [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_IndexedTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::Ddl_Dml [GOOD] Test command err: Trying to start YDB, gRPC: 18080, MsgBus: 21388 2025-06-24T21:27:07.100622Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630577884243813:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:07.100692Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003103/r3tmp/tmpyxhmb7/pdisk_1.dat 2025-06-24T21:27:07.445224Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630577884243792:2079] 1750800427099348 != 1750800427099351 2025-06-24T21:27:07.468723Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18080, node 1 2025-06-24T21:27:07.512097Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:07.512268Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:07.514148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 
2025-06-24T21:27:07.536155Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:07.536188Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:07.536195Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:07.536357Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21388 TClient is connected to server localhost:21388 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:27:08.120470Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:08.125045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:08.155501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:08.288542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:08.441384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:27:08.497821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:10.209007Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630590769147324:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:10.209151Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:10.584603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:10.617425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:10.692020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:10.726690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:10.752397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:10.781064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:10.852140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:10.921372Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630590769147988:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:10.921478Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:10.921720Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630590769147993:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:10.926560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:10.939785Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630590769147995:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:11.041393Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630595064115342:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:12.100714Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630577884243813:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:12.100804Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:12.161494Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630599359082932:3609] txid# 281474976710673, issues: { message: "User already exists" severity: 1 } 2025-06-24T21:27:12.168847Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MTFjZDczMjEtNWVhNTUzN2UtY2Q3NTRiYjctYTM3ZjZkYmE=, ActorId: [1:7519630599359082926:2479], ActorState: ExecuteState, TraceId: 01jyhxcz0f0rcs0txxd58ckk04, Create QueryResponse for error on request, msg: 2025-06-24T21:2 ... .609293Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=NWIwMzkzYTAtYzk0ZmU0MzctYzY3NzA3OWItZDVlMDYwMQ==, ActorId: [4:7519630677806326981:2487], ActorState: ExecuteState, TraceId: 01jyhxdgt40d9749j5h43bxmhf, Create QueryResponse for error on request, msg: 2025-06-24T21:27:30.707288Z node 4 :KQP_COMPILE_SERVICE WARN: kqp_compile_service.cpp:570: queryId in recompile request and queryId in cache are different, queryId in request: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT INTO TestDdlDml2 (Key, Value1) VALUES (1, \"1\");\n SELECT * FROM TestDdlDml2;\n UPSERT INTO TestDdlDml2 (Key, Value1) VALUES (2, \"2\");\n SELECT * FROM TestDdlDml2;\n CREATE TABLE TestDdlDml33 (\n Key Uint64,\n PRIMARY KEY (Key)\n );\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_database":"Root" }, "settings": { "ydb_database":"Root" }, "rollback_settings": { } } }}, queryId in cache: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT INTO TestDdlDml2 (Key, Value1, Value2) VALUES (1, \"1\", \"1\");\n SELECT * FROM TestDdlDml2;\n ALTER TABLE TestDdlDml2 DROP COLUMN Value2;\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_database":"Root" }, "settings": { "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-24T21:27:30.959038Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:31.222348Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself 
is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715685:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:31.501670Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519630682101294694:2566], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:11:17: Error: At function: KiReadTable!
:11:17: Error: Cannot find table 'db.[/Root/TestDdlDml5]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:27:31.503105Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=NmZkNTYzY2QtNjYwNmU2MTUtZWE1OGQxYTEtOTAyNzg2ZTM=, ActorId: [4:7519630682101294571:2545], ActorState: ExecuteState, TraceId: 01jyhxdhkb4rtwg919mgdyddfa, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:27:31.600439Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715690:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:31.775289Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715691:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:32.323179Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519630686396262293:3994] txid# 281474976715697, issues: { message: "Check failed: path: \'/Root/TestDdl1\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 20], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:32.323305Z node 4 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715697, ProxyStatus: ExecComplete, SchemeShardReason: Check failed: path: '/Root/TestDdl1', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 20], type: EPathTypeTable, state: EPathStateNoChanges) 2025-06-24T21:27:32.323514Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=MzUyMjYyNTYtZmIxMjQyOWUtOWU1OWYxNjMtNTZjNTM4YmU=, ActorId: [4:7519630686396262280:2622], ActorState: ExecuteState, TraceId: 01jyhxdjp5c6gwddhy980hh2k9, Create QueryResponse for error on request, msg: 2025-06-24T21:27:32.362276Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519630686396262317:4005] txid# 281474976715699, issues: { message: "Check failed: path: \'/Root/TestDdl2\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 21], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:32.362396Z node 4 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715699, ProxyStatus: ExecComplete, SchemeShardReason: Check failed: path: '/Root/TestDdl2', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 21], type: EPathTypeTable, state: EPathStateNoChanges) 2025-06-24T21:27:32.362635Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=ZjY4YWM5MDctNzJmN2NhY2YtMTYzNjNmMzItYzgzNjBhZmM=, ActorId: [4:7519630686396262304:2629], ActorState: ExecuteState, TraceId: 01jyhxdjqc8netmcra6j1fv2zj, Create QueryResponse for error on request, msg: 2025-06-24T21:27:32.745035Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo 
unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715703:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:32.872522Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519630686396262496:4100] txid# 281474976715704, issues: { message: "Check failed: path: \'/Root/TestDdl2\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 21], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:32.872866Z node 4 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715704, ProxyStatus: ExecComplete, SchemeShardReason: Check failed: path: '/Root/TestDdl2', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 21], type: EPathTypeTable, state: EPathStateNoChanges) 2025-06-24T21:27:32.873048Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=NTIwNjk0ZmYtZGI1MzVjODEtYjM4MjkwNmUtODlmN2IzYTk=, ActorId: [4:7519630686396262387:2654], ActorState: ExecuteState, TraceId: 01jyhxdjzv493j0xq83rfq5zxz, Create QueryResponse for error on request, msg: 2025-06-24T21:27:33.030068Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519630690691229846:2693], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:17: Error: At function: KiReadTable!
:2:17: Error: Cannot find table 'db.[/Root/TestDdl4]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:27:33.030338Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=ZTg1ZTA5NC01MWI2MmZjMC04MmVmNmZhMC1mM2Y5ZWZjMQ==, ActorId: [4:7519630690691229843:2691], ActorState: ExecuteState, TraceId: 01jyhxdkck8xhyk2cjqeapctqh, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:27:33.230928Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715711:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:33.899363Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:468: Get parsing result with error, self: [4:7519630690691230074:2743], owner: [4:7519630669216391275:2356], statement id: 1 2025-06-24T21:27:33.899691Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=MzY1NWNlOWMtOGM4N2M0MDEtYmFlOThiMmYtMmUwYWU4ODU=, ActorId: [4:7519630690691230072:2742], ActorState: ExecuteState, TraceId: 01jyhxdm7y9bjpj0khb9d2khhr, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:27:34.095563Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519630694986197419:2760], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:3:29: Error: At function: KiWriteTable!
:3:44: Error: Failed to convert type: Struct<'Key':Int32,'Value':String> to Struct<'Key':Uint64?,'Value':Uint64?>
:3:44: Error: Failed to convert 'Value': String to Optional
:3:44: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-24T21:27:34.095875Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=YjMzY2U5N2QtZmE0MWY2Y2MtODAwYWNjYzQtODNmYTlkYjE=, ActorId: [4:7519630694986197401:2754], ActorState: ExecuteState, TraceId: 01jyhxdmbrac0twgc3sj7q45kk, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:27:34.167359Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715720:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:34.283964Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519630694986197532:2782], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:8:29: Error: At function: KiWriteTable!
:8:44: Error: Failed to convert type: Struct<'Key':Int32,'Value':String> to Struct<'Key':Uint64?,'Value':Uint64?>
:8:44: Error: Failed to convert 'Value': String to Optional
:8:44: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-24T21:27:34.287526Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=NWE0OGYwNzUtYjA0Y2QxOS05NmU3YzA2NC02OTM3NWExOQ==, ActorId: [4:7519630694986197445:2769], ActorState: ExecuteState, TraceId: 01jyhxdmfc96e9wb3btzzkf00s, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::QueryStatsRetries [GOOD] Test command err: 2025-06-24T21:23:29.511597Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629642358837063:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:29.511667Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ae2/r3tmp/tmpvGJmoc/pdisk_1.dat 2025-06-24T21:23:30.188008Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:30.188154Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:30.217881Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:30.393022Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3261, node 1 2025-06-24T21:23:30.646776Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:30.800142Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:30.800174Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:30.800185Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:30.800323Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17976 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
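[editor's note, not part of the captured log] The GENERIC_ERROR issues above (code 2031, "Failed to convert input columns types to scheme types") are produced when an UPSERT supplies a literal row of type Struct<'Key':Int32,'Value':String> for columns the scheme declares as Struct<'Key':Uint64?,'Value':Uint64?>. The YQL sketch below only illustrates that mismatch; the table name ExampleKV and its schema are hypothetical, chosen to mirror the error text, and are not taken from the test.

-- Hypothetical table mirroring the target type in the error message:
-- both columns are Uint64 (optional on the YDB side).
CREATE TABLE ExampleKV (
    Key Uint64,
    Value Uint64,
    PRIMARY KEY (Key)
);

-- A statement of this shape would hit the same class of error as above:
-- the literal row is Struct<'Key':Int32,'Value':String>, and the String
-- value cannot be implicitly converted to the optional Uint64 column.
-- UPSERT INTO ExampleKV (Key, Value) VALUES (1, "1");

-- Explicit casts (or plain numeric literals) make the input row convertible:
UPSERT INTO ExampleKV (Key, Value) VALUES (CAST(1 AS Uint64), CAST("1" AS Uint64));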
2025-06-24T21:23:31.331953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:31.442340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-24T21:23:31.522708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:31.522828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:31.571911Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519629651457674265:2156];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:31.572207Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:23:31.560886Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-24T21:23:31.580254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:31.634582Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:31.634706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:31.645195Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-24T21:23:31.687675Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:31.720052Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:32.213480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 
2025-06-24T21:23:32.250410Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519629656604038814:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:32.250461Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:23:32.279516Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519629654546919195:2092];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:32.279677Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:23:32.317219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:32.418677Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:32.418754Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:32.469630Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-24T21:23:32.469875Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:32.492591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:32.492694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:32.502064Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:23:32.504010Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:32.562494Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:32.667352Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:33.339408Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:33.403319Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:34.515400Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629642358837063:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:34.515473Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:36.547763Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519629651457674265:2156];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:36.547875Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:37.256518Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519629656604038814:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:37.256601Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:37.283299Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519629654546919195:2092];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:37.283382Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:39.190959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:39.765161Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629685308511461:2316], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: ... rId: [71:7519630637368334701:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:21.048721Z node 71 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:21.054161Z node 71 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:21.081886Z node 71 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [71:7519630637368334716:2311], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:27:21.169305Z node 71 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [71:7519630637368334783:2712] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:21.437019Z node 71 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhxd7pp4bvg4bce1fav59px, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=71&id=ZDU3YjBiYmItZTkzOThkOWEtZmI3MzkwMTctZjkzNTExMzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:27:21.650223Z node 71 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. Ctx: { TraceId: 01jyhxd83eayd9p7k98nh8er78, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=71&id=ZDE2YmVkYmEtOGY1YjczMWQtMzAzM2EzMWYtYWI0MWVlMDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:27:21.654045Z node 71 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [71:7519630637368334865:2329], owner: [71:7519630637368334861:2327], scan id: 0, sys view info: Type: ETopQueriesByRequestUnitsOneHour SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:27:21.655020Z node 71 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [71:7519630637368334865:2329], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-24T21:27:21.655657Z node 71 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [71:7519630637368334865:2329], row count: 1, finished: 1 2025-06-24T21:27:21.655757Z node 71 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [71:7519630637368334865:2329], owner: [71:7519630637368334861:2327], scan id: 0, sys view info: Type: ETopQueriesByRequestUnitsOneHour SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:27:21.660744Z node 71 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800441648, txId: 281474976710662] shutting down 2025-06-24T21:27:24.515746Z node 76 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[76:7519630652620075323:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:24.515873Z node 76 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ae2/r3tmp/tmpkApAkD/pdisk_1.dat 2025-06-24T21:27:24.854848Z node 76 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:24.898714Z node 76 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(76, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:24.898866Z node 76 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(76, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:24.911708Z node 76 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(76, (0,0,0,0)) VolatileState: 
Connecting -> Connected TServer::EnableGrpc on GrpcPort 10394, node 76 2025-06-24T21:27:25.125772Z node 76 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:25.125830Z node 76 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:25.125848Z node 76 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:25.126096Z node 76 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:27:25.539296Z node 76 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29200 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:25.820449Z node 76 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:25.875903Z node 76 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:29.516083Z node 76 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[76:7519630652620075323:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:29.516173Z node 76 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:32.069197Z node 76 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [76:7519630686979814686:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:32.069397Z node 76 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:32.070101Z node 76 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [76:7519630686979814698:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:32.079351Z node 76 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:32.125776Z node 76 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [76:7519630686979814700:2311], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:27:32.224689Z node 76 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [76:7519630686979814776:2712] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:32.516713Z node 76 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyhxdjf25cmb3a1f1f31be7p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=76&id=NTJiYWU2YTctZjE0YWRhN2YtZjhjNDE0MGMtODMwNDAwMTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:27:32.759248Z node 76 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. Ctx: { TraceId: 01jyhxdjyr426qd75jd9qxasey, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=76&id=YzA3MTBhMmEtNWQ2MjE0NDAtYzJkMDM2MS04NjZiNGRm, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:27:32.763469Z node 76 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [76:7519630686979814861:2331], owner: [76:7519630686979814858:2329], scan id: 0, sys view info: Type: ETopQueriesByReadBytesOneMinute SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:27:32.764347Z node 76 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [76:7519630686979814861:2331], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-24T21:27:32.765196Z node 76 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [76:7519630686979814861:2331], row count: 1, finished: 1 2025-06-24T21:27:32.765338Z node 76 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [76:7519630686979814861:2331], owner: [76:7519630686979814858:2329], scan id: 0, sys view info: Type: ETopQueriesByReadBytesOneMinute SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-24T21:27:32.768293Z node 76 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800452757, txId: 281474976710662] shutting down >> KqpQueryService::ExecuteRetryQuery [GOOD] >> KqpNotNullColumns::AlterAddNotNullColumn [GOOD] >> KqpNotNullColumns::AlterAddNotNullColumnPg ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SplitEmptyTwice [GOOD] Test command err: 2025-06-24T21:24:53.998381Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630002307651148:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:24:54.000058Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a71/r3tmp/tmpd4HAlT/pdisk_1.dat 2025-06-24T21:24:54.312239Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:54.364786Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
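[editor's note, not part of the captured log] The SYSTEM_VIEWS scans reported above (ETopQueriesByRequestUnitsOneHour, ETopQueriesByReadBytesOneMinute) are reads of the top-queries system views that SystemView::QueryStatsRetries exercises. A minimal YQL sketch of the same kind of read follows; it assumes the conventional .sys view names and a database mounted at /Root, and selects all columns since the exact column set is not shown in the log.

-- Hedged sketch: read the per-minute top-queries-by-read-bytes view,
-- the data source behind the ETopQueriesByReadBytesOneMinute scan above.
SELECT *
FROM `/Root/.sys/top_queries_by_read_bytes_one_minute`
LIMIT 10;

-- The hourly request-units variant scanned earlier in the same test:
SELECT *
FROM `/Root/.sys/top_queries_by_request_units_one_hour`
LIMIT 10;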
2025-06-24T21:24:54.364881Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:54.366903Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:32199 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:24:54.656330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:24:54.718383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:24:54.873611Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.004s,wait=0.001s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-06-24T21:24:54.880730Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.003s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-24T21:24:54.907377Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1573 647 6413)b }, ecr=1.000 2025-06-24T21:24:54.916775Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.001s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750800294824 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... (TRUNCATED) TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750800294824 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... (TRUNCATED) 2025-06-24T21:24:54.999892Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 
2025-06-24T21:24:57.108814Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.88, eph 1} end=Done, 2 blobs 201r (max 201), put Spent{time=0.010s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (58340 0 0)b }, ecr=1.000 2025-06-24T21:24:57.138207Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.108, eph 1} end=Done, 2 blobs 756r (max 756), put Spent{time=0.009s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (49618 0 0)b }, ecr=1.000 2025-06-24T21:24:57.268151Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.176, eph 2} end=Done, 2 blobs 453r (max 454), put Spent{time=0.028s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (131114 0 0)b }, ecr=1.000 2025-06-24T21:24:57.268783Z node 1 :OPS_COMPACT INFO: Compact{72057594046644480.2.514, eph 1} end=Done, 2 blobs 3r (max 3), put Spent{time=0.018s,wait=0.008s,interrupts=1} Part{ 1 pk, lobs 0 +0, (187 0 0)b }, ecr=1.000 2025-06-24T21:24:57.339636Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.200, eph 1} end=Done, 2 blobs 2r (max 2), put Spent{time=0.065s,wait=0.058s,interrupts=1} Part{ 1 pk, lobs 0 +0, (252 0 0)b }, ecr=1.000 2025-06-24T21:24:57.341325Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.201, eph 1} end=Done, 2 blobs 2r (max 2), put Spent{time=0.060s,wait=0.059s,interrupts=1} Part{ 1 pk, lobs 0 +0, (181 0 0)b }, ecr=1.000 2025-06-24T21:24:57.348825Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.203, eph 1} end=Done, 2 blobs 501r (max 501), put Spent{time=0.067s,wait=0.044s,interrupts=1} Part{ 1 pk, lobs 0 +0, (31966 0 0)b }, ecr=1.000 2025-06-24T21:24:57.398974Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.205, eph 1} end=Done, 2 blobs 1503r (max 1503), put Spent{time=0.094s,wait=0.050s,interrupts=1} Part{ 1 pk, lobs 0 +0, (103274 0 0)b }, ecr=1.000 2025-06-24T21:24:57.430872Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.207, eph 2} end=Done, 2 blobs 1509r (max 1512), put Spent{time=0.088s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (98941 0 0)b }, ecr=1.000 2025-06-24T21:24:57.459509Z node 1 :OPS_COMPACT INFO: Compact{72057594046644480.2.541, eph 1} end=Done, 2 blobs 10001r (max 10001), put Spent{time=0.140s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (553660 0 0)b }, ecr=1.000 2025-06-24T21:24:57.509358Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.296, eph 3} end=Done, 2 blobs 704r (max 705), put Spent{time=0.020s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (203646 0 0)b }, ecr=1.000 2025-06-24T21:24:57.552250Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.320, eph 3} end=Done, 2 blobs 2262r (max 2265), put Spent{time=0.028s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (148264 0 0)b }, ecr=1.000 2025-06-24T21:24:57.714576Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.403, eph 4} end=Done, 2 blobs 955r (max 956), put Spent{time=0.026s,wait=0.002s,interrupts=1} Part{ 1 pk, lobs 0 +0, (276133 0 0)b }, ecr=1.000 2025-06-24T21:24:57.721154Z node 1 :OPS_COMPACT INFO: Compact{72057594046644480.2.1025, eph 2} end=Done, 2 blobs 3r (max 5), put Spent{time=0.005s,wait=0.002s,interrupts=1} Part{ 1 pk, lobs 0 +0, (187 0 0)b }, ecr=1.000 2025-06-24T21:24:57.722519Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.423, eph 2} end=Done, 2 blobs 2r (max 3), put Spent{time=0.002s,wait=0.001s,interrupts=1} Part{ 1 pk, lobs 0 +0, (252 0 0)b }, ecr=1.000 2025-06-24T21:24:57.722566Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.424, eph 2} end=Done, 2 blobs 2r (max 3), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 
+0, (181 0 0)b }, ecr=1.000 2025-06-24T21:24:57.772028Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.426, eph 2} end=Done, 2 blobs 1002r (max 1002), put Spent{time=0.050s,wait=0.027s,interrupts=1} Part{ 1 pk, lobs 0 +0, (63729 0 0)b }, ecr=1.000 2025-06-24T21:24:57.807812Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.427, eph 2} end=Done, 2 blobs 3003r (max 3003), put Spent{time=0.086s,wait=0.035s,interrupts=1} Part{ 1 pk, lobs 0 +0, (206156 0 0)b }, ecr=1.000 2025-06-24T21:24:57.809254Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.430, eph 4} end=Done, 2 blobs 3015r (max 3018), put Spent{time=0.085s,wait=0.001s,interrupts=1} Part{ 1 pk, lobs 0 +0, (197587 0 0)b }, ecr=1.000 2025-06-24T21:24:57.832324Z node 1 :OPS_COMPACT INFO: Compact{72057594046644480.2.1051, eph 2} end=Done, 2 blobs 10001r (max 10504), put Spent{time=0.108s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (553660 0 0)b }, ecr=1.000 2025-06-24T21:24:57.876664Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.510, eph 5} end=Done, 2 blobs 1206r (max 1207), put Spent{time=0.021s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (348665 0 0)b }, ecr=1.000 2025-06-24T21:24:57.900335Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.529, eph 5} end=Done, 2 blobs 3768r (max 3771), put Spent{time=0.021s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (246910 0 0)b }, ecr=1.000 2025-06-24T21:24:57.982924Z node 1 :OPS_COMPACT INFO: Compact{72057594046644480.2.1535, eph 3} end=Done, 2 blobs 3r (max 5), put Spent{time=0.005s,wait=0.004s,interrupts=1} Part{ 1 pk, lobs 0 +0, (187 0 0)b }, ecr=1.000 2025-06-24T21:24:57.989250Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.612, eph 3} end=Done, 2 blobs 2r (max 3), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (252 0 0)b }, ecr=1.000 2025-06-24T21:24:57.990624Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.613, eph 3} end=Done, 2 blobs 2r (max 3), put Spent{time=0.003s,wait=0.001s,interrupts=1} Part{ 1 pk, lobs 0 +0, (181 0 0)b }, ecr=1.000 2025-06-24T21:24:58.00 ... 
8 2025-06-24T21:27:27.499132Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:1 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:27:27.501555Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-24T21:27:27.501868Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 7 2025-06-24T21:27:27.502304Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-24T21:27:27.502369Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-24T21:27:27.511109Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519630661347411855 RawX2: 4503608217307345 } TabletId: 72075186224037889 State: 4 2025-06-24T21:27:27.511203Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037889, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:27:27.511737Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2025-06-24T21:27:27.512023Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:27:27.513976Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-24T21:27:27.514223Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 6 2025-06-24T21:27:27.514813Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519630661347412176 RawX2: 4503608217307405 } TabletId: 72075186224037891 State: 4 2025-06-24T21:27:27.514854Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:27:27.515007Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519630661347412181 RawX2: 4503608217307407 } TabletId: 72075186224037890 State: 4 2025-06-24T21:27:27.515064Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:27:27.515723Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037889 not found 2025-06-24T21:27:27.515737Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-24T21:27:27.515803Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-24T21:27:27.515947Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519630665642379647 RawX2: 4503608217307424 } TabletId: 72075186224037895 State: 4 2025-06-24T21:27:27.516010Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037895, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:27:27.516196Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519630665642379652 RawX2: 4503608217307426 } TabletId: 72075186224037893 State: 4 2025-06-24T21:27:27.516222Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037893, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:27:27.516674Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:27:27.516781Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:27:27.517235Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:8 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:27:27.517315Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:6 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:27:27.517701Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519630665642379648 RawX2: 4503608217307425 } TabletId: 72075186224037894 State: 4 2025-06-24T21:27:27.517790Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037894, state: Offline, at schemeshard: 72057594046644480 2025-06-24T21:27:27.518535Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:7 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:27:27.518709Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-24T21:27:27.519026Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 5 2025-06-24T21:27:27.519303Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-24T21:27:27.519582Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 
72057594046644480, LocalPathId: 3] was 4 2025-06-24T21:27:27.520773Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037891 not found 2025-06-24T21:27:27.520803Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037890 not found 2025-06-24T21:27:27.522400Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-24T21:27:27.522432Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-24T21:27:27.522504Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-24T21:27:27.522516Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-24T21:27:27.522854Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 8 ShardOwnerId: 72057594046644480 ShardLocalIdx: 8, at schemeshard: 72057594046644480 2025-06-24T21:27:27.523080Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-24T21:27:27.523337Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046644480 ShardLocalIdx: 6, at schemeshard: 72057594046644480 2025-06-24T21:27:27.523552Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-24T21:27:27.525318Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:8 2025-06-24T21:27:27.525368Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:8 tabletId 72075186224037895 2025-06-24T21:27:27.525550Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 7 ShardOwnerId: 72057594046644480 ShardLocalIdx: 7, at schemeshard: 72057594046644480 2025-06-24T21:27:27.525883Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-24T21:27:27.525900Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037893 not found 2025-06-24T21:27:27.526018Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037895 not found 2025-06-24T21:27:27.526277Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T21:27:27.526315Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-24T21:27:27.526417Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-24T21:27:27.526613Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:6 2025-06-24T21:27:27.526639Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:6 tabletId 72075186224037893 2025-06-24T21:27:27.529477Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:7 2025-06-24T21:27:27.529505Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:7 tabletId 72075186224037894 2025-06-24T21:27:27.529613Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-24T21:27:27.530259Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037894 not found TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Path not found" |97.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> KqpQueryService::ForbidInteractiveTxOnImplicitSession [GOOD] >> KqpDocumentApi::RestrictDrop [GOOD] >> KqpNewEngine::DqSourceSequentialLimit [GOOD] >> KqpNewEngine::DqSourceLocksEffects |97.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> KqpQueryService::TableSink_Olap_Replace >> KqpNewEngine::ScalarFunctions [GOOD] >> KqpNewEngine::ScalarMultiUsage |97.7%| [TA] $(B)/ydb/core/client/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpQueryService::IssuesInCaseOfSuccess ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ExecuteRetryQuery [GOOD] Test command err: Trying to start YDB, gRPC: 12991, MsgBus: 18597 2025-06-24T21:27:12.609825Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630599024863686:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:12.609940Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030ee/r3tmp/tmpm9Amj4/pdisk_1.dat 2025-06-24T21:27:12.934941Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12991, node 1 2025-06-24T21:27:12.994619Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:12.994742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:13.005319Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:13.027114Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:13.027160Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:13.027168Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:13.027300Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18597 TClient is connected to server localhost:18597 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:13.579971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:27:13.615615Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:13.625884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:13.762043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:13.945553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:14.026453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:15.885032Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630611909767173:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:15.887019Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:16.151017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.177906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.202809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.270072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.301785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.375279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.404371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.454769Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630616204735131:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:16.454844Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:16.454984Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630616204735136:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:16.458777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:16.468120Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630616204735138:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:16.539202Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630616204735189:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:17.610078Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630599024863686:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:17.610174Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:17.980761Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630620499702793:2489], DatabaseId: /Root, PoolId: another_pool_id, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool another_pool_id not found or you don't have access permissions } 2025-06-24T21:27:17.980761Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630620499702791:2487], DatabaseId: /Root, PoolId: another_pool_id, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool another_pool_id not found or you don't have access permissions } 2025-06-24T21:27:17.980831Z ... b/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Wait resource pool classifier 0.097644s: status = SUCCESS, issues = 2025-06-24T21:27:29.661194Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NmZjNWU0MC1lY2QxYTQ4MS0yN2FmNjA1NC0xYmRkY2YyMg==, ActorId: [2:7519630671439254641:2779], ActorState: ExecuteState, TraceId: 01jyhxdg3s02jef9vs8zx4527r, Create QueryResponse for error on request, msg: Query failed during adding/waiting in workload pool MyPool Trying to start YDB, gRPC: 10145, MsgBus: 26581 2025-06-24T21:27:30.535436Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630677936533654:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:30.535676Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030ee/r3tmp/tmpgEgQPA/pdisk_1.dat 2025-06-24T21:27:30.658353Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:30.660584Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630677936533612:2079] 1750800450534237 != 1750800450534240 2025-06-24T21:27:30.676573Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:30.676664Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 10145, node 3 2025-06-24T21:27:30.677838Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:30.727187Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:30.727215Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:30.727230Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:30.727451Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26581 TClient is connected to server localhost:26581 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:31.261233Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:31.277944Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:31.367547Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:31.547114Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:31.559419Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:31.646923Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:34.243523Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630695116404437:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:34.243623Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:34.315662Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:34.354623Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:34.392648Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:34.426675Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:34.460543Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:34.500343Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:34.574441Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:34.642373Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630695116405097:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:34.642479Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:34.642624Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630695116405102:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:34.647024Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:34.659053Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630695116405104:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:34.734954Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630695116405155:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:35.535765Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630677936533654:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:35.535859Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |97.7%| [TA] {RESULT} $(B)/ydb/core/client/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpQueryServiceScripts::EmptyNextFetchToken [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_IndexedTable [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_Pq >> TColumnShardTestSchema::ForgetWithLostAnswer >> KqpService::SessionBusy ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ForbidInteractiveTxOnImplicitSession [GOOD] Test command err: Trying to start YDB, gRPC: 31340, MsgBus: 62601 2025-06-24T21:27:15.493295Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:27:15.493718Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:27:15.493967Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003101/r3tmp/tmpX7RqTf/pdisk_1.dat 2025-06-24T21:27:15.845279Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 31340, node 1 2025-06-24T21:27:15.992156Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:15.999934Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:16.000020Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:16.000164Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:16.000552Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:27:16.000859Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750800432513455 != 1750800432513459 2025-06-24T21:27:16.052303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:16.052518Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:16.068947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62601 TClient is connected to server localhost:62601 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:16.370299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:16.493871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:16.634863Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:16.877368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:17.278880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:17.595474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:18.438905Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1680:3276], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:18.439196Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:18.471248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:18.716175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:18.969792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:19.280343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:19.536817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:19.886677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:20.166650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:20.496129Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2356:3775], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:20.496253Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:20.496569Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2361:3780], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:20.502709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:20.658263Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2363:3782], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:20.727886Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2421:3821] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 19165, MsgBus: 29925 2025-06-24T21:27:22.762280Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630642147934622:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:22.762348Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003101/r3tmp/tmpmpphmy/pdisk ... , but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:27.763797Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519630642147934622:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:27.763877Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 120 Trying to start YDB, gRPC: 32328, MsgBus: 23589 2025-06-24T21:27:31.646586Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630681377815092:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:31.648279Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003101/r3tmp/tmpFFTO3k/pdisk_1.dat 2025-06-24T21:27:31.875014Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:31.875095Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:31.891539Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:31.892589Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32328, node 3 2025-06-24T21:27:31.947277Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:31.947304Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:31.947312Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:31.947450Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23589 TClient is 
connected to server localhost:23589 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:32.551002Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:27:32.562688Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:32.649562Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:32.697225Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:32.812427Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:32.894538Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:35.362753Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630698557685871:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:35.362816Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:35.422328Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:35.473134Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:35.501788Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:35.535194Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:35.569835Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:35.642859Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:35.680691Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:35.742250Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630698557686534:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:35.742324Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:35.742487Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630698557686539:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:35.768058Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:35.784807Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630698557686541:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:35.865626Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630698557686592:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:36.646579Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630681377815092:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:36.646696Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryService::TableSink_OlapOrder [GOOD] >> KqpQueryService::TableSink_OlapRWQueries ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpDocumentApi::RestrictDrop [GOOD] Test command err: Trying to start YDB, gRPC: 8848, MsgBus: 61095 2025-06-24T21:27:12.561824Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630597482956375:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:12.561900Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030f4/r3tmp/tmp8WJHu5/pdisk_1.dat 2025-06-24T21:27:12.894603Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8848, node 1 2025-06-24T21:27:12.962053Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:12.962198Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:12.962491Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:12.962502Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:12.962514Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:12.962696Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:27:12.964295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61095 TClient is connected to server localhost:61095 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:13.568292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:27:13.576300Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:27:13.588475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:27:13.602118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:13.770190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:13.928832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:14.008519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:27:16.006478Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630614662827160:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:16.006626Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:16.383858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.413764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.453719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.517885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.549273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.587077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.620694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.673007Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630614662827821:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:16.673092Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:16.673112Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630614662827826:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:16.676963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:16.690445Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630614662827828:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:16.747068Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630614662827879:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:17.563003Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630597482956375:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:17.563120Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:17.757518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:17.836882Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:75196 ... ARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519630678130724504:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:30.989054Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030f4/r3tmp/tmpczf3rI/pdisk_1.dat 2025-06-24T21:27:31.111910Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:31.112080Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519630678130724483:2079] 1750800450988079 != 1750800450988082 TServer::EnableGrpc on GrpcPort 13869, node 4 2025-06-24T21:27:31.152758Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:31.152866Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:31.164290Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:31.198442Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:31.198467Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:31.198479Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:31.198607Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23162 TClient is connected to server localhost:23162 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:31.733272Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:31.754243Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:31.846220Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:32.016095Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:32.056085Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:32.144931Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:34.924275Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519630695310595304:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:34.924383Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:34.999695Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:35.037339Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:35.107026Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:35.178211Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:35.215633Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:35.257061Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:35.297261Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:35.562098Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519630699605563261:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:35.562199Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:35.562596Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519630699605563266:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:35.567723Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:35.588111Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519630699605563268:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:35.658309Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519630699605563319:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:35.989988Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519630678130724504:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:35.990073Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:36.929693Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667)
: Error: Type annotation, code: 1030
:2:24: Error: At function: KiDropTable!
:2:24: Error: Document API table cannot be modified from YQL query: /Root/DocumentApiTest, code: 2008 >> TSchemeShardTest::AlterIndexTableDirectly [GOOD] >> KqpNotNullColumns::UpdateOnNotNullPg [GOOD] >> KqpQueryServiceScripts::ExecuteScriptStatsBasic |97.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::EmptyNextFetchToken [GOOD] Test command err: Trying to start YDB, gRPC: 62842, MsgBus: 17584 2025-06-24T21:27:15.980440Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630613074049107:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:15.980530Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030e3/r3tmp/tmpcuSWbH/pdisk_1.dat 2025-06-24T21:27:16.372478Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62842, node 1 2025-06-24T21:27:16.407945Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:16.408771Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:16.411119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:16.435332Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:16.435363Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:16.435404Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:16.435531Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17584 TClient is connected to server localhost:17584 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:16.987665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:27:16.989743Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:27:17.015510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:17.138237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:17.302327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:17.387805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:19.095840Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630630253919891:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:19.095966Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:19.418471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:19.452206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:19.478886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:19.510504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:19.540970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:19.589285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:19.622317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:19.684772Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630630253920547:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:19.684856Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:19.685073Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630630253920552:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:19.688577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:19.701857Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630630253920554:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:19.762582Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630630253920605:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:20.980769Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630613074049107:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:20.980887Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 14956, MsgBus: 14646 2025-06-24T21:27:21.818291Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630638274042020:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:21.818447Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030e3/r3tmp/tmp0Cj3xa/pdisk_1.dat 2025-06-24T21:27:21.915834Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for ... 30.032798Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:30.034859Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:30.069340Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:30.069366Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:30.069378Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:30.069511Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5083 TClient is connected to server localhost:5083 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:30.605709Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:30.612475Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:27:30.624320Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:30.708652Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:27:30.860264Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:30.910053Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:30.963468Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:33.615088Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630688168823102:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:33.615256Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:33.678472Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:33.710129Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:33.743312Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:33.778503Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:33.815474Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:33.856534Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:33.892424Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:33.951739Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630688168823757:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:33.951831Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:33.952248Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630688168823762:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:33.955938Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:33.965950Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630688168823764:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:34.046497Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630692463791111:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:34.890757Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630670988952312:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:34.890858Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:35.150223Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:35.152555Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:35.154921Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:37.735463Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800457770, txId: 281474976715704] shutting down >> TSchemeShardTest::DisablePublicationsOfDropping_Pq [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_Solomon ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::AlterIndexTableDirectly [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:26:48.764209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:26:48.764294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:26:48.764361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: 
StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:26:48.764419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:26:48.765352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:26:48.765396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:26:48.765470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:26:48.765539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:26:48.766340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:26:48.769070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:26:48.858701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:48.858764Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:26:48.874620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:26:48.875041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:26:48.875279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:26:48.882883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:26:48.883065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:26:48.883752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:48.884018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:26:48.888462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:26:48.889693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:26:48.897371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:26:48.897444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:26:48.898349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:26:48.898412Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:26:48.898492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:26:48.898582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:26:48.905210Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:26:49.032995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:26:49.033264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.033536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:26:49.033586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:26:49.033910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:26:49.034001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:26:49.036163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:49.036348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:26:49.036519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.036569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:26:49.036626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:26:49.036665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:26:49.038445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.038515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:26:49.038563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:26:49.040301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.040358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.040410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.040463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:26:49.044141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:26:49.046048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:26:49.046210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:26:49.047114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:49.047280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:26:49.047332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.047628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:26:49.047694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.047848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:26:49.047951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:26:49.049825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:26:49.049878Z node 1 :FLAT_TX_SCHEMESHARD ... 0 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:27:39.021074Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/table/indexByValue" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:27:39.021556Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/table/indexByValue" took 518us result status StatusSuccess 2025-06-24T21:27:39.022817Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/table/indexByValue" PathDescription { Self { Name: "indexByValue" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 3 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 1592 DataSize: 1592 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: 
"!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "indexByValue" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 3 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 100500 MinPartitionsCount: 1 FastSplitSettings { SizeThreshold: 100500 RowCountThreshold: 100500 } } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:27:39.030515Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/table/indexByValue/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:27:39.031081Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path 
"/MyRoot/table/indexByValue/indexImplTable" took 603us result status StatusSuccess 2025-06-24T21:27:39.032587Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/table/indexByValue/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 3 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 100500 MinPartitionsCount: 1 FastSplitSettings { SizeThreshold: 100500 RowCountThreshold: 100500 } } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" 
IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 3 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 1592 DataSize: 1592 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpSqlIn::TupleLiteral [GOOD] >> KqpSqlIn::TupleSelect >> TColumnShardTestSchema::EnableColdTiersAfterNoEviction ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::UpdateOnNotNullPg [GOOD] Test command err: Trying to start YDB, gRPC: 28555, MsgBus: 5530 2025-06-24T21:27:04.106547Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630564608749282:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:04.106847Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00327d/r3tmp/tmpwSRFxw/pdisk_1.dat 2025-06-24T21:27:04.460657Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630564608749264:2079] 1750800424105791 != 1750800424105794 2025-06-24T21:27:04.476108Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28555, node 1 2025-06-24T21:27:04.523090Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:04.523249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:04.541729Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:04.597146Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable 
config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:04.597170Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:04.597190Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:04.597352Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5530 TClient is connected to server localhost:5530 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:05.098904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:05.117360Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:05.117903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:27:07.019744Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630577493651792:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:07.019852Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:07.353237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:07.498533Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630577493651897:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:07.498605Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:07.498727Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630577493651902:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:07.503079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:07.513849Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630577493651904:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:27:07.572412Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630577493651955:2392] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:07.917633Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519630577493652019:2321], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:61: Error: At function: KiUpdateTable!
:1:61: Error: Cannot update primary key column: Key 2025-06-24T21:27:07.917932Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YWJjNDMzOTctMmJjODllY2QtNDYxMWRmODItMTFlYTYyOQ==, ActorId: [1:7519630573198684493:2288], ActorState: ExecuteState, TraceId: 01jyhxctv95e0s127dkn2m4ngy, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:27:07.945043Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519630577493652028:2325], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:63: Error: At function: KiUpdateTable!
:1:63: Error: Cannot update primary key column: Key 2025-06-24T21:27:07.945391Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YWJjNDMzOTctMmJjODllY2QtNDYxMWRmODItMTFlYTYyOQ==, ActorId: [1:7519630573198684493:2288], ActorState: ExecuteState, TraceId: 01jyhxctwp65r5mzx8p3925pfc, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 7792, MsgBus: 20758 2025-06-24T21:27:08.593393Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630583105864228:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:08.593449Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00327d/r3tmp/tmpA5L1rc/pdisk_1.dat 2025-06-24T21:27:08.742667Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630583105864209:2079] 1750800428592772 != 1750800428592775 2025-06-24T21:27:08.746504Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:08.750116Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:08.750174Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:08.752633Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7792, node 2 2025-06-24T21:27:08.803743Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:08.803770Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:08.803784Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:08.803929Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20758 TClient is connected to server localhost:20758 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
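The "Cannot update primary key column: Key" compile errors above show KQP's type-annotation stage (KiUpdateTable) rejecting any UPDATE that assigns to a primary-key column. A minimal YQL sketch of that pattern follows; the table path and schema are illustrative assumptions, not taken from the test fixture in this log.

    -- assumed illustrative schema; the real test table is not shown in this log
    CREATE TABLE `/Root/UpdatePkExample` (
        Key Uint64,
        Value String,
        PRIMARY KEY (Key)
    );

    -- rejected at compile time with "Cannot update primary key column: Key"
    UPDATE `/Root/UpdatePkExample` SET Key = 42 WHERE Value = "a";

    -- updating a non-key column compiles and runs
    UPDATE `/Root/UpdatePkExample` SET Value = "b" WHERE Key = 1;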
2025-06-24T21:27:09.289948Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:09.298687Z node 2 :FLAT_TX_ ... first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:29.891251Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:32.992691Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519630684369554800:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:32.992776Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:33.012274Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:33.099741Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519630688664522199:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:33.099870Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:33.100379Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519630688664522204:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:33.104528Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:33.116374Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519630688664522206:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:27:33.177826Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7519630688664522257:2392] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:33.386688Z node 6 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [6:7519630688664522317:2320], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:63: Error: At function: KiWriteTable!
:1:45: Error: Failed to convert type: Struct<'Key':Int32,'Value':Null> to Struct<'Key':Uint64?,'Value':String>
:1:45: Error: Failed to convert 'Value': Null to String
:1:45: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-24T21:27:33.387406Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=6&id=NjdkOWRlMzktNDJhZjcyOTQtZTAzOGMyYzItYWZmMzA5ZGY=, ActorId: [6:7519630684369554782:2290], ActorState: ExecuteState, TraceId: 01jyhxdkqrd5dvfqb0fm74gh24, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 20312, MsgBus: 4121 2025-06-24T21:27:34.198014Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519630695111874942:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:34.198084Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00327d/r3tmp/tmpAakJlo/pdisk_1.dat 2025-06-24T21:27:34.347476Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:34.351541Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519630695111874907:2079] 1750800454197280 != 1750800454197283 2025-06-24T21:27:34.368306Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:34.368433Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:34.370526Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20312, node 7 2025-06-24T21:27:34.433622Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:34.433649Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:34.433663Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:34.433841Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4121 TClient is connected to server localhost:4121 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
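The "Failed to convert 'Value': Null to String" issue above, and the KIKIMR_BAD_COLUMN_TYPE error "Tried to insert NULL value into NOT NULL column: Value" reported further down, both describe a write that tries to place NULL into a NOT NULL column: the first is caught during compilation, the second during execution. A minimal YQL sketch of the pattern, using an assumed table that is not the test's actual fixture:

    -- assumed illustrative schema with a NOT NULL data column
    CREATE TABLE `/Root/NotNullExample` (
        Key Uint64,
        Value String NOT NULL,
        PRIMARY KEY (Key)
    );

    -- fails: NULL cannot be converted to the non-optional String column Value
    UPSERT INTO `/Root/NotNullExample` (Key, Value) VALUES (1, NULL);

    -- succeeds: a concrete String value satisfies the NOT NULL constraint
    UPSERT INTO `/Root/NotNullExample` (Key, Value) VALUES (1, "ok");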
2025-06-24T21:27:35.162339Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:35.211774Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:38.281898Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630712291744728:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:38.282056Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:38.308158Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:38.400918Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630712291744829:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:38.401056Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:38.401362Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630712291744834:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:38.406284Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:38.423846Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519630712291744836:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:27:38.496146Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519630712291744887:2391] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:38.891350Z node 7 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [7:7519630712291744958:2290], TxId: 281474976715663, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=ZDgxNzdjMDAtZDNjYjg1YWYtNzM0OGZjZTgtNWY1MjJjNg==. CustomerSuppliedId : . TraceId : 01jyhxds0t01epte5wqnjn2my2. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: BAD_REQUEST KIKIMR_BAD_COLUMN_TYPE: {
: Error: Tried to insert NULL value into NOT NULL column: Value, code: 2031 }. 2025-06-24T21:27:38.891956Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=7&id=ZDgxNzdjMDAtZDNjYjg1YWYtNzM0OGZjZTgtNWY1MjJjNg==, ActorId: [7:7519630712291744710:2290], ActorState: ExecuteState, TraceId: 01jyhxds0t01epte5wqnjn2my2, Create QueryResponse for error on request, msg: >> KqpQueryService::ExecuteQueryUpsertDoesntChangeIndexedValuesIfNotChanged >> KqpQueryService::StreamExecuteQueryPure >> KqpNewEngine::StaleRO_IndexFollowers-EnableFollowers [GOOD] >> KqpNewEngine::UnionAllPure >> KqpReturning::ReturningWorksIndexedDeleteV2-QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedInsert+QueryService >> KqpQueryServiceScripts::ForgetScriptExecutionOnLongQuery >> KqpRanges::IsNotNullInJsonValue2 [GOOD] >> KqpRanges::DuplicateKeyPredicateParam >> TSchemeShardTest::DisablePublicationsOfDropping_Solomon [GOOD] >> TColumnShardTestSchema::RebootHotTiers >> KqpQueryService::ExecuteQueryPg |97.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> KqpRanges::UpdateWhereInFullScan+UseSink [GOOD] >> KqpRanges::UpdateWhereInFullScan-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::DisablePublicationsOfDropping_Solomon [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:26:48.764236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:26:48.764312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:26:48.764361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:26:48.764386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:26:48.765349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:26:48.765402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:26:48.765465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:26:48.765528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:26:48.766167Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , 
AvailableExternalDataSources# 2025-06-24T21:26:48.769093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:26:48.857868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:48.857914Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:26:48.873230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:26:48.873624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:26:48.873825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:26:48.881107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:26:48.881319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:26:48.881968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:48.882270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:26:48.888276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:26:48.889714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:26:48.897379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:26:48.897457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:26:48.898270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:26:48.898305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:26:48.898370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:26:48.898434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:26:48.904077Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:26:49.044121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:26:49.044332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.044555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:26:49.044604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:26:49.044815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:26:49.044900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:26:49.046935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:49.047092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:26:49.047252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.047320Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:26:49.047367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:26:49.047399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:26:49.049165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.049211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:26:49.049250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:26:49.050858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.050913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.050957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.051003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:26:49.054645Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:26:49.056396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:26:49.056527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:26:49.057391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:49.057509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:26:49.057554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.057872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:26:49.057923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.058106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:26:49.058183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:26:49.060179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:26:49.060219Z node 1 :FLAT_TX_SCHEMESHARD ... 
24T21:27:41.693877Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:27:41.694077Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 64424511596 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:27:41.694175Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_solomon.cpp:47: TDropSolomon TPropose operationId# 104:0 HandleReply TEvOperationPlan, step: 5000005, at schemeshard: 72057594046678944 2025-06-24T21:27:41.694272Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5298: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 3] name: Obj type: EPathTypeSolomonVolume state: EPathStateDrop stepDropped: 0 droppedTxId: 104 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:27:41.694327Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5314: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:27:41.694482Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:27:41.694587Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 104:0 128 -> 130 2025-06-24T21:27:41.694803Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:27:41.694902Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:27:41.695830Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:27:41.697416Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:27:41.699488Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:27:41.699535Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:27:41.699709Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-24T21:27:41.699891Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:27:41.699928Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [15:446:2404], at schemeshard: 72057594046678944, txId: 104, path id: 1 2025-06-24T21:27:41.699969Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: 
[15:446:2404], at schemeshard: 72057594046678944, txId: 104, path id: 3 FAKE_COORDINATOR: Erasing txId 104 2025-06-24T21:27:41.700392Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:27:41.700452Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:417: [72057594046678944] TDeleteParts opId# 104:0 ProgressState 2025-06-24T21:27:41.700554Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:27:41.700620Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:27:41.700695Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 1/1 2025-06-24T21:27:41.700752Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:27:41.700822Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-24T21:27:41.700881Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-24T21:27:41.700947Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:0 2025-06-24T21:27:41.701008Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:0 2025-06-24T21:27:41.701187Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:27:41.701256Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-06-24T21:27:41.701316Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-24T21:27:41.701374Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-24T21:27:41.701934Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:27:41.702024Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:27:41.702067Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:27:41.702135Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-24T21:27:41.702210Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:27:41.702581Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:27:41.702657Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-24T21:27:41.702688Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-24T21:27:41.702716Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-24T21:27:41.702748Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:27:41.702820Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-24T21:27:41.706474Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:27:41.708528Z node 15 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-24T21:27:41.708866Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T21:27:41.709299Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 Forgetting tablet 72075186233409547 2025-06-24T21:27:41.710050Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:27:41.710130Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:27:41.710236Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:27:41.712385Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:27:41.712564Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-24T21:27:41.714702Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 
2025-06-24T21:27:41.714819Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T21:27:41.715024Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-24T21:27:41.715619Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-24T21:27:41.715692Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-24T21:27:41.716399Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-24T21:27:41.716550Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T21:27:41.716614Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [15:572:2511] TestWaitNotification: OK eventTxId 104 >> KqpQueryServiceScripts::ExecuteScriptWithWorkloadManager [GOOD] >> KqpQueryServiceScripts::ExplainScript >> KqpQueryService::TableSink_Olap_Replace [GOOD] >> KqpQueryService::TableSink_OlapUpsert >> KqpQueryService::TableSink_Htap-withOltpSink [GOOD] >> KqpQueryService::TableSink_DisableSink >> KqpNotNullColumns::AlterAddNotNullColumnPg [GOOD] >> KqpNotNullColumns::AlterDropNotNullColumn |97.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |97.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> KqpService::SessionBusy [GOOD] >> KqpService::SessionBusyRetryOperation >> KqpReturning::ReturningWorks-QueryService [GOOD] >> KqpReturning::ReturningColumnsOrder >> SystemView::ShowCreateTableColumnUpsertOptions [GOOD] >> SystemView::ShowCreateTableColumnUpsertIndex >> KqpQueryService::TableSink_OlapRWQueries [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning-UseSink-UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert-UseSink-UseDataQuery >> KqpNewEngine::ScalarMultiUsage [GOOD] >> KqpNewEngine::SequentialReadsPragma+Enabled |97.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpNewEngine::DqSourceLocksEffects [GOOD] >> KqpNewEngine::FullScanCount >> KqpQueryService::IssuesInCaseOfSuccess [GOOD] >> KqpQueryService::MaterializeTxResults |97.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_OlapRWQueries [GOOD] Test command err: Trying to start YDB, gRPC: 11190, MsgBus: 5683 2025-06-24T21:27:06.988364Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630575641711870:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:06.988495Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/2btm/003104/r3tmp/tmpcDPTGY/pdisk_1.dat 2025-06-24T21:27:07.366040Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:07.367319Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630575641711851:2079] 1750800426987500 != 1750800426987503 TServer::EnableGrpc on GrpcPort 11190, node 1 2025-06-24T21:27:07.398657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:07.398792Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:07.401379Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:07.467817Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:07.467843Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:07.467856Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:07.467986Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5683 TClient is connected to server localhost:5683 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:27:08.025780Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:08.098369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:10.180507Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630592821581677:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:10.180644Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:10.471777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:27:10.619497Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630592821581790:2298];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:10.619719Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630592821581790:2298];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:10.620045Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630592821581790:2298];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:10.620184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630592821581790:2298];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:10.620306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630592821581790:2298];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:10.620465Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630592821581790:2298];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:10.620575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630592821581790:2298];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:10.620700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630592821581790:2298];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:10.620835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630592821581790:2298];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:10.620979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630592821581790:2298];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:10.621112Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7519630592821581790:2298];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:10.629478Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519630592821581807:2303];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:10.629637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519630592821581807:2303];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:10.630611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519630592821581807:2303];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:10.630778Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519630592821581807:2303];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:10.630915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519630592821581807:2303];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:10.631075Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519630592821581807:2303];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:10.631224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519630592821581807:2303];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:10.631379Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519630592821581807:2303];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:10.631498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519630592821581807:2303];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:10.631628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519630592821581807:2303];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:10.631782Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519630592821581807:2303];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:10.659010Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519630592821581791:2299];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:10.659091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630592821581791:2299];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:10.659381Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630592821581791:2299];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:10.659529Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630592821581791:2299];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;de ... 2025-06-24T21:27:43.707302Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7519630731028968585:2297];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:43.707440Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7519630731028968585:2297];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:43.707562Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7519630731028968585:2297];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:43.707688Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7519630731028968585:2297];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:43.707810Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7519630731028968585:2297];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:43.707929Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7519630731028968585:2297];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:43.708071Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7519630731028968585:2297];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:43.712277Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:27:43.712342Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:27:43.712457Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:27:43.712491Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:27:43.712720Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:27:43.712753Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:27:43.712850Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:27:43.712896Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:27:43.712947Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:27:43.712977Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:27:43.713211Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:27:43.713242Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:27:43.713519Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:27:43.713554Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:27:43.713705Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:27:43.713739Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:27:43.713812Z node 3 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:27:43.713861Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:27:43.713898Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:27:43.714605Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:27:43.714649Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:27:43.720079Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7519630731028968585:2297];ev=NActors::IEventHandle;tablet_id=72075186224037889;tx_id=281474976710658;this=88923024282624;method=TTxController::StartProposeOnExecute;tx_info=281474976710658:TX_KIND_SCHEMA;min=1750800463719;max=18446744073709551615;plan=0;src=[3:7519630713849099001:2148];cookie=22:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:43.726612Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:43.726760Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:43.736208Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:43.736967Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:43.737634Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:43.744187Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:43.776264Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630731028968696:2319], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:43.776362Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:43.776769Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630731028968701:2322], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:43.782260Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:43.794140Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630731028968703:2323], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:27:43.850143Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630731028968754:2430] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:44.619340Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630713849098715:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:44.619453Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:45.026651Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976710665;tx_id=281474976710665;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710665; 2025-06-24T21:27:45.027873Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976710665;tx_id=281474976710665;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710665; 2025-06-24T21:27:45.028537Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976710665;tx_id=281474976710665;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710665; >> KqpNotNullColumns::UpdateTable_UniqIndexPg [GOOD] >> KqpNotNullColumns::UpdateTable_Immediate >> KqpQueryService::StreamExecuteQueryPure [GOOD] >> KqpQueryService::StreamExecuteQueryMultiResult >> KqpQueryServiceScripts::ExecuteScriptStatsBasic [GOOD] >> KqpQueryServiceScripts::ExecuteScriptStatsFull >> TColumnShardTestSchema::TTL+Reboot+Internal-FirstPkColumn >> KqpQueryService::ExecuteQueryPg [GOOD] >> KqpQueryService::ExecuteQueryPgTableSelect >> KqpQueryService::TableSink_OltpLiteralUpsert >> KqpQueryService::SessionFromPoolSuccess >> KqpQueryService::TableSink_OlapUpsert [GOOD] >> KqpQueryService::TableSink_OltpDelete >> KqpNewEngine::UnionAllPure [GOOD] >> KqpNewEngine::StreamLookupForDataQuery+StreamLookupJoin >> KqpQueryService::TableSink_OltpOrder [GOOD] >> KqpNotNullColumns::AlterDropNotNullColumn [GOOD] >> KqpNotNullColumns::AlterAddIndex >> KqpRanges::DuplicateKeyPredicateParam [GOOD] >> KqpRanges::DuplicateKeyPredicateMixed >> TColumnShardTestSchema::ExportAfterFail [GOOD] >> KqpQueryServiceScripts::ForgetScriptExecutionOnLongQuery [GOOD] >> KqpQueryServiceScripts::ForgetScriptExecutionRace ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_OltpOrder [GOOD] Test command err: Trying to start YDB, gRPC: 64352, MsgBus: 1412 2025-06-24T21:27:18.266576Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630625573046972:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:18.266753Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030e0/r3tmp/tmpbiVrZq/pdisk_1.dat 2025-06-24T21:27:18.656828Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64352, node 1 2025-06-24T21:27:18.707942Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:18.708110Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:18.711919Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:18.733444Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:18.733468Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:18.733476Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:18.733588Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1412 TClient is connected to server localhost:1412 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:27:19.279192Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:19.301889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:21.236480Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630638457949469:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:21.236648Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:21.533380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:21.661752Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630638457949624:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:21.661872Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:21.662081Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630638457949629:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:21.666180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:21.678317Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630638457949631:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:27:21.774592Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630638457949682:2428] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:23.267263Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630625573046972:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:23.267364Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 27605, MsgBus: 19469 2025-06-24T21:27:24.653490Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630650839961767:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:24.653565Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030e0/r3tmp/tmpoqrH5D/pdisk_1.dat 2025-06-24T21:27:24.784668Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:24.784761Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:24.787839Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:24.795662Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27605, node 2 2025-06-24T21:27:24.871865Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:24.871887Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:24.871895Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:24.872010Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19469 TClient is connected to server localhost:19469 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:25.365707Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:25.374287Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:27:25.663304Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:27.614539Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630663724864242:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:27.614631Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:27.629696Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:27.676948Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630663724864345:2301], DatabaseId: /R ... QtM2JlNWRlZjYtZjZlMzJmYjg=, ActorId: [3:7519630683688107555:2300], ActorState: ExecuteState, TraceId: 01jyhxe2a310w8esszz41b26tf, Create QueryResponse for error on request, msg: 2025-06-24T21:27:48.523522Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=97; 2025-06-24T21:27:48.523875Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [3:7519630752407587748:2300], Table: `/Root/DataShard` ([72057594046644480:2:1]), SessionActorId: [3:7519630683688107555:2300]Got CONSTRAINT VIOLATION for table `/Root/DataShard`. ShardID=72075186224037888, Sink=[3:7519630752407587748:2300].{
: Error: Conflict with existing key., code: 2012 } 2025-06-24T21:27:48.523957Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:7519630752407587739:2300], SessionActorId: [3:7519630683688107555:2300], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[3:7519630683688107555:2300]. isRollback=0 2025-06-24T21:27:48.524152Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=ZWI4Y2YxNGEtYzM3MWE3MzQtM2JlNWRlZjYtZjZlMzJmYjg=, ActorId: [3:7519630683688107555:2300], ActorState: ExecuteState, TraceId: 01jyhxe2f3dawkxymv6precre4, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [3:7519630752407587740:2300] from: [3:7519630752407587739:2300] 2025-06-24T21:27:48.524224Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:7519630752407587740:2300] TxId: 281474976715756. Ctx: { TraceId: 01jyhxe2f3dawkxymv6precre4, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZWI4Y2YxNGEtYzM3MWE3MzQtM2JlNWRlZjYtZjZlMzJmYjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-24T21:27:48.524383Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=ZWI4Y2YxNGEtYzM3MWE3MzQtM2JlNWRlZjYtZjZlMzJmYjg=, ActorId: [3:7519630683688107555:2300], ActorState: ExecuteState, TraceId: 01jyhxe2f3dawkxymv6precre4, Create QueryResponse for error on request, msg: 2025-06-24T21:27:48.683396Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=98; 2025-06-24T21:27:48.683721Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [3:7519630752407587783:2300], Table: `/Root/DataShard` ([72057594046644480:2:1]), SessionActorId: [3:7519630683688107555:2300]Got CONSTRAINT VIOLATION for table `/Root/DataShard`. ShardID=72075186224037888, Sink=[3:7519630752407587783:2300].{
: Error: Conflict with existing key., code: 2012 } 2025-06-24T21:27:48.683785Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:7519630752407587773:2300], SessionActorId: [3:7519630683688107555:2300], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[3:7519630683688107555:2300]. isRollback=0 2025-06-24T21:27:48.683961Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=ZWI4Y2YxNGEtYzM3MWE3MzQtM2JlNWRlZjYtZjZlMzJmYjg=, ActorId: [3:7519630683688107555:2300], ActorState: ExecuteState, TraceId: 01jyhxe2kxez6fm5wvh60ccjpe, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [3:7519630752407587774:2300] from: [3:7519630752407587773:2300] 2025-06-24T21:27:48.684022Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:7519630752407587774:2300] TxId: 281474976715757. Ctx: { TraceId: 01jyhxe2kxez6fm5wvh60ccjpe, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZWI4Y2YxNGEtYzM3MWE3MzQtM2JlNWRlZjYtZjZlMzJmYjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-24T21:27:48.684163Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=ZWI4Y2YxNGEtYzM3MWE3MzQtM2JlNWRlZjYtZjZlMzJmYjg=, ActorId: [3:7519630683688107555:2300], ActorState: ExecuteState, TraceId: 01jyhxe2kxez6fm5wvh60ccjpe, Create QueryResponse for error on request, msg: 2025-06-24T21:27:48.878068Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=99; 2025-06-24T21:27:48.878392Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [3:7519630752407587822:2326], Table: `/Root/DataShard` ([72057594046644480:2:1]), SessionActorId: [3:7519630683688107678:2326]Got CONSTRAINT VIOLATION for table `/Root/DataShard`. ShardID=72075186224037888, Sink=[3:7519630752407587822:2326].{
: Error: Conflict with existing key., code: 2012 } 2025-06-24T21:27:48.878470Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:7519630752407587812:2326], SessionActorId: [3:7519630683688107678:2326], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[3:7519630683688107678:2326]. isRollback=0 2025-06-24T21:27:48.878673Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=YzY2MjVmOTUtM2NhYTY3MmQtY2ZiMzFlZmQtOGNhYmQyNjg=, ActorId: [3:7519630683688107678:2326], ActorState: ExecuteState, TraceId: 01jyhxe2s1aq4r2jyb5nanfazg, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [3:7519630752407587813:2326] from: [3:7519630752407587812:2326] 2025-06-24T21:27:48.878748Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:7519630752407587813:2326] TxId: 281474976715758. Ctx: { TraceId: 01jyhxe2s1aq4r2jyb5nanfazg, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YzY2MjVmOTUtM2NhYTY3MmQtY2ZiMzFlZmQtOGNhYmQyNjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-24T21:27:48.878924Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=YzY2MjVmOTUtM2NhYTY3MmQtY2ZiMzFlZmQtOGNhYmQyNjg=, ActorId: [3:7519630683688107678:2326], ActorState: ExecuteState, TraceId: 01jyhxe2s1aq4r2jyb5nanfazg, Create QueryResponse for error on request, msg: 2025-06-24T21:27:49.041856Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=100; 2025-06-24T21:27:49.042042Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 100 at tablet 72075186224037888 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:27:49.042164Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 100 at tablet 72075186224037888 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:27:49.042500Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [3:7519630756702555150:2300], Table: `/Root/DataShard` ([72057594046644480:2:1]), SessionActorId: [3:7519630683688107555:2300]Got CONSTRAINT VIOLATION for table `/Root/DataShard`. ShardID=72075186224037888, Sink=[3:7519630756702555150:2300].{
: Error: Conflict with existing key., code: 2012 } 2025-06-24T21:27:49.042568Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:7519630756702555142:2300], SessionActorId: [3:7519630683688107555:2300], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[3:7519630683688107555:2300]. isRollback=0 2025-06-24T21:27:49.042742Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=ZWI4Y2YxNGEtYzM3MWE3MzQtM2JlNWRlZjYtZjZlMzJmYjg=, ActorId: [3:7519630683688107555:2300], ActorState: ExecuteState, TraceId: 01jyhxe2z6a56r09h3kn6kgmwr, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [3:7519630756702555143:2300] from: [3:7519630756702555142:2300] 2025-06-24T21:27:49.042809Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:7519630756702555143:2300] TxId: 281474976715759. Ctx: { TraceId: 01jyhxe2z6a56r09h3kn6kgmwr, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZWI4Y2YxNGEtYzM3MWE3MzQtM2JlNWRlZjYtZjZlMzJmYjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-24T21:27:49.042970Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=ZWI4Y2YxNGEtYzM3MWE3MzQtM2JlNWRlZjYtZjZlMzJmYjg=, ActorId: [3:7519630683688107555:2300], ActorState: ExecuteState, TraceId: 01jyhxe2z6a56r09h3kn6kgmwr, Create QueryResponse for error on request, msg: 2025-06-24T21:27:49.181427Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=101; 2025-06-24T21:27:49.181697Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [3:7519630756702555184:2300], Table: `/Root/DataShard` ([72057594046644480:2:1]), SessionActorId: [3:7519630683688107555:2300]Got CONSTRAINT VIOLATION for table `/Root/DataShard`. ShardID=72075186224037888, Sink=[3:7519630756702555184:2300].{
: Error: Conflict with existing key., code: 2012 } 2025-06-24T21:27:49.181764Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:7519630756702555176:2300], SessionActorId: [3:7519630683688107555:2300], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[3:7519630683688107555:2300]. isRollback=0 2025-06-24T21:27:49.181917Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=ZWI4Y2YxNGEtYzM3MWE3MzQtM2JlNWRlZjYtZjZlMzJmYjg=, ActorId: [3:7519630683688107555:2300], ActorState: ExecuteState, TraceId: 01jyhxe34222b558gzmgh6etgt, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [3:7519630756702555177:2300] from: [3:7519630756702555176:2300] 2025-06-24T21:27:49.181982Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:7519630756702555177:2300] TxId: 281474976715760. Ctx: { TraceId: 01jyhxe34222b558gzmgh6etgt, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZWI4Y2YxNGEtYzM3MWE3MzQtM2JlNWRlZjYtZjZlMzJmYjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-24T21:27:49.182118Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=ZWI4Y2YxNGEtYzM3MWE3MzQtM2JlNWRlZjYtZjZlMzJmYjg=, ActorId: [3:7519630683688107555:2300], ActorState: ExecuteState, TraceId: 01jyhxe34222b558gzmgh6etgt, Create QueryResponse for error on request, msg: >> KqpSqlIn::TupleSelect [GOOD] >> KqpSqlIn::SimpleKey_In_And_In ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::ExportAfterFail [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801029.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130801029.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799829.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; 2025-06-24T21:27:12.162129Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:27:12.193084Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:27:12.194223Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:27:12.207616Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:12.207934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:12.208266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:12.208405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:12.208512Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:12.208599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:12.208674Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:12.208764Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:12.208864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:12.208970Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:12.209044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:12.247480Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:27:12.247829Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:27:12.247914Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:27:12.248125Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:12.251687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:27:12.251819Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:27:12.251896Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:27:12.252022Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:27:12.252103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:27:12.252154Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:27:12.252190Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:27:12.252373Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:12.252443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:27:12.252491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:27:12.252525Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:27:12.252636Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:27:12.252698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:27:12.252749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:27:12.252783Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:27:12.252853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:27:12.252898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:27:12.252930Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:27:12.253152Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:27:12.253199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:27:12.253232Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:27:12.253478Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:27:12.253565Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:27:12.253613Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:27:12.253755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:27:12.253810Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:27:12.253844Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:27:12.253948Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:27:12.254025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:27:12.254073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:27:12.254106Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:27:12.254637Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=65; 2025-06-24T21:27:12.255503Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=795; 2025-06-24T21:27:12.255614Z node 1 :TX_COLUMNS ... 
ce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:80000;schema=timestamp: timestamp[us];);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:27:50.436985Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:27:50.437031Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:27:50.437207Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:27:50.437330Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:80000;schema=timestamp: timestamp[us];);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:27:50.437370Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:27:50.437473Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);columns=1;rows=80000; 2025-06-24T21:27:50.437523Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=640000;num_rows=80000;batch_columns=timestamp; 2025-06-24T21:27:50.437799Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:828:2796];bytes=640000;rows=80000;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us]; Got TEvKqpCompute::TEvScanData [1:829:2797]->[1:828:2796] 2025-06-24T21:27:50.437927Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:27:50.438039Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:27:50.438133Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:27:50.438271Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:27:50.438370Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:27:50.438454Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:27:50.438490Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:829:2797] finished for tablet 9437184 2025-06-24T21:27:50.438932Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:828:2796];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.009},{"events":["f_ack","l_task_result"],"t":0.604},{"events":["l_ProduceResults","f_Finish"],"t":0.606},{"events":["l_ack","l_processing","l_Finish"],"t":0.607}],"full":{"a":1750800469831508,"name":"_full_task","f":1750800469831508,"d_finished":0,"c":0,"l":1750800470438541,"d":607033},"events":[{"name":"bootstrap","f":1750800469831817,"d_finished":8765,"c":1,"l":1750800469840582,"d":8765},{"a":1750800470438253,"name":"ack","f":1750800470435931,"d_finished":2101,"c":2,"l":1750800470438157,"d":2389},{"a":1750800470438238,"name":"processing","f":1750800469840725,"d_finished":421459,"c":16,"l":1750800470438160,"d":421762},{"name":"ProduceResults","f":1750800469834047,"d_finished":5565,"c":20,"l":1750800470438474,"d":5565},{"a":1750800470438477,"name":"Finish","f":1750800470438477,"d_finished":0,"c":0,"l":1750800470438541,"d":64},{"name":"task_result","f":1750800469840749,"d_finished":418924,"c":14,"l":1750800470435713,"d":418924}],"id":"9437184::7"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:27:50.439004Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:828:2796];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:27:50.439434Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:828:2796];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.009},{"events":["f_ack","l_task_result"],"t":0.604},{"events":["l_ProduceResults","f_Finish"],"t":0.606},{"events":["l_ack","l_processing","l_Finish"],"t":0.607}],"full":{"a":1750800469831508,"name":"_full_task","f":1750800469831508,"d_finished":0,"c":0,"l":1750800470439044,"d":607536},"events":[{"name":"bootstrap","f":1750800469831817,"d_finished":8765,"c":1,"l":1750800469840582,"d":8765},{"a":1750800470438253,"name":"ack","f":1750800470435931,"d_finished":2101,"c":2,"l":1750800470438157,"d":2892},{"a":1750800470438238,"name":"processing","f":1750800469840725,"d_finished":421459,"c":16,"l":1750800470438160,"d":422265},{"name":"ProduceResults","f":1750800469834047,"d_finished":5565,"c":20,"l":1750800470438474,"d":5565},{"a":1750800470438477,"name":"Finish","f":1750800470438477,"d_finished":0,"c":0,"l":1750800470439044,"d":567},{"name":"task_result","f":1750800469840749,"d_finished":418924,"c":14,"l":1750800470435713,"d":418924}],"id":"9437184::7"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:829:2797]->[1:828:2796] 2025-06-24T21:27:50.439528Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:27:49.826862Z;index_granules=0;index_portions=2;index_batches=1038;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=9739224;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=9739224;selected_rows=0; 2025-06-24T21:27:50.439574Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:27:50.439852Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:829:2797];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/9739224 160000/9739224 160000/9739224 160000/9739224 >> ColumnStatistics::CountMinSketchServerlessStatistics [GOOD] >> KqpQueryServiceScripts::ExplainScript [GOOD] >> KqpQueryServiceScripts::ForgetScriptExecution >> KqpQueryService::TableSink_DisableSink [GOOD] >> KqpService::SessionBusyRetryOperation [GOOD] >> 
KqpService::RangeCache-UseCache >> KqpQueryService::MaterializeTxResults [GOOD] >> KqpQueryService::MixedReadQueryWithoutStreamLookup |97.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_DisableSink [GOOD] Test command err: Trying to start YDB, gRPC: 10809, MsgBus: 4735 2025-06-24T21:27:15.319391Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630611278511695:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:15.319489Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030e5/r3tmp/tmpEtDc0S/pdisk_1.dat 2025-06-24T21:27:15.659190Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630611278511671:2079] 1750800435318696 != 1750800435318699 2025-06-24T21:27:15.660227Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10809, node 1 2025-06-24T21:27:15.723726Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:15.723861Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:15.725481Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:15.746194Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:15.746232Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:15.746248Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:15.746445Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4735 TClient is connected to server localhost:4735 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:16.303432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:16.333211Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:18.410483Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630624163414202:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:18.410631Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:18.720554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:27:18.884478Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519630624163414354:2299];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:18.884728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519630624163414354:2299];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:18.885016Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519630624163414354:2299];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:18.885274Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519630624163414354:2299];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:18.885402Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519630624163414354:2299];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:18.885550Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519630624163414354:2299];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:18.885665Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519630624163414354:2299];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:18.885833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519630624163414354:2299];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:18.885979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519630624163414354:2299];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:18.886102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519630624163414354:2299];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:18.886253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037896;self_id=[1:7519630624163414354:2299];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:18.906502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519630624163414374:2305];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:18.906632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519630624163414374:2305];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:18.906826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519630624163414374:2305];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:18.906949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519630624163414374:2305];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:18.907049Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519630624163414374:2305];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:18.907159Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519630624163414374:2305];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:18.907288Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519630624163414374:2305];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:18.907421Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519630624163414374:2305];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:18.907526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519630624163414374:2305];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:18.907626Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519630624163414374:2305];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:18.907725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519630624163414374:2305];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:18.924359Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519630624163414355:2300];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:18.924420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630624163414355:2300];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:18.924599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630624163414355:2300];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:18.924706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630624163414355:2300];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;de ... 0;self_id=[3:7519630756587559530:2309];ev=NActors::IEventHandle;tablet_id=72075186224037890;tx_id=281474976710658;this=88923062501504;method=TTxController::StartProposeOnExecute;tx_info=281474976710658:TX_KIND_SCHEMA;min=1750800469707;max=18446744073709551615;plan=0;src=[3:7519630735112722528:2149];cookie=32:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:49.708441Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[3:7519630756587559526:2305];ev=NActors::IEventHandle;tablet_id=72075186224037899;tx_id=281474976710658;this=88923059372448;method=TTxController::StartProposeOnExecute;tx_info=281474976710658:TX_KIND_SCHEMA;min=1750800469708;max=18446744073709551615;plan=0;src=[3:7519630735112722528:2149];cookie=122:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:49.715890Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:49.715944Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:49.724371Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:49.724380Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:49.725294Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:49.725294Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:49.733032Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:49.733033Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:49.733883Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:49.733882Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:49.741810Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:49.742473Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:49.742751Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:49.743259Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:49.750908Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:49.750908Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:49.751820Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:49.751823Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:49.757094Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:49.757962Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:49.758435Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037898;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:49.759501Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:49.765517Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037900;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:49.766370Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:49.766453Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:49.767193Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:49.774409Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:49.775340Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:49.775512Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:49.777705Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:49.783500Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:49.785069Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:49.785171Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630735112722220:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:49.785352Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:49.832650Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630756587560034:2398], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:49.832787Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:49.833967Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630756587560039:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:49.839953Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:49.853542Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630756587560041:2402], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:27:49.950992Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630756587560094:2695] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:50.074741Z node 3 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:2029: ActorId: [3:7519630760882527415:2397] TxId: 281474976710661. Ctx: { TraceId: 01jyhxe3t41gqzptjz8ffgggsm, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZGZjYWU1MDYtM2JhN2JjYzUtOTg5YTRkNGQtNzA4NmNmNjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Data manipulation queries do not support column shard tables. 2025-06-24T21:27:50.074992Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=ZGZjYWU1MDYtM2JhN2JjYzUtOTg5YTRkNGQtNzA4NmNmNjk=, ActorId: [3:7519630756587560032:2397], ActorState: ExecuteState, TraceId: 01jyhxe3t41gqzptjz8ffgggsm, Create QueryResponse for error on request, msg: >> KqpQueryService::ExecuteQueryPgTableSelect [GOOD] >> KqpQueryService::ExecuteQueryMultiScalar ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> ColumnStatistics::CountMinSketchServerlessStatistics [GOOD] Test command err: 2025-06-24T21:24:02.960099Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:574:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:24:02.960713Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:02.960804Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004871/r3tmp/tmp9hrJCt/pdisk_1.dat 2025-06-24T21:24:03.752933Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2370, node 1 2025-06-24T21:24:04.806859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:04.806910Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:04.806950Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:04.807345Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:04.817036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:04.965572Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:04.965724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:04.992916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63401 2025-06-24T21:24:05.660844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:09.327740Z node 4 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 4 2025-06-24T21:24:09.390692Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:09.390822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:09.423587Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-24T21:24:09.427973Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:09.673018Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:09.698549Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.699258Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.699856Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.700025Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.700299Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.700400Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.700488Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.700567Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.700654Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.910314Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:09.910439Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:09.928796Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:10.123999Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:10.198519Z node 4 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:24:10.198641Z node 4 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:24:10.229257Z node 4 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:24:10.229408Z node 4 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:24:10.229731Z node 4 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:24:10.229797Z node 4 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:24:10.229863Z node 4 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:24:10.229923Z node 4 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:24:10.229995Z node 4 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:24:10.230065Z node 4 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:24:10.230558Z node 4 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:24:10.267201Z node 4 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:10.267340Z node 4 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [4:1961:2565], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:10.271569Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [4:1968:2571] 2025-06-24T21:24:10.282329Z node 4 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-24T21:24:10.287513Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [4:2015:2592] 2025-06-24T21:24:10.292159Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [4:2015:2592], schemeshard id = 72075186224037897 2025-06-24T21:24:10.319128Z node 4 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:24:10.319248Z node 4 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:24:10.319323Z node 4 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-24T21:24:10.338557Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:10.346933Z node 4 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:24:10.347092Z node 4 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:24:10.569843Z node 4 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:24:10.801424Z node 4 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:24:10.931896Z node 4 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-24T21:24:11.788653Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:11.819102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:15.363258Z node 3 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 3 2025-06-24T21:24:15.413572Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:15.413685Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:15.414081Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:15.414162Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:15.461443Z node 4 :HIVE WARN: hive_impl.cpp:781: HIVE#72075186224037888 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-24T21:24:15.464607Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:15.562794Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:15.684611Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-24T21:24:15.686099Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:15.796593Z node 4 :STATISTICS DEBUG: schemeshard_impl.cpp:7890: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-24T21:24:15.796660Z node 4 :STATISTICS DEBUG: schemeshard_impl.cpp:7906: Handle TEvTxProxySchem ... Z node 4 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-24T21:27:45.752132Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:27:45.752217Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:27:45.752260Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is column table. 
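Note on the "Table _statistics updater" entries above: they record the creation of /Root/Shared/.metadata/_statistics, the table the statistics aggregator writes into. A rough YQL sketch of that table is given below for orientation only; the column names are taken from the UPSERT that appears later in this log, while the Uint32/String types for column_tag/data and the primary-key layout are assumptions not confirmed by the log.

    -- Hypothetical sketch of the table the _statistics updater creates.
    -- Column names come from the UPSERT later in this log; the types of
    -- column_tag and data and the primary key are assumed, not logged.
    CREATE TABLE `/Root/Shared/.metadata/_statistics` (
        owner_id Uint64,
        local_path_id Uint64,
        stat_type Uint32,
        column_tag Uint32,
        data String,
        PRIMARY KEY (owner_id, local_path_id, stat_type, column_tag)
    );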
2025-06-24T21:27:45.752311Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-24T21:27:45.757246Z node 4 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:27:45.777061Z node 4 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:27:45.777789Z node 4 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:27:45.777874Z node 4 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:27:45.779131Z node 4 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:27:45.810362Z node 4 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:27:45.810683Z node 4 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 4, Round: 2, current Round: 0 2025-06-24T21:27:45.811625Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:12177:7523], server id = [4:12178:7524], tablet id = 72075186224037911, status = OK 2025-06-24T21:27:45.812058Z node 4 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [4:12177:7523], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-24T21:27:45.816628Z node 4 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037911 2025-06-24T21:27:45.816774Z node 4 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 4 2025-06-24T21:27:45.816981Z node 4 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:27:45.817192Z node 4 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:27:45.817566Z node 4 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-24T21:27:45.820111Z node 4 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 4, client id = [4:12177:7523], server id = [4:12178:7524], tablet id = 72075186224037911 2025-06-24T21:27:45.820180Z node 4 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:27:45.820880Z node 4 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:27:45.868098Z node 4 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [4:12198:7543]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:27:45.868533Z node 4 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:27:45.868606Z node 4 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [4:12198:7543], StatRequests.size() = 1 2025-06-24T21:27:46.005880Z node 4 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=4&id=M2QyMWFhOGMtMzI5OTkxODYtMThjNGNiZWUtZGQ5NjdhYWE=, TxId: 2025-06-24T21:27:46.005955Z node 4 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=4&id=M2QyMWFhOGMtMzI5OTkxODYtMThjNGNiZWUtZGQ5NjdhYWE=, TxId: 2025-06-24T21:27:46.006712Z node 4 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:27:46.022416Z node 4 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-24T21:27:46.022479Z node 4 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:27:46.807695Z node 4 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-24T21:27:46.807795Z node 4 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-24T21:27:49.459775Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:27:49.460133Z node 4 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 4 2025-06-24T21:27:49.482831Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:27:49.482912Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:27:49.482956Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037905, LocalPathId: 2] is column table. 
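The RunDataQuery entry above is the statistics upsert squeezed onto one line. Reflowed as a standalone YQL sketch it reads as follows; the element types of the two List parameters are not visible in the captured log, so Uint32 and String below are assumptions.

    -- The statistics upsert from the log entry above, reflowed for readability.
    -- The List element types are not shown in the log and are assumed here.
    DECLARE $owner_id AS Uint64;
    DECLARE $local_path_id AS Uint64;
    DECLARE $stat_type AS Uint32;
    DECLARE $column_tags AS List<Uint32>;  -- element type assumed
    DECLARE $data AS List<String>;         -- element type assumed
    UPSERT INTO `.metadata/_statistics`
        (owner_id, local_path_id, stat_type, column_tag, data)
    VALUES
        ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]),
        ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]);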
2025-06-24T21:27:49.482994Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037905, LocalPathId: 2] 2025-06-24T21:27:49.488935Z node 4 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-24T21:27:49.509181Z node 4 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-24T21:27:49.509916Z node 4 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-24T21:27:49.509994Z node 4 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-24T21:27:49.513942Z node 4 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:27:49.554519Z node 4 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:27:49.554817Z node 4 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 4, Round: 3, current Round: 0 2025-06-24T21:27:49.555729Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:12344:7615], server id = [4:12345:7616], tablet id = 72075186224037912, status = OK 2025-06-24T21:27:49.555834Z node 4 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [4:12344:7615], path = { OwnerId: 72075186224037905 LocalId: 2 } 2025-06-24T21:27:49.560110Z node 4 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037912 2025-06-24T21:27:49.560236Z node 4 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 4 2025-06-24T21:27:49.560516Z node 4 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:27:49.560708Z node 4 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:27:49.561023Z node 4 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-24T21:27:49.563401Z node 4 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 4, client id = [4:12344:7615], server id = [4:12345:7616], tablet id = 72075186224037912 2025-06-24T21:27:49.563450Z node 4 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:27:49.564385Z node 4 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:27:49.598343Z node 4 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=4&id=ZjkxYThjMGItMzFiY2VhNDItOGI0MzEzYzctNTE4MjczZjM=, TxId: 2025-06-24T21:27:49.598424Z node 4 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=4&id=ZjkxYThjMGItMzFiY2VhNDItOGI0MzEzYzctNTE4MjczZjM=, TxId: 2025-06-24T21:27:49.599698Z node 4 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:27:49.601385Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:12368:6170]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:27:49.601839Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:27:49.601909Z node 1 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:27:49.606731Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:27:49.606816Z node 1 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-24T21:27:49.606902Z node 1 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037899, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-24T21:27:49.624845Z node 1 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 2025-06-24T21:27:49.626260Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:12368:6170]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:27:49.626645Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:27:49.626723Z node 1 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:27:49.627123Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:27:49.627223Z node 1 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 2 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-24T21:27:49.627282Z node 1 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 2 ], PathId[ [OwnerId: 
72075186224037905, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ]
2025-06-24T21:27:49.632499Z node 1 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 2
>> KqpQueryService::TableSink_OltpLiteralUpsert [GOOD]
>> KqpQueryService::TableSink_OltpInsert
>> KqpQueryService::TableSink_HtapInteractive-withOltpSink
>> TConsoleTests::TestAlterTenantTooManyStorageResourcesForRunningExtSubdomain [GOOD]
>> KqpReturning::ReturningWorksIndexedInsert+QueryService [GOOD]
>> KqpReturning::ReturningWorksIndexedInsert-QueryService
>> KqpQueryService::StreamExecuteQueryMultiResult [GOOD]
>> KqpQueryService::TableSink_BadTransactions
>> KqpRanges::UpdateWhereInFullScan-UseSink [GOOD]
>> KqpRanges::ScanKeyPrefix
>> KqpReturning::ReturningColumnsOrder [GOOD]
>> KqpReturning::ReturningTypes
>> KqpNewEngine::SequentialReadsPragma+Enabled [GOOD]
>> KqpNewEngine::SequentialReadsPragma-Enabled
------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> TConsoleTests::TestAlterTenantTooManyStorageResourcesForRunningExtSubdomain [GOOD]
Test command err:
2025-06-24T21:22:33.011826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs
2025-06-24T21:22:33.011907Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded
2025-06-24T21:22:33.192948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2025-06-24T21:22:35.106937Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs
2025-06-24T21:22:35.107007Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded
2025-06-24T21:22:35.173034Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2025-06-24T21:22:36.572668Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs
2025-06-24T21:22:36.572736Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded
2025-06-24T21:22:36.618594Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2025-06-24T21:22:41.684151Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs
2025-06-24T21:22:41.684236Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded
2025-06-24T21:22:41.736972Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:46.750134Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:46.750212Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:46.796561Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:48.337769Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:48.337852Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:48.404753Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:49.123447Z node 6 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 7 2025-06-24T21:22:49.123601Z node 6 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037936131] NodeDisconnected NodeId# 7 2025-06-24T21:22:49.123757Z node 6 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046447617] NodeDisconnected NodeId# 7 2025-06-24T21:22:49.124317Z node 7 :TX_PROXY WARN: proxy_impl.cpp:227: actor# [7:351:2088] HANDLE TEvClientDestroyed from tablet# 72057594046447617 2025-06-24T21:22:50.334758Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:50.334844Z node 8 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:50.384643Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:51.156383Z node 8 :BS_CONTROLLER ERROR: {BSC26@console_interaction.cpp:112} failed to parse config obtained from Console ErrorReason# ydb/library/yaml_config/yaml_config_parser.cpp:1362: Condition violated: `config.HasDomainsConfig()' Yaml# --- metadata: kind: MainConfig cluster: "" version: 1 config: log_config: cluster_name: cluster1 allowed_labels: test: type: enum values: ? 
true selector_config: [] 2025-06-24T21:22:52.405180Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:22:52.405258Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:22:52.468969Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:22:53.661250Z node 10 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T21:22:53.661770Z node 10 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/002769/r3tmp/tmpgkB3ia/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T21:22:53.662411Z node 10 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/002769/r3tmp/tmpgkB3ia/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/002769/r3tmp/tmpgkB3ia/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 3770737325196069202 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T21:22:53.834478Z node 9 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 9 Type# 268639257 2025-06-24T21:22:53.834652Z node 9 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 9 Type# 268639257 2025-06-24T21:22:53.846350Z node 10 :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1000 VDISK[80000000:_:0:0:0]: (2147483648) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# 
true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/002769/r3tmp/tmpgkB3ia/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T21:22:53.860935Z node 10 :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1000 VDISK[80000001:_:0:0:0]: (2147483649) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyn ... 
2025-06-24T21:26:40.856871Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-1) cannot create pool '/dc-1/users/tenant-1:hdd-1' (0): Group fit error BoxId# 1 StoragePoolId# 2 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:26:40.857263Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-1 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 2 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:26:46.881761Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-1) cannot create pool '/dc-1/users/tenant-1:hdd-1' (0): Group fit error BoxId# 1 StoragePoolId# 2 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:26:46.881934Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-1 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 2 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:26:52.952975Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-1) cannot create pool '/dc-1/users/tenant-1:hdd-1' (0): Group fit error BoxId# 1 StoragePoolId# 2 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:26:52.953334Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-1 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 2 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:26:53.073841Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:26:53.074367Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# 
{[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:26:58.840947Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:26:58.841175Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:27:05.162164Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:27:05.162511Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:27:11.607177Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:27:11.607308Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:27:17.980466Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:27:17.980794Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool 
/dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:27:24.477080Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:27:24.477428Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:27:31.199600Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:27:31.199866Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:27:37.616259Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:27:37.616585Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:27:44.513668Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# 
{[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:27:44.513939Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:27:51.366256Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-24T21:27:51.366606Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} |97.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> KqpQueryService::TableSink_OltpDelete [GOOD] >> KqpDocumentApi::RestrictWriteExplicitPrepare |97.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest |97.8%| [TA] $(B)/ydb/core/cms/console/ut/test-results/unittest/{meta.json ... results_accumulator.log} |97.8%| [TA] {RESULT} $(B)/ydb/core/cms/console/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpQueryServiceScripts::ExecuteScriptStatsFull [GOOD] >> KqpQueryServiceScripts::ExecuteScriptStatsNone >> KqpQueryService::ExecuteQueryUpsertDoesntChangeIndexedValuesIfNotChanged [GOOD] >> KqpQueryService::ExecuteQueryPure >> KqpNewEngine::FullScanCount [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_OltpDelete [GOOD] Test command err: Trying to start YDB, gRPC: 5164, MsgBus: 2495 2025-06-24T21:27:38.312087Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630711534643767:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:38.312173Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030cb/r3tmp/tmp9FkyuD/pdisk_1.dat 2025-06-24T21:27:38.714280Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:38.714604Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630711534643579:2079] 1750800458290154 != 1750800458290157 TServer::EnableGrpc on GrpcPort 5164, node 1 2025-06-24T21:27:38.763249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:38.771588Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:38.780109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:38.811610Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:38.811633Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:38.811641Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:38.811793Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2495 TClient is connected to server localhost:2495 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:27:39.297041Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:39.442740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:41.509342Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630724419546110:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:41.509475Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:41.789399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:27:41.969286Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519630724419546248:2298];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:41.969285Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630724419546243:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:41.969560Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630724419546243:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:41.969594Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519630724419546248:2298];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:41.969875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519630724419546248:2298];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:41.970015Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519630724419546248:2298];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:41.970139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519630724419546248:2298];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:41.970269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519630724419546248:2298];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:41.970323Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630724419546243:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:41.970423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519630724419546248:2298];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:41.970479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630724419546243:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:41.970541Z 
node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519630724419546248:2298];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:41.970586Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630724419546243:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:41.970675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519630724419546248:2298];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:41.970721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630724419546243:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:41.970810Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519630724419546248:2298];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:41.970863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630724419546243:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:41.970952Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519630724419546248:2298];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:41.970987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630724419546243:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:41.972845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630724419546243:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:41.973103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630724419546243:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:41.973249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630724419546243:2295];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:42.009614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519630724419546353:2304];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:42.009686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037892;self_id=[1:7519630724419546353:2304];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:42.009916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519630724419546353:2304];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:42.010035Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519630724419546353:2304];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;desc ... ode 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-24T21:27:47.749963Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:47.750500Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:47.755943Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-24T21:27:47.755964Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-24T21:27:47.771399Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630750423326517:2360], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:47.771490Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:47.771863Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630750423326522:2363], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:47.775051Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:47.786612Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630750423326524:2364], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:27:47.854184Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630750423326575:2568] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:47.984874Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715663;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-24T21:27:47.985655Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715663;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-24T21:27:48.590984Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:27:48.591396Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:27:48.633683Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519630754718294223:2513], status: PRECONDITION_FAILED, issues:
: Error: Type annotation, code: 1030
:2:29: Error: At function: KiWriteTable!
:2:29: Error: Missing key column in input: Col1 for table: /Root/DataShard, code: 2029 2025-06-24T21:27:48.634778Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=YzkyMzBiNjktOGRmMTQ1MDEtNjY4YTQ5OTgtMzE0YzQ4OGM=, ActorId: [2:7519630754718294221:2512], ActorState: ExecuteState, TraceId: 01jyhxe2kxc7jm4qmexw34a2pk, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id: Trying to start YDB, gRPC: 23061, MsgBus: 31385 2025-06-24T21:27:49.743995Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630758987442676:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:49.744046Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030cb/r3tmp/tmpG2kgQU/pdisk_1.dat 2025-06-24T21:27:49.909713Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:49.909801Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:49.912508Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:49.928370Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:49.928749Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630758987442647:2079] 1750800469742292 != 1750800469742295 TServer::EnableGrpc on GrpcPort 23061, node 3 2025-06-24T21:27:49.995705Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:49.995730Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:49.995741Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:49.995864Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31385 TClient is connected to server localhost:31385 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
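The PRECONDITION_FAILED compile error above (code 2029, "Missing key column in input: Col1") is raised during type annotation: KiWriteTable rejects a write statement whose input row does not contain every primary-key column of the target table, before anything reaches the shards. A minimal YQL sketch of the failing shape, assuming only that Col1 is a key column of /Root/DataShard; the second column name and the literal types are illustrative, not taken from the test:

    -- Rejected with code 2029: the key column Col1 is absent from the input row
    UPSERT INTO `/Root/DataShard` (Col2) VALUES ("value");

    -- Accepted: every primary-key column is present in the input
    UPSERT INTO `/Root/DataShard` (Col1, Col2) VALUES (1u, "value");
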
2025-06-24T21:27:50.621422Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:50.628481Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:27:50.754644Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:53.409758Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630776167312466:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:53.409871Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:53.426070Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:53.516001Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630776167312569:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:53.516079Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:53.516130Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630776167312574:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:53.519849Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:53.532288Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630776167312576:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:27:53.626366Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630776167312627:2389] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpQueryService::SessionFromPoolSuccess [GOOD] >> KqpQueryService::SeveralCTAS+UseSink >> KqpQueryService::ShowCreateTable >> KqpNotNullColumns::UpdateTable_Immediate [GOOD] >> KqpService::SwitchCache-UseCache >> TColumnShardTestSchema::ColdCompactionSmoke >> KqpQueryService::DdlSecret [GOOD] >> KqpQueryService::DdlMixedDml ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::FullScanCount [GOOD] Test command err: Trying to start YDB, gRPC: 21793, MsgBus: 64122 2025-06-24T21:27:03.082594Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630562482298114:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:03.082831Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0032d7/r3tmp/tmp2b8C7L/pdisk_1.dat 2025-06-24T21:27:03.391944Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:03.392394Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630562482298085:2079] 1750800423081810 != 1750800423081813 TServer::EnableGrpc on GrpcPort 21793, node 1 2025-06-24T21:27:03.450774Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:03.452694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:03.455097Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:03.476920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:03.476945Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:03.476962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:03.477097Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64122 TClient is connected to server localhost:64122 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:04.045672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:04.063064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:04.096374Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:27:04.208659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:04.339431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:04.424573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:05.939909Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630571072234319:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:05.940040Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.274266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.304357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.331179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.362197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.392901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.434267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.463453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.514449Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630575367202269:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.514542Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.514588Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630575367202274:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.517731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:06.527159Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630575367202276:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:06.618224Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630575367202327:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:08.082424Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630562482298114:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:08.082490Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 7317, MsgBus: 9927 2025-06-24T21:27:08.814230Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630581077200913:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:08.814303Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath ... Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:47.063364Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519630743421507365:2079] 1750800466890903 != 1750800466890906 2025-06-24T21:27:47.074776Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:47.074895Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:47.077817Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16496, node 7 2025-06-24T21:27:47.126419Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:47.126448Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:47.126459Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:47.126636Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15710 TClient is connected to server localhost:15710 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:47.803775Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:47.813432Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:27:47.819997Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:47.903275Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:47.921505Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:27:48.220496Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:48.331836Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:27:51.244738Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630764896345486:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:51.244885Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:51.322507Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:51.397097Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:51.440736Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:51.484508Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:51.526906Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:51.573221Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:51.631963Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:51.748926Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630764896346145:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:51.749031Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:51.749390Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630764896346150:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:51.754750Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:51.768759Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519630764896346152:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:51.867919Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519630764896346203:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:51.892828Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519630743421507401:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:51.892898Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Warning: Type annotation, code: 1030
:3:17: Warning: At function: RemovePrefixMembers, At function: PersistableRepr, At function: SqlProject
:3:33: Warning: At function: Filter, At lambda, At function: Coalesce
:3:58: Warning: At function: SqlIn
:3:58: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108
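The code 1108 warning above (emitted again below) comes from SqlIn over nullable arguments: when either side of IN can be NULL, the result may not be what standard SQL, or the query author, expects, which is exactly what the message cautions about. The remedy is the pragma the warning itself names; the sketch below only shows where that pragma goes, with a table and column invented for illustration, and leaves the precise NULL and empty-collection rules to the YQL documentation:

    -- Opt in to ANSI-style IN handling for NULLs and empty collections
    PRAGMA AnsiInForEmptyOrNullableItemsCollections;

    -- Value is assumed to be an optional (nullable) column here, which is
    -- what triggers warning 1108 when the pragma is not set.
    SELECT * FROM `/Root/SomeTable` WHERE Value IN (1, 2, 3);
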
: Warning: Type annotation, code: 1030
:3:17: Warning: At function: RemovePrefixMembers, At function: PersistableRepr, At function: SqlProject
:3:33: Warning: At function: Filter, At lambda, At function: Coalesce
:3:58: Warning: At function: SqlIn
:3:58: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 >> TColumnShardTestSchema::RebootHotTiersTtl [GOOD] >> KqpQueryService::TableSink_OltpInsert [GOOD] >> KqpQueryService::TableSink_OltpInteractive >> KqpNotNullColumns::AlterAddIndex [GOOD] >> KqpQueryService::StreamExecuteQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::UpdateTable_Immediate [GOOD] Test command err: Trying to start YDB, gRPC: 30451, MsgBus: 20173 2025-06-24T21:27:03.235669Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630560384698559:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:03.235771Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003297/r3tmp/tmpRAIHMS/pdisk_1.dat 2025-06-24T21:27:03.632914Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:03.633626Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630560384698537:2079] 1750800423234789 != 1750800423234792 TServer::EnableGrpc on GrpcPort 30451, node 1 2025-06-24T21:27:03.648491Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:03.648610Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:03.655963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:03.696283Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:03.696311Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:03.696322Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:03.696461Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20173 TClient is connected to server localhost:20173 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:04.205344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:04.215934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:27:04.244240Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:06.053143Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630573269601065:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.053253Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.343258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.466737Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630573269601169:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.466837Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.466911Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630573269601174:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.470347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:06.480123Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630573269601176:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:27:06.583972Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630573269601227:2391] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:06.710240Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519630573269601269:2315], status: PRECONDITION_FAILED, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:13: Error: Missing key column in input: Key for table: /Root/TestUpsertNotNullPk, code: 2029 2025-06-24T21:27:06.710453Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MTQ2OWM2NzQtNzE0YWU2ODctMzNkMGUxZTQtN2VmZTZhYTU=, ActorId: [1:7519630573269601062:2288], ActorState: ExecuteState, TraceId: 01jyhxcsp12q4yakx04yrqbd7d, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id: 2025-06-24T21:27:06.731094Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519630573269601278:2319], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:47: Error: Failed to convert type: Struct<'Key':Null,'Value':String> to Struct<'Key':Uint64,'Value':String?>
:1:47: Error: Failed to convert 'Key': Null to Uint64
:1:47: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-24T21:27:06.731320Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MTQ2OWM2NzQtNzE0YWU2ODctMzNkMGUxZTQtN2VmZTZhYTU=, ActorId: [1:7519630573269601062:2288], ActorState: ExecuteState, TraceId: 01jyhxcspw0xgd8pqp0t1agzta, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 65485, MsgBus: 27096 2025-06-24T21:27:07.369851Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630579559376848:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:07.369924Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003297/r3tmp/tmprrcJ0S/pdisk_1.dat 2025-06-24T21:27:07.494482Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65485, node 2 2025-06-24T21:27:07.552274Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:07.552441Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:07.555115Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:07.580851Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:07.580873Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:07.580881Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:07.580995Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27096 TClient is connected to server localhost:27096 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
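The two compile failures above hit the same table from different directions, and the Struct types quoted in the second message pin down its scheme: Key is a non-optional Uint64 and Value is an optional String. Code 2029 fires when Key is missing from the input row altogether; code 2031 fires when Key is supplied but as NULL, which cannot be converted to the non-nullable key type. A short YQL sketch of both failing statements and a passing one (literal values are illustrative):

    -- code 2029: the key column is not present in the input
    UPSERT INTO `/Root/TestUpsertNotNullPk` (Value) VALUES ("a");

    -- code 2031: NULL cannot be converted to the non-optional Uint64 key
    UPSERT INTO `/Root/TestUpsertNotNullPk` (Key, Value) VALUES (NULL, "a");

    -- accepted: the key is present and non-NULL
    UPSERT INTO `/Root/TestUpsertNotNullPk` (Key, Value) VALUES (1u, "a");
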
2025-06-24T21:27:08.077830Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operatio ... ain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:32.523922Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:35.728951Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519630699185692627:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:35.728954Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519630699185692632:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:35.729048Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:35.734514Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:35.752169Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519630699185692641:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:27:35.823471Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7519630699185692692:2333] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:35.886250Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:36.514051Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519630682005822844:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:36.514148Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:44.913475Z node 6 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhxdx477pz6ktfcdnnk5mys, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=6&id=YTlhMDY4ZDEtNDk0NDMxMS1lZjk3MTdhNi01NGVmODM3OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-24T21:27:44.913734Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=6&id=YTlhMDY4ZDEtNDk0NDMxMS1lZjk3MTdhNi01NGVmODM3OQ==, ActorId: [6:7519630729250464588:2490], ActorState: ExecuteState, TraceId: 01jyhxdx477pz6ktfcdnnk5mys, Create QueryResponse for error on request, msg: 2025-06-24T21:27:46.673490Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:27:46.673528Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:46.895161Z node 6 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyhxdz0w1t0vn4n9bmjw7a03, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=6&id=MTMzOTE4NTMtYjVlYWM2NDctNjhhYjNhMzQtYjZkMDA4ZDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 
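Unlike the compile-time failures earlier in this test, the "TKqpEnsure failed" error reported by kqp_literal_executer.cpp is a runtime check: the query compiled, and a guard that KQP had placed into the literal (constant-folding) phase was violated when it executed. Since the test is KqpNotNullColumns::UpdateTable_Immediate, the natural reading is an update that would put NULL into a column declared NOT NULL; the sketch below is only that reading, with a hypothetical table and column that do not appear in the log:

    -- Hypothetical: Value is declared NOT NULL, so the KQP-inserted guard
    -- rejects the write at execution time rather than at compile time.
    UPDATE `/Root/SomeNotNullTable` SET Value = NULL WHERE Key = 1u;
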
2025-06-24T21:27:46.895487Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=6&id=MTMzOTE4NTMtYjVlYWM2NDctNjhhYjNhMzQtYjZkMDA4ZDM=, ActorId: [6:7519630737840399252:2515], ActorState: ExecuteState, TraceId: 01jyhxdz0w1t0vn4n9bmjw7a03, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 14447, MsgBus: 20724 2025-06-24T21:27:47.843644Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519630751241093415:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:47.843734Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003297/r3tmp/tmpFEKgTC/pdisk_1.dat 2025-06-24T21:27:48.067070Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:48.083204Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:48.083334Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:48.089118Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14447, node 7 2025-06-24T21:27:48.167945Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:48.167986Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:48.167999Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:48.168180Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20724 2025-06-24T21:27:48.891321Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20724 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:48.959590Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:52.664635Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630772715930495:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:52.664755Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:52.704688Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:52.794027Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630772715930645:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:52.794155Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:52.794465Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630772715930650:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:52.799498Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:52.811542Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519630772715930652:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:27:52.846254Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519630751241093415:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:52.846360Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:52.871011Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519630772715930703:2430] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpNewEngine::StreamLookupForDataQuery+StreamLookupJoin [GOOD] >> KqpNewEngine::StreamLookupForDataQuery-StreamLookupJoin ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootHotTiersTtl [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801036.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801036.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150801036.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801036.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801036.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130801036.000000s;Name=;Codec=}; WaitEmptyAfter=1;Tiers={{Column=timestamp;EvictAfter=150801036.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801036.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130799836.000000s;Name=;Codec=}; 2025-06-24T21:27:16.587713Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:27:16.614827Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:27:16.615219Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:27:16.622945Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:16.623227Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:16.623512Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:16.623643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:16.623757Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:16.623887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:16.623998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:16.624111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:16.624228Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:16.624348Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:16.624458Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:16.655594Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:27:16.655892Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:27:16.655976Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:27:16.656179Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:16.656372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:27:16.656446Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:27:16.656493Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:27:16.656592Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:27:16.656662Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:27:16.656710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:27:16.656745Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:27:16.656922Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:16.656994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:27:16.657041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:27:16.657076Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:27:16.657187Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:27:16.657248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:27:16.657299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:27:16.657335Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:27:16.657403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:27:16.657447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2025-06-24T21:27:16.657481Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:27:16.657689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:27:16.657736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:27:16.657772Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:27:16.657989Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:27:16.658049Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:27:16.658083Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:27:16.658242Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:27:16.658304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:27:16.658355Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:27:16.658449Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:27:16.658524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:27:16.658574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract. ... 
:TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=12; 2025-06-24T21:27:57.484514Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=160; 2025-06-24T21:27:57.484555Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=9537; 2025-06-24T21:27:57.484602Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=9659; 2025-06-24T21:27:57.484668Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=13; 2025-06-24T21:27:57.484840Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=126; 2025-06-24T21:27:57.484884Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=10425; 2025-06-24T21:27:57.485025Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=90; 2025-06-24T21:27:57.485138Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=66; 2025-06-24T21:27:57.485317Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=125; 2025-06-24T21:27:57.485441Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=74; 2025-06-24T21:27:57.489907Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=4402; 2025-06-24T21:27:57.495264Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=5240; 2025-06-24T21:27:57.495398Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=16; 2025-06-24T21:27:57.495468Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=14; 2025-06-24T21:27:57.495524Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=9; 2025-06-24T21:27:57.495649Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=53; 2025-06-24T21:27:57.495709Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=7; 2025-06-24T21:27:57.495811Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=64; 2025-06-24T21:27:57.495858Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=6; 2025-06-24T21:27:57.495929Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=39; 2025-06-24T21:27:57.496024Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=55; 2025-06-24T21:27:57.496333Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=270; 2025-06-24T21:27:57.496379Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=30318; 2025-06-24T21:27:57.496546Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=24365192;raw_bytes=35131129;count=5;records=400000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:27:57.496667Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1810:3670];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:27:57.496720Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1810:3670];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:27:57.496789Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1810:3670];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:27:57.516735Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;self_id=[1:1810:3670];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-24T21:27:57.516888Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:27:57.516988Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=4; 2025-06-24T21:27:57.517064Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800174302;tx_id=18446744073709551615;;current_snapshot_ts=1750800437893; 2025-06-24T21:27:57.517111Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:27:57.517164Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:27:57.517209Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:27:57.517300Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:27:57.518199Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;self_id=[1:1851:3703];tablet_id=9437184;parent=[1:1810:3670];fline=manager.cpp:88;event=ask_data;request=request_id=71;9438184000001={portions_count=5};; 2025-06-24T21:27:57.518980Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1810:3670];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:27:57.519434Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1810:3670];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:27:57.519468Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:27:57.519492Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:27:57.519536Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1810:3670];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:27:57.519614Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1810:3670];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=4; 2025-06-24T21:27:57.519676Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1810:3670];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800174302;tx_id=18446744073709551615;;current_snapshot_ts=1750800437893; 2025-06-24T21:27:57.519722Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1810:3670];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:27:57.519772Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1810:3670];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:27:57.519811Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1810:3670];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:27:57.519900Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1810:3670];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 240000/14617704 160000/9752224 80000/4886744 0/0 >> KqpRanges::DuplicateKeyPredicateMixed [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithUnspecifiedMode ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::AlterAddIndex [GOOD] Test command err: Trying to start YDB, gRPC: 3505, MsgBus: 63411 2025-06-24T21:27:12.732261Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630598188596151:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:12.732777Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003246/r3tmp/tmpevSZ3E/pdisk_1.dat 
2025-06-24T21:27:13.047023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:13.047180Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:13.054706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:13.090627Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:13.091428Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630598188596053:2079] 1750800432720846 != 1750800432720849 TServer::EnableGrpc on GrpcPort 3505, node 1 2025-06-24T21:27:13.148767Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:13.148817Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:13.148832Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:13.148977Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63411 TClient is connected to server localhost:63411 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:27:13.732673Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:13.794533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:27:13.809452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:27:13.822864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:13.983030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:14.170211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:14.233320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:15.930735Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630611073499580:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:15.930879Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:16.286733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.320873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.355770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.391333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.421109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.456311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.488230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.585700Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630615368467533:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:16.585797Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:16.585905Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630615368467538:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:16.589856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:16.602004Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630615368467540:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:16.684204Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630615368467591:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:17.727774Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630598188596151:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:17.727844Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 8633, MsgBus: 26878 2025-06-24T21:27:19.079766Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630631371244132:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:19.080023Z node 2 :METADATA_PROVIDER ERROR: log.cpp:7 ... 6-24T21:27:50.592435Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:50.598344Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:50.599585Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18683, node 7 2025-06-24T21:27:50.701626Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:50.701663Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:50.701676Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:50.701831Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11016 TClient is connected to server localhost:11016 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:51.336841Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:51.343666Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:27:51.357050Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:51.412294Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:51.496653Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:51.706043Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:51.801108Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:54.765773Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630778601036767:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:54.765907Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:54.894026Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:54.974216Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.021064Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.102105Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.146095Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.204845Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.258930Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.351287Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630782896004725:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:55.351489Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:55.351787Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630782896004730:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:55.358652Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:55.378329Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519630782896004732:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:55.388299Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519630761421165982:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:55.388417Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:55.465995Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519630782896004785:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:57.001387Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:57.100311Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:57.149169Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) >> KqpQueryService::ExecuteQueryMultiScalar [GOOD] >> KqpQueryService::TableSink_HtapInteractive-withOltpSink [GOOD] >> KqpQueryService::TableSink_OlapInsert >> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap+useOltpSink >> KqpQueryServiceScripts::ForgetScriptExecutionRace [GOOD] >> KqpQueryServiceScripts::InvalidFetchToken ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpRanges::DuplicateKeyPredicateMixed [GOOD] Test command err: Trying to start YDB, gRPC: 29397, MsgBus: 8741 2025-06-24T21:27:03.314543Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630559613515243:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:03.314641Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003289/r3tmp/tmpRK5J2V/pdisk_1.dat 2025-06-24T21:27:03.725603Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:03.726032Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630559613515205:2079] 1750800423313073 != 1750800423313076 
2025-06-24T21:27:03.743992Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:03.744121Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:03.745735Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29397, node 1 2025-06-24T21:27:03.789978Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:03.790000Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:03.790012Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:03.790164Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8741 TClient is connected to server localhost:8741 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:04.260236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:04.281093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:04.347120Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:04.397587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:27:04.574700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:04.662020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:06.489501Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630572498418731:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.489611Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.860305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.883593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.905771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.931443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.958782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.987672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:07.014907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:07.068736Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630576793386682:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:07.068833Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:07.068850Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630576793386687:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:07.072107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:07.081996Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630576793386689:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:07.171122Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630576793386740:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:08.265661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:08.314626Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630559613515243:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:08.314705Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:08.521408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part ... ], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:47.654474Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7519630749535131335:3429] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 31674, MsgBus: 21696 2025-06-24T21:27:50.792735Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519630764254717332:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:50.792804Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003289/r3tmp/tmpMJhc7C/pdisk_1.dat 2025-06-24T21:27:50.936063Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:50.937238Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519630764254717308:2079] 1750800470790929 != 1750800470790932 2025-06-24T21:27:50.957722Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:50.957834Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 31674, node 7 2025-06-24T21:27:50.964139Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:51.026095Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:51.026120Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:51.026131Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:51.026281Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21696 TClient is connected to server localhost:21696 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:51.718577Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:51.739243Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:51.859543Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:27:51.896381Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:52.089388Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:52.176854Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:55.213842Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630785729555422:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:55.213960Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:55.262682Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.303795Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.383385Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.439234Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.483250Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.530502Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.578111Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.659539Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630785729556082:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:55.659735Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:55.660257Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630785729556087:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:55.665365Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:55.680092Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519630785729556089:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:55.781759Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519630785729556140:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:55.793480Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519630764254717332:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:55.793577Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryService::TableSink_BadTransactions [GOOD] >> TAsyncIndexTests::DropTableWithInflightChanges[PipeResets] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ExecuteQueryMultiScalar [GOOD] Test command err: Trying to start YDB, gRPC: 16627, MsgBus: 4654 2025-06-24T21:27:42.914941Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630726378128993:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:42.915025Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030ba/r3tmp/tmpt3nSY9/pdisk_1.dat 2025-06-24T21:27:43.328603Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630726378128972:2079] 1750800462905852 != 1750800462905855 2025-06-24T21:27:43.337945Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:43.377242Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:43.377378Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 16627, node 1 2025-06-24T21:27:43.384976Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:43.431271Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:43.431295Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:43.431316Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:43.431444Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4654 TClient is connected to server localhost:4654 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:27:43.945198Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:44.090956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:44.112084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:44.242645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:44.403618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:44.483897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:46.183103Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630743557999794:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:46.183261Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:46.435342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:46.466613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:46.500028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:46.544164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:46.580251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:46.621323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:46.695532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:46.796602Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630743558000461:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:46.796724Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:46.797419Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630743558000466:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:46.801430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:46.816474Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630743558000468:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:46.913957Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630743558000519:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:47.915584Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630726378128993:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:47.915668Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 16667, MsgBus: 29617 2025-06-24T21:27:48.965162Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630753887387520:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:48.965227Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath ... 306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:27:52.461846Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630771067257483:2390] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 9951, MsgBus: 22236 2025-06-24T21:27:53.389391Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630773573989878:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:53.389469Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030ba/r3tmp/tmptuz1GO/pdisk_1.dat 2025-06-24T21:27:53.507200Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:53.507201Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630773573989859:2079] 1750800473388419 != 1750800473388422 TServer::EnableGrpc on GrpcPort 9951, node 3 2025-06-24T21:27:53.536429Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:53.536526Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:53.547382Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:53.605055Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:53.605078Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:53.605087Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:53.605218Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22236 TClient is connected to server localhost:22236 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:54.152126Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:54.167061Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:54.234187Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:54.402691Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:54.483607Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:54.574125Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:56.908054Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630786458893378:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:56.908110Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:56.973211Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:57.011401Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:57.047850Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:57.084304Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:57.126034Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:57.169018Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:57.213401Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:57.282648Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630790753861331:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:57.282750Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630790753861336:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:57.282802Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:57.286809Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:57.299050Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630790753861338:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:57.367808Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630790753861389:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:58.391252Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630773573989878:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:58.391325Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpDocumentApi::RestrictWriteExplicitPrepare [GOOD] >> KqpDocumentApi::Scripting >> TColumnShardTestSchema::RebootForgetAfterFail >> KqpQueryService::SeveralCTAS+UseSink [GOOD] >> KqpQueryService::SeveralCTAS-UseSink >> KqpQueryService::ExecuteQueryPure [GOOD] >> KqpQueryService::ExecuteQueryScalar ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_BadTransactions [GOOD] Test command err: Trying to start YDB, gRPC: 14295, MsgBus: 20963 2025-06-24T21:27:41.787695Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630723404845911:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:41.787767Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030be/r3tmp/tmpPHW7SJ/pdisk_1.dat 2025-06-24T21:27:42.151334Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630723404845888:2079] 1750800461786389 != 1750800461786392 2025-06-24T21:27:42.160410Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14295, node 1 2025-06-24T21:27:42.204632Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:42.205006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:42.207798Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:42.267806Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:42.267834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:42.267846Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:42.267993Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20963 TClient is connected to server localhost:20963 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:27:42.838639Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:42.946188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:42.972215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:27:42.983562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:43.183216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:43.367438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:43.455838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:27:45.195701Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630740584716707:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:45.195831Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:45.474212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:45.516697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:45.547636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:45.582273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:45.617078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:45.690454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:45.760786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:45.851554Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630740584717377:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:45.851638Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:45.851639Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630740584717382:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:45.856378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:45.867853Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630740584717384:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:45.955996Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630740584717435:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:46.791273Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630723404845911:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:46.791366Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 10683, MsgBus: 16172 2025-06-24T21:27:47.820797Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630747693540552:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:47.820870Z node 2 :METADATA_PROVIDER ERROR: log.cp ... 4T21:27:58.115234Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:58.123918Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:58.124074Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:58.124801Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:58.124981Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:58.131327Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:58.131348Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:58.132126Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:58.132148Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:58.137898Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:58.138035Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:58.138653Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:58.138889Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:27:58.144757Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:58.144878Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:27:58.153593Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:58.220196Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630797194612638:2444], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:58.220307Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:58.220547Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630797194612643:2447], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:58.224337Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:58.236985Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630797194612645:2448], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-24T21:27:58.342138Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630797194612696:2664] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:58.532101Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976710663;tx_id=281474976710663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710663; 2025-06-24T21:27:58.532124Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976710663;tx_id=281474976710663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710663; 2025-06-24T21:27:58.532664Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976710663;tx_id=281474976710663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710663; 2025-06-24T21:27:58.533560Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976710663;tx_id=281474976710663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710663; 2025-06-24T21:27:58.877461Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630775719774859:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:58.877545Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:58.923558Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=Mjg1YWE1Zi0yYzc4MGFiMy1mYWRkMTYyZC1jZGQyMWJkNA==, ActorId: [3:7519630797194612823:2470], ActorState: ExecuteState, TraceId: 01jyhxece55c0mvn1561rr5hp7, Create QueryResponse for error on request, msg: Write transactions between column and row tables are disabled at current time. 2025-06-24T21:27:59.069015Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=MjhkZDJiYi02YmM4NDdhMC1jNGQ5ZGRkYi1mZmM2MjRjYQ==, ActorId: [3:7519630797194612841:2477], ActorState: ExecuteState, TraceId: 01jyhxecpwa5jy0nrnh8vsjba7, Create QueryResponse for error on request, msg: Write transactions between column and row tables are disabled at current time. 2025-06-24T21:27:59.227523Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=ODgzZDdiZGUtZjhiNDcxMzUtMjhiMzc5YzctMzQ3ZDA2ZWQ=, ActorId: [3:7519630801489580158:2485], ActorState: ExecuteState, TraceId: 01jyhxecvb1747v7gh2atbtht0, Create QueryResponse for error on request, msg: Write transactions between column and row tables are disabled at current time. 2025-06-24T21:27:59.619076Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=MjZlYWQyNTctYzQ1YmUzZjMtOTUwNTlmOTAtYmY3YjIzMTU=, ActorId: [3:7519630801489580179:2494], ActorState: ExecuteState, TraceId: 01jyhxed05742zatych2p8xbq1, Create QueryResponse for error on request, msg: Write transactions between column and row tables are disabled at current time. 
2025-06-24T21:27:59.806979Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NzhmMGRkZjMtNzgwYzQwNi1hNzIxNWNmNS01ZTA0YTc5Yg==, ActorId: [3:7519630801489580198:2502], ActorState: ExecuteState, TraceId: 01jyhxedce78q4w4wkywt86v98, Create QueryResponse for error on request, msg: Write transactions between column and row tables are disabled at current time. 2025-06-24T21:27:59.967669Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976710671;tx_id=281474976710671;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710671; 2025-06-24T21:27:59.968278Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976710671;tx_id=281474976710671;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710671; 2025-06-24T21:27:59.968629Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519630792899644835:2300];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=21;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037889,72075186224037894;receive=72075186224037893; 2025-06-24T21:27:59.968693Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519630792899644835:2300];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=22;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037889,72075186224037894;receive=72075186224037893; 2025-06-24T21:27:59.968833Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519630792899644835:2300];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=24;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037894;receive=72075186224037889; 2025-06-24T21:27:59.968889Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519630792899644835:2300];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=25;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037894;receive=72075186224037889; 2025-06-24T21:27:59.969856Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976710671;tx_id=281474976710671;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710671; 2025-06-24T21:27:59.970600Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976710671;tx_id=281474976710671;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710671; >> KqpQueryService::MixedReadQueryWithoutStreamLookup [GOOD] >> KqpRanges::ScanKeyPrefix [GOOD] >> KqpQueryServiceScripts::TestPaging >> KqpQueryServiceScripts::ExecuteScriptWithParameters >> KqpQueryService::ShowCreateTable [GOOD] >> KqpQueryService::ShowCreateTableDisable >> KqpNewEngine::SequentialReadsPragma-Enabled [GOOD] >> KqpQueryServiceScripts::ExecuteScriptStatsNone [GOOD] >> KqpReturning::ReturningTypes [GOOD] >> KqpSqlIn::SimpleKey_In_And_In [GOOD] >> KqpSqlIn::TupleNotOnlyOfKeys ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpRanges::ScanKeyPrefix [GOOD] Test command err: Trying to start YDB, gRPC: 8737, MsgBus: 2261 2025-06-24T21:27:03.115317Z node 1 
:METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630560408798957:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:03.115396Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0032b0/r3tmp/tmpKoLTbi/pdisk_1.dat 2025-06-24T21:27:03.455258Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:03.456855Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630560408798937:2079] 1750800423114388 != 1750800423114391 TServer::EnableGrpc on GrpcPort 8737, node 1 2025-06-24T21:27:03.477519Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:03.477831Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:03.486499Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:03.522467Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:03.522527Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:03.522560Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:03.522723Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2261 TClient is connected to server localhost:2261 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:04.048090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:27:04.077291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:04.124085Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:04.234254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:04.389482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:04.466949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:06.003057Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630568998735180:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.003246Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.376847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.405525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.435009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.464576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.491642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.526177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.554161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.633451Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630573293703136:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.633512Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.633512Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630573293703141:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.637134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:06.646144Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630573293703143:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:06.722032Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630573293703194:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:08.115302Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630560408798957:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:08.115367Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 20193, MsgBus: 31179 2025-06-24T21:27:09.209304Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630588024381369:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:09.209359Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # ... 055919Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667)
: Warning: Type annotation, code: 1030
:1:44: Warning: At lambda, At function: Coalesce
:1:58: Warning: At function: SqlIn
:1:58: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 Trying to start YDB, gRPC: 8498, MsgBus: 1666 2025-06-24T21:27:53.975486Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519630775973134260:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:53.975577Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0032b0/r3tmp/tmpGt82Y7/pdisk_1.dat 2025-06-24T21:27:54.138464Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:54.140319Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:54.140431Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:54.146370Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8498, node 7 2025-06-24T21:27:54.243759Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:54.243781Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:54.243791Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:54.243934Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1666 TClient is connected to server localhost:1666 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:27:54.994340Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:55.005947Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:27:55.029689Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:27:55.121233Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.362125Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:55.476555Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:58.976051Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519630775973134260:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:58.976151Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:59.000405Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630797447972358:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.000501Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.102133Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.148129Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.202641Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.256060Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.305053Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.396535Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.478859Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.582785Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630801742940323:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.582912Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.582996Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630801742940328:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.587997Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:59.604875Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519630801742940330:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:59.662324Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519630801742940381:3428] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::MixedReadQueryWithoutStreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 26413, MsgBus: 13639 2025-06-24T21:27:38.705371Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630712684652676:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:38.705412Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030c9/r3tmp/tmps4y4Uq/pdisk_1.dat 2025-06-24T21:27:39.070632Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26413, node 1 2025-06-24T21:27:39.152723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:39.152831Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:39.155418Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:39.175260Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:39.175298Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:39.175306Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:39.175463Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13639 TClient is connected to server localhost:13639 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:27:39.716504Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:39.789898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:39.814471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:27:39.833981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:40.017010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:40.186998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:40.260335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:42.164854Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630729864523453:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:42.165084Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:42.559198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:42.597242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:42.637503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:42.668576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:42.707660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:42.751302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:42.793661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:42.855194Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630729864524112:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:42.855295Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:42.855550Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630729864524117:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:42.860649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:42.875807Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630729864524119:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:42.961160Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630729864524170:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:43.723879Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630712684652676:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:43.723990Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:44.081124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:44.133401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok ... T21:27:59.569705Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037950;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.570418Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037946;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.571766Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037916;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.572340Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.576773Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037946;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.577482Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037917;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.577772Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.578318Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.583516Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037917;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.583516Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.584224Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037932;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.584229Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037943;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.589873Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037943;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.589873Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037932;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.590555Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037915;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.590566Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.594963Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037915;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.597063Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.599513Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.600279Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.602906Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.603614Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037933;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.605540Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.606128Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037949;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.609365Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037933;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.610054Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.610065Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037949;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.610719Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.614748Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.615390Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.616489Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.617115Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.621026Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.621698Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.622683Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037941;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.623356Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037945;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.627356Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.627447Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037945;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.628492Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.628589Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037921;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-24T21:27:59.634203Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.634203Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037921;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-24T21:27:59.969124Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:27:59.969302Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:27:59.969727Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:27:59.970384Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[3:7519630793847468134:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037903;local_tx_no=13;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037907,72075186224037947;receive=72075186224037899; 2025-06-24T21:27:59.970535Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[3:7519630793847468134:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037903;local_tx_no=15;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037907;receive=72075186224037947; 2025-06-24T21:27:59.971033Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; >> KqpQueryService::TableSink_OltpInteractive [GOOD] >> KqpQueryService::StreamExecuteQuery [GOOD] >> KqpQueryService::StreamExecuteCollectMeta >> MoveTable::RenameAbsentTable_Negative ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::SequentialReadsPragma-Enabled [GOOD] Test command err: Trying to start YDB, gRPC: 27961, MsgBus: 24073 2025-06-24T21:27:04.046813Z node 1 :METADATA_PROVIDER WARN: 
log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630565343392718:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:04.046929Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003264/r3tmp/tmpjGUVrE/pdisk_1.dat 2025-06-24T21:27:04.402414Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:04.402714Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630565343392696:2079] 1750800424046047 != 1750800424046050 TServer::EnableGrpc on GrpcPort 27961, node 1 2025-06-24T21:27:04.434806Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:04.434945Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:04.436646Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:04.476084Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:04.476117Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:04.476129Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:04.476348Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24073 TClient is connected to server localhost:24073 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:05.040031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:27:05.053733Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:06.668991Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630573933327934:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.669019Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630573933327925:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.669148Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.672778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:06.682226Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630573933327939:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:27:06.781639Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630573933327990:2331] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 27069, MsgBus: 18909 2025-06-24T21:27:07.994074Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630576529577659:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:07.994174Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003264/r3tmp/tmp0wkmip/pdisk_1.dat 2025-06-24T21:27:08.110570Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:08.111534Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630576529577639:2079] 1750800427993265 != 1750800427993268 TServer::EnableGrpc on GrpcPort 27069, node 2 2025-06-24T21:27:08.146409Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:08.146503Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:08.152189Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:08.183706Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:08.183736Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:08.183744Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:08.183873Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18909 TClient is connected to server localhost:18909 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:08.640810Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:08.659511Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:08.804767Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:08.989744Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:08.998662Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:09.085315Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:11.144837Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630593709448475:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:11.144937Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:11.246133Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, ... 435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:51.810539Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7519630768021819316:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 23707, MsgBus: 1405 2025-06-24T21:27:54.977329Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519630780612729860:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:54.977416Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003264/r3tmp/tmpY4Mtbg/pdisk_1.dat 2025-06-24T21:27:55.128195Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519630780612729836:2079] 1750800474976453 != 1750800474976456 2025-06-24T21:27:55.143595Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:55.144802Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:55.144902Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:55.150215Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23707, node 7 2025-06-24T21:27:55.215288Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:55.215314Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:55.215328Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:55.215495Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1405 TClient is connected to server localhost:1405 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:55.945802Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:55.966405Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:55.983352Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:56.053421Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:56.273678Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:56.412778Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:59.874005Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630802087567959:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.874152Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.934900Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.978640Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519630780612729860:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:59.979646Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:59.987158Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:00.038749Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:00.132543Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:00.177048Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:00.219419Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:00.266720Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:00.368392Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519630806382535921:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:00.368528Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:00.368835Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630806382535926:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:00.374831Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:00.389066Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519630806382535928:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:00.477340Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519630806382535979:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::ExecuteScriptStatsNone [GOOD] Test command err: Trying to start YDB, gRPC: 15498, MsgBus: 7746 2025-06-24T21:27:39.885096Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630714127602507:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:39.885146Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030c4/r3tmp/tmptacNNS/pdisk_1.dat 2025-06-24T21:27:40.306974Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:40.307320Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630714127602474:2079] 1750800459883887 != 1750800459883890 TServer::EnableGrpc on GrpcPort 15498, node 1 2025-06-24T21:27:40.343438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:40.343565Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:40.345398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:40.385990Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:40.386013Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:40.386026Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:40.386195Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7746 TClient is connected to server localhost:7746 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:27:40.897140Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:40.962628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:40.990102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:41.189346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:27:41.320840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:41.406506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:43.124998Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630731307473308:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:43.125106Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:43.553664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:43.609375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:43.680637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:43.720638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:43.767188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:43.813466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:43.856311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:43.947562Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630731307473975:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:43.947676Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:43.950746Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630731307473980:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:43.955218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:43.970839Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630731307473982:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:44.033084Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630735602441329:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:44.885567Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630714127602507:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:44.885654Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:45.092675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:45.093966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part ... 25-06-24T21:27:56.112198Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:56.112289Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:56.117952Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20926, node 3 2025-06-24T21:27:56.175561Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:56.175590Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:56.175599Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:56.175734Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1638 TClient is connected to server localhost:1638 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:56.670496Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:56.678584Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:27:56.684077Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:56.773453Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:56.958626Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:56.960562Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:57.043666Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:59.330085Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630802522579616:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.330187Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.397148Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.435680Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.471738Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.505498Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.537387Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.593062Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.670242Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.760973Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630802522580280:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.761046Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.761091Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630802522580285:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.764465Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:59.774830Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630802522580287:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:59.836564Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630802522580338:3412] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:00.952767Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630785342708835:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:00.953894Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:00.981861Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:00.983565Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:00.985095Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpReturning::ReturningTypes [GOOD] Test command err: Trying to start YDB, gRPC: 13305, MsgBus: 4163 2025-06-24T21:27:03.250101Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630559550269413:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:03.250245Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00331a/r3tmp/tmpGfXrfP/pdisk_1.dat 2025-06-24T21:27:03.607337Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:03.607664Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630559550269391:2079] 1750800423249332 != 1750800423249335 TServer::EnableGrpc on GrpcPort 13305, node 1 2025-06-24T21:27:03.656478Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:03.657205Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:03.662137Z node 1 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:03.683504Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:03.683540Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:03.683558Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:03.683733Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4163 TClient is connected to server localhost:4163 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:04.249714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:27:04.256683Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:27:04.279066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:04.406709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:04.565964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:27:04.664221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:06.119241Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630572435172918:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.119330Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.387023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.416556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.444427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.516027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.541150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.607534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.673305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.751322Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630572435173591:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.751393Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.751436Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630572435173596:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.754665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:06.763705Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630572435173598:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:06.857757Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630572435173649:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:07.956982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:08.002773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:08.040850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ... 17] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:52.170019Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) [[[2];["321"]];[["111"];[2]]] Trying to start YDB, gRPC: 9211, MsgBus: 14000 2025-06-24T21:27:54.423216Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519630777916094461:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:54.423295Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00331a/r3tmp/tmpRWWngp/pdisk_1.dat 2025-06-24T21:27:54.599270Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:54.599390Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:54.603532Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:54.619276Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9211, node 7 2025-06-24T21:27:54.687880Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:54.687910Z node 7 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:54.687922Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:54.688081Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14000 TClient is connected to server localhost:14000 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:27:55.447267Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:55.520880Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:55.542838Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:55.629509Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:55.910449Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:27:56.012350Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:59.211307Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630799390932540:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.211439Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.304778Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.359788Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.416792Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.423597Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519630777916094461:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:59.423682Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:59.520445Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.598633Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.656492Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.740143Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.935651Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519630799390933212:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.935800Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.936146Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630799390933217:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.941501Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:59.957717Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519630799390933219:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:00.042847Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519630803685900566:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpQueryService::DdlMixedDml [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_OltpInteractive [GOOD] Test command err: Trying to start YDB, gRPC: 7680, MsgBus: 11438 2025-06-24T21:27:49.064590Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630758306951370:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:49.064678Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030b7/r3tmp/tmpUAIXC5/pdisk_1.dat 2025-06-24T21:27:49.445473Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7680, node 1 2025-06-24T21:27:49.485628Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:49.485778Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:49.487928Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:49.493598Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:49.493616Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:49.493621Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:49.493734Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11438 TClient is connected to server localhost:11438 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:27:50.071521Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:50.081907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:52.124768Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630771191853861:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:52.124910Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:52.405866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:52.568235Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630771191853965:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:52.568312Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:52.568369Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630771191853970:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:52.572735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:52.583224Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630771191853972:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:27:52.675797Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630771191854023:2392] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 8377, MsgBus: 17612 2025-06-24T21:27:53.654296Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630775390039506:2197];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:53.654411Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030b7/r3tmp/tmpKfdxg8/pdisk_1.dat 2025-06-24T21:27:53.751515Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:53.752816Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630775390039345:2079] 1750800473614365 != 1750800473614368 2025-06-24T21:27:53.768099Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:53.768173Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 8377, node 2 2025-06-24T21:27:53.769715Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:53.803646Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:53.803666Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:53.803675Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:53.803790Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17612 TClient is connected to server localhost:17612 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:54.273272Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:54.280816Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:27:54.627294Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:56.911374Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630788274941867:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:56.911488Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:56.943188Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:56.994552Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630788274941969:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:56.994638Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_ ... emeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:57.008532Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630788274941976:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:27:57.083905Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630792569909323:2390] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:57.475115Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=4; 2025-06-24T21:27:57.486640Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 4 at tablet 72075186224037888 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:27:57.486820Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 4 at tablet 72075186224037888 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-24T21:27:57.487056Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [2:7519630792569909407:2326], Table: `/Root/DataShard` ([72057594046644480:2:1]), SessionActorId: [2:7519630792569909391:2326]Got CONSTRAINT VIOLATION for table `/Root/DataShard`. ShardID=72075186224037888, Sink=[2:7519630792569909407:2326].{
: Error: Conflict with existing key., code: 2012 } 2025-06-24T21:27:57.488129Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519630792569909400:2326], SessionActorId: [2:7519630792569909391:2326], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[2:7519630792569909391:2326]. isRollback=0 2025-06-24T21:27:57.488440Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=ZDdkNWViMzgtMzg4MzRjNDktNjVlNGYzNzctYmY0ODdj, ActorId: [2:7519630792569909391:2326], ActorState: ExecuteState, TraceId: 01jyhxeb7533rrjf6a7104bz1w, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [2:7519630792569909401:2326] from: [2:7519630792569909400:2326] 2025-06-24T21:27:57.488532Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519630792569909401:2326] TxId: 281474976715663. Ctx: { TraceId: 01jyhxeb7533rrjf6a7104bz1w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDdkNWViMzgtMzg4MzRjNDktNjVlNGYzNzctYmY0ODdj, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-24T21:27:57.488775Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZDdkNWViMzgtMzg4MzRjNDktNjVlNGYzNzctYmY0ODdj, ActorId: [2:7519630792569909391:2326], ActorState: ExecuteState, TraceId: 01jyhxeb7533rrjf6a7104bz1w, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 24442, MsgBus: 1214 2025-06-24T21:27:58.318966Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630795517104952:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:58.319040Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030b7/r3tmp/tmpdFJQiS/pdisk_1.dat 2025-06-24T21:27:58.474877Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630795517104928:2079] 1750800478318507 != 1750800478318510 2025-06-24T21:27:58.485549Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:58.486026Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:58.486102Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:58.490872Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24442, node 3 2025-06-24T21:27:58.541914Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:58.541936Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:58.541942Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:58.542036Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1214 TClient is connected to server localhost:1214 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:59.057109Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:59.063984Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:27:59.339255Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:01.908648Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630808402007449:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:01.908753Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:01.951647Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:02.180916Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:02.455221Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630812696976126:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:02.455315Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:02.462802Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630812696976130:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:02.462884Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:02.462978Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630812696976135:2409], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:02.466313Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:02.477611Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630812696976137:2410], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-24T21:28:02.542816Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630812696976188:3204] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:03.319863Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630795517104952:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:03.319960Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryServiceScripts::ExecuteScriptWithUnspecifiedMode [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithTimeout >> KqpQueryService::TableSink_OlapInsert [GOOD] >> KqpQueryService::TableSink_OlapDelete >> KqpService::Shutdown >> KqpQueryService::ExecuteCollectMeta >> KqpReturning::ReturningWorksIndexedInsert-QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedOperationsWithDefault+QueryService >> MoveTable::RenameAbsentTable_Negative [GOOD] |97.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> KqpDocumentApi::Scripting [GOOD] >> KqpQueryService::AlterTable_DropNotNull_Valid |97.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> MoveTable::RenameAbsentTable_Negative [GOOD] Test command err: 2025-06-24T21:28:05.162261Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:28:05.187327Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:28:05.187598Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:28:05.194995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:28:05.195281Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:28:05.195768Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:28:05.195897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:28:05.195997Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:28:05.196097Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:28:05.196183Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:28:05.196297Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:28:05.196395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:28:05.196494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:28:05.196584Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:28:05.222801Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:28:05.223070Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:28:05.223120Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:28:05.223324Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:28:05.223535Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:28:05.223617Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:28:05.223664Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:28:05.223776Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:28:05.223853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:28:05.223903Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:28:05.223950Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:28:05.224152Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:28:05.224230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:28:05.224283Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:28:05.224314Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:28:05.224404Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:28:05.224459Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:28:05.224589Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:28:05.224633Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:28:05.224713Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:28:05.224758Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:28:05.224830Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:28:05.225058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:28:05.225110Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:28:05.225140Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:28:05.225341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:28:05.225388Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:28:05.225417Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:28:05.225546Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:28:05.225607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:28:05.225641Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:28:05.225714Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:28:05.225773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:28:05.225807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:28:05.225828Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:28:05.226234Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=53; 2025-06-24T21:28:05.226353Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=53; 2025-06-24T21:28:05.226447Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=40; 2025-06-24T21:28:05.226542Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=43; 2025-06-24T21:28:05.226693Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:28:05.226775Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:28:05.226810Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:28:05.226868Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
rLoadingTime=35; 2025-06-24T21:28:05.488595Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=52; 2025-06-24T21:28:05.488742Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=58; 2025-06-24T21:28:05.488915Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=131; 2025-06-24T21:28:05.489278Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=324; 2025-06-24T21:28:05.489331Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=15; 2025-06-24T21:28:05.489379Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=16; 2025-06-24T21:28:05.489437Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-24T21:28:05.489607Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=124; 2025-06-24T21:28:05.489688Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=8; 2025-06-24T21:28:05.489795Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=58; 2025-06-24T21:28:05.489842Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=5; 2025-06-24T21:28:05.489919Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=40; 2025-06-24T21:28:05.490010Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=28; 2025-06-24T21:28:05.490081Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=29; 2025-06-24T21:28:05.490117Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=3298; 2025-06-24T21:28:05.490294Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:28:05.490391Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:28:05.490525Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:28:05.490780Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:28:05.490841Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:449;problem=Background activities cannot be started: no index at tablet; 2025-06-24T21:28:05.491232Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:28:05.491318Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:28:05.491349Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-24T21:28:05.491374Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:28:05.491422Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:28:05.491486Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:449;problem=Background activities cannot be started: no index at tablet; 2025-06-24T21:28:06.055927Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=10;this=88923004791712;method=TTxController::StartProposeOnExecute;tx_info=10:TX_KIND_SCHEMA;min=1750800486129;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-24T21:28:06.056017Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=10;this=88923004791712;method=TTxController::StartProposeOnExecute;tx_info=10:TX_KIND_SCHEMA;min=1750800486129;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;fline=schema.h:38;event=sync_schema; 2025-06-24T21:28:06.068658Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750800486129;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;this=88923004791712;op_tx=10:TX_KIND_SCHEMA;min=1750800486129;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750800486129;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;int_this=89129165613696;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-24T21:28:06.068786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750800486129;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;this=88923004791712;op_tx=10:TX_KIND_SCHEMA;min=1750800486129;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750800486129;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;int_this=89129165613696;method=TTxController::FinishProposeOnComplete;tx_id=10;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:158:2180]; 2025-06-24T21:28:06.068842Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750800486129;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;this=88923004791712;op_tx=10:TX_KIND_SCHEMA;min=1750800486129;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750800486129;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;int_this=89129165613696;method=TTxController::FinishProposeOnComplete;tx_id=10;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=10; 2025-06-24T21:28:06.069194Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-24T21:28:06.069314Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750800486129 at tablet 9437184, mediator 0 2025-06-24T21:28:06.069354Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] execute at tablet 9437184 2025-06-24T21:28:06.069724Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-24T21:28:06.069829Z node 1 :TX_COLUMNSHARD INFO: ctor_logger.h:56: EnsureTable for pathId: {internal: 9438184000001, ss: 1} ttl settings: { Version: 1 } at tablet 9437184 2025-06-24T21:28:06.075990Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:28:06.076157Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tables_manager.cpp:304;method=RegisterTable;path_id=9438184000001; 2025-06-24T21:28:06.076243Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine.h:144;event=RegisterTable;path_id=9438184000001; 2025-06-24T21:28:06.082186Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=9438184000001; 2025-06-24T21:28:06.082368Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tx_controller.cpp:215;event=finished_tx;tx_id=10; 2025-06-24T21:28:06.108147Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] complete at tablet 9437184 2025-06-24T21:28:06.109067Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=11;this=88923004830240;method=TTxController::StartProposeOnExecute;tx_info=11:TX_KIND_SCHEMA;min=1750800486132;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:1;;fline=schema.cpp:134;propose_execute=move_table;src=111;dst=2; 2025-06-24T21:28:06.109162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=11;this=88923004830240;method=TTxController::StartProposeOnExecute;tx_info=11:TX_KIND_SCHEMA;min=1750800486132;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=111;result=not_found; 2025-06-24T21:28:06.109236Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=11;this=88923004830240;method=TTxController::StartProposeOnExecute;tx_info=11:TX_KIND_SCHEMA;min=1750800486132;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:1;;fline=tx_controller.cpp:364;error=problem on start;message=No such table; 2025-06-24T21:28:06.121844Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=11:TX_KIND_SCHEMA;min=1750800486132;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:1;;this=88923004830240;op_tx=11:TX_KIND_SCHEMA;min=1750800486132;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:1;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:102:2135]; 2025-06-24T21:28:06.121926Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=11:TX_KIND_SCHEMA;min=1750800486132;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:1;;this=88923004830240;op_tx=11:TX_KIND_SCHEMA;min=1750800486132;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:1;;fline=propose_tx.cpp:23;message=No such table;tablet_id=9437184;tx_id=11; >> KqpQueryService::CloseSessionsWithLoad [GOOD] >> KqpQueryService::ClosedSessionRemovedFromPool ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::DdlMixedDml [GOOD] Test command err: Trying to start YDB, gRPC: 30675, MsgBus: 25476 2025-06-24T21:27:13.719414Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630603052959333:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:13.719480Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030ec/r3tmp/tmpvh65YW/pdisk_1.dat 2025-06-24T21:27:14.191344Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:14.192449Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630603052959310:2079] 1750800433718540 != 1750800433718543 TServer::EnableGrpc on GrpcPort 30675, node 1 2025-06-24T21:27:14.216044Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:14.216148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:14.219624Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:14.273664Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:14.273690Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:27:14.273719Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:14.273852Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25476 TClient is connected to server localhost:25476 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:27:14.739245Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:14.864048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:14.891186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:15.017157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:15.187032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:27:15.253555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:17.106705Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630620232830138:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:17.106813Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:17.529282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:17.565098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:17.595255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:17.633013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:17.666108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:17.699332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:17.769578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:17.864069Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630620232830801:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:17.864151Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:17.864315Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630620232830806:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:17.868354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:17.883362Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630620232830808:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:17.947794Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630620232830859:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:18.723184Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630603052959333:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:18.723285Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:19.012616Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630628822765745:3608] txid# 281474976710673, issues: { message: "Group already exists" severity: 1 } 2025-06-24T21:27:19.023202Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NTZmZjZhYjgtOWE2NjJlMjYtMTg5MTRmMi04MGZlZTAwMA==, ActorId: [1:7519630624527798442:2479], ActorState: ExecuteState, TraceId: 01jyhxd5pm3wb6j9cz1qxfp16d, Create QueryResponse for error on request, msg: 2025-06-24T21: ... t=undelivered;self_id=[4:7519630796790040151:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:58.003549Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030ec/r3tmp/tmp4XS0rC/pdisk_1.dat 2025-06-24T21:27:58.181944Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:58.189202Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:58.189314Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:58.193235Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15757, node 4 2025-06-24T21:27:58.259802Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:58.259830Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:58.259841Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:58.260024Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1341 TClient is connected to server localhost:1341 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:58.921160Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:58.932456Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:27:58.944753Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:59.015589Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:59.040517Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:59.251520Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:27:59.359601Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:02.551349Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519630813969910924:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:02.551482Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:02.631167Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:02.679833Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:02.724614Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:02.774274Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:02.826962Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:02.901830Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:02.982956Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:03.007550Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519630796790040151:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:03.007622Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:03.128487Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[4:7519630818264878888:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:03.128597Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:03.128655Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519630818264878893:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:03.135764Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:03.153183Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519630818264878895:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:03.246469Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519630818264878947:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:04.727943Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519630822559846546:2480], status: GENERIC_ERROR, issues:
: Error: Optimization, code: 1070
:8:25: Error: Queries with mixed data and scheme operations are not supported. Use separate queries for different types of operations., code: 2009 2025-06-24T21:28:04.731570Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=NWZiNGJjZDktMTVmZDNmZTAtMjljNTVhOTYtMTk4OGE0YzY=, ActorId: [4:7519630822559846539:2476], ActorState: ExecuteState, TraceId: 01jyhxejadbrrez3xh4tes0fxw, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> KqpQueryService::SeveralCTAS-UseSink [GOOD] >> KqpQueryServiceScripts::ParseScript >> KqpQueryService::ReadManyRanges >> KqpService::RangeCache-UseCache [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::SeveralCTAS-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 10416, MsgBus: 20592 2025-06-24T21:27:49.529221Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630758217540226:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:49.529523Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030b5/r3tmp/tmpPgh5ru/pdisk_1.dat 2025-06-24T21:27:49.913415Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:49.913846Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630758217540115:2079] 1750800469514291 != 1750800469514294 2025-06-24T21:27:49.954920Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:49.955026Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:49.956708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10416, node 1 2025-06-24T21:27:50.036501Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:50.036524Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:50.036539Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:50.036653Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20592 TClient is connected to server localhost:20592 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:27:50.534608Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:50.758011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:27:50.779335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:50.930311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:51.105461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:51.180091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:52.820624Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630771102443654:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:52.820756Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:53.116481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:53.146315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:53.218842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:53.252345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:53.294950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:53.364208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:53.408398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:53.494731Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630775397411615:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:53.494792Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:53.494952Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630775397411620:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:53.499627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:53.517007Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630775397411622:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:53.605939Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630775397411675:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:54.525175Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630758217540226:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:54.525259Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 32514, MsgBus: 61306 2025-06-24T21:27:56.635272Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630786907947782:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:56.635352Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPa ... schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:59.978369Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630799792850298:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:28:00.050540Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630804087817645:2334] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:00.147007Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:00.344478Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630804087817928:2501] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:00.362539Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630804087817935:2506] txid# 281474976715666, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/OWE1ZTllODQtNTJjYmVhYjMtMmEyMGZjODUtYTcwOGE0NTI=\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:00.366112Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:00.556444Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630804087818122:2619] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:00.560289Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630804087818129:2624] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/OWE1ZTllODQtNTJjYmVhYjMtMmEyMGZjODUtYTcwOGE0NTI=\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:00.563626Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 61467, MsgBus: 25187 2025-06-24T21:28:02.102852Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630812637044767:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:02.102924Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030b5/r3tmp/tmpwfv7to/pdisk_1.dat 2025-06-24T21:28:02.223968Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:02.244739Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:02.244827Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:02.246790Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61467, node 3 2025-06-24T21:28:02.308427Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:02.308463Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:02.308474Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:02.308612Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25187 TClient is connected to server localhost:25187 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:02.840793Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:02.848055Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:03.110624Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:05.402021Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630825521947251:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:05.402022Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630825521947259:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:05.402109Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:05.405837Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:05.417020Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630825521947265:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:28:05.509054Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630825521947317:2331] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:05.580508Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:05.783192Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630825521947602:2499] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:05.786136Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630825521947609:2504] txid# 281474976715666, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/ZmNlMTc2ZTYtYjI0MTk4MWMtZGE3NDhiMmQtNDUzYjg3ZDY=\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:05.789542Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:05.965206Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630825521947796:2617] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:05.968463Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630825521947803:2622] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/ZmNlMTc2ZTYtYjI0MTk4MWMtZGE3NDhiMmQtNDUzYjg3ZDY=\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:05.971292Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpNewEngine::StreamLookupForDataQuery-StreamLookupJoin [GOOD] >> TColumnShardTestSchema::HotTiersTtl >> KqpQueryService::ShowCreateTableDisable [GOOD] >> KqpQueryService::ShowCreateSysView >> KqpQueryService::FlowControllOnHugeLiteralAsTable |97.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> 
KqpQueryService::ExecuteQueryExplicitTxTLI >> KqpQueryService::ExecuteQueryScalar [GOOD] >> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap+useOltpSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpService::RangeCache-UseCache [GOOD] Test command err: Trying to start YDB, gRPC: 6885, MsgBus: 25433 2025-06-24T21:27:39.449670Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630717405335558:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:39.449895Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030c7/r3tmp/tmp31y06K/pdisk_1.dat 2025-06-24T21:27:39.830625Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630717405335535:2079] 1750800459448359 != 1750800459448362 2025-06-24T21:27:39.832105Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:39.868065Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:39.868181Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:39.872828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6885, node 1 2025-06-24T21:27:39.973930Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:39.973956Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:39.973961Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:39.974073Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25433 TClient is connected to server localhost:25433 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:27:40.473957Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:40.576000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:40.592092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:27:40.602054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:40.771382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:40.937721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:41.017365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:42.833267Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630730290239057:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:42.833371Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:43.172587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:43.211181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:43.293665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:43.329501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:43.374828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:43.460903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:43.499671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:43.575036Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630734585207018:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:43.575160Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:43.575375Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630734585207023:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:43.580355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:43.627188Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630734585207025:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:43.720463Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630734585207076:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:44.449956Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630717405335558:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:44.450021Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:44.696682Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2246: SessionId: ydb://session/3?node_id=1&id=M2E2MTk1YjEtMzFjOTRiNDQtN2ZlNDExYTQtMzhkZjM4ZmU=, ActorId: [1:7519630738880174641:2473], ActorState: ExecuteState, TraceId: 01jyhxdysn3e1yx6tzhn99n4vd, Reply query error, msg: Pending previous query completion prox ... n proxyRequestId: 64 2025-06-24T21:27:51.474239Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2246: SessionId: ydb://session/3?node_id=2&id=YTYzZTAwNTQtMWMxN2I0MmUtMzM2MDk2ZTMtZGVjMmNjZDQ=, ActorId: [2:7519630764791281988:2577], ActorState: ExecuteState, TraceId: 01jyhxe5dh7nfhj2cf636v9fbq, Reply query error, msg: Pending previous query completion proxyRequestId: 67 Trying to start YDB, gRPC: 3290, MsgBus: 2179 2025-06-24T21:27:52.592590Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630772359517326:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:52.592670Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030c7/r3tmp/tmpi3rbNI/pdisk_1.dat 2025-06-24T21:27:52.730857Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:52.746344Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:52.746435Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:52.749133Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3290, node 3 2025-06-24T21:27:52.799288Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:52.799307Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:52.799315Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:52.799414Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2179 TClient is connected to server localhost:2179 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:53.314379Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:53.326811Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:27:53.337977Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:53.403421Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:53.569926Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:53.614133Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 
2025-06-24T21:27:53.654908Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:56.347640Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630789539388118:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:56.347761Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:56.413266Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:56.448948Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:56.482359Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:56.516894Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:56.556981Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:56.629363Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:56.704031Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:56.776636Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630789539388783:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:56.776743Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:56.776940Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630789539388788:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:56.782264Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:56.800381Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630789539388790:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:56.890613Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630789539388841:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:57.592276Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630772359517326:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:57.592328Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; took: 9.271478s took: 9.275520s took: 9.277438s took: 9.279496s took: 9.280912s took: 9.280632s took: 9.276343s took: 9.281842s took: 9.279366s took: 9.317014s >> KqpQueryServiceScripts::InvalidFetchToken [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::StreamLookupForDataQuery-StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 30954, MsgBus: 14691 2025-06-24T21:27:03.038130Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630562667032213:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:03.038250Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00330b/r3tmp/tmpHbVNSQ/pdisk_1.dat 2025-06-24T21:27:03.385827Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:03.387370Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630562667032191:2079] 1750800423037399 != 1750800423037402 TServer::EnableGrpc on GrpcPort 30954, node 1 2025-06-24T21:27:03.415408Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:03.415567Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:03.417670Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:03.454251Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:03.454273Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:03.454285Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:03.454422Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14691 TClient is connected to server localhost:14691 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:03.973665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:04.048831Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:05.775835Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630571256967420:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:05.775954Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.165956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.311342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.350121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:27:06.385662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715762:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.416274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715763:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:27:06.435548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.469304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715766:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.496974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715767:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:27:06.524808Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715770:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.552159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715771:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:27:06.580282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715774:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.608973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715775:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:27:06.632851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.664908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715778:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.692972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715779:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:27:06.721010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715782:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.748427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose 
itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715783:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:27:06.804813Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630575551936047:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resou ... OR: schemereq.cpp:553: Actor# [6:7519630778810847996:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 3649, MsgBus: 28055 2025-06-24T21:27:58.966296Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519630797616968354:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:58.966383Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00330b/r3tmp/tmpn7Yf4k/pdisk_1.dat 2025-06-24T21:27:59.119265Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519630797616968332:2079] 1750800478965251 != 1750800478965254 2025-06-24T21:27:59.165851Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:59.165964Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:59.173024Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3649, node 7 2025-06-24T21:27:59.175747Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:59.256930Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:59.256957Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:59.256969Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:59.257181Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28055 2025-06-24T21:27:59.973768Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28055 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:28:00.062458Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:00.071989Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:00.081914Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:00.163183Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:00.377548Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:00.495346Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:03.978670Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519630797616968354:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:03.978867Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:03.999334Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630819091806456:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:03.999482Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:04.082045Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:04.134145Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:04.178833Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:04.237626Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:04.315822Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:04.400592Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:04.450987Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:04.572925Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630823386774416:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:04.573036Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:04.573232Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630823386774421:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:04.579677Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:04.600291Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519630823386774423:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:04.709319Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519630823386774474:3430] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpQueryService::StreamExecuteCollectMeta [GOOD] >> KqpQueryService::ShowCreateViewOnTable >> KqpDataIntegrityTrails::BrokenReadLock+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ExecuteQueryScalar [GOOD] Test command err: Trying to start YDB, gRPC: 9234, MsgBus: 10696 2025-06-24T21:27:41.630622Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630724201316974:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:41.630700Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030c1/r3tmp/tmphz9aWH/pdisk_1.dat 2025-06-24T21:27:42.034033Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:42.034938Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630724201316955:2079] 1750800461629835 != 1750800461629838 TServer::EnableGrpc on GrpcPort 9234, node 1 2025-06-24T21:27:42.081090Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:42.081210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:42.084148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:42.151857Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:42.151908Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:42.151918Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:42.152050Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10696 2025-06-24T21:27:42.661707Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10696 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:42.884448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:42.911912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:27:42.927168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:43.070184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:43.240150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:43.315446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:45.033135Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630741381187792:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:45.033231Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:45.345816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:45.380635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:45.408906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:45.484280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:45.558308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:45.594045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:45.630161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:45.712584Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630741381188452:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:45.712693Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:45.712966Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630741381188457:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:45.716601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:45.726694Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630741381188459:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:45.826011Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630741381188510:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:46.631635Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630724201316974:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:46.631704Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:46.864103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... 46644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:01.067926Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519630787639027119:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:01.068005Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 8961, MsgBus: 26125 2025-06-24T21:28:02.518774Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630813828707433:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:02.518872Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030c1/r3tmp/tmpA7OXkv/pdisk_1.dat 2025-06-24T21:28:02.641288Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:02.655286Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630813828707411:2079] 1750800482517908 != 1750800482517911 2025-06-24T21:28:02.668277Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:02.668411Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 8961, node 3 2025-06-24T21:28:02.670011Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:02.740127Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:02.740157Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:02.740169Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T21:28:02.740330Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26125 TClient is connected to server localhost:26125 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:28:03.363412Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:28:03.382106Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:03.468394Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:03.595817Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:03.655457Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:28:03.741017Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:06.266792Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630831008578236:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:06.266909Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:06.329765Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:06.420686Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:06.466134Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:06.523223Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:06.586002Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:06.631885Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:06.679338Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:06.765467Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630831008578894:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:06.765631Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:06.765947Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630831008578899:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:06.770322Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:06.781823Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630831008578901:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:06.875771Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630831008578952:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:07.518964Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630813828707433:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:07.519038Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |97.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap+useOltpSink [GOOD] Test command err: Trying to start YDB, gRPC: 24742, MsgBus: 29599 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044dc/r3tmp/tmpV3FkOF/pdisk_1.dat TServer::EnableGrpc on GrpcPort 24742, node 1 TClient is connected to server localhost:29599 TClient is connected to server localhost:29599 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... 
>> BasicStatistics::TwoServerlessTwoSharedDbs [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::InvalidFetchToken [GOOD] Test command err: Trying to start YDB, gRPC: 12037, MsgBus: 10746 2025-06-24T21:27:42.213158Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630727909993926:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:42.234470Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030bb/r3tmp/tmpa5J9aY/pdisk_1.dat 2025-06-24T21:27:42.683670Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630727909993873:2079] 1750800462194555 != 1750800462194558 2025-06-24T21:27:42.684292Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:42.699802Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:42.699913Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:42.701456Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12037, node 1 2025-06-24T21:27:42.843781Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:42.843813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:42.843823Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:42.843961Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10746 2025-06-24T21:27:43.232055Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10746 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:43.497713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:43.535862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:27:43.692559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:43.852319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:43.933124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:45.766806Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630740794897406:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:45.766889Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:46.117875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:46.155000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:46.229603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:46.255806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:46.287736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:46.360795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:46.437292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:46.502396Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630745089865369:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:46.502467Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:46.502555Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630745089865374:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:46.506407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:46.528008Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630745089865376:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:46.621214Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630745089865427:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:47.216444Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630727909993926:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:47.216546Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:47.685775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:47.686935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... :01.067395Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630804479034775:2079] 1750800480838114 != 1750800480838117 2025-06-24T21:28:01.076796Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:01.076890Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:01.078750Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24832, node 3 2025-06-24T21:28:01.139724Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:01.139750Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:01.139759Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:01.139911Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5067 TClient is connected to server localhost:5067 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:28:01.725929Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:01.735801Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:01.808115Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:01.928644Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:02.044087Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:02.120169Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:04.944165Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630821658905577:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:04.944267Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:05.020621Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:05.062559Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:05.132892Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:05.172892Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:05.245720Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:05.310760Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:05.348575Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:05.408023Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630825953873538:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:05.408158Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:05.408455Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630825953873543:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:05.413889Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:05.425038Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630825953873545:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:05.512208Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630825953873596:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:05.856022Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630804479034795:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:05.856089Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:06.756472Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:06.760118Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:06.762080Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpQueryServiceScripts::ExecuteScriptWithParameters [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithForgetAfter >> MoveTable::WithData >> KqpQueryServiceScripts::TestPaging [GOOD] >> KqpQueryServiceScripts::TestFetchMoreThanLimit >> KqpQueryService::AlterTable_DropNotNull_Valid [GOOD] >> KqpQueryService::AlterCdcTopic >> KqpQueryService::ShowCreateTableNotSuccess >> KqpQueryService::ExecuteCollectMeta [GOOD] >> KqpQueryService::ExecuteQuery >> KqpQueryService::CreateTempTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoServerlessTwoSharedDbs [GOOD] Test command err: 2025-06-24T21:24:12.317121Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:488:2372], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:24:12.317401Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:24:12.317598Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0047f9/r3tmp/tmp9a7fFj/pdisk_1.dat 2025-06-24T21:24:12.711318Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16377, node 1 2025-06-24T21:24:12.974786Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:12.974850Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:12.974884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:12.975530Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:12.978620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:13.106162Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:13.106315Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:13.126385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62116 2025-06-24T21:24:13.734998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:17.050294Z node 3 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 3 2025-06-24T21:24:17.085469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:17.085618Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:17.114561Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-24T21:24:17.116682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:17.327632Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:17.352229Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.352934Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.353538Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.353720Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.353841Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.354146Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.354228Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.354281Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.354333Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:17.526086Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:17.526190Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:17.539211Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:17.693490Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:17.732161Z node 3 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:24:17.732262Z node 3 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:24:17.766792Z node 3 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:24:17.767122Z node 3 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:24:17.767326Z node 3 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:24:17.767380Z node 3 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:24:17.767437Z node 3 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:24:17.767476Z node 3 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:24:17.767529Z node 3 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:24:17.767574Z node 3 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:24:17.767947Z node 3 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:24:17.798784Z node 3 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:17.798876Z node 3 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [3:1872:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:17.808194Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [3:1900:2575] 2025-06-24T21:24:17.810967Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [3:1919:2584] 2025-06-24T21:24:17.811472Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [3:1919:2584], schemeshard id = 72075186224037897 2025-06-24T21:24:17.826589Z node 3 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared1 2025-06-24T21:24:17.842361Z node 3 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:24:17.842430Z node 3 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:24:17.842509Z node 3 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared1/.metadata/_statistics 2025-06-24T21:24:17.853950Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:17.859531Z node 3 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:24:17.859688Z node 3 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:24:18.035949Z node 3 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:24:18.210575Z node 3 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:24:18.277118Z node 3 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-24T21:24:18.916594Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:19.053371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:22.527689Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:24:22.574644Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:22.574831Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:22.640481Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:24:22.642591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:22.839918Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:22.840561Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:22.841281Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:22.841464Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:22.841740Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:22.841811Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:22.841880Z node 2 :HIVE WARN: tx__creat ... 
2025-06-24T21:28:03.118618Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:12558:5265] txid# 281474976730659, issues: { message: "Check failed: path: \'/Root/Shared2/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224038898, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:03.186173Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:12587:5280]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:28:03.186672Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:28:03.186782Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:12589:5282] 2025-06-24T21:28:03.186906Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:12589:5282] 2025-06-24T21:28:03.187684Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224038895] EvServerConnected, pipe server id = [2:12590:5283] 2025-06-24T21:28:03.188206Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224038895] EvConnectNode, pipe server id = [2:12590:5283], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-24T21:28:03.188288Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224038895] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:28:03.188546Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:12589:5282], server id = [2:12590:5283], tablet id = 72075186224038895, status = OK 2025-06-24T21:28:03.188715Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:28:03.188796Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:12587:5280], StatRequests.size() = 1 2025-06-24T21:28:03.326845Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZTc1YzNkMDMtNWQwYjQwNWUtYjE1N2M3M2EtYjlkYTM0Njc=, TxId: 2025-06-24T21:28:03.326918Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZTc1YzNkMDMtNWQwYjQwNWUtYjE1N2M3M2EtYjlkYTM0Njc=, TxId: 2025-06-24T21:28:03.327681Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224038895] TTxFinishTraversal::Execute 2025-06-24T21:28:03.344927Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224038895] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224038898, LocalPathId: 3] 2025-06-24T21:28:03.344997Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224038895] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:28:03.425980Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224038895] EvFastPropagateCheck 2025-06-24T21:28:03.426066Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224038895] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-24T21:28:03.524267Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:12589:5282], schemeshard count = 1 2025-06-24T21:28:04.226307Z node 3 :STATISTICS DEBUG: schemeshard_impl.cpp:8072: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037899 2025-06-24T21:28:04.226380Z node 3 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 188.000000s, at schemeshard: 72075186224037899 2025-06-24T21:28:04.226690Z node 3 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037899, stats size# 26 2025-06-24T21:28:04.261461Z node 3 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-24T21:28:04.554101Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:28:04.565771Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:28:04.565862Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:28:04.565904Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is data table. 2025-06-24T21:28:04.565940Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-24T21:28:04.566211Z node 3 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Shared1 2025-06-24T21:28:04.571133Z node 3 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:28:04.588141Z node 3 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=3&id=M2M5ZDE3ZjYtYjFkZmU1MTUtNjMxYTAyMWUtYzVhMWEzNWM=, TxId: 2025-06-24T21:28:04.588242Z node 3 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=3&id=M2M5ZDE3ZjYtYjFkZmU1MTUtNjMxYTAyMWUtYzVhMWEzNWM=, TxId: 2025-06-24T21:28:04.588974Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:28:04.605989Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-24T21:28:04.606054Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-24T21:28:04.827757Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 119 ], ReplyToActorId[ [3:12688:5608]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:28:04.828352Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 119 ] 2025-06-24T21:28:04.828402Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 119, ReplyToActorId = [3:12688:5608], StatRequests.size() = 1 2025-06-24T21:28:07.180750Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 120 ], ReplyToActorId[ [3:12760:5636]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:28:07.181352Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 120 ] 2025-06-24T21:28:07.181410Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [3:12760:5636], StatRequests.size() = 1 2025-06-24T21:28:07.832174Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8072: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224038900 2025-06-24T21:28:07.832249Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 186.000000s, at schemeshard: 72075186224038900 2025-06-24T21:28:07.832605Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224038895] TTxSchemeShardStats::Execute: schemeshard id# 72075186224038900, stats size# 26 2025-06-24T21:28:07.857489Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224038895] TTxSchemeShardStats::Complete 2025-06-24T21:28:08.157476Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224038895] ScheduleNextTraversal 2025-06-24T21:28:08.157542Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224038895] ScheduleNextTraversal. No force traversals. 2025-06-24T21:28:08.157602Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224038895] IsColumnTable. Path [OwnerId: 72075186224038900, LocalPathId: 2] is data table. 2025-06-24T21:28:08.157639Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224038895] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224038900, LocalPathId: 2] 2025-06-24T21:28:08.158131Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared2 2025-06-24T21:28:08.161874Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-24T21:28:08.176986Z node 2 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:204: [72075186224038891] TEvIntervalQuerySummary, wrong stage: node id# 2 2025-06-24T21:28:08.177903Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ODZlMGEwYTgtYTkwNTcxZWQtMWVmOGNlMGMtYmM0M2NlNzU=, TxId: 2025-06-24T21:28:08.177961Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ODZlMGEwYTgtYTkwNTcxZWQtMWVmOGNlMGMtYmM0M2NlNzU=, TxId: 2025-06-24T21:28:08.178627Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224038895] TTxFinishTraversal::Execute 2025-06-24T21:28:08.196055Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224038895] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224038900, LocalPathId: 2] 2025-06-24T21:28:08.196120Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224038895] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-24T21:28:09.376708Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 3 2025-06-24T21:28:09.377600Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:28:09.378232Z node 3 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 3 2025-06-24T21:28:09.392828Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:28:09.392897Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-24T21:28:09.607702Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [3:12857:5656]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:28:09.608133Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-24T21:28:09.608187Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [3:12857:5656], StatRequests.size() = 1 2025-06-24T21:28:09.609385Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:12859:5370]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:28:09.616476Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-24T21:28:09.616901Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:171: [72075186224038895] EvRequestStats, node id = 2, schemeshard count = 1, urgent = 0 2025-06-24T21:28:09.616960Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224038895] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-24T21:28:09.617281Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:28:09.617395Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:12859:5370], StatRequests.size() = 1 >> KqpQueryServiceScripts::ForgetScriptExecution [GOOD] >> KqpQueryServiceScripts::ValidateScript >> TColumnShardTestSchema::OneColdTier >> KqpQueryService::TableSink_OlapDelete [GOOD] >> KqpQueryServiceScripts::ParseScript [GOOD] >> KqpQueryServiceScripts::ListScriptExecutions >> KqpQueryService::ReadManyRanges [GOOD] >> KqpQueryService::ReadManyShardsRange ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::ForgetScriptExecution [GOOD] Test command err: Trying to start YDB, gRPC: 11567, MsgBus: 3434 2025-06-24T21:27:35.496568Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630698674724844:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:35.496713Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030cc/r3tmp/tmprGWQ4D/pdisk_1.dat 2025-06-24T21:27:35.939557Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11567, node 1 2025-06-24T21:27:35.957907Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:35.958005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:35.960545Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:36.015651Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:36.015674Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:36.015682Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T21:27:36.015833Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3434 TClient is connected to server localhost:3434 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:27:36.507460Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:36.619857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:36.631347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:27:36.644283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:36.809868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:36.996448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:27:37.077746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:38.711111Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630711559628331:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:38.711204Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:39.078155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:39.112330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:39.142194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:39.175564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:39.206467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:39.239569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:39.312488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:39.391585Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630715854596285:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:39.391667Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:39.392171Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630715854596290:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:39.397318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:39.416394Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630715854596292:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:39.487426Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630715854596343:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:40.497387Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630698674724844:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:40.497494Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:40.575909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:40.577686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, b ... 7968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:51.972080Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15673, node 3 2025-06-24T21:27:52.022739Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:52.022766Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:52.022787Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:52.022962Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8824 TClient is connected to server localhost:8824 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:52.642828Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:52.652473Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:27:52.665043Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:52.744212Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:52.843953Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:52.917452Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:52.993556Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:55.644236Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630784291654616:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:55.644337Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:55.710783Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.751901Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.786917Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.820430Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.851739Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.888413Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:55.965887Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:56.071708Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630788586622574:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:56.071836Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:56.072282Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630788586622579:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:56.077353Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:56.093365Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630788586622581:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:56.162833Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630788586622632:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:56.791696Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630767111783826:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:56.791784Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:57.390910Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:57.393230Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:57.395361Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:06.931582Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:28:06.931614Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded >> KqpService::Shutdown [GOOD] >> KqpService::SessionBusyRetryOperationSync ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_OlapDelete [GOOD] Test command err: Trying to start YDB, gRPC: 20402, MsgBus: 14441 2025-06-24T21:27:53.758571Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630774242394834:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:53.759552Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030b4/r3tmp/tmpkkyQsd/pdisk_1.dat 2025-06-24T21:27:54.160158Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20402, node 1 2025-06-24T21:27:54.160441Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630774242394716:2079] 1750800473740264 != 1750800473740267 
2025-06-24T21:27:54.194853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:54.195425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:54.197818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:54.243676Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:54.243698Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:54.243713Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:54.243814Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14441 TClient is connected to server localhost:14441 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:27:54.761079Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:54.842836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:54.858083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:27:56.963240Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630787127297245:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:56.963377Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:57.352784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:27:57.560520Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630791422264694:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:57.560520Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519630791422264693:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:57.560746Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519630791422264693:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:57.560893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630791422264694:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:57.561226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630791422264694:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:57.561226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519630791422264693:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:57.561396Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519630791422264693:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:57.561522Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519630791422264693:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:57.561571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630791422264694:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:57.561658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519630791422264693:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:57.561687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630791422264694:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:57.561764Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519630791422264693:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:57.561797Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630791422264694:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:57.561889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519630791422264693:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:57.561916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630791422264694:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:57.561998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519630791422264693:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:57.562026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630791422264694:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:57.562130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630791422264694:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:57.562151Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519630791422264693:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:57.562265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630791422264694:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:57.562269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519630791422264693:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:57.562368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519630791422264694:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:57.600053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519630791422264695:2301];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:57.600053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037894;self_id=[1:7519630791422264691:2297];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:57.600108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519630791422264691:2297];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:57.600108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:7 ... led to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:10.584737Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:10.585059Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630848463591854:2364], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:10.589638Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:10.604303Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630848463591856:2365], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:28:10.699975Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630848463591907:2572] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:10.903883Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:28:10.903883Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:28:10.904597Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-24T21:28:11.323317Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630831283721587:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:11.323411Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:11.849272Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:28:11.849554Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:28:11.849737Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:28:11.850465Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:28:11.850633Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:28:11.851076Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:28:11.851249Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:28:11.851311Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:28:11.851446Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:28:11.851612Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-24T21:28:12.301863Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[3:7519630848463591490:2296];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037888;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.301940Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[3:7519630848463591490:2296];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037888;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.302054Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7519630848463591466:2295];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037889;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.302078Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7519630848463591466:2295];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037889;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.302143Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7519630848463591620:2304];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037890;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.302164Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7519630848463591620:2304];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037890;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.302227Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[3:7519630848463591509:2297];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037891;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.302247Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[3:7519630848463591509:2297];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037891;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.302310Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[3:7519630848463591514:2301];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037892;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.302332Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037892;self_id=[3:7519630848463591514:2301];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037892;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.302394Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[3:7519630848463591512:2300];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037893;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.302417Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[3:7519630848463591512:2300];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037893;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.302521Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[3:7519630848463591520:2302];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037894;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.302544Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[3:7519630848463591520:2302];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037894;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.302618Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[3:7519630848463591535:2303];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037895;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.302642Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[3:7519630848463591535:2303];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037895;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.302752Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[3:7519630848463591511:2299];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037896;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.302797Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[3:7519630848463591511:2299];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037896;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.302873Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519630848463591510:2298];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.302894Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519630848463591510:2298];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-24T21:28:12.431096Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-24T21:28:12.432297Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037890;tx_state=TTxProgressTx::Complete;commit_tx_id=281474976715672;commit_lock_id=281474976715671;fline=manager.cpp:94;broken_lock_id=281474976715669; >> KqpQueryService::Ddl >> KqpQueryService::ShowCreateSysView [GOOD] >> KqpQueryService::ExecuteQueryExplicitTxTLI [GOOD] >> KqpQueryService::ExecuteQueryInteractiveTx >> KqpQueryService::FlowControllOnHugeLiteralAsTable [GOOD] >> KqpQueryService::FlowControllOnHugeRealTable+LongRow >> KqpReturning::ReturningWorksIndexedOperationsWithDefault+QueryService [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ShowCreateSysView [GOOD] Test command err: Trying to start YDB, gRPC: 22019, MsgBus: 19903 2025-06-24T21:27:57.018479Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630788150448483:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:57.018576Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030ae/r3tmp/tmp7gFBMy/pdisk_1.dat TServer::EnableGrpc on GrpcPort 22019, node 1 2025-06-24T21:27:57.436561Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:57.456162Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:57.456306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:57.475691Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:57.516391Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:57.516413Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:57.516424Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:57.516554Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19903 2025-06-24T21:27:58.000036Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19903 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:58.138377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:58.150283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:27:58.163789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:58.320852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:58.483294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:58.567359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:00.310187Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630805330319263:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:00.310294Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:00.637431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:00.676014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:00.707908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:00.786670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:00.843882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:00.922411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:01.022050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:01.091894Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630809625287224:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:01.091991Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:01.092151Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630809625287229:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:01.095961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:01.110035Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630809625287231:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:01.193296Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630809625287282:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:02.027268Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630788150448483:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:02.027533Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:02.422180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 65121, MsgBus: 19872 2025-06-24T21:28:03.443220Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: ... ET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:09.418193Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29282 TClient is connected to server localhost:29282 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:09.986684Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:28:09.993244Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:10.009997Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:10.070671Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:10.222258Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:10.274393Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:10.356008Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:12.853418Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630857522736658:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:12.853530Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:12.905200Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:12.979837Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.050641Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.088977Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.124904Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.202278Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.272213Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.341911Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630861817704623:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:13.342007Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:13.342279Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630861817704628:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:13.346698Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:13.360502Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630861817704630:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:13.437880Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630861817704681:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:14.187787Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630844637833163:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:14.187862Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:14.570003Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:14.655779Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519630866112672333:2487], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:17: Error: At function: KiReadTable!
:2:17: Error: Cannot find table 'db.[/Root/.sys/show_create]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:28:14.655992Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=OWZhNGZkYjUtMTJkY2NiMTktNDBhNGUyOGItNmU5MjIwMWY=, ActorId: [3:7519630866112672248:2473], ActorState: ExecuteState, TraceId: 01jyhxew0p55pha3ppn7c1nqbh, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:28:14.692484Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519630866112672346:2490], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:17: Error: At function: KiReadTable!
:2:17: Error: Cannot find table 'db.[/Root/.sys/show_create]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:28:14.692876Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=OWZhNGZkYjUtMTJkY2NiMTktNDBhNGUyOGItNmU5MjIwMWY=, ActorId: [3:7519630866112672248:2473], ActorState: ExecuteState, TraceId: 01jyhxew258snd2j6q1wb1m897, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: >> KqpDataIntegrityTrails::BrokenReadLock+UseSink [GOOD] >> KqpQueryService::ShowCreateViewOnTable [GOOD] >> KqpSqlIn::TupleNotOnlyOfKeys [GOOD] >> TPQTest::TestSourceIdDropBySourceIdCount [GOOD] >> TPQTest::TestSetClientOffset ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> MoveTable::WithData Test command err: 2025-06-24T21:28:12.251885Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:28:12.276165Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:28:12.276490Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:28:12.283328Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:28:12.283568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:28:12.283826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:28:12.283936Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:28:12.284096Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:28:12.284233Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:28:12.284392Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:28:12.284598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:28:12.284745Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:28:12.284886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:28:12.285035Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:28:12.317921Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:28:12.318200Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:28:12.318259Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:28:12.318464Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:28:12.318628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:28:12.318713Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:28:12.318757Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:28:12.318858Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:28:12.318947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:28:12.318997Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:28:12.319030Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:28:12.319265Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:28:12.319360Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:28:12.319406Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 
2025-06-24T21:28:12.319461Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:28:12.319560Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:28:12.319610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:28:12.319663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:28:12.319692Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:28:12.319750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:28:12.319789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:28:12.319846Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:28:12.320071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:28:12.320121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:28:12.320157Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:28:12.320389Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:28:12.320443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:28:12.320476Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:28:12.320599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:28:12.320656Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:28:12.320691Z 
node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:28:12.320826Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:28:12.320898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:28:12.320946Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:28:12.320977Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:28:12.321430Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=44; 2025-06-24T21:28:12.321586Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=72; 2025-06-24T21:28:12.321678Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=43; 2025-06-24T21:28:12.321761Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=36; 2025-06-24T21:28:12.321870Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:28:12.321976Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:28:12.322048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:28:12.322096Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
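Aside: the TX_COLUMNSHARD trace above walks a fixed chain of normalizers (Granules, Chunks, TablesCleaner, CleanGranuleId, ..., RestoreV0ChunksMeta), each one initialized, run against the local data ("0 chunks found"), and marked finished before control switches to the next, ending in normalization_finished. A minimal, hypothetical C++ sketch of that sequential-chain shape follows; the interface, class, and stage names are illustrative assumptions, not the YDB implementation.

#include <cstdio>
#include <memory>
#include <string>
#include <vector>

// Interface and names are illustrative assumptions, not the YDB classes.
struct INormalizer {
    virtual ~INormalizer() = default;
    virtual std::string Name() const = 0;
    virtual size_t Run() = 0;  // returns the number of chunks it touched
};

struct TrivialNormalizer : INormalizer {
    std::string name;
    explicit TrivialNormalizer(std::string n) : name(std::move(n)) {}
    std::string Name() const override { return name; }
    size_t Run() override { return 0; }  // nothing to repair in this sketch
};

int main() {
    std::vector<std::string> stages = {
        "Granules", "Chunks", "TablesCleaner", "CleanGranuleId", "RestoreV1Chunks_V2"};
    std::vector<std::unique_ptr<INormalizer>> chain;
    for (const auto& s : stages) {
        chain.push_back(std::make_unique<TrivialNormalizer>(s));
    }
    // Each stage is initialized, run, and finished before the next starts.
    for (size_t i = 0; i < chain.size(); ++i) {
        std::printf("normalizer_init;type=%s\n", chain[i]->Name().c_str());
        std::printf("%zu chunks found\n", chain[i]->Run());
        std::printf("normalizer_finished;id=%zu\n", i + 1);
    }
    std::printf("normalization_finished\n");
    return 0;
}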
-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:32:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224:5 #28 0x10607ea7 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169:12 #29 0x10607ea7 in std::__y1::__function::__func, void ()>::operator()() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314:10 #30 0x10e8aab5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12 #31 0x10e8aab5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10 #32 0x10e8aab5 in TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/utmain.cpp:525:20 #33 0x10e63518 in NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/registar.cpp:373:18 #34 0x10607073 in NKikimr::NTestSuiteMoveTable::TCurrentTest::Execute() /-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:32:1 #35 0x10e64de5 in NUnitTest::TTestFactory::Execute() /-S/library/cpp/testing/unittest/registar.cpp:494:19 #36 0x10e8502c in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #37 0x7f6ed0927d8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) previously allocated by thread T0 here: #0 0x106d317d in operator new(unsigned long) /-S/contrib/libs/clang18-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x2b088365 in __libcpp_operator_new /-S/contrib/libs/cxxsupp/libcxx/include/new:271:10 #2 0x2b088365 in __libcpp_allocate /-S/contrib/libs/cxxsupp/libcxx/include/new:295:10 #3 0x2b088365 in allocate /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocator.h:103:32 #4 0x2b088365 in get_node /-S/util/generic/hash_table.h:497:43 #5 0x2b088365 in new_node /-S/util/generic/hash_table.h:947:19 #6 0x2b088365 in emplace_unique_noresize /-S/util/generic/hash_table.h:1018:17 #7 0x2b088365 in std::__y1::pair<__yhashtable_iterator>, bool> THashTable, NKikimr::NColumnShard::TSchemeShardLocalPathId, THash, TSelect1st, TEqualTo, std::__y1::allocator>::emplace_unique(NKikimr::NColumnShard::TSchemeShardLocalPathId const&, NKikimr::NColumnShard::TInternalPathId const&) /-S/util/generic/hash_table.h:710:16 #8 0x2b080b49 in emplace /-S/util/generic/hash.h:177:20 #9 0x2b080b49 in NKikimr::NColumnShard::TTablesManager::MoveTablePropose(NKikimr::NColumnShard::TSchemeShardLocalPathId) /-S/ydb/core/tx/columnshard/tables_manager.cpp:454:5 #10 0x105187f1 in NKikimr::NColumnShard::TSchemaTransactionOperator::DoStartProposeOnExecute(NKikimr::NColumnShard::TColumnShard&, NKikimr::NTabletFlatExecutor::TTransactionContext&) /-S/ydb/core/tx/columnshard/transactions/operators/schema.cpp:147:33 #11 0x265f1628 in NKikimr::NColumnShard::TTxController::ITransactionOperator::StartProposeOnExecute(NKikimr::NColumnShard::TColumnShard&, NKikimr::NTabletFlatExecutor::TTransactionContext&) /-S/ydb/core/tx/columnshard/transactions/tx_controller.h:362:32 #12 0x265ed1a2 in NKikimr::NColumnShard::TTxController::StartProposeOnExecute(NKikimr::NColumnShard::TFullTxInfo const&, TBasicString> const&, NKikimr::NTabletFlatExecutor::TTransactionContext&) /-S/ydb/core/tx/columnshard/transactions/tx_controller.cpp:356:25 #13 0x266df482 in NKikimr::NColumnShard::TTxProposeTransaction::Execute(NKikimr::NTabletFlatExecutor::TTransactionContext&, NActors::TActorContext const&) /-S/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp:75:54 #14 0x18e3b944 
in NKikimr::NTabletFlatExecutor::TExecutor::ExecuteTransaction(NKikimr::NTabletFlatExecutor::TSeat*) /-S/ydb/core/tablet_flat/flat_executor.cpp:1976:35 #15 0x18e37321 in NKikimr::NTabletFlatExecutor::TExecutor::DoExecute(TAutoPtr, NKikimr::NTabletFlatExecutor::TExecutor::ETxMode) /-S/ydb/core/tablet_flat/flat_executor.cpp:1880:13 #16 0x18e3e4de in NKikimr::NTabletFlatExecutor::TExecutor::Execute(TAutoPtr, NActors::TActorContext const&) /-S/ydb/core/tablet_flat/flat_executor.cpp:1894:5 #17 0x18dd546a in Execute /-S/ydb/core/tablet_flat/tablet_flat_executed.cpp:62:46 #18 0x18dd546a in NKikimr::NTabletFlatExecutor::TTabletExecutedFlat::Execute(TAutoPtr, NActors::TActorContext const&) /-S/ydb/core/tablet_flat/tablet_flat_executed.cpp:57:5 #19 0x266dc00a in NKikimr::NColumnShard::TColumnShard::Handle(TAutoPtr, TDelete>&, NActors::TActorContext const&) /-S/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp:123:5 #20 0x2665df8d in NKikimr::NColumnShard::TColumnShard::StateWork(TAutoPtr&) /-S/ydb/core/tx/columnshard/columnshard_impl.h:409:13 #21 0x11f29ccc in NActors::IActor::Receive(TAutoPtr&) /-S/ydb/library/actors/core/actor.cpp:280:13 #22 0x2d9498b4 in NActors::TTestActorRuntimeBase::SendInternal(TAutoPtr, unsigned int, bool) /-S/ydb/library/actors/testlib/test_runtime.cpp:1702:33 #23 0x2d942129 in NActors::TTestActorRuntimeBase::DispatchEventsInternal(NActors::TDispatchOptions const&, TInstant) /-S/ydb/library/actors/testlib/test_runtime.cpp:1295:45 #24 0x2d94c4a3 in NActors::TTestActorRuntimeBase::WaitForEdgeEvents(std::__y1::function&)>, TSet, std::__y1::allocator> const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.cpp:1554:22 #25 0x3211dc73 in NKikimr::TEvColumnShard::TEvProposeTransactionResult::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEventIf(TSet, std::__y1::allocator> const&, std::__y1::function const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.h:477:13 #26 0x3211d812 in GrabEdgeEvent /-S/ydb/library/actors/testlib/test_runtime.h:526:20 #27 0x3211d812 in NKikimr::TEvColumnShard::TEvProposeTransactionResult::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEvent(NActors::TActorId const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.h:532:20 #28 0x320fd0fa in NKikimr::NTxUT::(anonymous namespace)::ProposeSchemaTxOptional(NActors::TTestBasicRuntime&, NActors::TActorId&, TBasicString> const&, unsigned long) /-S/ydb/core/tx/columnshard/test_helper/columnshard_ut_common.cpp:66:23 #29 0x320feb76 in NKikimr::NTxUT::ProposeSchemaTx(NActors::TTestBasicRuntime&, NActors::TActorId&, TBasicString> const&, unsigned long) /-S/ydb/core/tx/columnshard/test_helper/columnshard_ut_common.cpp:85:25 #30 0x10600176 in NKikimr::NTestSuiteMoveTable::TTestCaseWithData::Execute_(NUnitTest::TTestContext&) /-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:88:20 #31 0x10607ea7 in operator() /-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:32:1 #32 0x10607ea7 in __invoke<(lambda at /-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:32:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:25 #33 0x10607ea7 in __call<(lambda at /-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:32:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224:5 #34 0x10607ea7 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169:12 #35 0x10607ea7 in std::__y1::__function::__func, void ()>::operator()() 
/-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314:10 #36 0x10e8aab5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12 #37 0x10e8aab5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10 #38 0x10e8aab5 in TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/utmain.cpp:525:20 #39 0x10e63518 in NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/registar.cpp:373:18 #40 0x10607073 in NKikimr::NTestSuiteMoveTable::TCurrentTest::Execute() /-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:32:1 #41 0x10e64de5 in NUnitTest::TTestFactory::Execute() /-S/library/cpp/testing/unittest/registar.cpp:494:19 #42 0x10e8502c in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #43 0x7f6ed0927d8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) SUMMARY: AddressSanitizer: heap-use-after-free /-S/contrib/libs/cxxsupp/libcxx/include/__utility/pair.h:149:22 in pair Shadow bytes around the buggy address: 0x503000aa4f00: fa fa fd fd fd fd fa fa fd fd fd fd fa fa 00 00 0x503000aa4f80: 00 00 fa fa 00 00 00 00 fa fa fd fd fd fd fa fa 0x503000aa5000: fd fd fd fd fa fa fd fd fd fd fa fa fd fd fd fd 0x503000aa5080: fa fa fd fd fd fd fa fa fd fd fd fd fa fa fd fd 0x503000aa5100: fd fd fa fa fd fd fd fd fa fa fd fd fd fd fa fa =>0x503000aa5180: fd fd fd fd fa fa fd fd[fd]fa fa fa 00 00 00 00 0x503000aa5200: fa fa fd fd fd fd fa fa fd fd fd fd fa fa fd fd 0x503000aa5280: fd fd fa fa fd fd fd fa fa fa fd fd fd fa fa fa 0x503000aa5300: fd fd fd fa fa fa fd fd fd fd fa fa fd fd fd fd 0x503000aa5380: fa fa fd fd fd fd fa fa fd fd fd fd fa fa 00 00 0x503000aa5400: 00 00 fa fa fd fd fd fa fa fa fd fd fd fa fa fa Shadow byte legend (one shadow byte represents 8 application bytes): Addressable: 00 Partially addressable: 01 02 03 04 05 06 07 Heap left redzone: fa Freed heap region: fd Stack left redzone: f1 Stack mid redzone: f2 Stack right redzone: f3 Stack after return: f5 Stack use after scope: f8 Global redzone: f9 Global init order: f6 Poisoned by user: f7 Container overflow: fc Array cookie: ac Intra object redzone: bb ASan internal: fe Left alloca redzone: ca Right alloca redzone: cb ==996001==ABORTING ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpReturning::ReturningWorksIndexedOperationsWithDefault+QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 8499, MsgBus: 21862 2025-06-24T21:27:04.777744Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630562905487861:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:04.778170Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003251/r3tmp/tmptvfwOi/pdisk_1.dat 2025-06-24T21:27:05.129824Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8499, node 1 2025-06-24T21:27:05.173855Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:05.174004Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:05.174717Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:05.174744Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:05.174752Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:05.174938Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:27:05.175937Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21862 TClient is connected to server localhost:21862 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:05.637376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:05.665081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:05.781749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
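For context on the AddressSanitizer report above (MoveTable::WithData), here is a minimal, hypothetical C++ sketch of the same failure class: a heap-use-after-free through a reference into a freed container node. It is not YDB code, and the build command is an assumption. In the report itself, the bracketed shadow byte 'fd' at the faulting address marks a freed heap region, which is what ASan means by heap-use-after-free.

// Minimal, hypothetical sketch (not YDB code) of the failure class in the
// AddressSanitizer report above: a reference into a node-based hash
// container is kept, the node is freed, and the stale reference is read.
// Assumed build command: clang++ -g -fsanitize=address uaf.cpp
#include <cstdio>
#include <unordered_map>

int main() {
    std::unordered_map<int, int> m;
    m.emplace(1, 100);

    // Reference to the stored pair, which lives in a heap-allocated node.
    const auto& entry = *m.find(1);

    // Erasing the key frees that node; 'entry' now dangles.
    m.erase(1);

    // Reading through the dangling reference is the use-after-free that
    // ASan reports; the 'fd' shadow bytes mark the freed heap region.
    std::printf("%d\n", entry.second);
    return 0;
}

Compiled with -fsanitize=address, this aborts with a report of the same shape as the one above: the use stack, a "freed by" stack, a "previously allocated by" stack, and a shadow-byte dump around the faulting address.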
2025-06-24T21:27:05.849712Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:05.934410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:06.010990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:07.839865Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630575790391358:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:07.839991Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:08.173619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:08.240968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:08.309025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:08.335931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:08.363189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:08.395035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:08.424559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:08.502850Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630580085359320:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:08.502914Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:08.502958Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630580085359325:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:08.506223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:08.516330Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630580085359327:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:08.578130Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630580085359378:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:09.590143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:09.626267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:09.663775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/ ... :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519630829978157796:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:06.508031Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003251/r3tmp/tmpNfxGzR/pdisk_1.dat 2025-06-24T21:28:06.742786Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519630829978157775:2079] 1750800486507282 != 1750800486507285 2025-06-24T21:28:06.753950Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:06.758387Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:06.758510Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:06.759596Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10740, node 7 2025-06-24T21:28:06.819686Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:06.819715Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:06.819729Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:06.819928Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23351 2025-06-24T21:28:07.523669Z node 7 :TX_CONVEYOR ERROR: 
log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23351 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:07.742056Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:07.751426Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:07.758510Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:07.866867Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:08.116959Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:08.278735Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:11.511309Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519630829978157796:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:11.511393Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:11.631728Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630851452995906:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:11.631835Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:11.703520Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:11.758111Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:11.805321Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:11.848194Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:11.899019Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:11.959419Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:12.041735Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:12.129769Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630855747963860:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:12.129907Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:12.130447Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630855747963865:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:12.135640Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:12.153014Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519630855747963867:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:12.225427Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519630855747963918:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:13.916273Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpQueryService::ExecuteQuery [GOOD] >> KqpQueryService::ExecuteQueryExplicitBeginCommitRollback ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::BrokenReadLock+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 19855, MsgBus: 10831 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044d9/r3tmp/tmpFO9bAs/pdisk_1.dat TServer::EnableGrpc on GrpcPort 19855, node 1 TClient is connected to server localhost:10831 TClient is connected to server localhost:10831 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... 
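The sequence repeated throughout these logs, "Failed to fetch pool default ... NOT_FOUND", then ESchemeOpCreateResourcePool, then "Transaction ... completed, doublechecking", then "path exist, request accepts it", reads like a lazy, idempotent creation of the default resource pool in which losing the creation race is tolerated. A self-contained, hypothetical sketch of that fetch-or-create pattern follows; Catalog, EnsurePool, and the Status values are invented names, not the YDB implementation.

#include <iostream>
#include <mutex>
#include <string>
#include <unordered_set>

enum class Status { Ok, NotFound, AlreadyExists };

// Stand-in for the scheme catalog; invented for illustration only.
struct Catalog {
    std::mutex m;
    std::unordered_set<std::string> pools;

    Status Fetch(const std::string& name) {
        std::lock_guard<std::mutex> g(m);
        return pools.count(name) ? Status::Ok : Status::NotFound;
    }
    Status Create(const std::string& name) {
        std::lock_guard<std::mutex> g(m);
        return pools.insert(name).second ? Status::Ok : Status::AlreadyExists;
    }
};

// Fetch-or-create: AlreadyExists from Create() is treated as success,
// mirroring the "path exist, request accepts it" outcome, and the final
// Fetch() plays the role of the "doublechecking" step seen in the log.
Status EnsurePool(Catalog& c, const std::string& name) {
    if (c.Fetch(name) == Status::Ok) {
        return Status::Ok;
    }
    Status created = c.Create(name);
    if (created == Status::AlreadyExists) {
        created = Status::Ok;  // lost a benign creation race
    }
    return c.Fetch(name) == Status::Ok ? created : Status::NotFound;
}

int main() {
    Catalog catalog;
    std::cout << (EnsurePool(catalog, "default") == Status::Ok) << "\n";  // prints 1
    std::cout << (EnsurePool(catalog, "default") == Status::Ok) << "\n";  // prints 1
    return 0;
}

Under this reading, treating AlreadyExists as success is what makes the "path exist, request accepts it" outcome benign rather than a real failure.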
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ShowCreateViewOnTable [GOOD] Test command err: Trying to start YDB, gRPC: 4809, MsgBus: 30026 2025-06-24T21:27:58.604695Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630796493500104:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:58.604737Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00309f/r3tmp/tmpgmo3Rh/pdisk_1.dat 2025-06-24T21:27:58.965403Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630796493500073:2079] 1750800478603624 != 1750800478603627 2025-06-24T21:27:58.973334Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4809, node 1 2025-06-24T21:27:59.047551Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:59.048250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:59.050661Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:59.082375Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:59.082398Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:59.082404Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:59.082498Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30026 TClient is connected to server localhost:30026 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:27:59.613689Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:59.673213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:59.690801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:59.822102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:00.002053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:00.075990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:01.981071Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630809378403616:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:01.981214Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:02.342768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:02.387952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:02.425821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:02.457924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:02.494964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:02.533557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:02.575304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:02.643761Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630813673371567:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:02.643854Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:02.643931Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630813673371572:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:02.647298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:02.658531Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630813673371574:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:02.735007Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630813673371625:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:03.606314Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630796493500104:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:03.606422Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 15635, MsgBus: 16962 2025-06-24T21:28:04.861230Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630823716155980:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:04.861294Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath ... WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:10.977781Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630848614177521:2079] 1750800490825268 != 1750800490825271 2025-06-24T21:28:10.980081Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:10.980177Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:10.981650Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19048, node 3 2025-06-24T21:28:11.079536Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:11.079563Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:11.079576Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:11.079706Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27638 TClient is connected to server localhost:27638 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:11.612019Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:11.628653Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:11.712017Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:11.846294Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:28:11.886284Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:11.971804Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:14.604016Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630865794048332:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:14.604101Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:14.666812Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:14.735872Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:14.768474Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:14.831454Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:14.871954Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:14.914999Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:14.951130Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:15.008658Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630870089016290:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:15.008721Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:15.008843Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630870089016295:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:15.011423Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:15.022993Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630870089016297:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:15.121375Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630870089016348:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:15.828567Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630848614177580:2091];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:15.828642Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:16.370857Z node 3 :SYSTEM_VIEWS ERROR: scan_actor_base_impl.h:98: Scan error, actor: [3:7519630874383983938:2484], owner: [3:7519630874383983935:2482], scan id: 0, sys view info: Type: EShowCreate SourceObject { OwnerId: 1 LocalId: 0 }, error: Path type mismatch, expected: View, found: Table 2025-06-24T21:28:16.371889Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7519630874383983936:2483], TxId: 281474976715672, task: 2. Ctx: { TraceId : 01jyhxexja6dmb6n5snhr9m54w. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=3&id=ZDczM2ZhZmQtOGQwNzZmODgtZTUzNzM4OWItNDhmZGVjMWM=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [3:7519630874383983932:2473], status: BAD_REQUEST, reason: {
: Error: Terminate execution } 2025-06-24T21:28:16.372338Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=ZDczM2ZhZmQtOGQwNzZmODgtZTUzNzM4OWItNDhmZGVjMWM=, ActorId: [3:7519630874383983916:2473], ActorState: ExecuteState, TraceId: 01jyhxexja6dmb6n5snhr9m54w, Create QueryResponse for error on request, msg: >> KqpQueryService::ShowCreateTableNotSuccess [GOOD] >> KqpQueryService::ShowCreateTableOnView >> KqpQueryService::ReadManyShardsRange [GOOD] >> KqpQueryService::ReadManyRangesAndPoints >> KqpQueryService::CreateTempTable [GOOD] >> KqpQueryService::CreateAndDropTopic >> KqpQueryServiceScripts::ValidateScript [GOOD] >> KqpQueryServiceScripts::TestTruncatedByRows ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpSqlIn::TupleNotOnlyOfKeys [GOOD] Test command err: Trying to start YDB, gRPC: 63907, MsgBus: 63324 2025-06-24T21:27:04.260002Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630565840420246:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:04.267746Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003255/r3tmp/tmpYLj4qo/pdisk_1.dat 2025-06-24T21:27:04.587287Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630565840420209:2079] 1750800424258433 != 1750800424258436 2025-06-24T21:27:04.595664Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63907, node 1 2025-06-24T21:27:04.670548Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:04.670948Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:04.676161Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:04.689435Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:04.689455Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:04.689473Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:04.689613Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63324 TClient is connected to server localhost:63324 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:05.177400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:05.203056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:05.274322Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:05.333872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:05.486043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:05.561447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:07.101164Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630578725323742:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:07.101262Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:07.424214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:07.457273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:07.495670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:07.529022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:07.559920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:07.632551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:07.673103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:07.743506Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630578725324402:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:07.743592Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:07.743852Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630578725324407:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:07.748600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:07.764606Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630578725324409:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:07.852712Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630578725324460:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:08.919545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:08.954407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:09.035713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part propo ... ble configuration TClient is connected to server localhost:14467 TClient is connected to server localhost:14467 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:05.114240Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:28:05.114253Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 
2025-06-24T21:28:05.121005Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:05.131311Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:05.216103Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:05.445676Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:05.538119Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:08.995404Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630838224082332:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:08.995556Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:09.084652Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:09.105371Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519630821044211520:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:09.105463Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:09.210083Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:09.289206Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:09.339389Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:09.399210Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:09.471843Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:09.559211Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:09.643802Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519630842519050293:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:09.643934Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:09.644262Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519630842519050298:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:09.649067Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:09.661948Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519630842519050300:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:09.736507Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519630842519050351:3431] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:11.310522Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:11.406748Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:11.465573Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667)
: Warning: Type annotation, code: 1030
:5:21: Warning: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:6:26: Warning: At function: Filter, At lambda, At function: Coalesce
:7:37: Warning: At function: SqlIn
:7:37: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108
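The warning above points at 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;'. A minimal YQL sketch of how that pragma can be applied, assuming a hypothetical table my_table with a nullable column value (these names are illustrative and do not come from this test run):

-- Switches IN to the ANSI-style handling of NULLs and empty collections
-- that the warning above recommends.
PRAGMA AnsiInForEmptyOrNullableItemsCollections;

SELECT *
FROM my_table              -- hypothetical table, not part of this test suite
WHERE value IN (1, 2, 3);  -- value is assumed to be a nullable column
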
: Warning: Type annotation, code: 1030
:5:21: Warning: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:6:26: Warning: At function: Filter, At lambda, At function: Coalesce
:7:37: Warning: At function: SqlIn
:7:37: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 >> KqpQueryService::AlterCdcTopic [GOOD] >> TColumnShardTestSchema::RebootHotTiersAfterTtl [GOOD] >> KqpQueryPerf::IdxLookupJoinThreeWay+QueryService >> KqpQueryPerf::Replace-QueryService+UseSink >> KqpQueryPerf::Insert-QueryService-UseSink >> KqpQueryPerf::IndexInsert-QueryService-UseSink >> KqpQueryPerf::Update+QueryService-UseSink >> KqpWorkload::KV >> KqpQueryPerf::RangeRead+QueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootHotTiersAfterTtl [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=150801034.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801034.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801034.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150801034.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130801034.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801034.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150801034.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799834.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130801034.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130801034.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799834.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130799834.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130799834.000000s;Name=;Codec=}; 2025-06-24T21:27:14.554486Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:27:14.583822Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:27:14.584202Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:27:14.592342Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:14.592617Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:14.592900Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:14.593035Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:14.593146Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:14.593273Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:14.593384Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:14.593503Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:14.593613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:14.593743Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:14.593879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:14.626539Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:27:14.626832Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:27:14.626923Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:27:14.627158Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:14.627357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:27:14.627444Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:27:14.627506Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:27:14.627604Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:27:14.627669Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:27:14.627716Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:27:14.627748Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:27:14.627945Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:14.628009Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:27:14.628053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:27:14.628089Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:27:14.628185Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:27:14.628244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:27:14.628303Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:27:14.628337Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:27:14.628403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:27:14.628445Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:27:14.628481Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 
2025-06-24T21:27:14.628694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:27:14.628740Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:27:14.628771Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:27:14.628993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:27:14.629078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:27:14.629119Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:27:14.629270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:27:14.629323Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:27:14.629358Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:27:14.629443Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:27:14.629515Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute; ... 
INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=11; 2025-06-24T21:28:18.731043Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=171; 2025-06-24T21:28:18.731087Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=10769; 2025-06-24T21:28:18.731133Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=10884; 2025-06-24T21:28:18.731211Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=12; 2025-06-24T21:28:18.731416Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=163; 2025-06-24T21:28:18.731453Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=11666; 2025-06-24T21:28:18.731616Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=109; 2025-06-24T21:28:18.731734Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=65; 2025-06-24T21:28:18.731875Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=94; 2025-06-24T21:28:18.732009Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=87; 2025-06-24T21:28:18.736014Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=3949; 2025-06-24T21:28:18.739853Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=3759; 2025-06-24T21:28:18.739941Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=13; 2025-06-24T21:28:18.740010Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=12; 2025-06-24T21:28:18.740053Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=7; 2025-06-24T21:28:18.740126Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=37; 2025-06-24T21:28:18.740173Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=6; 2025-06-24T21:28:18.740276Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=66; 2025-06-24T21:28:18.740323Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=6; 2025-06-24T21:28:18.740389Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=32; 2025-06-24T21:28:18.740474Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=51; 2025-06-24T21:28:18.740837Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=327; 2025-06-24T21:28:18.740878Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=31287; 2025-06-24T21:28:18.741003Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=29251936;raw_bytes=43173354;count=6;records=480000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:28:18.741108Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:28:18.741157Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:28:18.741219Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:28:18.758657Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-24T21:28:18.758806Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:28:18.758887Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=3; 2025-06-24T21:28:18.758953Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800184374;tx_id=18446744073709551615;;current_snapshot_ts=1750800435852; 2025-06-24T21:28:18.758994Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=3;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:28:18.759041Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:28:18.759078Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:28:18.759237Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:28:18.759760Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;self_id=[1:2066:3867];tablet_id=9437184;parent=[1:2027:3836];fline=manager.cpp:88;event=ask_data;request=request_id=86;9438184000001={portions_count=6};; 2025-06-24T21:28:18.760474Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:28:18.760887Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:28:18.760921Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:28:18.760957Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:28:18.760998Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:28:18.761082Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=3; 2025-06-24T21:28:18.761141Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800184374;tx_id=18446744073709551615;;current_snapshot_ts=1750800435852; 2025-06-24T21:28:18.761180Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=3;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:28:18.761223Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:28:18.761272Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:28:18.761358Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 160000/9752224 160000/9752224 160000/9752224 80000/4886744 0/0 >> KqpQueryPerf::IndexDeleteOn-QueryService-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::AlterCdcTopic [GOOD] Test command err: Trying to start YDB, gRPC: 9311, MsgBus: 12330 2025-06-24T21:27:55.347497Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630784740011885:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:55.347541Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030b3/r3tmp/tmpOBT69Q/pdisk_1.dat 2025-06-24T21:27:55.728644Z node 1 :CONFIGS_DISPATCHER ERROR: 
configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630784740011866:2079] 1750800475346720 != 1750800475346723 2025-06-24T21:27:55.728719Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9311, node 1 2025-06-24T21:27:55.771568Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:55.772039Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:55.776083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:55.834505Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:55.834539Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:55.834547Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:55.834716Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12330 TClient is connected to server localhost:12330 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:27:56.365391Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:56.409513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:56.432192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:27:56.441489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:27:56.603018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:56.769940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:56.868998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:58.552915Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630797624915389:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:58.553023Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:58.900560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:58.932285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:58.967534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.002772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.047326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.093827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.132088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:59.210934Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630801919883340:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.211040Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.211605Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630801919883345:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:59.215557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:59.229313Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630801919883347:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:59.290106Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630801919883398:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:00.347687Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630784740011885:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:00.347761Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:00.349493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... from file: (empty maybe) 2025-06-24T21:28:12.709992Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1394 TClient is connected to server localhost:1394 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:13.290848Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:13.303964Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:13.321794Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:13.396103Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:13.534699Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:28:13.580459Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.667545Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:16.265339Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519630874448295143:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:16.265454Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:16.354614Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.396376Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.434608Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.469401Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.507241Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.554951Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.596853Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.694953Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519630874448295800:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:16.695064Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:16.695343Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519630874448295805:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:16.700889Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:16.714970Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519630874448295807:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:16.811049Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519630874448295858:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:17.506360Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519630857268424353:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:17.506436Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:18.001558Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:18.160167Z node 4 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037922:1][4:7519630883038230911:2496] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:19:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-24T21:28:18.288387Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-24T21:28:18.317115Z node 4 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-24T21:28:18.360218Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519630883038231048:3777] txid# 281474976715675, issues: { message: "Cannot change partition count. Use split/merge instead" severity: 1 } 2025-06-24T21:28:18.360516Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YmJmOWIwMzQtNDgxYWVmNDUtY2Y2YjZiYmItYWRkMDYyZGQ=, ActorId: [4:7519630883038230978:2510], ActorState: ExecuteState, TraceId: 01jyhxezn3c2wsh93pdrwaxp6j, Create QueryResponse for error on request, msg: Query failed, status: BAD_REQUEST:
: Error: Cannot change partition count. Use split/merge instead, code: 2017 2025-06-24T21:28:18.370516Z node 4 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request >> KqpQueryPerf::MultiRead+QueryService >> KqpQueryServiceScripts::ExecuteScriptWithTimeout [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithResultsTtlAndForgetAfter >> KqpQueryService::ClosedSessionRemovedFromPool [GOOD] >> KqpQueryService::CloseConnection >> KqpQueryService::Ddl [GOOD] >> KqpQueryService::DdlColumnTable >> KqpQueryService::FlowControllOnHugeRealTable+LongRow [GOOD] >> KqpQueryService::Explain >> KqpService::SessionBusyRetryOperationSync [GOOD] >> KqpService::SwitchCache+UseCache >> KqpQueryServiceScripts::ExecuteScriptWithForgetAfter [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithResultsTtl >> KqpQueryService::ExecuteQueryInteractiveTx [GOOD] >> KqpQueryService::ExecuteQueryInteractiveTxCommitWithQuery >> KqpQueryServiceScripts::TestFetchMoreThanLimit [GOOD] >> KqpQueryServiceScripts::TestAstWithCompression >> KqpQueryPerf::Upsert+QueryService-UseSink >> KqpQueryPerf::DeleteOn+QueryService-UseSink >> KqpQueryService::CreateAndDropTopic [GOOD] >> KqpQueryService::CreateAndAlterTopic >> KqpQueryService::ReadManyRangesAndPoints [GOOD] >> KqpService::SwitchCache-UseCache [GOOD] >> KqpService::ToDictCache+UseCache >> KqpQueryService::ShowCreateTableOnView [GOOD] >> KqpQueryService::ShowCreateView >> KqpQueryService::ExecuteQueryExplicitBeginCommitRollback [GOOD] >> KqpQueryService::ExecuteDDLStatusCodeSchemeError ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ReadManyRangesAndPoints [GOOD] Test command err: Trying to start YDB, gRPC: 19385, MsgBus: 13143 2025-06-24T21:28:07.945682Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630835434836644:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:07.946011Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00306d/r3tmp/tmpJsItVm/pdisk_1.dat 2025-06-24T21:28:08.325236Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:08.360342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:08.360459Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 19385, node 1 2025-06-24T21:28:08.367034Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:08.455785Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:08.455819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:08.455827Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:08.455953Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13143 TClient is connected to server localhost:13143 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:28:08.947639Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:09.106390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:11.253775Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630852614706269:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:11.253868Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:11.528577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:11.734599Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630852614706822:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:11.734671Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:11.734736Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630852614706827:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:11.738572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:11.748738Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630852614706829:2341], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:28:11.847131Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630852614706880:2689] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:12.947258Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630835434836644:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:12.947327Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 2513, MsgBus: 63605 2025-06-24T21:28:13.958169Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630863092671095:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:13.958237Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00306d/r3tmp/tmpNXK0L3/pdisk_1.dat 2025-06-24T21:28:14.057604Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2513, node 2 2025-06-24T21:28:14.098153Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:14.098245Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:14.099366Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:14.116756Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:14.116785Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:14.116795Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:14.116906Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63605 TClient is connected to server localhost:63605 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:14.574020Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:14.583387Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:14.963020Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:17.069116Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630880272540868:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:17.069213Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:17.097090Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:17.179787Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630880272541087:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:17.179871Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:17.179979Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630880272541092:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:17.183075Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:17.192297Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630880272541094:2314], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:28:17.276833Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630880272541145:2465] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 28682, MsgBus: 1321 2025-06-24T21:28:18.556195Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630883557228721:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:18.556298Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00306d/r3tmp/tmposAoqN/pdisk_1.dat 2025-06-24T21:28:18.689473Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:18.701995Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630883557228679:2079] 1750800498542072 != 1750800498542075 2025-06-24T21:28:18.705343Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:18.705431Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:18.707951Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28682, node 3 2025-06-24T21:28:18.758136Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:18.758160Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:18.758170Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:18.758293Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1321 TClient is connected to server localhost:1321 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:28:19.369729Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:19.376310Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:19.563455Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:22.140442Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630900737098497:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:22.140526Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:22.167556Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:22.347304Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630900737098937:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:22.347415Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:22.347754Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630900737098942:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:22.351348Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:22.364465Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630900737098944:2332], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:28:22.467827Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630900737098995:2612] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpQueryPerf::Replace-QueryService+UseSink [GOOD] >> KqpQueryPerf::Update+QueryService-UseSink [GOOD] >> KqpQueryPerf::Insert-QueryService-UseSink [GOOD] >> KqpQueryPerf::Insert-QueryService+UseSink >> KqpQueryPerf::RangeRead+QueryService [GOOD] >> TColumnShardTestSchema::ForgetWithLostAnswer [GOOD] >> KqpQueryPerf::MultiRead+QueryService [GOOD] >> KqpQueryPerf::IdxLookupJoinThreeWay+QueryService [GOOD] >> KqpQueryPerf::IdxLookupJoinThreeWay-QueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Replace-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 28311, MsgBus: 11150 2025-06-24T21:28:19.588204Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630887130100403:2121];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:19.595186Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b0e/r3tmp/tmpQXyKUY/pdisk_1.dat 2025-06-24T21:28:20.067259Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630887130100307:2079] 1750800499564777 != 1750800499564780 2025-06-24T21:28:20.079622Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:20.086431Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:20.086539Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:20.101284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28311, node 1 2025-06-24T21:28:20.316522Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:20.316552Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:20.316563Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:20.316699Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:28:20.597782Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11150 TClient is connected to server localhost:11150 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:21.131386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:21.160871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:21.383228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:21.536534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:21.618666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:22.887803Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630900015003846:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:22.887962Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.432925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.470543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.496767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.526399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.566104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.609054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.645242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.767454Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630904309971804:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.767543Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.767787Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630904309971809:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.774163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:23.784583Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630904309971811:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:23.872974Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630904309971864:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:24.591258Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630887130100403:2121];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:24.600224Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::IndexReplace-QueryService-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Update+QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 15221, MsgBus: 24792 2025-06-24T21:28:19.707204Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630888458915524:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:19.707317Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b07/r3tmp/tmpxSULPi/pdisk_1.dat 2025-06-24T21:28:20.105166Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630888458915501:2079] 1750800499705658 != 1750800499705661 2025-06-24T21:28:20.117753Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15221, node 1 2025-06-24T21:28:20.156703Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:20.156833Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:20.160587Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:20.319746Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:20.319782Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:20.319799Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:20.319902Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:28:20.725470Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24792 TClient is connected to server localhost:24792 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:21.169644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:21.185219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:21.197861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:21.355027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:21.502852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:21.597171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:22.987868Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630901343819034:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:22.988013Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.433035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.504794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.536270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.566839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.602224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.649882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.730521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.791586Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630905638787001:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.791667Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.791873Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630905638787006:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.795489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:23.806431Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630905638787008:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:23.867943Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630905638787059:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:24.707713Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630888458915524:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:24.707781Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::ForgetWithLostAnswer [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801059.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=150801059.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130801059.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130801059.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799859.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130799859.000000s;Name=;Codec=}; 2025-06-24T21:27:41.292534Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:27:41.321307Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:27:41.321646Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:27:41.329775Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:41.330032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:41.330339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:41.330492Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:41.330606Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:41.330723Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:41.330832Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:41.330948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:41.331066Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:41.331214Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:41.331332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:41.363815Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:27:41.364108Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:27:41.364170Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:27:41.364354Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:41.364557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:27:41.364628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:27:41.364671Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:27:41.364758Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:27:41.364814Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:27:41.364852Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:27:41.364881Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:27:41.365059Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:41.365146Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:27:41.365185Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:27:41.365213Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:27:41.365306Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:27:41.365362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:27:41.365402Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:27:41.365432Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:27:41.365495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:27:41.365532Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:27:41.365560Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:27:41.365802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:27:41.365852Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:27:41.365902Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:27:41.366099Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:27:41.366153Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:27:41.366184Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:27:41.366311Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:27:41.366357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:27:41.366386Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:27:41.366465Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:27:41.366530Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:27:41.366569Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:27:41.366599Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:27:41.367953Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=100; 2025-06-24T21:27:41.368076Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=49; 2025-06-24T21:27:41.36 ... 
1:870:2828] 2025-06-24T21:28:26.385326Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:871:2829];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:28:26.070059Z;index_granules=0;index_portions=1;index_batches=522;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=4873744;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=4873744;selected_rows=0; 2025-06-24T21:28:26.385365Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:871:2829];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:28:26.385633Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:871:2829];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-24T21:28:26.386524Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Finished read cookie: 7 at tablet 9437184 2025-06-24T21:28:26.386766Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750800497430:max} readable: {1750800497430:max} at tablet 9437184 2025-06-24T21:28:26.386907Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-24T21:28:26.387085Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800497430:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:28:26.387170Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800497430:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:28:26.387726Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800497430:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-24T21:28:26.387827Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800497430:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-24T21:28:26.388311Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800497430:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:878:2836];trace_detailed=; 2025-06-24T21:28:26.388720Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-24T21:28:26.388952Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-24T21:28:26.389132Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:28:26.389270Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:28:26.389561Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:28:26.389674Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:28:26.389771Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 
2025-06-24T21:28:26.389809Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:878:2836] finished for tablet 9437184 2025-06-24T21:28:26.390236Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:877:2835];stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","l_ack","f_processing","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750800506388243,"name":"_full_task","f":1750800506388243,"d_finished":0,"c":0,"l":1750800506389879,"d":1636},"events":[{"name":"bootstrap","f":1750800506388427,"d_finished":874,"c":1,"l":1750800506389301,"d":874},{"a":1750800506389536,"name":"ack","f":1750800506389536,"d_finished":0,"c":0,"l":1750800506389879,"d":343},{"a":1750800506389509,"name":"processing","f":1750800506389509,"d_finished":0,"c":0,"l":1750800506389879,"d":370},{"name":"ProduceResults","f":1750800506389047,"d_finished":454,"c":2,"l":1750800506389793,"d":454},{"a":1750800506389796,"name":"Finish","f":1750800506389796,"d_finished":0,"c":0,"l":1750800506389879,"d":83}],"id":"9437184::8"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:28:26.390320Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:877:2835];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:28:26.390747Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:877:2835];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","l_ProduceResults","f_Finish"],"t":0.001},{"events":["l_ack","l_processing","l_Finish"],"t":0.002}],"full":{"a":1750800506388243,"name":"_full_task","f":1750800506388243,"d_finished":0,"c":0,"l":1750800506390368,"d":2125},"events":[{"name":"bootstrap","f":1750800506388427,"d_finished":874,"c":1,"l":1750800506389301,"d":874},{"a":1750800506389536,"name":"ack","f":1750800506389536,"d_finished":0,"c":0,"l":1750800506390368,"d":832},{"a":1750800506389509,"name":"processing","f":1750800506389509,"d_finished":0,"c":0,"l":1750800506390368,"d":859},{"name":"ProduceResults","f":1750800506389047,"d_finished":454,"c":2,"l":1750800506389793,"d":454},{"a":1750800506389796,"name":"Finish","f":1750800506389796,"d_finished":0,"c":0,"l":1750800506390368,"d":572}],"id":"9437184::8"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:878:2836]->[1:877:2835] 2025-06-24T21:28:26.390858Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:28:26.387795Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-24T21:28:26.390908Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:28:26.391045Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/9739224 160000/9739224 80000/4873744 0/0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::RangeRead+QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 28087, MsgBus: 17938 2025-06-24T21:28:20.280006Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630892062208520:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:20.287296Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b03/r3tmp/tmpd5X8S8/pdisk_1.dat 2025-06-24T21:28:20.615748Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28087, node 1 2025-06-24T21:28:20.700568Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:20.700996Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:20.703631Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:20.750079Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:20.750097Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:20.750101Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:20.750193Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17938 TClient is connected to server localhost:17938 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:28:21.288297Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:21.305047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:21.331244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:21.537180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:21.706941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:21.785997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:23.626276Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630904947111950:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.626390Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.897811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.925912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.956458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.987643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:24.016785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:24.051443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:24.101710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:24.164990Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630909242079908:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:24.165068Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:24.165531Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630909242079913:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:24.170358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:24.182164Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630909242079915:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:24.275444Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630909242079966:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:25.280723Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630892062208520:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:25.282641Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> SystemView::ShowCreateTableColumnUpsertIndex [GOOD] >> SystemView::ShowCreateTableColumnAlterObject ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::MultiRead+QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 1063, MsgBus: 18212 2025-06-24T21:28:20.957464Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630893308522470:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:20.957563Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aff/r3tmp/tmp8F7RXF/pdisk_1.dat 2025-06-24T21:28:21.319241Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630893308522448:2079] 1750800500949904 != 1750800500949907 2025-06-24T21:28:21.319748Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1063, node 1 2025-06-24T21:28:21.340079Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:21.340190Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:21.342871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:21.381067Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:21.381092Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:21.381100Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:21.381229Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18212 TClient is connected to server localhost:18212 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:21.964966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:28:21.972547Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:28:21.993634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:22.159366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:22.347601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:22.432228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:24.067490Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630910488393267:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:24.067600Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:24.384643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:24.426135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:24.455712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:24.489921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:24.520837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:24.567091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:24.604334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:24.674243Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630910488393924:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:24.674368Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:24.675273Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630910488393929:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:24.680881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:24.695705Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630910488393931:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:24.777106Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630910488393982:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:25.957922Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630893308522470:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:25.958000Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryService::Explain [GOOD] >> KqpQueryPerf::Upsert+QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexInsert-QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexInsert-QueryService+UseSink >> KqpRanges::ValidatePredicates [GOOD] >> KqpRanges::ValidatePredicatesDataQuery >> KqpQueryPerf::IndexDeleteOn-QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexDeleteOn-QueryService+UseSink >> KqpQueryPerf::DeleteOn+QueryService-UseSink [GOOD] >> KqpQueryPerf::DeleteOn+QueryService+UseSink >> KqpWorkload::STOCK >> KqpQueryService::ExecuteQueryInteractiveTxCommitWithQuery [GOOD] >> KqpQueryService::ExecuteQueryMultiResult >> KqpQueryService::ExecuteDDLStatusCodeSchemeError [GOOD] >> KqpQueryPerf::IdxLookupJoin+QueryService >> KqpQueryPerf::Update-QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Upsert+QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 15542, MsgBus: 17097 2025-06-24T21:28:22.459042Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630899057418646:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:22.459656Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004aec/r3tmp/tmpYxUNk9/pdisk_1.dat 2025-06-24T21:28:22.913290Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630899057418623:2079] 1750800502450000 != 1750800502450003 2025-06-24T21:28:22.927990Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:22.935580Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:22.935681Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 15542, node 1 2025-06-24T21:28:22.938198Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:23.003814Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:23.003833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:23.003839Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:23.003956Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17097 TClient is connected to server localhost:17097 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:28:23.470453Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:23.553851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:23.571936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:23.583787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:23.750149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:28:23.885284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.946391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:25.495959Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630911942322139:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:25.496055Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:25.812346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:25.843640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:25.878773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:25.968146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.001108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.031710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.064497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.124229Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630916237290089:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.124344Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.125547Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630916237290094:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.129826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:26.143113Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630916237290096:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:26.225008Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630916237290147:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:27.459302Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630899057418646:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:27.505763Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::Explain [GOOD] Test command err: Trying to start YDB, gRPC: 8163, MsgBus: 8958 2025-06-24T21:28:09.333931Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630842582638398:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:09.334046Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003066/r3tmp/tmpAB32Ua/pdisk_1.dat 2025-06-24T21:28:09.761441Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8163, node 1 2025-06-24T21:28:09.783407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:09.783547Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:09.785946Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:09.856439Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:09.856470Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:09.856481Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:09.856605Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8958 2025-06-24T21:28:10.340529Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8958 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:10.504656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:10.534103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:10.678225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:10.842153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:10.919893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:12.584111Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630855467541882:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:12.584253Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:12.976140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.048216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.074527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.105127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.183749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.234211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.269888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.335005Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630859762509837:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:13.335132Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:13.335348Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630859762509842:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:13.338936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:13.349255Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630859762509844:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:13.418857Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630859762509897:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:14.334265Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630842582638398:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:14.334350Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 26380, MsgBus: 30902 2025-06-24T21:28:15.891632Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630871252209873:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:15.891880Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003066/r3tmp/tmpJE3N3k/pdisk_1.dat 2025-06-24T21:28:16.020107Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:16.0 ... actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:20.891240Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519630871252209873:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:20.891314Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 10 Trying to start YDB, gRPC: 10596, MsgBus: 5953 2025-06-24T21:28:22.002633Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630895743000502:2184];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:22.029418Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003066/r3tmp/tmpX6Nzt5/pdisk_1.dat 2025-06-24T21:28:22.107405Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630895743000356:2079] 1750800501982027 != 1750800501982030 2025-06-24T21:28:22.134513Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10596, node 3 2025-06-24T21:28:22.165057Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:22.165147Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:22.167031Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 
2025-06-24T21:28:22.255726Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:22.255751Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:22.255763Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:22.255887Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5953 TClient is connected to server localhost:5953 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:28:22.784738Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:22.799617Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:22.871813Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:23.001328Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:23.033010Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:23.112820Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:25.587788Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630912922871152:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:25.587883Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:25.647038Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:25.700309Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:25.740190Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:25.774823Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:25.837152Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:25.880343Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:25.917880Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:25.982046Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630912922871807:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:25.982127Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:25.982232Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630912922871812:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:25.986468Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:26.003346Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630912922871814:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:26.081445Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630917217839163:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:26.992860Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630895743000502:2184];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:26.992924Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryService::CreateAndAlterTopic [GOOD] >> KqpQueryService::CreateOrDropTopicOverTable >> KqpQueryServiceScripts::ListScriptExecutions [GOOD] >> KqpQueryServiceScripts::Tcl >> KqpQueryPerf::Upsert-QueryService-UseSink >> KqpQueryPerf::Delete-QueryService-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ExecuteDDLStatusCodeSchemeError [GOOD] Test command err: Trying to start YDB, gRPC: 1471, MsgBus: 26649 2025-06-24T21:28:06.403728Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630832706479437:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:06.403842Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003086/r3tmp/tmpcdBYjs/pdisk_1.dat 2025-06-24T21:28:06.846278Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:06.847098Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630832706479413:2079] 1750800486402002 != 1750800486402005 TServer::EnableGrpc on GrpcPort 1471, node 1 2025-06-24T21:28:06.856701Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:06.856856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:06.858914Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:06.929738Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:06.929905Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:06.929917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:06.930042Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26649 2025-06-24T21:28:07.424867Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26649 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:07.562648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:07.589004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:07.603946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:07.830399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:07.999490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:08.075078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:09.715861Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630845591382946:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:09.715975Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:10.087721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:10.161389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:10.205316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:10.234576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:10.266943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:10.313127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:10.355524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:10.448983Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630849886350909:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:10.449083Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:10.449241Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630849886350914:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:10.453674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:10.487763Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630849886350916:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:10.574710Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630849886350967:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:11.403978Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630832706479437:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:11.404054Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 4108, MsgBus: 18030 2025-06-24T21:28:12.725826Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630856034550308:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:12.725896Z node 2 :METADATA_PROVIDER ERROR: log.cpp:7 ... 5: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:21.620687Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:21.654842Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:21.731377Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:21.774187Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:21.845487Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:21.936857Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:22.012616Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630900136256636:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:22.012735Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:22.013265Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630900136256641:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:22.017323Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:22.056041Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630900136256643:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:22.142170Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630900136256694:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:23.150249Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630882956385166:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:23.150340Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:23.350930Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=ZTY0NzI5YTMtN2NjNjdiMzItMWE5ZThmNTEtNzJhZmMyMzc=, ActorId: [3:7519630904431224262:2473], ActorState: ReadyState, TraceId: 01jyhxf4hb4wwyw5nq94hqp7j4, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 28272, MsgBus: 9622 2025-06-24T21:28:24.378506Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519630910572387516:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:24.378557Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003086/r3tmp/tmpOstXlh/pdisk_1.dat 2025-06-24T21:28:24.568652Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:24.574158Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519630910572387493:2079] 1750800504377553 != 1750800504377556 2025-06-24T21:28:24.576066Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:24.576163Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:24.578680Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28272, node 4 2025-06-24T21:28:24.637448Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:24.637472Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:24.637482Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:24.637611Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9622 TClient is connected to server localhost:9622 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:25.200562Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:25.400444Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:28.162743Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519630927752257321:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:28.162784Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519630927752257296:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:28.162891Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:28.167592Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:28.182177Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519630927752257324:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:28:28.268208Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519630927752257375:2332] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:28.358834Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519630927752257406:2343] txid# 281474976710660, issues: { message: "Type \'TzTimestamp\' specified for column \'payload\' is not supported by storage" severity: 1 } 2025-06-24T21:28:28.359215Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=YzExNmY4Y2UtNjdlYzlhZS01N2Q4NDM0OC0zODJkYTJjNg==, ActorId: [4:7519630927752257291:2289], ActorState: ExecuteState, TraceId: 01jyhxf6c75f0daf2hx6qxmg8y, Create QueryResponse for error on request, msg: >> KqpQueryService::ShowCreateView [GOOD] >> KqpQueryServiceScripts::TestAstWithCompression [GOOD] >> KqpQueryPerf::RangeLimitRead-QueryService >> KqpQueryPerf::IndexUpdateOn+QueryService-UseSink >> KqpQueryPerf::Insert-QueryService+UseSink [GOOD] >> TAsyncIndexTests::DropTableWithInflightChanges[PipeResets] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ShowCreateView [GOOD] Test command err: Trying to start YDB, gRPC: 4949, MsgBus: 7288 2025-06-24T21:28:12.567140Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630858835495574:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:12.567262Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003056/r3tmp/tmpf30wjN/pdisk_1.dat 2025-06-24T21:28:12.956670Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630858835495553:2079] 1750800492565566 != 1750800492565569 2025-06-24T21:28:12.974486Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4949, node 1 2025-06-24T21:28:13.012128Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:13.012281Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:13.015132Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:13.059748Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:13.059775Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:13.059786Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:13.059892Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7288 TClient is connected to server localhost:7288 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:28:13.577083Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:13.649913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:13.665859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:13.678976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:13.847572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:14.007651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:14.073091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:15.734556Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630871720399068:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:15.734686Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:16.059138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.095787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.130116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.157250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.186023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.230527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.265324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.317097Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630876015367026:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:16.317208Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:16.317270Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630876015367031:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:16.320807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:16.330522Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630876015367033:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:16.403952Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630876015367084:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:17.502768Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519630880310334657:2478], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:35: Error: At function: KiReadTable!
:2:35: Error: Cannot find table 'db.[/Root/test_show_create]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:28:17.503075Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OWMyZTIzYzQtZjBhMDZkZTAtOWQyODQ0M2YtZGI0OWFlYzc=, ActorId: [1:7519630880310334648:2472], ActorState: ExecuteState, TraceId: 01jyhxeysza3jckmqbfvnm58hm, ReplyQuer ... nId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519630903760456147:2472], status: BAD_REQUEST, reason: {
: Error: Terminate execution } 2025-06-24T21:28:23.426204Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NjllNmExODUtNzdhMzRiMTAtNTA1MmM5YjktMWMwMmI4OTk=, ActorId: [2:7519630903760456099:2472], ActorState: ExecuteState, TraceId: 01jyhxf4fncndy8ebe8pjkea4w, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 31492, MsgBus: 8289 2025-06-24T21:28:24.354348Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630908372888209:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:24.354412Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003056/r3tmp/tmpdhu6di/pdisk_1.dat 2025-06-24T21:28:24.523331Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630908372888179:2079] 1750800504334686 != 1750800504334689 2025-06-24T21:28:24.524420Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:24.531222Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:24.531328Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:24.533580Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31492, node 3 2025-06-24T21:28:24.639078Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:24.639101Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:24.639110Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:24.639248Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8289 TClient is connected to server localhost:8289 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:28:25.181165Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:25.207272Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:28:25.287331Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:25.380485Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:25.458928Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:28:25.528595Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:28.056282Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630925552759013:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:28.056406Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:28.109114Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:28.144862Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:28.190530Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:28.230052Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:28.269412Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:28.331930Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:28.370186Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:28.433828Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630925552759674:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:28.433919Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630925552759679:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:28.433967Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:28.438036Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:28.449862Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630925552759681:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:28.548633Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630925552759732:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:29.357041Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630908372888209:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:29.365386Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TPQTest::TestSetClientOffset [GOOD] >> TPQTest::TestReadSessions >> KqpQueryPerf::UpdateOn+QueryService-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::TestAstWithCompression [GOOD] Test command err: Trying to start YDB, gRPC: 10298, MsgBus: 18775 2025-06-24T21:28:03.063201Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630818975989802:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:03.063456Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003097/r3tmp/tmpY6C181/pdisk_1.dat 2025-06-24T21:28:03.467864Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10298, node 1 2025-06-24T21:28:03.523998Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:03.524163Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:03.525362Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:03.547568Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:03.547589Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:03.547595Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:03.547714Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18775 TClient is connected to server localhost:18775 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:28:04.071024Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:04.099388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:04.119261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:04.272288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:04.427517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:04.501551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:06.289199Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630831860893303:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:06.289308Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:06.685488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:06.730437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:06.802758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:06.838519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:06.869745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:06.940738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:07.012690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:07.092618Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630836155861268:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:07.092705Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630836155861273:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:07.092734Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:07.096341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:07.109346Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630836155861275:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:07.192184Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630836155861326:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:08.064703Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630818975989802:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:08.108975Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:08.351680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:08.353093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /ho ... 025-06-24T21:28:22.501825Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:22.501920Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:22.503801Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3589, node 3 2025-06-24T21:28:22.582855Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:22.582887Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:22.582897Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:22.583058Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8150 TClient is connected to server localhost:8150 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:23.211099Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:23.216193Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:23.223531Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:23.324056Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:23.335615Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:23.526724Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:23.618373Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:26.153049Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630916453014825:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.153139Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.216796Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.251639Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.328931Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.366592Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.443512Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.485464Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.555843Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.649737Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630916453015495:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.649852Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.649941Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630916453015500:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.654415Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:26.667084Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630916453015502:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:26.753513Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630916453015553:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:27.372802Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630899273144045:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:27.373153Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:28.002230Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:28.004668Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:28.012126Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::DropTableWithInflightChanges[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T21:28:01.624609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 
2025-06-24T21:28:01.624711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:28:01.624759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:28:01.624802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:28:01.624851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:28:01.624915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:28:01.624987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:28:01.625074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:28:01.625974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:28:01.626383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:28:01.696813Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:28:01.696872Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:01.697579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T21:28:01.718056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:28:01.718616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:28:01.718802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:28:01.729934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:28:01.730200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:28:01.730957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:01.731227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:28:01.735886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:28:01.736166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:28:01.737566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:28:01.737657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:28:01.737894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:28:01.737961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:28:01.738013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:28:01.738157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T21:28:01.747666Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:28:01.924608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:28:01.924911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:01.925167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:28:01.925224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:28:01.925483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:28:01.925648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:28:01.928511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 
72057594046678944 2025-06-24T21:28:01.928744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:28:01.928996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:01.929076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:28:01.929140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:28:01.929186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:28:01.932055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:01.932147Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:28:01.932198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:28:01.937273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:01.937350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:01.937400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:28:01.937475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:28:01.942015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:28:01.948625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:28:01.948959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:28:01.950183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:01.950354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep 
Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969454 } } Step: 5000001 Media ... 594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-24T21:28:31.730272Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-24T21:28:31.730309Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-24T21:28:31.730351Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-24T21:28:31.730464Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1003, ready parts: 2/3, is published: true 2025-06-24T21:28:31.731944Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-24T21:28:31.732008Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 1003:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:28:31.732273Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:28:31.732408Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1003:0 progress is 3/3 2025-06-24T21:28:31.732446Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-24T21:28:31.732491Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1003:0 progress is 3/3 2025-06-24T21:28:31.732526Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-24T21:28:31.732565Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1003, ready parts: 3/3, is published: true 2025-06-24T21:28:31.732602Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-24T21:28:31.732647Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1003:0 2025-06-24T21:28:31.732682Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1003:0 2025-06-24T21:28:31.732789Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:28:31.732837Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1003:1 2025-06-24T21:28:31.732863Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1003:1 2025-06-24T21:28:31.732905Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, 
LocalPathId: 4] was 2 2025-06-24T21:28:31.732933Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1003:2 2025-06-24T21:28:31.732958Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1003:2 2025-06-24T21:28:31.733010Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-24T21:28:31.734277Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-24T21:28:31.735840Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-24T21:28:31.735973Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-24T21:28:31.736018Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-24T21:28:31.741656Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-24T21:28:31.741907Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-24T21:28:31.744567Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 348 RawX2: 111669152026 } TabletId: 72075186233409546 State: 4 2025-06-24T21:28:31.744665Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409546, state: Offline, at schemeshard: 72057594046678944 2025-06-24T21:28:31.748336Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:28:31.748877Z node 26 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409546 2025-06-24T21:28:31.749073Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T21:28:31.749358Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 Forgetting tablet 72075186233409546 2025-06-24T21:28:31.752281Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:28:31.752337Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-24T21:28:31.752428Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-24T21:28:31.752475Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-24T21:28:31.752521Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:28:31.755419Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:28:31.755494Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409546 2025-06-24T21:28:31.756623Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-24T21:28:31.756871Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-24T21:28:31.756912Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-24T21:28:31.757856Z node 26 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-24T21:28:31.757949Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-24T21:28:31.757986Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [26:629:2553] 2025-06-24T21:28:31.764036Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 354 RawX2: 111669152030 } TabletId: 72075186233409547 State: 4 2025-06-24T21:28:31.764138Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409547, state: Offline, at schemeshard: 72057594046678944 2025-06-24T21:28:31.766503Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:28:31.767044Z node 26 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409547 2025-06-24T21:28:31.767254Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:31.767538Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 Forgetting tablet 72075186233409547 2025-06-24T21:28:31.770539Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, 
at schemeshard: 72057594046678944 2025-06-24T21:28:31.770597Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:28:31.770681Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:28:31.774998Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:28:31.775081Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409547 2025-06-24T21:28:31.775813Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 1003 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted 2025-06-24T21:28:31.776230Z node 26 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-24T21:28:31.776305Z node 26 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 >> TColumnShardTestSchema::TTL+Reboot+Internal-FirstPkColumn [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Insert-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 13050, MsgBus: 15627 2025-06-24T21:28:19.576038Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630888047400108:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:19.576876Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b0f/r3tmp/tmpgcAvdZ/pdisk_1.dat 2025-06-24T21:28:20.041979Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:20.071281Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630888047399996:2079] 1750800499564784 != 1750800499564787 2025-06-24T21:28:20.073006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:20.073114Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:20.075621Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13050, node 1 2025-06-24T21:28:20.319708Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:20.319911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:20.319920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:20.320027Z 
node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:28:20.615262Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15627 TClient is connected to server localhost:15627 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:21.249828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:21.266876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:21.281291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:21.441105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:21.615485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:28:21.696197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.082438Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630905227270830:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.082565Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.432719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.502089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.538679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.571837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.619993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.661758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.697847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.790142Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630905227271489:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.790232Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.790433Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630905227271494:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.793793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:23.803506Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630905227271496:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:23.868406Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630905227271547:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:24.576147Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630888047400108:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:24.586925Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 12385, MsgBus: 19875 2025-06-24T21:28:26.314052Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630916946916544:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:26.314112Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b0f/r3tmp/tmpAOxttU/pdisk_1.dat 2025-06-24T21:28:26.426025Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:26.427307Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630916946916522:2079] 1750800506313058 != 1750800506313061 2025-06-24T21:28:26.460554Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:26.460648Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:26.465588Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12385, node 2 2025-06-24T21:28:26.514145Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:26.514175Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:26.514183Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:26.514312Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19875 TClient is connected to server localhost:19875 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:26.957346Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:26.964247Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:26.977953Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:27.057578Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:27.225354Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:27.310335Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:27.322218Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:29.552665Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630929831820072:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:29.552764Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:29.613738Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:29.648154Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:29.690234Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:29.727886Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:29.771944Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:29.817181Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:29.896383Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:30.010641Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630934126788030:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:30.010738Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:30.010990Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630934126788035:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:30.016066Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:30.036753Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630934126788037:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:30.097303Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630934126788088:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:31.314377Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519630916946916544:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:31.314433Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryServiceScripts::ExecuteScriptWithResultsTtl [GOOD] >> KqpQueryPerf::IdxLookupJoinThreeWay-QueryService [GOOD] >> KqpQueryService::CloseConnection [GOOD] >> KqpQueryPerf::IndexDeleteOn+QueryService-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL+Reboot+Internal-FirstPkColumn [GOOD] Test command err: Running TestTtl ttlColumnType=Timestamp 2025-06-24T21:27:48.984311Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828672, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:27:48.989705Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828673, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:27:48.990207Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:27:49.019179Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:27:49.019522Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:27:49.027503Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:49.027796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:49.028052Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:49.028299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:49.028424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:49.028543Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:49.028666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:49.028780Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:49.028890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:49.029022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:49.029135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:49.055273Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828684, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:27:49.060179Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:27:49.060446Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:27:49.060508Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:27:49.060717Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:49.060937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:27:49.061022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:27:49.061066Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:27:49.061164Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:27:49.061228Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:27:49.061275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:27:49.061323Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:27:49.061531Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:49.061607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:27:49.061687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:27:49.061720Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:27:49.061815Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:27:49.061883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:27:49.061954Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:27:49.061987Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:27:49.062053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:27:49.062092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:27:49.062136Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:27:49.062392Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:27:49.062447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:27:49.062479Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:27:49.062692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:27:49.062744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:27:49.062796Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:27:49.062928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:27:49.062990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:27:49.063021Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:27:49.063112Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:27:49.063208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:27:49.063267Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:27:49.063319Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:27:49.063815Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=66; 2025-06-24T21:27:49.063911Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=42; 2025-06-24T21:27:49.064006Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=40; 2025-06-24T21:27:49.064105Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=45; 2025-06-24T21:27:49.064208Z node 1 : ... 
_received;interval_idx=0;intervalId=295; 2025-06-24T21:28:32.881533Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1000;merger=0;interval_id=295; 2025-06-24T21:28:32.881601Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-24T21:28:32.881711Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:28:32.881747Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=1000;finished=1; 2025-06-24T21:28:32.881793Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:28:32.882264Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:28:32.882496Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:1000;schema=saved_at: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:28:32.882561Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:28:32.882745Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;);columns=1;rows=1000; 2025-06-24T21:28:32.882829Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:254;stage=data_format;batch_size=8000;num_rows=1000;batch_columns=saved_at; 2025-06-24T21:28:32.883227Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:787:2764];bytes=8000;rows=1000;faults=0;finished=0;fault=0;schema=saved_at: uint64; 2025-06-24T21:28:32.883455Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:28:32.883600Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:28:32.883781Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:28:32.884053Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:28:32.884243Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:28:32.884409Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:28:32.884469Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [5:788:2765] finished 
for tablet 9437184 2025-06-24T21:28:32.885113Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[5:787:2764];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.006},{"events":["f_ack","l_task_result"],"t":0.018},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.02}],"full":{"a":1750800512863849,"name":"_full_task","f":1750800512863849,"d_finished":0,"c":0,"l":1750800512884543,"d":20694},"events":[{"name":"bootstrap","f":1750800512864183,"d_finished":4003,"c":1,"l":1750800512868186,"d":4003},{"a":1750800512884016,"name":"ack","f":1750800512882223,"d_finished":1597,"c":1,"l":1750800512883820,"d":2124},{"a":1750800512883988,"name":"processing","f":1750800512869900,"d_finished":9069,"c":9,"l":1750800512883825,"d":9624},{"name":"ProduceResults","f":1750800512866361,"d_finished":3403,"c":12,"l":1750800512884444,"d":3403},{"a":1750800512884451,"name":"Finish","f":1750800512884451,"d_finished":0,"c":0,"l":1750800512884543,"d":92},{"name":"task_result","f":1750800512869934,"d_finished":7280,"c":8,"l":1750800512881939,"d":7280}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:28:32.885227Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:787:2764];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:28:32.885874Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[5:787:2764];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.006},{"events":["f_ack","l_task_result"],"t":0.018},{"events":["l_ProduceResults","f_Finish"],"t":0.02},{"events":["l_ack","l_processing","l_Finish"],"t":0.021}],"full":{"a":1750800512863849,"name":"_full_task","f":1750800512863849,"d_finished":0,"c":0,"l":1750800512885289,"d":21440},"events":[{"name":"bootstrap","f":1750800512864183,"d_finished":4003,"c":1,"l":1750800512868186,"d":4003},{"a":1750800512884016,"name":"ack","f":1750800512882223,"d_finished":1597,"c":1,"l":1750800512883820,"d":2870},{"a":1750800512883988,"name":"processing","f":1750800512869900,"d_finished":9069,"c":9,"l":1750800512883825,"d":10370},{"name":"ProduceResults","f":1750800512866361,"d_finished":3403,"c":12,"l":1750800512884444,"d":3403},{"a":1750800512884451,"name":"Finish","f":1750800512884451,"d_finished":0,"c":0,"l":1750800512885289,"d":838},{"name":"task_result","f":1750800512869934,"d_finished":7280,"c":8,"l":1750800512881939,"d":7280}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:28:32.885984Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:28:32.860485Z;index_granules=0;index_portions=1;index_batches=10;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=59288;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=59288;selected_rows=0; 2025-06-24T21:28:32.886048Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:28:32.886515Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:788:2765];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;; >> KqpQueryPerf::DeleteOn+QueryService+UseSink [GOOD] >> KqpQueryPerf::Update-QueryService-UseSink >> KqpQueryPerf::Replace-QueryService-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IdxLookupJoinThreeWay-QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 27584, MsgBus: 23909 2025-06-24T21:28:19.571968Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630889268864566:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:19.572221Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b0a/r3tmp/tmpugJilI/pdisk_1.dat 2025-06-24T21:28:20.051450Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:20.061844Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630889268864530:2079] 1750800499565834 != 1750800499565837 2025-06-24T21:28:20.069314Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:20.069447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:20.073606Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27584, node 1 2025-06-24T21:28:20.323696Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:20.323729Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:20.323736Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:20.323868Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:28:20.586521Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23909 TClient is connected to server localhost:23909 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:21.125327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:28:21.159489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:21.371737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:21.544339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:21.631944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:22.706453Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630902153768052:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:22.706579Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.433659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.467333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.504239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.535930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.566110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.661448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.706251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.770796Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630906448736014:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.770879Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.771035Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630906448736019:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.775169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:23.787122Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630906448736021:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:23.859016Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630906448736072:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:24.571888Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630889268864566:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:24.580812Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 5599, MsgBus: 8677 2025-06-24T21:28:26.942501Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630916500542016:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:26.942538Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b0a/r3tmp/tmp2NZXI9/pdisk_1.dat 2025-06-24T21:28:27.104566Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:27.111291Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630916500541998:2079] 1750800506941896 != 1750800506941899 2025-06-24T21:28:27.121755Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:27.121839Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:27.127915Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5599, node 2 2025-06-24T21:28:27.211643Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:27.211660Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:27.211665Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:27.211757Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8677 TClient is connected to server localhost:8677 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:27.624295Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:27.631325Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:27.649209Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:27.756305Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:27.906146Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:27.953729Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 
2025-06-24T21:28:27.989793Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:30.180370Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630933680412807:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:30.180469Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:30.244551Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:30.296754Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:30.351659Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:30.389234Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:30.431541Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:30.492484Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:30.537799Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:30.627002Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630933680413467:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:30.627126Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:30.627221Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630933680413472:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:30.631748Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:30.641816Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630933680413474:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:30.703641Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630933680413525:3413] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:31.947261Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519630916500542016:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:31.947333Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::ExecuteScriptWithResultsTtl [GOOD] Test command err: Trying to start YDB, gRPC: 5134, MsgBus: 5354 2025-06-24T21:28:03.414801Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630819666384137:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:03.421410Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00308e/r3tmp/tmphrgqBx/pdisk_1.dat 2025-06-24T21:28:03.791430Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:03.794338Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630819666384112:2079] 1750800483408981 != 1750800483408984 TServer::EnableGrpc on GrpcPort 5134, node 1 2025-06-24T21:28:03.819310Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:03.819380Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:03.829702Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:03.865321Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:03.865346Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:03.865360Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:03.865502Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5354 TClient is connected to server localhost:5354 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:04.379960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:04.415847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:04.421515Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:04.545783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:04.704878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:04.774069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:06.777971Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630832551287636:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:06.778066Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:07.069261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:07.108004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:07.175743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:07.214872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:07.253242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:07.297871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:07.373033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:07.450974Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630836846255597:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:07.451089Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:07.454163Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630836846255602:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:07.458339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:07.469503Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630836846255604:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:07.528971Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630836846255655:3428] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:08.410910Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630819666384137:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:08.411023Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:08.636018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:08.637504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part pr ... 7515Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:22.327670Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24623 TClient is connected to server localhost:24623 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:22.961903Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:22.978910Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:23.060594Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:23.169333Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:23.238810Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:23.318341Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:25.863024Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630911688203497:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:25.863120Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:25.932510Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:25.971773Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.012456Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.069380Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.106557Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.144239Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.184267Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.249297Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630915983171452:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.249385Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.249435Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630915983171457:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.253345Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:26.269174Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630915983171459:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:26.350186Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630915983171510:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:27.110040Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630898803300010:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:27.110118Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:27.550481Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:27.552676Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:27.553938Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:30.362500Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800510396, txId: 281474976715708] shutting down 2025-06-24T21:28:30.731241Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800510767, txId: 281474976715711] shutting down 2025-06-24T21:28:31.094982Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800511131, txId: 281474976715714] shutting down 2025-06-24T21:28:31.126800Z node 3 :KQP_PROXY WARN: query_actor.cpp:372: [TQueryBase] [TGetScriptExecutionResultQueryActor] TraceId: ce27fd8d-ee094db8-59f6e334-a6b2abbe, State: Get results info, Finish with NOT_FOUND, Issues: {
: Error: Results are expired }, SessionId: ydb://session/3?node_id=3&id=ZWZmYzI5MTQtMTc3MzA0OTYtNmY5Nzc4ZmItZjI0YzY1MjI=, TxId: >> KqpQueryPerf::RangeLimitRead+QueryService >> KqpQueryPerf::Update-QueryService+UseSink [GOOD] >> KqpQueryPerf::IndexReplace-QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexReplace-QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::DeleteOn+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 23443, MsgBus: 11599 2025-06-24T21:28:22.967285Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630899817895169:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:22.968535Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ae9/r3tmp/tmpQvztGy/pdisk_1.dat 2025-06-24T21:28:23.427354Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630899817894985:2079] 1750800502940953 != 1750800502940956 2025-06-24T21:28:23.437145Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23443, node 1 2025-06-24T21:28:23.443349Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:23.443481Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:23.445730Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:23.514738Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:23.514760Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:23.514767Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:23.514895Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11599 2025-06-24T21:28:23.949206Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11599 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:24.095198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:24.119941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:24.134313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:24.297245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:24.473546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:24.558610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:26.280879Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630916997765822:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.280995Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.547853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.578309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.608810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.636799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.670057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.702592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.748015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.799905Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630916997766476:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.799988Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.800149Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630916997766481:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.804223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:26.816899Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630916997766483:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:26.890879Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630916997766534:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:27.951309Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630899817895169:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:27.951395Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 20726, MsgBus: 3317 2025-06-24T21:28:28.993237Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630923865634188:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:28.993340Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ae9/r3tmp/tmpjeuMUU/pdisk_1.dat 2025-06-24T21:28:29.131084Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:29.142031Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:29.142114Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:29.143392Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20726, node 2 2025-06-24T21:28:29.199708Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:29.199731Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:29.199738Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:29.199836Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3317 TClient is connected to server localhost:3317 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:29.704537Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:29.712142Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:29.718954Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:29.802755Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:29.999403Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:30.020149Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:30.087264Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:32.337031Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630941045504961:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.337134Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.389496Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.419124Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.448647Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.481263Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.513839Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.547895Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.609482Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.673572Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630941045505613:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.673660Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.673848Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630941045505618:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.677278Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:32.687615Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630941045505620:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:32.748729Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630941045505671:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpQueryPerf::IdxLookupJoin+QueryService [GOOD] >> KqpQueryPerf::IdxLookupJoin-QueryService >> KqpQueryPerf::Delete-QueryService-UseSink [GOOD] >> KqpQueryPerf::Delete-QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::CloseConnection [GOOD] Test command err: Trying to start YDB, gRPC: 19879, MsgBus: 26256 2025-06-24T21:27:12.519419Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630597790172474:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:12.519514Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030ef/r3tmp/tmpiXIX0K/pdisk_1.dat 2025-06-24T21:27:12.865549Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:12.867428Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630597790172451:2079] 1750800432518003 != 1750800432518006 TServer::EnableGrpc on GrpcPort 19879, node 1 2025-06-24T21:27:12.906315Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:12.906434Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:12.908469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:12.922518Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:12.922544Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:12.922556Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:12.922681Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26256 TClient is connected to server localhost:26256 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:13.508415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:13.531716Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:13.546195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:13.702172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:13.886940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:13.971080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:15.677192Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630610675075996:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:15.677331Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:15.996606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.031203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.066159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.099183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.130828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.177561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.211527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:16.274965Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630614970043951:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:16.275046Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:16.275275Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630614970043956:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:16.278952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:16.293351Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630614970043958:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:16.351034Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630614970044009:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:17.521862Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630597790172474:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:17.521946Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 8351, MsgBus: 1680 2025-06-24T21:27:19.147288Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630628241727503:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:19.147379Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath ... . TraceId : 01jyhxfam05q7jtg3dr3r1cj5p. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7519630931787034270:2528], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T21:28:29.585889Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519630931787034276:2530], TxId: 281474976715674, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=ODU3MTZkOTktNzg5NGM2MWEtYmM0ZjlhNDUtZWM0YzJkODQ=. TraceId : 01jyhxfam05q7jtg3dr3r1cj5p. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7519630931787034270:2528], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T21:28:29.586123Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519630931787034277:2531], TxId: 281474976715674, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=ODU3MTZkOTktNzg5NGM2MWEtYmM0ZjlhNDUtZWM0YzJkODQ=. TraceId : 01jyhxfam05q7jtg3dr3r1cj5p. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7519630931787034270:2528], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T21:28:29.586201Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519630931787034278:2532], TxId: 281474976715674, task: 3. Ctx: { SessionId : ydb://session/3?node_id=4&id=ODU3MTZkOTktNzg5NGM2MWEtYmM0ZjlhNDUtZWM0YzJkODQ=. CustomerSuppliedId : . TraceId : 01jyhxfam05q7jtg3dr3r1cj5p. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7519630931787034270:2528], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T21:28:29.586377Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519630931787034279:2533], TxId: 281474976715674, task: 4. Ctx: { SessionId : ydb://session/3?node_id=4&id=ODU3MTZkOTktNzg5NGM2MWEtYmM0ZjlhNDUtZWM0YzJkODQ=. TraceId : 01jyhxfam05q7jtg3dr3r1cj5p. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7519630931787034270:2528], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T21:28:29.586652Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=ODU3MTZkOTktNzg5NGM2MWEtYmM0ZjlhNDUtZWM0YzJkODQ=, ActorId: [4:7519630931787034267:2528], ActorState: ExecuteState, TraceId: 01jyhxfam05q7jtg3dr3r1cj5p, Create QueryResponse for error on request, msg: 2025-06-24T21:28:29.916179Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519630931787034540:2602] TxId: 281474976715684. Ctx: { TraceId: 01jyhxfayad6qmwk7wzmxmpyc0, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=Y2E2ZDQ0OGQtMjM4YWIxN2YtYWNiNzYzNjEtYmRiNTc1MGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-24T21:28:29.916425Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2E2ZDQ0OGQtMjM4YWIxN2YtYWNiNzYzNjEtYmRiNTc1MGM=, ActorId: [4:7519630931787034537:2602], ActorState: ExecuteState, TraceId: 01jyhxfayad6qmwk7wzmxmpyc0, Create QueryResponse for error on request, msg: 2025-06-24T21:28:29.916929Z node 4 :RPC_REQUEST WARN: rpc_execute_query.cpp:472: Client lost 2025-06-24T21:28:30.265687Z node 4 :RPC_REQUEST WARN: rpc_execute_query.cpp:472: Client lost 2025-06-24T21:28:30.266235Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519630936082001970:2638] TxId: 281474976715690. Ctx: { TraceId: 01jyhxfb8fa7e1s387bpdg3tjf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=N2U5MGY2MzItYWJhMzY3MTUtYTBlNDg4NjMtZjY1ZTgxMjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-24T21:28:30.266365Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519630936082001980:2644], TxId: 281474976715690, task: 5. Ctx: { TraceId : 01jyhxfb8fa7e1s387bpdg3tjf. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=N2U5MGY2MzItYWJhMzY3MTUtYTBlNDg4NjMtZjY1ZTgxMjc=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7519630936082001970:2638], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T21:28:30.363936Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519630936082001977:2641], TxId: 281474976715690, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=N2U5MGY2MzItYWJhMzY3MTUtYTBlNDg4NjMtZjY1ZTgxMjc=. CustomerSuppliedId : . TraceId : 01jyhxfb8fa7e1s387bpdg3tjf. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7519630936082001970:2638], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T21:28:30.364545Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=N2U5MGY2MzItYWJhMzY3MTUtYTBlNDg4NjMtZjY1ZTgxMjc=, ActorId: [4:7519630936082001967:2638], ActorState: ExecuteState, TraceId: 01jyhxfb8fa7e1s387bpdg3tjf, Create QueryResponse for error on request, msg: 2025-06-24T21:28:30.767888Z node 4 :RPC_REQUEST WARN: rpc_execute_query.cpp:472: Client lost 2025-06-24T21:28:30.963309Z node 4 :RPC_REQUEST WARN: rpc_execute_query.cpp:472: Client lost 2025-06-24T21:28:30.963928Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519630936082002508:2786] TxId: 281474976715709. Ctx: { TraceId: 01jyhxfbxr6dp1hjs1y9fsax9c, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ODRmYTdjZmEtYmUzN2IwZjItYjVkMzg3NzEtY2Q3YjgwYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-24T21:28:30.964161Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519630936082002519:2791], TxId: 281474976715709, task: 4. Ctx: { SessionId : ydb://session/3?node_id=4&id=ODRmYTdjZmEtYmUzN2IwZjItYjVkMzg3NzEtY2Q3YjgwYzU=. TraceId : 01jyhxfbxr6dp1hjs1y9fsax9c. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7519630936082002508:2786], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T21:28:30.964289Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519630936082002520:2792], TxId: 281474976715709, task: 5. Ctx: { CustomerSuppliedId : . TraceId : 01jyhxfbxr6dp1hjs1y9fsax9c. SessionId : ydb://session/3?node_id=4&id=ODRmYTdjZmEtYmUzN2IwZjItYjVkMzg3NzEtY2Q3YjgwYzU=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7519630936082002508:2786], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T21:28:30.964574Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519630936082002517:2789], TxId: 281474976715709, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=ODRmYTdjZmEtYmUzN2IwZjItYjVkMzg3NzEtY2Q3YjgwYzU=. CustomerSuppliedId : . TraceId : 01jyhxfbxr6dp1hjs1y9fsax9c. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7519630936082002508:2786], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-24T21:28:30.964619Z node 4 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:2914: ActorId: [4:7519630936082002508:2786] TxId: 281474976715709. Ctx: { TraceId: 01jyhxfbxr6dp1hjs1y9fsax9c, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ODRmYTdjZmEtYmUzN2IwZjItYjVkMzg3NzEtY2Q3YjgwYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Unexpected event while waiting for shutdown: NYql::NDq::TEvDqCompute::TEvChannelData 2025-06-24T21:28:30.964912Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=ODRmYTdjZmEtYmUzN2IwZjItYjVkMzg3NzEtY2Q3YjgwYzU=, ActorId: [4:7519630936082002505:2786], ActorState: ExecuteState, TraceId: 01jyhxfbxr6dp1hjs1y9fsax9c, Create QueryResponse for error on request, msg: 2025-06-24T21:28:31.646011Z node 4 :RPC_REQUEST WARN: rpc_execute_query.cpp:472: Client lost 2025-06-24T21:28:31.648613Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519630940376970481:2971] TxId: 281474976715733. Ctx: { TraceId: 01jyhxfcjhc2e1jyscz2p8nd6a, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NzNkYzVjNTUtNDM2MjE5OTEtMjFhNzljZmUtZjUwMDgzZDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-24T21:28:31.653802Z node 4 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:2914: ActorId: [4:7519630940376970481:2971] TxId: 281474976715733. Ctx: { TraceId: 01jyhxfcjhc2e1jyscz2p8nd6a, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NzNkYzVjNTUtNDM2MjE5OTEtMjFhNzljZmUtZjUwMDgzZDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Unexpected event while waiting for shutdown: NKikimr::NKqp::TEvKqpNode::TEvStartKqpTasksResponse 2025-06-24T21:28:32.650130Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=NzNkYzVjNTUtNDM2MjE5OTEtMjFhNzljZmUtZjUwMDgzZDU=, ActorId: [4:7519630940376970478:2971], ActorState: ExecuteState, TraceId: 01jyhxfcjhc2e1jyscz2p8nd6a, Create QueryResponse for error on request, msg: 2025-06-24T21:28:32.653190Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519630940376970490:2974], TxId: 281474976715733, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NzNkYzVjNTUtNDM2MjE5OTEtMjFhNzljZmUtZjUwMDgzZDU=. TraceId : 01jyhxfcjhc2e1jyscz2p8nd6a. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle undelivered TEvState event, abort execution 2025-06-24T21:28:32.653611Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519630940376970488:2973], TxId: 281474976715733, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyhxfcjhc2e1jyscz2p8nd6a. SessionId : ydb://session/3?node_id=4&id=NzNkYzVjNTUtNDM2MjE5OTEtMjFhNzljZmUtZjUwMDgzZDU=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle undelivered TEvState event, abort execution 2025-06-24T21:28:32.654763Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519630940376970491:2975], TxId: 281474976715733, task: 3. Ctx: { TraceId : 01jyhxfcjhc2e1jyscz2p8nd6a. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NzNkYzVjNTUtNDM2MjE5OTEtMjFhNzljZmUtZjUwMDgzZDU=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle undelivered TEvState event, abort execution 2025-06-24T21:28:32.654764Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519630940376970492:2976], TxId: 281474976715733, task: 4. Ctx: { SessionId : ydb://session/3?node_id=4&id=NzNkYzVjNTUtNDM2MjE5OTEtMjFhNzljZmUtZjUwMDgzZDU=. TraceId : 01jyhxfcjhc2e1jyscz2p8nd6a. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle undelivered TEvState event, abort execution 2025-06-24T21:28:32.655094Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519630940376970493:2977], TxId: 281474976715733, task: 5. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NzNkYzVjNTUtNDM2MjE5OTEtMjFhNzljZmUtZjUwMDgzZDU=. TraceId : 01jyhxfcjhc2e1jyscz2p8nd6a. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. 
Handle undelivered TEvState event, abort execution >> KqpQueryService::ExecuteQueryMultiResult [GOOD] >> KqpQueryPerf::Upsert-QueryService-UseSink [GOOD] >> KqpQueryPerf::KvRead+QueryService >> KqpQueryPerf::IndexInsert-QueryService+UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Update-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 10491, MsgBus: 16882 2025-06-24T21:28:29.478256Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630929637617639:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:29.478327Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ade/r3tmp/tmpEeMC0Q/pdisk_1.dat 2025-06-24T21:28:29.891264Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630929637617621:2079] 1750800509477635 != 1750800509477638 2025-06-24T21:28:29.909965Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10491, node 1 2025-06-24T21:28:29.916445Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:29.916550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:29.919806Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:30.108969Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:30.109019Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:30.109029Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:30.109140Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16882 2025-06-24T21:28:30.505753Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16882 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:28:30.746436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:30.762415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:30.766946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:30.914728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:31.079250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:31.148921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:32.889388Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630942522521144:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.889538Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.184728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.219088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.250228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.283135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.317044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.374432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.411847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.467980Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630946817489096:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.468071Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.468320Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630946817489101:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.473391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:33.486971Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630946817489103:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:33.580794Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630946817489156:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:34.481698Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630929637617639:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:34.481779Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::ComputeLength+QueryService >> KqpQueryPerf::Upsert-QueryService+UseSink >> KqpQueryService::CreateOrDropTopicOverTable [GOOD] >> KqpQueryPerf::IndexDeleteOn-QueryService+UseSink [GOOD] >> TTxAllocatorClientTest::ZeroRange [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Upsert-QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 23835, MsgBus: 20644 2025-06-24T21:28:29.911876Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630931201383554:2056];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:29.911929Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004add/r3tmp/tmp9Bk2Zu/pdisk_1.dat 2025-06-24T21:28:30.428553Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:30.429227Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:30.432105Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:30.463346Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630931201383539:2079] 1750800509904671 != 1750800509904674 2025-06-24T21:28:30.478157Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23835, node 1 2025-06-24T21:28:30.549513Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:30.549536Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:30.549546Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:30.549708Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20644 2025-06-24T21:28:30.938631Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20644 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:31.153601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:31.187200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:31.307372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:31.458637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:31.534049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:33.344053Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630948381254363:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.344169Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.715076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.745280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.776266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.815499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.850201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.889523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.933189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.988760Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630948381255025:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.988875Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.989082Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630948381255030:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.994099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:34.008053Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630948381255032:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:34.080093Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630952676222379:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:34.912147Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630931201383554:2056];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:34.912245Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::RangeLimitRead-QueryService [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ExecuteQueryMultiResult [GOOD] Test command err: Trying to start YDB, gRPC: 14183, MsgBus: 18101 2025-06-24T21:28:09.446233Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630845469135611:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:09.446411Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00305f/r3tmp/tmpfYjT0F/pdisk_1.dat 2025-06-24T21:28:09.824774Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:09.825715Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630845469135593:2079] 1750800489417124 != 1750800489417127 2025-06-24T21:28:09.871069Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:09.871219Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 14183, node 1 2025-06-24T21:28:09.873619Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:09.939164Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:09.939190Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:09.939198Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:09.939342Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18101 TClient is connected to server localhost:18101 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:28:10.446156Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:10.555115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:10.572253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:10.589951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:10.750333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:10.914027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:11.008429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:12.766241Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630858354039115:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:12.766353Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:13.056962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.087182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.158130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.191813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.232346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.265652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.299566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:13.352429Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630862649007067:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:13.352505Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630862649007072:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:13.352606Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:13.357153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:13.367522Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630862649007074:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:13.464254Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630862649007127:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:14.419248Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630845469135611:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:14.419324Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:14.880788Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519630866943974748:2473], SessionActorId: [1:7519630866943974693:2473], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/TwoShard`, code: 2001 . sessionActorId=[1:7519630866943974693:2473]. isRollback=0 2 ... :7519630915230253428:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:27.239262Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630898050381932:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:27.239347Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 16156, MsgBus: 8107 2025-06-24T21:28:29.220131Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519630931110521729:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:29.220316Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00305f/r3tmp/tmpRYjFzj/pdisk_1.dat 2025-06-24T21:28:29.376089Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:29.389173Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:29.389272Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:29.392161Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16156, node 4 2025-06-24T21:28:29.518863Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:29.518888Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:29.518896Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:29.519020Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8107 TClient is connected to server localhost:8107 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:30.185559Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:30.200269Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:30.233981Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:30.316908Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:30.554612Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:30.636109Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:33.161997Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519630948290392511:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.162093Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.221070Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.257012Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.330952Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.372195Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.416691Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.464927Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.509109Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.585774Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519630948290393174:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.585873Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.586084Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519630948290393179:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.590153Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:33.600869Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519630948290393181:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:33.660062Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519630948290393232:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:34.220367Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519630931110521729:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:34.220439Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> SystemView::ShowCreateTableColumnAlterObject [FAIL] >> KqpQueryPerf::UpdateOn-QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexInsert-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 14767, MsgBus: 29828 2025-06-24T21:28:19.572474Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630889152656741:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:19.572589Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b0b/r3tmp/tmp5Dlug3/pdisk_1.dat 2025-06-24T21:28:20.069184Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:20.069536Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:20.069614Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:20.071290Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630889152656698:2079] 1750800499569375 != 1750800499569378 2025-06-24T21:28:20.078686Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14767, node 1 2025-06-24T21:28:20.319911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:20.319933Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:20.319939Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:20.320050Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:28:20.580411Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29828 TClient is connected to server localhost:29828 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:21.122762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:21.175661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:21.341264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:21.492473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:21.574031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:22.813180Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630902037560235:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:22.813319Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.433631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.466366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.496790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.526010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.604815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.645269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.721164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.792685Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630906332528197:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.792783Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.793040Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630906332528202:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.796888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:23.809963Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630906332528204:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:23.876048Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630906332528257:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:24.575351Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630889152656741:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:24.575403Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:25.196886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:25.245561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... 8.546300Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630925634221847:2079] 1750800508438810 != 1750800508438813 TServer::EnableGrpc on GrpcPort 63372, node 2 2025-06-24T21:28:28.595548Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:28.595647Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:28.597225Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:28.605368Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:28.605393Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:28.605400Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:28.605533Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28114 TClient is connected to server localhost:28114 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:29.167893Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:28:29.183804Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:29.250733Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:29.398730Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:29.482556Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:29.513460Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:31.945260Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630938519125351:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:31.945372Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.003542Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.077601Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.113787Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.144235Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.178113Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.214545Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.249628Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.342245Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630942814093311:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.342346Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.342737Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630942814093316:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.346826Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:32.362906Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630942814093318:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:32.459484Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630942814093369:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:33.439587Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519630925634221868:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:33.439648Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:33.604052Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.640870Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.715706Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpQueryPerf::UpdateOn+QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest >> TTxAllocatorClientTest::ZeroRange [GOOD] Test command err: 2025-06-24T21:27:01.561545Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-24T21:27:01.563564Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-24T21:27:01.565225Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-24T21:27:01.594853Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.599415Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-24T21:27:01.615680Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.615826Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.615943Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-24T21:27:01.616060Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.616191Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.616272Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-24T21:27:01.616373Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-24T21:27:01.618574Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:72:2106] requested range size#5000 2025-06-24T21:27:01.621632Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.621705Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:27:01.621798Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 5000 2025-06-24T21:27:01.621836Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:72:2106] TEvAllocateResult from# 0 to# 5000 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexDeleteOn-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 64566, MsgBus: 21394 2025-06-24T21:28:20.435004Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630890443137130:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:20.435671Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b02/r3tmp/tmpTCFvCb/pdisk_1.dat 2025-06-24T21:28:20.815519Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:20.821863Z node 1 
:CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630890443137033:2079] 1750800500412363 != 1750800500412366 TServer::EnableGrpc on GrpcPort 64566, node 1 2025-06-24T21:28:20.878465Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:20.878624Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:20.886969Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:20.931652Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:20.931697Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:20.931712Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:20.931866Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21394 TClient is connected to server localhost:21394 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:28:21.453971Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:21.559691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:21.583303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:21.741580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:21.916803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:22.006782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:23.826277Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630903328040564:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.826380Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:24.127634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:24.156389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:24.188286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:24.221595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:24.250980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:24.297982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:24.344302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:24.441622Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630907623008518:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:24.441694Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:24.441860Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630907623008523:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:24.445574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:24.458085Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630907623008525:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:24.551732Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630907623008578:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:25.423269Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630890443137130:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:25.423357Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:25.871443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:25.912163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... -06-24T21:28:29.068222Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:29.068311Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:29.069565Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11136, node 2 2025-06-24T21:28:29.127709Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:29.127732Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:29.127738Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:29.127871Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18031 TClient is connected to server localhost:18031 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:28:29.605415Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:29.612139Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:29.626154Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:29.711368Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:29.934961Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:29.936668Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:30.028398Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:32.328914Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630944456676477:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.329002Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.388853Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.420161Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.450958Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.488861Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.520475Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.553029Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.597866Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.690548Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630944456677134:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.690621Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630944456677139:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.690663Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.694116Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:32.705444Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630944456677141:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:32.796930Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630944456677192:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:33.921873Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519630927276805709:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:33.921950Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:33.948022Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:34.038372Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:34.078434Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::CreateOrDropTopicOverTable [GOOD] Test command err: Trying to start YDB, gRPC: 10581, MsgBus: 20226 2025-06-24T21:28:12.928809Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630857073576606:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:12.929274Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00304b/r3tmp/tmpdxx1AJ/pdisk_1.dat 2025-06-24T21:28:13.243349Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630857073576425:2079] 1750800492911818 != 1750800492911821 2025-06-24T21:28:13.282851Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:13.286139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:13.286320Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:13.291794Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10581, node 1 2025-06-24T21:28:13.386791Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:13.386820Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:13.386827Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:13.387284Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20226 TClient is connected to server localhost:20226 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:28:13.922649Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:13.983202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:14.001550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:28:16.122121Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630874253446259:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:16.122255Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:16.122396Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630874253446270:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:16.127344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:16.139706Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630874253446273:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:28:16.223617Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630874253446324:2339] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:16.534540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.841626Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630874253446559:2464] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/OGU3ZWNkNjQtNDllZWU1NWYtNmJlZmQ5MGEtNWEwMzI5MjI=\', error: path is temporary (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:16.851995Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=OGU3ZWNkNjQtNDllZWU1NWYtNmJlZmQ5MGEtNWEwMzI5MjI=, ActorId: [1:7519630874253446238:2290], ActorState: ExecuteState, TraceId: 01jyhxey5ge36kj59a8nfgfxjc, Create QueryResponse for error on request, msg: 2025-06-24T21:28:16.872051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:67) 2025-06-24T21:28:16.879133Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-24T21:28:16.882815Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519630874253446596:2327], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:17: Error: At function: KiReadTable!
:3:17: Error: Cannot find table 'db.[/Root/Temp]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:28:16.883082Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YWRkMzlmMjEtZmZhMjQ3MmYtYzgwODE4NzQtYmEyYTQwYjY=, ActorId: [1:7519630874253446592:2326], ActorState: ExecuteState, TraceId: 01jyhxey707z0sd8cbknf9eb62, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:28:16.885864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:67) 2025-06-24T21:28:17.922007Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630857073576606:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:17.922137Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 23026, MsgBus: 19920 2025-06-24T21:28:18.644877Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630883756651051:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:18.670247Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00304b/r3tmp/tmpHtbhqM/pdisk_1.dat 2025-06-24T21:28:18.771606Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:18.772549Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630883756651031:2079] 1750800498642793 != 1750800498642796 TServer::EnableGrpc on GrpcPort 23026, node 2 2025-06-24T21:28:18.815917Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:18.815994Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:18.817554Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:18.833254Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:18.833279Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:18.833288Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:18.833417Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19920 TClient is connected to server localhost:19920 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" P ... _work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:35.898865Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519630953915992447:3652] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/TmpTable\', error: unexpected path type (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges), expected types: EPathTypePersQueueGroup" severity: 1 } 2025-06-24T21:28:35.898963Z node 4 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715673, ProxyStatus: ExecError, SchemeShardReason: Check failed: path: '/Root/TmpTable', error: unexpected path type (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges), expected types: EPathTypePersQueueGroup 2025-06-24T21:28:35.899170Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Njk2NjM0ZDAtZTEyZDQ4NmEtNWY1N2VlNTQtZDFlNGQ2M2E=, ActorId: [4:7519630953915992437:2493], ActorState: ExecuteState, TraceId: 01jyhxfgs40y3ges1ejkz2hckn, Create QueryResponse for error on request, msg: Query failed, status: GENERIC_ERROR:
: Error: Scheme operation failed, status: ExecError, reason: Check failed: path: '/Root/TmpTable', error: unexpected path type (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges), expected types: EPathTypePersQueueGroup Scheme entry: { name: .metadata, owner: metadata@system, type: Directory, size_bytes: 0, created_at: { plan_step: 1750800514442, tx_id: 281474976715669 } } Scheme entry: { name: BatchUpload, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800511257, tx_id: 281474976715661 } } Scheme entry: { name: EightShard, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800510963, tx_id: 281474976715659 } } Scheme entry: { name: Join1, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514274, tx_id: 281474976715666 } } Scheme entry: { name: Join2, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514351, tx_id: 281474976715667 } } Scheme entry: { name: KeyValue, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514050, tx_id: 281474976715662 } } Scheme entry: { name: KeyValue2, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514092, tx_id: 281474976715663 } } Scheme entry: { name: KeyValueLargePartition, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514162, tx_id: 281474976715664 } } Scheme entry: { name: Logs, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800511061, tx_id: 281474976715660 } } Scheme entry: { name: Test, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514197, tx_id: 281474976715665 } } Scheme entry: { name: TmpTable, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800515849, tx_id: 281474976715672 } } Scheme entry: { name: TuplePrimaryDescending, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514393, tx_id: 281474976715668 } } Scheme entry: { name: TwoShard, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800510767, tx_id: 281474976715658 } } Scheme entry: { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } } 2025-06-24T21:28:35.975480Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519630953915992476:3664] txid# 281474976715675, issues: { message: "Check failed: path: \'/Root/TmpTable\', error: unexpected path type (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges), expected types: EPathTypePersQueueGroup" severity: 1 } 2025-06-24T21:28:35.975710Z node 4 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715675, ProxyStatus: ExecError, SchemeShardReason: Check failed: path: '/Root/TmpTable', error: unexpected path type (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges), expected types: EPathTypePersQueueGroup 2025-06-24T21:28:35.975901Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Njk2NjM0ZDAtZTEyZDQ4NmEtNWY1N2VlNTQtZDFlNGQ2M2E=, ActorId: [4:7519630953915992437:2493], ActorState: ExecuteState, TraceId: 01jyhxfgv68t5atym1enbwwgdw, Create QueryResponse for error on request, msg: Query failed, status: GENERIC_ERROR:
: Error: Scheme operation failed, status: ExecError, reason: Check failed: path: '/Root/TmpTable', error: unexpected path type (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges), expected types: EPathTypePersQueueGroup Scheme entry: { name: .metadata, owner: metadata@system, type: Directory, size_bytes: 0, created_at: { plan_step: 1750800514442, tx_id: 281474976715669 } } Scheme entry: { name: BatchUpload, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800511257, tx_id: 281474976715661 } } Scheme entry: { name: EightShard, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800510963, tx_id: 281474976715659 } } Scheme entry: { name: Join1, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514274, tx_id: 281474976715666 } } Scheme entry: { name: Join2, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514351, tx_id: 281474976715667 } } Scheme entry: { name: KeyValue, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514050, tx_id: 281474976715662 } } Scheme entry: { name: KeyValue2, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514092, tx_id: 281474976715663 } } Scheme entry: { name: KeyValueLargePartition, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514162, tx_id: 281474976715664 } } Scheme entry: { name: Logs, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800511061, tx_id: 281474976715660 } } Scheme entry: { name: Test, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514197, tx_id: 281474976715665 } } Scheme entry: { name: TmpTable, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800515849, tx_id: 281474976715672 } } Scheme entry: { name: TuplePrimaryDescending, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514393, tx_id: 281474976715668 } } Scheme entry: { name: TwoShard, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800510767, tx_id: 281474976715658 } } Scheme entry: { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } } 2025-06-24T21:28:36.040647Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519630958210959795:3675] txid# 281474976715677, issues: { message: "Check failed: path: \'/Root/TmpTable\', error: path is not a topic (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:36.040881Z node 4 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715677, ProxyStatus: ExecError, SchemeShardReason: Check failed: path: '/Root/TmpTable', error: path is not a topic (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges) 2025-06-24T21:28:36.041035Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Njk2NjM0ZDAtZTEyZDQ4NmEtNWY1N2VlNTQtZDFlNGQ2M2E=, ActorId: [4:7519630953915992437:2493], ActorState: ExecuteState, TraceId: 01jyhxfgxmdp07z8fyaqa8t28d, Create QueryResponse for error on request, msg: Query failed, status: GENERIC_ERROR:
: Error: Scheme operation failed, status: ExecError, reason: Check failed: path: '/Root/TmpTable', error: path is not a topic (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges) 2025-06-24T21:28:36.069471Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519630958210959810:3682] txid# 281474976715679, issues: { message: "Check failed: path: \'/Root/TmpTable\', error: path is not a topic (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:36.069551Z node 4 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715679, ProxyStatus: ExecError, SchemeShardReason: Check failed: path: '/Root/TmpTable', error: path is not a topic (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges) 2025-06-24T21:28:36.069715Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Njk2NjM0ZDAtZTEyZDQ4NmEtNWY1N2VlNTQtZDFlNGQ2M2E=, ActorId: [4:7519630953915992437:2493], ActorState: ExecuteState, TraceId: 01jyhxfgyhbc72bnzdbf374wck, Create QueryResponse for error on request, msg: Query failed, status: GENERIC_ERROR:
: Error: Scheme operation failed, status: ExecError, reason: Check failed: path: '/Root/TmpTable', error: path is not a topic (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges) Scheme entry: { name: .metadata, owner: metadata@system, type: Directory, size_bytes: 0, created_at: { plan_step: 1750800514442, tx_id: 281474976715669 } } Scheme entry: { name: BatchUpload, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800511257, tx_id: 281474976715661 } } Scheme entry: { name: EightShard, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800510963, tx_id: 281474976715659 } } Scheme entry: { name: Join1, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514274, tx_id: 281474976715666 } } Scheme entry: { name: Join2, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514351, tx_id: 281474976715667 } } Scheme entry: { name: KeyValue, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514050, tx_id: 281474976715662 } } Scheme entry: { name: KeyValue2, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514092, tx_id: 281474976715663 } } Scheme entry: { name: KeyValueLargePartition, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514162, tx_id: 281474976715664 } } Scheme entry: { name: Logs, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800511061, tx_id: 281474976715660 } } Scheme entry: { name: Test, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514197, tx_id: 281474976715665 } } Scheme entry: { name: TmpTable, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800515849, tx_id: 281474976715672 } } Scheme entry: { name: TuplePrimaryDescending, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800514393, tx_id: 281474976715668 } } Scheme entry: { name: TwoShard, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750800510767, tx_id: 281474976715658 } } Scheme entry: { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } } 2025-06-24T21:28:36.134819Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::RangeLimitRead-QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 7174, MsgBus: 11043 2025-06-24T21:28:31.500364Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630939882118237:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:31.500497Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004adb/r3tmp/tmpu3bS9H/pdisk_1.dat 2025-06-24T21:28:31.912014Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were 
not loaded 2025-06-24T21:28:31.914667Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630939882118217:2079] 1750800511498845 != 1750800511498848 TServer::EnableGrpc on GrpcPort 7174, node 1 2025-06-24T21:28:31.949505Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:31.949718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:31.959448Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:32.005779Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:32.005807Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:32.005818Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:32.005931Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11043 TClient is connected to server localhost:11043 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:28:32.518187Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:32.634511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:32.666327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:32.813602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:28:32.988156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.059242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:34.636708Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630952767021751:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:34.636838Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:34.964410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:35.006538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:35.048690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:35.089423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:35.120386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:35.152609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:35.189223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:35.246943Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630957061989706:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:35.247059Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:35.247242Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630957061989711:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:35.251243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:35.264478Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630957061989713:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:35.327621Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630957061989764:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:36.503285Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630939882118237:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:36.503376Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |97.9%| [TA] $(B)/ydb/core/tx/tx_allocator_client/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpQueryPerf::UpdateOn+QueryService-UseSink [GOOD] >> KqpQueryPerf::MultiDeleteFromTable-QueryService-UseSink |97.9%| [TA] {RESULT} $(B)/ydb/core/tx/tx_allocator_client/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpQueryServiceScripts::Tcl [GOOD] >> KqpQueryPerf::DeleteOn-QueryService-UseSink >> KqpQueryPerf::MultiRead-QueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::UpdateOn+QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 63916, MsgBus: 6505 2025-06-24T21:28:32.740165Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630941236980948:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:32.740219Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ad9/r3tmp/tmpn8dq3p/pdisk_1.dat 2025-06-24T21:28:33.070776Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:33.085514Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630941236980929:2079] 1750800512739071 != 1750800512739074 2025-06-24T21:28:33.085906Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:33.086023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:33.089411Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63916, node 1 2025-06-24T21:28:33.169349Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:33.169374Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:33.169382Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:33.169556Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6505 TClient is connected to server localhost:6505 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:33.746726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:28:33.748870Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:28:33.779320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:33.925716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:34.076927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:34.158971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:35.737559Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630954121884459:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:35.737719Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:36.068531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:36.102101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:36.172310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:36.236597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:36.269682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:36.317380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:36.358909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:36.426401Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630958416852422:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:36.426486Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:36.426780Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630958416852427:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:36.430834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:36.443474Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630958416852429:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:36.510166Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630958416852480:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:37.740367Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630941236980948:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:37.740432Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::Update+QueryService+UseSink >> KqpJoinOrder::Chain65Nodes [FAIL] >> KqpQueryPerf::Replace+QueryService-UseSink >> KqpQueryPerf::IndexUpsert-QueryService-UseSink >> KqpQueryPerf::IndexUpdateOn+QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexUpdateOn+QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::Tcl [GOOD] Test command err: Trying to start YDB, gRPC: 19508, MsgBus: 18314 2025-06-24T21:28:07.584412Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630835521553538:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:07.584572Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00307a/r3tmp/tmpZjwcxt/pdisk_1.dat 2025-06-24T21:28:07.958884Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:07.961086Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630835521553432:2079] 1750800487572452 != 1750800487572455 2025-06-24T21:28:07.981714Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:07.981873Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:07.983889Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19508, node 1 2025-06-24T21:28:08.127758Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:08.127783Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:08.127790Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:08.127910Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18314 TClient is connected to server localhost:18314 2025-06-24T21:28:08.585313Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:08.721381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:08.743968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:08.750887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:08.886382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:09.040333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:09.130427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:10.843244Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630848406456954:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:10.843352Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:11.310896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:11.354319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:11.384436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:11.418127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:11.450400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:11.485438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:11.556382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:11.632489Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630852701424916:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:11.632572Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:11.632729Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630852701424921:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:11.637258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:11.649070Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630852701424923:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:11.751789Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630852701424974:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:12.580121Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630835521553538:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:12.580210Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 10477, MsgBus: 23339 2025-06-24T21:28:13.601370Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630859487780023:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:13.601463Z node 2 :METADATA_PROVIDER ERROR: log.cp ... se: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:30.674018Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:28:30.687299Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:30.749690Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:30.778946Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:30.975980Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:31.066485Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:33.923373Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630946881766077:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.923518Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.990102Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:34.061357Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:34.099600Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:34.170548Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:34.206132Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:34.247473Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:34.284149Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:34.351901Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630951176734041:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:34.352021Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630951176734046:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:34.352028Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:34.356516Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:34.370146Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630951176734048:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:34.440141Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630951176734099:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:34.749720Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630929701895449:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:34.749801Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:35.760547Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:35.763749Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:35.773384Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:36.171134Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519630959766669244:2501], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:3:13: Error: At function: Commit!
:3:13: Error: COMMIT not supported inside YDB query, code: 2008 2025-06-24T21:28:36.171448Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=MWExYTg2NGYtNTMzNGZjMDItNjI5ZTdkYzUtMzM4ZWQwODk=, ActorId: [3:7519630959766669240:2499], ActorState: ExecuteState, TraceId: 01jyhxfgnacmqwq6mvs6a7za2m, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:28:37.658616Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519630964061637151:2711], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:3:13: Error: At function: Commit!
:3:13: Error: ROLLBACK not supported inside YDB query, code: 2008 2025-06-24T21:28:37.660388Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=ODA4MWNhMTgtYThiYjRjMmUtYTEwYjc0NzItNTM5MTNhNWY=, ActorId: [3:7519630964061637144:2707], ActorState: ExecuteState, TraceId: 01jyhxfjfpezpy2gcbhk24h1x6, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> KqpQueryPerf::IndexReplace+QueryService-UseSink >> KqpQueryPerf::IndexLookupJoin-EnableStreamLookup-QueryService >> KqpQueryPerf::Update-QueryService-UseSink [GOOD] >> KqpQueryPerf::Replace-QueryService-UseSink [GOOD] >> KqpQueryPerf::RangeLimitRead+QueryService [GOOD] >> KqpQueryPerf::MultiDeleteFromTable-QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Update-QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 25410, MsgBus: 18670 2025-06-24T21:28:34.721264Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630951341747837:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:34.721360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ad7/r3tmp/tmpscHp10/pdisk_1.dat 2025-06-24T21:28:35.094082Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:35.095314Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630951341747819:2079] 1750800514720425 != 1750800514720428 TServer::EnableGrpc on GrpcPort 25410, node 1 2025-06-24T21:28:35.152572Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:35.156721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:35.168637Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:35.188952Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:35.188993Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:35.189017Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:35.189234Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18670 TClient is connected to server localhost:18670 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:28:35.730592Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:35.751069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:35.786377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:35.949457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:28:36.115998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:36.196427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:37.977479Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630964226651347:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:37.977586Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:38.299996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.330545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.359225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.393618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.462031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.494010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.528354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.583052Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630968521619301:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:38.583113Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:38.583334Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630968521619306:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:38.587116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:38.597011Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630968521619308:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:38.687779Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630968521619359:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:39.724446Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630951341747837:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:39.724514Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::Delete-QueryService+UseSink [GOOD] >> KqpQueryPerf::IdxLookupJoin-QueryService [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Replace-QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 30157, MsgBus: 14854 2025-06-24T21:28:34.960639Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630950958379241:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:34.960705Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ad6/r3tmp/tmpFANmbL/pdisk_1.dat 2025-06-24T21:28:35.289361Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630950958379219:2079] 1750800514959225 != 1750800514959228 2025-06-24T21:28:35.290758Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:35.303018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:35.303185Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:35.331560Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30157, node 1 2025-06-24T21:28:35.413930Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:35.413956Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:35.413961Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:35.414097Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14854 TClient is connected to server localhost:14854 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:28:35.967848Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:35.980275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:36.010542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:36.159075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:36.311573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:36.393383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:38.085305Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630968138250037:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:38.085469Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:38.437793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.468416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.557417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.592188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.626829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.658678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.728040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.815364Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630968138250703:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:38.815449Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630968138250708:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:38.815480Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:38.820342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:38.840074Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630968138250710:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:38.908925Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630968138250761:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:39.961024Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630950958379241:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:39.961084Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::KvRead+QueryService [GOOD] >> KqpQueryPerf::KvRead-QueryService >> KqpQueryPerf::IndexLookupJoin+EnableStreamLookup-QueryService >> KqpQueryPerf::IndexDeleteOn+QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexDeleteOn+QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::RangeLimitRead+QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 2734, MsgBus: 18465 2025-06-24T21:28:35.566926Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630957958515671:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:35.566987Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ad5/r3tmp/tmp2jHN8k/pdisk_1.dat 2025-06-24T21:28:35.859065Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630957958515652:2079] 1750800515566046 != 1750800515566049 2025-06-24T21:28:35.860985Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2734, node 1 2025-06-24T21:28:35.947961Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:35.948208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:35.977736Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:36.006710Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:36.006734Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:36.006740Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:36.006841Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18465 TClient is connected to server localhost:18465 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:36.574075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:28:36.578729Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:28:36.591522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:36.748031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:36.903200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:36.983961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:38.811544Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630970843419180:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:38.811673Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:39.163582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.237254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.276655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.311612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.347179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.383928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.417386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.479219Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630975138387133:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:39.479292Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:39.479506Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630975138387138:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:39.484215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:39.495412Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630975138387140:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:39.572075Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630975138387191:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:40.567249Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630957958515671:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:40.567316Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Delete-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 30620, MsgBus: 19613 2025-06-24T21:28:30.032182Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630932487194445:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:30.032257Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004adc/r3tmp/tmpgeOFpx/pdisk_1.dat 2025-06-24T21:28:30.508430Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:30.509215Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630928192227016:2079] 1750800509994489 != 1750800509994492 2025-06-24T21:28:30.526185Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:30.526274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 30620, node 1 2025-06-24T21:28:30.529407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:30.580591Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:30.580619Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:30.580631Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:30.580757Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19613 2025-06-24T21:28:31.046671Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19613 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:31.202109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:31.220013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:31.234177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:31.369772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:31.524161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:31.614013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:33.210665Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630945372097841:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.210791Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.526068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.557416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.587197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.617317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.648760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.689226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.759088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.818645Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630945372098498:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.818725Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.818885Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630945372098503:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.822731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:33.834097Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630945372098505:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:33.926559Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630945372098556:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:35.035283Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630932487194445:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:35.035591Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 26120, MsgBus: 11693 2025-06-24T21:28:36.207551Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630960009556293:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:36.207666Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004adc/r3tmp/tmpzIfSgQ/pdisk_1.dat 2025-06-24T21:28:36.315078Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:36.315868Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630960009556274:2079] 1750800516206621 != 1750800516206624 TServer::EnableGrpc on GrpcPort 26120, node 2 2025-06-24T21:28:36.358326Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:36.358434Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:36.361462Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:36.383710Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:36.383735Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:36.383743Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:36.383850Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11693 TClient is connected to server localhost:11693 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:36.822623Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:36.830531Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:36.834497Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:36.913593Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:37.070168Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:28:37.139340Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:37.310253Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:39.232951Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630972894459786:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:39.233040Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:39.294430Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.324866Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.383939Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.450704Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.483126Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.516531Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.591797Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.658278Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630972894460448:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:39.658367Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:39.658411Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630972894460453:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:39.661274Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:39.670123Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630972894460455:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:39.754505Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630972894460506:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:41.209833Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519630960009556293:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:41.209909Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::Upsert-QueryService+UseSink [GOOD] >> KqpQueryPerf::ComputeLength+QueryService [GOOD] >> KqpQueryPerf::ComputeLength-QueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IdxLookupJoin-QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 10097, MsgBus: 4495 2025-06-24T21:28:29.315867Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630931636125916:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:29.315941Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ae1/r3tmp/tmpRftWLd/pdisk_1.dat 2025-06-24T21:28:29.695831Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630931636125894:2079] 1750800509306340 != 1750800509306343 2025-06-24T21:28:29.720854Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10097, node 1 2025-06-24T21:28:29.784913Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:29.786189Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:29.808463Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:29.832363Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:29.832383Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:29.832390Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:29.832501Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4495 TClient is connected to server localhost:4495 2025-06-24T21:28:30.334081Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:30.499194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:30.519900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:30.535997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:30.706681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:30.871180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:30.946161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:32.570125Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630944521029430:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.570253Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.868013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.900030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.930795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.966902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.997242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.041695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.111563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.202598Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630948815997392:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.202688Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.203317Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630948815997397:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:33.207373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:33.219703Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630948815997399:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:33.299587Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630948815997450:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:34.307992Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630931636125916:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:34.308068Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 3376, MsgBus: 9310 2025-06-24T21:28:35.931968Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630956713783755:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:35.932039Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ae1/r3tmp/tmpb7Wtma/pdisk_1.dat 2025-06-24T21:28:36.046420Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:36.047710Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630956713783733:2079] 1750800515925846 != 1750800515925849 TServer::EnableGrpc on GrpcPort 3376, node 2 2025-06-24T21:28:36.073861Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:36.073942Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:36.078933Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:36.125511Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:36.125535Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:36.125543Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:36.125638Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9310 TClient is connected to server localhost:9310 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:36.639978Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:36.653360Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:36.738574Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:36.903991Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:36.952940Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:36.982979Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:38.925062Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630969598687246:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:38.925191Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:38.997583Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.025343Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.054576Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.082102Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.109616Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.138675Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.165948Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.253932Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630973893655204:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:39.254033Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630973893655209:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:39.254042Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:39.257688Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:39.267239Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630973893655211:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:39.362091Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630973893655262:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:40.935281Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519630956713783755:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:40.935403Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::IndexUpdateOn-QueryService-UseSink >> KqpQueryPerf::UpdateOn-QueryService+UseSink [GOOD] >> TColumnShardTestSchema::EnableColdTiersAfterNoEviction [GOOD] >> KqpQueryPerf::IndexReplace-QueryService+UseSink [GOOD] >> KqpQueryPerf::UpdateOn+QueryService+UseSink [GOOD] >> KqpQueryPerf::Upsert+QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Upsert-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 1102, MsgBus: 26858 2025-06-24T21:28:37.286012Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630964322686547:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:37.286077Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004acf/r3tmp/tmprqWEs1/pdisk_1.dat 2025-06-24T21:28:37.626312Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:37.627247Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630964322686362:2079] 1750800517273674 != 1750800517273677 2025-06-24T21:28:37.640841Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:37.645495Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:37.647609Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1102, node 1 2025-06-24T21:28:37.721876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:37.721914Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:37.721925Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:37.722040Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26858 TClient is connected to server localhost:26858 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:38.297323Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:38.299888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:38.323833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:38.477748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:28:38.640786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.704717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:40.480229Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630977207589872:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:40.480346Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:40.743379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.815864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.883116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.913748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.940877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:41.014053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:41.063494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:41.163469Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630981502557838:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:41.163598Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:41.167448Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630981502557843:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:41.172051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:41.183516Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630981502557845:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:41.274878Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630981502557896:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:42.291440Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630964322686547:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:42.291512Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::RangeRead-QueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::EnableColdTiersAfterNoEviction [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801064.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801064.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150801064.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130801064.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801064.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150801064.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799864.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130801064.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130801064.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799864.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130799864.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130799864.000000s;Name=;Codec=}; 2025-06-24T21:27:44.848972Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:27:44.873358Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:27:44.873730Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:27:44.882078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:44.882311Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:44.882610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:44.882755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:44.882863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:44.882978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:44.883090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:44.883218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:44.883325Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:44.883451Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:44.883571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:44.916029Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:27:44.916346Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:27:44.916436Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:27:44.916628Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:44.916818Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:27:44.916896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:27:44.916941Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:27:44.917033Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:27:44.917104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:27:44.917145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:27:44.917173Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:27:44.917378Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:44.917452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:27:44.917495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:27:44.917527Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:27:44.917614Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:27:44.917697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:27:44.917741Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:27:44.917771Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:27:44.917834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:27:44.917871Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2025-06-24T21:27:44.917900Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:27:44.918100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:27:44.918144Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:27:44.918177Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:27:44.918380Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:27:44.919504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:27:44.919549Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:27:44.919708Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:27:44.919762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:27:44.919801Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:27:44.919886Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:27:44.919955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;ev ... 
16Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:28:43.526465Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:28:43.526503Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:28:43.526588Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:28:43.526726Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750800513168:max} readable: {1750800513168:max} at tablet 9437184 2025-06-24T21:28:43.526841Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-24T21:28:43.526981Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800513168:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:28:43.527029Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800513168:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:28:43.527483Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800513168:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-24T21:28:43.527568Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800513168:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-24T21:28:43.527982Z node 1 
:TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800513168:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:1448:3392];trace_detailed=; 2025-06-24T21:28:43.528307Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-24T21:28:43.528482Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-24T21:28:43.528620Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:28:43.528715Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:28:43.528936Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1448:3392];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:28:43.529036Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1448:3392];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:28:43.529126Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1448:3392];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:28:43.529157Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:1448:3392] finished for tablet 9437184 2025-06-24T21:28:43.529547Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=4;SelfId=[1:1448:3392];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:1447:3391];stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_ack","f_processing","f_ProduceResults"],"t":0},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750800523527923,"name":"_full_task","f":1750800523527923,"d_finished":0,"c":0,"l":1750800523529208,"d":1285},"events":[{"name":"bootstrap","f":1750800523528086,"d_finished":654,"c":1,"l":1750800523528740,"d":654},{"a":1750800523528921,"name":"ack","f":1750800523528921,"d_finished":0,"c":0,"l":1750800523529208,"d":287},{"a":1750800523528908,"name":"processing","f":1750800523528908,"d_finished":0,"c":0,"l":1750800523529208,"d":300},{"name":"ProduceResults","f":1750800523528555,"d_finished":373,"c":2,"l":1750800523529145,"d":373},{"a":1750800523529147,"name":"Finish","f":1750800523529147,"d_finished":0,"c":0,"l":1750800523529208,"d":61}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:28:43.529619Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1448:3392];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1447:3391];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:28:43.529948Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:1448:3392];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:1447:3391];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_ack","f_processing","f_ProduceResults"],"t":0},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750800523527923,"name":"_full_task","f":1750800523527923,"d_finished":0,"c":0,"l":1750800523529656,"d":1733},"events":[{"name":"bootstrap","f":1750800523528086,"d_finished":654,"c":1,"l":1750800523528740,"d":654},{"a":1750800523528921,"name":"ack","f":1750800523528921,"d_finished":0,"c":0,"l":1750800523529656,"d":735},{"a":1750800523528908,"name":"processing","f":1750800523528908,"d_finished":0,"c":0,"l":1750800523529656,"d":748},{"name":"ProduceResults","f":1750800523528555,"d_finished":373,"c":2,"l":1750800523529145,"d":373},{"a":1750800523529147,"name":"Finish","f":1750800523529147,"d_finished":0,"c":0,"l":1750800523529656,"d":509}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:1448:3392]->[1:1447:3391] 2025-06-24T21:28:43.530024Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=4;SelfId=[1:1448:3392];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:28:43.527544Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-24T21:28:43.530062Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1448:3392];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:28:43.530172Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:1448:3392];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 240000/14617704 160000/9752224 160000/9752224 80000/4886744 0/0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::UpdateOn-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 16158, MsgBus: 23710 2025-06-24T21:28:38.080595Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630970396258947:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:38.081524Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ace/r3tmp/tmpvXjgA5/pdisk_1.dat 2025-06-24T21:28:38.466822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:38.466941Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:38.469791Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:38.505705Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:38.507260Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630970396258909:2079] 1750800518069877 != 1750800518069880 TServer::EnableGrpc on GrpcPort 16158, node 1 2025-06-24T21:28:38.567747Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:38.567777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:38.567789Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:38.567943Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23710 TClient is connected to server localhost:23710 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:28:39.090971Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:39.136109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:39.148174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:39.155353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:39.309054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:39.455402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:28:39.531496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:41.230643Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630983281162454:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:41.230744Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:41.579620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:41.613499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:41.645937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:41.684525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:41.752954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:41.800692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:41.835682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:41.930453Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630983281163118:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:41.930565Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:41.930761Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630983281163123:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:41.935399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:41.949436Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630983281163125:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:42.053490Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630987576130472:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:43.079058Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630970396258947:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:43.079138Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpService::ToDictCache+UseCache [GOOD] >> KqpService::ToDictCache-UseCache >> KqpQueryPerf::Insert+QueryService-UseSink >> KqpQueryPerf::DeleteOn-QueryService-UseSink [GOOD] >> KqpQueryPerf::DeleteOn-QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::UpdateOn+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 14316, MsgBus: 11736 2025-06-24T21:28:38.318074Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630968148992519:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:38.318132Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a93/r3tmp/tmpMBjj88/pdisk_1.dat 2025-06-24T21:28:38.741718Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14316, node 1 2025-06-24T21:28:38.764600Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:38.764720Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:38.766656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:38.818255Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:38.818280Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:38.818294Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:38.818394Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11736 TClient is connected to server localhost:11736 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:39.399924Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:39.404914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:39.433395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:39.563860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:39.712403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:39.775715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:41.538985Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630981033896012:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:41.539116Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:41.821272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:41.853760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:41.881637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:41.909843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:41.945987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:42.006705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:42.076495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:42.168568Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630985328863972:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:42.168634Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:42.168699Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630985328863977:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:42.173340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:42.188000Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630985328863979:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:42.271775Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630985328864030:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:43.318338Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630968148992519:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:43.318380Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::AggregateToScalar+QueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexReplace-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 17672, MsgBus: 14063 2025-06-24T21:28:27.247810Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630919579881250:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:27.247885Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ae6/r3tmp/tmpteHY5d/pdisk_1.dat 2025-06-24T21:28:27.652452Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630919579881227:2079] 1750800507243216 != 1750800507243219 2025-06-24T21:28:27.661511Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:27.692253Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:27.692393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 17672, node 1 2025-06-24T21:28:27.701674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:27.759683Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:27.759711Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:27.759718Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:27.759854Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14063 TClient is connected to server localhost:14063 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:28:28.258861Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:28.306253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:28:28.332546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:28.475580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:28.637592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:28:28.715475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:30.516862Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630932464784756:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:30.516977Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:30.774708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:30.809642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:30.838198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:30.870901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:30.937953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:30.987439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:31.055833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:31.125678Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630936759752715:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:31.126091Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:31.126410Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630936759752720:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:31.131211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:31.144004Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630936759752722:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:31.199852Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630936759752773:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:32.248168Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630919579881250:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:32.248243Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:32.407446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:32.450548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... 5.869174Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630955197010619:2079] 1750800515751531 != 1750800515751534 2025-06-24T21:28:35.895564Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:35.895659Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 11788, node 2 2025-06-24T21:28:35.900513Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:35.931013Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:35.931041Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:35.931048Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:35.931191Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26705 TClient is connected to server localhost:26705 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:36.477857Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:36.497418Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:36.573155Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:36.740192Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:36.769771Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:28:36.844674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.988441Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630968081914150:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:38.988528Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:39.055770Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.096561Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.126037Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.155016Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.228235Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.300128Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.337630Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.398099Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630972376882110:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:39.398210Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:39.398428Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630972376882115:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:39.402417Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:39.411663Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630972376882117:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:39.493323Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630972376882168:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:40.563379Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.614429Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.658509Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.752464Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519630955197010640:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:40.752542Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::MultiDeleteFromTable-QueryService-UseSink [GOOD] >> KqpQueryPerf::MultiRead-QueryService [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::Chain65Nodes [FAIL] Test command err: Trying to start YDB, gRPC: 22029, MsgBus: 16064 2025-06-24T21:20:43.624385Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628928007296946:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:43.624425Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003997/r3tmp/tmpq1eM2E/pdisk_1.dat 2025-06-24T21:20:44.304822Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628928007296927:2079] 1750800043598686 != 1750800043598689 2025-06-24T21:20:44.331905Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:44.332796Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:44.332887Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Disconnected -> Connecting 2025-06-24T21:20:44.341462Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22029, node 1 2025-06-24T21:20:44.563697Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:44.563728Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:44.563736Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:44.563833Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:20:44.699401Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16064 TClient is connected to server localhost:16064 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:45.703386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:45.777091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:20:48.628014Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628928007296946:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:48.628117Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:49.258149Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628953777101354:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:49.258271Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:49.796313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:50.035359Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628958072068758:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:50.035464Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:50.064551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:50.139603Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628958072068833:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:50.139691Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:50.169517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:50.239951Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628958072068910:2323], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:50.240022Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:50.262408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:50.347707Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628958072068985:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:50.347786Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:50.360336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:50.455661Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628958072069062:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:50.455760Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:50.478136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:50.555330Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628958072069138:2350], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:50.555424Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:50.572990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:50.639463Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628958072069213:2359], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:50.639577Z node 1 :KQP_WORKLOAD_SERVICE WARN: ... /ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:55.965641Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628979546909594:2835], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:55.965719Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:55.980102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710716:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.039284Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628983841876966:2844], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.039374Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.060832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710717:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.135380Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628983841877042:2853], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.135469Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.178263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710718:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.235797Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628983841877118:2862], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.235864Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.251292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710719:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.315592Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628983841877197:2872], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.315664Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.330277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710720:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.384009Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628983841877271:2881], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.384088Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.396475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710721:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.472048Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628983841877347:2890], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.472215Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.478487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710722:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:56.529225Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628983841877423:2899], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.529310Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.529653Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628983841877428:2902], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:56.534147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710723:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:56.556516Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628983841877430:2903], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710723 completed, doublechecking } 2025-06-24T21:20:56.623254Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628983841877481:5507] txid# 281474976710724, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 70], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:59.331769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:20:59.331805Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:25:56.528087Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NDEzMThjNDItNjA0ZGE5NTYtZTliY2JiZmEtNDdkODgxMWU=, ActorId: [1:7519628953777101338:2291], ActorState: ExecuteState, TraceId: 01jyhx1g6gbvv2c1z86hrdf4t4, Create QueryResponse for error on request, msg:
: Error: Request timeout 300000ms exceeded
: Error: Cancelling after 299999ms during compilation assertion failed at ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:387, void NKikimr::NKqp::TChainTester::JoinTables(): (result.GetStatus() == EStatus::SUCCESS) failed: (TIMEOUT != SUCCESS) , with diff: (TIM|SUCC)E(OUT|SS) 0. /-S/util/system/backtrace.cpp:284: ?? @ 0x19F5618B 1. /tmp//-S/library/cpp/testing/unittest/registar.cpp:46: RaiseError @ 0x1A427E3F 2. /tmp//-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:387: JoinTables @ 0x19B51F4B 3. /tmp//-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:333: Test @ 0x19B05D50 4. /tmp//-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:554: Execute_ @ 0x19B05D50 5. /tmp//-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:552: operator() @ 0x19B4E597 6. /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149: __invoke<(lambda at /-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:552:1) &> @ 0x19B4E597 7. /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224: __call<(lambda at /-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:552:1) &> @ 0x19B4E597 8. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169: operator() @ 0x19B4E597 9. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314: operator() @ 0x19B4E597 10. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431: operator() @ 0x1A45F025 11. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990: operator() @ 0x1A45F025 12. /tmp//-S/library/cpp/testing/unittest/utmain.cpp:525: Run @ 0x1A45F025 13. /tmp//-S/library/cpp/testing/unittest/registar.cpp:373: Run @ 0x1A42E9C8 14. /tmp//-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:552: Execute @ 0x19B4D763 15. /tmp//-S/library/cpp/testing/unittest/registar.cpp:494: Execute @ 0x1A430295 16. /tmp//-S/library/cpp/testing/unittest/utmain.cpp:872: RunMain @ 0x1A45959C 17. ??:0: ?? @ 0x7F47DF3C6D8F 18. ??:0: ?? @ 0x7F47DF3C6E3F 19. ??:0: ?? 
@ 0x16F73028 >> KqpQueryPerf::Update+QueryService+UseSink [GOOD] >> KqpQueryPerf::IndexUpsert+QueryService-UseSink >> KqpQueryPerf::Replace+QueryService-UseSink [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithResultsTtlAndForgetAfter [GOOD] >> KqpQueryPerf::MultiDeleteFromTable+QueryService-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::MultiDeleteFromTable-QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 20683, MsgBus: 26474 2025-06-24T21:28:38.973081Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630970084425883:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:38.973146Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a91/r3tmp/tmph6pzFt/pdisk_1.dat 2025-06-24T21:28:39.307821Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630970084425864:2079] 1750800518971831 != 1750800518971834 2025-06-24T21:28:39.318438Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20683, node 1 2025-06-24T21:28:39.377844Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:39.377870Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:39.377879Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:39.378024Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:28:39.389373Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:39.389516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:39.390503Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26474 TClient is connected to server localhost:26474 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:28:39.954432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:39.987685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:39.993136Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:40.130739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:40.283320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:40.363111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:42.207986Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630987264296705:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:42.208194Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:42.513011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:42.543303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:42.572080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:42.603310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:42.643564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:42.692294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:42.723305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:42.774733Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630987264297362:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:42.774822Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:42.774952Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630987264297367:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:42.778627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:42.789236Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630987264297369:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:42.856208Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630987264297420:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:43.973445Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630970084425883:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:43.973530Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::MultiRead-QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 27257, MsgBus: 17392 2025-06-24T21:28:39.777179Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630973756329442:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:39.777311Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a8b/r3tmp/tmpYWmlxu/pdisk_1.dat 2025-06-24T21:28:40.124073Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630973756329423:2079] 1750800519776226 != 1750800519776229 2025-06-24T21:28:40.134520Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27257, node 1 2025-06-24T21:28:40.165785Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:40.165882Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:40.167634Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:40.219629Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:40.219657Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:40.219664Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:40.219809Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17392 TClient is connected to server localhost:17392 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:28:40.789954Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:40.817929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:40.856962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:41.036047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:41.186299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:28:41.265621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:42.872277Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630986641232952:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:42.872404Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:43.187790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.216130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.243750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.277401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.306738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.354458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.384404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.442261Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630990936200910:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:43.442356Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:43.442678Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630990936200915:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:43.446376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:43.456405Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630990936200917:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:43.550106Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630990936200968:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:44.777359Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630973756329442:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:44.777437Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::Replace+QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Update+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 19723, MsgBus: 4634 2025-06-24T21:28:40.126860Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630975587418143:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:40.126940Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a89/r3tmp/tmpZKGZZw/pdisk_1.dat 2025-06-24T21:28:40.492612Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:40.498778Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630975587418120:2079] 1750800520125508 != 1750800520125511 TServer::EnableGrpc on GrpcPort 19723, node 1 2025-06-24T21:28:40.539871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:40.540009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:40.544871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:40.575845Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:40.575884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:40.575893Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:40.576041Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4634 TClient is connected to server localhost:4634 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:28:41.152700Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:41.210409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:41.236628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:41.382176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:41.547608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:41.627047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:43.307506Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630988472321663:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:43.307631Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:43.569501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.599737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.630034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.661385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.688389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.721367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.754356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.839449Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630988472322324:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:43.839524Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:43.839586Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630988472322329:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:43.845070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:43.858930Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630988472322331:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:43.939979Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630988472322382:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:45.127191Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630975587418143:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:45.127271Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::UpdateOn-QueryService-UseSink >> KqpQueryPerf::IndexInsert+QueryService-UseSink >> KqpQueryPerf::IndexLookupJoin-EnableStreamLookup-QueryService [GOOD] >> KqpQueryPerf::IndexLookupJoin-EnableStreamLookup+QueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Replace+QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 4999, MsgBus: 24040 2025-06-24T21:28:40.401298Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630978911651145:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:40.401383Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a87/r3tmp/tmphinFCi/pdisk_1.dat 2025-06-24T21:28:40.743581Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630978911651122:2079] 1750800520399686 != 1750800520399689 2025-06-24T21:28:40.751363Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:40.794640Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:40.794739Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 4999, node 1 2025-06-24T21:28:40.803812Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:40.923805Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:40.923844Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:40.923852Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:40.923970Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24040 TClient is connected to server localhost:24040 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:28:41.421953Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:41.556099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:41.587746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:41.737995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:41.907453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:41.982204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:43.685904Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630991796554660:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:43.686030Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:43.997509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.032749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.065679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.101011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.132819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.182936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.252319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.310468Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630996091522615:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.310541Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.310768Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630996091522620:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.314398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:44.324471Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630996091522622:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:44.396674Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630996091522673:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:45.401864Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630978911651145:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:45.401927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::ExecuteScriptWithResultsTtlAndForgetAfter [GOOD] Test command err: Trying to start YDB, gRPC: 28779, MsgBus: 18467 2025-06-24T21:28:00.007282Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630801518431804:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:00.011494Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00309d/r3tmp/tmpqO83vN/pdisk_1.dat 2025-06-24T21:28:00.342041Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630801518431771:2079] 1750800479997286 != 1750800479997289 2025-06-24T21:28:00.342504Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28779, node 1 2025-06-24T21:28:00.413259Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:00.414371Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:00.417462Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:00.462869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:00.462896Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:00.462909Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:00.463083Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18467 TClient is connected to server localhost:18467 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:28:01.017852Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:01.049290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:01.080089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:01.088690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:01.283936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:01.462516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:01.546243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:03.306423Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630818698302595:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:03.306538Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:03.557286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:03.629943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:03.702277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:03.741139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:03.787664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:03.858863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:03.893202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:03.984379Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630818698303260:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:03.984504Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:03.984706Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630818698303265:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:03.989271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:04.003171Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630818698303267:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:04.119081Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630822993270616:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:05.003162Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630801518431804:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:05.003242Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 14428, MsgBus: 28877 2025-06-24T21:28:05.990476Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630827873862836:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:05.990589Z node 2 :METADATA_PROVIDER ERROR: log.cp ... 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:34.861279Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:34.884072Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:34.967401Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:35.058550Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:35.161644Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:35.245156Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:38.456174Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519630968619990036:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:38.456336Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:38.533857Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.574852Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.618415Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.669247Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.742164Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.790380Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.874448Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.959469Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519630968619990697:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:38.959586Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:38.959733Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519630968619990702:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:38.964735Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:38.978534Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7519630968619990704:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:39.039499Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7519630972914958051:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:39.043282Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519630951440119239:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:39.043342Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:40.295825Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.297811Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.299633Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:42.709871Z node 5 :KQP_PROXY WARN: query_actor.cpp:372: [TQueryBase] [TGetScriptExecutionOperationQueryActor] TraceId: a95d861-770acca4-c1ff7b94-b152b9f0, Finish with NOT_FOUND, Issues: {
: Error: No such execution }, SessionId: ydb://session/3?node_id=5&id=ZjhjZDFjMjYtNGNmNjEyMTAtMjRhMmRmOGYtMjFmNzlmMzc=, TxId: 2025-06-24T21:28:43.838127Z node 5 :KQP_PROXY WARN: query_actor.cpp:372: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: a95d861-770acca4-c1ff7b94-b152b9f0, Finish with NOT_FOUND, Issues: {
: Error: No such execution }, SessionId: ydb://session/3?node_id=5&id=YjhjNWU2MTktZjJlNTJiODktODk4NmM2ZTktMThmYWZlZGE=, TxId: 2025-06-24T21:28:44.036055Z node 5 :KQP_PROXY WARN: kqp_script_executions.cpp:1077: [ScriptExecutions] [TForgetScriptExecutionOperationActor] ExecutionId: a95d861-770acca4-c1ff7b94-b152b9f0, reply NOT_FOUND, issues: {
: Error: No such execution } 2025-06-24T21:28:44.076552Z node 5 :KQP_PROXY WARN: query_actor.cpp:372: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: a95d861-770acca4-c1ff7b94-b152b9f0, Finish with NOT_FOUND, Issues: {
: Error: No such execution }, SessionId: ydb://session/3?node_id=5&id=MWRmOWE3NTYtNTk5MDBjMjktZGU0ZGYyN2YtN2ZmMWU3N2I=, TxId: 2025-06-24T21:28:44.076704Z node 5 :KQP_PROXY WARN: kqp_script_executions.cpp:1674: [ScriptExecutions] [TCancelScriptExecutionOperationActor] ExecutionId: a95d861-770acca4-c1ff7b94-b152b9f0, check lease failed 2025-06-24T21:28:44.494478Z node 5 :KQP_PROXY WARN: query_actor.cpp:372: [TQueryBase] [TGetScriptExecutionResultQueryActor] TraceId: a95d861-770acca4-c1ff7b94-b152b9f0, State: Get results info, Finish with NOT_FOUND, Issues: {
: Error: Script execution not found }, SessionId: ydb://session/3?node_id=5&id=ZmE3ZmM2NjUtNjUzM2NjY2UtM2JjNWYzNDEtNTBkMDBjMjA=, TxId: >> KqpQueryPerf::KvRead-QueryService [GOOD] >> KqpQueryPerf::IndexLookupJoin+EnableStreamLookup-QueryService [GOOD] >> KqpQueryPerf::IndexLookupJoin+EnableStreamLookup+QueryService >> KqpQueryPerf::MultiDeleteFromTable-QueryService+UseSink [GOOD] >> KqpQueryPerf::Delete+QueryService-UseSink >> KqpQueryPerf::IndexUpsert-QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexUpsert-QueryService+UseSink >> KqpQueryPerf::IndexReplace+QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexReplace+QueryService+UseSink >> KqpQueryPerf::IndexUpdateOn+QueryService+UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::KvRead-QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 24042, MsgBus: 9117 2025-06-24T21:28:36.530668Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630959947291997:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:36.530732Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ad3/r3tmp/tmpSja74z/pdisk_1.dat 2025-06-24T21:28:36.919980Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630959947291978:2079] 1750800516529852 != 1750800516529855 2025-06-24T21:28:36.933230Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:36.971566Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:36.971697Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:36.973142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24042, node 1 2025-06-24T21:28:37.059928Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:37.059953Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:37.059963Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:37.060103Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9117 TClient is connected to server localhost:9117 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:28:37.550985Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:37.611094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:37.639600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:37.818260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:37.994714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:28:38.056075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.652427Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630972832195510:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:39.652561Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:40.003631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.054211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.085717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.113384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.136124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.164240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.197999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.278704Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630977127163469:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:40.278781Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:40.278999Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630977127163474:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:40.282986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:40.294375Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630977127163476:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:40.394757Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630977127163527:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:41.530745Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630959947291997:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:41.530810Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 21626, MsgBus: 15548 2025-06-24T21:28:42.357848Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630985836884757:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:42.357910Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ad3/r3tmp/tmpp2y0kV/pdisk_1.dat 2025-06-24T21:28:42.480758Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630985836884740:2079] 1750800522357298 != 1750800522357301 2025-06-24T21:28:42.493223Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21626, node 2 2025-06-24T21:28:42.521749Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:42.521842Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:42.524516Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:42.557338Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:42.557365Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:42.557373Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:42.557538Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15548 TClient is connected to server localhost:15548 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:43.012590Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:43.019976Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:28:43.029240Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.102746Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:43.238983Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:43.302684Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:43.374282Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:45.460499Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630998721788246:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:45.460614Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:45.521219Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.555694Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.585140Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.618366Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.650756Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.680703Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.750871Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.821640Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630998721788905:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:45.821736Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:45.822049Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630998721788910:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:45.826359Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:45.836293Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630998721788912:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:45.916051Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630998721788963:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:47.358330Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519630985836884757:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:47.358407Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::ComputeLength-QueryService [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::MultiDeleteFromTable-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 24201, MsgBus: 6852 2025-06-24T21:28:41.993707Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630982087109701:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:41.996476Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a7f/r3tmp/tmpQuL4Po/pdisk_1.dat 2025-06-24T21:28:42.410814Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:42.411379Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630982087109681:2079] 1750800521978188 != 1750800521978191 TServer::EnableGrpc on GrpcPort 24201, node 1 2025-06-24T21:28:42.433034Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:42.433147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:42.436094Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:42.497642Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:42.497671Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:42.497679Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:42.497813Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6852 TClient is connected to server localhost:6852 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:28:43.015629Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:43.042286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:43.064447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:43.207068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:28:43.342535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.396683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:45.114543Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630999266980505:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:45.114666Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:45.397096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.438209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.472526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.505016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.537477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.589509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.622992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.691021Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630999266981161:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:45.691137Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:45.691561Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630999266981166:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:45.697560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:45.709072Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630999266981168:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:45.779005Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630999266981219:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:46.979782Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630982087109701:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:46.979866Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TColumnShardTestSchema::RebootHotTiers [GOOD] >> KqpQueryPerf::Upsert+QueryService+UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexUpdateOn+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 19572, MsgBus: 20448 2025-06-24T21:28:31.724671Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630936689804567:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:31.726254Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ada/r3tmp/tmpN7EPXW/pdisk_1.dat 2025-06-24T21:28:32.116938Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:32.117992Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630936689804347:2079] 1750800511680338 != 1750800511680341 TServer::EnableGrpc on GrpcPort 19572, node 1 2025-06-24T21:28:32.183845Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:32.183938Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:32.186673Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:32.238306Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:32.238353Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:32.238363Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:32.238483Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20448 TClient is connected to server localhost:20448 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:28:32.720900Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:32.823833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:32.837023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:32.847330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:32.961982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:33.097183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:33.161915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:34.949755Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630949574707882:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:34.949923Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:35.306611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:35.335858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:35.367383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:35.441031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:35.473054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:35.505672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:35.536402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:35.589990Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630953869675837:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:35.590068Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:35.590173Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630953869675842:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:35.595007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:35.605678Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630953869675844:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:35.692141Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630953869675895:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:36.722121Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630936689804567:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:36.722213Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:37.111277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... 40.691071Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630977401478503:2079] 1750800520539948 != 1750800520539951 2025-06-24T21:28:40.707587Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:40.707679Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:40.709811Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8078, node 2 2025-06-24T21:28:40.758195Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:40.758214Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:40.758222Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:40.758332Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29520 TClient is connected to server localhost:29520 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:28:41.242385Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:41.258367Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:41.337587Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:41.557530Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:41.571204Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:41.645026Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:43.907336Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630990286382014:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:43.907422Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:43.964724Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.036266Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.077146Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.108518Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.140793Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.212051Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.245735Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.332836Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630994581349974:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.332922Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.333202Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630994581349979:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.336544Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:44.348251Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630994581349981:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:44.410942Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630994581350032:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:45.457781Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.501287Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.555225Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519630977401478688:2210];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:45.555333Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:45.581186Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> KqpQueryPerf::DeleteOn-QueryService+UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::ComputeLength-QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 31571, MsgBus: 5876 2025-06-24T21:28:37.198437Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630963262628414:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:37.198502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ad2/r3tmp/tmpVmVpPY/pdisk_1.dat 2025-06-24T21:28:37.621950Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630963262628392:2079] 1750800517196928 != 1750800517196931 2025-06-24T21:28:37.638293Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31571, node 1 2025-06-24T21:28:37.646275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:37.646400Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Disconnected -> Connecting 2025-06-24T21:28:37.649090Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:37.743890Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:37.743919Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:37.743931Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:37.744087Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5876 TClient is connected to server localhost:5876 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:28:38.224542Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:38.361319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:38.385729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:38.559719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:38.708172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:28:38.787558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.450348Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630976147531917:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:40.450480Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:40.793023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.824105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.849647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.885729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.927985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:40.969540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:41.007383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:41.069842Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630980442499870:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:41.069933Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:41.070213Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630980442499875:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:41.073579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:41.084454Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630980442499877:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:41.163737Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630980442499928:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:42.198903Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630963262628414:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:42.198972Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 16222, MsgBus: 21067 2025-06-24T21:28:43.448442Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630991943029398:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:43.448515Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ad2/r3tmp/tmpqBsRoW/pdisk_1.dat 2025-06-24T21:28:43.564285Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:43.565587Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630991943029379:2079] 1750800523447768 != 1750800523447771 TServer::EnableGrpc on GrpcPort 16222, node 2 2025-06-24T21:28:43.602757Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:43.602865Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:43.604566Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:43.617731Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:43.617755Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:43.617762Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:43.617866Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21067 TClient is connected to server localhost:21067 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:28:44.088466Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:28:44.104397Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:44.182675Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:44.326201Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:44.402845Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:44.468943Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:46.528924Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631004827932885:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:46.529007Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:46.587588Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:46.658937Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:46.691182Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:46.721497Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:46.753971Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:46.786156Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:46.820532Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:46.875452Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631004827933543:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:46.875563Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:46.875577Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631004827933548:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:46.878859Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:46.893434Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631004827933550:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:46.968193Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631004827933601:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:48.448443Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519630991943029398:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:48.448537Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::RangeRead-QueryService [GOOD] >> KqpQueryPerf::IndexDeleteOn+QueryService+UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootHotTiers [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801065.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801065.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150801065.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130801065.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801065.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150801065.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799865.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130801065.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130801065.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799865.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130799865.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130799865.000000s;Name=;Codec=}; 2025-06-24T21:27:46.077905Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:27:46.103828Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:27:46.104131Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 
2025-06-24T21:27:46.110567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:46.110803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:46.111011Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:46.111095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:46.111218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:46.111351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:46.111465Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:46.111551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:46.111622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:46.111728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:46.111799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:46.141505Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:27:46.141801Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:27:46.141884Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:27:46.142119Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:46.142336Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:27:46.142417Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:27:46.142469Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:27:46.142562Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:27:46.142622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:27:46.142668Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:27:46.142699Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:27:46.142860Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:46.142926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:27:46.142967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:27:46.143002Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:27:46.143091Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:27:46.143172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:27:46.143216Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:27:46.143256Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:27:46.143326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:27:46.143373Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2025-06-24T21:27:46.143406Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:27:46.143565Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:27:46.143605Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:27:46.143636Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:27:46.143825Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:27:46.143896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:27:46.143932Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:27:46.144029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:27:46.144065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:27:46.144096Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:27:46.144170Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:27:46.144230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=ab ... 
INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=12; 2025-06-24T21:28:49.440655Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=181; 2025-06-24T21:28:49.440695Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=10294; 2025-06-24T21:28:49.440748Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=10422; 2025-06-24T21:28:49.440818Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=14; 2025-06-24T21:28:49.441027Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=160; 2025-06-24T21:28:49.441063Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=11153; 2025-06-24T21:28:49.441213Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=92; 2025-06-24T21:28:49.441354Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=88; 2025-06-24T21:28:49.441520Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=93; 2025-06-24T21:28:49.441639Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=76; 2025-06-24T21:28:49.445962Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=4254; 2025-06-24T21:28:49.450413Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=4337; 2025-06-24T21:28:49.450522Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=13; 2025-06-24T21:28:49.450581Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=13; 2025-06-24T21:28:49.450625Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=9; 2025-06-24T21:28:49.450702Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=42; 2025-06-24T21:28:49.450742Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=7; 2025-06-24T21:28:49.450832Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=54; 2025-06-24T21:28:49.450875Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=6; 2025-06-24T21:28:49.450958Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=45; 2025-06-24T21:28:49.451060Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=66; 2025-06-24T21:28:49.451437Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=332; 2025-06-24T21:28:49.451482Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=28756; 2025-06-24T21:28:49.451624Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=29251936;raw_bytes=43173354;count=6;records=480000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:28:49.451738Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:28:49.451795Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:28:49.451861Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:28:49.469646Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-24T21:28:49.469800Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:28:49.469891Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=4; 2025-06-24T21:28:49.469964Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800215905;tx_id=18446744073709551615;;current_snapshot_ts=1750800467383; 2025-06-24T21:28:49.470008Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:28:49.470088Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:28:49.470132Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:28:49.470221Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:28:49.470916Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;self_id=[1:2092:3893];tablet_id=9437184;parent=[1:2051:3860];fline=manager.cpp:88;event=ask_data;request=request_id=88;9438184000001={portions_count=6};; 2025-06-24T21:28:49.471896Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:28:49.472775Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:28:49.472814Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:28:49.472843Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:28:49.472891Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:28:49.472988Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=4; 2025-06-24T21:28:49.473053Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800215905;tx_id=18446744073709551615;;current_snapshot_ts=1750800467383; 2025-06-24T21:28:49.473096Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:28:49.473148Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:28:49.473189Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:28:49.473279Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 240000/14617704 160000/9752224 160000/9752224 80000/4886744 0/0 >> TColumnShardTestSchema::HotTiersTtl [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Upsert+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 2705, MsgBus: 2106 2025-06-24T21:28:44.295562Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630995509022168:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:44.296234Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a64/r3tmp/tmpauhCN8/pdisk_1.dat 2025-06-24T21:28:44.612517Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:44.614816Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630995509021978:2079] 1750800524274289 != 1750800524274292 TServer::EnableGrpc on GrpcPort 2705, node 1 2025-06-24T21:28:44.681940Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:44.682067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:44.685318Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:44.716281Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:44.716318Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:44.716328Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:44.716464Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2106 TClient is connected to server localhost:2106 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:45.281498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:45.291292Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:45.314830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:45.493123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:45.651281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:28:45.723524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:47.450135Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631008393925501:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:47.450557Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:47.846288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:47.880533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:47.917756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:47.953924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:47.986236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.058020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.132842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.220500Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631012688893464:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:48.220576Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:48.220919Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631012688893469:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:48.225117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-24T21:28:48.236277Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631012688893471:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:48.321584Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631012688893524:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:49.300751Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630995509022168:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:49.301461Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryServiceScripts::TestTruncatedByRows [GOOD] >> KqpQueryServiceScripts::TestTruncatedBySize >> KqpQueryPerf::Insert+QueryService-UseSink [GOOD] >> KqpQueryPerf::Insert+QueryService+UseSink >> KqpQueryPerf::AggregateToScalar+QueryService [GOOD] >> KqpQueryPerf::AggregateToScalar-QueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::DeleteOn-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 11879, MsgBus: 4509 2025-06-24T21:28:39.470379Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630974404569295:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:39.470413Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a8d/r3tmp/tmpJd3lyA/pdisk_1.dat 2025-06-24T21:28:39.832173Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:39.832605Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630974404569271:2079] 1750800519466848 != 1750800519466851 TServer::EnableGrpc on GrpcPort 11879, node 1 2025-06-24T21:28:39.884090Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:39.884116Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:39.884127Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:39.884270Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:28:39.889251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:39.889408Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:39.891793Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4509 TClient is connected to server localhost:4509 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:40.473791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:28:40.480438Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:28:40.500777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:40.666391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:40.822869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:40.882977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:42.444371Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630987289472809:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:42.444495Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:42.781945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:42.811679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:42.838264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:42.865852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:42.895351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:42.940136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.008816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.070118Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630991584440767:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:43.070172Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:43.070217Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630991584440772:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:43.073733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-24T21:28:43.085044Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630991584440774:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:43.158702Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630991584440825:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:44.470725Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630974404569295:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:44.470800Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 64032, MsgBus: 8528 2025-06-24T21:28:45.300906Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630997602775117:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:45.300958Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a8d/r3tmp/tmpGwPbNA/pdisk_1.dat 2025-06-24T21:28:45.406040Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630997602775096:2079] 1750800525300473 != 1750800525300476 2025-06-24T21:28:45.419800Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64032, node 2 2025-06-24T21:28:45.434356Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:45.434437Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:45.438868Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:45.513668Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:45.513703Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:45.513710Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:45.513822Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8528 TClient is connected to server localhost:8528 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:45.969701Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:45.982454Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:46.056508Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:46.218397Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:46.277583Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:46.308932Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:48.244801Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631010487678616:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:48.244915Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:48.291313Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.335774Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.366654Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.399050Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.429799Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.464638Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.510966Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.579129Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631010487679278:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:48.579236Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:48.579623Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631010487679283:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:48.583855Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-24T21:28:48.595859Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631010487679285:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:48.660698Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631010487679336:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::RangeRead-QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 26371, MsgBus: 3913 2025-06-24T21:28:44.840061Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630996378749690:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:44.840131Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a63/r3tmp/tmpvFCr8l/pdisk_1.dat 2025-06-24T21:28:45.137646Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:45.139522Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630996378749667:2079] 1750800524838334 != 1750800524838337 TServer::EnableGrpc on GrpcPort 26371, node 1 2025-06-24T21:28:45.213130Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:45.213232Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:45.217937Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:45.246232Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:45.246255Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:45.246262Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:45.246418Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3913 TClient is connected to server localhost:3913 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:45.783364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:45.810191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:45.857727Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:45.967722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:46.120632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:46.186016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:47.938888Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631009263653183:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:47.939028Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:48.235296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.276469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.348526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.377865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.417672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.456630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.489458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.563321Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631013558621139:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:48.563452Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:48.563676Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631013558621144:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:48.567975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-24T21:28:48.582858Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631013558621146:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:48.647010Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631013558621197:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:49.845205Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630996378749690:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:49.845262Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexDeleteOn+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 62849, MsgBus: 25431 2025-06-24T21:28:34.506917Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630949963192308:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:34.507057Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ad8/r3tmp/tmpasehA0/pdisk_1.dat 2025-06-24T21:28:34.826676Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:34.827277Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630949963192284:2079] 1750800514503960 != 1750800514503963 TServer::EnableGrpc on GrpcPort 62849, node 1 2025-06-24T21:28:34.900042Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:34.900069Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:34.900077Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:34.900215Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:28:34.923195Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:34.923382Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:34.925376Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25431 TClient is connected to server localhost:25431 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:35.479742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:35.501524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:35.521141Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:28:35.657111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:35.819569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:35.897359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:37.641293Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630962848095825:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:37.641412Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:37.957214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:37.989181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.025046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.056863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.125536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.199818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.240735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:38.312808Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630967143063785:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:38.312907Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:38.313619Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630967143063790:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:28:38.318386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-24T21:28:38.332263Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630967143063792:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:38.433863Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630967143063843:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:39.507362Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630949963192308:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:39.507454Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:39.607738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:39.667462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... :42.860523Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519630988022346785:2079] 1750800522747271 != 1750800522747274 2025-06-24T21:28:42.903785Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:42.903877Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 30341, node 2 2025-06-24T21:28:42.905323Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:42.944917Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:42.944941Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:42.944947Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:42.945063Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1433 TClient is connected to server localhost:1433 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:28:43.388128Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:28:43.402028Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:43.475231Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:43.615803Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:43.690419Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:43.813308Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:46.089723Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631005202217598:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:46.089840Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:46.149948Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:46.180165Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:46.209120Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:46.238738Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:46.267244Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:46.301372Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:46.338424Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:46.421576Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631005202218258:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:46.421662Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:46.421723Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631005202218263:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:46.425252Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:46.434532Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631005202218265:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:46.502446Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631005202218316:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:47.543793Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:47.590545Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:47.637274Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:47.750082Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519630988022346804:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:47.750182Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::IndexUpdateOn-QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexUpdateOn-QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::HotTiersTtl [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; 
WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801092.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801092.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150801092.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801092.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801092.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130801092.000000s;Name=;Codec=}; WaitEmptyAfter=1;Tiers={{Column=timestamp;EvictAfter=150801092.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801092.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130799892.000000s;Name=;Codec=}; 2025-06-24T21:28:13.026783Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:28:13.048236Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:28:13.048564Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:28:13.054796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:28:13.055047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:28:13.055343Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:28:13.055480Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:28:13.055592Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:28:13.055715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:28:13.055821Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:28:13.055926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:28:13.056022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:28:13.056136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:28:13.056237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:28:13.091539Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:28:13.091832Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:28:13.091912Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:28:13.092120Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:28:13.092321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:28:13.092400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:28:13.092449Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:28:13.092545Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:28:13.092611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:28:13.092662Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:28:13.092697Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:28:13.092894Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:28:13.092965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:28:13.093011Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:28:13.093044Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:28:13.093134Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:28:13.093195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:28:13.093241Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:28:13.093302Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:28:13.093372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:28:13.093418Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:28:13.093450Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:28:13.093674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:28:13.093720Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:28:13.093755Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:28:13.093984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:28:13.094046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:28:13.094082Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:28:13.094209Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:28:13.094255Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:28:13.094287Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:28:13.094399Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:28:13.094484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:28:13.094530Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract. ... de 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:28:50.649902Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:28:50.649947Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:28:50.650053Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:28:50.650268Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750800529347:max} readable: {1750800529347:max} at tablet 9437184 2025-06-24T21:28:50.650415Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-24T21:28:50.650576Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800529347:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:28:50.650642Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800529347:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:28:50.651201Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800529347:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-24T21:28:50.651316Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800529347:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-24T21:28:50.651856Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800529347:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:1323:3292];trace_detailed=; 2025-06-24T21:28:50.652313Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-24T21:28:50.652559Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-24T21:28:50.652755Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:28:50.652889Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:28:50.653230Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1323:3292];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:28:50.653355Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1323:3292];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:28:50.653476Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1323:3292];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:28:50.653525Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:1323:3292] finished for tablet 9437184 2025-06-24T21:28:50.654000Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:1323:3292];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:1322:3291];stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","l_ack","f_processing","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750800530651776,"name":"_full_task","f":1750800530651776,"d_finished":0,"c":0,"l":1750800530653598,"d":1822},"events":[{"name":"bootstrap","f":1750800530652000,"d_finished":926,"c":1,"l":1750800530652926,"d":926},{"a":1750800530653201,"name":"ack","f":1750800530653201,"d_finished":0,"c":0,"l":1750800530653598,"d":397},{"a":1750800530653179,"name":"processing","f":1750800530653179,"d_finished":0,"c":0,"l":1750800530653598,"d":419},{"name":"ProduceResults","f":1750800530652673,"d_finished":484,"c":2,"l":1750800530653501,"d":484},{"a":1750800530653505,"name":"Finish","f":1750800530653505,"d_finished":0,"c":0,"l":1750800530653598,"d":93}],"id":"9437184::8"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:28:50.654097Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1323:3292];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1322:3291];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:28:50.654538Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=3;SelfId=[1:1323:3292];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:1322:3291];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","l_ProduceResults","f_Finish"],"t":0.001},{"events":["l_ack","l_processing","l_Finish"],"t":0.002}],"full":{"a":1750800530651776,"name":"_full_task","f":1750800530651776,"d_finished":0,"c":0,"l":1750800530654149,"d":2373},"events":[{"name":"bootstrap","f":1750800530652000,"d_finished":926,"c":1,"l":1750800530652926,"d":926},{"a":1750800530653201,"name":"ack","f":1750800530653201,"d_finished":0,"c":0,"l":1750800530654149,"d":948},{"a":1750800530653179,"name":"processing","f":1750800530653179,"d_finished":0,"c":0,"l":1750800530654149,"d":970},{"name":"ProduceResults","f":1750800530652673,"d_finished":484,"c":2,"l":1750800530653501,"d":484},{"a":1750800530653505,"name":"Finish","f":1750800530653505,"d_finished":0,"c":0,"l":1750800530654149,"d":644}],"id":"9437184::8"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:1323:3292]->[1:1322:3291] 2025-06-24T21:28:50.654672Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1323:3292];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:28:50.651284Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-24T21:28:50.654723Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1323:3292];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:28:50.654846Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:1323:3292];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 240000/14617704 160000/9752224 80000/4886744 0/0 >> KqpService::SwitchCache+UseCache [GOOD] |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> KqpQueryPerf::Replace+QueryService+UseSink 
[GOOD] |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> TStorageTenantTest::CreateTableOutsideDatabaseFailToStartTabletsButDropIsOk [GOOD] >> TStorageTenantTest::Empty [GOOD] >> KqpQueryPerf::MultiDeleteFromTable+QueryService-UseSink [GOOD] >> KqpQueryPerf::MultiDeleteFromTable+QueryService+UseSink >> KqpQueryPerf::IndexLookupJoin-EnableStreamLookup+QueryService [GOOD] |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> TStorageTenantTest::CreateTableOutsideDatabaseFailToStartTabletsButDropIsOk [GOOD] |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> TStorageTenantTest::Empty [GOOD] >> KqpQueryPerf::UpdateOn-QueryService-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpService::SwitchCache+UseCache [GOOD] Test command err: Trying to start YDB, gRPC: 1181, MsgBus: 19832 2025-06-24T21:28:06.405869Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630832833065519:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:06.405978Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003089/r3tmp/tmpj6w5aB/pdisk_1.dat 2025-06-24T21:28:06.859334Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:06.860111Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630832833065496:2079] 1750800486404104 != 1750800486404107 2025-06-24T21:28:06.874466Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:06.874573Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 1181, node 1 2025-06-24T21:28:06.876407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:06.942484Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:06.942507Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:06.942518Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:06.942665Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19832 2025-06-24T21:28:07.429189Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19832 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:07.607255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:07.635975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:07.668520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:07.830319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:08.003112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:08.074604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:09.856632Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630845717969019:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:09.856744Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:10.225942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:10.261001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:10.301698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:10.373706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:10.407874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:10.461797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:10.538744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:10.639985Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630850012936979:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:10.640069Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:10.640255Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630850012936984:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:10.643873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:10.655011Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630850012936986:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:10.735079Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630850012937037:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:11.405919Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630832833065519:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:11.406016Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:12.823572Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519630858602873027:2524] TxId: 281474976715770. Ctx: { TraceId: 01jyhxescve4g6t681mbjb057g, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzY1YTY3NzctYTMyMDJiNTgtZGU0MjEwOS04ZjQzMjA5OQ==, CurrentExecutionId: , C ... ivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003089/r3tmp/tmpbDIWEe/pdisk_1.dat 2025-06-24T21:28:22.201521Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630900336540020:2079] 1750800502015977 != 1750800502015980 2025-06-24T21:28:22.204920Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:22.225322Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:22.225428Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:22.227640Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20674, node 3 2025-06-24T21:28:22.296007Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:22.296034Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:22.296049Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:22.296205Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3212 TClient is connected to server localhost:3212 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:22.866433Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:22.874082Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:22.882719Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:28:22.964934Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.053430Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:23.144403Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:23.227097Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:25.833797Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630913221443543:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:25.833905Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:25.899694Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:25.932514Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.000675Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.069792Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.109730Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.180394Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.256792Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:26.353980Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630917516411508:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.354101Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.354294Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519630917516411513:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:26.359721Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:26.371084Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519630917516411515:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:26.473243Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519630917516411566:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:27.023249Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630900336540041:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:27.023336Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:27.692879Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:37.172619Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:28:37.172648Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded took: 23.284183s took: 23.287453s took: 23.291412s took: 23.289318s took: 23.294267s took: 23.296506s took: 23.297936s took: 23.298757s took: 23.303660s took: 23.303027s ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Replace+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 24449, MsgBus: 5831 2025-06-24T21:28:46.994113Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631005177049389:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:46.994228Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a5e/r3tmp/tmp0PgsZh/pdisk_1.dat 2025-06-24T21:28:47.343926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:47.344081Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:47.347942Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:47.348726Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631005177049370:2079] 1750800526992779 != 1750800526992782 2025-06-24T21:28:47.352842Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24449, node 1 2025-06-24T21:28:47.456963Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:47.456995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:47.457003Z 
node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:47.457154Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5831 TClient is connected to server localhost:5831 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:48.019982Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:48.023846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:48.044463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:48.212024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:28:48.343566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.408126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:50.223625Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631022356920187:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.223769Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.551335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.580582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.606111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.631313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.666001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.709769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.779047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.831733Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631022356920843:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.831806Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.831887Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631022356920848:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.835330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:50.846072Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631022356920850:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:50.931690Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631022356920901:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:51.994904Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631005177049389:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:51.994975Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> KqpQueryPerf::IndexLookupJoin+EnableStreamLookup+QueryService [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexLookupJoin-EnableStreamLookup+QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 24244, MsgBus: 19752 2025-06-24T21:28:40.783851Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630979292984443:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:40.783923Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a81/r3tmp/tmpya7lHt/pdisk_1.dat 2025-06-24T21:28:41.160597Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:41.165542Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630979292984341:2079] 1750800520778418 != 1750800520778421 TServer::EnableGrpc on GrpcPort 24244, node 1 2025-06-24T21:28:41.252069Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:41.252173Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:41.261506Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:41.261524Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:41.261530Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:41.261624Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:28:41.263509Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19752 TClient is connected to server localhost:19752 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:41.805573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:28:41.807592Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:28:41.820096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:41.834005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:42.019977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:28:42.191725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:42.269108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:44.042032Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630996472855172:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.042162Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.401342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.434002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.505022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.537939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.568956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.641062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.711647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.766006Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630996472855834:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.766100Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.766299Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630996472855839:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.770855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:44.783396Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630996472855841:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:44.848868Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630996472855894:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:45.785450Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630979292984443:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:45.785540Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 62159, MsgBus: 30008 2025-06-24T21:28:47.407751Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631008635772629:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:47.407821Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a81/r3tmp/tmpp2Aqs5/pdisk_1.dat 2025-06-24T21:28:47.552330Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:47.557136Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:47.557238Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:47.559010Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62159, node 2 2025-06-24T21:28:47.597316Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:47.597336Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:47.597342Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:47.597485Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30008 TClient is connected to server localhost:30008 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:48.062465Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:48.074500Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:48.151553Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:48.313297Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:48.395554Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:48.511364Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:50.437450Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631021520676122:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.437872Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.488272Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.561717Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.593358Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.623230Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.656763Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.692754Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.745469Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.799565Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631021520676785:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.799655Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.800107Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631021520676790:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.803538Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:50.813443Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631021520676792:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:50.899270Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631021520676843:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:52.408517Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519631008635772629:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:52.408586Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |98.0%| [TA] $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpQueryPerf::IndexUpsert+QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexUpsert+QueryService+UseSink |98.0%| [TA] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::UpdateOn-QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 2537, MsgBus: 22847 2025-06-24T21:28:47.174929Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631008835683967:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:47.175087Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a5d/r3tmp/tmpwH79Gj/pdisk_1.dat 2025-06-24T21:28:47.528336Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:47.535262Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631008835683942:2079] 1750800527173606 != 1750800527173609 TServer::EnableGrpc on GrpcPort 2537, node 1 2025-06-24T21:28:47.581909Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:47.582014Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:47.593456Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:47.643772Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:47.643803Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:47.643810Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:47.643949Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22847 TClient is connected to server localhost:22847 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:28:48.187806Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:48.219905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:48.236084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:48.248494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:28:48.386822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.554422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:48.622164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:50.305518Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631021720587461:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.305674Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.657160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.687403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.712763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.739749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.769992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.801130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.868929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.950390Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631021720588124:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.950476Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.952209Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631021720588129:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.955566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:50.964394Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631021720588131:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:51.036131Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631026015555478:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:52.174832Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631008835683967:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:52.174903Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::Delete+QueryService-UseSink [GOOD] >> KqpQueryPerf::Delete+QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexLookupJoin+EnableStreamLookup+QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 21264, MsgBus: 20308 2025-06-24T21:28:42.592811Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630987205983179:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:42.596495Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a7e/r3tmp/tmpSPYtOI/pdisk_1.dat 2025-06-24T21:28:42.950954Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:42.953072Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630987205983148:2079] 1750800522579555 != 1750800522579558 TServer::EnableGrpc on GrpcPort 21264, node 1 2025-06-24T21:28:42.986921Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:42.987341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:42.988907Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:43.007648Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:43.007669Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:43.007675Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:43.007780Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20308 TClient is connected to server localhost:20308 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:43.498203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:43.515259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:43.591194Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:43.630394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:43.757679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:43.833129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:45.480023Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631000090886672:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:45.480152Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:45.808496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.842864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.877254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.918218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.965107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.995208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:46.063661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:46.117717Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631004385854630:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:46.117812Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:46.117907Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631004385854635:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:46.121582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:46.131465Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631004385854637:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:46.186281Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631004385854688:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:47.588054Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630987205983179:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:47.588368Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 1182, MsgBus: 14596 2025-06-24T21:28:48.431250Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631013696693862:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:48.431319Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a7e/r3tmp/tmpPz2o3V/pdisk_1.dat 2025-06-24T21:28:48.561642Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:48.562916Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631013696693846:2079] 1750800528430760 != 1750800528430763 TServer::EnableGrpc on GrpcPort 1182, node 2 2025-06-24T21:28:48.581813Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:48.581907Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:48.583680Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:48.627721Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:48.627742Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:48.627751Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:48.627873Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14596 TClient is connected to server localhost:14596 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:49.129488Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:49.135971Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:49.147063Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:49.224944Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:49.390375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:49.443534Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:49.468353Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:51.674692Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631026581597392:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:51.674778Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:51.728174Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:51.758767Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:51.829032Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:51.860672Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:51.892498Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:51.931395Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.005985Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.063726Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631030876565351:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:52.063817Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:52.064174Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631030876565356:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:52.068020Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:52.078240Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631030876565358:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:52.161065Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631030876565409:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:53.431292Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519631013696693862:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:53.431416Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::IndexInsert+QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexInsert+QueryService+UseSink |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> KqpQueryPerf::Insert+QueryService+UseSink [GOOD] |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::CreateSubdomain >> TSchemeShardAuditSettings::AlterSubdomain >> KqpQueryPerf::AggregateToScalar-QueryService [GOOD] >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-true >> TSchemeShardAuditSettings::CreateExtSubdomain >> KqpQueryPerf::IndexUpsert-QueryService+UseSink [GOOD] >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-false >> TPQTest::TestReadSessions [GOOD] >> TPQTest::TestReadSubscription >> KqpRanges::ValidatePredicatesDataQuery [GOOD] >> KqpReturning::Random >> KqpQueryPerf::IndexReplace+QueryService+UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Insert+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 61444, MsgBus: 17901 2025-06-24T21:28:45.216521Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630999172087224:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:45.216561Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a62/r3tmp/tmpRwtX9i/pdisk_1.dat 2025-06-24T21:28:45.565433Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630999172087200:2079] 1750800525208769 != 1750800525208772 2025-06-24T21:28:45.573538Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61444, node 1 2025-06-24T21:28:45.659633Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:45.659799Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-24T21:28:45.661501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:45.685091Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:45.685116Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:45.685125Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:45.685308Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17901 TClient is connected to server localhost:17901 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:28:46.227432Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:46.260273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:46.276644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:46.449939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:46.598992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:46.660641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:48.476149Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631012056990728:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:48.476278Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:48.854866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.882071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.912404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.951238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.983622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:49.052290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:49.123315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:49.181889Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631016351958686:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:49.181978Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:49.182189Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631016351958691:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:49.186004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:49.198577Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631016351958693:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:49.284669Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631016351958746:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:50.217035Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630999172087224:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:50.217109Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 27710, MsgBus: 5162 2025-06-24T21:28:51.601746Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631023751113817:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:51.601832Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a62/r3tmp/tmpMnd37o/pdisk_1.dat 2025-06-24T21:28:51.745779Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:51.749942Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631023751113798:2079] 1750800531600316 != 1750800531600319 2025-06-24T21:28:51.765316Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:51.765429Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:51.766856Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27710, node 2 2025-06-24T21:28:51.801316Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:51.801345Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:51.801356Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:51.801492Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5162 TClient is connected to server localhost:5162 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:52.334555Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:52.342199Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:52.355013Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:28:52.457257Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.614791Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:52.617692Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:52.696929Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:54.850746Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631036636017329:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:54.850852Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:54.893857Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:54.927916Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:54.955836Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:54.985536Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:55.016176Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:55.084636Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:55.133380Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:55.189986Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631040930985287:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:55.190086Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:55.190147Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631040930985292:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:55.193529Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:55.202515Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631040930985294:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:55.268632Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631040930985345:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::AggregateToScalar-QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 5684, MsgBus: 24746 2025-06-24T21:28:45.487327Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630998398760632:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:45.487967Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a61/r3tmp/tmp1H1t0x/pdisk_1.dat 2025-06-24T21:28:45.791381Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:45.794973Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630998398760608:2079] 1750800525485555 != 1750800525485558 TServer::EnableGrpc on GrpcPort 5684, node 1 2025-06-24T21:28:45.895340Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:45.897289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:45.911241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:45.935741Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:45.935763Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:45.935770Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:45.935886Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24746 TClient is connected to server localhost:24746 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:46.493925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:28:46.496033Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:28:46.512785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:46.673041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:46.810938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:46.875005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:48.544399Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631011283664134:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:48.544552Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:48.836232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.865748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.895453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.926787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.961626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:49.010923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:49.041938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:49.097780Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631015578632085:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:49.097874Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:49.098139Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631015578632090:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:49.101763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:49.111297Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631015578632092:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:49.201318Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631015578632143:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:50.487580Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630998398760632:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:50.487654Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 31399, MsgBus: 11719 2025-06-24T21:28:51.551705Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631022644160358:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:51.551823Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a61/r3tmp/tmpMJjqoi/pdisk_1.dat 2025-06-24T21:28:51.660407Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:51.661091Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631022644160339:2079] 1750800531551258 != 1750800531551261 TServer::EnableGrpc on GrpcPort 31399, node 2 2025-06-24T21:28:51.691295Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:51.691403Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:51.693062Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:51.725658Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:51.725686Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:51.725700Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:51.725817Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11719 TClient is connected to server localhost:11719 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:52.189619Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:52.200379Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:52.277807Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:52.435272Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:52.532615Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:52.699000Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:54.713933Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631035529063848:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:54.714041Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:54.758656Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:54.788143Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:54.816983Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:54.848951Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:54.876804Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:54.907257Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:54.939267Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:54.991008Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631035529064500:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:54.991106Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:54.991226Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631035529064505:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:54.994717Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:55.006616Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631035529064507:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:55.104194Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631039824031854:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:56.552204Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519631022644160358:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:56.552305Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexUpsert-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 18138, MsgBus: 5918 2025-06-24T21:28:40.604333Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630978034680713:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:40.604456Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a85/r3tmp/tmp1BYh5N/pdisk_1.dat 2025-06-24T21:28:41.009398Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18138, node 1 2025-06-24T21:28:41.051999Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:41.052104Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:41.053278Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:41.129331Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:41.129352Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:41.129360Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:41.129519Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5918 TClient is connected to server localhost:5918 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:28:41.611291Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:41.715904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:41.727933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:41.746504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:41.882215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:42.048519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 
2025-06-24T21:28:42.128314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:43.866284Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630990919584117:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:43.866450Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.106532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.133583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.162118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.194982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.226379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.258384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.327972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.389687Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630995214552073:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.389795Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.389897Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630995214552078:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.394438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:44.408871Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630995214552080:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:44.468824Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630995214552131:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:45.596947Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630978034680713:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:45.597020Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:45.649602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.689472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, b ... ubscription [2:7519631009842848586:2079] 1750800528992142 != 1750800528992145 2025-06-24T21:28:49.129979Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5003, node 2 2025-06-24T21:28:49.142347Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:49.142445Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:49.148690Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:49.185552Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:49.185574Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:49.185583Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:49.185685Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12227 TClient is connected to server localhost:12227 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:49.639971Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:49.658482Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:49.710803Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:28:49.888512Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:49.966554Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:50.025502Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:52.574777Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631027022719404:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:52.574897Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:52.634290Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.668881Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.704755Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.752506Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.790689Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.829675Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.880412Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.972105Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631027022720072:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:52.972187Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:52.972441Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631027022720077:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:52.976447Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:52.987577Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631027022720079:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:53.061570Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631031317687426:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:53.993203Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519631009842848607:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:53.993289Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:54.143423Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:54.218959Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:54.294254Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TSchemeShardAuditSettings::CreateExtSubdomain [GOOD] >> TSchemeShardAuditSettings::CreateSubdomain [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexReplace+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 11045, MsgBus: 63371 2025-06-24T21:28:40.639924Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630976300110004:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:40.640067Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a83/r3tmp/tmpYQqvnQ/pdisk_1.dat 2025-06-24T21:28:40.994738Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:41.007522Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630976300109817:2079] 1750800520618581 != 1750800520618584 2025-06-24T21:28:41.012768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:41.012856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Disconnected -> Connecting 2025-06-24T21:28:41.017052Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11045, node 1 2025-06-24T21:28:41.143757Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:41.143784Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:41.143792Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:41.143933Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63371 TClient is connected to server localhost:63371 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:28:41.644281Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:41.770007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:41.803063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:41.953441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:42.107725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:42.184352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:43.828074Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630989185013348:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:43.828156Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.110539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.148085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.177492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.246572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.276583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.313750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.344124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.430815Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630993479981306:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.430895Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.431035Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630993479981311:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:44.437244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:44.454376Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630993479981313:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:44.534828Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630993479981364:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:45.641652Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630976300110004:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:45.642100Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:45.747753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:45.782693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation pa ... ver::EnableGrpc on GrpcPort 26274, node 2 2025-06-24T21:28:49.262057Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:49.262178Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:49.264646Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:49.292991Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:49.293013Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:49.293021Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:49.293152Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23660 TClient is connected to server localhost:23660 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:28:49.742508Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:49.751750Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:49.759832Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:49.818478Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:49.969992Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:50.045472Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:50.175868Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:52.449523Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631030068495077:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:52.449617Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:52.535703Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.617202Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.652429Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.684017Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.753879Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.786472Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.860852Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.928462Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631030068495740:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:52.928555Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:52.928774Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631030068495745:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:52.932404Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:52.948995Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631030068495747:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:53.018619Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631034363463094:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:54.071674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:54.111364Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519631017183591574:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:54.111449Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:54.155480Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:54.230306Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest >> KqpQueryPerf::MultiDeleteFromTable+QueryService+UseSink [GOOD] >> RemoteTopicReader::ReadTopic ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::CreateSubdomain [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:28:57.930125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: 
BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:28:57.930244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:28:57.930352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:28:57.930396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:28:57.931667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:28:57.931725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:28:57.931811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:28:57.931924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:28:57.932818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:28:57.939560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:28:58.028932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:28:58.029001Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:58.047392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:28:58.047906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:28:58.048070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:28:58.057950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:28:58.058106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:28:58.060300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:58.060673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:28:58.069960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:28:58.070998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:28:58.082823Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:28:58.082927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:28:58.083236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:28:58.083303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:28:58.083361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:28:58.083458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.091013Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:28:58.233544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:28:58.233835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.236938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:28:58.237025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:28:58.239252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:28:58.239456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:28:58.243059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:58.243326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:28:58.243547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.243613Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:28:58.243657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:28:58.243701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:28:58.245651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.245708Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:28:58.245766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:28:58.247646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.247697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.247747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:28:58.247816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:28:58.252966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:28:58.255010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:28:58.255421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:28:58.256543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:58.256680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:28:58.256744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:28:58.258296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change 
state for txid 1:0 128 -> 240 2025-06-24T21:28:58.258394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:28:58.258587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:28:58.258707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:28:58.261171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:28:58.261242Z node 1 :FLAT_TX_SCHEMESHARD ... eration_side_effects.cpp:653: Send tablet strongly msg operationId: 112:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:112 msg type: 269090816 2025-06-24T21:28:58.605988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 112, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 112 at step: 5000013 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 112 at step: 5000013 2025-06-24T21:28:58.606957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000013, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:58.607066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 112 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000013 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:28:58.607140Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_unsafe.cpp:47: TDropForceUnsafe TPropose, operationId: 112:0 HandleReply TEvOperationPlan, step: 5000013, at schemeshard: 72057594046678944 2025-06-24T21:28:58.607234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5298: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 7] name: USER_0 type: EPathTypeSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 112 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:28:58.607267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5314: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 7] 2025-06-24T21:28:58.607388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 112:0 128 -> 130 2025-06-24T21:28:58.607590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:28:58.607683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 2 2025-06-24T21:28:58.608613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-24T21:28:58.608959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 FAKE_COORDINATOR: Erasing txId 112 2025-06-24T21:28:58.610531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:28:58.610562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:28:58.610675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 7] 2025-06-24T21:28:58.610760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:28:58.610797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 112, path id: 1 2025-06-24T21:28:58.610834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 112, path id: 7 2025-06-24T21:28:58.611189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 112:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.611228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:417: [72057594046678944] TDeleteParts opId# 112:0 ProgressState 2025-06-24T21:28:58.611281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#112:0 progress is 1/1 2025-06-24T21:28:58.611301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2025-06-24T21:28:58.611324Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#112:0 progress is 1/1 2025-06-24T21:28:58.611343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2025-06-24T21:28:58.611364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 112, ready parts: 1/1, is published: false 2025-06-24T21:28:58.611398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2025-06-24T21:28:58.611427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 112:0 2025-06-24T21:28:58.611478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 112:0 2025-06-24T21:28:58.611561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-24T21:28:58.611598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 112, publications: 2, subscribers: 0 2025-06-24T21:28:58.611630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 112, [OwnerId: 
72057594046678944, LocalPathId: 1], 27 2025-06-24T21:28:58.611681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 112, [OwnerId: 72057594046678944, LocalPathId: 7], 18446744073709551615 2025-06-24T21:28:58.612273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 27 PathOwnerId: 72057594046678944, cookie: 112 2025-06-24T21:28:58.612360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 27 PathOwnerId: 72057594046678944, cookie: 112 2025-06-24T21:28:58.612415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 112 2025-06-24T21:28:58.612464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 27 2025-06-24T21:28:58.612512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:28:58.613413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-06-24T21:28:58.613518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-06-24T21:28:58.613552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 112 2025-06-24T21:28:58.613580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 7], version: 18446744073709551615 2025-06-24T21:28:58.613624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 2 2025-06-24T21:28:58.613706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 112, subscribers: 0 2025-06-24T21:28:58.613954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:28:58.614049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-24T21:28:58.614144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 1 2025-06-24T21:28:58.614969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:28:58.615019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-24T21:28:58.615086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:28:58.617286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-24T21:28:58.618989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-24T21:28:58.619086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:28:58.619187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 112, wait until txId: 112 TestWaitNotification wait txId: 112 2025-06-24T21:28:58.619499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 112: send EvNotifyTxCompletion 2025-06-24T21:28:58.619534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 112 2025-06-24T21:28:58.620036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 112, at schemeshard: 72057594046678944 2025-06-24T21:28:58.620138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 112: got EvNotifyTxCompletionResult 2025-06-24T21:28:58.620182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 112: satisfy waiter [1:661:2650] TestWaitNotification: OK eventTxId 112 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::CreateExtSubdomain [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:28:57.930120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:28:57.930224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:28:57.930278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 
2025-06-24T21:28:57.930316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:28:57.931661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:28:57.931719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:28:57.931828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:28:57.931929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:28:57.932752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:28:57.939547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:28:58.033417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:28:58.033475Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:58.048647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:28:58.049009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:28:58.049223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:28:58.059099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:28:58.059287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:28:58.060201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:58.060557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:28:58.070290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:28:58.070980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:28:58.082827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:28:58.082930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:28:58.083237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:28:58.083297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain 
is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:28:58.083384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:28:58.083486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.092898Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:28:58.236259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:28:58.236457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.236957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:28:58.237051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:28:58.239067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:28:58.239259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:28:58.242714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:58.242903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:28:58.243081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.243205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:28:58.243235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:28:58.243262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:28:58.244931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-24T21:28:58.244989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:28:58.245055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:28:58.246862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.246911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.246956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:28:58.247028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:28:58.251766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:28:58.253766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:28:58.255424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:28:58.256526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:58.256681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:28:58.256758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:28:58.258318Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:28:58.258387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:28:58.258657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:28:58.258736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 
72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:28:58.261178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:28:58.261225Z node 1 :FLAT_TX_SCHEMESHARD ... 46316545 FAKE_COORDINATOR: Add transaction: 112 at step: 5000013 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 112 at step: 5000013 2025-06-24T21:28:58.602878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000013, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:58.602960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 112 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000013 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:28:58.603003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:157: TDropExtSubdomain TPropose, operationId: 112:0 HandleReply TEvOperationPlan, step: 5000013, at schemeshard: 72057594046678944 2025-06-24T21:28:58.603066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5298: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 7] name: USER_0 type: EPathTypeExtSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 112 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:28:58.603091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5314: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 7] 2025-06-24T21:28:58.603119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 112:0 128 -> 134 2025-06-24T21:28:58.604336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-24T21:28:58.604802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-24T21:28:58.606368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 112:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.606421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:104: TDropExtSubdomain TDeleteExternalShards, operationId: 112:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:28:58.606531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 112:0 134 -> 135 2025-06-24T21:28:58.606696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:28:58.606752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 2 FAKE_COORDINATOR: Erasing txId 112 2025-06-24T21:28:58.608527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:28:58.608565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: 
TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:28:58.608689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 7] 2025-06-24T21:28:58.608782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:28:58.608807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 112, path id: 1 2025-06-24T21:28:58.608833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 112, path id: 7 2025-06-24T21:28:58.608892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 112:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.608924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:399: [72057594046678944] TDeleteParts opId# 112:0 ProgressState 2025-06-24T21:28:58.608943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 112:0 135 -> 240 2025-06-24T21:28:58.609869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 27 PathOwnerId: 72057594046678944, cookie: 112 2025-06-24T21:28:58.609952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 27 PathOwnerId: 72057594046678944, cookie: 112 2025-06-24T21:28:58.609983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 112 2025-06-24T21:28:58.610012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 27 2025-06-24T21:28:58.610041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:28:58.610619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-06-24T21:28:58.610675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-06-24T21:28:58.610694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 112 2025-06-24T21:28:58.610717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, 
txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 7], version: 18446744073709551615 2025-06-24T21:28:58.610735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-24T21:28:58.610777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 112, ready parts: 0/1, is published: true 2025-06-24T21:28:58.612603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 112:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.612662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 112:0 ProgressState 2025-06-24T21:28:58.612750Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#112:0 progress is 1/1 2025-06-24T21:28:58.612786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2025-06-24T21:28:58.612828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#112:0 progress is 1/1 2025-06-24T21:28:58.612853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2025-06-24T21:28:58.612879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 112, ready parts: 1/1, is published: true 2025-06-24T21:28:58.612911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2025-06-24T21:28:58.612939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 112:0 2025-06-24T21:28:58.612962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 112:0 2025-06-24T21:28:58.613013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 2 2025-06-24T21:28:58.613337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:28:58.613394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-24T21:28:58.613526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 1 2025-06-24T21:28:58.613843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:28:58.613890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-24T21:28:58.613949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:28:58.615005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: 
TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-24T21:28:58.615961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-24T21:28:58.618237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:28:58.618330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 112, wait until txId: 112 TestWaitNotification wait txId: 112 2025-06-24T21:28:58.618670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 112: send EvNotifyTxCompletion 2025-06-24T21:28:58.618720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 112 2025-06-24T21:28:58.619293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 112, at schemeshard: 72057594046678944 2025-06-24T21:28:58.619382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 112: got EvNotifyTxCompletionResult 2025-06-24T21:28:58.619412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 112: satisfy waiter [1:661:2650] TestWaitNotification: OK eventTxId 112 >> KqpQueryPerf::IndexUpdateOn-QueryService+UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::MultiDeleteFromTable+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 28324, MsgBus: 29749 2025-06-24T21:28:46.809261Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631004231172913:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:46.809317Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a5f/r3tmp/tmpa6qIGw/pdisk_1.dat 2025-06-24T21:28:47.166395Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:47.166766Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631004231172892:2079] 1750800526807523 != 1750800526807526 TServer::EnableGrpc on GrpcPort 28324, node 1 2025-06-24T21:28:47.197914Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:47.198072Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:47.205000Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:47.223239Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:47.223268Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try 
to initialize from file: (empty maybe) 2025-06-24T21:28:47.223277Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:47.223414Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29749 TClient is connected to server localhost:29749 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:47.811415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:28:47.817877Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:28:47.829274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:47.842400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:48.014020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:48.170357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:48.239435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:49.858102Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631017116076410:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:49.858228Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.146744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.192747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.229282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.255279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.281255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.352703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.390729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.474732Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631021411044368:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.474797Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.474864Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631021411044373:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.478638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:50.489372Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631021411044375:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:50.558601Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631021411044426:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:51.809716Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631004231172913:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:51.809812Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 16335, MsgBus: 4111 2025-06-24T21:28:53.266790Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631035334957688:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:53.266882Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a5f/r3tmp/tmpHPlqZ4/pdisk_1.dat 2025-06-24T21:28:53.371364Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:53.372329Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631035334957669:2079] 1750800533266058 != 1750800533266061 TServer::EnableGrpc on GrpcPort 16335, node 2 2025-06-24T21:28:53.409566Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:53.409683Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:53.411058Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:53.435765Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:53.435794Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:53.435800Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:53.435941Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4111 TClient is connected to server localhost:4111 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:53.914311Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:53.924380Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:53.977863Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:54.135970Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:54.213823Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:54.276110Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:56.258264Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631048219861190:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:56.258369Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:56.313661Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:56.356349Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:56.384080Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:56.416630Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:56.446385Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:56.521606Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:56.595391Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:56.646921Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631048219861851:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:56.646990Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:56.647131Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631048219861856:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:56.650068Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:56.659895Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631048219861858:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:56.730255Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631048219861909:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:58.267004Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519631035334957688:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:58.267079Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::Delete+QueryService+UseSink [GOOD] >> TSchemeShardAuditSettings::AlterSubdomain [GOOD] >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-false [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexUpdateOn-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 61912, MsgBus: 6706 2025-06-24T21:28:43.875385Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630991716351848:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:43.876257Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a65/r3tmp/tmp2B4BM2/pdisk_1.dat 2025-06-24T21:28:44.226671Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:44.227038Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630991716351807:2079] 1750800523872894 != 1750800523872897 TServer::EnableGrpc on GrpcPort 61912, node 1 2025-06-24T21:28:44.271719Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:44.275745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:44.279277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:44.311833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:44.311862Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:44.311871Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:44.311977Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6706 TClient is connected to server localhost:6706 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:44.853212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:44.884823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:44.886061Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:28:45.010757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:45.188490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:45.254915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:46.921746Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631004601255355:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:46.921889Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:47.238043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:47.267572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:47.293436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:47.320600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:47.349214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:47.388754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:47.426318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:47.487282Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631008896223307:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:47.487379Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:47.487791Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631008896223312:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:47.491701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:47.505506Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631008896223314:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:47.568020Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631008896223365:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:48.779043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.819880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.876259Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExist ... 8:52.010169Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631023838414763:2079] 1750800531858352 != 1750800531858355 2025-06-24T21:28:52.022880Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:52.022973Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:52.024601Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1692, node 2 2025-06-24T21:28:52.068861Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:52.068882Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:52.068889Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:52.069015Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3443 TClient is connected to server localhost:3443 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:52.572420Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:52.589828Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:52.665318Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:52.816989Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:52.879219Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:52.896336Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:55.143339Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631041018285584:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:55.143433Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:55.203642Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:55.234709Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:55.261307Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:55.291445Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:55.324206Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:55.393582Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:55.463868Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:55.521442Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631041018286246:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:55.521511Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:55.521569Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631041018286251:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:55.524898Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:55.534331Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631041018286253:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:55.614428Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631041018286304:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:56.711621Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:56.749586Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:56.821874Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:56.862204Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519631023838414785:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:56.862728Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::AlterSubdomain [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:28:57.930112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:28:57.930251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:28:57.930305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:28:57.930344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:28:57.931651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: 
OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:28:57.931718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:28:57.931797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:28:57.931896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:28:57.932618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:28:57.941072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:28:58.030477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:28:58.030538Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:58.047401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:28:58.047876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:28:58.048126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:28:58.056921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:28:58.057138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:28:58.060270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:58.060572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:28:58.069699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:28:58.071195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:28:58.082896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:28:58.082988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:28:58.083291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:28:58.083352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:28:58.083412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: 
TTxServerlessStorageBilling.Complete 2025-06-24T21:28:58.083499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.091318Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:28:58.233704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:28:58.233913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.236966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:28:58.237057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:28:58.239078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:28:58.239274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:28:58.243684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:58.243834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:28:58.243989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.244037Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:28:58.244069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:28:58.244098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:28:58.245902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.245953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 
2025-06-24T21:28:58.245997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:28:58.247858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.247901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.247948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:28:58.248015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:28:58.258749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:28:58.260823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:28:58.261012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:28:58.261932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:58.262072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:28:58.262138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:28:58.262391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:28:58.262452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:28:58.262640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:28:58.262702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:28:58.264896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard 
DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:28:58.264947Z node 1 :FLAT_TX_SCHEMESHARD ... s.cpp:653: Send tablet strongly msg operationId: 175:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:175 msg type: 269090816 2025-06-24T21:29:00.594908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 175, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 175 at step: 5000076 FAKE_COORDINATOR: advance: minStep5000076 State->FrontStep: 5000075 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 175 at step: 5000076 2025-06-24T21:29:00.595950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000076, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:29:00.596052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 175 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000076 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:29:00.596110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_unsafe.cpp:47: TDropForceUnsafe TPropose, operationId: 175:0 HandleReply TEvOperationPlan, step: 5000076, at schemeshard: 72057594046678944 2025-06-24T21:29:00.596150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5298: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 26] name: USER_0 type: EPathTypeSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 175 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:29:00.596170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5314: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 26] 2025-06-24T21:29:00.596252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 175:0 128 -> 130 2025-06-24T21:29:00.596389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:29:00.596435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 2 2025-06-24T21:29:00.597169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-24T21:29:00.597294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 FAKE_COORDINATOR: Erasing txId 175 2025-06-24T21:29:00.598781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:29:00.598839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 175, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:29:00.599007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 175, path id: [OwnerId: 72057594046678944, LocalPathId: 26] 2025-06-24T21:29:00.599162Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:29:00.599204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 175, path id: 1 2025-06-24T21:29:00.599256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 175, path id: 26 2025-06-24T21:29:00.599592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2025-06-24T21:29:00.599638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:417: [72057594046678944] TDeleteParts opId# 175:0 ProgressState 2025-06-24T21:29:00.599710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#175:0 progress is 1/1 2025-06-24T21:29:00.599740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-24T21:29:00.599779Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#175:0 progress is 1/1 2025-06-24T21:29:00.599823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-24T21:29:00.599878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 175, ready parts: 1/1, is published: false 2025-06-24T21:29:00.599911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-24T21:29:00.599945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 175:0 2025-06-24T21:29:00.599974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 175:0 2025-06-24T21:29:00.600039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 3 2025-06-24T21:29:00.600070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 175, publications: 2, subscribers: 0 2025-06-24T21:29:00.600096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 175, [OwnerId: 72057594046678944, LocalPathId: 1], 103 2025-06-24T21:29:00.600121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 175, [OwnerId: 72057594046678944, LocalPathId: 26], 18446744073709551615 2025-06-24T21:29:00.600854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 103 PathOwnerId: 72057594046678944, cookie: 175 2025-06-24T21:29:00.600957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 103 PathOwnerId: 72057594046678944, cookie: 175 2025-06-24T21:29:00.600998Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 175 2025-06-24T21:29:00.601031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 103 2025-06-24T21:29:00.601063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:29:00.602010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 26 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2025-06-24T21:29:00.602117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 26 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2025-06-24T21:29:00.602154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 175 2025-06-24T21:29:00.602197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 26], version: 18446744073709551615 2025-06-24T21:29:00.602248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 2 2025-06-24T21:29:00.602341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 175, subscribers: 0 2025-06-24T21:29:00.602620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:29:00.602669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 26], at schemeshard: 72057594046678944 2025-06-24T21:29:00.602763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 1 2025-06-24T21:29:00.603347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:29:00.603397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 26], at schemeshard: 72057594046678944 2025-06-24T21:29:00.603476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:29:00.606507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-24T21:29:00.608815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard 
Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-24T21:29:00.608990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:29:00.609104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 175, wait until txId: 175 TestWaitNotification wait txId: 175 2025-06-24T21:29:00.610540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 175: send EvNotifyTxCompletion 2025-06-24T21:29:00.610583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 175 2025-06-24T21:29:00.611708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 175, at schemeshard: 72057594046678944 2025-06-24T21:29:00.611804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 175: got EvNotifyTxCompletionResult 2025-06-24T21:29:00.611834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 175: satisfy waiter [1:2468:4457] TestWaitNotification: OK eventTxId 175 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:28:57.930113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:28:57.930244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:28:57.930300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:28:57.930349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:28:57.931649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:28:57.931724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:28:57.931816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:28:57.931941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:28:57.933030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:28:57.939648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:28:58.033710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:28:58.033766Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:58.048891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:28:58.049244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:28:58.049409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:28:58.056870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:28:58.057161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:28:58.060242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:58.060561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:28:58.069868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:28:58.070989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:28:58.082909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:28:58.082993Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:28:58.083262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:28:58.083324Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:28:58.083384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:28:58.083475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.091370Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:28:58.244356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" 
StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:28:58.244673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.244906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:28:58.244975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:28:58.245278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:28:58.245475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:28:58.247844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:58.248090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:28:58.248291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.248350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:28:58.248418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:28:58.248457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:28:58.250621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.250687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:28:58.250756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:28:58.252950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.253007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.253064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState 
leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:28:58.253129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:28:58.263384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:28:58.265224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:28:58.265494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:28:58.266520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:58.266677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:28:58.266755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:28:58.267049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:28:58.267106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:28:58.267315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:28:58.267395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:28:58.270279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:28:58.270335Z node 1 :FLAT_TX_SCHEMESHARD ... 
RDINATOR: Add transaction: 175 at step: 5000076 FAKE_COORDINATOR: advance: minStep5000076 State->FrontStep: 5000075 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 175 at step: 5000076 2025-06-24T21:29:00.704306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000076, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:29:00.704427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 175 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000076 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:29:00.704481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:157: TDropExtSubdomain TPropose, operationId: 175:0 HandleReply TEvOperationPlan, step: 5000076, at schemeshard: 72057594046678944 2025-06-24T21:29:00.704577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5298: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 26] name: USER_0 type: EPathTypeExtSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 175 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:29:00.704612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5314: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 26] 2025-06-24T21:29:00.704648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 175:0 128 -> 134 2025-06-24T21:29:00.705979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-24T21:29:00.706348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-24T21:29:00.708258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2025-06-24T21:29:00.708315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:104: TDropExtSubdomain TDeleteExternalShards, operationId: 175:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:29:00.708424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 175:0 134 -> 135 2025-06-24T21:29:00.708610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:29:00.708693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 2 FAKE_COORDINATOR: Erasing txId 175 2025-06-24T21:29:00.710786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:29:00.710835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 175, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:29:00.710984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 175, path id: [OwnerId: 
72057594046678944, LocalPathId: 26] 2025-06-24T21:29:00.711103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:29:00.711180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 175, path id: 1 2025-06-24T21:29:00.711223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 175, path id: 26 2025-06-24T21:29:00.711720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2025-06-24T21:29:00.711784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:399: [72057594046678944] TDeleteParts opId# 175:0 ProgressState 2025-06-24T21:29:00.711822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 175:0 135 -> 240 2025-06-24T21:29:00.712583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 103 PathOwnerId: 72057594046678944, cookie: 175 2025-06-24T21:29:00.712674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 103 PathOwnerId: 72057594046678944, cookie: 175 2025-06-24T21:29:00.712716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 175 2025-06-24T21:29:00.712749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 103 2025-06-24T21:29:00.712784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:29:00.713745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 26 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2025-06-24T21:29:00.713839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 26 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2025-06-24T21:29:00.713872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 175 2025-06-24T21:29:00.713906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 26], version: 18446744073709551615 2025-06-24T21:29:00.713939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 3 2025-06-24T21:29:00.714020Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 175, ready parts: 0/1, is published: true 2025-06-24T21:29:00.717233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2025-06-24T21:29:00.717290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 175:0 ProgressState 2025-06-24T21:29:00.717394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#175:0 progress is 1/1 2025-06-24T21:29:00.717426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-24T21:29:00.717460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#175:0 progress is 1/1 2025-06-24T21:29:00.717489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-24T21:29:00.717522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 175, ready parts: 1/1, is published: true 2025-06-24T21:29:00.717559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-24T21:29:00.717593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 175:0 2025-06-24T21:29:00.717624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 175:0 2025-06-24T21:29:00.717708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 2 2025-06-24T21:29:00.718183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:29:00.718231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 26], at schemeshard: 72057594046678944 2025-06-24T21:29:00.718292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 1 2025-06-24T21:29:00.718550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:29:00.718595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 26], at schemeshard: 72057594046678944 2025-06-24T21:29:00.718660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:29:00.719451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-24T21:29:00.719607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-24T21:29:00.722239Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:29:00.722343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 175, wait until txId: 175 TestWaitNotification wait txId: 175 2025-06-24T21:29:00.723753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 175: send EvNotifyTxCompletion 2025-06-24T21:29:00.723803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 175 2025-06-24T21:29:00.725391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 175, at schemeshard: 72057594046678944 2025-06-24T21:29:00.725544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 175: got EvNotifyTxCompletionResult 2025-06-24T21:29:00.725583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 175: satisfy waiter [1:2606:4595] TestWaitNotification: OK eventTxId 175 >> TColumnShardTestSchema::OneColdTier [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Delete+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 11604, MsgBus: 22853 2025-06-24T21:28:48.944670Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631009713116297:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:48.959512Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a5b/r3tmp/tmpd2PZsX/pdisk_1.dat 2025-06-24T21:28:49.291044Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11604, node 1 2025-06-24T21:28:49.377374Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:49.377693Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:49.379781Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:49.391787Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:49.391827Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:49.391840Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:49.391971Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22853 TClient is connected to server localhost:22853 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:49.934163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:49.952998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:49.963410Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:49.967589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:50.092427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:50.271235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:50.342015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:51.981254Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631022598019770:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:51.981425Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:52.324866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.359237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.392943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.436749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.507892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.540574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.572652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.640036Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631026892987726:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:52.640136Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:52.640441Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631026892987731:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:52.644562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:52.657383Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631026892987733:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:52.735564Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631026892987784:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:53.947306Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631009713116297:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:53.947370Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 13598, MsgBus: 16507 2025-06-24T21:28:54.995822Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631038860676141:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:54.995909Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a5b/r3tmp/tmpgH6eq8/pdisk_1.dat 2025-06-24T21:28:55.092953Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:55.094353Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631038860676122:2079] 1750800534995333 != 1750800534995336 TServer::EnableGrpc on GrpcPort 13598, node 2 2025-06-24T21:28:55.134929Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:55.135036Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:55.136888Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:55.149852Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:55.149878Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:55.149884Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:55.150031Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16507 TClient is connected to server localhost:16507 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:55.579353Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:55.596516Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:55.668352Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:55.803804Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:55.904595Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:56.032358Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:58.264003Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631056040546955:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:58.264101Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:58.317168Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:58.346457Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:58.371716Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:58.399588Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:58.425554Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:58.455723Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:58.488567Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:58.543840Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631056040547612:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:58.543932Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:58.544120Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631056040547617:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:58.548078Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:58.559058Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631056040547619:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:58.647541Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631056040547670:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:59.996071Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519631038860676141:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:59.996159Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::IndexUpsert+QueryService+UseSink [GOOD] >> TPQTest::TestReadSubscription [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::OneColdTier [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801093.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=150801093.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130801093.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130801093.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799893.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130799893.000000s;Name=;Codec=}; 2025-06-24T21:28:15.214311Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:28:15.237272Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:28:15.237612Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:28:15.245243Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:28:15.245550Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:28:15.245864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:28:15.245963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:28:15.246032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:28:15.246134Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:28:15.246259Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:28:15.246388Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:28:15.246481Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:28:15.246571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:28:15.246680Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:28:15.270085Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:28:15.270371Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:28:15.270451Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:28:15.270678Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:28:15.270848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:28:15.270928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:28:15.270978Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:28:15.271079Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:28:15.271161Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:28:15.271213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:28:15.271247Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:28:15.271606Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:28:15.271692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:28:15.271738Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:28:15.271780Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:28:15.271883Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:28:15.271942Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:28:15.271991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:28:15.272022Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:28:15.272088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:28:15.272132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:28:15.272167Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:28:15.272370Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:28:15.272448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:28:15.272489Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:28:15.272711Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:28:15.272790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:28:15.272830Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:28:15.272936Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:28:15.273001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:28:15.273037Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:28:15.273117Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:28:15.273170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:28:15.273199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:28:15.273221Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:28:15.273653Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=48; 2025-06-24T21:28:15.273726Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=30; 2025-06-24T21:28:15.273 ... 
1:870:2828] 2025-06-24T21:29:01.575200Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:871:2829];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:29:01.284170Z;index_granules=0;index_portions=1;index_batches=522;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=4873744;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=4873744;selected_rows=0; 2025-06-24T21:29:01.575241Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:871:2829];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:29:01.575470Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:871:2829];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-24T21:29:01.576348Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Finished read cookie: 7 at tablet 9437184 2025-06-24T21:29:01.576624Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750800531355:max} readable: {1750800531355:max} at tablet 9437184 2025-06-24T21:29:01.576770Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-24T21:29:01.576927Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800531355:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:29:01.576989Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800531355:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:29:01.577555Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800531355:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-24T21:29:01.577664Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800531355:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-24T21:29:01.578169Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800531355:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:878:2836];trace_detailed=; 2025-06-24T21:29:01.578590Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-24T21:29:01.578789Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-24T21:29:01.578966Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:01.579089Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:01.579360Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:29:01.579461Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:01.579550Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 
2025-06-24T21:29:01.579592Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:878:2836] finished for tablet 9437184 2025-06-24T21:29:01.580036Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:877:2835];stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","l_ack","f_processing","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750800541578095,"name":"_full_task","f":1750800541578095,"d_finished":0,"c":0,"l":1750800541579670,"d":1575},"events":[{"name":"bootstrap","f":1750800541578318,"d_finished":805,"c":1,"l":1750800541579123,"d":805},{"a":1750800541579335,"name":"ack","f":1750800541579335,"d_finished":0,"c":0,"l":1750800541579670,"d":335},{"a":1750800541579315,"name":"processing","f":1750800541579315,"d_finished":0,"c":0,"l":1750800541579670,"d":355},{"name":"ProduceResults","f":1750800541578890,"d_finished":413,"c":2,"l":1750800541579573,"d":413},{"a":1750800541579578,"name":"Finish","f":1750800541579578,"d_finished":0,"c":0,"l":1750800541579670,"d":92}],"id":"9437184::8"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:01.580122Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:877:2835];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:29:01.580521Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:877:2835];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","l_ProduceResults","f_Finish"],"t":0.001},{"events":["l_ack","l_processing","l_Finish"],"t":0.002}],"full":{"a":1750800541578095,"name":"_full_task","f":1750800541578095,"d_finished":0,"c":0,"l":1750800541580170,"d":2075},"events":[{"name":"bootstrap","f":1750800541578318,"d_finished":805,"c":1,"l":1750800541579123,"d":805},{"a":1750800541579335,"name":"ack","f":1750800541579335,"d_finished":0,"c":0,"l":1750800541580170,"d":835},{"a":1750800541579315,"name":"processing","f":1750800541579315,"d_finished":0,"c":0,"l":1750800541580170,"d":855},{"name":"ProduceResults","f":1750800541578890,"d_finished":413,"c":2,"l":1750800541579573,"d":413},{"a":1750800541579578,"name":"Finish","f":1750800541579578,"d_finished":0,"c":0,"l":1750800541580170,"d":592}],"id":"9437184::8"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:878:2836]->[1:877:2835] 2025-06-24T21:29:01.580618Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:29:01.577632Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-24T21:29:01.580664Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:29:01.580757Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:878:2836];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/9739224 160000/9739224 80000/4873744 0/0 |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TBlobStorageStoragePoolMonTest::ReducedSizeClassCalcTest [GOOD] |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::TestReadSubscription [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:106:2057] recipient: [1:104:2136] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:111:2057] recipient: [1:104:2136] 2025-06-24T21:25:43.467644Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:43.467751Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:152:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927938 is [1:156:2175] sender: [1:157:2057] recipient: [1:150:2171] Leader for TabletID 72057594037927937 is [1:110:2140] sender: [1:182:2057] recipient: [1:14:2061] 2025-06-24T21:25:43.485963Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:43.508442Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 1 actor [1:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 
0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-24T21:25:43.509441Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:188:2199] 2025-06-24T21:25:43.512198Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:188:2199] 2025-06-24T21:25:43.514574Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:189:2200] 2025-06-24T21:25:43.516316Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:189:2200] 2025-06-24T21:25:43.522245Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|1cb6c94f-1b10b184-ce8cf5e8-f37e6083_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 2025-06-24T21:25:43.522963Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner2|f479db6e-9b4d59bd-4f5131be-9c0a7aa7_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner2 2025-06-24T21:25:43.539575Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|e088177d-7c54c8b3-8b134153-705c93a8_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:106:2057] recipient: [2:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:106:2057] recipient: [2:104:2136] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:111:2057] recipient: [2:104:2136] 2025-06-24T21:25:43.872379Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:43.872463Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:152:2057] recipient: [2:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:152:2057] recipient: [2:150:2171] Leader for TabletID 72057594037927938 is [2:156:2175] sender: [2:157:2057] recipient: [2:150:2171] Leader for TabletID 72057594037927937 is [2:110:2140] sender: [2:180:2057] recipient: [2:14:2061] 2025-06-24T21:25:43.888282Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:43.889229Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 2 actor [2:178:2191] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } 
Partitions { PartitionId: 1 } ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-24T21:25:43.889772Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:186:2197] 2025-06-24T21:25:43.891694Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [2:186:2197] 2025-06-24T21:25:43.893336Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:187:2198] 2025-06-24T21:25:43.894650Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [2:187:2198] 2025-06-24T21:25:43.900870Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|e8f532d6-d4d164a0-7c55d022-2075d22d_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 2025-06-24T21:25:43.901399Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner2|1916b674-89773370-ecce5ada-b1f5ea8c_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner2 2025-06-24T21:25:43.918761Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|b7045e3f-39cfe459-52a9ea61-add10bd3_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:106:2057] recipient: [3:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:106:2057] recipient: [3:104:2136] Leader for TabletID 72057594037927937 is [3:110:2140] sender: [3:111:2057] recipient: [3:104:2136] 2025-06-24T21:25:44.243961Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:44.244016Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:152:2057] recipient: [3:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:152:2057] recipient: [3:150:2171] Leader for TabletID 72057594037927938 is [3:156:2175] sender: [3:157:2057] recipient: [3:150:2171] Leader for TabletID 72057594037927937 is [3:110:2140] sender: [3:182:2057] recipient: [3:14:2061] 2025-06-24T21:25:44.255849Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:44.256480Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 3 actor [3:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } 2025-06-24T21:25:44.256904Z 
node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [3:188:2199] 2025-06-24T21:25:44.258421Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [3:188:2199] 2025-06-24T21:25:44.259637Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [3:189:2200] 2025-06-24T21:25:44.260765Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [3:189:2200] 2025-06-24T21:25:44.265008Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|f02ab274-bd20ed3f-8eaa9f18-5487669_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 2025-06-24T21:25:44.265302Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner2|f59b5151-dc139666-191278bb-695f53ce_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner2 2025-06-24T21:25:44.277686Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|101f5ef7-f8e6854c-36908111-661e45fa_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:106:2057] recipient: [4:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:106:2057] recipient: [4:104:2136] Leader for TabletID 72057594037927937 is [4:110:2140] sender: [4:111:2057] recipient: [4:104:2136] 2025-06-24T21:25:44.665092Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:44.665157Z node 4 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [4:152:2057] recipient: [4:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [4:152:2057] recipient: [4:150:2171] Leader for TabletID 72057594037927938 is [4:156:2175] sender: [4:157:2057] recipient: [4:150:2171] Leader for TabletID 72057594037927937 is [4:110:2140] sender: [4:180:2057] recipient: [4:14:2061] 2025-06-24T21:25:44.679709Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:25:44.680748Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 4 actor [4:178:2191] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 4 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 4 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 4 Important: false } 2025-06-24T21:25:44.681453Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: Sta ... 
-asdfgs--topic' partition 0 generation 2 [144:188:2199] 2025-06-24T21:28:57.646588Z node 144 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [144:189:2200] 2025-06-24T21:28:57.648852Z node 144 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [144:189:2200] 2025-06-24T21:28:57.651865Z node 144 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [144:190:2201] 2025-06-24T21:28:57.654046Z node 144 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 2 generation 2 [144:190:2201] 2025-06-24T21:28:57.656187Z node 144 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [144:191:2202] 2025-06-24T21:28:57.658392Z node 144 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 3 generation 2 [144:191:2202] 2025-06-24T21:28:57.661321Z node 144 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 4, State: StateInit] bootstrapping 4 [144:192:2203] 2025-06-24T21:28:57.663495Z node 144 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 4, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 4 generation 2 [144:192:2203] 2025-06-24T21:28:57.677747Z node 144 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|a19b18af-1ba3a95f-3c06834c-b6aaeafc_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:28:58.744666Z node 144 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|32b538b2-ff65dd87-317f58b4-70fb762e_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:28:58.757588Z node 144 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|653bb723-1176416a-e9b48e2a-41b7565b_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [145:106:2057] recipient: [145:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [145:106:2057] recipient: [145:104:2136] Leader for TabletID 72057594037927937 is [145:110:2140] sender: [145:111:2057] recipient: [145:104:2136] 2025-06-24T21:28:59.338280Z node 145 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:28:59.338401Z node 145 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [145:152:2057] recipient: [145:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [145:152:2057] recipient: [145:150:2171] Leader for TabletID 72057594037927938 is [145:156:2175] sender: [145:157:2057] recipient: [145:150:2171] Leader for TabletID 72057594037927937 is [145:110:2140] sender: [145:180:2057] recipient: [145:14:2061] 2025-06-24T21:28:59.364761Z node 145 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:28:59.367233Z node 145 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 145 actor [145:178:2191] txId 12345 config: CacheSize: 
10485760 PartitionConfig { MaxCountInPartition: 100 MaxSizeInPartition: 104857600 LifetimeSeconds: 172800 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 PartitionIds: 2 PartitionIds: 3 PartitionIds: 4 TopicName: "rt3.dc1--asdfgs--topic" Version: 145 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } Partitions { PartitionId: 2 } Partitions { PartitionId: 3 } Partitions { PartitionId: 4 } ReadRuleGenerations: 145 ReadRuleGenerations: 145 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } AllPartitions { PartitionId: 2 } AllPartitions { PartitionId: 3 } AllPartitions { PartitionId: 4 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 145 Important: false } Consumers { Name: "user1" Generation: 145 Important: true } 2025-06-24T21:28:59.368670Z node 145 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [145:186:2197] 2025-06-24T21:28:59.372292Z node 145 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [145:186:2197] 2025-06-24T21:28:59.376120Z node 145 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [145:187:2198] 2025-06-24T21:28:59.377828Z node 145 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [145:187:2198] 2025-06-24T21:28:59.379894Z node 145 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [145:188:2199] 2025-06-24T21:28:59.381558Z node 145 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 2 generation 2 [145:188:2199] 2025-06-24T21:28:59.384707Z node 145 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [145:189:2200] 2025-06-24T21:28:59.387022Z node 145 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 3 generation 2 [145:189:2200] 2025-06-24T21:28:59.389586Z node 145 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 4, State: StateInit] bootstrapping 4 [145:190:2201] 2025-06-24T21:28:59.391062Z node 145 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 4, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 4 generation 2 [145:190:2201] 2025-06-24T21:28:59.403056Z node 145 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|4ff8f431-3f1f31a4-fcb2a299-74b12c55_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:29:00.400855Z node 145 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|86771b5-70411964-8a08ad97-ee1d490a_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:29:00.414997Z node 145 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|7a3f8bde-5779c6bf-99a82f2c-d17557be_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: 
[146:106:2057] recipient: [146:104:2136] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [146:106:2057] recipient: [146:104:2136] Leader for TabletID 72057594037927937 is [146:110:2140] sender: [146:111:2057] recipient: [146:104:2136] 2025-06-24T21:29:01.080612Z node 146 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:29:01.080702Z node 146 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [146:152:2057] recipient: [146:150:2171] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [146:152:2057] recipient: [146:150:2171] Leader for TabletID 72057594037927938 is [146:156:2175] sender: [146:157:2057] recipient: [146:150:2171] Leader for TabletID 72057594037927937 is [146:110:2140] sender: [146:182:2057] recipient: [146:14:2061] 2025-06-24T21:29:01.112753Z node 146 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:29:01.114542Z node 146 :PERSQUEUE INFO: pq_impl.cpp:1489: [PQ: 72057594037927937] Config applied version 146 actor [146:180:2193] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 100 MaxSizeInPartition: 104857600 LifetimeSeconds: 172800 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 PartitionIds: 2 PartitionIds: 3 PartitionIds: 4 TopicName: "rt3.dc1--asdfgs--topic" Version: 146 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } Partitions { PartitionId: 2 } Partitions { PartitionId: 3 } Partitions { PartitionId: 4 } ReadRuleGenerations: 146 ReadRuleGenerations: 146 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } AllPartitions { PartitionId: 2 } AllPartitions { PartitionId: 3 } AllPartitions { PartitionId: 4 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 146 Important: false } Consumers { Name: "user1" Generation: 146 Important: true } 2025-06-24T21:29:01.115794Z node 146 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [146:188:2199] 2025-06-24T21:29:01.118247Z node 146 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [146:188:2199] 2025-06-24T21:29:01.121308Z node 146 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [146:189:2200] 2025-06-24T21:29:01.123000Z node 146 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [146:189:2200] 2025-06-24T21:29:01.125472Z node 146 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [146:190:2201] 2025-06-24T21:29:01.127224Z node 146 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 2 generation 2 [146:190:2201] 2025-06-24T21:29:01.129471Z node 146 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, 
State: StateInit] bootstrapping 3 [146:191:2202] 2025-06-24T21:29:01.131774Z node 146 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 3 generation 2 [146:191:2202] 2025-06-24T21:29:01.134788Z node 146 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 4, State: StateInit] bootstrapping 4 [146:192:2203] 2025-06-24T21:29:01.137277Z node 146 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 4, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 4 generation 2 [146:192:2203] 2025-06-24T21:29:01.153428Z node 146 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|cc54044e-16b9cf10-6da400c7-7f559fd2_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:29:02.214387Z node 146 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|c6137871-5230a1fd-82c60e1b-14a1882f_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-24T21:29:02.228263Z node 146 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|1cddcb7f-28ab925d-31a220b-a6a6bbe8_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TBlobStorageStoragePoolMonTest::SizeClassCalcTest [GOOD] |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> KqpQueryPerf::IndexInsert+QueryService+UseSink [GOOD] |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TBlobStorageStoragePoolMonTest::ReducedSizeClassCalcTest [GOOD] >> KqpQueryServiceScripts::TestTruncatedBySize [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexUpsert+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 10294, MsgBus: 19241 2025-06-24T21:28:46.454160Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631001724932926:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:46.455800Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a60/r3tmp/tmpwKPdGk/pdisk_1.dat 2025-06-24T21:28:46.752514Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631001724932902:2079] 1750800526452297 != 1750800526452300 2025-06-24T21:28:46.776669Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:46.779476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2025-06-24T21:28:46.779612Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:46.785960Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10294, node 1 2025-06-24T21:28:46.883862Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:46.883904Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:46.883915Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:46.884068Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19241 TClient is connected to server localhost:19241 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:47.449860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:28:47.461682Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:28:47.474839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:47.487596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:47.634528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:47.797574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:47.862465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:49.500070Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631014609836432:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:49.500209Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:49.875358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:49.902688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:49.937101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:49.974745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.002375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.031769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.071253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.139520Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631018904804384:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.139622Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.139956Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631018904804389:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.143849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:50.154332Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631018904804391:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:50.209658Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631018904804442:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:51.404354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:51.441601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/yd ... 4.708196Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631038400816792:2079] 1750800534614275 != 1750800534614278 TServer::EnableGrpc on GrpcPort 19184, node 2 2025-06-24T21:28:54.756510Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:54.756594Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:54.758152Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:54.765341Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:54.765377Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:54.765384Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:54.765561Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27163 TClient is connected to server localhost:27163 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:28:55.192681Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:55.202411Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:55.251809Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:55.401127Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:55.477402Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:55.627808Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:57.398701Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631051285720314:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:57.398832Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:57.454519Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:57.488159Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:57.523525Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:57.563578Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:57.597711Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:57.671488Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:57.741024Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:57.805130Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631051285720979:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:57.805212Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:57.805331Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631051285720984:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:57.808487Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:57.817699Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631051285720986:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:57.872864Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631051285721037:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:58.883723Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:58.923877Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:59.002888Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:59.614793Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519631038400816811:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:59.614874Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TBlobStorageStoragePoolMonTest::SizeClassCalcTest [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::ShowCreateTableColumnAlterObject [FAIL] Test command err: 2025-06-24T21:23:29.523803Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629643429701115:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:29.523886Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b00/r3tmp/tmpQHgkwD/pdisk_1.dat 2025-06-24T21:23:30.177044Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:30.178090Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:30.178144Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:30.195651Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc 
on GrpcPort 8128, node 1 2025-06-24T21:23:30.498308Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:30.498333Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:30.498343Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:30.498526Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:23:30.560525Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14418 TClient is connected to server localhost:14418 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:31.579899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:33.801951Z node 1 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:276: Subscribed for config changes 2025-06-24T21:23:33.801995Z node 1 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:331: Updated config 2025-06-24T21:23:33.852296Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629660609571413:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:33.852407Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:33.852941Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629660609571425:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:33.857480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:33.900586Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629660609571427:2307], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:23:34.014739Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629664904538799:2746] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:34.016605Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1186: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-24T21:23:34.027265Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:413: Perform request, TraceId.SpanIdPtr: 0x000050F00002FCA8 2025-06-24T21:23:34.027371Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:423: Received compile request, sender: [1:7519629660609571384:2298], queryUid: , queryText: "\n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n ", keepInCache: 1, split: 0{ TraceId: 01jyhx69tb72fwpce1nf2t1zwr, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTBmN2RkNWEtOTczMjc3OTMtMmI5NjdkMGUtMTQzNDVkNDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default} 2025-06-24T21:23:34.027585Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1186: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-24T21:23:34.027672Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:519: Added request to queue, sender: [1:7519629660609571384:2298], queueSize: 1 
2025-06-24T21:23:34.028435Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:880: Created compile actor, sender: [1:7519629660609571384:2298], compileActor: [1:7519629664904538818:2311] 2025-06-24T21:23:34.494300Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhx69tb72fwpce1nf2t1zwr, SessionId: CompileActor 2025-06-24 21:23:34.494 INFO ydb-core-sys_view-ut(pid=913630, tid=0x00007F120E152640) [core dq] kqp_host.cpp:1375: Good place to weld in 2025-06-24T21:23:34.495895Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhx69tb72fwpce1nf2t1zwr, SessionId: CompileActor 2025-06-24 21:23:34.495 INFO ydb-core-sys_view-ut(pid=913630, tid=0x00007F120E152640) [core dq] kqp_host.cpp:1380: Compiled query: ( (let $1 (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"accessKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"secretAccessKey")))))) (let $2 (Write! $1 (DataSink '"kikimr" '"db") (Key '('objectId (String '"secretKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"fakeSecret")))))) (let $3 '('('"auth_method" '"AWS") '('"aws_access_key_id_secret_name" '"accessKey") '('"aws_region" '"ru-central1") '('"aws_secret_access_key_secret_name" '"secretKey") '('"location" '"http://fake.fake/olap-tier1") '('"source_type" '"ObjectStorage"))) (return (Write! $2 (DataSink '"kikimr" '"db") (Key '('objectId (String '"/Root/tier1")) '('typeId (String '"EXTERNAL_DATA_SOURCE"))) (Void) '('('mode 'createObject) '('features $3)))) ) 2025-06-24T21:23:34.496738Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhx69tb72fwpce1nf2t1zwr, SessionId: CompileActor 2025-06-24 21:23:34.496 INFO ydb-core-sys_view-ut(pid=913630, tid=0x00007F120E152640) [KQP] kqp_host.cpp:1386: Compiled query: ( (let $1 (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"accessKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"secretAccessKey")))))) (let $2 (Write! $1 (DataSink '"kikimr" '"db") (Key '('objectId (String '"secretKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"fakeSecret")))))) (let $3 '('('"auth_method" '"AWS") '('"aws_access_key_id_secret_name" '"accessKey") '('"aws_region" '"ru-central1") '('"aws_secret_access_key_secret_name" '"secretKey") '('"location" '"http://fake.fake/olap-tier1") '('"source_type" '"ObjectStorage"))) (return (W ... 
TraceId: 01jyhxer2x1p9c8xksy22j2n7k, SessionId: CompileActor 2025-06-24 21:28:10.869 DEBUG ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F7DA2640) [perf] type_ann_expr.cpp:48: Execution of [TypeAnnotationTransformer::DoTransform] took 660us 2025-06-24T21:28:10.870698Z node 36 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jyhxer2x1p9c8xksy22j2n7k, SessionId: CompileActor 2025-06-24 21:28:10.870 DEBUG ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F7DA2640) [perf] yql_expr_constraint.cpp:3279: Execution of [ConstraintTransformer::DoTransform] took 366us 2025-06-24T21:28:10.870921Z node 36 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jyhxer2x1p9c8xksy22j2n7k, SessionId: CompileActor 2025-06-24 21:28:10.870 DEBUG ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F7DA2640) [perf] yql_expr_csee.cpp:620: Execution of [UpdateCompletness] took 159us 2025-06-24T21:28:10.871624Z node 36 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jyhxer2x1p9c8xksy22j2n7k, SessionId: CompileActor 2025-06-24 21:28:10.871 DEBUG ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F7DA2640) [perf] yql_expr_csee.cpp:633: Execution of [EliminateCommonSubExpressions] took 630us 2025-06-24T21:28:10.880008Z node 36 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhxer2x1p9c8xksy22j2n7k, SessionId: CompileActor 2025-06-24 21:28:10.878 INFO ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F7DA2640) [KQP] kqp_transform.cpp:33: Optimized expr: ( (let $1 (DataSink '"kikimr" '"db")) (let $2 '"//Root/.metadata/secrets/values") (let $3 '('('"mode" '"flush"))) (let $4 (KiExecDataQuery! world $1 (DataQueryBlocks (TKiDataQueryBlock '('((Take (Right! (KiReadTable! world (DataSource '"kikimr" '"db") (Key '('table (String $2))) (Void) '())) (Uint64 '"1001")) '() '"1000")) (KiEffects) '('('"db" $2 '"Select")) '())) $3 (Void))) (let $5 (DataSink 'result)) (let $6 (ResPull! (Left! $4) $5 (Key) (Nth (Right! $4) '0) '('('type) '('autoref)) '"kikimr")) (return (Commit! (Commit! 
$6 $5) $1 $3)) ) 2025-06-24T21:28:10.880113Z node 36 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhxer2x1p9c8xksy22j2n7k, SessionId: CompileActor 2025-06-24 21:28:10.880 INFO ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F7DA2640) [core exec] yql_execution.cpp:59: Begin, root #277 2025-06-24T21:28:10.880181Z node 36 :KQP_YQL INFO: log.cpp:67: TraceId: 01jyhxer2x1p9c8xksy22j2n7k, SessionId: CompileActor 2025-06-24 21:28:10.880 INFO ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F7DA2640) [core exec] yql_execution.cpp:72: Collect unused nodes for root #277, status: Ok 2025-06-24T21:28:10.880241Z node 36 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jyhxer2x1p9c8xksy22j2n7k, SessionId: CompileActor 2025-06-24 21:28:10.880 TRACE ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F7DA2640) [core exec] yql_execution.cpp:387: {0}, callable #277 2025-06-24T21:28:10.880302Z node 36 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jyhxer2x1p9c8xksy22j2n7k, SessionId: CompileActor 2025-06-24 21:28:10.880 TRACE ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F7DA2640) [core exec] yql_execution.cpp:387: {1}, callable #276 2025-06-24T21:28:10.880354Z node 36 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jyhxer2x1p9c8xksy22j2n7k, SessionId: CompileActor 2025-06-24 21:28:10.880 TRACE ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F7DA2640) [core exec] yql_execution.cpp:387: {2}, callable #275 2025-06-24T21:28:10.880462Z node 36 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jyhxer2x1p9c8xksy22j2n7k, SessionId: CompileActor 2025-06-24 21:28:10.880 TRACE ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F7DA2640) [core exec] yql_execution.cpp:387: {3}, callable #272 2025-06-24T21:28:10.880521Z node 36 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jyhxer2x1p9c8xksy22j2n7k, SessionId: CompileActor 2025-06-24 21:28:10.880 TRACE ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F7DA2640) [core exec] yql_execution.cpp:387: {4}, callable #265 2025-06-24T21:28:10.880609Z node 36 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jyhxer2x1p9c8xksy22j2n7k, SessionId: CompileActor 2025-06-24 21:28:10.880 TRACE ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F7DA2640) [core exec] yql_execution.cpp:387: {4}, callable #265 2025-06-24T21:28:10.881146Z node 36 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jyhxer2x1p9c8xksy22j2n7k, SessionId: CompileActor 2025-06-24 21:28:10.880 DEBUG ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F7DA2640) [core dq] kqp_runner.cpp:160: Before any rewrites: ( (let $1 '"//Root/.metadata/secrets/values") (return (DataQueryBlocks (TKiDataQueryBlock '('((Take (Right! (KiReadTable! 
world (DataSource '"kikimr" '"db") (Key '('table (String $1))) (Void) '())) (Uint64 '"1001")) '() '"1000")) (KiEffects) '('('"db" $1 '"Select")) '()))) ) 2025-06-24T21:28:10.884924Z node 36 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jyhxer3083st8xpg6vxfej16, SessionId: CompileActor 2025-06-24 21:28:10.884 DEBUG ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F5E7D640) [perf] type_ann_expr.cpp:48: Execution of [TypeAnnotationTransformer::DoTransform] took 74us 2025-06-24T21:28:10.885033Z node 36 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jyhxer3083st8xpg6vxfej16, SessionId: CompileActor 2025-06-24 21:28:10.884 DEBUG ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F5E7D640) [perf] yql_expr_constraint.cpp:3279: Execution of [ConstraintTransformer::DoTransform] took 41us 2025-06-24T21:28:10.885315Z node 36 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jyhxer3083st8xpg6vxfej16, SessionId: CompileActor 2025-06-24 21:28:10.885 DEBUG ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F5E7D640) [perf] yql_expr_csee.cpp:620: Execution of [UpdateCompletness] took 217us 2025-06-24T21:28:10.886013Z node 36 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jyhxer3083st8xpg6vxfej16, SessionId: CompileActor 2025-06-24 21:28:10.885 DEBUG ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F5E7D640) [perf] yql_expr_csee.cpp:633: Execution of [EliminateCommonSubExpressions] took 622us 2025-06-24T21:28:10.887759Z node 36 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jyhxer3083st8xpg6vxfej16, SessionId: CompileActor 2025-06-24 21:28:10.886 TRACE ydb-core-sys_view-ut(pid=913630, tid=0x00007F11F5E7D640) [KQP] kqp_transform.cpp:33: PhysicalOptimizeTransformer: ( (let $1 (KqpTable '"//Root/.metadata/initialization/migrations" '"72057594046644480:7" '"" '1)) (let $2 '('"componentId" '"instant" '"modificationId")) (let $3 (Uint64 '"1001")) (let $4 (KqpRowsSourceSettings $1 $2 '('('"ItemsLimit" $3) '('"Sequential" '1)) (Void) '())) (let $5 (OptionalType (DataType 'Utf8))) (let $6 (StructType '('"componentId" $5) '('"instant" (OptionalType (DataType 'Uint32))) '('"modificationId" $5))) (let $7 '('('"_logical_id" '338) '('"_id" '"e99c4739-caf7a612-cde2ceee-768782cd") '('"_wide_channels" $6))) (let $8 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $4)) (lambda '($12) (block '( (let $13 (lambda '($14) (Member $14 '"componentId") (Member $14 '"instant") (Member $14 '"modificationId"))) (return (FromFlow (ExpandMap (Take (ToFlow $12) $3) $13))) ))) $7)) (let $9 (DqCnUnionAll (TDqOutput $8 '"0"))) (let $10 (DqPhyStage '($9) (lambda '($15) (FromFlow (Take (NarrowMap (ToFlow $15) (lambda '($16 $17 $18) (AsStruct '('"componentId" $16) '('"instant" $17) '('"modificationId" $18)))) $3))) '('('"_logical_id" '351) '('"_id" '"b93100d9-99a14562-f1d22f23-6cfa2a6a")))) (let $11 (DqCnResult (TDqOutput $10 '"0") '())) (return (KqpPhysicalQuery '((KqpPhysicalTx '($8 $10) '($11) '() '('('"type" '"data")))) '((KqpTxResultBinding (ListType $6) '"0" '"0")) '('('"type" '"data_query")))) ) 2025-06-24T21:28:28.374465Z node 41 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[41:7519630926882189449:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:28.374592Z node 41 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b00/r3tmp/tmpSks86r/pdisk_1.dat 2025-06-24T21:28:28.697521Z node 41 
:IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:28.731024Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(41, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:28.731411Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(41, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:28.740271Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(41, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31353, node 41 2025-06-24T21:28:28.896238Z node 41 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:28.896276Z node 41 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:28.896291Z node 41 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:28.896576Z node 41 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:28:29.425787Z node 41 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13376 TClient is connected to server localhost:13376 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:30.147629Z node 41 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:28:33.377792Z node 41 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[41:7519630926882189449:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:33.377883Z node 41 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:35.515084Z node 41 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1482: TraceId: "01jyhxfb9r36w8jnpzjv64w2av", Request deadline has expired for 0.261867s seconds 2025-06-24T21:28:35.517984Z node 41 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:276: Subscribed for config changes 2025-06-24T21:28:35.518031Z node 41 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:331: Updated config (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-true [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexInsert+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 16111, MsgBus: 22271 2025-06-24T21:28:47.440306Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631006892375295:2084];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:47.441583Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a5c/r3tmp/tmpeAhzmc/pdisk_1.dat 2025-06-24T21:28:47.845848Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16111, node 1 2025-06-24T21:28:47.866929Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:47.867056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:47.888536Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:47.925457Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:47.925483Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:47.925492Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:47.925634Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22271 TClient is connected to server localhost:22271 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:28:48.441805Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:48.464127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:48.490148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:28:48.503198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.648740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:28:48.789842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:48.855055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:50.498313Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631019777278778:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.498500Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:50.813806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.843134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.871127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.898072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.928479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:50.970074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:51.038441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:51.090224Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631024072246730:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:51.090314Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:51.090368Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631024072246735:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:51.094984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:51.105828Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631024072246737:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:51.180256Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631024072246788:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:52.348960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.394085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:52.432378Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;even ... 5.805554Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631041457983873:2079] 1750800535714728 != 1750800535714731 TServer::EnableGrpc on GrpcPort 18052, node 2 2025-06-24T21:28:55.846710Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:55.846849Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:55.848769Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:55.858039Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:55.858072Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:55.858079Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:55.858200Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19525 TClient is connected to server localhost:19525 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:56.262360Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:56.278221Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:56.330481Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:56.491056Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:56.565761Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:56.744853Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:58.989798Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631054342887406:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:58.989898Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:59.051370Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:59.086693Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:59.117146Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:59.150867Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:59.183957Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:59.241268Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:59.278019Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:59.365338Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631058637855361:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:59.365469Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:59.365562Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631058637855366:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:59.370428Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:59.384705Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631058637855368:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:59.478116Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631058637855419:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:00.402310Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:00.439392Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:00.478717Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:00.715849Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519631041457983892:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:00.715929Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::TestTruncatedBySize [GOOD] Test command err: Trying to start YDB, gRPC: 62696, MsgBus: 14912 2025-06-24T21:28:13.295737Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630860397711192:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:13.295781Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003030/r3tmp/tmpgw6K8q/pdisk_1.dat TServer::EnableGrpc on GrpcPort 62696, node 1 2025-06-24T21:28:13.737327Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:13.737451Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:13.739173Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:13.743605Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:13.745967Z node 1 :CONFIGS_DISPATCHER ERROR: 
configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630860397711173:2079] 1750800493294658 != 1750800493294661 2025-06-24T21:28:13.812699Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:13.812716Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:13.812721Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:13.812821Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14912 TClient is connected to server localhost:14912 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:28:14.304142Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:14.345741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:14.380376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:14.509869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:14.648604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:14.710121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:16.437341Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630873282614695:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:16.437429Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:16.757361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.786360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.812289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.841374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.870139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.935988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:16.963232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:17.014024Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630877577582650:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:17.014114Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630877577582655:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:17.014159Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:17.017345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:17.026278Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630877577582657:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:17.114640Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630877577582708:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:18.296206Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630860397711192:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:18.296320Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 21545, MsgBus: 18370 2025-06-24T21:28:18.850669Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630882675605621:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:18.850719Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPa ... 8563Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:51.608688Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:51.612102Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15649, node 3 2025-06-24T21:28:51.665022Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:51.665049Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:51.665062Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:51.665251Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24843 TClient is connected to server localhost:24843 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:28:52.334904Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:52.354681Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:52.434355Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:52.452707Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:52.643172Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:52.730074Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:55.773010Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519631043618681669:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:55.773124Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:55.832779Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:55.863937Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:55.895975Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:55.931420Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:55.965223Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:56.002267Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:56.074636Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:56.134807Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519631047913649626:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:56.134935Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:56.135021Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519631047913649631:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:56.138335Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:56.147447Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519631047913649633:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:28:56.214809Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519631047913649684:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:56.444360Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519631026438810868:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:56.444428Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:57.732461Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:57.734088Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:57.735552Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:02.765681Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800542799, txId: 281474976715762] shutting down |98.1%| [TA] $(B)/ydb/core/persqueue/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:28:57.930113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:28:57.930264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:28:57.930316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:28:57.930359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:28:57.931654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:28:57.931709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:28:57.931898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:28:57.932011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:28:57.932871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:28:57.939584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:28:58.030080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:28:58.030131Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:58.047397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:28:58.047905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:28:58.048076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:28:58.057078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:28:58.057300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:28:58.060253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been 
configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:58.060617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:28:58.069722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:28:58.070987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:28:58.082873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:28:58.082967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:28:58.083236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:28:58.083313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:28:58.083373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:28:58.083470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.091556Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:28:58.231018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:28:58.232659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.236941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:28:58.237043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:28:58.239081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:28:58.239281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:28:58.242975Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:28:58.243210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:28:58.243424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.243503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:28:58.243543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:28:58.243589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:28:58.245572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.245617Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:28:58.245657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:28:58.247669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.247724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:28:58.247771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:28:58.247833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:28:58.251985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:28:58.254137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:28:58.255426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:28:58.256598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 
2025-06-24T21:28:58.256767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:28:58.256837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:28:58.258352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:28:58.258457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:28:58.258650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:28:58.258738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:28:58.261237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:28:58.261293Z node 1 :FLAT_TX_SCHEMESHARD ... -24T21:29:04.427235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 175, path id: 1 2025-06-24T21:29:04.427265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 175, path id: 26 2025-06-24T21:29:04.427592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2025-06-24T21:29:04.427628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:399: [72057594046678944] TDeleteParts opId# 175:0 ProgressState 2025-06-24T21:29:04.427657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 175:0 135 -> 240 2025-06-24T21:29:04.428867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 103 PathOwnerId: 72057594046678944, cookie: 175 2025-06-24T21:29:04.428944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 103 PathOwnerId: 72057594046678944, cookie: 175 2025-06-24T21:29:04.428970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 175 2025-06-24T21:29:04.429008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 103 2025-06-24T21:29:04.429044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: 
DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:29:04.430043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 26 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2025-06-24T21:29:04.430138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 26 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2025-06-24T21:29:04.430161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 175 2025-06-24T21:29:04.430181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 26], version: 18446744073709551615 2025-06-24T21:29:04.430203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 6 2025-06-24T21:29:04.430265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 175, ready parts: 0/1, is published: true 2025-06-24T21:29:04.432648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:74 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:29:04.432696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:73 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:29:04.432721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:75 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:29:04.433066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2025-06-24T21:29:04.433111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 175:0 ProgressState 2025-06-24T21:29:04.433193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#175:0 progress is 1/1 2025-06-24T21:29:04.433218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-24T21:29:04.433251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#175:0 progress is 1/1 2025-06-24T21:29:04.433282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-24T21:29:04.433307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 175, ready parts: 1/1, is published: true 2025-06-24T21:29:04.433350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-24T21:29:04.433372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 175:0 2025-06-24T21:29:04.433392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx 
for txid 175:0 2025-06-24T21:29:04.433515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 5 2025-06-24T21:29:04.434292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-24T21:29:04.434921Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 74 TxId_Deprecated: 74 TabletID: 72075186233409619 2025-06-24T21:29:04.437440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 74 ShardOwnerId: 72057594046678944 ShardLocalIdx: 74, at schemeshard: 72057594046678944 2025-06-24T21:29:04.437739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 4 Forgetting tablet 72075186233409619 2025-06-24T21:29:04.438439Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 73 TxId_Deprecated: 73 TabletID: 72075186233409618 2025-06-24T21:29:04.439436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:29:04.442224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 73 ShardOwnerId: 72057594046678944 ShardLocalIdx: 73, at schemeshard: 72057594046678944 2025-06-24T21:29:04.442549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 3 2025-06-24T21:29:04.443046Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 75 TxId_Deprecated: 75 TabletID: 72075186233409620 Forgetting tablet 72075186233409618 2025-06-24T21:29:04.444560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 75 ShardOwnerId: 72057594046678944 ShardLocalIdx: 75, at schemeshard: 72057594046678944 2025-06-24T21:29:04.444771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 2 Forgetting tablet 72075186233409620 2025-06-24T21:29:04.446158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:29:04.446200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 26], at schemeshard: 72057594046678944 2025-06-24T21:29:04.446302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 1 2025-06-24T21:29:04.447488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:29:04.447528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 26], at schemeshard: 72057594046678944 2025-06-24T21:29:04.447584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:29:04.448084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-24T21:29:04.451379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:74 2025-06-24T21:29:04.451431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:74 tabletId 72075186233409619 2025-06-24T21:29:04.451869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:73 2025-06-24T21:29:04.451896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:73 tabletId 72075186233409618 2025-06-24T21:29:04.452172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:75 2025-06-24T21:29:04.452228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:75 tabletId 72075186233409620 2025-06-24T21:29:04.453577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:29:04.453654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 175, wait until txId: 175 TestWaitNotification wait txId: 175 2025-06-24T21:29:04.454870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 175: send EvNotifyTxCompletion 2025-06-24T21:29:04.454902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 175 2025-06-24T21:29:04.456303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 175, at schemeshard: 72057594046678944 2025-06-24T21:29:04.456387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 175: got EvNotifyTxCompletionResult 2025-06-24T21:29:04.456413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 175: satisfy waiter [1:6748:7727] TestWaitNotification: OK eventTxId 175 |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |98.1%| [TA] {RESULT} $(B)/ydb/core/persqueue/ut/test-results/unittest/{meta.json ... results_accumulator.log} |98.1%| [TA] $(B)/ydb/core/tx/schemeshard/ut_auditsettings/test-results/unittest/{meta.json ... results_accumulator.log} |98.1%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/test-results/unittest/{meta.json ... results_accumulator.log} |98.1%| [TA] $(B)/ydb/core/blobstorage/storagepoolmon/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |98.1%| [TA] {RESULT} $(B)/ydb/core/blobstorage/storagepoolmon/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpReturning::Random [GOOD] >> KqpService::ToDictCache-UseCache [GOOD] >> RemoteTopicReader::ReadTopic [GOOD] >> FolderServiceTest::TFolderServiceTransitional >> FolderServiceTest::TFolderService |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TAccessServiceTest::Authenticate >> FolderServiceTest::TFolderServiceAdapter >> TUserAccountServiceTest::Get ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpReturning::Random [GOOD] Test command err: Trying to start YDB, gRPC: 27637, MsgBus: 32310 2025-06-24T21:27:03.379684Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630559141342675:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:03.379770Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003282/r3tmp/tmpirXEfe/pdisk_1.dat 2025-06-24T21:27:03.723737Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:03.723852Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:03.725916Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:03.726451Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630559141342657:2079] 1750800423378380 != 1750800423378383 2025-06-24T21:27:03.736466Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27637, node 1 2025-06-24T21:27:03.818787Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:03.818847Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:03.818861Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:03.819005Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32310 TClient is connected to server localhost:32310 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:04.308531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:04.327358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:04.393666Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:04.472985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:04.648932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:04.731793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:06.242967Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630572026246186:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.243089Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.523591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.553824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.579482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.603667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.630036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.658896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.687640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.744498Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630572026246840:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.744575Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630572026246845:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.744599Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.748249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:06.757905Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630572026246847:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:27:06.858337Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630572026246898:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
: Warning: Type annotation, code: 1030
:4:13: Warning: At function: RemovePrefixMembers, At function: RemoveSystemMembers, At function: PersistableRepr, At function: SqlProject
:4:27: Warning: At function: Filter, At lambda, At function: Coalesce
:4:50: Warning: At function: SqlIn
:4:50: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 2025-06-24T21:27:08.379556Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630559141342675:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:08.379623Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying t ... 25-06-24T21:28:44.097977Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded Trying to start YDB, gRPC: 13973, MsgBus: 16713 2025-06-24T21:28:57.699788Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7519631050381403569:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:57.699855Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003282/r3tmp/tmprrUCTN/pdisk_1.dat 2025-06-24T21:28:57.896426Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:57.896556Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:57.897701Z node 8 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:57.930980Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7519631050381403549:2079] 1750800537698892 != 1750800537698895 2025-06-24T21:28:57.934406Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13973, node 8 2025-06-24T21:28:58.004627Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:58.004652Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:58.004664Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:58.004862Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16713 2025-06-24T21:28:58.717180Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16713 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:58.858768Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:58.876049Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:58.965094Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:59.210010Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:59.319847Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:29:02.701028Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[8:7519631050381403569:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:02.701154Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:03.098235Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519631076151208973:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:03.098385Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:03.205350Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:03.287694Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:03.340324Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:03.417625Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:03.468733Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:03.524091Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:03.606103Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:03.713594Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519631076151209636:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:03.713736Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:03.714089Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519631076151209641:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:03.722166Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:03.746779Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [8:7519631076151209643:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:29:03.802964Z node 8 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [8:7519631076151209696:3436] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:05.528241Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpService::ToDictCache-UseCache [GOOD] Test command err: Trying to start YDB, gRPC: 18552, MsgBus: 13589 2025-06-24T21:27:57.100559Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630791286149893:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:57.101398Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030ab/r3tmp/tmpUH0iyy/pdisk_1.dat 2025-06-24T21:27:57.539171Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630791286149871:2079] 1750800477098633 != 1750800477098636 2025-06-24T21:27:57.546650Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18552, node 1 2025-06-24T21:27:57.605439Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:57.605533Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:57.607117Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:57.624060Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:57.624083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:57.624091Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:57.624242Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13589 TClient is connected to server localhost:13589 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:27:58.114293Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:58.259658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:58.279417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:27:58.290199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:58.434253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:27:58.609569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:58.675117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:28:00.465928Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630804171053395:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:00.466050Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:00.837385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:00.878504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:00.937759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:00.987649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:01.030039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:01.105779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:01.143573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:01.242319Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630808466021358:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:01.242413Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:01.242662Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630808466021363:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:01.246469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:01.263221Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630808466021365:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:01.379705Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630808466021416:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:02.102574Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630791286149893:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:02.102692Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:02.417371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... took: 1.099860s took: 1.099384s took: 0.981035s took: 0.981399s took: 0.981483s took: 0.983672s Trying to start YDB, gRPC: 26953, MsgBus: 65467 2025-06-24T21:28:45.054383Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519630996912068395:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:45.054450Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030ab/r3tmp/tmpa6avb7/pdisk_1.dat 2025-06-24T21:28:45.200232Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:45.202022Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519630996912068372:2079] 1750800525053235 != 1750800525053238 2025-06-24T21:28:45.215516Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:45.215617Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:45.219622Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26953, node 3 2025-06-24T21:28:45.290764Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:45.290802Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:45.290814Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:45.290980Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65467 TClient is connected to server localhost:65467 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:45.910216Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:45.918757Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:28:46.062687Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:49.214906Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519631014091938224:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:49.214992Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519631014091938199:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:49.215045Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519631014091938225:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:49.215094Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519631014091938226:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:49.215174Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519631014091938222:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:49.215279Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:49.221455Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:49.226008Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519631014091938236:2311] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T21:28:49.229686Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519631014091938239:2314] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T21:28:49.229915Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519631014091938238:2313] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T21:28:49.237769Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519631014091938232:2314], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:28:49.237771Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519631014091938233:2315], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:28:49.237829Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519631014091938234:2316], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:28:49.237845Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519631014091938235:2317], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:28:49.291550Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519631014091938310:2359] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:49.326052Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519631014091938329:2368] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:49.327488Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519631014091938334:2372] txid# 281474976715664, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:49.336840Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519631014091938345:2380] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:50.054922Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519630996912068395:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:50.054995Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; took: 4.415494s took: 4.416433s took: 4.418492s took: 4.423901s took: 1.037193s took: 1.037096s took: 1.038002s took: 1.038108s took: 1.107863s took: 1.112078s took: 1.112301s took: 1.112513s took: 1.065422s took: 1.068141s took: 1.070177s took: 1.071225s took: 1.031803s took: 1.032997s took: 1.033979s took: 1.034462s took: 1.092781s took: 1.094128s took: 1.097337s took: 1.129318s took: 1.031889s took: 1.031922s took: 1.032082s took: 1.033886s took: 1.073804s took: 1.075304s took: 1.077709s took: 1.078243s took: 1.171067s took: 1.171075s took: 1.171573s took: 1.174165s took: 1.011287s took: 1.012649s took: 1.013440s took: 1.013874s 2025-06-24T21:29:00.159094Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:29:00.159130Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded took: 1.059625s took: 1.062631s took: 1.063907s took: 1.066207s took: 1.022199s took: 1.023031s took: 1.024609s took: 1.025265s took: 0.995436s took: 0.995929s took: 0.997826s took: 0.998871s took: 1.196001s took: 1.196912s took: 1.198898s took: 1.201213s took: 1.115528s took: 1.116807s took: 1.118371s took: 1.120412s took: 1.033243s took: 1.034808s took: 1.035392s took: 1.037020s ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest >> RemoteTopicReader::ReadTopic [GOOD] Test command err: 2025-06-24T21:28:59.574210Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631057643423074:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:59.574279Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00457b/r3tmp/tmpn3Ndvt/pdisk_1.dat 2025-06-24T21:28:59.995566Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631057643423036:2079] 1750800539569240 != 1750800539569243 2025-06-24T21:29:00.017443Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:00.082883Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:00.083005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:00.087896Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22571 TServer::EnableGrpc on GrpcPort 14195, node 1 2025-06-24T21:29:00.582549Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:00.582576Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:00.582609Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:00.582779Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:00.582975Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22571 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:01.492751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:29:01.805792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:02.543453Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631070528325809:2320], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:02.543453Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631070528325821:2323], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:02.543561Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:02.549419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:02.559489Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631070528325823:2324], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-24T21:29:02.647170Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631070528325884:2466] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:04.155972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:29:04.574715Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631057643423074:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:04.574785Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:04.619029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:05.174163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:29:05.628588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:173) 2025-06-24T21:29:06.119307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710684:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:29:06.993232Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:32: [RemoteTopicReader][/Root/topic][0][1:7519631087708195778:2767] Handshake: worker# [1:7519631066233358222:2291] 2025-06-24T21:29:07.000045Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:41: [RemoteTopicReader][/Root/topic][0][1:7519631087708195778:2767] Create read session: session# [1:7519631087708195779:2290] 2025-06-24T21:29:07.000348Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7519631087708195778:2767] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { 
SkipCommit: 0 } 2025-06-24T21:29:07.019685Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:79: [RemoteTopicReader][/Root/topic][0][1:7519631087708195778:2767] Handle NKikimr::NReplication::TEvYdbProxy::TEvStartTopicReadingSession { Result: { ReadSessionId: consumer_1_1_17449090436880116920_v1 } } 2025-06-24T21:29:07.023849Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7519631087708195778:2767] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 9b Offset: 0 SeqNo: 1 CreateTime: 2025-06-24T21:29:06.889000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-24T21:29:07.024188Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7519631087708195778:2767] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-24T21:29:07.085867Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7519631087708195778:2767] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 9b Offset: 1 SeqNo: 2 CreateTime: 2025-06-24T21:29:07.078000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-24T21:29:07.180852Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:32: [RemoteTopicReader][/Root/topic][0][1:7519631092003163198:2808] Handshake: worker# [1:7519631066233358222:2291] 2025-06-24T21:29:07.184094Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:41: [RemoteTopicReader][/Root/topic][0][1:7519631092003163198:2808] Create read session: session# [1:7519631092003163199:2290] 2025-06-24T21:29:07.184318Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7519631092003163198:2808] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-24T21:29:07.189261Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:79: [RemoteTopicReader][/Root/topic][0][1:7519631092003163198:2808] Handle NKikimr::NReplication::TEvYdbProxy::TEvStartTopicReadingSession { Result: { ReadSessionId: consumer_1_2_1094625714740805921_v1 } } 2025-06-24T21:29:07.191211Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7519631092003163198:2808] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 9b Offset: 1 SeqNo: 2 CreateTime: 2025-06-24T21:29:07.078000Z MessageGroupId: producer ProducerId: producer }] } } >> TServiceAccountServiceTest::Get [GOOD] |98.1%| [TA] $(B)/ydb/core/tx/replication/service/ut_topic_reader/test-results/unittest/{meta.json ... results_accumulator.log} |98.1%| [TA] {RESULT} $(B)/ydb/core/tx/replication/service/ut_topic_reader/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TAccessServiceTest::PassRequestId |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::Large_2 >> KqpBatchDelete::Large_3 >> KqpBatchDelete::HasTxControl >> KqpBatchDelete::SimplePartitions >> KqpBatchUpdate::ColumnTable >> KqpBatchDelete::MultiStatement >> KqpBatchUpdate::ManyPartitions_3 >> KqpBatchUpdate::TableWithIndex >> KqpBatchUpdate::HasTxControl >> KqpBatchDelete::UnknownColumn >> KqpBatchUpdate::SimplePartitions >> KqpBatchDelete::TableWithIndex >> KqpBatchDelete::ManyPartitions_3 >> KqpBatchUpdate::ManyPartitions_2 >> KqpBatchDelete::Large_1 >> KqpBatchUpdate::UpdateOn |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::NotIdempotent >> KqpBatchDelete::ManyPartitions_2 >> FolderServiceTest::TFolderService [GOOD] >> FolderServiceTest::TFolderServiceTransitional [GOOD] >> KqpBatchDelete::SimpleOnePartition |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::ManyPartitions_1 >> TAccessServiceTest::Authenticate [GOOD] >> KqpBatchUpdate::TableNotExists |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::Returning >> KqpBatchUpdate::Large_1 >> FolderServiceTest::TFolderServiceAdapter [GOOD] >> TUserAccountServiceTest::Get [GOOD] >> KqpBatchUpdate::SimpleOnePartition ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> FolderServiceTest::TFolderService [GOOD] Test command err: 2025-06-24T21:29:08.337173Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631099581811960:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:08.337418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00486c/r3tmp/tmpDyDuzK/pdisk_1.dat 2025-06-24T21:29:08.623893Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:08.627403Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631099581811942:2079] 1750800548336336 != 1750800548336339 2025-06-24T21:29:08.676808Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:08.676921Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:08.678459Z node 1 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30314 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:08.905507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:08.927720Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700009e388] Connect to grpc://localhost:2340 2025-06-24T21:29:08.947223Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700009e388] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2025-06-24T21:29:08.973067Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700009e388] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:2340: Failed to connect to remote host: Connection refused 2025-06-24T21:29:08.974482Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700009e388] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2025-06-24T21:29:08.975036Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700009e388] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:2340: Failed to connect to remote host: Connection refused 2025-06-24T21:29:09.355447Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:09.977463Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700009e388] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2025-06-24T21:29:09.982782Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700009e388] Status 5 Not Found 2025-06-24T21:29:09.983400Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700009e388] Request ResolveFoldersRequest { folder_ids: "i_am_exists" } 2025-06-24T21:29:09.986879Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700009e388] Response ResolveFoldersResponse { resolved_folders { cloud_id: "response_cloud_id" } } ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> FolderServiceTest::TFolderServiceTransitional [GOOD] Test command err: 2025-06-24T21:29:08.254308Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631096940467273:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:08.254425Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004850/r3tmp/tmpEnc1KU/pdisk_1.dat 2025-06-24T21:29:08.571874Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:08.573217Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631096940467254:2079] 1750800548253598 != 1750800548253601 2025-06-24T21:29:08.644774Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:08.644910Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:08.648007Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10577 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:08.911918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:29:08.926050Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700008e408] Connect to grpc://localhost:27694 2025-06-24T21:29:08.949328Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408] Request ListFoldersRequest { id: "i_am_not_exists" } 2025-06-24T21:29:08.973161Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700008e408] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:27694: Failed to connect to remote host: Connection refused 2025-06-24T21:29:08.974448Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408] Request ListFoldersRequest { id: "i_am_not_exists" } 2025-06-24T21:29:08.975060Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700008e408] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:27694: Failed to connect to remote host: Connection refused 2025-06-24T21:29:09.260960Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:09.976621Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408] Request ListFoldersRequest { id: "i_am_not_exists" } 2025-06-24T21:29:09.980242Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700008e408] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:27694: Failed to connect to remote host: Connection refused 2025-06-24T21:29:10.983612Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408] Request ListFoldersRequest { id: "i_am_not_exists" } 2025-06-24T21:29:10.993729Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700008e408] Status 5 Not Found 2025-06-24T21:29:10.994487Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408] Request ListFoldersRequest { id: "i_am_exists" } 2025-06-24T21:29:11.005633Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700008e408] Response ListFoldersResponse { result { cloud_id: "response_cloud_id" } } ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TAccessServiceTest::Authenticate [GOOD] Test command err: 2025-06-24T21:29:08.540706Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631097339626775:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:08.540933Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004866/r3tmp/tmp8ttqEf/pdisk_1.dat 2025-06-24T21:29:08.869832Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:08.871042Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631097339626755:2079] 1750800548539614 != 1750800548539617 2025-06-24T21:29:08.915316Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:08.915401Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:08.917102Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4284 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:09.118175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:09.166298Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700008e408] Connect to grpc://localhost:18224 2025-06-24T21:29:09.168656Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408] Request AuthenticateRequest { iam_token: "**** (047D44F1)" } 2025-06-24T21:29:09.176213Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700008e408] Status 7 Permission Denied 2025-06-24T21:29:09.177840Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408] Request AuthenticateRequest { iam_token: "**** (342498C1)" } 2025-06-24T21:29:09.180640Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700008e408] Response AuthenticateResponse { subject { user_account { id: "1234" } } } ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TUserAccountServiceTest::Get [GOOD] Test command err: 2025-06-24T21:29:08.676172Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631097545772839:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:08.676323Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00485b/r3tmp/tmpPrn7wz/pdisk_1.dat 2025-06-24T21:29:08.974560Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:08.976105Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631097545772821:2079] 1750800548675343 != 1750800548675346 2025-06-24T21:29:09.073073Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:09.073237Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:09.074800Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> 
Connected TClient is connected to server localhost:16068 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:09.279678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:09.295402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 >> TAccessServiceTest::PassRequestId [GOOD] >> TServiceAccountServiceTest::IssueToken >> TServiceAccountServiceTest::IssueToken [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> FolderServiceTest::TFolderServiceAdapter [GOOD] Test command err: 2025-06-24T21:29:08.629264Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631097826293058:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:08.629411Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004854/r3tmp/tmp5FBSGW/pdisk_1.dat 2025-06-24T21:29:08.935085Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:08.936827Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631097826293039:2079] 1750800548627985 != 1750800548627988 2025-06-24T21:29:08.993443Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:08.993634Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:08.995689Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15851 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:09.215500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:09.299203Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000075508] Connect to grpc://localhost:21952 2025-06-24T21:29:09.300471Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000075508] Request ListFoldersRequest { id: "i_am_exists" } 2025-06-24T21:29:09.313032Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000075508] Response ListFoldersResponse { result { cloud_id: "cloud_from_old_service" } } 2025-06-24T21:29:09.314344Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700009ff88] Connect to grpc://localhost:27217 2025-06-24T21:29:09.315281Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700009ff88] Request ResolveFoldersRequest { folder_ids: "i_am_exists" } 2025-06-24T21:29:09.324887Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700009ff88] Response ResolveFoldersResponse { resolved_folders { cloud_id: "cloud_from_new_service" } } 2025-06-24T21:29:09.325483Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700009ff88] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2025-06-24T21:29:09.327580Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700009ff88] Status 5 Not Found 2025-06-24T21:29:09.328044Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000075508] Request ListFoldersRequest { id: "i_am_not_exists" } 2025-06-24T21:29:09.329855Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000075508] Status 5 Not Found >> KqpBatchUpdate::Large_3 >> KqpBatchDelete::DeleteOn >> KqpBatchDelete::Large_2 ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TAccessServiceTest::PassRequestId [GOOD] Test command err: 2025-06-24T21:29:09.624883Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631103302349717:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:09.625795Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/2btm/004849/r3tmp/tmpz1uqEZ/pdisk_1.dat 2025-06-24T21:29:09.966004Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631103302349689:2079] 1750800549616971 != 1750800549616974 2025-06-24T21:29:09.986340Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:10.066491Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:10.066623Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:10.068747Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19141 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:10.311731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:29:10.349112Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700008e408]{trololo} Connect to grpc://localhost:21874 2025-06-24T21:29:10.350356Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408]{trololo} Request AuthenticateRequest { iam_token: "**** (717F937C)" } 2025-06-24T21:29:10.372135Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700008e408]{trololo} Response AuthenticateResponse { subject { user_account { id: "1234" } } } >> LabeledDbCounters::OneTabletRestart [GOOD] >> LabeledDbCounters::TwoTablets |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> TColumnShardTestSchema::RebootForgetAfterFail [GOOD] >> KqpBatchDelete::TableNotExists |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::MultiStatement ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootForgetAfterFail [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801081.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130801081.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799881.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; 2025-06-24T21:28:04.142644Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:28:04.172922Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:28:04.173284Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:28:04.182094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:28:04.182366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:28:04.182697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:28:04.182828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 
2025-06-24T21:28:04.182936Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:28:04.183053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:28:04.183207Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:28:04.183327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:28:04.183456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:28:04.183586Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:28:04.183710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:28:04.224397Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:28:04.224681Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:28:04.224743Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:28:04.224925Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:28:04.225129Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:28:04.225213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:28:04.225266Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:28:04.225379Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:28:04.225458Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 
2025-06-24T21:28:04.225505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:28:04.225540Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:28:04.225715Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:28:04.225786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:28:04.225829Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:28:04.225864Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:28:04.225952Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:28:04.226011Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:28:04.226059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:28:04.226087Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:28:04.226145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:28:04.226197Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:28:04.226228Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:28:04.226467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:28:04.226527Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:28:04.226565Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:28:04.226759Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:28:04.226816Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:28:04.226849Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:28:04.227032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:28:04.227105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:28:04.227167Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:28:04.227260Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:28:04.227335Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:28:04.227387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:28:04.227420Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:28:04.227936Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=55; 2025-06-24T21:28:04.228037Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=39; 2025-06-24T21:28:04.228133Z node 1 :TX_COLUMNSH ... 
lumn_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=11697; 2025-06-24T21:29:14.235181Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=30; 2025-06-24T21:29:14.235436Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=194; 2025-06-24T21:29:14.235479Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=12244; 2025-06-24T21:29:14.235528Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=12386; 2025-06-24T21:29:14.235602Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=13; 2025-06-24T21:29:14.235832Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=177; 2025-06-24T21:29:14.235877Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=13164; 2025-06-24T21:29:14.236100Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=167; 2025-06-24T21:29:14.236219Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=66; 2025-06-24T21:29:14.236349Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=83; 2025-06-24T21:29:14.236453Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=62; 2025-06-24T21:29:14.240307Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=3789; 2025-06-24T21:29:14.244461Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=4049; 2025-06-24T21:29:14.244863Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=18; 2025-06-24T21:29:14.244932Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=13; 2025-06-24T21:29:14.244981Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=10; 2025-06-24T21:29:14.245066Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=48; 2025-06-24T21:29:14.245123Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=9; 2025-06-24T21:29:14.245242Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=72; 2025-06-24T21:29:14.245317Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=24; 2025-06-24T21:29:14.245412Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=49; 2025-06-24T21:29:14.245512Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=53; 2025-06-24T21:29:14.245761Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=203; 2025-06-24T21:29:14.245811Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=30966; 2025-06-24T21:29:14.245961Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=21099992;raw_bytes=29608900;count=3;records=320000} evicted {blob_bytes=10565848;raw_bytes=16084450;count=1;records=160000} at tablet 9437184 2025-06-24T21:29:14.246085Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1709:3536];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:29:14.246139Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1709:3536];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:29:14.246210Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;self_id=[1:1709:3536];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:29:14.257009Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1709:3536];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-24T21:29:14.257215Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:29:14.257321Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-24T21:29:14.257397Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800240055;tx_id=18446744073709551615;;current_snapshot_ts=1750800485281; 2025-06-24T21:29:14.257441Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:29:14.257495Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:29:14.257551Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:29:14.257645Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:29:14.258336Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;self_id=[1:1746:3565];tablet_id=9437184;parent=[1:1709:3536];fline=manager.cpp:88;event=ask_data;request=request_id=55;9438184000001={portions_count=4};; 2025-06-24T21:29:14.259364Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1709:3536];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:29:14.260650Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1709:3536];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:29:14.260693Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:29:14.260721Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:29:14.260766Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1709:3536];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:29:14.260872Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1709:3536];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-24T21:29:14.260945Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1709:3536];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800240055;tx_id=18446744073709551615;;current_snapshot_ts=1750800485281; 2025-06-24T21:29:14.260992Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1709:3536];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:29:14.261048Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1709:3536];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:29:14.261091Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1709:3536];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:29:14.261189Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1709:3536];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/10565848 160000/10565848 0/0 160000/10565848 >> KqpBatchDelete::ManyPartitions_1 >> KqpBatchDelete::ColumnTable |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TServiceAccountServiceTest::Get [GOOD] Test command err: 2025-06-24T21:29:08.449171Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631098079119516:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:08.449264Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004876/r3tmp/tmpiVppsa/pdisk_1.dat 2025-06-24T21:29:08.701685Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631098079119497:2079] 1750800548448272 != 1750800548448275 2025-06-24T21:29:08.708321Z 
node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:08.815848Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:08.816009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:08.818563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28882 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:08.967871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:12.061303Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631114292066930:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:12.061420Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004876/r3tmp/tmph21Y7z/pdisk_1.dat 2025-06-24T21:29:12.378890Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:12.387334Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631109997099574:2079] 1750800552040627 != 1750800552040630 2025-06-24T21:29:12.408368Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:12.408467Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:12.411871Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7399 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.689877Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:12.714541Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 >> KqpBatchDelete::UnknownColumn [GOOD] >> KqpBatchUpdate::UpdateOn [GOOD] >> KqpBatchUpdate::NotIdempotent [GOOD] >> KqpBatchDelete::MultiStatement [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TServiceAccountServiceTest::IssueToken [GOOD] Test command err: 2025-06-24T21:29:09.016978Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631103671347471:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:09.020073Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004868/r3tmp/tmplr8bVl/pdisk_1.dat 2025-06-24T21:29:09.313809Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:09.315542Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631099376380152:2079] 1750800549001697 != 1750800549001700 2025-06-24T21:29:09.399035Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:09.399182Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:09.401222Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1342 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:09.601065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:09.623338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:29:12.818214Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631116330755684:2248];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:12.818568Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004868/r3tmp/tmpcP6mPq/pdisk_1.dat 2025-06-24T21:29:13.048441Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:13.059321Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631116330755446:2079] 1750800552610895 != 1750800552610898 2025-06-24T21:29:13.071230Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:13.071340Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:13.073795Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13714 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:13.337817Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:13.352072Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 >> KqpBatchUpdate::HasTxControl [GOOD] >> KqpBatchDelete::HasTxControl [GOOD] |98.2%| [TA] $(B)/ydb/library/ycloud/impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} |98.2%| [TA] {RESULT} $(B)/ydb/library/ycloud/impl/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::UnknownColumn [GOOD] Test command err: Trying to start YDB, gRPC: 27454, MsgBus: 26432 2025-06-24T21:29:10.373684Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631107634770464:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:10.373756Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002bcb/r3tmp/tmpmIIcTm/pdisk_1.dat 2025-06-24T21:29:11.024296Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:11.024418Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:11.095058Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:11.096816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27454, node 1 2025-06-24T21:29:11.234737Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:11.234765Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:11.234772Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:11.234876Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:11.381156Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26432 TClient is connected to server localhost:26432 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.116063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-24T21:29:12.150381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:12.350479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.588251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.695724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.527407Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631124814641259:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.527565Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.855676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.888555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.924742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.960623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.988201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.062559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.135937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.205534Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631129109609216:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.205609Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.205734Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631129109609221:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.209771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:15.254544Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631129109609223:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:15.351489Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631129109609274:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:15.375370Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631107634770464:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.375434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:16.663961Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631133404576855:2483], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:31: Error: At lambda, At function: Coalesce, At function: And
:3:37: Error: At function: ==
:3:23: Error: At function: Member
:3:23: Error: Member not found: UnknownColumn 2025-06-24T21:29:16.665723Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YzczYTJiY2MtZTY0NjZhNDEtNDI2NTJmN2UtNGZhYzNlMDc=, ActorId: [1:7519631133404576846:2477], ActorState: ExecuteState, TraceId: 01jyhxgrhh7k0sz8z8qb8f4sey, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::UpdateOn [GOOD] Test command err: Trying to start YDB, gRPC: 5913, MsgBus: 25788 2025-06-24T21:29:10.373734Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631104675429163:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:10.373845Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002b63/r3tmp/tmpOAmprQ/pdisk_1.dat 2025-06-24T21:29:10.997332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:11.000134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:11.008832Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:11.044868Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631104675429140:2079] 1750800550372530 != 1750800550372533 2025-06-24T21:29:11.048543Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5913, node 1 2025-06-24T21:29:11.200082Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:11.200107Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:11.200139Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:11.200296Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:11.397771Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25788 TClient is connected to server localhost:25788 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.243167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:12.271947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:29:12.283070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:12.498151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.710050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.831293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.271474Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631121855299945:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.271669Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.800360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.831080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.863024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.892946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.927338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.000088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.040401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.179250Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631126150267903:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.179362Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.179689Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631126150267908:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.184656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:15.196386Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631126150267910:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:15.283706Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631126150267963:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:15.374675Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631104675429163:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.374750Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:16.612144Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631130445235543:2483], status: GENERIC_ERROR, issues:
:2:22: Error: BATCH UPDATE is unsupported with ON 2025-06-24T21:29:16.612560Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=Y2NlODdkYWItZTlmZTU0ZWEtZWIzN2YzNDUtNTRhNTE5ZDE=, ActorId: [1:7519631130445235534:2477], ActorState: ExecuteState, TraceId: 01jyhxgrhmea9zx3wkz7jbhkha, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::MultiStatement [GOOD] Test command err: Trying to start YDB, gRPC: 8127, MsgBus: 11181 2025-06-24T21:29:10.365785Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631104489001804:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:10.365829Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002bd0/r3tmp/tmp9rLWgD/pdisk_1.dat 2025-06-24T21:29:10.835450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:10.835558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:10.841170Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:10.850953Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:10.903279Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631104489001780:2079] 1750800550359986 != 1750800550359989 TServer::EnableGrpc on GrpcPort 8127, node 1 2025-06-24T21:29:11.138056Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:11.138077Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:11.138119Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:11.138250Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:11.379720Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11181 TClient is connected to server localhost:11181 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.195030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:12.210576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:29:12.234491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.485707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.758976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.886125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.408095Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631121668872621:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.408339Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.800342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.872084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.943097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.988776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.027915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.110209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.145148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.215455Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631125963840584:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.215549Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.215853Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631125963840589:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.222999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:15.237633Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631125963840591:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:29:15.295307Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631125963840642:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:15.373276Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631104489001804:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.373350Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:16.727742Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631130258808223:2483], status: GENERIC_ERROR, issues:
:4:32: Error: BATCH can't be used with multiple writes or reads. 2025-06-24T21:29:16.728744Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OTY3YmFlODMtYjI0ZWZmMmItODU3ZDYwMzYtZGQ0NGVmZDY=, ActorId: [1:7519631130258808214:2477], ActorState: ExecuteState, TraceId: 01jyhxgrh2dy4sa7hsfc17dws2, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:29:16.760251Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631130258808227:2485], status: GENERIC_ERROR, issues:
:4:17: Error: BATCH can't be used with multiple writes or reads. 2025-06-24T21:29:16.760445Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OTY3YmFlODMtYjI0ZWZmMmItODU3ZDYwMzYtZGQ0NGVmZDY=, ActorId: [1:7519631130258808214:2477], ActorState: ExecuteState, TraceId: 01jyhxgrp3edg5y70y1233hhw9, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:29:16.790071Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631130258808231:2487], status: GENERIC_ERROR, issues:
:4:17: Error: BATCH can't be used with multiple writes or reads. 2025-06-24T21:29:16.791325Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OTY3YmFlODMtYjI0ZWZmMmItODU3ZDYwMzYtZGQ0NGVmZDY=, ActorId: [1:7519631130258808214:2477], ActorState: ExecuteState, TraceId: 01jyhxgrq78sc1w2rxfre2spe1, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:29:16.818907Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631130258808235:2489], status: GENERIC_ERROR, issues:
:4:29: Error: BATCH can't be used with multiple writes or reads. 2025-06-24T21:29:16.819480Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OTY3YmFlODMtYjI0ZWZmMmItODU3ZDYwMzYtZGQ0NGVmZDY=, ActorId: [1:7519631130258808214:2477], ActorState: ExecuteState, TraceId: 01jyhxgrr04qsxgbfbsdaysyf8, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:29:16.841594Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631130258808239:2491], status: GENERIC_ERROR, issues:
:3:29: Error: BATCH can't be used with multiple writes or reads. 2025-06-24T21:29:16.843461Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OTY3YmFlODMtYjI0ZWZmMmItODU3ZDYwMzYtZGQ0NGVmZDY=, ActorId: [1:7519631130258808214:2477], ActorState: ExecuteState, TraceId: 01jyhxgrrx54gaqqp7w3meqe01, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> KqpBatchUpdate::Returning [GOOD] >> KqpBatchUpdate::UnknownColumn ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::NotIdempotent [GOOD] Test command err: Trying to start YDB, gRPC: 22003, MsgBus: 5701 2025-06-24T21:29:10.833156Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631104404718297:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:10.833317Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002b38/r3tmp/tmp5pEZZa/pdisk_1.dat 2025-06-24T21:29:11.371914Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:11.383085Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:11.383491Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:11.385065Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22003, node 1 2025-06-24T21:29:11.557832Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:11.557850Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:11.557858Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:11.557966Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:11.739130Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5701 TClient is connected to server localhost:5701 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.467071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:12.505650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.720199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.975062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:13.071236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.754676Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631121584588904:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.754798Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.143169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.218891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.247288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.274394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.314922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.350696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.394905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.458790Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631125879556857:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.458865Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.459288Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631125879556862:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.485809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:15.495113Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631125879556864:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:29:15.591899Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631125879556915:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:15.803290Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631104404718297:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.803379Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:16.766360Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631130174524490:2480], status: GENERIC_ERROR, issues:
: Error: Table intent determination, code: 1040
:3:43: Error: Batch update is only supported for idempotent updates. 2025-06-24T21:29:16.767367Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MmJlMGYxZTQtNzVhM2U4MzQtMWY3ZjFhOWYtNDc4NDAwMGU=, ActorId: [1:7519631130174524480:2474], ActorState: ExecuteState, TraceId: 01jyhxgrnk9rp9yxcheq9xqfvq, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:29:16.799872Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631130174524496:2483], status: GENERIC_ERROR, issues:
: Error: Table intent determination, code: 1040
:3:43: Error: Batch update is only supported for idempotent updates. 2025-06-24T21:29:16.801706Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MmJlMGYxZTQtNzVhM2U4MzQtMWY3ZjFhOWYtNDc4NDAwMGU=, ActorId: [1:7519631130174524480:2474], ActorState: ExecuteState, TraceId: 01jyhxgrq901k628r96rrmggg2, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:29:16.831758Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631130174524502:2486], status: GENERIC_ERROR, issues:
: Error: Table intent determination, code: 1040
:3:51: Error: Batch update is only supported for idempotent updates. 2025-06-24T21:29:16.833717Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MmJlMGYxZTQtNzVhM2U4MzQtMWY3ZjFhOWYtNDc4NDAwMGU=, ActorId: [1:7519631130174524480:2474], ActorState: ExecuteState, TraceId: 01jyhxgrrffq4w0494kw9jjyjr, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> KqpBatchUpdate::TableNotExists [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::HasTxControl [GOOD] Test command err: Trying to start YDB, gRPC: 1906, MsgBus: 29294 2025-06-24T21:29:10.364867Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631106895201526:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:10.364940Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002bd6/r3tmp/tmpwcYQyG/pdisk_1.dat 2025-06-24T21:29:10.839374Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631106895201507:2079] 1750800550363381 != 1750800550363384 2025-06-24T21:29:10.850732Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:10.861612Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:10.861732Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:10.868075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1906, node 1 2025-06-24T21:29:11.155736Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:11.155766Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:11.155773Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:11.155884Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:11.383919Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29294 TClient is connected to server localhost:29294 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.058114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:12.102633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:29:12.115122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.314586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.562815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.687648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.124902Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631124075072344:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.125010Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.800356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.853222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.888803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.969503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.008583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.066141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.136683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.218562Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631128370040312:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.218640Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.220164Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631128370040317:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.225019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:15.234252Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631128370040319:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:29:15.304272Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631128370040370:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:15.367925Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631106895201526:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.368088Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:17.120997Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MWYyNWVkMmMtNzdjYjczNjEtZGM3NzcwNTYtZGNiZjBhMjM=, ActorId: [1:7519631132665007941:2477], ActorState: ExecuteState, TraceId: 01jyhxgrgy1c1dvc5127a7ep5y, Create QueryResponse for error on request, msg: BATCH operation can be executed only in NoTx mode. ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::HasTxControl [GOOD] Test command err: Trying to start YDB, gRPC: 32682, MsgBus: 9868 2025-06-24T21:29:10.369591Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631106055159950:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:10.369681Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002b8b/r3tmp/tmpOeRDz2/pdisk_1.dat 2025-06-24T21:29:10.882190Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:10.894511Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631106055159921:2079] 1750800550366875 != 1750800550366878 2025-06-24T21:29:10.928741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:10.928853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:10.941643Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32682, node 1 2025-06-24T21:29:11.134924Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:11.134959Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:11.134968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:11.135115Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:11.395588Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9868 TClient is 
connected to server localhost:9868 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.055633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:12.115471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:29:12.134098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.374728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.615618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:29:12.745561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.323164Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631123235030733:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.323277Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.800260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.838361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.879910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.915925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.953159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.995984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.033489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.165143Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631127529998689:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.165212Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.165274Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631127529998694:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.172970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:15.185494Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631127529998696:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:15.288248Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631127529998749:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:15.369945Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631106055159950:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.369996Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:17.155776Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=N2E5ZjdkODItYjBhMjcyZjItOGVhYzg0ZTQtMjk0MDIzMjA=, ActorId: [1:7519631131824966319:2477], ActorState: ExecuteState, TraceId: 01jyhxgrgy8m5qwzycsvdvhjvj, Create QueryResponse for error on request, msg: BATCH operation can be executed only in NoTx mode. >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert-UseSink-UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert+UseSink-UseDataQuery >> KqpBatchDelete::DeleteOn [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::Returning [GOOD] Test command err: Trying to start YDB, gRPC: 63641, MsgBus: 10779 2025-06-24T21:29:12.313490Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631114642953676:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:12.323603Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002af6/r3tmp/tmpmexp79/pdisk_1.dat 2025-06-24T21:29:13.074386Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631114642953582:2079] 1750800552283851 != 1750800552283854 2025-06-24T21:29:13.092491Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:13.102983Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:13.103103Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:13.118541Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63641, node 1 2025-06-24T21:29:13.339735Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:13.339766Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:13.339774Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:13.339918Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
2025-06-24T21:29:13.343262Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10779 TClient is connected to server localhost:10779 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:14.095627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:14.142673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.278158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.481072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.567931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:29:16.164894Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631131822824413:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.165026Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.507744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.544697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.620974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.653223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.685586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.726548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.798321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.875421Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631131822825077:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.875516Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.875881Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631131822825082:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.880217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:16.895561Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631131822825084:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:16.987818Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631131822825135:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:17.310234Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631114642953676:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:17.310316Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:18.175511Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631140412760009:2480], status: GENERIC_ERROR, issues:
:2:22: Error: BATCH UPDATE is unsupported with RETURNING 2025-06-24T21:29:18.177507Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ODlhOTFkNDUtZmZhZmRhMGEtMmYwZjJlMGEtMTRjNTViODE=, ActorId: [1:7519631140412759999:2474], ActorState: ExecuteState, TraceId: 01jyhxgt1vfbgbqf2h4z0yt65p, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::TableNotExists [GOOD] Test command err: Trying to start YDB, gRPC: 9144, MsgBus: 16861 2025-06-24T21:29:12.198645Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631116593576861:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:12.198700Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ad9/r3tmp/tmpO2uK7f/pdisk_1.dat 2025-06-24T21:29:12.834625Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631116593576764:2079] 1750800552184312 != 1750800552184315 2025-06-24T21:29:12.841972Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:12.846516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:12.846638Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:12.859478Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9144, node 1 2025-06-24T21:29:13.151749Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:13.151772Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:13.151780Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:13.151932Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:13.211478Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16861 TClient is connected to server localhost:16861 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:13.886984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:13.927433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.083647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.262412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.363025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:16.127878Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631133773447601:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.127983Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.536541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.596145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.632395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.676248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.746599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.818685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.896546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:17.013823Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631138068415565:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:17.013909Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:17.014368Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631138068415570:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:17.019358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:17.033258Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631138068415572:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:17.139557Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631138068415625:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:17.199270Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631116593576861:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:17.199344Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:18.324571Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631142363383207:2482], status: SCHEME_ERROR, issues:
: Error: Pre type annotation, code: 1020
:3:34: Error: Cannot find table 'db.[/Root/TestBatchNotExists]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:29:18.326500Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZjJhZDg2YS1kYmU4NGU1Mi0yYWE2YzhiYi0yZjAyNzdlMQ==, ActorId: [1:7519631142363383198:2476], ActorState: ExecuteState, TraceId: 01jyhxgt6jf97dkgh0hdwabeje, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:29:18.371172Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631142363383220:2485], status: SCHEME_ERROR, issues:
: Error: Pre type annotation, code: 1020
:4:41: Error: Cannot find table 'db.[/Root/TestBatchNotExists]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:29:18.373151Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZjJhZDg2YS1kYmU4NGU1Mi0yYWE2YzhiYi0yZjAyNzdlMQ==, ActorId: [1:7519631142363383198:2476], ActorState: ExecuteState, TraceId: 01jyhxgt81dfa0vb8217n6swtx, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::DeleteOn [GOOD] Test command err: Trying to start YDB, gRPC: 1027, MsgBus: 26701 2025-06-24T21:29:14.222810Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631122333103380:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:14.222885Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002a8c/r3tmp/tmpm6cTlQ/pdisk_1.dat 2025-06-24T21:29:14.616941Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:14.627666Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:14.627758Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:14.628926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1027, node 1 2025-06-24T21:29:14.695649Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:14.695678Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:14.695689Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:14.695823Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26701 TClient is connected to server localhost:26701 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:29:15.225234Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:15.282506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:29:15.303424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.449942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:15.610983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:15.691459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:17.508225Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631135218006877:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:17.508329Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:17.825804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:17.866232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:17.901008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:17.940997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:17.995077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.071312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.111968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.190487Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631139512974832:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:18.190567Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:18.190787Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631139512974837:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:18.195595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:18.212574Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631139512974839:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:18.268971Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631139512974892:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:19.222727Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631122333103380:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:19.222801Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:19.460761Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631143807942465:2479], status: GENERIC_ERROR, issues:
:2:22: Error: BATCH DELETE is unsupported with ON 2025-06-24T21:29:19.461803Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZjBjNmZkYmYtOGU4ZDAzMWUtZDU0MzlmNjYtODY5YWI0NmM=, ActorId: [1:7519631143807942456:2473], ActorState: ExecuteState, TraceId: 01jyhxgvar6t8sczczxc9kr7a8, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> KqpBatchDelete::TableNotExists [GOOD] >> KqpBatchUpdate::MultiStatement [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::TableNotExists [GOOD] Test command err: Trying to start YDB, gRPC: 13474, MsgBus: 26826 2025-06-24T21:29:14.944923Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631122971559651:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:14.945031Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002a4f/r3tmp/tmpOsttCp/pdisk_1.dat 2025-06-24T21:29:15.402899Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:15.407955Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631122971559632:2079] 1750800554944196 != 1750800554944199 TServer::EnableGrpc on GrpcPort 13474, node 1 2025-06-24T21:29:15.468406Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:15.474045Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:15.481617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:15.546031Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:15.546055Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:15.546069Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:15.546206Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26826 2025-06-24T21:29:15.969388Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26826 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:16.173667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:16.201785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:29:16.216258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:16.402142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:16.633995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:16.726461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:18.532729Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631140151430448:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:18.532854Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:18.865128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.901387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.951130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:19.034730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:19.067704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:19.110288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:19.199269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:19.325020Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631144446398413:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:19.325117Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:19.325370Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631144446398418:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:19.329886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:19.345424Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631144446398420:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:19.422276Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631144446398471:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:19.946451Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631122971559651:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:19.946548Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:20.447882Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631148741366046:2479], status: SCHEME_ERROR, issues:
: Error: Pre type annotation, code: 1020
:2:35: Error: Cannot find table 'db.[/Root/TestBatchNotExists]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:29:20.448079Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZDA0OTlmZDctMzYyOWViMDEtZDU1OTQ4ZjItMzdhODY1MmI=, ActorId: [1:7519631148741366037:2473], ActorState: ExecuteState, TraceId: 01jyhxgw95az3vbxnq1nbx3pjx, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:29:20.490635Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631148741366059:2482], status: SCHEME_ERROR, issues:
: Error: Pre type annotation, code: 1020
:3:41: Error: Cannot find table 'db.[/Root/TestBatchNotExists]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:29:20.492508Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZDA0OTlmZDctMzYyOWViMDEtZDU1OTQ4ZjItMzdhODY1MmI=, ActorId: [1:7519631148741366037:2473], ActorState: ExecuteState, TraceId: 01jyhxgwa74y4fyjs0jsme1xn6, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::TestWrongSessionOrGeneration >> TPQCachingProxyTest::TestDeregister >> TPQCachingProxyTest::OutdatedSession >> TPQCachingProxyTest::TestPublishAndForget |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest >> KqpBatchDelete::TableWithIndex [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::MultiStatement [GOOD] Test command err: Trying to start YDB, gRPC: 28567, MsgBus: 30185 2025-06-24T21:29:15.460494Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631129033888797:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.460540Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002a0e/r3tmp/tmpWV123a/pdisk_1.dat 2025-06-24T21:29:15.880933Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631129033888774:2079] 1750800555447412 != 1750800555447415 2025-06-24T21:29:15.904554Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:15.907096Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:15.907240Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:15.913086Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28567, node 1 2025-06-24T21:29:15.991876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:15.991902Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:15.991909Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:15.992036Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30185 TClient is connected to server localhost:30185 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:29:16.532839Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:16.576491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:16.602316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:16.790421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:17.014032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:17.115882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:18.964569Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631141918792299:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:18.964672Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:19.412292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:19.465715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:19.501357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:19.534113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:19.567999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:19.643531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:19.725697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:19.800635Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631146213760258:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:19.800743Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:19.800898Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631146213760263:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:19.806772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:19.820167Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631146213760265:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:19.875699Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631146213760316:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:20.465718Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631129033888797:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:20.465784Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:21.052358Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631150508727894:2480], status: GENERIC_ERROR, issues:
:5:32: Error: BATCH can't be used with multiple writes or reads. 2025-06-24T21:29:21.052562Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MmFlZjY2ZmItNmVjOTIwNDYtNjM2NTQxMjEtOGIyMGQ3ZmI=, ActorId: [1:7519631150508727885:2474], ActorState: ExecuteState, TraceId: 01jyhxgwtvd51312cpdj1pz7ph, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:29:21.074702Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631154803695194:2482], status: GENERIC_ERROR, issues:
:4:17: Error: BATCH can't be used with multiple writes or reads. 2025-06-24T21:29:21.074905Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MmFlZjY2ZmItNmVjOTIwNDYtNjM2NTQxMjEtOGIyMGQ3ZmI=, ActorId: [1:7519631150508727885:2474], ActorState: ExecuteState, TraceId: 01jyhxgwx71pt7ttgr6k455nxc, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:29:21.093479Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631154803695198:2484], status: GENERIC_ERROR, issues:
:4:17: Error: BATCH can't be used with multiple writes or reads. 2025-06-24T21:29:21.093704Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MmFlZjY2ZmItNmVjOTIwNDYtNjM2NTQxMjEtOGIyMGQ3ZmI=, ActorId: [1:7519631150508727885:2474], ActorState: ExecuteState, TraceId: 01jyhxgwxt87wddked587mjq81, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:29:21.112029Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631154803695202:2486], status: GENERIC_ERROR, issues:
:4:29: Error: BATCH can't be used with multiple writes or reads. 2025-06-24T21:29:21.113573Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MmFlZjY2ZmItNmVjOTIwNDYtNjM2NTQxMjEtOGIyMGQ3ZmI=, ActorId: [1:7519631150508727885:2474], ActorState: ExecuteState, TraceId: 01jyhxgwybe36r5s4v8fe31ykf, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:29:21.131026Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631154803695206:2488], status: GENERIC_ERROR, issues:
:4:29: Error: BATCH can't be used with multiple writes or reads. 2025-06-24T21:29:21.132912Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MmFlZjY2ZmItNmVjOTIwNDYtNjM2NTQxMjEtOGIyMGQ3ZmI=, ActorId: [1:7519631150508727885:2474], ActorState: ExecuteState, TraceId: 01jyhxgwz0d1yd5f6g1x0v9xy0, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> KqpBatchUpdate::TableWithIndex [GOOD] >> TPQCachingProxyTest::MultipleSessions >> TPQCachingProxyTest::TestPublishAndForget [GOOD] >> TPQCachingProxyTest::OutdatedSession [GOOD] >> TPQCachingProxyTest::TestDeregister [GOOD] >> TPQCachingProxyTest::TestWrongSessionOrGeneration [GOOD] >> KqpBatchUpdate::ColumnTable [GOOD] >> TPQCachingProxyTest::MultipleSessions [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::TestPublishAndForget [GOOD] Test command err: 2025-06-24T21:29:23.340479Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:29:23.340606Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:29:23.362561Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:23.362723Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session1:1 with generation 1 2025-06-24T21:29:23.362848Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:171: Direct read cache: staged direct read id 1 for session: session1 2025-06-24T21:29:23.362892Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 1 for session session1, Generation: 1 2025-06-24T21:29:23.363037Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:218: Direct read cache: forget read: 1 for session session1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::OutdatedSession [GOOD] Test command err: 2025-06-24T21:29:23.340438Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:29:23.340548Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:29:23.374553Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:23.374684Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session1:1 with generation 1 2025-06-24T21:29:23.374780Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:171: Direct read cache: staged direct read id 1 for session: session1 2025-06-24T21:29:23.374818Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 1 for session session1, Generation: 1 2025-06-24T21:29:23.374925Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:293: Direct read cache: registered server session: session1:1 with generation 2, killed existing session with older generation ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::TableWithIndex [GOOD] Test command err: Trying to start YDB, gRPC: 24080, MsgBus: 61467 2025-06-24T21:29:10.411963Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631107388164872:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:10.413068Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002bc7/r3tmp/tmpd9mpe7/pdisk_1.dat 2025-06-24T21:29:10.923355Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:10.945891Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631107388164754:2079] 1750800550360836 != 1750800550360839 2025-06-24T21:29:10.967825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:10.967928Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:10.972364Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24080, node 1 2025-06-24T21:29:11.140673Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:11.140702Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:11.140708Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:11.140812Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:11.411935Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:61467 TClient is connected to server localhost:61467 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.110648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:29:12.175046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.370962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.607296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.742098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.486840Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631124568035574:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.487004Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.800883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.855252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.923998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.960401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.033623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.095257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.174545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.263638Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631128863003545:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.263705Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.263770Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631128863003550:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.267921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:15.278858Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631128863003552:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:15.351858Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631128863003603:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:15.404607Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631107388164872:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.404692Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:16.631003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.706919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.777758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.853814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::TestDeregister [GOOD] Test command err: 2025-06-24T21:29:23.340445Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:29:23.340572Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:29:23.363857Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:23.363952Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session1:1 with generation 1 2025-06-24T21:29:23.364004Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session2:1 with generation 1 2025-06-24T21:29:23.364101Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: session1 
------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::TestWrongSessionOrGeneration [GOOD] Test command err: 2025-06-24T21:29:23.340445Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:29:23.340565Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:29:23.360946Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:23.361874Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session1:1 with generation 2 2025-06-24T21:29:23.361975Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:171: Direct read cache: staged direct read id 1 for session: session1 2025-06-24T21:29:23.362026Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 1 for session session1, Generation: 2 2025-06-24T21:29:23.362116Z node 1 :PQ_READ_PROXY INFO: caching_service.cpp:297: Direct read cache: attempted to register server session: session1:1 with stale generation 1, ignored 2025-06-24T21:29:23.362165Z node 1 :PQ_READ_PROXY ALERT: caching_service.cpp:159: Direct read cache: tried to stage direct read for session session1 with generation 1, previously had this session with generation 2. Data ignored 2025-06-24T21:29:23.362209Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 1 for session session1, Generation: 1 2025-06-24T21:29:23.362278Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:218: Direct read cache: forget read: 1 for session session1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::TableWithIndex [GOOD] Test command err: Trying to start YDB, gRPC: 17134, MsgBus: 22944 2025-06-24T21:29:10.374358Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631106762240970:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:10.374498Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002bb8/r3tmp/tmplpmOc8/pdisk_1.dat 2025-06-24T21:29:11.126567Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:11.126653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:11.156237Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:11.158352Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:11.159319Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631106762240937:2079] 1750800550367960 != 1750800550367963 TServer::EnableGrpc on GrpcPort 17134, node 1 2025-06-24T21:29:11.291179Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:11.291201Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize 
from file: (empty maybe) 2025-06-24T21:29:11.291208Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:11.291339Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:11.403852Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22944 TClient is connected to server localhost:22944 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.115790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:12.152352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:29:12.167264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.396212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.591035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:29:12.680702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.498093Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631123942111753:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.498194Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.829875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.884737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.928731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.968336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.007791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.056546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.101940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.198158Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631128237079709:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.198322Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.198502Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631128237079714:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.202327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:15.211267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-24T21:29:15.212791Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631128237079716:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:29:15.284564Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631128237079767:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:15.375370Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631106762240970:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.375424Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:16.625694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.713092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.797082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.751412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TContinuousBackupTests::Basic ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::MultipleSessions [GOOD] Test command err: 2025-06-24T21:29:23.805057Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1101: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-24T21:29:23.805164Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-24T21:29:23.822037Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:23.822170Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session1:1 with generation 1 2025-06-24T21:29:23.822271Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:171: Direct read cache: staged direct read id 1 for session: session1 2025-06-24T21:29:23.822332Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:171: Direct read cache: staged direct 
read id 2 for session: session1 2025-06-24T21:29:23.822374Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 1 for session session1, Generation: 1 2025-06-24T21:29:23.822456Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 2 for session session1, Generation: 1 2025-06-24T21:29:23.822541Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session2:1 with generation 2 2025-06-24T21:29:23.822602Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:171: Direct read cache: staged direct read id 3 for session: session2 2025-06-24T21:29:23.822662Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 3 for session session2, Generation: 2 >> TVectorIndexTests::CreateTableMultiColumn >> KqpBatchUpdate::UnknownColumn [GOOD] |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest |98.2%| [TA] $(B)/ydb/core/persqueue/dread_cache_service/ut/test-results/unittest/{meta.json ... results_accumulator.log} |98.2%| [TA] {RESULT} $(B)/ydb/core/persqueue/dread_cache_service/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::ColumnTable [GOOD] Test command err: Trying to start YDB, gRPC: 9233, MsgBus: 17047 2025-06-24T21:29:10.395912Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631108144591190:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:10.396008Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002b8d/r3tmp/tmpCfCaAG/pdisk_1.dat 2025-06-24T21:29:10.813708Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631108144591167:2079] 1750800550393350 != 1750800550393353 2025-06-24T21:29:10.855545Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:10.902579Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:10.906794Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:10.912194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9233, node 1 2025-06-24T21:29:11.143516Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:11.143548Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:11.143556Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:11.143694Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:11.417327Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17047 TClient is connected to server 
localhost:17047 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.172363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:12.188084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:29:12.211095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.489692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.727497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.825726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.385250Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631125324462003:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.385385Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.804025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.889107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.927736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.960323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.001333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.058658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.096199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.169879Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631129619429963:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.169977Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.170255Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631129619429968:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.175424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:15.190022Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631129619429970:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:29:15.291853Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631129619430021:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:15.399251Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631108144591190:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.399319Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:16.647941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_run ... R: kqp_write_actor.cpp:3004: SelfId: [1:7519631151094269707:2477], SessionActorId: [1:7519631151094269687:2477], statusCode=BAD_REQUEST. Issue=
: Error: Bad request. Table: `/Root/TestOlap`., code: 2017
: Error: not initialized lock info in commit message, code: 2017 . sessionActorId=[1:7519631151094269687:2477]. isRollback=0 2025-06-24T21:29:22.605383Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519631151094269700:2477] TxId: 281474976715681. Ctx: { TraceId: 01jyhxgvjn8qapjyy1e894vnck, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTM0NWEyZDMtYTIxZDFiOWItNmFkMTYyOGMtNTY4YzhlMTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-24T21:29:22.605484Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519631151094269694:2477] TxId: 281474976715678. Ctx: { TraceId: 01jyhxgvjn8qapjyy1e894vnck, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTM0NWEyZDMtYTIxZDFiOWItNmFkMTYyOGMtNTY4YzhlMTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-24T21:29:22.605586Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519631151094269692:2477] TxId: 281474976715677. Ctx: { TraceId: 01jyhxgvjn8qapjyy1e894vnck, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTM0NWEyZDMtYTIxZDFiOWItNmFkMTYyOGMtNTY4YzhlMTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-24T21:29:22.605693Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519631151094269702:2477] TxId: 281474976715682. Ctx: { TraceId: 01jyhxgvjn8qapjyy1e894vnck, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTM0NWEyZDMtYTIxZDFiOWItNmFkMTYyOGMtNTY4YzhlMTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-24T21:29:22.605892Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519631138209365380:2509];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.606013Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715681 2025-06-24T21:29:22.606056Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519631138209365380:2509];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.606107Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715678 2025-06-24T21:29:22.606137Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519631138209365380:2509];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.606182Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715682 2025-06-24T21:29:22.606214Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519631138209365380:2509];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.606260Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715677 2025-06-24T21:29:22.606293Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519631138209365380:2509];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.606337Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715683 2025-06-24T21:29:22.606371Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519631138209365380:2509];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.606414Z node 1 :TX_COLUMNSHARD WARN: 
ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715676 2025-06-24T21:29:22.606457Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519631138209365380:2509];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.606504Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715679 2025-06-24T21:29:22.606536Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519631138209365380:2509];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.606582Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715684 2025-06-24T21:29:22.606615Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519631138209365380:2509];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.606661Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715685 2025-06-24T21:29:22.606694Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519631138209365380:2509];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.606753Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631138209365790:2558];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.606798Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631138209365790:2558];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.606843Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631138209365790:2558];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.606882Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: 
tablet_id=72075186224037931;self_id=[1:7519631138209365790:2558];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.606920Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631138209365790:2558];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.606961Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631138209365790:2558];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.607002Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631138209365790:2558];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.607046Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631138209365790:2558];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.607117Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631138209365790:2558];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.607200Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631138209365790:2558];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:22.624207Z node 1 :KQP_EXECUTER ERROR: kqp_partitioned_executer.cpp:886: [PARTITIONED] ActorId: [1:7519631151094269687:2477], ActorState: AbortState, INTERNAL_ERROR: 2025-06-24T21:29:22.626446Z node 1 :KQP_SESSION ERROR: kqp_session_actor.cpp:2561: SessionId: ydb://session/3?node_id=1&id=ZTM0NWEyZDMtYTIxZDFiOWItNmFkMTYyOGMtNTY4YzhlMTA=, ActorId: [1:7519631133914397593:2477], ActorState: CleanupState, TraceId: 01jyhxgvjn8qapjyy1e894vnck, Failed to cleanup: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::UnknownColumn [GOOD] Test command err: Trying to start YDB, gRPC: 14928, MsgBus: 31547 2025-06-24T21:29:19.076128Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631142888555083:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:19.076230Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/2btm/00295b/r3tmp/tmpjmUViA/pdisk_1.dat 2025-06-24T21:29:19.510483Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14928, node 1 2025-06-24T21:29:19.541695Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:19.555209Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:19.577835Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:19.616082Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:19.616114Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:19.616123Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:19.616263Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31547 2025-06-24T21:29:20.083728Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31547 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:20.195853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:20.231887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:29:20.406063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:20.549831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:20.636211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:22.248078Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631155773458579:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:22.248199Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:22.559626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:22.599870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:22.632908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:22.663325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:22.730932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:22.807192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:22.844988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:22.906198Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631155773459242:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:22.906298Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:22.906505Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631155773459247:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:22.911016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:22.925817Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631155773459249:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:29:23.004514Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631155773459300:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:24.076275Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631142888555083:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:24.076341Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:24.144043Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631164363394170:2479], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:3:34: Error: At lambda, At function: Coalesce, At function: And
:4:41: Error: At function: ==
:4:27: Error: At function: Member
:4:27: Error: Member not found: UnknownColumn 2025-06-24T21:29:24.146241Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NTU4MzAxY2QtZWNiYTg4NGUtMWNkYjU5ZWItZmE0OTc5YjA=, ActorId: [1:7519631164363394159:2472], ActorState: ExecuteState, TraceId: 01jyhxgzvw24tm81xkkwc0ejt5, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-24T21:29:24.198384Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631164363394179:2483], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:4:43: Error: At function: KiUpdateTable!
:4:43: Error: Column 'UnknownColumn' does not exist in table '/Root/Test'., code: 2017 2025-06-24T21:29:24.198617Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NTU4MzAxY2QtZWNiYTg4NGUtMWNkYjU5ZWItZmE0OTc5YjA=, ActorId: [1:7519631164363394159:2472], ActorState: ExecuteState, TraceId: 01jyhxgzxzdcd8sgxf5pczr1xr, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: |98.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> TVectorIndexTests::CreateTableMultiColumn [GOOD] |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::Drop-Reboots+GenerateInternalPathId >> TAsyncIndexTests::SplitMainWithReboots[PipeResets] >> TColumnShardTestSchema::CreateTable+Reboots-GenerateInternalPathId ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTableMultiColumn [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:29:25.075048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:29:25.075164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:29:25.075208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:29:25.075240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:29:25.075285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:29:25.075337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:29:25.075412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:29:25.075472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:29:25.076235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:29:25.076593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:29:25.157553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:29:25.157611Z 
node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:25.173568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:29:25.177857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:29:25.178054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:29:25.187168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:29:25.187353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:29:25.187949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:29:25.188316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:29:25.190600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:29:25.190809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:29:25.192034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:29:25.192116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:29:25.192242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:29:25.192296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:29:25.192338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:29:25.192489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:29:25.198957Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:29:25.317652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:29:25.317911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:25.318191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 
72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:29:25.318245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:29:25.318462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:29:25.318554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:29:25.320936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:29:25.321157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:29:25.321364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:25.321424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:29:25.321486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:29:25.321519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:29:25.323785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:25.323873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:29:25.323935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:29:25.326895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:25.326953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:25.327014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:29:25.327091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:29:25.330526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } 
CoordinatorID: 72057594046316545 2025-06-24T21:29:25.334645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:29:25.334819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:29:25.335656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:29:25.335773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:29:25.335810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:29:25.336050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:29:25.336098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:29:25.336262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:29:25.336359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:29:25.338482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:29:25.338531Z node 1 :FLAT_TX_SCHEMESHARD ... 
nNames: "covered1" DataColumnNames: "covered2" DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 
ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:29:25.727115Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/idx_vector/indexImplLevelTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:29:25.727382Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/idx_vector/indexImplLevelTable" took 261us result status StatusSuccess 2025-06-24T21:29:25.727867Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/vectors/idx_vector/indexImplLevelTable" PathDescription { Self { Name: "indexImplLevelTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplLevelTable" Columns { Name: "__ydb_parent" Type: "Uint64" TypeId: 4 Id: 1 NotNull: true IsBuildInProgress: false } Columns { Name: "__ydb_id" Type: "Uint64" TypeId: 4 Id: 2 NotNull: true IsBuildInProgress: false } Columns { Name: "__ydb_centroid" Type: "String" TypeId: 4097 Id: 3 NotNull: true IsBuildInProgress: false } KeyColumnNames: "__ydb_parent" KeyColumnNames: "__ydb_id" KeyColumnIds: 1 KeyColumnIds: 2 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 
Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:29:25.729641Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/idx_vector/indexImplPostingTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:29:25.729881Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/idx_vector/indexImplPostingTable" took 248us result status StatusSuccess 2025-06-24T21:29:25.730330Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/vectors/idx_vector/indexImplPostingTable" PathDescription { Self { Name: "indexImplPostingTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplPostingTable" Columns { Name: "__ydb_parent" Type: "Uint64" TypeId: 4 Id: 1 NotNull: true IsBuildInProgress: false } Columns { Name: "id1" Type: "String" TypeId: 4097 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "id2" Type: "String" TypeId: 4097 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: "covered1" Type: "String" TypeId: 4097 Id: 4 NotNull: false IsBuildInProgress: false } Columns { Name: "covered2" Type: "String" TypeId: 4097 Id: 5 NotNull: false IsBuildInProgress: false } KeyColumnNames: "__ydb_parent" KeyColumnNames: "id1" KeyColumnNames: "id2" KeyColumnIds: 1 KeyColumnIds: 2 KeyColumnIds: 3 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 
StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TColumnShardTestSchema::RebootForgetWithLostAnswer >> TContinuousBackupTests::Basic [GOOD] >> TColumnShardTestSchema::ColdTiers >> TPersQueueNewSchemeCacheTest::CheckGrpcWriteNoDC >> TPersqueueControlPlaneTestSuite::TestAddRemoveReadRule >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClass >> TPersQueueNewSchemeCacheTest::TestReadAtTimestamp_3 >> TPersQueueCommonTest::Auth_MultipleUpdateTokenRequestIterationsWithValidToken_GotUpdateTokenResponseForEachRequest >> TPersQueueCommonTest::TestLimiterLimitsWithBlobsRateLimit >> TPersQueueCommonTest::TestWriteWithRateLimiterWithBlobsRateLimit [GOOD] >> TPersQueueCommonTest::TestWriteWithRateLimiterWithUserPayloadRateLimit |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup/unittest >> TContinuousBackupTests::Basic [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:29:25.335378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:29:25.335494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:29:25.335539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:29:25.335582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:29:25.339236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: 
OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:29:25.339339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:29:25.339461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:29:25.339633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:29:25.340464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:29:25.351280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:29:25.452059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:29:25.452111Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:25.475659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:29:25.481502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:29:25.481825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:29:25.497730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:29:25.498081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:29:25.500294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:29:25.500801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:29:25.512345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:29:25.513690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:29:25.524943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:29:25.525079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:29:25.525293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:29:25.525362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:29:25.525483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: 
TTxServerlessStorageBilling.Complete 2025-06-24T21:29:25.525694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:29:25.534040Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:29:25.657311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:29:25.659113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:25.660547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:29:25.660632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:29:25.661993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:29:25.662126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:29:25.665915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:29:25.666845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:29:25.667109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:25.667282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:29:25.667330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:29:25.667368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:29:25.671569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:25.671646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 
2025-06-24T21:29:25.671690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:29:25.675771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:25.675831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:25.675875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:29:25.675931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:29:25.680566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:29:25.687895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:29:25.690246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:29:25.691490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:29:25.691645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:29:25.691686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:29:25.693365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:29:25.693488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:29:25.693746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:29:25.693910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:29:25.696736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard 
DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:29:25.696788Z node 1 :FLAT_TX_SCHEMESHARD ... rd: 72057594046678944, cookie: 104 2025-06-24T21:29:26.680357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6365: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000005 OrderId: 104 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 855 } } CommitVersion { Step: 5000005 TxId: 104 } 2025-06-24T21:29:26.680420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409546, partId: 0 2025-06-24T21:29:26.680610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000005 OrderId: 104 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 855 } } CommitVersion { Step: 5000005 TxId: 104 } 2025-06-24T21:29:26.680736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000005 OrderId: 104 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 855 } } CommitVersion { Step: 5000005 TxId: 104 } FAKE_COORDINATOR: Erasing txId 104 2025-06-24T21:29:26.682170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5592: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 310 RawX2: 4294969591 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-06-24T21:29:26.682228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1795: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409546, partId: 0 2025-06-24T21:29:26.682419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:636: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Source { RawX1: 310 RawX2: 4294969591 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-06-24T21:29:26.682484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1046: NTableState::TProposedWaitParts operationId# 104:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-24T21:29:26.682580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1050: NTableState::TProposedWaitParts operationId# 104:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 310 RawX2: 4294969591 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-06-24T21:29:26.682673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:669: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 104:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:29:26.682724Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:29:26.682782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 104:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-24T21:29:26.682829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 104:0 129 -> 240 2025-06-24T21:29:26.689159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:29:26.692051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:29:26.692508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-24T21:29:26.692568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-24T21:29:26.692684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 3/3 2025-06-24T21:29:26.692737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-24T21:29:26.692781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#104:0 progress is 3/3 2025-06-24T21:29:26.692818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-24T21:29:26.692881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 104, ready parts: 3/3, is published: true 2025-06-24T21:29:26.692963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:338:2315] message: TxId: 104 2025-06-24T21:29:26.693017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-24T21:29:26.693070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:0 2025-06-24T21:29:26.693149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:0 2025-06-24T21:29:26.693315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:29:26.693364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:1 2025-06-24T21:29:26.693387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:1 2025-06-24T21:29:26.693423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:29:26.693449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 104:2 2025-06-24T21:29:26.693469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 104:2 
2025-06-24T21:29:26.693538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-24T21:29:26.693972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:29:26.694028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-24T21:29:26.694151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-24T21:29:26.694208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:29:26.694251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:29:26.708244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-24T21:29:26.708331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:735:2647] 2025-06-24T21:29:26.708641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 104 2025-06-24T21:29:26.709178Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/continuousBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:29:26.709834Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/continuousBackupImpl" took 426us result status StatusPathDoesNotExist 2025-06-24T21:29:26.710003Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/continuousBackupImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 2])" Path: "/MyRoot/Table/continuousBackupImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:29:26.710537Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" Options { ReturnPartitioningInfo: false 
ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:29:26.710745Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/continuousBackupImpl/streamImpl" took 180us result status StatusPathDoesNotExist 2025-06-24T21:29:26.711005Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/continuousBackupImpl/streamImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 2])" Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |98.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> TAsyncIndexTests::CdcAndMergeWithReboots[TabletReboots] |98.3%| [TA] $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/test-results/unittest/{meta.json ... results_accumulator.log} |98.3%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/test-results/unittest/{meta.json ... results_accumulator.log} >> TColumnShardTestSchema::RebootEnableColdTiersAfterNoEviction |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> KqpBatchDelete::ColumnTable [GOOD] >> TColumnShardTestSchema::CreateTable+Reboots-GenerateInternalPathId [GOOD] >> TAsyncIndexTests::SplitMainWithReboots[TabletReboots] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::CreateTable+Reboots-GenerateInternalPathId [GOOD] Test command err: 2025-06-24T21:29:26.726357Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:26.756465Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:26.756822Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:26.764123Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:26.764375Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:26.764644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:26.764775Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:26.764916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:26.765055Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:26.765181Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:26.765333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:26.765453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:26.765562Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:26.765692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:26.802209Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:26.802431Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:26.802496Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:26.802720Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:26.802938Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:26.803040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:26.803099Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:26.803229Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:26.803304Z node 
1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:26.803358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:26.803424Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:26.803621Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:26.803704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:26.803761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:26.803797Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:26.803917Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:26.803984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:26.804050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:26.804083Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:26.804147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:26.804189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:26.804240Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:26.804555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:26.804614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:26.804649Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:26.804880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:26.804940Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:26.804975Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:26.805119Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:26.805171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:26.805210Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:26.805321Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:26.805410Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:26.805456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:26.805487Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:26.806017Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=66; 2025-06-24T21:29:26.806146Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=46; 2025-06-24T21:29:26.806235Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=37; 2025-06-24T21:29:26.806347Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=53; 2025-06-24T21:29:26.806457Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:29:26.806565Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:29:26.806613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:29:26.806675Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... blet_id=9437184;tx_id=119;this=88923004925888;method=TTxController::StartProposeOnExecute;tx_info=119:TX_KIND_SCHEMA;min=1750800567799;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=20;result=not_found; 2025-06-24T21:29:28.374550Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:322:2330];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=119;this=88923004925888;method=TTxController::StartProposeOnExecute;tx_info=119:TX_KIND_SCHEMA;min=1750800567799;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;fline=schema.h:38;event=sync_schema; 2025-06-24T21:29:28.387167Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750800567799;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;this=88923004925888;op_tx=119:TX_KIND_SCHEMA;min=1750800567799;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750800567799;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_this=89129166063296;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-24T21:29:28.387257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750800567799;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;this=88923004925888;op_tx=119:TX_KIND_SCHEMA;min=1750800567799;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750800567799;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_this=89129166063296;method=TTxController::FinishProposeOnComplete;tx_id=119;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:157:2179]; 2025-06-24T21:29:28.387298Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750800567799;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;this=88923004925888;op_tx=119:TX_KIND_SCHEMA;min=1750800567799;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750800567799;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_this=89129166063296;method=TTxController::FinishProposeOnComplete;tx_id=119;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=119; 2025-06-24T21:29:28.387671Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-24T21:29:28.387797Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750800567799 at tablet 9437184, mediator 0 2025-06-24T21:29:28.387846Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[4] execute at tablet 9437184 2025-06-24T21:29:28.388152Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=20;result=not_found; 2025-06-24T21:29:28.388253Z node 1 :TX_COLUMNSHARD INFO: ctor_logger.h:56: EnsureTable for pathId: {internal: 20, ss: 20} ttl settings: { Version: 1 } at tablet 9437184 2025-06-24T21:29:28.388317Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tables_manager.cpp:304;method=RegisterTable;path_id=20; 2025-06-24T21:29:28.388371Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=column_engine.h:144;event=RegisterTable;path_id=20; 2025-06-24T21:29:28.388798Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=20; 2025-06-24T21:29:28.388898Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tx_controller.cpp:215;event=finished_tx;tx_id=119; 2025-06-24T21:29:28.403732Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[4] complete at tablet 9437184 CreateTable: { SeqNo { Generation: 20 } EnsureTables { Tables { PathId: 21 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4609 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { Version: 1 } } } } 2025-06-24T21:29:28.405362Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:322:2330];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=120;this=88923004928576;method=TTxController::StartProposeOnExecute;tx_info=120:TX_KIND_SCHEMA;min=1750800567802;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-24T21:29:28.423967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=120:TX_KIND_SCHEMA;min=1750800567802;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;this=88923004928576;op_tx=120:TX_KIND_SCHEMA;min=1750800567802;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:157:2179]; 2025-06-24T21:29:28.424038Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=120:TX_KIND_SCHEMA;min=1750800567802;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;this=88923004928576;op_tx=120:TX_KIND_SCHEMA;min=1750800567802;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=120; CreateTable: { SeqNo { Generation: 21 } EnsureTables { 
Tables { PathId: 22 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4610 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { Version: 1 } } } } 2025-06-24T21:29:28.424973Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:322:2330];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=121;this=88923004930368;method=TTxController::StartProposeOnExecute;tx_info=121:TX_KIND_SCHEMA;min=1750800567803;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-24T21:29:28.440248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=121:TX_KIND_SCHEMA;min=1750800567803;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;this=88923004930368;op_tx=121:TX_KIND_SCHEMA;min=1750800567803;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:157:2179]; 2025-06-24T21:29:28.440346Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=121:TX_KIND_SCHEMA;min=1750800567803;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;this=88923004930368;op_tx=121:TX_KIND_SCHEMA;min=1750800567803;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=121; CreateTable: { SeqNo { Generation: 22 } EnsureTables { Tables { PathId: 23 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4612 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: 
"__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { Version: 1 } } } } 2025-06-24T21:29:28.441791Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:322:2330];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=122;this=88923004932160;method=TTxController::StartProposeOnExecute;tx_info=122:TX_KIND_SCHEMA;min=1750800567805;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-24T21:29:28.461156Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=122:TX_KIND_SCHEMA;min=1750800567805;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;this=88923004932160;op_tx=122:TX_KIND_SCHEMA;min=1750800567805;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:157:2179]; 2025-06-24T21:29:28.461326Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=122:TX_KIND_SCHEMA;min=1750800567805;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;this=88923004932160;op_tx=122:TX_KIND_SCHEMA;min=1750800567805;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=122; |98.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> TColumnShardTestSchema::Drop+Reboots+GenerateInternalPathId >> TColumnShardTestSchema::TTL-Reboot+Internal-FirstPkColumn |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::ColumnTable [GOOD] Test command err: Trying to start YDB, gRPC: 22658, MsgBus: 15343 2025-06-24T21:29:16.962499Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631131068897793:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:16.962725Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0029a7/r3tmp/tmpQbd4zh/pdisk_1.dat 2025-06-24T21:29:17.414264Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22658, node 1 2025-06-24T21:29:17.529382Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:17.529409Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:17.529422Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:17.529556Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:17.691924Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-24T21:29:17.692042Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:17.699237Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15343 2025-06-24T21:29:17.951315Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15343 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:18.274025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:18.306744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:18.492237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:18.667823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:29:18.751718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:20.321876Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631148248768408:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:20.322007Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:20.614720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:20.644388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:20.670444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:20.702855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:20.732778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:20.789323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:20.838235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:20.903658Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631148248769064:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:20.903734Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:20.903810Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631148248769069:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:20.907257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:20.917978Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631148248769071:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:29:21.001651Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631148248769124:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:21.952873Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631131068897793:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:21.952949Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:21.974897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:29:22.667528Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037953;self_id=[1:7519631156838704415:2483];tablet_id=72075186224037953;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:22.667794Z node 1 :TX_COLUMNSHARD ... write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:27.480342Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631156838704803:2547];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:27.480343Z node 1 :KQP_EXECUTER ERROR: kqp_partitioned_executer.cpp:886: [PARTITIONED] ActorId: [1:7519631169723608795:2472], ActorState: ExecuteState, INTERNAL_ERROR: {
: Error: ExecuteState, from BufferWriteActor by PartitionedExecuterActor } 2025-06-24T21:29:27.480384Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631156838704803:2547];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:27.480426Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631156838704803:2547];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:27.480472Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631156838704803:2547];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:27.480517Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631156838704803:2547];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:27.480584Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519631169723608802:2472] TxId: 281474976715678. Ctx: { TraceId: 01jyhxh0e092efqx6wcxs1dwj0, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDcxMDg0NDEtYmQ1NjllMmUtNWMyMTczNWItMzljZWM5OGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: got error from BufferWriteActor } 2025-06-24T21:29:27.480821Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519631169723608816:2472] TxId: 281474976715685. Ctx: { TraceId: 01jyhxh0e092efqx6wcxs1dwj0, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDcxMDg0NDEtYmQ1NjllMmUtNWMyMTczNWItMzljZWM5OGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-24T21:29:27.480921Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519631169723608814:2472] TxId: 281474976715684. Ctx: { TraceId: 01jyhxh0e092efqx6wcxs1dwj0, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDcxMDg0NDEtYmQ1NjllMmUtNWMyMTczNWItMzljZWM5OGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-24T21:29:27.481010Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519631169723608812:2472] TxId: 281474976715683. Ctx: { TraceId: 01jyhxh0e092efqx6wcxs1dwj0, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDcxMDg0NDEtYmQ1NjllMmUtNWMyMTczNWItMzljZWM5OGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-24T21:29:27.481098Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519631169723608806:2472] TxId: 281474976715680. Ctx: { TraceId: 01jyhxh0e092efqx6wcxs1dwj0, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDcxMDg0NDEtYmQ1NjllMmUtNWMyMTczNWItMzljZWM5OGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-24T21:29:27.481200Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519631169723608810:2472] TxId: 281474976715682. Ctx: { TraceId: 01jyhxh0e092efqx6wcxs1dwj0, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDcxMDg0NDEtYmQ1NjllMmUtNWMyMTczNWItMzljZWM5OGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-24T21:29:27.481308Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519631169723608800:2472] TxId: 281474976715677. Ctx: { TraceId: 01jyhxh0e092efqx6wcxs1dwj0, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDcxMDg0NDEtYmQ1NjllMmUtNWMyMTczNWItMzljZWM5OGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-24T21:29:27.481397Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519631169723608804:2472] TxId: 281474976715679. Ctx: { TraceId: 01jyhxh0e092efqx6wcxs1dwj0, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDcxMDg0NDEtYmQ1NjllMmUtNWMyMTczNWItMzljZWM5OGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-24T21:29:27.481507Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519631169723608808:2472] TxId: 281474976715681. Ctx: { TraceId: 01jyhxh0e092efqx6wcxs1dwj0, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDcxMDg0NDEtYmQ1NjllMmUtNWMyMTczNWItMzljZWM5OGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-24T21:29:27.481630Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519631169723608798:2472] TxId: 281474976715676. Ctx: { TraceId: 01jyhxh0e092efqx6wcxs1dwj0, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDcxMDg0NDEtYmQ1NjllMmUtNWMyMTczNWItMzljZWM5OGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-24T21:29:27.482474Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715677 2025-06-24T21:29:27.482549Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519631156838704566:2492];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:27.482624Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715682 2025-06-24T21:29:27.482665Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519631156838704566:2492];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:27.482732Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715683 2025-06-24T21:29:27.482772Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519631156838704566:2492];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:27.482822Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715684 2025-06-24T21:29:27.482861Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519631156838704566:2492];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:27.482912Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715685 2025-06-24T21:29:27.482948Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519631156838704566:2492];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:27.483014Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631156838704803:2547];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:27.483070Z node 1 :TX_COLUMNSHARD_WRITE WARN: 
log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631156838704803:2547];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:27.483181Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631156838704803:2547];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:27.483230Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631156838704803:2547];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:27.483297Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519631156838704803:2547];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-24T21:29:27.501721Z node 1 :KQP_EXECUTER ERROR: kqp_partitioned_executer.cpp:886: [PARTITIONED] ActorId: [1:7519631169723608795:2472], ActorState: AbortState, INTERNAL_ERROR: 2025-06-24T21:29:27.504165Z node 1 :KQP_SESSION ERROR: kqp_session_actor.cpp:2561: SessionId: ydb://session/3?node_id=1&id=NDcxMDg0NDEtYmQ1NjllMmUtNWMyMTczNWItMzljZWM5OGI=, ActorId: [1:7519631152543736686:2472], ActorState: CleanupState, TraceId: 01jyhxh0e092efqx6wcxs1dwj0, Failed to cleanup: |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::Drop-Reboots+GenerateInternalPathId [GOOD] >> TColumnShardTestSchema::Drop-Reboots-GenerateInternalPathId |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::Drop-Reboots+GenerateInternalPathId [GOOD] Test command err: 2025-06-24T21:29:26.503849Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:26.532950Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:26.533343Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:26.540944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:26.541171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:26.541416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:26.541521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:26.541646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:26.541760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:26.541862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:26.541973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:26.542062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:26.542202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:26.542313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:26.570687Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:26.570970Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:26.571029Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:26.571241Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:26.571405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:26.571505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:26.571557Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:26.571669Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:26.571735Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:26.571776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:26.571820Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:26.572016Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:26.572090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:26.572136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:26.572183Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:26.572277Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:26.572330Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:26.572374Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:26.572403Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:26.572464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:26.572499Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:26.572539Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:26.572755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:26.572813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:26.572850Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:26.573097Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:26.573175Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:26.573208Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:26.573365Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:26.573416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:26.573446Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:26.573541Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:26.573613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:26.573650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:26.573680Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:26.574151Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=52; 2025-06-24T21:29:26.574240Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=32; 2025-06-24T21:29:26.574335Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=48; 2025-06-24T21:29:26.574424Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=38; 2025-06-24T21:29:26.574517Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:29:26.574627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:29:26.574691Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:29:26.574741Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 1.413113Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:29:31.436124Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=104:TX_KIND_SCHEMA;min=1750800567694;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;this=88923022696256;op_tx=104:TX_KIND_SCHEMA;min=1750800567694;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;int_op_tx=104:TX_KIND_SCHEMA;min=1750800567694;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;int_this=89129165195456;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-24T21:29:31.436270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=104:TX_KIND_SCHEMA;min=1750800567694;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;this=88923022696256;op_tx=104:TX_KIND_SCHEMA;min=1750800567694;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;int_op_tx=104:TX_KIND_SCHEMA;min=1750800567694;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;int_this=89129165195456;method=TTxController::FinishProposeOnComplete;tx_id=104;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:102:2135]; 2025-06-24T21:29:31.436338Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=104:TX_KIND_SCHEMA;min=1750800567694;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;this=88923022696256;op_tx=104:TX_KIND_SCHEMA;min=1750800567694;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;int_op_tx=104:TX_KIND_SCHEMA;min=1750800567694;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;int_this=89129165195456;method=TTxController::FinishProposeOnComplete;tx_id=104;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=104; 2025-06-24T21:29:31.436786Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-24T21:29:31.437011Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750800567694 at tablet 9437184, mediator 0 2025-06-24T21:29:31.437092Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[14] execute at tablet 9437184 2025-06-24T21:29:31.437432Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: DropTable for pathId: {internal: 9438184000001, ss: 1} at tablet 9437184 2025-06-24T21:29:31.437553Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=104;fline=tx_controller.cpp:215;event=finished_tx;tx_id=104; 2025-06-24T21:29:31.456346Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[14] complete at tablet 9437184 2025-06-24T21:29:31.457050Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750800567694:max} readable: {1750800567694:max} at tablet 9437184 2025-06-24T21:29:31.457216Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-24T21:29:31.462936Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800567694:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:29:31.463054Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800567694:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:29:31.463914Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800567694:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-24T21:29:31.464064Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800567694:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-24T21:29:31.465348Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800567694:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:718:2730];trace_detailed=; 2025-06-24T21:29:31.466732Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-24T21:29:31.467028Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-24T21:29:31.467502Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:718:2730];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:29:31.467645Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:718:2730];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:31.467761Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:718:2730];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:31.467810Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:718:2730] finished for tablet 9437184 2025-06-24T21:29:31.468253Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:718:2730];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:712:2724];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ProduceResults"],"t":0.001},{"events":["f_ack","l_ack","f_processing","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.002}],"full":{"a":1750800571465278,"name":"_full_task","f":1750800571465278,"d_finished":0,"c":0,"l":1750800571467871,"d":2593},"events":[{"name":"bootstrap","f":1750800571465649,"d_finished":1584,"c":1,"l":1750800571467233,"d":1584},{"a":1750800571467471,"name":"ack","f":1750800571467471,"d_finished":0,"c":0,"l":1750800571467871,"d":400},{"a":1750800571467447,"name":"processing","f":1750800571467447,"d_finished":0,"c":0,"l":1750800571467871,"d":424},{"name":"ProduceResults","f":1750800571467206,"d_finished":271,"c":2,"l":1750800571467785,"d":271},{"a":1750800571467788,"name":"Finish","f":1750800571467788,"d_finished":0,"c":0,"l":1750800571467871,"d":83}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:31.468343Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:718:2730];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:712:2724];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:29:31.468775Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:718:2730];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:712:2724];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ProduceResults"],"t":0.001},{"events":["f_ack","f_processing","l_ProduceResults","f_Finish"],"t":0.002},{"events":["l_ack","l_processing","l_Finish"],"t":0.003}],"full":{"a":1750800571465278,"name":"_full_task","f":1750800571465278,"d_finished":0,"c":0,"l":1750800571468397,"d":3119},"events":[{"name":"bootstrap","f":1750800571465649,"d_finished":1584,"c":1,"l":1750800571467233,"d":1584},{"a":1750800571467471,"name":"ack","f":1750800571467471,"d_finished":0,"c":0,"l":1750800571468397,"d":926},{"a":1750800571467447,"name":"processing","f":1750800571467447,"d_finished":0,"c":0,"l":1750800571468397,"d":950},{"name":"ProduceResults","f":1750800571467206,"d_finished":271,"c":2,"l":1750800571467785,"d":271},{"a":1750800571467788,"name":"Finish","f":1750800571467788,"d_finished":0,"c":0,"l":1750800571468397,"d":609}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:31.468853Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:718:2730];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:29:31.464022Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-24T21:29:31.468899Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:718:2730];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:29:31.469000Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:718:2730];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; >> TColumnShardTestSchema::TTL+Reboot-Internal+FirstPkColumn >> TColumnShardTestSchema::RebootEnableColdTiersAfterTtl |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |98.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> TPersqueueControlPlaneTestSuite::TestAddRemoveReadRule [GOOD] >> TPersqueueDataPlaneTestSuite::WriteSession >> TPersQueueCommonTest::Auth_MultipleUpdateTokenRequestIterationsWithValidToken_GotUpdateTokenResponseForEachRequest [GOOD] >> TPersQueueCommonTest::Auth_WriteSessionWithValidTokenAndACEAndThenRemoveACEAndSendWriteRequest_SessionClosedWithUnauthorizedErrorAfterSuccessfullWriteResponse >> TPersQueueCommonTest::TestLimiterLimitsWithBlobsRateLimit [GOOD] >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit |98.3%| [TM] {asan, 
default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TColumnShardTestSchema::Drop+Reboots+GenerateInternalPathId [GOOD] >> KqpBatchDelete::Large_2 [GOOD] >> TColumnShardTestSchema::RebootExportAfterFail ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::Drop+Reboots+GenerateInternalPathId [GOOD] Test command err: 2025-06-24T21:29:30.381818Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:30.409798Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:30.410140Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:30.417901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:30.418143Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:30.418401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:30.418530Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:30.418663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:30.418791Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:30.418897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:30.419004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:30.419098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:30.419299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:30.419438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:30.453693Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:30.453984Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:30.454047Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:30.454245Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:30.454433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:30.454508Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:30.454551Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:30.454665Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:30.454725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:30.454775Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:30.454825Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:30.455013Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:30.455095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:30.455140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:30.455201Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:30.455292Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:30.455344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:30.455396Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:30.455425Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:30.455487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:30.455528Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:30.455570Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:30.455869Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:30.455937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:30.455974Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:30.456200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:30.456273Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:30.456311Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:30.456431Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:30.456482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:30.456513Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:30.456608Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:30.456685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:30.456727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:30.456761Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:30.457250Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=48; 2025-06-24T21:29:30.457360Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=44; 2025-06-24T21:29:30.457459Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=50; 2025-06-24T21:29:30.457563Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=49; 2025-06-24T21:29:30.457683Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:29:30.457798Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:29:30.457851Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:29:30.457904Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... umnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:29:35.987929Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:29:35.987977Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:29:35.988019Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:29:35.988087Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:29:35.988213Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=1; 2025-06-24T21:29:35.988322Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800271627;tx_id=18446744073709551615;;current_snapshot_ts=1750800571457; 2025-06-24T21:29:35.988398Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=1;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:29:35.988471Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:29:35.988527Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:29:35.988627Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.999000s; 2025-06-24T21:29:35.988695Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:29:36.095603Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750800571627:max} readable: {1750800571627:max} at tablet 9437184 2025-06-24T21:29:36.095837Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-24T21:29:36.098543Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800571627:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:29:36.098696Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800571627:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:29:36.099764Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800571627:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-24T21:29:36.099946Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800571627:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-24T21:29:36.101275Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800571627:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:829:2815];trace_detailed=; 2025-06-24T21:29:36.102728Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-24T21:29:36.103057Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-24T21:29:36.103854Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:829:2815];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:29:36.104050Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:829:2815];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:36.104185Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:829:2815];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:36.104266Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:829:2815] finished for tablet 9437184 2025-06-24T21:29:36.104847Z 
node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:829:2815];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:822:2809];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","f_ProduceResults"],"t":0.002},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.003}],"full":{"a":1750800576101165,"name":"_full_task","f":1750800576101165,"d_finished":0,"c":0,"l":1750800576104353,"d":3188},"events":[{"name":"bootstrap","f":1750800576101510,"d_finished":1944,"c":1,"l":1750800576103454,"d":1944},{"a":1750800576103821,"name":"ack","f":1750800576103821,"d_finished":0,"c":0,"l":1750800576104353,"d":532},{"a":1750800576103793,"name":"processing","f":1750800576103793,"d_finished":0,"c":0,"l":1750800576104353,"d":560},{"name":"ProduceResults","f":1750800576103408,"d_finished":369,"c":2,"l":1750800576104224,"d":369},{"a":1750800576104229,"name":"Finish","f":1750800576104229,"d_finished":0,"c":0,"l":1750800576104353,"d":124}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:36.104943Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:829:2815];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:822:2809];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:29:36.105411Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:829:2815];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:822:2809];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","f_ProduceResults"],"t":0.002},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.003}],"full":{"a":1750800576101165,"name":"_full_task","f":1750800576101165,"d_finished":0,"c":0,"l":1750800576105028,"d":3863},"events":[{"name":"bootstrap","f":1750800576101510,"d_finished":1944,"c":1,"l":1750800576103454,"d":1944},{"a":1750800576103821,"name":"ack","f":1750800576103821,"d_finished":0,"c":0,"l":1750800576105028,"d":1207},{"a":1750800576103793,"name":"processing","f":1750800576103793,"d_finished":0,"c":0,"l":1750800576105028,"d":1235},{"name":"ProduceResults","f":1750800576103408,"d_finished":369,"c":2,"l":1750800576104224,"d":369},{"a":1750800576104229,"name":"Finish","f":1750800576104229,"d_finished":0,"c":0,"l":1750800576105028,"d":799}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:36.105505Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:829:2815];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:29:36.099900Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-24T21:29:36.105572Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:829:2815];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:29:36.105696Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:829:2815];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; >> TColumnShardTestSchema::Drop-Reboots-GenerateInternalPathId [GOOD] >> KqpWorkload::STOCK [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::Drop-Reboots-GenerateInternalPathId [GOOD] Test command err: 2025-06-24T21:29:32.590538Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:32.618906Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:32.619322Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:32.627023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:32.627298Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:32.627555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:32.627704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:32.627876Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:32.628020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:32.628145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:32.628258Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:32.628363Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:32.628487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:32.628640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:32.658360Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:32.658635Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:32.658696Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:32.658904Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:32.659050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:32.659136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:32.659195Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:32.659289Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:32.659354Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:32.659395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:32.659434Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:32.659667Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:32.659765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:32.659831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:32.659871Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:32.659955Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:32.660007Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:32.660049Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:32.660080Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:32.660136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:32.660172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:32.660210Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:32.660440Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:32.660495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:32.660532Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:32.660738Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:32.660820Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:32.660862Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:32.660982Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:32.661030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:32.661062Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:32.661188Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:32.661274Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:32.661313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:32.661345Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:32.661808Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=56; 2025-06-24T21:29:32.661924Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=52; 2025-06-24T21:29:32.662009Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=31; 2025-06-24T21:29:32.662113Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=42; 2025-06-24T21:29:32.662220Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:29:32.662310Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:29:32.662365Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:29:32.662423Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
00s; 2025-06-24T21:29:37.241633Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:29:37.261794Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=104:TX_KIND_SCHEMA;min=1750800573784;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;this=88923022697600;op_tx=104:TX_KIND_SCHEMA;min=1750800573784;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;int_op_tx=104:TX_KIND_SCHEMA;min=1750800573784;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;int_this=89129165189696;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-24T21:29:37.261911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=104:TX_KIND_SCHEMA;min=1750800573784;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;this=88923022697600;op_tx=104:TX_KIND_SCHEMA;min=1750800573784;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;int_op_tx=104:TX_KIND_SCHEMA;min=1750800573784;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;int_this=89129165189696;method=TTxController::FinishProposeOnComplete;tx_id=104;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:102:2135]; 2025-06-24T21:29:37.261979Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=104:TX_KIND_SCHEMA;min=1750800573784;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;this=88923022697600;op_tx=104:TX_KIND_SCHEMA;min=1750800573784;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;int_op_tx=104:TX_KIND_SCHEMA;min=1750800573784;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;int_this=89129165189696;method=TTxController::FinishProposeOnComplete;tx_id=104;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=104; 2025-06-24T21:29:37.262444Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-24T21:29:37.262714Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750800573784 at tablet 9437184, mediator 0 2025-06-24T21:29:37.262798Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[14] execute at tablet 9437184 2025-06-24T21:29:37.263187Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: DropTable for pathId: {internal: 9438184000001, ss: 1} at tablet 9437184 2025-06-24T21:29:37.263297Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=104;fline=tx_controller.cpp:215;event=finished_tx;tx_id=104; 2025-06-24T21:29:37.282092Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[14] complete at tablet 9437184 2025-06-24T21:29:37.282817Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750800573784:max} readable: {1750800573784:max} at tablet 9437184 2025-06-24T21:29:37.283002Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-24T21:29:37.295456Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800573784:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:29:37.295606Z 
node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800573784:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:29:37.296277Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800573784:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-24T21:29:37.296400Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800573784:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-24T21:29:37.297535Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800573784:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:718:2730];trace_detailed=; 2025-06-24T21:29:37.298730Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-24T21:29:37.298980Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-24T21:29:37.299346Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:718:2730];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:29:37.299473Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:718:2730];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:37.299575Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:718:2730];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:37.299615Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:718:2730] finished for tablet 9437184 2025-06-24T21:29:37.299941Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:718:2730];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:712:2724];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","f_ProduceResults"],"t":0.001},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.002}],"full":{"a":1750800577297477,"name":"_full_task","f":1750800577297477,"d_finished":0,"c":0,"l":1750800577299664,"d":2187},"events":[{"name":"bootstrap","f":1750800577297822,"d_finished":1291,"c":1,"l":1750800577299113,"d":1291},{"a":1750800577299322,"name":"ack","f":1750800577299322,"d_finished":0,"c":0,"l":1750800577299664,"d":342},{"a":1750800577299307,"name":"processing","f":1750800577299307,"d_finished":0,"c":0,"l":1750800577299664,"d":357},{"name":"ProduceResults","f":1750800577299094,"d_finished":229,"c":2,"l":1750800577299596,"d":229},{"a":1750800577299601,"name":"Finish","f":1750800577299601,"d_finished":0,"c":0,"l":1750800577299664,"d":63}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:37.300016Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:718:2730];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:712:2724];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:29:37.300316Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:718:2730];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:712:2724];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","f_ProduceResults"],"t":0.001},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.002}],"full":{"a":1750800577297477,"name":"_full_task","f":1750800577297477,"d_finished":0,"c":0,"l":1750800577300058,"d":2581},"events":[{"name":"bootstrap","f":1750800577297822,"d_finished":1291,"c":1,"l":1750800577299113,"d":1291},{"a":1750800577299322,"name":"ack","f":1750800577299322,"d_finished":0,"c":0,"l":1750800577300058,"d":736},{"a":1750800577299307,"name":"processing","f":1750800577299307,"d_finished":0,"c":0,"l":1750800577300058,"d":751},{"name":"ProduceResults","f":1750800577299094,"d_finished":229,"c":2,"l":1750800577299596,"d":229},{"a":1750800577299601,"name":"Finish","f":1750800577299601,"d_finished":0,"c":0,"l":1750800577300058,"d":457}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:37.300379Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:718:2730];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:29:37.296370Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-24T21:29:37.300421Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:718:2730];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:29:37.300503Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:718:2730];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::Large_2 [GOOD] Test command err: Trying to start YDB, gRPC: 29437, MsgBus: 20328 2025-06-24T21:29:14.440146Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631123692640546:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:14.440202Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002a78/r3tmp/tmp73wxvj/pdisk_1.dat 2025-06-24T21:29:14.799379Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-24T21:29:14.799509Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:14.807588Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:14.811322Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631123692640525:2079] 1750800554438349 != 1750800554438352 2025-06-24T21:29:14.814656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29437, node 1 2025-06-24T21:29:14.905251Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:14.905291Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:14.905299Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:14.905439Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20328 TClient is connected to server localhost:20328 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:29:15.470486Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:15.554815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:15.577302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:29:15.742182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:15.913852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:29:15.980127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:17.944974Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631136577544056:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:17.945092Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:18.336373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.383484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.462138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.493784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.524730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.606535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.649014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.781646Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631140872512026:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:18.784143Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631140872512021:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:18.784224Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:18.785662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:18.798287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-24T21:29:18.798540Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631140872512028:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:29:18.898386Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631140872512081:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:19.462028Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631123692640546:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:19.462076Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:19.991227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... Trying to start YDB, gRPC: 3908, MsgBus: 30532 2025-06-24T21:29:25.949701Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631169358699174:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:25.949751Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002a78/r3tmp/tmpGdxA4z/pdisk_1.dat 2025-06-24T21:29:26.095243Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:26.096178Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631169358699154:2079] 1750800565948979 != 1750800565948982 TServer::EnableGrpc on GrpcPort 3908, node 2 2025-06-24T21:29:26.126684Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:26.126820Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:26.140973Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:26.174522Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:26.174563Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:26.174573Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:26.174700Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30532 TClient is connected to server localhost:30532 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:26.766602Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:26.786504Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:26.869583Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:26.988842Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:27.087893Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:27.171678Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:29.939362Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631186538569985:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:29.939461Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:30.035922Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:30.089876Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:30.131869Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:30.189638Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:30.246850Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:30.334123Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:30.395422Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:30.508227Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631190833537944:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:30.508341Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:30.508638Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631190833537949:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:30.514302Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:30.534849Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631190833537951:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:29:30.613302Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631190833538002:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:30.949764Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519631169358699174:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:30.949842Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:31.990607Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> TColumnShardTestSchema::RebootOneColdTier ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpWorkload::STOCK [GOOD] Test command err: Trying to start YDB, gRPC: 19323, MsgBus: 27532 2025-06-24T21:28:29.163486Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630929723866384:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:29.163655Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ae3/r3tmp/tmpi9mvsz/pdisk_1.dat 2025-06-24T21:28:29.587822Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19323, node 1 2025-06-24T21:28:29.605199Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:29.605332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:29.607467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:28:29.671780Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:29.671814Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:29.671830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:29.672001Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27532 2025-06-24T21:28:30.183292Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27532 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:30.500296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:30.516996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:32.549435Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630942608768881:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.549586Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:32.926463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.044052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:33.626128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:34.050240Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630951198707441:2593], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:34.050337Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:34.050446Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630951198707446:2596], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:34.054681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:34.068760Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630951198707448:2597], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-24T21:28:34.129112Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630951198707499:4887] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:34.163631Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630929723866384:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:34.163726Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:44.531682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:28:44.531716Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded took: 0.650887s took: 0.650688s took: 0.659607s took: 0.640809s took: 0.634413s took: 0.657131s took: 0.659781s took: 0.678043s took: 0.703014s took: 0.715822s took: 6.687316s took: 6.694825s took: 6.705893s took: 6.695806s took: 6.712767s took: 6.719111s took: 6.723452s took: 6.718086s took: 6.724801s took: 6.732539s 2025-06-24T21:29:34.717597Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976711160; took: 7.780690s 2025-06-24T21:29:34.759313Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [1:7519631208896757569:5410], Table: `/Root/stock` ([72057594046644480:2:1]), SessionActorId: [1:7519631178831984959:5410]Got LOCKS BROKEN for table `/Root/stock`. ShardID=72075186224037888, Sink=[1:7519631208896757569:5410].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-24T21:29:34.759982Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519631208896756830:5410], SessionActorId: [1:7519631178831984959:5410], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/stock`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[1:7519631178831984959:5410]. isRollback=0 2025-06-24T21:29:34.760420Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=NWYwMzI2OC02NDc1NzIyOS1mNmIyYmVlYy1mYzc3ODkxMQ==, ActorId: [1:7519631178831984959:5410], ActorState: ExecuteState, TraceId: 01jyhxh3ja5182dnf47sbm4pnn, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [1:7519631208896757376:5410] from: [1:7519631208896756830:5410] 2025-06-24T21:29:34.760520Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519631208896757376:5410] TxId: 281474976711160. Ctx: { TraceId: 01jyhxh3ja5182dnf47sbm4pnn, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWYwMzI2OC02NDc1NzIyOS1mNmIyYmVlYy1mYzc3ODkxMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/stock`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-24T21:29:34.760790Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NWYwMzI2OC02NDc1NzIyOS1mNmIyYmVlYy1mYzc3ODkxMQ==, ActorId: [1:7519631178831984959:5410], ActorState: ExecuteState, TraceId: 01jyhxh3ja5182dnf47sbm4pnn, Create QueryResponse for error on request, msg: 2025-06-24T21:29:34.768477Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976711161; 2025-06-24T21:29:34.773033Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976711167; took: 7.796380s 2025-06-24T21:29:34.777999Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_ABORTED;details=Distributed transaction aborted due to commit failure;tx_id=281474976711160; 2025-06-24T21:29:34.778218Z node 1 :TX_DATASHARD ERROR: datashard.cpp:751: Complete volatile write [1750800574741 : 281474976711160] from 72075186224037889 at tablet 72075186224037889, error: Status: STATUS_ABORTED Issues: { message: "Distributed transaction aborted due to commit failure" issue_code: 2011 severity: 1 } 2025-06-24T21:29:34.778870Z node 1 :KQP_COMPUTE ERROR: kqp_writ ... abletStatus from node 1, TabletId: 72075186224037921 not found 2025-06-24T21:29:36.046149Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037898 not found 2025-06-24T21:29:36.046174Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037915 not found 2025-06-24T21:29:36.046194Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-24T21:29:36.046210Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037895 not found 2025-06-24T21:29:36.046226Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037902 not found 2025-06-24T21:29:36.046258Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037923 not found 2025-06-24T21:29:36.046274Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037912 not found 2025-06-24T21:29:36.046299Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037914 not found 2025-06-24T21:29:36.046321Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037904 not found 2025-06-24T21:29:36.046337Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037927 not found 2025-06-24T21:29:36.046354Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037892 not found 2025-06-24T21:29:36.046371Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 
72075186224037907 not found 2025-06-24T21:29:36.046390Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037905 not found 2025-06-24T21:29:36.046408Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-06-24T21:29:36.046461Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037916 not found 2025-06-24T21:29:36.046491Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037913 not found 2025-06-24T21:29:36.049250Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037901 not found 2025-06-24T21:29:36.049286Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037917 not found 2025-06-24T21:29:36.049304Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037896 not found 2025-06-24T21:29:36.049320Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037920 not found 2025-06-24T21:29:36.049337Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037897 not found 2025-06-24T21:29:36.049355Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037921 not found 2025-06-24T21:29:36.049376Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037910 not found 2025-06-24T21:29:36.060343Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037928 not found 2025-06-24T21:29:36.062827Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037924 not found 2025-06-24T21:29:36.080884Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037904 not found 2025-06-24T21:29:36.088584Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037914 not found 2025-06-24T21:29:36.088639Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037909 not found 2025-06-24T21:29:36.096039Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-24T21:29:36.251895Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037932 not found 2025-06-24T21:29:36.251941Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037936 not found 2025-06-24T21:29:36.251959Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037935 not found 2025-06-24T21:29:36.251977Z node 1 :HIVE WARN: hive_impl.cpp:491: 
HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037963 not found 2025-06-24T21:29:36.252000Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037959 not found 2025-06-24T21:29:36.252017Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037949 not found 2025-06-24T21:29:36.252033Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037958 not found 2025-06-24T21:29:36.252050Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037940 not found 2025-06-24T21:29:36.252067Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037946 not found 2025-06-24T21:29:36.252085Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037961 not found 2025-06-24T21:29:36.349605Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037954 not found 2025-06-24T21:29:36.349649Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037968 not found 2025-06-24T21:29:36.349666Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037947 not found 2025-06-24T21:29:36.349683Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037933 not found 2025-06-24T21:29:36.349701Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037950 not found 2025-06-24T21:29:36.349719Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037931 not found 2025-06-24T21:29:36.349736Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037942 not found 2025-06-24T21:29:36.349759Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037941 not found 2025-06-24T21:29:36.349776Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037957 not found 2025-06-24T21:29:36.349793Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037967 not found 2025-06-24T21:29:36.349812Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037944 not found 2025-06-24T21:29:36.349833Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037937 not found 2025-06-24T21:29:36.349854Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037969 not found 2025-06-24T21:29:36.349874Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 
72075186224037953 not found 2025-06-24T21:29:36.349890Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037956 not found 2025-06-24T21:29:36.352939Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037930 not found 2025-06-24T21:29:36.357821Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037939 not found 2025-06-24T21:29:36.358783Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037965 not found 2025-06-24T21:29:36.358851Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037945 not found 2025-06-24T21:29:36.358873Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037966 not found 2025-06-24T21:29:36.358892Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037934 not found 2025-06-24T21:29:36.358912Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037962 not found 2025-06-24T21:29:36.358940Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037951 not found 2025-06-24T21:29:36.358959Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037938 not found 2025-06-24T21:29:36.389836Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037960 not found 2025-06-24T21:29:36.393858Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037955 not found 2025-06-24T21:29:36.393932Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037952 not found 2025-06-24T21:29:36.393959Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037964 not found 2025-06-24T21:29:36.396543Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037943 not found 2025-06-24T21:29:36.396573Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037948 not found >> KqpBatchDelete::Large_1 [GOOD] |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::EnableColdTiersAfterTtl |98.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::Large_1 [GOOD] Test command err: Trying to start YDB, gRPC: 29512, MsgBus: 8140 2025-06-24T21:29:10.381714Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631107075438071:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:10.390772Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002b9e/r3tmp/tmpgJvr9v/pdisk_1.dat 2025-06-24T21:29:10.878532Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631107075437887:2079] 1750800550359909 != 1750800550359912 2025-06-24T21:29:10.878774Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:10.895181Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:10.895275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:10.904088Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29512, node 1 2025-06-24T21:29:11.143593Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:11.143612Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:11.143617Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:11.143715Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:11.379474Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8140 TClient is connected to server localhost:8140 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.134934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:29:12.149175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:29:12.158366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.368777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.579390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.675624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.338664Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631124255308731:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.338778Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.800270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.852750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.894325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.932170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.970067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.014844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.057210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.171498Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631128550276680:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.171600Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.171851Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631128550276685:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.175959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:15.191126Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631128550276687:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:29:15.288907Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631128550276740:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:15.383459Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631107075438071:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.406450Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:16.585530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... A_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519631196719753811:2165];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002b9e/r3tmp/tmpHO9TPj/pdisk_1.dat 2025-06-24T21:29:31.637216Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:31.708588Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:31.709425Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519631196719753670:2079] 1750800571478311 != 1750800571478314 2025-06-24T21:29:31.724843Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:31.724929Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:31.730694Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9586, node 3 2025-06-24T21:29:31.784190Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:31.784213Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:31.784223Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:31.784359Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1750 TClient is connected to server localhost:1750 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:32.488353Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:32.496519Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:29:32.510623Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:32.521908Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:29:32.615095Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:32.822761Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:32.912374Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:29:35.659287Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519631213899624477:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:35.659395Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:35.724410Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:35.822980Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:35.890038Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:35.937772Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:35.989191Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:36.043728Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:36.094591Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:36.188758Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519631218194592431:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:36.188859Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:36.188946Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519631218194592436:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:36.193866Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:36.208570Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519631218194592438:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:29:36.284234Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519631218194592489:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:36.515279Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519631196719753811:2165];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:36.515351Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:37.677038Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> TPersqueueDataPlaneTestSuite::WriteSession [GOOD] >> KqpBatchUpdate::Large_2 [GOOD] >> TPersQueueCommonTest::Auth_WriteSessionWithValidTokenAndACEAndThenRemoveACEAndSendWriteRequest_SessionClosedWithUnauthorizedErrorAfterSuccessfullWriteResponse [GOOD] >> TPersQueueNewSchemeCacheTest::CheckGrpcWriteNoDC [GOOD] >> TPersQueueNewSchemeCacheTest::CheckGrpcReadNoDC ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersqueueDataPlaneTestSuite::WriteSession [GOOD] Test command err: === Server->StartServer(false); 2025-06-24T21:29:27.647767Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631180104914187:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:27.649157Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:27.731858Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631178132632560:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:27.731911Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:28.078283Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:28.087582Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0023e7/r3tmp/tmpSPkZ3h/pdisk_1.dat 2025-06-24T21:29:28.654764Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:28.658702Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:28.708518Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:28.708887Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:28.714005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:28.714108Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:28.714974Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:29:28.740943Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:29:28.741106Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:28.758957Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:28.763661Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:29:28.781487Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 31572, node 1 2025-06-24T21:29:29.016809Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0023e7/r3tmp/yandexChyGYn.tmp 2025-06-24T21:29:29.016836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0023e7/r3tmp/yandexChyGYn.tmp 2025-06-24T21:29:29.018110Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0023e7/r3tmp/yandexChyGYn.tmp 2025-06-24T21:29:29.018224Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:29.366398Z INFO: TTestServer started on Port 21108 GrpcPort 31572 TClient is connected to server localhost:21108 PQClient connected to localhost:31572 === TenantModeEnabled() = 1 === Init PQ - start server on port 31572 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:29:30.116008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:29:30.116232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.116419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T21:29:30.116440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976710657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T21:29:30.116712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:29:30.116782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:29:30.122189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T21:29:30.122494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T21:29:30.122694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.122747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T21:29:30.122807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-24T21:29:30.122833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 2 -> 3 2025-06-24T21:29:30.127702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.127755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T21:29:30.127782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 3 -> 128 waiting... 
2025-06-24T21:29:30.140008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.140725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.140794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-24T21:29:30.140835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-24T21:29:30.146873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:29:30.151893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:29:30.151922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-24T21:29:30.151963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:29:30.152044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-24T21:29:30.152179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-24T21:29:30.157068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750800570197, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T21:29:30.157241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750800570197 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T21:29:30.157273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-24T21:29:30.157547Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 128 -> 240 2025-06-24T21:29:30.157580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose Handle ... 
node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:916: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2274013473936159597_v1 read init: from# ipv6:[::1]:39528, request# { init_request { topics_read_settings { topic: "/Root/account1/write_topic" } read_only_original: true consumer: "consumer_aba" read_params { max_read_size: 104857600 } } } 2025-06-24T21:29:40.252097Z node 3 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2274013473936159597_v1 auth for : consumer_aba 2025-06-24T21:29:40.254074Z node 3 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2274013473936159597_v1 Handle describe topics response 2025-06-24T21:29:40.254211Z node 3 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2274013473936159597_v1 auth is DEAD 2025-06-24T21:29:40.254288Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2274013473936159597_v1 auth ok: topics# 1, initDone# 0 2025-06-24T21:29:40.255607Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:1196: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2274013473936159597_v1 register session: topic# /Root/account1/write_topic 2025-06-24T21:29:40.256645Z :INFO: [/Root] [/Root] [79ace65d-52b92756-a02d34f7-d6eadffc] [null] Server session id: consumer_aba_3_2_2274013473936159597_v1 2025-06-24T21:29:40.256956Z :DEBUG: [/Root] [/Root] [79ace65d-52b92756-a02d34f7-d6eadffc] [null] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:40.257168Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2274013473936159597_v1 grpc read done: success# 1, data# { read { } } 2025-06-24T21:29:40.257308Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2274013473936159597_v1 got read request: guid# ce224cef-768b7b4a-6c790e32-e899dd24 2025-06-24T21:29:40.262194Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:1315: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2274013473936159597_v1 assign: record# { Partition: 0 TabletId: 72075186224037893 Topic: "write_topic" Generation: 1 Step: 1 Session: "consumer_aba_3_2_2274013473936159597_v1" ClientId: "consumer_aba" PipeClient { RawX1: 7519631234794846303 RawX2: 4503612512274760 } Path: "/Root/account1/write_topic" } 2025-06-24T21:29:40.259256Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037894][write_topic] pipe [3:7519631234794846303:2376] connected; active server actors: 1 2025-06-24T21:29:40.259335Z node 4 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1699: [72075186224037894][write_topic] consumer "consumer_aba" register session for pipe [3:7519631234794846303:2376] session consumer_aba_3_2_2274013473936159597_v1 2025-06-24T21:29:40.259408Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:635: [72075186224037894][write_topic] consumer consumer_aba register readable partition 0 2025-06-24T21:29:40.263229Z node 3 :PQ_READ_PROXY INFO: partition_actor.cpp:1132: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2274013473936159597_v1 INITING TopicId: Topic /Root/account1/write_topic in database: Root, partition 0(assignId:1) 2025-06-24T21:29:40.259467Z 
node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:665: [72075186224037894][write_topic] consumer consumer_aba family created family=1 (Status=Free, Partitions=[0]) 2025-06-24T21:29:40.259512Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:867: [72075186224037894][write_topic] consumer consumer_aba register reading session ReadingSession "consumer_aba_3_2_2274013473936159597_v1" (Sender=[3:7519631234794846300:2376], Pipe=[3:7519631234794846303:2376], Partitions=[], ActiveFamilyCount=0) 2025-06-24T21:29:40.259553Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037894][write_topic] consumer consumer_aba rebalancing was scheduled 2025-06-24T21:29:40.259604Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1255: [72075186224037894][write_topic] consumer consumer_aba balancing. Sessions=1, Families=1, UnradableFamilies=1 [1 (0), ], RequireBalancing=0 [] 2025-06-24T21:29:40.259661Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1302: [72075186224037894][write_topic] consumer consumer_aba balancing family=1 (Status=Free, Partitions=[0]) for ReadingSession "consumer_aba_3_2_2274013473936159597_v1" (Sender=[3:7519631234794846300:2376], Pipe=[3:7519631234794846303:2376], Partitions=[], ActiveFamilyCount=0) 2025-06-24T21:29:40.259731Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:545: [72075186224037894][write_topic] consumer consumer_aba family 1 status Active partitions [0] session "consumer_aba_3_2_2274013473936159597_v1" sender [3:7519631234794846300:2376] lock partition 0 for ReadingSession "consumer_aba_3_2_2274013473936159597_v1" (Sender=[3:7519631234794846300:2376], Pipe=[3:7519631234794846303:2376], Partitions=[], ActiveFamilyCount=1) generation 1 step 1 2025-06-24T21:29:40.259858Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037894][write_topic] consumer consumer_aba start rebalancing. 
familyCount=1, sessionCount=1, desiredFamilyCount=1, allowPlusOne=0 2025-06-24T21:29:40.259889Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037894][write_topic] consumer consumer_aba balancing duration: 0.000259s 2025-06-24T21:29:40.263542Z node 3 :PQ_READ_PROXY INFO: partition_actor.cpp:972: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2274013473936159597_v1 TopicId: Topic /Root/account1/write_topic in database: Root, partition 0(assignId:1) pipe restart attempt 0 pipe creation result: OK TabletId: 72075186224037893 Generation: 1, pipe: [3:7519631234794846306:2379] 2025-06-24T21:29:40.264196Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: consumer_aba_3_2_2274013473936159597_v1:1 with generation 1 2025-06-24T21:29:40.276972Z node 3 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2274013473936159597_v1 TopicId: Topic /Root/account1/write_topic in database: Root, partition 0(assignId:1) initDone 0 event { CmdGetClientOffsetResult { Offset: 0 EndOffset: 1 WriteTimestampMS: 1750800580122 CreateTimestampMS: 1750800580119 SizeLag: 165 WriteTimestampEstimateMS: 1750800580122 ClientHasAnyCommits: false } Cookie: 18446744073709551615 } 2025-06-24T21:29:40.277044Z node 3 :PQ_READ_PROXY INFO: partition_actor.cpp:683: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2274013473936159597_v1 INIT DONE TopicId: Topic /Root/account1/write_topic in database: Root, partition 0(assignId:1) EndOffset 1 readOffset 0 committedOffset 0 2025-06-24T21:29:40.277120Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1413: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2274013473936159597_v1 sending to client partition status Got new read session event: CreatePartitionStream { PartitionStreamId: 1 TopicPath: account1/write_topic Cluster: PartitionId: 0 CommittedOffset: 0 EndOffset: 1 } 2025-06-24T21:29:40.278032Z :INFO: [/Root] [/Root] [79ace65d-52b92756-a02d34f7-d6eadffc] Closing read session. Close timeout: 0.000000s 2025-06-24T21:29:40.278081Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): null:account1/write_topic:0:1:0:0 2025-06-24T21:29:40.278129Z :INFO: [/Root] [/Root] [79ace65d-52b92756-a02d34f7-d6eadffc] Counters: { Errors: 0 CurrentSessionLifetimeMs: 31 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:29:40.278219Z :NOTICE: [/Root] [/Root] [79ace65d-52b92756-a02d34f7-d6eadffc] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-24T21:29:40.278267Z :DEBUG: [/Root] [/Root] [79ace65d-52b92756-a02d34f7-d6eadffc] [null] Abort session to cluster 2025-06-24T21:29:40.279101Z :NOTICE: [/Root] [/Root] [79ace65d-52b92756-a02d34f7-d6eadffc] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:29:40.279114Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2274013473936159597_v1 grpc read done: success# 0, data# { } 2025-06-24T21:29:40.279168Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2274013473936159597_v1 grpc read failed 2025-06-24T21:29:40.279195Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2274013473936159597_v1 grpc closed 2025-06-24T21:29:40.279231Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2274013473936159597_v1 is DEAD 2025-06-24T21:29:40.279557Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: consumer_aba_3_2_2274013473936159597_v1 2025-06-24T21:29:40.280201Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037894][write_topic] pipe [3:7519631234794846303:2376] disconnected; active server actors: 1 2025-06-24T21:29:40.280238Z node 4 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037894][write_topic] pipe [3:7519631234794846303:2376] client consumer_aba disconnected session consumer_aba_3_2_2274013473936159597_v1 2025-06-24T21:29:40.554105Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519631234794846318:2382], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:29:40.554333Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=YmQ5NGI2ZWUtNjU2YjAyOTAtN2VhNjY3Y2ItNTk1ZDFhNmY=, ActorId: [3:7519631234794846316:2381], ActorState: ExecuteState, TraceId: 01jyhxhfx6akg9k91k140kpq92, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:29:40.554682Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:29:40.857076Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519631213320007803:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:40.857155Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TColumnShardTestSchema::CreateTable-Reboots+GenerateInternalPathId ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::Large_2 [GOOD] Test command err: Trying to start YDB, gRPC: 2239, MsgBus: 15121 2025-06-24T21:29:10.406153Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631106855775677:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:10.406219Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002cce/r3tmp/tmprdFqrK/pdisk_1.dat 2025-06-24T21:29:10.969853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:10.969945Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:10.972270Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:10.986279Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:10.993871Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631106855775471:2079] 1750800550362884 != 1750800550362887 TServer::EnableGrpc on GrpcPort 2239, node 1 2025-06-24T21:29:11.151752Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:11.151774Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-24T21:29:11.151785Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:11.151931Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:11.395587Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15121 TClient is connected to server localhost:15121 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.054533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:12.114173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.328577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.541622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:29:12.639349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.208064Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631124035646280:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.208159Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.808474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.855103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.899301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.969992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.004197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.052184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.129803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.210136Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631128330614238:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.210236Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631128330614243:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.210275Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.213745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:15.224397Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631128330614245:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:15.314360Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631128330614296:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:15.407368Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631106855775677:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.407446Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:16.571065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:25.887638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot g ... ROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631177670875556:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:27.463444Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002cce/r3tmp/tmpAoJVVT/pdisk_1.dat 2025-06-24T21:29:27.660873Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:27.663559Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631177670875539:2079] 1750800567453798 != 1750800567453801 2025-06-24T21:29:27.685348Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:27.685467Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:27.688230Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11906, node 2 2025-06-24T21:29:27.879945Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:27.879973Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:27.879984Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:27.880164Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32069 2025-06-24T21:29:28.499557Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:32069 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:28.624641Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:28.635668Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:29:28.654324Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:28.758958Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:29.017561Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:29.119643Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:31.900718Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631194850746348:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:31.900840Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:31.946889Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:31.991811Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:32.031947Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:32.119970Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:32.205905Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:32.271965Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:32.364580Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:32.449784Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631199145714311:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:32.449942Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:32.450132Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631199145714316:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:32.455637Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:32.467240Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519631177670875556:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:32.467316Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:32.472002Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631199145714318:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:29:32.538978Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631199145714371:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:33.848363Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit [GOOD] >> TColumnShardTestSchema::ForgetAfterFail >> TPersQueueNewSchemeCacheTest::TestReadAtTimestamp_3 [GOOD] >> TPersQueueNewSchemeCacheTest::TestReadAtTimestamp_10 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::Auth_WriteSessionWithValidTokenAndACEAndThenRemoveACEAndSendWriteRequest_SessionClosedWithUnauthorizedErrorAfterSuccessfullWriteResponse [GOOD] Test command err: === Server->StartServer(false); 2025-06-24T21:29:27.865700Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631179447388424:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:27.865768Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:28.042408Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631185216120830:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:28.050619Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:28.375419Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:28.381918Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002406/r3tmp/tmppfvkZc/pdisk_1.dat 2025-06-24T21:29:28.865338Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:28.961000Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:28.963076Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:29:28.993926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:28.994053Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:28.998621Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:28.998704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:29.043670Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:29.049107Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:29:29.051612Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:29.053527Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15270, node 1 2025-06-24T21:29:29.259398Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/002406/r3tmp/yandexgZgq50.tmp 2025-06-24T21:29:29.259425Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/002406/r3tmp/yandexgZgq50.tmp 2025-06-24T21:29:29.259610Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/002406/r3tmp/yandexgZgq50.tmp 2025-06-24T21:29:29.259724Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:29.367253Z INFO: TTestServer started on Port 29016 GrpcPort 15270 TClient is connected to server localhost:29016 PQClient connected to localhost:15270 === TenantModeEnabled() = 1 === Init PQ - start server on port 15270 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:29:30.123123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976720657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:29:30.123345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.123542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T21:29:30.123602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976720657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T21:29:30.123867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976720657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:29:30.123943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:29:30.126679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976720657, response: Status: StatusAccepted TxId: 281474976720657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T21:29:30.126970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976720657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T21:29:30.127164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.127214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976720657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T21:29:30.129046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 281474976720657:0 ProgressState no shards to create, do next state 2025-06-24T21:29:30.129097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976720657:0 2 -> 3 2025-06-24T21:29:30.132697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.132766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976720657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T21:29:30.132808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976720657:0 3 -> 128 waiting... 
2025-06-24T21:29:30.135322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.135356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.135373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976720657:0, at tablet# 72057594046644480 2025-06-24T21:29:30.135429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976720657 ready parts: 1/1 2025-06-24T21:29:30.165339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976720657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:29:30.165890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976720657, at schemeshard: 72057594046644480 2025-06-24T21:29:30.165907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976720657, ready parts: 0/1, is published: true 2025-06-24T21:29:30.165941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976720657, at schemeshard: 72057594046644480 2025-06-24T21:29:30.167762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976720657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976720657 msg type: 269090816 2025-06-24T21:29:30.167923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976720657, partId: 4294967295, tablet: 72057594046316545 2025-06-24T21:29:30.171340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750800570218, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T21:29:30.171513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976720657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750800570218 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T21:29:30.171541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976720657:0, at tablet# 72057594046644480 2025-06-24T21:29:30.171897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976720657:0 128 -> 240 2025-06-24T21:29:30.171960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976720657:0, at tablet# 72057594046644480 2025-06-24T21:29:30.172141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId ... 
tition=0) Received event: NActors::IEventHandle ===ModifyAcl BEFORE MODIFY PERMISSIONS 2025-06-24T21:29:40.687990Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root/acc" OperationType: ESchemeOpModifyACL ModifyACL { Name: "topic1" DiffACL: "\n\031\010\001\022\025\032\023test_user_0@builtin" } } TxId: 281474976710666 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:45540" , at schemeshard: 72057594046644480 2025-06-24T21:29:40.688143Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_modify_acl.cpp:33: TModifyACL Propose, path: /Root/acc/topic1, operationId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-24T21:29:40.688248Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5298: ExamineTreeVFS visit path id [OwnerId: 72057594046644480, LocalPathId: 10] name: topic1 type: EPathTypePersQueueGroup state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046644480, LocalPathId: 9] 2025-06-24T21:29:40.688257Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5314: ExamineTreeVFS run path id: [OwnerId: 72057594046644480, LocalPathId: 10] 2025-06-24T21:29:40.688380Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710666:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046644480 2025-06-24T21:29:40.688410Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-24T21:29:40.688486Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710666:0 progress is 1/1 2025-06-24T21:29:40.688496Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710666 ready parts: 1/1 2025-06-24T21:29:40.688511Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710666:0 progress is 1/1 2025-06-24T21:29:40.688519Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710666 ready parts: 1/1 2025-06-24T21:29:40.688557Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 3 2025-06-24T21:29:40.688596Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710666, ready parts: 1/1, is published: false 2025-06-24T21:29:40.688613Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 10], at schemeshard: 72057594046644480 2025-06-24T21:29:40.688622Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710666 ready parts: 1/1 2025-06-24T21:29:40.688633Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710666:0 2025-06-24T21:29:40.688642Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: 
Publication still in progress, tx: 281474976710666, publications: 1, subscribers: 0 2025-06-24T21:29:40.688652Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 281474976710666, [OwnerId: 72057594046644480, LocalPathId: 10], 4 2025-06-24T21:29:40.691574Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976710666, response: Status: StatusSuccess TxId: 281474976710666 SchemeshardId: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T21:29:40.691820Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710666, database: /Root, subject: , status: StatusSuccess, operation: MODIFY ACL, path: /Root/acc/topic1, remove access: -():test_user_0@builtin:- 2025-06-24T21:29:40.691953Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-24T21:29:40.691966Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710666, path id: [OwnerId: 72057594046644480, LocalPathId: 10] 2025-06-24T21:29:40.692107Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-24T21:29:40.692121Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:7519631217794566506:2366], at schemeshard: 72057594046644480, txId: 281474976710666, path id: 10 2025-06-24T21:29:40.693243Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976710666 2025-06-24T21:29:40.693331Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976710666 2025-06-24T21:29:40.693342Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976710666 2025-06-24T21:29:40.693355Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710666, pathId: [OwnerId: 72057594046644480, LocalPathId: 10], version: 4 2025-06-24T21:29:40.693369Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 4 2025-06-24T21:29:40.693445Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976710666, subscribers: 0 ===Wait for session created with token with removed ACE to die2025-06-24T21:29:40.709998Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710666 2025-06-24T21:29:40.943760Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519631213499598641:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:40.943838Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:41.103245Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519631218472782955:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:41.103321Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:41.183970Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519631239269403983:2346], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:29:41.186310Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=OTU1NjMxYzMtYWU4Y2QwYjgtYWY2YjUxMjMtMWMyMzM1MWE=, ActorId: [3:7519631239269403981:2345], ActorState: ExecuteState, TraceId: 01jyhxhgh5cy1bj6gz88jbfh8k, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:29:41.186767Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:29:41.661074Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-24T21:29:41.663873Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: test-group-id|bae2b383-5b4a6849-b96d9cc9-37b0f19f_0 describe result for acl check 2025-06-24T21:29:41.664022Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 2 reason: access to topic 'Topic /Root/acc/topic1 in database: /Root' denied for 'test_user_0@builtin' due to 'no WriteTopic rights', Marker# PQ1125 sessionId: test-group-id|bae2b383-5b4a6849-b96d9cc9-37b0f19f_0 2025-06-24T21:29:41.664336Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: test-group-id|bae2b383-5b4a6849-b96d9cc9-37b0f19f_0 is DEAD 2025-06-24T21:29:41.664658Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison status: UNAUTHORIZED issues { message: "access to topic \'Topic /Root/acc/topic1 in database: /Root\' denied for \'test_user_0@builtin\' due to \'no WriteTopic rights\', Marker# PQ1125" issue_code: 500018 severity: 1 } 2025-06-24T21:29:42.212605Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519631243564371308:2354], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:29:42.212800Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=ZjYzMjAxOS0xNzg0MDlhYy00YTZlY2I0My05MjVkODY4OQ==, ActorId: [3:7519631243564371306:2353], ActorState: ExecuteState, TraceId: 01jyhxhhhdazxv562ay0c2mk8g, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:29:42.213181Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit [GOOD] Test command err: === Server->StartServer(false); 2025-06-24T21:29:27.622429Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631180204771803:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:27.622479Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:27.696653Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631180609793795:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:27.696700Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:28.027879Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0022f8/r3tmp/tmpmgEc1b/pdisk_1.dat 2025-06-24T21:29:28.095611Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:28.543028Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:28.546198Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:28.546286Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:28.548329Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:28.548392Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:28.637579Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 
2025-06-24T21:29:28.643922Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:28.671892Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:29:28.672944Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:28.673332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:28.723263Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 2849, node 1 2025-06-24T21:29:29.016569Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0022f8/r3tmp/yandexCa0rH3.tmp 2025-06-24T21:29:29.016588Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0022f8/r3tmp/yandexCa0rH3.tmp 2025-06-24T21:29:29.017789Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0022f8/r3tmp/yandexCa0rH3.tmp 2025-06-24T21:29:29.017905Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:29.367795Z INFO: TTestServer started on Port 23384 GrpcPort 2849 TClient is connected to server localhost:23384 PQClient connected to localhost:2849 === TenantModeEnabled() = 1 === Init PQ - start server on port 2849 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:29:30.032166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:29:30.032391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.032669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T21:29:30.032714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976710657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T21:29:30.033025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:29:30.033096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:29:30.036123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T21:29:30.036337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T21:29:30.036507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.036552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T21:29:30.036670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-24T21:29:30.036685Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 2 -> 3 2025-06-24T21:29:30.044577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.044629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T21:29:30.044669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 3 -> 128 2025-06-24T21:29:30.046602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 
281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.046644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.046663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-24T21:29:30.046701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 waiting... 2025-06-24T21:29:30.051801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:29:30.056262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-24T21:29:30.057837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-24T21:29:30.059662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:29:30.059684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-24T21:29:30.059736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:29:30.065002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750800570106, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T21:29:30.065181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750800570106 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T21:29:30.065216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-24T21:29:30.065868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 128 -> 240 2025-06-24T21:29:30.065917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-24T21:29:30.066190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72 ... 
rver" ip=ipv6:[::1]:48318 proto=v1 topic=/Root/PQ/account/topic durationSec=0 2025-06-24T21:29:41.844519Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-24T21:29:41.845406Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 4 sessionId: describe result for acl check 2025-06-24T21:29:41.845572Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-24T21:29:41.845584Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-24T21:29:41.845592Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-24T21:29:41.845625Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7519631239466512497:2398] (SourceId=123, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-24T21:29:41.845646Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 4 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-24T21:29:41.846227Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037893] server connected, pipe [3:7519631239466512500:2398], now have 1 active actors on pipe 2025-06-24T21:29:41.846249Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037893 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037893, NodeId 3, Generation: 1 2025-06-24T21:29:41.846278Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic' requestId: 2025-06-24T21:29:41.846300Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-06-24T21:29:41.846377Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 123|34fe5766-55a918cd-fe8a211d-fe2da999_0 generated for partition 0 topic 'PQ/account/topic' owner 123 2025-06-24T21:29:41.846462Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 2025-06-24T21:29:41.846560Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T21:29:41.846689Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic' requestId: 2025-06-24T21:29:41.846709Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-06-24T21:29:41.846793Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T21:29:41.846875Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 4 partition: 0 MaxSeqNo: 2 sessionId: 123|34fe5766-55a918cd-fe8a211d-fe2da999_0 2025-06-24T21:29:41.851292Z :INFO: [] MessageGroupId [123] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750800581851 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:29:41.851414Z :INFO: [] MessageGroupId [123] SessionId [] Write session established. Init response: last_sequence_number: 2 session_id: "123|34fe5766-55a918cd-fe8a211d-fe2da999_0" topic: "PQ/account/topic" 2025-06-24T21:29:41.851771Z :DEBUG: [] MessageGroupId [123] SessionId [123|34fe5766-55a918cd-fe8a211d-fe2da999_0] Write 1 messages with Id from 1 to 1 2025-06-24T21:29:41.851873Z :DEBUG: [] MessageGroupId [123] SessionId [123|34fe5766-55a918cd-fe8a211d-fe2da999_0] Write session: try to update token 2025-06-24T21:29:41.851908Z :DEBUG: [] MessageGroupId [123] SessionId [123|34fe5766-55a918cd-fe8a211d-fe2da999_0] Send 1 message(s) (0 left), first sequence number is 3 2025-06-24T21:29:41.852128Z :INFO: [] MessageGroupId [123] SessionId [123|34fe5766-55a918cd-fe8a211d-fe2da999_0] Write session: close. 
Timeout = 10000 ms 2025-06-24T21:29:41.852410Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: 123|34fe5766-55a918cd-fe8a211d-fe2da999_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-24T21:29:41.852689Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037893 (partition=0) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-06-24T21:29:41.853033Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic' requestId: 2025-06-24T21:29:41.853059Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-06-24T21:29:41.853179Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 1 2025-06-24T21:29:41.853261Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037893 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-24T21:29:41.853410Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic' requestId: 2025-06-24T21:29:41.853423Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-06-24T21:29:41.853461Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2201: [PQ: 72075186224037893] got client message topic: PQ/account/topic partition: 0 SourceId: '\000123' SeqNo: 3 partNo : 0 messageNo: 1 size 372 offset: -1 2025-06-24T21:29:41.853569Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1843: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Send write quota request. Topic: "PQ/account/topic". Partition: 0. Amount: 376. Cookie: 3 2025-06-24T21:29:41.853657Z node 3 :PERSQUEUE DEBUG: partition.cpp:3720: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Got quota. Topic: "PQ/account/topic". Partition: 0: Cookie: 3 2025-06-24T21:29:41.853832Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'PQ/account/topic' partition 0 part blob processing sourceId '\000123' seqNo 3 partNo 0 2025-06-24T21:29:41.854927Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'PQ/account/topic' partition 0 part blob complete sourceId '\000123' seqNo 3 partNo 0 FormedBlobsCount 0 NewHead: Offset 2 PartNo 0 PackedSize 443 count 1 nextOffset 3 batches 1 2025-06-24T21:29:41.855472Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Add new write blob: topic 'PQ/account/topic' partition 0 compactOffset 2,1 HeadOffset 2 endOffset 2 curOffset 3 d0000000000_00000000000000000002_00000_0000000001_00000? size 431 WTime 1750800581855 2025-06-24T21:29:41.855631Z node 3 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T21:29:41.855705Z node 3 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 2 partNo 0 count 1 size 431 2025-06-24T21:29:41.858329Z node 3 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. 
Partition 0 offset 2 count 1 size 431 actorID [3:7519631239466512169:2362] 2025-06-24T21:29:41.858396Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 376 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:29:41.858425Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:29:41.858422Z node 3 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037893' partition 0 offset 2 partno 0 count 1 parts 0 suffix '63' size 431 2025-06-24T21:29:41.858452Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Answering for message sourceid: '\000123', Topic: 'PQ/account/topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-06-24T21:29:41.858610Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037893, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=1293, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:29:41.858637Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-24T21:29:41.858684Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037893 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-24T21:29:41.859419Z :DEBUG: [] MessageGroupId [123] SessionId [123|34fe5766-55a918cd-fe8a211d-fe2da999_0] Write session got write response: sequence_numbers: 3 offsets: 2 already_written: false write_statistics { persist_duration_ms: 3 queued_in_partition_duration_ms: 1 } 2025-06-24T21:29:41.859481Z :DEBUG: [] MessageGroupId [123] SessionId [123|34fe5766-55a918cd-fe8a211d-fe2da999_0] Write session: acknoledged message 1 2025-06-24T21:29:41.953970Z :INFO: [] MessageGroupId [123] SessionId [123|34fe5766-55a918cd-fe8a211d-fe2da999_0] Write session will now close 2025-06-24T21:29:41.954032Z :DEBUG: [] MessageGroupId [123] SessionId [123|34fe5766-55a918cd-fe8a211d-fe2da999_0] Write session: aborting 2025-06-24T21:29:41.954463Z :INFO: [] MessageGroupId [123] SessionId [123|34fe5766-55a918cd-fe8a211d-fe2da999_0] Write session: gracefully shut down, all writes complete 2025-06-24T21:29:41.954511Z :DEBUG: [] MessageGroupId [123] SessionId [123|34fe5766-55a918cd-fe8a211d-fe2da999_0] Write session: destroy 2025-06-24T21:29:41.964778Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: 123|34fe5766-55a918cd-fe8a211d-fe2da999_0 grpc read done: success: 0 data: 2025-06-24T21:29:41.964812Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: 123|34fe5766-55a918cd-fe8a211d-fe2da999_0 grpc read failed 2025-06-24T21:29:41.964843Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: 123|34fe5766-55a918cd-fe8a211d-fe2da999_0 grpc closed 2025-06-24T21:29:41.964857Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: 123|34fe5766-55a918cd-fe8a211d-fe2da999_0 is DEAD 2025-06-24T21:29:41.970970Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037893 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T21:29:41.971261Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037893] server disconnected, pipe [3:7519631239466512500:2398] 
destroyed 2025-06-24T21:29:41.971297Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::DropOwner. >> TPersQueueCommonTest::TestWriteWithRateLimiterWithUserPayloadRateLimit [GOOD] >> ReadSessionImplTest::UsesOnRetryStateDuringRetries >> TColumnShardTestSchema::CreateTable-Reboots+GenerateInternalPathId [GOOD] >> ReadSessionImplTest::DecompressRaw >> ApplyClusterEndpointTest::NoPorts [GOOD] >> ApplyClusterEndpointTest::PortFromCds [GOOD] >> ApplyClusterEndpointTest::PortFromDriver [GOOD] >> BasicUsage::MaxByteSizeEqualZero >> ReadSessionImplTest::UsesOnRetryStateDuringRetries [GOOD] >> RetryPolicy::TWriteSession_TestPolicy >> TColumnShardTestSchema::Drop+Reboots-GenerateInternalPathId >> ReadSessionImplTest::DecompressRaw [GOOD] >> ReadSessionImplTest::DecompressGzip [GOOD] >> ReadSessionImplTest::DecompressZstd [GOOD] >> ReadSessionImplTest::DecompressRawEmptyMessage [GOOD] >> ReadSessionImplTest::DecompressGzipEmptyMessage [GOOD] >> ReadSessionImplTest::DecompressWithSynchronousExecutor [GOOD] >> ReadSessionImplTest::DataReceivedCallbackReal ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::CreateTable-Reboots+GenerateInternalPathId [GOOD] Test command err: 2025-06-24T21:29:43.640673Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:43.667401Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:43.667767Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:43.675877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:43.676135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:43.676454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:43.676626Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:43.676769Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:43.676919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:43.677056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:43.677177Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:43.677315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:43.677423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:43.677563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:43.707017Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:43.707229Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:43.707288Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:43.707492Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:43.707675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:43.707764Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:43.707806Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:43.707905Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:43.707973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:43.708018Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:43.708048Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:43.708230Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:43.708308Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:43.708373Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:43.708411Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:43.708521Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:43.708598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:43.708653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:43.708683Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:43.708743Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:43.708787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:43.708824Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:43.709118Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:43.709172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:43.709230Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:43.709452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:43.709506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:43.709536Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:43.709656Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:43.709709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:43.709738Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:43.709830Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:43.709916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:43.709961Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:43.709991Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:43.710491Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=55; 2025-06-24T21:29:43.710596Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=41; 2025-06-24T21:29:43.710676Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=33; 2025-06-24T21:29:43.710777Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=56; 2025-06-24T21:29:43.710876Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:29:43.710972Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:29:43.711020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:29:43.711068Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
method=TTxController::StartProposeOnExecute;tx_info=119:TX_KIND_SCHEMA;min=1750800584657;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=20;result=not_found; 2025-06-24T21:29:44.928804Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=119;this=88923004878848;method=TTxController::StartProposeOnExecute;tx_info=119:TX_KIND_SCHEMA;min=1750800584657;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;fline=schema.h:38;event=sync_schema; 2025-06-24T21:29:44.941496Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750800584657;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;this=88923004878848;op_tx=119:TX_KIND_SCHEMA;min=1750800584657;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750800584657;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_this=89129165863616;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-24T21:29:44.941576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750800584657;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;this=88923004878848;op_tx=119:TX_KIND_SCHEMA;min=1750800584657;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750800584657;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_this=89129165863616;method=TTxController::FinishProposeOnComplete;tx_id=119;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:157:2179]; 2025-06-24T21:29:44.941620Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750800584657;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;this=88923004878848;op_tx=119:TX_KIND_SCHEMA;min=1750800584657;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750800584657;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=019:0;;int_this=89129165863616;method=TTxController::FinishProposeOnComplete;tx_id=119;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=119; 2025-06-24T21:29:44.941935Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-24T21:29:44.942078Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750800584657 at tablet 9437184, mediator 0 2025-06-24T21:29:44.942124Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[36] execute at tablet 9437184 2025-06-24T21:29:44.942390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=20;result=not_found; 2025-06-24T21:29:44.942480Z node 1 :TX_COLUMNSHARD INFO: ctor_logger.h:56: EnsureTable for pathId: {internal: 9438184000018, ss: 20} ttl settings: { Version: 1 } at tablet 9437184 2025-06-24T21:29:44.942567Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tables_manager.cpp:304;method=RegisterTable;path_id=9438184000018; 2025-06-24T21:29:44.942627Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=column_engine.h:144;event=RegisterTable;path_id=9438184000018; 2025-06-24T21:29:44.943102Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=9438184000018; 2025-06-24T21:29:44.943245Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tx_controller.cpp:215;event=finished_tx;tx_id=119; 2025-06-24T21:29:44.955896Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[36] complete at tablet 9437184 CreateTable: { SeqNo { Generation: 20 } EnsureTables { Tables { PathId: 21 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4609 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { Version: 1 } } } } 2025-06-24T21:29:44.957558Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=120;this=88923004881536;method=TTxController::StartProposeOnExecute;tx_info=120:TX_KIND_SCHEMA;min=1750800584660;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-24T21:29:44.971475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=120:TX_KIND_SCHEMA;min=1750800584660;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;this=88923004881536;op_tx=120:TX_KIND_SCHEMA;min=1750800584660;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:157:2179]; 2025-06-24T21:29:44.971551Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=120:TX_KIND_SCHEMA;min=1750800584660;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;this=88923004881536;op_tx=120:TX_KIND_SCHEMA;min=1750800584660;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=020:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=120; CreateTable: { SeqNo { Generation: 21 } EnsureTables { Tables { PathId: 22 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4610 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: 
"resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { Version: 1 } } } } 2025-06-24T21:29:44.972951Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=121;this=88923004883328;method=TTxController::StartProposeOnExecute;tx_info=121:TX_KIND_SCHEMA;min=1750800584661;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-24T21:29:44.985717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=121:TX_KIND_SCHEMA;min=1750800584661;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;this=88923004883328;op_tx=121:TX_KIND_SCHEMA;min=1750800584661;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:157:2179]; 2025-06-24T21:29:44.985808Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=121:TX_KIND_SCHEMA;min=1750800584661;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;this=88923004883328;op_tx=121:TX_KIND_SCHEMA;min=1750800584661;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=021:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=121; CreateTable: { SeqNo { Generation: 22 } EnsureTables { Tables { PathId: 23 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4612 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } 
} TtlSettings { Version: 1 } } } } 2025-06-24T21:29:44.987137Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:126:2156];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=122;this=88923004885120;method=TTxController::StartProposeOnExecute;tx_info=122:TX_KIND_SCHEMA;min=1750800584663;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-24T21:29:44.999451Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=122:TX_KIND_SCHEMA;min=1750800584663;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;this=88923004885120;op_tx=122:TX_KIND_SCHEMA;min=1750800584663;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:157:2179]; 2025-06-24T21:29:44.999532Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=122:TX_KIND_SCHEMA;min=1750800584663;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;this=88923004885120;op_tx=122:TX_KIND_SCHEMA;min=1750800584663;max=18446744073709551615;plan=0;src=[1:157:2179];cookie=022:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=122; >> MoveTable::EmptyTable ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::TestWriteWithRateLimiterWithUserPayloadRateLimit [GOOD] Test command err: === Server->StartServer(false); 2025-06-24T21:29:27.665409Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631177254725655:2149];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:27.665638Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:27.729317Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631177243475209:2092];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:27.729369Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:28.070076Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00237b/r3tmp/tmp8JXa73/pdisk_1.dat 2025-06-24T21:29:28.085854Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:28.516383Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:28.562755Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:28.562868Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:28.564508Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:28.564566Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:28.591757Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:29:28.591873Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:28.595651Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:28.663678Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 18238, node 1 2025-06-24T21:29:28.791283Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:29.031971Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/00237b/r3tmp/yandexLGMZT7.tmp 2025-06-24T21:29:29.032016Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/00237b/r3tmp/yandexLGMZT7.tmp 2025-06-24T21:29:29.032237Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/00237b/r3tmp/yandexLGMZT7.tmp 2025-06-24T21:29:29.032376Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:29.366758Z INFO: TTestServer started on Port 25562 GrpcPort 18238 TClient is connected to server localhost:25562 PQClient connected to localhost:18238 === TenantModeEnabled() = 1 === Init PQ - start server on port 18238 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:29:30.139496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:29:30.139718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.139918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T21:29:30.139945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976710657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T21:29:30.140203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:29:30.140259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:30.146492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T21:29:30.146694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T21:29:30.146900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.146946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T21:29:30.146963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-24T21:29:30.146977Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 2 -> 3 2025-06-24T21:29:30.150563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:29:30.150585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-24T21:29:30.150621Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:29:30.151601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress 
Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.151678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T21:29:30.151702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 3 -> 128 2025-06-24T21:29:30.153913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.153955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-24T21:29:30.153980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-24T21:29:30.154023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-24T21:29:30.172914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:29:30.176496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-24T21:29:30.176688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-24T21:29:30.180078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750800570225, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T21:29:30.180317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750800570225 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-24T21:29:30.180357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-24T21:29:30.180665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710657:0 128 -> 240 2025-06-24T21:29:30.180710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-24T21:29:30.180876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-24T21:29:30.180941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no 
IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, Loc ... ate: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob processing sourceId '\0001235' seqNo 1 partNo 1 2025-06-24T21:29:43.912975Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob processing sourceId '\0001235' seqNo 1 partNo 2 2025-06-24T21:29:43.920772Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob complete sourceId '\0001235' seqNo 1 partNo 2 FormedBlobsCount 0 NewHead: Offset 0 PartNo 0 PackedSize 7201550 count 6 nextOffset 6 batches 13 2025-06-24T21:29:43.921870Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob processing sourceId '\0001236' seqNo 1 partNo 0 2025-06-24T21:29:43.922973Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob processing sourceId '\0001236' seqNo 1 partNo 1 2025-06-24T21:29:43.923488Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob processing sourceId '\0001236' seqNo 1 partNo 2 2025-06-24T21:29:43.930219Z :INFO: [] MessageGroupId [1236] SessionId [1236|1b1f39d1-e16e061f-775207a-ab551aa1_0] Write session will now close 2025-06-24T21:29:43.930292Z :DEBUG: [] MessageGroupId [1236] SessionId [1236|1b1f39d1-e16e061f-775207a-ab551aa1_0] Write session: aborting 2025-06-24T21:29:43.930776Z :INFO: [] MessageGroupId [1236] SessionId [1236|1b1f39d1-e16e061f-775207a-ab551aa1_0] Write session: gracefully shut down, all writes complete 2025-06-24T21:29:43.930829Z :DEBUG: [] MessageGroupId [1236] SessionId [1236|1b1f39d1-e16e061f-775207a-ab551aa1_0] Write session: destroy DURATION 2.953153s 2025-06-24T21:29:43.948587Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 22 sessionId: 1236|1b1f39d1-e16e061f-775207a-ab551aa1_0 grpc closed 2025-06-24T21:29:43.948622Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 22 sessionId: 1236|1b1f39d1-e16e061f-775207a-ab551aa1_0 is DEAD 2025-06-24T21:29:43.952856Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037899 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T21:29:43.953326Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037899] server disconnected, pipe [1:7519631245974205755:2594] destroyed 2025-06-24T21:29:44.003973Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:96: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob sourceId '\0001236' seqNo 1 partNo 2 result is x0000000000_00000000000000000000_00000_0000000006_00014 size 8225586 2025-06-24T21:29:44.004040Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1117: [PQ: 72075186224037899, Partition: 0, State: StateIdle] writing blob: topic 'PQ/account3/folder1/folder2/topic' partition 0 old key x0000000000_00000000000000000000_00000_0000000006_00014 new key d0000000000_00000000000000000000_00000_0000000006_00014 size 8225586 WTime 1750800584004 2025-06-24T21:29:44.010832Z node 1 :PERSQUEUE 
DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob complete sourceId '\0001236' seqNo 1 partNo 2 FormedBlobsCount 1 NewHead: Offset 6 PartNo 2 PackedSize 176227 count 1 nextOffset 7 batches 1 2025-06-24T21:29:44.011817Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:401: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Add new write blob: topic 'PQ/account3/folder1/folder2/topic' partition 0 compactOffset 6,1 HeadOffset 0 endOffset 0 curOffset 7 d0000000000_00000000000000000006_00002_0000000001_00000| size 176217 WTime 1750800584011 2025-06-24T21:29:44.012200Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T21:29:44.012269Z node 1 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 0 partNo 0 count 6 size 8225586 2025-06-24T21:29:44.012314Z node 1 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 6 partNo 2 count 1 size 176217 2025-06-24T21:29:44.012357Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037899, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-24T21:29:44.046565Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037893, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:29:44.053673Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 0 count 6 size 8225586 actorID [1:7519631233089303396:2522] 2025-06-24T21:29:44.053716Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 6 count 1 size 176217 actorID [1:7519631233089303396:2522] 2025-06-24T21:29:44.053767Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:323: [PQ: 72075186224037899, Partition: 0, State: StateIdle] compaction completed 2025-06-24T21:29:44.054264Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037899, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:29:44.054290Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T21:29:44.054311Z node 1 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000006_00000_0000000001_00002?(+) to d0000000000_00000000000000000006_00000_0000000001_00002?(+) 2025-06-24T21:29:44.054318Z node 1 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000005_00000_0000000001_00002?(+) to d0000000000_00000000000000000005_00000_0000000001_00002?(+) 2025-06-24T21:29:44.054327Z node 1 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000004_00000_0000000001_00002?(+) to d0000000000_00000000000000000004_00000_0000000001_00002?(+) 2025-06-24T21:29:44.054333Z node 1 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000003_00000_0000000001_00002?(+) to d0000000000_00000000000000000003_00000_0000000001_00002?(+) 2025-06-24T21:29:44.054339Z node 1 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000002_00000_0000000001_00002?(+) to d0000000000_00000000000000000002_00000_0000000001_00002?(+) 2025-06-24T21:29:44.054347Z node 1 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000001_00000_0000000001_00002?(+) to d0000000000_00000000000000000001_00000_0000000001_00002?(+) 2025-06-24T21:29:44.054353Z node 1 :PERSQUEUE DEBUG: read.h:348: CacheProxy. 
Delete blobs from d0000000000_00000000000000000000_00000_0000000001_00002?(+) to d0000000000_00000000000000000000_00000_0000000001_00002?(+) 2025-06-24T21:29:44.054731Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037899' partition 0 offset 0 partno 0 count 6 parts 14 suffix '0' size 8225586 2025-06-24T21:29:44.054761Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037899' partition 0 offset 6 partno 2 count 1 parts 0 suffix '124' size 176217 2025-06-24T21:29:44.060807Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 6 count 1 actorID [1:7519631233089303396:2522] 2025-06-24T21:29:44.060858Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 5 count 1 actorID [1:7519631233089303396:2522] 2025-06-24T21:29:44.060890Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 4 count 1 actorID [1:7519631233089303396:2522] 2025-06-24T21:29:44.060918Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 3 count 1 actorID [1:7519631233089303396:2522] 2025-06-24T21:29:44.060947Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 2 count 1 actorID [1:7519631233089303396:2522] 2025-06-24T21:29:44.060987Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 1 count 1 actorID [1:7519631233089303396:2522] 2025-06-24T21:29:44.061015Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 0 count 1 actorID [1:7519631233089303396:2522] 2025-06-24T21:29:44.061113Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037899, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:29:44.061163Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037899, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:29:44.061347Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037899' partition 0 offset 6 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-24T21:29:44.062101Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037899' partition 0 offset 5 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-24T21:29:44.062457Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037899' partition 0 offset 4 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-24T21:29:44.062878Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037899' partition 0 offset 3 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-24T21:29:44.063260Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037899' partition 0 offset 2 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-24T21:29:44.063581Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037899' partition 0 offset 1 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-24T21:29:44.063937Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. 
Tablet '72075186224037899' partition 0 offset 0 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-24T21:29:44.411165Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631250269173096:2600], status: SCHEME_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:29:44.411508Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=M2UwM2IxZTEtZDQ1MDgyMGEtZTFmNDZjNGEtOWZkOWFmYTg=, ActorId: [1:7519631250269173094:2599], ActorState: ExecuteState, TraceId: 01jyhxhkp42w58xrprkrjvfc45, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:29:44.411880Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |98.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> ReadSessionImplTest::ReconnectOnTmpError >> TColumnShardTestSchema::HotTiersAfterTtl >> ReadSessionImplTest::ReconnectOnTmpError [GOOD] >> ReadSessionImplTest::ReconnectOnTmpErrorAndThenTimeout [GOOD] >> ReadSessionImplTest::ReconnectOnTimeout [GOOD] >> ReadSessionImplTest::ReconnectOnTimeoutAndThenCreate [GOOD] >> ReadSessionImplTest::ReconnectsAfterFailure [GOOD] >> ReadSessionImplTest::SimpleDataHandlers >> KqpBatchDelete::Large_3 [GOOD] >> KqpBatchUpdate::Large_1 [GOOD] >> ReadSessionImplTest::SimpleDataHandlers [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithCommit >> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD] >> TColumnShardTestSchema::Drop+Reboots-GenerateInternalPathId [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD] Test command err: 2025-06-24T21:29:50.539941Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.540018Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.540072Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:50.540558Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-24T21:29:50.540615Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.540643Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.541937Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.007731s 2025-06-24T21:29:50.555631Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-24T21:29:50.556173Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-24T21:29:50.556291Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.560286Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.560310Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.560331Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:50.560927Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-24T21:29:50.560966Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.560993Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.561070Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.008571s 2025-06-24T21:29:50.561634Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:29:50.562186Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-24T21:29:50.562298Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.563318Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.563340Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.563357Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:50.563756Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-24T21:29:50.563795Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.563816Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.563880Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.185448s 2025-06-24T21:29:50.564405Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:29:50.564894Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-24T21:29:50.564979Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.565902Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.565922Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.565967Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:50.566437Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-24T21:29:50.566468Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.566532Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.566599Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.243555s 2025-06-24T21:29:50.567202Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:29:50.567773Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-24T21:29:50.567873Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.568771Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.568795Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.568834Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:50.569100Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:29:50.569548Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:29:50.581683Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.582609Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TRANSPORT_UNAVAILABLE. Description:
: Error: GRpc error: (14): 2025-06-24T21:29:50.582650Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.582667Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.582728Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.219324s 2025-06-24T21:29:50.582939Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-24T21:29:50.584686Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.584731Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.584763Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:50.585094Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:29:50.585494Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:29:50.585597Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.595231Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:29:50.696308Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.696552Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-24T21:29:50.696634Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:29:50.696702Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-24T21:29:50.696788Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-24T21:29:50.797097Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-24T21:29:50.797841Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-24T21:29:50.798943Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.798963Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.798995Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:50.799324Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:29:50.799712Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:29:50.799889Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.803792Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:29:50.905989Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:50.906212Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-24T21:29:50.906291Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:29:50.906343Z :DEBUG: Take Data. Partition 1. 
Read: {1, 0} (2-2) 2025-06-24T21:29:50.906444Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 2025-06-24T21:29:50.906557Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-24T21:29:50.906654Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-24T21:29:50.906746Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-24T21:29:50.907425Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster >> TColumnShardTestSchema::RebootExportWithLostAnswer ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::Large_3 [GOOD] Test command err: Trying to start YDB, gRPC: 65353, MsgBus: 18862 2025-06-24T21:29:10.369106Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631106392502314:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:10.369173Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002bd7/r3tmp/tmpyyDZBj/pdisk_1.dat 2025-06-24T21:29:10.964810Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:10.970554Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631106392502294:2079] 1750800550359921 != 1750800550359924 2025-06-24T21:29:11.028037Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:11.028166Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:11.036455Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 65353, node 1 2025-06-24T21:29:11.160272Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:11.160293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:11.160300Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:11.160434Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:11.397401Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18862 TClient is connected to server localhost:18862 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.077682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:12.118850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:29:12.136644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.333261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.585580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.676856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.267820Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631123572373105:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.267904Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.800426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.838623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.880493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.915197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.994070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.046250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.126062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.196682Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631127867341064:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.196792Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.197028Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631127867341069:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.200790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:15.223901Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631127867341071:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:15.303616Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631127867341124:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:15.371267Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631106392502314:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.371337Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:16.590170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... /build_root/2btm/002bd7/r3tmp/tmpURF6eI/pdisk_1.dat 2025-06-24T21:29:30.304752Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:30.428700Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:30.443311Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631192756305118:2079] 1750800570085528 != 1750800570085531 2025-06-24T21:29:30.454499Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:30.454598Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:30.457188Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19079, node 2 2025-06-24T21:29:30.605205Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:30.605256Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:30.605266Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:30.605428Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6140 2025-06-24T21:29:31.128667Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6140 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:31.447062Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:31.455965Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:29:31.468870Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:31.576741Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:31.775325Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:31.871377Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:34.374791Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631209936175925:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:34.374882Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:34.423227Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:34.456757Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:34.494533Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:34.534275Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:34.581918Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:34.631645Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:34.683217Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:34.785628Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631209936176581:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:34.785743Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:34.786242Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631209936176586:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:34.790811Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:34.810089Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631209936176588:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:34.899401Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631209936176639:3411] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:35.102102Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519631192756305216:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:35.102220Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:36.270651Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:45.412726Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:29:45.412754Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::Large_1 [GOOD] Test command err: Trying to start YDB, gRPC: 25930, MsgBus: 26963 2025-06-24T21:29:12.271418Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631116553216677:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:12.289371Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002af4/r3tmp/tmp3qxgIW/pdisk_1.dat 2025-06-24T21:29:12.779292Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631116553216496:2079] 1750800552244526 != 1750800552244529 2025-06-24T21:29:12.798195Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:12.801515Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:12.801595Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:12.806041Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25930, node 1 2025-06-24T21:29:13.058478Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:13.058503Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:13.058512Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:13.058642Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:13.259414Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26963 TClient is connected to server localhost:26963 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:13.793207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:13.817385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.054720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.223065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.329435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:29:16.105360Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631133733087337:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.105501Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.452553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.486329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.519634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.582315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.612284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.670355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.742599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.815732Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631133733087999:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.815856Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.817048Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631133733088004:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.821413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:16.835948Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631133733088006:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:16.892696Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631133733088057:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:17.258771Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631116553216677:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:17.258831Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:18.062270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:27.795282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot ... opose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... Trying to start YDB, gRPC: 2915, MsgBus: 25790 2025-06-24T21:29:40.884402Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519631234976243279:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:40.886089Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002af4/r3tmp/tmpk7DSU6/pdisk_1.dat 2025-06-24T21:29:41.044894Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:41.060160Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:41.060266Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:41.062246Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2915, node 3 2025-06-24T21:29:41.140763Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:41.140787Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:41.140796Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:41.140940Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25790 TClient is connected to server localhost:25790 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-24T21:29:41.744596Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:29:41.767239Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:41.864775Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:41.963646Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:42.118447Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:42.221608Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:44.559317Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519631252156114071:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:44.559420Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:44.631115Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:44.674021Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:44.713343Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:44.754173Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:44.793405Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:44.838836Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:44.887620Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:44.964391Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519631252156114728:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:44.964492Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:44.965034Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519631252156114733:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:44.969621Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:44.985131Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519631252156114735:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:45.076739Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519631256451082084:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:45.883391Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519631234976243279:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:45.883462Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:46.343244Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> MoveTable::EmptyTable Test command err: 2025-06-24T21:29:46.814423Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:46.850722Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:46.851042Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:46.858899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:46.859140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:46.861236Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:46.861402Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:46.861534Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:46.861661Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:46.861788Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:46.861930Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:46.862067Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:46.862186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:46.862324Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:46.892108Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:46.892304Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:46.892349Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:46.892470Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:46.892604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:46.892660Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:46.892695Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:46.892764Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:46.892809Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:46.892842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:46.892868Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:46.893011Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:46.893069Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:46.893095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:46.893115Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:46.893189Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:46.893235Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:46.893295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:46.893327Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:46.893390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:46.893429Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:46.893479Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:46.893698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:46.893749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:46.893780Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:46.893975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:46.894022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:46.894050Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:46.894209Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:46.894255Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:46.894288Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:46.894363Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:46.894418Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:46.894453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:46.894482Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:46.894913Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=47; 2025-06-24T21:29:46.895000Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=30; 2025-06-24T21:29:46.895097Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=47; 2025-06-24T21:29:46.895228Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=82; 2025-06-24T21:29:46.895315Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:29:46.895412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:29:46.895456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:29:46.895503Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:32:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224:5 #28 0x10607ea7 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169:12 #29 0x10607ea7 in std::__y1::__function::__func, void ()>::operator()() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314:10 #30 0x10e8aab5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12 #31 0x10e8aab5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10 #32 0x10e8aab5 in TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/utmain.cpp:525:20 #33 0x10e63518 in NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/registar.cpp:373:18 #34 0x10607073 in NKikimr::NTestSuiteMoveTable::TCurrentTest::Execute() /-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:32:1 #35 0x10e64de5 in NUnitTest::TTestFactory::Execute() /-S/library/cpp/testing/unittest/registar.cpp:494:19 #36 0x10e8502c in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #37 0x7fcef666cd8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) previously allocated by thread T0 here: #0 0x106d317d in operator new(unsigned long) /-S/contrib/libs/clang18-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x2b088365 in __libcpp_operator_new /-S/contrib/libs/cxxsupp/libcxx/include/new:271:10 #2 0x2b088365 in __libcpp_allocate /-S/contrib/libs/cxxsupp/libcxx/include/new:295:10 #3 0x2b088365 in allocate /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocator.h:103:32 #4 0x2b088365 in get_node /-S/util/generic/hash_table.h:497:43 #5 0x2b088365 in new_node /-S/util/generic/hash_table.h:947:19 #6 0x2b088365 in emplace_unique_noresize /-S/util/generic/hash_table.h:1018:17 #7 0x2b088365 in std::__y1::pair<__yhashtable_iterator>, bool> THashTable, NKikimr::NColumnShard::TSchemeShardLocalPathId, THash, TSelect1st, TEqualTo, std::__y1::allocator>::emplace_unique(NKikimr::NColumnShard::TSchemeShardLocalPathId const&, NKikimr::NColumnShard::TInternalPathId const&) /-S/util/generic/hash_table.h:710:16 #8 0x2b080b49 in emplace /-S/util/generic/hash.h:177:20 #9 0x2b080b49 in NKikimr::NColumnShard::TTablesManager::MoveTablePropose(NKikimr::NColumnShard::TSchemeShardLocalPathId) /-S/ydb/core/tx/columnshard/tables_manager.cpp:454:5 #10 0x105187f1 in NKikimr::NColumnShard::TSchemaTransactionOperator::DoStartProposeOnExecute(NKikimr::NColumnShard::TColumnShard&, NKikimr::NTabletFlatExecutor::TTransactionContext&) /-S/ydb/core/tx/columnshard/transactions/operators/schema.cpp:147:33 #11 0x265f1628 in NKikimr::NColumnShard::TTxController::ITransactionOperator::StartProposeOnExecute(NKikimr::NColumnShard::TColumnShard&, NKikimr::NTabletFlatExecutor::TTransactionContext&) /-S/ydb/core/tx/columnshard/transactions/tx_controller.h:362:32 #12 0x265ed1a2 in NKikimr::NColumnShard::TTxController::StartProposeOnExecute(NKikimr::NColumnShard::TFullTxInfo const&, TBasicString> const&, NKikimr::NTabletFlatExecutor::TTransactionContext&) /-S/ydb/core/tx/columnshard/transactions/tx_controller.cpp:356:25 #13 0x266df482 in NKikimr::NColumnShard::TTxProposeTransaction::Execute(NKikimr::NTabletFlatExecutor::TTransactionContext&, NActors::TActorContext const&) /-S/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp:75:54 #14 0x18e3b944 in 
NKikimr::NTabletFlatExecutor::TExecutor::ExecuteTransaction(NKikimr::NTabletFlatExecutor::TSeat*) /-S/ydb/core/tablet_flat/flat_executor.cpp:1976:35 #15 0x18e37321 in NKikimr::NTabletFlatExecutor::TExecutor::DoExecute(TAutoPtr, NKikimr::NTabletFlatExecutor::TExecutor::ETxMode) /-S/ydb/core/tablet_flat/flat_executor.cpp:1880:13 #16 0x18e3e4de in NKikimr::NTabletFlatExecutor::TExecutor::Execute(TAutoPtr, NActors::TActorContext const&) /-S/ydb/core/tablet_flat/flat_executor.cpp:1894:5 #17 0x18dd546a in Execute /-S/ydb/core/tablet_flat/tablet_flat_executed.cpp:62:46 #18 0x18dd546a in NKikimr::NTabletFlatExecutor::TTabletExecutedFlat::Execute(TAutoPtr, NActors::TActorContext const&) /-S/ydb/core/tablet_flat/tablet_flat_executed.cpp:57:5 #19 0x266dc00a in NKikimr::NColumnShard::TColumnShard::Handle(TAutoPtr, TDelete>&, NActors::TActorContext const&) /-S/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp:123:5 #20 0x2665df8d in NKikimr::NColumnShard::TColumnShard::StateWork(TAutoPtr&) /-S/ydb/core/tx/columnshard/columnshard_impl.h:409:13 #21 0x11f29ccc in NActors::IActor::Receive(TAutoPtr&) /-S/ydb/library/actors/core/actor.cpp:280:13 #22 0x2d9498b4 in NActors::TTestActorRuntimeBase::SendInternal(TAutoPtr, unsigned int, bool) /-S/ydb/library/actors/testlib/test_runtime.cpp:1702:33 #23 0x2d942129 in NActors::TTestActorRuntimeBase::DispatchEventsInternal(NActors::TDispatchOptions const&, TInstant) /-S/ydb/library/actors/testlib/test_runtime.cpp:1295:45 #24 0x2d94c4a3 in NActors::TTestActorRuntimeBase::WaitForEdgeEvents(std::__y1::function&)>, TSet, std::__y1::allocator> const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.cpp:1554:22 #25 0x3211dc73 in NKikimr::TEvColumnShard::TEvProposeTransactionResult::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEventIf(TSet, std::__y1::allocator> const&, std::__y1::function const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.h:477:13 #26 0x3211d812 in GrabEdgeEvent /-S/ydb/library/actors/testlib/test_runtime.h:526:20 #27 0x3211d812 in NKikimr::TEvColumnShard::TEvProposeTransactionResult::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEvent(NActors::TActorId const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.h:532:20 #28 0x320fd0fa in NKikimr::NTxUT::(anonymous namespace)::ProposeSchemaTxOptional(NActors::TTestBasicRuntime&, NActors::TActorId&, TBasicString> const&, unsigned long) /-S/ydb/core/tx/columnshard/test_helper/columnshard_ut_common.cpp:66:23 #29 0x320feb76 in NKikimr::NTxUT::ProposeSchemaTx(NActors::TTestBasicRuntime&, NActors::TActorId&, TBasicString> const&, unsigned long) /-S/ydb/core/tx/columnshard/test_helper/columnshard_ut_common.cpp:85:25 #30 0x105fd543 in NKikimr::NTestSuiteMoveTable::TTestCaseEmptyTable::Execute_(NUnitTest::TTestContext&) /-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:45:20 #31 0x10607ea7 in operator() /-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:32:1 #32 0x10607ea7 in __invoke<(lambda at /-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:32:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:25 #33 0x10607ea7 in __call<(lambda at /-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:32:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224:5 #34 0x10607ea7 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169:12 #35 0x10607ea7 in std::__y1::__function::__func, void ()>::operator()() 
/-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314:10 #36 0x10e8aab5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12 #37 0x10e8aab5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10 #38 0x10e8aab5 in TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/utmain.cpp:525:20 #39 0x10e63518 in NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/registar.cpp:373:18 #40 0x10607073 in NKikimr::NTestSuiteMoveTable::TCurrentTest::Execute() /-S/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp:32:1 #41 0x10e64de5 in NUnitTest::TTestFactory::Execute() /-S/library/cpp/testing/unittest/registar.cpp:494:19 #42 0x10e8502c in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #43 0x7fcef666cd8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) SUMMARY: AddressSanitizer: heap-use-after-free /-S/contrib/libs/cxxsupp/libcxx/include/__utility/pair.h:149:22 in pair Shadow bytes around the buggy address: 0x503000a3e880: 00 00 00 00 fa fa fd fd fd fd fa fa fd fd fd fd 0x503000a3e900: fa fa 00 00 00 00 fa fa 00 00 00 00 fa fa fd fd 0x503000a3e980: fd fd fa fa fd fd fd fd fa fa fd fd fd fd fa fa 0x503000a3ea00: fd fd fd fd fa fa fd fd fd fd fa fa fd fd fd fd 0x503000a3ea80: fa fa fd fd fd fd fa fa fd fd fd fd fa fa fd fd =>0x503000a3eb00: fd fd fa fa fd fd fd fd fa fa fd fd[fd]fa fa fa 0x503000a3eb80: 00 00 00 00 fa fa fd fd fd fd fa fa fd fd fd fd 0x503000a3ec00: fa fa fd fd fd fd fa fa fd fd fd fa fa fa fd fd 0x503000a3ec80: fd fa fa fa fd fd fd fa fa fa fd fd fd fd fa fa 0x503000a3ed00: fd fd fd fd fa fa fd fd fd fd fa fa 00 00 00 00 0x503000a3ed80: fa fa fd fd fd fa fa fa fd fd fd fa fa fa 00 00 Shadow byte legend (one shadow byte represents 8 application bytes): Addressable: 00 Partially addressable: 01 02 03 04 05 06 07 Heap left redzone: fa Freed heap region: fd Stack left redzone: f1 Stack mid redzone: f2 Stack right redzone: f3 Stack after return: f5 Stack use after scope: f8 Global redzone: f9 Global init order: f6 Poisoned by user: f7 Container overflow: fc Array cookie: ac Intra object redzone: bb ASan internal: fe Left alloca redzone: ca Right alloca redzone: cb ==1031890==ABORTING ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::Drop+Reboots-GenerateInternalPathId [GOOD] Test command err: 2025-06-24T21:29:45.763701Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:45.793584Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:45.793952Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:45.802296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:45.802547Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 
2025-06-24T21:29:45.802805Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:45.802924Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:45.803069Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:45.803339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:45.803459Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:45.803578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:45.803693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:45.803809Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:45.803958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:45.835207Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:45.835479Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:45.835563Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:45.835757Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:45.835978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:45.836059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:45.836107Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:45.836198Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:45.836261Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:45.836306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:45.836357Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:45.836543Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:45.836623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:45.836665Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:45.836696Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:45.836790Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:45.836839Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:45.836885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:45.836910Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:45.836972Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:45.837006Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:45.837043Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:45.837285Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:45.837333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:45.837371Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:45.837571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:45.837639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:45.837674Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:45.837860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:45.837908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:45.837938Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:45.838028Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:45.838097Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:45.838139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:45.838166Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:45.838615Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=47; 2025-06-24T21:29:45.838716Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=42; 2025-06-24T21:29:45.838798Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=39; 2025-06-24T21:29:45.838882Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=35; 2025-06-24T21:29:45.838986Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:29:45.839087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:29:45.839491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:29:45.839580Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... umnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:29:50.990485Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:29:50.990529Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-24T21:29:50.990578Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:29:50.990636Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:29:50.990731Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=1; 2025-06-24T21:29:50.990812Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800287001;tx_id=18446744073709551615;;current_snapshot_ts=1750800586830; 2025-06-24T21:29:50.990864Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=1;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:29:50.990929Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:29:50.990978Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:29:50.991070Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:167;event=skip_actualization;waiting=0.999000s; 2025-06-24T21:29:50.991130Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:29:51.108825Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: 
{1750800587001:max} readable: {1750800587001:max} at tablet 9437184 2025-06-24T21:29:51.109079Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-24T21:29:51.111791Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800587001:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:29:51.111927Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800587001:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:29:51.112775Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800587001:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-24T21:29:51.112947Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800587001:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-24T21:29:51.114311Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:772:2767];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800587001:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:829:2815];trace_detailed=; 2025-06-24T21:29:51.115818Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-24T21:29:51.116155Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-24T21:29:51.116731Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:829:2815];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:29:51.116917Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:829:2815];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:51.117077Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:829:2815];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:51.117140Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:829:2815] finished for tablet 9437184 2025-06-24T21:29:51.117647Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:829:2815];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:822:2809];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","f_ProduceResults","l_ProduceResults","f_Finish"],"t":0.002},{"events":["l_ack","l_processing","l_Finish"],"t":0.003}],"full":{"a":1750800591114220,"name":"_full_task","f":1750800591114220,"d_finished":0,"c":0,"l":1750800591117231,"d":3011},"events":[{"name":"bootstrap","f":1750800591114560,"d_finished":1795,"c":1,"l":1750800591116355,"d":1795},{"a":1750800591116698,"name":"ack","f":1750800591116698,"d_finished":0,"c":0,"l":1750800591117231,"d":533},{"a":1750800591116677,"name":"processing","f":1750800591116677,"d_finished":0,"c":0,"l":1750800591117231,"d":554},{"name":"ProduceResults","f":1750800591116330,"d_finished":364,"c":2,"l":1750800591117112,"d":364},{"a":1750800591117118,"name":"Finish","f":1750800591117118,"d_finished":0,"c":0,"l":1750800591117231,"d":113}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:51.117741Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:829:2815];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:822:2809];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:29:51.118164Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:829:2815];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:822:2809];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","f_ProduceResults","l_ProduceResults","f_Finish"],"t":0.002},{"events":["l_ack","l_processing","l_Finish"],"t":0.003}],"full":{"a":1750800591114220,"name":"_full_task","f":1750800591114220,"d_finished":0,"c":0,"l":1750800591117799,"d":3579},"events":[{"name":"bootstrap","f":1750800591114560,"d_finished":1795,"c":1,"l":1750800591116355,"d":1795},{"a":1750800591116698,"name":"ack","f":1750800591116698,"d_finished":0,"c":0,"l":1750800591117799,"d":1101},{"a":1750800591116677,"name":"processing","f":1750800591116677,"d_finished":0,"c":0,"l":1750800591117799,"d":1122},{"name":"ProduceResults","f":1750800591116330,"d_finished":364,"c":2,"l":1750800591117112,"d":364},{"a":1750800591117118,"name":"Finish","f":1750800591117118,"d_finished":0,"c":0,"l":1750800591117799,"d":681}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:29:51.118254Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:829:2815];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:29:51.112905Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-24T21:29:51.118306Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:829:2815];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:29:51.118416Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:829:2815];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; >> KqpWorkload::KV [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithExplicitlySpecifiedPartitions ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpWorkload::KV [GOOD] Test command err: Trying to start YDB, gRPC: 1766, MsgBus: 21824 2025-06-24T21:28:19.892780Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630885497690437:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:19.893029Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004b06/r3tmp/tmpNs6IDs/pdisk_1.dat 2025-06-24T21:28:20.317577Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:28:20.321785Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630885497690266:2079] 1750800499842469 != 1750800499842472 2025-06-24T21:28:20.332194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:20.332303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:20.336990Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1766, node 1 2025-06-24T21:28:20.415898Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:20.415922Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:20.415955Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:20.416089Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21824 2025-06-24T21:28:20.895307Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21824 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:28:21.126790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:21.146674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:28:23.136175Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630902677560087:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.136300Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.466337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:23.964414Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630902677561718:2418], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.964505Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.964583Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630902677561723:2421], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:23.968696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:23.986776Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630902677561725:2422], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:28:24.063314Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630906972529072:3358] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:24.889844Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630885497690437:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:24.889918Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:35.279398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:28:35.279438Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded took: 0.131750s took: 0.134284s took: 0.134273s took: 0.134441s took: 0.133963s took: 0.134779s took: 0.137820s took: 0.132891s took: 0.138252s took: 0.136679s took: 0.154734s took: 0.165747s took: 0.169824s took: 0.169924s took: 0.170231s took: 0.159474s took: 0.174662s took: 0.175681s took: 0.164657s took: 0.176899s took: 0.335096s took: 0.329114s took: 0.335562s took: 0.339448s took: 0.340098s took: 0.346548s took: 0.346819s took: 0.342929s took: 0.346691s took: 0.348698s took: 0.067924s took: 0.074151s took: 0.081379s took: 0.081482s took: 0.076550s took: 0.078869s took: 0.084502s took: 0.079784s took: 0.085379s took: 0.086081s took: 0.158324s took: 0.155576s took: 0.158678s took: 0.159212s took: 0.158554s took: 0.191258s took: 0.191151s took: 0.192645s took: 0.194745s took: 0.198185s 2025-06-24T21:29:51.653295Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037915 not found 2025-06-24T21:29:51.653344Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037924 not found 2025-06-24T21:29:51.653370Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037923 not found 2025-06-24T21:29:51.653392Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037922 not found 2025-06-24T21:29:51.653413Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037911 not found 2025-06-24T21:29:51.653435Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-24T21:29:51.653458Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037897 not found 2025-06-24T21:29:51.653487Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037918 not found 2025-06-24T21:29:51.676900Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037892 not found 2025-06-24T21:29:51.678438Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle 
TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037901 not found 2025-06-24T21:29:51.678613Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-24T21:29:51.678632Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037919 not found 2025-06-24T21:29:51.678650Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037908 not found 2025-06-24T21:29:51.678667Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037895 not found 2025-06-24T21:29:51.678689Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037896 not found 2025-06-24T21:29:51.684254Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-06-24T21:29:51.684302Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037914 not found 2025-06-24T21:29:51.684326Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037899 not found 2025-06-24T21:29:51.689350Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037910 not found 2025-06-24T21:29:51.689391Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037904 not found 2025-06-24T21:29:51.689409Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037902 not found 2025-06-24T21:29:51.689427Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037903 not found 2025-06-24T21:29:51.689445Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037905 not found 2025-06-24T21:29:51.689504Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037894 not found 2025-06-24T21:29:51.689525Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037927 not found 2025-06-24T21:29:51.689544Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037925 not found 2025-06-24T21:29:51.689571Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037912 not found 2025-06-24T21:29:51.689603Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037893 not found 2025-06-24T21:29:51.689626Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037921 not found 2025-06-24T21:29:51.689644Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037917 not found 
2025-06-24T21:29:51.689660Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037900 not found 2025-06-24T21:29:51.689684Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037906 not found 2025-06-24T21:29:51.689703Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037913 not found 2025-06-24T21:29:51.689724Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037898 not found 2025-06-24T21:29:51.689742Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037916 not found 2025-06-24T21:29:51.689768Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037920 not found 2025-06-24T21:29:51.689788Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037926 not found 2025-06-24T21:29:51.700619Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037909 not found 2025-06-24T21:29:51.702775Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037907 not found 2025-06-24T21:29:51.702817Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found |98.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> MoveTable::RenameToItself_Negative >> ReadSessionImplTest::ProperlyOrdersDecompressedData [GOOD] >> ReadSessionImplTest::PacksBatches_ExactlyTwoMessagesInBatch [GOOD] >> ReadSessionImplTest::PacksBatches_OneMessageInEveryBatch [GOOD] >> ReadSessionImplTest::PacksBatches_BigBatchDecompressWithTwoBatchTasks >> TColumnShardTestSchema::TTL+Reboot+Internal+FirstPkColumn |98.3%| [TA] $(B)/ydb/core/kqp/ut/perf/test-results/unittest/{meta.json ... results_accumulator.log} |98.3%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/perf/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpBatchDelete::SimpleOnePartition [GOOD] >> MoveTable::RenameToItself_Negative [GOOD] >> TColumnShardTestSchema::TTL-Reboot+Internal+FirstPkColumn >> TColumnShardTestSchema::TTL+Reboot-Internal-FirstPkColumn ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> MoveTable::RenameToItself_Negative [GOOD] Test command err: 2025-06-24T21:29:54.392365Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:54.423582Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:54.423938Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:54.433472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:54.433741Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:54.434054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:54.434228Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:54.434393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:54.434546Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:54.434691Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:54.434856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:54.434992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:54.435131Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:54.435325Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:54.472852Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:54.473206Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:54.473285Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:54.473541Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:54.473750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:54.473860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:54.473915Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:54.474028Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:54.474129Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:54.474205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:54.474251Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:54.474481Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:54.474578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:54.474633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:54.474687Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:54.474792Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:54.474856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:54.474909Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:54.474943Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:54.475014Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:54.475055Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:54.475124Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:54.475411Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:54.475481Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:54.475519Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:54.475837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:54.475911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:54.475950Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:54.476124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:54.476188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:54.476224Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:54.476314Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:54.476396Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:54.476443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:54.476479Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:54.477026Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=63; 2025-06-24T21:29:54.477190Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=73; 2025-06-24T21:29:54.477292Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=46; 2025-06-24T21:29:54.477404Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=48; 2025-06-24T21:29:54.477534Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:29:54.477644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:29:54.477711Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:29:54.477774Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
gerLoadingTime=531; 2025-06-24T21:29:54.761402Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=36; 2025-06-24T21:29:54.761479Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=39; 2025-06-24T21:29:54.761586Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=60; 2025-06-24T21:29:54.761744Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=65; 2025-06-24T21:29:54.761920Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=129; 2025-06-24T21:29:54.762299Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=324; 2025-06-24T21:29:54.762368Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=8; 2025-06-24T21:29:54.762429Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=9; 2025-06-24T21:29:54.762471Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=7; 2025-06-24T21:29:54.762557Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=48; 2025-06-24T21:29:54.762613Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=16; 2025-06-24T21:29:54.762718Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=64; 2025-06-24T21:29:54.762751Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=3; 2025-06-24T21:29:54.762797Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=20; 2025-06-24T21:29:54.762875Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=31; 2025-06-24T21:29:54.762916Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=18; 2025-06-24T21:29:54.762949Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=3067; 2025-06-24T21:29:54.763095Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:29:54.763247Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:29:54.763346Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:29:54.763718Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:29:54.763797Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:449;problem=Background activities cannot be started: no index at tablet; 2025-06-24T21:29:54.764169Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:29:54.764270Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:29:54.764319Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-24T21:29:54.764358Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:29:54.764429Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:29:54.764481Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:449;problem=Background activities cannot be started: no index at tablet; 2025-06-24T21:29:55.069328Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=10;this=88923004791712;method=TTxController::StartProposeOnExecute;tx_info=10:TX_KIND_SCHEMA;min=1750800595351;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-24T21:29:55.069438Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=10;this=88923004791712;method=TTxController::StartProposeOnExecute;tx_info=10:TX_KIND_SCHEMA;min=1750800595351;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;fline=schema.h:38;event=sync_schema; 2025-06-24T21:29:55.089706Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750800595351;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;this=88923004791712;op_tx=10:TX_KIND_SCHEMA;min=1750800595351;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750800595351;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;int_this=89129165614016;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-24T21:29:55.089805Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750800595351;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;this=88923004791712;op_tx=10:TX_KIND_SCHEMA;min=1750800595351;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750800595351;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;int_this=89129165614016;method=TTxController::FinishProposeOnComplete;tx_id=10;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:158:2180]; 2025-06-24T21:29:55.089867Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750800595351;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;this=88923004791712;op_tx=10:TX_KIND_SCHEMA;min=1750800595351;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750800595351;max=18446744073709551615;plan=0;src=[1:158:2180];cookie=00:0;;int_this=89129165614016;method=TTxController::FinishProposeOnComplete;tx_id=10;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=10; 2025-06-24T21:29:55.090214Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-24T21:29:55.090344Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750800595351 at tablet 9437184, mediator 0 2025-06-24T21:29:55.090401Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] execute at tablet 9437184 2025-06-24T21:29:55.090696Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-24T21:29:55.090827Z node 1 :TX_COLUMNSHARD INFO: ctor_logger.h:56: EnsureTable for pathId: {internal: 9438184000001, ss: 1} ttl settings: { Version: 1 } at tablet 9437184 2025-06-24T21:29:55.095750Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:29:55.095906Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tables_manager.cpp:304;method=RegisterTable;path_id=9438184000001; 2025-06-24T21:29:55.095990Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine.h:144;event=RegisterTable;path_id=9438184000001; 2025-06-24T21:29:55.101730Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=9438184000001; 2025-06-24T21:29:55.101973Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tx_controller.cpp:215;event=finished_tx;tx_id=10; 2025-06-24T21:29:55.127931Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: 
TxPlanStep[2] complete at tablet 9437184 2025-06-24T21:29:55.129549Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=11;this=88923004830240;method=TTxController::StartProposeOnExecute;tx_info=11:TX_KIND_SCHEMA;min=1750800595355;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:1;;fline=schema.cpp:134;propose_execute=move_table;src=1;dst=1; 2025-06-24T21:29:55.129661Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=11;this=88923004830240;method=TTxController::StartProposeOnExecute;tx_info=11:TX_KIND_SCHEMA;min=1750800595355;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:1;;fline=tx_controller.cpp:364;error=problem on start;message=Rename to existing table; 2025-06-24T21:29:55.142562Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=11:TX_KIND_SCHEMA;min=1750800595355;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:1;;this=88923004830240;op_tx=11:TX_KIND_SCHEMA;min=1750800595355;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:1;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:102:2135]; 2025-06-24T21:29:55.142650Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=11:TX_KIND_SCHEMA;min=1750800595355;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:1;;this=88923004830240;op_tx=11:TX_KIND_SCHEMA;min=1750800595355;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:1;;fline=propose_tx.cpp:23;message=Rename to existing table;tablet_id=9437184;tx_id=11; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::SimpleOnePartition [GOOD] Test command err: Trying to start YDB, gRPC: 20622, MsgBus: 14952 2025-06-24T21:29:12.109087Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631115287000761:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:12.109774Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ae4/r3tmp/tmpGMemYY/pdisk_1.dat 2025-06-24T21:29:12.614999Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:12.615136Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:12.631432Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631115287000663:2079] 1750800552076699 != 1750800552076702 2025-06-24T21:29:12.682491Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:12.683880Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20622, node 1 2025-06-24T21:29:12.902485Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:12.902509Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: 
(empty maybe) 2025-06-24T21:29:12.902515Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:12.902624Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:13.147372Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14952 TClient is connected to server localhost:14952 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:13.627544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:13.650091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:29:13.662576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:13.818331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.011824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:29:14.086705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:15.678922Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631128171904199:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.679081Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.986161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.015543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.051824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.084223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.137195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.216498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.292495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.351344Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631132466872164:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.351450Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.351556Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631132466872169:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.355665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:16.373753Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631132466872171:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:16.459820Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631132466872222:3430] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:17.107779Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631115287000761:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:17.107858Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 12080, MsgBus: 24941 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ae4/r3tmp/tmpPC8Vis/pdisk_1.dat 2025-06-24T21:29:22.411931Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/mi ... d: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:37.466983Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519631202057743953:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:37.467041Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 16175, MsgBus: 25104 2025-06-24T21:29:43.733657Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519631246154857230:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:43.760415Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ae4/r3tmp/tmpqDcBc0/pdisk_1.dat 2025-06-24T21:29:43.927424Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:43.940623Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:43.940728Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:43.944608Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16175, node 4 2025-06-24T21:29:43.994646Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:43.994693Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:43.994702Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:43.994835Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25104 TClient is connected to 
server localhost:25104 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:44.504150Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:44.511606Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:29:44.516462Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:44.581886Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:44.766620Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:44.807543Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:44.899376Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:29:47.736750Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519631263334728006:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:47.736843Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:47.844616Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:47.881770Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:47.963721Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:48.020550Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:48.066271Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:48.117691Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:48.200918Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:48.321430Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519631267629695967:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:48.321516Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:48.321912Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519631267629695972:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:48.325576Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:48.341571Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519631267629695974:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:48.434098Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519631267629696025:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:48.734138Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519631246154857230:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:48.734210Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClass [GOOD] >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClassTopicAPI >> ReadSessionImplTest::PacksBatches_BigBatchDecompressWithTwoBatchTasks [GOOD] >> ReadSessionImplTest::PacksBatches_DecompressesOneMessagePerTime >> ReadSessionImplTest::PacksBatches_DecompressesOneMessagePerTime [GOOD] >> ReadSessionImplTest::PartitionStreamStatus [GOOD] >> ReadSessionImplTest::PartitionStreamCallbacks [GOOD] >> TPersQueueNewSchemeCacheTest::CheckGrpcReadNoDC [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::PartitionStreamCallbacks [GOOD] Test command err: 2025-06-24T21:29:54.220563Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:54.220596Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:54.220615Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:54.221304Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:29:54.222256Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:29:54.234509Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:54.235088Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:29:54.236553Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:29:54.237131Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:29:54.237370Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2025-06-24T21:29:54.237549Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:29:54.237636Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:29:54.237677Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (2-2) 2025-06-24T21:29:54.237708Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-24T21:29:54.237736Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 1, size 8 bytes 2025-06-24T21:29:54.238990Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:54.239025Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:54.239062Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:54.239383Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:29:54.239848Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:29:54.240016Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:54.240339Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 10 Compressed message data size: 30 2025-06-24T21:29:54.241159Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:29:54.241354Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-24T21:29:54.241663Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-24T21:29:54.241842Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-24T21:29:54.241933Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:29:54.241981Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-24T21:29:54.242019Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-24T21:29:54.242180Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 Getting new event 2025-06-24T21:29:54.242215Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-24T21:29:54.242241Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-24T21:29:54.242264Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-24T21:29:54.242366Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 Getting new event 2025-06-24T21:29:54.242422Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-24T21:29:54.242436Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-06-24T21:29:54.242453Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-24T21:29:54.242515Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 Getting new event 2025-06-24T21:29:54.242532Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-24T21:29:54.242544Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-24T21:29:54.242557Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-24T21:29:54.242624Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [7, 9). Partition stream id: 1 2025-06-24T21:29:54.243902Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:54.243942Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:54.243999Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:54.244325Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:29:54.244791Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:29:54.244919Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:54.245204Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". 
Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 100 Compressed message data size: 91 2025-06-24T21:29:54.246122Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:29:54.246293Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-24T21:29:54.247004Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-24T21:29:54.247276Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-24T21:29:54.247390Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:29:54.247438Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-24T21:29:54.247578Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 2). Partition stream id: 1 Getting new event 2025-06-24T21:29:54.247624Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-24T21:29:54.247648Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-24T21:29:54.247714Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [2, 3). Partition stream id: 1 Getting new event 2025-06-24T21:29:54.247761Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-24T21:29:54.247786Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-24T21:29:54.247888Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 4). Partition stream id: 1 Getting new event 2025-06-24T21:29:54.247922Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-24T21:29:54.247947Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStream ... tream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 190 SeqNo: 231 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 191 SeqNo: 232 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 192 SeqNo: 233 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 193 SeqNo: 234 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 194 SeqNo: 235 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 195 SeqNo: 236 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 196 SeqNo: 237 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 197 SeqNo: 238 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 198 SeqNo: 239 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 199 SeqNo: 240 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 200 SeqNo: 241 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-24T21:29:56.509576Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 201). 
Partition stream id: 1 2025-06-24T21:29:56.584699Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-06-24T21:29:56.584729Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-06-24T21:29:56.584756Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:56.591434Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:29:56.591974Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:29:56.592212Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-06-24T21:29:56.592640Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 1000000 Compressed message data size: 3028 Post function Getting new event 2025-06-24T21:29:56.719541Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-10) 2025-06-24T21:29:56.720736Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:29:56.733221Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-24T21:29:56.736169Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-24T21:29:56.737080Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2025-06-24T21:29:56.743098Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2025-06-24T21:29:56.744051Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2025-06-24T21:29:56.744986Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (7-7) 2025-06-24T21:29:56.745983Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (8-8) 2025-06-24T21:29:56.756097Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (9-9) 2025-06-24T21:29:56.757074Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (10-10) 2025-06-24T21:29:56.757175Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 10, size 10000000 bytes 2025-06-24T21:29:56.757363Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 5, ReadSizeServerDelta = 0 DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 9 SeqNo: 50 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 51 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-24T21:29:56.761552Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 11). Partition stream id: 1 2025-06-24T21:29:56.778421Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:56.778470Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:56.778511Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:56.778907Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:29:56.781275Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:29:56.781883Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:56.782274Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. 
Read offset: (NULL) 2025-06-24T21:29:56.782787Z :DEBUG: [db] [sessionid] [cluster] Requesting status for partition stream id: 1 2025-06-24T21:29:56.784221Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:56.784266Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:56.784299Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:56.784636Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:29:56.785303Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:29:56.785462Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:56.786099Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:56.786290Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:29:56.786393Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:29:56.786443Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-24T21:29:56.786614Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 >> Compression::WriteRAW >> TColumnShardTestSchema::DropWriteRace ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueNewSchemeCacheTest::CheckGrpcReadNoDC [GOOD] Test command err: 2025-06-24T21:29:27.781215Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631180262885214:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:27.784656Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:27.855660Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631177808776632:2092];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:27.855718Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:28.219693Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:28.232120Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002353/r3tmp/tmpiCtx40/pdisk_1.dat 2025-06-24T21:29:28.834325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:28.835132Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:28.835998Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:28.836061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> 
Connecting 2025-06-24T21:29:28.874696Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:28.875322Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:28.879276Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:28.896814Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:29:28.896964Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:28.903747Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7698, node 1 2025-06-24T21:29:29.323855Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/002353/r3tmp/yandexAI3M8u.tmp 2025-06-24T21:29:29.323888Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/002353/r3tmp/yandexAI3M8u.tmp 2025-06-24T21:29:29.324077Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/002353/r3tmp/yandexAI3M8u.tmp 2025-06-24T21:29:29.324210Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:29.437051Z INFO: TTestServer started on Port 10996 GrpcPort 7698 TClient is connected to server localhost:10996 PQClient connected to localhost:7698 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:30.043859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-24T21:29:30.123020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:29:30.131386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T21:29:32.769847Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631180262885214:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:32.769945Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:32.855897Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519631177808776632:2092];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:32.856003Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:33.017241Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631201737722667:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:33.017360Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631201737722680:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:33.017412Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:33.021652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:33.058451Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631206032689987:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-24T21:29:33.314664Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631206032690079:2758] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:33.344570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:33.460311Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631206032690101:2311], status: SCHEME_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:3:16: Error: At function: KiReadTable!
<main>:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:29:33.462796Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MmU1MjUyNDMtNjZiNzcwZS0yNmQ0ZTM2NS01MTg2ZGE2MA==, ActorId: [1:7519631201737722665:2298], ActorState: ExecuteState, TraceId: 01jyhxh8hza4c3a5r2gmmywfs9, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:29:33.465585Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:29:33.487226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:33.651693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-24T21:29:33.961459Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyhxh9asaf38hkrvntrksz9h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWZmZDU4ZTgtZDI5MmI1NTgtZWMwMWI0Y2EtODZmOGI4NQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default ... 
ERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037905][topic2] pipe [3:7519631299385430941:2513] disconnected; active server actors: 1 2025-06-24T21:29:55.754730Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037904] Destroy direct read session user1_3_2_17826254507069573302_v1 2025-06-24T21:29:55.754593Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037905][topic2] pipe [3:7519631299385430941:2513] client user1 disconnected session user1_3_2_17826254507069573302_v1 2025-06-24T21:29:55.754789Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037904] server disconnected, pipe [3:7519631299385430952:2520] destroyed 2025-06-24T21:29:55.754741Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037902] Destroy direct read session user1_3_2_17826254507069573302_v1 2025-06-24T21:29:55.754881Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037903] Destroy direct read session user1_3_2_17826254507069573302_v1 2025-06-24T21:29:55.754767Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037902] server disconnected, pipe [3:7519631299385430951:2519] destroyed 2025-06-24T21:29:55.754896Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037903] server disconnected, pipe [3:7519631299385430949:2517] destroyed 2025-06-24T21:29:55.754790Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037902] Destroy direct read session user1_3_2_17826254507069573302_v1 2025-06-24T21:29:55.754928Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: user1_3_2_17826254507069573302_v1 2025-06-24T21:29:55.754803Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037902] server disconnected, pipe [3:7519631299385430950:2518] destroyed 2025-06-24T21:29:55.754944Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: user1_3_2_17826254507069573302_v1 2025-06-24T21:29:55.754836Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: user1_3_2_17826254507069573302_v1 2025-06-24T21:29:55.754849Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: user1_3_2_17826254507069573302_v1 2025-06-24T21:29:55.759567Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037903] Destroy direct read session user1_3_2_17826254507069573302_v1 2025-06-24T21:29:55.759613Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037903] server disconnected, pipe [3:7519631299385430948:2516] destroyed 2025-06-24T21:29:55.759660Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: user1_3_2_17826254507069573302_v1 2025-06-24T21:29:55.784602Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519631249973151438:2106], request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:29:55.784754Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519631249973151438:2106], cacheItem# { Subscriber: { Subscriber: [4:7519631254268118757:2111] DomainOwnerId: 72057594046644480 
Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:29:55.784832Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519631301512760243:2981], recipient# [4:7519631301512760242:2381], result# { ErrorCount: 1 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:29:55.815826Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519631247845820167:2128], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:29:55.815959Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519631247845820167:2128], cacheItem# { Subscriber: { Subscriber: [3:7519631252140787953:2444] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 29 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750800584442 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:29:55.816170Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519631299385430968:4476], recipient# [3:7519631247845819981:2085], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:29:55.868957Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2823: Handle TEvTxProxySchemeCache::TEvResolveKeySet: self# [3:7519631247845820167:2128], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 },{ TableId: [OwnerId: 72057594046644480, LocalPathId: 10] Access: 1 SyncVersion: false Status: 
Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-24T21:29:55.869054Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2062: FillEntry for TResolve: self# [3:7519631247845820167:2128], cacheItem# { Subscriber: { Subscriber: [3:7519631265025690462:2866] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 18 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750800588075 PathId: [OwnerId: 72057594046644480, LocalPathId: 12] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:29:55.869156Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2062: FillEntry for TResolve: self# [3:7519631247845820167:2128], cacheItem# { Subscriber: { Subscriber: [3:7519631265025690350:2791] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 18 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750800587774 PathId: [OwnerId: 72057594046644480, LocalPathId: 10] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 10] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:29:55.869432Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519631299385430971:4477], recipient# [3:7519631299385430970:2492], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 1 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 },{ TableId: [OwnerId: 72057594046644480, LocalPathId: 10] Access: 1 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-24T21:29:55.872665Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2823: Handle TEvTxProxySchemeCache::TEvResolveKeySet: self# [4:7519631249973151438:2106], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 0 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Utf8 : Cluster) IncFrom: 1 To: (Utf8 : Cluster) IncTo: 1 }] } 2025-06-24T21:29:55.872787Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2062: FillEntry for TResolve: self# [4:7519631249973151438:2106], cacheItem# { Subscriber: { Subscriber: [4:7519631275742955441:2220] DomainOwnerId: 72057594046644480 Type: 2 
SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750800588075 PathId: [OwnerId: 72057594046644480, LocalPathId: 12] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 0 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:29:55.873024Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519631301512760250:2982], recipient# [4:7519631301512760248:2382], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Utf8 : Cluster) IncFrom: 1 To: (Utf8 : Cluster) IncTo: 1 }] } >> TColumnShardTestSchema::DropWriteRace [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::DropWriteRace [GOOD] Test command err: 2025-06-24T21:29:58.499313Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:58.526440Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:58.526781Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:58.534570Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:58.534832Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:58.543332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:58.543631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:58.543800Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:58.543931Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:58.544078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:58.544194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:58.544313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:58.544453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:58.544572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:58.575194Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:58.575499Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:58.575561Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:58.575760Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:58.575927Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:58.575997Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:58.576040Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:58.576132Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:58.576200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:58.576256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:58.576296Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:58.576469Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:58.576529Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:58.576564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:58.576589Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:58.576648Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:58.576681Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:58.576730Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:58.576750Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:58.576802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:58.576827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:58.576853Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:58.577003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:58.577038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:58.577056Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:58.577255Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:58.577311Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:58.577344Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:58.577427Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:58.577457Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:58.577476Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:58.577570Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:58.577623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:58.577648Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:58.577669Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:58.578016Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=35; 2025-06-24T21:29:58.578100Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=44; 2025-06-24T21:29:58.578166Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=28; 2025-06-24T21:29:58.578241Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=36; 2025-06-24T21:29:58.578314Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:29:58.578398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:29:58.578434Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:29:58.578496Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... 
annot be started: no index at tablet; 2025-06-24T21:29:59.165493Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=101;this=88923004791712;method=TTxController::StartProposeOnExecute;tx_info=101:TX_KIND_SCHEMA;min=1750800599469;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-24T21:29:59.165578Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=101;this=88923004791712;method=TTxController::StartProposeOnExecute;tx_info=101:TX_KIND_SCHEMA;min=1750800599469;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;fline=schema.h:38;event=sync_schema; 2025-06-24T21:29:59.178739Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=101:TX_KIND_SCHEMA;min=1750800599469;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;this=88923004791712;op_tx=101:TX_KIND_SCHEMA;min=1750800599469;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;int_op_tx=101:TX_KIND_SCHEMA;min=1750800599469;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;int_this=89129165614016;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-24T21:29:59.178846Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=101:TX_KIND_SCHEMA;min=1750800599469;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;this=88923004791712;op_tx=101:TX_KIND_SCHEMA;min=1750800599469;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;int_op_tx=101:TX_KIND_SCHEMA;min=1750800599469;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;int_this=89129165614016;method=TTxController::FinishProposeOnComplete;tx_id=101;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:102:2135]; 2025-06-24T21:29:59.178908Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=101:TX_KIND_SCHEMA;min=1750800599469;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;this=88923004791712;op_tx=101:TX_KIND_SCHEMA;min=1750800599469;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;int_op_tx=101:TX_KIND_SCHEMA;min=1750800599469;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:0;;int_this=89129165614016;method=TTxController::FinishProposeOnComplete;tx_id=101;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=101; 2025-06-24T21:29:59.179367Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-24T21:29:59.179545Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750800599469 at tablet 9437184, mediator 0 2025-06-24T21:29:59.179613Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] execute at tablet 9437184 2025-06-24T21:29:59.180032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=101;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-24T21:29:59.180172Z node 1 :TX_COLUMNSHARD INFO: ctor_logger.h:56: EnsureTable for pathId: {internal: 9438184000001, ss: 1} ttl settings: { Version: 1 } at tablet 9437184 2025-06-24T21:29:59.191298Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=101;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-24T21:29:59.191440Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=101;fline=tables_manager.cpp:304;method=RegisterTable;path_id=9438184000001; 2025-06-24T21:29:59.191525Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=101;fline=column_engine.h:144;event=RegisterTable;path_id=9438184000001; 2025-06-24T21:29:59.198313Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=101;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=9438184000001; 2025-06-24T21:29:59.198533Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=101;fline=tx_controller.cpp:215;event=finished_tx;tx_id=101; 2025-06-24T21:29:59.222905Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] complete at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6120;columns=10; 2025-06-24T21:29:59.228300Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:210;event=register_operation;operation_id=1;last=1; 2025-06-24T21:29:59.228382Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=write_queue.cpp:14;writing_size=6120;operation_id=616679ca-514211f0-bdbca41b-d579c6e2;in_flight=1;size_in_flight=6120; 2025-06-24T21:29:59.244276Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;scope=TBuildBatchesTask::DoExecute;tablet_id=9437184;parent_id=[1:127:2157];write_id=1;path_id={internal: 9438184000001, ss: 1};fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=8392;count=1;actions=__DEFAULT,;waiting=1;; 2025-06-24T21:29:59.246808Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6120;event=data_write_finished;writing_id=616679ca-514211f0-bdbca41b-d579c6e2; 2025-06-24T21:29:59.247104Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=78;data_size=59;sum=78;count=1; 2025-06-24T21:29:59.247293Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=230;data_size=227;sum=230;count=2;size_of_meta=136; 2025-06-24T21:29:59.247417Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=302;data_size=299;sum=302;count=1;size_of_portion=208; 2025-06-24T21:29:59.248744Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 1 2025-06-24T21:29:59.248937Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=2;operation_id=1; 2025-06-24T21:29:59.265014Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 1 2025-06-24T21:29:59.292898Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=103:TX_KIND_SCHEMA;min=1750800599477;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;this=88923005131744;op_tx=103:TX_KIND_SCHEMA;min=1750800599477;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;int_op_tx=103:TX_KIND_SCHEMA;min=1750800599477;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;int_this=89129165674176;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-24T21:29:59.293038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=103:TX_KIND_SCHEMA;min=1750800599477;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;this=88923005131744;op_tx=103:TX_KIND_SCHEMA;min=1750800599477;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;int_op_tx=103:TX_KIND_SCHEMA;min=1750800599477;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;int_this=89129165674176;method=TTxController::FinishProposeOnComplete;tx_id=103;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:102:2135]; 2025-06-24T21:29:59.293121Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=103:TX_KIND_SCHEMA;min=1750800599477;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;this=88923005131744;op_tx=103:TX_KIND_SCHEMA;min=1750800599477;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;int_op_tx=103:TX_KIND_SCHEMA;min=1750800599477;max=18446744073709551615;plan=0;src=[1:102:2135];cookie=00:2;;int_this=89129165674176;method=TTxController::FinishProposeOnComplete;tx_id=103;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=103; 2025-06-24T21:29:59.293571Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-24T21:29:59.293732Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750800599477 at tablet 9437184, mediator 0 2025-06-24T21:29:59.293794Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[6] execute at tablet 9437184 2025-06-24T21:29:59.294135Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: DropTable for pathId: {internal: 9438184000001, ss: 1} at tablet 9437184 2025-06-24T21:29:59.294240Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=103;fline=tx_controller.cpp:215;event=finished_tx;tx_id=103; 2025-06-24T21:29:59.306849Z node 1 
:TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[6] complete at tablet 9437184 2025-06-24T21:29:59.307302Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750800599478 at tablet 9437184, mediator 0 2025-06-24T21:29:59.307410Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[8] execute at tablet 9437184 2025-06-24T21:29:59.307849Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=102;fline=abstract.h:83;progress_tx_id=102;lock_id=1;broken=0; 2025-06-24T21:29:59.308396Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=102;fline=tx_controller.cpp:215;event=finished_tx;tx_id=102; 2025-06-24T21:29:59.320990Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[8] complete at tablet 9437184 2025-06-24T21:29:59.321147Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;fline=abstract.h:93;progress_tx_id=102;lock_id=1;broken=0; 2025-06-24T21:29:59.321406Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;commit_tx_id=102;commit_lock_id=1;fline=manager.cpp:177;event=remove_by_insert_id;id=2;operation_id=1; 2025-06-24T21:29:59.321473Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;commit_tx_id=102;commit_lock_id=1;fline=manager.cpp:180;event=remove_operation;operation_id=1; |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> BasicUsage::MaxByteSizeEqualZero [GOOD] >> BasicUsage::TSimpleWriteSession_AutoSeqNo_BasicUsage >> KqpBatchUpdate::SimpleOnePartition [GOOD] >> TColumnShardTestSchema::TTL+Reboot-Internal+FirstPkColumn [GOOD] |98.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL+Reboot-Internal+FirstPkColumn [GOOD] Test command err: Running TestTtl ttlColumnType=Timestamp 2025-06-24T21:29:33.609876Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828672, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:29:33.614947Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828673, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:29:33.615442Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:33.648867Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:33.649174Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:33.657346Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:33.657624Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 
2025-06-24T21:29:33.657948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:33.658126Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:33.658244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:33.658353Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:33.658482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:33.658590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:33.658713Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:33.658833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:33.658950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:33.686641Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828684, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:29:33.692160Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:33.692461Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:33.692543Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:33.692746Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:33.692936Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:33.693024Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:33.693068Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:33.693165Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:33.693246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:33.693293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:33.693389Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:33.693609Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:33.693679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:33.693729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:33.693764Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:33.693886Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:33.693951Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:33.694009Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:33.694047Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:33.694108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:33.694148Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:33.694185Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 
2025-06-24T21:29:33.694457Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:33.694507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:33.694547Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:33.694813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:33.694879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:33.694933Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:33.695057Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:33.695108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:33.695167Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:33.695264Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:33.695340Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:33.695390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:33.695435Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:33.695966Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=65; 2025-06-24T21:29:33.696065Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=39; 2025-06-24T21:29:33.696159Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=48; 2025-06-24T21:29:33.696254Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=42; 
2025-06-24T21:29:33.696353Z node 1 : ... received;interval_idx=0;intervalId=90; 2025-06-24T21:30:01.164595Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1000;merger=0;interval_id=90; 2025-06-24T21:30:01.164642Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-24T21:30:01.164735Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:01.164772Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=1000;finished=1; 2025-06-24T21:30:01.164810Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:30:01.165113Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:30:01.165302Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:1000;schema=timestamp: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:01.165346Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:30:01.165470Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);columns=1;rows=1000; 2025-06-24T21:30:01.165545Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:254;stage=data_format;batch_size=8000;num_rows=1000;batch_columns=timestamp; 2025-06-24T21:30:01.165840Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:587:2564];bytes=8000;rows=1000;faults=0;finished=0;fault=0;schema=timestamp: uint64; 2025-06-24T21:30:01.166017Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:01.166131Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:01.166268Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:01.166439Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:30:01.166598Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:01.166721Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:01.166767Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan 
[5:588:2565] finished for tablet 9437184 2025-06-24T21:30:01.167307Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[5:587:2564];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.014},{"events":["l_ProduceResults","f_Finish"],"t":0.015},{"events":["l_ack","l_processing","l_Finish"],"t":0.016}],"full":{"a":1750800601150780,"name":"_full_task","f":1750800601150780,"d_finished":0,"c":0,"l":1750800601166821,"d":16041},"events":[{"name":"bootstrap","f":1750800601151053,"d_finished":3023,"c":1,"l":1750800601154076,"d":3023},{"a":1750800601166418,"name":"ack","f":1750800601165086,"d_finished":1215,"c":1,"l":1750800601166301,"d":1618},{"a":1750800601166403,"name":"processing","f":1750800601154189,"d_finished":7127,"c":8,"l":1750800601166303,"d":7545},{"name":"ProduceResults","f":1750800601152779,"d_finished":2688,"c":11,"l":1750800601166751,"d":2688},{"a":1750800601166755,"name":"Finish","f":1750800601166755,"d_finished":0,"c":0,"l":1750800601166821,"d":66},{"name":"task_result","f":1750800601154220,"d_finished":5773,"c":7,"l":1750800601164898,"d":5773}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:01.167422Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:587:2564];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:30:01.167895Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[5:587:2564];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.014},{"events":["l_ProduceResults","f_Finish"],"t":0.015},{"events":["l_ack","l_processing","l_Finish"],"t":0.016}],"full":{"a":1750800601150780,"name":"_full_task","f":1750800601150780,"d_finished":0,"c":0,"l":1750800601167470,"d":16690},"events":[{"name":"bootstrap","f":1750800601151053,"d_finished":3023,"c":1,"l":1750800601154076,"d":3023},{"a":1750800601166418,"name":"ack","f":1750800601165086,"d_finished":1215,"c":1,"l":1750800601166301,"d":2267},{"a":1750800601166403,"name":"processing","f":1750800601154189,"d_finished":7127,"c":8,"l":1750800601166303,"d":8194},{"name":"ProduceResults","f":1750800601152779,"d_finished":2688,"c":11,"l":1750800601166751,"d":2688},{"a":1750800601166755,"name":"Finish","f":1750800601166755,"d_finished":0,"c":0,"l":1750800601167470,"d":715},{"name":"task_result","f":1750800601154220,"d_finished":5773,"c":7,"l":1750800601164898,"d":5773}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:01.167993Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:30:01.148688Z;index_granules=0;index_portions=1;index_batches=10;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=59184;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=59184;selected_rows=0; 2025-06-24T21:30:01.168056Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:30:01.168447Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:588:2565];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; >> ReadSessionImplTest::SuccessfulInit [GOOD] >> ReadSessionImplTest::SuccessfulInitAndThenTimeoutCallback [GOOD] >> ReadSessionImplTest::StopsRetryAfterFailedAttempt [GOOD] >> ReadSessionImplTest::StopsRetryAfterTimeout [GOOD] >> ReadSessionImplTest::UnpackBigBatchWithTwoPartitions ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::SimpleOnePartition [GOOD] Test command err: Trying to start YDB, gRPC: 27758, MsgBus: 31492 2025-06-24T21:29:12.889913Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631114073576093:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:12.890015Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ab9/r3tmp/tmpYq0nRf/pdisk_1.dat 2025-06-24T21:29:13.439290Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:13.439455Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:13.446014Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:13.475266Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631114073576076:2079] 1750800552868354 != 1750800552868357 2025-06-24T21:29:13.483516Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27758, node 1 2025-06-24T21:29:13.605837Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:13.605873Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:13.605883Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:13.606138Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:13.902260Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31492 TClient is connected to server localhost:31492 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:14.381814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:29:14.414226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.566722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.730697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.796926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:16.556945Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631131253446891:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.557045Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.847074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.888441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.929137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.970970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:17.032628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:17.078946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:17.118814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:17.205020Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631135548414848:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:17.205106Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:17.205323Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631135548414853:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:17.209158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:17.220631Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631135548414855:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:17.309979Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631135548414906:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:17.895306Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631114073576093:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:17.895357Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 63926, MsgBus: 24518 2025-06-24T21:29:24.413650Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631164439433423:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:24.415908Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPa ... s.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519631219423650396:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:41.286371Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 13142, MsgBus: 10416 2025-06-24T21:29:48.587127Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519631269614712256:2151];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002ab9/r3tmp/tmprGCZWX/pdisk_1.dat 2025-06-24T21:29:48.709076Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:48.777472Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:48.783326Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519631269614712129:2079] 1750800588575670 != 1750800588575673 2025-06-24T21:29:48.793568Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:48.793653Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:48.796257Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13142, node 4 2025-06-24T21:29:48.882020Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:48.882045Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:48.882055Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-24T21:29:48.882240Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10416 TClient is connected to server localhost:10416 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:49.368761Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:49.376165Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:29:49.386844Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:49.482053Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:49.584922Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:49.683596Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:29:49.766345Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:52.516468Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519631286794582946:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:52.516559Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:52.586135Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:52.619056Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:52.660374Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:52.693888Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:52.728284Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:52.799431Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:52.834959Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:52.932384Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519631286794583606:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:52.932465Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:52.932540Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519631286794583611:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:52.936787Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:52.956396Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519631286794583613:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:29:53.023531Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519631291089550960:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:53.583217Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519631269614712256:2151];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:53.583292Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> ReadSessionImplTest::UnpackBigBatchWithTwoPartitions [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease >> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD] Test command err: 2025-06-24T21:30:02.371688Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.371757Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.371794Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:30:02.372203Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:30:02.372636Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-24T21:30:02.372711Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.373589Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.373608Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.373624Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:30:02.373936Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:30:02.374186Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-24T21:30:02.374235Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.375010Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.375033Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.375050Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:30:02.375437Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. 
Description: 2025-06-24T21:30:02.375511Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.375539Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.375671Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: INTERNAL_ERROR Issues: "
: Error: Failed to establish connection to server "" ( cluster cluster). Attempts done: 1 " } 2025-06-24T21:30:02.376492Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.376512Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.376527Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:30:02.377070Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-24T21:30:02.377147Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.377177Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.377271Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: TIMEOUT Issues: "
: Error: Failed to establish connection to server. Attempts done: 1 " } 2025-06-24T21:30:02.378766Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-24T21:30:02.378830Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-24T21:30:02.378858Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:30:02.379246Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:30:02.379859Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:30:02.392053Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-24T21:30:02.392555Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:30:02.393016Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 2. Cluster: "TestCluster". Topic: "TestTopic". Partition: 2. Read offset: (NULL) 2025-06-24T21:30:02.396386Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-50) 2025-06-24T21:30:02.396671Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:30:02.396744Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-24T21:30:02.396771Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-24T21:30:02.396804Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2025-06-24T21:30:02.396854Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2025-06-24T21:30:02.396885Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2025-06-24T21:30:02.396911Z :DEBUG: Take Data. Partition 1. Read: {0, 6} (7-7) 2025-06-24T21:30:02.396929Z :DEBUG: Take Data. Partition 1. Read: {0, 7} (8-8) 2025-06-24T21:30:02.396970Z :DEBUG: Take Data. Partition 1. Read: {0, 8} (9-9) 2025-06-24T21:30:02.396992Z :DEBUG: Take Data. Partition 1. Read: {0, 9} (10-10) 2025-06-24T21:30:02.397022Z :DEBUG: Take Data. Partition 1. Read: {0, 10} (11-11) 2025-06-24T21:30:02.397053Z :DEBUG: Take Data. Partition 1. Read: {0, 11} (12-12) 2025-06-24T21:30:02.397077Z :DEBUG: Take Data. Partition 1. Read: {0, 12} (13-13) 2025-06-24T21:30:02.397094Z :DEBUG: Take Data. Partition 1. Read: {0, 13} (14-14) 2025-06-24T21:30:02.397110Z :DEBUG: Take Data. Partition 1. Read: {0, 14} (15-15) 2025-06-24T21:30:02.397139Z :DEBUG: Take Data. Partition 1. Read: {0, 15} (16-16) 2025-06-24T21:30:02.397178Z :DEBUG: Take Data. Partition 1. Read: {0, 16} (17-17) 2025-06-24T21:30:02.397199Z :DEBUG: Take Data. Partition 1. Read: {0, 17} (18-18) 2025-06-24T21:30:02.397216Z :DEBUG: Take Data. Partition 1. Read: {0, 18} (19-19) 2025-06-24T21:30:02.397236Z :DEBUG: Take Data. Partition 1. Read: {0, 19} (20-20) 2025-06-24T21:30:02.397268Z :DEBUG: Take Data. Partition 1. Read: {0, 20} (21-21) 2025-06-24T21:30:02.397289Z :DEBUG: Take Data. Partition 1. Read: {0, 21} (22-22) 2025-06-24T21:30:02.397315Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (23-23) 2025-06-24T21:30:02.397338Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (24-24) 2025-06-24T21:30:02.397358Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (25-25) 2025-06-24T21:30:02.397374Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (26-26) 2025-06-24T21:30:02.397397Z :DEBUG: Take Data. Partition 1. Read: {1, 4} (27-27) 2025-06-24T21:30:02.397414Z :DEBUG: Take Data. Partition 1. 
Read: {1, 5} (28-28) 2025-06-24T21:30:02.397431Z :DEBUG: Take Data. Partition 1. Read: {1, 6} (29-29) 2025-06-24T21:30:02.397447Z :DEBUG: Take Data. Partition 1. Read: {1, 7} (30-30) 2025-06-24T21:30:02.397499Z :DEBUG: Take Data. Partition 1. Read: {1, 8} (31-31) 2025-06-24T21:30:02.397517Z :DEBUG: Take Data. Partition 1. Read: {1, 9} (32-32) 2025-06-24T21:30:02.397597Z :DEBUG: Take Data. Partition 1. Read: {1, 10} (33-33) 2025-06-24T21:30:02.397616Z :DEBUG: Take Data. Partition 1. Read: {1, 11} (34-34) 2025-06-24T21:30:02.397633Z :DEBUG: Take Data. Partition 1. Read: {1, 12} (35-35) 2025-06-24T21:30:02.397662Z :DEBUG: Take Data. Partition 1. Read: {1, 13} (36-36) 2025-06-24T21:30:02.397690Z :DEBUG: Take Data. Partition 1. Read: {1, 14} (37-37) 2025-06-24T21:30:02.397708Z :DEBUG: Take Data. Partition 1. Read: {1, 15} (38-38) 2025-06-24T21:30:02.397726Z :DEBUG: Take Data. Partition 1. Read: {1, 16} (39-39) 2025-06-24T21:30:02.397742Z :DEBUG: Take Data. Partition 1. Read: {1, 17} (40-40) 2025-06-24T21:30:02.397761Z :DEBUG: Take Data. Partition 1. Read: {1, 18} (41-41) 2025-06-24T21:30:02.397779Z :DEBUG: Take Data. Partition 1. Read: {1, 19} (42-42) 2025-06-24T21:30:02.397795Z :DEBUG: Take Data. Partition 1. Read: {1, 20} (43-43) 2025-06-24T21:30:02.397811Z :DEBUG: Take Data. Partition 1. Read: {1, 21} (44-44) 2025-06-24T21:30:02.397828Z :DEBUG: Take Data. Partition 1. Read: {1, 22} (45-45) 2025-06-24T21:30:02.397845Z :DEBUG: Take Data. Partition 1. Read: {1, 23} (46-46) 2025-06-24T21:30:02.397879Z :DEBUG: Take Data. Partition 1. Read: {1, 24} (47-47) 2025-06-24T21:30:02.397896Z :DEBUG: Take Data. Partition 1. Read: {1, 25} (48-48) 2025-06-24T21:30:02.397913Z :DEBUG: Take Data. Partition 1. Read: {1, 26} (49-49) 2025-06-24T21:30:02.397928Z :DEBUG: Take Data. Partition 1. Read: {1, 27} (50-50) 2025-06-24T21:30:02.397979Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2025-06-24T21:30:02.400181Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 2 (51-100) 2025-06-24T21:30:02.400463Z :DEBUG: Take Data. Partition 2. Read: {0, 0} (51-51) 2025-06-24T21:30:02.400513Z :DEBUG: Take Data. Partition 2. Read: {0, 1} (52-52) 2025-06-24T21:30:02.400558Z :DEBUG: Take Data. Partition 2. Read: {0, 2} (53-53) 2025-06-24T21:30:02.400576Z :DEBUG: Take Data. Partition 2. Read: {0, 3} (54-54) 2025-06-24T21:30:02.400616Z :DEBUG: Take Data. Partition 2. Read: {0, 4} (55-55) 2025-06-24T21:30:02.400635Z :DEBUG: Take Data. Partition 2. Read: {0, 5} (56-56) 2025-06-24T21:30:02.400653Z :DEBUG: Take Data. Partition 2. Read: {0, 6} (57-57) 2025-06-24T21:30:02.400672Z :DEBUG: Take Data. Partition 2. Read: {0, 7} (58-58) 2025-06-24T21:30:02.400704Z :DEBUG: Take Data. Partition 2. Read: {0, 8} (59-59) 2025-06-24T21:30:02.400732Z :DEBUG: Take Data. Partition 2. Read: {0, 9} (60-60) 2025-06-24T21:30:02.400771Z :DEBUG: Take Data. Partition 2. Read: {0, 10} (61-61) 2025-06-24T21:30:02.400790Z :DEBUG: Take Data. Partition 2. Read: {0, 11} (62-62) 2025-06-24T21:30:02.400827Z :DEBUG: Take Data. Partition 2. Read: {0, 12} (63-63) 2025-06-24T21:30:02.400844Z :DEBUG: Take Data. Partition 2. Read: {0, 13} (64-64) 2025-06-24T21:30:02.400862Z :DEBUG: Take Data. Partition 2. Read: {0, 14} (65-65) 2025-06-24T21:30:02.400918Z :DEBUG: Take Data. Partition 2. Read: {0, 15} (66-66) 2025-06-24T21:30:02.400983Z :DEBUG: Take Data. Partition 2. Read: {0, 16} (67-67) 2025-06-24T21:30:02.401004Z :DEBUG: Take Data. Partition 2. 
Read: {0, 17} (68-68) 2025-06-24T21:30:02.401040Z :DEBUG: Take Data. Partition 2. Read: {0, 18} (69-69) 2025-06-24T21:30:02.401056Z :DEBUG: Take Data. Partition 2. Read: {0, 19} (70-70) 2025-06-24T21:30:02.401073Z :DEBUG: Take Data. Partition 2. Read: {0, 20} (71-71) 2025-06-24T21:30:02.401087Z :DEBUG: Take Data. Partition 2. Read: {0, 21} (72-72) 2025-06-24T21:30:02.401105Z :DEBUG: Take Data. Partition 2. Read: {1, 0} (73-73) 2025-06-24T21:30:02.401136Z :DEBUG: Take Data. Partition 2. Read: {1, 1} (74-74) 2025-06-24T21:30:02.401163Z :DEBUG: Take Data. Partition 2. Read: {1, 2} (75-75) 2025-06-24T21:30:02.401188Z :DEBUG: Take Data. Partition 2. Read: {1, 3} (76-76) 2025-06-24T21:30:02.401207Z :DEBUG: Take Data. Partition 2. Read: {1, 4} (77-77) 2025-06-24T21:30:02.401265Z :DEBUG: Take Data. Partition 2. Read: {1, 5} (78-78) 2025-06-24T21:30:02.401290Z :DEBUG: Take Data. Partition 2. Read: {1, 6} (79-79) 2025-06-24T21:30:02.401307Z :DEBUG: Take Data. Partition 2. Read: {1, 7} (80-80) 2025-06-24T21:30:02.401326Z :DEBUG: Take Data. Partition 2. Read: {1, 8} (81-81) 2025-06-24T21:30:02.401344Z :DEBUG: Take Data. Partition 2. Read: {1, 9} (82-82) 2025-06-24T21:30:02.401430Z :DEBUG: Take Data. Partition 2. Read: {1, 10} (83-83) 2025-06-24T21:30:02.401457Z :DEBUG: Take Data. Partition 2. Read: {1, 11} (84-84) 2025-06-24T21:30:02.401489Z :DEBUG: Take Data. Partition 2. Read: {1, 12} (85-85) 2025-06-24T21:30:02.401507Z :DEBUG: Take Data. Partition 2. Read: {1, 13} (86-86) 2025-06-24T21:30:02.401525Z :DEBUG: Take Data. Partition 2. Read: {1, 14} (87-87) 2025-06-24T21:30:02.401543Z :DEBUG: Take Data. Partition 2. Read: {1, 15} (88-88) 2025-06-24T21:30:02.401565Z :DEBUG: Take Data. Partition 2. Read: {1, 16} (89-89) 2025-06-24T21:30:02.401584Z :DEBUG: Take Data. Partition 2. Read: {1, 17} (90-90) 2025-06-24T21:30:02.401600Z :DEBUG: Take Data. Partition 2. Read: {1, 18} (91-91) 2025-06-24T21:30:02.401618Z :DEBUG: Take Data. Partition 2. Read: {1, 19} (92-92) 2025-06-24T21:30:02.401638Z :DEBUG: Take Data. Partition 2. Read: {1, 20} (93-93) 2025-06-24T21:30:02.401654Z :DEBUG: Take Data. Partition 2. Read: {1, 21} (94-94) 2025-06-24T21:30:02.401674Z :DEBUG: Take Data. Partition 2. Read: {1, 22} (95-95) 2025-06-24T21:30:02.401691Z :DEBUG: Take Data. Partition 2. Read: {1, 23} (96-96) 2025-06-24T21:30:02.401774Z :DEBUG: Take Data. Partition 2. Read: {1, 24} (97-97) 2025-06-24T21:30:02.401795Z :DEBUG: Take Data. Partition 2. Read: {1, 25} (98-98) 2025-06-24T21:30:02.401822Z :DEBUG: Take Data. Partition 2. Read: {1, 26} (99-99) 2025-06-24T21:30:02.401843Z :DEBUG: Take Data. Partition 2. Read: {1, 27} (100-100) 2025-06-24T21:30:02.401916Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2025-06-24T21:30:02.402093Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-24T21:30:02.403785Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.403813Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.403858Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:30:02.404252Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-24T21:30:02.404752Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:30:02.404938Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.405441Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:30:02.506658Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.506948Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-24T21:30:02.507022Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:30:02.507063Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-24T21:30:02.507136Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-24T21:30:02.710308Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 2025-06-24T21:30:02.811140Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-24T21:30:02.811329Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-24T21:30:02.811500Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-24T21:30:02.812631Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.812657Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.812679Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:30:02.812981Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:30:02.813445Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:30:02.813608Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.815647Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:30:02.916722Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:02.916933Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-24T21:30:02.917045Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:30:02.917102Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-24T21:30:02.917195Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 2025-06-24T21:30:02.917314Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-24T21:30:02.917478Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-24T21:30:02.917595Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". 
Partition: 1 2025-06-24T21:30:02.919271Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster >> ReadSessionImplTest::ForcefulDestroyPartitionStream [GOOD] >> ReadSessionImplTest::DestroyPartitionStreamRequest [GOOD] >> ReadSessionImplTest::DecompressZstdEmptyMessage [GOOD] >> ReadSessionImplTest::PacksBatches_BatchABitBiggerThanLimit >> ReadSessionImplTest::PacksBatches_BatchABitBiggerThanLimit [GOOD] >> ReadSessionImplTest::PacksBatches_BatchesEqualToServerBatches [GOOD] >> ReadSessionImplTest::HoleBetweenOffsets [GOOD] >> ReadSessionImplTest::LOGBROKER_7702 [GOOD] >> TPersQueueNewSchemeCacheTest::TestReadAtTimestamp_10 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::LOGBROKER_7702 [GOOD] Test command err: 2025-06-24T21:30:04.186652Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.186728Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.186759Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:30:04.187293Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:30:04.188073Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:30:04.199626Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.200192Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:30:04.201722Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.201741Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.201763Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:30:04.202167Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:30:04.202812Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:30:04.203018Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.203247Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:30:04.203646Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-24T21:30:04.204707Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.204733Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.204758Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:30:04.205049Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-24T21:30:04.205852Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:30:04.206023Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.206301Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:30:04.207053Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.207368Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:30:04.207495Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:30:04.207542Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-24T21:30:04.208731Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.208762Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.208789Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:30:04.209157Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:30:04.209592Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:30:04.209732Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.210047Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 11 Compressed message data size: 31 2025-06-24T21:30:04.211179Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:30:04.211497Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-24T21:30:04.211866Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-24T21:30:04.212096Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-24T21:30:04.212221Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:30:04.212265Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-24T21:30:04.212316Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-24T21:30:04.212465Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). 
Partition stream id: 1 Getting new event 2025-06-24T21:30:04.212502Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-24T21:30:04.212528Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-24T21:30:04.212547Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-24T21:30:04.212688Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 Getting new event 2025-06-24T21:30:04.212774Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-24T21:30:04.212793Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-06-24T21:30:04.212811Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-24T21:30:04.212903Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 Getting new event 2025-06-24T21:30:04.212946Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-24T21:30:04.212973Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-24T21:30:04.212996Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-24T21:30:04.213108Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [7, 9). 
Partition stream id: 1 2025-06-24T21:30:04.214601Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.214644Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.214668Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:30:04.215079Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:30:04.215557Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:30:04.215698Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.215928Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 10 Compressed message data size: 30 2025-06-24T21:30:04.217046Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:30:04.217312Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-24T21:30:04.217702Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-24T21:30:04.217963Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-24T21:30:04.218122Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:30:04.218178Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-24T21:30:04.218215Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-24T21:30:04.218260Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-24T21:30:04.218304Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 4, size 40 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-24T21:30:04.218529Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 5). 
Partition stream id: 1 Getting new event 2025-06-24T21:30:04.218621Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-24T21:30:04.218642Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-06-24T21:30:04.218665Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-24T21:30:04.218682Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-24T21:30:04.218705Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 4, size 40 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-24T21:30:04.218880Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 9). Partition stream id: 1 2025-06-24T21:30:04.220164Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.220200Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.220233Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:30:04.220634Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:30:04.221206Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:30:04.221430Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:04.221745Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:30:04.222916Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:30:04.223717Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:30:04.224081Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (10-11) 2025-06-24T21:30:04.224265Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-24T21:30:04.224394Z :DEBUG: Take Data. Partition 1. 
Read: {0, 0} (1-1) 2025-06-24T21:30:04.224440Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-24T21:30:04.224464Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (10-10) 2025-06-24T21:30:04.224483Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (11-11) 2025-06-24T21:30:04.224536Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes 2025-06-24T21:30:04.224570Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes got data event: DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 11 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2025-06-24T21:30:04.224714Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 Got commit req { cookies { assign_id: 1 partition_cookie: 1 } } 2025-06-24T21:30:04.224834Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [10, 12). 
Partition stream id: 1 Got commit req { cookies { assign_id: 1 partition_cookie: 2 } } >> TBlobStorageProxyTest::TestProxyPutSingleTimeout >> TBlobStorageProxyTest::TestCollectGarbagePersistence >> TBlobStorageProxyTest::TestProxySimpleDiscover >> TBlobStorageProxyTest::TestPersistence ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueNewSchemeCacheTest::TestReadAtTimestamp_10 [GOOD] Test command err: 2025-06-24T21:29:27.656520Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631179399440833:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:27.656578Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:27.726033Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631179680243637:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:27.763480Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:28.098505Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:28.103969Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002322/r3tmp/tmpiG0p5E/pdisk_1.dat 2025-06-24T21:29:28.546607Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:28.566526Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:28.566635Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:28.568253Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:28.568326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:28.584764Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:29:28.585363Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:28.585901Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:28.676674Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 19432, node 1 2025-06-24T21:29:28.737863Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:29.016414Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/002322/r3tmp/yandexrciPRT.tmp 2025-06-24T21:29:29.016447Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: 
/home/runner/.ya/build/build_root/2btm/002322/r3tmp/yandexrciPRT.tmp 2025-06-24T21:29:29.019603Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/002322/r3tmp/yandexrciPRT.tmp 2025-06-24T21:29:29.019734Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:29.383360Z INFO: TTestServer started on Port 24676 GrpcPort 19432 TClient is connected to server localhost:24676 PQClient connected to localhost:19432 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:30.009800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:29:30.117891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T21:29:32.659278Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631179399440833:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:32.659361Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:32.734312Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519631179680243637:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:32.734457Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:32.811201Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631200874278383:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:32.816828Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631200874278373:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:32.816983Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:32.817940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:32.830901Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631200874278419:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:32.831417Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:32.857210Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631200874278387:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-24T21:29:33.182141Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631200874278467:2762] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:33.230477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:33.337808Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631205169245774:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:29:33.338827Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YWEyM2UxMjAtZmM4ZGRkYTUtNGUxMTUyZjMtZTRlZTZhNzQ=, ActorId: [1:7519631200874278370:2299], ActorState: ExecuteState, TraceId: 01jyhxh8bq466ggwfp8a09w64e, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:29:33.341438Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:29:33.359980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:33.551928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, wei ... 
ikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /Root/PQ/Config/V2/Cluster PathId: Partial: 0 } 2025-06-24T21:30:03.652703Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519631253021088444:2128], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /Root/PQ/Config/V2/Cluster PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [3:7519631270200958631:2790] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 28 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750800588551 PathId: [OwnerId: 72057594046644480, LocalPathId: 10] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, by pathId# nullptr 2025-06-24T21:30:03.652739Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519631253021088444:2128], cacheItem# { Subscriber: { Subscriber: [3:7519631270200958631:2790] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 28 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750800588551 PathId: [OwnerId: 72057594046644480, LocalPathId: 10] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: Root/PQ/Config/V2/Cluster TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpUnknown RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 28 IsSync: true Partial: 0 } 2025-06-24T21:30:03.652774Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519631253021088444:2128], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /Root/PQ/Config/V2/Versions PathId: Partial: 0 } 2025-06-24T21:30:03.652805Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519631253021088444:2128], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /Root/PQ/Config/V2/Versions PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [3:7519631270200958753:2876] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 28 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750800588817 PathId: [OwnerId: 72057594046644480, LocalPathId: 12] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, by pathId# nullptr 2025-06-24T21:30:03.652836Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519631253021088444:2128], cacheItem# { Subscriber: { Subscriber: [3:7519631270200958753:2876] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 28 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750800588817 PathId: [OwnerId: 72057594046644480, LocalPathId: 12] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: Root/PQ/Config/V2/Versions TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpUnknown RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 28 IsSync: true Partial: 0 } 2025-06-24T21:30:03.653009Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519631334625471408:4902], recipient# [3:7519631334625471404:2868], result# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: 
Root/PQ/Config/V2/Cluster TableId: [72057594046644480:10:1] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } },{ Path: Root/PQ/Config/V2/Cluster TableId: [72057594046644480:10:1] RequestType: ByPath Operation: OpUnknown RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:30:03.653178Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519631334625471409:4903], recipient# [3:7519631334625471406:2870], result# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/PQ/Config/V2/Versions TableId: [72057594046644480:12:1] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } },{ Path: Root/PQ/Config/V2/Versions TableId: [72057594046644480:12:1] RequestType: ByPath Operation: OpUnknown RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:30:03.653356Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519631253021088444:2128], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: TableId: [72057594046644480:10:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:30:03.653415Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519631253021088444:2128], cacheItem# { Subscriber: { Subscriber: [3:7519631270200958631:2790] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 28 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750800588551 PathId: [OwnerId: 72057594046644480, LocalPathId: 10] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: TableId: [72057594046644480:10:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, 
props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:30:03.653492Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519631253021088444:2128], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: TableId: [72057594046644480:12:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:30:03.653538Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519631253021088444:2128], cacheItem# { Subscriber: { Subscriber: [3:7519631270200958753:2876] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 28 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750800588817 PathId: [OwnerId: 72057594046644480, LocalPathId: 12] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: TableId: [72057594046644480:12:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:30:03.653603Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519631334625471412:4904], recipient# [3:7519631253021088370:2190], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/PQ/Config/V2/Cluster TableId: [72057594046644480:10:1] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:30:03.653650Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519631334625471413:4905], recipient# [3:7519631253021088370:2190], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/PQ/Config/V2/Versions TableId: [72057594046644480:12:1] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:30:03.653725Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519631253021088444:2128], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:30:03.653798Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519631253021088444:2128], cacheItem# { Subscriber: { Subscriber: [3:7519631257316056229:2444] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 28 } Filled: 1 Status: 
StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750800585345 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:30:03.653898Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519631334625471414:4906], recipient# [3:7519631253021088370:2190], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } >> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap-useOltpSink >> TBlobStorageProxyTest::TestProxyPutSingleTimeout [GOOD] >> TBlobStorageProxyTest::TestProxyRestoreOnDiscoverBlock >> PersQueueSdkReadSessionTest::ReadSessionWithExplicitlySpecifiedPartitions [GOOD] >> PersQueueSdkReadSessionTest::SettingsValidation >> TBlobStorageProxyTest::TestProxySimpleDiscover [GOOD] >> TBlobStorageProxyTest::TestProxySimpleDiscoverMaxi >> TBlobStorageProxyTest::TestPersistence [GOOD] >> TBlobStorageProxyTest::TestPartialGetStripe >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDSEveryQueryWorks-ColumnStore 2025-06-24 21:30:06,281 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 21:30:06,478 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. 
Process tree before termination: pid rss ref pdirt 894669 47.5M 46.4M 24.0M test_tool run_ut @/home/runner/.ya/build/build_root/2btm/0039d2/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/chunk173/testing_out_stuff/test_tool.args 894763 2.2G 2.2G 1.7G └─ ydb-core-kqp-ut-join --trace-path-append /home/runner/.ya/build/build_root/2btm/0039d2/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/chunk173/ytest.report Test command err: Trying to start YDB, gRPC: 3401, MsgBus: 29238 2025-06-24T21:20:08.870606Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519628776767913946:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:08.871757Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0039d2/r3tmp/tmpdGFheu/pdisk_1.dat 2025-06-24T21:20:09.747276Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519628776767913834:2079] 1750800008790113 != 1750800008790116 2025-06-24T21:20:09.764884Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:20:09.765464Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:20:09.765584Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:20:09.770216Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3401, node 1 2025-06-24T21:20:09.931825Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:20:10.063771Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:20:10.063792Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:20:10.063802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:20:10.063910Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29238 TClient is connected to server localhost:29238 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:20:11.115171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:20:13.860759Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519628776767913946:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:20:13.860821Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:20:14.013136Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628802537718259:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:14.013242Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:14.016008Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519628802537718271:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:20:14.025486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:20:14.043022Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519628802537718273:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:20:14.138794Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519628802537718326:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:20:15.307938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:15.445959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:15.492378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:15.542189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:15.623229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:15.700501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:15.784893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:15.842230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:15.927120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:15.994760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:16.087209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:16.125341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:16.179278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:20:16.225069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/co ... 
ld_date_sk = d_date_sk\n and d_week_seq = $week_seq_1999\n group by item.i_brand_id,item.i_class_id,item.i_category_id\n having sum(ss_quantity*ss_list_price) > $avg_sales) last_year\n where this_year.i_brand_id= last_year.i_brand_id\n and this_year.i_class_id = last_year.i_class_id\n and this_year.i_category_id = last_year.i_category_id\n order by ty_channel, ty_brand, ty_class, ty_category\n limit 100;\n\n-- end query 1 in stream 0 using template query14.tpl\n", parameters: 0b 2025-06-24T21:27:33.825723Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhxd70xfe7bcvrbvdnkjaex", SessionId: ydb://session/3?node_id=1&id=Mjk5Njg0MGQtNDI0MTZkN2QtM2YwOTQ5NzktYmNmMzdiMw==, Slow query, duration: 13.475512s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma warning(\"disable\", \"4527\");\n\n$z0 = 0;\n$z1_2 = 1.2;\n$z1_3 = 1.3;\n$z0_9 = 0.9;\n$z0_99 = 0.99;\n$z1_49 = 1.49;\n\n$z0_35 = 0;\n$z0_1_35 = 0.1;\n$z1_2_35 = 1.2;\n$z0_05_35 = 0.05;\n$z0_9_35 = 0.9;\n$z1_1_35 = 1.1;\n$z0_5_35 = 0.5;\n$z100_35 = 100.;\n$z0_0001_35 = 0.0001;\n$z7_35 = 7.;\n\n$z0_12 = 0.;\n$z1_12 = 1;\n$z0_0100001_12 = 0.0100001;\n$z0_06_12 = 0.06;\n$z0_2_12 = 0.2;\n\n$scale_factor = 1;\n\n$round = ($x, $y) -> { return Math::Round($x, $y); };\n$upscale = ($x) -> { return $x; };\n\n$todecimal = ($x, $p, $s) -> { return cast($x as double); };\n\n\n\n-- NB: Subquerys\n$orders_with_several_warehouses = (\n select cs_order_number\n from `/Root/test/ds/catalog_sales` as catalog_sales\n group by cs_order_number\n having count(distinct cs_warehouse_sk) > 1\n);\n\n-- start query 1 in stream 0 using template query16.tpl and seed 171719422\nselect\n count(distinct cs1.cs_order_number) as `order count`\n ,sum(cs_ext_ship_cost) as `total shipping cost`\n ,sum(cs_net_profit) as `total net profit`\nfrom\n `/Root/test/ds/catalog_sales` cs1\n cross join `/Root/test/ds/date_dim` as date_dim\n cross join `/Root/test/ds/customer_address` as customer_address\n cross join `/Root/test/ds/call_center` as call_center\n left semi join $orders_with_several_warehouses cs2 on cs1.cs_order_number = cs2.cs_order_number\n left only join `/Root/test/ds/catalog_returns` cr1 on cs1.cs_order_number = cr1.cr_order_number\nwhere\n cast(d_date as date) between cast('2002-2-01' as date) and\n (cast('2002-2-01' as date) + DateTime::IntervalFromDays(60))\nand cs1.cs_ship_date_sk = d_date_sk\nand cs1.cs_ship_addr_sk = ca_address_sk\nand ca_state = 'GA'\nand cs1.cs_call_center_sk = cc_call_center_sk\nand cc_county in ('Williamson County','Williamson County','Williamson County','Williamson County',\n 'Williamson County'\n)\norder by `order count`\nlimit 100;\n\n-- end query 1 in stream 0 using template query16.tpl\n", parameters: 0b 2025-06-24T21:27:50.906814Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhxdm840kqfdek1zbdkj7sp", SessionId: ydb://session/3?node_id=1&id=Mjk5Njg0MGQtNDI0MTZkN2QtM2YwOTQ5NzktYmNmMzdiMw==, Slow query, duration: 17.013565s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma warning(\"disable\", \"4527\");\n\n$z0 = 0;\n$z1_2 = 1.2;\n$z1_3 = 1.3;\n$z0_9 = 0.9;\n$z0_99 = 0.99;\n$z1_49 = 1.49;\n\n$z0_35 = 0;\n$z0_1_35 = 0.1;\n$z1_2_35 = 1.2;\n$z0_05_35 = 0.05;\n$z0_9_35 = 0.9;\n$z1_1_35 = 1.1;\n$z0_5_35 = 0.5;\n$z100_35 = 100.;\n$z0_0001_35 = 0.0001;\n$z7_35 = 7.;\n\n$z0_12 = 0.;\n$z1_12 = 1;\n$z0_0100001_12 = 0.0100001;\n$z0_06_12 = 0.06;\n$z0_2_12 = 0.2;\n\n$scale_factor = 1;\n\n$round = ($x, $y) -> { return Math::Round($x, $y); };\n$upscale 
= ($x) -> { return $x; };\n\n$todecimal = ($x, $p, $s) -> { return cast($x as double); };\n\n\n\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query17.tpl and seed 1819994127\n$nantonull = ($n) -> {\n return case when Math::IsNaN($n)\n then null\n else $n\n end;\n};\n\nselect item.i_item_id\n ,item.i_item_desc\n ,store.s_state\n ,count(ss_quantity) as store_sales_quantitycount\n ,avg(ss_quantity) as store_sales_quantityave\n ,$nantonull(stddev_samp(ss_quantity)) as store_sales_quantitystdev\n ,$nantonull(stddev_samp(ss_quantity)/avg(ss_quantity)) as store_sales_quantitycov\n ,count(sr_return_quantity) as store_returns_quantitycount\n ,avg(sr_return_quantity) as store_returns_quantityave\n ,$nantonull(stddev_samp(sr_return_quantity)) as store_returns_quantitystdev\n ,$nantonull(stddev_samp(sr_return_quantity)/avg(sr_return_quantity)) as store_returns_quantitycov\n ,count(cs_quantity) as catalog_sales_quantitycount ,avg(cs_quantity) as catalog_sales_quantityave\n ,$nantonull(stddev_samp(cs_quantity)) as catalog_sales_quantitystdev\n ,$nantonull(stddev_samp(cs_quantity)/avg(cs_quantity)) as catalog_sales_quantitycov\n from `/Root/test/ds/store_sales` as store_sales\n cross join `/Root/test/ds/store_returns` as store_returns\n cross join `/Root/test/ds/catalog_sales` as catalog_sales\n cross join `/Root/test/ds/date_dim` d1\n cross join `/Root/test/ds/date_dim` d2\n cross join `/Root/test/ds/date_dim` d3\n cross join `/Root/test/ds/store` as store\n cross join `/Root/test/ds/item` as item\n where d1.d_quarter_name = '2001Q1'\n and d1.d_date_sk = ss_sold_date_sk\n and i_item_sk = ss_item_sk\n and s_store_sk = ss_store_sk\n and ss_customer_sk = sr_customer_sk\n and ss_item_sk = sr_item_sk\n and ss_ticket_number = sr_ticket_number\n and sr_returned_date_sk = d2.d_date_sk\n and d2.d_quarter_name in ('2001Q1','2001Q2','2001Q3')\n and sr_customer_sk = cs_bill_customer_sk\n and sr_item_sk = cs_item_sk\n and cs_sold_date_sk = d3.d_date_sk\n and d3.d_quarter_name in ('2001Q1','2001Q2','2001Q3')\n group by item.i_item_id\n ,item.i_item_desc\n ,store.s_state\n order by item.i_item_id\n ,item.i_item_desc\n ,store.s_state\nlimit 100;\n\n-- end query 1 in stream 0 using template query17.tpl\n", parameters: 0b 2025-06-24T21:28:39.485927Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhxe4wb42pj4be5snfcsd0w", SessionId: ydb://session/3?node_id=1&id=Mjk5Njg0MGQtNDI0MTZkN2QtM2YwOTQ5NzktYmNmMzdiMw==, Slow query, duration: 48.561876s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma warning(\"disable\", \"4527\");\n\n$z0 = 0;\n$z1_2 = 1.2;\n$z1_3 = 1.3;\n$z0_9 = 0.9;\n$z0_99 = 0.99;\n$z1_49 = 1.49;\n\n$z0_35 = 0;\n$z0_1_35 = 0.1;\n$z1_2_35 = 1.2;\n$z0_05_35 = 0.05;\n$z0_9_35 = 0.9;\n$z1_1_35 = 1.1;\n$z0_5_35 = 0.5;\n$z100_35 = 100.;\n$z0_0001_35 = 0.0001;\n$z7_35 = 7.;\n\n$z0_12 = 0.;\n$z1_12 = 1;\n$z0_0100001_12 = 0.0100001;\n$z0_06_12 = 0.06;\n$z0_2_12 = 0.2;\n\n$scale_factor = 1;\n\n$round = ($x, $y) -> { return Math::Round($x, $y); };\n$upscale = ($x) -> { return $x; };\n\n$todecimal = ($x, $p, $s) -> { return cast($x as double); };\n\n\n\n-- TODO this commit should be reverted upon proper fix for https://github.com/ydb-platform/ydb/issues/7565\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query18.tpl and seed 1978355063\nselect item.i_item_id i_item_id,\n customer_address.ca_country ca_country,\n customer_address.ca_state ca_state,\n customer_address.ca_county ca_county,\n avg( cast(cs_quantity as float)) agg1,\n avg( 
cast(cs_list_price as float)) agg2,\n avg( cast(cs_coupon_amt as float)) agg3,\n avg( cast(cs_sales_price as float)) agg4,\n avg( cast(cs_net_profit as float)) agg5,\n avg( cast(c_birth_year as float)) agg6,\n avg( cast(cd1.cd_dep_count as float)) agg7\n from `/Root/test/ds/catalog_sales` as catalog_sales\n cross join `/Root/test/ds/customer_demographics` cd1\n cross join `/Root/test/ds/date_dim` as date_dim\n cross join `/Root/test/ds/customer` as customer\n cross join `/Root/test/ds/customer_demographics` cd2\n cross join `/Root/test/ds/customer_address` as customer_address\n cross join `/Root/test/ds/item` as item\n where cs_sold_date_sk = d_date_sk and\n cs_item_sk = i_item_sk and\n cs_bill_cdemo_sk = cd1.cd_demo_sk and\n cs_bill_customer_sk = c_customer_sk and\n cd1.cd_gender = 'F' and\n cd1.cd_education_status = 'Unknown' and\n c_current_cdemo_sk = cd2.cd_demo_sk and\n c_current_addr_sk = ca_address_sk and\n c_birth_month in (1,6,8,9,12,2) and\n d_year = 1998 and\n ca_state in ('MS','IN','ND'\n ,'OK','NM','VA','MS')\n group by rollup (item.i_item_id, customer_address.ca_country, customer_address.ca_state, customer_address.ca_county)\n order by ca_country,\n ca_state,\n ca_county,\n\ti_item_id, agg6\n limit 100;\n\n-- end query 1 in stream 0 using template query18.tpl\n", parameters: 0b 2025-06-24T21:29:15.242957Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhxg8s2fpc6jgph91nz3mhw", SessionId: ydb://session/3?node_id=1&id=Mjk5Njg0MGQtNDI0MTZkN2QtM2YwOTQ5NzktYmNmMzdiMw==, Slow query, duration: 14.791333s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma warning(\"disable\", \"4527\");\n\n$z0 = 0;\n$z1_2 = 1.2;\n$z1_3 = 1.3;\n$z0_9 = 0.9;\n$z0_99 = 0.99;\n$z1_49 = 1.49;\n\n$z0_35 = 0;\n$z0_1_35 = 0.1;\n$z1_2_35 = 1.2;\n$z0_05_35 = 0.05;\n$z0_9_35 = 0.9;\n$z1_1_35 = 1.1;\n$z0_5_35 = 0.5;\n$z100_35 = 100.;\n$z0_0001_35 = 0.0001;\n$z7_35 = 7.;\n\n$z0_12 = 0.;\n$z1_12 = 1;\n$z0_0100001_12 = 0.0100001;\n$z0_06_12 = 0.06;\n$z0_2_12 = 0.2;\n\n$scale_factor = 1;\n\n$round = ($x, $y) -> { return Math::Round($x, $y); };\n$upscale = ($x) -> { return $x; };\n\n$todecimal = ($x, $p, $s) -> { return cast($x as double); };\n\n\n\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query22.tpl and seed 1819994127\nselect item.i_product_name\n ,item.i_brand\n ,item.i_class\n ,item.i_category\n ,avg(inv_quantity_on_hand) qoh\n from `/Root/test/ds/inventory` as inventory\n cross join `/Root/test/ds/date_dim` as date_dim\n cross join `/Root/test/ds/item` as item\n where inv_date_sk=d_date_sk\n and inv_item_sk=i_item_sk\n and d_month_seq between 1200 and 1200 + 11\n group by rollup(item.i_product_name\n ,item.i_brand\n ,item.i_class\n ,item.i_category)\norder by qoh, item.i_product_name, item.i_brand, item.i_class, item.i_category\nlimit 100;\n\n-- end query 1 in stream 0 using template query22.tpl\n", parameters: 0b Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/0039d2/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/chunk173/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the 
above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/0039d2/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/chunk173/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) >> KqpQueryService::DdlColumnTable [GOOD] >> KqpQueryService::DdlCache >> TBlobStorageProxyTest::TestPartialGetStripe [GOOD] >> TBlobStorageProxyTest::TestInFlightPuts |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestPartialGetStripe [GOOD] >> Compression::WriteRAW [GOOD] >> Compression::WriteGZIP >> KqpBatchUpdate::Large_3 [GOOD] >> TColumnShardTestSchema::TTL-Reboot+Internal-FirstPkColumn [GOOD] >> BasicUsage::TSimpleWriteSession_AutoSeqNo_BasicUsage [GOOD] >> BasicUsage::TWriteSession_AutoBatching [GOOD] >> BasicUsage::TWriteSession_BatchingProducesContinueTokens [GOOD] >> BasicUsage::BrokenCredentialsProvider >> TBlobStorageProxyTest::TestBlockPersistence ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::Large_3 [GOOD] Test command err: Trying to start YDB, gRPC: 14881, MsgBus: 14374 2025-06-24T21:29:14.292668Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631123522119459:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:14.310906Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002a98/r3tmp/tmpg7iQjz/pdisk_1.dat 2025-06-24T21:29:14.762634Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:14.765798Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631123522119364:2079] 1750800554260912 != 1750800554260915 2025-06-24T21:29:14.779883Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:14.780000Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:14.781908Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14881, node 1 2025-06-24T21:29:14.866959Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:14.866979Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:14.866988Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:14.867084Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
TClient is connected to server localhost:14374 2025-06-24T21:29:15.289574Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14374 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:15.517742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:15.543812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:15.677114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:15.855182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:15.927916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:29:17.718412Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631136407022885:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:17.718511Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:18.119444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.161690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.244851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.291492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.369931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.433291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.484999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:18.550976Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631140701990845:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:18.551086Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631140701990850:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:18.551087Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:18.554603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:18.566975Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631140701990852:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:29:18.644882Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631140701990903:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:19.266736Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631123522119459:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:19.266808Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:19.722079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:29.708260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot ... ble_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631242856176374:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:42.896941Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002a98/r3tmp/tmpUiMqSB/pdisk_1.dat 2025-06-24T21:29:43.079904Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:43.080004Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:43.083131Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:43.084507Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631242856176353:2079] 1750800582894698 != 1750800582894701 2025-06-24T21:29:43.100682Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27444, node 2 2025-06-24T21:29:43.163748Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:43.163777Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:43.163790Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:43.163957Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16600 TClient is connected to server localhost:16600 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:43.768761Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:43.786071Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:43.873430Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:43.980529Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:44.054478Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:44.133273Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:46.587318Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631260036047174:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:46.587421Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:46.661599Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:46.703960Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:46.747997Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:46.793761Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:46.837588Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:46.884411Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:46.941623Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:47.057931Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631264331015129:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:47.058295Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:47.058698Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631264331015134:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:47.069424Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:47.083796Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631264331015136:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:47.157211Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631264331015187:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:47.896861Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519631242856176374:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:47.896931Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:48.561195Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:58.049248Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:29:58.049279Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded >> TBlobStorageProxyTest::TestInFlightPuts [GOOD] >> TBlobStorageProxyTest::TestHugeCollectGarbage ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL-Reboot+Internal-FirstPkColumn [GOOD] Test command err: Running TestTtl ttlColumnType=Timestamp 2025-06-24T21:29:30.493547Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828672, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:29:30.498487Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828673, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:29:30.498993Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:30.534228Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:30.534526Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:30.545201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:30.545495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:30.545741Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:30.545918Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:30.546044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:30.546169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:30.546302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:30.546419Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:30.546533Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:30.546672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:30.546806Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:30.574240Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828684, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:29:30.581306Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:30.581606Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:30.581662Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:30.581842Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:30.582043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:30.582129Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:30.582174Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:30.582268Z node 1 
:TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:30.582330Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:30.582377Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:30.582441Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:30.582642Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:30.582714Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:30.582776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:30.582807Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:30.582901Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:30.582962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:30.583044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:30.583079Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:30.583169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:30.583210Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:30.583244Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:30.583475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:30.583529Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:30.583564Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:30.583833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:30.583893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:30.583930Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:30.584048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:30.584110Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:30.584147Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:30.584219Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:30.584288Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:30.584334Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:30.584377Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:30.584864Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=63; 2025-06-24T21:29:30.584959Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=39; 2025-06-24T21:29:30.585034Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=33; 2025-06-24T21:29:30.585127Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=36; 2025-06-24T21:29:30.585220Z node 1 : ... 
06-24T21:30:12.853580Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1000;merger=0;interval_id=295; 2025-06-24T21:30:12.853639Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-24T21:30:12.853739Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:12.853773Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=1000;finished=1; 2025-06-24T21:30:12.853813Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:30:12.854227Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:30:12.854422Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:1000;schema=saved_at: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:12.854482Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:30:12.854715Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;);columns=1;rows=1000; 2025-06-24T21:30:12.854801Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:254;stage=data_format;batch_size=8000;num_rows=1000;batch_columns=saved_at; 2025-06-24T21:30:12.855212Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:682:2685];bytes=8000;rows=1000;faults=0;finished=0;fault=0;schema=saved_at: uint64; 2025-06-24T21:30:12.855409Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:12.855550Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:12.855693Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:12.855945Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:30:12.856117Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:12.856274Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:12.856323Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [5:683:2686] finished 
for tablet 9437184 2025-06-24T21:30:12.856945Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[5:682:2685];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["l_task_result"],"t":0.016},{"events":["f_ack"],"t":0.017},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.019}],"full":{"a":1750800612837025,"name":"_full_task","f":1750800612837025,"d_finished":0,"c":0,"l":1750800612856392,"d":19367},"events":[{"name":"bootstrap","f":1750800612837334,"d_finished":3881,"c":1,"l":1750800612841215,"d":3881},{"a":1750800612855917,"name":"ack","f":1750800612854186,"d_finished":1537,"c":1,"l":1750800612855723,"d":2012},{"a":1750800612855890,"name":"processing","f":1750800612842740,"d_finished":8554,"c":9,"l":1750800612855728,"d":9056},{"name":"ProduceResults","f":1750800612839666,"d_finished":3285,"c":12,"l":1750800612856306,"d":3285},{"a":1750800612856310,"name":"Finish","f":1750800612856310,"d_finished":0,"c":0,"l":1750800612856392,"d":82},{"name":"task_result","f":1750800612842770,"d_finished":6826,"c":8,"l":1750800612853931,"d":6826}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:12.857048Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:682:2685];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:30:12.857620Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[5:682:2685];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["l_task_result"],"t":0.016},{"events":["f_ack"],"t":0.017},{"events":["l_ProduceResults","f_Finish"],"t":0.019},{"events":["l_ack","l_processing","l_Finish"],"t":0.02}],"full":{"a":1750800612837025,"name":"_full_task","f":1750800612837025,"d_finished":0,"c":0,"l":1750800612857136,"d":20111},"events":[{"name":"bootstrap","f":1750800612837334,"d_finished":3881,"c":1,"l":1750800612841215,"d":3881},{"a":1750800612855917,"name":"ack","f":1750800612854186,"d_finished":1537,"c":1,"l":1750800612855723,"d":2756},{"a":1750800612855890,"name":"processing","f":1750800612842740,"d_finished":8554,"c":9,"l":1750800612855728,"d":9800},{"name":"ProduceResults","f":1750800612839666,"d_finished":3285,"c":12,"l":1750800612856306,"d":3285},{"a":1750800612856310,"name":"Finish","f":1750800612856310,"d_finished":0,"c":0,"l":1750800612857136,"d":826},{"name":"task_result","f":1750800612842770,"d_finished":6826,"c":8,"l":1750800612853931,"d":6826}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:12.857718Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:30:12.833891Z;index_granules=0;index_portions=1;index_batches=10;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=59288;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=59288;selected_rows=0; 2025-06-24T21:30:12.857777Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:30:12.858252Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;; >> TBlobStorageProxyTest::TestProxySimpleDiscoverMaxi [GOOD] >> TAsyncIndexTests::SplitMainWithReboots[PipeResets] [GOOD] |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestProxySimpleDiscoverMaxi [GOOD] >> TBlobStorageProxyTest::TestCollectGarbagePersistence [GOOD] >> TBlobStorageProxyTest::TestCollectGarbageAfterLargeData >> TBlobStorageProxyTest::TestProxyLongTailDiscover >> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap-useOltpSink [GOOD] >> TBlobStorageProxyTest::TestProxyRestoreOnDiscoverBlock [GOOD] ------- [TM] {asan, 
default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitMainWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T21:29:26.816787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:29:26.816888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:29:26.816926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:29:26.816960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:29:26.817010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:29:26.817062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:29:26.817129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:29:26.817209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:29:26.817983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:29:26.818370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:29:26.903799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:29:26.903864Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:26.904663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is 
[1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T21:29:26.930715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:29:26.931387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:29:26.931571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:29:26.942843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:29:26.943164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:29:26.943893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:29:26.944187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:29:26.947685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:29:26.947919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:29:26.949286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:29:26.949362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:29:26.949601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:29:26.949672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:29:26.949718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:29:26.949868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T21:29:26.958636Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:29:27.128516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:29:27.128766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:27.129001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:29:27.129059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:29:27.129329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:29:27.129456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:29:27.136393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:29:27.136637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:29:27.136903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:27.136968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:29:27.137038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:29:27.137079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:29:27.144427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:27.144520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:29:27.144579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:29:27.148206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:27.148269Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:27.148314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:29:27.148372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:29:27.152056Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:29:27.154524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:29:27.154731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:29:27.155874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:29:27.156037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969454 } } Step: 5000001 Media ... ompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } 
PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\001\000\004\000\000\0002\000\000\000" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:15.072548Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409548:2][72075186233409546][24:793:2631] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-24T21:30:15.072682Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][24:738:2631] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-24T21:30:15.072891Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409548:2][72075186233409546][24:793:2631] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750800615042227 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1750800615042227 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750800615042227 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, 
LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-24T21:30:15.076563Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409548:2][72075186233409546][24:793:2631] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 3 2025-06-24T21:30:15.076697Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][24:738:2631] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-24T21:30:15.250307Z node 24 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:30:15.250652Z node 24 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 397us result status StatusSuccess 2025-06-24T21:30:15.251551Z node 24 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 
ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TBlobStorageProxyTest::TestHugeCollectGarbage [GOOD] >> ReadSessionImplTest::DataReceivedCallbackReal [GOOD] >> ReadSessionImplTest::DataReceivedCallback ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestProxyRestoreOnDiscoverBlock [GOOD] Test command err: 2025-06-24T21:30:08.110980Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/001f49/r3tmp/tmpdy410w//vdisk_bad_1/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 2 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 2 2025-06-24T21:30:08.121926Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 2 VDISK[0:_:0:1:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 2 Can not be initialized! Format is incomplete. 
Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T21:30:11.028401Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 3 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/001f49/r3tmp/tmpdy410w//vdisk_bad_2/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 3 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 3 2025-06-24T21:30:11.032367Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 3 VDISK[0:_:0:2:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 
HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 3 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T21:30:12.368452Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 3 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/001f49/r3tmp/tmpdy410w//vdisk_bad_2/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 3 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 3 2025-06-24T21:30:12.370643Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 3 VDISK[0:_:0:2:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 
GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 3 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T21:30:13.752224Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 3 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/001f49/r3tmp/tmpdy410w//vdisk_bad_2/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 3 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 3 2025-06-24T21:30:13.781376Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 3 VDISK[0:_:0:2:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 
BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 3 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T21:30:15.064761Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 3 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/001f49/r3tmp/tmpdy410w//vdisk_bad_2/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 3 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 3 2025-06-24T21:30:15.066246Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 3 VDISK[0:_:0:2:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 
LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 3 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap-useOltpSink [GOOD] Test command err: Trying to start YDB, gRPC: 15211, MsgBus: 20567 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044c9/r3tmp/tmpN6u2Wr/pdisk_1.dat TServer::EnableGrpc on GrpcPort 15211, node 1 TClient is connected to server localhost:20567 TClient is connected to server localhost:20567 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... 
>> TBlobStorageProxyTest::TestVPutVCollectVGetRace >> TBlobStorageProxyTest::TestBlockPersistence [GOOD] >> TBlobStorageProxyTest::TestCollectGarbage >> KqpQueryService::DdlCache [GOOD] >> KqpQueryService::DdlExecuteScript |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestHugeCollectGarbage [GOOD] >> TSchemeShardTest::ManyDirs [GOOD] >> TSchemeShardTest::ListNotCreatedDirCase >> TBlobStorageProxyTest::TestCollectGarbageAfterLargeData [GOOD] |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> LabeledDbCounters::TwoTablets [GOOD] >> LabeledDbCounters::TwoTabletsKillOneTablet |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestCollectGarbageAfterLargeData [GOOD] >> TBlobStorageProxyTest::TestVPutVCollectVGetRace [GOOD] >> TBlobStorageProxyTest::TestVGetNoData |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> ReadSessionImplTest::DataReceivedCallback [GOOD] >> TSchemeShardTest::ListNotCreatedDirCase [GOOD] >> TSchemeShardTest::ListNotCreatedIndexCase |98.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> TBlobStorageProxyTest::TestCollectGarbage [GOOD] >> TColumnShardTestSchema::RebootForgetWithLostAnswer [GOOD] >> TBlobStorageProxyTest::TestDoubleGroups |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestCollectGarbage [GOOD] >> TBlobStorageProxyTest::TestProxyLongTailDiscover [GOOD] >> TBlobStorageProxyTest::TestProxyLongTailDiscoverMaxi ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootForgetWithLostAnswer [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801166.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=150801166.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130801166.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130801166.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799966.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130799966.000000s;Name=;Codec=}; 2025-06-24T21:29:29.042277Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:29.075673Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:29.076056Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:29.083825Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:29.084100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:29.084382Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:29.084519Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:29.084632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:29.084779Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:29.084901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:29.085023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:29.085149Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:29.085311Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:29.085433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:29.123881Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:29.124197Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:29.124300Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:29.124489Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:29.124655Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:29.124725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:29.124766Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:29.124853Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:29.124916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:29.124962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:29.124994Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:29.125172Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:29.125256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:29.125300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:29.125329Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:29.125420Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:29.125476Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:29.125520Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:29.125552Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:29.125619Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:29.125658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2025-06-24T21:29:29.125689Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:29.125919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:29.125972Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:29.126009Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:29.126192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:29.126265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:29.126300Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:29.126461Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:29.126513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:29.126545Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:29.126622Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:29.126686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:29.126726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:29.126757Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:29.127388Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=59; 2025-06-24T21:29:29.127483Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=36; 2025-06-24T21:29:29.127 ... 
nit;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=3848; 2025-06-24T21:30:18.990682Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=12; 2025-06-24T21:30:18.990846Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=111; 2025-06-24T21:30:18.990885Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=4292; 2025-06-24T21:30:18.990964Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=4470; 2025-06-24T21:30:18.991026Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=10; 2025-06-24T21:30:18.991124Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=60; 2025-06-24T21:30:18.991183Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=5176; 2025-06-24T21:30:18.991379Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=107; 2025-06-24T21:30:18.991518Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=80; 2025-06-24T21:30:18.991663Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=87; 2025-06-24T21:30:18.991810Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=101; 2025-06-24T21:30:18.994560Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=2684; 2025-06-24T21:30:18.997617Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=2958; 2025-06-24T21:30:18.997729Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=15; 2025-06-24T21:30:18.997786Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=11; 2025-06-24T21:30:18.997824Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=5; 2025-06-24T21:30:18.997930Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=61; 2025-06-24T21:30:18.997981Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=6; 2025-06-24T21:30:18.998091Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=78; 2025-06-24T21:30:18.998133Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=5; 2025-06-24T21:30:18.998226Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=40; 2025-06-24T21:30:18.998348Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=64; 2025-06-24T21:30:18.998631Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=225; 2025-06-24T21:30:18.998678Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=20273; 2025-06-24T21:30:18.998812Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=9739224;raw_bytes=13544452;count=2;records=160000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:30:18.998940Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:30:18.998996Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:30:18.999081Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:30:19.007656Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-24T21:30:19.007808Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:30:19.007948Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-24T21:30:19.008034Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800306567;tx_id=18446744073709551615;;current_snapshot_ts=1750800594454; 2025-06-24T21:30:19.008078Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:30:19.008131Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:19.008166Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:19.008231Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:30:19.008763Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;self_id=[1:1385:3226];tablet_id=9437184;parent=[1:1346:3195];fline=manager.cpp:88;event=ask_data;request=request_id=54;9438184000001={portions_count=2};; 2025-06-24T21:30:19.009504Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:30:19.009889Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:30:19.009931Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:30:19.009956Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:30:19.009999Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:30:19.010075Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-24T21:30:19.010181Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800306567;tx_id=18446744073709551615;;current_snapshot_ts=1750800594454; 2025-06-24T21:30:19.010249Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:30:19.010300Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:19.010336Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:19.010414Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/9739224 160000/9739224 80000/4873744 0/0 >> TSchemeShardTest::ListNotCreatedIndexCase [GOOD] >> TSchemeShardTest::FindSubDomainPathId |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::DataReceivedCallback [GOOD] Test command err: 2025-06-24T21:29:45.340120Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.340164Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.340192Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:45.340586Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-24T21:29:45.361881Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:29:45.362100Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.368802Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:29:45.373124Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.373324Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:29:45.373451Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:29:45.373518Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-24T21:29:45.374306Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.374333Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.374356Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:45.374677Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:29:45.375405Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:29:45.375539Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.375819Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:29:45.376299Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.376418Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:29:45.376566Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:29:45.376629Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-24T21:29:45.377768Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.377792Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.377816Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:45.378141Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:29:45.378810Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:29:45.378948Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.379181Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:29:45.380063Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.380331Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:29:45.380440Z :DEBUG: Take Data. Partition 1. 
Read: {0, 0} (1-1) 2025-06-24T21:29:45.380489Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-24T21:29:45.381476Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.381500Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.381542Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:45.381934Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:29:45.382603Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:29:45.382747Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.382955Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:29:45.405008Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.409726Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:29:45.409976Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:29:45.410091Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-24T21:29:45.411382Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.411411Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.411577Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:45.412053Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:29:45.413758Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:29:45.413957Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.414487Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:29:45.415005Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.415347Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:29:45.416692Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:29:45.416765Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-24T21:29:45.417869Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.417898Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.417927Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:45.423428Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-24T21:29:45.424167Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:29:45.424320Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.425208Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:29:45.425717Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.425978Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:29:45.426103Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:29:45.426168Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-24T21:29:45.427698Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.427737Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.427761Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:45.428846Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:29:45.431399Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:29:45.431581Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.432058Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:29:45.432974Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.433160Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:29:45.433508Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:29:45.433585Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-24T21:29:45.434785Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.434831Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.434880Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:45.436563Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:29:45.437309Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:29:45.437465Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.437699Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:29:45.439567Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.440072Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:29:45.440177Z :DEBUG: Take Data. Partition 1. 
Read: {0, 0} (1-1) 2025-06-24T21:29:45.440228Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-24T21:29:45.446842Z :ReadSession INFO: Random seed for debugging is 1750800585446807 2025-06-24T21:29:45.916790Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631255374648606:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:45.918933Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:45.967362Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631254816971048:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:45.967454Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;p ... mmit request from 3 to 3 in TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) 2025-06-24T21:30:05.159187Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:192: session cookie 1 consumer shared/user session shared/user_1_1_3852056499273837911_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) committing to position 3 prev 2 end 3 by cookie 3 2025-06-24T21:30:05.159428Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T21:30:05.159453Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T21:30:05.159526Z node 2 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user offset is set to 3 (startOffset 0) session shared/user_1_1_3852056499273837911_v1 2025-06-24T21:30:05.159629Z node 2 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-24T21:30:05.162200Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp for offset 3 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:30:05.162251Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-24T21:30:05.162275Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 3 2025-06-24T21:30:05.162298Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=468, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:30:05.162529Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/user session shared/user_1_1_3852056499273837911_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 3 } 2025-06-24T21:30:05.162587Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer shared/user session shared/user_1_1_3852056499273837911_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) commit done to position 3 endOffset 3 with cookie 3 2025-06-24T21:30:05.162630Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer shared/user session shared/user_1_1_3852056499273837911_v1 replying for commits: assignId# 1, from# 3, to# 3, offset# 3 2025-06-24T21:30:05.163105Z :DEBUG: [/Root] [/Root] [520091f4-e9b059f6-a67d876b-325cac0b] [dc1] Committed response: cookies { assign_id: 1 partition_cookie: 3 } 2025-06-24T21:30:05.249496Z :INFO: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|92d1f59d-18af1550-7258a17-b4d11d8b_0] Write session will now close 2025-06-24T21:30:05.249579Z :DEBUG: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|92d1f59d-18af1550-7258a17-b4d11d8b_0] Write session: aborting 2025-06-24T21:30:05.250465Z :INFO: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|92d1f59d-18af1550-7258a17-b4d11d8b_0] Write session: gracefully shut down, all writes complete 2025-06-24T21:30:05.250465Z :DEBUG: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|92d1f59d-18af1550-7258a17-b4d11d8b_0] Write session is aborting and will not restart 2025-06-24T21:30:05.250529Z :DEBUG: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|92d1f59d-18af1550-7258a17-b4d11d8b_0] Write session: destroy 2025-06-24T21:30:05.250726Z node 1 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: test-message-group-id|92d1f59d-18af1550-7258a17-b4d11d8b_0 grpc read done: success: 0 data: 2025-06-24T21:30:05.250765Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: test-message-group-id|92d1f59d-18af1550-7258a17-b4d11d8b_0 grpc read failed 2025-06-24T21:30:05.250796Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: test-message-group-id|92d1f59d-18af1550-7258a17-b4d11d8b_0 grpc closed 2025-06-24T21:30:05.250810Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: test-message-group-id|92d1f59d-18af1550-7258a17-b4d11d8b_0 is DEAD 2025-06-24T21:30:05.251658Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T21:30:05.252076Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [1:7519631341273997309:2565] destroyed 2025-06-24T21:30:05.252126Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-24T21:30:07.374077Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=468, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:30:07.851072Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/user session shared/user_1_1_3852056499273837911_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 5 from offset 3 2025-06-24T21:30:12.375964Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=468, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:30:15.157360Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/user session shared/user_1_1_3852056499273837911_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 6 from offset 3 2025-06-24T21:30:15.252591Z :INFO: [/Root] [/Root] [520091f4-e9b059f6-a67d876b-325cac0b] Closing read session. Close timeout: 0.000000s 2025-06-24T21:30:15.252677Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:3 2025-06-24T21:30:15.252730Z :INFO: [/Root] [/Root] [520091f4-e9b059f6-a67d876b-325cac0b] Counters: { Errors: 0 CurrentSessionLifetimeMs: 16455 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 24 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:30:15.252885Z :NOTICE: [/Root] [/Root] [520091f4-e9b059f6-a67d876b-325cac0b] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-24T21:30:15.252957Z :DEBUG: [/Root] [/Root] [520091f4-e9b059f6-a67d876b-325cac0b] [dc1] Abort session to cluster 2025-06-24T21:30:15.253540Z :NOTICE: [/Root] [/Root] [520091f4-e9b059f6-a67d876b-325cac0b] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:30:15.254427Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_1_1_3852056499273837911_v1 grpc read done: success# 0, data# { } 2025-06-24T21:30:15.254468Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_1_1_3852056499273837911_v1 grpc read failed 2025-06-24T21:30:15.256665Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session shared/user_1_1_3852056499273837911_v1 2025-06-24T21:30:15.256731Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [1:7519631311209225791:2488] destroyed 2025-06-24T21:30:15.254503Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_1_1_3852056499273837911_v1 grpc closed 2025-06-24T21:30:15.256807Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_1_1_3852056499273837911_v1 2025-06-24T21:30:15.254543Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_1_1_3852056499273837911_v1 is DEAD 2025-06-24T21:30:15.256883Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [1:7519631311209225788:2485] disconnected; active server actors: 1 2025-06-24T21:30:15.257196Z node 1 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [1:7519631311209225788:2485] client user disconnected session shared/user_1_1_3852056499273837911_v1 2025-06-24T21:30:15.661740Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [1:7519631384223670754:2652] TxId: 281474976715724. Ctx: { TraceId: 01jyhxjhwh3d5r3zzmwp41mx3e, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDNlODMzNTQtOGM4NjIwMDgtZmZmMzAxMWEtYTFiNjU1MjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 2 2025-06-24T21:30:15.662462Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7519631384223670758:2652], TxId: 281474976715724, task: 3. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NDNlODMzNTQtOGM4NjIwMDgtZmZmMzAxMWEtYTFiNjU1MjQ=. TraceId : 01jyhxjhwh3d5r3zzmwp41mx3e. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [1:7519631384223670754:2652], status: UNAVAILABLE, reason: {
: Error: Terminate execution } 2025-06-24T21:30:16.911521Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:16.911580Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:16.911610Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:30:16.911970Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:30:16.912438Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:30:16.912675Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:16.912950Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:30:16.913740Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:30:16.914141Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:30:16.914371Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2025-06-24T21:30:16.914479Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:30:16.914541Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:30:16.914595Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (2-2) 2025-06-24T21:30:16.914774Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-24T21:30:16.914828Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 1, size 8 bytes >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus2Block |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TBlobStorageProxyTest::TestVGetNoData [GOOD] >> TColumnShardTestSchema::RebootExportAfterFail [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasure4Plus2Block >> TBSV::ShardsNotLeftInShardsToDelete |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestVGetNoData [GOOD] >> TBlobStorageProxyTest::TestProxySimpleDiscoverNone ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootExportAfterFail [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801177.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130801177.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799977.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; 2025-06-24T21:29:39.363538Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:39.393667Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:39.394007Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:39.401211Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:39.401451Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:39.401719Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:39.401857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:39.401963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:39.402079Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:39.402184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:39.402292Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:39.402425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:39.402548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:39.402658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:39.436083Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:39.436405Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:39.436535Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:39.436747Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:39.436974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:39.437057Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:39.437104Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:39.437228Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:39.437301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:39.437352Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:39.437388Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:39.437576Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:39.437645Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:39.437689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:39.437720Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:39.437820Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:39.437877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:39.437923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:39.437956Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:39.438021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:39.438063Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:39.438105Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:39.438307Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:39.438348Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:39.438378Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:39.438602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:39.438665Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:39.438697Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:39.438843Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:39.438893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:39.438928Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:39.439012Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:39.439080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:39.439122Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:39.439572Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:39.440164Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=59; 2025-06-24T21:29:39.440266Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=45; 2025-06-24T21:29:39.440355Z node 1 :TX_COLUMNSH ... 
4T21:30:20.354453Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=17006;data_size=16980;sum=338168;count=20;size_of_portion=208; 2025-06-24T21:30:20.355261Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=4746; 2025-06-24T21:30:20.355363Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=15; 2025-06-24T21:30:20.356181Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=737; 2025-06-24T21:30:20.356284Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=5972; 2025-06-24T21:30:20.356348Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=6140; 2025-06-24T21:30:20.356436Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=14; 2025-06-24T21:30:20.356616Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=114; 2025-06-24T21:30:20.356672Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=7078; 2025-06-24T21:30:20.356859Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=116; 2025-06-24T21:30:20.357026Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=92; 2025-06-24T21:30:20.357222Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=108; 2025-06-24T21:30:20.357381Z node 1 :TX_COLUMNSHARD 
INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=93; 2025-06-24T21:30:20.362007Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=4526; 2025-06-24T21:30:20.367012Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=4847; 2025-06-24T21:30:20.367171Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=18; 2025-06-24T21:30:20.367254Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=17; 2025-06-24T21:30:20.367319Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=9; 2025-06-24T21:30:20.367429Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=57; 2025-06-24T21:30:20.367495Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=9; 2025-06-24T21:30:20.367624Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=73; 2025-06-24T21:30:20.367686Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=8; 2025-06-24T21:30:20.367774Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=40; 2025-06-24T21:30:20.367894Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=67; 2025-06-24T21:30:20.368209Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=252; 2025-06-24T21:30:20.368276Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=28918; 2025-06-24T21:30:20.368452Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=9739224;raw_bytes=13544452;count=2;records=160000} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=0;raw_bytes=0;count=0;records=0} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:30:20.368611Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;self_id=[1:1293:3152];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:30:20.368688Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1293:3152];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:30:20.368786Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1293:3152];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:30:20.381747Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1293:3152];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-24T21:30:20.381968Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:30:20.382098Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-24T21:30:20.382188Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:30:20.382251Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:20.382301Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:20.382413Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:30:20.383390Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;self_id=[1:1328:3179];tablet_id=9437184;parent=[1:1293:3152];fline=manager.cpp:88;event=ask_data;request=request_id=48;9438184000001={portions_count=2};; 2025-06-24T21:30:20.384459Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1293:3152];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:30:20.388367Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1293:3152];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:30:20.388429Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:30:20.388457Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:30:20.388504Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1293:3152];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:30:20.388644Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1293:3152];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-24T21:30:20.388722Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1293:3152];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:30:20.388778Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1293:3152];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:20.388816Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1293:3152];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:20.388917Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1293:3152];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/9739224 160000/9739224 160000/9739224 160000/9739224 >> TSchemeShardTest::FindSubDomainPathId [GOOD] >> TSchemeShardTest::FindSubDomainPathIdActor >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClassTopicAPI [GOOD] >> TBlobStorageProxyTest::TestDoubleEmptyGet >> PersQueueSdkReadSessionTest::SettingsValidation [GOOD] >> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly >> TBlobStorageProxyTest::TestVPutVGetPersistence >> TBlobStorageProxyTest::TestEmptyDiscover >> TBlobStorageProxyTest::TestProxyRestoreOnGetStripe >> TBSV::ShardsNotLeftInShardsToDelete [GOOD] >> TBlobStorageProxyTest::TestProxySimpleDiscoverNone [GOOD] >> TBlobStorageProxyTest::TestPutGetMany >> TSchemeShardTest::FindSubDomainPathIdActor [GOOD] >> TSchemeShardTest::FindSubDomainPathIdActorAsync >> TBlobStorageProxyTest::TestQuadrupleGroups >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus1Block ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClassTopicAPI [GOOD] Test command err: 2025-06-24T21:29:27.697345Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631177245983513:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:27.697400Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:27.827575Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631177991206058:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:27.831519Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0023cb/r3tmp/tmpRVhVyy/pdisk_1.dat 2025-06-24T21:29:28.278987Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:28.283300Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:28.616883Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:28.618719Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:28.618816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:28.636617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:28.636684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:28.662127Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:29:28.662294Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:28.662484Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:28.670570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25628, node 1 2025-06-24T21:29:28.847451Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:29.023548Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0023cb/r3tmp/yandexjOvZHD.tmp 2025-06-24T21:29:29.023576Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0023cb/r3tmp/yandexjOvZHD.tmp 2025-06-24T21:29:29.023748Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0023cb/r3tmp/yandexjOvZHD.tmp 2025-06-24T21:29:29.023877Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:29.380144Z INFO: TTestServer started on Port 21004 GrpcPort 25628 TClient is connected to server localhost:21004 PQClient connected to localhost:25628 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:30.006651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:29:30.120901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... waiting... 2025-06-24T21:29:32.550556Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631198720820843:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:32.550675Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631198720820878:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:32.550752Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:32.560226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:32.567870Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631198720820913:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:32.567985Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:32.593361Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631198720820880:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T21:29:32.700481Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631177245983513:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:32.700545Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:32.823252Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519631177991206058:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:32.823347Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:32.839108Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631198720820954:2747] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:33.018972Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631198720820980:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:29:33.021337Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MWMxN2VhMGMtMTk1ZDk4MDktOTY1MzljNzYtNTAyOGRmMzk=, ActorId: [1:7519631198720820839:2297], ActorState: ExecuteState, TraceId: 01jyhxh83c1nyn3tx20tsmbr1r, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:29:33.038323Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:29:33.045485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:33.148036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:33.335315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, wei ... 
12:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:30:20.383009Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7519631359080581080:2128], cacheItem# { Subscriber: { Subscriber: [7:7519631376260451373:2860] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 16 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750800613919 PathId: [OwnerId: 72057594046644480, LocalPathId: 12] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: TableId: [72057594046644480:12:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:30:20.383122Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7519631359080581080:2128], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: TableId: [72057594046644480:10:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:30:20.383208Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7519631359080581080:2128], cacheItem# { Subscriber: { Subscriber: [7:7519631376260451264:2788] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 16 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750800613660 PathId: [OwnerId: 72057594046644480, LocalPathId: 10] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: TableId: [72057594046644480:10:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:30:20.383343Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519631406325223890:3915], recipient# [7:7519631354785613696:2176], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/PQ/Config/V2/Versions TableId: [72057594046644480:12:1] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:30:20.383388Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519631406325223891:3916], recipient# [7:7519631354785613696:2176], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/PQ/Config/V2/Cluster TableId: [72057594046644480:10:1] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 
72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:30:20.385874Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7519631359080581080:2128], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:30:20.386013Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7519631359080581080:2128], cacheItem# { Subscriber: { Subscriber: [7:7519631359080581565:2441] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 27 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750800609866 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:30:20.386299Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519631406325223892:3917], recipient# [7:7519631354785613696:2176], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:30:20.467313Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7519631359080581080:2128], request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:30:20.467490Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7519631359080581080:2128], cacheItem# { Subscriber: { Subscriber: [7:7519631376260451141:2724] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:30:20.467618Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519631406325223895:3919], 
recipient# [7:7519631406325223894:2486], result# { ErrorCount: 1 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-24T21:30:20.778080Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2823: Handle TEvTxProxySchemeCache::TEvResolveKeySet: self# [7:7519631359080581080:2128], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 },{ TableId: [OwnerId: 72057594046644480, LocalPathId: 10] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-24T21:30:20.778194Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2062: FillEntry for TResolve: self# [7:7519631359080581080:2128], cacheItem# { Subscriber: { Subscriber: [7:7519631376260451373:2860] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 16 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750800613919 PathId: [OwnerId: 72057594046644480, LocalPathId: 12] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:30:20.778251Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2062: FillEntry for TResolve: self# [7:7519631359080581080:2128], cacheItem# { Subscriber: { Subscriber: [7:7519631376260451264:2788] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 16 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750800613660 PathId: [OwnerId: 72057594046644480, LocalPathId: 10] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 10] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-24T21:30:20.778493Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519631406325223898:3920], recipient# [7:7519631406325223897:2480], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 1 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 },{ TableId: [OwnerId: 72057594046644480, LocalPathId: 10] Access: 1 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 
PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-24T21:30:20.779860Z node 7 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [7:7519631406325223896:2480] TxId: 281474976715685. Ctx: { TraceId: 01jyhxjpsx1tnbctdmjqdxw2ej, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NGNmZDk3N2ItZGMxMzIxNjctZGNlZWZiNjItZWJjY2VmZDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 8 2025-06-24T21:30:20.779972Z node 7 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [7:7519631406325223900:2480], TxId: 281474976715685, task: 3. Ctx: { CustomerSuppliedId : . TraceId : 01jyhxjpsx1tnbctdmjqdxw2ej. SessionId : ydb://session/3?node_id=7&id=NGNmZDk3N2ItZGMxMzIxNjctZGNlZWZiNjItZWJjY2VmZDE=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [7:7519631406325223896:2480], status: UNAVAILABLE, reason: {
: Error: Terminate execution } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TBSV::ShardsNotLeftInShardsToDelete [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:30:21.721533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:30:21.721683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:21.721733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:30:21.721777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:30:21.723042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:30:21.723112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:30:21.723212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:21.723333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:30:21.724219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:30:21.726051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:30:21.836081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:30:21.836140Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:21.856028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:30:21.856500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:30:21.856829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:30:21.871453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:30:21.871772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:30:21.874792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at 
schemeshard: 72057594046678944 2025-06-24T21:30:21.875300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:30:21.901464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:21.905739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:30:21.921412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:21.921530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:21.921939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:30:21.922004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:21.922120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:30:21.922221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:30:21.931844Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:30:22.093540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:22.099552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:22.103324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:30:22.103440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:30:22.105231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:30:22.105402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:22.109343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:22.110618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:30:22.110946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:22.111129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:30:22.111288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:30:22.111344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:30:22.114134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:22.114208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:22.114272Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:30:22.116696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:22.116757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:22.116829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:22.116889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:30:22.131609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:22.136455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:30:22.137890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:30:22.139252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:22.139452Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:22.139529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:22.141631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:30:22.141719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:22.142023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:30:22.142138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:30:22.145142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:22.145206Z node 1 :FLAT_TX_SCHEMESHARD ... 7594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:22.360538Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_bsv.cpp:40: TDropBlockStoreVolume TPropose, operationId: 102:0 HandleReply TEvOperationPlan, step: 5000003, at schemeshard: 72057594046678944 2025-06-24T21:30:22.360625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:30:22.360749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:30:22.360782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:30:22.360832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:30:22.360864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:30:22.360904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:30:22.360977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:30:22.361021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-24T21:30:22.361078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:30:22.361115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:30:22.361142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:30:22.361225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-24T21:30:22.361270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-24T21:30:22.361302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-24T21:30:22.361332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-24T21:30:22.363396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:30:22.363442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-24T21:30:22.363951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:30:22.363986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-24T21:30:22.364637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:30:22.364694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:30:22.364831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:22.364890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:22.365079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:30:22.365218Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:22.365250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-24T21:30:22.365288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 FAKE_COORDINATOR: Erasing txId 102 2025-06-24T21:30:22.365734Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:30:22.365805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: 
TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:30:22.365833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:30:22.365886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-24T21:30:22.365923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-24T21:30:22.366196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:30:22.366255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-24T21:30:22.366327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:30:22.366613Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:30:22.366672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:30:22.366711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:30:22.366740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-24T21:30:22.366766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:30:22.366827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-24T21:30:22.367705Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 2025-06-24T21:30:22.367890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:22.368436Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 2025-06-24T21:30:22.368978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free 
tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T21:30:22.369957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:22.371046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:30:22.371295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:22.372751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:30:22.372891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T21:30:22.373185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:30:22.373223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:30:22.373647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:30:22.373754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:30:22.373792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:398:2376] TestWaitNotification: OK eventTxId 102 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted 2025-06-24T21:30:22.374255Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-24T21:30:22.376679Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 { Type { Kind: Struct Struct { Member { Name: "ShardsToDelete" Type { Kind: Optional Optional { Item { Kind: Struct Struct { Member { Name: "List" Type { Kind: List List { Item { Kind: Struct Struct { Member { Name: "ShardIdx" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } Member { Name: "Truncated" Type { Kind: Data Data { Scheme: 6 } } } } } } } } } } Value { Struct { Optional { Struct { } Struct { Bool: false } } } } } >> TBlobStorageProxyTest::TestEmptyDiscover [GOOD] >> TBlobStorageProxyTest::TestEmptyDiscoverMaxi |98.4%| [TA] $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/test-results/unittest/{meta.json ... results_accumulator.log} >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression |98.4%| [TA] {RESULT} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TSchemeShardTest::FindSubDomainPathIdActorAsync [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasure4Plus2Block [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasure4Plus2Stripe >> TBlobStorageProxyTest::TestVPutVGetPersistence [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus2Block [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus2Stripe >> TBlobStorageProxyTest::TestSingleFailureMirror >> TBlobStorageProxyTest::TestDoubleGroups [GOOD] >> TBlobStorageProxyTest::TestDoubleFailureStripe4Plus2 >> Compression::WriteGZIP [GOOD] >> Compression::WriteZSTD >> TColumnShardTestSchema::TTL-Reboot+Internal+FirstPkColumn [GOOD] >> TColumnShardTestSchema::TTL+Reboot+Internal+FirstPkColumn [GOOD] |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestVPutVGetPersistence [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::FindSubDomainPathIdActorAsync [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:26:48.764201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:26:48.764289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:26:48.764319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:26:48.764348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:26:48.765341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:26:48.765375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:26:48.765424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:26:48.765504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:26:48.766161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:26:48.769070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:26:48.849776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:26:48.849848Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:26:48.863639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:26:48.863917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:26:48.864068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:26:48.871883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:26:48.874885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:26:48.876458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:48.880038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:26:48.888586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:26:48.889697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:26:48.897378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:26:48.897445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:26:48.898374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:26:48.898436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:26:48.898508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:26:48.898602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:26:48.905492Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:26:49.003709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:26:49.005262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.006199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 
0 2025-06-24T21:26:49.006250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:26:49.007239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:26:49.007313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:26:49.009366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:49.010103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:26:49.010271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.010310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:26:49.010354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:26:49.010381Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:26:49.012222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.012272Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:26:49.012301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:26:49.013821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.013867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:26:49.013901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.013941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:26:49.017754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 
2025-06-24T21:26:49.019631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:26:49.019794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:26:49.021679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:26:49.021803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:26:49.021844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.023188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:26:49.023239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:26:49.023410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:26:49.023488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:26:49.026552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:26:49.026606Z node 1 :FLAT_TX_SCHEMESHARD ... 
txId: 102, path id: 2 2025-06-24T21:30:24.036087Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [15:207:2207], at schemeshard: 72057594046678944, txId: 102, path id: 3 2025-06-24T21:30:24.036626Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:30:24.036706Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-24T21:30:24.036928Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:30:24.037002Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:30:24.037091Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 1/1 2025-06-24T21:30:24.037158Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:30:24.037236Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-24T21:30:24.037317Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-24T21:30:24.037395Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:30:24.037463Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:30:24.037750Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-24T21:30:24.037829Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-24T21:30:24.037893Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-24T21:30:24.037952Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-24T21:30:24.039652Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:30:24.039775Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:30:24.039831Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:30:24.039912Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 
2025-06-24T21:30:24.039992Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:30:24.041505Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:30:24.041609Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:30:24.041649Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:30:24.041684Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-24T21:30:24.041727Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:30:24.041827Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-24T21:30:24.058703Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:24.058893Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T21:30:24.065794Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:30:24.065883Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:30:24.066700Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:30:24.066874Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:30:24.066949Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [15:520:2469] TestWaitNotification: OK eventTxId 102 2025-06-24T21:30:24.067841Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomenA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:30:24.068237Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomenA" took 449us result status StatusSuccess 2025-06-24T21:30:24.068949Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: 
TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomenA" PathDescription { Self { Name: "SubDomenA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ChildrenExist: false BalancerTabletID: 72075186233409547 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 247 AccountSize: 247 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:24.069995Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomenA/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:30:24.070309Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomenA/Topic1" took 349us result status StatusSuccess 2025-06-24T21:30:24.071115Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomenA/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 
TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 247 AccountSize: 247 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:24.554272Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__find_subdomain_path_id.cpp:20: FindTabletSubDomainPathId for tablet 72075186233409546 >> TBlobStorageProxyTest::TestBlock >> TBlobStorageProxyTest::TestPutGetMany [GOOD] >> TColumnShardTestSchema::ColdTiers [GOOD] >> KqpQueryService::DdlExecuteScript [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL+Reboot+Internal+FirstPkColumn [GOOD] Test command err: Running TestTtl ttlColumnType=Timestamp 2025-06-24T21:29:54.618457Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828672, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:29:54.623281Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828673, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:29:54.623755Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:54.652374Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:54.652717Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:54.660533Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:54.660792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:54.661086Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:54.661232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:54.661346Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:54.661472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:54.661599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:54.661714Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:54.661843Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:54.661964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:54.662105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:54.688328Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828684, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:29:54.693912Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:54.694228Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:54.694294Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:54.694512Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:54.694727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:54.694811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:54.694862Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:54.694960Z node 1 
:TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:54.695034Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:54.695082Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:54.695162Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:54.695378Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:54.695470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:54.695531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:54.695567Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:54.695670Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:54.695746Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:54.695806Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:54.695850Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:54.695917Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:54.695956Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:54.695993Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:54.696268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:54.696319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:54.696359Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:54.696590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:54.696670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:54.696711Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:54.696852Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:54.696897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:54.696935Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:54.697033Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:54.697110Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:54.697182Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:54.697234Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:54.697732Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=59; 2025-06-24T21:29:54.697841Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=52; 2025-06-24T21:29:54.697930Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=36; 2025-06-24T21:29:54.698024Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=47; 2025-06-24T21:29:54.698132Z node 1 : ... 
ed;interval_idx=0;intervalId=170; 2025-06-24T21:30:25.804678Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1000;merger=0;interval_id=170; 2025-06-24T21:30:25.804810Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-24T21:30:25.804945Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:25.804986Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=1000;finished=1; 2025-06-24T21:30:25.805029Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:30:25.805525Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:30:25.805755Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:1000;schema=timestamp: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:25.805820Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:30:25.806002Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);columns=1;rows=1000; 2025-06-24T21:30:25.806097Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:254;stage=data_format;batch_size=8000;num_rows=1000;batch_columns=timestamp; 2025-06-24T21:30:25.806472Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:613:2590];bytes=8000;rows=1000;faults=0;finished=0;fault=0;schema=timestamp: uint64; 2025-06-24T21:30:25.806716Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:25.806879Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:25.807038Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:25.807313Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:30:25.807488Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:25.807656Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:25.807711Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan 
[5:614:2591] finished for tablet 9437184 2025-06-24T21:30:25.808331Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[5:613:2590];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.003},{"events":["l_bootstrap","f_processing"],"t":0.004},{"events":["f_task_result"],"t":0.005},{"events":["f_ack","l_task_result"],"t":0.019},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.022}],"full":{"a":1750800625785534,"name":"_full_task","f":1750800625785534,"d_finished":0,"c":0,"l":1750800625807785,"d":22251},"events":[{"name":"bootstrap","f":1750800625785867,"d_finished":4509,"c":1,"l":1750800625790376,"d":4509},{"a":1750800625807276,"name":"ack","f":1750800625805480,"d_finished":1596,"c":1,"l":1750800625807076,"d":2105},{"a":1750800625807256,"name":"processing","f":1750800625790529,"d_finished":10327,"c":8,"l":1750800625807079,"d":10856},{"name":"ProduceResults","f":1750800625788721,"d_finished":3452,"c":11,"l":1750800625807693,"d":3452},{"a":1750800625807697,"name":"Finish","f":1750800625807697,"d_finished":0,"c":0,"l":1750800625807785,"d":88},{"name":"task_result","f":1750800625790567,"d_finished":8547,"c":7,"l":1750800625805192,"d":8547}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:25.808448Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:613:2590];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:30:25.809098Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[5:613:2590];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.003},{"events":["l_bootstrap","f_processing"],"t":0.004},{"events":["f_task_result"],"t":0.005},{"events":["f_ack","l_task_result"],"t":0.019},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.022}],"full":{"a":1750800625785534,"name":"_full_task","f":1750800625785534,"d_finished":0,"c":0,"l":1750800625808512,"d":22978},"events":[{"name":"bootstrap","f":1750800625785867,"d_finished":4509,"c":1,"l":1750800625790376,"d":4509},{"a":1750800625807276,"name":"ack","f":1750800625805480,"d_finished":1596,"c":1,"l":1750800625807076,"d":2832},{"a":1750800625807256,"name":"processing","f":1750800625790529,"d_finished":10327,"c":8,"l":1750800625807079,"d":11583},{"name":"ProduceResults","f":1750800625788721,"d_finished":3452,"c":11,"l":1750800625807693,"d":3452},{"a":1750800625807697,"name":"Finish","f":1750800625807697,"d_finished":0,"c":0,"l":1750800625808512,"d":815},{"name":"task_result","f":1750800625790567,"d_finished":8547,"c":7,"l":1750800625805192,"d":8547}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:25.809218Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:30:25.783054Z;index_granules=0;index_portions=1;index_batches=10;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=59184;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=59184;selected_rows=0; 2025-06-24T21:30:25.809283Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:30:25.809770Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:614:2591];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; >> TBlobStorageProxyTest::TestDoubleEmptyGet [GOOD] >> TBlobStorageProxyTest::TestCompactedGetMultipart [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL-Reboot+Internal+FirstPkColumn [GOOD] Test command err: Running TestTtl ttlColumnType=Timestamp 2025-06-24T21:29:56.319240Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828672, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:29:56.324861Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828673, Sender [1:105:2137], 
Recipient [1:127:2157]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:29:56.325370Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:56.358078Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:56.358433Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:56.367078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:56.367360Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:56.367675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:56.367818Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:56.367949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:56.368074Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:56.368205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:56.368324Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:56.368468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:56.368592Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:56.368723Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:56.395910Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828684, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:29:56.402479Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 
9437184 2025-06-24T21:29:56.402773Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:56.402892Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:56.403126Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:56.403343Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:56.403424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:56.403469Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:56.403560Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:56.403627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:56.403671Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:56.403745Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:56.403933Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:56.404010Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:56.404071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:56.404094Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:56.404164Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:56.404208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:56.404241Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:56.404259Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:56.404304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:56.404331Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:56.404352Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:56.404568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:56.404601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:56.404624Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:56.404834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:56.404899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:56.404937Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:56.405068Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:56.405116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:56.405161Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:56.405241Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:56.405309Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:56.405367Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:56.405422Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:56.406003Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=65; 2025-06-24T21:29:56.406117Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=50; 2025-06-24T21:29:56.406222Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=49; 2025-06-24T21:29:56.406314Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=42; 2025-06-24T21:29:56.406416Z node 1 : ... ent=interval_result_received;interval_idx=0;intervalId=170; 2025-06-24T21:30:25.715777Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1000;merger=0;interval_id=170; 2025-06-24T21:30:25.715823Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-24T21:30:25.715899Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:25.715927Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=1000;finished=1; 2025-06-24T21:30:25.715955Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:30:25.716257Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:30:25.716461Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:1000;schema=timestamp: 
uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:25.716531Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:30:25.716703Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);columns=1;rows=1000; 2025-06-24T21:30:25.716791Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=8000;num_rows=1000;batch_columns=timestamp; 2025-06-24T21:30:25.717206Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:505:2508];bytes=8000;rows=1000;faults=0;finished=0;fault=0;schema=timestamp: uint64; 2025-06-24T21:30:25.717448Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:25.717614Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:25.717773Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:25.718016Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:30:25.718228Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:25.718400Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:25.718460Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [5:506:2509] finished for tablet 9437184 2025-06-24T21:30:25.719192Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[5:505:2508];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.015},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.017}],"full":{"a":1750800625700825,"name":"_full_task","f":1750800625700825,"d_finished":0,"c":0,"l":1750800625718543,"d":17718},"events":[{"name":"bootstrap","f":1750800625701199,"d_finished":3773,"c":1,"l":1750800625704972,"d":3773},{"a":1750800625717990,"name":"ack","f":1750800625716218,"d_finished":1588,"c":1,"l":1750800625717806,"d":2141},{"a":1750800625717964,"name":"processing","f":1750800625705144,"d_finished":7313,"c":8,"l":1750800625717811,"d":7892},{"name":"ProduceResults","f":1750800625703470,"d_finished":3214,"c":11,"l":1750800625718433,"d":3214},{"a":1750800625718443,"name":"Finish","f":1750800625718443,"d_finished":0,"c":0,"l":1750800625718543,"d":100},{"name":"task_result","f":1750800625705174,"d_finished":5569,"c":7,"l":1750800625716045,"d":5569}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:25.719338Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:505:2508];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:30:25.720007Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[5:505:2508];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.015},{"events":["l_ProduceResults","f_Finish"],"t":0.017},{"events":["l_ack","l_processing","l_Finish"],"t":0.018}],"full":{"a":1750800625700825,"name":"_full_task","f":1750800625700825,"d_finished":0,"c":0,"l":1750800625719406,"d":18581},"events":[{"name":"bootstrap","f":1750800625701199,"d_finished":3773,"c":1,"l":1750800625704972,"d":3773},{"a":1750800625717990,"name":"ack","f":1750800625716218,"d_finished":1588,"c":1,"l":1750800625717806,"d":3004},{"a":1750800625717964,"name":"processing","f":1750800625705144,"d_finished":7313,"c":8,"l":1750800625717811,"d":8755},{"name":"ProduceResults","f":1750800625703470,"d_finished":3214,"c":11,"l":1750800625718433,"d":3214},{"a":1750800625718443,"name":"Finish","f":1750800625718443,"d_finished":0,"c":0,"l":1750800625719406,"d":963},{"name":"task_result","f":1750800625705174,"d_finished":5569,"c":7,"l":1750800625716045,"d":5569}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:25.720135Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:30:25.697831Z;index_granules=0;index_portions=1;index_batches=10;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=59184;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=59184;selected_rows=0; 2025-06-24T21:30:25.720208Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:30:25.720635Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus1Block [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus1Stripe |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestPutGetMany [GOOD] >> BasicUsage::BrokenCredentialsProvider [GOOD] |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestCompactedGetMultipart [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::ColdTiers [GOOD] Test command err: 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801168.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801168.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150801168.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130801168.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801168.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150801168.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799968.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130801168.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130801168.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799968.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130799968.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130799968.000000s;Name=;Codec=}; 2025-06-24T21:29:29.518001Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:29.571512Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:29.571800Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:29.577868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:29.578120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:29.578368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:29.578454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:29.578540Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:29.578624Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:29.578690Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:29.578766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:29.578840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:29.578918Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:29.578996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:29.611788Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:29.612074Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:29.612160Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:29.612363Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:29.612571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:29.612653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:29.612700Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:29.612807Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:29.612870Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:29.612918Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:29.612952Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:29.613130Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:29.613202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:29.613266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:29.613303Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:29.613400Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:29.613466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:29.613517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:29.613551Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:29.613621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:29.613674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:29.613712Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:29.613955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:29.614016Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:29.614052Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:29.614246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:29.614308Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:29.614341Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:29.614505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:29.614573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:29.614610Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:29.614694Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:29.614768Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:29.614839Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:29.614880Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:1 ... 2025-06-24T21:30:26.206656Z;index_granules=0;index_portions=1;index_batches=522;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=4873744;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=4873744;selected_rows=0; 2025-06-24T21:30:26.502072Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:948:2893];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:30:26.502266Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:948:2893];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-24T21:30:26.503022Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Finished read cookie: 9 at tablet 9437184 2025-06-24T21:30:26.503317Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750800617628:max} readable: {1750800617628:max} at tablet 9437184 2025-06-24T21:30:26.503459Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-24T21:30:26.503583Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800617628:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:30:26.503635Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800617628:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:30:26.504168Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800617628:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-24T21:30:26.504273Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800617628:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-24T21:30:26.504750Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800617628:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:955:2900];trace_detailed=; 2025-06-24T21:30:26.505177Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-24T21:30:26.505404Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-24T21:30:26.505590Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:26.505731Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:26.505986Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=4;SelfId=[1:955:2900];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:30:26.506093Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:955:2900];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:26.506174Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:955:2900];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:26.506236Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:955:2900] finished for tablet 9437184 2025-06-24T21:30:26.506664Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:955:2900];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:954:2899];stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","l_ack","f_processing","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750800626504675,"name":"_full_task","f":1750800626504675,"d_finished":0,"c":0,"l":1750800626506304,"d":1629},"events":[{"name":"bootstrap","f":1750800626504875,"d_finished":892,"c":1,"l":1750800626505767,"d":892},{"a":1750800626505962,"name":"ack","f":1750800626505962,"d_finished":0,"c":0,"l":1750800626506304,"d":342},{"a":1750800626505943,"name":"processing","f":1750800626505943,"d_finished":0,"c":0,"l":1750800626506304,"d":361},{"name":"ProduceResults","f":1750800626505501,"d_finished":463,"c":2,"l":1750800626506212,"d":463},{"a":1750800626506218,"name":"Finish","f":1750800626506218,"d_finished":0,"c":0,"l":1750800626506304,"d":86}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:26.506735Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:955:2900];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:954:2899];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:30:26.507195Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=4;SelfId=[1:955:2900];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:954:2899];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","l_ProduceResults","f_Finish"],"t":0.001},{"events":["l_ack","l_processing","l_Finish"],"t":0.002}],"full":{"a":1750800626504675,"name":"_full_task","f":1750800626504675,"d_finished":0,"c":0,"l":1750800626506782,"d":2107},"events":[{"name":"bootstrap","f":1750800626504875,"d_finished":892,"c":1,"l":1750800626505767,"d":892},{"a":1750800626505962,"name":"ack","f":1750800626505962,"d_finished":0,"c":0,"l":1750800626506782,"d":820},{"a":1750800626505943,"name":"processing","f":1750800626505943,"d_finished":0,"c":0,"l":1750800626506782,"d":839},{"name":"ProduceResults","f":1750800626505501,"d_finished":463,"c":2,"l":1750800626506212,"d":463},{"a":1750800626506218,"name":"Finish","f":1750800626506218,"d_finished":0,"c":0,"l":1750800626506782,"d":564}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:955:2900]->[1:954:2899] 2025-06-24T21:30:26.507311Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:955:2900];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:30:26.504239Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-24T21:30:26.507362Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:955:2900];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:30:26.507474Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:955:2900];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 160000/9739224 160000/9739224 160000/9739224 80000/4873744 0/0 |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |98.4%| [TA] $(B)/ydb/core/tx/schemeshard/ut_base/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TBlobStorageProxyTest::TestSingleFailureMirror [GOOD] >> TBlobStorageProxyTest::TestVBlockVPutVGet ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::DdlExecuteScript [GOOD] Test command err: Trying to start YDB, gRPC: 24741, MsgBus: 28007 2025-06-24T21:28:14.986499Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630866250968668:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:14.986590Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003048/r3tmp/tmpYde2rh/pdisk_1.dat 2025-06-24T21:28:15.288358Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630866250968644:2079] 1750800494985366 != 1750800494985369 2025-06-24T21:28:15.296195Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24741, node 1 2025-06-24T21:28:15.373236Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:28:15.373258Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:28:15.373268Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:28:15.373417Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:28:15.387930Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:28:15.388081Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:28:15.389901Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28007 TClient is connected to server localhost:28007 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:28:15.881314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:28:15.906101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:15.993997Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:28:16.061174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:16.183911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:16.238770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:28:18.009931Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630883430839476:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:18.010069Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:18.271778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:18.305246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:18.335988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:18.367634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:18.407141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:18.480921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:18.519173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:18.590462Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630883430840137:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:18.590564Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:18.590852Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630883430840142:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:28:18.594564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:28:18.609543Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630883430840144:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:28:18.698594Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630883430840195:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:28:19.772440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:28:19.987538Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630866250968668:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:28:19.987641Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:28:20.084130Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630892020775198:3666] ... WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:30:17.869260Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:30:17.900572Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:30:17.900601Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:30:17.900614Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:30:17.900776Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11809 TClient is connected to server localhost:11809 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:30:18.492474Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:30:18.507682Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:30:18.592927Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:30:18.746345Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:30:18.799769Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:30:18.876578Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:30:21.595348Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519631410381339777:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:21.595461Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:21.665103Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:30:21.707380Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:30:21.784888Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:30:21.827445Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:30:21.863288Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:30:21.940677Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:30:22.020356Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:30:22.108030Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519631414676307738:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:22.108145Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:22.108547Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519631414676307743:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:22.113187Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:30:22.127730Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519631414676307745:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:30:22.211378Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519631414676307796:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:30:22.692300Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519631393201468982:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:30:22.692396Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:30:23.669158Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:30:23.672742Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:30:23.674807Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:30:24.088550Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> TBlobStorageProxyTest::TestBlock [GOOD] >> TBlobStorageProxyTest::TestBatchedPutRequestDoesNotContainAHugeBlob >> TColumnShardTestSchema::TTL-Reboot-Internal-FirstPkColumn [GOOD] >> TAsyncIndexTests::MergeIndexWithReboots[TabletReboots] |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |98.5%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_base/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> BasicUsage::BrokenCredentialsProvider [GOOD] Test command err: 2025-06-24T21:29:45.346251Z :MaxByteSizeEqualZero INFO: Random seed for debugging is 1750800585346216 2025-06-24T21:29:45.973640Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631254964007080:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:45.973788Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:46.077775Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631262758201058:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:46.092388Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:46.317140Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:46.325940Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004a46/r3tmp/tmpKikFmc/pdisk_1.dat 2025-06-24T21:29:46.712019Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:46.713777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:46.713909Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:46.719642Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:46.719736Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:46.728081Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:46.728649Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:29:46.733617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25843, node 1 2025-06-24T21:29:47.019324Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:47.089191Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:47.159940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/004a46/r3tmp/yandexuylILm.tmp 2025-06-24T21:29:47.159968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/004a46/r3tmp/yandexuylILm.tmp 2025-06-24T21:29:47.160140Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized 
from file: /home/runner/.ya/build/build_root/2btm/004a46/r3tmp/yandexuylILm.tmp 2025-06-24T21:29:47.160312Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:47.436798Z INFO: TTestServer started on Port 65515 GrpcPort 25843 TClient is connected to server localhost:65515 PQClient connected to localhost:25843 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:47.830518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-24T21:29:49.930023Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631272143877244:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:49.930136Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:49.930372Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631272143877261:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:49.940387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:49.942135Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631272143877294:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:49.942241Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:49.979201Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631272143877263:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-24T21:29:50.282421Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631276438844638:2686] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:50.337602Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631276438844649:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:29:50.338116Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519631279938070446:2274], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:29:50.338545Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=OWZlYWFlZTktYjRmZTcwMzUtNDMzYzNjOGQtNjRjMDc5YWU=, ActorId: [2:7519631279938070414:2268], ActorState: ExecuteState, TraceId: 01jyhxhs67aexp42h1azzh91aw, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:29:50.342197Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:29:50.343069Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NDUyNjlhNzYtMjM1NWFjMDMtYzY3ODBlMDEtZmVjNTY5YzY=, ActorId: [1:7519631272143877223:2296], ActorState: ExecuteState, TraceId: 01jyhxhs218pa1m0h48hd91pgh, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:29:50.343523Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:29:50.437289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:50.589911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:50.773737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: E ... 
ookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:56752 proto=v1 topic=test-topic durationSec=0 2025-06-24T21:30:25.010882Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-24T21:30:25.012680Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-24T21:30:25.012802Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-24T21:30:25.012815Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-24T21:30:25.012828Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-24T21:30:25.012849Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [5:7519631427922579252:2468] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-06-24T21:30:25.020703Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [5:7519631427922579252:2468] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-24T21:30:25.241702Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [5:7519631427922579252:2468] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-24T21:30:25.242202Z node 6 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [5:7519631427922579292:2468] connected; active server actors: 1 2025-06-24T21:30:25.242613Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [5:7519631427922579252:2468] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-24T21:30:25.242642Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [5:7519631427922579252:2468] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-24T21:30:25.249006Z node 6 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [5:7519631427922579292:2468] disconnected; active server actors: 1 2025-06-24T21:30:25.249045Z node 6 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [5:7519631427922579292:2468] disconnected no session 2025-06-24T21:30:25.367821Z node 5 :PQ_PARTITION_CHOOSER DEBUG: 
partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [5:7519631427922579252:2468] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-24T21:30:25.367867Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [5:7519631427922579252:2468] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-24T21:30:25.367886Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [5:7519631427922579252:2468] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-24T21:30:25.367920Z node 5 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-24T21:30:25.369479Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037892] server connected, pipe [5:7519631427922579313:2468], now have 1 active actors on pipe 2025-06-24T21:30:25.369568Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 5, Generation: 1 2025-06-24T21:30:25.369706Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T21:30:25.369739Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T21:30:25.369852Z node 5 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|6f87dad5-4c9f857f-a20dc6a9-c923439_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-24T21:30:25.369988Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-24T21:30:25.370083Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T21:30:25.370233Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T21:30:25.370258Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T21:30:25.370328Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T21:30:25.370479Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|6f87dad5-4c9f857f-a20dc6a9-c923439_0 2025-06-24T21:30:25.371321Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750800625371 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:30:25.371443Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|6f87dad5-4c9f857f-a20dc6a9-c923439_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-24T21:30:25.372612Z :INFO: [] MessageGroupId [src] SessionId [src|6f87dad5-4c9f857f-a20dc6a9-c923439_0] Write session: close. 
Timeout = 0 ms 2025-06-24T21:30:25.372670Z :INFO: [] MessageGroupId [src] SessionId [src|6f87dad5-4c9f857f-a20dc6a9-c923439_0] Write session will now close 2025-06-24T21:30:25.372726Z :DEBUG: [] MessageGroupId [src] SessionId [src|6f87dad5-4c9f857f-a20dc6a9-c923439_0] Write session: aborting 2025-06-24T21:30:25.373258Z :INFO: [] MessageGroupId [src] SessionId [src|6f87dad5-4c9f857f-a20dc6a9-c923439_0] Write session: gracefully shut down, all writes complete 2025-06-24T21:30:25.373303Z :DEBUG: [] MessageGroupId [src] SessionId [src|6f87dad5-4c9f857f-a20dc6a9-c923439_0] Write session: destroy 2025-06-24T21:30:25.374885Z node 5 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|6f87dad5-4c9f857f-a20dc6a9-c923439_0 grpc read done: success: 0 data: 2025-06-24T21:30:25.374915Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|6f87dad5-4c9f857f-a20dc6a9-c923439_0 grpc read failed 2025-06-24T21:30:25.374980Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:818: session v1 closed cookie: 1 sessionId: src|6f87dad5-4c9f857f-a20dc6a9-c923439_0 2025-06-24T21:30:25.375003Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|6f87dad5-4c9f857f-a20dc6a9-c923439_0 is DEAD 2025-06-24T21:30:25.375380Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T21:30:25.375587Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [5:7519631427922579313:2468] destroyed 2025-06-24T21:30:25.375636Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-24T21:30:25.405780Z :INFO: [/Root] [/Root] [2604b878-24655615-25db08ea-366ff322] Starting read session 2025-06-24T21:30:25.405863Z :DEBUG: [/Root] [/Root] [2604b878-24655615-25db08ea-366ff322] Starting session to cluster null (localhost:8641) 2025-06-24T21:30:25.410415Z :DEBUG: [/Root] [/Root] [2604b878-24655615-25db08ea-366ff322] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:25.410480Z :DEBUG: [/Root] [/Root] [2604b878-24655615-25db08ea-366ff322] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:25.410575Z :DEBUG: [/Root] [/Root] [2604b878-24655615-25db08ea-366ff322] [null] Reconnecting session to cluster null in 0.000000s 2025-06-24T21:30:25.412376Z :ERROR: [/Root] [/Root] [2604b878-24655615-25db08ea-366ff322] [null] Got error. Status: CLIENT_UNAUTHENTICATED. Description:
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation 2025-06-24T21:30:25.412480Z :DEBUG: [/Root] [/Root] [2604b878-24655615-25db08ea-366ff322] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:25.412533Z :DEBUG: [/Root] [/Root] [2604b878-24655615-25db08ea-366ff322] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:25.412685Z :INFO: [/Root] [/Root] [2604b878-24655615-25db08ea-366ff322] [null] Closing session to cluster: SessionClosed { Status: CLIENT_UNAUTHENTICATED Issues: "
: Error: Failed to establish connection to server "" ( cluster null). Attempts done: 1
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation " } Get event on client 2025-06-24T21:30:25.412980Z :NOTICE: [/Root] [/Root] [2604b878-24655615-25db08ea-366ff322] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:30:25.413032Z :DEBUG: [/Root] [/Root] [2604b878-24655615-25db08ea-366ff322] [null] Abort session to cluster Got close event: SessionClosed { Status: CLIENT_UNAUTHENTICATED Issues: "
: Error: Failed to establish connection to server "" ( cluster null). Attempts done: 1
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation " }2025-06-24T21:30:25.413153Z :INFO: [/Root] [/Root] [2604b878-24655615-25db08ea-366ff322] Closing read session. Close timeout: 0.000000s 2025-06-24T21:30:25.413205Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-24T21:30:25.413257Z :INFO: [/Root] [/Root] [2604b878-24655615-25db08ea-366ff322] Counters: { Errors: 1 CurrentSessionLifetimeMs: 7 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:30:25.413368Z :NOTICE: [/Root] [/Root] [2604b878-24655615-25db08ea-366ff322] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TBlobStorageProxyTest::TestDoubleFailureStripe4Plus2 [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus2Stripe [GOOD] |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::DropTableWithInflightChanges[TabletReboots] >> TBlobStorageProxyTest::TestVBlockVPutVGet [GOOD] |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TBlobStorageProxyTest::TestPutGetStatusErasure4Plus2Stripe [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL-Reboot-Internal-FirstPkColumn [GOOD] Test command err: Running TestTtl ttlColumnType=Timestamp 2025-06-24T21:27:23.328373Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828672, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:27:23.333367Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828673, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:27:23.333893Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:27:23.363130Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:27:23.363499Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:27:23.371084Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:23.371454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:23.371687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:23.371833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:23.371969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:23.372105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:23.372244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:23.372357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:23.372473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:23.372591Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:23.372715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:23.398003Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828684, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:27:23.403245Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:27:23.403527Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:27:23.403601Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:27:23.403776Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:23.403956Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:27:23.404032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:27:23.404078Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:27:23.404184Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:27:23.404248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:27:23.404300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:27:23.404343Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:27:23.404521Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 
2025-06-24T21:27:23.404593Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:27:23.404643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:27:23.404680Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:27:23.404780Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:27:23.404846Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:27:23.404911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:27:23.404941Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:27:23.405010Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:27:23.405046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:27:23.425783Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:27:23.426223Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:27:23.426295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:27:23.426334Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:27:23.426556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:27:23.426614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:27:23.426652Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:27:23.426778Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:27:23.426831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:27:23.426873Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:27:23.426962Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:27:23.427030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:27:23.427085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:27:23.427130Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:27:23.427670Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=68; 2025-06-24T21:27:23.427778Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=51; 2025-06-24T21:27:23.427859Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=34; 2025-06-24T21:27:23.427954Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=51; 2025-06-24T21:27:23.428049Z node 1 : ... 
Id=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:1;records_count:31;schema=saved_at: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:28.458571Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:31;schema=saved_at: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:28.556090Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:30:28.556188Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:30:28.556518Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:30:28.556714Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:31;schema=saved_at: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:28.556771Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:30:28.556885Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;);columns=1;rows=31; 2025-06-24T21:30:28.556948Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=248;num_rows=31;batch_columns=saved_at; 
2025-06-24T21:30:28.557329Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:629:2633];bytes=248;rows=31;faults=0;finished=0;fault=0;schema=saved_at: uint64; 2025-06-24T21:30:28.557461Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:28.557596Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:28.557772Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:28.558002Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:30:28.558098Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:28.558176Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:28.558215Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [5:630:2634] finished for tablet 9437184 2025-06-24T21:30:28.558876Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[5:629:2633];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.101},{"events":["l_bootstrap"],"t":0.302},{"events":["f_processing","f_task_result"],"t":0.304},{"events":["f_ack","l_task_result"],"t":1.221},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":1.424}],"full":{"a":1750800627133809,"name":"_full_task","f":1750800627133809,"d_finished":0,"c":0,"l":1750800628558300,"d":1424491},"events":[{"name":"bootstrap","f":1750800627134154,"d_finished":302164,"c":1,"l":1750800627436318,"d":302164},{"a":1750800628557980,"name":"ack","f":1750800628355513,"d_finished":201623,"c":4,"l":1750800628557815,"d":201943},{"a":1750800628557962,"name":"processing","f":1750800627438360,"d_finished":710165,"c":36,"l":1750800628557819,"d":710503},{"name":"ProduceResults","f":1750800627235670,"d_finished":305584,"c":42,"l":1750800628558197,"d":305584},{"a":1750800628558200,"name":"Finish","f":1750800628558200,"d_finished":0,"c":0,"l":1750800628558300,"d":100},{"name":"task_result","f":1750800627438399,"d_finished":507717,"c":32,"l":1750800628355235,"d":507717}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:28.656322Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:629:2633];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:30:28.657137Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[5:629:2633];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.101},{"events":["l_bootstrap"],"t":0.302},{"events":["f_processing","f_task_result"],"t":0.304},{"events":["f_ack","l_task_result"],"t":1.221},{"events":["l_ProduceResults","f_Finish"],"t":1.424},{"events":["l_ack","l_processing","l_Finish"],"t":1.522}],"full":{"a":1750800627133809,"name":"_full_task","f":1750800627133809,"d_finished":0,"c":0,"l":1750800628656432,"d":1522623},"events":[{"name":"bootstrap","f":1750800627134154,"d_finished":302164,"c":1,"l":1750800627436318,"d":302164},{"a":1750800628557980,"name":"ack","f":1750800628355513,"d_finished":201623,"c":4,"l":1750800628557815,"d":300075},{"a":1750800628557962,"name":"processing","f":1750800627438360,"d_finished":710165,"c":36,"l":1750800628557819,"d":808635},{"name":"ProduceResults","f":1750800627235670,"d_finished":305584,"c":42,"l":1750800628558197,"d":305584},{"a":1750800628558200,"name":"Finish","f":1750800628558200,"d_finished":0,"c":0,"l":1750800628656432,"d":98232},{"name":"task_result","f":1750800627438399,"d_finished":507717,"c":32,"l":1750800628355235,"d":507717}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:28.657240Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:30:26.826442Z;index_granules=0;index_portions=4;index_batches=13;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=71800;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=71800;selected_rows=0; 2025-06-24T21:30:28.657290Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:30:28.657700Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;; >> TUniqueIndexTests::CreateTable >> TBlobStorageProxyTest::TestBatchedPutRequestDoesNotContainAHugeBlob [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestDoubleFailureStripe4Plus2 [GOOD] Test command err: 2025-06-24T21:30:26.408884Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/001eca/r3tmp/tmpwhnJUK//vdisk_bad_0/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 1 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1 2025-06-24T21:30:26.410143Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/001eca/r3tmp/tmpwhnJUK//vdisk_bad_1/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 2 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 2 2025-06-24T21:30:26.460679Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 2 VDISK[0:_:0:1:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 
SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-24T21:30:26.460949Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1 VDISK[0:_:0:0:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus2Stripe [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestVBlockVPutVGet [GOOD] Test command err: 2025-06-24T21:30:25.912061Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/001bee/r3tmp/tmpLhPVqJ//vdisk_bad_0/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 1 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1 2025-06-24T21:30:25.919741Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1 VDISK[0:_:0:0:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1 Can not be initialized! Format is incomplete. 
Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR >> TVectorIndexTests::VectorKmeansTreeImplTable [GOOD] |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestBatchedPutRequestDoesNotContainAHugeBlob [GOOD] |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestPutGetStatusErasure4Plus2Stripe [GOOD] |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus1Stripe [GOOD] >> TColumnShardTestSchema::RebootOneColdTier [GOOD] |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::VectorKmeansTreeImplTable [GOOD] >> TBlobStorageProxyTest::TestEmptyDiscoverMaxi [GOOD] |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TUniqueIndexTests::CreateTable [GOOD] |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TBlobStorageProxyTest::TestProxyLongTailDiscoverMaxi [GOOD] |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus1Stripe [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootOneColdTier [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801178.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=150801178.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130801178.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130801178.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799978.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130799978.000000s;Name=;Codec=}; 2025-06-24T21:29:40.808567Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:40.835530Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:40.835856Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:40.843455Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:40.843708Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:40.843996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:40.844130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:40.844237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:40.844346Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:40.844461Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:40.844572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:40.844689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:40.844807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:40.844918Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:40.878656Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:40.878927Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:40.878989Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:40.879212Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:40.879408Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:40.879489Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:40.879538Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:40.879633Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:40.879692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:40.879734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:40.879763Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:40.879926Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:40.879985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:40.880023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:40.880051Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:40.880134Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:40.880190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:40.880237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:40.880271Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:40.880329Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:40.880369Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
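The TTxUpdateSchema entries in this test output walk through the normalizer chain one class at a time (Granules, Chunks, TablesCleaner, CleanGranuleId, ...), logging normalizer_init, normalizer_finished and normalizer_switched for each. A small sketch, assuming only the CLASS_NAME/id shape visible in these entries, that recovers the completion order from the raw text:

    import re

    # One 'normalizer_finished' entry is logged per completed normalizer,
    # carrying its CLASS_NAME and numeric id.
    FINISHED_RE = re.compile(r"event=normalizer_finished;description=CLASS_NAME=(\w+);id=(\d+)")

    def finished_normalizers(log_text):
        # Returns e.g. [('Granules', 1), ('Chunks', 2), ('TablesCleaner', 4), ...]
        # in the order the entries appear in the log.
        return [(name, int(nid)) for name, nid in FINISHED_RE.findall(log_text)]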
2025-06-24T21:29:40.880396Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:40.880581Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:40.880622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:40.880651Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:40.880880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:40.880964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:40.881009Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:40.881137Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:40.881206Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:40.881254Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:40.881333Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:40.881403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:40.881449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:40.881478Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:40.881951Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=47; 2025-06-24T21:29:40.882030Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=34; 2025-06-24T21:29:40.882 ... 
t;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=5027; 2025-06-24T21:30:30.892482Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=12; 2025-06-24T21:30:30.892661Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=113; 2025-06-24T21:30:30.892720Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=5616; 2025-06-24T21:30:30.892770Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=5774; 2025-06-24T21:30:30.892846Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=11; 2025-06-24T21:30:30.892978Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=74; 2025-06-24T21:30:30.893020Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=6551; 2025-06-24T21:30:30.893217Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=121; 2025-06-24T21:30:30.893361Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=75; 2025-06-24T21:30:30.893512Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=92; 2025-06-24T21:30:30.893668Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=105; 2025-06-24T21:30:30.896161Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=2411; 2025-06-24T21:30:30.898661Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=2389; 2025-06-24T21:30:30.898764Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=12; 2025-06-24T21:30:30.898824Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=11; 2025-06-24T21:30:30.898874Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-24T21:30:30.898967Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=47; 2025-06-24T21:30:30.899024Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=13; 2025-06-24T21:30:30.899235Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=174; 2025-06-24T21:30:30.899285Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=6; 2025-06-24T21:30:30.899366Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=44; 2025-06-24T21:30:30.899460Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=51; 2025-06-24T21:30:30.899762Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=251; 2025-06-24T21:30:30.899809Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=22123; 2025-06-24T21:30:30.899951Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=9739224;raw_bytes=13544452;count=2;records=160000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:30:30.900067Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:30:30.900130Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:30:30.900200Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:30:30.910483Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-24T21:30:30.910644Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:30:30.910744Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-24T21:30:30.910823Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800318339;tx_id=18446744073709551615;;current_snapshot_ts=1750800606226; 2025-06-24T21:30:30.910873Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:30:30.910923Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:30.910965Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:30.911066Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:30:30.911917Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;self_id=[1:1385:3226];tablet_id=9437184;parent=[1:1346:3195];fline=manager.cpp:88;event=ask_data;request=request_id=54;9438184000001={portions_count=2};; 2025-06-24T21:30:30.912997Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:30:30.915767Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:30:30.915822Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
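The initialize_shard entries just above report per-stage timings of the form PRECHARGE:<stage>LoadingTime=<n> and EXECUTE:<stage>LoadingTime=<n>. A sketch that totals them so the slow parts of tablet init stand out; treating the values as microseconds is an assumption inferred from the surrounding timestamps, not something the log states:

    import re
    from collections import defaultdict

    TIMING_RE = re.compile(r"(PRECHARGE|EXECUTE):(\w+)LoadingTime=(\d+)")

    def loading_time_report(log_text):
        # Sum the reported durations per (phase, stage) and sort, slowest first.
        totals = defaultdict(int)
        for phase, stage, value in TIMING_RE.findall(log_text):
            totals[(phase, stage)] += int(value)
        return sorted(totals.items(), key=lambda kv: kv[1], reverse=True)

    # e.g. [(('EXECUTE', 'composite_init'), 22123), (('EXECUTE', 'column_engines'), 6551), ...]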
2025-06-24T21:30:30.915848Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:30:30.915893Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:30:30.916024Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-24T21:30:30.916097Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800318339;tx_id=18446744073709551615;;current_snapshot_ts=1750800606226; 2025-06-24T21:30:30.916149Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:30:30.916229Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:30.916281Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:30.916379Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/9739224 160000/9739224 80000/4873744 0/0 |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestEmptyDiscoverMaxi [GOOD] >> TAsyncIndexTests::Decimal >> TAsyncIndexTests::OnlineBuild ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TUniqueIndexTests::CreateTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:30:30.608337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:30:30.608435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue 
configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:30.608477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:30:30.608513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:30:30.608559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:30:30.608609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:30:30.608721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:30.608791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:30:30.609588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:30:30.609973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:30:30.698957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:30:30.699009Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:30.717360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:30:30.717723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:30:30.717916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:30:30.751372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:30:30.751594Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:30:30.752303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:30.752692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:30:30.756086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:30.756341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:30:30.757568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:30.757638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:30.757880Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:30:30.757924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:30.757967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:30:30.758075Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:30:30.766238Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:30:30.951965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:30.952215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:30.952515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:30:30.952569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:30:30.952846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:30:30.952943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:30.955697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:30.955931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:30:30.956158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:30.956235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:30:30.956294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 
2025-06-24T21:30:30.956348Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:30:30.958675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:30.958736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:30.958777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:30:30.960807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:30.960848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:30.960910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:30.960956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:30:30.969846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:30.971963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:30:30.972156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:30:30.973205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:30.973346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:30.973390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:30.973618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:30:30.973662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:30.973793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: 
IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:30:30.973846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:30:30.975821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:30.975871Z node 1 :FLAT_TX_SCHEMESHARD ... 2075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:31.306002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:30:31.306033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 101:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:30:31.306071Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 129 -> 240 2025-06-24T21:30:31.311268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:30:31.311396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:30:31.313182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:30:31.313278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:30:31.313390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-24T21:30:31.313494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:30:31.313568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-24T21:30:31.313866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:30:31.314178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-24T21:30:31.314233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:2 ProgressState 2025-06-24T21:30:31.314349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:2 progress is 2/3 2025-06-24T21:30:31.314402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-24T21:30:31.314441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:2 
progress is 2/3 2025-06-24T21:30:31.314474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-24T21:30:31.314509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 2/3, is published: true 2025-06-24T21:30:31.314733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:30:31.314767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:30:31.314850Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 3/3 2025-06-24T21:30:31.314880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-24T21:30:31.314910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 3/3 2025-06-24T21:30:31.314936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-24T21:30:31.314970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 3/3, is published: true 2025-06-24T21:30:31.315054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:379:2345] message: TxId: 101 2025-06-24T21:30:31.315099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-24T21:30:31.316206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:30:31.316281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:30:31.316438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:30:31.316490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:1 2025-06-24T21:30:31.316512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:1 2025-06-24T21:30:31.316544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:30:31.316565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:2 2025-06-24T21:30:31.316581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:2 2025-06-24T21:30:31.316640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:30:31.320162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:30:31.320247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy 
waiter [1:380:2346] TestWaitNotification: OK eventTxId 101 2025-06-24T21:30:31.320830Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:30:31.321225Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex" took 366us result status StatusSuccess 2025-06-24T21:30:31.322188Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex" PathDescription { Self { Name: "UserDefinedIndex" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "UserDefinedIndex" LocalPathId: 3 Type: EIndexTypeGlobalUnique State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: 
"background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TAsyncIndexTests::SplitBothWithReboots[PipeResets] |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestProxyLongTailDiscoverMaxi [GOOD] >> TAsyncIndexTests::MergeBothWithReboots[PipeResets] >> TVectorIndexTests::CreateTableWithError >> TBlobStorageProxyTest::TestProxyRestoreOnGetStripe [GOOD] >> TBlobStorageProxyTest::TestProxyRestoreOnGetMirror3Plus2 >> TVectorIndexTests::CreateTablePrefixCovering |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::CdcAndSplitWithReboots[PipeResets] >> TBlobStorageProxyTest::TestQuadrupleGroups [GOOD] >> TBlobStorageProxyTest::TestSingleFailure >> TAsyncIndexTests::Decimal [GOOD] |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTableWithError [GOOD] >> TAsyncIndexTests::CdcAndSplitWithReboots[TabletReboots] |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::OnlineBuild [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::Decimal [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: 
[1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:30:32.624335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:30:32.624411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:32.624454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:30:32.624487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:30:32.624527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:30:32.624589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:30:32.624647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:32.624725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:30:32.625458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:30:32.625777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:30:32.699219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:30:32.699285Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:32.714799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:30:32.715216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:30:32.715373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:30:32.722658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:30:32.722851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:30:32.723432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:32.723750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:30:32.726555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot 
DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:32.726763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:30:32.727915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:32.727982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:32.728230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:30:32.728278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:32.728319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:30:32.728431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:30:32.735072Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:30:32.858908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:32.859168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:32.859432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:30:32.859489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:30:32.859731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:30:32.859831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:32.862165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:32.862319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, 
path: //MyRoot 2025-06-24T21:30:32.862478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:32.862539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:30:32.862574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:30:32.862607Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:30:32.864995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:32.865082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:32.865131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:30:32.867068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:32.867107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:32.867167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:32.867231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:30:32.870784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:32.872718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:30:32.872912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:30:32.873827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:32.873940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:32.873977Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:32.874181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:30:32.874213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:32.874365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:30:32.874460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:30:32.876259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:32.876299Z node 1 :FLAT_TX_SCHEMESHARD ... 2075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:33.198798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:706: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.198830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:718: send schema changes ack message, operation: 101:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-24T21:30:33.198863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 101:0 129 -> 240 2025-06-24T21:30:33.205829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:30:33.205970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:30:33.214674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:30:33.214925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-24T21:30:33.216352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-24T21:30:33.216745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.216920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-24T21:30:33.217369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-24T21:30:33.217414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:2 ProgressState 2025-06-24T21:30:33.217520Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:2 progress is 2/3 2025-06-24T21:30:33.217556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-24T21:30:33.217615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:2 progress is 2/3 2025-06-24T21:30:33.217681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-24T21:30:33.217720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 2/3, is published: true 2025-06-24T21:30:33.218178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:664: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.218600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.218643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-24T21:30:33.218705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 3/3 2025-06-24T21:30:33.218731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-24T21:30:33.218760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#101:0 progress is 3/3 2025-06-24T21:30:33.218783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-24T21:30:33.218808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 101, ready parts: 3/3, is published: true 2025-06-24T21:30:33.218879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:379:2345] message: TxId: 101 2025-06-24T21:30:33.218939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-24T21:30:33.218993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:0 2025-06-24T21:30:33.219041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:0 2025-06-24T21:30:33.219222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:30:33.219273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:1 2025-06-24T21:30:33.219301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:1 2025-06-24T21:30:33.219333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:30:33.219376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 101:2 2025-06-24T21:30:33.219396Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 101:2 2025-06-24T21:30:33.219449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:30:33.222566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-24T21:30:33.222621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:380:2346] TestWaitNotification: OK eventTxId 101 2025-06-24T21:30:33.223237Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:30:33.223513Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex" took 307us result status StatusSuccess 2025-06-24T21:30:33.224413Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex" PathDescription { Self { Name: "UserDefinedIndex" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "UserDefinedIndex" LocalPathId: 3 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 
IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TAsyncIndexTests::MergeBothWithReboots[TabletReboots] >> TVectorIndexTests::CreateTablePrefixCovering [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTableWithError [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:30:33.288719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:30:33.288795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:33.288832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching 
config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:30:33.288862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:30:33.288902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:30:33.288947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:30:33.289006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:33.289095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:30:33.289809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:30:33.290143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:30:33.369785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:30:33.369835Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:33.386577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:30:33.386999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:30:33.387551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:30:33.395800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:30:33.395991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:30:33.396649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:33.396971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:30:33.399973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:33.400192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:30:33.401470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:33.401533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:33.401751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:30:33.401798Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:33.401843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:30:33.401948Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.408783Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:30:33.540524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:33.540754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.541011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:30:33.541077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:30:33.541312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:30:33.541417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:33.543583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:33.543763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:30:33.543945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.544006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:30:33.544058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:30:33.544096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:30:33.546080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.546128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:33.546179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:30:33.547914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.547961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.547999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:33.548067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:30:33.551594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:33.553569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:30:33.553754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:30:33.554705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:33.554834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:33.554882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:33.555183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:30:33.555241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:33.555423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:30:33.555497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:30:33.557759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:33.557810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:33.558026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:33.558077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-24T21:30:33.558406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.558452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-24T21:30:33.558554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:30:33.558585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:30:33.558620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#1:0 progress is 1/1 2025-06-24T21:30:33.558648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:30:33.558718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-24T21:30:33.558768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-24T21:30:33.558815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1:0 2025-06-24T21:30:33.558851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1:0 2025-06-24T21:30:33.558921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:30:33.558960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-24T21:30:33.558991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-24T21:30:33.560768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:30:33.560885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 
72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-24T21:30:33.560922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-24T21:30:33.560990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-24T21:30:33.561049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:30:33.561145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-24T21:30:33.564610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-24T21:30:33.565094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-24T21:30:33.566971Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:271:2260] Bootstrap 2025-06-24T21:30:33.588114Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:271:2260] Become StateWork (SchemeCache [1:276:2265]) 2025-06-24T21:30:33.590941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "vectors" Columns { Name: "id" Type: "Uint64" } Columns { Name: "__ydb_parent" Type: "String" } KeyColumnNames: "id" } IndexDescription { Name: "idx_vector" KeyColumnNames: "__ydb_parent" Type: EIndexTypeGlobalVectorKmeansTree VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } } } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:33.591405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /MyRoot/vectors domain path id: [OwnerId: 72057594046678944, LocalPathId: 1] domain path: /MyRoot shardsToCreate: 2 GetShardsInside: 0 MaxShards: 200000 2025-06-24T21:30:33.591550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 101:0, explain: index key column shouldn't have a reserved name, at schemeshard: 72057594046678944 2025-06-24T21:30:33.591597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 101:1, propose status:StatusInvalidParameter, reason: index key column shouldn't have a reserved name, at schemeshard: 72057594046678944 2025-06-24T21:30:33.592418Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:30:33.595550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 101, response: Status: StatusInvalidParameter Reason: "index key column shouldn\'t have a reserved name" TxId: 101 SchemeshardId: 72057594046678944, at 
schemeshard: 72057594046678944 2025-06-24T21:30:33.595778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: index key column shouldn't have a reserved name, operation: CREATE TABLE WITH INDEXES, path: /MyRoot/vectors 2025-06-24T21:30:33.596272Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-24T21:30:33.599427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "vectors" Columns { Name: "id" Type: "Uint64" } Columns { Name: "embedding" Type: "String" } KeyColumnNames: "id" } IndexDescription { Name: "idx_vector" KeyColumnNames: "embedding" Type: EIndexTypeGlobalVectorKmeansTree DataColumnNames: "id" VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:33.599824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /MyRoot/vectors domain path id: [OwnerId: 72057594046678944, LocalPathId: 1] domain path: /MyRoot shardsToCreate: 2 GetShardsInside: 0 MaxShards: 200000 2025-06-24T21:30:33.599983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 102:0, explain: the same column can't be used as key and data column for one index, for example id, at schemeshard: 72057594046678944 2025-06-24T21:30:33.600056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: the same column can't be used as key and data column for one index, for example id, at schemeshard: 72057594046678944 2025-06-24T21:30:33.602294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "the same column can\'t be used as key and data column for one index, for example id" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:33.602534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: the same column can't be used as key and data column for one index, for example id, operation: CREATE TABLE WITH INDEXES, path: /MyRoot/vectors TestModificationResult got TxId: 102, wait until txId: 102 >> TVectorIndexTests::CreateTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::OnlineBuild [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:30:32.804093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 
600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:30:32.804182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:32.804217Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:30:32.804248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:30:32.804311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:30:32.804360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:30:32.804430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:32.804570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:30:32.805301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:30:32.805755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:30:32.889869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:30:32.889932Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:32.907509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:30:32.907878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:30:32.908055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:30:32.915688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:30:32.915861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:30:32.916474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:32.916797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:30:32.919783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:32.919987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:30:32.921092Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:32.921160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:32.921421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:30:32.921477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:32.921523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:30:32.921636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:30:32.928221Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:30:33.039624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:33.039840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.040093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:30:33.040153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:30:33.040375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:30:33.040437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:33.044263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:33.044486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:30:33.044712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.044789Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:30:33.044832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:30:33.044897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:30:33.047358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.047425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:33.047464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:30:33.049489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.049544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.049588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:33.049653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:30:33.053329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:33.055668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:30:33.055916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:30:33.056992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:33.057159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:33.057212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:33.057495Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 
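The trace above walks sub-operation 1:0 of the ESchemeOpAlterSubDomain transaction through the schemeshard progress states: TCreateParts (which finds no shards to create), NSubDomainState::TConfigureParts, NSubDomainState::TPropose (which waits for the coordinator plan step), and then the 128 -> 240 transition once TEvOperationPlan is handled. The sketch below only restates that progression as read off the log lines; the enum names and the assumption that the numeric values 2, 3, 128 and 240 map to exactly these phases are illustrative guesses, not taken from the schemeshard sources.

// Minimal sketch, assuming the state numbers in the trace correspond to the
// ProgressState handlers named next to them. Not the real schemeshard enum.
#include <cstdio>

enum ETxStateSketch {
    CreateParts    = 2,   // "TCreateParts ... ProgressState no shards to create, do next state"
    ConfigureParts = 3,   // "NSubDomainState::TConfigureParts ... ProgressState"
    Propose        = 128, // "NSubDomainState::TPropose" waits for the FAKE_COORDINATOR plan step
    Done           = 240, // reached after "HandleReply TEvOperationPlan"; later TDone runs and RemoveTx cleans up
};

int main() {
    // Replays the transition sequence observed for txid 1:0 in the log above.
    const ETxStateSketch path[] = {CreateParts, ConfigureParts, Propose, Done};
    for (ETxStateSketch s : path)
        std::printf("txid 1:0 state -> %d\n", static_cast<int>(s));
    return 0;
}
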
2025-06-24T21:30:33.057550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:33.057765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:30:33.057847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:30:33.061471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:33.061527Z node 1 :FLAT_TX_SCHEMESHARD ... ockTxId: 281474976710760, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, cookie: 102, record: Status: StatusAccepted TxId: 281474976710760 SchemeshardId: 72057594046678944 PathId: 2, status: StatusAccepted 2025-06-24T21:30:33.697388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710760:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.697438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:30: [72057594046678944] TDropLock TPropose opId# 281474976710760:0 ProgressState 2025-06-24T21:30:33.697488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976710760 ready parts: 1/1 2025-06-24T21:30:33.697634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 281474976710760 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:33.700927Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking 2025-06-24T21:30:33.701025Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndex, IndexColumn: indexed, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:387:2357], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 
}} 2025-06-24T21:30:33.701299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976710760:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710760 msg type: 269090816 2025-06-24T21:30:33.701420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976710760, partId: 4294967295, tablet: 72057594046316545 2025-06-24T21:30:33.701613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710760, at schemeshard: 72057594046678944 2025-06-24T21:30:33.701647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 0/1, is published: true 2025-06-24T21:30:33.701682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710760, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 281474976710760 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710760 at step: 5000006 2025-06-24T21:30:33.701914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000006, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:33.702010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710760 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000006 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:33.702076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710760:0 HandleReply TEvOperationPlan: step# 5000006 2025-06-24T21:30:33.702123Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976710760:0 128 -> 240 2025-06-24T21:30:33.704296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976710760:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.704352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 281474976710760:0 ProgressState 2025-06-24T21:30:33.704484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-24T21:30:33.704540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-24T21:30:33.704578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-24T21:30:33.704612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-24T21:30:33.704653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-06-24T21:30:33.704708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:127:2151] 
message: TxId: 281474976710760 2025-06-24T21:30:33.704761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-24T21:30:33.704794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976710760:0 2025-06-24T21:30:33.704821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 281474976710760:0 2025-06-24T21:30:33.704885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 FAKE_COORDINATOR: Erasing txId 281474976710760 2025-06-24T21:30:33.706796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6826: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-06-24T21:30:33.706868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6828: Message: TxId: 281474976710760 2025-06-24T21:30:33.706949Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 102, txId# 281474976710760 2025-06-24T21:30:33.707070Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndex, IndexColumn: indexed, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:387:2357], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, txId# 281474976710760 2025-06-24T21:30:33.709149Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking 2025-06-24T21:30:33.709255Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndex, IndexColumn: indexed, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:387:2357], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, 
UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T21:30:33.709317Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-24T21:30:33.711039Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done 2025-06-24T21:30:33.711126Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndex, IndexColumn: indexed, State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:387:2357], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-24T21:30:33.711181Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:334: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-06-24T21:30:33.711304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:30:33.711366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:480:2439] TestWaitNotification: OK eventTxId 102 >> TVectorIndexTests::CreateTableCoveredEmbedding |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTablePrefixCovering [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:30:33.367130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:30:33.367244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:33.367284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, 
StatsMaxExecuteTime# 0.010000s 2025-06-24T21:30:33.367317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:30:33.367360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:30:33.367410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:30:33.367478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:33.367554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:30:33.368359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:30:33.368721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:30:33.455457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:30:33.455528Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:33.471547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:30:33.471952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:30:33.472108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:30:33.482462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:30:33.482670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:30:33.483459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:33.483844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:30:33.487478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:33.487710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:30:33.488980Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:33.489074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:33.489342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:30:33.489395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: 
unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:33.489442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:30:33.489552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.496661Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:30:33.629051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:33.629295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.629569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:30:33.629624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:30:33.629857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:30:33.629951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:33.632515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:33.632710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:30:33.632945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.633013Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:30:33.633086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:30:33.633125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:30:33.635267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 
1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.635333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:33.635392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:30:33.637536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.637594Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.637644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:33.637726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:30:33.641542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:33.643706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:30:33.643891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:30:33.644706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:33.644804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:33.644838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:33.645061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:30:33.645096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:33.645239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:30:33.645295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : 
[OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:30:33.647426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:33.647458Z node 1 :FLAT_TX_SCHEMESHARD ... ementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-24T21:30:34.194296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:30:34.194372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:30:34.194416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:30:34.194462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 6], version: 18446744073709551615 2025-06-24T21:30:34.194496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 4 2025-06-24T21:30:34.194558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/5, is published: true 2025-06-24T21:30:34.198202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:3, at schemeshard: 72057594046678944 2025-06-24T21:30:34.198263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:3 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:34.198597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-24T21:30:34.198720Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:3 progress is 2/5 2025-06-24T21:30:34.198746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 2/5 2025-06-24T21:30:34.198772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:3 progress is 2/5 2025-06-24T21:30:34.198793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 2/5 2025-06-24T21:30:34.198817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 2/5, is published: true 2025-06-24T21:30:34.199950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:34.200042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 
72057594046678944, cookie: 102 2025-06-24T21:30:34.200068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:34.200113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:34.200234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:4, at schemeshard: 72057594046678944 2025-06-24T21:30:34.200271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:4 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:34.200491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-24T21:30:34.200587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:4 progress is 3/5 2025-06-24T21:30:34.200609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 3/5 2025-06-24T21:30:34.200643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:4 progress is 3/5 2025-06-24T21:30:34.200679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 3/5 2025-06-24T21:30:34.200708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 3/5, is published: true 2025-06-24T21:30:34.200879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-24T21:30:34.200910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:2 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:34.201054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:30:34.201114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:2 progress is 4/5 2025-06-24T21:30:34.201130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 4/5 2025-06-24T21:30:34.201147Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:2 progress is 4/5 2025-06-24T21:30:34.201161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 4/5 2025-06-24T21:30:34.201176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 4/5, is published: true 2025-06-24T21:30:34.201325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:30:34.201357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:34.201467Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:30:34.201534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 5/5 2025-06-24T21:30:34.201560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 5/5 2025-06-24T21:30:34.201603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 5/5 2025-06-24T21:30:34.201628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 5/5 2025-06-24T21:30:34.201654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 5/5, is published: true 2025-06-24T21:30:34.201712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:448:2392] message: TxId: 102 2025-06-24T21:30:34.201751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 5/5 2025-06-24T21:30:34.201790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:30:34.201850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:30:34.201945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:30:34.201984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:1 2025-06-24T21:30:34.202001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:1 2025-06-24T21:30:34.202032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-24T21:30:34.202054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:2 2025-06-24T21:30:34.202071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:2 2025-06-24T21:30:34.202131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-24T21:30:34.202158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:3 2025-06-24T21:30:34.202192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:3 2025-06-24T21:30:34.202235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-24T21:30:34.202258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:4 2025-06-24T21:30:34.202275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:4 2025-06-24T21:30:34.202319Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 2 2025-06-24T21:30:34.202677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:34.202735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:34.202769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:34.203021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:34.203190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:34.204846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:34.206980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:30:34.207033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:590:2527] TestWaitNotification: OK eventTxId 102 >> KqpDataIntegrityTrails::Upsert-LogEnabled-UseSink |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly [GOOD] >> PersQueueSdkReadSessionTest::StopResumeReadingData |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |98.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTableCoveredEmbedding [GOOD] >> TVectorIndexTests::CreateTable [GOOD] >> TBSV::ShouldLimitBlockStoreVolumeDropRate |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest |98.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> TColumnShardTestSchema::TTL+Reboot-Internal-FirstPkColumn [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTableCoveredEmbedding [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:30:35.310441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:30:35.310515Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:35.310550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:30:35.310582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:30:35.310621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:30:35.310665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:30:35.310721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:35.310792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:30:35.311524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:30:35.311836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:30:35.394024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:30:35.394083Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:35.410285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:30:35.410622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:30:35.410795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:30:35.420946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:30:35.421133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:30:35.421734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:35.422049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:30:35.425156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:35.425383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:30:35.426508Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:35.426573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: 
TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:35.426797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:30:35.426844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:35.426886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:30:35.426998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:30:35.435960Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:30:35.587716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:35.587960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:35.588237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:30:35.588290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:30:35.588518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:30:35.588609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:35.591084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:35.591315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:30:35.591495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:35.591557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:30:35.591623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:30:35.591659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:30:35.593794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:35.593858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:35.593908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:30:35.596039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:35.596095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:35.596153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:35.596247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:30:35.606717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:35.609277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:30:35.609540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:30:35.610353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:35.610516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:35.610566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:35.610892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:30:35.610964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-24T21:30:35.611187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:30:35.611276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:30:35.614178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:35.614222Z node 1 :FLAT_TX_SCHEMESHARD ... : 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "idx_vector" LocalPathId: 3 Type: EIndexTypeGlobalVectorKmeansTree State: EIndexStateReady KeyColumnNames: "embedding" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataColumnNames: "embedding" DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 
16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } clusters: 4 levels: 5 } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:35.951111Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/idx_vector/indexImplLevelTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:30:35.951435Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/idx_vector/indexImplLevelTable" took 333us result status StatusSuccess 2025-06-24T21:30:35.951854Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/vectors/idx_vector/indexImplLevelTable" PathDescription { Self { Name: "indexImplLevelTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } 
ChildrenExist: false } Table { Name: "indexImplLevelTable" Columns { Name: "__ydb_parent" Type: "Uint64" TypeId: 4 Id: 1 NotNull: true IsBuildInProgress: false } Columns { Name: "__ydb_id" Type: "Uint64" TypeId: 4 Id: 2 NotNull: true IsBuildInProgress: false } Columns { Name: "__ydb_centroid" Type: "String" TypeId: 4097 Id: 3 NotNull: true IsBuildInProgress: false } KeyColumnNames: "__ydb_parent" KeyColumnNames: "__ydb_id" KeyColumnIds: 1 KeyColumnIds: 2 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:35.952520Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/idx_vector/indexImplPostingTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:30:35.952772Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/idx_vector/indexImplPostingTable" took 236us result status StatusSuccess 2025-06-24T21:30:35.953153Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/vectors/idx_vector/indexImplPostingTable" PathDescription { Self { Name: "indexImplPostingTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplPostingTable" Columns { Name: 
"__ydb_parent" Type: "Uint64" TypeId: 4 Id: 1 NotNull: true IsBuildInProgress: false } Columns { Name: "id" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "embedding" Type: "String" TypeId: 4097 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "__ydb_parent" KeyColumnNames: "id" KeyColumnIds: 1 KeyColumnIds: 2 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TBSV::CleanupDroppedVolumesOnRestart ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:30:35.165794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:30:35.165889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:35.165928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:30:35.165966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:30:35.166010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type 
TxMergeTablePartition, limit 10000 2025-06-24T21:30:35.166058Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:30:35.166117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:35.166195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:30:35.166987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:30:35.167489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:30:35.248268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:30:35.248328Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:35.266994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:30:35.267409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:30:35.267615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:30:35.289473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:30:35.289636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:30:35.290200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:35.290490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:30:35.293189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:35.293372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:30:35.294293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:35.294345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:35.294532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:30:35.294574Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:35.294608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 
2025-06-24T21:30:35.294689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:30:35.300700Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:30:35.428741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:35.428984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:35.429279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:30:35.429337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:30:35.429588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:30:35.429689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:35.431917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:35.432098Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:30:35.432262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:35.432326Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:30:35.432372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:30:35.432419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:30:35.434479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:35.434541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:35.434627Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:30:35.436765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:35.436819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:35.436868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:35.436931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:30:35.445937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:35.448029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:30:35.448314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:30:35.449394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:35.449544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:35.449590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:35.449899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:30:35.449966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:35.450187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:30:35.450280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:30:35.452664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-24T21:30:35.452712Z node 1 :FLAT_TX_SCHEMESHARD ... chemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:30:35.942810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:30:35.942964Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:30:35.943025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:30:35.943059Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:30:35.944121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:30:35.944192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:30:35.944223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:30:35.944257Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-24T21:30:35.944289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-24T21:30:35.945828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:30:35.945914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:30:35.945993Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:30:35.946028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-24T21:30:35.946061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing 
for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-24T21:30:35.946133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/4, is published: true 2025-06-24T21:30:35.948753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:3, at schemeshard: 72057594046678944 2025-06-24T21:30:35.948815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:3 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:35.949170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-24T21:30:35.949331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:3 progress is 2/4 2025-06-24T21:30:35.949368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 2/4 2025-06-24T21:30:35.949403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:3 progress is 2/4 2025-06-24T21:30:35.949439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 2/4 2025-06-24T21:30:35.949475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 2/4, is published: true 2025-06-24T21:30:35.952393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-24T21:30:35.952449Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:2 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:35.952672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-24T21:30:35.952752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:2 progress is 3/4 2025-06-24T21:30:35.952801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 3/4 2025-06-24T21:30:35.952835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:2 progress is 3/4 2025-06-24T21:30:35.952873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 3/4 2025-06-24T21:30:35.952905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 3/4, is published: true 2025-06-24T21:30:35.953368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:35.953536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-24T21:30:35.953575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:35.953746Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-24T21:30:35.953816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 4/4 2025-06-24T21:30:35.953833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 4/4 2025-06-24T21:30:35.953861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:0 progress is 4/4 2025-06-24T21:30:35.953877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 4/4 2025-06-24T21:30:35.953895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 4/4, is published: true 2025-06-24T21:30:35.953959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1660: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:415:2370] message: TxId: 102 2025-06-24T21:30:35.954010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 4/4 2025-06-24T21:30:35.954043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:30:35.954069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:30:35.954138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-24T21:30:35.954168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:1 2025-06-24T21:30:35.954181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:1 2025-06-24T21:30:35.954213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:30:35.954230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:2 2025-06-24T21:30:35.954243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:2 2025-06-24T21:30:35.954273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-24T21:30:35.954288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:3 2025-06-24T21:30:35.954306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:3 2025-06-24T21:30:35.954337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-24T21:30:35.954712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:35.954790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: 
TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:35.955091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:35.955126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:35.955251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:35.955330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:35.956752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-24T21:30:35.959115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:30:35.959223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:533:2481] TestWaitNotification: OK eventTxId 102 >> TBlobStorageProxyTest::TestSingleFailure [GOOD] |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> TColumnShardTestSchema::RebootEnableColdTiersAfterNoEviction [GOOD] |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |98.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL+Reboot-Internal-FirstPkColumn [GOOD] Test command err: Running TestTtl ttlColumnType=Timestamp 2025-06-24T21:29:56.300075Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828672, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:29:56.305358Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828673, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:29:56.305822Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:56.333058Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:56.333341Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:56.341438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:56.341695Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:56.341918Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:56.342021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:56.342100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:56.342189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:56.342266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:56.342331Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:56.342396Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:56.342470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:56.342572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:56.367489Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828684, Sender [1:105:2137], Recipient [1:127:2157]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:29:56.377113Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:56.377511Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:56.377651Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:56.377840Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:56.378058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:56.378141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:56.378184Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:56.378566Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:56.378657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:56.378717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:56.378775Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:56.379012Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:56.379084Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:56.379132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:56.379218Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:56.379320Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:56.379394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:56.379439Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:56.379458Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:56.379512Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:56.379548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:56.379575Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 
2025-06-24T21:29:56.379741Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:56.379770Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:56.379793Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:56.379956Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:56.379990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:56.380013Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:56.380122Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:56.380188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:56.380251Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:56.380405Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:56.380490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:56.380558Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:56.380611Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:56.381015Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=52; 2025-06-24T21:29:56.381097Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=31; 2025-06-24T21:29:56.381170Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=40; 2025-06-24T21:29:56.381245Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=38; 
2025-06-24T21:29:56.381311Z node 1 : ... 84;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:1;records_count:31;schema=saved_at: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:36.518650Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:31;schema=saved_at: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:36.518689Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:30:36.518729Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:30:36.518897Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:30:36.518995Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:31;schema=saved_at: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:36.519036Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:30:36.519131Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;);columns=1;rows=31; 2025-06-24T21:30:36.519208Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:254;stage=data_format;batch_size=248;num_rows=31;batch_columns=saved_at; 2025-06-24T21:30:36.519452Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:736:2713];bytes=248;rows=31;faults=0;finished=0;fault=0;schema=saved_at: uint64; 2025-06-24T21:30:36.519558Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:36.519658Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:36.519839Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:36.520009Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:30:36.520102Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:36.520189Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:36.520240Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [5:737:2714] finished for 
tablet 9437184 2025-06-24T21:30:36.520911Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[5:736:2713];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.004},{"events":["l_bootstrap"],"t":0.008},{"events":["f_processing","f_task_result"],"t":0.01},{"events":["l_task_result"],"t":0.044},{"events":["f_ack"],"t":0.045},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.05}],"full":{"a":1750800636469461,"name":"_full_task","f":1750800636469461,"d_finished":0,"c":0,"l":1750800636520318,"d":50857},"events":[{"name":"bootstrap","f":1750800636469795,"d_finished":8582,"c":1,"l":1750800636478377,"d":8582},{"a":1750800636519988,"name":"ack","f":1750800636514599,"d_finished":4823,"c":4,"l":1750800636519878,"d":5153},{"a":1750800636519973,"name":"processing","f":1750800636480241,"d_finished":23903,"c":36,"l":1750800636519881,"d":24248},{"name":"ProduceResults","f":1750800636473897,"d_finished":10369,"c":42,"l":1750800636520212,"d":10369},{"a":1750800636520217,"name":"Finish","f":1750800636520217,"d_finished":0,"c":0,"l":1750800636520318,"d":101},{"name":"task_result","f":1750800636480273,"d_finished":18456,"c":32,"l":1750800636514354,"d":18456}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:36.521046Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:736:2713];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:30:36.521734Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[5:736:2713];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.004},{"events":["l_bootstrap"],"t":0.008},{"events":["f_processing","f_task_result"],"t":0.01},{"events":["l_task_result"],"t":0.044},{"events":["f_ack"],"t":0.045},{"events":["l_ProduceResults","f_Finish"],"t":0.05},{"events":["l_ack","l_processing","l_Finish"],"t":0.051}],"full":{"a":1750800636469461,"name":"_full_task","f":1750800636469461,"d_finished":0,"c":0,"l":1750800636521130,"d":51669},"events":[{"name":"bootstrap","f":1750800636469795,"d_finished":8582,"c":1,"l":1750800636478377,"d":8582},{"a":1750800636519988,"name":"ack","f":1750800636514599,"d_finished":4823,"c":4,"l":1750800636519878,"d":5965},{"a":1750800636519973,"name":"processing","f":1750800636480241,"d_finished":23903,"c":36,"l":1750800636519881,"d":25060},{"name":"ProduceResults","f":1750800636473897,"d_finished":10369,"c":42,"l":1750800636520212,"d":10369},{"a":1750800636520217,"name":"Finish","f":1750800636520217,"d_finished":0,"c":0,"l":1750800636521130,"d":913},{"name":"task_result","f":1750800636480273,"d_finished":18456,"c":32,"l":1750800636514354,"d":18456}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-24T21:30:36.521847Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:30:36.466091Z;index_granules=0;index_portions=4;index_batches=13;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=71800;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=71800;selected_rows=0; 2025-06-24T21:30:36.521910Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:30:36.522379Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:737:2714];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestSingleFailure [GOOD] Test command err: 2025-06-24T21:30:34.452481Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:425} PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/001d92/r3tmp/tmpgsarfG//vdisk_bad_0/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 1 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1 2025-06-24T21:30:34.456380Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1 VDISK[0:_:0:0:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1 Can not be initialized! Format is incomplete. 
Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR >> TBSV::CleanupDroppedVolumesOnRestart [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootEnableColdTiersAfterNoEviction [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801172.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801172.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150801172.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130801172.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801172.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150801172.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799972.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130801172.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130801172.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799972.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130799972.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130799972.000000s;Name=;Codec=}; 2025-06-24T21:29:32.616667Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:32.644458Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:32.644817Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:32.653443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:32.653638Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:32.653850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:32.653934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:32.654000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:32.654087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:32.654154Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:32.654219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:32.654280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:32.654357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:32.654424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:32.699894Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:32.700204Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:32.700292Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:32.700517Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:32.700707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:32.700782Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:32.700830Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:32.700930Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:32.700996Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:32.701040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:32.701072Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:32.701268Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:32.701336Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:32.701384Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:32.701418Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:32.701515Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:32.701571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:32.701619Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:32.701648Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:32.701711Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:32.701755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:32.701787Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:32.702005Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:32.702051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:32.702082Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:32.702315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:32.702384Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:32.702419Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:32.702550Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:32.702598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:32.702633Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:32.702738Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:32.702818Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;ev ... 
RD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=10; 2025-06-24T21:30:36.912802Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=165; 2025-06-24T21:30:36.912842Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=9823; 2025-06-24T21:30:36.912888Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=9934; 2025-06-24T21:30:36.912946Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=11; 2025-06-24T21:30:36.913141Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=157; 2025-06-24T21:30:36.913177Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=10619; 2025-06-24T21:30:36.913317Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=95; 2025-06-24T21:30:36.913449Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=84; 2025-06-24T21:30:36.913583Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=85; 2025-06-24T21:30:36.913700Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=75; 2025-06-24T21:30:36.918942Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=5173; 2025-06-24T21:30:36.923794Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=4739; 2025-06-24T21:30:36.923895Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=12; 2025-06-24T21:30:36.923946Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=13; 2025-06-24T21:30:36.923984Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-24T21:30:36.924067Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=51; 2025-06-24T21:30:36.924110Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=6; 2025-06-24T21:30:36.924192Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=49; 2025-06-24T21:30:36.924233Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=5; 2025-06-24T21:30:36.924303Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=36; 2025-06-24T21:30:36.924403Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=61; 2025-06-24T21:30:36.924737Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=289; 2025-06-24T21:30:36.924779Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=29423; 2025-06-24T21:30:36.924905Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=29251936;raw_bytes=43173354;count=6;records=480000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:30:36.925012Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:30:36.925078Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:30:36.925142Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:30:36.943653Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-24T21:30:36.943807Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:30:36.943895Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=4; 2025-06-24T21:30:36.943963Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800322449;tx_id=18446744073709551615;;current_snapshot_ts=1750800573927; 2025-06-24T21:30:36.944005Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:30:36.944094Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:36.944141Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:36.944223Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:30:36.944841Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;self_id=[1:2092:3893];tablet_id=9437184;parent=[1:2051:3860];fline=manager.cpp:88;event=ask_data;request=request_id=88;9438184000001={portions_count=6};; 2025-06-24T21:30:36.948298Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:30:36.948477Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:30:36.948513Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:30:36.948538Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:30:36.948582Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:30:36.948686Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=4; 2025-06-24T21:30:36.948752Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800322449;tx_id=18446744073709551615;;current_snapshot_ts=1750800573927; 2025-06-24T21:30:36.948792Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:30:36.948839Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:36.948881Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:36.948967Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2051:3860];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 240000/14617704 160000/9752224 160000/9752224 80000/4886744 0/0 |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest |98.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> TBSV::ShouldLimitBlockStoreVolumeDropRate [GOOD] >> IcbAsActorTests::TestHttpGetResponse |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TBSV::CleanupDroppedVolumesOnRestart [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:30:37.471922Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:30:37.472028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:37.472073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:30:37.472108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:30:37.472166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:30:37.472208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:30:37.472265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:37.472355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:30:37.473186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:30:37.473589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:30:37.560285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:30:37.560353Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:37.577924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:30:37.578349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:30:37.578538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:30:37.586449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:30:37.586649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:30:37.587366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:37.587677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:30:37.591933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:37.592200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: 
[RootDataErasureManager] Stop 2025-06-24T21:30:37.593342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:37.593399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:37.593604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:30:37.593657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:37.593720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:30:37.593812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:30:37.600167Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:30:37.733077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:37.733416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:37.733668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:30:37.733731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:30:37.734010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:30:37.734106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:37.740456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:37.740737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:30:37.740967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-06-24T21:30:37.741060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:30:37.741167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:30:37.741207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:30:37.744075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:37.744147Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:37.744205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:30:37.746817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:37.746892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:37.746956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:37.747011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:30:37.750926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:37.753342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:30:37.753552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:30:37.754571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:37.754725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:37.754770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:37.755077Z node 
1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:30:37.755157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:37.755357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:30:37.755449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:30:37.757787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:37.757841Z node 1 :FLAT_TX_SCHEMESHARD ... sistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:409:2383] sender: [1:476:2058] recipient: [1:15:2062] 2025-06-24T21:30:38.064740Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/BSVolume" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:30:38.064933Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/BSVolume" took 185us result status StatusPathDoesNotExist 2025-06-24T21:30:38.065123Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/BSVolume\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/BSVolume" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-24T21:30:38.065963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Leader for TabletID 72057594046678944 is [1:409:2383] sender: [1:477:2058] recipient: [1:105:2138] Leader for TabletID 72057594046678944 is [1:409:2383] sender: [1:480:2058] recipient: [1:15:2062] Leader for TabletID 72057594046678944 is [1:409:2383] sender: [1:481:2058] recipient: [1:479:2436] Leader for TabletID 72057594046678944 is [1:482:2437] sender: [1:483:2058] recipient: [1:479:2436] 2025-06-24T21:30:38.103823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:30:38.103901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: 
BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:38.103936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:30:38.103965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:30:38.103993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:30:38.104015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:30:38.104050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:38.104097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:30:38.104674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:30:38.104936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:30:38.118801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:30:38.120667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:30:38.120880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:30:38.121045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:30:38.121081Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:38.121235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:30:38.121939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:38.122066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.122138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.122563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.122664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-24T21:30:38.122896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.123000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at 
schemeshard: 72057594046678944 2025-06-24T21:30:38.123100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.123228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.123336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.123578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.123853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.123956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.124343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.124412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.124620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.124772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.124861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.125073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.125153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.125306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.125527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.125611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.125731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.125777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.125821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-24T21:30:38.136493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:30:38.138828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:38.138910Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:38.138976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:30:38.139023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:38.139071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:30:38.139341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:482:2437] sender: [1:541:2058] recipient: [1:15:2062] 2025-06-24T21:30:38.171971Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/BSVolume" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:30:38.172240Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/BSVolume" took 252us result status StatusPathDoesNotExist 2025-06-24T21:30:38.172442Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/BSVolume\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/BSVolume" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> IcbAsActorTests::TestHttpGetResponse [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithNoCompression |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TBSV::ShouldLimitBlockStoreVolumeDropRate [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:30:36.816125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 
600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:30:36.816223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:36.816261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:30:36.816308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:30:36.816352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:30:36.816382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:30:36.816432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:36.816528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:30:36.817283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:30:36.817682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:30:36.894615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:30:36.894682Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:36.910505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:30:36.910916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:30:36.911116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:30:36.931543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:30:36.931773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:30:36.932388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:36.932788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:30:36.942997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:36.943242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:30:36.944450Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:36.944517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:36.944751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:30:36.944812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:36.944864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:30:36.944943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:30:36.957136Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:30:37.082601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:37.082941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:37.083206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:30:37.083266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:30:37.083542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:30:37.083635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:37.086525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:37.086765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:30:37.086973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:37.087030Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:30:37.087171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:30:37.087209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:30:37.089515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:37.089579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:37.089650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:30:37.091724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:37.091783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:37.091848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:37.091904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:30:37.095820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:37.098166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:30:37.098352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:30:37.099382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:37.099530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:37.099583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:37.099885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 
2025-06-24T21:30:37.099957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:37.100127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:30:37.100206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:30:37.103199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:37.103254Z node 1 :FLAT_TX_SCHEMESHARD ... 356265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 129, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 129 at step: 5000028 FAKE_COORDINATOR: advance: minStep5000028 State->FrontStep: 5000027 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 129 at step: 5000028 2025-06-24T21:30:38.357435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000028, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:38.357581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 129 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000028 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:30:38.357646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_bsv.cpp:40: TDropBlockStoreVolume TPropose, operationId: 129:0 HandleReply TEvOperationPlan, step: 5000028, at schemeshard: 72057594046678944 2025-06-24T21:30:38.357782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 13] was 2 2025-06-24T21:30:38.357902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#129:0 progress is 1/1 2025-06-24T21:30:38.357944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-06-24T21:30:38.357989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#129:0 progress is 1/1 2025-06-24T21:30:38.358036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-06-24T21:30:38.358095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:30:38.358174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 13] was 1 2025-06-24T21:30:38.358222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 129, ready parts: 1/1, is published: false 2025-06-24T21:30:38.358268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 129 ready 
parts: 1/1 2025-06-24T21:30:38.358308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 129:0 2025-06-24T21:30:38.358339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 129:0 2025-06-24T21:30:38.358470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 13] was 2 2025-06-24T21:30:38.358509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 129, publications: 2, subscribers: 0 2025-06-24T21:30:38.358542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 129, [OwnerId: 72057594046678944, LocalPathId: 1], 54 2025-06-24T21:30:38.358589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 129, [OwnerId: 72057594046678944, LocalPathId: 13], 18446744073709551615 2025-06-24T21:30:38.362236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:24 2025-06-24T21:30:38.362313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:24 tabletId 72075186233409569 2025-06-24T21:30:38.362677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:23 2025-06-24T21:30:38.362730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:23 tabletId 72075186233409568 2025-06-24T21:30:38.364396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:24 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:30:38.364447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:23 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:30:38.364639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:38.364684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 129, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:38.364851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 129, path id: [OwnerId: 72057594046678944, LocalPathId: 13] 2025-06-24T21:30:38.365007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:38.365062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 129, path id: 1 2025-06-24T21:30:38.365100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:208:2208], at schemeshard: 72057594046678944, txId: 129, path id: 13 FAKE_COORDINATOR: Erasing txId 129 2025-06-24T21:30:38.365689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, 
msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 13 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 129 2025-06-24T21:30:38.365777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 13 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 129 2025-06-24T21:30:38.365808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 129 2025-06-24T21:30:38.365867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 13], version: 18446744073709551615 2025-06-24T21:30:38.365932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 13] was 1 2025-06-24T21:30:38.366287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:30:38.366353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 13], at schemeshard: 72057594046678944 2025-06-24T21:30:38.366435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:30:38.366774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 54 PathOwnerId: 72057594046678944, cookie: 129 2025-06-24T21:30:38.366847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 54 PathOwnerId: 72057594046678944, cookie: 129 2025-06-24T21:30:38.366875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 129 2025-06-24T21:30:38.366916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 54 2025-06-24T21:30:38.366957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:30:38.367079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 129, subscribers: 0 2025-06-24T21:30:38.367350Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 24 TxId_Deprecated: 24 2025-06-24T21:30:38.367715Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 23 TxId_Deprecated: 23 2025-06-24T21:30:38.367831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet 
reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 24 ShardOwnerId: 72057594046678944 ShardLocalIdx: 24, at schemeshard: 72057594046678944 2025-06-24T21:30:38.368430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 23 ShardOwnerId: 72057594046678944 ShardLocalIdx: 23, at schemeshard: 72057594046678944 2025-06-24T21:30:38.372137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 2025-06-24T21:30:38.376701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-24T21:30:38.376848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 2025-06-24T21:30:38.378568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:24 2025-06-24T21:30:38.378686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:23 TestModificationResult got TxId: 129, wait until txId: 129 TestWaitNotification wait txId: 129 2025-06-24T21:30:38.379347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 129: send EvNotifyTxCompletion 2025-06-24T21:30:38.379401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 129 2025-06-24T21:30:38.380098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 129, at schemeshard: 72057594046678944 2025-06-24T21:30:38.380199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 129: got EvNotifyTxCompletionResult 2025-06-24T21:30:38.380238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 129: satisfy waiter [1:1675:3543] TestWaitNotification: OK eventTxId 129 |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> IcbAsActorTests::TestHttpGetResponse [GOOD] >> KqpDataIntegrityTrails::Ddl |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |98.6%| [TA] $(B)/ydb/core/tx/schemeshard/ut_bsvolume/test-results/unittest/{meta.json ... results_accumulator.log} |98.6%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/test-results/unittest/{meta.json ... 
results_accumulator.log} |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |98.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap-useOltpSink |98.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> KqpDataIntegrityTrails::Upsert-LogEnabled-UseSink [GOOD] >> IcbAsActorTests::TestHttpPostReaction >> KqpDataIntegrityTrails::Upsert+LogEnabled+UseSink |98.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |98.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> IcbAsActorTests::TestHttpPostReaction [GOOD] >> Compression::WriteZSTD [GOOD] >> Compression::WriteWithMixedCodecs >> TBlobStorageProxyTest::TestProxyRestoreOnGetMirror3Plus2 [GOOD] |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> IcbAsActorTests::TestHttpPostReaction [GOOD] |98.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestProxyRestoreOnGetMirror3Plus2 [GOOD] |98.6%| [TA] $(B)/ydb/core/control/ut/test-results/unittest/{meta.json ... results_accumulator.log} |98.6%| [TA] {RESULT} $(B)/ydb/core/control/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert-LogEnabled-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 28339, MsgBus: 22783 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044c3/r3tmp/tmpACHbSF/pdisk_1.dat TServer::EnableGrpc on GrpcPort 28339, node 1 TClient is connected to server localhost:22783 TClient is connected to server localhost:22783 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... >> TColumnShardTestSchema::RebootEnableColdTiersAfterTtl [GOOD] >> TColumnShardTestSchema::RebootExportWithLostAnswer [GOOD] |98.6%| [TA] $(B)/ydb/core/blobstorage/dsproxy/ut_fat/test-results/unittest/{meta.json ... 
results_accumulator.log} |98.6%| [TA] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/test-results/unittest/{meta.json ... results_accumulator.log} >> TColumnShardTestSchema::EnableColdTiersAfterTtl [GOOD] |98.6%| [TA] $(B)/ydb/services/metadata/initializer/ut/test-results/unittest/{meta.json ... results_accumulator.log} |98.6%| [TA] {RESULT} $(B)/ydb/services/metadata/initializer/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpDataIntegrityTrails::Select ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootEnableColdTiersAfterTtl [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=150801177.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801177.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801177.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150801177.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130801177.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801177.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150801177.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799977.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130801177.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130801177.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799977.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130799977.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130799977.000000s;Name=;Codec=}; 2025-06-24T21:29:38.311347Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:38.336368Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:38.336757Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:38.344684Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:38.344884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:38.345135Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:38.345289Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:38.345403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:38.345529Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:38.345636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:38.345741Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:38.345845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:38.345965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:38.346081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:38.388272Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:38.388732Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:38.388830Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:38.389045Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:38.389332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:38.389436Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:38.389494Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:38.389613Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:38.389703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:38.389777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:38.389825Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:38.390067Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:38.390175Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:38.390252Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:38.390309Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:38.390436Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:38.390522Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:38.390584Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:38.390628Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:38.390722Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:38.390784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:38.390832Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:38.391111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:38.391430Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:38.391514Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:38.391798Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:38.391883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:38.391937Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:38.392114Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:38.392189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:38.392234Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:38.392338Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:38.392460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.c ... 
RD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=7; 2025-06-24T21:30:42.708491Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=184; 2025-06-24T21:30:42.708536Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=9793; 2025-06-24T21:30:42.708579Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=9907; 2025-06-24T21:30:42.708647Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=11; 2025-06-24T21:30:42.708856Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=157; 2025-06-24T21:30:42.708903Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=10722; 2025-06-24T21:30:42.709088Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=131; 2025-06-24T21:30:42.709216Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=75; 2025-06-24T21:30:42.709385Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=117; 2025-06-24T21:30:42.709523Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=82; 2025-06-24T21:30:42.713673Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=4074; 2025-06-24T21:30:42.717976Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=4177; 2025-06-24T21:30:42.718092Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=14; 2025-06-24T21:30:42.718149Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=11; 2025-06-24T21:30:42.718192Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-24T21:30:42.718271Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=45; 2025-06-24T21:30:42.718316Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=6; 2025-06-24T21:30:42.718414Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=62; 2025-06-24T21:30:42.718453Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=6; 2025-06-24T21:30:42.718503Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=23; 2025-06-24T21:30:42.718581Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=52; 2025-06-24T21:30:42.718912Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=293; 2025-06-24T21:30:42.718954Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=28979; 2025-06-24T21:30:42.719094Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=29251936;raw_bytes=43173354;count=6;records=480000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:30:42.719223Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:30:42.719268Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:30:42.719329Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:30:42.735907Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-24T21:30:42.736043Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:30:42.736123Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=3; 2025-06-24T21:30:42.736201Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800328142;tx_id=18446744073709551615;;current_snapshot_ts=1750800579620; 2025-06-24T21:30:42.736242Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=3;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:30:42.736282Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:42.736318Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:42.736401Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:30:42.737037Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;self_id=[1:2066:3867];tablet_id=9437184;parent=[1:2027:3836];fline=manager.cpp:88;event=ask_data;request=request_id=86;9438184000001={portions_count=6};; 2025-06-24T21:30:42.737720Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:30:42.738122Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:30:42.738165Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:30:42.738189Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:30:42.738230Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:30:42.738306Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=3; 2025-06-24T21:30:42.738371Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800328142;tx_id=18446744073709551615;;current_snapshot_ts=1750800579620; 2025-06-24T21:30:42.738446Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=3;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:30:42.738502Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:42.738540Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:42.738628Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2027:3836];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 160000/9752224 160000/9752224 160000/9752224 80000/4886744 0/0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootExportWithLostAnswer [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; 
WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801191.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=150801191.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130801191.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130801191.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799991.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130799991.000000s;Name=;Codec=}; 2025-06-24T21:29:53.938778Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:53.964608Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:53.964959Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:53.972622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:53.972860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:53.973165Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:53.973299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:53.973401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:53.973516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:53.973618Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:53.973728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:53.973855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:53.973971Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:53.974083Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:54.000530Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:54.000790Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:54.000867Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:54.001079Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:54.001277Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:54.001363Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:54.001409Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:54.001510Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:54.001572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:54.001613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:54.001641Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:54.001821Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:54.001891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:54.001929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:54.001960Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:54.002050Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:54.002103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:54.002139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:54.002169Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:54.002222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:54.002260Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:54.002288Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:54.002494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:54.002539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:54.002572Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:54.002768Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:54.002829Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:54.002872Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:54.002990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:54.003032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:54.003060Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:54.003133Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:54.003224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:54.003269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:54.003299Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:54.003778Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=49; 2025-06-24T21:29:54.003871Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=41; 2025-06-24T21:29:54.003 ... e_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=3873; 2025-06-24T21:30:43.278023Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=13; 2025-06-24T21:30:43.278219Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=130; 2025-06-24T21:30:43.278277Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=4343; 2025-06-24T21:30:43.278367Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=4518; 2025-06-24T21:30:43.278436Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=11; 2025-06-24T21:30:43.278549Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=66; 2025-06-24T21:30:43.278587Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=5236; 2025-06-24T21:30:43.278740Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=94; 2025-06-24T21:30:43.278888Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=92; 2025-06-24T21:30:43.279031Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=90; 2025-06-24T21:30:43.279223Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=135; 2025-06-24T21:30:43.282218Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=2929; 2025-06-24T21:30:43.283879Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=1573; 2025-06-24T21:30:43.283950Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=8; 2025-06-24T21:30:43.283999Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=9; 2025-06-24T21:30:43.284033Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=4; 2025-06-24T21:30:43.284098Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=40; 2025-06-24T21:30:43.284132Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=4; 2025-06-24T21:30:43.284220Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=47; 2025-06-24T21:30:43.284253Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=3; 2025-06-24T21:30:43.284317Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=31; 2025-06-24T21:30:43.284400Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=47; 2025-06-24T21:30:43.284618Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=183; 2025-06-24T21:30:43.284655Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=18875; 2025-06-24T21:30:43.284785Z node 1 
:TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=9739224;raw_bytes=13544452;count=2;records=160000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-24T21:30:43.284889Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SwitchToWork; 2025-06-24T21:30:43.284930Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];process=SwitchToWork;fline=columnshard.cpp:80;event=initialize_shard;step=SignalTabletActive; 2025-06-24T21:30:43.285016Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];process=SwitchToWork;fline=columnshard_impl.cpp:1328;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-24T21:30:43.291081Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-24T21:30:43.291281Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:30:43.291402Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-24T21:30:43.291500Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800331479;tx_id=18446744073709551615;;current_snapshot_ts=1750800619366; 2025-06-24T21:30:43.291548Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:30:43.291600Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:43.291654Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:43.291745Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:30:43.292493Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;self_id=[1:1385:3226];tablet_id=9437184;parent=[1:1346:3195];fline=manager.cpp:88;event=ask_data;request=request_id=54;9438184000001={portions_count=2};; 2025-06-24T21:30:43.300083Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:254;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-24T21:30:43.300257Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:243;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-24T21:30:43.300291Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-24T21:30:43.300319Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-24T21:30:43.300377Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:443;event=EnqueueBackgroundActivities;periodic=0; 2025-06-24T21:30:43.300503Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-24T21:30:43.300582Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750800331479;tx_id=18446744073709551615;;current_snapshot_ts=1750800619366; 2025-06-24T21:30:43.300625Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:30:43.300675Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:43.300717Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:43.300835Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1346:3195];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/9739224 160000/9739224 80000/4873744 0/0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::EnableColdTiersAfterTtl [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; 
WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=150801184.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801184.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801184.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150801184.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130801184.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801184.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150801184.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799984.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130801184.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130801184.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799984.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130799984.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130799984.000000s;Name=;Codec=}; 2025-06-24T21:29:44.478635Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:44.505104Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:44.505462Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:44.513041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:44.513314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:44.513577Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:44.513697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:44.513803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:44.513922Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:44.514023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:44.514127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:44.514225Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:44.514359Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:44.514469Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:44.543332Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:44.543591Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:44.543687Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:44.543870Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:44.544043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:44.544117Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:44.544172Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:44.544266Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:44.544330Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:44.544374Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:44.544405Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:44.544563Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:44.544624Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:44.544671Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 
2025-06-24T21:29:44.544702Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:44.544834Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:44.544965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:44.545011Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:44.545045Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:44.545109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:44.545149Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:44.545210Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:44.545431Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:44.545472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:44.545518Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:44.545708Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:44.545762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:44.545798Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:44.545965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:44.546024Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:44.546072Z 
node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:44.546155Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:44.546226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.c ... HARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=3;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:30:43.478992Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:43.479044Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:43.479173Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:30:43.479382Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750800632793:max} readable: {1750800632793:max} at tablet 9437184 2025-06-24T21:30:43.479518Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-24T21:30:43.479690Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800632793:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:30:43.479749Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800632793:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:30:43.480317Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800632793:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-24T21:30:43.480423Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800632793:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-24T21:30:43.480916Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800632793:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:1443:3387];trace_detailed=; 2025-06-24T21:30:43.481362Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-24T21:30:43.481586Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-24T21:30:43.481763Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:43.481905Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:43.482234Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1443:3387];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:30:43.482340Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1443:3387];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:43.482431Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1443:3387];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:43.482474Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:1443:3387] finished for tablet 9437184 2025-06-24T21:30:43.482946Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:1443:3387];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:1442:3386];stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","l_ack","f_processing","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750800643480841,"name":"_full_task","f":1750800643480841,"d_finished":0,"c":0,"l":1750800643482537,"d":1696},"events":[{"name":"bootstrap","f":1750800643481063,"d_finished":879,"c":1,"l":1750800643481942,"d":879},{"a":1750800643482210,"name":"ack","f":1750800643482210,"d_finished":0,"c":0,"l":1750800643482537,"d":327},{"a":1750800643482190,"name":"processing","f":1750800643482190,"d_finished":0,"c":0,"l":1750800643482537,"d":347},{"name":"ProduceResults","f":1750800643481685,"d_finished":441,"c":2,"l":1750800643482451,"d":441},{"a":1750800643482460,"name":"Finish","f":1750800643482460,"d_finished":0,"c":0,"l":1750800643482537,"d":77}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:43.483044Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1443:3387];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1442:3386];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:30:43.483504Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=4;SelfId=[1:1443:3387];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:1442:3386];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","l_ProduceResults","f_Finish"],"t":0.001},{"events":["l_ack","l_processing","l_Finish"],"t":0.002}],"full":{"a":1750800643480841,"name":"_full_task","f":1750800643480841,"d_finished":0,"c":0,"l":1750800643483099,"d":2258},"events":[{"name":"bootstrap","f":1750800643481063,"d_finished":879,"c":1,"l":1750800643481942,"d":879},{"a":1750800643482210,"name":"ack","f":1750800643482210,"d_finished":0,"c":0,"l":1750800643483099,"d":889},{"a":1750800643482190,"name":"processing","f":1750800643482190,"d_finished":0,"c":0,"l":1750800643483099,"d":909},{"name":"ProduceResults","f":1750800643481685,"d_finished":441,"c":2,"l":1750800643482451,"d":441},{"a":1750800643482460,"name":"Finish","f":1750800643482460,"d_finished":0,"c":0,"l":1750800643483099,"d":639}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:1443:3387]->[1:1442:3386] 2025-06-24T21:30:43.483603Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1443:3387];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:30:43.480392Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-24T21:30:43.483645Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1443:3387];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:30:43.483746Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:1443:3387];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 160000/9752224 160000/9752224 160000/9752224 80000/4886744 0/0 >> KqpDataIntegrityTrails::Ddl [GOOD] >> KqpPg::TypeCoercionInsert-useSink >> KqpPg::InsertNoTargetColumns_Simple+useSink >> KqpPg::CreateTableSerialColumns+useSink >> KqpPg::NoTableQuery+useSink ------- [TS] {asan, default-linux-x86_64, release} 
ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Ddl [GOOD] Test command err: Trying to start YDB, gRPC: 6686, MsgBus: 65326 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044b9/r3tmp/tmpD3VqPR/pdisk_1.dat TServer::EnableGrpc on GrpcPort 6686, node 1 TClient is connected to server localhost:65326 TClient is connected to server localhost:65326 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... >> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap-useOltpSink [GOOD] >> KqpDataIntegrityTrails::Upsert+LogEnabled+UseSink [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap-useOltpSink [GOOD] Test command err: Trying to start YDB, gRPC: 10494, MsgBus: 22846 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044b1/r3tmp/tmpugQE8m/pdisk_1.dat TServer::EnableGrpc on GrpcPort 10494, node 1 TClient is connected to server localhost:22846 TClient is connected to server localhost:22846 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_bs_controller] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert+LogEnabled+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 26793, MsgBus: 4751 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044ad/r3tmp/tmpBCZza0/pdisk_1.dat TServer::EnableGrpc on GrpcPort 26793, node 1 TClient is connected to server localhost:4751 TClient is connected to server localhost:4751 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... >> TSentinelBaseTests::PDiskInitialStatus [GOOD] >> TSentinelBaseTests::PDiskErrorState [GOOD] >> TSentinelBaseTests::PDiskInactiveAfterStateChange [GOOD] >> TSentinelBaseTests::PDiskFaultyState [GOOD] >> TSentinelBaseTests::PDiskStateChangeNormalFlow [GOOD] >> TSentinelBaseTests::PDiskStateChangeNodePermanentlyBad [GOOD] >> TSentinelBaseTests::PDiskStateChangeNodeNotExpectedRestart [GOOD] >> TSentinelBaseTests::PDiskStateChangeNodeExpectedRestart [GOOD] >> TSentinelBaseTests::GuardianDataCenterRatio >> TxKeys::ComparePointKeys >> TSentinelBaseTests::GuardianDataCenterRatio [GOOD] >> TSentinelBaseTests::GuardianFaultyPDisks >> KqpDataIntegrityTrails::Select [GOOD] >> SequenceProxy::Basics >> TSentinelBaseTests::GuardianFaultyPDisks [GOOD] >> TSentinelBaseTests::GuardianRackRatio >> TSentinelBaseTests::GuardianRackRatio [GOOD] >> TSentinelTests::Smoke >> KqpPg::NoTableQuery+useSink [GOOD] >> KqpPg::NoTableQuery-useSink >> TRangeTreap::Simple [GOOD] >> TRangeTreap::Sequential >> GenericProviderLookupActor::Lookup >> GenericProviderLookupActor::Lookup [GOOD] >> GenericProviderLookupActor::LookupWithErrors >> GenericProviderLookupActor::LookupWithErrors [GOOD] >> KqpPg::InsertNoTargetColumns_Simple+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Simple-useSink >> ConfigGRPCService::ReplaceConfig >> KqpPg::CreateTableSerialColumns+useSink [GOOD] >> KqpPg::CreateTableSerialColumns-useSink ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Select [GOOD] Test command err: Trying to start YDB, gRPC: 11038, MsgBus: 10595 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0044f7/r3tmp/tmpl98yd4/pdisk_1.dat TServer::EnableGrpc on GrpcPort 11038, node 1 TClient is connected to server localhost:10595 TClient is connected to server localhost:10595 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... ------- [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/actors/ut/unittest >> GenericProviderLookupActor::LookupWithErrors [GOOD] Test command err: 2025-06-24 21:30:50.390 INFO ydb-library-yql-providers-generic-actors-ut(pid=1065002, tid=0x00007FC548C48B80) [generic] yql_generic_lookup_actor.cpp:151: New generic proivider lookup source actor(ActorId=[1:4:2051]) for kind=YDB, endpoint=host: "some_host" port: 2135, database=some_db, use_tls=1, protocol=NATIVE, table=lookup_test 2025-06-24 21:30:50.419 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=1065002, tid=0x00007FC548C48B80) [generic] yql_generic_lookup_actor.cpp:288: ActorId=[1:4:2051] Got LookupRequest for 3 keys Call ListSplits. selects { data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "TEST_TOKEN" } } use_tls: true protocol: NATIVE } what { items { column { name: "id" type { type_id: UINT64 } } } items { column { name: "optional_id" type { optional_type { item { type_id: UINT64 } } } } } items { column { name: "string_value" type { optional_type { item { type_id: STRING } } } } } } from { table: "lookup_test" } where { filter_typed { disjunction { operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 1 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 101 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 0 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 100 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ 
left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } } } } } max_split_count: 1 CRAB Expected: selects { data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "TEST_TOKEN" } } use_tls: true protocol: NATIVE } what { items { column { name: "id" type { type_id: UINT64 } } } items { column { name: "optional_id" type { optional_type { item { type_id: UINT64 } } } } } items { column { name: "string_value" type { optional_type { item { type_id: STRING } } } } } } from { table: "lookup_test" } where { filter_typed { disjunction { operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 1 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 101 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 0 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 100 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } } } } } max_split_count: 1 CRAB Actual: selects { data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "TEST_TOKEN" } } use_tls: true protocol: NATIVE } what { items { column { name: "id" type { type_id: UINT64 } } } items { column { name: "optional_id" type { optional_type { item { type_id: UINT64 } } } } } i ... 
left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 1 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 101 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 0 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 100 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } } } } } max_split_count: 1 CRAB Actual: selects { data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "TEST_TOKEN" } } use_tls: true protocol: NATIVE } what { items { column { name: "id" type { type_id: UINT64 } } } items { column { name: "optional_id" type { optional_type { item { type_id: UINT64 } } } } } items { column { name: "string_value" type { optional_type { item { type_id: STRING } } } } } } from { table: "lookup_test" } where { filter_typed { disjunction { operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 1 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 101 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 0 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 100 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } } } } } max_split_count: 1 ListSplits result. 
GRpcStatusCode: 0 2025-06-24 21:30:50.532 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=1065002, tid=0x00007FC544666640) [generic] yql_generic_lookup_actor.cpp:319: ActorId=[2:7519631535236609323:2051] Got TListSplitsStreamIterator 2025-06-24 21:30:50.532 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=1065002, tid=0x00007FC544666640) [generic] yql_generic_lookup_actor.cpp:196: ActorId=[2:7519631535236609323:2051] Got TListSplitsResponse from Connector Call ReadSplits. data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "TEST_TOKEN" } } use_tls: true protocol: NATIVE } splits { select { from { table: "example_1" } } description: "Actual split info is not important" } format: ARROW_IPC_STREAMING filtering: FILTERING_MANDATORY CRAB Expected: data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "TEST_TOKEN" } } use_tls: true protocol: NATIVE } splits { select { from { table: "example_1" } } description: "Actual split info is not important" } format: ARROW_IPC_STREAMING filtering: FILTERING_MANDATORY CRAB Actual: data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "TEST_TOKEN" } } use_tls: true protocol: NATIVE } splits { select { from { table: "example_1" } } description: "Actual split info is not important" } format: ARROW_IPC_STREAMING filtering: FILTERING_MANDATORY ReadSplits result. GRpcStatusCode: 0 2025-06-24 21:30:50.533 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=1065002, tid=0x00007FC544666640) [generic] yql_generic_lookup_actor.cpp:229: ActorId=[2:7519631535236609323:2051] Got ReadSplitsStreamIterator from Connector 2025-06-24 21:30:50.533 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=1065002, tid=0x00007FC544666640) [generic] yql_generic_lookup_actor.cpp:341: ActorId=[2:7519631535236609323:2051] Got DataChunk 2025-06-24 21:30:50.534 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=1065002, tid=0x00007FC544666640) [generic] yql_generic_lookup_actor.cpp:352: ActorId=[2:7519631535236609323:2051] Got EOF 2025-06-24 21:30:50.534 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=1065002, tid=0x00007FC544666640) [generic] yql_generic_lookup_actor.cpp:402: Sending lookup results for 3 keys |98.6%| [TS] {RESULT} ydb/library/yql/providers/generic/actors/ut/unittest >> TxKeys::ComparePointKeys [GOOD] >> TxKeys::ComparePointKeysWithNull >> Coordinator::ReadStepSubscribe >> PersQueueSdkReadSessionTest::StopResumeReadingData [GOOD] >> ReadSessionImplTest::CreatePartitionStream [GOOD] >> ReadSessionImplTest::BrokenCompressedData [GOOD] >> ReadSessionImplTest::CommitOffsetTwiceIsError [GOOD] >> ReadSessionImplTest::CommonHandler >> ReadSessionImplTest::CommonHandler [GOOD] >> TSequence::CreateTableWithDefaultFromSequence |98.6%| [TA] $(B)/ydb/core/kqp/ut/data_integrity/test-results/unittest/{meta.json ... results_accumulator.log} |98.6%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/data_integrity/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TxKeys::ComparePointKeysWithNull [GOOD] >> TxKeys::ComparePointAndRange >> TSentinelTests::Smoke [GOOD] >> TSentinelTests::PDiskUnknownState ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::CommonHandler [GOOD] Test command err: 2025-06-24T21:29:53.478921Z :ReadSession INFO: Random seed for debugging is 1750800593478887 2025-06-24T21:29:53.791845Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631290546200442:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:53.794069Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:53.821330Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631290795332972:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:53.821511Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:54.006265Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:54.009269Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049cf/r3tmp/tmpvDTI5S/pdisk_1.dat 2025-06-24T21:29:54.285173Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:54.285339Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:54.300813Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:54.305837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:54.305964Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:54.331815Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:54.333922Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:29:54.334958Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15373, node 1 2025-06-24T21:29:54.447885Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0049cf/r3tmp/yandexQ8aoiZ.tmp 2025-06-24T21:29:54.447915Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0049cf/r3tmp/yandexQ8aoiZ.tmp 2025-06-24T21:29:54.448072Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0049cf/r3tmp/yandexQ8aoiZ.tmp 2025-06-24T21:29:54.448233Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
2025-06-24T21:29:54.510954Z INFO: TTestServer started on Port 21155 GrpcPort 15373 TClient is connected to server localhost:21155 PQClient connected to localhost:15373 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:54.769052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:54.794833Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-24T21:29:54.831628Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... waiting... 2025-06-24T21:29:57.197847Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631307726070579:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:57.198006Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:57.198688Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631307726070591:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:57.202958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:57.240651Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631307726070593:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-24T21:29:57.299760Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631307726070679:2677] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:57.624676Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519631307975202454:2274], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:29:57.625991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:57.626799Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=YmUyZDA2MDItNThlNjYxMzItYjViYzg3YTMtZTY5MDAwMGY=, ActorId: [2:7519631307975202427:2268], ActorState: ExecuteState, TraceId: 01jyhxj08jdt6bkavp62x7yj2k, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:29:57.626918Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631307726070696:2308], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:29:57.627141Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZmFlMjg1ZTQtYTYwMTdiZDQtZjExMDY0ZC00YmZjMzg2MQ==, ActorId: [1:7519631307726070576:2296], ActorState: ExecuteState, TraceId: 01jyhxj06aa216ccjdq5ndw9h5, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:29:57.629571Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:29:57.629360Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:29:57.802704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:57.996670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:15373", true, true, 1000); 2025-06-24T21:29:58.249667Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jyhxj13gd6mkg6kbpjmbwywe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzIxYmUyMTEtYjc0ZDc0MGYt ... 
partition_actor.cpp:890: session cookie 1 consumer shared/user session shared/user_7_1_16160522962014498166_v1 after read state TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 ReadOffset 3 ReadGuid 54e96cb0-86611e8-f1d0765b-ec2c04d6 has messages 1 2025-06-24T21:30:49.367104Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 1 consumer shared/user session shared/user_7_1_16160522962014498166_v1 read done: guid# 54e96cb0-86611e8-f1d0765b-ec2c04d6, partition# TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1), size# 220 2025-06-24T21:30:49.367198Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 1 consumer shared/user session shared/user_7_1_16160522962014498166_v1 response to read: guid# 54e96cb0-86611e8-f1d0765b-ec2c04d6 2025-06-24T21:30:49.367501Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 1 consumer shared/user session shared/user_7_1_16160522962014498166_v1 Process answer. Aval parts: 0 2025-06-24T21:30:49.368084Z :DEBUG: [/Root] [/Root] [c5b24dd-7b2558af-143f312-227c2536] [dc1] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:49.368480Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_7_1_16160522962014498166_v1 grpc read done: success# 1, data# { read { } } 2025-06-24T21:30:49.368681Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer shared/user session shared/user_7_1_16160522962014498166_v1 got read request: guid# c7e96310-cbcc656b-4a0dbbb8-c5fe90f3 2025-06-24T21:30:49.371299Z :DEBUG: [/Root] Decompression task done. Partition/PartitionSessionId: 0 (2-2) 2025-06-24T21:30:49.371517Z :DEBUG: [/Root] Take Data. Partition 0. Read: {0, 0} (2-2) 2025-06-24T21:30:49.371615Z :DEBUG: [/Root] [/Root] [c5b24dd-7b2558af-143f312-227c2536] [dc1] The application data is transferred to the client. Number of messages 1, size 8 bytes DataReceived { PartitionStreamId: 1 PartitionId: 0 Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "dc1". Topic: "test-topic" Partition: 0 PartitionKey: "" Information: { Offset: 2 SeqNo: 3 MessageGroupId: "test-message-group-id" CreateTime: 2025-06-24T21:30:48.245000Z WriteTime: 2025-06-24T21:30:48.247000Z Ip: "ipv6:[::1]:33504" UncompressedSize: 8 Meta: { "logtype": "unknown", "ident": "unknown", "server": "ipv6:[::1]:33504" } } } } 2025-06-24T21:30:49.371917Z :INFO: [/Root] [/Root] [c5b24dd-7b2558af-143f312-227c2536] Closing read session. Close timeout: 3.000000s 2025-06-24T21:30:49.371980Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:2 2025-06-24T21:30:49.372037Z :INFO: [/Root] [/Root] [c5b24dd-7b2558af-143f312-227c2536] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1363 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 84 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:30:49.372782Z :INFO: [/Root] [/Root] [c5b24dd-7b2558af-143f312-227c2536] Closing read session. 
Close timeout: 0.000000s 2025-06-24T21:30:49.372848Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:2 2025-06-24T21:30:49.372908Z :INFO: [/Root] [/Root] [c5b24dd-7b2558af-143f312-227c2536] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1364 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 84 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:30:49.373083Z :NOTICE: [/Root] [/Root] [c5b24dd-7b2558af-143f312-227c2536] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:30:49.373510Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_7_1_16160522962014498166_v1 grpc read done: success# 0, data# { } 2025-06-24T21:30:49.373540Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_7_1_16160522962014498166_v1 grpc read failed 2025-06-24T21:30:49.373571Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:1645: session cookie 1 consumer shared/user session shared/user_7_1_16160522962014498166_v1 closed 2025-06-24T21:30:49.374068Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_7_1_16160522962014498166_v1 is DEAD 2025-06-24T21:30:49.375011Z node 8 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [7:7519631529102217695:2492] disconnected; active server actors: 1 2025-06-24T21:30:49.375077Z node 8 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [7:7519631529102217695:2492] client user disconnected session shared/user_7_1_16160522962014498166_v1 2025-06-24T21:30:49.375843Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2444: [PQ: 72075186224037892] Destroy direct read session shared/user_7_1_16160522962014498166_v1 2025-06-24T21:30:49.375888Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [7:7519631529102217697:2495] destroyed 2025-06-24T21:30:49.375934Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_7_1_16160522962014498166_v1 2025-06-24T21:30:51.373215Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:51.373267Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:51.373309Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:30:51.373662Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:30:51.374161Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:30:51.374368Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:51.374801Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: 13. Commit offset: 31 2025-06-24T21:30:51.376444Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:51.376482Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:51.376570Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:30:51.376861Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:30:51.378425Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:30:51.378579Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:51.378837Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". 
Partition: 1. Read offset: (NULL) 2025-06-24T21:30:51.380123Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-24T21:30:51.380784Z :INFO: Error decompressing data: (TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check) 2025-06-24T21:30:51.380881Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-3) 2025-06-24T21:30:51.381048Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:30:51.381108Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-24T21:30:51.381150Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-24T21:30:51.381203Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 3, size 16 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { DataDecompressionError: "(TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check)" Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2025-06-24T21:30:51.383244Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:51.383276Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:51.383313Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:30:51.383782Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:30:51.384241Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:30:51.384383Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:51.384623Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-24T21:30:51.385357Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:51.385561Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:30:51.385681Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:30:51.385743Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-24T21:30:51.385834Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 2). 
Partition stream id: 1 2025-06-24T21:30:51.387321Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:51.387375Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:51.387413Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:30:51.387668Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-24T21:30:51.399417Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-24T21:30:51.399617Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:51.400574Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:30:51.400736Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-24T21:30:51.400804Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-24T21:30:51.400887Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes >> SequenceProxy::Basics [GOOD] >> SequenceProxy::DropRecreate >> TxKeys::ComparePointAndRange [GOOD] >> TxKeys::ComparePointAndRangeWithNull >> BasicUsage::WriteAndReadSomeMessagesWithNoCompression [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages >> TxKeys::ComparePointAndRangeWithNull [GOOD] >> TxKeys::ComparePointAndRangeWithInf >> TSentinelTests::PDiskUnknownState [GOOD] >> TSentinelTests::PDiskErrorState >> KqpPg::NoTableQuery-useSink [GOOD] >> KqpPg::PgCreateTable >> TColumnShardTestSchema::HotTiersAfterTtl [GOOD] >> TRUCalculatorTests::TestReadTable [GOOD] >> TRUCalculatorTests::TestBulkUpsert [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_bs_controller] [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_datashard] >> KqpPg::InsertNoTargetColumns_Simple-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Serial-useSink >> TxKeys::ComparePointAndRangeWithInf [GOOD] >> ConfigGRPCService::ReplaceConfig [GOOD] >> ConfigGRPCService::FetchConfig >> SequenceProxy::DropRecreate [GOOD] |98.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ru_calculator/unittest >> TRUCalculatorTests::TestBulkUpsert [GOOD] |98.6%| [TS] {RESULT} ydb/core/tx/schemeshard/ut_ru_calculator/unittest >> TColumnShardTestSchema::ForgetAfterFail [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::HotTiersAfterTtl [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=150801194.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801194.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801194.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150801194.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130801194.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150801194.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150801194.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799994.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130801194.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130801194.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799994.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130799994.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130799994.000000s;Name=;Codec=}; 2025-06-24T21:29:54.508343Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:54.545516Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:54.545918Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:54.554980Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:54.555285Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:54.555620Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:54.555824Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:54.555984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:54.556165Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:54.556342Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:54.556502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 
2025-06-24T21:29:54.556660Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:54.556843Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:54.557004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:54.611821Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:54.612223Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:54.612305Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:54.612547Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:54.612789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:54.612893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:54.612965Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:54.613100Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:54.613219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:54.613286Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:54.613332Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:54.613542Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:54.613650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:54.613713Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:54.613757Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:54.613874Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:54.613946Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:54.614001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:54.614043Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:54.614126Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:54.614186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:54.614232Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:54.614522Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:54.614588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:54.614659Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:54.614910Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:54.615001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:54.615053Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:54.615253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:54.615321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:54.615364Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:54.615495Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:54.615599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute; ... 7184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=3;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-24T21:30:54.753033Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:792;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:54.753084Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:821;background=cleanup;skip_reason=no_changes; 2025-06-24T21:30:54.753193Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:750;background=ttl;skip_reason=no_changes; 2025-06-24T21:30:54.753437Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750800642816:max} readable: {1750800642816:max} at tablet 9437184 2025-06-24T21:30:54.753582Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-24T21:30:54.753744Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800642816:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:30:54.753807Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800642816:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-24T21:30:54.754385Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800642816:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-24T21:30:54.754505Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800642816:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-24T21:30:54.754992Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750800642816:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:1443:3387];trace_detailed=; 2025-06-24T21:30:54.755985Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-24T21:30:54.756274Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-24T21:30:54.756489Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:54.756624Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:54.757038Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1443:3387];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:30:54.757158Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1443:3387];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:54.757274Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1443:3387];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:54.757318Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:1443:3387] finished for tablet 9437184 2025-06-24T21:30:54.757793Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:1443:3387];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:1442:3386];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ProduceResults"],"t":0.001},{"events":["f_ack","l_ack","f_processing","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.002}],"full":{"a":1750800654754917,"name":"_full_task","f":1750800654754917,"d_finished":0,"c":0,"l":1750800654757393,"d":2476},"events":[{"name":"bootstrap","f":1750800654755123,"d_finished":1542,"c":1,"l":1750800654756665,"d":1542},{"a":1750800654757010,"name":"ack","f":1750800654757010,"d_finished":0,"c":0,"l":1750800654757393,"d":383},{"a":1750800654756970,"name":"processing","f":1750800654756970,"d_finished":0,"c":0,"l":1750800654757393,"d":423},{"name":"ProduceResults","f":1750800654756399,"d_finished":487,"c":2,"l":1750800654757300,"d":487},{"a":1750800654757304,"name":"Finish","f":1750800654757304,"d_finished":0,"c":0,"l":1750800654757393,"d":89}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:54.757882Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1443:3387];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1442:3386];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:30:54.758317Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=4;SelfId=[1:1443:3387];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:1442:3386];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ProduceResults"],"t":0.001},{"events":["f_ack","f_processing","l_ProduceResults","f_Finish"],"t":0.002},{"events":["l_ack","l_processing","l_Finish"],"t":0.003}],"full":{"a":1750800654754917,"name":"_full_task","f":1750800654754917,"d_finished":0,"c":0,"l":1750800654757932,"d":3015},"events":[{"name":"bootstrap","f":1750800654755123,"d_finished":1542,"c":1,"l":1750800654756665,"d":1542},{"a":1750800654757010,"name":"ack","f":1750800654757010,"d_finished":0,"c":0,"l":1750800654757932,"d":922},{"a":1750800654756970,"name":"processing","f":1750800654756970,"d_finished":0,"c":0,"l":1750800654757932,"d":962},{"name":"ProduceResults","f":1750800654756399,"d_finished":487,"c":2,"l":1750800654757300,"d":487},{"a":1750800654757304,"name":"Finish","f":1750800654757304,"d_finished":0,"c":0,"l":1750800654757932,"d":628}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:1443:3387]->[1:1442:3386] 2025-06-24T21:30:54.758420Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1443:3387];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:30:54.754471Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-24T21:30:54.758469Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1443:3387];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:30:54.758588Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:1443:3387];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 160000/9752224 160000/9752224 160000/9752224 80000/4886744 0/0 >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_keys/unittest >> TxKeys::ComparePointAndRangeWithInf [GOOD] Test command err: 
2025-06-24T21:30:50.264125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:30:50.264185Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:50.268403Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:30:50.286843Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:30:50.288307Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2154] 2025-06-24T21:30:50.288656Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:30:50.335517Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2141], Recipient [1:133:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:30:50.367586Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:30:50.367935Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:30:50.377016Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:30:50.377148Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:30:50.377219Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:30:50.387512Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:30:50.387728Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:30:50.387826Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 2025-06-24T21:30:50.511469Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:30:50.556162Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:30:50.557575Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:30:50.557791Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2212] 2025-06-24T21:30:50.557875Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:30:50.558009Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:30:50.558106Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:30:50.558351Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:30:50.558449Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:30:50.560057Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:30:50.560254Z node 1 
:TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:30:50.560491Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:30:50.560615Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:30:50.560741Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:30:50.560803Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:30:50.560855Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:30:50.560898Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:30:50.560956Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:30:50.561192Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:212:2209], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:30:50.561235Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:30:50.561330Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2208], serverId# [1:212:2209], sessionId# [0:0:0] 2025-06-24T21:30:50.567228Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:102:2135], Recipient [1:133:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 102 RawX2: 4294969431 } TxBody: "\nY\n\006table2\032\n\n\004key1\030\002 \"\032\013\n\004key2\030\200$ #\032\014\n\005value\030\200$ 8(\"(#:\010Z\006\010\000\030\000(\000J\014/Root/table2\222\002\013\th\020\000\000\000\000\000\000\020\016" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-24T21:30:50.567324Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:30:50.567494Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-24T21:30:50.567794Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-24T21:30:50.567916Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-24T21:30:50.567983Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-24T21:30:50.568130Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-24T21:30:50.568175Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-24T21:30:50.568221Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-24T21:30:50.568263Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:30:50.568635Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution 
status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-24T21:30:50.568685Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-24T21:30:50.568762Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-24T21:30:50.568844Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:30:50.568926Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-24T21:30:50.568968Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-24T21:30:50.569043Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-24T21:30:50.569093Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-24T21:30:50.569119Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-24T21:30:50.583286Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-24T21:30:50.583401Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-24T21:30:50.583500Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-24T21:30:50.583571Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-24T21:30:50.584711Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-24T21:30:50.594987Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:221:2217], Recipient [1:133:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:30:50.595071Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:30:50.595125Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:220:2216], serverId# [1:221:2217], sessionId# [0:0:0] 2025-06-24T21:30:50.595370Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:102:2135], Recipient [1:133:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-24T21:30:50.595418Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-24T21:30:50.595616Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-24T21:30:50.595677Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-24T21:30:50.595715Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-24T21:30:50.595781Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-24T21:30:50.606155Z node 1 
:TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 102 RawX2: 4294969431 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-24T21:30:50.606285Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:30:50.606582Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:133:2154], Recipient [1:133:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:30:50.606622Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:30:50.606697Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:30:50.606752Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:30:50.606841Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-24T21:30:50.606911Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-24T21:30:50.606959Z node 1 :TX_DATASHARD TRACE: dat ... ode 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:30:54.782265Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [5:24:2071], Recipient [5:133:2154]: {TEvRegisterTabletResult TabletId# 9437184 Entry# 0} 2025-06-24T21:30:54.782318Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-24T21:30:54.782363Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 9437184 time 0 2025-06-24T21:30:54.782407Z node 5 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:30:54.785488Z node 5 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437184 step# 1000001 txid# 1} 2025-06-24T21:30:54.785559Z node 5 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437184 step# 1000001} 2025-06-24T21:30:54.785630Z node 5 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:30:54.787938Z node 5 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:30:54.788009Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000001:1] at 9437184 on unit CreateTable 2025-06-24T21:30:54.788054Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:30:54.788113Z node 5 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 9437184 2025-06-24T21:30:54.788151Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000001:1] at 9437184 on unit CompleteOperation 2025-06-24T21:30:54.788223Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000001 : 1] from 9437184 at tablet 9437184 send result to client [5:102:2135], exec latency: 0 ms, propose latency: 2 ms 2025-06-24T21:30:54.788279Z node 5 :TX_DATASHARD INFO: datashard.cpp:1590: 9437184 Sending notify to 
schemeshard 4200 txId 1 state Ready TxInFly 0 2025-06-24T21:30:54.788385Z node 5 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:30:54.789150Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5603: Got TEvDataShard::TEvSchemaChanged for unknown txId 1 message# Source { RawX1: 133 RawX2: 21474838634 } Origin: 9437184 State: 2 TxId: 1 Step: 0 Generation: 2 2025-06-24T21:30:54.789266Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [5:230:2226], Recipient [5:133:2154]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 4200 Status: OK ServerId: [5:232:2227] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-24T21:30:54.789313Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-24T21:30:54.789410Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552132, Sender [5:127:2151], Recipient [5:133:2154]: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 1 2025-06-24T21:30:54.789445Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvSchemaChangedResult 2025-06-24T21:30:54.789490Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 1 datashard 9437184 state Ready 2025-06-24T21:30:54.789569Z node 5 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 9437184 Got TEvSchemaChangedResult from SS at 9437184 2025-06-24T21:30:54.790008Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 65543, Sender [5:102:2135], Recipient [5:133:2154]: NActors::TEvents::TEvPoison 2025-06-24T21:30:54.790386Z node 5 :TX_DATASHARD INFO: datashard.cpp:190: OnDetach: 9437184 2025-06-24T21:30:54.790512Z node 5 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 9437184 2025-06-24T21:30:54.802117Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [5:235:2228], Recipient [5:238:2229]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:30:54.805893Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [5:235:2228], Recipient [5:238:2229]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:30:54.806032Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828684, Sender [5:235:2228], Recipient [5:238:2229]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:30:54.814029Z node 5 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [5:238:2229] 2025-06-24T21:30:54.814291Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:30:54.818217Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:673: TxInitSchema.Execute Persist Sys_SubDomainInfo 2025-06-24T21:30:54.852006Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:30:54.852165Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:30:54.854427Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:30:54.854531Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:30:54.854599Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:30:54.855046Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-24T21:30:54.859504Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:30:54.859629Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [5:281:2229] in generation 3 2025-06-24T21:30:54.876245Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:30:54.876380Z node 5 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 9437184 2025-06-24T21:30:54.876483Z node 5 :TX_DATASHARD INFO: datashard.cpp:1590: 9437184 Sending notify to schemeshard 4200 txId 1 state Ready TxInFly 0 2025-06-24T21:30:54.876651Z node 5 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 9437184 mediators count is 0 coordinators count is 1 buckets per mediator 2 2025-06-24T21:30:54.876916Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [5:286:2268] 2025-06-24T21:30:54.876963Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:30:54.877036Z node 5 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 9437184 2025-06-24T21:30:54.877077Z node 5 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:30:54.877273Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:711: TxInitSchemaDefaults.Execute 2025-06-24T21:30:54.877401Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:723: TxInitSchemaDefaults.Complete 2025-06-24T21:30:54.877643Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [5:238:2229], Recipient [5:238:2229]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:30:54.877703Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:30:54.878032Z node 5 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:30:54.878137Z node 5 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:30:54.878275Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [5:24:2071], Recipient [5:238:2229]: {TEvRegisterTabletResult TabletId# 9437184 Entry# 0} 2025-06-24T21:30:54.878322Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-24T21:30:54.878367Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 9437184 time 0 2025-06-24T21:30:54.878416Z node 5 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:30:54.878552Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5603: Got TEvDataShard::TEvSchemaChanged for unknown txId 1 message# Source { RawX1: 238 RawX2: 21474838709 } Origin: 9437184 State: 2 TxId: 1 Step: 0 Generation: 3 2025-06-24T21:30:54.878619Z node 5 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:30:54.878664Z node 5 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:30:54.878710Z node 5 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:30:54.878757Z node 5 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:30:54.878800Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:30:54.878841Z node 5 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:30:54.878890Z node 5 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:30:54.878993Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [5:24:2071], Recipient [5:238:2229]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 0 ReadStep# 0 } 2025-06-24T21:30:54.879030Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-24T21:30:54.879080Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 9437184 coordinator 72057594046316545 last step 0 next step 0 2025-06-24T21:30:54.879208Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [5:284:2266], Recipient [5:238:2229]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 4200 Status: OK ServerId: [5:288:2270] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-24T21:30:54.879251Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-24T21:30:54.879342Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552132, Sender [5:127:2151], Recipient [5:238:2229]: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 1 2025-06-24T21:30:54.879377Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvSchemaChangedResult 2025-06-24T21:30:54.879422Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 1 datashard 9437184 state Ready 2025-06-24T21:30:54.879487Z node 5 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 9437184 Got TEvSchemaChangedResult from SS at 9437184 2025-06-24T21:30:54.899000Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [5:284:2266], Recipient [5:238:2229]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 4200 ClientId: [5:284:2266] ServerId: [5:288:2270] } 2025-06-24T21:30:54.899074Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed |98.7%| [TM] {RESULT} ydb/core/tx/datashard/ut_keys/unittest >> KqpPg::CreateTableSerialColumns-useSink [GOOD] >> KqpPg::DropIndex ------- [TS] {asan, default-linux-x86_64, release} ydb/core/tx/sequenceproxy/ut/unittest >> SequenceProxy::DropRecreate [GOOD] Test command err: 2025-06-24T21:30:50.874644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:30:50.874694Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:50.964839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:52.045484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSequence, opId: 281474976715657:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp:543) 2025-06-24T21:30:52.364962Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T21:30:52.365593Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/0019eb/r3tmp/tmpaGzDej/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T21:30:52.366318Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/0019eb/r3tmp/tmpaGzDej/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/0019eb/r3tmp/tmpaGzDej/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 14110023735896241593 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T21:30:53.533128Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:30:53.533201Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:53.581692Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:54.164209Z node 3 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSequence, opId: 281474976715657:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp:543) 2025-06-24T21:30:54.440671Z node 4 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T21:30:54.441260Z node 4 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/0019eb/r3tmp/tmpep8U5G/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T21:30:54.441499Z node 4 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/0019eb/r3tmp/tmpep8U5G/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/0019eb/r3tmp/tmpep8U5G/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 16683787388515438673 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T21:30:54.568889Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropSequence, opId: 281474976715658:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_sequence.cpp:343) 2025-06-24T21:30:54.866462Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSequence, opId: 281474976715659:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp:543) |98.7%| [TS] {RESULT} 
ydb/core/tx/sequenceproxy/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::ForgetAfterFail [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150801183.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130801183.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130799983.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; 2025-06-24T21:29:45.966754Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:29:45.997426Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:29:45.997805Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:29:46.005915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:29:46.006214Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:29:46.006509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:29:46.006644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:29:46.006756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:29:46.006881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:29:46.006994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:29:46.007104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:29:46.007263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:29:46.007438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:29:46.007571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:29:46.040817Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:29:46.041092Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:29:46.041191Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:29:46.041384Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:46.041579Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:29:46.041665Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:29:46.041714Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:29:46.041836Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:29:46.041928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:29:46.041976Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:29:46.042012Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:29:46.042186Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:29:46.042260Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:29:46.042307Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:29:46.042340Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:29:46.042431Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:29:46.042490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:29:46.042540Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:29:46.042572Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:29:46.042636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:29:46.042678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:29:46.042710Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:29:46.042923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:29:46.042972Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:29:46.043007Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:29:46.043261Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:29:46.043339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:29:46.043375Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:29:46.043546Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:29:46.043604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:29:46.043645Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:29:46.043728Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:29:46.043796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:29:46.043839Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:29:46.043871Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:29:46.044405Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=66; 2025-06-24T21:29:46.044514Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=43; 2025-06-24T21:29:46.044596Z node 1 :TX_COLUMNSH ... 
=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:55.384703Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=160000;finished=1; 2025-06-24T21:30:55.384754Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-24T21:30:55.385115Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:30:55.385270Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:160000;schema=timestamp: timestamp[us];);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:55.385314Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-24T21:30:55.385418Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);columns=1;rows=160000; 2025-06-24T21:30:55.385478Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=1280000;num_rows=160000;batch_columns=timestamp; 2025-06-24T21:30:55.385752Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1238:3174];bytes=1280000;rows=160000;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us]; Got TEvKqpCompute::TEvScanData [1:1239:3175]->[1:1238:3174] 2025-06-24T21:30:55.385888Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:55.386000Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:55.386089Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:55.386258Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-24T21:30:55.386363Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:55.386455Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:55.386495Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:1239:3175] finished for tablet 9437184 2025-06-24T21:30:55.386937Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:1238:3174];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.612},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.614}],"full":{"a":1750800654772433,"name":"_full_task","f":1750800654772433,"d_finished":0,"c":0,"l":1750800655386557,"d":614124},"events":[{"name":"bootstrap","f":1750800654772593,"d_finished":2132,"c":1,"l":1750800654774725,"d":2132},{"a":1750800655386234,"name":"ack","f":1750800655385085,"d_finished":1031,"c":1,"l":1750800655386116,"d":1354},{"a":1750800655386218,"name":"processing","f":1750800654776935,"d_finished":317849,"c":8,"l":1750800655386119,"d":318188},{"name":"ProduceResults","f":1750800654773793,"d_finished":2649,"c":11,"l":1750800655386478,"d":2649},{"a":1750800655386481,"name":"Finish","f":1750800655386481,"d_finished":0,"c":0,"l":1750800655386557,"d":76},{"name":"task_result","f":1750800654776960,"d_finished":316613,"c":7,"l":1750800655384871,"d":316613}],"id":"9437184::7"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-24T21:30:55.387002Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1238:3174];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-24T21:30:55.387407Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:1238:3174];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.612},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.614}],"full":{"a":1750800654772433,"name":"_full_task","f":1750800654772433,"d_finished":0,"c":0,"l":1750800655387039,"d":614606},"events":[{"name":"bootstrap","f":1750800654772593,"d_finished":2132,"c":1,"l":1750800654774725,"d":2132},{"a":1750800655386234,"name":"ack","f":1750800655385085,"d_finished":1031,"c":1,"l":1750800655386116,"d":1836},{"a":1750800655386218,"name":"processing","f":1750800654776935,"d_finished":317849,"c":8,"l":1750800655386119,"d":318670},{"name":"ProduceResults","f":1750800654773793,"d_finished":2649,"c":11,"l":1750800655386478,"d":2649},{"a":1750800655386481,"name":"Finish","f":1750800655386481,"d_finished":0,"c":0,"l":1750800655387039,"d":558},{"name":"task_result","f":1750800654776960,"d_finished":316613,"c":7,"l":1750800655384871,"d":316613}],"id":"9437184::7"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:1239:3175]->[1:1238:3174] 2025-06-24T21:30:55.387509Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-24T21:30:54.772009Z;index_granules=0;index_portions=1;index_batches=1146;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=10565848;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=10565848;selected_rows=0; 2025-06-24T21:30:55.387550Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:191;event=scan_aborted;reason=unexpected on destructor; 2025-06-24T21:30:55.387829Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:1239:3175];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/10565848 160000/10565848 0/0 160000/10565848 >> Compression::WriteWithMixedCodecs [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithAbort >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListQueries >> 
TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListQueries [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeQuery >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_datashard] [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_hive] [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_schemeshard] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_schemeshard] [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_tx_coordinator] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendGetQueryStatus >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_tx_coordinator] [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[tx_allocator] [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[keyvalueflat] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendGetQueryStatus [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyQuery >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDeleteQuery >> TSequence::CreateTableWithDefaultFromSequence [GOOD] >> TSequence::SequencesIndex >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDeleteQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendControlQuery >> Etcd_KV::SimpleWriteReadDelete >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert+UseSink-UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert-UseSink+UseDataQuery >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendControlQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendGetResultData >> ConfigGRPCService::FetchConfig [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[keyvalueflat] [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[tx_mediator] [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[persqueue] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendGetResultData [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListJobs >> KqpPg::InsertNoTargetColumns_Serial-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefault+useSink >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListJobs [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeJob >> CoordinatorTests::Route >> CoordinatorTests::Route [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/config/ut/unittest >> ConfigGRPCService::FetchConfig [GOOD] >> CoordinatorTests::RouteTwoTopicWichSameName Test command err: 2025-06-24T21:30:51.407861Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631538426832183:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:30:51.407963Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00161f/r3tmp/tmpHlEsBe/pdisk_1.dat 2025-06-24T21:30:52.116741Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 
2025-06-24T21:30:52.118926Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631538426832152:2080] 1750800651402320 != 1750800651402323 2025-06-24T21:30:52.151141Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:30:52.151291Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:30:52.162579Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2183, node 1 2025-06-24T21:30:52.244579Z node 1 :GRPC_SERVER NOTICE: grpc_request_proxy.cpp:367: Grpc request proxy started, nodeid# 1, serve as static node 2025-06-24T21:30:52.245069Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:557: Subscribe to /Root 2025-06-24T21:30:52.245343Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:403: Subscribed for config changes 2025-06-24T21:30:52.245359Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:411: Updated app config 2025-06-24T21:30:52.245436Z node 1 :GRPC_SERVER NOTICE: grpc_request_proxy.cpp:367: Grpc request proxy started, nodeid# 1, serve as static node 2025-06-24T21:30:52.245550Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:557: Subscribe to /Root 2025-06-24T21:30:52.246126Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:420: Got proxy service configuration 2025-06-24T21:30:52.246140Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:420: Got proxy service configuration 2025-06-24T21:30:52.246505Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:403: Subscribed for config changes 2025-06-24T21:30:52.246522Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:411: Updated app config 2025-06-24T21:30:52.255379Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:30:52.255447Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:30:52.255471Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:30:52.255484Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:30:52.360488Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002a080] created request Name# BlobStorageConfig 2025-06-24T21:30:52.361787Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002a680] created request Name# HiveCreateTablet 2025-06-24T21:30:52.363856Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002ac80] created request Name# TabletStateRequest 2025-06-24T21:30:52.364159Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002b280] created request Name# SchemeOperationStatus 2025-06-24T21:30:52.365277Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002b880] created request Name# ChooseProxy 2025-06-24T21:30:52.365655Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002be80] created request Name# ResolveNode 2025-06-24T21:30:52.366688Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002c480] created request Name# FillNode 2025-06-24T21:30:52.368900Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002ca80] created request Name# DrainNode 2025-06-24T21:30:52.370152Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002d080] created request Name# InterconnectDebug 2025-06-24T21:30:52.370638Z node 1 :GRPC_SERVER DEBUG: 
grpc_server.cpp:82: [0x51a00002d680] created request Name# TestShardControl 2025-06-24T21:30:52.370941Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002dc80] created request Name# RegisterNode 2025-06-24T21:30:52.371333Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002e280] created request Name# CmsRequest 2025-06-24T21:30:52.371642Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002e880] created request Name# ConsoleRequest 2025-06-24T21:30:52.371932Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002ee80] created request Name# SchemeInitRoot 2025-06-24T21:30:52.372385Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000c0080] created request Name# PersQueueRequest 2025-06-24T21:30:52.373441Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000c0680] created request Name# SchemeOperation 2025-06-24T21:30:52.374302Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000c0c80] created request Name# SchemeDescribe 2025-06-24T21:30:52.433420Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:30:52.496139Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:30:52.496163Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:30:52.496176Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:30:52.496327Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12149 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:30:53.306449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "hdd2" Kind: "hdd2" } StoragePools { Name: "hdd" Kind: "hdd" } StoragePools { Name: "hdd1" Kind: "hdd1" } StoragePools { Name: "ssd" Kind: "ssd" } StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:30:53.311639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T21:30:53.315280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T21:30:53.315336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976715657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T21:30:53.319269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:30:53.319409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:53.332762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T21:30:53.337971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T21:30:53.338210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T21:30:53.338278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T21:30:53.338401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-06-24T21:30:53.338415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 2 -> 3 2025-06-24T21:30:53.347455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:30:53.347543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T21:30:53.347561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 3 -> 128 2025-06-24T21:30:53.356537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T21:30:53.356575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T21:30:53.356624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T21:30:53.356657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-06-24T21:30:53.371107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:53.371609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715657, at schemeshard: 72057594046644 ... te::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T21:30:56.673281Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T21:30:56.673298Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-06-24T21:30:56.673404Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:56.674950Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 281474976715657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976715657 msg type: 269090816 2025-06-24T21:30:56.675028Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 281474976715657, partId: 4294967295, tablet: 72057594046316545 2025-06-24T21:30:56.677405Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 1750800656724, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-24T21:30:56.677494Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750800656724 MediatorID: 72057594046382081 TabletID: 72057594046644480, at 
schemeshard: 72057594046644480 2025-06-24T21:30:56.677529Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T21:30:56.677733Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 128 -> 240 2025-06-24T21:30:56.677766Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T21:30:56.677858Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-24T21:30:56.677883Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-24T21:30:56.679795Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-24T21:30:56.679812Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715657, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-24T21:30:56.679929Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-24T21:30:56.679938Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:7519631563078678231:2366], at schemeshard: 72057594046644480, txId: 281474976715657, path id: 1 2025-06-24T21:30:56.679969Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T21:30:56.680013Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046644480] TDone opId# 281474976715657:0 ProgressState 2025-06-24T21:30:56.680089Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715657:0 progress is 1/1 2025-06-24T21:30:56.680103Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715657 ready parts: 1/1 2025-06-24T21:30:56.680120Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#281474976715657:0 progress is 1/1 2025-06-24T21:30:56.680128Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715657 ready parts: 1/1 2025-06-24T21:30:56.680141Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 1/1, is published: false 2025-06-24T21:30:56.680160Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 281474976715657 ready parts: 1/1 2025-06-24T21:30:56.680174Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 281474976715657:0 2025-06-24T21:30:56.680183Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: 
RemoveTx for txid 281474976715657:0 2025-06-24T21:30:56.680215Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-24T21:30:56.680224Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 281474976715657, publications: 1, subscribers: 1 2025-06-24T21:30:56.680233Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 281474976715657, [OwnerId: 72057594046644480, LocalPathId: 1], 3 2025-06-24T21:30:56.681392Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976715657 2025-06-24T21:30:56.681455Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976715657 2025-06-24T21:30:56.681467Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976715657 2025-06-24T21:30:56.681481Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715657, pathId: [OwnerId: 72057594046644480, LocalPathId: 1], version: 3 2025-06-24T21:30:56.681498Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-24T21:30:56.681546Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715657, subscribers: 1 2025-06-24T21:30:56.681558Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [3:7519631563078678527:2275] 2025-06-24T21:30:56.684443Z node 3 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:30:56.684533Z node 3 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:30:56.684551Z node 3 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:30:56.684578Z node 3 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:30:56.686427Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715657 2025-06-24T21:30:56.743138Z node 3 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# FetchConfigRequest, traceId# 01jyhxktb6djtrg33awyjz0f59, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:50940, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-24T21:30:56.749916Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00008b280] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-06-24T21:30:56.750148Z node 3 
:GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000100280] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-06-24T21:30:56.750324Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000085280] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-06-24T21:30:56.750485Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000101480] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-06-24T21:30:56.750655Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000a5c80] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-06-24T21:30:56.750829Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000a6880] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-06-24T21:30:56.750982Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000098480] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-06-24T21:30:56.751159Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000097880] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-06-24T21:30:56.751282Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000096c80] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-06-24T21:30:56.751295Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000fde80] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-06-24T21:30:56.751485Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000fe480] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-06-24T21:30:56.751485Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000095480] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-06-24T21:30:56.751645Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000a6280] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-06-24T21:30:56.751661Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000a6e80] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-06-24T21:30:56.751810Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000a7480] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-06-24T21:30:56.751836Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000096080] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-06-24T21:30:56.751964Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000fa880] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 2025-06-24T21:30:56.996255Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |98.7%| [TM] {RESULT} ydb/services/config/ut/unittest >> CoordinatorTests::RouteTwoTopicWichSameName [GOOD] >> LeaderElectionTests::Test1 >> BasicStatistics::NotFullStatisticsDatashard [GOOD] >> KqpPg::DropIndex [GOOD] >> KqpPg::CreateUniqPgColumn+useSink >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeJob [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateConnection >> TRangeTreap::Sequential [GOOD] >> TRangeTreap::Random >> LeaderElectionTests::Test1 [GOOD] >> LeaderElectionTests::TestLocalMode >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateConnection [GOOD] >> 
TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateConnectionWithServiceAccount >> LeaderElectionTests::TestLocalMode [GOOD] >> TRangeTreap::Random [GOOD] >> TopicSessionTests::TwoSessionsWithoutOffsets >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[persqueue] [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[kesus] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListConnections ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::NotFullStatisticsDatashard [GOOD] Test command err: 2025-06-24T21:24:04.278302Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:24:04.278696Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:04.278871Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004848/r3tmp/tmp7bTI6P/pdisk_1.dat 2025-06-24T21:24:04.748102Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19452, node 1 2025-06-24T21:24:05.074228Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:05.074284Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:05.074317Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:05.074569Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:05.077330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:05.196269Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:05.196475Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:05.210086Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1876 2025-06-24T21:24:05.863138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:09.236830Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:24:09.283531Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:09.283698Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:09.354848Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:24:09.356509Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:09.587120Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:09.623755Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.624461Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.625020Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.625164Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.625445Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.625542Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.625649Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.625732Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.625811Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.816706Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:09.816847Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:09.830316Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:10.003808Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:10.057960Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:24:10.058075Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:24:10.095759Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:24:10.097462Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:24:10.097699Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:24:10.097771Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:24:10.097841Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:24:10.097909Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:24:10.097998Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:24:10.098060Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:24:10.098963Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:24:10.135139Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:10.135289Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1788:2558], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:10.141813Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-24T21:24:10.146657Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1812:2576] 2025-06-24T21:24:10.146973Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1812:2576], schemeshard id = 72075186224037897 2025-06-24T21:24:10.154531Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:24:10.180748Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:24:10.180814Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:24:10.180893Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:24:10.197973Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:10.207776Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:24:10.207943Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:24:10.436540Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:24:10.669421Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:24:10.727992Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:24:11.251738Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:11.848804Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:11.848975Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:11.972067Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:12.587740Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2448:3072], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:12.587920Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:12.589205Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2453:3076]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:24:12.589399Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:24:12.589500Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2455:3078] 2025-06-24T21:24:12.590533Z nod ... .cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:30:09.720949Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:30:10.898229Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:10.898287Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:12.851523Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:30:12.851683Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:12.851724Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:15.326178Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-24T21:30:15.326417Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:15.326459Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:15.326598Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:30:15.327029Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:30:17.835561Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:17.835662Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:18.932683Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:30:20.091897Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-24T21:30:20.091966Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7957: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-24T21:30:20.091999Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7988: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-24T21:30:20.092031Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-24T21:30:20.303468Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:20.303551Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-24T21:30:21.489681Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-24T21:30:21.489854Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:30:21.490330Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:30:22.915787Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:22.915870Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:25.452476Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:30:25.452628Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:25.452663Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:27.809297Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-24T21:30:27.809503Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:30:27.809731Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:27.809769Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:27.810098Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:30:30.119598Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:30.119680Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:31.261321Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:30:32.691899Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:32.691987Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:33.946891Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-24T21:30:33.947069Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:30:33.947522Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:30:35.244212Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:35.244267Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:37.651180Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:30:37.651395Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:37.651440Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-24T21:30:40.527257Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-24T21:30:40.527509Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:40.527553Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:40.527837Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:30:40.528109Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:30:43.159762Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:43.159835Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:44.346047Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:30:45.727165Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:45.727250Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:47.017164Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-24T21:30:47.017661Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:30:47.017949Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:30:48.326414Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:48.326487Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:50.792899Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:30:50.793116Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:50.793162Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:53.399238Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-24T21:30:53.399406Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:30:53.399596Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:53.399631Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:53.399941Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-24T21:30:54.751875Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8072: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037897 2025-06-24T21:30:54.751959Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 210.000000s, at schemeshard: 72075186224037897 2025-06-24T21:30:54.752199Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 49 ... waiting for TEvSchemeShardStats 2 (done) ... 
waiting for TEvPropagateStatistics 2025-06-24T21:30:54.772514Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-24T21:30:56.046110Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:56.046210Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:57.300936Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:30:58.663811Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-24T21:30:58.663904Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7957: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-24T21:30:58.663953Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7988: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-24T21:30:58.663998Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-24T21:30:58.870054Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:58.870120Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:31:00.239783Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-24T21:31:00.240461Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 ... waiting for TEvPropagateStatistics (done) 2025-06-24T21:31:00.240970Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:13376:7463]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:31:00.241456Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:31:00.244449Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-24T21:31:00.244523Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 3, ReplyToActorId = [2:13376:7463], StatRequests.size() = 1 >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListConnections [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeConnection ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/locks/ut_range_treap/unittest >> TRangeTreap::Random [GOOD] Test command err: NOTE: building treap of size 1000000 got height 50 and needed 1000000 ops (1000000 inserts 0 updates 0 deletes) and 31724528 comparisons (31.724528 per op) NOTE: building treap of size 8798 got height 31 and needed 10937 ops (9735 inserts 265 updates 937 deletes) and 219182 comparisons (20.04041328 per op) Checking point 4005 ... found 2209 ranges, needed 7543 comparisons (3.41466727 per range) Checking point 5065 ... found 2309 ranges, needed 8539 comparisons (3.698137722 per range) Checking point 1945 ... found 1415 ranges, needed 3858 comparisons (2.726501767 per range) Checking point 5607 ... found 2323 ranges, needed 8567 comparisons (3.687903573 per range) Checking point 4700 ... found 2325 ranges, needed 8495 comparisons (3.653763441 per range) Checking point 1333 ... 
found 1047 ranges, needed 2663 comparisons (2.543457498 per range) Checking point 8217 ... found 2251 ranges, needed 8204 comparisons (3.644602399 per range) Checking point 1196 ... found 951 ranges, needed 2393 comparisons (2.516298633 per range) Checking point 5416 ... found 2306 ranges, needed 8452 comparisons (3.665221162 per range) Checking point 3158 ... found 2004 ranges, needed 6139 comparisons (3.063373253 per range) |98.7%| [TM] {RESULT} ydb/core/tx/locks/ut_range_treap/unittest >> test_scheduling.py::TestSchedule::test_skip_busy[kikimr0] [SKIPPED] >> test_dc_local.py::TestAlloc::test_dc_locality[kikimr0] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyConnection >> test_timeout.py::TestTimeout::test_timeout >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyConnectionWithServiceAccount >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[kesus] [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDeleteConnection >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDeleteConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendTestConnection >> Graph::CreateGraphShard >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendTestConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendTestConnectionWithServiceAccount >> DataShardStats::OneChannelStatsCorrect >> TabletService_ChangeSchema::Basics >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendTestConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateBinding >> TSequence::SequencesIndex [GOOD] >> TSequence::CreateTableWithDefaultFromSequenceFromSelect >> KqpPg::InsertValuesFromTableWithDefault+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefault-useSink >> MetadataConversion::MakeAuthTest [GOOD] >> MetadataConversion::ConvertingExternalSourceMetadata [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateBinding [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListBindings |98.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/gateway/ut/gtest >> MetadataConversion::ConvertingExternalSourceMetadata [GOOD] |98.7%| [TS] {RESULT} ydb/core/kqp/gateway/ut/gtest >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListBindings [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeBinding >> Graph::CreateGraphShard [GOOD] >> Graph::UseGraphShard >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeBinding [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyBinding >> Coordinator::ReadStepSubscribe [GOOD] >> Coordinator::LastStepSubscribe >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyBinding [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDeleteBinding >> test_timeout.py::TestTimeout::test_timeout [GOOD] >> test_commit.py::TestCommit::test_commit >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDeleteBinding [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateQuery >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages [GOOD] >> 
BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListQueries >> Graph::UseGraphShard [GOOD] >> Graph::MemoryBackendFullCycle >> test_commit.py::TestCommit::test_commit [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListQueries [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeQuery >> HttpRequest::Probe [GOOD] >> TIndexProcesorTests::TestCreateIndexProcessor >> TGenerateQueueIdTests::MakeQueueIdBasic [GOOD] >> TParseParamsTests::CreateUser [GOOD] >> TParseParamsTests::ChangeMessageVisibilityBatchRequest [GOOD] >> TParseParamsTests::DeleteMessageBatchRequest [GOOD] >> TParseParamsTests::MessageBody [GOOD] >> TParseParamsTests::SendMessageBatchRequest [GOOD] >> TParseParamsTests::DeleteQueueBatchRequest [GOOD] >> TParseParamsTests::PurgeQueueBatchRequest [GOOD] >> TParseParamsTests::GetQueueAttributesBatchRequest [GOOD] >> TParseParamsTests::UnnumberedAttribute [GOOD] >> TParseParamsTests::UnnumberedAttributeName [GOOD] >> TParseParamsTests::FailsOnInvalidDeduplicationId [GOOD] >> TParseParamsTests::FailsOnInvalidGroupId [GOOD] >> TParseParamsTests::FailsOnInvalidReceiveRequestAttemptId [GOOD] >> TParseParamsTests::FailsOnInvalidMaxNumberOfMessages [GOOD] >> TParseParamsTests::FailsOnInvalidWaitTime [GOOD] >> TParseParamsTests::FailsOnInvalidDelaySeconds [GOOD] >> test_http_api.py::TestHttpApi::test_simple_analytics_query >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendGetQueryStatus |98.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/ymq/ut/unittest >> TParseParamsTests::FailsOnInvalidDelaySeconds [GOOD] |98.7%| [TS] {RESULT} ydb/core/ymq/ut/unittest >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendGetQueryStatus [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyQuery >> Etcd_KV::SimpleWriteReadDelete [GOOD] >> Etcd_KV::ReadRangeWithLimit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> HttpRequest::Probe [GOOD] Test command err: 2025-06-24T21:24:06.382788Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:416:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:24:06.383245Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:06.383431Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00482b/r3tmp/tmpZPi3jC/pdisk_1.dat 2025-06-24T21:24:06.811574Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2042, node 1 2025-06-24T21:24:07.105162Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:07.105260Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:07.105288Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:07.105587Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:07.108443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:07.240615Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:07.240768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:07.254863Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19455 2025-06-24T21:24:07.896448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:11.482998Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:24:11.528321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:11.528474Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:11.594538Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:24:11.597266Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:11.813190Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:11.849119Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.849766Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.850355Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.850508Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.850592Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.850815Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.850910Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.850990Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:11.851104Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:12.022737Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:12.022846Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:12.041874Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:12.235758Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:12.287940Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:24:12.288073Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:24:12.325604Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:24:12.327101Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:24:12.327387Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:24:12.327457Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:24:12.327533Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:24:12.327596Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:24:12.327673Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:24:12.327741Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:24:12.329819Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:24:12.368674Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:12.368803Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1790:2560], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:12.378537Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2568] 2025-06-24T21:24:12.391744Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1852:2588] 2025-06-24T21:24:12.392472Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1852:2588], schemeshard id = 72075186224037897 2025-06-24T21:24:12.398076Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:24:12.417541Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:24:12.417616Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:24:12.417697Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:24:12.429866Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:12.441226Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:24:12.441387Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:24:12.665330Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:24:12.888145Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:24:12.948443Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:24:13.463119Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:13.725146Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2141:3022], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:13.725286Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:13.752171Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:24:14.052497Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2276:2821];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:24:14.052746Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2276:2821];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:24:14.053079Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2276:2821];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:24:14.053267Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2276:2821];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:24:14.053421Z node 2 :TX_COLUMNSHARD WARN: ... G: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-24T21:31:09.130946Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-24T21:31:09.131269Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-24T21:31:09.132477Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:18926:11984], server id = [2:18931:11989], tablet id = 72075186224037899, status = OK 2025-06-24T21:31:09.132634Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:18926:11984], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:31:09.134350Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:18927:11985], server id = [2:18932:11990], tablet id = 72075186224037900, status = OK 2025-06-24T21:31:09.134465Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:18927:11985], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:31:09.135565Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:18928:11986], server id = [2:18933:11991], tablet id = 72075186224037901, status = OK 2025-06-24T21:31:09.135652Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:18928:11986], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:31:09.135899Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:18929:11987], server id = [2:18934:11992], tablet id = 72075186224037902, status = OK 2025-06-24T21:31:09.135974Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:18929:11987], path = { OwnerId: 
72075186224037897 LocalId: 4 } 2025-06-24T21:31:09.137517Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:18930:11988], server id = [2:18935:11993], tablet id = 72075186224037903, status = OK 2025-06-24T21:31:09.137594Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:18930:11988], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:31:09.138289Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-24T21:31:09.139013Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:18926:11984], server id = [2:18931:11989], tablet id = 72075186224037899 2025-06-24T21:31:09.139066Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:31:09.140013Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-24T21:31:09.140867Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:18927:11985], server id = [2:18932:11990], tablet id = 72075186224037900 2025-06-24T21:31:09.140910Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:31:09.141599Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-24T21:31:09.141934Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-24T21:31:09.142292Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037903 2025-06-24T21:31:09.142699Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:18941:11999], server id = [2:18943:12001], tablet id = 72075186224037904, status = OK 2025-06-24T21:31:09.142801Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:18941:11999], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:31:09.142961Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:18929:11987], server id = [2:18934:11992], tablet id = 72075186224037902 2025-06-24T21:31:09.142997Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:31:09.143875Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:18942:12000], server id = [2:18944:12002], tablet id = 72075186224037905, status = OK 2025-06-24T21:31:09.143961Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:18942:12000], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:31:09.144828Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:18928:11986], server id = [2:18933:11991], tablet id = 72075186224037901 2025-06-24T21:31:09.144866Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:31:09.145201Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:18930:11988], server id = [2:18935:11993], tablet id = 72075186224037903 2025-06-24T21:31:09.145250Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:31:09.145848Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:18945:12003], server id = [2:18947:12005], tablet id = 
72075186224037906, status = OK 2025-06-24T21:31:09.145934Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:18945:12003], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:31:09.146617Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:18946:12004], server id = [2:18949:12007], tablet id = 72075186224037907, status = OK 2025-06-24T21:31:09.146693Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:18946:12004], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:31:09.146848Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:18948:12006], server id = [2:18950:12008], tablet id = 72075186224037908, status = OK 2025-06-24T21:31:09.146906Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:18948:12006], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-24T21:31:09.149677Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037904 2025-06-24T21:31:09.150268Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037905 2025-06-24T21:31:09.150904Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:18941:11999], server id = [2:18943:12001], tablet id = 72075186224037904 2025-06-24T21:31:09.150946Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:31:09.151409Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:18942:12000], server id = [2:18944:12002], tablet id = 72075186224037905 2025-06-24T21:31:09.151449Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:31:09.151803Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037906 2025-06-24T21:31:09.152051Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037908 2025-06-24T21:31:09.152289Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037907 2025-06-24T21:31:09.152352Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-24T21:31:09.152611Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-24T21:31:09.152853Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-24T21:31:09.153674Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-24T21:31:09.157454Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:18945:12003], server id = [2:18947:12005], tablet id = 72075186224037906 2025-06-24T21:31:09.157515Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:31:09.158228Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:18948:12006], server id = [2:18950:12008], tablet id = 72075186224037908 2025-06-24T21:31:09.158264Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:31:09.158629Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-24T21:31:09.159479Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:18946:12004], server id = [2:18949:12007], tablet id = 72075186224037907 2025-06-24T21:31:09.159512Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-24T21:31:09.208088Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YWZkMWMzOWEtOWNhNmNkY2ItZDE0ODQ5NWYtNmFkNWNkZTg=, TxId: 2025-06-24T21:31:09.208191Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YWZkMWMzOWEtOWNhNmNkY2ItZDE0ODQ5NWYtNmFkNWNkZTg=, TxId: 2025-06-24T21:31:09.209479Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-24T21:31:09.226507Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-24T21:31:09.226604Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=@ ]l4mN, ActorId=[1:5751:3809] 2025-06-24T21:31:09.230544Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:18992:10318]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-24T21:31:09.235501Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:31:09.235608Z node 1 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-24T21:31:09.238559Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-24T21:31:09.238655Z node 1 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-24T21:31:09.238721Z node 1 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 2 ] 2025-06-24T21:31:09.266604Z node 1 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 Answer: '/Root/Database/Table1[Value]=4' >> KqpPg::CreateUniqPgColumn+useSink [GOOD] >> KqpPg::CreateUniqPgColumn-useSink >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDeleteQuery >> KqpPg::InsertValuesFromTableWithDefault-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultAndCast+useSink |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_tests/py3test >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[kesus] [GOOD] |98.7%| [TM] {RESULT} ydb/tests/functional/scheme_tests/py3test >> TabletService_ChangeSchema::Basics [GOOD] >> TabletService_ChangeSchema::OnlyAdminsAllowed >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDeleteQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendControlQuery >> PersQueueSdkReadSessionTest::ReadSessionWithAbort [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithClose >> TSequence::CreateTableWithDefaultFromSequenceFromSelect [GOOD] >> TSequence::CreateTableWithDefaultFromSequenceBadRequest >> TTxDataShardBuildIndexScan::BadRequest >> TIndexProcesorTests::TestCreateIndexProcessor [GOOD] >> TIndexProcesorTests::TestSingleCreateQueueEvent >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendControlQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendGetResultData >> DataShardBackgroundCompaction::ShouldCompact >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendGetResultData [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListJobs >> test_insert_restarts.py::TestS3::test_atomic_upload_commit[v1-client0] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListJobs [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeJob >> TSentinelTests::PDiskErrorState [GOOD] >> TSentinelTests::PDiskFaultyState |98.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/pq_read/test/py3test >> test_commit.py::TestCommit::test_commit [GOOD] |98.7%| [TS] {RESULT} ydb/tests/tools/pq_read/test/py3test >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeJob [GOOD] >> 
TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateConnection ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDSEveryQueryWorks+ColumnStore 2025-06-24 21:31:11,817 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 21:31:12,184 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 898332 47.6M 46.5M 24.2M test_tool run_ut @/home/runner/.ya/build/build_root/2btm/003951/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/chunk172/testing_out_stuff/test_tool.args 898346 3.1G 3.0G 2.6G └─ ydb-core-kqp-ut-join --trace-path-append /home/runner/.ya/build/build_root/2btm/003951/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/chunk172/ytest.report Test command err: Trying to start YDB, gRPC: 22610, MsgBus: 4410 2025-06-24T21:21:14.730069Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629062456262091:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:14.730526Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003951/r3tmp/tmpxyMvKx/pdisk_1.dat 2025-06-24T21:21:15.466874Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:21:15.475460Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:21:15.488243Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:21:15.500904Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:21:15.507715Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629062456261989:2079] 1750800074567677 != 1750800074567680 TServer::EnableGrpc on GrpcPort 22610, node 1 2025-06-24T21:21:15.747507Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:21:15.839859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:21:15.839880Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:21:15.839895Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:21:15.840023Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4410 TClient is connected to server localhost:4410 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:21:17.038647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:21:17.077543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:21:19.639565Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629062456262091:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:21:19.639691Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:21:20.121792Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629088226066416:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:20.121969Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:20.122520Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629088226066428:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:21:20.127681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:21:20.156310Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629088226066430:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:21:20.223517Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629088226066481:2340] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:21:21.469708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:21:21.821214Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629092521034178:2333];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:21.821477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629092521034178:2333];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:21.821738Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629092521034178:2333];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:21.821848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629092521034178:2333];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:21.821947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629092521034178:2333];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:21.822045Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629092521034178:2333];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:21.822149Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629092521034178:2333];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:21.822244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629092521034178:2333];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:21:21.822337Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629092521034178:2333];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:21:21.822433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519629092521034178:2333];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:21:21.822534Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519629092521034178:2333];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:21:21.831848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629092521034240:2340];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:21:21.831903Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629092521034240:2340];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:21:21.832124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629092521034240:2340];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:21:21.832230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629092521034240:2340];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:21:21.832316Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629092521034240:2340];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:21:21.832405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629092521034240:2340];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:21:21.832497Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629092521034240:2340];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:21:21.832586Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519629092521034240:2340];tablet_ ... 
and d_year between 1999 and 1999 + 2) x);\n\n$week_seq_2001 = (select d_week_seq\n from `/Root/test/ds/date_dim` as date_dim\n where d_year = 1999 + 1\n and d_moy = 12\n and d_dom = 11);\n\n$week_seq_1999 = (select d_week_seq\n from `/Root/test/ds/date_dim` as date_dim\n where d_year = 1999\n and d_moy = 12\n and d_dom = 11);\n\n\n-- start query 1 in stream 0 using template query14.tpl and seed 1819994127\n\n select channel, i_brand_id,i_class_id,i_category_id,sum(sales), sum(number_sales)\n from(\n select 'store' channel, item.i_brand_id i_brand_id,item.i_class_id i_class_id\n ,item.i_category_id i_category_id,sum(ss_quantity*ss_list_price) sales\n , count(*) number_sales\n from `/Root/test/ds/store_sales` as store_sales\n cross join `/Root/test/ds/item` as item\n cross join `/Root/test/ds/date_dim` as date_dim\n where ss_item_sk in $cross_items\n and ss_item_sk = i_item_sk\n and ss_sold_date_sk = d_date_sk\n and d_year = 1999+2\n and d_moy = 11\n group by item.i_brand_id,item.i_class_id,item.i_category_id\n having sum(ss_quantity*ss_list_price) > $avg_sales\n union all\n select 'catalog' channel, item.i_brand_id i_brand_id,item.i_class_id i_class_id,item.i_category_id i_category_id, sum(cs_quantity*cs_list_price) sales, count(*) number_sales\n from `/Root/test/ds/catalog_sales` as catalog_sales\n cross join `/Root/test/ds/item` as item\n cross join `/Root/test/ds/date_dim` as date_sim\n where cs_item_sk in $cross_items\n and cs_item_sk = i_item_sk\n and cs_sold_date_sk = d_date_sk\n and d_year = 1999+2\n and d_moy = 11\n group by item.i_brand_id,item.i_class_id,item.i_category_id\n having sum(cs_quantity*cs_list_price) > $avg_sales\n union all\n select 'web' channel, item.i_brand_id i_brand_id,item.i_class_id i_class_id,item.i_category_id i_category_id, sum(ws_quantity*ws_list_price) sales , count(*) number_sales\n from `/Root/test/ds/web_sales` as web_sales\n cross join `/Root/test/ds/item` as item\n cross join `/Root/test/ds/date_dim` as date_dim\n where ws_item_sk in $cross_items\n and ws_item_sk = i_item_sk\n and ws_sold_date_sk = d_date_sk\n and d_year = 1999+2\n and d_moy = 11\n group by item.i_brand_id,item.i_class_id,item.i_category_id\n having sum(ws_quantity*ws_list_price) > $avg_sales\n ) y\n group by rollup (channel, i_brand_id,i_class_id,i_category_id)\n order by channel,i_brand_id,i_class_id,i_category_id\n limit 100;\n\nselect this_year.channel ty_channel\n ,this_year.i_brand_id ty_brand\n ,this_year.i_class_id ty_class\n ,this_year.i_category_id ty_category\n ,this_year.sales ty_sales\n ,this_year.number_sales ty_number_sales\n ,last_year.channel ly_channel\n ,last_year.i_brand_id ly_brand\n ,last_year.i_class_id ly_class\n ,last_year.i_category_id ly_category\n ,last_year.sales ly_sales\n ,last_year.number_sales ly_number_sales\n from\n (select 'store' channel, item.i_brand_id i_brand_id,item.i_class_id i_class_id,item.i_category_id i_category_id\n ,sum(ss_quantity*ss_list_price) sales, count(*) number_sales\n from `/Root/test/ds/store_sales` as store_sales\n cross join `/Root/test/ds/item` as item\n cross join `/Root/test/ds/date_dim` as date_dim\n where ss_item_sk in $cross_items\n and ss_item_sk = i_item_sk\n and ss_sold_date_sk = d_date_sk\n and d_week_seq = $week_seq_2001\n group by item.i_brand_id,item.i_class_id,item.i_category_id\n having sum(ss_quantity*ss_list_price) > $avg_sales) this_year cross join\n (select 'store' channel, item.i_brand_id i_brand_id,item.i_class_id i_class_id\n ,item.i_category_id i_category_id, sum(ss_quantity*ss_list_price) sales, 
count(*) number_sales\n from `/Root/test/ds/store_sales` as store_sales\n cross join `/Root/test/ds/item` as item\n cross join `/Root/test/ds/date_dim` as date_dim\n where ss_item_sk in $cross_items\n and ss_item_sk = i_item_sk\n and ss_sold_date_sk = d_date_sk\n and d_week_seq = $week_seq_1999\n group by item.i_brand_id,item.i_class_id,item.i_category_id\n having sum(ss_quantity*ss_list_price) > $avg_sales) last_year\n where this_year.i_brand_id= last_year.i_brand_id\n and this_year.i_class_id = last_year.i_class_id\n and this_year.i_category_id = last_year.i_category_id\n order by ty_channel, ty_brand, ty_class, ty_category\n limit 100;\n\n-- end query 1 in stream 0 using template query14.tpl\n", parameters: 0b 2025-06-24T21:30:31.826742Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhxjg5a87g7awszqfvcr4wm", SessionId: ydb://session/3?node_id=1&id=OWViYjhiYS04NDk5NDUxMy1mMjYxZGRhNC1jNjA2NjUx, Slow query, duration: 18.273134s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma warning(\"disable\", \"4527\");\n\n$z0 = 0;\n$z1_2 = 1.2;\n$z1_3 = 1.3;\n$z0_9 = 0.9;\n$z0_99 = 0.99;\n$z1_49 = 1.49;\n\n$z0_35 = 0;\n$z0_1_35 = 0.1;\n$z1_2_35 = 1.2;\n$z0_05_35 = 0.05;\n$z0_9_35 = 0.9;\n$z1_1_35 = 1.1;\n$z0_5_35 = 0.5;\n$z100_35 = 100.;\n$z0_0001_35 = 0.0001;\n$z7_35 = 7.;\n\n$z0_12 = 0.;\n$z1_12 = 1;\n$z0_0100001_12 = 0.0100001;\n$z0_06_12 = 0.06;\n$z0_2_12 = 0.2;\n\n$scale_factor = 1;\n\n$round = ($x, $y) -> { return Math::Round($x, $y); };\n$upscale = ($x) -> { return $x; };\n\n$todecimal = ($x, $p, $s) -> { return cast($x as double); };\n\n\n\n-- NB: Subquerys\n$orders_with_several_warehouses = (\n select cs_order_number\n from `/Root/test/ds/catalog_sales` as catalog_sales\n group by cs_order_number\n having count(distinct cs_warehouse_sk) > 1\n);\n\n-- start query 1 in stream 0 using template query16.tpl and seed 171719422\nselect\n count(distinct cs1.cs_order_number) as `order count`\n ,sum(cs_ext_ship_cost) as `total shipping cost`\n ,sum(cs_net_profit) as `total net profit`\nfrom\n `/Root/test/ds/catalog_sales` cs1\n cross join `/Root/test/ds/date_dim` as date_dim\n cross join `/Root/test/ds/customer_address` as customer_address\n cross join `/Root/test/ds/call_center` as call_center\n left semi join $orders_with_several_warehouses cs2 on cs1.cs_order_number = cs2.cs_order_number\n left only join `/Root/test/ds/catalog_returns` cr1 on cs1.cs_order_number = cr1.cr_order_number\nwhere\n cast(d_date as date) between cast('2002-2-01' as date) and\n (cast('2002-2-01' as date) + DateTime::IntervalFromDays(60))\nand cs1.cs_ship_date_sk = d_date_sk\nand cs1.cs_ship_addr_sk = ca_address_sk\nand ca_state = 'GA'\nand cs1.cs_call_center_sk = cc_call_center_sk\nand cc_county in ('Williamson County','Williamson County','Williamson County','Williamson County',\n 'Williamson County'\n)\norder by `order count`\nlimit 100;\n\n-- end query 1 in stream 0 using template query16.tpl\n", parameters: 0b 2025-06-24T21:30:52.216895Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhxk218b23zdgh3enzpd8b2", SessionId: ydb://session/3?node_id=1&id=OWViYjhiYS04NDk5NDUxMy1mMjYxZGRhNC1jNjA2NjUx, Slow query, duration: 20.367297s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma warning(\"disable\", \"4527\");\n\n$z0 = 0;\n$z1_2 = 1.2;\n$z1_3 = 1.3;\n$z0_9 = 0.9;\n$z0_99 = 0.99;\n$z1_49 = 1.49;\n\n$z0_35 = 0;\n$z0_1_35 = 0.1;\n$z1_2_35 = 1.2;\n$z0_05_35 = 0.05;\n$z0_9_35 = 0.9;\n$z1_1_35 = 1.1;\n$z0_5_35 = 
0.5;\n$z100_35 = 100.;\n$z0_0001_35 = 0.0001;\n$z7_35 = 7.;\n\n$z0_12 = 0.;\n$z1_12 = 1;\n$z0_0100001_12 = 0.0100001;\n$z0_06_12 = 0.06;\n$z0_2_12 = 0.2;\n\n$scale_factor = 1;\n\n$round = ($x, $y) -> { return Math::Round($x, $y); };\n$upscale = ($x) -> { return $x; };\n\n$todecimal = ($x, $p, $s) -> { return cast($x as double); };\n\n\n\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query17.tpl and seed 1819994127\n$nantonull = ($n) -> {\n return case when Math::IsNaN($n)\n then null\n else $n\n end;\n};\n\nselect item.i_item_id\n ,item.i_item_desc\n ,store.s_state\n ,count(ss_quantity) as store_sales_quantitycount\n ,avg(ss_quantity) as store_sales_quantityave\n ,$nantonull(stddev_samp(ss_quantity)) as store_sales_quantitystdev\n ,$nantonull(stddev_samp(ss_quantity)/avg(ss_quantity)) as store_sales_quantitycov\n ,count(sr_return_quantity) as store_returns_quantitycount\n ,avg(sr_return_quantity) as store_returns_quantityave\n ,$nantonull(stddev_samp(sr_return_quantity)) as store_returns_quantitystdev\n ,$nantonull(stddev_samp(sr_return_quantity)/avg(sr_return_quantity)) as store_returns_quantitycov\n ,count(cs_quantity) as catalog_sales_quantitycount ,avg(cs_quantity) as catalog_sales_quantityave\n ,$nantonull(stddev_samp(cs_quantity)) as catalog_sales_quantitystdev\n ,$nantonull(stddev_samp(cs_quantity)/avg(cs_quantity)) as catalog_sales_quantitycov\n from `/Root/test/ds/store_sales` as store_sales\n cross join `/Root/test/ds/store_returns` as store_returns\n cross join `/Root/test/ds/catalog_sales` as catalog_sales\n cross join `/Root/test/ds/date_dim` d1\n cross join `/Root/test/ds/date_dim` d2\n cross join `/Root/test/ds/date_dim` d3\n cross join `/Root/test/ds/store` as store\n cross join `/Root/test/ds/item` as item\n where d1.d_quarter_name = '2001Q1'\n and d1.d_date_sk = ss_sold_date_sk\n and i_item_sk = ss_item_sk\n and s_store_sk = ss_store_sk\n and ss_customer_sk = sr_customer_sk\n and ss_item_sk = sr_item_sk\n and ss_ticket_number = sr_ticket_number\n and sr_returned_date_sk = d2.d_date_sk\n and d2.d_quarter_name in ('2001Q1','2001Q2','2001Q3')\n and sr_customer_sk = cs_bill_customer_sk\n and sr_item_sk = cs_item_sk\n and cs_sold_date_sk = d3.d_date_sk\n and d3.d_quarter_name in ('2001Q1','2001Q2','2001Q3')\n group by item.i_item_id\n ,item.i_item_desc\n ,store.s_state\n order by item.i_item_id\n ,item.i_item_desc\n ,store.s_state\nlimit 100;\n\n-- end query 1 in stream 0 using template query17.tpl\n", parameters: 0b Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/003951/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/chunk172/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 
second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/003951/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/chunk172/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateConnectionWithServiceAccount >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListConnections >> TabletService_ChangeSchema::OnlyAdminsAllowed [GOOD] >> TabletService_ExecuteMiniKQL::BasicMiniKQLRead >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListConnections [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeConnection >> kikimr_config.py::test_kikimr_config_generator_generic_connector_config >> kikimr_config.py::test_kikimr_config_generator_generic_connector_config [GOOD] >> TopicSessionTests::TwoSessionsWithoutOffsets [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyConnection >> TIndexProcesorTests::TestSingleCreateQueueEvent [GOOD] >> TIndexProcesorTests::TestReindexSingleQueue >> TopicSessionTests::TwoSessionWithoutPredicate >> KqpPg::InsertValuesFromTableWithDefaultAndCast+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultAndCast-useSink >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyConnectionWithServiceAccount |98.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/library/ut/py3test >> kikimr_config.py::test_kikimr_config_generator_generic_connector_config [GOOD] |98.7%| [TS] {RESULT} ydb/tests/library/ut/py3test >> Graph::MemoryBackendFullCycle [GOOD] >> Graph::LocalBackendFullCycle >> TSequence::CreateTableWithDefaultFromSequenceBadRequest [GOOD] >> NodeWardenDsProxyConfigRetrieval::Disconnect >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDeleteConnection >> TTestYqlToMiniKQLCompile::CheckResolve >> TTestYqlToMiniKQLCompile::CheckResolve [GOOD] >> TTestYqlToMiniKQLCompile::OnlyResult >> TTestYqlToMiniKQLCompile::OnlyResult [GOOD] >> TTestYqlToMiniKQLCompile::EraseRow >> TTestYqlToMiniKQLCompile::EraseRow [GOOD] >> TTestYqlToMiniKQLCompile::UpdateRow >> TTestYqlToMiniKQLCompile::UpdateRow [GOOD] >> TTestYqlToMiniKQLCompile::SelectRow >> TIndexProcesorTests::TestReindexSingleQueue [GOOD] >> TIndexProcesorTests::TestDeletedQueueNotReindexed >> TTestYqlToMiniKQLCompile::SelectRow [GOOD] >> TTestYqlToMiniKQLCompile::SelectRange >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDeleteConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendTestConnection >> TTestYqlToMiniKQLCompile::SelectRange [GOOD] >> TTestYqlToMiniKQLCompile::SimpleCrossShardTx >> TTestYqlToMiniKQLCompile::SimpleCrossShardTx [GOOD] >> TTestYqlToMiniKQLCompile::AcquireLocks >> Etcd_KV::ReadRangeWithLimit [GOOD] >> Etcd_KV::ReadRangeWithLimitAndSort >> TTestYqlToMiniKQLCompile::AcquireLocks [GOOD] >> TTestYqlToMiniKQLCompile::StaticMapTypeOf >> TTestYqlToMiniKQLCompile::StaticMapTypeOf [GOOD] >> 
TTestYqlToMiniKQLCompile::SelectRangeAtomInRange [GOOD] >> TTestYqlToMiniKQLCompile::Extract >> TTxDataShardBuildIndexScan::BadRequest [GOOD] >> TTxDataShardBuildIndexScan::RunScan >> TTestYqlToMiniKQLCompile::Extract [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendTestConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendTestConnectionWithServiceAccount ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_sequence/unittest >> TSequence::CreateTableWithDefaultFromSequenceBadRequest [GOOD] Test command err: 2025-06-24T21:30:55.274310Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:30:55.274877Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:30:55.275109Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0016b6/r3tmp/tmp0kAdyl/pdisk_1.dat 2025-06-24T21:30:55.830262Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:30:55.841155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:55.885921Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:55.895191Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750800651925956 != 1750800651925960 2025-06-24T21:30:55.947666Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:30:55.947821Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:30:55.961483Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:30:56.072086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:30:56.614586Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:725:2597], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:56.614787Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:735:2602], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:56.614874Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:56.625273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:30:56.691838Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:30:56.825990Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:739:2605], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:30:56.922242Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:809:2644] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:30:57.941071Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhxkt6x3sb73qqmbh7q2bk4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzBiYWU2MDEtMTlmYWFhMTQtZWUwMmM5ZTItODJiYjdkZDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:30:58.230726Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhxkvmz77wttbb1djp6qbkh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTFlOGQ3NjYtN2VlMDEzOGItOTE1ZjFlOTktZmE1Y2Q3ODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:30:58.392302Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhxkvv60ps6ek9cdcs4vg4y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTYwOGY4OWItM2RmYzY0YTctNjFlZjM1OTEtMzE0N2M0NTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:30:58.723350Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhxkw089d9aacydy6er1xrp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjEyOGVmMGQtZWRmYjI1MjctYmJkNmYyNDgtNTQ5MTRmMGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { int64_value: 1 } items { uint32_value: 1 } }, { items { int64_value: 2 } items { uint32_value: 2 } }, { items { int64_value: 3 } items { uint32_value: 3 } }, { items { int64_value: 4 } items { uint32_value: 4 } }, { items { int64_value: 5 } items { uint32_value: 5 } }, { items { int64_value: 6 } items { uint32_value: 6 } }, { items { int64_value: 7 } items { uint32_value: 7 } }, { items { int64_value: 8 } items { uint32_value: 8 } }, { items { int64_value: 9 } items { uint32_value: 9 } } 2025-06-24T21:31:02.527062Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:02.527553Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:31:02.527693Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0016b6/r3tmp/tmpuDDHvX/pdisk_1.dat 2025-06-24T21:31:02.836045Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-24T21:31:02.837666Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:31:02.871391Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:02.872818Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1750800659352247 != 1750800659352251 2025-06-24T21:31:02.922050Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:02.922190Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:02.933719Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:03.020644Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:03.428120Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:774:2634], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:03.428280Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:785:2639], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:03.428370Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:03.433930Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:31:03.488938Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:31:03.616641Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:788:2642], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:31:03.658075Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:859:2682] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:31:04.270571Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhxm0w125cz3ctwg1mxb43t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDhlMzVkZGYtZTU0MmQ5YjUtMTJmZWJiZGItZDNiOTNhN2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:04.308221Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhxm0w125cz3ctwg1mxb43t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDhlMzVkZGYtZTU0MmQ5YjUtMTJmZWJiZGItZDNiOTNhN2U=, CurrentExecutionId: ... /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:31:10.947466Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:10.948923Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:32:2079] 1750800666539133 != 1750800666539137 2025-06-24T21:31:11.002408Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:11.002592Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:11.019023Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:11.127278Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:11.510585Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:725:2597], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:11.510708Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:736:2602], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:11.510788Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:11.517393Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:31:11.574813Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:31:11.705014Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:739:2605], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:31:11.742961Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:809:2644] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:31:11.877035Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhxm8rm3jekdhnwwa0h6xde, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZGJhMDJkZjQtNzI4ZjkxNDYtYjc5YTY5MTUtYjhkODAzZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:12.127796Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhxm95mabnvxt8rb78073e1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZmU2NDBiNTktODEzYWU2ODItNWZjZDBlNTAtNGE0MDUxN2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { int64_value: 1 } items { uint32_value: 303 } } 2025-06-24T21:31:12.330921Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhxm9ct4nmhqz315vnpfgmv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NjEzNGIxMzQtZDdiOTJmMTAtNWUwNjRjOTEtNjA3MzFkMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:12.516918Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyhxm9kg8w10tcv24tq85cck, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OTI1NTY4ZDUtZWRjMGVkN2EtN2U3OTBlMWItNjM2YzRmYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { int64_value: 1 } items { uint32_value: 303 } }, { items { int64_value: 2 } items { uint32_value: 303 } } 2025-06-24T21:31:12.741648Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyhxm9reap4dse1btnggfyp7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjFkNmYwMTctY2NlYzVhZWQtZTJiMzcyNTAtYzA2NDBmMDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:12.952283Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhxma0a1hw08apxm85d5732, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YzMyY2YxNzctZmU1YTJkOTEtZTEwMmY2NzAtZGFkMTRhZWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { int64_value: 1 } items { uint32_value: 303 } }, { items { int64_value: 2 } items { uint32_value: 303 } }, { items { int64_value: 3 } items { uint32_value: 303 } } 2025-06-24T21:31:17.081244Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:273:2316], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:17.081578Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:31:17.081678Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0016b6/r3tmp/tmp4A8hM0/pdisk_1.dat 2025-06-24T21:31:17.377846Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 2025-06-24T21:31:17.379616Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:31:17.451678Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:17.452832Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:32:2079] 1750800673488763 != 1750800673488766 2025-06-24T21:31:17.501960Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:17.502120Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:17.514451Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:17.601856Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:17.939695Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:725:2597], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:17.939783Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:735:2602], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:17.939845Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:17.944348Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:31:17.998685Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:31:18.132713Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:739:2605], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:31:18.170729Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:809:2644] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:31:18.274842Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:819:2653], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
:1:98: Error: Key columns are not specified., code: 2017
: Error: Execution, code: 1060
:1:98: Error: Key columns are not specified., code: 2017 2025-06-24T21:31:18.277915Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=N2RjMmVjZS02NTI5NzIzYS04ZjcxMzhiMS0xYjc2ZDc0MA==, ActorId: [4:722:2594], ActorState: ExecuteState, TraceId: 01jyhxmf1jagjt2s9gzdfddzyq, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-24T21:31:18.333163Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:841:2669], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
:1:103: Error: Key columns are not specified., code: 2017
: Error: Execution, code: 1060
:1:103: Error: Key columns are not specified., code: 2017 2025-06-24T21:31:18.335982Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=Y2QzZjMzNmUtNzQ0YzM3NTAtOGVlMWZjMGQtYmJlYjliOQ==, ActorId: [4:833:2661], ActorState: ExecuteState, TraceId: 01jyhxmfcb8km9maaewdkyamn7, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: |98.7%| [TM] {RESULT} ydb/core/tx/datashard/ut_sequence/unittest >> TSentinelTests::PDiskFaultyState [GOOD] >> TSentinelTests::PDiskRackGuardHalfRack |98.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/client/minikql_compile/ut/unittest >> TTestYqlToMiniKQLCompile::Extract [GOOD] |98.7%| [TS] {RESULT} ydb/core/client/minikql_compile/ut/unittest >> NodeWardenDsProxyConfigRetrieval::Disconnect [GOOD] >> DataShardBackgroundCompaction::ShouldCompact [GOOD] >> DataShardBackgroundCompaction::ShouldNotCompactWhenBorrowed >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendTestConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateBinding >> DataShardStats::OneChannelStatsCorrect [GOOD] >> DataShardStats::MultipleChannelsStatsCorrect ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut_sequence/unittest >> NodeWardenDsProxyConfigRetrieval::Disconnect [GOOD] Test command err: Caught NodeWarden registration actorId# [1:11:2058] 2025-06-24T21:31:19.484404Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-24T21:31:19.528513Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:/home/runner/.ya/build/build_root/2btm/001a95/r3tmp/tmpjlBlO2/static.dat" PDiskGuid: 5189866908369752311 PDiskCategory: 0 PDiskConfig { ExpectedSlotCount: 2 } } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 1 PDiskGuid: 5189866908369752311 } VDiskKind: Default } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1 PDiskGuid: 5189866908369752311 } } } } AvailabilityDomains: 0 } 2025-06-24T21:31:19.530204Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:/home/runner/.ya/build/build_root/2btm/001a95/r3tmp/tmpjlBlO2/static.dat" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-24T21:31:19.534628Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-24T21:31:19.538351Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:1 PDiskGuid# 5189866908369752311 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-24T21:31:19.539918Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:1 PDiskGuid# 5189866908369752311 2025-06-24T21:31:19.539980Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-24T21:31:19.541585Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:29:2076] ControllerId# 72057594037932033 2025-06-24T21:31:19.541633Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 
2025-06-24T21:31:19.543056Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-24T21:31:19.543279Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-24T21:31:19.586635Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-24T21:31:19.591522Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-24T21:31:19.637473Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-24T21:31:19.637544Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-24T21:31:19.648308Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-24T21:31:19.648378Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-24T21:31:19.654175Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-24T21:31:19.656181Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-24T21:31:19.658017Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-24T21:31:19.668112Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-24T21:31:19.669897Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:/home/runner/.ya/build/build_root/2btm/001a95/r3tmp/tmpjlBlO2/static.dat" PDiskGuid: 5189866908369752311 PDiskCategory: 0 PDiskConfig { ExpectedSlotCount: 2 } } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 1 PDiskGuid: 5189866908369752311 } VDiskKind: Default } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1 PDiskGuid: 5189866908369752311 } } } } AvailabilityDomains: 0 } 2025-06-24T21:31:19.670210Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639248 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-24T21:31:19.683971Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 3 2025-06-24T21:31:19.684032Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-24T21:31:19.684215Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:342} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "S\303\2419\324\267\022C*\337\336\241F\307\206\270\304_\022\301" } 2025-06-24T21:31:19.703707Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 3 2025-06-24T21:31:19.703916Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:11:2058] SessionId# [0:0:0] Cookie# 0 2025-06-24T21:31:19.706428Z node 1 :BS_NODE 
DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-24T21:31:19.706506Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 2146435075 Sender# [1:49:2091] SessionId# [0:0:0] Cookie# 0 2025-06-24T21:31:19.706553Z node 1 :BS_NODE DEBUG: {NWDC36@distconf_persistent_storage.cpp:205} TEvStorageConfigStored NumOk# 0 NumError# 0 Passed# 0.058047s 2025-06-24T21:31:19.706876Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-24T21:31:19.706916Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639248 Sender# [1:11:2058] SessionId# [0:0:0] Cookie# 0 2025-06-24T21:31:19.749199Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-24T21:31:19.762755Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-24T21:31:19.792641Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-24T21:31:19.807812Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-24T21:31:19.810481Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-24T21:31:19.815275Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:31:19.816318Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-24T21:31:19.816624Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-24T21:31:19.817144Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-24T21:31:19.817528Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-24T21:31:19.843962Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-24T21:31:19.844163Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-24T21:31:19.848418Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-24T21:31:19.849027Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T21:31:19.849161Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-24T21:31:19.849385Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-24T21:31:19.902038Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-24T21:31:19.902206Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T21:31:19.927831Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-24T21:31:19.927986Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T21:31:19.928069Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-24T21:31:19.928168Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T21:31:19.928297Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-24T21:31:19.928378Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T21:31:19.928471Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-24T21:31:19.928538Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T21:31:19.947831Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-24T21:31:19.947981Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T21:31:19.964078Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-24T21:31:19.964283Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-24T21:31:19.967628Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-24T21:31:19.967696Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-24T21:31:19.997784Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-24T21:31:19.998564Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-24T21:31:19.999098Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639257 Sender# [1:93:2123] SessionId ... 
tmp/tmpjlBlO2/static.dat" PDiskConfig { ExpectedSlotCount: 2 } } } } Command { DefineBox { BoxId: 1 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 ErasureSpecies: "none" VDiskKind: "Default" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-24T21:31:20.003948Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1 Path# SectorMap:/home/runner/.ya/build/build_root/2btm/001a95/r3tmp/tmpjlBlO2/static.dat 2025-06-24T21:31:20.034983Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-24T21:31:20.035194Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } } 2025-06-24T21:31:20.035295Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } 2025-06-24T21:31:20.035558Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1 PDiskGuid: 5189866908369752311 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-24T21:31:20.035702Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1 PDiskGuid: 5189866908369752311 Status: READY OnlyPhantomsRemain: false } } 2025-06-24T21:31:20.036809Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-24T21:31:20.038775Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-24T21:31:20.039033Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } Success: true } 2025-06-24T21:31:20.039366Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-24T21:31:20.039517Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } } 2025-06-24T21:31:20.055697Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-24T21:31:20.055886Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639257 Sender# [1:93:2123] SessionId# [0:0:0] Cookie# 0 === Waiting for pipe to establish === === Breaking pipe === === Sending put === Pipe disconnected clientId# [1:29:2076] 2025-06-24T21:31:20.056547Z node 1 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [1:29:2076] ServerId# [1:125:2147] TabletId# 72057594037932033 PipeClientId# [1:29:2076] 2025-06-24T21:31:20.056626Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:140:2160] ControllerId# 72057594037932033 2025-06-24T21:31:20.056652Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-24T21:31:20.057005Z node 1 :BS_NODE DEBUG: {NW46@node_warden_proxy.cpp:134} HandleForwarded 
GroupId# 2147483648 EnableProxyMock# false NoGroup# false 2025-06-24T21:31:20.057048Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 2147483648 2025-06-24T21:31:20.057081Z node 1 :BS_NODE DEBUG: {NW98@node_warden_group.cpp:266} RequestGroupConfig GroupId# 2147483648 2025-06-24T21:31:20.057290Z node 1 :BS_NODE INFO: {NW79@node_warden_group_resolver.cpp:74} TGroupResolverActor::Bootstrap GroupId# 2147483648 2025-06-24T21:31:20.057383Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:11:2058] SessionId# [0:0:0] Cookie# 0 Pipe connected clientId# [1:140:2160] 2025-06-24T21:31:20.060409Z node 1 :BS_NODE DEBUG: {NW05@node_warden_pipe.cpp:52} TEvTabletPipe::TEvClientConnected OK ClientId# [1:140:2160] ServerId# [1:151:2169] TabletId# 72057594037932033 PipeClientId# [1:140:2160] 2025-06-24T21:31:20.060565Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 1 VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1 PDiskGuid: 5189866908369752311 Status: READY OnlyPhantomsRemain: false } DeclarativePDiskManagement: true } 2025-06-24T21:31:20.060904Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } Success: true } 2025-06-24T21:31:20.061079Z node 1 :BS_CONTROLLER DEBUG: {BSCTXGG02@get_group.cpp:58} TEvControllerGetGroup Sender# [1:11:2058] Cookie# 0 Recipient# [1:151:2169] RecipientRewrite# [1:93:2123] Request# {NodeID: 1 GroupIDs: 2147483648 } StopGivingGroups# false 2025-06-24T21:31:20.061152Z node 1 :BS_CONTROLLER DEBUG: {BSCTXGG01@get_group.cpp:22} Handle TEvControllerGetGroup Request# {NodeID: 1 GroupIDs: 2147483648 } 2025-06-24T21:31:20.061253Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } } 2025-06-24T21:31:20.061349Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } SatisfactionRank: 0 VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } State: OK Replicated: true DiskSpace: Green IsThrottling: false ThrottlingRate: 0 } } 2025-06-24T21:31:20.076745Z node 1 :BS_NODE DEBUG: {NW17@node_warden_impl.cpp:807} Handle(TEvBlobStorage::TEvControllerNodeServiceSetUpdate) Msg# {Status: OK NodeID: 1 ServiceSet { PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:/home/runner/.ya/build/build_root/2btm/001a95/r3tmp/tmpjlBlO2/static.dat" PDiskGuid: 5189866908369752311 PDiskCategory: 0 PDiskConfig { ExpectedSlotCount: 2 } EntityStatus: INITIAL ExpectedSerial: "" ManagementStage: DISCOVER_SERIAL SpaceColorBorder: GREEN } VDisks { VDiskID { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 5189866908369752311 } VDiskKind: Default StoragePoolName: "" GroupSizeInUnits: 0 } Groups { GroupID: 2147483648 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 5189866908369752311 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2147483648 MainKeyVersion: 0 StoragePoolName: "" DeviceType: ROT GroupSizeInUnits: 0 } } InstanceId: "de528d99-ac43f75f-54459bf4-e0a827be" Comprehensive: true AvailDomain: 0 } 2025-06-24T21:31:20.076990Z node 1 
:BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# false Comprehensive# true Origin# controller ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:/home/runner/.ya/build/build_root/2btm/001a95/r3tmp/tmpjlBlO2/static.dat" PDiskGuid: 5189866908369752311 PDiskCategory: 0 PDiskConfig { ExpectedSlotCount: 2 } EntityStatus: INITIAL ExpectedSerial: "" ManagementStage: DISCOVER_SERIAL SpaceColorBorder: GREEN } VDisks { VDiskID { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 5189866908369752311 } VDiskKind: Default StoragePoolName: "" GroupSizeInUnits: 0 } Groups { GroupID: 2147483648 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 5189866908369752311 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2147483648 MainKeyVersion: 0 StoragePoolName: "" DeviceType: ROT GroupSizeInUnits: 0 } } 2025-06-24T21:31:20.077117Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [80000000:1:0:0:0] VSlotId# 1:1:1000 PDiskGuid# 5189866908369752311 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-24T21:31:20.077891Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [80000000:1:0:0:0] VSlotId# 1:1:1000 PDiskGuid# 5189866908369752311 2025-06-24T21:31:20.083872Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-24T21:31:20.091948Z node 1 :BS_NODE DEBUG: {NW17@node_warden_impl.cpp:807} Handle(TEvBlobStorage::TEvControllerNodeServiceSetUpdate) Msg# {Status: OK NodeID: 1 ServiceSet { Groups { GroupID: 2147483648 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 5189866908369752311 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2147483648 MainKeyVersion: 0 StoragePoolName: "" DeviceType: ROT GroupSizeInUnits: 0 } } } 2025-06-24T21:31:20.092092Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# false Comprehensive# false Origin# controller ServiceSet# {Groups { GroupID: 2147483648 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 5189866908369752311 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2147483648 MainKeyVersion: 0 StoragePoolName: "" DeviceType: ROT GroupSizeInUnits: 0 } } 2025-06-24T21:31:20.092263Z node 1 :BS_NODE INFO: {NW81@node_warden_group_resolver.cpp:270} TGroupResolverActor::PassAway GroupId# 2147483648 2025-06-24T21:31:20.095340Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1000 PDiskGuid: 5189866908369752311 Status: INIT_PENDING OnlyPhantomsRemain: false } } 2025-06-24T21:31:20.102670Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-24T21:31:20.112088Z node 1 :BS_CONTROLLER DEBUG: 
{BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } } 2025-06-24T21:31:20.132378Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-24T21:31:20.132796Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1000 PDiskGuid: 5189866908369752311 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-24T21:31:20.138422Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-24T21:31:20.138755Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1000 PDiskGuid: 5189866908369752311 Status: READY OnlyPhantomsRemain: false } } 2025-06-24T21:31:20.785916Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 1 AvailableSize: 34189869056 TotalSize: 34359738368 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 EnforcedDynamicSlotSize: 17041457152 State: Normal } } |98.7%| [TM] {RESULT} ydb/core/blobstorage/nodewarden/ut_sequence/unittest >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateBinding [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListBindings >> TabletService_ExecuteMiniKQL::BasicMiniKQLRead [GOOD] >> TabletService_ExecuteMiniKQL::ParamsMiniKQLRead >> Coordinator::LastStepSubscribe [GOOD] >> Coordinator::RestoreDomainConfiguration >> test_postgres.py::TestPostgresSuite::test_postgres_suite[select_1] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListBindings [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeBinding >> KqpPg::CreateUniqPgColumn-useSink [GOOD] >> KqpPg::CreateUniqComplexPgColumn+useSink >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeBinding [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyBinding >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyBinding [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDeleteBinding >> TIndexProcesorTests::TestDeletedQueueNotReindexed [GOOD] >> TIndexProcesorTests::TestManyMessages >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDeleteBinding [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateQuery >> RangeOps::Intersection [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultAndCast-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultBool+useSink >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateQuery [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListQueries ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_range_ops/unittest >> RangeOps::Intersection [GOOD] Test command err: first [(Uint64 : NULL, Uint64 : NULL) ; ()) second [(Uint64 : NULL, Uint64 : 1) ; (Uint64 : 20, Uint64 : 20)] result [(Uint64 : NULL, Uint64 : 1) ; (Uint64 : 20, Uint64 : 20)] correct [(Uint64 : NULL, Uint64 : 1) ; (Uint64 : 20, Uint64 : 20)] first [(Uint64 : NULL) ; ()) second [(Uint64 : NULL, Uint64 : 1) ; (Uint64 : 20, Uint64 : 20)] result [(Uint64 : NULL) ; (Uint64 : 20, 
Uint64 : 20)] correct [(Uint64 : NULL) ; (Uint64 : 20, Uint64 : 20)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 5)] result [(Uint64 : 10) ; (Uint64 : 5)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 10)] result [(Uint64 : 10) ; (Uint64 : 10)] correct [(Uint64 : 10) ; (Uint64 : 10)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 15)] result [(Uint64 : 10) ; (Uint64 : 15)] correct [(Uint64 : 10) ; (Uint64 : 15)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 20)] result [(Uint64 : 10) ; (Uint64 : 20)] correct [(Uint64 : 10) ; (Uint64 : 20)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 30)] result [(Uint64 : 10) ; (Uint64 : 20)] correct [(Uint64 : 10) ; (Uint64 : 20)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 10) ; (Uint64 : 10)] result [(Uint64 : 10) ; (Uint64 : 10)] correct [(Uint64 : 10) ; (Uint64 : 10)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 10) ; (Uint64 : 15)] result [(Uint64 : 10) ; (Uint64 : 15)] correct [(Uint64 : 10) ; (Uint64 : 15)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 10) ; (Uint64 : 20)] result [(Uint64 : 10) ; (Uint64 : 20)] correct [(Uint64 : 10) ; (Uint64 : 20)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 10) ; (Uint64 : 30)] result [(Uint64 : 10) ; (Uint64 : 20)] correct [(Uint64 : 10) ; (Uint64 : 20)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 15) ; (Uint64 : 17)] result [(Uint64 : 15) ; (Uint64 : 17)] correct [(Uint64 : 15) ; (Uint64 : 17)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 15) ; (Uint64 : 20)] result [(Uint64 : 15) ; (Uint64 : 20)] correct [(Uint64 : 15) ; (Uint64 : 20)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 15) ; (Uint64 : 30)] result [(Uint64 : 15) ; (Uint64 : 20)] correct [(Uint64 : 15) ; (Uint64 : 20)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 20) ; (Uint64 : 20)] result [(Uint64 : 20) ; (Uint64 : 20)] correct [(Uint64 : 20) ; (Uint64 : 20)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 20) ; (Uint64 : 30)] result [(Uint64 : 20) ; (Uint64 : 20)] correct [(Uint64 : 20) ; (Uint64 : 20)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 25) ; (Uint64 : 30)] result [(Uint64 : 25) ; (Uint64 : 20)] first ((Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 10)] result ((Uint64 : 10) ; (Uint64 : 10)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 10)) result [(Uint64 : 10) ; (Uint64 : 10)) first ((Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 10)) result ((Uint64 : 10) ; (Uint64 : 10)) first ((Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 15)] result ((Uint64 : 10) ; (Uint64 : 15)] correct ((Uint64 : 10) ; (Uint64 : 15)] first ((Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 15)) result ((Uint64 : 10) ; (Uint64 : 15)) correct ((Uint64 : 10) ; (Uint64 : 15)) first ((Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 20)) result ((Uint64 : 10) ; (Uint64 : 20)) correct ((Uint64 : 10) ; (Uint64 : 20)) first ((Uint64 : 10) ; (Uint64 : 20)) second [(Uint64 : 1) ; (Uint64 : 20)) result ((Uint64 : 10) ; (Uint64 : 20)) correct ((Uint64 : 10) ; (Uint64 : 20)) first [(Uint64 : NULL) ; ()) second [(Uint64 : 1) ; (Uint64 : 20)) result [(Uint64 : 1) ; (Uint64 : 20)) correct [(Uint64 : 1) ; (Uint64 : 20)) first [(Uint64 : 10) ; ()) second [(Uint64 : 1) ; (Uint64 : 20)) result [(Uint64 : 10) ; (Uint64 : 20)) correct [(Uint64 : 
10) ; (Uint64 : 20)) first ((Uint64 : 10) ; ()) second [(Uint64 : 1) ; (Uint64 : 10)) result ((Uint64 : 10) ; (Uint64 : 10)) first ((Uint64 : 10) ; ()) second [(Uint64 : 1) ; (Uint64 : 20)) result ((Uint64 : 10) ; (Uint64 : 20)) correct ((Uint64 : 10) ; (Uint64 : 20)) first [(Uint64 : NULL) ; (Uint64 : 10)] second [(Uint64 : 1) ; (Uint64 : 20)) result [(Uint64 : 1) ; (Uint64 : 10)] correct [(Uint64 : 1) ; (Uint64 : 10)] first [(Uint64 : NULL) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 10)) result [(Uint64 : 1) ; (Uint64 : 10)) correct [(Uint64 : 1) ; (Uint64 : 10)) |98.7%| [TM] {RESULT} ydb/core/tx/datashard/ut_range_ops/unittest >> TopicSessionTests::TwoSessionWithoutPredicate [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListQueries [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeQuery >> TopicSessionTests::SessionWithPredicateAndSessionWithoutPredicate >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery [GOOD] >> BasicUsage::TWriteSession_WriteEncoded >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeQuery [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyQuery >> DataShardBackgroundCompaction::ShouldNotCompactWhenBorrowed [GOOD] >> DataShardBackgroundCompaction::ShouldNotCompactWhenCopyTable >> TabletService_ExecuteMiniKQL::ParamsMiniKQLRead [GOOD] >> TabletService_ExecuteMiniKQL::MalformedParams >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyQuery [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDeleteQuery >> KqpService::PatternCache [GOOD] >> KqpService::RangeCache+UseCache >> Graph::LocalBackendFullCycle [GOOD] >> Graph::MemoryBordersOnGet >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDeleteQuery [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendControlQuery >> TTxDataShardBuildIndexScan::RunScan [GOOD] >> TTxDataShardBuildIndexScan::ShadowBorrowCompaction >> Coordinator::RestoreDomainConfiguration [GOOD] >> Coordinator::RestoreTenantConfiguration-AlterDatabaseCreateHiveFirst-false >> TIndexProcesorTests::TestManyMessages [GOOD] >> TIndexProcesorTests::TestOver1000Queues >> test_query_cache.py::TestQueryCache::test >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendControlQuery [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendGetResultData >> test.py::test_wait_for_cluster_ready [GOOD] >> test.py::test_counter >> KqpPg::TypeCoercionInsert-useSink [GOOD] >> KqpPg::V1CreateTable >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendGetResultData [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListJobs >> Graph::MemoryBordersOnGet [GOOD] >> Graph::LocalBordersOnGet >> TGRpcRateLimiterTest::CreateResource >> test.py::test_counter [GOOD] >> test.py::test_viewer_nodes >> test.py::test_viewer_nodes [GOOD] >> test.py::test_viewer_nodes_all >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListJobs [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[select_1] [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeJob >> test_postgres.py::TestPostgresSuite::test_postgres_suite[text] >> test_http_api.py::TestHttpApi::test_simple_analytics_query [FAIL] >> 
test_http_api.py::TestHttpApi::test_empty_query [GOOD] >> test_http_api.py::TestHttpApi::test_warning >> test.py::test_viewer_nodes_all [GOOD] >> test.py::test_viewer_storage_nodes >> TDataShardRSTest::TestCleanupInRS+UseSink >> Etcd_KV::ReadRangeWithLimitAndSort [GOOD] >> Etcd_KV::ReadRangeFromRevision >> PersQueueSdkReadSessionTest::ReadSessionWithClose [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithCloseNotCommitted >> test.py::test_viewer_storage_nodes [GOOD] >> test.py::test_viewer_storage_nodes_all >> test.py::test_viewer_storage_nodes_all [GOOD] >> test.py::test_storage_groups [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeJob [GOOD] >> test.py::test_viewer_sysinfo [GOOD] >> test.py::test_viewer_vdiskinfo [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateConnection >> test.py::test_viewer_pdiskinfo [GOOD] >> test.py::test_viewer_bsgroupinfo >> test.py::test_viewer_bsgroupinfo [GOOD] >> test.py::test_viewer_tabletinfo >> test_postgres.py::TestPostgresSuite::test_postgres_suite[text] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[withtable] >> test.py::test_viewer_tabletinfo [GOOD] >> test.py::test_viewer_describe >> Graph::LocalBordersOnGet [GOOD] >> test.py::test_viewer_describe [GOOD] >> test.py::test_viewer_cluster [GOOD] >> test.py::test_viewer_tenantinfo [GOOD] >> test.py::test_viewer_tenantinfo_db >> test.py::test_viewer_tenantinfo_db [GOOD] >> test.py::test_viewer_healthcheck >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateConnection [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateConnectionWithServiceAccount >> test.py::test_viewer_healthcheck [GOOD] >> test.py::test_viewer_acl >> TabletService_ExecuteMiniKQL::MalformedParams [GOOD] >> TabletService_ExecuteMiniKQL::MalformedProgram >> test.py::test_viewer_acl [GOOD] >> test.py::test_viewer_acl_write >> test_http_api.py::TestHttpApi::test_warning [GOOD] >> test_http_api.py::TestHttpApi::test_get_unknown_query [GOOD] >> test_http_api.py::TestHttpApi::test_unauthenticated [GOOD] >> test_http_api.py::TestHttpApi::test_create_idempotency ------- [TS] {asan, default-linux-x86_64, release} ydb/core/graph/ut/unittest >> Graph::LocalBordersOnGet [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:31:06.862227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:31:06.862345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:31:06.862400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:31:06.862442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:31:06.867187Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:31:06.867278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:31:06.867385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:31:06.867576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:31:06.868400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:31:06.875266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:31:06.965388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:31:06.966353Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:06.982210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:31:06.982644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:31:06.982883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:31:06.998481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:31:06.998847Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:31:07.001072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:31:07.001449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:31:07.008635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:31:07.009695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:31:07.016013Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:31:07.016091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:31:07.016321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:31:07.016367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:31:07.016450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:31:07.016533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:31:07.023866Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:31:07.173924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:31:07.176240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:31:07.179317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:31:07.179401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:31:07.180793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:31:07.180913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:31:07.189666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:31:07.190562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:31:07.190778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:31:07.190944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:31:07.191002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:31:07.191036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:31:07.193892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:31:07.193959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 
ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:31:07.193996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:31:07.200109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:31:07.200174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:31:07.200224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:31:07.200289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:31:07.211253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:31:07.218023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:31:07.223120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:31:07.224439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:31:07.224600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:31:07.224657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:31:07.227590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:31:07.227680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:31:07.227904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:31:07.227989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:31:07.235092Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:31:07.235173Z node 1 :FLAT_TX_SCHEMESHARD ... -24T21:31:29.969401Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-24T21:31:29.969434Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-24T21:31:29.969539Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 109 } Time: 109 2025-06-24T21:31:29.969565Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-24T21:31:29.969593Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-24T21:31:29.969627Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-24T21:31:29.969695Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 110 } Time: 110 2025-06-24T21:31:29.969719Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-24T21:31:29.969747Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-24T21:31:29.969779Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-24T21:31:29.969877Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 111 } Time: 111 2025-06-24T21:31:29.969901Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-24T21:31:29.969921Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-24T21:31:29.969942Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-24T21:31:29.969995Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 112 } Time: 112 2025-06-24T21:31:29.970010Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-24T21:31:29.970029Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-24T21:31:29.970051Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-24T21:31:29.970098Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 113 } Time: 113 2025-06-24T21:31:29.970115Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-24T21:31:29.970135Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-24T21:31:29.970156Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-24T21:31:29.970200Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 114 } Time: 114 2025-06-24T21:31:29.970215Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-24T21:31:29.970232Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-24T21:31:29.970252Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-24T21:31:29.970294Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 115 } Time: 115 2025-06-24T21:31:29.970310Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-24T21:31:29.970327Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-24T21:31:29.970357Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-24T21:31:29.970435Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 116 } Time: 116 2025-06-24T21:31:29.970459Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing 
direct TxStoreMetrics 2025-06-24T21:31:29.970486Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-24T21:31:29.970519Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-24T21:31:29.970593Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 117 } Time: 117 2025-06-24T21:31:29.970613Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-24T21:31:29.970640Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-24T21:31:29.970674Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-24T21:31:29.970746Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 118 } Time: 118 2025-06-24T21:31:29.970770Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-24T21:31:29.970796Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-24T21:31:29.970831Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-24T21:31:29.970921Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 119 } Time: 119 2025-06-24T21:31:29.970947Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-24T21:31:29.970975Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-24T21:31:29.971008Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-24T21:31:29.971103Z node 6 :GRAPH TRACE: shard_impl.cpp:226: SHARD Handle TEvGraph::TEvGetMetrics from [6:574:2501] 2025-06-24T21:31:29.971183Z node 6 :GRAPH DEBUG: tx_get_metrics.cpp:20: SHARD TTxGetMetrics::Execute 2025-06-24T21:31:29.971233Z node 6 :GRAPH DEBUG: backends.cpp:326: DB Querying from 0 to 119 2025-06-24T21:31:29.986612Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.986706Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.986737Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.986765Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.986788Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.986826Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.986853Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.986877Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.986905Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.986931Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.986955Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.986980Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987005Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987031Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987055Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987079Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987103Z node 6 :GRAPH DEBUG: 
tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987127Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987170Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987196Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987221Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987247Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987271Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987295Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987323Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987346Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987370Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987394Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987416Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987439Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987462Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987485Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987508Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987531Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987557Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987581Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987605Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987630Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987655Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987680Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987702Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987725Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987753Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987778Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987801Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987823Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987846Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987872Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987895Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987918Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 
2025-06-24T21:31:29.987942Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987964Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.987987Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.988012Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.988035Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.988061Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.988084Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.988106Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.988130Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.988155Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-24T21:31:29.988189Z node 6 :GRAPH DEBUG: tx_get_metrics.cpp:25: SHARD TTxGetMetric::Complete 2025-06-24T21:31:29.988273Z node 6 :GRAPH TRACE: tx_get_metrics.cpp:26: SHARD TxGetMetrics returned 60 points for request 3 2025-06-24T21:31:29.988472Z node 6 :GRAPH TRACE: service_impl.cpp:199: SVC TEvMetricsResult 3 2025-06-24T21:31:29.988529Z node 6 :GRAPH TRACE: service_impl.cpp:202: SVC TEvMetricsResult found request 3 resending to [6:575:2502] |98.7%| [TS] {RESULT} ydb/core/graph/ut/unittest >> test.py::test_viewer_acl_write [GOOD] >> test.py::test_viewer_autocomplete >> KqpPg::InsertValuesFromTableWithDefaultBool+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultBool-useSink >> test.py::test_viewer_autocomplete [GOOD] >> test.py::test_viewer_check_access >> test.py::test_viewer_check_access [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListConnections >> test.py::test_viewer_query >> DataShardBackgroundCompaction::ShouldNotCompactWhenCopyTable [GOOD] >> DataShardBackgroundCompaction::ShouldNotCompactEmptyTable >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListConnections [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeConnection >> test.py::test_viewer_query [GOOD] >> test.py::test_viewer_query_from_table >> ReadUpdateWrite::Load >> test.py::test_viewer_query_from_table [GOOD] >> test.py::test_viewer_query_from_table_different_schemas >> DataShardStats::MultipleChannelsStatsCorrect [GOOD] >> DataShardStats::HistogramStatsCorrect >> TopicSessionTests::SessionWithPredicateAndSessionWithoutPredicate [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeConnection [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyConnection >> KqpPg::CreateUniqComplexPgColumn+useSink [GOOD] >> KqpPg::CreateUniqComplexPgColumn-useSink >> TopicSessionTests::SecondSessionWithoutOffsetsAfterSessionConnected >> test.py::test_viewer_query_from_table_different_schemas [GOOD] >> test.py::test_viewer_query_issue_13757 >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyConnection [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyConnectionWithServiceAccount >> 
TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDeleteConnection >> test.py::test_viewer_query_issue_13757 [GOOD] >> test.py::test_viewer_query_issue_13945 >> TGRpcRateLimiterTest::CreateResource [GOOD] >> TGRpcRateLimiterTest::UpdateResource >> KqpBatchDelete::ManyPartitions_2 [GOOD] >> test.py::test_viewer_query_issue_13945 [GOOD] >> test.py::test_pqrb_tablet >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDeleteConnection [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendTestConnection >> KesusProxyTest::ReconnectsWithKesusWhenNotConnected [GOOD] >> KesusProxyTest::ReconnectsWithKesusWhenPipeDestroyed >> KesusProxyTest::ReconnectsWithKesusWhenPipeDestroyed [GOOD] >> KesusProxyTest::RejectsNotCanonizedResourceName [GOOD] >> KesusProxyTest::SubscribesOnResource >> KesusProxyTest::SubscribesOnResource [GOOD] >> KesusProxyTest::SubscribesOnResourcesWhenReconnected [GOOD] >> KesusProxyTest::ProxyRequestDuringDisconnection [GOOD] >> KesusProxyTest::DeactivateSessionWhenResourceClosed [GOOD] >> KesusProxyTest::SendsProxySessionOnceOnSuccess [GOOD] >> KesusProxyTest::SendsProxySessionOnceOnFailure [GOOD] >> KesusProxyTest::AnswersWithSessionWhenResourceIsAlreadyKnown >> KesusProxyTest::AnswersWithSessionWhenResourceIsAlreadyKnown [GOOD] >> KesusProxyTest::SendsBrokenUpdateWhenKesusPassesError [GOOD] >> KesusProxyTest::AllocatesResourceWithKesus [GOOD] >> KesusProxyTest::DisconnectsDuringActiveSession [GOOD] >> KesusProxyTest::AllocatesResourceOffline [GOOD] >> KesusProxyTest::ConnectsDuringOfflineAllocation [GOOD] >> KesusResourceAllocationStatisticsTest::ReturnsDefaultValues [GOOD] >> KesusResourceAllocationStatisticsTest::CalculatesAverage [GOOD] >> KesusResourceAllocationStatisticsTest::TakesBestStat [GOOD] >> TQuoterServiceTest::StaticRateLimiter >> test.py::test_pqrb_tablet [GOOD] >> test.py::test_viewer_nodes_issue_14992 [GOOD] >> test.py::test_operations_list [GOOD] >> test.py::test_operations_list_page [GOOD] >> test.py::test_operations_list_page_bad [GOOD] >> test.py::test_scheme_directory >> test_dc_local.py::TestAlloc::test_dc_locality[kikimr0] [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendTestConnection [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendTestConnectionWithServiceAccount >> test.py::test_scheme_directory [GOOD] >> test.py::test_topic_data >> TabletService_ExecuteMiniKQL::MalformedProgram [GOOD] >> TabletService_ExecuteMiniKQL::DryRunEraseRow >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendTestConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateBinding ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::ManyPartitions_2 [GOOD] Test command err: Trying to start YDB, gRPC: 22824, MsgBus: 62691 2025-06-24T21:29:10.974704Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631104769621810:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:10.983878Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: 
SetPath # /home/runner/.ya/build/build_root/2btm/002b29/r3tmp/tmptXeBnA/pdisk_1.dat 2025-06-24T21:29:11.348658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:11.348759Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:11.352553Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:11.394598Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:11.395392Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631104769621623:2079] 1750800550895581 != 1750800550895584 TServer::EnableGrpc on GrpcPort 22824, node 1 2025-06-24T21:29:11.519755Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:11.519781Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:11.519809Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:11.519949Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62691 2025-06-24T21:29:11.943359Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:62691 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.378967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-24T21:29:12.412818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:12.602629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.787437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.873625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.637913Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631121949492434:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.638019Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.930248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.007651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.047455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.088290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.133217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.210690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.264596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.376799Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631126244460392:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.376880Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.377090Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631126244460397:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.381352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:15.398870Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631126244460399:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:15.488244Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631126244460450:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:15.956737Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631104769621810:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.956801Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:16.595915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... Trying to start YDB, gRPC: 1785, MsgBus: 9127 2025-06-24T21:29:22.715754Z node 2 :METADATA_PROV ... ard__operation_create_table.cpp:667) waiting... Trying to start YDB, gRPC: 63217, MsgBus: 31133 2025-06-24T21:31:20.398396Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519631664074117686:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:31:20.398453Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002b29/r3tmp/tmpXmgIOM/pdisk_1.dat 2025-06-24T21:31:20.589663Z node 12 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:20.590235Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:7519631664074117662:2079] 1750800680396277 != 1750800680396280 2025-06-24T21:31:20.614307Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:20.614446Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:20.618018Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63217, node 12 2025-06-24T21:31:20.690263Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:31:20.690293Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:31:20.690306Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:31:20.690504Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31133 2025-06-24T21:31:21.407347Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected 
event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31133 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:31:21.743020Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:31:21.767485Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:31:21.878504Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:31:22.221658Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:31:22.354992Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:31:25.403300Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519631664074117686:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:31:25.403422Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:31:26.392869Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519631689843923082:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:26.393020Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:26.476354Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:26.561365Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:26.621281Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:26.679878Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:26.748669Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:26.844841Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:26.941247Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:27.040180Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519631694138891050:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:27.040285Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:27.040586Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519631694138891055:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:27.045403Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:31:27.064198Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519631694138891057:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:31:27.136555Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7519631694138891108:3439] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:31:29.218460Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> test_http_api.py::TestHttpApi::test_create_idempotency [GOOD] >> test_http_api.py::TestHttpApi::test_stop_idempotency >> DataShardBackgroundCompaction::ShouldNotCompactEmptyTable [GOOD] >> DataShardBackgroundCompaction::ShouldNotCompactSecondTime >> test_postgres.py::TestPostgresSuite::test_postgres_suite[withtable] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[horology] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateBinding [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListBindings >> test_query_cache.py::TestQueryCache::test [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListBindings [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeBinding >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeBinding [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyBinding >> KqpPg::InsertValuesFromTableWithDefaultBool-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_SerialNotNull+useSink >> TSentinelTests::PDiskRackGuardHalfRack [GOOD] >> TSentinelTests::PDiskRackGuardFullRack >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyBinding [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDeleteBinding >> TQuoterServiceTest::StaticRateLimiter [GOOD] >> TQuoterServiceTest::StaticMultipleAndResources >> TGRpcRateLimiterTest::UpdateResource [GOOD] >> TGRpcRateLimiterTest::DropResource >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDeleteBinding [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateQuery >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateQuery [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListQueries >> TopicSessionTests::SecondSessionWithoutOffsetsAfterSessionConnected [GOOD] >> TopicSessionTests::TwoSessionsWithOffsets >> test.py::test_topic_data [GOOD] >> test.py::test_transfer_describe >> test.py::test_transfer_describe [GOOD] >> test.py::test_viewer_query_long >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListQueries [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeQuery >> TabletService_ExecuteMiniKQL::DryRunEraseRow [GOOD] >> TabletService_ExecuteMiniKQL::OnlyAdminsAllowed >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeQuery [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendGetQueryStatus >> TestFilterSet::FilterGroup >> test_alloc_default.py::TestAlloc::test_default_limits[kikimr0] >> 
DataShardBackgroundCompaction::ShouldNotCompactSecondTime [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendGetQueryStatus [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyQuery >> TQuoterServiceTest::StaticMultipleAndResources [GOOD] >> TQuoterServiceTest::StaticDeadlines >> Etcd_KV::ReadRangeFromRevision [GOOD] >> Etcd_KV::UpdateWithGetPrevious |98.7%| [TA] $(B)/ydb/core/kqp/ut/join/test-results/unittest/{meta.json ... results_accumulator.log} >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyQuery [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDeleteQuery >> SequenceShardTests::Basics >> KqpPg::CreateUniqComplexPgColumn-useSink [GOOD] >> KqpPg::CreateTempTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_background_compaction/unittest >> DataShardBackgroundCompaction::ShouldNotCompactSecondTime [GOOD] Test command err: 2025-06-24T21:31:17.753138Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:17.753597Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:31:17.753886Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001931/r3tmp/tmpX0uQy2/pdisk_1.dat 2025-06-24T21:31:18.266678Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:31:18.295104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:31:18.378484Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:18.394191Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750800674450549 != 1750800674450553 2025-06-24T21:31:18.439854Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:31:18.447728Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T21:31:18.448153Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:18.448330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:18.461805Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:18.571428Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T21:31:18.571526Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T21:31:18.575389Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:600:2508] 2025-06-24T21:31:18.744103Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:600:2508] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T21:31:18.744255Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:600:2508] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:31:18.745084Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:600:2508] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:31:18.745205Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:600:2508] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:31:18.745656Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:31:18.745878Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:600:2508] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:31:18.746045Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:600:2508] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:31:18.749821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:18.750531Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T21:31:18.751325Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:600:2508] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T21:31:18.751400Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:600:2508] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T21:31:18.817059Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:31:18.818430Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:31:18.819018Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:31:18.819338Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:31:18.876996Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:31:18.877833Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:31:18.877970Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:31:18.879871Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:31:18.879997Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:31:18.880070Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:31:18.881613Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-24T21:31:18.881804Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:31:18.881903Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:31:18.892828Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:31:18.950401Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:31:18.952695Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:31:18.952896Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:31:18.952957Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:31:18.952998Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:31:18.953042Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:31:18.953303Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:31:18.953363Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:31:18.955756Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:31:18.955892Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:31:18.955963Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:31:18.956007Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:31:18.956117Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:31:18.956195Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:31:18.956242Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:31:18.956277Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:31:18.956326Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:31:18.956464Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:31:18.956521Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:31:18.956573Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], 
sessionId# [0:0:0] 2025-06-24T21:31:18.958410Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:31:18.958509Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:31:18.958645Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:31:18.959081Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:31:18.959313Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:31:18.959439Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:31:18.959602Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21: ... node 5 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=3 2025-06-24T21:31:42.008032Z node 5 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-24T21:31:42.008115Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:31:42.008152Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit ExecuteWrite 2025-06-24T21:31:42.008198Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-24T21:31:42.008238Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T21:31:42.008330Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T21:31:42.008365Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-24T21:31:42.008406Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:31:42.008451Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:31:42.008496Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-24T21:31:42.008518Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:31:42.008551Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 72075186224037888 has finished 2025-06-24T21:31:42.019924Z node 5 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-24T21:31:42.020038Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T21:31:42.020101Z node 5 :TX_DATASHARD TRACE: 
finish_propose_write_unit.cpp:163: Propose transaction complete txid 2 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2025-06-24T21:31:42.020251Z node 5 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:31:42.022739Z node 5 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [5:59:2106] Handle TEvNavigate describe path /Root/table-1 2025-06-24T21:31:42.056541Z node 5 :TX_PROXY DEBUG: describe.cpp:272: Actor# [5:811:2641] HANDLE EvNavigateScheme /Root/table-1 2025-06-24T21:31:42.057258Z node 5 :TX_PROXY DEBUG: describe.cpp:356: Actor# [5:811:2641] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-24T21:31:42.057446Z node 5 :TX_PROXY DEBUG: describe.cpp:435: Actor# [5:811:2641] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/table-1" Options { ShowPrivateTable: true } 2025-06-24T21:31:42.058713Z node 5 :TX_PROXY DEBUG: describe.cpp:448: Actor# [5:811:2641] Handle TEvDescribeSchemeResult Forward to# [5:552:2478] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/table-1" PathDescription { Self { Name: "table-1" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715657 CreateStep: 1000 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } 
ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046644480 2025-06-24T21:31:42.061554Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [5:815:2645], Recipient [5:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:31:42.061637Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:31:42.061693Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [5:814:2644], serverId# [5:815:2645], sessionId# [0:0:0] 2025-06-24T21:31:42.061863Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553169, Sender [5:813:2643], Recipient [5:625:2529]: NKikimrTxDataShard.TEvGetInfoRequest 2025-06-24T21:31:42.062840Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [5:818:2648], Recipient [5:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:31:42.062911Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:31:42.062972Z node 5 :TX_DATASHARD 
DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [5:817:2647], serverId# [5:818:2648], sessionId# [0:0:0] 2025-06-24T21:31:42.063828Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553210, Sender [5:816:2646], Recipient [5:625:2529]: NKikimrTxDataShard.TEvCompactTable PathId { OwnerId: 72057594046644480 LocalId: 2 } CompactBorrowed: false 2025-06-24T21:31:42.064043Z node 5 :TX_DATASHARD INFO: datashard__compaction.cpp:141: Started background compaction# 1 of 72075186224037888 tableId# 2 localTid# 1001, requested from [5:816:2646], partsCount# 0, memtableSize# 728, memtableWaste# 3880, memtableRows# 3 2025-06-24T21:31:42.067604Z node 5 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037888, table# 1001, finished edge# 1, ts 1970-01-01T00:00:01.527004Z 2025-06-24T21:31:42.067718Z node 5 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001, finished edge# 1, front# 1 2025-06-24T21:31:42.067782Z node 5 :TX_DATASHARD DEBUG: datashard__compaction.cpp:260: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001 sending TEvCompactTableResult to# [5:816:2646]pathId# [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T21:31:42.068688Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [5:616:2523], Recipient [5:625:2529]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-24T21:31:42.069353Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [5:825:2654], Recipient [5:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:31:42.069433Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:31:42.069501Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [5:824:2653], serverId# [5:825:2654], sessionId# [0:0:0] 2025-06-24T21:31:42.069676Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553210, Sender [5:823:2652], Recipient [5:625:2529]: NKikimrTxDataShard.TEvCompactTable PathId { OwnerId: 72057594046644480 LocalId: 2 } CompactBorrowed: false 2025-06-24T21:31:42.069827Z node 5 :TX_DATASHARD DEBUG: datashard__compaction.cpp:118: Background compaction of tablet# 72075186224037888 of path# [OwnerId: 72057594046644480, LocalPathId: 2], requested from# [5:823:2652] is not needed >> TGRpcRateLimiterTest::DropResource [GOOD] >> TGRpcRateLimiterTest::DescribeResource >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDeleteQuery [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendControlQuery |98.7%| [TM] {RESULT} ydb/core/tx/datashard/ut_background_compaction/unittest >> TTxDataShardBuildIndexScan::ShadowBorrowCompaction [GOOD] >> TTxDataShardLocalKMeansScan::BadRequest >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendControlQuery [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendGetResultData |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/query_cache/py3test >> test_query_cache.py::TestQueryCache::test [GOOD] >> SequenceShardTests::Basics [GOOD] >> SequenceShardTests::MarkedPipeRetries |98.7%| [TM] {RESULT} ydb/tests/functional/query_cache/py3test >> SequenceShardTests::MarkedPipeRetries [GOOD] >> SequenceShardTests::FreezeRestoreRedirect >> 
test_postgres.py::TestPostgresSuite::test_postgres_suite[horology] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[float8] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendGetResultData [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListJobs >> test.py::test_viewer_query_long [GOOD] >> test.py::test_viewer_query_long_multipart >> TestFilterSet::FilterGroup [GOOD] >> SequenceShardTests::FreezeRestoreRedirect [GOOD] >> test.py::test_viewer_query_long_multipart [GOOD] >> TabletService_ExecuteMiniKQL::OnlyAdminsAllowed [GOOD] >> ReadUpdateWrite::Load [GOOD] >> KqpPg::InsertNoTargetColumns_SerialNotNull+useSink [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithCloseNotCommitted [GOOD] >> BasicStatistics::NotFullStatisticsColumnshard [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListJobs [GOOD] >> TQuoterServiceTest::StaticDeadlines [GOOD] >> BasicUsage::TWriteSession_WriteEncoded [GOOD] >> KqpPg::InsertNoTargetColumns_SerialNotNull-useSink >> TabletService_Restart::Basics >> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds >> TestFilterSet::DuplicationValidation >> SequenceShardTests::NegativeIncrement >> TopicSessionTests::TwoSessionsWithOffsets [GOOD] >> TGRpcRateLimiterTest::DescribeResource [GOOD] >> KqpPg::CreateTempTable [GOOD] >> TTxDataShardLocalKMeansScan::BadRequest [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeJob >> QuoterWithKesusTest::ForbidsNotCanonizedQuoterPath >> CompressExecutor::TestReorderedExecutor >> SequenceShardTests::NegativeIncrement [GOOD] >> TestFilterSet::DuplicationValidation [GOOD] >> TopicSessionTests::BadDataSessionError >> KqpPg::CreateTempTableSerial >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeJob [GOOD] >> TGRpcRateLimiterTest::ListResources >> TTxDataShardLocalKMeansScan::TooManyClusters >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateConnection >> TestFilterSet::CompilationValidation >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateConnection [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateConnectionWithServiceAccount >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListConnections >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListConnections [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeConnection >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeConnection [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyConnection |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/tests/py3test >> test.py::test_viewer_query_long_multipart [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/tx/sequenceshard/ut/unittest >> SequenceShardTests::NegativeIncrement [GOOD] Test command err: 2025-06-24T21:31:44.849495Z node 1 :SEQUENCESHARD TRACE: sequenceshard_impl.cpp:38: [sequenceshard 72057594037927937] OnActivateExecutor 2025-06-24T21:31:44.849632Z node 1 :SEQUENCESHARD TRACE: tx_init_schema.cpp:14: [sequenceshard 72057594037927937] TTxInitSchema.Execute 2025-06-24T21:31:44.869474Z node 1 :SEQUENCESHARD TRACE: tx_init.cpp:14: [sequenceshard 72057594037927937] TTxInit.Execute 2025-06-24T21:31:44.875603Z node 1 :SEQUENCESHARD TRACE: tx_init_schema.cpp:22: [sequenceshard 72057594037927937] TTxInitSchema.Complete 2025-06-24T21:31:44.875678Z node 1 :SEQUENCESHARD TRACE: tx_init.cpp:112: [sequenceshard 72057594037927937] 
TTxInit.Complete 2025-06-24T21:31:44.883253Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:21: [sequenceshard 72057594037927937] TTxCreateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Record# PathId { OwnerId: 123 LocalId: 42 } 2025-06-24T21:31:44.884253Z node 1 :SEQUENCESHARD NOTICE: tx_create_sequence.cpp:113: [sequenceshard 72057594037927937] TTxCreateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] MinValue# 1 MaxValue# 9223372036854775807 StartValue# 1 Cache# 1 Increment# 1 Cycle# false State# Active 2025-06-24T21:31:44.912206Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:118: [sequenceshard 72057594037927937] TTxCreateSequence.Complete 2025-06-24T21:31:44.912719Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:21: [sequenceshard 72057594037927937] TTxCreateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Record# PathId { OwnerId: 123 LocalId: 42 } 2025-06-24T21:31:44.912806Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:33: [sequenceshard 72057594037927937] TTxCreateSequence.Execute SEQUENCE_ALREADY_EXISTS PathId# [OwnerId: 123, LocalPathId: 42] 2025-06-24T21:31:44.912891Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:118: [sequenceshard 72057594037927937] TTxCreateSequence.Complete 2025-06-24T21:31:44.913137Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:21: [sequenceshard 72057594037927937] TTxCreateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Record# PathId { OwnerId: 123 LocalId: 51 } StartValue: 100001 Cache: 10 2025-06-24T21:31:44.913277Z node 1 :SEQUENCESHARD NOTICE: tx_create_sequence.cpp:113: [sequenceshard 72057594037927937] TTxCreateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] MinValue# 1 MaxValue# 9223372036854775807 StartValue# 100001 Cache# 10 Increment# 1 Cycle# false State# Active 2025-06-24T21:31:44.934019Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:118: [sequenceshard 72057594037927937] TTxCreateSequence.Complete 2025-06-24T21:31:44.934519Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2025-06-24T21:31:44.934623Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# 1 AllocationCount# 1 AllocationIncrement# 1 2025-06-24T21:31:44.947848Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-24T21:31:44.948239Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 10 2025-06-24T21:31:44.948717Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# 2 AllocationCount# 10 AllocationIncrement# 1 2025-06-24T21:31:44.961085Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-24T21:31:44.961550Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Cache# 0 2025-06-24T21:31:44.961669Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# 
[OwnerId: 123, LocalPathId: 51] AllocationStart# 100001 AllocationCount# 10 AllocationIncrement# 1 2025-06-24T21:31:44.980659Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-24T21:31:44.981264Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Cache# 50 2025-06-24T21:31:44.981366Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] AllocationStart# 100011 AllocationCount# 50 AllocationIncrement# 1 2025-06-24T21:31:44.995699Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-24T21:31:44.996241Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 99] Cache# 0 2025-06-24T21:31:44.996319Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:35: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SEQUENCE_NOT_FOUND PathId# [OwnerId: 123, LocalPathId: 99] 2025-06-24T21:31:44.996383Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-24T21:31:44.996665Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 18446744073709551615 2025-06-24T21:31:44.996759Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# 12 AllocationCount# 9223372036854775796 AllocationIncrement# 1 2025-06-24T21:31:45.010526Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-24T21:31:45.011013Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 1 2025-06-24T21:31:45.011072Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:72: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SEQUENCE_OVERFLOW PathId# [OwnerId: 123, LocalPathId: 42] 2025-06-24T21:31:45.011187Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-24T21:31:45.011515Z node 1 :SEQUENCESHARD TRACE: tx_drop_sequence.cpp:20: [sequenceshard 72057594037927937] TTxDropSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] 2025-06-24T21:31:45.011607Z node 1 :SEQUENCESHARD NOTICE: tx_drop_sequence.cpp:43: [sequenceshard 72057594037927937] TTxDropSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] 2025-06-24T21:31:45.028297Z node 1 :SEQUENCESHARD TRACE: tx_drop_sequence.cpp:48: [sequenceshard 72057594037927937] TTxDropSequence.Complete 2025-06-24T21:31:45.028924Z node 1 :SEQUENCESHARD TRACE: tx_drop_sequence.cpp:20: [sequenceshard 72057594037927937] TTxDropSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] 2025-06-24T21:31:45.028991Z node 1 :SEQUENCESHARD TRACE: tx_drop_sequence.cpp:33: [sequenceshard 72057594037927937] TTxDropSequence.Execute SEQUENCE_NOT_FOUND PathId# [OwnerId: 123, LocalPathId: 42] 2025-06-24T21:31:45.029079Z node 1 :SEQUENCESHARD TRACE: 
tx_drop_sequence.cpp:48: [sequenceshard 72057594037927937] TTxDropSequence.Complete 2025-06-24T21:31:45.074700Z node 1 :SEQUENCESHARD TRACE: sequenceshard_impl.cpp:38: [sequenceshard 72057594037927937] OnActivateExecutor 2025-06-24T21:31:45.074817Z node 1 :SEQUENCESHARD TRACE: tx_init_schema.cpp:14: [sequenceshard 72057594037927937] TTxInitSchema.Execute 2025-06-24T21:31:45.075750Z node 1 :SEQUENCESHARD TRACE: tx_init_schema.cpp:22: [sequenceshard 72057594037927937] TTxInitSchema.Complete 2025-06-24T21:31:45.076385Z node 1 :SEQUENCESHARD TRACE: tx_init.cpp:14: [sequenceshard 72057594037927937] TTxInit.Execute 2025-06-24T21:31:45.086381Z node 1 :SEQUENCESHARD TRACE: tx_init.cpp:112: [sequenceshard 72057594037927937] TTxInit.Complete 2025-06-24T21:31:45.094508Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2025-06-24T21:31:45.094596Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:35: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SEQUENCE_NOT_FOUND PathId# [OwnerId: 123, LocalPathId: 42] 2025-06-24T21:31:45.094658Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-24T21:31:45.094941Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Cache# 0 2025-06-24T21:31:45.095041Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] AllocationStart# 100061 AllocationCount# 10 AllocationIncrement# 1 2025-06-24T21:31:45.111135Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-24T21:31:45.111757Z node 1 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:21: [sequenceshard 72057594037927937] TTxUpdateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Record# PathId { OwnerId: 123 LocalId: 51 } NextValue: 200000 NextUsed: true 2025-06-24T21:31:45.111887Z node 1 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:103: [sequenceshard 72057594037927937] TTxUpdateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] 2025-06-24T21:31:45.124417Z node 1 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:108: [sequenceshard 72057594037927937] TTxUpdateSequence.Complete 2025-06-24T21:31:45.125021Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Cache# 0 2025-06-24T21:31:45.125129Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] AllocationStart# 200001 AllocationCount# 10 AllocationIncrement# 1 2025-06-24T21:31:45.137592Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-24T21:31:45.138047Z node 1 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:21: [sequenceshard 72057594037927937] TTxUpdateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Record# PathId { OwnerId: 123 LocalId: 51 } Cache: 5 2025-06-24T21:31:45.138131Z node 1 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:103: [sequenceshard 72057594037927937] TTxUpdateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] 
2025-06-24T21:31:45.179342Z node 1 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:108: [sequenceshard 72057594037927937] TTxUpdateSequence.Complete 2025-06-24T21:31:45.179895Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Cache# 0 2025-06-24T21:31:45.180015Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] AllocationStart# 200011 AllocationCount# 5 AllocationIncrement# 1 2025-06-24T21:31:45.213199Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard ... Id: 43] Cache# 0 2025-06-24T21:31:46.169633Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 43] AllocationStart# 11 AllocationCount# 100 AllocationIncrement# 1 2025-06-24T21:31:46.183730Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-24T21:31:46.184232Z node 3 :SEQUENCESHARD TRACE: tx_restore_sequence.cpp:21: [sequenceshard 72057594037927937] TTxRestoreSequence.Execute PathId# [OwnerId: 123, LocalPathId: 43] Record# PathId { OwnerId: 123 LocalId: 43 } MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 NextValue: 11 Cache: 100 Increment: 1 2025-06-24T21:31:46.184282Z node 3 :SEQUENCESHARD TRACE: tx_restore_sequence.cpp:66: [sequenceshard 72057594037927937] TTxRestoreSequence.Execute SEQUENCE_ALREADY_ACTIVE PathId# [OwnerId: 123, LocalPathId: 43] 2025-06-24T21:31:46.184346Z node 3 :SEQUENCESHARD TRACE: tx_restore_sequence.cpp:103: [sequenceshard 72057594037927937] TTxRestoreSequence.Complete 2025-06-24T21:31:46.184585Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:22: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] RedirectTo# 12345 2025-06-24T21:31:46.184675Z node 3 :SEQUENCESHARD NOTICE: tx_redirect_sequence.cpp:59: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] RedirectTo# 12345 2025-06-24T21:31:46.196768Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:64: [sequenceshard 72057594037927937] TTxRedirectSequence.Complete 2025-06-24T21:31:46.197220Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:22: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] RedirectTo# 12345 2025-06-24T21:31:46.197309Z node 3 :SEQUENCESHARD NOTICE: tx_redirect_sequence.cpp:59: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] RedirectTo# 12345 2025-06-24T21:31:46.210275Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:64: [sequenceshard 72057594037927937] TTxRedirectSequence.Complete 2025-06-24T21:31:46.210639Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:22: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] RedirectTo# 12345 2025-06-24T21:31:46.210737Z node 3 :SEQUENCESHARD NOTICE: tx_redirect_sequence.cpp:59: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] RedirectTo# 12345 2025-06-24T21:31:46.226810Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:64: [sequenceshard 72057594037927937] TTxRedirectSequence.Complete 
2025-06-24T21:31:46.227262Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2025-06-24T21:31:46.227342Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:54: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SEQUENCE_MOVED PathId# [OwnerId: 123, LocalPathId: 42] MovedTo# 12345 2025-06-24T21:31:46.227417Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-24T21:31:46.227729Z node 3 :SEQUENCESHARD TRACE: tx_freeze_sequence.cpp:20: [sequenceshard 72057594037927937] TTxFreezeSequence.Execute PathId# [OwnerId: 123, LocalPathId: 43] 2025-06-24T21:31:46.227826Z node 3 :SEQUENCESHARD NOTICE: tx_freeze_sequence.cpp:68: [sequenceshard 72057594037927937] TTxFreezeSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 43] 2025-06-24T21:31:46.246923Z node 3 :SEQUENCESHARD TRACE: tx_freeze_sequence.cpp:73: [sequenceshard 72057594037927937] TTxFreezeSequence.Complete 2025-06-24T21:31:46.247561Z node 3 :SEQUENCESHARD TRACE: tx_restore_sequence.cpp:21: [sequenceshard 72057594037927937] TTxRestoreSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Record# PathId { OwnerId: 123 LocalId: 42 } MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 NextValue: 111 Cache: 100 Increment: 1 2025-06-24T21:31:46.247741Z node 3 :SEQUENCESHARD NOTICE: tx_restore_sequence.cpp:98: [sequenceshard 72057594037927937] TTxRestoreSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] Record# PathId { OwnerId: 123 LocalId: 42 } MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 NextValue: 111 Cache: 100 Increment: 1 2025-06-24T21:31:46.261598Z node 3 :SEQUENCESHARD TRACE: tx_restore_sequence.cpp:103: [sequenceshard 72057594037927937] TTxRestoreSequence.Complete 2025-06-24T21:31:46.262066Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:22: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute PathId# [OwnerId: 123, LocalPathId: 43] RedirectTo# 54321 2025-06-24T21:31:46.262193Z node 3 :SEQUENCESHARD NOTICE: tx_redirect_sequence.cpp:59: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 43] RedirectTo# 54321 2025-06-24T21:31:46.279570Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:64: [sequenceshard 72057594037927937] TTxRedirectSequence.Complete 2025-06-24T21:31:46.280063Z node 3 :SEQUENCESHARD TRACE: tx_freeze_sequence.cpp:20: [sequenceshard 72057594037927937] TTxFreezeSequence.Execute PathId# [OwnerId: 123, LocalPathId: 43] 2025-06-24T21:31:46.280131Z node 3 :SEQUENCESHARD TRACE: tx_freeze_sequence.cpp:48: [sequenceshard 72057594037927937] TTxFreezeSequence.Execute SEQUENCE_MOVED PathId# [OwnerId: 123, LocalPathId: 43] MovedTo# 54321 2025-06-24T21:31:46.280205Z node 3 :SEQUENCESHARD TRACE: tx_freeze_sequence.cpp:73: [sequenceshard 72057594037927937] TTxFreezeSequence.Complete 2025-06-24T21:31:46.280551Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2025-06-24T21:31:46.280659Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# 111 AllocationCount# 100 AllocationIncrement# 1 2025-06-24T21:31:46.295203Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: 
[sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-24T21:31:46.700262Z node 4 :SEQUENCESHARD TRACE: sequenceshard_impl.cpp:38: [sequenceshard 72057594037927937] OnActivateExecutor 2025-06-24T21:31:46.700370Z node 4 :SEQUENCESHARD TRACE: tx_init_schema.cpp:14: [sequenceshard 72057594037927937] TTxInitSchema.Execute 2025-06-24T21:31:46.709675Z node 4 :SEQUENCESHARD TRACE: tx_init.cpp:14: [sequenceshard 72057594037927937] TTxInit.Execute 2025-06-24T21:31:46.717929Z node 4 :SEQUENCESHARD TRACE: tx_init_schema.cpp:22: [sequenceshard 72057594037927937] TTxInitSchema.Complete 2025-06-24T21:31:46.717990Z node 4 :SEQUENCESHARD TRACE: tx_init.cpp:112: [sequenceshard 72057594037927937] TTxInit.Complete 2025-06-24T21:31:46.719845Z node 4 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:21: [sequenceshard 72057594037927937] TTxCreateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Record# PathId { OwnerId: 123 LocalId: 42 } Cache: 10 Increment: -1 2025-06-24T21:31:46.719982Z node 4 :SEQUENCESHARD NOTICE: tx_create_sequence.cpp:113: [sequenceshard 72057594037927937] TTxCreateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] MinValue# -9223372036854775808 MaxValue# -1 StartValue# -1 Cache# 10 Increment# -1 Cycle# false State# Active 2025-06-24T21:31:46.744802Z node 4 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:118: [sequenceshard 72057594037927937] TTxCreateSequence.Complete 2025-06-24T21:31:46.745159Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2025-06-24T21:31:46.745276Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# -1 AllocationCount# 10 AllocationIncrement# -1 2025-06-24T21:31:46.758210Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-24T21:31:46.758617Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2025-06-24T21:31:46.758724Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# -11 AllocationCount# 10 AllocationIncrement# -1 2025-06-24T21:31:46.772761Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-24T21:31:46.773327Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 18446744073709551615 2025-06-24T21:31:46.773477Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# -21 AllocationCount# 9223372036854775788 AllocationIncrement# -1 2025-06-24T21:31:46.787378Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-24T21:31:46.787788Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 1 2025-06-24T21:31:46.787861Z node 4 :SEQUENCESHARD TRACE: 
tx_allocate_sequence.cpp:72: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SEQUENCE_OVERFLOW PathId# [OwnerId: 123, LocalPathId: 42] 2025-06-24T21:31:46.787930Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-24T21:31:46.788234Z node 4 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:21: [sequenceshard 72057594037927937] TTxUpdateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Record# PathId { OwnerId: 123 LocalId: 42 } Cycle: true 2025-06-24T21:31:46.788329Z node 4 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:103: [sequenceshard 72057594037927937] TTxUpdateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] 2025-06-24T21:31:46.808275Z node 4 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:108: [sequenceshard 72057594037927937] TTxUpdateSequence.Complete 2025-06-24T21:31:46.808656Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2025-06-24T21:31:46.808758Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# -1 AllocationCount# 10 AllocationIncrement# -1 2025-06-24T21:31:46.821950Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-24T21:31:46.822430Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2025-06-24T21:31:46.822531Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# -11 AllocationCount# 10 AllocationIncrement# -1 2025-06-24T21:31:46.835236Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::NotFullStatisticsColumnshard [GOOD] Test command err: 2025-06-24T21:24:03.119652Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:417:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:24:03.120247Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:24:03.120353Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00485e/r3tmp/tmpOF0DEj/pdisk_1.dat 2025-06-24T21:24:03.751502Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30041, node 1 2025-06-24T21:24:04.802116Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:24:04.802186Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:24:04.802240Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:24:04.802835Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:24:04.822780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:24:04.948073Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:04.948248Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:04.973216Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24437 2025-06-24T21:24:05.625643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-24T21:24:09.251387Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-24T21:24:09.318441Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:09.318550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:09.348016Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:24:09.352125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:09.589740Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:09.615052Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.615804Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.616375Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.616608Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.616902Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.617000Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.617086Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.617169Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.617263Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-24T21:24:09.827328Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:24:09.827472Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:24:09.845030Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:24:10.015200Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:24:10.087100Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-24T21:24:10.087294Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-24T21:24:10.152086Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-24T21:24:10.152379Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-24T21:24:10.152664Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-24T21:24:10.152755Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-24T21:24:10.152815Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-24T21:24:10.152878Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-24T21:24:10.152958Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-24T21:24:10.153018Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-24T21:24:10.153468Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-24T21:24:10.187916Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:10.188035Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7975: ConnectToSA(), pipe client id: [2:1790:2561], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-24T21:24:10.197500Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2570] 2025-06-24T21:24:10.207764Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1847:2588] 2025-06-24T21:24:10.208065Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1847:2588], schemeshard id = 72075186224037897 2025-06-24T21:24:10.215987Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-24T21:24:10.234490Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-24T21:24:10.234561Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-24T21:24:10.234643Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-24T21:24:10.248469Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:24:10.257127Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-24T21:24:10.257301Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-24T21:24:10.476311Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-24T21:24:10.683190Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-24T21:24:10.765169Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-24T21:24:11.495942Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:24:11.848807Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2142:3021], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:11.849023Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:24:11.976560Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:24:12.176572Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2233:2800];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:24:12.176916Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2233:2800];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:24:12.177235Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2233:2800];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:24:12.177383Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2233:2800];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:24:12.177502Z node 2 :TX_COLUMNSHARD WARN: ... .184236Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:48.184322Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:50.007369Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:30:50.007548Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:30:50.173361Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:50.173434Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:51.822360Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:51.822446Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:52.567364Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:30:53.849326Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:53.849418Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:54.778631Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:30:54.778871Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:30:55.863290Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:55.863359Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-24T21:30:57.395046Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:30:57.518499Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:57.518571Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:30:59.419840Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:30:59.420894Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:30:59.634553Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:30:59.634639Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:31:01.888183Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:31:01.888277Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:31:02.803019Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:31:04.232469Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:31:04.232556Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:31:05.399895Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:31:05.400100Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:31:07.055040Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:31:07.055125Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:31:09.579970Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:31:09.801115Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:31:09.801200Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-24T21:31:11.107125Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7945: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-24T21:31:11.107241Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7957: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-24T21:31:11.107279Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7988: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-24T21:31:11.107319Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-24T21:31:12.750822Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:31:12.751090Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:31:13.018039Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:31:13.018126Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:31:15.812679Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:31:15.812773Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:31:16.996570Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:31:18.570760Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:31:18.570831Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:31:19.709332Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:31:19.709559Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:31:21.208843Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:31:21.208936Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:31:23.605415Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:31:23.830322Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:31:23.830391Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:31:26.571922Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:31:26.572138Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:31:26.831719Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:31:26.831786Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:31:29.413703Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:31:29.413789Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-24T21:31:30.619926Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:31:32.435696Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:31:32.435784Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:31:33.736714Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:31:33.736945Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:31:35.365249Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:31:35.365335Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:31:37.626030Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:31:37.816021Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:31:37.816110Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:31:40.368083Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:31:40.368292Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-24T21:31:40.646790Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:31:40.646875Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:31:43.147924Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:31:43.148002Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:31:44.200054Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-24T21:31:45.435059Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8072: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037897 2025-06-24T21:31:45.435190Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7915: Schedule next SendBaseStatsToSA in 193.000000s, at schemeshard: 72075186224037897 2025-06-24T21:31:45.435592Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 53 ... waiting for TEvSchemeShardStats 2 (done) ... waiting for TEvPropagateStatistics 2025-06-24T21:31:45.452241Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-24T21:31:45.625282Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-24T21:31:45.625366Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-24T21:31:46.640800Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-24T21:31:46.641069Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 ... 
waiting for TEvPropagateStatistics (done) 2025-06-24T21:31:46.641450Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 4 ], ReplyToActorId[ [2:16825:10261]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-24T21:31:46.645824Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 4 ] 2025-06-24T21:31:46.645906Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 4, ReplyToActorId = [2:16825:10261], StatRequests.size() = 1 |98.7%| [TS] {RESULT} ydb/core/tx/sequenceshard/ut/unittest |98.7%| [TM] {RESULT} ydb/core/viewer/tests/py3test >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyConnection [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyConnectionWithServiceAccount >> test_http_api.py::TestHttpApi::test_stop_idempotency [GOOD] >> test_http_api.py::TestHttpApi::test_restart_idempotency ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/high_load/unittest >> ReadUpdateWrite::Load [GOOD] Test command err: Step 1. only write Was written: 0 MiB, Speed: 1844674407370955161 MiB/s Write: 10% 0.899401s 30% 0.899401s 50% 0.899401s 90% 0.899401s 99% 0.899401s Write: 10% 0.630541s 30% 0.630541s 50% 0.630541s 90% 0.630541s 99% 0.630541s Write: 10% 0.610507s 30% 0.610507s 50% 0.610507s 90% 0.610507s 99% 0.610507s Write: 10% 0.872480s 30% 0.872480s 50% 0.872480s 90% 0.872480s 99% 0.872480s Write: 10% 0.966003s 30% 0.966003s 50% 0.966003s 90% 0.966003s 99% 0.966003s Write: 10% 0.930609s 30% 0.930609s 50% 0.930609s 90% 0.930609s 99% 0.930609s Write: 10% 1.161177s 30% 1.161177s 50% 1.161177s 90% 1.161177s 99% 1.161177s Write: 10% 0.876843s 30% 0.876843s 50% 0.876843s 90% 0.876843s 99% 0.876843s Write: 10% 0.880384s 30% 0.880384s 50% 0.880384s 90% 0.880384s 99% 0.880384s Write: 10% 1.122651s 30% 1.122651s 50% 1.122651s 90% 1.122651s 99% 1.122651s Write: 10% 1.305647s 30% 1.305647s 50% 1.305647s 90% 1.305647s 99% 1.305647s Write: 10% 1.086474s 30% 1.086474s 50% 1.086474s 90% 1.086474s 99% 1.086474s Write: 10% 1.124168s 30% 1.124168s 50% 1.124168s 90% 1.124168s 99% 1.124168s Write: 10% 1.230976s 30% 1.230976s 50% 1.230976s 90% 1.230976s 99% 1.230976s Write: 10% 1.323504s 30% 1.323504s 50% 1.323504s 90% 1.323504s 99% 1.323504s Write: 10% 1.119825s 30% 1.119825s 50% 1.119825s 90% 1.119825s 99% 1.119825s Write: 10% 1.172408s 30% 1.172408s 50% 1.172408s 90% 1.172408s 99% 1.172408s Write: 10% 1.327723s 30% 1.327723s 50% 1.327723s 90% 1.327723s 99% 1.327723s Write: 10% 1.132456s 30% 1.132456s 50% 1.132456s 90% 1.132456s 99% 1.132456s Write: 10% 0.963648s 30% 0.963648s 50% 0.963648s 90% 0.963648s 99% Write: 10% 1.143294s 30% 1.143294s 50% 1.143294s 90% 1.143294s 99% 1.143294s 0.963648sWrite: 10% 1.141477s 30% 1.141477s 50% 1.141477s 90% 1.141477s 99% 1.141477s Write: 10% 1.132700s 30% 1.132700s 50% Write: 10% 1.134368s 30% 1.134368s 50% 1.134368s 90% 1.134368s 99% 1.134368s 1.132700s 90% 1.132700s 99% 1.132700sWrite: 10% 1.213306s 30% 1.213306s 50% 1.213306s 90% 1.213306s 99% 1.213306s Write: 10% 0.649906s 30% 0.649906s 50% 0.649906s 90% 0.649906s 99% 0.649906s Write: 10% Write: 10% 1.285449s 30% 1.285449s 50% 1.285449s 90% 1.285449s 99% 1.285449s 0.743924s 30% 0.743924s 50% 0.743924s 90% 0.743924s 99% Write: 10% 1.280263s 30% 1.280263s 50% 1.280263s 90% 1.280263s 99% 1.280263s 0.743924s Write: 10% Write: 10% 0.438378s 30% 0.438378s 50% 0.438378s 90% 0.438378s 99% 0.438378s Write: 10% 1.165968s 30% 1.165968s 50% 0.521990s 30% 1.165968s 90% 0.521990s 50% 
1.165968s 99% 0.521990s1.165968s 90% 0.521990s 99% 0.521990s Write: 10% 1.175203s 30% 1.175203s 50% 1.175203s 90% 1.175203s 99% 1.175203s Write: 10% 1.195264s 30% 1.195264s 50% Write: 10% 0.514138s 30% 0.514138s 50% 0.514138s 90% 0.514138s 99% 0.514138s 1.195264s 90% 1.195264s 99% 1.195264s Write: 10% 0.984571s 30% 0.984571s 50% 0.984571s 90% 0.984571s 99% 0.984571s Write: 10% 1.165336s 30% 1.165336s 50% 1.165336s 90% 1.165336s 99% 1.165336sWrite: 10% 1.084742s 30% 1.084742s 50% 1.084742s 90% 1.084742s 99% 1.084742s Write: 10% 1.420614s 30% 1.420614s 50% 1.420614s 90% 1.420614s 99% 1.420614s Write: 10% 1.402321s 30% 1.402321s 50% Write: 10% 1.384083s 30% 1.384083s 50% 1.384083s 90% 1.384083s 99% 1.384083s 1.402321s 90% Write: 10% 1.190449s 30% 1.190449s 50% 1.190449s 90% 1.190449s 99% 1.190449s 1.402321s 99% 1.402321s Write: 10% 0.971037s 30% 0.971037s 50% 0.971037s 90% 0.971037s 99% Write: 10% 0.971037s0.575925s 30% 0.575925s 50% 0.575925s 90% 0.575925s 99% 0.575925s Write: 10% 1.203949s 30% 1.203949s 50% 1.203949s 90% 1.203949s 99% 1.203949s Write: 10% 0.724077s 30% 0.724077s 50% 0.724077s 90% 0.724077s 99% 0.724077s Write: 10% 1.112006s 30% 1.112006s 50% 1.112006s 90% 1.112006s 99% 1.112006s Write: 10% 0.573625s 30% 0.573625s 50% 0.573625s 90% 0.573625s 99% 0.573625s Write: 10% 0.882712s 30% 0.882712s 50% 0.882712s 90% 0.882712s 99% 0.882712s Write: 10% 0.582190s 30% 0.582190s 50% 0.582190s 90% 0.582190s 99% 0.582190s Write: 10% 0.557949s 30% 0.557949s 50% 0.557949s 90% 0.557949s 99% 0.557949s Write: 10% 1.242546s 30% 1.242546s 50% Write: 10% Write: 10% 0.965599s 30% 0.965599s 50% 0.965599s 90% 0.965599s 99% Write: 10% 1.409810s 30% 1.409810s 50% 1.409810s 90% Write: 10% 1.405744s 30% 1.405744s 50% 1.405744s 90% 1.242546s 90% 1.242546s 99% 1.242546s0.965599s 1.409810sWrite 99% : 10% 1.409810s0.543001s 30% Write0.543001s 50% : 10% 0.543001s 90% Write: 10% 0.543001s 99% 1.223122s 30% 1.223122s 50% 0.577084s 30% 0.577084s 50% 0.577084s 90% 0.577084s 99% 1.405744s 99% 1.405744s1.223122s 90% 1.223122s 99% 1.223122s1.019208s 30% 1.019208s 50% 1.019208s 90% 1.019208s 99% 0.543001s 0.577084s 1.019208s Write: 10% 0.558956s 30% 0.558956s 50% 0.558956s 90% 0.558956s 99% 0.558956s Write: 10% 1.043606s 30% 1.043606s 50% 1.043606s 90% 1.043606s 99% 1.043606s Write: 10% 0.557168s 30% 0.557168s 50% 0.557168s 90% 0.557168s 99% 0.557168s Write: 10% 1.535913s 30% 1.535913s 50% 1.535913s 90% Write: 10% 1.535913s 99% 1.412991s 30% 1.535913s1.412991s 50% 1.412991s 90% 1.412991s 99% 1.412991s Step 2. 
read write Write: 10% 0.322171s 30% 0.322171s 50% 0.322171s 90% 0.322171s 99% 0.322171s Write: 10% 0.250851s 30% 0.250851s 50% 0.250851s 90% 0.250851s 99% 0.250851s Write: 10% 0.286510s 30% 0.286510s 50% 0.286510s 90% 0.286510s 99% 0.286510s Write: 10% 0.277383s 30% 0.277383s 50% 0.277383s 90% 0.277383s 99% 0.277383s Write: 10% 0.321064s 30% 0.321064s 50% 0.321064s 90% 0.321064s 99% 0.321064s Write: 10% 0.486472s 30% 0.486472s 50% 0.486472s 90% Write: 10% 0.531235s 30% 0.531235s 50% 0.531235s 90% Write: 10% 0.531706s 30% 0.531706s 50% 0.531706s 90% 0.531706s 99% 0.531706s 0.486472s 99% 0.486472s0.531235s 99% 0.531235sWrite: 10% 0.484301s 30% 0.484301s 50% 0.484301s 90% 0.484301s 99% 0.484301s Write: 10% 0.503941s 30% 0.503941s 50% 0.503941s 90% 0.503941s 99% 0.503941s Write: 10% 0.543416s 30% 0.543416s 50% 0.543416s 90% 0.543416s 99% 0.543416s Write: 10% 0.494838s 30% 0.494838s 50% 0.494838s 90% 0.494838s 99% 0.494838sWrite: 10% 0.520541s 30% 0.520541s 50% 0.520541s 90% Write: 10% 0.564617s 30% 0.564617s 50% 0.564617s 90% 0.520541s 99% 0.520541s 0.564617s 99% Write: 10% 0.557511s 30% 0.557511s 50% 0.557511s 90% 0.557511s 99% 0.557511s Write: 10% 0.405457s 30% 0.405457s 50% 0.405457s 90% 0.405457s 99% 0.405457s 0.564617s Write: 10% 0.521568s 30% 0.521568s 50% 0.521568s 90% 0.521568s 99% 0.521568s Write: 10% 0.542761s 30% 0.542761s 50% 0.542761s 90% 0.542761s 99% 0.542761sWrite: 10% 0.559013s 30% 0.559013s 50% 0.559013s 90% Write: 10% 0.609148s 30% 0.609148s 50% 0.609148s 90% 0.609148s 99% 0.609148s 0.559013s 99% 0.559013sWrite: 10% 0.529998s 30% 0.529998s 50% 0.529998s 90% 0.529998s 99% 0.529998sWrite: 10% Write: 10% 0.519919s 30% 0.519919s 50% 0.519919s 90% 0.519919s 99% 0.519919s 0.502966s 30% 0.502966s 50% 0.502966s 90% 0.502966s 99% Write: 10% 0.556767s 30% 0.556767s 50% 0.556767s 90% 0.556767s 99% 0.556767s0.502966s Write: 10% 0.570246s 30% 0.570246s 50% 0.570246s 90% 0.570246s 99% 0.570246s Write: 10% 0.539931s 30% 0.539931s 50% 0.539931s 90% 0.539931s 99% 0.539931s Write: 10% 0.461526s 30% 0.461526s 50% 0.461526s 90% 0.461526s 99% Write: 10% 0.530437s 30% 0.530437s 50% 0.530437s 90% Write: 10% 0.581199s 30% 0.581199s 50% 0.581199s 90% 0.461526s0.530437s 99% 0.530437s0.581199s 99% 0.581199sWrite: 10% Write: 10% 0.561176s 30% 0.561176s 50% 0.561176s 90% Write: 10% 0.589287s 30% 0.589287s 50% 0.589287s 90% 0.561176s 99% Write: 10% 0.561176s0.587855s 30% 0.587855s 50% 0.587855s 90% 0.589287s 99% 0.589287s0.587855s 99% 0.587855s0.389558s 30% 0.389558s 50% 0.389558s 90% 0.389558s 99% Write: 10% 0.389558s0.409480s 30% 0.409480s 50% 0.409480s 90% 0.409480s 99% Write: 10% 0.591207s 30% 0.591207s 50% 0.591207s 90% Write: 10% 0.409480s0.502033s 30% 0.502033s 50% 0.591207s 99% 0.591207s0.502033s 90% 0.502033s 99% Write: 10% 0.502033sWrite: 10% 0.391395s 30% 0.391395s 50% 0.391395s 90% 0.391395s 99% 0.391395s 0.421973s 30% 0.421973s 50% 0.421973s 90% 0.421973s 99% Write: 10% 0.421973s0.508129s 30% 0.508129s 50% 0.508129s 90% 0.508129s 99% Write: 10% 0.559471s 30% 0.559471s 50% 0.559471s 90% 0.559471s 99% 0.559471s 0.508129s Write: 10% 0.430377s 30% 0.430377s 50% 0.430377s 90% 0.430377s 99% 0.430377s Write: 10% 0.639325s 30% 0.639325s 50% 0.639325s 90% 0.639325s 99% 0.639325s Write: 10% 0.477519s 30% 0.477519s 50% 0.477519s 90% 0.477519s 99% 0.477519s Write: 10% 0.404771s 30% 0.404771s 50% 0.404771s 90% 0.404771s 99% 0.404771s Write: 10% 0.451310s 30% 0.451310s 50% 0.451310s 90% 0.451310s 99% Write: 10% 0.227786s 30% 0.227786s 50% 0.227786s 90% 0.227786s 99% 0.451310s 0.227786s Write: 10% 
0.430541s 30% 0.430541s 50% 0.430541s 90% 0.430541s 99% 0.430541s Write: 10% 0.318822s 30% 0.318822s 50% 0.318822s 90% 0.318822s 99% 0.318822s Write: 10% 0.235899s 30% 0.235899s 50% 0.235899s 90% 0.235899s 99% 0.235899s Write: 10% 0.146317s 30% 0.146317s 50% 0.146317s 90% 0.146317s 99% 0.146317s Write: 10% 0.362235s 30% 0.362235s 50% 0.362235s 90% 0.362235s 99% 0.362235s Write: 10% 0.407229s 30% 0.407229s 50% 0.407229s 90% 0.407229s 99% 0.407229s Write: 10% 0.425917s 30% 0.425917s 50% 0.425917s 90% 0.425917s 99% 0.425917s Write: 10% 0.229044s 30% 0.229044s 50% 0.229044s 90% 0.229044s 99% 0.229044s Write: 10% 0.297124s 30% 0.297124s 50% 0.297124s 90% 0.297124s 99% 0.297124s Write: 10% Write: 10% 0.345469s 30% 0.345469s 50% 0.345469s 90% 0.345469s 99% 0.345469s Write: 10% 0.208554s 30% 0.208554s 50% 0.208554s 90% 0.208554s 99% 0.208554s 0.458754s 30% 0.458754s 50% 0.458754s 90% 0.458754s 99% 0.458754s Write: 10% 0.498633s 30% 0.498633s 50% 0.498633s 90% 0.498633s 99% 0.498633s Write: 10% 0.321282s 30% 0.321282s 50% 0.321282s 90% 0.321282s 99% 0.321282s Write: 10% 0.382872s 30% 0.382872s 50% 0.382872s 90% 0.382872s 99% 0.382872s Write: 10% 0.508323s 30% 0.508323s 50% 0.508323s 90% 0.508323s 99% 0.508323s Write: 10% 0.249497s 30% 0.249497s 50% 0.249497s 90% 0.249497s 99% 0.249497s Write: 10% 0.108444s 30% 0.108444s 50% 0.108444s 90% 0.108444s 99% 0.108444s Read: 10% 1.118231s 30% 1.118231s 50% 1.118231s 90% 1.118231s 99% 1.118231s Step 3. write modify Write: 10% 0.511144s 30% 0.511144s 50% 0.511144s 90% 0.511144s 99% 0.511144s Write: 10% 0.247022s 30% 0.247022s 50% 0.247022s 90% 0.247022s 99% 0.247022s Write: 10% 0.302310s 30% 0.302310s 50% 0.302310s 90% 0.302310s 99% 0.302310s Write: 10% 0.586074s 30% 0.586074s 50% 0.586074s 90% 0.586074s 99% 0.586074s Write: 10% 0.221671s 30% 0.221671s 50% 0.221671s 90% 0.221671s 99% 0.221671s Write: 10% 0.593740s 30% 0.593740s 50% 0.593740s 90% 0.593740s 99% 0.593740s Write: 10% 0.369834s 30% 0.369834s 50% 0.369834s 90% 0.369834s 99% 0.369834s Write: 10% 0.406191s 30% 0.406191s 50% 0.406191s 90% 0.406191s 99% 0.406191s Write: 10% 0.613189s 30% 0.613189s 50% 0.613189s 90% 0.613189s 99% 0.613189s Write: 10% 0.421336s 30% 0.421336s 50% 0.421336s 90% 0.421336s 99% 0.421336sWrite: 10% 0.657223s 30% 0.657223s 50% 0.657223s 90% 0.657223s 99% 0.657223s Write: 10% Write: 10% 0.430084s 30% 0.430084s 50% 0.430084s 90% Write: 10% 0.384745s 30% 0.384745s 50% 0.384745s 90% 0.384745s 99% 0.384745s Write: 10% 0.400093s 30% 0.400093s 50% 0.400093s 90% 0.400093s 99% 0.400093s0.430084s 99% 0.430084s0.343125s 30% 0.343125s 50% 0.343125s 90% 0.343125s 99% 0.343125s Write: 10% 0.442352s 30% 0.442352s 50% 0.442352s 90% 0.442352s 99% 0.442352s Write: 10% 0.436280s 30% 0.436280s 50% 0.436280s 90% 0.436280s 99% 0.436280s Write: 10% 0.427499s 30% 0.427499s 50% 0.427499s 90% 0.427499s 99% 0.427499s Write: 10% 0.428895s 30% 0.428895s 50% 0.428895s 90% 0.428895s 99% 0.428895s Write: 10% 0.466666s 30% 0.466666s 50% 0.466666s 90% Write: 10% 0.693787s 30% 0.693787s 50% 0.693787s 90% 0.466666s 99% 0.466666s0.693787s 99% 0.693787s Write: 10% 0.540802s 30% 0.540802s 50% 0.540802s 90% 0.540802s 99% 0.540802s Write: 10% 0.333372s 30% 0.333372s 50% 0.333372s 90% 0.333372s 99% 0.333372s Write: 10% Write: 10% 0.547710s 30% 0.547710s 50% 0.547710s 90% Write: 10% Write: 10% 0.711258s 30% 0.711258s 50% 0.711258s 90% 0.711258s 99% 0.711258s 0.513148s 30% 0.513148s 50% 0.513148s 90% 0.812481s 30% 0.812481s 50% 0.812481s 90% 0.812481s 99% 0.513148s 99% 0.513148s0.547710s 99% 0.547710s Write: 10% 
0.350644s 30% 0.350644s 50% 0.350644s 90% 0.350644s 99% Write: 10% 0.515971s 30% 0.515971s 50% 0.515971s 90% 0.812481s 0.350644sWrite: 10% 0.515971s 99% 0.515971sWrite: 10% 0.349199s 30% 0.349199s 50% 0.349199s 90% 0.349199s 99% 0.387667s 30% 0.387667s 50% 0.387667s 90% 0.387667s 99% 0.387667sWrite: 10% Write: 10% 0.512771s 30% 0.512771s 50% 0.512771s 90% 0.512771s 99% 0.433835s 30% 0.433835s 50% 0.433835s 90% 0.433835s 99% 0.349199s 0.512771s 0.433835s Write: 10% 0.481262s 30% 0.481262s 50% 0.481262s 90% 0.481262s 99% 0.481262s Write: 10% 0.351805s 30% 0.351805s 50% 0.351805s 90% 0.351805s 99% 0.351805s Write: 10% Write: 10% 0.345654s 30% 0.345654s 50% 0.345654s 90% 0.345654s 99% 0.335965s 30% 0.335965s 50% 0.335965s 90% 0.335965s 99% 0.345654s 0.335965sWrite: 10% 0.183611s 30% 0.183611s 50% 0.183611s 90% 0.183611s 99% Write: 10% 0.205235s 30% 0.205235s 50% 0.205235s 90% 0.205235s 99% 0.205235s Write: 10% 0.761882s 30% 0.761882s 50% 0.761882s 90% 0.761882s 99% 0.761882s 0.183611sWrite: 10% Write: 10% Write: 10% 0.347275s 30% 0.347275s 50% 0.347275s 90% 0.347275s 99% 0.267724s 30% 0.267724s 50% 0.267724s 90% 0.267724s 99% 0.341857s 30% 0.347275s0.267724s0.341857s 50% 0.341857s 90% Write: 10% 0.341857s 99% 0.450930s 30% 0.450930s 50% 0.341857s0.450930s 90% 0.450930s 99% 0.450930s Write: 10% 0.262176s 30% 0.262176s 50% 0.262176s 90% 0.262176s 99% 0.262176s Write: 10% 0.333726s 30% 0.333726s 50% 0.333726s 90% 0.333726s 99% 0.333726s Write: 10% 0.340772s 30% 0.340772s 50% 0.340772s 90% 0.340772s 99% 0.340772s Write: 10% 0.506144s 30% 0.506144s 50% 0.506144s 90% 0.506144s 99% 0.506144s Write: 10% 0.413436s 30% 0.413436s 50% 0.413436s 90% 0.413436s 99% 0.413436s Write: 10% 0.377055s 30% 0.377055s 50% 0.377055s 90% 0.377055s 99% 0.377055s Write: 10% 0.296382s 30% 0.296382s 50% 0.296382s 90% 0.296382s 99% 0.296382s Write: 10% 0.389278s 30% 0.389278s 50% 0.389278s 90% 0.389278s 99% 0.389278s Write: 10% 0.386290s 30% 0.386290s 50% 0.386290s 90% 0.386290s 99% 0.386290s Write: 10% 0.375395s 30% 0.375395s 50% 0.375395s 90% 0.375395s 99% 0.375395s Write: 10% 0.169507s 30% 0.169507s 50% 0.169507s 90% 0.169507s 99% 0.169507s Write: 10% 0.331288s 30% 0.331288s 50% 0.331288s 90% 0.331288s 99% 0.331288s Write: 10% 0.179860s 30% 0.179860s 50% 0.179860s 90% 0.179860s 99% 0.179860s Write: 10% 0.377558s 30% 0.377558s 50% 0.377558s 90% 0.377558s 99% 0.377558s Write: 10% 0.348098s 30% 0.348098s 50% 0.348098s 90% 0.348098s 99% 0.348098s Write: 10% 0.369803s 30% 0.369803s 50% 0.369803s 90% 0.369803s 99% 0.369803s Write: 10% 0.326488s 30% 0.326488s 50% 0.326488s 90% 0.326488s 99% 0.326488s Write: 10% 0.355876s 30% 0.355876s 50% 0.355876s 90% 0.355876s 99% 0.355876s Write: 10% 0.395673s 30% 0.395673s 50% 0.395673s 90% 0.395673s 99% 0.395673s Write: 10% 0.258028s 30% 0.258028s 50% 0.258028s 90% 0.258028s 99% 0.258028s Update: 10% 0.282972s 30% 0.282972s 50% 0.282972s 90% 0.282972s 99% 0.282972s Step 4. 
read modify write Write: 10% 0.334470sWrite: 10% 0.191391s 30% 0.191391s 50% 0.191391s 90% 0.191391s 99% 0.191391s 30% 0.334470s 50% 0.334470s 90% 0.334470s 99% 0.334470s Write: 10% 0.162901s 30% 0.162901s 50% 0.162901s 90% 0.162901s 99% 0.162901s Write: 10% 0.464600s 30% 0.464600s 50% 0.464600s 90% 0.464600s 99% 0.464600s Write: 10% 0.419120s 30% 0.419120s 50% 0.419120s 90% 0.419120s 99% 0.419120s Write: 10% 0.328053s 30% 0.328053s 50% 0.328053s 90% 0.328053s 99% 0.328053s Write: 10% 0.501666s 30% 0.501666s 50% 0.501666s 90% 0.501666s 99% 0.501666s Write: 10% 0.426472s 30% 0.426472s 50% 0.426472s 90% 0.426472s 99% 0.426472s Write: 10% 0.230547s 30% 0.230547s 50% 0.230547s 90% 0.230547s 99% 0.230547s Write: 10% 0.484610s 30% 0.484610s 50% 0.484610s 90% 0.484610s 99% 0.484610s Write: 10% 0.459473s 30% 0.459473s 50% 0.459473s 90% Write: 10% 0.416641s 30% 0.416641s 50% 0.416641s 90% 0.416641s 99% 0.416641s Write: 10% 0.461421s 30% 0.461421s 50% 0.461421s 90% 0.461421s 99% 0.461421s 0.459473s 99% 0.459473s Write: 10% 0.190041s 30% 0.190041s 50% 0.190041s 90% 0.190041s 99% 0.190041s Write: 10% 0.214632s 30% 0.214632s 50% 0.214632s 90% 0.214632s 99% 0.214632s Write: 10% 0.142693s 30% 0.142693s 50% 0.142693s 90% 0.142693s 99% 0.142693s Write: 10% 0.134355s 30% 0.134355s 50% 0.134355s 90% 0.134355s 99% 0.134355s Write: 10% 0.286744s 30% 0.286744s 50% 0.286744s 90% 0.286744s 99% 0.286744s Write: 10% 0.239854s 30% 0.239854s 50% 0.239854s 90% 0.239854s 99% 0.239854s Write: 10% 0.322648s 30% 0.322648s 50% 0.322648s 90% 0.322648s 99% 0.322648s Write: 10% 0.366475s 30% 0.366475s 50% 0.366475s 90% 0.366475s 99% 0.366475s Write: 10% 0.465879s 30% 0.465879s 50% 0.465879s 90% 0.465879s 99% 0.465879s Write: 10% 0.557347s 30% 0.557347s 50% 0.557347s 90% 0.557347s 99% 0.557347s Write: 10% 0.578015s 30% 0.578015s 50% 0.578015s 90% 0.578015s 99% 0.578015s Write: 10% 0.639051s 30% 0.639051s 50% 0.639051s 90% 0.639051s 99% 0.639051s Write: 10% 0.686444s 30% 0.686444s 50% 0.686444s 90% 0.686444s 99% 0.686444s Write: 10% 0.634450s 30% 0.634450s 50% 0.634450s 90% 0.634450s 99% 0.634450s Write: 10% 0.624445s 30% 0.624445s 50% 0.624445s 90% 0.624445s 99% 0.624445s Write: 10% 0.671663s 30% 0.671663s 50% 0.671663s 90% 0.671663s 99% 0.671663s Write: 10% 0.653417s 30% 0.653417s 50% 0.653417s 90% 0.653417s 99% 0.653417s Write: 10% 0.637809s 30% 0.637809s 50% 0.637809s 90% 0.637809s 99% 0.637809s Write: 10% 0.615036s 30% 0.615036s 50% 0.615036s 90% 0.615036s 99% 0.615036s Write: 10% 0.662486s 30% 0.662486s 50% 0.662486s 90% 0.662486s 99% 0.662486s Write: 10% 0.662958s 30% 0.662958s 50% 0.662958s 90% 0.662958s 99% 0.662958s Write: 10% 0.657092s 30% 0.657092s 50% 0.657092s 90% 0.657092s 99% 0.657092s Write: 10% 0.668767s 30% 0.668767s 50% 0.668767s 90% 0.668767s 99% 0.668767s Write: 10% 0.664723s 30% 0.664723s 50% 0.664723s 90% 0.664723s 99% 0.664723s Write: 10% 0.674116s 30% 0.674116s 50% 0.674116s 90% 0.674116s 99% 0.674116s Write: 10% 0.710773s 30% 0.710773s 50% 0.710773s 90% 0.710773s 99% 0.710773s Write: 10% 0.676434s 30% 0.676434s 50% 0.676434s 90% 0.676434s 99% 0.676434s Write: 10% 0.791412s 30% 0.791412s 50% 0.791412s 90% 0.791412s 99% 0.791412s Write: 10% 0.747267s 30% 0.747267s 50% 0.747267s 90% 0.747267s 99% 0.747267s Write: 10% 0.720734s 30% 0.720734s 50% 0.720734s 90% 0.720734s 99% 0.720734s Write: 10% 0.681595s 30% 0.681595s 50% 0.681595s 90% 0.681595s 99% 0.681595s Write: 10% 0.717846s 30% 0.717846s 50% 0.717846s 90% 0.717846s 99% 0.717846s Write: 10% 0.735611s 30% 0.735611s 50% 0.735611s 90% 0.735611s 99% 
0.735611s Write: 10% 0.768671s 30% 0.768671s 50% 0.768671s 90% 0.768671s 99% 0.768671s Write: 10% 0.811560s 30% 0.811560s 50% 0.811560s 90% 0.811560s 99% 0.811560s Write: 10% 0.797439s 30% 0.797439s 50% 0.797439s 90% 0.797439s 99% 0.797439s Write: 10% 0.856547s 30% 0.856547s 50% 0.856547s 90% 0.856547s 99% 0.856547s Write: 10% Write: 10% 0.876195s 30% 0.876195s 50% 0.876195s 90% 0.876195s 99% 0.877380s 30% 0.877380s 50% 0.876195s0.877380s 90% 0.877380s 99% 0.877380s Write: 10% 0.887926s 30% 0.887926s 50% 0.887926s 90% 0.887926s 99% 0.887926s Write: 10% 0.945188s 30% 0.945188s 50% 0.945188s 90% 0.945188s 99% 0.945188s Write: 10% 0.839574s 30% 0.839574s 50% 0.839574s 90% 0.839574s 99% 0.839574s Write: 10% 0.936293s 30% 0.936293s 50% 0.936293s 90% 0.936293s 99% 0.936293s Write: 10% 0.906866s 30% 0.906866s 50% 0.906866s 90% 0.906866s 99% 0.906866s Write: 10% 0.915924s 30% 0.915924s 50% 0.915924s 90% 0.915924s 99% 0.915924s Write: 10% 0.931293s 30% 0.931293s 50% 0.931293s 90% 0.931293s 99% 0.931293s Write: 10% 0.891155s 30% 0.891155s 50% 0.891155s 90% 0.891155s 99% 0.891155s Write: 10% 0.911772s 30% 0.911772s 50% 0.911772s 90% 0.911772s 99% 0.911772s Write: 10% Write: 10% 0.875187s 30% 0.894050s 30% 0.894050s 50% 0.894050s 90% 0.894050s 99% 0.894050s 0.875187s 50% 0.875187s 90% 0.875187s 99% 0.875187s Write: 10% 0.906261s 30% 0.906261s 50% 0.906261s 90% 0.906261s 99% 0.906261s Update: 10% 0.754286s 30% 0.754286s 50% 0.797345s 90% 0.797345s 99% 0.797345s Read: 10% 1.912865s 30% 1.912865s 50% 1.912865s 90% 1.912865s 99% 1.912865s >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDeleteConnection |98.7%| [TM] {RESULT} ydb/tests/olap/high_load/unittest >> TabletService_Restart::Basics [GOOD] >> TabletService_Restart::OnlyAdminsAllowed >> QuoterWithKesusTest::ForbidsNotCanonizedQuoterPath [GOOD] >> QuoterWithKesusTest::ForbidsNotCanonizedResourcePath >> test_postgres.py::TestPostgresSuite::test_postgres_suite[float8] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[roles] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[roles] [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDeleteConnection [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendTestConnection >> test_postgres.py::TestPostgresSuite::test_postgres_suite[char] >> KqpService::RangeCache+UseCache [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[char] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[float4] >> TestFilterSet::CompilationValidation [GOOD] >> TestFormatHandler::ManyJsonClients >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendTestConnection [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendTestConnectionWithServiceAccount >> Etcd_KV::UpdateWithGetPrevious [GOOD] >> Etcd_KV::DeleteWithGetPrevious >> TGRpcRateLimiterTest::ListResources [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyRequiredGrpcApi >> test_postgres.py::TestPostgresSuite::test_postgres_suite[float4] [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendTestConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateBinding >> KqpPg::InsertNoTargetColumns_SerialNotNull-useSink [GOOD] >> Coordinator::RestoreTenantConfiguration-AlterDatabaseCreateHiveFirst-false [GOOD] >> TTxDataShardLocalKMeansScan::TooManyClusters [GOOD] >> TTxDataShardLocalKMeansScan::MainToPosting >> 
TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateBinding [GOOD] >> Coordinator::RestoreTenantConfiguration-AlterDatabaseCreateHiveFirst-true >> test_postgres.py::TestPostgresSuite::test_postgres_suite[numeric] >> TopicSessionTests::BadDataSessionError [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListBindings >> TabletService_Restart::OnlyAdminsAllowed [GOOD] >> TestFormatHandler::ManyJsonClients [GOOD] >> DataShardStats::HistogramStatsCorrect [GOOD] >> QuoterWithKesusTest::ForbidsNotCanonizedResourcePath [GOOD] >> KqpPg::PgCreateTable [GOOD] >> TopicSessionTests::WrongFieldType >> KqpPg::CreateTempTableSerial [GOOD] >> TSentinelTests::PDiskRackGuardFullRack [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListBindings [GOOD] >> TSentinelTests::PDiskFaultyGuard >> KqpPg::PgUpdate+useSink >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeBinding >> TestFormatHandler::ManyRawClients >> DataShardStats::BlobsStatsCorrect >> QuoterWithKesusTest::HandlesNonExistentResource >> KqpPg::DropSequence >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeBinding [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyBinding >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyBinding [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDeleteBinding >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDeleteBinding [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateQuery >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateQuery [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListQueries ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::InsertNoTargetColumns_SerialNotNull-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 1060, MsgBus: 9716 2025-06-24T21:30:45.922003Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631514905931215:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:30:45.926795Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ed0/r3tmp/tmpadKUDT/pdisk_1.dat 2025-06-24T21:30:46.393341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:30:46.393436Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:30:46.411171Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:46.416802Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631514905931006:2079] 1750800645910844 != 1750800645910847 2025-06-24T21:30:46.422302Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1060, node 1 2025-06-24T21:30:46.686481Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:30:46.686517Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:30:46.686524Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:30:46.686671Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:30:46.921701Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9716 TClient is connected to server localhost:9716 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:30:47.544487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:30:47.564747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:30:49.078296Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631532085800843:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:49.078302Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631532085800834:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:49.078490Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:49.082490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:30:49.095844Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631532085800848:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:30:49.180090Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631532085800899:2335] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:30:49.364162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) Trying to start YDB, gRPC: 2148, MsgBus: 20579 2025-06-24T21:30:50.791864Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631537822849996:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:30:50.814066Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ed0/r3tmp/tmpPKcQRo/pdisk_1.dat 2025-06-24T21:30:50.939238Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631537822849948:2079] 1750800650780329 != 1750800650780332 2025-06-24T21:30:50.958115Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:50.973373Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:30:50.973480Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:30:50.976274Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2148, node 2 2025-06-24T21:30:51.050925Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:30:51.050950Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:30:51.050959Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:30:51.051095Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20579 TClient is connected to server localhost:20579 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:30:51.483082Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:30:51.840853Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:30:53.946228Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631550707752460:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:53.946289Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631550707752473:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:53.946356Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:53.949793Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:30:53.964589Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631550707752483:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:30:54.052314Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631555002719831:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.m ... hard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:31:44.025333Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519631745506733945:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:31:44.025450Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:31:44.575745Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519631766981571046:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:44.575872Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:44.576753Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519631766981571058:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:44.584547Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:31:44.608479Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519631766981571060:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:31:44.692521Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519631766981571112:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:31:44.756604Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:44.919479Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:45.048524Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7519631766981571350:2322], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Missing not null column in input: c. All not null columns should be initialized, code: 2032 2025-06-24T21:31:45.051267Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=10&id=NjlkZDkwMTktZjZmOTc0ZWQtYWE1MGM2MzgtNjY3MDhmZTg=, ActorId: [10:7519631766981571348:2321], ActorState: ExecuteState, TraceId: 01jyhxn9f1apbza0jqbht7qtp7, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: Trying to start YDB, gRPC: 9294, MsgBus: 7551 2025-06-24T21:31:46.660572Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7519631777328406618:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:31:46.660703Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ed0/r3tmp/tmpUWHrYd/pdisk_1.dat 2025-06-24T21:31:46.848002Z node 11 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:46.848591Z node 11 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [11:7519631777328406598:2079] 1750800706659549 != 1750800706659552 2025-06-24T21:31:46.850710Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:46.850844Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:46.872617Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9294, node 11 2025-06-24T21:31:46.933623Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:31:46.933647Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:31:46.933660Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:31:46.933840Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7551 2025-06-24T21:31:47.675713Z node 11 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7551 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:31:47.890468Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:31:51.660777Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7519631777328406618:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:31:51.660900Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:31:52.179903Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519631803098210994:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:52.179975Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519631803098211014:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:52.180065Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:52.222978Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:31:52.238776Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7519631803098211031:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:31:52.332361Z node 11 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [11:7519631803098211082:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:31:52.375750Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:52.494177Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:52.599304Z node 11 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [11:7519631803098211316:2322], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Missing not null column in input: c. All not null columns should be initialized, code: 2032 2025-06-24T21:31:52.603091Z node 11 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=11&id=NzlmMzcxOWQtYjY1MGExNWItOWIyNzdjY2EtYzdhZjkwYTI=, ActorId: [11:7519631803098211314:2321], ActorState: ExecuteState, TraceId: 01jyhxngvn5ktvc6pky1n5pxtw, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: |98.7%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/join/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpService::RangeCache+UseCache [GOOD] Test command err: Trying to start YDB, gRPC: 16566, MsgBus: 18849 2025-06-24T21:23:34.510381Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629662843136098:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:34.510577Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00311a/r3tmp/tmpB3VbzZ/pdisk_1.dat 2025-06-24T21:23:35.291833Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:35.295285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:35.319357Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:35.329663Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:35.335288Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519629662843135907:2079] 1750800214480593 != 1750800214480596 2025-06-24T21:23:35.507720Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 16566, node 1 2025-06-24T21:23:35.769935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:35.769955Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:35.769970Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:35.770102Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18849 TClient is connected to server localhost:18849 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:23:37.046681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:37.131927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:23:37.147590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:37.358523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:23:37.565565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:37.656061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:23:38.920159Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629680023006748:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:38.920318Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:39.511303Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519629662843136098:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:39.526171Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:23:40.081428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:40.156821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:40.189406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:40.233539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:40.274024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:40.352674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:40.427515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:23:40.562145Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519629688612942014:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:40.562209Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:40.562335Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519629688612942019:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:23:40.568276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:23:40.581623Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519629688612942021:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:23:40.648337Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519629688612942074:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:23:42.155297Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=1&id=M2M0ZjhiZTctOGM1MzUzMWUtZmVkZTVkZTQtMjY4MjgwNTI=, ActorId: [1:7519629680023006730:2366], ActorState: ReadyState, Session closed due to explicit close event 2025-06-24T21:23:42.155360Z node 1 :KQP_SESSION INFO: kq ... 32094Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7519631690673865761:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:31:26.832190Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00311a/r3tmp/tmpsbRKf7/pdisk_1.dat 2025-06-24T21:31:27.070240Z node 8 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:27.072565Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7519631690673865745:2079] 1750800686831294 != 1750800686831297 2025-06-24T21:31:27.090717Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:27.090856Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:27.095034Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5784, node 8 2025-06-24T21:31:27.208110Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:31:27.208153Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:31:27.208169Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:31:27.208385Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16268 2025-06-24T21:31:27.856107Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16268 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:31:28.150000Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:31:28.157088Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:31:28.167740Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:31:28.272255Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:31:28.569833Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:31:28.711378Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:31:31.832388Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[8:7519631690673865761:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:31:31.832488Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:31:33.659188Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519631720738638469:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:33.659349Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:33.741277Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:33.789415Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:33.845278Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:33.926956Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:34.104055Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:34.163039Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:34.259052Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:34.402906Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519631725033606446:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:34.403026Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:34.403674Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519631725033606451:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:34.411700Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:31:34.440197Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [8:7519631725033606453:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:31:34.507355Z node 8 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [8:7519631725033606504:3432] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:31:42.062890Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:31:42.062932Z node 8 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded took: 14.951098s took: 14.937297s took: 14.961026s took: 14.961421s took: 14.942960s took: 14.948161s took: 14.969910s took: 14.971186s took: 14.957139s took: 14.979079s >> TIndexProcesorTests::TestOver1000Queues [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/tablet/ut/unittest >> TabletService_Restart::OnlyAdminsAllowed [GOOD] Test command err: 2025-06-24T21:31:10.088853Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:10.089361Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:31:10.089576Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0018b7/r3tmp/tmpF5Newu/pdisk_1.dat 2025-06-24T21:31:10.891964Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:31:10.921730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:31:11.002822Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:11.031712Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750800666733582 != 1750800666733586 2025-06-24T21:31:11.091348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:11.091559Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:11.106756Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected ... reading schema ... changing schema (dry run) ... reading schema ... changing schema ... reading schema 2025-06-24T21:31:16.107407Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:16.107720Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:31:16.107836Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0018b7/r3tmp/tmprQSIu2/pdisk_1.dat 2025-06-24T21:31:16.399450Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-24T21:31:16.401144Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:31:16.438607Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:16.441839Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1750800672912587 != 1750800672912591 2025-06-24T21:31:16.494054Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:16.494198Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:16.506960Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected ... reading schema (without token) ... reading schema (non-admin token) ... reading schema (admin token) 2025-06-24T21:31:20.800486Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:258:2304], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:20.800865Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:31:20.800963Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0018b7/r3tmp/tmpCip9bZ/pdisk_1.dat 2025-06-24T21:31:21.089375Z node 3 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 3 Type# 268639257 2025-06-24T21:31:21.091232Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:31:21.121743Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:21.123900Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:32:2079] 1750800677395547 != 1750800677395551 2025-06-24T21:31:21.171369Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:21.171538Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:21.183629Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:25.082529Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:273:2316], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:25.082939Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:31:25.083065Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0018b7/r3tmp/tmpEghvDr/pdisk_1.dat 2025-06-24T21:31:25.367366Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 2025-06-24T21:31:25.369418Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:31:25.417941Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:25.419970Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:32:2079] 1750800681894414 != 1750800681894417 2025-06-24T21:31:25.469555Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:25.469711Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:25.483665Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:29.905293Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:29.905808Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:31:29.905987Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0018b7/r3tmp/tmpHAQP23/pdisk_1.dat 2025-06-24T21:31:30.221748Z node 5 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 5 Type# 268639257 2025-06-24T21:31:30.223650Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:31:30.257544Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:30.260024Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:32:2079] 1750800686225790 != 1750800686225794 2025-06-24T21:31:30.308026Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:30.308192Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:30.320064Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:34.835381Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:258:2304], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:34.835915Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:31:34.836018Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0018b7/r3tmp/tmp3Eo8tq/pdisk_1.dat 2025-06-24T21:31:35.208258Z node 6 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 6 Type# 268639257 2025-06-24T21:31:35.209691Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:31:35.256487Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:35.259065Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:32:2079] 1750800690963504 != 1750800690963508 2025-06-24T21:31:35.310562Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:35.310736Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:35.324385Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:40.240182Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:271:2314], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:40.240744Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:31:40.240825Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0018b7/r3tmp/tmps8lJRp/pdisk_1.dat 2025-06-24T21:31:40.684044Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 2025-06-24T21:31:40.686156Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:31:40.747805Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:40.750602Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:32:2079] 1750800696181546 != 1750800696181549 2025-06-24T21:31:40.802223Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:40.802410Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:40.814452Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:45.431666Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:288:2329], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:45.432200Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:31:45.432396Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0018b7/r3tmp/tmpikewSv/pdisk_1.dat 2025-06-24T21:31:45.736134Z node 8 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 8 Type# 268639257 2025-06-24T21:31:45.739007Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:31:45.776394Z node 8 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:45.779386Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:32:2079] 1750800701597307 != 1750800701597311 2025-06-24T21:31:45.827482Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:45.827659Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:45.839521Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:50.437431Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:258:2304], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:50.437789Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:31:50.437930Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0018b7/r3tmp/tmpM8TZW7/pdisk_1.dat 2025-06-24T21:31:50.828041Z node 9 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 9 Type# 268639257 2025-06-24T21:31:50.830181Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:31:50.874493Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:50.877361Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:32:2079] 1750800706635589 != 1750800706635593 2025-06-24T21:31:50.929601Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:50.929787Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:50.944613Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected ... restarting tablet 72057594046644480 2025-06-24T21:31:51.093111Z node 9 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:51.239831Z node 9 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:31:56.156002Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:267:2310], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:56.156504Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:31:56.156590Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0018b7/r3tmp/tmpHzyK4u/pdisk_1.dat 2025-06-24T21:31:56.483888Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 2025-06-24T21:31:56.486020Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:31:56.524743Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:56.527435Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:32:2079] 1750800712081684 != 1750800712081687 2025-06-24T21:31:56.576546Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:56.576721Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:56.588585Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected ... restarting tablet 72057594046644480 (without token) ... restarting tablet 72057594046644480 (non-admin token) 2025-06-24T21:31:56.808489Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ... restarting tablet 72057594046644480 (admin token) 2025-06-24T21:31:56.979199Z node 10 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded |98.8%| [TA] $(B)/ydb/core/statistics/service/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListQueries [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeQuery >> test_tpch_import.py::TestS3TpchImport::test_import_and_export |98.8%| [TM] {RESULT} ydb/core/grpc_services/tablet/ut/unittest |98.8%| [TA] {RESULT} $(B)/ydb/core/statistics/service/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TSentinelTests::PDiskFaultyGuard [GOOD] >> TSentinelTests::PDiskFaultyGuardWithForced >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeQuery [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendGetQueryStatus >> TTxDataShardLocalKMeansScan::MainToPosting [GOOD] >> TTxDataShardLocalKMeansScan::MainToBuild >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendGetQueryStatus [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyQuery >> TSentinelTests::PDiskFaultyGuardWithForced [GOOD] >> TSentinelTests::BSControllerUnresponsive >> TGRpcRateLimiterTest::AcquireResourceManyRequiredGrpcApi [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyRequiredActorApi >> TestFormatHandler::ManyRawClients [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/ymq/actor/yc_search_ut/unittest >> TIndexProcesorTests::TestOver1000Queues [GOOD] Test command err: 2025-06-24T21:31:09.802651Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631615894963771:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:31:09.803200Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001678/r3tmp/tmpMAM4LK/pdisk_1.dat 2025-06-24T21:31:10.345641Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:10.345777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:10.350867Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:10.351509Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:10.353367Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631615894963744:2079] 1750800669756188 != 1750800669756191 TServer::EnableGrpc on GrpcPort 65229, node 1 2025-06-24T21:31:10.495852Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:31:10.495879Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:31:10.495887Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:31:10.496019Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:31:10.800961Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14605 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:31:10.996325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:31:11.061285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 TClient is connected to server localhost:14605 waiting... 2025-06-24T21:31:13.242440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:13.244254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) TClient is connected to server localhost:14605 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750800671081 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "SQS" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750800671102 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: ".sys" PathId: 184467... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-24T21:31:13.721035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) Error 1: Check failed: path: '/Root/SQS', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeDir, state: EPathStateNoChanges) 2025-06-24T21:31:13.731496Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631633074833805:2465] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/SQS\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } TClient is connected to server localhost:14605 waiting... 2025-06-24T21:31:13.992055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710664, at schemeshard: 72057594046644480 2025-06-24T21:31:14.036851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:14.040013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) ===Execute query: UPSERT INTO `/Root/SQS/SingleCreateQueueEvent/.Events` (Account, QueueName, EventType, CustomQueueName, EventTimestamp, FolderId, Labels) VALUES ("cloud1", "queue1", 1, "myQueueCustomName", 1750800673755, "myFolder", "{\"k1\": \"v1\"}"); 2025-06-24T21:31:14.249040Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631637369801271:2323], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:14.249195Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:14.249319Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631637369801279:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:14.262861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710667:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:31:14.280378Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631637369801285:2327], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710667 completed, doublechecking } 2025-06-24T21:31:14.338039Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631637369801336:2617] txid# 281474976710668, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:31:14.767326Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631615894963771:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:31:14.767424Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:31:15.516307Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710669. Ctx: { TraceId: 01jyhxmbdxctrhnc3n8w8y3f91, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:16.172268Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710670. Ctx: { TraceId: 01jyhxmcsx22471mfmr2g93x9e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjQyMzdjMWMtNjI3ZTk0MWYtZGEzMTQ5OS01Yjk5ODY4Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:16.393780Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710671. Ctx: { TraceId: 01jyhxmdbs11ee3zx1s4kssk4x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2NlM2RkZjMtNGUxYzlhMGUtNTc5YTk4ODItYTdlMmYwYjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:16.560185Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710672. Ctx: { TraceId: 01jyhxmdhj8bmykx2afyqh8pe0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGE2YWJlZmQtODAwODlhMTYtZGE2ZjdjNGUtOTgwNTFiZ ... p:120: TxId: 281474976710776. Ctx: { TraceId: 01jyhxn1nz38sg2dzgf5eqst10, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:38.530797Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710777. Ctx: { TraceId: 01jyhxn34p807xs6k54xv70q63, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:38.661332Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710778. Ctx: { TraceId: 01jyhxn34p807xs6k54xv70q63, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:39.979909Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710779. Ctx: { TraceId: 01jyhxn4hy5gfnhcj7vwc6mq89, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:31:40.092940Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710780. Ctx: { TraceId: 01jyhxn4hy5gfnhcj7vwc6mq89, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:41.492328Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710781. Ctx: { TraceId: 01jyhxn61720d1b369tqfq0nxq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:41.584740Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710782. Ctx: { TraceId: 01jyhxn61720d1b369tqfq0nxq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:43.044119Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710783. Ctx: { TraceId: 01jyhxn7hj8nx1qz7rnrdzgcky, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:43.188778Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710784. Ctx: { TraceId: 01jyhxn7hj8nx1qz7rnrdzgcky, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:44.482647Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710785. Ctx: { TraceId: 01jyhxn8yr56yhwkkgz4eaa6j6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:44.567663Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710786. Ctx: { TraceId: 01jyhxn8yr56yhwkkgz4eaa6j6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:46.016203Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710787. Ctx: { TraceId: 01jyhxnaej30g5xaye00rnk3tv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:46.121660Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710788. Ctx: { TraceId: 01jyhxnaej30g5xaye00rnk3tv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:47.224768Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710789. Ctx: { TraceId: 01jyhxnbmc4w0fsd967hcmjc83, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:31:47.325456Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710790. Ctx: { TraceId: 01jyhxnbmc4w0fsd967hcmjc83, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:48.606559Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710791. Ctx: { TraceId: 01jyhxnczgf6syf9tk84ssnkys, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:48.737038Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710792. Ctx: { TraceId: 01jyhxnczgf6syf9tk84ssnkys, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:50.081328Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710793. Ctx: { TraceId: 01jyhxned3f236hfgg0nn5phkb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:50.219413Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710794. Ctx: { TraceId: 01jyhxned3f236hfgg0nn5phkb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:51.521703Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710795. Ctx: { TraceId: 01jyhxnftmbbkfmrwxhykahk07, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:51.622683Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710796. Ctx: { TraceId: 01jyhxnftmbbkfmrwxhykahk07, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:52.900890Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710797. Ctx: { TraceId: 01jyhxnh5nbf2rnw04scrzf4fz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:53.012096Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710798. Ctx: { TraceId: 01jyhxnh5nbf2rnw04scrzf4fz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:54.149110Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710799. Ctx: { TraceId: 01jyhxnjctdskymf39j77wc6ya, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:31:54.257291Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710800. Ctx: { TraceId: 01jyhxnjctdskymf39j77wc6ya, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:55.416652Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710801. Ctx: { TraceId: 01jyhxnkm8bwzm303zy0r62r8y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:55.522188Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710802. Ctx: { TraceId: 01jyhxnkm8bwzm303zy0r62r8y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:56.278111Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710803. Ctx: { TraceId: 01jyhxnmfaep9g9zbz4g3m2hfj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:56.880495Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710804. Ctx: { TraceId: 01jyhxnmjta27pmqpfykwtc4fc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YThjODk3NDAtZjI0ZGExYzQtZWEwMzNmOTUtOTU4YjUwYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:58.165325Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710805. Ctx: { TraceId: 01jyhxnp2959ne0zknjpknxbnk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTYyZWM2MWMtNDlhMjRmNzUtYzQwODMzZDktMTQyMzM0Y2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:58.249540Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710806. Ctx: { TraceId: 01jyhxnpd7by1trfzwc2w13d6e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTYyZWM2MWMtNDlhMjRmNzUtYzQwODMzZDktMTQyMzM0Y2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:58.317899Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710807. Ctx: { TraceId: 01jyhxnpfcd7cdyhmnvx5sev71, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTYyZWM2MWMtNDlhMjRmNzUtYzQwODMzZDktMTQyMzM0Y2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:58.327252Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710808. Ctx: { TraceId: 01jyhxnpfn0exjmjgf025e28qs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTYyZWM2MWMtNDlhMjRmNzUtYzQwODMzZDktMTQyMzM0Y2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:31:58.509047Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710809. Ctx: { TraceId: 01jyhxnpfy5ehh77pw44s35dc0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmQ0MTdjY2EtMzhiNDI3MDMtNDcyOGY2MzAtYTA4MjM2ZTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root |98.8%| [TM] {RESULT} ydb/core/ymq/actor/yc_search_ut/unittest >> test_http_api.py::TestHttpApi::test_restart_idempotency [GOOD] >> test_http_api.py::TestHttpApi::test_simple_streaming_query >> TestFormatHandler::ClientValidation >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyQuery [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDeleteQuery >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDeleteQuery [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendControlQuery >> QuoterWithKesusTest::HandlesNonExistentResource [GOOD] >> QuoterWithKesusTest::HandlesAllRequestsForNonExistentResource >> KqpPg::PgUpdate+useSink [GOOD] >> KqpPg::PgUpdate-useSink >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendControlQuery [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendGetResultData >> test_http_api.py::TestHttpApi::test_simple_streaming_query [GOOD] >> test_http_api.py::TestHttpApi::test_integral_results >> HttpRouter::Basic [GOOD] |98.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/public_http/ut/unittest >> HttpRouter::Basic [GOOD] >> KqpBatchDelete::ManyPartitions_3 [GOOD] >> test.py::test[solomon-BadDownsamplingAggregation-] |98.8%| [TS] {RESULT} ydb/core/public_http/ut/unittest >> ServerRestartTest::RestartOnGetSession >> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendGetResultData [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListJobs >> TopicSessionTests::WrongFieldType [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListJobs [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeJob >> TopicSessionTests::RestartSessionIfNewClientWithOffset >> KqpPg::DropSequence [GOOD] >> KqpPg::DeleteWithQueryService+useSink >> test_postgres.py::TestPostgresSuite::test_postgres_suite[numeric] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[name] >> test_http_api.py::TestHttpApi::test_integral_results [GOOD] >> test_http_api.py::TestHttpApi::test_optional_results ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::ManyPartitions_3 [GOOD] Test command err: Trying to start YDB, gRPC: 15570, MsgBus: 15834 2025-06-24T21:29:10.412969Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631104533363897:2169];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:10.428489Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002bb0/r3tmp/tmpCdZheV/pdisk_1.dat 2025-06-24T21:29:10.884287Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631104533363757:2079] 1750800550361444 != 1750800550361447 2025-06-24T21:29:10.966501Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:10.981367Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:10.981481Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:10.989478Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15570, node 1 2025-06-24T21:29:11.139331Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:11.139358Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:11.139583Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:11.139716Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:11.415178Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15834 TClient is connected to server localhost:15834 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.059228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:12.104156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:29:12.113975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.305100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:29:12.541397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.658643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.301056Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631121713234581:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.301183Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.800302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.874138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.957066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.993410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.037931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.118355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.159490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.228558Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631126008202548:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.228649Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.228914Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631126008202553:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.233643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:15.250107Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631126008202555:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:15.348278Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631126008202608:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:15.401326Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631104533363897:2169];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.401381Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:16.626351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... s.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519631783261757260:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:31:48.182320Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002bb0/r3tmp/tmpFuCCsU/pdisk_1.dat 2025-06-24T21:31:48.426704Z node 12 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:48.443314Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:7519631783261757239:2079] 1750800708180850 != 1750800708180853 2025-06-24T21:31:48.447964Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:48.448113Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:48.451419Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8731, node 12 2025-06-24T21:31:48.519923Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:31:48.519954Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:31:48.519972Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:31:48.520179Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6689 2025-06-24T21:31:49.203427Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6689 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:31:49.681479Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:31:49.692529Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:31:49.718229Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:31:49.938632Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:31:50.263407Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:31:50.452668Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:31:53.183283Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519631783261757260:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:31:53.183392Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:31:54.445482Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519631809031562650:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:54.445611Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:54.617774Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:54.673863Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:54.763982Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:54.832548Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:54.895485Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:54.963834Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:55.043069Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:55.233600Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519631813326530626:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:55.233726Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:55.233772Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519631813326530631:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:55.240398Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:31:55.256357Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519631813326530633:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:31:55.348289Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7519631813326530684:3432] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:31:57.162788Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... >> test_liveness_wardens.py::TestLivenessWarden::test_hive_liveness_warden_reports_issues >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeJob [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateConnection >> test_postgres.py::TestPostgresSuite::test_postgres_suite[name] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[int2] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds [GOOD] Test command err: 2025-06-24T21:29:58.106791Z :WriteRAW INFO: Random seed for debugging is 1750800598106756 2025-06-24T21:29:58.454324Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631310462498723:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:58.456103Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:58.512367Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631313150500152:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:58.512451Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049c4/r3tmp/tmpkMpPXz/pdisk_1.dat 2025-06-24T21:29:58.715461Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:58.725668Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:58.962689Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:58.962787Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:58.965113Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:58.965220Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:58.971578Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:58.986187Z node 1 :HIVE WARN: hive_impl.cpp:781: 
HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:29:58.986342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:58.988187Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:59.016901Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 5114, node 1 2025-06-24T21:29:59.085897Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0049c4/r3tmp/yandexVBgCCn.tmp 2025-06-24T21:29:59.085936Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0049c4/r3tmp/yandexVBgCCn.tmp 2025-06-24T21:29:59.086093Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0049c4/r3tmp/yandexVBgCCn.tmp 2025-06-24T21:29:59.086290Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:59.148900Z INFO: TTestServer started on Port 14240 GrpcPort 5114 TClient is connected to server localhost:14240 PQClient connected to localhost:5114 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:29:59.491885Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:59.517039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:29:59.528899Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... waiting... waiting... waiting... 2025-06-24T21:30:01.847051Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631323347401564:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:01.847220Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:01.847527Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631323347401588:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:01.851627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:30:01.888931Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631323347401590:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-24T21:30:02.153235Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631323347401682:2682] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:30:02.180862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:30:02.185610Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631327642368998:2308], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:30:02.185938Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OWY4MmEwZjUtODgxMTVmNzItN2JmOTIyOTgtYzBhNzJmZTI=, ActorId: [1:7519631323347401558:2296], ActorState: ExecuteState, TraceId: 01jyhxj4qa5er3mnjxtx1rdcab, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:30:02.184598Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519631326035402325:2274], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:30:02.186684Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MzUyOWM2OWItOTFlNzk4NTYtOTExMTE0MmYtOTA2Yjg1MGE=, ActorId: [2:7519631326035402293:2268], ActorState: ExecuteState, TraceId: 01jyhxj4sm63pz5ybvhfmtj4ny, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:30:02.188350Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:30:02.188331Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:30:02.353821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:30:02.507337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:5114", true, true, 1000); 2025-06-24T21:30:02.834852Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { ... 
AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-24T21:32:00.791431Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [15:7519631836907823360:2454] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-06-24T21:32:00.795504Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [15:7519631836907823360:2454] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-24T21:32:01.101830Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [15:7519631836907823360:2454] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-24T21:32:01.102719Z node 15 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [15:7519631841202790709:2454] connected; active server actors: 1 2025-06-24T21:32:01.102845Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [15:7519631836907823360:2454] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-24T21:32:01.102868Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [15:7519631836907823360:2454] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-24T21:32:01.104452Z node 15 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [15:7519631841202790709:2454] disconnected; active server actors: 1 2025-06-24T21:32:01.104492Z node 15 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [15:7519631841202790709:2454] disconnected no session 2025-06-24T21:32:01.260405Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [15:7519631836907823360:2454] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-24T21:32:01.260453Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [15:7519631836907823360:2454] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-24T21:32:01.260478Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [15:7519631836907823360:2454] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-24T21:32:01.260512Z node 15 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-24T21:32:01.261949Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2885: [PQ: 72075186224037892] server connected, pipe [15:7519631841202790740:2454], now have 1 active actors on pipe 2025-06-24T21:32:01.262569Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T21:32:01.262610Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T21:32:01.262703Z node 16 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|caff20f8-84729f1a-a16d4135-7e02bea4_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-24T21:32:01.262829Z node 16 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-24T21:32:01.262885Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T21:32:01.262251Z node 15 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 16, Generation: 1 2025-06-24T21:32:01.263736Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-24T21:32:01.263766Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2799: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-24T21:32:01.263849Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-24T21:32:01.264283Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|caff20f8-84729f1a-a16d4135-7e02bea4_0 2025-06-24T21:32:01.265244Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750800721265 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:32:01.265398Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|caff20f8-84729f1a-a16d4135-7e02bea4_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-24T21:32:01.265592Z :INFO: [] MessageGroupId [src] SessionId [src|caff20f8-84729f1a-a16d4135-7e02bea4_0] Write session: close. 
Timeout = 0 ms 2025-06-24T21:32:01.265647Z :INFO: [] MessageGroupId [src] SessionId [src|caff20f8-84729f1a-a16d4135-7e02bea4_0] Write session will now close 2025-06-24T21:32:01.265694Z :DEBUG: [] MessageGroupId [src] SessionId [src|caff20f8-84729f1a-a16d4135-7e02bea4_0] Write session: aborting 2025-06-24T21:32:01.266150Z :INFO: [] MessageGroupId [src] SessionId [src|caff20f8-84729f1a-a16d4135-7e02bea4_0] Write session: gracefully shut down, all writes complete 2025-06-24T21:32:01.266204Z :DEBUG: [] MessageGroupId [src] SessionId [src|caff20f8-84729f1a-a16d4135-7e02bea4_0] Write session: destroy 2025-06-24T21:32:01.268206Z node 15 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|caff20f8-84729f1a-a16d4135-7e02bea4_0 grpc read done: success: 0 data: 2025-06-24T21:32:01.268233Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|caff20f8-84729f1a-a16d4135-7e02bea4_0 grpc read failed 2025-06-24T21:32:01.268266Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|caff20f8-84729f1a-a16d4135-7e02bea4_0 grpc closed 2025-06-24T21:32:01.268289Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|caff20f8-84729f1a-a16d4135-7e02bea4_0 is DEAD 2025-06-24T21:32:01.269724Z node 15 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T21:32:01.270250Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [15:7519631841202790740:2454] destroyed 2025-06-24T21:32:01.270317Z node 16 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-24T21:32:01.313204Z :INFO: [/Root] [/Root] [82b4b1ba-706ec185-bd4684f6-627584b0] Starting read session 2025-06-24T21:32:01.313260Z :DEBUG: [/Root] [/Root] [82b4b1ba-706ec185-bd4684f6-627584b0] Starting cluster discovery 2025-06-24T21:32:01.313511Z :INFO: [/Root] [/Root] [82b4b1ba-706ec185-bd4684f6-627584b0] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:11591: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:11591
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:11591. " 2025-06-24T21:32:01.313556Z :DEBUG: [/Root] [/Root] [82b4b1ba-706ec185-bd4684f6-627584b0] Restart cluster discovery in 0.008470s 2025-06-24T21:32:01.322357Z :DEBUG: [/Root] [/Root] [82b4b1ba-706ec185-bd4684f6-627584b0] Starting cluster discovery 2025-06-24T21:32:01.322721Z :INFO: [/Root] [/Root] [82b4b1ba-706ec185-bd4684f6-627584b0] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:11591: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:11591
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:11591. " 2025-06-24T21:32:01.322781Z :DEBUG: [/Root] [/Root] [82b4b1ba-706ec185-bd4684f6-627584b0] Restart cluster discovery in 0.016119s 2025-06-24T21:32:01.339451Z :DEBUG: [/Root] [/Root] [82b4b1ba-706ec185-bd4684f6-627584b0] Starting cluster discovery 2025-06-24T21:32:01.339667Z :INFO: [/Root] [/Root] [82b4b1ba-706ec185-bd4684f6-627584b0] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:11591: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:11591
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:11591. " 2025-06-24T21:32:01.339703Z :DEBUG: [/Root] [/Root] [82b4b1ba-706ec185-bd4684f6-627584b0] Restart cluster discovery in 0.038633s 2025-06-24T21:32:01.379871Z :DEBUG: [/Root] [/Root] [82b4b1ba-706ec185-bd4684f6-627584b0] Starting cluster discovery 2025-06-24T21:32:01.380223Z :NOTICE: [/Root] [/Root] [82b4b1ba-706ec185-bd4684f6-627584b0] Aborting read session. Description: SessionClosed { Status: TRANSPORT_UNAVAILABLE Issues: "
: Error: Failed to discover clusters
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:11591: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:11591
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:11591. " } 2025-06-24T21:32:01.380547Z :NOTICE: [/Root] [/Root] [82b4b1ba-706ec185-bd4684f6-627584b0] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } SessionClosed { Status: TRANSPORT_UNAVAILABLE Issues: "
: Error: Failed to discover clusters
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:11591: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:11591
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:11591. " } 2025-06-24T21:32:01.380694Z :INFO: [/Root] [/Root] [82b4b1ba-706ec185-bd4684f6-627584b0] Closing read session. Close timeout: 0.000000s 2025-06-24T21:32:01.380850Z :NOTICE: [/Root] [/Root] [82b4b1ba-706ec185-bd4684f6-627584b0] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:32:02.468446Z node 15 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710687, task: 1, CA Id [15:7519631845497758102:2479]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-06-24T21:32:02.503244Z node 15 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710687, task: 1, CA Id [15:7519631845497758102:2479]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T21:32:02.548142Z node 15 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710687, task: 1, CA Id [15:7519631845497758102:2479]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T21:32:02.619298Z node 15 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710687, task: 1, CA Id [15:7519631845497758102:2479]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 >> TestFormatHandler::ClientValidation [GOOD] >> TCheckpointCoordinatorTests::ShouldTriggerCheckpointWithSource >> TCheckpointCoordinatorTests::ShouldTriggerCheckpointWithSource [GOOD] >> TCheckpointCoordinatorTests::ShouldTriggerCheckpointWithSourcesAndWithChannel >> TestFormatHandler::ClientError >> TCheckpointCoordinatorTests::ShouldTriggerCheckpointWithSourcesAndWithChannel [GOOD] >> TCheckpointCoordinatorTests::ShouldAllSnapshots >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateConnection [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateConnectionWithServiceAccount >> TCheckpointCoordinatorTests::ShouldAllSnapshots [GOOD] >> TCheckpointCoordinatorTests::Should2Increments1Snapshot >> TCheckpointCoordinatorTests::Should2Increments1Snapshot [GOOD] >> TCheckpointCoordinatorTests::ShouldAbortPreviousCheckpointsIfNodeStateCantBeSaved >> TCheckpointCoordinatorTests::ShouldAbortPreviousCheckpointsIfNodeStateCantBeSaved [GOOD] >> test_alloc_default.py::TestAlloc::test_default_limits[kikimr0] [GOOD] >> test_http_api.py::TestHttpApi::test_optional_results [GOOD] >> test_http_api.py::TestHttpApi::test_pg_results >> test_postgres.py::TestPostgresSuite::test_postgres_suite[int2] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[comments] >> test_ctas.py::TestYtCtas::test_simple_ctast >> TGRpcRateLimiterTest::AcquireResourceManyRequiredActorApi [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyRequiredGrpcApiWithCancelAfter >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListConnections >> CompressExecutor::TestReorderedExecutor [GOOD] >> CompressExecutor::TestExecutorMemUsage ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpointing/ut/unittest >> TCheckpointCoordinatorTests::ShouldAbortPreviousCheckpointsIfNodeStateCantBeSaved [GOOD] Test command err: 2025-06-24T21:32:06.251921Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:72: [my-graph-id.42] TEvReadyState, streaming disposition { }, state load mode FROM_LAST_CHECKPOINT 2025-06-24T21:32:06.252199Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:113: [my-graph-id.42] Send TEvRegisterCoordinatorRequest Waiting for TEvRegisterCoordinatorRequest (storage) 2025-06-24T21:32:06.252447Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:130: [my-graph-id.42] Got TEvRegisterCoordinatorResponse; issues: 2025-06-24T21:32:06.252494Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:139: [my-graph-id.42] 
Successfully registered in storage 2025-06-24T21:32:06.252529Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:140: [my-graph-id.42] Send TEvNewCheckpointCoordinator to 3 actor(s) 2025-06-24T21:32:06.254028Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:148: [my-graph-id.42] Send TEvGetCheckpointsMetadataRequest; state load mode: FROM_LAST_CHECKPOINT; load graph: 0 Waiting for TEvGetCheckpointsMetadataRequest (storage) 2025-06-24T21:32:06.271565Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:185: [my-graph-id.42] Got TEvGetCheckpointsMetadataResponse 2025-06-24T21:32:06.271645Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:211: [my-graph-id.42] Found no checkpoints to restore from, creating a 'zero' checkpoint 2025-06-24T21:32:06.271682Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:348: [my-graph-id.42] [42:1] Registering new checkpoint in storage Waiting for TEvCreateCheckpointRequest (storage) 2025-06-24T21:32:06.292054Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:388: [my-graph-id.42] [42:1] Got TEvCreateCheckpointResponse 2025-06-24T21:32:06.292133Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:434: [my-graph-id.42] [42:1] Checkpoint successfully created, going to inject barriers to 1 actor(s) 2025-06-24T21:32:06.292210Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:440: [my-graph-id.42] [42:1] Send TEvRun to all actors Waiting for TEvInjectCheckpointBarrier (ingress) 2025-06-24T21:32:06.292371Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-24T21:32:06.292412Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:1] Task state saved, need 2 more acks 2025-06-24T21:32:06.292459Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-24T21:32:06.292513Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:1] Task state saved, need 1 more acks 2025-06-24T21:32:06.292559Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-24T21:32:06.292597Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:1] Task state saved, need 0 more acks 2025-06-24T21:32:06.292641Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:484: [my-graph-id.42] [42:1] Got all acks, changing checkpoint status to 'PendingCommit' Waiting for TEvSetCheckpointPendingCommitStatusRequest (storage) 2025-06-24T21:32:06.292720Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:496: [my-graph-id.42] [42:1] Got TEvSetCheckpointPendingCommitStatusResponse 2025-06-24T21:32:06.292759Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:511: [my-graph-id.42] [42:1] Checkpoint status changed to 'PendingCommit', committing states Waiting for TEvCommitChanges (ingress) Waiting for TEvCommitChanges (egress) 2025-06-24T21:32:06.293021Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:527: [my-graph-id.42] [42:1] Got TEvStateCommitted; task: 1 
2025-06-24T21:32:06.293069Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:536: [my-graph-id.42] [42:1] State committed [1:6:2053], need 1 more acks 2025-06-24T21:32:06.293115Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:527: [my-graph-id.42] [42:1] Got TEvStateCommitted; task: 3 2025-06-24T21:32:06.293155Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:536: [my-graph-id.42] [42:1] State committed [1:8:2055], need 0 more acks 2025-06-24T21:32:06.293185Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:538: [my-graph-id.42] [42:1] Got all acks, changing checkpoint status to 'Completed' Waiting for TEvCompleteCheckpointRequest (storage) 2025-06-24T21:32:06.293255Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:549: [my-graph-id.42] [42:1] Got TEvCompleteCheckpointResponse 2025-06-24T21:32:06.293296Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:564: [my-graph-id.42] [42:1] Checkpoint completed 2025-06-24T21:32:06.390652Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:72: [my-graph-id.42] TEvReadyState, streaming disposition { }, state load mode FROM_LAST_CHECKPOINT 2025-06-24T21:32:06.390818Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:113: [my-graph-id.42] Send TEvRegisterCoordinatorRequest Waiting for TEvRegisterCoordinatorRequest (storage) 2025-06-24T21:32:06.390924Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:130: [my-graph-id.42] Got TEvRegisterCoordinatorResponse; issues: 2025-06-24T21:32:06.390962Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:139: [my-graph-id.42] Successfully registered in storage 2025-06-24T21:32:06.391026Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:140: [my-graph-id.42] Send TEvNewCheckpointCoordinator to 3 actor(s) 2025-06-24T21:32:06.391090Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:148: [my-graph-id.42] Send TEvGetCheckpointsMetadataRequest; state load mode: FROM_LAST_CHECKPOINT; load graph: 0 Waiting for TEvGetCheckpointsMetadataRequest (storage) 2025-06-24T21:32:06.391467Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:185: [my-graph-id.42] Got TEvGetCheckpointsMetadataResponse 2025-06-24T21:32:06.391504Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:211: [my-graph-id.42] Found no checkpoints to restore from, creating a 'zero' checkpoint 2025-06-24T21:32:06.391541Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:348: [my-graph-id.42] [42:1] Registering new checkpoint in storage Waiting for TEvCreateCheckpointRequest (storage) 2025-06-24T21:32:06.391675Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:388: [my-graph-id.42] [42:1] Got TEvCreateCheckpointResponse 2025-06-24T21:32:06.391718Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:434: [my-graph-id.42] [42:1] Checkpoint successfully created, going to inject barriers to 1 actor(s) 2025-06-24T21:32:06.391766Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:440: [my-graph-id.42] [42:1] Send TEvRun to all actors Waiting for TEvInjectCheckpointBarrier (ingress) 2025-06-24T21:32:06.391871Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 
100 2025-06-24T21:32:06.391926Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:1] Task state saved, need 2 more acks 2025-06-24T21:32:06.391975Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-24T21:32:06.392009Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:1] Task state saved, need 1 more acks 2025-06-24T21:32:06.392054Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-24T21:32:06.392090Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:1] Task state saved, need 0 more acks 2025-06-24T21:32:06.392118Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:484: [my-graph-id.42] [42:1] Got all acks, changing checkpoint status to 'PendingCommit' Waiting for TEvSetCheckpointPendingCommitStatusRequest (storage) 2025-06-24T21:32:06.392202Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:496: [my-graph-id.42] [42:1] Got TEvSetCheckpointPendingCommitStatusResponse 2025-06-24T21:32:06.392252Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:511: [my-graph-id.42] [42:1] Checkpoint status changed to 'PendingCommit', committing states Waiting for TEvCommitChanges (ingress) Waiting for TEvCommitChanges (egress) 2025-06-24T21:32:06.392380Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:527: [my-graph-id.42] [42:1] Got TEvStateCommitted; task: 1 2025-06-24T21:32:06.392427Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:536: [my-graph-id.42] [42:1] State committed [2:6:2053], need 1 more acks 2025-06-24T21:32:06.392493Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:527: [my-graph-id.42] [42:1] Got TEvStateCommitted; task: 3 2025-06-24T21:32:06.392529Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:536: [my-graph-id.42] [42:1] State committed [2:8:2055], need 0 more acks 2025-06-24T21:32:06.392564Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:538: [my-graph-id.42] [42:1] Got all acks, changing checkpoint status to 'Completed' Waiting for TEvCompleteCheckpointRequest (storage) 2025-06-24T21:32:06.392646Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:549: [my-graph-id.42] [42:1] Got TEvCompleteCheckpointResponse 2025-06-24T21:32:06.392683Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:564: [my-graph-id.42] [42:1] Checkpoint completed 2025-06-24T21:32:06.489253Z node 3 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:72: [my-graph-id.42] TEvReadyState, streaming disposition { }, state load mode FROM_LAST_CHECKPOINT 2025-06-24T21:32:06.489417Z node 3 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:113: [my-graph-id.42] Send TEvRegisterCoordinatorRequest Waiting for TEvRegisterCoordinatorRequest (storage) 2025-06-24T21:32:06.489537Z node 3 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:130: [my-graph-id.42] Got TEvRegisterCoordinatorResponse; issues: 2025-06-24T21:32:06.489578Z node 3 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:139: [my-graph-id.42] Successfully registered in storage 
2025-06-24T21:32:06.489614Z node 3 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:140: [my-graph-id.42] Send TEvNewCheckpointCoordinator to 3 actor(s) 2025-06-24T21:32:06.489668Z node 3 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:148: [my-graph-id.42] Send TEvGetCheckpointsMetadataRequest; state load mode: FROM_LAST_CHECKPOINT; load graph: 0 Waiting for TEvGetCheckpointsMetadataRequest (storage) 2025-06-24T21:32:06.489833Z node 3 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:185: [my-graph-id.42] Got TEvGetCheckpointsMetadataResponse 2025-06-24T21:32:06.489879Z node 3 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkp ... Registering new checkpoint in storage Waiting for TEvCreateCheckpointRequest (storage) 2025-06-24T21:32:06.588578Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:388: [my-graph-id.42] [42:3] Got TEvCreateCheckpointResponse 2025-06-24T21:32:06.588613Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:434: [my-graph-id.42] [42:3] Checkpoint successfully created, going to inject barriers to 1 actor(s) Waiting for TEvInjectCheckpointBarrier (ingress) 2025-06-24T21:32:06.588696Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:3] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-24T21:32:06.588730Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:3] Task state saved, need 2 more acks 2025-06-24T21:32:06.588771Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:3] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-24T21:32:06.588802Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:3] Task state saved, need 1 more acks 2025-06-24T21:32:06.588880Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:3] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-24T21:32:06.588927Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:3] Task state saved, need 0 more acks 2025-06-24T21:32:06.588956Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:484: [my-graph-id.42] [42:3] Got all acks, changing checkpoint status to 'PendingCommit' Waiting for TEvSetCheckpointPendingCommitStatusRequest (storage) 2025-06-24T21:32:06.589010Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:496: [my-graph-id.42] [42:3] Got TEvSetCheckpointPendingCommitStatusResponse 2025-06-24T21:32:06.589055Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:511: [my-graph-id.42] [42:3] Checkpoint status changed to 'PendingCommit', committing states Waiting for TEvCommitChanges (ingress) Waiting for TEvCommitChanges (egress) 2025-06-24T21:32:06.589162Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:527: [my-graph-id.42] [42:3] Got TEvStateCommitted; task: 1 2025-06-24T21:32:06.589199Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:536: [my-graph-id.42] [42:3] State committed [4:6:2053], need 1 more acks 2025-06-24T21:32:06.589239Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:527: [my-graph-id.42] [42:3] Got TEvStateCommitted; task: 3 2025-06-24T21:32:06.589283Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:536: 
[my-graph-id.42] [42:3] State committed [4:8:2055], need 0 more acks 2025-06-24T21:32:06.589313Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:538: [my-graph-id.42] [42:3] Got all acks, changing checkpoint status to 'Completed' Waiting for TEvCompleteCheckpointRequest (storage) 2025-06-24T21:32:06.589385Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:549: [my-graph-id.42] [42:3] Got TEvCompleteCheckpointResponse 2025-06-24T21:32:06.589419Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:564: [my-graph-id.42] [42:3] Checkpoint completed 2025-06-24T21:32:06.589455Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:372: [my-graph-id.42] Got TEvScheduleCheckpointing 2025-06-24T21:32:06.589491Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:348: [my-graph-id.42] [42:4] Registering new checkpoint in storage Waiting for TEvCreateCheckpointRequest (storage) 2025-06-24T21:32:06.589566Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:388: [my-graph-id.42] [42:4] Got TEvCreateCheckpointResponse 2025-06-24T21:32:06.589604Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:434: [my-graph-id.42] [42:4] Checkpoint successfully created, going to inject barriers to 1 actor(s) Waiting for TEvInjectCheckpointBarrier (ingress) 2025-06-24T21:32:06.589688Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:4] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-24T21:32:06.589721Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:4] Task state saved, need 2 more acks 2025-06-24T21:32:06.589770Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:4] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-24T21:32:06.589805Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:4] Task state saved, need 1 more acks 2025-06-24T21:32:06.589869Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:4] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-24T21:32:06.589902Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:4] Task state saved, need 0 more acks 2025-06-24T21:32:06.589945Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:484: [my-graph-id.42] [42:4] Got all acks, changing checkpoint status to 'PendingCommit' Waiting for TEvSetCheckpointPendingCommitStatusRequest (storage) 2025-06-24T21:32:06.590018Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:496: [my-graph-id.42] [42:4] Got TEvSetCheckpointPendingCommitStatusResponse 2025-06-24T21:32:06.590056Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:511: [my-graph-id.42] [42:4] Checkpoint status changed to 'PendingCommit', committing states Waiting for TEvCommitChanges (ingress) Waiting for TEvCommitChanges (egress) 2025-06-24T21:32:06.590164Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:527: [my-graph-id.42] [42:4] Got TEvStateCommitted; task: 1 2025-06-24T21:32:06.590199Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:536: [my-graph-id.42] [42:4] State committed [4:6:2053], need 1 more acks 2025-06-24T21:32:06.590247Z node 4 
:STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:527: [my-graph-id.42] [42:4] Got TEvStateCommitted; task: 3 2025-06-24T21:32:06.590284Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:536: [my-graph-id.42] [42:4] State committed [4:8:2055], need 0 more acks 2025-06-24T21:32:06.590317Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:538: [my-graph-id.42] [42:4] Got all acks, changing checkpoint status to 'Completed' Waiting for TEvCompleteCheckpointRequest (storage) 2025-06-24T21:32:06.590373Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:549: [my-graph-id.42] [42:4] Got TEvCompleteCheckpointResponse 2025-06-24T21:32:06.590405Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:564: [my-graph-id.42] [42:4] Checkpoint completed 2025-06-24T21:32:06.682885Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:72: [my-graph-id.42] TEvReadyState, streaming disposition { }, state load mode FROM_LAST_CHECKPOINT 2025-06-24T21:32:06.683067Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:113: [my-graph-id.42] Send TEvRegisterCoordinatorRequest Waiting for TEvRegisterCoordinatorRequest (storage) 2025-06-24T21:32:06.683432Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:130: [my-graph-id.42] Got TEvRegisterCoordinatorResponse; issues: 2025-06-24T21:32:06.683479Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:139: [my-graph-id.42] Successfully registered in storage 2025-06-24T21:32:06.683515Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:140: [my-graph-id.42] Send TEvNewCheckpointCoordinator to 3 actor(s) 2025-06-24T21:32:06.683584Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:148: [my-graph-id.42] Send TEvGetCheckpointsMetadataRequest; state load mode: FROM_LAST_CHECKPOINT; load graph: 0 Waiting for TEvGetCheckpointsMetadataRequest (storage) 2025-06-24T21:32:06.683791Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:185: [my-graph-id.42] Got TEvGetCheckpointsMetadataResponse 2025-06-24T21:32:06.683833Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:211: [my-graph-id.42] Found no checkpoints to restore from, creating a 'zero' checkpoint 2025-06-24T21:32:06.683862Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:348: [my-graph-id.42] [42:1] Registering new checkpoint in storage Waiting for TEvCreateCheckpointRequest (storage) 2025-06-24T21:32:06.684007Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:388: [my-graph-id.42] [42:1] Got TEvCreateCheckpointResponse 2025-06-24T21:32:06.684047Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:434: [my-graph-id.42] [42:1] Checkpoint successfully created, going to inject barriers to 1 actor(s) 2025-06-24T21:32:06.684132Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:440: [my-graph-id.42] [42:1] Send TEvRun to all actors Waiting for TEvInjectCheckpointBarrier (ingress) 2025-06-24T21:32:06.684272Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-24T21:32:06.684309Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:1] Task state saved, need 2 more acks 2025-06-24T21:32:06.684356Z node 5 
:STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: STORAGE_ERROR, size: 0 2025-06-24T21:32:06.684395Z node 5 :STREAMS_CHECKPOINT_COORDINATOR ERROR: checkpoint_coordinator.cpp:474: [my-graph-id.42] [42:1] StorageError: can't save node state, aborting checkpoint 2025-06-24T21:32:06.684440Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: STORAGE_ERROR, size: 0 2025-06-24T21:32:06.684509Z node 5 :STREAMS_CHECKPOINT_COORDINATOR ERROR: checkpoint_coordinator.cpp:474: [my-graph-id.42] [42:1] StorageError: can't save node state, aborting checkpoint 2025-06-24T21:32:06.684544Z node 5 :STREAMS_CHECKPOINT_COORDINATOR ERROR: checkpoint_coordinator.cpp:479: [my-graph-id.42] [42:1] Got all acks for aborted checkpoint, aborting in storage Waiting for TEvAbortCheckpointRequest (storage) 2025-06-24T21:32:06.684612Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:575: [my-graph-id.42] [42:1] Got TEvAbortCheckpointResponse 2025-06-24T21:32:06.684646Z node 5 :STREAMS_CHECKPOINT_COORDINATOR WARN: checkpoint_coordinator.cpp:581: [my-graph-id.42] [42:1] Checkpoint aborted 2025-06-24T21:32:06.684688Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:372: [my-graph-id.42] Got TEvScheduleCheckpointing 2025-06-24T21:32:06.684750Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:348: [my-graph-id.42] [42:2] Registering new checkpoint in storage Waiting for TEvCreateCheckpointRequest (storage) 2025-06-24T21:32:06.684851Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:388: [my-graph-id.42] [42:2] Got TEvCreateCheckpointResponse 2025-06-24T21:32:06.684894Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:434: [my-graph-id.42] [42:2] Checkpoint successfully created, going to inject barriers to 1 actor(s) Waiting for TEvInjectCheckpointBarrier (ingress) |98.8%| [TM] {RESULT} ydb/core/fq/libs/checkpointing/ut/unittest >> TAsyncIndexTests::CdcAndSplitWithReboots[PipeResets] [GOOD] >> TTxDataShardLocalKMeansScan::MainToBuild [GOOD] >> TTxDataShardLocalKMeansScan::BuildToPosting >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListConnections [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeConnection >> test_postgres.py::TestPostgresSuite::test_postgres_suite[comments] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[boolean] >> test_example.py::TestExample::test_example ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::CdcAndSplitWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 
72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T21:30:33.710271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:30:33.710367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:33.710412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:30:33.710449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:30:33.710501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:30:33.710549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:30:33.710619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:33.710699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:30:33.711567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:30:33.712001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:30:33.799372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:30:33.799446Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:33.800362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T21:30:33.823085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:30:33.823633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:30:33.823827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:30:33.836119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:30:33.836364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:30:33.836996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at 
schemeshard: 72057594046678944 2025-06-24T21:30:33.837243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:30:33.840023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:33.840255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:30:33.841677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:33.841755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:33.842016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:30:33.842088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:33.842146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:30:33.842307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T21:30:33.849976Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:30:33.995089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:33.995368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.995595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:30:33.995650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:30:33.995929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:30:33.996062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:33.998553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:33.998777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:30:33.999027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.999111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:30:33.999192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:30:33.999242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:30:34.001364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:34.001428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:34.001476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:30:34.003408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:34.003463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:34.003508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:34.003580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:30:34.007506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:34.016311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:30:34.016589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:30:34.017870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:34.018053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969454 } } Step: 5000001 Media ... epInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 2 IsBackup: false CdcStreams { Name: "Stream" Mode: ECdcStreamModeKeysOnly PathId { OwnerId: 72057594046678944 LocalId: 6 } State: ECdcStreamStateReady SchemaVersion: 1 Format: ECdcStreamFormatProto VirtualTimestamps: false AwsRegion: "" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\001\000\004\000\000\0002\000\000\000" IsPoint: false IsInclusive: false DatashardId: 72075186233409550 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription 
{ SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:32:07.027382Z node 34 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409550:2][72075186233409546][34:1090:2879] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-24T21:32:07.027493Z node 34 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409550:2][34:1055:2879] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-24T21:32:07.027660Z node 34 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409550:2][72075186233409546][34:1090:2879] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750800726987509 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750800726987509 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 },{ Order: 5 Group: 1750800726987509 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 }] } 2025-06-24T21:32:07.031760Z node 34 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409550:2][72075186233409546][34:1090:2879] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 5 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 5 2025-06-24T21:32:07.031887Z node 34 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409550:2][34:1055:2879] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-24T21:32:07.309875Z node 34 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:32:07.310325Z node 34 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 468us result status StatusSuccess 2025-06-24T21:32:07.311569Z node 34 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 
500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> QuoterWithKesusTest::HandlesAllRequestsForNonExistentResource [GOOD] >> QuoterWithKesusTest::GetsQuota >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeConnection [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyConnection >> test_http_api.py::TestHttpApi::test_pg_results [GOOD] >> test_http_api.py::TestHttpApi::test_set_result >> Etcd_KV::DeleteWithGetPrevious [GOOD] >> Etcd_KV::UpdateWithIgnoreValue >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyConnection [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyConnectionWithServiceAccount >> MediatorTest::BasicTimecastUpdates >> KqpTpch::Query01 >> DataShardStats::BlobsStatsCorrect [GOOD] >> DataShardStats::SharedCacheGarbage >> DataShardFollowers::FollowerKeepsWorkingAfterMvccReadTable >> KqpPg::PgUpdate-useSink [GOOD] >> Splitter::Simple >> KqpPg::JoinWithQueryService-StreamLookup >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDeleteConnection >> TestFormatHandler::ClientError [GOOD] >> Splitter::Simple [GOOD] >> Splitter::Small [GOOD] >> Splitter::Minimal [GOOD] >> Splitter::Trivial >> Splitter::Trivial [GOOD] >> Splitter::BigAndSmall >> TestFormatHandler::ClientErrorWithEmptyFilter >> Splitter::BigAndSmall [GOOD] >> Splitter::CritSmallPortions >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDeleteConnection [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendTestConnection >> 
test_http_api.py::TestHttpApi::test_set_result [GOOD] >> test_http_api.py::TestHttpApi::test_complex_results >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendTestConnection [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendTestConnectionWithServiceAccount >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendTestConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateBinding >> test_postgres.py::TestPostgresSuite::test_postgres_suite[boolean] [GOOD] >> KqpPg::DeleteWithQueryService+useSink [GOOD] >> KqpPg::DeleteWithQueryService-useSink >> test_postgres.py::TestPostgresSuite::test_postgres_suite[strings] >> TGRpcRateLimiterTest::AcquireResourceManyRequiredGrpcApiWithCancelAfter [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyRequiredActorApiWithCancelAfter >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateBinding [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListBindings >> test_http_api.py::TestHttpApi::test_complex_results [GOOD] >> test_http_api.py::TestHttpApi::test_result_offset_limit >> RetryPolicy::TWriteSession_TestPolicy [GOOD] >> RetryPolicy::TWriteSession_TestBrokenPolicy >> test_alloc_default.py::TestAlloc::test_default_delta[kikimr0] >> MediatorTest::BasicTimecastUpdates [GOOD] >> LongTxService::BasicTransactions >> Splitter::CritSmallPortions [GOOD] >> Splitter::Crit >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListBindings [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeBinding >> TTxDataShardLocalKMeansScan::BuildToPosting [GOOD] >> TTxDataShardLocalKMeansScan::BuildToBuild >> MediatorTest::MultipleTablets >> test_inserts.py::TestYdbInsertsOperations::test_insert_multiple_rows >> QuoterWithKesusTest::GetsQuota [GOOD] >> QuoterWithKesusTest::GetsBigQuota >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeBinding [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyBinding >> TestFormatHandler::ClientErrorWithEmptyFilter [GOOD] >> KqpBatchUpdate::ManyPartitions_2 [GOOD] >> test_example.py::TestExample::test_example [GOOD] >> TopicSessionTests::RestartSessionIfNewClientWithOffset [GOOD] >> TestJsonParser::Simple1 >> ServerRestartTest::RestartOnGetSession [GOOD] >> test_http_api.py::TestHttpApi::test_result_offset_limit [GOOD] >> test_http_api.py::TestHttpApi::test_openapi_spec >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyBinding [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDeleteBinding >> LongTxService::BasicTransactions [GOOD] >> LongTxService::AcquireSnapshot >> TestJsonParser::Simple1 [GOOD] >> test_http_api.py::TestHttpApi::test_openapi_spec [GOOD] >> TopicSessionTests::ReadNonExistentTopic >> TestJsonParser::Simple2 >> TestJsonParser::Simple2 [GOOD] >> TestJsonParser::Simple3 >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDeleteBinding [GOOD] >> TControlPlaneProxyShouldPassHids::ShouldCheckScenario >> TestJsonParser::Simple3 [GOOD] >> TestJsonParser::Simple4 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::ManyPartitions_2 [GOOD] Test command err: Trying to start YDB, gRPC: 3550, MsgBus: 31494 2025-06-24T21:29:10.392493Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631106109398946:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:10.392932Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002c05/r3tmp/tmpueZQiA/pdisk_1.dat 2025-06-24T21:29:11.002503Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:11.002610Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:11.007116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:11.031233Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631106109398742:2079] 1750800550367614 != 1750800550367617 2025-06-24T21:29:11.040822Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3550, node 1 2025-06-24T21:29:11.047352Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:29:11.139575Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:11.139588Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:11.139592Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:11.139671Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:11.400613Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31494 TClient is connected to server localhost:31494 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.122394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:29:12.153610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.330251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.559094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.654853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.427353Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631123289269561:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.427455Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.800107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.870571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.905058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.981853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.057824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.104374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.142063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.210040Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631127584237518:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.210141Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.210263Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631127584237523:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.214786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:15.224769Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631127584237525:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:15.305106Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631127584237576:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:15.391466Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631106109398946:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.391528Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:16.579854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__o ... base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002c05/r3tmp/tmpuoCC6G/pdisk_1.dat 2025-06-24T21:31:58.652661Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:58.652797Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:58.656206Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:58.656551Z node 12 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:58.657753Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:7519631826417408778:2079] 1750800718414183 != 1750800718414186 TServer::EnableGrpc on GrpcPort 11692, node 12 2025-06-24T21:31:58.802424Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:31:58.802454Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:31:58.802476Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:31:58.802686Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23814 2025-06-24T21:31:59.428109Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23814 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:31:59.951666Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:31:59.970557Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:31:59.984683Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:32:00.099311Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:32:00.401165Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:32:00.541455Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:32:03.417281Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519631826417408797:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:32:03.417371Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:32:05.274895Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519631856482181512:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:05.275040Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:05.349886Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:05.440411Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:05.526617Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:05.588421Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:05.651171Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:05.740710Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:05.820607Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:05.934172Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519631856482182199:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:05.934341Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:05.934750Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519631856482182204:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:05.941203Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:32:05.964715Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519631856482182206:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:32:06.029449Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7519631860777149553:3439] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:32:08.021946Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:32:13.641312Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:32:13.641348Z node 12 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded >> TestJsonParser::Simple4 [GOOD] >> test.py::test[solomon-BadDownsamplingAggregation-] [GOOD] >> test.py::test[solomon-BadDownsamplingDisabled-] >> KqpPg::JoinWithQueryService-StreamLookup [GOOD] >> KqpPg::PgAggregate+useSink >> TestJsonParser::LargeStrings >> TestJsonParser::LargeStrings [GOOD] >> DataCleanup::ForceDataCleanup >> TestJsonParser::ManyValues >> MediatorTest::MultipleTablets [GOOD] >> TestJsonParser::ManyValues [GOOD] >> LongTxService::AcquireSnapshot [GOOD] >> LongTxService::LockSubscribe >> TestJsonParser::MissingFields >> MediatorTest::TabletAckBeforePlanComplete >> TestJsonParser::MissingFields [GOOD] >> TestJsonParser::NestedTypes >> TestJsonParser::NestedTypes [GOOD] >> Splitter::Crit [GOOD] >> Splitter::CritSimple >> TAsyncIndexTests::SplitBothWithReboots[PipeResets] [GOOD] >> TestJsonParser::SimpleBooleans >> KqpTpch::Query01 [GOOD] >> KqpTpch::Query02 >> DataShardFollowers::FollowerKeepsWorkingAfterMvccReadTable [GOOD] >> DataShardFollowers::FollowerStaleRo >> Coordinator::RestoreTenantConfiguration-AlterDatabaseCreateHiveFirst-true [GOOD] >> Coordinator::LastEmptyStepResent >> TestJsonParser::SimpleBooleans [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyRequiredActorApiWithCancelAfter [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyUsedGrpcApi >> TestJsonParser::ManyBatches >> TestJsonParser::ManyBatches [GOOD] >> LongTxService::LockSubscribe [GOOD] >> TestJsonParser::LittleBatches >> test_ctas.py::TestYtCtas::test_simple_ctast [GOOD] >> test_yt_reading.py::TestYtReading::test_partitioned_reading ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitBothWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] 
Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T21:30:33.122611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:30:33.122689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:33.122722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:30:33.122750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:30:33.122789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:30:33.122841Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:30:33.122894Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:33.122955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:30:33.123653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:30:33.123937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:30:33.197236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:30:33.197279Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:33.197880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T21:30:33.213685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:30:33.214206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:30:33.214390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:30:33.223203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:30:33.223548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:30:33.224386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, 
state: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:33.224692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:30:33.228750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:33.229021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:30:33.230408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:33.230480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:33.230736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:30:33.230807Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:33.230861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:30:33.231015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T21:30:33.239337Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:30:33.427963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:33.428254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.428485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:30:33.428547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:30:33.428808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:30:33.428950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation 
type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:33.431699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:33.431915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:30:33.432197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.432278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:30:33.432339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:30:33.432386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:30:33.435445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.435520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:33.435568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:30:33.438332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.438390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.438443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:33.438515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:30:33.442681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:33.445178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:30:33.445406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:30:33.446614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:33.446773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969454 } } Step: 5000001 Media ... mpty maybe) Users: [] Groups: [] } }] } 2025-06-24T21:32:18.821282Z node 41 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:227: [AsyncIndexChangeSenderMain][72075186233409548:2][41:827:2696] HandleKeys TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046678944, LocalPathId: 5] Access: 0 SyncVersion: false Status: OkData Kind: KindAsyncIndexTable PartitionsCount: 2 DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL, Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-24T21:32:18.822024Z node 41 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:229: [AsyncIndexChangeSenderMain][72075186233409548:2][41:827:2696] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750800738779626 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1750800738779626 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750800738779626 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-24T21:32:18.822285Z node 41 :TX_PROXY DEBUG: proxy_impl.cpp:392: actor# [41:271:2260] Handle TEvGetProxyServicesRequest 2025-06-24T21:32:18.822364Z node 41 :TX_PROXY DEBUG: proxy_impl.cpp:392: actor# [41:271:2260] Handle TEvGetProxyServicesRequest 2025-06-24T21:32:18.822437Z node 41 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:40: [TableChangeSenderShard][72075186233409548:2][72075186233409550][41:985:2696] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-24T21:32:18.822538Z node 41 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:40: [TableChangeSenderShard][72075186233409548:2][72075186233409551][41:986:2696] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-24T21:32:18.835428Z node 41 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409548:2][72075186233409550][41:985:2696] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-24T21:32:18.835555Z node 41 :CHANGE_EXCHANGE DEBUG: 
change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409548:2][72075186233409551][41:986:2696] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-24T21:32:18.835639Z node 41 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][41:827:2696] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409550 } 2025-06-24T21:32:18.835719Z node 41 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][41:827:2696] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409551 } 2025-06-24T21:32:18.835848Z node 41 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409548:2][72075186233409550][41:985:2696] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750800738779626 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1750800738779626 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-24T21:32:18.836492Z node 41 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409548:2][72075186233409551][41:986:2696] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 3 Group: 1750800738779626 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-24T21:32:18.844178Z node 41 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409548:2][72075186233409550][41:985:2696] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 2 2025-06-24T21:32:18.844591Z node 41 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][41:827:2696] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409550 } 2025-06-24T21:32:18.845274Z node 41 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409548:2][72075186233409551][41:986:2696] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 3 2025-06-24T21:32:18.845361Z node 41 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][41:827:2696] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409551 } 2025-06-24T21:32:19.033638Z node 41 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false 
ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:32:19.033992Z node 41 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 373us result status StatusSuccess 2025-06-24T21:32:19.034958Z node 41 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 
1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\002\000\004\000\000\0002\000\000\000\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409550 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/server_restart/gtest >> ServerRestartTest::RestartOnGetSession [GOOD] |98.8%| [TM] {RESULT} ydb/public/sdk/cpp/tests/integration/server_restart/gtest >> TestJsonParser::LittleBatches [GOOD] >> TestJsonParser::MissingFieldsValidation ------- [TS] {asan, default-linux-x86_64, release} ydb/core/tx/long_tx_service/ut/unittest >> LongTxService::LockSubscribe [GOOD] Test command err: 2025-06-24T21:32:15.765281Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T21:32:15.765924Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/0017dc/r3tmp/tmpuLperw/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T21:32:15.766579Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/0017dc/r3tmp/tmpuLperw/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/0017dc/r3tmp/tmpuLperw/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 12474693855964059453 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T21:32:15.845638Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:94: TLongTxService [Node 1] Received TEvBeginTx from [1:427:2317] 2025-06-24T21:32:15.845789Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:123: TLongTxService [Node 1] Created new LongTxId# ydb://long-tx/000000001f6abcbjdn91qbg5eg?node_id=1 2025-06-24T21:32:15.859853Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:265: TLongTxService [Node 2] Received TEvAttachColumnShardWrites from [2:428:2100] LongTxId# ydb://long-tx/000000001f6abcbjdn91qbg5eg?node_id=1 2025-06-24T21:32:15.859990Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:833: TLongTxService [Node 2] Received TEvNodeConnected for NodeId# 1 from session [2:87:2048] 2025-06-24T21:32:15.860258Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:265: TLongTxService [Node 1] Received TEvAttachColumnShardWrites from [2:152:2089] LongTxId# ydb://long-tx/000000001f6abcbjdn91qbg5eg?node_id=1 2025-06-24T21:32:15.860497Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:139: TLongTxService [Node 2] Received TEvCommitTx from [2:428:2100] LongTxId# ydb://long-tx/000000001f6abcbjdn91qbg5eg?node_id=1 2025-06-24T21:32:15.860678Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:139: TLongTxService [Node 1] Received TEvCommitTx from [2:152:2089] LongTxId# ydb://long-tx/000000001f6abcbjdn91qbg5eg?node_id=1 2025-06-24T21:32:15.860754Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:162: TLongTxService [Node 1] Committed LongTxId# ydb://long-tx/000000001f6abcbjdn91qbg5eg?node_id=1 without side-effects 2025-06-24T21:32:15.861164Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:212: TLongTxService [Node 2] Received TEvRollbackTx from [2:428:2100] LongTxId# ydb://long-tx/000000001f6abcbjdn91qbg5eg?node_id=1 2025-06-24T21:32:15.861346Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:212: TLongTxService [Node 1] Received TEvRollbackTx from 
[2:152:2089] LongTxId# ydb://long-tx/000000001f6abcbjdn91qbg5eg?node_id=1 2025-06-24T21:32:15.864832Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:212: TLongTxService [Node 2] Received TEvRollbackTx from [2:428:2100] LongTxId# ydb://long-tx/000000001f6abcbjdn91qbg5eg?node_id=1 2025-06-24T21:32:15.865054Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:212: TLongTxService [Node 1] Received TEvRollbackTx from [2:152:2089] LongTxId# ydb://long-tx/000000001f6abcbjdn91qbg5eg?node_id=1 2025-06-24T21:32:15.866160Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 2 2025-06-24T21:32:15.866619Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 2 2025-06-24T21:32:15.866972Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:871: TLongTxService [Node 2] Received TEvNodeDisconnected for NodeId# 1 from session [2:87:2048] 2025-06-24T21:32:15.867371Z node 2 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:60:2074] ServerId# [1:349:2267] TabletId# 72057594037932033 PipeClientId# [2:60:2074] 2025-06-24T21:32:15.872536Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:139: TLongTxService [Node 2] Received TEvCommitTx from [2:428:2100] LongTxId# ydb://long-tx/000000001f6abcbjdn91qbg5eg?node_id=3 2025-06-24T21:32:15.875327Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:871: TLongTxService [Node 2] Received TEvNodeDisconnected for NodeId# 3 from session [2:466:2102] 2025-06-24T21:32:16.656005Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:32:16.656087Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:16.762388Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:32:17.728194Z node 4 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T21:32:17.728778Z node 4 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/0017dc/r3tmp/tmpkHxuKX/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T21:32:17.729543Z node 4 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/0017dc/r3tmp/tmpkHxuKX/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/0017dc/r3tmp/tmpkHxuKX/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 10903124510086838659 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T21:32:18.037239Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:346: TLongTxService [Node 3] Received TEvAcquireReadSnapshot from [3:513:2384] for database /dc-1 2025-06-24T21:32:18.037336Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:381: TLongTxService [Node 3] Scheduling TEvAcquireSnapshotFlush for database /dc-1 2025-06-24T21:32:18.048096Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:388: TLongTxService [Node 3] Received TEvAcquireSnapshotFlush for database /dc-1 2025-06-24T21:32:18.048327Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:48: LongTxService.AcquireSnapshot [3:564:2420] Sending navigate request for /dc-1 2025-06-24T21:32:18.052291Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:75: LongTxService.AcquireSnapshot [3:564:2420] Received navigate response status Ok 2025-06-24T21:32:18.052554Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:129: LongTxService.AcquireSnapshot [3:564:2420] Sending acquire step to coordinator 72057594046316545 2025-06-24T21:32:18.055570Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:165: LongTxService.AcquireSnapshot [3:564:2420] Received read step 1000 2025-06-24T21:32:18.056265Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:400: TLongTxService [Node 3] Received TEvAcquireSnapshotFinished, cookie = 1 2025-06-24T21:32:18.059085Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:94: TLongTxService [Node 3] Received TEvBeginTx from [3:513:2384] 2025-06-24T21:32:18.059178Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:381: TLongTxService [Node 3] Scheduling TEvAcquireSnapshotFlush for database /dc-1 2025-06-24T21:32:18.069702Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:388: TLongTxService [Node 3] Received TEvAcquireSnapshotFlush for database /dc-1 2025-06-24T21:32:18.069919Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:48: LongTxService.AcquireSnapshot 
[3:581:2431] Sending navigate request for /dc-1 2025-06-24T21:32:18.070183Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:75: LongTxService.AcquireSnapshot [3:581:2431] Received navigate response status Ok 2025-06-24T21:32:18.070237Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:129: LongTxService.AcquireSnapshot [3:581:2431] Sending acquire step to coordinator 72057594046316545 2025-06-24T21:32:18.070448Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:165: LongTxService.AcquireSnapshot [3:581:2431] Received read step 1500 2025-06-24T21:32:18.070532Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:400: TLongTxService [Node 3] Received TEvAcquireSnapshotFinished, cookie = 2 2025-06-24T21:32:18.070587Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:425: TLongTxService [Node 3] Created new read-only LongTxId# ydb://long-tx/read-only?snapshot=1500%3Amax 2025-06-24T21:32:18.070782Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:94: TLongTxService [Node 3] Received TEvBeginTx from [3:513:2384] 2025-06-24T21:32:18.070831Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:381: TLongTxService [Node 3] Scheduling TEvAcquireSnapshotFlush for database /dc-1 2025-06-24T21:32:18.081253Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:388: TLongTxService [Node 3] Received TEvAcquireSnapshotFlush for database /dc-1 2025-06-24T21:32:18.081482Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:48: LongTxService.AcquireSnapshot [3:583:2433] Sending navigate request for /dc-1 2025-06-24T21:32:18.081726Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:75: LongTxService.AcquireSnapshot [3:583:2433] Received navigate response status Ok 2025-06-24T21:32:18.081787Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:129: LongTxService.AcquireSnapshot [3:583:2433] Sending acquire step to coordinator 72057594046316545 2025-06-24T21:32:18.081982Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:165: LongTxService.AcquireSnapshot [3:583:2433] Received read step 1500 2025-06-24T21:32:18.082075Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:400: TLongTxService [Node 3] Received TEvAcquireSnapshotFinished, cookie = 3 2025-06-24T21:32:18.082142Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:423: TLongTxService [Node 3] Created new read-write LongTxId# ydb://long-tx/00000001e95979m9b41pj4qgxd?node_id=3&snapshot=1500%3Amax 2025-06-24T21:32:19.138114Z node 6 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-24T21:32:19.138513Z node 6 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/2btm/0017dc/r3tmp/tmp9nfqey/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-24T21:32:19.138677Z node 6 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/2btm/0017dc/r3tmp/tmp9nfqey/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/2btm/0017dc/r3tmp/tmp9nfqey/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 9589927338880983666 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-24T21:32:19.189236Z node 5 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:468: TLongTxService [Node 5] Received TEvRegisterLock for LockId# 123 2025-06-24T21:32:19.189433Z node 5 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 5] Received TEvSubscribeLock from [5:429:2319] for LockId# 987 LockNode# 5 2025-06-24T21:32:19.200642Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 6] Received TEvSubscribeLock from [6:430:2100] for LockId# 987 LockNode# 5 2025-06-24T21:32:19.201997Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:833: TLongTxService [Node 6] Received TEvNodeConnected for NodeId# 5 from session [6:98:2048] 2025-06-24T21:32:19.207502Z node 5 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 5] Received TEvSubscribeLock from [6:152:2089] for LockId# 987 LockNode# 5 2025-06-24T21:32:19.209071Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:611: TLongTxService [Node 6] Received TEvLockStatus from [5:151:2136] for LockId# 987 LockNode# 5 LockStatus# STATUS_NOT_FOUND 2025-06-24T21:32:19.209358Z node 5 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 5] Received TEvSubscribeLock from [5:429:2319] for LockId# 123 LockNode# 5 2025-06-24T21:32:19.209534Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 6] Received TEvSubscribeLock from [6:430:2100] for LockId# 123 LockNode# 5 2025-06-24T21:32:19.213941Z node 5 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 5] Received TEvSubscribeLock from [6:152:2089] for LockId# 123 LockNode# 5 2025-06-24T21:32:19.214219Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:611: TLongTxService [Node 6] Received TEvLockStatus from [5:151:2136] for LockId# 123 LockNode# 5 LockStatus# STATUS_SUBSCRIBED 2025-06-24T21:32:19.214408Z node 5 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:479: 
TLongTxService [Node 5] Received TEvUnregisterLock for LockId# 123 2025-06-24T21:32:19.214560Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:611: TLongTxService [Node 6] Received TEvLockStatus from [5:151:2136] for LockId# 123 LockNode# 5 LockStatus# STATUS_NOT_FOUND 2025-06-24T21:32:19.214743Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 6] Received TEvSubscribeLock from [6:430:2100] for LockId# 234 LockNode# 5 2025-06-24T21:32:19.214960Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 6 2025-06-24T21:32:19.215397Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 6 2025-06-24T21:32:19.215823Z node 6 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:73:2074] ServerId# [5:351:2269] TabletId# 72057594037932033 PipeClientId# [6:73:2074] 2025-06-24T21:32:19.216040Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:871: TLongTxService [Node 6] Received TEvNodeDisconnected for NodeId# 5 from session [6:98:2048] 2025-06-24T21:32:19.451655Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:833: TLongTxService [Node 6] Received TEvNodeConnected for NodeId# 5 from session [6:460:2048] 2025-06-24T21:32:19.451930Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046447617] NodeDisconnected NodeId# 6 2025-06-24T21:32:19.452148Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:871: TLongTxService [Node 6] Received TEvNodeDisconnected for NodeId# 5 from session [6:460:2048] 2025-06-24T21:32:19.452272Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 6 2025-06-24T21:32:19.452312Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 6 2025-06-24T21:32:19.452659Z node 6 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:461:2101] ServerId# [5:465:2338] TabletId# 72057594037932033 PipeClientId# [6:461:2101] 2025-06-24T21:32:19.452831Z node 6 :TX_PROXY WARN: proxy_impl.cpp:227: actor# [6:150:2088] HANDLE TEvClientDestroyed from tablet# 72057594046447617 2025-06-24T21:32:19.728683Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:833: TLongTxService [Node 6] Received TEvNodeConnected for NodeId# 5 from session [6:491:2048] 2025-06-24T21:32:19.728961Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 6 2025-06-24T21:32:19.729019Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 6 2025-06-24T21:32:19.729673Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:871: TLongTxService [Node 6] Received TEvNodeDisconnected for NodeId# 5 from session [6:491:2048] 2025-06-24T21:32:19.729949Z node 6 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:492:2102] ServerId# [5:496:2358] TabletId# 72057594037932033 PipeClientId# [6:492:2102] 2025-06-24T21:32:20.001788Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:833: TLongTxService [Node 6] Received TEvNodeConnected for NodeId# 5 from session [6:512:2048] 2025-06-24T21:32:20.002101Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 6 2025-06-24T21:32:20.002167Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 6 
2025-06-24T21:32:20.002332Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:871: TLongTxService [Node 6] Received TEvNodeDisconnected for NodeId# 5 from session [6:512:2048] 2025-06-24T21:32:20.002848Z node 6 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:515:2104] ServerId# [5:519:2372] TabletId# 72057594037932033 PipeClientId# [6:515:2104] |98.8%| [TS] {RESULT} ydb/core/tx/long_tx_service/ut/unittest >> TestJsonParser::MissingFieldsValidation [GOOD] >> TestJsonParser::TypeKindsValidation >> TestJsonParser::TypeKindsValidation [GOOD] >> QuoterWithKesusTest::GetsBigQuota [GOOD] >> QuoterWithKesusTest::GetsBigQuotaWithDeadline >> KqpPg::DeleteWithQueryService-useSink [GOOD] >> TestJsonParser::NumbersValidation >> Etcd_KV::UpdateWithIgnoreValue [GOOD] >> Etcd_KV::TxnWithoutCompares >> MediatorTest::TabletAckBeforePlanComplete [GOOD] >> TestJsonParser::NumbersValidation [GOOD] >> TSentinelTests::BSControllerUnresponsive [GOOD] >> TestJsonParser::StringsValidation >> TTxDataShardLocalKMeansScan::BuildToBuild [GOOD] >> TTxDataShardLocalKMeansScan::BuildToBuild_Ranges >> LabeledDbCounters::TwoTabletsKillOneTablet [GOOD] >> MediatorTest::TabletAckWhenDead >> ShowCreateView::Basic >> TestJsonParser::StringsValidation [GOOD] >> TestJsonParser::NestedJsonValidation >> TopicSessionTests::ReadNonExistentTopic [GOOD] >> TestJsonParser::NestedJsonValidation [GOOD] >> test.py::test[solomon-BadDownsamplingDisabled-] [GOOD] >> test.py::test[solomon-BadDownsamplingFill-] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::DeleteWithQueryService-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 10417, MsgBus: 13241 2025-06-24T21:30:45.914843Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631514654828377:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:30:45.920682Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ec6/r3tmp/tmpFqdUZa/pdisk_1.dat 2025-06-24T21:30:46.423310Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631514654828343:2079] 1750800645910449 != 1750800645910452 2025-06-24T21:30:46.432602Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:46.435973Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:30:46.436080Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:30:46.438731Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10417, node 1 2025-06-24T21:30:46.686475Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:30:46.686509Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:30:46.686517Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:30:46.686657Z node 1 :NET_CLASSIFIER 
ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:30:46.921803Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13241 TClient is connected to server localhost:13241 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:30:47.529411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:30:49.031519Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631531834698172:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:49.031637Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:49.354231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:30:49.518054Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631531834698308:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:49.518146Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:49.518168Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631531834698313:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:49.522134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:30:49.531110Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631531834698315:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:30:49.626450Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631531834698366:2415] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 1 1 1 Trying to start YDB, gRPC: 6752, MsgBus: 4953 2025-06-24T21:30:51.093470Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631541021064812:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:30:51.093547Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ec6/r3tmp/tmph5EPrE/pdisk_1.dat 2025-06-24T21:30:51.283755Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:51.287308Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631541021064781:2079] 1750800651092209 != 1750800651092212 2025-06-24T21:30:51.297170Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:30:51.297257Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 6752, node 2 2025-06-24T21:30:51.302696Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:30:51.407818Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:30:51.407842Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:30:51.407850Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:30:51.407971Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4953 TClient is connected to server localhost:4953 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:30:52.021425Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:30:52.115318Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:30:54.384514Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631553905967303:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:54.384689Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:54.398036Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:30:54.491242Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631553905967439:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:54.491365Z node 2 :KQP_WORKLOAD ... SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:32:06.402255Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:32:06.412491Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:32:10.115278Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7519631856132784536:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:32:10.115388Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:32:11.345338Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519631881902588920:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:11.345509Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:11.377564Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:11.494212Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519631881902589026:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:11.494299Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519631881902589031:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:11.494382Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:11.506253Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:32:11.526029Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7519631881902589033:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:32:11.587537Z node 11 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [11:7519631881902589084:2401] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 20972, MsgBus: 14491 2025-06-24T21:32:13.463559Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519631891650454956:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:32:13.463653Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ec6/r3tmp/tmptvzwkI/pdisk_1.dat 2025-06-24T21:32:13.725155Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:13.725304Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:13.730612Z node 12 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:13.731090Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:32:13.731641Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:7519631891650454934:2079] 1750800733454071 != 1750800733454074 TServer::EnableGrpc on GrpcPort 20972, node 12 2025-06-24T21:32:13.890720Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:32:13.890752Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:32:13.890767Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:32:13.890971Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14491 2025-06-24T21:32:14.495344Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14491 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:32:15.123295Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:32:15.133584Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:32:18.467272Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519631891650454956:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:32:18.467355Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:32:20.281182Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519631921715226656:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:20.281339Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:20.314679Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:20.477452Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519631921715226762:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:20.477691Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:20.478383Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519631921715226768:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:20.487992Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:32:20.515794Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519631921715226770:2311], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:32:20.610415Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7519631921715226823:2404] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TestJsonParser::BoolsValidation >> test_postgres.py::TestPostgresSuite::test_postgres_suite[strings] [GOOD] >> Splitter::CritSimple [GOOD] >> TestJsonParser::BoolsValidation [GOOD] >> TopicSessionTests::SlowSession >> TestJsonParser::JsonStructureValidation ------- [TS] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/splitter/ut/unittest >> Splitter::CritSimple [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280336;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280336;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=2088936;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=2088936;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5184936;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5184936;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50200;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=seria ... 
82944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8947912;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=71282912;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8947912;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7964800;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964800;columns=1; |98.8%| [TS] {RESULT} ydb/core/tx/columnshard/splitter/ut/unittest >> TestJsonParser::JsonStructureValidation [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut_sentinel/unittest >> TSentinelTests::BSControllerUnresponsive [GOOD] Test command err: 2025-06-24T21:30:51.391113Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-24T21:30:51.391215Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-24T21:30:51.391301Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-24T21:30:51.391332Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-24T21:30:51.391450Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-24T21:30:51.391547Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-24T21:30:51.394980Z node 1 :CMS DEBUG: sentinel.cpp:486: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "node-1" State: UNKNOWN Devices { Name: "pdisk-1-4" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-1-5" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-1-6" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-1-7" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 1 InterconnectPort: 10000 Location { Rack: "rack-1" } StartTimeSeconds: 0 } Hosts { Name: "node-2" State: UNKNOWN Devices { Name: "pdisk-2-8" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-2-9" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-2-10" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-2-11" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 2 InterconnectPort: 10000 Location { Rack: "rack-2" } StartTimeSeconds: 0 } Hosts { Name: "node-3" State: UNKNOWN Devices { Name: "pdisk-3-12" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-3-13" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-3-14" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-3-15" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 3 
InterconnectPort: 10000 Location { Rack: "rack-3" } StartTimeSeconds: 0 } Hosts { Name: "node-4" State: UNKNOWN Devices { Name: "pdisk-4-16" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-4-17" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-4-18" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-4-19" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 4 InterconnectPort: 10000 Location { Rack: "rack-4" } StartTimeSeconds: 0 } Hosts { Name: "node-5" State: UNKNOWN Devices { Name: "pdisk-5-20" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-5-21" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-5-22" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-5-23" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 5 InterconnectPort: 10000 Location { Rack: "rack-5" } StartTimeSeconds: 0 } Hosts { Name: "node-6" State: UNKNOWN Devices { Name: "pdisk-6-24" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-6-25" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-6-26" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-6-27" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 6 InterconnectPort: 10000 Location { Rack: "rack-6" } StartTimeSeconds: 0 } Hosts { Name: "node-7" State: UNKNOWN Devices { Name: "pdisk-7-28" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-7-29" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-7-30" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-7-31" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 7 InterconnectPort: 10000 Location { Rack: "rack-7" } StartTimeSeconds: 0 } Hosts { Name: "node-8" State: UNKNOWN Devices { Name: "pdisk-8-32" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-8-33" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-8-34" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-8-35" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 8 InterconnectPort: 10000 Location { Rack: "rack-8" } StartTimeSeconds: 0 } } 2025-06-24T21:30:51.400485Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 4 Path: "/1/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 5 Path: "/1/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 6 Path: "/1/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 7 Path: "/1/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 8 Path: "/2/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 9 Path: "/2/pdisk-9.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 10 Path: "/2/pdisk-10.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 11 Path: "/2/pdisk-11.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 12 Path: "/3/pdisk-12.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 13 Path: "/3/pdisk-13.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 14 Path: "/3/pdisk-14.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 15 Path: "/3/pdisk-15.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 16 Path: "/4/pdisk-16.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 17 Path: "/4/pdisk-17.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 18 Path: "/4/pdisk-18.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 19 Path: "/4/pdisk-19.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 20 Path: "/5/pdisk-20.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 21 Path: "/5/pdisk-21.data" Guid: 
1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 22 Path: "/5/pdisk-22.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 23 Path: "/5/pdisk-23.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 24 Path: "/6/pdisk-24.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 25 Path: "/6/pdisk-25.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 26 Path: "/6/pdisk-26.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 27 Path: "/6/pdisk-27.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 28 Path: "/7/pdisk-28.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 29 Path: "/7/pdisk-29.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 30 Path: "/7/pdisk-30.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 31 Path: "/7/pdisk-31.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 32 Path: "/8/pdisk-32.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 33 Path: "/8/pdisk-33.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 34 Path: "/8/pdisk-34.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 35 Path: "/8/pdisk-35.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 
PDiskId: 10 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1002 ... 
Path: "/75/pdisk-302.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 303 CreateTime: 0 ChangeTime: 0 Path: "/75/pdisk-303.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860028 2025-06-24T21:32:20.959317Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 76, response# PDiskStateInfo { PDiskId: 304 CreateTime: 0 ChangeTime: 0 Path: "/76/pdisk-304.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 305 CreateTime: 0 ChangeTime: 0 Path: "/76/pdisk-305.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 306 CreateTime: 0 ChangeTime: 0 Path: "/76/pdisk-306.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 307 CreateTime: 0 ChangeTime: 0 Path: "/76/pdisk-307.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860028 2025-06-24T21:32:20.959489Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 77, response# PDiskStateInfo { PDiskId: 308 CreateTime: 0 ChangeTime: 0 Path: "/77/pdisk-308.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 309 CreateTime: 0 ChangeTime: 0 Path: "/77/pdisk-309.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 310 CreateTime: 0 ChangeTime: 0 Path: "/77/pdisk-310.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 311 CreateTime: 0 ChangeTime: 0 Path: "/77/pdisk-311.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860028 2025-06-24T21:32:20.959556Z node 71 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-24T21:32:20.959943Z node 71 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 73:295, status# INACTIVE, required status# ACTIVE, reason# PrevState# Normal State# Normal StateCounter# 60 StateLimit# 60, dry run# 0 2025-06-24T21:32:20.960000Z node 71 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 78:312, status# INACTIVE, required status# ACTIVE, reason# PrevState# Normal State# Normal StateCounter# 60 StateLimit# 60, dry run# 0 2025-06-24T21:32:20.960032Z node 71 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 77:311, status# INACTIVE, required status# ACTIVE, reason# PrevState# Normal State# Normal StateCounter# 60 StateLimit# 60, dry run# 0 2025-06-24T21:32:20.960092Z node 71 :CMS DEBUG: sentinel.cpp:1076: [Sentinel] [Main] Change pdisk status: requestsSize# 3 2025-06-24T21:32:20.971577Z node 71 :CMS DEBUG: sentinel.cpp:1262: [Sentinel] [Main] Retrying: attempt# 1 2025-06-24T21:32:20.971651Z node 71 :CMS DEBUG: sentinel.cpp:1076: [Sentinel] [Main] Change pdisk status: requestsSize# 3 2025-06-24T21:32:20.982106Z node 71 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-24T21:32:20.982185Z node 71 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-24T21:32:20.982289Z node 71 :CMS DEBUG: sentinel.cpp:1262: [Sentinel] [Main] Retrying: attempt# 2 2025-06-24T21:32:20.982347Z node 71 :CMS DEBUG: sentinel.cpp:1076: [Sentinel] [Main] 
Change pdisk status: requestsSize# 3 2025-06-24T21:32:20.982519Z node 71 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 71, wbId# [71:8388350642965737326:1634689637] 2025-06-24T21:32:20.982562Z node 71 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 72, wbId# [72:8388350642965737326:1634689637] 2025-06-24T21:32:20.982612Z node 71 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 73, wbId# [73:8388350642965737326:1634689637] 2025-06-24T21:32:20.982648Z node 71 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 74, wbId# [74:8388350642965737326:1634689637] 2025-06-24T21:32:20.982675Z node 71 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 75, wbId# [75:8388350642965737326:1634689637] 2025-06-24T21:32:20.982703Z node 71 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 76, wbId# [76:8388350642965737326:1634689637] 2025-06-24T21:32:20.982729Z node 71 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 77, wbId# [77:8388350642965737326:1634689637] 2025-06-24T21:32:20.982772Z node 71 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 78, wbId# [78:8388350642965737326:1634689637] 2025-06-24T21:32:20.983375Z node 71 :CMS DEBUG: sentinel.cpp:1202: [Sentinel] [Main] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true } Status { Success: true } Status { }, cookie# 123 2025-06-24T21:32:20.983413Z node 71 :CMS ERROR: sentinel.cpp:1244: [Sentinel] [Main] Unsuccesful response from BSC: error# 2025-06-24T21:32:20.983748Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 71, response# PDiskStateInfo { PDiskId: 284 CreateTime: 0 ChangeTime: 0 Path: "/71/pdisk-284.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 285 CreateTime: 0 ChangeTime: 0 Path: "/71/pdisk-285.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 286 CreateTime: 0 ChangeTime: 0 Path: "/71/pdisk-286.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 287 CreateTime: 0 ChangeTime: 0 Path: "/71/pdisk-287.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37880028 2025-06-24T21:32:20.984217Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 73, response# PDiskStateInfo { PDiskId: 292 CreateTime: 0 ChangeTime: 0 Path: "/73/pdisk-292.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 293 CreateTime: 0 ChangeTime: 0 Path: "/73/pdisk-293.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 294 CreateTime: 0 ChangeTime: 0 Path: "/73/pdisk-294.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 295 CreateTime: 0 ChangeTime: 0 Path: "/73/pdisk-295.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37880028 2025-06-24T21:32:20.984408Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 74, response# PDiskStateInfo { 
PDiskId: 296 CreateTime: 0 ChangeTime: 0 Path: "/74/pdisk-296.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 297 CreateTime: 0 ChangeTime: 0 Path: "/74/pdisk-297.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 298 CreateTime: 0 ChangeTime: 0 Path: "/74/pdisk-298.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 299 CreateTime: 0 ChangeTime: 0 Path: "/74/pdisk-299.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37880028 2025-06-24T21:32:20.984574Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 75, response# PDiskStateInfo { PDiskId: 300 CreateTime: 0 ChangeTime: 0 Path: "/75/pdisk-300.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 301 CreateTime: 0 ChangeTime: 0 Path: "/75/pdisk-301.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 302 CreateTime: 0 ChangeTime: 0 Path: "/75/pdisk-302.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 303 CreateTime: 0 ChangeTime: 0 Path: "/75/pdisk-303.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37880028 2025-06-24T21:32:20.984716Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 76, response# PDiskStateInfo { PDiskId: 304 CreateTime: 0 ChangeTime: 0 Path: "/76/pdisk-304.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 305 CreateTime: 0 ChangeTime: 0 Path: "/76/pdisk-305.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 306 CreateTime: 0 ChangeTime: 0 Path: "/76/pdisk-306.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 307 CreateTime: 0 ChangeTime: 0 Path: "/76/pdisk-307.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37880028 2025-06-24T21:32:20.984899Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 77, response# PDiskStateInfo { PDiskId: 308 CreateTime: 0 ChangeTime: 0 Path: "/77/pdisk-308.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 309 CreateTime: 0 ChangeTime: 0 Path: "/77/pdisk-309.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 310 CreateTime: 0 ChangeTime: 0 Path: "/77/pdisk-310.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 311 CreateTime: 0 ChangeTime: 0 Path: "/77/pdisk-311.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37880028 2025-06-24T21:32:20.985039Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 78, response# PDiskStateInfo { PDiskId: 312 CreateTime: 0 ChangeTime: 0 Path: "/78/pdisk-312.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 313 CreateTime: 0 ChangeTime: 0 Path: "/78/pdisk-313.data" Guid: 1 
AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 314 CreateTime: 0 ChangeTime: 0 Path: "/78/pdisk-314.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 315 CreateTime: 0 ChangeTime: 0 Path: "/78/pdisk-315.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37880028 2025-06-24T21:32:20.985207Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 72, response# PDiskStateInfo { PDiskId: 288 CreateTime: 0 ChangeTime: 0 Path: "/72/pdisk-288.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 289 CreateTime: 0 ChangeTime: 0 Path: "/72/pdisk-289.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 290 CreateTime: 0 ChangeTime: 0 Path: "/72/pdisk-290.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 291 CreateTime: 0 ChangeTime: 0 Path: "/72/pdisk-291.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37880028 2025-06-24T21:32:20.985269Z node 71 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s >> TestPurecalcFilter::Simple1 >> KqpPg::PgAggregate+useSink [GOOD] >> KqpPg::PgAggregate-useSink >> DataShardFollowers::FollowerStaleRo [GOOD] >> DataShardFollowers::FollowerRebootAfterSysCompaction |98.8%| [TM] {RESULT} ydb/core/cms/ut_sentinel/unittest >> Coordinator::LastEmptyStepResent [GOOD] >> CoordinatorVolatile::PlanResentOnReboots >> test_yt_reading.py::TestYtReading::test_partitioned_reading [GOOD] >> test_yt_reading.py::TestYtReading::test_block_reading >> TGRpcRateLimiterTest::AcquireResourceManyUsedGrpcApi [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyUsedActorApi >> DataCleanup::ForceDataCleanup [GOOD] >> DataCleanup::ForceDataCleanupWithoutCompaction |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/http_api/py3test >> test_http_api.py::TestHttpApi::test_openapi_spec [GOOD] |98.8%| [TM] {RESULT} ydb/tests/fq/http_api/py3test >> test_cte.py::TestCte::test_toplevel >> TCreateAndDropViewTest::CheckCreatedView >> test_example.py::TestExample::test_example2 >> test.py::TestSqsSplitMergeStdTables::test_std_merge_split >> QuoterWithKesusTest::GetsBigQuotaWithDeadline [GOOD] >> QuoterWithKesusTest::FailsToGetBigQuota >> MediatorTest::TabletAckWhenDead [GOOD] >> MediatorTest::PlanStepAckToReconnectedMediator >> CompressExecutor::TestExecutorMemUsage [GOOD] >> GraphShard::NormalizeAndDownsample1 [GOOD] >> GraphShard::NormalizeAndDownsample2 [GOOD] >> GraphShard::NormalizeAndDownsample3 [GOOD] >> GraphShard::NormalizeAndDownsample4 [GOOD] >> GraphShard::NormalizeAndDownsample5 [GOOD] >> GraphShard::NormalizeAndDownsample6 [GOOD] >> GraphShard::CheckHistogramToPercentileConversions [GOOD] >> GraphShard::CreateGraphShard >> KqpTpch::Query02 [GOOD] >> KqpTpch::Query03 >> test.py::test[solomon-BadDownsamplingFill-] [GOOD] >> test.py::test[solomon-BadDownsamplingInterval-] >> KqpPg::V1CreateTable [GOOD] >> KqpPg::ValuesInsert+useSink >> TestPurecalcFilter::Simple1 [GOOD] >> TestPurecalcFilter::Simple2 >> DataShardReplication::SimpleApplyChanges >> CoordinatorVolatile::PlanResentOnReboots [GOOD] >> CoordinatorVolatile::MediatorReconnectPlanRace >> RetryPolicy::TWriteSession_TestBrokenPolicy [GOOD] >> 
RetryPolicy::TWriteSession_RetryOnTargetCluster ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> CompressExecutor::TestExecutorMemUsage [GOOD] Test command err: 2025-06-24T21:30:09.433679Z :WriteAndReadSomeMessagesWithAsyncCompression INFO: Random seed for debugging is 1750800609433648 2025-06-24T21:30:09.781292Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631360425014354:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:30:09.791410Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:30:09.850376Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631360865352520:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:30:09.850458Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049b6/r3tmp/tmpNsGa0i/pdisk_1.dat 2025-06-24T21:30:10.127080Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:30:10.168321Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:30:10.372249Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:10.390145Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:30:10.390260Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:30:10.393502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:30:10.393601Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:30:10.401288Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:30:10.401528Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:30:10.403045Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8014, node 1 2025-06-24T21:30:10.525578Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0049b6/r3tmp/yandexwTnRGC.tmp 2025-06-24T21:30:10.525621Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/0049b6/r3tmp/yandexwTnRGC.tmp 2025-06-24T21:30:10.525820Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0049b6/r3tmp/yandexwTnRGC.tmp 2025-06-24T21:30:10.525999Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:30:10.610519Z INFO: TTestServer started on Port 25800 GrpcPort 8014 
2025-06-24T21:30:10.779288Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25800 PQClient connected to localhost:8014 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:30:10.932979Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:30:10.987012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-24T21:30:13.453531Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631378045221953:2268], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:13.453633Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631378045221974:2272], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:13.453706Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:13.454161Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631377604884417:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:13.455644Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:13.459268Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631377604884430:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:13.463477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:30:13.483249Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631378045221992:2125] txid# 281474976720657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-24T21:30:13.502737Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631377604884432:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-24T21:30:13.502827Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631378045221991:2273], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-24T21:30:13.572680Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631377604884536:2691] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:30:13.592179Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631378045222019:2131] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:30:13.973683Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519631378045222034:2277], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:30:13.975563Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MWRiN2YwZGYtZjQ3Y2MzOTQtZWI2NzhiZmEtY2E5MjMxNDE=, ActorId: [2:7519631378045221951:2267], ActorState: ExecuteState, TraceId: 01jyhxjg299yshbbqscfkhghe4, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:30:13.977641Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:30:13.978768Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631377604884546:2308], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:30:13.979066Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YTc2M2RiMS0zNDEyMTk3Yy0yMWQ1NjVkMy04M2UyY2ZkNw==, ActorId: [1:7519631377604884415:2296], ActorState: ExecuteState, TraceId: 01jyhxjg2b7jwjacsgqvg9szy5, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:30:13.979509Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" e ... f8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-24T21:32:27.325007Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-24T21:32:27.325029Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [15:7519631951275505138:2534] (SourceId=test-message-group-id, PreferedPartition=(NULL)) StartKqpSession 2025-06-24T21:32:27.330262Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [15:7519631951275505138:2534] (SourceId=test-message-group-id, PreferedPartition=(NULL)) Select from the table 2025-06-24T21:32:27.519417Z node 15 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715695. Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-24T21:32:27.519547Z node 15 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [15:7519631951275505150:2536] TxId: 281474976715695. Ctx: { TraceId: 01jyhxpjt2cces3ng3q3m75w3d, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=MWY4NzgzZTQtMmFlZTk2YjQtNDA2ZWVlZjYtMThjZmYxMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-24T21:32:27.519765Z node 15 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=15&id=MWY4NzgzZTQtMmFlZTk2YjQtNDA2ZWVlZjYtMThjZmYxMGQ=, ActorId: [15:7519631951275505139:2536], ActorState: ExecuteState, TraceId: 01jyhxpjt2cces3ng3q3m75w3d, Create QueryResponse for error on request, msg: Test retry state: get retry delay 2025-06-24T21:32:27.524867Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|5ec1c605-9e30b4d5-7b4c8fa6-819236a3_0] Got error. Status: UNAVAILABLE, Description:
: Error: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=15&id=MWY4NzgzZTQtMmFlZTk2YjQtNDA2ZWVlZjYtMThjZmYxMGQ=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jyhxpjt309jww0tf958w41r5" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 , code: 500001 2025-06-24T21:32:27.524912Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|5ec1c605-9e30b4d5-7b4c8fa6-819236a3_0] Write session will restart in 2.000000s 2025-06-24T21:32:27.525041Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|5ec1c605-9e30b4d5-7b4c8fa6-819236a3_0] Write session: Do CDS request 2025-06-24T21:32:27.525078Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|5ec1c605-9e30b4d5-7b4c8fa6-819236a3_0] Do schedule cds request after 2000 ms 2025-06-24T21:32:27.522087Z node 15 :PQ_PARTITION_CHOOSER INFO: partition_chooser_impl__abstract_chooser_actor.h:312: TPartitionChooser [15:7519631951275505138:2534] (SourceId=test-message-group-id, PreferedPartition=(NULL)) ReplyError: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=15&id=MWY4NzgzZTQtMmFlZTk2YjQtNDA2ZWVlZjYtMThjZmYxMGQ=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jyhxpjt309jww0tf958w41r5" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 2025-06-24T21:32:27.522260Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 3 reason: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=15&id=MWY4NzgzZTQtMmFlZTk2YjQtNDA2ZWVlZjYtMThjZmYxMGQ=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jyhxpjt309jww0tf958w41r5" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 sessionId: 2025-06-24T21:32:27.522696Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 3 sessionId: is DEAD 2025-06-24T21:32:27.539450Z node 15 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715692, task: 2, CA Id [15:7519631942685570346:2526]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-24T21:32:28.143763Z node 15 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715692, task: 1, CA Id [15:7519631942685570345:2525]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T21:32:28.317252Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|5ec1c605-9e30b4d5-7b4c8fa6-819236a3_0] Write session: close. 
Timeout = 0 ms 2025-06-24T21:32:28.317347Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|5ec1c605-9e30b4d5-7b4c8fa6-819236a3_0] Write session will now close 2025-06-24T21:32:28.320309Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|5ec1c605-9e30b4d5-7b4c8fa6-819236a3_0] Write session: aborting 2025-06-24T21:32:28.321274Z :WARNING: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|5ec1c605-9e30b4d5-7b4c8fa6-819236a3_0] Write session: could not confirm all writes in time or session aborted, perform hard shutdown 2025-06-24T21:32:28.321339Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|5ec1c605-9e30b4d5-7b4c8fa6-819236a3_0] Write session: destroy 2025-06-24T21:32:28.435760Z node 16 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720684. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-24T21:32:28.435880Z node 16 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [16:7519631954642268550:2420] TxId: 281474976720684. Ctx: { TraceId: 01jyhxpkng80ed7zsww1qrxf6c, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=16&id=NzNmZGMxNWEtOWYzY2ZhZTAtM2JjODFkZjAtZTYyMDJlZDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-24T21:32:28.436074Z node 16 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=16&id=NzNmZGMxNWEtOWYzY2ZhZTAtM2JjODFkZjAtZTYyMDJlZDg=, ActorId: [16:7519631954642268547:2420], ActorState: ExecuteState, TraceId: 01jyhxpkng80ed7zsww1qrxf6c, Create QueryResponse for error on request, msg: 2025-06-24T21:32:28.438368Z node 16 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jyhxpknhddss0dz725hx2afa" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } 2025-06-24T21:32:28.486934Z node 15 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715692, task: 2, CA Id [15:7519631942685570346:2526]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-24T21:32:28.652772Z node 15 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715697. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-24T21:32:28.652930Z node 15 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [15:7519631955570472532:2540] TxId: 281474976715697. Ctx: { TraceId: 01jyhxpkx84wn7jmhg4hj169g9, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=NzcxMjQ5MzctM2FlZDU2NjktYmQ5MTllODctMTViM2Y3MDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-24T21:32:28.653133Z node 15 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=15&id=NzcxMjQ5MzctM2FlZDU2NjktYmQ5MTllODctMTViM2Y3MDU=, ActorId: [15:7519631955570472528:2540], ActorState: ExecuteState, TraceId: 01jyhxpkx84wn7jmhg4hj169g9, Create QueryResponse for error on request, msg: 2025-06-24T21:32:28.654631Z node 15 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jyhxpkx98mxed803tk469ncs" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } 2025-06-24T21:32:29.139665Z node 15 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715692, task: 1, CA Id [15:7519631942685570345:2525]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-24T21:32:29.140195Z node 15 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [15:7519631942685570345:2525], TxId: 281474976715692, task: 1. Ctx: { SessionId : ydb://session/3?node_id=15&id=M2EzOWRiZjItZTBiZTFmNTAtZmFhZDk4NzgtYzU1OGY0OTE=. CustomerSuppliedId : . TraceId : 01jyhxpg84d6fg3dy50e2dhtq9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Source[0] fatal error: {
: Error: Too many retries for shard 72075186224037890 } 2025-06-24T21:32:29.140296Z node 15 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [15:7519631942685570345:2525], TxId: 281474976715692, task: 1. Ctx: { SessionId : ydb://session/3?node_id=15&id=M2EzOWRiZjItZTBiZTFmNTAtZmFhZDk4NzgtYzU1OGY0OTE=. CustomerSuppliedId : . TraceId : 01jyhxpg84d6fg3dy50e2dhtq9. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: UNAVAILABLE DEFAULT_ERROR: {
: Error: Too many retries for shard 72075186224037890 }. 2025-06-24T21:32:29.140937Z node 15 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [15:7519631942685570346:2526], TxId: 281474976715692, task: 2. Ctx: { SessionId : ydb://session/3?node_id=15&id=M2EzOWRiZjItZTBiZTFmNTAtZmFhZDk4NzgtYzU1OGY0OTE=. TraceId : 01jyhxpg84d6fg3dy50e2dhtq9. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [15:7519631942685570339:2515], status: UNAVAILABLE, reason: {
: Error: Terminate execution } 2025-06-24T21:32:29.140937Z node 15 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [15:7519631942685570347:2527], TxId: 281474976715692, task: 3. Ctx: { TraceId : 01jyhxpg84d6fg3dy50e2dhtq9. SessionId : ydb://session/3?node_id=15&id=M2EzOWRiZjItZTBiZTFmNTAtZmFhZDk4NzgtYzU1OGY0OTE=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [15:7519631942685570339:2515], status: UNAVAILABLE, reason: {
: Error: Terminate execution } 2025-06-24T21:32:29.141684Z node 15 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=15&id=M2EzOWRiZjItZTBiZTFmNTAtZmFhZDk4NzgtYzU1OGY0OTE=, ActorId: [15:7519631938390602978:2515], ActorState: ExecuteState, TraceId: 01jyhxpg84d6fg3dy50e2dhtq9, Create QueryResponse for error on request, msg: 2025-06-24T21:32:29.143754Z node 15 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Too many retries for shard 72075186224037890" severity: 1 } TxMeta { id: "01jyhxpgtc4fkv30hejz7wy212" } } YdbStatus: UNAVAILABLE ConsumedRu: 387 } >> DataShardFollowers::FollowerRebootAfterSysCompaction [GOOD] >> DataShardFollowers::FollowerAfterSysCompaction >> GraphShard::CreateGraphShard [GOOD] >> DataCleanup::ForceDataCleanupWithoutCompaction [GOOD] >> DataCleanup::MultipleDataCleanups >> test_yt_reading.py::TestYtReading::test_block_reading [GOOD] >> TCreateAndDropViewTest::CheckCreatedView [GOOD] >> TCreateAndDropViewTest::CreateViewDisabledFeatureFlag >> KqpTpch::Query03 [GOOD] >> KqpTpch::Query04 >> KqpPg::PgAggregate-useSink [GOOD] >> KqpPg::MkqlTerminate ------- [TS] {asan, default-linux-x86_64, release} ydb/core/graph/shard/ut/unittest >> GraphShard::CreateGraphShard [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:129:2058] recipient: [1:111:2141] 2025-06-24T21:32:32.329655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:32:32.329824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:32:32.329872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:32:32.329917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:32:32.331114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:32:32.331215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:32:32.331330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:32:32.331530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:32:32.332347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:32:32.337476Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:32:32.481629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7386: Cannot subscribe to console configs 2025-06-24T21:32:32.481692Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:32.516933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:32:32.521501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:32:32.521787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:32:32.563900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:32:32.564321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:32:32.568038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:32:32.568655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:32:32.593691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:32:32.597434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:32:32.622379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:32:32.622462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:32:32.622616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:32:32.622670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:32:32.622769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:32:32.622943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-24T21:32:32.639241Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-24T21:32:32.827340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:32:32.831463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: 
//MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:32:32.835320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:32:32.835444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:32:32.839351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:32:32.839498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:32:32.856494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:32:32.857757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:32:32.858071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:32:32.858255Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:32:32.858299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:32:32.858347Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:32:32.872296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:32:32.872404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:32:32.872454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:32:32.880474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:32:32.880542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:32:32.880609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:32:32.880677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:32:32.895725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose 
send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:32:32.898193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:32:32.901478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:32:32.902723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:32:32.902905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:32:32.902958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:32:32.906299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 128 -> 240 2025-06-24T21:32:32.906369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:32:32.906590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-24T21:32:32.906695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-24T21:32:32.910106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:32:32.910185Z node 1 :FLAT_TX_SCHEMESHARD ... 
bDomainState::TPropose ProgressState leave, operationId 102:1, at tablet# 72057594046678944 2025-06-24T21:32:33.278333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 102 ready parts: 2/2 2025-06-24T21:32:33.278458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 102 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:32:33.283749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-06-24T21:32:33.283875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 2025-06-24T21:32:33.284216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:32:33.284334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969452 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:32:33.284383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:723: TTxOperationPlanStep Execute operation part is already done, operationId: 102:0 2025-06-24T21:32:33.284443Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 102:1, at tablet# 72057594046678944 2025-06-24T21:32:33.284771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:1 128 -> 240 2025-06-24T21:32:33.284865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 102:1, at tablet# 72057594046678944 2025-06-24T21:32:33.284984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-06-24T21:32:33.285079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:568: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[1:401:2367], EffectiveACLVersion: 0, SubdomainVersion: 3, UserAttributesVersion: 1, TenantHive: 72075186233409546, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 72075186234409549, TenantRootACL: }, subDomain->GetVersion(): 3, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 72075186233409546, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 102 
2025-06-24T21:32:33.292062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:32:33.292113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-24T21:32:33.292291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:32:33.292329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:209:2209], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-24T21:32:33.292627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:1, at schemeshard: 72057594046678944 2025-06-24T21:32:33.292685Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:761: [72057594046678944] TSyncHive, operationId 102:1, ProgressState, NeedSyncHive: 0 2025-06-24T21:32:33.292732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 102:1 240 -> 240 2025-06-24T21:32:33.293364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:32:33.293452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-24T21:32:33.293484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-24T21:32:33.293515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-24T21:32:33.293550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 2025-06-24T21:32:33.293635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/2, is published: true 2025-06-24T21:32:33.296611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 102:1, at schemeshard: 72057594046678944 2025-06-24T21:32:33.296674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:493: [72057594046678944] TDone opId# 102:1 ProgressState 2025-06-24T21:32:33.296748Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:1 progress is 2/2 2025-06-24T21:32:33.296814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 2/2 2025-06-24T21:32:33.296856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:907: Part operation is done id#102:1 progress is 2/2 2025-06-24T21:32:33.296883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation 
IsReadyToDone TxId: 102 ready parts: 2/2 2025-06-24T21:32:33.296916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 102, ready parts: 2/2, is published: true 2025-06-24T21:32:33.296950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 102 ready parts: 2/2 2025-06-24T21:32:33.296994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:0 2025-06-24T21:32:33.297072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:0 2025-06-24T21:32:33.297249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-06-24T21:32:33.297289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 102:1 2025-06-24T21:32:33.297322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 102:1 2025-06-24T21:32:33.297409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-24T21:32:33.297948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-24T21:32:33.299714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-24T21:32:33.299761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-24T21:32:33.300186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-24T21:32:33.300278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-24T21:32:33.300319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:564:2491] TestWaitNotification: OK eventTxId 102 2025-06-24T21:32:33.303709Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/db1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-24T21:32:33.304009Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/db1" took 297us result status StatusSuccess 2025-06-24T21:32:33.307385Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/db1" PathDescription { Self { Name: "db1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 3 PlanResolution: 50 Coordinators: 72075186234409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409548 SchemeShard: 72075186234409546 Hive: 72075186233409546 GraphShard: 72075186234409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |98.8%| [TS] {RESULT} ydb/core/graph/shard/ut/unittest |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_import/py3test >> test_yt_reading.py::TestYtReading::test_block_reading [GOOD] |98.8%| [TM] {RESULT} ydb/tests/fq/yt/kqp_yt_import/py3test >> TGRpcRateLimiterTest::AcquireResourceManyUsedActorApi [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyUsedGrpcApiWithCancelAfter >> MediatorTest::PlanStepAckToReconnectedMediator [GOOD] >> TopicSessionTests::SlowSession [GOOD] >> MediatorTest::WatcherReconnect >> KeyValueGRPCService::SimpleAcquireLock >> TopicSessionTests::TwoSessionsWithDifferentSchemes >> KqpTpch::Query04 [GOOD] >> KqpTpch::Query05 >> XmlBuilderTest::WritesProperly [GOOD] >> XmlBuilderTest::MacroBuilder [GOOD] >> test.py::test[solomon-BadDownsamplingInterval-] [GOOD] >> test.py::test[solomon-Basic-default.txt] |98.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/ymq/http/ut/unittest >> XmlBuilderTest::MacroBuilder [GOOD] |98.8%| [TS] {RESULT} ydb/core/ymq/http/ut/unittest >> TestPurecalcFilter::Simple2 [GOOD] >> test_example.py::TestExample::test_example2 [GOOD] >> TestPurecalcFilter::ManyValues >> TCreateAndDropViewTest::CreateViewDisabledFeatureFlag [GOOD] >> TCreateAndDropViewTest::InvalidQuery >> QueryActorTest::SimpleQuery >> QuoterWithKesusTest::FailsToGetBigQuota [GOOD] >> QuoterWithKesusTest::PrefetchCoefficient >> CoordinatorVolatile::MediatorReconnectPlanRace [GOOD] >> CoordinatorVolatile::CoordinatorMigrateUncommittedVolatileTx >> test_cte.py::TestCte::test_toplevel [GOOD] >> MediatorTest::WatcherReconnect [GOOD] >> DataShardReplication::SimpleApplyChanges [GOOD] >> DataShardReplication::SplitMergeChanges |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/kqp/plan2svg/py3test >> test_cte.py::TestCte::test_toplevel [GOOD] |98.8%| [TM] {RESULT} ydb/tests/functional/kqp/plan2svg/py3test >> MediatorTest::MultipleSteps >> BulkUpsert::BulkUpsert >> DataShardFollowers::FollowerAfterSysCompaction [GOOD] >> DataShardFollowers::FollowerAfterDataCompaction >> test_alloc_default.py::TestAlloc::test_default_delta[kikimr0] [GOOD] >> DataCleanup::MultipleDataCleanups [GOOD] >> 
DataCleanup::MultipleDataCleanupsWithOldGenerations >> KeyValueGRPCService::SimpleAcquireLock [GOOD] >> KeyValueGRPCService::SimpleExecuteTransaction >> test_inserts.py::TestYdbInsertsOperations::test_insert_multiple_rows [GOOD] >> test_inserts.py::TestYdbInsertsOperations::test_concurrent_inserts >> KqpPg::MkqlTerminate [GOOD] >> KqpPg::NoSelectFullScan >> TGRpcRateLimiterTest::AcquireResourceManyUsedGrpcApiWithCancelAfter [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyUsedActorApiWithCancelAfter >> TCreateAndDropViewTest::InvalidQuery [GOOD] >> TCreateAndDropViewTest::ParsingSecurityInvoker >> StatisticsScan::RunScanOnShard >> KqpTpch::Query05 [GOOD] >> KqpTpch::Query06 >> TestPurecalcFilter::ManyValues [GOOD] >> test.py::test[solomon-Basic-default.txt] [GOOD] >> test.py::test[solomon-BasicExtractMembers-default.txt] >> MediatorTest::MultipleSteps [GOOD] >> TopicSessionTests::TwoSessionsWithDifferentSchemes [GOOD] >> QueryActorTest::SimpleQuery [GOOD] >> QueryActorTest::Rollback >> TestPurecalcFilter::NullValues >> MediatorTest::WatchesBeforeFirstStep >> KqpTpch::Query06 [GOOD] >> KqpTpch::Query07 >> test_example.py::TestExample::test_linked_with_testcase >> TopicSessionTests::TwoSessionsWithDifferentColumnTypes >> KeyValueGRPCService::SimpleExecuteTransaction [GOOD] >> KeyValueGRPCService::SimpleExecuteTransactionWithWrongGeneration >> CoordinatorVolatile::CoordinatorMigrateUncommittedVolatileTx [GOOD] >> CoordinatorVolatile::CoordinatorRestartWithEnqueuedVolatileStep >> Etcd_KV::TxnWithoutCompares [GOOD] >> Etcd_KV::OptimisticCreate >> DataShardReplication::SplitMergeChanges [GOOD] >> DataShardReplication::SplitMergeChangesReboots >> TTxDataShardLocalKMeansScan::BuildToBuild_Ranges [GOOD] >> TTxDataShardPrefixKMeansScan::BadRequest >> KqpQueryService::TableSink_ReplaceFromSelectLargeOlap [GOOD] >> KqpQueryService::TableSink_ReplaceDuplicatesOlap >> BasicExample::BasicExample >> MediatorTimeCast::ReadStepSubscribe >> QuoterWithKesusTest::PrefetchCoefficient [GOOD] >> QuoterWithKesusTest::GetsQuotaAfterPause >> DataCleanup::MultipleDataCleanupsWithOldGenerations [GOOD] >> DataCleanup::ForceDataCleanupWithRestart >> test_alloc_default.py::TestAlloc::test_node_limit[kikimr0] >> DataShardFollowers::FollowerAfterDataCompaction [GOOD] >> DataShardFollowers::FollowerDuringSysPartSwitch >> TCreateAndDropViewTest::ParsingSecurityInvoker [GOOD] >> TCreateAndDropViewTest::ListCreatedView >> MediatorTest::WatchesBeforeFirstStep [GOOD] >> MediatorTest::RebootTargetTablets >> QueryActorTest::Rollback [GOOD] >> QueryActorTest::Commit >> TGRpcRateLimiterTest::AcquireResourceManyUsedActorApiWithCancelAfter [GOOD] >> StatisticsScan::RunScanOnShard [GOOD] >> KeyValueGRPCService::SimpleExecuteTransactionWithWrongGeneration [GOOD] >> KeyValueGRPCService::SimpleRenameUnexistedKey >> TestPurecalcFilter::NullValues [GOOD] >> test.py::test[solomon-BasicExtractMembers-default.txt] [GOOD] >> test.py::test[solomon-Downsampling-default.txt] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/rate_limiter/ut/unittest >> TGRpcRateLimiterTest::AcquireResourceManyUsedActorApiWithCancelAfter [GOOD] Test command err: 2025-06-24T21:31:29.555983Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631705055295787:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:31:29.560637Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001ba3/r3tmp/tmpbfIXw8/pdisk_1.dat 2025-06-24T21:31:30.290053Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:30.290624Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:30.290730Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:30.308283Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24368, node 1 2025-06-24T21:31:30.571678Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:31:30.662541Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:31:30.662572Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:31:30.662579Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:31:30.662706Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12649 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:31:31.464934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:31:31.739622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:31:34.777969Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519631725263709066:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:31:34.778028Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001ba3/r3tmp/tmpXDd1Ng/pdisk_1.dat 2025-06-24T21:31:35.144342Z node 4 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:35.154013Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:35.154113Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:35.161881Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6492, node 4 2025-06-24T21:31:35.327394Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:31:35.327424Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:31:35.327430Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:31:35.327584Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32043 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:31:35.631238Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:31:35.732073Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:31:35.870004Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:31:39.938344Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519631747431814485:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:31:39.969494Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001ba3/r3tmp/tmp5iwMwV/pdisk_1.dat 2025-06-24T21:31:40.099855Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:40.132148Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:40.132232Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:40.139909Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:40.160079Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 8415, node 7 2025-06-24T21:31:40.220798Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:31:40.220831Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:31:40.220845Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:31:40.221006Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6962 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:31:40.518094Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:31:40.636784Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:31:44.867922Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519631767202662514:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:31:44.868887Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_unde ... nts::TEvWakeup; 2025-06-24T21:32:21.662309Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:32:27.327844Z node 31 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[31:7519631953270729288:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:32:27.328119Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001ba3/r3tmp/tmpSYezUw/pdisk_1.dat 2025-06-24T21:32:27.673747Z node 31 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:27.704532Z node 31 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(31, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:27.704671Z node 31 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(31, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:27.714916Z node 31 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(31, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21706, node 31 2025-06-24T21:32:27.856416Z node 31 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:32:27.856445Z node 31 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:32:27.856456Z node 31 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:32:27.856621Z node 31 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64108 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:32:28.293342Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:32:28.377340Z node 31 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:32:28.437794Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:32:35.403135Z node 34 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[34:7519631987071618317:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:32:35.490740Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001ba3/r3tmp/tmp8baX9J/pdisk_1.dat 2025-06-24T21:32:36.020427Z node 34 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:36.077014Z node 34 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(34, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:36.083449Z node 34 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(34, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:36.103989Z node 34 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(34, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63008, node 34 2025-06-24T21:32:36.322764Z node 34 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:32:36.322799Z node 34 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:32:36.322811Z node 34 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:32:36.323004Z node 34 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20412 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-24T21:32:36.508666Z node 34 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:32:36.554233Z node 34 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:32:36.689273Z node 34 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001ba3/r3tmp/tmpNQ234V/pdisk_1.dat 2025-06-24T21:32:43.061701Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:32:43.176205Z node 37 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:43.208188Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:43.208311Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:43.217248Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:32:43.235024Z node 37 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 37 Type# 268639257 TServer::EnableGrpc on GrpcPort 19530, node 37 2025-06-24T21:32:43.295006Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:32:43.295039Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:32:43.295052Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to 
initialize from file: (empty maybe) 2025-06-24T21:32:43.295270Z node 37 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2225 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:32:43.558000Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:32:43.693366Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-24T21:32:43.927335Z node 37 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |98.8%| [TM] {RESULT} ydb/services/rate_limiter/ut/unittest >> TestPurecalcFilter::PartialPush >> ShowCreateView::Basic [GOOD] >> ShowCreateView::FromTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_column_stats/unittest >> StatisticsScan::RunScanOnShard [GOOD] Test command err: 2025-06-24T21:32:47.362833Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:32:47.363398Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:32:47.363619Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001662/r3tmp/tmpK9CsV3/pdisk_1.dat 2025-06-24T21:32:47.873222Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:32:47.881969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:32:47.949576Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:47.961955Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750800764033641 != 1750800764033645 2025-06-24T21:32:48.026183Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:48.026342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:48.044077Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:32:48.192606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:48.733608Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:692:2574], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:48.733804Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:702:2579], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:48.738360Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:48.749747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:32:48.803831Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:32:48.938517Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:706:2582], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:32:49.030933Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:776:2621] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:32:50.372586Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhxq7pjbk61r51fgd2cw7ba, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmQxYWM4OGYtOTI5MTgxNi00NzQ0NmEzZC1kNzBjZDU0Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root |98.8%| [TM] {RESULT} ydb/core/tx/datashard/ut_column_stats/unittest >> CoordinatorVolatile::CoordinatorRestartWithEnqueuedVolatileStep [GOOD] >> KqpPg::NoSelectFullScan [GOOD] >> KqpPg::LongDomainName >> TTxDataShardPrefixKMeansScan::BadRequest [GOOD] >> TTxDataShardPrefixKMeansScan::BuildToPosting >> TDataShardRSTest::TestCleanupInRS+UseSink [GOOD] >> TDataShardRSTest::TestCleanupInRS-UseSink >> TColumnShardTestSchema::ColdCompactionSmoke [GOOD] >> TopicSessionTests::TwoSessionsWithDifferentColumnTypes [GOOD] >> TAsyncIndexTests::SplitMainWithReboots[TabletReboots] [GOOD] >> MediatorTest::RebootTargetTablets [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/coordinator/ut/unittest >> CoordinatorVolatile::CoordinatorRestartWithEnqueuedVolatileStep [GOOD] Test command err: 2025-06-24T21:30:58.310770Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:624:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:30:58.311498Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:621:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:30:58.311602Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:30:58.311658Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:30:58.312154Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:30:58.312238Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00168a/r3tmp/tmpcGToLE/pdisk_1.dat 2025-06-24T21:30:58.908836Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ... waiting for the first mediator step 2025-06-24T21:30:59.226742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:30:59.226913Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:30:59.231948Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:30:59.232095Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:30:59.251846Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:30:59.252421Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:30:59.252838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected ... found first step to be 500 2025-06-24T21:30:59.535423Z node 1 :TX_COORDINATOR DEBUG: coordinator__acquire_read_step.cpp:97: tablet# 72057594046316545 HANDLE TEvAcquireReadStep ... acquired read step 500 ... waiting for the next mediator step ... found second step to be 1000 ... read step subscribe result: [500, 1000] 2025-06-24T21:31:00.014476Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:31:00.021795Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ... read step subscribe update: 2000 2025-06-24T21:31:00.729514Z node 1 :TX_COORDINATOR DEBUG: coordinator__acquire_read_step.cpp:97: tablet# 72057594046316545 HANDLE TEvAcquireReadStep ... acquired read step 2000 ... read step subscribe result: [2000, 2000] ... read step subscribe update: 2500 ... read step subscribe update: 2500 ... read step subscribe update: 3000 ... read step subscribe update: 4000 ... read step subscribe update: 5000 ... read step subscribe update: 6000 ... 
read step subscribe result: [2000, 6000] 2025-06-24T21:31:03.552290Z node 1 :HIVE WARN: hive_impl.cpp:791: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeDisconnected, NodeId 2 2025-06-24T21:31:03.552430Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connected -> Disconnecting 2025-06-24T21:31:03.552694Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 2 2025-06-24T21:31:03.552796Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037936131] NodeDisconnected NodeId# 2 2025-06-24T21:31:03.552838Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046447617] NodeDisconnected NodeId# 2 2025-06-24T21:31:03.552873Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037968897] NodeDisconnected NodeId# 2 2025-06-24T21:31:03.552901Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037936129] NodeDisconnected NodeId# 2 2025-06-24T21:31:03.553242Z node 2 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:91:2088] ServerId# [1:994:2593] TabletId# 72057594037932033 PipeClientId# [2:91:2088] 2025-06-24T21:31:03.553545Z node 2 :TX_PROXY WARN: proxy_impl.cpp:227: actor# [2:241:2129] HANDLE TEvClientDestroyed from tablet# 72057594046447617 2025-06-24T21:31:03.554034Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnecting -> Disconnected 2025-06-24T21:31:03.563389Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:03.593916Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:31:03.594852Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected ... read step subscribe update: 7000 ... read step subscribe update: 8000 ... read step subscribe update: 9000 ... read step subscribe update: 10000 ... read step subscribe update: 11000 2025-06-24T21:31:15.586200Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:446:2372], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:15.586794Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:31:15.587096Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:31:15.589490Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:15.590050Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:31:15.590265Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00168a/r3tmp/tmpwUCmRE/pdisk_1.dat 2025-06-24T21:31:15.972732Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:16.132574Z node 3 :TX_COORDINATOR DEBUG: coordinator__last_step_subscriptions.cpp:52: Processing TEvSubscribeLastStep from [4:1058:2336] at coordinator 72057594046316545 with seqNo 123 and cookie 234 2025-06-24T21:31:16.245793Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:16.245943Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:16.252889Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:16.253020Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:16.268795Z node 3 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-24T21:31:16.269357Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:16.269764Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:16.945436Z node 3 :TX_COORDINATOR DEBUG: coordinator__last_step_subscriptions.cpp:52: Processing TEvSubscribeLastStep from [4:1059:2337] at coordinator 72057594046316545 with seqNo 234 and cookie 345 2025-06-24T21:31:17.046379Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:31:17.046488Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:31:17.708028Z node 3 :TX_COORDINATOR DEBUG: coordinator__last_step_subscriptions.cpp:52: Processing TEvSubscribeLastStep from [4:1058:2336] at coordinator 72057594046316545 with seqNo 124 and cookie 245 2025-06-24T21:31:17.721058Z node 3 :TX_COORDINATOR DEBUG: coordinator__last_step_subscriptions.cpp:37: Ignored TEvSubscribeLastStep from [4:1058:2336] at coordinator 72057594046316545 with seqNo 123 existing seqNo 124 2025-06-24T21:31:18.412108Z node 3 :TX_COORDINATOR DEBUG: coordinator__last_step_subscriptions.cpp:97: Processing TEvUnsubscribeLastStep from [4:1058:2336] at coordinator 72057594046316545 with seqNo 124 2025-06-24T21:31:25.474723Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:25.475194Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:31:25.475333Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00168a/r3tmp/tmp72jymQ/pdisk_1.dat 2025-06-24T21:31:25.773133Z node 5 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 5 Type# 268639257 2025-06-24T21:31:25.807741Z node 5 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:25.823971Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:32:2079] 1750800682262032 != 1750800682262036 2025-06-24T21:31:25.873660Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:25.873843Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:25.885568Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:26.0960 ... rker# C1 ... waiting for blocked put responses 2025-06-24T21:32:51.609375Z node 20 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ... coordinator 72057594046316545 gen 2 is planning step 1050 2025-06-24T21:32:51.610688Z node 20 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 10000000 has been planned 2025-06-24T21:32:51.611738Z node 20 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 10000000 for mediator 72057594046382081 tablet 72057594047365120 ... blocking put [72057594046316545:2:7:1:24576:168:0] response ... waiting for planning for the required step ... waiting for planning for the required step ... coordinator 72057594046316545 gen 2 is planning step 1100 ... starting a new coordinator instance ... waiting for migrated state 2025-06-24T21:32:51.684790Z node 20 :TX_COORDINATOR INFO: coordinator_impl.cpp:614: OnTabletStop: 72057594046316545 reason = ReasonDemoted 2025-06-24T21:32:51.685366Z node 20 :TX_COORDINATOR INFO: coordinator_impl.cpp:614: OnTabletStop: 72057594046316545 reason = ReasonDemoted 2025-06-24T21:32:51.702293Z node 20 :TX_COORDINATOR INFO: coordinator__init.cpp:120: tablet# 72057594046316545 CreateTxInit Complete ... blocking state response from [20:518:2361] to [20:646:2519] LastSentStep: 1000 LastAcquiredStep: 0 LastConfirmedStep: 0 ... 
unblocking put responses and requests 2025-06-24T21:32:51.704069Z node 20 :TX_COORDINATOR INFO: coordinator_impl.cpp:614: OnTabletStop: 72057594046316545 reason = ReasonDemoted 2025-06-24T21:32:51.704380Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 10000000 stepId# 1050 Status# 17 SEND EvProposeTransactionStatus to# [20:554:2479] Proxy 2025-06-24T21:32:51.706373Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:322: tablet# 72057594046382081 server# [20:530:2457] disconnnected 2025-06-24T21:32:51.706496Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:201: Actor# [20:531:2458] MediatorId# 72057594046382081 HANDLE TEvServerDisconnected server# [20:530:2457] ... trying to plan tx 10000011 ... waiting for planned another persistent tx 2025-06-24T21:32:51.733635Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594046382081 server# [20:653:2529] connected 2025-06-24T21:32:51.733876Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:139: tablet# 72057594046382081 HANDLE EvCoordinatorSync 2025-06-24T21:32:51.733953Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:83: tablet# 72057594046382081 SEND EvCoordinatorSyncResult to# [20:650:2527] Cookie# 1 CompleteStep# 1000 LatestKnownStep# 1000 SubjectiveTime# 952 Coordinator# 72057594046316545 2025-06-24T21:32:51.734390Z node 20 :TX_COORDINATOR NOTICE: coordinator_impl.cpp:412: tablet# 72057594046316545 HANDLE EvMediatorQueueRestart MediatorId# 72057594046382081 2025-06-24T21:32:51.734461Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 1050, txid# 10000000 marker# C2 ... observed step: Transactions { AffectedSet: 72057594047365120 TxId: 10000000 } Step: 1050 PrevStep: 0 MediatorID: 72057594046382081 CoordinatorID: 72057594046316545 ActiveCoordinatorGeneration: 3 2025-06-24T21:32:51.771502Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:280: tablet# 72057594046382081 HANDLE EvCoordinatorStep coordinator# 72057594046316545 step# 1050 2025-06-24T21:32:51.771626Z node 20 :TX_MEDIATOR INFO: mediator_impl.cpp:287: Coordinator step: Mediator [72057594046382081], Coordinator [72057594046316545], step# [1050] transactions [1] 2025-06-24T21:32:51.771828Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:205: tablet# 72057594046382081 SEND EvCommitStep to# [20:531:2458] ExecQueue {TMediateStep From 1000 To# 1050Steps: {{TCoordinatorStep step# 1050 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 10000000 AckTo# [20:650:2527]}}TabletsToTransaction: {{tablet# 72057594047365120 txid# 10000000}}}}} marker# M0 2025-06-24T21:32:51.772089Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:72: Actor# [20:531:2458] MediatorId# 72057594046382081 HANDLE TEvCommitStep {TMediateStep From 1000 To# 1050Steps: {{TCoordinatorStep step# 1050 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 10000000 AckTo# [20:650:2527]}}TabletsToTransaction: {{tablet# 72057594047365120 txid# 10000000}}}}} marker# M1 2025-06-24T21:32:51.772227Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [20:531:2458] MediatorId# 72057594046382081 SEND Ev to# [20:532:2459] step# 1050 forTablet# 72057594047365120 txid# 10000000 marker# M3 2025-06-24T21:32:51.772342Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [20:531:2458] MediatorId# 72057594046382081 SEND TEvStepPlanComplete to# [20:532:2459] bucket.ActiveActor step# 1050 2025-06-24T21:32:51.772414Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# 
[20:531:2458] MediatorId# 72057594046382081 SEND TEvStepPlanComplete to# [20:533:2460] bucket.ActiveActor step# 1050 2025-06-24T21:32:51.772561Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:183: Actor# [20:532:2459] Mediator# 72057594046382081 HANDLE {TEvCommitTabletStep step# 1050 TabletId# 72057594047365120 Transactions {{TTx Moderator# 0 txid# 10000000 AckTo# [20:650:2527]}}} marker# M4 2025-06-24T21:32:51.772821Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [20:533:2460] Mediator# 72057594046382081 HANDLE {TEvStepPlanComplete step# 1050} 2025-06-24T21:32:51.773040Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [20:532:2459] Mediator# 72057594046382081 HANDLE {TEvStepPlanComplete step# 1050} 2025-06-24T21:32:51.773125Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:183: tablet# 72057594046316545 txid# 10000011 HANDLE EvProposeTransaction marker# C0 2025-06-24T21:32:51.773212Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:29: tablet# 72057594046316545 txid# 10000011 step# 1100 Status# 16 SEND to# [20:554:2479] Proxy marker# C1 2025-06-24T21:32:51.774081Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:248: Actor# [20:532:2459] Mediator# 72057594046382081 HANDLE NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594047365120 Status: OK ServerId: [20:657:2532] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-24T21:32:51.774214Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [20:532:2459] Mediator# 72057594046382081 SEND to# 72057594047365120 {TEvPlanStep step# 1050 MediatorId# 72057594046382081 TabletID 72057594047365120} ... observed tablet step: Transactions { TxId: 10000000 AckTo { RawX1: 0 RawX2: 0 } } Step: 1050 MediatorID: 72057594046382081 TabletID: 72057594047365120 ... blocked accept from 72057594047365120 ... waiting for planned another persistent tx ... coordinator 72057594046316545 gen 3 is planning step 1100 2025-06-24T21:32:51.795757Z node 20 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 10000011 has been planned 2025-06-24T21:32:51.795902Z node 20 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 10000011 for mediator 72057594046382081 tablet 72057594047365120 2025-06-24T21:32:51.797056Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 1100, txid# 10000011 marker# C2 2025-06-24T21:32:51.797182Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 10000011 stepId# 1100 Status# 17 SEND EvProposeTransactionStatus to# [20:554:2479] Proxy ... 
observed step: Transactions { AffectedSet: 72057594047365120 TxId: 10000011 } Step: 1100 PrevStep: 1050 MediatorID: 72057594046382081 CoordinatorID: 72057594046316545 ActiveCoordinatorGeneration: 3 2025-06-24T21:32:51.797625Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:280: tablet# 72057594046382081 HANDLE EvCoordinatorStep coordinator# 72057594046316545 step# 1100 2025-06-24T21:32:51.797700Z node 20 :TX_MEDIATOR INFO: mediator_impl.cpp:287: Coordinator step: Mediator [72057594046382081], Coordinator [72057594046316545], step# [1100] transactions [1] 2025-06-24T21:32:51.797884Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:205: tablet# 72057594046382081 SEND EvCommitStep to# [20:531:2458] ExecQueue {TMediateStep From 1050 To# 1100Steps: {{TCoordinatorStep step# 1100 PrevStep# 1050Transactions: {{TTx Moderator# 0 txid# 10000011 AckTo# [20:650:2527]}}TabletsToTransaction: {{tablet# 72057594047365120 txid# 10000011}}}}} marker# M0 2025-06-24T21:32:51.798089Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:72: Actor# [20:531:2458] MediatorId# 72057594046382081 HANDLE TEvCommitStep {TMediateStep From 1050 To# 1100Steps: {{TCoordinatorStep step# 1100 PrevStep# 1050Transactions: {{TTx Moderator# 0 txid# 10000011 AckTo# [20:650:2527]}}TabletsToTransaction: {{tablet# 72057594047365120 txid# 10000011}}}}} marker# M1 2025-06-24T21:32:51.798207Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [20:531:2458] MediatorId# 72057594046382081 SEND Ev to# [20:532:2459] step# 1100 forTablet# 72057594047365120 txid# 10000011 marker# M3 2025-06-24T21:32:51.798308Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [20:531:2458] MediatorId# 72057594046382081 SEND TEvStepPlanComplete to# [20:532:2459] bucket.ActiveActor step# 1100 2025-06-24T21:32:51.798371Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [20:531:2458] MediatorId# 72057594046382081 SEND TEvStepPlanComplete to# [20:533:2460] bucket.ActiveActor step# 1100 2025-06-24T21:32:51.798518Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:183: Actor# [20:532:2459] Mediator# 72057594046382081 HANDLE {TEvCommitTabletStep step# 1100 TabletId# 72057594047365120 Transactions {{TTx Moderator# 0 txid# 10000011 AckTo# [20:650:2527]}}} marker# M4 2025-06-24T21:32:51.798640Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [20:532:2459] Mediator# 72057594046382081 SEND to# 72057594047365120 {TEvPlanStep step# 1100 MediatorId# 72057594046382081 TabletID 72057594047365120} 2025-06-24T21:32:51.798758Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [20:533:2460] Mediator# 72057594046382081 HANDLE {TEvStepPlanComplete step# 1100} 2025-06-24T21:32:51.798903Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [20:532:2459] Mediator# 72057594046382081 HANDLE {TEvStepPlanComplete step# 1100} ... observed tablet step: Transactions { TxId: 10000011 AckTo { RawX1: 0 RawX2: 0 } } Step: 1100 MediatorID: 72057594046382081 TabletID: 72057594047365120 ... blocked accept from 72057594047365120 ... coordinator 72057594046316545 gen 3 is planning step 1150 ... 
observed step: Step: 1150 PrevStep: 1100 MediatorID: 72057594046382081 CoordinatorID: 72057594046316545 ActiveCoordinatorGeneration: 3 2025-06-24T21:32:51.812397Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:72: Actor# [20:531:2458] MediatorId# 72057594046382081 HANDLE TEvCommitStep {TMediateStep From 1100 To# 1150Steps: {{TCoordinatorStep step# 1150 PrevStep# 1100}}} marker# M1 2025-06-24T21:32:51.812513Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [20:531:2458] MediatorId# 72057594046382081 SEND TEvStepPlanComplete to# [20:532:2459] bucket.ActiveActor step# 1150 2025-06-24T21:32:51.812602Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [20:531:2458] MediatorId# 72057594046382081 SEND TEvStepPlanComplete to# [20:533:2460] bucket.ActiveActor step# 1150 2025-06-24T21:32:51.812693Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [20:532:2459] Mediator# 72057594046382081 HANDLE {TEvStepPlanComplete step# 1150} 2025-06-24T21:32:51.812765Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [20:533:2460] Mediator# 72057594046382081 HANDLE {TEvStepPlanComplete step# 1150} |98.8%| [TM] {RESULT} ydb/core/tx/coordinator/ut/unittest >> DataShardReplication::SplitMergeChangesReboots [GOOD] >> DataShardReplication::ReplicatedTable+UseSink >> TCreateAndDropViewTest::ListCreatedView [GOOD] >> TCreateAndDropViewTest::CreateSameViewTwice >> TopicSessionTests::RestartSessionIfQueryStopped >> MediatorTest::ResendSubset >> test_example.py::TestExample::test_linked_with_testcase [GOOD] >> BasicExample::BasicExample [GOOD] >> test_inserts.py::TestYdbInsertsOperations::test_concurrent_inserts [GOOD] >> QueryActorTest::Commit [GOOD] >> QueryActorTest::StreamQuery >> test_inserts.py::TestYdbInsertsOperations::test_transactional_update >> MediatorTimeCast::ReadStepSubscribe [GOOD] >> MediatorTimeCast::GranularTimecast >> DataCleanup::ForceDataCleanupWithRestart [GOOD] >> DataCleanup::OutReadSetsCleanedAfterCopyTable >> SdkCredProvider::PingFromProviderSyncDiscovery >> KqpTpch::Query07 [GOOD] >> KqpTpch::Query08 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitMainWithReboots[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T21:29:29.987616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 
2025-06-24T21:29:29.987715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:29:29.987755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:29:29.987792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:29:29.987843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:29:29.987885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:29:29.987955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:29:29.988038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:29:29.989164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:29:29.989584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:29:30.087633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:29:30.087718Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:30.088616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T21:29:30.113750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:29:30.114334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:29:30.114528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:29:30.124075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:29:30.124392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:29:30.125183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:29:30.125510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:29:30.129284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:29:30.129546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:29:30.130906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:29:30.130979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:29:30.131235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:29:30.131315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:29:30.131367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:29:30.131528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T21:29:30.143546Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:29:30.281842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:29:30.282036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:30.282217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:29:30.282260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:29:30.282488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:29:30.282589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:29:30.285603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 
72057594046678944 2025-06-24T21:29:30.285836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:29:30.286018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:30.286087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:29:30.286148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:29:30.286184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:29:30.288807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:30.288879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:29:30.288923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:29:30.290987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:30.291028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:30.291072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:29:30.291127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:29:30.306618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:29:30.309367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:29:30.309638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:29:30.310796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:29:30.310950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep 
Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969454 } } Step: 5000001 Media ... ompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\001\000\004\000\000\0002\000\000\000" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 
AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:32:53.715948Z node 93 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409548:2][72075186233409546][93:783:2614] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-24T21:32:53.716079Z node 93 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][93:721:2614] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-24T21:32:53.716248Z node 93 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409548:2][72075186233409546][93:783:2614] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750800773681304 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1750800773681304 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750800773681304 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-24T21:32:53.719274Z node 93 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409548:2][72075186233409546][93:783:2614] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 3 2025-06-24T21:32:53.719402Z node 93 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][93:721:2614] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-24T21:32:53.912770Z node 93 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:32:53.913150Z node 93 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 433us result status StatusSuccess 2025-06-24T21:32:53.914211Z node 93 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 
TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KeyValueGRPCService::SimpleRenameUnexistedKey [GOOD] >> KeyValueGRPCService::SimpleConcatUnexistedKey >> test_liveness_wardens.py::TestLivenessWarden::test_hive_liveness_warden_reports_issues [GOOD] >> test_liveness_wardens.py::TestLivenessWarden::test_scheme_shard_has_no_in_flight_transactions >> DataShardFollowers::FollowerDuringSysPartSwitch [GOOD] >> DataShardFollowers::FollowerDuringDataPartSwitch >> DataShardStats::SharedCacheGarbage [GOOD] >> DataShardStats::CollectStatsForSeveralParts ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::ColdCompactionSmoke [GOOD] Test command err: 2025-06-24T21:27:57.648927Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-24T21:27:57.678903Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-24T21:27:57.679290Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-24T21:27:57.687565Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:57.687827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:57.688120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:57.688245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:57.688394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:57.688518Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:57.688655Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:57.688777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:57.688891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:57.689013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:57.689157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:57.724794Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-24T21:27:57.725096Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-24T21:27:57.725175Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-24T21:27:57.725394Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:57.725573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-24T21:27:57.725666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-24T21:27:57.725715Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-24T21:27:57.725819Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-24T21:27:57.725899Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-24T21:27:57.725953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-24T21:27:57.725987Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-24T21:27:57.726200Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-24T21:27:57.726292Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-24T21:27:57.726339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-24T21:27:57.726373Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-24T21:27:57.726462Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-24T21:27:57.726517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-24T21:27:57.726565Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-24T21:27:57.726594Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-24T21:27:57.726665Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-24T21:27:57.726705Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-24T21:27:57.726749Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-24T21:27:57.726985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-24T21:27:57.727047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-24T21:27:57.727086Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-24T21:27:57.727317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-24T21:27:57.727374Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-24T21:27:57.727429Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-24T21:27:57.727615Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-24T21:27:57.727669Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-24T21:27:57.727701Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-24T21:27:57.727793Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-24T21:27:57.727880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-24T21:27:57.727926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-24T21:27:57.727958Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-24T21:27:57.728455Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=52; 2025-06-24T21:27:57.728568Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=50; 2025-06-24T21:27:57.728661Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=39; 2025-06-24T21:27:57.728752Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=37; 2025-06-24T21:27:57.728861Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-24T21:27:57.728976Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-24T21:27:57.729029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-24T21:27:57.729099Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tabl ... B:0:9160];;column_id:8;chunk_idx:44;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:45;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:46;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:47;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:48;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:49;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:50;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:51;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:52;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:53;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:54;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:55;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:56;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:57;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:58;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:59;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:60;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:61;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:62;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:63;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:64;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:65;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:66;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:67;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:68;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:69;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:70;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:0;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:1;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:2;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:3;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:4;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:5;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:6;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:7;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:8;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:9;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:10;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:11;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:12;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:13;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:14;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:15;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:16;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:17;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:18;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:19;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:20;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:21;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:22;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:23;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:24;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:25;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:26;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:27;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:28;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:29;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:30;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:31;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:32;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:33;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:34;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:35
;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:36;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:37;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:38;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:39;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:40;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:41;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:42;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:43;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:44;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:45;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:46;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:66;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:67;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:68;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:69;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:70;blob_range:[NO_BLOB:0:9160];;column_id:10;chunk_idx:0;blob_range:[NO_BLOB:0:9056];;column_id:10;chunk_idx:1;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:2;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:3;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:4;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:5;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:6;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:7;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:8;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:9;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:10;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:11;blob_range:[NO_BLOB:0:9056];;column_id:10;chunk_idx:12;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:13;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:14;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:15;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:16;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:17;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:18;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:19;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:20;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:21;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:22;blob_range:[NO_BLOB:0:9056];;column_id:10;chunk_idx:23;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:24;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:25;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:26;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:27;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:28;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:29;blob_range:[NO_BLOB:0:90
64];;column_id:10;chunk_idx:30;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:31;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:32;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:33;blob_range:[NO_BLOB:0:9056];;column_id:10;chunk_idx:34;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:35;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:36;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:37;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:38;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:39;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:40;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:41;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:42;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:43;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:44;blob_range:[NO_BLOB:0:9056];;column_id:10;chunk_idx:45;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:46;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:47;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:48;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:49;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:50;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:51;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:52;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:53;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:54;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:55;blob_range:[NO_BLOB:0:9056];;column_id:10;chunk_idx:56;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:57;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:58;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:59;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:60;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:61;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:62;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:63;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:64;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:65;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:66;blob_range:[NO_BLOB:0:9056];;column_id:10;chunk_idx:67;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:68;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:69;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:70;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:71;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:72;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:73;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:74;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:75;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:76;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:77;blob_range:[NO_BLOB:0:8840];;column_id:10;chunk_idx:78;blob_range:[NO_BLOB:0:8064];;column_id:10;chunk_idx:79;blob_range:[NO_BLOB:0:8064];;;;switched=(portion_id:64;path_id:9438184000001;records_count:80000;schema_version:1;level:0;cs:plan_step=1750800487319;tx_id=128;;wi:27;;column_size:6817016;index_size:0;meta:(()););; 2025-06-24T21:32:53.605995Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;event=on_execution;consumer=GENERAL_COMPACTION;task_id=c753a5a0-514211f0-87d61a01-c1f34ad7;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=c753a5a0-514211f0-87d61a01-c1f34ad7;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=9437184;parent_id=[1:127:2157];task_id=c753a5a0-514211f0-87d61a01-c1f34ad7;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=1; 
2025-06-24T21:32:53.610977Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-24T21:32:53.617547Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:59;event=TTxWriteDraft; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=write_controller.h:66;event=IWriteController aborted;reason=TTxWriteDraft aborted before complete; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=compacted_blob_constructor.cpp:47;event=TCompactedWriteController::DoAbort;reason=TTxWriteDraft aborted before complete; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TCompactedWriteController destructed with WriteIndexEv and WriteIndexEv->IndexChanges;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/hot' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/hot' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 >> KqpQueryService::TableSink_ReplaceDuplicatesOlap [GOOD] >> KqpQueryService::TableSink_Oltp_Replace-UseSink >> QuoterWithKesusTest::GetsQuotaAfterPause [GOOD] >> QuoterWithKesusTest::GetsSeveralQuotas >> TestPurecalcFilter::PartialPush [GOOD] >> SdkCredProvider::PingFromProviderSyncDiscovery [GOOD] >> SdkCredProvider::PingFromProviderAsyncDiscovery >> TestPurecalcFilter::CompilationValidation >> test.py::test[solomon-Downsampling-default.txt] [GOOD] >> test.py::test[solomon-DownsamplingValidSettings-default.txt] |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/basic_example/gtest >> BasicExample::BasicExample [GOOD] |98.8%| [TM] {RESULT} ydb/public/sdk/cpp/tests/integration/basic_example/gtest >> MediatorTest::ResendSubset [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert-UseSink+UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert+UseSink+UseDataQuery >> MediatorTest::ResendNotSubset >> MediatorTimeCast::GranularTimecast [GOOD] >> KqpPg::LongDomainName [GOOD] >> TCreateAndDropViewTest::CreateSameViewTwice [GOOD] >> TCreateAndDropViewTest::CreateViewOccupiedName >> SdkCredProvider::PingFromProviderAsyncDiscovery [GOOD] >> test_liveness_wardens.py::TestLivenessWarden::test_scheme_shard_has_no_in_flight_transactions [GOOD] >> KeyValueGRPCService::SimpleConcatUnexistedKey [GOOD] >> KeyValueGRPCService::SimpleCopyUnexistedKey >> DataShardReplication::ReplicatedTable+UseSink [GOOD] >> DataShardReplication::ReplicatedTable-UseSink >> Etcd_KV::OptimisticCreate [GOOD] >> Etcd_KV::OptimisticUpdate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/time_cast/ut/unittest >> MediatorTimeCast::GranularTimecast [GOOD] Test command err: 2025-06-24T21:32:52.026069Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:32:52.026690Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:32:52.026901Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00169b/r3tmp/tmpRlc8Mc/pdisk_1.dat 2025-06-24T21:32:52.751554Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:32:52.752707Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:916: Actor# [1:24:2071] HANDLE NKikimr::TEvMediatorTimecast::TEvSubscribeReadStep{ CoordinatorId# 72057594046316545 } 2025-06-24T21:32:52.753458Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:578: Actor# [1:24:2071] HANDLE EvClientConnected 2025-06-24T21:32:52.758522Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:993: Actor# [1:24:2071] HANDLE TEvSubscribeReadStepResult CoordinatorID: 72057594046316545 SeqNo: 1 LastAcquireStep: 0 NextAcquireStep: 0 2025-06-24T21:32:52.809380Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:52.826333Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750800768243680 != 1750800768243684 2025-06-24T21:32:52.891955Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:52.892133Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:52.908205Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:32:53.002644Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:24:2071] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 500 2025-06-24T21:32:53.116909Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:24:2071] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 1000 2025-06-24T21:32:53.140253Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:32:53.339085Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:24:2071] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 2000 2025-06-24T21:32:53.482461Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:24:2071] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 3000 2025-06-24T21:32:53.633459Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:24:2071] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 4000 2025-06-24T21:32:53.764862Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:24:2071] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 5000 2025-06-24T21:32:53.824106Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:964: 
Actor# [1:24:2071] HANDLE NKikimr::TEvMediatorTimecast::TEvWaitReadStep{ CoordinatorId# 72057594046316545 ReadStep# 7000 } 2025-06-24T21:32:53.965976Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:24:2071] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 6000 2025-06-24T21:32:54.096291Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:24:2071] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 7000 2025-06-24T21:32:54.109054Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:587: Actor# [1:24:2071] HANDLE EvClientDestroyed 2025-06-24T21:32:54.136082Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:578: Actor# [1:24:2071] HANDLE EvClientConnected 2025-06-24T21:32:54.136958Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:993: Actor# [1:24:2071] HANDLE TEvSubscribeReadStepResult CoordinatorID: 72057594046316545 SeqNo: 2 LastAcquireStep: 0 NextAcquireStep: 7000 2025-06-24T21:32:54.149387Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:964: Actor# [1:24:2071] HANDLE NKikimr::TEvMediatorTimecast::TEvWaitReadStep{ CoordinatorId# 72057594046316545 ReadStep# 12000 } 2025-06-24T21:32:54.270038Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:24:2071] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 2 NextAcquireStep: 7500 2025-06-24T21:32:54.365664Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:24:2071] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 2 NextAcquireStep: 8000 2025-06-24T21:32:54.536482Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:24:2071] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 2 NextAcquireStep: 9000 2025-06-24T21:32:54.679320Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:24:2071] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 2 NextAcquireStep: 10000 2025-06-24T21:32:54.831429Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:24:2071] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 2 NextAcquireStep: 11000 2025-06-24T21:32:54.985310Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:24:2071] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 2 NextAcquireStep: 12000 2025-06-24T21:32:58.941880Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:32:58.942271Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:32:58.942391Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00169b/r3tmp/tmpabSZFq/pdisk_1.dat 2025-06-24T21:32:59.239723Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-24T21:32:59.260054Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:454: Actor# [2:24:2071] HANDLE {TEvRegisterTablet TabletId# 72057594047365120 ProcessingParams { Version: 0 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 1 Mediators: 72057594046382081 }} 2025-06-24T21:32:59.260934Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:270: Actor# [2:24:2071] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatch Bucket: 0 SubscriptionId: 1 Tablets: 72057594047365120 MinStep: 0 2025-06-24T21:32:59.261019Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:372: Actor# [2:24:2071] SEND to Mediator# 72057594046382081 {TEvWatch Bucket# 0} 2025-06-24T21:32:59.261080Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:502: Actor# [2:24:2071] SEND to Sender# [2:609:2508] {TEvRegisterTabletResult TabletId# 72057594047365120 Entry# 0} 2025-06-24T21:32:59.261543Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:578: Actor# [2:24:2071] HANDLE EvClientConnected 2025-06-24T21:32:59.261659Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:24:2071] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 1 LatestStep: 0 2025-06-24T21:32:59.261843Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:625: Actor# [2:24:2071] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 0} 2025-06-24T21:32:59.262089Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:454: Actor# [2:24:2071] HANDLE {TEvRegisterTablet TabletId# 72057594047365121 ProcessingParams { Version: 0 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 1 Mediators: 72057594046382081 }} 2025-06-24T21:32:59.262188Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:298: Actor# [2:24:2071] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatchModify Bucket: 0 SubscriptionId: 2 AddTablets: 72057594047365121 2025-06-24T21:32:59.262256Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:502: Actor# [2:24:2071] SEND to Sender# [2:612:2510] {TEvRegisterTabletResult TabletId# 72057594047365121 Entry# 0} 2025-06-24T21:32:59.262477Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:24:2071] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 2 LatestStep: 0 2025-06-24T21:32:59.262696Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:454: Actor# [2:24:2071] HANDLE {TEvRegisterTablet TabletId# 72057594047365123 ProcessingParams { Version: 0 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 1 Mediators: 72057594046382081 }} 2025-06-24T21:32:59.262778Z node 2 
:TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:298: Actor# [2:24:2071] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatchModify Bucket: 0 SubscriptionId: 3 AddTablets: 72057594047365123 2025-06-24T21:32:59.262830Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:502: Actor# [2:24:2071] SEND to Sender# [2:613:2511] {TEvRegisterTabletResult TabletId# 72057594047365123 Entry# 0} 2025-06-24T21:32:59.262999Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:24:2071] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 3 LatestStep: 0 2025-06-24T21:32:59.288432Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:59.290124Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1750800775496797 != 1750800775496801 2025-06-24T21:32:59.336225Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:59.336358Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:59.348575Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:32:59.436511Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:24:2071] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 3 LatestStep: 500 2025-06-24T21:32:59.436625Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:625: Actor# [2:24:2071] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 500} 2025-06-24T21:32:59.556456Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:24:2071] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 3 LatestStep: 1000 2025-06-24T21:32:59.556559Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:625: Actor# [2:24:2071] HANDLE {TEvUpdate Mediator# 7205759404638208 ... 0 ... unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2025-06-24T21:33:00.158802Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:24:2071] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 0 ... unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 2500 FrozenTablets: 72057594047365120 FrozenTablets: 72057594047365121 FrozenSteps: 2499 FrozenSteps: 2499 ... unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2025-06-24T21:33:00.169525Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:24:2071] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 2500 FrozenTablets: 72057594047365120 FrozenTablets: 72057594047365121 FrozenSteps: 2499 FrozenSteps: 2499 ... unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3000 ... 
unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2025-06-24T21:33:00.180226Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:24:2071] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3000 ... unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3500 ... unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2025-06-24T21:33:00.191358Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:24:2071] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3500 ... unblocking plan for tablet2 ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3500 FrozenTablets: 72057594047365121 FrozenSteps: 2999 ... unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2025-06-24T21:33:00.213687Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:24:2071] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3500 FrozenTablets: 72057594047365121 FrozenSteps: 2999 ... unblocking plan for tablet2 ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3500 FrozenTablets: 72057594047365121 FrozenSteps: 3499 ... unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2025-06-24T21:33:00.239724Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:24:2071] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3500 FrozenTablets: 72057594047365121 FrozenSteps: 3499 ... unblocking plan for tablet2 ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3500 UnfrozenTablets: 72057594047365121 ... 
unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2025-06-24T21:33:00.261197Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:24:2071] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3500 UnfrozenTablets: 72057594047365121 ... restarting mediator 2025-06-24T21:33:00.272702Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:587: Actor# [2:24:2071] HANDLE EvClientDestroyed 2025-06-24T21:33:00.272860Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:270: Actor# [2:24:2071] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatch Bucket: 0 SubscriptionId: 8 Tablets: 72057594047365123 Tablets: 72057594047365120 Tablets: 72057594047365121 MinStep: 3500 2025-06-24T21:33:00.272915Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:355: Actor# [2:24:2071] SEND to Mediator# 72057594046382081 {TEvWatch Bucket# 0} 2025-06-24T21:33:00.273389Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:578: Actor# [2:24:2071] HANDLE EvClientConnected 2025-06-24T21:33:00.273480Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:270: Actor# [2:24:2071] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatch Bucket: 0 SubscriptionId: 9 Tablets: 72057594047365123 Tablets: 72057594047365120 Tablets: 72057594047365121 MinStep: 3500 2025-06-24T21:33:00.273522Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:355: Actor# [2:24:2071] SEND to Mediator# 72057594046382081 {TEvWatch Bucket# 0} 2025-06-24T21:33:00.273936Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:578: Actor# [2:24:2071] HANDLE EvClientConnected 2025-06-24T21:33:00.273999Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:270: Actor# [2:24:2071] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatch Bucket: 0 SubscriptionId: 10 Tablets: 72057594047365123 Tablets: 72057594047365120 Tablets: 72057594047365121 MinStep: 3500 2025-06-24T21:33:00.274027Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:355: Actor# [2:24:2071] SEND to Mediator# 72057594046382081 {TEvWatch Bucket# 0} 2025-06-24T21:33:00.274568Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:578: Actor# [2:24:2071] HANDLE EvClientConnected 2025-06-24T21:33:00.274653Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:270: Actor# [2:24:2071] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatch Bucket: 0 SubscriptionId: 11 Tablets: 72057594047365123 Tablets: 72057594047365120 Tablets: 72057594047365121 MinStep: 3500 2025-06-24T21:33:00.274713Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:355: Actor# [2:24:2071] SEND to Mediator# 72057594046382081 {TEvWatch Bucket# 0} 2025-06-24T21:33:00.280175Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:578: Actor# [2:24:2071] HANDLE EvClientConnected ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 2025-06-24T21:33:00.280900Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:625: Actor# [2:24:2071] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 0} ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... 
blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... fully unblocking tx1 ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 2025-06-24T21:33:00.308270Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:625: Actor# [2:24:2071] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 2500} ... tablet1 at 2500 ... tablet2 at 3500 ... tablet3 at 3500 ... fully unblocking tx2 ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 2025-06-24T21:33:00.321756Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:625: Actor# [2:24:2071] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 3000} ... tablet1 at 3000 ... tablet2 at 3500 ... tablet3 at 3500 ... fully unblocking tx3 ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... 
blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 2025-06-24T21:33:00.334676Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:625: Actor# [2:24:2071] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 3500} ... tablet1 at 3500 ... tablet2 at 3500 ... tablet3 at 3500 |98.8%| [TM] {RESULT} ydb/core/tx/time_cast/ut/unittest >> TSentinelUnstableTests::BSControllerCantChangeStatus ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::LongDomainName [GOOD] Test command err: Trying to start YDB, gRPC: 22459, MsgBus: 8088 2025-06-24T21:30:45.920133Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631515823981122:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:30:45.926789Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003eb1/r3tmp/tmpekdC9c/pdisk_1.dat 2025-06-24T21:30:46.417881Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631515823981088:2079] 1750800645910429 != 1750800645910432 2025-06-24T21:30:46.430052Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:30:46.430162Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:30:46.430487Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:46.433304Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22459, node 1 2025-06-24T21:30:46.687685Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:30:46.687721Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:30:46.687729Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:30:46.687912Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:30:46.920261Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8088 TClient is connected to server localhost:8088 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:30:47.523716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:30:49.037437Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631533003850927:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:49.037546Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631533003850916:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:49.037684Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:49.043997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:30:49.055939Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631533003850930:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:30:49.124312Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631533003850981:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 26529, MsgBus: 30888 2025-06-24T21:30:50.416786Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631533920858979:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:30:50.416832Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003eb1/r3tmp/tmpDKdVPq/pdisk_1.dat 2025-06-24T21:30:50.611080Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:50.614223Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631533920858957:2079] 1750800650416167 != 1750800650416170 2025-06-24T21:30:50.629882Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:30:50.629988Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:30:50.632144Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26529, node 2 2025-06-24T21:30:50.686423Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:30:50.686446Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:30:50.686454Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:30:50.686576Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30888 TClient is connected to server localhost:30888 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:30:51.202595Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:30:51.211882Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:30:51.432801Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:30:53.741286Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631546805761480:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:53.741413Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:53.741665Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631546805761492:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:53.746375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:30:53.763201Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-24T21:30:53.764341Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631546805761494:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:30:53.854784Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631546805761545:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, ... aseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:48.452499Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:32:48.477605Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519632041638621496:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:32:48.538499Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519632041638621549:2340] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:32:48.588056Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["pgbench_accounts"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["aid (null, 3)","aid [7, 7]"],"Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/pgbench_accounts","E-Rows":"0","Table":"pgbench_accounts","ReadRangesKeys":["aid"],"ReadColumns":["abalance"],"E-Cost":"0","ReadRangesExpectedSize":2}],"Node Type":"TableRangeScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":2}],"SortBy":"input.abalance","Name":"Sort"}],"Node Type":"Sort"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/pgbench_accounts","reads":[{"columns":["abalance"],"scan_by":["aid (null, 3)","aid [7, 7]"],"type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":4,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["aid (null, 3)","aid [7, 7]"],"Name":"TableRangeScan","Path":"\/Root\/pgbench_accounts","E-Rows":"0","Table":"pgbench_accounts","ReadRangesKeys":["aid"],"ReadColumns":["abalance"],"E-Cost":"0","ReadRangesExpectedSize":2}],"Node Type":"TableRangeScan"}],"Operators":[{"SortBy":"input.abalance","Name":"Sort"}],"Node Type":"Sort"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["pgbench_accounts"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","ReadRange":["aid (4, 3)"],"E-Size":"0","Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/pgbench_accounts","E-Rows":"1","Table":"pgbench_accounts","ReadColumns":["abalance"],"E-Cost":"0"}],"Node Type":"TableRangeScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/pgbench_accounts","reads":[{"columns":["abalance"],"scan_by":["aid (4, 3)"],"type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":4,"Operators":[{"Scan":"Parallel","ReadRange":["aid (4, 3)"],"E-Size":"0","Name":"TableRangeScan","Path":"\/Root\/pgbench_accounts","E-Rows":"1","Table":"pgbench_accounts","ReadColumns":["abalance"],"E-Cost":"0"}],"Node 
Type":"TableRangeScan"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} Trying to start YDB, gRPC: 3345, MsgBus: 8481 2025-06-24T21:32:52.811095Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7519632061614265273:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:32:52.811176Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003eb1/r3tmp/tmp7bU4OT/pdisk_1.dat 2025-06-24T21:32:53.115552Z node 11 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [11:7519632061614265255:2079] 1750800772810045 != 1750800772810048 2025-06-24T21:32:53.135420Z node 11 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:53.143068Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:53.143776Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:53.146748Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3345, node 11 2025-06-24T21:32:53.231897Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:32:53.231924Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:32:53.231937Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:32:53.232127Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8481 2025-06-24T21:32:53.890844Z node 11 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8481 WaitRootIsUp 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'... TClient::Ls request: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_D... 
(TRUNCATED) WaitRootIsUp 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' success. 2025-06-24T21:32:54.187995Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:32:54.200080Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:32:57.812731Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7519632061614265273:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:32:57.812867Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:32:59.164048Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519632091679036959:2294], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:59.164190Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:59.164608Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519632091679036986:2297], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:59.171314Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:32:59.191774Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7519632091679036988:2298], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:32:59.276215Z node 11 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [11:7519632091679037039:2342] txid# 281474976710659, issues: { message: "Check failed: path: \'/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:32:59.318285Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> ShowCreateView::FromTable [FAIL] >> ShowCreateView::WithPairedTablePathPrefix >> test_inserts.py::TestYdbInsertsOperations::test_transactional_update [GOOD] >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert >> KqpQueryService::ReturnAndCloseSameTime [GOOD] >> KqpQueryService::ReplaceIntoWithDefaultValue >> DataCleanup::OutReadSetsCleanedAfterCopyTable [GOOD] >> DataCleanup::BorrowerDataCleanedAfterCopyTable >> TestPurecalcFilter::CompilationValidation [GOOD] >> MediatorTest::ResendNotSubset [GOOD] >> TestRawParser::Simple >> DataShardFollowers::FollowerDuringDataPartSwitch [GOOD] >> DataShardFollowers::FollowerReadDuringSplit >> DataShardStats::CollectStatsForSeveralParts [GOOD] >> DataShardStats::NoData >> MediatorTest::OneCoordinatorResendTxNotLost >> test.py::test[solomon-DownsamplingValidSettings-default.txt] [GOOD] |98.9%| [TA] $(B)/ydb/core/tx/columnshard/ut_schema/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sdk/cpp/sdk_credprovider/unittest >> SdkCredProvider::PingFromProviderAsyncDiscovery [GOOD] Test command err: 2 2 >> TestRawParser::Simple [GOOD] >> test.py::test[solomon-HistResponse-default.txt] >> TAsyncIndexTests::DropTableWithInflightChanges[TabletReboots] [GOOD] >> TestRawParser::ManyValues |98.9%| [TM] {RESULT} ydb/tests/functional/sdk/cpp/sdk_credprovider/unittest >> QuoterWithKesusTest::GetsSeveralQuotas [GOOD] >> QuoterWithKesusTest::KesusRecreation >> TestRawParser::ManyValues [GOOD] >> DiscoveryIsNotBroken::NoKafkaEndpointInDiscovery >> TestRawParser::TypeKindsValidation >> test_postgres.py::TestPostgresSuite::test_postgres_suite[strings] [FAIL] >> test_example.py::TestExample::test_skipped_with_issue [SKIPPED] >> TestRawParser::TypeKindsValidation [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::DropTableWithInflightChanges[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T21:30:30.352953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:30:30.353059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:30.353102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:30:30.353136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:30:30.353189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:30:30.353242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:30:30.353310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:30.353388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, 
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:30:30.354160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:30:30.354516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:30:30.439766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:30:30.439831Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:30.440633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T21:30:30.457541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:30:30.458121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:30:30.458326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:30:30.470754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:30:30.470964Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:30:30.471525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:30.471741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:30:30.474553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:30.474787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:30:30.476127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:30.476193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:30.476411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:30:30.476479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:30.476528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:30:30.476664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 
72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T21:30:30.484077Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:30:30.627442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:30.627667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:30.627927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:30:30.627984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:30:30.628244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:30:30.628375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:30.630892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:30.631103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:30:30.631371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:30.631450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:30:30.631515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:30:30.631556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:30:30.633800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:30.633869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:30.633923Z node 
1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:30:30.635848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:30.635898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:30.635946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:30.636007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:30:30.640065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:30.642190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:30:30.642383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:30:30.643523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:30.643672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969454 } } Step: 5000001 Media ... 
de_effects.cpp:907: Part operation is done id#1003:0 progress is 3/3 2025-06-24T21:33:05.824626Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-24T21:33:05.824666Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 1003, ready parts: 3/3, is published: false 2025-06-24T21:33:05.824705Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1672: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-24T21:33:05.824776Z node 114 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1003:0 2025-06-24T21:33:05.824817Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1003:0 2025-06-24T21:33:05.824940Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-24T21:33:05.824984Z node 114 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1003:1 2025-06-24T21:33:05.825015Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1003:1 2025-06-24T21:33:05.825057Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-24T21:33:05.825091Z node 114 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:974: Operation and all the parts is done, operation id: 1003:2 2025-06-24T21:33:05.825119Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5254: RemoveTx for txid 1003:2 2025-06-24T21:33:05.825175Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-24T21:33:05.825212Z node 114 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:983: Publication still in progress, tx: 1003, publications: 1, subscribers: 0 2025-06-24T21:33:05.825252Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 5], 18446744073709551615 2025-06-24T21:33:05.826794Z node 114 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-24T21:33:05.826911Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-24T21:33:05.826955Z node 114 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-24T21:33:05.826998Z node 114 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-24T21:33:05.827041Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-24T21:33:05.827157Z node 114 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-24T21:33:05.828558Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-24T21:33:05.828679Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-24T21:33:05.833961Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-24T21:33:05.834067Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-24T21:33:05.834372Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-24T21:33:05.841714Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-24T21:33:05.848895Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 346 RawX2: 489626274072 } TabletId: 72075186233409546 State: 4 2025-06-24T21:33:05.849017Z node 114 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409546, state: Offline, at schemeshard: 72057594046678944 2025-06-24T21:33:05.851664Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:33:05.852280Z node 114 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409546 2025-06-24T21:33:05.852515Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-24T21:33:05.852843Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 Forgetting tablet 72075186233409546 2025-06-24T21:33:05.855994Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:33:05.856044Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-24T21:33:05.856114Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-24T21:33:05.856149Z 
node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-24T21:33:05.856181Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-24T21:33:05.864439Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-24T21:33:05.864522Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409546 2025-06-24T21:33:05.865277Z node 114 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-24T21:33:05.865531Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-24T21:33:05.865569Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-24T21:33:05.866752Z node 114 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-24T21:33:05.866877Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-24T21:33:05.866910Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [114:629:2553] 2025-06-24T21:33:05.877038Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5629: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 350 RawX2: 489626274075 } TabletId: 72075186233409547 State: 4 2025-06-24T21:33:05.877159Z node 114 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409547, state: Offline, at schemeshard: 72057594046678944 2025-06-24T21:33:05.879836Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-24T21:33:05.880445Z node 114 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409547 2025-06-24T21:33:05.880695Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-24T21:33:05.881024Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 Forgetting tablet 72075186233409547 2025-06-24T21:33:05.888324Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-24T21:33:05.888402Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: 
TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-24T21:33:05.888509Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-24T21:33:05.893160Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-24T21:33:05.893266Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409547 2025-06-24T21:33:05.893926Z node 114 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 1003 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted 2025-06-24T21:33:05.894356Z node 114 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-24T21:33:05.894438Z node 114 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 >> QueryActorTest::StreamQuery [GOOD] |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/postgresql/py3test >> test_postgres.py::TestPostgresSuite::test_postgres_suite[strings] [FAIL] |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/example/py3test >> test_example.py::TestExample::test_skipped_with_issue [SKIPPED] |98.9%| [TA] {RESULT} $(B)/ydb/core/tx/columnshard/ut_schema/test-results/unittest/{meta.json ... 
results_accumulator.log} |98.9%| [TM] {RESULT} ydb/tests/example/py3test |98.9%| [TM] {RESULT} ydb/tests/functional/postgresql/py3test >> KeyValueGRPCService::SimpleCopyUnexistedKey [GOOD] >> KeyValueGRPCService::SimpleWriteRead >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert [GOOD] >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert_same_values >> TCreateAndDropViewTest::CreateViewOccupiedName [GOOD] >> TCreateAndDropViewTest::CreateViewIfNotExists >> KqpQueryService::TableSink_Oltp_Replace-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/row_dispatcher/format_handler/ut/unittest >> TestRawParser::TypeKindsValidation [GOOD] Test command err: 2025-06-24T21:31:42.605665Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:196: TTopicFilters: Create filter with id [0:0:0] 2025-06-24T21:31:42.605698Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:200: TTopicFilters: Create purecalc filter for predicate 'where col_0 == "str1"' (filter id: [0:0:0]) 2025-06-24T21:31:42.605715Z node 1 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where col_0 == "str1"; 2025-06-24T21:31:42.605740Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:82: TTopicFilters: TFilterHandler [0:0:0] : Send compile request with id 1 2025-06-24T21:31:42.605960Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 1 from [1:7519631758182957297:2051] 2025-06-24T21:31:45.445788Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [1:7519631758182957297:2051] [id 1]: Started compile request 2025-06-24T21:31:46.157088Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:71: TPurecalcCompileActor [1:7519631758182957297:2051] [id 1]: Compilation completed for request 2025-06-24T21:31:46.157211Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 1 from [1:7519631758182957297:2051] 2025-06-24T21:31:46.158946Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:175: TTopicFilters: Got compile response for request with id 1 2025-06-24T21:31:46.158985Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:120: TTopicFilters: TFilterHandler [0:0:0] : Filter compilation finished 2025-06-24T21:31:46.159043Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:196: TTopicFilters: Create filter with id [1:0:0] 2025-06-24T21:31:46.159056Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:200: TTopicFilters: Create purecalc filter for predicate 'where col_1 == "str2"' (filter id: [1:0:0]) 2025-06-24T21:31:46.159072Z node 1 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where col_1 == "str2"; 2025-06-24T21:31:46.159101Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:82: TTopicFilters: TFilterHandler [1:0:0] : Send compile request with id 2 2025-06-24T21:31:46.159234Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 2 from [1:7519631758182957297:2051] 2025-06-24T21:31:46.159328Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [1:7519631758182957297:2051] [id 2]: Started compile request 2025-06-24T21:31:46.186227Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:71: TPurecalcCompileActor [1:7519631758182957297:2051] [id 2]: Compilation completed 
for request 2025-06-24T21:31:46.186363Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 2 from [1:7519631758182957297:2051] 2025-06-24T21:31:46.186473Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:175: TTopicFilters: Got compile response for request with id 2 2025-06-24T21:31:46.186494Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:120: TTopicFilters: TFilterHandler [1:0:0] : Filter compilation finished 2025-06-24T21:31:46.186550Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:196: TTopicFilters: Create filter with id [2:0:0] 2025-06-24T21:31:46.186639Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:155: TTopicFilters: FilterData for 3 clients, number rows: 3 2025-06-24T21:31:46.186672Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:259: TTopicFilters: Pass 3 rows to purecalc filter (filter id: [1:0:0]) 2025-06-24T21:31:46.186682Z node 1 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 3 rows 2025-06-24T21:31:46.190830Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:262: TTopicFilters: Add 3 rows to client [2:0:0] without filtering 2025-06-24T21:31:46.190874Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:259: TTopicFilters: Pass 3 rows to purecalc filter (filter id: [0:0:0]) 2025-06-24T21:31:46.190883Z node 1 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 3 rows 2025-06-24T21:31:46.190958Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:219: TTopicFilters: Remove filter with id [2:0:0] 2025-06-24T21:31:46.191037Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:155: TTopicFilters: FilterData for 2 clients, number rows: 1 2025-06-24T21:31:46.191050Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:259: TTopicFilters: Pass 1 rows to purecalc filter (filter id: [1:0:0]) 2025-06-24T21:31:46.191057Z node 1 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 1 rows 2025-06-24T21:31:46.191115Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:259: TTopicFilters: Pass 1 rows to purecalc filter (filter id: [0:0:0]) 2025-06-24T21:31:46.191130Z node 1 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 1 rows 2025-06-24T21:31:46.552418Z node 2 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:196: TTopicFilters: Create filter with id [0:0:0] 2025-06-24T21:31:46.552458Z node 2 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:200: TTopicFilters: Create purecalc filter for predicate 'where a1 = "str1"' (filter id: [0:0:0]) 2025-06-24T21:31:46.552489Z node 2 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where a1 = "str1"; 2025-06-24T21:31:46.552525Z node 2 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:82: TTopicFilters: TFilterHandler [0:0:0] : Send compile request with id 1 2025-06-24T21:31:46.552733Z node 2 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 1 from [2:7519631778192663382:2051] 2025-06-24T21:31:49.462833Z node 2 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [2:7519631778192663382:2051] [id 1]: Started compile request 2025-06-24T21:31:49.497955Z node 2 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:71: TPurecalcCompileActor [2:7519631778192663382:2051] [id 1]: Compilation completed for request 2025-06-24T21:31:49.498101Z node 2 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: 
TPurecalcCompileService: Compile finished for request with id 1 from [2:7519631778192663382:2051] 2025-06-24T21:31:49.499279Z node 2 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:175: TTopicFilters: Got compile response for request with id 1 2025-06-24T21:31:49.499320Z node 2 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:120: TTopicFilters: TFilterHandler [0:0:0] : Filter compilation finished 2025-06-24T21:31:49.499353Z node 2 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:196: TTopicFilters: Create filter with id [0:0:0] 2025-06-24T21:31:49.962454Z node 3 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:196: TTopicFilters: Create filter with id [0:0:0] 2025-06-24T21:31:49.962490Z node 3 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:200: TTopicFilters: Create purecalc filter for predicate 'where a2 ... 50' (filter id: [0:0:0]) 2025-06-24T21:31:49.962521Z node 3 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where a2 ... 50; 2025-06-24T21:31:49.962657Z node 3 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:82: TTopicFilters: TFilterHandler [0:0:0] : Send compile request with id 1 2025-06-24T21:31:49.963307Z node 3 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 1 from [3:7519631788089509778:2051] 2025-06-24T21:31:52.787283Z node 3 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [3:7519631788089509778:2051] [id 1]: Started compile request 2025-06-24T21:31:52.823766Z node 3 :FQ_ROW_DISPATCHER ERROR: compile_service.cpp:68: TPurecalcCompileActor [3:7519631788089509778:2051] [id 1]: Compilation failed for request 2025-06-24T21:31:52.824081Z node 3 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 1 from [3:7519631788089509778:2051] 2025-06-24T21:31:52.827358Z node 3 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:175: TTopicFilters: Got compile response for request with id 1 2025-06-24T21:31:52.827517Z node 3 :FQ_ROW_DISPATCHER ERROR: filters_set.cpp:110: TTopicFilters: TFilterHandler [0:0:0] : Filter compilation error: {
: Error: Failed to compile purecalc program subissue: {
: Error: Compile issues: generated.sql:2:36: Error: mismatched input '.' expecting {'$', ABORT, ACTION, ADD, AFTER, ALL, ALTER, ANALYZE, AND, ANSI, ANY, ARRAY, AS, ASC, ASSUME, ASYMMETRIC, ASYNC, AT, ATTACH, ATTRIBUTES, AUTOINCREMENT, BACKUP, BATCH, COLLECTION, BEFORE, BEGIN, BERNOULLI, BETWEEN, BITCAST, BY, CALLABLE, CASCADE, CASE, CAST, CHANGEFEED, CHECK, CLASSIFIER, COLLATE, COLUMN, COLUMNS, COMMIT, COMPACT, CONDITIONAL, CONFLICT, CONNECT, CONSTRAINT, CONSUMER, COVER, CREATE, CROSS, CUBE, CURRENT, CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, DATA, DATABASE, DECIMAL, DECLARE, DEFAULT, DEFERRABLE, DEFERRED, DEFINE, DELETE, DESC, DESCRIBE, DETACH, DICT, DIRECTORY, DISABLE, DISCARD, DISTINCT, DO, DROP, EACH, ELSE, EMPTY, EMPTY_ACTION, ENCRYPTED, END, ENUM, ERASE, ERROR, ESCAPE, EVALUATE, EXCEPT, EXCLUDE, EXCLUSION, EXCLUSIVE, EXISTS, EXPLAIN, EXPORT, EXTERNAL, FAIL, FAMILY, FILTER, FIRST, FLATTEN, FLOW, FOLLOWING, FOR, FOREIGN, FROM, FULL, FUNCTION, GLOB, GLOBAL, GRANT, GROUP, GROUPING, GROUPS, HASH, HAVING, HOP, IF, IGNORE, ILIKE, IMMEDIATE, IMPORT, IN, INCREMENT, INCREMENTAL, INDEX, INDEXED, INHERITS, INITIAL, INITIALLY, INNER, INSERT, INSTEAD, INTERSECT, INTO, IS, ISNULL, JOIN, JSON_EXISTS, JSON_QUERY, JSON_VALUE, KEY, LAST, LEFT, LEGACY, LIKE, LIMIT, LIST, LOCAL, LOGIN, MANAGE, MATCH, MATCHES, MATCH_RECOGNIZE, MEASURES, MICROSECONDS, MILLISECONDS, MODIFY, NANOSECONDS, NATURAL, NEXT, NO, NOLOGIN, NOT, NOTNULL, NULL, NULLS, OBJECT, OF, OFFSET, OMIT, ON, ONE, ONLY, OPTION, OPTIONAL, OR, ORDER, OTHERS, OUTER, OVER, OWNER, PARALLEL, PARTITION, PASSING, PASSWORD, PAST, PATTERN, PER, PERMUTE, PLAN, POOL, PRAGMA, PRECEDING, PRESORT, PRIMARY, PRIVILEGES, PROCESS, QUERY, QUEUE, RAISE, RANGE, REDUCE, REFERENCES, REGEXP, REINDEX, RELEASE, REMOVE, RENAME, REPLACE, REPLICATION, RESET, RESOURCE, RESPECT, RESTART, RESTORE, RESTRICT, RESULT, RETURN, RETURNING, REVERT, REVOKE, RIGHT, RLIKE, ROLLBACK, ROLLUP, ROW, ROWS, SAMPLE, SAVEPOINT, SCHEMA, SECONDS, SEEK, SELECT, SEMI, SET, SETS, SHOW, TSKIP, SEQUENCE, SOURCE, START, STREAM, STRUCT, SUBQUERY, SUBSET, SYMBOLS, SYMMETRIC, SYNC, SYSTEM, TABLE, TABLES, TABLESAMPLE, TABLESTORE, TAGGED, TEMP, TEMPORARY, THEN, TIES, TO, TOPIC, TRANSACTION, TRANSFER, TRIGGER, TUPLE, TYPE, UNBOUNDED, UNCONDITIONAL, UNION, UNIQUE, UNKNOWN, UNMATCHED, UPDATE, UPSERT, USE, USER, USING, VACUUM, VALUES, VARIANT, VIEW, VIRTUAL, WHEN, WHERE, WINDOW, WITH, WITHOUT, WRAPPER, XOR, STRING_VALUE, ID_PLAIN, ID_QUOTED, DIGITS} } subissue: {
: Error: Final yql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where a2 ... 50; } } 2025-06-24T21:31:56.539888Z node 4 :FQ_ROW_DISPATCHER DEBUG: format_handler.cpp:379: TTopicFormatHandler [json_each_row]: Add client with id [0:0:0] 2025-06-24T21:31:56.541055Z node 4 :FQ_ROW_DISPATCHER DEBUG: format_handler.cpp:497: TTopicFormatHandler [json_each_row]: UpdateParser to new schema with size 2 2025-06-24T21:31:56.627811Z node 4 :FQ_ROW_DISPATCHER INFO: json_parser.cpp:350: TJsonParser: Simdjson active implementation icelake 2025-06-24T21:31:56.628040Z node 4 :FQ_ROW_DISPATCHER DEBUG: format_handler.cpp:506: TTopicFormatHandler [json_each_row]: Parser was updated on new schema with 2 columns 2025-06-24T21:31:56.628974Z node 4 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:196: TTopicFilters: Create filter with id [0:0:0] 2025-06-24T21:31:56.629002Z node 4 :FQ_ROW_DISPATCHER ... st offset: 45, values: {"a1": "456", "a2": 42, "a3": 1.11.1} 2025-06-24T21:32:23.105208Z node 23 :FQ_ROW_DISPATCHER INFO: json_parser.cpp:350: TJsonParser: Simdjson active implementation icelake 2025-06-24T21:32:23.105487Z node 23 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-24T21:32:23.105532Z node 23 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 42, values: {"a1": "-456"} 2025-06-24T21:32:23.666571Z node 24 :FQ_ROW_DISPATCHER INFO: json_parser.cpp:350: TJsonParser: Simdjson active implementation icelake 2025-06-24T21:32:23.668323Z node 24 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-24T21:32:23.668378Z node 24 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 42, values: {"a1": {"key": "value"}, "a2": {"key2": "value2"}} 2025-06-24T21:32:23.668926Z node 24 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-24T21:32:23.668963Z node 24 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 43, values: {"a1": {"key": "value", "nested": {"a": "b", "c":}}, "a2": "str"} 2025-06-24T21:32:23.669316Z node 24 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-24T21:32:23.669360Z node 24 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 44, values: {"a1": {"key" "value"}, "a2": "str"} 2025-06-24T21:32:24.291060Z node 25 :FQ_ROW_DISPATCHER INFO: json_parser.cpp:350: TJsonParser: Simdjson active implementation icelake 2025-06-24T21:32:24.292207Z node 25 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-24T21:32:24.292271Z node 25 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 42, values: {"a1": true, "a2": false} 2025-06-24T21:32:24.292766Z node 25 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-24T21:32:24.292848Z node 25 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 43, values: {"a1": "true", "a2": falce} 2025-06-24T21:32:24.901415Z node 26 :FQ_ROW_DISPATCHER INFO: json_parser.cpp:350: TJsonParser: Simdjson active implementation icelake 2025-06-24T21:32:24.901723Z node 26 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-24T21:32:24.901767Z node 26 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 42, values: {"a1": Yelse} 2025-06-24T21:32:24.902488Z node 26 
:FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-24T21:32:24.902592Z node 26 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 43, values: {"a1": "st""r"} 2025-06-24T21:32:24.902798Z node 26 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-24T21:32:24.902839Z node 26 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 44, values: {"a1": "x"} {"a1": "y"} 2025-06-24T21:32:24.903019Z node 26 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-24T21:32:24.903061Z node 26 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 45, values: { 2025-06-24T21:32:25.468584Z node 27 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where a2 > 100; 2025-06-24T21:32:25.475264Z node 27 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 0 from [27:7519631941726244427:2051] 2025-06-24T21:32:30.881365Z node 27 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [27:7519631941726244427:2051] [id 0]: Started compile request 2025-06-24T21:32:30.965638Z node 27 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:71: TPurecalcCompileActor [27:7519631941726244427:2051] [id 0]: Compilation completed for request 2025-06-24T21:32:30.965817Z node 27 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 0 from [27:7519631941726244427:2051] 2025-06-24T21:32:30.966007Z node 27 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 1 rows 2025-06-24T21:32:30.969322Z node 27 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 1 rows 2025-06-24T21:32:31.712642Z node 28 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where a2 > 100; 2025-06-24T21:32:31.719439Z node 28 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 0 from [28:7519631969681899349:2051] 2025-06-24T21:32:37.961657Z node 28 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [28:7519631969681899349:2051] [id 0]: Started compile request 2025-06-24T21:32:38.015575Z node 28 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:71: TPurecalcCompileActor [28:7519631969681899349:2051] [id 0]: Compilation completed for request 2025-06-24T21:32:38.015735Z node 28 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 0 from [28:7519631969681899349:2051] 2025-06-24T21:32:38.016027Z node 28 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 1 rows 2025-06-24T21:32:38.016196Z node 28 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 1 rows 2025-06-24T21:32:38.742053Z node 29 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where a2 > 100; 2025-06-24T21:32:38.743267Z node 29 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 0 from [29:7519632001598440405:2051] 2025-06-24T21:32:44.152990Z node 29 
:FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [29:7519632001598440405:2051] [id 0]: Started compile request 2025-06-24T21:32:44.228536Z node 29 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:71: TPurecalcCompileActor [29:7519632001598440405:2051] [id 0]: Compilation completed for request 2025-06-24T21:32:44.228693Z node 29 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 0 from [29:7519632001598440405:2051] 2025-06-24T21:32:44.229082Z node 29 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 2 rows 2025-06-24T21:32:44.229200Z node 29 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 2 rows 2025-06-24T21:32:44.229240Z node 29 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 2 rows 2025-06-24T21:32:44.229270Z node 29 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 2 rows 2025-06-24T21:32:44.229301Z node 29 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 2 rows 2025-06-24T21:32:45.040408Z node 30 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where a1 is null; 2025-06-24T21:32:45.051833Z node 30 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 0 from [30:7519632028663481487:2051] 2025-06-24T21:32:50.991822Z node 30 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [30:7519632028663481487:2051] [id 0]: Started compile request 2025-06-24T21:32:51.068083Z node 30 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:71: TPurecalcCompileActor [30:7519632028663481487:2051] [id 0]: Compilation completed for request 2025-06-24T21:32:51.068280Z node 30 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 0 from [30:7519632028663481487:2051] 2025-06-24T21:32:51.071653Z node 30 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 1 rows 2025-06-24T21:32:51.856893Z node 31 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where a2 > 50; 2025-06-24T21:32:51.857226Z node 31 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 0 from [31:7519632055097736012:2051] 2025-06-24T21:32:57.623306Z node 31 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [31:7519632055097736012:2051] [id 0]: Started compile request 2025-06-24T21:32:57.785415Z node 31 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:71: TPurecalcCompileActor [31:7519632055097736012:2051] [id 0]: Compilation completed for request 2025-06-24T21:32:57.785573Z node 31 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 0 from [31:7519632055097736012:2051] 2025-06-24T21:32:57.786838Z node 31 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 1 rows 2025-06-24T21:32:58.642384Z node 32 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where a2 ... 
50; 2025-06-24T21:32:58.642582Z node 32 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 0 from [32:7519632085644167035:2051] 2025-06-24T21:33:04.822692Z node 32 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [32:7519632085644167035:2051] [id 0]: Started compile request 2025-06-24T21:33:04.842371Z node 32 :FQ_ROW_DISPATCHER ERROR: compile_service.cpp:68: TPurecalcCompileActor [32:7519632085644167035:2051] [id 0]: Compilation failed for request 2025-06-24T21:33:04.842575Z node 32 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 0 from [32:7519632085644167035:2051] 2025-06-24T21:33:06.035048Z node 33 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:54: TRawParser: Add 1 messages to parse 2025-06-24T21:33:06.035121Z node 33 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:74: TRawParser: Do parsing, first offset: 42, value: {"a1": "hello1__large_str", "a2": 101, "event": "event1"} 2025-06-24T21:33:06.697629Z node 34 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:54: TRawParser: Add 3 messages to parse 2025-06-24T21:33:06.697707Z node 34 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:74: TRawParser: Do parsing, first offset: 42, value: {"a1": "hello1", "a2": "101", "event": "event1"} 2025-06-24T21:33:06.697838Z node 34 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:74: TRawParser: Do parsing, first offset: 43, value: {"a1": "hello1", "a2": "101", "event": "event2"} 2025-06-24T21:33:06.697867Z node 34 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:74: TRawParser: Do parsing, first offset: 44, value: {"a2": "101", "a1": "hello1", "event": "event3"} >> TopicSessionTests::RestartSessionIfQueryStopped [GOOD] |98.9%| [TM] {RESULT} ydb/core/fq/libs/row_dispatcher/format_handler/ut/unittest >> RowDispatcherTests::OneClientOneSession >> RowDispatcherTests::OneClientOneSession [GOOD] >> RowDispatcherTests::TwoClientOneSession >> RowDispatcherTests::TwoClientOneSession [GOOD] >> DataShardReplication::ReplicatedTable-UseSink [GOOD] >> DataShardReplication::ApplyChangesToReplicatedTable >> RowDispatcherTests::SessionError >> RowDispatcherTests::SessionError [GOOD] >> RowDispatcherTests::CoordinatorSubscribe >> RowDispatcherTests::CoordinatorSubscribe [GOOD] >> RowDispatcherTests::CoordinatorSubscribeBeforeCoordinatorChanged ------- [TS] {asan, default-linux-x86_64, release} ydb/library/query_actor/ut/unittest >> QueryActorTest::StreamQuery [GOOD] Test command err: 2025-06-24T21:32:39.509097Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519632005588596140:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:32:39.531738Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001b33/r3tmp/tmpK2YvXt/pdisk_1.dat 2025-06-24T21:32:40.139510Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:40.139626Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:40.154947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:32:40.184763Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:40.189662Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519632005588595937:2079] 1750800759488099 != 1750800759488102 TClient is connected to server localhost:9571 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: 2025-06-24T21:32:40.508542Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:32:40.568103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:32:40.611689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:32:40.880055Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: dc-1 2025-06-24T21:32:43.057054Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:32:43.059031Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:32:43.079487Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993272946.472163s seconds to be completed 2025-06-24T21:32:43.101647Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=1&id=OTMyOGI1MGUtZjY5NTRlZmEtNTY1NWE2YWEtNmY2NDNmNmU=, workerId: [1:7519632022768465800:2275], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-24T21:32:43.101696Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:32:43.101875Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:32:43.102111Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-24T21:32:43.102132Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 
2025-06-24T21:32:43.102152Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:32:43.102248Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:32:43.102287Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:32:43.103629Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:32:43.103666Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:32:43.105468Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: SELECT 42 2025-06-24T21:32:43.115530Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=1&id=OTMyOGI1MGUtZjY5NTRlZmEtNTY1NWE2YWEtNmY2NDNmNmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [1:7519632022768465800:2275] 2025-06-24T21:32:43.115623Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 3 timeout: 300.000000s actor id: [1:7519632022768465802:2350] 2025-06-24T21:32:43.119051Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519632022768465803:2277], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:43.119165Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:43.123543Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519632022768465815:2280], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:43.135758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:32:43.151027Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519632022768465817:2281], DatabaseId: /dc-1, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:32:43.234656Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519632022768465868:2385] txid# 281474976710660, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:32:44.105420Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:32:44.239008Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 3, sender: [1:7519632022768465801:2276], selfId: [1:7519632005588596163:2237], source: [1:7519632022768465800:2275] 2025-06-24T21:32:44.239230Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=OTMyOGI1MGUtZjY5NTRlZmEtNTY1NWE2YWEtNmY2NDNmNmU=, TxId: 2025-06-24T21:32:44.240203Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=OTMyOGI1MGUtZjY5NTRlZmEtNTY1NWE2YWEtNmY2NDNmNmU=, TxId: 2025-06-24T21:32:44.242502Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=1&id=OTMyOGI1MGUtZjY5NTRlZmEtNTY1NWE2YWEtNmY2NDNmNmU=, workerId: [1:7519632022768465800:2275], local sessions count: 0 2025-06-24T21:32:45.039779Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519632030067087302:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:32:45.055734Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001b33/r3tmp/tmp5l7xkC/pdisk_1.dat 2025-06-24T21:32:45.268158Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:45.269955Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519632030067087264:2079] 1750800765024266 != 1750800765024269 2025-06-24T21:32:45.284194Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:45.284274Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:45.285587Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22614 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-24T21:32:45.549815Z nod ... 025-06-24T21:32:56.071520Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:32:56.075188Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:32:56.157788Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: dc-1 2025-06-24T21:32:56.535705Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:32:59.830311Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:32:59.833565Z node 4 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:32:59.846907Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993272929.704736s seconds to be completed 2025-06-24T21:32:59.849378Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=4&id=ZDExNTAzYTctNmRhMThhNzUtOTEyMDM4OTUtMWU2ZDYyMGU=, workerId: [4:7519632088698490199:2276], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-24T21:32:59.849423Z node 4 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:32:59.849609Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:32:59.849669Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-24T21:32:59.849696Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 
2025-06-24T21:32:59.849720Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-24T21:32:59.849766Z node 4 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-24T21:32:59.849846Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:32:59.849893Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:32:59.849983Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:32:59.850008Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:32:59.850026Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:32:59.850336Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:269: [TQueryBase] RunStreamQuery: DECLARE $value AS Text; DECLARE $table_size AS Uint64; SELECT x FROM AS_TABLE( ()->(Yql::ToStream(ListReplicate(<|x:$value|>, $table_size))) ); 2025-06-24T21:32:59.853129Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:288: [TQueryBase] Start read next stream part 2025-06-24T21:32:59.856876Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: TraceId: "01jyhxqjjd5582e80ambdayexw", Created new session, sessionId: ydb://session/3?node_id=4&id=OTNkZjdkMGQtN2Y4MTViNDctZjg0YjU0OGYtZjYwNDNjNGM=, workerId: [4:7519632088698490217:2277], database: /dc-1, longSession: 0, local sessions count: 2 2025-06-24T21:32:59.857216Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: 01jyhxqjjd5582e80ambdayexw, Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=OTNkZjdkMGQtN2Y4MTViNDctZjg0YjU0OGYtZjYwNDNjNGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 600.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [4:7519632088698490217:2277] 2025-06-24T21:32:59.857247Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 3 timeout: 600.000000s actor id: [4:7519632088698490218:2353] 2025-06-24T21:32:59.859184Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519632088698490219:2278], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:59.859304Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:59.859603Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519632088698490231:2281], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:59.864585Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:32:59.877805Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519632088698490233:2282], DatabaseId: /dc-1, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:32:59.948094Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519632088698490284:2388] txid# 281474976710660, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:33:00.479480Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519632071518620370:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:33:00.479578Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:33:01.228556Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:33:06.157882Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:299: [TQueryBase] TEvStreamQueryResultPart SUCCESS, Issues: 2025-06-24T21:33:06.172409Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:329: [TQueryBase] Cancel stream request 2025-06-24T21:33:06.172546Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=4&id=ZDExNTAzYTctNmRhMThhNzUtOTEyMDM4OTUtMWU2ZDYyMGU=, TxId: 2025-06-24T21:33:06.176695Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: dc-1 2025-06-24T21:33:06.338912Z node 4 :RPC_REQUEST WARN: rpc_stream_execute_scan_query.cpp:410: Client lost 2025-06-24T21:33:06.403756Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993272923.147894s seconds to be completed 2025-06-24T21:33:06.406266Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=4&id=NmRhZGE2Y2YtZWFkMGI2ODMtMThlOTBjYzYtZjk2ZGEyNTQ=, workerId: [4:7519632118763261435:2308], database: /dc-1, longSession: 1, local sessions count: 3 2025-06-24T21:33:06.406477Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-24T21:33:06.406577Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=4&id=ZDExNTAzYTctNmRhMThhNzUtOTEyMDM4OTUtMWU2ZDYyMGU=, workerId: [4:7519632088698490199:2276], local sessions count: 2 2025-06-24T21:33:06.641071Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-24T21:33:06.642484Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:269: [TQueryBase] RunStreamQuery: DECLARE $value AS Text; DECLARE $table_size AS Uint64; SELECT x FROM AS_TABLE( ()->(Yql::ToStream(ListReplicate(<|x:$value|>, $table_size))) ); 2025-06-24T21:33:06.642596Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:288: [TQueryBase] Start read next stream part 2025-06-24T21:33:06.644829Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: TraceId: "01jyhxqs6j481av694wd9n5mjs", Created new session, sessionId: ydb://session/3?node_id=4&id=ZTNiM2ZiZjItN2EwYjc1MzktZGQ0MjNhMmEtM2Y1NzVmYTE=, workerId: [4:7519632118763261442:2310], database: /dc-1, longSession: 0, local sessions count: 3 2025-06-24T21:33:06.645094Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: 01jyhxqs6j481av694wd9n5mjs, Database: /dc-1, DatabaseId: , SessionId: 
ydb://session/3?node_id=4&id=ZTNiM2ZiZjItN2EwYjc1MzktZGQ0MjNhMmEtM2Y1NzVmYTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 600.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 5, targetId: [4:7519632118763261442:2310] 2025-06-24T21:33:06.645132Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 5 timeout: 600.000000s actor id: [4:7519632118763261443:2432] 2025-06-24T21:33:06.778840Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:299: [TQueryBase] TEvStreamQueryResultPart SUCCESS, Issues: 2025-06-24T21:33:06.783477Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750800786763, txId: 281474976710663] shutting down 2025-06-24T21:33:06.786923Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: TraceId: "01jyhxqs6j481av694wd9n5mjs", Forwarded response to sender actor, requestId: 5, sender: [4:7519632118763261438:2427], selfId: [4:7519632071518620582:2237], source: [4:7519632118763261442:2310] 2025-06-24T21:33:06.787606Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=4&id=ZTNiM2ZiZjItN2EwYjc1MzktZGQ0MjNhMmEtM2Y1NzVmYTE=, workerId: [4:7519632118763261442:2310], local sessions count: 2 2025-06-24T21:33:06.790156Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:288: [TQueryBase] Start read next stream part 2025-06-24T21:33:06.790386Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:299: [TQueryBase] TEvStreamQueryResultPart SUCCESS, Issues: 2025-06-24T21:33:06.790475Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=4&id=NmRhZGE2Y2YtZWFkMGI2ODMtMThlOTBjYzYtZjk2ZGEyNTQ=, TxId: 2025-06-24T21:33:06.791029Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=4&id=NmRhZGE2Y2YtZWFkMGI2ODMtMThlOTBjYzYtZjk2ZGEyNTQ=, workerId: [4:7519632118763261435:2308], local sessions count: 1 >> RowDispatcherTests::CoordinatorSubscribeBeforeCoordinatorChanged [GOOD] |98.9%| [TS] {RESULT} ydb/library/query_actor/ut/unittest >> RowDispatcherTests::TwoClients4Sessions >> RowDispatcherTests::TwoClients4Sessions [GOOD] >> RowDispatcherTests::ReinitConsumerIfNewGeneration >> TAsyncIndexTests::MergeBothWithReboots[PipeResets] [GOOD] >> KqpQueryService::ReplaceIntoWithDefaultValue [GOOD] >> RowDispatcherTests::ReinitConsumerIfNewGeneration [GOOD] >> RowDispatcherTests::HandleTEvUndelivered >> RowDispatcherTests::HandleTEvUndelivered [GOOD] >> RowDispatcherTests::TwoClientTwoConnection >> RowDispatcherTests::TwoClientTwoConnection [GOOD] >> RowDispatcherTests::ProcessNoSession >> MediatorTest::OneCoordinatorResendTxNotLost [GOOD] >> KqpBatchUpdate::ManyPartitions_3 [GOOD] >> RowDispatcherTests::ProcessNoSession [GOOD] >> RowDispatcherTests::IgnoreWrongPartitionId >> RowDispatcherTests::IgnoreWrongPartitionId [GOOD] >> RowDispatcherTests::SessionFatalError >> RowDispatcherTests::SessionFatalError [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::MergeBothWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: 
[1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T21:30:33.251981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:30:33.252066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:33.252105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:30:33.252141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:30:33.252188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:30:33.252239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:30:33.252294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:33.252355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:30:33.253145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:30:33.253483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:30:33.335861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:30:33.335919Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:33.336701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T21:30:33.353539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:30:33.353982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:30:33.354165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 
72057594046678944 2025-06-24T21:30:33.363492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:30:33.363764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:30:33.364449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:33.364674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:30:33.367779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:33.367986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:30:33.369222Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:33.369286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:33.369510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:30:33.369559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:33.369607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:30:33.369762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T21:30:33.378023Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:30:33.504589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:33.504781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.504972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:30:33.505007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: 
[OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:30:33.505208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:30:33.505315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:33.507247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:33.507407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:30:33.507575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.507622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:30:33.507655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:30:33.507682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:30:33.509392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.509436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:33.509463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:30:33.511021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.511068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:33.511106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:33.511184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:30:33.514145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:33.515592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from 
tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:30:33.515767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:30:33.516522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:33.516612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969454 } } Step: 5000001 Media ... xImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409550 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 
ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:33:10.394732Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409550:2][72075186233409551][54:1106:2881] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-24T21:33:10.394829Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409550:2][54:1065:2881] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409551 } 2025-06-24T21:33:10.394959Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409550:2][72075186233409551][54:1106:2881] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750800790357002 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1750800790357002 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750800790357002 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-24T21:33:10.405027Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409550:2][72075186233409551][54:1106:2881] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: 
REASON_NONE } LastRecordOrder: 3 2025-06-24T21:33:10.405213Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409550:2][54:1065:2881] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409551 } 2025-06-24T21:33:10.624097Z node 54 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:33:10.624471Z node 54 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 422us result status StatusSuccess 2025-06-24T21:33:10.625526Z node 54 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 
ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpTpch::Query08 [GOOD] >> KqpTpch::Query09 >> test.py::test[solomon-HistResponse-default.txt] [GOOD] >> test.py::test[solomon-InvalidProject-] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ReplaceIntoWithDefaultValue [GOOD] Test command err: Trying to start YDB, gRPC: 13503, MsgBus: 27428 2025-06-24T21:27:18.000419Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630625931888320:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:18.000513Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030e2/r3tmp/tmpeDjSqK/pdisk_1.dat 2025-06-24T21:27:18.336676Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13503, node 1 2025-06-24T21:27:18.384319Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:18.384419Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:18.386883Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:18.430680Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:18.430721Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:18.430730Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:18.430874Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27428 TClient is connected to server localhost:27428 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:18.998785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:19.007267Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:19.017344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:19.177057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:27:19.357498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:19.427140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:21.183954Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630638816791810:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:21.184088Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:21.463674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:21.494031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:21.524818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:21.552478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:21.582003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:21.614347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:21.655935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:21.706217Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630638816792464:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:21.706270Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:21.706396Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630638816792469:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:21.709439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:21.719131Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630638816792471:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:21.793150Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630638816792522:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 15943, MsgBus: 29234 2025-06-24T21:27:23.687133Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630645186234837:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:23.687188Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030e2/r3tmp/tmpQ7u6uE/pdisk_1.dat 2025-06-24T21:27:23.806453Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:23.806545Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:23.806991Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:23.816914Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected ... tDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:26.817552Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:26.846814Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:26.876193Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:26.899919Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:26.929159Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself 
is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:26.982985Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:27.038053Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630662366106267:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:27.038160Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:27.038249Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519630662366106272:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:27.041366Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:27.050151Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519630662366106274:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:27.149382Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519630662366106325:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:27:28.687254Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519630645186234837:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:28.687337Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:38.798087Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:27:38.798116Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded Trying to start YDB, gRPC: 8068, MsgBus: 63130 2025-06-24T21:33:04.755507Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519632109542169850:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:33:04.761523Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030e2/r3tmp/tmpNA6XNX/pdisk_1.dat 2025-06-24T21:33:04.908104Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:33:04.911556Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519632109542169741:2079] 1750800784744104 != 1750800784744107 2025-06-24T21:33:04.924721Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:33:04.924822Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:33:04.926631Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8068, node 3 2025-06-24T21:33:05.079870Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:33:05.079896Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:33:05.079908Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:33:05.080094Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63130 2025-06-24T21:33:05.776638Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63130 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:33:05.899181Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:33:09.588053Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519632131017006835:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:09.588285Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:09.588857Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519632131017006870:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:09.594917Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:33:09.616603Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519632131017006872:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:33:09.704243Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519632131017006923:2334] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:33:09.750361Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519632109542169850:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:33:09.750435Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:33:09.794345Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) >> DataShardCompaction::CompactBorrowed >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert_same_values [GOOD] >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert_same_values_simple ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::ManyPartitions_3 [GOOD] Test command err: Trying to start YDB, gRPC: 25282, MsgBus: 5952 2025-06-24T21:29:10.364675Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631105936050929:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:10.365526Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002cd2/r3tmp/tmpdfFF5U/pdisk_1.dat 2025-06-24T21:29:11.002147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:11.019328Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:11.020933Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:11.029887Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631105936050902:2079] 1750800550361760 != 1750800550361763 2025-06-24T21:29:11.039956Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25282, node 1 2025-06-24T21:29:11.181829Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:11.181864Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:11.181874Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:11.181988Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration 2025-06-24T21:29:11.372567Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5952 TClient is connected to server localhost:5952 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.136273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:12.194999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.412727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.681761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.825871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:29:14.567139Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631123115921717:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.567269Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.900096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.937765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.977893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.031413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.061207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.139424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.213760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.304470Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631127410889676:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.304537Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.304804Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631127410889681:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.309099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:15.323725Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631127410889683:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:15.366764Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631105936050929:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.366842Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:15.394116Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631127410889734:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:16.604393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:25.948690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot ge ... undelivered;self_id=[12:7519632046837581659:2152];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:32:49.789318Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002cd2/r3tmp/tmp3WjqAZ/pdisk_1.dat 2025-06-24T21:32:50.030680Z node 12 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:50.031299Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:7519632046837581520:2079] 1750800769604866 != 1750800769604869 2025-06-24T21:32:50.043458Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:50.043592Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:50.046649Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12907, node 12 2025-06-24T21:32:50.192057Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:32:50.192097Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:32:50.192114Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:32:50.192326Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:32:50.651332Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26750 TClient is connected to server localhost:26750 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:32:51.445926Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:32:51.460718Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:32:51.624536Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:32:52.066976Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:32:52.206162Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:32:54.640456Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519632046837581659:2152];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:32:54.640564Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:32:57.103201Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519632081197321550:2373], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:57.103415Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:57.140480Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:57.201294Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:57.264040Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:57.324954Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:57.411567Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:57.535542Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:57.601450Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:57.749770Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519632081197322230:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:57.749911Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:57.750218Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519632081197322235:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:57.760565Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:32:57.794298Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519632081197322237:2441], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:32:57.882783Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7519632081197322288:3440] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:33:00.072705Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:33:05.007350Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:33:05.007401Z node 12 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/mediator/ut/unittest >> MediatorTest::OneCoordinatorResendTxNotLost [GOOD] Test command err: 2025-06-24T21:32:13.479811Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:32:13.480348Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:32:13.480574Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001940/r3tmp/tmp1c7lzi/pdisk_1.dat 2025-06-24T21:32:14.062726Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:32:14.067922Z node 1 :TX_MEDIATOR INFO: mediator__schema.cpp:23: tablet# 72057594047365120 TTxSchema Complete 2025-06-24T21:32:14.068551Z node 1 :TX_MEDIATOR INFO: mediator__init.cpp:88: tablet# 72057594047365120 CreateTxInit wait TEvMediatorConfiguration for switching to StateWork from external 2025-06-24T21:32:14.069352Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594047365120 server# [1:576:2492] connected 2025-06-24T21:32:14.069478Z node 1 :TX_MEDIATOR NOTICE: mediator_impl.cpp:133: tablet# 72057594047365120 actor# [1:559:2482] HANDLE TEvMediatorConfiguration Version# 1 2025-06-24T21:32:14.069917Z node 1 :TX_MEDIATOR DEBUG: mediator__configure.cpp:77: tablet# 72057594047365120 version# 1 TTxConfigure Complete 2025-06-24T21:32:14.070127Z node 1 :TX_MEDIATOR INFO: mediator__init.cpp:64: tablet# 72057594047365120 CreateTxInit Complete ... waiting for watcher to connect 2025-06-24T21:32:14.070702Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594047365120 server# [1:582:2497] connected 2025-06-24T21:32:14.070784Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:308: tablet# 72057594047365120 FORWARD Watch from# [1:580:2496] to# [1:578:2494] ExecQueue 2025-06-24T21:32:14.070831Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:175: Actor# [1:578:2494] MediatorId# 72057594047365120 HANDLE TEvGranularWatch from# [1:580:2496] bucket# 0 ... 
waiting for watcher to connect (done) 2025-06-24T21:32:14.071076Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:308: tablet# 72057594047365120 FORWARD Watch from# [1:580:2496] to# [1:578:2494] ExecQueue 2025-06-24T21:32:14.071186Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:159: Actor# [1:578:2494] MediatorId# 72057594047365120 HANDLE TEvWatch 2025-06-24T21:32:14.071242Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:164: Actor# [1:578:2494] MediatorId# 72057594047365120 SEND TEvWatchBucket to# [1:579:2495] bucket.ActiveActor 2025-06-24T21:32:14.071391Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:380: Actor# [1:579:2495] Mediator# 72057594047365120 HANDLE {TEvWatchBucket Source# [1:580:2496]} 2025-06-24T21:32:14.071476Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:391: Actor# [1:579:2495] Mediator# 72057594047365120 SEND to# [1:580:2496] {TEvUpdate Mediator# 72057594047365120 Bucket# 0 TimeBarrier# 0} 2025-06-24T21:32:14.083765Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594047365120 server# [1:586:2501] connected 2025-06-24T21:32:14.083876Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:139: tablet# 72057594047365120 HANDLE EvCoordinatorSync 2025-06-24T21:32:14.083947Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:83: tablet# 72057594047365120 SEND EvCoordinatorSyncResult to# [1:584:2499] Cookie# 1 CompleteStep# 0 LatestKnownStep# 0 SubjectiveTime# 3 Coordinator# 72057594046316545 2025-06-24T21:32:14.084373Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:72: Actor# [1:578:2494] MediatorId# 72057594047365120 HANDLE TEvCommitStep {TMediateStep From 0 To# 1000Steps: {{TCoordinatorStep step# 1000 PrevStep# 0}}} marker# M1 2025-06-24T21:32:14.084427Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [1:578:2494] MediatorId# 72057594047365120 SEND TEvStepPlanComplete to# [1:579:2495] bucket.ActiveActor step# 1000 2025-06-24T21:32:14.084490Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [1:579:2495] Mediator# 72057594047365120 HANDLE {TEvStepPlanComplete step# 1000} 2025-06-24T21:32:14.084700Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:171: Actor# [1:579:2495] Mediator# 72057594047365120 SEND to# [1:580:2496] {TEvUpdate Mediator# 72057594047365120 Bucket# 0 TimeBarrier# 1000} ... 
waiting for blocked plan step 2025-06-24T21:32:14.104263Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:280: tablet# 72057594047365120 HANDLE EvCoordinatorStep coordinator# 72057594046316545 step# 1010 2025-06-24T21:32:14.104341Z node 1 :TX_MEDIATOR INFO: mediator_impl.cpp:287: Coordinator step: Mediator [72057594047365120], Coordinator [72057594046316545], step# [1010] transactions [1] 2025-06-24T21:32:14.104488Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:205: tablet# 72057594047365120 SEND EvCommitStep to# [1:578:2494] ExecQueue {TMediateStep From 1000 To# 1010Steps: {{TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 1 AckTo# [1:584:2499]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 1}}}}} marker# M0 2025-06-24T21:32:14.104597Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:72: Actor# [1:578:2494] MediatorId# 72057594047365120 HANDLE TEvCommitStep {TMediateStep From 1000 To# 1010Steps: {{TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 1 AckTo# [1:584:2499]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 1}}}}} marker# M1 2025-06-24T21:32:14.104638Z node 1 :TX_MEDIATOR_PRIVATE DEBUG: execute_queue.cpp:44: Mediator exec queue [72057594047365120], step# 1010 for tablet [72057594047365121]. TxIds: txid# 1 marker# M2 2025-06-24T21:32:14.104682Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [1:578:2494] MediatorId# 72057594047365120 SEND Ev to# [1:579:2495] step# 1010 forTablet# 72057594047365121 txid# 1 marker# M3 2025-06-24T21:32:14.104735Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [1:578:2494] MediatorId# 72057594047365120 SEND TEvStepPlanComplete to# [1:579:2495] bucket.ActiveActor step# 1010 2025-06-24T21:32:14.104800Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:183: Actor# [1:579:2495] Mediator# 72057594047365120 HANDLE {TEvCommitTabletStep step# 1010 TabletId# 72057594047365121 Transactions {{TTx Moderator# 0 txid# 1 AckTo# [1:584:2499]}}} marker# M4 2025-06-24T21:32:14.104959Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [1:579:2495] Mediator# 72057594047365120 HANDLE {TEvStepPlanComplete step# 1010} 2025-06-24T21:32:14.106161Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:248: Actor# [1:579:2495] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594047365121 Status: OK ServerId: [1:608:2513] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-24T21:32:14.106229Z node 1 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365121, step# 1010, txid# 1, marker M5lu 2025-06-24T21:32:14.106279Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [1:579:2495] Mediator# 72057594047365120 SEND to# 72057594047365121 {TEvPlanStep step# 1010 MediatorId# 72057594047365120 TabletID 72057594047365121} ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NTxMediator::NTestSuiteMediatorTest::TTargetTablet cookie 0 ... waiting for blocked plan step (done) ... waiting for no pending commands 2025-06-24T21:32:14.106720Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:308: tablet# 72057594047365120 FORWARD Watch from# [1:580:2496] to# [1:578:2494] ExecQueue 2025-06-24T21:32:14.106773Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:189: Actor# [1:578:2494] MediatorId# 72057594047365120 HANDLE TEvGranularWatchModify from# [1:580:2496] bucket# 0 ... 
waiting for no pending commands (done) ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NTxMediator::NTestSuiteMediatorTest::TTargetTablet ... waiting for watch updates 2025-06-24T21:32:14.107113Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:342: Actor# [1:579:2495] Mediator# 72057594047365120 HANDLE {TEvPlanStepAccepted TabletId# 72057594047365121 step# 1010} 2025-06-24T21:32:14.107212Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:415: Actor# [1:579:2495] Mediator# 72057594047365120 SEND to# [1:584:2499] {TEvPlanStepAck TabletId# 72057594047365121 step# 1010 txid# 1} 2025-06-24T21:32:14.107340Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:171: Actor# [1:579:2495] Mediator# 72057594047365120 SEND to# [1:580:2496] {TEvUpdate Mediator# 72057594047365120 Bucket# 0 TimeBarrier# 1010} ... waiting for watch updates (done) 2025-06-24T21:32:18.024320Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:32:18.024694Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:32:18.024837Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001940/r3tmp/tmpyFL13b/pdisk_1.dat 2025-06-24T21:32:18.292459Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-24T21:32:18.302521Z node 2 :TX_MEDIATOR INFO: mediator__schema.cpp:23: tablet# 72057594047365120 TTxSchema Complete 2025-06-24T21:32:18.307379Z node 2 :TX_MEDIATOR INFO: mediator__init.cpp:88: tablet# 72057594047365120 CreateTxInit wait TEvMediatorConfiguration for switching to StateWork from external 2025-06-24T21:32:18.308324Z node 2 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594047365120 server# [2:576:2492] connected 2025-06-24T21:32:18.308460Z node 2 :TX_MEDIATOR NOTICE: mediator_impl.cpp:133: tablet# 72057594047365120 actor# [2:559:2482] HANDLE TEvMediatorConfiguration Version# 1 2025-06-24T21:32:18.309043Z node 2 :TX_MEDIATOR DEBUG: mediator__configure.cpp:77: tablet# 72057594047365120 version# 1 TTxConfigure Complete 2025-06-24T21:32:18.309231Z node 2 :TX_MEDIATOR INFO: mediator__init.cpp:64: tablet# 72057594047365120 CreateTxInit Complete ... waiting for watcher to connect 2025-06-24T21:32:18.309860Z node 2 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594047365120 server# [2:582:2497] connected 2025-06-24T21:32:18.309913Z node 2 :TX_MEDIATOR DEBUG: mediator_impl.cpp:308: tablet# 72057594047365120 FORWARD Watch from# [2:580:2496] to# [2:578:2494] ExecQueue 2025-06-24T21:32:18.309959Z node 2 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:175: Actor# [2:578:2494] MediatorId# 72057594047365120 HANDLE TEvGranularWatch from# [2:580:2496] bucket# 0 ... waiting for watcher to connect (done) 2025-06-24T21:32:18.310284Z node 2 :TX_MEDIATOR DEBUG: mediator_impl.cpp:308: tablet# 72057594047365120 FORWARD Watch from# [2:580:2496] to# [2:578:2494] ExecQueue 2025-06-24T21:32:18.310363Z node 2 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:159: Actor# [2:578:2494] MediatorId# 72057594047365120 HANDLE TEvWatch 2025-06-24T21:32:18.310421Z node 2 :T ... 
-24T21:33:11.412330Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:280: tablet# 72057594047365120 HANDLE EvCoordinatorStep coordinator# 72057594046316546 step# 1010 2025-06-24T21:33:11.412361Z node 12 :TX_MEDIATOR INFO: mediator_impl.cpp:287: Coordinator step: Mediator [72057594047365120], Coordinator [72057594046316546], step# [1010] transactions [1] 2025-06-24T21:33:11.412478Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:205: tablet# 72057594047365120 SEND EvCommitStep to# [12:579:2495] ExecQueue {TMediateStep From 0 To# 1010Steps: {{TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 1 AckTo# [12:622:2519]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 1}{tablet# 72057594047365122 txid# 1}}}{TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 2 AckTo# [12:625:2522]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 2}{tablet# 72057594047365122 txid# 2}}}}} marker# M0 2025-06-24T21:33:11.412621Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:72: Actor# [12:579:2495] MediatorId# 72057594047365120 HANDLE TEvCommitStep {TMediateStep From 0 To# 1010Steps: {{TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 1 AckTo# [12:622:2519]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 1}{tablet# 72057594047365122 txid# 1}}}{TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 2 AckTo# [12:625:2522]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 2}{tablet# 72057594047365122 txid# 2}}}}} marker# M1 2025-06-24T21:33:11.412703Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: execute_queue.cpp:44: Mediator exec queue [72057594047365120], step# 1010 for tablet [72057594047365121]. TxIds: txid# 1 txid# 2 marker# M2 2025-06-24T21:33:11.412779Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [12:579:2495] MediatorId# 72057594047365120 SEND Ev to# [12:580:2496] step# 1010 forTablet# 72057594047365121 txid# 1 txid# 2 marker# M3 2025-06-24T21:33:11.412838Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: execute_queue.cpp:44: Mediator exec queue [72057594047365120], step# 1010 for tablet [72057594047365122]. 
TxIds: txid# 1 txid# 2 marker# M2 2025-06-24T21:33:11.412872Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [12:579:2495] MediatorId# 72057594047365120 SEND Ev to# [12:580:2496] step# 1010 forTablet# 72057594047365122 txid# 1 txid# 2 marker# M3 2025-06-24T21:33:11.412916Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [12:579:2495] MediatorId# 72057594047365120 SEND TEvStepPlanComplete to# [12:580:2496] bucket.ActiveActor step# 1010 2025-06-24T21:33:11.413022Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:183: Actor# [12:580:2496] Mediator# 72057594047365120 HANDLE {TEvCommitTabletStep step# 1010 TabletId# 72057594047365121 Transactions {{TTx Moderator# 0 txid# 1 AckTo# [12:622:2519]}{TTx Moderator# 0 txid# 2 AckTo# [12:625:2522]}}} marker# M4 2025-06-24T21:33:11.413217Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:183: Actor# [12:580:2496] Mediator# 72057594047365120 HANDLE {TEvCommitTabletStep step# 1010 TabletId# 72057594047365122 Transactions {{TTx Moderator# 0 txid# 1 AckTo# [12:622:2519]}{TTx Moderator# 0 txid# 2 AckTo# [12:625:2522]}}} marker# M4 2025-06-24T21:33:11.413320Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [12:580:2496] Mediator# 72057594047365120 HANDLE {TEvStepPlanComplete step# 1010} 2025-06-24T21:33:11.414178Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:248: Actor# [12:580:2496] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594047365121 Status: OK ServerId: [12:633:2528] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-24T21:33:11.414270Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365121, step# 1010, txid# 1, marker M5lu 2025-06-24T21:33:11.414316Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365121, step# 1010, txid# 2, marker M5lu 2025-06-24T21:33:11.414369Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [12:580:2496] Mediator# 72057594047365120 SEND to# 72057594047365121 {TEvPlanStep step# 1010 MediatorId# 72057594047365120 TabletID 72057594047365121} ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NTxMediator::NTestSuiteMediatorTest::TTargetTablet cookie 0 2025-06-24T21:33:11.414805Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:248: Actor# [12:580:2496] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594047365122 Status: OK ServerId: [12:634:2529] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-24T21:33:11.414882Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365122, step# 1010, txid# 1, marker M5lu 2025-06-24T21:33:11.414924Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365122, step# 1010, txid# 2, marker M5lu 2025-06-24T21:33:11.414961Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [12:580:2496] Mediator# 72057594047365120 SEND to# 72057594047365122 {TEvPlanStep step# 1010 MediatorId# 72057594047365120 TabletID 72057594047365122} ... 
blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NTxMediator::NTestSuiteMediatorTest::TTargetTablet cookie 0 2025-06-24T21:33:11.426335Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594047365120 server# [12:637:2532] connected 2025-06-24T21:33:11.426486Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:139: tablet# 72057594047365120 HANDLE EvCoordinatorSync 2025-06-24T21:33:11.426551Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:83: tablet# 72057594047365120 SEND EvCoordinatorSyncResult to# [12:635:2530] Cookie# 2 CompleteStep# 1010 LatestKnownStep# 1010 SubjectiveTime# 3 Coordinator# 72057594046316546 2025-06-24T21:33:11.426836Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:280: tablet# 72057594047365120 HANDLE EvCoordinatorStep coordinator# 72057594046316546 step# 1010 2025-06-24T21:33:11.426915Z node 12 :TX_MEDIATOR INFO: mediator_impl.cpp:287: Coordinator step: Mediator [72057594047365120], Coordinator [72057594046316546], step# [1010] transactions [1] 2025-06-24T21:33:11.427017Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:223: tablet# 72057594047365120 SEND EvRequestLostAcks to# [12:579:2495] ExecQueue step {TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 2 AckTo# [0:0:0]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 2}{tablet# 72057594047365122 txid# 2}}} 2025-06-24T21:33:11.427177Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:130: Actor# [12:579:2495] MediatorId# 72057594047365120 HANDLE TEvRequestLostAcks {TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 2 AckTo# [0:0:0]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 2}{tablet# 72057594047365122 txid# 2}}} AckTo# [12:635:2530] 2025-06-24T21:33:11.427232Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: execute_queue.cpp:44: Mediator exec queue [72057594047365120], step# 1010 for tablet [72057594047365121]. TxIds: txid# 2 marker# M2 2025-06-24T21:33:11.427308Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [12:579:2495] MediatorId# 72057594047365120 SEND Ev to# [12:580:2496] step# 1010 forTablet# 72057594047365121 txid# 2 marker# M3 2025-06-24T21:33:11.427380Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: execute_queue.cpp:44: Mediator exec queue [72057594047365120], step# 1010 for tablet [72057594047365122]. 
TxIds: txid# 2 marker# M2 2025-06-24T21:33:11.427434Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [12:579:2495] MediatorId# 72057594047365120 SEND Ev to# [12:580:2496] step# 1010 forTablet# 72057594047365122 txid# 2 marker# M3 2025-06-24T21:33:11.427528Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:222: Actor# [12:580:2496] Mediator# 72057594047365120 HANDLE {TEvOoOTabletStep step# 1010 TabletId# 72057594047365121 Transactions {{TTx Moderator# 0 txid# 2 AckTo# [12:635:2530]}}} 2025-06-24T21:33:11.427619Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:222: Actor# [12:580:2496] Mediator# 72057594047365120 HANDLE {TEvOoOTabletStep step# 1010 TabletId# 72057594047365122 Transactions {{TTx Moderator# 0 txid# 2 AckTo# [12:635:2530]}}} 2025-06-24T21:33:11.440262Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:294: Actor# [12:580:2496] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594047365121 ClientId: [12:629:2526] ServerId: [12:633:2528] } 2025-06-24T21:33:11.481341Z node 12 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:33:11.484207Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:32:2079] 1750800786048155 != 1750800786048159 2025-06-24T21:33:11.517500Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:248: Actor# [12:580:2496] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594047365121 Status: OK ServerId: [12:662:2545] Leader: 1 Dead: 0 Generation: 3 VersionInfo: } 2025-06-24T21:33:11.517614Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365121, step# 1010, txid# 1, marker M5lu 2025-06-24T21:33:11.517660Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365121, step# 1010, txid# 2, marker M5lu 2025-06-24T21:33:11.517714Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [12:580:2496] Mediator# 72057594047365120 SEND to# 72057594047365121 {TEvPlanStep step# 1010 MediatorId# 72057594047365120 TabletID 72057594047365121} ... 
blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NTxMediator::NTestSuiteMediatorTest::TTargetTablet cookie 0 2025-06-24T21:33:11.529389Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:294: Actor# [12:580:2496] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594047365122 ClientId: [12:630:2527] ServerId: [12:634:2529] } 2025-06-24T21:33:11.568122Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:248: Actor# [12:580:2496] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594047365122 Status: OK ServerId: [12:698:2558] Leader: 1 Dead: 0 Generation: 3 VersionInfo: } 2025-06-24T21:33:11.568237Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365122, step# 1010, txid# 1, marker M5lu 2025-06-24T21:33:11.568289Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365122, step# 1010, txid# 2, marker M5lu 2025-06-24T21:33:11.568337Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [12:580:2496] Mediator# 72057594047365120 SEND to# 72057594047365122 {TEvPlanStep step# 1010 MediatorId# 72057594047365120 TabletID 72057594047365122} ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NTxMediator::NTestSuiteMediatorTest::TTargetTablet cookie 0 2025-06-24T21:33:11.583584Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:33:11.583757Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:33:11.595994Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected |98.9%| [TM] {RESULT} ydb/core/tx/mediator/ut/unittest >> RetryPolicy::TWriteSession_RetryOnTargetCluster [GOOD] >> RetryPolicy::TWriteSession_SwitchBackToLocalCluster >> test.py::test_order_conflict [GOOD] >> test.py::test_missing_value [GOOD] >> test.py::test_unexpected_value [GOOD] >> test.py::test_local >> DataShardFollowers::FollowerReadDuringSplit [GOOD] >> DataCleanup::BorrowerDataCleanedAfterCopyTable [GOOD] >> ShowCreateView::WithPairedTablePathPrefix [FAIL] >> KeyValueGRPCService::SimpleWriteRead [GOOD] >> KeyValueGRPCService::SimpleWriteReadWithIncorreectPath >> TCreateAndDropViewTest::CreateViewIfNotExists [GOOD] >> TCreateAndDropViewTest::DropView >> ExportS3BufferTest::MinBufferSize [GOOD] >> ExportS3BufferTest::MinBufferSizeWithCompression [GOOD] >> ExportS3BufferTest::MinBufferSizeWithCompressionAndEncryption [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_Oltp_Replace-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 8889, MsgBus: 6851 2025-06-24T21:27:28.839728Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630666141063854:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:28.841258Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030dd/r3tmp/tmplopK0O/pdisk_1.dat 
2025-06-24T21:27:29.229057Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:27:29.230492Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519630666141063826:2079] 1750800448830114 != 1750800448830117 2025-06-24T21:27:29.243205Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:29.243327Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 8889, node 1 2025-06-24T21:27:29.245258Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:29.286478Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:29.286505Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:29.286517Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:29.286659Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6851 TClient is connected to server localhost:6851 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-24T21:27:29.851781Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:27:29.861384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... CREATE TABLE `/Root/ColumnShard1` (Col1 Int64 NOT NULL, Col2 Int32 NOT NULL, PRIMARY KEY (Col1)) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1000); 2025-06-24T21:27:31.835684Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630679025966354:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:31.835787Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:32.576247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-24T21:27:33.839719Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630666141063854:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:33.839835Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:27:37.334502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630700500806920:2332];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:37.334502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519630700500806866:2313];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:37.334811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519630700500806866:2313];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:37.335165Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519630700500806866:2313];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:37.335244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630700500806920:2332];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-24T21:27:37.335329Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519630700500806866:2313];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:37.335454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630700500806920:2332];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-24T21:27:37.335479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519630700500806866:2313];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:37.335594Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630700500806920:2332];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-24T21:27:37.335614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037903;self_id=[1:7519630700500806866:2313];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:37.335725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630700500806920:2332];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-24T21:27:37.335744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519630700500806866:2313];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:37.335845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630700500806920:2332];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-24T21:27:37.335897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519630700500806866:2313];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:37.336005Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630700500806920:2332];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-24T21:27:37.336062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519630700500806866:2313];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:37.336139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630700500806920:2332];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-24T21:27:37.336229Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519630700500806866:2313];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:37.336257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630700500806920:2332];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-24T21:27:37.336359Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519630700500806920:2332];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-24T21:27:37.336361Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519630700500806866:2313];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:37.336487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519630700500806920:2332];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-24T21:27:37.380674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038871;self_id=[1:7519630700500806921:2333];tablet_id=72075186224038871;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-24T21:27:37.380763Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038871;self_id=[1:7519630700500806921:2333];tablet_id=72075186224038871;process=TTxInit ... ler.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:32:54.762003Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:32:54.775989Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:32:54.776922Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:32:54.779739Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:32:54.780682Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-24T21:32:54.785127Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:32:54.789193Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-24T21:32:54.812624Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519632067633520441:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:54.812742Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:54.813134Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519632067633520446:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:54.819586Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:32:54.838109Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519632067633520448:2369], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:32:54.911805Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519632067633520501:2586] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:32:55.099794Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976710662;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-24T21:32:55.165187Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; Trying to start YDB, gRPC: 17065, MsgBus: 24992 2025-06-24T21:32:57.430861Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519632082547041866:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:32:57.430933Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0030dd/r3tmp/tmpmG4sgD/pdisk_1.dat 2025-06-24T21:32:57.928609Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:57.931448Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519632082547041848:2079] 1750800777428283 != 1750800777428286 2025-06-24T21:32:57.959351Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:57.959556Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:57.964946Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17065, node 3 2025-06-24T21:32:58.139558Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:32:58.139583Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:32:58.139593Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:32:58.139772Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:32:58.486367Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24992 TClient is connected to server localhost:24992 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:32:59.218251Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:33:02.435933Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519632082547041866:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:33:02.436041Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:33:03.591584Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519632108316846274:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:03.591721Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:03.613076Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:04.044048Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:04.638895Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519632112611814923:2408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:04.639040Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:04.639536Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519632112611814928:2411], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:04.650254Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:33:04.679279Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519632112611814930:2412], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-24T21:33:04.774077Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519632112611814981:3221] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> QuoterWithKesusTest::KesusRecreation [GOOD] >> QuoterWithKesusTest::AllocationStatistics >> DataShardReplication::ApplyChangesToReplicatedTable [GOOD] >> DataShardReplication::ApplyChangesToCommonTable >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert_same_values_simple [GOOD] >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert_with_valid_and_invalid_data |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_data_cleanup/unittest >> DataCleanup::BorrowerDataCleanedAfterCopyTable [GOOD] Test command err: 2025-06-24T21:32:21.926973Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2329], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:32:21.927272Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:32:21.927502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001b03/r3tmp/tmp6xhXf5/pdisk_1.dat 2025-06-24T21:32:22.521534Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:32:22.538202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:32:22.615918Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:22.626757Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:34:2081] 1750800738518189 != 1750800738518193 2025-06-24T21:32:22.700569Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:22.700748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:22.720024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:32:22.860358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:23.551393Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:725:2606], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:23.551584Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:735:2611], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:23.551686Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:23.569379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:32:23.618729Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:32:23.773940Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:739:2614], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:32:23.879719Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:809:2653] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:32:26.042391Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhxpf3nehj20t5w01q921f7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWMxNjVlMWQtMWM5YmE3YmEtZDA2ZGViMWItYjBjMDQ5N2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:32:31.003513Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:285:2327], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:32:31.004041Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:32:31.004188Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001b03/r3tmp/tmpLf3VmO/pdisk_1.dat 2025-06-24T21:32:31.347928Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-24T21:32:31.349711Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:32:31.394393Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:31.397433Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:34:2081] 1750800747528882 != 1750800747528886 2025-06-24T21:32:31.445086Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:31.445225Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:31.462674Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:32:31.558596Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:32.001160Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:717:2600], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:32.001284Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:727:2605], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:32.001364Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:32.007470Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:32:32.029900Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:32:32.176322Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:731:2608], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:32:32.215127Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:800:2646] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:32:32.680668Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhxpqby3wwjjmdsqgzf5606, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDM1N2U1MWMtYjE1NzhiMzQtYmExMTRjNmEtM2FjYjM5Nzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:32:38.215738Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:260:2306], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:32:38.216134Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:32:38.216228Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001b03/r3tmp/tmp1sDfyR/pdisk_1.dat 2025-06-24T21:32:38.515023Z node 3 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 3 Type# 268639257 2025-06-24T21:32:38.516729Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:32:38.570623Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:38.572299Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:34:2081] 1750800754050808 != 1750800754050812 2025-06-24T21:32:38.623551Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025 ... node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:738:2614], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:32:53.606888Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:808:2653] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:32:54.060123Z node 5 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyhxqc6nbvmz2fptectnyfbd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=NmIxOTgzZi1iYzdmNTc5Zi0xM2U1ZGNlYS0xMmZjZGMzNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:33:00.197394Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:260:2306], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:33:00.197936Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:33:00.198050Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001b03/r3tmp/tmpJOaNuo/pdisk_1.dat 2025-06-24T21:33:00.567306Z node 6 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 6 Type# 268639257 2025-06-24T21:33:00.569501Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:33:00.639866Z node 6 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:33:00.641845Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:34:2081] 1750800775859253 != 1750800775859257 2025-06-24T21:33:00.693013Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:33:00.693202Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:33:00.707073Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:33:00.829929Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:01.443478Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:33:01.727946Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:830:2680], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:01.728105Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:01.728215Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:838:2685], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:01.735703Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:33:01.937432Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:844:2688], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:33:01.983038Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:901:2726] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:33:02.509939Z node 6 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhxqmcx40bhht9cm8ec4tgj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=6&id=YjBmODliYjctMzc3NWViNjktZGY4YzhmYTUtY2UzNzY0MjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:33:03.072472Z node 6 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhxqn7037dbycwqcrm36srq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=6&id=YTVhZWMxZDItNGRiNWY5MDktZDA1NTBiMmMtZTlhYjM0Nzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:33:10.185443Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:274:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:33:10.186007Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:33:10.186078Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001b03/r3tmp/tmpJ5xPVw/pdisk_1.dat 2025-06-24T21:33:10.701075Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 2025-06-24T21:33:10.703263Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:33:10.738743Z node 7 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:33:10.741867Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:34:2081] 1750800784938491 != 1750800784938494 2025-06-24T21:33:10.789753Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:33:10.789929Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:33:10.801730Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:33:10.898306Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:11.455976Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:33:11.725564Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:827:2679], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:11.725691Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:837:2684], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:11.725786Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:11.732377Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:33:11.911777Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:841:2687], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:33:11.957507Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:898:2725] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:33:12.484274Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhxqy5b3z6dz9gn1whv0qyk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZGQzOWI1NTgtM2FjNTJjM2EtZmMwMmJlYjEtZGI1YjE4MDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:33:13.007940Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyhxqyy3frk9c5k64nbbvzft, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZmRhNjRkM2UtZGY3NDU3MjgtNjQzZmIzNjAtOTA5MzFj, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:33:13.546797Z node 7 :TX_DATASHARD WARN: datashard__data_cleanup.cpp:37: DataCleanup of tablet# 72075186224037888: has borrowed parts, requested from [7:553:2479] |98.9%| [TM] {RESULT} ydb/core/tx/datashard/ut_data_cleanup/unittest |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_export/unittest >> ExportS3BufferTest::MinBufferSizeWithCompressionAndEncryption [GOOD] |98.9%| [TS] {RESULT} ydb/core/tx/datashard/ut_export/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_followers/unittest >> DataShardFollowers::FollowerReadDuringSplit [GOOD] Test command err: 2025-06-24T21:32:14.194825Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:32:14.195296Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:32:14.195505Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001945/r3tmp/tmphchNLp/pdisk_1.dat 2025-06-24T21:32:14.877266Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:32:14.889810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:32:14.962652Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:14.983424Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750800730847573 != 1750800730847577 2025-06-24T21:32:15.029294Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:32:15.031853Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T21:32:15.032258Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:15.032406Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:15.046649Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:32:15.138526Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T21:32:15.138633Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T21:32:15.139667Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:600:2508] 2025-06-24T21:32:15.270731Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:600:2508] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 PartitionConfig { FollowerGroups { FollowerCount: 1 AllowLeaderPromotion: false } } } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T21:32:15.270870Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:600:2508] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:32:15.271883Z node 1 :TX_PROXY DEBUG: 
schemereq.cpp:1660: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:32:15.271997Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:600:2508] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:32:15.272375Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:32:15.272653Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:600:2508] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:32:15.272839Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:600:2508] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:32:15.277597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:15.284219Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T21:32:15.285222Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:600:2508] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T21:32:15.285310Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:600:2508] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T21:32:15.331941Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:32:15.333232Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:32:15.333840Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:32:15.334127Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:32:15.396886Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:32:15.397774Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:32:15.397923Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:32:15.407040Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:32:15.407209Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:32:15.407271Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 
2025-06-24T21:32:15.415559Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:32:15.415790Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:32:15.415902Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:32:15.427803Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:32:15.486484Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:32:15.489014Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:32:15.489206Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:32:15.489263Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:32:15.489301Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:32:15.489341Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:32:15.489619Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:32:15.489684Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:32:15.490822Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:32:15.490935Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:32:15.490999Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:32:15.491041Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:32:15.491101Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:32:15.491138Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:32:15.491196Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:32:15.491239Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:32:15.491304Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:32:15.491427Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:32:15.491468Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:32:15.491514Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at 
leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:32:15.495513Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:32:15.495595Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:32:15.495807Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:32:15.496170Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:32:15.496231Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:32:15.496337Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:32:15.496454Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status f ... eKqpTransaction 2025-06-24T21:33:14.036694Z node 8 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [8:59:2106] TxId# 281474976715665 ProcessProposeKqpTransaction 2025-06-24T21:33:14.038568Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3269: StateWorkAsFollower, received event# 269877761, Sender [8:1062:2826], Recipient [8:1035:2808]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:33:14.038665Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3277: StateWorkAsFollower, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:33:14.038753Z node 8 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at follower 1 tablet# 72075186224037890, clientId# [8:1060:2825], serverId# [8:1062:2826], sessionId# [0:0:0] 2025-06-24T21:33:14.038999Z node 8 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyhxr0671cg7a7zt1nfyx0nk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=8&id=NjM0YTkxMi1mYzc0ZTM1Ny1kZDg1NmQyMC1kY2E0ZDYyMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-24T21:33:14.041332Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3269: StateWorkAsFollower, received event# 269553215, Sender [8:1066:2827], Recipient [8:1035:2808]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 MaxRowsInResult: 1 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2025-06-24T21:33:14.041410Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3279: StateWorkAsFollower, processing event TEvDataShard::TEvRead 2025-06-24T21:33:14.041562Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} queued, type NKikimr::NDataShard::TDataShard::TTxReadViaPipeline 2025-06-24T21:33:14.041645Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:14.041860Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037890, FollowerId 1 2025-06-24T21:33:14.041967Z node 8 :TX_DATASHARD DEBUG: datashard__init.cpp:806: Updating sys metadata on follower, tabletId 72075186224037890 prev TChangeCounter{serial=0, epoch=0} current TChangeCounter{serial=6, epoch=1} 2025-06-24T21:33:14.042747Z node 8 :TX_DATASHARD DEBUG: datashard__init.cpp:823: Updating tables metadata on follower, tabletId 72075186224037890 prev TChangeCounter{serial=0, epoch=0} current TChangeCounter{serial=4, epoch=1} 2025-06-24T21:33:14.043472Z node 8 :TX_DATASHARD DEBUG: datashard__init.cpp:894: Updating snapshots metadata on follower, tabletId 72075186224037890 prev TChangeCounter{serial=0, epoch=0} current TChangeCounter{serial=0, epoch=1} 2025-06-24T21:33:14.043609Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037890 changed HEAD read to repeatable v1500/18446744073709551615 2025-06-24T21:33:14.043731Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit CheckRead 2025-06-24T21:33:14.043854Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed 2025-06-24T21:33:14.043910Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit CheckRead 2025-06-24T21:33:14.043968Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 72075186224037890 to execution unit BuildAndWaitDependencies 2025-06-24T21:33:14.044025Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit BuildAndWaitDependencies 2025-06-24T21:33:14.044070Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:1] at 72075186224037890 2025-06-24T21:33:14.044130Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed 2025-06-24T21:33:14.044162Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit BuildAndWaitDependencies 2025-06-24T21:33:14.044187Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 72075186224037890 to execution unit ExecuteRead 2025-06-24T21:33:14.044213Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit 
ExecuteRead 2025-06-24T21:33:14.044380Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037890 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 MaxRowsInResult: 1 Reverse: false TotalRowsLimit: 1001 } 2025-06-24T21:33:14.044676Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Restart 2025-06-24T21:33:14.044719Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Restart at tablet# 72075186224037890 2025-06-24T21:33:14.044842Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 1 -> retry Change{7, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T21:33:14.044937Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} touch new 0b, 65b lo load (65b in total), 0b requested for data (4194304b in total) 2025-06-24T21:33:14.045031Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} took 8388608b of static mem, Memory{8388608 dyn 0} 2025-06-24T21:33:14.045122Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} requests PageCollection [72075186224037888:1:24:1:12288:190:0] 65 bytes, 1 pages: [0 4] 2025-06-24T21:33:14.045222Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} postponed, 65b, pages {1 wait, 1 load}, freshly touched 1 pages 2025-06-24T21:33:14.045527Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} got result TEvResult{1 pages [72075186224037888:1:24:1:12288:190:0] ok OK}, category 1 2025-06-24T21:33:14.045685Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037890, FollowerId 1 2025-06-24T21:33:14.045731Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit ExecuteRead 2025-06-24T21:33:14.045849Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037890 Execute read# 2, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 MaxRowsInResult: 1 Reverse: false TotalRowsLimit: 1001 } 2025-06-24T21:33:14.046110Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037890 Complete read# {[8:1066:2827], 0} after executionsCount# 2 2025-06-24T21:33:14.046199Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037890 read iterator# {[8:1066:2827], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-24T21:33:14.046336Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed 2025-06-24T21:33:14.046368Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit ExecuteRead 2025-06-24T21:33:14.046395Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 72075186224037890 to execution unit CompletedOperations 2025-06-24T21:33:14.046428Z node 8 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit CompletedOperations 2025-06-24T21:33:14.046474Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed 2025-06-24T21:33:14.046498Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit CompletedOperations 2025-06-24T21:33:14.046531Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:1] at 72075186224037890 has finished 2025-06-24T21:33:14.046585Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037890 2025-06-24T21:33:14.046701Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 2 -> done Change{7, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T21:33:14.046782Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} release 8388608b of static, Memory{0 dyn 0} 2025-06-24T21:33:14.046854Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037890 2025-06-24T21:33:14.047086Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3269: StateWorkAsFollower, received event# 269553217, Sender [8:1035:2808], Recipient [8:1035:2808]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-24T21:33:14.047136Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3280: StateWorkAsFollower, processing event TEvDataShard::TEvReadContinue 2025-06-24T21:33:14.047358Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{2, NKikimr::NDataShard::TDataShard::TTxReadContinue} queued, type NKikimr::NDataShard::TDataShard::TTxReadContinue 2025-06-24T21:33:14.047445Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{2, NKikimr::NDataShard::TDataShard::TTxReadContinue} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:14.047542Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[8:1066:2827], 0}, firstUnprocessedQuery# 0 2025-06-24T21:33:14.047644Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[8:1066:2827], 0}, FirstUnprocessedQuery# 0 2025-06-24T21:33:14.047797Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[8:1066:2827], 0} sends rowCount# 0, bytes# 0, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T21:33:14.047888Z node 8 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:3103: 72075186224037890 read iterator# {[8:1066:2827], 0} finished in ReadContinue 2025-06-24T21:33:14.048025Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{2, NKikimr::NDataShard::TDataShard::TTxReadContinue} hope 1 -> done Change{7, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T21:33:14.048110Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{2, NKikimr::NDataShard::TDataShard::TTxReadContinue} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:33:14.049242Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3269: StateWorkAsFollower, received event# 269553219, Sender [8:1066:2827], Recipient [8:1035:2808]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T21:33:14.049298Z node 8 
:TX_DATASHARD TRACE: datashard_impl.h:3282: StateWorkAsFollower, processing event TEvDataShard::TEvReadCancel 2025-06-24T21:33:14.049385Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037890 ReadCancel: { ReadId: 0 } { items { uint32_value: 3 } items { uint32_value: 33 } } |98.9%| [TM] {RESULT} ydb/core/tx/datashard/ut_followers/unittest >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert_with_valid_and_invalid_data [GOOD] >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert_parallel ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/row_dispatcher/ut/unittest >> RowDispatcherTests::SessionFatalError [GOOD] Test command err: 2025-06-24T21:31:01.217219Z node 1 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:247: Coordinator: Successfully bootstrapped coordinator, id [1:30:2057] 2025-06-24T21:31:01.217584Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:294: Coordinator: TEvPing received, [1:25:2054] 2025-06-24T21:31:01.217739Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:297: Coordinator: Send TEvPong to [1:25:2054] 2025-06-24T21:31:01.217791Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:294: Coordinator: TEvPing received, [2:26:2054] 2025-06-24T21:31:01.217816Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:297: Coordinator: Send TEvPong to [2:26:2054] 2025-06-24T21:31:01.217863Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:294: Coordinator: TEvPing received, [3:27:2054] 2025-06-24T21:31:01.217888Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:297: Coordinator: Send TEvPong to [3:27:2054] 2025-06-24T21:31:01.218011Z node 1 :FQ_ROW_DISPATCHER INFO: coordinator.cpp:421: Coordinator: TEvCoordinatorRequest from [1:28:2055], topic1, partIds: 0 2025-06-24T21:31:01.218132Z node 1 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:473: Coordinator: Send TEvCoordinatorResult to [1:28:2055] 2025-06-24T21:31:01.218294Z node 1 :FQ_ROW_DISPATCHER INFO: coordinator.cpp:421: Coordinator: TEvCoordinatorRequest from [1:29:2056], topic1, partIds: 0 2025-06-24T21:31:01.218351Z node 1 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:473: Coordinator: Send TEvCoordinatorResult to [1:29:2056] 2025-06-24T21:31:01.228657Z node 1 :FQ_ROW_DISPATCHER INFO: coordinator.cpp:421: Coordinator: TEvCoordinatorRequest from [1:29:2056], topic1, partIds: 1 2025-06-24T21:31:01.228825Z node 1 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:473: Coordinator: Send TEvCoordinatorResult to [1:29:2056] 2025-06-24T21:31:01.229227Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:294: Coordinator: TEvPing received, [2:31:2055] 2025-06-24T21:31:01.229287Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:265: Coordinator: Move all Locations from old actor [2:26:2054] to new [2:31:2055] 2025-06-24T21:31:01.229326Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:297: Coordinator: Send TEvPong to [2:31:2055] 2025-06-24T21:31:01.229388Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:294: Coordinator: TEvPing received, [2:32:2056] 2025-06-24T21:31:01.229450Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:265: Coordinator: Move all Locations from old actor [2:31:2055] to new [2:32:2056] 2025-06-24T21:31:01.229651Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:297: Coordinator: Send TEvPong to [2:32:2056] 2025-06-24T21:31:01.229765Z node 1 :FQ_ROW_DISPATCHER INFO: coordinator.cpp:421: Coordinator: TEvCoordinatorRequest from [1:28:2055], topic1, partIds: 0 2025-06-24T21:31:01.229837Z node 1 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:473: Coordinator: Send TEvCoordinatorResult to 
[1:28:2055] 2025-06-24T21:31:01.229976Z node 1 :FQ_ROW_DISPATCHER INFO: coordinator.cpp:421: Coordinator: TEvCoordinatorRequest from [1:29:2056], topic1, partIds: 1 2025-06-24T21:31:01.230030Z node 1 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:473: Coordinator: Send TEvCoordinatorResult to [1:29:2056] 2025-06-24T21:31:01.331672Z node 5 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:247: Coordinator: Successfully bootstrapped coordinator, id [5:30:2057] 2025-06-24T21:31:01.333896Z node 5 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:294: Coordinator: TEvPing received, [5:25:2054] 2025-06-24T21:31:01.333972Z node 5 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:297: Coordinator: Send TEvPong to [5:25:2054] 2025-06-24T21:31:01.334021Z node 5 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:294: Coordinator: TEvPing received, [6:26:2054] 2025-06-24T21:31:01.334059Z node 5 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:297: Coordinator: Send TEvPong to [6:26:2054] 2025-06-24T21:31:01.334097Z node 5 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:294: Coordinator: TEvPing received, [7:27:2054] 2025-06-24T21:31:01.334116Z node 5 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:297: Coordinator: Send TEvPong to [7:27:2054] 2025-06-24T21:31:01.334227Z node 5 :FQ_ROW_DISPATCHER INFO: coordinator.cpp:421: Coordinator: TEvCoordinatorRequest from [5:28:2055], topic1, partIds: 0, 1, 2 2025-06-24T21:31:01.334377Z node 5 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:473: Coordinator: Send TEvCoordinatorResult to [5:28:2055] 2025-06-24T21:31:01.334537Z node 5 :FQ_ROW_DISPATCHER INFO: coordinator.cpp:421: Coordinator: TEvCoordinatorRequest from [5:29:2056], topic1, partIds: 3 2025-06-24T21:31:01.334611Z node 5 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:473: Coordinator: Send TEvCoordinatorResult to [5:29:2056] 2025-06-24T21:31:01.574690Z node 9 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:224: TLeaderElection [9:8:2055] Successfully bootstrapped, local coordinator id [9:5:2052] 2025-06-24T21:31:01.574829Z node 9 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:224: TLeaderElection [9:9:2056] Successfully bootstrapped, local coordinator id [9:6:2053] 2025-06-24T21:31:01.574896Z node 9 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:224: TLeaderElection [9:10:2057] Successfully bootstrapped, local coordinator id [9:7:2054] 2025-06-24T21:31:01.575016Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:71: Run create coordination node "local/row_dispatcher//tenant" actor 2025-06-24T21:31:01.575064Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.575104Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.642038Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:71: Run create coordination node "local/row_dispatcher//tenant" actor 2025-06-24T21:31:01.642113Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.642143Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.645403Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:71: Run create coordination node "local/row_dispatcher//tenant" actor 2025-06-24T21:31:01.645498Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.645541Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "local/row_dispatcher//tenant" 
2025-06-24T21:31:01.681571Z node 9 :FQ_ROW_DISPATCHER ERROR: schema.cpp:160: Create coordination node "local/row_dispatcher//tenant" error: OVERLOADED {
: Error: Check failed: path: '/local/row_dispatcher/tenant', error: path exists but creating right now (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeKesus, state: EPathStateCreate) } 2025-06-24T21:31:01.681766Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.681857Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.683981Z node 9 :FQ_ROW_DISPATCHER ERROR: schema.cpp:160: Create coordination node "local/row_dispatcher//tenant" error: OVERLOADED {
: Error: Check failed: path: '/local/row_dispatcher/tenant', error: path exists but creating right now (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeKesus, state: EPathStateCreate) } 2025-06-24T21:31:01.694332Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.694434Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.707237Z node 9 :FQ_ROW_DISPATCHER ERROR: schema.cpp:160: Create coordination node "local/row_dispatcher//tenant" error: OVERLOADED {
: Error: Check failed: path: '/local/row_dispatcher/tenant', error: path exists but creating right now (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeKesus, state: EPathStateCreate) } 2025-06-24T21:31:01.707434Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.707484Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.719627Z node 9 :FQ_ROW_DISPATCHER ERROR: schema.cpp:160: Create coordination node "local/row_dispatcher//tenant" error: OVERLOADED {
: Error: Check failed: path: '/local/row_dispatcher/tenant', error: path exists but creating right now (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeKesus, state: EPathStateCreate) } 2025-06-24T21:31:01.719805Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.719855Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.750580Z node 9 :FQ_ROW_DISPATCHER ERROR: schema.cpp:160: Create coordination node "local/row_dispatcher//tenant" error: OVERLOADED {
: Error: Check failed: path: '/local/row_dispatcher/tenant', error: path exists but creating right now (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeKesus, state: EPathStateCreate) } 2025-06-24T21:31:01.750741Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.750779Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.753951Z node 9 :FQ_ROW_DISPATCHER ERROR: schema.cpp:160: Create coordination node "local/row_dispatcher//tenant" error: OVERLOADED {
: Error: Check failed: path: '/local/row_dispatcher/tenant', error: path exists but creating right now (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeKesus, state: EPathStateCreate) } 2025-06-24T21:31:01.764257Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.764347Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.769796Z node 9 :FQ_ROW_DISPATCHER ERROR: schema.cpp:160: Create coordination node "local/row_dispatcher//tenant" error: OVERLOADED {
: Error: Check failed: path: '/local/row_dispatcher/tenant', error: path exists but creating right now (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeKesus, state: EPathStateCreate) } 2025-06-24T21:31:01.769991Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.770029Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.791112Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:155: Successfully created coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.791241Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:122: Reply for create coordination node "local/row_dispatcher//tenant": 2025-06-24T21:31:01.791328Z node 9 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:333: TLeaderElection [9:9:2056] Coordination node successfully created 2025-06-24T21:31:01.791370Z node 9 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:312: TLeaderElection [9:9:2056] Start session 2025-06-24T21:31:01.793935Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:155: Successfully created coordination node "local/row_dispatcher//tenant" 2025-06-24T21:31:01.794045Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:122: Reply for create coordination node "local/row_dispatcher//tenant": {
: Error: Check failed: path: '/local/row_dispatcher/tenant', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeKesus, state: EPathStateNoChanges) } 2025-06-24T21:31:01.794147Z node 9 :FQ_ROW_DISPATCHER DEBUG: ... ER DEBUG: leader_election.cpp:224: TLeaderElection [42:19:2060] Successfully bootstrapped, local coordinator id [42:18:2059] 2025-06-24T21:33:11.760959Z node 42 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:71: Run create coordination node "YDB_DATABASE/RowDispatcher/Tenant" actor 2025-06-24T21:33:11.760993Z node 42 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "YDB_DATABASE/RowDispatcher/Tenant" 2025-06-24T21:33:11.761023Z node 42 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "YDB_DATABASE/RowDispatcher/Tenant" 2025-06-24T21:33:11.762708Z node 42 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:566: RowDispatcher: TEvCoordinatorChangesSubscribe from [42:18:2059] 2025-06-24T21:33:11.762999Z node 42 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:792: RowDispatcher: Received TEvStartSession from [43:16:2053], read group connection_id1, topicPath topic part id 100 query id QueryId cookie 42 2025-06-24T21:33:11.763312Z node 42 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:829: RowDispatcher: Create new session: read group connection_id1 topic topic part id 100 2025-06-24T21:33:11.770874Z node 42 :FQ_ROW_DISPATCHER ERROR: schema.cpp:160: Create coordination node "YDB_DATABASE/RowDispatcher/Tenant" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): DNS resolution failed for YDB_ENDPOINT: C-ares status is not ARES_SUCCESS qtype=A name=YDB_ENDPOINT is_balancer=0: DNS server returned general failure } {
: Error: Grpc error response on endpoint YDB_ENDPOINT } ] 2025-06-24T21:33:11.771228Z node 42 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:987: RowDispatcher: TEvTryConnect to node id 43 2025-06-24T21:33:11.771649Z node 42 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:516: RowDispatcher: EvNodeConnected, node id 43 2025-06-24T21:33:11.772310Z node 42 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1016: RowDispatcher: Forward TEvNewDataArrived from [42:22:2063] to [43:16:2053] query id QueryId 2025-06-24T21:33:11.772708Z node 42 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:875: RowDispatcher: Received TEvGetNextBatch from [43:16:2053] part id 100 query id QueryId 2025-06-24T21:33:11.772880Z node 42 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1035: RowDispatcher: Forward TEvMessageBatch from [42:22:2063] to [43:16:2053] query id QueryId 2025-06-24T21:33:11.773423Z node 42 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:902: RowDispatcher: Received TEvNoSession from [43:16:2053], generation 41 2025-06-24T21:33:11.773520Z node 42 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1016: RowDispatcher: Forward TEvNewDataArrived from [42:22:2063] to [43:16:2053] query id QueryId 2025-06-24T21:33:11.773869Z node 42 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:875: RowDispatcher: Received TEvGetNextBatch from [43:16:2053] part id 100 query id QueryId 2025-06-24T21:33:11.773991Z node 42 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1035: RowDispatcher: Forward TEvMessageBatch from [42:22:2063] to [43:16:2053] query id QueryId 2025-06-24T21:33:11.774341Z node 42 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:902: RowDispatcher: Received TEvNoSession from [43:16:2053], generation 42 2025-06-24T21:33:11.774414Z node 42 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:951: RowDispatcher: DeleteConsumer, readActorId [43:16:2053] query id QueryId, partitions size 1 2025-06-24T21:33:11.774559Z node 42 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:971: RowDispatcher: Session is not used, sent TEvPoisonPill to [42:22:2063] 2025-06-24T21:33:11.919794Z node 44 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:474: RowDispatcher: Successfully bootstrapped row dispatcher, id [44:17:2058], tenant Tenant 2025-06-24T21:33:11.955337Z node 44 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:247: Coordinator: Successfully bootstrapped coordinator, id [44:18:2059] 2025-06-24T21:33:11.955427Z node 44 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:224: TLeaderElection [44:19:2060] Successfully bootstrapped, local coordinator id [44:18:2059] 2025-06-24T21:33:11.955498Z node 44 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:71: Run create coordination node "YDB_DATABASE/RowDispatcher/Tenant" actor 2025-06-24T21:33:11.955531Z node 44 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "YDB_DATABASE/RowDispatcher/Tenant" 2025-06-24T21:33:11.955562Z node 44 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "YDB_DATABASE/RowDispatcher/Tenant" 2025-06-24T21:33:11.955989Z node 44 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:566: RowDispatcher: TEvCoordinatorChangesSubscribe from [44:18:2059] 2025-06-24T21:33:11.956220Z node 44 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:792: RowDispatcher: Received TEvStartSession from [44:14:2056], read group connection_id1, topicPath topic part id 100 query id QueryId cookie 1 2025-06-24T21:33:11.956459Z node 44 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:829: RowDispatcher: Create new session: read group connection_id1 topic topic part id 100 2025-06-24T21:33:11.956935Z node 44 
:FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1016: RowDispatcher: Forward TEvNewDataArrived from [44:22:2063] to [44:14:2056] query id QueryId 2025-06-24T21:33:11.957072Z node 44 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:936: RowDispatcher: Received TEvStopSession from [44:14:2056] topic topic query id QueryId 2025-06-24T21:33:11.957163Z node 44 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:951: RowDispatcher: DeleteConsumer, readActorId [44:14:2056] query id QueryId, partitions size 1 2025-06-24T21:33:11.957260Z node 44 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:971: RowDispatcher: Session is not used, sent TEvPoisonPill to [44:22:2063] 2025-06-24T21:33:12.153848Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:474: RowDispatcher: Successfully bootstrapped row dispatcher, id [46:17:2058], tenant Tenant 2025-06-24T21:33:12.165307Z node 46 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:247: Coordinator: Successfully bootstrapped coordinator, id [46:18:2059] 2025-06-24T21:33:12.165404Z node 46 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:224: TLeaderElection [46:19:2060] Successfully bootstrapped, local coordinator id [46:18:2059] 2025-06-24T21:33:12.165508Z node 46 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:71: Run create coordination node "YDB_DATABASE/RowDispatcher/Tenant" actor 2025-06-24T21:33:12.165551Z node 46 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "YDB_DATABASE/RowDispatcher/Tenant" 2025-06-24T21:33:12.165584Z node 46 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "YDB_DATABASE/RowDispatcher/Tenant" 2025-06-24T21:33:12.165962Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:566: RowDispatcher: TEvCoordinatorChangesSubscribe from [46:18:2059] 2025-06-24T21:33:12.166200Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:792: RowDispatcher: Received TEvStartSession from [46:14:2056], read group connection_id1, topicPath topic part id 100,101 query id QueryId cookie 1 2025-06-24T21:33:12.166436Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:829: RowDispatcher: Create new session: read group connection_id1 topic topic part id 100 2025-06-24T21:33:12.166615Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:829: RowDispatcher: Create new session: read group connection_id1 topic topic part id 101 2025-06-24T21:33:12.166963Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:792: RowDispatcher: Received TEvStartSession from [46:15:2057], read group connection_id1, topicPath topic part id 100,101 query id QueryId cookie 1 2025-06-24T21:33:12.167488Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1055: RowDispatcher: Forward TEvSessionError from [46:22:2063] to [46:14:2056] query id QueryId 2025-06-24T21:33:12.167599Z node 46 :FQ_ROW_DISPATCHER WARN: row_dispatcher.cpp:1075: RowDispatcher: Fatal session error, remove session [46:22:2063] 2025-06-24T21:33:12.167700Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:951: RowDispatcher: DeleteConsumer, readActorId [46:14:2056] query id QueryId, partitions size 2 2025-06-24T21:33:12.167988Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1016: RowDispatcher: Forward TEvNewDataArrived from [46:23:2064] to [46:15:2057] query id QueryId 2025-06-24T21:33:12.168099Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:875: RowDispatcher: Received TEvGetNextBatch from [46:15:2057] part id 101 query id QueryId 2025-06-24T21:33:12.168234Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1035: RowDispatcher: Forward TEvMessageBatch from [46:23:2064] to 
[46:15:2057] query id QueryId 2025-06-24T21:33:12.168401Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:792: RowDispatcher: Received TEvStartSession from [46:14:2056], read group connection_id1, topicPath topic part id 100,101 query id QueryId cookie 1 2025-06-24T21:33:12.168573Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:829: RowDispatcher: Create new session: read group connection_id1 topic topic part id 100 2025-06-24T21:33:12.168953Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1055: RowDispatcher: Forward TEvSessionError from [46:22:2063] to [46:15:2057] query id QueryId 2025-06-24T21:33:12.169030Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:951: RowDispatcher: DeleteConsumer, readActorId [46:15:2057] query id QueryId, partitions size 2 2025-06-24T21:33:12.169162Z node 46 :FQ_ROW_DISPATCHER ERROR: row_dispatcher.cpp:968: RowDispatcher: Wrong readActorId [46:15:2057], no such consumer 2025-06-24T21:33:12.169225Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:971: RowDispatcher: Session is not used, sent TEvPoisonPill to [46:22:2063] 2025-06-24T21:33:12.169497Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:792: RowDispatcher: Received TEvStartSession from [46:15:2057], read group connection_id1, topicPath topic part id 100,101 query id QueryId cookie 1 2025-06-24T21:33:12.169955Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1016: RowDispatcher: Forward TEvNewDataArrived from [46:24:2065] to [46:14:2056] query id QueryId 2025-06-24T21:33:12.170090Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:875: RowDispatcher: Received TEvGetNextBatch from [46:14:2056] part id 100 query id QueryId 2025-06-24T21:33:12.170189Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1035: RowDispatcher: Forward TEvMessageBatch from [46:24:2065] to [46:14:2056] query id QueryId 2025-06-24T21:33:12.170337Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1016: RowDispatcher: Forward TEvNewDataArrived from [46:24:2065] to [46:15:2057] query id QueryId 2025-06-24T21:33:12.170498Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:875: RowDispatcher: Received TEvGetNextBatch from [46:15:2057] part id 100 query id QueryId 2025-06-24T21:33:12.170655Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1035: RowDispatcher: Forward TEvMessageBatch from [46:24:2065] to [46:15:2057] query id QueryId 2025-06-24T21:33:12.170895Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1016: RowDispatcher: Forward TEvNewDataArrived from [46:23:2064] to [46:14:2056] query id QueryId 2025-06-24T21:33:12.171899Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:875: RowDispatcher: Received TEvGetNextBatch from [46:14:2056] part id 101 query id QueryId 2025-06-24T21:33:12.173276Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1035: RowDispatcher: Forward TEvMessageBatch from [46:23:2064] to [46:14:2056] query id QueryId 2025-06-24T21:33:12.177633Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1016: RowDispatcher: Forward TEvNewDataArrived from [46:23:2064] to [46:15:2057] query id QueryId 2025-06-24T21:33:12.181999Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:875: RowDispatcher: Received TEvGetNextBatch from [46:15:2057] part id 101 query id QueryId 2025-06-24T21:33:12.182241Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1035: RowDispatcher: Forward TEvMessageBatch from [46:23:2064] to [46:15:2057] query id QueryId |98.9%| [TM] {RESULT} ydb/core/fq/libs/row_dispatcher/ut/unittest |98.9%| [TM] {asan, default-linux-x86_64, 
release} ydb/core/tx/conveyor_composite/ut/unittest >> test_unknown_data_source.py::TestUnknownDataSource::test_should_fail_unknown_data_source[v2-client0] >> TDqSolomonWriteActorTest::TestWriteFormat >> test.py::test[solomon-InvalidProject-] [GOOD] |98.9%| [TA] $(B)/ydb/core/kqp/ut/service/test-results/unittest/{meta.json ... results_accumulator.log} >> test.py::test[solomon-LabelColumns-default.txt] >> KqpPg::ValuesInsert+useSink [GOOD] >> KqpPg::ValuesInsert-useSink >> KqpBatchDelete::ManyPartitions_1 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> ShowCreateView::WithPairedTablePathPrefix [FAIL] Test command err: 2025-06-24T21:23:29.191949Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519629641490064017:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:29.192008Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ae7/r3tmp/tmpPubxG9/pdisk_1.dat 2025-06-24T21:23:29.787939Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:29.788095Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:29.803533Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:29.813394Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19047, node 1 2025-06-24T21:23:30.228946Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:23:30.283991Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:23:30.284024Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:23:30.284031Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:23:30.284138Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13817 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:23:31.203041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:23:31.273474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) waiting... 2025-06-24T21:23:31.315840Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519629648874070336:2092];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:23:31.315912Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Database1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; waiting... 2025-06-24T21:23:31.439683Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:23:31.439773Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:23:31.451504Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-24T21:23:31.471430Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:23:31.587947Z node 3 :SYSTEM_VIEWS INFO: processor_impl.cpp:41: [72075186224037893] OnActivateExecutor 2025-06-24T21:23:31.588017Z node 3 :SYSTEM_VIEWS DEBUG: tx_init_schema.cpp:15: [72075186224037893] TTxInitSchema::Execute 2025-06-24T21:23:31.718433Z node 3 :SYSTEM_VIEWS DEBUG: tx_init_schema.cpp:42: [72075186224037893] TTxInitSchema::Complete 2025-06-24T21:23:31.718520Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:136: [72075186224037893] TTxInit::Execute 2025-06-24T21:23:31.724555Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:257: [72075186224037893] Loading interval summaries: query count# 0, node ids count# 0, total count# 0 2025-06-24T21:23:31.724613Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:284: [72075186224037893] Loading interval metrics: query count# 0 2025-06-24T21:23:31.724696Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:362: [72075186224037893] Loading interval query tops: total query count# 0 2025-06-24T21:23:31.724733Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:408: [72075186224037893] Loading nodes to request: nodes count# 0, hashes count# 0 2025-06-24T21:23:31.724774Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 6, result count# 0 2025-06-24T21:23:31.724829Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 7, result count# 0 2025-06-24T21:23:31.724867Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 8, result count# 0 2025-06-24T21:23:31.724912Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 9, result count# 0 
2025-06-24T21:23:31.724947Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 10, result count# 0 2025-06-24T21:23:31.727622Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 11, result count# 0 2025-06-24T21:23:31.727680Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 12, result count# 0 2025-06-24T21:23:31.727733Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 13, result count# 0 2025-06-24T21:23:31.727765Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 14, result count# 0 2025-06-24T21:23:31.727800Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 15, result count# 0 2025-06-24T21:23:31.727870Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:129: [72075186224037893] Loading results: table# 16, partCount count# 0 2025-06-24T21:23:31.727934Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:129: [72075186224037893] Loading results: table# 19, partCount count# 0 2025-06-24T21:23:31.727979Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 17, result count# 0 2025-06-24T21:23:31.728030Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 18, result count# 0 2025-06-24T21:23:31.728083Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 20, result count# 0 2025-06-24T21:23:31.728122Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 21, result count# 0 2025-06-24T21:23:31.729491Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:333: [72075186224037893] Reset: interval end# 2025-06-24T21:23:31.000000Z 2025-06-24T21:23:31.729755Z node 3 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:778: Handle TEvSysView::TEvRegisterDbCounters: service id# [3:7519629648874070307:2073], path id# [OwnerId: 72057594046644480, LocalPathId: 2], service# 2 2025-06-24T21:23:31.729790Z node 3 :SYSTEM_VIEWS DEBUG: partition_stats.cpp:32: NSysView::TPartitionStatsCollector bootstrapped 2025-06-24T21:23:31.730299Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:23:31.742766Z node 3 :SYSTEM_VIEWS INFO: sysview_service.cpp:860: Navigate by path id succeeded: service id# [3:7519629648874070307:2073], path id# [OwnerId: 72057594046644480, LocalPathId: 2], database# /Root/Database1 2025-06-24T21:23:31.742890Z node 3 :SYSTEM_VIEWS INFO: sysview_service.cpp:886: Navigate by database succeeded: service id# [3:7519629648874070307:2073], database# /Root/Database1, no sysview processor 2025-06-24T21:23:31.745679Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:488: [72075186224037893] TTxInit::Complete 2025-06-24T21:23:31.747095Z node 3 :SYSTEM_VIEWS DEBUG: tx_aggregate.cpp:14: [72075186224037893] TTxAggregate::Execute 2025-06-24T21:23:31.747168Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:136: [72075186224037893] PersistQueryResults: interval end# 2025-06-24T21:23:31.000000Z, query count# 0 2025-06-24T21:23:31.747194Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 8, interval end# 2025-06-24T21:23:31.000000Z, query count# 0, persisted# 0 2025-06-24T21:23:31.747215Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 10, interval end# 2025-06-24T21:23:31.000000Z, query count# 0, persisted# 0 2025-06-24T21:23:31.747252Z node 3 
:SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 12, interval end# 2025-06-24T21:23:31.000000Z, query count# 0, persisted# 0 2025-06-24T21:23:31.747270Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 14, interval end# 2025-06-24T21:23:31.000000Z, query count# 0, persisted# 0 2025-06-24T21:23:31.748393Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 9, interval end# 2025-06-24T22:00:00.000000Z, query count# 0, persisted# 0 2025-06-24T21:23:31.748421Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 11, interval end# 2025-06-24T22:00:00.000000Z, query count# 0, persisted# 0 2025-06-24T21:23:31.748446Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 13, interval end# 2025-06-24T22:00:00.000000Z, query count# 0, persisted# 0 2025-06-24T21:23:31.748485Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 15, interval end# 2025-06-24T22:00:00.000000Z, query count# 0, persisted# 0 2025-06-24T21:23:31.753563Z node 3 :SYSTEM_VIEWS DEBUG: tx_aggregate.cpp:110: [72075186224037893] TTxAggregate::Complete 2025-06-24T21:23:31.753661Z node 3 :SYSTEM_VIEWS DEBUG: tx_configure.cpp:20: [72075186224037893] TTxConfigure::Execute: database# /Root/Database1 2025-06-24T21:23:31.768162Z node 3 :SYSTEM_VIEWS DEBUG: tx_configure.cpp:30: [72075186224037893] TTxConfigure::Complete 2025-06-24T21:23:31.768223Z node 3 :SYSTEM_VIEWS INFO: partition_stats.cpp:522: NSysView::TPartitionStatsCollector initialized: domain key# [OwnerId: 72057594046644480, LocalPathId: 2], sysview processor id# 72075186224037893 2025-06-24T21:23:31.825210Z node 1 :FLAT_TX ... 
d/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:37.227521Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:384) 2025-06-24T21:32:39.092372Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:32:39.092405Z node 19 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:43.224660Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715719:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-24T21:32:43.263326Z node 19 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyhxpnje8gw9jne3djay5cy8", SessionId: ydb://session/3?node_id=19&id=ZjdkZTVkMWMtMmZiODlkNi0zODY2ZTIzZi05MmY5MWViOQ==, Slow query, duration: 13.094792s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "\n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n ", parameters: 0b 2025-06-24T21:32:48.446459Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715752:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-24T21:32:48.874367Z node 19 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [19:7519632042270234623:3102], owner: [19:7519632042270234620:3100], scan id: 0, sys view info: Type: EShowCreate SourceObject { OwnerId: 1 LocalId: 0 } 2025-06-24T21:32:48.877283Z node 19 :SYSTEM_VIEWS INFO: show_create.cpp:107: Scan prepared, actor: [19:7519632042270234623:3102] 2025-06-24T21:32:48.985019Z node 19 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [19:7519632042270234623:3102], row count: 1, finished: 1 2025-06-24T21:32:48.985181Z node 19 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [19:7519632042270234623:3102], owner: [19:7519632042270234620:3100], scan id: 0, sys view info: Type: EShowCreate SourceObject { OwnerId: 1 LocalId: 0 } 2025-06-24T21:32:52.870683Z node 24 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[24:7519632060526595499:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:32:52.873242Z node 24 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ae7/r3tmp/tmps9afeB/pdisk_1.dat 2025-06-24T21:32:53.515510Z node 24 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:53.564306Z node 24 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(24, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:53.564452Z node 24 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(24, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:53.576931Z node 24 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(24, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10990, node 24 2025-06-24T21:32:53.900244Z node 24 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:32:53.900278Z node 24 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:32:53.900293Z node 24 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:32:53.900573Z node 24 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:32:53.999308Z node 24 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18479 TClient is connected to server localhost:18479 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:32:55.101384Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-24T21:32:57.873733Z node 24 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[24:7519632060526595499:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:32:57.873861Z node 24 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:33:00.842215Z node 24 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1482: TraceId: "01jyhxqe039yw1pcrgmajvq511", Request deadline has expired for 0.672004s seconds (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession 2025-06-24T21:33:04.144813Z node 29 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[29:7519632112302571304:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:33:04.167120Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/004ae7/r3tmp/tmpy0VjFB/pdisk_1.dat 2025-06-24T21:33:04.528132Z node 29 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:33:04.565220Z node 29 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(29, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:33:04.565385Z node 29 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(29, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:33:04.579939Z node 29 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(29, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:33:04.604996Z node 29 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 29 Type# 268639257 TServer::EnableGrpc on GrpcPort 30894, node 29 2025-06-24T21:33:04.763581Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:33:04.763614Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:33:04.763629Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:33:04.763857Z node 29 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:33:05.168245Z node 29 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18696 TClient is connected to server localhost:18696 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:33:06.247289Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:33:09.131296Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[29:7519632112302571304:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:33:09.131417Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:33:12.634751Z node 29 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1482: TraceId: "01jyhxqrwgcbrbyt0q3q666gn4", Request deadline has expired for 1.317578s seconds (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession >> DiscoveryIsNotBroken::NoKafkaEndpointInDiscovery [GOOD] >> DiscoveryIsNotBroken::NoKafkaSslEndpointInDiscovery |98.9%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/service/test-results/unittest/{meta.json ... 
results_accumulator.log} >> DataShardReassign::AutoReassignOnYellowFlag >> TTxDataShardTestInit::TestGetShardStateAfterInitialization >> TMemoryController::Counters >> TCreateAndDropViewTest::DropView [GOOD] >> TCreateAndDropViewTest::DropViewDisabledFeatureFlag >> TTxDataShardPrefixKMeansScan::BuildToPosting [GOOD] >> TTxDataShardPrefixKMeansScan::BuildToBuild >> KeyValueGRPCService::SimpleWriteReadWithIncorreectPath [GOOD] >> KeyValueGRPCService::SimpleWriteReadWithoutToken ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::ManyPartitions_1 [GOOD] Test command err: Trying to start YDB, gRPC: 10240, MsgBus: 17768 2025-06-24T21:29:16.626964Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631132622657616:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:16.646361Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0029bd/r3tmp/tmpJAKt1o/pdisk_1.dat 2025-06-24T21:29:17.141879Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631132622657432:2079] 1750800556607452 != 1750800556607455 2025-06-24T21:29:17.177072Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:17.182535Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:17.182613Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:17.184062Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10240, node 1 2025-06-24T21:29:17.251923Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:17.251947Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:17.251968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:17.252106Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17768 2025-06-24T21:29:17.627320Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17768 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:18.039802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:18.056043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:29:18.074239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:18.238742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:18.447461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:18.531094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:20.310533Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631149802528270:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:20.310670Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:20.570253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:20.604924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:20.643719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:20.682767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:20.754243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:20.785652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:20.822459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:20.911524Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631149802528936:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:20.911604Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:20.911678Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631149802528941:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:20.916191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:20.925793Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631149802528943:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:20.991614Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631149802528994:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:21.622298Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631132622657616:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:21.622377Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:21.972069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0029bd/r3tmp/tmpEGU9zV/pdisk_1.dat 2025-06-24T21:33:02.452878Z node 20 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:33:02.454161Z node 20 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [20:7519632103886314877:2079] 1750800782037722 != 1750800782037725 2025-06-24T21:33:02.503750Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:33:02.503954Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:33:02.512629Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21002, node 20 2025-06-24T21:33:02.684382Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:33:02.684427Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:33:02.684456Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:33:02.684720Z node 20 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:33:03.115378Z node 20 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18714 TClient is connected to server localhost:18714 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:33:04.284066Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:33:04.306461Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:33:04.324039Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:33:04.705311Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:33:05.141664Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:33:05.364894Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:33:07.040110Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[20:7519632103886314899:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:33:07.040204Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:33:11.959439Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7519632142541022228:2377], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:11.959637Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:12.013935Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:12.089223Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:12.179570Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:12.272086Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:12.370787Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:12.454670Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:12.539742Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:12.675819Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7519632146835990196:2442], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:12.675994Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:12.678540Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7519632146835990201:2445], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:12.687657Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:33:12.716995Z node 20 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [20:7519632146835990203:2446], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:33:12.796383Z node 20 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [20:7519632146835990254:3462] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:33:15.316146Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:33:17.405373Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:33:17.405400Z node 20 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded >> DataShardReplication::ApplyChangesToCommonTable [GOOD] >> DataShardReplication::ApplyChangesWithConcurrentTx >> TTxDataShardTestInit::TestGetShardStateAfterInitialization [GOOD] >> TTxDataShardTestInit::TestTableHasPath >> KqpTpch::Query09 [GOOD] >> KqpTpch::Query10 >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_table-True] >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_simple_table-False] >> test_config_with_metadata.py::TestConfigWithMetadataMirrorMax::test_cluster_is_operational_with_metadata |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> KqpBatchDelete::SimplePartitions [GOOD] |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_table-True] >> TSentinelUnstableTests::BSControllerCantChangeStatus [GOOD] >> QuoterWithKesusTest::AllocationStatistics [GOOD] >> QuoterWithKesusTest::UpdatesCountersForParentResources >> test_kqprun_recipe.py::TestKqprunRecipe::test_query_execution ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::SimplePartitions [GOOD] Test command err: Trying to start YDB, gRPC: 14124, MsgBus: 12443 2025-06-24T21:29:10.382532Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631108006211327:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:10.383055Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002c11/r3tmp/tmpxEJLSf/pdisk_1.dat 2025-06-24T21:29:10.883325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:10.883487Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:10.886937Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:10.912024Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were 
not loaded 2025-06-24T21:29:10.913962Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631108006211146:2079] 1750800550372216 != 1750800550372219 TServer::EnableGrpc on GrpcPort 14124, node 1 2025-06-24T21:29:11.138369Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:11.138395Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:11.138407Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:11.138547Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:11.375379Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12443 TClient is connected to server localhost:12443 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.125968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:12.151489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:29:12.171558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.425443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:29:12.663305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.756408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.309228Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631125186081959:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.309381Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.803058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.879884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.910771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.947847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.988546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.065755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.105769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.176972Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631129481049919:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.177050Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.177320Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631129481049924:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.181172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:15.197529Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631129481049926:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:15.255940Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631129481049979:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:15.379238Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631108006211327:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.379306Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 8855, MsgBus: 25455 2025-06-24T21:29:22.878985Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631159275927955:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:22.879036Z node 2 :METADATA_PROVIDER ERROR: log.cpp ... RN: schemeshard_import.cpp:304: Table profiles were not loaded Trying to start YDB, gRPC: 4766, MsgBus: 30776 2025-06-24T21:33:04.798085Z node 16 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[16:7519632109908506337:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:33:04.798237Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002c11/r3tmp/tmpXivF7z/pdisk_1.dat 2025-06-24T21:33:05.007593Z node 16 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:33:05.011272Z node 16 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [16:7519632109908506300:2079] 1750800784793890 != 1750800784793893 2025-06-24T21:33:05.028514Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:33:05.028632Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:33:05.031383Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4766, node 16 2025-06-24T21:33:05.103896Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:33:05.103925Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:33:05.103935Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:33:05.104108Z node 16 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30776 TClient is connected to server localhost:30776 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:33:05.753205Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:33:05.764216Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:33:05.784990Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:33:05.905512Z node 16 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:33:05.993255Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:33:06.306418Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:33:06.420545Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:33:09.798089Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[16:7519632109908506337:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:33:09.798180Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:33:11.502387Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519632139973279020:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:11.502501Z node 16 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:11.603551Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:11.698035Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:11.768857Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:11.820785Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:11.904859Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:11.956967Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:12.042947Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:12.129479Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519632144268246990:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:12.129636Z node 16 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:12.129747Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519632144268246995:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:12.135419Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:33:12.147919Z node 16 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [16:7519632144268246997:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:33:12.225159Z node 16 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [16:7519632144268247048:3431] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:33:19.979278Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:33:19.979316Z node 16 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/wardens/py3test >> test_liveness_wardens.py::TestLivenessWarden::test_scheme_shard_has_no_in_flight_transactions [GOOD] |98.9%| [TM] {RESULT} ydb/tests/functional/wardens/py3test >> Etcd_KV::OptimisticUpdate [GOOD] >> Etcd_KV::OptimisticDelete |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test >> test_kqprun_recipe.py::TestKqprunRecipe::test_query_execution [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut_sentinel_unstable/unittest >> TSentinelUnstableTests::BSControllerCantChangeStatus [GOOD] Test command err: 2025-06-24T21:33:04.462306Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-24T21:33:04.462411Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-24T21:33:04.462505Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-24T21:33:04.462538Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-24T21:33:04.462615Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-24T21:33:04.462704Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-24T21:33:04.471802Z node 1 :CMS DEBUG: sentinel.cpp:486: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "node-1" State: UNKNOWN Devices { Name: "pdisk-1-4" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-1-5" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-1-6" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-1-7" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 1 InterconnectPort: 10000 Location { Rack: "rack-1" } StartTimeSeconds: 0 } Hosts { Name: "node-2" State: UNKNOWN Devices { Name: "pdisk-2-8" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-2-9" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-2-10" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-2-11" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 2 InterconnectPort: 10000 Location { Rack: "rack-2" } StartTimeSeconds: 0 } Hosts { Name: "node-3" State: UNKNOWN Devices { Name: "pdisk-3-12" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-3-13" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-3-14" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-3-15" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 3 InterconnectPort: 10000 Location { Rack: "rack-3" } StartTimeSeconds: 0 } Hosts { Name: "node-4" State: UNKNOWN Devices { Name: "pdisk-4-16" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-4-17" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-4-18" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-4-19" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 4 InterconnectPort: 10000 Location { 
Rack: "rack-4" } StartTimeSeconds: 0 } Hosts { Name: "node-5" State: UNKNOWN Devices { Name: "pdisk-5-20" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-5-21" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-5-22" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-5-23" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 5 InterconnectPort: 10000 Location { Rack: "rack-5" } StartTimeSeconds: 0 } Hosts { Name: "node-6" State: UNKNOWN Devices { Name: "pdisk-6-24" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-6-25" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-6-26" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-6-27" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 6 InterconnectPort: 10000 Location { Rack: "rack-6" } StartTimeSeconds: 0 } Hosts { Name: "node-7" State: UNKNOWN Devices { Name: "pdisk-7-28" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-7-29" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-7-30" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-7-31" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 7 InterconnectPort: 10000 Location { Rack: "rack-7" } StartTimeSeconds: 0 } Hosts { Name: "node-8" State: UNKNOWN Devices { Name: "pdisk-8-32" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-8-33" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-8-34" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-8-35" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 8 InterconnectPort: 10000 Location { Rack: "rack-8" } StartTimeSeconds: 0 } } 2025-06-24T21:33:04.506131Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 4 Path: "/1/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 5 Path: "/1/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 6 Path: "/1/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 7 Path: "/1/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 8 Path: "/2/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 9 Path: "/2/pdisk-9.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 10 Path: "/2/pdisk-10.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 11 Path: "/2/pdisk-11.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 12 Path: "/3/pdisk-12.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 13 Path: "/3/pdisk-13.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 14 Path: "/3/pdisk-14.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 15 Path: "/3/pdisk-15.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 16 Path: "/4/pdisk-16.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 17 Path: "/4/pdisk-17.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 18 Path: "/4/pdisk-18.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 19 Path: "/4/pdisk-19.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 20 Path: "/5/pdisk-20.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 21 Path: "/5/pdisk-21.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 22 Path: "/5/pdisk-22.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 23 Path: "/5/pdisk-23.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 24 Path: "/6/pdisk-24.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 25 Path: "/6/pdisk-25.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 26 
Path: "/6/pdisk-26.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 27 Path: "/6/pdisk-27.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 28 Path: "/7/pdisk-28.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 29 Path: "/7/pdisk-29.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 30 Path: "/7/pdisk-30.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 31 Path: "/7/pdisk-31.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 32 Path: "/8/pdisk-32.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 33 Path: "/8/pdisk-33.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 34 Path: "/8/pdisk-34.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 35 Path: "/8/pdisk-35.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 FailDomainIdx: 1 
} VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1002 ... 
: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37820025 2025-06-24T21:33:23.362113Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 2, response# PDiskStateInfo { PDiskId: 8 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-8.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 9 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-9.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 10 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-10.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 11 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-11.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37820025 2025-06-24T21:33:23.362249Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 3, response# PDiskStateInfo { PDiskId: 12 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-12.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 13 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-13.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 14 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-14.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 15 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-15.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37820025 2025-06-24T21:33:23.362316Z node 1 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-24T21:33:23.375375Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-24T21:33:23.375449Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-24T21:33:23.375599Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 1, wbId# [1:8388350642965737326:1634689637] 2025-06-24T21:33:23.375654Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 2, wbId# [2:8388350642965737326:1634689637] 2025-06-24T21:33:23.375732Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 3, wbId# [3:8388350642965737326:1634689637] 2025-06-24T21:33:23.375758Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 4, wbId# [4:8388350642965737326:1634689637] 2025-06-24T21:33:23.375804Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 5, wbId# [5:8388350642965737326:1634689637] 2025-06-24T21:33:23.375834Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 6, wbId# [6:8388350642965737326:1634689637] 2025-06-24T21:33:23.375858Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 7, wbId# [7:8388350642965737326:1634689637] 2025-06-24T21:33:23.375885Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 8, wbId# [8:8388350642965737326:1634689637] 2025-06-24T21:33:23.376281Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 1, response# PDiskStateInfo { PDiskId: 4 CreateTime: 0 ChangeTime: 0 Path: 
"/1/pdisk-4.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 5 CreateTime: 0 ChangeTime: 0 Path: "/1/pdisk-5.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 6 CreateTime: 0 ChangeTime: 0 Path: "/1/pdisk-6.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 7 CreateTime: 0 ChangeTime: 0 Path: "/1/pdisk-7.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860025 2025-06-24T21:33:23.376776Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 8, response# PDiskStateInfo { PDiskId: 32 CreateTime: 0 ChangeTime: 0 Path: "/8/pdisk-32.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 33 CreateTime: 0 ChangeTime: 0 Path: "/8/pdisk-33.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 34 CreateTime: 0 ChangeTime: 0 Path: "/8/pdisk-34.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 35 CreateTime: 0 ChangeTime: 0 Path: "/8/pdisk-35.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860025 2025-06-24T21:33:23.376960Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 7, response# PDiskStateInfo { PDiskId: 28 CreateTime: 0 ChangeTime: 0 Path: "/7/pdisk-28.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 29 CreateTime: 0 ChangeTime: 0 Path: "/7/pdisk-29.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 30 CreateTime: 0 ChangeTime: 0 Path: "/7/pdisk-30.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 31 CreateTime: 0 ChangeTime: 0 Path: "/7/pdisk-31.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860025 2025-06-24T21:33:23.377188Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 3, response# PDiskStateInfo { PDiskId: 12 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-12.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 13 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-13.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 14 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-14.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 15 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-15.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860025 2025-06-24T21:33:23.377336Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 4, response# PDiskStateInfo { PDiskId: 16 CreateTime: 0 ChangeTime: 0 Path: "/4/pdisk-16.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 17 CreateTime: 0 ChangeTime: 0 Path: "/4/pdisk-17.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 18 CreateTime: 0 ChangeTime: 0 
Path: "/4/pdisk-18.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 19 CreateTime: 0 ChangeTime: 0 Path: "/4/pdisk-19.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860025 2025-06-24T21:33:23.377466Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 5, response# PDiskStateInfo { PDiskId: 20 CreateTime: 0 ChangeTime: 0 Path: "/5/pdisk-20.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 21 CreateTime: 0 ChangeTime: 0 Path: "/5/pdisk-21.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 22 CreateTime: 0 ChangeTime: 0 Path: "/5/pdisk-22.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 23 CreateTime: 0 ChangeTime: 0 Path: "/5/pdisk-23.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860025 2025-06-24T21:33:23.377559Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 6, response# PDiskStateInfo { PDiskId: 24 CreateTime: 0 ChangeTime: 0 Path: "/6/pdisk-24.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 25 CreateTime: 0 ChangeTime: 0 Path: "/6/pdisk-25.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 26 CreateTime: 0 ChangeTime: 0 Path: "/6/pdisk-26.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 27 CreateTime: 0 ChangeTime: 0 Path: "/6/pdisk-27.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860025 2025-06-24T21:33:23.377675Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 2, response# PDiskStateInfo { PDiskId: 8 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-8.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 9 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-9.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 10 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-10.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 11 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-11.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860025 2025-06-24T21:33:23.377724Z node 1 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-24T21:33:23.377982Z node 1 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 3:12, status# INACTIVE, required status# ACTIVE, reason# PrevState# Normal State# Normal StateCounter# 60 StateLimit# 60, dry run# 0 2025-06-24T21:33:23.378022Z node 1 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 5:22, status# INACTIVE, required status# ACTIVE, reason# PrevState# Normal State# Normal StateCounter# 60 StateLimit# 60, dry run# 0 2025-06-24T21:33:23.378042Z node 1 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 2:8, status# INACTIVE, required status# ACTIVE, reason# PrevState# Normal State# Normal StateCounter# 60 
StateLimit# 60, dry run# 0 2025-06-24T21:33:23.378067Z node 1 :CMS DEBUG: sentinel.cpp:1076: [Sentinel] [Main] Change pdisk status: requestsSize# 3 2025-06-24T21:33:23.378215Z node 1 :CMS DEBUG: sentinel.cpp:1202: [Sentinel] [Main] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true } Status { Success: true } Status { }, cookie# 140 2025-06-24T21:33:23.378252Z node 1 :CMS ERROR: sentinel.cpp:1244: [Sentinel] [Main] Unsuccesful response from BSC: error# 2025-06-24T21:33:23.389578Z node 1 :CMS DEBUG: sentinel.cpp:1262: [Sentinel] [Main] Retrying: attempt# 1 2025-06-24T21:33:23.389651Z node 1 :CMS DEBUG: sentinel.cpp:1076: [Sentinel] [Main] Change pdisk status: requestsSize# 3 2025-06-24T21:33:23.389929Z node 1 :CMS DEBUG: sentinel.cpp:1202: [Sentinel] [Main] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true } Status { Success: true } Status { Success: true } Success: true, cookie# 141 2025-06-24T21:33:23.389979Z node 1 :CMS NOTICE: sentinel.cpp:1226: [Sentinel] [Main] PDisk status has been changed: pdiskId# 2:8 2025-06-24T21:33:23.390017Z node 1 :CMS NOTICE: sentinel.cpp:1226: [Sentinel] [Main] PDisk status has been changed: pdiskId# 3:12 2025-06-24T21:33:23.390052Z node 1 :CMS NOTICE: sentinel.cpp:1226: [Sentinel] [Main] PDisk status has been changed: pdiskId# 5:22 >> test.py::test[solomon-LabelColumns-default.txt] [GOOD] >> test.py::test[solomon-Subquery-default.txt] [SKIPPED] >> test.py::test[solomon-UnknownSetting-] |98.9%| [TM] {RESULT} ydb/core/cms/ut_sentinel_unstable/unittest >> TCloudEventsProcessorTests::TestCreateCloudEventProcessor >> TMemoryController::Counters [GOOD] >> TMemoryController::Counters_HardLimit >> TTxDataShardTestInit::TestTableHasPath [GOOD] >> TTxDataShardTestInit::TestResolvePathAfterRestart >> TDqSolomonWriteActorTest::TestWriteFormat [GOOD] >> TDqSolomonWriteActorTest::TestWriteBigBatchMonitoring >> TDqPqRdReadActorTests::TestReadFromTopic2 >> DataShardReplication::ApplyChangesWithConcurrentTx [GOOD] >> TDqPqRdReadActorTests::TestReadFromTopic2 [GOOD] >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates4-invalid_pile_id] >> KeyValueGRPCService::SimpleWriteReadWithoutToken [GOOD] >> KeyValueGRPCService::SimpleWriteReadWithoutLockGeneration1 >> TCreateAndDropViewTest::DropViewDisabledFeatureFlag [GOOD] >> TCreateAndDropViewTest::DropNonexistingView >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates1-multiple_primary_piles_in_request] |98.9%| [TA] $(B)/ydb/core/sys_view/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> DiscoveryIsNotBroken::NoKafkaSslEndpointInDiscovery [GOOD] >> TDqPqRdReadActorTests::IgnoreUndeliveredWithWrongGeneration >> DiscoveryIsNotBroken::HaveKafkaEndpointInDiscovery >> TDqPqRdReadActorTests::IgnoreUndeliveredWithWrongGeneration [GOOD] >> KqpTpch::Query10 [GOOD] >> KqpTpch::Query11 >> TDqPqRdReadActorTests::SessionError >> TDqPqRdReadActorTests::SessionError [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/kqprun/tests/py3test >> test_kqprun_recipe.py::TestKqprunRecipe::test_query_execution [GOOD] Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. 
|98.9%| [TM] {RESULT} ydb/tests/tools/kqprun/tests/py3test >> DataShardCompaction::CompactBorrowed [GOOD] >> DataShardStats::NoData [GOOD] >> DataShardStats::Follower >> DataShardCompaction::CompactBorrowedTxStatus >> TDqPqRdReadActorTests::ReadWithFreeSpace >> DataShardReassign::AutoReassignOnYellowFlag [GOOD] >> TDqPqRdReadActorTests::ReadWithFreeSpace [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_replication/unittest >> DataShardReplication::ApplyChangesWithConcurrentTx [GOOD] Test command err: 2025-06-24T21:32:36.982383Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:32:36.983061Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:32:36.983324Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001a7d/r3tmp/tmpRkwG2b/pdisk_1.dat 2025-06-24T21:32:37.635649Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:32:37.650740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:32:37.748176Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:37.762691Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750800752850761 != 1750800752850765 2025-06-24T21:32:37.812046Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:32:37.815283Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T21:32:37.815729Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:37.815903Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:37.831853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:32:37.936033Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T21:32:37.936139Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T21:32:37.937626Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:600:2508] 2025-06-24T21:32:38.126159Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:600:2508] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_GLOBAL } } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T21:32:38.126282Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:600:2508] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:32:38.127031Z node 1 
:TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:32:38.127175Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:600:2508] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:32:38.127548Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:32:38.127764Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:600:2508] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:32:38.127955Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:600:2508] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:32:38.130994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:32:38.132225Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T21:32:38.132977Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:600:2508] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T21:32:38.133060Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:600:2508] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T21:32:38.186231Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:32:38.187802Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:32:38.190249Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:32:38.190534Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:32:38.248376Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:32:38.249276Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:32:38.249389Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:32:38.253164Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:32:38.253282Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:32:38.253369Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 
2025-06-24T21:32:38.254661Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:32:38.254915Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:32:38.255065Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:32:38.266051Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:32:38.332908Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:32:38.334491Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:32:38.334725Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:32:38.334778Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:32:38.334825Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:32:38.334868Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:32:38.335126Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:32:38.335198Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:32:38.337997Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:32:38.338112Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:32:38.338186Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:32:38.338222Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:32:38.338347Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:32:38.338468Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:32:38.338522Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:32:38.338560Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:32:38.338617Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:32:38.338767Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:32:38.338821Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:32:38.338860Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at 
leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:32:38.339911Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:32:38.339980Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:32:38.340194Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:32:38.340523Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:32:38.340576Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:32:38.340657Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:32:38.340801Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Exec ... 06-24T21:33:28.728878Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit ExecuteRead 2025-06-24T21:33:28.729050Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 1500 TxId: 18446744073709551615 } LockTxId: 281474976715660 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 8 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-06-24T21:33:28.729485Z node 8 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715660, counter# 0 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T21:33:28.729566Z node 8 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v1500/18446744073709551615 2025-06-24T21:33:28.729633Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[8:822:2653], 0} after executionsCount# 1 2025-06-24T21:33:28.729704Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[8:822:2653], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T21:33:28.729838Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[8:822:2653], 0} finished in read 2025-06-24T21:33:28.729952Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-24T21:33:28.729982Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T21:33:28.730009Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:33:28.730037Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:33:28.730085Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 
2025-06-24T21:33:28.730108Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:33:28.730143Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 72075186224037888 has finished 2025-06-24T21:33:28.730195Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T21:33:28.730338Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-24T21:33:28.730722Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [8:61:2108], Recipient [8:625:2529]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715660 LockNode: 8 Status: STATUS_SUBSCRIBED 2025-06-24T21:33:28.736171Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [8:822:2653], Recipient [8:625:2529]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T21:33:28.736272Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 11 } } 2025-06-24T21:33:28.749228Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [8:826:2657], Recipient [8:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:33:28.749340Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:33:28.749419Z node 8 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [8:825:2656], serverId# [8:826:2657], sessionId# [0:0:0] 2025-06-24T21:33:28.749771Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549570, Sender [8:824:2655], Recipient [8:625:2529]: NKikimrTxDataShard.TEvApplyReplicationChanges TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Source: "my-source" Changes { SourceOffset: 1 WriteTxId: 0 Key: "\001\000\004\000\000\000\001\000\000\000" Upsert { Tags: 2 Data: "\001\000\004\000\000\000\025\000\000\000" } } 2025-06-24T21:33:28.750005Z node 8 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1000/281474976715657 IncompleteEdge# v{min} UnprotectedReadEdge# v1500/18446744073709551615 ImmediateWriteEdge# v1000/18446744073709551615 ImmediateWriteEdgeReplied# v1000/18446744073709551615 2025-06-24T21:33:28.750197Z node 8 :TX_DATASHARD TRACE: locks.cpp:194: Lock 281474976715660 marked broken at v{min} 2025-06-24T21:33:28.764328Z node 8 :TX_DATASHARD DEBUG: datashard.cpp:2560: Waiting for PlanStep# 1501 from mediator time cast 2025-06-24T21:33:28.765312Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270977, Sender [8:24:2071], Recipient [8:625:2529]: {TEvNotifyPlanStep TabletId# 72075186224037888 PlanStep# 1501} 2025-06-24T21:33:28.765397Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3172: StateWork, processing event TEvMediatorTimecast::TEvNotifyPlanStep 2025-06-24T21:33:28.765482Z node 8 :TX_DATASHARD DEBUG: datashard.cpp:3780: Notified by mediator time cast with PlanStep# 1501 at tablet 72075186224037888 2025-06-24T21:33:28.765557Z node 8 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 
2025-06-24T21:33:28.987484Z node 8 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyhxresy29veq0e1rkkg9qqz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=8&id=YTFjZDA0ZjctNjljOTFkMjQtMWRmMTIzMjMtMjY2Zjc4Mzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:33:28.989830Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [8:849:2674], Recipient [8:625:2529]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 1500 TxId: 18446744073709551615 } LockTxId: 281474976715660 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 8 TotalRowsLimit: 1001 LockMode: OPTIMISTIC RangesSize: 1 2025-06-24T21:33:28.990086Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-24T21:33:28.990188Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CheckRead 2025-06-24T21:33:28.990322Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-24T21:33:28.990388Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CheckRead 2025-06-24T21:33:28.990439Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T21:33:28.990493Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T21:33:28.990558Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 72075186224037888 2025-06-24T21:33:28.990607Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-24T21:33:28.990641Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:33:28.990668Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit ExecuteRead 2025-06-24T21:33:28.990693Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit ExecuteRead 2025-06-24T21:33:28.990857Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 1500 TxId: 18446744073709551615 } LockTxId: 281474976715660 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 8 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-06-24T21:33:28.991415Z node 8 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715660, counter# 18446744073709551612 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T21:33:28.991509Z node 8 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v1500/18446744073709551615 2025-06-24T21:33:28.991577Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# 
{[8:849:2674], 0} after executionsCount# 1 2025-06-24T21:33:28.991644Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[8:849:2674], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-24T21:33:28.991753Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[8:849:2674], 0} finished in read 2025-06-24T21:33:28.991853Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-24T21:33:28.991897Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit ExecuteRead 2025-06-24T21:33:28.991927Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:33:28.991957Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:33:28.992013Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-24T21:33:28.992036Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:33:28.992077Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:3] at 72075186224037888 has finished 2025-06-24T21:33:28.992129Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-24T21:33:28.992275Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-24T21:33:28.993416Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [8:849:2674], Recipient [8:625:2529]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-24T21:33:28.993499Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-24T21:33:28.996753Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [8:61:2108], Recipient [8:625:2529]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715660 LockNode: 8 Status: STATUS_NOT_FOUND { items { uint32_value: 1 } items { uint32_value: 11 } } |99.0%| [TA] {RESULT} $(B)/ydb/core/sys_view/ut/test-results/unittest/{meta.json ... results_accumulator.log} |99.0%| [TM] {RESULT} ydb/core/tx/datashard/ut_replication/unittest >> TDqPqRdReadActorTests::TestSaveLoadPqRdRead |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_reassign/unittest >> DataShardReassign::AutoReassignOnYellowFlag [GOOD] Test command err: 2025-06-24T21:33:25.460014Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:33:25.460691Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:33:25.460958Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001a35/r3tmp/tmpVdudoq/pdisk_1.dat 2025-06-24T21:33:26.257576Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:33:26.262179Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:5} Tx{8, NKikimr::NSchemeShard::TSchemeShard::TTxOperationPropose} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxOperationPropose 2025-06-24T21:33:26.262266Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:5} Tx{8, NKikimr::NSchemeShard::TSchemeShard::TTxOperationPropose} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:26.270924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:33:26.278291Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:5} Tx{8, NKikimr::NSchemeShard::TSchemeShard::TTxOperationPropose} hope 1 -> done Change{4, redo 987b alter 0b annex 0, ~{ 1, 33, 35, 42, 4 } -{ }, 0 gb} 2025-06-24T21:33:26.278468Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:5} Tx{8, NKikimr::NSchemeShard::TSchemeShard::TTxOperationPropose} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:33:26.280852Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:2:5:1:24576:515:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:26.281238Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:2:5:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:26.281399Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:6} commited cookie 1 for step 5 2025-06-24T21:33:26.295710Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:6} Tx{9, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress 2025-06-24T21:33:26.296114Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:6} Tx{9, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:26.297421Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:6} Tx{9, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} hope 1 -> done Change{5, redo 174b alter 0b annex 0, ~{ 42, 4 } -{ }, 0 gb} 2025-06-24T21:33:26.297534Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:6} Tx{9, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:33:26.298060Z node 1 :TABLET_MAIN DEBUG: 
tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:2:6:1:24576:129:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:26.298164Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:2:6:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:26.298250Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:7} commited cookie 1 for step 6 2025-06-24T21:33:26.298562Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:7} Tx{10, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress 2025-06-24T21:33:26.298604Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:7} Tx{10, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:26.298937Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:7} Tx{10, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} hope 1 -> done Change{6, redo 174b alter 0b annex 0, ~{ 42, 4 } -{ }, 0 gb} 2025-06-24T21:33:26.298991Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:7} Tx{10, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:33:26.299433Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:2:7:1:24576:130:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:26.299500Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:2:7:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:26.299581Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:8} commited cookie 1 for step 7 2025-06-24T21:33:26.299727Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:8} Tx{11, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress 2025-06-24T21:33:26.299780Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:8} Tx{11, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:26.301540Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:8} Tx{11, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} hope 1 -> done Change{7, redo 120b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2025-06-24T21:33:26.301586Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:8} Tx{11, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:33:26.301811Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:2:8:1:24576:89:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:26.301870Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:2:8:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:26.301949Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:9} commited cookie 1 for step 8 2025-06-24T21:33:26.306467Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:9} Tx{12, NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion 2025-06-24T21:33:26.306574Z node 1 
:TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:9} Tx{12, NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:26.306732Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:9} Tx{12, NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion} hope 1 -> done Change{8, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T21:33:26.306790Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:9} Tx{12, NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:33:26.317921Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:8} Tx{14, NKikimr::NBsController::TBlobStorageController::TTxRegisterNode} queued, type NKikimr::NBsController::TBlobStorageController::TTxRegisterNode 2025-06-24T21:33:26.317992Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:8} Tx{14, NKikimr::NBsController::TBlobStorageController::TTxRegisterNode} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:26.318192Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:8} Tx{14, NKikimr::NBsController::TBlobStorageController::TTxRegisterNode} hope 1 -> done Change{7, redo 79b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2025-06-24T21:33:26.318239Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:8} Tx{14, NKikimr::NBsController::TBlobStorageController::TTxRegisterNode} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:33:26.368913Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:33:26.370026Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037936130:2:3} Tx{3, NKikimr::NTenantSlotBroker::TTenantSlotBroker::TTxUpdateConfig} queued, type NKikimr::NTenantSlotBroker::TTenantSlotBroker::TTxUpdateConfig 2025-06-24T21:33:26.370284Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037936130:2:3} Tx{3, NKikimr::NTenantSlotBroker::TTenantSlotBroker::TTxUpdateConfig} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:26.378592Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037936130:2:3} Tx{3, NKikimr::NTenantSlotBroker::TTenantSlotBroker::TTxUpdateConfig} hope 1 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T21:33:26.378734Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037936130:2:3} Tx{3, NKikimr::NTenantSlotBroker::TTenantSlotBroker::TTxUpdateConfig} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:33:26.378949Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037936129:2:4} Tx{4, NKikimr::NNodeBroker::TNodeBroker::TTxUpdateConfig} queued, type NKikimr::NNodeBroker::TNodeBroker::TTxUpdateConfig 2025-06-24T21:33:26.379003Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037936129:2:4} Tx{4, NKikimr::NNodeBroker::TNodeBroker::TTxUpdateConfig} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:26.381630Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037936129:2:4} Tx{4, NKikimr::NNodeBroker::TNodeBroker::TTxUpdateConfig} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T21:33:26.381735Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037936129:2:4} Tx{4, NKikimr::NNodeBroker::TNodeBroker::TTxUpdateConfig} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:33:26.400839Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750800801230597 != 1750800801230601 2025-06-24T21:33:26.424174Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put 
Result: TEvPutResult {Id# [72057594037932033:2:8:0:0:87:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:26.424318Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:9} commited cookie 1 for step 8 2025-06-24T21:33:26.428046Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:9} Tx{15, NKikimr::NBsController::TBlobStorageController::TTxUpdateNodeDrives} queued, type NKikimr::NBsController::TBlobStorageController::TTxUpdateNodeDrives 2025-06-24T21:33:26.428150Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:9} Tx{15, NKikimr::NBsController::TBlobStorageController::TTxUpdateNodeDrives} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:26.429059Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:9} Tx{15, NKikimr::NBsController::TBlobStorageController::TTxUpdateNodeDrives} hope 1 -> done Change{8, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T21:33:26.429139Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:9} Tx{15, NKikimr::NBsController::TBlobStorageController::TTxUpdateNodeDrives} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:33:26.457479Z ... UG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:21:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:32.332657Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:22} commited cookie 1 for step 21 2025-06-24T21:33:32.359547Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037968897:2:10} Tx{18, NKikimr::NHive::TTxUpdateTabletMetrics} queued, type NKikimr::NHive::TTxUpdateTabletMetrics 2025-06-24T21:33:32.359665Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037968897:2:10} Tx{18, NKikimr::NHive::TTxUpdateTabletMetrics} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:32.359869Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037968897:2:10} Tx{18, NKikimr::NHive::TTxUpdateTabletMetrics} hope 1 -> done Change{12, redo 143b alter 0b annex 0, ~{ 16, 4 } -{ }, 0 gb} 2025-06-24T21:33:32.359952Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037968897:2:10} Tx{18, NKikimr::NHive::TTxUpdateTabletMetrics} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:33:32.372268Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037968897:2:10:0:0:137:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:32.372458Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037968897:2:11} commited cookie 1 for step 10 2025-06-24T21:33:32.513346Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:22} Tx{25, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2025-06-24T21:33:32.513444Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:22} Tx{25, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:32.513606Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:22} Tx{25, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{21, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2025-06-24T21:33:32.513661Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:22} Tx{25, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:33:32.514071Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# 
[72057594046316545:2:22:1:24576:90:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:32.514148Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:22:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:32.514236Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:23} commited cookie 1 for step 22 2025-06-24T21:33:32.681535Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:23} Tx{26, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2025-06-24T21:33:32.681634Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:23} Tx{26, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:32.681792Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:23} Tx{26, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{22, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2025-06-24T21:33:32.681926Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:23} Tx{26, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:33:32.682377Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:23:1:24576:90:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:32.682467Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:23:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:32.682609Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:24} commited cookie 1 for step 23 2025-06-24T21:33:32.847639Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:24} Tx{27, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2025-06-24T21:33:32.847757Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:24} Tx{27, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:32.847928Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:24} Tx{27, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{23, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2025-06-24T21:33:32.847981Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:24} Tx{27, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:33:32.848436Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:24:1:24576:90:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:32.848556Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:24:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:32.848674Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:25} commited cookie 1 for step 24 2025-06-24T21:33:33.007516Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:25} Tx{28, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2025-06-24T21:33:33.007619Z node 1 :TABLET_EXECUTOR DEBUG: 
Leader{72057594046316545:2:25} Tx{28, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:33.007839Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:25} Tx{28, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{24, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2025-06-24T21:33:33.007907Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:25} Tx{28, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:33:33.008328Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:25:1:24576:90:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:33.008399Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:25:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:33.008503Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:26} commited cookie 1 for step 25 2025-06-24T21:33:33.020760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:33:33.020838Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:33:33.055041Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:10} Tx{18, NKikimr::NBsController::TBlobStorageController::TTxUpdateDiskMetrics} queued, type NKikimr::NBsController::TBlobStorageController::TTxUpdateDiskMetrics 2025-06-24T21:33:33.055170Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:10} Tx{18, NKikimr::NBsController::TBlobStorageController::TTxUpdateDiskMetrics} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:33.055292Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:10} Tx{18, NKikimr::NBsController::TBlobStorageController::TTxUpdateDiskMetrics} hope 1 -> done Change{9, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T21:33:33.055371Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:10} Tx{18, NKikimr::NBsController::TBlobStorageController::TTxUpdateDiskMetrics} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:33:33.164206Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435073, Sender [0:0:0], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-06-24T21:33:33.164307Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3158: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-06-24T21:33:33.164409Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:10} Tx{19, NKikimr::NDataShard::TDataShard::TTxCleanupTransaction} queued, type NKikimr::NDataShard::TDataShard::TTxCleanupTransaction 2025-06-24T21:33:33.164500Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:10} Tx{19, NKikimr::NDataShard::TDataShard::TTxCleanupTransaction} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:33.164665Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186224037888 outdated step 15000 last cleanup 0 2025-06-24T21:33:33.164772Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:33:33.164814Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-24T21:33:33.164887Z node 1 
:TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:33:33.164924Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:33:33.165008Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:10} Tx{19, NKikimr::NDataShard::TDataShard::TTxCleanupTransaction} hope 1 -> done Change{11, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-24T21:33:33.165112Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:10} Tx{19, NKikimr::NDataShard::TDataShard::TTxCleanupTransaction} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:33:33.165318Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-24T21:33:33.242715Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:26} Tx{29, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2025-06-24T21:33:33.242839Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:26} Tx{29, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-24T21:33:33.243012Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:26} Tx{29, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{25, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2025-06-24T21:33:33.243062Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:26} Tx{29, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2025-06-24T21:33:33.243581Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:26:1:24576:90:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:33.243701Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:26:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-24T21:33:33.243803Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:27} commited cookie 1 for step 26 --- Captured TEvCheckBlobstorageStatusResult event --- Waiting for TEvReassignTablet event... 
2025-06-24T21:33:33.398211Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:10} CheckYellow current light yellow move channels: 0 1 2 --- Captured TEvReassignTablet event |99.0%| [TM] {RESULT} ydb/core/tx/datashard/ut_reassign/unittest >> TMemoryController::Counters_HardLimit [GOOD] >> TMemoryController::Counters_NoHardLimit |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test.py::test_local [GOOD] >> TTxDataShardTestInit::TestResolvePathAfterRestart [GOOD] |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TDqPqRdReadActorTests::TestSaveLoadPqRdRead [GOOD] >> test.py::test[solomon-UnknownSetting-] [GOOD] >> TDqPqRdReadActorTests::CoordinatorChanged >> TCloudEventsProcessorTests::TestCreateCloudEventProcessor [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_init/unittest >> TTxDataShardTestInit::TestResolvePathAfterRestart [GOOD] Test command err: 2025-06-24T21:33:21.825914Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:105:2137], Recipient [1:111:2141]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:33:21.845754Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:105:2137], Recipient [1:111:2141]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:33:21.859510Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:111:2141] 2025-06-24T21:33:21.867118Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:33:21.956565Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:105:2137], Recipient [1:111:2141]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:33:21.961600Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:33:21.961735Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:33:21.965209Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-24T21:33:21.965332Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-24T21:33:21.965399Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-24T21:33:21.967441Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:33:21.967588Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:33:21.967691Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:131:2141] in generation 2 2025-06-24T21:33:21.984492Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:33:22.018251Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-24T21:33:22.019429Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:33:22.019600Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:136:2159] 2025-06-24T21:33:22.019639Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-24T21:33:22.019676Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot 
activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-24T21:33:22.019713Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-24T21:33:22.019949Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:111:2141], Recipient [1:111:2141]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:33:22.021036Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:33:22.021901Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-24T21:33:22.022008Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-24T21:33:22.022060Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-24T21:33:22.022111Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:33:22.022842Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-24T21:33:22.022916Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-24T21:33:22.022958Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-24T21:33:22.022993Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-24T21:33:22.023045Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-24T21:33:22.026064Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269551617, Sender [1:102:2135], Recipient [1:111:2141]: NKikimrTxDataShard.TEvGetShardState Source { RawX1: 102 RawX2: 4294969431 } 2025-06-24T21:33:22.026141Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvGetShardState 2025-06-24T21:33:26.670794Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:284:2326], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:33:26.671805Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:33:26.671990Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0019f2/r3tmp/tmpCpPbaS/pdisk_1.dat 2025-06-24T21:33:27.269432Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-24T21:33:27.301273Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:33:27.398558Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:33:27.428252Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1750800802413395 != 1750800802413399 2025-06-24T21:33:27.500915Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:33:27.501168Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:33:27.517161Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:33:27.672674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:27.764373Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:624:2529] 2025-06-24T21:33:27.764681Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:33:27.940129Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:33:27.940380Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:33:27.942742Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:33:27.942879Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:33:27.947497Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:33:27.948025Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:33:27.948268Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:33:27.948417Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [2:640:2529] in generation 1 2025-06-24T21:33:27.959861Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:33:27.959968Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:33:27.960141Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:33:27.960260Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:642:2539] 2025-06-24T21:33:27.960327Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:33:27.960369Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:33:27.960408Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:33:27.960904Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:33:27.961031Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:33:27.961170Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:33:27.961214Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:33:27.961262Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:33:27.961306Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:33:27.961376Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:623:2528], serverId# [2:631:2533], sessionId# [0:0:0] 2025-06-24T21:33:27.964247Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:33:27.970297Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:33:27.970477Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:33:27.972856Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:33:27.984344Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:33:27.984484Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:33:28.150506Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:658:2549], serverId# [2:659:2550], sessionId# [0:0:0] 2025-06-24T21:33:28.161801Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction ... 
tive planned 0 immediate 0 planned 1 2025-06-24T21:33:37.003638Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:33:37.003896Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:33:37.004046Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:33:37.006294Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:33:37.006378Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:33:37.006822Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:33:37.007272Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:33:37.009169Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:33:37.009221Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:33:37.009874Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:33:37.009966Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:33:37.010885Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:33:37.010932Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:33:37.010980Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:33:37.011035Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:372:2366], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:33:37.011081Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:33:37.011784Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:33:37.013127Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:33:37.015019Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:33:37.015404Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:33:37.015469Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 
72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:33:37.194307Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037888 2025-06-24T21:33:37.196085Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-06-24T21:33:37.259286Z node 3 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [3:697:2577] 2025-06-24T21:33:37.259574Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:33:37.265182Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:33:37.266458Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:33:37.270515Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:33:37.270652Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:33:37.270714Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:33:37.271210Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:33:37.271674Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:33:37.271763Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [3:712:2577] in generation 2 2025-06-24T21:33:37.295310Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:33:37.295483Z node 3 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037888 2025-06-24T21:33:37.295582Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:33:37.295705Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:33:37.295803Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4180: Resolve path at 72075186224037888: reason# empty path 2025-06-24T21:33:37.296001Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [3:716:2587] 2025-06-24T21:33:37.296045Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:33:37.296106Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:33:37.296152Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:33:37.296493Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:711: TxInitSchemaDefaults.Execute 2025-06-24T21:33:37.296752Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:723: TxInitSchemaDefaults.Complete 2025-06-24T21:33:37.298282Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:33:37.298404Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:33:37.298564Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got 
TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 1000 2025-06-24T21:33:37.298623Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:33:37.299023Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:33:37.304761Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5603: Got TEvDataShard::TEvSchemaChanged for unknown txId 281474976715657 message# Source { RawX1: 697 RawX2: 12884904465 } Origin: 72075186224037888 State: 2 TxId: 281474976715657 Step: 0 Generation: 2 2025-06-24T21:33:37.305168Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:33:37.305248Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:33:37.305307Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:33:37.305393Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:33:37.351488Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4241: Got scheme resolve result at 72075186224037888: Status: StatusSuccess Path: "/Root/table-1" PathDescription { Self { Name: "table-1" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715657 CreateStep: 1000 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 
MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046644480 2025-06-24T21:33:37.351902Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:33:37.351990Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:33:37.352098Z node 3 :TX_DATASHARD DEBUG: datashard__store_table_path.cpp:20: TTxStoreTablePath::Execute at 72075186224037888 2025-06-24T21:33:37.356212Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:33:37.357439Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:720:2591], serverId# [3:722:2592], sessionId# [0:0:0] 2025-06-24T21:33:37.372532Z node 3 :TX_DATASHARD DEBUG: datashard__store_table_path.cpp:39: TTxStoreTablePath::Complete at 72075186224037888 |99.0%| [TM] {RESULT} ydb/core/tx/datashard/ut_init/unittest >> TDqPqRdReadActorTests::CoordinatorChanged [GOOD] >> TDqSolomonWriteActorTest::TestWriteBigBatchMonitoring [GOOD] >> TDqSolomonWriteActorTest::TestWriteBigBatchSolomon [GOOD] >> TDqSolomonWriteActorTest::TestWriteWithTimeseries >> QuoterWithKesusTest::UpdatesCountersForParentResources [GOOD] >> QuoterWithKesusTest::CanDeleteResourceWhenUsingIt |99.0%| [TM] {asan, default-linux-x86_64, pic, release} ydb/tests/fq/solomon/py3test >> test.py::test[solomon-UnknownSetting-] [GOOD] |99.0%| [TM] {RESULT} ydb/tests/fq/solomon/py3test >> TDqPqRdReadActorTests::Backpressure >> test_insert_restarts.py::TestS3::test_atomic_upload_commit[v1-client0] [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/ymq/actor/cloud_events/cloud_events_ut/unittest >> TCloudEventsProcessorTests::TestCreateCloudEventProcessor [GOOD] Test command err: 2025-06-24T21:33:26.227317Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519632205748685517:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:33:26.227409Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/00177d/r3tmp/tmpm6vadd/pdisk_1.dat 2025-06-24T21:33:26.958261Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:33:26.958378Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:33:26.968140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:33:27.038784Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27690, node 1 2025-06-24T21:33:27.255573Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable 
config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:33:27.255595Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:33:27.255603Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:33:27.255727Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:33:27.266814Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14397 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:33:28.326374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:33:28.384282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 TClient is connected to server localhost:14397 waiting... 2025-06-24T21:33:28.785375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-24T21:33:31.228200Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519632205748685517:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:33:31.228294Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:33:31.523819Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519632227223522674:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:31.523856Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519632227223522686:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:31.523935Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:31.548564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:33:31.603771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:31.607397Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519632227223522688:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-24T21:33:31.710695Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519632227223522776:2407] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ===Execute query: UPSERT INTO`/Root/SQS/CreateCloudEventProcessor/.CloudEventsYmq` (CreatedAt,Id,QueueName,Type,CloudId,FolderId,UserSID,MaskedToken,AuthType,PeerName,RequestId,IdempotencyId,Labels)VALUES(7084680072,6085230376893563634,'queue1','CreateMessageQueue','cloud1','folder1','username','maskedToken123','authtype','localhost:8000','req1','idemp1','{"k1" : "v1"}'); 2025-06-24T21:33:33.705241Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. Ctx: { TraceId: 01jyhxrk28d47qajhp0ftdseh7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTFiZGUxYWQtYzM1NGI0N2MtN2I0Mzg4NTctNTM5N2ZjODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:33:33.711238Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710664. Ctx: { TraceId: 01jyhxrhfh031mz0skycq1ks00, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTkwMzI0NzAtNWQ4NzM3Y2MtOTkzYzBmMDYtZDhmMmI5NzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root End execute query=== ===Execute query: UPSERT INTO`/Root/SQS/CreateCloudEventProcessor/.CloudEventsYmq` (CreatedAt,Id,QueueName,Type,CloudId,FolderId,UserSID,MaskedToken,AuthType,PeerName,RequestId,IdempotencyId,Labels)VALUES(7085351854,9176812543306425239,'queue1','UpdateMessageQueue','cloud1','folder1','username','maskedToken123','authtype','localhost:8000','req1','idemp1','{"k1" : "v1"}'); 2025-06-24T21:33:33.943985Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710665. Ctx: { TraceId: 01jyhxrkpra94s92q8nz56p5ry, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTFiZGUxYWQtYzM1NGI0N2MtN2I0Mzg4NTctNTM5N2ZjODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root End execute query=== ===Execute query: UPSERT INTO`/Root/SQS/CreateCloudEventProcessor/.CloudEventsYmq` (CreatedAt,Id,QueueName,Type,CloudId,FolderId,UserSID,MaskedToken,AuthType,PeerName,RequestId,IdempotencyId,Labels)VALUES(7085538934,7658340407004452669,'queue1','DeleteMessageQueue','cloud1','folder1','username','maskedToken123','authtype','localhost:8000','req1','idemp1','{"k1" : "v1"}'); 2025-06-24T21:33:34.203127Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jyhxrkwgafjwp10f8yqjfer7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTFiZGUxYWQtYzM1NGI0N2MtN2I0Mzg4NTctNTM5N2ZjODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root End execute query=== 2025-06-24T21:33:35.775160Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710667. Ctx: { TraceId: 01jyhxrnms9bdqzafvv7epxa0x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWU4NjA0MmUtY2I5NzUwZmEtZmU5NGNkODctNzIzMmFmNWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:33:36.381906Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710668. 
Ctx: { TraceId: 01jyhxrnp17svhz565zvxb1mym, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWU4NjA0MmUtY2I5NzUwZmEtZmU5NGNkODctNzIzMmFmNWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:33:38.465425Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710669. Ctx: { TraceId: 01jyhxrr8sbc1d05yqcgay5ww4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmJiYjMyNmItZmEyYzgwYWUtZWU0MjQyNTctYWRmYjQ5OGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-24T21:33:28.338355Z: component=schemeshard, tx_id=281474976710657, remote_address={none}, subject={none}, sanitized_token={none}, database={none}, operation=ALTER DATABASE, paths=[//Root], status=SUCCESS, detailed_status=StatusAccepted 2025-06-24T21:33:28.378170Z: component=schemeshard, tx_id=281474976710658, remote_address={none}, subject={none}, sanitized_token={none}, database=/Root, operation=CREATE DIRECTORY, paths=[/Root/SQS], status=SUCCESS, detailed_status=StatusAccepted 2025-06-24T21:33:28.768933Z: component=schemeshard, tx_id=281474976710659, remote_address={none}, subject={none}, sanitized_token={none}, database=/Root, operation=CREATE DIRECTORY, paths=[/Root/SQS/Root/SQS/CreateCloudEventProcessor], status=SUCCESS, detailed_status=StatusAccepted 2025-06-24T21:33:31.553091Z: component=schemeshard, tx_id=281474976710660, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE RESOURCE POOL, paths=[.metadata/workload_manager/pools/default], status=SUCCESS, detailed_status=StatusAccepted, new_owner=metadata@system, acl_add=[+(SR|DS):all-users@well-known, +(SR|DS):root@builtin] 2025-06-24T21:33:31.605718Z: component=schemeshard, tx_id=281474976710661, remote_address=::1, subject={none}, sanitized_token={none}, database=/Root, operation=CREATE TABLE, paths=[/Root/SQS/CreateCloudEventProcessor/.CloudEventsYmq], status=SUCCESS, detailed_status=StatusAccepted 2025-06-24T21:33:31.710255Z: component=schemeshard, tx_id=281474976710662, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE RESOURCE POOL, paths=[default], status=SUCCESS, detailed_status=StatusAlreadyExists, reason=Check failed: path: '/Root/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), new_owner=metadata@system, acl_add=[+(SR|DS):all-users@well-known, +(SR|DS):root@builtin] 2025-06-24T21:33:35.806509Z: component=ymq, id=6085230376893563634$CreateMessageQueue$7084680072, operation=CreateMessageQueue, status=SUCCESS, remote_address=localhost:8000, subject=username, masked_token=maskedToken123, auth_type=authtype, permission=ymq.queues.create, created_at=7084680072, cloud_id=cloud1, folder_id=folder1, request_id=req1, idempotency_id=idemp1, queue=queue1, labels={"k1" : "v1"} 2025-06-24T21:33:35.806549Z: component=ymq, id=9176812543306425239$UpdateMessageQueue$7085351854, operation=UpdateMessageQueue, status=SUCCESS, remote_address=localhost:8000, subject=username, masked_token=maskedToken123, auth_type=authtype, permission=ymq.queues.setAttributes, created_at=7085351854, cloud_id=cloud1, folder_id=folder1, request_id=req1, idempotency_id=idemp1, queue=queue1, labels={"k1" : "v1"} 2025-06-24T21:33:35.806569Z: component=ymq, id=7658340407004452669$DeleteMessageQueue$7085538934, 
operation=DeleteMessageQueue, status=SUCCESS, remote_address=localhost:8000, subject=username, masked_token=maskedToken123, auth_type=authtype, permission=ymq.queues.delete, created_at=7085538934, cloud_id=cloud1, folder_id=folder1, request_id=req1, idempotency_id=idemp1, queue=queue1, labels={"k1" : "v1"} |99.0%| [TS] {RESULT} ydb/core/ymq/actor/cloud_events/cloud_events_ut/unittest >> TCreateAndDropViewTest::DropNonexistingView [FAIL] >> TCreateAndDropViewTest::CallDropViewOnTable |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TMemoryController::Counters_NoHardLimit [GOOD] >> TMemoryController::Config_ConsumerLimits >> KeyValueGRPCService::SimpleWriteReadWithoutLockGeneration1 [GOOD] >> KeyValueGRPCService::SimpleWriteReadWithoutLockGeneration2 >> DiscoveryIsNotBroken::HaveKafkaEndpointInDiscovery [GOOD] >> DiscoveryIsNotBroken::HaveKafkaSslEndpointInDiscovery |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test >> test_schemeshard_limits.py::TestSchemeShardLimitsCase1::test_too_large_acls |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serializable/py3test >> test.py::test_local [GOOD] |99.0%| [TM] {RESULT} ydb/tests/functional/serializable/py3test >> DataShardStats::Follower [GOOD] >> DataShardStats::Tli >> KqpTpch::Query11 [GOOD] >> KqpTpch::Query12 |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test >> test_schemeshard_limits.py::TestSchemeShardLimitsCase0::test_effective_acls_are_too_large |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test >> TDqSolomonWriteActorTest::TestWriteWithTimeseries [GOOD] >> TDqSolomonWriteActorTest::TestCheckpoints >> TMemoryController::Config_ConsumerLimits [GOOD] >> TMemoryController::SharedCache >> TDqSolomonWriteActorTest::TestCheckpoints [GOOD] >> TDqSolomonWriteActorTest::TestShouldReturnAfterCheckpoint >> TCreateAndDropViewTest::CallDropViewOnTable [FAIL] >> TCreateAndDropViewTest::DropSameViewTwice >> test_insert_restarts.py::TestS3::test_atomic_upload_commit[v2-client0] |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test >> KqpTpch::Query12 [GOOD] >> KqpTpch::Query13 >> KeyValueGRPCService::SimpleWriteReadWithoutLockGeneration2 [GOOD] >> KeyValueGRPCService::SimpleWriteReadWithGetChannelStatus >> test_unknown_data_source.py::TestUnknownDataSource::test_should_fail_unknown_data_source[v2-client0] [GOOD] >> Etcd_KV::OptimisticDelete [GOOD] >> Etcd_KV::TxnReadOnly |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test >> DataShardCompaction::CompactBorrowedTxStatus [GOOD] >> QuoterWithKesusTest::CanDeleteResourceWhenUsingIt [GOOD] >> QuoterWithKesusTest::CanKillKesusWhenUsingIt >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates1-multiple_primary_piles_in_request] [GOOD] >> DiscoveryIsNotBroken::HaveKafkaSslEndpointInDiscovery [GOOD] >> Functions::CreateRequest [GOOD] >> Functions::CreateResponse [GOOD] >> KafkaProtocol::ProduceScenario >> KqpTpch::Query13 [GOOD] >> KqpTpch::Query14 |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/tx/datashard/ut_compaction/unittest >> DataShardCompaction::CompactBorrowedTxStatus [GOOD] Test command err: 2025-06-24T21:33:17.064651Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:33:17.065203Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:33:17.065453Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001bb1/r3tmp/tmpHFTe3s/pdisk_1.dat 2025-06-24T21:33:17.567621Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:33:17.582039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:33:17.646866Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:33:17.663661Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750800793254815 != 1750800793254819 2025-06-24T21:33:17.711255Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-24T21:33:17.714825Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-24T21:33:17.715296Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:33:17.715443Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:33:17.728419Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:33:17.830253Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-24T21:33:17.830347Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-24T21:33:17.832640Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:600:2508] 2025-06-24T21:33:17.994859Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:600:2508] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-24T21:33:17.995030Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:600:2508] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-24T21:33:17.995909Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:600:2508] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-24T21:33:17.996024Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:600:2508] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-24T21:33:17.996454Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-24T21:33:17.996695Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:600:2508] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-24T21:33:17.996899Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:600:2508] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-24T21:33:18.006713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:18.007461Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:600:2508] txid# 281474976715657 HANDLE EvClientConnected 2025-06-24T21:33:18.008333Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:600:2508] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-24T21:33:18.008417Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:600:2508] txid# 281474976715657 SEND to# [1:552:2478] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-24T21:33:18.059856Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:33:18.061914Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:33:18.062488Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:33:18.062826Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:33:18.116846Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:33:18.117687Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:33:18.117820Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:33:18.119756Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:33:18.119847Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:33:18.119901Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:33:18.122106Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-24T21:33:18.122310Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:33:18.122421Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:33:18.133409Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:33:18.174825Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:33:18.175991Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:33:18.176175Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:33:18.176217Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:33:18.176256Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:33:18.176292Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:33:18.176542Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:33:18.176604Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:33:18.178048Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:33:18.178167Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:33:18.178248Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:33:18.178295Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:33:18.178399Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:33:18.178440Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:33:18.178491Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:33:18.178533Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:33:18.178580Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:33:18.178719Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:33:18.178763Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:33:18.178806Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], 
sessionId# [0:0:0] 2025-06-24T21:33:18.180179Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:33:18.180259Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:33:18.180388Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:33:18.180817Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:33:18.180905Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:33:18.181004Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:33:18.181132Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21: ... 1474976715661] at 72075186224037892 is DelayComplete 2025-06-24T21:33:58.758220Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [71500:281474976715661] at 72075186224037892 executing on unit CompleteOperation 2025-06-24T21:33:58.758252Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [71500:281474976715661] at 72075186224037892 to execution unit CompletedOperations 2025-06-24T21:33:58.758283Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [71500:281474976715661] at 72075186224037892 on unit CompletedOperations 2025-06-24T21:33:58.758319Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [71500:281474976715661] at 72075186224037892 is Executed 2025-06-24T21:33:58.758347Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [71500:281474976715661] at 72075186224037892 executing on unit CompletedOperations 2025-06-24T21:33:58.758372Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [71500:281474976715661] at 72075186224037892 has finished 2025-06-24T21:33:58.758407Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:33:58.758436Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037892 2025-06-24T21:33:58.758469Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037892 has no attached operations 2025-06-24T21:33:58.758501Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037892 2025-06-24T21:33:58.784982Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-24T21:33:58.785060Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-24T21:33:58.785103Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [71500:281474976715661] at 72075186224037892 on unit CompleteOperation 2025-06-24T21:33:58.785172Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [71500 : 281474976715661] from 72075186224037892 at tablet 72075186224037892 send result to client 
[2:1405:3207], exec latency: 0 ms, propose latency: 1 ms 2025-06-24T21:33:58.785229Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-24T21:33:58.785436Z node 2 :TX_PROXY DEBUG: datareq.cpp:2286: Actor# [2:1405:3207] txid# 281474976715661 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# COMPLETE shard id 72075186224037892 marker# P12 2025-06-24T21:33:58.785491Z node 2 :TX_PROXY DEBUG: datareq.cpp:2968: Send stream clearance, shard: 72075186224037890, txid: 281474976715661, cleared: 1 2025-06-24T21:33:58.785647Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287940, Sender [2:1405:3207], Recipient [2:722:2598]: NKikimrTx.TEvStreamClearanceResponse TxId: 281474976715661 Cleared: true 2025-06-24T21:33:58.785691Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, processing event TEvTxProcessing::TEvStreamClearanceResponse 2025-06-24T21:33:58.785782Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:722:2598], Recipient [2:722:2598]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:33:58.785815Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:33:58.785889Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-24T21:33:58.785946Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:33:58.786000Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [71500:281474976715661] at 72075186224037890 for WaitForStreamClearance 2025-06-24T21:33:58.786033Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [71500:281474976715661] at 72075186224037890 on unit WaitForStreamClearance 2025-06-24T21:33:58.786094Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:156: Got stream clearance for [71500:281474976715661] at 72075186224037890 2025-06-24T21:33:58.786141Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [71500:281474976715661] at 72075186224037890 is Executed 2025-06-24T21:33:58.786178Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [71500:281474976715661] at 72075186224037890 executing on unit WaitForStreamClearance 2025-06-24T21:33:58.786211Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [71500:281474976715661] at 72075186224037890 to execution unit ReadTableScan 2025-06-24T21:33:58.786246Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [71500:281474976715661] at 72075186224037890 on unit ReadTableScan 2025-06-24T21:33:58.786517Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [71500:281474976715661] at 72075186224037890 is Continue 2025-06-24T21:33:58.786549Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:33:58.786578Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037890 2025-06-24T21:33:58.786609Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2025-06-24T21:33:58.786640Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-24T21:33:58.787256Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [2:1437:3236], Recipient [2:722:2598]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-24T21:33:58.787309Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-24T21:33:58.787554Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715661, MessageQuota: 1 2025-06-24T21:33:58.787672Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037890, TxId: 281474976715661, MessageQuota: 1 2025-06-24T21:33:59.005712Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037890 2025-06-24T21:33:59.005785Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037890 2025-06-24T21:33:59.006080Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:722:2598], Recipient [2:722:2598]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:33:59.006123Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:33:59.006195Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-24T21:33:59.006238Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:33:59.006277Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [71500:281474976715661] at 72075186224037890 for ReadTableScan 2025-06-24T21:33:59.006310Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [71500:281474976715661] at 72075186224037890 on unit ReadTableScan 2025-06-24T21:33:59.006356Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [71500:281474976715661] at 72075186224037890 error: , IsFatalError: 0 2025-06-24T21:33:59.006401Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [71500:281474976715661] at 72075186224037890 is Executed 2025-06-24T21:33:59.006431Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [71500:281474976715661] at 72075186224037890 executing on unit ReadTableScan 2025-06-24T21:33:59.006462Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [71500:281474976715661] at 72075186224037890 to execution unit CompleteOperation 2025-06-24T21:33:59.006491Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [71500:281474976715661] at 72075186224037890 on unit CompleteOperation 2025-06-24T21:33:59.006720Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [71500:281474976715661] at 72075186224037890 is DelayComplete 2025-06-24T21:33:59.006758Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [71500:281474976715661] at 72075186224037890 executing on unit CompleteOperation 2025-06-24T21:33:59.006788Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [71500:281474976715661] at 72075186224037890 to execution unit CompletedOperations 2025-06-24T21:33:59.006820Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1827: Trying to execute [71500:281474976715661] at 72075186224037890 on unit CompletedOperations 2025-06-24T21:33:59.006857Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [71500:281474976715661] at 72075186224037890 is Executed 2025-06-24T21:33:59.006895Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [71500:281474976715661] at 72075186224037890 executing on unit CompletedOperations 2025-06-24T21:33:59.006923Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [71500:281474976715661] at 72075186224037890 has finished 2025-06-24T21:33:59.006955Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:33:59.006984Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037890 2025-06-24T21:33:59.007015Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2025-06-24T21:33:59.007046Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-24T21:33:59.023640Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-24T21:33:59.023732Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-24T21:33:59.023774Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [71500:281474976715661] at 72075186224037890 on unit CompleteOperation 2025-06-24T21:33:59.023842Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [71500 : 281474976715661] from 72075186224037890 at tablet 72075186224037890 send result to client [2:1405:3207], exec latency: 1 ms, propose latency: 1 ms 2025-06-24T21:33:59.023901Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-24T21:33:59.024118Z node 2 :TX_PROXY DEBUG: datareq.cpp:2286: Actor# [2:1405:3207] txid# 281474976715661 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# COMPLETE shard id 72075186224037890 marker# P12 2025-06-24T21:33:59.024215Z node 2 :TX_PROXY INFO: datareq.cpp:834: Actor# [2:1405:3207] txid# 281474976715661 RESPONSE Status# ExecComplete prepare time: 0.000500s execute time: 0.001500s total time: 0.002000s marker# P13 >> DataShardStats::Tli [GOOD] >> DataShardStats::HasSchemaChanges_BTreeIndex |99.0%| [TM] {RESULT} ydb/core/tx/datashard/ut_compaction/unittest >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates4-invalid_pile_id] [GOOD] >> test_bridge.py::TestBridgeBasic::test_update_and_get_cluster_state >> TTxDataShardPrefixKMeansScan::BuildToBuild [GOOD] >> TTxDataShardRecomputeKMeansScan::BadRequest >> TDqSolomonWriteActorTest::TestShouldReturnAfterCheckpoint [GOOD] |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test >> KqpTpch::Query14 [GOOD] >> KqpTpch::Query15 ------- [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/solomon/actors/ut/unittest >> TDqSolomonWriteActorTest::TestShouldReturnAfterCheckpoint [GOOD] Test command err: 2025-06-24T21:33:19.015415Z node 1 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:117: TxId: TxId-42, Solomon sink. 
Init 2025-06-24T21:33:19.047704Z node 1 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 1 items to send. Checkpoint: 0. Send queue: 0. Inflight: 0. Checkpoint in progress: 0 2025-06-24T21:33:19.055308Z node 1 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 121 bytes of data to buffer 2025-06-24T21:33:19.058035Z node 1 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1 metrics with size of 121 bytes to solomon 2025-06-24T21:33:19.058079Z node 1 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-24T21:33:19.071346Z node 1 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[0]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 26 Date: Tue, 24 Jun 2025 21:33:19 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1} 2025-06-24T21:33:19.080432Z node 1 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-24T21:33:29.644143Z node 2 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:117: TxId: TxId-42, Solomon sink. Init 2025-06-24T21:33:29.676762Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 7500 items to send. Checkpoint: 0. Send queue: 0. Inflight: 0. Checkpoint in progress: 0 2025-06-24T21:33:29.721468Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 107903 bytes of data to buffer 2025-06-24T21:33:29.771268Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2025-06-24T21:33:29.811207Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2025-06-24T21:33:29.859505Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2025-06-24T21:33:29.879878Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2025-06-24T21:33:29.896095Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2025-06-24T21:33:29.915474Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2025-06-24T21:33:29.922744Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 54513 bytes of data to buffer 2025-06-24T21:33:29.923100Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 107903 bytes to solomon 2025-06-24T21:33:29.924050Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2025-06-24T21:33:29.924350Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2025-06-24T21:33:29.924370Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2025-06-24T21:33:30.051862Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. 
Solomon response[1]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Tue, 24 Jun 2025 21:33:30 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1000} 2025-06-24T21:33:30.052302Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2025-06-24T21:33:30.052324Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2025-06-24T21:33:30.127627Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[2]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Tue, 24 Jun 2025 21:33:30 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1000} 2025-06-24T21:33:30.128039Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2025-06-24T21:33:30.128061Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2025-06-24T21:33:30.200028Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[0]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Tue, 24 Jun 2025 21:33:30 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1000} 2025-06-24T21:33:30.200464Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2025-06-24T21:33:30.200498Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2025-06-24T21:33:30.363638Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[5]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Tue, 24 Jun 2025 21:33:30 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1000} 2025-06-24T21:33:30.364099Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2025-06-24T21:33:30.364119Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2025-06-24T21:33:30.428966Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[3]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Tue, 24 Jun 2025 21:33:30 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1000} 2025-06-24T21:33:30.429214Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 500 metrics with size of 54513 bytes to solomon 2025-06-24T21:33:30.429229Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer MaxRequestsInflight 2025-06-24T21:33:30.495903Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. 
Solomon response[4]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Tue, 24 Jun 2025 21:33:30 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1000} 2025-06-24T21:33:30.496034Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-24T21:33:30.551876Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[7]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 28 Date: Tue, 24 Jun 2025 21:33:30 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 500} 2025-06-24T21:33:30.552024Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-24T21:33:30.623771Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[6]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Tue, 24 Jun 2025 21:33:30 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1000} 2025-06-24T21:33:30.623910Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-24T21:33:43.103589Z node 3 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:117: TxId: TxId-42, Solomon sink. Init 2025-06-24T21:33:43.107279Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 10 items to send. Checkpoint: 0. Send queue: 0. Inflight: 0. Checkpoint in progress: 0 2025-06-24T21:33:43.107779Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 579 bytes of data to buffer 2025-06-24T21:33:43.107928Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 10 metrics with size of 579 bytes to solomon 2025-06-24T21:33:43.107943Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-24T21:33:43.120809Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[0]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 27 Date: Tue, 24 Jun 2025 21:33:43 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 10} 2025-06-24T21:33:43.120924Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-24T21:33:53.689234Z node 4 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:117: TxId: TxId-42, Solomon sink. Init 2025-06-24T21:33:53.692167Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 2400 items to send. Checkpoint: 1. Send queue: 0. Inflight: 0. Checkpoint in progress: 0 2025-06-24T21:33:53.714397Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 107903 bytes of data to buffer 2025-06-24T21:33:53.734586Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2025-06-24T21:33:53.742641Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 43613 bytes of data to buffer 2025-06-24T21:33:53.743031Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. 
Sent 1000 metrics with size of 107903 bytes to solomon 2025-06-24T21:33:53.744997Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2025-06-24T21:33:53.745105Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 400 metrics with size of 43613 bytes to solomon 2025-06-24T21:33:53.745122Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2025-06-24T21:33:53.827816Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[2]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 28 Date: Tue, 24 Jun 2025 21:33:53 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 400} 2025-06-24T21:33:53.827964Z node 4 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:373: TxId: TxId-42, Solomon sink. Process checkpoint. Inflight before checkpoint: 2 2025-06-24T21:33:54.151027Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[1]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Tue, 24 Jun 2025 21:33:54 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1000} 2025-06-24T21:33:54.151167Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: CheckpointInProgress Empty buffer 2025-06-24T21:33:54.234622Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[0]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Tue, 24 Jun 2025 21:33:54 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1000} 2025-06-24T21:33:54.234788Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-24T21:33:54.983661Z node 5 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:117: TxId: TxId-42, Solomon sink. Init 2025-06-24T21:33:54.985516Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 1 items to send. Checkpoint: 1. Send queue: 0. Inflight: 0. Checkpoint in progress: 0 2025-06-24T21:33:54.985631Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 121 bytes of data to buffer 2025-06-24T21:33:54.985778Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1 metrics with size of 121 bytes to solomon 2025-06-24T21:33:54.985800Z node 5 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:373: TxId: TxId-42, Solomon sink. Process checkpoint. Inflight before checkpoint: 1 2025-06-24T21:33:54.993189Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[0]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 26 Date: Tue, 24 Jun 2025 21:33:54 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1} 2025-06-24T21:33:54.993356Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-24T21:33:54.993986Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 1 items to send. Checkpoint: 0. Send queue: 0. Inflight: 0. 
Checkpoint in progress: 0 2025-06-24T21:33:54.994119Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 121 bytes of data to buffer 2025-06-24T21:33:54.994246Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1 metrics with size of 121 bytes to solomon 2025-06-24T21:33:54.994261Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-24T21:33:55.004334Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[1]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 26 Date: Tue, 24 Jun 2025 21:33:55 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1} 2025-06-24T21:33:55.004484Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer |99.0%| [TS] {RESULT} ydb/library/yql/providers/solomon/actors/ut/unittest >> test_config_with_metadata.py::TestConfigWithMetadataMirrorMax::test_cluster_is_operational_with_metadata [GOOD] >> TCreateAndDropViewTest::DropSameViewTwice [GOOD] >> TCreateAndDropViewTest::DropViewIfExists |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> KafkaProtocol::ProduceScenario [GOOD] >> KafkaProtocol::IdempotentProducerScenario |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> KeyValueGRPCService::SimpleWriteReadWithGetChannelStatus [GOOD] >> KeyValueGRPCService::SimpleWriteReadOverrun >> TTxDataShardRecomputeKMeansScan::BadRequest [GOOD] >> TTxDataShardRecomputeKMeansScan::MainTable >> test.py::TestSqsSplitMergeStdTables::test_std_merge_split [GOOD] >> TControlPlaneProxyShouldPassHids::ShouldCheckScenario [GOOD] >> TControlPlaneProxyTest::ShouldSendCreateQuery |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert_parallel [GOOD] >> test_inserts.py::TestYdbInsertsOperations::test_insert_multiple_empty_rows |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TControlPlaneProxyTest::ShouldSendCreateQuery [GOOD] >> TControlPlaneProxyTest::FailsOnCreateQueryWhenRateLimiterResourceNotCreated |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> KqpTpch::Query15 [GOOD] >> KqpTpch::Query16 >> QuoterWithKesusTest::CanKillKesusWhenUsingIt [GOOD] |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_unknown_data_source.py::TestUnknownDataSource::test_should_fail_unknown_data_source[v1-client0] >> TControlPlaneProxyTest::FailsOnCreateQueryWhenRateLimiterResourceNotCreated [GOOD] >> TControlPlaneProxyTest::ShouldSendListQueries >> test_alloc_default.py::TestAlloc::test_node_limit[kikimr0] [GOOD] >> KqpBatchUpdate::ManyPartitions_1 [GOOD] >> test_schemeshard_limits.py::TestSchemeShardLimitsCase0::test_effective_acls_are_too_large [GOOD] |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestConfigWithMetadataMirrorMax::test_cluster_is_operational_with_metadata [GOOD] |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test >> 
test_bridge.py::TestBridgeValidation::test_invalid_updates[updates4-invalid_pile_id] [GOOD] >> TCreateAndDropViewTest::DropViewIfExists [FAIL] >> TCreateAndDropViewTest::DropViewInFolder >> TControlPlaneProxyTest::ShouldSendListQueries [GOOD] >> TControlPlaneProxyTest::ShouldSendDescribeQuery >> RetryPolicy::TWriteSession_SwitchBackToLocalCluster [GOOD] >> RetryPolicy::TWriteSession_SeqNoShift ------- [TM] {asan, default-linux-x86_64, release} ydb/core/quoter/ut/unittest >> QuoterWithKesusTest::CanKillKesusWhenUsingIt [GOOD] Test command err: 2025-06-24T21:31:34.762226Z node 1 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1086: [/Path/KesusName]: Created kesus quoter proxy. Tablet id: 100500 2025-06-24T21:31:34.762373Z node 1 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1149: [/Path/KesusName]: Connecting to kesus 2025-06-24T21:31:34.767726Z node 1 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:806: [/Path/KesusName]: Failed to connect to tablet. Status: ERROR 2025-06-24T21:31:34.767807Z node 1 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1143: [/Path/KesusName]: Reconnecting to kesus 2025-06-24T21:31:34.795251Z node 2 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1086: [/Path/KesusName]: Created kesus quoter proxy. Tablet id: 100500 2025-06-24T21:31:34.795455Z node 2 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1149: [/Path/KesusName]: Connecting to kesus 2025-06-24T21:31:34.795626Z node 2 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:798: [/Path/KesusName]: Successfully connected to tablet 2025-06-24T21:31:34.795713Z node 2 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:819: [/Path/KesusName]: Disconnected from tablet 2025-06-24T21:31:34.795746Z node 2 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1143: [/Path/KesusName]: Reconnecting to kesus 2025-06-24T21:31:34.796006Z node 2 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:798: [/Path/KesusName]: Successfully connected to tablet 2025-06-24T21:31:34.816124Z node 3 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1086: [/Path/KesusName]: Created kesus quoter proxy. Tablet id: 100500 2025-06-24T21:31:34.816252Z node 3 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1149: [/Path/KesusName]: Connecting to kesus 2025-06-24T21:31:34.816685Z node 3 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:484: [/Path/KesusName]: ProxyRequest "/resource" 2025-06-24T21:31:34.816834Z node 3 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:491: [/Path/KesusName]: Resource "/resource" has incorrect name. Maybe this was some error on client side. 2025-06-24T21:31:34.816932Z node 3 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:354: [/Path/KesusName]: ProxySession("/resource", Error: GenericError) 2025-06-24T21:31:34.816997Z node 3 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:798: [/Path/KesusName]: Successfully connected to tablet 2025-06-24T21:31:34.817129Z node 3 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:484: [/Path/KesusName]: ProxyRequest "resource//resource" 2025-06-24T21:31:34.817187Z node 3 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:491: [/Path/KesusName]: Resource "resource//resource" has incorrect name. Maybe this was some error on client side. 2025-06-24T21:31:34.817229Z node 3 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:354: [/Path/KesusName]: ProxySession("resource//resource", Error: GenericError) 2025-06-24T21:31:34.840856Z node 4 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1086: [/Path/KesusName]: Created kesus quoter proxy. 
Tablet id: 100500 2025-06-24T21:31:34.840995Z node 4 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1149: [/Path/KesusName]: Connecting to kesus 2025-06-24T21:31:34.841143Z node 4 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:484: [/Path/KesusName]: ProxyRequest "res" 2025-06-24T21:31:34.847613Z node 4 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:798: [/Path/KesusName]: Successfully connected to tablet 2025-06-24T21:31:34.905029Z node 4 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:834: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 42 Error { Status: SUCCESS } EffectiveProps { ResourceId: 42 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 100 } } } }) 2025-06-24T21:31:34.905130Z node 4 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:843: [/Path/KesusName]: Initialized new session with resource "res" 2025-06-24T21:31:34.905192Z node 4 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:380: [/Path/KesusName]: ProxySession("res", 42) 2025-06-24T21:31:34.905282Z node 4 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res", Normal, {0: Front(20, 2)} }]) 2025-06-24T21:31:34.919603Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1086: [/Path/KesusName]: Created kesus quoter proxy. Tablet id: 100500 2025-06-24T21:31:34.919750Z node 5 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1149: [/Path/KesusName]: Connecting to kesus 2025-06-24T21:31:34.920058Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:484: [/Path/KesusName]: ProxyRequest "res0" 2025-06-24T21:31:34.920257Z node 5 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:798: [/Path/KesusName]: Successfully connected to tablet 2025-06-24T21:31:34.920578Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:834: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 42 Error { Status: SUCCESS } EffectiveProps { ResourceId: 42 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 5 } } } }) 2025-06-24T21:31:34.920644Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:843: [/Path/KesusName]: Initialized new session with resource "res0" 2025-06-24T21:31:34.920692Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:380: [/Path/KesusName]: ProxySession("res0", 42) 2025-06-24T21:31:34.920761Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res0", Normal, {0: Front(1, 2)} }]) 2025-06-24T21:31:34.920874Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:484: [/Path/KesusName]: ProxyRequest "res1" 2025-06-24T21:31:34.920980Z node 5 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:515: [/Path/KesusName]: Subscribe on resource "res1" 2025-06-24T21:31:34.921213Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:834: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 43 Error { Status: SUCCESS } EffectiveProps { ResourceId: 43 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 5 } } } }) 2025-06-24T21:31:34.921266Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:843: [/Path/KesusName]: Initialized new session with resource "res1" 2025-06-24T21:31:34.921303Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:380: [/Path/KesusName]: ProxySession("res1", 43) 2025-06-24T21:31:34.921355Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res1", Normal, {0: Front(1, 2)} }]) 2025-06-24T21:31:34.921455Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:484: [/Path/KesusName]: ProxyRequest "res2" 2025-06-24T21:31:34.921541Z node 5 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:515: [/Path/KesusName]: 
Subscribe on resource "res2" 2025-06-24T21:31:34.921803Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:834: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 44 Error { Status: SUCCESS } EffectiveProps { ResourceId: 44 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 5 } } } }) 2025-06-24T21:31:34.921839Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:843: [/Path/KesusName]: Initialized new session with resource "res2" 2025-06-24T21:31:34.921876Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:380: [/Path/KesusName]: ProxySession("res2", 44) 2025-06-24T21:31:34.921944Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res2", Normal, {0: Front(1, 2)} }]) 2025-06-24T21:31:34.922152Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:730: [/Path/KesusName]: ProxyStats([{"res1", Consumed: 0, Queue: 5}]) 2025-06-24T21:31:34.922203Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:751: [/Path/KesusName]: Set info for resource "res1": { Available: 1, QueueWeight: 5 } 2025-06-24T21:31:34.922246Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:660: [/Path/KesusName]: Activate session to "res1". Connected: 1 2025-06-24T21:31:34.923060Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:583: [/Path/KesusName]: UpdateConsumptionState({ ResourcesInfo { ResourceId: 43 ConsumeResource: true Amount: inf } ActorID { RawX1: 5 RawX2: 21474838532 } }) 2025-06-24T21:31:34.924065Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res1", Normal, {0: Front(1, 2)} }]) 2025-06-24T21:31:34.924437Z node 5 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:819: [/Path/KesusName]: Disconnected from tablet 2025-06-24T21:31:34.924475Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1143: [/Path/KesusName]: Reconnecting to kesus 2025-06-24T21:31:34.924600Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:638: [/Path/KesusName]: Mark "res1" for offline allocation. 
Connected: 0, SessionIsActive: 1, AverageDuration: 0.100000s, AverageAmount: 0.5 2025-06-24T21:31:34.924652Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:612: [/Path/KesusName]: Schedule offline allocation in 0.000000s: [{ "res1", 0.5 }] 2025-06-24T21:31:34.924852Z node 5 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:798: [/Path/KesusName]: Successfully connected to tablet 2025-06-24T21:31:34.925157Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:834: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 42 Error { Status: SUCCESS } EffectiveProps { ResourceId: 42 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 5 } } } Results { ResourceId: 43 Error { Status: SUCCESS } EffectiveProps { ResourceId: 43 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 5 } } } Results { ResourceId: 44 Error { Status: SUCCESS } EffectiveProps { ResourceId: 44 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 5 } } } }) 2025-06-24T21:31:34.925198Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:843: [/Path/KesusName]: Initialized new session with resource "res0" 2025-06-24T21:31:34.925233Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:843: [/Path/KesusName]: Initialized new session with resource "res1" 2025-06-24T21:31:34.925257Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:843: [/Path/KesusName]: Initialized new session with resource "res2" 2025-06-24T21:31:34.925307Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res0", Normal, {0: Front(1, 2)} }, { "res1", Normal, {0: Front(1, 2)} }, { "res2", Normal, {0: Front(1, 2)} }]) 2025-06-24T21:31:34.934134Z node 6 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1086: [/Path/KesusName]: Created kesus quoter proxy. Tablet id: 100500 2025-06-24T21:31:34.934260Z node 6 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1149: [/Path/KesusName]: Connecting to kesus 2025-06-24T21:31:34.934868Z node 6 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:484: [/Path/KesusName]: ProxyRequest "res" 2025-06-24T21:31:34.935077Z node 6 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:798: [/Path/KesusName]: Successfully connected to tablet 2025-06-24T21:31:34.935429Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:834: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 42 Error { Status: SUCCESS } EffectiveProps { ResourceId: 42 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 100 } } } }) 2025-06-24T21:31:34.935473Z node 6 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:843: [/Path/KesusName]: Initialized new session with resource "res" 2025-06-24T21:31:34.935523Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:380: [/Path/KesusName]: ProxySession("res", 42) 2025-06-24T21:31:34.935610Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res", Normal, {0: Front(20, 2)} }]) 2025-06-24T21:31:34.946159Z node 7 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1086: [/Path/KesusName]: Created kesus quoter proxy. 
Tablet id: 100500 2025-06-24T21:31:34.946302Z node 7 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1149: [/Path/KesusName]: Connecting to kesus 2025-06-24T21:31:34.946598Z node 7 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:484: [/Path/KesusName]: ProxyRequest "res" 2025-06-24T21:31:34.946763Z node 7 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:798: [/Path/KesusName]: Successfully connected to tablet 2025-06-24T21:31:34.947047Z node 7 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:834: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 42 Error { Status: SUCCESS } EffectiveProps { ResourceId: 42 Hierarchic ... ER_PROXY TRACE: kesus_quoter_proxy.cpp:868: [/dc-1/KesusQuoter]: ResourcesAllocated({ ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } }) 2025-06-24T21:34:05.007545Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:877: [/dc-1/KesusQuoter]: Kesus allocated {"Resource", 1} 2025-06-24T21:34:05.007602Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Front(0.9996904619, 2)} }]) 2025-06-24T21:34:05.008071Z node 49 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1085: ProxyUpdate for quoter /dc-1/KesusQuoter 2025-06-24T21:34:05.032196Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:1301: Feed resource "Resource". Balance: 0.9996904619. FreeBalance: 0.9996904619 2025-06-24T21:34:05.032246Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:346: Schedule next tick for "Resource". Tick size: 0.100000s. Time: 2025-06-24T21:34:05.127000Z 2025-06-24T21:34:05.032272Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:1336: Allocate resource "Resource" 2025-06-24T21:34:05.032326Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:275: Charge "Resource" for 5. Balance: 0.9996904619. FreeBalance: 0.9996904619. TicksToFullfill: 5.00154817. DurationToFullfillInUs: 500154.817. TimeToFullfill: 2025-06-24T21:34:04.524194Z. Now: 2025-06-24T21:34:05.031080Z. LastAllocated: 2025-06-24T21:34:04.024039Z 2025-06-24T21:34:05.032912Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:730: [/dc-1/KesusQuoter]: ProxyStats([{"Resource", Consumed: 5, Queue: 0}]) 2025-06-24T21:34:05.032982Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:751: [/dc-1/KesusQuoter]: Set info for resource "Resource": { Available: -4.000309538, QueueWeight: 0 } 2025-06-24T21:34:05.033056Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Sustained(0, 0)} }]) 2025-06-24T21:34:05.033171Z node 49 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1085: ProxyUpdate for quoter /dc-1/KesusQuoter 2025-06-24T21:34:05.103708Z node 50 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72075186224037888] Send TEvResourcesAllocated to [49:7519632367271200488:2263]. Cookie: 0. 
Data: { ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } } 2025-06-24T21:34:05.104792Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:868: [/dc-1/KesusQuoter]: ResourcesAllocated({ ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } }) 2025-06-24T21:34:05.104850Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:877: [/dc-1/KesusQuoter]: Kesus allocated {"Resource", 1} 2025-06-24T21:34:05.104911Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Sustained(0, 0)} }]) 2025-06-24T21:34:05.105762Z node 49 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1085: ProxyUpdate for quoter /dc-1/KesusQuoter 2025-06-24T21:34:05.128195Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:1301: Feed resource "Resource". Balance: 0. FreeBalance: 0 2025-06-24T21:34:05.203576Z node 50 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72075186224037888] Send TEvResourcesAllocated to [49:7519632367271200488:2263]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } } 2025-06-24T21:34:05.210178Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:868: [/dc-1/KesusQuoter]: ResourcesAllocated({ ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } }) 2025-06-24T21:34:05.210254Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:877: [/dc-1/KesusQuoter]: Kesus allocated {"Resource", 1} 2025-06-24T21:34:05.210320Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Sustained(0, 0)} }]) 2025-06-24T21:34:05.210585Z node 49 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1085: ProxyUpdate for quoter /dc-1/KesusQuoter 2025-06-24T21:34:05.210607Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:1301: Feed resource "Resource". Balance: 0. FreeBalance: 0 2025-06-24T21:34:05.303912Z node 50 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72075186224037888] Send TEvResourcesAllocated to [49:7519632367271200488:2263]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } } 2025-06-24T21:34:05.308003Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:868: [/dc-1/KesusQuoter]: ResourcesAllocated({ ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } }) 2025-06-24T21:34:05.308068Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:877: [/dc-1/KesusQuoter]: Kesus allocated {"Resource", 1} 2025-06-24T21:34:05.308135Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Sustained(0, 0)} }]) 2025-06-24T21:34:05.308380Z node 49 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1085: ProxyUpdate for quoter /dc-1/KesusQuoter 2025-06-24T21:34:05.308404Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:1301: Feed resource "Resource". Balance: 0. 
FreeBalance: 0 2025-06-24T21:34:05.947312Z node 49 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[49:7519632350091330474:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:34:05.947438Z node 49 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:34:05.963671Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519632358681265669:2386] 2025-06-24T21:34:05.963722Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519632358681265669:2386] 2025-06-24T21:34:06.319604Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594037968897] send [49:7519632354386297989:2127] 2025-06-24T21:34:06.319643Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037968897] push event to server [49:7519632354386297989:2127] 2025-06-24T21:34:06.962946Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519632358681265669:2386] 2025-06-24T21:34:06.962991Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519632358681265669:2386] 2025-06-24T21:34:07.224203Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594037968897] send [49:7519632354386297989:2127] 2025-06-24T21:34:07.224240Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037968897] push event to server [49:7519632354386297989:2127] 2025-06-24T21:34:07.956926Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519632358681265669:2386] 2025-06-24T21:34:07.956967Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519632358681265669:2386] 2025-06-24T21:34:08.960707Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519632358681265669:2386] 2025-06-24T21:34:08.960753Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519632358681265669:2386] 2025-06-24T21:34:09.958616Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519632358681265669:2386] 2025-06-24T21:34:09.958658Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519632358681265669:2386] 2025-06-24T21:34:10.799422Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594037932033] send [49:7519632350091330444:2092] 2025-06-24T21:34:10.799451Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037932033] push event to server [49:7519632350091330444:2092] 2025-06-24T21:34:10.959454Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519632358681265669:2386] 2025-06-24T21:34:10.959502Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519632358681265669:2386] 2025-06-24T21:34:11.961420Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519632358681265669:2386] 2025-06-24T21:34:11.961466Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519632358681265669:2386] 2025-06-24T21:34:12.332010Z 
node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594037968897] send [49:7519632354386297989:2127] 2025-06-24T21:34:12.332060Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037968897] push event to server [49:7519632354386297989:2127] 2025-06-24T21:34:12.961004Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519632358681265669:2386] 2025-06-24T21:34:12.961041Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519632358681265669:2386] 2025-06-24T21:34:13.971725Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519632358681265669:2386] 2025-06-24T21:34:13.971762Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519632358681265669:2386] 2025-06-24T21:34:14.972887Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519632358681265669:2386] 2025-06-24T21:34:14.972935Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519632358681265669:2386] 2025-06-24T21:34:15.960616Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519632358681265669:2386] 2025-06-24T21:34:15.960655Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519632358681265669:2386] 2025-06-24T21:34:16.255633Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594037936129] send [49:7519632350091330436:2080] 2025-06-24T21:34:16.255681Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037936129] push event to server [49:7519632350091330436:2080] 2025-06-24T21:34:16.889730Z node 49 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:34:16.889778Z node 49 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:34:16.960916Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519632358681265669:2386] 2025-06-24T21:34:16.960963Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519632358681265669:2386] 2025-06-24T21:34:17.359609Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594037968897] send [49:7519632354386297989:2127] 2025-06-24T21:34:17.359660Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037968897] push event to server [49:7519632354386297989:2127] 2025-06-24T21:34:17.961448Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519632358681265669:2386] 2025-06-24T21:34:17.961490Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519632358681265669:2386] 2025-06-24T21:34:18.961570Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519632358681265669:2386] 2025-06-24T21:34:18.961615Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519632358681265669:2386] |99.0%| [TM] {RESULT} ydb/core/quoter/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::ManyPartitions_1 [GOOD] Test command err: Trying to start YDB, gRPC: 21643, MsgBus: 24556 
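Editorial note on the QUOTER_SERVICE trace above: the "Charge "Resource" for 5" entry reports derived fields (TicksToFullfill, DurationToFullfillInUs, TimeToFullfill) that are consistent with the stated tick size of 0.100000s and the observed per-tick balance. The sketch below is a reading aid only, not YDB's quoter implementation; the name charge, the constant PER_TICK_ALLOCATION (taken from the Balance value in this single trace), and the assumed formula ticks = amount / per_tick_allocation are editorial assumptions inferred from the log.

# Minimal standalone sketch (assumptions as stated above); reproduces the
# numbers printed by the "Charge" trace entry.
from datetime import datetime, timedelta

TICK = timedelta(seconds=0.1)        # "Tick size: 0.100000s" in the trace
PER_TICK_ALLOCATION = 0.9996904619   # observed Balance; assumed equal to one tick's allocation

def charge(amount, balance, last_allocated):
    """Charge `amount` units and estimate when the resulting debt is repaid."""
    ticks_to_fulfill = amount / PER_TICK_ALLOCATION
    duration = ticks_to_fulfill * TICK
    return {
        "new_balance": balance - amount,
        "ticks_to_fulfill": ticks_to_fulfill,
        "duration_us": duration / timedelta(microseconds=1),
        "time_to_fulfill": last_allocated + duration,
    }

print(charge(5, 0.9996904619,
             datetime.fromisoformat("2025-06-24T21:34:04.024039")))
# ticks_to_fulfill ~ 5.00154817, duration_us ~ 500154.817,
# time_to_fulfill ~ 2025-06-24T21:34:04.524194 -- matching the trace above.

The resulting new_balance of about -4.000309538 also matches the "Available: -4.000309538" reported in the ProxyStats entry that follows the charge.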
2025-06-24T21:29:12.220873Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631113845677304:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:12.221136Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002b20/r3tmp/tmp2lefrX/pdisk_1.dat 2025-06-24T21:29:12.720181Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:12.720306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:12.736617Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:12.736892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21643, node 1 2025-06-24T21:29:13.035658Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:13.035682Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:13.035689Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:13.035795Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:13.121128Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24556 TClient is connected to server localhost:24556 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:13.735279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:13.752499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-24T21:29:13.760786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:13.900987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:29:14.065886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.127685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:15.901556Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631126730580632:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.901714Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.275399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.314026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.386740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.425312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.456141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.489838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.558717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:16.638114Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631131025548586:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.638192Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.638361Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631131025548591:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:16.641907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:16.659707Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631131025548593:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:16.754978Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631131025548644:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:17.212506Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631113845677304:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:17.212593Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:29:17.956787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... Trying to start YDB, gRPC: 5200, MsgBus: 26840 2025-06-24T21:29:22.795922Z node 2 :METADATA_PROVIDER WARN: lo ... stsActor;event=undelivered;self_id=[20:7519632304133433541:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:33:49.934641Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002b20/r3tmp/tmpiCDspr/pdisk_1.dat 2025-06-24T21:33:50.475862Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:33:50.476032Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:33:50.480754Z node 20 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:33:50.519891Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19448, node 20 2025-06-24T21:33:50.727887Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:33:50.727917Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:33:50.727936Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:33:50.728164Z node 20 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:33:50.997586Z node 20 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28169 TClient is connected to server localhost:28169 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:33:52.813334Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:33:52.832051Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:33:52.861070Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:33:53.135045Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:33:53.769916Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:33:54.173014Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:33:55.305795Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[20:7519632304133433541:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:33:55.306459Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:34:02.717710Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7519632359968010061:2383], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:34:02.717914Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:34:03.300522Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:34:03.572443Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:34:03.740815Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:34:03.874916Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:34:04.007908Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:34:04.158526Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:34:04.326413Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:34:04.546615Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7519632368557945342:2459], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:34:04.546824Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:34:04.547826Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7519632368557945347:2462], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:34:04.561261Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:34:04.623250Z node 20 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [20:7519632368557945349:2463], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:34:04.695723Z node 20 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [20:7519632368557945402:3489] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:34:05.395469Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:34:05.395503Z node 20 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:34:08.598065Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TControlPlaneProxyTest::ShouldSendDescribeQuery [GOOD] >> TControlPlaneProxyTest::ShouldSendGetQueryStatus ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates1-multiple_primary_piles_in_request] [GOOD] 2025-06-24 21:34:24,292 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 21:34:24,330 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 60 secs timeout. Process tree before termination: pid rss ref pdirt 1146704 760M 0b 0b ydb-tests-functional-bridge --basetemp /home/runner/.ya/build/build_root/2btm/004b38/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctest-mo Test command err: Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: ...est.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/004b38/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk2/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/004b38', '--source-root', '/home/runner/.ya/build/build_root/2btm/004b38/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/004b38/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk2/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/functional/bridge', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '2', '--partition-mode', 'SEQUENTIAL', '--split-by-tests', '--dep-root', 'ydb/tests/functional/bridge', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 
'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address', '--test-file-filter', 'test_bridge.py']' stopped by 60 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("...est.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/004b38/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk2/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/004b38', '--source-root', '/home/runner/.ya/build/build_root/2btm/004b38/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/004b38/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk2/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/functional/bridge', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '2', '--partition-mode', 'SEQUENTIAL', '--split-by-tests', '--dep-root', 'ydb/tests/functional/bridge', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address', '--test-file-filter', 'test_bridge.py']' stopped by 60 seconds timeout",), {}) >> KafkaProtocol::IdempotentProducerScenario [GOOD] >> KafkaProtocol::FetchScenario >> DataShardStats::HasSchemaChanges_BTreeIndex [GOOD] >> DataShardStats::HasSchemaChanges_ByKeyFilter |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_schemeshard_limits.py::TestSchemeShardLimitsCase1::test_too_large_acls [GOOD] >> TControlPlaneProxyTest::ShouldSendGetQueryStatus [GOOD] >> TControlPlaneProxyTest::ShouldSendModifyQuery >> KeyValueGRPCService::SimpleWriteReadOverrun [GOOD] >> KeyValueGRPCService::SimpleWriteReadRange |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TDataShardRSTest::TestCleanupInRS-UseSink [GOOD] >> TDataShardRSTest::TestDelayedRSAckForUnknownTx >> TTxDataShardRecomputeKMeansScan::MainTable [GOOD] >> TTxDataShardRecomputeKMeansScan::BuildTable >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates3-duplicate_pile_update] |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates2-no_primary_pile_in_result] >> TControlPlaneProxyTest::ShouldSendModifyQuery [GOOD] >> TControlPlaneProxyTest::ShouldSendDeleteQuery >> 
test_alloc_default.py::TestAlloc::test_alloc_and_free[kikimr0] >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_simple_table-False] [GOOD] >> KqpTpch::Query16 [GOOD] >> KqpTpch::Query17 >> TControlPlaneProxyTest::ShouldSendDeleteQuery [GOOD] >> TControlPlaneProxyTest::ShouldSendControlQuery |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test >> test_schemeshard_limits.py::TestSchemeShardLimitsCase0::test_effective_acls_are_too_large [GOOD] |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TMemoryController::SharedCache [GOOD] >> TMemoryController::SharedCache_ConfigLimit >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates0-no_updates] >> TControlPlaneProxyTest::ShouldSendControlQuery [GOOD] >> TControlPlaneProxyTest::ShouldSendGetResultData |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> Etcd_KV::TxnReadOnly [GOOD] >> Etcd_KV::TxnMultiKeysAndOperations >> TCreateAndDropViewTest::DropViewInFolder [GOOD] >> TCreateAndDropViewTest::ContextPollution >> TControlPlaneProxyTest::ShouldSendGetResultData [GOOD] >> TControlPlaneProxyTest::ShouldSendListJobs |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> KafkaProtocol::FetchScenario [GOOD] >> KafkaProtocol::BalanceScenario >> TControlPlaneProxyTest::ShouldSendListJobs [GOOD] >> TControlPlaneProxyTest::ShouldSendDescribeJob >> TDataShardRSTest::TestDelayedRSAckForUnknownTx [GOOD] >> TDataShardRSTest::TestDelayedRSAckForOutOfOrderCompletedTx |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TControlPlaneProxyTest::ShouldSendDescribeJob [GOOD] >> TControlPlaneProxyTest::ShouldSendCreateConnection >> KeyValueGRPCService::SimpleWriteReadRange [GOOD] >> KeyValueGRPCService::SimpleWriteListRange >> TMemoryController::SharedCache_ConfigLimit [GOOD] >> TMemoryController::MemTable >> TTxDataShardRecomputeKMeansScan::BuildTable [GOOD] >> TTxDataShardRecomputeKMeansScan::EmptyCluster |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test >> test_schemeshard_limits.py::TestSchemeShardLimitsCase1::test_too_large_acls [GOOD] |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TControlPlaneProxyTest::ShouldSendCreateConnection [GOOD] >> TControlPlaneProxyTest::ShouldSendListConnections |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> KqpTpch::Query17 [GOOD] >> KqpTpch::Query18 |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> DataShardStats::HasSchemaChanges_ByKeyFilter [GOOD] >> DataShardStats::HasSchemaChanges_Columns >> test_unknown_data_source.py::TestUnknownDataSource::test_should_fail_unknown_data_source[v1-client0] [GOOD] >> TControlPlaneProxyTest::ShouldSendListConnections [GOOD] >> TControlPlaneProxyTest::ShouldSendDescribeConnection |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/merge_split_common_table/std/py3test >> 
test.py::TestSqsSplitMergeStdTables::test_std_merge_split [GOOD] |99.1%| [TM] {RESULT} ydb/tests/functional/sqs/merge_split_common_table/std/py3test |99.1%| [TA] $(B)/ydb/tests/functional/limits/test-results/py3test/{meta.json ... results_accumulator.log} |99.1%| [TA] {RESULT} $(B)/ydb/tests/functional/limits/test-results/py3test/{meta.json ... results_accumulator.log} >> TControlPlaneProxyTest::ShouldSendDescribeConnection [GOOD] >> TControlPlaneProxyTest::ShouldSendModifyConnection >> test_create_tablets.py::TestHive::test_when_create_tablets_after_bs_groups_and_kill_hive_then_tablets_start >> test_bridge.py::TestBridgeBasic::test_update_and_get_cluster_state [GOOD] |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TControlPlaneProxyTest::ShouldSendModifyConnection [GOOD] >> TControlPlaneProxyTest::ShouldSendDeleteConnection |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TCreateAndDropViewTest::ContextPollution [GOOD] >> TEvaluateExprInViewTest::EvaluateExpr >> TDataShardRSTest::TestDelayedRSAckForOutOfOrderCompletedTx [GOOD] >> TDataShardRSTest::TestGenericReadSetDecisionCommit >> TControlPlaneProxyTest::ShouldSendDeleteConnection [GOOD] >> TControlPlaneProxyTest::ShouldSendTestConnection |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> KqpTpch::Query18 [GOOD] >> KqpTpch::Query19 >> KqpBatchUpdate::SimplePartitions [GOOD] >> test_inserts.py::TestYdbInsertsOperations::test_insert_multiple_empty_rows [GOOD] >> TTxDataShardRecomputeKMeansScan::EmptyCluster [GOOD] >> TTxDataShardReshuffleKMeansScan::BadRequest |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TControlPlaneProxyTest::ShouldSendTestConnection [GOOD] >> TControlPlaneProxyTest::ShouldSendCreateBinding |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::SimplePartitions [GOOD] Test command err: Trying to start YDB, gRPC: 8574, MsgBus: 12733 2025-06-24T21:29:10.394851Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631104633760606:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:10.395320Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002bf8/r3tmp/tmpzr2ENg/pdisk_1.dat 2025-06-24T21:29:10.868009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:10.868104Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:10.870914Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:10.916579Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:10.917414Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription 
[1:7519631104633760422:2079] 1750800550375359 != 1750800550375362 TServer::EnableGrpc on GrpcPort 8574, node 1 2025-06-24T21:29:11.135796Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:29:11.135823Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:29:11.135849Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:29:11.135990Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:11.385703Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12733 TClient is connected to server localhost:12733 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:12.053286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:29:12.108150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:29:12.123624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.353794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:29:12.573733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:12.699136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:29:14.124921Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631121813631252:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.125011Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:14.801548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.848168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.877210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.917243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.954108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:14.995431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.068156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:29:15.169466Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631126108599214:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.169544Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.169882Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631126108599219:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:15.173529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:15.184936Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631126108599221:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:29:15.241604Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631126108599272:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:15.387552Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631104633760606:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:15.387640Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 21200, MsgBus: 4675 2025-06-24T21:29:24.736790Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631167079142551:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:24.736938Z node 2 :METADATA_PROVIDER ERROR: log.cpp:7 ... : schemeshard_import.cpp:304: Table profiles were not loaded Trying to start YDB, gRPC: 20666, MsgBus: 15023 2025-06-24T21:34:18.407093Z node 16 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[16:7519632428986379054:2169];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/002bf8/r3tmp/tmpQJsRQ9/pdisk_1.dat 2025-06-24T21:34:18.779163Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:34:19.070347Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:34:19.070470Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:34:19.079727Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:34:19.080765Z node 16 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:34:19.091743Z node 16 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [16:7519632428986378910:2079] 1750800858334344 != 1750800858334347 TServer::EnableGrpc on GrpcPort 20666, node 16 2025-06-24T21:34:19.316827Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:34:19.316854Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:34:19.316870Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:34:19.317076Z node 16 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:34:19.403622Z node 16 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15023 TClient is connected to server localhost:15023 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:34:20.268060Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:34:20.283728Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:34:20.306424Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:34:20.478204Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:34:20.792771Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:34:20.927086Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:34:23.379295Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[16:7519632428986379054:2169];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:34:23.379400Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:34:26.356185Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519632463346118951:2373], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:34:26.356309Z node 16 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:34:26.422019Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:34:26.499054Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:34:26.583970Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:34:26.683830Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:34:26.778929Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:34:26.876727Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:34:26.943496Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:34:27.079104Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519632467641086914:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:34:27.079247Z node 16 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:34:27.079965Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519632467641086919:2441], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:34:27.085225Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:34:27.111811Z node 16 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [16:7519632467641086921:2442], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:34:27.212992Z node 16 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [16:7519632467641086972:3440] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:34:34.059423Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:34:34.059454Z node 16 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates3-duplicate_pile_update] [GOOD] >> KeyValueGRPCService::SimpleWriteListRange [GOOD] >> KeyValueGRPCService::SimpleGetStorageChannelStatus >> TControlPlaneProxyTest::ShouldSendCreateBinding [GOOD] >> TControlPlaneProxyTest::ShouldSendListBindings >> test_drain.py::TestHive::test_drain_on_stop >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates2-no_primary_pile_in_result] [GOOD] >> test_kill_tablets.py::TestKillTablets::test_when_kill_hive_it_will_be_restarted_and_can_create_tablets >> TControlPlaneProxyTest::ShouldSendListBindings [GOOD] >> TControlPlaneProxyTest::ShouldSendDescribeBinding |99.1%| [TA] $(B)/ydb/core/kqp/ut/batch_operations/test-results/unittest/{meta.json ... results_accumulator.log} |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_simple_table-False] [GOOD] |99.1%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/batch_operations/test-results/unittest/{meta.json ... results_accumulator.log} |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/common/py3test >> test_unknown_data_source.py::TestUnknownDataSource::test_should_fail_unknown_data_source[v1-client0] [GOOD] |99.1%| [TM] {RESULT} ydb/tests/fq/common/py3test >> TControlPlaneProxyTest::ShouldSendDescribeBinding [GOOD] >> TControlPlaneProxyTest::ShouldSendModifyBinding >> DataShardStats::HasSchemaChanges_Columns [GOOD] >> DataShardStats::HasSchemaChanges_Families ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test >> test_bridge.py::TestBridgeBasic::test_update_and_get_cluster_state [GOOD] 2025-06-24 21:34:56,325 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 21:34:57,525 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 60 secs timeout. 
Process tree before termination: pid rss ref pdirt 1165037 863M 860M 786M ydb-tests-functional-bridge --basetemp /home/runner/.ya/build/build_root/2btm/004a56/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctest-mo 1168127 1.4G 1.2G 875M ├─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_out 1168175 1.3G 0b 0b ├─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_out 1192395 1.3G 0b 0b │ └─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_ 1168184 1.3G 0b 0b ├─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_out 1192393 1.3G 0b 0b │ └─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_ 1168185 2.1G 0b 0b ├─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_out 1192362 2.1G 0b 0b │ └─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_ 1168221 1.3G 1.3G 895M ├─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_out 1192402 1.3G 0b 0b │ └─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_ 1168226 1.2G 0b 0b ├─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_out 1168227 1.2G 0b 0b ├─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_out 1192408 1.2G 0b 0b │ └─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_ 1168261 2.1G 0b 0b ├─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_out 1192410 2.1G 0b 0b │ └─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_ 1168262 1.2G 1.2G 806M ├─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_out 1168263 1.2G 0b 0b ├─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_out 1192377 1.2G 0b 0b │ └─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_ 1168264 1.2G 0b 0b ├─ ydbd server 
--config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_out 1192491 1.2G 0b 0b │ └─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_ 1168265 1.2G 0b 0b └─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_out 1192444 1.2G 0b 0b └─ ydbd server --config-dir=/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_ Test command err: File "library/python/pytest/main.py", line 101, in main rc = pytest.main( File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main return wrap_session(config, _main) File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session session.exitstatus = doit(config, session) or 0 File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main config.hook.pytest_runtestloop(session=session) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol runtestprotocol(item, nextitem=nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 135, in runtestprotocol reports.append(call_and_report(item, "teardown", log, nextitem=nextitem)) File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report call = call_runtest_hook(item, when, **kwds) File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook return CallInfo.from_call( File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call result: Optional[TResult] = func() File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in lambda: ihook(item=item, **kwds), when=when, reraise=reraise File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, 
self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 183, in pytest_runtest_teardown item.session._setupstate.teardown_exact(nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 543, in teardown_exact fin() File "contrib/python/pytest/py3/_pytest/fixtures.py", line 1042, in finish func() File "contrib/python/pytest/py3/_pytest/fixtures.py", line 926, in _teardown_yield_fixture next(it) File "contrib/python/pytest/py3/_pytest/python.py", line 847, in xunit_setup_class_fixture _call_with_optional_argument(func, self.obj) File "contrib/python/pytest/py3/_pytest/python.py", line 764, in _call_with_optional_argument func(arg) File "ydb/tests/functional/bridge/test_bridge.py", line 117, in teardown_class cls.cluster.stop() File "ydb/tests/library/harness/kikimr_runner.py", line 599, in stop thread.join() File "contrib/tools/python3/Lib/threading.py", line 1149, in join self._wait_for_tstate_lock() File "contrib/tools/python3/Lib/threading.py", line 1169, in _wait_for_tstate_lock if lock.acquire(block, timeout): File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown traceback.print_stack(file=sys.stderr) Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: ...est.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/004a56', '--source-root', '/home/runner/.ya/build/build_root/2btm/004a56/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/functional/bridge', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '0', '--partition-mode', 'SEQUENTIAL', '--split-by-tests', '--dep-root', 'ydb/tests/functional/bridge', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address', '--test-file-filter', 'test_bridge.py']' stopped by 60 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File 
"library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("...est.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/004a56', '--source-root', '/home/runner/.ya/build/build_root/2btm/004a56/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/004a56/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/test_bridge/chunk0/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/functional/bridge', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '0', '--partition-mode', 'SEQUENTIAL', '--split-by-tests', '--dep-root', 'ydb/tests/functional/bridge', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address', '--test-file-filter', 'test_bridge.py']' stopped by 60 seconds timeout",), {}) >> TControlPlaneProxyTest::ShouldSendModifyBinding [GOOD] >> TControlPlaneProxyTest::ShouldSendDeleteBinding |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> KqpTpch::Query19 [GOOD] >> KqpTpch::Query20 >> TDataShardRSTest::TestGenericReadSetDecisionCommit [GOOD] >> TDataShardRSTest::TestGenericReadSetDecisionAbort |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates0-no_updates] [GOOD] >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_table-True] [GOOD] >> TControlPlaneProxyTest::ShouldSendDeleteBinding [GOOD] >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_table-True] [GOOD] |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_create_tablets.py::TestHive::test_when_create_tablets_then_can_lookup_them |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TTxDataShardReshuffleKMeansScan::BadRequest [GOOD] >> TTxDataShardReshuffleKMeansScan::MainToPosting ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/control_plane_proxy/ut/unittest >> TControlPlaneProxyTest::ShouldSendDeleteBinding [GOOD] Test command err: 2025-06-24T21:30:56.829359Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:674: CreateQueryRequest, validation failed: test_user@staff **** (00000000) content { name: "my_query_name" } error:
: Error: No permission yq.queries.create@as in a given scope , code: 1000 2025-06-24T21:30:57.337928Z node 2 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:744: ListQueriesRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:30:57.804486Z node 3 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:810: DescribeQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:30:58.309436Z node 4 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:878: GetQueryStatusRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.getStatus@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:30:58.758158Z node 5 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:945: ModifyQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:30:59.239516Z node 6 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1021: DeleteQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:30:59.750769Z node 7 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1087: ControlQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.control@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:00.228620Z node 8 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1156: GetResultDataRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.getData@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:00.835455Z node 9 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1222: ListJobsRequest, validation failed: test_user@staff **** (00000000) query_id: "my_query_id" error:
: Error: No permission yq.jobs.get@as in a given scope , code: 1000 2025-06-24T21:31:01.391656Z node 10 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1288: DescribeJobRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.jobs.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:01.872639Z node 11 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1360: CreateConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:02.368629Z node 12 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1360: CreateConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) content { setting { ydb_database { auth { service_account { id: "my_sa_id" } } } } } error:
: Error: No permission iam.serviceAccounts.use@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:02.901443Z node 13 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1509: ListConnectionsRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:03.436012Z node 14 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1575: DescribeConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:03.927507Z node 15 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1646: ModifyConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:04.452097Z node 16 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1646: ModifyConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) content { setting { ydb_database { auth { service_account { id: "my_sa_id" } } } } } error:
: Error: No permission iam.serviceAccounts.use@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:04.982644Z node 17 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1798: DeleteConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:05.519006Z node 18 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1920: TestConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:06.054886Z node 19 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1920: TestConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) setting { ydb_database { auth { service_account { id: "my_sa_id" } } } } error:
: Error: No permission iam.serviceAccounts.use@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:06.561984Z node 20 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1979: CreateBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:07.239589Z node 21 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2134: ListBindingsRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:07.799525Z node 22 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2200: DescribeBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:08.371181Z node 23 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2266: ModifyBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:09.016015Z node 24 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2413: DeleteBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:40.423925Z node 72 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:674: CreateQueryRequest, validation failed: test_user@staff **** (00000000) content { name: "my_query_name" } error:
: Error: No permission yq.queries.create@as in a given scope , code: 1000 2025-06-24T21:31:41.195799Z node 73 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:744: ListQueriesRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:41.972165Z node 74 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:810: DescribeQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:42.685543Z node 75 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:878: GetQueryStatusRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.getStatus@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:43.472972Z node 76 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:945: ModifyQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:44.234802Z node 77 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1021: DeleteQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:44.982047Z node 78 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1087: ControlQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.control@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:45.664092Z node 79 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1156: GetResultDataRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.getData@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:46.384931Z node 80 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1222: ListJobsRequest, validation failed: test_user@staff **** (00000000) query_id: "my_query_id" error:
: Error: No permission yq.jobs.get@as in a given scope , code: 1000 2025-06-24T21:31:47.095408Z node 81 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1288: DescribeJobRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.jobs.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:47.839041Z node 82 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1360: CreateConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:48.583398Z node 83 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1360: CreateConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) content { setting { ydb_database { auth { service_account { id: "my_sa_id" } } } } } error:
: Error: No permission iam.serviceAccounts.use@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:49.345616Z node 84 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1509: ListConnectionsRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:50.059017Z node 85 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1575: DescribeConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:50.805729Z node 86 :YQ_CONTROL_PLANE_STORAG ... L_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1920: TestConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:53.833408Z node 90 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1920: TestConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) setting { ydb_database { auth { service_account { id: "my_sa_id" } } } } error:
: Error: No permission iam.serviceAccounts.use@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:54.571665Z node 91 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1979: CreateBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.create@as in a given scope yandexcloud://my_folder, code: 1000
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:55.293580Z node 92 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2134: ListBindingsRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:55.982229Z node 93 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2200: DescribeBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:56.716701Z node 94 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2266: ModifyBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:31:57.449392Z node 95 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2413: DeleteBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:03.930555Z node 163 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:674: CreateQueryRequest, validation failed: test_user_3@staff **** (00000000) content { name: "my_query_name" } error:
: Error: No permission yq.queries.create@as in a given scope , code: 1000 2025-06-24T21:33:07.167963Z node 166 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:878: GetQueryStatusRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.queries.getStatus@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:08.282871Z node 167 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:945: ModifyQueryRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.queries.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:09.758608Z node 168 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1021: DeleteQueryRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.queries.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:10.850105Z node 169 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1087: ControlQueryRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.queries.control@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:11.926615Z node 170 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1156: GetResultDataRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.queries.getData@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:15.363776Z node 173 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1360: CreateConnectionRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:18.545170Z node 176 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1646: ModifyConnectionRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.connections.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:19.601988Z node 177 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1798: DeleteConnectionRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.connections.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:21.006684Z node 178 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1920: TestConnectionRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:22.123589Z node 179 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1979: CreateBindingRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.bindings.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:27.106875Z node 182 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2266: ModifyBindingRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.bindings.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:29.590472Z node 183 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2413: DeleteBindingRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.bindings.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:34.342509Z node 185 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:744: ListQueriesRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.queries.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:37.057854Z node 186 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:810: DescribeQueryRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.queries.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:43.517597Z node 188 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:945: ModifyQueryRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.queries.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:45.458709Z node 189 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1021: DeleteQueryRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.queries.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:47.510258Z node 190 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1087: ControlQueryRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.queries.control@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:51.445348Z node 192 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1222: ListJobsRequest, validation failed: test_user_4@staff **** (00000000) query_id: "my_query_id" error:
: Error: No permission yq.jobs.get@as in a given scope , code: 1000 2025-06-24T21:33:53.034174Z node 193 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1288: DescribeJobRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.jobs.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:54.878486Z node 194 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1360: CreateConnectionRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:56.668451Z node 195 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1509: ListConnectionsRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:33:58.676425Z node 196 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1575: DescribeConnectionRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:34:00.385121Z node 197 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1646: ModifyConnectionRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.connections.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:34:02.023389Z node 198 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1798: DeleteConnectionRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.connections.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:34:04.375988Z node 199 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1920: TestConnectionRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:34:06.137001Z node 200 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1979: CreateBindingRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.bindings.create@as in a given scope yandexcloud://my_folder, code: 1000
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:34:08.314811Z node 201 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2134: ListBindingsRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.bindings.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:34:11.437981Z node 202 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2200: DescribeBindingRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.bindings.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:34:14.644440Z node 203 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2266: ModifyBindingRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.bindings.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-24T21:34:16.943090Z node 204 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2413: DeleteBindingRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.bindings.delete@as in a given scope yandexcloud://my_folder, code: 1000 |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.1%| [TM] {RESULT} ydb/core/fq/libs/control_plane_proxy/ut/unittest |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> KafkaProtocol::BalanceScenario [GOOD] >> KafkaProtocol::BalanceScenarioForFederation >> test_alloc_default.py::TestAlloc::test_alloc_and_free[kikimr0] [GOOD] >> test_create_tablets.py::TestHive::test_when_create_tablets_after_bs_groups_and_kill_hive_then_tablets_start [GOOD] >> TEvaluateExprInViewTest::EvaluateExpr [GOOD] >> TEvaluateExprInViewTest::NakedCallToCurrentTimeFunction |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates3-duplicate_pile_update] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int32--2147483648-True] >> KeyValueGRPCService::SimpleGetStorageChannelStatus [GOOD] >> KeyValueGRPCService::SimpleCreateAlterDropVolume >> KqpTpch::Query20 [GOOD] >> KqpTpch::Query21 |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_kill_tablets.py::TestKillTablets::test_then_kill_system_tablets_and_it_increases_generation >> TDataShardRSTest::TestGenericReadSetDecisionAbort [GOOD] >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--false] |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_kill_tablets.py::TestKillTablets::test_when_kill_hive_it_will_be_restarted_and_can_create_tablets [GOOD] |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TMemoryController::MemTable [GOOD] >> TMemoryController::ResourceBroker |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates2-no_primary_pile_in_result] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_rs/unittest >> TDataShardRSTest::TestGenericReadSetDecisionAbort [GOOD] Test command err: 2025-06-24T21:31:33.041794Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:33.042492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:31:33.042772Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001b90/r3tmp/tmpcWh8sl/pdisk_1.dat 2025-06-24T21:31:33.607630Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:31:33.629383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:31:33.716309Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:33.723824Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750800689712879 != 1750800689712883 2025-06-24T21:31:33.780249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:33.780424Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:33.794549Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:33.900548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:33.974064Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:643:2538] 2025-06-24T21:31:33.974365Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:31:34.047700Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:31:34.047933Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:31:34.050718Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:31:34.050824Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:31:34.050887Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:31:34.052752Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:31:34.053240Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:31:34.053335Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:677:2538] in generation 1 2025-06-24T21:31:34.054032Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:645:2540] 2025-06-24T21:31:34.054274Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:31:34.065115Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:31:34.065447Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:31:34.067021Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-24T21:31:34.067099Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-24T21:31:34.067171Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-24T21:31:34.067503Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:31:34.067951Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:31:34.068013Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:690:2540] in generation 1 2025-06-24T21:31:34.069900Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037891 actor [1:648:2542] 2025-06-24T21:31:34.070119Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:31:34.083623Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:31:34.083918Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:31:34.085546Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037891 2025-06-24T21:31:34.085646Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037891 2025-06-24T21:31:34.085698Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037891 2025-06-24T21:31:34.086040Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:31:34.086583Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:651:2544] 2025-06-24T21:31:34.086829Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:31:34.104046Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:31:34.104135Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037891 persisting started state actor id [1:707:2542] in generation 1 2025-06-24T21:31:34.105104Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:31:34.105223Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:31:34.106759Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-24T21:31:34.106840Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-24T21:31:34.106902Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-24T21:31:34.107806Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-24T21:31:34.107956Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:31:34.108034Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:712:2544] in generation 1 2025-06-24T21:31:34.119362Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:31:34.158273Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:31:34.159489Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:31:34.159676Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:717:2580] 2025-06-24T21:31:34.159724Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:31:34.159764Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:31:34.159806Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:31:34.160880Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:31:34.160961Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-24T21:31:34.161038Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:31:34.161139Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:718:2581] 2025-06-24T21:31:34.161169Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-24T21:31:34.161209Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-24T21:31:34.161239Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:31:34.161755Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:31:34.161795Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037891 2025-06-24T21:31:34.161855Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037891 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:31:34.161908Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037891, actorId: [1:719:2582] 2025-06-24T21:31:34.161929Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037891 2025-06-24T21:31:34.161972Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037891, state: WaitScheme 2025-06-24T21:31:34.161998Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-24T21:31:34.162274Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 
72075186224037888 2025-06-24T21:31:34.162396Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:31:34.162506Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:31:34.162554Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-24T21:31:34.162625Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:31:34.162700Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:720:2583] 2025-06-24T21:31:34.162724Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-24T21:31:34.162761Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-24T21:31:34.162792Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: ... D DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:35:15.100799Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [2023:281474976715664] at 72075186224037888 on unit CompleteWrite 2025-06-24T21:35:15.100884Z node 6 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [2023 : 281474976715664] from 72075186224037888 at tablet 72075186224037888 send result to client [6:947:2701] 2025-06-24T21:35:15.100963Z node 6 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:35:15.101081Z node 6 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2025-06-24T21:35:15.101170Z node 6 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Reply at 72075186224037888 {TEvReadSet step# 2023 txid# 281474976715664 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037889 ReadSet.Size()# 0 Seqno# 0 Flags# 7} ... 
nodata readset 2025-06-24T21:35:15.101308Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [6:626:2530], Recipient [6:712:2590]: {TEvReadSet step# 2023 txid# 281474976715664 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 0 Seqno# 0 Flags# 3} 2025-06-24T21:35:15.101343Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-24T21:35:15.101375Z node 6 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037888 dest 72075186224037889 producer 72075186224037888 txId 281474976715664 2025-06-24T21:35:15.101430Z node 6 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 2023 txid# 281474976715664 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 0 Seqno# 0 Flags# 3} 2025-06-24T21:35:15.101477Z node 6 :TX_DATASHARD TRACE: volatile_tx.cpp:863: Processed readset without data from 72075186224037888 to 72075186224037889 at tablet 72075186224037889 2025-06-24T21:35:15.102037Z node 6 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2751: SelfId: [6:947:2701], SessionActorId: [6:889:2701], Got LOCKS BROKEN for table. ShardID=72075186224037888, Sink=[6:947:2701].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-24T21:35:15.102239Z node 6 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [6:947:2701], SessionActorId: [6:889:2701], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[6:889:2701]. isRollback=0 2025-06-24T21:35:15.102790Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=6&id=ZDdmY2VjMmUtZDk3NDRlYmQtYzA5ZmRkZGUtZWVmOTNlZjI=, ActorId: [6:889:2701], ActorState: ExecuteState, TraceId: 01jyhxvph10nrdjmr8j68r3gn6, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [6:948:2701] from: [6:947:2701] 2025-06-24T21:35:15.103013Z node 6 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [6:948:2701] TxId: 281474976715664. Ctx: { TraceId: 01jyhxvph10nrdjmr8j68r3gn6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=6&id=ZDdmY2VjMmUtZDk3NDRlYmQtYzA5ZmRkZGUtZWVmOTNlZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-24T21:35:15.103593Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 278003712, Sender [6:947:2701], Recipient [6:626:2530]: NKikimrDataEvents.TEvWrite TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715662 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 } Op: Rollback } 2025-06-24T21:35:15.103639Z node 6 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-24T21:35:15.103774Z node 6 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-24T21:35:15.103814Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [2023:281474976715664] at 72075186224037889 on unit CompleteWrite 2025-06-24T21:35:15.103871Z node 6 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-24T21:35:15.103946Z node 6 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-24T21:35:15.104124Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435074, Sender [6:626:2530], Recipient [6:626:2530]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-24T21:35:15.104187Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-24T21:35:15.104274Z node 6 :TX_DATASHARD TRACE: datashard__write.cpp:28: TTxWrite:: execute at tablet# 72075186224037888 2025-06-24T21:35:15.104450Z node 6 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: Parsing write transaction for 0 at 72075186224037888, record: TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715662 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 } Op: Rollback } 2025-06-24T21:35:15.104586Z node 6 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715662, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-24T21:35:15.104708Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CheckWrite 2025-06-24T21:35:15.104789Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-24T21:35:15.104852Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CheckWrite 2025-06-24T21:35:15.104921Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-24T21:35:15.104975Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-24T21:35:15.105030Z node 6 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2023/281474976715664 IncompleteEdge# v{min} UnprotectedReadEdge# v2000/18446744073709551615 ImmediateWriteEdge# v2001/0 ImmediateWriteEdgeReplied# v2001/0 2025-06-24T21:35:15.105094Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037888 2025-06-24T21:35:15.105147Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 
2025-06-24T21:35:15.105177Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-24T21:35:15.105203Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit ExecuteWrite 2025-06-24T21:35:15.105227Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit ExecuteWrite 2025-06-24T21:35:15.105268Z node 6 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:6] at 72075186224037888 2025-06-24T21:35:15.105380Z node 6 :TX_DATASHARD TRACE: datashard_kqp.cpp:787: KqpEraseLock LockId: 281474976715662 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 2025-06-24T21:35:15.105449Z node 6 :TX_DATASHARD DEBUG: execute_write_unit.cpp:420: Skip empty write operation for [0:6] at 72075186224037888 2025-06-24T21:35:15.105514Z node 6 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-24T21:35:15.105607Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:35:15.105644Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit ExecuteWrite 2025-06-24T21:35:15.105705Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-24T21:35:15.105757Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T21:35:15.105794Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is DelayComplete 2025-06-24T21:35:15.105832Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-24T21:35:15.105888Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit CompletedOperations 2025-06-24T21:35:15.105937Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CompletedOperations 2025-06-24T21:35:15.105992Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-24T21:35:15.106019Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-06-24T21:35:15.106055Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished 2025-06-24T21:35:15.106134Z node 6 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-24T21:35:15.106192Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-24T21:35:15.106260Z node 6 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 6 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2025-06-24T21:35:15.106358Z node 6 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:35:15.106618Z node 6 :KQP_SESSION WARN: 
kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=6&id=ZDdmY2VjMmUtZDk3NDRlYmQtYzA5ZmRkZGUtZWVmOTNlZjI=, ActorId: [6:889:2701], ActorState: ExecuteState, TraceId: 01jyhxvph10nrdjmr8j68r3gn6, Create QueryResponse for error on request, msg: 2025-06-24T21:35:15.107761Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [6:61:2108], Recipient [6:626:2530]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715662 LockNode: 6 Status: STATUS_NOT_FOUND 2025-06-24T21:35:15.107996Z node 6 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_ABORTED;details=Distributed transaction aborted due to commit failure;tx_id=281474976715664; 2025-06-24T21:35:15.108232Z node 6 :TX_DATASHARD ERROR: datashard.cpp:751: Complete volatile write [2023 : 281474976715664] from 72075186224037889 at tablet 72075186224037889, error: Status: STATUS_ABORTED Issues: { message: "Distributed transaction aborted due to commit failure" issue_code: 2011 severity: 1 } 2025-06-24T21:35:15.108325Z node 6 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 |99.2%| [TM] {RESULT} ydb/core/tx/datashard/ut_rs/unittest >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--false] >> test_alloc_default.py::TestAlloc::test_up_down[kikimr0] >> TTxDataShardReshuffleKMeansScan::MainToPosting [GOOD] >> TTxDataShardReshuffleKMeansScan::MainToBuild |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_create_tablets.py::TestHive::test_when_create_tablets_after_bs_groups_and_kill_hive_then_tablets_start [GOOD] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_create_tablets.py::TestHive::test_when_create_tablets_then_can_lookup_them [GOOD] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> DataShardStats::HasSchemaChanges_Families [GOOD] >> KqpPg::ValuesInsert-useSink [GOOD] >> PgCatalog::PgType |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> KeyValueGRPCService::SimpleCreateAlterDropVolume [GOOD] >> KeyValueGRPCService::SimpleListPartitions |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> KeyValueGRPCService::SimpleListPartitions [GOOD] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_kill_tablets.py::TestKillTablets::test_when_kill_hive_it_will_be_restarted_and_can_create_tablets [GOOD] >> TMemoryController::ResourceBroker [GOOD] >> TMemoryController::ResourceBroker_ConfigLimit |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates0-no_updates] [GOOD] >> TEvaluateExprInViewTest::NakedCallToCurrentTimeFunction [GOOD] >> TSelectFromViewTest::OneTable ------- [TM] {asan, default-linux-x86_64, release} ydb/services/keyvalue/ut/unittest >> KeyValueGRPCService::SimpleListPartitions [GOOD] Test command err: 2025-06-24T21:32:37.045229Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631996821667969:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:32:37.045463Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0016e1/r3tmp/tmpDVvT4c/pdisk_1.dat 2025-06-24T21:32:37.849748Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:37.941360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:37.941460Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:37.946244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:32:38.045913Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 63249, node 1 2025-06-24T21:32:38.059460Z node 1 :GRPC_SERVER NOTICE: grpc_request_proxy.cpp:367: Grpc request proxy started, nodeid# 1, serve as static node 2025-06-24T21:32:38.067560Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:557: Subscribe to /Root 2025-06-24T21:32:38.067658Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:420: Got proxy service configuration 2025-06-24T21:32:38.067713Z node 1 :GRPC_SERVER NOTICE: grpc_request_proxy.cpp:367: Grpc request proxy started, nodeid# 1, serve as static node 2025-06-24T21:32:38.068158Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:557: Subscribe to /Root 2025-06-24T21:32:38.068197Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:420: Got proxy service configuration 2025-06-24T21:32:38.069424Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:403: Subscribed for config changes 2025-06-24T21:32:38.069438Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:411: Updated app config 2025-06-24T21:32:38.069500Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:403: Subscribed for config changes 2025-06-24T21:32:38.069520Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:411: Updated app config 2025-06-24T21:32:38.073040Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:32:38.073086Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:32:38.073230Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-24T21:32:38.073266Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-24T21:32:38.162936Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002c480] created request Name# BlobStorageConfig 2025-06-24T21:32:38.164121Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002ca80] created request Name# HiveCreateTablet 2025-06-24T21:32:38.164601Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002d080] created request Name# TabletStateRequest 2025-06-24T21:32:38.164975Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002d680] created request Name# SchemeOperationStatus 2025-06-24T21:32:38.165959Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002dc80] created request Name# ChooseProxy 
2025-06-24T21:32:38.166438Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002e280] created request Name# ResolveNode 2025-06-24T21:32:38.167334Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002e880] created request Name# FillNode 2025-06-24T21:32:38.167782Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002ee80] created request Name# DrainNode 2025-06-24T21:32:38.168094Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000c0080] created request Name# InterconnectDebug 2025-06-24T21:32:38.168364Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000c0680] created request Name# TestShardControl 2025-06-24T21:32:38.168634Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000c0c80] created request Name# RegisterNode 2025-06-24T21:32:38.168901Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000c1280] created request Name# CmsRequest 2025-06-24T21:32:38.169191Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000c1880] created request Name# ConsoleRequest 2025-06-24T21:32:38.170193Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000c1e80] created request Name# SchemeInitRoot 2025-06-24T21:32:38.170581Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000c2480] created request Name# PersQueueRequest 2025-06-24T21:32:38.171464Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000c2a80] created request Name# SchemeOperation 2025-06-24T21:32:38.171874Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000c3080] created request Name# SchemeDescribe 2025-06-24T21:32:38.366241Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:32:38.366271Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:32:38.366286Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:32:38.366510Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26990 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:32:39.182803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "hdd2-pool" Kind: "hdd2" } StoragePools { Name: "hdd-pool" Kind: "hdd" } StoragePools { Name: "hdd1-pool" Kind: "hdd1" } StoragePools { Name: "ssd-pool" Kind: "ssd" } StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-24T21:32:39.183700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T21:32:39.185462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-24T21:32:39.185494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 281474976715657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-24T21:32:39.188299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-24T21:32:39.188371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:32:39.195045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-24T21:32:39.203520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-24T21:32:39.203771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T21:32:39.203810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-24T21:32:39.203912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-06-24T21:32:39.203934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 2 -> 3 waiting... 
2025-06-24T21:32:39.216147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T21:32:39.216233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-24T21:32:39.216253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 281474976715657:0 3 -> 128 2025-06-24T21:32:39.219200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T21:32:39.219231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-24T21:32:39.219259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-24T21:32:39.219305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-06-24T21:32:39.238897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:32:39.239656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:32:39.239675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1637: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 0/1, is published: ... 
lications: 2, subscribers: 1 2025-06-24T21:35:17.089262Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 281474976715662, [OwnerId: 72057594046644480, LocalPathId: 2], 7 2025-06-24T21:35:17.089276Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:990: Publication details: tx: 281474976715662, [OwnerId: 72057594046644480, LocalPathId: 3], 18446744073709551615 2025-06-24T21:35:17.090344Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046644480, cookie: 281474976715662 2025-06-24T21:35:17.090459Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046644480, cookie: 281474976715662 2025-06-24T21:35:17.090482Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046644480, txId: 281474976715662 2025-06-24T21:35:17.090514Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715662, pathId: [OwnerId: 72057594046644480, LocalPathId: 3], version: 18446744073709551615 2025-06-24T21:35:17.090545Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-06-24T21:35:17.093316Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5914: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046644480, cookie: 281474976715662 2025-06-24T21:35:17.093443Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046644480, cookie: 281474976715662 2025-06-24T21:35:17.093466Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976715662 2025-06-24T21:35:17.093496Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715662, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 7 2025-06-24T21:35:17.093526Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-24T21:35:17.093626Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715662, subscribers: 1 2025-06-24T21:35:17.093656Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [33:7519632684061941612:2305] 2025-06-24T21:35:17.102503Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 
hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:35:17.102584Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:35:17.102601Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:1 hive 72057594037968897 at ss 72057594046644480 2025-06-24T21:35:17.102801Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715662 2025-06-24T21:35:17.102890Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715662 2025-06-24T21:35:17.107071Z node 33 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ListDirectoryRequest, traceId# 01jyhxvrkjewtrvhj443djh35f, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:55516, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-24T21:35:17.123721Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-24T21:35:17.124284Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-24T21:35:17.124667Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-24T21:35:17.124929Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-24T21:35:17.125090Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6018: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-24T21:35:17.125249Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-24T21:35:17.125644Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-24T21:35:17.125706Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-24T21:35:17.125784Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:511: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-24T21:35:17.133853Z node 33 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 33, TabletId: 72075186224037888 not found 2025-06-24T21:35:17.133901Z node 33 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 33, TabletId: 72075186224037889 not found 2025-06-24T21:35:17.133930Z node 33 :HIVE WARN: hive_impl.cpp:491: 
HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 33, TabletId: 72075186224037890 not found 2025-06-24T21:35:17.137848Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-24T21:35:17.137901Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-24T21:35:17.137973Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-24T21:35:17.137984Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-24T21:35:17.138008Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-24T21:35:17.138039Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-24T21:35:17.138093Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-24T21:35:17.139189Z node 33 :KEYVALUE DEBUG: keyvalue_flat_impl.h:365: KeyValue# 72075186224037890 OnTabletDead NKikimr::TEvTablet::TEvTabletDead 2025-06-24T21:35:17.140122Z node 33 :KEYVALUE DEBUG: keyvalue_flat_impl.h:365: KeyValue# 72075186224037888 OnTabletDead NKikimr::TEvTablet::TEvTabletDead 2025-06-24T21:35:17.140578Z node 33 :KEYVALUE DEBUG: keyvalue_flat_impl.h:365: KeyValue# 72075186224037889 OnTabletDead NKikimr::TEvTablet::TEvTabletDead 2025-06-24T21:35:17.164902Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000fd280] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-06-24T21:35:17.165347Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000110a80] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-06-24T21:35:17.165720Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000012680] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-06-24T21:35:17.166077Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00010f880] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-06-24T21:35:17.167517Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000a2680] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-06-24T21:35:17.167924Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000121880] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-06-24T21:35:17.168481Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000004280] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-06-24T21:35:17.168842Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f1280] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-06-24T21:35:17.169206Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000fea80] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-06-24T21:35:17.169554Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000122a80] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-06-24T21:35:17.169927Z node 
33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000107a80] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-06-24T21:35:17.170279Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000d3e80] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-06-24T21:35:17.170616Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f5480] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-06-24T21:35:17.170943Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000124e80] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-06-24T21:35:17.172781Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000d7a80] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-06-24T21:35:17.173186Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00002ca80] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-06-24T21:35:17.173555Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000139880] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_stats/unittest >> DataShardStats::HasSchemaChanges_Families [GOOD] Test command err: 2025-06-24T21:31:10.180863Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:10.181477Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:31:10.181736Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0018aa/r3tmp/tmpz96DQb/pdisk_1.dat 2025-06-24T21:31:10.825336Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:31:10.840167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:31:10.888562Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:10.895900Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1191: Update config MemoryLimit: 33554432 2025-06-24T21:31:10.897334Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750800666601606 != 1750800666601610 2025-06-24T21:31:10.950118Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:10.950261Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:10.965994Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:11.099021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:11.173163Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvBoot 2025-06-24T21:31:11.174288Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvRestored 2025-06-24T21:31:11.174820Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:31:11.175116Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:31:11.228820Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:616:2523], Recipient [1:625:2529]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-24T21:31:11.229523Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:31:11.229676Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: 
TDataShard::TTxInit::Execute 2025-06-24T21:31:11.231532Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:31:11.231626Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:31:11.231683Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:31:11.233460Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:31:11.233638Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:31:11.233752Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:31:11.246650Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:31:11.276674Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:31:11.277983Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:31:11.278231Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:31:11.278289Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:31:11.278322Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:31:11.278355Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:31:11.278542Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:625:2529], Recipient [1:625:2529]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-24T21:31:11.279559Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-24T21:31:11.283713Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:31:11.283868Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:31:11.283968Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:31:11.284014Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:31:11.284103Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:31:11.284147Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:31:11.284199Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:31:11.284237Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:31:11.284286Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 
2025-06-24T21:31:11.284445Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:630:2531], Recipient [1:625:2529]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:31:11.284489Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:31:11.284538Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:31:11.285779Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:371:2365], Recipient [1:630:2531] 2025-06-24T21:31:11.285847Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-24T21:31:11.286046Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:31:11.286358Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-24T21:31:11.286451Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:31:11.286565Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:31:11.286621Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-24T21:31:11.286667Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-24T21:31:11.286707Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-24T21:31:11.286745Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:31:11.287090Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-24T21:31:11.287166Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-24T21:31:11.287211Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-24T21:31:11.287246Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:31:11.287314Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-24T21:31:11.287348Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-24T21:31:11.287386Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-24T21:31:11.287452Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on 
unit WaitForPlan 2025-06-24T21:31:11.287482Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-24T21:31:11.289366Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:644:2540], Recipient [1:625:2529]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-24T21:31:11.289423Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:31:11.300433Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:31:11.300522Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-24T21:31:11.300562Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-24T21:31:11.300637Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:1 ... 186224037888 2025-06-24T21:35:20.318725Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [13:1111:2920], Recipient [13:886:2712]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046644480 Status: OK ServerId: [13:1115:2924] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-24T21:35:20.318806Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-24T21:35:20.321137Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552132, Sender [13:371:2365], Recipient [13:886:2712]: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715664 2025-06-24T21:35:20.321212Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvSchemaChangedResult 2025-06-24T21:35:20.321285Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715664 datashard 72075186224037888 state Ready 2025-06-24T21:35:20.321404Z node 13 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 waiting for schema changes 2025-06-24T21:35:20.336302Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [13:1111:2920], Recipient [13:886:2712]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046644480 ClientId: [13:1111:2920] ServerId: [13:1115:2924] } 2025-06-24T21:35:20.336427Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-24T21:35:21.207430Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [13:886:2712]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-24T21:35:21.207545Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:614: UpdateTableStats at datashard 72075186224037888 2025-06-24T21:35:21.207877Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:509: Skipped at datashard 72075186224037888, for tableId 2: RowCount: 3 DataSize: 130 IndexSize: 82 ByKeyFilterSize: 0 RowCountHistogram: 0 DataSizeHistogram: 0 PartCount 1, with schema changes 2025-06-24T21:35:21.208090Z node 13 :TX_DATASHARD TRACE: 
datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186224037888, FollowerId 0, tableId 2 Captured TEvDataShard::TEvPeriodicTableStats DatashardId: 72075186224037888 TableLocalId: 2 Generation: 2 Round: 9 TableStats { DataSize: 130 RowCount: 3 IndexSize: 82 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 3 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 20 HasLoanedParts: false Channels { Channel: 1 DataSize: 65 IndexSize: 82 } Channels { Channel: 2 DataSize: 65 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: true LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 2711 Memory: 124352 Storage: 254 } ShardState: 2 UserTablePartOwners: 72075186224037888 NodeId: 13 StartTime: 5451 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-24T21:35:21.209977Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [13:1148:2957], Recipient [13:886:2712]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-24T21:35:21.210113Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-24T21:35:21.210208Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [13:1147:2956], serverId# [13:1148:2957], sessionId# [0:0:0] 2025-06-24T21:35:21.210521Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553210, Sender [13:1146:2955], Recipient [13:886:2712]: NKikimrTxDataShard.TEvCompactTable PathId { OwnerId: 72057594046644480 LocalId: 2 } CompactBorrowed: false 2025-06-24T21:35:21.210766Z node 13 :TX_DATASHARD INFO: datashard__compaction.cpp:141: Started background compaction# 3 of 72075186224037888 tableId# 2 localTid# 1001, requested from [13:1146:2955], partsCount# 1, memtableSize# 0, memtableWaste# 0, memtableRows# 0 2025-06-24T21:35:21.212869Z node 13 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037888, table# 1001, finished edge# 2, ts 1970-01-01T00:00:20.452024Z 2025-06-24T21:35:21.212960Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:614: UpdateTableStats at datashard 72075186224037888 2025-06-24T21:35:21.213465Z node 13 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001, finished edge# 2, front# 3 2025-06-24T21:35:21.215956Z node 13 :TABLET_STATS_BUILDER TRACE: flat_stat_table.cpp:22: Building stats at datashard 72075186224037888, for tableId 2: starting for mixed index 2025-06-24T21:35:21.217273Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [13:883:2710], Recipient [13:886:2712]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-24T21:35:21.219372Z node 13 :TABLET_STATS_BUILDER TRACE: flat_stat_table.cpp:30: Building stats at datashard 72075186224037888, for tableId 2: finished for mixed index ready: 1 stats: RowCount: 3 DataSize: 130 IndexSize: 82 ByKeyFilterSize: 0 RowCountHistogram: 0 DataSizeHistogram: 0 2025-06-24T21:35:21.219514Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:177: Stats at datashard 72075186224037888, for tableId 2: RowCount: 3 DataSize: 130 IndexSize: 82 ByKeyFilterSize: 0 RowCountHistogram: 0 DataSizeHistogram: 0 PartCount: 1, with schema changes, LoadedSize 82, 
Spent{time=0.000s,wait=0.000s,interrupts=2} 2025-06-24T21:35:21.219921Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435080, Sender [13:1153:2961], Recipient [13:886:2712]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvAsyncTableStats 2025-06-24T21:35:21.220013Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:381: Result received at datashard 72075186224037888, for tableId 2: RowCount: 3 DataSize: 130 IndexSize: 82 ByKeyFilterSize: 0 RowCountHistogram: 0 DataSizeHistogram: 0 2025-06-24T21:35:21.220148Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186224037888, FollowerId 0, tableId 2 2025-06-24T21:35:21.301649Z node 13 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037888, table# 1001, finished edge# 3, ts 1970-01-01T00:00:30.452024Z 2025-06-24T21:35:21.301778Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:614: UpdateTableStats at datashard 72075186224037888 2025-06-24T21:35:21.301837Z node 13 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001, finished edge# 3, front# 3 2025-06-24T21:35:21.301905Z node 13 :TX_DATASHARD DEBUG: datashard__compaction.cpp:260: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001 sending TEvCompactTableResult to# [13:1146:2955]pathId# [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-24T21:35:21.304516Z node 13 :TABLET_STATS_BUILDER TRACE: flat_stat_table.cpp:22: Building stats at datashard 72075186224037888, for tableId 2: starting for mixed index 2025-06-24T21:35:21.304975Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [13:883:2710], Recipient [13:886:2712]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-24T21:35:21.306940Z node 13 :TABLET_STATS_BUILDER TRACE: flat_stat_table.cpp:30: Building stats at datashard 72075186224037888, for tableId 2: finished for mixed index ready: 1 stats: RowCount: 3 DataSize: 130 IndexSize: 82 ByKeyFilterSize: 0 RowCountHistogram: 0 DataSizeHistogram: 0 2025-06-24T21:35:21.307090Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:177: Stats at datashard 72075186224037888, for tableId 2: RowCount: 3 DataSize: 130 IndexSize: 82 ByKeyFilterSize: 0 RowCountHistogram: 0 DataSizeHistogram: 0 PartCount: 1, LoadedSize 82, Spent{time=0.000s,wait=0.000s,interrupts=2} 2025-06-24T21:35:21.307423Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435080, Sender [13:1160:2967], Recipient [13:886:2712]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvAsyncTableStats 2025-06-24T21:35:21.307490Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:381: Result received at datashard 72075186224037888, for tableId 2: RowCount: 3 DataSize: 130 IndexSize: 82 ByKeyFilterSize: 0 RowCountHistogram: 0 DataSizeHistogram: 0 2025-06-24T21:35:21.307577Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186224037888, FollowerId 0, tableId 2 waiting for no schema changes 2025-06-24T21:35:21.320173Z node 13 :TX_DATASHARD DEBUG: datashard__compaction.cpp:189: Updated last full compaction of tablet# 72075186224037888, tableId# 2, last full compaction# 1970-01-01T00:00:30.452024Z 2025-06-24T21:35:22.147769Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435073, Sender [0:0:0], Recipient [13:886:2712]: 
NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-06-24T21:35:22.147914Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3158: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-06-24T21:35:22.148079Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186224037888 outdated step 35000 last cleanup 0 2025-06-24T21:35:22.148191Z node 13 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:35:22.148276Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-24T21:35:22.148368Z node 13 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-24T21:35:22.148446Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-24T21:35:22.148694Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [13:886:2712]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-24T21:35:22.148754Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:614: UpdateTableStats at datashard 72075186224037888 2025-06-24T21:35:22.148973Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:509: Skipped at datashard 72075186224037888, for tableId 2: RowCount: 3 DataSize: 130 IndexSize: 82 ByKeyFilterSize: 0 RowCountHistogram: 0 DataSizeHistogram: 0 PartCount 1 2025-06-24T21:35:22.149148Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186224037888, FollowerId 0, tableId 2 Captured TEvDataShard::TEvPeriodicTableStats DatashardId: 72075186224037888 TableLocalId: 2 Generation: 2 Round: 12 TableStats { DataSize: 130 RowCount: 3 IndexSize: 82 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 3 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 30 HasLoanedParts: false Channels { Channel: 1 DataSize: 80 IndexSize: 82 } Channels { Channel: 2 DataSize: 50 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 2468 Memory: 124352 Storage: 254 } ShardState: 2 UserTablePartOwners: 72075186224037888 NodeId: 13 StartTime: 5451 TableOwnerId: 72057594046644480 FollowerId: 0 |99.2%| [TM] {RESULT} ydb/services/keyvalue/ut/unittest |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.2%| [TM] {RESULT} ydb/core/tx/datashard/ut_stats/unittest >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int32--2147483648-True] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int32--2147483648-False] >> test_kill_tablets.py::TestKillTablets::test_when_kill_keyvalue_tablet_it_will_be_restarted >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int32--2147483648-False] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int32-2147483647-True] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> 
TTxDataShardReshuffleKMeansScan::MainToBuild [GOOD] >> TTxDataShardReshuffleKMeansScan::BuildToPosting |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.2%| [TA] $(B)/ydb/tests/functional/bridge/test-results/py3test/{meta.json ... results_accumulator.log} |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test |99.2%| [TA] {RESULT} $(B)/ydb/tests/functional/bridge/test-results/py3test/{meta.json ... results_accumulator.log} |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_create_tablets.py::TestHive::test_when_create_tablets_then_can_lookup_them [GOOD] >> PgCatalog::PgType [GOOD] >> PgCatalog::InformationSchema >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--false] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int32-2147483647-True] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int32-2147483647-False] >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--false] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int32-2147483647-False] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[UInt32-0-True] >> KqpTpch::Query21 [GOOD] >> KqpTpch::Query22 >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--true] >> test_drain.py::TestHive::test_drain_tablets |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TMemoryController::ResourceBroker_ConfigLimit [GOOD] >> TMemoryController::ResourceBroker_ConfigCS |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TAsyncIndexTests::MergeIndexWithReboots[TabletReboots] [GOOD] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_table-True] [GOOD] >> RetryPolicy::TWriteSession_SeqNoShift [GOOD] >> RetryPolicy::RetryWithBatching |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_table-True] [GOOD] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--false] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[UInt32-0-True] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[UInt32-0-False] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::MergeIndexWithReboots[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] 
IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T21:30:29.663003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:30:29.663098Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:29.663136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:30:29.671372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:30:29.671455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:30:29.671504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:30:29.671602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:29.671703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:30:29.672545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:30:29.672941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:30:29.763277Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:30:29.763341Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:29.764031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T21:30:29.781364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:30:29.781924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:30:29.782110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 
2025-06-24T21:30:29.794516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:30:29.794797Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:30:29.795583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:29.795925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:30:29.801330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:29.801592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:30:29.802887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:29.802956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:29.803192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:30:29.803257Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:29.803326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:30:29.803468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T21:30:29.812096Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:30:29.982617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:29.982882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:29.983130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:30:29.983202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 
72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:30:29.983450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:30:29.983581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:29.988482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:29.988713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:30:29.988971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:29.989058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:30:29.989105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:30:29.989184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:30:29.991580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:29.991643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:29.991683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:30:29.993779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:29.993826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:29.993874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:29.993945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:30:29.997899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:30.000299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 
72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:30:30.000517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:30:30.001656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:30.001845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969454 } } Step: 5000001 Media ... SizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableIndexes { Name: "UserDefinedIndex" LocalPathId: 4 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 
67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 
MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:35:33.010697Z node 105 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:35:33.010961Z node 105 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 308us result status StatusSuccess 2025-06-24T21:35:33.011909Z node 105 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 
ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[UInt32-0-False] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[UInt32-4294967295-True] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_kill_tablets.py::TestKillTablets::test_then_kill_system_tablets_and_it_increases_generation [GOOD] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_async_table-True] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TTxDataShardReshuffleKMeansScan::BuildToPosting [GOOD] >> TTxDataShardReshuffleKMeansScan::BuildToBuild >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_async_table-True] >> PgCatalog::InformationSchema [GOOD] >> PgCatalog::CheckSetConfig >> test_kill_tablets.py::TestKillTablets::test_when_kill_keyvalue_tablet_it_will_be_restarted [GOOD] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TSelectFromViewTest::OneTable [GOOD] >> 
TSelectFromViewTest::OneTableUsingRelativeName >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[UInt32-4294967295-True] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[UInt32-4294967295-False] >> KqpTpch::Query22 [GOOD] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[UInt32-4294967295-False] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int64--9223372036854775808-True] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test >> TMemoryController::ResourceBroker_ConfigCS [GOOD] >> TMemTableMemoryConsumersCollection::Empty [GOOD] >> TMemTableMemoryConsumersCollection::Destruction [GOOD] >> TMemTableMemoryConsumersCollection::Register [GOOD] >> TMemTableMemoryConsumersCollection::Unregister [GOOD] >> TMemTableMemoryConsumersCollection::SetConsumption [GOOD] >> TMemTableMemoryConsumersCollection::CompactionComplete [GOOD] >> TMemTableMemoryConsumersCollection::SelectForCompaction [GOOD] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--true] >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_simple_table-False] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/memory_controller/ut/unittest >> TMemTableMemoryConsumersCollection::SelectForCompaction [GOOD] Test command err: ResourceBrokerSelfConfig: LimitBytes: 0 QueryExecutionLimitBytes: 0 ColumnTablesCompactionLimitBytes: 0 2025-06-24T21:33:21.515104Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:228: Periodic memory stats: AnonRss: none CGroupLimit: none MemTotal: none MemAvailable: none AllocatedMemory: 0B AllocatorCachesMemory: 0B HardLimit: 200MiB SoftLimit: 150MiB TargetUtilization: 100MiB ActivitiesLimitBytes: 60MiB ConsumersConsumption: 0B OtherConsumption: 0B ExternalConsumption: 0B TargetConsumersConsumption: 100MiB ResultingConsumersConsumption: 6MiB Coefficient: 0.9999990463 2025-06-24T21:33:21.517987Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:273: Consumer MemTable state: Consumption: 0B Limit: 6MiB Min: 2MiB Max: 6MiB 2025-06-24T21:33:21.519066Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:380: Consumer QueryExecution state: Consumption: 0B Limit: 40MiB 2025-06-24T21:33:21.521707Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:149: Bootstrapped with config HardLimitBytes: 209715200 2025-06-24T21:33:21.552428Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-24T21:33:21.561149Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1265: Bootstrap with config MemoryLimit: 33554432 2025-06-24T21:33:25.153795Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:298: Consumer SharedCache [1:20:2067] registered 2025-06-24T21:33:25.155476Z node 1 :RESOURCE_BROKER INFO: resource_broker.cpp:1189: New config diff: Queues { Name: "queue_kqp_resource_manager" Limit { Memory: 41943040 } } Queues { Name: "queue_cs_indexation" Limit { Memory: 4194304 } } Queues { Name: "queue_cs_ttl" Limit { 
Memory: 4194304 } } Queues { Name: "queue_cs_general" Limit { Memory: 12582912 } } Queues { Name: "queue_cs_normalizer" Limit { Memory: 12582912 } } ResourceLimit { Memory: 62914560 } 2025-06-24T21:33:25.156243Z node 1 :RESOURCE_BROKER INFO: resource_broker.cpp:1195: New config: Queues { Name: "queue_default" Weight: 30 Limit { Cpu: 2 } } Queues { Name: "queue_compaction_gen0" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_compaction_gen1" Weight: 100 Limit { Cpu: 6 } } Queues { Name: "queue_compaction_gen2" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_compaction_gen3" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_compaction_borrowed" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_cs_indexation" Weight: 100 Limit { Cpu: 3 Memory: 4194304 } } Queues { Name: "queue_cs_ttl" Weight: 100 Limit { Cpu: 3 Memory: 4194304 } } Queues { Name: "queue_cs_general" Weight: 100 Limit { Cpu: 3 Memory: 12582912 } } Queues { Name: "queue_cs_scan_read" Weight: 100 Limit { Cpu: 3 Memory: 3221225472 } } Queues { Name: "queue_cs_normalizer" Weight: 100 Limit { Cpu: 3 Memory: 12582912 } } Queues { Name: "queue_transaction" Weight: 100 Limit { Cpu: 4 } } Queues { Name: "queue_background_compaction" Weight: 10 Limit { Cpu: 1 } } Queues { Name: "queue_scan" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_backup" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_restore" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_kqp_resource_manager" Weight: 30 Limit { Cpu: 4 Memory: 41943040 } } Queues { Name: "queue_build_index" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_ttl" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_datashard_build_stats" Weight: 100 Limit { Cpu: 1 } } Queues { Name: "queue_cdc_initial_scan" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_statistics_scan" Weight: 100 Limit { Cpu: 1 } } Tasks { Name: "unknown" QueueName: "queue_default" DefaultDuration: 60000000 } Tasks { Name: "compaction_gen0" QueueName: "queue_compaction_gen0" DefaultDuration: 10000000 } Tasks { Name: "compaction_gen1" QueueName: "queue_compaction_gen1" DefaultDuration: 30000000 } Tasks { Name: "compaction_gen2" QueueName: "queue_compaction_gen2" DefaultDuration: 120000000 } Tasks { Name: "compaction_gen3" QueueName: "queue_compaction_gen3" DefaultDuration: 600000000 } Tasks { Name: "compaction_borrowed" QueueName: "queue_compaction_borrowed" DefaultDuration: 600000000 } Tasks { Name: "CS::TTL" QueueName: "queue_cs_ttl" DefaultDuration: 600000000 } Tasks { Name: "CS::INDEXATION" QueueName: "queue_cs_indexation" DefaultDuration: 600000000 } Tasks { Name: "CS::GENERAL" QueueName: "queue_cs_general" DefaultDuration: 600000000 } Tasks { Name: "CS::SCAN_READ" QueueName: "queue_cs_scan_read" DefaultDuration: 600000000 } Tasks { Name: "CS::NORMALIZER" QueueName: "queue_cs_normalizer" DefaultDuration: 600000000 } Tasks { Name: "transaction" QueueName: "queue_transaction" DefaultDuration: 600000000 } Tasks { Name: "background_compaction" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen0" QueueName: "queue_background_compaction" DefaultDuration: 10000000 } Tasks { Name: "background_compaction_gen1" QueueName: "queue_background_compaction" DefaultDuration: 20000000 } Tasks { Name: "background_compaction_gen2" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen3" QueueName: "queue_background_compaction" DefaultDuration: 300000000 } Tasks { Name: "scan" QueueName: 
"queue_scan" DefaultDuration: 300000000 } Tasks { Name: "backup" QueueName: "queue_backup" DefaultDuration: 300000000 } Tasks { Name: "restore" QueueName: "queue_restore" DefaultDuration: 300000000 } Tasks { Name: "kqp_query" QueueName: "queue_kqp_resource_manager" DefaultDuration: 600000000 } Tasks { Name: "build_index" QueueName: "queue_build_index" DefaultDuration: 600000000 } Tasks { Name: "ttl" QueueName: "queue_ttl" DefaultDuration: 300000000 } Tasks { Name: "datashard_build_stats" QueueName: "queue_datashard_build_stats" DefaultDuration: 5000000 } Tasks { Name: "cdc_initial_scan" QueueName: "queue_cdc_initial_scan" DefaultDuration: 600000000 } Tasks { Name: "statistics_scan" QueueName: "queue_statistics_scan" DefaultDuration: 600000000 } ResourceLimit { Cpu: 256 Memory: 62914560 } 2025-06-24T21:33:25.157425Z node 1 :RESOURCE_BROKER INFO: resource_broker.cpp:1240: Configure result: Success: true 2025-06-24T21:33:25.157764Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:343: Register memory consumer 2025-06-24T21:33:25.166090Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:328: ResourceBroker configure result Success: true 2025-06-24T21:33:25.260088Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2327], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:33:25.260534Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:33:25.260707Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-24T21:33:25.454764Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 1 registered 2025-06-24T21:33:25.456038Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 2 registered 2025-06-24T21:33:25.456258Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 3 registered 2025-06-24T21:33:25.456494Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 4 registered 2025-06-24T21:33:25.456577Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 120 registered 2025-06-24T21:33:25.457554Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 5 registered 2025-06-24T21:33:25.461887Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 121 registered 2025-06-24T21:33:25.465965Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 6 registered 2025-06-24T21:33:25.470753Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 122 registered 2025-06-24T21:33:25.477136Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 7 registered 2025-06-24T21:33:25.478472Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 123 registered 2025-06-24T21:33:25.479798Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 8 registered 2025-06-24T21:33:25.480326Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 125 registered 2025-06-24T21:33:25.480416Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 126 registered 2025-06-24T21:33:25.480756Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 127 registered 2025-06-24T21:33:25.480931Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 128 registered 2025-06-24T21:33:25.481145Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 129 registered 2025-06-24T21:33:25.481833Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 100 registered 2025-06-24T21:33:25.482155Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 101 registered 2025-06-24T21:33:25.482333Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 131 registered 2025-06-24T21:33:25.483715Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 102 registered 2025-06-24T21:33:25.483930Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 103 registered 2025-06-24T21:33:25.486746Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:398:2360] 105 registered 2025-06-24T21:33:25.498098Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:417:2362] 7 
registered 2025-06-24T21:33:25.498402Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:417:2362] 1 registered 2025-06-24T21:33:25.499657Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:417:2362] 2 registered 2025-06-24T21:33:25.501246Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:417:2362] 3 registered 2025-06-24T21:33:25.501322Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:417:2362] 4 registered 2025-06-24T21:33:25.501532Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:417:2362] 5 registered 2025-06-24T21:33:25.501659Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:417:2362] 6 registered 2025-06-24T21:33:25.503511Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:422:2364] 102 registered 2025-06-24T21:33:25.508485Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:422:2364] 103 registered 2025-06-24T21:33:25.508628Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:422:2364] 1 registered 2025-06-24T21:33:25.509833Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:422:2364] 104 registered 2025-06-24T21:33:25.510051Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:422:2364] 2 registered 2025-06-24T21:33:25.510159Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:422:2364] 3 registered 2025-06-24T21:33:25.510323Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:422:2364] 4 registered 2025-06-24T21:33:25.517519Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:422:2364] 5 registered 2025-06-24T21:33:25.525656Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:422:2364] 6 registered ... 
25829120 } } Queues { Name: "queue_transaction" Weight: 100 Limit { Cpu: 4 } } Queues { Name: "queue_background_compaction" Weight: 10 Limit { Cpu: 1 } } Queues { Name: "queue_scan" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_backup" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_restore" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_kqp_resource_manager" Weight: 30 Limit { Cpu: 4 Memory: 209715200 } } Queues { Name: "queue_build_index" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_ttl" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_datashard_build_stats" Weight: 100 Limit { Cpu: 1 } } Queues { Name: "queue_cdc_initial_scan" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_statistics_scan" Weight: 100 Limit { Cpu: 1 } } Tasks { Name: "unknown" QueueName: "queue_default" DefaultDuration: 60000000 } Tasks { Name: "compaction_gen0" QueueName: "queue_compaction_gen0" DefaultDuration: 10000000 } Tasks { Name: "compaction_gen1" QueueName: "queue_compaction_gen1" DefaultDuration: 30000000 } Tasks { Name: "compaction_gen2" QueueName: "queue_compaction_gen2" DefaultDuration: 120000000 } Tasks { Name: "compaction_gen3" QueueName: "queue_compaction_gen3" DefaultDuration: 600000000 } Tasks { Name: "compaction_borrowed" QueueName: "queue_compaction_borrowed" DefaultDuration: 600000000 } Tasks { Name: "CS::TTL" QueueName: "queue_cs_ttl" DefaultDuration: 600000000 } Tasks { Name: "CS::INDEXATION" QueueName: "queue_cs_indexation" DefaultDuration: 600000000 } Tasks { Name: "CS::GENERAL" QueueName: "queue_cs_general" DefaultDuration: 600000000 } Tasks { Name: "CS::SCAN_READ" QueueName: "queue_cs_scan_read" DefaultDuration: 600000000 } Tasks { Name: "CS::NORMALIZER" QueueName: "queue_cs_normalizer" DefaultDuration: 600000000 } Tasks { Name: "transaction" QueueName: "queue_transaction" DefaultDuration: 600000000 } Tasks { Name: "background_compaction" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen0" QueueName: "queue_background_compaction" DefaultDuration: 10000000 } Tasks { Name: "background_compaction_gen1" QueueName: "queue_background_compaction" DefaultDuration: 20000000 } Tasks { Name: "background_compaction_gen2" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen3" QueueName: "queue_background_compaction" DefaultDuration: 300000000 } Tasks { Name: "scan" QueueName: "queue_scan" DefaultDuration: 300000000 } Tasks { Name: "backup" QueueName: "queue_backup" DefaultDuration: 300000000 } Tasks { Name: "restore" QueueName: "queue_restore" DefaultDuration: 300000000 } Tasks { Name: "kqp_query" QueueName: "queue_kqp_resource_manager" DefaultDuration: 600000000 } Tasks { Name: "build_index" QueueName: "queue_build_index" DefaultDuration: 600000000 } Tasks { Name: "ttl" QueueName: "queue_ttl" DefaultDuration: 300000000 } Tasks { Name: "datashard_build_stats" QueueName: "queue_datashard_build_stats" DefaultDuration: 5000000 } Tasks { Name: "cdc_initial_scan" QueueName: "queue_cdc_initial_scan" DefaultDuration: 600000000 } Tasks { Name: "statistics_scan" QueueName: "queue_statistics_scan" DefaultDuration: 600000000 } ResourceLimit { Cpu: 256 Memory: 1048576000 } 2025-06-24T21:35:39.865624Z node 10 :RESOURCE_BROKER INFO: resource_broker.cpp:1240: Configure result: Success: true 2025-06-24T21:35:39.866496Z node 10 :TABLET_SAUSAGECACHE INFO: shared_sausagecache.cpp:353: Limit memory consumer with 472MiB 2025-06-24T21:35:39.866761Z node 10 :TX_CONVEYOR 
ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:35:39.876330Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:328: ResourceBroker configure result Success: true 2025-06-24T21:35:40.085340Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:228: Periodic memory stats: AnonRss: none CGroupLimit: 1000MiB MemTotal: none MemAvailable: none AllocatedMemory: 0B AllocatorCachesMemory: 0B HardLimit: 1000MiB SoftLimit: 750MiB TargetUtilization: 500MiB ActivitiesLimitBytes: 1000MiB ConsumersConsumption: 33.5KiB OtherConsumption: 0B ExternalConsumption: 0B TargetConsumersConsumption: 500MiB ResultingConsumersConsumption: 500MiB Coefficient: 0.90625 2025-06-24T21:35:40.086039Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:273: Consumer SharedCache state: Consumption: 0B Limit: 472MiB Min: 200MiB Max: 500MiB 2025-06-24T21:35:40.086110Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:273: Consumer MemTable state: Consumption: 33.5KiB Limit: 28.1MiB Min: 10MiB Max: 30MiB 2025-06-24T21:35:40.086154Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:380: Consumer QueryExecution state: Consumption: 1.94MiB Limit: 200MiB 2025-06-24T21:35:40.086274Z node 10 :TABLET_SAUSAGECACHE INFO: shared_sausagecache.cpp:353: Limit memory consumer with 472MiB 2025-06-24T21:35:40.237616Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:228: Periodic memory stats: AnonRss: none CGroupLimit: 50MiB MemTotal: none MemAvailable: none AllocatedMemory: 0B AllocatorCachesMemory: 0B HardLimit: 50MiB SoftLimit: 37.5MiB TargetUtilization: 25MiB ActivitiesLimitBytes: 1000MiB ConsumersConsumption: 33.6KiB OtherConsumption: 0B ExternalConsumption: 0B TargetConsumersConsumption: 25MiB ResultingConsumersConsumption: 25MiB Coefficient: 0.90625 2025-06-24T21:35:40.238483Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:273: Consumer SharedCache state: Consumption: 0B Limit: 23.6MiB Min: 10MiB Max: 25MiB 2025-06-24T21:35:40.238589Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:273: Consumer MemTable state: Consumption: 33.6KiB Limit: 1.41MiB Min: 512KiB Max: 1.5MiB 2025-06-24T21:35:40.238649Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:380: Consumer QueryExecution state: Consumption: 1.94MiB Limit: 10MiB 2025-06-24T21:35:40.239076Z node 10 :RESOURCE_BROKER INFO: resource_broker.cpp:1189: New config diff: Queues { Name: "queue_kqp_resource_manager" Limit { Memory: 10485760 } } Queues { Name: "queue_cs_indexation" Limit { Memory: 2097152 } } Queues { Name: "queue_cs_ttl" Limit { Memory: 2097152 } } Queues { Name: "queue_cs_general" Limit { Memory: 6291456 } } Queues { Name: "queue_cs_normalizer" Limit { Memory: 6291456 } } ResourceLimit { Memory: 1048576000 } 2025-06-24T21:35:40.240419Z node 10 :RESOURCE_BROKER INFO: resource_broker.cpp:1195: New config: Queues { Name: "queue_default" Weight: 30 Limit { Cpu: 2 } } Queues { Name: "queue_compaction_gen0" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_compaction_gen1" Weight: 100 Limit { Cpu: 6 } } Queues { Name: "queue_compaction_gen2" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_compaction_gen3" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_compaction_borrowed" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_cs_indexation" Weight: 100 Limit { Cpu: 3 Memory: 2097152 } } Queues { Name: "queue_cs_ttl" Weight: 100 Limit { Cpu: 3 Memory: 2097152 } } Queues { Name: "queue_cs_general" Weight: 100 Limit { Cpu: 3 Memory: 6291456 } 
} Queues { Name: "queue_cs_scan_read" Weight: 100 Limit { Cpu: 3 Memory: 3221225472 } } Queues { Name: "queue_cs_normalizer" Weight: 100 Limit { Cpu: 3 Memory: 6291456 } } Queues { Name: "queue_transaction" Weight: 100 Limit { Cpu: 4 } } Queues { Name: "queue_background_compaction" Weight: 10 Limit { Cpu: 1 } } Queues { Name: "queue_scan" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_backup" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_restore" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_kqp_resource_manager" Weight: 30 Limit { Cpu: 4 Memory: 10485760 } } Queues { Name: "queue_build_index" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_ttl" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_datashard_build_stats" Weight: 100 Limit { Cpu: 1 } } Queues { Name: "queue_cdc_initial_scan" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_statistics_scan" Weight: 100 Limit { Cpu: 1 } } Tasks { Name: "unknown" QueueName: "queue_default" DefaultDuration: 60000000 } Tasks { Name: "compaction_gen0" QueueName: "queue_compaction_gen0" DefaultDuration: 10000000 } Tasks { Name: "compaction_gen1" QueueName: "queue_compaction_gen1" DefaultDuration: 30000000 } Tasks { Name: "compaction_gen2" QueueName: "queue_compaction_gen2" DefaultDuration: 120000000 } Tasks { Name: "compaction_gen3" QueueName: "queue_compaction_gen3" DefaultDuration: 600000000 } Tasks { Name: "compaction_borrowed" QueueName: "queue_compaction_borrowed" DefaultDuration: 600000000 } Tasks { Name: "CS::TTL" QueueName: "queue_cs_ttl" DefaultDuration: 600000000 } Tasks { Name: "CS::INDEXATION" QueueName: "queue_cs_indexation" DefaultDuration: 600000000 } Tasks { Name: "CS::GENERAL" QueueName: "queue_cs_general" DefaultDuration: 600000000 } Tasks { Name: "CS::SCAN_READ" QueueName: "queue_cs_scan_read" DefaultDuration: 600000000 } Tasks { Name: "CS::NORMALIZER" QueueName: "queue_cs_normalizer" DefaultDuration: 600000000 } Tasks { Name: "transaction" QueueName: "queue_transaction" DefaultDuration: 600000000 } Tasks { Name: "background_compaction" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen0" QueueName: "queue_background_compaction" DefaultDuration: 10000000 } Tasks { Name: "background_compaction_gen1" QueueName: "queue_background_compaction" DefaultDuration: 20000000 } Tasks { Name: "background_compaction_gen2" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen3" QueueName: "queue_background_compaction" DefaultDuration: 300000000 } Tasks { Name: "scan" QueueName: "queue_scan" DefaultDuration: 300000000 } Tasks { Name: "backup" QueueName: "queue_backup" DefaultDuration: 300000000 } Tasks { Name: "restore" QueueName: "queue_restore" DefaultDuration: 300000000 } Tasks { Name: "kqp_query" QueueName: "queue_kqp_resource_manager" DefaultDuration: 600000000 } Tasks { Name: "build_index" QueueName: "queue_build_index" DefaultDuration: 600000000 } Tasks { Name: "ttl" QueueName: "queue_ttl" DefaultDuration: 300000000 } Tasks { Name: "datashard_build_stats" QueueName: "queue_datashard_build_stats" DefaultDuration: 5000000 } Tasks { Name: "cdc_initial_scan" QueueName: "queue_cdc_initial_scan" DefaultDuration: 600000000 } Tasks { Name: "statistics_scan" QueueName: "queue_statistics_scan" DefaultDuration: 600000000 } ResourceLimit { Cpu: 256 Memory: 1048576000 } 2025-06-24T21:35:40.242461Z node 10 :RESOURCE_BROKER INFO: resource_broker.cpp:1240: Configure result: Success: true 
2025-06-24T21:35:40.242907Z node 10 :TABLET_SAUSAGECACHE INFO: shared_sausagecache.cpp:353: Limit memory consumer with 23.6MiB 2025-06-24T21:35:40.243653Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:328: ResourceBroker configure result Success: true 2025-06-24T21:35:40.384347Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:228: Periodic memory stats: AnonRss: none CGroupLimit: 50MiB MemTotal: none MemAvailable: none AllocatedMemory: 0B AllocatorCachesMemory: 0B HardLimit: 50MiB SoftLimit: 37.5MiB TargetUtilization: 25MiB ActivitiesLimitBytes: 1000MiB ConsumersConsumption: 33.7KiB OtherConsumption: 0B ExternalConsumption: 0B TargetConsumersConsumption: 25MiB ResultingConsumersConsumption: 25MiB Coefficient: 0.90625 2025-06-24T21:35:40.385005Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:273: Consumer SharedCache state: Consumption: 0B Limit: 23.6MiB Min: 10MiB Max: 25MiB 2025-06-24T21:35:40.385069Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:273: Consumer MemTable state: Consumption: 33.7KiB Limit: 1.41MiB Min: 512KiB Max: 1.5MiB 2025-06-24T21:35:40.385111Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:380: Consumer QueryExecution state: Consumption: 1.94MiB Limit: 10MiB 2025-06-24T21:35:40.385227Z node 10 :TABLET_SAUSAGECACHE INFO: shared_sausagecache.cpp:353: Limit memory consumer with 23.6MiB |99.3%| [TM] {RESULT} ydb/core/memory_controller/ut/unittest |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_kill_tablets.py::TestKillTablets::test_then_kill_system_tablets_and_it_increases_generation [GOOD] >> KafkaProtocol::BalanceScenarioForFederation [GOOD] >> KafkaProtocol::BalanceScenarioCdc >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int64--9223372036854775808-True] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int64--9223372036854775808-False] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test ------- [TM] {asan, default-linux-x86_64, pic, release} ydb/core/kqp/tests/kikimr_tpch/unittest >> KqpTpch::Query22 [GOOD] Test command err: -- result -- rowIndex: 0 rowIndex: 4 -- result -- rowIndex: 0 rowIndex: 1 -- result -- rowIndex: 0 rowIndex: 10 -- result -- rowIndex: 0 rowIndex: 5 -- result -- rowIndex: 0 rowIndex: 2 -- result -- rowIndex: 0 rowIndex: 1 -- result -- rowIndex: 0 rowIndex: 4 -- result -- rowIndex: 0 rowIndex: 4 -- result -- rowIndex: 0 rowIndex: 10 -- result -- rowIndex: 0 rowIndex: 20 -- result -- rowIndex: 0 rowIndex: 10 -- result -- rowIndex: 0 rowIndex: 2 -- result -- rowIndex: 0 rowIndex: 28 -- result -- rowIndex: 0 rowIndex: 1 -- result -- rowIndex: 0 rowIndex: 1 -- result -- rowIndex: 0 rowIndex: 37 -- result -- rowIndex: 0 rowIndex: 1 -- result -- rowIndex: 0 rowIndex: 4 -- result -- rowIndex: 0 rowIndex: 1 -- result -- rowIndex: 0 rowIndex: 1 -- result -- rowIndex: 0 rowIndex: 1 -- result -- rowIndex: 0 rowIndex: 5 |99.3%| [TM] {RESULT} ydb/core/kqp/tests/kikimr_tpch/unittest >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert+UseSink+UseDataQuery [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int64--9223372036854775808-False] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int64-9223372036854775807-True] >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--false] >> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--true] >> 
test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--false] >> TTxDataShardReshuffleKMeansScan::BuildToBuild [GOOD] >> TTxDataShardSampleKScan::BadRequest |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_kill_tablets.py::TestKillTablets::test_when_kill_keyvalue_tablet_it_will_be_restarted [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert+UseSink+UseDataQuery [GOOD] Test command err: Trying to start YDB, gRPC: 9143, MsgBus: 8802 2025-06-24T21:27:03.012847Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519630562776234019:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:03.012941Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0032fb/r3tmp/tmpk4LwFQ/pdisk_1.dat 2025-06-24T21:27:03.349006Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9143, node 1 2025-06-24T21:27:03.390881Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:27:03.391074Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:27:03.393064Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:27:03.422109Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:27:03.422146Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:27:03.422156Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:27:03.422317Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8802 TClient is connected to server localhost:8802 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-24T21:27:03.970583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:27:03.995555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:04.043583Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:27:04.139766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:04.282285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:04.361825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:27:05.992992Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630571366170237:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:05.993127Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.279388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.311341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.338834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.365848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.393258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.439583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.508911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:27:06.559283Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630575661138194:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.559357Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.559442Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519630575661138199:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:27:06.563086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:27:06.573219Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519630575661138201:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-24T21:27:06.640223Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519630575661138252:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } [[[1u];["One"]];[[2u];["Two"]]] [[[1u];["One"]];[[2u];["Two"]]] 2025-06-24T21:27:08.013823Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519630562776234019:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:08.013926Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 22665, MsgBus: 19571 2025-06-24T21:27:08.567252Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519630583816249111:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:27:08.567422Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0032fb/r3tmp/tmpZdt2cx/pdisk_1.dat 2025-06-24T21:27:08.670172Z node 2 :IMPORT WARN: schemeshard_impo ... connected 2025-06-24T21:35:32.431402Z node 42 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(42, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:35:32.433911Z node 42 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(42, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29510, node 42 2025-06-24T21:35:32.491842Z node 42 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:35:32.491867Z node 42 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:35:32.491883Z node 42 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:35:32.492074Z node 42 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19058 TClient is connected to server localhost:19058 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-24T21:35:33.191522Z node 42 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:35:33.250406Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:35:33.263553Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:35:33.279741Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:35:33.379017Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:35:33.675590Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 2025-06-24T21:35:33.811401Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... 
2025-06-24T21:35:37.183335Z node 42 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[42:7519632748580205956:2173];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:35:37.183429Z node 42 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:35:38.991450Z node 42 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [42:7519632774350011242:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:35:38.991613Z node 42 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:35:39.090437Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:35:39.228130Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:35:39.327880Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:35:39.425571Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:35:39.500115Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:35:39.572056Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:35:39.657452Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:35:39.803703Z node 42 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [42:7519632778644979214:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:35:39.803876Z node 42 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:35:39.804447Z node 42 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [42:7519632778644979219:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:35:39.812449Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:35:39.844226Z node 42 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [42:7519632778644979221:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-24T21:35:39.944416Z node 42 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [42:7519632778644979274:3432] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:35:42.235619Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:35:42.332440Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:35:42.417034Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) [[["4ef6340b-4723-4f56-b587-7fb4b27cd8f1"]]] [[["4ef6340b-4723-4f56-b587-7fb4b27cd8f1"]]] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int64-9223372036854775807-True] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int64-9223372036854775807-False] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int64-9223372036854775807-False] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Uint64-0-True] >> test_drain.py::TestHive::test_drain_on_stop [FAIL] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_success |99.3%| [TA] $(B)/ydb/core/kqp/ut/opt/test-results/unittest/{meta.json ... results_accumulator.log} |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test |99.3%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/opt/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_discovery.py::TestDiscoveryExtEndpoint::test_scenario >> test_alloc_default.py::TestAlloc::test_up_down[kikimr0] [GOOD] >> TSelectFromViewTest::OneTableUsingRelativeName [GOOD] >> TSelectFromViewTest::DisabledFeatureFlag >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Uint64-0-True] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Uint64-0-False] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Uint64-0-False] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Uint64-18446744073709551615-True] >> TTxDataShardSampleKScan::BadRequest [GOOD] >> TTxDataShardSampleKScan::RunScan >> PgCatalog::CheckSetConfig [FAIL] >> PgCatalog::PgDatabase+useSink >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--true] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Uint64-18446744073709551615-True] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Uint64-18446744073709551615-False] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Uint64-18446744073709551615-False] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_dynumber >> test_kv.py::TestYdbKvWorkload::test_dynumber [GOOD] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_drain.py::TestHive::test_drain_on_stop [FAIL] >> test_alloc_default.py::TestAlloc::test_mkql_not_increased[kikimr0] >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--true] >> BulkUpsert::BulkUpsert [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_success [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_add_new_column >> KafkaProtocol::BalanceScenarioCdc [GOOD] >> KafkaProtocol::OffsetCommitAndFetchScenario >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_add_new_column [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_change_column_type >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_change_column_type [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_remove_column >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_remove_column [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_add_to_key >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_add_to_key [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_remove_from_key >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_remove_from_key [GOOD] >> TDqPqRdReadActorTests::Backpressure [GOOD] >> TDqPqRdReadActorTests::RowDispatcherIsRestarted2 >> test_serverless.py::test_create_table[enable_alter_database_create_hive_first--false] >> TSelectFromViewTest::DisabledFeatureFlag [GOOD] >> TSelectFromViewTest::ReadTestCasesFromFiles >> TDqPqRdReadActorTests::RowDispatcherIsRestarted2 
[GOOD] >> TTxDataShardSampleKScan::RunScan [GOOD] >> test_public_api.py::TestExplain::test_explain_data_query >> TDqPqRdReadActorTests::TwoPartitionsRowDispatcherIsRestarted >> RetryPolicy::RetryWithBatching [GOOD] >> PgCatalog::PgDatabase+useSink [GOOD] >> PgCatalog::PgDatabase-useSink >> TDqPqRdReadActorTests::TwoPartitionsRowDispatcherIsRestarted [GOOD] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/bulk_upsert/gtest >> BulkUpsert::BulkUpsert [GOOD] |99.3%| [TM] {RESULT} ydb/public/sdk/cpp/tests/integration/bulk_upsert/gtest >> TDqPqRdReadActorTests::IgnoreMessageIfNoSessions ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> RetryPolicy::RetryWithBatching [GOOD] Test command err: 2025-06-24T21:29:45.319913Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.319978Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.320013Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-24T21:29:45.323711Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-24T21:29:45.323777Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.323808Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.339969Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.009304s 2025-06-24T21:29:45.347459Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-24T21:29:45.347505Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.347530Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.347591Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.009961s 2025-06-24T21:29:45.348063Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. 
Description: 2025-06-24T21:29:45.348085Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.348105Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:29:45.348155Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.008923s 2025-06-24T21:29:45.364588Z :TWriteSession_TestPolicy INFO: Random seed for debugging is 1750800585364558 2025-06-24T21:29:45.953490Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631255858280448:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:29:45.953595Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:46.073523Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631262046456411:2158];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0049da/r3tmp/tmpBbUHMH/pdisk_1.dat 2025-06-24T21:29:46.296880Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:46.296881Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-24T21:29:46.385318Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:29:46.684907Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:46.703658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:46.703770Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:46.708113Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:29:46.708213Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:29:46.720620Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:29:46.725763Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-24T21:29:46.739825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3108, node 1 2025-06-24T21:29:46.996766Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:47.058488Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:29:47.163942Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/0049da/r3tmp/yandex4We1Jf.tmp 2025-06-24T21:29:47.163965Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize 
from file: /home/runner/.ya/build/build_root/2btm/0049da/r3tmp/yandex4We1Jf.tmp 2025-06-24T21:29:47.164134Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/0049da/r3tmp/yandex4We1Jf.tmp 2025-06-24T21:29:47.164289Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:29:47.444532Z INFO: TTestServer started on Port 6964 GrpcPort 3108 TClient is connected to server localhost:6964 PQClient connected to localhost:3108 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:29:47.828221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-24T21:29:49.911460Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631274931358483:2269], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:49.911465Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631274931358491:2272], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:49.911642Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:29:49.920218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:29:49.943972Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519631274931358497:2273], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-24T21:29:50.048843Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519631279226325821:2131] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:29:50.392136Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519631277333117965:2304], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:29:50.394745Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YWNjYTMzN2ItMjQwZTI1OTMtOTY2YThlYmMtMWNhYmRiMjE=, ActorId: [1:7519631277333117926:2297], ActorState: ExecuteState, TraceId: 01jyhxhs6n0637jbc1yk4ekrsb, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:29:50.394811Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519631279226325836:2277], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:29:50.395033Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NTM4Nzc1NzAtMmJkYTQ4YjItMjVmYjkxNTktNDk5YjgwZjM=, ActorId: [2:7519631274931358481:2268], ActorState: ExecuteState, TraceId: 01jyhxhs2m7kxac4r5w22e9dc4, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:29:50.397273Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:29:50.397291Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. ... 6224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:36:02.633229Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 2, partNo: 0, Offset: 1 is stored on disk 2025-06-24T21:36:02.633254Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:36:02.633294Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-06-24T21:36:02.633322Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:36:02.633359Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 4, partNo: 0, Offset: 3 is stored on disk 2025-06-24T21:36:02.633381Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-24T21:36:02.633426Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 5, partNo: 0, Offset: 4 is stored on disk 2025-06-24T21:36:02.633456Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:36:02.633490Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 6, partNo: 0, Offset: 5 is stored on disk 2025-06-24T21:36:02.633513Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:36:02.633556Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 6 is stored on disk 2025-06-24T21:36:02.633597Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:36:02.633649Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 8, partNo: 0, Offset: 7 is stored on disk 2025-06-24T21:36:02.633677Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:36:02.633710Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 9, partNo: 0, Offset: 8 is stored on disk 2025-06-24T21:36:02.633737Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-24T21:36:02.633774Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 10, partNo: 0, Offset: 9 is stored on disk 2025-06-24T21:36:02.634131Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-24T21:36:02.634184Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-24T21:36:02.634281Z node 17 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=1208, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:36:02.634565Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-24T21:36:02.634735Z node 17 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-24T21:36:02.635357Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 Topic 'rt3.dc1--test-topic' partition 0 user user offset 0 count 1 size 1024000 endOffset 10 max time lag 0ms effective offset 0 2025-06-24T21:36:02.635752Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 1208 count 10 last offset 0, current partition end offset: 10 2025-06-24T21:36:02.635797Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 2025-06-24T21:36:02.635875Z node 17 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 10 parts_count 0 source 1 size 1208 accessed 0 times before, last time 2025-06-24T21:36:02.000000Z 2025-06-24T21:36:02.635907Z node 17 :PERSQUEUE DEBUG: read.h:121: Reading cookie 2. All 1 blobs are from cache. 2025-06-24T21:36:02.635975Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-24T21:36:02.636399Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 10 count 10 size 1188 from pos 0 cbcount 10 2025-06-24T21:36:02.636549Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:964: Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp done, result 1750800962621 queuesize 0 startOffset 0 2025-06-24T21:36:02.636830Z node 17 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037892' partition 0 offset 0 partno 0 count 10 parts 0 suffix '63' size 1208 2025-06-24T21:36:02.636942Z node 17 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037892' partition 0 offset 0 partno 0 count 10 parts 0 suffix '63' 2025-06-24T21:36:02.639917Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0] Write session got write response: sequence_numbers: 1 sequence_numbers: 2 sequence_numbers: 3 sequence_numbers: 4 sequence_numbers: 5 sequence_numbers: 6 sequence_numbers: 7 sequence_numbers: 8 sequence_numbers: 9 sequence_numbers: 10 offsets: 0 offsets: 1 offsets: 2 offsets: 3 offsets: 4 offsets: 5 offsets: 6 offsets: 7 offsets: 8 offsets: 9 already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false write_statistics { persist_duration_ms: 10 queued_in_partition_duration_ms: 2 } 2025-06-24T21:36:02.640026Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0] Write session: acknoledged message 1 2025-06-24T21:36:02.640097Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0] Write session: acknoledged message 2 2025-06-24T21:36:02.640146Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0] Write session: acknoledged message 3 2025-06-24T21:36:02.640177Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0] Write session: acknoledged message 4 2025-06-24T21:36:02.640208Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0] Write session: acknoledged message 5 2025-06-24T21:36:02.640243Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0] Write session: acknoledged message 6 2025-06-24T21:36:02.640284Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0] Write session: acknoledged message 7 2025-06-24T21:36:02.640321Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0] Write session: acknoledged message 8 2025-06-24T21:36:02.640367Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0] Write session: acknoledged message 9 2025-06-24T21:36:02.640391Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0] Write session: acknoledged message 10 2025-06-24T21:36:02.643504Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0] Write session: close. 
Timeout = 0 ms 2025-06-24T21:36:02.643602Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0] Write session will now close 2025-06-24T21:36:02.643673Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0] Write session: aborting 2025-06-24T21:36:02.644327Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0] Write session: gracefully shut down, all writes complete 2025-06-24T21:36:02.644402Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0] Write session: destroy 2025-06-24T21:36:02.645885Z node 17 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 7 sessionId: test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0 grpc read done: success: 0 data: 2025-06-24T21:36:02.645943Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 7 sessionId: test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0 grpc read failed 2025-06-24T21:36:02.646018Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 7 sessionId: test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0 grpc closed 2025-06-24T21:36:02.646056Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 7 sessionId: test-message-group-id|196b5612-d28be43d-3a6465a5-8c20e9a5_0 is DEAD 2025-06-24T21:36:02.672030Z node 17 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-24T21:36:02.672460Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:2910: [PQ: 72075186224037892] server disconnected, pipe [17:7519632874759976931:2591] destroyed 2025-06-24T21:36:02.672518Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. >> TDqPqRdReadActorTests::IgnoreMessageIfNoSessions [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/build_index/ut/unittest >> TTxDataShardSampleKScan::RunScan [GOOD] Test command err: 2025-06-24T21:31:17.167345Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:285:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-24T21:31:17.167812Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-24T21:31:17.168033Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001668/r3tmp/tmpfudu3B/pdisk_1.dat 2025-06-24T21:31:17.688964Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-24T21:31:17.700717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:31:17.755682Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:17.765938Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1750800673840664 != 1750800673840668 2025-06-24T21:31:17.817448Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:17.817598Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:17.831451Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:31:17.927290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:17.986774Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:625:2529] 2025-06-24T21:31:17.987099Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-24T21:31:18.039443Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-24T21:31:18.039597Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-24T21:31:18.042500Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-24T21:31:18.042616Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-24T21:31:18.042698Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-24T21:31:18.044068Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-24T21:31:18.044257Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-24T21:31:18.044352Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:641:2529] in generation 1 2025-06-24T21:31:18.055366Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-24T21:31:18.096698Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-24T21:31:18.098606Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-24T21:31:18.098796Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:643:2539] 2025-06-24T21:31:18.098839Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:31:18.098876Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-24T21:31:18.098929Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:31:18.101042Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-24T21:31:18.101181Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-24T21:31:18.101268Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:31:18.101325Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:31:18.101461Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:31:18.101519Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:31:18.101640Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:621:2526], serverId# [1:630:2531], sessionId# [0:0:0] 2025-06-24T21:31:18.102806Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:31:18.103248Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-24T21:31:18.103824Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-24T21:31:18.105957Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:31:18.116777Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-24T21:31:18.116962Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-24T21:31:18.289548Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:659:2549], serverId# [1:660:2550], sessionId# [0:0:0] 2025-06-24T21:31:18.307642Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-24T21:31:18.307731Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:31:18.308544Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:31:18.308589Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-24T21:31:18.308645Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-24T21:31:18.308855Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-24T21:31:18.308984Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-24T21:31:18.309258Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:31:18.309304Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-24T21:31:18.314902Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-24T21:31:18.319825Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:31:18.321502Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-24T21:31:18.321566Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:31:18.322557Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-24T21:31:18.322642Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:31:18.325206Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:31:18.325265Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-24T21:31:18.325322Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-24T21:31:18.325388Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:371:2365], exec latency: 0 ms, propose latency: 0 ms 2025-06-24T21:31:18.325446Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-24T21:31:18.325562Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-24T21:31:18.330179Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-24T21:31:18.332759Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-24T21:31:18.332853Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-24T21:31:18.334083Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-24T21:31:18.403847Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:698:2579], serverId# [1:699:2580], sessionId# [0:0:0] 2025-06-24T21:31:18.407302Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-24T21:31:18.407500Z node 1 :TX_DATASHARD DEBUG: check_snapshot_tx_unit.cpp:153: Prepared Snapshot transaction txId 281474976715658 at tabl ... .538448Z node 24 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:36:03.538488Z node 24 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:36:03.575921Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [24:7519632881922616752:2442], serverId# [24:7519632881922616754:2444], sessionId# [0:0:0] 2025-06-24T21:36:03.575971Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [24:7519632881922616753:2443], serverId# [24:7519632881922616755:2445], sessionId# [0:0:0] 2025-06-24T21:36:03.576199Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:263: Starting TSampleKScan TabletId: 72075186224037888 Id: 1 TabletId: 72075186224037888 PathId { OwnerId: 72057594046644480 LocalId: 2 } K: 9 Seed: 0 MaxProbability: 18446744073709551615 Columns: "value" Columns: "key" SnapshotTxId: 281474976715661 SnapshotStep: 1750800963506 SeqNoGeneration: 12 SeqNoRound: 1 row version v1750800963506/281474976715661 2025-06-24T21:36:03.576259Z node 24 :BUILD_INDEX INFO: sample_k.cpp:89: Create TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 2025-06-24T21:36:03.576542Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:263: Starting TSampleKScan TabletId: 72075186224037888 Id: 1 TabletId: 72075186224037888 PathId { OwnerId: 72057594046644480 LocalId: 2 } K: 9 Seed: 0 MaxProbability: 18446744073709551615 Columns: "value" Columns: "key" SnapshotTxId: 281474976715661 SnapshotStep: 1750800963506 SeqNoGeneration: 12 SeqNoRound: 1 row version v1750800963506/281474976715661 2025-06-24T21:36:03.576591Z node 24 :BUILD_INDEX INFO: sample_k.cpp:89: Create TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 2025-06-24T21:36:03.576848Z node 24 :BUILD_INDEX INFO: sample_k.cpp:108: Prepare TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 2025-06-24T21:36:03.576899Z node 24 :BUILD_INDEX TRACE: sample_k.cpp:116: Seek 0 TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 2025-06-24T21:36:03.577079Z node 24 :BUILD_INDEX TRACE: sample_k.cpp:142: Exhausted TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 5 2025-06-24T21:36:03.577235Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:171: Done 
TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 Id: 1 TabletId: 72075186224037888 Status: DONE ReadRows: 5 ReadBytes: 60 RequestSeqNoGeneration: 12 RequestSeqNoRound: 1 Probabilities: 2033423113665717216 Probabilities: 6650325416439211286 Probabilities: 7100737942502591086 Probabilities: 7451216775397157654 Probabilities: 16483534760593850368 Rows: 5 2025-06-24T21:36:03.577437Z node 24 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:36:03.577480Z node 24 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:36:03.577503Z node 24 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:36:03.577534Z node 24 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:36:03.616645Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [24:7519632881922616762:2450], serverId# [24:7519632881922616764:2452], sessionId# [0:0:0] 2025-06-24T21:36:03.616871Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:263: Starting TSampleKScan TabletId: 72075186224037888 Id: 1 TabletId: 72075186224037888 PathId { OwnerId: 72057594046644480 LocalId: 2 } K: 1 Seed: 111 MaxProbability: 18446744073709551615 Columns: "value" Columns: "key" SeqNoGeneration: 13 SeqNoRound: 1 row version v1750800963513/18446744073709551615 2025-06-24T21:36:03.616941Z node 24 :BUILD_INDEX INFO: sample_k.cpp:89: Create TSampleKScan TabletId: 72075186224037888 Id: 1 K: 1 Sample: 0 2025-06-24T21:36:03.617151Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [24:7519632881922616763:2451], serverId# [24:7519632881922616765:2453], sessionId# [0:0:0] 2025-06-24T21:36:03.617257Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:263: Starting TSampleKScan TabletId: 72075186224037888 Id: 1 TabletId: 72075186224037888 PathId { OwnerId: 72057594046644480 LocalId: 2 } K: 1 Seed: 111 MaxProbability: 18446744073709551615 Columns: "value" Columns: "key" SeqNoGeneration: 13 SeqNoRound: 1 row version v1750800963513/18446744073709551615 2025-06-24T21:36:03.617293Z node 24 :BUILD_INDEX INFO: sample_k.cpp:89: Create TSampleKScan TabletId: 72075186224037888 Id: 1 K: 1 Sample: 0 2025-06-24T21:36:03.617441Z node 24 :BUILD_INDEX INFO: sample_k.cpp:108: Prepare TSampleKScan TabletId: 72075186224037888 Id: 1 K: 1 Sample: 0 2025-06-24T21:36:03.617495Z node 24 :BUILD_INDEX TRACE: sample_k.cpp:116: Seek 0 TSampleKScan TabletId: 72075186224037888 Id: 1 K: 1 Sample: 0 2025-06-24T21:36:03.617658Z node 24 :BUILD_INDEX TRACE: sample_k.cpp:142: Exhausted TSampleKScan TabletId: 72075186224037888 Id: 1 K: 1 Sample: 1 2025-06-24T21:36:03.617775Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:171: Done TSampleKScan TabletId: 72075186224037888 Id: 1 K: 1 Sample: 0 Id: 1 TabletId: 72075186224037888 Status: DONE ReadRows: 5 ReadBytes: 60 RequestSeqNoGeneration: 13 RequestSeqNoRound: 1 Probabilities: 1182976742465768660 Rows: 1 2025-06-24T21:36:03.617944Z node 24 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:36:03.617978Z node 24 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:36:03.618037Z node 24 :TX_DATASHARD INFO: 
datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:36:03.618066Z node 24 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:36:03.659905Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [24:7519632881922616776:2460], serverId# [24:7519632881922616778:2462], sessionId# [0:0:0] 2025-06-24T21:36:03.659986Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [24:7519632881922616777:2461], serverId# [24:7519632881922616779:2463], sessionId# [0:0:0] 2025-06-24T21:36:03.660236Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:263: Starting TSampleKScan TabletId: 72075186224037888 Id: 1 TabletId: 72075186224037888 PathId { OwnerId: 72057594046644480 LocalId: 2 } K: 3 Seed: 111 MaxProbability: 18446744073709551615 Columns: "value" Columns: "key" SeqNoGeneration: 14 SeqNoRound: 1 row version v1750800963513/18446744073709551615 2025-06-24T21:36:03.660308Z node 24 :BUILD_INDEX INFO: sample_k.cpp:89: Create TSampleKScan TabletId: 72075186224037888 Id: 1 K: 3 Sample: 0 2025-06-24T21:36:03.660636Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:263: Starting TSampleKScan TabletId: 72075186224037888 Id: 1 TabletId: 72075186224037888 PathId { OwnerId: 72057594046644480 LocalId: 2 } K: 3 Seed: 111 MaxProbability: 18446744073709551615 Columns: "value" Columns: "key" SeqNoGeneration: 14 SeqNoRound: 1 row version v1750800963513/18446744073709551615 2025-06-24T21:36:03.660693Z node 24 :BUILD_INDEX INFO: sample_k.cpp:89: Create TSampleKScan TabletId: 72075186224037888 Id: 1 K: 3 Sample: 0 2025-06-24T21:36:03.660874Z node 24 :BUILD_INDEX INFO: sample_k.cpp:108: Prepare TSampleKScan TabletId: 72075186224037888 Id: 1 K: 3 Sample: 0 2025-06-24T21:36:03.660913Z node 24 :BUILD_INDEX TRACE: sample_k.cpp:116: Seek 0 TSampleKScan TabletId: 72075186224037888 Id: 1 K: 3 Sample: 0 2025-06-24T21:36:03.661087Z node 24 :BUILD_INDEX TRACE: sample_k.cpp:142: Exhausted TSampleKScan TabletId: 72075186224037888 Id: 1 K: 3 Sample: 3 2025-06-24T21:36:03.661214Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:171: Done TSampleKScan TabletId: 72075186224037888 Id: 1 K: 3 Sample: 0 Id: 1 TabletId: 72075186224037888 Status: DONE ReadRows: 5 ReadBytes: 60 RequestSeqNoGeneration: 14 RequestSeqNoRound: 1 Probabilities: 1182976742465768660 Probabilities: 5318693189955351404 Probabilities: 10350790534088709650 Rows: 3 2025-06-24T21:36:03.661409Z node 24 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:36:03.661436Z node 24 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:36:03.661457Z node 24 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:36:03.661518Z node 24 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-24T21:36:03.705415Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [24:7519632881922616786:2468], serverId# [24:7519632881922616788:2470], sessionId# [0:0:0] 2025-06-24T21:36:03.705466Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [24:7519632881922616787:2469], serverId# 
[24:7519632881922616789:2471], sessionId# [0:0:0] 2025-06-24T21:36:03.705676Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:263: Starting TSampleKScan TabletId: 72075186224037888 Id: 1 TabletId: 72075186224037888 PathId { OwnerId: 72057594046644480 LocalId: 2 } K: 9 Seed: 111 MaxProbability: 18446744073709551615 Columns: "value" Columns: "key" SeqNoGeneration: 15 SeqNoRound: 1 row version v1750800963513/18446744073709551615 2025-06-24T21:36:03.705757Z node 24 :BUILD_INDEX INFO: sample_k.cpp:89: Create TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 2025-06-24T21:36:03.706070Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:263: Starting TSampleKScan TabletId: 72075186224037888 Id: 1 TabletId: 72075186224037888 PathId { OwnerId: 72057594046644480 LocalId: 2 } K: 9 Seed: 111 MaxProbability: 18446744073709551615 Columns: "value" Columns: "key" SeqNoGeneration: 15 SeqNoRound: 1 row version v1750800963513/18446744073709551615 2025-06-24T21:36:03.706120Z node 24 :BUILD_INDEX INFO: sample_k.cpp:89: Create TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 2025-06-24T21:36:03.711314Z node 24 :BUILD_INDEX INFO: sample_k.cpp:108: Prepare TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 2025-06-24T21:36:03.711391Z node 24 :BUILD_INDEX TRACE: sample_k.cpp:116: Seek 0 TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 2025-06-24T21:36:03.711569Z node 24 :BUILD_INDEX TRACE: sample_k.cpp:142: Exhausted TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 5 2025-06-24T21:36:03.711768Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:171: Done TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 Id: 1 TabletId: 72075186224037888 Status: DONE ReadRows: 5 ReadBytes: 60 RequestSeqNoGeneration: 15 RequestSeqNoRound: 1 Probabilities: 1182976742465768660 Probabilities: 5318693189955351404 Probabilities: 10350790534088709650 Probabilities: 15054120856910380438 Probabilities: 17710393315211645100 Rows: 5 2025-06-24T21:36:03.712158Z node 24 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-24T21:36:03.712189Z node 24 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-24T21:36:03.712209Z node 24 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-24T21:36:03.712260Z node 24 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 |99.3%| [TM] {RESULT} ydb/core/tx/datashard/build_index/ut/unittest >> TDqPqRdReadActorTests::MetadataFields |99.3%| [TA] $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TDqPqRdReadActorTests::MetadataFields [GOOD] |99.3%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TDqPqRdReadActorTests::IgnoreCoordinatorResultIfWrongState >> KafkaProtocol::OffsetCommitAndFetchScenario [GOOD] >> KafkaProtocol::CreateTopicsScenarioWithKafkaAuth >> test_discovery.py::TestDiscoveryExtEndpoint::test_scenario [GOOD] >> TDqPqRdReadActorTests::IgnoreCoordinatorResultIfWrongState [GOOD] >> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--false] >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--true] [GOOD] >> TDqPqReadActorTest::TestReadFromTopic >> test_session_pool.py::TestSessionPool::test_session_pool_simple_acquire >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_async_table-True] [GOOD] >> PgCatalog::PgDatabase-useSink [GOOD] >> PgCatalog::PgRoles >> test_public_api.py::TestExplain::test_explain_data_query [GOOD] >> test_crud.py::TestYdbCrudOperations::test_crud_operations >> KafkaProtocol::CreateTopicsScenarioWithKafkaAuth [GOOD] >> KafkaProtocol::CreateTopicsScenarioWithoutKafkaAuth >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_simple_table-False] [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_simple_acquire [GOOD] >> TDqPqReadActorTest::TestReadFromTopic [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_no_race_after_future_cancel_case_1 [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_no_race_after_future_cancel_case_2 [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_keep_alive >> test_session_pool.py::TestSessionPool::test_session_pool_keep_alive [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_no_race_after_future_cancel_case_3 [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_no_race_after_future_cancel_case_4 >> test_session_pool.py::TestSessionPool::test_session_pool_no_race_after_future_cancel_case_4 [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_release_logic [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_close_basic_logic_case_1 >> test_session_pool.py::TestSessionPool::test_session_pool_close_basic_logic_case_1 [GOOD] >> test_session_pool.py::TestSessionPool::test_no_cluster_endpoints_no_failure >> TDqPqReadActorTest::TestReadFromTopicFromNow >> Etcd_KV::TxnMultiKeysAndOperations [GOOD] >> Etcd_KV::Compact >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_async_table-True] [GOOD] >> PgCatalog::PgRoles [GOOD] >> PgCatalog::PgTables >> test_serverless.py::test_create_table[enable_alter_database_create_hive_first--false] [GOOD] >> TDqPqReadActorTest::TestReadFromTopicFromNow [GOOD] >> test_serverless.py::test_create_table[enable_alter_database_create_hive_first--true] >> TDqPqReadActorTest::ReadWithFreeSpace >> test_alloc_default.py::TestAlloc::test_mkql_not_increased[kikimr0] [GOOD] >> KafkaProtocol::CreateTopicsScenarioWithoutKafkaAuth [GOOD] >> KafkaProtocol::CreatePartitionsScenario |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_remove_from_key [GOOD] >> TDqPqReadActorTest::ReadWithFreeSpace [GOOD] >> TDqPqReadActorTest::ReadNonExistentTopic >> TDqPqReadActorTest::ReadNonExistentTopic [GOOD] >> 
TDqPqReadActorTest::TestSaveLoadPqRead >> test_alloc_default.py::TestAlloc::test_hard_limit[kikimr0] >> test_crud.py::TestYdbCrudOperations::test_crud_operations [GOOD] >> test_crud.py::TestCreateAndUpsertWithRepetitions::test_create_and_select_with_repetitions[10-64] >> KafkaProtocol::CreatePartitionsScenario [GOOD] >> KafkaProtocol::TopicsWithCleaunpPolicyScenario >> test_insert_restarts.py::TestS3::test_atomic_upload_commit[v2-client0] [GOOD] >> PgCatalog::PgTables [GOOD] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_async_table-True] [GOOD] >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--true] [GOOD] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--true] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> PgCatalog::PgTables [GOOD] Test command err: Trying to start YDB, gRPC: 62603, MsgBus: 3576 2025-06-24T21:30:45.914846Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631513379334004:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:30:45.915527Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ee6/r3tmp/tmpQlKK6K/pdisk_1.dat 2025-06-24T21:30:46.411169Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:46.419321Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631513379333971:2079] 1750800645910422 != 1750800645910425 2025-06-24T21:30:46.445320Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:30:46.445400Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:30:46.471322Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62603, node 1 2025-06-24T21:30:46.686395Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:30:46.686437Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:30:46.686447Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:30:46.686654Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:30:46.929958Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3576 TClient is connected to server localhost:3576 WaitRootIsUp 'Root'... 
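
The test_session_pool.py::TestSessionPool cases listed a little earlier in this log exercise the client-side session pool: blocking acquire, release back into the pool, keep-alive, and the close cases. The snippet below is not the ydb-python-sdk API; it is a minimal, self-contained Python model (a hypothetical SimpleSessionPool) of just the acquire/release/close behaviour those test names imply, with keep-alive left out.

    # Minimal, illustrative session-pool model (NOT the ydb-python-sdk API).
    # Names are hypothetical; they only mirror behaviour implied by tests such as
    # test_session_pool_simple_acquire and test_session_pool_release_logic.
    import queue

    class SimpleSessionPool:
        def __init__(self, size):
            self._free = queue.Queue()
            for i in range(size):
                self._free.put(f"session-{i}")   # placeholder for a real session object
            self._closed = False

        def acquire(self, timeout=None):
            if self._closed:
                raise RuntimeError("pool is closed")
            return self._free.get(timeout=timeout)   # blocks until a session is free

        def release(self, session):
            if not self._closed:
                self._free.put(session)              # released sessions become reusable

        def close(self):
            self._closed = True                      # no new acquires after close

    pool = SimpleSessionPool(size=2)
    s = pool.acquire()
    pool.release(s)
    pool.close()
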
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:30:47.558544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 1042 2025-06-24T21:30:49.006629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667)
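
Going back to the TSampleKScan entries earlier in this log: every scan is created with a K, a Seed and MaxProbability 18446744073709551615 (2^64 - 1), reads the same 5 rows, and finishes with min(K, rows) samples plus that many 64-bit "Probabilities" values listed in ascending order; within the Seed: 111 runs the K=1 and K=3 priority lists are prefixes of the K=9 list. That pattern is consistent with drawing one random 64-bit priority per row and keeping the K smallest, but the real sample_k.cpp logic is not shown here, so the Python sketch below is an assumption-labelled illustration only.

    # Illustrative K-sampling by random per-row priorities (an assumption about
    # what the TSampleKScan lines describe, not the real ydb sample_k.cpp code).
    import heapq
    import random

    MAX_PROBABILITY = 2**64 - 1  # matches MaxProbability in the log

    def sample_k(rows, k, seed):
        rng = random.Random(seed)
        heap = []  # max-heap via negated priorities, at most k entries
        for row in rows:
            prio = rng.randrange(MAX_PROBABILITY + 1)  # uniform 64-bit priority
            if len(heap) < k:
                heapq.heappush(heap, (-prio, row))
            elif prio < -heap[0][0]:                   # better than the worst kept one
                heapq.heapreplace(heap, (-prio, row))
        # Report kept priorities in ascending order, like the "Probabilities:" lines.
        return sorted((-p, row) for p, row in heap)

    for prio, row in sample_k(rows=range(5), k=3, seed=111):
        print(prio, row)
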
: Error: Bulk upsert to table '/Root/Coerce_pgbpchar_17472595041006102391_17823623939509273229' Typemod mismatch, got type pgbpchar for column value, type mod , but expected 2 --!syntax_pg INSERT INTO Coerce_pgbpchar_17472595041006102391_17823623939509273229 (key, value) VALUES ( '0'::int2, 'abcd'::bpchar ) 2025-06-24T21:30:49.232814Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631530559203899:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:49.232815Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631530559203907:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:49.232922Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:30:49.237059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:30:49.251297Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631530559203913:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-24T21:30:49.307233Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631530559203964:2396] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:30:49.883562Z node 1 :TX_DATASHARD CRIT: execute_kqp_data_tx_unit.cpp:449: Exception while executing KQP transaction [0:281474976715663] at 72075186224037888: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-24T21:30:49.885867Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715663 at tablet 72075186224037888 status: EXEC_ERROR errors: UNKNOWN (Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ) | 2025-06-24T21:30:49.886119Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:864: ActorId: [1:7519631530559204015:2299] TxId: 281474976715663. Ctx: { TraceId: 01jyhxkk0e2bpq8c2h5vkzwjp7, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjA3MmUwNDQtY2Q5OGY5NzAtNTg0YzZjMDMtYWFlZDE3NDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. EXEC_ERROR: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ; 2025-06-24T21:30:49.894641Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YjA3MmUwNDQtY2Q5OGY5NzAtNTg0YzZjMDMtYWFlZDE3NDk=, ActorId: [1:7519631530559203896:2299], ActorState: ExecuteState, TraceId: 01jyhxkk0e2bpq8c2h5vkzwjp7, Create QueryResponse for error on request, msg:
: Error: Error executing transaction (ExecError): Execution failed
: Error: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-24T21:30:49.956178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667)
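
Both failures above come down to one coercion rule: 'abcd' is inserted into a pgbpchar column whose typemod is 2, and the datashard rejects it with "value too long for type character(2)". Under PostgreSQL-style semantics a value fits a character(n) column only if it is at most n characters long once trailing spaces are ignored; shorter values are space-padded to n. The Python function below is a rough model of that documented PostgreSQL rule, not the actual MiniKQL coercion code referenced in the log.

    # Rough model of PostgreSQL character(n) insert coercion, matching the
    # "value too long for type character(2)" errors above.
    def coerce_bpchar(value: str, n: int) -> str:
        if len(value) > n:
            # Excess characters are tolerated only if they are all spaces.
            if value[n:].strip(" ") != "":
                raise ValueError(f"value too long for type character({n})")
            value = value[:n]
        return value.ljust(n)  # shorter values are space-padded to length n

    print(repr(coerce_bpchar("a", 2)))     # 'a '  -> padded
    print(repr(coerce_bpchar("ab  ", 2)))  # 'ab'  -> trailing spaces trimmed
    try:
        coerce_bpchar("abcd", 2)           # the case from the log
    except ValueError as err:
        print(err)                         # value too long for type character(2)
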
: Error: Bulk upsert to table '/Root/Coerce__pgbpchar_17472595041006102391_5352544928909966465' Typemod mismatch, got type _pgbpchar for column value, type mod , but expected 2 --!syntax_pg INSERT INTO Coerce__pgbpchar_17472595041006102391_5352544928909966465 (key, value) VALUES ( '0'::int2, '{abcd,abcd}'::_bpchar ) 2025-06-24T21:30:50.340308Z node 1 :TX_DATASHARD CRIT: execute_kqp_data_tx_unit.cpp:449: Exception while executing KQP transaction [0:281474976715668] at 72075186224037889: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-24T21:30:50.342719Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715668 at tablet 72075186224037889 status: EXEC_ERROR errors: UNKNOWN (Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ) | 2025-06-24T21:30:50.342930Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:864: ActorId: [1:7519631534854171447:2334] TxId: 281474976715668. Ctx: { TraceId: 01jyhxkktmc3yg7sprtqwn5yg5, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Nzc0YTQ1ZC1lMmMwZWI2ZC04YTg3YmI2ZS1jMjRiOGFmYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. EXEC_ERROR: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ; 2025-06-24T21:30:50.343141Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=Nzc0YTQ1ZC1lMmMwZWI2ZC04YTg3YmI2ZS1jMjRiOGFmYw==, ActorId: [1:7519631534854171406:2334], ActorState: ExecuteState, TraceId: 01jyhxkktmc3yg7sprtqwn5yg5, Create QueryResponse for error on request, msg:
: Error: Error executing transaction (ExecError): Execution failed
: Error: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 1042 2025-06-24T21:30:50.383222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 720575940466 ... 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:36:15.849738Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:36:19.655523Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7519632927394550212:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:36:19.655660Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:36:21.022846Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519632957459321915:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:36:21.022884Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519632957459321923:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:36:21.022960Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:36:21.031369Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:36:21.049068Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7519632957459321929:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:36:21.110966Z node 11 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [11:7519632957459321980:2343] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 15782, MsgBus: 3893 2025-06-24T21:36:22.624920Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519632963143729533:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:36:22.625064Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/003ee6/r3tmp/tmpdrIsxX/pdisk_1.dat 2025-06-24T21:36:22.812728Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:36:22.812883Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:36:22.813506Z node 12 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:36:22.814939Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:7519632963143729506:2079] 1750800982624122 != 1750800982624125 2025-06-24T21:36:22.829471Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15782, node 12 2025-06-24T21:36:22.897430Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:36:22.897462Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:36:22.897479Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:36:22.897679Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3893 2025-06-24T21:36:23.645970Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3893 WaitRootIsUp 'Root'... 
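
The sequence just above repeats for every freshly created database in these tests: the workload service fails to fetch the "default" resource pool (NOT_FOUND), a TPoolCreatorActor creates it, schedules a retry with "Transaction ... completed, doublechecking", and the doublecheck then reports "path exist, request accepts it". In other words, pool creation is handled as create-if-missing, and losing the race to a concurrent creator is accepted rather than treated as a failure. A small generic Python sketch of that pattern follows; the names in it are illustrative, not YDB APIs.

    # Generic "create if missing, tolerate losing the race" pattern, as suggested
    # by the resource-pool log lines above. All names here are illustrative.
    existing_pools = set()           # stands in for the scheme state

    class AlreadyExists(Exception):
        pass

    def create_pool(name):
        if name in existing_pools:
            raise AlreadyExists(name)
        existing_pools.add(name)

    def ensure_pool(name):
        if name in existing_pools:   # fetch succeeded, nothing to do
            return "found"
        try:
            create_pool(name)        # NOT_FOUND -> try to create it
            return "created"
        except AlreadyExists:
            # Concurrent creation ("path exist, request accepts it"):
            # the doublecheck treats this as success.
            return "found after doublecheck"

    print(ensure_pool("default"))    # created
    print(ensure_pool("default"))    # found
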
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:36:24.103179Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:36:27.625037Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519632963143729533:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:36:27.625160Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:36:28.978537Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519632988913533936:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:36:28.978663Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519632988913533928:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:36:28.978960Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:36:28.986622Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:36:29.005995Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519632988913533945:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:36:29.080342Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7519632993208501292:2343] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:36:29.131564Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:36:29.224031Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:36:33.914196Z node 12 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 12, TabletId: 72075186224037888 not found 2025-06-24T21:36:33.952664Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:36:34.378169Z node 12 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [12:7519633014683338335:2412], TxId: 281474976710672, task: 1. Ctx: { SessionId : ydb://session/3?node_id=12&id=MWFlZGNkYjktOWE2NzhjY2MtNTA5ZDMxMTktZTllYjdiNGQ=. TraceId : 01jyhxy3q6e4adrzh5cwa22yw5. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED DEFAULT_ERROR: {
: Error: Terminate was called, reason(57): ERROR: invalid input syntax for type boolean: "pg_proc" }. 2025-06-24T21:36:34.379795Z node 12 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [12:7519633014683338336:2413], TxId: 281474976710672, task: 2. Ctx: { SessionId : ydb://session/3?node_id=12&id=MWFlZGNkYjktOWE2NzhjY2MtNTA5ZDMxMTktZTllYjdiNGQ=. CustomerSuppliedId : . TraceId : 01jyhxy3q6e4adrzh5cwa22yw5. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [12:7519633014683338332:2408], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-24T21:36:34.380899Z node 12 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=12&id=MWFlZGNkYjktOWE2NzhjY2MtNTA5ZDMxMTktZTllYjdiNGQ=, ActorId: [12:7519633014683338323:2408], ActorState: ExecuteState, TraceId: 01jyhxy3q6e4adrzh5cwa22yw5, Create QueryResponse for error on request, msg: >> KafkaProtocol::TopicsWithCleaunpPolicyScenario [GOOD] >> KafkaProtocol::DescribeConfigsScenario |99.3%| [TA] $(B)/ydb/core/kqp/ut/pg/test-results/unittest/{meta.json ... results_accumulator.log} |99.3%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/pg/test-results/unittest/{meta.json ... results_accumulator.log} >> test_crud.py::TestCreateAndUpsertWithRepetitions::test_create_and_select_with_repetitions[10-64] [GOOD] >> test_crud.py::TestCreateAndUpsertWithRepetitions::test_create_and_upsert_data_with_repetitions[10-64] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/sql/py3test >> test_crud.py::TestYdbCrudOperations::test_crud_operations [GOOD] |99.3%| [TM] {RESULT} ydb/tests/sql/py3test >> test_public_api.py::TestCRUDOperations::test_prepared_query_pipeline >> TDqPqReadActorTest::TestSaveLoadPqRead [GOOD] >> TDqPqReadActorTest::LoadCorruptedState |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_simple_table-False] [GOOD] >> TDqPqReadActorTest::LoadCorruptedState [GOOD] >> TDqPqReadActorTest::TestLoadFromSeveralStates |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_async_table-True] [GOOD] >> Etcd_KV::Compact [GOOD] >> Etcd_Lease::SimpleGrantAndRevoke >> test_insert.py::TestInsertOperations::test_several_inserts_per_transaction_are_success >> test_crud.py::TestCreateAndUpsertWithRepetitions::test_create_and_upsert_data_with_repetitions[10-64] [GOOD] |99.3%| [TA] $(B)/ydb/tests/functional/rename/test-results/py3test/{meta.json ... results_accumulator.log} |99.3%| [TA] {RESULT} $(B)/ydb/tests/functional/rename/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--false] [FAIL] >> test_session_pool.py::TestSessionPool::test_no_cluster_endpoints_no_failure [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_close_basic_logic_case_2 >> test_session_pool.py::TestSessionPool::test_session_pool_close_basic_logic_case_2 [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_min_size_feature [GOOD] >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--true] >> TSelectFromViewTest::ReadTestCasesFromFiles [GOOD] >> TSelectFromViewTest::QueryCacheIsUpdated >> test_read_table.py::TestReadTableSuccessStories::test_read_table_only_specified_ranges >> KafkaProtocol::DescribeConfigsScenario [GOOD] >> KafkaProtocol::AlterConfigsScenario >> test_public_api.py::TestCRUDOperations::test_prepared_query_pipeline [GOOD] >> test_public_api.py::TestCRUDOperations::test_scheme_client_ops >> test_public_api.py::TestCRUDOperations::test_scheme_client_ops [GOOD] >> test_public_api.py::TestCRUDOperations::test_scheme_operation_errors_handle >> test_public_api.py::TestCRUDOperations::test_scheme_operation_errors_handle [GOOD] >> test_public_api.py::TestCRUDOperations::test_none_values [GOOD] >> test_public_api.py::TestCRUDOperations::test_parse_list_type >> test_public_api.py::TestCRUDOperations::test_parse_list_type [GOOD] >> test_public_api.py::TestCRUDOperations::test_parse_tuple |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_public_api.py::TestCRUDOperations::test_parse_tuple [GOOD] >> test_public_api.py::TestCRUDOperations::test_dict_type [GOOD] >> test_public_api.py::TestCRUDOperations::test_struct_type [GOOD] >> test_public_api.py::TestCRUDOperations::test_data_types >> test_public_api.py::TestCRUDOperations::test_data_types [GOOD] >> test_public_api.py::TestCRUDOperations::test_struct_type_parameter >> TDqPqReadActorTest::TestLoadFromSeveralStates [GOOD] >> test_public_api.py::TestCRUDOperations::test_struct_type_parameter [GOOD] >> test_public_api.py::TestCRUDOperations::test_bulk_prepared_insert_many_values >> TDqPqReadActorTest::TestReadFromTopicFirstWatermark >> test_public_api.py::TestCRUDOperations::test_bulk_prepared_insert_many_values [GOOD] >> test_public_api.py::TestCRUDOperations::test_bulk_upsert >> test_public_api.py::TestCRUDOperations::test_bulk_upsert [GOOD] >> test_public_api.py::TestCRUDOperations::test_all_enums_are_presented_as_exceptions [GOOD] >> test_public_api.py::TestCRUDOperations::test_type_builders_str_methods >> test_public_api.py::TestCRUDOperations::test_type_builders_str_methods [GOOD] >> test_public_api.py::TestCRUDOperations::test_create_and_delete_session_then_use_it_again [GOOD] >> test_public_api.py::TestCRUDOperations::test_locks_invalidated_error >> test_insert.py::TestInsertOperations::test_several_inserts_per_transaction_are_success [GOOD] >> test_insert.py::TestInsertOperations::test_insert_plus_update_per_transaction_are_success >> test_public_api.py::TestCRUDOperations::test_locks_invalidated_error [GOOD] >> test_public_api.py::TestCRUDOperations::test_tcl >> test_public_api.py::TestCRUDOperations::test_tcl [GOOD] >> test_public_api.py::TestCRUDOperations::test_tcl_2 [GOOD] >> test_public_api.py::TestCRUDOperations::test_tcl_3 [GOOD] >> test_public_api.py::TestCRUDOperations::test_reuse_session_to_tx_leak >> test_insert.py::TestInsertOperations::test_insert_plus_update_per_transaction_are_success 
[GOOD] >> test_insert.py::TestInsertOperations::test_update_plus_insert_per_transaction_are_success_prepared_case >> TDqPqReadActorTest::TestReadFromTopicFirstWatermark [GOOD] >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--false] >> TDqPqReadActorTest::TestReadFromTopicWatermarks1 >> test_insert.py::TestInsertOperations::test_update_plus_insert_per_transaction_are_success_prepared_case [GOOD] >> test_insert.py::TestInsertOperations::test_upsert_plus_insert_per_transaction_are_success_prepared_case |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/restarts/py3test >> test_insert_restarts.py::TestS3::test_atomic_upload_commit[v2-client0] [GOOD] |99.3%| [TM] {RESULT} ydb/tests/fq/restarts/py3test >> test_insert.py::TestInsertOperations::test_upsert_plus_insert_per_transaction_are_success_prepared_case [GOOD] >> test_insert.py::TestInsertOperations::test_insert_plus_upsert_are_success >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] >> test_alloc_default.py::TestAlloc::test_hard_limit[kikimr0] [GOOD] >> test_read_table.py::TestReadTableSuccessStories::test_read_table_only_specified_ranges [GOOD] >> test_read_table.py::TestReadTableSuccessStories::test_read_table_constructed_key_range >> KafkaProtocol::AlterConfigsScenario [GOOD] >> KafkaProtocol::LoginWithApiKey >> test_insert.py::TestInsertOperations::test_insert_plus_upsert_are_success [GOOD] >> test_insert.py::TestInsertOperations::test_insert_revert_basis >> test_insert.py::TestInsertOperations::test_insert_revert_basis [GOOD] >> test_insert.py::TestInsertOperations::test_query_pairs >> TSelectFromViewTest::QueryCacheIsUpdated [GOOD] >> test_drain.py::TestHive::test_drain_tablets [GOOD] >> test_result_limits.py::TestResultLimits::test_many_rows >> test_discovery.py::TestDiscoveryFaultInjectionSlotStop::test_scenario >> test_read_table.py::TestReadTableSuccessStories::test_read_table_constructed_key_range [GOOD] >> test_read_table.py::TestReadTableSuccessStories::test_read_table_reads_only_specified_columns >> test_indexes.py::TestSecondaryIndexes::test_create_table_with_global_index >> test_read_table.py::TestReadTableSuccessStories::test_read_table_reads_only_specified_columns [GOOD] >> test_read_table.py::TestReadTableSuccessStories::test_read_table_without_data_has_snapshot |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--true] [GOOD] >> test_read_table.py::TestReadTableSuccessStories::test_read_table_without_data_has_snapshot [GOOD] >> test_public_api.py::TestCRUDOperations::test_reuse_session_to_tx_leak [GOOD] >> test_public_api.py::TestCRUDOperations::test_direct_leak_tx_but_no_actual_leak_by_best_efforts >> TDqPqReadActorTest::TestReadFromTopicWatermarks1 [GOOD] >> TDqPqReadActorTest::WatermarkCheckpointWithItemsInReadyBuffer >> test_public_api.py::TestCRUDOperations::test_direct_leak_tx_but_no_actual_leak_by_best_efforts [GOOD] >> test_public_api.py::TestCRUDOperations::test_presented_in_cache >> test_public_api.py::TestCRUDOperations::test_presented_in_cache [GOOD] >> test_public_api.py::TestCRUDOperations::test_decimal_values_negative_stories [GOOD] >> test_public_api.py::TestCRUDOperations::test_decimal_values >> test_public_api.py::TestCRUDOperations::test_decimal_values [GOOD] >> test_public_api.py::TestCRUDOperations::test_list_directory_with_children >> 
test_public_api.py::TestCRUDOperations::test_list_directory_with_children [GOOD] >> test_public_api.py::TestCRUDOperations::test_validate_describe_path_result >> test_public_api.py::TestCRUDOperations::test_validate_describe_path_result [GOOD] >> test_public_api.py::TestCRUDOperations::test_acl_modifications_1 [GOOD] >> test_public_api.py::TestCRUDOperations::test_acl_modification_2 [GOOD] >> test_public_api.py::TestCRUDOperations::test_can_execute_valid_statement_after_invalid_success >> test_public_api.py::TestCRUDOperations::test_can_execute_valid_statement_after_invalid_success [GOOD] >> test_public_api.py::TestCRUDOperations::test_modify_permissions_3 [GOOD] >> test_public_api.py::TestCRUDOperations::test_directory_that_doesnt_exists [GOOD] >> test_public_api.py::TestCRUDOperations::test_crud_acl_actions [GOOD] >> test_public_api.py::TestCRUDOperations::test_too_many_pending_transactions >> test_public_api.py::TestCRUDOperations::test_too_many_pending_transactions [GOOD] >> test_public_api.py::TestCRUDOperations::test_query_set1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/view/unittest >> TSelectFromViewTest::QueryCacheIsUpdated [GOOD] Test command err: Trying to start YDB, gRPC: 1314, MsgBus: 22709 2025-06-24T21:32:28.267551Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631957418142944:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:32:28.267746Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001799/r3tmp/tmpWJZwrd/pdisk_1.dat 2025-06-24T21:32:28.844826Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:28.851100Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631957418142758:2079] 1750800748251172 != 1750800748251175 2025-06-24T21:32:28.865146Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:28.865270Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:28.883083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1314, node 1 2025-06-24T21:32:29.259926Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:32:29.355828Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:32:29.355858Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:32:29.355882Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:32:29.356003Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22709 TClient is connected to server localhost:22709 WaitRootIsUp 'Root'... 
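
TSelectFromViewTest::QueryCacheIsUpdated, whose server log appears around this point of the dump, presumably checks that a cached compiled query is not reused once a view it depends on has been dropped or recreated. The toy Python cache below only illustrates that idea, keying entries by query text plus the schema version of each dependency; the real KQP query cache is server-side and not visible in this log.

    # Toy query cache keyed by (query text, schema versions of dependencies).
    # Purely illustrative; it only shows why a view drop/recreate must
    # invalidate previously cached plans.
    schema_version = {"/Root/some_view": 1}   # bumped on CREATE/DROP/ALTER
    compiled_cache = {}

    def compile_query(text):
        return f"plan({text})"                # stand-in for real compilation

    def execute(text, dependencies):
        key = (text, tuple(sorted((d, schema_version[d]) for d in dependencies)))
        if key not in compiled_cache:         # miss: (re)compile and cache
            compiled_cache[key] = compile_query(text)
        return compiled_cache[key]

    execute("SELECT * FROM some_view", ["/Root/some_view"])  # compiled once
    schema_version["/Root/some_view"] += 1                   # view recreated
    execute("SELECT * FROM some_view", ["/Root/some_view"])  # recompiled, cache updated
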
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:32:30.453363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:32:30.508533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-24T21:32:32.095639Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631974598012583:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:32.095780Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } Trying to start YDB, gRPC: 1738, MsgBus: 27600 2025-06-24T21:32:34.304084Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631980905731567:2144];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001799/r3tmp/tmpzxWI1Z/pdisk_1.dat 2025-06-24T21:32:34.412081Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-24T21:32:34.438127Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:34.441582Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631980905731448:2079] 1750800754154279 != 1750800754154282 2025-06-24T21:32:34.451367Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:34.451460Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 1738, node 2 2025-06-24T21:32:34.457181Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-24T21:32:34.606311Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:32:34.606334Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:32:34.606342Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:32:34.606446Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27600 TClient is connected to server localhost:27600 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:32:35.236516Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
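
Further down in this dump the KQP optimizer logs every rewrite rule it fires while compiling the view queries (KqpLogical-RewriteAggregate, KqpLogical-JoinToIndexLookup, KqpPhysical-BuildReadTableRangesStage, KqpPeephole-RewriteMapJoinWithMapCore, and so on, all via yql_optimize.cpp). Structurally that trace looks like a rule-driven rewrite loop: named transformations are applied to the expression until no rule changes it any more. The Python sketch below shows only that driving loop with made-up demo rules; it is not the yql_optimize.cpp machinery.

    # Skeleton of a rule-driven rewrite loop, echoing the KqpLogical-*/KqpPhysical-*
    # rule names logged below. The rules here are made up; only the
    # apply-until-fixpoint structure is the point.
    def rewrite_to_fixpoint(expr, rules, max_passes=100):
        for _ in range(max_passes):
            changed = False
            for name, rule in rules:
                new_expr = rule(expr)
                if new_expr != expr:
                    print(f"applied {name}")   # like the yql_optimize.cpp INFO lines
                    expr = new_expr
                    changed = True
            if not changed:                    # fixpoint reached
                return expr
        raise RuntimeError("optimizer did not converge")

    rules = [("Demo-DropPlusZero", lambda e: e.replace("+0", ""))]
    print(rewrite_to_fixpoint("(a+0)*(b+0)", rules))   # applied once -> (a)*(b)
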
2025-06-24T21:32:35.248438Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-24T21:32:35.259321Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:32:37.949421Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631993790633970:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:32:37.949521Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } Trying to start YDB, gRPC: 12339, MsgBus: 64914 2025-06-24T21:32:38.839900Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519631998596620230:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:32:38.839965Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001799/r3tmp/tmp6RNDib/pdisk_1.dat 2025-06-24T21:32:39.068471Z node 3 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:32:39.069882Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519631998596620211:2079] 1750800758815151 != 1750800758815154 2025-06-24T21:32:39.079377Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:32:39.079460Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:32:39.082055Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12339, node 3 2025-06-24T21:32:39.123568Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:32:39.123590Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:32:39.123597Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:32:39.123699Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64914 TClient is connected to server localhost:64914 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 ... 
25-06-24 21:36:44.889 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpLogical-RewriteAggregate 2025-06-24 21:36:44.904 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpLogical-RewriteEquiJoin 2025-06-24 21:36:44.927 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpLogical-JoinToIndexLookup 2025-06-24 21:36:44.976 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpLogical-JoinToIndexLookup 2025-06-24 21:36:45.047 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-BuildReadTableRangesStage 2025-06-24 21:36:45.069 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-PushAggregateCombineToStage 2025-06-24 21:36:45.090 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-ExpandAggregatePhase 2025-06-24 21:36:45.116 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-ExpandAggregatePhase 2025-06-24 21:36:45.145 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-ExpandAggregatePhase 2025-06-24 21:36:45.172 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-ExpandAggregatePhase 2025-06-24 21:36:45.220 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-BuildShuffleStage 2025-06-24 21:36:45.252 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-BuildPrecompute 2025-06-24 21:36:45.278 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-BuildStreamLookupTableStages 2025-06-24 21:36:45.310 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-PrecomputeToInput 2025-06-24 21:36:45.349 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-PushSkipNullMembersToStage 2025-06-24 21:36:45.379 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-BuildJoin 2025-06-24 21:36:45.420 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-PrecomputeToInput 2025-06-24 21:36:45.453 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-BuildPrecompute 2025-06-24 21:36:45.482 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-BuildStreamLookupTableStages 2025-06-24 21:36:45.512 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-PrecomputeToInput 2025-06-24 21:36:45.541 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-PushSkipNullMembersToStage 2025-06-24 21:36:45.571 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-BuildJoin 2025-06-24 21:36:45.614 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-PrecomputeToInput 2025-06-24 21:36:45.644 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-PushExtractMembersToStage 2025-06-24 21:36:45.674 
INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-PushFlatmapToStage 2025-06-24 21:36:45.705 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-BuildSortStage 2025-06-24 21:36:45.742 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPhysical-RewriteKqpReadTable 2025-06-24 21:36:45.936 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPeephole-RewriteMapJoinWithMapCore 2025-06-24 21:36:46.095 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPeephole-RewriteMapJoinWithMapCore 2025-06-24 21:36:46.290 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] yql_optimize.cpp:135: KqpPeepholeFinal-SetCombinerMemoryLimit 2025-06-24 21:36:46.641 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] kqp_host.cpp:1386: Compiled query: ( (return (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"/Root/count_episodes_with_titles")) '('typeId (String '"VIEW"))) (Void) '('('mode 'dropObject)))) ) 2025-06-24 21:36:46.649 INFO ydb-core-kqp-ut-view(pid=1112693, tid=0x00007FE75DDC0640) [KQP] kqp_transform.cpp:33: Optimized expr: ( (let $1 (DataSink '"kikimr" '"db")) (let $2 (KiDropObject! world $1 '"/Root/count_episodes_with_titles" '"VIEW" '() '0)) (return (Commit! $2 $1 '('('"mode" '"flush")))) ) Trying to start YDB, gRPC: 28569, MsgBus: 5545 2025-06-24T21:36:48.218552Z node 23 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[23:7519633075219052500:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:36:48.218719Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001799/r3tmp/tmpSk1cne/pdisk_1.dat 2025-06-24T21:36:48.446128Z node 23 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [23:7519633075219052481:2079] 1750801008217825 != 1750801008217828 2025-06-24T21:36:48.446244Z node 23 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:36:48.467227Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:36:48.467374Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:36:48.470775Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28569, node 23 2025-06-24T21:36:48.539050Z node 23 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:36:48.539078Z node 23 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:36:48.539094Z node 23 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:36:48.539317Z node 23 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5545 2025-06-24T21:36:49.230679Z node 23 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5545 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:36:49.860592Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:36:53.218614Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[23:7519633075219052500:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:36:53.218776Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:36:55.728643Z node 23 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [23:7519633105283824205:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:36:55.728887Z node 23 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:36:55.798635Z node 23 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [23:7519633105283824233:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:36:55.798781Z node 23 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [23:7519633105283824238:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:36:55.798825Z node 23 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-24T21:36:55.805436Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-24T21:36:55.821545Z node 23 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [23:7519633105283824240:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-24T21:36:55.903541Z node 23 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [23:7519633105283824291:2361] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } |99.3%| [TM] {RESULT} ydb/core/kqp/ut/view/unittest >> test_indexes.py::TestSecondaryIndexes::test_create_table_with_global_index [GOOD] >> KafkaProtocol::LoginWithApiKey [GOOD] >> KafkaProtocol::LoginWithApiKeyWithoutAt |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_crud.py::TestCRUDOperations::test_create_table_and_drop_table_success >> test_public_api.py::TestCRUDOperations::test_query_set1 [GOOD] >> test_public_api.py::TestCRUDOperations::test_queries_set2 >> test_public_api.py::TestCRUDOperations::test_queries_set2 [GOOD] >> test_public_api.py::TestCRUDOperations::test_when_result_set_is_large_then_issue_occure |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_session_pool.py::TestSessionPool::test_session_pool_min_size_feature [GOOD] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_drain.py::TestHive::test_drain_tablets [GOOD] >> Etcd_Lease::SimpleGrantAndRevoke [GOOD] >> Etcd_Lease::TimeToLive |99.3%| [TA] $(B)/ydb/tests/functional/hive/test-results/py3test/{meta.json ... results_accumulator.log} >> TDqPqReadActorTest::WatermarkCheckpointWithItemsInReadyBuffer [GOOD] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test |99.3%| [TA] {RESULT} $(B)/ydb/tests/functional/hive/test-results/py3test/{meta.json ... 
results_accumulator.log} >> TPqWriterTest::TestWriteToTopic >> TPqWriterTest::TestWriteToTopic [GOOD] >> TPqWriterTest::TestWriteToTopicMultiBatch >> TPqWriterTest::TestWriteToTopicMultiBatch [GOOD] >> TAsyncIndexTests::CdcAndSplitWithReboots[TabletReboots] [GOOD] >> TPqWriterTest::TestDeferredWriteToTopic >> test_crud.py::TestCRUDOperations::test_create_table_and_drop_table_success [GOOD] >> test_crud.py::TestCRUDOperations::test_create_table_wrong_primary_key_failed1 [GOOD] >> test_crud.py::TestCRUDOperations::test_create_table_wrong_primary_key_failed2 [GOOD] >> TPqWriterTest::TestDeferredWriteToTopic [GOOD] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> TPqWriterTest::WriteNonExistentTopic >> TPqWriterTest::WriteNonExistentTopic [GOOD] >> TPqWriterTest::TestCheckpoints >> KafkaProtocol::LoginWithApiKeyWithoutAt [GOOD] >> KafkaProtocol::MetadataScenario ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::CdcAndSplitWithReboots[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T21:30:34.441977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:30:34.442065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:34.442115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:30:34.442152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:30:34.442220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:30:34.442284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:30:34.442350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:34.442425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 
600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:30:34.443258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:30:34.443692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:30:34.528006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:30:34.528075Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:34.528925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T21:30:34.544763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:30:34.545277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:30:34.545445Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:30:34.554593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:30:34.554869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:30:34.555749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:34.556024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:30:34.559422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:34.559654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:30:34.561006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:34.561099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:34.561352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:30:34.561407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:34.561455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:30:34.561631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: 
[1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T21:30:34.571271Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:30:34.722417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:34.722672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:34.722908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:30:34.722958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:30:34.723195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:30:34.723353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:34.725946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:34.726191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:30:34.726439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:34.726515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:30:34.726560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:30:34.726606Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:30:34.729172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:34.729243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 
72057594046678944 2025-06-24T21:30:34.729288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:30:34.731414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:34.731477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:34.731522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:34.731586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:30:34.735404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:34.737842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:30:34.738096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:30:34.739272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:34.739429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969454 } } Step: 5000001 Media ... 
ue BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 2 IsBackup: false CdcStreams { Name: "Stream" Mode: ECdcStreamModeKeysOnly PathId { OwnerId: 72057594046678944 LocalId: 6 } State: ECdcStreamStateReady SchemaVersion: 1 Format: ECdcStreamFormatProto VirtualTimestamps: false AwsRegion: "" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\001\000\004\000\000\0002\000\000\000" IsPoint: false IsInclusive: false DatashardId: 72075186233409550 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 
100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:37:07.630951Z node 118 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409550:2][72075186233409546][118:1077:2861] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-24T21:37:07.631055Z node 118 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409550:2][118:1037:2861] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-24T21:37:07.631220Z node 118 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409550:2][72075186233409546][118:1077:2861] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750801027595396 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750801027595396 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 },{ Order: 5 Group: 1750801027595396 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 }] } 2025-06-24T21:37:07.633419Z node 118 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409550:2][72075186233409546][118:1077:2861] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 5 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 5 2025-06-24T21:37:07.633511Z node 118 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409550:2][118:1037:2861] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-24T21:37:07.895164Z node 118 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:37:07.895463Z node 118 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 329us result status StatusSuccess 2025-06-24T21:37:07.896359Z node 118 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: 
"/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 
RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_indexes.py::TestSecondaryIndexes::test_create_table_with_global_index [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> TPqWriterTest::TestCheckpoints [GOOD] >> TPqWriterTest::TestCheckpointWithEmptyBatch >> TPqWriterTest::TestCheckpointWithEmptyBatch [GOOD] >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3] >> test_isolation.py::TestTransactionIsolation::test_prevents_write_cycles_g0 >> test_session_grace_shutdown.py::Test::test_grace_shutdown_of_session >> test_insert.py::TestInsertOperations::test_query_pairs [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/pq_async_io/ut/unittest >> TPqWriterTest::TestCheckpointWithEmptyBatch [GOOD] Test command err: 2025-06-24T21:33:30.051851Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:528: SelfId: [0:0:0], TxId: query_1, task: 0. PQ source. Start read actor, local row dispatcher [1:7519632219077179377:2054], metadatafields: , partitions: 666 2025-06-24T21:33:30.307267Z node 1 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:729: SelfId: [0:0:0], TxId: query_1, task: 0. PQ source. GetAsyncInputData freeSpace = 12345 2025-06-24T21:33:30.307352Z node 1 :KQP_COMPUTE DEBUG: dq_pq_rd_read_actor.cpp:1387: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. Switch to single-cluster mode 2025-06-24T21:33:30.307370Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:578: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. Send TEvCoordinatorChangesSubscribe to local RD ([1:7519632219077179377:2054]) 2025-06-24T21:33:30.307418Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:605: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. 
Send TEvCoordinatorChangesSubscribe to local row dispatcher, self id [1:7519632223372146679:2048] 2025-06-24T21:33:30.311245Z node 1 :KQP_COMPUTE DEBUG: dq_pq_rd_read_actor.cpp:921: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. TEvCoordinatorChanged, new coordinator [1:7519632219077179378:2055] 2025-06-24T21:33:30.311322Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:617: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. Send TEvCoordinatorRequest to coordinator [1:7519632219077179378:2055], partIds: 666 cookie 1 2025-06-24T21:33:30.312399Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:965: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvCoordinatorResult from [1:7519632219077179378:2055], cookie 1 2025-06-24T21:33:30.312439Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:1224: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. UpdateSessions, Sessions size 0 2025-06-24T21:33:30.312450Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:1227: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. Distribution is changed, remove sessions 2025-06-24T21:33:30.312475Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:1246: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. Create session to [1:7519632219077179380:2057], generation 1 2025-06-24T21:33:30.312520Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:681: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. Send TEvStartSession to [1:7519632219077179380:2057], connection id 1 partitions offsets (666 / ), 2025-06-24T21:33:30.319407Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:789: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvStartSessionAck from [1:7519632219077179380:2057], seqNo 0, ConfirmedSeqNo 0, generation 1 2025-06-24T21:33:30.327304Z node 1 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:857: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvNewDataArrived from [1:7519632219077179380:2057], partition 666, seqNo 0, ConfirmedSeqNo 0 generation 1 2025-06-24T21:33:30.340465Z node 1 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:1029: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvMessageBatch from [1:7519632219077179380:2057], seqNo 0, ConfirmedSeqNo 0 generation 1 2025-06-24T21:33:30.340506Z node 1 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:1065: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. TEvMessageBatch NextOffset 1 2025-06-24T21:33:30.340510Z node 1 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:1065: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. TEvMessageBatch NextOffset 2 2025-06-24T21:33:30.343282Z node 1 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:729: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. GetAsyncInputData freeSpace = 1000 2025-06-24T21:33:30.343462Z node 1 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:750: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. NextOffset 2 2025-06-24T21:33:30.343479Z node 1 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:754: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. 
Return 2 rows, buffer size 0, free space 948, result size 52 2025-06-24T21:33:30.347737Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:706: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. PassAway 2025-06-24T21:33:30.347826Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:1114: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. State: used buffer size 0 ready buffer event size 0 state 5 InFlyAsyncInputData 0 Counters: CoordinatorChanged 1 CoordinatorResult 1 MessageBatch 1 StartSessionAck 1 NewDataArrived 1 SessionError 0 Statistics 0 NodeDisconnected 0 NodeConnected 0 Undelivered 0 Retry 0 PrivateHeartbeat 0 SessionClosed 0 Pong 0 Heartbeat 0 PrintState 0 ProcessState 0 GetAsyncInputData 2 NotifyCA 1 [1:7519632219077179380:2057] status 2 is waiting ack 0 connection id 1 id 1, LocalRecipient partitions 666 offsets 666=2 has pending data 2025-06-24T21:33:30.347836Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:698: SelfId: [1:7519632223372146679:2048], TxId: query_1, task: 0, Cluster: . PQ source. Send StopSession to [1:7519632219077179380:2057] generation 1 2025-06-24T21:33:31.393016Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:528: SelfId: [0:0:0], TxId: query_1, task: 0. PQ source. Start read actor, local row dispatcher [3:7519632226477417945:2054], metadatafields: , partitions: 666 2025-06-24T21:33:31.617949Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:729: SelfId: [0:0:0], TxId: query_1, task: 0. PQ source. GetAsyncInputData freeSpace = 12345 2025-06-24T21:33:31.618000Z node 3 :KQP_COMPUTE DEBUG: dq_pq_rd_read_actor.cpp:1387: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. Switch to single-cluster mode 2025-06-24T21:33:31.618015Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:578: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. Send TEvCoordinatorChangesSubscribe to local RD ([3:7519632226477417945:2054]) 2025-06-24T21:33:31.618039Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:605: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. Send TEvCoordinatorChangesSubscribe to local row dispatcher, self id [3:7519632226477417951:2048] 2025-06-24T21:33:31.618466Z node 3 :KQP_COMPUTE DEBUG: dq_pq_rd_read_actor.cpp:921: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. TEvCoordinatorChanged, new coordinator [3:7519632226477417946:2055] 2025-06-24T21:33:31.618493Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:617: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. Send TEvCoordinatorRequest to coordinator [3:7519632226477417946:2055], partIds: 666 cookie 1 2025-06-24T21:33:31.618660Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:965: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvCoordinatorResult from [3:7519632226477417946:2055], cookie 1 2025-06-24T21:33:31.618673Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:1224: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. UpdateSessions, Sessions size 0 2025-06-24T21:33:31.618681Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:1227: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. 
Distribution is changed, remove sessions 2025-06-24T21:33:31.618719Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:1246: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. Create session to [3:7519632226477417948:2057], generation 1 2025-06-24T21:33:31.618758Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:681: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. Send TEvStartSession to [3:7519632226477417948:2057], connection id 1 partitions offsets (666 / ), 2025-06-24T21:33:31.619232Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:789: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvStartSessionAck from [3:7519632226477417948:2057], seqNo 0, ConfirmedSeqNo 0, generation 1 2025-06-24T21:33:31.619418Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:857: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvNewDataArrived from [3:7519632226477417948:2057], partition 666, seqNo 0, ConfirmedSeqNo 0 generation 1 2025-06-24T21:33:31.620461Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:1029: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvMessageBatch from [3:7519632226477417948:2057], seqNo 0, ConfirmedSeqNo 0 generation 1 2025-06-24T21:33:31.620495Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:1065: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. TEvMessageBatch NextOffset 1 2025-06-24T21:33:31.620502Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:1065: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. TEvMessageBatch NextOffset 2 2025-06-24T21:33:31.620707Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:729: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. GetAsyncInputData freeSpace = 1000 2025-06-24T21:33:31.620839Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:750: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. NextOffset 2 2025-06-24T21:33:31.620859Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:754: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. Return 2 rows, buffer size 0, free space 948, result size 52 2025-06-24T21:33:31.621144Z node 3 :KQP_COMPUTE DEBUG: dq_pq_rd_read_actor.cpp:1003: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvUndelivered, TSystem::Undelivered from [3:7519632226477417948:2057], reason Disconnected, cookie 999 2025-06-24T21:33:31.621272Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:857: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvNewDataArrived from [3:7519632226477417948:2057], partition 666, seqNo 0, ConfirmedSeqNo 0 generation 1 2025-06-24T21:33:31.621758Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:1029: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvMessageBatch from [3:7519632226477417948:2057], seqNo 0, ConfirmedSeqNo 0 generation 1 2025-06-24T21:33:31.621778Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:1065: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. 
TEvMessageBatch NextOffset 3 2025-06-24T21:33:31.621980Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:729: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. GetAsyncInputData freeSpace = 1000 2025-06-24T21:33:31.622037Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:750: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. NextOffset 3 2025-06-24T21:33:31.622047Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:754: SelfId: [3:7519632226477417951:2048], TxId: query_1, task: 0, Cluster: . PQ source. Return 1 rows, buffer size 0, free spa ... [d2cfbf31-fd4e56f6-fe917a9a-e4ba335|bbbb181b-4f0c947f-32119bed-88ac7c07_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-24T21:37:10.299923Z :INFO: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335|bbbb181b-4f0c947f-32119bed-88ac7c07_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-24T21:37:10.299987Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335|bbbb181b-4f0c947f-32119bed-88ac7c07_0] PartitionId [0] Generation [1] Write session: OnReadDone gRpcStatusCode: 1, Msg: CANCELLED, Details: , InternalError: 0 2025-06-24T21:37:10.300045Z :TRACE: [local] TRACE_EVENT Error status=CLIENT_CANCELLED 2025-06-24T21:37:10.300088Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335|bbbb181b-4f0c947f-32119bed-88ac7c07_0] PartitionId [0] Generation [1] Write session is aborting and will not restart 2025-06-24T21:37:10.300145Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335|bbbb181b-4f0c947f-32119bed-88ac7c07_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-24T21:37:10.686939Z node 53 :KQP_COMPUTE DEBUG: dq_pq_write_actor.cpp:252: SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. Load state: { SourceId: "d2cfbf31-fd4e56f6-fe917a9a-e4ba335" ConfirmedSeqNo: 3 EgressBytes: 3 } 2025-06-24T21:37:10.687213Z node 53 :KQP_COMPUTE TRACE: dq_pq_write_actor.cpp:179: SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. SendData. Batch: 2. Checkpoint: 0. Finished: 0 2025-06-24T21:37:10.703805Z node 53 :KQP_COMPUTE TRACE: dq_pq_write_actor.cpp:205: SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. Received data for sending: 4 2025-06-24T21:37:10.703843Z node 53 :KQP_COMPUTE TRACE: dq_pq_write_actor.cpp:205: SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. 
Received data for sending: 5 2025-06-24T21:37:10.721999Z :INFO: [local] OnFederationDiscovery fall back to single mode, database=local E0624 21:37:10.722817348 1243784 dns_resolver_ares.cc:452] no server name supplied in dns URI E0624 21:37:10.722961215 1243784 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-24T21:37:10.723432Z :INFO: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] Starting read session 2025-06-24T21:37:10.723498Z :DEBUG: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] Starting single session 2025-06-24T21:37:10.728890Z :DEBUG: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] [] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:37:10.729242Z :DEBUG: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] [] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:37:10.729397Z :DEBUG: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] [] Reconnecting session to cluster in 0.000000s [] [] Start federated write session to database '' (previous was ) FederationState: { Status: SUCCESS SelfLocation: "" DbInfos: [ { path: "local" endpoint: "localhost:3289" status: AVAILABLE weight: 100 } ] }2025-06-24T21:37:10.730427Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335] Write session: try to update token 2025-06-24T21:37:10.731305Z :INFO: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335] Start write session. Will connect to nodeId: 0 2025-06-24T21:37:10.745118Z :DEBUG: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] [] Successfully connected. Initializing session 2025-06-24T21:37:10.745129Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335] Write session: write to message_group: d2cfbf31-fd4e56f6-fe917a9a-e4ba335 2025-06-24T21:37:10.745267Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335] Write session: send init request: init_request { path: "Checkpoints" producer_id: "d2cfbf31-fd4e56f6-fe917a9a-e4ba335" message_group_id: "d2cfbf31-fd4e56f6-fe917a9a-e4ba335" } 2025-06-24T21:37:10.745306Z :TRACE: [local] TRACE_EVENT InitRequest 2025-06-24T21:37:10.745584Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335] Write session: OnWriteDone gRpcStatusCode: 0 2025-06-24T21:37:10.748291Z :INFO: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] [] Got InitResponse. ReadSessionId: test_client_1_22_11521089890629086330_v1 2025-06-24T21:37:10.748356Z :DEBUG: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] [] In ContinueReadingDataImpl, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-24T21:37:10.748591Z :DEBUG: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 2025-06-24T21:37:10.752962Z :INFO: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] [] Confirm partition stream create. Partition stream id: 1. Cluster: "-". Topic: "Checkpoints". Partition: 0. 
Read offset: (NULL) 2025-06-24T21:37:10.756489Z :DEBUG: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] [] Got ReadResponse, serverBytesSize = 1083, now ReadSizeBudget = 0, ReadSizeServerDelta = 52427717 2025-06-24T21:37:10.756701Z :DEBUG: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] [] In ContinueReadingDataImpl, ReadSizeBudget = 0, ReadSizeServerDelta = 52427717 2025-06-24T21:37:10.757077Z :DEBUG: [local] Decompression task done. Partition/PartitionSessionId: 1 (0-4) 2025-06-24T21:37:10.757168Z :DEBUG: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] [] Returning serverBytesSize = 1083 to budget 2025-06-24T21:37:10.757233Z :DEBUG: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] [] In ContinueReadingDataImpl, ReadSizeBudget = 1083, ReadSizeServerDelta = 52427717 2025-06-24T21:37:10.757600Z :DEBUG: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 2025-06-24T21:37:10.757714Z :DEBUG: [local] Take Data. Partition 0. Read: {0, 0} (0-0) 2025-06-24T21:37:10.757765Z :DEBUG: [local] Take Data. Partition 0. Read: {1, 0} (1-1) 2025-06-24T21:37:10.757833Z :DEBUG: [local] Take Data. Partition 0. Read: {2, 0} (2-2) 2025-06-24T21:37:10.757883Z :DEBUG: [local] Take Data. Partition 0. Read: {3, 0} (3-3) 2025-06-24T21:37:10.757967Z :DEBUG: [local] Take Data. Partition 0. Read: {4, 0} (4-4) 2025-06-24T21:37:10.758133Z :DEBUG: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] [] The application data is transferred to the client. Number of messages 5, size 5 bytes 2025-06-24T21:37:10.758146Z :INFO: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] Closing read session. Close timeout: 0.000000s 2025-06-24T21:37:10.758201Z :DEBUG: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] [] Returning serverBytesSize = 0 to budget 2025-06-24T21:37:10.758265Z :INFO: [local] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:Checkpoints:0:1:4:0 2025-06-24T21:37:10.758334Z :INFO: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] Counters: { Errors: 0 CurrentSessionLifetimeMs: 34 BytesRead: 5 MessagesRead: 5 BytesReadCompressed: 5 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:37:10.758475Z :NOTICE: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-24T21:37:10.758556Z :DEBUG: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] [] Abort session to cluster 2025-06-24T21:37:10.759550Z :INFO: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] Closing read session. Close timeout: 0.000000s 2025-06-24T21:37:10.759632Z :INFO: [local] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:Checkpoints:0:1:4:0 2025-06-24T21:37:10.759702Z :INFO: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] Counters: { Errors: 0 CurrentSessionLifetimeMs: 36 BytesRead: 5 MessagesRead: 5 BytesReadCompressed: 5 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:37:10.759850Z :NOTICE: [local] [local] [360680d-3ba5562d-61fce254-48e0d782] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-24T21:37:10.762844Z :INFO: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335] Write session: close. Timeout 0.000000s 2025-06-24T21:37:10.762890Z :INFO: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335] Write session will now close 2025-06-24T21:37:10.762969Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335] Write session: aborting 2025-06-24T21:37:10.763886Z :INFO: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335] Write session: gracefully shut down, all writes complete 2025-06-24T21:37:10.764418Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335] Write session: OnReadDone gRpcStatusCode: 0 2025-06-24T21:37:10.764491Z :INFO: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750801030764 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-24T21:37:10.764607Z :INFO: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335] Write session established. Init response: last_seq_no: 5 session_id: "d2cfbf31-fd4e56f6-fe917a9a-e4ba335|a20859ad-a3cecc18-71ebc15d-99493fbf_0" 2025-06-24T21:37:10.764661Z :TRACE: [local] TRACE_EVENT InitResponse partition_id=0 session_id=d2cfbf31-fd4e56f6-fe917a9a-e4ba335|a20859ad-a3cecc18-71ebc15d-99493fbf_0 2025-06-24T21:37:10.764717Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335|a20859ad-a3cecc18-71ebc15d-99493fbf_0] MessageGroupId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335] Write session: set DirectWriteToPartitionId 0 2025-06-24T21:37:10.764818Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [d2cfbf31-fd4e56f6-fe917a9a-e4ba335|a20859ad-a3cecc18-71ebc15d-99493fbf_0] PartitionId [0] Generation [0] Write session: destroy 2025-06-24T21:37:11.325457Z node 54 :KQP_COMPUTE TRACE: dq_pq_write_actor.cpp:179: SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. SendData. Batch: 0. Checkpoint: 1. Finished: 0 2025-06-24T21:37:11.339713Z node 54 :KQP_COMPUTE DEBUG: dq_pq_write_actor.cpp:224: SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. [Checkpoint 0.0] Send checkpoint state immediately 2025-06-24T21:37:11.340419Z node 54 :KQP_COMPUTE TRACE: dq_pq_write_actor.cpp:396: SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. 
Save checkpoint { Id: 0 Generation: 0 } state: { SourceId: "2d44c341-fdc414ae-c38e2dd0-1e1dd4c" } |99.4%| [TM] {RESULT} ydb/tests/fq/pq_async_io/ut/unittest >> test_public_api.py::TestCRUDOperations::test_when_result_set_is_large_then_issue_occure [GOOD] >> KafkaProtocol::MetadataScenario [GOOD] >> KafkaProtocol::MetadataInServerlessScenario >> test_cms_state_storage.py::TestCmsStateStorageSimpleKeep::test_check_shutdown_state_storage_nodes >> test_isolation.py::TestTransactionIsolation::test_prevents_write_cycles_g0 [GOOD] >> test_isolation.py::TestTransactionIsolation::test_prevents_aborted_reads_g1a >> test_isolation.py::TestTransactionIsolation::test_prevents_aborted_reads_g1a [GOOD] >> test_isolation.py::TestTransactionIsolation::test_prevents_intermediate_reads_g1b >> test_isolation.py::TestTransactionIsolation::test_prevents_intermediate_reads_g1b [GOOD] >> test_isolation.py::TestTransactionIsolation::test_prevents_circular_information_flow_g1c >> test_isolation.py::TestTransactionIsolation::test_prevents_circular_information_flow_g1c [GOOD] >> test_isolation.py::TestTransactionIsolation::test_isolation_mailing_list_example >> test_session_grace_shutdown.py::Test::test_grace_shutdown_of_session [GOOD] >> test_isolation.py::TestTransactionIsolation::test_isolation_mailing_list_example [GOOD] >> test_isolation.py::TestTransactionIsolation::test_prevents_observed_transaction_vanishes_otv >> test_read_table.py::TestReadTableTruncatedResults::test_truncated_results[async_read_table] >> test_isolation.py::TestTransactionIsolation::test_prevents_observed_transaction_vanishes_otv [GOOD] >> test_isolation.py::TestTransactionIsolation::test_does_not_prevent_predicate_many_preceders_pmp |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] >> test_isolation.py::TestTransactionIsolation::test_does_not_prevent_predicate_many_preceders_pmp [GOOD] >> test_isolation.py::TestTransactionIsolation::test_does_not_prevent_predicate_many_preceders_pmp_for_write_predicates >> test_isolation.py::TestTransactionIsolation::test_does_not_prevent_predicate_many_preceders_pmp_for_write_predicates [GOOD] >> test_isolation.py::TestTransactionIsolation::test_lost_update_p4 >> test_discovery.py::TestDiscoveryFaultInjectionSlotStop::test_scenario [GOOD] >> test_isolation.py::TestTransactionIsolation::test_lost_update_p4 [GOOD] >> test_isolation.py::TestTransactionIsolation::test_lost_update_on_value_p4 >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] >> test_isolation.py::TestTransactionIsolation::test_lost_update_on_value_p4 [GOOD] >> test_isolation.py::TestTransactionIsolation::test_lost_update_on_value_with_upsert_p4 >> KafkaProtocol::MetadataInServerlessScenario [GOOD] >> KafkaProtocol::NativeKafkaBalanceScenario >> test_isolation.py::TestTransactionIsolation::test_lost_update_on_value_with_upsert_p4 [GOOD] >> test_isolation.py::TestTransactionIsolation::test_read_skew_g_single >> Etcd_Lease::TimeToLive [GOOD] >> Etcd_Lease::Leases >> test_isolation.py::TestTransactionIsolation::test_read_skew_g_single [GOOD] >> test_isolation.py::TestTransactionIsolation::test_read_skew_g_single_predicate_deps >> test_isolation.py::TestTransactionIsolation::test_read_skew_g_single_predicate_deps [GOOD] >> test_isolation.py::TestTransactionIsolation::test_read_skew_g_single_write_predicate >> 
test_crud.py::TestSelect::test_advanced_select_failed[select distinct b, a from (select a, b from t1 union all select b, a from t1 order by b) order by B-Column B is not in source column set.*] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_isolation.py::TestTransactionIsolation::test_read_skew_g_single_write_predicate [GOOD] >> test_isolation.py::TestTransactionIsolation::test_write_skew_g2_item >> test_isolation.py::TestTransactionIsolation::test_write_skew_g2_item [GOOD] >> test_isolation.py::TestTransactionIsolation::test_anti_dependency_cycles_g2 >> test_session_grace_shutdown.py::TestIdle::test_idle_shutdown_of_session >> test_isolation.py::TestTransactionIsolation::test_anti_dependency_cycles_g2 [GOOD] >> test_isolation.py::TestTransactionIsolation::test_anti_dependency_cycles_g2_two_edges >> test_isolation.py::TestTransactionIsolation::test_anti_dependency_cycles_g2_two_edges [GOOD] >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_and_change_tablet_channel_then_can_read_from_tablet |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_insert.py::TestInsertOperations::test_query_pairs [GOOD] >> test_read_table.py::TestReadTableTruncatedResults::test_truncated_results[async_read_table] [GOOD] >> test_crud.py::TestSelect::test_advanced_select_failed[select distinct b, a from (select a, b from t1 union all select b, a from t1 order by b) order by B-Column B is not in source column set.*] [GOOD] >> test_crud.py::TestSelect::test_advanced_select_failed[select count(a, b) from t1-Aggregation function Count requires exactly 1 argument] >> test_read_table.py::TestReadTableTruncatedResults::test_truncated_results[read_table] >> test_crud.py::TestSelect::test_advanced_select_failed[select count(a, b) from t1-Aggregation function Count requires exactly 1 argument] [GOOD] >> test_crud.py::TestSelect::test_advanced_select_failed[select min(a, b) from t1-Aggregation function Min requires exactly 1 argument] [GOOD] >> test_crud.py::TestSelect::test_advanced_select_failed[select min(*) from t1-.*is not allowed here] >> test_crud.py::TestSelect::test_advanced_select_failed[select min(*) from t1-.*is not allowed here] [GOOD] >> test_result_limits.py::TestResultLimits::test_many_rows [GOOD] >> Etcd_Lease::Leases [GOOD] >> test_cms_erasure.py::TestDegradedGroupMirror3dcKeep::test_no_degraded_groups_after_shutdown >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_and_change_tablet_channel_then_can_read_from_tablet [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/apps/etcd_proxy/service/ut/unittest >> Etcd_Lease::Leases [GOOD] Test command err: 2025-06-24T21:30:59.838047Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519631573764132917:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:30:59.841569Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0017cc/r3tmp/tmp8dAatM/pdisk_1.dat 2025-06-24T21:31:00.398686Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519631573764132722:2079] 1750800659823377 != 1750800659823380 2025-06-24T21:31:00.420034Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: 
Table profiles were not loaded 2025-06-24T21:31:00.423100Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:31:00.423221Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:00.428393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8004, node 1 2025-06-24T21:31:00.777216Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:31:00.777260Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:31:00.777272Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:31:00.777420Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:31:00.834560Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14126 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:31:01.525073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:31:02.914748Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631586649035260:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:02.914756Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519631586649035249:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:02.914918Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:02.923451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:31:02.939803Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519631586649035263:2293], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:31:03.003389Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519631586649035314:2333] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:31:04.069583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:04.274100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:04.303597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:31:04.426587Z node 1 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037890:1][1:7519631595238970319:2330] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:12:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-24T21:31:04.837128Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519631573764132917:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:31:04.837224Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:31:12.114361Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519631631643023097:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:31:12.114405Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0017cc/r3tmp/tmpcTXIGm/pdisk_1.dat 2025-06-24T21:31:12.252081Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:31:12.253197Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519631631643023076:2079] 1750800672112053 != 1750800672112056 2025-06-24T21:31:12.269308Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-24T21:31:12.269401Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:31:12.271102Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13180, node 2 2025-06-24T21:31:12.314355Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:31:12.314387Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:31:12.314395Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:31:12.314538Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19966 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:31:12.635250Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:31:13.131286Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:31:15.403543Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631644527925586:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:31:15.403605Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519631644527925578:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool ... 6-24T21:37:11.356117Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:37:12.040455Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519633177034601070:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:37:12.040514Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519633177034601051:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:37:12.040725Z node 16 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:37:12.045942Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:37:12.056776Z node 16 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [16:7519633177034601080:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-24T21:37:12.127116Z node 16 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [16:7519633177034601131:2336] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:37:12.225609Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:37:12.310973Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:37:12.365338Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:37:12.474592Z node 16 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037890:1][16:7519633177034601540:2330] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:12:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-24T21:37:21.551385Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7378: Cannot get console configs 2025-06-24T21:37:21.551423Z node 16 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:37:24.807434Z node 17 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[17:7519633229597748267:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:37:24.807564Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/0017cc/r3tmp/tmpir2z6u/pdisk_1.dat 2025-06-24T21:37:24.990511Z node 17 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [17:7519633229597748248:2079] 1750801044806784 != 1750801044806787 2025-06-24T21:37:25.001888Z node 17 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:37:25.004028Z node 17 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(17, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:37:25.004142Z node 17 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(17, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:37:25.009612Z node 17 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(17, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27342, node 17 2025-06-24T21:37:25.061797Z node 17 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-24T21:37:25.061823Z node 17 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-24T21:37:25.061834Z node 17 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-24T21:37:25.061984Z node 17 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13037 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:37:25.626788Z node 17 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-24T21:37:25.816360Z node 17 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:37:29.807953Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[17:7519633229597748267:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:37:29.808100Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:37:30.283344Z node 17 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [17:7519633255367552662:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:37:30.283453Z node 17 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [17:7519633255367552670:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:37:30.283550Z node 17 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:37:30.289739Z node 17 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:37:30.305514Z node 17 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [17:7519633255367552676:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-24T21:37:30.401567Z node 17 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [17:7519633255367552728:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:37:30.518599Z node 17 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:37:30.582311Z node 17 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:37:30.664684Z node 17 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:37:30.752451Z node 17 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037890:1][17:7519633255367553134:2328] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:12:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } |99.4%| [TM] {RESULT} ydb/apps/etcd_proxy/service/ut/unittest >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3] [GOOD] >> test_read_table.py::TestReadTableTruncatedResults::test_truncated_results[read_table] [GOOD] >> test_discovery.py::TestMirror3DCDiscovery::test_mirror3dc_discovery_logic >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--true] >> test_public_api.py::TestSessionNotFound::test_session_not_found >> test_serverless.py::test_create_table[enable_alter_database_create_hive_first--true] [GOOD] >> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--false] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_and_change_tablet_channel_then_can_read_from_tablet [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_isolation.py::TestTransactionIsolation::test_anti_dependency_cycles_g2_two_edges [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_crud.py::TestClientTimeouts::test_can_set_timeouts_on_query |99.4%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/cms/py3test >> test_session_grace_shutdown.py::TestIdle::test_idle_shutdown_of_session [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3] [GOOD] >> test_cms_restart.py::TestCmsStateStorageRestartsBlockMax::test_restart_as_much_as_can >> test_cms_erasure.py::TestDegradedGroupMirror3dcMax::test_no_degraded_groups_after_shutdown |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_read_table.py::TestReadTableWithTabletKills::test_read_table_async_simple[async_read_table] >> test_crud.py::TestClientTimeouts::test_can_set_timeouts_on_query [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_session_grace_shutdown.py::TestIdle::test_idle_shutdown_of_session [GOOD] >> test_discovery.py::TestMirror3DCDiscovery::test_mirror3dc_discovery_logic [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_read_table.py::TestReadTableWithTabletKills::test_read_table_async_simple[async_read_table] [GOOD] >> test_read_table.py::TestReadTableWithTabletKills::test_read_table_async_simple[read_table] >> test_read_table.py::TestReadTableWithTabletKills::test_read_table_async_simple[read_table] [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] >> test_cms_state_storage.py::TestCmsStateStorageSimpleMax::test_check_shutdown_state_storage_nodes >> test_public_api.py::TestSessionNotFound::test_session_not_found [GOOD] >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--true] [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--false] [GOOD] >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorKeep::test_restart_as_much_as_can |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_discovery.py::TestMirror3DCDiscovery::test_mirror3dc_discovery_logic [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_read_table.py::TestReadTableWithTabletKills::test_read_table_async_simple[read_table] [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_public_api.py::TestSessionNotFoundOperations::test_session_pool |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> TAsyncIndexTests::CdcAndMergeWithReboots[TabletReboots] [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--true] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::CdcAndMergeWithReboots[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for 
TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T21:29:28.444730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:29:28.444831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:29:28.444872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:29:28.444920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:29:28.444971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:29:28.445014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:29:28.445074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:29:28.445148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:29:28.445905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:29:28.446231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:29:28.532585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-24T21:29:28.532656Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:29:28.533441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T21:29:28.550516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:29:28.551006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 
2025-06-24T21:29:28.551210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:29:28.562034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:29:28.562339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:29:28.563048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:29:28.563350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:29:28.566708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:29:28.566928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:29:28.568185Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:29:28.568246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:29:28.568457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:29:28.568513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:29:28.568555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:29:28.568694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T21:29:28.575878Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:29:28.703896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:29:28.704128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:28.704348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 
2025-06-24T21:29:28.704397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:29:28.704651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:29:28.704788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:29:28.707375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:29:28.707596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:29:28.707866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:28.707948Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:29:28.708007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:29:28.708050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:29:28.710293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:28.710352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:29:28.710407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:29:28.712403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:28.712454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:29:28.712510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:29:28.712574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:29:28.716291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 
2025-06-24T21:29:28.718023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:29:28.718213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:29:28.719037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:29:28.719193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969454 } } Step: 5000001 Media ... ionId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 2 IsBackup: false CdcStreams { Name: "Stream" Mode: ECdcStreamModeKeysOnly PathId { OwnerId: 72057594046678944 LocalId: 6 } State: ECdcStreamStateReady SchemaVersion: 1 Format: ECdcStreamFormatProto VirtualTimestamps: false AwsRegion: "" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false 
DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 2 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:38:30.552073Z node 164 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409551:2][72075186233409546][164:1163:2943] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-24T21:38:30.552211Z node 164 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409551:2][164:1134:2943] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-24T21:38:30.552387Z node 164 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409551:2][72075186233409546][164:1163:2943] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750801110507059 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750801110507059 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 },{ Order: 5 Group: 1750801110507059 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 }] } 2025-06-24T21:38:30.555211Z node 164 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409551:2][72075186233409546][164:1163:2943] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: 
REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 5 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 5 2025-06-24T21:38:30.555343Z node 164 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409551:2][164:1134:2943] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-24T21:38:30.819537Z node 164 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:38:30.819821Z node 164 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 308us result status StatusSuccess 2025-06-24T21:38:30.820488Z node 164 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 
ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 2 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_crud.py::TestManySelectsInRow::test_selects_in_row_success[500-500-50] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_public_api.py::TestSessionNotFoundOperations::test_session_pool [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_ok_keep_alive_example [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_can_commit_bad_tx >> test_public_api.py::TestSessionNotFoundOperations::test_can_commit_bad_tx [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_cannot_commit_bad_tx [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_commit_successfully_after_success_commit >> test_public_api.py::TestSessionNotFoundOperations::test_commit_successfully_after_success_commit [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_invalid_keep_alive_example [GOOD] >> 
test_public_api.py::TestSessionNotFoundOperations::test_describe_table_with_bounds >> test_public_api.py::TestSessionNotFoundOperations::test_describe_table_with_bounds [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_native_datetime_types [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_native_date_types >> test_public_api.py::TestSessionNotFoundOperations::test_native_date_types [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_keep_in_cache_disabled >> test_public_api.py::TestSessionNotFoundOperations::test_keep_in_cache_disabled [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_explicit_partitions_case_1 >> test_public_api.py::TestSessionNotFoundOperations::test_explicit_partitions_case_1 [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_explict_partitions_case_2 [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_simple_table_profile_settings [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--true] [FAIL] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorMax::test_restart_as_much_as_can |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--false] [GOOD] >> test_cms_erasure.py::TestDegradedGroupBlock42Max::test_no_degraded_groups_after_shutdown |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupBlock42Keep::test_no_degraded_groups_after_shutdown |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsBlockKeep::test_restart_as_much_as_can >> test_public_api.py::TestBadSession::test_simple >> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--true] >> KafkaProtocol::NativeKafkaBalanceScenario [GOOD] >> KafkaProtocol::InitProducerId_withoutTransactionalIdShouldReturnRandomInt >> KafkaProtocol::InitProducerId_withoutTransactionalIdShouldReturnRandomInt [GOOD] >> KafkaProtocol::InitProducerId_forNewTransactionalIdShouldReturnIncrementingInt >> test_public_api.py::TestBadSession::test_simple [GOOD] >> test_cms_state_storage.py::TestCmsStateStorageSimpleKeep::test_check_shutdown_state_storage_nodes [GOOD] >> test_public_api.py::TestDriverCanRecover::test_driver_recovery |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--true] [FAIL] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_state_storage.py::TestCmsStateStorageSimpleKeep::test_check_shutdown_state_storage_nodes [GOOD] >> 
test_restarts.py::TestRestartClusterMirror3DC::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok >> KafkaProtocol::InitProducerId_forNewTransactionalIdShouldReturnIncrementingInt [GOOD] >> KafkaProtocol::InitProducerId_forSqlInjectionShouldReturnWithoutDropingDatabase >> test_restarts.py::TestRestartMultipleBlock42::test_tablets_are_successfully_started_after_few_killed_nodes >> test_cms_erasure.py::TestDegradedGroupMirror3dcKeep::test_no_degraded_groups_after_shutdown [GOOD] >> test_public_api.py::TestDriverCanRecover::test_driver_recovery [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupMirror3dcKeep::test_no_degraded_groups_after_shutdown [GOOD] >> KafkaProtocol::InitProducerId_forSqlInjectionShouldReturnWithoutDropingDatabase [GOOD] >> KafkaProtocol::InitProducerId_forPreviouslySeenTransactionalIdShouldReturnSameProducerIdAndIncrementEpoch >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--true] >> test_restarts.py::TestRestartSingleBlock42::test_restart_single_node_is_ok >> test_public_api.py::TestSelectAfterDropWithRepetitions::test_select_on_dropped_table_unsuccessful[10] >> test_cms_erasure.py::TestDegradedGroupMirror3dcMax::test_no_degraded_groups_after_shutdown [GOOD] >> KafkaProtocol::InitProducerId_forPreviouslySeenTransactionalIdShouldReturnSameProducerIdAndIncrementEpoch [GOOD] >> KafkaProtocol::InitProducerId_forPreviouslySeenTransactionalIdShouldReturnNewProducerIdIfEpochOverflown >> test_public_api.py::TestSelectAfterDropWithRepetitions::test_select_on_dropped_table_unsuccessful[10] [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupMirror3dcMax::test_no_degraded_groups_after_shutdown [GOOD] >> test_cms_state_storage.py::TestCmsStateStorageSimpleMax::test_check_shutdown_state_storage_nodes [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartClusterMirror34::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok >> test_result_limits.py::TestResultLimits::test_large_row |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_state_storage.py::TestCmsStateStorageSimpleMax::test_check_shutdown_state_storage_nodes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::MergeBothWithReboots[TabletReboots] 2025-06-24 21:40:32,798 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 21:40:32,948 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. 
Process tree before termination: pid rss ref pdirt 1057369 47.8M 45.6M 24.4M test_tool run_ut @/home/runner/.ya/build/build_root/2btm/003cca/ydb/core/tx/schemeshard/ut_index/test-results/unittest/testing_out_stuff/chunk9/testing_out_stuff/test_tool.ar 1057948 736M 711M 430M └─ ydb-core-tx-schemeshard-ut_index --trace-path-append /home/runner/.ya/build/build_root/2btm/003cca/ydb/core/tx/schemeshard/ut_index/test-results/unittest/testing_out_st Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:129:2058] recipient: [1:111:2141] Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:135:2058] recipient: [1:112:2142] Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:139:2058] recipient: [1:113:2143] 2025-06-24T21:30:34.638475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7565: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-24T21:30:34.638582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7593: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:34.638632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7479: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-24T21:30:34.638673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7495: OperationsProcessing config: using default configuration 2025-06-24T21:30:34.638728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-24T21:30:34.638783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7501: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-24T21:30:34.638861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7625: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-24T21:30:34.638943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-24T21:30:34.639850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-24T21:30:34.640239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-24T21:30:34.723358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7728: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 
2025-06-24T21:30:34.723428Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:30:34.724302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7696: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:133:2155] sender: [1:177:2058] recipient: [1:15:2062] 2025-06-24T21:30:34.741694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-24T21:30:34.742208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-24T21:30:34.742413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-24T21:30:34.755736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-24T21:30:34.756075Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-24T21:30:34.756856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:34.757150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-24T21:30:34.760884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:34.761101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-24T21:30:34.762313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-24T21:30:34.762384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-24T21:30:34.762624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-24T21:30:34.762688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-24T21:30:34.762732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-24T21:30:34.762871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:216:2058] recipient: [1:214:2213] Leader for TabletID 72057594037968897 is [1:220:2217] sender: [1:221:2058] recipient: [1:214:2213] 2025-06-24T21:30:34.773471Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:241:2058] recipient: [1:15:2062] 2025-06-24T21:30:34.919471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:389: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-24T21:30:34.919725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:34.919981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:500: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-24T21:30:34.920031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5230: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-24T21:30:34.920300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:139: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-24T21:30:34.920429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-24T21:30:34.923162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:468: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:34.923385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-24T21:30:34.923617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:34.923694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:314: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-24T21:30:34.923753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:368: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-24T21:30:34.923800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 2 -> 3 2025-06-24T21:30:34.926356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:34.926431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-24T21:30:34.926473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2559: Change state for txid 1:0 3 -> 128 2025-06-24T21:30:34.929066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:502: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:34.929132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-24T21:30:34.929179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-24T21:30:34.929248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1681: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-24T21:30:34.933019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1750: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-24T21:30:34.936341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:653: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-24T21:30:34.936588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1782: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:138:2158] sender: [1:256:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-24T21:30:34.937676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-24T21:30:34.937820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:697: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969454 } } Step: 5000001 Media ... 
iorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409550 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-24T21:40:32.188393Z node 191 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409550:2][72075186233409551][191:1156:2918] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-24T21:40:32.188552Z node 191 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409550:2][191:1115:2918] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409551 } 2025-06-24T21:40:32.188737Z node 191 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409550:2][72075186233409551][191:1156:2918] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750801232164281 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, 
LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1750801232164281 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750801232164281 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-24T21:40:32.192247Z node 191 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409550:2][72075186233409551][191:1156:2918] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 3 2025-06-24T21:40:32.192391Z node 191 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409550:2][191:1115:2918] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409551 } 2025-06-24T21:40:32.367683Z node 191 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-24T21:40:32.368059Z node 191 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 399us result status StatusSuccess 2025-06-24T21:40:32.369036Z node 191 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: 
"compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 =========== RUN: Reboot tablet 72075186233409551 (#13) run 190 =========== Traceback (most recent call last): File 
"library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/003cca/ydb/core/tx/schemeshard/ut_index/test-results/unittest/testing_out_stuff/chunk9/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/003cca/ydb/core/tx/schemeshard/ut_index/test-results/unittest/testing_out_stuff/chunk9/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) >> test_restarts.py::TestRestartMultipleMirror34::test_tablets_are_successfully_started_after_few_killed_nodes >> test_cms_restart.py::TestCmsStateStorageRestartsBlockMax::test_restart_as_much_as_can [GOOD] >> test_public_api.py::TestMetaDataInvalidation::test_invalidation_success >> KafkaProtocol::InitProducerId_forPreviouslySeenTransactionalIdShouldReturnNewProducerIdIfEpochOverflown [GOOD] >> KafkaProtocol::CommitTransactionScenario |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsBlockMax::test_restart_as_much_as_can [GOOD] |99.4%| [TA] $(B)/ydb/core/tx/schemeshard/ut_index/test-results/unittest/{meta.json ... results_accumulator.log} >> test_public_api.py::TestMetaDataInvalidation::test_invalidation_success [GOOD] |99.4%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index/test-results/unittest/{meta.json ... 
results_accumulator.log} |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartSingleMirror3DC::test_restart_single_node_is_ok >> test_cms_erasure.py::TestDegradedGroupBlock42Keep::test_no_degraded_groups_after_shutdown [GOOD] >> test_cms_erasure.py::TestDegradedGroupBlock42Max::test_no_degraded_groups_after_shutdown [GOOD] >> test_public_api.py::TestJsonExample::test_json_unexpected_failure >> test_crud.py::TestManySelectsInRow::test_selects_in_row_success[500-500-50] [GOOD] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupBlock42Keep::test_no_degraded_groups_after_shutdown [GOOD] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupBlock42Max::test_no_degraded_groups_after_shutdown [GOOD] >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorKeep::test_restart_as_much_as_can [GOOD] >> test_public_api.py::TestJsonExample::test_json_unexpected_failure [GOOD] >> test_public_api.py::TestJsonExample::test_json_success >> test_public_api.py::TestJsonExample::test_json_success [GOOD] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorMax::test_restart_as_much_as_can [GOOD] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorKeep::test_restart_as_much_as_can [GOOD] >> KafkaProtocol::CommitTransactionScenario [GOOD] >> KafkaProtocol::AbortTransactionScenario >> test_clean.py::TestClean::test >> test_external.py::TestExternalE1::test[first_query_set.1.sql] >> test_upload.py::TestUploadTpchS1::test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorMax::test_restart_as_much_as_can [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/mem_alloc/py3test >> test_result_limits.py::TestResultLimits::test_large_row 2025-06-24 21:40:57,334 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 21:40:57,671 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. 
Process tree before termination: pid rss ref pdirt 1069215 770M 763M 409M ydb-tests-fq-mem_alloc --basetemp /home/runner/.ya/build/build_root/2btm/00434a/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctest-modules 1283024 707M 15.3M 409M ├─ ydb-tests-fq-mem_alloc --basetemp /home/runner/.ya/build/build_root/2btm/00434a/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctest-m 1283025 2.0G 2.0G 1.6G └─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/2btm/00434a/ydb/tests/fq/mem_alloc/test-results/py3test/testing_out_stuff/test_resu Test command err: File "library/python/pytest/main.py", line 101, in main rc = pytest.main( File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main return wrap_session(config, _main) File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session session.exitstatus = doit(config, session) or 0 File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main config.hook.pytest_runtestloop(session=session) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol runtestprotocol(item, nextitem=nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 134, in runtestprotocol reports.append(call_and_report(item, "call", log)) File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report call = call_runtest_hook(item, when, **kwds) File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook return CallInfo.from_call( File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call result: Optional[TResult] = func() File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in lambda: ihook(item=item, **kwds), when=when, reraise=reraise File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in 
_hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 170, in pytest_runtest_call item.runtest() File "contrib/python/pytest/py3/_pytest/python.py", line 1844, in runtest self.ihook.pytest_pyfunc_call(pyfuncitem=self) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "library/python/pytest/plugins/ya.py", line 563, in pytest_pyfunc_call pyfuncitem.retval = testfunction(**testargs) File "ydb/tests/fq/mem_alloc/test_result_limits.py", line 92, in test_large_row client.wait_query_status(query_id, fq.QueryMeta.FAILED, timeout=600) File "ydb/tests/tools/fq_runner/fq_client.py", line 307, in wait_query_status return self.wait_query(query_id, timeout, statuses=statuses).query.meta.status File "ydb/tests/tools/fq_runner/fq_client.py", line 302, in wait_query time.sleep(plain_or_under_sanitizer(0.5, 2)) File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown traceback.print_stack(file=sys.stderr) Thread 0x00007f806ea30640 (most recent call first): File "ydb/tests/library/common/wait_for.py", line 19 in wait_for File "ydb/tests/library/harness/daemon.py", line 198 in stop File "ydb/tests/library/harness/kikimr_runner.py", line 261 in stop File "ydb/tests/library/harness/kikimr_runner.py", line 574 in __stop_node File "ydb/tests/library/harness/kikimr_runner.py", line 588 in stop_node File "contrib/tools/python3/Lib/threading.py", line 1012 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f80661e2640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/tools/python3/Lib/queue.py", line 180 in get File "contrib/python/ydb/py3/ydb/_sp_impl.py", line 376 in events_loop File "contrib/tools/python3/Lib/threading.py", line 1012 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f806a609640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/python/ydb/py3/ydb/pool.py", line 274 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f807bea6640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/tools/python3/Lib/queue.py", line 180 in get File "contrib/python/ydb/py3/ydb/_sp_impl.py", line 376 in events_loop File "contrib/tools/python3/Lib/threading.py", line 1012 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f806cc1d640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/python/ydb/py3/ydb/pool.py", line 274 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in 
_bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f806b913640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/tools/python3/Lib/queue.py", line 180 in get File "contrib/python/ydb/py3/ydb/_sp_impl.py", line 376 in events_loop File "contrib/tools/python3/Lib/threading.py", line 1012 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f8071845640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/python/ydb/py3/ydb/pool.py", line 274 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f806f231640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/tools/python3/Lib/queue.py", line 180 in get File "contrib/python/ydb/py3/ydb/_sp_impl.py", line 376 in events_loop File "contrib/tools/python3/Lib/threading.py", line 1012 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f8074962640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/python/ydb/py3/ydb/pool.py", line 274 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f807053b640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/tools/python3/Lib/queue.py", line 180 in get File "contrib/python/ydb/py3/ydb/_sp_impl.py", line 376 in events_loop File "contrib/tools/python3/Lib/threading.py", line 1012 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f8072b4f640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/python/ydb/py3/ydb/pool.py", line 274 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f807b39d640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/tools/python3/Lib/queue.py", line 180 in get File "contrib/python/ydb/py3/ydb/_sp_impl.py", line 376 in events_loop File "contrib/tools/python3/Lib/threading.py", line 1012 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f8077a7f640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/python/ydb/py3/ydb/pool.py", line 274 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f8075c6c640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/tools/python3/Lib/queue.py", line 180 in get File "contrib/python/ydb/py3/ydb/_sp_impl.py", line 376 in events_loop File "contrib/tools/python3/Lib/threading.py", line 1012 in run File 
"contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f8079892640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/python/ydb/py3/ydb/pool.py", line 274 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f807a093640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/tools/python3/Lib/queue.py", line 180 in get File "contrib/python/ydb/py3/ydb/_sp_impl.py", line 376 in events_loop File "contrib/tools/python3/Lib/threading.py", line 1012 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f807c6a7640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/python/ydb/py3/ydb/pool.py", line 274 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f8081e3c640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/tools/python3/Lib/queue.py", line 180 in get File "contrib/python/ydb/py3/ydb/_sp_impl.py", line 376 in events_loop File "contrib/tools/python3/Lib/threading.py", line 1012 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f807e4ba640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/python/ydb/py3/ydb/pool.py", line 274 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f807f7da640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/tools/python3/Lib/queue.py", line 180 in get File "contrib/python/ydb/py3/ydb/_sp_impl.py", line 376 in events_loop File "contrib/tools/python3/Lib/threading.py", line 1012 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007f808315c640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/python/ydb/py3/ydb/pool.py", line 274 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Current thread 0x00007f80b2bb4940 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 1169 in _wait_for_tstate_lock File "contrib/tools/python3/Lib/threading.py", line 1149 in join File "ydb/tests/library/harness/kikimr_runner.py", line 599 in stop File "ydb/tests/tools/fq_runner/kikimr_runner.py", line 62 in stop File "ydb/tests/tools/fq_runner/kikimr_runner.py", line 746 in stop File "ydb/tests/fq/mem_alloc/test_result_limits.py", line 47 in kikimr File "contrib/python/pytest/py3/_pytest/fixtures.py", line 926 in _teardown_yield_fixture File "contrib/python/pytest/py3/_pytest/fixtures.py", line 1042 in finish File "contrib/python/pytest/py3/_pytest/runner.py", line 543 in teardown_exact File 
"contrib/python/pytest/py3/_pytest/runner.py", line 109 in pytest_sessionfinish File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/main.py", line 308 in wrap_session File "contrib/python/pytest/py3/_pytest/main.py", line 320 in pytest_cmdline_main File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175 in main File "library/python/pytest/main.py", line 101 in main Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: ...d '['/home/runner/.ya/build/build_root/2btm/00434a/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc', '--basetemp', '/home/runner/.ya/build/build_root/2btm/00434a/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/00434a/ydb/tests/fq/mem_alloc/test-results/py3test/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/00434a', '--source-root', '/home/runner/.ya/build/build_root/2btm/00434a/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/00434a/ydb/tests/fq/mem_alloc/test-results/py3test/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/fq/mem_alloc', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--dep-root', 'ydb/tests/fq/mem_alloc', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("...d '['/home/runner/.ya/build/build_root/2btm/00434a/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc', '--basetemp', '/home/runner/.ya/build/build_root/2btm/00434a/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/00434a/ydb/tests/fq/mem_alloc/test-results/py3test/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/00434a', 
'--source-root', '/home/runner/.ya/build/build_root/2btm/00434a/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/00434a/ydb/tests/fq/mem_alloc/test-results/py3test/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/fq/mem_alloc', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--dep-root', 'ydb/tests/fq/mem_alloc', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout",), {}) 2025-06-24 21:41:31,671 WARNING library.python.cores: Core dump dir doesn't exist: /coredumps 2025-06-24 21:41:31,672 WARNING library.python.cores: Core dump dir doesn't exist: /var/tmp/cores |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_crud.py::TestManySelectsInRow::test_selects_in_row_success[500-500-50] [GOOD] |99.5%| [TM] {RESULT} ydb/tests/fq/mem_alloc/py3test >> test_public_api.py::TestForPotentialDeadlock::test_deadlocked_threads_on_cleanup >> test_clickbench.py::TestClickbench::test_clickbench[Query00] >> test_diff_processing.py::TestTpchDiffProcessing::test_tpch[CheckCanonicalPolicy.NO] >> test_public_api.py::TestForPotentialDeadlock::test_deadlocked_threads_on_cleanup [GOOD] >> test_public_api.py::TestRecursiveCreation::test_mkdir >> test_public_api.py::TestRecursiveCreation::test_mkdir [GOOD] >> test_public_api.py::TestRecursiveCreation::test_create_table >> test_public_api.py::TestRecursiveCreation::test_create_table [GOOD] >> test_cms_restart.py::TestCmsStateStorageRestartsBlockKeep::test_restart_as_much_as_can [GOOD] >> test_external.py::TestExternalE1::test[first_query_set.1.sql] [GOOD] >> test_external.py::TestExternalE1::test[first_query_set.2.yql] >> test_external.py::TestExternalE1::test[first_query_set.2.yql] [GOOD] >> test_external.py::TestExternalE1::test[second_query_set.join.sql] >> test_external.py::TestExternalE1::test[second_query_set.join.sql] [GOOD] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsBlockKeep::test_restart_as_much_as_can [GOOD] |99.5%| [TA] $(B)/ydb/tests/functional/cms/test-results/py3test/{meta.json ... results_accumulator.log} |99.5%| [TA] {RESULT} $(B)/ydb/tests/functional/cms/test-results/py3test/{meta.json ... 
results_accumulator.log} |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/py3test >> test_external.py::TestExternalE1::test[second_query_set.join.sql] [GOOD] >> KafkaProtocol::AbortTransactionScenario [GOOD] >> KafkaProtocol::TransactionShouldBeAbortedIfPartitionIsAddedToTransactionButNoWritesToItWereReceived >> test_tpch.py::TestTpchS1::test_tpch[1] >> test_diff_processing.py::TestTpchDiffProcessing::test_tpch[CheckCanonicalPolicy.NO] [GOOD] >> test_diff_processing.py::TestTpchDiffProcessing::test_tpch[CheckCanonicalPolicy.WARNING] >> test_import_csv.py::TestExternalImportCsv::test >> test_diff_processing.py::TestTpchDiffProcessing::test_tpch[CheckCanonicalPolicy.WARNING] [GOOD] >> test_diff_processing.py::TestTpchDiffProcessing::test_tpch[CheckCanonicalPolicy.ERROR] >> test_public_api.py::TestAttributes::test_create_table >> test_diff_processing.py::TestTpchDiffProcessing::test_tpch[CheckCanonicalPolicy.ERROR] [GOOD] >> test_diff_processing.py::TestTpcdsDiffProcessing::test_tpcds[CheckCanonicalPolicy.NO] >> test_restarts.py::TestRestartSingleBlock42::test_restart_single_node_is_ok [GOOD] >> test_public_api.py::TestAttributes::test_create_table [GOOD] >> test_public_api.py::TestAttributes::test_copy_table >> test_public_api.py::TestAttributes::test_copy_table [GOOD] >> test_public_api.py::TestAttributes::test_create_indexed_table >> test_public_api.py::TestAttributes::test_create_indexed_table [GOOD] >> test_public_api.py::TestAttributes::test_alter_table >> test_public_api.py::TestAttributes::test_alter_table [GOOD] >> test_public_api.py::TestAttributes::test_limits[attributes0] [GOOD] >> test_public_api.py::TestAttributes::test_limits[attributes1] [GOOD] >> test_public_api.py::TestAttributes::test_limits[attributes2] [GOOD] >> test_public_api.py::TestAttributes::test_limits[attributes3] >> test_public_api.py::TestAttributes::test_limits[attributes3] [GOOD] >> test_public_api.py::TestAttributes::test_limits[attributes4] [GOOD] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartSingleBlock42::test_restart_single_node_is_ok [GOOD] >> test_restarts.py::TestRestartClusterMirror3DC::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] >> test_restarts.py::TestRestartMultipleBlock42::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/s3_import/py3test >> test_tpch_import.py::TestS3TpchImport::test_import_and_export 2025-06-24 21:41:53,611 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 21:41:54,734 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. 
Process tree before termination: pid rss ref pdirt 1096594 587M 526M 506M ydb-tests-olap-s3_import --basetemp /home/runner/.ya/build/build_root/2btm/0038b0/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctest-modul 1099699 11.2G 11.1G 10.7G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/2btm/0038b0/ydb/tests/olap/s3_import/test-results/py3test/testing_out_stuff/test_tp 1102966 391M 321M 358M ├─ moto_server s3 --port 23590 1117993 2.0G 2.0G 1.9G └─ ydb -e grpc://localhost:22294 -d /Root workload tpch import generator --scale 1 Test command err: File "library/python/pytest/main.py", line 101, in main rc = pytest.main( File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main return wrap_session(config, _main) File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session session.exitstatus = doit(config, session) or 0 File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main config.hook.pytest_runtestloop(session=session) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol runtestprotocol(item, nextitem=nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 134, in runtestprotocol reports.append(call_and_report(item, "call", log)) File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report call = call_runtest_hook(item, when, **kwds) File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook return CallInfo.from_call( File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call result: Optional[TResult] = func() File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in lambda: ihook(item=item, **kwds), when=when, reraise=reraise File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return 
self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 170, in pytest_runtest_call item.runtest() File "contrib/python/pytest/py3/_pytest/python.py", line 1844, in runtest self.ihook.pytest_pyfunc_call(pyfuncitem=self) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "library/python/pytest/plugins/ya.py", line 563, in pytest_pyfunc_call pyfuncitem.retval = testfunction(**testargs) File "ydb/tests/olap/s3_import/test_tpch_import.py", line 94, in test_import_and_export self.ydb_client.run_cli_comand(["workload", "tpch", "import", "generator", "--scale", "1"]) File "ydb/tests/olap/common/ydb_client.py", line 37, in run_cli_comand process = yatest.common.process.execute(cmd, check_exit_code=False) File "library/python/testing/yatest_common/yatest/common/process.py", line 656, in execute res.wait(check_exit_code, timeout, on_timeout) File "library/python/testing/yatest_common/yatest/common/process.py", line 400, in wait _wait() File "library/python/testing/yatest_common/yatest/common/process.py", line 335, in _wait pid, sts, rusage = os.wait4(self._process.pid, 0) File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown traceback.print_stack(file=sys.stderr) Thread 0x00007feea5f92640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/python/grpcio/py3/grpc/_common.py", line 112 in _wait_once File "contrib/python/grpcio/py3/grpc/_common.py", line 150 in wait File "contrib/python/grpcio/py3/grpc/_channel.py", line 872 in _next File "contrib/python/grpcio/py3/grpc/_channel.py", line 475 in __next__ File "contrib/python/ydb/py3/ydb/_utilities.py", line 164 in _next File "contrib/python/ydb/py3/ydb/_utilities.py", line 173 in __next__ File "contrib/python/ydb/py3/ydb/query/session.py", line 267 in _check_session_status_loop File "contrib/tools/python3/Lib/threading.py", line 1012 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007feea4c72640 (most recent call first): File "contrib/python/grpcio/py3/grpc/_channel.py", line 1392 in channel_spin File "contrib/tools/python3/Lib/threading.py", line 1012 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Thread 0x00007feea72b2640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/python/ydb/py3/ydb/pool.py", line 274 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Current thread 0x00007feed778ba40 (most recent call first): File "contrib/tools/python3/Lib/subprocess.py", line 2011 in _try_wait File "contrib/tools/python3/Lib/subprocess.py", line 2053 in _wait File "contrib/tools/python3/Lib/subprocess.py", line 1264 in wait File 
"library/python/testing/yatest_common/yatest/common/process.py", line 370 in _wait File "library/python/testing/yatest_common/yatest/common/process.py", line 400 in wait File "library/python/testing/yatest_common/yatest/common/process.py", line 656 in execute File "ydb/tests/olap/common/ydb_client.py", line 37 in run_cli_comand File "ydb/tests/olap/s3_import/test_tpch_import.py", line 94 in test_import_and_export File "library/python/pytest/plugins/ya.py", line 563 in pytest_pyfunc_call File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/python.py", line 1844 in runtest File "contrib/python/pytest/py3/_pytest/runner.py", line 170 in pytest_runtest_call File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/runner.py", line 263 in File "contrib/python/pytest/py3/_pytest/runner.py", line 342 in from_call File "contrib/python/pytest/py3/_pytest/runner.py", line 262 in call_runtest_hook File "contrib/python/pytest/py3/_pytest/runner.py", line 223 in call_and_report File "contrib/python/pytest/py3/_pytest/runner.py", line 134 in runtestprotocol File "contrib/python/pytest/py3/_pytest/runner.py", line 115 in pytest_runtest_protocol File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/main.py", line 352 in pytest_runtestloop File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/main.py", line 327 in _main File "contrib/python/pytest/py3/_pytest/main.py", line 273 in wrap_session File "contrib/python/pytest/py3/_pytest/main.py", line 320 in pytest_cmdline_main File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175 in main File "library/python/pytest/main.py", line 101 in main Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: ...unner/.ya/build/build_root/2btm/0038b0/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import', '--basetemp', '/home/runner/.ya/build/build_root/2btm/0038b0/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/0038b0/ydb/tests/olap/s3_import/test-results/py3test/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/0038b0', '--source-root', '/home/runner/.ya/build/build_root/2btm/0038b0/environment/arcadia', '--output-dir', 
'/home/runner/.ya/build/build_root/2btm/0038b0/ydb/tests/olap/s3_import/test-results/py3test/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/s3_import', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--dep-root', 'ydb/tests/olap/s3_import', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("...unner/.ya/build/build_root/2btm/0038b0/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import', '--basetemp', '/home/runner/.ya/build/build_root/2btm/0038b0/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/0038b0/ydb/tests/olap/s3_import/test-results/py3test/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/0038b0', '--source-root', '/home/runner/.ya/build/build_root/2btm/0038b0/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/0038b0/ydb/tests/olap/s3_import/test-results/py3test/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/s3_import', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--dep-root', 'ydb/tests/olap/s3_import', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout",), {}) 2025-06-24 21:42:25,301 WARNING library.python.cores: Core dump dir doesn't exist: /coredumps 2025-06-24 21:42:25,301 WARNING library.python.cores: Core dump dir doesn't exist: /var/tmp/cores >> KafkaProtocol::TransactionShouldBeAbortedIfPartitionIsAddedToTransactionButNoWritesToItWereReceived [GOOD] >> KafkaProtocol::ProducerFencedInTransactionScenario |99.5%| [TM] {RESULT} ydb/tests/olap/s3_import/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartClusterMirror3DC::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartMultipleBlock42::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_import_csv.py::TestExternalImportCsv::test [GOOD] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_import_csv.py::TestExternalImportCsvArrow::test >> test_clickbench.py::TestClickbench::test_clickbench[Query00] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query01] >> test_public_api.py::TestDocApiTables::test_create_table |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_self_heal.py::TestEnableSelfHeal::test_replication >> test_clickbench.py::TestClickbench::test_clickbench[Query01] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query02] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_public_api.py::TestDocApiTables::test_create_table [GOOD] >> test_public_api.py::TestDocApiTables::test_alter_table[None-BadRequest] >> test_public_api.py::TestDocApiTables::test_alter_table[None-BadRequest] [GOOD] >> test_public_api.py::TestDocApiTables::test_alter_table[settings1-None] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_public_api.py::TestDocApiTables::test_alter_table[settings1-None] [GOOD] >> test_public_api.py::TestDocApiTables::test_drop_table[None-None] >> test_public_api.py::TestDocApiTables::test_drop_table[None-None] [GOOD] >> test_public_api.py::TestDocApiTables::test_drop_table[settings1-None] >> test_public_api.py::TestDocApiTables::test_drop_table[settings1-None] [GOOD] >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_in_new_channel_then_can_read_from_tablet |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query02] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query03] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query03] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query04] >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_in_new_channel_then_can_read_from_tablet [GOOD] >> test_self_heal.py::TestEnableSelfHeal::test_replication [GOOD] ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/kafka_proxy/ut/unittest >> KafkaProtocol::ProducerFencedInTransactionScenario 2025-06-24 21:42:56,796 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 21:42:57,158 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 1130856 47.8M 47.1M 24.4M test_tool run_ut @/home/runner/.ya/build/build_root/2btm/001803/ydb/core/kafka_proxy/ut/test-results/unittest/testing_out_stuff/test_tool.args 1136474 3.4G 3.3G 2.9G └─ ydb-core-kafka_proxy-ut --trace-path-append /home/runner/.ya/build/build_root/2btm/001803/ydb/core/kafka_proxy/ut/test-results/unittest/ytest.report.trace +DiscoveryIsN Test command err: 2025-06-24T21:33:07.319677Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519632123990199052:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:33:07.319765Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001803/r3tmp/tmpcVCHAh/pdisk_1.dat 2025-06-24T21:33:08.296640Z node 1 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:33:08.297736Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519632123990199023:2079] 1750800787307474 != 1750800787307477 2025-06-24T21:33:08.316194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:33:08.316292Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:33:08.342690Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-24T21:33:08.346337Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1432, node 1 2025-06-24T21:33:08.719355Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/2btm/001803/r3tmp/yandexPcspBj.tmp 2025-06-24T21:33:08.719382Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/2btm/001803/r3tmp/yandexPcspBj.tmp 2025-06-24T21:33:08.722067Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/2btm/001803/r3tmp/yandexPcspBj.tmp 2025-06-24T21:33:08.723641Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-24T21:33:09.283907Z INFO: TTestServer started on Port 24996 GrpcPort 1432 TClient is connected to server localhost:24996 PQClient connected to localhost:1432 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-24T21:33:09.780034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-24T21:33:09.861666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) waiting... waiting... 2025-06-24T21:33:10.071529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-24T21:33:10.087464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-24T21:33:11.585231Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519632141170069014:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:11.585424Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:11.586515Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519632141170069027:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-24T21:33:11.593658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-24T21:33:11.611870Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519632141170069029:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-24T21:33:11.693262Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519632141170069093:2445] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-24T21:33:12.310926Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519632141170069101:2315], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-24T21:33:12.332163Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519632123990199052:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:33:12.332390Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=N2RjOTk3OTktOTRmZTI5ZjctZmE1NDU1Y2ItNGU4ZTE1OTI=, ActorId: [1:7519632141170069012:2303], ActorState: ExecuteState, TraceId: 01jyhxqxz8dm8w123tfngpjzvm, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-24T21:33:12.332530Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-24T21:33:12.342332Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-24T21:33:12.421382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:12.488537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) 2025-06-24T21:33:12.659758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:195: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:667) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-24T21:33:13.240606Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710667. Ctx: { TraceId: 01jyhxqz6a57fz9x6gysq0crnw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWI3N2FlNmMtZTIzNzFiNGYtYjU1M2Y3YmQtOTVlYjc2Nzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7519632149760004006:2635] === CheckClustersList. 
Ok Run with port = 1432, kafka port = 1984 2025-06-24T21:33:20.092278Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519632178618284806:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-24T21:33:20.092333Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/2btm/001803/r3tmp/tmpLfA7OW/pdisk_1.dat 2025-06-24T21:33:20.270493Z node 2 :IMPORT WARN: schemeshard_import.cpp:304: Table profiles were not loaded 2025-06-24T21:33:20.278512Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-24T21:33:20.278601Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-24T21:33:20.280981Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8843, node 2 2025-06-24T21:33:20.375855Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, w ... on: 2, State: StateIdle] no data for compaction 2025-06-24T21:42:50.767353Z node 31 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: {0, {0, 0}, 100000}, State: StateIdle] need more data for compaction. cumulativeSize=134, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:42:50.777170Z node 31 :KAFKA_PROXY INFO: kafka_balancer_actor.cpp:134: TKafkaBalancerActor: GroupId# my-consumer, MemberId# [31:7519634606866670408:2618], CurrentStep# 0. Handle kqp response 2025-06-24T21:42:50.838633Z node 31 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: {1, {0, 0}, 100000}, State: StateIdle] need more data for compaction. cumulativeSize=134, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:42:51.248053Z node 31 :KAFKA_PROXY INFO: kafka_balancer_actor.cpp:134: TKafkaBalancerActor: GroupId# my-consumer, MemberId# [31:7519634606866670408:2618], CurrentStep# 1. Handle kqp response 2025-06-24T21:42:51.248417Z node 31 :KAFKA_PROXY INFO: kafka_balancer_actor.cpp:375: TKafkaBalancerActor: GroupId# my-consumer, MemberId# [31:7519634606866670408:2618], CurrentStep# 1. Check group before join status. memberId: [31:7519634606866670408:2618] instanceId: group: my-consumer exists: 0 protocolType: protocolName: master: generation: 0 lastSuccessGeneration: 0 state: 0 2025-06-24T21:42:51.346868Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:41: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Received event: NKafka::TEvKafka::TEvWakeup 2025-06-24T21:42:51.346949Z node 31 :KAFKA_PROXY DEBUG: kafka_produce_actor.cpp:56: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Wakeup 2025-06-24T21:42:51.347029Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:502: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Sending results. 
QueueSize= 0, ExpirationTime=2025-06-24T21:42:21.346551Z 2025-06-24T21:42:51.347116Z node 31 :KAFKA_PROXY DEBUG: kafka_produce_actor.cpp:99: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: CleanWriters 2025-06-24T21:42:51.347194Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:117: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: CleanWriters was completed successfully 2025-06-24T21:42:51.347271Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:64: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Wakeup was completed successfully 2025-06-24T21:42:52.348616Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:41: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Received event: NKafka::TEvKafka::TEvWakeup 2025-06-24T21:42:52.348707Z node 31 :KAFKA_PROXY DEBUG: kafka_produce_actor.cpp:56: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Wakeup 2025-06-24T21:42:52.348805Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:502: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Sending results. QueueSize= 0, ExpirationTime=2025-06-24T21:42:22.348697Z 2025-06-24T21:42:52.348909Z node 31 :KAFKA_PROXY DEBUG: kafka_produce_actor.cpp:99: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: CleanWriters 2025-06-24T21:42:52.348981Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:117: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: CleanWriters was completed successfully 2025-06-24T21:42:52.349065Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:64: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Wakeup was completed successfully 2025-06-24T21:42:52.487771Z node 31 :KAFKA_PROXY INFO: kafka_balancer_actor.cpp:134: TKafkaBalancerActor: GroupId# my-consumer, MemberId# [31:7519634606866670408:2618], CurrentStep# 2. Handle kqp response 2025-06-24T21:42:53.368512Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:41: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Received event: NKafka::TEvKafka::TEvWakeup 2025-06-24T21:42:53.368596Z node 31 :KAFKA_PROXY DEBUG: kafka_produce_actor.cpp:56: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Wakeup 2025-06-24T21:42:53.368680Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:502: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Sending results. QueueSize= 0, ExpirationTime=2025-06-24T21:42:23.368531Z 2025-06-24T21:42:53.368768Z node 31 :KAFKA_PROXY DEBUG: kafka_produce_actor.cpp:99: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: CleanWriters 2025-06-24T21:42:53.368831Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:117: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: CleanWriters was completed successfully 2025-06-24T21:42:53.368916Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:64: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Wakeup was completed successfully 2025-06-24T21:42:53.443238Z node 31 :KAFKA_PROXY INFO: kafka_balancer_actor.cpp:134: TKafkaBalancerActor: GroupId# my-consumer, MemberId# [31:7519634606866670408:2618], CurrentStep# 3. 
Handle kqp response 2025-06-24T21:42:54.370340Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:41: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Received event: NKafka::TEvKafka::TEvWakeup 2025-06-24T21:42:54.370417Z node 31 :KAFKA_PROXY DEBUG: kafka_produce_actor.cpp:56: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Wakeup 2025-06-24T21:42:54.370504Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:502: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Sending results. QueueSize= 0, ExpirationTime=2025-06-24T21:42:24.370417Z 2025-06-24T21:42:54.370590Z node 31 :KAFKA_PROXY DEBUG: kafka_produce_actor.cpp:99: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: CleanWriters 2025-06-24T21:42:54.370653Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:117: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: CleanWriters was completed successfully 2025-06-24T21:42:54.370729Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:64: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Wakeup was completed successfully 2025-06-24T21:42:54.577836Z node 31 :KAFKA_PROXY INFO: kafka_balancer_actor.cpp:134: TKafkaBalancerActor: GroupId# my-consumer, MemberId# [31:7519634606866670408:2618], CurrentStep# 4. Handle kqp response 2025-06-24T21:42:54.638798Z node 31 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037890, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=165, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:42:54.639106Z node 31 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037888, Partition: 1, State: StateIdle] no data for compaction 2025-06-24T21:42:54.660680Z node 31 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037889, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T21:42:55.340696Z node 31 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 0, State: StateIdle] no data for compaction 2025-06-24T21:42:55.340940Z node 31 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 1, State: StateIdle] no data for compaction 2025-06-24T21:42:55.351715Z node 31 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037893, Partition: 2, State: StateIdle] no data for compaction 2025-06-24T21:42:55.371084Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:41: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Received event: NKafka::TEvKafka::TEvWakeup 2025-06-24T21:42:55.371181Z node 31 :KAFKA_PROXY DEBUG: kafka_produce_actor.cpp:56: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Wakeup 2025-06-24T21:42:55.371282Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:502: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Sending results. 
QueueSize= 0, ExpirationTime=2025-06-24T21:42:25.370899Z 2025-06-24T21:42:55.371377Z node 31 :KAFKA_PROXY DEBUG: kafka_produce_actor.cpp:99: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: CleanWriters 2025-06-24T21:42:55.371448Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:117: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: CleanWriters was completed successfully 2025-06-24T21:42:55.371529Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:64: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Wakeup was completed successfully 2025-06-24T21:42:55.767710Z node 31 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: {0, {0, 0}, 100000}, State: StateIdle] need more data for compaction. cumulativeSize=134, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:42:55.838789Z node 31 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: {1, {0, 0}, 100000}, State: StateIdle] need more data for compaction. cumulativeSize=134, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-24T21:42:56.371606Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:41: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Received event: NKafka::TEvKafka::TEvWakeup 2025-06-24T21:42:56.371692Z node 31 :KAFKA_PROXY DEBUG: kafka_produce_actor.cpp:56: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Wakeup 2025-06-24T21:42:56.371786Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:502: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Sending results. QueueSize= 0, ExpirationTime=2025-06-24T21:42:26.371412Z 2025-06-24T21:42:56.371883Z node 31 :KAFKA_PROXY DEBUG: kafka_produce_actor.cpp:99: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: CleanWriters 2025-06-24T21:42:56.371961Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:117: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: CleanWriters was completed successfully 2025-06-24T21:42:56.372059Z node 31 :KAFKA_PROXY TRACE: kafka_produce_actor.cpp:64: TKafkaProduceActor [31:7519634538147192589:2284] State: Work Produce actor: Wakeup was completed successfully Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/2btm/001803/ydb/core/kafka_proxy/ut/test-results/unittest/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', 
'@/home/runner/.ya/build/build_root/2btm/001803/ydb/core/kafka_proxy/ut/test-results/unittest/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.5%| [TM] {RESULT} ydb/core/kafka_proxy/ut/unittest |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_self_heal.py::TestEnableSelfHeal::test_replication [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query04] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query05] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_import_csv.py::TestExternalImportCsvArrow::test [GOOD] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_public_api.py::TestDocApiTables::test_drop_table[settings1-None] [GOOD] |99.5%| [TA] $(B)/ydb/tests/functional/api/test-results/py3test/{meta.json ... results_accumulator.log} |99.6%| [TA] {RESULT} $(B)/ydb/tests/functional/api/test-results/py3test/{meta.json ... results_accumulator.log} |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_in_new_channel_then_can_read_from_tablet [GOOD] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/py3test >> test_import_csv.py::TestExternalImportCsvArrow::test [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query05] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query06] >> test_restarts.py::TestRestartClusterMirror34::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_restarts.py::TestRestartMultipleMirror34::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query06] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query07] >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[block-4-2] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartClusterMirror34::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] |99.6%| [TM] {asan, 
default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartMultipleMirror34::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query07] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query08] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--true] [GOOD] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_diff_processing.py::TestTpcdsDiffProcessing::test_tpcds[CheckCanonicalPolicy.NO] [GOOD] >> test_diff_processing.py::TestTpcdsDiffProcessing::test_tpcds[CheckCanonicalPolicy.WARNING] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query08] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query09] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_pdisk_format_info.py::TestPDiskInfo::test_read_disk_state >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[none] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_is_operational_with_distconf >> test_restarts.py::TestRestartSingleMirror3DC::test_restart_single_node_is_ok [GOOD] >> test_diff_processing.py::TestTpcdsDiffProcessing::test_tpcds[CheckCanonicalPolicy.WARNING] [GOOD] >> test_diff_processing.py::TestTpcdsDiffProcessing::test_tpcds[CheckCanonicalPolicy.ERROR] >> test_pdisk_format_info.py::TestPDiskInfo::test_read_disk_state [GOOD] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query09] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query10] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartSingleMirror3DC::test_restart_single_node_is_ok [GOOD] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_diff_processing.py::TestTpcdsDiffProcessing::test_tpcds[CheckCanonicalPolicy.ERROR] [GOOD] >> test_diff_processing.py::TestClickbenchDiffProcessing::test_clickbench[CheckCanonicalPolicy.NO] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--true] [GOOD] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query10] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query11] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_pdisk_format_info.py::TestPDiskInfo::test_read_disk_state [GOOD] >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_is_operational_with_distconf [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[none] [GOOD] >> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--false] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3-dc] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_config_migration.py::TestConfigMigrationToV2::test_migration_to_v2 |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[none] [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestKiKiMRWithMetadata::test_cluster_is_operational_with_metadata |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_config_with_metadata.py::TestConfigWithoutMetadataBlock::test_cluster_is_operational_without_metadata |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest 
|99.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query11] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query12] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_is_operational_with_distconf [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--false] [GOOD] >> test_restarts.py::TestRestartClusterBlock42::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok >> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--true] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query12] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query13] >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--true] [GOOD] >> test_diff_processing.py::TestClickbenchDiffProcessing::test_clickbench[CheckCanonicalPolicy.NO] [GOOD] >> test_diff_processing.py::TestClickbenchDiffProcessing::test_clickbench[CheckCanonicalPolicy.WARNING] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_restarts.py::TestRestartMultipleMirror3DC::test_tablets_are_successfully_started_after_few_killed_nodes >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[block-4-2] [GOOD] >> test_workload_oltp.py::TestWorkloadSimpleQueue::test_workload_oltp >> test_diff_processing.py::TestClickbenchDiffProcessing::test_clickbench[CheckCanonicalPolicy.WARNING] [GOOD] >> test_diff_processing.py::TestClickbenchDiffProcessing::test_clickbench[CheckCanonicalPolicy.ERROR] >> test_diff_processing.py::TestClickbenchDiffProcessing::test_clickbench[CheckCanonicalPolicy.ERROR] [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/py3test >> test_diff_processing.py::TestClickbenchDiffProcessing::test_clickbench[CheckCanonicalPolicy.ERROR] [GOOD] >> test_workload_simple_queue.py::TestWorkloadSimpleQueue::test_workload_simple_queue[row] >> test_config_with_metadata.py::TestConfigWithoutMetadataBlock::test_cluster_is_operational_without_metadata [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[block-4-2] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query13] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query14] >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageMultipleRingGroup::test_cluster_change_state_storage |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> 
test_config_with_metadata.py::TestKiKiMRWithMetadata::test_cluster_is_operational_with_metadata [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestConfigWithoutMetadataBlock::test_cluster_is_operational_without_metadata [GOOD] >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_config_stored_in_config_store >> test_clickbench.py::TestClickbench::test_clickbench[Query14] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query15] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestKiKiMRWithMetadata::test_cluster_is_operational_with_metadata [GOOD] >> test_configuration_version.py::TestConfigurationVersion::test_configuration_version >> test_clickbench.py::TestClickbench::test_clickbench[Query15] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query16] >> test_distconf.py::TestKiKiMRDistConfReassignStateStorage::test_cluster_change_state_storage |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--true] [GOOD] >> test_config_with_metadata.py::TestKiKiMRWithoutMetadata::test_cluster_is_operational_without_metadata >> test_clickbench.py::TestClickbench::test_clickbench[Query16] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query17] >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_config_stored_in_config_store [GOOD] >> test_workload_oltp.py::TestWorkloadSimpleQueue::test_workload_oltp [FAIL] >> test_clickbench.py::TestClickbench::test_clickbench[Query17] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query18] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/py3test >> test_workload_oltp.py::TestWorkloadSimpleQueue::test_workload_oltp [FAIL] >> test_workload_simple_queue.py::TestWorkloadSimpleQueue::test_workload_simple_queue[row] [FAIL] >> test_workload_simple_queue.py::TestWorkloadSimpleQueue::test_workload_simple_queue[column] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_config_stored_in_config_store [GOOD] >> test_generate_dynamic_config.py::TestGenerateDynamicConfigFromConfigDir::test_generate_dynamic_config_from_config_store >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageBadCases::test_cluster_change_state_storage >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3-dc] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query18] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query19] >> test_config_with_metadata.py::TestKiKiMRWithoutMetadata::test_cluster_is_operational_without_metadata [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3-dc] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query19] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query20] >> test_generate_dynamic_config.py::TestGenerateDynamicConfigFromConfigDir::test_generate_dynamic_config_from_config_store [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> 
test_clickbench.py::TestClickbench::test_clickbench[Query20] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query21] >> test_generate_dynamic_config.py::TestGenerateDynamicConfig::test_generate_dynamic_config |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_generate_dynamic_config.py::TestGenerateDynamicConfigFromConfigDir::test_generate_dynamic_config_from_config_store [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestKiKiMRWithoutMetadata::test_cluster_is_operational_without_metadata [GOOD] >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageNoChanges::test_cluster_change_state_storage >> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--true] [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query21] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query22] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_cluster_works_with_auto_conf_dir >> test_clickbench.py::TestClickbench::test_clickbench[Query22] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query23] >> test_generate_dynamic_config.py::TestGenerateDynamicConfig::test_generate_dynamic_config [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--true] [GOOD] >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageNoChanges::test_cluster_change_state_storage [GOOD] >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_cluster_works_with_auto_conf_dir [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_generate_dynamic_config.py::TestGenerateDynamicConfig::test_generate_dynamic_config [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageNoChanges::test_cluster_change_state_storage [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestConfigWithMetadataBlock::test_cluster_is_operational_with_metadata |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_cluster_works_with_auto_conf_dir [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.7%| [TA] $(B)/ydb/tests/functional/serverless/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_configuration_version.py::TestConfigurationVersion::test_configuration_version [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_clean.py::TestClean::test [GOOD] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/py3test >> test_clean.py::TestClean::test [GOOD] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.8%| [TA] {RESULT} $(B)/ydb/tests/functional/serverless/test-results/py3test/{meta.json ... results_accumulator.log} |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_configuration_version.py::TestConfigurationVersion::test_configuration_version [GOOD] >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageReuseSameNodes::test_cluster_change_state_storage |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestConfigWithMetadataBlock::test_cluster_is_operational_with_metadata [GOOD] >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_seed_nodes >> test_distconf.py::TestKiKiMRDistConfReassignStateStorage::test_cluster_change_state_storage [GOOD] >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageToTheSameConfig::test_cluster_change_state_storage >> test_clickbench.py::TestClickbench::test_clickbench[Query23] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query24] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_distconf >> test_restarts.py::TestRestartClusterBlock42::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorage::test_cluster_change_state_storage [GOOD] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestConfigWithMetadataBlock::test_cluster_is_operational_with_metadata [GOOD] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartClusterBlock42::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query24] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query25] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestConfigWithoutMetadataMirror::test_cluster_is_operational_without_metadata |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.8%| [TM] {asan, 
default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query25] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query26] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.8%| [TA] $(B)/ydb/tests/functional/blobstorage/test-results/py3test/{meta.json ... results_accumulator.log} |99.8%| [TA] {RESULT} $(B)/ydb/tests/functional/blobstorage/test-results/py3test/{meta.json ... results_accumulator.log} |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageMultipleRingGroup::test_cluster_change_state_storage [GOOD] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> CompositeConveyorTests::TestUniformScopesDistribution |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_distconf [GOOD] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_seed_nodes [GOOD] >> CompositeConveyorTests::TestUniformProcessDistribution >> CompositeConveyorTests::TestUniformDistribution >> test_clickbench.py::TestClickbench::test_clickbench[Query26] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query27] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageMultipleRingGroup::test_cluster_change_state_storage [GOOD] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageBadCases::test_cluster_change_state_storage [GOOD] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_workload_simple_queue.py::TestWorkloadSimpleQueue::test_workload_simple_queue[column] [GOOD] >> test_config_migration.py::TestConfigMigrationToV2::test_migration_to_v2 [GOOD] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/py3test >> test_workload_simple_queue.py::TestWorkloadSimpleQueue::test_workload_simple_queue[column] [GOOD] >> test_config_with_metadata.py::TestConfigWithoutMetadataMirror::test_cluster_is_operational_without_metadata [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query27] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query28] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_distconf [GOOD] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_seed_nodes [GOOD] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.8%| [TM] 
{asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageBadCases::test_cluster_change_state_storage [GOOD] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_migration.py::TestConfigMigrationToV2::test_migration_to_v2 [GOOD] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> CompositeConveyorTests::Test10xMultiDistribution |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestConfigWithoutMetadataMirror::test_cluster_is_operational_without_metadata [GOOD] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> CompositeConveyorTests::Test10xDistribution |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_clickbench.py::TestClickbench::test_clickbench[Query28] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query29] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_workload.py::TestYdbTransferWorkload::test[row] >> test_workload.py::TestShowCreateViewWorkload::test_show_create_view_workload[30-None] >> test_restarts.py::TestRestartMultipleMirror3DC::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] >> test_encryption.py::TestEncryption::test_simple_encryption |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartMultipleMirror3DC::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] >> KqpQueryService::ReplyPartLimitProxyNode |99.9%| [TA] $(B)/ydb/tests/functional/restarts/test-results/py3test/{meta.json ... 
results_accumulator.log} |99.9%| [TA] {RESULT} $(B)/ydb/tests/functional/restarts/test-results/py3test/{meta.json ... results_accumulator.log} >> ConsistentIndexRead::InteractiveTx >> KqpQueryService::ReplyPartLimitProxyNode [GOOD] >> NodeIdDescribe::HasDistribution >> S3PathStyleBackup::DisableVirtualAddressing >> test_clickbench.py::TestClickbench::test_clickbench[Query29] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query30] >> test_workload.py::TestShowCreateViewWorkload::test_show_create_view_workload[30-None] [GOOD] >> test_workload.py::TestShowCreateViewWorkload::test_show_create_view_workload[30-test_scv] >> S3PathStyleBackup::DisableVirtualAddressing [GOOD] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/backup/s3_path_style/unittest >> S3PathStyleBackup::DisableVirtualAddressing [GOOD] |99.9%| [TM] {RESULT} ydb/tests/functional/backup/s3_path_style/unittest >> test_clickbench.py::TestClickbench::test_clickbench[Query30] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query31] >> test_workload.py::TestYdbWorkload::test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> CompositeConveyorTests::TestUniformProcessDistribution Test command err: {1:85698};{2:92296};{3:85697}; {1:89527};{2:106409};{3:87392}; {1:105532};{2:122412};{3:103117}; {1:119352};{2:136233};{3:116606}; {1:134780};{2:155487};{3:131667}; {1:162149};{2:193433};{3:149002}; {1:196387};{2:224744};{3:181181}; {1:221443};{2:248050};{3:211313}; {1:249355};{2:264515};{3:238077}; {1:281533};{2:288952};{3:265793}; {1:315922};{2:322785};{3:279332}; {1:349322};{2:354410};{3:287389}; {1:377700};{2:379837};{3:305175}; {1:388837};{2:390023};{3:350935}; {1:396727};{2:397913};{3:358826}; {1:404754};{2:405940};{3:366852}; {1:412784};{2:413971};{3:374883}; {1:420042};{2:421228};{3:382140}; {1:429355};{2:432070};{3:388007}; {1:445848};{2:448824};{3:394012}; {1:459401};{2:461384};{3:403416}; {1:467263};{2:469441};{3:417681}; {1:475467};{2:477851};{3:435582}; {1:480811};{2:483329};{3:440708}; {1:486931};{2:489602};{3:446827}; {1:493309};{2:496139};{3:453205}; {1:500150};{2:503151};{3:460047}; {1:506213};{2:509366};{3:466110}; {1:513515};{2:516851};{3:473412}; {1:519029};{2:522501};{3:478925}; {1:525923};{2:528844};{3:486502}; {1:534653};{2:532384};{3:495232}; {1:547727};{2:536719};{3:508305}; {1:568470};{2:552546};{3:527947}; {1:591314};{2:574983};{3:550790}; {1:614414};{2:597219};{3:573892}; {1:635518};{2:617543};{3:594995}; {1:656053};{2:637327};{3:615530}; {1:675016};{2:655560};{3:634492}; {1:694935};{2:674620};{3:654411}; {1:713532};{2:692609};{3:673010}; {1:726673};{2:714692};{3:696085}; {1:740383};{2:737287};{3:719710}; {1:753341};{2:759966};{3:743402}; {1:763962};{2:774215};{3:763961}; {1:775794};{2:785904};{3:775793}; {1:788133};{2:797891};{3:788133}; {1:798159};{2:807673};{3:798159}; {1:806133};{2:815488};{3:806132}; {1:816451};{2:825593};{3:816450}; {1:825926};{2:834803};{3:825925}; {1:838984};{2:847616};{3:838984}; {1:848766};{2:857318};{3:848765}; {1:860889};{2:869170};{3:860888}; {1:873347};{2:881342};{3:873346}; {1:884939};{2:892734};{3:884938}; {1:889915};{2:897421};{3:889915}; {1:894807};{2:901979};{3:894806}; {1:901543};{2:908257};{3:901543}; {1:907840};{2:914123};{3:907839}; {1:913179};{2:919099};{3:913179}; {1:919139};{2:924652};{3:919139}; {1:925355};{2:930444};{3:925355}; {1:931918};{2:936560};{3:931918}; {1:938650};{2:942834};{3:938650}; {1:945623};{2:949330};{3:945623}; {1:951750};{2:955039};{3:951749}; 
{1:958829};{2:961636};{3:958828}; {1:964335};{2:966767};{3:964334}; {1:970134};{2:972171};{3:970134}; {1:976141};{2:977768};{3:976141}; {1:982657};{2:983839};{3:982657}; {1:989976};{2:990660};{3:989976}; {1:998049};{2:998183};{3:998049}; {1:1000000};{2:1000000};{3:1000000};
234us per task 74.095599s;74.095618s;74.095622s;
VERIFY failed (2025-06-24T21:48:58.446082Z): verification=NKikimr::NColumnShard::TMonitoringObjectsCounter::GetCounter().Val() == 4;fline=ut_simple.cpp:203;count=5;
ydb/library/actors/core/log.cpp:800 ~TVerifyFormattedRecordWriter(): requirement false failed
NPrivate::InternalPanicImpl(int, char const*, char const*, int, int, int, TBasicStringBuf>, char const*, unsigned long)+873 (0xEF040F9)
NPrivate::Panic(NPrivate::TStaticBuf const&, int, char const*, char const*, char const*, ...)+571 (0xEEF278B)
NActors::TVerifyFormattedRecordWriter::~TVerifyFormattedRecordWriter()+326 (0x1051E9C6)
TTestingExecutor::Execute()+6426 (0xEB1757A)
NTestSuiteCompositeConveyorTests::TTestCaseTestUniformProcessDistribution::Execute_(NUnitTest::TTestContext&)+160 (0xEB184D0)
std::__y1::__function::__func, void ()>::operator()()+280 (0xEB1A3B8)
TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0xF3A8806)
NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0xF381269)
NTestSuiteCompositeConveyorTests::TCurrentTest::Execute()+1204 (0xEB19264)
NUnitTest::TTestFactory::Execute()+2438 (0xF382B36)
NUnitTest::RunMain(int, char**)+5213 (0xF3A2D7D)
??+0 (0x7F80ECDA1D90)
__libc_start_main+128 (0x7F80ECDA1E40)
_start+41 (0xC394029)
>> test_distconf.py::TestKiKiMRDistConfReassignStateStorageToTheSameConfig::test_cluster_change_state_storage [GOOD]
>> test_distconf.py::TestKiKiMRDistConfReassignStateStorageReuseSameNodes::test_cluster_change_state_storage [GOOD]
>> test_workload.py::TestShowCreateViewWorkload::test_show_create_view_workload[30-test_scv] [GOOD]
>> NodeIdDescribe::HasDistribution [GOOD]
>> CompositeConveyorTests::TestUniformScopesDistribution [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> CompositeConveyorTests::TestUniformScopesDistribution [GOOD]
Test command err:
{1:95818};{2:92155};{3:90658}; {1:103509};{2:98198};{3:100975}; {1:110489};{2:103682};{3:110338}; {1:119526};{2:109500};{3:126237}; {1:139903};{2:126725};{3:143920}; {1:157902};{2:138836};{3:169275}; {1:177239};{2:159780};{3:198027}; {1:199005};{2:192759};{3:227592}; {1:227283};{2:217328};{3:259545}; {1:243779};{2:245501};{3:287166}; {1:252093};{2:289741};{3:303127}; {1:270425};{2:310961};{3:310961}; {1:302245};{2:318808};{3:318808}; {1:326178};{2:326723};{3:326724}; {1:334418};{2:334762};{3:334763}; {1:342626};{2:342770};{3:358499}; {1:348541};{2:348635};{3:388356}; {1:348541};{2:354316};{3:412527}; {1:348541};{2:359694};{3:452694}; {1:352601};{2:364044};{3:497722}; {1:376657};{2:367962};{3:527199}; {1:417830};{2:374774};{3:534739}; {1:442904};{2:394899};{3:541581}; {1:450026};{2:432639};{3:549059}; {1:457105};{2:464705};{3:556492}; {1:465833};{2:485751};{3:565656}; {1:473860};{2:505838};{3:574084}; {1:480996};{2:525239};{3:581578}; {1:489568};{2:547039};{3:590579}; {1:497536};{2:571075};{3:598944}; {1:505618};{2:592786};{3:607431}; {1:513934};{2:595432};{3:616163}; {1:523068};{2:598338};{3:625753}; {1:532396};{2:601306};{3:635962}; {1:545672};{2:605530};{3:661880}; {1:562773};{2:627721};{3:683983}; {1:578529};{2:652738};{3:706811}; {1:593214};{2:677163};{3:728667};
{1:615876};{2:703460};{3:741283}; {1:639601};{2:726618};{3:751590}; {1:662838};{2:753616};{3:759616}; {1:672965};{2:777456};{3:767591}; {1:682673};{2:793992};{3:778593}; {1:692077};{2:795381};{3:789250}; {1:702333};{2:795381};{3:800874}; {1:710696};{2:795381};{3:810353}; {1:717959};{2:795381};{3:818584}; {1:726246};{2:795381};{3:827976}; {1:734475};{2:795381};{3:837301}; {1:743506};{2:795381};{3:847537}; {1:753066};{2:795381};{3:858371}; {1:762417};{2:795381};{3:868970}; {1:770223};{2:795381};{3:877817}; {1:779703};{2:795381};{3:888561}; {1:789074};{2:795381};{3:899181}; {1:797401};{2:795381};{3:908617}; {1:806622};{2:795381};{3:919068}; {1:815180};{2:795381};{3:928768}; {1:822198};{2:795381};{3:936722}; {1:831143};{2:795381};{3:946858}; {1:839670};{2:795381};{3:956523}; {1:848207};{2:795381};{3:966198}; {1:857013};{2:795381};{3:976179}; {1:865284};{2:795381};{3:985551}; {1:872847};{2:795381};{3:994123}; {1:879653};{2:795381};{3:1000000}; {1:897768};{2:795381};{3:1000000}; {1:913633};{2:795381};{3:1000000}; {1:932131};{2:795381};{3:1000000}; {1:952238};{2:795381};{3:1000000}; {1:968907};{2:795381};{3:1000000}; {1:977858};{2:800996};{3:1000000}; {1:986298};{2:811757};{3:1000000}; {1:994237};{2:821879};{3:1000000}; {1:1000000};{2:833270};{3:1000000}; {1:1000000};{2:852124};{3:1000000}; {1:1000000};{2:868666};{3:1000000}; {1:1000000};{2:883027};{3:1000000}; {1:1000000};{2:899566};{3:1000000}; {1:1000000};{2:920211};{3:1000000}; {1:1000000};{2:941152};{3:1000000}; {1:1000000};{2:959933};{3:1000000}; {1:1000000};{2:979377};{3:1000000}; {1:1000000};{2:997313};{3:1000000}; {1:1000000};{2:1000000};{3:1000000}; 266us per task 74.089213s;84.100973s;65.073726s; >> test_workload.py::TestYdbWorkload::test[row] >> test_clickbench.py::TestClickbench::test_clickbench[Query31] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query32] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageToTheSameConfig::test_cluster_change_state_storage [GOOD] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/kqp/kqp_query_svc/unittest >> NodeIdDescribe::HasDistribution [GOOD] |99.9%| [TM] {RESULT} ydb/tests/functional/kqp/kqp_query_svc/unittest |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageReuseSameNodes::test_cluster_change_state_storage [GOOD] |99.9%| [TA] $(B)/ydb/tests/functional/config/test-results/py3test/{meta.json ... results_accumulator.log} |99.9%| [TA] {RESULT} $(B)/ydb/tests/functional/config/test-results/py3test/{meta.json ... 
results_accumulator.log} |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/show_create/view/tests/py3test >> test_workload.py::TestShowCreateViewWorkload::test_show_create_view_workload[30-test_scv] [GOOD] |99.9%| [TM] {RESULT} ydb/tests/stress/show_create/view/tests/py3test >> test_workload.py::TestYdbKvWorkload::test[row] >> test_workload.py::TestYdbWorkload::test >> CompositeConveyorTests::Test10xMultiDistribution [GOOD] >> test_workload.py::TestYdbLogWorkload::test[row] >> test_workload.py::TestYdbWorkload::test >> test_workload.py::TestYdbTransferWorkload::test[row] [GOOD] >> test_workload.py::TestYdbTransferWorkload::test[column] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> CompositeConveyorTests::Test10xMultiDistribution [GOOD] Test command err: {I:85575};{S:140264};{N:110959}; {I:103213};{S:149006};{N:143542}; {I:133408};{S:160604};{N:187212}; {I:163793};{S:167118};{N:202282}; {I:194134};{S:181112};{N:202282}; {I:201032};{S:194217};{N:207510}; {I:202283};{S:202282};{N:224238}; {I:202283};{S:202282};{N:241324}; {I:202283};{S:203581};{N:269176}; {I:202283};{S:223187};{N:290456}; {I:202283};{S:243364};{N:311226}; {I:206408};{S:274045};{N:320104}; {I:225151};{S:295279};{N:320104}; {I:244689};{S:315435};{N:320104}; {I:277549};{S:320104};{N:320104}; {I:298993};{S:320104};{N:320104}; {I:320105};{S:320104};{N:320104}; {I:320105};{S:320104};{N:320104}; {I:320105};{S:320104};{N:326411}; {I:320105};{S:332407};{N:345986}; {I:320105};{S:345624};{N:365229}; {I:320105};{S:345624};{N:384223}; {I:320105};{S:345624};{N:404766}; {I:320105};{S:345624};{N:427119}; {I:320105};{S:345624};{N:459707}; {I:320105};{S:345624};{N:493795}; {I:320105};{S:345624};{N:536844}; {I:320105};{S:355250};{N:570305}; {I:320105};{S:389850};{N:588641}; {I:320105};{S:422425};{N:603034}; {I:329396};{S:447382};{N:619463}; {I:347616};{S:471786};{N:633136}; {I:366066};{S:531976};{N:633136}; {I:383780};{S:587307};{N:633136}; {I:397369};{S:653313};{N:633136}; {I:415623};{S:724299};{N:633136}; {I:429249};{S:770535};{N:633136}; {I:441919};{S:808367};{N:633136}; {I:454752};{S:841114};{N:633136}; {I:472270};{S:866135};{N:633136}; {I:486447};{S:896217};{N:633136}; {I:507674};{S:937352};{N:633136}; {I:524648};{S:952668};{N:648550}; {I:544885};{S:973037};{N:668537}; {I:563824};{S:994819};{N:687076}; {I:591554};{S:1000000};{N:707149}; {I:620599};{S:1000000};{N:726071}; {I:633137};{S:1000000};{N:744139}; {I:633137};{S:1000000};{N:761788}; {I:633137};{S:1000000};{N:780301}; {I:633137};{S:1000000};{N:801810}; {I:633137};{S:1000000};{N:821581}; {I:633137};{S:1000000};{N:838199}; {I:633137};{S:1000000};{N:857213}; {I:633137};{S:1000000};{N:876336}; {I:633137};{S:1000000};{N:895393}; {I:633137};{S:1000000};{N:919106}; {I:633137};{S:1000000};{N:942842}; {I:633137};{S:1000000};{N:965713}; {I:633137};{S:1000000};{N:986383}; {I:639008};{S:1000000};{N:1000000}; {I:660159};{S:1000000};{N:1000000}; {I:679178};{S:1000000};{N:1000000}; {I:699894};{S:1000000};{N:1000000}; {I:722126};{S:1000000};{N:1000000}; {I:742964};{S:1000000};{N:1000000}; {I:763566};{S:1000000};{N:1000000}; {I:782763};{S:1000000};{N:1000000}; {I:802459};{S:1000000};{N:1000000}; {I:823471};{S:1000000};{N:1000000}; {I:843642};{S:1000000};{N:1000000}; {I:861225};{S:1000000};{N:1000000}; {I:882908};{S:1000000};{N:1000000}; {I:899605};{S:1000000};{N:1000000}; {I:917683};{S:1000000};{N:1000000}; {I:938535};{S:1000000};{N:1000000}; {I:959878};{S:1000000};{N:1000000}; {I:978623};{S:1000000};{N:1000000}; 
{I:996800};{S:1000000};{N:1000000}; {I:1000000};{S:1000000};{N:1000000}; 250us per task 79.099182s;45.059008s;60.078079s; >> test_clickbench.py::TestClickbench::test_clickbench[Query32] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query33] >> test_workload.py::TestYdbWorkload::test >> CompositeConveyorTests::Test10xDistribution [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> CompositeConveyorTests::Test10xDistribution [GOOD] Test command err: {I:67353};{S:94946};{N:139902}; {I:84921};{S:116873};{N:165179}; {I:108616};{S:134054};{N:181017}; {I:125382};{S:149210};{N:192927}; {I:138153};{S:166570};{N:199372}; {I:145365};{S:177696};{N:231402}; {I:154440};{S:205584};{N:248325}; {I:181244};{S:213118};{N:262867}; {I:199233};{S:231651};{N:268266}; {I:205583};{S:252194};{N:282176}; {I:217974};{S:262868};{N:320525}; {I:239536};{S:283094};{N:344695}; {I:280560};{S:291687};{N:356225}; {I:283681};{S:311434};{N:356225}; {I:283681};{S:332569};{N:377443}; {I:283681};{S:354101};{N:397951}; {I:305191};{S:356225};{N:420640}; {I:325168};{S:356225};{N:439611}; {I:346040};{S:356225};{N:455723}; {I:356226};{S:364163};{N:465368}; {I:356226};{S:384672};{N:465368}; {I:356226};{S:400620};{N:465368}; {I:356226};{S:417873};{N:465368}; {I:356226};{S:433976};{N:465368}; {I:356226};{S:453309};{N:465368}; {I:363735};{S:465368};{N:465368}; {I:381765};{S:465368};{N:465368}; {I:396978};{S:465368};{N:465368}; {I:409868};{S:465368};{N:465368}; {I:427898};{S:465368};{N:465368}; {I:442611};{S:465368};{N:474096}; {I:463482};{S:465368};{N:481889}; {I:465368};{S:465368};{N:500107}; {I:465368};{S:465368};{N:518492}; {I:465368};{S:465368};{N:536626}; {I:465368};{S:465368};{N:552378}; {I:465368};{S:465368};{N:564984}; {I:465368};{S:465368};{N:584523}; {I:465368};{S:465368};{N:605659}; {I:465368};{S:479791};{N:624305}; {I:465368};{S:495637};{N:646351}; {I:465368};{S:514220};{N:663393}; {I:465368};{S:527286};{N:681172}; {I:465368};{S:543556};{N:699372}; {I:465368};{S:561201};{N:710106}; {I:465368};{S:579134};{N:725232}; {I:465368};{S:597120};{N:746026}; {I:465368};{S:615377};{N:765469}; {I:465368};{S:630703};{N:782950}; {I:465368};{S:649006};{N:800666}; {I:465368};{S:659412};{N:821440}; {I:465368};{S:679794};{N:839457}; {I:465368};{S:700957};{N:859002}; {I:465368};{S:717619};{N:875795}; {I:465368};{S:732398};{N:887801}; {I:465368};{S:747941};{N:899084}; {I:465368};{S:764639};{N:919687}; {I:465368};{S:781833};{N:939221}; {I:477114};{S:791909};{N:958797}; {I:490663};{S:791909};{N:979204}; {I:509317};{S:791909};{N:991259}; {I:527567};{S:801849};{N:1000000}; {I:549549};{S:817088};{N:1000000}; {I:570230};{S:829389};{N:1000000}; {I:590975};{S:846617};{N:1000000}; {I:612780};{S:868520};{N:1000000}; {I:630065};{S:889457};{N:1000000}; {I:647224};{S:909664};{N:1000000}; {I:669605};{S:930969};{N:1000000}; {I:689496};{S:950807};{N:1000000}; {I:708618};{S:969670};{N:1000000}; {I:724744};{S:988901};{N:1000000}; {I:741708};{S:1000000};{N:1000000}; {I:760793};{S:1000000};{N:1000000}; {I:779743};{S:1000000};{N:1000000}; {I:795816};{S:1000000};{N:1000000}; {I:813716};{S:1000000};{N:1000000}; {I:834634};{S:1000000};{N:1000000}; {I:853965};{S:1000000};{N:1000000}; {I:873355};{S:1000000};{N:1000000}; {I:897233};{S:1000000};{N:1000000}; {I:918953};{S:1000000};{N:1000000}; {I:937285};{S:1000000};{N:1000000}; {I:955786};{S:1000000};{N:1000000}; {I:976321};{S:1000000};{N:1000000}; {I:997667};{S:1000000};{N:1000000}; {I:1000000};{S:1000000};{N:1000000}; 272us per task 
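The conveyor_composite failure reported above (VERIFY at ut_simple.cpp:203 in CompositeConveyorTests::TestUniformProcessDistribution) can usually be reproduced without the full suite by rebuilding only ydb/core/tx/conveyor_composite/ut under the same {asan, release} configuration shown in the result tags and filtering to the single test. A minimal sketch, assuming ya's standard -F/--test-filter option is available in this checkout (the filter pattern below is illustrative, not part of the original run):

    ./ya make ydb/core/tx/conveyor_composite/ut -A --build release --sanitize=address -F 'CompositeConveyorTests::TestUniformProcessDistribution*'

The build flags mirror the asan/release configuration of these results; adjust them if the local checkout is configured differently.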
86.136852s;72.112331s;61.099425s; >> test_clickbench.py::TestClickbench::test_clickbench[Query33] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query34] >> KqpQuerySession::NoLocalAttach >> test_board_workload.py::TestReconfigStateStorageBoardWorkload::test_state_storage_board >> test_clickbench.py::TestClickbench::test_clickbench[Query34] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query35] >> test_clickbench.py::TestClickbench::test_clickbench[Query35] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query36] >> KqpQuerySession::NoLocalAttach [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query36] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query37] >> test_encryption.py::TestEncryption::test_simple_encryption [GOOD] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/kqp/kqp_query_session/unittest >> KqpQuerySession::NoLocalAttach [GOOD] |99.9%| [TM] {RESULT} ydb/tests/functional/kqp/kqp_query_session/unittest >> test_clickbench.py::TestClickbench::test_clickbench[Query37] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query38] >> test_clickbench.py::TestClickbench::test_clickbench[Query38] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query39] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/encryption/py3test >> test_encryption.py::TestEncryption::test_simple_encryption [GOOD] |99.9%| [TM] {RESULT} ydb/tests/functional/encryption/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query39] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query40] >> test_workload.py::TestYdbTransferWorkload::test[column] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query40] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query41] >> Transfer::BaseScenario_Local >> Replication::Types >> test_clickbench.py::TestClickbench::test_clickbench[Query41] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query42] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/transfer/tests/py3test >> test_workload.py::TestYdbTransferWorkload::test[column] [GOOD] |99.9%| [TM] {RESULT} ydb/tests/stress/transfer/tests/py3test >> Transfer::BaseScenario_Local [GOOD] >> Transfer::BaseScenario_Remote >> Replication::Types [GOOD] >> Replication::PauseAndResumeReplication >> test_clickbench.py::TestClickbench::test_clickbench[Query42] [GOOD] >> test_clickbench.py::TestClickbenchParallel::test[Query00] >> test_workload.py::TestYdbWorkload::test [GOOD] >> test_workload.py::TestYdbWorkload::test[row] [GOOD] >> test_workload.py::TestYdbWorkload::test[column] >> test_workload.py::TestYdbKvWorkload::test[row] [GOOD] >> test_workload.py::TestYdbKvWorkload::test[column] >> Transfer::BaseScenario_Remote [GOOD] >> Transfer::CreateTransfer_TargetNotFound >> Replication::PauseAndResumeReplication [GOOD] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/cdc/tests/py3test >> test_workload.py::TestYdbWorkload::test [GOOD] |99.9%| [TM] {RESULT} ydb/tests/stress/cdc/tests/py3test |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/reconfig_state_storage_workload/tests/py3test >> test_board_workload.py::TestReconfigStateStorageBoardWorkload::test_state_storage_board |99.9%| [TM] {RESULT} ydb/tests/stress/reconfig_state_storage_workload/tests/py3test >> Transfer::CreateTransfer_TargetNotFound [GOOD] >> Transfer::ConnectionString_BadChar ------- [TM] {asan, default-linux-x86_64, 
release} ydb/tests/functional/replication/unittest >> Replication::PauseAndResumeReplication [GOOD]
Test command err:
DDL: CREATE TABLE `SourceTable_16410868439195661789` (
    Key Uint32,
    Key2 Uuid,
    v01 Uuid,
    v02 Uuid NOT NULL,
    v03 Double,
    PRIMARY KEY (Key, Key2)
);
>>>>> Query: UPSERT INTO `SourceTable_16410868439195661789` (Key,Key2,v01,v02,v03) VALUES (
    1,
    CAST("00078af5-0000-0000-6c0b-040000000000" as Uuid),
    CAST("00078af5-0000-0000-6c0b-040000000001" as Uuid),
    UNWRAP(CAST("00078af5-0000-0000-6c0b-040000000002" as Uuid)),
    CAST("311111111113.222222223" as Double)
);
DDL: CREATE ASYNC REPLICATION `Replication_16410868439195661789`
    FOR `SourceTable_16410868439195661789` AS `Table_16410868439195661789`
    WITH ( CONNECTION_STRING = 'grpc://localhost:12226/?database=local' );
>>>>> Query: SELECT `Key2`, `v01`, `v02`, `v03` FROM `Table_16410868439195661789` ORDER BY `Key2`, `v01`, `v02`, `v03`
>>>>> Query error:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/local/Table_16410868439195661789]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003
Attempt=19 count=-1
>>>>> Query: SELECT `Key2`, `v01`, `v02`, `v03` FROM `Table_16410868439195661789` ORDER BY `Key2`, `v01`, `v02`, `v03`
>>>>> Query error:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/local/Table_16410868439195661789]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003
Attempt=18 count=-1
>>>>> Query: SELECT `Key2`, `v01`, `v02`, `v03` FROM `Table_16410868439195661789` ORDER BY `Key2`, `v01`, `v02`, `v03`
Attempt=17 count=1
DDL: DROP ASYNC REPLICATION `Replication_16410868439195661789`;
DDL: DROP TABLE `SourceTable_16410868439195661789`
DDL: CREATE TABLE `SourceTable_8153620888023867564` (
    Key Uint64 NOT NULL,
    Message Utf8,
    PRIMARY KEY (Key)
);
DDL: CREATE ASYNC REPLICATION `Replication_8153620888023867564`
    FOR `SourceTable_8153620888023867564` AS `Table_8153620888023867564`
    WITH ( CONNECTION_STRING = 'grpc://localhost:12226/?database=local' );
>>>>> Query: INSERT INTO `SourceTable_8153620888023867564` (`Key`, `Message`) VALUES (1, 'Message-1');
>>>>> Query error:
: Error: Scheme changed. Table: `/local/SourceTable_8153620888023867564`., code: 2028
: Error: Cannot parse tx 5. SCHEME_CHANGED: Table '/local/SourceTable_8153620888023867564' scheme changed. at tablet# 72075186224037893, code: 2034
: Error: Query invalidated on scheme/internal error during Data execution, code: 2019
>>>>> Query: INSERT INTO `SourceTable_8153620888023867564` (`Key`, `Message`) VALUES (1, 'Message-1');
>>>>> Query: SELECT `Message` FROM `Table_8153620888023867564` ORDER BY `Message`
Attempt=19 count=1
State: Paused
DDL: ALTER ASYNC REPLICATION `Replication_8153620888023867564` SET ( STATE = "Paused" );
>>>>> Query: INSERT INTO `SourceTable_8153620888023867564` (`Key`, `Message`) VALUES (2, 'Message-2');
>>>>> Query: SELECT `Message` FROM `Table_8153620888023867564` ORDER BY `Message`
Attempt=19 count=1
State: StandBy
DDL: ALTER ASYNC REPLICATION `Replication_8153620888023867564` SET ( STATE = "StandBy" );
>>>>> Query: SELECT `Message` FROM `Table_8153620888023867564` ORDER BY `Message`
Attempt=19 count=2
DDL: ALTER ASYNC REPLICATION `Replication_8153620888023867564` SET ( STATE = "Paused" );
DDL: ALTER ASYNC REPLICATION `Replication_8153620888023867564` SET ( STATE = "StandBy" );
DDL: DROP ASYNC REPLICATION `Replication_8153620888023867564`;
DDL: DROP TABLE `SourceTable_8153620888023867564`
|99.9%| [TM] {RESULT} ydb/tests/functional/replication/unittest
>> Transfer::ConnectionString_BadChar [GOOD]
>> Transfer::ConnectionString_BadDNSName
>> test_workload.py::TestYdbWorkload::test
>> Backup::UuidValue
>> Transfer::ConnectionString_BadDNSName [GOOD]
>> Transfer::Create_WithPermission
>> test_workload.py::TestYdbWorkload::test [GOOD]
>> Backup::UuidValue [GOOD]
>> Transfer::Create_WithPermission [GOOD]
>> Transfer::Create_WithoutTablePermission
>> test_workload.py::TestYdbWorkload::test [GOOD]
>> Transfer::Create_WithoutTablePermission [GOOD]
>> Transfer::LocalTopic_WithPermission
------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/backup/unittest >> Backup::UuidValue [GOOD]
Test command err:
Found S3 object: "ProducerUuidValueBackup/data_00.csv"
Found S3 object: "ProducerUuidValueBackup/metadata.json"
Found S3 object: "ProducerUuidValueBackup/scheme.pb"
|99.9%| [TM] {RESULT} ydb/tests/functional/backup/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/py3test >> test_clickbench.py::TestClickbenchParallel::test[Query00]
2025-06-24 21:51:32,320 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out
2025-06-24 21:51:34,005 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout.
Process tree before termination: pid rss ref pdirt 1290248 733M 731M 650M ydb-tests-functional-tpc-medium --basetemp /home/runner/.ya/build/build_root/2btm/0030f5/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctes 1291387 1.8G 1.8G 1.4G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/2btm/0030f5/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/ 1292592 2.7G 2.7G 2.2G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/2btm/0030f5/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/ 1399226 1.4G 1.5G 1.0G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/2btm/0030f5/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/ 1401460 1.4G 1.5G 1.0G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/2btm/0030f5/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/ 1401987 124M 127M 55.8M └─ ydb -e grpc://localhost:3615 -d /local/test_db workload clickbench -p olap_yatests/clickbench/hits init --store=column Test command err: File "library/python/pytest/main.py", line 101, in main rc = pytest.main( File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main return wrap_session(config, _main) File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session session.exitstatus = doit(config, session) or 0 File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main config.hook.pytest_runtestloop(session=session) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol runtestprotocol(item, nextitem=nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 128, in runtestprotocol rep = call_and_report(item, "setup", log) File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report call = call_runtest_hook(item, when, **kwds) File 
"contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook return CallInfo.from_call( File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call result: Optional[TResult] = func() File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in lambda: ihook(item=item, **kwds), when=when, reraise=reraise File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 158, in pytest_runtest_setup item.session._setupstate.setup(item) File "contrib/python/pytest/py3/_pytest/runner.py", line 511, in setup col.setup() File "contrib/python/pytest/py3/_pytest/python.py", line 1847, in setup self._request._fillfixtures() File "contrib/python/pytest/py3/_pytest/fixtures.py", line 689, in _fillfixtures item.funcargs[argname] = self.getfixturevalue(argname) File "contrib/python/pytest/py3/_pytest/fixtures.py", line 547, in getfixturevalue fixturedef = self._get_active_fixturedef(argname) File "contrib/python/pytest/py3/_pytest/fixtures.py", line 566, in _get_active_fixturedef self._compute_fixture_value(fixturedef) File "contrib/python/pytest/py3/_pytest/fixtures.py", line 648, in _compute_fixture_value fixturedef.execute(request=subrequest) File "contrib/python/pytest/py3/_pytest/fixtures.py", line 1087, in execute result = ihook.pytest_fixture_setup(fixturedef=self, request=request) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/fixtures.py", line 1140, in pytest_fixture_setup result = call_fixture_func(fixturefunc, request, kwargs) File "contrib/python/pytest/py3/_pytest/fixtures.py", line 910, in call_fixture_func fixture_result = next(generator) File "contrib/python/pytest/py3/_pytest/python.py", line 843, in xunit_setup_class_fixture _call_with_optional_argument(func, self.obj) File "contrib/python/pytest/py3/_pytest/python.py", line 764, in _call_with_optional_argument func(arg) File "ydb/tests/functional/tpc/medium/test_clickbench.py", line 25, in setup_class cls.run_cli(['workload', 'clickbench', '-p', 'olap_yatests/clickbench/hits', 'init', '--store=column']) File "ydb/tests/functional/tpc/lib/conftest.py", line 60, in run_cli return yatest.common.execute(YdbCliHelper.get_cli_command() + argv) File "library/python/testing/yatest_common/yatest/common/process.py", line 656, in execute res.wait(check_exit_code, timeout, on_timeout) File "library/python/testing/yatest_common/yatest/common/process.py", line 400, in wait _wait() File "library/python/testing/yatest_common/yatest/common/process.py", line 335, in _wait pid, sts, rusage = os.wait4(self._process.pid, 0) File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown traceback.print_stack(file=sys.stderr) Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", 
line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: ...2btm/0030f5/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/0030f5/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_clickbench/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/0030f5', '--source-root', '/home/runner/.ya/build/build_root/2btm/0030f5/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/0030f5/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_clickbench/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/functional/tpc/medium', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--dep-root', 'ydb/tests/functional/tpc/medium', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address', '--test-file-filter', 'test_clickbench.py']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("...2btm/0030f5/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/0030f5/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_clickbench/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/0030f5', '--source-root', '/home/runner/.ya/build/build_root/2btm/0030f5/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/0030f5/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_clickbench/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/functional/tpc/medium', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--dep-root', 'ydb/tests/functional/tpc/medium', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 
'USE_PREBUILT_TOOLS=no', '--sanitize', 'address', '--test-file-filter', 'test_clickbench.py']' stopped by 600 seconds timeout",), {}) |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/node_broker/tests/py3test >> test_workload.py::TestYdbWorkload::test [GOOD] |99.9%| [TM] {RESULT} ydb/tests/stress/node_broker/tests/py3test >> Transfer::LocalTopic_WithPermission [GOOD] >> Transfer::LocalTopic_BigMessage ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/py3test >> test_upload.py::TestUploadTpchS1::test 2025-06-24 21:51:24,768 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 21:51:27,384 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 1289447 614M 611M 531M ydb-tests-functional-tpc-medium --basetemp /home/runner/.ya/build/build_root/2btm/003107/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctes 1289566 8.1G 8.0G 7.6G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/2btm/003107/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/ 1290965 5.0G 4.9G 4.5G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/2btm/003107/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/ 1296168 2.6G 2.6G 2.5G └─ ydb -e grpc://localhost:64650 -d /local/test_db workload tpch -p olap_yatests/upload/tpch/s1 import generator --scale 1 Test command err: File "library/python/pytest/main.py", line 101, in main rc = pytest.main( File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main return wrap_session(config, _main) File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session session.exitstatus = doit(config, session) or 0 File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main config.hook.pytest_runtestloop(session=session) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File 
"contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol runtestprotocol(item, nextitem=nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 134, in runtestprotocol reports.append(call_and_report(item, "call", log)) File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report call = call_runtest_hook(item, when, **kwds) File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook return CallInfo.from_call( File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call result: Optional[TResult] = func() File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in lambda: ihook(item=item, **kwds), when=when, reraise=reraise File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 170, in pytest_runtest_call item.runtest() File "contrib/python/pytest/py3/_pytest/python.py", line 1844, in runtest self.ihook.pytest_pyfunc_call(pyfuncitem=self) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "library/python/pytest/plugins/ya.py", line 563, in pytest_pyfunc_call pyfuncitem.retval = testfunction(**testargs) File "ydb/tests/olap/load/lib/upload.py", line 40, in test self.import_data() File "ydb/tests/olap/load/lib/upload.py", line 61, in import_data yatest.common.execute(YdbCliHelper.get_cli_command() + ['workload', 'tpch', '-p', self.__get_path(), 'import', 'generator', '--scale', str(self.scale)]) File "library/python/testing/yatest_common/yatest/common/process.py", line 656, in execute res.wait(check_exit_code, timeout, on_timeout) File "library/python/testing/yatest_common/yatest/common/process.py", line 400, in wait _wait() File "library/python/testing/yatest_common/yatest/common/process.py", line 335, in _wait pid, sts, rusage = os.wait4(self._process.pid, 0) File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown traceback.print_stack(file=sys.stderr) Thread 0x00007f5564d0f640 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 359 in wait File "contrib/python/ydb/py3/ydb/pool.py", line 274 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Current thread 0x00007f5596ada940 (most recent call first): File "contrib/tools/python3/Lib/subprocess.py", line 2011 in _try_wait File "contrib/tools/python3/Lib/subprocess.py", line 2053 in _wait File "contrib/tools/python3/Lib/subprocess.py", line 1264 in wait File "library/python/testing/yatest_common/yatest/common/process.py", line 370 in _wait File "library/python/testing/yatest_common/yatest/common/process.py", line 400 in wait File "library/python/testing/yatest_common/yatest/common/process.py", line 656 in execute File 
"ydb/tests/olap/load/lib/upload.py", line 61 in import_data File "ydb/tests/olap/load/lib/upload.py", line 40 in test File "library/python/pytest/plugins/ya.py", line 563 in pytest_pyfunc_call File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/python.py", line 1844 in runtest File "contrib/python/pytest/py3/_pytest/runner.py", line 170 in pytest_runtest_call File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/runner.py", line 263 in File "contrib/python/pytest/py3/_pytest/runner.py", line 342 in from_call File "contrib/python/pytest/py3/_pytest/runner.py", line 262 in call_runtest_hook File "contrib/python/pytest/py3/_pytest/runner.py", line 223 in call_and_report File "contrib/python/pytest/py3/_pytest/runner.py", line 134 in runtestprotocol File "contrib/python/pytest/py3/_pytest/runner.py", line 115 in pytest_runtest_protocol File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/main.py", line 352 in pytest_runtestloop File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/main.py", line 327 in _main File "contrib/python/pytest/py3/_pytest/main.py", line 273 in wrap_session File "contrib/python/pytest/py3/_pytest/main.py", line 320 in pytest_cmdline_main File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175 in main File "library/python/pytest/main.py", line 101 in main Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: .../build_root/2btm/003107/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/003107/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_upload/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/003107', '--source-root', '/home/runner/.ya/build/build_root/2btm/003107/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/003107/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_upload/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/functional/tpc/medium', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', 
'--tb', 'short', '--dep-root', 'ydb/tests/functional/tpc/medium', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address', '--test-file-filter', 'test_upload.py']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: ((".../build_root/2btm/003107/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/003107/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_upload/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/003107', '--source-root', '/home/runner/.ya/build/build_root/2btm/003107/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/003107/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_upload/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/functional/tpc/medium', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--dep-root', 'ydb/tests/functional/tpc/medium', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address', '--test-file-filter', 'test_upload.py']' stopped by 600 seconds timeout",), {}) 2025-06-24 21:51:58,010 WARNING library.python.cores: Core dump dir doesn't exist: /coredumps 2025-06-24 21:51:58,011 WARNING library.python.cores: Core dump dir doesn't exist: /var/tmp/cores 2025-06-24 21:51:59,417 WARNING libarchive: File (test_upload.py.TestUploadTpchS1.test/cluster/slot_1/logfile_cgug5we8.log) size has changed. Can't write more data than was declared in the tar header (232448782). 
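Note: the 600-second limit above is applied by the run_test wrapper to the whole py3test process; the data-import subprocess itself is started without its own timeout. As a sketch for local debugging, the import step that was still running when the wrapper fired can be repeated outside of pytest with the YDB CLI invocation visible in the process tree of the test_tpch timeout below; the endpoint, database and workload path are specific to that run's local test-cluster slot and will differ in another environment:
ydb -e grpc://localhost:8657 -d /local/test_db workload tpch -p olap_yatests/tpch/s1 import generator --scale=1
Timing this scale-1 import on a machine with more headroom helps decide whether the suite needs a larger test size or timeout in its ya.make, or whether the import itself regressed.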
(probably file was changed during archiving) |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/oltp_workload/tests/py3test >> test_workload.py::TestYdbWorkload::test [GOOD] |99.9%| [TM] {RESULT} ydb/tests/stress/oltp_workload/tests/py3test >> Transfer::LocalTopic_BigMessage [GOOD] >> Transfer::AlterLambda >> test_workload.py::TestYdbWorkload::test [FAIL] >> test_workload.py::TestYdbWorkload::test [GOOD] >> Transfer::AlterLambda [GOOD] >> Transfer::EnsureError >> Transfer::EnsureError [GOOD] >> Transfer::CheckCommittedOffset_Local |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/s3_backups/tests/py3test >> test_workload.py::TestYdbWorkload::test [GOOD] |99.9%| [TM] {RESULT} ydb/tests/stress/s3_backups/tests/py3test >> Transfer::CheckCommittedOffset_Local [GOOD] >> Transfer::CheckCommittedOffset_Remote ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/py3test >> test_tpch.py::TestTpchS1::test_tpch[1] 2025-06-24 21:52:01,430 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-24 21:52:03,520 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 1295686 543M 542M 461M ydb-tests-functional-tpc-medium --basetemp /home/runner/.ya/build/build_root/2btm/00304f/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctes 1295993 7.9G 7.8G 7.5G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/2btm/00304f/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/ 1297116 5.0G 4.9G 4.5G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/2btm/00304f/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/ 1300807 2.7G 2.7G 2.6G └─ ydb -e grpc://localhost:8657 -d /local/test_db workload tpch -p olap_yatests/tpch/s1 import generator --scale=1 Test command err: File "library/python/pytest/main.py", line 101, in main rc = pytest.main( File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main return wrap_session(config, _main) File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session session.exitstatus = doit(config, session) or 0 File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main config.hook.pytest_runtestloop(session=session) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) 
File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol runtestprotocol(item, nextitem=nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 128, in runtestprotocol rep = call_and_report(item, "setup", log) File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report call = call_runtest_hook(item, when, **kwds) File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook return CallInfo.from_call( File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call result: Optional[TResult] = func() File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in lambda: ihook(item=item, **kwds), when=when, reraise=reraise File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 158, in pytest_runtest_setup item.session._setupstate.setup(item) File "contrib/python/pytest/py3/_pytest/runner.py", line 511, in setup col.setup() File "contrib/python/pytest/py3/_pytest/python.py", line 1847, in setup self._request._fillfixtures() File "contrib/python/pytest/py3/_pytest/fixtures.py", line 689, in _fillfixtures item.funcargs[argname] = self.getfixturevalue(argname) File "contrib/python/pytest/py3/_pytest/fixtures.py", line 547, in getfixturevalue fixturedef = self._get_active_fixturedef(argname) File "contrib/python/pytest/py3/_pytest/fixtures.py", line 566, in _get_active_fixturedef self._compute_fixture_value(fixturedef) File "contrib/python/pytest/py3/_pytest/fixtures.py", line 648, in _compute_fixture_value fixturedef.execute(request=subrequest) File "contrib/python/pytest/py3/_pytest/fixtures.py", line 1087, in execute result = ihook.pytest_fixture_setup(fixturedef=self, request=request) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/fixtures.py", line 1140, in pytest_fixture_setup result = call_fixture_func(fixturefunc, request, kwargs) File "contrib/python/pytest/py3/_pytest/fixtures.py", line 910, in call_fixture_func fixture_result = next(generator) File "contrib/python/pytest/py3/_pytest/python.py", line 843, in xunit_setup_class_fixture _call_with_optional_argument(func, self.obj) File "contrib/python/pytest/py3/_pytest/python.py", line 764, in _call_with_optional_argument func(arg) File "ydb/tests/functional/tpc/medium/test_tpch.py", line 12, in setup_class cls.run_cli(['workload', 'tpch', '-p', 
'olap_yatests/tpch/s1', 'import', 'generator', '--scale=1']) File "ydb/tests/functional/tpc/lib/conftest.py", line 60, in run_cli return yatest.common.execute(YdbCliHelper.get_cli_command() + argv) File "library/python/testing/yatest_common/yatest/common/process.py", line 656, in execute res.wait(check_exit_code, timeout, on_timeout) File "library/python/testing/yatest_common/yatest/common/process.py", line 400, in wait _wait() File "library/python/testing/yatest_common/yatest/common/process.py", line 335, in _wait pid, sts, rusage = os.wait4(self._process.pid, 0) File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown traceback.print_stack(file=sys.stderr) Current thread 0x00007f52c25dc940 (most recent call first): File "contrib/tools/python3/Lib/subprocess.py", line 2011 in _try_wait File "contrib/tools/python3/Lib/subprocess.py", line 2053 in _wait File "contrib/tools/python3/Lib/subprocess.py", line 1264 in wait File "library/python/testing/yatest_common/yatest/common/process.py", line 370 in _wait File "library/python/testing/yatest_common/yatest/common/process.py", line 400 in wait File "library/python/testing/yatest_common/yatest/common/process.py", line 656 in execute File "ydb/tests/functional/tpc/lib/conftest.py", line 60 in run_cli File "ydb/tests/functional/tpc/medium/test_tpch.py", line 12 in setup_class File "contrib/python/pytest/py3/_pytest/python.py", line 764 in _call_with_optional_argument File "contrib/python/pytest/py3/_pytest/python.py", line 843 in xunit_setup_class_fixture File "contrib/python/pytest/py3/_pytest/fixtures.py", line 910 in call_fixture_func File "contrib/python/pytest/py3/_pytest/fixtures.py", line 1140 in pytest_fixture_setup File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/fixtures.py", line 1087 in execute File "contrib/python/pytest/py3/_pytest/fixtures.py", line 648 in _compute_fixture_value File "contrib/python/pytest/py3/_pytest/fixtures.py", line 566 in _get_active_fixturedef File "contrib/python/pytest/py3/_pytest/fixtures.py", line 547 in getfixturevalue File "contrib/python/pytest/py3/_pytest/fixtures.py", line 689 in _fillfixtures File "contrib/python/pytest/py3/_pytest/python.py", line 1847 in setup File "contrib/python/pytest/py3/_pytest/runner.py", line 511 in setup File "contrib/python/pytest/py3/_pytest/runner.py", line 158 in pytest_runtest_setup File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/runner.py", line 263 in File "contrib/python/pytest/py3/_pytest/runner.py", line 342 in from_call File "contrib/python/pytest/py3/_pytest/runner.py", line 262 in call_runtest_hook File "contrib/python/pytest/py3/_pytest/runner.py", line 223 in call_and_report File "contrib/python/pytest/py3/_pytest/runner.py", line 128 in runtestprotocol File "contrib/python/pytest/py3/_pytest/runner.py", line 115 in pytest_runtest_protocol File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/main.py", line 352 in 
pytest_runtestloop File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/main.py", line 327 in _main File "contrib/python/pytest/py3/_pytest/main.py", line 273 in wrap_session File "contrib/python/pytest/py3/_pytest/main.py", line 320 in pytest_cmdline_main File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175 in main File "library/python/pytest/main.py", line 101 in main Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: .../build/build_root/2btm/00304f/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/00304f/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_tpch/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/00304f', '--source-root', '/home/runner/.ya/build/build_root/2btm/00304f/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/2btm/00304f/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_tpch/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/functional/tpc/medium', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--dep-root', 'ydb/tests/functional/tpc/medium', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address', '--test-file-filter', 'test_tpch.py']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: ((".../build/build_root/2btm/00304f/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/2btm/00304f/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_tpch/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/2btm/00304f', '--source-root', '/home/runner/.ya/build/build_root/2btm/00304f/environment/arcadia', 
'--output-dir', '/home/runner/.ya/build/build_root/2btm/00304f/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_tpch/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/functional/tpc/medium', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--dep-root', 'ydb/tests/functional/tpc/medium', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address', '--test-file-filter', 'test_tpch.py']' stopped by 600 seconds timeout",), {}) 2025-06-24 21:52:34,171 WARNING library.python.cores: Core dump dir doesn't exist: /coredumps 2025-06-24 21:52:34,171 WARNING library.python.cores: Core dump dir doesn't exist: /var/tmp/cores 2025-06-24 21:52:35,686 WARNING libarchive: File (test_tpch.py.TestTpchS1.test_tpch.1/cluster/slot_1/logfile_wpcqx32o.log) size has changed. Can't write more data than was declared in the tar header (223467043). (probably file was changed during archiving) |99.9%| [TA] $(B)/ydb/tests/functional/tpc/medium/test-results/py3test/{meta.json ... results_accumulator.log} |99.9%| [TA] {RESULT} $(B)/ydb/tests/functional/tpc/medium/test-results/py3test/{meta.json ... results_accumulator.log} >> test_workload.py::TestYdbWorkload::test[column] [GOOD] >> Transfer::CheckCommittedOffset_Remote [GOOD] >> Transfer::DropTransfer |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/olap_workload/tests/py3test >> test_workload.py::TestYdbWorkload::test [FAIL] |99.9%| [TM] {RESULT} ydb/tests/stress/olap_workload/tests/py3test >> test_workload.py::TestYdbLogWorkload::test[row] [GOOD] >> test_workload.py::TestYdbLogWorkload::test[column] >> Transfer::DropTransfer [GOOD] >> Transfer::CreateAndDropConsumer >> test_workload.py::TestYdbKvWorkload::test[column] [GOOD] >> Transfer::CreateAndDropConsumer [GOOD] >> Transfer::DescribeError_OnLambdaCompilation |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/simple_queue/tests/py3test >> test_workload.py::TestYdbWorkload::test[column] [GOOD] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/kv/tests/py3test >> test_workload.py::TestYdbKvWorkload::test[column] [GOOD] |99.9%| [TM] {RESULT} ydb/tests/stress/simple_queue/tests/py3test |99.9%| [TM] {RESULT} ydb/tests/stress/kv/tests/py3test >> Transfer::DescribeError_OnLambdaCompilation [GOOD] >> Transfer::CustomConsumer >> test_workload.py::TestYdbLogWorkload::test[column] [FAIL] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/log/tests/py3test >> test_workload.py::TestYdbLogWorkload::test[column] [FAIL] |99.9%| [TM] {RESULT} ydb/tests/stress/log/tests/py3test >> Transfer::CustomConsumer [GOOD] >> Transfer::CustomFlushInterval >> Transfer::CustomFlushInterval [GOOD] >> Transfer::AlterFlushInterval >> Transfer::AlterFlushInterval [GOOD] >> Transfer::AlterBatchSize >> Transfer::AlterBatchSize [GOOD] >> Transfer::CreateTransferSourceNotExists >> Transfer::CreateTransferSourceNotExists [GOOD] >> Transfer::TransferSourceDropped >> 
Transfer::TransferSourceDropped [GOOD] >> Transfer::CreateTransferSourceIsNotTopic >> Transfer::CreateTransferSourceIsNotTopic [GOOD] >> Transfer::CreateTransferTargetIsNotTable >> Transfer::CreateTransferTargetIsNotTable [GOOD] >> Transfer::CreateTransferTargetNotExists >> Transfer::CreateTransferTargetNotExists [GOOD] >> Transfer::PauseAndResumeTransfer >> Transfer::PauseAndResumeTransfer [GOOD] >> Transfer_ColumnTable::KeyColumnFirst >> Transfer_ColumnTable::KeyColumnFirst [GOOD] >> Transfer_ColumnTable::KeyColumnLast >> Transfer_ColumnTable::KeyColumnLast [GOOD] >> Transfer_ColumnTable::ComplexKey >> Transfer_ColumnTable::ComplexKey [GOOD] >> Transfer_ColumnTable::NullableColumn >> ConsistentIndexRead::InteractiveTx [GOOD] >> KqpExtTest::SecondaryIndexSelectUsingScripting >> KqpExtTest::SecondaryIndexSelectUsingScripting [GOOD] >> Transfer_ColumnTable::NullableColumn [GOOD] >> Transfer_ColumnTable::WriteNullToKeyColumn |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/kqp/kqp_indexes/unittest >> KqpExtTest::SecondaryIndexSelectUsingScripting [GOOD] |99.9%| [TM] {RESULT} ydb/tests/functional/kqp/kqp_indexes/unittest >> Transfer_ColumnTable::WriteNullToKeyColumn [GOOD] >> Transfer_ColumnTable::WriteNullToColumn >> Transfer_ColumnTable::WriteNullToColumn [GOOD] >> Transfer_ColumnTable::Upsert_DifferentBatch >> Transfer_ColumnTable::Upsert_DifferentBatch [GOOD] >> Transfer_ColumnTable::Upsert_OneBatch >> Transfer_ColumnTable::Upsert_OneBatch [GOOD] >> Transfer_ColumnTable::ColumnType_Date >> Transfer_ColumnTable::ColumnType_Date [GOOD] >> Transfer_ColumnTable::ColumnType_Double >> Transfer_ColumnTable::ColumnType_Double [GOOD] >> Transfer_ColumnTable::ColumnType_Int8 >> Transfer_ColumnTable::ColumnType_Int8 [GOOD] >> Transfer_ColumnTable::ColumnType_Int16 >> Transfer_ColumnTable::ColumnType_Int16 [GOOD] >> Transfer_ColumnTable::ColumnType_Int32 >> Transfer_ColumnTable::ColumnType_Int32 [GOOD] >> Transfer_ColumnTable::ColumnType_Int64 >> Transfer_ColumnTable::ColumnType_Int64 [GOOD] >> Transfer_ColumnTable::ColumnType_Utf8_LongValue >> Transfer_ColumnTable::ColumnType_Utf8_LongValue [GOOD] >> Transfer_ColumnTable::MessageField_Partition >> Transfer_ColumnTable::MessageField_Partition [GOOD] >> Transfer_ColumnTable::MessageField_SeqNo >> Transfer_ColumnTable::MessageField_SeqNo [GOOD] >> Transfer_ColumnTable::MessageField_ProducerId >> Transfer_ColumnTable::MessageField_ProducerId [GOOD] >> Transfer_ColumnTable::MessageField_MessageGroupId >> Transfer_ColumnTable::MessageField_MessageGroupId [GOOD] >> Transfer_ColumnTable::ProcessingJsonMessage >> Transfer_ColumnTable::ProcessingJsonMessage [GOOD] >> Transfer_ColumnTable::ProcessingCDCMessage >> Transfer_ColumnTable::ProcessingCDCMessage [GOOD] >> Transfer_RowTable::KeyColumnFirst >> Transfer_RowTable::KeyColumnFirst [GOOD] >> Transfer_RowTable::KeyColumnLast >> Transfer_RowTable::KeyColumnLast [GOOD] >> Transfer_RowTable::ComplexKey >> Transfer_RowTable::ComplexKey [GOOD] >> Transfer_RowTable::NullableColumn >> Transfer_RowTable::NullableColumn [GOOD] >> Transfer_RowTable::WriteNullToKeyColumn >> Transfer_RowTable::WriteNullToKeyColumn [GOOD] >> Transfer_RowTable::WriteNullToColumn >> Transfer_RowTable::WriteNullToColumn [GOOD] >> Transfer_RowTable::Upsert_DifferentBatch >> Transfer_RowTable::Upsert_DifferentBatch [GOOD] >> Transfer_RowTable::Upsert_OneBatch >> CompositeConveyorTests::TestUniformDistribution [GOOD] >> Transfer_RowTable::Upsert_OneBatch [GOOD] >> Transfer_RowTable::ColumnType_Bool >> 
Transfer_RowTable::ColumnType_Bool [GOOD] >> Transfer_RowTable::ColumnType_Date ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> CompositeConveyorTests::TestUniformDistribution [GOOD] Test command err: {I_1_1:112158};{I_2_1:112158};{I_3_1:112158};{S_1_1:121325};{S_2_1:117826};{S_3_1:116472};{N_1_1:112158};{N_2_1:113275};{N_3_1:112158};{I_1_2:112158};{I_2_2:112158};{I_3_2:112158};{S_1_2:120965};{S_2_2:123064};{S_3_2:112158};{N_1_2:112367};{N_2_2:112518};{N_3_2:112858}; {I_1_1:112158};{I_2_1:112158};{I_3_1:112158};{S_1_1:122946};{S_2_1:119446};{S_3_1:118093};{N_1_1:112158};{N_2_1:115301};{N_3_1:112158};{I_1_2:112158};{I_2_2:112158};{I_3_2:113475};{S_1_2:122586};{S_2_2:124647};{S_3_2:112158};{N_1_2:114393};{N_2_2:114285};{N_3_2:114836}; {I_1_1:112158};{I_2_1:114360};{I_3_1:112158};{S_1_1:124200};{S_2_1:120700};{S_3_1:119347};{N_1_1:112158};{N_2_1:116868};{N_3_1:112158};{I_1_2:112158};{I_2_2:112158};{I_3_2:117541};{S_1_2:123839};{S_2_2:125870};{S_3_2:112158};{N_1_2:115961};{N_2_2:115652};{N_3_2:116365}; {I_1_1:112158};{I_2_1:117446};{I_3_1:112158};{S_1_1:125434};{S_2_1:121934};{S_3_1:120581};{N_1_1:112158};{N_2_1:118411};{N_3_1:112158};{I_1_2:112158};{I_2_2:112158};{I_3_2:120627};{S_1_2:125074};{S_2_2:127075};{S_3_2:112158};{N_1_2:117503};{N_2_2:116998};{N_3_2:117871}; {I_1_1:112158};{I_2_1:119997};{I_3_1:112158};{S_1_1:126454};{S_2_1:122955};{S_3_1:121601};{N_1_1:112158};{N_2_1:119686};{N_3_1:112158};{I_1_2:112158};{I_2_2:112158};{I_3_2:123178};{S_1_2:126094};{S_2_2:128072};{S_3_2:112158};{N_1_2:118779};{N_2_2:118110};{N_3_2:119117}; {I_1_1:112158};{I_2_1:122506};{I_3_1:112158};{S_1_1:127945};{S_2_1:124445};{S_3_1:123092};{N_1_1:112158};{N_2_1:121549};{N_3_1:112158};{I_1_2:114593};{I_2_2:112158};{I_3_2:125687};{S_1_2:127585};{S_2_2:129527};{S_3_2:112158};{N_1_2:120642};{N_2_2:119736};{N_3_2:120936}; {I_1_1:112158};{I_2_1:124999};{I_3_1:112431};{S_1_1:129495};{S_2_1:125996};{S_3_1:124642};{N_1_1:112158};{N_2_1:123487};{N_3_1:112158};{I_1_2:117085};{I_2_2:112158};{I_3_2:128180};{S_1_2:129135};{S_2_2:131040};{S_3_2:112158};{N_1_2:122580};{N_2_2:121426};{N_3_2:122827}; {I_1_1:112158};{I_2_1:126947};{I_3_1:114378};{S_1_1:131053};{S_2_1:127554};{S_3_1:126201};{N_1_1:112158};{N_2_1:125435};{N_3_1:112158};{I_1_2:119033};{I_2_2:112158};{I_3_2:130128};{S_1_2:130693};{S_2_2:132561};{S_3_2:112158};{N_1_2:124528};{N_2_2:123125};{N_3_2:124729}; {I_1_1:112158};{I_2_1:128600};{I_3_1:116032};{S_1_1:132380};{S_2_1:128881};{S_3_1:127528};{N_1_1:112158};{N_2_1:127278};{N_3_1:112158};{I_1_2:120686};{I_2_2:112918};{I_3_2:131781};{S_1_2:132020};{S_2_2:133857};{S_3_2:112896};{N_1_2:126371};{N_2_2:124734};{N_3_2:126528}; {I_1_1:112158};{I_2_1:129992};{I_3_1:117424};{S_1_1:133540};{S_2_1:130041};{S_3_1:128687};{N_1_1:112158};{N_2_1:129018};{N_3_1:112158};{I_1_2:122078};{I_2_2:114310};{I_3_2:133173};{S_1_2:133180};{S_2_2:134989};{S_3_2:114056};{N_1_2:128111};{N_2_2:126251};{N_3_2:128227}; {I_1_1:112158};{I_2_1:131417};{I_3_1:118849};{S_1_1:134728};{S_2_1:131229};{S_3_1:129875};{N_1_1:112158};{N_2_1:130800};{N_3_1:112158};{I_1_2:123504};{I_2_2:115735};{I_3_2:134598};{S_1_2:134368};{S_2_2:136148};{S_3_2:115244};{N_1_2:129892};{N_2_2:127805};{N_3_2:129966}; {I_1_1:112289};{I_2_1:132771};{I_3_1:120202};{S_1_1:135878};{S_2_1:132378};{S_3_1:131025};{N_1_1:112158};{N_2_1:132341};{N_3_1:112890};{I_1_2:124857};{I_2_2:117089};{I_3_2:135952};{S_1_2:135518};{S_2_2:137271};{S_3_2:116393};{N_1_2:131434};{N_2_2:129150};{N_3_2:131471}; 
[… further per-queue progress snapshots elided: the 18 I_*/S_*/N_* counters continue to grow monotonically until every queue reaches 1000000 …] 439us per task
473.386678s;452.383306s;482.388135s;417.377793s;428.379610s;463.385157s;417.377813s;463.385162s;467.385813s;482.388169s;476.387263s;476.387265s;439.381382s;415.377558s;467.385831s;441.381692s;421.378541s;425.379169s; |99.9%| [TA] $(B)/ydb/core/tx/conveyor_composite/ut/test-results/unittest/{meta.json ... results_accumulator.log} |99.9%| [TA] {RESULT} $(B)/ydb/core/tx/conveyor_composite/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> Transfer_RowTable::ColumnType_Date [GOOD] >> Transfer_RowTable::ColumnType_Double >> Transfer_RowTable::ColumnType_Double [GOOD] >> Transfer_RowTable::ColumnType_Int8 >> Transfer_RowTable::ColumnType_Int8 [GOOD] >> Transfer_RowTable::ColumnType_Int16 >> Transfer_RowTable::ColumnType_Int16 [GOOD] >> Transfer_RowTable::ColumnType_Int32 >> Transfer_RowTable::ColumnType_Int32 [GOOD] >> Transfer_RowTable::ColumnType_Int64 >> Transfer_RowTable::ColumnType_Int64 [GOOD] >> Transfer_RowTable::ColumnType_Utf8_LongValue >> Transfer_RowTable::ColumnType_Utf8_LongValue [GOOD] >> Transfer_RowTable::ColumnType_Uuid >> Transfer_RowTable::ColumnType_Uuid [GOOD] >> Transfer_RowTable::MessageField_Partition >> Transfer_RowTable::MessageField_Partition [GOOD] >> Transfer_RowTable::MessageField_SeqNo >> Transfer_RowTable::MessageField_SeqNo [GOOD] >> Transfer_RowTable::MessageField_ProducerId >> Transfer_RowTable::MessageField_ProducerId [GOOD] >> Transfer_RowTable::MessageField_MessageGroupId >> Transfer_RowTable::MessageField_MessageGroupId [GOOD] >> Transfer_RowTable::ProcessingJsonMessage >> Transfer_RowTable::ProcessingJsonMessage [GOOD] >> Transfer_RowTable::ProcessingCDCMessage >> Transfer_RowTable::ProcessingCDCMessage [GOOD] >> Transfer_RowTable::TableWithSyncIndex >> Transfer_RowTable::TableWithSyncIndex [GOOD] >> Transfer_RowTable::TableWithAsyncIndex >> Transfer_RowTable::TableWithAsyncIndex [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/transfer/ut/functional/unittest >> Transfer_RowTable::TableWithAsyncIndex [GOOD] Test command err: DDL: CREATE TABLE `Table_17390401329826609258` ( Key Uint64 NOT NULL, Message Utf8, PRIMARY KEY (Key) ) WITH ( STORE = ROW ); DDL: CREATE TOPIC `Topic_17390401329826609258` WITH ( MIN_ACTIVE_PARTITIONS = 1 ); DDL: $l = ($x) -> { return [ <| Key:$x._offset, Message:CAST($x._data AS Utf8) |> ]; }; ; CREATE TRANSFER `Transfer_17390401329826609258` FROM `Topic_17390401329826609258` TO `Table_17390401329826609258` USING $l WITH ( FLUSH_INTERVAL = Interval('PT1S') ,BATCH_SIZE_BYTES = 8388608 ); >>>>> Query: SELECT `Message` FROM `Table_17390401329826609258` ORDER BY `Message` Attempt=19 count=0 >>>>> Query: SELECT `Message` FROM `Table_17390401329826609258` ORDER BY `Message` Attempt=18 count=0 >>>>> Query: SELECT `Message` FROM `Table_17390401329826609258` ORDER BY `Message` Attempt=17 count=0 >>>>> Query: SELECT `Message` FROM `Table_17390401329826609258` ORDER BY `Message` Attempt=16 count=0 >>>>> Query: SELECT `Message` FROM `Table_17390401329826609258` ORDER BY `Message` Attempt=15 count=0 >>>>> Query: SELECT `Message` FROM `Table_17390401329826609258` ORDER BY `Message` Attempt=14 count=1 DDL: DROP TRANSFER `Transfer_17390401329826609258`; DDL: DROP TABLE `Table_17390401329826609258` DDL: DROP TOPIC `Topic_17390401329826609258` DDL: CREATE TABLE `Table_1077394902490908164` ( Key Uint64 NOT NULL, Message Utf8, PRIMARY KEY (Key) ) WITH ( STORE = ROW ); DDL: CREATE TOPIC `Topic_1077394902490908164` WITH ( MIN_ACTIVE_PARTITIONS = 1 ); DDL: $l = ($x) -> { return [ <| Key:$x._offset, Message:CAST($x._data 
AS Utf8) |> ]; }; ; CREATE TRANSFER `Transfer_1077394902490908164` FROM `Topic_1077394902490908164` TO `Table_1077394902490908164` USING $l WITH ( CONNECTION_STRING = 'grpc://localhost:10897/?database=local' ,FLUSH_INTERVAL = Interval('PT1S') ,BATCH_SIZE_BYTES = 8388608 ); >>>>> Query: SELECT `Message` FROM `Table_1077394902490908164` ORDER BY `Message` Attempt=19 count=0 >>>>> Query: SELECT `Message` FROM `Table_1077394902490908164` ORDER BY `Message` Attempt=18 count=0 >>>>> Query: SELECT `Message` FROM `Table_1077394902490908164` ORDER BY `Message` Attempt=17 count=0 >>>>> Query: SELECT `Message` FROM `Table_1077394902490908164` ORDER BY `Message` Attempt=16 count=0 >>>>> Query: SELECT `Message` FROM `Table_1077394902490908164` ORDER BY `Message` Attempt=15 count=0 >>>>> Query: SELECT `Message` FROM `Table_1077394902490908164` ORDER BY `Message` Attempt=14 count=0 >>>>> Query: SELECT `Message` FROM `Table_1077394902490908164` ORDER BY `Message` Attempt=13 count=0 >>>>> Query: SELECT `Message` FROM `Table_1077394902490908164` ORDER BY `Message` Attempt=12 count=0 >>>>> Query: SELECT `Message` FROM `Table_1077394902490908164` ORDER BY `Message` Attempt=11 count=1 DDL: DROP TRANSFER `Transfer_1077394902490908164`; DDL: DROP TABLE `Table_1077394902490908164` DDL: DROP TOPIC `Topic_1077394902490908164` DDL: CREATE TOPIC `Topic_2875788716372006140` WITH ( MIN_ACTIVE_PARTITIONS = 10 ); DDL: $l = ($x) -> { return [ <| Key:CAST($x._offset AS Uint64) |> ]; }; ; CREATE TRANSFER `Transfer_2875788716372006140` FROM `Topic_2875788716372006140` TO `Table_2875788716372006140` USING $l WITH ( CONNECTION_STRING = 'grpc://localhost:10897/?database=local' ,FLUSH_INTERVAL = Interval('PT1S') ,BATCH_SIZE_BYTES = 8388608 ); >>>>> ACTUAL: [ {
: Error: Path does not exist, code: 2003 } {
: Error: Query invalidated on scheme/internal error during Scheme execution, code: 2019 } ] >>>>> EXPECTED: Path does not exist DDL: DROP TOPIC `Topic_2875788716372006140` DDL: CREATE TABLE `Table_3106576622878784895` ( Key Uint64 NOT NULL, Message Utf8, PRIMARY KEY (Key) ) WITH ( STORE = COLUMN ); DDL: CREATE TOPIC `Topic_3106576622878784895` WITH ( MIN_ACTIVE_PARTITIONS = 1 ); DDL: $l = ($x) -> { return [ <| Key: 1, Message:CAST("Message-1" AS Utf8) |> ]; }; CREATE TRANSFER Transfer_3106576622878784895 FROM Topic_3106576622878784895 TO Table_3106576622878784895 USING $l WITH ( CONNECTION_STRING = "grp§c://localhost:2135" ) >>>>> ACTUAL: {
: Error: Discovery error: /Topic_3106576622878784895: TRANSPORT_UNAVAILABLE ([ {
: Error: GRpc error: (14): DNS resolution failed for grp§c://localhost:2135: C-ares status is not ARES_SUCCESS qtype=A name=grp§c://localhost:2135 is_balancer=0: Misformatted domain name } {
: Error: Grpc error response on endpoint grp§c://localhost:2135 } ]) } >>>>> EXPECTED: DNS resolution failed for grp§c://localhost:2135 DDL: DROP TRANSFER `Transfer_3106576622878784895`; DDL: DROP TABLE `Table_3106576622878784895` DDL: DROP TOPIC `Topic_3106576622878784895` DDL: CREATE TABLE `Table_16674015009831219104` ( Key Uint64 NOT NULL, Message Utf8, PRIMARY KEY (Key) ) WITH ( STORE = COLUMN ); DDL: CREATE TOPIC `Topic_16674015009831219104` WITH ( MIN_ACTIVE_PARTITIONS = 1 ); DDL: $l = ($x) -> { return [ <| Key: 1, Message:CAST("Message-1" AS Utf8) |> ]; }; CREATE TRANSFER Transfer_16674015009831219104 FROM Topic_16674015009831219104 TO Table_16674015009831219104 USING $l WITH ( CONNECTION_STRING = "grpc://domain-not-exists-localhost.com.moc:2135" ) >>>>> ACTUAL: {
: Error: Discovery error: /Topic_16674015009831219104: TRANSPORT_UNAVAILABLE ([ {
: Error: GRpc error: (14): DNS resolution failed for domain-not-exists-localhost.com.moc:2135: C-ares status is not ARES_SUCCESS qtype=A name=domain-not-exists-localhost.com.moc is_balancer=0: Domain name not found } {
: Error: Grpc error response on endpoint domain-not-exists-localhost.com.moc:2135 } ]) } >>>>> EXPECTED: Grpc error response on endpoint domain-not-exists-localhost.com.moc:2135 DDL: DROP TRANSFER `Transfer_16674015009831219104`; DDL: DROP TABLE `Table_16674015009831219104` DDL: DROP TOPIC `Topic_16674015009831219104` DDL: CREATE USER u31665 DDL: GRANT 'ydb.granular.create_table', 'ydb.granular.create_queue' ON `/local` TO `u31665@builtin` DDL: CREATE TABLE `Table_11616040241908007913` ( Key Uint64 NOT NULL, Message Utf8, PRIMARY KEY (Key) ) WITH ( STORE = COLUMN ); DDL: GRANT 'ydb.generic.write', 'ydb.generic.read' ON `/local/Table_11616040241908007913` TO `u31665@builtin` DDL: CREATE TOPIC `Topic_11616040241908007913` WITH ( MIN_ACTIVE_PARTITIONS = 1 ); DDL: GRANT ALL ON `/local/Topic_11616040241908007913` TO `u31665@builtin` DDL: $l = ($x) -> { return [ <| Key:CAST($x._offset AS Uint64), Message:CAST($x._data AS Utf8) |> ]; }; ; CREATE TRANSFER `Transfer_11616040241908007913` FROM `Topic_11616040241908007913` TO `Table_11616040241908007913` USING $l WITH ( CONNECTION_STRING = 'grpc://localhost:10897/?database=local' ,FLUSH_INTERVAL = Interval('PT1S') ,BATCH_SIZE_BYTES = 8388608 ); DDL: DROP TOPIC `Topic_11616040241908007913` DDL: DROP TRANSFER `Transfer_11616040241908007913`; DDL: CREATE USER u58577 DDL: GRANT 'ydb.granular.create_table', 'ydb.granular.create_queue' ON `/local` TO `u58577@builtin` DDL: CREATE TABLE `Table_12349147030838147207` ( Key Uint64 NOT NULL, Message Utf8, PRIMARY KEY (Key) ) WITH ( STORE = COLUMN ); DDL: GRANT 'ydb.generic.read' ON `/local/Table_12349147030838147207` TO `u58577@builtin` DDL: CREATE TOPIC `Topic_12349147030838147207` WITH ( MIN_ACTIVE_PARTITIONS = 1 ); DDL: GRANT ALL ON `/local/Topic_12349147030838147207` TO `u58577@builtin` DDL: $l = ($x) -> { return [ <| Key:CAST($x._offset AS Uint64), Message:CAST($x._data AS Utf8) |> ]; }; ; CREATE TRANSFER `Transfer_12349147030838147207` FROM `Topic_12349147030838147207` TO `Table_12349147030838147207` USING $l WITH ( CONNECTION_STRING = 'grpc://localhost:10897/?database=local' ,FLUSH_INTERVAL = Interval('PT1S') ,BATCH_SIZE_BYTES = 8388608 ); >>>>> ACTUAL: {
: Error: Access denied for scheme request, code: 2018 subissue: {
: Error: Access denied. } } >>>>> EXPECTED: Access denied for scheme request DDL: DROP TOPIC `Topic_12349147030838147207` DDL: CREATE USER u50949 DDL: GRANT 'ydb.granular.create_table', 'ydb.granular.create_queue' ON `/local` TO `u50949@builtin` DDL: CREATE TABLE `Table_16192785469604377702` ( Key Uint64 NOT NULL, Message Utf8, PRIMARY KEY (Key) ); DDL: GRANT 'ydb.generic.write', 'ydb.generic.read' ON `/local/Table_16192785469604377702` TO `u50949@builtin` DDL: CREATE TOPIC `Topic_16192785469604377702` WITH ( MIN_ACTIVE_PARTITIONS = 1 ); DDL: GRANT ALL ON `/local/Topic_16192785469604377702` TO `u50949@builtin` DDL: $l = ($x) -> { return [ <| Key:CAST($x._offset AS Uint64), ... t=19 count=0 >>>>> Query: SELECT `Partition`, `Message` FROM `Table_4021602514492716225` ORDER BY `Partition`, `Message` Attempt=18 count=0 >>>>> Query: SELECT `Partition`, `Message` FROM `Table_4021602514492716225` ORDER BY `Partition`, `Message` Attempt=17 count=1 DDL: DROP TRANSFER `Transfer_4021602514492716225`; DDL: DROP TABLE `Table_4021602514492716225` DDL: DROP TOPIC `Topic_4021602514492716225` DDL: CREATE TABLE `Table_14836981430315796214` ( SeqNo Uint64 NOT NULL, Message Utf8, PRIMARY KEY (SeqNo) ) WITH ( STORE = ROW ); DDL: CREATE TOPIC `Topic_14836981430315796214` WITH ( MIN_ACTIVE_PARTITIONS = 10 ); DDL: $l = ($x) -> { return [ <| SeqNo:CAST($x._seq_no AS Uint32), Message:CAST($x._data AS Utf8) |> ]; }; ; CREATE TRANSFER `Transfer_14836981430315796214` FROM `Topic_14836981430315796214` TO `Table_14836981430315796214` USING $l WITH ( CONNECTION_STRING = 'grpc://localhost:10897/?database=local' ,FLUSH_INTERVAL = Interval('PT1S') ,BATCH_SIZE_BYTES = 8388608 ); >>>>> Query: SELECT `SeqNo` FROM `Table_14836981430315796214` ORDER BY `SeqNo` Attempt=19 count=0 >>>>> Query: SELECT `SeqNo` FROM `Table_14836981430315796214` ORDER BY `SeqNo` Attempt=18 count=1 DDL: DROP TRANSFER `Transfer_14836981430315796214`; DDL: DROP TABLE `Table_14836981430315796214` DDL: DROP TOPIC `Topic_14836981430315796214` DDL: CREATE TABLE `Table_12919948605907633900` ( Offset Uint64 NOT NULL, ProducerId Utf8, PRIMARY KEY (Offset) ) WITH ( STORE = ROW ); DDL: CREATE TOPIC `Topic_12919948605907633900` WITH ( MIN_ACTIVE_PARTITIONS = 10 ); DDL: $l = ($x) -> { return [ <| Offset:CAST($x._offset AS Uint64), ProducerId:CAST($x._producer_id AS Utf8) |> ]; }; ; CREATE TRANSFER `Transfer_12919948605907633900` FROM `Topic_12919948605907633900` TO `Table_12919948605907633900` USING $l WITH ( CONNECTION_STRING = 'grpc://localhost:10897/?database=local' ,FLUSH_INTERVAL = Interval('PT1S') ,BATCH_SIZE_BYTES = 8388608 ); >>>>> Query: SELECT `ProducerId` FROM `Table_12919948605907633900` ORDER BY `ProducerId` Attempt=19 count=0 >>>>> Query: SELECT `ProducerId` FROM `Table_12919948605907633900` ORDER BY `ProducerId` Attempt=18 count=0 >>>>> Query: SELECT `ProducerId` FROM `Table_12919948605907633900` ORDER BY `ProducerId` Attempt=17 count=1 DDL: DROP TRANSFER `Transfer_12919948605907633900`; DDL: DROP TABLE `Table_12919948605907633900` DDL: DROP TOPIC `Topic_12919948605907633900` DDL: CREATE TABLE `Table_14951064055086090113` ( Offset Uint64 NOT NULL, ProducerId Utf8, PRIMARY KEY (Offset) ) WITH ( STORE = ROW ); DDL: CREATE TOPIC `Topic_14951064055086090113` WITH ( MIN_ACTIVE_PARTITIONS = 10 ); DDL: $l = ($x) -> { return [ <| Offset:CAST($x._offset AS Uint64), ProducerId:CAST($x._producer_id AS Utf8) |> ]; }; ; CREATE TRANSFER `Transfer_14951064055086090113` FROM `Topic_14951064055086090113` TO `Table_14951064055086090113` USING $l WITH ( 
CONNECTION_STRING = 'grpc://localhost:10897/?database=local' ,FLUSH_INTERVAL = Interval('PT1S') ,BATCH_SIZE_BYTES = 8388608 ); >>>>> Query: SELECT `ProducerId` FROM `Table_14951064055086090113` ORDER BY `ProducerId` Attempt=19 count=0 >>>>> Query: SELECT `ProducerId` FROM `Table_14951064055086090113` ORDER BY `ProducerId` Attempt=18 count=0 >>>>> Query: SELECT `ProducerId` FROM `Table_14951064055086090113` ORDER BY `ProducerId` Attempt=17 count=1 DDL: DROP TRANSFER `Transfer_14951064055086090113`; DDL: DROP TABLE `Table_14951064055086090113` DDL: DROP TOPIC `Topic_14951064055086090113` DDL: CREATE TABLE `Table_15830272075441633268` ( Id Uint64 NOT NULL, FirstName Utf8 NOT NULL, LastName Utf8 NOT NULL, Salary Uint64 NOT NULL, PRIMARY KEY (Id) ) WITH ( STORE = ROW ); DDL: CREATE TOPIC `Topic_15830272075441633268` WITH ( MIN_ACTIVE_PARTITIONS = 10 ); DDL: $l = ($x) -> { $input = CAST($x._data AS JSON); return [ <| Id: Yson::ConvertToUint64($input.id), FirstName: CAST(Yson::ConvertToString($input.first_name) AS Utf8), LastName: CAST(Yson::ConvertToString($input.last_name) AS Utf8), Salary: CAST(Yson::ConvertToString($input.salary) AS UInt64) |> ]; }; ; CREATE TRANSFER `Transfer_15830272075441633268` FROM `Topic_15830272075441633268` TO `Table_15830272075441633268` USING $l WITH ( CONNECTION_STRING = 'grpc://localhost:10897/?database=local' ,FLUSH_INTERVAL = Interval('PT1S') ,BATCH_SIZE_BYTES = 8388608 ); >>>>> Query: SELECT `Id`, `FirstName`, `LastName`, `Salary` FROM `Table_15830272075441633268` ORDER BY `Id`, `FirstName`, `LastName`, `Salary` Attempt=19 count=0 >>>>> Query: SELECT `Id`, `FirstName`, `LastName`, `Salary` FROM `Table_15830272075441633268` ORDER BY `Id`, `FirstName`, `LastName`, `Salary` Attempt=18 count=1 DDL: DROP TRANSFER `Transfer_15830272075441633268`; DDL: DROP TABLE `Table_15830272075441633268` DDL: DROP TOPIC `Topic_15830272075441633268` DDL: CREATE TABLE `SourceTable_10478181568745742751` ( object_id Utf8 NOT NULL, timestamp Datetime NOT NULL, operation Utf8, PRIMARY KEY (object_id, timestamp) ) WITH ( STORE = ROW ) DDL: ALTER TABLE `SourceTable_10478181568745742751` ADD CHANGEFEED `cdc_10478181568745742751` WITH ( MODE = 'UPDATES', FORMAT = 'JSON' ) DDL: CREATE TABLE `Table_10478181568745742751` ( timestamp Datetime NOT NULL, object_id Utf8 NOT NULL, operation Utf8, PRIMARY KEY (timestamp, object_id) ) WITH ( STORE = ROW ) DDL: $l = ($x) -> { $d = CAST($x._data AS JSON); return [ <| timestamp: DateTime::MakeDatetime(DateTime::ParseIso8601(CAST(Yson::ConvertToString($d.key[1]) AS Utf8))), object_id: CAST(Yson::ConvertToString($d.key[0]) AS Utf8), operation: CAST(Yson::ConvertToString($d.update.operation) AS Utf8) |> ]; }; ; CREATE TRANSFER `Transfer_10478181568745742751` FROM `SourceTable_10478181568745742751/cdc_10478181568745742751` TO `Table_10478181568745742751` USING $l WITH ( CONNECTION_STRING = 'grpc://localhost:10897/?database=local' ,FLUSH_INTERVAL = Interval('PT1S') ,BATCH_SIZE_BYTES = 8388608 ); >>>>> Query: INSERT INTO `SourceTable_10478181568745742751` (`object_id`, `timestamp`, `operation`) VALUES ('id_1', Datetime('2019-01-01T15:30:00Z'), 'value_1'); >>>>> Query: SELECT `operation`, `object_id`, `timestamp` FROM `Table_10478181568745742751` ORDER BY `operation`, `object_id`, `timestamp` Attempt=19 count=0 >>>>> Query: SELECT `operation`, `object_id`, `timestamp` FROM `Table_10478181568745742751` ORDER BY `operation`, `object_id`, `timestamp` Attempt=18 count=0 >>>>> Query: SELECT `operation`, `object_id`, `timestamp` FROM `Table_10478181568745742751` 
ORDER BY `operation`, `object_id`, `timestamp` Attempt=17 count=1 DDL: DROP TRANSFER `Transfer_10478181568745742751`; DDL: DROP TABLE `Table_10478181568745742751` DDL: DROP TABLE `SourceTable_10478181568745742751` DDL: CREATE TABLE `Table_13759515811584021790` ( Key Uint64 NOT NULL, Message Utf8, INDEX `title_index` GLOBAL SYNC ON (`Message`), PRIMARY KEY (Key) ) WITH ( STORE = ROW ); DDL: CREATE TOPIC `Topic_13759515811584021790` WITH ( MIN_ACTIVE_PARTITIONS = 1 ); DDL: $l = ($x) -> { return [ <| Key:$x._offset, Message:CAST($x._data AS Utf8) |> ]; }; ; CREATE TRANSFER `Transfer_13759515811584021790` FROM `Topic_13759515811584021790` TO `Table_13759515811584021790` USING $l WITH ( CONNECTION_STRING = 'grpc://localhost:10897/?database=local' ,FLUSH_INTERVAL = Interval('PT1S') ,BATCH_SIZE_BYTES = 8388608 ); >>>>> ACTUAL: {
: Error: Error in target #1: {
: Error: Bulk upsert to table 'local/Table_13759515811584021790' Only async-indexed tables are supported by BulkUpsert } } >>>>> EXPECTED: Only async-indexed tables are supported by BulkUpsert DDL: DROP TRANSFER `Transfer_13759515811584021790`; DDL: DROP TABLE `Table_13759515811584021790` DDL: DROP TOPIC `Topic_13759515811584021790` DDL: CREATE TABLE `Table_10229182096116111811` ( Key Uint64 NOT NULL, Message Utf8, INDEX `title_index` GLOBAL ASYNC ON (`Message`), PRIMARY KEY (Key) ) WITH ( STORE = ROW ); DDL: CREATE TOPIC `Topic_10229182096116111811` WITH ( MIN_ACTIVE_PARTITIONS = 1 ); DDL: $l = ($x) -> { return [ <| Key:$x._offset, Message:CAST($x._data AS Utf8) |> ]; }; ; CREATE TRANSFER `Transfer_10229182096116111811` FROM `Topic_10229182096116111811` TO `Table_10229182096116111811` USING $l WITH ( CONNECTION_STRING = 'grpc://localhost:10897/?database=local' ,FLUSH_INTERVAL = Interval('PT1S') ,BATCH_SIZE_BYTES = 8388608 ); >>>>> Query: SELECT `Key`, `Message` FROM `Table_10229182096116111811` ORDER BY `Key`, `Message` Attempt=19 count=0 >>>>> Query: SELECT `Key`, `Message` FROM `Table_10229182096116111811` ORDER BY `Key`, `Message` Attempt=18 count=1 DDL: DROP TRANSFER `Transfer_10229182096116111811`; DDL: DROP TABLE `Table_10229182096116111811` DDL: DROP TOPIC `Topic_10229182096116111811` |99.9%| [TM] {RESULT} ydb/core/transfer/ut/functional/unittest |99.9%| CLEANING BUILD ROOT Number of suites skipped by size: 142 ydb/public/sdk/cpp/tests/integration/sessions_pool [size:medium] nchunks:10 ------ [3/10] chunk ran 1 test (total:37.98s - setup:0.01s recipes:21.10s test:12.39s recipes:4.42s) [fail] YdbSdkSessionsPool::StressTestSync/1 [default-linux-x86_64-release-asan] (10.23s) Value param: 10 ydb/public/sdk/cpp/tests/integration/sessions_pool/main.cpp:269: Expected equality of these values: Client->GetCurrentPoolSize() Which is: 0 activeSessionsLimit Which is: 10 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/public/sdk/cpp/tests/integration/sessions_pool/test-results/gtest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/public/sdk/cpp/tests/integration/sessions_pool/test-results/gtest/testing_out_stuff/YdbSdkSessionsPool.StressTestSync.1.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/public/sdk/cpp/tests/integration/sessions_pool/test-results/gtest/testing_out_stuff/YdbSdkSessionsPool.StressTestSync.1.out ------ FAIL: 11 - GOOD, 1 - FAIL ydb/public/sdk/cpp/tests/integration/sessions_pool ydb/core/viewer/tests [size:medium] ------ sole chunk ran 36 tests (total:63.87s - test:63.52s canon:0.09s) [fail] test.py::test_topic_data [default-linux-x86_64-release-asan] (5.38s) Test results differ from canonical: test_result['response_last_offset']['Messages'][3]: extra value {'Codec': 0, 'CreateTimestamp': 'not-zero-number', 'Message':..., test_result['response_last_offset']['Messages'][4]: extra value {'Codec': 0, 'CreateTimestamp': 'not-zero-number', 'Message':..., test_result['response_last_offset']['Messages'][5]: extra value {'Codec': 0, 'CreateTimestamp': 'not-zero-number', 'Message':..., test_result['response_last_offset']['Messages'][6]: extra value {'Codec': 0, 'CreateTimestamp': 'not-zero-number', 'Message':..., test_result['response_last_offset']['Messages'][7]: extra value {'Codec': 0, 'CreateTimestamp': 'not-zero-number', 'Message':..., test_result['response_last_offset']['Messages'][8]: extra value {'Codec': 0, 'CreateTimestamp': 'not-zero-number', 'Message':..., test_result['response_last_offset']['Messages'][9]: 
extra value {'Codec': 0, 'CreateTimestamp': 'not-zero-number', 'Message':... Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/viewer/tests/test-results/py3test/testing_out_stuff/test.py.test_topic_data.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/viewer/tests/test-results/py3test/testing_out_stuff ------ FAIL: 35 - GOOD, 1 - FAIL ydb/core/viewer/tests ydb/tests/datashard/ttl [size:medium] nchunks:36 ------ [27/36] chunk ran 1 test (total:344.41s - setup:0.01s test:344.32s) [fail] test_ttl.py::TestTTL::test_ttl[table_Uint32_1_UNIQUE_SYNC-pk_types23-all_types23-index23-Uint32-UNIQUE-SYNC] [default-linux-x86_64-release-asan] (338.47s) ydb/tests/datashard/ttl/test_ttl.py:200: in test_ttl self.select(table_name, pk_types, all_types, index, dml) ydb/tests/datashard/ttl/test_ttl.py:263: in select self.create_select(table_name, pk_types, all_types, index, i, 0, dml) ydb/tests/datashard/ttl/test_ttl.py:283: in create_select assert ( E AssertionError: Expected 0 rows, error when deleting 6 lines, table table_Uint32_1_UNIQUE_SYNC E assert (1 == 1 and 1 == 0) E + where 1 = len([{'count': 1}]) E + and 1 = {'count': 1}.count Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/datashard/ttl/test-results/py3test/testing_out_stuff/test_ttl.py.TestTTL.test_ttl.table_Uint32_1_UNIQUE_SYNC-pk_types23-all_types23-index23-Uint32-UNIQUE-SYNC.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/datashard/ttl/test-results/py3test/testing_out_stuff ------ [3/36] chunk ran 1 test (total:346.61s - test:346.45s) [fail] test_ttl.py::TestTTL::test_ttl[table_Date_1_UNIQUE_SYNC-pk_types35-all_types35-index35-Date-UNIQUE-SYNC] [default-linux-x86_64-release-asan] (340.31s) ydb/tests/datashard/ttl/test_ttl.py:200: in test_ttl self.select(table_name, pk_types, all_types, index, dml) ydb/tests/datashard/ttl/test_ttl.py:263: in select self.create_select(table_name, pk_types, all_types, index, i, 0, dml) ydb/tests/datashard/ttl/test_ttl.py:283: in create_select assert ( E AssertionError: Expected 0 rows, error when deleting 5 lines, table table_Date_1_UNIQUE_SYNC E assert (1 == 1 and 1 == 0) E + where 1 = len([{'count': 1}]) E + and 1 = {'count': 1}.count Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/datashard/ttl/test-results/py3test/testing_out_stuff/test_ttl.py.TestTTL.test_ttl.table_Date_1_UNIQUE_SYNC-pk_types35-all_types35-index35-Date-UNIQUE-SYNC.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/datashard/ttl/test-results/py3test/testing_out_stuff ------ [9/36] chunk ran 1 test (total:332.03s - test:331.91s) [fail] test_ttl.py::TestTTL::test_ttl[table_Datetime_1_UNIQUE_SYNC-pk_types5-all_types5-index5-Datetime-UNIQUE-SYNC] [default-linux-x86_64-release-asan] (325.03s) ydb/tests/datashard/ttl/test_ttl.py:200: in test_ttl self.select(table_name, pk_types, all_types, index, dml) ydb/tests/datashard/ttl/test_ttl.py:263: in select self.create_select(table_name, pk_types, all_types, index, i, 0, dml) ydb/tests/datashard/ttl/test_ttl.py:283: in create_select assert ( E AssertionError: Expected 0 rows, error when deleting 6 lines, table table_Datetime_1_UNIQUE_SYNC E assert (1 == 1 and 1 == 0) E + where 1 = len([{'count': 1}]) E + and 1 = {'count': 1}.count Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/datashard/ttl/test-results/py3test/testing_out_stuff/test_ttl.py.TestTTL.test_ttl.table_Datetime_1_UNIQUE_SYNC-pk_types5-all_types5-index5-Datetime-UNIQUE-SYNC.log Logsdir: 
/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/datashard/ttl/test-results/py3test/testing_out_stuff ------ FAIL: 33 - GOOD, 3 - FAIL ydb/tests/datashard/ttl ydb/tests/fq/http_api [size:medium] ------ sole chunk ran 16 tests (total:93.75s - recipes:10.14s test:79.53s recipes:4.02s) [fail] test_http_api.py::TestHttpApi::test_simple_analytics_query [default-linux-x86_64-release-asan] (17.74s) ydb/tests/fq/http_api/test_http_api.py:106: in test_simple_analytics_query response = client.stop_query(query_id) ydb/core/fq/libs/http_api_client/http_client.py:202: in stop_query self._validate_http_error(response, expected_code=expected_code) ydb/core/fq/libs/http_api_client/http_client.py:111: in _validate_http_error raise YQHttpClientException( E ydb.core.fq.libs.http_api_client.http_client.YQHttpClientException: Error occurred. http code=400, status=400010, msg=BAD_REQUEST, details=[{'message': 'Conversion from status COMPLETING to ABORTING_BY_USER is not possible. Please wait for the previous operation to be completed', 'issue_code': 1001, 'severity': 'ERROR', 'issues': []}] Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/http_api/test-results/py3test/testing_out_stuff/test_http_api.py.TestHttpApi.test_simple_analytics_query.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/http_api/test-results/py3test/testing_out_stuff ------ FAIL: 15 - GOOD, 1 - FAIL ydb/tests/fq/http_api ydb/tests/fq/mem_alloc [size:medium] ------ sole chunk ran 12 tests (total:644.81s - recipes:10.07s test:600.08s recipes:3.43s) Chunk exceeded 600s timeout, failed to shutdown gracefully in 30s and was terminated using SIGQUIT signal List of the tests involved in the launch: test_result_limits.py::TestResultLimits::test_many_rows (good) duration: 217.80s test_alloc_default.py::TestAlloc::test_node_limit[kikimr0] (good) duration: 100.64s test_result_limits.py::TestResultLimits::test_large_row (timeout) duration: 52.75s test_alloc_default.py::TestAlloc::test_alloc_and_free[kikimr0] (good) duration: 50.08s test_alloc_default.py::TestAlloc::test_up_down[kikimr0] (good) duration: 41.03s test_dc_local.py::TestAlloc::test_dc_locality[kikimr0] (good) duration: 38.93s test_alloc_default.py::TestAlloc::test_default_delta[kikimr0] (good) duration: 34.12s test_alloc_default.py::TestAlloc::test_default_limits[kikimr0] (good) duration: 31.54s test_alloc_default.py::TestAlloc::test_hard_limit[kikimr0] (good) duration: 30.68s test_alloc_default.py::TestAlloc::test_mkql_not_increased[kikimr0] (good) duration: 27.06s 1 more tests with 0.00s total duration are not listed. test_result_limits.py::TestResultLimits::test_quotas[kikimr0] test was not launched inside chunk. Info: Test run has exceeded 8.0G (8388608K) memory limit with 8.5G (8931760K) used. 
This may lead to test failure on the Autocheck/CI You can increase test's ram requirement using REQUIREMENTS(ram:X) in the ya.make pid rss ref pdirt 1063746 45.3M 42.0M 6.6M test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1063892 35.2M 23.0M 11.5M ├─ test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1069215 898M 891M 830M │ └─ ydb-tests-fq-mem_alloc --basetemp /home/runner/actions_runner/_work/ydb/ydb/tmp/out/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctes 1283025 5.3G 5.3G 4.9G │ └─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/mem_alloc/test-results/py3test/testing_out_stuff/tes 1065142 2.3G 2.2G 1.8G └─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/mem_alloc/test-results/py3test/testing_out_stuff/ydb_data_ Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/mem_alloc/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/mem_alloc/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/mem_alloc/test-results/py3test/testing_out_stuff/stderr [timeout] test_result_limits.py::TestResultLimits::test_large_row [default-linux-x86_64-release-asan] (52.75s) Killed by timeout (600 s) Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/mem_alloc/test-results/py3test/testing_out_stuff/test_result_limits.py.TestResultLimits.test_large_row.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/mem_alloc/test-results/py3test/testing_out_stuff ------ TIMEOUT: 9 - GOOD, 1 - NOT_LAUNCHED, 1 - TIMEOUT, 1 - SKIPPED ydb/tests/fq/mem_alloc ydb/tests/fq/streaming_optimize [size:medium] nchunks:4 ------ [test_sql_streaming.py 0/4] chunk ran 6 tests (total:115.51s - recipes:0.76s test:114.02s recipes:0.60s) [fail] test_sql_streaming.py::test[suites-GroupByHop-default.txt] [default-linux-x86_64-release-asan] (25.70s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_jl0b91v1/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_jl0b91v1/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_jl0b91v1/ast.txt 
--plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_jl0b91v1/plan.json --log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_jl0b91v1/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_jl0b91v1/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_jl0b91v1/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_jl0b91v1/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_jl0b91v1/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_jl0b91v1/topic_3.txt' has failed with code 100. E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7f1c47271e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7f1c471f4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7f1c472a0fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7f1c4719951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7f1c4719951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7f1c47199360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7f1c472f055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7f1c472f055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7f1c47270e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7f1c47270e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7f1c471f4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7f1c471989ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7f1c471989ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 453477 byte(s) leaked in 8611 allocation(s). 
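Every ydb/tests/fq/streaming_optimize failure below shares the same signature: fqrun exits with code 100 and yatest's verify_sanitize_errors() turns the LeakSanitizer report (~450 KB leaked inside CPython's import machinery, per the truncated stack above: import_find_and_load / PyImport_ImportModuleLevelObject) into a test failure. If that leak were triaged as a known, benign startup leak -- an assumption, nothing in this log establishes it -- one way to mute the report locally is a LeakSanitizer suppressions file keyed on those frames. A minimal sketch, under that assumption:

# lsan.supp lists call-stack substrings whose leaks LeakSanitizer should ignore
cat > lsan.supp <<'EOF'
leak:import_find_and_load
leak:PyImport_ImportModuleLevelObject
EOF
# Point the sanitizer runtime at the suppressions when re-running the failing binary
export LSAN_OPTIONS=suppressions=$PWD/lsan.supp:print_suppressions=1

The suppression only hides the report in an ASan/LSan build; it does not change what fqrun allocates, so the underlying leak still needs a real triage.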
Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-GroupByHop-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff [fail] test_sql_streaming.py::test[suites-GroupByHopByStringKey-default.txt] [default-linux-x86_64-release-asan] (15.83s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8nzryi3e/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8nzryi3e/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8nzryi3e/ast.txt --plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8nzryi3e/plan.json --log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8nzryi3e/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8nzryi3e/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8nzryi3e/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8nzryi3e/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8nzryi3e/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8nzryi3e/topic_3.txt' has failed with code 100. 
E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7f2305601e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7f2305584025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7f2305630fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7f230552951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7f230552951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7f2305529360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7f230568055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7f230568055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7f2305600e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7f2305600e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7f2305584025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7f23055289ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7f23055289ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 453477 byte(s) leaked in 8611 allocation(s). Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-GroupByHopByStringKey-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff [fail] test_sql_streaming.py::test[suites-GroupByHopExprKey-default.txt] [default-linux-x86_64-release-asan] (17.83s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_dsl6mhqm/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_dsl6mhqm/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_dsl6mhqm/ast.txt --plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_dsl6mhqm/plan.json 
--log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_dsl6mhqm/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_dsl6mhqm/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_dsl6mhqm/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_dsl6mhqm/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_dsl6mhqm/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_dsl6mhqm/topic_3.txt' has failed with code 100. E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7fc7527d1e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7fc752754025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7fc752800fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7fc7526f951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7fc7526f951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7fc7526f9360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7fc75285055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7fc75285055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7fc7527d0e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7fc7527d0e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7fc752754025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7fc7526f89ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7fc7526f89ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 453477 byte(s) leaked in 8611 allocation(s). 
Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-GroupByHopExprKey-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff [fail] test_sql_streaming.py::test[suites-GroupByHopListKey-default.txt] [default-linux-x86_64-release-asan] (16.67s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8tts_6ho/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8tts_6ho/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8tts_6ho/ast.txt --plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8tts_6ho/plan.json --log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8tts_6ho/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8tts_6ho/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8tts_6ho/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8tts_6ho/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8tts_6ho/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8tts_6ho/topic_3.txt' has failed with code 100. 
E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7fef52691e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7fef52614025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7fef526c0fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7fef525b951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7fef525b951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7fef525b9360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7fef5271055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7fef5271055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7fef52690e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7fef52690e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7fef52614025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7fef525b89ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7fef525b89ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 453477 byte(s) leaked in 8611 allocation(s). Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-GroupByHopListKey-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff [fail] test_sql_streaming.py::test[suites-GroupByHopNoKey-default.txt] [default-linux-x86_64-release-asan] (16.16s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_nxi0h6md/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_nxi0h6md/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_nxi0h6md/ast.txt --plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_nxi0h6md/plan.json 
--log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_nxi0h6md/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_nxi0h6md/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_nxi0h6md/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_nxi0h6md/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_nxi0h6md/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_nxi0h6md/topic_3.txt' has failed with code 100. E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7f7c0c281e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7f7c0c204025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7f7c0c2b0fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7f7c0c1a951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7f7c0c1a951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7f7c0c1a9360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7f7c0c30055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7f7c0c30055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7f7c0c280e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7f7c0c280e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7f7c0c204025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7f7c0c1a89ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7f7c0c1a89ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 452811 byte(s) leaked in 8598 allocation(s). 
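Each failure entry preserves the full fqrun command line, and the generated inputs (fq_config.conf, query_0.sql, topic_*.txt) remain under testing_out_stuff/yql/ after the run, so a single case can be replayed outside pytest. A sketch of doing that by hand, reusing only options that appear in the logged command; the placeholder directory below is an assumption and must be replaced with the test_fqrun_* path from the specific entry being debugged, and the remaining logged flags (--udfs-dir, --result-format=full-proto, --canonical-output, the other --emulate-pq mappings) can be copied verbatim:

# OUT is the test output root seen throughout this log; TESTDIR is a per-test yql/ directory (placeholder)
OUT=/home/runner/actions_runner/_work/ydb/ydb/tmp/out
TESTDIR=$OUT/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/<test_fqrun_dir>
$OUT/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain \
  --cfg=$TESTDIR/fq_config.conf \
  --query=$TESTDIR/query_0.sql \
  --result-file=/tmp/results.txt --ast-file=/tmp/ast.txt --plan-file=/tmp/plan.json --log-file=/tmp/log.txt \
  --emulate-pq=test_topic_input@$TESTDIR/topic_0.txt \
  --emulate-pq=test_topic_output@$TESTDIR/topic_2.txt

Running under the same ASan-built fqrun reproduces both the explain-stage behaviour and the sanitizer summary that fails the chunk.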
Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-GroupByHopNoKey-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff [fail] test_sql_streaming.py::test[suites-GroupByHopPercentile-default.txt] [default-linux-x86_64-release-asan] (17.99s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_u5cn6nl7/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_u5cn6nl7/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_u5cn6nl7/ast.txt --plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_u5cn6nl7/plan.json --log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_u5cn6nl7/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_u5cn6nl7/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_u5cn6nl7/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_u5cn6nl7/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_u5cn6nl7/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_u5cn6nl7/topic_3.txt' has failed with code 100. 
E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7f0ac1441e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7f0ac13c4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7f0ac1470fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7f0ac136951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7f0ac136951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7f0ac1369360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7f0ac14c055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7f0ac14c055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7f0ac1440e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7f0ac1440e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7f0ac13c4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7f0ac13689ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7f0ac13689ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 452738 byte(s) leaked in 8595 allocation(s). Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-GroupByHopPercentile-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff ------ [test_sql_streaming.py 1/4] chunk ran 5 tests (total:93.75s - recipes:0.45s test:92.57s recipes:0.63s) [fail] test_sql_streaming.py::test[suites-GroupByHopTimeExtractorUnusedColumns-default.txt] [default-linux-x86_64-release-asan] (24.06s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_01lkqc1_/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_01lkqc1_/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_01lkqc1_/ast.txt 
--plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_01lkqc1_/plan.json --log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_01lkqc1_/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_01lkqc1_/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_01lkqc1_/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_01lkqc1_/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_01lkqc1_/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_01lkqc1_/topic_3.txt' has failed with code 100. E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7fd7827c1e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7fd782744025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7fd7827f0fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7fd7826e951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7fd7826e951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7fd7826e9360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7fd78284055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7fd78284055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7fd7827c0e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7fd7827c0e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7fd782744025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7fd7826e89ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7fd7826e89ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 453477 byte(s) leaked in 8611 allocation(s). 
Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-GroupByHopTimeExtractorUnusedColumns-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff [fail] test_sql_streaming.py::test[suites-GroupByHopWithDataWatermarks-default.txt] [default-linux-x86_64-release-asan] (15.79s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_m0mud7k5/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_m0mud7k5/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_m0mud7k5/ast.txt --plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_m0mud7k5/plan.json --log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_m0mud7k5/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_m0mud7k5/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_m0mud7k5/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_m0mud7k5/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_m0mud7k5/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_m0mud7k5/topic_3.txt' has failed with code 100. 
E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7f4493651e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7f44935d4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7f4493680fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7f449357951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7f449357951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7f4493579360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7f44936d055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7f44936d055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7f4493650e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7f4493650e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7f44935d4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7f44935789ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7f44935789ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 453035 byte(s) leaked in 8602 allocation(s). Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-GroupByHopWithDataWatermarks-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff [fail] test_sql_streaming.py::test[suites-GroupByHoppingWithDataWatermarks-default.txt] [default-linux-x86_64-release-asan] (17.37s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_lfi7pork/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_lfi7pork/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_lfi7pork/ast.txt --plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_lfi7pork/plan.json 
--log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_lfi7pork/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_lfi7pork/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_lfi7pork/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_lfi7pork/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_lfi7pork/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_lfi7pork/topic_3.txt' has failed with code 100. E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7f69dcd41e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7f69dccc4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7f69dcd70fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7f69dcc6951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7f69dcc6951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7f69dcc69360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7f69dcdc055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7f69dcdc055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7f69dcd40e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7f69dcd40e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7f69dccc4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7f69dcc689ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7f69dcc689ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 453477 byte(s) leaked in 8611 allocation(s). 
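Note on the failure pattern: every case in this chunk fails the same way. test_sql_streaming.py launches the fqrun binary with --action=explain and emulated PQ topics via yatest.common.process.execute(..., check_exit_code=False); fqrun exits with code 100, and although exit-code checking is disabled, the wrapper's wait()/_finalise() path still calls verify_sanitize_errors(), which raises ExecutionError because the captured output contains an AddressSanitizer leak summary. The sketch below only illustrates that kind of check and is not the actual yatest.common.process implementation; the helper names, the regular expression, and the use of subprocess are assumptions.

import re
import subprocess

# Marker that AddressSanitizer prints at the end of a leak report.
ASAN_SUMMARY = re.compile(
    r"SUMMARY: AddressSanitizer: (\d+) byte\(s\) leaked in (\d+) allocation\(s\)"
)

class SanitizerError(RuntimeError):
    """Raised when a child process reports a sanitizer finding, regardless of exit code."""

def run_and_verify(cmd, check_exit_code=False):
    # Run the child and capture its output; ASAN normally writes its report to stderr.
    proc = subprocess.run(cmd, capture_output=True, text=True)
    if check_exit_code and proc.returncode != 0:
        raise RuntimeError(f"command exited with code {proc.returncode}")
    # Sanitizer findings are treated as fatal even when exit-code checking is off.
    match = ASAN_SUMMARY.search(proc.stderr + proc.stdout)
    if match:
        raise SanitizerError(
            f"{match.group(1)} byte(s) leaked in {match.group(2)} allocation(s)"
        )
    return proc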
Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-GroupByHoppingWithDataWatermarks-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff [fail] test_sql_streaming.py::test[suites-ReadTopic-default.txt] [default-linux-x86_64-release-asan] (16.28s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_p_dn20dy/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_p_dn20dy/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_p_dn20dy/ast.txt --plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_p_dn20dy/plan.json --log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_p_dn20dy/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_p_dn20dy/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_p_dn20dy/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_p_dn20dy/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_p_dn20dy/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_p_dn20dy/topic_3.txt' has failed with code 100. 
E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7f24c11c1e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7f24c1144025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7f24c11f0fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7f24c10e951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7f24c10e951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7f24c10e9360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7f24c124055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7f24c124055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7f24c11c0e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7f24c11c0e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7f24c1144025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7f24c10e89ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7f24c10e89ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 453477 byte(s) leaked in 8611 allocation(s). Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-ReadTopic-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff [fail] test_sql_streaming.py::test[suites-ReadTopicGroupWriteToSolomon-default.txt] [default-linux-x86_64-release-asan] (16.11s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_ymw4u4x0/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_ymw4u4x0/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_ymw4u4x0/ast.txt --plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_ymw4u4x0/plan.json 
--log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_ymw4u4x0/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_ymw4u4x0/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_ymw4u4x0/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_ymw4u4x0/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_ymw4u4x0/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_ymw4u4x0/topic_3.txt' has failed with code 100. E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7f55476c1e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7f5547644025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7f55476f0fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7f55475e951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7f55475e951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7f55475e9360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7f554774055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7f554774055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7f55476c0e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7f55476c0e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7f5547644025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7f55475e89ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7f55475e89ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 453190 byte(s) leaked in 8605 allocation(s). 
Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-ReadTopicGroupWriteToSolomon-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff ------ [test_sql_streaming.py 2/4] chunk ran 5 tests (total:204.30s - recipes:3.11s test:198.90s recipes:1.30s) [fail] test_sql_streaming.py::test[suites-ReadTopicWithMetadata-default.txt] [default-linux-x86_64-release-asan] (102.00s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_rbd61x49/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_rbd61x49/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_rbd61x49/ast.txt --plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_rbd61x49/plan.json --log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_rbd61x49/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_rbd61x49/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_rbd61x49/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_rbd61x49/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_rbd61x49/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_rbd61x49/topic_3.txt' has failed with code 100. 
E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7f60e4921e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7f60e48a4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7f60e4950fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7f60e484951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7f60e484951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7f60e4849360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7f60e49a055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7f60e49a055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7f60e4920e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7f60e4920e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7f60e48a4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7f60e48489ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7f60e48489ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 453336 byte(s) leaked in 8608 allocation(s). Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-ReadTopicWithMetadata-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff [fail] test_sql_streaming.py::test[suites-ReadTopicWithMetadataInsideFilter-default.txt] [default-linux-x86_64-release-asan] (21.40s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_2ecs52za/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_2ecs52za/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_2ecs52za/ast.txt --plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_2ecs52za/plan.json 
--log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_2ecs52za/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_2ecs52za/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_2ecs52za/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_2ecs52za/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_2ecs52za/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_2ecs52za/topic_3.txt' has failed with code 100. E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7f450e011e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7f450df94025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7f450e040fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7f450df3951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7f450df3951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7f450df39360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7f450e09055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7f450e09055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7f450e010e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7f450e010e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7f450df94025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7f450df389ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7f450df389ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 453477 byte(s) leaked in 8611 allocation(s). 
Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-ReadTopicWithMetadataInsideFilter-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff [fail] test_sql_streaming.py::test[suites-ReadTopicWithMetadataNestedDeep-default.txt] [default-linux-x86_64-release-asan] (20.82s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_9wcbnzy5/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_9wcbnzy5/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_9wcbnzy5/ast.txt --plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_9wcbnzy5/plan.json --log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_9wcbnzy5/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_9wcbnzy5/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_9wcbnzy5/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_9wcbnzy5/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_9wcbnzy5/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_9wcbnzy5/topic_3.txt' has failed with code 100. 
E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7f32d58e1e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7f32d5864025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7f32d5910fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7f32d580951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7f32d580951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7f32d5809360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7f32d596055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7f32d596055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7f32d58e0e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7f32d58e0e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7f32d5864025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7f32d58089ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7f32d58089ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 453477 byte(s) leaked in 8611 allocation(s). Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-ReadTopicWithMetadataNestedDeep-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff [fail] test_sql_streaming.py::test[suites-ReadTopicWithMetadataWithFilter-default.txt] [default-linux-x86_64-release-asan] (23.49s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8dkfnc6j/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8dkfnc6j/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8dkfnc6j/ast.txt --plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8dkfnc6j/plan.json 
--log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8dkfnc6j/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8dkfnc6j/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8dkfnc6j/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8dkfnc6j/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8dkfnc6j/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_8dkfnc6j/topic_3.txt' has failed with code 100. E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7f3628f51e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7f3628ed4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7f3628f80fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7f3628e7951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7f3628e7951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7f3628e79360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7f3628fd055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7f3628fd055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7f3628f50e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7f3628f50e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7f3628ed4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7f3628e789ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7f3628e789ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 452645 byte(s) leaked in 8593 allocation(s). 
Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-ReadTopicWithMetadataWithFilter-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff [fail] test_sql_streaming.py::test[suites-ReadTopicWithSchema-default.txt] [default-linux-x86_64-release-asan] (22.77s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_s4y5nvie/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_s4y5nvie/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_s4y5nvie/ast.txt --plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_s4y5nvie/plan.json --log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_s4y5nvie/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_s4y5nvie/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_s4y5nvie/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_s4y5nvie/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_s4y5nvie/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_s4y5nvie/topic_3.txt' has failed with code 100. 
E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7fd048b81e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7fd048b04025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7fd048bb0fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7fd048aa951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7fd048aa951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7fd048aa9360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7fd048c0055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7fd048c0055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7fd048b80e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7fd048b80e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7fd048b04025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7fd048aa89ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7fd048aa89ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 453477 byte(s) leaked in 8611 allocation(s). Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-ReadTopicWithSchema-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff ------ [test_sql_streaming.py 3/4] chunk ran 5 tests (total:89.89s - recipes:0.44s test:88.79s recipes:0.54s) [fail] test_sql_streaming.py::test[suites-ReadTwoTopics-default.txt] [default-linux-x86_64-release-asan] (21.71s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_79aodn49/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_79aodn49/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_79aodn49/ast.txt 
--plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_79aodn49/plan.json --log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_79aodn49/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_79aodn49/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_79aodn49/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_79aodn49/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_79aodn49/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_79aodn49/topic_3.txt' has failed with code 100. E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7fc918461e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7fc9183e4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7fc918490fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7fc91838951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7fc91838951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7fc918389360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7fc9184e055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7fc9184e055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7fc918460e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7fc918460e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7fc9183e4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7fc9183889ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7fc9183889ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 453477 byte(s) leaked in 8611 allocation(s). 
Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-ReadTwoTopics-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff [fail] test_sql_streaming.py::test[suites-ReadWriteSameTopic-default.txt] [default-linux-x86_64-release-asan] (15.52s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_b1_6j232/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_b1_6j232/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_b1_6j232/ast.txt --plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_b1_6j232/plan.json --log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_b1_6j232/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_b1_6j232/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_b1_6j232/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_b1_6j232/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_b1_6j232/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_b1_6j232/topic_3.txt' has failed with code 100. 
E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7f301ad51e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7f301acd4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7f301ad80fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7f301ac7951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7f301ac7951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7f301ac79360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7f301add055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7f301add055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7f301ad50e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7f301ad50e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7f301acd4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7f301ac789ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7f301ac789ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 453477 byte(s) leaked in 8611 allocation(s). Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-ReadWriteSameTopic-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff [fail] test_sql_streaming.py::test[suites-ReadWriteTopic-default.txt] [default-linux-x86_64-release-asan] (17.44s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vuqxi5cn/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vuqxi5cn/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vuqxi5cn/ast.txt --plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vuqxi5cn/plan.json 
--log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vuqxi5cn/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vuqxi5cn/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vuqxi5cn/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vuqxi5cn/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vuqxi5cn/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vuqxi5cn/topic_3.txt' has failed with code 100. E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7feb562f1e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7feb56274025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7feb56320fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7feb5621951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7feb5621951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7feb56219360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7feb5637055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7feb5637055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7feb562f0e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7feb562f0e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7feb56274025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7feb562189ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7feb562189ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 453477 byte(s) leaked in 8611 allocation(s). 
Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-ReadWriteTopic-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff [fail] test_sql_streaming.py::test[suites-ReadWriteTopicWithSchema-default.txt] [default-linux-x86_64-release-asan] (15.82s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vboxscc4/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vboxscc4/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vboxscc4/ast.txt --plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vboxscc4/plan.json --log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vboxscc4/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vboxscc4/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vboxscc4/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vboxscc4/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vboxscc4/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_vboxscc4/topic_3.txt' has failed with code 100. 
E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7fab77a41e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7fab779c4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7fab77a70fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7fab7796951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7fab7796951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7fab77969360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7fab77ac055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7fab77ac055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7fab77a40e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7fab77a40e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7fab779c4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7fab779689ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7fab779689ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 453477 byte(s) leaked in 8611 allocation(s). Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-ReadWriteTopicWithSchema-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff [fail] test_sql_streaming.py::test[suites-WriteTwoTopics-default.txt] [default-linux-x86_64-release-asan] (15.36s) ydb/tests/fq/streaming_optimize/test_sql_streaming.py:38: in test result = fq_run.yql_exec(action="explain") ydb/tests/fq/tools/fqrun.py:80: in yql_exec proc_result = yatest.common.process.execute(cmd.strip().split(), check_exit_code=False, cwd=self.res_dir) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:425: in _finalise self.verify_sanitize_errors() library/python/testing/yatest_common/yatest/common/process.py:454: in verify_sanitize_errors raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/fqrun/fqrun --exclude-linked-udfs --action=explain --cfg=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_513wfor9/fq_config.conf --result-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_513wfor9/results.txt --ast-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_513wfor9/ast.txt --plan-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_513wfor9/plan.json 
--log-file=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_513wfor9/log.txt --udfs-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql_udfs --result-format=full-proto --canonical-output --query=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_513wfor9/query_0.sql --emulate-pq=test_topic_input@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_513wfor9/topic_0.txt --emulate-pq=test_topic_input2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_513wfor9/topic_1.txt --emulate-pq=test_topic_output@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_513wfor9/topic_2.txt --emulate-pq=test_topic_output2@/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/yql/test_fqrun_513wfor9/topic_3.txt' has failed with code 100. E Errors: E ...mpl /-S/contrib/tools/python3/Python/bltinmodule.c:1093:17 E #30 0x7f3b6d151e71 in builtin_exec /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:543:20 E #31 0x7f3b6d0d4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #32 0x7f3b6d180fba in _PyEval_EvalFrameDefault /tmp/Python/bytecodes.c:3263:26 E #33 0x7f3b6d07951a in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #34 0x7f3b6d07951a in object_vacall /-S/contrib/tools/python3/Objects/call.c:850:14 E #35 0x7f3b6d079360 in PyObject_CallMethodObjArgs /-S/contrib/tools/python3/Objects/call.c:911:24 E #36 0x7f3b6d1d055a in import_find_and_load /-S/contrib/tools/python3/Python/import.c:2802:11 E #37 0x7f3b6d1d055a in PyImport_ImportModuleLevelObject /-S/contrib/tools/python3/Python/import.c:2885:15 E #38 0x7f3b6d150e67 in builtin___import___impl /-S/contrib/tools/python3/Python/bltinmodule.c:276:12 E #39 0x7f3b6d150e67 in builtin___import__ /-S/contrib/tools/python3/Python/clinic/bltinmodule.c.h:107:20 E #40 0x7f3b6d0d4025 in cfunction_vectorcall_FASTCALL_KEYWORDS /-S/contrib/tools/python3/Objects/methodobject.c:438:24 E #41 0x7f3b6d0789ec in _PyObject_VectorcallTstate /-S/contrib/tools/python3/Include/internal/pycore_call.h:92:11 E #42 0x7f3b6d0789ec in _PyObject_CallFunctionVa /-S/contrib/tools/python3/Objects/call.c:562:18 E E SUMMARY: AddressSanitizer: 453477 byte(s) leaked in 8611 allocation(s). Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff/test_sql_streaming.py.test.suites-WriteTwoTopics-default.txt.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/fq/streaming_optimize/test-results/py3test/testing_out_stuff ------ FAIL: 21 - FAIL ydb/tests/fq/streaming_optimize ------ [test_discovery.py] chunk ran 3 tests (total:156.88s - test:156.81s) Info: Test run has exceeded 10.0G (10485760K) memory limit with 14.1G (14788316K) used. 
This may lead to test failure on the Autocheck/CI You can increase test's ram requirement using REQUIREMENTS(ram:X) in the ya.make pid rss ref pdirt 1217122 45.3M 45.3M 6.3M test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1217303 33.2M 21.2M 9.0M └─ test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1217322 829M 835M 754M └─ ydb-tests-functional-api --basetemp /home/runner/actions_runner/_work/ydb/ydb/tmp/out/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doct 1254317 1.4G 1.5G 1.0G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/api/test-results/py3test/testing_out_stuff/t 1254318 1.4G 1.5G 1.0G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/api/test-results/py3test/testing_out_stuff/t 1254326 1.4G 1.5G 1.0G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/api/test-results/py3test/testing_out_stuff/t 1254328 1.5G 1.5G 1.0G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/api/test-results/py3test/testing_out_stuff/t 1254330 1.5G 1.5G 1.0G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/api/test-results/py3test/testing_out_stuff/t 1254331 1.5G 1.5G 1.1G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/api/test-results/py3test/testing_out_stuff/t 1254333 1.5G 1.5G 1.1G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/api/test-results/py3test/testing_out_stuff/t 1254335 1.5G 1.5G 1.0G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/api/test-results/py3test/testing_out_stuff/t 1254336 1.5G 1.5G 1.1G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/api/test-results/py3test/testing_out_stuff/t 1256515 183M 0b 0b └─ ydbd --server=grpc://localhost:15003 admin blobstorage config invoke --proto=Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/home/run Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/api/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/api/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/api/test-results/py3test/testing_out_stuff/stderr ydb/tests/functional/benchmarks_init [size:medium] nchunks:20 ------ [test_generator.py 3/10] chunk ran 1 test (total:631.51s - test:600.09s) Chunk exceeded 600s timeout, failed to shutdown gracefully in 30s and was terminated using SIGQUIT signal List of the tests involved in the launch: test_generator.py::TestTpcdsGenerator::test_s1_state_and_parts (timeout) duration: 625.07s Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/benchmarks_init/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/benchmarks_init/test-results/py3test/testing_out_stuff Stderr: 
/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/benchmarks_init/test-results/py3test/testing_out_stuff/stderr [timeout] test_generator.py::TestTpcdsGenerator::test_s1_state_and_parts [default-linux-x86_64-release-asan] (625.07s) Killed by timeout (600 s) Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/benchmarks_init/test-results/py3test/testing_out_stuff/test_generator.py.TestTpcdsGenerator.test_s1_state_and_parts.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/benchmarks_init/test-results/py3test/testing_out_stuff ------ TIMEOUT: 22 - GOOD, 1 - TIMEOUT ydb/tests/functional/benchmarks_init ydb/tests/functional/bridge [size:medium] nchunks:10 ------ [test_bridge.py 0/10] chunk ran 1 test (total:68.89s - setup:0.08s test:60.10s) Chunk exceeded 60s timeout and was killed List of the tests involved in the launch: test_bridge.py::TestBridgeBasic::test_update_and_get_cluster_state (good) duration: 46.46s Killed by timeout (60 s) Info: Test run has exceeded 16.0G (16777216K) memory limit with 24.0G (25121724K) used. This may lead to test failure on the Autocheck/CI You can increase test's ram requirement using REQUIREMENTS(ram:X) in the ya.make pid rss ref pdirt 1164549 45.3M 39.6M 6.2M test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1164908 32.4M 19.2M 8.2M └─ test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1165037 858M 854M 782M └─ ydb-tests-functional-bridge --basetemp /home/runner/actions_runner/_work/ydb/ydb/tmp/out/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --d 1168127 1.3G 1.3G 978M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1168133 1.3G 1.3G 968M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1168175 1.3G 1.3G 946M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1168184 1.3G 1.3G 971M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1168185 2.2G 2.2G 1.1G ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1168220 1.3G 1.3G 954M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1168221 1.3G 1.3G 950M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1168224 1.4G 1.3G 975M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1168225 2.1G 2.1G 1.1G ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1168226 1.3G 1.3G 994M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1168227 1.3G 1.3G 977M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1168261 2.2G 2.2G 1.1G ├─ ydbd server 
--config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1168262 1.3G 1.3G 951M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1168263 1.3G 1.2G 868M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1168264 1.4G 1.1G 781M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1168265 1.3G 1.1G 728M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1188575 450M 0b 0b └─ ydbd --server=grpc://localhost:22978 admin blobstorage config invoke --proto=Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "dynamic_ 1189326 450M 0b 0b └─ ydbd --server=grpc://localhost:22978 admin blobstorage config invoke --proto=Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "dynam Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/stderr ------ [test_bridge.py 1/10] chunk ran 1 test (total:59.22s - setup:0.01s test:58.84s) Info: Test run has exceeded 16.0G (16777216K) memory limit with 23.1G (24172116K) used. This may lead to test failure on the Autocheck/CI You can increase test's ram requirement using REQUIREMENTS(ram:X) in the ya.make pid rss ref pdirt 1175944 45.3M 44.9M 6.2M test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1176158 32.4M 20.4M 8.2M └─ test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1176163 810M 812M 733M └─ ydb-tests-functional-bridge --basetemp /home/runner/actions_runner/_work/ydb/ydb/tmp/out/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --d 1182326 1.3G 1.3G 939M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1182328 1.3G 1.3G 946M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1182329 2.1G 2.2G 1.1G ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1182330 1.3G 1.3G 965M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1182332 1.3G 1.3G 940M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1182336 1.3G 1.3G 934M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1182338 1.3G 1.2G 842M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1182339 2.2G 2.1G 1.0G ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1182341 1.3G 1.1G 744M ├─ ydbd server 
--config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1182379 1.3G 1.1G 748M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1182382 1.3G 1.1G 768M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1182386 1.4G 1.1G 791M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1182399 1.3G 1.2G 795M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1182403 1.3G 1.2G 826M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1182408 1.3G 1.2G 812M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1182416 1.3G 1.1G 777M └─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/stderr ------ [test_bridge.py 2/10] chunk ran 1 test (total:60.89s - test:60.10s) Chunk exceeded 60s timeout and was killed List of the tests involved in the launch: test_bridge.py::TestBridgeValidation::test_invalid_updates[updates1-multiple_primary_piles_in_request] (good) duration: 52.77s Killed by timeout (60 s) Info: Test run has exceeded 16.0G (16777216K) memory limit with 23.9G (25064372K) used. 
This may lead to test failure on the Autocheck/CI You can increase test's ram requirement using REQUIREMENTS(ram:X) in the ya.make pid rss ref pdirt 1145873 45.3M 44.6M 6.2M test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1146643 33.7M 18.9M 9.4M └─ test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1146704 822M 809M 745M └─ ydb-tests-functional-bridge --basetemp /home/runner/actions_runner/_work/ydb/ydb/tmp/out/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --d 1151320 1.4G 1.4G 988M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151388 1.3G 1.3G 948M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151406 1.3G 1.3G 961M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151412 1.3G 1.3G 963M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151452 1.3G 1.3G 953M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151479 1.7G 2.1G 1.0G ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151482 1.3G 1.3G 962M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151485 1.3G 1.3G 969M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151488 1.3G 1.3G 961M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151491 1.3G 1.3G 964M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151530 1.3G 1.3G 967M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151534 1.3G 1.3G 971M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151575 1.3G 1.3G 971M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151616 1.3G 1.3G 947M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151617 1.3G 1.3G 969M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151635 2.2G 2.2G 1.1G └─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/stderr ------ [test_bridge.py 3/10] chunk ran 1 test (total:54.38s - setup:0.06s test:54.21s) Info: Test 
run has exceeded 16.0G (16777216K) memory limit with 23.9G (25031416K) used. This may lead to test failure on the Autocheck/CI You can increase test's ram requirement using REQUIREMENTS(ram:X) in the ya.make pid rss ref pdirt 1174857 45.3M 44.6M 6.2M test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1174983 32.9M 20.7M 8.7M └─ test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1175027 758M 760M 680M └─ ydb-tests-functional-bridge --basetemp /home/runner/actions_runner/_work/ydb/ydb/tmp/out/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --d 1177423 1.3G 1.3G 959M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177424 2.2G 2.2G 1.1G ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177425 1.3G 1.2G 836M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177427 1.3G 1.1G 733M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177428 1.3G 1.1G 741M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177430 1.3G 1.1G 728M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177432 1.3G 1.1G 702M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177469 1.4G 1.2G 763M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177504 2.2G 2.1G 1001M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177510 1.3G 1.2G 813M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177515 1.3G 1.3G 923M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177526 1.3G 1.2G 856M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177528 1.3G 1.2G 794M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177586 1.3G 1.2G 792M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177620 1.3G 1.2G 829M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177628 1.3G 1.2G 792M └─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/stderr ------ 
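Each of the "Test run has exceeded ... memory limit" warnings above comes with the same remedy from the test runner: raise the declared RAM requirement of the affected suite. Below is a minimal sketch of how that declaration could look in the suite's ya.make, assuming the PY3TEST() module layout these suites appear to use; the macro name REQUIREMENTS(ram:X) is taken from the runner's own hint, while the value 32 is purely illustrative and should be sized from the rss figures in the process trees above.

PY3TEST()
# ... the rest of the suite's ya.make (sources, PEERDIR, DATA) stays unchanged ...
SIZE(MEDIUM)
# Allow each test chunk up to 32 GiB of RAM instead of the 16 GiB limit reported in this log.
REQUIREMENTS(ram:32)
END()

For the bridge chunks above, which report roughly 23 to 24 GiB used against a 16 GiB limit, any declared value at or above the observed peak would satisfy the runner's suggestion; whether the usage itself is expected under the asan build is a separate question.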
[test_bridge.py 4/10] chunk ran 1 test (total:49.11s - setup:0.03s test:48.82s) Info: Test run has exceeded 16.0G (16777216K) memory limit with 24.1G (25244552K) used. This may lead to test failure on the Autocheck/CI You can increase test's ram requirement using REQUIREMENTS(ram:X) in the ya.make pid rss ref pdirt 1174852 45.3M 44.6M 6.2M test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1174937 33.3M 21.2M 9.1M └─ test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1174986 795M 797M 717M └─ ydb-tests-functional-bridge --basetemp /home/runner/actions_runner/_work/ydb/ydb/tmp/out/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --d 1177111 1.3G 1.3G 965M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177112 1.3G 1.4G 977M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177113 1.3G 1.3G 973M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177114 1.4G 1.4G 998M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177115 1.3G 1.3G 965M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177116 1.3G 1.3G 962M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177117 1.3G 1.3G 950M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177167 1.3G 1.4G 978M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177168 1.3G 1.3G 971M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177175 1.3G 1.3G 965M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177191 1.3G 1.3G 972M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177199 2.1G 2.2G 1.1G ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177200 1.3G 1.3G 955M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177249 2.2G 2.2G 1.1G ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177256 1.4G 1.4G 991M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1177257 1.3G 1.4G 974M └─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff Stderr: 
/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/stderr ------ [test_bridge.py 5/10] chunk ran 1 test (total:58.64s - test:58.39s) Info: Test run has exceeded 16.0G (16777216K) memory limit with 23.8G (25004104K) used. This may lead to test failure on the Autocheck/CI You can increase test's ram requirement using REQUIREMENTS(ram:X) in the ya.make pid rss ref pdirt 1145858 45.3M 45.2M 6.2M test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1146205 33.7M 21.6M 9.5M └─ test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1146354 827M 820M 750M └─ ydb-tests-functional-bridge --basetemp /home/runner/actions_runner/_work/ydb/ydb/tmp/out/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --d 1151259 1.3G 1.3G 969M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151290 1.3G 1.3G 967M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151300 1.3G 1.3G 972M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151303 1.3G 1.2G 784M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151321 1.3G 1.1G 759M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151395 1.3G 1.1G 714M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151405 1.3G 1.1G 765M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151409 1.3G 1.1G 715M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151459 2.2G 2.1G 984M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151476 2.2G 2.1G 969M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151480 1.3G 1.2G 778M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151483 1.3G 1.1G 758M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151489 1.3G 1.2G 834M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151525 1.3G 1.2G 805M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151529 1.3G 1.2G 829M ├─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi 1151532 1.3G 1.2G 832M └─ ydbd server --config-dir=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testi Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/run_test.log Logsdir: 
/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/bridge/test-results/py3test/testing_out_stuff/stderr ------ TIMEOUT: 6 - GOOD ydb/tests/functional/bridge ydb/tests/functional/hive [size:medium] nchunks:80 ------ [test_drain.py 0/20] chunk ran 1 test (total:67.82s - setup:0.10s test:67.31s) [fail] test_drain.py::TestHive::test_drain_on_stop [default-linux-x86_64-release-asan] (58.32s) ydb/tests/functional/hive/test_drain.py:93: in test_drain_on_stop wait_tablets_are_active( ydb/tests/library/common/delayed.py:151: in wait_tablets_are_active predicate(raise_error=True) ydb/tests/library/common/delayed.py:141: in predicate raise AssertionError( E AssertionError: E ############################## E 0 seconds passed, 59 tablet(s) are not active. Inactive tablets are (first 10 entries): (72075186224038607: 6) (72075186224038634: 6) (72075186224038646: 4) (72075186224038666: 6) (72075186224038674: 6) (72075186224038682: 6) (72075186224038714: 6) (72075186224038726: 4) (72075186224038734: 4) (72075186224038742: 4). Additional info is empty E ############################## Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/hive/test-results/py3test/testing_out_stuff/test_drain.py.TestHive.test_drain_on_stop.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/hive/test-results/py3test/testing_out_stuff ------ FAIL: 6 - GOOD, 1 - FAIL ydb/tests/functional/hive ydb/tests/functional/postgresql [size:medium] ------ sole chunk ran 14 tests (total:111.60s - setup:0.01s test:111.30s) [fail] test_postgres.py::TestPostgresSuite::test_postgres_suite[strings] [default-linux-x86_64-release-asan] (53.18s) teardown failed: ydb/tests/functional/postgresql/test_postgres.py:77: in teardown_class cls.cluster.stop() ydb/tests/library/harness/kikimr_runner.py:606: in stop raise daemon.SeveralDaemonErrors(saved_exceptions) E ydb.tests.library.harness.daemon.SeveralDaemonErrors: Daemon failed with message: Bad exit_code.. E Process exit_code = 100. 
E Stdout file name: E /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/postgresql/test-results/py3test/testing_out_stuff/test_postgres.py.TestPostgresSuite.test_postgres_suite.select_1/cluster/node_1/stdout E Stderr file name: E /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/postgresql/test-results/py3test/testing_out_stuff/test_postgres.py.TestPostgresSuite.test_postgres_suite.select_1/cluster/node_1/stderr E Stderr content: E E GRpc memory quota was set but disabled due to issues with grpc quoter, to enable it use EnableGRpcMemoryQuota option E Current KQP shutdown state: spent 1e-06 seconds, not started yet E warning: address range table at offset 0x10c0 has a premature terminator entry at offset 0x10d0 E E ================================================================= E ==1081563==ERROR: LeakSanitizer: detected memory leaks E E Indirect leak of 27440 byte(s) in 7 object(s) allocated from: E #0 0x1e37388d in operator new(unsigned long) /-S/contrib/libs/clang18-rt/lib/asan/asan_new_delete.cpp:86:3 E #1 0x452d916f in MakeIntrusive, NYql::TTypeAnnotationContext &> /-S/util/generic/ptr.h:834:12 E #2 0x452d916f in NYql::TYtState::TYtState(NYql::TTypeAnnotationContext*) /-S/yt/yql/providers/yt/provider/yql_yt_provider.h:102:25 E #3 0x452cab39 in MakeIntrusive, NYql::TTypeAnnotationContext *> /-S/util/generic/ptr.h:834:16 E #4 0x452cab39 in NYql::CreateYtNativeState(TIntrusivePtr>, TBasicString> const&, TBasicString> const&, NYql::TYtGatewayConfig const*, TIntrusivePtr>, std::__y1::shared_ptr const&, std::__y1::shared_ptr< ..[snippet truncated].. ibcxx/include/new:271:10 E #2 0x450c7856 in __libcpp_allocate /-S/contrib/libs/cxxsupp/libcxx/include/new:295:10 E #3 0x450c7856 in allocate /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocator.h:103:32 E #4 0x450c7856 in initialize_dynamic /-S/util/generic/hash_table.h:233:35 E #5 0x450c7856 in initialize_buckets_dynamic /-S/util/generic/hash_table.h:906:17 E #6 0x450c7856 in THashTable> const, TIntrusivePtr>>, TBasicString>, THash>>, TSelect1st, TEqualTo>>, std::__y1::allocator>>>::reserve(unsigned long) /-S/util/generic/hash_table.h:1324:13 E #7 0x450ac5c6 in insert_unique >, TIntrusivePtr > > > /-S/util/generic/hash_table.h:673:9 E #8 0x450ac5c6 in insert /-S/util/generic/hash.h:154:20 E #9 0x450ac5c6 in NYql::NCommon::TSettingDispatcher::TSettingHandlerImpl& NYql::NCommon::TSettingDispatcher::AddSetting(TBasicString> const&, NYql::NCommon::TConfSetting&) /-S/yql/essentials/providers/common/config/yql_dispatch.h:355:24 E #10 0x4508bd63 in NYql::TYtConfiguration::TYtConfiguration(NYql::TTypeAnnotationContext&) /-S/yt/yql/providers/yt/common/yql_yt_settings.cpp:472:5 E #11 0x452d9180 in TYtVersionedConfiguration /-S/yt/yql/providers/yt/common/yql_yt_settings.h:388:11 E #12 0x452d9180 in MakeIntrusive, NYql::TTypeAnnotationContext &> /-S/util/generic/ptr.h:834:16 E #13 0x452d9180 in NYql::TYtState::TYtState(NYql::TTypeAnnotationContext*) /-S/yt/yql/providers/yt/provider/yql_yt_provider.h:102:25 E #14 0x452cab39 in MakeIntrusive, NYql::TTypeAnnotationContext *> /-S/util/generic/ptr.h:834:16 E #15 0x452cab39 in NYql::Creat... 
Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/postgresql/test-results/py3test/testing_out_stuff/test_postgres.py.TestPostgresSuite.test_postgres_suite.strings.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/postgresql/test-results/py3test/testing_out_stuff ------ FAIL: 13 - GOOD, 1 - FAIL ydb/tests/functional/postgresql ydb/tests/functional/serverless [size:medium] nchunks:20 ------ [test_serverless.py 4/10] chunk ran 2 tests (total:253.37s - setup:0.02s test:252.29s) [fail] test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--false] [default-linux-x86_64-release-asan] (74.65s) ydb/tests/functional/serverless/test_serverless.py:452: in test_database_with_disk_quotas IOLoop.current().run_sync(lambda: async_write_key(path, 0, 'test', ignore_out_of_space=False)) contrib/python/tornado/tornado-4/tornado/ioloop.py:458: in run_sync return future_cell[0].result() contrib/python/tornado/tornado-4/tornado/concurrent.py:238: in result raise_exc_info(self._exc_info) :4: in raise_exc_info ??? contrib/python/tornado/tornado-4/tornado/gen.py:1064: in run yielded = self.gen.throw(*exc_info) ydb/tests/functional/serverless/test_serverless.py:349: in wrapped res = yield func(*args, **kwargs) contrib/python/tornado/tornado-4/tornado/gen.py:1056: in run value = future.result() contrib/python/tornado/tornado-4/tornado/concurrent.py:238: in result raise_exc_info(self._exc_info) :4: in raise_exc_info ??? contrib/python/tornado/tornado-4/tornado/gen.py:1064: in run yielded = self.gen.throw(*exc_info) ydb/tests/functional/serverless/test_serverless.py:369: in async_write_key yield tx.async_execute( contrib/python/tornado/tornado-4/tornado/gen.py:1056: in run value = future.result() contrib/tools/python3/Lib/concurrent/futures/_base.py:449: in result return self.__get_result() contrib/tools/python3/Lib/concurrent/futures/_base.py:401: in __get_result raise self._exception contrib/python/ydb/py3/ydb/connection.py:105: in _on_response_callback response = response if wrap_result is None else wrap_result(rpc_state, response, *wrap_args) contrib/python/ydb/py3/ydb/_session_impl.py:20: in decorator return func(rpc_state, response_pb, session_state, *args, **kwargs) contrib/python/ydb/py3/ydb/_tx_ctx_impl.py:9: in decorator return func(rpc_state, response_pb, session_state, tx_state, *args, **kwargs) contrib/python/ydb/py3/ydb/_tx_ctx_impl.py:22: in decorator return func(rpc_state, response_pb, session_state, tx_state, query, *args, **kwargs) contrib/python/ydb/py3/ydb/_tx_ctx_impl.py:165: in wrap_result_and_tx_id issues._process_response(response_pb.operation) contrib/python/ydb/py3/ydb/issues.py:229: in _process_response raise exc_obj(_format_response(response_proto), response_proto.issues) E ydb.issues.Unavailable: message: "Disk space exhausted. Table `/Root/quoted_serverless/test_database_with_disk_quotas_enable_alter_database_create_hive_first--false_/dirA0/table`." issue_code: 2033 severity: 1 issues { message: "Cannot perform writes: database is out of disk space" issue_code: 2033 severity: 1 } (server_code: 400050) During handling of the above exception, another exception occurred: ydb/tests/functional/serverless/test_serverless.py:451: in test_database_with_disk_quotas with pytest.raises(ydb.Unavailable, match=r'.*DISK_SPACE_EXHAUSTED.*'): E AssertionError: Regex pattern did not match. E Regex: '.*DISK_SPACE_EXHAUSTED.*' E Input: 'message: "Disk space exhausted. 
Table `/Root/quoted_serverless/test_database_with_disk_quotas_enable_alter_database_create_hive_first--false_/dirA0/table`." issue_code: 2033 severity: 1 issues { message: "Cannot perform writes: database is out of disk space" issue_code: 2033 severity: 1 } (server_code: 400050)' Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/serverless/test-results/py3test/testing_out_stuff/test_serverless.py.test_database_with_disk_quotas.enable_alter_database_create_hive_first--false.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/serverless/test-results/py3test/testing_out_stuff [fail] test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--true] [default-linux-x86_64-release-asan] (168.45s) ydb/tests/functional/serverless/test_serverless.py:452: in test_database_with_disk_quotas IOLoop.current().run_sync(lambda: async_write_key(path, 0, 'test', ignore_out_of_space=False)) contrib/python/tornado/tornado-4/tornado/ioloop.py:458: in run_sync return future_cell[0].result() contrib/python/tornado/tornado-4/tornado/concurrent.py:238: in result raise_exc_info(self._exc_info) :4: in raise_exc_info ??? contrib/python/tornado/tornado-4/tornado/gen.py:1064: in run yielded = self.gen.throw(*exc_info) ydb/tests/functional/serverless/test_serverless.py:349: in wrapped res = yield func(*args, **kwargs) contrib/python/tornado/tornado-4/tornado/gen.py:1056: in run value = future.result() contrib/python/tornado/tornado-4/tornado/concurrent.py:238: in result raise_exc_info(self._exc_info) :4: in raise_exc_info ??? contrib/python/tornado/tornado-4/tornado/gen.py:1064: in run yielded = self.gen.throw(*exc_info) ydb/tests/functional/serverless/test_serverless.py:369: in async_write_key yield tx.async_execute( contrib/python/tornado/tornado-4/tornado/gen.py:1056: in run value = future.result() contrib/tools/python3/Lib/concurrent/futures/_base.py:449: in result return self.__get_result() contrib/tools/python3/Lib/concurrent/futures/_base.py:401: in __get_result raise self._exception contrib/python/ydb/py3/ydb/connection.py:105: in _on_response_callback response = response if wrap_result is None else wrap_result(rpc_state, response, *wrap_args) contrib/python/ydb/py3/ydb/_session_impl.py:20: in decorator return func(rpc_state, response_pb, session_state, *args, **kwargs) contrib/python/ydb/py3/ydb/_tx_ctx_impl.py:9: in decorator return func(rpc_state, response_pb, session_state, tx_state, *args, **kwargs) contrib/python/ydb/py3/ydb/_tx_ctx_impl.py:22: in decorator return func(rpc_state, response_pb, session_state, tx_state, query, *args, **kwargs) contrib/python/ydb/py3/ydb/_tx_ctx_impl.py:165: in wrap_result_and_tx_id issues._process_response(response_pb.operation) contrib/python/ydb/py3/ydb/issues.py:229: in _process_response raise exc_obj(_format_response(response_proto), response_proto.issues) E ydb.issues.Unavailable: message: "Disk space exhausted. Table `/Root/quoted_serverless/test_database_with_disk_quotas_enable_alter_database_create_hive_first--true_/dirA0/table`." issue_code: 2033 severity: 1 issues { message: "Cannot perform writes: database is out of disk space" issue_code: 2033 severity: 1 } (server_code: 400050) During handling of the above exception, another exception occurred: ydb/tests/functional/serverless/test_serverless.py:451: in test_database_with_disk_quotas with pytest.raises(ydb.Unavailable, match=r'.*DISK_SPACE_EXHAUSTED.*'): E AssertionError: Regex pattern did not match. 
E Regex: '.*DISK_SPACE_EXHAUSTED.*' E Input: 'message: "Disk space exhausted. Table `/Root/quoted_serverless/test_database_with_disk_quotas_enable_alter_database_create_hive_first--true_/dirA0/table`." issue_code: 2033 severity: 1 issues { message: "Cannot perform writes: database is out of disk space" issue_code: 2033 severity: 1 } (server_code: 400050)' Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/serverless/test-results/py3test/testing_out_stuff/test_serverless.py.test_database_with_disk_quotas.enable_alter_database_create_hive_first--true.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/serverless/test-results/py3test/testing_out_stuff ------ FAIL: 20 - GOOD, 2 - FAIL ydb/tests/functional/serverless ydb/tests/functional/sqs/cloud [size:medium] nchunks:40 ------ [35/40] chunk ran 2 tests (total:113.19s - setup:0.01s test:113.12s) [fail] test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v1] [default-linux-x86_64-release-asan] (77.25s) ydb/tests/functional/sqs/cloud/test_yandex_cloud_mode.py:829: in test_yc_events_processor assert len(lines) >= 2, "Got only %s event lines after all attempts" % len(lines) E AssertionError: Got only 0 event lines after all attempts E assert 0 >= 2 E + where 0 = len([]) Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/sqs/cloud/test-results/py3test/testing_out_stuff/test_yandex_cloud_mode.py.TestSqsYandexCloudMode.test_yc_events_processor.tables_format_v1.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/sqs/cloud/test-results/py3test/testing_out_stuff ------ FAIL: 76 - GOOD, 1 - FAIL ydb/tests/functional/sqs/cloud ------ sole chunk ran 1 test (total:146.00s - test:145.11s) Info: Test run has exceeded 8.0G (8388608K) memory limit with 13.8G (14483848K) used. 
This may lead to test failure on the Autocheck/CI You can increase test's ram requirement using REQUIREMENTS(ram:X) in the ya.make pid rss ref pdirt 1110000 45.3M 45.3M 6.2M test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1110203 33.7M 21.8M 9.5M └─ test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1110219 1.1G 1.1G 1.0G └─ functional-sqs-merge_split_common_table-std --basetemp /home/runner/actions_runner/_work/ydb/ydb/tmp/out/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini 1113003 1.6G 1.6G 1.1G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/sqs/merge_split_common_table/std/test-result 1113006 1.7G 1.6G 1.2G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/sqs/merge_split_common_table/std/test-result 1113007 1.6G 1.6G 1.1G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/sqs/merge_split_common_table/std/test-result 1113017 1.6G 1.6G 1.1G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/sqs/merge_split_common_table/std/test-result 1113023 1.6G 1.6G 1.1G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/sqs/merge_split_common_table/std/test-result 1113024 1.6G 1.6G 1.1G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/sqs/merge_split_common_table/std/test-result 1113028 1.6G 1.6G 1.1G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/sqs/merge_split_common_table/std/test-result 1113046 1.7G 1.7G 1.2G └─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/sqs/merge_split_common_table/std/test-result Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/sqs/merge_split_common_table/std/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/sqs/merge_split_common_table/std/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/sqs/merge_split_common_table/std/test-results/py3test/testing_out_stuff/stderr ydb/tests/functional/tpc/medium [size:medium] nchunks:9 ------ [test_clickbench.py] chunk ran 130 tests (total:615.81s - setup:0.02s test:600.09s) Chunk exceeded 600s timeout and was killed List of the tests involved in the launch: test_clickbench.py::TestClickbench::test_clickbench[Query00] (good) duration: 62.91s test_clickbench.py::TestClickbench::test_clickbench[Query23] (good) duration: 52.05s test_clickbench.py::TestClickbench::test_clickbench[Query29] (good) duration: 48.34s test_clickbench.py::TestClickbenchParallel::test[Query00] (timeout) duration: 39.18s test_clickbench.py::TestClickbench::test_clickbench[Query30] (good) duration: 21.46s test_clickbench.py::TestClickbench::test_clickbench[Query31] (good) duration: 19.54s test_clickbench.py::TestClickbench::test_clickbench[Query22] (good) duration: 16.59s test_clickbench.py::TestClickbench::test_clickbench[Query34] (good) duration: 15.89s test_clickbench.py::TestClickbench::test_clickbench[Query35] 
(good) duration: 14.89s test_clickbench.py::TestClickbench::test_clickbench[Query33] (good) duration: 14.81s 34 more tests with 305.09s total duration are not listed. 86 tests were not launched inside chunk. Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/stderr [timeout] test_clickbench.py::TestClickbenchParallel::test[Query00] [default-linux-x86_64-release-asan] (39.18s) Killed by timeout (600 s) Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_clickbench.py.TestClickbenchParallel.test.Query00.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff ------ [test_tpch.py] chunk ran 22 tests (total:634.35s - test:600.03s) Chunk exceeded 600s timeout, failed to shutdown gracefully in 30s and was terminated using SIGQUIT signal List of the tests involved in the launch: test_tpch.py::TestTpchS1::test_tpch[1] (timeout) duration: 628.60s 21 tests were not launched inside chunk. Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/stderr [timeout] test_tpch.py::TestTpchS1::test_tpch[1] [default-linux-x86_64-release-asan] (628.60s) Killed by timeout (600 s) Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_tpch.py.TestTpchS1.test_tpch.1.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff ------ [test_upload.py] chunk ran 1 test (total:634.80s - test:600.08s) Chunk exceeded 600s timeout, failed to shutdown gracefully in 30s and was terminated using SIGQUIT signal List of the tests involved in the launch: test_upload.py::TestUploadTpchS1::test (timeout) duration: 629.20s Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/stderr [timeout] test_upload.py::TestUploadTpchS1::test [default-linux-x86_64-release-asan] (629.20s) Killed by timeout (600 s) Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_upload.py.TestUploadTpchS1.test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff ------ [test_workload_oltp.py] chunk ran 1 test (total:53.20s - test:53.15s) [fail] test_workload_oltp.py::TestWorkloadSimpleQueue::test_workload_oltp [default-linux-x86_64-release-asan] (45.94s) ydb/tests/olap/load/lib/workload_oltp.py:29: in test_workload_oltp 
self.execute_workload_test( ydb/tests/olap/load/lib/workload_executor.py:224: in execute_workload_test return self._execute_workload_with_deployment( ydb/tests/olap/load/lib/workload_executor.py:251: in _execute_workload_with_deployment final_result = self._finalize_workload_results( ydb/tests/olap/load/lib/workload_executor.py:358: in _finalize_workload_results self.process_workload_result_with_diagnostics(overall_result, workload_name, False) ydb/tests/olap/load/lib/conftest.py:756: in process_workload_result_with_diagnostics raise exc E Failed: WORKLOAD EXECUTION FAILED: OltpWorkload E Main error: All 1 runs failed to execute successfully E E Execution details: E Total iterations attempted: 1 E E FAILED ITERATIONS (1): E - Iteration 1: Workload execution failed. stderr: FATAL: exceptions must derive from BaseException E Traceback (most recent call last): E File "ydb/tests/stress/common/common.py", line 91, in wrapper E f() E File "ydb/tests/stress/oltp_workload/workload/type/select_partition.py", line 25, in _loop E raise "partitiont > 1" E TypeError: exceptions must derive from BaseException E (time: 3.9s) E E STDERR (last 500 chars): E === Run 1 stderr === E FATAL: exceptions must derive from BaseException E Traceback (most recent call last): E File "ydb/tests/stress/common/common.py", line 91, in wrapper E f() E File "ydb/tests/stress/oltp_workload/workload/type/select_partition.py", line 25, in _loop E raise "partitiont > 1" E TypeError: exceptions must derive from BaseException E E RUN STATISTICS: E Successful runs: 0/1 E Failed runs: 1 E Success rate: 0.0% Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_workload_oltp.py.TestWorkloadSimpleQueue.test_workload_oltp.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff ------ [test_workload_simple_queue.py] chunk ran 2 tests (total:193.39s - setup:0.06s test:193.21s) [fail] test_workload_simple_queue.py::TestWorkloadSimpleQueue::test_workload_simple_queue[row] [default-linux-x86_64-release-asan] (43.79s) ydb/tests/olap/load/lib/workload_simple_queue.py:38: in test_workload_simple_queue self.execute_workload_test( ydb/tests/olap/load/lib/workload_executor.py:224: in execute_workload_test return self._execute_workload_with_deployment( ydb/tests/olap/load/lib/workload_executor.py:251: in _execute_workload_with_deployment final_result = self._finalize_workload_results( ydb/tests/olap/load/lib/workload_executor.py:358: in _finalize_workload_results self.process_workload_result_with_diagnostics(overall_result, workload_name, False) ydb/tests/olap/load/lib/conftest.py:756: in process_workload_result_with_diagnostics raise exc E Failed: WORKLOAD EXECUTION FAILED: SimpleQueue_row E Main error: All 1 runs failed to execute successfully E E Execution details: E Total iterations attempted: 1 E E FAILED ITERATIONS (1): E - Iteration 1: Workload execution failed. 
stderr: Command failed with exit code -15, but stderr is empty (time: 10.2s) E E STDERR (last 500 chars): E === Run 1 stderr === E Command failed with exit code -15, but stderr is empty E E RUN STATISTICS: E Successful runs: 0/1 E Failed runs: 1 E Success rate: 0.0% Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_workload_simple_queue.py.TestWorkloadSimpleQueue.test_workload_simple_queue.row.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff ------ TIMEOUT: 59 - GOOD, 2 - FAIL, 107 - NOT_LAUNCHED, 3 - TIMEOUT ydb/tests/functional/tpc/medium ydb/tests/olap/delete [size:medium] nchunks:10 ------ [1/10] chunk ran 1 test (total:615.85s - test:600.07s) Chunk exceeded 600s timeout and was killed List of the tests involved in the launch: test_delete_by_explicit_row_id.py::TestDeleteByExplicitRowId::test_delete_row_by_explicit_row_id (timeout) duration: 611.47s Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/delete/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/delete/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/delete/test-results/py3test/testing_out_stuff/stderr [timeout] test_delete_by_explicit_row_id.py::TestDeleteByExplicitRowId::test_delete_row_by_explicit_row_id [default-linux-x86_64-release-asan] (611.47s) Killed by timeout (600 s) Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/delete/test-results/py3test/testing_out_stuff/test_delete_by_explicit_row_id.py.TestDeleteByExplicitRowId.test_delete_row_by_explicit_row_id.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/delete/test-results/py3test/testing_out_stuff ------ TIMEOUT: 1 - TIMEOUT, 1 - SKIPPED ydb/tests/olap/delete ydb/tests/olap/s3_import [size:medium] ------ sole chunk ran 1 test (total:632.17s - test:600.03s) Chunk exceeded 600s timeout, failed to shutdown gracefully in 30s and was terminated using SIGQUIT signal List of the tests involved in the launch: test_tpch_import.py::TestS3TpchImport::test_import_and_export (timeout) duration: 625.97s Info: Test run has exceeded 8.0G (8388608K) memory limit with 15.1G (15786688K) used. 
This may lead to test failure on the Autocheck/CI You can increase test's ram requirement using REQUIREMENTS(ram:X) in the ya.make pid rss ref pdirt 1096418 45.3M 43.7M 6.2M test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1096584 36.1M 23.6M 12.1M └─ test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 1096594 596M 532M 514M └─ ydb-tests-olap-s3_import --basetemp /home/runner/actions_runner/_work/ydb/ydb/tmp/out/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doct 1099699 11.7G 11.6G 11.1G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/s3_import/test-results/py3test/testing_out_stuff/t 1102966 391M 321M 358M ├─ moto_server s3 --port 23590 1117993 2.5G 2.5G 2.5G └─ ydb -e grpc://localhost:22294 -d /Root workload tpch import generator --scale 1 Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/s3_import/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/s3_import/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/s3_import/test-results/py3test/testing_out_stuff/stderr [timeout] test_tpch_import.py::TestS3TpchImport::test_import_and_export [default-linux-x86_64-release-asan] (625.97s) Killed by timeout (600 s) Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/s3_import/test-results/py3test/testing_out_stuff/test_tpch_import.py.TestS3TpchImport.test_import_and_export.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/s3_import/test-results/py3test/testing_out_stuff ------ TIMEOUT: 1 - TIMEOUT ydb/tests/olap/s3_import ydb/tests/olap/scenario [size:medium] nchunks:10 ------ [0/10] chunk ran 2 tests (total:606.69s - setup:0.01s test:600.09s) Chunk exceeded 600s timeout and was killed List of the tests involved in the launch: test_alter_compression.py::TestAlterCompression::test[alter_compression] (good) duration: 578.86s test_alter_compression.py::TestAlterCompression::test_multi[alter_compression] (good) duration: 0.01s Killed by timeout (600 s) Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/scenario/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/scenario/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/scenario/test-results/py3test/testing_out_stuff/stderr ------ [1/10] chunk ran 2 tests (total:207.53s - setup:0.02s test:206.28s) [fail] test_alter_tiering.py::TestAlterTiering::test[many_tables] [default-linux-x86_64-release-asan] (163.59s) ydb/tests/olap/scenario/conftest.py:125: in test raise errors[0] ydb/tests/olap/scenario/conftest.py:84: in worker self._test_suffix(local_ctx, suffix, codes, idx) ydb/tests/olap/scenario/conftest.py:135: in _test_suffix ctx.executable(self, ctx) ydb/tests/olap/scenario/test_alter_tiering.py:357: in scenario_many_tables threads.start_and_wait_all() ydb/tests/olap/common/thread_helper.py:49: in start_and_wait_all self.join_all() ydb/tests/olap/common/thread_helper.py:45: in join_all thread.join(timeout=timeout) ydb/tests/olap/common/thread_helper.py:18: in join raise self.exc ydb/tests/olap/common/thread_helper.py:11: in run self.ret = self._target(*self._args, **self._kwargs) ydb/tests/olap/scenario/test_alter_tiering.py:245: in _loop_scan 
sth.execute_scan_query( ydb/tests/olap/scenario/helpers/scenario_tests_helper.py:476: in execute_scan_query for result_set in it: contrib/python/ydb/py3/ydb/_utilities.py:173: in __next__ return self._next() contrib/python/ydb/py3/ydb/_utilities.py:164: in _next res = self.wrapper(next(self.it)) contrib/python/ydb/py3/ydb/table.py:1212: in lambda resp: _wrap_scan_query_response(resp, self._table_client_settings), contrib/python/ydb/py3/ydb/table.py:982: in _wrap_scan_query_response issues._process_response(response) contrib/python/ydb/py3/ydb/issues.py:229: in _process_response raise exc_obj(_format_response(response_proto), response_proto.issues) E ydb.issues.GenericError: message: "Scan failed at tablet 72075186224037946, reason: task_error:Error reading blob range for data: { Blob: DS:4294967295:[72075186224037946:1:1:255:1:5304:0] Offset: 0 Size: 5304 }, error: cannot get blob: curlCode: 28, Timeout was reached, detailed error: , curlCode: 28, Timeout was reached, status: ERROR" issue_code: 2013 severity: 1 (server_code: 400080) Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/scenario/test-results/py3test/testing_out_stuff/test_alter_tiering.py.TestAlterTiering.test.many_tables.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/scenario/test-results/py3test/testing_out_stuff ------ TIMEOUT: 17 - GOOD, 1 - FAIL ydb/tests/olap/scenario ydb/tests/olap/ttl_tiering [size:medium] nchunks:10 ------ [0/10] chunk ran 1 test (total:246.79s - setup:0.01s test:232.90s) Info: Test run has exceeded 8.0G (8388608K) memory limit with 8.2G (8641748K) used. This may lead to test failure on the Autocheck/CI You can increase test's ram requirement using REQUIREMENTS(ram:X) in the ya.make pid rss ref pdirt 359356 45.3M 45.2M 6.3M test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 359497 31.1M 19.2M 7.1M └─ test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 359504 3.9G 4.0G 4.0G └─ ydb-tests-olap-ttl_tiering --basetemp /home/runner/actions_runner/_work/ydb/ydb/tmp/out/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doc 360795 3.5G 3.5G 2.9G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/ 362600 722M 722M 690M └─ moto_server s3 --port 25785 Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/ttl_tiering/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/stderr ------ [3/10] chunk ran 2 tests (total:614.53s - test:600.09s) Chunk exceeded 600s timeout and was killed List of the tests involved in the launch: ttl_delete_s3.py::TestDeleteS3Ttl::test_delete_s3_tiering (timeout) duration: 335.55s ttl_delete_s3.py::TestDeleteS3Ttl::test_data_unchanged_after_ttl_change (good) duration: 264.97s Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/ttl_tiering/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/stderr [timeout] 
ttl_delete_s3.py::TestDeleteS3Ttl::test_delete_s3_tiering [default-linux-x86_64-release-asan] (335.55s) Killed by timeout (600 s) Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/ttl_delete_s3.py.TestDeleteS3Ttl.test_delete_s3_tiering.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff ------ [4/10] chunk ran 1 test (total:618.64s - setup:0.01s test:600.12s) Chunk exceeded 600s timeout and was killed List of the tests involved in the launch: ttl_delete_s3.py::TestDeleteTtl::test_ttl_delete (timeout) duration: 608.74s Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/ttl_tiering/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/stderr [timeout] ttl_delete_s3.py::TestDeleteTtl::test_ttl_delete [default-linux-x86_64-release-asan] (608.74s) Killed by timeout (600 s) Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/ttl_delete_s3.py.TestDeleteTtl.test_ttl_delete.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff ------ TIMEOUT: 6 - GOOD, 2 - TIMEOUT ydb/tests/olap/ttl_tiering ydb/tests/stress/log/tests [size:medium] ------ sole chunk ran 2 tests (total:243.36s - setup:0.01s test:243.29s) [fail] test_workload.py::TestYdbLogWorkload::test[column] [default-linux-x86_64-release-asan] (25.90s) ydb/tests/stress/log/tests/test_workload.py:76: in test yatest.common.execute(command, wait=True) library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:422: in _finalise raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/apps/ydb/ydb --verbose --endpoint grpc://localhost:62064 --database=/Root workload log import --bulk-size 1000 -t 1 generator --path column --int-cols 2 --str-cols 5 --key-cols 4 --len 200 --rows 100000' has failed with code 1. E Errors: E ...ror: Grpc error response on endpoint ghrun-4pjfmvffsq.auto.internal:62064 E E Bulk upset to /Root/column failed, TRANSPORT_UNAVAILABLE,
E     : Error: GRpc error: (14): recvmsg:Connection reset by peer
E     : Error: Grpc error response on endpoint ghrun-4pjfmvffsq.auto.internal:62064
E
E     Bulk upset to /Root/column failed, TRANSPORT_UNAVAILABLE,
E     : Error: GRpc error: (14): recvmsg:Connection reset by peer
E     : Error: Grpc error response on endpoint ghrun-4pjfmvffsq.auto.internal:62064
E
E     Bulk upset to /Root/column failed, TRANSPORT_UNAVAILABLE,
E     : Error: GRpc error: (14): recvmsg:Connection reset by peer
E     : Error: Grpc error response on endpoint ghrun-4pjfmvffsq.auto.internal:62064
E
E     Bulk upset to /Root/column failed, TRANSPORT_UNAVAILABLE,
E     : Error: GRpc error: (14): recvmsg:Connection reset by peer
E     : Error: Grpc error response on endpoint ghrun-4pjfmvffsq.auto.internal:62064
E
E     Bulk upset to /Root/column failed, TRANSPORT_UNAVAILABLE,
E     : Error: GRpc error: (14): recvmsg:Connection reset by peer
E     : Error: Grpc error response on endpoint ghrun-4pjfmvffsq.auto.internal:62064
E
E     Bulk upset to /Root/column failed, TRANSPORT_UNAVAILABLE,
E     : Error: GRpc error: (14): Broken pipe
E     : Error: Grpc error response on endpoint ghrun-4pjfmvffsq.auto.internal:62064
E
E     Bulk upset to /Root/column failed, TRANSPORT_UNAVAILABLE,
: Error: GRpc error: (14): Broke ..[snippet truncated].. NYql::NNodes::TExprBase, NYql::TExprContext&))::'lambda'(NYql::NNodes::TExprBase, NYql::TExprContext&, NYql::IOptimizationContext&, std::__y1::function, std::__y1::equal_to, std::__y1::allocator>, std::__y1::hash, std::__y1::equal_to, std::__y1::allocator, std::__y1::equal_to, std::__y1::allocator>>>> const* ()> const&)::operator()(NYql::NNodes::TExprBase, NYql::TExprContext&, NYql::IOptimizationContext&, std::__y1::function, std::__y1::equal_to, std::__y1::allocator>, std::__y1::hash, std::__y1::equal_to, std::__y1::allocator, std::__y1::equal_to, std::__y1::allocator>>>> const* ()> const&) const /-S/yql/essentials/providers/common/transform/yql_optimize.h:86:20 E #15 0x4e495ab7 in decltype(std::declval()(std::declval(), std::declval(), std::declval(), std::declval, std::__y1::equal_to, std::__y1::allocator>, std::__y1::hash, std::__y1::equal_to, std::__y1::allocator, std::__y1::equal_to, std::__y1::allocator>>>> const* ()> const&>())) std::__y1::__invoke[abi:fe200000] (NYql... Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/stress/log/tests/test-results/py3test/testing_out_stuff/test_workload.py.TestYdbLogWorkload.test.column.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/stress/log/tests/test-results/py3test/testing_out_stuff ------ FAIL: 1 - GOOD, 1 - FAIL ydb/tests/stress/log/tests ydb/tests/stress/olap_workload/tests [size:medium] ------ sole chunk ran 1 test (total:89.60s - setup:0.02s test:89.45s) [fail] test_workload.py::TestYdbWorkload::test [default-linux-x86_64-release-asan] (79.44s) ydb/tests/stress/olap_workload/tests/test_workload.py:19: in test yatest.common.execute([ library/python/testing/yatest_common/yatest/common/process.py:656: in execute res.wait(check_exit_code, timeout, on_timeout) library/python/testing/yatest_common/yatest/common/process.py:411: in wait self._finalise(check_exit_code) library/python/testing/yatest_common/yatest/common/process.py:422: in _finalise raise ExecutionError(self) E yatest.common.process.ExecutionError: Command '/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/stress/olap_workload/olap_workload --endpoint grpc://localhost:11162 --database /Root --duration 120' has failed with code 1. 
E Errors: E ...^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ E File "contrib/python/ydb/py3/ydb/retries.py", line 133, in retry_operation_sync E for next_opt in opt_generator: E ^^^^^^^^^^^^^ E File "contrib/python/ydb/py3/ydb/retries.py", line 94, in retry_operation_impl E result = YdbRetryOperationFinalResult(callee(*args, **kwargs)) E ^^^^^^^^^^^^^^^^^^^^^^^ E File "contrib/python/ydb/py3/ydb/query/pool.py", line 200, in wrapped_callee E with self.checkout(timeout=retry_settings.max_session_acquire_timeout) as session: E ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ E File "contrib/python/ydb/py3/ydb/query/pool.py", line 237, in __enter__ E self._session = self._pool.acquire(self._timeout) E ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ E File "contrib/python/ydb/py3/ydb/query/pool.py", line 104, in acquire E session = self._create_new_session(time_left) E ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ E File "contrib/python/ydb/py3/ydb/query/pool.py", line 56, in _create_new_session E session.create(settings=BaseRequestSettings().with_timeout(timeout)) E File "contrib/python/ydb/py3/ydb/query/session.py", line 299, in create E self._attach() E File "contrib/python/ydb/py3/ydb/query/session.py", line 253, in _attach E raise e E File "contrib/python/ydb/py3/ydb/query/session.py", line 249, in _attach E raise RuntimeError("Failed to attach session") E RuntimeError: Failed to attach session Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/stress/olap_workload/tests/test-results/py3test/testing_out_stuff/test_workload.py.TestYdbWorkload.test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/stress/olap_workload/tests/test-results/py3test/testing_out_stuff ------ FAIL: 1 - FAIL ydb/tests/stress/olap_workload/tests ydb/tests/stress/reconfig_state_storage_workload/tests [size:medium] ------ sole chunk ran 3 tests (total:101.71s - setup:0.01s test:101.34s) Test failed with 1 exit code. See logs for more info Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/stress/reconfig_state_storage_workload/tests/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/stress/reconfig_state_storage_workload/tests/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/stress/reconfig_state_storage_workload/tests/test-results/py3test/testing_out_stuff/stderr [crashed] test_board_workload.py::TestReconfigStateStorageBoardWorkload::test_state_storage_board [default-linux-x86_64-release-asan] (0.00s) Test crashed Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/stress/reconfig_state_storage_workload/tests/test-results/py3test/testing_out_stuff/test_board_workload.py.TestReconfigStateStorageBoardWorkload.test_state_storage_board.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/stress/reconfig_state_storage_workload/tests/test-results/py3test/testing_out_stuff ------ FAIL: 2 - NOT_LAUNCHED, 1 - CRASHED ydb/tests/stress/reconfig_state_storage_workload/tests ------ [test_disk.py 0/10] chunk ran 1 test (total:42.62s - setup:0.02s test:42.50s) Info: Test run has exceeded 8.0G (8388608K) memory limit with 13.5G (14145020K) used. 
This may lead to test failure on the Autocheck/CI You can increase test's ram requirement using REQUIREMENTS(ram:X) in the ya.make pid rss ref pdirt 503898 45.3M 45.2M 6.2M test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 504031 32.7M 20.7M 8.5M └─ test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 504055 740M 743M 660M └─ ydb-tests-tools-nemesis-ut --basetemp /home/runner/actions_runner/_work/ydb/ydb/tmp/out/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doc 505299 1.4G 1.4G 1007M ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/ 505300 1.4G 1.4G 998M ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/ 505301 1.4G 1.4G 1005M ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/ 505302 1.4G 1.4G 996M ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/ 505303 1.4G 1.4G 1003M ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/ 505306 1.4G 1.4G 1009M ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/ 505308 1.4G 1.4G 1003M ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/ 505309 1.4G 1.4G 1011M ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/ 505310 1.4G 1.4G 1000M └─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/ Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/stderr ------ [test_tablet.py 0/10] chunk ran 1 test (total:118.61s - setup:0.01s test:118.34s) Info: Test run has exceeded 8.0G (8388608K) memory limit with 14.2G (14840764K) used. 
This may lead to test failure on the Autocheck/CI You can increase test's ram requirement using REQUIREMENTS(ram:X) in the ya.make pid rss ref pdirt 507239 45.3M 44.9M 6.2M test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 507385 32.5M 20.5M 8.2M └─ test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args 507406 722M 722M 642M └─ ydb-tests-tools-nemesis-ut --basetemp /home/runner/actions_runner/_work/ydb/ydb/tmp/out/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doc 508201 1.7G 1.7G 1.3G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/ 508207 1.6G 1.6G 1.2G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/ 508208 1.7G 1.7G 1.2G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/ 508211 1.6G 1.6G 1.2G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/ 508212 1.6G 1.7G 1.2G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/ 508213 1.7G 1.7G 1.2G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/ 508214 1.7G 1.7G 1.3G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/ 508215 1.7G 1.7G 1.2G └─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/ Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/tools/nemesis/ut/test-results/py3test/testing_out_stuff/stderr ydb/core/blobstorage/pdisk/ut [size:medium] nchunks:10 ------ [3/10] chunk ran 21 tests (total:75.52s - setup:0.03s test:75.36s) [fail] TPDiskTest::PDiskSlotSizeInUnits [default-linux-x86_64-release-asan] (12.64s) (TWithBackTrace) Event queue is still empty.ydb/library/actors/testlib/test_runtime.cpp:1375: TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 TWithBackTrace::TWithBackTrace<>() at /-S/util/generic/yexception.h:146:5 NActors::TTestActorRuntimeBase::DispatchEventsInternal(NActors::TDispatchOptions const&, TInstant) at /-S/ydb/library/actors/testlib/test_runtime.cpp:0:24 NActors::TTestActorRuntimeBase::WaitForEdgeEvents(std::__y1::function&)>, TSet, std::__y1::allocator> const&, TDuration) at /-S/ydb/library/actors/testlib/test_runtime.cpp:1554:0 __root at /-S/contrib/libs/cxxsupp/libcxx/include/__tree:969:54 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 NKikimr::NTestSuiteTPDiskTest::AwaitAndCheckEvPDiskStateUpdate(NKikimr::TActorTestContext&, unsigned int, unsigned int) at 
/-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut.cpp:1074:13 NKikimr::NTestSuiteTPDiskTest::TTestCasePDiskSlotSizeInUnits::Execute_(NUnitTest::TTestContext&) at /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut.cpp:0:9 operator() at /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/blobstorage/pdisk/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/blobstorage/pdisk/ut/test-results/unittest/testing_out_stuff/TPDiskTest.PDiskSlotSizeInUnits.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/blobstorage/pdisk/ut/test-results/unittest/testing_out_stuff/TPDiskTest.PDiskSlotSizeInUnits.out ------ [4/10] chunk ran 21 tests (total:267.41s - test:267.31s) [fail] TPDiskTest::TestStartEncryptedOrPlainAndRestart [default-linux-x86_64-release-asan] (151.16s) (TWithBackTrace) Event queue is still empty.ydb/library/actors/testlib/test_runtime.cpp:1375: TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 TWithBackTrace::TWithBackTrace<>() at /-S/util/generic/yexception.h:146:5 NActors::TTestActorRuntimeBase::DispatchEventsInternal(NActors::TDispatchOptions const&, TInstant) at /-S/ydb/library/actors/testlib/test_runtime.cpp:0:24 NActors::TTestActorRuntimeBase::WaitForEdgeEvents(std::__y1::function&)>, TSet, std::__y1::allocator> const&, TDuration) at /-S/ydb/library/actors/testlib/test_runtime.cpp:1554:0 __root at /-S/contrib/libs/cxxsupp/libcxx/include/__tree:969:54 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 Get at /-S/util/generic/ptr.h:337:16 operator() at /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/blobstorage/pdisk/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/blobstorage/pdisk/ut/test-results/unittest/testing_out_stuff/TPDiskTest.TestStartEncryptedOrPlainAndRestart.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/blobstorage/pdisk/ut/test-results/unittest/testing_out_stuff/TPDiskTest.TestStartEncryptedOrPlainAndRestart.out ------ FAIL: 209 - GOOD, 2 - FAIL ydb/core/blobstorage/pdisk/ut ------ [1/10] chunk ran 1 test (total:298.59s - setup:0.05s test:298.29s) Info: Test run has exceeded 8.0G (8388608K) memory limit with 9.5G (9999108K) used. 
This may lead to test failure on the Autocheck/CI
You can increase test's ram requirement using REQUIREMENTS(ram:X) in the ya.make
pid    rss    ref    pdirt
90676  45.3M  45.3M  6.2M   test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args
90718  34.7M  22.6M  10.5M  └─ test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args
90725  47.4M  47.4M  23.9M     └─ test_tool run_ut @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/blobstorage/ut_blobstorage/ut_balancing/test-results/unittest/testing
91021  9.5G   9.4G   9.4G         └─ ydb-core-blobstorage-ut_blobstorage-ut_balancing --trace-path-append /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/blobstorage/ut_blobstorage/ut_balancing/
Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/blobstorage/ut_blobstorage/ut_balancing/test-results/unittest/run_test.log
Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/blobstorage/ut_blobstorage/ut_balancing/test-results/unittest/testing_out_stuff
Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/blobstorage/ut_blobstorage/ut_balancing/test-results/unittest/testing_out_stuff/stderr

ydb/core/kafka_proxy/ut [size:medium]
------ sole chunk ran 105 tests (total:605.20s - test:600.03s)
Chunk exceeded 600s timeout and was killed
List of the tests involved in the launch:
  KafkaProtocol::NativeKafkaBalanceScenario (good) duration: 114.62s
  KafkaProtocol::CommitTransactionScenario (good) duration: 41.19s
  KafkaProtocol::AbortTransactionScenario (good) duration: 38.51s
  KafkaProtocol::ProducerFencedInTransactionScenario (timeout) duration: 35.46s
  KafkaProtocol::BalanceScenario (good) duration: 34.66s
  KafkaProtocol::BalanceScenarioForFederation (good) duration: 32.73s
  KafkaProtocol::InitProducerId_forPreviouslySeenTransactionalIdShouldReturnNewProducerIdIfEpochOverflown (good) duration: 21.16s
  KafkaProtocol::TransactionShouldBeAbortedIfPartitionIsAddedToTransactionButNoWritesToItWereReceived (good) duration: 21.01s
  KafkaProtocol::InitProducerId_forPreviouslySeenTransactionalIdShouldReturnSameProducerIdAndIncrementEpoch (good) duration: 20.07s
  KafkaProtocol::InitProducerId_forSqlInjectionShouldReturnWithoutDropingDatabase (good) duration: 18.95s
  23 more tests with 216.51s total duration are not listed.
72 tests were not launched inside chunk.
Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kafka_proxy/ut/test-results/unittest/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kafka_proxy/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kafka_proxy/ut/test-results/unittest/testing_out_stuff/stderr [timeout] KafkaProtocol::ProducerFencedInTransactionScenario [default-linux-x86_64-release-asan] (35.46s) Killed by timeout (600 s) Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kafka_proxy/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kafka_proxy/ut/test-results/unittest/testing_out_stuff/KafkaProtocol.ProducerFencedInTransactionScenario.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kafka_proxy/ut/test-results/unittest/testing_out_stuff/KafkaProtocol.ProducerFencedInTransactionScenario.out ------ TIMEOUT: 32 - GOOD, 72 - NOT_LAUNCHED, 1 - TIMEOUT ydb/core/kafka_proxy/ut ydb/core/keyvalue/ut_trace [size:medium] nchunks:5 ------ [0/5] chunk ran 1 test (total:11.72s - test:11.68s) [fail] TKeyValueTracingTest::ReadHuge [default-linux-x86_64-release-asan] (2.30s) equal assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:124, void TestOneRead(TString, TString): env.WilsonUploader->Traces.size() == 1 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 operator() at /-S/ydb/core/keyvalue/keyvalue_ut_trace.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/keyvalue/ut_trace/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/keyvalue/ut_trace/test-results/unittest/testing_out_stuff/TKeyValueTracingTest.ReadHuge.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/keyvalue/ut_trace/test-results/unittest/testing_out_stuff/TKeyValueTracingTest.ReadHuge.out ------ [1/5] chunk ran 1 test (total:14.73s - test:14.70s) [fail] TKeyValueTracingTest::ReadSmall [default-linux-x86_64-release-asan] (2.41s) equal assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:124, void TestOneRead(TString, TString): env.WilsonUploader->Traces.size() == 1 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 operator() at /-S/ydb/core/keyvalue/keyvalue_ut_trace.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? 
at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/keyvalue/ut_trace/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/keyvalue/ut_trace/test-results/unittest/testing_out_stuff/TKeyValueTracingTest.ReadSmall.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/keyvalue/ut_trace/test-results/unittest/testing_out_stuff/TKeyValueTracingTest.ReadSmall.out ------ [2/5] chunk ran 1 test (total:12.91s - test:12.88s) [fail] TKeyValueTracingTest::WriteHuge [default-linux-x86_64-release-asan] (2.40s) assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:103, void TestOneWrite(TString, TVector &&): (env.WilsonUploader->Traces.size() == 1) failed: (2 != 1) TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 TestOneWrite(TBasicString>, TVector>, std::__y1::allocator>>>&&) at /-S/ydb/core/keyvalue/keyvalue_ut_trace.cpp:103:5 UnRef at /-S/util/generic/ptr.h:640:13 operator() at /-S/ydb/core/keyvalue/keyvalue_ut_trace.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/keyvalue/ut_trace/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/keyvalue/ut_trace/test-results/unittest/testing_out_stuff/TKeyValueTracingTest.WriteHuge.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/keyvalue/ut_trace/test-results/unittest/testing_out_stuff/TKeyValueTracingTest.WriteHuge.out ------ [3/5] chunk ran 1 test (total:14.23s - test:14.20s) [fail] TKeyValueTracingTest::WriteSmall [default-linux-x86_64-release-asan] (2.32s) assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:103, void TestOneWrite(TString, TVector &&): (env.WilsonUploader->Traces.size() == 1) failed: (2 != 1) TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 TestOneWrite(TBasicString>, TVector>, std::__y1::allocator>>>&&) at /-S/ydb/core/keyvalue/keyvalue_ut_trace.cpp:103:5 UnRef at /-S/util/generic/ptr.h:640:13 operator() at /-S/ydb/core/keyvalue/keyvalue_ut_trace.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? 
at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/keyvalue/ut_trace/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/keyvalue/ut_trace/test-results/unittest/testing_out_stuff/TKeyValueTracingTest.WriteSmall.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/keyvalue/ut_trace/test-results/unittest/testing_out_stuff/TKeyValueTracingTest.WriteSmall.out ------ FAIL: 4 - FAIL ydb/core/keyvalue/ut_trace ydb/core/kqp/ut/federated_query/generic_ut nchunks:10 ------ [0/10] chunk ran 4 tests (total:79.69s - test:60.08s) Chunk exceeded 60s timeout and was killed List of the tests involved in the launch: GenericFederatedQuery::ClickHouseFilterPushdown (timeout) duration: 36.04s GenericFederatedQuery::ClickHouseManagedSelectAll (good) duration: 14.80s GenericFederatedQuery::ClickHouseSelectCount (good) duration: 14.08s GenericFederatedQuery::ClickHouseManagedSelectConstant (good) duration: 12.86s Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/stderr [timeout] GenericFederatedQuery::ClickHouseFilterPushdown [default-linux-x86_64-release-asan] (36.04s) Killed by timeout (60 s) Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/GenericFederatedQuery.ClickHouseFilterPushdown.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/GenericFederatedQuery.ClickHouseFilterPushdown.out ------ TIMEOUT: 38 - GOOD, 1 - TIMEOUT ydb/core/kqp/ut/federated_query/generic_ut ydb/core/kqp/ut/indexes [size:medium] nchunks:50 ------ [23/50] chunk ran 3 tests (total:54.71s - test:54.67s) [fail] KqpMultishardIndex::DataColumnWrite+UseSink [default-linux-x86_64-release-asan] (30.70s) assertion failed at ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp:2058, virtual void NKikimr::NKqp::NTestSuiteKqpMultishardIndex::TTestCaseDataColumnWrite::Execute_(NUnitTest::TTestContext &) [UseSink = true]: (result.IsSuccess())
: Error: Kikimr cluster or one of its subsystems is overloaded. Tablet 72075186224037928 is overloaded. Table `/Root/MultiShardIndexedWithDataColumn/index/indexImplTable`., code: 2006
: Error: Rejecting data TxId 281474976715729 because datashard 72075186224037928: is in process of split opId 281474976710657 state SplitSrcMakeSnapshot (wrong shard state), code: 2006 0. /-S/util/system/backtrace.cpp:284: ?? @ 0x1A50589B 1. /tmp//-S/library/cpp/testing/unittest/registar.cpp:46: RaiseError @ 0x1A9CD2EF 2. /tmp//-S/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp:2058: Execute_ @ 0x1A0352DC 3. /tmp//-S/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp:1354: operator() @ 0x1A01A8D7 4. /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149: __invoke<(lambda at /-S/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp:1354:1) &> @ 0x1A01A8D7 5. /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224: __call<(lambda at /-S/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp:1354:1) &> @ 0x1A01A8D7 6. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169: operator() @ 0x1A01A8D7 7. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314: operator() @ 0x1A01A8D7 8. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431: operator() @ 0x1AA044D5 9. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990: operator() @ 0x1AA044D5 10. /tmp//-S/library/cpp/testing/unittest/utmain.cpp:525: Run @ 0x1AA044D5 11. /tmp//-S/library/cpp/testing/unittest/registar.cpp:373: Run @ 0x1A9D3E78 12. /tmp//-S/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp:1354: Execute @ 0x1A019AA3 13. /tmp//-S/library/cpp/testing/unittest/registar.cpp:494: Execute @ 0x1A9D5745 14. /tmp//-S/library/cpp/testing/unittest/utmain.cpp:872: RunMain @ 0x1A9FEA4C 15. ??:0: ?? @ 0x7FE59C344D8F 16. ??:0: ?? @ 0x7FE59C344E3F 17. ??:0: ?? @ 0x17021028 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/indexes/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/indexes/test-results/unittest/testing_out_stuff/KqpMultishardIndex.DataColumnWrite.UseSink.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/indexes/test-results/unittest/testing_out_stuff/KqpMultishardIndex.DataColumnWrite.UseSink.out ------ FAIL: 135 - GOOD, 1 - FAIL ydb/core/kqp/ut/indexes ydb/core/kqp/ut/join [size:medium] nchunks:200 ------ [172/200] chunk ran 1 test (total:604.03s - test:600.03s) Chunk exceeded 600s timeout and was killed List of the tests involved in the launch: KqpJoinOrder::TPCDSEveryQueryWorks+ColumnStore (timeout) duration: 601.47s Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/join/test-results/unittest/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/stderr [timeout] KqpJoinOrder::TPCDSEveryQueryWorks+ColumnStore [default-linux-x86_64-release-asan] (601.47s) Killed by timeout (600 s) Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/KqpJoinOrder.TPCDSEveryQueryWorks.ColumnStore.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/KqpJoinOrder.TPCDSEveryQueryWorks.ColumnStore.out ------ [173/200] chunk ran 1 test (total:603.37s - test:600.06s) Chunk exceeded 600s 
timeout and was killed List of the tests involved in the launch: KqpJoinOrder::TPCDSEveryQueryWorks-ColumnStore (timeout) duration: 600.99s Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/join/test-results/unittest/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/stderr [timeout] KqpJoinOrder::TPCDSEveryQueryWorks-ColumnStore [default-linux-x86_64-release-asan] (600.99s) Killed by timeout (600 s) Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/KqpJoinOrder.TPCDSEveryQueryWorks-ColumnStore.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/KqpJoinOrder.TPCDSEveryQueryWorks-ColumnStore.out ------ [93/200] chunk ran 1 test (total:484.18s - test:484.09s) [fail] KqpJoinOrder::Chain65Nodes [default-linux-x86_64-release-asan] (476.65s) assertion failed at ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:387, void NKikimr::NKqp::TChainTester::JoinTables(): (result.GetStatus() == EStatus::SUCCESS) failed: (TIMEOUT != SUCCESS) , with diff: (TIM|SUCC)E(OUT|SS) 0. /-S/util/system/backtrace.cpp:284: ?? @ 0x19F5618B 1. /tmp//-S/library/cpp/testing/unittest/registar.cpp:46: RaiseError @ 0x1A427E3F 2. /tmp//-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:387: JoinTables @ 0x19B51F4B 3. /tmp//-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:333: Test @ 0x19B05D50 4. /tmp//-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:554: Execute_ @ 0x19B05D50 5. /tmp//-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:552: operator() @ 0x19B4E597 6. /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149: __invoke<(lambda at /-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:552:1) &> @ 0x19B4E597 7. /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224: __call<(lambda at /-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:552:1) &> @ 0x19B4E597 8. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169: operator() @ 0x19B4E597 9. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314: operator() @ 0x19B4E597 10. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431: operator() @ 0x1A45F025 11. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990: operator() @ 0x1A45F025 12. /tmp//-S/library/cpp/testing/unittest/utmain.cpp:525: Run @ 0x1A45F025 13. /tmp//-S/library/cpp/testing/unittest/registar.cpp:373: Run @ 0x1A42E9C8 14. /tmp//-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:552: Execute @ 0x19B4D763 15. /tmp//-S/library/cpp/testing/unittest/registar.cpp:494: Execute @ 0x1A430295 16. /tmp//-S/library/cpp/testing/unittest/utmain.cpp:872: RunMain @ 0x1A45959C 17. ??:0: ?? @ 0x7F47DF3C6D8F 18. ??:0: ?? @ 0x7F47DF3C6E3F 19. ??:0: ?? 
@ 0x16F73028 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/KqpJoinOrder.Chain65Nodes.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/KqpJoinOrder.Chain65Nodes.out ------ TIMEOUT: 260 - GOOD, 1 - FAIL, 2 - TIMEOUT ydb/core/kqp/ut/join ydb/core/kqp/ut/pg [size:medium] nchunks:10 ------ [8/10] chunk ran 11 tests (total:403.47s - setup:0.01s test:403.28s) [fail] KqpPg::TempTablesSessionsIsolation [default-linux-x86_64-release-asan] (7.91s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/pg/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/pg/test-results/unittest/testing_out_stuff/KqpPg.TempTablesSessionsIsolation.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/pg/test-results/unittest/testing_out_stuff/KqpPg.TempTablesSessionsIsolation.out ------ [9/10] chunk ran 11 tests (total:360.48s - test:360.41s) [fail] PgCatalog::CheckSetConfig [default-linux-x86_64-release-asan] (17.00s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/pg/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/pg/test-results/unittest/testing_out_stuff/PgCatalog.CheckSetConfig.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/pg/test-results/unittest/testing_out_stuff/PgCatalog.CheckSetConfig.out ------ FAIL: 112 - GOOD, 2 - FAIL ydb/core/kqp/ut/pg ydb/core/kqp/ut/query [size:medium] nchunks:50 ------ [0/50] chunk ran 5 tests (total:183.95s - test:183.86s) [fail] KqpAnalyze::AnalyzeTable+ColumnStore [default-linux-x86_64-release-asan] (129.36s) assertion failed at ydb/core/kqp/ut/query/kqp_analyze_ut.cpp:103, virtual void NKikimr::NKqp::NTestSuiteKqpAnalyze::TTestCaseAnalyzeTable::Execute_(NUnitTest::TTestContext &) [ColumnStore = true]: (stat >= 1500) 0 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 NKikimr::NKqp::NTestSuiteKqpAnalyze::TTestCaseAnalyzeTable::Execute_(NUnitTest::TTestContext&) at /-S/ydb/core/kqp/ut/query/kqp_analyze_ut.cpp:0:5 operator() at /-S/ydb/core/kqp/ut/query/kqp_analyze_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? 
at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/query/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/query/test-results/unittest/testing_out_stuff/KqpAnalyze.AnalyzeTable.ColumnStore.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/query/test-results/unittest/testing_out_stuff/KqpAnalyze.AnalyzeTable.ColumnStore.out ------ [47/50] chunk ran 4 tests (total:178.69s - test:178.64s) [fail] KqpStats::SysViewClientLost [default-linux-x86_64-release-asan] (96.16s) assertion failed at ydb/core/kqp/ut/query/kqp_stats_ut.cpp:591, virtual void NKikimr::NKqp::NTestSuiteKqpStats::TTestCaseSysViewClientLost::Execute_(NUnitTest::TTestContext &): (timeoutedCount == 1) 0. /-S/util/system/backtrace.cpp:284: ?? @ 0x1A421FDB 1. /tmp//-S/library/cpp/testing/unittest/registar.cpp:46: RaiseError @ 0x1A8E902F 2. /tmp//-S/ydb/core/kqp/ut/query/kqp_stats_ut.cpp:591: Execute_ @ 0x19FBABD8 3. /tmp//-S/ydb/core/kqp/ut/query/kqp_stats_ut.cpp:18: operator() @ 0x19FCDD57 4. /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149: __invoke<(lambda at /-S/ydb/core/kqp/ut/query/kqp_stats_ut.cpp:18:1) &> @ 0x19FCDD57 5. /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224: __call<(lambda at /-S/ydb/core/kqp/ut/query/kqp_stats_ut.cpp:18:1) &> @ 0x19FCDD57 6. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169: operator() @ 0x19FCDD57 7. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314: operator() @ 0x19FCDD57 8. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431: operator() @ 0x1A920215 9. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990: operator() @ 0x1A920215 10. /tmp//-S/library/cpp/testing/unittest/utmain.cpp:525: Run @ 0x1A920215 11. /tmp//-S/library/cpp/testing/unittest/registar.cpp:373: Run @ 0x1A8EFBB8 12. /tmp//-S/ydb/core/kqp/ut/query/kqp_stats_ut.cpp:18: Execute @ 0x19FCCEDB 13. /tmp//-S/library/cpp/testing/unittest/registar.cpp:494: Execute @ 0x1A8F1485 14. /tmp//-S/library/cpp/testing/unittest/utmain.cpp:872: RunMain @ 0x1A91A78C 15. ??:0: ?? @ 0x7F1829D8AD8F 16. ??:0: ?? @ 0x7F1829D8AE3F 17. ??:0: ?? 
@ 0x17055028 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/query/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/query/test-results/unittest/testing_out_stuff/KqpStats.SysViewClientLost.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/query/test-results/unittest/testing_out_stuff/KqpStats.SysViewClientLost.out ------ FAIL: 203 - GOOD, 2 - FAIL ydb/core/kqp/ut/query ydb/core/kqp/ut/runtime [size:medium] nchunks:10 ------ [1/10] chunk ran 2 tests (total:621.79s - test:600.05s) Chunk exceeded 600s timeout and was killed List of the tests involved in the launch: KqpScanLogs::GraceJoin+EnabledLogs (good) duration: 388.08s KqpScanLogs::GraceJoin-EnabledLogs (timeout) duration: 219.01s Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/runtime/test-results/unittest/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/stderr [timeout] KqpScanLogs::GraceJoin-EnabledLogs [default-linux-x86_64-release-asan] (219.01s) Killed by timeout (600 s) Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/KqpScanLogs.GraceJoin-EnabledLogs.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/KqpScanLogs.GraceJoin-EnabledLogs.out ------ TIMEOUT: 11 - GOOD, 1 - TIMEOUT ydb/core/kqp/ut/runtime ydb/core/kqp/ut/view [size:medium] ------ sole chunk ran 23 tests (total:282.44s - test:282.38s) [fail] TCreateAndDropViewTest::DropNonexistingView [default-linux-x86_64-release-asan] (13.99s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/view/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/view/test-results/unittest/testing_out_stuff/TCreateAndDropViewTest.DropNonexistingView.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/view/test-results/unittest/testing_out_stuff/TCreateAndDropViewTest.DropNonexistingView.out [fail] TCreateAndDropViewTest::CallDropViewOnTable [default-linux-x86_64-release-asan] (10.62s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/view/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/view/test-results/unittest/testing_out_stuff/TCreateAndDropViewTest.CallDropViewOnTable.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/view/test-results/unittest/testing_out_stuff/TCreateAndDropViewTest.CallDropViewOnTable.out [fail] TCreateAndDropViewTest::DropViewIfExists [default-linux-x86_64-release-asan] (12.11s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. 
TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/view/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/view/test-results/unittest/testing_out_stuff/TCreateAndDropViewTest.DropViewIfExists.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/kqp/ut/view/test-results/unittest/testing_out_stuff/TCreateAndDropViewTest.DropViewIfExists.out ------ FAIL: 20 - GOOD, 3 - FAIL ydb/core/kqp/ut/view ydb/core/mind/hive/ut [size:medium] nchunks:10 ------ [2/10] chunk ran 15 tests (total:608.52s - test:600.09s) Chunk exceeded 600s timeout and was killed List of the tests involved in the launch: THiveTest::TestCreateSubHiveCreateManyTablets (timeout) duration: 351.92s THiveTest::TestCheckSubHiveMigrationManyTablets (good) duration: 181.15s THiveTest::TestCreate100Tablets (good) duration: 48.78s THiveTest::TestCreateSubHiveCreateTablet (good) duration: 5.68s THiveTest::TestCreateTablet (good) duration: 4.18s 10 tests were not launched inside chunk. Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/mind/hive/ut/test-results/unittest/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/mind/hive/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/mind/hive/ut/test-results/unittest/testing_out_stuff/stderr [timeout] THiveTest::TestCreateSubHiveCreateManyTablets [default-linux-x86_64-release-asan] (351.92s) Killed by timeout (600 s) Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/mind/hive/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/mind/hive/ut/test-results/unittest/testing_out_stuff/THiveTest.TestCreateSubHiveCreateManyTablets.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/mind/hive/ut/test-results/unittest/testing_out_stuff/THiveTest.TestCreateSubHiveCreateManyTablets.out ------ TIMEOUT: 138 - GOOD, 10 - NOT_LAUNCHED, 1 - TIMEOUT ydb/core/mind/hive/ut ydb/core/statistics/aggregator/ut [size:medium] nchunks:60 ------ [4/60] chunk ran 1 test (total:597.06s - test:597.02s) [fail] AnalyzeColumnshard::AnalyzeRebootColumnShard [default-linux-x86_64-release-asan] (580.91s) (TWithBackTrace) ydb/library/actors/testlib/test_runtime.h:579: Exception occured while waiting for NKikimr::NStat::TEvStatistics::TEvAnalyzeResponse: (NActors::TSchedulingLimitReachedException) TestActorRuntime Processed over 100000 events.ydb/library/actors/testlib/test_runtime.cpp:716: TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 TWithBackTrace::TWithBackTrace<>() at /-S/util/generic/yexception.h:146:5 NKikimr::NStat::TEvStatistics::TEvAnalyzeResponse::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEventRethrow(NActors::TActorId const&, TDuration) at /-S/ydb/library/actors/testlib/test_runtime.h:0:24 DoDestroy at /-S/util/generic/ptr.h:253:13 operator() at /-S/ydb/core/statistics/aggregator/ut/ut_analyze_columnshard.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? 
at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/statistics/aggregator/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/statistics/aggregator/ut/test-results/unittest/testing_out_stuff/AnalyzeColumnshard.AnalyzeRebootColumnShard.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/statistics/aggregator/ut/test-results/unittest/testing_out_stuff/AnalyzeColumnshard.AnalyzeRebootColumnShard.out ------ FAIL: 35 - GOOD, 1 - FAIL ydb/core/statistics/aggregator/ut ydb/core/sys_view/ut [size:medium] nchunks:10 ------ [0/10] chunk ran 9 tests (total:597.31s - test:597.16s) [fail] ShowCreateView::WithPairedTablePathPrefix [default-linux-x86_64-release-asan] (11.69s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff/ShowCreateView.WithPairedTablePathPrefix.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff/ShowCreateView.WithPairedTablePathPrefix.out [fail] ShowCreateView::FromTable [default-linux-x86_64-release-asan] (11.28s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff/ShowCreateView.FromTable.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff/ShowCreateView.FromTable.out ------ [6/10] chunk ran 9 tests (total:344.18s - test:343.11s) [fail] SystemView::ShowCreateTableColumnAlterObject [default-linux-x86_64-release-asan] (9.99s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. 
TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff/SystemView.ShowCreateTableColumnAlterObject.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff/SystemView.ShowCreateTableColumnAlterObject.out [fail] SystemView::ShowCreateTableChangefeeds [default-linux-x86_64-release-asan] (35.39s) assertion failed at ydb/core/sys_view/ut_kqp.cpp:311, void NKikimr::NSysView::(anonymous namespace)::TShowCreateChecker::CheckShowCreateTable(const std::string &, const std::string &, TString, bool, bool): (UnescapeC(formatQuery) == UnescapeC(showCreateTableQuery)) failed: ("CREATE TABLE `test_show_create` (\n `Key` Uint64,\n `Value` String,\n PRIMARY KEY (`Key`)\n);\n\nALTER TABLE `test_show_create`\n ADD CHANGEFEED `feed_1` WITH (MODE = 'OLD_IMAGE', FORMAT = 'DEBEZIUM_JSON', RETENTION_PERIOD = INTERVAL('PT1H'), TOPIC_MIN_ACTIVE_PARTITIONS = 1)\n;\n\nALTER TABLE `test_show_create`\n ADD CHANGEFEED `feed_2` WITH (MODE = 'NEW_IMAGE', FORMAT = 'JSON', VIRTUAL_TIMESTAMPS = TRUE, RETENTION_PERIOD = INTERVAL('PT3H'), TOPIC_MIN_ACTIVE_PARTITIONS = 10)\n;\n\nALTER TABLE `test_show_create`\n ADD CHANGEFEED `feed_3` WITH (MODE = 'KEYS_ONLY', FORMAT = 'JSON', RETENTION_PERIOD = INTERVAL('PT30M'), TOPIC_MIN_ACTIVE_PARTITIONS = 3, INITIAL_SCAN = TRUE)\n;\n" != "CREATE TABLE `test_show_create` (\n `Key` Uint64,\n `Value` String,\n PRIMARY KEY (`Key`)\n);\n\nALTER TABLE `test_show_create`\n ADD CHANGEFEED `feed_1` WITH (MODE = 'OLD_IMAGE', FORMAT = 'DEBEZIUM_JSON', RETENTION_PERIOD = INTERVAL('PT1H'), TOPIC_MIN_ACTIVE_PARTITIONS = 1)\n;\n\nALTER TABLE `test_show_create`\n ADD CHANGEFEED `feed_2` WITH (MODE = 'NEW_IMAGE', FORMAT = 'JSON', VIRTUAL_TIMESTAMPS = TRUE, RETENTION_PERIOD = INTERVAL('PT3H'), TOPIC_MIN_ACTIVE_PARTITIONS = 10)\n;\n\nALTER TABLE `test_show_create`\n ADD CHANGEFEED `feed_3` WITH (MODE = 'KEYS_ONLY', FORMAT = 'JSON', RETENTION_PERIOD = INTERVAL('PT30M'), TOPIC_MIN_ACTIVE_PARTITIONS = 3)\n;\n") CREATE TABLE `test_show_create` ( `Key` Uint64, `Value` String, PRIMARY KEY (`Key`) ); ALTER TABLE `test_show_create` ADD CHANGEFEED `feed_1` WITH (MODE = 'OLD_IMAGE', FORMAT = 'DEBEZIUM_JSON', RETENTION_PERIOD = INTERVAL('PT1H'), TOPIC_MIN_ACTIVE_PARTITIONS = 1) ; ALTER TABLE `te ..[snippet truncated].. 
\n `Value` String,\n PRIMARY KEY (`Key`)\n);\n\nALTER TABLE `test_show_create`\n ADD CHANGEFEED `feed_1` WITH (MODE = 'OLD_IMAGE', FORMAT = 'DEBEZIUM_JSON', RETENTION_PERIOD = INTERVAL('PT1H'), TOPIC_MIN_ACTIVE_PARTITIONS = 1)\n;\n\nALTER TABLE `test_show_create`\n ADD CHANGEFEED `feed_2` WITH (MODE = 'NEW_IMAGE', FORMAT = 'JSON', VIRTUAL_TIMESTAMPS = TRUE, RETENTION_PERIOD = INTERVAL('PT3H'), TOPIC_MIN_ACTIVE_PARTITIONS = 10)\n;\n\nALTER TABLE `test_show_create`\n ADD CHANGEFEED `feed_3` WITH (MODE = 'KEYS_ONLY', FORMAT = 'JSON', RETENTION_PERIOD = INTERVAL('PT30M'), TOPIC_MIN_ACTIVE_PARTITIONS = 3(, INITIAL_SCAN = TRUE|))\n;\n" TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 NKikimr::NSysView::(anonymous namespace)::TShowCreateChecker::CheckShowCreateTable(std::__y1::basic_string, std::__y1::allocator> const&, std::__y1::basic_string, std::__y1::allocator> const&, TBasicString>, bool, bool) at /-S/ydb/core/sys_view/ut_kqp.cpp:311:13 UnRef at /-S/util/generic/ptr.h:640:13 operator() at /-S/ydb/core/sys_view/ut_kqp.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff/SystemView.ShowCreateTableChangefeeds.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff/SystemView.ShowCreateTableChangefeeds.out ------ [7/10] chunk ran 9 tests (total:221.91s - test:216.38s) [fail] SystemView::ShowCreateTableReadReplicas [default-linux-x86_64-release-asan] (12.23s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff/SystemView.ShowCreateTableReadReplicas.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff/SystemView.ShowCreateTableReadReplicas.out [fail] SystemView::ShowCreateTableTtlSettings [default-linux-x86_64-release-asan] (10.80s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff/SystemView.ShowCreateTableTtlSettings.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff/SystemView.ShowCreateTableTtlSettings.out [fail] SystemView::ShowCreateTablePartitionPolicyIndexTable [default-linux-x86_64-release-asan] (11.73s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. 
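Note (editorial): in the SystemView::ShowCreateTableChangefeeds assertion above, the "with diff:" notation "(left|right)" appears to mark text present on only one side of the comparison; the single divergence is ", INITIAL_SCAN = TRUE" in the `feed_3` changefeed, i.e. the generated SHOW CREATE output lacks the INITIAL_SCAN attribute that the test's formatted query contains. A stripped-down comparison of just that clause (plain C++, strings copied from the log, no YDB dependencies):

    // The two feed_3 fragments from the assertion above, trimmed to the clause that
    // differs; running this prints "mismatch".
    #include <iostream>
    #include <string>

    int main() {
        const std::string withInitialScan =
            "ADD CHANGEFEED `feed_3` WITH (MODE = 'KEYS_ONLY', FORMAT = 'JSON', "
            "RETENTION_PERIOD = INTERVAL('PT30M'), TOPIC_MIN_ACTIVE_PARTITIONS = 3, INITIAL_SCAN = TRUE)";
        const std::string fromShowCreate =
            "ADD CHANGEFEED `feed_3` WITH (MODE = 'KEYS_ONLY', FORMAT = 'JSON', "
            "RETENTION_PERIOD = INTERVAL('PT30M'), TOPIC_MIN_ACTIVE_PARTITIONS = 3)";
        std::cout << (withInitialScan == fromShowCreate
                          ? "match"
                          : "mismatch: INITIAL_SCAN = TRUE is missing")
                  << "\n";
        return 0;
    }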
TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff/SystemView.ShowCreateTablePartitionPolicyIndexTable.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff/SystemView.ShowCreateTablePartitionPolicyIndexTable.out [fail] SystemView::ShowCreateTableSequences [default-linux-x86_64-release-asan] (12.99s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff/SystemView.ShowCreateTableSequences.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff/SystemView.ShowCreateTableSequences.out [fail] SystemView::ShowCreateTableTemporary [default-linux-x86_64-release-asan] (14.39s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff/SystemView.ShowCreateTableTemporary.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/sys_view/ut/test-results/unittest/testing_out_stuff/SystemView.ShowCreateTableTemporary.out ------ FAIL: 81 - GOOD, 9 - FAIL ydb/core/sys_view/ut ydb/core/tx/columnshard/ut_schema [size:medium] nchunks:60 ------ [0/60] chunk ran 1 test (total:7.45s - test:7.41s) [crashed] MoveTable::EmptyTable [default-linux-x86_64-release-asan] (0.00s) Test crashed (return code: 100) ==1031890==ERROR: AddressSanitizer: heap-use-after-free on address 0x503000a3eb60 at pc 0x00002b088579 bp 0x7ffcfdc5fb10 sp 0x7ffcfdc5fb08 READ of size 8 at 0x503000a3eb60 thread T0 #0 0x2b088578 in pair /-S/contrib/libs/cxxsupp/libcxx/include/__utility/pair.h:149:22 #1 0x2b088578 in new_node /-S/util/generic/hash_table.h:950:47 #2 0x2b088578 in emplace_unique_noresize /-S/util/generic/hash_table.h:1018:17 #3 0x2b088578 in std::__y1::pair<__yhashtable_iterator>, bool> THashTable, NKikimr::NColumnShard::TSchemeShardLocalPathId, THash, TSelect1st, TEqualTo, std::__y1::allocator>::emplace_unique(NKikimr::NColumnShard::TSchemeShardLocalPathId const&, NKikimr::NColumnShard::TInternalPathId const&) /-S/util/generic/hash_table.h:710:16 #4 0x2b083b39 in emplace /-S/util/generic/hash.h:177:20 #5 0x2b083b39 in NKikimr::NColumnShard::TTablesManager::MoveTableProgress(NKikimr::NIceDb::TNiceDb&, NKikimr::NColumnShard::TSchemeShardLocalPathId, NKikimr::NColumnShard::TSchemeShardLocalPathId) /-S/ydb/core/tx/columnshard/tables_manager.cpp:470:5 #6 0x2af55eae in Ru ..[snippet truncated].. 
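Note (editorial): the MoveTable::EmptyTable crash above and the MoveTable::WithData / MoveTable::WithUncomittedData crashes below all report the same AddressSanitizer finding: an 8-byte READ of already-freed heap memory while emplacing into a hash table from TTablesManager::MoveTableProgress (tables_manager.cpp:470). As a generic illustration of what "heap-use-after-free ... READ" means (this is deliberately not the YDB code, only the access pattern ASan flags), build with -fsanitize=address:

    // Generic use-after-free demo: a reference into a freed container node is read
    // during a later emplace; AddressSanitizer aborts with a heap-use-after-free READ.
    // Build: clang++ -g -fsanitize=address uaf.cpp && ./a.out
    #include <unordered_map>

    int main() {
        auto* owner = new std::unordered_map<int, int>{{1, 10}};
        const int& key = owner->begin()->first;  // refers into a heap-allocated node
        delete owner;                            // node freed, the reference now dangles
        std::unordered_map<int, int> other;
        other.emplace(key, 42);                  // READ of freed memory, ASan reports it here
        return 0;
    }

The shadow-byte dumps that follow each report are ASan's view of memory around the bad address: fd bytes are freed heap, fa bytes are redzones, and the bracketed byte covers the address that was read.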
ry/cpp/testing/unittest/registar.cpp:494:19 #32 0x10e8502c in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #33 0x7fcef666cd8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) #34 0x7fcef666ce3f in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x29e3f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) #35 0xde42028 in _start (/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema+0xde42028) (BuildId: 14f65051164d988c6f5e9a0a6fdd20f081839ea6) SUMMARY: AddressSanitizer: heap-use-after-free /-S/contrib/libs/cxxsupp/libcxx/include/__utility/pair.h:149:22 in pair Shadow bytes around the buggy address: 0x503000a3e880: 00 00 00 00 fa fa fd fd fd fd fa fa fd fd fd fd 0x503000a3e900: fa fa 00 00 00 00 fa fa 00 00 00 00 fa fa fd fd 0x503000a3e980: fd fd fa fa fd fd fd fd fa fa fd fd fd fd fa fa 0x503000a3ea00: fd fd fd fd fa fa fd fd fd fd fa fa fd fd fd fd 0x503000a3ea80: fa fa fd fd fd fd fa fa fd fd fd fd fa fa fd fd =>0x503000a3eb00: fd fd fa fa fd fd fd fd fa fa fd fd[fd]fa fa fa 0x503000a3eb80: 00 00 00 00 fa fa fd fd fd fd fa fa fd fd fd fd 0x503000a3ec00: fa fa fd fd fd fd fa fa fd fd fd fa fa fa fd fd 0x503000a3ec80: fd fa fa fa fd fd fd fa fa fa fd fd fd fd fa fa 0x503000a3ed00: fd fd fd fd fa fa fd fd fd fd fa fa 00 00 00 00 0x503000a3ed80: fa fa fd fd fd fa fa fa fd fd fd fa fa fa 00 00 Shadow byte legend (one shadow byte represents 8 application bytes): Addressable: 00 Partially addressable: 01 02 03 04 05 06 07 Heap left redzone: fa Freed heap region: fd Stack left redzone: f1 Stack mid redzone: f2 Stack right redzone: f3 Stack after return: f5 Stack use after scope: f8 Global redzone: f9 Global init order: f6 Poisoned by user: f7 Container overflow: fc Array cookie: ac Intra object redzone: bb ASan internal: fe Left alloca redzone: ca Right alloca redzone: cb ==1031890==ABORTING Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/columnshard/ut_schema/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/columnshard/ut_schema/test-results/unittest/testing_out_stuff/MoveTable.EmptyTable.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/columnshard/ut_schema/test-results/unittest/testing_out_stuff/MoveTable.EmptyTable.out ------ [3/60] chunk ran 1 test (total:7.41s - test:7.37s) [crashed] MoveTable::WithData [default-linux-x86_64-release-asan] (0.00s) Test crashed (return code: 100) ==996001==ERROR: AddressSanitizer: heap-use-after-free on address 0x503000aa51c0 at pc 0x00002b088579 bp 0x7ffc081c5af0 sp 0x7ffc081c5ae8 READ of size 8 at 0x503000aa51c0 thread T0 #0 0x2b088578 in pair /-S/contrib/libs/cxxsupp/libcxx/include/__utility/pair.h:149:22 #1 0x2b088578 in new_node /-S/util/generic/hash_table.h:950:47 #2 0x2b088578 in emplace_unique_noresize /-S/util/generic/hash_table.h:1018:17 #3 0x2b088578 in std::__y1::pair<__yhashtable_iterator>, bool> THashTable, NKikimr::NColumnShard::TSchemeShardLocalPathId, THash, TSelect1st, TEqualTo, std::__y1::allocator>::emplace_unique(NKikimr::NColumnShard::TSchemeShardLocalPathId const&, NKikimr::NColumnShard::TInternalPathId const&) /-S/util/generic/hash_table.h:710:16 #4 0x2b083b39 in emplace /-S/util/generic/hash.h:177:20 #5 0x2b083b39 in NKikimr::NColumnShard::TTablesManager::MoveTableProgress(NKikimr::NIceDb::TNiceDb&, NKikimr::NColumnShard::TSchemeShardLocalPathId, 
NKikimr::NColumnShard::TSchemeShardLocalPathId) /-S/ydb/core/tx/columnshard/tables_manager.cpp:470:5 #6 0x2af55eae in Run ..[snippet truncated].. ary/cpp/testing/unittest/registar.cpp:494:19 #32 0x10e8502c in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #33 0x7f6ed0927d8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) #34 0x7f6ed0927e3f in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x29e3f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) #35 0xde42028 in _start (/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema+0xde42028) (BuildId: 14f65051164d988c6f5e9a0a6fdd20f081839ea6) SUMMARY: AddressSanitizer: heap-use-after-free /-S/contrib/libs/cxxsupp/libcxx/include/__utility/pair.h:149:22 in pair Shadow bytes around the buggy address: 0x503000aa4f00: fa fa fd fd fd fd fa fa fd fd fd fd fa fa 00 00 0x503000aa4f80: 00 00 fa fa 00 00 00 00 fa fa fd fd fd fd fa fa 0x503000aa5000: fd fd fd fd fa fa fd fd fd fd fa fa fd fd fd fd 0x503000aa5080: fa fa fd fd fd fd fa fa fd fd fd fd fa fa fd fd 0x503000aa5100: fd fd fa fa fd fd fd fd fa fa fd fd fd fd fa fa =>0x503000aa5180: fd fd fd fd fa fa fd fd[fd]fa fa fa 00 00 00 00 0x503000aa5200: fa fa fd fd fd fd fa fa fd fd fd fd fa fa fd fd 0x503000aa5280: fd fd fa fa fd fd fd fa fa fa fd fd fd fa fa fa 0x503000aa5300: fd fd fd fa fa fa fd fd fd fd fa fa fd fd fd fd 0x503000aa5380: fa fa fd fd fd fd fa fa fd fd fd fd fa fa 00 00 0x503000aa5400: 00 00 fa fa fd fd fd fa fa fa fd fd fd fa fa fa Shadow byte legend (one shadow byte represents 8 application bytes): Addressable: 00 Partially addressable: 01 02 03 04 05 06 07 Heap left redzone: fa Freed heap region: fd Stack left redzone: f1 Stack mid redzone: f2 Stack right redzone: f3 Stack after return: f5 Stack use after scope: f8 Global redzone: f9 Global init order: f6 Poisoned by user: f7 Container overflow: fc Array cookie: ac Intra object redzone: bb ASan internal: fe Left alloca redzone: ca Right alloca redzone: cb ==996001==ABORTING Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/columnshard/ut_schema/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/columnshard/ut_schema/test-results/unittest/testing_out_stuff/MoveTable.WithData.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/columnshard/ut_schema/test-results/unittest/testing_out_stuff/MoveTable.WithData.out ------ [4/60] chunk ran 1 test (total:15.78s - test:15.75s) [crashed] MoveTable::WithUncomittedData [default-linux-x86_64-release-asan] (0.00s) Test crashed (return code: 100) ==974766==ERROR: AddressSanitizer: heap-use-after-free on address 0x503000a52e10 at pc 0x00002b088579 bp 0x7fff5c323990 sp 0x7fff5c323988 READ of size 8 at 0x503000a52e10 thread T0 #0 0x2b088578 in pair /-S/contrib/libs/cxxsupp/libcxx/include/__utility/pair.h:149:22 #1 0x2b088578 in new_node /-S/util/generic/hash_table.h:950:47 #2 0x2b088578 in emplace_unique_noresize /-S/util/generic/hash_table.h:1018:17 #3 0x2b088578 in std::__y1::pair<__yhashtable_iterator>, bool> THashTable, NKikimr::NColumnShard::TSchemeShardLocalPathId, THash, TSelect1st, TEqualTo, std::__y1::allocator>::emplace_unique(NKikimr::NColumnShard::TSchemeShardLocalPathId const&, NKikimr::NColumnShard::TInternalPathId const&) /-S/util/generic/hash_table.h:710:16 #4 0x2b083b39 in emplace /-S/util/generic/hash.h:177:20 #5 0x2b083b39 in 
NKikimr::NColumnShard::TTablesManager::MoveTableProgress(NKikimr::NIceDb::TNiceDb&, NKikimr::NColumnShard::TSchemeShardLocalPathId, NKikimr::NColumnShard::TSchemeShardLocalPathId) /-S/ydb/core/tx/columnshard/tables_manager.cpp:470:5 #6 0x2af55eae in Run ..[snippet truncated].. ary/cpp/testing/unittest/registar.cpp:494:19 #32 0x10e8502c in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #33 0x7fbb591f4d8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) #34 0x7fbb591f4e3f in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x29e3f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) #35 0xde42028 in _start (/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema+0xde42028) (BuildId: 14f65051164d988c6f5e9a0a6fdd20f081839ea6) SUMMARY: AddressSanitizer: heap-use-after-free /-S/contrib/libs/cxxsupp/libcxx/include/__utility/pair.h:149:22 in pair Shadow bytes around the buggy address: 0x503000a52b80: fa fa fd fd fd fd fa fa 00 00 00 00 fa fa 00 00 0x503000a52c00: 00 00 fa fa fd fd fd fd fa fa fd fd fd fd fa fa 0x503000a52c80: fd fd fd fd fa fa fd fd fd fd fa fa fd fd fd fd 0x503000a52d00: fa fa fd fd fd fd fa fa fd fd fd fd fa fa fd fd 0x503000a52d80: fd fd fa fa fd fd fd fd fa fa fd fd fd fd fa fa =>0x503000a52e00: fd fd[fd]fa fa fa 00 00 00 00 fa fa fd fd fd fd 0x503000a52e80: fa fa fd fd fd fd fa fa fd fd fd fd fa fa fd fd 0x503000a52f00: fd fa fa fa fd fd fd fa fa fa fd fd fd fa fa fa 0x503000a52f80: fd fd fd fd fa fa fd fd fd fd fa fa fd fd fd fd 0x503000a53000: fa fa 00 00 00 00 fa fa fd fd fd fa fa fa fd fd 0x503000a53080: fd fa fa fa 00 00 00 00 fa fa 00 00 00 00 fa fa Shadow byte legend (one shadow byte represents 8 application bytes): Addressable: 00 Partially addressable: 01 02 03 04 05 06 07 Heap left redzone: fa Freed heap region: fd Stack left redzone: f1 Stack mid redzone: f2 Stack right redzone: f3 Stack after return: f5 Stack use after scope: f8 Global redzone: f9 Global init order: f6 Poisoned by user: f7 Container overflow: fc Array cookie: ac Intra object redzone: bb ASan internal: fe Left alloca redzone: ca Right alloca redzone: cb ==974766==ABORTING Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/columnshard/ut_schema/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/columnshard/ut_schema/test-results/unittest/testing_out_stuff/MoveTable.WithUncomittedData.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/columnshard/ut_schema/test-results/unittest/testing_out_stuff/MoveTable.WithUncomittedData.out ------ FAIL: 42 - GOOD, 3 - CRASHED ydb/core/tx/columnshard/ut_schema ydb/core/tx/conveyor_composite/ut [size:medium] nchunks:60 ------ [3/60] chunk ran 1 test (total:103.21s - setup:0.03s test:103.10s) [crashed] CompositeConveyorTests::TestUniformProcessDistribution [default-linux-x86_64-release-asan] (0.00s) Test crashed (return code: -6) See logs for more info Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/conveyor_composite/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/conveyor_composite/ut/test-results/unittest/testing_out_stuff/CompositeConveyorTests.TestUniformProcessDistribution.err Stdout: 
/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/conveyor_composite/ut/test-results/unittest/testing_out_stuff/CompositeConveyorTests.TestUniformProcessDistribution.out ------ FAIL: 4 - GOOD, 1 - CRASHED ydb/core/tx/conveyor_composite/ut ydb/core/tx/datashard/ut_incremental_backup [size:medium] nchunks:4 ------ [0/4] chunk ran 3 tests (total:77.57s - test:77.30s) [fail] IncrementalBackup::ComplexRestoreBackupCollection+WithIncremental [default-linux-x86_64-release-asan] (24.99s) assertion failed at ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp:829, virtual void NKikimr::NTestSuiteIncrementalBackup::TTestCaseComplexRestoreBackupCollection::Execute_(NUnitTest::TTestContext &) [WithIncremental = true]: (KqpSimpleExec(runtime, R"( SELECT key, value FROM `/Root/Table` ORDER BY key )") == "{ items { uint32_value: 2 } items { uint32_value: 2000 } }, " "{ items { uint32_value: 3 } items { uint32_value: 30 } }, " "{ items { uint32_value: 4 } items { uint32_value: 40 } }") failed: ("{ items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }, { items { uint32_value: 4 } items { uint32_value: 40 } }, { items { uint32_value: 5 } items { uint32_value: 50 } }" != { items { uint32_value: 2 } items { uint32_value: 2000 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }, { items { uint32_value: 4 } items { uint32_value: 40 } }) , with diff: ("|){ items { uint32_value: (1 } items { uint3|)2(_value:|) (10 |)} (}, { |)items { uint32_value: 2( } items { uint32_value: 2|00)0 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }, { items { uint32_value: 4 } items { uint32_value: 40 } }(, { items { uint32_value: 5 } items { uint32_value: 50 } }"|) TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 NKikimr::NTestSuiteIncrementalBackup::TTestCaseComplexRestoreBackupCollection::Execute_(NUnitTest::TTestContext&) at /-S/ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp:0:13 operator() at /-S/ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? 
at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/testing_out_stuff/IncrementalBackup.ComplexRestoreBackupCollection.WithIncremental.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/testing_out_stuff/IncrementalBackup.ComplexRestoreBackupCollection.WithIncremental.out ------ [1/4] chunk ran 3 tests (total:93.24s - test:92.75s) [fail] IncrementalBackup::E2EBackupCollection [default-linux-x86_64-release-asan] (24.97s) assertion failed at ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp:925, virtual void NKikimr::NTestSuiteIncrementalBackup::TTestCaseE2EBackupCollection::Execute_(NUnitTest::TTestContext &): (expected == actual) failed: ("{ items { uint32_value: 2 } items { uint32_value: 200 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }" != "{ items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }") , with diff: "{ items { uint32_value: (|1 } items { uint3)2(|_value:) (|10 )} (|}, { )items { uint32_value: 2(0| } items { uint32_value: 2)0 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }" TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 NKikimr::NTestSuiteIncrementalBackup::TTestCaseE2EBackupCollection::Execute_(NUnitTest::TTestContext&) at /-S/ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp:0:9 operator() at /-S/ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? 
at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/testing_out_stuff/IncrementalBackup.E2EBackupCollection.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/testing_out_stuff/IncrementalBackup.E2EBackupCollection.out [crashed] IncrementalBackup::MultiRestore [default-linux-x86_64-release-asan] (0.00s) Test crashed (return code: -6) See logs for more info Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/testing_out_stuff/IncrementalBackup.MultiRestore.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/testing_out_stuff/IncrementalBackup.MultiRestore.out ------ [3/4] chunk ran 2 tests (total:53.02s - test:52.80s) [fail] IncrementalBackup::SimpleRestoreBackupCollection+WithIncremental [default-linux-x86_64-release-asan] (21.19s) assertion failed at ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp:623, virtual void NKikimr::NTestSuiteIncrementalBackup::TTestCaseSimpleRestoreBackupCollection::Execute_(NUnitTest::TTestContext &) [WithIncremental = true]: (KqpSimpleExec(runtime, R"( SELECT key, value FROM `/Root/Table` ORDER BY key )") == "{ items { uint32_value: 2 } items { uint32_value: 2000 } }, " "{ items { uint32_value: 3 } items { uint32_value: 30 } }, " "{ items { uint32_value: 4 } items { uint32_value: 40 } }") failed: ("{ items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }, { items { uint32_value: 4 } items { uint32_value: 40 } }, { items { uint32_value: 5 } items { uint32_value: 50 } }" != { items { uint32_value: 2 } items { uint32_value: 2000 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }, { items { uint32_value: 4 } items { uint32_value: 40 } }) , with diff: ("|){ items { uint32_value: (1 } items { uint3|)2(_value:|) (10 |)} (}, { |)items { uint32_value: 2( } items { uint32_value: 2|00)0 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }, { items { uint32_value: 4 } items { uint32_value: 40 } }(, { items { uint32_value: 5 } items { uint32_value: 50 } }"|) TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 NKikimr::NTestSuiteIncrementalBackup::TTestCaseSimpleRestoreBackupCollection::Execute_(NUnitTest::TTestContext&) at /-S/ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp:0:13 operator() at /-S/ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? 
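Note (editorial): in the IncrementalBackup failures above, each "{ items { uint32_value: K } items { uint32_value: V } }" group is one row of the `SELECT key, value FROM /Root/Table ORDER BY key` result. The expected strings describe the table state after restore (for example 2 -> 2000, 3 -> 30, 4 -> 40), while the actual strings still show the rows from before the restore, i.e. the restore visibly did not change the table contents. A small helper that renders rows in the same textual form, handy for eyeballing such diffs (illustrative only, plain C++):

    // Renders (key, value) pairs in the row-dump format used by the failing assertions.
    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <utility>
    #include <vector>

    std::string RenderRows(const std::vector<std::pair<uint32_t, uint32_t>>& rows) {
        std::ostringstream out;
        for (size_t i = 0; i < rows.size(); ++i) {
            if (i) out << ", ";
            out << "{ items { uint32_value: " << rows[i].first
                << " } items { uint32_value: " << rows[i].second << " } }";
        }
        return out.str();
    }

    int main() {
        // Expected post-restore state taken from the log: 2 -> 2000, 3 -> 30, 4 -> 40.
        std::cout << RenderRows({{2, 2000}, {3, 30}, {4, 40}}) << "\n";
        return 0;
    }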
at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/testing_out_stuff/IncrementalBackup.SimpleRestoreBackupCollection.WithIncremental.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/testing_out_stuff/IncrementalBackup.SimpleRestoreBackupCollection.WithIncremental.out ------ FAIL: 7 - GOOD, 3 - FAIL, 1 - CRASHED ydb/core/tx/datashard/ut_incremental_backup ydb/core/tx/datashard/ut_read_iterator [size:medium] nchunks:10 ------ [9/10] chunk ran 15 tests (total:605.01s - test:600.05s) Chunk exceeded 600s timeout and was killed List of the tests involved in the launch: ReadIteratorExternalBlobs::ExtBlobsWithSpecificKeys (good) duration: 130.68s ReadIteratorExternalBlobs::ExtBlobsMultipleColumns (timeout) duration: 106.16s ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheEnd (good) duration: 61.66s ReadIteratorExternalBlobs::ExtBlobs (good) duration: 57.57s ReadIteratorExternalBlobs::ExtBlobsWithFirstRowPreloadedWithReboot (good) duration: 57.24s ReadIteratorExternalBlobs::ExtBlobsWithFirstRowPreloaded (good) duration: 54.99s ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheBeginning (good) duration: 54.08s ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheMiddle (good) duration: 53.53s DataShardReadIteratorSysTables::ShouldNotAllowArrow (good) duration: 7.43s DataShardReadIteratorSysTables::ShouldRead (good) duration: 7.08s 2 more tests with 11.94s total duration are not listed. 3 tests were not launched inside chunk. Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/testing_out_stuff/stderr [timeout] ReadIteratorExternalBlobs::ExtBlobsMultipleColumns [default-linux-x86_64-release-asan] (106.16s) Killed by timeout (600 s) Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/testing_out_stuff/ReadIteratorExternalBlobs.ExtBlobsMultipleColumns.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/testing_out_stuff/ReadIteratorExternalBlobs.ExtBlobsMultipleColumns.out ------ TIMEOUT: 148 - GOOD, 3 - NOT_LAUNCHED, 1 - TIMEOUT ydb/core/tx/datashard/ut_read_iterator ydb/core/tx/schemeshard/ut_index [size:medium] nchunks:80 ------ [9/80] chunk ran 1 test (total:605.52s - test:600.08s) Chunk exceeded 600s timeout and was killed List of the tests involved in the launch: TAsyncIndexTests::MergeBothWithReboots[TabletReboots] (timeout) duration: 603.31s Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/schemeshard/ut_index/test-results/unittest/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/schemeshard/ut_index/test-results/unittest/testing_out_stuff Stderr: 
/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/schemeshard/ut_index/test-results/unittest/testing_out_stuff/stderr [timeout] TAsyncIndexTests::MergeBothWithReboots[TabletReboots] [default-linux-x86_64-release-asan] (603.31s) Killed by timeout (600 s) Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/schemeshard/ut_index/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/schemeshard/ut_index/test-results/unittest/testing_out_stuff/TAsyncIndexTests.MergeBothWithReboots.TabletReboots.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/schemeshard/ut_index/test-results/unittest/testing_out_stuff/TAsyncIndexTests.MergeBothWithReboots.TabletReboots.out ------ TIMEOUT: 29 - GOOD, 1 - TIMEOUT ydb/core/tx/schemeshard/ut_index ydb/core/tx/tiering/ut [size:medium] nchunks:60 ------ [3/60] chunk ran 1 test (total:43.88s - test:43.80s) [crashed] ColumnShardTiers::TTLUsage [default-linux-x86_64-release-asan] (0.00s) Test crashed (return code: -6) See logs for more info Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tiering/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tiering/ut/test-results/unittest/testing_out_stuff/ColumnShardTiers.TTLUsage.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tiering/ut/test-results/unittest/testing_out_stuff/ColumnShardTiers.TTLUsage.out ------ FAIL: 9 - GOOD, 1 - CRASHED ydb/core/tx/tiering/ut ydb/core/tx/tx_proxy/ut_schemereq [size:medium] nchunks:10 ------ [1/10] chunk ran 30 tests (total:273.39s - test:273.08s) [fail] SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-65 [default-linux-x86_64-release-asan] (8.42s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
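Note (editorial): "GRpc error: (4)" is gRPC status code 4, DEADLINE_EXCEEDED: the session-creation step inside CreateLocalUser() did not finish before the client-side deadline, so sessionResult.IsSuccess() is false and the assertion at schemereq_ut.cpp:256 fires, as it does for the other SchemeReqAccess failures below. A generic C++ reminder of where that code comes from (plain gRPC client API, not the YDB test harness):

    // Status code 4 in the log is grpc::StatusCode::DEADLINE_EXCEEDED, returned when an
    // RPC runs past the deadline set on its ClientContext.
    #include <grpcpp/grpcpp.h>
    #include <chrono>
    #include <iostream>

    int main() {
        grpc::ClientContext ctx;
        // Any call made with this context must complete within 5 seconds.
        ctx.set_deadline(std::chrono::system_clock::now() + std::chrono::seconds(5));

        std::cout << "DEADLINE_EXCEEDED = "
                  << static_cast<int>(grpc::StatusCode::DEADLINE_EXCEEDED) << "\n";  // prints 4
        return 0;
    }

Under ASan the test binaries run considerably slower than a plain release build, which makes client-side deadlines easier to hit; the configured timeout itself is not shown in this excerpt.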
: Error: Grpc error response on endpoint localhost:27177 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-65.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-65.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-69 [default-linux-x86_64-release-asan] (10.21s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:18102 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-69.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-69.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-59 [default-linux-x86_64-release-asan] (8.29s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:12808 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-59.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-59.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-68 [default-linux-x86_64-release-asan] (8.95s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:24356 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-68.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-68.out ------ [3/10] chunk ran 30 tests (total:266.72s - test:266.49s) [fail] SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-67 [default-linux-x86_64-release-asan] (8.61s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:6387 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-67.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-67.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-54 [default-linux-x86_64-release-asan] (8.86s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:11760 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-54.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-54.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-58 [default-linux-x86_64-release-asan] (10.18s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:13092 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-58.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-58.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-61 [default-linux-x86_64-release-asan] (10.13s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:6035 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-61.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-61.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-59 [default-linux-x86_64-release-asan] (10.16s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:26758 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-59.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-59.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-72 [default-linux-x86_64-release-asan] (10.28s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:23521 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-72.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-72.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-53 [default-linux-x86_64-release-asan] (9.44s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:24864 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-53.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-53.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-63 [default-linux-x86_64-release-asan] (10.32s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:10446 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-63.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-63.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-62 [default-linux-x86_64-release-asan] (11.82s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:18217 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-62.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-62.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-69 [default-linux-x86_64-release-asan] (9.08s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:8101 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-69.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-69.out ------ [7/10] chunk ran 30 tests (total:270.30s - test:269.90s) [fail] SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-10 [default-linux-x86_64-release-asan] (8.69s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:30025 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-10.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-10.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-14 [default-linux-x86_64-release-asan] (8.04s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:22594 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-14.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-14.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-8 [default-linux-x86_64-release-asan] (8.96s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:2394 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-8.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-8.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-3 [default-linux-x86_64-release-asan] (10.31s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:16786 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-3.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-3.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-21 [default-linux-x86_64-release-asan] (10.24s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:21451 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-21.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-21.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-16 [default-linux-x86_64-release-asan] (9.23s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:13182 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-16.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-16.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-4 [default-linux-x86_64-release-asan] (10.60s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:18934 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-4.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-4.out ------ [8/10] chunk ran 30 tests (total:272.88s - setup:0.02s test:272.56s) [fail] SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-68 [default-linux-x86_64-release-asan] (9.40s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:18965 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-68.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-68.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-53 [default-linux-x86_64-release-asan] (11.36s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:2254 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-53.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-53.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-62 [default-linux-x86_64-release-asan] (10.47s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:63303 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-62.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-62.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-56 [default-linux-x86_64-release-asan] (9.40s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:17349 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-56.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-56.out [fail] SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-63 [default-linux-x86_64-release-asan] (10.25s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:5584 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase) at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:5 __invoke at /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:18 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-63.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAccess.AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-63.out ------ [9/10] chunk ran 30 tests (total:257.58s - test:257.48s) [fail] SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnEmptyTenant-DomainLoginOnly-StrictAclCheck [default-linux-x86_64-release-asan] (10.63s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:17217 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAdminAccessInTenant.ClusterAdminCanAuthOnEmptyTenant-DomainLoginOnly-StrictAclCheck.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAdminAccessInTenant.ClusterAdminCanAuthOnEmptyTenant-DomainLoginOnly-StrictAclCheck.out [fail] SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant [default-linux-x86_64-release-asan] (10.97s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:22261 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAdminAccessInTenant.ClusterAdminCanAuthOnNonEmptyTenant.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAdminAccessInTenant.ClusterAdminCanAuthOnNonEmptyTenant.out [fail] SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant-StrictAclCheck [default-linux-x86_64-release-asan] (10.63s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:61305 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAdminAccessInTenant.ClusterAdminCanAuthOnNonEmptyTenant-StrictAclCheck.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAdminAccessInTenant.ClusterAdminCanAuthOnNonEmptyTenant-StrictAclCheck.out [fail] SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant-DomainLoginOnly-StrictAclCheck [default-linux-x86_64-release-asan] (10.97s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:24080 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAdminAccessInTenant.ClusterAdminCanAuthOnNonEmptyTenant-DomainLoginOnly-StrictAclCheck.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAdminAccessInTenant.ClusterAdminCanAuthOnNonEmptyTenant-DomainLoginOnly-StrictAclCheck.out [fail] SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant-DomainLoginOnly [default-linux-x86_64-release-asan] (11.66s) assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:4040 TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 operator() at /-S/ydb/core/tx/tx_proxy/schemereq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAdminAccessInTenant.ClusterAdminCanAuthOnNonEmptyTenant-DomainLoginOnly.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/testing_out_stuff/SchemeReqAdminAccessInTenant.ClusterAdminCanAuthOnNonEmptyTenant-DomainLoginOnly.out ------ FAIL: 269 - GOOD, 31 - FAIL ydb/core/tx/tx_proxy/ut_schemereq ydb/core/viewer/ut [size:medium] nchunks:10 ------ [4/10] chunk ran 5 tests (total:610.37s - setup:0.01s test:600.12s) Chunk exceeded 600s timeout and was killed List of the tests involved in the launch: Viewer::JsonStorageListingV1PDiskIdFilter (timeout) duration: 199.06s Viewer::JsonStorageListingV1NodeIdFilter (good) duration: 168.84s Viewer::JsonStorageListingV1GroupIdFilter (good) duration: 129.35s Viewer::JsonStorageListingV1 (good) duration: 90.58s Viewer::JsonAutocompleteStartOfDatabaseName (good) duration: 9.51s Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/viewer/ut/test-results/unittest/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/stderr [timeout] Viewer::JsonStorageListingV1PDiskIdFilter [default-linux-x86_64-release-asan] (199.06s) Killed by timeout (600 s) Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/Viewer.JsonStorageListingV1PDiskIdFilter.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/Viewer.JsonStorageListingV1PDiskIdFilter.out ------ [6/10] chunk ran 5 tests (total:113.96s - test:113.86s) [fail] Viewer::QueryExecuteScript [default-linux-x86_64-release-asan] (18.50s) assertion failed at ydb/core/viewer/viewer_ut.cpp:1914, virtual void NTestSuiteViewer::TTestCaseQueryExecuteScript::Execute_(NUnitTest::TTestContext &): (json["metadata"].GetMap().at("exec_stats").GetMap().contains("process_cpu_time_us")) {"metadata":{"result_sets_meta":[{"finished":true,"columns":[{"name":"Key","type":{"optional_type":{"item":{"type_id":"UINT64"}}}},{"name":"Value","type":{"optional_type":{"item":{"type_id":"STRING"}}}}],"number_rows":"15"}],"execution_id":"60bb3d8-d65e588-55f8ea2-9c063a28","exec_stats":{"query_plan":"{}"},"script_content":{"text":"SELECT * FROM 
`/Root/Test`;"},"exec_mode":"EXEC_MODE_EXECUTE","exec_status":"EXEC_STATUS_RUNNING","@type":"type.googleapis.com/Ydb.Query.ExecuteScriptMetadata"},"status":"SUCCESS","id":"ydb://scriptexec/9?id=60bb3d8-d65e588-55f8ea2-9c063a28"} TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 operator() at /-S/ydb/core/viewer/viewer_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/Viewer.QueryExecuteScript.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/Viewer.QueryExecuteScript.out [fail] Viewer::Plan2SvgBad [default-linux-x86_64-release-asan] (10.74s) (TSystemError) (Error 11: Resource temporarily unavailable) util/network/socket.cpp:910: can not read from socket input stream Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/Viewer.Plan2SvgBad.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/Viewer.Plan2SvgBad.out ------ TIMEOUT: 48 - GOOD, 2 - FAIL, 1 - TIMEOUT ydb/core/viewer/ut ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut [size:medium] nchunks:10 ------ [1/10] chunk ran 20 tests (total:499.68s - setup:0.01s test:498.77s) [fail] TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Query [default-linux-x86_64-release-asan] (31.88s) assertion failed at ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp:653, virtual std::vector NYdb::NTopic::NTests::NTestSuiteTxUsage::TFixture::TQuerySession::Execute(const std::string &, TTransactionBase *, bool, const TParams &): (result.IsSuccess())
: Error: Pending previous query completion TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 UnRef at /-S/util/generic/ptr.h:640:13 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/vector:546:18 UnRef at /-S/util/generic/ptr.h:640:13 operator() at /-S/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/TxUsage.Sinks_Olap_WriteToTopicAndTable_4_Query.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/TxUsage.Sinks_Olap_WriteToTopicAndTable_4_Query.out ------ [2/10] chunk ran 19 tests (total:661.52s - setup:0.02s test:600.11s) Chunk exceeded 600s timeout and was killed List of the tests involved in the launch: TxUsage::Transactions_Conflict_On_SeqNo_Query (good) duration: 191.05s TxUsage::Sinks_Oltp_WriteToTopics_3_Query (good) duration: 49.97s TxUsage::Sinks_Oltp_WriteToTopics_3_Table (good) duration: 47.99s TxUsage::TestRetentionOnLongTxAndBigMessages (timeout) duration: 38.07s TxUsage::Sinks_Oltp_WriteToTopics_2_Query (good) duration: 27.63s TxUsage::Sinks_Oltp_WriteToTopics_2_Table (good) duration: 25.68s TxUsage::Sinks_Oltp_WriteToTopics_4_Table (good) duration: 25.27s TxUsage::Sinks_Oltp_WriteToTopics_4_Query (good) duration: 24.61s TxUsage::Sinks_Oltp_WriteToTopics_1_Table (good) duration: 24.00s TxUsage::Sinks_Oltp_WriteToTopics_1_Query (good) duration: 22.13s 9 more tests with 150.00s total duration are not listed. 
Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/stderr [timeout] TxUsage::TestRetentionOnLongTxAndBigMessages [default-linux-x86_64-release-asan] (38.07s) Killed by timeout (600 s) Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/TxUsage.TestRetentionOnLongTxAndBigMessages.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/TxUsage.TestRetentionOnLongTxAndBigMessages.out ------ [9/10] chunk ran 19 tests (total:626.12s - setup:0.01s test:600.10s) Chunk exceeded 600s timeout and was killed List of the tests involved in the launch: TxUsage::Write_Only_Big_Messages_In_Wide_Transactions_Table (timeout) duration: 206.50s TxUsage::Write_Random_Sized_Messages_In_Wide_Transactions_Query (good) duration: 80.68s TxUsage::Write_Random_Sized_Messages_In_Wide_Transactions_Table (good) duration: 70.77s TxUsage::WriteToTopic_Demo_5_Query (good) duration: 21.34s TxUsage::WriteToTopic_Demo_9_Query (good) duration: 20.77s TxUsage::WriteToTopic_Demo_7_Table (good) duration: 20.23s TxUsage::WriteToTopic_Demo_7_Query (good) duration: 19.88s TxUsage::WriteToTopic_Demo_6_Table (good) duration: 19.71s TxUsage::WriteToTopic_Demo_9_Table (good) duration: 19.23s TxUsage::WriteToTopic_Demo_6_Query (good) duration: 19.12s 8 more tests with 112.60s total duration are not listed. TxUsage::Write_Only_Big_Messages_In_Wide_Transactions_Query test was not launched inside chunk. 
Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/run_test.log Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/stderr [timeout] TxUsage::Write_Only_Big_Messages_In_Wide_Transactions_Table [default-linux-x86_64-release-asan] (206.50s) Killed by timeout (600 s) Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/TxUsage.Write_Only_Big_Messages_In_Wide_Transactions_Table.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/TxUsage.Write_Only_Big_Messages_In_Wide_Transactions_Table.out ------ TIMEOUT: 188 - GOOD, 1 - FAIL, 1 - NOT_LAUNCHED, 2 - TIMEOUT ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut ydb/services/fq/ut_integration [size:medium] nchunks:10 ------ [4/10] chunk ran 2 tests (total:120.81s - setup:0.01s test:120.56s) [fail] Yq_1::CreateQuery_With_Idempotency [default-linux-x86_64-release-asan] (67.38s) assertion failed at ydb/services/fq/ut_integration/fq_ut.cpp:575, virtual void NTestSuiteYq_1::TTestCaseCreateQuery_With_Idempotency::Execute_(NUnitTest::TTestContext &): (result.GetStatus() == EStatus::SUCCESS) failed: (BAD_REQUEST != SUCCESS)
: Error: Control Plane is not ready yet. Please retry later., code: 1007 , with diff: (BAD_REQ|S)U(|CC)ES(T|S) TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 NTestSuiteYq_1::TTestCaseCreateQuery_With_Idempotency::Execute_(NUnitTest::TTestContext&) at /-S/ydb/services/fq/ut_integration/fq_ut.cpp:0:13 operator() at /-S/ydb/services/fq/ut_integration/fq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/fq/ut_integration/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/fq/ut_integration/test-results/unittest/testing_out_stuff/Yq_1.CreateQuery_With_Idempotency.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/fq/ut_integration/test-results/unittest/testing_out_stuff/Yq_1.CreateQuery_With_Idempotency.out ------ [7/10] chunk ran 2 tests (total:110.53s - setup:0.01s test:110.10s) [fail] Yq_1::DescribeJob [default-linux-x86_64-release-asan] (49.46s) assertion failed at ydb/services/fq/ut_integration/fq_ut.cpp:57, TString (anonymous namespace)::CreateNewHistoryAndWaitFinish(const TString &, NYdb::NFq::TClient &, const TString &, const FederatedQuery::QueryMeta::ComputeStatus &): (result.GetStatus() == EStatus::SUCCESS) failed: (BAD_REQUEST != SUCCESS)
: Error: Control Plane is not ready yet. Please retry later., code: 1007 , with diff: (BAD_REQ|S)U(|CC)ES(T|S) TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9 GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12 (anonymous namespace)::CreateNewHistoryAndWaitFinish(TBasicString> const&, NYdb::NFq::TClient&, TBasicString> const&, FederatedQuery::QueryMeta_ComputeStatus const&) at /-S/ydb/services/fq/ut_integration/fq_ut.cpp:57:13 UnRef at /-S/util/generic/ptr.h:640:13 operator() at /-S/ydb/services/fq/ut_integration/fq_ut.cpp:0:1 operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12 ~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16 UnRef at /-S/util/generic/ptr.h:640:13 UnRef at /-S/util/generic/ptr.h:640:13 NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0 ?? at ??:0:0 ?? at ??:0:0 _start at ??:0:0 Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/fq/ut_integration/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/fq/ut_integration/test-results/unittest/testing_out_stuff/Yq_1.DescribeJob.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/fq/ut_integration/test-results/unittest/testing_out_stuff/Yq_1.DescribeJob.out ------ FAIL: 20 - GOOD, 2 - FAIL ydb/services/fq/ut_integration ydb/services/persqueue_v1/ut [size:medium] nchunks:10 ------ [3/10] chunk ran 14 tests (total:422.72s - test:421.91s) [crashed] TPersQueueTest::DisableDeduplication [default-linux-x86_64-release-asan] (24.43s) Test crashed (return code: 100) ==200999==ERROR: LeakSanitizer: detected memory leaks Indirect leak of 7541240 byte(s) in 115 object(s) allocated from: #0 0x19b6f62f in malloc /-S/contrib/libs/clang18-rt/lib/asan/asan_malloc_linux.cpp:68:3 #1 0x1b1592a3 in grpc_event_engine::experimental::MemoryAllocator::MakeSlice(grpc_event_engine::experimental::MemoryRequest) /-S/contrib/libs/grpc/src/core/lib/event_engine/memory_allocator.cc:63:13 #2 0x1b133cad in maybe_make_read_slices /-S/contrib/libs/grpc/src/core/lib/iomgr/tcp_posix.cc:1070:57 #3 0x1b133cad in tcp_handle_read(void*, y_absl::lts_y_20240722::Status) /-S/contrib/libs/grpc/src/core/lib/iomgr/tcp_posix.cc:1094:5 #4 0x1b138107 in Run /-S/contrib/libs/grpc/src/core/lib/iomgr/closure.h:303:5 #5 0x1b138107 in tcp_read(grpc_endpoint*, grpc_slice_buffer*, grpc_closure*, bool, int) /-S/contrib/libs/grpc/src/core/lib/iomgr/tcp_posix.cc:1156:5 #6 0x1b4953a8 in continue_read_action_locked /-S/contrib/libs/grpc/src/core/ext/transport/chttp2/transport/chttp2_transport.cc:2597:3 #7 0x1b4953a8 in read_action_locked(void*, y_absl::lts_y_20240722::Status) /-S/contrib/libs/grpc/src/core/ext/transport/chttp2/transport/chttp2_transport.cc:2586:7 #8 0x1b0231e6 in grpc_combiner_continue_exec_ctx() /-S/contrib/libs/grpc/src/core/lib/iomgr/combiner.cc:231:5 #9 0x1affb7a4 in grpc_core::ExecCtx::Flush() /-S/contrib/libs/grpc/src/core/lib/iomgr/exec_ctx.cc:75:17 #10 0x1b14728d in end_worker ..[snippet truncated].. 
llset.cc:48:10 #14 0x1b121bc7 in cq_next(grpc_completion_queue*, gpr_timespec, void*) /-S/contrib/libs/grpc/src/core/lib/surface/completion_queue.cc:1036:29 #15 0x1b9ad10e in grpc::CompletionQueue::AsyncNextInternal(void**, bool*, gpr_timespec) /-S/contrib/libs/grpc/src/cpp/common/completion_queue_cc.cc:166:15 #16 0x20d0fecf in Next /-S/contrib/libs/grpc/include/grpcpp/completion_queue.h:182:13 #17 0x20d0fecf in NYdbGrpc::Dev::PullEvents(grpc::CompletionQueue*) /-S/ydb/public/sdk/cpp/src/library/grpc/client/grpc_client_low.cpp:190:18 #18 0x1b9c34ae in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12 #19 0x1b9c34ae in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10 #20 0x1b9c34ae in (anonymous namespace)::TThreadFactoryFuncObj::DoExecute() /-S/util/thread/factory.cpp:61:13 #21 0x1b9c39fc in Execute /-S/util/thread/factory.h:15:13 #22 0x1b9c39fc in (anonymous namespace)::TSystemThreadFactory::TPoolThread::ThreadProc(void*) /-S/util/thread/factory.cpp:36:41 #23 0x19eb7c84 in (anonymous namespace)::TPosixThread::ThreadProxy(void*) /-S/util/system/thread.cpp:245:20 #24 0x19b6d178 in asan_thread_start(void*) /-S/contrib/libs/clang18-rt/lib/asan/asan_interceptors.cpp:239:28 SUMMARY: AddressSanitizer: 7900200 byte(s) leaked in 2223 allocation(s). Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/persqueue_v1/ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/persqueue_v1/ut/test-results/unittest/testing_out_stuff/TPersQueueTest.DisableDeduplication.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/persqueue_v1/ut/test-results/unittest/testing_out_stuff/TPersQueueTest.DisableDeduplication.out ------ FAIL: 136 - GOOD, 1 - CRASHED ydb/services/persqueue_v1/ut ydb/services/ydb/backup_ut [size:medium] nchunks:10 ------ [1/10] chunk ran 21 tests (total:313.11s - test:312.78s) [fail] BackupRestore::ReplicasAreNotBackedUp [default-linux-x86_64-release-asan] (18.00s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/BackupRestore.ReplicasAreNotBackedUp.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/BackupRestore.ReplicasAreNotBackedUp.out [fail] BackupRestore::RestoreReplicationWithoutSecret [default-linux-x86_64-release-asan] (10.54s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/BackupRestore.RestoreReplicationWithoutSecret.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/BackupRestore.RestoreReplicationWithoutSecret.out [fail] BackupRestore::RestoreReplicationThatDoesNotUseSecret [default-linux-x86_64-release-asan] (17.63s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. 
TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/BackupRestore.RestoreReplicationThatDoesNotUseSecret.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/BackupRestore.RestoreReplicationThatDoesNotUseSecret.out [fail] BackupRestore::RestoreViewDependentOnAnotherView [default-linux-x86_64-release-asan] (11.06s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/BackupRestore.RestoreViewDependentOnAnotherView.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/BackupRestore.RestoreViewDependentOnAnotherView.out [fail] BackupRestore::RestoreViewReferenceTable [default-linux-x86_64-release-asan] (11.49s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/BackupRestore.RestoreViewReferenceTable.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/BackupRestore.RestoreViewReferenceTable.out [fail] BackupRestore::RestoreViewToDifferentDatabase [default-linux-x86_64-release-asan] (16.05s) (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. 
TCreateSessionResult::GetSession Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/BackupRestore.RestoreViewToDifferentDatabase.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/BackupRestore.RestoreViewToDifferentDatabase.out ------ [2/10] chunk ran 20 tests (total:152.74s - test:152.67s) [crashed] BackupRestore::TestAllPrimitiveTypes-INT8 [default-linux-x86_64-release-asan] (0.00s) Test crashed (return code: -6) See logs for more info Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/BackupRestore.TestAllPrimitiveTypes-INT8.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/BackupRestore.TestAllPrimitiveTypes-INT8.out ------ [3/10] chunk ran 20 tests (total:136.84s - test:136.57s) [crashed] BackupRestore::TestAllPrimitiveTypes-UUID [default-linux-x86_64-release-asan] (0.00s) Test crashed (return code: -6) See logs for more info Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/BackupRestore.TestAllPrimitiveTypes-UUID.err Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/BackupRestore.TestAllPrimitiveTypes-UUID.out ------ [8/10] chunk ran 20 tests (total:178.67s - test:178.40s) [fail] CommonEncryptionRequirementsTest::CommonEncryptionRequirements [default-linux-x86_64-release-asan] (26.56s) assertion failed at ydb/services/ydb/backup_ut/s3_backup_test_base.h:127, TMaybe TS3BackupTestFixture::WaitOpStatus(const TResponseType &, NYdb::EStatus, const TString &) [TResponseType = NYdb::NImport::TImportFromS3Response]: (op->Status().GetStatus() == status) failed: (CANCELLED != SUCCESS) . Status: CANCELLED. Issues:
: Error: Check failed: path: '/Root/Prefix_0/Anonymized_Dir', error: path hasn't been resolved, nearest resolved path: '/Root' (id: [OwnerId: 72057594046644480, LocalPathId: 1])
: Error: Check failed: path: '/Root/Prefix_0/Anonymized_Dir', error: path hasn't been resolved, nearest resolved path: '/Root' (id: [OwnerId: 72057594046644480, LocalPathId: 1]) , with diff: (|SU)C(AN|)CE(LLED|SS)
TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9
GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12
TMaybe TS3BackupTestFixture::WaitOpStatus(NYdb::Dev::NImport::TImportFromS3Response const&, NYdb::Dev::EStatus, TBasicString> const&) at /-S/ydb/services/ydb/backup_ut/s3_backup_test_base.h:0:13
~TStorageBase at /-S/util/generic/maybe_traits.h:57:23
operator() at /-S/ydb/services/ydb/backup_ut/encrypted_backup_ut.cpp:0:1
operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12
~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16
UnRef at /-S/util/generic/ptr.h:640:13
UnRef at /-S/util/generic/ptr.h:640:13
NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0
?? at ??:0:0
?? at ??:0:0
_start at ??:0:0
Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff
Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/CommonEncryptionRequirementsTest.CommonEncryptionRequirements.err
Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/CommonEncryptionRequirementsTest.CommonEncryptionRequirements.out
------ FAIL: 193 - GOOD, 7 - FAIL, 2 - CRASHED ydb/services/ydb/backup_ut
ydb/services/ydb/ut [size:medium] nchunks:60
------ [30/60] chunk ran 5 tests (total:178.00s - test:177.87s)
[fail] YdbLogStore::AlterLogTable [default-linux-x86_64-release-asan] (6.90s)
assertion failed at ydb/services/ydb/ydb_logstore_ut.cpp:435, virtual void NTestSuiteYdbLogStore::TTestCaseAlterLogTable::Execute_(NUnitTest::TTestContext &): (res.GetStatus() == EStatus::SUCCESS) failed: (PRECONDITION_FAILED != SUCCESS)
: Error: Column stores are not supported , with diff: (PRE|SUC)C(ONDITION_FAIL|)E(D|SS)
TBackTrace::Capture() at /-S/util/system/backtrace.cpp:284:9
GetCurrentTest at /-S/library/cpp/testing/unittest/registar.cpp:70:12
NTestSuiteYdbLogStore::TTestCaseAlterLogTable::Execute_(NUnitTest::TTestContext&) at /-S/ydb/services/ydb/ydb_logstore_ut.cpp:0:13
operator() at /-S/ydb/services/ydb/ydb_logstore_ut.cpp:0:1
operator() at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:0:12
~__value_func at /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:398:16
UnRef at /-S/util/generic/ptr.h:640:13
UnRef at /-S/util/generic/ptr.h:640:13
NUnitTest::RunMain(int, char**) at /-S/library/cpp/testing/unittest/utmain.cpp:0:0
?? at ??:0:0
?? at ??:0:0
_start at ??:0:0
Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/ut/test-results/unittest/testing_out_stuff
Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/ut/test-results/unittest/testing_out_stuff/YdbLogStore.AlterLogTable.err
Stdout: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/services/ydb/ut/test-results/unittest/testing_out_stuff/YdbLogStore.AlterLogTable.out
------ FAIL: 287 - GOOD, 1 - FAIL ydb/services/ydb/ut
------ sole chunk ran 2 tests (total:420.81s - setup:0.03s recipes:17.78s test:399.68s recipes:3.14s)
Info: Test run has exceeded 16.0G (16777216K) memory limit with 16.0G (16818780K) used. This may lead to test failure on the Autocheck/CI
You can increase test's ram requirement using REQUIREMENTS(ram:X) in the ya.make
pid rss ref pdirt
1369430 45.3M 45.2M 6.3M test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args
1369438 34.1M 22.2M 10.1M ├─ test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args
1375014 47.4M 42.7M 23.9M │ └─ test_tool run_ut @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_indexes/test-results/unittest/testing_out_stuff/test_tool.args
1375205 2.0G 2.0G 1.9G │ └─ ydb-tests-functional-kqp-kqp_indexes --trace-path-append /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_indexes/test-results/unittest/
1369551 1.7G 1.7G 1.2G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_indexes/test-results/unittest/testing_out_
1369552 1.8G 1.8G 1.3G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_indexes/test-results/unittest/testing_out_
1369553 1.8G 1.7G 1.2G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_indexes/test-results/unittest/testing_out_
1369554 1.7G 1.7G 1.2G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_indexes/test-results/unittest/testing_out_
1369555 1.8G 1.7G 1.3G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_indexes/test-results/unittest/testing_out_
1369556 1.8G 1.8G 1.3G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_indexes/test-results/unittest/testing_out_
1369557 1.8G 1.7G 1.2G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_indexes/test-results/unittest/testing_out_
1369558 1.8G 1.8G 1.3G └─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_indexes/test-results/unittest/testing_out_
Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_indexes/test-results/unittest/run_test.log
Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_indexes/test-results/unittest/testing_out_stuff
Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_indexes/test-results/unittest/testing_out_stuff/stderr
------ sole chunk ran 2 tests (total:91.07s - setup:0.02s recipes:17.23s test:69.50s recipes:4.23s)
Info: Test run has exceeded 16.0G (16777216K) memory limit with 16.3G (17109416K) used. This may lead to test failure on the Autocheck/CI
You can increase test's ram requirement using REQUIREMENTS(ram:X) in the ya.make
pid rss ref pdirt
1369427 45.3M 45.3M 6.4M test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args
1369432 31.5M 19.5M 7.5M ├─ test_tool run_test @/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ya_command_file_0.args
1381626 176M 394M 325M │ └─ ydb_recipe --build-root /home/runner/actions_runner/_work/ydb/ydb/tmp/out --source-root /home/runner/actions_runner/_work/ydb/ydb --gdb-path /home/runner/.ya/tools/v4/89
1369538 1.9G 1.9G 1.5G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_query_svc/test-results/unittest/testing_ou
1369539 2.0G 2.0G 1.6G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_query_svc/test-results/unittest/testing_ou
1369540 1.9G 1.9G 1.5G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_query_svc/test-results/unittest/testing_ou
1369541 2.0G 1.9G 1.5G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_query_svc/test-results/unittest/testing_ou
1369542 2.0G 2.0G 1.6G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_query_svc/test-results/unittest/testing_ou
1369543 2.1G 2.0G 1.6G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_query_svc/test-results/unittest/testing_ou
1369544 2.0G 2.0G 1.6G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_query_svc/test-results/unittest/testing_ou
1369545 2.0G 2.0G 1.5G └─ ydbd server --suppress-version-check --yaml-config=/home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_query_svc/test-results/unittest/testing_ou
Log: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_query_svc/test-results/unittest/run_test.log
Logsdir: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_query_svc/test-results/unittest/testing_out_stuff
Stderr: /home/runner/actions_runner/_work/ydb/ydb/tmp/out/ydb/tests/functional/kqp/kqp_query_svc/test-results/unittest/testing_out_stuff/stderr
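Both memory-limit warnings above point at the same remedy: declare a larger RAM requirement for the affected suite with REQUIREMENTS(ram:X) in its ya.make, as the log message suggests. A minimal sketch follows; only the REQUIREMENTS(ram:X) macro comes from the message above, while the UNITTEST() module wrapper and the value 32 are illustrative assumptions (the default limit reported above is 16.0G, so the value is taken to be in GiB), not something read from the repository.

    # hypothetical ya.make fragment for one of the suites above (e.g. kqp_query_svc);
    # SRCS/PEERDIR and the rest of the module are elided
    UNITTEST()

    # reserve more RAM than the observed ~16.3G peak so the suite is not
    # scheduled under the default 16G limit
    REQUIREMENTS(ram:32)

    END()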
Total 612 suites:
    566 - GOOD
    29 - FAIL
    17 - TIMEOUT
Total 12286 tests:
    11942 - GOOD
    109 - FAIL
    196 - NOT_LAUNCHED
    20 - TIMEOUT
    9 - SKIPPED
    10 - CRASHED
Cache efficiency ratio is 81.15% (42204 of 52009). Local: 0 (0.00%), dist: 9712 (18.67%), by dynamic uids: 0 (0.00%), avoided: 32492 (62.47%)
Dist cache download: count=5573, size=12.37 GiB, speed=91.36 MiB/s
Disk usage for tools/sdk at least 142.05 MiB
Additional disk space consumed for build cache 992.59 GiB
Critical path:
[ 57518 ms] [CC] [DvKPpE1tBefnHWloOzF3Bw tool]: $(BUILD_ROOT)/ydb/core/protos/config.pb.cc [started: 0 (1750794327117), finished: 57518 (1750794384635)]
[ 1398 ms] [AR] [kSogG3qxqLTW-iWpfmEpmg tool]: $(BUILD_ROOT)/ydb/core/protos/libydb-core-protos.a [started: 78582 (1750794405699), finished: 79980 (1750794407097)]
[ 1349 ms] [LD] [WAVbHuYXUiAtnUcCo0YD-w tool]: $(BUILD_ROOT)/ydb/core/base/generated/codegen/ydb-core-base-generated-codegen [started: 93428 (1750794420545), finished: 94777 (1750794421894)]
[ 181 ms] [PR] [IvlvLxViJEuW6S0-i6xxrw default-linux-x86_64 release asan]: $(BUILD_ROOT)/ydb/core/base/generated/runtime_feature_flags.h [started: 94802 (1750794421919), finished: 94983 (1750794422100)]
[173484 ms] [CC] [OMLvgCZq-SlGn7eiIANaKw default-linux-x86_64 release asan]: $(SOURCE_ROOT)/ydb/core/viewer/json_handlers_viewer.cpp [started: 745824 (1750795072941), finished: 919308 (1750795246425)]
[ 507 ms] [AR] [SyAmsI1Sa-Wdt5jwALzfig default-linux-x86_64 release asan]: $(BUILD_ROOT)/ydb/core/viewer/libydb-core-viewer.a [started: 940097 (1750795267214), finished: 940604 (1750795267721)]
[ 27403 ms] [LD] [arCavX9tjVYxxLgdDINJOg default-linux-x86_64 release asan]: $(BUILD_ROOT)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut [started: 2919921 (1750797247038), finished: 2947324 (1750797274441)]
[662179 ms] [TM] [rnd-5555817502003407178 asan default-linux-x86_64 release]: ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest [started: 2952397 (1750797279514), finished: 3614576 (1750797941693)]
[ 33207 ms] [TA] [rnd-9octckj1uz8ui4gh]: $(BUILD_ROOT)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/{meta.json ... results_accumulator.log} [started: 3614846 (1750797941963), finished: 3648053 (1750797975170)]
Time from start: 8141745.974121094 ms, time elapsed by graph 957226 ms, time diff 7184519.974121094 ms.
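Two of the figures above can be cross-checked directly from the reported numbers (this is only a reading of the log, assuming the hit count is the sum of the dist and avoided counts; the tool does not spell its formula out):

    \[
    \frac{9712 + 32492}{52009} = \frac{42204}{52009} \approx 81.15\%,
    \qquad
    8141745.974\,\text{ms} - 957226\,\text{ms} = 7184519.974\,\text{ms}
    \]

so the cache efficiency ratio and the reported "time diff" (time from start minus time elapsed by graph) are internally consistent.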
The longest 10 tasks:
[662179 ms] [TM] [rnd-5555817502003407178 asan default-linux-x86_64 release]: ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest [started: 1750797279514, finished: 1750797941693]
[645294 ms] [TM] [rnd-91wwnm3g4o8pvdct asan default-linux-x86_64 release]: ydb/tests/fq/mem_alloc/py3test [started: 1750800646801, finished: 1750801292095]
[635303 ms] [TM] [rnd-6327413578872967604 asan default-linux-x86_64 release]: ydb/tests/functional/tpc/medium/py3test [started: 1750801284316, finished: 1750801919619]
[634880 ms] [TM] [rnd-9360061400177805656 asan default-linux-x86_64 release]: ydb/tests/functional/tpc/medium/py3test [started: 1750801321010, finished: 1750801955890]
[632687 ms] [TM] [rnd-wb02d4ky0gbjxcbn asan default-linux-x86_64 release]: ydb/tests/olap/s3_import/py3test [started: 1750800713167, finished: 1750801345854]
[631995 ms] [TM] [rnd-4926621446154747883 asan default-linux-x86_64 release]: ydb/tests/functional/benchmarks_init/py3test [started: 1750794403838, finished: 1750795035833]
[627181 ms] [TM] [rnd-4082405510772920134 asan default-linux-x86_64 release]: ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest [started: 1750797280161, finished: 1750797907342]
[622407 ms] [TM] [rnd-11198788149380449042 asan default-linux-x86_64 release]: ydb/core/kqp/ut/runtime/unittest [started: 1750797288246, finished: 1750797910653]
[619652 ms] [TM] [rnd-7273748168789092760 asan default-linux-x86_64 release]: ydb/tests/olap/ttl_tiering/py3test [started: 1750797861617, finished: 1750798481269]
[616416 ms] [TM] [rnd-17391708406588889646 asan default-linux-x86_64 release]: ydb/tests/functional/tpc/medium/py3test [started: 1750801291790, finished: 1750801908206]
Total time by type:
[232849970 ms] [TM] [count: 4964, ave time 46907.73 msec]
[151209850 ms] [CC] [count: 3077, ave time 49141.97 msec]
[ 15512907 ms] [prepare:get from dist cache] [count: 9712, ave time 1597.29 msec]
[ 13033023 ms] [LD] [count: 528, ave time 24683.76 msec]
[ 2607305 ms] [TS] [count: 413, ave time 6313.09 msec]
[ 744557 ms] [prepare:bazel-store] [count: 3, ave time 248185.67 msec]
[ 701412 ms] [prepare:put to dist cache] [count: 4063, ave time 172.63 msec]
[ 538196 ms] [TA] [count: 271, ave time 1985.96 msec]
[ 238254 ms] [prepare:put into local cache, clean build dir] [count: 9729, ave time 24.49 msec]
[ 237887 ms] [prepare:tools] [count: 20, ave time 11894.35 msec]
[ 193865 ms] [prepare:AC] [count: 4, ave time 48466.25 msec]
[ 110863 ms] [AR] [count: 442, ave time 250.82 msec]
[ 105587 ms] [PY] [count: 20, ave time 5279.35 msec]
[ 11789 ms] [PB] [count: 20, ave time 589.45 msec]
[ 1754 ms] [EN] [count: 34, ave time 51.59 msec]
[ 1631 ms] [prepare:resources] [count: 1, ave time 1631.00 msec]
[ 929 ms] [BN] [count: 7, ave time 132.71 msec]
[ 821 ms] [PR] [count: 16, ave time 51.31 msec]
[ 653 ms] [CF] [count: 2, ave time 326.50 msec]
[ 601 ms] [UN] [count: 2, ave time 300.50 msec]
[ 509 ms] [SB] [count: 1, ave time 509.00 msec]
[ 388 ms] [CP] [count: 2, ave time 194.00 msec]
[ 319 ms] [ld] [count: 2, ave time 159.50 msec]
[ 264 ms] [PK] [count: 1, ave time 264.00 msec]
[ 189 ms] [UNKNOWN] [count: 1, ave time 189.00 msec]
[ 167 ms] [BI] [count: 1, ave time 167.00 msec]
[ 123 ms] [PD] [count: 1, ave time 123.00 msec]
[ 32 ms] [prepare:clean] [count: 3, ave time 10.67 msec]
Total tasks times:
Total failed tasks time - 0 ms (0.00%)
Total tests tasks time - 235995471 ms (58.93%)
Total run tasks time - 400473300 ms
Configure time - 43.6 s
Statistics overhead 2453 ms
Info: Dump junit report to /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/junit.xml
Info: Dump results report to /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/report.json
Ok
+ echo 0